author    Linus Torvalds <torvalds@linux-foundation.org>	2014-12-19 21:02:22 -0500
committer Linus Torvalds <torvalds@linux-foundation.org>	2014-12-19 21:02:22 -0500
commit    ed55635e2e4df3169f21ae4047004b7235de956e (patch)
tree      59483776aa04d2b87023059f8a84c5c08d15373f
parent    5be95b7e24bde4d93ff1bff5911b303043753168 (diff)
parent    ae450e246e8540300699480a3780a420a028b73f (diff)
Merge branch 'for-next' of git://git.kernel.org/pub/scm/linux/kernel/git/nab/target-pending
Pull SCSI target fixes from Nicholas Bellinger:
"The highlights this merge window include:
- Allow target fabric drivers to function as built-in. (Roland)
- Fix tcm_loop multi-TPG endpoint nexus bug. (Hannes)
- Move per device config_item_type into se_subsystem_api, allowing
configfs attributes to be defined at module_init time. (Jerome +
nab)
- Convert existing IBLOCK/FILEIO/RAMDISK/PSCSI/TCMU drivers to use
external configfs attributes. (nab)
- A number of iser-target fixes related to active session + network
portal shutdown stability during extended stress testing. (Sagi +
Slava)
- Dynamic allocation of T10-PI contexts for iser-target, fixing a
potentially bogus iscsi_np->tpg_np pointer reference in >= v3.14
code. (Sagi)
- iser-target performance + scalability improvements. (Sagi)
- Fixes for SPC-4 Persistent Reservation AllRegistrants spec
compliance. (Ilias + James + nab)
- Avoid potential short kernel_sendmsg() in iscsi-target for now until
Al's conversion to use msghdr iteration is merged post -rc1.
(Viro)
Also, Sagi has requested a number of iser-target patches (9) that
address stability issues he's encountered during extended stress
testing be considered for v3.10.y + v3.14.y code. Given the amount of
LOC involved, it will certainly require extra backporting effort.
Apologies in advance to Greg-KH & Co on this. Sagi and I will be
working post-merge to ensure they each get applied correctly"
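
The logging rework that dominates the ib_isert.c diff below replaces bare
pr_debug()/pr_err()/pr_warn() calls with isert_dbg()/isert_info()/
isert_warn()/isert_err() wrappers, gated by the new debug_level module
parameter. Only the parameter registration appears in this diff; the wrapper
macros live in ib_isert.h, so the macro bodies below are a minimal sketch of
the pattern, with the per-level thresholds assumed rather than quoted:

/* Sketch of the debug_level-gated logging added by this series.
 * The module_param lines match the diff verbatim; the macro bodies and
 * the ">2" / ">1" thresholds are assumptions - the real definitions are
 * in drivers/infiniband/ulp/isert/ib_isert.h. */
#include <linux/module.h>
#include <linux/printk.h>

int isert_debug_level = 0;
module_param_named(debug_level, isert_debug_level, int, 0644);
MODULE_PARM_DESC(debug_level, "Enable debug tracing if > 0 (default:0)");

#define isert_dbg(fmt, arg...)					\
	do {							\
		if (unlikely(isert_debug_level > 2))		\
			printk(KERN_DEBUG "isert: %s: " fmt,	\
			       __func__, ##arg);		\
	} while (0)

#define isert_info(fmt, arg...)					\
	do {							\
		if (unlikely(isert_debug_level > 1))		\
			pr_info("isert: %s: " fmt,		\
				__func__, ##arg);		\
	} while (0)

Because the parameter is registered with mode 0644, it can be flipped at
runtime via /sys/module/ib_isert/parameters/debug_level as well as at load
time (e.g. modprobe ib_isert debug_level=3), with no rebuild needed.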
* 'for-next' of git://git.kernel.org/pub/scm/linux/kernel/git/nab/target-pending: (53 commits)
target: Allow AllRegistrants to re-RESERVE existing reservation
uapi/linux/target_core_user.h: fix headers_install.sh badness
iscsi-target: Fail connection on short sendmsg writes
iscsi-target: nullify session in failed login sequence
target: Avoid dropping AllRegistrants reservation during unregister
target: Fix R_HOLDER bit usage for AllRegistrants
iscsi-target: Drop left-over bogus iscsi_np->tpg_np
iser-target: Fix wc->wr_id cast warning
iser-target: Remove code duplication
iser-target: Adjust log levels and prettify some prints
iser-target: Use debug_level parameter to control logging level
iser-target: Fix logout sequence
iser-target: Don't wait for session commands from completion context
iser-target: Reduce CQ lock contention by batch polling
iser-target: Introduce isert_poll_budget
iser-target: Remove an atomic operation from the IO path
iser-target: Remove redundant call to isert_conn_terminate
iser-target: Use single CQ for TX and RX
iser-target: Centralize completion elements to a context
iser-target: Cast wr_id with uintptr_t instead of unsinged long
...
23 files changed, 1511 insertions, 1238 deletions
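
Several hunks below also convert wr_id assignments from (unsigned long)
casts to (uintptr_t), per the "Cast wr_id with uintptr_t instead of unsinged
long" commit in the shortlog above. wr_id is a u64 in the ib_send_wr /
ib_recv_wr structures, so routing the pointer through uintptr_t keeps the
round-trip well defined on both 32- and 64-bit builds. A self-contained
illustration (the names here are illustrative, not the driver's):

/* wr_id round-trip: pointer -> uintptr_t -> u64 and back.
 * On 32-bit, casting a pointer straight to a 64-bit integer (or a u64
 * back to a pointer) trips pointer/int size-mismatch warnings;
 * uintptr_t is the integer type guaranteed to hold a pointer. */
#include <stdint.h>

struct rx_desc;				/* stand-in for iser_rx_desc */

static inline uint64_t desc_to_wr_id(struct rx_desc *desc)
{
	return (uintptr_t)desc;
}

static inline struct rx_desc *wr_id_to_desc(uint64_t wr_id)
{
	return (struct rx_desc *)(uintptr_t)wr_id;
}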
diff --git a/drivers/infiniband/ulp/isert/ib_isert.c b/drivers/infiniband/ulp/isert/ib_isert.c
index 10641b7816f4..dafb3c531f96 100644
--- a/drivers/infiniband/ulp/isert/ib_isert.c
+++ b/drivers/infiniband/ulp/isert/ib_isert.c
@@ -22,7 +22,6 @@
 #include <linux/socket.h>
 #include <linux/in.h>
 #include <linux/in6.h>
-#include <linux/llist.h>
 #include <rdma/ib_verbs.h>
 #include <rdma/rdma_cm.h>
 #include <target/target_core_base.h>
@@ -36,11 +35,17 @@
 #define ISERT_MAX_CONN		8
 #define ISER_MAX_RX_CQ_LEN	(ISERT_QP_MAX_RECV_DTOS * ISERT_MAX_CONN)
 #define ISER_MAX_TX_CQ_LEN	(ISERT_QP_MAX_REQ_DTOS  * ISERT_MAX_CONN)
+#define ISER_MAX_CQ_LEN		(ISER_MAX_RX_CQ_LEN + ISER_MAX_TX_CQ_LEN + \
+				 ISERT_MAX_CONN)
+
+int isert_debug_level = 0;
+module_param_named(debug_level, isert_debug_level, int, 0644);
+MODULE_PARM_DESC(debug_level, "Enable debug tracing if > 0 (default:0)");
 
 static DEFINE_MUTEX(device_list_mutex);
 static LIST_HEAD(device_list);
-static struct workqueue_struct *isert_rx_wq;
 static struct workqueue_struct *isert_comp_wq;
+static struct workqueue_struct *isert_release_wq;
 
 static void
 isert_unmap_cmd(struct isert_cmd *isert_cmd, struct isert_conn *isert_conn);
@@ -54,19 +59,32 @@ isert_reg_rdma(struct iscsi_conn *conn, struct iscsi_cmd *cmd,
 	       struct isert_rdma_wr *wr);
 static int
 isert_put_response(struct iscsi_conn *conn, struct iscsi_cmd *cmd);
+static int
+isert_rdma_post_recvl(struct isert_conn *isert_conn);
+static int
+isert_rdma_accept(struct isert_conn *isert_conn);
+struct rdma_cm_id *isert_setup_id(struct isert_np *isert_np);
+
+static inline bool
+isert_prot_cmd(struct isert_conn *conn, struct se_cmd *cmd)
+{
+	return (conn->pi_support &&
+		cmd->prot_op != TARGET_PROT_NORMAL);
+}
+
 
 static void
 isert_qp_event_callback(struct ib_event *e, void *context)
 {
 	struct isert_conn *isert_conn = (struct isert_conn *)context;
 
-	pr_err("isert_qp_event_callback event: %d\n", e->event);
+	isert_err("conn %p event: %d\n", isert_conn, e->event);
 	switch (e->event) {
 	case IB_EVENT_COMM_EST:
 		rdma_notify(isert_conn->conn_cm_id, IB_EVENT_COMM_EST);
 		break;
 	case IB_EVENT_QP_LAST_WQE_REACHED:
-		pr_warn("Reached TX IB_EVENT_QP_LAST_WQE_REACHED:\n");
+		isert_warn("Reached TX IB_EVENT_QP_LAST_WQE_REACHED\n");
 		break;
 	default:
 		break;
@@ -80,39 +98,41 @@ isert_query_device(struct ib_device *ib_dev, struct ib_device_attr *devattr)
 
 	ret = ib_query_device(ib_dev, devattr);
 	if (ret) {
-		pr_err("ib_query_device() failed: %d\n", ret);
+		isert_err("ib_query_device() failed: %d\n", ret);
 		return ret;
 	}
-	pr_debug("devattr->max_sge: %d\n", devattr->max_sge);
-	pr_debug("devattr->max_sge_rd: %d\n", devattr->max_sge_rd);
+	isert_dbg("devattr->max_sge: %d\n", devattr->max_sge);
+	isert_dbg("devattr->max_sge_rd: %d\n", devattr->max_sge_rd);
 
 	return 0;
 }
 
 static int
-isert_conn_setup_qp(struct isert_conn *isert_conn, struct rdma_cm_id *cma_id,
-		    u8 protection)
+isert_conn_setup_qp(struct isert_conn *isert_conn, struct rdma_cm_id *cma_id)
 {
 	struct isert_device *device = isert_conn->conn_device;
 	struct ib_qp_init_attr attr;
-	int ret, index, min_index = 0;
+	struct isert_comp *comp;
+	int ret, i, min = 0;
 
 	mutex_lock(&device_list_mutex);
-	for (index = 0; index < device->cqs_used; index++)
-		if (device->cq_active_qps[index] <
-		    device->cq_active_qps[min_index])
-			min_index = index;
-	device->cq_active_qps[min_index]++;
-	pr_debug("isert_conn_setup_qp: Using min_index: %d\n", min_index);
+	for (i = 0; i < device->comps_used; i++)
+		if (device->comps[i].active_qps <
+		    device->comps[min].active_qps)
+			min = i;
+	comp = &device->comps[min];
+	comp->active_qps++;
+	isert_info("conn %p, using comp %p min_index: %d\n",
+		   isert_conn, comp, min);
 	mutex_unlock(&device_list_mutex);
 
 	memset(&attr, 0, sizeof(struct ib_qp_init_attr));
 	attr.event_handler = isert_qp_event_callback;
 	attr.qp_context = isert_conn;
-	attr.send_cq = device->dev_tx_cq[min_index];
-	attr.recv_cq = device->dev_rx_cq[min_index];
+	attr.send_cq = comp->cq;
+	attr.recv_cq = comp->cq;
 	attr.cap.max_send_wr = ISERT_QP_MAX_REQ_DTOS;
-	attr.cap.max_recv_wr = ISERT_QP_MAX_RECV_DTOS;
+	attr.cap.max_recv_wr = ISERT_QP_MAX_RECV_DTOS + 1;
 	/*
 	 * FIXME: Use devattr.max_sge - 2 for max_send_sge as
 	 * work-around for RDMA_READs with ConnectX-2.
@@ -126,29 +146,29 @@ isert_conn_setup_qp(struct isert_conn *isert_conn, struct rdma_cm_id *cma_id,
 	attr.cap.max_recv_sge = 1;
 	attr.sq_sig_type = IB_SIGNAL_REQ_WR;
 	attr.qp_type = IB_QPT_RC;
-	if (protection)
+	if (device->pi_capable)
 		attr.create_flags |= IB_QP_CREATE_SIGNATURE_EN;
 
-	pr_debug("isert_conn_setup_qp cma_id->device: %p\n",
-		 cma_id->device);
-	pr_debug("isert_conn_setup_qp conn_pd->device: %p\n",
-		 isert_conn->conn_pd->device);
-
 	ret = rdma_create_qp(cma_id, isert_conn->conn_pd, &attr);
 	if (ret) {
-		pr_err("rdma_create_qp failed for cma_id %d\n", ret);
-		return ret;
+		isert_err("rdma_create_qp failed for cma_id %d\n", ret);
+		goto err;
 	}
 	isert_conn->conn_qp = cma_id->qp;
-	pr_debug("rdma_create_qp() returned success >>>>>>>>>>>>>>>>>>>>>>>>>.\n");
 
 	return 0;
+err:
+	mutex_lock(&device_list_mutex);
+	comp->active_qps--;
+	mutex_unlock(&device_list_mutex);
+
+	return ret;
 }
 
 static void
 isert_cq_event_callback(struct ib_event *e, void *context)
 {
-	pr_debug("isert_cq_event_callback event: %d\n", e->event);
+	isert_dbg("event: %d\n", e->event);
 }
 
 static int
@@ -182,6 +202,7 @@ isert_alloc_rx_descriptors(struct isert_conn *isert_conn)
 	}
 
 	isert_conn->conn_rx_desc_head = 0;
+
 	return 0;
 
 dma_map_fail:
@@ -193,6 +214,8 @@ dma_map_fail:
 	kfree(isert_conn->conn_rx_descs);
 	isert_conn->conn_rx_descs = NULL;
 fail:
+	isert_err("conn %p failed to allocate rx descriptors\n", isert_conn);
+
 	return -ENOMEM;
 }
 
@@ -216,27 +239,23 @@ isert_free_rx_descriptors(struct isert_conn *isert_conn)
 	isert_conn->conn_rx_descs = NULL;
 }
 
-static void isert_cq_tx_work(struct work_struct *);
-static void isert_cq_tx_callback(struct ib_cq *, void *);
-static void isert_cq_rx_work(struct work_struct *);
-static void isert_cq_rx_callback(struct ib_cq *, void *);
+static void isert_cq_work(struct work_struct *);
+static void isert_cq_callback(struct ib_cq *, void *);
 
 static int
 isert_create_device_ib_res(struct isert_device *device)
 {
 	struct ib_device *ib_dev = device->ib_device;
-	struct isert_cq_desc *cq_desc;
 	struct ib_device_attr *dev_attr;
-	int ret = 0, i, j;
-	int max_rx_cqe, max_tx_cqe;
+	int ret = 0, i;
+	int max_cqe;
 
 	dev_attr = &device->dev_attr;
 	ret = isert_query_device(ib_dev, dev_attr);
 	if (ret)
 		return ret;
 
-	max_rx_cqe = min(ISER_MAX_RX_CQ_LEN, dev_attr->max_cqe);
-	max_tx_cqe = min(ISER_MAX_TX_CQ_LEN, dev_attr->max_cqe);
+	max_cqe = min(ISER_MAX_CQ_LEN, dev_attr->max_cqe);
 
 	/* asign function handlers */
 	if (dev_attr->device_cap_flags & IB_DEVICE_MEM_MGT_EXTENSIONS &&
@@ -254,55 +273,38 @@ isert_create_device_ib_res(struct isert_device *device)
 	device->pi_capable = dev_attr->device_cap_flags &
 			     IB_DEVICE_SIGNATURE_HANDOVER ? true : false;
 
-	device->cqs_used = min_t(int, num_online_cpus(),
-				 device->ib_device->num_comp_vectors);
-	device->cqs_used = min(ISERT_MAX_CQ, device->cqs_used);
-	pr_debug("Using %d CQs, device %s supports %d vectors support "
-		 "Fast registration %d pi_capable %d\n",
-		 device->cqs_used, device->ib_device->name,
-		 device->ib_device->num_comp_vectors, device->use_fastreg,
-		 device->pi_capable);
-	device->cq_desc = kzalloc(sizeof(struct isert_cq_desc) *
-				device->cqs_used, GFP_KERNEL);
-	if (!device->cq_desc) {
-		pr_err("Unable to allocate device->cq_desc\n");
+	device->comps_used = min(ISERT_MAX_CQ, min_t(int, num_online_cpus(),
+				 device->ib_device->num_comp_vectors));
+	isert_info("Using %d CQs, %s supports %d vectors support "
+		   "Fast registration %d pi_capable %d\n",
+		   device->comps_used, device->ib_device->name,
+		   device->ib_device->num_comp_vectors, device->use_fastreg,
+		   device->pi_capable);
+
+	device->comps = kcalloc(device->comps_used, sizeof(struct isert_comp),
+				GFP_KERNEL);
+	if (!device->comps) {
+		isert_err("Unable to allocate completion contexts\n");
 		return -ENOMEM;
 	}
-	cq_desc = device->cq_desc;
-
-	for (i = 0; i < device->cqs_used; i++) {
-		cq_desc[i].device = device;
-		cq_desc[i].cq_index = i;
-
-		INIT_WORK(&cq_desc[i].cq_rx_work, isert_cq_rx_work);
-		device->dev_rx_cq[i] = ib_create_cq(device->ib_device,
-						isert_cq_rx_callback,
-						isert_cq_event_callback,
-						(void *)&cq_desc[i],
-						max_rx_cqe, i);
-		if (IS_ERR(device->dev_rx_cq[i])) {
-			ret = PTR_ERR(device->dev_rx_cq[i]);
-			device->dev_rx_cq[i] = NULL;
-			goto out_cq;
-		}
 
-		INIT_WORK(&cq_desc[i].cq_tx_work, isert_cq_tx_work);
-		device->dev_tx_cq[i] = ib_create_cq(device->ib_device,
-						isert_cq_tx_callback,
-						isert_cq_event_callback,
-						(void *)&cq_desc[i],
-						max_tx_cqe, i);
-		if (IS_ERR(device->dev_tx_cq[i])) {
-			ret = PTR_ERR(device->dev_tx_cq[i]);
-			device->dev_tx_cq[i] = NULL;
-			goto out_cq;
-		}
+	for (i = 0; i < device->comps_used; i++) {
+		struct isert_comp *comp = &device->comps[i];
 
-		ret = ib_req_notify_cq(device->dev_rx_cq[i], IB_CQ_NEXT_COMP);
-		if (ret)
+		comp->device = device;
+		INIT_WORK(&comp->work, isert_cq_work);
+		comp->cq = ib_create_cq(device->ib_device,
+					isert_cq_callback,
+					isert_cq_event_callback,
+					(void *)comp,
+					max_cqe, i);
+		if (IS_ERR(comp->cq)) {
+			ret = PTR_ERR(comp->cq);
+			comp->cq = NULL;
 			goto out_cq;
+		}
 
-		ret = ib_req_notify_cq(device->dev_tx_cq[i], IB_CQ_NEXT_COMP);
+		ret = ib_req_notify_cq(comp->cq, IB_CQ_NEXT_COMP);
 		if (ret)
 			goto out_cq;
 	}
@@ -310,19 +312,15 @@ isert_create_device_ib_res(struct isert_device *device)
 	return 0;
 
 out_cq:
-	for (j = 0; j < i; j++) {
-		cq_desc = &device->cq_desc[j];
+	for (i = 0; i < device->comps_used; i++) {
+		struct isert_comp *comp = &device->comps[i];
 
-		if (device->dev_rx_cq[j]) {
-			cancel_work_sync(&cq_desc->cq_rx_work);
-			ib_destroy_cq(device->dev_rx_cq[j]);
-		}
-		if (device->dev_tx_cq[j]) {
-			cancel_work_sync(&cq_desc->cq_tx_work);
-			ib_destroy_cq(device->dev_tx_cq[j]);
+		if (comp->cq) {
+			cancel_work_sync(&comp->work);
+			ib_destroy_cq(comp->cq);
 		}
 	}
-	kfree(device->cq_desc);
+	kfree(device->comps);
 
 	return ret;
 }
@@ -330,21 +328,18 @@ out_cq:
 static void
 isert_free_device_ib_res(struct isert_device *device)
 {
-	struct isert_cq_desc *cq_desc;
 	int i;
 
-	for (i = 0; i < device->cqs_used; i++) {
-		cq_desc = &device->cq_desc[i];
+	isert_info("device %p\n", device);
 
-		cancel_work_sync(&cq_desc->cq_rx_work);
-		cancel_work_sync(&cq_desc->cq_tx_work);
-		ib_destroy_cq(device->dev_rx_cq[i]);
-		ib_destroy_cq(device->dev_tx_cq[i]);
-		device->dev_rx_cq[i] = NULL;
-		device->dev_tx_cq[i] = NULL;
-	}
+	for (i = 0; i < device->comps_used; i++) {
+		struct isert_comp *comp = &device->comps[i];
 
-	kfree(device->cq_desc);
+		cancel_work_sync(&comp->work);
+		ib_destroy_cq(comp->cq);
+		comp->cq = NULL;
+	}
+	kfree(device->comps);
 }
 
 static void
@@ -352,6 +347,7 @@ isert_device_try_release(struct isert_device *device)
 {
 	mutex_lock(&device_list_mutex);
 	device->refcount--;
+	isert_info("device %p refcount %d\n", device, device->refcount);
 	if (!device->refcount) {
 		isert_free_device_ib_res(device);
 		list_del(&device->dev_node);
@@ -370,6 +366,8 @@ isert_device_find_by_ib_dev(struct rdma_cm_id *cma_id)
 	list_for_each_entry(device, &device_list, dev_node) {
 		if (device->ib_device->node_guid == cma_id->device->node_guid) {
 			device->refcount++;
+			isert_info("Found iser device %p refcount %d\n",
+				   device, device->refcount);
 			mutex_unlock(&device_list_mutex);
 			return device;
 		}
@@ -393,6 +391,8 @@ isert_device_find_by_ib_dev(struct rdma_cm_id *cma_id)
 
 	device->refcount++;
 	list_add_tail(&device->dev_node, &device_list);
+	isert_info("Created a new iser device %p refcount %d\n",
+		   device, device->refcount);
 	mutex_unlock(&device_list_mutex);
 
 	return device;
@@ -407,7 +407,7 @@ isert_conn_free_fastreg_pool(struct isert_conn *isert_conn)
 	if (list_empty(&isert_conn->conn_fr_pool))
 		return;
 
-	pr_debug("Freeing conn %p fastreg pool", isert_conn);
+	isert_info("Freeing conn %p fastreg pool", isert_conn);
 
 	list_for_each_entry_safe(fr_desc, tmp,
 				 &isert_conn->conn_fr_pool, list) {
@@ -425,87 +425,97 @@ isert_conn_free_fastreg_pool(struct isert_conn *isert_conn)
 	}
 
 	if (i < isert_conn->conn_fr_pool_size)
-		pr_warn("Pool still has %d regions registered\n",
+		isert_warn("Pool still has %d regions registered\n",
 			isert_conn->conn_fr_pool_size - i);
 }
 
 static int
+isert_create_pi_ctx(struct fast_reg_descriptor *desc,
+		    struct ib_device *device,
+		    struct ib_pd *pd)
+{
+	struct ib_mr_init_attr mr_init_attr;
+	struct pi_context *pi_ctx;
+	int ret;
+
+	pi_ctx = kzalloc(sizeof(*desc->pi_ctx), GFP_KERNEL);
+	if (!pi_ctx) {
+		isert_err("Failed to allocate pi context\n");
+		return -ENOMEM;
+	}
+
+	pi_ctx->prot_frpl = ib_alloc_fast_reg_page_list(device,
+					    ISCSI_ISER_SG_TABLESIZE);
+	if (IS_ERR(pi_ctx->prot_frpl)) {
+		isert_err("Failed to allocate prot frpl err=%ld\n",
+			  PTR_ERR(pi_ctx->prot_frpl));
+		ret = PTR_ERR(pi_ctx->prot_frpl);
+		goto err_pi_ctx;
+	}
+
+	pi_ctx->prot_mr = ib_alloc_fast_reg_mr(pd, ISCSI_ISER_SG_TABLESIZE);
+	if (IS_ERR(pi_ctx->prot_mr)) {
+		isert_err("Failed to allocate prot frmr err=%ld\n",
+			  PTR_ERR(pi_ctx->prot_mr));
+		ret = PTR_ERR(pi_ctx->prot_mr);
+		goto err_prot_frpl;
+	}
+	desc->ind |= ISERT_PROT_KEY_VALID;
+
+	memset(&mr_init_attr, 0, sizeof(mr_init_attr));
+	mr_init_attr.max_reg_descriptors = 2;
+	mr_init_attr.flags |= IB_MR_SIGNATURE_EN;
+	pi_ctx->sig_mr = ib_create_mr(pd, &mr_init_attr);
+	if (IS_ERR(pi_ctx->sig_mr)) {
+		isert_err("Failed to allocate signature enabled mr err=%ld\n",
+			  PTR_ERR(pi_ctx->sig_mr));
+		ret = PTR_ERR(pi_ctx->sig_mr);
+		goto err_prot_mr;
+	}
+
+	desc->pi_ctx = pi_ctx;
+	desc->ind |= ISERT_SIG_KEY_VALID;
+	desc->ind &= ~ISERT_PROTECTED;
+
+	return 0;
+
+err_prot_mr:
+	ib_dereg_mr(desc->pi_ctx->prot_mr);
+err_prot_frpl:
+	ib_free_fast_reg_page_list(desc->pi_ctx->prot_frpl);
+err_pi_ctx:
+	kfree(desc->pi_ctx);
+
+	return ret;
+}
+
+static int
 isert_create_fr_desc(struct ib_device *ib_device, struct ib_pd *pd,
-		     struct fast_reg_descriptor *fr_desc, u8 protection)
+		     struct fast_reg_descriptor *fr_desc)
 {
 	int ret;
 
 	fr_desc->data_frpl = ib_alloc_fast_reg_page_list(ib_device,
 							 ISCSI_ISER_SG_TABLESIZE);
 	if (IS_ERR(fr_desc->data_frpl)) {
-		pr_err("Failed to allocate data frpl err=%ld\n",
-		       PTR_ERR(fr_desc->data_frpl));
+		isert_err("Failed to allocate data frpl err=%ld\n",
+			  PTR_ERR(fr_desc->data_frpl));
 		return PTR_ERR(fr_desc->data_frpl);
 	}
 
 	fr_desc->data_mr = ib_alloc_fast_reg_mr(pd, ISCSI_ISER_SG_TABLESIZE);
 	if (IS_ERR(fr_desc->data_mr)) {
-		pr_err("Failed to allocate data frmr err=%ld\n",
-		       PTR_ERR(fr_desc->data_mr));
+		isert_err("Failed to allocate data frmr err=%ld\n",
+			  PTR_ERR(fr_desc->data_mr));
 		ret = PTR_ERR(fr_desc->data_mr);
 		goto err_data_frpl;
 	}
-	pr_debug("Create fr_desc %p page_list %p\n",
-		 fr_desc, fr_desc->data_frpl->page_list);
 	fr_desc->ind |= ISERT_DATA_KEY_VALID;
 
-	if (protection) {
-		struct ib_mr_init_attr mr_init_attr = {0};
-		struct pi_context *pi_ctx;
-
-		fr_desc->pi_ctx = kzalloc(sizeof(*fr_desc->pi_ctx), GFP_KERNEL);
-		if (!fr_desc->pi_ctx) {
-			pr_err("Failed to allocate pi context\n");
-			ret = -ENOMEM;
-			goto err_data_mr;
-		}
-		pi_ctx = fr_desc->pi_ctx;
-
-		pi_ctx->prot_frpl = ib_alloc_fast_reg_page_list(ib_device,
-						    ISCSI_ISER_SG_TABLESIZE);
-		if (IS_ERR(pi_ctx->prot_frpl)) {
-			pr_err("Failed to allocate prot frpl err=%ld\n",
-			       PTR_ERR(pi_ctx->prot_frpl));
-			ret = PTR_ERR(pi_ctx->prot_frpl);
-			goto err_pi_ctx;
-		}
-
-		pi_ctx->prot_mr = ib_alloc_fast_reg_mr(pd, ISCSI_ISER_SG_TABLESIZE);
-		if (IS_ERR(pi_ctx->prot_mr)) {
-			pr_err("Failed to allocate prot frmr err=%ld\n",
-			       PTR_ERR(pi_ctx->prot_mr));
-			ret = PTR_ERR(pi_ctx->prot_mr);
-			goto err_prot_frpl;
-		}
-		fr_desc->ind |= ISERT_PROT_KEY_VALID;
-
-		mr_init_attr.max_reg_descriptors = 2;
-		mr_init_attr.flags |= IB_MR_SIGNATURE_EN;
-		pi_ctx->sig_mr = ib_create_mr(pd, &mr_init_attr);
-		if (IS_ERR(pi_ctx->sig_mr)) {
-			pr_err("Failed to allocate signature enabled mr err=%ld\n",
-			       PTR_ERR(pi_ctx->sig_mr));
-			ret = PTR_ERR(pi_ctx->sig_mr);
-			goto err_prot_mr;
-		}
-		fr_desc->ind |= ISERT_SIG_KEY_VALID;
-	}
-	fr_desc->ind &= ~ISERT_PROTECTED;
+	isert_dbg("Created fr_desc %p\n", fr_desc);
 
 	return 0;
-err_prot_mr:
-	ib_dereg_mr(fr_desc->pi_ctx->prot_mr);
-err_prot_frpl:
-	ib_free_fast_reg_page_list(fr_desc->pi_ctx->prot_frpl);
-err_pi_ctx:
-	kfree(fr_desc->pi_ctx);
-err_data_mr:
-	ib_dereg_mr(fr_desc->data_mr);
+
 err_data_frpl:
 	ib_free_fast_reg_page_list(fr_desc->data_frpl);
 
@@ -513,7 +523,7 @@ err_data_frpl:
 }
 
 static int
-isert_conn_create_fastreg_pool(struct isert_conn *isert_conn, u8 pi_support)
+isert_conn_create_fastreg_pool(struct isert_conn *isert_conn)
 {
 	struct fast_reg_descriptor *fr_desc;
 	struct isert_device *device = isert_conn->conn_device;
@@ -531,16 +541,15 @@ isert_conn_create_fastreg_pool(struct isert_conn *isert_conn, u8 pi_support)
 	for (i = 0; i < tag_num; i++) {
 		fr_desc = kzalloc(sizeof(*fr_desc), GFP_KERNEL);
 		if (!fr_desc) {
-			pr_err("Failed to allocate fast_reg descriptor\n");
+			isert_err("Failed to allocate fast_reg descriptor\n");
 			ret = -ENOMEM;
 			goto err;
 		}
 
 		ret = isert_create_fr_desc(device->ib_device,
-					   isert_conn->conn_pd, fr_desc,
-					   pi_support);
+					   isert_conn->conn_pd, fr_desc);
 		if (ret) {
-			pr_err("Failed to create fastreg descriptor err=%d\n",
+			isert_err("Failed to create fastreg descriptor err=%d\n",
 			       ret);
 			kfree(fr_desc);
 			goto err;
@@ -550,7 +559,7 @@ isert_conn_create_fastreg_pool(struct isert_conn *isert_conn, u8 pi_support)
 		isert_conn->conn_fr_pool_size++;
 	}
 
-	pr_debug("Creating conn %p fastreg pool size=%d",
+	isert_dbg("Creating conn %p fastreg pool size=%d",
 		 isert_conn, isert_conn->conn_fr_pool_size);
 
 	return 0;
@@ -563,47 +572,45 @@ err:
 static int
 isert_connect_request(struct rdma_cm_id *cma_id, struct rdma_cm_event *event)
 {
-	struct iscsi_np *np = cma_id->context;
-	struct isert_np *isert_np = np->np_context;
+	struct isert_np *isert_np = cma_id->context;
+	struct iscsi_np *np = isert_np->np;
 	struct isert_conn *isert_conn;
 	struct isert_device *device;
 	struct ib_device *ib_dev = cma_id->device;
 	int ret = 0;
-	u8 pi_support;
 
 	spin_lock_bh(&np->np_thread_lock);
 	if (!np->enabled) {
 		spin_unlock_bh(&np->np_thread_lock);
-		pr_debug("iscsi_np is not enabled, reject connect request\n");
+		isert_dbg("iscsi_np is not enabled, reject connect request\n");
 		return rdma_reject(cma_id, NULL, 0);
 	}
 	spin_unlock_bh(&np->np_thread_lock);
 
-	pr_debug("Entering isert_connect_request cma_id: %p, context: %p\n",
-		 cma_id, cma_id->context);
+	isert_dbg("cma_id: %p, portal: %p\n",
+		  cma_id, cma_id->context);
 
 	isert_conn = kzalloc(sizeof(struct isert_conn), GFP_KERNEL);
 	if (!isert_conn) {
-		pr_err("Unable to allocate isert_conn\n");
+		isert_err("Unable to allocate isert_conn\n");
 		return -ENOMEM;
 	}
 	isert_conn->state = ISER_CONN_INIT;
 	INIT_LIST_HEAD(&isert_conn->conn_accept_node);
 	init_completion(&isert_conn->conn_login_comp);
+	init_completion(&isert_conn->login_req_comp);
 	init_completion(&isert_conn->conn_wait);
-	init_completion(&isert_conn->conn_wait_comp_err);
 	kref_init(&isert_conn->conn_kref);
 	mutex_init(&isert_conn->conn_mutex);
 	spin_lock_init(&isert_conn->conn_lock);
 	INIT_LIST_HEAD(&isert_conn->conn_fr_pool);
 
-	cma_id->context = isert_conn;
 	isert_conn->conn_cm_id = cma_id;
 
 	isert_conn->login_buf = kzalloc(ISCSI_DEF_MAX_RECV_SEG_LEN +
 					ISER_RX_LOGIN_SIZE, GFP_KERNEL);
 	if (!isert_conn->login_buf) {
-		pr_err("Unable to allocate isert_conn->login_buf\n");
+		isert_err("Unable to allocate isert_conn->login_buf\n");
 		ret = -ENOMEM;
 		goto out;
 	}
@@ -611,7 +618,7 @@ isert_connect_request(struct rdma_cm_id *cma_id, struct rdma_cm_event *event)
 	isert_conn->login_req_buf = isert_conn->login_buf;
 	isert_conn->login_rsp_buf = isert_conn->login_buf +
 				    ISCSI_DEF_MAX_RECV_SEG_LEN;
-	pr_debug("Set login_buf: %p login_req_buf: %p login_rsp_buf: %p\n",
+	isert_dbg("Set login_buf: %p login_req_buf: %p login_rsp_buf: %p\n",
 		 isert_conn->login_buf, isert_conn->login_req_buf,
 		 isert_conn->login_rsp_buf);
 
@@ -621,7 +628,7 @@ isert_connect_request(struct rdma_cm_id *cma_id, struct rdma_cm_event *event)
 
 	ret = ib_dma_mapping_error(ib_dev, isert_conn->login_req_dma);
 	if (ret) {
-		pr_err("ib_dma_mapping_error failed for login_req_dma: %d\n",
+		isert_err("ib_dma_mapping_error failed for login_req_dma: %d\n",
 		       ret);
 		isert_conn->login_req_dma = 0;
 		goto out_login_buf;
@@ -633,7 +640,7 @@ isert_connect_request(struct rdma_cm_id *cma_id, struct rdma_cm_event *event)
 
 	ret = ib_dma_mapping_error(ib_dev, isert_conn->login_rsp_dma);
 	if (ret) {
-		pr_err("ib_dma_mapping_error failed for login_rsp_dma: %d\n",
+		isert_err("ib_dma_mapping_error failed for login_rsp_dma: %d\n",
 		       ret);
 		isert_conn->login_rsp_dma = 0;
 		goto out_req_dma_map;
@@ -649,13 +656,13 @@ isert_connect_request(struct rdma_cm_id *cma_id, struct rdma_cm_event *event)
 	isert_conn->initiator_depth = min_t(u8,
 				event->param.conn.initiator_depth,
 				device->dev_attr.max_qp_init_rd_atom);
-	pr_debug("Using initiator_depth: %u\n", isert_conn->initiator_depth);
+	isert_dbg("Using initiator_depth: %u\n", isert_conn->initiator_depth);
 
 	isert_conn->conn_device = device;
 	isert_conn->conn_pd = ib_alloc_pd(isert_conn->conn_device->ib_device);
 	if (IS_ERR(isert_conn->conn_pd)) {
 		ret = PTR_ERR(isert_conn->conn_pd);
-		pr_err("ib_alloc_pd failed for conn %p: ret=%d\n",
+		isert_err("ib_alloc_pd failed for conn %p: ret=%d\n",
 		       isert_conn, ret);
 		goto out_pd;
 	}
@@ -664,20 +671,20 @@ isert_connect_request(struct rdma_cm_id *cma_id, struct rdma_cm_event *event)
 					   IB_ACCESS_LOCAL_WRITE);
 	if (IS_ERR(isert_conn->conn_mr)) {
 		ret = PTR_ERR(isert_conn->conn_mr);
-		pr_err("ib_get_dma_mr failed for conn %p: ret=%d\n",
+		isert_err("ib_get_dma_mr failed for conn %p: ret=%d\n",
 		       isert_conn, ret);
 		goto out_mr;
 	}
 
-	pi_support = np->tpg_np->tpg->tpg_attrib.t10_pi;
-	if (pi_support && !device->pi_capable) {
-		pr_err("Protection information requested but not supported, "
-		       "rejecting connect request\n");
-		ret = rdma_reject(cma_id, NULL, 0);
-		goto out_mr;
-	}
+	ret = isert_conn_setup_qp(isert_conn, cma_id);
+	if (ret)
+		goto out_conn_dev;
 
-	ret = isert_conn_setup_qp(isert_conn, cma_id, pi_support);
+	ret = isert_rdma_post_recvl(isert_conn);
+	if (ret)
+		goto out_conn_dev;
+
+	ret = isert_rdma_accept(isert_conn);
 	if (ret)
 		goto out_conn_dev;
 
@@ -685,7 +692,7 @@ isert_connect_request(struct rdma_cm_id *cma_id, struct rdma_cm_event *event)
 	list_add_tail(&isert_conn->conn_accept_node, &isert_np->np_accept_list);
 	mutex_unlock(&isert_np->np_accept_mutex);
 
-	pr_debug("isert_connect_request() up np_sem np: %p\n", np);
+	isert_info("np %p: Allow accept_np to continue\n", np);
 	up(&isert_np->np_sem);
 	return 0;
 
@@ -705,6 +712,7 @@ out_login_buf:
 	kfree(isert_conn->login_buf);
 out:
 	kfree(isert_conn);
+	rdma_reject(cma_id, NULL, 0);
 	return ret;
 }
 
@@ -713,24 +721,25 @@ isert_connect_release(struct isert_conn *isert_conn)
 {
 	struct ib_device *ib_dev = isert_conn->conn_cm_id->device;
 	struct isert_device *device = isert_conn->conn_device;
-	int cq_index;
 
-	pr_debug("Entering isert_connect_release(): >>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>\n");
+	isert_dbg("conn %p\n", isert_conn);
 
 	if (device && device->use_fastreg)
 		isert_conn_free_fastreg_pool(isert_conn);
 
+	isert_free_rx_descriptors(isert_conn);
+	rdma_destroy_id(isert_conn->conn_cm_id);
+
 	if (isert_conn->conn_qp) {
-		cq_index = ((struct isert_cq_desc *)
-			isert_conn->conn_qp->recv_cq->cq_context)->cq_index;
-		pr_debug("isert_connect_release: cq_index: %d\n", cq_index);
-		isert_conn->conn_device->cq_active_qps[cq_index]--;
+		struct isert_comp *comp = isert_conn->conn_qp->recv_cq->cq_context;
 
-		rdma_destroy_qp(isert_conn->conn_cm_id);
-	}
+		isert_dbg("dec completion context %p active_qps\n", comp);
+		mutex_lock(&device_list_mutex);
+		comp->active_qps--;
+		mutex_unlock(&device_list_mutex);
 
-	isert_free_rx_descriptors(isert_conn);
-	rdma_destroy_id(isert_conn->conn_cm_id);
+		ib_destroy_qp(isert_conn->conn_qp);
+	}
 
 	ib_dereg_mr(isert_conn->conn_mr);
 	ib_dealloc_pd(isert_conn->conn_pd);
@@ -747,16 +756,24 @@ isert_connect_release(struct isert_conn *isert_conn)
 
 	if (device)
 		isert_device_try_release(device);
-
-	pr_debug("Leaving isert_connect_release >>>>>>>>>>>>\n");
 }
 
 static void
 isert_connected_handler(struct rdma_cm_id *cma_id)
 {
-	struct isert_conn *isert_conn = cma_id->context;
+	struct isert_conn *isert_conn = cma_id->qp->qp_context;
 
-	kref_get(&isert_conn->conn_kref);
+	isert_info("conn %p\n", isert_conn);
+
+	if (!kref_get_unless_zero(&isert_conn->conn_kref)) {
+		isert_warn("conn %p connect_release is running\n", isert_conn);
+		return;
+	}
+
+	mutex_lock(&isert_conn->conn_mutex);
+	if (isert_conn->state != ISER_CONN_FULL_FEATURE)
+		isert_conn->state = ISER_CONN_UP;
+	mutex_unlock(&isert_conn->conn_mutex);
 }
 
 static void
@@ -765,8 +782,8 @@ isert_release_conn_kref(struct kref *kref)
 	struct isert_conn *isert_conn = container_of(kref,
 				struct isert_conn, conn_kref);
 
-	pr_debug("Calling isert_connect_release for final kref %s/%d\n",
-		 current->comm, current->pid);
+	isert_info("conn %p final kref %s/%d\n", isert_conn, current->comm,
+		   current->pid);
 
 	isert_connect_release(isert_conn);
 }
@@ -777,75 +794,111 @@ isert_put_conn(struct isert_conn *isert_conn)
 	kref_put(&isert_conn->conn_kref, isert_release_conn_kref);
 }
 
+/**
+ * isert_conn_terminate() - Initiate connection termination
+ * @isert_conn: isert connection struct
+ *
+ * Notes:
+ * In case the connection state is FULL_FEATURE, move state
+ * to TEMINATING and start teardown sequence (rdma_disconnect).
+ * In case the connection state is UP, complete flush as well.
+ *
+ * This routine must be called with conn_mutex held. Thus it is
+ * safe to call multiple times.
+ */
 static void
-isert_disconnect_work(struct work_struct *work)
+isert_conn_terminate(struct isert_conn *isert_conn)
 {
-	struct isert_conn *isert_conn = container_of(work,
-				struct isert_conn, conn_logout_work);
+	int err;
 
-	pr_debug("isert_disconnect_work(): >>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>\n");
-	mutex_lock(&isert_conn->conn_mutex);
-	if (isert_conn->state == ISER_CONN_UP)
+	switch (isert_conn->state) {
+	case ISER_CONN_TERMINATING:
+		break;
+	case ISER_CONN_UP:
+	case ISER_CONN_FULL_FEATURE: /* FALLTHRU */
+		isert_info("Terminating conn %p state %d\n",
+			   isert_conn, isert_conn->state);
 		isert_conn->state = ISER_CONN_TERMINATING;
-
-	if (isert_conn->post_recv_buf_count == 0 &&
-	    atomic_read(&isert_conn->post_send_buf_count) == 0) {
-		mutex_unlock(&isert_conn->conn_mutex);
-		goto wake_up;
-	}
-	if (!isert_conn->conn_cm_id) {
-		mutex_unlock(&isert_conn->conn_mutex);
-		isert_put_conn(isert_conn);
-		return;
+		err = rdma_disconnect(isert_conn->conn_cm_id);
+		if (err)
+			isert_warn("Failed rdma_disconnect isert_conn %p\n",
+				   isert_conn);
+		break;
+	default:
+		isert_warn("conn %p teminating in state %d\n",
+			   isert_conn, isert_conn->state);
 	}
+}
 
-	if (isert_conn->disconnect) {
-		/* Send DREQ/DREP towards our initiator */
-		rdma_disconnect(isert_conn->conn_cm_id);
-	}
+static int
+isert_np_cma_handler(struct isert_np *isert_np,
+		     enum rdma_cm_event_type event)
+{
+	isert_dbg("isert np %p, handling event %d\n", isert_np, event);
 
-	mutex_unlock(&isert_conn->conn_mutex);
+	switch (event) {
+	case RDMA_CM_EVENT_DEVICE_REMOVAL:
+		isert_np->np_cm_id = NULL;
+		break;
+	case RDMA_CM_EVENT_ADDR_CHANGE:
+		isert_np->np_cm_id = isert_setup_id(isert_np);
+		if (IS_ERR(isert_np->np_cm_id)) {
+			isert_err("isert np %p setup id failed: %ld\n",
+				  isert_np, PTR_ERR(isert_np->np_cm_id));
+			isert_np->np_cm_id = NULL;
+		}
+		break;
+	default:
+		isert_err("isert np %p Unexpected event %d\n",
+			  isert_np, event);
+	}
 
-wake_up:
-	complete(&isert_conn->conn_wait);
+	return -1;
 }
 
 static int
-isert_disconnected_handler(struct rdma_cm_id *cma_id, bool disconnect)
+isert_disconnected_handler(struct rdma_cm_id *cma_id,
+			   enum rdma_cm_event_type event)
 {
+	struct isert_np *isert_np = cma_id->context;
 	struct isert_conn *isert_conn;
 
-	if (!cma_id->qp) {
-		struct isert_np *isert_np = cma_id->context;
+	if (isert_np->np_cm_id == cma_id)
+		return isert_np_cma_handler(cma_id->context, event);
 
-		isert_np->np_cm_id = NULL;
-		return -1;
-	}
+	isert_conn = cma_id->qp->qp_context;
 
-	isert_conn = (struct isert_conn *)cma_id->context;
+	mutex_lock(&isert_conn->conn_mutex);
+	isert_conn_terminate(isert_conn);
+	mutex_unlock(&isert_conn->conn_mutex);
 
-	isert_conn->disconnect = disconnect;
-	INIT_WORK(&isert_conn->conn_logout_work, isert_disconnect_work);
-	schedule_work(&isert_conn->conn_logout_work);
+	isert_info("conn %p completing conn_wait\n", isert_conn);
+	complete(&isert_conn->conn_wait);
 
 	return 0;
 }
 
+static void
+isert_connect_error(struct rdma_cm_id *cma_id)
+{
+	struct isert_conn *isert_conn = cma_id->qp->qp_context;
+
+	isert_put_conn(isert_conn);
+}
+
 static int
 isert_cma_handler(struct rdma_cm_id *cma_id, struct rdma_cm_event *event)
 {
 	int ret = 0;
-	bool disconnect = false;
 
-	pr_debug("isert_cma_handler: event %d status %d conn %p id %p\n",
-		 event->event, event->status, cma_id->context, cma_id);
+	isert_info("event %d status %d id %p np %p\n", event->event,
+		   event->status, cma_id, cma_id->context);
 
 	switch (event->event) {
 	case RDMA_CM_EVENT_CONNECT_REQUEST:
 		ret = isert_connect_request(cma_id, event);
 		if (ret)
-			pr_err("isert_cma_handler failed RDMA_CM_EVENT: 0x%08x %d\n",
-			       event->event, ret);
+			isert_err("failed handle connect request %d\n", ret);
 		break;
 	case RDMA_CM_EVENT_ESTABLISHED:
 		isert_connected_handler(cma_id);
@@ -853,13 +906,16 @@ isert_cma_handler(struct rdma_cm_id *cma_id, struct rdma_cm_event *event)
 	case RDMA_CM_EVENT_ADDR_CHANGE:    /* FALLTHRU */
 	case RDMA_CM_EVENT_DISCONNECTED:   /* FALLTHRU */
 	case RDMA_CM_EVENT_DEVICE_REMOVAL: /* FALLTHRU */
-		disconnect = true;
 	case RDMA_CM_EVENT_TIMEWAIT_EXIT:  /* FALLTHRU */
-		ret = isert_disconnected_handler(cma_id, disconnect);
+		ret = isert_disconnected_handler(cma_id, event->event);
 		break;
+	case RDMA_CM_EVENT_REJECTED:       /* FALLTHRU */
+	case RDMA_CM_EVENT_UNREACHABLE:    /* FALLTHRU */
 	case RDMA_CM_EVENT_CONNECT_ERROR:
+		isert_connect_error(cma_id);
+		break;
 	default:
-		pr_err("Unhandled RDMA CMA event: %d\n", event->event);
+		isert_err("Unhandled RDMA CMA event: %d\n", event->event);
 		break;
 	}
 
@@ -876,7 +932,7 @@ isert_post_recv(struct isert_conn *isert_conn, u32 count)
 
 	for (rx_wr = isert_conn->conn_rx_wr, i = 0; i < count; i++, rx_wr++) {
 		rx_desc = &isert_conn->conn_rx_descs[rx_head];
-		rx_wr->wr_id = (unsigned long)rx_desc;
+		rx_wr->wr_id = (uintptr_t)rx_desc;
 		rx_wr->sg_list = &rx_desc->rx_sg;
 		rx_wr->num_sge = 1;
 		rx_wr->next = rx_wr + 1;
@@ -890,10 +946,10 @@ isert_post_recv(struct isert_conn *isert_conn, u32 count)
 	ret = ib_post_recv(isert_conn->conn_qp, isert_conn->conn_rx_wr,
 			   &rx_wr_failed);
 	if (ret) {
-		pr_err("ib_post_recv() failed with ret: %d\n", ret);
+		isert_err("ib_post_recv() failed with ret: %d\n", ret);
 		isert_conn->post_recv_buf_count -= count;
 	} else {
-		pr_debug("isert_post_recv(): Posted %d RX buffers\n", count);
+		isert_dbg("isert_post_recv(): Posted %d RX buffers\n", count);
 		isert_conn->conn_rx_desc_head = rx_head;
 	}
 	return ret;
@@ -910,19 +966,15 @@ isert_post_send(struct isert_conn *isert_conn, struct iser_tx_desc *tx_desc)
 			      ISER_HEADERS_LEN, DMA_TO_DEVICE);
 
 	send_wr.next = NULL;
-	send_wr.wr_id = (unsigned long)tx_desc;
+	send_wr.wr_id = (uintptr_t)tx_desc;
 	send_wr.sg_list = tx_desc->tx_sg;
 	send_wr.num_sge = tx_desc->num_sge;
 	send_wr.opcode = IB_WR_SEND;
 	send_wr.send_flags = IB_SEND_SIGNALED;
 
-	atomic_inc(&isert_conn->post_send_buf_count);
-
 	ret = ib_post_send(isert_conn->conn_qp, &send_wr, &send_wr_failed);
-	if (ret) {
-		pr_err("ib_post_send() failed, ret: %d\n", ret);
-		atomic_dec(&isert_conn->post_send_buf_count);
-	}
+	if (ret)
+		isert_err("ib_post_send() failed, ret: %d\n", ret);
 
 	return ret;
 }
@@ -945,7 +997,7 @@ isert_create_send_desc(struct isert_conn *isert_conn,
 
 	if (tx_desc->tx_sg[0].lkey != isert_conn->conn_mr->lkey) {
 		tx_desc->tx_sg[0].lkey = isert_conn->conn_mr->lkey;
-		pr_debug("tx_desc %p lkey mismatch, fixing\n", tx_desc);
+		isert_dbg("tx_desc %p lkey mismatch, fixing\n", tx_desc);
 	}
 }
 
@@ -959,7 +1011,7 @@ isert_init_tx_hdrs(struct isert_conn *isert_conn,
 	dma_addr = ib_dma_map_single(ib_dev, (void *)tx_desc,
 				     ISER_HEADERS_LEN, DMA_TO_DEVICE);
 	if (ib_dma_mapping_error(ib_dev, dma_addr)) {
-		pr_err("ib_dma_mapping_error() failed\n");
+		isert_err("ib_dma_mapping_error() failed\n");
 		return -ENOMEM;
 	}
 
@@ -968,40 +1020,24 @@ isert_init_tx_hdrs(struct isert_conn *isert_conn, | |||
968 | tx_desc->tx_sg[0].length = ISER_HEADERS_LEN; | 1020 | tx_desc->tx_sg[0].length = ISER_HEADERS_LEN; |
969 | tx_desc->tx_sg[0].lkey = isert_conn->conn_mr->lkey; | 1021 | tx_desc->tx_sg[0].lkey = isert_conn->conn_mr->lkey; |
970 | 1022 | ||
971 | pr_debug("isert_init_tx_hdrs: Setup tx_sg[0].addr: 0x%llx length: %u" | 1023 | isert_dbg("Setup tx_sg[0].addr: 0x%llx length: %u lkey: 0x%x\n", |
972 | " lkey: 0x%08x\n", tx_desc->tx_sg[0].addr, | 1024 | tx_desc->tx_sg[0].addr, tx_desc->tx_sg[0].length, |
973 | tx_desc->tx_sg[0].length, tx_desc->tx_sg[0].lkey); | 1025 | tx_desc->tx_sg[0].lkey); |
974 | 1026 | ||
975 | return 0; | 1027 | return 0; |
976 | } | 1028 | } |
977 | 1029 | ||
978 | static void | 1030 | static void |
979 | isert_init_send_wr(struct isert_conn *isert_conn, struct isert_cmd *isert_cmd, | 1031 | isert_init_send_wr(struct isert_conn *isert_conn, struct isert_cmd *isert_cmd, |
980 | struct ib_send_wr *send_wr, bool coalesce) | 1032 | struct ib_send_wr *send_wr) |
981 | { | 1033 | { |
982 | struct iser_tx_desc *tx_desc = &isert_cmd->tx_desc; | 1034 | struct iser_tx_desc *tx_desc = &isert_cmd->tx_desc; |
983 | 1035 | ||
984 | isert_cmd->rdma_wr.iser_ib_op = ISER_IB_SEND; | 1036 | isert_cmd->rdma_wr.iser_ib_op = ISER_IB_SEND; |
985 | send_wr->wr_id = (unsigned long)&isert_cmd->tx_desc; | 1037 | send_wr->wr_id = (uintptr_t)&isert_cmd->tx_desc; |
986 | send_wr->opcode = IB_WR_SEND; | 1038 | send_wr->opcode = IB_WR_SEND; |
987 | send_wr->sg_list = &tx_desc->tx_sg[0]; | 1039 | send_wr->sg_list = &tx_desc->tx_sg[0]; |
988 | send_wr->num_sge = isert_cmd->tx_desc.num_sge; | 1040 | send_wr->num_sge = isert_cmd->tx_desc.num_sge; |
989 | /* | ||
990 | * Coalesce send completion interrupts by only setting IB_SEND_SIGNALED | ||
991 | * bit for every ISERT_COMP_BATCH_COUNT number of ib_post_send() calls. | ||
992 | */ | ||
993 | mutex_lock(&isert_conn->conn_mutex); | ||
994 | if (coalesce && isert_conn->state == ISER_CONN_UP && | ||
995 | ++isert_conn->conn_comp_batch < ISERT_COMP_BATCH_COUNT) { | ||
996 | tx_desc->llnode_active = true; | ||
997 | llist_add(&tx_desc->comp_llnode, &isert_conn->conn_comp_llist); | ||
998 | mutex_unlock(&isert_conn->conn_mutex); | ||
999 | return; | ||
1000 | } | ||
1001 | isert_conn->conn_comp_batch = 0; | ||
1002 | tx_desc->comp_llnode_batch = llist_del_all(&isert_conn->conn_comp_llist); | ||
1003 | mutex_unlock(&isert_conn->conn_mutex); | ||
1004 | |||
1005 | send_wr->send_flags = IB_SEND_SIGNALED; | 1041 | send_wr->send_flags = IB_SEND_SIGNALED; |
1006 | } | 1042 | } |
1007 | 1043 | ||
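The hunk above deletes the send-completion coalescing that isert_init_send_wr() used to perform: instead of signaling only every ISERT_COMP_BATCH_COUNT-th send and chaining the unsignaled descriptors on a mutex-protected llist, every send WR now carries IB_SEND_SIGNALED, so each post produces exactly one CQE. A minimal userspace sketch of the batching decision that was removed (the batch size and names are illustrative, not the driver's):

```c
/* Model of the removed coalescing: only every BATCH_COUNT-th send asked
 * for a completion; the rest were collected and reaped in a batch.
 * After this patch, should_signal() is effectively "return true". */
#include <stdbool.h>
#include <stdio.h>

#define BATCH_COUNT 8   /* stand-in for ISERT_COMP_BATCH_COUNT */

static bool should_signal(unsigned int *batch)
{
	if (++(*batch) < BATCH_COUNT)
		return false;          /* completion coalesced away */
	*batch = 0;
	return true;                   /* set IB_SEND_SIGNALED */
}

int main(void)
{
	unsigned int batch = 0;

	for (int i = 0; i < 20; i++)
		if (should_signal(&batch))
			printf("send %d signaled\n", i);
	return 0;
}
```

Dropping the batching makes completion accounting trivial (one CQE per signaled send), which is what lets the later hunks in this diff delete the post_send_buf_count atomics entirely.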
@@ -1017,22 +1053,21 @@ isert_rdma_post_recvl(struct isert_conn *isert_conn) | |||
1017 | sge.length = ISER_RX_LOGIN_SIZE; | 1053 | sge.length = ISER_RX_LOGIN_SIZE; |
1018 | sge.lkey = isert_conn->conn_mr->lkey; | 1054 | sge.lkey = isert_conn->conn_mr->lkey; |
1019 | 1055 | ||
1020 | pr_debug("Setup sge: addr: %llx length: %d 0x%08x\n", | 1056 | isert_dbg("Setup sge: addr: %llx length: %d 0x%08x\n", |
1021 | sge.addr, sge.length, sge.lkey); | 1057 | sge.addr, sge.length, sge.lkey); |
1022 | 1058 | ||
1023 | memset(&rx_wr, 0, sizeof(struct ib_recv_wr)); | 1059 | memset(&rx_wr, 0, sizeof(struct ib_recv_wr)); |
1024 | rx_wr.wr_id = (unsigned long)isert_conn->login_req_buf; | 1060 | rx_wr.wr_id = (uintptr_t)isert_conn->login_req_buf; |
1025 | rx_wr.sg_list = &sge; | 1061 | rx_wr.sg_list = &sge; |
1026 | rx_wr.num_sge = 1; | 1062 | rx_wr.num_sge = 1; |
1027 | 1063 | ||
1028 | isert_conn->post_recv_buf_count++; | 1064 | isert_conn->post_recv_buf_count++; |
1029 | ret = ib_post_recv(isert_conn->conn_qp, &rx_wr, &rx_wr_fail); | 1065 | ret = ib_post_recv(isert_conn->conn_qp, &rx_wr, &rx_wr_fail); |
1030 | if (ret) { | 1066 | if (ret) { |
1031 | pr_err("ib_post_recv() failed: %d\n", ret); | 1067 | isert_err("ib_post_recv() failed: %d\n", ret); |
1032 | isert_conn->post_recv_buf_count--; | 1068 | isert_conn->post_recv_buf_count--; |
1033 | } | 1069 | } |
1034 | 1070 | ||
1035 | pr_debug("ib_post_recv(): returned success >>>>>>>>>>>>>>>>>>>>>>>>\n"); | ||
1036 | return ret; | 1071 | return ret; |
1037 | } | 1072 | } |
1038 | 1073 | ||
@@ -1072,13 +1107,9 @@ isert_put_login_tx(struct iscsi_conn *conn, struct iscsi_login *login, | |||
1072 | if (login->login_complete) { | 1107 | if (login->login_complete) { |
1073 | if (!conn->sess->sess_ops->SessionType && | 1108 | if (!conn->sess->sess_ops->SessionType && |
1074 | isert_conn->conn_device->use_fastreg) { | 1109 | isert_conn->conn_device->use_fastreg) { |
1075 | /* Normal Session and fastreg is used */ | 1110 | ret = isert_conn_create_fastreg_pool(isert_conn); |
1076 | u8 pi_support = login->np->tpg_np->tpg->tpg_attrib.t10_pi; | ||
1077 | |||
1078 | ret = isert_conn_create_fastreg_pool(isert_conn, | ||
1079 | pi_support); | ||
1080 | if (ret) { | 1111 | if (ret) { |
1081 | pr_err("Conn: %p failed to create" | 1112 | isert_err("Conn: %p failed to create" |
1082 | " fastreg pool\n", isert_conn); | 1113 | " fastreg pool\n", isert_conn); |
1083 | return ret; | 1114 | return ret; |
1084 | } | 1115 | } |
@@ -1092,7 +1123,10 @@ isert_put_login_tx(struct iscsi_conn *conn, struct iscsi_login *login, | |||
1092 | if (ret) | 1123 | if (ret) |
1093 | return ret; | 1124 | return ret; |
1094 | 1125 | ||
1095 | isert_conn->state = ISER_CONN_UP; | 1126 | /* Now we are in FULL_FEATURE phase */ |
1127 | mutex_lock(&isert_conn->conn_mutex); | ||
1128 | isert_conn->state = ISER_CONN_FULL_FEATURE; | ||
1129 | mutex_unlock(&isert_conn->conn_mutex); | ||
1096 | goto post_send; | 1130 | goto post_send; |
1097 | } | 1131 | } |
1098 | 1132 | ||
@@ -1109,18 +1143,17 @@ post_send: | |||
1109 | } | 1143 | } |
1110 | 1144 | ||
1111 | static void | 1145 | static void |
1112 | isert_rx_login_req(struct iser_rx_desc *rx_desc, int rx_buflen, | 1146 | isert_rx_login_req(struct isert_conn *isert_conn) |
1113 | struct isert_conn *isert_conn) | ||
1114 | { | 1147 | { |
1148 | struct iser_rx_desc *rx_desc = (void *)isert_conn->login_req_buf; | ||
1149 | int rx_buflen = isert_conn->login_req_len; | ||
1115 | struct iscsi_conn *conn = isert_conn->conn; | 1150 | struct iscsi_conn *conn = isert_conn->conn; |
1116 | struct iscsi_login *login = conn->conn_login; | 1151 | struct iscsi_login *login = conn->conn_login; |
1117 | int size; | 1152 | int size; |
1118 | 1153 | ||
1119 | if (!login) { | 1154 | isert_info("conn %p\n", isert_conn); |
1120 | pr_err("conn->conn_login is NULL\n"); | 1155 | |
1121 | dump_stack(); | 1156 | WARN_ON_ONCE(!login); |
1122 | return; | ||
1123 | } | ||
1124 | 1157 | ||
1125 | if (login->first_request) { | 1158 | if (login->first_request) { |
1126 | struct iscsi_login_req *login_req = | 1159 | struct iscsi_login_req *login_req = |
@@ -1146,8 +1179,9 @@ isert_rx_login_req(struct iser_rx_desc *rx_desc, int rx_buflen, | |||
1146 | memcpy(&login->req[0], (void *)&rx_desc->iscsi_header, ISCSI_HDR_LEN); | 1179 | memcpy(&login->req[0], (void *)&rx_desc->iscsi_header, ISCSI_HDR_LEN); |
1147 | 1180 | ||
1148 | size = min(rx_buflen, MAX_KEY_VALUE_PAIRS); | 1181 | size = min(rx_buflen, MAX_KEY_VALUE_PAIRS); |
1149 | pr_debug("Using login payload size: %d, rx_buflen: %d MAX_KEY_VALUE_PAIRS: %d\n", | 1182 | isert_dbg("Using login payload size: %d, rx_buflen: %d " |
1150 | size, rx_buflen, MAX_KEY_VALUE_PAIRS); | 1183 | "MAX_KEY_VALUE_PAIRS: %d\n", size, rx_buflen, |
1184 | MAX_KEY_VALUE_PAIRS); | ||
1151 | memcpy(login->req_buf, &rx_desc->data[0], size); | 1185 | memcpy(login->req_buf, &rx_desc->data[0], size); |
1152 | 1186 | ||
1153 | if (login->first_request) { | 1187 | if (login->first_request) { |
@@ -1166,7 +1200,7 @@ static struct iscsi_cmd | |||
1166 | 1200 | ||
1167 | cmd = iscsit_allocate_cmd(conn, TASK_INTERRUPTIBLE); | 1201 | cmd = iscsit_allocate_cmd(conn, TASK_INTERRUPTIBLE); |
1168 | if (!cmd) { | 1202 | if (!cmd) { |
1169 | pr_err("Unable to allocate iscsi_cmd + isert_cmd\n"); | 1203 | isert_err("Unable to allocate iscsi_cmd + isert_cmd\n"); |
1170 | return NULL; | 1204 | return NULL; |
1171 | } | 1205 | } |
1172 | isert_cmd = iscsit_priv_cmd(cmd); | 1206 | isert_cmd = iscsit_priv_cmd(cmd); |
@@ -1209,8 +1243,8 @@ isert_handle_scsi_cmd(struct isert_conn *isert_conn, | |||
1209 | sg = &cmd->se_cmd.t_data_sg[0]; | 1243 | sg = &cmd->se_cmd.t_data_sg[0]; |
1210 | sg_nents = max(1UL, DIV_ROUND_UP(imm_data_len, PAGE_SIZE)); | 1244 | sg_nents = max(1UL, DIV_ROUND_UP(imm_data_len, PAGE_SIZE)); |
1211 | 1245 | ||
1212 | pr_debug("Copying Immediate SG: %p sg_nents: %u from %p imm_data_len: %d\n", | 1246 | isert_dbg("Copying Immediate SG: %p sg_nents: %u from %p imm_data_len: %d\n", |
1213 | sg, sg_nents, &rx_desc->data[0], imm_data_len); | 1247 | sg, sg_nents, &rx_desc->data[0], imm_data_len); |
1214 | 1248 | ||
1215 | sg_copy_from_buffer(sg, sg_nents, &rx_desc->data[0], imm_data_len); | 1249 | sg_copy_from_buffer(sg, sg_nents, &rx_desc->data[0], imm_data_len); |
1216 | 1250 | ||
@@ -1254,13 +1288,15 @@ isert_handle_iscsi_dataout(struct isert_conn *isert_conn, | |||
1254 | * FIXME: Unexpected unsolicited_data out | 1288 | * FIXME: Unexpected unsolicited_data out |
1255 | */ | 1289 | */ |
1256 | if (!cmd->unsolicited_data) { | 1290 | if (!cmd->unsolicited_data) { |
1257 | pr_err("Received unexpected solicited data payload\n"); | 1291 | isert_err("Received unexpected solicited data payload\n"); |
1258 | dump_stack(); | 1292 | dump_stack(); |
1259 | return -1; | 1293 | return -1; |
1260 | } | 1294 | } |
1261 | 1295 | ||
1262 | pr_debug("Unsolicited DataOut unsol_data_len: %u, write_data_done: %u, data_length: %u\n", | 1296 | isert_dbg("Unsolicited DataOut unsol_data_len: %u, " |
1263 | unsol_data_len, cmd->write_data_done, cmd->se_cmd.data_length); | 1297 | "write_data_done: %u, data_length: %u\n", |
1298 | unsol_data_len, cmd->write_data_done, | ||
1299 | cmd->se_cmd.data_length); | ||
1264 | 1300 | ||
1265 | sg_off = cmd->write_data_done / PAGE_SIZE; | 1301 | sg_off = cmd->write_data_done / PAGE_SIZE; |
1266 | sg_start = &cmd->se_cmd.t_data_sg[sg_off]; | 1302 | sg_start = &cmd->se_cmd.t_data_sg[sg_off]; |
@@ -1270,12 +1306,13 @@ isert_handle_iscsi_dataout(struct isert_conn *isert_conn, | |||
1270 | * FIXME: Non page-aligned unsolicited_data out | 1306 | * FIXME: Non page-aligned unsolicited_data out |
1271 | */ | 1307 | */ |
1272 | if (page_off) { | 1308 | if (page_off) { |
1273 | pr_err("Received unexpected non-page aligned data payload\n"); | 1309 | isert_err("unexpected non-page aligned data payload\n"); |
1274 | dump_stack(); | 1310 | dump_stack(); |
1275 | return -1; | 1311 | return -1; |
1276 | } | 1312 | } |
1277 | pr_debug("Copying DataOut: sg_start: %p, sg_off: %u sg_nents: %u from %p %u\n", | 1313 | isert_dbg("Copying DataOut: sg_start: %p, sg_off: %u " |
1278 | sg_start, sg_off, sg_nents, &rx_desc->data[0], unsol_data_len); | 1314 | "sg_nents: %u from %p %u\n", sg_start, sg_off, |
1315 | sg_nents, &rx_desc->data[0], unsol_data_len); | ||
1279 | 1316 | ||
1280 | sg_copy_from_buffer(sg_start, sg_nents, &rx_desc->data[0], | 1317 | sg_copy_from_buffer(sg_start, sg_nents, &rx_desc->data[0], |
1281 | unsol_data_len); | 1318 | unsol_data_len); |
@@ -1322,8 +1359,8 @@ isert_handle_text_cmd(struct isert_conn *isert_conn, struct isert_cmd *isert_cmd | |||
1322 | 1359 | ||
1323 | text_in = kzalloc(payload_length, GFP_KERNEL); | 1360 | text_in = kzalloc(payload_length, GFP_KERNEL); |
1324 | if (!text_in) { | 1361 | if (!text_in) { |
1325 | pr_err("Unable to allocate text_in of payload_length: %u\n", | 1362 | isert_err("Unable to allocate text_in of payload_length: %u\n", |
1326 | payload_length); | 1363 | payload_length); |
1327 | return -ENOMEM; | 1364 | return -ENOMEM; |
1328 | } | 1365 | } |
1329 | cmd->text_in_ptr = text_in; | 1366 | cmd->text_in_ptr = text_in; |
@@ -1348,8 +1385,8 @@ isert_rx_opcode(struct isert_conn *isert_conn, struct iser_rx_desc *rx_desc, | |||
1348 | 1385 | ||
1349 | if (sess->sess_ops->SessionType && | 1386 | if (sess->sess_ops->SessionType && |
1350 | (!(opcode & ISCSI_OP_TEXT) || !(opcode & ISCSI_OP_LOGOUT))) { | 1387 | (!(opcode & ISCSI_OP_TEXT) || !(opcode & ISCSI_OP_LOGOUT))) { |
1351 | pr_err("Got illegal opcode: 0x%02x in SessionType=Discovery," | 1388 | isert_err("Got illegal opcode: 0x%02x in SessionType=Discovery," |
1352 | " ignoring\n", opcode); | 1389 | " ignoring\n", opcode); |
1353 | return 0; | 1390 | return 0; |
1354 | } | 1391 | } |
1355 | 1392 | ||
@@ -1395,10 +1432,6 @@ isert_rx_opcode(struct isert_conn *isert_conn, struct iser_rx_desc *rx_desc, | |||
1395 | break; | 1432 | break; |
1396 | 1433 | ||
1397 | ret = iscsit_handle_logout_cmd(conn, cmd, (unsigned char *)hdr); | 1434 | ret = iscsit_handle_logout_cmd(conn, cmd, (unsigned char *)hdr); |
1398 | if (ret > 0) | ||
1399 | wait_for_completion_timeout(&conn->conn_logout_comp, | ||
1400 | SECONDS_FOR_LOGOUT_COMP * | ||
1401 | HZ); | ||
1402 | break; | 1435 | break; |
1403 | case ISCSI_OP_TEXT: | 1436 | case ISCSI_OP_TEXT: |
1404 | cmd = isert_allocate_cmd(conn); | 1437 | cmd = isert_allocate_cmd(conn); |
@@ -1410,7 +1443,7 @@ isert_rx_opcode(struct isert_conn *isert_conn, struct iser_rx_desc *rx_desc, | |||
1410 | rx_desc, (struct iscsi_text *)hdr); | 1443 | rx_desc, (struct iscsi_text *)hdr); |
1411 | break; | 1444 | break; |
1412 | default: | 1445 | default: |
1413 | pr_err("Got unknown iSCSI OpCode: 0x%02x\n", opcode); | 1446 | isert_err("Got unknown iSCSI OpCode: 0x%02x\n", opcode); |
1414 | dump_stack(); | 1447 | dump_stack(); |
1415 | break; | 1448 | break; |
1416 | } | 1449 | } |
@@ -1431,23 +1464,23 @@ isert_rx_do_work(struct iser_rx_desc *rx_desc, struct isert_conn *isert_conn) | |||
1431 | if (iser_hdr->flags & ISER_RSV) { | 1464 | if (iser_hdr->flags & ISER_RSV) { |
1432 | read_stag = be32_to_cpu(iser_hdr->read_stag); | 1465 | read_stag = be32_to_cpu(iser_hdr->read_stag); |
1433 | read_va = be64_to_cpu(iser_hdr->read_va); | 1466 | read_va = be64_to_cpu(iser_hdr->read_va); |
1434 | pr_debug("ISER_RSV: read_stag: 0x%08x read_va: 0x%16llx\n", | 1467 | isert_dbg("ISER_RSV: read_stag: 0x%x read_va: 0x%llx\n", |
1435 | read_stag, (unsigned long long)read_va); | 1468 | read_stag, (unsigned long long)read_va); |
1436 | } | 1469 | } |
1437 | if (iser_hdr->flags & ISER_WSV) { | 1470 | if (iser_hdr->flags & ISER_WSV) { |
1438 | write_stag = be32_to_cpu(iser_hdr->write_stag); | 1471 | write_stag = be32_to_cpu(iser_hdr->write_stag); |
1439 | write_va = be64_to_cpu(iser_hdr->write_va); | 1472 | write_va = be64_to_cpu(iser_hdr->write_va); |
1440 | pr_debug("ISER_WSV: write__stag: 0x%08x write_va: 0x%16llx\n", | 1473 | isert_dbg("ISER_WSV: write_stag: 0x%x write_va: 0x%llx\n", |
1441 | write_stag, (unsigned long long)write_va); | 1474 | write_stag, (unsigned long long)write_va); |
1442 | } | 1475 | } |
1443 | 1476 | ||
1444 | pr_debug("ISER ISCSI_CTRL PDU\n"); | 1477 | isert_dbg("ISER ISCSI_CTRL PDU\n"); |
1445 | break; | 1478 | break; |
1446 | case ISER_HELLO: | 1479 | case ISER_HELLO: |
1447 | pr_err("iSER Hello message\n"); | 1480 | isert_err("iSER Hello message\n"); |
1448 | break; | 1481 | break; |
1449 | default: | 1482 | default: |
1450 | pr_warn("Unknown iSER hdr flags: 0x%02x\n", iser_hdr->flags); | 1483 | isert_warn("Unknown iSER hdr flags: 0x%02x\n", iser_hdr->flags); |
1451 | break; | 1484 | break; |
1452 | } | 1485 | } |
1453 | 1486 | ||
@@ -1457,7 +1490,7 @@ isert_rx_do_work(struct iser_rx_desc *rx_desc, struct isert_conn *isert_conn) | |||
1457 | 1490 | ||
1458 | static void | 1491 | static void |
1459 | isert_rx_completion(struct iser_rx_desc *desc, struct isert_conn *isert_conn, | 1492 | isert_rx_completion(struct iser_rx_desc *desc, struct isert_conn *isert_conn, |
1460 | unsigned long xfer_len) | 1493 | u32 xfer_len) |
1461 | { | 1494 | { |
1462 | struct ib_device *ib_dev = isert_conn->conn_cm_id->device; | 1495 | struct ib_device *ib_dev = isert_conn->conn_cm_id->device; |
1463 | struct iscsi_hdr *hdr; | 1496 | struct iscsi_hdr *hdr; |
@@ -1467,34 +1500,43 @@ isert_rx_completion(struct iser_rx_desc *desc, struct isert_conn *isert_conn, | |||
1467 | if ((char *)desc == isert_conn->login_req_buf) { | 1500 | if ((char *)desc == isert_conn->login_req_buf) { |
1468 | rx_dma = isert_conn->login_req_dma; | 1501 | rx_dma = isert_conn->login_req_dma; |
1469 | rx_buflen = ISER_RX_LOGIN_SIZE; | 1502 | rx_buflen = ISER_RX_LOGIN_SIZE; |
1470 | pr_debug("ISER login_buf: Using rx_dma: 0x%llx, rx_buflen: %d\n", | 1503 | isert_dbg("login_buf: Using rx_dma: 0x%llx, rx_buflen: %d\n", |
1471 | rx_dma, rx_buflen); | 1504 | rx_dma, rx_buflen); |
1472 | } else { | 1505 | } else { |
1473 | rx_dma = desc->dma_addr; | 1506 | rx_dma = desc->dma_addr; |
1474 | rx_buflen = ISER_RX_PAYLOAD_SIZE; | 1507 | rx_buflen = ISER_RX_PAYLOAD_SIZE; |
1475 | pr_debug("ISER req_buf: Using rx_dma: 0x%llx, rx_buflen: %d\n", | 1508 | isert_dbg("req_buf: Using rx_dma: 0x%llx, rx_buflen: %d\n", |
1476 | rx_dma, rx_buflen); | 1509 | rx_dma, rx_buflen); |
1477 | } | 1510 | } |
1478 | 1511 | ||
1479 | ib_dma_sync_single_for_cpu(ib_dev, rx_dma, rx_buflen, DMA_FROM_DEVICE); | 1512 | ib_dma_sync_single_for_cpu(ib_dev, rx_dma, rx_buflen, DMA_FROM_DEVICE); |
1480 | 1513 | ||
1481 | hdr = &desc->iscsi_header; | 1514 | hdr = &desc->iscsi_header; |
1482 | pr_debug("iSCSI opcode: 0x%02x, ITT: 0x%08x, flags: 0x%02x dlen: %d\n", | 1515 | isert_dbg("iSCSI opcode: 0x%02x, ITT: 0x%08x, flags: 0x%02x dlen: %d\n", |
1483 | hdr->opcode, hdr->itt, hdr->flags, | 1516 | hdr->opcode, hdr->itt, hdr->flags, |
1484 | (int)(xfer_len - ISER_HEADERS_LEN)); | 1517 | (int)(xfer_len - ISER_HEADERS_LEN)); |
1485 | 1518 | ||
1486 | if ((char *)desc == isert_conn->login_req_buf) | 1519 | if ((char *)desc == isert_conn->login_req_buf) { |
1487 | isert_rx_login_req(desc, xfer_len - ISER_HEADERS_LEN, | 1520 | isert_conn->login_req_len = xfer_len - ISER_HEADERS_LEN; |
1488 | isert_conn); | 1521 | if (isert_conn->conn) { |
1489 | else | 1522 | struct iscsi_login *login = isert_conn->conn->conn_login; |
1523 | |||
1524 | if (login && !login->first_request) | ||
1525 | isert_rx_login_req(isert_conn); | ||
1526 | } | ||
1527 | mutex_lock(&isert_conn->conn_mutex); | ||
1528 | complete(&isert_conn->login_req_comp); | ||
1529 | mutex_unlock(&isert_conn->conn_mutex); | ||
1530 | } else { | ||
1490 | isert_rx_do_work(desc, isert_conn); | 1531 | isert_rx_do_work(desc, isert_conn); |
1532 | } | ||
1491 | 1533 | ||
1492 | ib_dma_sync_single_for_device(ib_dev, rx_dma, rx_buflen, | 1534 | ib_dma_sync_single_for_device(ib_dev, rx_dma, rx_buflen, |
1493 | DMA_FROM_DEVICE); | 1535 | DMA_FROM_DEVICE); |
1494 | 1536 | ||
1495 | isert_conn->post_recv_buf_count--; | 1537 | isert_conn->post_recv_buf_count--; |
1496 | pr_debug("iSERT: Decremented post_recv_buf_count: %d\n", | 1538 | isert_dbg("Decremented post_recv_buf_count: %d\n", |
1497 | isert_conn->post_recv_buf_count); | 1539 | isert_conn->post_recv_buf_count); |
1498 | 1540 | ||
1499 | if ((char *)desc == isert_conn->login_req_buf) | 1541 | if ((char *)desc == isert_conn->login_req_buf) |
1500 | return; | 1542 | return; |
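With the hunk above, the RX completion path no longer parses a login PDU in completion context; it stashes the payload length in login_req_len and fires login_req_comp so the login thread can do the work. A self-contained sketch of that handoff, using a pthread condition variable in place of the kernel's struct completion (all names here are illustrative):

```c
#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static pthread_cond_t cond = PTHREAD_COND_INITIALIZER;
static int login_req_len = -1;           /* -1: no request stashed yet */

static void rx_completion(int xfer_len)  /* completion-handler context */
{
	pthread_mutex_lock(&lock);
	login_req_len = xfer_len;        /* stash for the login thread */
	pthread_cond_signal(&cond);      /* complete(&login_req_comp) */
	pthread_mutex_unlock(&lock);
}

static void *login_thread(void *arg)
{
	(void)arg;
	pthread_mutex_lock(&lock);
	while (login_req_len < 0)        /* wait_for_completion() */
		pthread_cond_wait(&cond, &lock);
	printf("processing login PDU, %d payload bytes\n", login_req_len);
	pthread_mutex_unlock(&lock);
	return NULL;
}

int main(void)
{
	pthread_t t;

	pthread_create(&t, NULL, login_thread, NULL);
	rx_completion(512);
	pthread_join(t, NULL);
	return 0;
}
```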
@@ -1505,7 +1547,7 @@ isert_rx_completion(struct iser_rx_desc *desc, struct isert_conn *isert_conn, | |||
1505 | ISERT_MIN_POSTED_RX); | 1547 | ISERT_MIN_POSTED_RX); |
1506 | err = isert_post_recv(isert_conn, count); | 1548 | err = isert_post_recv(isert_conn, count); |
1507 | if (err) { | 1549 | if (err) { |
1508 | pr_err("isert_post_recv() count: %d failed, %d\n", | 1550 | isert_err("isert_post_recv() count: %d failed, %d\n", |
1509 | count, err); | 1551 | count, err); |
1510 | } | 1552 | } |
1511 | } | 1553 | } |
@@ -1534,12 +1576,12 @@ isert_map_data_buf(struct isert_conn *isert_conn, struct isert_cmd *isert_cmd, | |||
1534 | data->dma_nents = ib_dma_map_sg(ib_dev, data->sg, data->nents, | 1576 | data->dma_nents = ib_dma_map_sg(ib_dev, data->sg, data->nents, |
1535 | data->dma_dir); | 1577 | data->dma_dir); |
1536 | if (unlikely(!data->dma_nents)) { | 1578 | if (unlikely(!data->dma_nents)) { |
1537 | pr_err("Cmd: unable to dma map SGs %p\n", sg); | 1579 | isert_err("Cmd: unable to dma map SGs %p\n", sg); |
1538 | return -EINVAL; | 1580 | return -EINVAL; |
1539 | } | 1581 | } |
1540 | 1582 | ||
1541 | pr_debug("Mapped cmd: %p count: %u sg: %p sg_nents: %u rdma_len %d\n", | 1583 | isert_dbg("Mapped cmd: %p count: %u sg: %p sg_nents: %u rdma_len %d\n", |
1542 | isert_cmd, data->dma_nents, data->sg, data->nents, data->len); | 1584 | isert_cmd, data->dma_nents, data->sg, data->nents, data->len); |
1543 | 1585 | ||
1544 | return 0; | 1586 | return 0; |
1545 | } | 1587 | } |
@@ -1560,21 +1602,21 @@ isert_unmap_cmd(struct isert_cmd *isert_cmd, struct isert_conn *isert_conn) | |||
1560 | { | 1602 | { |
1561 | struct isert_rdma_wr *wr = &isert_cmd->rdma_wr; | 1603 | struct isert_rdma_wr *wr = &isert_cmd->rdma_wr; |
1562 | 1604 | ||
1563 | pr_debug("isert_unmap_cmd: %p\n", isert_cmd); | 1605 | isert_dbg("Cmd %p\n", isert_cmd); |
1564 | 1606 | ||
1565 | if (wr->data.sg) { | 1607 | if (wr->data.sg) { |
1566 | pr_debug("isert_unmap_cmd: %p unmap_sg op\n", isert_cmd); | 1608 | isert_dbg("Cmd %p unmap_sg op\n", isert_cmd); |
1567 | isert_unmap_data_buf(isert_conn, &wr->data); | 1609 | isert_unmap_data_buf(isert_conn, &wr->data); |
1568 | } | 1610 | } |
1569 | 1611 | ||
1570 | if (wr->send_wr) { | 1612 | if (wr->send_wr) { |
1571 | pr_debug("isert_unmap_cmd: %p free send_wr\n", isert_cmd); | 1613 | isert_dbg("Cmd %p free send_wr\n", isert_cmd); |
1572 | kfree(wr->send_wr); | 1614 | kfree(wr->send_wr); |
1573 | wr->send_wr = NULL; | 1615 | wr->send_wr = NULL; |
1574 | } | 1616 | } |
1575 | 1617 | ||
1576 | if (wr->ib_sge) { | 1618 | if (wr->ib_sge) { |
1577 | pr_debug("isert_unmap_cmd: %p free ib_sge\n", isert_cmd); | 1619 | isert_dbg("Cmd %p free ib_sge\n", isert_cmd); |
1578 | kfree(wr->ib_sge); | 1620 | kfree(wr->ib_sge); |
1579 | wr->ib_sge = NULL; | 1621 | wr->ib_sge = NULL; |
1580 | } | 1622 | } |
@@ -1586,11 +1628,10 @@ isert_unreg_rdma(struct isert_cmd *isert_cmd, struct isert_conn *isert_conn) | |||
1586 | struct isert_rdma_wr *wr = &isert_cmd->rdma_wr; | 1628 | struct isert_rdma_wr *wr = &isert_cmd->rdma_wr; |
1587 | LIST_HEAD(unmap_list); | 1629 | LIST_HEAD(unmap_list); |
1588 | 1630 | ||
1589 | pr_debug("unreg_fastreg_cmd: %p\n", isert_cmd); | 1631 | isert_dbg("Cmd %p\n", isert_cmd); |
1590 | 1632 | ||
1591 | if (wr->fr_desc) { | 1633 | if (wr->fr_desc) { |
1592 | pr_debug("unreg_fastreg_cmd: %p free fr_desc %p\n", | 1634 | isert_dbg("Cmd %p free fr_desc %p\n", isert_cmd, wr->fr_desc); |
1593 | isert_cmd, wr->fr_desc); | ||
1594 | if (wr->fr_desc->ind & ISERT_PROTECTED) { | 1635 | if (wr->fr_desc->ind & ISERT_PROTECTED) { |
1595 | isert_unmap_data_buf(isert_conn, &wr->prot); | 1636 | isert_unmap_data_buf(isert_conn, &wr->prot); |
1596 | wr->fr_desc->ind &= ~ISERT_PROTECTED; | 1637 | wr->fr_desc->ind &= ~ISERT_PROTECTED; |
@@ -1602,7 +1643,7 @@ isert_unreg_rdma(struct isert_cmd *isert_cmd, struct isert_conn *isert_conn) | |||
1602 | } | 1643 | } |
1603 | 1644 | ||
1604 | if (wr->data.sg) { | 1645 | if (wr->data.sg) { |
1605 | pr_debug("unreg_fastreg_cmd: %p unmap_sg op\n", isert_cmd); | 1646 | isert_dbg("Cmd %p unmap_sg op\n", isert_cmd); |
1606 | isert_unmap_data_buf(isert_conn, &wr->data); | 1647 | isert_unmap_data_buf(isert_conn, &wr->data); |
1607 | } | 1648 | } |
1608 | 1649 | ||
@@ -1618,7 +1659,7 @@ isert_put_cmd(struct isert_cmd *isert_cmd, bool comp_err) | |||
1618 | struct iscsi_conn *conn = isert_conn->conn; | 1659 | struct iscsi_conn *conn = isert_conn->conn; |
1619 | struct isert_device *device = isert_conn->conn_device; | 1660 | struct isert_device *device = isert_conn->conn_device; |
1620 | 1661 | ||
1621 | pr_debug("Entering isert_put_cmd: %p\n", isert_cmd); | 1662 | isert_dbg("Cmd %p\n", isert_cmd); |
1622 | 1663 | ||
1623 | switch (cmd->iscsi_opcode) { | 1664 | switch (cmd->iscsi_opcode) { |
1624 | case ISCSI_OP_SCSI_CMD: | 1665 | case ISCSI_OP_SCSI_CMD: |
@@ -1668,7 +1709,7 @@ isert_put_cmd(struct isert_cmd *isert_cmd, bool comp_err) | |||
1668 | * associated cmd->se_cmd needs to be released. | 1709 | * associated cmd->se_cmd needs to be released. |
1669 | */ | 1710 | */ |
1670 | if (cmd->se_cmd.se_tfo != NULL) { | 1711 | if (cmd->se_cmd.se_tfo != NULL) { |
1671 | pr_debug("Calling transport_generic_free_cmd from" | 1712 | isert_dbg("Calling transport_generic_free_cmd from" |
1672 | " isert_put_cmd for 0x%02x\n", | 1713 | " isert_put_cmd for 0x%02x\n", |
1673 | cmd->iscsi_opcode); | 1714 | cmd->iscsi_opcode); |
1674 | transport_generic_free_cmd(&cmd->se_cmd, 0); | 1715 | transport_generic_free_cmd(&cmd->se_cmd, 0); |
@@ -1687,7 +1728,7 @@ static void | |||
1687 | isert_unmap_tx_desc(struct iser_tx_desc *tx_desc, struct ib_device *ib_dev) | 1728 | isert_unmap_tx_desc(struct iser_tx_desc *tx_desc, struct ib_device *ib_dev) |
1688 | { | 1729 | { |
1689 | if (tx_desc->dma_addr != 0) { | 1730 | if (tx_desc->dma_addr != 0) { |
1690 | pr_debug("Calling ib_dma_unmap_single for tx_desc->dma_addr\n"); | 1731 | isert_dbg("unmap single for tx_desc->dma_addr\n"); |
1691 | ib_dma_unmap_single(ib_dev, tx_desc->dma_addr, | 1732 | ib_dma_unmap_single(ib_dev, tx_desc->dma_addr, |
1692 | ISER_HEADERS_LEN, DMA_TO_DEVICE); | 1733 | ISER_HEADERS_LEN, DMA_TO_DEVICE); |
1693 | tx_desc->dma_addr = 0; | 1734 | tx_desc->dma_addr = 0; |
@@ -1699,7 +1740,7 @@ isert_completion_put(struct iser_tx_desc *tx_desc, struct isert_cmd *isert_cmd, | |||
1699 | struct ib_device *ib_dev, bool comp_err) | 1740 | struct ib_device *ib_dev, bool comp_err) |
1700 | { | 1741 | { |
1701 | if (isert_cmd->pdu_buf_dma != 0) { | 1742 | if (isert_cmd->pdu_buf_dma != 0) { |
1702 | pr_debug("Calling ib_dma_unmap_single for isert_cmd->pdu_buf_dma\n"); | 1743 | isert_dbg("unmap single for isert_cmd->pdu_buf_dma\n"); |
1703 | ib_dma_unmap_single(ib_dev, isert_cmd->pdu_buf_dma, | 1744 | ib_dma_unmap_single(ib_dev, isert_cmd->pdu_buf_dma, |
1704 | isert_cmd->pdu_buf_len, DMA_TO_DEVICE); | 1745 | isert_cmd->pdu_buf_len, DMA_TO_DEVICE); |
1705 | isert_cmd->pdu_buf_dma = 0; | 1746 | isert_cmd->pdu_buf_dma = 0; |
@@ -1717,7 +1758,7 @@ isert_check_pi_status(struct se_cmd *se_cmd, struct ib_mr *sig_mr) | |||
1717 | 1758 | ||
1718 | ret = ib_check_mr_status(sig_mr, IB_MR_CHECK_SIG_STATUS, &mr_status); | 1759 | ret = ib_check_mr_status(sig_mr, IB_MR_CHECK_SIG_STATUS, &mr_status); |
1719 | if (ret) { | 1760 | if (ret) { |
1720 | pr_err("ib_check_mr_status failed, ret %d\n", ret); | 1761 | isert_err("ib_check_mr_status failed, ret %d\n", ret); |
1721 | goto fail_mr_status; | 1762 | goto fail_mr_status; |
1722 | } | 1763 | } |
1723 | 1764 | ||
@@ -1740,12 +1781,12 @@ isert_check_pi_status(struct se_cmd *se_cmd, struct ib_mr *sig_mr) | |||
1740 | do_div(sec_offset_err, block_size); | 1781 | do_div(sec_offset_err, block_size); |
1741 | se_cmd->bad_sector = sec_offset_err + se_cmd->t_task_lba; | 1782 | se_cmd->bad_sector = sec_offset_err + se_cmd->t_task_lba; |
1742 | 1783 | ||
1743 | pr_err("isert: PI error found type %d at sector 0x%llx " | 1784 | isert_err("PI error found type %d at sector 0x%llx " |
1744 | "expected 0x%x vs actual 0x%x\n", | 1785 | "expected 0x%x vs actual 0x%x\n", |
1745 | mr_status.sig_err.err_type, | 1786 | mr_status.sig_err.err_type, |
1746 | (unsigned long long)se_cmd->bad_sector, | 1787 | (unsigned long long)se_cmd->bad_sector, |
1747 | mr_status.sig_err.expected, | 1788 | mr_status.sig_err.expected, |
1748 | mr_status.sig_err.actual); | 1789 | mr_status.sig_err.actual); |
1749 | ret = 1; | 1790 | ret = 1; |
1750 | } | 1791 | } |
1751 | 1792 | ||
@@ -1801,7 +1842,7 @@ isert_completion_rdma_read(struct iser_tx_desc *tx_desc, | |||
1801 | cmd->write_data_done = wr->data.len; | 1842 | cmd->write_data_done = wr->data.len; |
1802 | wr->send_wr_num = 0; | 1843 | wr->send_wr_num = 0; |
1803 | 1844 | ||
1804 | pr_debug("Cmd: %p RDMA_READ comp calling execute_cmd\n", isert_cmd); | 1845 | isert_dbg("Cmd: %p RDMA_READ comp calling execute_cmd\n", isert_cmd); |
1805 | spin_lock_bh(&cmd->istate_lock); | 1846 | spin_lock_bh(&cmd->istate_lock); |
1806 | cmd->cmd_flags |= ICF_GOT_LAST_DATAOUT; | 1847 | cmd->cmd_flags |= ICF_GOT_LAST_DATAOUT; |
1807 | cmd->i_state = ISTATE_RECEIVED_LAST_DATAOUT; | 1848 | cmd->i_state = ISTATE_RECEIVED_LAST_DATAOUT; |
@@ -1823,36 +1864,22 @@ isert_do_control_comp(struct work_struct *work) | |||
1823 | struct ib_device *ib_dev = isert_conn->conn_cm_id->device; | 1864 | struct ib_device *ib_dev = isert_conn->conn_cm_id->device; |
1824 | struct iscsi_cmd *cmd = isert_cmd->iscsi_cmd; | 1865 | struct iscsi_cmd *cmd = isert_cmd->iscsi_cmd; |
1825 | 1866 | ||
1867 | isert_dbg("Cmd %p i_state %d\n", isert_cmd, cmd->i_state); | ||
1868 | |||
1826 | switch (cmd->i_state) { | 1869 | switch (cmd->i_state) { |
1827 | case ISTATE_SEND_TASKMGTRSP: | 1870 | case ISTATE_SEND_TASKMGTRSP: |
1828 | pr_debug("Calling iscsit_tmr_post_handler >>>>>>>>>>>>>>>>>\n"); | ||
1829 | |||
1830 | atomic_dec(&isert_conn->post_send_buf_count); | ||
1831 | iscsit_tmr_post_handler(cmd, cmd->conn); | 1871 | iscsit_tmr_post_handler(cmd, cmd->conn); |
1832 | 1872 | case ISTATE_SEND_REJECT: /* FALLTHRU */ | |
1833 | cmd->i_state = ISTATE_SENT_STATUS; | 1873 | case ISTATE_SEND_TEXTRSP: /* FALLTHRU */ |
1834 | isert_completion_put(&isert_cmd->tx_desc, isert_cmd, ib_dev, false); | ||
1835 | break; | ||
1836 | case ISTATE_SEND_REJECT: | ||
1837 | pr_debug("Got isert_do_control_comp ISTATE_SEND_REJECT: >>>\n"); | ||
1838 | atomic_dec(&isert_conn->post_send_buf_count); | ||
1839 | |||
1840 | cmd->i_state = ISTATE_SENT_STATUS; | 1874 | cmd->i_state = ISTATE_SENT_STATUS; |
1841 | isert_completion_put(&isert_cmd->tx_desc, isert_cmd, ib_dev, false); | 1875 | isert_completion_put(&isert_cmd->tx_desc, isert_cmd, |
1876 | ib_dev, false); | ||
1842 | break; | 1877 | break; |
1843 | case ISTATE_SEND_LOGOUTRSP: | 1878 | case ISTATE_SEND_LOGOUTRSP: |
1844 | pr_debug("Calling iscsit_logout_post_handler >>>>>>>>>>>>>>\n"); | ||
1845 | |||
1846 | atomic_dec(&isert_conn->post_send_buf_count); | ||
1847 | iscsit_logout_post_handler(cmd, cmd->conn); | 1879 | iscsit_logout_post_handler(cmd, cmd->conn); |
1848 | break; | 1880 | break; |
1849 | case ISTATE_SEND_TEXTRSP: | ||
1850 | atomic_dec(&isert_conn->post_send_buf_count); | ||
1851 | cmd->i_state = ISTATE_SENT_STATUS; | ||
1852 | isert_completion_put(&isert_cmd->tx_desc, isert_cmd, ib_dev, false); | ||
1853 | break; | ||
1854 | default: | 1881 | default: |
1855 | pr_err("Unknown do_control_comp i_state %d\n", cmd->i_state); | 1882 | isert_err("Unknown i_state %d\n", cmd->i_state); |
1856 | dump_stack(); | 1883 | dump_stack(); |
1857 | break; | 1884 | break; |
1858 | } | 1885 | } |
@@ -1865,7 +1892,6 @@ isert_response_completion(struct iser_tx_desc *tx_desc, | |||
1865 | struct ib_device *ib_dev) | 1892 | struct ib_device *ib_dev) |
1866 | { | 1893 | { |
1867 | struct iscsi_cmd *cmd = isert_cmd->iscsi_cmd; | 1894 | struct iscsi_cmd *cmd = isert_cmd->iscsi_cmd; |
1868 | struct isert_rdma_wr *wr = &isert_cmd->rdma_wr; | ||
1869 | 1895 | ||
1870 | if (cmd->i_state == ISTATE_SEND_TASKMGTRSP || | 1896 | if (cmd->i_state == ISTATE_SEND_TASKMGTRSP || |
1871 | cmd->i_state == ISTATE_SEND_LOGOUTRSP || | 1897 | cmd->i_state == ISTATE_SEND_LOGOUTRSP || |
@@ -1878,267 +1904,151 @@ isert_response_completion(struct iser_tx_desc *tx_desc, | |||
1878 | return; | 1904 | return; |
1879 | } | 1905 | } |
1880 | 1906 | ||
1881 | /** | ||
1882 | * If send_wr_num is 0 this means that we got | ||
1883 | * RDMA completion and we cleared it and we should | ||
1884 | * simply decrement the response post. else the | ||
1885 | * response is incorporated in send_wr_num, just | ||
1886 | * sub it. | ||
1887 | **/ | ||
1888 | if (wr->send_wr_num) | ||
1889 | atomic_sub(wr->send_wr_num, &isert_conn->post_send_buf_count); | ||
1890 | else | ||
1891 | atomic_dec(&isert_conn->post_send_buf_count); | ||
1892 | |||
1893 | cmd->i_state = ISTATE_SENT_STATUS; | 1907 | cmd->i_state = ISTATE_SENT_STATUS; |
1894 | isert_completion_put(tx_desc, isert_cmd, ib_dev, false); | 1908 | isert_completion_put(tx_desc, isert_cmd, ib_dev, false); |
1895 | } | 1909 | } |
1896 | 1910 | ||
1897 | static void | 1911 | static void |
1898 | __isert_send_completion(struct iser_tx_desc *tx_desc, | 1912 | isert_send_completion(struct iser_tx_desc *tx_desc, |
1899 | struct isert_conn *isert_conn) | 1913 | struct isert_conn *isert_conn) |
1900 | { | 1914 | { |
1901 | struct ib_device *ib_dev = isert_conn->conn_cm_id->device; | 1915 | struct ib_device *ib_dev = isert_conn->conn_cm_id->device; |
1902 | struct isert_cmd *isert_cmd = tx_desc->isert_cmd; | 1916 | struct isert_cmd *isert_cmd = tx_desc->isert_cmd; |
1903 | struct isert_rdma_wr *wr; | 1917 | struct isert_rdma_wr *wr; |
1904 | 1918 | ||
1905 | if (!isert_cmd) { | 1919 | if (!isert_cmd) { |
1906 | atomic_dec(&isert_conn->post_send_buf_count); | ||
1907 | isert_unmap_tx_desc(tx_desc, ib_dev); | 1920 | isert_unmap_tx_desc(tx_desc, ib_dev); |
1908 | return; | 1921 | return; |
1909 | } | 1922 | } |
1910 | wr = &isert_cmd->rdma_wr; | 1923 | wr = &isert_cmd->rdma_wr; |
1911 | 1924 | ||
1925 | isert_dbg("Cmd %p iser_ib_op %d\n", isert_cmd, wr->iser_ib_op); | ||
1926 | |||
1912 | switch (wr->iser_ib_op) { | 1927 | switch (wr->iser_ib_op) { |
1913 | case ISER_IB_RECV: | 1928 | case ISER_IB_RECV: |
1914 | pr_err("isert_send_completion: Got ISER_IB_RECV\n"); | 1929 | isert_err("Got ISER_IB_RECV\n"); |
1915 | dump_stack(); | 1930 | dump_stack(); |
1916 | break; | 1931 | break; |
1917 | case ISER_IB_SEND: | 1932 | case ISER_IB_SEND: |
1918 | pr_debug("isert_send_completion: Got ISER_IB_SEND\n"); | ||
1919 | isert_response_completion(tx_desc, isert_cmd, | 1933 | isert_response_completion(tx_desc, isert_cmd, |
1920 | isert_conn, ib_dev); | 1934 | isert_conn, ib_dev); |
1921 | break; | 1935 | break; |
1922 | case ISER_IB_RDMA_WRITE: | 1936 | case ISER_IB_RDMA_WRITE: |
1923 | pr_debug("isert_send_completion: Got ISER_IB_RDMA_WRITE\n"); | ||
1924 | atomic_sub(wr->send_wr_num, &isert_conn->post_send_buf_count); | ||
1925 | isert_completion_rdma_write(tx_desc, isert_cmd); | 1937 | isert_completion_rdma_write(tx_desc, isert_cmd); |
1926 | break; | 1938 | break; |
1927 | case ISER_IB_RDMA_READ: | 1939 | case ISER_IB_RDMA_READ: |
1928 | pr_debug("isert_send_completion: Got ISER_IB_RDMA_READ:\n"); | ||
1929 | |||
1930 | atomic_sub(wr->send_wr_num, &isert_conn->post_send_buf_count); | ||
1931 | isert_completion_rdma_read(tx_desc, isert_cmd); | 1940 | isert_completion_rdma_read(tx_desc, isert_cmd); |
1932 | break; | 1941 | break; |
1933 | default: | 1942 | default: |
1934 | pr_err("Unknown wr->iser_ib_op: 0x%02x\n", wr->iser_ib_op); | 1943 | isert_err("Unknown wr->iser_ib_op: 0x%x\n", wr->iser_ib_op); |
1935 | dump_stack(); | 1944 | dump_stack(); |
1936 | break; | 1945 | break; |
1937 | } | 1946 | } |
1938 | } | 1947 | } |
1939 | 1948 | ||
1940 | static void | 1949 | /** |
1941 | isert_send_completion(struct iser_tx_desc *tx_desc, | 1950 | * is_isert_tx_desc() - Indicate if the completion wr_id |
1942 | struct isert_conn *isert_conn) | 1951 | * is a TX descriptor or not. |
1943 | { | 1952 | * @isert_conn: iser connection |
1944 | struct llist_node *llnode = tx_desc->comp_llnode_batch; | 1953 | * @wr_id: completion WR identifier |
1945 | struct iser_tx_desc *t; | 1954 | * |
1946 | /* | 1955 | * Since we cannot rely on wc opcode in FLUSH errors |
1947 | * Drain coalesced completion llist starting from comp_llnode_batch | 1956 | * we must work around it by checking if the wr_id address |
1948 | * setup in isert_init_send_wr(), and then complete trailing tx_desc. | 1957 | * falls in the iser connection rx_descs buffer. If so |
1949 | */ | 1958 | * it is an RX descriptor, otherwise it is a TX. |
1950 | while (llnode) { | 1959 | */ |
1951 | t = llist_entry(llnode, struct iser_tx_desc, comp_llnode); | 1960 | static inline bool |
1952 | llnode = llist_next(llnode); | 1961 | is_isert_tx_desc(struct isert_conn *isert_conn, void *wr_id) |
1953 | __isert_send_completion(t, isert_conn); | ||
1954 | } | ||
1955 | __isert_send_completion(tx_desc, isert_conn); | ||
1956 | } | ||
1957 | |||
1958 | static void | ||
1959 | isert_cq_drain_comp_llist(struct isert_conn *isert_conn, struct ib_device *ib_dev) | ||
1960 | { | 1962 | { |
1961 | struct llist_node *llnode; | 1963 | void *start = isert_conn->conn_rx_descs; |
1962 | struct isert_rdma_wr *wr; | 1964 | int len = ISERT_QP_MAX_RECV_DTOS * sizeof(*isert_conn->conn_rx_descs); |
1963 | struct iser_tx_desc *t; | ||
1964 | 1965 | ||
1965 | mutex_lock(&isert_conn->conn_mutex); | 1966 | if (wr_id >= start && wr_id < start + len) |
1966 | llnode = llist_del_all(&isert_conn->conn_comp_llist); | 1967 | return false; |
1967 | isert_conn->conn_comp_batch = 0; | ||
1968 | mutex_unlock(&isert_conn->conn_mutex); | ||
1969 | |||
1970 | while (llnode) { | ||
1971 | t = llist_entry(llnode, struct iser_tx_desc, comp_llnode); | ||
1972 | llnode = llist_next(llnode); | ||
1973 | wr = &t->isert_cmd->rdma_wr; | ||
1974 | |||
1975 | /** | ||
1976 | * If send_wr_num is 0 this means that we got | ||
1977 | * RDMA completion and we cleared it and we should | ||
1978 | * simply decrement the response post. else the | ||
1979 | * response is incorporated in send_wr_num, just | ||
1980 | * sub it. | ||
1981 | **/ | ||
1982 | if (wr->send_wr_num) | ||
1983 | atomic_sub(wr->send_wr_num, | ||
1984 | &isert_conn->post_send_buf_count); | ||
1985 | else | ||
1986 | atomic_dec(&isert_conn->post_send_buf_count); | ||
1987 | 1968 | ||
1988 | isert_completion_put(t, t->isert_cmd, ib_dev, true); | 1969 | return true; |
1989 | } | ||
1990 | } | 1970 | } |
1991 | 1971 | ||
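As the kernel-doc above explains, flush-error completions cannot be classified by wc opcode, so is_isert_tx_desc() classifies the wr_id by address: anything inside the contiguous conn_rx_descs array is an RX descriptor, anything else is a TX descriptor. A runnable model of that membership test (the array size and names are made up for illustration):

```c
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define N_RX_DESCS 8

struct rx_desc { char buf[512]; };

static struct rx_desc rx_descs[N_RX_DESCS];

static bool is_tx_desc(const void *wr_id)
{
	uintptr_t start = (uintptr_t)rx_descs;
	uintptr_t end = start + sizeof(rx_descs);
	uintptr_t p = (uintptr_t)wr_id;

	return !(p >= start && p < end);   /* outside rx_descs => TX */
}

int main(void)
{
	struct rx_desc tx_like;            /* lives outside the array */

	printf("rx_descs[3]: tx? %d\n", is_tx_desc(&rx_descs[3]));
	printf("other desc:  tx? %d\n", is_tx_desc(&tx_like));
	return 0;
}
```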
1992 | static void | 1972 | static void |
1993 | isert_cq_tx_comp_err(struct iser_tx_desc *tx_desc, struct isert_conn *isert_conn) | 1973 | isert_cq_comp_err(struct isert_conn *isert_conn, struct ib_wc *wc) |
1994 | { | 1974 | { |
1995 | struct ib_device *ib_dev = isert_conn->conn_cm_id->device; | 1975 | if (wc->wr_id == ISER_BEACON_WRID) { |
1996 | struct isert_cmd *isert_cmd = tx_desc->isert_cmd; | 1976 | isert_info("conn %p completing conn_wait_comp_err\n", |
1997 | struct llist_node *llnode = tx_desc->comp_llnode_batch; | 1977 | isert_conn); |
1998 | struct isert_rdma_wr *wr; | 1978 | complete(&isert_conn->conn_wait_comp_err); |
1999 | struct iser_tx_desc *t; | 1979 | } else if (is_isert_tx_desc(isert_conn, (void *)(uintptr_t)wc->wr_id)) { |
2000 | 1980 | struct ib_device *ib_dev = isert_conn->conn_cm_id->device; | |
2001 | while (llnode) { | 1981 | struct isert_cmd *isert_cmd; |
2002 | t = llist_entry(llnode, struct iser_tx_desc, comp_llnode); | 1982 | struct iser_tx_desc *desc; |
2003 | llnode = llist_next(llnode); | ||
2004 | wr = &t->isert_cmd->rdma_wr; | ||
2005 | 1983 | ||
2006 | /** | 1984 | desc = (struct iser_tx_desc *)(uintptr_t)wc->wr_id; |
2007 | * If send_wr_num is 0 this means that we got | 1985 | isert_cmd = desc->isert_cmd; |
2008 | * RDMA completion and we cleared it and we should | 1986 | if (!isert_cmd) |
2009 | * simply decrement the response post. else the | 1987 | isert_unmap_tx_desc(desc, ib_dev); |
2010 | * response is incorporated in send_wr_num, just | ||
2011 | * sub it. | ||
2012 | **/ | ||
2013 | if (wr->send_wr_num) | ||
2014 | atomic_sub(wr->send_wr_num, | ||
2015 | &isert_conn->post_send_buf_count); | ||
2016 | else | 1988 | else |
2017 | atomic_dec(&isert_conn->post_send_buf_count); | 1989 | isert_completion_put(desc, isert_cmd, ib_dev, true); |
2018 | 1990 | } else { | |
2019 | isert_completion_put(t, t->isert_cmd, ib_dev, true); | 1991 | isert_conn->post_recv_buf_count--; |
2020 | } | 1992 | if (!isert_conn->post_recv_buf_count) |
2021 | tx_desc->comp_llnode_batch = NULL; | 1993 | iscsit_cause_connection_reinstatement(isert_conn->conn, 0); |
2022 | |||
2023 | if (!isert_cmd) | ||
2024 | isert_unmap_tx_desc(tx_desc, ib_dev); | ||
2025 | else | ||
2026 | isert_completion_put(tx_desc, isert_cmd, ib_dev, true); | ||
2027 | } | ||
2028 | |||
2029 | static void | ||
2030 | isert_cq_rx_comp_err(struct isert_conn *isert_conn) | ||
2031 | { | ||
2032 | struct ib_device *ib_dev = isert_conn->conn_cm_id->device; | ||
2033 | struct iscsi_conn *conn = isert_conn->conn; | ||
2034 | |||
2035 | if (isert_conn->post_recv_buf_count) | ||
2036 | return; | ||
2037 | |||
2038 | isert_cq_drain_comp_llist(isert_conn, ib_dev); | ||
2039 | |||
2040 | if (conn->sess) { | ||
2041 | target_sess_cmd_list_set_waiting(conn->sess->se_sess); | ||
2042 | target_wait_for_sess_cmds(conn->sess->se_sess); | ||
2043 | } | 1994 | } |
2044 | |||
2045 | while (atomic_read(&isert_conn->post_send_buf_count)) | ||
2046 | msleep(3000); | ||
2047 | |||
2048 | mutex_lock(&isert_conn->conn_mutex); | ||
2049 | isert_conn->state = ISER_CONN_DOWN; | ||
2050 | mutex_unlock(&isert_conn->conn_mutex); | ||
2051 | |||
2052 | iscsit_cause_connection_reinstatement(isert_conn->conn, 0); | ||
2053 | |||
2054 | complete(&isert_conn->conn_wait_comp_err); | ||
2055 | } | 1995 | } |
2056 | 1996 | ||
2057 | static void | 1997 | static void |
2058 | isert_cq_tx_work(struct work_struct *work) | 1998 | isert_handle_wc(struct ib_wc *wc) |
2059 | { | 1999 | { |
2060 | struct isert_cq_desc *cq_desc = container_of(work, | ||
2061 | struct isert_cq_desc, cq_tx_work); | ||
2062 | struct isert_device *device = cq_desc->device; | ||
2063 | int cq_index = cq_desc->cq_index; | ||
2064 | struct ib_cq *tx_cq = device->dev_tx_cq[cq_index]; | ||
2065 | struct isert_conn *isert_conn; | 2000 | struct isert_conn *isert_conn; |
2066 | struct iser_tx_desc *tx_desc; | 2001 | struct iser_tx_desc *tx_desc; |
2067 | struct ib_wc wc; | 2002 | struct iser_rx_desc *rx_desc; |
2068 | |||
2069 | while (ib_poll_cq(tx_cq, 1, &wc) == 1) { | ||
2070 | tx_desc = (struct iser_tx_desc *)(unsigned long)wc.wr_id; | ||
2071 | isert_conn = wc.qp->qp_context; | ||
2072 | 2003 | ||
2073 | if (wc.status == IB_WC_SUCCESS) { | 2004 | isert_conn = wc->qp->qp_context; |
2074 | isert_send_completion(tx_desc, isert_conn); | 2005 | if (likely(wc->status == IB_WC_SUCCESS)) { |
2006 | if (wc->opcode == IB_WC_RECV) { | ||
2007 | rx_desc = (struct iser_rx_desc *)(uintptr_t)wc->wr_id; | ||
2008 | isert_rx_completion(rx_desc, isert_conn, wc->byte_len); | ||
2075 | } else { | 2009 | } else { |
2076 | pr_debug("TX wc.status != IB_WC_SUCCESS >>>>>>>>>>>>>>\n"); | 2010 | tx_desc = (struct iser_tx_desc *)(uintptr_t)wc->wr_id; |
2077 | pr_debug("TX wc.status: 0x%08x\n", wc.status); | 2011 | isert_send_completion(tx_desc, isert_conn); |
2078 | pr_debug("TX wc.vendor_err: 0x%08x\n", wc.vendor_err); | ||
2079 | |||
2080 | if (wc.wr_id != ISER_FASTREG_LI_WRID) { | ||
2081 | if (tx_desc->llnode_active) | ||
2082 | continue; | ||
2083 | |||
2084 | atomic_dec(&isert_conn->post_send_buf_count); | ||
2085 | isert_cq_tx_comp_err(tx_desc, isert_conn); | ||
2086 | } | ||
2087 | } | 2012 | } |
2088 | } | 2013 | } else { |
2089 | 2014 | if (wc->status != IB_WC_WR_FLUSH_ERR) | |
2090 | ib_req_notify_cq(tx_cq, IB_CQ_NEXT_COMP); | 2015 | isert_err("wr id %llx status %d vend_err %x\n", |
2091 | } | 2016 | wc->wr_id, wc->status, wc->vendor_err); |
2092 | 2017 | else | |
2093 | static void | 2018 | isert_dbg("flush error: wr id %llx\n", wc->wr_id); |
2094 | isert_cq_tx_callback(struct ib_cq *cq, void *context) | ||
2095 | { | ||
2096 | struct isert_cq_desc *cq_desc = (struct isert_cq_desc *)context; | ||
2097 | 2019 | ||
2098 | queue_work(isert_comp_wq, &cq_desc->cq_tx_work); | 2020 | if (wc->wr_id != ISER_FASTREG_LI_WRID) |
2021 | isert_cq_comp_err(isert_conn, wc); | ||
2022 | } | ||
2099 | } | 2023 | } |
2100 | 2024 | ||
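isert_handle_wc() above is the payoff of merging the TX and RX CQs: a single handler dispatches on wc->opcode for successful completions and falls back to the wr_id heuristics (beacon WRID, RX address check) on errors, where the opcode is not trustworthy. A compact model of that dispatch shape (the enums and handlers are placeholders, not verbs API types):

```c
#include <stdio.h>

enum wc_status { WC_SUCCESS, WC_FLUSH_ERR, WC_OTHER_ERR };
enum wc_opcode { WC_RECV, WC_SEND };

struct wc {
	enum wc_status status;
	enum wc_opcode opcode;
};

static void handle_wc(const struct wc *wc)
{
	if (wc->status == WC_SUCCESS) {
		/* opcode is only reliable on success */
		if (wc->opcode == WC_RECV)
			puts("rx completion");
		else
			puts("tx completion");
	} else {
		if (wc->status != WC_FLUSH_ERR)
			puts("hard error: log loudly");
		else
			puts("flush: classify by wr_id, not opcode");
	}
}

int main(void)
{
	struct wc ok_rx = { WC_SUCCESS, WC_RECV };
	struct wc flushed = { WC_FLUSH_ERR, WC_RECV };

	handle_wc(&ok_rx);
	handle_wc(&flushed);
	return 0;
}
```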
2101 | static void | 2025 | static void |
2102 | isert_cq_rx_work(struct work_struct *work) | 2026 | isert_cq_work(struct work_struct *work) |
2103 | { | 2027 | { |
2104 | struct isert_cq_desc *cq_desc = container_of(work, | 2028 | enum { isert_poll_budget = 65536 }; |
2105 | struct isert_cq_desc, cq_rx_work); | 2029 | struct isert_comp *comp = container_of(work, struct isert_comp, |
2106 | struct isert_device *device = cq_desc->device; | 2030 | work); |
2107 | int cq_index = cq_desc->cq_index; | 2031 | struct ib_wc *const wcs = comp->wcs; |
2108 | struct ib_cq *rx_cq = device->dev_rx_cq[cq_index]; | 2032 | int i, n, completed = 0; |
2109 | struct isert_conn *isert_conn; | ||
2110 | struct iser_rx_desc *rx_desc; | ||
2111 | struct ib_wc wc; | ||
2112 | unsigned long xfer_len; | ||
2113 | 2033 | ||
2114 | while (ib_poll_cq(rx_cq, 1, &wc) == 1) { | 2034 | while ((n = ib_poll_cq(comp->cq, ARRAY_SIZE(comp->wcs), wcs)) > 0) { |
2115 | rx_desc = (struct iser_rx_desc *)(unsigned long)wc.wr_id; | 2035 | for (i = 0; i < n; i++) |
2116 | isert_conn = wc.qp->qp_context; | 2036 | isert_handle_wc(&wcs[i]); |
2117 | 2037 | ||
2118 | if (wc.status == IB_WC_SUCCESS) { | 2038 | completed += n; |
2119 | xfer_len = (unsigned long)wc.byte_len; | 2039 | if (completed >= isert_poll_budget) |
2120 | isert_rx_completion(rx_desc, isert_conn, xfer_len); | 2040 | break; |
2121 | } else { | ||
2122 | pr_debug("RX wc.status != IB_WC_SUCCESS >>>>>>>>>>>>>>\n"); | ||
2123 | if (wc.status != IB_WC_WR_FLUSH_ERR) { | ||
2124 | pr_debug("RX wc.status: 0x%08x\n", wc.status); | ||
2125 | pr_debug("RX wc.vendor_err: 0x%08x\n", | ||
2126 | wc.vendor_err); | ||
2127 | } | ||
2128 | isert_conn->post_recv_buf_count--; | ||
2129 | isert_cq_rx_comp_err(isert_conn); | ||
2130 | } | ||
2131 | } | 2041 | } |
2132 | 2042 | ||
2133 | ib_req_notify_cq(rx_cq, IB_CQ_NEXT_COMP); | 2043 | ib_req_notify_cq(comp->cq, IB_CQ_NEXT_COMP); |
2134 | } | 2044 | } |
2135 | 2045 | ||
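The new isert_cq_work() polls the CQ in batches of ARRAY_SIZE(comp->wcs) and caps a single work item at isert_poll_budget completions, so a busy queue cannot monopolize the shared workqueue before notification is re-armed. A sketch of the loop shape, with a fake poll function standing in for ib_poll_cq() (budget and batch size are the only tunables):

```c
#include <stdio.h>

#define ARRAY_SIZE(a) (sizeof(a) / sizeof((a)[0]))
#define POLL_BUDGET 65536   /* stand-in for isert_poll_budget */

static int pending = 100000;   /* pretend this many CQEs are queued */

/* Fake ib_poll_cq(): hand out up to num completions. */
static int poll_cq(int *wcs, int num)
{
	int n = pending < num ? pending : num;

	(void)wcs;                 /* a real poll would fill these in */
	pending -= n;
	return n;
}

int main(void)
{
	int wcs[16];
	int n, completed = 0;

	while ((n = poll_cq(wcs, ARRAY_SIZE(wcs))) > 0) {
		/* handle each of the n completions here */
		completed += n;
		if (completed >= POLL_BUDGET)
			break;     /* yield; the re-armed CQ requeues us */
	}
	printf("handled %d, %d left for the next pass\n", completed, pending);
	return 0;
}
```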
2136 | static void | 2046 | static void |
2137 | isert_cq_rx_callback(struct ib_cq *cq, void *context) | 2047 | isert_cq_callback(struct ib_cq *cq, void *context) |
2138 | { | 2048 | { |
2139 | struct isert_cq_desc *cq_desc = (struct isert_cq_desc *)context; | 2049 | struct isert_comp *comp = context; |
2140 | 2050 | ||
2141 | queue_work(isert_rx_wq, &cq_desc->cq_rx_work); | 2051 | queue_work(isert_comp_wq, &comp->work); |
2142 | } | 2052 | } |
2143 | 2053 | ||
2144 | static int | 2054 | static int |
@@ -2147,13 +2057,10 @@ isert_post_response(struct isert_conn *isert_conn, struct isert_cmd *isert_cmd) | |||
2147 | struct ib_send_wr *wr_failed; | 2057 | struct ib_send_wr *wr_failed; |
2148 | int ret; | 2058 | int ret; |
2149 | 2059 | ||
2150 | atomic_inc(&isert_conn->post_send_buf_count); | ||
2151 | |||
2152 | ret = ib_post_send(isert_conn->conn_qp, &isert_cmd->tx_desc.send_wr, | 2060 | ret = ib_post_send(isert_conn->conn_qp, &isert_cmd->tx_desc.send_wr, |
2153 | &wr_failed); | 2061 | &wr_failed); |
2154 | if (ret) { | 2062 | if (ret) { |
2155 | pr_err("ib_post_send failed with %d\n", ret); | 2063 | isert_err("ib_post_send failed with %d\n", ret); |
2156 | atomic_dec(&isert_conn->post_send_buf_count); | ||
2157 | return ret; | 2064 | return ret; |
2158 | } | 2065 | } |
2159 | return ret; | 2066 | return ret; |
@@ -2200,9 +2107,9 @@ isert_put_response(struct iscsi_conn *conn, struct iscsi_cmd *cmd) | |||
2200 | isert_cmd->tx_desc.num_sge = 2; | 2107 | isert_cmd->tx_desc.num_sge = 2; |
2201 | } | 2108 | } |
2202 | 2109 | ||
2203 | isert_init_send_wr(isert_conn, isert_cmd, send_wr, false); | 2110 | isert_init_send_wr(isert_conn, isert_cmd, send_wr); |
2204 | 2111 | ||
2205 | pr_debug("Posting SCSI Response IB_WR_SEND >>>>>>>>>>>>>>>>>>>>>>\n"); | 2112 | isert_dbg("Posting SCSI Response\n"); |
2206 | 2113 | ||
2207 | return isert_post_response(isert_conn, isert_cmd); | 2114 | return isert_post_response(isert_conn, isert_cmd); |
2208 | } | 2115 | } |
@@ -2231,8 +2138,16 @@ isert_get_sup_prot_ops(struct iscsi_conn *conn) | |||
2231 | struct isert_conn *isert_conn = (struct isert_conn *)conn->context; | 2138 | struct isert_conn *isert_conn = (struct isert_conn *)conn->context; |
2232 | struct isert_device *device = isert_conn->conn_device; | 2139 | struct isert_device *device = isert_conn->conn_device; |
2233 | 2140 | ||
2234 | if (device->pi_capable) | 2141 | if (conn->tpg->tpg_attrib.t10_pi) { |
2235 | return TARGET_PROT_ALL; | 2142 | if (device->pi_capable) { |
2143 | isert_info("conn %p PI offload enabled\n", isert_conn); | ||
2144 | isert_conn->pi_support = true; | ||
2145 | return TARGET_PROT_ALL; | ||
2146 | } | ||
2147 | } | ||
2148 | |||
2149 | isert_info("conn %p PI offload disabled\n", isert_conn); | ||
2150 | isert_conn->pi_support = false; | ||
2236 | 2151 | ||
2237 | return TARGET_PROT_NORMAL; | 2152 | return TARGET_PROT_NORMAL; |
2238 | } | 2153 | } |
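The reworked isert_get_sup_prot_ops() above reduces PI advertising to a two-condition check: the portal group must request T10-PI and the device must be capable, and the outcome is cached in the connection's pi_support flag. The same decision table as a tiny standalone function (the enum names are illustrative, not the TARGET_PROT_* constants):

```c
#include <stdbool.h>
#include <stdio.h>

enum prot_ops { PROT_NORMAL, PROT_ALL };

static enum prot_ops sup_prot_ops(bool tpg_t10_pi, bool pi_capable,
				  bool *conn_pi_support)
{
	if (tpg_t10_pi && pi_capable) {
		*conn_pi_support = true;    /* offload DIF to the HCA */
		return PROT_ALL;
	}
	*conn_pi_support = false;           /* target core handles DIF */
	return PROT_NORMAL;
}

int main(void)
{
	bool pi;

	printf("%d\n", sup_prot_ops(true, true, &pi));   /* PROT_ALL */
	printf("%d\n", sup_prot_ops(true, false, &pi));  /* PROT_NORMAL */
	return 0;
}
```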
@@ -2250,9 +2165,9 @@ isert_put_nopin(struct iscsi_cmd *cmd, struct iscsi_conn *conn, | |||
2250 | &isert_cmd->tx_desc.iscsi_header, | 2165 | &isert_cmd->tx_desc.iscsi_header, |
2251 | nopout_response); | 2166 | nopout_response); |
2252 | isert_init_tx_hdrs(isert_conn, &isert_cmd->tx_desc); | 2167 | isert_init_tx_hdrs(isert_conn, &isert_cmd->tx_desc); |
2253 | isert_init_send_wr(isert_conn, isert_cmd, send_wr, false); | 2168 | isert_init_send_wr(isert_conn, isert_cmd, send_wr); |
2254 | 2169 | ||
2255 | pr_debug("Posting NOPIN Response IB_WR_SEND >>>>>>>>>>>>>>>>>>>>>>\n"); | 2170 | isert_dbg("conn %p Posting NOPIN Response\n", isert_conn); |
2256 | 2171 | ||
2257 | return isert_post_response(isert_conn, isert_cmd); | 2172 | return isert_post_response(isert_conn, isert_cmd); |
2258 | } | 2173 | } |
@@ -2268,9 +2183,9 @@ isert_put_logout_rsp(struct iscsi_cmd *cmd, struct iscsi_conn *conn) | |||
2268 | iscsit_build_logout_rsp(cmd, conn, (struct iscsi_logout_rsp *) | 2183 | iscsit_build_logout_rsp(cmd, conn, (struct iscsi_logout_rsp *) |
2269 | &isert_cmd->tx_desc.iscsi_header); | 2184 | &isert_cmd->tx_desc.iscsi_header); |
2270 | isert_init_tx_hdrs(isert_conn, &isert_cmd->tx_desc); | 2185 | isert_init_tx_hdrs(isert_conn, &isert_cmd->tx_desc); |
2271 | isert_init_send_wr(isert_conn, isert_cmd, send_wr, false); | 2186 | isert_init_send_wr(isert_conn, isert_cmd, send_wr); |
2272 | 2187 | ||
2273 | pr_debug("Posting Logout Response IB_WR_SEND >>>>>>>>>>>>>>>>>>>>>>\n"); | 2188 | isert_dbg("conn %p Posting Logout Response\n", isert_conn); |
2274 | 2189 | ||
2275 | return isert_post_response(isert_conn, isert_cmd); | 2190 | return isert_post_response(isert_conn, isert_cmd); |
2276 | } | 2191 | } |
@@ -2286,9 +2201,9 @@ isert_put_tm_rsp(struct iscsi_cmd *cmd, struct iscsi_conn *conn) | |||
2286 | iscsit_build_task_mgt_rsp(cmd, conn, (struct iscsi_tm_rsp *) | 2201 | iscsit_build_task_mgt_rsp(cmd, conn, (struct iscsi_tm_rsp *) |
2287 | &isert_cmd->tx_desc.iscsi_header); | 2202 | &isert_cmd->tx_desc.iscsi_header); |
2288 | isert_init_tx_hdrs(isert_conn, &isert_cmd->tx_desc); | 2203 | isert_init_tx_hdrs(isert_conn, &isert_cmd->tx_desc); |
2289 | isert_init_send_wr(isert_conn, isert_cmd, send_wr, false); | 2204 | isert_init_send_wr(isert_conn, isert_cmd, send_wr); |
2290 | 2205 | ||
2291 | pr_debug("Posting Task Management Response IB_WR_SEND >>>>>>>>>>>>>>>>>>>>>>\n"); | 2206 | isert_dbg("conn %p Posting Task Management Response\n", isert_conn); |
2292 | 2207 | ||
2293 | return isert_post_response(isert_conn, isert_cmd); | 2208 | return isert_post_response(isert_conn, isert_cmd); |
2294 | } | 2209 | } |
@@ -2318,9 +2233,9 @@ isert_put_reject(struct iscsi_cmd *cmd, struct iscsi_conn *conn) | |||
2318 | tx_dsg->lkey = isert_conn->conn_mr->lkey; | 2233 | tx_dsg->lkey = isert_conn->conn_mr->lkey; |
2319 | isert_cmd->tx_desc.num_sge = 2; | 2234 | isert_cmd->tx_desc.num_sge = 2; |
2320 | 2235 | ||
2321 | isert_init_send_wr(isert_conn, isert_cmd, send_wr, false); | 2236 | isert_init_send_wr(isert_conn, isert_cmd, send_wr); |
2322 | 2237 | ||
2323 | pr_debug("Posting Reject IB_WR_SEND >>>>>>>>>>>>>>>>>>>>>>\n"); | 2238 | isert_dbg("conn %p Posting Reject\n", isert_conn); |
2324 | 2239 | ||
2325 | return isert_post_response(isert_conn, isert_cmd); | 2240 | return isert_post_response(isert_conn, isert_cmd); |
2326 | } | 2241 | } |
@@ -2358,9 +2273,9 @@ isert_put_text_rsp(struct iscsi_cmd *cmd, struct iscsi_conn *conn) | |||
2358 | tx_dsg->lkey = isert_conn->conn_mr->lkey; | 2273 | tx_dsg->lkey = isert_conn->conn_mr->lkey; |
2359 | isert_cmd->tx_desc.num_sge = 2; | 2274 | isert_cmd->tx_desc.num_sge = 2; |
2360 | } | 2275 | } |
2361 | isert_init_send_wr(isert_conn, isert_cmd, send_wr, false); | 2276 | isert_init_send_wr(isert_conn, isert_cmd, send_wr); |
2362 | 2277 | ||
2363 | pr_debug("Posting Text Response IB_WR_SEND >>>>>>>>>>>>>>>>>>>>>>\n"); | 2278 | isert_dbg("conn %p Text Reject\n", isert_conn); |
2364 | 2279 | ||
2365 | return isert_post_response(isert_conn, isert_cmd); | 2280 | return isert_post_response(isert_conn, isert_cmd); |
2366 | } | 2281 | } |
@@ -2383,30 +2298,31 @@ isert_build_rdma_wr(struct isert_conn *isert_conn, struct isert_cmd *isert_cmd, | |||
2383 | 2298 | ||
2384 | send_wr->sg_list = ib_sge; | 2299 | send_wr->sg_list = ib_sge; |
2385 | send_wr->num_sge = sg_nents; | 2300 | send_wr->num_sge = sg_nents; |
2386 | send_wr->wr_id = (unsigned long)&isert_cmd->tx_desc; | 2301 | send_wr->wr_id = (uintptr_t)&isert_cmd->tx_desc; |
2387 | /* | 2302 | /* |
2388 | * Perform mapping of TCM scatterlist memory ib_sge dma_addr. | 2303 | * Perform mapping of TCM scatterlist memory ib_sge dma_addr. |
2389 | */ | 2304 | */ |
2390 | for_each_sg(sg_start, tmp_sg, sg_nents, i) { | 2305 | for_each_sg(sg_start, tmp_sg, sg_nents, i) { |
2391 | pr_debug("ISER RDMA from SGL dma_addr: 0x%16llx dma_len: %u, page_off: %u\n", | 2306 | isert_dbg("RDMA from SGL dma_addr: 0x%llx dma_len: %u, " |
2392 | (unsigned long long)tmp_sg->dma_address, | 2307 | "page_off: %u\n", |
2393 | tmp_sg->length, page_off); | 2308 | (unsigned long long)tmp_sg->dma_address, |
2309 | tmp_sg->length, page_off); | ||
2394 | 2310 | ||
2395 | ib_sge->addr = ib_sg_dma_address(ib_dev, tmp_sg) + page_off; | 2311 | ib_sge->addr = ib_sg_dma_address(ib_dev, tmp_sg) + page_off; |
2396 | ib_sge->length = min_t(u32, data_left, | 2312 | ib_sge->length = min_t(u32, data_left, |
2397 | ib_sg_dma_len(ib_dev, tmp_sg) - page_off); | 2313 | ib_sg_dma_len(ib_dev, tmp_sg) - page_off); |
2398 | ib_sge->lkey = isert_conn->conn_mr->lkey; | 2314 | ib_sge->lkey = isert_conn->conn_mr->lkey; |
2399 | 2315 | ||
2400 | pr_debug("RDMA ib_sge: addr: 0x%16llx length: %u lkey: %08x\n", | 2316 | isert_dbg("RDMA ib_sge: addr: 0x%llx length: %u lkey: %x\n", |
2401 | ib_sge->addr, ib_sge->length, ib_sge->lkey); | 2317 | ib_sge->addr, ib_sge->length, ib_sge->lkey); |
2402 | page_off = 0; | 2318 | page_off = 0; |
2403 | data_left -= ib_sge->length; | 2319 | data_left -= ib_sge->length; |
2404 | ib_sge++; | 2320 | ib_sge++; |
2405 | pr_debug("Incrementing ib_sge pointer to %p\n", ib_sge); | 2321 | isert_dbg("Incrementing ib_sge pointer to %p\n", ib_sge); |
2406 | } | 2322 | } |
2407 | 2323 | ||
2408 | pr_debug("Set outgoing sg_list: %p num_sg: %u from TCM SGLs\n", | 2324 | isert_dbg("Set outgoing sg_list: %p num_sg: %u from TCM SGLs\n", |
2409 | send_wr->sg_list, send_wr->num_sge); | 2325 | send_wr->sg_list, send_wr->num_sge); |
2410 | 2326 | ||
2411 | return sg_nents; | 2327 | return sg_nents; |
2412 | } | 2328 | } |
@@ -2438,7 +2354,7 @@ isert_map_rdma(struct iscsi_conn *conn, struct iscsi_cmd *cmd, | |||
2438 | 2354 | ||
2439 | ib_sge = kzalloc(sizeof(struct ib_sge) * data->nents, GFP_KERNEL); | 2355 | ib_sge = kzalloc(sizeof(struct ib_sge) * data->nents, GFP_KERNEL); |
2440 | if (!ib_sge) { | 2356 | if (!ib_sge) { |
2441 | pr_warn("Unable to allocate ib_sge\n"); | 2357 | isert_warn("Unable to allocate ib_sge\n"); |
2442 | ret = -ENOMEM; | 2358 | ret = -ENOMEM; |
2443 | goto unmap_cmd; | 2359 | goto unmap_cmd; |
2444 | } | 2360 | } |
@@ -2448,7 +2364,7 @@ isert_map_rdma(struct iscsi_conn *conn, struct iscsi_cmd *cmd, | |||
2448 | wr->send_wr = kzalloc(sizeof(struct ib_send_wr) * wr->send_wr_num, | 2364 | wr->send_wr = kzalloc(sizeof(struct ib_send_wr) * wr->send_wr_num, |
2449 | GFP_KERNEL); | 2365 | GFP_KERNEL); |
2450 | if (!wr->send_wr) { | 2366 | if (!wr->send_wr) { |
2451 | pr_debug("Unable to allocate wr->send_wr\n"); | 2367 | isert_dbg("Unable to allocate wr->send_wr\n"); |
2452 | ret = -ENOMEM; | 2368 | ret = -ENOMEM; |
2453 | goto unmap_cmd; | 2369 | goto unmap_cmd; |
2454 | } | 2370 | } |
@@ -2512,9 +2428,9 @@ isert_map_fr_pagelist(struct ib_device *ib_dev, | |||
2512 | chunk_start = start_addr; | 2428 | chunk_start = start_addr; |
2513 | end_addr = start_addr + ib_sg_dma_len(ib_dev, tmp_sg); | 2429 | end_addr = start_addr + ib_sg_dma_len(ib_dev, tmp_sg); |
2514 | 2430 | ||
2515 | pr_debug("SGL[%d] dma_addr: 0x%16llx len: %u\n", | 2431 | isert_dbg("SGL[%d] dma_addr: 0x%llx len: %u\n", |
2516 | i, (unsigned long long)tmp_sg->dma_address, | 2432 | i, (unsigned long long)tmp_sg->dma_address, |
2517 | tmp_sg->length); | 2433 | tmp_sg->length); |
2518 | 2434 | ||
2519 | if ((end_addr & ~PAGE_MASK) && i < last_ent) { | 2435 | if ((end_addr & ~PAGE_MASK) && i < last_ent) { |
2520 | new_chunk = 0; | 2436 | new_chunk = 0; |
@@ -2525,8 +2441,8 @@ isert_map_fr_pagelist(struct ib_device *ib_dev, | |||
2525 | page = chunk_start & PAGE_MASK; | 2441 | page = chunk_start & PAGE_MASK; |
2526 | do { | 2442 | do { |
2527 | fr_pl[n_pages++] = page; | 2443 | fr_pl[n_pages++] = page; |
2528 | pr_debug("Mapped page_list[%d] page_addr: 0x%16llx\n", | 2444 | isert_dbg("Mapped page_list[%d] page_addr: 0x%llx\n", |
2529 | n_pages - 1, page); | 2445 | n_pages - 1, page); |
2530 | page += PAGE_SIZE; | 2446 | page += PAGE_SIZE; |
2531 | } while (page < end_addr); | 2447 | } while (page < end_addr); |
2532 | } | 2448 | } |
@@ -2534,6 +2450,21 @@ isert_map_fr_pagelist(struct ib_device *ib_dev, | |||
2534 | return n_pages; | 2450 | return n_pages; |
2535 | } | 2451 | } |
2536 | 2452 | ||
2453 | static inline void | ||
2454 | isert_inv_rkey(struct ib_send_wr *inv_wr, struct ib_mr *mr) | ||
2455 | { | ||
2456 | u32 rkey; | ||
2457 | |||
2458 | memset(inv_wr, 0, sizeof(*inv_wr)); | ||
2459 | inv_wr->wr_id = ISER_FASTREG_LI_WRID; | ||
2460 | inv_wr->opcode = IB_WR_LOCAL_INV; | ||
2461 | inv_wr->ex.invalidate_rkey = mr->rkey; | ||
2462 | |||
2463 | /* Bump the key */ | ||
2464 | rkey = ib_inc_rkey(mr->rkey); | ||
2465 | ib_update_fast_reg_key(mr, rkey); | ||
2466 | } | ||
2467 | |||
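isert_inv_rkey() above replaces the open-coded key bump (`key = (u8)(mr->rkey & 0x000000FF); ib_update_fast_reg_key(mr, ++key);`) with ib_inc_rkey(). The point of the bump is that only the low 8 bits of an rkey are the consumer-owned key portion; incrementing them after a local invalidate makes any stale remote key fail instead of silently matching. A userspace model of that arithmetic (modeled on the helper's documented behavior; verify against the kernel's ib_verbs.h before relying on it):

```c
#include <stdint.h>
#include <stdio.h>

/* Increment only the low 8 "key" bits, preserving the 24-bit index. */
static uint32_t inc_rkey(uint32_t rkey)
{
	const uint32_t mask = 0x000000ff;

	return ((rkey + 1) & mask) | (rkey & ~mask);
}

int main(void)
{
	uint32_t rkey = 0x12345aff;

	printf("0x%08x -> 0x%08x\n", rkey, inc_rkey(rkey));
	/* prints 0x12345aff -> 0x12345a00: the key byte wraps,
	 * the index bits stay intact */
	return 0;
}
```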
2537 | static int | 2468 | static int |
2538 | isert_fast_reg_mr(struct isert_conn *isert_conn, | 2469 | isert_fast_reg_mr(struct isert_conn *isert_conn, |
2539 | struct fast_reg_descriptor *fr_desc, | 2470 | struct fast_reg_descriptor *fr_desc, |
@@ -2548,15 +2479,13 @@ isert_fast_reg_mr(struct isert_conn *isert_conn, | |||
2548 | struct ib_send_wr *bad_wr, *wr = NULL; | 2479 | struct ib_send_wr *bad_wr, *wr = NULL; |
2549 | int ret, pagelist_len; | 2480 | int ret, pagelist_len; |
2550 | u32 page_off; | 2481 | u32 page_off; |
2551 | u8 key; | ||
2552 | 2482 | ||
2553 | if (mem->dma_nents == 1) { | 2483 | if (mem->dma_nents == 1) { |
2554 | sge->lkey = isert_conn->conn_mr->lkey; | 2484 | sge->lkey = isert_conn->conn_mr->lkey; |
2555 | sge->addr = ib_sg_dma_address(ib_dev, &mem->sg[0]); | 2485 | sge->addr = ib_sg_dma_address(ib_dev, &mem->sg[0]); |
2556 | sge->length = ib_sg_dma_len(ib_dev, &mem->sg[0]); | 2486 | sge->length = ib_sg_dma_len(ib_dev, &mem->sg[0]); |
2557 | pr_debug("%s:%d sge: addr: 0x%llx length: %u lkey: %x\n", | 2487 | isert_dbg("sge: addr: 0x%llx length: %u lkey: %x\n", |
2558 | __func__, __LINE__, sge->addr, sge->length, | 2488 | sge->addr, sge->length, sge->lkey); |
2559 | sge->lkey); | ||
2560 | return 0; | 2489 | return 0; |
2561 | } | 2490 | } |
2562 | 2491 | ||
@@ -2572,21 +2501,15 @@ isert_fast_reg_mr(struct isert_conn *isert_conn, | |||
2572 | 2501 | ||
2573 | page_off = mem->offset % PAGE_SIZE; | 2502 | page_off = mem->offset % PAGE_SIZE; |
2574 | 2503 | ||
2575 | pr_debug("Use fr_desc %p sg_nents %d offset %u\n", | 2504 | isert_dbg("Use fr_desc %p sg_nents %d offset %u\n", |
2576 | fr_desc, mem->nents, mem->offset); | 2505 | fr_desc, mem->nents, mem->offset); |
2577 | 2506 | ||
2578 | pagelist_len = isert_map_fr_pagelist(ib_dev, mem->sg, mem->nents, | 2507 | pagelist_len = isert_map_fr_pagelist(ib_dev, mem->sg, mem->nents, |
2579 | &frpl->page_list[0]); | 2508 | &frpl->page_list[0]); |
2580 | 2509 | ||
2581 | if (!(fr_desc->ind & ISERT_DATA_KEY_VALID)) { | 2510 | if (!(fr_desc->ind & ind)) { |
2582 | memset(&inv_wr, 0, sizeof(inv_wr)); | 2511 | isert_inv_rkey(&inv_wr, mr); |
2583 | inv_wr.wr_id = ISER_FASTREG_LI_WRID; | ||
2584 | inv_wr.opcode = IB_WR_LOCAL_INV; | ||
2585 | inv_wr.ex.invalidate_rkey = mr->rkey; | ||
2586 | wr = &inv_wr; | 2512 | wr = &inv_wr; |
2587 | /* Bump the key */ | ||
2588 | key = (u8)(mr->rkey & 0x000000FF); | ||
2589 | ib_update_fast_reg_key(mr, ++key); | ||
2590 | } | 2513 | } |
2591 | 2514 | ||
2592 | /* Prepare FASTREG WR */ | 2515 | /* Prepare FASTREG WR */ |
@@ -2608,7 +2531,7 @@ isert_fast_reg_mr(struct isert_conn *isert_conn, | |||
2608 | 2531 | ||
2609 | ret = ib_post_send(isert_conn->conn_qp, wr, &bad_wr); | 2532 | ret = ib_post_send(isert_conn->conn_qp, wr, &bad_wr); |
2610 | if (ret) { | 2533 | if (ret) { |
2611 | pr_err("fast registration failed, ret:%d\n", ret); | 2534 | isert_err("fast registration failed, ret:%d\n", ret); |
2612 | return ret; | 2535 | return ret; |
2613 | } | 2536 | } |
2614 | fr_desc->ind &= ~ind; | 2537 | fr_desc->ind &= ~ind; |
@@ -2617,9 +2540,8 @@ isert_fast_reg_mr(struct isert_conn *isert_conn, | |||
2617 | sge->addr = frpl->page_list[0] + page_off; | 2540 | sge->addr = frpl->page_list[0] + page_off; |
2618 | sge->length = mem->len; | 2541 | sge->length = mem->len; |
2619 | 2542 | ||
2620 | pr_debug("%s:%d sge: addr: 0x%llx length: %u lkey: %x\n", | 2543 | isert_dbg("sge: addr: 0x%llx length: %u lkey: %x\n", |
2621 | __func__, __LINE__, sge->addr, sge->length, | 2544 | sge->addr, sge->length, sge->lkey); |
2622 | sge->lkey); | ||
2623 | 2545 | ||
2624 | return ret; | 2546 | return ret; |
2625 | } | 2547 | } |
@@ -2665,7 +2587,7 @@ isert_set_sig_attrs(struct se_cmd *se_cmd, struct ib_sig_attrs *sig_attrs) | |||
2665 | isert_set_dif_domain(se_cmd, sig_attrs, &sig_attrs->mem); | 2587 | isert_set_dif_domain(se_cmd, sig_attrs, &sig_attrs->mem); |
2666 | break; | 2588 | break; |
2667 | default: | 2589 | default: |
2668 | pr_err("Unsupported PI operation %d\n", se_cmd->prot_op); | 2590 | isert_err("Unsupported PI operation %d\n", se_cmd->prot_op); |
2669 | return -EINVAL; | 2591 | return -EINVAL; |
2670 | } | 2592 | } |
2671 | 2593 | ||
@@ -2681,17 +2603,16 @@ isert_set_prot_checks(u8 prot_checks) | |||
2681 | } | 2603 | } |
2682 | 2604 | ||
2683 | static int | 2605 | static int |
2684 | isert_reg_sig_mr(struct isert_conn *isert_conn, struct se_cmd *se_cmd, | 2606 | isert_reg_sig_mr(struct isert_conn *isert_conn, |
2685 | struct fast_reg_descriptor *fr_desc, | 2607 | struct se_cmd *se_cmd, |
2686 | struct ib_sge *data_sge, struct ib_sge *prot_sge, | 2608 | struct isert_rdma_wr *rdma_wr, |
2687 | struct ib_sge *sig_sge) | 2609 | struct fast_reg_descriptor *fr_desc) |
2688 | { | 2610 | { |
2689 | struct ib_send_wr sig_wr, inv_wr; | 2611 | struct ib_send_wr sig_wr, inv_wr; |
2690 | struct ib_send_wr *bad_wr, *wr = NULL; | 2612 | struct ib_send_wr *bad_wr, *wr = NULL; |
2691 | struct pi_context *pi_ctx = fr_desc->pi_ctx; | 2613 | struct pi_context *pi_ctx = fr_desc->pi_ctx; |
2692 | struct ib_sig_attrs sig_attrs; | 2614 | struct ib_sig_attrs sig_attrs; |
2693 | int ret; | 2615 | int ret; |
2694 | u32 key; | ||
2695 | 2616 | ||
2696 | memset(&sig_attrs, 0, sizeof(sig_attrs)); | 2617 | memset(&sig_attrs, 0, sizeof(sig_attrs)); |
2697 | ret = isert_set_sig_attrs(se_cmd, &sig_attrs); | 2618 | ret = isert_set_sig_attrs(se_cmd, &sig_attrs); |
@@ -2701,26 +2622,20 @@ isert_reg_sig_mr(struct isert_conn *isert_conn, struct se_cmd *se_cmd, | |||
2701 | sig_attrs.check_mask = isert_set_prot_checks(se_cmd->prot_checks); | 2622 | sig_attrs.check_mask = isert_set_prot_checks(se_cmd->prot_checks); |
2702 | 2623 | ||
2703 | if (!(fr_desc->ind & ISERT_SIG_KEY_VALID)) { | 2624 | if (!(fr_desc->ind & ISERT_SIG_KEY_VALID)) { |
2704 | memset(&inv_wr, 0, sizeof(inv_wr)); | 2625 | isert_inv_rkey(&inv_wr, pi_ctx->sig_mr); |
2705 | inv_wr.opcode = IB_WR_LOCAL_INV; | ||
2706 | inv_wr.wr_id = ISER_FASTREG_LI_WRID; | ||
2707 | inv_wr.ex.invalidate_rkey = pi_ctx->sig_mr->rkey; | ||
2708 | wr = &inv_wr; | 2626 | wr = &inv_wr; |
2709 | /* Bump the key */ | ||
2710 | key = (u8)(pi_ctx->sig_mr->rkey & 0x000000FF); | ||
2711 | ib_update_fast_reg_key(pi_ctx->sig_mr, ++key); | ||
2712 | } | 2627 | } |
2713 | 2628 | ||
2714 | memset(&sig_wr, 0, sizeof(sig_wr)); | 2629 | memset(&sig_wr, 0, sizeof(sig_wr)); |
2715 | sig_wr.opcode = IB_WR_REG_SIG_MR; | 2630 | sig_wr.opcode = IB_WR_REG_SIG_MR; |
2716 | sig_wr.wr_id = ISER_FASTREG_LI_WRID; | 2631 | sig_wr.wr_id = ISER_FASTREG_LI_WRID; |
2717 | sig_wr.sg_list = data_sge; | 2632 | sig_wr.sg_list = &rdma_wr->ib_sg[DATA]; |
2718 | sig_wr.num_sge = 1; | 2633 | sig_wr.num_sge = 1; |
2719 | sig_wr.wr.sig_handover.access_flags = IB_ACCESS_LOCAL_WRITE; | 2634 | sig_wr.wr.sig_handover.access_flags = IB_ACCESS_LOCAL_WRITE; |
2720 | sig_wr.wr.sig_handover.sig_attrs = &sig_attrs; | 2635 | sig_wr.wr.sig_handover.sig_attrs = &sig_attrs; |
2721 | sig_wr.wr.sig_handover.sig_mr = pi_ctx->sig_mr; | 2636 | sig_wr.wr.sig_handover.sig_mr = pi_ctx->sig_mr; |
2722 | if (se_cmd->t_prot_sg) | 2637 | if (se_cmd->t_prot_sg) |
2723 | sig_wr.wr.sig_handover.prot = prot_sge; | 2638 | sig_wr.wr.sig_handover.prot = &rdma_wr->ib_sg[PROT]; |
2724 | 2639 | ||
2725 | if (!wr) | 2640 | if (!wr) |
2726 | wr = &sig_wr; | 2641 | wr = &sig_wr; |
@@ -2729,39 +2644,98 @@ isert_reg_sig_mr(struct isert_conn *isert_conn, struct se_cmd *se_cmd, | |||
2729 | 2644 | ||
2730 | ret = ib_post_send(isert_conn->conn_qp, wr, &bad_wr); | 2645 | ret = ib_post_send(isert_conn->conn_qp, wr, &bad_wr); |
2731 | if (ret) { | 2646 | if (ret) { |
2732 | pr_err("fast registration failed, ret:%d\n", ret); | 2647 | isert_err("fast registration failed, ret:%d\n", ret); |
2733 | goto err; | 2648 | goto err; |
2734 | } | 2649 | } |
2735 | fr_desc->ind &= ~ISERT_SIG_KEY_VALID; | 2650 | fr_desc->ind &= ~ISERT_SIG_KEY_VALID; |
2736 | 2651 | ||
2737 | sig_sge->lkey = pi_ctx->sig_mr->lkey; | 2652 | rdma_wr->ib_sg[SIG].lkey = pi_ctx->sig_mr->lkey; |
2738 | sig_sge->addr = 0; | 2653 | rdma_wr->ib_sg[SIG].addr = 0; |
2739 | sig_sge->length = se_cmd->data_length; | 2654 | rdma_wr->ib_sg[SIG].length = se_cmd->data_length; |
2740 | if (se_cmd->prot_op != TARGET_PROT_DIN_STRIP && | 2655 | if (se_cmd->prot_op != TARGET_PROT_DIN_STRIP && |
2741 | se_cmd->prot_op != TARGET_PROT_DOUT_INSERT) | 2656 | se_cmd->prot_op != TARGET_PROT_DOUT_INSERT) |
2742 | /* | 2657 | /* |
2743 | * We have protection guards on the wire | 2658 | * We have protection guards on the wire |
2744 | * so we need to set a larger transfer | 2659 | * so we need to set a larger transfer |
2745 | */ | 2660 | */ |
2746 | sig_sge->length += se_cmd->prot_length; | 2661 | rdma_wr->ib_sg[SIG].length += se_cmd->prot_length; |
2747 | 2662 | ||
2748 | pr_debug("sig_sge: addr: 0x%llx length: %u lkey: %x\n", | 2663 | isert_dbg("sig_sge: addr: 0x%llx length: %u lkey: %x\n", |
2749 | sig_sge->addr, sig_sge->length, | 2664 | rdma_wr->ib_sg[SIG].addr, rdma_wr->ib_sg[SIG].length, |
2750 | sig_sge->lkey); | 2665 | rdma_wr->ib_sg[SIG].lkey); |
2751 | err: | 2666 | err: |
2752 | return ret; | 2667 | return ret; |
2753 | } | 2668 | } |
2754 | 2669 | ||
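The length fixup near the end of isert_reg_sig_mr() encodes whether protection data actually travels on the wire: for DIN_STRIP and DOUT_INSERT only data bytes cross, otherwise the DIF tuples ride along and the signature SGE must cover both. A hedged sketch of the arithmetic, assuming 512-byte sectors with an 8-byte DIF tuple each (the driver takes prot_length from the target core rather than computing it):

```c
#include <stdint.h>

/* Wire length covered by the signature MR: payload alone, or payload
 * plus interleaved protection tuples when PI is carried on the wire. */
static uint32_t sig_sge_length(uint32_t data_length, int prot_on_wire)
{
	uint32_t sectors = data_length / 512;	/* assumed sector size */
	uint32_t prot_length = sectors * 8;	/* assumed 8-byte tuples */

	return prot_on_wire ? data_length + prot_length : data_length;
}
```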
2755 | static int | 2670 | static int |
2671 | isert_handle_prot_cmd(struct isert_conn *isert_conn, | ||
2672 | struct isert_cmd *isert_cmd, | ||
2673 | struct isert_rdma_wr *wr) | ||
2674 | { | ||
2675 | struct isert_device *device = isert_conn->conn_device; | ||
2676 | struct se_cmd *se_cmd = &isert_cmd->iscsi_cmd->se_cmd; | ||
2677 | int ret; | ||
2678 | |||
2679 | if (!wr->fr_desc->pi_ctx) { | ||
2680 | ret = isert_create_pi_ctx(wr->fr_desc, | ||
2681 | device->ib_device, | ||
2682 | isert_conn->conn_pd); | ||
2683 | if (ret) { | ||
2684 | isert_err("conn %p failed to allocate pi_ctx\n", | ||
2685 | isert_conn); | ||
2686 | return ret; | ||
2687 | } | ||
2688 | } | ||
2689 | |||
2690 | if (se_cmd->t_prot_sg) { | ||
2691 | ret = isert_map_data_buf(isert_conn, isert_cmd, | ||
2692 | se_cmd->t_prot_sg, | ||
2693 | se_cmd->t_prot_nents, | ||
2694 | se_cmd->prot_length, | ||
2695 | 0, wr->iser_ib_op, &wr->prot); | ||
2696 | if (ret) { | ||
2697 | isert_err("conn %p failed to map protection buffer\n", | ||
2698 | isert_conn); | ||
2699 | return ret; | ||
2700 | } | ||
2701 | |||
2702 | memset(&wr->ib_sg[PROT], 0, sizeof(wr->ib_sg[PROT])); | ||
2703 | ret = isert_fast_reg_mr(isert_conn, wr->fr_desc, &wr->prot, | ||
2704 | ISERT_PROT_KEY_VALID, &wr->ib_sg[PROT]); | ||
2705 | if (ret) { | ||
2706 | isert_err("conn %p failed to fast reg mr\n", | ||
2707 | isert_conn); | ||
2708 | goto unmap_prot_cmd; | ||
2709 | } | ||
2710 | } | ||
2711 | |||
2712 | ret = isert_reg_sig_mr(isert_conn, se_cmd, wr, wr->fr_desc); | ||
2713 | if (ret) { | ||
2714 | isert_err("conn %p failed to reg sig mr\n", | ||
2715 | isert_conn); | ||
2716 | goto unmap_prot_cmd; | ||
2717 | } | ||
2718 | wr->fr_desc->ind |= ISERT_PROTECTED; | ||
2719 | |||
2720 | return 0; | ||
2721 | |||
2722 | unmap_prot_cmd: | ||
2723 | if (se_cmd->t_prot_sg) | ||
2724 | isert_unmap_data_buf(isert_conn, &wr->prot); | ||
2725 | |||
2726 | return ret; | ||
2727 | } | ||
2728 | |||
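isert_handle_prot_cmd() pulls together what isert_reg_rdma() used to inline, and it makes the protection-information context lazy: pi_ctx is allocated the first time a descriptor carries a protected command instead of for every pool entry at connection setup. The shape of that pattern reduced to a sketch; pi_ctx_alloc() is a hypothetical stand-in, not the driver's isert_create_pi_ctx():

```c
struct pi_ctx;				/* opaque protection-info state */

struct fr_desc {
	struct pi_ctx *pi_ctx;		/* NULL until first protected cmd */
};

struct pi_ctx *pi_ctx_alloc(void);	/* hypothetical allocator */

/* Allocate protection resources on first use only; descriptors that
 * never see a protected command never pay for a pi_ctx. */
static int ensure_pi_ctx(struct fr_desc *desc)
{
	if (desc->pi_ctx)
		return 0;

	desc->pi_ctx = pi_ctx_alloc();
	return desc->pi_ctx ? 0 : -1;
}
```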
2729 | static int | ||
2756 | isert_reg_rdma(struct iscsi_conn *conn, struct iscsi_cmd *cmd, | 2730 | isert_reg_rdma(struct iscsi_conn *conn, struct iscsi_cmd *cmd, |
2757 | struct isert_rdma_wr *wr) | 2731 | struct isert_rdma_wr *wr) |
2758 | { | 2732 | { |
2759 | struct se_cmd *se_cmd = &cmd->se_cmd; | 2733 | struct se_cmd *se_cmd = &cmd->se_cmd; |
2760 | struct isert_cmd *isert_cmd = iscsit_priv_cmd(cmd); | 2734 | struct isert_cmd *isert_cmd = iscsit_priv_cmd(cmd); |
2761 | struct isert_conn *isert_conn = conn->context; | 2735 | struct isert_conn *isert_conn = conn->context; |
2762 | struct ib_sge data_sge; | ||
2763 | struct ib_send_wr *send_wr; | ||
2764 | struct fast_reg_descriptor *fr_desc = NULL; | 2736 | struct fast_reg_descriptor *fr_desc = NULL; |
2737 | struct ib_send_wr *send_wr; | ||
2738 | struct ib_sge *ib_sg; | ||
2765 | u32 offset; | 2739 | u32 offset; |
2766 | int ret = 0; | 2740 | int ret = 0; |
2767 | unsigned long flags; | 2741 | unsigned long flags; |
@@ -2775,8 +2749,7 @@ isert_reg_rdma(struct iscsi_conn *conn, struct iscsi_cmd *cmd, | |||
2775 | if (ret) | 2749 | if (ret) |
2776 | return ret; | 2750 | return ret; |
2777 | 2751 | ||
2778 | if (wr->data.dma_nents != 1 || | 2752 | if (wr->data.dma_nents != 1 || isert_prot_cmd(isert_conn, se_cmd)) { |
2779 | se_cmd->prot_op != TARGET_PROT_NORMAL) { | ||
2780 | spin_lock_irqsave(&isert_conn->conn_lock, flags); | 2753 | spin_lock_irqsave(&isert_conn->conn_lock, flags); |
2781 | fr_desc = list_first_entry(&isert_conn->conn_fr_pool, | 2754 | fr_desc = list_first_entry(&isert_conn->conn_fr_pool, |
2782 | struct fast_reg_descriptor, list); | 2755 | struct fast_reg_descriptor, list); |
@@ -2786,38 +2759,21 @@ isert_reg_rdma(struct iscsi_conn *conn, struct iscsi_cmd *cmd, | |||
2786 | } | 2759 | } |
2787 | 2760 | ||
2788 | ret = isert_fast_reg_mr(isert_conn, fr_desc, &wr->data, | 2761 | ret = isert_fast_reg_mr(isert_conn, fr_desc, &wr->data, |
2789 | ISERT_DATA_KEY_VALID, &data_sge); | 2762 | ISERT_DATA_KEY_VALID, &wr->ib_sg[DATA]); |
2790 | if (ret) | 2763 | if (ret) |
2791 | goto unmap_cmd; | 2764 | goto unmap_cmd; |
2792 | 2765 | ||
2793 | if (se_cmd->prot_op != TARGET_PROT_NORMAL) { | 2766 | if (isert_prot_cmd(isert_conn, se_cmd)) { |
2794 | struct ib_sge prot_sge, sig_sge; | 2767 | ret = isert_handle_prot_cmd(isert_conn, isert_cmd, wr); |
2795 | |||
2796 | if (se_cmd->t_prot_sg) { | ||
2797 | ret = isert_map_data_buf(isert_conn, isert_cmd, | ||
2798 | se_cmd->t_prot_sg, | ||
2799 | se_cmd->t_prot_nents, | ||
2800 | se_cmd->prot_length, | ||
2801 | 0, wr->iser_ib_op, &wr->prot); | ||
2802 | if (ret) | ||
2803 | goto unmap_cmd; | ||
2804 | |||
2805 | ret = isert_fast_reg_mr(isert_conn, fr_desc, &wr->prot, | ||
2806 | ISERT_PROT_KEY_VALID, &prot_sge); | ||
2807 | if (ret) | ||
2808 | goto unmap_prot_cmd; | ||
2809 | } | ||
2810 | |||
2811 | ret = isert_reg_sig_mr(isert_conn, se_cmd, fr_desc, | ||
2812 | &data_sge, &prot_sge, &sig_sge); | ||
2813 | if (ret) | 2768 | if (ret) |
2814 | goto unmap_prot_cmd; | 2769 | goto unmap_cmd; |
2815 | 2770 | ||
2816 | fr_desc->ind |= ISERT_PROTECTED; | 2771 | ib_sg = &wr->ib_sg[SIG]; |
2817 | memcpy(&wr->s_ib_sge, &sig_sge, sizeof(sig_sge)); | 2772 | } else { |
2818 | } else | 2773 | ib_sg = &wr->ib_sg[DATA]; |
2819 | memcpy(&wr->s_ib_sge, &data_sge, sizeof(data_sge)); | 2774 | } |
2820 | 2775 | ||
2776 | memcpy(&wr->s_ib_sge, ib_sg, sizeof(*ib_sg)); | ||
2821 | wr->ib_sge = &wr->s_ib_sge; | 2777 | wr->ib_sge = &wr->s_ib_sge; |
2822 | wr->send_wr_num = 1; | 2778 | wr->send_wr_num = 1; |
2823 | memset(&wr->s_send_wr, 0, sizeof(*send_wr)); | 2779 | memset(&wr->s_send_wr, 0, sizeof(*send_wr)); |
@@ -2827,12 +2783,12 @@ isert_reg_rdma(struct iscsi_conn *conn, struct iscsi_cmd *cmd, | |||
2827 | send_wr = &isert_cmd->rdma_wr.s_send_wr; | 2783 | send_wr = &isert_cmd->rdma_wr.s_send_wr; |
2828 | send_wr->sg_list = &wr->s_ib_sge; | 2784 | send_wr->sg_list = &wr->s_ib_sge; |
2829 | send_wr->num_sge = 1; | 2785 | send_wr->num_sge = 1; |
2830 | send_wr->wr_id = (unsigned long)&isert_cmd->tx_desc; | 2786 | send_wr->wr_id = (uintptr_t)&isert_cmd->tx_desc; |
2831 | if (wr->iser_ib_op == ISER_IB_RDMA_WRITE) { | 2787 | if (wr->iser_ib_op == ISER_IB_RDMA_WRITE) { |
2832 | send_wr->opcode = IB_WR_RDMA_WRITE; | 2788 | send_wr->opcode = IB_WR_RDMA_WRITE; |
2833 | send_wr->wr.rdma.remote_addr = isert_cmd->read_va; | 2789 | send_wr->wr.rdma.remote_addr = isert_cmd->read_va; |
2834 | send_wr->wr.rdma.rkey = isert_cmd->read_stag; | 2790 | send_wr->wr.rdma.rkey = isert_cmd->read_stag; |
2835 | send_wr->send_flags = se_cmd->prot_op == TARGET_PROT_NORMAL ? | 2791 | send_wr->send_flags = !isert_prot_cmd(isert_conn, se_cmd) ? |
2836 | 0 : IB_SEND_SIGNALED; | 2792 | 0 : IB_SEND_SIGNALED; |
2837 | } else { | 2793 | } else { |
2838 | send_wr->opcode = IB_WR_RDMA_READ; | 2794 | send_wr->opcode = IB_WR_RDMA_READ; |
@@ -2842,9 +2798,7 @@ isert_reg_rdma(struct iscsi_conn *conn, struct iscsi_cmd *cmd, | |||
2842 | } | 2798 | } |
2843 | 2799 | ||
2844 | return 0; | 2800 | return 0; |
2845 | unmap_prot_cmd: | 2801 | |
2846 | if (se_cmd->t_prot_sg) | ||
2847 | isert_unmap_data_buf(isert_conn, &wr->prot); | ||
2848 | unmap_cmd: | 2802 | unmap_cmd: |
2849 | if (fr_desc) { | 2803 | if (fr_desc) { |
2850 | spin_lock_irqsave(&isert_conn->conn_lock, flags); | 2804 | spin_lock_irqsave(&isert_conn->conn_lock, flags); |
@@ -2867,16 +2821,17 @@ isert_put_datain(struct iscsi_conn *conn, struct iscsi_cmd *cmd) | |||
2867 | struct ib_send_wr *wr_failed; | 2821 | struct ib_send_wr *wr_failed; |
2868 | int rc; | 2822 | int rc; |
2869 | 2823 | ||
2870 | pr_debug("Cmd: %p RDMA_WRITE data_length: %u\n", | 2824 | isert_dbg("Cmd: %p RDMA_WRITE data_length: %u\n", |
2871 | isert_cmd, se_cmd->data_length); | 2825 | isert_cmd, se_cmd->data_length); |
2826 | |||
2872 | wr->iser_ib_op = ISER_IB_RDMA_WRITE; | 2827 | wr->iser_ib_op = ISER_IB_RDMA_WRITE; |
2873 | rc = device->reg_rdma_mem(conn, cmd, wr); | 2828 | rc = device->reg_rdma_mem(conn, cmd, wr); |
2874 | if (rc) { | 2829 | if (rc) { |
2875 | pr_err("Cmd: %p failed to prepare RDMA res\n", isert_cmd); | 2830 | isert_err("Cmd: %p failed to prepare RDMA res\n", isert_cmd); |
2876 | return rc; | 2831 | return rc; |
2877 | } | 2832 | } |
2878 | 2833 | ||
2879 | if (se_cmd->prot_op == TARGET_PROT_NORMAL) { | 2834 | if (!isert_prot_cmd(isert_conn, se_cmd)) { |
2880 | /* | 2835 | /* |
2881 | * Build isert_conn->tx_desc for iSCSI response PDU and attach | 2836 | * Build isert_conn->tx_desc for iSCSI response PDU and attach |
2882 | */ | 2837 | */ |
@@ -2886,24 +2841,20 @@ isert_put_datain(struct iscsi_conn *conn, struct iscsi_cmd *cmd) | |||
2886 | &isert_cmd->tx_desc.iscsi_header); | 2841 | &isert_cmd->tx_desc.iscsi_header); |
2887 | isert_init_tx_hdrs(isert_conn, &isert_cmd->tx_desc); | 2842 | isert_init_tx_hdrs(isert_conn, &isert_cmd->tx_desc); |
2888 | isert_init_send_wr(isert_conn, isert_cmd, | 2843 | isert_init_send_wr(isert_conn, isert_cmd, |
2889 | &isert_cmd->tx_desc.send_wr, false); | 2844 | &isert_cmd->tx_desc.send_wr); |
2890 | isert_cmd->rdma_wr.s_send_wr.next = &isert_cmd->tx_desc.send_wr; | 2845 | isert_cmd->rdma_wr.s_send_wr.next = &isert_cmd->tx_desc.send_wr; |
2891 | wr->send_wr_num += 1; | 2846 | wr->send_wr_num += 1; |
2892 | } | 2847 | } |
2893 | 2848 | ||
2894 | atomic_add(wr->send_wr_num, &isert_conn->post_send_buf_count); | ||
2895 | |||
2896 | rc = ib_post_send(isert_conn->conn_qp, wr->send_wr, &wr_failed); | 2849 | rc = ib_post_send(isert_conn->conn_qp, wr->send_wr, &wr_failed); |
2897 | if (rc) { | 2850 | if (rc) |
2898 | pr_warn("ib_post_send() failed for IB_WR_RDMA_WRITE\n"); | 2851 | isert_warn("ib_post_send() failed for IB_WR_RDMA_WRITE\n"); |
2899 | atomic_sub(wr->send_wr_num, &isert_conn->post_send_buf_count); | ||
2900 | } | ||
2901 | 2852 | ||
2902 | if (se_cmd->prot_op == TARGET_PROT_NORMAL) | 2853 | if (!isert_prot_cmd(isert_conn, se_cmd)) |
2903 | pr_debug("Cmd: %p posted RDMA_WRITE + Response for iSER Data " | 2854 | isert_dbg("Cmd: %p posted RDMA_WRITE + Response for iSER Data " |
2904 | "READ\n", isert_cmd); | 2855 | "READ\n", isert_cmd); |
2905 | else | 2856 | else |
2906 | pr_debug("Cmd: %p posted RDMA_WRITE for iSER Data READ\n", | 2857 | isert_dbg("Cmd: %p posted RDMA_WRITE for iSER Data READ\n", |
2907 | isert_cmd); | 2858 | isert_cmd); |
2908 | 2859 | ||
2909 | return 1; | 2860 | return 1; |
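For unprotected commands, isert_put_datain() links the iSCSI response SEND behind the RDMA_WRITE so a single ib_post_send() submits the whole chain with one doorbell, and only the tail WR is signaled. A kernel-style fragment of that chaining idea (a sketch; qp stands in for isert_conn->conn_qp):

```c
struct ib_send_wr rdma_wr = {}, resp_wr = {}, *bad_wr;
int ret;

rdma_wr.opcode     = IB_WR_RDMA_WRITE;
rdma_wr.next       = &resp_wr;		/* data first, then status   */
resp_wr.opcode     = IB_WR_SEND;
resp_wr.send_flags = IB_SEND_SIGNALED;	/* only the tail needs a CQE */

ret = ib_post_send(qp, &rdma_wr, &bad_wr);	/* one doorbell for both */
```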
@@ -2920,23 +2871,20 @@ isert_get_dataout(struct iscsi_conn *conn, struct iscsi_cmd *cmd, bool recovery) | |||
2920 | struct ib_send_wr *wr_failed; | 2871 | struct ib_send_wr *wr_failed; |
2921 | int rc; | 2872 | int rc; |
2922 | 2873 | ||
2923 | pr_debug("Cmd: %p RDMA_READ data_length: %u write_data_done: %u\n", | 2874 | isert_dbg("Cmd: %p RDMA_READ data_length: %u write_data_done: %u\n", |
2924 | isert_cmd, se_cmd->data_length, cmd->write_data_done); | 2875 | isert_cmd, se_cmd->data_length, cmd->write_data_done); |
2925 | wr->iser_ib_op = ISER_IB_RDMA_READ; | 2876 | wr->iser_ib_op = ISER_IB_RDMA_READ; |
2926 | rc = device->reg_rdma_mem(conn, cmd, wr); | 2877 | rc = device->reg_rdma_mem(conn, cmd, wr); |
2927 | if (rc) { | 2878 | if (rc) { |
2928 | pr_err("Cmd: %p failed to prepare RDMA res\n", isert_cmd); | 2879 | isert_err("Cmd: %p failed to prepare RDMA res\n", isert_cmd); |
2929 | return rc; | 2880 | return rc; |
2930 | } | 2881 | } |
2931 | 2882 | ||
2932 | atomic_add(wr->send_wr_num, &isert_conn->post_send_buf_count); | ||
2933 | |||
2934 | rc = ib_post_send(isert_conn->conn_qp, wr->send_wr, &wr_failed); | 2883 | rc = ib_post_send(isert_conn->conn_qp, wr->send_wr, &wr_failed); |
2935 | if (rc) { | 2884 | if (rc) |
2936 | pr_warn("ib_post_send() failed for IB_WR_RDMA_READ\n"); | 2885 | isert_warn("ib_post_send() failed for IB_WR_RDMA_READ\n"); |
2937 | atomic_sub(wr->send_wr_num, &isert_conn->post_send_buf_count); | 2886 | |
2938 | } | 2887 | isert_dbg("Cmd: %p posted RDMA_READ memory for ISER Data WRITE\n", |
2939 | pr_debug("Cmd: %p posted RDMA_READ memory for ISER Data WRITE\n", | ||
2940 | isert_cmd); | 2888 | isert_cmd); |
2941 | 2889 | ||
2942 | return 0; | 2890 | return 0; |
@@ -2952,7 +2900,7 @@ isert_immediate_queue(struct iscsi_conn *conn, struct iscsi_cmd *cmd, int state) | |||
2952 | ret = isert_put_nopin(cmd, conn, false); | 2900 | ret = isert_put_nopin(cmd, conn, false); |
2953 | break; | 2901 | break; |
2954 | default: | 2902 | default: |
2955 | pr_err("Unknown immediate state: 0x%02x\n", state); | 2903 | isert_err("Unknown immediate state: 0x%02x\n", state); |
2956 | ret = -EINVAL; | 2904 | ret = -EINVAL; |
2957 | break; | 2905 | break; |
2958 | } | 2906 | } |
@@ -2963,15 +2911,14 @@ isert_immediate_queue(struct iscsi_conn *conn, struct iscsi_cmd *cmd, int state) | |||
2963 | static int | 2911 | static int |
2964 | isert_response_queue(struct iscsi_conn *conn, struct iscsi_cmd *cmd, int state) | 2912 | isert_response_queue(struct iscsi_conn *conn, struct iscsi_cmd *cmd, int state) |
2965 | { | 2913 | { |
2914 | struct isert_conn *isert_conn = conn->context; | ||
2966 | int ret; | 2915 | int ret; |
2967 | 2916 | ||
2968 | switch (state) { | 2917 | switch (state) { |
2969 | case ISTATE_SEND_LOGOUTRSP: | 2918 | case ISTATE_SEND_LOGOUTRSP: |
2970 | ret = isert_put_logout_rsp(cmd, conn); | 2919 | ret = isert_put_logout_rsp(cmd, conn); |
2971 | if (!ret) { | 2920 | if (!ret) |
2972 | pr_debug("Returning iSER Logout -EAGAIN\n"); | 2921 | isert_conn->logout_posted = true; |
2973 | ret = -EAGAIN; | ||
2974 | } | ||
2975 | break; | 2922 | break; |
2976 | case ISTATE_SEND_NOPIN: | 2923 | case ISTATE_SEND_NOPIN: |
2977 | ret = isert_put_nopin(cmd, conn, true); | 2924 | ret = isert_put_nopin(cmd, conn, true); |
@@ -2993,7 +2940,7 @@ isert_response_queue(struct iscsi_conn *conn, struct iscsi_cmd *cmd, int state) | |||
2993 | ret = isert_put_response(conn, cmd); | 2940 | ret = isert_put_response(conn, cmd); |
2994 | break; | 2941 | break; |
2995 | default: | 2942 | default: |
2996 | pr_err("Unknown response state: 0x%02x\n", state); | 2943 | isert_err("Unknown response state: 0x%02x\n", state); |
2997 | ret = -EINVAL; | 2944 | ret = -EINVAL; |
2998 | break; | 2945 | break; |
2999 | } | 2946 | } |
@@ -3001,27 +2948,64 @@ isert_response_queue(struct iscsi_conn *conn, struct iscsi_cmd *cmd, int state) | |||
3001 | return ret; | 2948 | return ret; |
3002 | } | 2949 | } |
3003 | 2950 | ||
2951 | static struct rdma_cm_id * | ||
2952 | isert_setup_id(struct isert_np *isert_np) | ||
2953 | { | ||
2954 | struct iscsi_np *np = isert_np->np; | ||
2955 | struct rdma_cm_id *id; | ||
2956 | struct sockaddr *sa; | ||
2957 | int ret; | ||
2958 | |||
2959 | sa = (struct sockaddr *)&np->np_sockaddr; | ||
2960 | isert_dbg("ksockaddr: %p, sa: %p\n", &np->np_sockaddr, sa); | ||
2961 | |||
2962 | id = rdma_create_id(isert_cma_handler, isert_np, | ||
2963 | RDMA_PS_TCP, IB_QPT_RC); | ||
2964 | if (IS_ERR(id)) { | ||
2965 | isert_err("rdma_create_id() failed: %ld\n", PTR_ERR(id)); | ||
2966 | ret = PTR_ERR(id); | ||
2967 | goto out; | ||
2968 | } | ||
2969 | isert_dbg("id %p context %p\n", id, id->context); | ||
2970 | |||
2971 | ret = rdma_bind_addr(id, sa); | ||
2972 | if (ret) { | ||
2973 | isert_err("rdma_bind_addr() failed: %d\n", ret); | ||
2974 | goto out_id; | ||
2975 | } | ||
2976 | |||
2977 | ret = rdma_listen(id, ISERT_RDMA_LISTEN_BACKLOG); | ||
2978 | if (ret) { | ||
2979 | isert_err("rdma_listen() failed: %d\n", ret); | ||
2980 | goto out_id; | ||
2981 | } | ||
2982 | |||
2983 | return id; | ||
2984 | out_id: | ||
2985 | rdma_destroy_id(id); | ||
2986 | out: | ||
2987 | return ERR_PTR(ret); | ||
2988 | } | ||
2989 | |||
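Factoring the listener setup into isert_setup_id() gives later patches in this series a single entry point for re-creating the CM id. For comparison, the same create/bind/listen ladder written against user-space librdmacm, with the same unwind-on-failure shape, might look like this sketch:

```c
#include <stddef.h>
#include <sys/socket.h>
#include <rdma/rdma_cma.h>	/* librdmacm */

static struct rdma_cm_id *
setup_listen_id(struct rdma_event_channel *ch, struct sockaddr *sa,
		int backlog)
{
	struct rdma_cm_id *id;

	if (rdma_create_id(ch, &id, NULL, RDMA_PS_TCP))
		return NULL;

	if (rdma_bind_addr(id, sa) || rdma_listen(id, backlog)) {
		rdma_destroy_id(id);	/* unwind on any failure */
		return NULL;
	}
	return id;
}
```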
3004 | static int | 2990 | static int |
3005 | isert_setup_np(struct iscsi_np *np, | 2991 | isert_setup_np(struct iscsi_np *np, |
3006 | struct __kernel_sockaddr_storage *ksockaddr) | 2992 | struct __kernel_sockaddr_storage *ksockaddr) |
3007 | { | 2993 | { |
3008 | struct isert_np *isert_np; | 2994 | struct isert_np *isert_np; |
3009 | struct rdma_cm_id *isert_lid; | 2995 | struct rdma_cm_id *isert_lid; |
3010 | struct sockaddr *sa; | ||
3011 | int ret; | 2996 | int ret; |
3012 | 2997 | ||
3013 | isert_np = kzalloc(sizeof(struct isert_np), GFP_KERNEL); | 2998 | isert_np = kzalloc(sizeof(struct isert_np), GFP_KERNEL); |
3014 | if (!isert_np) { | 2999 | if (!isert_np) { |
3015 | pr_err("Unable to allocate struct isert_np\n"); | 3000 | isert_err("Unable to allocate struct isert_np\n"); |
3016 | return -ENOMEM; | 3001 | return -ENOMEM; |
3017 | } | 3002 | } |
3018 | sema_init(&isert_np->np_sem, 0); | 3003 | sema_init(&isert_np->np_sem, 0); |
3019 | mutex_init(&isert_np->np_accept_mutex); | 3004 | mutex_init(&isert_np->np_accept_mutex); |
3020 | INIT_LIST_HEAD(&isert_np->np_accept_list); | 3005 | INIT_LIST_HEAD(&isert_np->np_accept_list); |
3021 | init_completion(&isert_np->np_login_comp); | 3006 | init_completion(&isert_np->np_login_comp); |
3007 | isert_np->np = np; | ||
3022 | 3008 | ||
3023 | sa = (struct sockaddr *)ksockaddr; | ||
3024 | pr_debug("ksockaddr: %p, sa: %p\n", ksockaddr, sa); | ||
3025 | /* | 3009 | /* |
3026 | * Setup the np->np_sockaddr from the passed sockaddr setup | 3010 | * Setup the np->np_sockaddr from the passed sockaddr setup |
3027 | * in iscsi_target_configfs.c code.. | 3011 | * in iscsi_target_configfs.c code.. |
@@ -3029,37 +3013,20 @@ isert_setup_np(struct iscsi_np *np, | |||
3029 | memcpy(&np->np_sockaddr, ksockaddr, | 3013 | memcpy(&np->np_sockaddr, ksockaddr, |
3030 | sizeof(struct __kernel_sockaddr_storage)); | 3014 | sizeof(struct __kernel_sockaddr_storage)); |
3031 | 3015 | ||
3032 | isert_lid = rdma_create_id(isert_cma_handler, np, RDMA_PS_TCP, | 3016 | isert_lid = isert_setup_id(isert_np); |
3033 | IB_QPT_RC); | ||
3034 | if (IS_ERR(isert_lid)) { | 3017 | if (IS_ERR(isert_lid)) { |
3035 | pr_err("rdma_create_id() for isert_listen_handler failed: %ld\n", | ||
3036 | PTR_ERR(isert_lid)); | ||
3037 | ret = PTR_ERR(isert_lid); | 3018 | ret = PTR_ERR(isert_lid); |
3038 | goto out; | 3019 | goto out; |
3039 | } | 3020 | } |
3040 | 3021 | ||
3041 | ret = rdma_bind_addr(isert_lid, sa); | ||
3042 | if (ret) { | ||
3043 | pr_err("rdma_bind_addr() for isert_lid failed: %d\n", ret); | ||
3044 | goto out_lid; | ||
3045 | } | ||
3046 | |||
3047 | ret = rdma_listen(isert_lid, ISERT_RDMA_LISTEN_BACKLOG); | ||
3048 | if (ret) { | ||
3049 | pr_err("rdma_listen() for isert_lid failed: %d\n", ret); | ||
3050 | goto out_lid; | ||
3051 | } | ||
3052 | |||
3053 | isert_np->np_cm_id = isert_lid; | 3022 | isert_np->np_cm_id = isert_lid; |
3054 | np->np_context = isert_np; | 3023 | np->np_context = isert_np; |
3055 | pr_debug("Setup isert_lid->context: %p\n", isert_lid->context); | ||
3056 | 3024 | ||
3057 | return 0; | 3025 | return 0; |
3058 | 3026 | ||
3059 | out_lid: | ||
3060 | rdma_destroy_id(isert_lid); | ||
3061 | out: | 3027 | out: |
3062 | kfree(isert_np); | 3028 | kfree(isert_np); |
3029 | |||
3063 | return ret; | 3030 | return ret; |
3064 | } | 3031 | } |
3065 | 3032 | ||
@@ -3075,16 +3042,12 @@ isert_rdma_accept(struct isert_conn *isert_conn) | |||
3075 | cp.retry_count = 7; | 3042 | cp.retry_count = 7; |
3076 | cp.rnr_retry_count = 7; | 3043 | cp.rnr_retry_count = 7; |
3077 | 3044 | ||
3078 | pr_debug("Before rdma_accept >>>>>>>>>>>>>>>>>>>>.\n"); | ||
3079 | |||
3080 | ret = rdma_accept(cm_id, &cp); | 3045 | ret = rdma_accept(cm_id, &cp); |
3081 | if (ret) { | 3046 | if (ret) { |
3082 | pr_err("rdma_accept() failed with: %d\n", ret); | 3047 | isert_err("rdma_accept() failed with: %d\n", ret); |
3083 | return ret; | 3048 | return ret; |
3084 | } | 3049 | } |
3085 | 3050 | ||
3086 | pr_debug("After rdma_accept >>>>>>>>>>>>>>>>>>>>>.\n"); | ||
3087 | |||
3088 | return 0; | 3051 | return 0; |
3089 | } | 3052 | } |
3090 | 3053 | ||
@@ -3094,7 +3057,15 @@ isert_get_login_rx(struct iscsi_conn *conn, struct iscsi_login *login) | |||
3094 | struct isert_conn *isert_conn = (struct isert_conn *)conn->context; | 3057 | struct isert_conn *isert_conn = (struct isert_conn *)conn->context; |
3095 | int ret; | 3058 | int ret; |
3096 | 3059 | ||
3097 | pr_debug("isert_get_login_rx before conn_login_comp conn: %p\n", conn); | 3060 | isert_info("before login_req comp conn: %p\n", isert_conn); |
3061 | ret = wait_for_completion_interruptible(&isert_conn->login_req_comp); | ||
3062 | if (ret) { | ||
3063 | isert_err("isert_conn %p interrupted before got login req\n", | ||
3064 | isert_conn); | ||
3065 | return ret; | ||
3066 | } | ||
3067 | reinit_completion(&isert_conn->login_req_comp); | ||
3068 | |||
3098 | /* | 3069 | /* |
3099 | * For login requests after the first PDU, isert_rx_login_req() will | 3070 | * For login requests after the first PDU, isert_rx_login_req() will |
3100 | * kick schedule_delayed_work(&conn->login_work) as the packet is | 3071 | * kick schedule_delayed_work(&conn->login_work) as the packet is |
@@ -3104,11 +3075,15 @@ isert_get_login_rx(struct iscsi_conn *conn, struct iscsi_login *login) | |||
3104 | if (!login->first_request) | 3075 | if (!login->first_request) |
3105 | return 0; | 3076 | return 0; |
3106 | 3077 | ||
3078 | isert_rx_login_req(isert_conn); | ||
3079 | |||
3080 | isert_info("before conn_login_comp conn: %p\n", conn); | ||
3107 | ret = wait_for_completion_interruptible(&isert_conn->conn_login_comp); | 3081 | ret = wait_for_completion_interruptible(&isert_conn->conn_login_comp); |
3108 | if (ret) | 3082 | if (ret) |
3109 | return ret; | 3083 | return ret; |
3110 | 3084 | ||
3111 | pr_debug("isert_get_login_rx processing login->req: %p\n", login->req); | 3085 | isert_info("processing login->req: %p\n", login->req); |
3086 | |||
3112 | return 0; | 3087 | return 0; |
3113 | } | 3088 | } |
3114 | 3089 | ||
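The login thread now parks on login_req_comp before touching the request buffer, and isert_rx_login_req() runs from here rather than from the RX completion path, which closes the race between connection establishment and the first login PDU. The producer side lives elsewhere in this patch; schematically it amounts to something like the following fragment (a sketch, not the exact call site):

```c
/* RX side (sketch): record the received login PDU's length in the new
 * login_req_len field and wake the login thread blocked above. */
isert_conn->login_req_len = rx_buflen;
complete(&isert_conn->login_req_comp);
```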
@@ -3161,7 +3136,7 @@ accept_wait: | |||
3161 | spin_lock_bh(&np->np_thread_lock); | 3136 | spin_lock_bh(&np->np_thread_lock); |
3162 | if (np->np_thread_state >= ISCSI_NP_THREAD_RESET) { | 3137 | if (np->np_thread_state >= ISCSI_NP_THREAD_RESET) { |
3163 | spin_unlock_bh(&np->np_thread_lock); | 3138 | spin_unlock_bh(&np->np_thread_lock); |
3164 | pr_debug("np_thread_state %d for isert_accept_np\n", | 3139 | isert_dbg("np_thread_state %d for isert_accept_np\n", |
3165 | np->np_thread_state); | 3140 | np->np_thread_state); |
3166 | /** | 3141 | /** |
3167 | * No point in stalling here when np_thread | 3142 | * No point in stalling here when np_thread |
@@ -3186,17 +3161,10 @@ accept_wait: | |||
3186 | isert_conn->conn = conn; | 3161 | isert_conn->conn = conn; |
3187 | max_accept = 0; | 3162 | max_accept = 0; |
3188 | 3163 | ||
3189 | ret = isert_rdma_post_recvl(isert_conn); | ||
3190 | if (ret) | ||
3191 | return ret; | ||
3192 | |||
3193 | ret = isert_rdma_accept(isert_conn); | ||
3194 | if (ret) | ||
3195 | return ret; | ||
3196 | |||
3197 | isert_set_conn_info(np, conn, isert_conn); | 3164 | isert_set_conn_info(np, conn, isert_conn); |
3198 | 3165 | ||
3199 | pr_debug("Processing isert_accept_np: isert_conn: %p\n", isert_conn); | 3166 | isert_dbg("Processing isert_conn: %p\n", isert_conn); |
3167 | |||
3200 | return 0; | 3168 | return 0; |
3201 | } | 3169 | } |
3202 | 3170 | ||
@@ -3204,25 +3172,103 @@ static void | |||
3204 | isert_free_np(struct iscsi_np *np) | 3172 | isert_free_np(struct iscsi_np *np) |
3205 | { | 3173 | { |
3206 | struct isert_np *isert_np = (struct isert_np *)np->np_context; | 3174 | struct isert_np *isert_np = (struct isert_np *)np->np_context; |
3175 | struct isert_conn *isert_conn, *n; | ||
3207 | 3176 | ||
3208 | if (isert_np->np_cm_id) | 3177 | if (isert_np->np_cm_id) |
3209 | rdma_destroy_id(isert_np->np_cm_id); | 3178 | rdma_destroy_id(isert_np->np_cm_id); |
3210 | 3179 | ||
3180 | /* | ||
3181 | * FIXME: We don't yet have a good way to ensure that there | ||
3182 | * are no hanging connections that completed RDMA establishment | ||
3183 | * but didn't start the iscsi login process. So work around | ||
3184 | * this by cleaning up whatever piled | ||
3185 | * up in np_accept_list. | ||
3186 | */ | ||
3187 | mutex_lock(&isert_np->np_accept_mutex); | ||
3188 | if (!list_empty(&isert_np->np_accept_list)) { | ||
3189 | isert_info("Still have isert connections, cleaning up...\n"); | ||
3190 | list_for_each_entry_safe(isert_conn, n, | ||
3191 | &isert_np->np_accept_list, | ||
3192 | conn_accept_node) { | ||
3193 | isert_info("cleaning isert_conn %p state (%d)\n", | ||
3194 | isert_conn, isert_conn->state); | ||
3195 | isert_connect_release(isert_conn); | ||
3196 | } | ||
3197 | } | ||
3198 | mutex_unlock(&isert_np->np_accept_mutex); | ||
3199 | |||
3211 | np->np_context = NULL; | 3200 | np->np_context = NULL; |
3212 | kfree(isert_np); | 3201 | kfree(isert_np); |
3213 | } | 3202 | } |
3214 | 3203 | ||
3204 | static void isert_release_work(struct work_struct *work) | ||
3205 | { | ||
3206 | struct isert_conn *isert_conn = container_of(work, | ||
3207 | struct isert_conn, | ||
3208 | release_work); | ||
3209 | |||
3210 | isert_info("Starting release conn %p\n", isert_conn); | ||
3211 | |||
3212 | wait_for_completion(&isert_conn->conn_wait); | ||
3213 | |||
3214 | mutex_lock(&isert_conn->conn_mutex); | ||
3215 | isert_conn->state = ISER_CONN_DOWN; | ||
3216 | mutex_unlock(&isert_conn->conn_mutex); | ||
3217 | |||
3218 | isert_info("Destroying conn %p\n", isert_conn); | ||
3219 | isert_put_conn(isert_conn); | ||
3220 | } | ||
3221 | |||
3222 | static void | ||
3223 | isert_wait4logout(struct isert_conn *isert_conn) | ||
3224 | { | ||
3225 | struct iscsi_conn *conn = isert_conn->conn; | ||
3226 | |||
3227 | isert_info("conn %p\n", isert_conn); | ||
3228 | |||
3229 | if (isert_conn->logout_posted) { | ||
3230 | isert_info("conn %p wait for conn_logout_comp\n", isert_conn); | ||
3231 | wait_for_completion_timeout(&conn->conn_logout_comp, | ||
3232 | SECONDS_FOR_LOGOUT_COMP * HZ); | ||
3233 | } | ||
3234 | } | ||
3235 | |||
3236 | static void | ||
3237 | isert_wait4cmds(struct iscsi_conn *conn) | ||
3238 | { | ||
3239 | isert_info("iscsi_conn %p\n", conn); | ||
3240 | |||
3241 | if (conn->sess) { | ||
3242 | target_sess_cmd_list_set_waiting(conn->sess->se_sess); | ||
3243 | target_wait_for_sess_cmds(conn->sess->se_sess); | ||
3244 | } | ||
3245 | } | ||
3246 | |||
3247 | static void | ||
3248 | isert_wait4flush(struct isert_conn *isert_conn) | ||
3249 | { | ||
3250 | struct ib_recv_wr *bad_wr; | ||
3251 | |||
3252 | isert_info("conn %p\n", isert_conn); | ||
3253 | |||
3254 | init_completion(&isert_conn->conn_wait_comp_err); | ||
3255 | isert_conn->beacon.wr_id = ISER_BEACON_WRID; | ||
3256 | /* post an indication that all flush errors were consumed */ | ||
3257 | if (ib_post_recv(isert_conn->conn_qp, &isert_conn->beacon, &bad_wr)) { | ||
3258 | isert_err("conn %p failed to post beacon\n", isert_conn); | ||
3259 | return; | ||
3260 | } | ||
3261 | |||
3262 | wait_for_completion(&isert_conn->conn_wait_comp_err); | ||
3263 | } | ||
3264 | |||
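isert_wait4flush() relies on in-order flushing: once the QP has moved to the error state, every outstanding WR completes with IB_WC_WR_FLUSH_ERR in submission order, so a freshly posted "beacon" recv WR completing proves everything ahead of it was already reaped. The matching check on the completion side presumably looks like this fragment (a sketch; the real handler also dispatches normal completions):

```c
static void isert_handle_flush_wc(struct isert_conn *isert_conn,
				  struct ib_wc *wc)
{
	/* The sentinel wr_id marks the drain beacon: all earlier WRs on
	 * this QP have already been flushed through the CQ. */
	if (wc->wr_id == ISER_BEACON_WRID)
		complete(&isert_conn->conn_wait_comp_err);
}
```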
3215 | static void isert_wait_conn(struct iscsi_conn *conn) | 3265 | static void isert_wait_conn(struct iscsi_conn *conn) |
3216 | { | 3266 | { |
3217 | struct isert_conn *isert_conn = conn->context; | 3267 | struct isert_conn *isert_conn = conn->context; |
3218 | 3268 | ||
3219 | pr_debug("isert_wait_conn: Starting \n"); | 3269 | isert_info("Starting conn %p\n", isert_conn); |
3220 | 3270 | ||
3221 | mutex_lock(&isert_conn->conn_mutex); | 3271 | mutex_lock(&isert_conn->conn_mutex); |
3222 | if (isert_conn->conn_cm_id && !isert_conn->disconnect) { | ||
3223 | pr_debug("Calling rdma_disconnect from isert_wait_conn\n"); | ||
3224 | rdma_disconnect(isert_conn->conn_cm_id); | ||
3225 | } | ||
3226 | /* | 3272 | /* |
3227 | * Only wait for conn_wait_comp_err if the isert_conn made it | 3273 | * Only wait for conn_wait_comp_err if the isert_conn made it |
3228 | * into full feature phase.. | 3274 | * into full feature phase.. |
@@ -3231,14 +3277,15 @@ static void isert_wait_conn(struct iscsi_conn *conn) | |||
3231 | mutex_unlock(&isert_conn->conn_mutex); | 3277 | mutex_unlock(&isert_conn->conn_mutex); |
3232 | return; | 3278 | return; |
3233 | } | 3279 | } |
3234 | if (isert_conn->state == ISER_CONN_UP) | 3280 | isert_conn_terminate(isert_conn); |
3235 | isert_conn->state = ISER_CONN_TERMINATING; | ||
3236 | mutex_unlock(&isert_conn->conn_mutex); | 3281 | mutex_unlock(&isert_conn->conn_mutex); |
3237 | 3282 | ||
3238 | wait_for_completion(&isert_conn->conn_wait_comp_err); | 3283 | isert_wait4cmds(conn); |
3284 | isert_wait4flush(isert_conn); | ||
3285 | isert_wait4logout(isert_conn); | ||
3239 | 3286 | ||
3240 | wait_for_completion(&isert_conn->conn_wait); | 3287 | INIT_WORK(&isert_conn->release_work, isert_release_work); |
3241 | isert_put_conn(isert_conn); | 3288 | queue_work(isert_release_wq, &isert_conn->release_work); |
3242 | } | 3289 | } |
3243 | 3290 | ||
3244 | static void isert_free_conn(struct iscsi_conn *conn) | 3291 | static void isert_free_conn(struct iscsi_conn *conn) |
@@ -3273,35 +3320,39 @@ static int __init isert_init(void) | |||
3273 | { | 3320 | { |
3274 | int ret; | 3321 | int ret; |
3275 | 3322 | ||
3276 | isert_rx_wq = alloc_workqueue("isert_rx_wq", 0, 0); | 3323 | isert_comp_wq = alloc_workqueue("isert_comp_wq", 0, 0); |
3277 | if (!isert_rx_wq) { | 3324 | if (!isert_comp_wq) { |
3278 | pr_err("Unable to allocate isert_rx_wq\n"); | 3325 | isert_err("Unable to allocate isert_comp_wq\n"); |
3326 | ret = -ENOMEM; | ||
3279 | return -ENOMEM; | 3327 | return -ENOMEM; |
3280 | } | 3328 | } |
3281 | 3329 | ||
3282 | isert_comp_wq = alloc_workqueue("isert_comp_wq", 0, 0); | 3330 | isert_release_wq = alloc_workqueue("isert_release_wq", WQ_UNBOUND, |
3283 | if (!isert_comp_wq) { | 3331 | WQ_UNBOUND_MAX_ACTIVE); |
3284 | pr_err("Unable to allocate isert_comp_wq\n"); | 3332 | if (!isert_release_wq) { |
3333 | isert_err("Unable to allocate isert_release_wq\n"); | ||
3285 | ret = -ENOMEM; | 3334 | ret = -ENOMEM; |
3286 | goto destroy_rx_wq; | 3335 | goto destroy_comp_wq; |
3287 | } | 3336 | } |
3288 | 3337 | ||
3289 | iscsit_register_transport(&iser_target_transport); | 3338 | iscsit_register_transport(&iser_target_transport); |
3290 | pr_debug("iSER_TARGET[0] - Loaded iser_target_transport\n"); | 3339 | isert_info("iSER_TARGET[0] - Loaded iser_target_transport\n"); |
3340 | |||
3291 | return 0; | 3341 | return 0; |
3292 | 3342 | ||
3293 | destroy_rx_wq: | 3343 | destroy_comp_wq: |
3294 | destroy_workqueue(isert_rx_wq); | 3344 | destroy_workqueue(isert_comp_wq); |
3345 | |||
3295 | return ret; | 3346 | return ret; |
3296 | } | 3347 | } |
3297 | 3348 | ||
3298 | static void __exit isert_exit(void) | 3349 | static void __exit isert_exit(void) |
3299 | { | 3350 | { |
3300 | flush_scheduled_work(); | 3351 | flush_scheduled_work(); |
3352 | destroy_workqueue(isert_release_wq); | ||
3301 | destroy_workqueue(isert_comp_wq); | 3353 | destroy_workqueue(isert_comp_wq); |
3302 | destroy_workqueue(isert_rx_wq); | ||
3303 | iscsit_unregister_transport(&iser_target_transport); | 3354 | iscsit_unregister_transport(&iser_target_transport); |
3304 | pr_debug("iSER_TARGET[0] - Released iser_target_transport\n"); | 3355 | isert_info("iSER_TARGET[0] - Released iser_target_transport\n"); |
3305 | } | 3356 | } |
3306 | 3357 | ||
3307 | MODULE_DESCRIPTION("iSER-Target for mainline target infrastructure"); | 3358 | MODULE_DESCRIPTION("iSER-Target for mainline target infrastructure"); |
diff --git a/drivers/infiniband/ulp/isert/ib_isert.h b/drivers/infiniband/ulp/isert/ib_isert.h index 04f51f7bf614..8dc8415d152d 100644 --- a/drivers/infiniband/ulp/isert/ib_isert.h +++ b/drivers/infiniband/ulp/isert/ib_isert.h | |||
@@ -4,9 +4,37 @@ | |||
4 | #include <rdma/ib_verbs.h> | 4 | #include <rdma/ib_verbs.h> |
5 | #include <rdma/rdma_cm.h> | 5 | #include <rdma/rdma_cm.h> |
6 | 6 | ||
7 | #define DRV_NAME "isert" | ||
8 | #define PFX DRV_NAME ": " | ||
9 | |||
10 | #define isert_dbg(fmt, arg...) \ | ||
11 | do { \ | ||
12 | if (unlikely(isert_debug_level > 2)) \ | ||
13 | printk(KERN_DEBUG PFX "%s: " fmt,\ | ||
14 | __func__ , ## arg); \ | ||
15 | } while (0) | ||
16 | |||
17 | #define isert_warn(fmt, arg...) \ | ||
18 | do { \ | ||
19 | if (unlikely(isert_debug_level > 0)) \ | ||
20 | pr_warn(PFX "%s: " fmt, \ | ||
21 | __func__ , ## arg); \ | ||
22 | } while (0) | ||
23 | |||
24 | #define isert_info(fmt, arg...) \ | ||
25 | do { \ | ||
26 | if (unlikely(isert_debug_level > 1)) \ | ||
27 | pr_info(PFX "%s: " fmt, \ | ||
28 | __func__ , ## arg); \ | ||
29 | } while (0) | ||
30 | |||
31 | #define isert_err(fmt, arg...) \ | ||
32 | pr_err(PFX "%s: " fmt, __func__ , ## arg) | ||
33 | |||
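These macros key off an isert_debug_level symbol that the header leaves to ib_isert.c; presumably it is exposed as a runtime-tunable module parameter along these lines (a sketch, with name and permissions assumed):

```c
static int isert_debug_level;
module_param_named(debug_level, isert_debug_level, int, 0644);
MODULE_PARM_DESC(debug_level, "Enable debug tracing if > 0 (default:0)");
```

With debug_level=0 only isert_err() prints; 1 adds isert_warn(), 2 adds isert_info(), and 3 or more enables isert_dbg().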
7 | #define ISERT_RDMA_LISTEN_BACKLOG 10 | 34 | #define ISERT_RDMA_LISTEN_BACKLOG 10 |
8 | #define ISCSI_ISER_SG_TABLESIZE 256 | 35 | #define ISCSI_ISER_SG_TABLESIZE 256 |
9 | #define ISER_FASTREG_LI_WRID 0xffffffffffffffffULL | 36 | #define ISER_FASTREG_LI_WRID 0xffffffffffffffffULL |
37 | #define ISER_BEACON_WRID 0xfffffffffffffffeULL | ||
10 | 38 | ||
11 | enum isert_desc_type { | 39 | enum isert_desc_type { |
12 | ISCSI_TX_CONTROL, | 40 | ISCSI_TX_CONTROL, |
@@ -23,6 +51,7 @@ enum iser_ib_op_code { | |||
23 | enum iser_conn_state { | 51 | enum iser_conn_state { |
24 | ISER_CONN_INIT, | 52 | ISER_CONN_INIT, |
25 | ISER_CONN_UP, | 53 | ISER_CONN_UP, |
54 | ISER_CONN_FULL_FEATURE, | ||
26 | ISER_CONN_TERMINATING, | 55 | ISER_CONN_TERMINATING, |
27 | ISER_CONN_DOWN, | 56 | ISER_CONN_DOWN, |
28 | }; | 57 | }; |
@@ -44,9 +73,6 @@ struct iser_tx_desc { | |||
44 | struct ib_sge tx_sg[2]; | 73 | struct ib_sge tx_sg[2]; |
45 | int num_sge; | 74 | int num_sge; |
46 | struct isert_cmd *isert_cmd; | 75 | struct isert_cmd *isert_cmd; |
47 | struct llist_node *comp_llnode_batch; | ||
48 | struct llist_node comp_llnode; | ||
49 | bool llnode_active; | ||
50 | struct ib_send_wr send_wr; | 76 | struct ib_send_wr send_wr; |
51 | } __packed; | 77 | } __packed; |
52 | 78 | ||
@@ -81,6 +107,12 @@ struct isert_data_buf { | |||
81 | enum dma_data_direction dma_dir; | 107 | enum dma_data_direction dma_dir; |
82 | }; | 108 | }; |
83 | 109 | ||
110 | enum { | ||
111 | DATA = 0, | ||
112 | PROT = 1, | ||
113 | SIG = 2, | ||
114 | }; | ||
115 | |||
84 | struct isert_rdma_wr { | 116 | struct isert_rdma_wr { |
85 | struct list_head wr_list; | 117 | struct list_head wr_list; |
86 | struct isert_cmd *isert_cmd; | 118 | struct isert_cmd *isert_cmd; |
@@ -90,6 +122,7 @@ struct isert_rdma_wr { | |||
90 | int send_wr_num; | 122 | int send_wr_num; |
91 | struct ib_send_wr *send_wr; | 123 | struct ib_send_wr *send_wr; |
92 | struct ib_send_wr s_send_wr; | 124 | struct ib_send_wr s_send_wr; |
125 | struct ib_sge ib_sg[3]; | ||
93 | struct isert_data_buf data; | 126 | struct isert_data_buf data; |
94 | struct isert_data_buf prot; | 127 | struct isert_data_buf prot; |
95 | struct fast_reg_descriptor *fr_desc; | 128 | struct fast_reg_descriptor *fr_desc; |
@@ -117,14 +150,15 @@ struct isert_device; | |||
117 | struct isert_conn { | 150 | struct isert_conn { |
118 | enum iser_conn_state state; | 151 | enum iser_conn_state state; |
119 | int post_recv_buf_count; | 152 | int post_recv_buf_count; |
120 | atomic_t post_send_buf_count; | ||
121 | u32 responder_resources; | 153 | u32 responder_resources; |
122 | u32 initiator_depth; | 154 | u32 initiator_depth; |
155 | bool pi_support; | ||
123 | u32 max_sge; | 156 | u32 max_sge; |
124 | char *login_buf; | 157 | char *login_buf; |
125 | char *login_req_buf; | 158 | char *login_req_buf; |
126 | char *login_rsp_buf; | 159 | char *login_rsp_buf; |
127 | u64 login_req_dma; | 160 | u64 login_req_dma; |
161 | int login_req_len; | ||
128 | u64 login_rsp_dma; | 162 | u64 login_rsp_dma; |
129 | unsigned int conn_rx_desc_head; | 163 | unsigned int conn_rx_desc_head; |
130 | struct iser_rx_desc *conn_rx_descs; | 164 | struct iser_rx_desc *conn_rx_descs; |
@@ -132,13 +166,13 @@ struct isert_conn { | |||
132 | struct iscsi_conn *conn; | 166 | struct iscsi_conn *conn; |
133 | struct list_head conn_accept_node; | 167 | struct list_head conn_accept_node; |
134 | struct completion conn_login_comp; | 168 | struct completion conn_login_comp; |
169 | struct completion login_req_comp; | ||
135 | struct iser_tx_desc conn_login_tx_desc; | 170 | struct iser_tx_desc conn_login_tx_desc; |
136 | struct rdma_cm_id *conn_cm_id; | 171 | struct rdma_cm_id *conn_cm_id; |
137 | struct ib_pd *conn_pd; | 172 | struct ib_pd *conn_pd; |
138 | struct ib_mr *conn_mr; | 173 | struct ib_mr *conn_mr; |
139 | struct ib_qp *conn_qp; | 174 | struct ib_qp *conn_qp; |
140 | struct isert_device *conn_device; | 175 | struct isert_device *conn_device; |
141 | struct work_struct conn_logout_work; | ||
142 | struct mutex conn_mutex; | 176 | struct mutex conn_mutex; |
143 | struct completion conn_wait; | 177 | struct completion conn_wait; |
144 | struct completion conn_wait_comp_err; | 178 | struct completion conn_wait_comp_err; |
@@ -147,31 +181,38 @@ struct isert_conn { | |||
147 | int conn_fr_pool_size; | 181 | int conn_fr_pool_size; |
148 | /* lock to protect fastreg pool */ | 182 | /* lock to protect fastreg pool */ |
149 | spinlock_t conn_lock; | 183 | spinlock_t conn_lock; |
150 | #define ISERT_COMP_BATCH_COUNT 8 | 184 | struct work_struct release_work; |
151 | int conn_comp_batch; | 185 | struct ib_recv_wr beacon; |
152 | struct llist_head conn_comp_llist; | 186 | bool logout_posted; |
153 | bool disconnect; | ||
154 | }; | 187 | }; |
155 | 188 | ||
156 | #define ISERT_MAX_CQ 64 | 189 | #define ISERT_MAX_CQ 64 |
157 | 190 | ||
158 | struct isert_cq_desc { | 191 | /** |
159 | struct isert_device *device; | 192 | * struct isert_comp - iSER completion context |
160 | int cq_index; | 193 | * |
161 | struct work_struct cq_rx_work; | 194 | * @device: pointer to device handle |
162 | struct work_struct cq_tx_work; | 195 | * @cq: completion queue |
196 | * @wcs: work completion array | ||
197 | * @active_qps: Number of active QPs attached | ||
198 | * to completion context | ||
199 | * @work: completion work handle | ||
200 | */ | ||
201 | struct isert_comp { | ||
202 | struct isert_device *device; | ||
203 | struct ib_cq *cq; | ||
204 | struct ib_wc wcs[16]; | ||
205 | int active_qps; | ||
206 | struct work_struct work; | ||
163 | }; | 207 | }; |
164 | 208 | ||
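The wcs[16] array is what enables the batched polling mentioned in this series: the completion handler reaps up to sixteen work completions per ib_poll_cq() call instead of locking the CQ once per completion. A simplified drain loop over this struct might look like the following sketch (isert_handle_wc() is the assumed per-completion dispatcher; the real handler also enforces a poll budget and re-arms the CQ):

```c
static void isert_comp_drain(struct isert_comp *comp)
{
	int i, n;

	/* one CQ lock round-trip fetches up to ARRAY_SIZE(wcs) entries */
	while ((n = ib_poll_cq(comp->cq, ARRAY_SIZE(comp->wcs),
			       comp->wcs)) > 0)
		for (i = 0; i < n; i++)
			isert_handle_wc(&comp->wcs[i]);
}
```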
165 | struct isert_device { | 209 | struct isert_device { |
166 | int use_fastreg; | 210 | int use_fastreg; |
167 | bool pi_capable; | 211 | bool pi_capable; |
168 | int cqs_used; | ||
169 | int refcount; | 212 | int refcount; |
170 | int cq_active_qps[ISERT_MAX_CQ]; | ||
171 | struct ib_device *ib_device; | 213 | struct ib_device *ib_device; |
172 | struct ib_cq *dev_rx_cq[ISERT_MAX_CQ]; | 214 | struct isert_comp *comps; |
173 | struct ib_cq *dev_tx_cq[ISERT_MAX_CQ]; | 215 | int comps_used; |
174 | struct isert_cq_desc *cq_desc; | ||
175 | struct list_head dev_node; | 216 | struct list_head dev_node; |
176 | struct ib_device_attr dev_attr; | 217 | struct ib_device_attr dev_attr; |
177 | int (*reg_rdma_mem)(struct iscsi_conn *conn, | 218 | int (*reg_rdma_mem)(struct iscsi_conn *conn, |
@@ -182,6 +223,7 @@ struct isert_device { | |||
182 | }; | 223 | }; |
183 | 224 | ||
184 | struct isert_np { | 225 | struct isert_np { |
226 | struct iscsi_np *np; | ||
185 | struct semaphore np_sem; | 227 | struct semaphore np_sem; |
186 | struct rdma_cm_id *np_cm_id; | 228 | struct rdma_cm_id *np_cm_id; |
187 | struct mutex np_accept_mutex; | 229 | struct mutex np_accept_mutex; |
diff --git a/drivers/target/iscsi/iscsi_target.c b/drivers/target/iscsi/iscsi_target.c index 73e58d22e325..f8ec32298906 100644 --- a/drivers/target/iscsi/iscsi_target.c +++ b/drivers/target/iscsi/iscsi_target.c | |||
@@ -609,6 +609,7 @@ static int __init iscsi_target_init_module(void) | |||
609 | 609 | ||
610 | return ret; | 610 | return ret; |
611 | r2t_out: | 611 | r2t_out: |
612 | iscsit_unregister_transport(&iscsi_target_transport); | ||
612 | kmem_cache_destroy(lio_r2t_cache); | 613 | kmem_cache_destroy(lio_r2t_cache); |
613 | ooo_out: | 614 | ooo_out: |
614 | kmem_cache_destroy(lio_ooo_cache); | 615 | kmem_cache_destroy(lio_ooo_cache); |
diff --git a/drivers/target/iscsi/iscsi_target_core.h b/drivers/target/iscsi/iscsi_target_core.h index 302eb3b78715..09a522bae222 100644 --- a/drivers/target/iscsi/iscsi_target_core.h +++ b/drivers/target/iscsi/iscsi_target_core.h | |||
@@ -790,7 +790,6 @@ struct iscsi_np { | |||
790 | void *np_context; | 790 | void *np_context; |
791 | struct iscsit_transport *np_transport; | 791 | struct iscsit_transport *np_transport; |
792 | struct list_head np_list; | 792 | struct list_head np_list; |
793 | struct iscsi_tpg_np *tpg_np; | ||
794 | } ____cacheline_aligned; | 793 | } ____cacheline_aligned; |
795 | 794 | ||
796 | struct iscsi_tpg_np { | 795 | struct iscsi_tpg_np { |
diff --git a/drivers/target/iscsi/iscsi_target_login.c b/drivers/target/iscsi/iscsi_target_login.c index 480f2e0ecc11..713c0c1877ab 100644 --- a/drivers/target/iscsi/iscsi_target_login.c +++ b/drivers/target/iscsi/iscsi_target_login.c | |||
@@ -281,7 +281,6 @@ static int iscsi_login_zero_tsih_s1( | |||
281 | { | 281 | { |
282 | struct iscsi_session *sess = NULL; | 282 | struct iscsi_session *sess = NULL; |
283 | struct iscsi_login_req *pdu = (struct iscsi_login_req *)buf; | 283 | struct iscsi_login_req *pdu = (struct iscsi_login_req *)buf; |
284 | enum target_prot_op sup_pro_ops; | ||
285 | int ret; | 284 | int ret; |
286 | 285 | ||
287 | sess = kzalloc(sizeof(struct iscsi_session), GFP_KERNEL); | 286 | sess = kzalloc(sizeof(struct iscsi_session), GFP_KERNEL); |
@@ -343,9 +342,8 @@ static int iscsi_login_zero_tsih_s1( | |||
343 | kfree(sess); | 342 | kfree(sess); |
344 | return -ENOMEM; | 343 | return -ENOMEM; |
345 | } | 344 | } |
346 | sup_pro_ops = conn->conn_transport->iscsit_get_sup_prot_ops(conn); | ||
347 | 345 | ||
348 | sess->se_sess = transport_init_session(sup_pro_ops); | 346 | sess->se_sess = transport_init_session(TARGET_PROT_NORMAL); |
349 | if (IS_ERR(sess->se_sess)) { | 347 | if (IS_ERR(sess->se_sess)) { |
350 | iscsit_tx_login_rsp(conn, ISCSI_STATUS_CLS_TARGET_ERR, | 348 | iscsit_tx_login_rsp(conn, ISCSI_STATUS_CLS_TARGET_ERR, |
351 | ISCSI_LOGIN_STATUS_NO_RESOURCES); | 349 | ISCSI_LOGIN_STATUS_NO_RESOURCES); |
@@ -1161,6 +1159,7 @@ void iscsi_target_login_sess_out(struct iscsi_conn *conn, | |||
1161 | } | 1159 | } |
1162 | kfree(conn->sess->sess_ops); | 1160 | kfree(conn->sess->sess_ops); |
1163 | kfree(conn->sess); | 1161 | kfree(conn->sess); |
1162 | conn->sess = NULL; | ||
1164 | 1163 | ||
1165 | old_sess_out: | 1164 | old_sess_out: |
1166 | iscsi_stop_login_thread_timer(np); | 1165 | iscsi_stop_login_thread_timer(np); |
@@ -1204,6 +1203,9 @@ old_sess_out: | |||
1204 | conn->sock = NULL; | 1203 | conn->sock = NULL; |
1205 | } | 1204 | } |
1206 | 1205 | ||
1206 | if (conn->conn_transport->iscsit_wait_conn) | ||
1207 | conn->conn_transport->iscsit_wait_conn(conn); | ||
1208 | |||
1207 | if (conn->conn_transport->iscsit_free_conn) | 1209 | if (conn->conn_transport->iscsit_free_conn) |
1208 | conn->conn_transport->iscsit_free_conn(conn); | 1210 | conn->conn_transport->iscsit_free_conn(conn); |
1209 | 1211 | ||
@@ -1364,6 +1366,9 @@ static int __iscsi_target_login_thread(struct iscsi_np *np) | |||
1364 | } | 1366 | } |
1365 | login->zero_tsih = zero_tsih; | 1367 | login->zero_tsih = zero_tsih; |
1366 | 1368 | ||
1369 | conn->sess->se_sess->sup_prot_ops = | ||
1370 | conn->conn_transport->iscsit_get_sup_prot_ops(conn); | ||
1371 | |||
1367 | tpg = conn->tpg; | 1372 | tpg = conn->tpg; |
1368 | if (!tpg) { | 1373 | if (!tpg) { |
1369 | pr_err("Unable to locate struct iscsi_conn->tpg\n"); | 1374 | pr_err("Unable to locate struct iscsi_conn->tpg\n"); |
diff --git a/drivers/target/iscsi/iscsi_target_tpg.c b/drivers/target/iscsi/iscsi_target_tpg.c index c3cb5c15efda..9053a3c0c6e5 100644 --- a/drivers/target/iscsi/iscsi_target_tpg.c +++ b/drivers/target/iscsi/iscsi_target_tpg.c | |||
@@ -501,7 +501,6 @@ struct iscsi_tpg_np *iscsit_tpg_add_network_portal( | |||
501 | init_completion(&tpg_np->tpg_np_comp); | 501 | init_completion(&tpg_np->tpg_np_comp); |
502 | kref_init(&tpg_np->tpg_np_kref); | 502 | kref_init(&tpg_np->tpg_np_kref); |
503 | tpg_np->tpg_np = np; | 503 | tpg_np->tpg_np = np; |
504 | np->tpg_np = tpg_np; | ||
505 | tpg_np->tpg = tpg; | 504 | tpg_np->tpg = tpg; |
506 | 505 | ||
507 | spin_lock(&tpg->tpg_np_lock); | 506 | spin_lock(&tpg->tpg_np_lock); |
diff --git a/drivers/target/iscsi/iscsi_target_transport.c b/drivers/target/iscsi/iscsi_target_transport.c index 882728fac30c..08217d62fb0d 100644 --- a/drivers/target/iscsi/iscsi_target_transport.c +++ b/drivers/target/iscsi/iscsi_target_transport.c | |||
@@ -26,8 +26,7 @@ struct iscsit_transport *iscsit_get_transport(int type) | |||
26 | 26 | ||
27 | void iscsit_put_transport(struct iscsit_transport *t) | 27 | void iscsit_put_transport(struct iscsit_transport *t) |
28 | { | 28 | { |
29 | if (t->owner) | 29 | module_put(t->owner); |
30 | module_put(t->owner); | ||
31 | } | 30 | } |
32 | 31 | ||
33 | int iscsit_register_transport(struct iscsit_transport *t) | 32 | int iscsit_register_transport(struct iscsit_transport *t) |
diff --git a/drivers/target/iscsi/iscsi_target_util.c b/drivers/target/iscsi/iscsi_target_util.c index 7c6a95bcb35e..bcd88ec99793 100644 --- a/drivers/target/iscsi/iscsi_target_util.c +++ b/drivers/target/iscsi/iscsi_target_util.c | |||
@@ -1356,15 +1356,15 @@ static int iscsit_do_tx_data( | |||
1356 | struct iscsi_conn *conn, | 1356 | struct iscsi_conn *conn, |
1357 | struct iscsi_data_count *count) | 1357 | struct iscsi_data_count *count) |
1358 | { | 1358 | { |
1359 | int data = count->data_length, total_tx = 0, tx_loop = 0, iov_len; | 1359 | int ret, iov_len; |
1360 | struct kvec *iov_p; | 1360 | struct kvec *iov_p; |
1361 | struct msghdr msg; | 1361 | struct msghdr msg; |
1362 | 1362 | ||
1363 | if (!conn || !conn->sock || !conn->conn_ops) | 1363 | if (!conn || !conn->sock || !conn->conn_ops) |
1364 | return -1; | 1364 | return -1; |
1365 | 1365 | ||
1366 | if (data <= 0) { | 1366 | if (count->data_length <= 0) { |
1367 | pr_err("Data length is: %d\n", data); | 1367 | pr_err("Data length is: %d\n", count->data_length); |
1368 | return -1; | 1368 | return -1; |
1369 | } | 1369 | } |
1370 | 1370 | ||
@@ -1373,20 +1373,16 @@ static int iscsit_do_tx_data( | |||
1373 | iov_p = count->iov; | 1373 | iov_p = count->iov; |
1374 | iov_len = count->iov_count; | 1374 | iov_len = count->iov_count; |
1375 | 1375 | ||
1376 | while (total_tx < data) { | 1376 | ret = kernel_sendmsg(conn->sock, &msg, iov_p, iov_len, |
1377 | tx_loop = kernel_sendmsg(conn->sock, &msg, iov_p, iov_len, | 1377 | count->data_length); |
1378 | (data - total_tx)); | 1378 | if (ret != count->data_length) { |
1379 | if (tx_loop <= 0) { | 1379 | pr_err("Unexpected ret: %d send data %d\n", |
1380 | pr_debug("tx_loop: %d total_tx %d\n", | 1380 | ret, count->data_length); |
1381 | tx_loop, total_tx); | 1381 | return -EPIPE; |
1382 | return tx_loop; | ||
1383 | } | ||
1384 | total_tx += tx_loop; | ||
1385 | pr_debug("tx_loop: %d, total_tx: %d, data: %d\n", | ||
1386 | tx_loop, total_tx, data); | ||
1387 | } | 1382 | } |
1383 | pr_debug("ret: %d, sent data: %d\n", ret, count->data_length); | ||
1388 | 1384 | ||
1389 | return total_tx; | 1385 | return ret; |
1390 | } | 1386 | } |
1391 | 1387 | ||
1392 | int rx_data( | 1388 | int rx_data( |
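The rewritten iscsit_do_tx_data() drops the partial-write retry loop: on a blocking socket, kernel_sendmsg() returning short indicates a connection-level failure, so the new code reports -EPIPE instead of spinning (the full msghdr-iteration conversion is deferred, per the pull request note). A user-space analogue of the all-or-fail policy, using POSIX sendmsg() with a hypothetical helper name:

```c
#include <errno.h>
#include <sys/socket.h>
#include <sys/uio.h>

/* All-or-fail transmit: any short sendmsg() on a blocking socket is
 * reported as a broken connection rather than retried. */
static ssize_t send_all_or_fail(int fd, struct iovec *iov, int iovcnt,
				size_t expected)
{
	struct msghdr msg = { .msg_iov = iov, .msg_iovlen = iovcnt };
	ssize_t ret = sendmsg(fd, &msg, MSG_NOSIGNAL);

	if (ret != (ssize_t)expected)
		return -EPIPE;
	return ret;
}
```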
diff --git a/drivers/target/loopback/tcm_loop.c b/drivers/target/loopback/tcm_loop.c index 4d1b7224a7f2..7172a71f9f0b 100644 --- a/drivers/target/loopback/tcm_loop.c +++ b/drivers/target/loopback/tcm_loop.c | |||
@@ -138,7 +138,7 @@ static void tcm_loop_submission_work(struct work_struct *work) | |||
138 | set_host_byte(sc, DID_TRANSPORT_DISRUPTED); | 138 | set_host_byte(sc, DID_TRANSPORT_DISRUPTED); |
139 | goto out_done; | 139 | goto out_done; |
140 | } | 140 | } |
141 | tl_nexus = tl_hba->tl_nexus; | 141 | tl_nexus = tl_tpg->tl_nexus; |
142 | if (!tl_nexus) { | 142 | if (!tl_nexus) { |
143 | scmd_printk(KERN_ERR, sc, "TCM_Loop I_T Nexus" | 143 | scmd_printk(KERN_ERR, sc, "TCM_Loop I_T Nexus" |
144 | " does not exist\n"); | 144 | " does not exist\n"); |
@@ -218,16 +218,26 @@ static int tcm_loop_queuecommand(struct Scsi_Host *sh, struct scsi_cmnd *sc) | |||
218 | * to struct scsi_device | 218 | * to struct scsi_device |
219 | */ | 219 | */ |
220 | static int tcm_loop_issue_tmr(struct tcm_loop_tpg *tl_tpg, | 220 | static int tcm_loop_issue_tmr(struct tcm_loop_tpg *tl_tpg, |
221 | struct tcm_loop_nexus *tl_nexus, | ||
222 | int lun, int task, enum tcm_tmreq_table tmr) | 221 | int lun, int task, enum tcm_tmreq_table tmr) |
223 | { | 222 | { |
224 | struct se_cmd *se_cmd = NULL; | 223 | struct se_cmd *se_cmd = NULL; |
225 | struct se_session *se_sess; | 224 | struct se_session *se_sess; |
226 | struct se_portal_group *se_tpg; | 225 | struct se_portal_group *se_tpg; |
226 | struct tcm_loop_nexus *tl_nexus; | ||
227 | struct tcm_loop_cmd *tl_cmd = NULL; | 227 | struct tcm_loop_cmd *tl_cmd = NULL; |
228 | struct tcm_loop_tmr *tl_tmr = NULL; | 228 | struct tcm_loop_tmr *tl_tmr = NULL; |
229 | int ret = TMR_FUNCTION_FAILED, rc; | 229 | int ret = TMR_FUNCTION_FAILED, rc; |
230 | 230 | ||
231 | /* | ||
232 | * Locate the tl_nexus and se_sess pointers | ||
233 | */ | ||
234 | tl_nexus = tl_tpg->tl_nexus; | ||
235 | if (!tl_nexus) { | ||
236 | pr_err("Unable to perform device reset without" | ||
237 | " active I_T Nexus\n"); | ||
238 | return ret; | ||
239 | } | ||
240 | |||
231 | tl_cmd = kmem_cache_zalloc(tcm_loop_cmd_cache, GFP_KERNEL); | 241 | tl_cmd = kmem_cache_zalloc(tcm_loop_cmd_cache, GFP_KERNEL); |
232 | if (!tl_cmd) { | 242 | if (!tl_cmd) { |
233 | pr_err("Unable to allocate memory for tl_cmd\n"); | 243 | pr_err("Unable to allocate memory for tl_cmd\n"); |
@@ -243,7 +253,7 @@ static int tcm_loop_issue_tmr(struct tcm_loop_tpg *tl_tpg, | |||
243 | 253 | ||
244 | se_cmd = &tl_cmd->tl_se_cmd; | 254 | se_cmd = &tl_cmd->tl_se_cmd; |
245 | se_tpg = &tl_tpg->tl_se_tpg; | 255 | se_tpg = &tl_tpg->tl_se_tpg; |
246 | se_sess = tl_nexus->se_sess; | 256 | se_sess = tl_tpg->tl_nexus->se_sess; |
247 | /* | 257 | /* |
248 | * Initialize struct se_cmd descriptor from target_core_mod infrastructure | 258 | * Initialize struct se_cmd descriptor from target_core_mod infrastructure |
249 | */ | 259 | */ |
@@ -288,7 +298,6 @@ release: | |||
288 | static int tcm_loop_abort_task(struct scsi_cmnd *sc) | 298 | static int tcm_loop_abort_task(struct scsi_cmnd *sc) |
289 | { | 299 | { |
290 | struct tcm_loop_hba *tl_hba; | 300 | struct tcm_loop_hba *tl_hba; |
291 | struct tcm_loop_nexus *tl_nexus; | ||
292 | struct tcm_loop_tpg *tl_tpg; | 301 | struct tcm_loop_tpg *tl_tpg; |
293 | int ret = FAILED; | 302 | int ret = FAILED; |
294 | 303 | ||
@@ -296,21 +305,8 @@ static int tcm_loop_abort_task(struct scsi_cmnd *sc) | |||
296 | * Locate the tcm_loop_hba_t pointer | 305 | * Locate the tcm_loop_hba_t pointer |
297 | */ | 306 | */ |
298 | tl_hba = *(struct tcm_loop_hba **)shost_priv(sc->device->host); | 307 | tl_hba = *(struct tcm_loop_hba **)shost_priv(sc->device->host); |
299 | /* | ||
300 | * Locate the tl_nexus and se_sess pointers | ||
301 | */ | ||
302 | tl_nexus = tl_hba->tl_nexus; | ||
303 | if (!tl_nexus) { | ||
304 | pr_err("Unable to perform device reset without" | ||
305 | " active I_T Nexus\n"); | ||
306 | return FAILED; | ||
307 | } | ||
308 | |||
309 | /* | ||
310 | * Locate the tl_tpg pointer from TargetID in sc->device->id | ||
311 | */ | ||
312 | tl_tpg = &tl_hba->tl_hba_tpgs[sc->device->id]; | 308 | tl_tpg = &tl_hba->tl_hba_tpgs[sc->device->id]; |
313 | ret = tcm_loop_issue_tmr(tl_tpg, tl_nexus, sc->device->lun, | 309 | ret = tcm_loop_issue_tmr(tl_tpg, sc->device->lun, |
314 | sc->request->tag, TMR_ABORT_TASK); | 310 | sc->request->tag, TMR_ABORT_TASK); |
315 | return (ret == TMR_FUNCTION_COMPLETE) ? SUCCESS : FAILED; | 311 | return (ret == TMR_FUNCTION_COMPLETE) ? SUCCESS : FAILED; |
316 | } | 312 | } |
@@ -322,7 +318,6 @@ static int tcm_loop_abort_task(struct scsi_cmnd *sc) | |||
322 | static int tcm_loop_device_reset(struct scsi_cmnd *sc) | 318 | static int tcm_loop_device_reset(struct scsi_cmnd *sc) |
323 | { | 319 | { |
324 | struct tcm_loop_hba *tl_hba; | 320 | struct tcm_loop_hba *tl_hba; |
325 | struct tcm_loop_nexus *tl_nexus; | ||
326 | struct tcm_loop_tpg *tl_tpg; | 321 | struct tcm_loop_tpg *tl_tpg; |
327 | int ret = FAILED; | 322 | int ret = FAILED; |
328 | 323 | ||
@@ -330,20 +325,9 @@ static int tcm_loop_device_reset(struct scsi_cmnd *sc) | |||
330 | * Locate the tcm_loop_hba_t pointer | 325 | * Locate the tcm_loop_hba_t pointer |
331 | */ | 326 | */ |
332 | tl_hba = *(struct tcm_loop_hba **)shost_priv(sc->device->host); | 327 | tl_hba = *(struct tcm_loop_hba **)shost_priv(sc->device->host); |
333 | /* | ||
334 | * Locate the tl_nexus and se_sess pointers | ||
335 | */ | ||
336 | tl_nexus = tl_hba->tl_nexus; | ||
337 | if (!tl_nexus) { | ||
338 | pr_err("Unable to perform device reset without" | ||
339 | " active I_T Nexus\n"); | ||
340 | return FAILED; | ||
341 | } | ||
342 | /* | ||
343 | * Locate the tl_tpg pointer from TargetID in sc->device->id | ||
344 | */ | ||
345 | tl_tpg = &tl_hba->tl_hba_tpgs[sc->device->id]; | 328 | tl_tpg = &tl_hba->tl_hba_tpgs[sc->device->id]; |
346 | ret = tcm_loop_issue_tmr(tl_tpg, tl_nexus, sc->device->lun, | 329 | |
330 | ret = tcm_loop_issue_tmr(tl_tpg, sc->device->lun, | ||
347 | 0, TMR_LUN_RESET); | 331 | 0, TMR_LUN_RESET); |
348 | return (ret == TMR_FUNCTION_COMPLETE) ? SUCCESS : FAILED; | 332 | return (ret == TMR_FUNCTION_COMPLETE) ? SUCCESS : FAILED; |
349 | } | 333 | } |
@@ -940,8 +924,8 @@ static int tcm_loop_make_nexus( | |||
940 | struct tcm_loop_nexus *tl_nexus; | 924 | struct tcm_loop_nexus *tl_nexus; |
941 | int ret = -ENOMEM; | 925 | int ret = -ENOMEM; |
942 | 926 | ||
943 | if (tl_tpg->tl_hba->tl_nexus) { | 927 | if (tl_tpg->tl_nexus) { |
944 | pr_debug("tl_tpg->tl_hba->tl_nexus already exists\n"); | 928 | pr_debug("tl_tpg->tl_nexus already exists\n"); |
945 | return -EEXIST; | 929 | return -EEXIST; |
946 | } | 930 | } |
947 | se_tpg = &tl_tpg->tl_se_tpg; | 931 | se_tpg = &tl_tpg->tl_se_tpg; |
@@ -976,7 +960,7 @@ static int tcm_loop_make_nexus( | |||
976 | */ | 960 | */ |
977 | __transport_register_session(se_tpg, tl_nexus->se_sess->se_node_acl, | 961 | __transport_register_session(se_tpg, tl_nexus->se_sess->se_node_acl, |
978 | tl_nexus->se_sess, tl_nexus); | 962 | tl_nexus->se_sess, tl_nexus); |
979 | tl_tpg->tl_hba->tl_nexus = tl_nexus; | 963 | tl_tpg->tl_nexus = tl_nexus; |
980 | pr_debug("TCM_Loop_ConfigFS: Established I_T Nexus to emulated" | 964 | pr_debug("TCM_Loop_ConfigFS: Established I_T Nexus to emulated" |
981 | " %s Initiator Port: %s\n", tcm_loop_dump_proto_id(tl_hba), | 965 | " %s Initiator Port: %s\n", tcm_loop_dump_proto_id(tl_hba), |
982 | name); | 966 | name); |
@@ -992,12 +976,8 @@ static int tcm_loop_drop_nexus( | |||
992 | { | 976 | { |
993 | struct se_session *se_sess; | 977 | struct se_session *se_sess; |
994 | struct tcm_loop_nexus *tl_nexus; | 978 | struct tcm_loop_nexus *tl_nexus; |
995 | struct tcm_loop_hba *tl_hba = tpg->tl_hba; | ||
996 | 979 | ||
997 | if (!tl_hba) | 980 | tl_nexus = tpg->tl_nexus; |
998 | return -ENODEV; | ||
999 | |||
1000 | tl_nexus = tl_hba->tl_nexus; | ||
1001 | if (!tl_nexus) | 981 | if (!tl_nexus) |
1002 | return -ENODEV; | 982 | return -ENODEV; |
1003 | 983 | ||
@@ -1013,13 +993,13 @@ static int tcm_loop_drop_nexus( | |||
1013 | } | 993 | } |
1014 | 994 | ||
1015 | pr_debug("TCM_Loop_ConfigFS: Removing I_T Nexus to emulated" | 995 | pr_debug("TCM_Loop_ConfigFS: Removing I_T Nexus to emulated" |
1016 | " %s Initiator Port: %s\n", tcm_loop_dump_proto_id(tl_hba), | 996 | " %s Initiator Port: %s\n", tcm_loop_dump_proto_id(tpg->tl_hba), |
1017 | tl_nexus->se_sess->se_node_acl->initiatorname); | 997 | tl_nexus->se_sess->se_node_acl->initiatorname); |
1018 | /* | 998 | /* |
1019 | * Release the SCSI I_T Nexus to the emulated SAS Target Port | 999 | * Release the SCSI I_T Nexus to the emulated SAS Target Port |
1020 | */ | 1000 | */ |
1021 | transport_deregister_session(tl_nexus->se_sess); | 1001 | transport_deregister_session(tl_nexus->se_sess); |
1022 | tpg->tl_hba->tl_nexus = NULL; | 1002 | tpg->tl_nexus = NULL; |
1023 | kfree(tl_nexus); | 1003 | kfree(tl_nexus); |
1024 | return 0; | 1004 | return 0; |
1025 | } | 1005 | } |
@@ -1035,7 +1015,7 @@ static ssize_t tcm_loop_tpg_show_nexus( | |||
1035 | struct tcm_loop_nexus *tl_nexus; | 1015 | struct tcm_loop_nexus *tl_nexus; |
1036 | ssize_t ret; | 1016 | ssize_t ret; |
1037 | 1017 | ||
1038 | tl_nexus = tl_tpg->tl_hba->tl_nexus; | 1018 | tl_nexus = tl_tpg->tl_nexus; |
1039 | if (!tl_nexus) | 1019 | if (!tl_nexus) |
1040 | return -ENODEV; | 1020 | return -ENODEV; |
1041 | 1021 | ||
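The call sites above (submission work, abort task, device reset, TMR issue) now resolve the nexus through the TPG rather than the HBA. Their shared prologue, sketched with a hypothetical helper (tl_tpg_session() is not in the patch), assuming the structures as modified in the header hunk below:

static struct se_session *tl_tpg_session(struct tcm_loop_tpg *tl_tpg)
{
        /* the nexus now hangs off the TPG, so each of the
         * TL_TPGS_PER_HBA portal groups can own a distinct I_T nexus */
        struct tcm_loop_nexus *tl_nexus = tl_tpg->tl_nexus;

        if (!tl_nexus)
                return NULL;    /* callers fail with TMR_FUNCTION_FAILED */
        return tl_nexus->se_sess;
}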
diff --git a/drivers/target/loopback/tcm_loop.h b/drivers/target/loopback/tcm_loop.h index 54c59d0b6608..6ae49f272ba6 100644 --- a/drivers/target/loopback/tcm_loop.h +++ b/drivers/target/loopback/tcm_loop.h | |||
@@ -27,11 +27,6 @@ struct tcm_loop_tmr { | |||
27 | }; | 27 | }; |
28 | 28 | ||
29 | struct tcm_loop_nexus { | 29 | struct tcm_loop_nexus { |
30 | int it_nexus_active; | ||
31 | /* | ||
32 | * Pointer to Linux/SCSI HBA from linux/include/scsi_host.h | ||
33 | */ | ||
34 | struct scsi_host *sh; | ||
35 | /* | 30 | /* |
36 | * Pointer to TCM session for I_T Nexus | 31 | * Pointer to TCM session for I_T Nexus |
37 | */ | 32 | */ |
@@ -51,6 +46,7 @@ struct tcm_loop_tpg { | |||
51 | atomic_t tl_tpg_port_count; | 46 | atomic_t tl_tpg_port_count; |
52 | struct se_portal_group tl_se_tpg; | 47 | struct se_portal_group tl_se_tpg; |
53 | struct tcm_loop_hba *tl_hba; | 48 | struct tcm_loop_hba *tl_hba; |
49 | struct tcm_loop_nexus *tl_nexus; | ||
54 | }; | 50 | }; |
55 | 51 | ||
56 | struct tcm_loop_hba { | 52 | struct tcm_loop_hba { |
@@ -59,7 +55,6 @@ struct tcm_loop_hba { | |||
59 | struct se_hba_s *se_hba; | 55 | struct se_hba_s *se_hba; |
60 | struct se_lun *tl_hba_lun; | 56 | struct se_lun *tl_hba_lun; |
61 | struct se_port *tl_hba_lun_sep; | 57 | struct se_port *tl_hba_lun_sep; |
62 | struct tcm_loop_nexus *tl_nexus; | ||
63 | struct device dev; | 58 | struct device dev; |
64 | struct Scsi_Host *sh; | 59 | struct Scsi_Host *sh; |
65 | struct tcm_loop_tpg tl_hba_tpgs[TL_TPGS_PER_HBA]; | 60 | struct tcm_loop_tpg tl_hba_tpgs[TL_TPGS_PER_HBA]; |
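The header hunk reads more easily as the resulting layout; unrelated fields are elided, and the unused it_nexus_active flag and scsi_host pointer are simply dropped:

/* ownership after this patch (sketch, unrelated fields elided) */
struct tcm_loop_nexus {
        struct se_session *se_sess;     /* TCM session for the I_T nexus */
};

struct tcm_loop_tpg {
        struct tcm_loop_hba *tl_hba;
        struct tcm_loop_nexus *tl_nexus;        /* moved here from tcm_loop_hba */
        /* ... */
};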
diff --git a/drivers/target/target_core_configfs.c b/drivers/target/target_core_configfs.c index 79f9296a08ae..75d89adfccc0 100644 --- a/drivers/target/target_core_configfs.c +++ b/drivers/target/target_core_configfs.c | |||
@@ -50,6 +50,19 @@ | |||
50 | #include "target_core_rd.h" | 50 | #include "target_core_rd.h" |
51 | #include "target_core_xcopy.h" | 51 | #include "target_core_xcopy.h" |
52 | 52 | ||
53 | #define TB_CIT_SETUP(_name, _item_ops, _group_ops, _attrs) \ | ||
54 | static void target_core_setup_##_name##_cit(struct se_subsystem_api *sa) \ | ||
55 | { \ | ||
56 | struct target_backend_cits *tbc = &sa->tb_cits; \ | ||
57 | struct config_item_type *cit = &tbc->tb_##_name##_cit; \ | ||
58 | \ | ||
59 | cit->ct_item_ops = _item_ops; \ | ||
60 | cit->ct_group_ops = _group_ops; \ | ||
61 | cit->ct_attrs = _attrs; \ | ||
62 | cit->ct_owner = sa->owner; \ | ||
63 | pr_debug("Setup generic %s\n", __stringify(_name)); \ | ||
64 | } | ||
65 | |||
53 | extern struct t10_alua_lu_gp *default_lu_gp; | 66 | extern struct t10_alua_lu_gp *default_lu_gp; |
54 | 67 | ||
55 | static LIST_HEAD(g_tf_list); | 68 | static LIST_HEAD(g_tf_list); |
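Expanding the macro by hand for one caller shows what each generated function does. This is the mechanical expansion of TB_CIT_SETUP(dev_attrib, &target_core_dev_attrib_ops, NULL, NULL) from later in this file, with the pr_debug() omitted and tbc inlined:

static void target_core_setup_dev_attrib_cit(struct se_subsystem_api *sa)
{
        struct config_item_type *cit = &sa->tb_cits.tb_dev_attrib_cit;

        cit->ct_item_ops = &target_core_dev_attrib_ops;
        cit->ct_group_ops = NULL;
        cit->ct_attrs = NULL;           /* the backend supplies its own list */
        cit->ct_owner = sa->owner;      /* the backend module, not THIS_MODULE */
}

The ct_owner assignment is the key difference from the old static cits, which pinned target_core_mod via THIS_MODULE instead of the module that actually provides the attributes.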
@@ -126,48 +139,57 @@ static struct config_group *target_core_register_fabric( | |||
126 | 139 | ||
127 | pr_debug("Target_Core_ConfigFS: REGISTER -> group: %p name:" | 140 | pr_debug("Target_Core_ConfigFS: REGISTER -> group: %p name:" |
128 | " %s\n", group, name); | 141 | " %s\n", group, name); |
129 | /* | 142 | |
130 | * Below are some hardcoded request_module() calls to automatically | 143 | tf = target_core_get_fabric(name); |
131 | * load fabric modules when the following is called: | 144 | if (!tf) { |
132 | * | 145 | pr_err("target_core_register_fabric() trying autoload for %s\n", |
133 | * mkdir -p /sys/kernel/config/target/$MODULE_NAME | 146 | name); |
134 | * | 147 | |
135 | * Note that this does not limit which TCM fabric module can be | ||
136 | * registered, but simply provides auto loading logic for modules with | ||
137 | * mkdir(2) system calls with known TCM fabric modules. | ||
138 | */ | ||
139 | if (!strncmp(name, "iscsi", 5)) { | ||
140 | /* | 148 | /* |
141 | * Automatically load the LIO Target fabric module when the | 149 | * Below are some hardcoded request_module() calls to automatically |
142 | * following is called: | 150 | * load fabric modules when the following is called: |
143 | * | 151 | * |
144 | * mkdir -p $CONFIGFS/target/iscsi | 152 | * mkdir -p /sys/kernel/config/target/$MODULE_NAME |
145 | */ | ||
146 | ret = request_module("iscsi_target_mod"); | ||
147 | if (ret < 0) { | ||
148 | pr_err("request_module() failed for" | ||
149 | " iscsi_target_mod.ko: %d\n", ret); | ||
150 | return ERR_PTR(-EINVAL); | ||
151 | } | ||
152 | } else if (!strncmp(name, "loopback", 8)) { | ||
153 | /* | ||
154 | * Automatically load the tcm_loop fabric module when the | ||
155 | * following is called: | ||
156 | * | 153 | * |
157 | * mkdir -p $CONFIGFS/target/loopback | 154 | * Note that this does not limit which TCM fabric module can be |
155 | * registered, but simply provides auto loading logic for modules with | ||
156 | * mkdir(2) system calls with known TCM fabric modules. | ||
158 | */ | 157 | */ |
159 | ret = request_module("tcm_loop"); | 158 | |
160 | if (ret < 0) { | 159 | if (!strncmp(name, "iscsi", 5)) { |
161 | pr_err("request_module() failed for" | 160 | /* |
162 | " tcm_loop.ko: %d\n", ret); | 161 | * Automatically load the LIO Target fabric module when the |
163 | return ERR_PTR(-EINVAL); | 162 | * following is called: |
163 | * | ||
164 | * mkdir -p $CONFIGFS/target/iscsi | ||
165 | */ | ||
166 | ret = request_module("iscsi_target_mod"); | ||
167 | if (ret < 0) { | ||
168 | pr_err("request_module() failed for" | ||
169 | " iscsi_target_mod.ko: %d\n", ret); | ||
170 | return ERR_PTR(-EINVAL); | ||
171 | } | ||
172 | } else if (!strncmp(name, "loopback", 8)) { | ||
173 | /* | ||
174 | * Automatically load the tcm_loop fabric module when the | ||
175 | * following is called: | ||
176 | * | ||
177 | * mkdir -p $CONFIGFS/target/loopback | ||
178 | */ | ||
179 | ret = request_module("tcm_loop"); | ||
180 | if (ret < 0) { | ||
181 | pr_err("request_module() failed for" | ||
182 | " tcm_loop.ko: %d\n", ret); | ||
183 | return ERR_PTR(-EINVAL); | ||
184 | } | ||
164 | } | 185 | } |
186 | |||
187 | tf = target_core_get_fabric(name); | ||
165 | } | 188 | } |
166 | 189 | ||
167 | tf = target_core_get_fabric(name); | ||
168 | if (!tf) { | 190 | if (!tf) { |
169 | pr_err("target_core_get_fabric() failed for %s\n", | 191 | pr_err("target_core_get_fabric() failed for %s\n", |
170 | name); | 192 | name); |
171 | return ERR_PTR(-EINVAL); | 193 | return ERR_PTR(-EINVAL); |
172 | } | 194 | } |
173 | pr_debug("Target_Core_ConfigFS: REGISTER -> Located fabric:" | 195 | pr_debug("Target_Core_ConfigFS: REGISTER -> Located fabric:" |
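The restructured control flow is simpler than the diff makes it look: try the lookup first, and only fall back to request_module() plus a second lookup on a miss. Sketched with the two request_module() branches folded into a hypothetical try_autoload_fabric() helper (not in the patch):

tf = target_core_get_fabric(name);
if (!tf) {
        if (try_autoload_fabric(name) < 0)      /* iscsi / loopback cases */
                return ERR_PTR(-EINVAL);
        tf = target_core_get_fabric(name);      /* retry after module load */
}
if (!tf)
        return ERR_PTR(-EINVAL);

A fabric that has already registered itself, such as one compiled in, now matches on the first lookup and never triggers a request_module() round trip.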
@@ -562,198 +584,21 @@ EXPORT_SYMBOL(target_fabric_configfs_deregister); | |||
562 | // Stop functions called by external Target Fabrics Modules | 584 | // Stop functions called by external Target Fabrics Modules |
563 | //############################################################################*/ | 585 | //############################################################################*/ |
564 | 586 | ||
565 | /* Start functions for struct config_item_type target_core_dev_attrib_cit */ | 587 | /* Start functions for struct config_item_type tb_dev_attrib_cit */ |
566 | |||
567 | #define DEF_DEV_ATTRIB_SHOW(_name) \ | ||
568 | static ssize_t target_core_dev_show_attr_##_name( \ | ||
569 | struct se_dev_attrib *da, \ | ||
570 | char *page) \ | ||
571 | { \ | ||
572 | return snprintf(page, PAGE_SIZE, "%u\n", \ | ||
573 | (u32)da->da_dev->dev_attrib._name); \ | ||
574 | } | ||
575 | |||
576 | #define DEF_DEV_ATTRIB_STORE(_name) \ | ||
577 | static ssize_t target_core_dev_store_attr_##_name( \ | ||
578 | struct se_dev_attrib *da, \ | ||
579 | const char *page, \ | ||
580 | size_t count) \ | ||
581 | { \ | ||
582 | unsigned long val; \ | ||
583 | int ret; \ | ||
584 | \ | ||
585 | ret = kstrtoul(page, 0, &val); \ | ||
586 | if (ret < 0) { \ | ||
587 | pr_err("kstrtoul() failed with" \ | ||
588 | " ret: %d\n", ret); \ | ||
589 | return -EINVAL; \ | ||
590 | } \ | ||
591 | ret = se_dev_set_##_name(da->da_dev, (u32)val); \ | ||
592 | \ | ||
593 | return (!ret) ? count : -EINVAL; \ | ||
594 | } | ||
595 | |||
596 | #define DEF_DEV_ATTRIB(_name) \ | ||
597 | DEF_DEV_ATTRIB_SHOW(_name); \ | ||
598 | DEF_DEV_ATTRIB_STORE(_name); | ||
599 | |||
600 | #define DEF_DEV_ATTRIB_RO(_name) \ | ||
601 | DEF_DEV_ATTRIB_SHOW(_name); | ||
602 | 588 | ||
603 | CONFIGFS_EATTR_STRUCT(target_core_dev_attrib, se_dev_attrib); | 589 | CONFIGFS_EATTR_STRUCT(target_core_dev_attrib, se_dev_attrib); |
604 | #define SE_DEV_ATTR(_name, _mode) \ | ||
605 | static struct target_core_dev_attrib_attribute \ | ||
606 | target_core_dev_attrib_##_name = \ | ||
607 | __CONFIGFS_EATTR(_name, _mode, \ | ||
608 | target_core_dev_show_attr_##_name, \ | ||
609 | target_core_dev_store_attr_##_name); | ||
610 | |||
611 | #define SE_DEV_ATTR_RO(_name); \ | ||
612 | static struct target_core_dev_attrib_attribute \ | ||
613 | target_core_dev_attrib_##_name = \ | ||
614 | __CONFIGFS_EATTR_RO(_name, \ | ||
615 | target_core_dev_show_attr_##_name); | ||
616 | |||
617 | DEF_DEV_ATTRIB(emulate_model_alias); | ||
618 | SE_DEV_ATTR(emulate_model_alias, S_IRUGO | S_IWUSR); | ||
619 | |||
620 | DEF_DEV_ATTRIB(emulate_dpo); | ||
621 | SE_DEV_ATTR(emulate_dpo, S_IRUGO | S_IWUSR); | ||
622 | |||
623 | DEF_DEV_ATTRIB(emulate_fua_write); | ||
624 | SE_DEV_ATTR(emulate_fua_write, S_IRUGO | S_IWUSR); | ||
625 | |||
626 | DEF_DEV_ATTRIB(emulate_fua_read); | ||
627 | SE_DEV_ATTR(emulate_fua_read, S_IRUGO | S_IWUSR); | ||
628 | |||
629 | DEF_DEV_ATTRIB(emulate_write_cache); | ||
630 | SE_DEV_ATTR(emulate_write_cache, S_IRUGO | S_IWUSR); | ||
631 | |||
632 | DEF_DEV_ATTRIB(emulate_ua_intlck_ctrl); | ||
633 | SE_DEV_ATTR(emulate_ua_intlck_ctrl, S_IRUGO | S_IWUSR); | ||
634 | |||
635 | DEF_DEV_ATTRIB(emulate_tas); | ||
636 | SE_DEV_ATTR(emulate_tas, S_IRUGO | S_IWUSR); | ||
637 | |||
638 | DEF_DEV_ATTRIB(emulate_tpu); | ||
639 | SE_DEV_ATTR(emulate_tpu, S_IRUGO | S_IWUSR); | ||
640 | |||
641 | DEF_DEV_ATTRIB(emulate_tpws); | ||
642 | SE_DEV_ATTR(emulate_tpws, S_IRUGO | S_IWUSR); | ||
643 | |||
644 | DEF_DEV_ATTRIB(emulate_caw); | ||
645 | SE_DEV_ATTR(emulate_caw, S_IRUGO | S_IWUSR); | ||
646 | |||
647 | DEF_DEV_ATTRIB(emulate_3pc); | ||
648 | SE_DEV_ATTR(emulate_3pc, S_IRUGO | S_IWUSR); | ||
649 | |||
650 | DEF_DEV_ATTRIB(pi_prot_type); | ||
651 | SE_DEV_ATTR(pi_prot_type, S_IRUGO | S_IWUSR); | ||
652 | |||
653 | DEF_DEV_ATTRIB_RO(hw_pi_prot_type); | ||
654 | SE_DEV_ATTR_RO(hw_pi_prot_type); | ||
655 | |||
656 | DEF_DEV_ATTRIB(pi_prot_format); | ||
657 | SE_DEV_ATTR(pi_prot_format, S_IRUGO | S_IWUSR); | ||
658 | |||
659 | DEF_DEV_ATTRIB(enforce_pr_isids); | ||
660 | SE_DEV_ATTR(enforce_pr_isids, S_IRUGO | S_IWUSR); | ||
661 | |||
662 | DEF_DEV_ATTRIB(is_nonrot); | ||
663 | SE_DEV_ATTR(is_nonrot, S_IRUGO | S_IWUSR); | ||
664 | |||
665 | DEF_DEV_ATTRIB(emulate_rest_reord); | ||
666 | SE_DEV_ATTR(emulate_rest_reord, S_IRUGO | S_IWUSR); | ||
667 | |||
668 | DEF_DEV_ATTRIB(force_pr_aptpl); | ||
669 | SE_DEV_ATTR(force_pr_aptpl, S_IRUGO | S_IWUSR); | ||
670 | |||
671 | DEF_DEV_ATTRIB_RO(hw_block_size); | ||
672 | SE_DEV_ATTR_RO(hw_block_size); | ||
673 | |||
674 | DEF_DEV_ATTRIB(block_size); | ||
675 | SE_DEV_ATTR(block_size, S_IRUGO | S_IWUSR); | ||
676 | |||
677 | DEF_DEV_ATTRIB_RO(hw_max_sectors); | ||
678 | SE_DEV_ATTR_RO(hw_max_sectors); | ||
679 | |||
680 | DEF_DEV_ATTRIB(fabric_max_sectors); | ||
681 | SE_DEV_ATTR(fabric_max_sectors, S_IRUGO | S_IWUSR); | ||
682 | |||
683 | DEF_DEV_ATTRIB(optimal_sectors); | ||
684 | SE_DEV_ATTR(optimal_sectors, S_IRUGO | S_IWUSR); | ||
685 | |||
686 | DEF_DEV_ATTRIB_RO(hw_queue_depth); | ||
687 | SE_DEV_ATTR_RO(hw_queue_depth); | ||
688 | |||
689 | DEF_DEV_ATTRIB(queue_depth); | ||
690 | SE_DEV_ATTR(queue_depth, S_IRUGO | S_IWUSR); | ||
691 | |||
692 | DEF_DEV_ATTRIB(max_unmap_lba_count); | ||
693 | SE_DEV_ATTR(max_unmap_lba_count, S_IRUGO | S_IWUSR); | ||
694 | |||
695 | DEF_DEV_ATTRIB(max_unmap_block_desc_count); | ||
696 | SE_DEV_ATTR(max_unmap_block_desc_count, S_IRUGO | S_IWUSR); | ||
697 | |||
698 | DEF_DEV_ATTRIB(unmap_granularity); | ||
699 | SE_DEV_ATTR(unmap_granularity, S_IRUGO | S_IWUSR); | ||
700 | |||
701 | DEF_DEV_ATTRIB(unmap_granularity_alignment); | ||
702 | SE_DEV_ATTR(unmap_granularity_alignment, S_IRUGO | S_IWUSR); | ||
703 | |||
704 | DEF_DEV_ATTRIB(max_write_same_len); | ||
705 | SE_DEV_ATTR(max_write_same_len, S_IRUGO | S_IWUSR); | ||
706 | |||
707 | CONFIGFS_EATTR_OPS(target_core_dev_attrib, se_dev_attrib, da_group); | 590 | CONFIGFS_EATTR_OPS(target_core_dev_attrib, se_dev_attrib, da_group); |
708 | 591 | ||
709 | static struct configfs_attribute *target_core_dev_attrib_attrs[] = { | ||
710 | &target_core_dev_attrib_emulate_model_alias.attr, | ||
711 | &target_core_dev_attrib_emulate_dpo.attr, | ||
712 | &target_core_dev_attrib_emulate_fua_write.attr, | ||
713 | &target_core_dev_attrib_emulate_fua_read.attr, | ||
714 | &target_core_dev_attrib_emulate_write_cache.attr, | ||
715 | &target_core_dev_attrib_emulate_ua_intlck_ctrl.attr, | ||
716 | &target_core_dev_attrib_emulate_tas.attr, | ||
717 | &target_core_dev_attrib_emulate_tpu.attr, | ||
718 | &target_core_dev_attrib_emulate_tpws.attr, | ||
719 | &target_core_dev_attrib_emulate_caw.attr, | ||
720 | &target_core_dev_attrib_emulate_3pc.attr, | ||
721 | &target_core_dev_attrib_pi_prot_type.attr, | ||
722 | &target_core_dev_attrib_hw_pi_prot_type.attr, | ||
723 | &target_core_dev_attrib_pi_prot_format.attr, | ||
724 | &target_core_dev_attrib_enforce_pr_isids.attr, | ||
725 | &target_core_dev_attrib_force_pr_aptpl.attr, | ||
726 | &target_core_dev_attrib_is_nonrot.attr, | ||
727 | &target_core_dev_attrib_emulate_rest_reord.attr, | ||
728 | &target_core_dev_attrib_hw_block_size.attr, | ||
729 | &target_core_dev_attrib_block_size.attr, | ||
730 | &target_core_dev_attrib_hw_max_sectors.attr, | ||
731 | &target_core_dev_attrib_fabric_max_sectors.attr, | ||
732 | &target_core_dev_attrib_optimal_sectors.attr, | ||
733 | &target_core_dev_attrib_hw_queue_depth.attr, | ||
734 | &target_core_dev_attrib_queue_depth.attr, | ||
735 | &target_core_dev_attrib_max_unmap_lba_count.attr, | ||
736 | &target_core_dev_attrib_max_unmap_block_desc_count.attr, | ||
737 | &target_core_dev_attrib_unmap_granularity.attr, | ||
738 | &target_core_dev_attrib_unmap_granularity_alignment.attr, | ||
739 | &target_core_dev_attrib_max_write_same_len.attr, | ||
740 | NULL, | ||
741 | }; | ||
742 | |||
743 | static struct configfs_item_operations target_core_dev_attrib_ops = { | 592 | static struct configfs_item_operations target_core_dev_attrib_ops = { |
744 | .show_attribute = target_core_dev_attrib_attr_show, | 593 | .show_attribute = target_core_dev_attrib_attr_show, |
745 | .store_attribute = target_core_dev_attrib_attr_store, | 594 | .store_attribute = target_core_dev_attrib_attr_store, |
746 | }; | 595 | }; |
747 | 596 | ||
748 | static struct config_item_type target_core_dev_attrib_cit = { | 597 | TB_CIT_SETUP(dev_attrib, &target_core_dev_attrib_ops, NULL, NULL); |
749 | .ct_item_ops = &target_core_dev_attrib_ops, | ||
750 | .ct_attrs = target_core_dev_attrib_attrs, | ||
751 | .ct_owner = THIS_MODULE, | ||
752 | }; | ||
753 | 598 | ||
754 | /* End functions for struct config_item_type target_core_dev_attrib_cit */ | 599 | /* End functions for struct config_item_type tb_dev_attrib_cit */ |
755 | 600 | ||
756 | /* Start functions for struct config_item_type target_core_dev_wwn_cit */ | 601 | /* Start functions for struct config_item_type tb_dev_wwn_cit */ |
757 | 602 | ||
758 | CONFIGFS_EATTR_STRUCT(target_core_dev_wwn, t10_wwn); | 603 | CONFIGFS_EATTR_STRUCT(target_core_dev_wwn, t10_wwn); |
759 | #define SE_DEV_WWN_ATTR(_name, _mode) \ | 604 | #define SE_DEV_WWN_ATTR(_name, _mode) \ |
@@ -984,15 +829,11 @@ static struct configfs_item_operations target_core_dev_wwn_ops = { | |||
984 | .store_attribute = target_core_dev_wwn_attr_store, | 829 | .store_attribute = target_core_dev_wwn_attr_store, |
985 | }; | 830 | }; |
986 | 831 | ||
987 | static struct config_item_type target_core_dev_wwn_cit = { | 832 | TB_CIT_SETUP(dev_wwn, &target_core_dev_wwn_ops, NULL, target_core_dev_wwn_attrs); |
988 | .ct_item_ops = &target_core_dev_wwn_ops, | ||
989 | .ct_attrs = target_core_dev_wwn_attrs, | ||
990 | .ct_owner = THIS_MODULE, | ||
991 | }; | ||
992 | 833 | ||
993 | /* End functions for struct config_item_type target_core_dev_wwn_cit */ | 834 | /* End functions for struct config_item_type tb_dev_wwn_cit */ |
994 | 835 | ||
995 | /* Start functions for struct config_item_type target_core_dev_pr_cit */ | 836 | /* Start functions for struct config_item_type tb_dev_pr_cit */ |
996 | 837 | ||
997 | CONFIGFS_EATTR_STRUCT(target_core_dev_pr, se_device); | 838 | CONFIGFS_EATTR_STRUCT(target_core_dev_pr, se_device); |
998 | #define SE_DEV_PR_ATTR(_name, _mode) \ | 839 | #define SE_DEV_PR_ATTR(_name, _mode) \ |
@@ -1453,15 +1294,11 @@ static struct configfs_item_operations target_core_dev_pr_ops = { | |||
1453 | .store_attribute = target_core_dev_pr_attr_store, | 1294 | .store_attribute = target_core_dev_pr_attr_store, |
1454 | }; | 1295 | }; |
1455 | 1296 | ||
1456 | static struct config_item_type target_core_dev_pr_cit = { | 1297 | TB_CIT_SETUP(dev_pr, &target_core_dev_pr_ops, NULL, target_core_dev_pr_attrs); |
1457 | .ct_item_ops = &target_core_dev_pr_ops, | ||
1458 | .ct_attrs = target_core_dev_pr_attrs, | ||
1459 | .ct_owner = THIS_MODULE, | ||
1460 | }; | ||
1461 | 1298 | ||
1462 | /* End functions for struct config_item_type target_core_dev_pr_cit */ | 1299 | /* End functions for struct config_item_type tb_dev_pr_cit */ |
1463 | 1300 | ||
1464 | /* Start functions for struct config_item_type target_core_dev_cit */ | 1301 | /* Start functions for struct config_item_type tb_dev_cit */ |
1465 | 1302 | ||
1466 | static ssize_t target_core_show_dev_info(void *p, char *page) | 1303 | static ssize_t target_core_show_dev_info(void *p, char *page) |
1467 | { | 1304 | { |
@@ -1925,7 +1762,7 @@ static struct target_core_configfs_attribute target_core_attr_dev_lba_map = { | |||
1925 | .store = target_core_store_dev_lba_map, | 1762 | .store = target_core_store_dev_lba_map, |
1926 | }; | 1763 | }; |
1927 | 1764 | ||
1928 | static struct configfs_attribute *lio_core_dev_attrs[] = { | 1765 | static struct configfs_attribute *target_core_dev_attrs[] = { |
1929 | &target_core_attr_dev_info.attr, | 1766 | &target_core_attr_dev_info.attr, |
1930 | &target_core_attr_dev_control.attr, | 1767 | &target_core_attr_dev_control.attr, |
1931 | &target_core_attr_dev_alias.attr, | 1768 | &target_core_attr_dev_alias.attr, |
@@ -1984,13 +1821,9 @@ static struct configfs_item_operations target_core_dev_item_ops = { | |||
1984 | .store_attribute = target_core_dev_store, | 1821 | .store_attribute = target_core_dev_store, |
1985 | }; | 1822 | }; |
1986 | 1823 | ||
1987 | static struct config_item_type target_core_dev_cit = { | 1824 | TB_CIT_SETUP(dev, &target_core_dev_item_ops, NULL, target_core_dev_attrs); |
1988 | .ct_item_ops = &target_core_dev_item_ops, | ||
1989 | .ct_attrs = lio_core_dev_attrs, | ||
1990 | .ct_owner = THIS_MODULE, | ||
1991 | }; | ||
1992 | 1825 | ||
1993 | /* End functions for struct config_item_type target_core_dev_cit */ | 1826 | /* End functions for struct config_item_type tb_dev_cit */ |
1994 | 1827 | ||
1995 | /* Start functions for struct config_item_type target_core_alua_lu_gp_cit */ | 1828 | /* Start functions for struct config_item_type target_core_alua_lu_gp_cit */ |
1996 | 1829 | ||
@@ -2670,7 +2503,7 @@ static struct config_item_type target_core_alua_tg_pt_gp_cit = { | |||
2670 | 2503 | ||
2671 | /* End functions for struct config_item_type target_core_alua_tg_pt_gp_cit */ | 2504 | /* End functions for struct config_item_type target_core_alua_tg_pt_gp_cit */ |
2672 | 2505 | ||
2673 | /* Start functions for struct config_item_type target_core_alua_tg_pt_gps_cit */ | 2506 | /* Start functions for struct config_item_type tb_alua_tg_pt_gps_cit */ |
2674 | 2507 | ||
2675 | static struct config_group *target_core_alua_create_tg_pt_gp( | 2508 | static struct config_group *target_core_alua_create_tg_pt_gp( |
2676 | struct config_group *group, | 2509 | struct config_group *group, |
@@ -2721,12 +2554,9 @@ static struct configfs_group_operations target_core_alua_tg_pt_gps_group_ops = { | |||
2721 | .drop_item = &target_core_alua_drop_tg_pt_gp, | 2554 | .drop_item = &target_core_alua_drop_tg_pt_gp, |
2722 | }; | 2555 | }; |
2723 | 2556 | ||
2724 | static struct config_item_type target_core_alua_tg_pt_gps_cit = { | 2557 | TB_CIT_SETUP(dev_alua_tg_pt_gps, NULL, &target_core_alua_tg_pt_gps_group_ops, NULL); |
2725 | .ct_group_ops = &target_core_alua_tg_pt_gps_group_ops, | ||
2726 | .ct_owner = THIS_MODULE, | ||
2727 | }; | ||
2728 | 2558 | ||
2729 | /* End functions for struct config_item_type target_core_alua_tg_pt_gps_cit */ | 2559 | /* End functions for struct config_item_type tb_alua_tg_pt_gps_cit */ |
2730 | 2560 | ||
2731 | /* Start functions for struct config_item_type target_core_alua_cit */ | 2561 | /* Start functions for struct config_item_type target_core_alua_cit */ |
2732 | 2562 | ||
@@ -2744,7 +2574,7 @@ static struct config_item_type target_core_alua_cit = { | |||
2744 | 2574 | ||
2745 | /* End functions for struct config_item_type target_core_alua_cit */ | 2575 | /* End functions for struct config_item_type target_core_alua_cit */ |
2746 | 2576 | ||
2747 | /* Start functions for struct config_item_type target_core_stat_cit */ | 2577 | /* Start functions for struct config_item_type tb_dev_stat_cit */ |
2748 | 2578 | ||
2749 | static struct config_group *target_core_stat_mkdir( | 2579 | static struct config_group *target_core_stat_mkdir( |
2750 | struct config_group *group, | 2580 | struct config_group *group, |
@@ -2765,12 +2595,9 @@ static struct configfs_group_operations target_core_stat_group_ops = { | |||
2765 | .drop_item = &target_core_stat_rmdir, | 2595 | .drop_item = &target_core_stat_rmdir, |
2766 | }; | 2596 | }; |
2767 | 2597 | ||
2768 | static struct config_item_type target_core_stat_cit = { | 2598 | TB_CIT_SETUP(dev_stat, NULL, &target_core_stat_group_ops, NULL); |
2769 | .ct_group_ops = &target_core_stat_group_ops, | ||
2770 | .ct_owner = THIS_MODULE, | ||
2771 | }; | ||
2772 | 2599 | ||
2773 | /* End functions for struct config_item_type target_core_stat_cit */ | 2600 | /* End functions for struct config_item_type tb_dev_stat_cit */ |
2774 | 2601 | ||
2775 | /* Start functions for struct config_item_type target_core_hba_cit */ | 2602 | /* Start functions for struct config_item_type target_core_hba_cit */ |
2776 | 2603 | ||
@@ -2806,17 +2633,17 @@ static struct config_group *target_core_make_subdev( | |||
2806 | if (!dev_cg->default_groups) | 2633 | if (!dev_cg->default_groups) |
2807 | goto out_free_device; | 2634 | goto out_free_device; |
2808 | 2635 | ||
2809 | config_group_init_type_name(dev_cg, name, &target_core_dev_cit); | 2636 | config_group_init_type_name(dev_cg, name, &t->tb_cits.tb_dev_cit); |
2810 | config_group_init_type_name(&dev->dev_attrib.da_group, "attrib", | 2637 | config_group_init_type_name(&dev->dev_attrib.da_group, "attrib", |
2811 | &target_core_dev_attrib_cit); | 2638 | &t->tb_cits.tb_dev_attrib_cit); |
2812 | config_group_init_type_name(&dev->dev_pr_group, "pr", | 2639 | config_group_init_type_name(&dev->dev_pr_group, "pr", |
2813 | &target_core_dev_pr_cit); | 2640 | &t->tb_cits.tb_dev_pr_cit); |
2814 | config_group_init_type_name(&dev->t10_wwn.t10_wwn_group, "wwn", | 2641 | config_group_init_type_name(&dev->t10_wwn.t10_wwn_group, "wwn", |
2815 | &target_core_dev_wwn_cit); | 2642 | &t->tb_cits.tb_dev_wwn_cit); |
2816 | config_group_init_type_name(&dev->t10_alua.alua_tg_pt_gps_group, | 2643 | config_group_init_type_name(&dev->t10_alua.alua_tg_pt_gps_group, |
2817 | "alua", &target_core_alua_tg_pt_gps_cit); | 2644 | "alua", &t->tb_cits.tb_dev_alua_tg_pt_gps_cit); |
2818 | config_group_init_type_name(&dev->dev_stat_grps.stat_group, | 2645 | config_group_init_type_name(&dev->dev_stat_grps.stat_group, |
2819 | "statistics", &target_core_stat_cit); | 2646 | "statistics", &t->tb_cits.tb_dev_stat_cit); |
2820 | 2647 | ||
2821 | dev_cg->default_groups[0] = &dev->dev_attrib.da_group; | 2648 | dev_cg->default_groups[0] = &dev->dev_attrib.da_group; |
2822 | dev_cg->default_groups[1] = &dev->dev_pr_group; | 2649 | dev_cg->default_groups[1] = &dev->dev_pr_group; |
@@ -3110,6 +2937,17 @@ static struct config_item_type target_core_cit = { | |||
3110 | 2937 | ||
3111 | /* Stop functions for struct config_item_type target_core_hba_cit */ | 2938 | /* Stop functions for struct config_item_type target_core_hba_cit */ |
3112 | 2939 | ||
2940 | void target_core_setup_sub_cits(struct se_subsystem_api *sa) | ||
2941 | { | ||
2942 | target_core_setup_dev_cit(sa); | ||
2943 | target_core_setup_dev_attrib_cit(sa); | ||
2944 | target_core_setup_dev_pr_cit(sa); | ||
2945 | target_core_setup_dev_wwn_cit(sa); | ||
2946 | target_core_setup_dev_alua_tg_pt_gps_cit(sa); | ||
2947 | target_core_setup_dev_stat_cit(sa); | ||
2948 | } | ||
2949 | EXPORT_SYMBOL(target_core_setup_sub_cits); | ||
2950 | |||
3113 | static int __init target_core_init_configfs(void) | 2951 | static int __init target_core_init_configfs(void) |
3114 | { | 2952 | { |
3115 | struct config_group *target_cg, *hba_cg = NULL, *alua_cg = NULL; | 2953 | struct config_group *target_cg, *hba_cg = NULL, *alua_cg = NULL; |
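target_core_setup_sub_cits() is the one call a backend makes at init time to populate all six generic cits. The shape of a caller, with example_template and example_backend_dev_attrs as hypothetical stand-ins (the fileio and iblock hunks below show the real call sites):

static int __init example_backend_init(void)
{
        /* fill the six tb_cits with generic ops and this module's owner */
        target_core_setup_sub_cits(&example_template);
        /* then override whatever the backend customizes, here the attrib list */
        example_template.tb_cits.tb_dev_attrib_cit.ct_attrs =
                                example_backend_dev_attrs;
        return transport_subsystem_register(&example_template);
}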
diff --git a/drivers/target/target_core_device.c b/drivers/target/target_core_device.c index c45f9e907e44..7653cfb027a2 100644 --- a/drivers/target/target_core_device.c +++ b/drivers/target/target_core_device.c | |||
@@ -659,6 +659,7 @@ int se_dev_set_max_unmap_lba_count( | |||
659 | dev, dev->dev_attrib.max_unmap_lba_count); | 659 | dev, dev->dev_attrib.max_unmap_lba_count); |
660 | return 0; | 660 | return 0; |
661 | } | 661 | } |
662 | EXPORT_SYMBOL(se_dev_set_max_unmap_lba_count); | ||
662 | 663 | ||
663 | int se_dev_set_max_unmap_block_desc_count( | 664 | int se_dev_set_max_unmap_block_desc_count( |
664 | struct se_device *dev, | 665 | struct se_device *dev, |
@@ -670,6 +671,7 @@ int se_dev_set_max_unmap_block_desc_count( | |||
670 | dev, dev->dev_attrib.max_unmap_block_desc_count); | 671 | dev, dev->dev_attrib.max_unmap_block_desc_count); |
671 | return 0; | 672 | return 0; |
672 | } | 673 | } |
674 | EXPORT_SYMBOL(se_dev_set_max_unmap_block_desc_count); | ||
673 | 675 | ||
674 | int se_dev_set_unmap_granularity( | 676 | int se_dev_set_unmap_granularity( |
675 | struct se_device *dev, | 677 | struct se_device *dev, |
@@ -680,6 +682,7 @@ int se_dev_set_unmap_granularity( | |||
680 | dev, dev->dev_attrib.unmap_granularity); | 682 | dev, dev->dev_attrib.unmap_granularity); |
681 | return 0; | 683 | return 0; |
682 | } | 684 | } |
685 | EXPORT_SYMBOL(se_dev_set_unmap_granularity); | ||
683 | 686 | ||
684 | int se_dev_set_unmap_granularity_alignment( | 687 | int se_dev_set_unmap_granularity_alignment( |
685 | struct se_device *dev, | 688 | struct se_device *dev, |
@@ -690,6 +693,7 @@ int se_dev_set_unmap_granularity_alignment( | |||
690 | dev, dev->dev_attrib.unmap_granularity_alignment); | 693 | dev, dev->dev_attrib.unmap_granularity_alignment); |
691 | return 0; | 694 | return 0; |
692 | } | 695 | } |
696 | EXPORT_SYMBOL(se_dev_set_unmap_granularity_alignment); | ||
693 | 697 | ||
694 | int se_dev_set_max_write_same_len( | 698 | int se_dev_set_max_write_same_len( |
695 | struct se_device *dev, | 699 | struct se_device *dev, |
@@ -700,6 +704,7 @@ int se_dev_set_max_write_same_len( | |||
700 | dev, dev->dev_attrib.max_write_same_len); | 704 | dev, dev->dev_attrib.max_write_same_len); |
701 | return 0; | 705 | return 0; |
702 | } | 706 | } |
707 | EXPORT_SYMBOL(se_dev_set_max_write_same_len); | ||
703 | 708 | ||
704 | static void dev_set_t10_wwn_model_alias(struct se_device *dev) | 709 | static void dev_set_t10_wwn_model_alias(struct se_device *dev) |
705 | { | 710 | { |
@@ -738,6 +743,7 @@ int se_dev_set_emulate_model_alias(struct se_device *dev, int flag) | |||
738 | 743 | ||
739 | return 0; | 744 | return 0; |
740 | } | 745 | } |
746 | EXPORT_SYMBOL(se_dev_set_emulate_model_alias); | ||
741 | 747 | ||
742 | int se_dev_set_emulate_dpo(struct se_device *dev, int flag) | 748 | int se_dev_set_emulate_dpo(struct se_device *dev, int flag) |
743 | { | 749 | { |
@@ -753,6 +759,7 @@ int se_dev_set_emulate_dpo(struct se_device *dev, int flag) | |||
753 | 759 | ||
754 | return 0; | 760 | return 0; |
755 | } | 761 | } |
762 | EXPORT_SYMBOL(se_dev_set_emulate_dpo); | ||
756 | 763 | ||
757 | int se_dev_set_emulate_fua_write(struct se_device *dev, int flag) | 764 | int se_dev_set_emulate_fua_write(struct se_device *dev, int flag) |
758 | { | 765 | { |
@@ -760,17 +767,12 @@ int se_dev_set_emulate_fua_write(struct se_device *dev, int flag) | |||
760 | pr_err("Illegal value %d\n", flag); | 767 | pr_err("Illegal value %d\n", flag); |
761 | return -EINVAL; | 768 | return -EINVAL; |
762 | } | 769 | } |
763 | |||
764 | if (flag && | ||
765 | dev->transport->transport_type == TRANSPORT_PLUGIN_PHBA_PDEV) { | ||
766 | pr_err("emulate_fua_write not supported for pSCSI\n"); | ||
767 | return -EINVAL; | ||
768 | } | ||
769 | dev->dev_attrib.emulate_fua_write = flag; | 770 | dev->dev_attrib.emulate_fua_write = flag; |
770 | pr_debug("dev[%p]: SE Device Forced Unit Access WRITEs: %d\n", | 771 | pr_debug("dev[%p]: SE Device Forced Unit Access WRITEs: %d\n", |
771 | dev, dev->dev_attrib.emulate_fua_write); | 772 | dev, dev->dev_attrib.emulate_fua_write); |
772 | return 0; | 773 | return 0; |
773 | } | 774 | } |
775 | EXPORT_SYMBOL(se_dev_set_emulate_fua_write); | ||
774 | 776 | ||
775 | int se_dev_set_emulate_fua_read(struct se_device *dev, int flag) | 777 | int se_dev_set_emulate_fua_read(struct se_device *dev, int flag) |
776 | { | 778 | { |
@@ -786,6 +788,7 @@ int se_dev_set_emulate_fua_read(struct se_device *dev, int flag) | |||
786 | 788 | ||
787 | return 0; | 789 | return 0; |
788 | } | 790 | } |
791 | EXPORT_SYMBOL(se_dev_set_emulate_fua_read); | ||
789 | 792 | ||
790 | int se_dev_set_emulate_write_cache(struct se_device *dev, int flag) | 793 | int se_dev_set_emulate_write_cache(struct se_device *dev, int flag) |
791 | { | 794 | { |
@@ -794,11 +797,6 @@ int se_dev_set_emulate_write_cache(struct se_device *dev, int flag) | |||
794 | return -EINVAL; | 797 | return -EINVAL; |
795 | } | 798 | } |
796 | if (flag && | 799 | if (flag && |
797 | dev->transport->transport_type == TRANSPORT_PLUGIN_PHBA_PDEV) { | ||
798 | pr_err("emulate_write_cache not supported for pSCSI\n"); | ||
799 | return -EINVAL; | ||
800 | } | ||
801 | if (flag && | ||
802 | dev->transport->get_write_cache) { | 800 | dev->transport->get_write_cache) { |
803 | pr_err("emulate_write_cache not supported for this device\n"); | 801 | pr_err("emulate_write_cache not supported for this device\n"); |
804 | return -EINVAL; | 802 | return -EINVAL; |
@@ -809,6 +807,7 @@ int se_dev_set_emulate_write_cache(struct se_device *dev, int flag) | |||
809 | dev, dev->dev_attrib.emulate_write_cache); | 807 | dev, dev->dev_attrib.emulate_write_cache); |
810 | return 0; | 808 | return 0; |
811 | } | 809 | } |
810 | EXPORT_SYMBOL(se_dev_set_emulate_write_cache); | ||
812 | 811 | ||
813 | int se_dev_set_emulate_ua_intlck_ctrl(struct se_device *dev, int flag) | 812 | int se_dev_set_emulate_ua_intlck_ctrl(struct se_device *dev, int flag) |
814 | { | 813 | { |
@@ -829,6 +828,7 @@ int se_dev_set_emulate_ua_intlck_ctrl(struct se_device *dev, int flag) | |||
829 | 828 | ||
830 | return 0; | 829 | return 0; |
831 | } | 830 | } |
831 | EXPORT_SYMBOL(se_dev_set_emulate_ua_intlck_ctrl); | ||
832 | 832 | ||
833 | int se_dev_set_emulate_tas(struct se_device *dev, int flag) | 833 | int se_dev_set_emulate_tas(struct se_device *dev, int flag) |
834 | { | 834 | { |
@@ -849,6 +849,7 @@ int se_dev_set_emulate_tas(struct se_device *dev, int flag) | |||
849 | 849 | ||
850 | return 0; | 850 | return 0; |
851 | } | 851 | } |
852 | EXPORT_SYMBOL(se_dev_set_emulate_tas); | ||
852 | 853 | ||
853 | int se_dev_set_emulate_tpu(struct se_device *dev, int flag) | 854 | int se_dev_set_emulate_tpu(struct se_device *dev, int flag) |
854 | { | 855 | { |
@@ -870,6 +871,7 @@ int se_dev_set_emulate_tpu(struct se_device *dev, int flag) | |||
870 | dev, flag); | 871 | dev, flag); |
871 | return 0; | 872 | return 0; |
872 | } | 873 | } |
874 | EXPORT_SYMBOL(se_dev_set_emulate_tpu); | ||
873 | 875 | ||
874 | int se_dev_set_emulate_tpws(struct se_device *dev, int flag) | 876 | int se_dev_set_emulate_tpws(struct se_device *dev, int flag) |
875 | { | 877 | { |
@@ -891,6 +893,7 @@ int se_dev_set_emulate_tpws(struct se_device *dev, int flag) | |||
891 | dev, flag); | 893 | dev, flag); |
892 | return 0; | 894 | return 0; |
893 | } | 895 | } |
896 | EXPORT_SYMBOL(se_dev_set_emulate_tpws); | ||
894 | 897 | ||
895 | int se_dev_set_emulate_caw(struct se_device *dev, int flag) | 898 | int se_dev_set_emulate_caw(struct se_device *dev, int flag) |
896 | { | 899 | { |
@@ -904,6 +907,7 @@ int se_dev_set_emulate_caw(struct se_device *dev, int flag) | |||
904 | 907 | ||
905 | return 0; | 908 | return 0; |
906 | } | 909 | } |
910 | EXPORT_SYMBOL(se_dev_set_emulate_caw); | ||
907 | 911 | ||
908 | int se_dev_set_emulate_3pc(struct se_device *dev, int flag) | 912 | int se_dev_set_emulate_3pc(struct se_device *dev, int flag) |
909 | { | 913 | { |
@@ -917,6 +921,7 @@ int se_dev_set_emulate_3pc(struct se_device *dev, int flag) | |||
917 | 921 | ||
918 | return 0; | 922 | return 0; |
919 | } | 923 | } |
924 | EXPORT_SYMBOL(se_dev_set_emulate_3pc); | ||
920 | 925 | ||
921 | int se_dev_set_pi_prot_type(struct se_device *dev, int flag) | 926 | int se_dev_set_pi_prot_type(struct se_device *dev, int flag) |
922 | { | 927 | { |
@@ -970,6 +975,7 @@ int se_dev_set_pi_prot_type(struct se_device *dev, int flag) | |||
970 | 975 | ||
971 | return 0; | 976 | return 0; |
972 | } | 977 | } |
978 | EXPORT_SYMBOL(se_dev_set_pi_prot_type); | ||
973 | 979 | ||
974 | int se_dev_set_pi_prot_format(struct se_device *dev, int flag) | 980 | int se_dev_set_pi_prot_format(struct se_device *dev, int flag) |
975 | { | 981 | { |
@@ -1005,6 +1011,7 @@ int se_dev_set_pi_prot_format(struct se_device *dev, int flag) | |||
1005 | 1011 | ||
1006 | return 0; | 1012 | return 0; |
1007 | } | 1013 | } |
1014 | EXPORT_SYMBOL(se_dev_set_pi_prot_format); | ||
1008 | 1015 | ||
1009 | int se_dev_set_enforce_pr_isids(struct se_device *dev, int flag) | 1016 | int se_dev_set_enforce_pr_isids(struct se_device *dev, int flag) |
1010 | { | 1017 | { |
@@ -1017,6 +1024,7 @@ int se_dev_set_enforce_pr_isids(struct se_device *dev, int flag) | |||
1017 | (dev->dev_attrib.enforce_pr_isids) ? "Enabled" : "Disabled"); | 1024 | (dev->dev_attrib.enforce_pr_isids) ? "Enabled" : "Disabled"); |
1018 | return 0; | 1025 | return 0; |
1019 | } | 1026 | } |
1027 | EXPORT_SYMBOL(se_dev_set_enforce_pr_isids); | ||
1020 | 1028 | ||
1021 | int se_dev_set_force_pr_aptpl(struct se_device *dev, int flag) | 1029 | int se_dev_set_force_pr_aptpl(struct se_device *dev, int flag) |
1022 | { | 1030 | { |
@@ -1034,6 +1042,7 @@ int se_dev_set_force_pr_aptpl(struct se_device *dev, int flag) | |||
1034 | pr_debug("dev[%p]: SE Device force_pr_aptpl: %d\n", dev, flag); | 1042 | pr_debug("dev[%p]: SE Device force_pr_aptpl: %d\n", dev, flag); |
1035 | return 0; | 1043 | return 0; |
1036 | } | 1044 | } |
1045 | EXPORT_SYMBOL(se_dev_set_force_pr_aptpl); | ||
1037 | 1046 | ||
1038 | int se_dev_set_is_nonrot(struct se_device *dev, int flag) | 1047 | int se_dev_set_is_nonrot(struct se_device *dev, int flag) |
1039 | { | 1048 | { |
@@ -1046,6 +1055,7 @@ int se_dev_set_is_nonrot(struct se_device *dev, int flag) | |||
1046 | dev, flag); | 1055 | dev, flag); |
1047 | return 0; | 1056 | return 0; |
1048 | } | 1057 | } |
1058 | EXPORT_SYMBOL(se_dev_set_is_nonrot); | ||
1049 | 1059 | ||
1050 | int se_dev_set_emulate_rest_reord(struct se_device *dev, int flag) | 1060 | int se_dev_set_emulate_rest_reord(struct se_device *dev, int flag) |
1051 | { | 1061 | { |
@@ -1058,6 +1068,7 @@ int se_dev_set_emulate_rest_reord(struct se_device *dev, int flag) | |||
1058 | pr_debug("dev[%p]: SE Device emulate_rest_reord: %d\n", dev, flag); | 1068 | pr_debug("dev[%p]: SE Device emulate_rest_reord: %d\n", dev, flag); |
1059 | return 0; | 1069 | return 0; |
1060 | } | 1070 | } |
1071 | EXPORT_SYMBOL(se_dev_set_emulate_rest_reord); | ||
1061 | 1072 | ||
1062 | /* | 1073 | /* |
1063 | * Note, this can only be called on unexported SE Device Object. | 1074 | * Note, this can only be called on unexported SE Device Object. |
@@ -1076,31 +1087,21 @@ int se_dev_set_queue_depth(struct se_device *dev, u32 queue_depth) | |||
1076 | return -EINVAL; | 1087 | return -EINVAL; |
1077 | } | 1088 | } |
1078 | 1089 | ||
1079 | if (dev->transport->transport_type == TRANSPORT_PLUGIN_PHBA_PDEV) { | 1090 | if (queue_depth > dev->dev_attrib.queue_depth) { |
1080 | if (queue_depth > dev->dev_attrib.hw_queue_depth) { | 1091 | if (queue_depth > dev->dev_attrib.hw_queue_depth) { |
1081 | pr_err("dev[%p]: Passed queue_depth: %u" | 1092 | pr_err("dev[%p]: Passed queue_depth:" |
1082 | " exceeds TCM/SE_Device TCQ: %u\n", | 1093 | " %u exceeds TCM/SE_Device MAX" |
1083 | dev, queue_depth, | 1094 | " TCQ: %u\n", dev, queue_depth, |
1084 | dev->dev_attrib.hw_queue_depth); | 1095 | dev->dev_attrib.hw_queue_depth); |
1085 | return -EINVAL; | 1096 | return -EINVAL; |
1086 | } | 1097 | } |
1087 | } else { | ||
1088 | if (queue_depth > dev->dev_attrib.queue_depth) { | ||
1089 | if (queue_depth > dev->dev_attrib.hw_queue_depth) { | ||
1090 | pr_err("dev[%p]: Passed queue_depth:" | ||
1091 | " %u exceeds TCM/SE_Device MAX" | ||
1092 | " TCQ: %u\n", dev, queue_depth, | ||
1093 | dev->dev_attrib.hw_queue_depth); | ||
1094 | return -EINVAL; | ||
1095 | } | ||
1096 | } | ||
1097 | } | 1098 | } |
1098 | |||
1099 | dev->dev_attrib.queue_depth = dev->queue_depth = queue_depth; | 1099 | dev->dev_attrib.queue_depth = dev->queue_depth = queue_depth; |
1100 | pr_debug("dev[%p]: SE Device TCQ Depth changed to: %u\n", | 1100 | pr_debug("dev[%p]: SE Device TCQ Depth changed to: %u\n", |
1101 | dev, queue_depth); | 1101 | dev, queue_depth); |
1102 | return 0; | 1102 | return 0; |
1103 | } | 1103 | } |
1104 | EXPORT_SYMBOL(se_dev_set_queue_depth); | ||
1104 | 1105 | ||
1105 | int se_dev_set_fabric_max_sectors(struct se_device *dev, u32 fabric_max_sectors) | 1106 | int se_dev_set_fabric_max_sectors(struct se_device *dev, u32 fabric_max_sectors) |
1106 | { | 1107 | { |
@@ -1123,22 +1124,12 @@ int se_dev_set_fabric_max_sectors(struct se_device *dev, u32 fabric_max_sectors) | |||
1123 | DA_STATUS_MAX_SECTORS_MIN); | 1124 | DA_STATUS_MAX_SECTORS_MIN); |
1124 | return -EINVAL; | 1125 | return -EINVAL; |
1125 | } | 1126 | } |
1126 | if (dev->transport->transport_type == TRANSPORT_PLUGIN_PHBA_PDEV) { | 1127 | if (fabric_max_sectors > DA_STATUS_MAX_SECTORS_MAX) { |
1127 | if (fabric_max_sectors > dev->dev_attrib.hw_max_sectors) { | 1128 | pr_err("dev[%p]: Passed fabric_max_sectors: %u" |
1128 | pr_err("dev[%p]: Passed fabric_max_sectors: %u" | 1129 | " greater than DA_STATUS_MAX_SECTORS_MAX:" |
1129 | " greater than TCM/SE_Device max_sectors:" | 1130 | " %u\n", dev, fabric_max_sectors, |
1130 | " %u\n", dev, fabric_max_sectors, | 1131 | DA_STATUS_MAX_SECTORS_MAX); |
1131 | dev->dev_attrib.hw_max_sectors); | 1132 | return -EINVAL; |
1132 | return -EINVAL; | ||
1133 | } | ||
1134 | } else { | ||
1135 | if (fabric_max_sectors > DA_STATUS_MAX_SECTORS_MAX) { | ||
1136 | pr_err("dev[%p]: Passed fabric_max_sectors: %u" | ||
1137 | " greater than DA_STATUS_MAX_SECTORS_MAX:" | ||
1138 | " %u\n", dev, fabric_max_sectors, | ||
1139 | DA_STATUS_MAX_SECTORS_MAX); | ||
1140 | return -EINVAL; | ||
1141 | } | ||
1142 | } | 1133 | } |
1143 | /* | 1134 | /* |
1144 | * Align max_sectors down to PAGE_SIZE to follow transport_allocate_data_tasks() | 1135 | * Align max_sectors down to PAGE_SIZE to follow transport_allocate_data_tasks() |
@@ -1155,6 +1146,7 @@ int se_dev_set_fabric_max_sectors(struct se_device *dev, u32 fabric_max_sectors) | |||
1155 | dev, fabric_max_sectors); | 1146 | dev, fabric_max_sectors); |
1156 | return 0; | 1147 | return 0; |
1157 | } | 1148 | } |
1149 | EXPORT_SYMBOL(se_dev_set_fabric_max_sectors); | ||
1158 | 1150 | ||
1159 | int se_dev_set_optimal_sectors(struct se_device *dev, u32 optimal_sectors) | 1151 | int se_dev_set_optimal_sectors(struct se_device *dev, u32 optimal_sectors) |
1160 | { | 1152 | { |
@@ -1164,11 +1156,6 @@ int se_dev_set_optimal_sectors(struct se_device *dev, u32 optimal_sectors) | |||
1164 | dev, dev->export_count); | 1156 | dev, dev->export_count); |
1165 | return -EINVAL; | 1157 | return -EINVAL; |
1166 | } | 1158 | } |
1167 | if (dev->transport->transport_type == TRANSPORT_PLUGIN_PHBA_PDEV) { | ||
1168 | pr_err("dev[%p]: Passed optimal_sectors cannot be" | ||
1169 | " changed for TCM/pSCSI\n", dev); | ||
1170 | return -EINVAL; | ||
1171 | } | ||
1172 | if (optimal_sectors > dev->dev_attrib.fabric_max_sectors) { | 1159 | if (optimal_sectors > dev->dev_attrib.fabric_max_sectors) { |
1173 | pr_err("dev[%p]: Passed optimal_sectors %u cannot be" | 1160 | pr_err("dev[%p]: Passed optimal_sectors %u cannot be" |
1174 | " greater than fabric_max_sectors: %u\n", dev, | 1161 | " greater than fabric_max_sectors: %u\n", dev, |
@@ -1181,6 +1168,7 @@ int se_dev_set_optimal_sectors(struct se_device *dev, u32 optimal_sectors) | |||
1181 | dev, optimal_sectors); | 1168 | dev, optimal_sectors); |
1182 | return 0; | 1169 | return 0; |
1183 | } | 1170 | } |
1171 | EXPORT_SYMBOL(se_dev_set_optimal_sectors); | ||
1184 | 1172 | ||
1185 | int se_dev_set_block_size(struct se_device *dev, u32 block_size) | 1173 | int se_dev_set_block_size(struct se_device *dev, u32 block_size) |
1186 | { | 1174 | { |
@@ -1201,13 +1189,6 @@ int se_dev_set_block_size(struct se_device *dev, u32 block_size) | |||
1201 | return -EINVAL; | 1189 | return -EINVAL; |
1202 | } | 1190 | } |
1203 | 1191 | ||
1204 | if (dev->transport->transport_type == TRANSPORT_PLUGIN_PHBA_PDEV) { | ||
1205 | pr_err("dev[%p]: Not allowed to change block_size for" | ||
1206 | " Physical Device, use for Linux/SCSI to change" | ||
1207 | " block_size for underlying hardware\n", dev); | ||
1208 | return -EINVAL; | ||
1209 | } | ||
1210 | |||
1211 | dev->dev_attrib.block_size = block_size; | 1192 | dev->dev_attrib.block_size = block_size; |
1212 | pr_debug("dev[%p]: SE Device block_size changed to %u\n", | 1193 | pr_debug("dev[%p]: SE Device block_size changed to %u\n", |
1213 | dev, block_size); | 1194 | dev, block_size); |
@@ -1218,6 +1199,7 @@ int se_dev_set_block_size(struct se_device *dev, u32 block_size) | |||
1218 | 1199 | ||
1219 | return 0; | 1200 | return 0; |
1220 | } | 1201 | } |
1202 | EXPORT_SYMBOL(se_dev_set_block_size); | ||
1221 | 1203 | ||
1222 | struct se_lun *core_dev_add_lun( | 1204 | struct se_lun *core_dev_add_lun( |
1223 | struct se_portal_group *tpg, | 1205 | struct se_portal_group *tpg, |
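All of these EXPORT_SYMBOL() additions serve one purpose: backend modules now own the configfs attribute tables, so their store handlers must be able to reach the se_dev_set_*() setters from module context. The wrapper shape mirrors the DEF_DEV_ATTRIB_STORE macro deleted from target_core_configfs.c above; written out for block_size as an illustration, with the error print elided:

static ssize_t example_store_block_size(struct se_dev_attrib *da,
                                        const char *page, size_t count)
{
        unsigned long val;
        int ret;

        ret = kstrtoul(page, 0, &val);
        if (ret < 0)
                return -EINVAL;
        ret = se_dev_set_block_size(da->da_dev, (u32)val);
        return ret ? -EINVAL : count;   /* configfs expects count on success */
}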
diff --git a/drivers/target/target_core_file.c b/drivers/target/target_core_file.c index 72c83d98662b..c2aea099ea4a 100644 --- a/drivers/target/target_core_file.c +++ b/drivers/target/target_core_file.c | |||
@@ -37,6 +37,7 @@ | |||
37 | 37 | ||
38 | #include <target/target_core_base.h> | 38 | #include <target/target_core_base.h> |
39 | #include <target/target_core_backend.h> | 39 | #include <target/target_core_backend.h> |
40 | #include <target/target_core_backend_configfs.h> | ||
40 | 41 | ||
41 | #include "target_core_file.h" | 42 | #include "target_core_file.h" |
42 | 43 | ||
@@ -934,6 +935,42 @@ fd_parse_cdb(struct se_cmd *cmd) | |||
934 | return sbc_parse_cdb(cmd, &fd_sbc_ops); | 935 | return sbc_parse_cdb(cmd, &fd_sbc_ops); |
935 | } | 936 | } |
936 | 937 | ||
938 | DEF_TB_DEFAULT_ATTRIBS(fileio); | ||
939 | |||
940 | static struct configfs_attribute *fileio_backend_dev_attrs[] = { | ||
941 | &fileio_dev_attrib_emulate_model_alias.attr, | ||
942 | &fileio_dev_attrib_emulate_dpo.attr, | ||
943 | &fileio_dev_attrib_emulate_fua_write.attr, | ||
944 | &fileio_dev_attrib_emulate_fua_read.attr, | ||
945 | &fileio_dev_attrib_emulate_write_cache.attr, | ||
946 | &fileio_dev_attrib_emulate_ua_intlck_ctrl.attr, | ||
947 | &fileio_dev_attrib_emulate_tas.attr, | ||
948 | &fileio_dev_attrib_emulate_tpu.attr, | ||
949 | &fileio_dev_attrib_emulate_tpws.attr, | ||
950 | &fileio_dev_attrib_emulate_caw.attr, | ||
951 | &fileio_dev_attrib_emulate_3pc.attr, | ||
952 | &fileio_dev_attrib_pi_prot_type.attr, | ||
953 | &fileio_dev_attrib_hw_pi_prot_type.attr, | ||
954 | &fileio_dev_attrib_pi_prot_format.attr, | ||
955 | &fileio_dev_attrib_enforce_pr_isids.attr, | ||
956 | &fileio_dev_attrib_is_nonrot.attr, | ||
957 | &fileio_dev_attrib_emulate_rest_reord.attr, | ||
958 | &fileio_dev_attrib_force_pr_aptpl.attr, | ||
959 | &fileio_dev_attrib_hw_block_size.attr, | ||
960 | &fileio_dev_attrib_block_size.attr, | ||
961 | &fileio_dev_attrib_hw_max_sectors.attr, | ||
962 | &fileio_dev_attrib_fabric_max_sectors.attr, | ||
963 | &fileio_dev_attrib_optimal_sectors.attr, | ||
964 | &fileio_dev_attrib_hw_queue_depth.attr, | ||
965 | &fileio_dev_attrib_queue_depth.attr, | ||
966 | &fileio_dev_attrib_max_unmap_lba_count.attr, | ||
967 | &fileio_dev_attrib_max_unmap_block_desc_count.attr, | ||
968 | &fileio_dev_attrib_unmap_granularity.attr, | ||
969 | &fileio_dev_attrib_unmap_granularity_alignment.attr, | ||
970 | &fileio_dev_attrib_max_write_same_len.attr, | ||
971 | NULL, | ||
972 | }; | ||
973 | |||
937 | static struct se_subsystem_api fileio_template = { | 974 | static struct se_subsystem_api fileio_template = { |
938 | .name = "fileio", | 975 | .name = "fileio", |
939 | .inquiry_prod = "FILEIO", | 976 | .inquiry_prod = "FILEIO", |
@@ -957,6 +994,11 @@ static struct se_subsystem_api fileio_template = { | |||
957 | 994 | ||
958 | static int __init fileio_module_init(void) | 995 | static int __init fileio_module_init(void) |
959 | { | 996 | { |
997 | struct target_backend_cits *tbc = &fileio_template.tb_cits; | ||
998 | |||
999 | target_core_setup_sub_cits(&fileio_template); | ||
1000 | tbc->tb_dev_attrib_cit.ct_attrs = fileio_backend_dev_attrs; | ||
1001 | |||
960 | return transport_subsystem_register(&fileio_template); | 1002 | return transport_subsystem_register(&fileio_template); |
961 | } | 1003 | } |
962 | 1004 | ||
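Because ct_attrs is a plain NULL-terminated array, a backend is not obliged to publish the full default set; a smaller surface is just a shorter list. A hypothetical backend exposing only two attributes, with names following the DEF_TB_DEFAULT_ATTRIBS(tiny) naming convention used by fileio above:

static struct configfs_attribute *tiny_backend_dev_attrs[] = {
        &tiny_dev_attrib_block_size.attr,
        &tiny_dev_attrib_queue_depth.attr,
        NULL,   /* terminator required by configfs */
};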
diff --git a/drivers/target/target_core_hba.c b/drivers/target/target_core_hba.c index a25051a37dd7..ff95f95dcd13 100644 --- a/drivers/target/target_core_hba.c +++ b/drivers/target/target_core_hba.c | |||
@@ -36,6 +36,7 @@ | |||
36 | #include <target/target_core_base.h> | 36 | #include <target/target_core_base.h> |
37 | #include <target/target_core_backend.h> | 37 | #include <target/target_core_backend.h> |
38 | #include <target/target_core_fabric.h> | 38 | #include <target/target_core_fabric.h> |
39 | #include <target/target_core_configfs.h> | ||
39 | 40 | ||
40 | #include "target_core_internal.h" | 41 | #include "target_core_internal.h" |
41 | 42 | ||
@@ -137,8 +138,7 @@ core_alloc_hba(const char *plugin_name, u32 plugin_dep_id, u32 hba_flags) | |||
137 | return hba; | 138 | return hba; |
138 | 139 | ||
139 | out_module_put: | 140 | out_module_put: |
140 | if (hba->transport->owner) | 141 | module_put(hba->transport->owner); |
141 | module_put(hba->transport->owner); | ||
142 | hba->transport = NULL; | 142 | hba->transport = NULL; |
143 | out_free_hba: | 143 | out_free_hba: |
144 | kfree(hba); | 144 | kfree(hba); |
@@ -159,8 +159,7 @@ core_delete_hba(struct se_hba *hba) | |||
159 | pr_debug("CORE_HBA[%d] - Detached HBA from Generic Target" | 159 | pr_debug("CORE_HBA[%d] - Detached HBA from Generic Target" |
160 | " Core\n", hba->hba_id); | 160 | " Core\n", hba->hba_id); |
161 | 161 | ||
162 | if (hba->transport->owner) | 162 | module_put(hba->transport->owner); |
163 | module_put(hba->transport->owner); | ||
164 | 163 | ||
165 | hba->transport = NULL; | 164 | hba->transport = NULL; |
166 | kfree(hba); | 165 | kfree(hba); |
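Dropping the owner checks is safe because the NULL guard already lives inside module_put() itself, which is also what lets a built-in backend (owner == NULL) share this path with modular ones. The helper in kernel/module.c has roughly this shape (body elided):

void module_put(struct module *module)
{
        if (module) {
                /* drop the reference and wake any waiters */
        }
}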
diff --git a/drivers/target/target_core_iblock.c b/drivers/target/target_core_iblock.c index 7e6b857c6b3f..3efff94fbd97 100644 --- a/drivers/target/target_core_iblock.c +++ b/drivers/target/target_core_iblock.c | |||
@@ -41,6 +41,7 @@ | |||
41 | 41 | ||
42 | #include <target/target_core_base.h> | 42 | #include <target/target_core_base.h> |
43 | #include <target/target_core_backend.h> | 43 | #include <target/target_core_backend.h> |
44 | #include <target/target_core_backend_configfs.h> | ||
44 | 45 | ||
45 | #include "target_core_iblock.h" | 46 | #include "target_core_iblock.h" |
46 | 47 | ||
@@ -858,6 +859,42 @@ static bool iblock_get_write_cache(struct se_device *dev) | |||
858 | return q->flush_flags & REQ_FLUSH; | 859 | return q->flush_flags & REQ_FLUSH; |
859 | } | 860 | } |
860 | 861 | ||
862 | DEF_TB_DEFAULT_ATTRIBS(iblock); | ||
863 | |||
864 | static struct configfs_attribute *iblock_backend_dev_attrs[] = { | ||
865 | &iblock_dev_attrib_emulate_model_alias.attr, | ||
866 | &iblock_dev_attrib_emulate_dpo.attr, | ||
867 | &iblock_dev_attrib_emulate_fua_write.attr, | ||
868 | &iblock_dev_attrib_emulate_fua_read.attr, | ||
869 | &iblock_dev_attrib_emulate_write_cache.attr, | ||
870 | &iblock_dev_attrib_emulate_ua_intlck_ctrl.attr, | ||
871 | &iblock_dev_attrib_emulate_tas.attr, | ||
872 | &iblock_dev_attrib_emulate_tpu.attr, | ||
873 | &iblock_dev_attrib_emulate_tpws.attr, | ||
874 | &iblock_dev_attrib_emulate_caw.attr, | ||
875 | &iblock_dev_attrib_emulate_3pc.attr, | ||
876 | &iblock_dev_attrib_pi_prot_type.attr, | ||
877 | &iblock_dev_attrib_hw_pi_prot_type.attr, | ||
878 | &iblock_dev_attrib_pi_prot_format.attr, | ||
879 | &iblock_dev_attrib_enforce_pr_isids.attr, | ||
880 | &iblock_dev_attrib_is_nonrot.attr, | ||
881 | &iblock_dev_attrib_emulate_rest_reord.attr, | ||
882 | &iblock_dev_attrib_force_pr_aptpl.attr, | ||
883 | &iblock_dev_attrib_hw_block_size.attr, | ||
884 | &iblock_dev_attrib_block_size.attr, | ||
885 | &iblock_dev_attrib_hw_max_sectors.attr, | ||
886 | &iblock_dev_attrib_fabric_max_sectors.attr, | ||
887 | &iblock_dev_attrib_optimal_sectors.attr, | ||
888 | &iblock_dev_attrib_hw_queue_depth.attr, | ||
889 | &iblock_dev_attrib_queue_depth.attr, | ||
890 | &iblock_dev_attrib_max_unmap_lba_count.attr, | ||
891 | &iblock_dev_attrib_max_unmap_block_desc_count.attr, | ||
892 | &iblock_dev_attrib_unmap_granularity.attr, | ||
893 | &iblock_dev_attrib_unmap_granularity_alignment.attr, | ||
894 | &iblock_dev_attrib_max_write_same_len.attr, | ||
895 | NULL, | ||
896 | }; | ||
897 | |||
861 | static struct se_subsystem_api iblock_template = { | 898 | static struct se_subsystem_api iblock_template = { |
862 | .name = "iblock", | 899 | .name = "iblock", |
863 | .inquiry_prod = "IBLOCK", | 900 | .inquiry_prod = "IBLOCK", |
@@ -883,6 +920,11 @@ static struct se_subsystem_api iblock_template = { | |||
883 | 920 | ||
884 | static int __init iblock_module_init(void) | 921 | static int __init iblock_module_init(void) |
885 | { | 922 | { |
923 | struct target_backend_cits *tbc = &iblock_template.tb_cits; | ||
924 | |||
925 | target_core_setup_sub_cits(&iblock_template); | ||
926 | tbc->tb_dev_attrib_cit.ct_attrs = iblock_backend_dev_attrs; | ||
927 | |||
886 | return transport_subsystem_register(&iblock_template); | 928 | return transport_subsystem_register(&iblock_template); |
887 | } | 929 | } |
888 | 930 | ||
diff --git a/drivers/target/target_core_internal.h b/drivers/target/target_core_internal.h index e31f42f369ff..60381db90026 100644 --- a/drivers/target/target_core_internal.h +++ b/drivers/target/target_core_internal.h | |||
@@ -18,34 +18,6 @@ int core_dev_export(struct se_device *, struct se_portal_group *, | |||
18 | struct se_lun *); | 18 | struct se_lun *); |
19 | void core_dev_unexport(struct se_device *, struct se_portal_group *, | 19 | void core_dev_unexport(struct se_device *, struct se_portal_group *, |
20 | struct se_lun *); | 20 | struct se_lun *); |
21 | int se_dev_set_task_timeout(struct se_device *, u32); | ||
22 | int se_dev_set_max_unmap_lba_count(struct se_device *, u32); | ||
23 | int se_dev_set_max_unmap_block_desc_count(struct se_device *, u32); | ||
24 | int se_dev_set_unmap_granularity(struct se_device *, u32); | ||
25 | int se_dev_set_unmap_granularity_alignment(struct se_device *, u32); | ||
26 | int se_dev_set_max_write_same_len(struct se_device *, u32); | ||
27 | int se_dev_set_emulate_model_alias(struct se_device *, int); | ||
28 | int se_dev_set_emulate_dpo(struct se_device *, int); | ||
29 | int se_dev_set_emulate_fua_write(struct se_device *, int); | ||
30 | int se_dev_set_emulate_fua_read(struct se_device *, int); | ||
31 | int se_dev_set_emulate_write_cache(struct se_device *, int); | ||
32 | int se_dev_set_emulate_ua_intlck_ctrl(struct se_device *, int); | ||
33 | int se_dev_set_emulate_tas(struct se_device *, int); | ||
34 | int se_dev_set_emulate_tpu(struct se_device *, int); | ||
35 | int se_dev_set_emulate_tpws(struct se_device *, int); | ||
36 | int se_dev_set_emulate_caw(struct se_device *, int); | ||
37 | int se_dev_set_emulate_3pc(struct se_device *, int); | ||
38 | int se_dev_set_pi_prot_type(struct se_device *, int); | ||
39 | int se_dev_set_pi_prot_format(struct se_device *, int); | ||
40 | int se_dev_set_enforce_pr_isids(struct se_device *, int); | ||
41 | int se_dev_set_force_pr_aptpl(struct se_device *, int); | ||
42 | int se_dev_set_is_nonrot(struct se_device *, int); | ||
43 | int se_dev_set_emulate_rest_reord(struct se_device *dev, int); | ||
44 | int se_dev_set_queue_depth(struct se_device *, u32); | ||
45 | int se_dev_set_max_sectors(struct se_device *, u32); | ||
46 | int se_dev_set_fabric_max_sectors(struct se_device *, u32); | ||
47 | int se_dev_set_optimal_sectors(struct se_device *, u32); | ||
48 | int se_dev_set_block_size(struct se_device *, u32); | ||
49 | struct se_lun *core_dev_add_lun(struct se_portal_group *, struct se_device *, u32); | 21 | struct se_lun *core_dev_add_lun(struct se_portal_group *, struct se_device *, u32); |
50 | void core_dev_del_lun(struct se_portal_group *, struct se_lun *); | 22 | void core_dev_del_lun(struct se_portal_group *, struct se_lun *); |
51 | struct se_lun *core_get_lun_from_tpg(struct se_portal_group *, u32); | 23 | struct se_lun *core_get_lun_from_tpg(struct se_portal_group *, u32); |
diff --git a/drivers/target/target_core_pr.c b/drivers/target/target_core_pr.c index 4c261c33cf55..d56f2aaba9af 100644 --- a/drivers/target/target_core_pr.c +++ b/drivers/target/target_core_pr.c | |||
@@ -76,7 +76,7 @@ enum preempt_type { | |||
76 | }; | 76 | }; |
77 | 77 | ||
78 | static void __core_scsi3_complete_pro_release(struct se_device *, struct se_node_acl *, | 78 | static void __core_scsi3_complete_pro_release(struct se_device *, struct se_node_acl *, |
79 | struct t10_pr_registration *, int); | 79 | struct t10_pr_registration *, int, int); |
80 | 80 | ||
81 | static sense_reason_t | 81 | static sense_reason_t |
82 | target_scsi2_reservation_check(struct se_cmd *cmd) | 82 | target_scsi2_reservation_check(struct se_cmd *cmd) |
@@ -1177,7 +1177,7 @@ static int core_scsi3_check_implicit_release( | |||
1177 | * service action with the SERVICE ACTION RESERVATION KEY | 1177 | * service action with the SERVICE ACTION RESERVATION KEY |
1178 | * field set to zero (see 5.7.11.3). | 1178 | * field set to zero (see 5.7.11.3). |
1179 | */ | 1179 | */ |
1180 | __core_scsi3_complete_pro_release(dev, nacl, pr_reg, 0); | 1180 | __core_scsi3_complete_pro_release(dev, nacl, pr_reg, 0, 1); |
1181 | ret = 1; | 1181 | ret = 1; |
1182 | /* | 1182 | /* |
1183 | * For 'All Registrants' reservation types, all existing | 1183 | * For 'All Registrants' reservation types, all existing |
@@ -1219,7 +1219,8 @@ static void __core_scsi3_free_registration( | |||
1219 | 1219 | ||
1220 | pr_reg->pr_reg_deve->def_pr_registered = 0; | 1220 | pr_reg->pr_reg_deve->def_pr_registered = 0; |
1221 | pr_reg->pr_reg_deve->pr_res_key = 0; | 1221 | pr_reg->pr_reg_deve->pr_res_key = 0; |
1222 | list_del(&pr_reg->pr_reg_list); | 1222 | if (!list_empty(&pr_reg->pr_reg_list)) |
1223 | list_del(&pr_reg->pr_reg_list); | ||
1223 | /* | 1224 | /* |
1224 | * Caller accessing *pr_reg using core_scsi3_locate_pr_reg(), | 1225 | * Caller accessing *pr_reg using core_scsi3_locate_pr_reg(), |
1225 | * so call core_scsi3_put_pr_reg() to decrement our reference. | 1226 | * so call core_scsi3_put_pr_reg() to decrement our reference. |
@@ -1271,6 +1272,7 @@ void core_scsi3_free_pr_reg_from_nacl( | |||
1271 | { | 1272 | { |
1272 | struct t10_reservation *pr_tmpl = &dev->t10_pr; | 1273 | struct t10_reservation *pr_tmpl = &dev->t10_pr; |
1273 | struct t10_pr_registration *pr_reg, *pr_reg_tmp, *pr_res_holder; | 1274 | struct t10_pr_registration *pr_reg, *pr_reg_tmp, *pr_res_holder; |
1275 | bool free_reg = false; | ||
1274 | /* | 1276 | /* |
1275 | * If the passed se_node_acl matches the reservation holder, | 1277 | * If the passed se_node_acl matches the reservation holder, |
1276 | * release the reservation. | 1278 | * release the reservation. |
@@ -1278,13 +1280,18 @@ void core_scsi3_free_pr_reg_from_nacl( | |||
1278 | spin_lock(&dev->dev_reservation_lock); | 1280 | spin_lock(&dev->dev_reservation_lock); |
1279 | pr_res_holder = dev->dev_pr_res_holder; | 1281 | pr_res_holder = dev->dev_pr_res_holder; |
1280 | if ((pr_res_holder != NULL) && | 1282 | if ((pr_res_holder != NULL) && |
1281 | (pr_res_holder->pr_reg_nacl == nacl)) | 1283 | (pr_res_holder->pr_reg_nacl == nacl)) { |
1282 | __core_scsi3_complete_pro_release(dev, nacl, pr_res_holder, 0); | 1284 | __core_scsi3_complete_pro_release(dev, nacl, pr_res_holder, 0, 1); |
1285 | free_reg = true; | ||
1286 | } | ||
1283 | spin_unlock(&dev->dev_reservation_lock); | 1287 | spin_unlock(&dev->dev_reservation_lock); |
1284 | /* | 1288 | /* |
1285 | * Release any registration associated with the struct se_node_acl. | 1289 | * Release any registration associated with the struct se_node_acl. |
1286 | */ | 1290 | */ |
1287 | spin_lock(&pr_tmpl->registration_lock); | 1291 | spin_lock(&pr_tmpl->registration_lock); |
1292 | if (pr_res_holder && free_reg) | ||
1293 | __core_scsi3_free_registration(dev, pr_res_holder, NULL, 0); | ||
1294 | |||
1288 | list_for_each_entry_safe(pr_reg, pr_reg_tmp, | 1295 | list_for_each_entry_safe(pr_reg, pr_reg_tmp, |
1289 | &pr_tmpl->registration_list, pr_reg_list) { | 1296 | &pr_tmpl->registration_list, pr_reg_list) { |
1290 | 1297 | ||
@@ -1307,7 +1314,7 @@ void core_scsi3_free_all_registrations( | |||
1307 | if (pr_res_holder != NULL) { | 1314 | if (pr_res_holder != NULL) { |
1308 | struct se_node_acl *pr_res_nacl = pr_res_holder->pr_reg_nacl; | 1315 | struct se_node_acl *pr_res_nacl = pr_res_holder->pr_reg_nacl; |
1309 | __core_scsi3_complete_pro_release(dev, pr_res_nacl, | 1316 | __core_scsi3_complete_pro_release(dev, pr_res_nacl, |
1310 | pr_res_holder, 0); | 1317 | pr_res_holder, 0, 0); |
1311 | } | 1318 | } |
1312 | spin_unlock(&dev->dev_reservation_lock); | 1319 | spin_unlock(&dev->dev_reservation_lock); |
1313 | 1320 | ||
@@ -1429,14 +1436,12 @@ core_scsi3_decode_spec_i_port( | |||
1429 | struct target_core_fabric_ops *tmp_tf_ops; | 1436 | struct target_core_fabric_ops *tmp_tf_ops; |
1430 | unsigned char *buf; | 1437 | unsigned char *buf; |
1431 | unsigned char *ptr, *i_str = NULL, proto_ident, tmp_proto_ident; | 1438 | unsigned char *ptr, *i_str = NULL, proto_ident, tmp_proto_ident; |
1432 | char *iport_ptr = NULL, dest_iport[64], i_buf[PR_REG_ISID_ID_LEN]; | 1439 | char *iport_ptr = NULL, i_buf[PR_REG_ISID_ID_LEN]; |
1433 | sense_reason_t ret; | 1440 | sense_reason_t ret; |
1434 | u32 tpdl, tid_len = 0; | 1441 | u32 tpdl, tid_len = 0; |
1435 | int dest_local_nexus; | 1442 | int dest_local_nexus; |
1436 | u32 dest_rtpi = 0; | 1443 | u32 dest_rtpi = 0; |
1437 | 1444 | ||
1438 | memset(dest_iport, 0, 64); | ||
1439 | |||
1440 | local_se_deve = se_sess->se_node_acl->device_list[cmd->orig_fe_lun]; | 1445 | local_se_deve = se_sess->se_node_acl->device_list[cmd->orig_fe_lun]; |
1441 | /* | 1446 | /* |
1442 | * Allocate a struct pr_transport_id_holder and setup the | 1447 | * Allocate a struct pr_transport_id_holder and setup the |
@@ -2105,13 +2110,13 @@ core_scsi3_emulate_pro_register(struct se_cmd *cmd, u64 res_key, u64 sa_res_key, | |||
2105 | /* | 2110 | /* |
2106 | * sa_res_key=0 Unregister Reservation Key for registered I_T Nexus. | 2111 | * sa_res_key=0 Unregister Reservation Key for registered I_T Nexus. |
2107 | */ | 2112 | */ |
2108 | pr_holder = core_scsi3_check_implicit_release( | 2113 | type = pr_reg->pr_res_type; |
2109 | cmd->se_dev, pr_reg); | 2114 | pr_holder = core_scsi3_check_implicit_release(cmd->se_dev, |
2115 | pr_reg); | ||
2110 | if (pr_holder < 0) { | 2116 | if (pr_holder < 0) { |
2111 | ret = TCM_RESERVATION_CONFLICT; | 2117 | ret = TCM_RESERVATION_CONFLICT; |
2112 | goto out; | 2118 | goto out; |
2113 | } | 2119 | } |
2114 | type = pr_reg->pr_res_type; | ||
2115 | 2120 | ||
2116 | spin_lock(&pr_tmpl->registration_lock); | 2121 | spin_lock(&pr_tmpl->registration_lock); |
2117 | /* | 2122 | /* |
@@ -2269,6 +2274,7 @@ core_scsi3_pro_reserve(struct se_cmd *cmd, int type, int scope, u64 res_key) | |||
2269 | spin_lock(&dev->dev_reservation_lock); | 2274 | spin_lock(&dev->dev_reservation_lock); |
2270 | pr_res_holder = dev->dev_pr_res_holder; | 2275 | pr_res_holder = dev->dev_pr_res_holder; |
2271 | if (pr_res_holder) { | 2276 | if (pr_res_holder) { |
2277 | int pr_res_type = pr_res_holder->pr_res_type; | ||
2272 | /* | 2278 | /* |
2273 | * From spc4r17 Section 5.7.9: Reserving: | 2279 | * From spc4r17 Section 5.7.9: Reserving: |
2274 | * | 2280 | * |
@@ -2279,7 +2285,9 @@ core_scsi3_pro_reserve(struct se_cmd *cmd, int type, int scope, u64 res_key) | |||
2279 | * the logical unit, then the command shall be completed with | 2285 | * the logical unit, then the command shall be completed with |
2280 | * RESERVATION CONFLICT status. | 2286 | * RESERVATION CONFLICT status. |
2281 | */ | 2287 | */ |
2282 | if (pr_res_holder != pr_reg) { | 2288 | if ((pr_res_holder != pr_reg) && |
2289 | (pr_res_type != PR_TYPE_WRITE_EXCLUSIVE_ALLREG) && | ||
2290 | (pr_res_type != PR_TYPE_EXCLUSIVE_ACCESS_ALLREG)) { | ||
2283 | struct se_node_acl *pr_res_nacl = pr_res_holder->pr_reg_nacl; | 2291 | struct se_node_acl *pr_res_nacl = pr_res_holder->pr_reg_nacl; |
2284 | pr_err("SPC-3 PR: Attempted RESERVE from" | 2292 | pr_err("SPC-3 PR: Attempted RESERVE from" |
2285 | " [%s]: %s while reservation already held by" | 2293 | " [%s]: %s while reservation already held by" |
@@ -2385,23 +2393,59 @@ static void __core_scsi3_complete_pro_release( | |||
2385 | struct se_device *dev, | 2393 | struct se_device *dev, |
2386 | struct se_node_acl *se_nacl, | 2394 | struct se_node_acl *se_nacl, |
2387 | struct t10_pr_registration *pr_reg, | 2395 | struct t10_pr_registration *pr_reg, |
2388 | int explicit) | 2396 | int explicit, |
2397 | int unreg) | ||
2389 | { | 2398 | { |
2390 | struct target_core_fabric_ops *tfo = se_nacl->se_tpg->se_tpg_tfo; | 2399 | struct target_core_fabric_ops *tfo = se_nacl->se_tpg->se_tpg_tfo; |
2391 | char i_buf[PR_REG_ISID_ID_LEN]; | 2400 | char i_buf[PR_REG_ISID_ID_LEN]; |
2401 | int pr_res_type = 0, pr_res_scope = 0; | ||
2392 | 2402 | ||
2393 | memset(i_buf, 0, PR_REG_ISID_ID_LEN); | 2403 | memset(i_buf, 0, PR_REG_ISID_ID_LEN); |
2394 | core_pr_dump_initiator_port(pr_reg, i_buf, PR_REG_ISID_ID_LEN); | 2404 | core_pr_dump_initiator_port(pr_reg, i_buf, PR_REG_ISID_ID_LEN); |
2395 | /* | 2405 | /* |
2396 | * Go ahead and release the current PR reservation holder. | 2406 | * Go ahead and release the current PR reservation holder. |
2407 | * If an All Registrants reservation is currently active and | ||
2408 | * a unregister operation is requested, replace the current | ||
2409 | * dev_pr_res_holder with another active registration. | ||
2397 | */ | 2410 | */ |
2398 | dev->dev_pr_res_holder = NULL; | 2411 | if (dev->dev_pr_res_holder) { |
2412 | pr_res_type = dev->dev_pr_res_holder->pr_res_type; | ||
2413 | pr_res_scope = dev->dev_pr_res_holder->pr_res_scope; | ||
2414 | dev->dev_pr_res_holder->pr_res_type = 0; | ||
2415 | dev->dev_pr_res_holder->pr_res_scope = 0; | ||
2416 | dev->dev_pr_res_holder->pr_res_holder = 0; | ||
2417 | dev->dev_pr_res_holder = NULL; | ||
2418 | } | ||
2419 | if (!unreg) | ||
2420 | goto out; | ||
2399 | 2421 | ||
2400 | pr_debug("SPC-3 PR [%s] Service Action: %s RELEASE cleared" | 2422 | spin_lock(&dev->t10_pr.registration_lock); |
2401 | " reservation holder TYPE: %s ALL_TG_PT: %d\n", | 2423 | list_del_init(&pr_reg->pr_reg_list); |
2402 | tfo->get_fabric_name(), (explicit) ? "explicit" : "implicit", | 2424 | /* |
2403 | core_scsi3_pr_dump_type(pr_reg->pr_res_type), | 2425 | * If the I_T nexus is a reservation holder, the persistent reservation |
2404 | (pr_reg->pr_reg_all_tg_pt) ? 1 : 0); | 2426 | * is of an all registrants type, and the I_T nexus is the last remaining |
2427 | * registered I_T nexus, then the device server shall also release the | ||
2428 | * persistent reservation. | ||
2429 | */ | ||
2430 | if (!list_empty(&dev->t10_pr.registration_list) && | ||
2431 | ((pr_res_type == PR_TYPE_WRITE_EXCLUSIVE_ALLREG) || | ||
2432 | (pr_res_type == PR_TYPE_EXCLUSIVE_ACCESS_ALLREG))) { | ||
2433 | dev->dev_pr_res_holder = | ||
2434 | list_entry(dev->t10_pr.registration_list.next, | ||
2435 | struct t10_pr_registration, pr_reg_list); | ||
2436 | dev->dev_pr_res_holder->pr_res_type = pr_res_type; | ||
2437 | dev->dev_pr_res_holder->pr_res_scope = pr_res_scope; | ||
2438 | dev->dev_pr_res_holder->pr_res_holder = 1; | ||
2439 | } | ||
2440 | spin_unlock(&dev->t10_pr.registration_lock); | ||
2441 | out: | ||
2442 | if (!dev->dev_pr_res_holder) { | ||
2443 | pr_debug("SPC-3 PR [%s] Service Action: %s RELEASE cleared" | ||
2444 | " reservation holder TYPE: %s ALL_TG_PT: %d\n", | ||
2445 | tfo->get_fabric_name(), (explicit) ? "explicit" : | ||
2446 | "implicit", core_scsi3_pr_dump_type(pr_res_type), | ||
2447 | (pr_reg->pr_reg_all_tg_pt) ? 1 : 0); | ||
2448 | } | ||
2405 | pr_debug("SPC-3 PR [%s] RELEASE Node: %s%s\n", | 2449 | pr_debug("SPC-3 PR [%s] RELEASE Node: %s%s\n", |
2406 | tfo->get_fabric_name(), se_nacl->initiatorname, | 2450 | tfo->get_fabric_name(), se_nacl->initiatorname, |
2407 | i_buf); | 2451 | i_buf); |
@@ -2532,7 +2576,7 @@ core_scsi3_emulate_pro_release(struct se_cmd *cmd, int type, int scope, | |||
2532 | * server shall not establish a unit attention condition. | 2576 | * server shall not establish a unit attention condition. |
2533 | */ | 2577 | */ |
2534 | __core_scsi3_complete_pro_release(dev, se_sess->se_node_acl, | 2578 | __core_scsi3_complete_pro_release(dev, se_sess->se_node_acl, |
2535 | pr_reg, 1); | 2579 | pr_reg, 1, 0); |
2536 | 2580 | ||
2537 | spin_unlock(&dev->dev_reservation_lock); | 2581 | spin_unlock(&dev->dev_reservation_lock); |
2538 | 2582 | ||
@@ -2620,7 +2664,7 @@ core_scsi3_emulate_pro_clear(struct se_cmd *cmd, u64 res_key) | |||
2620 | if (pr_res_holder) { | 2664 | if (pr_res_holder) { |
2621 | struct se_node_acl *pr_res_nacl = pr_res_holder->pr_reg_nacl; | 2665 | struct se_node_acl *pr_res_nacl = pr_res_holder->pr_reg_nacl; |
2622 | __core_scsi3_complete_pro_release(dev, pr_res_nacl, | 2666 | __core_scsi3_complete_pro_release(dev, pr_res_nacl, |
2623 | pr_res_holder, 0); | 2667 | pr_res_holder, 0, 0); |
2624 | } | 2668 | } |
2625 | spin_unlock(&dev->dev_reservation_lock); | 2669 | spin_unlock(&dev->dev_reservation_lock); |
2626 | /* | 2670 | /* |
@@ -2679,7 +2723,7 @@ static void __core_scsi3_complete_pro_preempt( | |||
2679 | */ | 2723 | */ |
2680 | if (dev->dev_pr_res_holder) | 2724 | if (dev->dev_pr_res_holder) |
2681 | __core_scsi3_complete_pro_release(dev, nacl, | 2725 | __core_scsi3_complete_pro_release(dev, nacl, |
2682 | dev->dev_pr_res_holder, 0); | 2726 | dev->dev_pr_res_holder, 0, 0); |
2683 | 2727 | ||
2684 | dev->dev_pr_res_holder = pr_reg; | 2728 | dev->dev_pr_res_holder = pr_reg; |
2685 | pr_reg->pr_res_holder = 1; | 2729 | pr_reg->pr_res_holder = 1; |
@@ -2924,8 +2968,8 @@ core_scsi3_pro_preempt(struct se_cmd *cmd, int type, int scope, u64 res_key, | |||
2924 | */ | 2968 | */ |
2925 | if (pr_reg_n != pr_res_holder) | 2969 | if (pr_reg_n != pr_res_holder) |
2926 | __core_scsi3_complete_pro_release(dev, | 2970 | __core_scsi3_complete_pro_release(dev, |
2927 | pr_res_holder->pr_reg_nacl, | 2971 | pr_res_holder->pr_reg_nacl, |
2928 | dev->dev_pr_res_holder, 0); | 2972 | dev->dev_pr_res_holder, 0, 0); |
2929 | /* | 2973 | /* |
2930 | * b) Remove the registrations for all I_T nexuses identified | 2974 | * b) Remove the registrations for all I_T nexuses identified |
2931 | * by the SERVICE ACTION RESERVATION KEY field, except the | 2975 | * by the SERVICE ACTION RESERVATION KEY field, except the |
@@ -3059,7 +3103,7 @@ core_scsi3_emulate_pro_register_and_move(struct se_cmd *cmd, u64 res_key, | |||
3059 | struct t10_reservation *pr_tmpl = &dev->t10_pr; | 3103 | struct t10_reservation *pr_tmpl = &dev->t10_pr; |
3060 | unsigned char *buf; | 3104 | unsigned char *buf; |
3061 | unsigned char *initiator_str; | 3105 | unsigned char *initiator_str; |
3062 | char *iport_ptr = NULL, dest_iport[64], i_buf[PR_REG_ISID_ID_LEN]; | 3106 | char *iport_ptr = NULL, i_buf[PR_REG_ISID_ID_LEN]; |
3063 | u32 tid_len, tmp_tid_len; | 3107 | u32 tid_len, tmp_tid_len; |
3064 | int new_reg = 0, type, scope, matching_iname; | 3108 | int new_reg = 0, type, scope, matching_iname; |
3065 | sense_reason_t ret; | 3109 | sense_reason_t ret; |
@@ -3071,7 +3115,6 @@ core_scsi3_emulate_pro_register_and_move(struct se_cmd *cmd, u64 res_key, | |||
3071 | return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE; | 3115 | return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE; |
3072 | } | 3116 | } |
3073 | 3117 | ||
3074 | memset(dest_iport, 0, 64); | ||
3075 | memset(i_buf, 0, PR_REG_ISID_ID_LEN); | 3118 | memset(i_buf, 0, PR_REG_ISID_ID_LEN); |
3076 | se_tpg = se_sess->se_tpg; | 3119 | se_tpg = se_sess->se_tpg; |
3077 | tf_ops = se_tpg->se_tpg_tfo; | 3120 | tf_ops = se_tpg->se_tpg_tfo; |
@@ -3389,7 +3432,7 @@ after_iport_check: | |||
3389 | * holder (i.e., the I_T nexus on which the | 3432 | * holder (i.e., the I_T nexus on which the |
3390 | */ | 3433 | */ |
3391 | __core_scsi3_complete_pro_release(dev, pr_res_nacl, | 3434 | __core_scsi3_complete_pro_release(dev, pr_res_nacl, |
3392 | dev->dev_pr_res_holder, 0); | 3435 | dev->dev_pr_res_holder, 0, 0); |
3393 | /* | 3436 | /* |
3394 | * g) Move the persistent reservation to the specified I_T nexus using | 3437 | * g) Move the persistent reservation to the specified I_T nexus using |
3395 | * the same scope and type as the persistent reservation released in | 3438 | * the same scope and type as the persistent reservation released in |
@@ -3837,7 +3880,8 @@ core_scsi3_pri_read_full_status(struct se_cmd *cmd) | |||
3837 | unsigned char *buf; | 3880 | unsigned char *buf; |
3838 | u32 add_desc_len = 0, add_len = 0, desc_len, exp_desc_len; | 3881 | u32 add_desc_len = 0, add_len = 0, desc_len, exp_desc_len; |
3839 | u32 off = 8; /* off into first Full Status descriptor */ | 3882 | u32 off = 8; /* off into first Full Status descriptor */ |
3840 | int format_code = 0; | 3883 | int format_code = 0, pr_res_type = 0, pr_res_scope = 0; |
3884 | bool all_reg = false; | ||
3841 | 3885 | ||
3842 | if (cmd->data_length < 8) { | 3886 | if (cmd->data_length < 8) { |
3843 | pr_err("PRIN SA READ_FULL_STATUS SCSI Data Length: %u" | 3887 | pr_err("PRIN SA READ_FULL_STATUS SCSI Data Length: %u" |
@@ -3854,6 +3898,19 @@ core_scsi3_pri_read_full_status(struct se_cmd *cmd) | |||
3854 | buf[2] = ((dev->t10_pr.pr_generation >> 8) & 0xff); | 3898 | buf[2] = ((dev->t10_pr.pr_generation >> 8) & 0xff); |
3855 | buf[3] = (dev->t10_pr.pr_generation & 0xff); | 3899 | buf[3] = (dev->t10_pr.pr_generation & 0xff); |
3856 | 3900 | ||
3901 | spin_lock(&dev->dev_reservation_lock); | ||
3902 | if (dev->dev_pr_res_holder) { | ||
3903 | struct t10_pr_registration *pr_holder = dev->dev_pr_res_holder; | ||
3904 | |||
3905 | if (pr_holder->pr_res_type == PR_TYPE_WRITE_EXCLUSIVE_ALLREG || | ||
3906 | pr_holder->pr_res_type == PR_TYPE_EXCLUSIVE_ACCESS_ALLREG) { | ||
3907 | all_reg = true; | ||
3908 | pr_res_type = pr_holder->pr_res_type; | ||
3909 | pr_res_scope = pr_holder->pr_res_scope; | ||
3910 | } | ||
3911 | } | ||
3912 | spin_unlock(&dev->dev_reservation_lock); | ||
3913 | |||
3857 | spin_lock(&pr_tmpl->registration_lock); | 3914 | spin_lock(&pr_tmpl->registration_lock); |
3858 | list_for_each_entry_safe(pr_reg, pr_reg_tmp, | 3915 | list_for_each_entry_safe(pr_reg, pr_reg_tmp, |
3859 | &pr_tmpl->registration_list, pr_reg_list) { | 3916 | &pr_tmpl->registration_list, pr_reg_list) { |
@@ -3901,14 +3958,20 @@ core_scsi3_pri_read_full_status(struct se_cmd *cmd) | |||
3901 | * reservation holder for PR_HOLDER bit. | 3958 | * reservation holder for PR_HOLDER bit. |
3902 | * | 3959 | * |
3903 | * Also, if this registration is the reservation | 3960 | * Also, if this registration is the reservation |
3904 | * holder, fill in SCOPE and TYPE in the next byte. | 3961 | * holder or there is an All Registrants reservation |
3962 | * active, fill in SCOPE and TYPE in the next byte. | ||
3905 | */ | 3963 | */ |
3906 | if (pr_reg->pr_res_holder) { | 3964 | if (pr_reg->pr_res_holder) { |
3907 | buf[off++] |= 0x01; | 3965 | buf[off++] |= 0x01; |
3908 | buf[off++] = (pr_reg->pr_res_scope & 0xf0) | | 3966 | buf[off++] = (pr_reg->pr_res_scope & 0xf0) | |
3909 | (pr_reg->pr_res_type & 0x0f); | 3967 | (pr_reg->pr_res_type & 0x0f); |
3910 | } else | 3968 | } else if (all_reg) { |
3969 | buf[off++] |= 0x01; | ||
3970 | buf[off++] = (pr_res_scope & 0xf0) | | ||
3971 | (pr_res_type & 0x0f); | ||
3972 | } else { | ||
3911 | off += 2; | 3973 | off += 2; |
3974 | } | ||
3912 | 3975 | ||
3913 | off += 4; /* Skip over reserved area */ | 3976 | off += 4; /* Skip over reserved area */ |
3914 | /* | 3977 | /* |
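The largest behavioral change in the target_core_pr.c hunks is the new unreg path in __core_scsi3_complete_pro_release(): when the holder of a Write Exclusive or Exclusive Access All Registrants reservation unregisters, the reservation is no longer dropped outright; the next remaining registration is promoted to holder, and the reservation only goes away with the last registered I_T nexus. A standalone sketch of that hand-off, using simplified stand-in types for struct se_device and struct t10_pr_registration (the real code runs under dev->t10_pr.registration_lock and uses list_entry() on registration_list):

/* Simplified stand-ins; sketch of the AllRegistrants holder hand-off. */
#include <stdbool.h>
#include <stddef.h>

struct pr_registration {
	struct pr_registration *next;	/* stand-in for pr_reg_list */
	int type, scope;
	bool holder;
};

struct pr_device {
	struct pr_registration *registrations;	/* registration_list */
	struct pr_registration *res_holder;	/* dev_pr_res_holder */
};

/* 'leaving' holds an all-registrants reservation and is unregistering:
 * clear its holder state, unlink it, then promote the next remaining
 * registrant so the reservation survives until the list is empty. */
static void allreg_unregister_holder(struct pr_device *dev,
				     struct pr_registration *leaving)
{
	int type = leaving->type, scope = leaving->scope;

	leaving->holder = false;
	dev->res_holder = NULL;
	dev->registrations = leaving->next;	/* unlink (simplified) */

	if (dev->registrations) {
		struct pr_registration *n = dev->registrations;

		n->type = type;
		n->scope = scope;
		n->holder = true;
		dev->res_holder = n;
	}
	/* else: last registrant gone, so the reservation is released too */
}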
diff --git a/drivers/target/target_core_pscsi.c b/drivers/target/target_core_pscsi.c index 7c8291f0bbbc..74873e42cf7e 100644 --- a/drivers/target/target_core_pscsi.c +++ b/drivers/target/target_core_pscsi.c | |||
@@ -44,6 +44,7 @@ | |||
44 | 44 | ||
45 | #include <target/target_core_base.h> | 45 | #include <target/target_core_base.h> |
46 | #include <target/target_core_backend.h> | 46 | #include <target/target_core_backend.h> |
47 | #include <target/target_core_backend_configfs.h> | ||
47 | 48 | ||
48 | #include "target_core_alua.h" | 49 | #include "target_core_alua.h" |
49 | #include "target_core_pscsi.h" | 50 | #include "target_core_pscsi.h" |
@@ -1165,6 +1166,26 @@ static void pscsi_req_done(struct request *req, int uptodate) | |||
1165 | kfree(pt); | 1166 | kfree(pt); |
1166 | } | 1167 | } |
1167 | 1168 | ||
1169 | DEF_TB_DEV_ATTRIB_RO(pscsi, hw_pi_prot_type); | ||
1170 | TB_DEV_ATTR_RO(pscsi, hw_pi_prot_type); | ||
1171 | |||
1172 | DEF_TB_DEV_ATTRIB_RO(pscsi, hw_block_size); | ||
1173 | TB_DEV_ATTR_RO(pscsi, hw_block_size); | ||
1174 | |||
1175 | DEF_TB_DEV_ATTRIB_RO(pscsi, hw_max_sectors); | ||
1176 | TB_DEV_ATTR_RO(pscsi, hw_max_sectors); | ||
1177 | |||
1178 | DEF_TB_DEV_ATTRIB_RO(pscsi, hw_queue_depth); | ||
1179 | TB_DEV_ATTR_RO(pscsi, hw_queue_depth); | ||
1180 | |||
1181 | static struct configfs_attribute *pscsi_backend_dev_attrs[] = { | ||
1182 | &pscsi_dev_attrib_hw_pi_prot_type.attr, | ||
1183 | &pscsi_dev_attrib_hw_block_size.attr, | ||
1184 | &pscsi_dev_attrib_hw_max_sectors.attr, | ||
1185 | &pscsi_dev_attrib_hw_queue_depth.attr, | ||
1186 | NULL, | ||
1187 | }; | ||
1188 | |||
1168 | static struct se_subsystem_api pscsi_template = { | 1189 | static struct se_subsystem_api pscsi_template = { |
1169 | .name = "pscsi", | 1190 | .name = "pscsi", |
1170 | .owner = THIS_MODULE, | 1191 | .owner = THIS_MODULE, |
@@ -1185,6 +1206,11 @@ static struct se_subsystem_api pscsi_template = { | |||
1185 | 1206 | ||
1186 | static int __init pscsi_module_init(void) | 1207 | static int __init pscsi_module_init(void) |
1187 | { | 1208 | { |
1209 | struct target_backend_cits *tbc = &pscsi_template.tb_cits; | ||
1210 | |||
1211 | target_core_setup_sub_cits(&pscsi_template); | ||
1212 | tbc->tb_dev_attrib_cit.ct_attrs = pscsi_backend_dev_attrs; | ||
1213 | |||
1188 | return transport_subsystem_register(&pscsi_template); | 1214 | return transport_subsystem_register(&pscsi_template); |
1189 | } | 1215 | } |
1190 | 1216 | ||
diff --git a/drivers/target/target_core_rd.c b/drivers/target/target_core_rd.c index b920db3388cd..60ebd170a561 100644 --- a/drivers/target/target_core_rd.c +++ b/drivers/target/target_core_rd.c | |||
@@ -34,6 +34,7 @@ | |||
34 | 34 | ||
35 | #include <target/target_core_base.h> | 35 | #include <target/target_core_base.h> |
36 | #include <target/target_core_backend.h> | 36 | #include <target/target_core_backend.h> |
37 | #include <target/target_core_backend_configfs.h> | ||
37 | 38 | ||
38 | #include "target_core_rd.h" | 39 | #include "target_core_rd.h" |
39 | 40 | ||
@@ -632,6 +633,42 @@ rd_parse_cdb(struct se_cmd *cmd) | |||
632 | return sbc_parse_cdb(cmd, &rd_sbc_ops); | 633 | return sbc_parse_cdb(cmd, &rd_sbc_ops); |
633 | } | 634 | } |
634 | 635 | ||
636 | DEF_TB_DEFAULT_ATTRIBS(rd_mcp); | ||
637 | |||
638 | static struct configfs_attribute *rd_mcp_backend_dev_attrs[] = { | ||
639 | &rd_mcp_dev_attrib_emulate_model_alias.attr, | ||
640 | &rd_mcp_dev_attrib_emulate_dpo.attr, | ||
641 | &rd_mcp_dev_attrib_emulate_fua_write.attr, | ||
642 | &rd_mcp_dev_attrib_emulate_fua_read.attr, | ||
643 | &rd_mcp_dev_attrib_emulate_write_cache.attr, | ||
644 | &rd_mcp_dev_attrib_emulate_ua_intlck_ctrl.attr, | ||
645 | &rd_mcp_dev_attrib_emulate_tas.attr, | ||
646 | &rd_mcp_dev_attrib_emulate_tpu.attr, | ||
647 | &rd_mcp_dev_attrib_emulate_tpws.attr, | ||
648 | &rd_mcp_dev_attrib_emulate_caw.attr, | ||
649 | &rd_mcp_dev_attrib_emulate_3pc.attr, | ||
650 | &rd_mcp_dev_attrib_pi_prot_type.attr, | ||
651 | &rd_mcp_dev_attrib_hw_pi_prot_type.attr, | ||
652 | &rd_mcp_dev_attrib_pi_prot_format.attr, | ||
653 | &rd_mcp_dev_attrib_enforce_pr_isids.attr, | ||
654 | &rd_mcp_dev_attrib_is_nonrot.attr, | ||
655 | &rd_mcp_dev_attrib_emulate_rest_reord.attr, | ||
656 | &rd_mcp_dev_attrib_force_pr_aptpl.attr, | ||
657 | &rd_mcp_dev_attrib_hw_block_size.attr, | ||
658 | &rd_mcp_dev_attrib_block_size.attr, | ||
659 | &rd_mcp_dev_attrib_hw_max_sectors.attr, | ||
660 | &rd_mcp_dev_attrib_fabric_max_sectors.attr, | ||
661 | &rd_mcp_dev_attrib_optimal_sectors.attr, | ||
662 | &rd_mcp_dev_attrib_hw_queue_depth.attr, | ||
663 | &rd_mcp_dev_attrib_queue_depth.attr, | ||
664 | &rd_mcp_dev_attrib_max_unmap_lba_count.attr, | ||
665 | &rd_mcp_dev_attrib_max_unmap_block_desc_count.attr, | ||
666 | &rd_mcp_dev_attrib_unmap_granularity.attr, | ||
667 | &rd_mcp_dev_attrib_unmap_granularity_alignment.attr, | ||
668 | &rd_mcp_dev_attrib_max_write_same_len.attr, | ||
669 | NULL, | ||
670 | }; | ||
671 | |||
635 | static struct se_subsystem_api rd_mcp_template = { | 672 | static struct se_subsystem_api rd_mcp_template = { |
636 | .name = "rd_mcp", | 673 | .name = "rd_mcp", |
637 | .inquiry_prod = "RAMDISK-MCP", | 674 | .inquiry_prod = "RAMDISK-MCP", |
@@ -653,8 +690,12 @@ static struct se_subsystem_api rd_mcp_template = { | |||
653 | 690 | ||
654 | int __init rd_module_init(void) | 691 | int __init rd_module_init(void) |
655 | { | 692 | { |
693 | struct target_backend_cits *tbc = &rd_mcp_template.tb_cits; | ||
656 | int ret; | 694 | int ret; |
657 | 695 | ||
696 | target_core_setup_sub_cits(&rd_mcp_template); | ||
697 | tbc->tb_dev_attrib_cit.ct_attrs = rd_mcp_backend_dev_attrs; | ||
698 | |||
658 | ret = transport_subsystem_register(&rd_mcp_template); | 699 | ret = transport_subsystem_register(&rd_mcp_template); |
659 | if (ret < 0) { | 700 | if (ret < 0) { |
660 | return ret; | 701 | return ret; |
diff --git a/drivers/target/target_core_user.c b/drivers/target/target_core_user.c index 9a1b314f6482..8bfa61c9693d 100644 --- a/drivers/target/target_core_user.c +++ b/drivers/target/target_core_user.c | |||
@@ -28,6 +28,8 @@ | |||
28 | #include <target/target_core_base.h> | 28 | #include <target/target_core_base.h> |
29 | #include <target/target_core_fabric.h> | 29 | #include <target/target_core_fabric.h> |
30 | #include <target/target_core_backend.h> | 30 | #include <target/target_core_backend.h> |
31 | #include <target/target_core_backend_configfs.h> | ||
32 | |||
31 | #include <linux/target_core_user.h> | 33 | #include <linux/target_core_user.h> |
32 | 34 | ||
33 | /* | 35 | /* |
@@ -1092,6 +1094,42 @@ tcmu_parse_cdb(struct se_cmd *cmd) | |||
1092 | return ret; | 1094 | return ret; |
1093 | } | 1095 | } |
1094 | 1096 | ||
1097 | DEF_TB_DEFAULT_ATTRIBS(tcmu); | ||
1098 | |||
1099 | static struct configfs_attribute *tcmu_backend_dev_attrs[] = { | ||
1100 | &tcmu_dev_attrib_emulate_model_alias.attr, | ||
1101 | &tcmu_dev_attrib_emulate_dpo.attr, | ||
1102 | &tcmu_dev_attrib_emulate_fua_write.attr, | ||
1103 | &tcmu_dev_attrib_emulate_fua_read.attr, | ||
1104 | &tcmu_dev_attrib_emulate_write_cache.attr, | ||
1105 | &tcmu_dev_attrib_emulate_ua_intlck_ctrl.attr, | ||
1106 | &tcmu_dev_attrib_emulate_tas.attr, | ||
1107 | &tcmu_dev_attrib_emulate_tpu.attr, | ||
1108 | &tcmu_dev_attrib_emulate_tpws.attr, | ||
1109 | &tcmu_dev_attrib_emulate_caw.attr, | ||
1110 | &tcmu_dev_attrib_emulate_3pc.attr, | ||
1111 | &tcmu_dev_attrib_pi_prot_type.attr, | ||
1112 | &tcmu_dev_attrib_hw_pi_prot_type.attr, | ||
1113 | &tcmu_dev_attrib_pi_prot_format.attr, | ||
1114 | &tcmu_dev_attrib_enforce_pr_isids.attr, | ||
1115 | &tcmu_dev_attrib_is_nonrot.attr, | ||
1116 | &tcmu_dev_attrib_emulate_rest_reord.attr, | ||
1117 | &tcmu_dev_attrib_force_pr_aptpl.attr, | ||
1118 | &tcmu_dev_attrib_hw_block_size.attr, | ||
1119 | &tcmu_dev_attrib_block_size.attr, | ||
1120 | &tcmu_dev_attrib_hw_max_sectors.attr, | ||
1121 | &tcmu_dev_attrib_fabric_max_sectors.attr, | ||
1122 | &tcmu_dev_attrib_optimal_sectors.attr, | ||
1123 | &tcmu_dev_attrib_hw_queue_depth.attr, | ||
1124 | &tcmu_dev_attrib_queue_depth.attr, | ||
1125 | &tcmu_dev_attrib_max_unmap_lba_count.attr, | ||
1126 | &tcmu_dev_attrib_max_unmap_block_desc_count.attr, | ||
1127 | &tcmu_dev_attrib_unmap_granularity.attr, | ||
1128 | &tcmu_dev_attrib_unmap_granularity_alignment.attr, | ||
1129 | &tcmu_dev_attrib_max_write_same_len.attr, | ||
1130 | NULL, | ||
1131 | }; | ||
1132 | |||
1095 | static struct se_subsystem_api tcmu_template = { | 1133 | static struct se_subsystem_api tcmu_template = { |
1096 | .name = "user", | 1134 | .name = "user", |
1097 | .inquiry_prod = "USER", | 1135 | .inquiry_prod = "USER", |
@@ -1112,6 +1150,7 @@ static struct se_subsystem_api tcmu_template = { | |||
1112 | 1150 | ||
1113 | static int __init tcmu_module_init(void) | 1151 | static int __init tcmu_module_init(void) |
1114 | { | 1152 | { |
1153 | struct target_backend_cits *tbc = &tcmu_template.tb_cits; | ||
1115 | int ret; | 1154 | int ret; |
1116 | 1155 | ||
1117 | BUILD_BUG_ON((sizeof(struct tcmu_cmd_entry) % TCMU_OP_ALIGN_SIZE) != 0); | 1156 | BUILD_BUG_ON((sizeof(struct tcmu_cmd_entry) % TCMU_OP_ALIGN_SIZE) != 0); |
@@ -1134,6 +1173,9 @@ static int __init tcmu_module_init(void) | |||
1134 | goto out_unreg_device; | 1173 | goto out_unreg_device; |
1135 | } | 1174 | } |
1136 | 1175 | ||
1176 | target_core_setup_sub_cits(&tcmu_template); | ||
1177 | tbc->tb_dev_attrib_cit.ct_attrs = tcmu_backend_dev_attrs; | ||
1178 | |||
1137 | ret = transport_subsystem_register(&tcmu_template); | 1179 | ret = transport_subsystem_register(&tcmu_template); |
1138 | if (ret) | 1180 | if (ret) |
1139 | goto out_unreg_genl; | 1181 | goto out_unreg_genl; |
diff --git a/include/target/target_core_backend.h b/include/target/target_core_backend.h index 9adc1bca1178..430cfaf92285 100644 --- a/include/target/target_core_backend.h +++ b/include/target/target_core_backend.h | |||
@@ -5,6 +5,15 @@ | |||
5 | #define TRANSPORT_PLUGIN_VHBA_PDEV 2 | 5 | #define TRANSPORT_PLUGIN_VHBA_PDEV 2 |
6 | #define TRANSPORT_PLUGIN_VHBA_VDEV 3 | 6 | #define TRANSPORT_PLUGIN_VHBA_VDEV 3 |
7 | 7 | ||
8 | struct target_backend_cits { | ||
9 | struct config_item_type tb_dev_cit; | ||
10 | struct config_item_type tb_dev_attrib_cit; | ||
11 | struct config_item_type tb_dev_pr_cit; | ||
12 | struct config_item_type tb_dev_wwn_cit; | ||
13 | struct config_item_type tb_dev_alua_tg_pt_gps_cit; | ||
14 | struct config_item_type tb_dev_stat_cit; | ||
15 | }; | ||
16 | |||
8 | struct se_subsystem_api { | 17 | struct se_subsystem_api { |
9 | struct list_head sub_api_list; | 18 | struct list_head sub_api_list; |
10 | 19 | ||
@@ -44,6 +53,8 @@ struct se_subsystem_api { | |||
44 | int (*init_prot)(struct se_device *); | 53 | int (*init_prot)(struct se_device *); |
45 | int (*format_prot)(struct se_device *); | 54 | int (*format_prot)(struct se_device *); |
46 | void (*free_prot)(struct se_device *); | 55 | void (*free_prot)(struct se_device *); |
56 | |||
57 | struct target_backend_cits tb_cits; | ||
47 | }; | 58 | }; |
48 | 59 | ||
49 | struct sbc_ops { | 60 | struct sbc_ops { |
@@ -96,4 +107,36 @@ sense_reason_t transport_generic_map_mem_to_cmd(struct se_cmd *, | |||
96 | 107 | ||
97 | void array_free(void *array, int n); | 108 | void array_free(void *array, int n); |
98 | 109 | ||
110 | /* From target_core_configfs.c to setup default backend config_item_types */ | ||
111 | void target_core_setup_sub_cits(struct se_subsystem_api *); | ||
112 | |||
113 | /* attribute helpers from target_core_device.c for backend drivers */ | ||
114 | int se_dev_set_max_unmap_lba_count(struct se_device *, u32); | ||
115 | int se_dev_set_max_unmap_block_desc_count(struct se_device *, u32); | ||
116 | int se_dev_set_unmap_granularity(struct se_device *, u32); | ||
117 | int se_dev_set_unmap_granularity_alignment(struct se_device *, u32); | ||
118 | int se_dev_set_max_write_same_len(struct se_device *, u32); | ||
119 | int se_dev_set_emulate_model_alias(struct se_device *, int); | ||
120 | int se_dev_set_emulate_dpo(struct se_device *, int); | ||
121 | int se_dev_set_emulate_fua_write(struct se_device *, int); | ||
122 | int se_dev_set_emulate_fua_read(struct se_device *, int); | ||
123 | int se_dev_set_emulate_write_cache(struct se_device *, int); | ||
124 | int se_dev_set_emulate_ua_intlck_ctrl(struct se_device *, int); | ||
125 | int se_dev_set_emulate_tas(struct se_device *, int); | ||
126 | int se_dev_set_emulate_tpu(struct se_device *, int); | ||
127 | int se_dev_set_emulate_tpws(struct se_device *, int); | ||
128 | int se_dev_set_emulate_caw(struct se_device *, int); | ||
129 | int se_dev_set_emulate_3pc(struct se_device *, int); | ||
130 | int se_dev_set_pi_prot_type(struct se_device *, int); | ||
131 | int se_dev_set_pi_prot_format(struct se_device *, int); | ||
132 | int se_dev_set_enforce_pr_isids(struct se_device *, int); | ||
133 | int se_dev_set_force_pr_aptpl(struct se_device *, int); | ||
134 | int se_dev_set_is_nonrot(struct se_device *, int); | ||
135 | int se_dev_set_emulate_rest_reord(struct se_device *dev, int); | ||
136 | int se_dev_set_queue_depth(struct se_device *, u32); | ||
137 | int se_dev_set_max_sectors(struct se_device *, u32); | ||
138 | int se_dev_set_fabric_max_sectors(struct se_device *, u32); | ||
139 | int se_dev_set_optimal_sectors(struct se_device *, u32); | ||
140 | int se_dev_set_block_size(struct se_device *, u32); | ||
141 | |||
99 | #endif /* TARGET_CORE_BACKEND_H */ | 142 | #endif /* TARGET_CORE_BACKEND_H */ |
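Note the pairing with the target_core_internal.h hunk above: the se_dev_set_*() prototypes were not deleted, they moved into this public backend header so that backend modules built outside drivers/target can reach them. With that, an external backend could also open-code a store handler instead of going through DEF_TB_DEV_ATTRIB_STORE; a sketch under the same assumptions as before (the "mydisk" name is invented):

/* Sketch: open-coded store handler using the now-public helper.
 * Assumes the usual backend includes (target_core_base.h,
 * target_core_backend.h). */
static ssize_t mydisk_dev_store_attr_emulate_write_cache(
	struct se_dev_attrib *da, const char *page, size_t count)
{
	unsigned long val;
	int ret;

	ret = kstrtoul(page, 0, &val);
	if (ret < 0)
		return -EINVAL;

	ret = se_dev_set_emulate_write_cache(da->da_dev, (int)val);
	return ret ? -EINVAL : count;
}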
diff --git a/include/target/target_core_backend_configfs.h b/include/target/target_core_backend_configfs.h new file mode 100644 index 000000000000..3247d7530107 --- /dev/null +++ b/include/target/target_core_backend_configfs.h | |||
@@ -0,0 +1,120 @@ | |||
1 | #ifndef TARGET_CORE_BACKEND_CONFIGFS_H | ||
2 | #define TARGET_CORE_BACKEND_CONFIGFS_H | ||
3 | |||
4 | #include <target/configfs_macros.h> | ||
5 | |||
6 | #define DEF_TB_DEV_ATTRIB_SHOW(_backend, _name) \ | ||
7 | static ssize_t _backend##_dev_show_attr_##_name( \ | ||
8 | struct se_dev_attrib *da, \ | ||
9 | char *page) \ | ||
10 | { \ | ||
11 | return snprintf(page, PAGE_SIZE, "%u\n", \ | ||
12 | (u32)da->da_dev->dev_attrib._name); \ | ||
13 | } | ||
14 | |||
15 | #define DEF_TB_DEV_ATTRIB_STORE(_backend, _name) \ | ||
16 | static ssize_t _backend##_dev_store_attr_##_name( \ | ||
17 | struct se_dev_attrib *da, \ | ||
18 | const char *page, \ | ||
19 | size_t count) \ | ||
20 | { \ | ||
21 | unsigned long val; \ | ||
22 | int ret; \ | ||
23 | \ | ||
24 | ret = kstrtoul(page, 0, &val); \ | ||
25 | if (ret < 0) { \ | ||
26 | pr_err("kstrtoul() failed with ret: %d\n", ret); \ | ||
27 | return -EINVAL; \ | ||
28 | } \ | ||
29 | ret = se_dev_set_##_name(da->da_dev, (u32)val); \ | ||
30 | \ | ||
31 | return (!ret) ? count : -EINVAL; \ | ||
32 | } | ||
33 | |||
34 | #define DEF_TB_DEV_ATTRIB(_backend, _name) \ | ||
35 | DEF_TB_DEV_ATTRIB_SHOW(_backend, _name); \ | ||
36 | DEF_TB_DEV_ATTRIB_STORE(_backend, _name); | ||
37 | |||
38 | #define DEF_TB_DEV_ATTRIB_RO(_backend, name) \ | ||
39 | DEF_TB_DEV_ATTRIB_SHOW(_backend, name); | ||
40 | |||
41 | CONFIGFS_EATTR_STRUCT(target_backend_dev_attrib, se_dev_attrib); | ||
42 | #define TB_DEV_ATTR(_backend, _name, _mode) \ | ||
43 | static struct target_backend_dev_attrib_attribute _backend##_dev_attrib_##_name = \ | ||
44 | __CONFIGFS_EATTR(_name, _mode, \ | ||
45 | _backend##_dev_show_attr_##_name, \ | ||
46 | _backend##_dev_store_attr_##_name); | ||
47 | |||
48 | #define TB_DEV_ATTR_RO(_backend, _name) \ | ||
49 | static struct target_backend_dev_attrib_attribute _backend##_dev_attrib_##_name = \ | ||
50 | __CONFIGFS_EATTR_RO(_name, \ | ||
51 | _backend##_dev_show_attr_##_name); | ||
52 | |||
53 | /* | ||
54 | * Default list of target backend device attributes as defined by | ||
55 | * struct se_dev_attrib | ||
56 | */ | ||
57 | |||
58 | #define DEF_TB_DEFAULT_ATTRIBS(_backend) \ | ||
59 | DEF_TB_DEV_ATTRIB(_backend, emulate_model_alias); \ | ||
60 | TB_DEV_ATTR(_backend, emulate_model_alias, S_IRUGO | S_IWUSR); \ | ||
61 | DEF_TB_DEV_ATTRIB(_backend, emulate_dpo); \ | ||
62 | TB_DEV_ATTR(_backend, emulate_dpo, S_IRUGO | S_IWUSR); \ | ||
63 | DEF_TB_DEV_ATTRIB(_backend, emulate_fua_write); \ | ||
64 | TB_DEV_ATTR(_backend, emulate_fua_write, S_IRUGO | S_IWUSR); \ | ||
65 | DEF_TB_DEV_ATTRIB(_backend, emulate_fua_read); \ | ||
66 | TB_DEV_ATTR(_backend, emulate_fua_read, S_IRUGO | S_IWUSR); \ | ||
67 | DEF_TB_DEV_ATTRIB(_backend, emulate_write_cache); \ | ||
68 | TB_DEV_ATTR(_backend, emulate_write_cache, S_IRUGO | S_IWUSR); \ | ||
69 | DEF_TB_DEV_ATTRIB(_backend, emulate_ua_intlck_ctrl); \ | ||
70 | TB_DEV_ATTR(_backend, emulate_ua_intlck_ctrl, S_IRUGO | S_IWUSR); \ | ||
71 | DEF_TB_DEV_ATTRIB(_backend, emulate_tas); \ | ||
72 | TB_DEV_ATTR(_backend, emulate_tas, S_IRUGO | S_IWUSR); \ | ||
73 | DEF_TB_DEV_ATTRIB(_backend, emulate_tpu); \ | ||
74 | TB_DEV_ATTR(_backend, emulate_tpu, S_IRUGO | S_IWUSR); \ | ||
75 | DEF_TB_DEV_ATTRIB(_backend, emulate_tpws); \ | ||
76 | TB_DEV_ATTR(_backend, emulate_tpws, S_IRUGO | S_IWUSR); \ | ||
77 | DEF_TB_DEV_ATTRIB(_backend, emulate_caw); \ | ||
78 | TB_DEV_ATTR(_backend, emulate_caw, S_IRUGO | S_IWUSR); \ | ||
79 | DEF_TB_DEV_ATTRIB(_backend, emulate_3pc); \ | ||
80 | TB_DEV_ATTR(_backend, emulate_3pc, S_IRUGO | S_IWUSR); \ | ||
81 | DEF_TB_DEV_ATTRIB(_backend, pi_prot_type); \ | ||
82 | TB_DEV_ATTR(_backend, pi_prot_type, S_IRUGO | S_IWUSR); \ | ||
83 | DEF_TB_DEV_ATTRIB_RO(_backend, hw_pi_prot_type); \ | ||
84 | TB_DEV_ATTR_RO(_backend, hw_pi_prot_type); \ | ||
85 | DEF_TB_DEV_ATTRIB(_backend, pi_prot_format); \ | ||
86 | TB_DEV_ATTR(_backend, pi_prot_format, S_IRUGO | S_IWUSR); \ | ||
87 | DEF_TB_DEV_ATTRIB(_backend, enforce_pr_isids); \ | ||
88 | TB_DEV_ATTR(_backend, enforce_pr_isids, S_IRUGO | S_IWUSR); \ | ||
89 | DEF_TB_DEV_ATTRIB(_backend, is_nonrot); \ | ||
90 | TB_DEV_ATTR(_backend, is_nonrot, S_IRUGO | S_IWUSR); \ | ||
91 | DEF_TB_DEV_ATTRIB(_backend, emulate_rest_reord); \ | ||
92 | TB_DEV_ATTR(_backend, emulate_rest_reord, S_IRUGO | S_IWUSR); \ | ||
93 | DEF_TB_DEV_ATTRIB(_backend, force_pr_aptpl); \ | ||
94 | TB_DEV_ATTR(_backend, force_pr_aptpl, S_IRUGO | S_IWUSR); \ | ||
95 | DEF_TB_DEV_ATTRIB_RO(_backend, hw_block_size); \ | ||
96 | TB_DEV_ATTR_RO(_backend, hw_block_size); \ | ||
97 | DEF_TB_DEV_ATTRIB(_backend, block_size); \ | ||
98 | TB_DEV_ATTR(_backend, block_size, S_IRUGO | S_IWUSR); \ | ||
99 | DEF_TB_DEV_ATTRIB_RO(_backend, hw_max_sectors); \ | ||
100 | TB_DEV_ATTR_RO(_backend, hw_max_sectors); \ | ||
101 | DEF_TB_DEV_ATTRIB(_backend, fabric_max_sectors); \ | ||
102 | TB_DEV_ATTR(_backend, fabric_max_sectors, S_IRUGO | S_IWUSR); \ | ||
103 | DEF_TB_DEV_ATTRIB(_backend, optimal_sectors); \ | ||
104 | TB_DEV_ATTR(_backend, optimal_sectors, S_IRUGO | S_IWUSR); \ | ||
105 | DEF_TB_DEV_ATTRIB_RO(_backend, hw_queue_depth); \ | ||
106 | TB_DEV_ATTR_RO(_backend, hw_queue_depth); \ | ||
107 | DEF_TB_DEV_ATTRIB(_backend, queue_depth); \ | ||
108 | TB_DEV_ATTR(_backend, queue_depth, S_IRUGO | S_IWUSR); \ | ||
109 | DEF_TB_DEV_ATTRIB(_backend, max_unmap_lba_count); \ | ||
110 | TB_DEV_ATTR(_backend, max_unmap_lba_count, S_IRUGO | S_IWUSR); \ | ||
111 | DEF_TB_DEV_ATTRIB(_backend, max_unmap_block_desc_count); \ | ||
112 | TB_DEV_ATTR(_backend, max_unmap_block_desc_count, S_IRUGO | S_IWUSR); \ | ||
113 | DEF_TB_DEV_ATTRIB(_backend, unmap_granularity); \ | ||
114 | TB_DEV_ATTR(_backend, unmap_granularity, S_IRUGO | S_IWUSR); \ | ||
115 | DEF_TB_DEV_ATTRIB(_backend, unmap_granularity_alignment); \ | ||
116 | TB_DEV_ATTR(_backend, unmap_granularity_alignment, S_IRUGO | S_IWUSR); \ | ||
117 | DEF_TB_DEV_ATTRIB(_backend, max_write_same_len); \ | ||
118 | TB_DEV_ATTR(_backend, max_write_same_len, S_IRUGO | S_IWUSR); | ||
119 | |||
120 | #endif /* TARGET_CORE_BACKEND_CONFIGFS_H */ | ||
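To make the macro layering concrete, here is a hand expansion of DEF_TB_DEV_ATTRIB(iblock, block_size) plus TB_DEV_ATTR(iblock, block_size, S_IRUGO | S_IWUSR), i.e. one entry of the DEF_TB_DEFAULT_ATTRIBS(iblock) invocation used in the iblock hunk above (whitespace added for readability):

/* Hand expansion of DEF_TB_DEV_ATTRIB(iblock, block_size) and
 * TB_DEV_ATTR(iblock, block_size, S_IRUGO | S_IWUSR). */
static ssize_t iblock_dev_show_attr_block_size(struct se_dev_attrib *da,
					       char *page)
{
	return snprintf(page, PAGE_SIZE, "%u\n",
			(u32)da->da_dev->dev_attrib.block_size);
}

static ssize_t iblock_dev_store_attr_block_size(struct se_dev_attrib *da,
						const char *page, size_t count)
{
	unsigned long val;
	int ret;

	ret = kstrtoul(page, 0, &val);
	if (ret < 0) {
		pr_err("kstrtoul() failed with ret: %d\n", ret);
		return -EINVAL;
	}
	ret = se_dev_set_block_size(da->da_dev, (u32)val);

	return (!ret) ? count : -EINVAL;
}

static struct target_backend_dev_attrib_attribute iblock_dev_attrib_block_size =
	__CONFIGFS_EATTR(block_size, S_IRUGO | S_IWUSR,
			 iblock_dev_show_attr_block_size,
			 iblock_dev_store_attr_block_size);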
diff --git a/include/uapi/linux/target_core_user.h b/include/uapi/linux/target_core_user.h index 7dcfbe6771b1..b483d1909d3e 100644 --- a/include/uapi/linux/target_core_user.h +++ b/include/uapi/linux/target_core_user.h | |||
@@ -6,10 +6,6 @@ | |||
6 | #include <linux/types.h> | 6 | #include <linux/types.h> |
7 | #include <linux/uio.h> | 7 | #include <linux/uio.h> |
8 | 8 | ||
9 | #ifndef __packed | ||
10 | #define __packed __attribute__((packed)) | ||
11 | #endif | ||
12 | |||
13 | #define TCMU_VERSION "1.0" | 9 | #define TCMU_VERSION "1.0" |
14 | 10 | ||
15 | /* | 11 | /* |