Diffstat (limited to 'drivers/scsi/iscsi_tcp.c')
-rw-r--r-- | drivers/scsi/iscsi_tcp.c | 2075 |
1 file changed, 892 insertions, 1183 deletions
diff --git a/drivers/scsi/iscsi_tcp.c b/drivers/scsi/iscsi_tcp.c
index 57ce2251abc8..e5be5fd4ef58 100644
--- a/drivers/scsi/iscsi_tcp.c
+++ b/drivers/scsi/iscsi_tcp.c
@@ -48,7 +48,7 @@ MODULE_AUTHOR("Dmitry Yusupov <dmitry_yus@yahoo.com>, " | |||
48 | "Alex Aizman <itn780@yahoo.com>"); | 48 | "Alex Aizman <itn780@yahoo.com>"); |
49 | MODULE_DESCRIPTION("iSCSI/TCP data-path"); | 49 | MODULE_DESCRIPTION("iSCSI/TCP data-path"); |
50 | MODULE_LICENSE("GPL"); | 50 | MODULE_LICENSE("GPL"); |
51 | /* #define DEBUG_TCP */ | 51 | #undef DEBUG_TCP |
52 | #define DEBUG_ASSERT | 52 | #define DEBUG_ASSERT |
53 | 53 | ||
54 | #ifdef DEBUG_TCP | 54 | #ifdef DEBUG_TCP |
@@ -67,115 +67,429 @@ MODULE_LICENSE("GPL"); | |||
67 | static unsigned int iscsi_max_lun = 512; | 67 | static unsigned int iscsi_max_lun = 512; |
68 | module_param_named(max_lun, iscsi_max_lun, uint, S_IRUGO); | 68 | module_param_named(max_lun, iscsi_max_lun, uint, S_IRUGO); |
69 | 69 | ||
70 | static int iscsi_tcp_hdr_recv_done(struct iscsi_tcp_conn *tcp_conn, | ||
71 | struct iscsi_segment *segment); | ||
72 | |||
73 | /* | ||
74 | * Scatterlist handling: inside the iscsi_segment, we | ||
75 | * remember an index into the scatterlist, and set data/size | ||
76 | * to the current scatterlist entry. For highmem pages, we | ||
77 | * kmap as needed. | ||
78 | * | ||
79 | * Note that the page is unmapped when we return from | ||
80 | * TCP's data_ready handler, so we may end up mapping and | ||
81 | * unmapping the same page repeatedly. The whole reason | ||
82 | * for this is that we shouldn't keep the page mapped | ||
83 | * outside the softirq. | ||
84 | */ | ||
85 | |||
86 | /** | ||
87 | * iscsi_tcp_segment_init_sg - init indicated scatterlist entry | ||
88 | * @segment: the buffer object | ||
89 | * @sg: scatterlist | ||
90 | * @offset: byte offset into that sg entry | ||
91 | * | ||
92 | * This function sets up the segment so that subsequent | ||
93 | * data is copied to the indicated sg entry, at the given | ||
94 | * offset. | ||
95 | */ | ||
70 | static inline void | 96 | static inline void |
71 | iscsi_buf_init_iov(struct iscsi_buf *ibuf, char *vbuf, int size) | 97 | iscsi_tcp_segment_init_sg(struct iscsi_segment *segment, |
98 | struct scatterlist *sg, unsigned int offset) | ||
72 | { | 99 | { |
73 | sg_init_one(&ibuf->sg, vbuf, size); | 100 | segment->sg = sg; |
74 | ibuf->sent = 0; | 101 | segment->sg_offset = offset; |
75 | ibuf->use_sendmsg = 1; | 102 | segment->size = min(sg->length - offset, |
103 | segment->total_size - segment->total_copied); | ||
104 | segment->data = NULL; | ||
76 | } | 105 | } |
77 | 106 | ||
107 | /** | ||
108 | * iscsi_tcp_segment_map - map the current S/G page | ||
109 | * @segment: iscsi_segment | ||
110 | * @recv: 1 if called from recv path | ||
111 | * | ||
112 | * We only need to possibly kmap data if scatter lists are being used, | ||
113 | * because the iscsi passthrough and internal IO paths will never use high | ||
114 | * mem pages. | ||
115 | */ | ||
78 | static inline void | 116 | static inline void |
79 | iscsi_buf_init_sg(struct iscsi_buf *ibuf, struct scatterlist *sg) | 117 | iscsi_tcp_segment_map(struct iscsi_segment *segment, int recv) |
80 | { | 118 | { |
81 | sg_init_table(&ibuf->sg, 1); | 119 | struct scatterlist *sg; |
82 | sg_set_page(&ibuf->sg, sg_page(sg), sg->length, sg->offset); | 120 | |
121 | if (segment->data != NULL || !segment->sg) | ||
122 | return; | ||
123 | |||
124 | sg = segment->sg; | ||
125 | BUG_ON(segment->sg_mapped); | ||
126 | BUG_ON(sg->length == 0); | ||
127 | |||
83 | /* | 128 | /* |
84 | * Fastpath: sg element fits into single page | 129 | * If the page count is greater than one it is ok to send |
130 | * to the network layer's zero copy send path. If not we | ||
131 | * have to go the slow sendmsg path. We always map for the | ||
132 | * recv path. | ||
85 | */ | 133 | */ |
86 | if (sg->length + sg->offset <= PAGE_SIZE && !PageSlab(sg_page(sg))) | 134 | if (page_count(sg_page(sg)) >= 1 && !recv) |
87 | ibuf->use_sendmsg = 0; | 135 | return; |
88 | else | 136 | |
89 | ibuf->use_sendmsg = 1; | 137 | debug_tcp("iscsi_tcp_segment_map %s %p\n", recv ? "recv" : "xmit", |
90 | ibuf->sent = 0; | 138 | segment); |
139 | segment->sg_mapped = kmap_atomic(sg_page(sg), KM_SOFTIRQ0); | ||
140 | segment->data = segment->sg_mapped + sg->offset + segment->sg_offset; | ||
91 | } | 141 | } |
92 | 142 | ||
93 | static inline int | 143 | static inline void |
94 | iscsi_buf_left(struct iscsi_buf *ibuf) | 144 | iscsi_tcp_segment_unmap(struct iscsi_segment *segment) |
95 | { | 145 | { |
96 | int rc; | 146 | debug_tcp("iscsi_tcp_segment_unmap %p\n", segment); |
97 | 147 | ||
98 | rc = ibuf->sg.length - ibuf->sent; | 148 | if (segment->sg_mapped) { |
99 | BUG_ON(rc < 0); | 149 | debug_tcp("iscsi_tcp_segment_unmap valid\n"); |
100 | return rc; | 150 | kunmap_atomic(segment->sg_mapped, KM_SOFTIRQ0); |
151 | segment->sg_mapped = NULL; | ||
152 | segment->data = NULL; | ||
153 | } | ||
101 | } | 154 | } |
102 | 155 | ||
156 | /* | ||
157 | * Splice the digest buffer into the buffer | ||
158 | */ | ||
103 | static inline void | 159 | static inline void |
104 | iscsi_hdr_digest(struct iscsi_conn *conn, struct iscsi_buf *buf, | 160 | iscsi_tcp_segment_splice_digest(struct iscsi_segment *segment, void *digest) |
105 | u8* crc) | ||
106 | { | 161 | { |
107 | struct iscsi_tcp_conn *tcp_conn = conn->dd_data; | 162 | segment->data = digest; |
108 | 163 | segment->digest_len = ISCSI_DIGEST_SIZE; | |
109 | crypto_hash_digest(&tcp_conn->tx_hash, &buf->sg, buf->sg.length, crc); | 164 | segment->total_size += ISCSI_DIGEST_SIZE; |
110 | buf->sg.length += sizeof(u32); | 165 | segment->size = ISCSI_DIGEST_SIZE; |
166 | segment->copied = 0; | ||
167 | segment->sg = NULL; | ||
168 | segment->hash = NULL; | ||
111 | } | 169 | } |
112 | 170 | ||
171 | /** | ||
172 | * iscsi_tcp_segment_done - check whether the segment is complete | ||
173 | * @segment: iscsi segment to check | ||
174 | * @recv: set to one if this is called from the recv path | ||
175 | * @copied: number of bytes copied | ||
176 | * | ||
177 | * Check if we're done receiving this segment. If the receive | ||
178 | * buffer is full but we expect more data, move on to the | ||
179 | * next entry in the scatterlist. | ||
180 | * | ||
181 | * If the amount of data we received isn't a multiple of 4, | ||
182 | * we will transparently receive the pad bytes, too. | ||
183 | * | ||
184 | * This function must be re-entrant. | ||
185 | */ | ||
113 | static inline int | 186 | static inline int |
114 | iscsi_hdr_extract(struct iscsi_tcp_conn *tcp_conn) | 187 | iscsi_tcp_segment_done(struct iscsi_segment *segment, int recv, unsigned copied) |
115 | { | 188 | { |
116 | struct sk_buff *skb = tcp_conn->in.skb; | 189 | static unsigned char padbuf[ISCSI_PAD_LEN]; |
117 | 190 | struct scatterlist sg; | |
118 | tcp_conn->in.zero_copy_hdr = 0; | 191 | unsigned int pad; |
119 | 192 | ||
120 | if (tcp_conn->in.copy >= tcp_conn->hdr_size && | 193 | debug_tcp("copied %u %u size %u %s\n", segment->copied, copied, |
121 | tcp_conn->in_progress == IN_PROGRESS_WAIT_HEADER) { | 194 | segment->size, recv ? "recv" : "xmit"); |
195 | if (segment->hash && copied) { | ||
122 | /* | 196 | /* |
123 | * Zero-copy PDU Header: using connection context | 197 | * If a segment is kmapped we must unmap it before sending |
124 | * to store header pointer. | 198 | * to the crypto layer since that will try to kmap it again. |
125 | */ | 199 | */ |
126 | if (skb_shinfo(skb)->frag_list == NULL && | 200 | iscsi_tcp_segment_unmap(segment); |
127 | !skb_shinfo(skb)->nr_frags) { | 201 | |
128 | tcp_conn->in.hdr = (struct iscsi_hdr *) | 202 | if (!segment->data) { |
129 | ((char*)skb->data + tcp_conn->in.offset); | 203 | sg_init_table(&sg, 1); |
130 | tcp_conn->in.zero_copy_hdr = 1; | 204 | sg_set_page(&sg, sg_page(segment->sg), copied, |
205 | segment->copied + segment->sg_offset + | ||
206 | segment->sg->offset); | ||
207 | } else | ||
208 | sg_init_one(&sg, segment->data + segment->copied, | ||
209 | copied); | ||
210 | crypto_hash_update(segment->hash, &sg, copied); | ||
211 | } | ||
212 | |||
213 | segment->copied += copied; | ||
214 | if (segment->copied < segment->size) { | ||
215 | iscsi_tcp_segment_map(segment, recv); | ||
216 | return 0; | ||
217 | } | ||
218 | |||
219 | segment->total_copied += segment->copied; | ||
220 | segment->copied = 0; | ||
221 | segment->size = 0; | ||
222 | |||
223 | /* Unmap the current scatterlist page, if there is one. */ | ||
224 | iscsi_tcp_segment_unmap(segment); | ||
225 | |||
226 | /* Do we have more scatterlist entries? */ | ||
227 | debug_tcp("total copied %u total size %u\n", segment->total_copied, | ||
228 | segment->total_size); | ||
229 | if (segment->total_copied < segment->total_size) { | ||
230 | /* Proceed to the next entry in the scatterlist. */ | ||
231 | iscsi_tcp_segment_init_sg(segment, sg_next(segment->sg), | ||
232 | 0); | ||
233 | iscsi_tcp_segment_map(segment, recv); | ||
234 | BUG_ON(segment->size == 0); | ||
235 | return 0; | ||
236 | } | ||
237 | |||
238 | /* Do we need to handle padding? */ | ||
239 | pad = iscsi_padding(segment->total_copied); | ||
240 | if (pad != 0) { | ||
241 | debug_tcp("consume %d pad bytes\n", pad); | ||
242 | segment->total_size += pad; | ||
243 | segment->size = pad; | ||
244 | segment->data = padbuf; | ||
245 | return 0; | ||
246 | } | ||
247 | |||
248 | /* | ||
249 | * Set us up for transferring the data digest. hdr digest | ||
250 | * is completely handled in the hdr done function. | ||
251 | */ | ||
252 | if (segment->hash) { | ||
253 | crypto_hash_final(segment->hash, segment->digest); | ||
254 | iscsi_tcp_segment_splice_digest(segment, | ||
255 | recv ? segment->recv_digest : segment->digest); | ||
256 | return 0; | ||
257 | } | ||
258 | |||
259 | return 1; | ||
260 | } | ||
261 | |||
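/*
 * Editor's note -- illustrative sketch, not part of this patch:
 * iscsi_padding() used above comes from the libiscsi headers and is
 * assumed to round the copied byte count up to the next 4-byte
 * boundary, returning only the number of pad bytes to consume.
 */
static inline unsigned int example_iscsi_padding(unsigned int len)
{
	unsigned int pad = len & (ISCSI_PAD_LEN - 1);	/* len % 4 */

	if (pad)
		pad = ISCSI_PAD_LEN - pad;	/* e.g. len 13 -> 3, len 16 -> 0 */
	return pad;
}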
262 | /** | ||
263 | * iscsi_tcp_xmit_segment - transmit segment | ||
264 | * @tcp_conn: the iSCSI TCP connection | ||
265 | * @segment: the buffer to transmit | ||
266 | * | ||
267 | * This function transmits as much of the buffer as | ||
268 | * the network layer will accept, and returns the number of | ||
269 | * bytes transmitted. | ||
270 | * | ||
271 | * If CRC hashing is enabled, the function will compute the | ||
272 | * hash as it goes. When the entire segment has been transmitted, | ||
273 | * it will retrieve the hash value and send it as well. | ||
274 | */ | ||
275 | static int | ||
276 | iscsi_tcp_xmit_segment(struct iscsi_tcp_conn *tcp_conn, | ||
277 | struct iscsi_segment *segment) | ||
278 | { | ||
279 | struct socket *sk = tcp_conn->sock; | ||
280 | unsigned int copied = 0; | ||
281 | int r = 0; | ||
282 | |||
283 | while (!iscsi_tcp_segment_done(segment, 0, r)) { | ||
284 | struct scatterlist *sg; | ||
285 | unsigned int offset, copy; | ||
286 | int flags = 0; | ||
287 | |||
288 | r = 0; | ||
289 | offset = segment->copied; | ||
290 | copy = segment->size - offset; | ||
291 | |||
292 | if (segment->total_copied + segment->size < segment->total_size) | ||
293 | flags |= MSG_MORE; | ||
294 | |||
295 | /* Use sendpage if we can; else fall back to sendmsg */ | ||
296 | if (!segment->data) { | ||
297 | sg = segment->sg; | ||
298 | offset += segment->sg_offset + sg->offset; | ||
299 | r = tcp_conn->sendpage(sk, sg_page(sg), offset, copy, | ||
300 | flags); | ||
131 | } else { | 301 | } else { |
132 | /* ignoring return code since we checked | 302 | struct msghdr msg = { .msg_flags = flags }; |
133 | * in.copy before */ | 303 | struct kvec iov = { |
134 | skb_copy_bits(skb, tcp_conn->in.offset, | 304 | .iov_base = segment->data + offset, |
135 | &tcp_conn->hdr, tcp_conn->hdr_size); | 305 | .iov_len = copy |
136 | tcp_conn->in.hdr = &tcp_conn->hdr; | 306 | }; |
307 | |||
308 | r = kernel_sendmsg(sk, &msg, &iov, 1, copy); | ||
137 | } | 309 | } |
138 | tcp_conn->in.offset += tcp_conn->hdr_size; | ||
139 | tcp_conn->in.copy -= tcp_conn->hdr_size; | ||
140 | } else { | ||
141 | int hdr_remains; | ||
142 | int copylen; | ||
143 | 310 | ||
144 | /* | 311 | if (r < 0) { |
145 | * PDU header scattered across SKB's, | 312 | iscsi_tcp_segment_unmap(segment); |
146 | * copying it... This'll happen quite rarely. | 313 | if (copied || r == -EAGAIN) |
147 | */ | 314 | break; |
315 | return r; | ||
316 | } | ||
317 | copied += r; | ||
318 | } | ||
319 | return copied; | ||
320 | } | ||
321 | |||
322 | /** | ||
323 | * iscsi_tcp_segment_recv - copy data to segment | ||
324 | * @tcp_conn: the iSCSI TCP connection | ||
325 | * @segment: the buffer to copy to | ||
326 | * @ptr: data pointer | ||
327 | * @len: amount of data available | ||
328 | * | ||
329 | * This function copies up to @len bytes to the | ||
330 | * given buffer, and returns the number of bytes | ||
331 | * consumed, which can actually be less than @len. | ||
332 | * | ||
333 | * If hash digest is enabled, the function will update the | ||
334 | * hash while copying. | ||
335 | * Combining these two operations doesn't buy us a lot (yet), | ||
336 | * but in the future we could implement combined copy+crc, | ||
337 | * just the way we do for network layer checksums. | ||
338 | */ | ||
339 | static int | ||
340 | iscsi_tcp_segment_recv(struct iscsi_tcp_conn *tcp_conn, | ||
341 | struct iscsi_segment *segment, const void *ptr, | ||
342 | unsigned int len) | ||
343 | { | ||
344 | unsigned int copy = 0, copied = 0; | ||
345 | |||
346 | while (!iscsi_tcp_segment_done(segment, 1, copy)) { | ||
347 | if (copied == len) { | ||
348 | debug_tcp("iscsi_tcp_segment_recv copied %d bytes\n", | ||
349 | len); | ||
350 | break; | ||
351 | } | ||
352 | |||
353 | copy = min(len - copied, segment->size - segment->copied); | ||
354 | debug_tcp("iscsi_tcp_segment_recv copying %d\n", copy); | ||
355 | memcpy(segment->data + segment->copied, ptr + copied, copy); | ||
356 | copied += copy; | ||
357 | } | ||
358 | return copied; | ||
359 | } | ||
148 | 360 | ||
149 | if (tcp_conn->in_progress == IN_PROGRESS_WAIT_HEADER) | 361 | static inline void |
150 | tcp_conn->in.hdr_offset = 0; | 362 | iscsi_tcp_dgst_header(struct hash_desc *hash, const void *hdr, size_t hdrlen, |
363 | unsigned char digest[ISCSI_DIGEST_SIZE]) | ||
364 | { | ||
365 | struct scatterlist sg; | ||
151 | 366 | ||
152 | hdr_remains = tcp_conn->hdr_size - tcp_conn->in.hdr_offset; | 367 | sg_init_one(&sg, hdr, hdrlen); |
153 | BUG_ON(hdr_remains <= 0); | 368 | crypto_hash_digest(hash, &sg, hdrlen, digest); |
369 | } | ||
154 | 370 | ||
155 | copylen = min(tcp_conn->in.copy, hdr_remains); | 371 | static inline int |
156 | skb_copy_bits(skb, tcp_conn->in.offset, | 372 | iscsi_tcp_dgst_verify(struct iscsi_tcp_conn *tcp_conn, |
157 | (char*)&tcp_conn->hdr + tcp_conn->in.hdr_offset, | 373 | struct iscsi_segment *segment) |
158 | copylen); | 374 | { |
375 | if (!segment->digest_len) | ||
376 | return 1; | ||
159 | 377 | ||
160 | debug_tcp("PDU gather offset %d bytes %d in.offset %d " | 378 | if (memcmp(segment->recv_digest, segment->digest, |
161 | "in.copy %d\n", tcp_conn->in.hdr_offset, copylen, | 379 | segment->digest_len)) { |
162 | tcp_conn->in.offset, tcp_conn->in.copy); | 380 | debug_scsi("digest mismatch\n"); |
381 | return 0; | ||
382 | } | ||
163 | 383 | ||
164 | tcp_conn->in.offset += copylen; | 384 | return 1; |
165 | tcp_conn->in.copy -= copylen; | 385 | } |
166 | if (copylen < hdr_remains) { | 386 | |
167 | tcp_conn->in_progress = IN_PROGRESS_HEADER_GATHER; | 387 | /* |
168 | tcp_conn->in.hdr_offset += copylen; | 388 | * Helper function to set up segment buffer |
169 | return -EAGAIN; | 389 | */ |
390 | static inline void | ||
391 | __iscsi_segment_init(struct iscsi_segment *segment, size_t size, | ||
392 | iscsi_segment_done_fn_t *done, struct hash_desc *hash) | ||
393 | { | ||
394 | memset(segment, 0, sizeof(*segment)); | ||
395 | segment->total_size = size; | ||
396 | segment->done = done; | ||
397 | |||
398 | if (hash) { | ||
399 | segment->hash = hash; | ||
400 | crypto_hash_init(hash); | ||
401 | } | ||
402 | } | ||
403 | |||
404 | static inline void | ||
405 | iscsi_segment_init_linear(struct iscsi_segment *segment, void *data, | ||
406 | size_t size, iscsi_segment_done_fn_t *done, | ||
407 | struct hash_desc *hash) | ||
408 | { | ||
409 | __iscsi_segment_init(segment, size, done, hash); | ||
410 | segment->data = data; | ||
411 | segment->size = size; | ||
412 | } | ||
413 | |||
414 | static inline int | ||
415 | iscsi_segment_seek_sg(struct iscsi_segment *segment, | ||
416 | struct scatterlist *sg_list, unsigned int sg_count, | ||
417 | unsigned int offset, size_t size, | ||
418 | iscsi_segment_done_fn_t *done, struct hash_desc *hash) | ||
419 | { | ||
420 | struct scatterlist *sg; | ||
421 | unsigned int i; | ||
422 | |||
423 | debug_scsi("iscsi_segment_seek_sg offset %u size %llu\n", | ||
424 | offset, size); | ||
425 | __iscsi_segment_init(segment, size, done, hash); | ||
426 | for_each_sg(sg_list, sg, sg_count, i) { | ||
427 | debug_scsi("sg %d, len %u offset %u\n", i, sg->length, | ||
428 | sg->offset); | ||
429 | if (offset < sg->length) { | ||
430 | iscsi_tcp_segment_init_sg(segment, sg, offset); | ||
431 | return 0; | ||
170 | } | 432 | } |
171 | tcp_conn->in.hdr = &tcp_conn->hdr; | 433 | offset -= sg->length; |
172 | tcp_conn->discontiguous_hdr_cnt++; | ||
173 | tcp_conn->in_progress = IN_PROGRESS_WAIT_HEADER; | ||
174 | } | 434 | } |
175 | 435 | ||
436 | return ISCSI_ERR_DATA_OFFSET; | ||
437 | } | ||
438 | |||
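/*
 * Editor's note -- illustrative sketch, not part of this patch: how the
 * transmit side might point its segment at the payload requested by an
 * R2T, using the init helpers above. A PDU header, by contrast, would go
 * through iscsi_segment_init_linear(). The helper name is hypothetical;
 * tcp_conn->out.segment, tx_hash and the r2t fields follow the naming
 * used elsewhere in this diff.
 */
static int example_data_out_prep(struct iscsi_conn *conn,
				 struct iscsi_cmd_task *ctask,
				 struct iscsi_r2t_info *r2t)
{
	struct iscsi_tcp_conn *tcp_conn = conn->dd_data;
	struct hash_desc *tx_hash = NULL;

	if (conn->datadgst_en)		/* CRC over the payload, if negotiated */
		tx_hash = &tcp_conn->tx_hash;

	/* Start r2t->data_offset bytes into the command's scatterlist and
	 * cover r2t->data_length bytes from there; the segment code walks
	 * the sg entries, pads and splices the digest as it goes. */
	return iscsi_segment_seek_sg(&tcp_conn->out.segment,
				     scsi_sglist(ctask->sc),
				     scsi_sg_count(ctask->sc),
				     r2t->data_offset, r2t->data_length,
				     NULL, tx_hash);
}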
439 | /** | ||
440 | * iscsi_tcp_hdr_recv_prep - prep segment for hdr reception | ||
441 | * @tcp_conn: iscsi connection to prep for | ||
442 | * | ||
443 | * This function always passes NULL for the hash argument, because when this | ||
444 | * function is called we do not yet know the final size of the header and want | ||
445 | * to delay the digest processing until we know that. | ||
446 | */ | ||
447 | static void | ||
448 | iscsi_tcp_hdr_recv_prep(struct iscsi_tcp_conn *tcp_conn) | ||
449 | { | ||
450 | debug_tcp("iscsi_tcp_hdr_recv_prep(%p%s)\n", tcp_conn, | ||
451 | tcp_conn->iscsi_conn->hdrdgst_en ? ", digest enabled" : ""); | ||
452 | iscsi_segment_init_linear(&tcp_conn->in.segment, | ||
453 | tcp_conn->in.hdr_buf, sizeof(struct iscsi_hdr), | ||
454 | iscsi_tcp_hdr_recv_done, NULL); | ||
455 | } | ||
456 | |||
457 | /* | ||
458 | * Handle incoming reply to any other type of command | ||
459 | */ | ||
460 | static int | ||
461 | iscsi_tcp_data_recv_done(struct iscsi_tcp_conn *tcp_conn, | ||
462 | struct iscsi_segment *segment) | ||
463 | { | ||
464 | struct iscsi_conn *conn = tcp_conn->iscsi_conn; | ||
465 | int rc = 0; | ||
466 | |||
467 | if (!iscsi_tcp_dgst_verify(tcp_conn, segment)) | ||
468 | return ISCSI_ERR_DATA_DGST; | ||
469 | |||
470 | rc = iscsi_complete_pdu(conn, tcp_conn->in.hdr, | ||
471 | conn->data, tcp_conn->in.datalen); | ||
472 | if (rc) | ||
473 | return rc; | ||
474 | |||
475 | iscsi_tcp_hdr_recv_prep(tcp_conn); | ||
176 | return 0; | 476 | return 0; |
177 | } | 477 | } |
178 | 478 | ||
479 | static void | ||
480 | iscsi_tcp_data_recv_prep(struct iscsi_tcp_conn *tcp_conn) | ||
481 | { | ||
482 | struct iscsi_conn *conn = tcp_conn->iscsi_conn; | ||
483 | struct hash_desc *rx_hash = NULL; | ||
484 | |||
485 | if (conn->datadgst_en) | ||
486 | rx_hash = &tcp_conn->rx_hash; | ||
487 | |||
488 | iscsi_segment_init_linear(&tcp_conn->in.segment, | ||
489 | conn->data, tcp_conn->in.datalen, | ||
490 | iscsi_tcp_data_recv_done, rx_hash); | ||
491 | } | ||
492 | |||
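/*
 * Editor's note (summary, not part of this patch): the receive side now
 * cycles through these segment setups -- iscsi_tcp_hdr_recv_prep() arms
 * in.segment for a bare header; iscsi_tcp_hdr_recv_done() grows it for
 * AHS and the header digest, then dissects the PDU; depending on the
 * opcode, data is steered either into conn->data via
 * iscsi_tcp_data_recv_prep() above or straight into the command's
 * scatterlist; the done() callbacks complete the PDU and re-arm the
 * header segment for the next one.
 */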
179 | /* | 493 | /* |
180 | * must be called with session lock | 494 | * must be called with session lock |
181 | */ | 495 | */ |
@@ -184,7 +498,6 @@ iscsi_tcp_cleanup_ctask(struct iscsi_conn *conn, struct iscsi_cmd_task *ctask) | |||
184 | { | 498 | { |
185 | struct iscsi_tcp_cmd_task *tcp_ctask = ctask->dd_data; | 499 | struct iscsi_tcp_cmd_task *tcp_ctask = ctask->dd_data; |
186 | struct iscsi_r2t_info *r2t; | 500 | struct iscsi_r2t_info *r2t; |
187 | struct scsi_cmnd *sc; | ||
188 | 501 | ||
189 | /* flush ctask's r2t queues */ | 502 | /* flush ctask's r2t queues */ |
190 | while (__kfifo_get(tcp_ctask->r2tqueue, (void*)&r2t, sizeof(void*))) { | 503 | while (__kfifo_get(tcp_ctask->r2tqueue, (void*)&r2t, sizeof(void*))) { |
@@ -193,12 +506,12 @@ iscsi_tcp_cleanup_ctask(struct iscsi_conn *conn, struct iscsi_cmd_task *ctask) | |||
193 | debug_scsi("iscsi_tcp_cleanup_ctask pending r2t dropped\n"); | 506 | debug_scsi("iscsi_tcp_cleanup_ctask pending r2t dropped\n"); |
194 | } | 507 | } |
195 | 508 | ||
196 | sc = ctask->sc; | 509 | r2t = tcp_ctask->r2t; |
197 | if (unlikely(!sc)) | 510 | if (r2t != NULL) { |
198 | return; | 511 | __kfifo_put(tcp_ctask->r2tpool.queue, (void*)&r2t, |
199 | 512 | sizeof(void*)); | |
200 | tcp_ctask->xmstate = XMSTATE_VALUE_IDLE; | 513 | tcp_ctask->r2t = NULL; |
201 | tcp_ctask->r2t = NULL; | 514 | } |
202 | } | 515 | } |
203 | 516 | ||
204 | /** | 517 | /** |
@@ -217,11 +530,6 @@ iscsi_data_rsp(struct iscsi_conn *conn, struct iscsi_cmd_task *ctask) | |||
217 | int datasn = be32_to_cpu(rhdr->datasn); | 530 | int datasn = be32_to_cpu(rhdr->datasn); |
218 | 531 | ||
219 | iscsi_update_cmdsn(session, (struct iscsi_nopin*)rhdr); | 532 | iscsi_update_cmdsn(session, (struct iscsi_nopin*)rhdr); |
220 | /* | ||
221 | * setup Data-In byte counter (gets decremented..) | ||
222 | */ | ||
223 | ctask->data_count = tcp_conn->in.datalen; | ||
224 | |||
225 | if (tcp_conn->in.datalen == 0) | 533 | if (tcp_conn->in.datalen == 0) |
226 | return 0; | 534 | return 0; |
227 | 535 | ||
@@ -242,22 +550,20 @@ iscsi_data_rsp(struct iscsi_conn *conn, struct iscsi_cmd_task *ctask) | |||
242 | } | 550 | } |
243 | 551 | ||
244 | if (rhdr->flags & ISCSI_FLAG_DATA_STATUS) { | 552 | if (rhdr->flags & ISCSI_FLAG_DATA_STATUS) { |
553 | sc->result = (DID_OK << 16) | rhdr->cmd_status; | ||
245 | conn->exp_statsn = be32_to_cpu(rhdr->statsn) + 1; | 554 | conn->exp_statsn = be32_to_cpu(rhdr->statsn) + 1; |
246 | if (rhdr->flags & ISCSI_FLAG_DATA_UNDERFLOW) { | 555 | if (rhdr->flags & (ISCSI_FLAG_DATA_UNDERFLOW | |
556 | ISCSI_FLAG_DATA_OVERFLOW)) { | ||
247 | int res_count = be32_to_cpu(rhdr->residual_count); | 557 | int res_count = be32_to_cpu(rhdr->residual_count); |
248 | 558 | ||
249 | if (res_count > 0 && | 559 | if (res_count > 0 && |
250 | res_count <= scsi_bufflen(sc)) { | 560 | (rhdr->flags & ISCSI_FLAG_CMD_OVERFLOW || |
561 | res_count <= scsi_bufflen(sc))) | ||
251 | scsi_set_resid(sc, res_count); | 562 | scsi_set_resid(sc, res_count); |
252 | sc->result = (DID_OK << 16) | rhdr->cmd_status; | 563 | else |
253 | } else | ||
254 | sc->result = (DID_BAD_TARGET << 16) | | 564 | sc->result = (DID_BAD_TARGET << 16) | |
255 | rhdr->cmd_status; | 565 | rhdr->cmd_status; |
256 | } else if (rhdr->flags & ISCSI_FLAG_DATA_OVERFLOW) { | 566 | } |
257 | scsi_set_resid(sc, be32_to_cpu(rhdr->residual_count)); | ||
258 | sc->result = (DID_OK << 16) | rhdr->cmd_status; | ||
259 | } else | ||
260 | sc->result = (DID_OK << 16) | rhdr->cmd_status; | ||
261 | } | 567 | } |
262 | 568 | ||
263 | conn->datain_pdus_cnt++; | 569 | conn->datain_pdus_cnt++; |
@@ -281,9 +587,6 @@ iscsi_solicit_data_init(struct iscsi_conn *conn, struct iscsi_cmd_task *ctask, | |||
281 | struct iscsi_r2t_info *r2t) | 587 | struct iscsi_r2t_info *r2t) |
282 | { | 588 | { |
283 | struct iscsi_data *hdr; | 589 | struct iscsi_data *hdr; |
284 | struct scsi_cmnd *sc = ctask->sc; | ||
285 | int i, sg_count = 0; | ||
286 | struct scatterlist *sg; | ||
287 | 590 | ||
288 | hdr = &r2t->dtask.hdr; | 591 | hdr = &r2t->dtask.hdr; |
289 | memset(hdr, 0, sizeof(struct iscsi_data)); | 592 | memset(hdr, 0, sizeof(struct iscsi_data)); |
@@ -307,34 +610,6 @@ iscsi_solicit_data_init(struct iscsi_conn *conn, struct iscsi_cmd_task *ctask, | |||
307 | conn->dataout_pdus_cnt++; | 610 | conn->dataout_pdus_cnt++; |
308 | 611 | ||
309 | r2t->sent = 0; | 612 | r2t->sent = 0; |
310 | |||
311 | iscsi_buf_init_iov(&r2t->headbuf, (char*)hdr, | ||
312 | sizeof(struct iscsi_hdr)); | ||
313 | |||
314 | sg = scsi_sglist(sc); | ||
315 | r2t->sg = NULL; | ||
316 | for (i = 0; i < scsi_sg_count(sc); i++, sg += 1) { | ||
317 | /* FIXME: prefetch ? */ | ||
318 | if (sg_count + sg->length > r2t->data_offset) { | ||
319 | int page_offset; | ||
320 | |||
321 | /* sg page found! */ | ||
322 | |||
323 | /* offset within this page */ | ||
324 | page_offset = r2t->data_offset - sg_count; | ||
325 | |||
326 | /* fill in this buffer */ | ||
327 | iscsi_buf_init_sg(&r2t->sendbuf, sg); | ||
328 | r2t->sendbuf.sg.offset += page_offset; | ||
329 | r2t->sendbuf.sg.length -= page_offset; | ||
330 | |||
331 | /* xmit logic will continue with next one */ | ||
332 | r2t->sg = sg + 1; | ||
333 | break; | ||
334 | } | ||
335 | sg_count += sg->length; | ||
336 | } | ||
337 | BUG_ON(r2t->sg == NULL); | ||
338 | } | 613 | } |
339 | 614 | ||
340 | /** | 615 | /** |
@@ -366,14 +641,11 @@ iscsi_r2t_rsp(struct iscsi_conn *conn, struct iscsi_cmd_task *ctask) | |||
366 | } | 641 | } |
367 | 642 | ||
368 | /* fill-in new R2T associated with the task */ | 643 | /* fill-in new R2T associated with the task */ |
369 | spin_lock(&session->lock); | ||
370 | iscsi_update_cmdsn(session, (struct iscsi_nopin*)rhdr); | 644 | iscsi_update_cmdsn(session, (struct iscsi_nopin*)rhdr); |
371 | 645 | ||
372 | if (!ctask->sc || ctask->mtask || | 646 | if (!ctask->sc || session->state != ISCSI_STATE_LOGGED_IN) { |
373 | session->state != ISCSI_STATE_LOGGED_IN) { | ||
374 | printk(KERN_INFO "iscsi_tcp: dropping R2T itt %d in " | 647 | printk(KERN_INFO "iscsi_tcp: dropping R2T itt %d in " |
375 | "recovery...\n", ctask->itt); | 648 | "recovery...\n", ctask->itt); |
376 | spin_unlock(&session->lock); | ||
377 | return 0; | 649 | return 0; |
378 | } | 650 | } |
379 | 651 | ||
@@ -384,7 +656,8 @@ iscsi_r2t_rsp(struct iscsi_conn *conn, struct iscsi_cmd_task *ctask) | |||
384 | r2t->data_length = be32_to_cpu(rhdr->data_length); | 656 | r2t->data_length = be32_to_cpu(rhdr->data_length); |
385 | if (r2t->data_length == 0) { | 657 | if (r2t->data_length == 0) { |
386 | printk(KERN_ERR "iscsi_tcp: invalid R2T with zero data len\n"); | 658 | printk(KERN_ERR "iscsi_tcp: invalid R2T with zero data len\n"); |
387 | spin_unlock(&session->lock); | 659 | __kfifo_put(tcp_ctask->r2tpool.queue, (void*)&r2t, |
660 | sizeof(void*)); | ||
388 | return ISCSI_ERR_DATALEN; | 661 | return ISCSI_ERR_DATALEN; |
389 | } | 662 | } |
390 | 663 | ||
@@ -395,10 +668,11 @@ iscsi_r2t_rsp(struct iscsi_conn *conn, struct iscsi_cmd_task *ctask) | |||
395 | 668 | ||
396 | r2t->data_offset = be32_to_cpu(rhdr->data_offset); | 669 | r2t->data_offset = be32_to_cpu(rhdr->data_offset); |
397 | if (r2t->data_offset + r2t->data_length > scsi_bufflen(ctask->sc)) { | 670 | if (r2t->data_offset + r2t->data_length > scsi_bufflen(ctask->sc)) { |
398 | spin_unlock(&session->lock); | ||
399 | printk(KERN_ERR "iscsi_tcp: invalid R2T with data len %u at " | 671 | printk(KERN_ERR "iscsi_tcp: invalid R2T with data len %u at " |
400 | "offset %u and total length %d\n", r2t->data_length, | 672 | "offset %u and total length %d\n", r2t->data_length, |
401 | r2t->data_offset, scsi_bufflen(ctask->sc)); | 673 | r2t->data_offset, scsi_bufflen(ctask->sc)); |
674 | __kfifo_put(tcp_ctask->r2tpool.queue, (void*)&r2t, | ||
675 | sizeof(void*)); | ||
402 | return ISCSI_ERR_DATALEN; | 676 | return ISCSI_ERR_DATALEN; |
403 | } | 677 | } |
404 | 678 | ||
@@ -409,26 +683,55 @@ iscsi_r2t_rsp(struct iscsi_conn *conn, struct iscsi_cmd_task *ctask) | |||
409 | 683 | ||
410 | tcp_ctask->exp_datasn = r2tsn + 1; | 684 | tcp_ctask->exp_datasn = r2tsn + 1; |
411 | __kfifo_put(tcp_ctask->r2tqueue, (void*)&r2t, sizeof(void*)); | 685 | __kfifo_put(tcp_ctask->r2tqueue, (void*)&r2t, sizeof(void*)); |
412 | set_bit(XMSTATE_BIT_SOL_HDR_INIT, &tcp_ctask->xmstate); | ||
413 | list_move_tail(&ctask->running, &conn->xmitqueue); | ||
414 | |||
415 | scsi_queue_work(session->host, &conn->xmitwork); | ||
416 | conn->r2t_pdus_cnt++; | 686 | conn->r2t_pdus_cnt++; |
417 | spin_unlock(&session->lock); | ||
418 | 687 | ||
688 | iscsi_requeue_ctask(ctask); | ||
419 | return 0; | 689 | return 0; |
420 | } | 690 | } |
421 | 691 | ||
692 | /* | ||
693 | * Handle incoming reply to DataIn command | ||
694 | */ | ||
422 | static int | 695 | static int |
423 | iscsi_tcp_hdr_recv(struct iscsi_conn *conn) | 696 | iscsi_tcp_process_data_in(struct iscsi_tcp_conn *tcp_conn, |
697 | struct iscsi_segment *segment) | ||
698 | { | ||
699 | struct iscsi_conn *conn = tcp_conn->iscsi_conn; | ||
700 | struct iscsi_hdr *hdr = tcp_conn->in.hdr; | ||
701 | int rc; | ||
702 | |||
703 | if (!iscsi_tcp_dgst_verify(tcp_conn, segment)) | ||
704 | return ISCSI_ERR_DATA_DGST; | ||
705 | |||
706 | /* check for non-exceptional status */ | ||
707 | if (hdr->flags & ISCSI_FLAG_DATA_STATUS) { | ||
708 | rc = iscsi_complete_pdu(conn, tcp_conn->in.hdr, NULL, 0); | ||
709 | if (rc) | ||
710 | return rc; | ||
711 | } | ||
712 | |||
713 | iscsi_tcp_hdr_recv_prep(tcp_conn); | ||
714 | return 0; | ||
715 | } | ||
716 | |||
717 | /** | ||
718 | * iscsi_tcp_hdr_dissect - process PDU header | ||
719 | * @conn: iSCSI connection | ||
720 | * @hdr: PDU header | ||
721 | * | ||
722 | * This function analyzes the header of the PDU received, | ||
723 | * and performs several sanity checks. If the PDU is accompanied | ||
724 | * by data, the receive buffer is set up to copy the incoming data | ||
725 | * to the correct location. | ||
726 | */ | ||
727 | static int | ||
728 | iscsi_tcp_hdr_dissect(struct iscsi_conn *conn, struct iscsi_hdr *hdr) | ||
424 | { | 729 | { |
425 | int rc = 0, opcode, ahslen; | 730 | int rc = 0, opcode, ahslen; |
426 | struct iscsi_hdr *hdr; | ||
427 | struct iscsi_session *session = conn->session; | 731 | struct iscsi_session *session = conn->session; |
428 | struct iscsi_tcp_conn *tcp_conn = conn->dd_data; | 732 | struct iscsi_tcp_conn *tcp_conn = conn->dd_data; |
429 | uint32_t cdgst, rdgst = 0, itt; | 733 | struct iscsi_cmd_task *ctask; |
430 | 734 | uint32_t itt; | |
431 | hdr = tcp_conn->in.hdr; | ||
432 | 735 | ||
433 | /* verify PDU length */ | 736 | /* verify PDU length */ |
434 | tcp_conn->in.datalen = ntoh24(hdr->dlength); | 737 | tcp_conn->in.datalen = ntoh24(hdr->dlength); |
@@ -437,78 +740,73 @@ iscsi_tcp_hdr_recv(struct iscsi_conn *conn) | |||
437 | tcp_conn->in.datalen, conn->max_recv_dlength); | 740 | tcp_conn->in.datalen, conn->max_recv_dlength); |
438 | return ISCSI_ERR_DATALEN; | 741 | return ISCSI_ERR_DATALEN; |
439 | } | 742 | } |
440 | tcp_conn->data_copied = 0; | ||
441 | 743 | ||
442 | /* read AHS */ | 744 | /* Additional header segments. So far, we don't |
745 | * process additional headers. | ||
746 | */ | ||
443 | ahslen = hdr->hlength << 2; | 747 | ahslen = hdr->hlength << 2; |
444 | tcp_conn->in.offset += ahslen; | ||
445 | tcp_conn->in.copy -= ahslen; | ||
446 | if (tcp_conn->in.copy < 0) { | ||
447 | printk(KERN_ERR "iscsi_tcp: can't handle AHS with length " | ||
448 | "%d bytes\n", ahslen); | ||
449 | return ISCSI_ERR_AHSLEN; | ||
450 | } | ||
451 | |||
452 | /* calculate read padding */ | ||
453 | tcp_conn->in.padding = tcp_conn->in.datalen & (ISCSI_PAD_LEN-1); | ||
454 | if (tcp_conn->in.padding) { | ||
455 | tcp_conn->in.padding = ISCSI_PAD_LEN - tcp_conn->in.padding; | ||
456 | debug_scsi("read padding %d bytes\n", tcp_conn->in.padding); | ||
457 | } | ||
458 | |||
459 | if (conn->hdrdgst_en) { | ||
460 | struct scatterlist sg; | ||
461 | |||
462 | sg_init_one(&sg, (u8 *)hdr, | ||
463 | sizeof(struct iscsi_hdr) + ahslen); | ||
464 | crypto_hash_digest(&tcp_conn->rx_hash, &sg, sg.length, | ||
465 | (u8 *)&cdgst); | ||
466 | rdgst = *(uint32_t*)((char*)hdr + sizeof(struct iscsi_hdr) + | ||
467 | ahslen); | ||
468 | if (cdgst != rdgst) { | ||
469 | printk(KERN_ERR "iscsi_tcp: hdrdgst error " | ||
470 | "recv 0x%x calc 0x%x\n", rdgst, cdgst); | ||
471 | return ISCSI_ERR_HDR_DGST; | ||
472 | } | ||
473 | } | ||
474 | 748 | ||
475 | opcode = hdr->opcode & ISCSI_OPCODE_MASK; | 749 | opcode = hdr->opcode & ISCSI_OPCODE_MASK; |
476 | /* verify itt (itt encoding: age+cid+itt) */ | 750 | /* verify itt (itt encoding: age+cid+itt) */ |
477 | rc = iscsi_verify_itt(conn, hdr, &itt); | 751 | rc = iscsi_verify_itt(conn, hdr, &itt); |
478 | if (rc == ISCSI_ERR_NO_SCSI_CMD) { | 752 | if (rc) |
479 | tcp_conn->in.datalen = 0; /* force drop */ | ||
480 | return 0; | ||
481 | } else if (rc) | ||
482 | return rc; | 753 | return rc; |
483 | 754 | ||
484 | debug_tcp("opcode 0x%x offset %d copy %d ahslen %d datalen %d\n", | 755 | debug_tcp("opcode 0x%x ahslen %d datalen %d\n", |
485 | opcode, tcp_conn->in.offset, tcp_conn->in.copy, | 756 | opcode, ahslen, tcp_conn->in.datalen); |
486 | ahslen, tcp_conn->in.datalen); | ||
487 | 757 | ||
488 | switch(opcode) { | 758 | switch(opcode) { |
489 | case ISCSI_OP_SCSI_DATA_IN: | 759 | case ISCSI_OP_SCSI_DATA_IN: |
490 | tcp_conn->in.ctask = session->cmds[itt]; | 760 | ctask = session->cmds[itt]; |
491 | rc = iscsi_data_rsp(conn, tcp_conn->in.ctask); | 761 | spin_lock(&conn->session->lock); |
762 | rc = iscsi_data_rsp(conn, ctask); | ||
763 | spin_unlock(&conn->session->lock); | ||
492 | if (rc) | 764 | if (rc) |
493 | return rc; | 765 | return rc; |
766 | if (tcp_conn->in.datalen) { | ||
767 | struct iscsi_tcp_cmd_task *tcp_ctask = ctask->dd_data; | ||
768 | struct hash_desc *rx_hash = NULL; | ||
769 | |||
770 | /* | ||
771 | * Setup copy of Data-In into the Scsi_Cmnd | ||
772 | * Scatterlist case: | ||
773 | * We set up the iscsi_segment to point to the next | ||
774 | * scatterlist entry to copy to. As we go along, | ||
775 | * we move on to the next scatterlist entry and | ||
776 | * update the digest per-entry. | ||
777 | */ | ||
778 | if (conn->datadgst_en) | ||
779 | rx_hash = &tcp_conn->rx_hash; | ||
780 | |||
781 | debug_tcp("iscsi_tcp_begin_data_in(%p, offset=%d, " | ||
782 | "datalen=%d)\n", tcp_conn, | ||
783 | tcp_ctask->data_offset, | ||
784 | tcp_conn->in.datalen); | ||
785 | return iscsi_segment_seek_sg(&tcp_conn->in.segment, | ||
786 | scsi_sglist(ctask->sc), | ||
787 | scsi_sg_count(ctask->sc), | ||
788 | tcp_ctask->data_offset, | ||
789 | tcp_conn->in.datalen, | ||
790 | iscsi_tcp_process_data_in, | ||
791 | rx_hash); | ||
792 | } | ||
494 | /* fall through */ | 793 | /* fall through */ |
495 | case ISCSI_OP_SCSI_CMD_RSP: | 794 | case ISCSI_OP_SCSI_CMD_RSP: |
496 | tcp_conn->in.ctask = session->cmds[itt]; | 795 | if (tcp_conn->in.datalen) { |
497 | if (tcp_conn->in.datalen) | 796 | iscsi_tcp_data_recv_prep(tcp_conn); |
498 | goto copy_hdr; | 797 | return 0; |
499 | 798 | } | |
500 | spin_lock(&session->lock); | 799 | rc = iscsi_complete_pdu(conn, hdr, NULL, 0); |
501 | rc = __iscsi_complete_pdu(conn, hdr, NULL, 0); | ||
502 | spin_unlock(&session->lock); | ||
503 | break; | 800 | break; |
504 | case ISCSI_OP_R2T: | 801 | case ISCSI_OP_R2T: |
505 | tcp_conn->in.ctask = session->cmds[itt]; | 802 | ctask = session->cmds[itt]; |
506 | if (ahslen) | 803 | if (ahslen) |
507 | rc = ISCSI_ERR_AHSLEN; | 804 | rc = ISCSI_ERR_AHSLEN; |
508 | else if (tcp_conn->in.ctask->sc->sc_data_direction == | 805 | else if (ctask->sc->sc_data_direction == DMA_TO_DEVICE) { |
509 | DMA_TO_DEVICE) | 806 | spin_lock(&session->lock); |
510 | rc = iscsi_r2t_rsp(conn, tcp_conn->in.ctask); | 807 | rc = iscsi_r2t_rsp(conn, ctask); |
511 | else | 808 | spin_unlock(&session->lock); |
809 | } else | ||
512 | rc = ISCSI_ERR_PROTO; | 810 | rc = ISCSI_ERR_PROTO; |
513 | break; | 811 | break; |
514 | case ISCSI_OP_LOGIN_RSP: | 812 | case ISCSI_OP_LOGIN_RSP: |
@@ -520,8 +818,7 @@ iscsi_tcp_hdr_recv(struct iscsi_conn *conn) | |||
520 | * than 8K, but there are no targets that currently do this. | 818 | * than 8K, but there are no targets that currently do this. |
521 | * For now we fail until we find a vendor that needs it | 819 | * For now we fail until we find a vendor that needs it |
522 | */ | 820 | */ |
523 | if (ISCSI_DEF_MAX_RECV_SEG_LEN < | 821 | if (ISCSI_DEF_MAX_RECV_SEG_LEN < tcp_conn->in.datalen) { |
524 | tcp_conn->in.datalen) { | ||
525 | printk(KERN_ERR "iscsi_tcp: received buffer of len %u " | 822 | printk(KERN_ERR "iscsi_tcp: received buffer of len %u " |
526 | "but conn buffer is only %u (opcode %0x)\n", | 823 | "but conn buffer is only %u (opcode %0x)\n", |
527 | tcp_conn->in.datalen, | 824 | tcp_conn->in.datalen, |
@@ -530,8 +827,13 @@ iscsi_tcp_hdr_recv(struct iscsi_conn *conn) | |||
530 | break; | 827 | break; |
531 | } | 828 | } |
532 | 829 | ||
533 | if (tcp_conn->in.datalen) | 830 | /* If there's data coming in with the response, |
534 | goto copy_hdr; | 831 | * receive it to the connection's buffer. |
832 | */ | ||
833 | if (tcp_conn->in.datalen) { | ||
834 | iscsi_tcp_data_recv_prep(tcp_conn); | ||
835 | return 0; | ||
836 | } | ||
535 | /* fall through */ | 837 | /* fall through */ |
536 | case ISCSI_OP_LOGOUT_RSP: | 838 | case ISCSI_OP_LOGOUT_RSP: |
537 | case ISCSI_OP_NOOP_IN: | 839 | case ISCSI_OP_NOOP_IN: |
@@ -543,461 +845,161 @@ iscsi_tcp_hdr_recv(struct iscsi_conn *conn) | |||
543 | break; | 845 | break; |
544 | } | 846 | } |
545 | 847 | ||
546 | return rc; | 848 | if (rc == 0) { |
547 | 849 | /* Anything that comes with data should have | |
548 | copy_hdr: | 850 | * been handled above. */ |
549 | /* | 851 | if (tcp_conn->in.datalen) |
550 | * if we did zero copy for the header but we will need multiple | 852 | return ISCSI_ERR_PROTO; |
551 | * skbs to complete the command then we have to copy the header | 853 | iscsi_tcp_hdr_recv_prep(tcp_conn); |
552 | * for later use | ||
553 | */ | ||
554 | if (tcp_conn->in.zero_copy_hdr && tcp_conn->in.copy <= | ||
555 | (tcp_conn->in.datalen + tcp_conn->in.padding + | ||
556 | (conn->datadgst_en ? 4 : 0))) { | ||
557 | debug_tcp("Copying header for later use. in.copy %d in.datalen" | ||
558 | " %d\n", tcp_conn->in.copy, tcp_conn->in.datalen); | ||
559 | memcpy(&tcp_conn->hdr, tcp_conn->in.hdr, | ||
560 | sizeof(struct iscsi_hdr)); | ||
561 | tcp_conn->in.hdr = &tcp_conn->hdr; | ||
562 | tcp_conn->in.zero_copy_hdr = 0; | ||
563 | } | ||
564 | return 0; | ||
565 | } | ||
566 | |||
567 | /** | ||
568 | * iscsi_ctask_copy - copy skb bits to the destanation cmd task | ||
569 | * @conn: iscsi tcp connection | ||
570 | * @ctask: scsi command task | ||
571 | * @buf: buffer to copy to | ||
572 | * @buf_size: size of buffer | ||
573 | * @offset: offset within the buffer | ||
574 | * | ||
575 | * Notes: | ||
576 | * The function calls skb_copy_bits() and updates per-connection and | ||
577 | * per-cmd byte counters. | ||
578 | * | ||
579 | * Read counters (in bytes): | ||
580 | * | ||
581 | * conn->in.offset offset within in progress SKB | ||
582 | * conn->in.copy left to copy from in progress SKB | ||
583 | * including padding | ||
584 | * conn->in.copied copied already from in progress SKB | ||
585 | * conn->data_copied copied already from in progress buffer | ||
586 | * ctask->sent total bytes sent up to the MidLayer | ||
587 | * ctask->data_count left to copy from in progress Data-In | ||
588 | * buf_left left to copy from in progress buffer | ||
589 | **/ | ||
590 | static inline int | ||
591 | iscsi_ctask_copy(struct iscsi_tcp_conn *tcp_conn, struct iscsi_cmd_task *ctask, | ||
592 | void *buf, int buf_size, int offset) | ||
593 | { | ||
594 | struct iscsi_tcp_cmd_task *tcp_ctask = ctask->dd_data; | ||
595 | int buf_left = buf_size - (tcp_conn->data_copied + offset); | ||
596 | unsigned size = min(tcp_conn->in.copy, buf_left); | ||
597 | int rc; | ||
598 | |||
599 | size = min(size, ctask->data_count); | ||
600 | |||
601 | debug_tcp("ctask_copy %d bytes at offset %d copied %d\n", | ||
602 | size, tcp_conn->in.offset, tcp_conn->in.copied); | ||
603 | |||
604 | BUG_ON(size <= 0); | ||
605 | BUG_ON(tcp_ctask->sent + size > scsi_bufflen(ctask->sc)); | ||
606 | |||
607 | rc = skb_copy_bits(tcp_conn->in.skb, tcp_conn->in.offset, | ||
608 | (char*)buf + (offset + tcp_conn->data_copied), size); | ||
609 | /* must fit into skb->len */ | ||
610 | BUG_ON(rc); | ||
611 | |||
612 | tcp_conn->in.offset += size; | ||
613 | tcp_conn->in.copy -= size; | ||
614 | tcp_conn->in.copied += size; | ||
615 | tcp_conn->data_copied += size; | ||
616 | tcp_ctask->sent += size; | ||
617 | ctask->data_count -= size; | ||
618 | |||
619 | BUG_ON(tcp_conn->in.copy < 0); | ||
620 | BUG_ON(ctask->data_count < 0); | ||
621 | |||
622 | if (buf_size != (tcp_conn->data_copied + offset)) { | ||
623 | if (!ctask->data_count) { | ||
624 | BUG_ON(buf_size - tcp_conn->data_copied < 0); | ||
625 | /* done with this PDU */ | ||
626 | return buf_size - tcp_conn->data_copied; | ||
627 | } | ||
628 | return -EAGAIN; | ||
629 | } | 854 | } |
630 | 855 | ||
631 | /* done with this buffer or with both - PDU and buffer */ | 856 | return rc; |
632 | tcp_conn->data_copied = 0; | ||
633 | return 0; | ||
634 | } | 857 | } |
635 | 858 | ||
636 | /** | 859 | /** |
637 | * iscsi_tcp_copy - copy skb bits to the destanation buffer | 860 | * iscsi_tcp_hdr_recv_done - process PDU header |
638 | * @conn: iscsi tcp connection | ||
639 | * | 861 | * |
640 | * Notes: | 862 | * This is the callback invoked when the PDU header has |
641 | * The function calls skb_copy_bits() and updates per-connection | 863 | * been received. If the header is followed by additional |
642 | * byte counters. | 864 | * header segments, we go back for more data. |
643 | **/ | 865 | */ |
644 | static inline int | 866 | static int |
645 | iscsi_tcp_copy(struct iscsi_conn *conn, int buf_size) | 867 | iscsi_tcp_hdr_recv_done(struct iscsi_tcp_conn *tcp_conn, |
646 | { | 868 | struct iscsi_segment *segment) |
647 | struct iscsi_tcp_conn *tcp_conn = conn->dd_data; | ||
648 | int buf_left = buf_size - tcp_conn->data_copied; | ||
649 | int size = min(tcp_conn->in.copy, buf_left); | ||
650 | int rc; | ||
651 | |||
652 | debug_tcp("tcp_copy %d bytes at offset %d copied %d\n", | ||
653 | size, tcp_conn->in.offset, tcp_conn->data_copied); | ||
654 | BUG_ON(size <= 0); | ||
655 | |||
656 | rc = skb_copy_bits(tcp_conn->in.skb, tcp_conn->in.offset, | ||
657 | (char*)conn->data + tcp_conn->data_copied, size); | ||
658 | BUG_ON(rc); | ||
659 | |||
660 | tcp_conn->in.offset += size; | ||
661 | tcp_conn->in.copy -= size; | ||
662 | tcp_conn->in.copied += size; | ||
663 | tcp_conn->data_copied += size; | ||
664 | |||
665 | if (buf_size != tcp_conn->data_copied) | ||
666 | return -EAGAIN; | ||
667 | |||
668 | return 0; | ||
669 | } | ||
670 | |||
671 | static inline void | ||
672 | partial_sg_digest_update(struct hash_desc *desc, struct scatterlist *sg, | ||
673 | int offset, int length) | ||
674 | { | ||
675 | struct scatterlist temp; | ||
676 | |||
677 | sg_init_table(&temp, 1); | ||
678 | sg_set_page(&temp, sg_page(sg), length, offset); | ||
679 | crypto_hash_update(desc, &temp, length); | ||
680 | } | ||
681 | |||
682 | static void | ||
683 | iscsi_recv_digest_update(struct iscsi_tcp_conn *tcp_conn, char* buf, int len) | ||
684 | { | ||
685 | struct scatterlist tmp; | ||
686 | |||
687 | sg_init_one(&tmp, buf, len); | ||
688 | crypto_hash_update(&tcp_conn->rx_hash, &tmp, len); | ||
689 | } | ||
690 | |||
691 | static int iscsi_scsi_data_in(struct iscsi_conn *conn) | ||
692 | { | 869 | { |
693 | struct iscsi_tcp_conn *tcp_conn = conn->dd_data; | 870 | struct iscsi_conn *conn = tcp_conn->iscsi_conn; |
694 | struct iscsi_cmd_task *ctask = tcp_conn->in.ctask; | 871 | struct iscsi_hdr *hdr; |
695 | struct iscsi_tcp_cmd_task *tcp_ctask = ctask->dd_data; | ||
696 | struct scsi_cmnd *sc = ctask->sc; | ||
697 | struct scatterlist *sg; | ||
698 | int i, offset, rc = 0; | ||
699 | |||
700 | BUG_ON((void*)ctask != sc->SCp.ptr); | ||
701 | |||
702 | offset = tcp_ctask->data_offset; | ||
703 | sg = scsi_sglist(sc); | ||
704 | |||
705 | if (tcp_ctask->data_offset) | ||
706 | for (i = 0; i < tcp_ctask->sg_count; i++) | ||
707 | offset -= sg[i].length; | ||
708 | /* we've passed through partial sg*/ | ||
709 | if (offset < 0) | ||
710 | offset = 0; | ||
711 | |||
712 | for (i = tcp_ctask->sg_count; i < scsi_sg_count(sc); i++) { | ||
713 | char *dest; | ||
714 | |||
715 | dest = kmap_atomic(sg_page(&sg[i]), KM_SOFTIRQ0); | ||
716 | rc = iscsi_ctask_copy(tcp_conn, ctask, dest + sg[i].offset, | ||
717 | sg[i].length, offset); | ||
718 | kunmap_atomic(dest, KM_SOFTIRQ0); | ||
719 | if (rc == -EAGAIN) | ||
720 | /* continue with the next SKB/PDU */ | ||
721 | return rc; | ||
722 | if (!rc) { | ||
723 | if (conn->datadgst_en) { | ||
724 | if (!offset) | ||
725 | crypto_hash_update( | ||
726 | &tcp_conn->rx_hash, | ||
727 | &sg[i], sg[i].length); | ||
728 | else | ||
729 | partial_sg_digest_update( | ||
730 | &tcp_conn->rx_hash, | ||
731 | &sg[i], | ||
732 | sg[i].offset + offset, | ||
733 | sg[i].length - offset); | ||
734 | } | ||
735 | offset = 0; | ||
736 | tcp_ctask->sg_count++; | ||
737 | } | ||
738 | |||
739 | if (!ctask->data_count) { | ||
740 | if (rc && conn->datadgst_en) | ||
741 | /* | ||
742 | * data-in is complete, but buffer not... | ||
743 | */ | ||
744 | partial_sg_digest_update(&tcp_conn->rx_hash, | ||
745 | &sg[i], | ||
746 | sg[i].offset, | ||
747 | sg[i].length-rc); | ||
748 | rc = 0; | ||
749 | break; | ||
750 | } | ||
751 | |||
752 | if (!tcp_conn->in.copy) | ||
753 | return -EAGAIN; | ||
754 | } | ||
755 | BUG_ON(ctask->data_count); | ||
756 | 872 | ||
757 | /* check for non-exceptional status */ | 873 | /* Check if there are additional header segments |
758 | if (tcp_conn->in.hdr->flags & ISCSI_FLAG_DATA_STATUS) { | 874 | * *prior* to computing the digest, because we |
759 | debug_scsi("done [sc %lx res %d itt 0x%x flags 0x%x]\n", | 875 | * may need to go back to the caller for more. |
760 | (long)sc, sc->result, ctask->itt, | 876 | */ |
761 | tcp_conn->in.hdr->flags); | 877 | hdr = (struct iscsi_hdr *) tcp_conn->in.hdr_buf; |
762 | spin_lock(&conn->session->lock); | 878 | if (segment->copied == sizeof(struct iscsi_hdr) && hdr->hlength) { |
763 | __iscsi_complete_pdu(conn, tcp_conn->in.hdr, NULL, 0); | 879 | /* Bump the header length - the caller will |
764 | spin_unlock(&conn->session->lock); | 880 | * just loop around and get the AHS for us, and |
881 | * call again. */ | ||
882 | unsigned int ahslen = hdr->hlength << 2; | ||
883 | |||
884 | /* Make sure we don't overflow */ | ||
885 | if (sizeof(*hdr) + ahslen > sizeof(tcp_conn->in.hdr_buf)) | ||
886 | return ISCSI_ERR_AHSLEN; | ||
887 | |||
888 | segment->total_size += ahslen; | ||
889 | segment->size += ahslen; | ||
890 | return 0; | ||
765 | } | 891 | } |
766 | 892 | ||
767 | return rc; | 893 | /* We're done processing the header. See if we're doing |
768 | } | 894 | * header digests; if so, set up the recv_digest buffer |
769 | 895 | * and go back for more. */ | |
770 | static int | 896 | if (conn->hdrdgst_en) { |
771 | iscsi_data_recv(struct iscsi_conn *conn) | 897 | if (segment->digest_len == 0) { |
772 | { | 898 | iscsi_tcp_segment_splice_digest(segment, |
773 | struct iscsi_tcp_conn *tcp_conn = conn->dd_data; | 899 | segment->recv_digest); |
774 | int rc = 0, opcode; | 900 | return 0; |
775 | |||
776 | opcode = tcp_conn->in.hdr->opcode & ISCSI_OPCODE_MASK; | ||
777 | switch (opcode) { | ||
778 | case ISCSI_OP_SCSI_DATA_IN: | ||
779 | rc = iscsi_scsi_data_in(conn); | ||
780 | break; | ||
781 | case ISCSI_OP_SCSI_CMD_RSP: | ||
782 | case ISCSI_OP_TEXT_RSP: | ||
783 | case ISCSI_OP_LOGIN_RSP: | ||
784 | case ISCSI_OP_ASYNC_EVENT: | ||
785 | case ISCSI_OP_REJECT: | ||
786 | /* | ||
787 | * Collect data segment to the connection's data | ||
788 | * placeholder | ||
789 | */ | ||
790 | if (iscsi_tcp_copy(conn, tcp_conn->in.datalen)) { | ||
791 | rc = -EAGAIN; | ||
792 | goto exit; | ||
793 | } | 901 | } |
902 | iscsi_tcp_dgst_header(&tcp_conn->rx_hash, hdr, | ||
903 | segment->total_copied - ISCSI_DIGEST_SIZE, | ||
904 | segment->digest); | ||
794 | 905 | ||
795 | rc = iscsi_complete_pdu(conn, tcp_conn->in.hdr, conn->data, | 906 | if (!iscsi_tcp_dgst_verify(tcp_conn, segment)) |
796 | tcp_conn->in.datalen); | 907 | return ISCSI_ERR_HDR_DGST; |
797 | if (!rc && conn->datadgst_en && opcode != ISCSI_OP_LOGIN_RSP) | ||
798 | iscsi_recv_digest_update(tcp_conn, conn->data, | ||
799 | tcp_conn->in.datalen); | ||
800 | break; | ||
801 | default: | ||
802 | BUG_ON(1); | ||
803 | } | 908 | } |
804 | exit: | 909 | |
805 | return rc; | 910 | tcp_conn->in.hdr = hdr; |
911 | return iscsi_tcp_hdr_dissect(conn, hdr); | ||
806 | } | 912 | } |
807 | 913 | ||
808 | /** | 914 | /** |
809 | * iscsi_tcp_data_recv - TCP receive in sendfile fashion | 915 | * iscsi_tcp_recv - TCP receive in sendfile fashion |
810 | * @rd_desc: read descriptor | 916 | * @rd_desc: read descriptor |
811 | * @skb: socket buffer | 917 | * @skb: socket buffer |
812 | * @offset: offset in skb | 918 | * @offset: offset in skb |
813 | * @len: skb->len - offset | 919 | * @len: skb->len - offset |
814 | **/ | 920 | **/ |
815 | static int | 921 | static int |
816 | iscsi_tcp_data_recv(read_descriptor_t *rd_desc, struct sk_buff *skb, | 922 | iscsi_tcp_recv(read_descriptor_t *rd_desc, struct sk_buff *skb, |
817 | unsigned int offset, size_t len) | 923 | unsigned int offset, size_t len) |
818 | { | 924 | { |
819 | int rc; | ||
820 | struct iscsi_conn *conn = rd_desc->arg.data; | 925 | struct iscsi_conn *conn = rd_desc->arg.data; |
821 | struct iscsi_tcp_conn *tcp_conn = conn->dd_data; | 926 | struct iscsi_tcp_conn *tcp_conn = conn->dd_data; |
822 | int processed; | 927 | struct iscsi_segment *segment = &tcp_conn->in.segment; |
823 | char pad[ISCSI_PAD_LEN]; | 928 | struct skb_seq_state seq; |
824 | struct scatterlist sg; | 929 | unsigned int consumed = 0; |
825 | 930 | int rc = 0; | |
826 | /* | ||
827 | * Save current SKB and its offset in the corresponding | ||
828 | * connection context. | ||
829 | */ | ||
830 | tcp_conn->in.copy = skb->len - offset; | ||
831 | tcp_conn->in.offset = offset; | ||
832 | tcp_conn->in.skb = skb; | ||
833 | tcp_conn->in.len = tcp_conn->in.copy; | ||
834 | BUG_ON(tcp_conn->in.copy <= 0); | ||
835 | debug_tcp("in %d bytes\n", tcp_conn->in.copy); | ||
836 | 931 | ||
837 | more: | 932 | debug_tcp("in %d bytes\n", skb->len - offset); |
838 | tcp_conn->in.copied = 0; | ||
839 | rc = 0; | ||
840 | 933 | ||
841 | if (unlikely(conn->suspend_rx)) { | 934 | if (unlikely(conn->suspend_rx)) { |
842 | debug_tcp("conn %d Rx suspended!\n", conn->id); | 935 | debug_tcp("conn %d Rx suspended!\n", conn->id); |
843 | return 0; | 936 | return 0; |
844 | } | 937 | } |
845 | 938 | ||
846 | if (tcp_conn->in_progress == IN_PROGRESS_WAIT_HEADER || | 939 | skb_prepare_seq_read(skb, offset, skb->len, &seq); |
847 | tcp_conn->in_progress == IN_PROGRESS_HEADER_GATHER) { | 940 | while (1) { |
848 | rc = iscsi_hdr_extract(tcp_conn); | 941 | unsigned int avail; |
849 | if (rc) { | 942 | const u8 *ptr; |
850 | if (rc == -EAGAIN) | ||
851 | goto nomore; | ||
852 | else { | ||
853 | iscsi_conn_failure(conn, ISCSI_ERR_CONN_FAILED); | ||
854 | return 0; | ||
855 | } | ||
856 | } | ||
857 | 943 | ||
858 | /* | 944 | avail = skb_seq_read(consumed, &ptr, &seq); |
859 | * Verify and process incoming PDU header. | 945 | if (avail == 0) { |
860 | */ | 946 | debug_tcp("no more data avail. Consumed %d\n", |
861 | rc = iscsi_tcp_hdr_recv(conn); | 947 | consumed); |
862 | if (!rc && tcp_conn->in.datalen) { | 948 | break; |
863 | if (conn->datadgst_en) | ||
864 | crypto_hash_init(&tcp_conn->rx_hash); | ||
865 | tcp_conn->in_progress = IN_PROGRESS_DATA_RECV; | ||
866 | } else if (rc) { | ||
867 | iscsi_conn_failure(conn, rc); | ||
868 | return 0; | ||
869 | } | 949 | } |
870 | } | 950 | BUG_ON(segment->copied >= segment->size); |
871 | 951 | ||
872 | if (tcp_conn->in_progress == IN_PROGRESS_DDIGEST_RECV && | 952 | debug_tcp("skb %p ptr=%p avail=%u\n", skb, ptr, avail); |
873 | tcp_conn->in.copy) { | 953 | rc = iscsi_tcp_segment_recv(tcp_conn, segment, ptr, avail); |
874 | uint32_t recv_digest; | 954 | BUG_ON(rc == 0); |
875 | 955 | consumed += rc; | |
876 | debug_tcp("extra data_recv offset %d copy %d\n", | 956 | |
877 | tcp_conn->in.offset, tcp_conn->in.copy); | 957 | if (segment->total_copied >= segment->total_size) { |
878 | 958 | debug_tcp("segment done\n"); | |
879 | if (!tcp_conn->data_copied) { | 959 | rc = segment->done(tcp_conn, segment); |
880 | if (tcp_conn->in.padding) { | 960 | if (rc != 0) { |
881 | debug_tcp("padding -> %d\n", | 961 | skb_abort_seq_read(&seq); |
882 | tcp_conn->in.padding); | 962 | goto error; |
883 | memset(pad, 0, tcp_conn->in.padding); | ||
884 | sg_init_one(&sg, pad, tcp_conn->in.padding); | ||
885 | crypto_hash_update(&tcp_conn->rx_hash, | ||
886 | &sg, sg.length); | ||
887 | } | 963 | } |
888 | crypto_hash_final(&tcp_conn->rx_hash, | ||
889 | (u8 *) &tcp_conn->in.datadgst); | ||
890 | debug_tcp("rx digest 0x%x\n", tcp_conn->in.datadgst); | ||
891 | } | ||
892 | 964 | ||
893 | rc = iscsi_tcp_copy(conn, sizeof(uint32_t)); | 965 | /* The done() function sets up the |
894 | if (rc) { | 966 | * next segment. */ |
895 | if (rc == -EAGAIN) | ||
896 | goto again; | ||
897 | iscsi_conn_failure(conn, ISCSI_ERR_CONN_FAILED); | ||
898 | return 0; | ||
899 | } | ||
900 | |||
901 | memcpy(&recv_digest, conn->data, sizeof(uint32_t)); | ||
902 | if (recv_digest != tcp_conn->in.datadgst) { | ||
903 | debug_tcp("iscsi_tcp: data digest error!" | ||
904 | "0x%x != 0x%x\n", recv_digest, | ||
905 | tcp_conn->in.datadgst); | ||
906 | iscsi_conn_failure(conn, ISCSI_ERR_DATA_DGST); | ||
907 | return 0; | ||
908 | } else { | ||
909 | debug_tcp("iscsi_tcp: data digest match!" | ||
910 | "0x%x == 0x%x\n", recv_digest, | ||
911 | tcp_conn->in.datadgst); | ||
912 | tcp_conn->in_progress = IN_PROGRESS_WAIT_HEADER; | ||
913 | } | 967 | } |
914 | } | 968 | } |
969 | skb_abort_seq_read(&seq); | ||
970 | conn->rxdata_octets += consumed; | ||
971 | return consumed; | ||
915 | 972 | ||
916 | if (tcp_conn->in_progress == IN_PROGRESS_DATA_RECV && | 973 | error: |
917 | tcp_conn->in.copy) { | 974 | debug_tcp("Error receiving PDU, errno=%d\n", rc); |
918 | debug_tcp("data_recv offset %d copy %d\n", | 975 | iscsi_conn_failure(conn, ISCSI_ERR_CONN_FAILED); |
919 | tcp_conn->in.offset, tcp_conn->in.copy); | 976 | return 0; |
920 | |||
921 | rc = iscsi_data_recv(conn); | ||
922 | if (rc) { | ||
923 | if (rc == -EAGAIN) | ||
924 | goto again; | ||
925 | iscsi_conn_failure(conn, ISCSI_ERR_CONN_FAILED); | ||
926 | return 0; | ||
927 | } | ||
928 | |||
929 | if (tcp_conn->in.padding) | ||
930 | tcp_conn->in_progress = IN_PROGRESS_PAD_RECV; | ||
931 | else if (conn->datadgst_en) | ||
932 | tcp_conn->in_progress = IN_PROGRESS_DDIGEST_RECV; | ||
933 | else | ||
934 | tcp_conn->in_progress = IN_PROGRESS_WAIT_HEADER; | ||
935 | tcp_conn->data_copied = 0; | ||
936 | } | ||
937 | |||
938 | if (tcp_conn->in_progress == IN_PROGRESS_PAD_RECV && | ||
939 | tcp_conn->in.copy) { | ||
940 | int copylen = min(tcp_conn->in.padding - tcp_conn->data_copied, | ||
941 | tcp_conn->in.copy); | ||
942 | |||
943 | tcp_conn->in.copy -= copylen; | ||
944 | tcp_conn->in.offset += copylen; | ||
945 | tcp_conn->data_copied += copylen; | ||
946 | |||
947 | if (tcp_conn->data_copied != tcp_conn->in.padding) | ||
948 | tcp_conn->in_progress = IN_PROGRESS_PAD_RECV; | ||
949 | else if (conn->datadgst_en) | ||
950 | tcp_conn->in_progress = IN_PROGRESS_DDIGEST_RECV; | ||
951 | else | ||
952 | tcp_conn->in_progress = IN_PROGRESS_WAIT_HEADER; | ||
953 | tcp_conn->data_copied = 0; | ||
954 | } | ||
955 | |||
956 | debug_tcp("f, processed %d from out of %d padding %d\n", | ||
957 | tcp_conn->in.offset - offset, (int)len, tcp_conn->in.padding); | ||
958 | BUG_ON(tcp_conn->in.offset - offset > len); | ||
959 | |||
960 | if (tcp_conn->in.offset - offset != len) { | ||
961 | debug_tcp("continue to process %d bytes\n", | ||
962 | (int)len - (tcp_conn->in.offset - offset)); | ||
963 | goto more; | ||
964 | } | ||
965 | |||
966 | nomore: | ||
967 | processed = tcp_conn->in.offset - offset; | ||
968 | BUG_ON(processed == 0); | ||
969 | return processed; | ||
970 | |||
971 | again: | ||
972 | processed = tcp_conn->in.offset - offset; | ||
973 | debug_tcp("c, processed %d from out of %d rd_desc_cnt %d\n", | ||
974 | processed, (int)len, (int)rd_desc->count); | ||
975 | BUG_ON(processed == 0); | ||
976 | BUG_ON(processed > len); | ||
977 | |||
978 | conn->rxdata_octets += processed; | ||
979 | return processed; | ||
980 | } | 977 | } |
981 | 978 | ||
982 | static void | 979 | static void |
983 | iscsi_tcp_data_ready(struct sock *sk, int flag) | 980 | iscsi_tcp_data_ready(struct sock *sk, int flag) |
984 | { | 981 | { |
985 | struct iscsi_conn *conn = sk->sk_user_data; | 982 | struct iscsi_conn *conn = sk->sk_user_data; |
983 | struct iscsi_tcp_conn *tcp_conn = conn->dd_data; | ||
986 | read_descriptor_t rd_desc; | 984 | read_descriptor_t rd_desc; |
987 | 985 | ||
988 | read_lock(&sk->sk_callback_lock); | 986 | read_lock(&sk->sk_callback_lock); |
989 | 987 | ||
990 | /* | 988 | /* |
991 | * Use rd_desc to pass 'conn' to iscsi_tcp_data_recv. | 989 | * Use rd_desc to pass 'conn' to iscsi_tcp_recv. |
992 | * We set count to 1 because we want the network layer to | 990 | * We set count to 1 because we want the network layer to |
993 | * hand us all the skbs that are available. iscsi_tcp_data_recv | 991 | * hand us all the skbs that are available. iscsi_tcp_recv |
994 | * hand us all the skbs that are available. iscsi_tcp_recv | 992 | * handles pdus that cross buffers or pdus that still need data. |
995 | */ | 993 | */ |
996 | rd_desc.arg.data = conn; | 994 | rd_desc.arg.data = conn; |
997 | rd_desc.count = 1; | 995 | rd_desc.count = 1; |
998 | tcp_read_sock(sk, &rd_desc, iscsi_tcp_data_recv); | 996 | tcp_read_sock(sk, &rd_desc, iscsi_tcp_recv); |
999 | 997 | ||
1000 | read_unlock(&sk->sk_callback_lock); | 998 | read_unlock(&sk->sk_callback_lock); |
999 | |||
1000 | /* If we had to (atomically) map a highmem page, | ||
1001 | * unmap it now. */ | ||
1002 | iscsi_tcp_segment_unmap(&tcp_conn->in.segment); | ||
1001 | } | 1003 | } |
1002 | 1004 | ||
1003 | static void | 1005 | static void |
@@ -1077,121 +1079,173 @@ iscsi_conn_restore_callbacks(struct iscsi_tcp_conn *tcp_conn) | |||
1077 | } | 1079 | } |
1078 | 1080 | ||
1079 | /** | 1081 | /** |
1080 | * iscsi_send - generic send routine | 1082 | * iscsi_xmit - TCP transmit |
1081 | * @sk: kernel's socket | 1083 | **/ |
1082 | * @buf: buffer to write from | 1084 | static int |
1083 | * @size: actual size to write | 1085 | iscsi_xmit(struct iscsi_conn *conn) |
1084 | * @flags: socket's flags | ||
1085 | */ | ||
1086 | static inline int | ||
1087 | iscsi_send(struct iscsi_conn *conn, struct iscsi_buf *buf, int size, int flags) | ||
1088 | { | 1086 | { |
1089 | struct iscsi_tcp_conn *tcp_conn = conn->dd_data; | 1087 | struct iscsi_tcp_conn *tcp_conn = conn->dd_data; |
1090 | struct socket *sk = tcp_conn->sock; | 1088 | struct iscsi_segment *segment = &tcp_conn->out.segment; |
1091 | int offset = buf->sg.offset + buf->sent, res; | 1089 | unsigned int consumed = 0; |
1090 | int rc = 0; | ||
1092 | 1091 | ||
1093 | /* | 1092 | while (1) { |
1094 | * if we got use_sg=0 or are sending something we kmallocd | 1093 | rc = iscsi_tcp_xmit_segment(tcp_conn, segment); |
1095 | * then we did not have to do kmap (kmap returns page_address) | 1094 | if (rc < 0) |
1096 | * | 1095 | goto error; |
1097 | * if we got use_sg > 0, but had to drop down, we do not | 1096 | if (rc == 0) |
1098 | * set clustering so this should only happen for that | 1097 | break; |
1099 | * slab case. | 1098 | |
1100 | */ | 1099 | consumed += rc; |
1101 | if (buf->use_sendmsg) | 1100 | |
1102 | res = sock_no_sendpage(sk, sg_page(&buf->sg), offset, size, flags); | 1101 | if (segment->total_copied >= segment->total_size) { |
1103 | else | 1102 | if (segment->done != NULL) { |
1104 | res = tcp_conn->sendpage(sk, sg_page(&buf->sg), offset, size, flags); | 1103 | rc = segment->done(tcp_conn, segment); |
1105 | 1104 | if (rc < 0) | |
1106 | if (res >= 0) { | 1105 | goto error; |
1107 | conn->txdata_octets += res; | 1106 | } |
1108 | buf->sent += res; | 1107 | } |
1109 | return res; | ||
1110 | } | 1108 | } |
1111 | 1109 | ||
1112 | tcp_conn->sendpage_failures_cnt++; | 1110 | debug_tcp("xmit %d bytes\n", consumed); |
1113 | if (res == -EAGAIN) | 1111 | |
1114 | res = -ENOBUFS; | 1112 | conn->txdata_octets += consumed; |
1115 | else | 1113 | return consumed; |
1116 | iscsi_conn_failure(conn, ISCSI_ERR_CONN_FAILED); | 1114 | |
1117 | return res; | 1115 | error: |
1116 | /* Transmit error. We could initiate error recovery | ||
1117 | * here. */ | ||
1118 | debug_tcp("Error sending PDU, errno=%d\n", rc); | ||
1119 | iscsi_conn_failure(conn, ISCSI_ERR_CONN_FAILED); | ||
1120 | return rc; | ||
1118 | } | 1121 | } |
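For readers new to the segment-based transmit path: iscsi_xmit() above keeps pushing the current out.segment until it is drained, and fires the segment's done() callback once total_copied reaches total_size (which is how the header segment hands over to the data segment). A minimal user-space model of that loop, with purely illustrative types and a fake 4-bytes-per-call sender, looks like this:

#include <stddef.h>
#include <stdio.h>

/* Illustrative stand-in for struct iscsi_segment: only the bookkeeping
 * fields the transmit loop cares about. */
struct segment {
	size_t total_size;
	size_t total_copied;
	int  (*done)(struct segment *seg);	/* fired when fully sent */
};

/* Fake socket send: accepts at most 4 bytes per call, never blocks. */
static int send_some(struct segment *seg)
{
	size_t left = seg->total_size - seg->total_copied;
	size_t n = left < 4 ? left : 4;

	seg->total_copied += n;
	return (int)n;			/* 0 means the segment is drained */
}

static int xmit(struct segment *seg)
{
	int consumed = 0, rc;

	while (1) {
		rc = send_some(seg);
		if (rc < 0)
			return rc;	/* transmit error */
		if (rc == 0)
			break;
		consumed += rc;
		if (seg->total_copied >= seg->total_size && seg->done)
			seg->done(seg);	/* e.g. switch to the data segment */
	}
	return consumed;
}

static int hdr_done(struct segment *seg)
{
	printf("header done after %zu bytes\n", seg->total_copied);
	return 0;
}

int main(void)
{
	struct segment hdr = { 52, 0, hdr_done };	/* 48-byte BHS + 4-byte digest */

	printf("xmit consumed %d bytes\n", xmit(&hdr));
	return 0;
}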
1119 | 1122 | ||
1120 | /** | 1123 | /** |
1121 | * iscsi_sendhdr - send PDU Header via tcp_sendpage() | 1124 | * iscsi_tcp_xmit_qlen - return the number of bytes queued for xmit |
1122 | * @conn: iscsi connection | 1125 | */ |
1123 | * @buf: buffer to write from | ||
1124 | * @datalen: lenght of data to be sent after the header | ||
1125 | * | ||
1126 | * Notes: | ||
1127 | * (Tx, Fast Path) | ||
1128 | **/ | ||
1129 | static inline int | 1126 | static inline int |
1130 | iscsi_sendhdr(struct iscsi_conn *conn, struct iscsi_buf *buf, int datalen) | 1127 | iscsi_tcp_xmit_qlen(struct iscsi_conn *conn) |
1131 | { | 1128 | { |
1132 | int flags = 0; /* MSG_DONTWAIT; */ | 1129 | struct iscsi_tcp_conn *tcp_conn = conn->dd_data; |
1133 | int res, size; | 1130 | struct iscsi_segment *segment = &tcp_conn->out.segment; |
1134 | |||
1135 | size = buf->sg.length - buf->sent; | ||
1136 | BUG_ON(buf->sent + size > buf->sg.length); | ||
1137 | if (buf->sent + size != buf->sg.length || datalen) | ||
1138 | flags |= MSG_MORE; | ||
1139 | |||
1140 | res = iscsi_send(conn, buf, size, flags); | ||
1141 | debug_tcp("sendhdr %d bytes, sent %d res %d\n", size, buf->sent, res); | ||
1142 | if (res >= 0) { | ||
1143 | if (size != res) | ||
1144 | return -EAGAIN; | ||
1145 | return 0; | ||
1146 | } | ||
1147 | 1131 | ||
1148 | return res; | 1132 | return segment->total_copied - segment->total_size; |
1149 | } | 1133 | } |
1150 | 1134 | ||
1151 | /** | ||
1152 | * iscsi_sendpage - send one page of iSCSI Data-Out. | ||
1153 | * @conn: iscsi connection | ||
1154 | * @buf: buffer to write from | ||
1155 | * @count: remaining data | ||
1156 | * @sent: number of bytes sent | ||
1157 | * | ||
1158 | * Notes: | ||
1159 | * (Tx, Fast Path) | ||
1160 | **/ | ||
1161 | static inline int | 1135 | static inline int |
1162 | iscsi_sendpage(struct iscsi_conn *conn, struct iscsi_buf *buf, | 1136 | iscsi_tcp_flush(struct iscsi_conn *conn) |
1163 | int *count, int *sent) | ||
1164 | { | 1137 | { |
1165 | int flags = 0; /* MSG_DONTWAIT; */ | 1138 | int rc; |
1166 | int res, size; | 1139 | |
1167 | 1140 | while (iscsi_tcp_xmit_qlen(conn)) { | |
1168 | size = buf->sg.length - buf->sent; | 1141 | rc = iscsi_xmit(conn); |
1169 | BUG_ON(buf->sent + size > buf->sg.length); | 1142 | if (rc == 0) |
1170 | if (size > *count) | ||
1171 | size = *count; | ||
1172 | if (buf->sent + size != buf->sg.length || *count != size) | ||
1173 | flags |= MSG_MORE; | ||
1174 | |||
1175 | res = iscsi_send(conn, buf, size, flags); | ||
1176 | debug_tcp("sendpage: %d bytes, sent %d left %d sent %d res %d\n", | ||
1177 | size, buf->sent, *count, *sent, res); | ||
1178 | if (res >= 0) { | ||
1179 | *count -= res; | ||
1180 | *sent += res; | ||
1181 | if (size != res) | ||
1182 | return -EAGAIN; | 1143 | return -EAGAIN; |
1183 | return 0; | 1144 | if (rc < 0) |
1145 | return rc; | ||
1184 | } | 1146 | } |
1185 | 1147 | ||
1186 | return res; | 1148 | return 0; |
1187 | } | 1149 | } |
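The helper pair above replaces the old per-buffer iscsi_sendhdr()/iscsi_sendpage() retry logic: iscsi_tcp_xmit_qlen() is non-zero while the out segment still has bytes to copy, and iscsi_tcp_flush() keeps calling iscsi_xmit() until the segment is drained, converting a zero-progress pass into -EAGAIN so the caller can retry later. A self-contained sketch of that contract, with toy stand-ins for the two driver functions:

#include <errno.h>
#include <stdio.h>

/* Toy stand-ins for iscsi_tcp_xmit_qlen()/iscsi_xmit(): 10 bytes queued,
 * the "socket" accepts 4 bytes per call and never blocks. */
static int queued = 10;
static int queue_len(void) { return queued; }
static int do_xmit(void)   { int n = queued < 4 ? queued : 4; queued -= n; return n; }

/* Same error contract as the new iscsi_tcp_flush(): keep transmitting
 * until the out segment is drained; a zero-progress pass becomes -EAGAIN. */
static int flush(void)
{
	int rc;

	while (queue_len()) {
		rc = do_xmit();
		if (rc == 0)
			return -EAGAIN;	/* socket full: caller retries later */
		if (rc < 0)
			return rc;	/* hard error */
	}
	return 0;
}

int main(void)
{
	printf("flush -> %d, %d bytes left\n", flush(), queued);
	return 0;
}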
1188 | 1150 | ||
1189 | static inline void | 1151 | /* |
1190 | iscsi_data_digest_init(struct iscsi_tcp_conn *tcp_conn, | 1152 | * This is called when we're done sending the header. |
1191 | struct iscsi_tcp_cmd_task *tcp_ctask) | 1153 | * Simply copy the data_segment to the send segment, and return. |
1154 | */ | ||
1155 | static int | ||
1156 | iscsi_tcp_send_hdr_done(struct iscsi_tcp_conn *tcp_conn, | ||
1157 | struct iscsi_segment *segment) | ||
1158 | { | ||
1159 | tcp_conn->out.segment = tcp_conn->out.data_segment; | ||
1160 | debug_tcp("Header done. Next segment size %u total_size %u\n", | ||
1161 | tcp_conn->out.segment.size, tcp_conn->out.segment.total_size); | ||
1162 | return 0; | ||
1163 | } | ||
1164 | |||
1165 | static void | ||
1166 | iscsi_tcp_send_hdr_prep(struct iscsi_conn *conn, void *hdr, size_t hdrlen) | ||
1192 | { | 1167 | { |
1193 | crypto_hash_init(&tcp_conn->tx_hash); | 1168 | struct iscsi_tcp_conn *tcp_conn = conn->dd_data; |
1194 | tcp_ctask->digest_count = 4; | 1169 | |
1170 | debug_tcp("%s(%p%s)\n", __FUNCTION__, tcp_conn, | ||
1171 | conn->hdrdgst_en? ", digest enabled" : ""); | ||
1172 | |||
1173 | /* Clear the data segment - needs to be filled in by the | ||
1174 | * caller using iscsi_tcp_send_data_prep() */ | ||
1175 | memset(&tcp_conn->out.data_segment, 0, sizeof(struct iscsi_segment)); | ||
1176 | |||
1177 | /* If header digest is enabled, compute the CRC and | ||
1178 | * place the digest into the same buffer. We make | ||
1179 | * sure that both iscsi_tcp_ctask and mtask have | ||
1180 | * sufficient room. | ||
1181 | */ | ||
1182 | if (conn->hdrdgst_en) { | ||
1183 | iscsi_tcp_dgst_header(&tcp_conn->tx_hash, hdr, hdrlen, | ||
1184 | hdr + hdrlen); | ||
1185 | hdrlen += ISCSI_DIGEST_SIZE; | ||
1186 | } | ||
1187 | |||
1188 | /* Remember header pointer for later, when we need | ||
1189 | * to decide whether there's a payload to go along | ||
1190 | * with the header. */ | ||
1191 | tcp_conn->out.hdr = hdr; | ||
1192 | |||
1193 | iscsi_segment_init_linear(&tcp_conn->out.segment, hdr, hdrlen, | ||
1194 | iscsi_tcp_send_hdr_done, NULL); | ||
1195 | } | ||
1196 | |||
1197 | /* | ||
1198 | * Prepare the send buffer for the payload data. | ||
1199 | * Padding and checksumming will all be taken care | ||
1200 | * of by the iscsi_segment routines. | ||
1201 | */ | ||
1202 | static int | ||
1203 | iscsi_tcp_send_data_prep(struct iscsi_conn *conn, struct scatterlist *sg, | ||
1204 | unsigned int count, unsigned int offset, | ||
1205 | unsigned int len) | ||
1206 | { | ||
1207 | struct iscsi_tcp_conn *tcp_conn = conn->dd_data; | ||
1208 | struct hash_desc *tx_hash = NULL; | ||
1209 | unsigned int hdr_spec_len; | ||
1210 | |||
1211 | debug_tcp("%s(%p, offset=%d, datalen=%d%s)\n", __FUNCTION__, | ||
1212 | tcp_conn, offset, len, | ||
1213 | conn->datadgst_en? ", digest enabled" : ""); | ||
1214 | |||
1215 | /* Make sure the datalen matches what the caller | ||
1216 | said he would send. */ | ||
1217 | hdr_spec_len = ntoh24(tcp_conn->out.hdr->dlength); | ||
1218 | WARN_ON(iscsi_padded(len) != iscsi_padded(hdr_spec_len)); | ||
1219 | |||
1220 | if (conn->datadgst_en) | ||
1221 | tx_hash = &tcp_conn->tx_hash; | ||
1222 | |||
1223 | return iscsi_segment_seek_sg(&tcp_conn->out.data_segment, | ||
1224 | sg, count, offset, len, | ||
1225 | NULL, tx_hash); | ||
1226 | } | ||
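iscsi_segment_seek_sg() (used above) has to translate a byte offset within the command's data buffer into a scatterlist entry plus an in-entry offset before any data is copied or hashed. The same bookkeeping over a plain array of buffers, as a hedged illustration (struct chunk and seek_chunks are invented names, not driver API):

#include <stddef.h>
#include <stdio.h>

/* Plain-C stand-in for a scatterlist entry. */
struct chunk { const void *base; size_t len; };

/* Locate the chunk and intra-chunk offset that hold byte 'offset' of the
 * logical buffer -- the same walk a scatterlist seek performs. */
static int seek_chunks(const struct chunk *c, size_t nchunks,
		       size_t offset, size_t *idx, size_t *off)
{
	for (size_t i = 0; i < nchunks; i++) {
		if (offset < c[i].len) {
			*idx = i;
			*off = offset;
			return 0;
		}
		offset -= c[i].len;
	}
	return -1;	/* offset past the end of the buffer */
}

int main(void)
{
	char a[512], b[1024], d[256];
	struct chunk sg[] = { { a, sizeof(a) }, { b, sizeof(b) }, { d, sizeof(d) } };
	size_t idx, off;

	if (seek_chunks(sg, 3, 700, &idx, &off) == 0)
		printf("offset 700 -> chunk %zu, offset %zu\n", idx, off);
	return 0;
}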
1227 | |||
1228 | static void | ||
1229 | iscsi_tcp_send_linear_data_prepare(struct iscsi_conn *conn, void *data, | ||
1230 | size_t len) | ||
1231 | { | ||
1232 | struct iscsi_tcp_conn *tcp_conn = conn->dd_data; | ||
1233 | struct hash_desc *tx_hash = NULL; | ||
1234 | unsigned int hdr_spec_len; | ||
1235 | |||
1236 | debug_tcp("%s(%p, datalen=%d%s)\n", __FUNCTION__, tcp_conn, len, | ||
1237 | conn->datadgst_en? ", digest enabled" : ""); | ||
1238 | |||
1239 | /* Make sure the datalen matches what the caller | ||
1240 | said he would send. */ | ||
1241 | hdr_spec_len = ntoh24(tcp_conn->out.hdr->dlength); | ||
1242 | WARN_ON(iscsi_padded(len) != iscsi_padded(hdr_spec_len)); | ||
1243 | |||
1244 | if (conn->datadgst_en) | ||
1245 | tx_hash = &tcp_conn->tx_hash; | ||
1246 | |||
1247 | iscsi_segment_init_linear(&tcp_conn->out.data_segment, | ||
1248 | data, len, NULL, tx_hash); | ||
1195 | } | 1249 | } |
1196 | 1250 | ||
1197 | /** | 1251 | /** |
@@ -1207,12 +1261,17 @@ iscsi_data_digest_init(struct iscsi_tcp_conn *tcp_conn, | |||
1207 | * | 1261 | * |
1208 | * Called under connection lock. | 1262 | * Called under connection lock. |
1209 | **/ | 1263 | **/ |
1210 | static void | 1264 | static int |
1211 | iscsi_solicit_data_cont(struct iscsi_conn *conn, struct iscsi_cmd_task *ctask, | 1265 | iscsi_solicit_data_cont(struct iscsi_conn *conn, struct iscsi_cmd_task *ctask, |
1212 | struct iscsi_r2t_info *r2t, int left) | 1266 | struct iscsi_r2t_info *r2t) |
1213 | { | 1267 | { |
1214 | struct iscsi_data *hdr; | 1268 | struct iscsi_data *hdr; |
1215 | int new_offset; | 1269 | int new_offset, left; |
1270 | |||
1271 | BUG_ON(r2t->data_length - r2t->sent < 0); | ||
1272 | left = r2t->data_length - r2t->sent; | ||
1273 | if (left == 0) | ||
1274 | return 0; | ||
1216 | 1275 | ||
1217 | hdr = &r2t->dtask.hdr; | 1276 | hdr = &r2t->dtask.hdr; |
1218 | memset(hdr, 0, sizeof(struct iscsi_data)); | 1277 | memset(hdr, 0, sizeof(struct iscsi_data)); |
@@ -1233,43 +1292,46 @@ iscsi_solicit_data_cont(struct iscsi_conn *conn, struct iscsi_cmd_task *ctask, | |||
1233 | r2t->data_count = left; | 1292 | r2t->data_count = left; |
1234 | hdr->flags = ISCSI_FLAG_CMD_FINAL; | 1293 | hdr->flags = ISCSI_FLAG_CMD_FINAL; |
1235 | } | 1294 | } |
1236 | conn->dataout_pdus_cnt++; | ||
1237 | |||
1238 | iscsi_buf_init_iov(&r2t->headbuf, (char*)hdr, | ||
1239 | sizeof(struct iscsi_hdr)); | ||
1240 | |||
1241 | if (iscsi_buf_left(&r2t->sendbuf)) | ||
1242 | return; | ||
1243 | |||
1244 | iscsi_buf_init_sg(&r2t->sendbuf, r2t->sg); | ||
1245 | r2t->sg += 1; | ||
1246 | } | ||
1247 | 1295 | ||
1248 | static void iscsi_set_padding(struct iscsi_tcp_cmd_task *tcp_ctask, | 1296 | conn->dataout_pdus_cnt++; |
1249 | unsigned long len) | 1297 | return 1; |
1250 | { | ||
1251 | tcp_ctask->pad_count = len & (ISCSI_PAD_LEN - 1); | ||
1252 | if (!tcp_ctask->pad_count) | ||
1253 | return; | ||
1254 | |||
1255 | tcp_ctask->pad_count = ISCSI_PAD_LEN - tcp_ctask->pad_count; | ||
1256 | debug_scsi("write padding %d bytes\n", tcp_ctask->pad_count); | ||
1257 | set_bit(XMSTATE_BIT_W_PAD, &tcp_ctask->xmstate); | ||
1258 | } | 1298 | } |
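With this change iscsi_solicit_data_cont() does its own accounting: it recomputes how much of the R2T is still owed and returns 0 once the R2T is fully satisfied, 1 when another Data-Out must be prepared. A toy model of that return convention (max_burst stands in for the negotiated MaxBurstLength; header construction and the FINAL flag are omitted):

#include <stdio.h>

struct r2t { unsigned data_length, sent, data_count; };

/* Returns 1 if another Data-Out PDU is needed for this R2T, 0 if done --
 * mirroring the new return convention in a simplified form. */
static int r2t_cont(struct r2t *r2t, unsigned max_burst)
{
	unsigned left = r2t->data_length - r2t->sent;

	if (left == 0)
		return 0;
	r2t->data_count = left > max_burst ? max_burst : left;
	return 1;
}

int main(void)
{
	struct r2t r = { .data_length = 24576, .sent = 0 };

	while (r2t_cont(&r, 8192)) {
		printf("Data-Out of %u bytes at offset %u\n", r.data_count, r.sent);
		r.sent += r.data_count;
	}
	return 0;
}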
1259 | 1299 | ||
1260 | /** | 1300 | /** |
1261 | * iscsi_tcp_cmd_init - Initialize iSCSI SCSI_READ or SCSI_WRITE commands | 1301 | * iscsi_tcp_ctask_init - Initialize iSCSI SCSI_READ or SCSI_WRITE commands |
1262 | * @conn: iscsi connection | 1302 | * @conn: iscsi connection |
1263 | * @ctask: scsi command task | 1303 | * @ctask: scsi command task |
1264 | * @sc: scsi command | 1304 | * @sc: scsi command |
1265 | **/ | 1305 | **/ |
1266 | static void | 1306 | static int |
1267 | iscsi_tcp_cmd_init(struct iscsi_cmd_task *ctask) | 1307 | iscsi_tcp_ctask_init(struct iscsi_cmd_task *ctask) |
1268 | { | 1308 | { |
1269 | struct iscsi_tcp_cmd_task *tcp_ctask = ctask->dd_data; | 1309 | struct iscsi_tcp_cmd_task *tcp_ctask = ctask->dd_data; |
1310 | struct iscsi_conn *conn = ctask->conn; | ||
1311 | struct scsi_cmnd *sc = ctask->sc; | ||
1312 | int err; | ||
1270 | 1313 | ||
1271 | BUG_ON(__kfifo_len(tcp_ctask->r2tqueue)); | 1314 | BUG_ON(__kfifo_len(tcp_ctask->r2tqueue)); |
1272 | tcp_ctask->xmstate = 1 << XMSTATE_BIT_CMD_HDR_INIT; | 1315 | tcp_ctask->sent = 0; |
1316 | tcp_ctask->exp_datasn = 0; | ||
1317 | |||
1318 | /* Prepare PDU, optionally w/ immediate data */ | ||
1319 | debug_scsi("ctask deq [cid %d itt 0x%x imm %d unsol %d]\n", | ||
1320 | conn->id, ctask->itt, ctask->imm_count, | ||
1321 | ctask->unsol_count); | ||
1322 | iscsi_tcp_send_hdr_prep(conn, ctask->hdr, ctask->hdr_len); | ||
1323 | |||
1324 | if (!ctask->imm_count) | ||
1325 | return 0; | ||
1326 | |||
1327 | /* If we have immediate data, attach a payload */ | ||
1328 | err = iscsi_tcp_send_data_prep(conn, scsi_sglist(sc), scsi_sg_count(sc), | ||
1329 | 0, ctask->imm_count); | ||
1330 | if (err) | ||
1331 | return err; | ||
1332 | tcp_ctask->sent += ctask->imm_count; | ||
1333 | ctask->imm_count = 0; | ||
1334 | return 0; | ||
1273 | } | 1335 | } |
1274 | 1336 | ||
1275 | /** | 1337 | /** |
@@ -1281,484 +1343,130 @@ iscsi_tcp_cmd_init(struct iscsi_cmd_task *ctask) | |||
1281 | * The function can return -EAGAIN in which case caller must | 1343 | * The function can return -EAGAIN in which case caller must |
1282 | * call it again later, or recover. '0' return code means successful | 1344 | * call it again later, or recover. '0' return code means successful |
1283 | * xmit. | 1345 | * xmit. |
1284 | * | ||
1285 | * Management xmit state machine consists of these states: | ||
1286 | * XMSTATE_BIT_IMM_HDR_INIT - calculate digest of PDU Header | ||
1287 | * XMSTATE_BIT_IMM_HDR - PDU Header xmit in progress | ||
1288 | * XMSTATE_BIT_IMM_DATA - PDU Data xmit in progress | ||
1289 | * XMSTATE_VALUE_IDLE - management PDU is done | ||
1290 | **/ | 1346 | **/ |
1291 | static int | 1347 | static int |
1292 | iscsi_tcp_mtask_xmit(struct iscsi_conn *conn, struct iscsi_mgmt_task *mtask) | 1348 | iscsi_tcp_mtask_xmit(struct iscsi_conn *conn, struct iscsi_mgmt_task *mtask) |
1293 | { | 1349 | { |
1294 | struct iscsi_tcp_mgmt_task *tcp_mtask = mtask->dd_data; | ||
1295 | int rc; | 1350 | int rc; |
1296 | 1351 | ||
1297 | debug_scsi("mtask deq [cid %d state %x itt 0x%x]\n", | 1352 | /* Flush any pending data first. */ |
1298 | conn->id, tcp_mtask->xmstate, mtask->itt); | 1353 | rc = iscsi_tcp_flush(conn); |
1299 | 1354 | if (rc < 0) | |
1300 | if (test_bit(XMSTATE_BIT_IMM_HDR_INIT, &tcp_mtask->xmstate)) { | 1355 | return rc; |
1301 | iscsi_buf_init_iov(&tcp_mtask->headbuf, (char*)mtask->hdr, | ||
1302 | sizeof(struct iscsi_hdr)); | ||
1303 | |||
1304 | if (mtask->data_count) { | ||
1305 | set_bit(XMSTATE_BIT_IMM_DATA, &tcp_mtask->xmstate); | ||
1306 | iscsi_buf_init_iov(&tcp_mtask->sendbuf, | ||
1307 | (char*)mtask->data, | ||
1308 | mtask->data_count); | ||
1309 | } | ||
1310 | |||
1311 | if (conn->c_stage != ISCSI_CONN_INITIAL_STAGE && | ||
1312 | conn->stop_stage != STOP_CONN_RECOVER && | ||
1313 | conn->hdrdgst_en) | ||
1314 | iscsi_hdr_digest(conn, &tcp_mtask->headbuf, | ||
1315 | (u8*)tcp_mtask->hdrext); | ||
1316 | |||
1317 | tcp_mtask->sent = 0; | ||
1318 | clear_bit(XMSTATE_BIT_IMM_HDR_INIT, &tcp_mtask->xmstate); | ||
1319 | set_bit(XMSTATE_BIT_IMM_HDR, &tcp_mtask->xmstate); | ||
1320 | } | ||
1321 | |||
1322 | if (test_bit(XMSTATE_BIT_IMM_HDR, &tcp_mtask->xmstate)) { | ||
1323 | rc = iscsi_sendhdr(conn, &tcp_mtask->headbuf, | ||
1324 | mtask->data_count); | ||
1325 | if (rc) | ||
1326 | return rc; | ||
1327 | clear_bit(XMSTATE_BIT_IMM_HDR, &tcp_mtask->xmstate); | ||
1328 | } | ||
1329 | |||
1330 | if (test_and_clear_bit(XMSTATE_BIT_IMM_DATA, &tcp_mtask->xmstate)) { | ||
1331 | BUG_ON(!mtask->data_count); | ||
1332 | /* FIXME: implement. | ||
1333 | * Virtual buffer could be spreaded across multiple pages... | ||
1334 | */ | ||
1335 | do { | ||
1336 | int rc; | ||
1337 | |||
1338 | rc = iscsi_sendpage(conn, &tcp_mtask->sendbuf, | ||
1339 | &mtask->data_count, &tcp_mtask->sent); | ||
1340 | if (rc) { | ||
1341 | set_bit(XMSTATE_BIT_IMM_DATA, &tcp_mtask->xmstate); | ||
1342 | return rc; | ||
1343 | } | ||
1344 | } while (mtask->data_count); | ||
1345 | } | ||
1346 | 1356 | ||
1347 | BUG_ON(tcp_mtask->xmstate != XMSTATE_VALUE_IDLE); | ||
1348 | if (mtask->hdr->itt == RESERVED_ITT) { | 1357 | if (mtask->hdr->itt == RESERVED_ITT) { |
1349 | struct iscsi_session *session = conn->session; | 1358 | struct iscsi_session *session = conn->session; |
1350 | 1359 | ||
1351 | spin_lock_bh(&session->lock); | 1360 | spin_lock_bh(&session->lock); |
1352 | list_del(&conn->mtask->running); | 1361 | iscsi_free_mgmt_task(conn, mtask); |
1353 | __kfifo_put(session->mgmtpool.queue, (void*)&conn->mtask, | ||
1354 | sizeof(void*)); | ||
1355 | spin_unlock_bh(&session->lock); | 1362 | spin_unlock_bh(&session->lock); |
1356 | } | 1363 | } |
1364 | |||
1357 | return 0; | 1365 | return 0; |
1358 | } | 1366 | } |
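Management PDUs sent with RESERVED_ITT (for example Nop-Outs that expect no response) have no completion to wait for, so the new iscsi_tcp_mtask_xmit() recycles the task as soon as the PDU has been flushed, under the session lock. A rough user-space analogue of that "free it once it is on the wire" step, with a counter standing in for the management task pool:

#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t session_lock = PTHREAD_MUTEX_INITIALIZER;
static int free_mgmt_slots;	/* stands in for the mgmt task pool */

static void mgmt_pdu_sent(int is_reserved_itt)
{
	if (!is_reserved_itt)
		return;		/* a response will complete this task later */

	pthread_mutex_lock(&session_lock);
	free_mgmt_slots++;	/* stands in for iscsi_free_mgmt_task() */
	pthread_mutex_unlock(&session_lock);
}

int main(void)
{
	mgmt_pdu_sent(1);
	printf("free mgmt slots: %d\n", free_mgmt_slots);
	return 0;
}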
1359 | 1367 | ||
1368 | /* | ||
1369 | * iscsi_tcp_ctask_xmit - xmit normal PDU task | ||
1370 | * @conn: iscsi connection | ||
1371 | * @ctask: iscsi command task | ||
1372 | * | ||
1373 | * We're expected to return 0 when everything was transmitted successfully, | ||
1374 | * -EAGAIN if there's still data in the queue, or != 0 for any other kind | ||
1375 | * of error. | ||
1376 | */ | ||
1360 | static int | 1377 | static int |
1361 | iscsi_send_cmd_hdr(struct iscsi_conn *conn, struct iscsi_cmd_task *ctask) | 1378 | iscsi_tcp_ctask_xmit(struct iscsi_conn *conn, struct iscsi_cmd_task *ctask) |
1362 | { | 1379 | { |
1363 | struct scsi_cmnd *sc = ctask->sc; | ||
1364 | struct iscsi_tcp_cmd_task *tcp_ctask = ctask->dd_data; | 1380 | struct iscsi_tcp_cmd_task *tcp_ctask = ctask->dd_data; |
1381 | struct scsi_cmnd *sc = ctask->sc; | ||
1365 | int rc = 0; | 1382 | int rc = 0; |
1366 | 1383 | ||
1367 | if (test_bit(XMSTATE_BIT_CMD_HDR_INIT, &tcp_ctask->xmstate)) { | 1384 | flush: |
1368 | tcp_ctask->sent = 0; | 1385 | /* Flush any pending data first. */ |
1369 | tcp_ctask->sg_count = 0; | 1386 | rc = iscsi_tcp_flush(conn); |
1370 | tcp_ctask->exp_datasn = 0; | 1387 | if (rc < 0) |
1371 | |||
1372 | if (sc->sc_data_direction == DMA_TO_DEVICE) { | ||
1373 | struct scatterlist *sg = scsi_sglist(sc); | ||
1374 | |||
1375 | iscsi_buf_init_sg(&tcp_ctask->sendbuf, sg); | ||
1376 | tcp_ctask->sg = sg + 1; | ||
1377 | tcp_ctask->bad_sg = sg + scsi_sg_count(sc); | ||
1378 | |||
1379 | debug_scsi("cmd [itt 0x%x total %d imm_data %d " | ||
1380 | "unsol count %d, unsol offset %d]\n", | ||
1381 | ctask->itt, scsi_bufflen(sc), | ||
1382 | ctask->imm_count, ctask->unsol_count, | ||
1383 | ctask->unsol_offset); | ||
1384 | } | ||
1385 | |||
1386 | iscsi_buf_init_iov(&tcp_ctask->headbuf, (char*)ctask->hdr, | ||
1387 | sizeof(struct iscsi_hdr)); | ||
1388 | |||
1389 | if (conn->hdrdgst_en) | ||
1390 | iscsi_hdr_digest(conn, &tcp_ctask->headbuf, | ||
1391 | (u8*)tcp_ctask->hdrext); | ||
1392 | clear_bit(XMSTATE_BIT_CMD_HDR_INIT, &tcp_ctask->xmstate); | ||
1393 | set_bit(XMSTATE_BIT_CMD_HDR_XMIT, &tcp_ctask->xmstate); | ||
1394 | } | ||
1395 | |||
1396 | if (test_bit(XMSTATE_BIT_CMD_HDR_XMIT, &tcp_ctask->xmstate)) { | ||
1397 | rc = iscsi_sendhdr(conn, &tcp_ctask->headbuf, ctask->imm_count); | ||
1398 | if (rc) | ||
1399 | return rc; | ||
1400 | clear_bit(XMSTATE_BIT_CMD_HDR_XMIT, &tcp_ctask->xmstate); | ||
1401 | |||
1402 | if (sc->sc_data_direction != DMA_TO_DEVICE) | ||
1403 | return 0; | ||
1404 | |||
1405 | if (ctask->imm_count) { | ||
1406 | set_bit(XMSTATE_BIT_IMM_DATA, &tcp_ctask->xmstate); | ||
1407 | iscsi_set_padding(tcp_ctask, ctask->imm_count); | ||
1408 | |||
1409 | if (ctask->conn->datadgst_en) { | ||
1410 | iscsi_data_digest_init(ctask->conn->dd_data, | ||
1411 | tcp_ctask); | ||
1412 | tcp_ctask->immdigest = 0; | ||
1413 | } | ||
1414 | } | ||
1415 | |||
1416 | if (ctask->unsol_count) { | ||
1417 | set_bit(XMSTATE_BIT_UNS_HDR, &tcp_ctask->xmstate); | ||
1418 | set_bit(XMSTATE_BIT_UNS_INIT, &tcp_ctask->xmstate); | ||
1419 | } | ||
1420 | } | ||
1421 | return rc; | ||
1422 | } | ||
1423 | |||
1424 | static int | ||
1425 | iscsi_send_padding(struct iscsi_conn *conn, struct iscsi_cmd_task *ctask) | ||
1426 | { | ||
1427 | struct iscsi_tcp_cmd_task *tcp_ctask = ctask->dd_data; | ||
1428 | struct iscsi_tcp_conn *tcp_conn = conn->dd_data; | ||
1429 | int sent = 0, rc; | ||
1430 | |||
1431 | if (test_bit(XMSTATE_BIT_W_PAD, &tcp_ctask->xmstate)) { | ||
1432 | iscsi_buf_init_iov(&tcp_ctask->sendbuf, (char*)&tcp_ctask->pad, | ||
1433 | tcp_ctask->pad_count); | ||
1434 | if (conn->datadgst_en) | ||
1435 | crypto_hash_update(&tcp_conn->tx_hash, | ||
1436 | &tcp_ctask->sendbuf.sg, | ||
1437 | tcp_ctask->sendbuf.sg.length); | ||
1438 | } else if (!test_bit(XMSTATE_BIT_W_RESEND_PAD, &tcp_ctask->xmstate)) | ||
1439 | return 0; | ||
1440 | |||
1441 | clear_bit(XMSTATE_BIT_W_PAD, &tcp_ctask->xmstate); | ||
1442 | clear_bit(XMSTATE_BIT_W_RESEND_PAD, &tcp_ctask->xmstate); | ||
1443 | debug_scsi("sending %d pad bytes for itt 0x%x\n", | ||
1444 | tcp_ctask->pad_count, ctask->itt); | ||
1445 | rc = iscsi_sendpage(conn, &tcp_ctask->sendbuf, &tcp_ctask->pad_count, | ||
1446 | &sent); | ||
1447 | if (rc) { | ||
1448 | debug_scsi("padding send failed %d\n", rc); | ||
1449 | set_bit(XMSTATE_BIT_W_RESEND_PAD, &tcp_ctask->xmstate); | ||
1450 | } | ||
1451 | return rc; | ||
1452 | } | ||
1453 | |||
1454 | static int | ||
1455 | iscsi_send_digest(struct iscsi_conn *conn, struct iscsi_cmd_task *ctask, | ||
1456 | struct iscsi_buf *buf, uint32_t *digest) | ||
1457 | { | ||
1458 | struct iscsi_tcp_cmd_task *tcp_ctask; | ||
1459 | struct iscsi_tcp_conn *tcp_conn; | ||
1460 | int rc, sent = 0; | ||
1461 | |||
1462 | if (!conn->datadgst_en) | ||
1463 | return 0; | ||
1464 | |||
1465 | tcp_ctask = ctask->dd_data; | ||
1466 | tcp_conn = conn->dd_data; | ||
1467 | |||
1468 | if (!test_bit(XMSTATE_BIT_W_RESEND_DATA_DIGEST, &tcp_ctask->xmstate)) { | ||
1469 | crypto_hash_final(&tcp_conn->tx_hash, (u8*)digest); | ||
1470 | iscsi_buf_init_iov(buf, (char*)digest, 4); | ||
1471 | } | ||
1472 | clear_bit(XMSTATE_BIT_W_RESEND_DATA_DIGEST, &tcp_ctask->xmstate); | ||
1473 | |||
1474 | rc = iscsi_sendpage(conn, buf, &tcp_ctask->digest_count, &sent); | ||
1475 | if (!rc) | ||
1476 | debug_scsi("sent digest 0x%x for itt 0x%x\n", *digest, | ||
1477 | ctask->itt); | ||
1478 | else { | ||
1479 | debug_scsi("sending digest 0x%x failed for itt 0x%x!\n", | ||
1480 | *digest, ctask->itt); | ||
1481 | set_bit(XMSTATE_BIT_W_RESEND_DATA_DIGEST, &tcp_ctask->xmstate); | ||
1482 | } | ||
1483 | return rc; | ||
1484 | } | ||
1485 | |||
1486 | static int | ||
1487 | iscsi_send_data(struct iscsi_cmd_task *ctask, struct iscsi_buf *sendbuf, | ||
1488 | struct scatterlist **sg, int *sent, int *count, | ||
1489 | struct iscsi_buf *digestbuf, uint32_t *digest) | ||
1490 | { | ||
1491 | struct iscsi_tcp_cmd_task *tcp_ctask = ctask->dd_data; | ||
1492 | struct iscsi_conn *conn = ctask->conn; | ||
1493 | struct iscsi_tcp_conn *tcp_conn = conn->dd_data; | ||
1494 | int rc, buf_sent, offset; | ||
1495 | |||
1496 | while (*count) { | ||
1497 | buf_sent = 0; | ||
1498 | offset = sendbuf->sent; | ||
1499 | |||
1500 | rc = iscsi_sendpage(conn, sendbuf, count, &buf_sent); | ||
1501 | *sent = *sent + buf_sent; | ||
1502 | if (buf_sent && conn->datadgst_en) | ||
1503 | partial_sg_digest_update(&tcp_conn->tx_hash, | ||
1504 | &sendbuf->sg, sendbuf->sg.offset + offset, | ||
1505 | buf_sent); | ||
1506 | if (!iscsi_buf_left(sendbuf) && *sg != tcp_ctask->bad_sg) { | ||
1507 | iscsi_buf_init_sg(sendbuf, *sg); | ||
1508 | *sg = *sg + 1; | ||
1509 | } | ||
1510 | |||
1511 | if (rc) | ||
1512 | return rc; | ||
1513 | } | ||
1514 | |||
1515 | rc = iscsi_send_padding(conn, ctask); | ||
1516 | if (rc) | ||
1517 | return rc; | 1388 | return rc; |
1518 | 1389 | ||
1519 | return iscsi_send_digest(conn, ctask, digestbuf, digest); | 1390 | /* Are we done already? */ |
1520 | } | 1391 | if (sc->sc_data_direction != DMA_TO_DEVICE) |
1521 | 1392 | return 0; | |
1522 | static int | ||
1523 | iscsi_send_unsol_hdr(struct iscsi_conn *conn, struct iscsi_cmd_task *ctask) | ||
1524 | { | ||
1525 | struct iscsi_tcp_cmd_task *tcp_ctask = ctask->dd_data; | ||
1526 | struct iscsi_data_task *dtask; | ||
1527 | int rc; | ||
1528 | |||
1529 | set_bit(XMSTATE_BIT_UNS_DATA, &tcp_ctask->xmstate); | ||
1530 | if (test_bit(XMSTATE_BIT_UNS_INIT, &tcp_ctask->xmstate)) { | ||
1531 | dtask = &tcp_ctask->unsol_dtask; | ||
1532 | |||
1533 | iscsi_prep_unsolicit_data_pdu(ctask, &dtask->hdr); | ||
1534 | iscsi_buf_init_iov(&tcp_ctask->headbuf, (char*)&dtask->hdr, | ||
1535 | sizeof(struct iscsi_hdr)); | ||
1536 | if (conn->hdrdgst_en) | ||
1537 | iscsi_hdr_digest(conn, &tcp_ctask->headbuf, | ||
1538 | (u8*)dtask->hdrext); | ||
1539 | |||
1540 | clear_bit(XMSTATE_BIT_UNS_INIT, &tcp_ctask->xmstate); | ||
1541 | iscsi_set_padding(tcp_ctask, ctask->data_count); | ||
1542 | } | ||
1543 | |||
1544 | rc = iscsi_sendhdr(conn, &tcp_ctask->headbuf, ctask->data_count); | ||
1545 | if (rc) { | ||
1546 | clear_bit(XMSTATE_BIT_UNS_DATA, &tcp_ctask->xmstate); | ||
1547 | set_bit(XMSTATE_BIT_UNS_HDR, &tcp_ctask->xmstate); | ||
1548 | return rc; | ||
1549 | } | ||
1550 | 1393 | ||
1551 | if (conn->datadgst_en) { | 1394 | if (ctask->unsol_count != 0) { |
1552 | dtask = &tcp_ctask->unsol_dtask; | 1395 | struct iscsi_data *hdr = &tcp_ctask->unsol_dtask.hdr; |
1553 | iscsi_data_digest_init(ctask->conn->dd_data, tcp_ctask); | ||
1554 | dtask->digest = 0; | ||
1555 | } | ||
1556 | 1396 | ||
1557 | debug_scsi("uns dout [itt 0x%x dlen %d sent %d]\n", | 1397 | /* Prepare a header for the unsolicited PDU. |
1558 | ctask->itt, ctask->unsol_count, tcp_ctask->sent); | 1398 | * The amount of data we want to send will be |
1559 | return 0; | 1399 | * in ctask->data_count. |
1560 | } | 1400 | * FIXME: return the data count instead. |
1401 | */ | ||
1402 | iscsi_prep_unsolicit_data_pdu(ctask, hdr); | ||
1561 | 1403 | ||
1562 | static int | 1404 | debug_tcp("unsol dout [itt 0x%x doff %d dlen %d]\n", |
1563 | iscsi_send_unsol_pdu(struct iscsi_conn *conn, struct iscsi_cmd_task *ctask) | 1405 | ctask->itt, tcp_ctask->sent, ctask->data_count); |
1564 | { | ||
1565 | struct iscsi_tcp_cmd_task *tcp_ctask = ctask->dd_data; | ||
1566 | int rc; | ||
1567 | 1406 | ||
1568 | if (test_and_clear_bit(XMSTATE_BIT_UNS_HDR, &tcp_ctask->xmstate)) { | 1407 | iscsi_tcp_send_hdr_prep(conn, hdr, sizeof(*hdr)); |
1569 | BUG_ON(!ctask->unsol_count); | 1408 | rc = iscsi_tcp_send_data_prep(conn, scsi_sglist(sc), |
1570 | send_hdr: | 1409 | scsi_sg_count(sc), |
1571 | rc = iscsi_send_unsol_hdr(conn, ctask); | 1410 | tcp_ctask->sent, |
1411 | ctask->data_count); | ||
1572 | if (rc) | 1412 | if (rc) |
1573 | return rc; | 1413 | goto fail; |
1574 | } | 1414 | tcp_ctask->sent += ctask->data_count; |
1575 | 1415 | ctask->unsol_count -= ctask->data_count; | |
1576 | if (test_bit(XMSTATE_BIT_UNS_DATA, &tcp_ctask->xmstate)) { | 1416 | goto flush; |
1577 | struct iscsi_data_task *dtask = &tcp_ctask->unsol_dtask; | 1417 | } else { |
1578 | int start = tcp_ctask->sent; | 1418 | struct iscsi_session *session = conn->session; |
1419 | struct iscsi_r2t_info *r2t; | ||
1579 | 1420 | ||
1580 | rc = iscsi_send_data(ctask, &tcp_ctask->sendbuf, &tcp_ctask->sg, | 1421 | /* All unsolicited PDUs sent. Check for solicited PDUs. |
1581 | &tcp_ctask->sent, &ctask->data_count, | ||
1582 | &dtask->digestbuf, &dtask->digest); | ||
1583 | ctask->unsol_count -= tcp_ctask->sent - start; | ||
1584 | if (rc) | ||
1585 | return rc; | ||
1586 | clear_bit(XMSTATE_BIT_UNS_DATA, &tcp_ctask->xmstate); | ||
1587 | /* | ||
1588 | * Done with the Data-Out. Next, check if we need | ||
1589 | * to send another unsolicited Data-Out. | ||
1590 | */ | 1422 | */ |
1591 | if (ctask->unsol_count) { | 1423 | spin_lock_bh(&session->lock); |
1592 | debug_scsi("sending more uns\n"); | 1424 | r2t = tcp_ctask->r2t; |
1593 | set_bit(XMSTATE_BIT_UNS_INIT, &tcp_ctask->xmstate); | 1425 | if (r2t != NULL) { |
1594 | goto send_hdr; | 1426 | /* Continue with this R2T? */ |
1427 | if (!iscsi_solicit_data_cont(conn, ctask, r2t)) { | ||
1428 | debug_scsi(" done with r2t %p\n", r2t); | ||
1429 | |||
1430 | __kfifo_put(tcp_ctask->r2tpool.queue, | ||
1431 | (void*)&r2t, sizeof(void*)); | ||
1432 | tcp_ctask->r2t = r2t = NULL; | ||
1433 | } | ||
1595 | } | 1434 | } |
1596 | } | ||
1597 | return 0; | ||
1598 | } | ||
1599 | |||
1600 | static int iscsi_send_sol_pdu(struct iscsi_conn *conn, | ||
1601 | struct iscsi_cmd_task *ctask) | ||
1602 | { | ||
1603 | struct iscsi_tcp_cmd_task *tcp_ctask = ctask->dd_data; | ||
1604 | struct iscsi_session *session = conn->session; | ||
1605 | struct iscsi_r2t_info *r2t; | ||
1606 | struct iscsi_data_task *dtask; | ||
1607 | int left, rc; | ||
1608 | 1435 | ||
1609 | if (test_bit(XMSTATE_BIT_SOL_HDR_INIT, &tcp_ctask->xmstate)) { | 1436 | if (r2t == NULL) { |
1610 | if (!tcp_ctask->r2t) { | ||
1611 | spin_lock_bh(&session->lock); | ||
1612 | __kfifo_get(tcp_ctask->r2tqueue, (void*)&tcp_ctask->r2t, | 1437 | __kfifo_get(tcp_ctask->r2tqueue, (void*)&tcp_ctask->r2t, |
1613 | sizeof(void*)); | 1438 | sizeof(void*)); |
1614 | spin_unlock_bh(&session->lock); | 1439 | r2t = tcp_ctask->r2t; |
1615 | } | 1440 | } |
1616 | send_hdr: | 1441 | spin_unlock_bh(&session->lock); |
1617 | r2t = tcp_ctask->r2t; | ||
1618 | dtask = &r2t->dtask; | ||
1619 | |||
1620 | if (conn->hdrdgst_en) | ||
1621 | iscsi_hdr_digest(conn, &r2t->headbuf, | ||
1622 | (u8*)dtask->hdrext); | ||
1623 | clear_bit(XMSTATE_BIT_SOL_HDR_INIT, &tcp_ctask->xmstate); | ||
1624 | set_bit(XMSTATE_BIT_SOL_HDR, &tcp_ctask->xmstate); | ||
1625 | } | ||
1626 | |||
1627 | if (test_bit(XMSTATE_BIT_SOL_HDR, &tcp_ctask->xmstate)) { | ||
1628 | r2t = tcp_ctask->r2t; | ||
1629 | dtask = &r2t->dtask; | ||
1630 | |||
1631 | rc = iscsi_sendhdr(conn, &r2t->headbuf, r2t->data_count); | ||
1632 | if (rc) | ||
1633 | return rc; | ||
1634 | clear_bit(XMSTATE_BIT_SOL_HDR, &tcp_ctask->xmstate); | ||
1635 | set_bit(XMSTATE_BIT_SOL_DATA, &tcp_ctask->xmstate); | ||
1636 | 1442 | ||
1637 | if (conn->datadgst_en) { | 1443 | /* Waiting for more R2Ts to arrive. */ |
1638 | iscsi_data_digest_init(conn->dd_data, tcp_ctask); | 1444 | if (r2t == NULL) { |
1639 | dtask->digest = 0; | 1445 | debug_tcp("no R2Ts yet\n"); |
1446 | return 0; | ||
1640 | } | 1447 | } |
1641 | 1448 | ||
1642 | iscsi_set_padding(tcp_ctask, r2t->data_count); | 1449 | debug_scsi("sol dout %p [dsn %d itt 0x%x doff %d dlen %d]\n", |
1643 | debug_scsi("sol dout [dsn %d itt 0x%x dlen %d sent %d]\n", | 1450 | r2t, r2t->solicit_datasn - 1, ctask->itt, |
1644 | r2t->solicit_datasn - 1, ctask->itt, r2t->data_count, | 1451 | r2t->data_offset + r2t->sent, r2t->data_count); |
1645 | r2t->sent); | ||
1646 | } | ||
1647 | 1452 | ||
1648 | if (test_bit(XMSTATE_BIT_SOL_DATA, &tcp_ctask->xmstate)) { | 1453 | iscsi_tcp_send_hdr_prep(conn, &r2t->dtask.hdr, |
1649 | r2t = tcp_ctask->r2t; | 1454 | sizeof(struct iscsi_hdr)); |
1650 | dtask = &r2t->dtask; | ||
1651 | 1455 | ||
1652 | rc = iscsi_send_data(ctask, &r2t->sendbuf, &r2t->sg, | 1456 | rc = iscsi_tcp_send_data_prep(conn, scsi_sglist(sc), |
1653 | &r2t->sent, &r2t->data_count, | 1457 | scsi_sg_count(sc), |
1654 | &dtask->digestbuf, &dtask->digest); | 1458 | r2t->data_offset + r2t->sent, |
1459 | r2t->data_count); | ||
1655 | if (rc) | 1460 | if (rc) |
1656 | return rc; | 1461 | goto fail; |
1657 | clear_bit(XMSTATE_BIT_SOL_DATA, &tcp_ctask->xmstate); | 1462 | tcp_ctask->sent += r2t->data_count; |
1658 | 1463 | r2t->sent += r2t->data_count; | |
1659 | /* | 1464 | goto flush; |
1660 | * Done with this Data-Out. Next, check if we have | ||
1661 | * to send another Data-Out for this R2T. | ||
1662 | */ | ||
1663 | BUG_ON(r2t->data_length - r2t->sent < 0); | ||
1664 | left = r2t->data_length - r2t->sent; | ||
1665 | if (left) { | ||
1666 | iscsi_solicit_data_cont(conn, ctask, r2t, left); | ||
1667 | goto send_hdr; | ||
1668 | } | ||
1669 | |||
1670 | /* | ||
1671 | * Done with this R2T. Check if there are more | ||
1672 | * outstanding R2Ts ready to be processed. | ||
1673 | */ | ||
1674 | spin_lock_bh(&session->lock); | ||
1675 | tcp_ctask->r2t = NULL; | ||
1676 | __kfifo_put(tcp_ctask->r2tpool.queue, (void*)&r2t, | ||
1677 | sizeof(void*)); | ||
1678 | if (__kfifo_get(tcp_ctask->r2tqueue, (void*)&r2t, | ||
1679 | sizeof(void*))) { | ||
1680 | tcp_ctask->r2t = r2t; | ||
1681 | spin_unlock_bh(&session->lock); | ||
1682 | goto send_hdr; | ||
1683 | } | ||
1684 | spin_unlock_bh(&session->lock); | ||
1685 | } | 1465 | } |
1686 | return 0; | 1466 | return 0; |
1687 | } | 1467 | fail: |
1688 | 1468 | iscsi_conn_failure(conn, rc); | |
1689 | /** | 1469 | return -EIO; |
1690 | * iscsi_tcp_ctask_xmit - xmit normal PDU task | ||
1691 | * @conn: iscsi connection | ||
1692 | * @ctask: iscsi command task | ||
1693 | * | ||
1694 | * Notes: | ||
1695 | * The function can return -EAGAIN in which case caller must | ||
1696 | * call it again later, or recover. '0' return code means successful | ||
1697 | * xmit. | ||
1698 | * The function is devided to logical helpers (above) for the different | ||
1699 | * xmit stages. | ||
1700 | * | ||
1701 | *iscsi_send_cmd_hdr() | ||
1702 | * XMSTATE_BIT_CMD_HDR_INIT - prepare Header and Data buffers Calculate | ||
1703 | * Header Digest | ||
1704 | * XMSTATE_BIT_CMD_HDR_XMIT - Transmit header in progress | ||
1705 | * | ||
1706 | *iscsi_send_padding | ||
1707 | * XMSTATE_BIT_W_PAD - Prepare and send pading | ||
1708 | * XMSTATE_BIT_W_RESEND_PAD - retry send pading | ||
1709 | * | ||
1710 | *iscsi_send_digest | ||
1711 | * XMSTATE_BIT_W_RESEND_DATA_DIGEST - Finalize and send Data Digest | ||
1712 | * XMSTATE_BIT_W_RESEND_DATA_DIGEST - retry sending digest | ||
1713 | * | ||
1714 | *iscsi_send_unsol_hdr | ||
1715 | * XMSTATE_BIT_UNS_INIT - prepare un-solicit data header and digest | ||
1716 | * XMSTATE_BIT_UNS_HDR - send un-solicit header | ||
1717 | * | ||
1718 | *iscsi_send_unsol_pdu | ||
1719 | * XMSTATE_BIT_UNS_DATA - send un-solicit data in progress | ||
1720 | * | ||
1721 | *iscsi_send_sol_pdu | ||
1722 | * XMSTATE_BIT_SOL_HDR_INIT - solicit data header and digest initialize | ||
1723 | * XMSTATE_BIT_SOL_HDR - send solicit header | ||
1724 | * XMSTATE_BIT_SOL_DATA - send solicit data | ||
1725 | * | ||
1726 | *iscsi_tcp_ctask_xmit | ||
1727 | * XMSTATE_BIT_IMM_DATA - xmit managment data (??) | ||
1728 | **/ | ||
1729 | static int | ||
1730 | iscsi_tcp_ctask_xmit(struct iscsi_conn *conn, struct iscsi_cmd_task *ctask) | ||
1731 | { | ||
1732 | struct iscsi_tcp_cmd_task *tcp_ctask = ctask->dd_data; | ||
1733 | int rc = 0; | ||
1734 | |||
1735 | debug_scsi("ctask deq [cid %d xmstate %x itt 0x%x]\n", | ||
1736 | conn->id, tcp_ctask->xmstate, ctask->itt); | ||
1737 | |||
1738 | rc = iscsi_send_cmd_hdr(conn, ctask); | ||
1739 | if (rc) | ||
1740 | return rc; | ||
1741 | if (ctask->sc->sc_data_direction != DMA_TO_DEVICE) | ||
1742 | return 0; | ||
1743 | |||
1744 | if (test_bit(XMSTATE_BIT_IMM_DATA, &tcp_ctask->xmstate)) { | ||
1745 | rc = iscsi_send_data(ctask, &tcp_ctask->sendbuf, &tcp_ctask->sg, | ||
1746 | &tcp_ctask->sent, &ctask->imm_count, | ||
1747 | &tcp_ctask->immbuf, &tcp_ctask->immdigest); | ||
1748 | if (rc) | ||
1749 | return rc; | ||
1750 | clear_bit(XMSTATE_BIT_IMM_DATA, &tcp_ctask->xmstate); | ||
1751 | } | ||
1752 | |||
1753 | rc = iscsi_send_unsol_pdu(conn, ctask); | ||
1754 | if (rc) | ||
1755 | return rc; | ||
1756 | |||
1757 | rc = iscsi_send_sol_pdu(conn, ctask); | ||
1758 | if (rc) | ||
1759 | return rc; | ||
1760 | |||
1761 | return rc; | ||
1762 | } | 1470 | } |
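The rewritten iscsi_tcp_ctask_xmit() above drops the XMSTATE_* bit machine entirely: it flushes whatever is queued, then keeps preparing the next Data-Out, unsolicited data first and then one R2T at a time from the kfifo, jumping back to the flush label after each prep. The control flow, reduced to a toy loop over byte counters (all names and sizes below are made up for illustration):

#include <stdio.h>

struct task {
	unsigned unsol_left;	/* unsolicited bytes still owed */
	unsigned r2t_left;	/* bytes owed for the current R2T (0 = none) */
	unsigned sent;
};

static void xmit_task(struct task *t, unsigned burst)
{
	for (;;) {
		unsigned n;

		/* a real "flush" of queued bytes would go here */
		if (t->unsol_left) {
			n = t->unsol_left < burst ? t->unsol_left : burst;
			t->unsol_left -= n;
		} else if (t->r2t_left) {
			n = t->r2t_left < burst ? t->r2t_left : burst;
			t->r2t_left -= n;
		} else {
			break;	/* nothing left: wait for more R2Ts */
		}
		t->sent += n;
		printf("queued Data-Out of %u bytes (total %u)\n", n, t->sent);
	}
}

int main(void)
{
	struct task t = { .unsol_left = 4096, .r2t_left = 16384 };

	xmit_task(&t, 8192);
	return 0;
}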
1763 | 1471 | ||
1764 | static struct iscsi_cls_conn * | 1472 | static struct iscsi_cls_conn * |
@@ -1784,9 +1492,6 @@ iscsi_tcp_conn_create(struct iscsi_cls_session *cls_session, uint32_t conn_idx) | |||
1784 | 1492 | ||
1785 | conn->dd_data = tcp_conn; | 1493 | conn->dd_data = tcp_conn; |
1786 | tcp_conn->iscsi_conn = conn; | 1494 | tcp_conn->iscsi_conn = conn; |
1787 | tcp_conn->in_progress = IN_PROGRESS_WAIT_HEADER; | ||
1788 | /* initial operational parameters */ | ||
1789 | tcp_conn->hdr_size = sizeof(struct iscsi_hdr); | ||
1790 | 1495 | ||
1791 | tcp_conn->tx_hash.tfm = crypto_alloc_hash("crc32c", 0, | 1496 | tcp_conn->tx_hash.tfm = crypto_alloc_hash("crc32c", 0, |
1792 | CRYPTO_ALG_ASYNC); | 1497 | CRYPTO_ALG_ASYNC); |
@@ -1863,11 +1568,9 @@ static void | |||
1863 | iscsi_tcp_conn_stop(struct iscsi_cls_conn *cls_conn, int flag) | 1568 | iscsi_tcp_conn_stop(struct iscsi_cls_conn *cls_conn, int flag) |
1864 | { | 1569 | { |
1865 | struct iscsi_conn *conn = cls_conn->dd_data; | 1570 | struct iscsi_conn *conn = cls_conn->dd_data; |
1866 | struct iscsi_tcp_conn *tcp_conn = conn->dd_data; | ||
1867 | 1571 | ||
1868 | iscsi_conn_stop(cls_conn, flag); | 1572 | iscsi_conn_stop(cls_conn, flag); |
1869 | iscsi_tcp_release_conn(conn); | 1573 | iscsi_tcp_release_conn(conn); |
1870 | tcp_conn->hdr_size = sizeof(struct iscsi_hdr); | ||
1871 | } | 1574 | } |
1872 | 1575 | ||
1873 | static int iscsi_tcp_get_addr(struct iscsi_conn *conn, struct socket *sock, | 1576 | static int iscsi_tcp_get_addr(struct iscsi_conn *conn, struct socket *sock, |
@@ -1967,7 +1670,7 @@ iscsi_tcp_conn_bind(struct iscsi_cls_session *cls_session, | |||
1967 | /* | 1670 | /* |
1968 | * set receive state machine into initial state | 1671 | * set receive state machine into initial state |
1969 | */ | 1672 | */ |
1970 | tcp_conn->in_progress = IN_PROGRESS_WAIT_HEADER; | 1673 | iscsi_tcp_hdr_recv_prep(tcp_conn); |
1971 | return 0; | 1674 | return 0; |
1972 | 1675 | ||
1973 | free_socket: | 1676 | free_socket: |
@@ -1977,10 +1680,17 @@ free_socket: | |||
1977 | 1680 | ||
1978 | /* called with host lock */ | 1681 | /* called with host lock */ |
1979 | static void | 1682 | static void |
1980 | iscsi_tcp_mgmt_init(struct iscsi_conn *conn, struct iscsi_mgmt_task *mtask) | 1683 | iscsi_tcp_mtask_init(struct iscsi_conn *conn, struct iscsi_mgmt_task *mtask) |
1981 | { | 1684 | { |
1982 | struct iscsi_tcp_mgmt_task *tcp_mtask = mtask->dd_data; | 1685 | debug_scsi("mtask deq [cid %d itt 0x%x]\n", conn->id, mtask->itt); |
1983 | tcp_mtask->xmstate = 1 << XMSTATE_BIT_IMM_HDR_INIT; | 1686 | |
1687 | /* Prepare PDU, optionally w/ immediate data */ | ||
1688 | iscsi_tcp_send_hdr_prep(conn, mtask->hdr, sizeof(*mtask->hdr)); | ||
1689 | |||
1690 | /* If we have immediate data, attach a payload */ | ||
1691 | if (mtask->data_count) | ||
1692 | iscsi_tcp_send_linear_data_prepare(conn, mtask->data, | ||
1693 | mtask->data_count); | ||
1984 | } | 1694 | } |
1985 | 1695 | ||
1986 | static int | 1696 | static int |
@@ -2003,8 +1713,7 @@ iscsi_r2tpool_alloc(struct iscsi_session *session) | |||
2003 | */ | 1713 | */ |
2004 | 1714 | ||
2005 | /* R2T pool */ | 1715 | /* R2T pool */ |
2006 | if (iscsi_pool_init(&tcp_ctask->r2tpool, session->max_r2t * 4, | 1716 | if (iscsi_pool_init(&tcp_ctask->r2tpool, session->max_r2t * 4, NULL, |
2007 | (void***)&tcp_ctask->r2ts, | ||
2008 | sizeof(struct iscsi_r2t_info))) { | 1717 | sizeof(struct iscsi_r2t_info))) { |
2009 | goto r2t_alloc_fail; | 1718 | goto r2t_alloc_fail; |
2010 | } | 1719 | } |
@@ -2013,8 +1722,7 @@ iscsi_r2tpool_alloc(struct iscsi_session *session) | |||
2013 | tcp_ctask->r2tqueue = kfifo_alloc( | 1722 | tcp_ctask->r2tqueue = kfifo_alloc( |
2014 | session->max_r2t * 4 * sizeof(void*), GFP_KERNEL, NULL); | 1723 | session->max_r2t * 4 * sizeof(void*), GFP_KERNEL, NULL); |
2015 | if (tcp_ctask->r2tqueue == ERR_PTR(-ENOMEM)) { | 1724 | if (tcp_ctask->r2tqueue == ERR_PTR(-ENOMEM)) { |
2016 | iscsi_pool_free(&tcp_ctask->r2tpool, | 1725 | iscsi_pool_free(&tcp_ctask->r2tpool); |
2017 | (void**)tcp_ctask->r2ts); | ||
2018 | goto r2t_alloc_fail; | 1726 | goto r2t_alloc_fail; |
2019 | } | 1727 | } |
2020 | } | 1728 | } |
@@ -2027,8 +1735,7 @@ r2t_alloc_fail: | |||
2027 | struct iscsi_tcp_cmd_task *tcp_ctask = ctask->dd_data; | 1735 | struct iscsi_tcp_cmd_task *tcp_ctask = ctask->dd_data; |
2028 | 1736 | ||
2029 | kfifo_free(tcp_ctask->r2tqueue); | 1737 | kfifo_free(tcp_ctask->r2tqueue); |
2030 | iscsi_pool_free(&tcp_ctask->r2tpool, | 1738 | iscsi_pool_free(&tcp_ctask->r2tpool); |
2031 | (void**)tcp_ctask->r2ts); | ||
2032 | } | 1739 | } |
2033 | return -ENOMEM; | 1740 | return -ENOMEM; |
2034 | } | 1741 | } |
@@ -2043,8 +1750,7 @@ iscsi_r2tpool_free(struct iscsi_session *session) | |||
2043 | struct iscsi_tcp_cmd_task *tcp_ctask = ctask->dd_data; | 1750 | struct iscsi_tcp_cmd_task *tcp_ctask = ctask->dd_data; |
2044 | 1751 | ||
2045 | kfifo_free(tcp_ctask->r2tqueue); | 1752 | kfifo_free(tcp_ctask->r2tqueue); |
2046 | iscsi_pool_free(&tcp_ctask->r2tpool, | 1753 | iscsi_pool_free(&tcp_ctask->r2tpool); |
2047 | (void**)tcp_ctask->r2ts); | ||
2048 | } | 1754 | } |
2049 | } | 1755 | } |
2050 | 1756 | ||
@@ -2060,9 +1766,6 @@ iscsi_conn_set_param(struct iscsi_cls_conn *cls_conn, enum iscsi_param param, | |||
2060 | switch(param) { | 1766 | switch(param) { |
2061 | case ISCSI_PARAM_HDRDGST_EN: | 1767 | case ISCSI_PARAM_HDRDGST_EN: |
2062 | iscsi_set_param(cls_conn, param, buf, buflen); | 1768 | iscsi_set_param(cls_conn, param, buf, buflen); |
2063 | tcp_conn->hdr_size = sizeof(struct iscsi_hdr); | ||
2064 | if (conn->hdrdgst_en) | ||
2065 | tcp_conn->hdr_size += sizeof(__u32); | ||
2066 | break; | 1769 | break; |
2067 | case ISCSI_PARAM_DATADGST_EN: | 1770 | case ISCSI_PARAM_DATADGST_EN: |
2068 | iscsi_set_param(cls_conn, param, buf, buflen); | 1771 | iscsi_set_param(cls_conn, param, buf, buflen); |
@@ -2071,12 +1774,12 @@ iscsi_conn_set_param(struct iscsi_cls_conn *cls_conn, enum iscsi_param param, | |||
2071 | break; | 1774 | break; |
2072 | case ISCSI_PARAM_MAX_R2T: | 1775 | case ISCSI_PARAM_MAX_R2T: |
2073 | sscanf(buf, "%d", &value); | 1776 | sscanf(buf, "%d", &value); |
2074 | if (session->max_r2t == roundup_pow_of_two(value)) | 1777 | if (value <= 0 || !is_power_of_2(value)) |
1778 | return -EINVAL; | ||
1779 | if (session->max_r2t == value) | ||
2075 | break; | 1780 | break; |
2076 | iscsi_r2tpool_free(session); | 1781 | iscsi_r2tpool_free(session); |
2077 | iscsi_set_param(cls_conn, param, buf, buflen); | 1782 | iscsi_set_param(cls_conn, param, buf, buflen); |
2078 | if (session->max_r2t & (session->max_r2t - 1)) | ||
2079 | session->max_r2t = roundup_pow_of_two(session->max_r2t); | ||
2080 | if (iscsi_r2tpool_alloc(session)) | 1783 | if (iscsi_r2tpool_alloc(session)) |
2081 | return -ENOMEM; | 1784 | return -ENOMEM; |
2082 | break; | 1785 | break; |
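Note the stricter MaxOutstandingR2T handling: instead of silently rounding the value up to a power of two, the driver now rejects anything that is not already one. The power-of-two test is the usual bit trick, shown here standalone:

#include <stdbool.h>
#include <stdio.h>

/* Same test the kernel's is_power_of_2() performs. */
static bool is_pow2(unsigned long n)
{
	return n != 0 && (n & (n - 1)) == 0;
}

int main(void)
{
	int value = 3;

	if (value <= 0 || !is_pow2((unsigned long)value))
		printf("MaxOutstandingR2T=%d rejected (-EINVAL)\n", value);
	return 0;
}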
@@ -2183,14 +1886,15 @@ iscsi_tcp_session_create(struct iscsi_transport *iscsit, | |||
2183 | struct iscsi_cmd_task *ctask = session->cmds[cmd_i]; | 1886 | struct iscsi_cmd_task *ctask = session->cmds[cmd_i]; |
2184 | struct iscsi_tcp_cmd_task *tcp_ctask = ctask->dd_data; | 1887 | struct iscsi_tcp_cmd_task *tcp_ctask = ctask->dd_data; |
2185 | 1888 | ||
2186 | ctask->hdr = &tcp_ctask->hdr; | 1889 | ctask->hdr = &tcp_ctask->hdr.cmd_hdr; |
1890 | ctask->hdr_max = sizeof(tcp_ctask->hdr) - ISCSI_DIGEST_SIZE; | ||
2187 | } | 1891 | } |
2188 | 1892 | ||
2189 | for (cmd_i = 0; cmd_i < session->mgmtpool_max; cmd_i++) { | 1893 | for (cmd_i = 0; cmd_i < session->mgmtpool_max; cmd_i++) { |
2190 | struct iscsi_mgmt_task *mtask = session->mgmt_cmds[cmd_i]; | 1894 | struct iscsi_mgmt_task *mtask = session->mgmt_cmds[cmd_i]; |
2191 | struct iscsi_tcp_mgmt_task *tcp_mtask = mtask->dd_data; | 1895 | struct iscsi_tcp_mgmt_task *tcp_mtask = mtask->dd_data; |
2192 | 1896 | ||
2193 | mtask->hdr = &tcp_mtask->hdr; | 1897 | mtask->hdr = (struct iscsi_hdr *) &tcp_mtask->hdr; |
2194 | } | 1898 | } |
2195 | 1899 | ||
2196 | if (iscsi_r2tpool_alloc(class_to_transport_session(cls_session))) | 1900 | if (iscsi_r2tpool_alloc(class_to_transport_session(cls_session))) |
@@ -2222,12 +1926,14 @@ static struct scsi_host_template iscsi_sht = { | |||
2222 | .queuecommand = iscsi_queuecommand, | 1926 | .queuecommand = iscsi_queuecommand, |
2223 | .change_queue_depth = iscsi_change_queue_depth, | 1927 | .change_queue_depth = iscsi_change_queue_depth, |
2224 | .can_queue = ISCSI_DEF_XMIT_CMDS_MAX - 1, | 1928 | .can_queue = ISCSI_DEF_XMIT_CMDS_MAX - 1, |
2225 | .sg_tablesize = ISCSI_SG_TABLESIZE, | 1929 | .sg_tablesize = 4096, |
2226 | .max_sectors = 0xFFFF, | 1930 | .max_sectors = 0xFFFF, |
2227 | .cmd_per_lun = ISCSI_DEF_CMD_PER_LUN, | 1931 | .cmd_per_lun = ISCSI_DEF_CMD_PER_LUN, |
2228 | .eh_abort_handler = iscsi_eh_abort, | 1932 | .eh_abort_handler = iscsi_eh_abort, |
1933 | .eh_device_reset_handler= iscsi_eh_device_reset, | ||
2229 | .eh_host_reset_handler = iscsi_eh_host_reset, | 1934 | .eh_host_reset_handler = iscsi_eh_host_reset, |
2230 | .use_clustering = DISABLE_CLUSTERING, | 1935 | .use_clustering = DISABLE_CLUSTERING, |
1936 | .use_sg_chaining = ENABLE_SG_CHAINING, | ||
2231 | .slave_configure = iscsi_tcp_slave_configure, | 1937 | .slave_configure = iscsi_tcp_slave_configure, |
2232 | .proc_name = "iscsi_tcp", | 1938 | .proc_name = "iscsi_tcp", |
2233 | .this_id = -1, | 1939 | .this_id = -1, |
@@ -2257,14 +1963,17 @@ static struct iscsi_transport iscsi_tcp_transport = { | |||
2257 | ISCSI_PERSISTENT_ADDRESS | | 1963 | ISCSI_PERSISTENT_ADDRESS | |
2258 | ISCSI_TARGET_NAME | ISCSI_TPGT | | 1964 | ISCSI_TARGET_NAME | ISCSI_TPGT | |
2259 | ISCSI_USERNAME | ISCSI_PASSWORD | | 1965 | ISCSI_USERNAME | ISCSI_PASSWORD | |
2260 | ISCSI_USERNAME_IN | ISCSI_PASSWORD_IN, | 1966 | ISCSI_USERNAME_IN | ISCSI_PASSWORD_IN | |
1967 | ISCSI_FAST_ABORT | ISCSI_ABORT_TMO | | ||
1968 | ISCSI_LU_RESET_TMO | | ||
1969 | ISCSI_PING_TMO | ISCSI_RECV_TMO, | ||
2261 | .host_param_mask = ISCSI_HOST_HWADDRESS | ISCSI_HOST_IPADDRESS | | 1970 | .host_param_mask = ISCSI_HOST_HWADDRESS | ISCSI_HOST_IPADDRESS | |
2262 | ISCSI_HOST_INITIATOR_NAME | | 1971 | ISCSI_HOST_INITIATOR_NAME | |
2263 | ISCSI_HOST_NETDEV_NAME, | 1972 | ISCSI_HOST_NETDEV_NAME, |
2264 | .host_template = &iscsi_sht, | 1973 | .host_template = &iscsi_sht, |
2265 | .conndata_size = sizeof(struct iscsi_conn), | 1974 | .conndata_size = sizeof(struct iscsi_conn), |
2266 | .max_conn = 1, | 1975 | .max_conn = 1, |
2267 | .max_cmd_len = ISCSI_TCP_MAX_CMD_LEN, | 1976 | .max_cmd_len = 16, |
2268 | /* session management */ | 1977 | /* session management */ |
2269 | .create_session = iscsi_tcp_session_create, | 1978 | .create_session = iscsi_tcp_session_create, |
2270 | .destroy_session = iscsi_tcp_session_destroy, | 1979 | .destroy_session = iscsi_tcp_session_destroy, |
@@ -2283,8 +1992,8 @@ static struct iscsi_transport iscsi_tcp_transport = { | |||
2283 | /* IO */ | 1992 | /* IO */ |
2284 | .send_pdu = iscsi_conn_send_pdu, | 1993 | .send_pdu = iscsi_conn_send_pdu, |
2285 | .get_stats = iscsi_conn_get_stats, | 1994 | .get_stats = iscsi_conn_get_stats, |
2286 | .init_cmd_task = iscsi_tcp_cmd_init, | 1995 | .init_cmd_task = iscsi_tcp_ctask_init, |
2287 | .init_mgmt_task = iscsi_tcp_mgmt_init, | 1996 | .init_mgmt_task = iscsi_tcp_mtask_init, |
2288 | .xmit_cmd_task = iscsi_tcp_ctask_xmit, | 1997 | .xmit_cmd_task = iscsi_tcp_ctask_xmit, |
2289 | .xmit_mgmt_task = iscsi_tcp_mtask_xmit, | 1998 | .xmit_mgmt_task = iscsi_tcp_mtask_xmit, |
2290 | .cleanup_cmd_task = iscsi_tcp_cleanup_ctask, | 1999 | .cleanup_cmd_task = iscsi_tcp_cleanup_ctask, |