Diffstat (limited to 'drivers/scsi')
-rw-r--r-- | drivers/scsi/iscsi_tcp.c | 1333
-rw-r--r-- | drivers/scsi/iscsi_tcp.h | 75
-rw-r--r-- | drivers/scsi/libiscsi.c | 37
3 files changed, 560 insertions, 885 deletions
diff --git a/drivers/scsi/iscsi_tcp.c b/drivers/scsi/iscsi_tcp.c
index 9b418522c9bf..7212fe95a66d 100644
--- a/drivers/scsi/iscsi_tcp.c
+++ b/drivers/scsi/iscsi_tcp.c
@@ -68,56 +68,10 @@ static unsigned int iscsi_max_lun = 512; | |||
68 | module_param_named(max_lun, iscsi_max_lun, uint, S_IRUGO); | 68 | module_param_named(max_lun, iscsi_max_lun, uint, S_IRUGO); |
69 | 69 | ||
70 | static int iscsi_tcp_hdr_recv_done(struct iscsi_tcp_conn *tcp_conn, | 70 | static int iscsi_tcp_hdr_recv_done(struct iscsi_tcp_conn *tcp_conn, |
71 | struct iscsi_chunk *chunk); | 71 | struct iscsi_segment *segment); |
72 | |||
73 | static inline void | ||
74 | iscsi_buf_init_iov(struct iscsi_buf *ibuf, char *vbuf, int size) | ||
75 | { | ||
76 | ibuf->sg.page = virt_to_page(vbuf); | ||
77 | ibuf->sg.offset = offset_in_page(vbuf); | ||
78 | ibuf->sg.length = size; | ||
79 | ibuf->sent = 0; | ||
80 | ibuf->use_sendmsg = 1; | ||
81 | } | ||
82 | |||
83 | static inline void | ||
84 | iscsi_buf_init_sg(struct iscsi_buf *ibuf, struct scatterlist *sg) | ||
85 | { | ||
86 | ibuf->sg.page = sg->page; | ||
87 | ibuf->sg.offset = sg->offset; | ||
88 | ibuf->sg.length = sg->length; | ||
89 | /* | ||
90 | * Fastpath: sg element fits into single page | ||
91 | */ | ||
92 | if (sg->length + sg->offset <= PAGE_SIZE && !PageSlab(sg->page)) | ||
93 | ibuf->use_sendmsg = 0; | ||
94 | else | ||
95 | ibuf->use_sendmsg = 1; | ||
96 | ibuf->sent = 0; | ||
97 | } | ||
98 | |||
99 | static inline int | ||
100 | iscsi_buf_left(struct iscsi_buf *ibuf) | ||
101 | { | ||
102 | int rc; | ||
103 | |||
104 | rc = ibuf->sg.length - ibuf->sent; | ||
105 | BUG_ON(rc < 0); | ||
106 | return rc; | ||
107 | } | ||
108 | |||
109 | static inline void | ||
110 | iscsi_hdr_digest(struct iscsi_conn *conn, struct iscsi_buf *buf, | ||
111 | u8* crc) | ||
112 | { | ||
113 | struct iscsi_tcp_conn *tcp_conn = conn->dd_data; | ||
114 | |||
115 | crypto_hash_digest(&tcp_conn->tx_hash, &buf->sg, buf->sg.length, crc); | ||
116 | buf->sg.length += ISCSI_DIGEST_SIZE; | ||
117 | } | ||
118 | 72 | ||
119 | /* | 73 | /* |
120 | * Scatterlist handling: inside the iscsi_chunk, we | 74 | * Scatterlist handling: inside the iscsi_segment, we |
121 | * remember an index into the scatterlist, and set data/size | 75 | * remember an index into the scatterlist, and set data/size |
122 | * to the current scatterlist entry. For highmem pages, we | 76 | * to the current scatterlist entry. For highmem pages, we |
123 | * kmap as needed. | 77 | * kmap as needed. |
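
The hunk above replaces the old iscsi_buf helpers with the iscsi_segment abstraction described in the comment: instead of an index plus an iscsi_buf, the segment remembers the current scatterlist entry and tracks per-entry (size/copied) and overall (total_size/total_copied) progress. Below is a minimal userspace C sketch of that bookkeeping; sg_stub and segment_stub are illustrative stand-ins, not kernel types, and the clamping mirrors the new iscsi_tcp_segment_init_sg().

#include <stdio.h>

/* Illustrative stand-in for one scatterlist entry: just a length. */
struct sg_stub {
        unsigned int length;
};

/* Minimal mirror of the iscsi_segment bookkeeping fields. */
struct segment_stub {
        struct sg_stub *sg;          /* current scatterlist entry */
        unsigned int sg_offset;      /* byte offset into that entry */
        unsigned int size;           /* bytes expected for this entry */
        unsigned int copied;         /* bytes copied into this entry so far */
        unsigned int total_size;     /* bytes expected for the whole transfer */
        unsigned int total_copied;   /* bytes finished in earlier entries */
};

/* Equivalent of iscsi_tcp_segment_init_sg(): point the segment at one
 * entry and clamp size to what the transfer still needs. */
static void segment_init_sg(struct segment_stub *seg,
                            struct sg_stub *sg, unsigned int offset)
{
        unsigned int want = seg->total_size - seg->total_copied;
        unsigned int room = sg->length - offset;

        seg->sg = sg;
        seg->sg_offset = offset;
        seg->size = room < want ? room : want;
        seg->copied = 0;
}

int main(void)
{
        struct sg_stub sg = { .length = 4096 };
        struct segment_stub seg = { .total_size = 1024 };

        segment_init_sg(&seg, &sg, 512);
        printf("this entry will take %u bytes\n", seg.size);  /* 1024 */
        return 0;
}
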
@@ -130,60 +84,72 @@ iscsi_hdr_digest(struct iscsi_conn *conn, struct iscsi_buf *buf, | |||
130 | */ | 84 | */ |
131 | 85 | ||
132 | /** | 86 | /** |
133 | * iscsi_tcp_chunk_init_sg - init indicated scatterlist entry | 87 | * iscsi_tcp_segment_init_sg - init indicated scatterlist entry |
134 | * @chunk: the buffer object | 88 | * @segment: the buffer object |
135 | * @idx: index into scatterlist | 89 | * @sg: scatterlist |
136 | * @offset: byte offset into that sg entry | 90 | * @offset: byte offset into that sg entry |
137 | * | 91 | * |
138 | * This function sets up the chunk so that subsequent | 92 | * This function sets up the segment so that subsequent |
139 | * data is copied to the indicated sg entry, at the given | 93 | * data is copied to the indicated sg entry, at the given |
140 | * offset. | 94 | * offset. |
141 | */ | 95 | */ |
142 | static inline void | 96 | static inline void |
143 | iscsi_tcp_chunk_init_sg(struct iscsi_chunk *chunk, | 97 | iscsi_tcp_segment_init_sg(struct iscsi_segment *segment, |
144 | unsigned int idx, unsigned int offset) | 98 | struct scatterlist *sg, unsigned int offset) |
145 | { | 99 | { |
146 | struct scatterlist *sg; | 100 | segment->sg = sg; |
147 | 101 | segment->sg_offset = offset; | |
148 | BUG_ON(chunk->sg == NULL); | 102 | segment->size = min(sg->length - offset, |
149 | 103 | segment->total_size - segment->total_copied); | |
150 | sg = &chunk->sg[idx]; | 104 | segment->data = NULL; |
151 | chunk->sg_index = idx; | ||
152 | chunk->sg_offset = offset; | ||
153 | chunk->size = min(sg->length - offset, chunk->total_size); | ||
154 | chunk->data = NULL; | ||
155 | } | 105 | } |
156 | 106 | ||
157 | /** | 107 | /** |
158 | * iscsi_tcp_chunk_map - map the current S/G page | 108 | * iscsi_tcp_segment_map - map the current S/G page |
159 | * @chunk: iscsi chunk | 109 | * @segment: iscsi_segment |
110 | * @recv: 1 if called from recv path | ||
160 | * | 111 | * |
161 | * We only need to possibly kmap data if scatter lists are being used, | 112 | * We only need to possibly kmap data if scatter lists are being used, |
162 | * because the iscsi passthrough and internal IO paths will never use high | 113 | * because the iscsi passthrough and internal IO paths will never use high |
163 | * mem pages. | 114 | * mem pages. |
164 | */ | 115 | */ |
165 | static inline void | 116 | static inline void |
166 | iscsi_tcp_chunk_map(struct iscsi_chunk *chunk) | 117 | iscsi_tcp_segment_map(struct iscsi_segment *segment, int recv) |
167 | { | 118 | { |
168 | struct scatterlist *sg; | 119 | struct scatterlist *sg; |
169 | 120 | ||
170 | if (chunk->data != NULL || !chunk->sg) | 121 | if (segment->data != NULL || !segment->sg) |
171 | return; | 122 | return; |
172 | 123 | ||
173 | sg = &chunk->sg[chunk->sg_index]; | 124 | sg = segment->sg; |
174 | BUG_ON(chunk->sg_mapped); | 125 | BUG_ON(segment->sg_mapped); |
175 | BUG_ON(sg->length == 0); | 126 | BUG_ON(sg->length == 0); |
176 | chunk->sg_mapped = kmap_atomic(sg->page, KM_SOFTIRQ0); | 127 | |
177 | chunk->data = chunk->sg_mapped + sg->offset + chunk->sg_offset; | 128 | /* |
129 | * If the page count is greater than one it is ok to send | ||
130 | * to the network layer's zero copy send path. If not we | ||
131 | * have to go the slow sendmsg path. We always map for the | ||
132 | * recv path. | ||
133 | */ | ||
134 | if (page_count(sg_page(sg)) >= 1 && !recv) | ||
135 | return; | ||
136 | |||
137 | debug_tcp("iscsi_tcp_segment_map %s %p\n", recv ? "recv" : "xmit", | ||
138 | segment); | ||
139 | segment->sg_mapped = kmap_atomic(sg_page(sg), KM_SOFTIRQ0); | ||
140 | segment->data = segment->sg_mapped + sg->offset + segment->sg_offset; | ||
178 | } | 141 | } |
179 | 142 | ||
180 | static inline void | 143 | static inline void |
181 | iscsi_tcp_chunk_unmap(struct iscsi_chunk *chunk) | 144 | iscsi_tcp_segment_unmap(struct iscsi_segment *segment) |
182 | { | 145 | { |
183 | if (chunk->sg_mapped) { | 146 | debug_tcp("iscsi_tcp_segment_unmap %p\n", segment); |
184 | kunmap_atomic(chunk->sg_mapped, KM_SOFTIRQ0); | 147 | |
185 | chunk->sg_mapped = NULL; | 148 | if (segment->sg_mapped) { |
186 | chunk->data = NULL; | 149 | debug_tcp("iscsi_tcp_segment_unmap valid\n"); |
150 | kunmap_atomic(segment->sg_mapped, KM_SOFTIRQ0); | ||
151 | segment->sg_mapped = NULL; | ||
152 | segment->data = NULL; | ||
187 | } | 153 | } |
188 | } | 154 | } |
189 | 155 | ||
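
iscsi_tcp_segment_map() above only establishes a mapping when one is actually needed: always on the receive path (the CPU must copy the data), but on transmit only when the page cannot be handed to the network layer's zero-copy sendpage path. The userspace sketch below restates that decision; the can_sendpage flag stands in for the driver's page_count(sg_page(sg)) >= 1 test, and nothing here is kernel API.

#include <stdbool.h>
#include <stdio.h>

struct sg_stub { unsigned char *buf; unsigned int length; };

struct segment_stub {
        struct sg_stub *sg;
        unsigned int sg_offset;
        unsigned char *data;       /* non-NULL once "mapped" */
        unsigned char *sg_mapped;  /* base of the mapping, kept for unmap */
};

/* Mirror of iscsi_tcp_segment_map(): skip the mapping entirely when the
 * transmit path can hand the page to sendpage() unmodified. */
static void segment_map(struct segment_stub *seg, bool recv, bool can_sendpage)
{
        if (seg->data || !seg->sg)
                return;
        if (!recv && can_sendpage)
                return;                        /* zero-copy xmit: no mapping */
        seg->sg_mapped = seg->sg->buf;         /* kmap_atomic() in the driver */
        seg->data = seg->sg_mapped + seg->sg_offset;
}

static void segment_unmap(struct segment_stub *seg)
{
        if (seg->sg_mapped) {                  /* kunmap_atomic() in the driver */
                seg->sg_mapped = NULL;
                seg->data = NULL;
        }
}

int main(void)
{
        unsigned char page[4096];
        struct sg_stub sg = { .buf = page, .length = sizeof(page) };
        struct segment_stub seg = { .sg = &sg, .sg_offset = 8 };

        segment_map(&seg, true, false);        /* recv path: always mapped */
        printf("mapped: %s\n", seg.data ? "yes" : "no");
        segment_unmap(&seg);
        return 0;
}
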
@@ -191,23 +157,24 @@ iscsi_tcp_chunk_unmap(struct iscsi_chunk *chunk) | |||
191 | * Splice the digest buffer into the buffer | 157 | * Splice the digest buffer into the buffer |
192 | */ | 158 | */ |
193 | static inline void | 159 | static inline void |
194 | iscsi_tcp_chunk_splice_digest(struct iscsi_chunk *chunk, void *digest) | 160 | iscsi_tcp_segment_splice_digest(struct iscsi_segment *segment, void *digest) |
195 | { | 161 | { |
196 | chunk->data = digest; | 162 | segment->data = digest; |
197 | chunk->digest_len = ISCSI_DIGEST_SIZE; | 163 | segment->digest_len = ISCSI_DIGEST_SIZE; |
198 | chunk->total_size += ISCSI_DIGEST_SIZE; | 164 | segment->total_size += ISCSI_DIGEST_SIZE; |
199 | chunk->size = ISCSI_DIGEST_SIZE; | 165 | segment->size = ISCSI_DIGEST_SIZE; |
200 | chunk->copied = 0; | 166 | segment->copied = 0; |
201 | chunk->sg = NULL; | 167 | segment->sg = NULL; |
202 | chunk->sg_index = 0; | 168 | segment->hash = NULL; |
203 | chunk->hash = NULL; | ||
204 | } | 169 | } |
205 | 170 | ||
206 | /** | 171 | /** |
207 | * iscsi_tcp_chunk_done - check whether the chunk is complete | 172 | * iscsi_tcp_segment_done - check whether the segment is complete |
208 | * @chunk: iscsi chunk to check | 173 | * @segment: iscsi segment to check |
174 | * @recv: set to one if this is called from the recv path | ||
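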
175 | * @copied: number of bytes copied | ||
209 | * | 176 | * |
210 | * Check if we're done receiving this chunk. If the receive | 177 | * Check if we're done receiving this segment. If the receive |
211 | * buffer is full but we expect more data, move on to the | 178 | * buffer is full but we expect more data, move on to the |
212 | * next entry in the scatterlist. | 179 | * next entry in the scatterlist. |
213 | * | 180 | * |
@@ -217,62 +184,145 @@ iscsi_tcp_chunk_splice_digest(struct iscsi_chunk *chunk, void *digest) | |||
217 | * This function must be re-entrant. | 184 | * This function must be re-entrant. |
218 | */ | 185 | */ |
219 | static inline int | 186 | static inline int |
220 | iscsi_tcp_chunk_done(struct iscsi_chunk *chunk) | 187 | iscsi_tcp_segment_done(struct iscsi_segment *segment, int recv, unsigned copied) |
221 | { | 188 | { |
222 | static unsigned char padbuf[ISCSI_PAD_LEN]; | 189 | static unsigned char padbuf[ISCSI_PAD_LEN]; |
190 | struct scatterlist sg; | ||
223 | unsigned int pad; | 191 | unsigned int pad; |
224 | 192 | ||
225 | if (chunk->copied < chunk->size) { | 193 | debug_tcp("copied %u %u size %u %s\n", segment->copied, copied, |
226 | iscsi_tcp_chunk_map(chunk); | 194 | segment->size, recv ? "recv" : "xmit"); |
195 | if (segment->hash && copied) { | ||
196 | /* | ||
197 | * If a segment is kmapd we must unmap it before sending | ||
198 | * to the crypto layer since that will try to kmap it again. | ||
199 | */ | ||
200 | iscsi_tcp_segment_unmap(segment); | ||
201 | |||
202 | if (!segment->data) { | ||
203 | sg_init_table(&sg, 1); | ||
204 | sg_set_page(&sg, sg_page(segment->sg), copied, | ||
205 | segment->copied + segment->sg_offset + | ||
206 | segment->sg->offset); | ||
207 | } else | ||
208 | sg_init_one(&sg, segment->data + segment->copied, | ||
209 | copied); | ||
210 | crypto_hash_update(segment->hash, &sg, copied); | ||
211 | } | ||
212 | |||
213 | segment->copied += copied; | ||
214 | if (segment->copied < segment->size) { | ||
215 | iscsi_tcp_segment_map(segment, recv); | ||
227 | return 0; | 216 | return 0; |
228 | } | 217 | } |
229 | 218 | ||
230 | chunk->total_copied += chunk->copied; | 219 | segment->total_copied += segment->copied; |
231 | chunk->copied = 0; | 220 | segment->copied = 0; |
232 | chunk->size = 0; | 221 | segment->size = 0; |
233 | 222 | ||
234 | /* Unmap the current scatterlist page, if there is one. */ | 223 | /* Unmap the current scatterlist page, if there is one. */ |
235 | iscsi_tcp_chunk_unmap(chunk); | 224 | iscsi_tcp_segment_unmap(segment); |
236 | 225 | ||
237 | /* Do we have more scatterlist entries? */ | 226 | /* Do we have more scatterlist entries? */ |
238 | if (chunk->total_copied < chunk->total_size) { | 227 | debug_tcp("total copied %u total size %u\n", segment->total_copied, |
228 | segment->total_size); | ||
229 | if (segment->total_copied < segment->total_size) { | ||
239 | /* Proceed to the next entry in the scatterlist. */ | 230 | /* Proceed to the next entry in the scatterlist. */ |
240 | iscsi_tcp_chunk_init_sg(chunk, chunk->sg_index + 1, 0); | 231 | iscsi_tcp_segment_init_sg(segment, sg_next(segment->sg), |
241 | iscsi_tcp_chunk_map(chunk); | 232 | 0); |
242 | BUG_ON(chunk->size == 0); | 233 | iscsi_tcp_segment_map(segment, recv); |
234 | BUG_ON(segment->size == 0); | ||
243 | return 0; | 235 | return 0; |
244 | } | 236 | } |
245 | 237 | ||
246 | /* Do we need to handle padding? */ | 238 | /* Do we need to handle padding? */ |
247 | pad = iscsi_padding(chunk->total_copied); | 239 | pad = iscsi_padding(segment->total_copied); |
248 | if (pad != 0) { | 240 | if (pad != 0) { |
249 | debug_tcp("consume %d pad bytes\n", pad); | 241 | debug_tcp("consume %d pad bytes\n", pad); |
250 | chunk->total_size += pad; | 242 | segment->total_size += pad; |
251 | chunk->size = pad; | 243 | segment->size = pad; |
252 | chunk->data = padbuf; | 244 | segment->data = padbuf; |
253 | return 0; | 245 | return 0; |
254 | } | 246 | } |
255 | 247 | ||
256 | /* | 248 | /* |
257 | * Set us up for receiving the data digest. hdr digest | 249 | * Set us up for transferring the data digest. hdr digest |
258 | * is completely handled in hdr done function. | 250 | * is completely handled in hdr done function. |
259 | */ | 251 | */ |
260 | if (chunk->hash) { | 252 | if (segment->hash) { |
261 | if (chunk->digest_len == 0) { | 253 | crypto_hash_final(segment->hash, segment->digest); |
262 | crypto_hash_final(chunk->hash, chunk->digest); | 254 | iscsi_tcp_segment_splice_digest(segment, |
263 | iscsi_tcp_chunk_splice_digest(chunk, | 255 | recv ? segment->recv_digest : segment->digest); |
264 | chunk->recv_digest); | 256 | return 0; |
265 | return 0; | ||
266 | } | ||
267 | } | 257 | } |
268 | 258 | ||
269 | return 1; | 259 | return 1; |
270 | } | 260 | } |
271 | 261 | ||
272 | /** | 262 | /** |
273 | * iscsi_tcp_chunk_recv - copy data to chunk | 263 | * iscsi_tcp_xmit_segment - transmit segment |
274 | * @tcp_conn: the iSCSI TCP connection | 264 | * @tcp_conn: the iSCSI TCP connection |
275 | * @chunk: the buffer to copy to | 265 | * @segment: the buffer to transmit |
266 | * | ||
267 | * This function transmits as much of the buffer as | ||
268 | * the network layer will accept, and returns the number of | ||
269 | * bytes transmitted. | ||
270 | * | ||
271 | * If CRC hashing is enabled, the function will compute the | ||
272 | * hash as it goes. When the entire segment has been transmitted, | ||
273 | * it will retrieve the hash value and send it as well. | ||
274 | */ | ||
275 | static int | ||
276 | iscsi_tcp_xmit_segment(struct iscsi_tcp_conn *tcp_conn, | ||
277 | struct iscsi_segment *segment) | ||
278 | { | ||
279 | struct socket *sk = tcp_conn->sock; | ||
280 | unsigned int copied = 0; | ||
281 | int r = 0; | ||
282 | |||
283 | while (!iscsi_tcp_segment_done(segment, 0, r)) { | ||
284 | struct scatterlist *sg; | ||
285 | unsigned int offset, copy; | ||
286 | int flags = 0; | ||
287 | |||
288 | r = 0; | ||
289 | offset = segment->copied; | ||
290 | copy = segment->size - offset; | ||
291 | |||
292 | if (segment->total_copied + segment->size < segment->total_size) | ||
293 | flags |= MSG_MORE; | ||
294 | |||
295 | /* Use sendpage if we can; else fall back to sendmsg */ | ||
296 | if (!segment->data) { | ||
297 | sg = segment->sg; | ||
298 | offset += segment->sg_offset + sg->offset; | ||
299 | r = tcp_conn->sendpage(sk, sg_page(sg), offset, copy, | ||
300 | flags); | ||
301 | } else { | ||
302 | struct msghdr msg = { .msg_flags = flags }; | ||
303 | struct kvec iov = { | ||
304 | .iov_base = segment->data + offset, | ||
305 | .iov_len = copy | ||
306 | }; | ||
307 | |||
308 | r = kernel_sendmsg(sk, &msg, &iov, 1, copy); | ||
309 | } | ||
310 | |||
311 | if (r < 0) { | ||
312 | iscsi_tcp_segment_unmap(segment); | ||
313 | if (copied || r == -EAGAIN) | ||
314 | break; | ||
315 | return r; | ||
316 | } | ||
317 | copied += r; | ||
318 | } | ||
319 | return copied; | ||
320 | } | ||
321 | |||
322 | /** | ||
323 | * iscsi_tcp_segment_recv - copy data to segment | ||
324 | * @tcp_conn: the iSCSI TCP connection | ||
325 | * @segment: the buffer to copy to | ||
276 | * @ptr: data pointer | 326 | * @ptr: data pointer |
277 | * @len: amount of data available | 327 | * @len: amount of data available |
278 | * | 328 | * |
@@ -287,29 +337,24 @@ iscsi_tcp_chunk_done(struct iscsi_chunk *chunk) | |||
287 | * just way we do for network layer checksums. | 337 | * just way we do for network layer checksums. |
288 | */ | 338 | */ |
289 | static int | 339 | static int |
290 | iscsi_tcp_chunk_recv(struct iscsi_tcp_conn *tcp_conn, | 340 | iscsi_tcp_segment_recv(struct iscsi_tcp_conn *tcp_conn, |
291 | struct iscsi_chunk *chunk, const void *ptr, | 341 | struct iscsi_segment *segment, const void *ptr, |
292 | unsigned int len) | 342 | unsigned int len) |
293 | { | 343 | { |
294 | struct scatterlist sg; | 344 | unsigned int copy = 0, copied = 0; |
295 | unsigned int copy, copied = 0; | ||
296 | |||
297 | while (!iscsi_tcp_chunk_done(chunk)) { | ||
298 | if (copied == len) | ||
299 | goto out; | ||
300 | 345 | ||
301 | copy = min(len - copied, chunk->size - chunk->copied); | 346 | while (!iscsi_tcp_segment_done(segment, 1, copy)) { |
302 | memcpy(chunk->data + chunk->copied, ptr + copied, copy); | 347 | if (copied == len) { |
303 | 348 | debug_tcp("iscsi_tcp_segment_recv copied %d bytes\n", | |
304 | if (chunk->hash) { | 349 | len); |
305 | sg_init_one(&sg, ptr + copied, copy); | 350 | break; |
306 | crypto_hash_update(chunk->hash, &sg, copy); | ||
307 | } | 351 | } |
308 | chunk->copied += copy; | 352 | |
353 | copy = min(len - copied, segment->size - segment->copied); | ||
354 | debug_tcp("iscsi_tcp_segment_recv copying %d\n", copy); | ||
355 | memcpy(segment->data + segment->copied, ptr + copied, copy); | ||
309 | copied += copy; | 356 | copied += copy; |
310 | } | 357 | } |
311 | |||
312 | out: | ||
313 | return copied; | 358 | return copied; |
314 | } | 359 | } |
315 | 360 | ||
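
iscsi_tcp_segment_done() above appends pad bytes whenever the accumulated payload is not a multiple of the iSCSI 4-byte word size (the iscsi_padding() helper from libiscsi). A minimal userspace sketch of that rule, assuming ISCSI_PAD_LEN is 4 as the driver's padbuf implies:

#include <assert.h>

#define PAD_LEN 4   /* ISCSI_PAD_LEN in the driver */

/* Number of pad bytes needed to round len up to a 4-byte boundary,
 * i.e. what iscsi_padding() returns in libiscsi. */
static unsigned int pad_bytes(unsigned int len)
{
        unsigned int rem = len & (PAD_LEN - 1);

        return rem ? PAD_LEN - rem : 0;
}

int main(void)
{
        assert(pad_bytes(512) == 0);   /* already aligned */
        assert(pad_bytes(513) == 3);   /* 513 + 3 = 516, a multiple of 4 */
        assert(pad_bytes(514) == 2);
        assert(pad_bytes(515) == 1);
        return 0;
}
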
@@ -325,12 +370,13 @@ iscsi_tcp_dgst_header(struct hash_desc *hash, const void *hdr, size_t hdrlen, | |||
325 | 370 | ||
326 | static inline int | 371 | static inline int |
327 | iscsi_tcp_dgst_verify(struct iscsi_tcp_conn *tcp_conn, | 372 | iscsi_tcp_dgst_verify(struct iscsi_tcp_conn *tcp_conn, |
328 | struct iscsi_chunk *chunk) | 373 | struct iscsi_segment *segment) |
329 | { | 374 | { |
330 | if (!chunk->digest_len) | 375 | if (!segment->digest_len) |
331 | return 1; | 376 | return 1; |
332 | 377 | ||
333 | if (memcmp(chunk->recv_digest, chunk->digest, chunk->digest_len)) { | 378 | if (memcmp(segment->recv_digest, segment->digest, |
379 | segment->digest_len)) { | ||
334 | debug_scsi("digest mismatch\n"); | 380 | debug_scsi("digest mismatch\n"); |
335 | return 0; | 381 | return 0; |
336 | } | 382 | } |
@@ -339,55 +385,59 @@ iscsi_tcp_dgst_verify(struct iscsi_tcp_conn *tcp_conn, | |||
339 | } | 385 | } |
340 | 386 | ||
341 | /* | 387 | /* |
342 | * Helper function to set up chunk buffer | 388 | * Helper function to set up segment buffer |
343 | */ | 389 | */ |
344 | static inline void | 390 | static inline void |
345 | __iscsi_chunk_init(struct iscsi_chunk *chunk, size_t size, | 391 | __iscsi_segment_init(struct iscsi_segment *segment, size_t size, |
346 | iscsi_chunk_done_fn_t *done, struct hash_desc *hash) | 392 | iscsi_segment_done_fn_t *done, struct hash_desc *hash) |
347 | { | 393 | { |
348 | memset(chunk, 0, sizeof(*chunk)); | 394 | memset(segment, 0, sizeof(*segment)); |
349 | chunk->total_size = size; | 395 | segment->total_size = size; |
350 | chunk->done = done; | 396 | segment->done = done; |
351 | 397 | ||
352 | if (hash) { | 398 | if (hash) { |
353 | chunk->hash = hash; | 399 | segment->hash = hash; |
354 | crypto_hash_init(hash); | 400 | crypto_hash_init(hash); |
355 | } | 401 | } |
356 | } | 402 | } |
357 | 403 | ||
358 | static inline void | 404 | static inline void |
359 | iscsi_chunk_init_linear(struct iscsi_chunk *chunk, void *data, size_t size, | 405 | iscsi_segment_init_linear(struct iscsi_segment *segment, void *data, |
360 | iscsi_chunk_done_fn_t *done, struct hash_desc *hash) | 406 | size_t size, iscsi_segment_done_fn_t *done, |
407 | struct hash_desc *hash) | ||
361 | { | 408 | { |
362 | __iscsi_chunk_init(chunk, size, done, hash); | 409 | __iscsi_segment_init(segment, size, done, hash); |
363 | chunk->data = data; | 410 | segment->data = data; |
364 | chunk->size = size; | 411 | segment->size = size; |
365 | } | 412 | } |
366 | 413 | ||
367 | static inline int | 414 | static inline int |
368 | iscsi_chunk_seek_sg(struct iscsi_chunk *chunk, | 415 | iscsi_segment_seek_sg(struct iscsi_segment *segment, |
369 | struct scatterlist *sg, unsigned int sg_count, | 416 | struct scatterlist *sg_list, unsigned int sg_count, |
370 | unsigned int offset, size_t size, | 417 | unsigned int offset, size_t size, |
371 | iscsi_chunk_done_fn_t *done, struct hash_desc *hash) | 418 | iscsi_segment_done_fn_t *done, struct hash_desc *hash) |
372 | { | 419 | { |
420 | struct scatterlist *sg; | ||
373 | unsigned int i; | 421 | unsigned int i; |
374 | 422 | ||
375 | __iscsi_chunk_init(chunk, size, done, hash); | 423 | debug_scsi("iscsi_segment_seek_sg offset %u size %llu\n", |
376 | for (i = 0; i < sg_count; ++i) { | 424 | offset, size); |
377 | if (offset < sg[i].length) { | 425 | __iscsi_segment_init(segment, size, done, hash); |
378 | chunk->sg = sg; | 426 | for_each_sg(sg_list, sg, sg_count, i) { |
379 | chunk->sg_count = sg_count; | 427 | debug_scsi("sg %d, len %u offset %u\n", i, sg->length, |
380 | iscsi_tcp_chunk_init_sg(chunk, i, offset); | 428 | sg->offset); |
429 | if (offset < sg->length) { | ||
430 | iscsi_tcp_segment_init_sg(segment, sg, offset); | ||
381 | return 0; | 431 | return 0; |
382 | } | 432 | } |
383 | offset -= sg[i].length; | 433 | offset -= sg->length; |
384 | } | 434 | } |
385 | 435 | ||
386 | return ISCSI_ERR_DATA_OFFSET; | 436 | return ISCSI_ERR_DATA_OFFSET; |
387 | } | 437 | } |
388 | 438 | ||
389 | /** | 439 | /** |
390 | * iscsi_tcp_hdr_recv_prep - prep chunk for hdr reception | 440 | * iscsi_tcp_hdr_recv_prep - prep segment for hdr reception |
391 | * @tcp_conn: iscsi connection to prep for | 441 | * @tcp_conn: iscsi connection to prep for |
392 | * | 442 | * |
393 | * This function always passes NULL for the hash argument, because when this | 443 | * This function always passes NULL for the hash argument, because when this |
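
iscsi_segment_seek_sg(), added a couple of hunks above, walks the scatterlist with for_each_sg(), subtracting each entry's length from the requested offset until it finds the entry that contains it, then initializes the segment at the remaining in-entry offset. The userspace sketch below does the same walk over plain lengths; the -1 return plays the role of ISCSI_ERR_DATA_OFFSET and all names are illustrative.

#include <stdio.h>

struct sg_stub { unsigned int length; };

/* Find which entry contains byte `offset` of the overall buffer and the
 * offset within that entry; returns -1 if offset is past the end. */
static int seek_sg(const struct sg_stub *sg, unsigned int count,
                   unsigned int offset, unsigned int *in_entry)
{
        unsigned int i;

        for (i = 0; i < count; i++) {
                if (offset < sg[i].length) {
                        *in_entry = offset;
                        return (int)i;
                }
                offset -= sg[i].length;
        }
        return -1;
}

int main(void)
{
        struct sg_stub sg[] = { { 4096 }, { 4096 }, { 2048 } };
        unsigned int off;
        int idx = seek_sg(sg, 3, 5000, &off);

        printf("entry %d, offset %u\n", idx, off);  /* entry 1, offset 904 */
        return 0;
}
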
@@ -399,7 +449,7 @@ iscsi_tcp_hdr_recv_prep(struct iscsi_tcp_conn *tcp_conn) | |||
399 | { | 449 | { |
400 | debug_tcp("iscsi_tcp_hdr_recv_prep(%p%s)\n", tcp_conn, | 450 | debug_tcp("iscsi_tcp_hdr_recv_prep(%p%s)\n", tcp_conn, |
401 | tcp_conn->iscsi_conn->hdrdgst_en ? ", digest enabled" : ""); | 451 | tcp_conn->iscsi_conn->hdrdgst_en ? ", digest enabled" : ""); |
402 | iscsi_chunk_init_linear(&tcp_conn->in.chunk, | 452 | iscsi_segment_init_linear(&tcp_conn->in.segment, |
403 | tcp_conn->in.hdr_buf, sizeof(struct iscsi_hdr), | 453 | tcp_conn->in.hdr_buf, sizeof(struct iscsi_hdr), |
404 | iscsi_tcp_hdr_recv_done, NULL); | 454 | iscsi_tcp_hdr_recv_done, NULL); |
405 | } | 455 | } |
@@ -409,12 +459,12 @@ iscsi_tcp_hdr_recv_prep(struct iscsi_tcp_conn *tcp_conn) | |||
409 | */ | 459 | */ |
410 | static int | 460 | static int |
411 | iscsi_tcp_data_recv_done(struct iscsi_tcp_conn *tcp_conn, | 461 | iscsi_tcp_data_recv_done(struct iscsi_tcp_conn *tcp_conn, |
412 | struct iscsi_chunk *chunk) | 462 | struct iscsi_segment *segment) |
413 | { | 463 | { |
414 | struct iscsi_conn *conn = tcp_conn->iscsi_conn; | 464 | struct iscsi_conn *conn = tcp_conn->iscsi_conn; |
415 | int rc = 0; | 465 | int rc = 0; |
416 | 466 | ||
417 | if (!iscsi_tcp_dgst_verify(tcp_conn, chunk)) | 467 | if (!iscsi_tcp_dgst_verify(tcp_conn, segment)) |
418 | return ISCSI_ERR_DATA_DGST; | 468 | return ISCSI_ERR_DATA_DGST; |
419 | 469 | ||
420 | rc = iscsi_complete_pdu(conn, tcp_conn->in.hdr, | 470 | rc = iscsi_complete_pdu(conn, tcp_conn->in.hdr, |
@@ -435,7 +485,7 @@ iscsi_tcp_data_recv_prep(struct iscsi_tcp_conn *tcp_conn) | |||
435 | if (conn->datadgst_en) | 485 | if (conn->datadgst_en) |
436 | rx_hash = &tcp_conn->rx_hash; | 486 | rx_hash = &tcp_conn->rx_hash; |
437 | 487 | ||
438 | iscsi_chunk_init_linear(&tcp_conn->in.chunk, | 488 | iscsi_segment_init_linear(&tcp_conn->in.segment, |
439 | conn->data, tcp_conn->in.datalen, | 489 | conn->data, tcp_conn->in.datalen, |
440 | iscsi_tcp_data_recv_done, rx_hash); | 490 | iscsi_tcp_data_recv_done, rx_hash); |
441 | } | 491 | } |
@@ -448,7 +498,6 @@ iscsi_tcp_cleanup_ctask(struct iscsi_conn *conn, struct iscsi_cmd_task *ctask) | |||
448 | { | 498 | { |
449 | struct iscsi_tcp_cmd_task *tcp_ctask = ctask->dd_data; | 499 | struct iscsi_tcp_cmd_task *tcp_ctask = ctask->dd_data; |
450 | struct iscsi_r2t_info *r2t; | 500 | struct iscsi_r2t_info *r2t; |
451 | struct scsi_cmnd *sc; | ||
452 | 501 | ||
453 | /* flush ctask's r2t queues */ | 502 | /* flush ctask's r2t queues */ |
454 | while (__kfifo_get(tcp_ctask->r2tqueue, (void*)&r2t, sizeof(void*))) { | 503 | while (__kfifo_get(tcp_ctask->r2tqueue, (void*)&r2t, sizeof(void*))) { |
@@ -457,12 +506,12 @@ iscsi_tcp_cleanup_ctask(struct iscsi_conn *conn, struct iscsi_cmd_task *ctask) | |||
457 | debug_scsi("iscsi_tcp_cleanup_ctask pending r2t dropped\n"); | 506 | debug_scsi("iscsi_tcp_cleanup_ctask pending r2t dropped\n"); |
458 | } | 507 | } |
459 | 508 | ||
460 | sc = ctask->sc; | 509 | r2t = tcp_ctask->r2t; |
461 | if (unlikely(!sc)) | 510 | if (r2t != NULL) { |
462 | return; | 511 | __kfifo_put(tcp_ctask->r2tpool.queue, (void*)&r2t, |
463 | 512 | sizeof(void*)); | |
464 | tcp_ctask->xmstate = XMSTATE_IDLE; | 513 | tcp_ctask->r2t = NULL; |
465 | tcp_ctask->r2t = NULL; | 514 | } |
466 | } | 515 | } |
467 | 516 | ||
468 | /** | 517 | /** |
@@ -481,11 +530,6 @@ iscsi_data_rsp(struct iscsi_conn *conn, struct iscsi_cmd_task *ctask) | |||
481 | int datasn = be32_to_cpu(rhdr->datasn); | 530 | int datasn = be32_to_cpu(rhdr->datasn); |
482 | 531 | ||
483 | iscsi_update_cmdsn(session, (struct iscsi_nopin*)rhdr); | 532 | iscsi_update_cmdsn(session, (struct iscsi_nopin*)rhdr); |
484 | /* | ||
485 | * setup Data-In byte counter (gets decremented..) | ||
486 | */ | ||
487 | ctask->data_count = tcp_conn->in.datalen; | ||
488 | |||
489 | if (tcp_conn->in.datalen == 0) | 533 | if (tcp_conn->in.datalen == 0) |
490 | return 0; | 534 | return 0; |
491 | 535 | ||
@@ -543,9 +587,6 @@ iscsi_solicit_data_init(struct iscsi_conn *conn, struct iscsi_cmd_task *ctask, | |||
543 | struct iscsi_r2t_info *r2t) | 587 | struct iscsi_r2t_info *r2t) |
544 | { | 588 | { |
545 | struct iscsi_data *hdr; | 589 | struct iscsi_data *hdr; |
546 | struct scsi_cmnd *sc = ctask->sc; | ||
547 | int i, sg_count = 0; | ||
548 | struct scatterlist *sg; | ||
549 | 590 | ||
550 | hdr = &r2t->dtask.hdr; | 591 | hdr = &r2t->dtask.hdr; |
551 | memset(hdr, 0, sizeof(struct iscsi_data)); | 592 | memset(hdr, 0, sizeof(struct iscsi_data)); |
@@ -569,34 +610,6 @@ iscsi_solicit_data_init(struct iscsi_conn *conn, struct iscsi_cmd_task *ctask, | |||
569 | conn->dataout_pdus_cnt++; | 610 | conn->dataout_pdus_cnt++; |
570 | 611 | ||
571 | r2t->sent = 0; | 612 | r2t->sent = 0; |
572 | |||
573 | iscsi_buf_init_iov(&r2t->headbuf, (char*)hdr, | ||
574 | sizeof(struct iscsi_hdr)); | ||
575 | |||
576 | sg = scsi_sglist(sc); | ||
577 | r2t->sg = NULL; | ||
578 | for (i = 0; i < scsi_sg_count(sc); i++, sg += 1) { | ||
579 | /* FIXME: prefetch ? */ | ||
580 | if (sg_count + sg->length > r2t->data_offset) { | ||
581 | int page_offset; | ||
582 | |||
583 | /* sg page found! */ | ||
584 | |||
585 | /* offset within this page */ | ||
586 | page_offset = r2t->data_offset - sg_count; | ||
587 | |||
588 | /* fill in this buffer */ | ||
589 | iscsi_buf_init_sg(&r2t->sendbuf, sg); | ||
590 | r2t->sendbuf.sg.offset += page_offset; | ||
591 | r2t->sendbuf.sg.length -= page_offset; | ||
592 | |||
593 | /* xmit logic will continue with next one */ | ||
594 | r2t->sg = sg + 1; | ||
595 | break; | ||
596 | } | ||
597 | sg_count += sg->length; | ||
598 | } | ||
599 | BUG_ON(r2t->sg == NULL); | ||
600 | } | 613 | } |
601 | 614 | ||
602 | /** | 615 | /** |
@@ -670,7 +683,6 @@ iscsi_r2t_rsp(struct iscsi_conn *conn, struct iscsi_cmd_task *ctask) | |||
670 | 683 | ||
671 | tcp_ctask->exp_datasn = r2tsn + 1; | 684 | tcp_ctask->exp_datasn = r2tsn + 1; |
672 | __kfifo_put(tcp_ctask->r2tqueue, (void*)&r2t, sizeof(void*)); | 685 | __kfifo_put(tcp_ctask->r2tqueue, (void*)&r2t, sizeof(void*)); |
673 | tcp_ctask->xmstate |= XMSTATE_SOL_HDR_INIT; | ||
674 | conn->r2t_pdus_cnt++; | 686 | conn->r2t_pdus_cnt++; |
675 | 687 | ||
676 | iscsi_requeue_ctask(ctask); | 688 | iscsi_requeue_ctask(ctask); |
@@ -684,13 +696,13 @@ iscsi_r2t_rsp(struct iscsi_conn *conn, struct iscsi_cmd_task *ctask) | |||
684 | */ | 696 | */ |
685 | static int | 697 | static int |
686 | iscsi_tcp_process_data_in(struct iscsi_tcp_conn *tcp_conn, | 698 | iscsi_tcp_process_data_in(struct iscsi_tcp_conn *tcp_conn, |
687 | struct iscsi_chunk *chunk) | 699 | struct iscsi_segment *segment) |
688 | { | 700 | { |
689 | struct iscsi_conn *conn = tcp_conn->iscsi_conn; | 701 | struct iscsi_conn *conn = tcp_conn->iscsi_conn; |
690 | struct iscsi_hdr *hdr = tcp_conn->in.hdr; | 702 | struct iscsi_hdr *hdr = tcp_conn->in.hdr; |
691 | int rc; | 703 | int rc; |
692 | 704 | ||
693 | if (!iscsi_tcp_dgst_verify(tcp_conn, chunk)) | 705 | if (!iscsi_tcp_dgst_verify(tcp_conn, segment)) |
694 | return ISCSI_ERR_DATA_DGST; | 706 | return ISCSI_ERR_DATA_DGST; |
695 | 707 | ||
696 | /* check for non-exceptional status */ | 708 | /* check for non-exceptional status */ |
@@ -762,7 +774,7 @@ iscsi_tcp_hdr_dissect(struct iscsi_conn *conn, struct iscsi_hdr *hdr) | |||
762 | /* | 774 | /* |
763 | * Setup copy of Data-In into the Scsi_Cmnd | 775 | * Setup copy of Data-In into the Scsi_Cmnd |
764 | * Scatterlist case: | 776 | * Scatterlist case: |
765 | * We set up the iscsi_chunk to point to the next | 777 | * We set up the iscsi_segment to point to the next |
766 | * scatterlist entry to copy to. As we go along, | 778 | * scatterlist entry to copy to. As we go along, |
767 | * we move on to the next scatterlist entry and | 779 | * we move on to the next scatterlist entry and |
768 | * update the digest per-entry. | 780 | * update the digest per-entry. |
@@ -774,13 +786,13 @@ iscsi_tcp_hdr_dissect(struct iscsi_conn *conn, struct iscsi_hdr *hdr) | |||
774 | "datalen=%d)\n", tcp_conn, | 786 | "datalen=%d)\n", tcp_conn, |
775 | tcp_ctask->data_offset, | 787 | tcp_ctask->data_offset, |
776 | tcp_conn->in.datalen); | 788 | tcp_conn->in.datalen); |
777 | return iscsi_chunk_seek_sg(&tcp_conn->in.chunk, | 789 | return iscsi_segment_seek_sg(&tcp_conn->in.segment, |
778 | scsi_sglist(ctask->sc), | 790 | scsi_sglist(ctask->sc), |
779 | scsi_sg_count(ctask->sc), | 791 | scsi_sg_count(ctask->sc), |
780 | tcp_ctask->data_offset, | 792 | tcp_ctask->data_offset, |
781 | tcp_conn->in.datalen, | 793 | tcp_conn->in.datalen, |
782 | iscsi_tcp_process_data_in, | 794 | iscsi_tcp_process_data_in, |
783 | rx_hash); | 795 | rx_hash); |
784 | } | 796 | } |
785 | /* fall through */ | 797 | /* fall through */ |
786 | case ISCSI_OP_SCSI_CMD_RSP: | 798 | case ISCSI_OP_SCSI_CMD_RSP: |
@@ -846,17 +858,6 @@ iscsi_tcp_hdr_dissect(struct iscsi_conn *conn, struct iscsi_hdr *hdr) | |||
846 | return rc; | 858 | return rc; |
847 | } | 859 | } |
848 | 860 | ||
849 | static inline void | ||
850 | partial_sg_digest_update(struct hash_desc *desc, struct scatterlist *sg, | ||
851 | int offset, int length) | ||
852 | { | ||
853 | struct scatterlist temp; | ||
854 | |||
855 | sg_init_table(&temp, 1); | ||
856 | sg_set_page(&temp, sg_page(sg), length, offset); | ||
857 | crypto_hash_update(desc, &temp, length); | ||
858 | } | ||
859 | |||
860 | /** | 861 | /** |
861 | * iscsi_tcp_hdr_recv_done - process PDU header | 862 | * iscsi_tcp_hdr_recv_done - process PDU header |
862 | * | 863 | * |
@@ -866,7 +867,7 @@ partial_sg_digest_update(struct hash_desc *desc, struct scatterlist *sg, | |||
866 | */ | 867 | */ |
867 | static int | 868 | static int |
868 | iscsi_tcp_hdr_recv_done(struct iscsi_tcp_conn *tcp_conn, | 869 | iscsi_tcp_hdr_recv_done(struct iscsi_tcp_conn *tcp_conn, |
869 | struct iscsi_chunk *chunk) | 870 | struct iscsi_segment *segment) |
870 | { | 871 | { |
871 | struct iscsi_conn *conn = tcp_conn->iscsi_conn; | 872 | struct iscsi_conn *conn = tcp_conn->iscsi_conn; |
872 | struct iscsi_hdr *hdr; | 873 | struct iscsi_hdr *hdr; |
@@ -876,7 +877,7 @@ iscsi_tcp_hdr_recv_done(struct iscsi_tcp_conn *tcp_conn, | |||
876 | * may need to go back to the caller for more. | 877 | * may need to go back to the caller for more. |
877 | */ | 878 | */ |
878 | hdr = (struct iscsi_hdr *) tcp_conn->in.hdr_buf; | 879 | hdr = (struct iscsi_hdr *) tcp_conn->in.hdr_buf; |
879 | if (chunk->copied == sizeof(struct iscsi_hdr) && hdr->hlength) { | 880 | if (segment->copied == sizeof(struct iscsi_hdr) && hdr->hlength) { |
880 | /* Bump the header length - the caller will | 881 | /* Bump the header length - the caller will |
881 | * just loop around and get the AHS for us, and | 882 | * just loop around and get the AHS for us, and |
882 | * call again. */ | 883 | * call again. */ |
@@ -886,8 +887,8 @@ iscsi_tcp_hdr_recv_done(struct iscsi_tcp_conn *tcp_conn, | |||
886 | if (sizeof(*hdr) + ahslen > sizeof(tcp_conn->in.hdr_buf)) | 887 | if (sizeof(*hdr) + ahslen > sizeof(tcp_conn->in.hdr_buf)) |
887 | return ISCSI_ERR_AHSLEN; | 888 | return ISCSI_ERR_AHSLEN; |
888 | 889 | ||
889 | chunk->total_size += ahslen; | 890 | segment->total_size += ahslen; |
890 | chunk->size += ahslen; | 891 | segment->size += ahslen; |
891 | return 0; | 892 | return 0; |
892 | } | 893 | } |
893 | 894 | ||
@@ -895,16 +896,16 @@ iscsi_tcp_hdr_recv_done(struct iscsi_tcp_conn *tcp_conn, | |||
895 | * header digests; if so, set up the recv_digest buffer | 896 | * header digests; if so, set up the recv_digest buffer |
896 | * and go back for more. */ | 897 | * and go back for more. */ |
897 | if (conn->hdrdgst_en) { | 898 | if (conn->hdrdgst_en) { |
898 | if (chunk->digest_len == 0) { | 899 | if (segment->digest_len == 0) { |
899 | iscsi_tcp_chunk_splice_digest(chunk, | 900 | iscsi_tcp_segment_splice_digest(segment, |
900 | chunk->recv_digest); | 901 | segment->recv_digest); |
901 | return 0; | 902 | return 0; |
902 | } | 903 | } |
903 | iscsi_tcp_dgst_header(&tcp_conn->rx_hash, hdr, | 904 | iscsi_tcp_dgst_header(&tcp_conn->rx_hash, hdr, |
904 | chunk->total_copied - ISCSI_DIGEST_SIZE, | 905 | segment->total_copied - ISCSI_DIGEST_SIZE, |
905 | chunk->digest); | 906 | segment->digest); |
906 | 907 | ||
907 | if (!iscsi_tcp_dgst_verify(tcp_conn, chunk)) | 908 | if (!iscsi_tcp_dgst_verify(tcp_conn, segment)) |
908 | return ISCSI_ERR_HDR_DGST; | 909 | return ISCSI_ERR_HDR_DGST; |
909 | } | 910 | } |
910 | 911 | ||
@@ -925,7 +926,7 @@ iscsi_tcp_recv(read_descriptor_t *rd_desc, struct sk_buff *skb, | |||
925 | { | 926 | { |
926 | struct iscsi_conn *conn = rd_desc->arg.data; | 927 | struct iscsi_conn *conn = rd_desc->arg.data; |
927 | struct iscsi_tcp_conn *tcp_conn = conn->dd_data; | 928 | struct iscsi_tcp_conn *tcp_conn = conn->dd_data; |
928 | struct iscsi_chunk *chunk = &tcp_conn->in.chunk; | 929 | struct iscsi_segment *segment = &tcp_conn->in.segment; |
929 | struct skb_seq_state seq; | 930 | struct skb_seq_state seq; |
930 | unsigned int consumed = 0; | 931 | unsigned int consumed = 0; |
931 | int rc = 0; | 932 | int rc = 0; |
@@ -943,27 +944,31 @@ iscsi_tcp_recv(read_descriptor_t *rd_desc, struct sk_buff *skb, | |||
943 | const u8 *ptr; | 944 | const u8 *ptr; |
944 | 945 | ||
945 | avail = skb_seq_read(consumed, &ptr, &seq); | 946 | avail = skb_seq_read(consumed, &ptr, &seq); |
946 | if (avail == 0) | 947 | if (avail == 0) { |
948 | debug_tcp("no more data avail. Consumed %d\n", | ||
949 | consumed); | ||
947 | break; | 950 | break; |
948 | BUG_ON(chunk->copied >= chunk->size); | 951 | } |
952 | BUG_ON(segment->copied >= segment->size); | ||
949 | 953 | ||
950 | debug_tcp("skb %p ptr=%p avail=%u\n", skb, ptr, avail); | 954 | debug_tcp("skb %p ptr=%p avail=%u\n", skb, ptr, avail); |
951 | rc = iscsi_tcp_chunk_recv(tcp_conn, chunk, ptr, avail); | 955 | rc = iscsi_tcp_segment_recv(tcp_conn, segment, ptr, avail); |
952 | BUG_ON(rc == 0); | 956 | BUG_ON(rc == 0); |
953 | consumed += rc; | 957 | consumed += rc; |
954 | 958 | ||
955 | if (chunk->total_copied >= chunk->total_size) { | 959 | if (segment->total_copied >= segment->total_size) { |
956 | rc = chunk->done(tcp_conn, chunk); | 960 | debug_tcp("segment done\n"); |
961 | rc = segment->done(tcp_conn, segment); | ||
957 | if (rc != 0) { | 962 | if (rc != 0) { |
958 | skb_abort_seq_read(&seq); | 963 | skb_abort_seq_read(&seq); |
959 | goto error; | 964 | goto error; |
960 | } | 965 | } |
961 | 966 | ||
962 | /* The done() functions sets up the | 967 | /* The done() functions sets up the |
963 | * next chunk. */ | 968 | * next segment. */ |
964 | } | 969 | } |
965 | } | 970 | } |
966 | 971 | skb_abort_seq_read(&seq); | |
967 | conn->rxdata_octets += consumed; | 972 | conn->rxdata_octets += consumed; |
968 | return consumed; | 973 | return consumed; |
969 | 974 | ||
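
iscsi_tcp_recv() above drains the socket with skb_seq_read() and feeds each contiguous run of bytes into the current segment; when the segment fills up, its done() callback installs the next one (header, then data, then the next header). The sketch below mimics that consume loop in userspace, with a flat buffer standing in for the skb and a fixed 48-byte segment standing in for the header segment; all names are illustrative.

#include <stdio.h>
#include <string.h>

struct segment_stub {
        unsigned char buf[48];
        unsigned int size, copied;
        void (*done)(struct segment_stub *);
};

static void hdr_done(struct segment_stub *seg)
{
        printf("segment complete (%u bytes), prepping the next one\n",
               seg->size);
        seg->copied = 0;     /* in the driver, done() installs a new segment */
}

/* Copy as much of ptr[0..len) as the segment still wants; the caller
 * loops, just as iscsi_tcp_recv() loops over skb_seq_read() chunks. */
static unsigned int segment_recv(struct segment_stub *seg,
                                 const unsigned char *ptr, unsigned int len)
{
        unsigned int copy = seg->size - seg->copied;

        if (copy > len)
                copy = len;
        memcpy(seg->buf + seg->copied, ptr, copy);
        seg->copied += copy;
        if (seg->copied == seg->size)
                seg->done(seg);
        return copy;
}

int main(void)
{
        unsigned char wire[100] = { 0 };
        struct segment_stub seg = { .size = 48, .done = hdr_done };
        unsigned int consumed = 0;

        while (consumed < sizeof(wire))
                consumed += segment_recv(&seg, wire + consumed,
                                         sizeof(wire) - consumed);
        printf("consumed %u bytes\n", consumed);
        return 0;
}
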
@@ -996,7 +1001,7 @@ iscsi_tcp_data_ready(struct sock *sk, int flag) | |||
996 | 1001 | ||
997 | /* If we had to (atomically) map a highmem page, | 1002 | /* If we had to (atomically) map a highmem page, |
998 | * unmap it now. */ | 1003 | * unmap it now. */ |
999 | iscsi_tcp_chunk_unmap(&tcp_conn->in.chunk); | 1004 | iscsi_tcp_segment_unmap(&tcp_conn->in.segment); |
1000 | } | 1005 | } |
1001 | 1006 | ||
1002 | static void | 1007 | static void |
@@ -1076,121 +1081,173 @@ iscsi_conn_restore_callbacks(struct iscsi_tcp_conn *tcp_conn) | |||
1076 | } | 1081 | } |
1077 | 1082 | ||
1078 | /** | 1083 | /** |
1079 | * iscsi_send - generic send routine | 1084 | * iscsi_xmit - TCP transmit |
1080 | * @sk: kernel's socket | 1085 | **/ |
1081 | * @buf: buffer to write from | 1086 | static int |
1082 | * @size: actual size to write | 1087 | iscsi_xmit(struct iscsi_conn *conn) |
1083 | * @flags: socket's flags | ||
1084 | */ | ||
1085 | static inline int | ||
1086 | iscsi_send(struct iscsi_conn *conn, struct iscsi_buf *buf, int size, int flags) | ||
1087 | { | 1088 | { |
1088 | struct iscsi_tcp_conn *tcp_conn = conn->dd_data; | 1089 | struct iscsi_tcp_conn *tcp_conn = conn->dd_data; |
1089 | struct socket *sk = tcp_conn->sock; | 1090 | struct iscsi_segment *segment = &tcp_conn->out.segment; |
1090 | int offset = buf->sg.offset + buf->sent, res; | 1091 | unsigned int consumed = 0; |
1092 | int rc = 0; | ||
1091 | 1093 | ||
1092 | /* | 1094 | while (1) { |
1093 | * if we got use_sg=0 or are sending something we kmallocd | 1095 | rc = iscsi_tcp_xmit_segment(tcp_conn, segment); |
1094 | * then we did not have to do kmap (kmap returns page_address) | 1096 | if (rc < 0) |
1095 | * | 1097 | goto error; |
1096 | * if we got use_sg > 0, but had to drop down, we do not | 1098 | if (rc == 0) |
1097 | * set clustering so this should only happen for that | 1099 | break; |
1098 | * slab case. | 1100 | |
1099 | */ | 1101 | consumed += rc; |
1100 | if (buf->use_sendmsg) | 1102 | |
1101 | res = sock_no_sendpage(sk, buf->sg.page, offset, size, flags); | 1103 | if (segment->total_copied >= segment->total_size) { |
1102 | else | 1104 | if (segment->done != NULL) { |
1103 | res = tcp_conn->sendpage(sk, buf->sg.page, offset, size, flags); | 1105 | rc = segment->done(tcp_conn, segment); |
1104 | 1106 | if (rc < 0) | |
1105 | if (res >= 0) { | 1107 | goto error; |
1106 | conn->txdata_octets += res; | 1108 | } |
1107 | buf->sent += res; | 1109 | } |
1108 | return res; | ||
1109 | } | 1110 | } |
1110 | 1111 | ||
1111 | tcp_conn->sendpage_failures_cnt++; | 1112 | debug_tcp("xmit %d bytes\n", consumed); |
1112 | if (res == -EAGAIN) | 1113 | |
1113 | res = -ENOBUFS; | 1114 | conn->txdata_octets += consumed; |
1114 | else | 1115 | return consumed; |
1115 | iscsi_conn_failure(conn, ISCSI_ERR_CONN_FAILED); | 1116 | |
1116 | return res; | 1117 | error: |
1118 | /* Transmit error. We could initiate error recovery | ||
1119 | * here. */ | ||
1120 | debug_tcp("Error sending PDU, errno=%d\n", rc); | ||
1121 | iscsi_conn_failure(conn, ISCSI_ERR_CONN_FAILED); | ||
1122 | return rc; | ||
1117 | } | 1123 | } |
1118 | 1124 | ||
1119 | /** | 1125 | /** |
1120 | * iscsi_sendhdr - send PDU Header via tcp_sendpage() | 1126 | * iscsi_tcp_xmit_qlen - return the number of bytes queued for xmit |
1121 | * @conn: iscsi connection | 1127 | */ |
1122 | * @buf: buffer to write from | ||
1123 | * @datalen: lenght of data to be sent after the header | ||
1124 | * | ||
1125 | * Notes: | ||
1126 | * (Tx, Fast Path) | ||
1127 | **/ | ||
1128 | static inline int | 1128 | static inline int |
1129 | iscsi_sendhdr(struct iscsi_conn *conn, struct iscsi_buf *buf, int datalen) | 1129 | iscsi_tcp_xmit_qlen(struct iscsi_conn *conn) |
1130 | { | 1130 | { |
1131 | int flags = 0; /* MSG_DONTWAIT; */ | 1131 | struct iscsi_tcp_conn *tcp_conn = conn->dd_data; |
1132 | int res, size; | 1132 | struct iscsi_segment *segment = &tcp_conn->out.segment; |
1133 | |||
1134 | size = buf->sg.length - buf->sent; | ||
1135 | BUG_ON(buf->sent + size > buf->sg.length); | ||
1136 | if (buf->sent + size != buf->sg.length || datalen) | ||
1137 | flags |= MSG_MORE; | ||
1138 | |||
1139 | res = iscsi_send(conn, buf, size, flags); | ||
1140 | debug_tcp("sendhdr %d bytes, sent %d res %d\n", size, buf->sent, res); | ||
1141 | if (res >= 0) { | ||
1142 | if (size != res) | ||
1143 | return -EAGAIN; | ||
1144 | return 0; | ||
1145 | } | ||
1146 | 1133 | ||
1147 | return res; | 1134 | return segment->total_copied - segment->total_size; |
1148 | } | 1135 | } |
1149 | 1136 | ||
1150 | /** | ||
1151 | * iscsi_sendpage - send one page of iSCSI Data-Out. | ||
1152 | * @conn: iscsi connection | ||
1153 | * @buf: buffer to write from | ||
1154 | * @count: remaining data | ||
1155 | * @sent: number of bytes sent | ||
1156 | * | ||
1157 | * Notes: | ||
1158 | * (Tx, Fast Path) | ||
1159 | **/ | ||
1160 | static inline int | 1137 | static inline int |
1161 | iscsi_sendpage(struct iscsi_conn *conn, struct iscsi_buf *buf, | 1138 | iscsi_tcp_flush(struct iscsi_conn *conn) |
1162 | int *count, int *sent) | ||
1163 | { | 1139 | { |
1164 | int flags = 0; /* MSG_DONTWAIT; */ | 1140 | int rc; |
1165 | int res, size; | 1141 | |
1166 | 1142 | while (iscsi_tcp_xmit_qlen(conn)) { | |
1167 | size = buf->sg.length - buf->sent; | 1143 | rc = iscsi_xmit(conn); |
1168 | BUG_ON(buf->sent + size > buf->sg.length); | 1144 | if (rc == 0) |
1169 | if (size > *count) | ||
1170 | size = *count; | ||
1171 | if (buf->sent + size != buf->sg.length || *count != size) | ||
1172 | flags |= MSG_MORE; | ||
1173 | |||
1174 | res = iscsi_send(conn, buf, size, flags); | ||
1175 | debug_tcp("sendpage: %d bytes, sent %d left %d sent %d res %d\n", | ||
1176 | size, buf->sent, *count, *sent, res); | ||
1177 | if (res >= 0) { | ||
1178 | *count -= res; | ||
1179 | *sent += res; | ||
1180 | if (size != res) | ||
1181 | return -EAGAIN; | 1145 | return -EAGAIN; |
1182 | return 0; | 1146 | if (rc < 0) |
1147 | return rc; | ||
1183 | } | 1148 | } |
1184 | 1149 | ||
1185 | return res; | 1150 | return 0; |
1186 | } | 1151 | } |
1187 | 1152 | ||
1188 | static inline void | 1153 | /* |
1189 | iscsi_data_digest_init(struct iscsi_tcp_conn *tcp_conn, | 1154 | * This is called when we're done sending the header. |
1190 | struct iscsi_tcp_cmd_task *tcp_ctask) | 1155 | * Simply copy the data_segment to the send segment, and return. |
1156 | */ | ||
1157 | static int | ||
1158 | iscsi_tcp_send_hdr_done(struct iscsi_tcp_conn *tcp_conn, | ||
1159 | struct iscsi_segment *segment) | ||
1191 | { | 1160 | { |
1192 | crypto_hash_init(&tcp_conn->tx_hash); | 1161 | tcp_conn->out.segment = tcp_conn->out.data_segment; |
1193 | tcp_ctask->digest_count = 4; | 1162 | debug_tcp("Header done. Next segment size %u total_size %u\n", |
1163 | tcp_conn->out.segment.size, tcp_conn->out.segment.total_size); | ||
1164 | return 0; | ||
1165 | } | ||
1166 | |||
1167 | static void | ||
1168 | iscsi_tcp_send_hdr_prep(struct iscsi_conn *conn, void *hdr, size_t hdrlen) | ||
1169 | { | ||
1170 | struct iscsi_tcp_conn *tcp_conn = conn->dd_data; | ||
1171 | |||
1172 | debug_tcp("%s(%p%s)\n", __FUNCTION__, tcp_conn, | ||
1173 | conn->hdrdgst_en? ", digest enabled" : ""); | ||
1174 | |||
1175 | /* Clear the data segment - needs to be filled in by the | ||
1176 | * caller using iscsi_tcp_send_data_prep() */ | ||
1177 | memset(&tcp_conn->out.data_segment, 0, sizeof(struct iscsi_segment)); | ||
1178 | |||
1179 | /* If header digest is enabled, compute the CRC and | ||
1180 | * place the digest into the same buffer. We make | ||
1181 | * sure that both iscsi_tcp_ctask and mtask have | ||
1182 | * sufficient room. | ||
1183 | */ | ||
1184 | if (conn->hdrdgst_en) { | ||
1185 | iscsi_tcp_dgst_header(&tcp_conn->tx_hash, hdr, hdrlen, | ||
1186 | hdr + hdrlen); | ||
1187 | hdrlen += ISCSI_DIGEST_SIZE; | ||
1188 | } | ||
1189 | |||
1190 | /* Remember header pointer for later, when we need | ||
1191 | * to decide whether there's a payload to go along | ||
1192 | * with the header. */ | ||
1193 | tcp_conn->out.hdr = hdr; | ||
1194 | |||
1195 | iscsi_segment_init_linear(&tcp_conn->out.segment, hdr, hdrlen, | ||
1196 | iscsi_tcp_send_hdr_done, NULL); | ||
1197 | } | ||
1198 | |||
1199 | /* | ||
1200 | * Prepare the send buffer for the payload data. | ||
1201 | * Padding and checksumming will all be taken care | ||
1202 | * of by the iscsi_segment routines. | ||
1203 | */ | ||
1204 | static int | ||
1205 | iscsi_tcp_send_data_prep(struct iscsi_conn *conn, struct scatterlist *sg, | ||
1206 | unsigned int count, unsigned int offset, | ||
1207 | unsigned int len) | ||
1208 | { | ||
1209 | struct iscsi_tcp_conn *tcp_conn = conn->dd_data; | ||
1210 | struct hash_desc *tx_hash = NULL; | ||
1211 | unsigned int hdr_spec_len; | ||
1212 | |||
1213 | debug_tcp("%s(%p, offset=%d, datalen=%d%s)\n", __FUNCTION__, | ||
1214 | tcp_conn, offset, len, | ||
1215 | conn->datadgst_en? ", digest enabled" : ""); | ||
1216 | |||
1217 | /* Make sure the datalen matches what the caller | ||
1218 | said he would send. */ | ||
1219 | hdr_spec_len = ntoh24(tcp_conn->out.hdr->dlength); | ||
1220 | WARN_ON(iscsi_padded(len) != iscsi_padded(hdr_spec_len)); | ||
1221 | |||
1222 | if (conn->datadgst_en) | ||
1223 | tx_hash = &tcp_conn->tx_hash; | ||
1224 | |||
1225 | return iscsi_segment_seek_sg(&tcp_conn->out.data_segment, | ||
1226 | sg, count, offset, len, | ||
1227 | NULL, tx_hash); | ||
1228 | } | ||
1229 | |||
1230 | static void | ||
1231 | iscsi_tcp_send_linear_data_prepare(struct iscsi_conn *conn, void *data, | ||
1232 | size_t len) | ||
1233 | { | ||
1234 | struct iscsi_tcp_conn *tcp_conn = conn->dd_data; | ||
1235 | struct hash_desc *tx_hash = NULL; | ||
1236 | unsigned int hdr_spec_len; | ||
1237 | |||
1238 | debug_tcp("%s(%p, datalen=%d%s)\n", __FUNCTION__, tcp_conn, len, | ||
1239 | conn->datadgst_en? ", digest enabled" : ""); | ||
1240 | |||
1241 | /* Make sure the datalen matches what the caller | ||
1242 | said he would send. */ | ||
1243 | hdr_spec_len = ntoh24(tcp_conn->out.hdr->dlength); | ||
1244 | WARN_ON(iscsi_padded(len) != iscsi_padded(hdr_spec_len)); | ||
1245 | |||
1246 | if (conn->datadgst_en) | ||
1247 | tx_hash = &tcp_conn->tx_hash; | ||
1248 | |||
1249 | iscsi_segment_init_linear(&tcp_conn->out.data_segment, | ||
1250 | data, len, NULL, tx_hash); | ||
1194 | } | 1251 | } |
1195 | 1252 | ||
1196 | /** | 1253 | /** |
@@ -1206,12 +1263,17 @@ iscsi_data_digest_init(struct iscsi_tcp_conn *tcp_conn, | |||
1206 | * | 1263 | * |
1207 | * Called under connection lock. | 1264 | * Called under connection lock. |
1208 | **/ | 1265 | **/ |
1209 | static void | 1266 | static int |
1210 | iscsi_solicit_data_cont(struct iscsi_conn *conn, struct iscsi_cmd_task *ctask, | 1267 | iscsi_solicit_data_cont(struct iscsi_conn *conn, struct iscsi_cmd_task *ctask, |
1211 | struct iscsi_r2t_info *r2t, int left) | 1268 | struct iscsi_r2t_info *r2t) |
1212 | { | 1269 | { |
1213 | struct iscsi_data *hdr; | 1270 | struct iscsi_data *hdr; |
1214 | int new_offset; | 1271 | int new_offset, left; |
1272 | |||
1273 | BUG_ON(r2t->data_length - r2t->sent < 0); | ||
1274 | left = r2t->data_length - r2t->sent; | ||
1275 | if (left == 0) | ||
1276 | return 0; | ||
1215 | 1277 | ||
1216 | hdr = &r2t->dtask.hdr; | 1278 | hdr = &r2t->dtask.hdr; |
1217 | memset(hdr, 0, sizeof(struct iscsi_data)); | 1279 | memset(hdr, 0, sizeof(struct iscsi_data)); |
@@ -1232,43 +1294,46 @@ iscsi_solicit_data_cont(struct iscsi_conn *conn, struct iscsi_cmd_task *ctask, | |||
1232 | r2t->data_count = left; | 1294 | r2t->data_count = left; |
1233 | hdr->flags = ISCSI_FLAG_CMD_FINAL; | 1295 | hdr->flags = ISCSI_FLAG_CMD_FINAL; |
1234 | } | 1296 | } |
1235 | conn->dataout_pdus_cnt++; | ||
1236 | |||
1237 | iscsi_buf_init_iov(&r2t->headbuf, (char*)hdr, | ||
1238 | sizeof(struct iscsi_hdr)); | ||
1239 | |||
1240 | if (iscsi_buf_left(&r2t->sendbuf)) | ||
1241 | return; | ||
1242 | |||
1243 | iscsi_buf_init_sg(&r2t->sendbuf, r2t->sg); | ||
1244 | r2t->sg += 1; | ||
1245 | } | ||
1246 | |||
1247 | static void iscsi_set_padding(struct iscsi_tcp_cmd_task *tcp_ctask, | ||
1248 | unsigned long len) | ||
1249 | { | ||
1250 | tcp_ctask->pad_count = len & (ISCSI_PAD_LEN - 1); | ||
1251 | if (!tcp_ctask->pad_count) | ||
1252 | return; | ||
1253 | 1297 | ||
1254 | tcp_ctask->pad_count = ISCSI_PAD_LEN - tcp_ctask->pad_count; | 1298 | conn->dataout_pdus_cnt++; |
1255 | debug_scsi("write padding %d bytes\n", tcp_ctask->pad_count); | 1299 | return 1; |
1256 | tcp_ctask->xmstate |= XMSTATE_W_PAD; | ||
1257 | } | 1300 | } |
1258 | 1301 | ||
1259 | /** | 1302 | /** |
1260 | * iscsi_tcp_cmd_init - Initialize iSCSI SCSI_READ or SCSI_WRITE commands | 1303 | * iscsi_tcp_ctask - Initialize iSCSI SCSI_READ or SCSI_WRITE commands |
1261 | * @conn: iscsi connection | 1304 | * @conn: iscsi connection |
1262 | * @ctask: scsi command task | 1305 | * @ctask: scsi command task |
1263 | * @sc: scsi command | 1306 | * @sc: scsi command |
1264 | **/ | 1307 | **/ |
1265 | static void | 1308 | static int |
1266 | iscsi_tcp_cmd_init(struct iscsi_cmd_task *ctask) | 1309 | iscsi_tcp_ctask_init(struct iscsi_cmd_task *ctask) |
1267 | { | 1310 | { |
1268 | struct iscsi_tcp_cmd_task *tcp_ctask = ctask->dd_data; | 1311 | struct iscsi_tcp_cmd_task *tcp_ctask = ctask->dd_data; |
1312 | struct iscsi_conn *conn = ctask->conn; | ||
1313 | struct scsi_cmnd *sc = ctask->sc; | ||
1314 | int err; | ||
1269 | 1315 | ||
1270 | BUG_ON(__kfifo_len(tcp_ctask->r2tqueue)); | 1316 | BUG_ON(__kfifo_len(tcp_ctask->r2tqueue)); |
1271 | tcp_ctask->xmstate = XMSTATE_CMD_HDR_INIT; | 1317 | tcp_ctask->sent = 0; |
1318 | tcp_ctask->exp_datasn = 0; | ||
1319 | |||
1320 | /* Prepare PDU, optionally w/ immediate data */ | ||
1321 | debug_scsi("ctask deq [cid %d itt 0x%x imm %d unsol %d]\n", | ||
1322 | conn->id, ctask->itt, ctask->imm_count, | ||
1323 | ctask->unsol_count); | ||
1324 | iscsi_tcp_send_hdr_prep(conn, ctask->hdr, ctask->hdr_len); | ||
1325 | |||
1326 | if (!ctask->imm_count) | ||
1327 | return 0; | ||
1328 | |||
1329 | /* If we have immediate data, attach a payload */ | ||
1330 | err = iscsi_tcp_send_data_prep(conn, scsi_sglist(sc), scsi_sg_count(sc), | ||
1331 | 0, ctask->imm_count); | ||
1332 | if (err) | ||
1333 | return err; | ||
1334 | tcp_ctask->sent += ctask->imm_count; | ||
1335 | ctask->imm_count = 0; | ||
1336 | return 0; | ||
1272 | } | 1337 | } |
1273 | 1338 | ||
1274 | /** | 1339 | /** |
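
iscsi_solicit_data_cont() above now derives the remaining solicited payload itself (left = r2t->data_length - r2t->sent) and builds each Data-Out at buffer offset data_offset + sent, with each PDU clamped to the connection's negotiated maximum Data-Out size (max_xmit_dlength in the driver). The sketch below shows that slicing; the 8192-byte limit and the struct fields are illustrative example values, not the driver's data structures.

#include <stdio.h>

/* Illustrative R2T bookkeeping: how a solicited burst is cut into
 * Data-Out PDUs. */
struct r2t_stub {
        unsigned int data_offset;   /* where the burst starts in the command */
        unsigned int data_length;   /* size of the burst */
        unsigned int sent;          /* bytes already sent for this R2T */
};

static void next_data_out(struct r2t_stub *r2t, unsigned int max_dlength)
{
        unsigned int left = r2t->data_length - r2t->sent;
        unsigned int offset = r2t->data_offset + r2t->sent;
        unsigned int count = left > max_dlength ? max_dlength : left;

        printf("Data-Out: buffer offset %u, %u bytes%s\n", offset, count,
               count == left ? " (final)" : "");
        r2t->sent += count;
}

int main(void)
{
        struct r2t_stub r2t = { .data_offset = 4096, .data_length = 20000 };

        while (r2t.sent < r2t.data_length)
                next_data_out(&r2t, 8192);
        return 0;
}
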
@@ -1280,71 +1345,17 @@ iscsi_tcp_cmd_init(struct iscsi_cmd_task *ctask) | |||
1280 | * The function can return -EAGAIN in which case caller must | 1345 | * The function can return -EAGAIN in which case caller must |
1281 | * call it again later, or recover. '0' return code means successful | 1346 | * call it again later, or recover. '0' return code means successful |
1282 | * xmit. | 1347 | * xmit. |
1283 | * | ||
1284 | * Management xmit state machine consists of these states: | ||
1285 | * XMSTATE_IMM_HDR_INIT - calculate digest of PDU Header | ||
1286 | * XMSTATE_IMM_HDR - PDU Header xmit in progress | ||
1287 | * XMSTATE_IMM_DATA - PDU Data xmit in progress | ||
1288 | * XMSTATE_IDLE - management PDU is done | ||
1289 | **/ | 1348 | **/ |
1290 | static int | 1349 | static int |
1291 | iscsi_tcp_mtask_xmit(struct iscsi_conn *conn, struct iscsi_mgmt_task *mtask) | 1350 | iscsi_tcp_mtask_xmit(struct iscsi_conn *conn, struct iscsi_mgmt_task *mtask) |
1292 | { | 1351 | { |
1293 | struct iscsi_tcp_mgmt_task *tcp_mtask = mtask->dd_data; | ||
1294 | int rc; | 1352 | int rc; |
1295 | 1353 | ||
1296 | debug_scsi("mtask deq [cid %d state %x itt 0x%x]\n", | 1354 | /* Flush any pending data first. */ |
1297 | conn->id, tcp_mtask->xmstate, mtask->itt); | 1355 | rc = iscsi_tcp_flush(conn); |
1298 | 1356 | if (rc < 0) | |
1299 | if (tcp_mtask->xmstate & XMSTATE_IMM_HDR_INIT) { | 1357 | return rc; |
1300 | iscsi_buf_init_iov(&tcp_mtask->headbuf, (char*)mtask->hdr, | ||
1301 | sizeof(struct iscsi_hdr)); | ||
1302 | |||
1303 | if (mtask->data_count) { | ||
1304 | tcp_mtask->xmstate |= XMSTATE_IMM_DATA; | ||
1305 | iscsi_buf_init_iov(&tcp_mtask->sendbuf, | ||
1306 | (char*)mtask->data, | ||
1307 | mtask->data_count); | ||
1308 | } | ||
1309 | |||
1310 | if (conn->c_stage != ISCSI_CONN_INITIAL_STAGE && | ||
1311 | conn->stop_stage != STOP_CONN_RECOVER && | ||
1312 | conn->hdrdgst_en) | ||
1313 | iscsi_hdr_digest(conn, &tcp_mtask->headbuf, | ||
1314 | (u8*)tcp_mtask->hdrext); | ||
1315 | |||
1316 | tcp_mtask->sent = 0; | ||
1317 | tcp_mtask->xmstate &= ~XMSTATE_IMM_HDR_INIT; | ||
1318 | tcp_mtask->xmstate |= XMSTATE_IMM_HDR; | ||
1319 | } | ||
1320 | |||
1321 | if (tcp_mtask->xmstate & XMSTATE_IMM_HDR) { | ||
1322 | rc = iscsi_sendhdr(conn, &tcp_mtask->headbuf, | ||
1323 | mtask->data_count); | ||
1324 | if (rc) | ||
1325 | return rc; | ||
1326 | tcp_mtask->xmstate &= ~XMSTATE_IMM_HDR; | ||
1327 | } | ||
1328 | |||
1329 | if (tcp_mtask->xmstate & XMSTATE_IMM_DATA) { | ||
1330 | BUG_ON(!mtask->data_count); | ||
1331 | tcp_mtask->xmstate &= ~XMSTATE_IMM_DATA; | ||
1332 | /* FIXME: implement. | ||
1333 | * Virtual buffer could be spreaded across multiple pages... | ||
1334 | */ | ||
1335 | do { | ||
1336 | int rc; | ||
1337 | |||
1338 | rc = iscsi_sendpage(conn, &tcp_mtask->sendbuf, | ||
1339 | &mtask->data_count, &tcp_mtask->sent); | ||
1340 | if (rc) { | ||
1341 | tcp_mtask->xmstate |= XMSTATE_IMM_DATA; | ||
1342 | return rc; | ||
1343 | } | ||
1344 | } while (mtask->data_count); | ||
1345 | } | ||
1346 | 1358 | ||
1347 | BUG_ON(tcp_mtask->xmstate != XMSTATE_IDLE); | ||
1348 | if (mtask->hdr->itt == RESERVED_ITT) { | 1359 | if (mtask->hdr->itt == RESERVED_ITT) { |
1349 | struct iscsi_session *session = conn->session; | 1360 | struct iscsi_session *session = conn->session; |
1350 | 1361 | ||
@@ -1352,411 +1363,112 @@ iscsi_tcp_mtask_xmit(struct iscsi_conn *conn, struct iscsi_mgmt_task *mtask) | |||
1352 | iscsi_free_mgmt_task(conn, mtask); | 1363 | iscsi_free_mgmt_task(conn, mtask); |
1353 | spin_unlock_bh(&session->lock); | 1364 | spin_unlock_bh(&session->lock); |
1354 | } | 1365 | } |
1366 | |||
1355 | return 0; | 1367 | return 0; |
1356 | } | 1368 | } |
1357 | 1369 | ||
1370 | /* | ||
1371 | * iscsi_tcp_ctask_xmit - xmit normal PDU task | ||
1372 | * @conn: iscsi connection | ||
1373 | * @ctask: iscsi command task | ||
1374 | * | ||
1375 | * We're expected to return 0 when everything was transmitted successfully, | ||
1376 | * -EAGAIN if there's still data in the queue, or != 0 for any other kind | ||
1377 | * of error. | ||
1378 | */ | ||
1358 | static int | 1379 | static int |
1359 | iscsi_send_cmd_hdr(struct iscsi_conn *conn, struct iscsi_cmd_task *ctask) | 1380 | iscsi_tcp_ctask_xmit(struct iscsi_conn *conn, struct iscsi_cmd_task *ctask) |
1360 | { | 1381 | { |
1361 | struct scsi_cmnd *sc = ctask->sc; | ||
1362 | struct iscsi_tcp_cmd_task *tcp_ctask = ctask->dd_data; | 1382 | struct iscsi_tcp_cmd_task *tcp_ctask = ctask->dd_data; |
1383 | struct scsi_cmnd *sc = ctask->sc; | ||
1363 | int rc = 0; | 1384 | int rc = 0; |
1364 | 1385 | ||
1365 | if (tcp_ctask->xmstate & XMSTATE_CMD_HDR_INIT) { | 1386 | flush: |
1366 | tcp_ctask->sent = 0; | 1387 | /* Flush any pending data first. */ |
1367 | tcp_ctask->sg_count = 0; | 1388 | rc = iscsi_tcp_flush(conn); |
1368 | tcp_ctask->exp_datasn = 0; | 1389 | if (rc < 0) |
1369 | |||
1370 | if (sc->sc_data_direction == DMA_TO_DEVICE) { | ||
1371 | struct scatterlist *sg = scsi_sglist(sc); | ||
1372 | |||
1373 | iscsi_buf_init_sg(&tcp_ctask->sendbuf, sg); | ||
1374 | tcp_ctask->sg = sg + 1; | ||
1375 | tcp_ctask->bad_sg = sg + scsi_sg_count(sc); | ||
1376 | |||
1377 | debug_scsi("cmd [itt 0x%x total %d imm_data %d " | ||
1378 | "unsol count %d, unsol offset %d]\n", | ||
1379 | ctask->itt, scsi_bufflen(sc), | ||
1380 | ctask->imm_count, ctask->unsol_count, | ||
1381 | ctask->unsol_offset); | ||
1382 | } | ||
1383 | |||
1384 | iscsi_buf_init_iov(&tcp_ctask->headbuf, (char*)ctask->hdr, | ||
1385 | ctask->hdr_len); | ||
1386 | |||
1387 | if (conn->hdrdgst_en) | ||
1388 | iscsi_hdr_digest(conn, &tcp_ctask->headbuf, | ||
1389 | iscsi_next_hdr(ctask)); | ||
1390 | tcp_ctask->xmstate &= ~XMSTATE_CMD_HDR_INIT; | ||
1391 | tcp_ctask->xmstate |= XMSTATE_CMD_HDR_XMIT; | ||
1392 | } | ||
1393 | |||
1394 | if (tcp_ctask->xmstate & XMSTATE_CMD_HDR_XMIT) { | ||
1395 | rc = iscsi_sendhdr(conn, &tcp_ctask->headbuf, ctask->imm_count); | ||
1396 | if (rc) | ||
1397 | return rc; | ||
1398 | tcp_ctask->xmstate &= ~XMSTATE_CMD_HDR_XMIT; | ||
1399 | |||
1400 | if (sc->sc_data_direction != DMA_TO_DEVICE) | ||
1401 | return 0; | ||
1402 | |||
1403 | if (ctask->imm_count) { | ||
1404 | tcp_ctask->xmstate |= XMSTATE_IMM_DATA; | ||
1405 | iscsi_set_padding(tcp_ctask, ctask->imm_count); | ||
1406 | |||
1407 | if (ctask->conn->datadgst_en) { | ||
1408 | iscsi_data_digest_init(ctask->conn->dd_data, | ||
1409 | tcp_ctask); | ||
1410 | tcp_ctask->immdigest = 0; | ||
1411 | } | ||
1412 | } | ||
1413 | |||
1414 | if (ctask->unsol_count) | ||
1415 | tcp_ctask->xmstate |= | ||
1416 | XMSTATE_UNS_HDR | XMSTATE_UNS_INIT; | ||
1417 | } | ||
1418 | return rc; | ||
1419 | } | ||
1420 | |||
1421 | static int | ||
1422 | iscsi_send_padding(struct iscsi_conn *conn, struct iscsi_cmd_task *ctask) | ||
1423 | { | ||
1424 | struct iscsi_tcp_cmd_task *tcp_ctask = ctask->dd_data; | ||
1425 | struct iscsi_tcp_conn *tcp_conn = conn->dd_data; | ||
1426 | int sent = 0, rc; | ||
1427 | |||
1428 | if (tcp_ctask->xmstate & XMSTATE_W_PAD) { | ||
1429 | iscsi_buf_init_iov(&tcp_ctask->sendbuf, (char*)&tcp_ctask->pad, | ||
1430 | tcp_ctask->pad_count); | ||
1431 | if (conn->datadgst_en) | ||
1432 | crypto_hash_update(&tcp_conn->tx_hash, | ||
1433 | &tcp_ctask->sendbuf.sg, | ||
1434 | tcp_ctask->sendbuf.sg.length); | ||
1435 | } else if (!(tcp_ctask->xmstate & XMSTATE_W_RESEND_PAD)) | ||
1436 | return 0; | ||
1437 | |||
1438 | tcp_ctask->xmstate &= ~XMSTATE_W_PAD; | ||
1439 | tcp_ctask->xmstate &= ~XMSTATE_W_RESEND_PAD; | ||
1440 | debug_scsi("sending %d pad bytes for itt 0x%x\n", | ||
1441 | tcp_ctask->pad_count, ctask->itt); | ||
1442 | rc = iscsi_sendpage(conn, &tcp_ctask->sendbuf, &tcp_ctask->pad_count, | ||
1443 | &sent); | ||
1444 | if (rc) { | ||
1445 | debug_scsi("padding send failed %d\n", rc); | ||
1446 | tcp_ctask->xmstate |= XMSTATE_W_RESEND_PAD; | ||
1447 | } | ||
1448 | return rc; | ||
1449 | } | ||
1450 | |||
1451 | static int | ||
1452 | iscsi_send_digest(struct iscsi_conn *conn, struct iscsi_cmd_task *ctask, | ||
1453 | struct iscsi_buf *buf, uint32_t *digest) | ||
1454 | { | ||
1455 | struct iscsi_tcp_cmd_task *tcp_ctask; | ||
1456 | struct iscsi_tcp_conn *tcp_conn; | ||
1457 | int rc, sent = 0; | ||
1458 | |||
1459 | if (!conn->datadgst_en) | ||
1460 | return 0; | ||
1461 | |||
1462 | tcp_ctask = ctask->dd_data; | ||
1463 | tcp_conn = conn->dd_data; | ||
1464 | |||
1465 | if (!(tcp_ctask->xmstate & XMSTATE_W_RESEND_DATA_DIGEST)) { | ||
1466 | crypto_hash_final(&tcp_conn->tx_hash, (u8*)digest); | ||
1467 | iscsi_buf_init_iov(buf, (char*)digest, 4); | ||
1468 | } | ||
1469 | tcp_ctask->xmstate &= ~XMSTATE_W_RESEND_DATA_DIGEST; | ||
1470 | |||
1471 | rc = iscsi_sendpage(conn, buf, &tcp_ctask->digest_count, &sent); | ||
1472 | if (!rc) | ||
1473 | debug_scsi("sent digest 0x%x for itt 0x%x\n", *digest, | ||
1474 | ctask->itt); | ||
1475 | else { | ||
1476 | debug_scsi("sending digest 0x%x failed for itt 0x%x!\n", | ||
1477 | *digest, ctask->itt); | ||
1478 | tcp_ctask->xmstate |= XMSTATE_W_RESEND_DATA_DIGEST; | ||
1479 | } | ||
1480 | return rc; | ||
1481 | } | ||
1482 | |||
1483 | static int | ||
1484 | iscsi_send_data(struct iscsi_cmd_task *ctask, struct iscsi_buf *sendbuf, | ||
1485 | struct scatterlist **sg, int *sent, int *count, | ||
1486 | struct iscsi_buf *digestbuf, uint32_t *digest) | ||
1487 | { | ||
1488 | struct iscsi_tcp_cmd_task *tcp_ctask = ctask->dd_data; | ||
1489 | struct iscsi_conn *conn = ctask->conn; | ||
1490 | struct iscsi_tcp_conn *tcp_conn = conn->dd_data; | ||
1491 | int rc, buf_sent, offset; | ||
1492 | |||
1493 | while (*count) { | ||
1494 | buf_sent = 0; | ||
1495 | offset = sendbuf->sent; | ||
1496 | |||
1497 | rc = iscsi_sendpage(conn, sendbuf, count, &buf_sent); | ||
1498 | *sent = *sent + buf_sent; | ||
1499 | if (buf_sent && conn->datadgst_en) | ||
1500 | partial_sg_digest_update(&tcp_conn->tx_hash, | ||
1501 | &sendbuf->sg, sendbuf->sg.offset + offset, | ||
1502 | buf_sent); | ||
1503 | if (!iscsi_buf_left(sendbuf) && *sg != tcp_ctask->bad_sg) { | ||
1504 | iscsi_buf_init_sg(sendbuf, *sg); | ||
1505 | *sg = *sg + 1; | ||
1506 | } | ||
1507 | |||
1508 | if (rc) | ||
1509 | return rc; | ||
1510 | } | ||
1511 | |||
1512 | rc = iscsi_send_padding(conn, ctask); | ||
1513 | if (rc) | ||
1514 | return rc; | 1390 | return rc; |
1515 | 1391 | ||
1516 | return iscsi_send_digest(conn, ctask, digestbuf, digest); | 1392 | /* Are we done already? */ |
1517 | } | 1393 | if (sc->sc_data_direction != DMA_TO_DEVICE) |
1518 | 1394 | return 0; | |
1519 | static int | ||
1520 | iscsi_send_unsol_hdr(struct iscsi_conn *conn, struct iscsi_cmd_task *ctask) | ||
1521 | { | ||
1522 | struct iscsi_tcp_cmd_task *tcp_ctask = ctask->dd_data; | ||
1523 | struct iscsi_data_task *dtask; | ||
1524 | int rc; | ||
1525 | |||
1526 | tcp_ctask->xmstate |= XMSTATE_UNS_DATA; | ||
1527 | if (tcp_ctask->xmstate & XMSTATE_UNS_INIT) { | ||
1528 | dtask = &tcp_ctask->unsol_dtask; | ||
1529 | |||
1530 | iscsi_prep_unsolicit_data_pdu(ctask, &dtask->hdr); | ||
1531 | iscsi_buf_init_iov(&tcp_ctask->headbuf, (char*)&dtask->hdr, | ||
1532 | sizeof(struct iscsi_hdr)); | ||
1533 | if (conn->hdrdgst_en) | ||
1534 | iscsi_hdr_digest(conn, &tcp_ctask->headbuf, | ||
1535 | (u8*)dtask->hdrext); | ||
1536 | |||
1537 | tcp_ctask->xmstate &= ~XMSTATE_UNS_INIT; | ||
1538 | iscsi_set_padding(tcp_ctask, ctask->data_count); | ||
1539 | } | ||
1540 | |||
1541 | rc = iscsi_sendhdr(conn, &tcp_ctask->headbuf, ctask->data_count); | ||
1542 | if (rc) { | ||
1543 | tcp_ctask->xmstate &= ~XMSTATE_UNS_DATA; | ||
1544 | tcp_ctask->xmstate |= XMSTATE_UNS_HDR; | ||
1545 | return rc; | ||
1546 | } | ||
1547 | 1395 | ||
1548 | if (conn->datadgst_en) { | 1396 | if (ctask->unsol_count != 0) { |
1549 | dtask = &tcp_ctask->unsol_dtask; | 1397 | struct iscsi_data *hdr = &tcp_ctask->unsol_dtask.hdr; |
1550 | iscsi_data_digest_init(ctask->conn->dd_data, tcp_ctask); | ||
1551 | dtask->digest = 0; | ||
1552 | } | ||
1553 | 1398 | ||
1554 | debug_scsi("uns dout [itt 0x%x dlen %d sent %d]\n", | 1399 | /* Prepare a header for the unsolicited PDU. |
1555 | ctask->itt, ctask->unsol_count, tcp_ctask->sent); | 1400 | * The amount of data we want to send will be |
1556 | return 0; | 1401 | * in ctask->data_count. |
1557 | } | 1402 | * FIXME: return the data count instead. |
1403 | */ | ||
1404 | iscsi_prep_unsolicit_data_pdu(ctask, hdr); | ||
1558 | 1405 | ||
1559 | static int | 1406 | debug_tcp("unsol dout [itt 0x%x doff %d dlen %d]\n", |
1560 | iscsi_send_unsol_pdu(struct iscsi_conn *conn, struct iscsi_cmd_task *ctask) | 1407 | ctask->itt, tcp_ctask->sent, ctask->data_count); |
1561 | { | ||
1562 | struct iscsi_tcp_cmd_task *tcp_ctask = ctask->dd_data; | ||
1563 | int rc; | ||
1564 | 1408 | ||
1565 | if (tcp_ctask->xmstate & XMSTATE_UNS_HDR) { | 1409 | iscsi_tcp_send_hdr_prep(conn, hdr, sizeof(*hdr)); |
1566 | BUG_ON(!ctask->unsol_count); | 1410 | rc = iscsi_tcp_send_data_prep(conn, scsi_sglist(sc), |
1567 | tcp_ctask->xmstate &= ~XMSTATE_UNS_HDR; | 1411 | scsi_sg_count(sc), |
1568 | send_hdr: | 1412 | tcp_ctask->sent, |
1569 | rc = iscsi_send_unsol_hdr(conn, ctask); | 1413 | ctask->data_count); |
1570 | if (rc) | 1414 | if (rc) |
1571 | return rc; | 1415 | goto fail; |
1572 | } | 1416 | tcp_ctask->sent += ctask->data_count; |
1573 | 1417 | ctask->unsol_count -= ctask->data_count; | |
1574 | if (tcp_ctask->xmstate & XMSTATE_UNS_DATA) { | 1418 | goto flush; |
1575 | struct iscsi_data_task *dtask = &tcp_ctask->unsol_dtask; | 1419 | } else { |
1576 | int start = tcp_ctask->sent; | 1420 | struct iscsi_session *session = conn->session; |
1421 | struct iscsi_r2t_info *r2t; | ||
1577 | 1422 | ||
1578 | rc = iscsi_send_data(ctask, &tcp_ctask->sendbuf, &tcp_ctask->sg, | 1423 | /* All unsolicited PDUs sent. Check for solicited PDUs. |
1579 | &tcp_ctask->sent, &ctask->data_count, | ||
1580 | &dtask->digestbuf, &dtask->digest); | ||
1581 | ctask->unsol_count -= tcp_ctask->sent - start; | ||
1582 | if (rc) | ||
1583 | return rc; | ||
1584 | tcp_ctask->xmstate &= ~XMSTATE_UNS_DATA; | ||
1585 | /* | ||
1586 | * Done with the Data-Out. Next, check if we need | ||
1587 | * to send another unsolicited Data-Out. | ||
1588 | */ | 1424 | */ |
1589 | if (ctask->unsol_count) { | 1425 | spin_lock_bh(&session->lock); |
1590 | debug_scsi("sending more uns\n"); | 1426 | r2t = tcp_ctask->r2t; |
1591 | tcp_ctask->xmstate |= XMSTATE_UNS_INIT; | 1427 | if (r2t != NULL) { |
1592 | goto send_hdr; | 1428 | /* Continue with this R2T? */ |
1429 | if (!iscsi_solicit_data_cont(conn, ctask, r2t)) { | ||
1430 | debug_scsi(" done with r2t %p\n", r2t); | ||
1431 | |||
1432 | __kfifo_put(tcp_ctask->r2tpool.queue, | ||
1433 | (void*)&r2t, sizeof(void*)); | ||
1434 | tcp_ctask->r2t = r2t = NULL; | ||
1435 | } | ||
1593 | } | 1436 | } |
1594 | } | ||
1595 | return 0; | ||
1596 | } | ||
1597 | 1437 | ||
1598 | static int iscsi_send_sol_pdu(struct iscsi_conn *conn, | 1438 | if (r2t == NULL) { |
1599 | struct iscsi_cmd_task *ctask) | ||
1600 | { | ||
1601 | struct iscsi_tcp_cmd_task *tcp_ctask = ctask->dd_data; | ||
1602 | struct iscsi_session *session = conn->session; | ||
1603 | struct iscsi_r2t_info *r2t; | ||
1604 | struct iscsi_data_task *dtask; | ||
1605 | int left, rc; | ||
1606 | |||
1607 | if (tcp_ctask->xmstate & XMSTATE_SOL_HDR_INIT) { | ||
1608 | if (!tcp_ctask->r2t) { | ||
1609 | spin_lock_bh(&session->lock); | ||
1610 | __kfifo_get(tcp_ctask->r2tqueue, (void*)&tcp_ctask->r2t, | 1439 | __kfifo_get(tcp_ctask->r2tqueue, (void*)&tcp_ctask->r2t, |
1611 | sizeof(void*)); | 1440 | sizeof(void*)); |
1612 | spin_unlock_bh(&session->lock); | 1441 | r2t = tcp_ctask->r2t; |
1613 | } | 1442 | } |
1614 | send_hdr: | 1443 | spin_unlock_bh(&session->lock); |
1615 | r2t = tcp_ctask->r2t; | ||
1616 | dtask = &r2t->dtask; | ||
1617 | |||
1618 | if (conn->hdrdgst_en) | ||
1619 | iscsi_hdr_digest(conn, &r2t->headbuf, | ||
1620 | (u8*)dtask->hdrext); | ||
1621 | tcp_ctask->xmstate &= ~XMSTATE_SOL_HDR_INIT; | ||
1622 | tcp_ctask->xmstate |= XMSTATE_SOL_HDR; | ||
1623 | } | ||
1624 | |||
1625 | if (tcp_ctask->xmstate & XMSTATE_SOL_HDR) { | ||
1626 | r2t = tcp_ctask->r2t; | ||
1627 | dtask = &r2t->dtask; | ||
1628 | |||
1629 | rc = iscsi_sendhdr(conn, &r2t->headbuf, r2t->data_count); | ||
1630 | if (rc) | ||
1631 | return rc; | ||
1632 | tcp_ctask->xmstate &= ~XMSTATE_SOL_HDR; | ||
1633 | tcp_ctask->xmstate |= XMSTATE_SOL_DATA; | ||
1634 | 1444 | ||
1635 | if (conn->datadgst_en) { | 1445 | /* Waiting for more R2Ts to arrive. */ |
1636 | iscsi_data_digest_init(conn->dd_data, tcp_ctask); | 1446 | if (r2t == NULL) { |
1637 | dtask->digest = 0; | 1447 | debug_tcp("no R2Ts yet\n"); |
1448 | return 0; | ||
1638 | } | 1449 | } |
1639 | 1450 | ||
1640 | iscsi_set_padding(tcp_ctask, r2t->data_count); | 1451 | debug_scsi("sol dout %p [dsn %d itt 0x%x doff %d dlen %d]\n", |
1641 | debug_scsi("sol dout [dsn %d itt 0x%x dlen %d sent %d]\n", | 1452 | r2t, r2t->solicit_datasn - 1, ctask->itt, |
1642 | r2t->solicit_datasn - 1, ctask->itt, r2t->data_count, | 1453 | r2t->data_offset + r2t->sent, r2t->data_count); |
1643 | r2t->sent); | ||
1644 | } | ||
1645 | 1454 | ||
1646 | if (tcp_ctask->xmstate & XMSTATE_SOL_DATA) { | 1455 | iscsi_tcp_send_hdr_prep(conn, &r2t->dtask.hdr, |
1647 | r2t = tcp_ctask->r2t; | 1456 | sizeof(struct iscsi_hdr)); |
1648 | dtask = &r2t->dtask; | ||
1649 | 1457 | ||
1650 | rc = iscsi_send_data(ctask, &r2t->sendbuf, &r2t->sg, | 1458 | rc = iscsi_tcp_send_data_prep(conn, scsi_sglist(sc), |
1651 | &r2t->sent, &r2t->data_count, | 1459 | scsi_sg_count(sc), |
1652 | &dtask->digestbuf, &dtask->digest); | 1460 | r2t->data_offset + r2t->sent, |
1461 | r2t->data_count); | ||
1653 | if (rc) | 1462 | if (rc) |
1654 | return rc; | 1463 | goto fail; |
1655 | tcp_ctask->xmstate &= ~XMSTATE_SOL_DATA; | 1464 | tcp_ctask->sent += r2t->data_count; |
1656 | 1465 | r2t->sent += r2t->data_count; | |
1657 | /* | 1466 | goto flush; |
1658 | * Done with this Data-Out. Next, check if we have | ||
1659 | * to send another Data-Out for this R2T. | ||
1660 | */ | ||
1661 | BUG_ON(r2t->data_length - r2t->sent < 0); | ||
1662 | left = r2t->data_length - r2t->sent; | ||
1663 | if (left) { | ||
1664 | iscsi_solicit_data_cont(conn, ctask, r2t, left); | ||
1665 | goto send_hdr; | ||
1666 | } | ||
1667 | |||
1668 | /* | ||
1669 | * Done with this R2T. Check if there are more | ||
1670 | * outstanding R2Ts ready to be processed. | ||
1671 | */ | ||
1672 | spin_lock_bh(&session->lock); | ||
1673 | tcp_ctask->r2t = NULL; | ||
1674 | __kfifo_put(tcp_ctask->r2tpool.queue, (void*)&r2t, | ||
1675 | sizeof(void*)); | ||
1676 | if (__kfifo_get(tcp_ctask->r2tqueue, (void*)&r2t, | ||
1677 | sizeof(void*))) { | ||
1678 | tcp_ctask->r2t = r2t; | ||
1679 | spin_unlock_bh(&session->lock); | ||
1680 | goto send_hdr; | ||
1681 | } | ||
1682 | spin_unlock_bh(&session->lock); | ||
1683 | } | 1467 | } |
1684 | return 0; | 1468 | return 0; |
1685 | } | 1469 | fail: |
1686 | 1470 | iscsi_conn_failure(conn, rc); | |
1687 | /** | 1471 | return -EIO; |
1688 | * iscsi_tcp_ctask_xmit - xmit normal PDU task | ||
1689 | * @conn: iscsi connection | ||
1690 | * @ctask: iscsi command task | ||
1691 | * | ||
1692 | * Notes: | ||
1693 | * The function can return -EAGAIN in which case caller must | ||
1694 | * call it again later, or recover. '0' return code means successful | ||
1695 | * xmit. | ||
1696 | * The function is divided into logical helpers (above) for the different | ||
1697 | * xmit stages. | ||
1698 | * | ||
1699 | *iscsi_send_cmd_hdr() | ||
1700 | * XMSTATE_CMD_HDR_INIT - prepare Header and Data buffers, calculate | ||
1701 | * Header Digest | ||
1702 | * XMSTATE_CMD_HDR_XMIT - Transmit header in progress | ||
1703 | * | ||
1704 | *iscsi_send_padding | ||
1705 | * XMSTATE_W_PAD - Prepare and send padding | ||
1706 | * XMSTATE_W_RESEND_PAD - retry sending padding | ||
1707 | * | ||
1708 | *iscsi_send_digest | ||
1709 | * XMSTATE_W_RESEND_DATA_DIGEST - Finalize and send Data Digest | ||
1710 | * XMSTATE_W_RESEND_DATA_DIGEST - retry sending digest | ||
1711 | * | ||
1712 | *iscsi_send_unsol_hdr | ||
1713 | * XMSTATE_UNS_INIT - prepare unsolicited data header and digest | ||
1714 | * XMSTATE_UNS_HDR - send unsolicited header | ||
1715 | * | ||
1716 | *iscsi_send_unsol_pdu | ||
1717 | * XMSTATE_UNS_DATA - unsolicited data send in progress | ||
1718 | * | ||
1719 | *iscsi_send_sol_pdu | ||
1720 | * XMSTATE_SOL_HDR_INIT - initialize solicited data header and digest | ||
1721 | * XMSTATE_SOL_HDR - send solicited header | ||
1722 | * XMSTATE_SOL_DATA - send solicited data | ||
1723 | * | ||
1724 | *iscsi_tcp_ctask_xmit | ||
1725 | * XMSTATE_IMM_DATA - xmit management data (??) | ||
1726 | **/ | ||
1727 | static int | ||
1728 | iscsi_tcp_ctask_xmit(struct iscsi_conn *conn, struct iscsi_cmd_task *ctask) | ||
1729 | { | ||
1730 | struct iscsi_tcp_cmd_task *tcp_ctask = ctask->dd_data; | ||
1731 | int rc = 0; | ||
1732 | |||
1733 | debug_scsi("ctask deq [cid %d xmstate %x itt 0x%x]\n", | ||
1734 | conn->id, tcp_ctask->xmstate, ctask->itt); | ||
1735 | |||
1736 | rc = iscsi_send_cmd_hdr(conn, ctask); | ||
1737 | if (rc) | ||
1738 | return rc; | ||
1739 | if (ctask->sc->sc_data_direction != DMA_TO_DEVICE) | ||
1740 | return 0; | ||
1741 | |||
1742 | if (tcp_ctask->xmstate & XMSTATE_IMM_DATA) { | ||
1743 | rc = iscsi_send_data(ctask, &tcp_ctask->sendbuf, &tcp_ctask->sg, | ||
1744 | &tcp_ctask->sent, &ctask->imm_count, | ||
1745 | &tcp_ctask->immbuf, &tcp_ctask->immdigest); | ||
1746 | if (rc) | ||
1747 | return rc; | ||
1748 | tcp_ctask->xmstate &= ~XMSTATE_IMM_DATA; | ||
1749 | } | ||
1750 | |||
1751 | rc = iscsi_send_unsol_pdu(conn, ctask); | ||
1752 | if (rc) | ||
1753 | return rc; | ||
1754 | |||
1755 | rc = iscsi_send_sol_pdu(conn, ctask); | ||
1756 | if (rc) | ||
1757 | return rc; | ||
1758 | |||
1759 | return rc; | ||
1760 | } | 1472 | } |
1761 | 1473 | ||
1762 | static struct iscsi_cls_conn * | 1474 | static struct iscsi_cls_conn * |
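
The rewritten iscsi_tcp_ctask_xmit() above drops the per-bit xmstate machine: segments are queued with iscsi_tcp_send_hdr_prep()/iscsi_tcp_send_data_prep() and pushed out by iscsi_tcp_flush(), following the return convention the new kernel-doc comment documents (0 when done, -EAGAIN when the socket is busy, another negative value on hard error). The stand-alone sketch below models only that retry contract; model_flush() and the immediate-retry loop are illustrative stand-ins, not the driver's code (the real caller, libiscsi's xmit worker, reschedules the task rather than spinning).

/* Illustrative model of the flush/-EAGAIN contract; not part of the patch. */
#include <errno.h>
#include <stdio.h>

static int segments_left = 3;            /* pretend there are queued segments */

static int model_flush(void)             /* stand-in for iscsi_tcp_flush() */
{
	if (segments_left > 0) {
		segments_left--;         /* one segment hit the wire */
		return -EAGAIN;          /* socket would block, caller retries */
	}
	return 0;                        /* everything transmitted */
}

int main(void)
{
	int rc;

	do {
		rc = model_flush();      /* the real worker reschedules instead */
	} while (rc == -EAGAIN);

	printf("flush completed, rc=%d\n", rc);
	return rc ? 1 : 0;
}
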
@@ -1970,10 +1682,17 @@ free_socket: | |||
1970 | 1682 | ||
1971 | /* called with host lock */ | 1683 | /* called with host lock */ |
1972 | static void | 1684 | static void |
1973 | iscsi_tcp_mgmt_init(struct iscsi_conn *conn, struct iscsi_mgmt_task *mtask) | 1685 | iscsi_tcp_mtask_init(struct iscsi_conn *conn, struct iscsi_mgmt_task *mtask) |
1974 | { | 1686 | { |
1975 | struct iscsi_tcp_mgmt_task *tcp_mtask = mtask->dd_data; | 1687 | debug_scsi("mtask deq [cid %d itt 0x%x]\n", conn->id, mtask->itt); |
1976 | tcp_mtask->xmstate = XMSTATE_IMM_HDR_INIT; | 1688 | |
1689 | /* Prepare PDU, optionally w/ immediate data */ | ||
1690 | iscsi_tcp_send_hdr_prep(conn, mtask->hdr, sizeof(*mtask->hdr)); | ||
1691 | |||
1692 | /* If we have immediate data, attach a payload */ | ||
1693 | if (mtask->data_count) | ||
1694 | iscsi_tcp_send_linear_data_prepare(conn, mtask->data, | ||
1695 | mtask->data_count); | ||
1977 | } | 1696 | } |
1978 | 1697 | ||
1979 | static int | 1698 | static int |
@@ -2177,7 +1896,7 @@ iscsi_tcp_session_create(struct iscsi_transport *iscsit, | |||
2177 | struct iscsi_mgmt_task *mtask = session->mgmt_cmds[cmd_i]; | 1896 | struct iscsi_mgmt_task *mtask = session->mgmt_cmds[cmd_i]; |
2178 | struct iscsi_tcp_mgmt_task *tcp_mtask = mtask->dd_data; | 1897 | struct iscsi_tcp_mgmt_task *tcp_mtask = mtask->dd_data; |
2179 | 1898 | ||
2180 | mtask->hdr = &tcp_mtask->hdr; | 1899 | mtask->hdr = (struct iscsi_hdr *) &tcp_mtask->hdr; |
2181 | } | 1900 | } |
2182 | 1901 | ||
2183 | if (iscsi_r2tpool_alloc(class_to_transport_session(cls_session))) | 1902 | if (iscsi_r2tpool_alloc(class_to_transport_session(cls_session))) |
@@ -2274,8 +1993,8 @@ static struct iscsi_transport iscsi_tcp_transport = { | |||
2274 | /* IO */ | 1993 | /* IO */ |
2275 | .send_pdu = iscsi_conn_send_pdu, | 1994 | .send_pdu = iscsi_conn_send_pdu, |
2276 | .get_stats = iscsi_conn_get_stats, | 1995 | .get_stats = iscsi_conn_get_stats, |
2277 | .init_cmd_task = iscsi_tcp_cmd_init, | 1996 | .init_cmd_task = iscsi_tcp_ctask_init, |
2278 | .init_mgmt_task = iscsi_tcp_mgmt_init, | 1997 | .init_mgmt_task = iscsi_tcp_mtask_init, |
2279 | .xmit_cmd_task = iscsi_tcp_ctask_xmit, | 1998 | .xmit_cmd_task = iscsi_tcp_ctask_xmit, |
2280 | .xmit_mgmt_task = iscsi_tcp_mtask_xmit, | 1999 | .xmit_mgmt_task = iscsi_tcp_mtask_xmit, |
2281 | .cleanup_cmd_task = iscsi_tcp_cleanup_ctask, | 2000 | .cleanup_cmd_task = iscsi_tcp_cleanup_ctask, |
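
The renamed callbacks above are wired into the transport template: libiscsi invokes init_cmd_task/init_mgmt_task when a task is queued and xmit_cmd_task/xmit_mgmt_task from its transmit worker. The sketch below only illustrates that ops-table pattern; the struct and field names are invented for the example and do not reflect the real struct iscsi_transport layout.

/* Toy ops table mirroring the init-then-xmit split; names are illustrative. */
#include <stdio.h>

struct demo_task { int itt; int prepared; };

struct demo_transport_ops {
	int (*init_cmd_task)(struct demo_task *task);
	int (*xmit_cmd_task)(struct demo_task *task);
};

static int demo_init(struct demo_task *task)
{
	task->prepared = 1;                       /* header/data prep happens here */
	printf("prepared itt 0x%x\n", task->itt);
	return 0;
}

static int demo_xmit(struct demo_task *task)
{
	if (!task->prepared)
		return -1;                        /* nothing queued to flush */
	printf("flushed itt 0x%x\n", task->itt);
	return 0;
}

static const struct demo_transport_ops demo_tcp_ops = {
	.init_cmd_task = demo_init,
	.xmit_cmd_task = demo_xmit,
};

int main(void)
{
	struct demo_task t = { .itt = 0x2a };

	/* libiscsi-style flow: prep at queue time, transmit afterwards */
	if (demo_tcp_ops.init_cmd_task(&t) == 0)
		return demo_tcp_ops.xmit_cmd_task(&t);
	return 1;
}
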
diff --git a/drivers/scsi/iscsi_tcp.h b/drivers/scsi/iscsi_tcp.h index d49d87611e82..893cd2e1701e 100644 --- a/drivers/scsi/iscsi_tcp.h +++ b/drivers/scsi/iscsi_tcp.h | |||
@@ -24,35 +24,18 @@ | |||
24 | 24 | ||
25 | #include <scsi/libiscsi.h> | 25 | #include <scsi/libiscsi.h> |
26 | 26 | ||
27 | /* xmit state machine */ | ||
28 | #define XMSTATE_IDLE 0x0 | ||
29 | #define XMSTATE_CMD_HDR_INIT 0x1 | ||
30 | #define XMSTATE_CMD_HDR_XMIT 0x2 | ||
31 | #define XMSTATE_IMM_HDR 0x4 | ||
32 | #define XMSTATE_IMM_DATA 0x8 | ||
33 | #define XMSTATE_UNS_INIT 0x10 | ||
34 | #define XMSTATE_UNS_HDR 0x20 | ||
35 | #define XMSTATE_UNS_DATA 0x40 | ||
36 | #define XMSTATE_SOL_HDR 0x80 | ||
37 | #define XMSTATE_SOL_DATA 0x100 | ||
38 | #define XMSTATE_W_PAD 0x200 | ||
39 | #define XMSTATE_W_RESEND_PAD 0x400 | ||
40 | #define XMSTATE_W_RESEND_DATA_DIGEST 0x800 | ||
41 | #define XMSTATE_IMM_HDR_INIT 0x1000 | ||
42 | #define XMSTATE_SOL_HDR_INIT 0x2000 | ||
43 | |||
44 | #define ISCSI_SG_TABLESIZE SG_ALL | 27 | #define ISCSI_SG_TABLESIZE SG_ALL |
45 | #define ISCSI_TCP_MAX_CMD_LEN 16 | 28 | #define ISCSI_TCP_MAX_CMD_LEN 16 |
46 | 29 | ||
47 | struct crypto_hash; | 30 | struct crypto_hash; |
48 | struct socket; | 31 | struct socket; |
49 | struct iscsi_tcp_conn; | 32 | struct iscsi_tcp_conn; |
50 | struct iscsi_chunk; | 33 | struct iscsi_segment; |
51 | 34 | ||
52 | typedef int iscsi_chunk_done_fn_t(struct iscsi_tcp_conn *, | 35 | typedef int iscsi_segment_done_fn_t(struct iscsi_tcp_conn *, |
53 | struct iscsi_chunk *); | 36 | struct iscsi_segment *); |
54 | 37 | ||
55 | struct iscsi_chunk { | 38 | struct iscsi_segment { |
56 | unsigned char *data; | 39 | unsigned char *data; |
57 | unsigned int size; | 40 | unsigned int size; |
58 | unsigned int copied; | 41 | unsigned int copied; |
@@ -67,16 +50,14 @@ struct iscsi_chunk { | |||
67 | struct scatterlist *sg; | 50 | struct scatterlist *sg; |
68 | void *sg_mapped; | 51 | void *sg_mapped; |
69 | unsigned int sg_offset; | 52 | unsigned int sg_offset; |
70 | unsigned int sg_index; | ||
71 | unsigned int sg_count; | ||
72 | 53 | ||
73 | iscsi_chunk_done_fn_t *done; | 54 | iscsi_segment_done_fn_t *done; |
74 | }; | 55 | }; |
75 | 56 | ||
76 | /* Socket connection receive helper */ | 57 | /* Socket connection receive helper */ |
77 | struct iscsi_tcp_recv { | 58 | struct iscsi_tcp_recv { |
78 | struct iscsi_hdr *hdr; | 59 | struct iscsi_hdr *hdr; |
79 | struct iscsi_chunk chunk; | 60 | struct iscsi_segment segment; |
80 | 61 | ||
81 | /* Allocate buffer for BHS + AHS */ | 62 | /* Allocate buffer for BHS + AHS */ |
82 | uint32_t hdr_buf[64]; | 63 | uint32_t hdr_buf[64]; |
@@ -88,11 +69,8 @@ struct iscsi_tcp_recv { | |||
88 | /* Socket connection send helper */ | 69 | /* Socket connection send helper */ |
89 | struct iscsi_tcp_send { | 70 | struct iscsi_tcp_send { |
90 | struct iscsi_hdr *hdr; | 71 | struct iscsi_hdr *hdr; |
91 | struct iscsi_chunk chunk; | 72 | struct iscsi_segment segment; |
92 | struct iscsi_chunk data_chunk; | 73 | struct iscsi_segment data_segment; |
93 | |||
94 | /* Allocate buffer for BHS + AHS */ | ||
95 | uint32_t hdr_buf[64]; | ||
96 | }; | 74 | }; |
97 | 75 | ||
98 | struct iscsi_tcp_conn { | 76 | struct iscsi_tcp_conn { |
@@ -118,29 +96,19 @@ struct iscsi_tcp_conn { | |||
118 | uint32_t sendpage_failures_cnt; | 96 | uint32_t sendpage_failures_cnt; |
119 | uint32_t discontiguous_hdr_cnt; | 97 | uint32_t discontiguous_hdr_cnt; |
120 | 98 | ||
121 | ssize_t (*sendpage)(struct socket *, struct page *, int, size_t, int); | 99 | int error; |
122 | }; | ||
123 | 100 | ||
124 | struct iscsi_buf { | 101 | ssize_t (*sendpage)(struct socket *, struct page *, int, size_t, int); |
125 | struct scatterlist sg; | ||
126 | unsigned int sent; | ||
127 | char use_sendmsg; | ||
128 | }; | 102 | }; |
129 | 103 | ||
130 | struct iscsi_data_task { | 104 | struct iscsi_data_task { |
131 | struct iscsi_data hdr; /* PDU */ | 105 | struct iscsi_data hdr; /* PDU */ |
132 | char hdrext[ISCSI_DIGEST_SIZE];/* Header-Digest */ | 106 | char hdrext[ISCSI_DIGEST_SIZE];/* Header-Digest */ |
133 | struct iscsi_buf digestbuf; /* digest buffer */ | ||
134 | uint32_t digest; /* data digest */ | ||
135 | }; | 107 | }; |
136 | 108 | ||
137 | struct iscsi_tcp_mgmt_task { | 109 | struct iscsi_tcp_mgmt_task { |
138 | struct iscsi_hdr hdr; | 110 | struct iscsi_hdr hdr; |
139 | char hdrext[ISCSI_DIGEST_SIZE]; /* Header-Digest */ | 111 | char hdrext[ISCSI_DIGEST_SIZE]; /* Header-Digest */ |
140 | int xmstate; /* mgmt xmit progress */ | ||
141 | struct iscsi_buf headbuf; /* header buffer */ | ||
142 | struct iscsi_buf sendbuf; /* in progress buffer */ | ||
143 | int sent; | ||
144 | }; | 112 | }; |
145 | 113 | ||
146 | struct iscsi_r2t_info { | 114 | struct iscsi_r2t_info { |
@@ -148,13 +116,10 @@ struct iscsi_r2t_info { | |||
148 | __be32 exp_statsn; /* copied from R2T */ | 116 | __be32 exp_statsn; /* copied from R2T */ |
149 | uint32_t data_length; /* copied from R2T */ | 117 | uint32_t data_length; /* copied from R2T */ |
150 | uint32_t data_offset; /* copied from R2T */ | 118 | uint32_t data_offset; /* copied from R2T */ |
151 | struct iscsi_buf headbuf; /* Data-Out Header Buffer */ | ||
152 | struct iscsi_buf sendbuf; /* Data-Out in progress buffer*/ | ||
153 | int sent; /* R2T sequence progress */ | 119 | int sent; /* R2T sequence progress */ |
154 | int data_count; /* DATA-Out payload progress */ | 120 | int data_count; /* DATA-Out payload progress */ |
155 | struct scatterlist *sg; /* per-R2T SG list */ | ||
156 | int solicit_datasn; | 121 | int solicit_datasn; |
157 | struct iscsi_data_task dtask; /* which data task */ | 122 | struct iscsi_data_task dtask; /* Data-Out header buf */ |
158 | }; | 123 | }; |
159 | 124 | ||
160 | struct iscsi_tcp_cmd_task { | 125 | struct iscsi_tcp_cmd_task { |
@@ -163,24 +128,14 @@ struct iscsi_tcp_cmd_task { | |||
163 | char hdrextbuf[ISCSI_MAX_AHS_SIZE + | 128 | char hdrextbuf[ISCSI_MAX_AHS_SIZE + |
164 | ISCSI_DIGEST_SIZE]; | 129 | ISCSI_DIGEST_SIZE]; |
165 | } hdr; | 130 | } hdr; |
166 | char pad[ISCSI_PAD_LEN]; | 131 | |
167 | int pad_count; /* padded bytes */ | ||
168 | struct iscsi_buf headbuf; /* header buf (xmit) */ | ||
169 | struct iscsi_buf sendbuf; /* in progress buffer*/ | ||
170 | int xmstate; /* xmit xtate machine */ | ||
171 | int sent; | 132 | int sent; |
172 | struct scatterlist *sg; /* per-cmd SG list */ | 133 | uint32_t exp_datasn; /* expected target's R2TSN/DataSN */ |
173 | struct scatterlist *bad_sg; /* assert statement */ | ||
174 | int sg_count; /* SG's to process */ | ||
175 | uint32_t exp_datasn; /* expected target's R2TSN/DataSN */ | ||
176 | int data_offset; | 134 | int data_offset; |
177 | struct iscsi_r2t_info *r2t; /* in progress R2T */ | 135 | struct iscsi_r2t_info *r2t; /* in progress R2T */ |
178 | struct iscsi_pool r2tpool; | 136 | struct iscsi_pool r2tpool; |
179 | struct kfifo *r2tqueue; | 137 | struct kfifo *r2tqueue; |
180 | int digest_count; | 138 | struct iscsi_data_task unsol_dtask; /* Data-Out header buf */ |
181 | uint32_t immdigest; /* for imm data */ | ||
182 | struct iscsi_buf immbuf; /* for imm data digest */ | ||
183 | struct iscsi_data_task unsol_dtask; /* unsol data task */ | ||
184 | }; | 139 | }; |
185 | 140 | ||
186 | #endif /* ISCSI_H */ | 141 | #endif /* ISCSI_H */ |
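
The header changes replace the old iscsi_buf bookkeeping with the leaner struct iscsi_segment shown earlier in this diff: a data pointer, a total size, a copied counter, optional scatterlist state, and a done callback fired once the segment is complete. The stand-alone sketch below models only that copied/size/done idea with invented names; it is not the driver's segment code.

/* Minimal model of a segment with a completion callback; names are made up. */
#include <stdio.h>
#include <string.h>

struct demo_segment;
typedef int (*demo_segment_done_fn)(struct demo_segment *seg);

struct demo_segment {
	unsigned char *data;          /* destination buffer */
	unsigned int size;            /* total bytes expected */
	unsigned int copied;          /* bytes received so far */
	demo_segment_done_fn done;    /* called when copied == size */
};

/* Feed bytes into the segment; invoke ->done once everything has arrived. */
static int demo_segment_recv(struct demo_segment *seg,
			     const unsigned char *buf, unsigned int len)
{
	unsigned int copy = seg->size - seg->copied;

	if (copy > len)
		copy = len;
	memcpy(seg->data + seg->copied, buf, copy);
	seg->copied += copy;
	if (seg->copied == seg->size && seg->done)
		return seg->done(seg);
	return 0;
}

static int demo_done(struct demo_segment *seg)
{
	printf("segment complete: %u bytes\n", seg->copied);
	return 0;
}

int main(void)
{
	unsigned char wire[48] = { 0 };   /* pretend bytes from the socket */
	unsigned char pdu[48];
	struct demo_segment seg = {
		.data = pdu, .size = sizeof(pdu), .copied = 0, .done = demo_done,
	};

	demo_segment_recv(&seg, wire, 20);        /* partial chunk of the PDU */
	demo_segment_recv(&seg, wire + 20, 28);   /* remainder triggers ->done */
	return 0;
}
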
diff --git a/drivers/scsi/libiscsi.c b/drivers/scsi/libiscsi.c index b0bc8c3b0ecb..f15df8d75fd8 100644 --- a/drivers/scsi/libiscsi.c +++ b/drivers/scsi/libiscsi.c | |||
@@ -156,20 +156,19 @@ static int iscsi_prep_scsi_cmd_pdu(struct iscsi_cmd_task *ctask) | |||
156 | rc = iscsi_add_hdr(ctask, sizeof(*hdr)); | 156 | rc = iscsi_add_hdr(ctask, sizeof(*hdr)); |
157 | if (rc) | 157 | if (rc) |
158 | return rc; | 158 | return rc; |
159 | hdr->opcode = ISCSI_OP_SCSI_CMD; | 159 | hdr->opcode = ISCSI_OP_SCSI_CMD; |
160 | hdr->flags = ISCSI_ATTR_SIMPLE; | 160 | hdr->flags = ISCSI_ATTR_SIMPLE; |
161 | int_to_scsilun(sc->device->lun, (struct scsi_lun *)hdr->lun); | 161 | int_to_scsilun(sc->device->lun, (struct scsi_lun *)hdr->lun); |
162 | hdr->itt = build_itt(ctask->itt, conn->id, session->age); | 162 | hdr->itt = build_itt(ctask->itt, conn->id, session->age); |
163 | hdr->data_length = cpu_to_be32(scsi_bufflen(sc)); | 163 | hdr->data_length = cpu_to_be32(scsi_bufflen(sc)); |
164 | hdr->cmdsn = cpu_to_be32(session->cmdsn); | 164 | hdr->cmdsn = cpu_to_be32(session->cmdsn); |
165 | session->cmdsn++; | 165 | session->cmdsn++; |
166 | hdr->exp_statsn = cpu_to_be32(conn->exp_statsn); | 166 | hdr->exp_statsn = cpu_to_be32(conn->exp_statsn); |
167 | memcpy(hdr->cdb, sc->cmnd, sc->cmd_len); | 167 | memcpy(hdr->cdb, sc->cmnd, sc->cmd_len); |
168 | if (sc->cmd_len < MAX_COMMAND_SIZE) | 168 | if (sc->cmd_len < MAX_COMMAND_SIZE) |
169 | memset(&hdr->cdb[sc->cmd_len], 0, | 169 | memset(&hdr->cdb[sc->cmd_len], 0, |
170 | MAX_COMMAND_SIZE - sc->cmd_len); | 170 | MAX_COMMAND_SIZE - sc->cmd_len); |
171 | 171 | ||
172 | ctask->data_count = 0; | ||
173 | ctask->imm_count = 0; | 172 | ctask->imm_count = 0; |
174 | if (sc->sc_data_direction == DMA_TO_DEVICE) { | 173 | if (sc->sc_data_direction == DMA_TO_DEVICE) { |
175 | hdr->flags |= ISCSI_FLAG_CMD_WRITE; | 174 | hdr->flags |= ISCSI_FLAG_CMD_WRITE; |
@@ -198,9 +197,9 @@ static int iscsi_prep_scsi_cmd_pdu(struct iscsi_cmd_task *ctask) | |||
198 | else | 197 | else |
199 | ctask->imm_count = min(scsi_bufflen(sc), | 198 | ctask->imm_count = min(scsi_bufflen(sc), |
200 | conn->max_xmit_dlength); | 199 | conn->max_xmit_dlength); |
201 | hton24(ctask->hdr->dlength, ctask->imm_count); | 200 | hton24(hdr->dlength, ctask->imm_count); |
202 | } else | 201 | } else |
203 | zero_data(ctask->hdr->dlength); | 202 | zero_data(hdr->dlength); |
204 | 203 | ||
205 | if (!session->initial_r2t_en) { | 204 | if (!session->initial_r2t_en) { |
206 | ctask->unsol_count = min((session->first_burst), | 205 | ctask->unsol_count = min((session->first_burst), |
@@ -210,7 +209,7 @@ static int iscsi_prep_scsi_cmd_pdu(struct iscsi_cmd_task *ctask) | |||
210 | 209 | ||
211 | if (!ctask->unsol_count) | 210 | if (!ctask->unsol_count) |
212 | /* No unsolicited Data-Outs */ | 211 | /* No unsolicited Data-Outs */ |
213 | ctask->hdr->flags |= ISCSI_FLAG_CMD_FINAL; | 212 | hdr->flags |= ISCSI_FLAG_CMD_FINAL; |
214 | } else { | 213 | } else { |
215 | hdr->flags |= ISCSI_FLAG_CMD_FINAL; | 214 | hdr->flags |= ISCSI_FLAG_CMD_FINAL; |
216 | zero_data(hdr->dlength); | 215 | zero_data(hdr->dlength); |
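
For writes, the surrounding code clamps imm_count to the smaller of the transfer length and max_xmit_dlength, and, when InitialR2T is disabled, unsol_count covers what is left of the first burst. The little calculation below just works one example of that arithmetic with invented numbers; the real clamping (including the ImmediateData interaction with FirstBurstLength) lives in iscsi_prep_scsi_cmd_pdu().

/* Worked example of the immediate/unsolicited split; numbers are invented. */
#include <stdio.h>

static unsigned int min_u(unsigned int a, unsigned int b)
{
	return a < b ? a : b;
}

int main(void)
{
	unsigned int bufflen = 96 * 1024;          /* total write size */
	unsigned int max_xmit_dlength = 8 * 1024;  /* per-PDU data limit */
	unsigned int first_burst = 64 * 1024;      /* negotiated FirstBurstLength */

	/* Immediate data rides in the SCSI command PDU itself. */
	unsigned int imm_count = min_u(bufflen, max_xmit_dlength);

	/* With InitialR2T=No, the rest of the first burst goes out unsolicited. */
	unsigned int unsol_count = min_u(first_burst, bufflen) - imm_count;

	printf("imm=%u unsol=%u solicited=%u\n",
	       imm_count, unsol_count, bufflen - imm_count - unsol_count);
	return 0;
}
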
@@ -228,13 +227,15 @@ static int iscsi_prep_scsi_cmd_pdu(struct iscsi_cmd_task *ctask) | |||
228 | WARN_ON(hdrlength >= 256); | 227 | WARN_ON(hdrlength >= 256); |
229 | hdr->hlength = hdrlength & 0xFF; | 228 | hdr->hlength = hdrlength & 0xFF; |
230 | 229 | ||
231 | conn->scsicmd_pdus_cnt++; | 230 | if (conn->session->tt->init_cmd_task(conn->ctask)) |
231 | return -EIO; | ||
232 | 232 | ||
233 | debug_scsi("iscsi prep [%s cid %d sc %p cdb 0x%x itt 0x%x len %d " | 233 | conn->scsicmd_pdus_cnt++; |
234 | debug_scsi("iscsi prep [%s cid %d sc %p cdb 0x%x itt 0x%x len %d " | ||
234 | "cmdsn %d win %d]\n", | 235 | "cmdsn %d win %d]\n", |
235 | sc->sc_data_direction == DMA_TO_DEVICE ? "write" : "read", | 236 | sc->sc_data_direction == DMA_TO_DEVICE ? "write" : "read", |
236 | conn->id, sc, sc->cmnd[0], ctask->itt, scsi_bufflen(sc), | 237 | conn->id, sc, sc->cmnd[0], ctask->itt, scsi_bufflen(sc), |
237 | session->cmdsn, session->max_cmdsn - session->exp_cmdsn + 1); | 238 | session->cmdsn, session->max_cmdsn - session->exp_cmdsn + 1); |
238 | return 0; | 239 | return 0; |
239 | } | 240 | } |
240 | 241 | ||
@@ -927,7 +928,7 @@ check_mgmt: | |||
927 | fail_command(conn, conn->ctask, DID_ABORT << 16); | 928 | fail_command(conn, conn->ctask, DID_ABORT << 16); |
928 | continue; | 929 | continue; |
929 | } | 930 | } |
930 | conn->session->tt->init_cmd_task(conn->ctask); | 931 | |
931 | conn->ctask->state = ISCSI_TASK_RUNNING; | 932 | conn->ctask->state = ISCSI_TASK_RUNNING; |
932 | list_move_tail(conn->xmitqueue.next, &conn->run_list); | 933 | list_move_tail(conn->xmitqueue.next, &conn->run_list); |
933 | rc = iscsi_xmit_ctask(conn); | 934 | rc = iscsi_xmit_ctask(conn); |
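
With this change, init_cmd_task is called from PDU preparation (and its return value checked) instead of from the transmit worker just before iscsi_xmit_ctask(), so a transport that cannot set up its send buffers fails the command before it is marked running. A compact way to picture the new ordering, with invented names standing in for the libiscsi helpers:

/* Sketch of prep-time initialization with error checking; names are invented. */
#include <errno.h>
#include <stdio.h>

static int transport_init_task(int itt)
{
	/* pretend the transport failed to set up send buffers for itt 0x13 */
	return itt == 0x13 ? -1 : 0;
}

static int prep_scsi_cmd_pdu(int itt)
{
	/* the header is built first, then the transport hook runs and is checked */
	if (transport_init_task(itt))
		return -EIO;
	return 0;
}

int main(void)
{
	int itt;

	for (itt = 0x12; itt <= 0x13; itt++) {
		if (prep_scsi_cmd_pdu(itt)) {
			printf("itt 0x%x failed before it was marked running\n", itt);
			continue;        /* task never reaches the run list */
		}
		printf("itt 0x%x queued for transmit\n", itt);
	}
	return 0;
}
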