| author | Christoph Hellwig <hch@infradead.org> | 2012-04-24 00:25:06 -0400 |
|---|---|---|
| committer | Nicholas Bellinger <nab@linux-iscsi.org> | 2012-05-06 18:11:26 -0400 |
| commit | 5787cacd0bd5ee016ad807b244550d34fe2beebe | |
| tree | d9303fd0c5767f945c13dfc34eb0b16a4f4d2730 /drivers/target | |
| parent | cf572a9627c9ae86082216de109780c1d2e2ee28 | |
target: remove struct se_task
We can use struct se_cmd for everything it did. Make sure to pass the S/G
list and data direction to the execution function to ease adding back BIDI
support later on.
Signed-off-by: Christoph Hellwig <hch@lst.de>
Signed-off-by: Nicholas Bellinger <nab@linux-iscsi.org>
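For orientation, here is the shape of the backend I/O entry point before and after this patch, as it appears in the FILEIO hunks below (the other backends follow the same pattern):

```c
/* Before: one struct se_task per command; the backend dug the S/G list,
 * data direction, and owning command back out of the task. */
static int fd_do_task(struct se_task *task);

/* After: the command itself plus an explicit S/G list and direction, so
 * a future BIDI path can pass a second S/G list without resurrecting
 * struct se_task. */
static int fd_execute_cmd(struct se_cmd *cmd, struct scatterlist *sgl,
			  u32 sgl_nents, enum dma_data_direction data_direction);
```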
Diffstat (limited to 'drivers/target')
| -rw-r--r-- | drivers/target/target_core_file.c | 117 |
| -rw-r--r-- | drivers/target/target_core_file.h | 4 |
| -rw-r--r-- | drivers/target/target_core_iblock.c | 142 |
| -rw-r--r-- | drivers/target/target_core_iblock.h | 1 |
| -rw-r--r-- | drivers/target/target_core_pscsi.c | 172 |
| -rw-r--r-- | drivers/target/target_core_pscsi.h | 1 |
| -rw-r--r-- | drivers/target/target_core_rd.c | 40 |
| -rw-r--r-- | drivers/target/target_core_tmr.c | 8 |
| -rw-r--r-- | drivers/target/target_core_transport.c | 127 |
9 files changed, 205 insertions, 407 deletions
diff --git a/drivers/target/target_core_file.c b/drivers/target/target_core_file.c
index a984964ab70a..bbcedcfc068a 100644
--- a/drivers/target/target_core_file.c
+++ b/drivers/target/target_core_file.c
@@ -244,53 +244,33 @@ static void fd_free_device(void *p) | |||
244 | kfree(fd_dev); | 244 | kfree(fd_dev); |
245 | } | 245 | } |
246 | 246 | ||
247 | static inline struct fd_request *FILE_REQ(struct se_task *task) | 247 | static int fd_do_readv(struct se_cmd *cmd, struct scatterlist *sgl, |
248 | u32 sgl_nents) | ||
248 | { | 249 | { |
249 | return container_of(task, struct fd_request, fd_task); | 250 | struct se_device *se_dev = cmd->se_dev; |
250 | } | ||
251 | |||
252 | |||
253 | static struct se_task * | ||
254 | fd_alloc_task(unsigned char *cdb) | ||
255 | { | ||
256 | struct fd_request *fd_req; | ||
257 | |||
258 | fd_req = kzalloc(sizeof(struct fd_request), GFP_KERNEL); | ||
259 | if (!fd_req) { | ||
260 | pr_err("Unable to allocate struct fd_request\n"); | ||
261 | return NULL; | ||
262 | } | ||
263 | |||
264 | return &fd_req->fd_task; | ||
265 | } | ||
266 | |||
267 | static int fd_do_readv(struct se_task *task) | ||
268 | { | ||
269 | struct fd_request *req = FILE_REQ(task); | ||
270 | struct se_device *se_dev = req->fd_task.task_se_cmd->se_dev; | ||
271 | struct fd_dev *dev = se_dev->dev_ptr; | 251 | struct fd_dev *dev = se_dev->dev_ptr; |
272 | struct file *fd = dev->fd_file; | 252 | struct file *fd = dev->fd_file; |
273 | struct scatterlist *sg = task->task_sg; | 253 | struct scatterlist *sg; |
274 | struct iovec *iov; | 254 | struct iovec *iov; |
275 | mm_segment_t old_fs; | 255 | mm_segment_t old_fs; |
276 | loff_t pos = (task->task_se_cmd->t_task_lba * | 256 | loff_t pos = (cmd->t_task_lba * |
277 | se_dev->se_sub_dev->se_dev_attrib.block_size); | 257 | se_dev->se_sub_dev->se_dev_attrib.block_size); |
278 | int ret = 0, i; | 258 | int ret = 0, i; |
279 | 259 | ||
280 | iov = kzalloc(sizeof(struct iovec) * task->task_sg_nents, GFP_KERNEL); | 260 | iov = kzalloc(sizeof(struct iovec) * sgl_nents, GFP_KERNEL); |
281 | if (!iov) { | 261 | if (!iov) { |
282 | pr_err("Unable to allocate fd_do_readv iov[]\n"); | 262 | pr_err("Unable to allocate fd_do_readv iov[]\n"); |
283 | return -ENOMEM; | 263 | return -ENOMEM; |
284 | } | 264 | } |
285 | 265 | ||
286 | for_each_sg(task->task_sg, sg, task->task_sg_nents, i) { | 266 | for_each_sg(sgl, sg, sgl_nents, i) { |
287 | iov[i].iov_len = sg->length; | 267 | iov[i].iov_len = sg->length; |
288 | iov[i].iov_base = sg_virt(sg); | 268 | iov[i].iov_base = sg_virt(sg); |
289 | } | 269 | } |
290 | 270 | ||
291 | old_fs = get_fs(); | 271 | old_fs = get_fs(); |
292 | set_fs(get_ds()); | 272 | set_fs(get_ds()); |
293 | ret = vfs_readv(fd, &iov[0], task->task_sg_nents, &pos); | 273 | ret = vfs_readv(fd, &iov[0], sgl_nents, &pos); |
294 | set_fs(old_fs); | 274 | set_fs(old_fs); |
295 | 275 | ||
296 | kfree(iov); | 276 | kfree(iov); |
@@ -300,10 +280,10 @@ static int fd_do_readv(struct se_task *task) | |||
300 | * block_device. | 280 | * block_device. |
301 | */ | 281 | */ |
302 | if (S_ISBLK(fd->f_dentry->d_inode->i_mode)) { | 282 | if (S_ISBLK(fd->f_dentry->d_inode->i_mode)) { |
303 | if (ret < 0 || ret != task->task_se_cmd->data_length) { | 283 | if (ret < 0 || ret != cmd->data_length) { |
304 | pr_err("vfs_readv() returned %d," | 284 | pr_err("vfs_readv() returned %d," |
305 | " expecting %d for S_ISBLK\n", ret, | 285 | " expecting %d for S_ISBLK\n", ret, |
306 | (int)task->task_se_cmd->data_length); | 286 | (int)cmd->data_length); |
307 | return (ret < 0 ? ret : -EINVAL); | 287 | return (ret < 0 ? ret : -EINVAL); |
308 | } | 288 | } |
309 | } else { | 289 | } else { |
@@ -317,38 +297,38 @@ static int fd_do_readv(struct se_task *task) | |||
317 | return 1; | 297 | return 1; |
318 | } | 298 | } |
319 | 299 | ||
320 | static int fd_do_writev(struct se_task *task) | 300 | static int fd_do_writev(struct se_cmd *cmd, struct scatterlist *sgl, |
301 | u32 sgl_nents) | ||
321 | { | 302 | { |
322 | struct fd_request *req = FILE_REQ(task); | 303 | struct se_device *se_dev = cmd->se_dev; |
323 | struct se_device *se_dev = req->fd_task.task_se_cmd->se_dev; | ||
324 | struct fd_dev *dev = se_dev->dev_ptr; | 304 | struct fd_dev *dev = se_dev->dev_ptr; |
325 | struct file *fd = dev->fd_file; | 305 | struct file *fd = dev->fd_file; |
326 | struct scatterlist *sg = task->task_sg; | 306 | struct scatterlist *sg; |
327 | struct iovec *iov; | 307 | struct iovec *iov; |
328 | mm_segment_t old_fs; | 308 | mm_segment_t old_fs; |
329 | loff_t pos = (task->task_se_cmd->t_task_lba * | 309 | loff_t pos = (cmd->t_task_lba * |
330 | se_dev->se_sub_dev->se_dev_attrib.block_size); | 310 | se_dev->se_sub_dev->se_dev_attrib.block_size); |
331 | int ret, i = 0; | 311 | int ret, i = 0; |
332 | 312 | ||
333 | iov = kzalloc(sizeof(struct iovec) * task->task_sg_nents, GFP_KERNEL); | 313 | iov = kzalloc(sizeof(struct iovec) * sgl_nents, GFP_KERNEL); |
334 | if (!iov) { | 314 | if (!iov) { |
335 | pr_err("Unable to allocate fd_do_writev iov[]\n"); | 315 | pr_err("Unable to allocate fd_do_writev iov[]\n"); |
336 | return -ENOMEM; | 316 | return -ENOMEM; |
337 | } | 317 | } |
338 | 318 | ||
339 | for_each_sg(task->task_sg, sg, task->task_sg_nents, i) { | 319 | for_each_sg(sgl, sg, sgl_nents, i) { |
340 | iov[i].iov_len = sg->length; | 320 | iov[i].iov_len = sg->length; |
341 | iov[i].iov_base = sg_virt(sg); | 321 | iov[i].iov_base = sg_virt(sg); |
342 | } | 322 | } |
343 | 323 | ||
344 | old_fs = get_fs(); | 324 | old_fs = get_fs(); |
345 | set_fs(get_ds()); | 325 | set_fs(get_ds()); |
346 | ret = vfs_writev(fd, &iov[0], task->task_sg_nents, &pos); | 326 | ret = vfs_writev(fd, &iov[0], sgl_nents, &pos); |
347 | set_fs(old_fs); | 327 | set_fs(old_fs); |
348 | 328 | ||
349 | kfree(iov); | 329 | kfree(iov); |
350 | 330 | ||
351 | if (ret < 0 || ret != task->task_se_cmd->data_length) { | 331 | if (ret < 0 || ret != cmd->data_length) { |
352 | pr_err("vfs_writev() returned %d\n", ret); | 332 | pr_err("vfs_writev() returned %d\n", ret); |
353 | return (ret < 0 ? ret : -EINVAL); | 333 | return (ret < 0 ? ret : -EINVAL); |
354 | } | 334 | } |
@@ -369,7 +349,7 @@ static void fd_emulate_sync_cache(struct se_cmd *cmd) | |||
369 | * for this SYNCHRONIZE_CACHE op | 349 | * for this SYNCHRONIZE_CACHE op |
370 | */ | 350 | */ |
371 | if (immed) | 351 | if (immed) |
372 | transport_complete_sync_cache(cmd, 1); | 352 | target_complete_cmd(cmd, SAM_STAT_GOOD); |
373 | 353 | ||
374 | /* | 354 | /* |
375 | * Determine if we will be flushing the entire device. | 355 | * Determine if we will be flushing the entire device. |
@@ -389,35 +369,37 @@ static void fd_emulate_sync_cache(struct se_cmd *cmd) | |||
389 | if (ret != 0) | 369 | if (ret != 0) |
390 | pr_err("FILEIO: vfs_fsync_range() failed: %d\n", ret); | 370 | pr_err("FILEIO: vfs_fsync_range() failed: %d\n", ret); |
391 | 371 | ||
392 | if (!immed) | 372 | if (immed) |
393 | transport_complete_sync_cache(cmd, ret == 0); | 373 | return; |
374 | |||
375 | if (ret) { | ||
376 | cmd->scsi_sense_reason = TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE; | ||
377 | target_complete_cmd(cmd, SAM_STAT_CHECK_CONDITION); | ||
378 | } else { | ||
379 | target_complete_cmd(cmd, SAM_STAT_GOOD); | ||
380 | } | ||
394 | } | 381 | } |
395 | 382 | ||
396 | /* | 383 | static void fd_emulate_write_fua(struct se_cmd *cmd) |
397 | * WRITE Force Unit Access (FUA) emulation on a per struct se_task | ||
398 | * LBA range basis.. | ||
399 | */ | ||
400 | static void fd_emulate_write_fua(struct se_cmd *cmd, struct se_task *task) | ||
401 | { | 384 | { |
402 | struct se_device *dev = cmd->se_dev; | 385 | struct se_device *dev = cmd->se_dev; |
403 | struct fd_dev *fd_dev = dev->dev_ptr; | 386 | struct fd_dev *fd_dev = dev->dev_ptr; |
404 | loff_t start = task->task_se_cmd->t_task_lba * | 387 | loff_t start = cmd->t_task_lba * |
405 | dev->se_sub_dev->se_dev_attrib.block_size; | 388 | dev->se_sub_dev->se_dev_attrib.block_size; |
406 | loff_t end = start + task->task_se_cmd->data_length; | 389 | loff_t end = start + cmd->data_length; |
407 | int ret; | 390 | int ret; |
408 | 391 | ||
409 | pr_debug("FILEIO: FUA WRITE LBA: %llu, bytes: %u\n", | 392 | pr_debug("FILEIO: FUA WRITE LBA: %llu, bytes: %u\n", |
410 | task->task_se_cmd->t_task_lba, | 393 | cmd->t_task_lba, cmd->data_length); |
411 | task->task_se_cmd->data_length); | ||
412 | 394 | ||
413 | ret = vfs_fsync_range(fd_dev->fd_file, start, end, 1); | 395 | ret = vfs_fsync_range(fd_dev->fd_file, start, end, 1); |
414 | if (ret != 0) | 396 | if (ret != 0) |
415 | pr_err("FILEIO: vfs_fsync_range() failed: %d\n", ret); | 397 | pr_err("FILEIO: vfs_fsync_range() failed: %d\n", ret); |
416 | } | 398 | } |
417 | 399 | ||
418 | static int fd_do_task(struct se_task *task) | 400 | static int fd_execute_cmd(struct se_cmd *cmd, struct scatterlist *sgl, |
401 | u32 sgl_nents, enum dma_data_direction data_direction) | ||
419 | { | 402 | { |
420 | struct se_cmd *cmd = task->task_se_cmd; | ||
421 | struct se_device *dev = cmd->se_dev; | 403 | struct se_device *dev = cmd->se_dev; |
422 | int ret = 0; | 404 | int ret = 0; |
423 | 405 | ||
@@ -425,10 +407,10 @@ static int fd_do_task(struct se_task *task) | |||
425 | * Call vectorized fileio functions to map struct scatterlist | 407 | * Call vectorized fileio functions to map struct scatterlist |
426 | * physical memory addresses to struct iovec virtual memory. | 408 | * physical memory addresses to struct iovec virtual memory. |
427 | */ | 409 | */ |
428 | if (task->task_data_direction == DMA_FROM_DEVICE) { | 410 | if (data_direction == DMA_FROM_DEVICE) { |
429 | ret = fd_do_readv(task); | 411 | ret = fd_do_readv(cmd, sgl, sgl_nents); |
430 | } else { | 412 | } else { |
431 | ret = fd_do_writev(task); | 413 | ret = fd_do_writev(cmd, sgl, sgl_nents); |
432 | 414 | ||
433 | if (ret > 0 && | 415 | if (ret > 0 && |
434 | dev->se_sub_dev->se_dev_attrib.emulate_write_cache > 0 && | 416 | dev->se_sub_dev->se_dev_attrib.emulate_write_cache > 0 && |
@@ -439,7 +421,7 @@ static int fd_do_task(struct se_task *task) | |||
439 | * and return some sense data to let the initiator | 421 | * and return some sense data to let the initiator |
440 | * know the FUA WRITE cache sync failed..? | 422 | * know the FUA WRITE cache sync failed..? |
441 | */ | 423 | */ |
442 | fd_emulate_write_fua(cmd, task); | 424 | fd_emulate_write_fua(cmd); |
443 | } | 425 | } |
444 | 426 | ||
445 | } | 427 | } |
@@ -448,24 +430,11 @@ static int fd_do_task(struct se_task *task) | |||
448 | cmd->scsi_sense_reason = TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE; | 430 | cmd->scsi_sense_reason = TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE; |
449 | return ret; | 431 | return ret; |
450 | } | 432 | } |
451 | if (ret) { | 433 | if (ret) |
452 | task->task_scsi_status = GOOD; | 434 | target_complete_cmd(cmd, SAM_STAT_GOOD); |
453 | transport_complete_task(task, 1); | ||
454 | } | ||
455 | return 0; | 435 | return 0; |
456 | } | 436 | } |
457 | 437 | ||
458 | /* fd_free_task(): (Part of se_subsystem_api_t template) | ||
459 | * | ||
460 | * | ||
461 | */ | ||
462 | static void fd_free_task(struct se_task *task) | ||
463 | { | ||
464 | struct fd_request *req = FILE_REQ(task); | ||
465 | |||
466 | kfree(req); | ||
467 | } | ||
468 | |||
469 | enum { | 438 | enum { |
470 | Opt_fd_dev_name, Opt_fd_dev_size, Opt_fd_buffered_io, Opt_err | 439 | Opt_fd_dev_name, Opt_fd_dev_size, Opt_fd_buffered_io, Opt_err |
471 | }; | 440 | }; |
@@ -618,10 +587,8 @@ static struct se_subsystem_api fileio_template = { | |||
618 | .allocate_virtdevice = fd_allocate_virtdevice, | 587 | .allocate_virtdevice = fd_allocate_virtdevice, |
619 | .create_virtdevice = fd_create_virtdevice, | 588 | .create_virtdevice = fd_create_virtdevice, |
620 | .free_device = fd_free_device, | 589 | .free_device = fd_free_device, |
621 | .alloc_task = fd_alloc_task, | 590 | .execute_cmd = fd_execute_cmd, |
622 | .do_task = fd_do_task, | ||
623 | .do_sync_cache = fd_emulate_sync_cache, | 591 | .do_sync_cache = fd_emulate_sync_cache, |
624 | .free_task = fd_free_task, | ||
625 | .check_configfs_dev_params = fd_check_configfs_dev_params, | 592 | .check_configfs_dev_params = fd_check_configfs_dev_params, |
626 | .set_configfs_dev_params = fd_set_configfs_dev_params, | 593 | .set_configfs_dev_params = fd_set_configfs_dev_params, |
627 | .show_configfs_dev_params = fd_show_configfs_dev_params, | 594 | .show_configfs_dev_params = fd_show_configfs_dev_params, |
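The conversion also settles on a single completion convention: instead of transport_complete_task() with a success flag, a backend reports SAM status directly through target_complete_cmd() and stashes a sense reason on the command for failures, as in the fd_emulate_sync_cache() hunk above:

```c
if (ret) {
	cmd->scsi_sense_reason = TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
	target_complete_cmd(cmd, SAM_STAT_CHECK_CONDITION);
} else {
	target_complete_cmd(cmd, SAM_STAT_GOOD);
}
```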
diff --git a/drivers/target/target_core_file.h b/drivers/target/target_core_file.h
index 59e6e73106c2..fbd59ef7d8be 100644
--- a/drivers/target/target_core_file.h
+++ b/drivers/target/target_core_file.h
@@ -12,10 +12,6 @@ | |||
12 | #define RRF_EMULATE_CDB 0x01 | 12 | #define RRF_EMULATE_CDB 0x01 |
13 | #define RRF_GOT_LBA 0x02 | 13 | #define RRF_GOT_LBA 0x02 |
14 | 14 | ||
15 | struct fd_request { | ||
16 | struct se_task fd_task; | ||
17 | }; | ||
18 | |||
19 | #define FBDF_HAS_PATH 0x01 | 15 | #define FBDF_HAS_PATH 0x01 |
20 | #define FBDF_HAS_SIZE 0x02 | 16 | #define FBDF_HAS_SIZE 0x02 |
21 | #define FDBD_USE_BUFFERED_IO 0x04 | 17 | #define FDBD_USE_BUFFERED_IO 0x04 |
diff --git a/drivers/target/target_core_iblock.c b/drivers/target/target_core_iblock.c
index 82ec0d3fa9dd..fd47950727b4 100644
--- a/drivers/target/target_core_iblock.c
+++ b/drivers/target/target_core_iblock.c
@@ -189,26 +189,6 @@ static void iblock_free_device(void *p) | |||
189 | kfree(ib_dev); | 189 | kfree(ib_dev); |
190 | } | 190 | } |
191 | 191 | ||
192 | static inline struct iblock_req *IBLOCK_REQ(struct se_task *task) | ||
193 | { | ||
194 | return container_of(task, struct iblock_req, ib_task); | ||
195 | } | ||
196 | |||
197 | static struct se_task * | ||
198 | iblock_alloc_task(unsigned char *cdb) | ||
199 | { | ||
200 | struct iblock_req *ib_req; | ||
201 | |||
202 | ib_req = kzalloc(sizeof(struct iblock_req), GFP_KERNEL); | ||
203 | if (!ib_req) { | ||
204 | pr_err("Unable to allocate memory for struct iblock_req\n"); | ||
205 | return NULL; | ||
206 | } | ||
207 | |||
208 | atomic_set(&ib_req->pending, 1); | ||
209 | return &ib_req->ib_task; | ||
210 | } | ||
211 | |||
212 | static unsigned long long iblock_emulate_read_cap_with_block_size( | 192 | static unsigned long long iblock_emulate_read_cap_with_block_size( |
213 | struct se_device *dev, | 193 | struct se_device *dev, |
214 | struct block_device *bd, | 194 | struct block_device *bd, |
@@ -295,8 +275,16 @@ static void iblock_end_io_flush(struct bio *bio, int err) | |||
295 | if (err) | 275 | if (err) |
296 | pr_err("IBLOCK: cache flush failed: %d\n", err); | 276 | pr_err("IBLOCK: cache flush failed: %d\n", err); |
297 | 277 | ||
298 | if (cmd) | 278 | if (cmd) { |
299 | transport_complete_sync_cache(cmd, err == 0); | 279 | if (err) { |
280 | cmd->scsi_sense_reason = | ||
281 | TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE; | ||
282 | target_complete_cmd(cmd, SAM_STAT_CHECK_CONDITION); | ||
283 | } else { | ||
284 | target_complete_cmd(cmd, SAM_STAT_GOOD); | ||
285 | } | ||
286 | } | ||
287 | |||
300 | bio_put(bio); | 288 | bio_put(bio); |
301 | } | 289 | } |
302 | 290 | ||
@@ -315,7 +303,7 @@ static void iblock_emulate_sync_cache(struct se_cmd *cmd) | |||
315 | * for this SYNCHRONIZE_CACHE op. | 303 | * for this SYNCHRONIZE_CACHE op. |
316 | */ | 304 | */ |
317 | if (immed) | 305 | if (immed) |
318 | transport_complete_sync_cache(cmd, 1); | 306 | target_complete_cmd(cmd, SAM_STAT_GOOD); |
319 | 307 | ||
320 | bio = bio_alloc(GFP_KERNEL, 0); | 308 | bio = bio_alloc(GFP_KERNEL, 0); |
321 | bio->bi_end_io = iblock_end_io_flush; | 309 | bio->bi_end_io = iblock_end_io_flush; |
@@ -334,11 +322,6 @@ static int iblock_do_discard(struct se_device *dev, sector_t lba, u32 range) | |||
334 | return blkdev_issue_discard(bd, lba, range, GFP_KERNEL, barrier); | 322 | return blkdev_issue_discard(bd, lba, range, GFP_KERNEL, barrier); |
335 | } | 323 | } |
336 | 324 | ||
337 | static void iblock_free_task(struct se_task *task) | ||
338 | { | ||
339 | kfree(IBLOCK_REQ(task)); | ||
340 | } | ||
341 | |||
342 | enum { | 325 | enum { |
343 | Opt_udev_path, Opt_force, Opt_err | 326 | Opt_udev_path, Opt_force, Opt_err |
344 | }; | 327 | }; |
@@ -447,19 +430,35 @@ static ssize_t iblock_show_configfs_dev_params( | |||
447 | return bl; | 430 | return bl; |
448 | } | 431 | } |
449 | 432 | ||
433 | static void iblock_complete_cmd(struct se_cmd *cmd) | ||
434 | { | ||
435 | struct iblock_req *ibr = cmd->priv; | ||
436 | u8 status; | ||
437 | |||
438 | if (!atomic_dec_and_test(&ibr->pending)) | ||
439 | return; | ||
440 | |||
441 | if (atomic_read(&ibr->ib_bio_err_cnt)) | ||
442 | status = SAM_STAT_CHECK_CONDITION; | ||
443 | else | ||
444 | status = SAM_STAT_GOOD; | ||
445 | |||
446 | target_complete_cmd(cmd, status); | ||
447 | kfree(ibr); | ||
448 | } | ||
449 | |||
450 | static void iblock_bio_destructor(struct bio *bio) | 450 | static void iblock_bio_destructor(struct bio *bio) |
451 | { | 451 | { |
452 | struct se_task *task = bio->bi_private; | 452 | struct se_cmd *cmd = bio->bi_private; |
453 | struct iblock_dev *ib_dev = task->task_se_cmd->se_dev->dev_ptr; | 453 | struct iblock_dev *ib_dev = cmd->se_dev->dev_ptr; |
454 | 454 | ||
455 | bio_free(bio, ib_dev->ibd_bio_set); | 455 | bio_free(bio, ib_dev->ibd_bio_set); |
456 | } | 456 | } |
457 | 457 | ||
458 | static struct bio * | 458 | static struct bio * |
459 | iblock_get_bio(struct se_task *task, sector_t lba, u32 sg_num) | 459 | iblock_get_bio(struct se_cmd *cmd, sector_t lba, u32 sg_num) |
460 | { | 460 | { |
461 | struct iblock_dev *ib_dev = task->task_se_cmd->se_dev->dev_ptr; | 461 | struct iblock_dev *ib_dev = cmd->se_dev->dev_ptr; |
462 | struct iblock_req *ib_req = IBLOCK_REQ(task); | ||
463 | struct bio *bio; | 462 | struct bio *bio; |
464 | 463 | ||
465 | /* | 464 | /* |
@@ -475,20 +474,11 @@ iblock_get_bio(struct se_task *task, sector_t lba, u32 sg_num) | |||
475 | return NULL; | 474 | return NULL; |
476 | } | 475 | } |
477 | 476 | ||
478 | pr_debug("Allocated bio: %p task_sg_nents: %u using ibd_bio_set:" | ||
479 | " %p\n", bio, task->task_sg_nents, ib_dev->ibd_bio_set); | ||
480 | pr_debug("Allocated bio: %p task_size: %u\n", bio, | ||
481 | task->task_se_cmd->data_length); | ||
482 | |||
483 | bio->bi_bdev = ib_dev->ibd_bd; | 477 | bio->bi_bdev = ib_dev->ibd_bd; |
484 | bio->bi_private = task; | 478 | bio->bi_private = cmd; |
485 | bio->bi_destructor = iblock_bio_destructor; | 479 | bio->bi_destructor = iblock_bio_destructor; |
486 | bio->bi_end_io = &iblock_bio_done; | 480 | bio->bi_end_io = &iblock_bio_done; |
487 | bio->bi_sector = lba; | 481 | bio->bi_sector = lba; |
488 | atomic_inc(&ib_req->pending); | ||
489 | |||
490 | pr_debug("Set bio->bi_sector: %llu\n", (unsigned long long)bio->bi_sector); | ||
491 | pr_debug("Set ib_req->pending: %d\n", atomic_read(&ib_req->pending)); | ||
492 | return bio; | 482 | return bio; |
493 | } | 483 | } |
494 | 484 | ||
@@ -503,20 +493,21 @@ static void iblock_submit_bios(struct bio_list *list, int rw) | |||
503 | blk_finish_plug(&plug); | 493 | blk_finish_plug(&plug); |
504 | } | 494 | } |
505 | 495 | ||
506 | static int iblock_do_task(struct se_task *task) | 496 | static int iblock_execute_cmd(struct se_cmd *cmd, struct scatterlist *sgl, |
497 | u32 sgl_nents, enum dma_data_direction data_direction) | ||
507 | { | 498 | { |
508 | struct se_cmd *cmd = task->task_se_cmd; | ||
509 | struct se_device *dev = cmd->se_dev; | 499 | struct se_device *dev = cmd->se_dev; |
510 | struct iblock_req *ibr = IBLOCK_REQ(task); | 500 | struct iblock_req *ibr; |
511 | struct bio *bio; | 501 | struct bio *bio; |
512 | struct bio_list list; | 502 | struct bio_list list; |
513 | struct scatterlist *sg; | 503 | struct scatterlist *sg; |
514 | u32 i, sg_num = task->task_sg_nents; | 504 | u32 sg_num = sgl_nents; |
515 | sector_t block_lba; | 505 | sector_t block_lba; |
516 | unsigned bio_cnt; | 506 | unsigned bio_cnt; |
517 | int rw; | 507 | int rw; |
508 | int i; | ||
518 | 509 | ||
519 | if (task->task_data_direction == DMA_TO_DEVICE) { | 510 | if (data_direction == DMA_TO_DEVICE) { |
520 | /* | 511 | /* |
521 | * Force data to disk if we pretend to not have a volatile | 512 | * Force data to disk if we pretend to not have a volatile |
522 | * write cache, or the initiator set the Force Unit Access bit. | 513 | * write cache, or the initiator set the Force Unit Access bit. |
@@ -532,8 +523,8 @@ static int iblock_do_task(struct se_task *task) | |||
532 | } | 523 | } |
533 | 524 | ||
534 | /* | 525 | /* |
535 | * Do starting conversion up from non 512-byte blocksize with | 526 | * Convert the blocksize advertised to the initiator to the 512 byte |
536 | * struct se_task SCSI blocksize into Linux/Block 512 units for BIO. | 527 | * units unconditionally used by the Linux block layer. |
537 | */ | 528 | */ |
538 | if (dev->se_sub_dev->se_dev_attrib.block_size == 4096) | 529 | if (dev->se_sub_dev->se_dev_attrib.block_size == 4096) |
539 | block_lba = (cmd->t_task_lba << 3); | 530 | block_lba = (cmd->t_task_lba << 3); |
@@ -550,17 +541,22 @@ static int iblock_do_task(struct se_task *task) | |||
550 | return -ENOSYS; | 541 | return -ENOSYS; |
551 | } | 542 | } |
552 | 543 | ||
553 | bio = iblock_get_bio(task, block_lba, sg_num); | 544 | ibr = kzalloc(sizeof(struct iblock_req), GFP_KERNEL); |
554 | if (!bio) { | 545 | if (!ibr) |
555 | cmd->scsi_sense_reason = TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE; | 546 | goto fail; |
556 | return -ENOMEM; | 547 | cmd->priv = ibr; |
557 | } | 548 | |
549 | bio = iblock_get_bio(cmd, block_lba, sgl_nents); | ||
550 | if (!bio) | ||
551 | goto fail_free_ibr; | ||
558 | 552 | ||
559 | bio_list_init(&list); | 553 | bio_list_init(&list); |
560 | bio_list_add(&list, bio); | 554 | bio_list_add(&list, bio); |
555 | |||
556 | atomic_set(&ibr->pending, 2); | ||
561 | bio_cnt = 1; | 557 | bio_cnt = 1; |
562 | 558 | ||
563 | for_each_sg(task->task_sg, sg, task->task_sg_nents, i) { | 559 | for_each_sg(sgl, sg, sgl_nents, i) { |
564 | /* | 560 | /* |
565 | * XXX: if the length the device accepts is shorter than the | 561 | * XXX: if the length the device accepts is shorter than the |
566 | * length of the S/G list entry this will cause and | 562 | * length of the S/G list entry this will cause and |
@@ -573,9 +569,11 @@ static int iblock_do_task(struct se_task *task) | |||
573 | bio_cnt = 0; | 569 | bio_cnt = 0; |
574 | } | 570 | } |
575 | 571 | ||
576 | bio = iblock_get_bio(task, block_lba, sg_num); | 572 | bio = iblock_get_bio(cmd, block_lba, sg_num); |
577 | if (!bio) | 573 | if (!bio) |
578 | goto fail; | 574 | goto fail_put_bios; |
575 | |||
576 | atomic_inc(&ibr->pending); | ||
579 | bio_list_add(&list, bio); | 577 | bio_list_add(&list, bio); |
580 | bio_cnt++; | 578 | bio_cnt++; |
581 | } | 579 | } |
@@ -586,17 +584,16 @@ static int iblock_do_task(struct se_task *task) | |||
586 | } | 584 | } |
587 | 585 | ||
588 | iblock_submit_bios(&list, rw); | 586 | iblock_submit_bios(&list, rw); |
589 | 587 | iblock_complete_cmd(cmd); | |
590 | if (atomic_dec_and_test(&ibr->pending)) { | ||
591 | transport_complete_task(task, | ||
592 | !atomic_read(&ibr->ib_bio_err_cnt)); | ||
593 | } | ||
594 | return 0; | 588 | return 0; |
595 | 589 | ||
596 | fail: | 590 | fail_put_bios: |
597 | while ((bio = bio_list_pop(&list))) | 591 | while ((bio = bio_list_pop(&list))) |
598 | bio_put(bio); | 592 | bio_put(bio); |
593 | fail_free_ibr: | ||
594 | kfree(ibr); | ||
599 | cmd->scsi_sense_reason = TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE; | 595 | cmd->scsi_sense_reason = TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE; |
596 | fail: | ||
600 | return -ENOMEM; | 597 | return -ENOMEM; |
601 | } | 598 | } |
602 | 599 | ||
@@ -621,8 +618,8 @@ static sector_t iblock_get_blocks(struct se_device *dev) | |||
621 | 618 | ||
622 | static void iblock_bio_done(struct bio *bio, int err) | 619 | static void iblock_bio_done(struct bio *bio, int err) |
623 | { | 620 | { |
624 | struct se_task *task = bio->bi_private; | 621 | struct se_cmd *cmd = bio->bi_private; |
625 | struct iblock_req *ibr = IBLOCK_REQ(task); | 622 | struct iblock_req *ibr = cmd->priv; |
626 | 623 | ||
627 | /* | 624 | /* |
628 | * Set -EIO if !BIO_UPTODATE and the passed is still err=0 | 625 | * Set -EIO if !BIO_UPTODATE and the passed is still err=0 |
@@ -642,14 +639,7 @@ static void iblock_bio_done(struct bio *bio, int err) | |||
642 | 639 | ||
643 | bio_put(bio); | 640 | bio_put(bio); |
644 | 641 | ||
645 | if (!atomic_dec_and_test(&ibr->pending)) | 642 | iblock_complete_cmd(cmd); |
646 | return; | ||
647 | |||
648 | pr_debug("done[%p] bio: %p task_lba: %llu bio_lba: %llu err=%d\n", | ||
649 | task, bio, task->task_se_cmd->t_task_lba, | ||
650 | (unsigned long long)bio->bi_sector, err); | ||
651 | |||
652 | transport_complete_task(task, !atomic_read(&ibr->ib_bio_err_cnt)); | ||
653 | } | 643 | } |
654 | 644 | ||
655 | static struct se_subsystem_api iblock_template = { | 645 | static struct se_subsystem_api iblock_template = { |
@@ -663,11 +653,9 @@ static struct se_subsystem_api iblock_template = { | |||
663 | .allocate_virtdevice = iblock_allocate_virtdevice, | 653 | .allocate_virtdevice = iblock_allocate_virtdevice, |
664 | .create_virtdevice = iblock_create_virtdevice, | 654 | .create_virtdevice = iblock_create_virtdevice, |
665 | .free_device = iblock_free_device, | 655 | .free_device = iblock_free_device, |
666 | .alloc_task = iblock_alloc_task, | 656 | .execute_cmd = iblock_execute_cmd, |
667 | .do_task = iblock_do_task, | ||
668 | .do_discard = iblock_do_discard, | 657 | .do_discard = iblock_do_discard, |
669 | .do_sync_cache = iblock_emulate_sync_cache, | 658 | .do_sync_cache = iblock_emulate_sync_cache, |
670 | .free_task = iblock_free_task, | ||
671 | .check_configfs_dev_params = iblock_check_configfs_dev_params, | 659 | .check_configfs_dev_params = iblock_check_configfs_dev_params, |
672 | .set_configfs_dev_params = iblock_set_configfs_dev_params, | 660 | .set_configfs_dev_params = iblock_set_configfs_dev_params, |
673 | .show_configfs_dev_params = iblock_show_configfs_dev_params, | 661 | .show_configfs_dev_params = iblock_show_configfs_dev_params, |
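iblock_execute_cmd() seeds ibr->pending at 2 (one for the first bio, one for the submitter itself), takes an extra reference per additional bio, and drops its own reference with the final iblock_complete_cmd() call, so the command completes exactly once however bio completion races with submission. A minimal user-space C11 sketch of that counting scheme (req, unit_done, and complete_req are illustrative names, not kernel APIs):

```c
#include <stdatomic.h>
#include <stdio.h>

struct req {
	atomic_int pending;	/* submitter's reference + one per unit */
	atomic_int err_cnt;	/* sticky error count, like ib_bio_err_cnt */
};

static void complete_req(struct req *r)
{
	/* Only the reference that drops pending to zero completes. */
	if (atomic_fetch_sub(&r->pending, 1) != 1)
		return;
	printf("complete: %s\n",
	       atomic_load(&r->err_cnt) ? "CHECK_CONDITION" : "GOOD");
}

static void unit_done(struct req *r, int err)	/* ~iblock_bio_done() */
{
	if (err)
		atomic_fetch_add(&r->err_cnt, 1);
	complete_req(r);
}

int main(void)
{
	struct req r;

	atomic_init(&r.pending, 2);		/* first unit + submitter */
	atomic_init(&r.err_cnt, 0);
	atomic_fetch_add(&r.pending, 1);	/* a second unit queued */

	unit_done(&r, 0);
	unit_done(&r, 0);
	complete_req(&r);	/* submitter drops its reference last */
	return 0;
}
```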
diff --git a/drivers/target/target_core_iblock.h b/drivers/target/target_core_iblock.h
index e929370b6fd3..66cf7b9e205e 100644
--- a/drivers/target/target_core_iblock.h
+++ b/drivers/target/target_core_iblock.h
@@ -7,7 +7,6 @@ | |||
7 | #define IBLOCK_LBA_SHIFT 9 | 7 | #define IBLOCK_LBA_SHIFT 9 |
8 | 8 | ||
9 | struct iblock_req { | 9 | struct iblock_req { |
10 | struct se_task ib_task; | ||
11 | atomic_t pending; | 10 | atomic_t pending; |
12 | atomic_t ib_bio_err_cnt; | 11 | atomic_t ib_bio_err_cnt; |
13 | } ____cacheline_aligned; | 12 | } ____cacheline_aligned; |
diff --git a/drivers/target/target_core_pscsi.c b/drivers/target/target_core_pscsi.c
index 46eb017287a6..4ce2cf642fce 100644
--- a/drivers/target/target_core_pscsi.c
+++ b/drivers/target/target_core_pscsi.c
@@ -663,22 +663,12 @@ static void pscsi_free_device(void *p) | |||
663 | kfree(pdv); | 663 | kfree(pdv); |
664 | } | 664 | } |
665 | 665 | ||
666 | static inline struct pscsi_plugin_task *PSCSI_TASK(struct se_task *task) | 666 | static int pscsi_transport_complete(struct se_cmd *cmd, struct scatterlist *sg) |
667 | { | 667 | { |
668 | return container_of(task, struct pscsi_plugin_task, pscsi_task); | 668 | struct pscsi_dev_virt *pdv = cmd->se_dev->dev_ptr; |
669 | } | ||
670 | |||
671 | |||
672 | /* pscsi_transport_complete(): | ||
673 | * | ||
674 | * | ||
675 | */ | ||
676 | static int pscsi_transport_complete(struct se_task *task) | ||
677 | { | ||
678 | struct pscsi_dev_virt *pdv = task->task_se_cmd->se_dev->dev_ptr; | ||
679 | struct scsi_device *sd = pdv->pdv_sd; | 669 | struct scsi_device *sd = pdv->pdv_sd; |
680 | int result; | 670 | int result; |
681 | struct pscsi_plugin_task *pt = PSCSI_TASK(task); | 671 | struct pscsi_plugin_task *pt = cmd->priv; |
682 | unsigned char *cdb = &pt->pscsi_cdb[0]; | 672 | unsigned char *cdb = &pt->pscsi_cdb[0]; |
683 | 673 | ||
684 | result = pt->pscsi_result; | 674 | result = pt->pscsi_result; |
@@ -688,12 +678,11 @@ static int pscsi_transport_complete(struct se_task *task) | |||
688 | */ | 678 | */ |
689 | if (((cdb[0] == MODE_SENSE) || (cdb[0] == MODE_SENSE_10)) && | 679 | if (((cdb[0] == MODE_SENSE) || (cdb[0] == MODE_SENSE_10)) && |
690 | (status_byte(result) << 1) == SAM_STAT_GOOD) { | 680 | (status_byte(result) << 1) == SAM_STAT_GOOD) { |
691 | if (!task->task_se_cmd->se_deve) | 681 | if (!cmd->se_deve) |
692 | goto after_mode_sense; | 682 | goto after_mode_sense; |
693 | 683 | ||
694 | if (task->task_se_cmd->se_deve->lun_flags & | 684 | if (cmd->se_deve->lun_flags & TRANSPORT_LUNFLAGS_READ_ONLY) { |
695 | TRANSPORT_LUNFLAGS_READ_ONLY) { | 685 | unsigned char *buf = transport_kmap_data_sg(cmd); |
696 | unsigned char *buf = transport_kmap_data_sg(task->task_se_cmd); | ||
697 | 686 | ||
698 | if (cdb[0] == MODE_SENSE_10) { | 687 | if (cdb[0] == MODE_SENSE_10) { |
699 | if (!(buf[3] & 0x80)) | 688 | if (!(buf[3] & 0x80)) |
@@ -703,7 +692,7 @@ static int pscsi_transport_complete(struct se_task *task) | |||
703 | buf[2] |= 0x80; | 692 | buf[2] |= 0x80; |
704 | } | 693 | } |
705 | 694 | ||
706 | transport_kunmap_data_sg(task->task_se_cmd); | 695 | transport_kunmap_data_sg(cmd); |
707 | } | 696 | } |
708 | } | 697 | } |
709 | after_mode_sense: | 698 | after_mode_sense: |
@@ -722,7 +711,6 @@ after_mode_sense: | |||
722 | if (((cdb[0] == MODE_SELECT) || (cdb[0] == MODE_SELECT_10)) && | 711 | if (((cdb[0] == MODE_SELECT) || (cdb[0] == MODE_SELECT_10)) && |
723 | (status_byte(result) << 1) == SAM_STAT_GOOD) { | 712 | (status_byte(result) << 1) == SAM_STAT_GOOD) { |
724 | unsigned char *buf; | 713 | unsigned char *buf; |
725 | struct scatterlist *sg = task->task_sg; | ||
726 | u16 bdl; | 714 | u16 bdl; |
727 | u32 blocksize; | 715 | u32 blocksize; |
728 | 716 | ||
@@ -757,35 +745,6 @@ after_mode_select: | |||
757 | return 0; | 745 | return 0; |
758 | } | 746 | } |
759 | 747 | ||
760 | static struct se_task * | ||
761 | pscsi_alloc_task(unsigned char *cdb) | ||
762 | { | ||
763 | struct pscsi_plugin_task *pt; | ||
764 | |||
765 | /* | ||
766 | * Dynamically alloc cdb space, since it may be larger than | ||
767 | * TCM_MAX_COMMAND_SIZE | ||
768 | */ | ||
769 | pt = kzalloc(sizeof(*pt) + scsi_command_size(cdb), GFP_KERNEL); | ||
770 | if (!pt) { | ||
771 | pr_err("Unable to allocate struct pscsi_plugin_task\n"); | ||
772 | return NULL; | ||
773 | } | ||
774 | |||
775 | return &pt->pscsi_task; | ||
776 | } | ||
777 | |||
778 | static void pscsi_free_task(struct se_task *task) | ||
779 | { | ||
780 | struct pscsi_plugin_task *pt = PSCSI_TASK(task); | ||
781 | |||
782 | /* | ||
783 | * We do not release the bio(s) here associated with this task, as | ||
784 | * this is handled by bio_put() and pscsi_bi_endio(). | ||
785 | */ | ||
786 | kfree(pt); | ||
787 | } | ||
788 | |||
789 | enum { | 748 | enum { |
790 | Opt_scsi_host_id, Opt_scsi_channel_id, Opt_scsi_target_id, | 749 | Opt_scsi_host_id, Opt_scsi_channel_id, Opt_scsi_target_id, |
791 | Opt_scsi_lun_id, Opt_err | 750 | Opt_scsi_lun_id, Opt_err |
@@ -958,26 +917,25 @@ static inline struct bio *pscsi_get_bio(int sg_num) | |||
958 | return bio; | 917 | return bio; |
959 | } | 918 | } |
960 | 919 | ||
961 | static int pscsi_map_sg(struct se_task *task, struct scatterlist *task_sg, | 920 | static int pscsi_map_sg(struct se_cmd *cmd, struct scatterlist *sgl, |
921 | u32 sgl_nents, enum dma_data_direction data_direction, | ||
962 | struct bio **hbio) | 922 | struct bio **hbio) |
963 | { | 923 | { |
964 | struct se_cmd *cmd = task->task_se_cmd; | 924 | struct pscsi_dev_virt *pdv = cmd->se_dev->dev_ptr; |
965 | struct pscsi_dev_virt *pdv = task->task_se_cmd->se_dev->dev_ptr; | ||
966 | u32 task_sg_num = task->task_sg_nents; | ||
967 | struct bio *bio = NULL, *tbio = NULL; | 925 | struct bio *bio = NULL, *tbio = NULL; |
968 | struct page *page; | 926 | struct page *page; |
969 | struct scatterlist *sg; | 927 | struct scatterlist *sg; |
970 | u32 data_len = cmd->data_length, i, len, bytes, off; | 928 | u32 data_len = cmd->data_length, i, len, bytes, off; |
971 | int nr_pages = (cmd->data_length + task_sg[0].offset + | 929 | int nr_pages = (cmd->data_length + sgl[0].offset + |
972 | PAGE_SIZE - 1) >> PAGE_SHIFT; | 930 | PAGE_SIZE - 1) >> PAGE_SHIFT; |
973 | int nr_vecs = 0, rc; | 931 | int nr_vecs = 0, rc; |
974 | int rw = (task->task_data_direction == DMA_TO_DEVICE); | 932 | int rw = (data_direction == DMA_TO_DEVICE); |
975 | 933 | ||
976 | *hbio = NULL; | 934 | *hbio = NULL; |
977 | 935 | ||
978 | pr_debug("PSCSI: nr_pages: %d\n", nr_pages); | 936 | pr_debug("PSCSI: nr_pages: %d\n", nr_pages); |
979 | 937 | ||
980 | for_each_sg(task_sg, sg, task_sg_num, i) { | 938 | for_each_sg(sgl, sg, sgl_nents, i) { |
981 | page = sg_page(sg); | 939 | page = sg_page(sg); |
982 | off = sg->offset; | 940 | off = sg->offset; |
983 | len = sg->length; | 941 | len = sg->length; |
@@ -1009,7 +967,7 @@ static int pscsi_map_sg(struct se_task *task, struct scatterlist *task_sg, | |||
1009 | * Set *hbio pointer to handle the case: | 967 | * Set *hbio pointer to handle the case: |
1010 | * nr_pages > BIO_MAX_PAGES, where additional | 968 | * nr_pages > BIO_MAX_PAGES, where additional |
1011 | * bios need to be added to complete a given | 969 | * bios need to be added to complete a given |
1012 | * struct se_task | 970 | * command. |
1013 | */ | 971 | */ |
1014 | if (!*hbio) | 972 | if (!*hbio) |
1015 | *hbio = tbio = bio; | 973 | *hbio = tbio = bio; |
@@ -1049,7 +1007,7 @@ static int pscsi_map_sg(struct se_task *task, struct scatterlist *task_sg, | |||
1049 | } | 1007 | } |
1050 | } | 1008 | } |
1051 | 1009 | ||
1052 | return task->task_sg_nents; | 1010 | return sgl_nents; |
1053 | fail: | 1011 | fail: |
1054 | while (*hbio) { | 1012 | while (*hbio) { |
1055 | bio = *hbio; | 1013 | bio = *hbio; |
@@ -1061,53 +1019,61 @@ fail: | |||
1061 | return -ENOMEM; | 1019 | return -ENOMEM; |
1062 | } | 1020 | } |
1063 | 1021 | ||
1064 | static int pscsi_do_task(struct se_task *task) | 1022 | static int pscsi_execute_cmd(struct se_cmd *cmd, struct scatterlist *sgl, |
1023 | u32 sgl_nents, enum dma_data_direction data_direction) | ||
1065 | { | 1024 | { |
1066 | struct se_cmd *cmd = task->task_se_cmd; | 1025 | struct pscsi_dev_virt *pdv = cmd->se_dev->dev_ptr; |
1067 | struct pscsi_dev_virt *pdv = task->task_se_cmd->se_dev->dev_ptr; | 1026 | struct pscsi_plugin_task *pt; |
1068 | struct pscsi_plugin_task *pt = PSCSI_TASK(task); | ||
1069 | struct request *req; | 1027 | struct request *req; |
1070 | struct bio *hbio; | 1028 | struct bio *hbio; |
1071 | int ret; | 1029 | int ret; |
1072 | 1030 | ||
1031 | /* | ||
1032 | * Dynamically alloc cdb space, since it may be larger than | ||
1033 | * TCM_MAX_COMMAND_SIZE | ||
1034 | */ | ||
1035 | pt = kzalloc(sizeof(*pt) + scsi_command_size(cmd->t_task_cdb), GFP_KERNEL); | ||
1036 | if (!pt) { | ||
1037 | cmd->scsi_sense_reason = TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE; | ||
1038 | return -ENOMEM; | ||
1039 | } | ||
1040 | cmd->priv = pt; | ||
1041 | |||
1073 | memcpy(pt->pscsi_cdb, cmd->t_task_cdb, | 1042 | memcpy(pt->pscsi_cdb, cmd->t_task_cdb, |
1074 | scsi_command_size(cmd->t_task_cdb)); | 1043 | scsi_command_size(cmd->t_task_cdb)); |
1075 | 1044 | ||
1076 | if (task->task_se_cmd->se_cmd_flags & SCF_SCSI_NON_DATA_CDB) { | 1045 | if (cmd->se_cmd_flags & SCF_SCSI_NON_DATA_CDB) { |
1077 | req = blk_get_request(pdv->pdv_sd->request_queue, | 1046 | req = blk_get_request(pdv->pdv_sd->request_queue, |
1078 | (task->task_data_direction == DMA_TO_DEVICE), | 1047 | (data_direction == DMA_TO_DEVICE), |
1079 | GFP_KERNEL); | 1048 | GFP_KERNEL); |
1080 | if (!req || IS_ERR(req)) { | 1049 | if (!req || IS_ERR(req)) { |
1081 | pr_err("PSCSI: blk_get_request() failed: %ld\n", | 1050 | pr_err("PSCSI: blk_get_request() failed: %ld\n", |
1082 | req ? IS_ERR(req) : -ENOMEM); | 1051 | req ? IS_ERR(req) : -ENOMEM); |
1083 | cmd->scsi_sense_reason = | 1052 | cmd->scsi_sense_reason = |
1084 | TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE; | 1053 | TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE; |
1085 | return -ENODEV; | 1054 | goto fail; |
1086 | } | 1055 | } |
1087 | } else { | 1056 | } else { |
1088 | BUG_ON(!cmd->data_length); | 1057 | BUG_ON(!cmd->data_length); |
1089 | 1058 | ||
1090 | /* | 1059 | ret = pscsi_map_sg(cmd, sgl, sgl_nents, data_direction, &hbio); |
1091 | * Setup the main struct request for the task->task_sg[] payload | ||
1092 | */ | ||
1093 | ret = pscsi_map_sg(task, task->task_sg, &hbio); | ||
1094 | if (ret < 0) { | 1060 | if (ret < 0) { |
1095 | cmd->scsi_sense_reason = | 1061 | cmd->scsi_sense_reason = |
1096 | TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE; | 1062 | TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE; |
1097 | return ret; | 1063 | goto fail; |
1098 | } | 1064 | } |
1099 | 1065 | ||
1100 | req = blk_make_request(pdv->pdv_sd->request_queue, hbio, | 1066 | req = blk_make_request(pdv->pdv_sd->request_queue, hbio, |
1101 | GFP_KERNEL); | 1067 | GFP_KERNEL); |
1102 | if (IS_ERR(req)) { | 1068 | if (IS_ERR(req)) { |
1103 | pr_err("pSCSI: blk_make_request() failed\n"); | 1069 | pr_err("pSCSI: blk_make_request() failed\n"); |
1104 | goto fail; | 1070 | goto fail_free_bio; |
1105 | } | 1071 | } |
1106 | } | 1072 | } |
1107 | 1073 | ||
1108 | req->cmd_type = REQ_TYPE_BLOCK_PC; | 1074 | req->cmd_type = REQ_TYPE_BLOCK_PC; |
1109 | req->end_io = pscsi_req_done; | 1075 | req->end_io = pscsi_req_done; |
1110 | req->end_io_data = task; | 1076 | req->end_io_data = cmd; |
1111 | req->cmd_len = scsi_command_size(pt->pscsi_cdb); | 1077 | req->cmd_len = scsi_command_size(pt->pscsi_cdb); |
1112 | req->cmd = &pt->pscsi_cdb[0]; | 1078 | req->cmd = &pt->pscsi_cdb[0]; |
1113 | req->sense = &pt->pscsi_sense[0]; | 1079 | req->sense = &pt->pscsi_sense[0]; |
@@ -1119,12 +1085,12 @@ static int pscsi_do_task(struct se_task *task) | |||
1119 | req->retries = PS_RETRY; | 1085 | req->retries = PS_RETRY; |
1120 | 1086 | ||
1121 | blk_execute_rq_nowait(pdv->pdv_sd->request_queue, NULL, req, | 1087 | blk_execute_rq_nowait(pdv->pdv_sd->request_queue, NULL, req, |
1122 | (task->task_se_cmd->sam_task_attr == MSG_HEAD_TAG), | 1088 | (cmd->sam_task_attr == MSG_HEAD_TAG), |
1123 | pscsi_req_done); | 1089 | pscsi_req_done); |
1124 | 1090 | ||
1125 | return 0; | 1091 | return 0; |
1126 | 1092 | ||
1127 | fail: | 1093 | fail_free_bio: |
1128 | while (hbio) { | 1094 | while (hbio) { |
1129 | struct bio *bio = hbio; | 1095 | struct bio *bio = hbio; |
1130 | hbio = hbio->bi_next; | 1096 | hbio = hbio->bi_next; |
@@ -1132,16 +1098,14 @@ fail: | |||
1132 | bio_endio(bio, 0); /* XXX: should be error */ | 1098 | bio_endio(bio, 0); /* XXX: should be error */ |
1133 | } | 1099 | } |
1134 | cmd->scsi_sense_reason = TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE; | 1100 | cmd->scsi_sense_reason = TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE; |
1101 | fail: | ||
1102 | kfree(pt); | ||
1135 | return -ENOMEM; | 1103 | return -ENOMEM; |
1136 | } | 1104 | } |
1137 | 1105 | ||
1138 | /* pscsi_get_sense_buffer(): | 1106 | static unsigned char *pscsi_get_sense_buffer(struct se_cmd *cmd) |
1139 | * | ||
1140 | * | ||
1141 | */ | ||
1142 | static unsigned char *pscsi_get_sense_buffer(struct se_task *task) | ||
1143 | { | 1107 | { |
1144 | struct pscsi_plugin_task *pt = PSCSI_TASK(task); | 1108 | struct pscsi_plugin_task *pt = cmd->priv; |
1145 | 1109 | ||
1146 | return pt->pscsi_sense; | 1110 | return pt->pscsi_sense; |
1147 | } | 1111 | } |
@@ -1181,48 +1145,36 @@ static sector_t pscsi_get_blocks(struct se_device *dev) | |||
1181 | return 0; | 1145 | return 0; |
1182 | } | 1146 | } |
1183 | 1147 | ||
1184 | /* pscsi_handle_SAM_STATUS_failures(): | 1148 | static void pscsi_req_done(struct request *req, int uptodate) |
1185 | * | ||
1186 | * | ||
1187 | */ | ||
1188 | static inline void pscsi_process_SAM_status( | ||
1189 | struct se_task *task, | ||
1190 | struct pscsi_plugin_task *pt) | ||
1191 | { | 1149 | { |
1192 | task->task_scsi_status = status_byte(pt->pscsi_result); | 1150 | struct se_cmd *cmd = req->end_io_data; |
1193 | if (task->task_scsi_status) { | 1151 | struct pscsi_plugin_task *pt = cmd->priv; |
1194 | task->task_scsi_status <<= 1; | 1152 | |
1195 | pr_debug("PSCSI Status Byte exception at task: %p CDB:" | 1153 | pt->pscsi_result = req->errors; |
1196 | " 0x%02x Result: 0x%08x\n", task, pt->pscsi_cdb[0], | 1154 | pt->pscsi_resid = req->resid_len; |
1155 | |||
1156 | cmd->scsi_status = status_byte(pt->pscsi_result) << 1; | ||
1157 | if (cmd->scsi_status) { | ||
1158 | pr_debug("PSCSI Status Byte exception at cmd: %p CDB:" | ||
1159 | " 0x%02x Result: 0x%08x\n", cmd, pt->pscsi_cdb[0], | ||
1197 | pt->pscsi_result); | 1160 | pt->pscsi_result); |
1198 | } | 1161 | } |
1199 | 1162 | ||
1200 | switch (host_byte(pt->pscsi_result)) { | 1163 | switch (host_byte(pt->pscsi_result)) { |
1201 | case DID_OK: | 1164 | case DID_OK: |
1202 | transport_complete_task(task, (!task->task_scsi_status)); | 1165 | target_complete_cmd(cmd, cmd->scsi_status); |
1203 | break; | 1166 | break; |
1204 | default: | 1167 | default: |
1205 | pr_debug("PSCSI Host Byte exception at task: %p CDB:" | 1168 | pr_debug("PSCSI Host Byte exception at cmd: %p CDB:" |
1206 | " 0x%02x Result: 0x%08x\n", task, pt->pscsi_cdb[0], | 1169 | " 0x%02x Result: 0x%08x\n", cmd, pt->pscsi_cdb[0], |
1207 | pt->pscsi_result); | 1170 | pt->pscsi_result); |
1208 | task->task_scsi_status = SAM_STAT_CHECK_CONDITION; | 1171 | cmd->scsi_sense_reason = TCM_UNSUPPORTED_SCSI_OPCODE; |
1209 | task->task_se_cmd->scsi_sense_reason = | 1172 | target_complete_cmd(cmd, SAM_STAT_CHECK_CONDITION); |
1210 | TCM_UNSUPPORTED_SCSI_OPCODE; | ||
1211 | transport_complete_task(task, 0); | ||
1212 | break; | 1173 | break; |
1213 | } | 1174 | } |
1214 | } | ||
1215 | 1175 | ||
1216 | static void pscsi_req_done(struct request *req, int uptodate) | ||
1217 | { | ||
1218 | struct se_task *task = req->end_io_data; | ||
1219 | struct pscsi_plugin_task *pt = PSCSI_TASK(task); | ||
1220 | |||
1221 | pt->pscsi_result = req->errors; | ||
1222 | pt->pscsi_resid = req->resid_len; | ||
1223 | |||
1224 | pscsi_process_SAM_status(task, pt); | ||
1225 | __blk_put_request(req->q, req); | 1176 | __blk_put_request(req->q, req); |
1177 | kfree(pt); | ||
1226 | } | 1178 | } |
1227 | 1179 | ||
1228 | static struct se_subsystem_api pscsi_template = { | 1180 | static struct se_subsystem_api pscsi_template = { |
@@ -1236,9 +1188,7 @@ static struct se_subsystem_api pscsi_template = { | |||
1236 | .create_virtdevice = pscsi_create_virtdevice, | 1188 | .create_virtdevice = pscsi_create_virtdevice, |
1237 | .free_device = pscsi_free_device, | 1189 | .free_device = pscsi_free_device, |
1238 | .transport_complete = pscsi_transport_complete, | 1190 | .transport_complete = pscsi_transport_complete, |
1239 | .alloc_task = pscsi_alloc_task, | 1191 | .execute_cmd = pscsi_execute_cmd, |
1240 | .do_task = pscsi_do_task, | ||
1241 | .free_task = pscsi_free_task, | ||
1242 | .check_configfs_dev_params = pscsi_check_configfs_dev_params, | 1192 | .check_configfs_dev_params = pscsi_check_configfs_dev_params, |
1243 | .set_configfs_dev_params = pscsi_set_configfs_dev_params, | 1193 | .set_configfs_dev_params = pscsi_set_configfs_dev_params, |
1244 | .show_configfs_dev_params = pscsi_show_configfs_dev_params, | 1194 | .show_configfs_dev_params = pscsi_show_configfs_dev_params, |
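With the embedded se_task gone, pSCSI keeps per-command state alive across the asynchronous request by hanging it off cmd->priv: allocated in pscsi_execute_cmd(), read back and freed in pscsi_req_done(). iblock uses the same slot for its iblock_req. A self-contained sketch of that lifetime pattern (struct cmd, execute, and done are stand-ins here, not the kernel types):

```c
#include <stdio.h>
#include <stdlib.h>

struct cmd { void *priv; };		/* stand-in for struct se_cmd */

struct backend_state { int result; };	/* stand-in for pscsi_plugin_task */

static int execute(struct cmd *cmd)	/* submission path */
{
	struct backend_state *st = calloc(1, sizeof(*st));

	if (!st)
		return -1;
	cmd->priv = st;		/* must survive until completion fires */
	return 0;
}

static void done(struct cmd *cmd, int result)	/* completion path */
{
	struct backend_state *st = cmd->priv;

	st->result = result;
	printf("completed, result %d\n", st->result);
	free(st);		/* state dies with the request */
	cmd->priv = NULL;
}

int main(void)
{
	struct cmd c = { 0 };

	if (execute(&c) == 0)
		done(&c, 0);	/* in-kernel this runs asynchronously */
	return 0;
}
```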
diff --git a/drivers/target/target_core_pscsi.h b/drivers/target/target_core_pscsi.h
index 43f1c419e8e5..bc1e5e11eca0 100644
--- a/drivers/target/target_core_pscsi.h
+++ b/drivers/target/target_core_pscsi.h
@@ -22,7 +22,6 @@ | |||
22 | #include <linux/kobject.h> | 22 | #include <linux/kobject.h> |
23 | 23 | ||
24 | struct pscsi_plugin_task { | 24 | struct pscsi_plugin_task { |
25 | struct se_task pscsi_task; | ||
26 | unsigned char pscsi_sense[SCSI_SENSE_BUFFERSIZE]; | 25 | unsigned char pscsi_sense[SCSI_SENSE_BUFFERSIZE]; |
27 | int pscsi_direction; | 26 | int pscsi_direction; |
28 | int pscsi_result; | 27 | int pscsi_result; |
diff --git a/drivers/target/target_core_rd.c b/drivers/target/target_core_rd.c
index c6ce46891be9..d0ceb873c0e5 100644
--- a/drivers/target/target_core_rd.c
+++ b/drivers/target/target_core_rd.c
@@ -266,12 +266,6 @@ static void rd_free_device(void *p) | |||
266 | kfree(rd_dev); | 266 | kfree(rd_dev); |
267 | } | 267 | } |
268 | 268 | ||
269 | static struct se_task * | ||
270 | rd_alloc_task(unsigned char *cdb) | ||
271 | { | ||
272 | return kzalloc(sizeof(struct se_task), GFP_KERNEL); | ||
273 | } | ||
274 | |||
275 | static struct rd_dev_sg_table *rd_get_sg_table(struct rd_dev *rd_dev, u32 page) | 269 | static struct rd_dev_sg_table *rd_get_sg_table(struct rd_dev *rd_dev, u32 page) |
276 | { | 270 | { |
277 | u32 i; | 271 | u32 i; |
@@ -290,9 +284,10 @@ static struct rd_dev_sg_table *rd_get_sg_table(struct rd_dev *rd_dev, u32 page) | |||
290 | return NULL; | 284 | return NULL; |
291 | } | 285 | } |
292 | 286 | ||
293 | static int rd_do_task(struct se_task *task) | 287 | static int rd_execute_cmd(struct se_cmd *cmd, struct scatterlist *sgl, |
288 | u32 sgl_nents, enum dma_data_direction data_direction) | ||
294 | { | 289 | { |
295 | struct se_device *se_dev = task->task_se_cmd->se_dev; | 290 | struct se_device *se_dev = cmd->se_dev; |
296 | struct rd_dev *dev = se_dev->dev_ptr; | 291 | struct rd_dev *dev = se_dev->dev_ptr; |
297 | struct rd_dev_sg_table *table; | 292 | struct rd_dev_sg_table *table; |
298 | struct scatterlist *rd_sg; | 293 | struct scatterlist *rd_sg; |
@@ -303,11 +298,10 @@ static int rd_do_task(struct se_task *task) | |||
303 | u32 src_len; | 298 | u32 src_len; |
304 | u64 tmp; | 299 | u64 tmp; |
305 | 300 | ||
306 | tmp = task->task_se_cmd->t_task_lba * | 301 | tmp = cmd->t_task_lba * se_dev->se_sub_dev->se_dev_attrib.block_size; |
307 | se_dev->se_sub_dev->se_dev_attrib.block_size; | ||
308 | rd_offset = do_div(tmp, PAGE_SIZE); | 302 | rd_offset = do_div(tmp, PAGE_SIZE); |
309 | rd_page = tmp; | 303 | rd_page = tmp; |
310 | rd_size = task->task_se_cmd->data_length; | 304 | rd_size = cmd->data_length; |
311 | 305 | ||
312 | table = rd_get_sg_table(dev, rd_page); | 306 | table = rd_get_sg_table(dev, rd_page); |
313 | if (!table) | 307 | if (!table) |
@@ -317,14 +311,12 @@ static int rd_do_task(struct se_task *task) | |||
317 | 311 | ||
318 | pr_debug("RD[%u]: %s LBA: %llu, Size: %u Page: %u, Offset: %u\n", | 312 | pr_debug("RD[%u]: %s LBA: %llu, Size: %u Page: %u, Offset: %u\n", |
319 | dev->rd_dev_id, | 313 | dev->rd_dev_id, |
320 | task->task_data_direction == DMA_FROM_DEVICE ? | 314 | data_direction == DMA_FROM_DEVICE ? "Read" : "Write", |
321 | "Read" : "Write", | 315 | cmd->t_task_lba, rd_size, rd_page, rd_offset); |
322 | task->task_se_cmd->t_task_lba, | ||
323 | rd_size, rd_page, rd_offset); | ||
324 | 316 | ||
325 | src_len = PAGE_SIZE - rd_offset; | 317 | src_len = PAGE_SIZE - rd_offset; |
326 | sg_miter_start(&m, task->task_sg, task->task_sg_nents, | 318 | sg_miter_start(&m, sgl, sgl_nents, |
327 | task->task_data_direction == DMA_FROM_DEVICE ? | 319 | data_direction == DMA_FROM_DEVICE ? |
328 | SG_MITER_TO_SG : SG_MITER_FROM_SG); | 320 | SG_MITER_TO_SG : SG_MITER_FROM_SG); |
329 | while (rd_size) { | 321 | while (rd_size) { |
330 | u32 len; | 322 | u32 len; |
@@ -336,7 +328,7 @@ static int rd_do_task(struct se_task *task) | |||
336 | 328 | ||
337 | rd_addr = sg_virt(rd_sg) + rd_offset; | 329 | rd_addr = sg_virt(rd_sg) + rd_offset; |
338 | 330 | ||
339 | if (task->task_data_direction == DMA_FROM_DEVICE) | 331 | if (data_direction == DMA_FROM_DEVICE) |
340 | memcpy(m.addr, rd_addr, len); | 332 | memcpy(m.addr, rd_addr, len); |
341 | else | 333 | else |
342 | memcpy(rd_addr, m.addr, len); | 334 | memcpy(rd_addr, m.addr, len); |
@@ -371,16 +363,10 @@ static int rd_do_task(struct se_task *task) | |||
371 | } | 363 | } |
372 | sg_miter_stop(&m); | 364 | sg_miter_stop(&m); |
373 | 365 | ||
374 | task->task_scsi_status = GOOD; | 366 | target_complete_cmd(cmd, SAM_STAT_GOOD); |
375 | transport_complete_task(task, 1); | ||
376 | return 0; | 367 | return 0; |
377 | } | 368 | } |
378 | 369 | ||
379 | static void rd_free_task(struct se_task *task) | ||
380 | { | ||
381 | kfree(task); | ||
382 | } | ||
383 | |||
384 | enum { | 370 | enum { |
385 | Opt_rd_pages, Opt_err | 371 | Opt_rd_pages, Opt_err |
386 | }; | 372 | }; |
@@ -482,9 +468,7 @@ static struct se_subsystem_api rd_mcp_template = { | |||
482 | .allocate_virtdevice = rd_allocate_virtdevice, | 468 | .allocate_virtdevice = rd_allocate_virtdevice, |
483 | .create_virtdevice = rd_create_virtdevice, | 469 | .create_virtdevice = rd_create_virtdevice, |
484 | .free_device = rd_free_device, | 470 | .free_device = rd_free_device, |
485 | .alloc_task = rd_alloc_task, | 471 | .execute_cmd = rd_execute_cmd, |
486 | .do_task = rd_do_task, | ||
487 | .free_task = rd_free_task, | ||
488 | .check_configfs_dev_params = rd_check_configfs_dev_params, | 472 | .check_configfs_dev_params = rd_check_configfs_dev_params, |
489 | .set_configfs_dev_params = rd_set_configfs_dev_params, | 473 | .set_configfs_dev_params = rd_set_configfs_dev_params, |
490 | .show_configfs_dev_params = rd_show_configfs_dev_params, | 474 | .show_configfs_dev_params = rd_show_configfs_dev_params, |
diff --git a/drivers/target/target_core_tmr.c b/drivers/target/target_core_tmr.c
index e0ffbdc4a486..6e2378a88bda 100644
--- a/drivers/target/target_core_tmr.c
+++ b/drivers/target/target_core_tmr.c
@@ -374,13 +374,11 @@ static void core_tmr_drain_cmd_list( | |||
374 | struct se_queue_obj *qobj = &dev->dev_queue_obj; | 374 | struct se_queue_obj *qobj = &dev->dev_queue_obj; |
375 | struct se_cmd *cmd, *tcmd; | 375 | struct se_cmd *cmd, *tcmd; |
376 | unsigned long flags; | 376 | unsigned long flags; |
377 | |||
377 | /* | 378 | /* |
378 | * Release all commands remaining in the struct se_device cmd queue. | 379 | * Release all commands remaining in the per-device command queue. |
379 | * | 380 | * |
380 | * This follows the same logic as above for the struct se_device | 381 | * This follows the same logic as above for the state list. |
381 | * struct se_task state list, where commands are returned with | ||
382 | * TASK_ABORTED status, if there is an outstanding $FABRIC_MOD | ||
383 | * reference, otherwise the struct se_cmd is released. | ||
384 | */ | 382 | */ |
385 | spin_lock_irqsave(&qobj->cmd_queue_lock, flags); | 383 | spin_lock_irqsave(&qobj->cmd_queue_lock, flags); |
386 | list_for_each_entry_safe(cmd, tcmd, &qobj->qobj_list, se_queue_node) { | 384 | list_for_each_entry_safe(cmd, tcmd, &qobj->qobj_list, se_queue_node) { |
diff --git a/drivers/target/target_core_transport.c b/drivers/target/target_core_transport.c
index 4ee33954697e..47669c6d9175 100644
--- a/drivers/target/target_core_transport.c
+++ b/drivers/target/target_core_transport.c
@@ -72,7 +72,6 @@ static int __transport_execute_tasks(struct se_device *dev, struct se_cmd *); | |||
72 | static void transport_complete_task_attr(struct se_cmd *cmd); | 72 | static void transport_complete_task_attr(struct se_cmd *cmd); |
73 | static void transport_handle_queue_full(struct se_cmd *cmd, | 73 | static void transport_handle_queue_full(struct se_cmd *cmd, |
74 | struct se_device *dev); | 74 | struct se_device *dev); |
75 | static void transport_free_dev_tasks(struct se_cmd *cmd); | ||
76 | static int transport_generic_get_mem(struct se_cmd *cmd); | 75 | static int transport_generic_get_mem(struct se_cmd *cmd); |
77 | static void transport_put_cmd(struct se_cmd *cmd); | 76 | static void transport_put_cmd(struct se_cmd *cmd); |
78 | static void transport_remove_cmd_from_queue(struct se_cmd *cmd); | 77 | static void transport_remove_cmd_from_queue(struct se_cmd *cmd); |
@@ -662,28 +661,6 @@ static void transport_remove_cmd_from_queue(struct se_cmd *cmd) | |||
662 | spin_unlock_irqrestore(&qobj->cmd_queue_lock, flags); | 661 | spin_unlock_irqrestore(&qobj->cmd_queue_lock, flags); |
663 | } | 662 | } |
664 | 663 | ||
665 | /* | ||
666 | * Completion function used by TCM subsystem plugins (such as FILEIO) | ||
667 | * for queueing up response from struct se_subsystem_api->do_task() | ||
668 | */ | ||
669 | void transport_complete_sync_cache(struct se_cmd *cmd, int good) | ||
670 | { | ||
671 | struct se_task *task = cmd->t_task; | ||
672 | |||
673 | if (good) { | ||
674 | cmd->scsi_status = SAM_STAT_GOOD; | ||
675 | task->task_scsi_status = GOOD; | ||
676 | } else { | ||
677 | task->task_scsi_status = SAM_STAT_CHECK_CONDITION; | ||
678 | task->task_se_cmd->scsi_sense_reason = | ||
679 | TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE; | ||
680 | |||
681 | } | ||
682 | |||
683 | transport_complete_task(task, good); | ||
684 | } | ||
685 | EXPORT_SYMBOL(transport_complete_sync_cache); | ||
686 | |||
687 | static void target_complete_failure_work(struct work_struct *work) | 664 | static void target_complete_failure_work(struct work_struct *work) |
688 | { | 665 | { |
689 | struct se_cmd *cmd = container_of(work, struct se_cmd, work); | 666 | struct se_cmd *cmd = container_of(work, struct se_cmd, work); |
@@ -691,35 +668,28 @@ static void target_complete_failure_work(struct work_struct *work) | |||
691 | transport_generic_request_failure(cmd); | 668 | transport_generic_request_failure(cmd); |
692 | } | 669 | } |
693 | 670 | ||
694 | /* transport_complete_task(): | 671 | void target_complete_cmd(struct se_cmd *cmd, u8 scsi_status) |
695 | * | ||
696 | * Called from interrupt and non interrupt context depending | ||
697 | * on the transport plugin. | ||
698 | */ | ||
699 | void transport_complete_task(struct se_task *task, int success) | ||
700 | { | 672 | { |
701 | struct se_cmd *cmd = task->task_se_cmd; | ||
702 | struct se_device *dev = cmd->se_dev; | 673 | struct se_device *dev = cmd->se_dev; |
674 | int success = scsi_status == GOOD; | ||
703 | unsigned long flags; | 675 | unsigned long flags; |
704 | 676 | ||
677 | cmd->scsi_status = scsi_status; | ||
678 | |||
679 | |||
705 | spin_lock_irqsave(&cmd->t_state_lock, flags); | 680 | spin_lock_irqsave(&cmd->t_state_lock, flags); |
706 | cmd->transport_state &= ~CMD_T_BUSY; | 681 | cmd->transport_state &= ~CMD_T_BUSY; |
707 | 682 | ||
708 | /* | ||
709 | * See if any sense data exists, if so set the TASK_SENSE flag. | ||
710 | * Also check for any other post completion work that needs to be | ||
711 | * done by the plugins. | ||
712 | */ | ||
713 | if (dev && dev->transport->transport_complete) { | 683 | if (dev && dev->transport->transport_complete) { |
714 | if (dev->transport->transport_complete(task) != 0) { | 684 | if (dev->transport->transport_complete(cmd, |
685 | cmd->t_data_sg) != 0) { | ||
715 | cmd->se_cmd_flags |= SCF_TRANSPORT_TASK_SENSE; | 686 | cmd->se_cmd_flags |= SCF_TRANSPORT_TASK_SENSE; |
716 | success = 1; | 687 | success = 1; |
717 | } | 688 | } |
718 | } | 689 | } |
719 | 690 | ||
720 | /* | 691 | /* |
721 | * See if we are waiting for outstanding struct se_task | 692 | * See if we are waiting to complete for an exception condition. |
722 | * to complete for an exception condition | ||
723 | */ | 693 | */ |
724 | if (cmd->transport_state & CMD_T_REQUEST_STOP) { | 694 | if (cmd->transport_state & CMD_T_REQUEST_STOP) { |
725 | spin_unlock_irqrestore(&cmd->t_state_lock, flags); | 695 | spin_unlock_irqrestore(&cmd->t_state_lock, flags); |
@@ -730,15 +700,11 @@ void transport_complete_task(struct se_task *task, int success) | |||
730 | if (!success) | 700 | if (!success) |
731 | cmd->transport_state |= CMD_T_FAILED; | 701 | cmd->transport_state |= CMD_T_FAILED; |
732 | 702 | ||
733 | /* | ||
734 | * Decrement the outstanding t_task_cdbs_left count. The last | ||
735 | * struct se_task from struct se_cmd will complete itself into the | ||
736 | * device queue depending upon int success. | ||
737 | */ | ||
738 | if (!atomic_dec_and_test(&cmd->t_task_cdbs_left)) { | 703 | if (!atomic_dec_and_test(&cmd->t_task_cdbs_left)) { |
739 | spin_unlock_irqrestore(&cmd->t_state_lock, flags); | 704 | spin_unlock_irqrestore(&cmd->t_state_lock, flags); |
740 | return; | 705 | return; |
741 | } | 706 | } |
707 | |||
742 | /* | 708 | /* |
743 | * Check for case where an explicit ABORT_TASK has been received | 709 | * Check for case where an explicit ABORT_TASK has been received |
744 | * and transport_wait_for_tasks() will be waiting for completion. | 710 | * and transport_wait_for_tasks() will be waiting for completion. |
@@ -761,15 +727,6 @@ void transport_complete_task(struct se_task *task, int success) | |||
761 | 727 | ||
762 | queue_work(target_completion_wq, &cmd->work); | 728 | queue_work(target_completion_wq, &cmd->work); |
763 | } | 729 | } |
764 | EXPORT_SYMBOL(transport_complete_task); | ||
765 | |||
766 | void target_complete_cmd(struct se_cmd *cmd, u8 scsi_status) | ||
767 | { | ||
768 | struct se_task *task = cmd->t_task; | ||
769 | |||
770 | task->task_scsi_status = scsi_status; | ||
771 | transport_complete_task(task, scsi_status == GOOD); | ||
772 | } | ||
773 | EXPORT_SYMBOL(target_complete_cmd); | 730 | EXPORT_SYMBOL(target_complete_cmd); |
774 | 731 | ||
775 | static void target_add_to_state_list(struct se_cmd *cmd) | 732 | static void target_add_to_state_list(struct se_cmd *cmd) |
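
Stitched together from the new-side column of the hunks above, the completion path is now a single function keyed off the command rather than a task. The two spans hidden between the @@ markers (the stop-request signalling, and the ABORT_TASK check plus completion-work selection) are elided and marked, so treat this as a reading aid rather than authoritative source:

void target_complete_cmd(struct se_cmd *cmd, u8 scsi_status)
{
	struct se_device *dev = cmd->se_dev;
	int success = scsi_status == GOOD;
	unsigned long flags;

	cmd->scsi_status = scsi_status;

	spin_lock_irqsave(&cmd->t_state_lock, flags);
	cmd->transport_state &= ~CMD_T_BUSY;

	/* Let the backend flag sense data; if it does, treat as success. */
	if (dev && dev->transport->transport_complete) {
		if (dev->transport->transport_complete(cmd,
				cmd->t_data_sg) != 0) {
			cmd->se_cmd_flags |= SCF_TRANSPORT_TASK_SENSE;
			success = 1;
		}
	}

	/*
	 * See if we are waiting to complete for an exception condition.
	 */
	if (cmd->transport_state & CMD_T_REQUEST_STOP) {
		spin_unlock_irqrestore(&cmd->t_state_lock, flags);
		/* ... stop-request completion, hidden between hunks ... */
		return;
	}

	if (!success)
		cmd->transport_state |= CMD_T_FAILED;

	if (!atomic_dec_and_test(&cmd->t_task_cdbs_left)) {
		spin_unlock_irqrestore(&cmd->t_state_lock, flags);
		return;
	}

	/* ... ABORT_TASK check and completion-work selection elided ... */

	queue_work(target_completion_wq, &cmd->work);
}
EXPORT_SYMBOL(target_complete_cmd);
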
@@ -2076,8 +2033,10 @@ check_depth: | |||
2076 | 2033 | ||
2077 | if (cmd->execute_cmd) | 2034 | if (cmd->execute_cmd) |
2078 | error = cmd->execute_cmd(cmd); | 2035 | error = cmd->execute_cmd(cmd); |
2079 | else | 2036 | else { |
2080 | error = dev->transport->do_task(cmd->t_task); | 2037 | error = dev->transport->execute_cmd(cmd, cmd->t_data_sg, |
2038 | cmd->t_data_nents, cmd->data_direction); | ||
2039 | } | ||
2081 | 2040 | ||
2082 | if (error != 0) { | 2041 | if (error != 0) { |
2083 | spin_lock_irqsave(&cmd->t_state_lock, flags); | 2042 | spin_lock_irqsave(&cmd->t_state_lock, flags); |
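
The dispatch in the execution path now prefers a per-command cmd->execute_cmd hook and otherwise hands the backend the S/G list, its entry count, and the data direction, exactly the shape the commit message says was chosen to ease re-adding BIDI support. The header change itself sits outside this diffstat, so the following ops-table sketch is inferred from the call sites in this patch rather than quoted from it:

/* Implied shape of the reworked backend methods (abridged, inferred). */
struct se_subsystem_api {
	/* ... */
	int (*execute_cmd)(struct se_cmd *cmd, struct scatterlist *sgl,
			   u32 sgl_nents, enum dma_data_direction data_direction);
	int (*transport_complete)(struct se_cmd *cmd, struct scatterlist *sgl);
	unsigned char *(*get_sense_buffer)(struct se_cmd *cmd);
	/* ... */
};
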
@@ -2312,7 +2271,6 @@ static int transport_get_sense_data(struct se_cmd *cmd) | |||
2312 | { | 2271 | { |
2313 | unsigned char *buffer = cmd->sense_buffer, *sense_buffer = NULL; | 2272 | unsigned char *buffer = cmd->sense_buffer, *sense_buffer = NULL; |
2314 | struct se_device *dev = cmd->se_dev; | 2273 | struct se_device *dev = cmd->se_dev; |
2315 | struct se_task *task = NULL; | ||
2316 | unsigned long flags; | 2274 | unsigned long flags; |
2317 | u32 offset = 0; | 2275 | u32 offset = 0; |
2318 | 2276 | ||
@@ -2327,9 +2285,6 @@ static int transport_get_sense_data(struct se_cmd *cmd) | |||
2327 | return 0; | 2285 | return 0; |
2328 | } | 2286 | } |
2329 | 2287 | ||
2330 | if (!cmd->t_task) | ||
2331 | goto out; | ||
2332 | |||
2333 | if (!(cmd->se_cmd_flags & SCF_TRANSPORT_TASK_SENSE)) | 2288 | if (!(cmd->se_cmd_flags & SCF_TRANSPORT_TASK_SENSE)) |
2334 | goto out; | 2289 | goto out; |
2335 | 2290 | ||
@@ -2338,19 +2293,19 @@ static int transport_get_sense_data(struct se_cmd *cmd) | |||
2338 | goto out; | 2293 | goto out; |
2339 | } | 2294 | } |
2340 | 2295 | ||
2341 | sense_buffer = dev->transport->get_sense_buffer(task); | 2296 | sense_buffer = dev->transport->get_sense_buffer(cmd); |
2342 | if (!sense_buffer) { | 2297 | if (!sense_buffer) { |
2343 | pr_err("ITT[0x%08x]_TASK[%p]: Unable to locate" | 2298 | pr_err("ITT 0x%08x cmd %p: Unable to locate" |
2344 | " sense buffer for task with sense\n", | 2299 | " sense buffer for task with sense\n", |
2345 | cmd->se_tfo->get_task_tag(cmd), task); | 2300 | cmd->se_tfo->get_task_tag(cmd), cmd); |
2346 | goto out; | 2301 | goto out; |
2347 | } | 2302 | } |
2303 | |||
2348 | spin_unlock_irqrestore(&cmd->t_state_lock, flags); | 2304 | spin_unlock_irqrestore(&cmd->t_state_lock, flags); |
2349 | 2305 | ||
2350 | offset = cmd->se_tfo->set_fabric_sense_len(cmd, TRANSPORT_SENSE_BUFFER); | 2306 | offset = cmd->se_tfo->set_fabric_sense_len(cmd, TRANSPORT_SENSE_BUFFER); |
2351 | 2307 | ||
2352 | memcpy(&buffer[offset], sense_buffer, TRANSPORT_SENSE_BUFFER); | 2308 | memcpy(&buffer[offset], sense_buffer, TRANSPORT_SENSE_BUFFER); |
2353 | cmd->scsi_status = task->task_scsi_status; | ||
2354 | 2309 | ||
2355 | /* Automatically padded */ | 2310 | /* Automatically padded */ |
2356 | cmd->scsi_sense_length = TRANSPORT_SENSE_BUFFER + offset; | 2311 | cmd->scsi_sense_length = TRANSPORT_SENSE_BUFFER + offset; |
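
get_sense_buffer() now takes the command, and the trailing copy of task->task_scsi_status is dropped because target_complete_cmd() already stored the status on the command at completion time. Under the new signature a backend only has to find its per-command state; the sketch below assumes a PSCSI-style backend that stashes that state in cmd->priv, which is an assumption about the companion pscsi conversion, not something visible in this hunk:

/* Hypothetical backend implementation under the se_cmd-based signature. */
static unsigned char *pscsi_get_sense_buffer(struct se_cmd *cmd)
{
	struct pscsi_plugin_task *pt = cmd->priv;	/* assumed stashing */

	return pt->pscsi_sense;
}
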
@@ -3199,10 +3154,6 @@ static void target_complete_ok_work(struct work_struct *work) | |||
3199 | if (transport_get_sense_data(cmd) < 0) | 3154 | if (transport_get_sense_data(cmd) < 0) |
3200 | reason = TCM_NON_EXISTENT_LUN; | 3155 | reason = TCM_NON_EXISTENT_LUN; |
3201 | 3156 | ||
3202 | /* | ||
3203 | * Only set when an struct se_task->task_scsi_status returned | ||
3204 | * a non GOOD status. | ||
3205 | */ | ||
3206 | if (cmd->scsi_status) { | 3157 | if (cmd->scsi_status) { |
3207 | ret = transport_send_check_condition_and_sense( | 3158 | ret = transport_send_check_condition_and_sense( |
3208 | cmd, reason, 1); | 3159 | cmd, reason, 1); |
@@ -3277,15 +3228,6 @@ queue_full: | |||
3277 | transport_handle_queue_full(cmd, cmd->se_dev); | 3228 | transport_handle_queue_full(cmd, cmd->se_dev); |
3278 | } | 3229 | } |
3279 | 3230 | ||
3280 | static void transport_free_dev_tasks(struct se_cmd *cmd) | ||
3281 | { | ||
3282 | struct se_task *task; | ||
3283 | |||
3284 | task = cmd->t_task; | ||
3285 | if (task && !(cmd->transport_state & CMD_T_BUSY)) | ||
3286 | cmd->se_dev->transport->free_task(task); | ||
3287 | } | ||
3288 | |||
3289 | static inline void transport_free_sgl(struct scatterlist *sgl, int nents) | 3231 | static inline void transport_free_sgl(struct scatterlist *sgl, int nents) |
3290 | { | 3232 | { |
3291 | struct scatterlist *sg; | 3233 | struct scatterlist *sg; |
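
With no task object left to free, transport_free_dev_tasks() disappears and buffer teardown reduces to transport_free_sgl(), of which only the opening lines are visible here. Its body almost certainly follows the usual pattern of dropping each backing page and then the table; a hedged reconstruction:

/* Hedged reconstruction; only the first two lines appear in the hunk. */
static inline void transport_free_sgl(struct scatterlist *sgl, int nents)
{
	struct scatterlist *sg;
	int count;

	for_each_sg(sgl, sg, nents, count)
		__free_page(sg_page(sg));

	kfree(sgl);
}
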
@@ -3346,7 +3288,6 @@ static void transport_release_cmd(struct se_cmd *cmd) | |||
3346 | static void transport_put_cmd(struct se_cmd *cmd) | 3288 | static void transport_put_cmd(struct se_cmd *cmd) |
3347 | { | 3289 | { |
3348 | unsigned long flags; | 3290 | unsigned long flags; |
3349 | int free_tasks = 0; | ||
3350 | 3291 | ||
3351 | spin_lock_irqsave(&cmd->t_state_lock, flags); | 3292 | spin_lock_irqsave(&cmd->t_state_lock, flags); |
3352 | if (atomic_read(&cmd->t_fe_count)) { | 3293 | if (atomic_read(&cmd->t_fe_count)) { |
@@ -3362,13 +3303,9 @@ static void transport_put_cmd(struct se_cmd *cmd) | |||
3362 | if (cmd->transport_state & CMD_T_DEV_ACTIVE) { | 3303 | if (cmd->transport_state & CMD_T_DEV_ACTIVE) { |
3363 | cmd->transport_state &= ~CMD_T_DEV_ACTIVE; | 3304 | cmd->transport_state &= ~CMD_T_DEV_ACTIVE; |
3364 | target_remove_from_state_list(cmd); | 3305 | target_remove_from_state_list(cmd); |
3365 | free_tasks = 1; | ||
3366 | } | 3306 | } |
3367 | spin_unlock_irqrestore(&cmd->t_state_lock, flags); | 3307 | spin_unlock_irqrestore(&cmd->t_state_lock, flags); |
3368 | 3308 | ||
3369 | if (free_tasks != 0) | ||
3370 | transport_free_dev_tasks(cmd); | ||
3371 | |||
3372 | transport_free_pages(cmd); | 3309 | transport_free_pages(cmd); |
3373 | transport_release_cmd(cmd); | 3310 | transport_release_cmd(cmd); |
3374 | return; | 3311 | return; |
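
Removing transport_free_dev_tasks() also kills the last user of the free_tasks flag, so transport_put_cmd() is left with one job once the refcounts allow teardown: pull the command off the device state list, then release its pages and the command itself. The post-patch tail, read straight off the new-side column (the t_fe_count handling above it is unchanged and elided):

	/* t_state_lock is already held here; refcount checks elided. */
	if (cmd->transport_state & CMD_T_DEV_ACTIVE) {
		cmd->transport_state &= ~CMD_T_DEV_ACTIVE;
		target_remove_from_state_list(cmd);
	}
	spin_unlock_irqrestore(&cmd->t_state_lock, flags);

	transport_free_pages(cmd);
	transport_release_cmd(cmd);
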
@@ -3526,7 +3463,6 @@ out: | |||
3526 | int transport_generic_new_cmd(struct se_cmd *cmd) | 3463 | int transport_generic_new_cmd(struct se_cmd *cmd) |
3527 | { | 3464 | { |
3528 | struct se_device *dev = cmd->se_dev; | 3465 | struct se_device *dev = cmd->se_dev; |
3529 | struct se_task *task; | ||
3530 | int ret = 0; | 3466 | int ret = 0; |
3531 | 3467 | ||
3532 | /* | 3468 | /* |
@@ -3572,19 +3508,6 @@ int transport_generic_new_cmd(struct se_cmd *cmd) | |||
3572 | attr->max_sectors); | 3508 | attr->max_sectors); |
3573 | } | 3509 | } |
3574 | 3510 | ||
3575 | task = dev->transport->alloc_task(cmd->t_task_cdb); | ||
3576 | if (!task) { | ||
3577 | pr_err("Unable to allocate struct se_task\n"); | ||
3578 | goto out_fail; | ||
3579 | } | ||
3580 | |||
3581 | task->task_se_cmd = cmd; | ||
3582 | task->task_data_direction = cmd->data_direction; | ||
3583 | task->task_sg = cmd->t_data_sg; | ||
3584 | task->task_sg_nents = cmd->t_data_nents; | ||
3585 | |||
3586 | cmd->t_task = task; | ||
3587 | |||
3588 | atomic_inc(&cmd->t_fe_count); | 3511 | atomic_inc(&cmd->t_fe_count); |
3589 | atomic_inc(&cmd->t_se_count); | 3512 | atomic_inc(&cmd->t_se_count); |
3590 | 3513 | ||
@@ -3592,19 +3515,17 @@ int transport_generic_new_cmd(struct se_cmd *cmd) | |||
3592 | atomic_set(&cmd->t_task_cdbs_ex_left, 1); | 3515 | atomic_set(&cmd->t_task_cdbs_ex_left, 1); |
3593 | 3516 | ||
3594 | /* | 3517 | /* |
3595 | * For WRITEs, let the fabric know its buffer is ready.. | 3518 | * For WRITEs, let the fabric know its buffer is ready. |
3596 | * This WRITE struct se_cmd (and all of its associated struct se_task's) | 3519 | * |
3597 | * will be added to the struct se_device execution queue after its WRITE | 3520 | * The command will be added to the execution queue after its write |
3598 | * data has arrived. (ie: It gets handled by the transport processing | 3521 | * data has arrived. |
3599 | * thread a second time) | ||
3600 | */ | 3522 | */ |
3601 | if (cmd->data_direction == DMA_TO_DEVICE) { | 3523 | if (cmd->data_direction == DMA_TO_DEVICE) { |
3602 | target_add_to_state_list(cmd); | 3524 | target_add_to_state_list(cmd); |
3603 | return transport_generic_write_pending(cmd); | 3525 | return transport_generic_write_pending(cmd); |
3604 | } | 3526 | } |
3605 | /* | 3527 | /* |
3606 | * Everything else but a WRITE, add the struct se_cmd's struct se_task's | 3528 | * Everything else but a WRITE, add the command to the execution queue. |
3607 | * to the execution queue. | ||
3608 | */ | 3529 | */ |
3609 | transport_execute_tasks(cmd); | 3530 | transport_execute_tasks(cmd); |
3610 | return 0; | 3531 | return 0; |
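
transport_generic_new_cmd() no longer allocates a task or copies the S/G list and data direction onto it, since the execution path reads those straight from the command. What remains after the max_sectors check is just refcounting plus the WRITE/non-WRITE split; assembled from the new-side column of the two hunks above, with the single line hidden by the @@ marker left elided:

	atomic_inc(&cmd->t_fe_count);
	atomic_inc(&cmd->t_se_count);

	/* ... one hidden line between the hunks ... */
	atomic_set(&cmd->t_task_cdbs_ex_left, 1);

	/*
	 * For WRITEs, let the fabric know its buffer is ready; the command
	 * joins the execution queue once the write data has arrived.
	 */
	if (cmd->data_direction == DMA_TO_DEVICE) {
		target_add_to_state_list(cmd);
		return transport_generic_write_pending(cmd);
	}

	/* Everything else goes straight onto the execution queue. */
	transport_execute_tasks(cmd);
	return 0;
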
@@ -3691,8 +3612,6 @@ void transport_generic_free_cmd(struct se_cmd *cmd, int wait_for_tasks) | |||
3691 | if (cmd->se_lun) | 3612 | if (cmd->se_lun) |
3692 | transport_lun_remove_cmd(cmd); | 3613 | transport_lun_remove_cmd(cmd); |
3693 | 3614 | ||
3694 | transport_free_dev_tasks(cmd); | ||
3695 | |||
3696 | transport_put_cmd(cmd); | 3615 | transport_put_cmd(cmd); |
3697 | } | 3616 | } |
3698 | } | 3617 | } |
@@ -3832,7 +3751,6 @@ EXPORT_SYMBOL(target_wait_for_sess_cmds); | |||
3832 | */ | 3751 | */ |
3833 | static int transport_lun_wait_for_tasks(struct se_cmd *cmd, struct se_lun *lun) | 3752 | static int transport_lun_wait_for_tasks(struct se_cmd *cmd, struct se_lun *lun) |
3834 | { | 3753 | { |
3835 | struct se_task *task = cmd->t_task; | ||
3836 | unsigned long flags; | 3754 | unsigned long flags; |
3837 | int ret = 0; | 3755 | int ret = 0; |
3838 | 3756 | ||
@@ -3944,7 +3862,6 @@ static void __transport_clear_lun_from_sessions(struct se_lun *lun) | |||
3944 | target_remove_from_state_list(cmd); | 3862 | target_remove_from_state_list(cmd); |
3945 | spin_unlock_irqrestore(&cmd->t_state_lock, cmd_flags); | 3863 | spin_unlock_irqrestore(&cmd->t_state_lock, cmd_flags); |
3946 | 3864 | ||
3947 | transport_free_dev_tasks(cmd); | ||
3948 | /* | 3865 | /* |
3949 | * The Storage engine stopped this struct se_cmd before it was | 3866 | * The Storage engine stopped this struct se_cmd before it was |
3950 | * sent to the fabric frontend for delivery back to the | 3867 | * sent to the fabric frontend for delivery back to the |