author		Linus Torvalds <torvalds@linux-foundation.org>	2012-07-22 16:31:57 -0400
committer	Linus Torvalds <torvalds@linux-foundation.org>	2012-07-22 16:31:57 -0400
commit		cb47c1831fa406c964468b259f2082c16cc3f757
tree		4f693860680a54afc0acc9cff9b14ef9505413b2
parent		4d460fd3abf9a14e21d55ab9b67b6c58e26398eb
parent		bf6932f44a7b3fa7e2246a8b18a44670e5eab6c2
Merge branch 'for-next' of git://git.kernel.org/pub/scm/linux/kernel/git/nab/target-pending
Pull target updates from Nicholas Bellinger:
"There have been lots of work in a number of areas this past round.
The highlights include:
- Break out target_core_cdb.c emulation into SPC/SBC ops (hch)
- Add a parse_cdb method to target backend drivers (hch)
- Move sync_cache + write_same + unmap into spc_ops (hch)
- Use target_execute_cmd for WRITEs in iscsi_target + srpt (hch)
- Offload WRITE I/O backend submission in tcm_qla2xxx + tcm_fc (hch +
nab)
- Refactor core_update_device_list_for_node() into enable/disable
funcs (agrover)
- Replace the TCM processing thread with a TMR work queue (hch)
- Fix regression in transport_add_device_to_core_hba from TMR
conversion (DanC)
- Remove racy, now-redundant check of sess_tearing_down with qla2xxx
(roland)
- Add range checking, fix reading of data len + possible underflow in
UNMAP (roland)
- Allow for target_submit_cmd() returning errors + convert fabrics
(roland + nab)
- Drop bogus struct file usage for iSCSI/SCTP (viro)"
* 'for-next' of git://git.kernel.org/pub/scm/linux/kernel/git/nab/target-pending: (54 commits)
iscsi-target: Drop bogus struct file usage for iSCSI/SCTP
target: NULL dereference on error path
target: Allow for target_submit_cmd() returning errors
target: Check number of unmap descriptors against our limit
target: Fix possible integer underflow in UNMAP emulation
target: Fix reading of data length fields for UNMAP commands
target: Add range checking to UNMAP emulation
target: Add generation of LOGICAL BLOCK ADDRESS OUT OF RANGE
target: Make unnecessarily global se_dev_align_max_sectors() static
target: Remove se_session.sess_wait_list
qla2xxx: Remove racy, now-redundant check of sess_tearing_down
target: Check sess_tearing_down in target_get_sess_cmd()
sbp-target: Consolidate duplicated error path code in sbp_handle_command()
target: Un-export target_get_sess_cmd()
qla2xxx: Get rid of redundant qla_tgt_sess.tearing_down
target: Make core_disable_device_list_for_node use pre-refactoring lock ordering
target: refactor core_update_device_list_for_node()
target: Eliminate else using boolean logic
target: Misc retval cleanups
target: Remove hba param from core_dev_add_lun
...
 35 files changed, 1675 insertions(+), 2390 deletions(-)
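
One theme recurs in almost every fabric driver touched below: transport_generic_handle_data() is gone, and a fabric that has finished receiving a WRITE payload now calls target_execute_cmd() on the se_cmd directly. As a hedged sketch of the shape the converted callbacks take (my_fabric_handle_data() is a hypothetical name, not a symbol from this series; the field accesses mirror the tcm_qla2xxx and iscsi-target hunks below):

static void my_fabric_handle_data(struct se_cmd *se_cmd)
{
	/* A LUN_RESET may have aborted the command while data was in flight. */
	if (se_cmd->transport_state & CMD_T_ABORTED) {
		complete(&se_cmd->t_transport_stop_comp);
		return;
	}

	/* Previously: transport_generic_handle_data(se_cmd). */
	target_execute_cmd(se_cmd);
}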
diff --git a/drivers/infiniband/ulp/srpt/ib_srpt.c b/drivers/infiniband/ulp/srpt/ib_srpt.c
index 5f6b7f63cdef..7a0ce8d42887 100644
--- a/drivers/infiniband/ulp/srpt/ib_srpt.c
+++ b/drivers/infiniband/ulp/srpt/ib_srpt.c
@@ -1377,10 +1377,14 @@ static int srpt_abort_cmd(struct srpt_send_ioctx *ioctx)
 		break;
 	case SRPT_STATE_NEED_DATA:
 		/* DMA_TO_DEVICE (write) - RDMA read error. */
+
+		/* XXX(hch): this is a horrible layering violation.. */
 		spin_lock_irqsave(&ioctx->cmd.t_state_lock, flags);
 		ioctx->cmd.transport_state |= CMD_T_LUN_STOP;
+		ioctx->cmd.transport_state &= ~CMD_T_ACTIVE;
 		spin_unlock_irqrestore(&ioctx->cmd.t_state_lock, flags);
-		transport_generic_handle_data(&ioctx->cmd);
+
+		complete(&ioctx->cmd.transport_lun_stop_comp);
 		break;
 	case SRPT_STATE_CMD_RSP_SENT:
 		/*
@@ -1463,9 +1467,10 @@ static void srpt_handle_send_comp(struct srpt_rdma_ch *ch,
 /**
  * srpt_handle_rdma_comp() - Process an IB RDMA completion notification.
  *
- * Note: transport_generic_handle_data() is asynchronous so unmapping the
- * data that has been transferred via IB RDMA must be postponed until the
- * check_stop_free() callback.
+ * XXX: what is now target_execute_cmd used to be asynchronous, and unmapping
+ * the data that has been transferred via IB RDMA had to be postponed until the
+ * check_stop_free() callback. None of this is nessecary anymore and needs to
+ * be cleaned up.
  */
 static void srpt_handle_rdma_comp(struct srpt_rdma_ch *ch,
 				  struct srpt_send_ioctx *ioctx,
@@ -1477,7 +1482,7 @@ static void srpt_handle_rdma_comp(struct srpt_rdma_ch *ch,
 	if (opcode == SRPT_RDMA_READ_LAST) {
 		if (srpt_test_and_set_cmd_state(ioctx, SRPT_STATE_NEED_DATA,
 						SRPT_STATE_DATA_IN))
-			transport_generic_handle_data(&ioctx->cmd);
+			target_execute_cmd(&ioctx->cmd);
 		else
 			printk(KERN_ERR "%s[%d]: wrong state = %d\n", __func__,
 			       __LINE__, srpt_get_cmd_state(ioctx));
diff --git a/drivers/scsi/qla2xxx/qla_target.c b/drivers/scsi/qla2xxx/qla_target.c
index 77759c78cc21..5b30132960c7 100644
--- a/drivers/scsi/qla2xxx/qla_target.c
+++ b/drivers/scsi/qla2xxx/qla_target.c
@@ -2643,19 +2643,9 @@ static void qlt_do_work(struct work_struct *work)
 	spin_lock_irqsave(&ha->hardware_lock, flags);
 	sess = ha->tgt.tgt_ops->find_sess_by_s_id(vha,
 	    atio->u.isp24.fcp_hdr.s_id);
-	if (sess) {
-		if (unlikely(sess->tearing_down)) {
-			sess = NULL;
-			spin_unlock_irqrestore(&ha->hardware_lock, flags);
-			goto out_term;
-		} else {
-			/*
-			 * Do the extra kref_get() before dropping
-			 * qla_hw_data->hardware_lock.
-			 */
-			kref_get(&sess->se_sess->sess_kref);
-		}
-	}
+	/* Do kref_get() before dropping qla_hw_data->hardware_lock. */
+	if (sess)
+		kref_get(&sess->se_sess->sess_kref);
 	spin_unlock_irqrestore(&ha->hardware_lock, flags);
 
 	if (unlikely(!sess)) {
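
The qlt_do_work() hunk above drops the racy sess->tearing_down test and instead pins the session while qla_hw_data->hardware_lock is still held, closing the window in which the session could be freed between lookup and use. The idiom in isolation, as a minimal sketch (struct my_obj, find_my_obj() and my_lock are stand-ins, not driver symbols):

#include <linux/kref.h>
#include <linux/spinlock.h>

struct my_obj {
	struct kref refcount;
};

static DEFINE_SPINLOCK(my_lock);
static struct my_obj *find_my_obj(int id);	/* hypothetical lookup helper */

static struct my_obj *my_lookup_and_pin(int id)
{
	struct my_obj *obj;
	unsigned long flags;

	spin_lock_irqsave(&my_lock, flags);
	obj = find_my_obj(id);			/* result only valid under my_lock */
	if (obj)
		kref_get(&obj->refcount);	/* pin before the lock is dropped */
	spin_unlock_irqrestore(&my_lock, flags);

	return obj;	/* caller must eventually kref_put() */
}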
diff --git a/drivers/scsi/qla2xxx/qla_target.h b/drivers/scsi/qla2xxx/qla_target.h
index 9f9ef1644fd9..170af1571214 100644
--- a/drivers/scsi/qla2xxx/qla_target.h
+++ b/drivers/scsi/qla2xxx/qla_target.h
@@ -639,7 +639,7 @@ struct qla_tgt_func_tmpl {
 
 	int (*handle_cmd)(struct scsi_qla_host *, struct qla_tgt_cmd *,
 			unsigned char *, uint32_t, int, int, int);
-	int (*handle_data)(struct qla_tgt_cmd *);
+	void (*handle_data)(struct qla_tgt_cmd *);
 	int (*handle_tmr)(struct qla_tgt_mgmt_cmd *, uint32_t, uint8_t,
 			uint32_t);
 	void (*free_cmd)(struct qla_tgt_cmd *);
@@ -813,7 +813,6 @@ struct qla_tgt_sess {
 	unsigned int conf_compl_supported:1;
 	unsigned int deleted:1;
 	unsigned int local:1;
-	unsigned int tearing_down:1;
 
 	struct se_session *se_sess;
 	struct scsi_qla_host *vha;
diff --git a/drivers/scsi/qla2xxx/tcm_qla2xxx.c b/drivers/scsi/qla2xxx/tcm_qla2xxx.c
index 6e64314dbbb3..4752f65a9272 100644
--- a/drivers/scsi/qla2xxx/tcm_qla2xxx.c
+++ b/drivers/scsi/qla2xxx/tcm_qla2xxx.c
@@ -38,8 +38,6 @@
 #include <linux/string.h>
 #include <linux/configfs.h>
 #include <linux/ctype.h>
-#include <linux/string.h>
-#include <linux/ctype.h>
 #include <asm/unaligned.h>
 #include <scsi/scsi.h>
 #include <scsi/scsi_host.h>
@@ -466,8 +464,7 @@ static int tcm_qla2xxx_shutdown_session(struct se_session *se_sess)
 	vha = sess->vha;
 
 	spin_lock_irqsave(&vha->hw->hardware_lock, flags);
-	sess->tearing_down = 1;
-	target_splice_sess_cmd_list(se_sess);
+	target_sess_cmd_list_set_waiting(se_sess);
 	spin_unlock_irqrestore(&vha->hw->hardware_lock, flags);
 
 	return 1;
@@ -600,28 +597,15 @@ static int tcm_qla2xxx_handle_cmd(scsi_qla_host_t *vha, struct qla_tgt_cmd *cmd,
 		return -EINVAL;
 	}
 
-	target_submit_cmd(se_cmd, se_sess, cdb, &cmd->sense_buffer[0],
+	return target_submit_cmd(se_cmd, se_sess, cdb, &cmd->sense_buffer[0],
 			cmd->unpacked_lun, data_length, fcp_task_attr,
 			data_dir, flags);
-	return 0;
 }
 
-static void tcm_qla2xxx_do_rsp(struct work_struct *work)
+static void tcm_qla2xxx_handle_data_work(struct work_struct *work)
 {
 	struct qla_tgt_cmd *cmd = container_of(work, struct qla_tgt_cmd, work);
-	/*
-	 * Dispatch ->queue_status from workqueue process context
-	 */
-	transport_generic_request_failure(&cmd->se_cmd);
-}
 
-/*
- * Called from qla_target.c:qlt_do_ctio_completion()
- */
-static int tcm_qla2xxx_handle_data(struct qla_tgt_cmd *cmd)
-{
-	struct se_cmd *se_cmd = &cmd->se_cmd;
-	unsigned long flags;
 	/*
 	 * Ensure that the complete FCP WRITE payload has been received.
 	 * Otherwise return an exception via CHECK_CONDITION status.
@@ -631,24 +615,26 @@ static int tcm_qla2xxx_handle_data(struct qla_tgt_cmd *cmd)
 		 * Check if se_cmd has already been aborted via LUN_RESET, and
 		 * waiting upon completion in tcm_qla2xxx_write_pending_status()
 		 */
-		spin_lock_irqsave(&se_cmd->t_state_lock, flags);
-		if (se_cmd->transport_state & CMD_T_ABORTED) {
-			spin_unlock_irqrestore(&se_cmd->t_state_lock, flags);
-			complete(&se_cmd->t_transport_stop_comp);
-			return 0;
+		if (cmd->se_cmd.transport_state & CMD_T_ABORTED) {
+			complete(&cmd->se_cmd.t_transport_stop_comp);
+			return;
 		}
-		spin_unlock_irqrestore(&se_cmd->t_state_lock, flags);
 
-		se_cmd->scsi_sense_reason = TCM_CHECK_CONDITION_ABORT_CMD;
-		INIT_WORK(&cmd->work, tcm_qla2xxx_do_rsp);
-		queue_work(tcm_qla2xxx_free_wq, &cmd->work);
-		return 0;
+		cmd->se_cmd.scsi_sense_reason = TCM_CHECK_CONDITION_ABORT_CMD;
+		transport_generic_request_failure(&cmd->se_cmd);
+		return;
 	}
-	/*
-	 * We now tell TCM to queue this WRITE CDB with TRANSPORT_PROCESS_WRITE
-	 * status to the backstore processing thread.
-	 */
-	return transport_generic_handle_data(&cmd->se_cmd);
+
+	return target_execute_cmd(&cmd->se_cmd);
+}
+
+/*
+ * Called from qla_target.c:qlt_do_ctio_completion()
+ */
+static void tcm_qla2xxx_handle_data(struct qla_tgt_cmd *cmd)
+{
+	INIT_WORK(&cmd->work, tcm_qla2xxx_handle_data_work);
+	queue_work(tcm_qla2xxx_free_wq, &cmd->work);
 }
 
 /*
@@ -1690,7 +1676,6 @@ static struct target_core_fabric_ops tcm_qla2xxx_ops = {
 	.tpg_alloc_fabric_acl		= tcm_qla2xxx_alloc_fabric_acl,
 	.tpg_release_fabric_acl		= tcm_qla2xxx_release_fabric_acl,
 	.tpg_get_inst_index		= tcm_qla2xxx_tpg_get_inst_index,
-	.new_cmd_map			= NULL,
 	.check_stop_free		= tcm_qla2xxx_check_stop_free,
 	.release_cmd			= tcm_qla2xxx_release_cmd,
 	.put_session			= tcm_qla2xxx_put_session,
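
With handle_data now returning void, tcm_qla2xxx defers the WRITE completion logic to process context: the completion-time callback only queues a work item, and the worker recovers the command with container_of(). A self-contained sketch of that pattern under assumed names (struct my_cmd, my_wq and the my_* functions are illustrative, not driver symbols):

#include <linux/workqueue.h>

struct my_cmd {
	struct work_struct work;
	/* ...fabric command state... */
};

static void my_handle_data_work(struct work_struct *work)
{
	struct my_cmd *cmd = container_of(work, struct my_cmd, work);

	/* Process context: safe to sleep, take mutexes, submit backend I/O. */
	(void)cmd;
}

/* Called from the CTIO completion path, which cannot block. */
static void my_handle_data(struct workqueue_struct *my_wq, struct my_cmd *cmd)
{
	INIT_WORK(&cmd->work, my_handle_data_work);
	queue_work(my_wq, &cmd->work);
}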
diff --git a/drivers/target/Makefile b/drivers/target/Makefile
index 61648d84fbb6..9fdcb561422f 100644
--- a/drivers/target/Makefile
+++ b/drivers/target/Makefile
@@ -9,7 +9,8 @@ target_core_mod-y := target_core_configfs.o \
 			target_core_tmr.o \
 			target_core_tpg.o \
 			target_core_transport.o \
-			target_core_cdb.o \
+			target_core_sbc.o \
+			target_core_spc.o \
 			target_core_ua.o \
 			target_core_rd.o \
 			target_core_stat.o
diff --git a/drivers/target/iscsi/iscsi_target.c b/drivers/target/iscsi/iscsi_target.c
index d57d10cb2e47..97c0f78c3c9c 100644
--- a/drivers/target/iscsi/iscsi_target.c
+++ b/drivers/target/iscsi/iscsi_target.c
@@ -429,19 +429,8 @@ int iscsit_reset_np_thread(
 
 int iscsit_del_np_comm(struct iscsi_np *np)
 {
-	if (!np->np_socket)
-		return 0;
-
-	/*
-	 * Some network transports allocate their own struct sock->file,
-	 * see if we need to free any additional allocated resources.
-	 */
-	if (np->np_flags & NPF_SCTP_STRUCT_FILE) {
-		kfree(np->np_socket->file);
-		np->np_socket->file = NULL;
-	}
-
-	sock_release(np->np_socket);
+	if (np->np_socket)
+		sock_release(np->np_socket);
 	return 0;
 }
 
@@ -1413,8 +1402,10 @@ static int iscsit_handle_data_out(struct iscsi_conn *conn, unsigned char *buf)
 			spin_unlock_bh(&cmd->istate_lock);
 
 			iscsit_stop_dataout_timer(cmd);
-			return (!ooo_cmdsn) ? transport_generic_handle_data(
-						&cmd->se_cmd) : 0;
+			if (ooo_cmdsn)
+				return 0;
+			target_execute_cmd(&cmd->se_cmd);
+			return 0;
 		} else /* DATAOUT_CANNOT_RECOVER */
 			return -1;
 
@@ -2683,7 +2674,7 @@ static int iscsit_send_logout_response(
 		 */
 		logout_conn = iscsit_get_conn_from_cid_rcfr(sess,
 				cmd->logout_cid);
-		if ((logout_conn)) {
+		if (logout_conn) {
 			iscsit_connection_reinstatement_rcfr(logout_conn);
 			iscsit_dec_conn_usage_count(logout_conn);
 		}
@@ -4077,13 +4068,8 @@ int iscsit_close_connection(
 	kfree(conn->conn_ops);
 	conn->conn_ops = NULL;
 
-	if (conn->sock) {
-		if (conn->conn_flags & CONNFLAG_SCTP_STRUCT_FILE) {
-			kfree(conn->sock->file);
-			conn->sock->file = NULL;
-		}
+	if (conn->sock)
 		sock_release(conn->sock);
-	}
 	conn->thread_set = NULL;
 
 	pr_debug("Moving to TARG_CONN_STATE_FREE.\n");
diff --git a/drivers/target/iscsi/iscsi_target_configfs.c b/drivers/target/iscsi/iscsi_target_configfs.c
index 69dc8e35c03a..a7b25e783b58 100644
--- a/drivers/target/iscsi/iscsi_target_configfs.c
+++ b/drivers/target/iscsi/iscsi_target_configfs.c
@@ -47,28 +47,6 @@ struct lio_target_configfs_attribute {
 	ssize_t (*store)(void *, const char *, size_t);
 };
 
-struct iscsi_portal_group *lio_get_tpg_from_tpg_item(
-	struct config_item *item,
-	struct iscsi_tiqn **tiqn_out)
-{
-	struct se_portal_group *se_tpg = container_of(to_config_group(item),
-			struct se_portal_group, tpg_group);
-	struct iscsi_portal_group *tpg = se_tpg->se_tpg_fabric_ptr;
-	int ret;
-
-	if (!tpg) {
-		pr_err("Unable to locate struct iscsi_portal_group "
-			"pointer\n");
-		return NULL;
-	}
-	ret = iscsit_get_tpg(tpg);
-	if (ret < 0)
-		return NULL;
-
-	*tiqn_out = tpg->tpg_tiqn;
-	return tpg;
-}
-
 /* Start items for lio_target_portal_cit */
 
 static ssize_t lio_target_np_show_sctp(
diff --git a/drivers/target/iscsi/iscsi_target_core.h b/drivers/target/iscsi/iscsi_target_core.h
index 1c70144cdaf1..8a908b28d8b2 100644
--- a/drivers/target/iscsi/iscsi_target_core.h
+++ b/drivers/target/iscsi/iscsi_target_core.h
@@ -224,7 +224,6 @@ enum iscsi_timer_flags_table {
 /* Used for struct iscsi_np->np_flags */
 enum np_flags_table {
 	NPF_IP_NETWORK		= 0x00,
-	NPF_SCTP_STRUCT_FILE	= 0x01 /* Bugfix */
 };
 
 /* Used for struct iscsi_np->np_thread_state */
@@ -481,6 +480,7 @@ struct iscsi_tmr_req {
 	bool			task_reassign:1;
 	u32			ref_cmd_sn;
 	u32			exp_data_sn;
+	struct iscsi_cmd	*ref_cmd;
 	struct iscsi_conn_recovery *conn_recovery;
 	struct se_tmr_req	*se_tmr_req;
 };
@@ -503,7 +503,6 @@ struct iscsi_conn {
 	u16			local_port;
 	int			net_size;
 	u32			auth_id;
-#define CONNFLAG_SCTP_STRUCT_FILE	0x01
 	u32			conn_flags;
 	/* Used for iscsi_tx_login_rsp() */
 	u32			login_itt;
diff --git a/drivers/target/iscsi/iscsi_target_erl1.c b/drivers/target/iscsi/iscsi_target_erl1.c
index ecdd46deedda..3df8a2cef86f 100644
--- a/drivers/target/iscsi/iscsi_target_erl1.c
+++ b/drivers/target/iscsi/iscsi_target_erl1.c
@@ -965,8 +965,8 @@ int iscsit_execute_cmd(struct iscsi_cmd *cmd, int ooo)
 	if (cmd->immediate_data) {
 		if (cmd->cmd_flags & ICF_GOT_LAST_DATAOUT) {
 			spin_unlock_bh(&cmd->istate_lock);
-			return transport_generic_handle_data(
-					&cmd->se_cmd);
+			target_execute_cmd(&cmd->se_cmd);
+			return 0;
 		}
 		spin_unlock_bh(&cmd->istate_lock);
 
diff --git a/drivers/target/iscsi/iscsi_target_login.c b/drivers/target/iscsi/iscsi_target_login.c
index a3656c9903a1..0694d9b1bce6 100644
--- a/drivers/target/iscsi/iscsi_target_login.c
+++ b/drivers/target/iscsi/iscsi_target_login.c
@@ -518,7 +518,7 @@ int iscsi_login_post_auth_non_zero_tsih(
 	 * initiator and release the new connection.
 	 */
 	conn_ptr = iscsit_get_conn_from_cid_rcfr(sess, cid);
-	if ((conn_ptr)) {
+	if (conn_ptr) {
 		pr_err("Connection exists with CID %hu for %s,"
 			" performing connection reinstatement.\n",
 			conn_ptr->cid, sess->sess_ops->InitiatorName);
@@ -539,7 +539,7 @@
 	if (sess->sess_ops->ErrorRecoveryLevel == 2) {
 		cr = iscsit_get_inactive_connection_recovery_entry(
 				sess, cid);
-		if ((cr)) {
+		if (cr) {
 			pr_debug("Performing implicit logout"
 				" for connection recovery on CID: %hu\n",
 				conn->cid);
@@ -795,22 +795,6 @@ int iscsi_target_setup_login_socket(
 	}
 	np->np_socket = sock;
 	/*
-	 * The SCTP stack needs struct socket->file.
-	 */
-	if ((np->np_network_transport == ISCSI_SCTP_TCP) ||
-	    (np->np_network_transport == ISCSI_SCTP_UDP)) {
-		if (!sock->file) {
-			sock->file = kzalloc(sizeof(struct file), GFP_KERNEL);
-			if (!sock->file) {
-				pr_err("Unable to allocate struct"
-					" file for SCTP\n");
-				ret = -ENOMEM;
-				goto fail;
-			}
-			np->np_flags |= NPF_SCTP_STRUCT_FILE;
-		}
-	}
-	/*
 	 * Setup the np->np_sockaddr from the passed sockaddr setup
 	 * in iscsi_target_configfs.c code..
 	 */
@@ -869,21 +853,15 @@
 
 fail:
 	np->np_socket = NULL;
-	if (sock) {
-		if (np->np_flags & NPF_SCTP_STRUCT_FILE) {
-			kfree(sock->file);
-			sock->file = NULL;
-		}
-
+	if (sock)
 		sock_release(sock);
-	}
 	return ret;
 }
 
 static int __iscsi_target_login_thread(struct iscsi_np *np)
 {
 	u8 buffer[ISCSI_HDR_LEN], iscsi_opcode, zero_tsih = 0;
-	int err, ret = 0, set_sctp_conn_flag, stop;
+	int err, ret = 0, stop;
 	struct iscsi_conn *conn = NULL;
 	struct iscsi_login *login;
 	struct iscsi_portal_group *tpg = NULL;
@@ -894,7 +872,6 @@ static int __iscsi_target_login_thread(struct iscsi_np *np)
 	struct sockaddr_in6 sock_in6;
 
 	flush_signals(current);
-	set_sctp_conn_flag = 0;
 	sock = np->np_socket;
 
 	spin_lock_bh(&np->np_thread_lock);
@@ -917,35 +894,12 @@
 		spin_unlock_bh(&np->np_thread_lock);
 		goto out;
 	}
-	/*
-	 * The SCTP stack needs struct socket->file.
-	 */
-	if ((np->np_network_transport == ISCSI_SCTP_TCP) ||
-	    (np->np_network_transport == ISCSI_SCTP_UDP)) {
-		if (!new_sock->file) {
-			new_sock->file = kzalloc(
-					sizeof(struct file), GFP_KERNEL);
-			if (!new_sock->file) {
-				pr_err("Unable to allocate struct"
-					" file for SCTP\n");
-				sock_release(new_sock);
-				/* Get another socket */
-				return 1;
-			}
-			set_sctp_conn_flag = 1;
-		}
-	}
-
 	iscsi_start_login_thread_timer(np);
 
 	conn = kzalloc(sizeof(struct iscsi_conn), GFP_KERNEL);
 	if (!conn) {
 		pr_err("Could not allocate memory for"
 			" new connection\n");
-		if (set_sctp_conn_flag) {
-			kfree(new_sock->file);
-			new_sock->file = NULL;
-		}
 		sock_release(new_sock);
 		/* Get another socket */
 		return 1;
@@ -955,9 +909,6 @@
 	conn->conn_state = TARG_CONN_STATE_FREE;
 	conn->sock = new_sock;
 
-	if (set_sctp_conn_flag)
-		conn->conn_flags |= CONNFLAG_SCTP_STRUCT_FILE;
-
 	pr_debug("Moving to TARG_CONN_STATE_XPT_UP.\n");
 	conn->conn_state = TARG_CONN_STATE_XPT_UP;
 
@@ -1081,7 +1032,7 @@ static int __iscsi_target_login_thread(struct iscsi_np *np)
 			goto new_sess_out;
 
 		zero_tsih = (pdu->tsih == 0x0000);
-		if ((zero_tsih)) {
+		if (zero_tsih) {
 			/*
 			 * This is the leading connection of a new session.
 			 * We wait until after authentication to check for
@@ -1205,13 +1156,8 @@ old_sess_out:
 		iscsi_release_param_list(conn->param_list);
 		conn->param_list = NULL;
 	}
-	if (conn->sock) {
-		if (conn->conn_flags & CONNFLAG_SCTP_STRUCT_FILE) {
-			kfree(conn->sock->file);
-			conn->sock->file = NULL;
-		}
+	if (conn->sock)
 		sock_release(conn->sock);
-	}
 	kfree(conn);
 
 	if (tpg) {
diff --git a/drivers/target/iscsi/iscsi_target_parameters.c b/drivers/target/iscsi/iscsi_target_parameters.c
index ed5241e7f12a..0c4760fabfc0 100644
--- a/drivers/target/iscsi/iscsi_target_parameters.c
+++ b/drivers/target/iscsi/iscsi_target_parameters.c
@@ -681,7 +681,7 @@ int iscsi_update_param_value(struct iscsi_param *param, char *value)
 	param->value = kzalloc(strlen(value) + 1, GFP_KERNEL);
 	if (!param->value) {
 		pr_err("Unable to allocate memory for value.\n");
-		return -1;
+		return -ENOMEM;
 	}
 
 	memcpy(param->value, value, strlen(value));
diff --git a/drivers/target/iscsi/iscsi_target_tmr.c b/drivers/target/iscsi/iscsi_target_tmr.c
index f4e640b51fd1..f62fe123d902 100644
--- a/drivers/target/iscsi/iscsi_target_tmr.c
+++ b/drivers/target/iscsi/iscsi_target_tmr.c
@@ -19,6 +19,7 @@
 ******************************************************************************/
 
 #include <asm/unaligned.h>
+#include <scsi/scsi_device.h>
 #include <scsi/iscsi_proto.h>
 #include <target/target_core_base.h>
 #include <target/target_core_fabric.h>
@@ -61,7 +62,7 @@ u8 iscsit_tmr_abort_task(
 	}
 
 	se_tmr->ref_task_tag = hdr->rtt;
-	se_tmr->ref_cmd = &ref_cmd->se_cmd;
+	tmr_req->ref_cmd = ref_cmd;
 	tmr_req->ref_cmd_sn = hdr->refcmdsn;
 	tmr_req->exp_data_sn = hdr->exp_datasn;
 
@@ -121,7 +122,7 @@ u8 iscsit_tmr_task_reassign(
 	struct iscsi_tmr_req *tmr_req = cmd->tmr_req;
 	struct se_tmr_req *se_tmr = cmd->se_cmd.se_tmr_req;
 	struct iscsi_tm *hdr = (struct iscsi_tm *) buf;
-	int ret;
+	int ret, ref_lun;
 
 	pr_debug("Got TASK_REASSIGN TMR ITT: 0x%08x,"
 		" RefTaskTag: 0x%08x, ExpDataSN: 0x%08x, CID: %hu\n",
@@ -155,9 +156,16 @@
 		return ISCSI_TMF_RSP_REJECTED;
 	}
 
+	ref_lun = scsilun_to_int(&hdr->lun);
+	if (ref_lun != ref_cmd->se_cmd.orig_fe_lun) {
+		pr_err("Unable to perform connection recovery for"
+			" differing ref_lun: %d ref_cmd orig_fe_lun: %u\n",
+			ref_lun, ref_cmd->se_cmd.orig_fe_lun);
+		return ISCSI_TMF_RSP_REJECTED;
+	}
+
 	se_tmr->ref_task_tag = hdr->rtt;
-	se_tmr->ref_cmd = &ref_cmd->se_cmd;
-	se_tmr->ref_task_lun = get_unaligned_le64(&hdr->lun);
+	tmr_req->ref_cmd = ref_cmd;
 	tmr_req->ref_cmd_sn = hdr->refcmdsn;
 	tmr_req->exp_data_sn = hdr->exp_datasn;
 	tmr_req->conn_recovery = cr;
@@ -191,9 +199,7 @@ static int iscsit_task_reassign_complete_nop_out(
 	struct iscsi_tmr_req *tmr_req,
 	struct iscsi_conn *conn)
 {
-	struct se_tmr_req *se_tmr = tmr_req->se_tmr_req;
-	struct se_cmd *se_cmd = se_tmr->ref_cmd;
-	struct iscsi_cmd *cmd = container_of(se_cmd, struct iscsi_cmd, se_cmd);
+	struct iscsi_cmd *cmd = tmr_req->ref_cmd;
 	struct iscsi_conn_recovery *cr;
 
 	if (!cmd->cr) {
@@ -251,7 +257,8 @@ static int iscsit_task_reassign_complete_write(
 		pr_debug("WRITE ITT: 0x%08x: t_state: %d"
 			" never sent to transport\n",
 			cmd->init_task_tag, cmd->se_cmd.t_state);
-		return transport_generic_handle_data(se_cmd);
+		target_execute_cmd(se_cmd);
+		return 0;
 	}
 
 	cmd->i_state = ISTATE_SEND_STATUS;
@@ -360,9 +367,7 @@ static int iscsit_task_reassign_complete_scsi_cmnd(
 	struct iscsi_tmr_req *tmr_req,
 	struct iscsi_conn *conn)
 {
-	struct se_tmr_req *se_tmr = tmr_req->se_tmr_req;
-	struct se_cmd *se_cmd = se_tmr->ref_cmd;
-	struct iscsi_cmd *cmd = container_of(se_cmd, struct iscsi_cmd, se_cmd);
+	struct iscsi_cmd *cmd = tmr_req->ref_cmd;
 	struct iscsi_conn_recovery *cr;
 
 	if (!cmd->cr) {
@@ -385,7 +390,7 @@
 	list_add_tail(&cmd->i_conn_node, &conn->conn_cmd_list);
 	spin_unlock_bh(&conn->cmd_lock);
 
-	if (se_cmd->se_cmd_flags & SCF_SENT_CHECK_CONDITION) {
+	if (cmd->se_cmd.se_cmd_flags & SCF_SENT_CHECK_CONDITION) {
 		cmd->i_state = ISTATE_SEND_STATUS;
 		iscsit_add_cmd_to_response_queue(cmd, conn, cmd->i_state);
 		return 0;
@@ -411,17 +416,14 @@ static int iscsit_task_reassign_complete(
 	struct iscsi_tmr_req *tmr_req,
 	struct iscsi_conn *conn)
 {
-	struct se_tmr_req *se_tmr = tmr_req->se_tmr_req;
-	struct se_cmd *se_cmd;
 	struct iscsi_cmd *cmd;
 	int ret = 0;
 
-	if (!se_tmr->ref_cmd) {
+	if (!tmr_req->ref_cmd) {
 		pr_err("TMR Request is missing a RefCmd struct iscsi_cmd.\n");
 		return -1;
 	}
-	se_cmd = se_tmr->ref_cmd;
-	cmd = container_of(se_cmd, struct iscsi_cmd, se_cmd);
+	cmd = tmr_req->ref_cmd;
 
 	cmd->conn = conn;
 
@@ -547,9 +549,7 @@ int iscsit_task_reassign_prepare_write(
 	struct iscsi_tmr_req *tmr_req,
 	struct iscsi_conn *conn)
 {
-	struct se_tmr_req *se_tmr = tmr_req->se_tmr_req;
-	struct se_cmd *se_cmd = se_tmr->ref_cmd;
-	struct iscsi_cmd *cmd = container_of(se_cmd, struct iscsi_cmd, se_cmd);
+	struct iscsi_cmd *cmd = tmr_req->ref_cmd;
 	struct iscsi_pdu *pdu = NULL;
 	struct iscsi_r2t *r2t = NULL, *r2t_tmp;
 	int first_incomplete_r2t = 1, i = 0;
@@ -782,14 +782,12 @@ int iscsit_check_task_reassign_expdatasn(
 	struct iscsi_tmr_req *tmr_req,
 	struct iscsi_conn *conn)
 {
-	struct se_tmr_req *se_tmr = tmr_req->se_tmr_req;
-	struct se_cmd *se_cmd = se_tmr->ref_cmd;
-	struct iscsi_cmd *ref_cmd = container_of(se_cmd, struct iscsi_cmd, se_cmd);
+	struct iscsi_cmd *ref_cmd = tmr_req->ref_cmd;
 
 	if (ref_cmd->iscsi_opcode != ISCSI_OP_SCSI_CMD)
 		return 0;
 
-	if (se_cmd->se_cmd_flags & SCF_SENT_CHECK_CONDITION)
+	if (ref_cmd->se_cmd.se_cmd_flags & SCF_SENT_CHECK_CONDITION)
 		return 0;
 
 	if (ref_cmd->data_direction == DMA_NONE)
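
All of the iscsi_target_tmr.c hunks above fall out of one data-structure change: struct iscsi_tmr_req now stores the referenced command as a struct iscsi_cmd * (the iscsi_target_core.h hunk earlier), so the old two-step recovery through se_tmr->ref_cmd plus container_of() disappears. For reference, a standalone sketch of the round-trip being deleted (the struct bodies are trimmed stand-ins, and iscsi_cmd_from_se() is a hypothetical helper name):

#include <linux/kernel.h>

struct se_cmd { /* stand-in for the core command descriptor */ };

struct iscsi_cmd {
	/* ... */
	struct se_cmd se_cmd;	/* core command embedded in the fabric command */
};

/* The deleted pattern: map the embedded member back to its container.
 * Keeping the iscsi_cmd pointer directly in iscsi_tmr_req makes this
 * round-trip unnecessary. */
static inline struct iscsi_cmd *iscsi_cmd_from_se(struct se_cmd *se_cmd)
{
	return container_of(se_cmd, struct iscsi_cmd, se_cmd);
}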
diff --git a/drivers/target/iscsi/iscsi_target_tpg.c b/drivers/target/iscsi/iscsi_target_tpg.c
index 879d8d0fa3fe..a38a3f8ab0d9 100644
--- a/drivers/target/iscsi/iscsi_target_tpg.c
+++ b/drivers/target/iscsi/iscsi_target_tpg.c
@@ -303,6 +303,7 @@ int iscsit_tpg_enable_portal_group(struct iscsi_portal_group *tpg)
 {
 	struct iscsi_param *param;
 	struct iscsi_tiqn *tiqn = tpg->tpg_tiqn;
+	int ret;
 
 	spin_lock(&tpg->tpg_state_lock);
 	if (tpg->tpg_state == TPG_STATE_ACTIVE) {
@@ -319,19 +320,19 @@
 	param = iscsi_find_param_from_key(AUTHMETHOD, tpg->param_list);
 	if (!param) {
 		spin_unlock(&tpg->tpg_state_lock);
-		return -ENOMEM;
+		return -EINVAL;
 	}
 
 	if (ISCSI_TPG_ATTRIB(tpg)->authentication) {
-		if (!strcmp(param->value, NONE))
-			if (iscsi_update_param_value(param, CHAP) < 0) {
-				spin_unlock(&tpg->tpg_state_lock);
-				return -ENOMEM;
-			}
-		if (iscsit_ta_authentication(tpg, 1) < 0) {
-			spin_unlock(&tpg->tpg_state_lock);
-			return -ENOMEM;
+		if (!strcmp(param->value, NONE)) {
+			ret = iscsi_update_param_value(param, CHAP);
+			if (ret)
+				goto err;
 		}
+
+		ret = iscsit_ta_authentication(tpg, 1);
+		if (ret < 0)
+			goto err;
 	}
 
 	tpg->tpg_state = TPG_STATE_ACTIVE;
@@ -344,6 +345,10 @@
 	spin_unlock(&tiqn->tiqn_tpg_lock);
 
 	return 0;
+
+err:
+	spin_unlock(&tpg->tpg_state_lock);
+	return ret;
 }
 
 int iscsit_tpg_disable_portal_group(struct iscsi_portal_group *tpg, int force)
@@ -558,7 +563,7 @@ int iscsit_ta_authentication(struct iscsi_portal_group *tpg, u32 authentication)
 	if ((authentication != 1) && (authentication != 0)) {
 		pr_err("Illegal value for authentication parameter:"
			" %u, ignoring request.\n", authentication);
-		return -1;
+		return -EINVAL;
 	}
 
 	memset(buf1, 0, sizeof(buf1));
@@ -593,7 +598,7 @@
 	} else {
 		snprintf(buf1, sizeof(buf1), "%s", param->value);
 		none = strstr(buf1, NONE);
-		if ((none))
+		if (none)
 			goto out;
 		strncat(buf1, ",", strlen(","));
 		strncat(buf1, NONE, strlen(NONE));
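
The iscsit_tpg_enable_portal_group() rework above is a standard single-exit conversion: rather than repeating spin_unlock() before every early return (and returning a misleading -ENOMEM), each failure now jumps to one err label that drops tpg_state_lock and propagates the real errno. The idiom in isolation, sketched under assumed names (my_enable() and my_step() are placeholders for the AUTHMETHOD/CHAP calls):

static int my_step(void);	/* placeholder for iscsi_update_param_value() etc. */

static int my_enable(spinlock_t *lock)
{
	int ret;

	spin_lock(lock);

	ret = my_step();
	if (ret < 0)
		goto err;	/* every failure funnels through one unlock */

	spin_unlock(lock);
	return 0;

err:
	spin_unlock(lock);
	return ret;
}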
diff --git a/drivers/target/loopback/tcm_loop.c b/drivers/target/loopback/tcm_loop.c
index 38dfac2b0a1c..5491c632a15e 100644
--- a/drivers/target/loopback/tcm_loop.c
+++ b/drivers/target/loopback/tcm_loop.c
@@ -211,12 +211,11 @@ static void tcm_loop_submission_work(struct work_struct *work)
 	/*
 	 * Because some userspace code via scsi-generic do not memset their
 	 * associated read buffers, go ahead and do that here for type
-	 * SCF_SCSI_CONTROL_SG_IO_CDB. Also note that this is currently
-	 * guaranteed to be a single SGL for SCF_SCSI_CONTROL_SG_IO_CDB
-	 * by target core in target_setup_cmd_from_cdb() ->
-	 * transport_generic_cmd_sequencer().
+	 * non-data CDBs. Also note that this is currently guaranteed to be a
+	 * single SGL for this case by target core in
+	 * target_setup_cmd_from_cdb() -> transport_generic_cmd_sequencer().
 	 */
-	if (se_cmd->se_cmd_flags & SCF_SCSI_CONTROL_SG_IO_CDB &&
+	if (!(se_cmd->se_cmd_flags & SCF_SCSI_DATA_CDB) &&
 	    se_cmd->data_direction == DMA_FROM_DEVICE) {
 		struct scatterlist *sg = scsi_sglist(sc);
 		unsigned char *buf = kmap(sg_page(sg)) + sg->offset;
@@ -779,7 +778,7 @@ static int tcm_loop_write_pending(struct se_cmd *se_cmd)
 	 * We now tell TCM to add this WRITE CDB directly into the TCM storage
 	 * object execution queue.
 	 */
-	transport_generic_process_write(se_cmd);
+	target_execute_cmd(se_cmd);
 	return 0;
 }
 
diff --git a/drivers/target/sbp/sbp_target.c b/drivers/target/sbp/sbp_target.c
index 7e6136e2ce81..39ddba584b30 100644
--- a/drivers/target/sbp/sbp_target.c
+++ b/drivers/target/sbp/sbp_target.c
@@ -1219,28 +1219,14 @@ static void sbp_handle_command(struct sbp_target_request *req)
 	ret = sbp_fetch_command(req);
 	if (ret) {
 		pr_debug("sbp_handle_command: fetch command failed: %d\n", ret);
-		req->status.status |= cpu_to_be32(
-			STATUS_BLOCK_RESP(STATUS_RESP_TRANSPORT_FAILURE) |
-			STATUS_BLOCK_DEAD(0) |
-			STATUS_BLOCK_LEN(1) |
-			STATUS_BLOCK_SBP_STATUS(SBP_STATUS_UNSPECIFIED_ERROR));
-		sbp_send_status(req);
-		sbp_free_request(req);
-		return;
+		goto err;
 	}
 
 	ret = sbp_fetch_page_table(req);
 	if (ret) {
 		pr_debug("sbp_handle_command: fetch page table failed: %d\n",
 			ret);
-		req->status.status |= cpu_to_be32(
-			STATUS_BLOCK_RESP(STATUS_RESP_TRANSPORT_FAILURE) |
-			STATUS_BLOCK_DEAD(0) |
-			STATUS_BLOCK_LEN(1) |
-			STATUS_BLOCK_SBP_STATUS(SBP_STATUS_UNSPECIFIED_ERROR));
-		sbp_send_status(req);
-		sbp_free_request(req);
-		return;
+		goto err;
 	}
 
 	unpacked_lun = req->login->lun->unpacked_lun;
@@ -1249,9 +1235,21 @@
 	pr_debug("sbp_handle_command ORB:0x%llx unpacked_lun:%d data_len:%d data_dir:%d\n",
 		req->orb_pointer, unpacked_lun, data_length, data_dir);
 
-	target_submit_cmd(&req->se_cmd, sess->se_sess, req->cmd_buf,
+	if (target_submit_cmd(&req->se_cmd, sess->se_sess, req->cmd_buf,
 			req->sense_buf, unpacked_lun, data_length,
-			MSG_SIMPLE_TAG, data_dir, 0);
+			MSG_SIMPLE_TAG, data_dir, 0))
+		goto err;
+
+	return;
+
+err:
+	req->status.status |= cpu_to_be32(
+		STATUS_BLOCK_RESP(STATUS_RESP_TRANSPORT_FAILURE) |
+		STATUS_BLOCK_DEAD(0) |
+		STATUS_BLOCK_LEN(1) |
+		STATUS_BLOCK_SBP_STATUS(SBP_STATUS_UNSPECIFIED_ERROR));
+	sbp_send_status(req);
+	sbp_free_request(req);
 }
 
 /*
@@ -1784,8 +1782,7 @@ static int sbp_write_pending(struct se_cmd *se_cmd)
 		return ret;
 	}
 
-	transport_generic_process_write(se_cmd);
-
+	target_execute_cmd(se_cmd);
 	return 0;
 }
 
diff --git a/drivers/target/target_core_device.c b/drivers/target/target_core_device.c index 5ad972856a8d..cf2c66f3c116 100644 --- a/drivers/target/target_core_device.c +++ b/drivers/target/target_core_device.c | |||
@@ -300,8 +300,8 @@ int core_free_device_list_for_node( | |||
300 | lun = deve->se_lun; | 300 | lun = deve->se_lun; |
301 | 301 | ||
302 | spin_unlock_irq(&nacl->device_list_lock); | 302 | spin_unlock_irq(&nacl->device_list_lock); |
303 | core_update_device_list_for_node(lun, NULL, deve->mapped_lun, | 303 | core_disable_device_list_for_node(lun, NULL, deve->mapped_lun, |
304 | TRANSPORT_LUNFLAGS_NO_ACCESS, nacl, tpg, 0); | 304 | TRANSPORT_LUNFLAGS_NO_ACCESS, nacl, tpg); |
305 | spin_lock_irq(&nacl->device_list_lock); | 305 | spin_lock_irq(&nacl->device_list_lock); |
306 | } | 306 | } |
307 | spin_unlock_irq(&nacl->device_list_lock); | 307 | spin_unlock_irq(&nacl->device_list_lock); |
@@ -342,72 +342,46 @@ void core_update_device_list_access( | |||
342 | spin_unlock_irq(&nacl->device_list_lock); | 342 | spin_unlock_irq(&nacl->device_list_lock); |
343 | } | 343 | } |
344 | 344 | ||
345 | /* core_update_device_list_for_node(): | 345 | /* core_enable_device_list_for_node(): |
346 | * | 346 | * |
347 | * | 347 | * |
348 | */ | 348 | */ |
349 | int core_update_device_list_for_node( | 349 | int core_enable_device_list_for_node( |
350 | struct se_lun *lun, | 350 | struct se_lun *lun, |
351 | struct se_lun_acl *lun_acl, | 351 | struct se_lun_acl *lun_acl, |
352 | u32 mapped_lun, | 352 | u32 mapped_lun, |
353 | u32 lun_access, | 353 | u32 lun_access, |
354 | struct se_node_acl *nacl, | 354 | struct se_node_acl *nacl, |
355 | struct se_portal_group *tpg, | 355 | struct se_portal_group *tpg) |
356 | int enable) | ||
357 | { | 356 | { |
358 | struct se_port *port = lun->lun_sep; | 357 | struct se_port *port = lun->lun_sep; |
359 | struct se_dev_entry *deve = nacl->device_list[mapped_lun]; | 358 | struct se_dev_entry *deve; |
360 | int trans = 0; | ||
361 | /* | ||
362 | * If the MappedLUN entry is being disabled, the entry in | ||
363 | * port->sep_alua_list must be removed now before clearing the | ||
364 | * struct se_dev_entry pointers below as logic in | ||
365 | * core_alua_do_transition_tg_pt() depends on these being present. | ||
366 | */ | ||
367 | if (!enable) { | ||
368 | /* | ||
369 | * deve->se_lun_acl will be NULL for demo-mode created LUNs | ||
370 | * that have not been explicitly concerted to MappedLUNs -> | ||
371 | * struct se_lun_acl, but we remove deve->alua_port_list from | ||
372 | * port->sep_alua_list. This also means that active UAs and | ||
373 | * NodeACL context specific PR metadata for demo-mode | ||
374 | * MappedLUN *deve will be released below.. | ||
375 | */ | ||
376 | spin_lock_bh(&port->sep_alua_lock); | ||
377 | list_del(&deve->alua_port_list); | ||
378 | spin_unlock_bh(&port->sep_alua_lock); | ||
379 | } | ||
380 | 359 | ||
381 | spin_lock_irq(&nacl->device_list_lock); | 360 | spin_lock_irq(&nacl->device_list_lock); |
382 | if (enable) { | 361 | |
383 | /* | 362 | deve = nacl->device_list[mapped_lun]; |
384 | * Check if the call is handling demo mode -> explict LUN ACL | 363 | |
385 | * transition. This transition must be for the same struct se_lun | 364 | /* |
386 | * + mapped_lun that was setup in demo mode.. | 365 | * Check if the call is handling demo mode -> explict LUN ACL |
387 | */ | 366 | * transition. This transition must be for the same struct se_lun |
388 | if (deve->lun_flags & TRANSPORT_LUNFLAGS_INITIATOR_ACCESS) { | 367 | * + mapped_lun that was setup in demo mode.. |
389 | if (deve->se_lun_acl != NULL) { | 368 | */ |
390 | pr_err("struct se_dev_entry->se_lun_acl" | 369 | if (deve->lun_flags & TRANSPORT_LUNFLAGS_INITIATOR_ACCESS) { |
391 | " already set for demo mode -> explict" | 370 | if (deve->se_lun_acl != NULL) { |
392 | " LUN ACL transition\n"); | 371 | pr_err("struct se_dev_entry->se_lun_acl" |
393 | spin_unlock_irq(&nacl->device_list_lock); | 372 | " already set for demo mode -> explict" |
394 | return -EINVAL; | 373 | " LUN ACL transition\n"); |
395 | } | 374 | spin_unlock_irq(&nacl->device_list_lock); |
396 | if (deve->se_lun != lun) { | 375 | return -EINVAL; |
397 | pr_err("struct se_dev_entry->se_lun does" | ||
398 | " match passed struct se_lun for demo mode" | ||
399 | " -> explict LUN ACL transition\n"); | ||
400 | spin_unlock_irq(&nacl->device_list_lock); | ||
401 | return -EINVAL; | ||
402 | } | ||
403 | deve->se_lun_acl = lun_acl; | ||
404 | trans = 1; | ||
405 | } else { | ||
406 | deve->se_lun = lun; | ||
407 | deve->se_lun_acl = lun_acl; | ||
408 | deve->mapped_lun = mapped_lun; | ||
409 | deve->lun_flags |= TRANSPORT_LUNFLAGS_INITIATOR_ACCESS; | ||
410 | } | 376 | } |
377 | if (deve->se_lun != lun) { | ||
378 | pr_err("struct se_dev_entry->se_lun does" | ||
379 | " match passed struct se_lun for demo mode" | ||
380 | " -> explict LUN ACL transition\n"); | ||
381 | spin_unlock_irq(&nacl->device_list_lock); | ||
382 | return -EINVAL; | ||
383 | } | ||
384 | deve->se_lun_acl = lun_acl; | ||
411 | 385 | ||
412 | if (lun_access & TRANSPORT_LUNFLAGS_READ_WRITE) { | 386 | if (lun_access & TRANSPORT_LUNFLAGS_READ_WRITE) { |
413 | deve->lun_flags &= ~TRANSPORT_LUNFLAGS_READ_ONLY; | 387 | deve->lun_flags &= ~TRANSPORT_LUNFLAGS_READ_ONLY; |
@@ -417,27 +391,72 @@ int core_update_device_list_for_node( | |||
417 | deve->lun_flags |= TRANSPORT_LUNFLAGS_READ_ONLY; | 391 | deve->lun_flags |= TRANSPORT_LUNFLAGS_READ_ONLY; |
418 | } | 392 | } |
419 | 393 | ||
420 | if (trans) { | ||
421 | spin_unlock_irq(&nacl->device_list_lock); | ||
422 | return 0; | ||
423 | } | ||
424 | deve->creation_time = get_jiffies_64(); | ||
425 | deve->attach_count++; | ||
426 | spin_unlock_irq(&nacl->device_list_lock); | 394 | spin_unlock_irq(&nacl->device_list_lock); |
395 | return 0; | ||
396 | } | ||
427 | 397 | ||
428 | spin_lock_bh(&port->sep_alua_lock); | 398 | deve->se_lun = lun; |
429 | list_add_tail(&deve->alua_port_list, &port->sep_alua_list); | 399 | deve->se_lun_acl = lun_acl; |
430 | spin_unlock_bh(&port->sep_alua_lock); | 400 | deve->mapped_lun = mapped_lun; |
401 | deve->lun_flags |= TRANSPORT_LUNFLAGS_INITIATOR_ACCESS; | ||
431 | 402 | ||
432 | return 0; | 403 | if (lun_access & TRANSPORT_LUNFLAGS_READ_WRITE) { |
404 | deve->lun_flags &= ~TRANSPORT_LUNFLAGS_READ_ONLY; | ||
405 | deve->lun_flags |= TRANSPORT_LUNFLAGS_READ_WRITE; | ||
406 | } else { | ||
407 | deve->lun_flags &= ~TRANSPORT_LUNFLAGS_READ_WRITE; | ||
408 | deve->lun_flags |= TRANSPORT_LUNFLAGS_READ_ONLY; | ||
433 | } | 409 | } |
410 | |||
411 | deve->creation_time = get_jiffies_64(); | ||
412 | deve->attach_count++; | ||
413 | spin_unlock_irq(&nacl->device_list_lock); | ||
414 | |||
415 | spin_lock_bh(&port->sep_alua_lock); | ||
416 | list_add_tail(&deve->alua_port_list, &port->sep_alua_list); | ||
417 | spin_unlock_bh(&port->sep_alua_lock); | ||
418 | |||
419 | return 0; | ||
420 | } | ||
421 | |||
422 | /* core_disable_device_list_for_node(): | ||
423 | * | ||
424 | * | ||
425 | */ | ||
426 | int core_disable_device_list_for_node( | ||
427 | struct se_lun *lun, | ||
428 | struct se_lun_acl *lun_acl, | ||
429 | u32 mapped_lun, | ||
430 | u32 lun_access, | ||
431 | struct se_node_acl *nacl, | ||
432 | struct se_portal_group *tpg) | ||
433 | { | ||
434 | struct se_port *port = lun->lun_sep; | ||
435 | struct se_dev_entry *deve = nacl->device_list[mapped_lun]; | ||
436 | |||
437 | /* | ||
438 | * If the MappedLUN entry is being disabled, the entry in | ||
439 | * port->sep_alua_list must be removed now before clearing the | ||
440 | * struct se_dev_entry pointers below as logic in | ||
441 | * core_alua_do_transition_tg_pt() depends on these being present. | ||
442 | * | ||
443 | * deve->se_lun_acl will be NULL for demo-mode created LUNs | ||
444 | * that have not been explicitly converted to MappedLUNs -> | ||
445 | * struct se_lun_acl, but we remove deve->alua_port_list from | ||
446 | * port->sep_alua_list. This also means that active UAs and | ||
447 | * NodeACL context specific PR metadata for demo-mode | ||
448 | * MappedLUN *deve will be released below.. | ||
449 | */ | ||
450 | spin_lock_bh(&port->sep_alua_lock); | ||
451 | list_del(&deve->alua_port_list); | ||
452 | spin_unlock_bh(&port->sep_alua_lock); | ||
434 | /* | 453 | /* |
435 | * Wait for any in process SPEC_I_PT=1 or REGISTER_AND_MOVE | 454 | * Wait for any in process SPEC_I_PT=1 or REGISTER_AND_MOVE |
436 | * PR operation to complete. | 455 | * PR operation to complete. |
437 | */ | 456 | */ |
438 | spin_unlock_irq(&nacl->device_list_lock); | ||
439 | while (atomic_read(&deve->pr_ref_count) != 0) | 457 | while (atomic_read(&deve->pr_ref_count) != 0) |
440 | cpu_relax(); | 458 | cpu_relax(); |
459 | |||
441 | spin_lock_irq(&nacl->device_list_lock); | 460 | spin_lock_irq(&nacl->device_list_lock); |
442 | /* | 461 | /* |
443 | * Disable struct se_dev_entry LUN ACL mapping | 462 | * Disable struct se_dev_entry LUN ACL mapping |
@@ -475,9 +494,9 @@ void core_clear_lun_from_tpg(struct se_lun *lun, struct se_portal_group *tpg) | |||
475 | continue; | 494 | continue; |
476 | spin_unlock_irq(&nacl->device_list_lock); | 495 | spin_unlock_irq(&nacl->device_list_lock); |
477 | 496 | ||
478 | core_update_device_list_for_node(lun, NULL, | 497 | core_disable_device_list_for_node(lun, NULL, |
479 | deve->mapped_lun, TRANSPORT_LUNFLAGS_NO_ACCESS, | 498 | deve->mapped_lun, TRANSPORT_LUNFLAGS_NO_ACCESS, |
480 | nacl, tpg, 0); | 499 | nacl, tpg); |
481 | 500 | ||
482 | spin_lock_irq(&nacl->device_list_lock); | 501 | spin_lock_irq(&nacl->device_list_lock); |
483 | } | 502 | } |
@@ -715,7 +734,7 @@ void se_release_device_for_hba(struct se_device *dev) | |||
715 | se_dev_stop(dev); | 734 | se_dev_stop(dev); |
716 | 735 | ||
717 | if (dev->dev_ptr) { | 736 | if (dev->dev_ptr) { |
718 | kthread_stop(dev->process_thread); | 737 | destroy_workqueue(dev->tmr_wq); |
719 | if (dev->transport->free_device) | 738 | if (dev->transport->free_device) |
720 | dev->transport->free_device(dev->dev_ptr); | 739 | dev->transport->free_device(dev->dev_ptr); |
721 | } | 740 | } |
@@ -822,7 +841,7 @@ int se_dev_check_shutdown(struct se_device *dev) | |||
822 | return ret; | 841 | return ret; |
823 | } | 842 | } |
824 | 843 | ||
825 | u32 se_dev_align_max_sectors(u32 max_sectors, u32 block_size) | 844 | static u32 se_dev_align_max_sectors(u32 max_sectors, u32 block_size) |
826 | { | 845 | { |
827 | u32 tmp, aligned_max_sectors; | 846 | u32 tmp, aligned_max_sectors; |
828 | /* | 847 | /* |
@@ -1273,7 +1292,6 @@ int se_dev_set_block_size(struct se_device *dev, u32 block_size) | |||
1273 | 1292 | ||
1274 | struct se_lun *core_dev_add_lun( | 1293 | struct se_lun *core_dev_add_lun( |
1275 | struct se_portal_group *tpg, | 1294 | struct se_portal_group *tpg, |
1276 | struct se_hba *hba, | ||
1277 | struct se_device *dev, | 1295 | struct se_device *dev, |
1278 | u32 lun) | 1296 | u32 lun) |
1279 | { | 1297 | { |
@@ -1298,7 +1316,7 @@ struct se_lun *core_dev_add_lun( | |||
1298 | pr_debug("%s_TPG[%u]_LUN[%u] - Activated %s Logical Unit from" | 1316 | pr_debug("%s_TPG[%u]_LUN[%u] - Activated %s Logical Unit from" |
1299 | " CORE HBA: %u\n", tpg->se_tpg_tfo->get_fabric_name(), | 1317 | " CORE HBA: %u\n", tpg->se_tpg_tfo->get_fabric_name(), |
1300 | tpg->se_tpg_tfo->tpg_get_tag(tpg), lun_p->unpacked_lun, | 1318 | tpg->se_tpg_tfo->tpg_get_tag(tpg), lun_p->unpacked_lun, |
1301 | tpg->se_tpg_tfo->get_fabric_name(), hba->hba_id); | 1319 | tpg->se_tpg_tfo->get_fabric_name(), dev->se_hba->hba_id); |
1302 | /* | 1320 | /* |
1303 | * Update LUN maps for dynamically added initiators when | 1321 | * Update LUN maps for dynamically added initiators when |
1304 | * generate_node_acl is enabled. | 1322 | * generate_node_acl is enabled. |
@@ -1470,8 +1488,8 @@ int core_dev_add_initiator_node_lun_acl( | |||
1470 | 1488 | ||
1471 | lacl->se_lun = lun; | 1489 | lacl->se_lun = lun; |
1472 | 1490 | ||
1473 | if (core_update_device_list_for_node(lun, lacl, lacl->mapped_lun, | 1491 | if (core_enable_device_list_for_node(lun, lacl, lacl->mapped_lun, |
1474 | lun_access, nacl, tpg, 1) < 0) | 1492 | lun_access, nacl, tpg) < 0) |
1475 | return -EINVAL; | 1493 | return -EINVAL; |
1476 | 1494 | ||
1477 | spin_lock(&lun->lun_acl_lock); | 1495 | spin_lock(&lun->lun_acl_lock); |
@@ -1514,8 +1532,8 @@ int core_dev_del_initiator_node_lun_acl( | |||
1514 | smp_mb__after_atomic_dec(); | 1532 | smp_mb__after_atomic_dec(); |
1515 | spin_unlock(&lun->lun_acl_lock); | 1533 | spin_unlock(&lun->lun_acl_lock); |
1516 | 1534 | ||
1517 | core_update_device_list_for_node(lun, NULL, lacl->mapped_lun, | 1535 | core_disable_device_list_for_node(lun, NULL, lacl->mapped_lun, |
1518 | TRANSPORT_LUNFLAGS_NO_ACCESS, nacl, tpg, 0); | 1536 | TRANSPORT_LUNFLAGS_NO_ACCESS, nacl, tpg); |
1519 | 1537 | ||
1520 | lacl->se_lun = NULL; | 1538 | lacl->se_lun = NULL; |
1521 | 1539 | ||
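The refactoring above replaces the flag-driven core_update_device_list_for_node() with explicit enable/disable entry points, so intent moves from a trailing 0/1 argument into the function name itself. A minimal sketch of the caller migration, mirroring the hunks above (surrounding arguments illustrative):

    /* Before: the final int selected enable (1) vs. disable (0). */
    core_update_device_list_for_node(lun, lacl, lacl->mapped_lun,
                    lun_access, nacl, tpg, 1);

    /* After: the operation is named, and the flag is gone. */
    core_enable_device_list_for_node(lun, lacl, lacl->mapped_lun,
                    lun_access, nacl, tpg);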
diff --git a/drivers/target/target_core_fabric_configfs.c b/drivers/target/target_core_fabric_configfs.c index 405cc98eaed6..ea479e54f5fd 100644 --- a/drivers/target/target_core_fabric_configfs.c +++ b/drivers/target/target_core_fabric_configfs.c | |||
@@ -764,8 +764,7 @@ static int target_fabric_port_link( | |||
764 | goto out; | 764 | goto out; |
765 | } | 765 | } |
766 | 766 | ||
767 | lun_p = core_dev_add_lun(se_tpg, dev->se_hba, dev, | 767 | lun_p = core_dev_add_lun(se_tpg, dev, lun->unpacked_lun); |
768 | lun->unpacked_lun); | ||
769 | if (IS_ERR(lun_p)) { | 768 | if (IS_ERR(lun_p)) { |
770 | pr_err("core_dev_add_lun() failed\n"); | 769 | pr_err("core_dev_add_lun() failed\n"); |
771 | ret = PTR_ERR(lun_p); | 770 | ret = PTR_ERR(lun_p); |
diff --git a/drivers/target/target_core_file.c b/drivers/target/target_core_file.c index 9f99d0404908..9e2100551c78 100644 --- a/drivers/target/target_core_file.c +++ b/drivers/target/target_core_file.c | |||
@@ -331,7 +331,7 @@ static int fd_do_writev(struct se_cmd *cmd, struct scatterlist *sgl, | |||
331 | return 1; | 331 | return 1; |
332 | } | 332 | } |
333 | 333 | ||
334 | static void fd_emulate_sync_cache(struct se_cmd *cmd) | 334 | static int fd_execute_sync_cache(struct se_cmd *cmd) |
335 | { | 335 | { |
336 | struct se_device *dev = cmd->se_dev; | 336 | struct se_device *dev = cmd->se_dev; |
337 | struct fd_dev *fd_dev = dev->dev_ptr; | 337 | struct fd_dev *fd_dev = dev->dev_ptr; |
@@ -365,7 +365,7 @@ static void fd_emulate_sync_cache(struct se_cmd *cmd) | |||
365 | pr_err("FILEIO: vfs_fsync_range() failed: %d\n", ret); | 365 | pr_err("FILEIO: vfs_fsync_range() failed: %d\n", ret); |
366 | 366 | ||
367 | if (immed) | 367 | if (immed) |
368 | return; | 368 | return 0; |
369 | 369 | ||
370 | if (ret) { | 370 | if (ret) { |
371 | cmd->scsi_sense_reason = TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE; | 371 | cmd->scsi_sense_reason = TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE; |
@@ -373,11 +373,15 @@ static void fd_emulate_sync_cache(struct se_cmd *cmd) | |||
373 | } else { | 373 | } else { |
374 | target_complete_cmd(cmd, SAM_STAT_GOOD); | 374 | target_complete_cmd(cmd, SAM_STAT_GOOD); |
375 | } | 375 | } |
376 | |||
377 | return 0; | ||
376 | } | 378 | } |
377 | 379 | ||
378 | static int fd_execute_cmd(struct se_cmd *cmd, struct scatterlist *sgl, | 380 | static int fd_execute_rw(struct se_cmd *cmd) |
379 | u32 sgl_nents, enum dma_data_direction data_direction) | ||
380 | { | 381 | { |
382 | struct scatterlist *sgl = cmd->t_data_sg; | ||
383 | u32 sgl_nents = cmd->t_data_nents; | ||
384 | enum dma_data_direction data_direction = cmd->data_direction; | ||
381 | struct se_device *dev = cmd->se_dev; | 385 | struct se_device *dev = cmd->se_dev; |
382 | int ret = 0; | 386 | int ret = 0; |
383 | 387 | ||
@@ -550,6 +554,16 @@ static sector_t fd_get_blocks(struct se_device *dev) | |||
550 | return div_u64(dev_size, dev->se_sub_dev->se_dev_attrib.block_size); | 554 | return div_u64(dev_size, dev->se_sub_dev->se_dev_attrib.block_size); |
551 | } | 555 | } |
552 | 556 | ||
557 | static struct spc_ops fd_spc_ops = { | ||
558 | .execute_rw = fd_execute_rw, | ||
559 | .execute_sync_cache = fd_execute_sync_cache, | ||
560 | }; | ||
561 | |||
562 | static int fd_parse_cdb(struct se_cmd *cmd) | ||
563 | { | ||
564 | return sbc_parse_cdb(cmd, &fd_spc_ops); | ||
565 | } | ||
566 | |||
553 | static struct se_subsystem_api fileio_template = { | 567 | static struct se_subsystem_api fileio_template = { |
554 | .name = "fileio", | 568 | .name = "fileio", |
555 | .owner = THIS_MODULE, | 569 | .owner = THIS_MODULE, |
@@ -561,8 +575,7 @@ static struct se_subsystem_api fileio_template = { | |||
561 | .allocate_virtdevice = fd_allocate_virtdevice, | 575 | .allocate_virtdevice = fd_allocate_virtdevice, |
562 | .create_virtdevice = fd_create_virtdevice, | 576 | .create_virtdevice = fd_create_virtdevice, |
563 | .free_device = fd_free_device, | 577 | .free_device = fd_free_device, |
564 | .execute_cmd = fd_execute_cmd, | 578 | .parse_cdb = fd_parse_cdb, |
565 | .do_sync_cache = fd_emulate_sync_cache, | ||
566 | .check_configfs_dev_params = fd_check_configfs_dev_params, | 579 | .check_configfs_dev_params = fd_check_configfs_dev_params, |
567 | .set_configfs_dev_params = fd_set_configfs_dev_params, | 580 | .set_configfs_dev_params = fd_set_configfs_dev_params, |
568 | .show_configfs_dev_params = fd_show_configfs_dev_params, | 581 | .show_configfs_dev_params = fd_show_configfs_dev_params, |
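With execute_cmd and do_sync_cache dropped from se_subsystem_api, a backend now publishes a struct spc_ops table plus a single parse_cdb hook that defers CDB decoding to sbc_parse_cdb(). A minimal sketch of that contract, using a hypothetical "foo" backend (the foo_* names are illustrative, not part of this series):

    static int foo_execute_rw(struct se_cmd *cmd)
    {
            /* I/O parameters now come from the command itself rather
             * than being passed in as arguments: */
            struct scatterlist *sgl = cmd->t_data_sg;
            u32 sgl_nents = cmd->t_data_nents;

            pr_debug("foo: %u nents (first sg %p), dir %d\n",
                     sgl_nents, sgl, cmd->data_direction);
            /* ... submit sgl to the backing store here ... */
            target_complete_cmd(cmd, SAM_STAT_GOOD);
            return 0;
    }

    static struct spc_ops foo_spc_ops = {
            .execute_rw     = foo_execute_rw,
    };

    static int foo_parse_cdb(struct se_cmd *cmd)
    {
            /* sbc_parse_cdb() decodes the CDB, validates LBA/length,
             * and points cmd->execute_cmd at a foo_spc_ops hook. */
            return sbc_parse_cdb(cmd, &foo_spc_ops);
    }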
diff --git a/drivers/target/target_core_iblock.c b/drivers/target/target_core_iblock.c index fd47950727b4..76db75e836ed 100644 --- a/drivers/target/target_core_iblock.c +++ b/drivers/target/target_core_iblock.c | |||
@@ -40,6 +40,7 @@ | |||
40 | #include <linux/module.h> | 40 | #include <linux/module.h> |
41 | #include <scsi/scsi.h> | 41 | #include <scsi/scsi.h> |
42 | #include <scsi/scsi_host.h> | 42 | #include <scsi/scsi_host.h> |
43 | #include <asm/unaligned.h> | ||
43 | 44 | ||
44 | #include <target/target_core_base.h> | 45 | #include <target/target_core_base.h> |
45 | #include <target/target_core_backend.h> | 46 | #include <target/target_core_backend.h> |
@@ -96,6 +97,7 @@ static struct se_device *iblock_create_virtdevice( | |||
96 | struct request_queue *q; | 97 | struct request_queue *q; |
97 | struct queue_limits *limits; | 98 | struct queue_limits *limits; |
98 | u32 dev_flags = 0; | 99 | u32 dev_flags = 0; |
100 | fmode_t mode; | ||
99 | int ret = -EINVAL; | 101 | int ret = -EINVAL; |
100 | 102 | ||
101 | if (!ib_dev) { | 103 | if (!ib_dev) { |
@@ -117,8 +119,11 @@ static struct se_device *iblock_create_virtdevice( | |||
117 | pr_debug( "IBLOCK: Claiming struct block_device: %s\n", | 119 | pr_debug( "IBLOCK: Claiming struct block_device: %s\n", |
118 | ib_dev->ibd_udev_path); | 120 | ib_dev->ibd_udev_path); |
119 | 121 | ||
120 | bd = blkdev_get_by_path(ib_dev->ibd_udev_path, | 122 | mode = FMODE_READ|FMODE_EXCL; |
121 | FMODE_WRITE|FMODE_READ|FMODE_EXCL, ib_dev); | 123 | if (!ib_dev->ibd_readonly) |
124 | mode |= FMODE_WRITE; | ||
125 | |||
126 | bd = blkdev_get_by_path(ib_dev->ibd_udev_path, mode, ib_dev); | ||
122 | if (IS_ERR(bd)) { | 127 | if (IS_ERR(bd)) { |
123 | ret = PTR_ERR(bd); | 128 | ret = PTR_ERR(bd); |
124 | goto failed; | 129 | goto failed; |
@@ -292,7 +297,7 @@ static void iblock_end_io_flush(struct bio *bio, int err) | |||
292 | * Implement SYNCHRONIZE CACHE. Note that we can't handle lba ranges and must | 297 | * Implement SYNCHRONIZE CACHE. Note that we can't handle lba ranges and must
293 | * always flush the whole cache. | 298 | * always flush the whole cache. |
294 | */ | 299 | */ |
295 | static void iblock_emulate_sync_cache(struct se_cmd *cmd) | 300 | static int iblock_execute_sync_cache(struct se_cmd *cmd) |
296 | { | 301 | { |
297 | struct iblock_dev *ib_dev = cmd->se_dev->dev_ptr; | 302 | struct iblock_dev *ib_dev = cmd->se_dev->dev_ptr; |
298 | int immed = (cmd->t_task_cdb[1] & 0x2); | 303 | int immed = (cmd->t_task_cdb[1] & 0x2); |
@@ -311,23 +316,98 @@ static void iblock_emulate_sync_cache(struct se_cmd *cmd) | |||
311 | if (!immed) | 316 | if (!immed) |
312 | bio->bi_private = cmd; | 317 | bio->bi_private = cmd; |
313 | submit_bio(WRITE_FLUSH, bio); | 318 | submit_bio(WRITE_FLUSH, bio); |
319 | return 0; | ||
314 | } | 320 | } |
315 | 321 | ||
316 | static int iblock_do_discard(struct se_device *dev, sector_t lba, u32 range) | 322 | static int iblock_execute_unmap(struct se_cmd *cmd) |
317 | { | 323 | { |
324 | struct se_device *dev = cmd->se_dev; | ||
318 | struct iblock_dev *ibd = dev->dev_ptr; | 325 | struct iblock_dev *ibd = dev->dev_ptr; |
319 | struct block_device *bd = ibd->ibd_bd; | 326 | unsigned char *buf, *ptr = NULL; |
320 | int barrier = 0; | 327 | sector_t lba; |
328 | int size = cmd->data_length; | ||
329 | u32 range; | ||
330 | int ret = 0; | ||
331 | int dl, bd_dl; | ||
332 | |||
333 | buf = transport_kmap_data_sg(cmd); | ||
334 | |||
335 | dl = get_unaligned_be16(&buf[0]); | ||
336 | bd_dl = get_unaligned_be16(&buf[2]); | ||
337 | |||
338 | size = min(size - 8, bd_dl); | ||
339 | if (size / 16 > dev->se_sub_dev->se_dev_attrib.max_unmap_block_desc_count) { | ||
340 | cmd->scsi_sense_reason = TCM_INVALID_PARAMETER_LIST; | ||
341 | ret = -EINVAL; | ||
342 | goto err; | ||
343 | } | ||
344 | |||
345 | /* First UNMAP block descriptor starts at 8 byte offset */ | ||
346 | ptr = &buf[8]; | ||
347 | pr_debug("UNMAP: Sub: %s Using dl: %u bd_dl: %u size: %u" | ||
348 | " ptr: %p\n", dev->transport->name, dl, bd_dl, size, ptr); | ||
349 | |||
350 | while (size >= 16) { | ||
351 | lba = get_unaligned_be64(&ptr[0]); | ||
352 | range = get_unaligned_be32(&ptr[8]); | ||
353 | pr_debug("UNMAP: Using lba: %llu and range: %u\n", | ||
354 | (unsigned long long)lba, range); | ||
355 | |||
356 | if (range > dev->se_sub_dev->se_dev_attrib.max_unmap_lba_count) { | ||
357 | cmd->scsi_sense_reason = TCM_INVALID_PARAMETER_LIST; | ||
358 | ret = -EINVAL; | ||
359 | goto err; | ||
360 | } | ||
361 | |||
362 | if (lba + range > dev->transport->get_blocks(dev) + 1) { | ||
363 | cmd->scsi_sense_reason = TCM_ADDRESS_OUT_OF_RANGE; | ||
364 | ret = -EINVAL; | ||
365 | goto err; | ||
366 | } | ||
321 | 367 | ||
322 | return blkdev_issue_discard(bd, lba, range, GFP_KERNEL, barrier); | 368 | ret = blkdev_issue_discard(ibd->ibd_bd, lba, range, |
369 | GFP_KERNEL, 0); | ||
370 | if (ret < 0) { | ||
371 | pr_err("blkdev_issue_discard() failed: %d\n", | ||
372 | ret); | ||
373 | goto err; | ||
374 | } | ||
375 | |||
376 | ptr += 16; | ||
377 | size -= 16; | ||
378 | } | ||
379 | |||
380 | err: | ||
381 | transport_kunmap_data_sg(cmd); | ||
382 | if (!ret) | ||
383 | target_complete_cmd(cmd, GOOD); | ||
384 | return ret; | ||
385 | } | ||
386 | |||
387 | static int iblock_execute_write_same(struct se_cmd *cmd) | ||
388 | { | ||
389 | struct iblock_dev *ibd = cmd->se_dev->dev_ptr; | ||
390 | int ret; | ||
391 | |||
392 | ret = blkdev_issue_discard(ibd->ibd_bd, cmd->t_task_lba, | ||
393 | spc_get_write_same_sectors(cmd), GFP_KERNEL, | ||
394 | 0); | ||
395 | if (ret < 0) { | ||
396 | pr_debug("blkdev_issue_discard() failed for WRITE_SAME\n"); | ||
397 | return ret; | ||
398 | } | ||
399 | |||
400 | target_complete_cmd(cmd, GOOD); | ||
401 | return 0; | ||
323 | } | 402 | } |
324 | 403 | ||
325 | enum { | 404 | enum { |
326 | Opt_udev_path, Opt_force, Opt_err | 405 | Opt_udev_path, Opt_readonly, Opt_force, Opt_err |
327 | }; | 406 | }; |
328 | 407 | ||
329 | static match_table_t tokens = { | 408 | static match_table_t tokens = { |
330 | {Opt_udev_path, "udev_path=%s"}, | 409 | {Opt_udev_path, "udev_path=%s"}, |
410 | {Opt_readonly, "readonly=%d"}, | ||
331 | {Opt_force, "force=%d"}, | 411 | {Opt_force, "force=%d"}, |
332 | {Opt_err, NULL} | 412 | {Opt_err, NULL} |
333 | }; | 413 | }; |
@@ -340,6 +420,7 @@ static ssize_t iblock_set_configfs_dev_params(struct se_hba *hba, | |||
340 | char *orig, *ptr, *arg_p, *opts; | 420 | char *orig, *ptr, *arg_p, *opts; |
341 | substring_t args[MAX_OPT_ARGS]; | 421 | substring_t args[MAX_OPT_ARGS]; |
342 | int ret = 0, token; | 422 | int ret = 0, token; |
423 | unsigned long tmp_readonly; | ||
343 | 424 | ||
344 | opts = kstrdup(page, GFP_KERNEL); | 425 | opts = kstrdup(page, GFP_KERNEL); |
345 | if (!opts) | 426 | if (!opts) |
@@ -372,6 +453,22 @@ static ssize_t iblock_set_configfs_dev_params(struct se_hba *hba, | |||
372 | ib_dev->ibd_udev_path); | 453 | ib_dev->ibd_udev_path); |
373 | ib_dev->ibd_flags |= IBDF_HAS_UDEV_PATH; | 454 | ib_dev->ibd_flags |= IBDF_HAS_UDEV_PATH; |
374 | break; | 455 | break; |
456 | case Opt_readonly: | ||
457 | arg_p = match_strdup(&args[0]); | ||
458 | if (!arg_p) { | ||
459 | ret = -ENOMEM; | ||
460 | break; | ||
461 | } | ||
462 | ret = strict_strtoul(arg_p, 0, &tmp_readonly); | ||
463 | kfree(arg_p); | ||
464 | if (ret < 0) { | ||
465 | pr_err("strict_strtoul() failed for" | ||
466 | " readonly=\n"); | ||
467 | goto out; | ||
468 | } | ||
469 | ib_dev->ibd_readonly = tmp_readonly; | ||
470 | pr_debug("IBLOCK: readonly: %d\n", ib_dev->ibd_readonly); | ||
471 | break; | ||
375 | case Opt_force: | 472 | case Opt_force: |
376 | break; | 473 | break; |
377 | default: | 474 | default: |
@@ -411,11 +508,10 @@ static ssize_t iblock_show_configfs_dev_params( | |||
411 | if (bd) | 508 | if (bd) |
412 | bl += sprintf(b + bl, "iBlock device: %s", | 509 | bl += sprintf(b + bl, "iBlock device: %s", |
413 | bdevname(bd, buf)); | 510 | bdevname(bd, buf)); |
414 | if (ibd->ibd_flags & IBDF_HAS_UDEV_PATH) { | 511 | if (ibd->ibd_flags & IBDF_HAS_UDEV_PATH) |
415 | bl += sprintf(b + bl, " UDEV PATH: %s\n", | 512 | bl += sprintf(b + bl, " UDEV PATH: %s", |
416 | ibd->ibd_udev_path); | 513 | ibd->ibd_udev_path); |
417 | } else | 514 | bl += sprintf(b + bl, " readonly: %d\n", ibd->ibd_readonly); |
418 | bl += sprintf(b + bl, "\n"); | ||
419 | 515 | ||
420 | bl += sprintf(b + bl, " "); | 516 | bl += sprintf(b + bl, " "); |
421 | if (bd) { | 517 | if (bd) { |
@@ -493,9 +589,11 @@ static void iblock_submit_bios(struct bio_list *list, int rw) | |||
493 | blk_finish_plug(&plug); | 589 | blk_finish_plug(&plug); |
494 | } | 590 | } |
495 | 591 | ||
496 | static int iblock_execute_cmd(struct se_cmd *cmd, struct scatterlist *sgl, | 592 | static int iblock_execute_rw(struct se_cmd *cmd) |
497 | u32 sgl_nents, enum dma_data_direction data_direction) | ||
498 | { | 593 | { |
594 | struct scatterlist *sgl = cmd->t_data_sg; | ||
595 | u32 sgl_nents = cmd->t_data_nents; | ||
596 | enum dma_data_direction data_direction = cmd->data_direction; | ||
499 | struct se_device *dev = cmd->se_dev; | 597 | struct se_device *dev = cmd->se_dev; |
500 | struct iblock_req *ibr; | 598 | struct iblock_req *ibr; |
501 | struct bio *bio; | 599 | struct bio *bio; |
@@ -642,6 +740,18 @@ static void iblock_bio_done(struct bio *bio, int err) | |||
642 | iblock_complete_cmd(cmd); | 740 | iblock_complete_cmd(cmd); |
643 | } | 741 | } |
644 | 742 | ||
743 | static struct spc_ops iblock_spc_ops = { | ||
744 | .execute_rw = iblock_execute_rw, | ||
745 | .execute_sync_cache = iblock_execute_sync_cache, | ||
746 | .execute_write_same = iblock_execute_write_same, | ||
747 | .execute_unmap = iblock_execute_unmap, | ||
748 | }; | ||
749 | |||
750 | static int iblock_parse_cdb(struct se_cmd *cmd) | ||
751 | { | ||
752 | return sbc_parse_cdb(cmd, &iblock_spc_ops); | ||
753 | } | ||
754 | |||
645 | static struct se_subsystem_api iblock_template = { | 755 | static struct se_subsystem_api iblock_template = { |
646 | .name = "iblock", | 756 | .name = "iblock", |
647 | .owner = THIS_MODULE, | 757 | .owner = THIS_MODULE, |
@@ -653,9 +763,7 @@ static struct se_subsystem_api iblock_template = { | |||
653 | .allocate_virtdevice = iblock_allocate_virtdevice, | 763 | .allocate_virtdevice = iblock_allocate_virtdevice, |
654 | .create_virtdevice = iblock_create_virtdevice, | 764 | .create_virtdevice = iblock_create_virtdevice, |
655 | .free_device = iblock_free_device, | 765 | .free_device = iblock_free_device, |
656 | .execute_cmd = iblock_execute_cmd, | 766 | .parse_cdb = iblock_parse_cdb, |
657 | .do_discard = iblock_do_discard, | ||
658 | .do_sync_cache = iblock_emulate_sync_cache, | ||
659 | .check_configfs_dev_params = iblock_check_configfs_dev_params, | 767 | .check_configfs_dev_params = iblock_check_configfs_dev_params, |
660 | .set_configfs_dev_params = iblock_set_configfs_dev_params, | 768 | .set_configfs_dev_params = iblock_set_configfs_dev_params, |
661 | .show_configfs_dev_params = iblock_show_configfs_dev_params, | 769 | .show_configfs_dev_params = iblock_show_configfs_dev_params, |
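iblock_execute_unmap() above walks the UNMAP parameter list that transport_kmap_data_sg() maps in: a header carrying the 16-bit data length (dl) and block descriptor data length (bd_dl), followed by 16-byte descriptors of a big-endian 64-bit LBA and 32-bit block count. A worked example with an assumed single-descriptor payload (all values illustrative):

    /* 24-byte UNMAP parameter list unmapping 8 blocks at LBA 4096:
     *
     *   buf[0..1]   = 00 16                   dl    = 22 (bytes after dl)
     *   buf[2..3]   = 00 10                   bd_dl = 16 (one descriptor)
     *   buf[4..7]   = reserved
     *   buf[8..15]  = 00 00 00 00 00 00 10 00 lba   = 4096 (be64)
     *   buf[16..19] = 00 00 00 08             range = 8    (be32)
     *   buf[20..23] = reserved
     *
     * size = min(24 - 8, 16) = 16, so the loop runs exactly once and,
     * once the max_unmap_lba_count and end-of-device checks pass,
     * calls blkdev_issue_discard(ibd->ibd_bd, 4096, 8, GFP_KERNEL, 0).
     */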
diff --git a/drivers/target/target_core_iblock.h b/drivers/target/target_core_iblock.h index 66cf7b9e205e..533627ae79ec 100644 --- a/drivers/target/target_core_iblock.h +++ b/drivers/target/target_core_iblock.h | |||
@@ -18,6 +18,7 @@ struct iblock_dev { | |||
18 | u32 ibd_flags; | 18 | u32 ibd_flags; |
19 | struct bio_set *ibd_bio_set; | 19 | struct bio_set *ibd_bio_set; |
20 | struct block_device *ibd_bd; | 20 | struct block_device *ibd_bd; |
21 | bool ibd_readonly; | ||
21 | } ____cacheline_aligned; | 22 | } ____cacheline_aligned; |
22 | 23 | ||
23 | #endif /* TARGET_CORE_IBLOCK_H */ | 24 | #endif /* TARGET_CORE_IBLOCK_H */ |
diff --git a/drivers/target/target_core_internal.h b/drivers/target/target_core_internal.h index 165e82429687..0fd428225d11 100644 --- a/drivers/target/target_core_internal.h +++ b/drivers/target/target_core_internal.h | |||
@@ -4,25 +4,16 @@ | |||
4 | /* target_core_alua.c */ | 4 | /* target_core_alua.c */ |
5 | extern struct t10_alua_lu_gp *default_lu_gp; | 5 | extern struct t10_alua_lu_gp *default_lu_gp; |
6 | 6 | ||
7 | /* target_core_cdb.c */ | ||
8 | int target_emulate_inquiry(struct se_cmd *cmd); | ||
9 | int target_emulate_readcapacity(struct se_cmd *cmd); | ||
10 | int target_emulate_readcapacity_16(struct se_cmd *cmd); | ||
11 | int target_emulate_modesense(struct se_cmd *cmd); | ||
12 | int target_emulate_request_sense(struct se_cmd *cmd); | ||
13 | int target_emulate_unmap(struct se_cmd *cmd); | ||
14 | int target_emulate_write_same(struct se_cmd *cmd); | ||
15 | int target_emulate_synchronize_cache(struct se_cmd *cmd); | ||
16 | int target_emulate_noop(struct se_cmd *cmd); | ||
17 | |||
18 | /* target_core_device.c */ | 7 | /* target_core_device.c */ |
19 | struct se_dev_entry *core_get_se_deve_from_rtpi(struct se_node_acl *, u16); | 8 | struct se_dev_entry *core_get_se_deve_from_rtpi(struct se_node_acl *, u16); |
20 | int core_free_device_list_for_node(struct se_node_acl *, | 9 | int core_free_device_list_for_node(struct se_node_acl *, |
21 | struct se_portal_group *); | 10 | struct se_portal_group *); |
22 | void core_dec_lacl_count(struct se_node_acl *, struct se_cmd *); | 11 | void core_dec_lacl_count(struct se_node_acl *, struct se_cmd *); |
23 | void core_update_device_list_access(u32, u32, struct se_node_acl *); | 12 | void core_update_device_list_access(u32, u32, struct se_node_acl *); |
24 | int core_update_device_list_for_node(struct se_lun *, struct se_lun_acl *, | 13 | int core_enable_device_list_for_node(struct se_lun *, struct se_lun_acl *, |
25 | u32, u32, struct se_node_acl *, struct se_portal_group *, int); | 14 | u32, u32, struct se_node_acl *, struct se_portal_group *); |
15 | int core_disable_device_list_for_node(struct se_lun *, struct se_lun_acl *, | ||
16 | u32, u32, struct se_node_acl *, struct se_portal_group *); | ||
26 | void core_clear_lun_from_tpg(struct se_lun *, struct se_portal_group *); | 17 | void core_clear_lun_from_tpg(struct se_lun *, struct se_portal_group *); |
27 | int core_dev_export(struct se_device *, struct se_portal_group *, | 18 | int core_dev_export(struct se_device *, struct se_portal_group *, |
28 | struct se_lun *); | 19 | struct se_lun *); |
@@ -56,8 +47,7 @@ int se_dev_set_max_sectors(struct se_device *, u32); | |||
56 | int se_dev_set_fabric_max_sectors(struct se_device *, u32); | 47 | int se_dev_set_fabric_max_sectors(struct se_device *, u32); |
57 | int se_dev_set_optimal_sectors(struct se_device *, u32); | 48 | int se_dev_set_optimal_sectors(struct se_device *, u32); |
58 | int se_dev_set_block_size(struct se_device *, u32); | 49 | int se_dev_set_block_size(struct se_device *, u32); |
59 | struct se_lun *core_dev_add_lun(struct se_portal_group *, struct se_hba *, | 50 | struct se_lun *core_dev_add_lun(struct se_portal_group *, struct se_device *, u32); |
60 | struct se_device *, u32); | ||
61 | int core_dev_del_lun(struct se_portal_group *, u32); | 51 | int core_dev_del_lun(struct se_portal_group *, u32); |
62 | struct se_lun *core_get_lun_from_tpg(struct se_portal_group *, u32); | 52 | struct se_lun *core_get_lun_from_tpg(struct se_portal_group *, u32); |
63 | struct se_lun_acl *core_dev_init_initiator_node_lun_acl(struct se_portal_group *, | 53 | struct se_lun_acl *core_dev_init_initiator_node_lun_acl(struct se_portal_group *, |
@@ -104,7 +94,6 @@ void release_se_kmem_caches(void); | |||
104 | u32 scsi_get_new_index(scsi_index_t); | 94 | u32 scsi_get_new_index(scsi_index_t); |
105 | void transport_subsystem_check_init(void); | 95 | void transport_subsystem_check_init(void); |
106 | void transport_cmd_finish_abort(struct se_cmd *, int); | 96 | void transport_cmd_finish_abort(struct se_cmd *, int); |
107 | void __target_remove_from_execute_list(struct se_cmd *); | ||
108 | unsigned char *transport_dump_cmd_direction(struct se_cmd *); | 97 | unsigned char *transport_dump_cmd_direction(struct se_cmd *); |
109 | void transport_dump_dev_state(struct se_device *, char *, int *); | 98 | void transport_dump_dev_state(struct se_device *, char *, int *); |
110 | void transport_dump_dev_info(struct se_device *, struct se_lun *, | 99 | void transport_dump_dev_info(struct se_device *, struct se_lun *, |
@@ -116,6 +105,7 @@ int transport_dump_vpd_ident(struct t10_vpd *, unsigned char *, int); | |||
116 | bool target_stop_cmd(struct se_cmd *cmd, unsigned long *flags); | 105 | bool target_stop_cmd(struct se_cmd *cmd, unsigned long *flags); |
117 | int transport_clear_lun_from_sessions(struct se_lun *); | 106 | int transport_clear_lun_from_sessions(struct se_lun *); |
118 | void transport_send_task_abort(struct se_cmd *); | 107 | void transport_send_task_abort(struct se_cmd *); |
108 | int target_cmd_size_check(struct se_cmd *cmd, unsigned int size); | ||
119 | 109 | ||
120 | /* target_core_stat.c */ | 110 | /* target_core_stat.c */ |
121 | void target_stat_setup_dev_default_groups(struct se_subsystem_dev *); | 111 | void target_stat_setup_dev_default_groups(struct se_subsystem_dev *); |
diff --git a/drivers/target/target_core_pr.c b/drivers/target/target_core_pr.c index a1bcd927a9e6..1e946502c378 100644 --- a/drivers/target/target_core_pr.c +++ b/drivers/target/target_core_pr.c | |||
@@ -507,7 +507,7 @@ static int core_scsi3_pr_seq_non_holder( | |||
507 | * Check if write exclusive initiator ports *NOT* holding the | 507 | * Check if write exclusive initiator ports *NOT* holding the |
508 | * WRITE_EXCLUSIVE_* reservation. | 508 | * WRITE_EXCLUSIVE_* reservation. |
509 | */ | 509 | */ |
510 | if ((we) && !(registered_nexus)) { | 510 | if (we && !registered_nexus) { |
511 | if (cmd->data_direction == DMA_TO_DEVICE) { | 511 | if (cmd->data_direction == DMA_TO_DEVICE) { |
512 | /* | 512 | /* |
513 | * Conflict for write exclusive | 513 | * Conflict for write exclusive |
@@ -2486,7 +2486,7 @@ static int core_scsi3_pro_reserve( | |||
2486 | */ | 2486 | */ |
2487 | spin_lock(&dev->dev_reservation_lock); | 2487 | spin_lock(&dev->dev_reservation_lock); |
2488 | pr_res_holder = dev->dev_pr_res_holder; | 2488 | pr_res_holder = dev->dev_pr_res_holder; |
2489 | if ((pr_res_holder)) { | 2489 | if (pr_res_holder) { |
2490 | /* | 2490 | /* |
2491 | * From spc4r17 Section 5.7.9: Reserving: | 2491 | * From spc4r17 Section 5.7.9: Reserving: |
2492 | * | 2492 | * |
@@ -4030,7 +4030,7 @@ static int core_scsi3_pri_read_reservation(struct se_cmd *cmd) | |||
4030 | 4030 | ||
4031 | spin_lock(&se_dev->dev_reservation_lock); | 4031 | spin_lock(&se_dev->dev_reservation_lock); |
4032 | pr_reg = se_dev->dev_pr_res_holder; | 4032 | pr_reg = se_dev->dev_pr_res_holder; |
4033 | if ((pr_reg)) { | 4033 | if (pr_reg) { |
4034 | /* | 4034 | /* |
4035 | * Set the hardcoded Additional Length | 4035 | * Set the hardcoded Additional Length |
4036 | */ | 4036 | */ |
diff --git a/drivers/target/target_core_pscsi.c b/drivers/target/target_core_pscsi.c index 4ce2cf642fce..6e32ff6f2fa0 100644 --- a/drivers/target/target_core_pscsi.c +++ b/drivers/target/target_core_pscsi.c | |||
@@ -35,8 +35,10 @@ | |||
35 | #include <linux/spinlock.h> | 35 | #include <linux/spinlock.h> |
36 | #include <linux/genhd.h> | 36 | #include <linux/genhd.h> |
37 | #include <linux/cdrom.h> | 37 | #include <linux/cdrom.h> |
38 | #include <linux/file.h> | 38 | #include <linux/ratelimit.h> |
39 | #include <linux/module.h> | 39 | #include <linux/module.h> |
40 | #include <asm/unaligned.h> | ||
41 | |||
40 | #include <scsi/scsi.h> | 42 | #include <scsi/scsi.h> |
41 | #include <scsi/scsi_device.h> | 43 | #include <scsi/scsi_device.h> |
42 | #include <scsi/scsi_cmnd.h> | 44 | #include <scsi/scsi_cmnd.h> |
@@ -46,12 +48,14 @@ | |||
46 | #include <target/target_core_base.h> | 48 | #include <target/target_core_base.h> |
47 | #include <target/target_core_backend.h> | 49 | #include <target/target_core_backend.h> |
48 | 50 | ||
51 | #include "target_core_alua.h" | ||
49 | #include "target_core_pscsi.h" | 52 | #include "target_core_pscsi.h" |
50 | 53 | ||
51 | #define ISPRINT(a) ((a >= ' ') && (a <= '~')) | 54 | #define ISPRINT(a) ((a >= ' ') && (a <= '~')) |
52 | 55 | ||
53 | static struct se_subsystem_api pscsi_template; | 56 | static struct se_subsystem_api pscsi_template; |
54 | 57 | ||
58 | static int pscsi_execute_cmd(struct se_cmd *cmd); | ||
55 | static void pscsi_req_done(struct request *, int); | 59 | static void pscsi_req_done(struct request *, int); |
56 | 60 | ||
57 | /* pscsi_attach_hba(): | 61 | /* pscsi_attach_hba(): |
@@ -1019,9 +1023,79 @@ fail: | |||
1019 | return -ENOMEM; | 1023 | return -ENOMEM; |
1020 | } | 1024 | } |
1021 | 1025 | ||
1022 | static int pscsi_execute_cmd(struct se_cmd *cmd, struct scatterlist *sgl, | 1026 | /* |
1023 | u32 sgl_nents, enum dma_data_direction data_direction) | 1027 | * Clear a LUN set in the CDB if the initiator talking to us spoke
1028 | * an old standards version, as we can't assume the underlying device
1029 | * won't choke on it.
1030 | */ | ||
1031 | static inline void pscsi_clear_cdb_lun(unsigned char *cdb) | ||
1032 | { | ||
1033 | switch (cdb[0]) { | ||
1034 | case READ_10: /* SBC - RDProtect */ | ||
1035 | case READ_12: /* SBC - RDProtect */ | ||
1036 | case READ_16: /* SBC - RDProtect */ | ||
1037 | case SEND_DIAGNOSTIC: /* SPC - SELF-TEST Code */ | ||
1038 | case VERIFY: /* SBC - VRProtect */ | ||
1039 | case VERIFY_16: /* SBC - VRProtect */ | ||
1040 | case WRITE_VERIFY: /* SBC - VRProtect */ | ||
1041 | case WRITE_VERIFY_12: /* SBC - VRProtect */ | ||
1042 | case MAINTENANCE_IN: /* SPC - Parameter Data Format for SA RTPG */ | ||
1043 | break; | ||
1044 | default: | ||
1045 | cdb[1] &= 0x1f; /* clear logical unit number */ | ||
1046 | break; | ||
1047 | } | ||
1048 | } | ||
1049 | |||
1050 | static int pscsi_parse_cdb(struct se_cmd *cmd) | ||
1051 | { | ||
1052 | unsigned char *cdb = cmd->t_task_cdb; | ||
1053 | unsigned int dummy_size; | ||
1054 | int ret; | ||
1055 | |||
1056 | if (cmd->se_cmd_flags & SCF_BIDI) { | ||
1057 | cmd->se_cmd_flags |= SCF_SCSI_CDB_EXCEPTION; | ||
1058 | cmd->scsi_sense_reason = TCM_UNSUPPORTED_SCSI_OPCODE; | ||
1059 | return -EINVAL; | ||
1060 | } | ||
1061 | |||
1062 | pscsi_clear_cdb_lun(cdb); | ||
1063 | |||
1064 | /* | ||
1065 | * For REPORT LUNS we always need to emulate the response; for everything
1066 | * else the default for pSCSI is to pass the command to the underlying | ||
1067 | * LLD / physical hardware. | ||
1068 | */ | ||
1069 | switch (cdb[0]) { | ||
1070 | case REPORT_LUNS: | ||
1071 | ret = spc_parse_cdb(cmd, &dummy_size); | ||
1072 | if (ret) | ||
1073 | return ret; | ||
1074 | break; | ||
1075 | case READ_6: | ||
1076 | case READ_10: | ||
1077 | case READ_12: | ||
1078 | case READ_16: | ||
1079 | case WRITE_6: | ||
1080 | case WRITE_10: | ||
1081 | case WRITE_12: | ||
1082 | case WRITE_16: | ||
1083 | case WRITE_VERIFY: | ||
1084 | cmd->se_cmd_flags |= SCF_SCSI_DATA_CDB; | ||
1085 | /* FALLTHROUGH */
1086 | default: | ||
1087 | cmd->execute_cmd = pscsi_execute_cmd; | ||
1088 | break; | ||
1089 | } | ||
1090 | |||
1091 | return 0; | ||
1092 | } | ||
1093 | |||
1094 | static int pscsi_execute_cmd(struct se_cmd *cmd) | ||
1024 | { | 1095 | { |
1096 | struct scatterlist *sgl = cmd->t_data_sg; | ||
1097 | u32 sgl_nents = cmd->t_data_nents; | ||
1098 | enum dma_data_direction data_direction = cmd->data_direction; | ||
1025 | struct pscsi_dev_virt *pdv = cmd->se_dev->dev_ptr; | 1099 | struct pscsi_dev_virt *pdv = cmd->se_dev->dev_ptr; |
1026 | struct pscsi_plugin_task *pt; | 1100 | struct pscsi_plugin_task *pt; |
1027 | struct request *req; | 1101 | struct request *req; |
@@ -1042,7 +1116,7 @@ static int pscsi_execute_cmd(struct se_cmd *cmd, struct scatterlist *sgl, | |||
1042 | memcpy(pt->pscsi_cdb, cmd->t_task_cdb, | 1116 | memcpy(pt->pscsi_cdb, cmd->t_task_cdb, |
1043 | scsi_command_size(cmd->t_task_cdb)); | 1117 | scsi_command_size(cmd->t_task_cdb)); |
1044 | 1118 | ||
1045 | if (cmd->se_cmd_flags & SCF_SCSI_NON_DATA_CDB) { | 1119 | if (!sgl) { |
1046 | req = blk_get_request(pdv->pdv_sd->request_queue, | 1120 | req = blk_get_request(pdv->pdv_sd->request_queue, |
1047 | (data_direction == DMA_TO_DEVICE), | 1121 | (data_direction == DMA_TO_DEVICE), |
1048 | GFP_KERNEL); | 1122 | GFP_KERNEL); |
@@ -1188,7 +1262,7 @@ static struct se_subsystem_api pscsi_template = { | |||
1188 | .create_virtdevice = pscsi_create_virtdevice, | 1262 | .create_virtdevice = pscsi_create_virtdevice, |
1189 | .free_device = pscsi_free_device, | 1263 | .free_device = pscsi_free_device, |
1190 | .transport_complete = pscsi_transport_complete, | 1264 | .transport_complete = pscsi_transport_complete, |
1191 | .execute_cmd = pscsi_execute_cmd, | 1265 | .parse_cdb = pscsi_parse_cdb, |
1192 | .check_configfs_dev_params = pscsi_check_configfs_dev_params, | 1266 | .check_configfs_dev_params = pscsi_check_configfs_dev_params, |
1193 | .set_configfs_dev_params = pscsi_set_configfs_dev_params, | 1267 | .set_configfs_dev_params = pscsi_set_configfs_dev_params, |
1194 | .show_configfs_dev_params = pscsi_show_configfs_dev_params, | 1268 | .show_configfs_dev_params = pscsi_show_configfs_dev_params, |
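The net effect of pscsi_parse_cdb() above is that only REPORT LUNS is emulated; everything else is passed through to the physical device, with the READ/WRITE family additionally flagged as data CDBs. Illustrative outcomes for a few example opcodes (assumed inputs, condensed from the code above):

    /*   REPORT_LUNS (0xa0): routed through spc_parse_cdb() emulation,
     *                       never reaches the underlying LLD.
     *   WRITE_10    (0x2a): SCF_SCSI_DATA_CDB set, then falls through
     *                       to cmd->execute_cmd = pscsi_execute_cmd.
     *   TEST UNIT READY   : default case; passed straight through, and
     *                       since cmd->t_data_sg is NULL the non-data
     *                       blk_get_request() path is taken (replacing
     *                       the old SCF_SCSI_NON_DATA_CDB test).
     */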
diff --git a/drivers/target/target_core_rd.c b/drivers/target/target_core_rd.c index d0ceb873c0e5..d00bbe33ff8b 100644 --- a/drivers/target/target_core_rd.c +++ b/drivers/target/target_core_rd.c | |||
@@ -284,9 +284,11 @@ static struct rd_dev_sg_table *rd_get_sg_table(struct rd_dev *rd_dev, u32 page) | |||
284 | return NULL; | 284 | return NULL; |
285 | } | 285 | } |
286 | 286 | ||
287 | static int rd_execute_cmd(struct se_cmd *cmd, struct scatterlist *sgl, | 287 | static int rd_execute_rw(struct se_cmd *cmd) |
288 | u32 sgl_nents, enum dma_data_direction data_direction) | ||
289 | { | 288 | { |
289 | struct scatterlist *sgl = cmd->t_data_sg; | ||
290 | u32 sgl_nents = cmd->t_data_nents; | ||
291 | enum dma_data_direction data_direction = cmd->data_direction; | ||
290 | struct se_device *se_dev = cmd->se_dev; | 292 | struct se_device *se_dev = cmd->se_dev; |
291 | struct rd_dev *dev = se_dev->dev_ptr; | 293 | struct rd_dev *dev = se_dev->dev_ptr; |
292 | struct rd_dev_sg_table *table; | 294 | struct rd_dev_sg_table *table; |
@@ -460,6 +462,15 @@ static sector_t rd_get_blocks(struct se_device *dev) | |||
460 | return blocks_long; | 462 | return blocks_long; |
461 | } | 463 | } |
462 | 464 | ||
465 | static struct spc_ops rd_spc_ops = { | ||
466 | .execute_rw = rd_execute_rw, | ||
467 | }; | ||
468 | |||
469 | static int rd_parse_cdb(struct se_cmd *cmd) | ||
470 | { | ||
471 | return sbc_parse_cdb(cmd, &rd_spc_ops); | ||
472 | } | ||
473 | |||
463 | static struct se_subsystem_api rd_mcp_template = { | 474 | static struct se_subsystem_api rd_mcp_template = { |
464 | .name = "rd_mcp", | 475 | .name = "rd_mcp", |
465 | .transport_type = TRANSPORT_PLUGIN_VHBA_VDEV, | 476 | .transport_type = TRANSPORT_PLUGIN_VHBA_VDEV, |
@@ -468,7 +479,7 @@ static struct se_subsystem_api rd_mcp_template = { | |||
468 | .allocate_virtdevice = rd_allocate_virtdevice, | 479 | .allocate_virtdevice = rd_allocate_virtdevice, |
469 | .create_virtdevice = rd_create_virtdevice, | 480 | .create_virtdevice = rd_create_virtdevice, |
470 | .free_device = rd_free_device, | 481 | .free_device = rd_free_device, |
471 | .execute_cmd = rd_execute_cmd, | 482 | .parse_cdb = rd_parse_cdb, |
472 | .check_configfs_dev_params = rd_check_configfs_dev_params, | 483 | .check_configfs_dev_params = rd_check_configfs_dev_params, |
473 | .set_configfs_dev_params = rd_set_configfs_dev_params, | 484 | .set_configfs_dev_params = rd_set_configfs_dev_params, |
474 | .show_configfs_dev_params = rd_show_configfs_dev_params, | 485 | .show_configfs_dev_params = rd_show_configfs_dev_params, |
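Note that rd_spc_ops fills in only execute_rw, which is sufficient: as the sbc_parse_cdb() implementation below shows, the optional spc_ops hooks are probed before use, so leaving them NULL cleanly fails the corresponding opcodes as unsupported (condensed from target_core_sbc.c below):

    case SYNCHRONIZE_CACHE:
    case SYNCHRONIZE_CACHE_16:
            if (!ops->execute_sync_cache)
                    goto out_unsupported_cdb;   /* rd_mcp lands here */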
diff --git a/drivers/target/target_core_sbc.c b/drivers/target/target_core_sbc.c new file mode 100644 index 000000000000..a9dd9469e3bd --- /dev/null +++ b/drivers/target/target_core_sbc.c | |||
@@ -0,0 +1,581 @@ | |||
1 | /* | ||
2 | * SCSI Block Commands (SBC) parsing and emulation. | ||
3 | * | ||
4 | * Copyright (c) 2002, 2003, 2004, 2005 PyX Technologies, Inc. | ||
5 | * Copyright (c) 2005, 2006, 2007 SBE, Inc. | ||
6 | * Copyright (c) 2007-2010 Rising Tide Systems | ||
7 | * Copyright (c) 2008-2010 Linux-iSCSI.org | ||
8 | * | ||
9 | * Nicholas A. Bellinger <nab@kernel.org> | ||
10 | * | ||
11 | * This program is free software; you can redistribute it and/or modify | ||
12 | * it under the terms of the GNU General Public License as published by | ||
13 | * the Free Software Foundation; either version 2 of the License, or | ||
14 | * (at your option) any later version. | ||
15 | * | ||
16 | * This program is distributed in the hope that it will be useful, | ||
17 | * but WITHOUT ANY WARRANTY; without even the implied warranty of | ||
18 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the | ||
19 | * GNU General Public License for more details. | ||
20 | * | ||
21 | * You should have received a copy of the GNU General Public License | ||
22 | * along with this program; if not, write to the Free Software | ||
23 | * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. | ||
24 | */ | ||
25 | |||
26 | #include <linux/kernel.h> | ||
27 | #include <linux/module.h> | ||
28 | #include <linux/ratelimit.h> | ||
29 | #include <asm/unaligned.h> | ||
30 | #include <scsi/scsi.h> | ||
31 | |||
32 | #include <target/target_core_base.h> | ||
33 | #include <target/target_core_backend.h> | ||
34 | #include <target/target_core_fabric.h> | ||
35 | |||
36 | #include "target_core_internal.h" | ||
37 | #include "target_core_ua.h" | ||
38 | |||
39 | |||
40 | static int sbc_emulate_readcapacity(struct se_cmd *cmd) | ||
41 | { | ||
42 | struct se_device *dev = cmd->se_dev; | ||
43 | unsigned char *buf; | ||
44 | unsigned long long blocks_long = dev->transport->get_blocks(dev); | ||
45 | u32 blocks; | ||
46 | |||
47 | if (blocks_long >= 0x00000000ffffffff) | ||
48 | blocks = 0xffffffff; | ||
49 | else | ||
50 | blocks = (u32)blocks_long; | ||
51 | |||
52 | buf = transport_kmap_data_sg(cmd); | ||
53 | |||
54 | buf[0] = (blocks >> 24) & 0xff; | ||
55 | buf[1] = (blocks >> 16) & 0xff; | ||
56 | buf[2] = (blocks >> 8) & 0xff; | ||
57 | buf[3] = blocks & 0xff; | ||
58 | buf[4] = (dev->se_sub_dev->se_dev_attrib.block_size >> 24) & 0xff; | ||
59 | buf[5] = (dev->se_sub_dev->se_dev_attrib.block_size >> 16) & 0xff; | ||
60 | buf[6] = (dev->se_sub_dev->se_dev_attrib.block_size >> 8) & 0xff; | ||
61 | buf[7] = dev->se_sub_dev->se_dev_attrib.block_size & 0xff; | ||
62 | |||
63 | transport_kunmap_data_sg(cmd); | ||
64 | |||
65 | target_complete_cmd(cmd, GOOD); | ||
66 | return 0; | ||
67 | } | ||
68 | |||
69 | static int sbc_emulate_readcapacity_16(struct se_cmd *cmd) | ||
70 | { | ||
71 | struct se_device *dev = cmd->se_dev; | ||
72 | unsigned char *buf; | ||
73 | unsigned long long blocks = dev->transport->get_blocks(dev); | ||
74 | |||
75 | buf = transport_kmap_data_sg(cmd); | ||
76 | |||
77 | buf[0] = (blocks >> 56) & 0xff; | ||
78 | buf[1] = (blocks >> 48) & 0xff; | ||
79 | buf[2] = (blocks >> 40) & 0xff; | ||
80 | buf[3] = (blocks >> 32) & 0xff; | ||
81 | buf[4] = (blocks >> 24) & 0xff; | ||
82 | buf[5] = (blocks >> 16) & 0xff; | ||
83 | buf[6] = (blocks >> 8) & 0xff; | ||
84 | buf[7] = blocks & 0xff; | ||
85 | buf[8] = (dev->se_sub_dev->se_dev_attrib.block_size >> 24) & 0xff; | ||
86 | buf[9] = (dev->se_sub_dev->se_dev_attrib.block_size >> 16) & 0xff; | ||
87 | buf[10] = (dev->se_sub_dev->se_dev_attrib.block_size >> 8) & 0xff; | ||
88 | buf[11] = dev->se_sub_dev->se_dev_attrib.block_size & 0xff; | ||
89 | /* | ||
90 | * Set Thin Provisioning Enable bit following sbc3r22 in section | ||
91 | * READ CAPACITY (16) byte 14 if emulate_tpu or emulate_tpws is enabled. | ||
92 | */ | ||
93 | if (dev->se_sub_dev->se_dev_attrib.emulate_tpu || dev->se_sub_dev->se_dev_attrib.emulate_tpws) | ||
94 | buf[14] = 0x80; | ||
95 | |||
96 | transport_kunmap_data_sg(cmd); | ||
97 | |||
98 | target_complete_cmd(cmd, GOOD); | ||
99 | return 0; | ||
100 | } | ||
101 | |||
102 | int spc_get_write_same_sectors(struct se_cmd *cmd) | ||
103 | { | ||
104 | u32 num_blocks; | ||
105 | |||
106 | if (cmd->t_task_cdb[0] == WRITE_SAME) | ||
107 | num_blocks = get_unaligned_be16(&cmd->t_task_cdb[7]); | ||
108 | else if (cmd->t_task_cdb[0] == WRITE_SAME_16) | ||
109 | num_blocks = get_unaligned_be32(&cmd->t_task_cdb[10]); | ||
110 | else /* WRITE_SAME_32 via VARIABLE_LENGTH_CMD */ | ||
111 | num_blocks = get_unaligned_be32(&cmd->t_task_cdb[28]); | ||
112 | |||
113 | /* | ||
114 | * Use the explicit range when non-zero is supplied, otherwise calculate
115 | * the remaining range based on ->get_blocks() - starting LBA. | ||
116 | */ | ||
117 | if (num_blocks) | ||
118 | return num_blocks; | ||
119 | |||
120 | return cmd->se_dev->transport->get_blocks(cmd->se_dev) - | ||
121 | cmd->t_task_lba + 1; | ||
122 | } | ||
123 | EXPORT_SYMBOL(spc_get_write_same_sectors); | ||
124 | |||
125 | static int sbc_emulate_verify(struct se_cmd *cmd) | ||
126 | { | ||
127 | target_complete_cmd(cmd, GOOD); | ||
128 | return 0; | ||
129 | } | ||
130 | |||
131 | static inline u32 sbc_get_size(struct se_cmd *cmd, u32 sectors) | ||
132 | { | ||
133 | return cmd->se_dev->se_sub_dev->se_dev_attrib.block_size * sectors; | ||
134 | } | ||
135 | |||
136 | static int sbc_check_valid_sectors(struct se_cmd *cmd) | ||
137 | { | ||
138 | struct se_device *dev = cmd->se_dev; | ||
139 | unsigned long long end_lba; | ||
140 | u32 sectors; | ||
141 | |||
142 | sectors = cmd->data_length / dev->se_sub_dev->se_dev_attrib.block_size; | ||
143 | end_lba = dev->transport->get_blocks(dev) + 1; | ||
144 | |||
145 | if (cmd->t_task_lba + sectors > end_lba) { | ||
146 | pr_err("target: lba %llu, sectors %u exceeds end lba %llu\n", | ||
147 | cmd->t_task_lba, sectors, end_lba); | ||
148 | return -EINVAL; | ||
149 | } | ||
150 | |||
151 | return 0; | ||
152 | } | ||
153 | |||
154 | static inline u32 transport_get_sectors_6(unsigned char *cdb) | ||
155 | { | ||
156 | /* | ||
157 | * Use 8-bit sector value. SBC-3 says: | ||
158 | * | ||
159 | * A TRANSFER LENGTH field set to zero specifies that 256 | ||
160 | * logical blocks shall be written. Any other value | ||
161 | * specifies the number of logical blocks that shall be | ||
162 | * written. | ||
163 | */ | ||
164 | return cdb[4] ? : 256; | ||
165 | } | ||
166 | |||
167 | static inline u32 transport_get_sectors_10(unsigned char *cdb) | ||
168 | { | ||
169 | return (u32)(cdb[7] << 8) + cdb[8]; | ||
170 | } | ||
171 | |||
172 | static inline u32 transport_get_sectors_12(unsigned char *cdb) | ||
173 | { | ||
174 | return (u32)(cdb[6] << 24) + (cdb[7] << 16) + (cdb[8] << 8) + cdb[9]; | ||
175 | } | ||
176 | |||
177 | static inline u32 transport_get_sectors_16(unsigned char *cdb) | ||
178 | { | ||
179 | return (u32)(cdb[10] << 24) + (cdb[11] << 16) + | ||
180 | (cdb[12] << 8) + cdb[13]; | ||
181 | } | ||
182 | |||
183 | /* | ||
184 | * Used for VARIABLE_LENGTH_CDB WRITE_32 and READ_32 variants | ||
185 | */ | ||
186 | static inline u32 transport_get_sectors_32(unsigned char *cdb) | ||
187 | { | ||
188 | return (u32)(cdb[28] << 24) + (cdb[29] << 16) + | ||
189 | (cdb[30] << 8) + cdb[31]; | ||
190 | |||
191 | } | ||
192 | |||
193 | static inline u32 transport_lba_21(unsigned char *cdb) | ||
194 | { | ||
195 | return ((cdb[1] & 0x1f) << 16) | (cdb[2] << 8) | cdb[3]; | ||
196 | } | ||
197 | |||
198 | static inline u32 transport_lba_32(unsigned char *cdb) | ||
199 | { | ||
200 | return (cdb[2] << 24) | (cdb[3] << 16) | (cdb[4] << 8) | cdb[5]; | ||
201 | } | ||
202 | |||
203 | static inline unsigned long long transport_lba_64(unsigned char *cdb) | ||
204 | { | ||
205 | unsigned int __v1, __v2; | ||
206 | |||
207 | __v1 = (cdb[2] << 24) | (cdb[3] << 16) | (cdb[4] << 8) | cdb[5]; | ||
208 | __v2 = (cdb[6] << 24) | (cdb[7] << 16) | (cdb[8] << 8) | cdb[9]; | ||
209 | |||
210 | return ((unsigned long long)__v2) | (unsigned long long)__v1 << 32; | ||
211 | } | ||
212 | |||
213 | /* | ||
214 | * For VARIABLE_LENGTH_CDB w/ 32 byte extended CDBs | ||
215 | */ | ||
216 | static inline unsigned long long transport_lba_64_ext(unsigned char *cdb) | ||
217 | { | ||
218 | unsigned int __v1, __v2; | ||
219 | |||
220 | __v1 = (cdb[12] << 24) | (cdb[13] << 16) | (cdb[14] << 8) | cdb[15]; | ||
221 | __v2 = (cdb[16] << 24) | (cdb[17] << 16) | (cdb[18] << 8) | cdb[19]; | ||
222 | |||
223 | return ((unsigned long long)__v2) | (unsigned long long)__v1 << 32; | ||
224 | } | ||
225 | |||
226 | static int sbc_write_same_supported(struct se_device *dev, | ||
227 | unsigned char *flags) | ||
228 | { | ||
229 | if ((flags[0] & 0x04) || (flags[0] & 0x02)) { | ||
230 | pr_err("WRITE_SAME PBDATA and LBDATA" | ||
231 | " bits not supported for Block Discard" | ||
232 | " Emulation\n"); | ||
233 | return -ENOSYS; | ||
234 | } | ||
235 | |||
236 | /* | ||
237 | * Currently for the emulated case we only accept | ||
238 | * tpws with the UNMAP=1 bit set. | ||
239 | */ | ||
240 | if (!(flags[0] & 0x08)) { | ||
241 | pr_err("WRITE_SAME w/o UNMAP bit not" | ||
242 | " supported for Block Discard Emulation\n"); | ||
243 | return -ENOSYS; | ||
244 | } | ||
245 | |||
246 | return 0; | ||
247 | } | ||
248 | |||
249 | static void xdreadwrite_callback(struct se_cmd *cmd) | ||
250 | { | ||
251 | unsigned char *buf, *addr; | ||
252 | struct scatterlist *sg; | ||
253 | unsigned int offset; | ||
254 | int i; | ||
255 | int count; | ||
256 | /* | ||
257 | * From sbc3r22.pdf section 5.48 XDWRITEREAD (10) command | ||
258 | * | ||
259 | * 1) read the specified logical block(s); | ||
260 | * 2) transfer logical blocks from the data-out buffer; | ||
261 | * 3) XOR the logical blocks transferred from the data-out buffer with | ||
262 | * the logical blocks read, storing the resulting XOR data in a buffer; | ||
263 | * 4) if the DISABLE WRITE bit is set to zero, then write the logical | ||
264 | * blocks transferred from the data-out buffer; and | ||
265 | * 5) transfer the resulting XOR data to the data-in buffer. | ||
266 | */ | ||
267 | buf = kmalloc(cmd->data_length, GFP_KERNEL); | ||
268 | if (!buf) { | ||
269 | pr_err("Unable to allocate xor_callback buf\n"); | ||
270 | return; | ||
271 | } | ||
272 | /* | ||
273 | * Copy the scatterlist WRITE buffer located at cmd->t_data_sg | ||
274 | * into the locally allocated *buf | ||
275 | */ | ||
276 | sg_copy_to_buffer(cmd->t_data_sg, | ||
277 | cmd->t_data_nents, | ||
278 | buf, | ||
279 | cmd->data_length); | ||
280 | |||
281 | /* | ||
282 | * Now perform the XOR against the BIDI read memory located at | ||
283 | * cmd->t_mem_bidi_list | ||
284 | */ | ||
285 | |||
286 | offset = 0; | ||
287 | for_each_sg(cmd->t_bidi_data_sg, sg, cmd->t_bidi_data_nents, count) { | ||
288 | addr = kmap_atomic(sg_page(sg)); | ||
289 | if (!addr) | ||
290 | goto out; | ||
291 | |||
292 | for (i = 0; i < sg->length; i++) | ||
293 | *(addr + sg->offset + i) ^= *(buf + offset + i); | ||
294 | |||
295 | offset += sg->length; | ||
296 | kunmap_atomic(addr); | ||
297 | } | ||
298 | |||
299 | out: | ||
300 | kfree(buf); | ||
301 | } | ||
302 | |||
303 | int sbc_parse_cdb(struct se_cmd *cmd, struct spc_ops *ops) | ||
304 | { | ||
305 | struct se_subsystem_dev *su_dev = cmd->se_dev->se_sub_dev; | ||
306 | struct se_device *dev = cmd->se_dev; | ||
307 | unsigned char *cdb = cmd->t_task_cdb; | ||
308 | unsigned int size; | ||
309 | u32 sectors = 0; | ||
310 | int ret; | ||
311 | |||
312 | switch (cdb[0]) { | ||
313 | case READ_6: | ||
314 | sectors = transport_get_sectors_6(cdb); | ||
315 | cmd->t_task_lba = transport_lba_21(cdb); | ||
316 | cmd->se_cmd_flags |= SCF_SCSI_DATA_CDB; | ||
317 | cmd->execute_cmd = ops->execute_rw; | ||
318 | break; | ||
319 | case READ_10: | ||
320 | sectors = transport_get_sectors_10(cdb); | ||
321 | cmd->t_task_lba = transport_lba_32(cdb); | ||
322 | cmd->se_cmd_flags |= SCF_SCSI_DATA_CDB; | ||
323 | cmd->execute_cmd = ops->execute_rw; | ||
324 | break; | ||
325 | case READ_12: | ||
326 | sectors = transport_get_sectors_12(cdb); | ||
327 | cmd->t_task_lba = transport_lba_32(cdb); | ||
328 | cmd->se_cmd_flags |= SCF_SCSI_DATA_CDB; | ||
329 | cmd->execute_cmd = ops->execute_rw; | ||
330 | break; | ||
331 | case READ_16: | ||
332 | sectors = transport_get_sectors_16(cdb); | ||
333 | cmd->t_task_lba = transport_lba_64(cdb); | ||
334 | cmd->se_cmd_flags |= SCF_SCSI_DATA_CDB; | ||
335 | cmd->execute_cmd = ops->execute_rw; | ||
336 | break; | ||
337 | case WRITE_6: | ||
338 | sectors = transport_get_sectors_6(cdb); | ||
339 | cmd->t_task_lba = transport_lba_21(cdb); | ||
340 | cmd->se_cmd_flags |= SCF_SCSI_DATA_CDB; | ||
341 | cmd->execute_cmd = ops->execute_rw; | ||
342 | break; | ||
343 | case WRITE_10: | ||
344 | case WRITE_VERIFY: | ||
345 | sectors = transport_get_sectors_10(cdb); | ||
346 | cmd->t_task_lba = transport_lba_32(cdb); | ||
347 | if (cdb[1] & 0x8) | ||
348 | cmd->se_cmd_flags |= SCF_FUA; | ||
349 | cmd->se_cmd_flags |= SCF_SCSI_DATA_CDB; | ||
350 | cmd->execute_cmd = ops->execute_rw; | ||
351 | break; | ||
352 | case WRITE_12: | ||
353 | sectors = transport_get_sectors_12(cdb); | ||
354 | cmd->t_task_lba = transport_lba_32(cdb); | ||
355 | if (cdb[1] & 0x8) | ||
356 | cmd->se_cmd_flags |= SCF_FUA; | ||
357 | cmd->se_cmd_flags |= SCF_SCSI_DATA_CDB; | ||
358 | cmd->execute_cmd = ops->execute_rw; | ||
359 | break; | ||
360 | case WRITE_16: | ||
361 | sectors = transport_get_sectors_16(cdb); | ||
362 | cmd->t_task_lba = transport_lba_64(cdb); | ||
363 | if (cdb[1] & 0x8) | ||
364 | cmd->se_cmd_flags |= SCF_FUA; | ||
365 | cmd->se_cmd_flags |= SCF_SCSI_DATA_CDB; | ||
366 | cmd->execute_cmd = ops->execute_rw; | ||
367 | break; | ||
368 | case XDWRITEREAD_10: | ||
369 | if ((cmd->data_direction != DMA_TO_DEVICE) || | ||
370 | !(cmd->se_cmd_flags & SCF_BIDI)) | ||
371 | goto out_invalid_cdb_field; | ||
372 | sectors = transport_get_sectors_10(cdb); | ||
373 | |||
374 | cmd->t_task_lba = transport_lba_32(cdb); | ||
375 | cmd->se_cmd_flags |= SCF_SCSI_DATA_CDB; | ||
376 | |||
377 | /* | ||
378 | * Set up the BIDI XOR callback to be run after I/O completion.
379 | */ | ||
380 | cmd->execute_cmd = ops->execute_rw; | ||
381 | cmd->transport_complete_callback = &xdreadwrite_callback; | ||
382 | if (cdb[1] & 0x8) | ||
383 | cmd->se_cmd_flags |= SCF_FUA; | ||
384 | break; | ||
385 | case VARIABLE_LENGTH_CMD: | ||
386 | { | ||
387 | u16 service_action = get_unaligned_be16(&cdb[8]); | ||
388 | switch (service_action) { | ||
389 | case XDWRITEREAD_32: | ||
390 | sectors = transport_get_sectors_32(cdb); | ||
391 | |||
392 | /* | ||
393 | * Use WRITE_32 and READ_32 opcodes for the emulated | ||
394 | * XDWRITE_READ_32 logic. | ||
395 | */ | ||
396 | cmd->t_task_lba = transport_lba_64_ext(cdb); | ||
397 | cmd->se_cmd_flags |= SCF_SCSI_DATA_CDB; | ||
398 | |||
399 | /* | ||
400 | * Set up the BIDI XOR callback to be run after I/O
401 | * completion. | ||
402 | */ | ||
403 | cmd->execute_cmd = ops->execute_rw; | ||
404 | cmd->transport_complete_callback = &xdreadwrite_callback; | ||
405 | if (cdb[1] & 0x8) | ||
406 | cmd->se_cmd_flags |= SCF_FUA; | ||
407 | break; | ||
408 | case WRITE_SAME_32: | ||
409 | if (!ops->execute_write_same) | ||
410 | goto out_unsupported_cdb; | ||
411 | |||
412 | sectors = transport_get_sectors_32(cdb); | ||
413 | if (!sectors) { | ||
414 | pr_err("WSNZ=1, WRITE_SAME w/sectors=0 not" | ||
415 | " supported\n"); | ||
416 | goto out_invalid_cdb_field; | ||
417 | } | ||
418 | |||
419 | size = sbc_get_size(cmd, 1); | ||
420 | cmd->t_task_lba = get_unaligned_be64(&cdb[12]); | ||
421 | |||
422 | if (sbc_write_same_supported(dev, &cdb[10]) < 0) | ||
423 | goto out_unsupported_cdb; | ||
424 | cmd->execute_cmd = ops->execute_write_same; | ||
425 | break; | ||
426 | default: | ||
427 | pr_err("VARIABLE_LENGTH_CMD service action" | ||
428 | " 0x%04x not supported\n", service_action); | ||
429 | goto out_unsupported_cdb; | ||
430 | } | ||
431 | break; | ||
432 | } | ||
433 | case READ_CAPACITY: | ||
434 | size = READ_CAP_LEN; | ||
435 | cmd->execute_cmd = sbc_emulate_readcapacity; | ||
436 | break; | ||
437 | case SERVICE_ACTION_IN: | ||
438 | switch (cmd->t_task_cdb[1] & 0x1f) { | ||
439 | case SAI_READ_CAPACITY_16: | ||
440 | cmd->execute_cmd = sbc_emulate_readcapacity_16; | ||
441 | break; | ||
442 | default: | ||
443 | pr_err("Unsupported SA: 0x%02x\n", | ||
444 | cmd->t_task_cdb[1] & 0x1f); | ||
445 | goto out_invalid_cdb_field; | ||
446 | } | ||
447 | size = (cdb[10] << 24) | (cdb[11] << 16) | | ||
448 | (cdb[12] << 8) | cdb[13]; | ||
449 | break; | ||
450 | case SYNCHRONIZE_CACHE: | ||
451 | case SYNCHRONIZE_CACHE_16: | ||
452 | if (!ops->execute_sync_cache) | ||
453 | goto out_unsupported_cdb; | ||
454 | |||
455 | /* | ||
456 | * Extract LBA and range to be flushed for emulated SYNCHRONIZE_CACHE | ||
457 | */ | ||
458 | if (cdb[0] == SYNCHRONIZE_CACHE) { | ||
459 | sectors = transport_get_sectors_10(cdb); | ||
460 | cmd->t_task_lba = transport_lba_32(cdb); | ||
461 | } else { | ||
462 | sectors = transport_get_sectors_16(cdb); | ||
463 | cmd->t_task_lba = transport_lba_64(cdb); | ||
464 | } | ||
465 | |||
466 | size = sbc_get_size(cmd, sectors); | ||
467 | |||
468 | /* | ||
469 | * Check that LBA + range does not extend past the end of the
470 | * device for the IBLOCK and FILEIO ->execute_sync_cache() backend calls
471 | */ | ||
472 | if (cmd->t_task_lba || sectors) { | ||
473 | if (sbc_check_valid_sectors(cmd) < 0) | ||
474 | goto out_invalid_cdb_field; | ||
475 | } | ||
476 | cmd->execute_cmd = ops->execute_sync_cache; | ||
477 | break; | ||
478 | case UNMAP: | ||
479 | if (!ops->execute_unmap) | ||
480 | goto out_unsupported_cdb; | ||
481 | |||
482 | size = get_unaligned_be16(&cdb[7]); | ||
483 | cmd->execute_cmd = ops->execute_unmap; | ||
484 | break; | ||
485 | case WRITE_SAME_16: | ||
486 | if (!ops->execute_write_same) | ||
487 | goto out_unsupported_cdb; | ||
488 | |||
489 | sectors = transport_get_sectors_16(cdb); | ||
490 | if (!sectors) { | ||
491 | pr_err("WSNZ=1, WRITE_SAME w/sectors=0 not supported\n"); | ||
492 | goto out_invalid_cdb_field; | ||
493 | } | ||
494 | |||
495 | size = sbc_get_size(cmd, 1); | ||
496 | cmd->t_task_lba = get_unaligned_be64(&cdb[2]); | ||
497 | |||
498 | if (sbc_write_same_supported(dev, &cdb[1]) < 0) | ||
499 | goto out_unsupported_cdb; | ||
500 | cmd->execute_cmd = ops->execute_write_same; | ||
501 | break; | ||
502 | case WRITE_SAME: | ||
503 | if (!ops->execute_write_same) | ||
504 | goto out_unsupported_cdb; | ||
505 | |||
506 | sectors = transport_get_sectors_10(cdb); | ||
507 | if (!sectors) { | ||
508 | pr_err("WSNZ=1, WRITE_SAME w/sectors=0 not supported\n"); | ||
509 | goto out_invalid_cdb_field; | ||
510 | } | ||
511 | |||
512 | size = sbc_get_size(cmd, 1); | ||
513 | cmd->t_task_lba = get_unaligned_be32(&cdb[2]); | ||
514 | |||
515 | /* | ||
516 | * Follow sbc3r26 with WRITE_SAME (10) and check for the existence
517 | * of the UNMAP bit (byte 1, bit 3) in place of the original reserved field
518 | */ | ||
519 | if (sbc_write_same_supported(dev, &cdb[1]) < 0) | ||
520 | goto out_unsupported_cdb; | ||
521 | cmd->execute_cmd = ops->execute_write_same; | ||
522 | break; | ||
523 | case VERIFY: | ||
524 | size = 0; | ||
525 | cmd->execute_cmd = sbc_emulate_verify; | ||
526 | break; | ||
527 | default: | ||
528 | ret = spc_parse_cdb(cmd, &size); | ||
529 | if (ret) | ||
530 | return ret; | ||
531 | } | ||
532 | |||
533 | /* reject any command that we don't have a handler for */ | ||
534 | if (!(cmd->se_cmd_flags & SCF_SCSI_DATA_CDB) && !cmd->execute_cmd) | ||
535 | goto out_unsupported_cdb; | ||
536 | |||
537 | if (cmd->se_cmd_flags & SCF_SCSI_DATA_CDB) { | ||
538 | unsigned long long end_lba; | ||
539 | |||
540 | if (sectors > su_dev->se_dev_attrib.fabric_max_sectors) { | ||
541 | printk_ratelimited(KERN_ERR "SCSI OP %02xh with too" | ||
542 | " big sectors %u exceeds fabric_max_sectors:" | ||
543 | " %u\n", cdb[0], sectors, | ||
544 | su_dev->se_dev_attrib.fabric_max_sectors); | ||
545 | goto out_invalid_cdb_field; | ||
546 | } | ||
547 | if (sectors > su_dev->se_dev_attrib.hw_max_sectors) { | ||
548 | printk_ratelimited(KERN_ERR "SCSI OP %02xh with too" | ||
549 | " big sectors %u exceeds backend hw_max_sectors:" | ||
550 | " %u\n", cdb[0], sectors, | ||
551 | su_dev->se_dev_attrib.hw_max_sectors); | ||
552 | goto out_invalid_cdb_field; | ||
553 | } | ||
554 | |||
555 | end_lba = dev->transport->get_blocks(dev) + 1; | ||
556 | if (cmd->t_task_lba + sectors > end_lba) { | ||
557 | pr_err("cmd exceeds last lba %llu " | ||
558 | "(lba %llu, sectors %u)\n", | ||
559 | end_lba, cmd->t_task_lba, sectors); | ||
560 | goto out_invalid_cdb_field; | ||
561 | } | ||
562 | |||
563 | size = sbc_get_size(cmd, sectors); | ||
564 | } | ||
565 | |||
566 | ret = target_cmd_size_check(cmd, size); | ||
567 | if (ret < 0) | ||
568 | return ret; | ||
569 | |||
570 | return 0; | ||
571 | |||
572 | out_unsupported_cdb: | ||
573 | cmd->se_cmd_flags |= SCF_SCSI_CDB_EXCEPTION; | ||
574 | cmd->scsi_sense_reason = TCM_UNSUPPORTED_SCSI_OPCODE; | ||
575 | return -EINVAL; | ||
576 | out_invalid_cdb_field: | ||
577 | cmd->se_cmd_flags |= SCF_SCSI_CDB_EXCEPTION; | ||
578 | cmd->scsi_sense_reason = TCM_INVALID_CDB_FIELD; | ||
579 | return -EINVAL; | ||
580 | } | ||
581 | EXPORT_SYMBOL(sbc_parse_cdb); | ||
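A worked pass through sbc_parse_cdb() ties the helpers above together. Assume an illustrative READ_10 CDB for 16 blocks at LBA 8 against a 512-byte-block device:

    /* cdb[] = { 0x28, 0x00, 0x00, 0x00, 0x00, 0x08,
     *           0x00, 0x00, 0x10, 0x00 };
     *
     *   transport_get_sectors_10(cdb) -> 0x0010     = 16 sectors
     *   transport_lba_32(cdb)         -> 0x00000008 = LBA 8
     *   cmd->execute_cmd              =  ops->execute_rw
     *   sbc_get_size(cmd, 16)         -> 16 * 512   = 8192 bytes
     *
     * The command is rejected up front if 16 exceeds fabric_max_sectors
     * or hw_max_sectors, or if LBA 8 + 16 sectors runs past
     * get_blocks(dev) + 1; otherwise target_cmd_size_check() verifies
     * the 8192-byte expectation against cmd->data_length.
     */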
diff --git a/drivers/target/target_core_cdb.c b/drivers/target/target_core_spc.c index 664f6e775d0e..4c861de538c9 100644 --- a/drivers/target/target_core_cdb.c +++ b/drivers/target/target_core_spc.c | |||
@@ -1,5 +1,5 @@ | |||
1 | /* | 1 | /* |
2 | * CDB emulation for non-READ/WRITE commands. | 2 | * SCSI Primary Commands (SPC) parsing and emulation. |
3 | * | 3 | * |
4 | * Copyright (c) 2002, 2003, 2004, 2005 PyX Technologies, Inc. | 4 | * Copyright (c) 2002, 2003, 2004, 2005 PyX Technologies, Inc. |
5 | * Copyright (c) 2005, 2006, 2007 SBE, Inc. | 5 | * Copyright (c) 2005, 2006, 2007 SBE, Inc. |
@@ -26,17 +26,21 @@ | |||
26 | #include <linux/kernel.h> | 26 | #include <linux/kernel.h> |
27 | #include <linux/module.h> | 27 | #include <linux/module.h> |
28 | #include <asm/unaligned.h> | 28 | #include <asm/unaligned.h> |
29 | |||
29 | #include <scsi/scsi.h> | 30 | #include <scsi/scsi.h> |
31 | #include <scsi/scsi_tcq.h> | ||
30 | 32 | ||
31 | #include <target/target_core_base.h> | 33 | #include <target/target_core_base.h> |
32 | #include <target/target_core_backend.h> | 34 | #include <target/target_core_backend.h> |
33 | #include <target/target_core_fabric.h> | 35 | #include <target/target_core_fabric.h> |
34 | 36 | ||
35 | #include "target_core_internal.h" | 37 | #include "target_core_internal.h" |
38 | #include "target_core_alua.h" | ||
39 | #include "target_core_pr.h" | ||
36 | #include "target_core_ua.h" | 40 | #include "target_core_ua.h" |
37 | 41 | ||
38 | static void | 42 | |
39 | target_fill_alua_data(struct se_port *port, unsigned char *buf) | 43 | static void spc_fill_alua_data(struct se_port *port, unsigned char *buf) |
40 | { | 44 | { |
41 | struct t10_alua_tg_pt_gp *tg_pt_gp; | 45 | struct t10_alua_tg_pt_gp *tg_pt_gp; |
42 | struct t10_alua_tg_pt_gp_member *tg_pt_gp_mem; | 46 | struct t10_alua_tg_pt_gp_member *tg_pt_gp_mem; |
@@ -65,8 +69,7 @@ target_fill_alua_data(struct se_port *port, unsigned char *buf) | |||
65 | spin_unlock(&tg_pt_gp_mem->tg_pt_gp_mem_lock); | 69 | spin_unlock(&tg_pt_gp_mem->tg_pt_gp_mem_lock); |
66 | } | 70 | } |
67 | 71 | ||
68 | static int | 72 | static int spc_emulate_inquiry_std(struct se_cmd *cmd, char *buf) |
69 | target_emulate_inquiry_std(struct se_cmd *cmd, char *buf) | ||
70 | { | 73 | { |
71 | struct se_lun *lun = cmd->se_lun; | 74 | struct se_lun *lun = cmd->se_lun; |
72 | struct se_device *dev = cmd->se_dev; | 75 | struct se_device *dev = cmd->se_dev; |
@@ -93,7 +96,7 @@ target_emulate_inquiry_std(struct se_cmd *cmd, char *buf) | |||
93 | * Enable SCCS and TPGS fields for Emulated ALUA | 96 | * Enable SCCS and TPGS fields for Emulated ALUA |
94 | */ | 97 | */ |
95 | if (dev->se_sub_dev->t10_alua.alua_type == SPC3_ALUA_EMULATED) | 98 | if (dev->se_sub_dev->t10_alua.alua_type == SPC3_ALUA_EMULATED) |
96 | target_fill_alua_data(lun->lun_sep, buf); | 99 | spc_fill_alua_data(lun->lun_sep, buf); |
97 | 100 | ||
98 | buf[7] = 0x2; /* CmdQue=1 */ | 101 | buf[7] = 0x2; /* CmdQue=1 */ |
99 | 102 | ||
@@ -106,8 +109,7 @@ target_emulate_inquiry_std(struct se_cmd *cmd, char *buf) | |||
106 | } | 109 | } |
107 | 110 | ||
108 | /* unit serial number */ | 111 | /* unit serial number */ |
109 | static int | 112 | static int spc_emulate_evpd_80(struct se_cmd *cmd, unsigned char *buf) |
110 | target_emulate_evpd_80(struct se_cmd *cmd, unsigned char *buf) | ||
111 | { | 113 | { |
112 | struct se_device *dev = cmd->se_dev; | 114 | struct se_device *dev = cmd->se_dev; |
113 | u16 len = 0; | 115 | u16 len = 0; |
@@ -127,8 +129,8 @@ target_emulate_evpd_80(struct se_cmd *cmd, unsigned char *buf) | |||
127 | return 0; | 129 | return 0; |
128 | } | 130 | } |
129 | 131 | ||
130 | static void | 132 | static void spc_parse_naa_6h_vendor_specific(struct se_device *dev, |
131 | target_parse_naa_6h_vendor_specific(struct se_device *dev, unsigned char *buf) | 133 | unsigned char *buf) |
132 | { | 134 | { |
133 | unsigned char *p = &dev->se_sub_dev->t10_wwn.unit_serial[0]; | 135 | unsigned char *p = &dev->se_sub_dev->t10_wwn.unit_serial[0]; |
134 | int cnt; | 136 | int cnt; |
@@ -162,8 +164,7 @@ target_parse_naa_6h_vendor_specific(struct se_device *dev, unsigned char *buf) | |||
162 | * Device identification VPD, for a complete list of | 164 | * Device identification VPD, for a complete list of |
163 | * DESIGNATOR TYPEs see spc4r17 Table 459. | 165 | * DESIGNATOR TYPEs see spc4r17 Table 459. |
164 | */ | 166 | */ |
165 | static int | 167 | static int spc_emulate_evpd_83(struct se_cmd *cmd, unsigned char *buf) |
166 | target_emulate_evpd_83(struct se_cmd *cmd, unsigned char *buf) | ||
167 | { | 168 | { |
168 | struct se_device *dev = cmd->se_dev; | 169 | struct se_device *dev = cmd->se_dev; |
169 | struct se_lun *lun = cmd->se_lun; | 170 | struct se_lun *lun = cmd->se_lun; |
@@ -220,7 +221,7 @@ target_emulate_evpd_83(struct se_cmd *cmd, unsigned char *buf) | |||
220 | * VENDOR_SPECIFIC_IDENTIFIER and | 221 | * VENDOR_SPECIFIC_IDENTIFIER and |
221 | * VENDOR_SPECIFIC_IDENTIFIER_EXTENSION | 222 | * VENDOR_SPECIFIC_IDENTIFIER_EXTENSION |
222 | */ | 223 | */ |
223 | target_parse_naa_6h_vendor_specific(dev, &buf[off]); | 224 | spc_parse_naa_6h_vendor_specific(dev, &buf[off]); |
224 | 225 | ||
225 | len = 20; | 226 | len = 20; |
226 | off = (len + 4); | 227 | off = (len + 4); |
@@ -414,8 +415,7 @@ check_scsi_name: | |||
414 | } | 415 | } |
415 | 416 | ||
416 | /* Extended INQUIRY Data VPD Page */ | 417 | /* Extended INQUIRY Data VPD Page */ |
417 | static int | 418 | static int spc_emulate_evpd_86(struct se_cmd *cmd, unsigned char *buf) |
418 | target_emulate_evpd_86(struct se_cmd *cmd, unsigned char *buf) | ||
419 | { | 419 | { |
420 | buf[3] = 0x3c; | 420 | buf[3] = 0x3c; |
421 | /* Set HEADSUP, ORDSUP, SIMPSUP */ | 421 | /* Set HEADSUP, ORDSUP, SIMPSUP */ |
@@ -428,15 +428,14 @@ target_emulate_evpd_86(struct se_cmd *cmd, unsigned char *buf) | |||
428 | } | 428 | } |
429 | 429 | ||
430 | /* Block Limits VPD page */ | 430 | /* Block Limits VPD page */ |
431 | static int | 431 | static int spc_emulate_evpd_b0(struct se_cmd *cmd, unsigned char *buf) |
432 | target_emulate_evpd_b0(struct se_cmd *cmd, unsigned char *buf) | ||
433 | { | 432 | { |
434 | struct se_device *dev = cmd->se_dev; | 433 | struct se_device *dev = cmd->se_dev; |
435 | u32 max_sectors; | 434 | u32 max_sectors; |
436 | int have_tp = 0; | 435 | int have_tp = 0; |
437 | 436 | ||
438 | /* | 437 | /* |
439 | * Following sbc3r22 section 6.5.3 Block Limits VPD page, when | 438 | * Following spc3r22 section 6.5.3 Block Limits VPD page, when |
440 | * emulate_tpu=1 or emulate_tpws=1 we will expect a | 439 | * emulate_tpu=1 or emulate_tpws=1 we will expect a |
441 | * different page length for Thin Provisioning. | 440 | * different page length for Thin Provisioning. |
442 | */ | 441 | */ |
@@ -500,8 +499,7 @@ target_emulate_evpd_b0(struct se_cmd *cmd, unsigned char *buf) | |||
500 | } | 499 | } |
501 | 500 | ||
502 | /* Block Device Characteristics VPD page */ | 501 | /* Block Device Characteristics VPD page */ |
503 | static int | 502 | static int spc_emulate_evpd_b1(struct se_cmd *cmd, unsigned char *buf) |
504 | target_emulate_evpd_b1(struct se_cmd *cmd, unsigned char *buf) | ||
505 | { | 503 | { |
506 | struct se_device *dev = cmd->se_dev; | 504 | struct se_device *dev = cmd->se_dev; |
507 | 505 | ||
@@ -513,13 +511,12 @@ target_emulate_evpd_b1(struct se_cmd *cmd, unsigned char *buf) | |||
513 | } | 511 | } |
514 | 512 | ||
515 | /* Thin Provisioning VPD */ | 513 | /* Thin Provisioning VPD */ |
516 | static int | 514 | static int spc_emulate_evpd_b2(struct se_cmd *cmd, unsigned char *buf) |
517 | target_emulate_evpd_b2(struct se_cmd *cmd, unsigned char *buf) | ||
518 | { | 515 | { |
519 | struct se_device *dev = cmd->se_dev; | 516 | struct se_device *dev = cmd->se_dev; |
520 | 517 | ||
521 | /* | 518 | /* |
522 | * From sbc3r22 section 6.5.4 Thin Provisioning VPD page: | 519 | * From spc3r22 section 6.5.4 Thin Provisioning VPD page: |
523 | * | 520 | * |
524 | * The PAGE LENGTH field is defined in SPC-4. If the DP bit is set to | 521 | * The PAGE LENGTH field is defined in SPC-4. If the DP bit is set to |
525 | * zero, then the page length shall be set to 0004h. If the DP bit | 522 | * zero, then the page length shall be set to 0004h. If the DP bit |
@@ -564,25 +561,23 @@ target_emulate_evpd_b2(struct se_cmd *cmd, unsigned char *buf) | |||
564 | return 0; | 561 | return 0; |
565 | } | 562 | } |
566 | 563 | ||
567 | static int | 564 | static int spc_emulate_evpd_00(struct se_cmd *cmd, unsigned char *buf); |
568 | target_emulate_evpd_00(struct se_cmd *cmd, unsigned char *buf); | ||
569 | 565 | ||
570 | static struct { | 566 | static struct { |
571 | uint8_t page; | 567 | uint8_t page; |
572 | int (*emulate)(struct se_cmd *, unsigned char *); | 568 | int (*emulate)(struct se_cmd *, unsigned char *); |
573 | } evpd_handlers[] = { | 569 | } evpd_handlers[] = { |
574 | { .page = 0x00, .emulate = target_emulate_evpd_00 }, | 570 | { .page = 0x00, .emulate = spc_emulate_evpd_00 }, |
575 | { .page = 0x80, .emulate = target_emulate_evpd_80 }, | 571 | { .page = 0x80, .emulate = spc_emulate_evpd_80 }, |
576 | { .page = 0x83, .emulate = target_emulate_evpd_83 }, | 572 | { .page = 0x83, .emulate = spc_emulate_evpd_83 }, |
577 | { .page = 0x86, .emulate = target_emulate_evpd_86 }, | 573 | { .page = 0x86, .emulate = spc_emulate_evpd_86 }, |
578 | { .page = 0xb0, .emulate = target_emulate_evpd_b0 }, | 574 | { .page = 0xb0, .emulate = spc_emulate_evpd_b0 }, |
579 | { .page = 0xb1, .emulate = target_emulate_evpd_b1 }, | 575 | { .page = 0xb1, .emulate = spc_emulate_evpd_b1 }, |
580 | { .page = 0xb2, .emulate = target_emulate_evpd_b2 }, | 576 | { .page = 0xb2, .emulate = spc_emulate_evpd_b2 }, |
581 | }; | 577 | }; |
582 | 578 | ||
583 | /* supported vital product data pages */ | 579 | /* supported vital product data pages */ |
584 | static int | 580 | static int spc_emulate_evpd_00(struct se_cmd *cmd, unsigned char *buf) |
585 | target_emulate_evpd_00(struct se_cmd *cmd, unsigned char *buf) | ||
586 | { | 581 | { |
587 | int p; | 582 | int p; |
588 | 583 | ||
@@ -601,7 +596,7 @@ target_emulate_evpd_00(struct se_cmd *cmd, unsigned char *buf) | |||
601 | return 0; | 596 | return 0; |
602 | } | 597 | } |
603 | 598 | ||
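The evpd_handlers[] array and the lookup loop in spc_emulate_evpd_00() form a small table-driven dispatcher: page 0x00 enumerates the table itself, and every other supported VPD page is resolved through the same array. The pattern in isolation, as a self-contained sketch with illustrative names:

    #include <stddef.h>
    #include <stdint.h>

    struct vpd_handler {
            uint8_t page;
            int (*emulate)(unsigned char *buf);
    };

    static int emulate_unit_serial(unsigned char *buf) { buf[1] = 0x80; return 0; }
    static int emulate_device_id(unsigned char *buf)   { buf[1] = 0x83; return 0; }

    static const struct vpd_handler handlers[] = {
            { 0x80, emulate_unit_serial },
            { 0x83, emulate_device_id },
    };

    static int dispatch_vpd(uint8_t page, unsigned char *buf)
    {
            size_t i;

            for (i = 0; i < sizeof(handlers) / sizeof(handlers[0]); i++)
                    if (handlers[i].page == page)
                            return handlers[i].emulate(buf);
            return -1;      /* unsupported page */
    }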
604 | int target_emulate_inquiry(struct se_cmd *cmd) | 599 | static int spc_emulate_inquiry(struct se_cmd *cmd) |
605 | { | 600 | { |
606 | struct se_device *dev = cmd->se_dev; | 601 | struct se_device *dev = cmd->se_dev; |
607 | struct se_portal_group *tpg = cmd->se_lun->lun_sep->sep_tpg; | 602 | struct se_portal_group *tpg = cmd->se_lun->lun_sep->sep_tpg; |
@@ -643,7 +638,7 @@ int target_emulate_inquiry(struct se_cmd *cmd) | |||
643 | goto out; | 638 | goto out; |
644 | } | 639 | } |
645 | 640 | ||
646 | ret = target_emulate_inquiry_std(cmd, buf); | 641 | ret = spc_emulate_inquiry_std(cmd, buf); |
647 | goto out; | 642 | goto out; |
648 | } | 643 | } |
649 | 644 | ||
@@ -671,70 +666,7 @@ out: | |||
671 | return ret; | 666 | return ret; |
672 | } | 667 | } |
673 | 668 | ||
674 | int target_emulate_readcapacity(struct se_cmd *cmd) | 669 | static int spc_modesense_rwrecovery(unsigned char *p) |
675 | { | ||
676 | struct se_device *dev = cmd->se_dev; | ||
677 | unsigned char *buf; | ||
678 | unsigned long long blocks_long = dev->transport->get_blocks(dev); | ||
679 | u32 blocks; | ||
680 | |||
681 | if (blocks_long >= 0x00000000ffffffff) | ||
682 | blocks = 0xffffffff; | ||
683 | else | ||
684 | blocks = (u32)blocks_long; | ||
685 | |||
686 | buf = transport_kmap_data_sg(cmd); | ||
687 | |||
688 | buf[0] = (blocks >> 24) & 0xff; | ||
689 | buf[1] = (blocks >> 16) & 0xff; | ||
690 | buf[2] = (blocks >> 8) & 0xff; | ||
691 | buf[3] = blocks & 0xff; | ||
692 | buf[4] = (dev->se_sub_dev->se_dev_attrib.block_size >> 24) & 0xff; | ||
693 | buf[5] = (dev->se_sub_dev->se_dev_attrib.block_size >> 16) & 0xff; | ||
694 | buf[6] = (dev->se_sub_dev->se_dev_attrib.block_size >> 8) & 0xff; | ||
695 | buf[7] = dev->se_sub_dev->se_dev_attrib.block_size & 0xff; | ||
696 | |||
697 | transport_kunmap_data_sg(cmd); | ||
698 | |||
699 | target_complete_cmd(cmd, GOOD); | ||
700 | return 0; | ||
701 | } | ||
702 | |||
703 | int target_emulate_readcapacity_16(struct se_cmd *cmd) | ||
704 | { | ||
705 | struct se_device *dev = cmd->se_dev; | ||
706 | unsigned char *buf; | ||
707 | unsigned long long blocks = dev->transport->get_blocks(dev); | ||
708 | |||
709 | buf = transport_kmap_data_sg(cmd); | ||
710 | |||
711 | buf[0] = (blocks >> 56) & 0xff; | ||
712 | buf[1] = (blocks >> 48) & 0xff; | ||
713 | buf[2] = (blocks >> 40) & 0xff; | ||
714 | buf[3] = (blocks >> 32) & 0xff; | ||
715 | buf[4] = (blocks >> 24) & 0xff; | ||
716 | buf[5] = (blocks >> 16) & 0xff; | ||
717 | buf[6] = (blocks >> 8) & 0xff; | ||
718 | buf[7] = blocks & 0xff; | ||
719 | buf[8] = (dev->se_sub_dev->se_dev_attrib.block_size >> 24) & 0xff; | ||
720 | buf[9] = (dev->se_sub_dev->se_dev_attrib.block_size >> 16) & 0xff; | ||
721 | buf[10] = (dev->se_sub_dev->se_dev_attrib.block_size >> 8) & 0xff; | ||
722 | buf[11] = dev->se_sub_dev->se_dev_attrib.block_size & 0xff; | ||
723 | /* | ||
724 | * Set Thin Provisioning Enable bit following sbc3r22 in section | ||
725 | * READ CAPACITY (16) byte 14 if emulate_tpu or emulate_tpws is enabled. | ||
726 | */ | ||
727 | if (dev->se_sub_dev->se_dev_attrib.emulate_tpu || dev->se_sub_dev->se_dev_attrib.emulate_tpws) | ||
728 | buf[14] = 0x80; | ||
729 | |||
730 | transport_kunmap_data_sg(cmd); | ||
731 | |||
732 | target_complete_cmd(cmd, GOOD); | ||
733 | return 0; | ||
734 | } | ||
735 | |||
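The two READ CAPACITY emulators deleted here (the logic moves to the new SBC file) assemble each big-endian field a byte at a time with shifts and masks. The same wire format can be produced with the unaligned helpers this file already pulls in via asm/unaligned.h; a sketch of the equivalent packing, assuming kernel context, and not the code that actually replaced it:

    #include <asm/unaligned.h>
    #include <linux/types.h>

    /* Fill the first 12 bytes of a READ CAPACITY (16) response. */
    static void pack_readcap16(unsigned char *buf, u64 blocks, u32 block_size)
    {
            put_unaligned_be64(blocks, &buf[0]);      /* RETURNED LBA */
            put_unaligned_be32(block_size, &buf[8]);  /* LOGICAL BLOCK LENGTH */
    }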
736 | static int | ||
737 | target_modesense_rwrecovery(unsigned char *p) | ||
738 | { | 670 | { |
739 | p[0] = 0x01; | 671 | p[0] = 0x01; |
740 | p[1] = 0x0a; | 672 | p[1] = 0x0a; |
@@ -742,8 +674,7 @@ target_modesense_rwrecovery(unsigned char *p) | |||
742 | return 12; | 674 | return 12; |
743 | } | 675 | } |
744 | 676 | ||
745 | static int | 677 | static int spc_modesense_control(struct se_device *dev, unsigned char *p) |
746 | target_modesense_control(struct se_device *dev, unsigned char *p) | ||
747 | { | 678 | { |
748 | p[0] = 0x0a; | 679 | p[0] = 0x0a; |
749 | p[1] = 0x0a; | 680 | p[1] = 0x0a; |
@@ -828,8 +759,7 @@ target_modesense_control(struct se_device *dev, unsigned char *p) | |||
828 | return 12; | 759 | return 12; |
829 | } | 760 | } |
830 | 761 | ||
831 | static int | 762 | static int spc_modesense_caching(struct se_device *dev, unsigned char *p) |
832 | target_modesense_caching(struct se_device *dev, unsigned char *p) | ||
833 | { | 763 | { |
834 | p[0] = 0x08; | 764 | p[0] = 0x08; |
835 | p[1] = 0x12; | 765 | p[1] = 0x12; |
@@ -840,8 +770,7 @@ target_modesense_caching(struct se_device *dev, unsigned char *p) | |||
840 | return 20; | 770 | return 20; |
841 | } | 771 | } |
842 | 772 | ||
843 | static void | 773 | static void spc_modesense_write_protect(unsigned char *buf, int type) |
844 | target_modesense_write_protect(unsigned char *buf, int type) | ||
845 | { | 774 | { |
846 | /* | 775 | /* |
847 | * I believe that the WP bit (bit 7) in the mode header is the same for | 776 | * I believe that the WP bit (bit 7) in the mode header is the same for |
@@ -856,8 +785,7 @@ target_modesense_write_protect(unsigned char *buf, int type) | |||
856 | } | 785 | } |
857 | } | 786 | } |
858 | 787 | ||
859 | static void | 788 | static void spc_modesense_dpofua(unsigned char *buf, int type) |
860 | target_modesense_dpofua(unsigned char *buf, int type) | ||
861 | { | 789 | { |
862 | switch (type) { | 790 | switch (type) { |
863 | case TYPE_DISK: | 791 | case TYPE_DISK: |
@@ -868,7 +796,7 @@ target_modesense_dpofua(unsigned char *buf, int type) | |||
868 | } | 796 | } |
869 | } | 797 | } |
870 | 798 | ||
871 | int target_emulate_modesense(struct se_cmd *cmd) | 799 | static int spc_emulate_modesense(struct se_cmd *cmd) |
872 | { | 800 | { |
873 | struct se_device *dev = cmd->se_dev; | 801 | struct se_device *dev = cmd->se_dev; |
874 | char *cdb = cmd->t_task_cdb; | 802 | char *cdb = cmd->t_task_cdb; |
@@ -883,18 +811,18 @@ int target_emulate_modesense(struct se_cmd *cmd) | |||
883 | 811 | ||
884 | switch (cdb[2] & 0x3f) { | 812 | switch (cdb[2] & 0x3f) { |
885 | case 0x01: | 813 | case 0x01: |
886 | length = target_modesense_rwrecovery(&buf[offset]); | 814 | length = spc_modesense_rwrecovery(&buf[offset]); |
887 | break; | 815 | break; |
888 | case 0x08: | 816 | case 0x08: |
889 | length = target_modesense_caching(dev, &buf[offset]); | 817 | length = spc_modesense_caching(dev, &buf[offset]); |
890 | break; | 818 | break; |
891 | case 0x0a: | 819 | case 0x0a: |
892 | length = target_modesense_control(dev, &buf[offset]); | 820 | length = spc_modesense_control(dev, &buf[offset]); |
893 | break; | 821 | break; |
894 | case 0x3f: | 822 | case 0x3f: |
895 | length = target_modesense_rwrecovery(&buf[offset]); | 823 | length = spc_modesense_rwrecovery(&buf[offset]); |
896 | length += target_modesense_caching(dev, &buf[offset+length]); | 824 | length += spc_modesense_caching(dev, &buf[offset+length]); |
897 | length += target_modesense_control(dev, &buf[offset+length]); | 825 | length += spc_modesense_control(dev, &buf[offset+length]); |
898 | break; | 826 | break; |
899 | default: | 827 | default: |
900 | pr_err("MODE SENSE: unimplemented page/subpage: 0x%02x/0x%02x\n", | 828 | pr_err("MODE SENSE: unimplemented page/subpage: 0x%02x/0x%02x\n", |
@@ -912,11 +840,11 @@ int target_emulate_modesense(struct se_cmd *cmd) | |||
912 | if ((cmd->se_lun->lun_access & TRANSPORT_LUNFLAGS_READ_ONLY) || | 840 | if ((cmd->se_lun->lun_access & TRANSPORT_LUNFLAGS_READ_ONLY) || |
913 | (cmd->se_deve && | 841 | (cmd->se_deve && |
914 | (cmd->se_deve->lun_flags & TRANSPORT_LUNFLAGS_READ_ONLY))) | 842 | (cmd->se_deve->lun_flags & TRANSPORT_LUNFLAGS_READ_ONLY))) |
915 | target_modesense_write_protect(&buf[3], type); | 843 | spc_modesense_write_protect(&buf[3], type); |
916 | 844 | ||
917 | if ((dev->se_sub_dev->se_dev_attrib.emulate_write_cache > 0) && | 845 | if ((dev->se_sub_dev->se_dev_attrib.emulate_write_cache > 0) && |
918 | (dev->se_sub_dev->se_dev_attrib.emulate_fua_write > 0)) | 846 | (dev->se_sub_dev->se_dev_attrib.emulate_fua_write > 0)) |
919 | target_modesense_dpofua(&buf[3], type); | 847 | spc_modesense_dpofua(&buf[3], type); |
920 | 848 | ||
921 | if ((offset + 2) > cmd->data_length) | 849 | if ((offset + 2) > cmd->data_length) |
922 | offset = cmd->data_length; | 850 | offset = cmd->data_length; |
@@ -928,11 +856,11 @@ int target_emulate_modesense(struct se_cmd *cmd) | |||
928 | if ((cmd->se_lun->lun_access & TRANSPORT_LUNFLAGS_READ_ONLY) || | 856 | if ((cmd->se_lun->lun_access & TRANSPORT_LUNFLAGS_READ_ONLY) || |
929 | (cmd->se_deve && | 857 | (cmd->se_deve && |
930 | (cmd->se_deve->lun_flags & TRANSPORT_LUNFLAGS_READ_ONLY))) | 858 | (cmd->se_deve->lun_flags & TRANSPORT_LUNFLAGS_READ_ONLY))) |
931 | target_modesense_write_protect(&buf[2], type); | 859 | spc_modesense_write_protect(&buf[2], type); |
932 | 860 | ||
933 | if ((dev->se_sub_dev->se_dev_attrib.emulate_write_cache > 0) && | 861 | if ((dev->se_sub_dev->se_dev_attrib.emulate_write_cache > 0) && |
934 | (dev->se_sub_dev->se_dev_attrib.emulate_fua_write > 0)) | 862 | (dev->se_sub_dev->se_dev_attrib.emulate_fua_write > 0)) |
935 | target_modesense_dpofua(&buf[2], type); | 863 | spc_modesense_dpofua(&buf[2], type); |
936 | 864 | ||
937 | if ((offset + 1) > cmd->data_length) | 865 | if ((offset + 1) > cmd->data_length) |
938 | offset = cmd->data_length; | 866 | offset = cmd->data_length; |
@@ -946,7 +874,7 @@ int target_emulate_modesense(struct se_cmd *cmd) | |||
946 | return 0; | 874 | return 0; |
947 | } | 875 | } |
948 | 876 | ||
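spc_emulate_modesense() above builds the page payload first and then patches the mode parameter header, whose device-specific byte sits at offset 3 for MODE SENSE (10) and offset 2 for MODE SENSE (6); in both layouts WP is bit 7 and DPOFUA is bit 4, which is what the 0x80/0x10 constants in the helpers encode. Reduced to a sketch (illustrative helper, not the kernel's):

    /*
     * Set the device-specific parameter byte of a mode parameter
     * header; callers pass &buf[3] (10-byte CDB) or &buf[2] (6-byte).
     */
    static void set_dev_specific(unsigned char *p, int write_protect, int dpofua)
    {
            if (write_protect)
                    *p |= 0x80;     /* WP */
            if (dpofua)
                    *p |= 0x10;     /* DPOFUA */
    }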
949 | int target_emulate_request_sense(struct se_cmd *cmd) | 877 | static int spc_emulate_request_sense(struct se_cmd *cmd) |
950 | { | 878 | { |
951 | unsigned char *cdb = cmd->t_task_cdb; | 879 | unsigned char *cdb = cmd->t_task_cdb; |
952 | unsigned char *buf; | 880 | unsigned char *buf; |
@@ -1005,126 +933,172 @@ end: | |||
1005 | return 0; | 933 | return 0; |
1006 | } | 934 | } |
1007 | 935 | ||
1008 | /* | 936 | static int spc_emulate_testunitready(struct se_cmd *cmd) |
1009 | * Used for TCM/IBLOCK and TCM/FILEIO for block/blk-lib.c level discard support. | ||
1010 | * Note this is not used for TCM/pSCSI passthrough | ||
1011 | */ | ||
1012 | int target_emulate_unmap(struct se_cmd *cmd) | ||
1013 | { | 937 | { |
1014 | struct se_device *dev = cmd->se_dev; | 938 | target_complete_cmd(cmd, GOOD); |
1015 | unsigned char *buf, *ptr = NULL; | 939 | return 0; |
1016 | unsigned char *cdb = &cmd->t_task_cdb[0]; | ||
1017 | sector_t lba; | ||
1018 | unsigned int size = cmd->data_length, range; | ||
1019 | int ret = 0, offset; | ||
1020 | unsigned short dl, bd_dl; | ||
1021 | |||
1022 | if (!dev->transport->do_discard) { | ||
1023 | pr_err("UNMAP emulation not supported for: %s\n", | ||
1024 | dev->transport->name); | ||
1025 | cmd->scsi_sense_reason = TCM_UNSUPPORTED_SCSI_OPCODE; | ||
1026 | return -ENOSYS; | ||
1027 | } | ||
1028 | |||
1029 | /* First UNMAP block descriptor starts at 8 byte offset */ | ||
1030 | offset = 8; | ||
1031 | size -= 8; | ||
1032 | dl = get_unaligned_be16(&cdb[0]); | ||
1033 | bd_dl = get_unaligned_be16(&cdb[2]); | ||
1034 | |||
1035 | buf = transport_kmap_data_sg(cmd); | ||
1036 | |||
1037 | ptr = &buf[offset]; | ||
1038 | pr_debug("UNMAP: Sub: %s Using dl: %hu bd_dl: %hu size: %hu" | ||
1039 | " ptr: %p\n", dev->transport->name, dl, bd_dl, size, ptr); | ||
1040 | |||
1041 | while (size) { | ||
1042 | lba = get_unaligned_be64(&ptr[0]); | ||
1043 | range = get_unaligned_be32(&ptr[8]); | ||
1044 | pr_debug("UNMAP: Using lba: %llu and range: %u\n", | ||
1045 | (unsigned long long)lba, range); | ||
1046 | |||
1047 | ret = dev->transport->do_discard(dev, lba, range); | ||
1048 | if (ret < 0) { | ||
1049 | pr_err("blkdev_issue_discard() failed: %d\n", | ||
1050 | ret); | ||
1051 | goto err; | ||
1052 | } | ||
1053 | |||
1054 | ptr += 16; | ||
1055 | size -= 16; | ||
1056 | } | ||
1057 | |||
1058 | err: | ||
1059 | transport_kunmap_data_sg(cmd); | ||
1060 | if (!ret) | ||
1061 | target_complete_cmd(cmd, GOOD); | ||
1062 | return ret; | ||
1063 | } | 940 | } |
1064 | 941 | ||
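The UNMAP emulation removed above reads the BLOCK DESCRIPTOR DATA LENGTH from the header and then walks 16-byte descriptors, trusting both the advertised length and every (lba, range) pair exactly as sent by the initiator. A hardened version of the same walk, bounding bd_dl by the bytes that actually arrived and range-checking each descriptor against the device size, looks roughly like this (kernel context assumed, names hypothetical):

    #include <asm/unaligned.h>

    static int parse_unmap_descriptors(const unsigned char *buf,
                                       unsigned int data_len,
                                       unsigned long long dev_blocks)
    {
            unsigned int bd_dl, off;

            if (data_len < 8)
                    return -1;
            bd_dl = get_unaligned_be16(&buf[2]);
            if (bd_dl > data_len - 8)       /* never trust more than arrived */
                    bd_dl = data_len - 8;

            for (off = 8; off + 16 <= 8 + bd_dl; off += 16) {
                    unsigned long long lba = get_unaligned_be64(&buf[off]);
                    unsigned int range = get_unaligned_be32(&buf[off + 8]);

                    /* written to avoid lba + range overflow */
                    if (range > dev_blocks || lba > dev_blocks - range)
                            return -1;      /* LBA OUT OF RANGE */
                    /* ...issue the discard for this extent here... */
            }
            return 0;
    }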
1065 | /* | 942 | int spc_parse_cdb(struct se_cmd *cmd, unsigned int *size) |
1066 | * Used for TCM/IBLOCK and TCM/FILEIO for block/blk-lib.c level discard support. | ||
1067 | * Note this is not used for TCM/pSCSI passthrough | ||
1068 | */ | ||
1069 | int target_emulate_write_same(struct se_cmd *cmd) | ||
1070 | { | 943 | { |
1071 | struct se_device *dev = cmd->se_dev; | 944 | struct se_device *dev = cmd->se_dev; |
1072 | sector_t range; | 945 | struct se_subsystem_dev *su_dev = dev->se_sub_dev; |
1073 | sector_t lba = cmd->t_task_lba; | 946 | unsigned char *cdb = cmd->t_task_cdb; |
1074 | u32 num_blocks; | ||
1075 | int ret; | ||
1076 | |||
1077 | if (!dev->transport->do_discard) { | ||
1078 | pr_err("WRITE_SAME emulation not supported" | ||
1079 | " for: %s\n", dev->transport->name); | ||
1080 | cmd->scsi_sense_reason = TCM_UNSUPPORTED_SCSI_OPCODE; | ||
1081 | return -ENOSYS; | ||
1082 | } | ||
1083 | |||
1084 | if (cmd->t_task_cdb[0] == WRITE_SAME) | ||
1085 | num_blocks = get_unaligned_be16(&cmd->t_task_cdb[7]); | ||
1086 | else if (cmd->t_task_cdb[0] == WRITE_SAME_16) | ||
1087 | num_blocks = get_unaligned_be32(&cmd->t_task_cdb[10]); | ||
1088 | else /* WRITE_SAME_32 via VARIABLE_LENGTH_CMD */ | ||
1089 | num_blocks = get_unaligned_be32(&cmd->t_task_cdb[28]); | ||
1090 | |||
1091 | /* | ||
1092 | * Use the explicit range when non zero is supplied, otherwise calculate | ||
1093 | * the remaining range based on ->get_blocks() - starting LBA. | ||
1094 | */ | ||
1095 | if (num_blocks != 0) | ||
1096 | range = num_blocks; | ||
1097 | else | ||
1098 | range = (dev->transport->get_blocks(dev) - lba) + 1; | ||
1099 | |||
1100 | pr_debug("WRITE_SAME UNMAP: LBA: %llu Range: %llu\n", | ||
1101 | (unsigned long long)lba, (unsigned long long)range); | ||
1102 | 947 | ||
1103 | ret = dev->transport->do_discard(dev, lba, range); | 948 | switch (cdb[0]) { |
1104 | if (ret < 0) { | 949 | case MODE_SELECT: |
1105 | pr_debug("blkdev_issue_discard() failed for WRITE_SAME\n"); | 950 | *size = cdb[4]; |
1106 | return ret; | 951 | break; |
1107 | } | 952 | case MODE_SELECT_10: |
953 | *size = (cdb[7] << 8) + cdb[8]; | ||
954 | break; | ||
955 | case MODE_SENSE: | ||
956 | *size = cdb[4]; | ||
957 | cmd->execute_cmd = spc_emulate_modesense; | ||
958 | break; | ||
959 | case MODE_SENSE_10: | ||
960 | *size = (cdb[7] << 8) + cdb[8]; | ||
961 | cmd->execute_cmd = spc_emulate_modesense; | ||
962 | break; | ||
963 | case LOG_SELECT: | ||
964 | case LOG_SENSE: | ||
965 | *size = (cdb[7] << 8) + cdb[8]; | ||
966 | break; | ||
967 | case PERSISTENT_RESERVE_IN: | ||
968 | if (su_dev->t10_pr.res_type == SPC3_PERSISTENT_RESERVATIONS) | ||
969 | cmd->execute_cmd = target_scsi3_emulate_pr_in; | ||
970 | *size = (cdb[7] << 8) + cdb[8]; | ||
971 | break; | ||
972 | case PERSISTENT_RESERVE_OUT: | ||
973 | if (su_dev->t10_pr.res_type == SPC3_PERSISTENT_RESERVATIONS) | ||
974 | cmd->execute_cmd = target_scsi3_emulate_pr_out; | ||
975 | *size = (cdb[7] << 8) + cdb[8]; | ||
976 | break; | ||
977 | case RELEASE: | ||
978 | case RELEASE_10: | ||
979 | if (cdb[0] == RELEASE_10) | ||
980 | *size = (cdb[7] << 8) | cdb[8]; | ||
981 | else | ||
982 | *size = cmd->data_length; | ||
983 | |||
984 | if (su_dev->t10_pr.res_type != SPC_PASSTHROUGH) | ||
985 | cmd->execute_cmd = target_scsi2_reservation_release; | ||
986 | break; | ||
987 | case RESERVE: | ||
988 | case RESERVE_10: | ||
989 | /* | ||
990 | * The SPC-2 RESERVE does not contain a size in the SCSI CDB. | ||
991 | * Assume the passthrough or $FABRIC_MOD will tell us about it. | ||
992 | */ | ||
993 | if (cdb[0] == RESERVE_10) | ||
994 | *size = (cdb[7] << 8) | cdb[8]; | ||
995 | else | ||
996 | *size = cmd->data_length; | ||
1108 | 997 | ||
1109 | target_complete_cmd(cmd, GOOD); | 998 | /* |
1110 | return 0; | 999 | * Setup the legacy emulated handler for SPC-2 and |
1111 | } | 1000 | * >= SPC-3 compatible reservation handling (CRH=1) |
1001 | * Otherwise, we assume the underlying SCSI logic is | ||
1002 | * running in SPC_PASSTHROUGH, and wants reservations | ||
1003 | * emulation disabled. | ||
1004 | */ | ||
1005 | if (su_dev->t10_pr.res_type != SPC_PASSTHROUGH) | ||
1006 | cmd->execute_cmd = target_scsi2_reservation_reserve; | ||
1007 | break; | ||
1008 | case REQUEST_SENSE: | ||
1009 | *size = cdb[4]; | ||
1010 | cmd->execute_cmd = spc_emulate_request_sense; | ||
1011 | break; | ||
1012 | case INQUIRY: | ||
1013 | *size = (cdb[3] << 8) + cdb[4]; | ||
1112 | 1014 | ||
1113 | int target_emulate_synchronize_cache(struct se_cmd *cmd) | 1015 | /* |
1114 | { | 1016 | * Do implicit HEAD_OF_QUEUE processing for INQUIRY. |
1115 | if (!cmd->se_dev->transport->do_sync_cache) { | 1017 | * See spc4r17 section 5.3 |
1116 | pr_err("SYNCHRONIZE_CACHE emulation not supported" | 1018 | */ |
1117 | " for: %s\n", cmd->se_dev->transport->name); | 1019 | if (cmd->se_dev->dev_task_attr_type == SAM_TASK_ATTR_EMULATED) |
1020 | cmd->sam_task_attr = MSG_HEAD_TAG; | ||
1021 | cmd->execute_cmd = spc_emulate_inquiry; | ||
1022 | break; | ||
1023 | case SECURITY_PROTOCOL_IN: | ||
1024 | case SECURITY_PROTOCOL_OUT: | ||
1025 | *size = (cdb[6] << 24) | (cdb[7] << 16) | (cdb[8] << 8) | cdb[9]; | ||
1026 | break; | ||
1027 | case EXTENDED_COPY: | ||
1028 | case READ_ATTRIBUTE: | ||
1029 | case RECEIVE_COPY_RESULTS: | ||
1030 | case WRITE_ATTRIBUTE: | ||
1031 | *size = (cdb[10] << 24) | (cdb[11] << 16) | | ||
1032 | (cdb[12] << 8) | cdb[13]; | ||
1033 | break; | ||
1034 | case RECEIVE_DIAGNOSTIC: | ||
1035 | case SEND_DIAGNOSTIC: | ||
1036 | *size = (cdb[3] << 8) | cdb[4]; | ||
1037 | break; | ||
1038 | case WRITE_BUFFER: | ||
1039 | *size = (cdb[6] << 16) + (cdb[7] << 8) + cdb[8]; | ||
1040 | break; | ||
1041 | case REPORT_LUNS: | ||
1042 | cmd->execute_cmd = target_report_luns; | ||
1043 | *size = (cdb[6] << 24) | (cdb[7] << 16) | (cdb[8] << 8) | cdb[9]; | ||
1044 | /* | ||
1045 | * Do implicit HEAD_OF_QUEUE processing for REPORT_LUNS | ||
1046 | * See spc4r17 section 5.3 | ||
1047 | */ | ||
1048 | if (cmd->se_dev->dev_task_attr_type == SAM_TASK_ATTR_EMULATED) | ||
1049 | cmd->sam_task_attr = MSG_HEAD_TAG; | ||
1050 | break; | ||
1051 | case TEST_UNIT_READY: | ||
1052 | cmd->execute_cmd = spc_emulate_testunitready; | ||
1053 | *size = 0; | ||
1054 | break; | ||
1055 | case MAINTENANCE_IN: | ||
1056 | if (dev->transport->get_device_type(dev) != TYPE_ROM) { | ||
1057 | /* | ||
1058 | * MAINTENANCE_IN from SCC-2 | ||
1059 | * Check for emulated MI_REPORT_TARGET_PGS | ||
1060 | */ | ||
1061 | if ((cdb[1] & 0x1f) == MI_REPORT_TARGET_PGS && | ||
1062 | su_dev->t10_alua.alua_type == SPC3_ALUA_EMULATED) { | ||
1063 | cmd->execute_cmd = | ||
1064 | target_emulate_report_target_port_groups; | ||
1065 | } | ||
1066 | *size = get_unaligned_be32(&cdb[6]); | ||
1067 | } else { | ||
1068 | /* | ||
1069 | * GPCMD_SEND_KEY from multi media commands | ||
1070 | */ | ||
1071 | *size = get_unaligned_be16(&cdb[8]); | ||
1072 | } | ||
1073 | break; | ||
1074 | case MAINTENANCE_OUT: | ||
1075 | if (dev->transport->get_device_type(dev) != TYPE_ROM) { | ||
1076 | /* | ||
1077 | * MAINTENANCE_OUT from SCC-2 | ||
1078 | * Check for emulated MO_SET_TARGET_PGS. | ||
1079 | */ | ||
1080 | if (cdb[1] == MO_SET_TARGET_PGS && | ||
1081 | su_dev->t10_alua.alua_type == SPC3_ALUA_EMULATED) { | ||
1082 | cmd->execute_cmd = | ||
1083 | target_emulate_set_target_port_groups; | ||
1084 | } | ||
1085 | *size = get_unaligned_be32(&cdb[6]); | ||
1086 | } else { | ||
1087 | /* | ||
1088 | * GPCMD_SEND_KEY from multi media commands | ||
1089 | */ | ||
1090 | *size = get_unaligned_be16(&cdb[8]); | ||
1091 | } | ||
1092 | break; | ||
1093 | default: | ||
1094 | pr_warn("TARGET_CORE[%s]: Unsupported SCSI Opcode" | ||
1095 | " 0x%02x, sending CHECK_CONDITION.\n", | ||
1096 | cmd->se_tfo->get_fabric_name(), cdb[0]); | ||
1097 | cmd->se_cmd_flags |= SCF_SCSI_CDB_EXCEPTION; | ||
1118 | cmd->scsi_sense_reason = TCM_UNSUPPORTED_SCSI_OPCODE; | 1098 | cmd->scsi_sense_reason = TCM_UNSUPPORTED_SCSI_OPCODE; |
1119 | return -ENOSYS; | 1099 | return -EINVAL; |
1120 | } | 1100 | } |
1121 | 1101 | ||
1122 | cmd->se_dev->transport->do_sync_cache(cmd); | ||
1123 | return 0; | ||
1124 | } | ||
1125 | |||
1126 | int target_emulate_noop(struct se_cmd *cmd) | ||
1127 | { | ||
1128 | target_complete_cmd(cmd, GOOD); | ||
1129 | return 0; | 1102 | return 0; |
1130 | } | 1103 | } |
1104 | EXPORT_SYMBOL(spc_parse_cdb); | ||
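This export is the other half of the CDB split visible in this merge: sbc_parse_cdb() (earlier in the diff) claims the block opcodes and lands in its default: case on spc_parse_cdb() for everything generic. A backend's parse_cdb method then collapses to a one-liner along these lines (sketch only; the myblock_* names are hypothetical stand-ins for a real backend and its ops table):

    static int myblock_parse_cdb(struct se_cmd *cmd)
    {
            /* sbc_parse_cdb() handles READ/WRITE/WRITE_SAME/... and
             * itself falls back to spc_parse_cdb() for other opcodes */
            return sbc_parse_cdb(cmd, &myblock_spc_ops);
    }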
diff --git a/drivers/target/target_core_tmr.c b/drivers/target/target_core_tmr.c index 84caf1bed9a3..1c59a3c23b2c 100644 --- a/drivers/target/target_core_tmr.c +++ b/drivers/target/target_core_tmr.c | |||
@@ -295,9 +295,6 @@ static void core_tmr_drain_state_list( | |||
295 | 295 | ||
296 | list_move_tail(&cmd->state_list, &drain_task_list); | 296 | list_move_tail(&cmd->state_list, &drain_task_list); |
297 | cmd->state_active = false; | 297 | cmd->state_active = false; |
298 | |||
299 | if (!list_empty(&cmd->execute_list)) | ||
300 | __target_remove_from_execute_list(cmd); | ||
301 | } | 298 | } |
302 | spin_unlock_irqrestore(&dev->execute_task_lock, flags); | 299 | spin_unlock_irqrestore(&dev->execute_task_lock, flags); |
303 | 300 | ||
@@ -354,57 +351,6 @@ static void core_tmr_drain_state_list( | |||
354 | } | 351 | } |
355 | } | 352 | } |
356 | 353 | ||
357 | static void core_tmr_drain_cmd_list( | ||
358 | struct se_device *dev, | ||
359 | struct se_cmd *prout_cmd, | ||
360 | struct se_node_acl *tmr_nacl, | ||
361 | int tas, | ||
362 | struct list_head *preempt_and_abort_list) | ||
363 | { | ||
364 | LIST_HEAD(drain_cmd_list); | ||
365 | struct se_queue_obj *qobj = &dev->dev_queue_obj; | ||
366 | struct se_cmd *cmd, *tcmd; | ||
367 | unsigned long flags; | ||
368 | |||
369 | /* | ||
370 | * Release all commands remaining in the per-device command queue. | ||
371 | * | ||
372 | * This follows the same logic as above for the state list. | ||
373 | */ | ||
374 | spin_lock_irqsave(&qobj->cmd_queue_lock, flags); | ||
375 | list_for_each_entry_safe(cmd, tcmd, &qobj->qobj_list, se_queue_node) { | ||
376 | /* | ||
377 | * For PREEMPT_AND_ABORT usage, only process commands | ||
378 | * with a matching reservation key. | ||
379 | */ | ||
380 | if (target_check_cdb_and_preempt(preempt_and_abort_list, cmd)) | ||
381 | continue; | ||
382 | /* | ||
383 | * Not aborting PROUT PREEMPT_AND_ABORT CDB.. | ||
384 | */ | ||
385 | if (prout_cmd == cmd) | ||
386 | continue; | ||
387 | |||
388 | cmd->transport_state &= ~CMD_T_QUEUED; | ||
389 | atomic_dec(&qobj->queue_cnt); | ||
390 | list_move_tail(&cmd->se_queue_node, &drain_cmd_list); | ||
391 | } | ||
392 | spin_unlock_irqrestore(&qobj->cmd_queue_lock, flags); | ||
393 | |||
394 | while (!list_empty(&drain_cmd_list)) { | ||
395 | cmd = list_entry(drain_cmd_list.next, struct se_cmd, se_queue_node); | ||
396 | list_del_init(&cmd->se_queue_node); | ||
397 | |||
398 | pr_debug("LUN_RESET: %s from Device Queue: cmd: %p t_state:" | ||
399 | " %d t_fe_count: %d\n", (preempt_and_abort_list) ? | ||
400 | "Preempt" : "", cmd, cmd->t_state, | ||
401 | atomic_read(&cmd->t_fe_count)); | ||
402 | |||
403 | core_tmr_handle_tas_abort(tmr_nacl, cmd, tas, | ||
404 | atomic_read(&cmd->t_fe_count)); | ||
405 | } | ||
406 | } | ||
407 | |||
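core_tmr_drain_cmd_list() goes away because the per-device command queue it serviced is removed later in this merge, but note the idiom it shares with the surviving core_tmr_drain_state_list(): migrate the victims onto a private list while the lock is held, then complete them with the lock dropped. The generic shape (struct my_cmd, should_skip() and finish_cmd() are hypothetical):

    static void drain_matching(struct list_head *queue, spinlock_t *lock)
    {
            LIST_HEAD(drain);
            struct my_cmd *cmd, *tmp;
            unsigned long flags;

            spin_lock_irqsave(lock, flags);
            list_for_each_entry_safe(cmd, tmp, queue, node)
                    if (!should_skip(cmd))  /* e.g. the PROUT cmd itself */
                            list_move_tail(&cmd->node, &drain);
            spin_unlock_irqrestore(lock, flags);

            /* entries on 'drain' are now private: finish them unlocked */
            list_for_each_entry_safe(cmd, tmp, &drain, node) {
                    list_del_init(&cmd->node);
                    finish_cmd(cmd);
            }
    }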
408 | int core_tmr_lun_reset( | 354 | int core_tmr_lun_reset( |
409 | struct se_device *dev, | 355 | struct se_device *dev, |
410 | struct se_tmr_req *tmr, | 356 | struct se_tmr_req *tmr, |
@@ -447,8 +393,7 @@ int core_tmr_lun_reset( | |||
447 | core_tmr_drain_tmr_list(dev, tmr, preempt_and_abort_list); | 393 | core_tmr_drain_tmr_list(dev, tmr, preempt_and_abort_list); |
448 | core_tmr_drain_state_list(dev, prout_cmd, tmr_nacl, tas, | 394 | core_tmr_drain_state_list(dev, prout_cmd, tmr_nacl, tas, |
449 | preempt_and_abort_list); | 395 | preempt_and_abort_list); |
450 | core_tmr_drain_cmd_list(dev, prout_cmd, tmr_nacl, tas, | 396 | |
451 | preempt_and_abort_list); | ||
452 | /* | 397 | /* |
453 | * Clear any legacy SPC-2 reservation when called during | 398 | * Clear any legacy SPC-2 reservation when called during |
454 | * LOGICAL UNIT RESET | 399 | * LOGICAL UNIT RESET |
diff --git a/drivers/target/target_core_tpg.c b/drivers/target/target_core_tpg.c index 8bd58e284185..b8628a5014b9 100644 --- a/drivers/target/target_core_tpg.c +++ b/drivers/target/target_core_tpg.c | |||
@@ -77,8 +77,8 @@ static void core_clear_initiator_node_from_tpg( | |||
77 | 77 | ||
78 | lun = deve->se_lun; | 78 | lun = deve->se_lun; |
79 | spin_unlock_irq(&nacl->device_list_lock); | 79 | spin_unlock_irq(&nacl->device_list_lock); |
80 | core_update_device_list_for_node(lun, NULL, deve->mapped_lun, | 80 | core_disable_device_list_for_node(lun, NULL, deve->mapped_lun, |
81 | TRANSPORT_LUNFLAGS_NO_ACCESS, nacl, tpg, 0); | 81 | TRANSPORT_LUNFLAGS_NO_ACCESS, nacl, tpg); |
82 | 82 | ||
83 | spin_lock_irq(&nacl->device_list_lock); | 83 | spin_lock_irq(&nacl->device_list_lock); |
84 | } | 84 | } |
@@ -172,8 +172,8 @@ void core_tpg_add_node_to_devs( | |||
172 | (lun_access == TRANSPORT_LUNFLAGS_READ_WRITE) ? | 172 | (lun_access == TRANSPORT_LUNFLAGS_READ_WRITE) ? |
173 | "READ-WRITE" : "READ-ONLY"); | 173 | "READ-WRITE" : "READ-ONLY"); |
174 | 174 | ||
175 | core_update_device_list_for_node(lun, NULL, lun->unpacked_lun, | 175 | core_enable_device_list_for_node(lun, NULL, lun->unpacked_lun, |
176 | lun_access, acl, tpg, 1); | 176 | lun_access, acl, tpg); |
177 | spin_lock(&tpg->tpg_lun_lock); | 177 | spin_lock(&tpg->tpg_lun_lock); |
178 | } | 178 | } |
179 | spin_unlock(&tpg->tpg_lun_lock); | 179 | spin_unlock(&tpg->tpg_lun_lock); |
@@ -306,10 +306,8 @@ struct se_node_acl *core_tpg_check_initiator_node_acl( | |||
306 | * TPG LUNs if the fabric is not explicitly asking for | 306 | * TPG LUNs if the fabric is not explicitly asking for |
307 | * tpg_check_demo_mode_login_only() == 1. | 307 | * tpg_check_demo_mode_login_only() == 1. |
308 | */ | 308 | */ |
309 | if ((tpg->se_tpg_tfo->tpg_check_demo_mode_login_only != NULL) && | 309 | if ((tpg->se_tpg_tfo->tpg_check_demo_mode_login_only == NULL) || |
310 | (tpg->se_tpg_tfo->tpg_check_demo_mode_login_only(tpg) == 1)) | 310 | (tpg->se_tpg_tfo->tpg_check_demo_mode_login_only(tpg) != 1)) |
311 | do { ; } while (0); | ||
312 | else | ||
313 | core_tpg_add_node_to_devs(acl, tpg); | 311 | core_tpg_add_node_to_devs(acl, tpg); |
314 | 312 | ||
315 | spin_lock_irq(&tpg->acl_node_lock); | 313 | spin_lock_irq(&tpg->acl_node_lock); |
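The last tpg hunk removes the odd empty branch: the old code executed do { ; } while (0) when demo-mode login-only was requested, purely so the real work could sit in the else. Inverting the condition (De Morgan) leaves one plain if; with f standing in for tpg->se_tpg_tfo->tpg_check_demo_mode_login_only:

    /* before: empty if body, real work hidden in the else */
    if (f != NULL && f(tpg) == 1)
            do { ; } while (0);             /* nothing */
    else
            core_tpg_add_node_to_devs(acl, tpg);

    /* after: negated condition, empty branch gone */
    if (f == NULL || f(tpg) != 1)
            core_tpg_add_node_to_devs(acl, tpg);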
diff --git a/drivers/target/target_core_transport.c b/drivers/target/target_core_transport.c index 634d0f31a28c..0eaae23d12b5 100644 --- a/drivers/target/target_core_transport.c +++ b/drivers/target/target_core_transport.c | |||
@@ -66,15 +66,12 @@ struct kmem_cache *t10_alua_lu_gp_mem_cache; | |||
66 | struct kmem_cache *t10_alua_tg_pt_gp_cache; | 66 | struct kmem_cache *t10_alua_tg_pt_gp_cache; |
67 | struct kmem_cache *t10_alua_tg_pt_gp_mem_cache; | 67 | struct kmem_cache *t10_alua_tg_pt_gp_mem_cache; |
68 | 68 | ||
69 | static int transport_generic_write_pending(struct se_cmd *); | ||
70 | static int transport_processing_thread(void *param); | ||
71 | static int __transport_execute_tasks(struct se_device *dev, struct se_cmd *); | ||
72 | static void transport_complete_task_attr(struct se_cmd *cmd); | 69 | static void transport_complete_task_attr(struct se_cmd *cmd); |
73 | static void transport_handle_queue_full(struct se_cmd *cmd, | 70 | static void transport_handle_queue_full(struct se_cmd *cmd, |
74 | struct se_device *dev); | 71 | struct se_device *dev); |
75 | static int transport_generic_get_mem(struct se_cmd *cmd); | 72 | static int transport_generic_get_mem(struct se_cmd *cmd); |
73 | static int target_get_sess_cmd(struct se_session *, struct se_cmd *, bool); | ||
76 | static void transport_put_cmd(struct se_cmd *cmd); | 74 | static void transport_put_cmd(struct se_cmd *cmd); |
77 | static void transport_remove_cmd_from_queue(struct se_cmd *cmd); | ||
78 | static int transport_set_sense_codes(struct se_cmd *cmd, u8 asc, u8 ascq); | 75 | static int transport_set_sense_codes(struct se_cmd *cmd, u8 asc, u8 ascq); |
79 | static void target_complete_ok_work(struct work_struct *work); | 76 | static void target_complete_ok_work(struct work_struct *work); |
80 | 77 | ||
@@ -195,14 +192,6 @@ u32 scsi_get_new_index(scsi_index_t type) | |||
195 | return new_index; | 192 | return new_index; |
196 | } | 193 | } |
197 | 194 | ||
198 | static void transport_init_queue_obj(struct se_queue_obj *qobj) | ||
199 | { | ||
200 | atomic_set(&qobj->queue_cnt, 0); | ||
201 | INIT_LIST_HEAD(&qobj->qobj_list); | ||
202 | init_waitqueue_head(&qobj->thread_wq); | ||
203 | spin_lock_init(&qobj->cmd_queue_lock); | ||
204 | } | ||
205 | |||
206 | void transport_subsystem_check_init(void) | 195 | void transport_subsystem_check_init(void) |
207 | { | 196 | { |
208 | int ret; | 197 | int ret; |
@@ -243,7 +232,6 @@ struct se_session *transport_init_session(void) | |||
243 | INIT_LIST_HEAD(&se_sess->sess_list); | 232 | INIT_LIST_HEAD(&se_sess->sess_list); |
244 | INIT_LIST_HEAD(&se_sess->sess_acl_list); | 233 | INIT_LIST_HEAD(&se_sess->sess_acl_list); |
245 | INIT_LIST_HEAD(&se_sess->sess_cmd_list); | 234 | INIT_LIST_HEAD(&se_sess->sess_cmd_list); |
246 | INIT_LIST_HEAD(&se_sess->sess_wait_list); | ||
247 | spin_lock_init(&se_sess->sess_cmd_lock); | 235 | spin_lock_init(&se_sess->sess_cmd_lock); |
248 | kref_init(&se_sess->sess_kref); | 236 | kref_init(&se_sess->sess_kref); |
249 | 237 | ||
@@ -468,18 +456,7 @@ static void target_remove_from_state_list(struct se_cmd *cmd) | |||
468 | spin_unlock_irqrestore(&dev->execute_task_lock, flags); | 456 | spin_unlock_irqrestore(&dev->execute_task_lock, flags); |
469 | } | 457 | } |
470 | 458 | ||
471 | /* transport_cmd_check_stop(): | 459 | static int transport_cmd_check_stop(struct se_cmd *cmd, bool remove_from_lists) |
472 | * | ||
473 | * 'transport_off = 1' determines if CMD_T_ACTIVE should be cleared. | ||
474 | * 'transport_off = 2' determines if task_dev_state should be removed. | ||
475 | * | ||
476 | * A non-zero u8 t_state sets cmd->t_state. | ||
477 | * Returns 1 when command is stopped, else 0. | ||
478 | */ | ||
479 | static int transport_cmd_check_stop( | ||
480 | struct se_cmd *cmd, | ||
481 | int transport_off, | ||
482 | u8 t_state) | ||
483 | { | 460 | { |
484 | unsigned long flags; | 461 | unsigned long flags; |
485 | 462 | ||
@@ -493,13 +470,23 @@ static int transport_cmd_check_stop( | |||
493 | __func__, __LINE__, cmd->se_tfo->get_task_tag(cmd)); | 470 | __func__, __LINE__, cmd->se_tfo->get_task_tag(cmd)); |
494 | 471 | ||
495 | cmd->transport_state &= ~CMD_T_ACTIVE; | 472 | cmd->transport_state &= ~CMD_T_ACTIVE; |
496 | if (transport_off == 2) | 473 | if (remove_from_lists) |
497 | target_remove_from_state_list(cmd); | 474 | target_remove_from_state_list(cmd); |
498 | spin_unlock_irqrestore(&cmd->t_state_lock, flags); | 475 | spin_unlock_irqrestore(&cmd->t_state_lock, flags); |
499 | 476 | ||
500 | complete(&cmd->transport_lun_stop_comp); | 477 | complete(&cmd->transport_lun_stop_comp); |
501 | return 1; | 478 | return 1; |
502 | } | 479 | } |
480 | |||
481 | if (remove_from_lists) { | ||
482 | target_remove_from_state_list(cmd); | ||
483 | |||
484 | /* | ||
485 | * Clear struct se_cmd->se_lun before the handoff to FE. | ||
486 | */ | ||
487 | cmd->se_lun = NULL; | ||
488 | } | ||
489 | |||
503 | /* | 490 | /* |
504 | * Determine if frontend context caller is requesting the stopping of | 491 | * Determine if frontend context caller is requesting the stopping of |
505 | * this command for frontend exceptions. | 492 | * this command for frontend exceptions. |
@@ -509,58 +496,36 @@ static int transport_cmd_check_stop( | |||
509 | __func__, __LINE__, | 496 | __func__, __LINE__, |
510 | cmd->se_tfo->get_task_tag(cmd)); | 497 | cmd->se_tfo->get_task_tag(cmd)); |
511 | 498 | ||
512 | if (transport_off == 2) | ||
513 | target_remove_from_state_list(cmd); | ||
514 | |||
515 | /* | ||
516 | * Clear struct se_cmd->se_lun before the transport_off == 2 handoff | ||
517 | * to FE. | ||
518 | */ | ||
519 | if (transport_off == 2) | ||
520 | cmd->se_lun = NULL; | ||
521 | spin_unlock_irqrestore(&cmd->t_state_lock, flags); | 499 | spin_unlock_irqrestore(&cmd->t_state_lock, flags); |
522 | 500 | ||
523 | complete(&cmd->t_transport_stop_comp); | 501 | complete(&cmd->t_transport_stop_comp); |
524 | return 1; | 502 | return 1; |
525 | } | 503 | } |
526 | if (transport_off) { | 504 | |
527 | cmd->transport_state &= ~CMD_T_ACTIVE; | 505 | cmd->transport_state &= ~CMD_T_ACTIVE; |
528 | if (transport_off == 2) { | 506 | if (remove_from_lists) { |
529 | target_remove_from_state_list(cmd); | 507 | /* |
530 | /* | 508 | * Some fabric modules like tcm_loop can release |
531 | * Clear struct se_cmd->se_lun before the transport_off == 2 | 509 | * their internally allocated I/O reference now and |
532 | * handoff to fabric module. | 510 | * struct se_cmd now. |
533 | */ | 511 | * |
534 | cmd->se_lun = NULL; | 512 | * Fabric modules are expected to return '1' here if the |
535 | /* | 513 | * se_cmd being passed is released at this point, |
536 | * Some fabric modules like tcm_loop can release | 514 | * or zero if not being released. |
537 | * their internally allocated I/O reference now and | 515 | */ |
538 | * struct se_cmd now. | 516 | if (cmd->se_tfo->check_stop_free != NULL) { |
539 | * | 517 | spin_unlock_irqrestore(&cmd->t_state_lock, flags); |
540 | * Fabric modules are expected to return '1' here if the | 518 | return cmd->se_tfo->check_stop_free(cmd); |
541 | * se_cmd being passed is released at this point, | ||
542 | * or zero if not being released. | ||
543 | */ | ||
544 | if (cmd->se_tfo->check_stop_free != NULL) { | ||
545 | spin_unlock_irqrestore( | ||
546 | &cmd->t_state_lock, flags); | ||
547 | |||
548 | return cmd->se_tfo->check_stop_free(cmd); | ||
549 | } | ||
550 | } | 519 | } |
551 | spin_unlock_irqrestore(&cmd->t_state_lock, flags); | 520 | } |
552 | 521 | ||
553 | return 0; | ||
554 | } else if (t_state) | ||
555 | cmd->t_state = t_state; | ||
556 | spin_unlock_irqrestore(&cmd->t_state_lock, flags); | 522 | spin_unlock_irqrestore(&cmd->t_state_lock, flags); |
557 | |||
558 | return 0; | 523 | return 0; |
559 | } | 524 | } |
560 | 525 | ||
561 | static int transport_cmd_check_stop_to_fabric(struct se_cmd *cmd) | 526 | static int transport_cmd_check_stop_to_fabric(struct se_cmd *cmd) |
562 | { | 527 | { |
563 | return transport_cmd_check_stop(cmd, 2, 0); | 528 | return transport_cmd_check_stop(cmd, true); |
564 | } | 529 | } |
565 | 530 | ||
566 | static void transport_lun_remove_cmd(struct se_cmd *cmd) | 531 | static void transport_lun_remove_cmd(struct se_cmd *cmd) |
@@ -591,79 +556,8 @@ void transport_cmd_finish_abort(struct se_cmd *cmd, int remove) | |||
591 | 556 | ||
592 | if (transport_cmd_check_stop_to_fabric(cmd)) | 557 | if (transport_cmd_check_stop_to_fabric(cmd)) |
593 | return; | 558 | return; |
594 | if (remove) { | 559 | if (remove) |
595 | transport_remove_cmd_from_queue(cmd); | ||
596 | transport_put_cmd(cmd); | 560 | transport_put_cmd(cmd); |
597 | } | ||
598 | } | ||
599 | |||
600 | static void transport_add_cmd_to_queue(struct se_cmd *cmd, int t_state, | ||
601 | bool at_head) | ||
602 | { | ||
603 | struct se_device *dev = cmd->se_dev; | ||
604 | struct se_queue_obj *qobj = &dev->dev_queue_obj; | ||
605 | unsigned long flags; | ||
606 | |||
607 | if (t_state) { | ||
608 | spin_lock_irqsave(&cmd->t_state_lock, flags); | ||
609 | cmd->t_state = t_state; | ||
610 | cmd->transport_state |= CMD_T_ACTIVE; | ||
611 | spin_unlock_irqrestore(&cmd->t_state_lock, flags); | ||
612 | } | ||
613 | |||
614 | spin_lock_irqsave(&qobj->cmd_queue_lock, flags); | ||
615 | |||
616 | /* If the cmd is already on the list, remove it before we add it */ | ||
617 | if (!list_empty(&cmd->se_queue_node)) | ||
618 | list_del(&cmd->se_queue_node); | ||
619 | else | ||
620 | atomic_inc(&qobj->queue_cnt); | ||
621 | |||
622 | if (at_head) | ||
623 | list_add(&cmd->se_queue_node, &qobj->qobj_list); | ||
624 | else | ||
625 | list_add_tail(&cmd->se_queue_node, &qobj->qobj_list); | ||
626 | cmd->transport_state |= CMD_T_QUEUED; | ||
627 | spin_unlock_irqrestore(&qobj->cmd_queue_lock, flags); | ||
628 | |||
629 | wake_up_interruptible(&qobj->thread_wq); | ||
630 | } | ||
631 | |||
632 | static struct se_cmd * | ||
633 | transport_get_cmd_from_queue(struct se_queue_obj *qobj) | ||
634 | { | ||
635 | struct se_cmd *cmd; | ||
636 | unsigned long flags; | ||
637 | |||
638 | spin_lock_irqsave(&qobj->cmd_queue_lock, flags); | ||
639 | if (list_empty(&qobj->qobj_list)) { | ||
640 | spin_unlock_irqrestore(&qobj->cmd_queue_lock, flags); | ||
641 | return NULL; | ||
642 | } | ||
643 | cmd = list_first_entry(&qobj->qobj_list, struct se_cmd, se_queue_node); | ||
644 | |||
645 | cmd->transport_state &= ~CMD_T_QUEUED; | ||
646 | list_del_init(&cmd->se_queue_node); | ||
647 | atomic_dec(&qobj->queue_cnt); | ||
648 | spin_unlock_irqrestore(&qobj->cmd_queue_lock, flags); | ||
649 | |||
650 | return cmd; | ||
651 | } | ||
652 | |||
653 | static void transport_remove_cmd_from_queue(struct se_cmd *cmd) | ||
654 | { | ||
655 | struct se_queue_obj *qobj = &cmd->se_dev->dev_queue_obj; | ||
656 | unsigned long flags; | ||
657 | |||
658 | spin_lock_irqsave(&qobj->cmd_queue_lock, flags); | ||
659 | if (!(cmd->transport_state & CMD_T_QUEUED)) { | ||
660 | spin_unlock_irqrestore(&qobj->cmd_queue_lock, flags); | ||
661 | return; | ||
662 | } | ||
663 | cmd->transport_state &= ~CMD_T_QUEUED; | ||
664 | atomic_dec(&qobj->queue_cnt); | ||
665 | list_del_init(&cmd->se_queue_node); | ||
666 | spin_unlock_irqrestore(&qobj->cmd_queue_lock, flags); | ||
667 | } | 561 | } |
668 | 562 | ||
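The three helpers deleted above were the producer side of the old processing thread: a locked list plus a counter plus a wait queue that a dedicated kthread slept on. With the thread gone (this file later swaps it for the tmr_wq workqueue), the standard replacement idiom is to embed a work item in the command and let the kernel's worker pool run it; in sketch form with hypothetical names:

    #include <linux/workqueue.h>

    struct my_cmd {
            struct work_struct work;
            /* ... command state ... */
    };

    static void my_cmd_work(struct work_struct *work)
    {
            struct my_cmd *cmd = container_of(work, struct my_cmd, work);

            process_cmd(cmd);       /* runs in process context */
    }

    static void my_submit(struct workqueue_struct *wq, struct my_cmd *cmd)
    {
            INIT_WORK(&cmd->work, my_cmd_work);
            queue_work(wq, &cmd->work);     /* replaces list_add + wake_up */
    }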
669 | static void target_complete_failure_work(struct work_struct *work) | 563 | static void target_complete_failure_work(struct work_struct *work) |
@@ -742,68 +636,11 @@ static void target_add_to_state_list(struct se_cmd *cmd) | |||
742 | spin_unlock_irqrestore(&dev->execute_task_lock, flags); | 636 | spin_unlock_irqrestore(&dev->execute_task_lock, flags); |
743 | } | 637 | } |
744 | 638 | ||
745 | static void __target_add_to_execute_list(struct se_cmd *cmd) | ||
746 | { | ||
747 | struct se_device *dev = cmd->se_dev; | ||
748 | bool head_of_queue = false; | ||
749 | |||
750 | if (!list_empty(&cmd->execute_list)) | ||
751 | return; | ||
752 | |||
753 | if (dev->dev_task_attr_type == SAM_TASK_ATTR_EMULATED && | ||
754 | cmd->sam_task_attr == MSG_HEAD_TAG) | ||
755 | head_of_queue = true; | ||
756 | |||
757 | if (head_of_queue) | ||
758 | list_add(&cmd->execute_list, &dev->execute_list); | ||
759 | else | ||
760 | list_add_tail(&cmd->execute_list, &dev->execute_list); | ||
761 | |||
762 | atomic_inc(&dev->execute_tasks); | ||
763 | |||
764 | if (cmd->state_active) | ||
765 | return; | ||
766 | |||
767 | if (head_of_queue) | ||
768 | list_add(&cmd->state_list, &dev->state_list); | ||
769 | else | ||
770 | list_add_tail(&cmd->state_list, &dev->state_list); | ||
771 | |||
772 | cmd->state_active = true; | ||
773 | } | ||
774 | |||
775 | static void target_add_to_execute_list(struct se_cmd *cmd) | ||
776 | { | ||
777 | unsigned long flags; | ||
778 | struct se_device *dev = cmd->se_dev; | ||
779 | |||
780 | spin_lock_irqsave(&dev->execute_task_lock, flags); | ||
781 | __target_add_to_execute_list(cmd); | ||
782 | spin_unlock_irqrestore(&dev->execute_task_lock, flags); | ||
783 | } | ||
784 | |||
785 | void __target_remove_from_execute_list(struct se_cmd *cmd) | ||
786 | { | ||
787 | list_del_init(&cmd->execute_list); | ||
788 | atomic_dec(&cmd->se_dev->execute_tasks); | ||
789 | } | ||
790 | |||
791 | static void target_remove_from_execute_list(struct se_cmd *cmd) | ||
792 | { | ||
793 | struct se_device *dev = cmd->se_dev; | ||
794 | unsigned long flags; | ||
795 | |||
796 | if (WARN_ON(list_empty(&cmd->execute_list))) | ||
797 | return; | ||
798 | |||
799 | spin_lock_irqsave(&dev->execute_task_lock, flags); | ||
800 | __target_remove_from_execute_list(cmd); | ||
801 | spin_unlock_irqrestore(&dev->execute_task_lock, flags); | ||
802 | } | ||
803 | |||
804 | /* | 639 | /* |
805 | * Handle QUEUE_FULL / -EAGAIN and -ENOMEM status | 640 | * Handle QUEUE_FULL / -EAGAIN and -ENOMEM status |
806 | */ | 641 | */ |
642 | static void transport_write_pending_qf(struct se_cmd *cmd); | ||
643 | static void transport_complete_qf(struct se_cmd *cmd); | ||
807 | 644 | ||
808 | static void target_qf_do_work(struct work_struct *work) | 645 | static void target_qf_do_work(struct work_struct *work) |
809 | { | 646 | { |
@@ -827,7 +664,10 @@ static void target_qf_do_work(struct work_struct *work) | |||
827 | (cmd->t_state == TRANSPORT_COMPLETE_QF_WP) ? "WRITE_PENDING" | 664 | (cmd->t_state == TRANSPORT_COMPLETE_QF_WP) ? "WRITE_PENDING" |
828 | : "UNKNOWN"); | 665 | : "UNKNOWN"); |
829 | 666 | ||
830 | transport_add_cmd_to_queue(cmd, cmd->t_state, true); | 667 | if (cmd->t_state == TRANSPORT_COMPLETE_QF_WP) |
668 | transport_write_pending_qf(cmd); | ||
669 | else if (cmd->t_state == TRANSPORT_COMPLETE_QF_OK) | ||
670 | transport_complete_qf(cmd); | ||
831 | } | 671 | } |
832 | } | 672 | } |
833 | 673 | ||
@@ -874,8 +714,7 @@ void transport_dump_dev_state( | |||
874 | break; | 714 | break; |
875 | } | 715 | } |
876 | 716 | ||
877 | *bl += sprintf(b + *bl, " Execute/Max Queue Depth: %d/%d", | 717 | *bl += sprintf(b + *bl, " Max Queue Depth: %d", dev->queue_depth); |
878 | atomic_read(&dev->execute_tasks), dev->queue_depth); | ||
879 | *bl += sprintf(b + *bl, " SectorSize: %u HwMaxSectors: %u\n", | 718 | *bl += sprintf(b + *bl, " SectorSize: %u HwMaxSectors: %u\n", |
880 | dev->se_sub_dev->se_dev_attrib.block_size, | 719 | dev->se_sub_dev->se_dev_attrib.block_size, |
881 | dev->se_sub_dev->se_dev_attrib.hw_max_sectors); | 720 | dev->se_sub_dev->se_dev_attrib.hw_max_sectors); |
@@ -1212,7 +1051,6 @@ struct se_device *transport_add_device_to_core_hba( | |||
1212 | return NULL; | 1051 | return NULL; |
1213 | } | 1052 | } |
1214 | 1053 | ||
1215 | transport_init_queue_obj(&dev->dev_queue_obj); | ||
1216 | dev->dev_flags = device_flags; | 1054 | dev->dev_flags = device_flags; |
1217 | dev->dev_status |= TRANSPORT_DEVICE_DEACTIVATED; | 1055 | dev->dev_status |= TRANSPORT_DEVICE_DEACTIVATED; |
1218 | dev->dev_ptr = transport_dev; | 1056 | dev->dev_ptr = transport_dev; |
@@ -1222,7 +1060,6 @@ struct se_device *transport_add_device_to_core_hba( | |||
1222 | INIT_LIST_HEAD(&dev->dev_list); | 1060 | INIT_LIST_HEAD(&dev->dev_list); |
1223 | INIT_LIST_HEAD(&dev->dev_sep_list); | 1061 | INIT_LIST_HEAD(&dev->dev_sep_list); |
1224 | INIT_LIST_HEAD(&dev->dev_tmr_list); | 1062 | INIT_LIST_HEAD(&dev->dev_tmr_list); |
1225 | INIT_LIST_HEAD(&dev->execute_list); | ||
1226 | INIT_LIST_HEAD(&dev->delayed_cmd_list); | 1063 | INIT_LIST_HEAD(&dev->delayed_cmd_list); |
1227 | INIT_LIST_HEAD(&dev->state_list); | 1064 | INIT_LIST_HEAD(&dev->state_list); |
1228 | INIT_LIST_HEAD(&dev->qf_cmd_list); | 1065 | INIT_LIST_HEAD(&dev->qf_cmd_list); |
@@ -1261,17 +1098,17 @@ struct se_device *transport_add_device_to_core_hba( | |||
1261 | * Setup the Asymmetric Logical Unit Assignment for struct se_device | 1098 | * Setup the Asymmetric Logical Unit Assignment for struct se_device |
1262 | */ | 1099 | */ |
1263 | if (core_setup_alua(dev, force_pt) < 0) | 1100 | if (core_setup_alua(dev, force_pt) < 0) |
1264 | goto out; | 1101 | goto err_dev_list; |
1265 | 1102 | ||
1266 | /* | 1103 | /* |
1267 | * Startup the struct se_device processing thread | 1104 | * Startup the struct se_device processing thread |
1268 | */ | 1105 | */ |
1269 | dev->process_thread = kthread_run(transport_processing_thread, dev, | 1106 | dev->tmr_wq = alloc_workqueue("tmr-%s", WQ_MEM_RECLAIM | WQ_UNBOUND, 1, |
1270 | "LIO_%s", dev->transport->name); | 1107 | dev->transport->name); |
1271 | if (IS_ERR(dev->process_thread)) { | 1108 | if (!dev->tmr_wq) { |
1272 | pr_err("Unable to create kthread: LIO_%s\n", | 1109 | pr_err("Unable to create tmr workqueue for %s\n", |
1273 | dev->transport->name); | 1110 | dev->transport->name); |
1274 | goto out; | 1111 | goto err_dev_list; |
1275 | } | 1112 | } |
1276 | /* | 1113 | /* |
1277 | * Setup work_queue for QUEUE_FULL | 1114 | * Setup work_queue for QUEUE_FULL |
@@ -1289,7 +1126,7 @@ struct se_device *transport_add_device_to_core_hba( | |||
1289 | if (!inquiry_prod || !inquiry_rev) { | 1126 | if (!inquiry_prod || !inquiry_rev) { |
1290 | pr_err("All non TCM/pSCSI plugins require" | 1127 | pr_err("All non TCM/pSCSI plugins require" |
1291 | " INQUIRY consts\n"); | 1128 | " INQUIRY consts\n"); |
1292 | goto out; | 1129 | goto err_wq; |
1293 | } | 1130 | } |
1294 | 1131 | ||
1295 | strncpy(&dev->se_sub_dev->t10_wwn.vendor[0], "LIO-ORG", 8); | 1132 | strncpy(&dev->se_sub_dev->t10_wwn.vendor[0], "LIO-ORG", 8); |
@@ -1299,9 +1136,10 @@ struct se_device *transport_add_device_to_core_hba( | |||
1299 | scsi_dump_inquiry(dev); | 1136 | scsi_dump_inquiry(dev); |
1300 | 1137 | ||
1301 | return dev; | 1138 | return dev; |
1302 | out: | ||
1303 | kthread_stop(dev->process_thread); | ||
1304 | 1139 | ||
1140 | err_wq: | ||
1141 | destroy_workqueue(dev->tmr_wq); | ||
1142 | err_dev_list: | ||
1305 | spin_lock(&hba->device_lock); | 1143 | spin_lock(&hba->device_lock); |
1306 | list_del(&dev->dev_list); | 1144 | list_del(&dev->dev_list); |
1307 | hba->dev_count--; | 1145 | hba->dev_count--; |
@@ -1315,35 +1153,54 @@ out: | |||
1315 | } | 1153 | } |
1316 | EXPORT_SYMBOL(transport_add_device_to_core_hba); | 1154 | EXPORT_SYMBOL(transport_add_device_to_core_hba); |
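Besides swapping the kthread for a TMR workqueue, the hunk above fixes the error path: the old single out: label called kthread_stop() even when the failure happened before the thread was ever created. The new err_wq/err_dev_list ladder is the standard goto-unwind shape, where each label releases only what was acquired before the jump; generically (hypothetical names):

    static int my_setup(struct my_dev *dev)
    {
            dev->tmr_wq = alloc_workqueue("tmr-%s",
                                          WQ_MEM_RECLAIM | WQ_UNBOUND, 1,
                                          dev->name);
            if (!dev->tmr_wq)
                    goto err_dev_list;      /* nothing newer to undo */

            if (my_register(dev) < 0)
                    goto err_wq;
            return 0;

    err_wq:
            destroy_workqueue(dev->tmr_wq);
    err_dev_list:
            my_remove_from_list(dev);       /* undo the earliest setup */
            return -ENOMEM;
    }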
1317 | 1155 | ||
1318 | /* transport_generic_prepare_cdb(): | 1156 | int target_cmd_size_check(struct se_cmd *cmd, unsigned int size) |
1319 | * | ||
1320 | * Since the Initiator sees iSCSI devices as LUNs, the SCSI CDB will | ||
1321 | * contain the iSCSI LUN in bits 7-5 of byte 1 as per SAM-2. | ||
1322 | * The point of this is since we are mapping iSCSI LUNs to | ||
1323 | * SCSI Target IDs having a non-zero LUN in the CDB will throw the | ||
1324 | * devices and HBAs for a loop. | ||
1325 | */ | ||
1326 | static inline void transport_generic_prepare_cdb( | ||
1327 | unsigned char *cdb) | ||
1328 | { | 1157 | { |
1329 | switch (cdb[0]) { | 1158 | struct se_device *dev = cmd->se_dev; |
1330 | case READ_10: /* SBC - RDProtect */ | 1159 | |
1331 | case READ_12: /* SBC - RDProtect */ | 1160 | if (cmd->unknown_data_length) { |
1332 | case READ_16: /* SBC - RDProtect */ | 1161 | cmd->data_length = size; |
1333 | case SEND_DIAGNOSTIC: /* SPC - SELF-TEST Code */ | 1162 | } else if (size != cmd->data_length) { |
1334 | case VERIFY: /* SBC - VRProtect */ | 1163 | pr_warn("TARGET_CORE[%s]: Expected Transfer Length:" |
1335 | case VERIFY_16: /* SBC - VRProtect */ | 1164 | " %u does not match SCSI CDB Length: %u for SAM Opcode:" |
1336 | case WRITE_VERIFY: /* SBC - VRProtect */ | 1165 | " 0x%02x\n", cmd->se_tfo->get_fabric_name(), |
1337 | case WRITE_VERIFY_12: /* SBC - VRProtect */ | 1166 | cmd->data_length, size, cmd->t_task_cdb[0]); |
1338 | case MAINTENANCE_IN: /* SPC - Parameter Data Format for SA RTPG */ | 1167 | |
1339 | break; | 1168 | cmd->cmd_spdtl = size; |
1340 | default: | 1169 | |
1341 | cdb[1] &= 0x1f; /* clear logical unit number */ | 1170 | if (cmd->data_direction == DMA_TO_DEVICE) { |
1342 | break; | 1171 | pr_err("Rejecting underflow/overflow" |
1172 | " WRITE data\n"); | ||
1173 | goto out_invalid_cdb_field; | ||
1174 | } | ||
1175 | /* | ||
1176 | * Reject READ_* or WRITE_* with overflow/underflow for | ||
1177 | * type SCF_SCSI_DATA_CDB. | ||
1178 | */ | ||
1179 | if (dev->se_sub_dev->se_dev_attrib.block_size != 512) { | ||
1180 | pr_err("Failing OVERFLOW/UNDERFLOW for LBA op" | ||
1181 | " CDB on non 512-byte sector setup subsystem" | ||
1182 | " plugin: %s\n", dev->transport->name); | ||
1183 | /* Returns CHECK_CONDITION + INVALID_CDB_FIELD */ | ||
1184 | goto out_invalid_cdb_field; | ||
1185 | } | ||
1186 | |||
1187 | if (size > cmd->data_length) { | ||
1188 | cmd->se_cmd_flags |= SCF_OVERFLOW_BIT; | ||
1189 | cmd->residual_count = (size - cmd->data_length); | ||
1190 | } else { | ||
1191 | cmd->se_cmd_flags |= SCF_UNDERFLOW_BIT; | ||
1192 | cmd->residual_count = (cmd->data_length - size); | ||
1193 | } | ||
1194 | cmd->data_length = size; | ||
1343 | } | 1195 | } |
1344 | } | ||
1345 | 1196 | ||
1346 | static int transport_generic_cmd_sequencer(struct se_cmd *, unsigned char *); | 1197 | return 0; |
1198 | |||
1199 | out_invalid_cdb_field: | ||
1200 | cmd->se_cmd_flags |= SCF_SCSI_CDB_EXCEPTION; | ||
1201 | cmd->scsi_sense_reason = TCM_INVALID_CDB_FIELD; | ||
1202 | return -EINVAL; | ||
1203 | } | ||
1347 | 1204 | ||
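
target_cmd_size_check() above is the new home for reconciling the fabric's expected transfer length with the length the CDB encodes: WRITE mismatches are rejected outright, while read-side mismatches set SCF_OVERFLOW_BIT or SCF_UNDERFLOW_BIT, record the residual, and clamp cmd->data_length to the CDB size. The residual arithmetic reduces to this standalone model:

    #include <assert.h>
    #include <stdint.h>

    /* Model of the residual bookkeeping above: *overflow is 1 when the
     * CDB asks for more than the fabric expected, 0 when it asks for less. */
    static uint32_t residual(uint32_t cdb_size, uint32_t expected, int *overflow)
    {
    	*overflow = cdb_size > expected;
    	return *overflow ? cdb_size - expected : expected - cdb_size;
    }

    int main(void)
    {
    	int ov;

    	/* Fabric expected 8192 bytes but the CDB encodes 4096: an
    	 * underflow with a 4096-byte residual, matching the code above. */
    	assert(residual(4096, 8192, &ov) == 4096 && ov == 0);
    	return 0;
    }
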
1348 | /* | 1205 | /* |
1349 | * Used by fabric modules containing a local struct se_cmd within their | 1206 | * Used by fabric modules containing a local struct se_cmd within their |
@@ -1361,9 +1218,7 @@ void transport_init_se_cmd( | |||
1361 | INIT_LIST_HEAD(&cmd->se_lun_node); | 1218 | INIT_LIST_HEAD(&cmd->se_lun_node); |
1362 | INIT_LIST_HEAD(&cmd->se_delayed_node); | 1219 | INIT_LIST_HEAD(&cmd->se_delayed_node); |
1363 | INIT_LIST_HEAD(&cmd->se_qf_node); | 1220 | INIT_LIST_HEAD(&cmd->se_qf_node); |
1364 | INIT_LIST_HEAD(&cmd->se_queue_node); | ||
1365 | INIT_LIST_HEAD(&cmd->se_cmd_list); | 1221 | INIT_LIST_HEAD(&cmd->se_cmd_list); |
1366 | INIT_LIST_HEAD(&cmd->execute_list); | ||
1367 | INIT_LIST_HEAD(&cmd->state_list); | 1222 | INIT_LIST_HEAD(&cmd->state_list); |
1368 | init_completion(&cmd->transport_lun_fe_stop_comp); | 1223 | init_completion(&cmd->transport_lun_fe_stop_comp); |
1369 | init_completion(&cmd->transport_lun_stop_comp); | 1224 | init_completion(&cmd->transport_lun_stop_comp); |
@@ -1418,9 +1273,12 @@ int target_setup_cmd_from_cdb( | |||
1418 | struct se_cmd *cmd, | 1273 | struct se_cmd *cmd, |
1419 | unsigned char *cdb) | 1274 | unsigned char *cdb) |
1420 | { | 1275 | { |
1276 | struct se_subsystem_dev *su_dev = cmd->se_dev->se_sub_dev; | ||
1277 | u32 pr_reg_type = 0; | ||
1278 | u8 alua_ascq = 0; | ||
1279 | unsigned long flags; | ||
1421 | int ret; | 1280 | int ret; |
1422 | 1281 | ||
1423 | transport_generic_prepare_cdb(cdb); | ||
1424 | /* | 1282 | /* |
1425 | * Ensure that the received CDB is less than the max (252 + 8) bytes | 1283 | * Ensure that the received CDB is less than the max (252 + 8) bytes |
1426 | * for VARIABLE_LENGTH_CMD | 1284 | * for VARIABLE_LENGTH_CMD |
@@ -1457,15 +1315,66 @@ int target_setup_cmd_from_cdb( | |||
1457 | * Copy the original CDB into cmd-> | 1315 | * Copy the original CDB into cmd-> |
1458 | */ | 1316 | */ |
1459 | memcpy(cmd->t_task_cdb, cdb, scsi_command_size(cdb)); | 1317 | memcpy(cmd->t_task_cdb, cdb, scsi_command_size(cdb)); |
1318 | |||
1460 | /* | 1319 | /* |
1461 | * Setup the received CDB based on SCSI defined opcodes and | 1320 | * Check for an existing UNIT ATTENTION condition |
1462 | * perform unit attention, persistent reservations and ALUA | ||
1463 | * checks for virtual device backends. The cmd->t_task_cdb | ||
1464 | * pointer is expected to be setup before we reach this point. | ||
1465 | */ | 1321 | */ |
1466 | ret = transport_generic_cmd_sequencer(cmd, cdb); | 1322 | if (core_scsi3_ua_check(cmd, cdb) < 0) { |
1323 | cmd->se_cmd_flags |= SCF_SCSI_CDB_EXCEPTION; | ||
1324 | cmd->scsi_sense_reason = TCM_CHECK_CONDITION_UNIT_ATTENTION; | ||
1325 | return -EINVAL; | ||
1326 | } | ||
1327 | |||
1328 | ret = su_dev->t10_alua.alua_state_check(cmd, cdb, &alua_ascq); | ||
1329 | if (ret != 0) { | ||
1330 | /* | ||
1331 | * Set SCSI additional sense code (ASC) to 'LUN Not Accessible'; | ||
1332 | * The ALUA additional sense code qualifier (ASCQ) is determined | ||
1333 | * by the ALUA primary or secondary access state.. | ||
1334 | */ | ||
1335 | if (ret > 0) { | ||
1336 | pr_debug("[%s]: ALUA TG Port not available, " | ||
1337 | "SenseKey: NOT_READY, ASC/ASCQ: " | ||
1338 | "0x04/0x%02x\n", | ||
1339 | cmd->se_tfo->get_fabric_name(), alua_ascq); | ||
1340 | |||
1341 | transport_set_sense_codes(cmd, 0x04, alua_ascq); | ||
1342 | cmd->se_cmd_flags |= SCF_SCSI_CDB_EXCEPTION; | ||
1343 | cmd->scsi_sense_reason = TCM_CHECK_CONDITION_NOT_READY; | ||
1344 | return -EINVAL; | ||
1345 | } | ||
1346 | cmd->se_cmd_flags |= SCF_SCSI_CDB_EXCEPTION; | ||
1347 | cmd->scsi_sense_reason = TCM_INVALID_CDB_FIELD; | ||
1348 | return -EINVAL; | ||
1349 | } | ||
1350 | |||
1351 | /* | ||
1352 | * Check status for SPC-3 Persistent Reservations | ||
1353 | */ | ||
1354 | if (su_dev->t10_pr.pr_ops.t10_reservation_check(cmd, &pr_reg_type)) { | ||
1355 | if (su_dev->t10_pr.pr_ops.t10_seq_non_holder( | ||
1356 | cmd, cdb, pr_reg_type) != 0) { | ||
1357 | cmd->se_cmd_flags |= SCF_SCSI_CDB_EXCEPTION; | ||
1358 | cmd->se_cmd_flags |= SCF_SCSI_RESERVATION_CONFLICT; | ||
1359 | cmd->scsi_status = SAM_STAT_RESERVATION_CONFLICT; | ||
1360 | cmd->scsi_sense_reason = TCM_RESERVATION_CONFLICT; | ||
1361 | return -EBUSY; | ||
1362 | } | ||
1363 | /* | ||
1364 | * This means the CDB is allowed for the SCSI Initiator port | ||
1365 | * when said port is *NOT* holding the legacy SPC-2 or | ||
1366 | * SPC-3 Persistent Reservation. | ||
1367 | */ | ||
1368 | } | ||
1369 | |||
1370 | ret = cmd->se_dev->transport->parse_cdb(cmd); | ||
1467 | if (ret < 0) | 1371 | if (ret < 0) |
1468 | return ret; | 1372 | return ret; |
1373 | |||
1374 | spin_lock_irqsave(&cmd->t_state_lock, flags); | ||
1375 | cmd->se_cmd_flags |= SCF_SUPPORTED_SAM_OPCODE; | ||
1376 | spin_unlock_irqrestore(&cmd->t_state_lock, flags); | ||
1377 | |||
1469 | /* | 1378 | /* |
1470 | * Check for SAM Task Attribute Emulation | 1379 | * Check for SAM Task Attribute Emulation |
1471 | */ | 1380 | */ |
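
This is the heart of the parse_cdb conversion: target_setup_cmd_from_cdb() now keeps only the backend-independent gates (existing UNIT ATTENTION, ALUA port state, SPC-3 persistent reservations) and then defers all opcode parsing to the backend via cmd->se_dev->transport->parse_cdb(cmd). A hypothetical minimal backend hook; only the int (*parse_cdb)(struct se_cmd *) signature is taken from the hunk above, the body is illustrative:

    static int example_parse_cdb(struct se_cmd *cmd)
    {
    	unsigned char *cdb = cmd->t_task_cdb;

    	switch (cdb[0]) {
    	case READ_10:
    		/* LBA is a big-endian 32-bit field at bytes 2-5 */
    		cmd->t_task_lba = get_unaligned_be32(&cdb[2]);
    		cmd->se_cmd_flags |= SCF_SCSI_DATA_SG_IO_CDB;
    		return 0;
    	default:
    		cmd->se_cmd_flags |= SCF_SCSI_CDB_EXCEPTION;
    		cmd->scsi_sense_reason = TCM_UNSUPPORTED_SCSI_OPCODE;
    		return -EINVAL;
    	}
    }
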
@@ -1503,10 +1412,9 @@ int transport_handle_cdb_direct( | |||
1503 | return -EINVAL; | 1412 | return -EINVAL; |
1504 | } | 1413 | } |
1505 | /* | 1414 | /* |
1506 | * Set TRANSPORT_NEW_CMD state and CMD_T_ACTIVE following | 1415 | * Set TRANSPORT_NEW_CMD state and CMD_T_ACTIVE to ensure that |
1507 | * transport_generic_handle_cdb*() -> transport_add_cmd_to_queue() | 1416 | * outstanding descriptors are handled correctly during shutdown via |
1508 | * in existing usage to ensure that outstanding descriptors are handled | 1417 | * transport_wait_for_tasks() |
1509 | * correctly during shutdown via transport_wait_for_tasks() | ||
1510 | * | 1418 | * |
1511 | * Also, we don't take cmd->t_state_lock here as we only expect | 1419 | * Also, we don't take cmd->t_state_lock here as we only expect |
1512 | * this to be called for initial descriptor submission. | 1420 | * this to be called for initial descriptor submission. |
@@ -1540,10 +1448,14 @@ EXPORT_SYMBOL(transport_handle_cdb_direct); | |||
1540 | * @data_dir: DMA data direction | 1448 | * @data_dir: DMA data direction |
1541 | * @flags: flags for command submission from target_sc_flags_tables | 1449 | * @flags: flags for command submission from target_sc_flags_tables |
1542 | * | 1450 | * |
1451 | * Returns non zero to signal active I/O shutdown failure. All other | ||
1452 | * setup exceptions will be returned as a SCSI CHECK_CONDITION response, | ||
1453 | * but still return zero here. | ||
1454 | * | ||
1543 | * This may only be called from process context, and also currently | 1455 | * This may only be called from process context, and also currently |
1544 | * assumes internal allocation of fabric payload buffer by target-core. | 1456 | * assumes internal allocation of fabric payload buffer by target-core. |
1545 | **/ | 1457 | **/ |
1546 | void target_submit_cmd(struct se_cmd *se_cmd, struct se_session *se_sess, | 1458 | int target_submit_cmd(struct se_cmd *se_cmd, struct se_session *se_sess, |
1547 | unsigned char *cdb, unsigned char *sense, u32 unpacked_lun, | 1459 | unsigned char *cdb, unsigned char *sense, u32 unpacked_lun, |
1548 | u32 data_length, int task_attr, int data_dir, int flags) | 1460 | u32 data_length, int task_attr, int data_dir, int flags) |
1549 | { | 1461 | { |
@@ -1569,7 +1481,9 @@ void target_submit_cmd(struct se_cmd *se_cmd, struct se_session *se_sess, | |||
1569 | * for fabrics using TARGET_SCF_ACK_KREF that expect a second | 1481 | * for fabrics using TARGET_SCF_ACK_KREF that expect a second |
1570 | * kref_put() to happen during fabric packet acknowledgement. | 1482 | * kref_put() to happen during fabric packet acknowledgement. |
1571 | */ | 1483 | */ |
1572 | target_get_sess_cmd(se_sess, se_cmd, (flags & TARGET_SCF_ACK_KREF)); | 1484 | rc = target_get_sess_cmd(se_sess, se_cmd, (flags & TARGET_SCF_ACK_KREF)); |
1485 | if (rc) | ||
1486 | return rc; | ||
1573 | /* | 1487 | /* |
1574 | * Signal bidirectional data payloads to target-core | 1488 | * Signal bidirectional data payloads to target-core |
1575 | */ | 1489 | */ |
@@ -1582,16 +1496,13 @@ void target_submit_cmd(struct se_cmd *se_cmd, struct se_session *se_sess, | |||
1582 | transport_send_check_condition_and_sense(se_cmd, | 1496 | transport_send_check_condition_and_sense(se_cmd, |
1583 | se_cmd->scsi_sense_reason, 0); | 1497 | se_cmd->scsi_sense_reason, 0); |
1584 | target_put_sess_cmd(se_sess, se_cmd); | 1498 | target_put_sess_cmd(se_sess, se_cmd); |
1585 | return; | 1499 | return 0; |
1586 | } | 1500 | } |
1587 | /* | 1501 | |
1588 | * Sanitize CDBs via transport_generic_cmd_sequencer() and | ||
1589 | * allocate the necessary tasks to complete the received CDB+data | ||
1590 | */ | ||
1591 | rc = target_setup_cmd_from_cdb(se_cmd, cdb); | 1502 | rc = target_setup_cmd_from_cdb(se_cmd, cdb); |
1592 | if (rc != 0) { | 1503 | if (rc != 0) { |
1593 | transport_generic_request_failure(se_cmd); | 1504 | transport_generic_request_failure(se_cmd); |
1594 | return; | 1505 | return 0; |
1595 | } | 1506 | } |
1596 | 1507 | ||
1597 | /* | 1508 | /* |
@@ -1600,14 +1511,8 @@ void target_submit_cmd(struct se_cmd *se_cmd, struct se_session *se_sess, | |||
1600 | */ | 1511 | */ |
1601 | core_alua_check_nonop_delay(se_cmd); | 1512 | core_alua_check_nonop_delay(se_cmd); |
1602 | 1513 | ||
1603 | /* | ||
1604 | * Dispatch se_cmd descriptor to se_lun->lun_se_dev backend | ||
1605 | * for immediate execution of READs, otherwise wait for | ||
1606 | * transport_generic_handle_data() to be called for WRITEs | ||
1607 | * when fabric has filled the incoming buffer. | ||
1608 | */ | ||
1609 | transport_handle_cdb_direct(se_cmd); | 1514 | transport_handle_cdb_direct(se_cmd); |
1610 | return; | 1515 | return 0; |
1611 | } | 1516 | } |
1612 | EXPORT_SYMBOL(target_submit_cmd); | 1517 | EXPORT_SYMBOL(target_submit_cmd); |
1613 | 1518 | ||
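
Note the signature change above: target_submit_cmd() now returns int, but per the new kerneldoc only active I/O shutdown (a target_get_sess_cmd() failure) yields a non-zero return; every other setup failure is still delivered to the initiator as a CHECK_CONDITION and reported here as success. A hypothetical fabric-side caller therefore only unwinds its own per-command state on error (example_release_ioctx() and the ioctx layout are illustrative):

    rc = target_submit_cmd(&ioctx->cmd, se_sess, cdb, sense, unpacked_lun,
    			data_length, MSG_SIMPLE_TAG, DMA_FROM_DEVICE,
    			TARGET_SCF_ACK_KREF);
    if (rc) {
    	/* No CHECK_CONDITION was or will be sent for this command */
    	example_release_ioctx(ioctx);
    	return;
    }
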
@@ -1662,7 +1567,11 @@ int target_submit_tmr(struct se_cmd *se_cmd, struct se_session *se_sess, | |||
1662 | se_cmd->se_tmr_req->ref_task_tag = tag; | 1567 | se_cmd->se_tmr_req->ref_task_tag = tag; |
1663 | 1568 | ||
1664 | /* See target_submit_cmd for commentary */ | 1569 | /* See target_submit_cmd for commentary */ |
1665 | target_get_sess_cmd(se_sess, se_cmd, (flags & TARGET_SCF_ACK_KREF)); | 1570 | ret = target_get_sess_cmd(se_sess, se_cmd, (flags & TARGET_SCF_ACK_KREF)); |
1571 | if (ret) { | ||
1572 | core_tmr_release_req(se_cmd->se_tmr_req); | ||
1573 | return ret; | ||
1574 | } | ||
1666 | 1575 | ||
1667 | ret = transport_lookup_tmr_lun(se_cmd, unpacked_lun); | 1576 | ret = transport_lookup_tmr_lun(se_cmd, unpacked_lun); |
1668 | if (ret) { | 1577 | if (ret) { |
@@ -1680,67 +1589,6 @@ int target_submit_tmr(struct se_cmd *se_cmd, struct se_session *se_sess, | |||
1680 | EXPORT_SYMBOL(target_submit_tmr); | 1589 | EXPORT_SYMBOL(target_submit_tmr); |
1681 | 1590 | ||
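
target_submit_tmr() gets the same treatment: target_get_sess_cmd() may now refuse a command once the session is tearing down, and the TMR path must release the just-allocated se_tmr_req before propagating the error. A condensed sketch of the gate target_get_sess_cmd() enforces, with field names from this series and the locking simplified:

    spin_lock_irqsave(&se_sess->sess_cmd_lock, flags);
    if (se_sess->sess_tearing_down) {
    	spin_unlock_irqrestore(&se_sess->sess_cmd_lock, flags);
    	return -ESHUTDOWN;	/* caller unwinds its own state */
    }
    list_add_tail(&se_cmd->se_cmd_list, &se_sess->sess_cmd_list);
    spin_unlock_irqrestore(&se_sess->sess_cmd_lock, flags);
    return 0;
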
1682 | /* | 1591 | /* |
1683 | * Used by fabric module frontends defining a TFO->new_cmd_map() caller | ||
1684 | * to queue up a newly setup se_cmd w/ TRANSPORT_NEW_CMD_MAP in order to | ||
1685 | * complete setup in TCM process context w/ TFO->new_cmd_map(). | ||
1686 | */ | ||
1687 | int transport_generic_handle_cdb_map( | ||
1688 | struct se_cmd *cmd) | ||
1689 | { | ||
1690 | if (!cmd->se_lun) { | ||
1691 | dump_stack(); | ||
1692 | pr_err("cmd->se_lun is NULL\n"); | ||
1693 | return -EINVAL; | ||
1694 | } | ||
1695 | |||
1696 | transport_add_cmd_to_queue(cmd, TRANSPORT_NEW_CMD_MAP, false); | ||
1697 | return 0; | ||
1698 | } | ||
1699 | EXPORT_SYMBOL(transport_generic_handle_cdb_map); | ||
1700 | |||
1701 | /* transport_generic_handle_data(): | ||
1702 | * | ||
1703 | * | ||
1704 | */ | ||
1705 | int transport_generic_handle_data( | ||
1706 | struct se_cmd *cmd) | ||
1707 | { | ||
1708 | /* | ||
1709 | * For the software fabric case, we assume the nexus is being | ||
1710 | * failed/shutdown when signals are pending from the kthread context | ||
1711 | * caller, so we return a failure. For the HW target mode case running | ||
1712 | * in interrupt code, the signal_pending() check is skipped. | ||
1713 | */ | ||
1714 | if (!in_interrupt() && signal_pending(current)) | ||
1715 | return -EPERM; | ||
1716 | /* | ||
1717 | * If the received CDB has already been ABORTED by the generic | ||
1718 | * target engine, we now call transport_check_aborted_status() | ||
1719 | * to queue any delayed TASK_ABORTED status for the received CDB to the | ||
1720 | * fabric module as we are expecting no further incoming DATA OUT | ||
1721 | * sequences at this point. | ||
1722 | */ | ||
1723 | if (transport_check_aborted_status(cmd, 1) != 0) | ||
1724 | return 0; | ||
1725 | |||
1726 | transport_add_cmd_to_queue(cmd, TRANSPORT_PROCESS_WRITE, false); | ||
1727 | return 0; | ||
1728 | } | ||
1729 | EXPORT_SYMBOL(transport_generic_handle_data); | ||
1730 | |||
1731 | /* transport_generic_handle_tmr(): | ||
1732 | * | ||
1733 | * | ||
1734 | */ | ||
1735 | int transport_generic_handle_tmr( | ||
1736 | struct se_cmd *cmd) | ||
1737 | { | ||
1738 | transport_add_cmd_to_queue(cmd, TRANSPORT_PROCESS_TMR, false); | ||
1739 | return 0; | ||
1740 | } | ||
1741 | EXPORT_SYMBOL(transport_generic_handle_tmr); | ||
1742 | |||
1743 | /* | ||
1744 | * If the cmd is active, request it to be stopped and sleep until it | 1592 | * If the cmd is active, request it to be stopped and sleep until it |
1745 | * has completed. | 1593 | * has completed. |
1746 | */ | 1594 | */ |
@@ -1797,6 +1645,7 @@ void transport_generic_request_failure(struct se_cmd *cmd) | |||
1797 | case TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE: | 1645 | case TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE: |
1798 | case TCM_UNKNOWN_MODE_PAGE: | 1646 | case TCM_UNKNOWN_MODE_PAGE: |
1799 | case TCM_WRITE_PROTECTED: | 1647 | case TCM_WRITE_PROTECTED: |
1648 | case TCM_ADDRESS_OUT_OF_RANGE: | ||
1800 | case TCM_CHECK_CONDITION_ABORT_CMD: | 1649 | case TCM_CHECK_CONDITION_ABORT_CMD: |
1801 | case TCM_CHECK_CONDITION_UNIT_ATTENTION: | 1650 | case TCM_CHECK_CONDITION_UNIT_ATTENTION: |
1802 | case TCM_CHECK_CONDITION_NOT_READY: | 1651 | case TCM_CHECK_CONDITION_NOT_READY: |
@@ -1832,13 +1681,7 @@ void transport_generic_request_failure(struct se_cmd *cmd) | |||
1832 | cmd->scsi_sense_reason = TCM_UNSUPPORTED_SCSI_OPCODE; | 1681 | cmd->scsi_sense_reason = TCM_UNSUPPORTED_SCSI_OPCODE; |
1833 | break; | 1682 | break; |
1834 | } | 1683 | } |
1835 | /* | 1684 | |
1836 | * If a fabric does not define a cmd->se_tfo->new_cmd_map caller, | ||
1837 | * make the call to transport_send_check_condition_and_sense() | ||
1838 | * directly. Otherwise expect the fabric to make the call to | ||
1839 | * transport_send_check_condition_and_sense() after handling | ||
1840 | * possible unsolicited write data payloads. | ||
1841 | */ | ||
1842 | ret = transport_send_check_condition_and_sense(cmd, | 1685 | ret = transport_send_check_condition_and_sense(cmd, |
1843 | cmd->scsi_sense_reason, 0); | 1686 | cmd->scsi_sense_reason, 0); |
1844 | if (ret == -EAGAIN || ret == -ENOMEM) | 1687 | if (ret == -EAGAIN || ret == -ENOMEM) |
@@ -1856,406 +1699,123 @@ queue_full: | |||
1856 | } | 1699 | } |
1857 | EXPORT_SYMBOL(transport_generic_request_failure); | 1700 | EXPORT_SYMBOL(transport_generic_request_failure); |
1858 | 1701 | ||
1859 | static inline u32 transport_lba_21(unsigned char *cdb) | 1702 | static void __target_execute_cmd(struct se_cmd *cmd) |
1860 | { | 1703 | { |
1861 | return ((cdb[1] & 0x1f) << 16) | (cdb[2] << 8) | cdb[3]; | 1704 | int error = 0; |
1862 | } | ||
1863 | 1705 | ||
1864 | static inline u32 transport_lba_32(unsigned char *cdb) | 1706 | spin_lock_irq(&cmd->t_state_lock); |
1865 | { | 1707 | cmd->transport_state |= (CMD_T_BUSY|CMD_T_SENT); |
1866 | return (cdb[2] << 24) | (cdb[3] << 16) | (cdb[4] << 8) | cdb[5]; | 1708 | spin_unlock_irq(&cmd->t_state_lock); |
1867 | } | ||
1868 | |||
1869 | static inline unsigned long long transport_lba_64(unsigned char *cdb) | ||
1870 | { | ||
1871 | unsigned int __v1, __v2; | ||
1872 | |||
1873 | __v1 = (cdb[2] << 24) | (cdb[3] << 16) | (cdb[4] << 8) | cdb[5]; | ||
1874 | __v2 = (cdb[6] << 24) | (cdb[7] << 16) | (cdb[8] << 8) | cdb[9]; | ||
1875 | |||
1876 | return ((unsigned long long)__v2) | (unsigned long long)__v1 << 32; | ||
1877 | } | ||
1878 | |||
1879 | /* | ||
1880 | * For VARIABLE_LENGTH_CDB w/ 32 byte extended CDBs | ||
1881 | */ | ||
1882 | static inline unsigned long long transport_lba_64_ext(unsigned char *cdb) | ||
1883 | { | ||
1884 | unsigned int __v1, __v2; | ||
1885 | |||
1886 | __v1 = (cdb[12] << 24) | (cdb[13] << 16) | (cdb[14] << 8) | cdb[15]; | ||
1887 | __v2 = (cdb[16] << 24) | (cdb[17] << 16) | (cdb[18] << 8) | cdb[19]; | ||
1888 | |||
1889 | return ((unsigned long long)__v2) | (unsigned long long)__v1 << 32; | ||
1890 | } | ||
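
The removed transport_lba_*() helpers assemble big-endian LBA fields byte by byte; the replacement parsing elsewhere in this diff uses get_unaligned_be32()/get_unaligned_be64() for the same extraction (see the WRITE_SAME cases further down). The equivalence, as a standalone check:

    #include <assert.h>
    #include <stdint.h>

    /* A straight MSB-first fold over cdb[2..9] equals the removed
     * __v1/__v2 assembly in transport_lba_64(). */
    static uint64_t lba_64(const unsigned char *cdb)
    {
    	uint64_t v = 0;
    	int i;

    	for (i = 2; i <= 9; i++)
    		v = (v << 8) | cdb[i];
    	return v;
    }

    int main(void)
    {
    	unsigned char cdb[16] = { [2] = 0x01, [9] = 0xff };

    	assert(lba_64(cdb) == 0x01000000000000ffULL);
    	return 0;
    }
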
1891 | |||
1892 | static void transport_set_supported_SAM_opcode(struct se_cmd *se_cmd) | ||
1893 | { | ||
1894 | unsigned long flags; | ||
1895 | |||
1896 | spin_lock_irqsave(&se_cmd->t_state_lock, flags); | ||
1897 | se_cmd->se_cmd_flags |= SCF_SUPPORTED_SAM_OPCODE; | ||
1898 | spin_unlock_irqrestore(&se_cmd->t_state_lock, flags); | ||
1899 | } | ||
1900 | |||
1901 | /* | ||
1902 | * Called from Fabric Module context from transport_execute_tasks() | ||
1903 | * | ||
1904 | * The return of this function determines if the tasks from struct se_cmd | ||
1905 | * get added to the execution queue in transport_execute_tasks(), | ||
1906 | * or are added to the delayed or ordered lists here. | ||
1907 | */ | ||
1908 | static inline int transport_execute_task_attr(struct se_cmd *cmd) | ||
1909 | { | ||
1910 | if (cmd->se_dev->dev_task_attr_type != SAM_TASK_ATTR_EMULATED) | ||
1911 | return 1; | ||
1912 | /* | ||
1913 | * Check for the existence of HEAD_OF_QUEUE, and if true return 1 | ||
1914 | * to allow the passed struct se_cmd list of tasks to be added to the front of the list. | ||
1915 | */ | ||
1916 | if (cmd->sam_task_attr == MSG_HEAD_TAG) { | ||
1917 | pr_debug("Added HEAD_OF_QUEUE for CDB:" | ||
1918 | " 0x%02x, se_ordered_id: %u\n", | ||
1919 | cmd->t_task_cdb[0], | ||
1920 | cmd->se_ordered_id); | ||
1921 | return 1; | ||
1922 | } else if (cmd->sam_task_attr == MSG_ORDERED_TAG) { | ||
1923 | atomic_inc(&cmd->se_dev->dev_ordered_sync); | ||
1924 | smp_mb__after_atomic_inc(); | ||
1925 | |||
1926 | pr_debug("Added ORDERED for CDB: 0x%02x to ordered" | ||
1927 | " list, se_ordered_id: %u\n", | ||
1928 | cmd->t_task_cdb[0], | ||
1929 | cmd->se_ordered_id); | ||
1930 | /* | ||
1931 | * Add ORDERED command to tail of execution queue if | ||
1932 | * no other older commands exist that need to be | ||
1933 | * completed first. | ||
1934 | */ | ||
1935 | if (!atomic_read(&cmd->se_dev->simple_cmds)) | ||
1936 | return 1; | ||
1937 | } else { | ||
1938 | /* | ||
1939 | * For SIMPLE and UNTAGGED Task Attribute commands | ||
1940 | */ | ||
1941 | atomic_inc(&cmd->se_dev->simple_cmds); | ||
1942 | smp_mb__after_atomic_inc(); | ||
1943 | } | ||
1944 | /* | ||
1945 | * Otherwise, if one or more outstanding ORDERED task attributes exist, | ||
1946 | * add the dormant task(s) built for the passed struct se_cmd to the | ||
1947 | * execution queue and enter the Active state for this struct se_device. | ||
1948 | */ | ||
1949 | if (atomic_read(&cmd->se_dev->dev_ordered_sync) != 0) { | ||
1950 | /* | ||
1951 | * Otherwise, add cmd w/ tasks to delayed cmd queue that | ||
1952 | * will be drained upon completion of HEAD_OF_QUEUE task. | ||
1953 | */ | ||
1954 | spin_lock(&cmd->se_dev->delayed_cmd_lock); | ||
1955 | cmd->se_cmd_flags |= SCF_DELAYED_CMD_FROM_SAM_ATTR; | ||
1956 | list_add_tail(&cmd->se_delayed_node, | ||
1957 | &cmd->se_dev->delayed_cmd_list); | ||
1958 | spin_unlock(&cmd->se_dev->delayed_cmd_lock); | ||
1959 | |||
1960 | pr_debug("Added CDB: 0x%02x Task Attr: 0x%02x to" | ||
1961 | " delayed CMD list, se_ordered_id: %u\n", | ||
1962 | cmd->t_task_cdb[0], cmd->sam_task_attr, | ||
1963 | cmd->se_ordered_id); | ||
1964 | /* | ||
1965 | * Return zero to let transport_execute_tasks() know | ||
1966 | * not to add the delayed tasks to the execution list. | ||
1967 | */ | ||
1968 | return 0; | ||
1969 | } | ||
1970 | /* | ||
1971 | * Otherwise, no ORDERED task attributes exist.. | ||
1972 | */ | ||
1973 | return 1; | ||
1974 | } | ||
1975 | |||
1976 | /* | ||
1977 | * Called from fabric module context in transport_generic_new_cmd() and | ||
1978 | * transport_generic_process_write() | ||
1979 | */ | ||
1980 | static void transport_execute_tasks(struct se_cmd *cmd) | ||
1981 | { | ||
1982 | int add_tasks; | ||
1983 | struct se_device *se_dev = cmd->se_dev; | ||
1984 | /* | ||
1985 | * Call transport_cmd_check_stop() to see if a fabric exception | ||
1986 | * has occurred that prevents execution. | ||
1987 | */ | ||
1988 | if (!transport_cmd_check_stop(cmd, 0, TRANSPORT_PROCESSING)) { | ||
1989 | /* | ||
1990 | * Check for SAM Task Attribute emulation and HEAD_OF_QUEUE | ||
1991 | * attribute for the tasks of the received struct se_cmd CDB | ||
1992 | */ | ||
1993 | add_tasks = transport_execute_task_attr(cmd); | ||
1994 | if (add_tasks) { | ||
1995 | __transport_execute_tasks(se_dev, cmd); | ||
1996 | return; | ||
1997 | } | ||
1998 | } | ||
1999 | __transport_execute_tasks(se_dev, NULL); | ||
2000 | } | ||
2001 | |||
2002 | static int __transport_execute_tasks(struct se_device *dev, struct se_cmd *new_cmd) | ||
2003 | { | ||
2004 | int error; | ||
2005 | struct se_cmd *cmd = NULL; | ||
2006 | unsigned long flags; | ||
2007 | |||
2008 | check_depth: | ||
2009 | spin_lock_irq(&dev->execute_task_lock); | ||
2010 | if (new_cmd != NULL) | ||
2011 | __target_add_to_execute_list(new_cmd); | ||
2012 | |||
2013 | if (list_empty(&dev->execute_list)) { | ||
2014 | spin_unlock_irq(&dev->execute_task_lock); | ||
2015 | return 0; | ||
2016 | } | ||
2017 | cmd = list_first_entry(&dev->execute_list, struct se_cmd, execute_list); | ||
2018 | __target_remove_from_execute_list(cmd); | ||
2019 | spin_unlock_irq(&dev->execute_task_lock); | ||
2020 | |||
2021 | spin_lock_irqsave(&cmd->t_state_lock, flags); | ||
2022 | cmd->transport_state |= CMD_T_BUSY; | ||
2023 | cmd->transport_state |= CMD_T_SENT; | ||
2024 | |||
2025 | spin_unlock_irqrestore(&cmd->t_state_lock, flags); | ||
2026 | 1709 | ||
2027 | if (cmd->execute_cmd) | 1710 | if (cmd->execute_cmd) |
2028 | error = cmd->execute_cmd(cmd); | 1711 | error = cmd->execute_cmd(cmd); |
2029 | else { | ||
2030 | error = dev->transport->execute_cmd(cmd, cmd->t_data_sg, | ||
2031 | cmd->t_data_nents, cmd->data_direction); | ||
2032 | } | ||
2033 | 1712 | ||
2034 | if (error != 0) { | 1713 | if (error) { |
2035 | spin_lock_irqsave(&cmd->t_state_lock, flags); | 1714 | spin_lock_irq(&cmd->t_state_lock); |
2036 | cmd->transport_state &= ~CMD_T_BUSY; | 1715 | cmd->transport_state &= ~(CMD_T_BUSY|CMD_T_SENT); |
2037 | cmd->transport_state &= ~CMD_T_SENT; | 1716 | spin_unlock_irq(&cmd->t_state_lock); |
2038 | spin_unlock_irqrestore(&cmd->t_state_lock, flags); | ||
2039 | 1717 | ||
2040 | transport_generic_request_failure(cmd); | 1718 | transport_generic_request_failure(cmd); |
2041 | } | 1719 | } |
2042 | |||
2043 | new_cmd = NULL; | ||
2044 | goto check_depth; | ||
2045 | |||
2046 | return 0; | ||
2047 | } | 1720 | } |
2048 | 1721 | ||
2049 | static inline u32 transport_get_sectors_6( | 1722 | void target_execute_cmd(struct se_cmd *cmd) |
2050 | unsigned char *cdb, | ||
2051 | struct se_cmd *cmd, | ||
2052 | int *ret) | ||
2053 | { | 1723 | { |
2054 | struct se_device *dev = cmd->se_dev; | 1724 | struct se_device *dev = cmd->se_dev; |
2055 | 1725 | ||
2056 | /* | 1726 | /* |
2057 | * Assume TYPE_DISK for non struct se_device objects. | 1727 | * If the received CDB has already been aborted, stop processing it here. |
2058 | * Use 8-bit sector value. | ||
2059 | */ | ||
2060 | if (!dev) | ||
2061 | goto type_disk; | ||
2062 | |||
2063 | /* | ||
2064 | * Use 24-bit allocation length for TYPE_TAPE. | ||
2065 | */ | 1728 | */ |
2066 | if (dev->transport->get_device_type(dev) == TYPE_TAPE) | 1729 | if (transport_check_aborted_status(cmd, 1)) |
2067 | return (u32)(cdb[2] << 16) + (cdb[3] << 8) + cdb[4]; | 1730 | return; |
2068 | |||
2069 | /* | ||
2070 | * Everything else assume TYPE_DISK Sector CDB location. | ||
2071 | * Use 8-bit sector value. SBC-3 says: | ||
2072 | * | ||
2073 | * A TRANSFER LENGTH field set to zero specifies that 256 | ||
2074 | * logical blocks shall be written. Any other value | ||
2075 | * specifies the number of logical blocks that shall be | ||
2076 | * written. | ||
2077 | */ | ||
2078 | type_disk: | ||
2079 | return cdb[4] ? : 256; | ||
2080 | } | ||
2081 | |||
2082 | static inline u32 transport_get_sectors_10( | ||
2083 | unsigned char *cdb, | ||
2084 | struct se_cmd *cmd, | ||
2085 | int *ret) | ||
2086 | { | ||
2087 | struct se_device *dev = cmd->se_dev; | ||
2088 | 1731 | ||
2089 | /* | 1732 | /* |
2090 | * Assume TYPE_DISK for non struct se_device objects. | 1733 | * Determine if IOCTL context caller is requesting the stopping of this |
2091 | * Use 16-bit sector value. | 1734 | * command for LUN shutdown purposes. |
2092 | */ | 1735 | */ |
2093 | if (!dev) | 1736 | spin_lock_irq(&cmd->t_state_lock); |
2094 | goto type_disk; | 1737 | if (cmd->transport_state & CMD_T_LUN_STOP) { |
1738 | pr_debug("%s:%d CMD_T_LUN_STOP for ITT: 0x%08x\n", | ||
1739 | __func__, __LINE__, cmd->se_tfo->get_task_tag(cmd)); | ||
2095 | 1740 | ||
2096 | /* | 1741 | cmd->transport_state &= ~CMD_T_ACTIVE; |
2097 | * XXX_10 is not defined in SSC, throw an exception | 1742 | spin_unlock_irq(&cmd->t_state_lock); |
2098 | */ | 1743 | complete(&cmd->transport_lun_stop_comp); |
2099 | if (dev->transport->get_device_type(dev) == TYPE_TAPE) { | 1744 | return; |
2100 | *ret = -EINVAL; | ||
2101 | return 0; | ||
2102 | } | 1745 | } |
2103 | |||
2104 | /* | 1746 | /* |
2105 | * Everything else assume TYPE_DISK Sector CDB location. | 1747 | * Determine if frontend context caller is requesting the stopping of |
2106 | * Use 16-bit sector value. | 1748 | * this command for frontend exceptions. |
2107 | */ | ||
2108 | type_disk: | ||
2109 | return (u32)(cdb[7] << 8) + cdb[8]; | ||
2110 | } | ||
2111 | |||
2112 | static inline u32 transport_get_sectors_12( | ||
2113 | unsigned char *cdb, | ||
2114 | struct se_cmd *cmd, | ||
2115 | int *ret) | ||
2116 | { | ||
2117 | struct se_device *dev = cmd->se_dev; | ||
2118 | |||
2119 | /* | ||
2120 | * Assume TYPE_DISK for non struct se_device objects. | ||
2121 | * Use 32-bit sector value. | ||
2122 | */ | 1749 | */ |
2123 | if (!dev) | 1750 | if (cmd->transport_state & CMD_T_STOP) { |
2124 | goto type_disk; | 1751 | pr_debug("%s:%d CMD_T_STOP for ITT: 0x%08x\n", |
1752 | __func__, __LINE__, | ||
1753 | cmd->se_tfo->get_task_tag(cmd)); | ||
2125 | 1754 | ||
2126 | /* | 1755 | spin_unlock_irq(&cmd->t_state_lock); |
2127 | * XXX_12 is not defined in SSC, throw an exception | 1756 | complete(&cmd->t_transport_stop_comp); |
2128 | */ | 1757 | return; |
2129 | if (dev->transport->get_device_type(dev) == TYPE_TAPE) { | ||
2130 | *ret = -EINVAL; | ||
2131 | return 0; | ||
2132 | } | 1758 | } |
2133 | 1759 | ||
2134 | /* | 1760 | cmd->t_state = TRANSPORT_PROCESSING; |
2135 | * Everything else assume TYPE_DISK Sector CDB location. | 1761 | spin_unlock_irq(&cmd->t_state_lock); |
2136 | * Use 32-bit sector value. | ||
2137 | */ | ||
2138 | type_disk: | ||
2139 | return (u32)(cdb[6] << 24) + (cdb[7] << 16) + (cdb[8] << 8) + cdb[9]; | ||
2140 | } | ||
2141 | |||
2142 | static inline u32 transport_get_sectors_16( | ||
2143 | unsigned char *cdb, | ||
2144 | struct se_cmd *cmd, | ||
2145 | int *ret) | ||
2146 | { | ||
2147 | struct se_device *dev = cmd->se_dev; | ||
2148 | |||
2149 | /* | ||
2150 | * Assume TYPE_DISK for non struct se_device objects. | ||
2151 | * Use 32-bit sector value. | ||
2152 | */ | ||
2153 | if (!dev) | ||
2154 | goto type_disk; | ||
2155 | 1762 | ||
2156 | /* | 1763 | if (dev->dev_task_attr_type != SAM_TASK_ATTR_EMULATED) |
2157 | * Use 24-bit allocation length for TYPE_TAPE. | 1764 | goto execute; |
2158 | */ | ||
2159 | if (dev->transport->get_device_type(dev) == TYPE_TAPE) | ||
2160 | return (u32)(cdb[12] << 16) + (cdb[13] << 8) + cdb[14]; | ||
2161 | |||
2162 | type_disk: | ||
2163 | return (u32)(cdb[10] << 24) + (cdb[11] << 16) + | ||
2164 | (cdb[12] << 8) + cdb[13]; | ||
2165 | } | ||
2166 | 1765 | ||
2167 | /* | ||
2168 | * Used for VARIABLE_LENGTH_CDB WRITE_32 and READ_32 variants | ||
2169 | */ | ||
2170 | static inline u32 transport_get_sectors_32( | ||
2171 | unsigned char *cdb, | ||
2172 | struct se_cmd *cmd, | ||
2173 | int *ret) | ||
2174 | { | ||
2175 | /* | 1766 | /* |
2176 | * Assume TYPE_DISK for non struct se_device objects. | 1767 | * Check for the existence of HEAD_OF_QUEUE, and if true return 1 |
2177 | * Use 32-bit sector value. | 1768 | * to allow the passed struct se_cmd list of tasks to be added to the front of the list. |
2178 | */ | 1769 | */ |
2179 | return (u32)(cdb[28] << 24) + (cdb[29] << 16) + | 1770 | switch (cmd->sam_task_attr) { |
2180 | (cdb[30] << 8) + cdb[31]; | 1771 | case MSG_HEAD_TAG: |
1772 | pr_debug("Added HEAD_OF_QUEUE for CDB: 0x%02x, " | ||
1773 | "se_ordered_id: %u\n", | ||
1774 | cmd->t_task_cdb[0], cmd->se_ordered_id); | ||
1775 | goto execute; | ||
1776 | case MSG_ORDERED_TAG: | ||
1777 | atomic_inc(&dev->dev_ordered_sync); | ||
1778 | smp_mb__after_atomic_inc(); | ||
2181 | 1779 | ||
2182 | } | 1780 | pr_debug("Added ORDERED for CDB: 0x%02x to ordered list, " |
1781 | " se_ordered_id: %u\n", | ||
1782 | cmd->t_task_cdb[0], cmd->se_ordered_id); | ||
2183 | 1783 | ||
2184 | static inline u32 transport_get_size( | 1784 | /* |
2185 | u32 sectors, | 1785 | * Execute an ORDERED command if no other older commands |
2186 | unsigned char *cdb, | 1786 | * exist that need to be completed first. |
2187 | struct se_cmd *cmd) | 1787 | */ |
2188 | { | 1788 | if (!atomic_read(&dev->simple_cmds)) |
2189 | struct se_device *dev = cmd->se_dev; | 1789 | goto execute; |
2190 | 1790 | break; | |
2191 | if (dev->transport->get_device_type(dev) == TYPE_TAPE) { | 1791 | default: |
2192 | if (cdb[1] & 1) { /* sectors */ | 1792 | /* |
2193 | return dev->se_sub_dev->se_dev_attrib.block_size * sectors; | 1793 | * For SIMPLE and UNTAGGED Task Attribute commands |
2194 | } else /* bytes */ | 1794 | */ |
2195 | return sectors; | 1795 | atomic_inc(&dev->simple_cmds); |
1796 | smp_mb__after_atomic_inc(); | ||
1797 | break; | ||
2196 | } | 1798 | } |
2197 | 1799 | ||
2198 | pr_debug("Returning block_size: %u, sectors: %u == %u for" | 1800 | if (atomic_read(&dev->dev_ordered_sync) != 0) { |
2199 | " %s object\n", dev->se_sub_dev->se_dev_attrib.block_size, | 1801 | spin_lock(&dev->delayed_cmd_lock); |
2200 | sectors, dev->se_sub_dev->se_dev_attrib.block_size * sectors, | 1802 | list_add_tail(&cmd->se_delayed_node, &dev->delayed_cmd_list); |
2201 | dev->transport->name); | 1803 | spin_unlock(&dev->delayed_cmd_lock); |
2202 | |||
2203 | return dev->se_sub_dev->se_dev_attrib.block_size * sectors; | ||
2204 | } | ||
2205 | 1804 | ||
2206 | static void transport_xor_callback(struct se_cmd *cmd) | 1805 | pr_debug("Added CDB: 0x%02x Task Attr: 0x%02x to" |
2207 | { | 1806 | " delayed CMD list, se_ordered_id: %u\n", |
2208 | unsigned char *buf, *addr; | 1807 | cmd->t_task_cdb[0], cmd->sam_task_attr, |
2209 | struct scatterlist *sg; | 1808 | cmd->se_ordered_id); |
2210 | unsigned int offset; | ||
2211 | int i; | ||
2212 | int count; | ||
2213 | /* | ||
2214 | * From sbc3r22.pdf section 5.48 XDWRITEREAD (10) command | ||
2215 | * | ||
2216 | * 1) read the specified logical block(s); | ||
2217 | * 2) transfer logical blocks from the data-out buffer; | ||
2218 | * 3) XOR the logical blocks transferred from the data-out buffer with | ||
2219 | * the logical blocks read, storing the resulting XOR data in a buffer; | ||
2220 | * 4) if the DISABLE WRITE bit is set to zero, then write the logical | ||
2221 | * blocks transferred from the data-out buffer; and | ||
2222 | * 5) transfer the resulting XOR data to the data-in buffer. | ||
2223 | */ | ||
2224 | buf = kmalloc(cmd->data_length, GFP_KERNEL); | ||
2225 | if (!buf) { | ||
2226 | pr_err("Unable to allocate xor_callback buf\n"); | ||
2227 | return; | 1809 | return; |
2228 | } | 1810 | } |
2229 | /* | ||
2230 | * Copy the scatterlist WRITE buffer located at cmd->t_data_sg | ||
2231 | * into the locally allocated *buf | ||
2232 | */ | ||
2233 | sg_copy_to_buffer(cmd->t_data_sg, | ||
2234 | cmd->t_data_nents, | ||
2235 | buf, | ||
2236 | cmd->data_length); | ||
2237 | 1811 | ||
1812 | execute: | ||
2238 | /* | 1813 | /* |
2239 | * Now perform the XOR against the BIDI read memory located at | 1814 | * Otherwise, no ORDERED task attributes exist.. |
2240 | * cmd->t_mem_bidi_list | ||
2241 | */ | 1815 | */ |
2242 | 1816 | __target_execute_cmd(cmd); | |
2243 | offset = 0; | ||
2244 | for_each_sg(cmd->t_bidi_data_sg, sg, cmd->t_bidi_data_nents, count) { | ||
2245 | addr = kmap_atomic(sg_page(sg)); | ||
2246 | if (!addr) | ||
2247 | goto out; | ||
2248 | |||
2249 | for (i = 0; i < sg->length; i++) | ||
2250 | *(addr + sg->offset + i) ^= *(buf + offset + i); | ||
2251 | |||
2252 | offset += sg->length; | ||
2253 | kunmap_atomic(addr); | ||
2254 | } | ||
2255 | |||
2256 | out: | ||
2257 | kfree(buf); | ||
2258 | } | 1817 | } |
1818 | EXPORT_SYMBOL(target_execute_cmd); | ||
2259 | 1819 | ||
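
target_execute_cmd() above is the execution-path half of the processing-thread removal: the fabric's own context marks the command CMD_T_BUSY|CMD_T_SENT and runs it directly, with HEAD_OF_QUEUE executing immediately, ORDERED waiting for outstanding SIMPLE commands to drain, and anything submitted behind a pending ORDERED parked on the delayed list. The dispatch decision reduces to this standalone model:

    #include <stdbool.h>

    enum attr { SIMPLE, HEAD_OF_QUEUE, ORDERED };

    /* Model of the decision in target_execute_cmd(): true means execute
     * now, false means park on the delayed list. The counter updates
     * mirror the simple_cmds/dev_ordered_sync atomics above. */
    static bool may_execute(enum attr a, int *simple_cmds, int *ordered_sync)
    {
    	switch (a) {
    	case HEAD_OF_QUEUE:
    		return true;			/* always jumps the queue */
    	case ORDERED:
    		(*ordered_sync)++;
    		return *simple_cmds == 0;	/* run once SIMPLEs drain */
    	default:
    		(*simple_cmds)++;
    		return *ordered_sync == 0;	/* wait behind ORDERED */
    	}
    }
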
2260 | /* | 1820 | /* |
2261 | * Used to obtain Sense Data from underlying Linux/SCSI struct scsi_cmnd | 1821 | * Used to obtain Sense Data from underlying Linux/SCSI struct scsi_cmnd |
@@ -2312,737 +1872,31 @@ out: | |||
2312 | return -1; | 1872 | return -1; |
2313 | } | 1873 | } |
2314 | 1874 | ||
2315 | static inline long long transport_dev_end_lba(struct se_device *dev) | 1875 | /* |
2316 | { | 1876 | * Process all commands up to the last received ORDERED task attribute which |
2317 | return dev->transport->get_blocks(dev) + 1; | 1877 | * requires another blocking boundary |
2318 | } | ||
2319 | |||
2320 | static int transport_cmd_get_valid_sectors(struct se_cmd *cmd) | ||
2321 | { | ||
2322 | struct se_device *dev = cmd->se_dev; | ||
2323 | u32 sectors; | ||
2324 | |||
2325 | if (dev->transport->get_device_type(dev) != TYPE_DISK) | ||
2326 | return 0; | ||
2327 | |||
2328 | sectors = (cmd->data_length / dev->se_sub_dev->se_dev_attrib.block_size); | ||
2329 | |||
2330 | if ((cmd->t_task_lba + sectors) > transport_dev_end_lba(dev)) { | ||
2331 | pr_err("LBA: %llu Sectors: %u exceeds" | ||
2332 | " transport_dev_end_lba(): %llu\n", | ||
2333 | cmd->t_task_lba, sectors, | ||
2334 | transport_dev_end_lba(dev)); | ||
2335 | return -EINVAL; | ||
2336 | } | ||
2337 | |||
2338 | return 0; | ||
2339 | } | ||
2340 | |||
2341 | static int target_check_write_same_discard(unsigned char *flags, struct se_device *dev) | ||
2342 | { | ||
2343 | /* | ||
2344 | * Determine if the received WRITE_SAME is used for direct | ||
2345 | * passthrough into Linux/SCSI with struct request via TCM/pSCSI | ||
2346 | * or we are signaling the use of internal WRITE_SAME + UNMAP=1 | ||
2347 | * emulation for -> Linux/BLOCK discard with TCM/IBLOCK code. | ||
2348 | */ | ||
2349 | int passthrough = (dev->transport->transport_type == | ||
2350 | TRANSPORT_PLUGIN_PHBA_PDEV); | ||
2351 | |||
2352 | if (!passthrough) { | ||
2353 | if ((flags[0] & 0x04) || (flags[0] & 0x02)) { | ||
2354 | pr_err("WRITE_SAME PBDATA and LBDATA" | ||
2355 | " bits not supported for Block Discard" | ||
2356 | " Emulation\n"); | ||
2357 | return -ENOSYS; | ||
2358 | } | ||
2359 | /* | ||
2360 | * Currently for the emulated case we only accept | ||
2361 | * tpws with the UNMAP=1 bit set. | ||
2362 | */ | ||
2363 | if (!(flags[0] & 0x08)) { | ||
2364 | pr_err("WRITE_SAME w/o UNMAP bit not" | ||
2365 | " supported for Block Discard Emulation\n"); | ||
2366 | return -ENOSYS; | ||
2367 | } | ||
2368 | } | ||
2369 | |||
2370 | return 0; | ||
2371 | } | ||
2372 | |||
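
target_check_write_same_discard(), removed above as the WRITE_SAME paths move into the spc_ops emulation, encodes a simple rule on the WRITE_SAME flags byte (CDB byte 1, or byte 10 for WRITE_SAME_32): PBDATA (0x04) and LBDATA (0x02) are never supported by the emulation, and only UNMAP=1 (0x08) requests are accepted; passthrough skips the check entirely. As a standalone predicate:

    #include <stdbool.h>

    /* Restatement of the removed check for the non-passthrough case. */
    static bool write_same_discard_ok(unsigned char flags)
    {
    	if (flags & (0x04 | 0x02))	/* PBDATA / LBDATA unsupported */
    		return false;
    	return (flags & 0x08) != 0;	/* emulation requires UNMAP=1 */
    }
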
2373 | /* transport_generic_cmd_sequencer(): | ||
2374 | * | ||
2375 | * Generic Command Sequencer that should work for most DAS transport | ||
2376 | * drivers. | ||
2377 | * | ||
2378 | * Called from target_setup_cmd_from_cdb() in the $FABRIC_MOD | ||
2379 | * RX Thread. | ||
2380 | * | ||
2381 | * FIXME: Need to support other SCSI OPCODES here as well. | ||
2382 | */ | 1878 | */ |
2383 | static int transport_generic_cmd_sequencer( | 1879 | static void target_restart_delayed_cmds(struct se_device *dev) |
2384 | struct se_cmd *cmd, | ||
2385 | unsigned char *cdb) | ||
2386 | { | 1880 | { |
2387 | struct se_device *dev = cmd->se_dev; | 1881 | for (;;) { |
2388 | struct se_subsystem_dev *su_dev = dev->se_sub_dev; | 1882 | struct se_cmd *cmd; |
2389 | int ret = 0, sector_ret = 0, passthrough; | ||
2390 | u32 sectors = 0, size = 0, pr_reg_type = 0; | ||
2391 | u16 service_action; | ||
2392 | u8 alua_ascq = 0; | ||
2393 | /* | ||
2394 | * Check for an existing UNIT ATTENTION condition | ||
2395 | */ | ||
2396 | if (core_scsi3_ua_check(cmd, cdb) < 0) { | ||
2397 | cmd->se_cmd_flags |= SCF_SCSI_CDB_EXCEPTION; | ||
2398 | cmd->scsi_sense_reason = TCM_CHECK_CONDITION_UNIT_ATTENTION; | ||
2399 | return -EINVAL; | ||
2400 | } | ||
2401 | /* | ||
2402 | * Check status of Asymmetric Logical Unit Assignment port | ||
2403 | */ | ||
2404 | ret = su_dev->t10_alua.alua_state_check(cmd, cdb, &alua_ascq); | ||
2405 | if (ret != 0) { | ||
2406 | /* | ||
2407 | * Set SCSI additional sense code (ASC) to 'LUN Not Accessible'; | ||
2408 | * The ALUA additional sense code qualifier (ASCQ) is determined | ||
2409 | * by the ALUA primary or secondary access state.. | ||
2410 | */ | ||
2411 | if (ret > 0) { | ||
2412 | pr_debug("[%s]: ALUA TG Port not available," | ||
2413 | " SenseKey: NOT_READY, ASC/ASCQ: 0x04/0x%02x\n", | ||
2414 | cmd->se_tfo->get_fabric_name(), alua_ascq); | ||
2415 | 1883 | ||
2416 | transport_set_sense_codes(cmd, 0x04, alua_ascq); | 1884 | spin_lock(&dev->delayed_cmd_lock); |
2417 | cmd->se_cmd_flags |= SCF_SCSI_CDB_EXCEPTION; | 1885 | if (list_empty(&dev->delayed_cmd_list)) { |
2418 | cmd->scsi_sense_reason = TCM_CHECK_CONDITION_NOT_READY; | 1886 | spin_unlock(&dev->delayed_cmd_lock); |
2419 | return -EINVAL; | ||
2420 | } | ||
2421 | goto out_invalid_cdb_field; | ||
2422 | } | ||
2423 | /* | ||
2424 | * Check status for SPC-3 Persistent Reservations | ||
2425 | */ | ||
2426 | if (su_dev->t10_pr.pr_ops.t10_reservation_check(cmd, &pr_reg_type) != 0) { | ||
2427 | if (su_dev->t10_pr.pr_ops.t10_seq_non_holder( | ||
2428 | cmd, cdb, pr_reg_type) != 0) { | ||
2429 | cmd->se_cmd_flags |= SCF_SCSI_CDB_EXCEPTION; | ||
2430 | cmd->se_cmd_flags |= SCF_SCSI_RESERVATION_CONFLICT; | ||
2431 | cmd->scsi_status = SAM_STAT_RESERVATION_CONFLICT; | ||
2432 | cmd->scsi_sense_reason = TCM_RESERVATION_CONFLICT; | ||
2433 | return -EBUSY; | ||
2434 | } | ||
2435 | /* | ||
2436 | * This means the CDB is allowed for the SCSI Initiator port | ||
2437 | * when said port is *NOT* holding the legacy SPC-2 or | ||
2438 | * SPC-3 Persistent Reservation. | ||
2439 | */ | ||
2440 | } | ||
2441 | |||
2442 | /* | ||
2443 | * If we operate in passthrough mode we skip most CDB emulation and | ||
2444 | * instead hand the commands down to the physical SCSI device. | ||
2445 | */ | ||
2446 | passthrough = | ||
2447 | (dev->transport->transport_type == TRANSPORT_PLUGIN_PHBA_PDEV); | ||
2448 | |||
2449 | switch (cdb[0]) { | ||
2450 | case READ_6: | ||
2451 | sectors = transport_get_sectors_6(cdb, cmd, &sector_ret); | ||
2452 | if (sector_ret) | ||
2453 | goto out_unsupported_cdb; | ||
2454 | size = transport_get_size(sectors, cdb, cmd); | ||
2455 | cmd->t_task_lba = transport_lba_21(cdb); | ||
2456 | cmd->se_cmd_flags |= SCF_SCSI_DATA_SG_IO_CDB; | ||
2457 | break; | ||
2458 | case READ_10: | ||
2459 | sectors = transport_get_sectors_10(cdb, cmd, &sector_ret); | ||
2460 | if (sector_ret) | ||
2461 | goto out_unsupported_cdb; | ||
2462 | size = transport_get_size(sectors, cdb, cmd); | ||
2463 | cmd->t_task_lba = transport_lba_32(cdb); | ||
2464 | cmd->se_cmd_flags |= SCF_SCSI_DATA_SG_IO_CDB; | ||
2465 | break; | ||
2466 | case READ_12: | ||
2467 | sectors = transport_get_sectors_12(cdb, cmd, &sector_ret); | ||
2468 | if (sector_ret) | ||
2469 | goto out_unsupported_cdb; | ||
2470 | size = transport_get_size(sectors, cdb, cmd); | ||
2471 | cmd->t_task_lba = transport_lba_32(cdb); | ||
2472 | cmd->se_cmd_flags |= SCF_SCSI_DATA_SG_IO_CDB; | ||
2473 | break; | ||
2474 | case READ_16: | ||
2475 | sectors = transport_get_sectors_16(cdb, cmd, &sector_ret); | ||
2476 | if (sector_ret) | ||
2477 | goto out_unsupported_cdb; | ||
2478 | size = transport_get_size(sectors, cdb, cmd); | ||
2479 | cmd->t_task_lba = transport_lba_64(cdb); | ||
2480 | cmd->se_cmd_flags |= SCF_SCSI_DATA_SG_IO_CDB; | ||
2481 | break; | ||
2482 | case WRITE_6: | ||
2483 | sectors = transport_get_sectors_6(cdb, cmd, &sector_ret); | ||
2484 | if (sector_ret) | ||
2485 | goto out_unsupported_cdb; | ||
2486 | size = transport_get_size(sectors, cdb, cmd); | ||
2487 | cmd->t_task_lba = transport_lba_21(cdb); | ||
2488 | cmd->se_cmd_flags |= SCF_SCSI_DATA_SG_IO_CDB; | ||
2489 | break; | ||
2490 | case WRITE_10: | ||
2491 | case WRITE_VERIFY: | ||
2492 | sectors = transport_get_sectors_10(cdb, cmd, &sector_ret); | ||
2493 | if (sector_ret) | ||
2494 | goto out_unsupported_cdb; | ||
2495 | size = transport_get_size(sectors, cdb, cmd); | ||
2496 | cmd->t_task_lba = transport_lba_32(cdb); | ||
2497 | if (cdb[1] & 0x8) | ||
2498 | cmd->se_cmd_flags |= SCF_FUA; | ||
2499 | cmd->se_cmd_flags |= SCF_SCSI_DATA_SG_IO_CDB; | ||
2500 | break; | ||
2501 | case WRITE_12: | ||
2502 | sectors = transport_get_sectors_12(cdb, cmd, &sector_ret); | ||
2503 | if (sector_ret) | ||
2504 | goto out_unsupported_cdb; | ||
2505 | size = transport_get_size(sectors, cdb, cmd); | ||
2506 | cmd->t_task_lba = transport_lba_32(cdb); | ||
2507 | if (cdb[1] & 0x8) | ||
2508 | cmd->se_cmd_flags |= SCF_FUA; | ||
2509 | cmd->se_cmd_flags |= SCF_SCSI_DATA_SG_IO_CDB; | ||
2510 | break; | ||
2511 | case WRITE_16: | ||
2512 | sectors = transport_get_sectors_16(cdb, cmd, &sector_ret); | ||
2513 | if (sector_ret) | ||
2514 | goto out_unsupported_cdb; | ||
2515 | size = transport_get_size(sectors, cdb, cmd); | ||
2516 | cmd->t_task_lba = transport_lba_64(cdb); | ||
2517 | if (cdb[1] & 0x8) | ||
2518 | cmd->se_cmd_flags |= SCF_FUA; | ||
2519 | cmd->se_cmd_flags |= SCF_SCSI_DATA_SG_IO_CDB; | ||
2520 | break; | ||
2521 | case XDWRITEREAD_10: | ||
2522 | if ((cmd->data_direction != DMA_TO_DEVICE) || | ||
2523 | !(cmd->se_cmd_flags & SCF_BIDI)) | ||
2524 | goto out_invalid_cdb_field; | ||
2525 | sectors = transport_get_sectors_10(cdb, cmd, &sector_ret); | ||
2526 | if (sector_ret) | ||
2527 | goto out_unsupported_cdb; | ||
2528 | size = transport_get_size(sectors, cdb, cmd); | ||
2529 | cmd->t_task_lba = transport_lba_32(cdb); | ||
2530 | cmd->se_cmd_flags |= SCF_SCSI_DATA_SG_IO_CDB; | ||
2531 | |||
2532 | /* | ||
2533 | * Do not allow BIDI commands for passthrough mode. | ||
2534 | */ | ||
2535 | if (passthrough) | ||
2536 | goto out_unsupported_cdb; | ||
2537 | |||
2538 | /* | ||
2539 | * Setup BIDI XOR callback to be run after I/O completion. | ||
2540 | */ | ||
2541 | cmd->transport_complete_callback = &transport_xor_callback; | ||
2542 | if (cdb[1] & 0x8) | ||
2543 | cmd->se_cmd_flags |= SCF_FUA; | ||
2544 | break; | ||
2545 | case VARIABLE_LENGTH_CMD: | ||
2546 | service_action = get_unaligned_be16(&cdb[8]); | ||
2547 | switch (service_action) { | ||
2548 | case XDWRITEREAD_32: | ||
2549 | sectors = transport_get_sectors_32(cdb, cmd, &sector_ret); | ||
2550 | if (sector_ret) | ||
2551 | goto out_unsupported_cdb; | ||
2552 | size = transport_get_size(sectors, cdb, cmd); | ||
2553 | /* | ||
2554 | * Use WRITE_32 and READ_32 opcodes for the emulated | ||
2555 | * XDWRITE_READ_32 logic. | ||
2556 | */ | ||
2557 | cmd->t_task_lba = transport_lba_64_ext(cdb); | ||
2558 | cmd->se_cmd_flags |= SCF_SCSI_DATA_SG_IO_CDB; | ||
2559 | |||
2560 | /* | ||
2561 | * Do not allow BIDI commands for passthrough mode. | ||
2562 | */ | ||
2563 | if (passthrough) | ||
2564 | goto out_unsupported_cdb; | ||
2565 | |||
2566 | /* | ||
2567 | * Setup BIDI XOR callback to be run after I/O | ||
2568 | * completion. | ||
2569 | */ | ||
2570 | cmd->transport_complete_callback = &transport_xor_callback; | ||
2571 | if (cdb[1] & 0x8) | ||
2572 | cmd->se_cmd_flags |= SCF_FUA; | ||
2573 | break; | ||
2574 | case WRITE_SAME_32: | ||
2575 | sectors = transport_get_sectors_32(cdb, cmd, &sector_ret); | ||
2576 | if (sector_ret) | ||
2577 | goto out_unsupported_cdb; | ||
2578 | |||
2579 | if (sectors) | ||
2580 | size = transport_get_size(1, cdb, cmd); | ||
2581 | else { | ||
2582 | pr_err("WSNZ=1, WRITE_SAME w/sectors=0 not" | ||
2583 | " supported\n"); | ||
2584 | goto out_invalid_cdb_field; | ||
2585 | } | ||
2586 | |||
2587 | cmd->t_task_lba = get_unaligned_be64(&cdb[12]); | ||
2588 | cmd->se_cmd_flags |= SCF_SCSI_CONTROL_SG_IO_CDB; | ||
2589 | |||
2590 | if (target_check_write_same_discard(&cdb[10], dev) < 0) | ||
2591 | goto out_unsupported_cdb; | ||
2592 | if (!passthrough) | ||
2593 | cmd->execute_cmd = target_emulate_write_same; | ||
2594 | break; | ||
2595 | default: | ||
2596 | pr_err("VARIABLE_LENGTH_CMD service action" | ||
2597 | " 0x%04x not supported\n", service_action); | ||
2598 | goto out_unsupported_cdb; | ||
2599 | } | ||
2600 | break; | ||
2601 | case MAINTENANCE_IN: | ||
2602 | if (dev->transport->get_device_type(dev) != TYPE_ROM) { | ||
2603 | /* MAINTENANCE_IN from SCC-2 */ | ||
2604 | /* | ||
2605 | * Check for emulated MI_REPORT_TARGET_PGS. | ||
2606 | */ | ||
2607 | if ((cdb[1] & 0x1f) == MI_REPORT_TARGET_PGS && | ||
2608 | su_dev->t10_alua.alua_type == SPC3_ALUA_EMULATED) { | ||
2609 | cmd->execute_cmd = | ||
2610 | target_emulate_report_target_port_groups; | ||
2611 | } | ||
2612 | size = (cdb[6] << 24) | (cdb[7] << 16) | | ||
2613 | (cdb[8] << 8) | cdb[9]; | ||
2614 | } else { | ||
2615 | /* GPCMD_SEND_KEY from multi media commands */ | ||
2616 | size = (cdb[8] << 8) + cdb[9]; | ||
2617 | } | ||
2618 | cmd->se_cmd_flags |= SCF_SCSI_CONTROL_SG_IO_CDB; | ||
2619 | break; | ||
2620 | case MODE_SELECT: | ||
2621 | size = cdb[4]; | ||
2622 | cmd->se_cmd_flags |= SCF_SCSI_CONTROL_SG_IO_CDB; | ||
2623 | break; | ||
2624 | case MODE_SELECT_10: | ||
2625 | size = (cdb[7] << 8) + cdb[8]; | ||
2626 | cmd->se_cmd_flags |= SCF_SCSI_CONTROL_SG_IO_CDB; | ||
2627 | break; | ||
2628 | case MODE_SENSE: | ||
2629 | size = cdb[4]; | ||
2630 | cmd->se_cmd_flags |= SCF_SCSI_CONTROL_SG_IO_CDB; | ||
2631 | if (!passthrough) | ||
2632 | cmd->execute_cmd = target_emulate_modesense; | ||
2633 | break; | ||
2634 | case MODE_SENSE_10: | ||
2635 | size = (cdb[7] << 8) + cdb[8]; | ||
2636 | cmd->se_cmd_flags |= SCF_SCSI_CONTROL_SG_IO_CDB; | ||
2637 | if (!passthrough) | ||
2638 | cmd->execute_cmd = target_emulate_modesense; | ||
2639 | break; | ||
2640 | case GPCMD_READ_BUFFER_CAPACITY: | ||
2641 | case GPCMD_SEND_OPC: | ||
2642 | case LOG_SELECT: | ||
2643 | case LOG_SENSE: | ||
2644 | size = (cdb[7] << 8) + cdb[8]; | ||
2645 | cmd->se_cmd_flags |= SCF_SCSI_CONTROL_SG_IO_CDB; | ||
2646 | break; | ||
2647 | case READ_BLOCK_LIMITS: | ||
2648 | size = READ_BLOCK_LEN; | ||
2649 | cmd->se_cmd_flags |= SCF_SCSI_CONTROL_SG_IO_CDB; | ||
2650 | break; | ||
2651 | case GPCMD_GET_CONFIGURATION: | ||
2652 | case GPCMD_READ_FORMAT_CAPACITIES: | ||
2653 | case GPCMD_READ_DISC_INFO: | ||
2654 | case GPCMD_READ_TRACK_RZONE_INFO: | ||
2655 | size = (cdb[7] << 8) + cdb[8]; | ||
2656 | cmd->se_cmd_flags |= SCF_SCSI_CONTROL_SG_IO_CDB; | ||
2657 | break; | ||
2658 | case PERSISTENT_RESERVE_IN: | ||
2659 | if (su_dev->t10_pr.res_type == SPC3_PERSISTENT_RESERVATIONS) | ||
2660 | cmd->execute_cmd = target_scsi3_emulate_pr_in; | ||
2661 | size = (cdb[7] << 8) + cdb[8]; | ||
2662 | cmd->se_cmd_flags |= SCF_SCSI_CONTROL_SG_IO_CDB; | ||
2663 | break; | ||
2664 | case PERSISTENT_RESERVE_OUT: | ||
2665 | if (su_dev->t10_pr.res_type == SPC3_PERSISTENT_RESERVATIONS) | ||
2666 | cmd->execute_cmd = target_scsi3_emulate_pr_out; | ||
2667 | size = (cdb[7] << 8) + cdb[8]; | ||
2668 | cmd->se_cmd_flags |= SCF_SCSI_CONTROL_SG_IO_CDB; | ||
2669 | break; | ||
2670 | case GPCMD_MECHANISM_STATUS: | ||
2671 | case GPCMD_READ_DVD_STRUCTURE: | ||
2672 | size = (cdb[8] << 8) + cdb[9]; | ||
2673 | cmd->se_cmd_flags |= SCF_SCSI_CONTROL_SG_IO_CDB; | ||
2674 | break; | ||
2675 | case READ_POSITION: | ||
2676 | size = READ_POSITION_LEN; | ||
2677 | cmd->se_cmd_flags |= SCF_SCSI_CONTROL_SG_IO_CDB; | ||
2678 | break; | ||
2679 | case MAINTENANCE_OUT: | ||
2680 | if (dev->transport->get_device_type(dev) != TYPE_ROM) { | ||
2681 | /* MAINTENANCE_OUT from SCC-2 | ||
2682 | * | ||
2683 | * Check for emulated MO_SET_TARGET_PGS. | ||
2684 | */ | ||
2685 | if (cdb[1] == MO_SET_TARGET_PGS && | ||
2686 | su_dev->t10_alua.alua_type == SPC3_ALUA_EMULATED) { | ||
2687 | cmd->execute_cmd = | ||
2688 | target_emulate_set_target_port_groups; | ||
2689 | } | ||
2690 | |||
2691 | size = (cdb[6] << 24) | (cdb[7] << 16) | | ||
2692 | (cdb[8] << 8) | cdb[9]; | ||
2693 | } else { | ||
2694 | /* GPCMD_REPORT_KEY from multi media commands */ | ||
2695 | size = (cdb[8] << 8) + cdb[9]; | ||
2696 | } | ||
2697 | cmd->se_cmd_flags |= SCF_SCSI_CONTROL_SG_IO_CDB; | ||
2698 | break; | ||
2699 | case INQUIRY: | ||
2700 | size = (cdb[3] << 8) + cdb[4]; | ||
2701 | /* | ||
2702 | * Do implicit HEAD_OF_QUEUE processing for INQUIRY. | ||
2703 | * See spc4r17 section 5.3 | ||
2704 | */ | ||
2705 | if (cmd->se_dev->dev_task_attr_type == SAM_TASK_ATTR_EMULATED) | ||
2706 | cmd->sam_task_attr = MSG_HEAD_TAG; | ||
2707 | cmd->se_cmd_flags |= SCF_SCSI_CONTROL_SG_IO_CDB; | ||
2708 | if (!passthrough) | ||
2709 | cmd->execute_cmd = target_emulate_inquiry; | ||
2710 | break; | ||
2711 | case READ_BUFFER: | ||
2712 | size = (cdb[6] << 16) + (cdb[7] << 8) + cdb[8]; | ||
2713 | cmd->se_cmd_flags |= SCF_SCSI_CONTROL_SG_IO_CDB; | ||
2714 | break; | ||
2715 | case READ_CAPACITY: | ||
2716 | size = READ_CAP_LEN; | ||
2717 | cmd->se_cmd_flags |= SCF_SCSI_CONTROL_SG_IO_CDB; | ||
2718 | if (!passthrough) | ||
2719 | cmd->execute_cmd = target_emulate_readcapacity; | ||
2720 | break; | ||
2721 | case READ_MEDIA_SERIAL_NUMBER: | ||
2722 | case SECURITY_PROTOCOL_IN: | ||
2723 | case SECURITY_PROTOCOL_OUT: | ||
2724 | size = (cdb[6] << 24) | (cdb[7] << 16) | (cdb[8] << 8) | cdb[9]; | ||
2725 | cmd->se_cmd_flags |= SCF_SCSI_CONTROL_SG_IO_CDB; | ||
2726 | break; | ||
2727 | case SERVICE_ACTION_IN: | ||
2728 | switch (cmd->t_task_cdb[1] & 0x1f) { | ||
2729 | case SAI_READ_CAPACITY_16: | ||
2730 | if (!passthrough) | ||
2731 | cmd->execute_cmd = | ||
2732 | target_emulate_readcapacity_16; | ||
2733 | break; | ||
2734 | default: | ||
2735 | if (passthrough) | ||
2736 | break; | ||
2737 | |||
2738 | pr_err("Unsupported SA: 0x%02x\n", | ||
2739 | cmd->t_task_cdb[1] & 0x1f); | ||
2740 | goto out_invalid_cdb_field; | ||
2741 | } | ||
2742 | /*FALLTHROUGH*/ | ||
2743 | case ACCESS_CONTROL_IN: | ||
2744 | case ACCESS_CONTROL_OUT: | ||
2745 | case EXTENDED_COPY: | ||
2746 | case READ_ATTRIBUTE: | ||
2747 | case RECEIVE_COPY_RESULTS: | ||
2748 | case WRITE_ATTRIBUTE: | ||
2749 | size = (cdb[10] << 24) | (cdb[11] << 16) | | ||
2750 | (cdb[12] << 8) | cdb[13]; | ||
2751 | cmd->se_cmd_flags |= SCF_SCSI_CONTROL_SG_IO_CDB; | ||
2752 | break; | ||
2753 | case RECEIVE_DIAGNOSTIC: | ||
2754 | case SEND_DIAGNOSTIC: | ||
2755 | size = (cdb[3] << 8) | cdb[4]; | ||
2756 | cmd->se_cmd_flags |= SCF_SCSI_CONTROL_SG_IO_CDB; | ||
2757 | break; | ||
2758 | /* #warning FIXME: Figure out correct GPCMD_READ_CD blocksize. */ | ||
2759 | #if 0 | ||
2760 | case GPCMD_READ_CD: | ||
2761 | sectors = (cdb[6] << 16) + (cdb[7] << 8) + cdb[8]; | ||
2762 | size = (2336 * sectors); | ||
2763 | cmd->se_cmd_flags |= SCF_SCSI_CONTROL_SG_IO_CDB; | ||
2764 | break; | ||
2765 | #endif | ||
2766 | case READ_TOC: | ||
2767 | size = cdb[8]; | ||
2768 | cmd->se_cmd_flags |= SCF_SCSI_CONTROL_SG_IO_CDB; | ||
2769 | break; | ||
2770 | case REQUEST_SENSE: | ||
2771 | size = cdb[4]; | ||
2772 | cmd->se_cmd_flags |= SCF_SCSI_CONTROL_SG_IO_CDB; | ||
2773 | if (!passthrough) | ||
2774 | cmd->execute_cmd = target_emulate_request_sense; | ||
2775 | break; | ||
2776 | case READ_ELEMENT_STATUS: | ||
2777 | size = 65536 * cdb[7] + 256 * cdb[8] + cdb[9]; | ||
2778 | cmd->se_cmd_flags |= SCF_SCSI_CONTROL_SG_IO_CDB; | ||
2779 | break; | ||
2780 | case WRITE_BUFFER: | ||
2781 | size = (cdb[6] << 16) + (cdb[7] << 8) + cdb[8]; | ||
2782 | cmd->se_cmd_flags |= SCF_SCSI_CONTROL_SG_IO_CDB; | ||
2783 | break; | ||
2784 | case RESERVE: | ||
2785 | case RESERVE_10: | ||
2786 | /* | ||
2787 | * The SPC-2 RESERVE does not contain a size in the SCSI CDB. | ||
2788 | * Assume the passthrough or $FABRIC_MOD will tell us about it. | ||
2789 | */ | ||
2790 | if (cdb[0] == RESERVE_10) | ||
2791 | size = (cdb[7] << 8) | cdb[8]; | ||
2792 | else | ||
2793 | size = cmd->data_length; | ||
2794 | |||
2795 | /* | ||
2796 | * Setup the legacy emulated handler for SPC-2 and | ||
2797 | * >= SPC-3 compatible reservation handling (CRH=1) | ||
2798 | * Otherwise, we assume the underlying SCSI logic | ||
2799 | * is running in SPC_PASSTHROUGH, and wants reservations | ||
2800 | * emulation disabled. | ||
2801 | */ | ||
2802 | if (su_dev->t10_pr.res_type != SPC_PASSTHROUGH) | ||
2803 | cmd->execute_cmd = target_scsi2_reservation_reserve; | ||
2804 | cmd->se_cmd_flags |= SCF_SCSI_NON_DATA_CDB; | ||
2805 | break; | ||
2806 | case RELEASE: | ||
2807 | case RELEASE_10: | ||
2808 | /* | ||
2809 | * The SPC-2 RELEASE does not contain a size in the SCSI CDB. | ||
2810 | * Assume the passthrough or $FABRIC_MOD will tell us about it. | ||
2811 | */ | ||
2812 | if (cdb[0] == RELEASE_10) | ||
2813 | size = (cdb[7] << 8) | cdb[8]; | ||
2814 | else | ||
2815 | size = cmd->data_length; | ||
2816 | |||
2817 | if (su_dev->t10_pr.res_type != SPC_PASSTHROUGH) | ||
2818 | cmd->execute_cmd = target_scsi2_reservation_release; | ||
2819 | cmd->se_cmd_flags |= SCF_SCSI_NON_DATA_CDB; | ||
2820 | break; | ||
2821 | case SYNCHRONIZE_CACHE: | ||
2822 | case SYNCHRONIZE_CACHE_16: | ||
2823 | /* | ||
2824 | * Extract LBA and range to be flushed for emulated SYNCHRONIZE_CACHE | ||
2825 | */ | ||
2826 | if (cdb[0] == SYNCHRONIZE_CACHE) { | ||
2827 | sectors = transport_get_sectors_10(cdb, cmd, &sector_ret); | ||
2828 | cmd->t_task_lba = transport_lba_32(cdb); | ||
2829 | } else { | ||
2830 | sectors = transport_get_sectors_16(cdb, cmd, &sector_ret); | ||
2831 | cmd->t_task_lba = transport_lba_64(cdb); | ||
2832 | } | ||
2833 | if (sector_ret) | ||
2834 | goto out_unsupported_cdb; | ||
2835 | |||
2836 | size = transport_get_size(sectors, cdb, cmd); | ||
2837 | cmd->se_cmd_flags |= SCF_SCSI_NON_DATA_CDB; | ||
2838 | |||
2839 | if (passthrough) | ||
2840 | break; | 1887 | break; |
2841 | |||
2842 | /* | ||
2843 | * Check to ensure that LBA + Range does not exceed past end of | ||
2844 | * device for IBLOCK and FILEIO ->do_sync_cache() backend calls | ||
2845 | */ | ||
2846 | if ((cmd->t_task_lba != 0) || (sectors != 0)) { | ||
2847 | if (transport_cmd_get_valid_sectors(cmd) < 0) | ||
2848 | goto out_invalid_cdb_field; | ||
2849 | } | ||
2850 | cmd->execute_cmd = target_emulate_synchronize_cache; | ||
2851 | break; | ||
2852 | case UNMAP: | ||
2853 | size = get_unaligned_be16(&cdb[7]); | ||
2854 | cmd->se_cmd_flags |= SCF_SCSI_CONTROL_SG_IO_CDB; | ||
2855 | if (!passthrough) | ||
2856 | cmd->execute_cmd = target_emulate_unmap; | ||
2857 | break; | ||
2858 | case WRITE_SAME_16: | ||
2859 | sectors = transport_get_sectors_16(cdb, cmd, &sector_ret); | ||
2860 | if (sector_ret) | ||
2861 | goto out_unsupported_cdb; | ||
2862 | |||
2863 | if (sectors) | ||
2864 | size = transport_get_size(1, cdb, cmd); | ||
2865 | else { | ||
2866 | pr_err("WSNZ=1, WRITE_SAME w/sectors=0 not supported\n"); | ||
2867 | goto out_invalid_cdb_field; | ||
2868 | } | 1888 | } |
2869 | 1889 | ||
2870 | cmd->t_task_lba = get_unaligned_be64(&cdb[2]); | 1890 | cmd = list_entry(dev->delayed_cmd_list.next, |
2871 | cmd->se_cmd_flags |= SCF_SCSI_CONTROL_SG_IO_CDB; | 1891 | struct se_cmd, se_delayed_node); |
2872 | 1892 | list_del(&cmd->se_delayed_node); | |
2873 | if (target_check_write_same_discard(&cdb[1], dev) < 0) | 1893 | spin_unlock(&dev->delayed_cmd_lock); |
2874 | goto out_unsupported_cdb; | ||
2875 | if (!passthrough) | ||
2876 | cmd->execute_cmd = target_emulate_write_same; | ||
2877 | break; | ||
2878 | case WRITE_SAME: | ||
2879 | sectors = transport_get_sectors_10(cdb, cmd, &sector_ret); | ||
2880 | if (sector_ret) | ||
2881 | goto out_unsupported_cdb; | ||
2882 | 1894 | ||
2883 | if (sectors) | 1895 | __target_execute_cmd(cmd); |
2884 | size = transport_get_size(1, cdb, cmd); | ||
2885 | else { | ||
2886 | pr_err("WSNZ=1, WRITE_SAME w/sectors=0 not supported\n"); | ||
2887 | goto out_invalid_cdb_field; | ||
2888 | } | ||
2889 | 1896 | ||
2890 | cmd->t_task_lba = get_unaligned_be32(&cdb[2]); | 1897 | if (cmd->sam_task_attr == MSG_ORDERED_TAG) |
2891 | cmd->se_cmd_flags |= SCF_SCSI_CONTROL_SG_IO_CDB; | ||
2892 | /* | ||
2893 | * Follow sbcr26 with WRITE_SAME (10) and check for the existence | ||
2894 | * of byte 1 bit 3 UNMAP instead of original reserved field | ||
2895 | */ | ||
2896 | if (target_check_write_same_discard(&cdb[1], dev) < 0) | ||
2897 | goto out_unsupported_cdb; | ||
2898 | if (!passthrough) | ||
2899 | cmd->execute_cmd = target_emulate_write_same; | ||
2900 | break; | ||
2901 | case ALLOW_MEDIUM_REMOVAL: | ||
2902 | case ERASE: | ||
2903 | case REZERO_UNIT: | ||
2904 | case SEEK_10: | ||
2905 | case SPACE: | ||
2906 | case START_STOP: | ||
2907 | case TEST_UNIT_READY: | ||
2908 | case VERIFY: | ||
2909 | case WRITE_FILEMARKS: | ||
2910 | cmd->se_cmd_flags |= SCF_SCSI_NON_DATA_CDB; | ||
2911 | if (!passthrough) | ||
2912 | cmd->execute_cmd = target_emulate_noop; | ||
2913 | break; | ||
2914 | case GPCMD_CLOSE_TRACK: | ||
2915 | case INITIALIZE_ELEMENT_STATUS: | ||
2916 | case GPCMD_LOAD_UNLOAD: | ||
2917 | case GPCMD_SET_SPEED: | ||
2918 | case MOVE_MEDIUM: | ||
2919 | cmd->se_cmd_flags |= SCF_SCSI_NON_DATA_CDB; | ||
2920 | break; | ||
2921 | case REPORT_LUNS: | ||
2922 | cmd->execute_cmd = target_report_luns; | ||
2923 | size = (cdb[6] << 24) | (cdb[7] << 16) | (cdb[8] << 8) | cdb[9]; | ||
2924 | /* | ||
2925 | * Do implicit HEAD_OF_QUEUE processing for REPORT_LUNS | ||
2926 | * See spc4r17 section 5.3 | ||
2927 | */ | ||
2928 | if (cmd->se_dev->dev_task_attr_type == SAM_TASK_ATTR_EMULATED) | ||
2929 | cmd->sam_task_attr = MSG_HEAD_TAG; | ||
2930 | cmd->se_cmd_flags |= SCF_SCSI_CONTROL_SG_IO_CDB; | ||
2931 | break; | ||
2932 | case GET_EVENT_STATUS_NOTIFICATION: | ||
2933 | size = (cdb[7] << 8) | cdb[8]; | ||
2934 | cmd->se_cmd_flags |= SCF_SCSI_CONTROL_SG_IO_CDB; | ||
2935 | break; | ||
2936 | case ATA_16: | ||
2937 | /* Only support ATA passthrough to pSCSI backends.. */ | ||
2938 | if (!passthrough) | ||
2939 | goto out_unsupported_cdb; | ||
2940 | |||
2941 | /* T_LENGTH */ | ||
2942 | switch (cdb[2] & 0x3) { | ||
2943 | case 0x0: | ||
2944 | sectors = 0; | ||
2945 | break; | ||
2946 | case 0x1: | ||
2947 | sectors = (((cdb[1] & 0x1) ? cdb[3] : 0) << 8) | cdb[4]; | ||
2948 | break; | ||
2949 | case 0x2: | ||
2950 | sectors = (((cdb[1] & 0x1) ? cdb[5] : 0) << 8) | cdb[6]; | ||
2951 | break; | 1898 | break; |
2952 | case 0x3: | ||
2953 | pr_err("T_LENGTH=0x3 not supported for ATA_16\n"); | ||
2954 | goto out_invalid_cdb_field; | ||
2955 | } | ||
2956 | |||
2957 | /* BYTE_BLOCK */ | ||
2958 | if (cdb[2] & 0x4) { | ||
2959 | /* BLOCK T_TYPE: 512 or sector */ | ||
2960 | size = sectors * ((cdb[2] & 0x10) ? | ||
2961 | dev->se_sub_dev->se_dev_attrib.block_size : 512); | ||
2962 | } else { | ||
2963 | /* BYTE */ | ||
2964 | size = sectors; | ||
2965 | } | ||
2966 | cmd->se_cmd_flags |= SCF_SCSI_CONTROL_SG_IO_CDB; | ||
2967 | break; | ||
2968 | default: | ||
2969 | pr_warn("TARGET_CORE[%s]: Unsupported SCSI Opcode" | ||
2970 | " 0x%02x, sending CHECK_CONDITION.\n", | ||
2971 | cmd->se_tfo->get_fabric_name(), cdb[0]); | ||
2972 | goto out_unsupported_cdb; | ||
2973 | } | ||
2974 | |||
2975 | if (cmd->unknown_data_length) | ||
2976 | cmd->data_length = size; | ||
2977 | |||
2978 | if (size != cmd->data_length) { | ||
2979 | pr_warn("TARGET_CORE[%s]: Expected Transfer Length:" | ||
2980 | " %u does not match SCSI CDB Length: %u for SAM Opcode:" | ||
2981 | " 0x%02x\n", cmd->se_tfo->get_fabric_name(), | ||
2982 | cmd->data_length, size, cdb[0]); | ||
2983 | |||
2984 | cmd->cmd_spdtl = size; | ||
2985 | |||
2986 | if (cmd->data_direction == DMA_TO_DEVICE) { | ||
2987 | pr_err("Rejecting underflow/overflow" | ||
2988 | " WRITE data\n"); | ||
2989 | goto out_invalid_cdb_field; | ||
2990 | } | ||
2991 | /* | ||
2992 | * Reject READ_* or WRITE_* with overflow/underflow for | ||
2993 | * type SCF_SCSI_DATA_SG_IO_CDB. | ||
2994 | */ | ||
2995 | if (!ret && (dev->se_sub_dev->se_dev_attrib.block_size != 512)) { | ||
2996 | pr_err("Failing OVERFLOW/UNDERFLOW for LBA op" | ||
2997 | " CDB on non 512-byte sector setup subsystem" | ||
2998 | " plugin: %s\n", dev->transport->name); | ||
2999 | /* Returns CHECK_CONDITION + INVALID_CDB_FIELD */ | ||
3000 | goto out_invalid_cdb_field; | ||
3001 | } | ||
3002 | |||
3003 | if (size > cmd->data_length) { | ||
3004 | cmd->se_cmd_flags |= SCF_OVERFLOW_BIT; | ||
3005 | cmd->residual_count = (size - cmd->data_length); | ||
3006 | } else { | ||
3007 | cmd->se_cmd_flags |= SCF_UNDERFLOW_BIT; | ||
3008 | cmd->residual_count = (cmd->data_length - size); | ||
3009 | } | ||
3010 | cmd->data_length = size; | ||
3011 | } | 1899 | } |
3012 | |||
3013 | if (cmd->se_cmd_flags & SCF_SCSI_DATA_SG_IO_CDB) { | ||
3014 | if (sectors > su_dev->se_dev_attrib.fabric_max_sectors) { | ||
3015 | printk_ratelimited(KERN_ERR "SCSI OP %02xh with too" | ||
3016 | " big sectors %u exceeds fabric_max_sectors:" | ||
3017 | " %u\n", cdb[0], sectors, | ||
3018 | su_dev->se_dev_attrib.fabric_max_sectors); | ||
3019 | goto out_invalid_cdb_field; | ||
3020 | } | ||
3021 | if (sectors > su_dev->se_dev_attrib.hw_max_sectors) { | ||
3022 | printk_ratelimited(KERN_ERR "SCSI OP %02xh with too" | ||
3023 | " big sectors %u exceeds backend hw_max_sectors:" | ||
3024 | " %u\n", cdb[0], sectors, | ||
3025 | su_dev->se_dev_attrib.hw_max_sectors); | ||
3026 | goto out_invalid_cdb_field; | ||
3027 | } | ||
3028 | } | ||
3029 | |||
3030 | /* reject any command that we don't have a handler for */ | ||
3031 | if (!(passthrough || cmd->execute_cmd || | ||
3032 | (cmd->se_cmd_flags & SCF_SCSI_DATA_SG_IO_CDB))) | ||
3033 | goto out_unsupported_cdb; | ||
3034 | |||
3035 | transport_set_supported_SAM_opcode(cmd); | ||
3036 | return ret; | ||
3037 | |||
3038 | out_unsupported_cdb: | ||
3039 | cmd->se_cmd_flags |= SCF_SCSI_CDB_EXCEPTION; | ||
3040 | cmd->scsi_sense_reason = TCM_UNSUPPORTED_SCSI_OPCODE; | ||
3041 | return -EINVAL; | ||
3042 | out_invalid_cdb_field: | ||
3043 | cmd->se_cmd_flags |= SCF_SCSI_CDB_EXCEPTION; | ||
3044 | cmd->scsi_sense_reason = TCM_INVALID_CDB_FIELD; | ||
3045 | return -EINVAL; | ||
3046 | } | 1900 | } |
3047 | 1901 | ||
3048 | /* | 1902 | /* |
@@ -3052,8 +1906,6 @@ out_invalid_cdb_field: | |||
3052 | static void transport_complete_task_attr(struct se_cmd *cmd) | 1906 | static void transport_complete_task_attr(struct se_cmd *cmd) |
3053 | { | 1907 | { |
3054 | struct se_device *dev = cmd->se_dev; | 1908 | struct se_device *dev = cmd->se_dev; |
3055 | struct se_cmd *cmd_p, *cmd_tmp; | ||
3056 | int new_active_tasks = 0; | ||
3057 | 1909 | ||
3058 | if (cmd->sam_task_attr == MSG_SIMPLE_TAG) { | 1910 | if (cmd->sam_task_attr == MSG_SIMPLE_TAG) { |
3059 | atomic_dec(&dev->simple_cmds); | 1911 | atomic_dec(&dev->simple_cmds); |
@@ -3075,38 +1927,8 @@ static void transport_complete_task_attr(struct se_cmd *cmd) | |||
3075 | pr_debug("Incremented dev_cur_ordered_id: %u for ORDERED:" | 1927 | pr_debug("Incremented dev_cur_ordered_id: %u for ORDERED:" |
3076 | " %u\n", dev->dev_cur_ordered_id, cmd->se_ordered_id); | 1928 | " %u\n", dev->dev_cur_ordered_id, cmd->se_ordered_id); |
3077 | } | 1929 | } |
3078 | /* | ||
3079 | * Process all commands up to the last received | ||
3080 | * ORDERED task attribute which requires another blocking | ||
3081 | * boundary | ||
3082 | */ | ||
3083 | spin_lock(&dev->delayed_cmd_lock); | ||
3084 | list_for_each_entry_safe(cmd_p, cmd_tmp, | ||
3085 | &dev->delayed_cmd_list, se_delayed_node) { | ||
3086 | |||
3087 | list_del(&cmd_p->se_delayed_node); | ||
3088 | spin_unlock(&dev->delayed_cmd_lock); | ||
3089 | |||
3090 | pr_debug("Calling add_tasks() for" | ||
3091 | " cmd_p: 0x%02x Task Attr: 0x%02x" | ||
3092 | " Dormant -> Active, se_ordered_id: %u\n", | ||
3093 | cmd_p->t_task_cdb[0], | ||
3094 | cmd_p->sam_task_attr, cmd_p->se_ordered_id); | ||
3095 | 1930 | ||
3096 | target_add_to_execute_list(cmd_p); | 1931 | target_restart_delayed_cmds(dev); |
3097 | new_active_tasks++; | ||
3098 | |||
3099 | spin_lock(&dev->delayed_cmd_lock); | ||
3100 | if (cmd_p->sam_task_attr == MSG_ORDERED_TAG) | ||
3101 | break; | ||
3102 | } | ||
3103 | spin_unlock(&dev->delayed_cmd_lock); | ||
3104 | /* | ||
3105 | * If new tasks have become active, wake up the transport thread | ||
3106 | * to do the processing of the Active tasks. | ||
3107 | */ | ||
3108 | if (new_active_tasks != 0) | ||
3109 | wake_up_interruptible(&dev->dev_queue_obj.thread_wq); | ||
3110 | } | 1932 | } |
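transport_complete_task_attr() now ends by handing the delayed-command list to target_restart_delayed_cmds(), whose body appears only as fragments in the right-hand column of the hunks above. A hedged reconstruction of that helper from those fragments (a sketch, not the authoritative source):

    static void target_restart_delayed_cmds(struct se_device *dev)
    {
            struct se_cmd *cmd;

            for (;;) {
                    spin_lock(&dev->delayed_cmd_lock);
                    if (list_empty(&dev->delayed_cmd_list)) {
                            spin_unlock(&dev->delayed_cmd_lock);
                            break;
                    }

                    cmd = list_entry(dev->delayed_cmd_list.next,
                                     struct se_cmd, se_delayed_node);
                    list_del(&cmd->se_delayed_node);
                    spin_unlock(&dev->delayed_cmd_lock);

                    __target_execute_cmd(cmd);

                    /* Stop restarting at the next ORDERED boundary. */
                    if (cmd->sam_task_attr == MSG_ORDERED_TAG)
                            break;
            }
    }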
3111 | 1933 | ||
3112 | static void transport_complete_qf(struct se_cmd *cmd) | 1934 | static void transport_complete_qf(struct se_cmd *cmd) |
@@ -3365,31 +2187,27 @@ int transport_generic_map_mem_to_cmd( | |||
3365 | if (!sgl || !sgl_count) | 2187 | if (!sgl || !sgl_count) |
3366 | return 0; | 2188 | return 0; |
3367 | 2189 | ||
3368 | if ((cmd->se_cmd_flags & SCF_SCSI_DATA_SG_IO_CDB) || | 2190 | /* |
3369 | (cmd->se_cmd_flags & SCF_SCSI_CONTROL_SG_IO_CDB)) { | 2191 | * Reject SCSI data overflow with map_mem_to_cmd() as incoming |
3370 | /* | 2192 | * scatterlists already have been set to follow what the fabric |
3371 | * Reject SCSI data overflow with map_mem_to_cmd() as incoming | 2193 | * passes for the original expected data transfer length. |
3372 | * scatterlists already have been set to follow what the fabric | 2194 | */ |
3373 | * passes for the original expected data transfer length. | 2195 | if (cmd->se_cmd_flags & SCF_OVERFLOW_BIT) { |
3374 | */ | 2196 | pr_warn("Rejecting SCSI DATA overflow for fabric using" |
3375 | if (cmd->se_cmd_flags & SCF_OVERFLOW_BIT) { | 2197 | " SCF_PASSTHROUGH_SG_TO_MEM_NOALLOC\n"); |
3376 | pr_warn("Rejecting SCSI DATA overflow for fabric using" | 2198 | cmd->se_cmd_flags |= SCF_SCSI_CDB_EXCEPTION; |
3377 | " SCF_PASSTHROUGH_SG_TO_MEM_NOALLOC\n"); | 2199 | cmd->scsi_sense_reason = TCM_INVALID_CDB_FIELD; |
3378 | cmd->se_cmd_flags |= SCF_SCSI_CDB_EXCEPTION; | 2200 | return -EINVAL; |
3379 | cmd->scsi_sense_reason = TCM_INVALID_CDB_FIELD; | 2201 | } |
3380 | return -EINVAL; | ||
3381 | } | ||
3382 | 2202 | ||
3383 | cmd->t_data_sg = sgl; | 2203 | cmd->t_data_sg = sgl; |
3384 | cmd->t_data_nents = sgl_count; | 2204 | cmd->t_data_nents = sgl_count; |
3385 | 2205 | ||
3386 | if (sgl_bidi && sgl_bidi_count) { | 2206 | if (sgl_bidi && sgl_bidi_count) { |
3387 | cmd->t_bidi_data_sg = sgl_bidi; | 2207 | cmd->t_bidi_data_sg = sgl_bidi; |
3388 | cmd->t_bidi_data_nents = sgl_bidi_count; | 2208 | cmd->t_bidi_data_nents = sgl_bidi_count; |
3389 | } | ||
3390 | cmd->se_cmd_flags |= SCF_PASSTHROUGH_SG_TO_MEM_NOALLOC; | ||
3391 | } | 2209 | } |
3392 | 2210 | cmd->se_cmd_flags |= SCF_PASSTHROUGH_SG_TO_MEM_NOALLOC; | |
3393 | return 0; | 2211 | return 0; |
3394 | } | 2212 | } |
3395 | EXPORT_SYMBOL(transport_generic_map_mem_to_cmd); | 2213 | EXPORT_SYMBOL(transport_generic_map_mem_to_cmd); |
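With the SG_IO-flag guard gone, transport_generic_map_mem_to_cmd() unconditionally adopts fabric-provided scatterlists and tags the command SCF_PASSTHROUGH_SG_TO_MEM_NOALLOC. A hedged sketch of the calling side, with illustrative variable names (sgl/sgl_count being whatever the fabric's DMA layer produced):

    ret = transport_generic_map_mem_to_cmd(se_cmd, sgl, sgl_count,
                                           NULL, 0 /* no BIDI sgl */);
    if (ret < 0)
            return ret;   /* core set SCF_SCSI_CDB_EXCEPTION + sense */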
@@ -3461,7 +2279,7 @@ transport_generic_get_mem(struct se_cmd *cmd) | |||
3461 | cmd->t_data_nents = nents; | 2279 | cmd->t_data_nents = nents; |
3462 | sg_init_table(cmd->t_data_sg, nents); | 2280 | sg_init_table(cmd->t_data_sg, nents); |
3463 | 2281 | ||
3464 | zero_flag = cmd->se_cmd_flags & SCF_SCSI_DATA_SG_IO_CDB ? 0 : __GFP_ZERO; | 2282 | zero_flag = cmd->se_cmd_flags & SCF_SCSI_DATA_CDB ? 0 : __GFP_ZERO; |
3465 | 2283 | ||
3466 | while (length) { | 2284 | while (length) { |
3467 | u32 page_len = min_t(u32, length, PAGE_SIZE); | 2285 | u32 page_len = min_t(u32, length, PAGE_SIZE); |
@@ -3492,7 +2310,6 @@ out: | |||
3492 | */ | 2310 | */ |
3493 | int transport_generic_new_cmd(struct se_cmd *cmd) | 2311 | int transport_generic_new_cmd(struct se_cmd *cmd) |
3494 | { | 2312 | { |
3495 | struct se_device *dev = cmd->se_dev; | ||
3496 | int ret = 0; | 2313 | int ret = 0; |
3497 | 2314 | ||
3498 | /* | 2315 | /* |
@@ -3508,8 +2325,7 @@ int transport_generic_new_cmd(struct se_cmd *cmd) | |||
3508 | } | 2325 | } |
3509 | 2326 | ||
3510 | /* Workaround for handling zero-length control CDBs */ | 2327 | /* Workaround for handling zero-length control CDBs */ |
3511 | if ((cmd->se_cmd_flags & SCF_SCSI_CONTROL_SG_IO_CDB) && | 2328 | if (!(cmd->se_cmd_flags & SCF_SCSI_DATA_CDB) && !cmd->data_length) { |
3512 | !cmd->data_length) { | ||
3513 | spin_lock_irq(&cmd->t_state_lock); | 2329 | spin_lock_irq(&cmd->t_state_lock); |
3514 | cmd->t_state = TRANSPORT_COMPLETE; | 2330 | cmd->t_state = TRANSPORT_COMPLETE; |
3515 | cmd->transport_state |= CMD_T_ACTIVE; | 2331 | cmd->transport_state |= CMD_T_ACTIVE; |
@@ -3527,52 +2343,45 @@ int transport_generic_new_cmd(struct se_cmd *cmd) | |||
3527 | return 0; | 2343 | return 0; |
3528 | } | 2344 | } |
3529 | 2345 | ||
3530 | if (cmd->se_cmd_flags & SCF_SCSI_DATA_SG_IO_CDB) { | ||
3531 | struct se_dev_attrib *attr = &dev->se_sub_dev->se_dev_attrib; | ||
3532 | |||
3533 | if (transport_cmd_get_valid_sectors(cmd) < 0) | ||
3534 | return -EINVAL; | ||
3535 | |||
3536 | BUG_ON(cmd->data_length % attr->block_size); | ||
3537 | BUG_ON(DIV_ROUND_UP(cmd->data_length, attr->block_size) > | ||
3538 | attr->hw_max_sectors); | ||
3539 | } | ||
3540 | |||
3541 | atomic_inc(&cmd->t_fe_count); | 2346 | atomic_inc(&cmd->t_fe_count); |
3542 | 2347 | ||
3543 | /* | 2348 | /* |
3544 | * For WRITEs, let the fabric know its buffer is ready. | 2349 | * If this command is not a write we can execute it right here, |
3545 | * | 2350 | * for write buffers we need to notify the fabric driver first |
3546 | * The command will be added to the execution queue after its write | 2351 | * and let it call back once the write buffers are ready. |
3547 | * data has arrived. | ||
3548 | */ | 2352 | */ |
3549 | if (cmd->data_direction == DMA_TO_DEVICE) { | 2353 | target_add_to_state_list(cmd); |
3550 | target_add_to_state_list(cmd); | 2354 | if (cmd->data_direction != DMA_TO_DEVICE) { |
3551 | return transport_generic_write_pending(cmd); | 2355 | target_execute_cmd(cmd); |
2356 | return 0; | ||
3552 | } | 2357 | } |
3553 | /* | 2358 | |
3554 | * Everything else but a WRITE, add the command to the execution queue. | 2359 | spin_lock_irq(&cmd->t_state_lock); |
3555 | */ | 2360 | cmd->t_state = TRANSPORT_WRITE_PENDING; |
3556 | transport_execute_tasks(cmd); | 2361 | spin_unlock_irq(&cmd->t_state_lock); |
3557 | return 0; | 2362 | |
2363 | transport_cmd_check_stop(cmd, false); | ||
2364 | |||
2365 | ret = cmd->se_tfo->write_pending(cmd); | ||
2366 | if (ret == -EAGAIN || ret == -ENOMEM) | ||
2367 | goto queue_full; | ||
2368 | |||
2369 | if (ret < 0) | ||
2370 | return ret; | ||
2371 | return 1; | ||
3558 | 2372 | ||
3559 | out_fail: | 2373 | out_fail: |
3560 | cmd->se_cmd_flags |= SCF_SCSI_CDB_EXCEPTION; | 2374 | cmd->se_cmd_flags |= SCF_SCSI_CDB_EXCEPTION; |
3561 | cmd->scsi_sense_reason = TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE; | 2375 | cmd->scsi_sense_reason = TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE; |
3562 | return -EINVAL; | 2376 | return -EINVAL; |
2377 | queue_full: | ||
2378 | pr_debug("Handling write_pending QUEUE__FULL: se_cmd: %p\n", cmd); | ||
2379 | cmd->t_state = TRANSPORT_COMPLETE_QF_WP; | ||
2380 | transport_handle_queue_full(cmd, cmd->se_dev); | ||
2381 | return 0; | ||
3563 | } | 2382 | } |
3564 | EXPORT_SYMBOL(transport_generic_new_cmd); | 2383 | EXPORT_SYMBOL(transport_generic_new_cmd); |
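transport_generic_new_cmd() now executes everything but WRITEs inline and parks WRITEs in TRANSPORT_WRITE_PENDING until the fabric's ->write_pending() callback has gathered the data, at which point the fabric calls target_execute_cmd() itself (see the tcm_fc and usb-gadget hunks below). A hedged sketch of the fabric side; the my_fabric_* names are hypothetical:

    /* Hypothetical fabric callback: the core parked a WRITE here. */
    static int my_fabric_write_pending(struct se_cmd *se_cmd)
    {
            /* Solicit the write data from the initiator. Returning
             * -EAGAIN or -ENOMEM requeues via the queue-full path. */
            return my_fabric_solicit_data(se_cmd);
    }

    /* Hypothetical completion path, once all data has arrived: */
    static void my_fabric_data_ready(struct se_cmd *se_cmd)
    {
            target_execute_cmd(se_cmd);  /* hand off to the backend */
    }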
3565 | 2384 | ||
3566 | /* transport_generic_process_write(): | ||
3567 | * | ||
3568 | * | ||
3569 | */ | ||
3570 | void transport_generic_process_write(struct se_cmd *cmd) | ||
3571 | { | ||
3572 | transport_execute_tasks(cmd); | ||
3573 | } | ||
3574 | EXPORT_SYMBOL(transport_generic_process_write); | ||
3575 | |||
3576 | static void transport_write_pending_qf(struct se_cmd *cmd) | 2385 | static void transport_write_pending_qf(struct se_cmd *cmd) |
3577 | { | 2386 | { |
3578 | int ret; | 2387 | int ret; |
@@ -3585,43 +2394,6 @@ static void transport_write_pending_qf(struct se_cmd *cmd) | |||
3585 | } | 2394 | } |
3586 | } | 2395 | } |
3587 | 2396 | ||
3588 | static int transport_generic_write_pending(struct se_cmd *cmd) | ||
3589 | { | ||
3590 | unsigned long flags; | ||
3591 | int ret; | ||
3592 | |||
3593 | spin_lock_irqsave(&cmd->t_state_lock, flags); | ||
3594 | cmd->t_state = TRANSPORT_WRITE_PENDING; | ||
3595 | spin_unlock_irqrestore(&cmd->t_state_lock, flags); | ||
3596 | |||
3597 | /* | ||
3598 | * Clear the se_cmd for WRITE_PENDING status in order to set | ||
3599 | * CMD_T_ACTIVE so that transport_generic_handle_data can be called | ||
3600 | * from HW target mode interrupt code. This is safe to be called | ||
3601 | * with transport_off=1 before the cmd->se_tfo->write_pending | ||
3602 | * because the se_cmd->se_lun pointer is not being cleared. | ||
3603 | */ | ||
3604 | transport_cmd_check_stop(cmd, 1, 0); | ||
3605 | |||
3606 | /* | ||
3607 | * Call the fabric write_pending function here to let the | ||
3608 | * frontend know that WRITE buffers are ready. | ||
3609 | */ | ||
3610 | ret = cmd->se_tfo->write_pending(cmd); | ||
3611 | if (ret == -EAGAIN || ret == -ENOMEM) | ||
3612 | goto queue_full; | ||
3613 | else if (ret < 0) | ||
3614 | return ret; | ||
3615 | |||
3616 | return 1; | ||
3617 | |||
3618 | queue_full: | ||
3619 | pr_debug("Handling write_pending QUEUE__FULL: se_cmd: %p\n", cmd); | ||
3620 | cmd->t_state = TRANSPORT_COMPLETE_QF_WP; | ||
3621 | transport_handle_queue_full(cmd, cmd->se_dev); | ||
3622 | return 0; | ||
3623 | } | ||
3624 | |||
3625 | void transport_generic_free_cmd(struct se_cmd *cmd, int wait_for_tasks) | 2397 | void transport_generic_free_cmd(struct se_cmd *cmd, int wait_for_tasks) |
3626 | { | 2398 | { |
3627 | if (!(cmd->se_cmd_flags & SCF_SE_LUN_CMD)) { | 2399 | if (!(cmd->se_cmd_flags & SCF_SE_LUN_CMD)) { |
@@ -3648,10 +2420,11 @@ EXPORT_SYMBOL(transport_generic_free_cmd); | |||
3648 | * @se_cmd: command descriptor to add | 2420 | * @se_cmd: command descriptor to add |
3649 | * @ack_kref: Signal that fabric will perform an ack target_put_sess_cmd() | 2421 | * @ack_kref: Signal that fabric will perform an ack target_put_sess_cmd() |
3650 | */ | 2422 | */ |
3651 | void target_get_sess_cmd(struct se_session *se_sess, struct se_cmd *se_cmd, | 2423 | static int target_get_sess_cmd(struct se_session *se_sess, struct se_cmd *se_cmd, |
3652 | bool ack_kref) | 2424 | bool ack_kref) |
3653 | { | 2425 | { |
3654 | unsigned long flags; | 2426 | unsigned long flags; |
2427 | int ret = 0; | ||
3655 | 2428 | ||
3656 | kref_init(&se_cmd->cmd_kref); | 2429 | kref_init(&se_cmd->cmd_kref); |
3657 | /* | 2430 | /* |
@@ -3665,11 +2438,17 @@ void target_get_sess_cmd(struct se_session *se_sess, struct se_cmd *se_cmd, | |||
3665 | } | 2438 | } |
3666 | 2439 | ||
3667 | spin_lock_irqsave(&se_sess->sess_cmd_lock, flags); | 2440 | spin_lock_irqsave(&se_sess->sess_cmd_lock, flags); |
2441 | if (se_sess->sess_tearing_down) { | ||
2442 | ret = -ESHUTDOWN; | ||
2443 | goto out; | ||
2444 | } | ||
3668 | list_add_tail(&se_cmd->se_cmd_list, &se_sess->sess_cmd_list); | 2445 | list_add_tail(&se_cmd->se_cmd_list, &se_sess->sess_cmd_list); |
3669 | se_cmd->check_release = 1; | 2446 | se_cmd->check_release = 1; |
2447 | |||
2448 | out: | ||
3670 | spin_unlock_irqrestore(&se_sess->sess_cmd_lock, flags); | 2449 | spin_unlock_irqrestore(&se_sess->sess_cmd_lock, flags); |
2450 | return ret; | ||
3671 | } | 2451 | } |
3672 | EXPORT_SYMBOL(target_get_sess_cmd); | ||
3673 | 2452 | ||
3674 | static void target_release_cmd_kref(struct kref *kref) | 2453 | static void target_release_cmd_kref(struct kref *kref) |
3675 | { | 2454 | { |
@@ -3704,28 +2483,27 @@ int target_put_sess_cmd(struct se_session *se_sess, struct se_cmd *se_cmd) | |||
3704 | } | 2483 | } |
3705 | EXPORT_SYMBOL(target_put_sess_cmd); | 2484 | EXPORT_SYMBOL(target_put_sess_cmd); |
3706 | 2485 | ||
3707 | /* target_splice_sess_cmd_list - Split active cmds into sess_wait_list | 2486 | /* target_sess_cmd_list_set_waiting - Flag all commands in |
3708 | * @se_sess: session to split | 2487 | * sess_cmd_list to complete cmd_wait_comp. Set |
2488 | * sess_tearing_down so no more commands are queued. | ||
2489 | * @se_sess: session to flag | ||
3709 | */ | 2490 | */ |
3710 | void target_splice_sess_cmd_list(struct se_session *se_sess) | 2491 | void target_sess_cmd_list_set_waiting(struct se_session *se_sess) |
3711 | { | 2492 | { |
3712 | struct se_cmd *se_cmd; | 2493 | struct se_cmd *se_cmd; |
3713 | unsigned long flags; | 2494 | unsigned long flags; |
3714 | 2495 | ||
3715 | WARN_ON(!list_empty(&se_sess->sess_wait_list)); | ||
3716 | INIT_LIST_HEAD(&se_sess->sess_wait_list); | ||
3717 | |||
3718 | spin_lock_irqsave(&se_sess->sess_cmd_lock, flags); | 2496 | spin_lock_irqsave(&se_sess->sess_cmd_lock, flags); |
3719 | se_sess->sess_tearing_down = 1; | ||
3720 | 2497 | ||
3721 | list_splice_init(&se_sess->sess_cmd_list, &se_sess->sess_wait_list); | 2498 | WARN_ON(se_sess->sess_tearing_down); |
2499 | se_sess->sess_tearing_down = 1; | ||
3722 | 2500 | ||
3723 | list_for_each_entry(se_cmd, &se_sess->sess_wait_list, se_cmd_list) | 2501 | list_for_each_entry(se_cmd, &se_sess->sess_cmd_list, se_cmd_list) |
3724 | se_cmd->cmd_wait_set = 1; | 2502 | se_cmd->cmd_wait_set = 1; |
3725 | 2503 | ||
3726 | spin_unlock_irqrestore(&se_sess->sess_cmd_lock, flags); | 2504 | spin_unlock_irqrestore(&se_sess->sess_cmd_lock, flags); |
3727 | } | 2505 | } |
3728 | EXPORT_SYMBOL(target_splice_sess_cmd_list); | 2506 | EXPORT_SYMBOL(target_sess_cmd_list_set_waiting); |
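Session shutdown is now a two-step sequence over sess_cmd_list itself, replacing the splice onto the removed sess_wait_list. A minimal sketch of the caller side (prototypes per the target_core_fabric.h hunk at the end of this diff; passing 0 for the second argument is illustrative):

    /* Refuse new commands on the session ... */
    target_sess_cmd_list_set_waiting(se_sess);
    /* ... then block until every outstanding descriptor completes. */
    target_wait_for_sess_cmds(se_sess, 0);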
3729 | 2507 | ||
3730 | /* target_wait_for_sess_cmds - Wait for outstanding descriptors | 2508 | /* target_wait_for_sess_cmds - Wait for outstanding descriptors |
3731 | * @se_sess: session to wait for active I/O | 2509 | * @se_sess: session to wait for active I/O |
@@ -3739,7 +2517,7 @@ void target_wait_for_sess_cmds( | |||
3739 | bool rc = false; | 2517 | bool rc = false; |
3740 | 2518 | ||
3741 | list_for_each_entry_safe(se_cmd, tmp_cmd, | 2519 | list_for_each_entry_safe(se_cmd, tmp_cmd, |
3742 | &se_sess->sess_wait_list, se_cmd_list) { | 2520 | &se_sess->sess_cmd_list, se_cmd_list) { |
3743 | list_del(&se_cmd->se_cmd_list); | 2521 | list_del(&se_cmd->se_cmd_list); |
3744 | 2522 | ||
3745 | pr_debug("Waiting for se_cmd: %p t_state: %d, fabric state:" | 2523 | pr_debug("Waiting for se_cmd: %p t_state: %d, fabric state:" |
@@ -3791,26 +2569,20 @@ static int transport_lun_wait_for_tasks(struct se_cmd *cmd, struct se_lun *lun) | |||
3791 | pr_debug("ConfigFS ITT[0x%08x] - CMD_T_STOP, skipping\n", | 2569 | pr_debug("ConfigFS ITT[0x%08x] - CMD_T_STOP, skipping\n", |
3792 | cmd->se_tfo->get_task_tag(cmd)); | 2570 | cmd->se_tfo->get_task_tag(cmd)); |
3793 | spin_unlock_irqrestore(&cmd->t_state_lock, flags); | 2571 | spin_unlock_irqrestore(&cmd->t_state_lock, flags); |
3794 | transport_cmd_check_stop(cmd, 1, 0); | 2572 | transport_cmd_check_stop(cmd, false); |
3795 | return -EPERM; | 2573 | return -EPERM; |
3796 | } | 2574 | } |
3797 | cmd->transport_state |= CMD_T_LUN_FE_STOP; | 2575 | cmd->transport_state |= CMD_T_LUN_FE_STOP; |
3798 | spin_unlock_irqrestore(&cmd->t_state_lock, flags); | 2576 | spin_unlock_irqrestore(&cmd->t_state_lock, flags); |
3799 | 2577 | ||
3800 | wake_up_interruptible(&cmd->se_dev->dev_queue_obj.thread_wq); | ||
3801 | |||
3802 | // XXX: audit task_flags checks. | 2578 | // XXX: audit task_flags checks. |
3803 | spin_lock_irqsave(&cmd->t_state_lock, flags); | 2579 | spin_lock_irqsave(&cmd->t_state_lock, flags); |
3804 | if ((cmd->transport_state & CMD_T_BUSY) && | 2580 | if ((cmd->transport_state & CMD_T_BUSY) && |
3805 | (cmd->transport_state & CMD_T_SENT)) { | 2581 | (cmd->transport_state & CMD_T_SENT)) { |
3806 | if (!target_stop_cmd(cmd, &flags)) | 2582 | if (!target_stop_cmd(cmd, &flags)) |
3807 | ret++; | 2583 | ret++; |
3808 | spin_unlock_irqrestore(&cmd->t_state_lock, flags); | ||
3809 | } else { | ||
3810 | spin_unlock_irqrestore(&cmd->t_state_lock, | ||
3811 | flags); | ||
3812 | target_remove_from_execute_list(cmd); | ||
3813 | } | 2584 | } |
2585 | spin_unlock_irqrestore(&cmd->t_state_lock, flags); | ||
3814 | 2586 | ||
3815 | pr_debug("ConfigFS: cmd: %p stop tasks ret:" | 2587 | pr_debug("ConfigFS: cmd: %p stop tasks ret:" |
3816 | " %d\n", cmd, ret); | 2588 | " %d\n", cmd, ret); |
@@ -3821,7 +2593,6 @@ static int transport_lun_wait_for_tasks(struct se_cmd *cmd, struct se_lun *lun) | |||
3821 | pr_debug("ConfigFS: ITT[0x%08x] - stopped cmd....\n", | 2593 | pr_debug("ConfigFS: ITT[0x%08x] - stopped cmd....\n", |
3822 | cmd->se_tfo->get_task_tag(cmd)); | 2594 | cmd->se_tfo->get_task_tag(cmd)); |
3823 | } | 2595 | } |
3824 | transport_remove_cmd_from_queue(cmd); | ||
3825 | 2596 | ||
3826 | return 0; | 2597 | return 0; |
3827 | } | 2598 | } |
@@ -3840,11 +2611,6 @@ static void __transport_clear_lun_from_sessions(struct se_lun *lun) | |||
3840 | struct se_cmd, se_lun_node); | 2611 | struct se_cmd, se_lun_node); |
3841 | list_del_init(&cmd->se_lun_node); | 2612 | list_del_init(&cmd->se_lun_node); |
3842 | 2613 | ||
3843 | /* | ||
3844 | * This will notify iscsi_target_transport.c: | ||
3845 | * transport_cmd_check_stop() that a LUN shutdown is in | ||
3846 | * progress for the iscsi_cmd_t. | ||
3847 | */ | ||
3848 | spin_lock(&cmd->t_state_lock); | 2614 | spin_lock(&cmd->t_state_lock); |
3849 | pr_debug("SE_LUN[%d] - Setting cmd->transport" | 2615 | pr_debug("SE_LUN[%d] - Setting cmd->transport" |
3850 | "_lun_stop for ITT: 0x%08x\n", | 2616 | "_lun_stop for ITT: 0x%08x\n", |
@@ -3911,7 +2677,7 @@ check_cond: | |||
3911 | 2677 | ||
3912 | spin_unlock_irqrestore(&cmd->t_state_lock, | 2678 | spin_unlock_irqrestore(&cmd->t_state_lock, |
3913 | cmd_flags); | 2679 | cmd_flags); |
3914 | transport_cmd_check_stop(cmd, 1, 0); | 2680 | transport_cmd_check_stop(cmd, false); |
3915 | complete(&cmd->transport_lun_fe_stop_comp); | 2681 | complete(&cmd->transport_lun_fe_stop_comp); |
3916 | spin_lock_irqsave(&lun->lun_cmd_lock, lun_flags); | 2682 | spin_lock_irqsave(&lun->lun_cmd_lock, lun_flags); |
3917 | continue; | 2683 | continue; |
@@ -3967,10 +2733,7 @@ bool transport_wait_for_tasks(struct se_cmd *cmd) | |||
3967 | spin_unlock_irqrestore(&cmd->t_state_lock, flags); | 2733 | spin_unlock_irqrestore(&cmd->t_state_lock, flags); |
3968 | return false; | 2734 | return false; |
3969 | } | 2735 | } |
3970 | /* | 2736 | |
3971 | * Only perform a possible wait_for_tasks if SCF_SUPPORTED_SAM_OPCODE | ||
3972 | * has been set in transport_set_supported_SAM_opcode(). | ||
3973 | */ | ||
3974 | if (!(cmd->se_cmd_flags & SCF_SUPPORTED_SAM_OPCODE) && | 2737 | if (!(cmd->se_cmd_flags & SCF_SUPPORTED_SAM_OPCODE) && |
3975 | !(cmd->se_cmd_flags & SCF_SCSI_TMR_CDB)) { | 2738 | !(cmd->se_cmd_flags & SCF_SCSI_TMR_CDB)) { |
3976 | spin_unlock_irqrestore(&cmd->t_state_lock, flags); | 2739 | spin_unlock_irqrestore(&cmd->t_state_lock, flags); |
@@ -4028,8 +2791,6 @@ bool transport_wait_for_tasks(struct se_cmd *cmd) | |||
4028 | 2791 | ||
4029 | spin_unlock_irqrestore(&cmd->t_state_lock, flags); | 2792 | spin_unlock_irqrestore(&cmd->t_state_lock, flags); |
4030 | 2793 | ||
4031 | wake_up_interruptible(&cmd->se_dev->dev_queue_obj.thread_wq); | ||
4032 | |||
4033 | wait_for_completion(&cmd->t_transport_stop_comp); | 2794 | wait_for_completion(&cmd->t_transport_stop_comp); |
4034 | 2795 | ||
4035 | spin_lock_irqsave(&cmd->t_state_lock, flags); | 2796 | spin_lock_irqsave(&cmd->t_state_lock, flags); |
@@ -4212,6 +2973,15 @@ int transport_send_check_condition_and_sense( | |||
4212 | /* WRITE PROTECTED */ | 2973 | /* WRITE PROTECTED */ |
4213 | buffer[offset+SPC_ASC_KEY_OFFSET] = 0x27; | 2974 | buffer[offset+SPC_ASC_KEY_OFFSET] = 0x27; |
4214 | break; | 2975 | break; |
2976 | case TCM_ADDRESS_OUT_OF_RANGE: | ||
2977 | /* CURRENT ERROR */ | ||
2978 | buffer[offset] = 0x70; | ||
2979 | buffer[offset+SPC_ADD_SENSE_LEN_OFFSET] = 10; | ||
2980 | /* ILLEGAL REQUEST */ | ||
2981 | buffer[offset+SPC_SENSE_KEY_OFFSET] = ILLEGAL_REQUEST; | ||
2982 | /* LOGICAL BLOCK ADDRESS OUT OF RANGE */ | ||
2983 | buffer[offset+SPC_ASC_KEY_OFFSET] = 0x21; | ||
2984 | break; | ||
4215 | case TCM_CHECK_CONDITION_UNIT_ATTENTION: | 2985 | case TCM_CHECK_CONDITION_UNIT_ATTENTION: |
4216 | /* CURRENT ERROR */ | 2986 | /* CURRENT ERROR */ |
4217 | buffer[offset] = 0x70; | 2987 | buffer[offset] = 0x70; |
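The added TCM_ADDRESS_OUT_OF_RANGE case emits fixed-format sense data: response code 0x70 (current error), sense key ILLEGAL REQUEST, ASC 0x21 (LOGICAL BLOCK ADDRESS OUT OF RANGE). A self-contained illustration of the same byte layout; the offset constants mirror the SPC fixed-format layout that the kernel's SPC_*_OFFSET defines encode:

    #include <stdio.h>

    enum {
            SPC_SENSE_KEY_OFFSET     = 2,   /* sense key             */
            SPC_ADD_SENSE_LEN_OFFSET = 7,   /* additional length     */
            SPC_ASC_KEY_OFFSET       = 12,  /* additional sense code */
            ILLEGAL_REQUEST          = 0x05,
    };

    int main(void)
    {
            unsigned char sense[18] = { 0 };
            int i;

            sense[0] = 0x70;                      /* current error     */
            sense[SPC_SENSE_KEY_OFFSET] = ILLEGAL_REQUEST;
            sense[SPC_ADD_SENSE_LEN_OFFSET] = 10; /* bytes that follow */
            sense[SPC_ASC_KEY_OFFSET] = 0x21;     /* LBA out of range  */

            for (i = 0; i < 18; i++)
                    printf("%02x ", sense[i]);
            printf("\n");
            return 0;
    }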
@@ -4312,8 +3082,9 @@ void transport_send_task_abort(struct se_cmd *cmd) | |||
4312 | cmd->se_tfo->queue_status(cmd); | 3082 | cmd->se_tfo->queue_status(cmd); |
4313 | } | 3083 | } |
4314 | 3084 | ||
4315 | static int transport_generic_do_tmr(struct se_cmd *cmd) | 3085 | static void target_tmr_work(struct work_struct *work) |
4316 | { | 3086 | { |
3087 | struct se_cmd *cmd = container_of(work, struct se_cmd, work); | ||
4317 | struct se_device *dev = cmd->se_dev; | 3088 | struct se_device *dev = cmd->se_dev; |
4318 | struct se_tmr_req *tmr = cmd->se_tmr_req; | 3089 | struct se_tmr_req *tmr = cmd->se_tmr_req; |
4319 | int ret; | 3090 | int ret; |
@@ -4349,80 +3120,13 @@ static int transport_generic_do_tmr(struct se_cmd *cmd) | |||
4349 | cmd->se_tfo->queue_tm_rsp(cmd); | 3120 | cmd->se_tfo->queue_tm_rsp(cmd); |
4350 | 3121 | ||
4351 | transport_cmd_check_stop_to_fabric(cmd); | 3122 | transport_cmd_check_stop_to_fabric(cmd); |
4352 | return 0; | ||
4353 | } | 3123 | } |
4354 | 3124 | ||
4355 | /* transport_processing_thread(): | 3125 | int transport_generic_handle_tmr( |
4356 | * | 3126 | struct se_cmd *cmd) |
4357 | * | ||
4358 | */ | ||
4359 | static int transport_processing_thread(void *param) | ||
4360 | { | 3127 | { |
4361 | int ret; | 3128 | INIT_WORK(&cmd->work, target_tmr_work); |
4362 | struct se_cmd *cmd; | 3129 | queue_work(cmd->se_dev->tmr_wq, &cmd->work); |
4363 | struct se_device *dev = param; | ||
4364 | |||
4365 | while (!kthread_should_stop()) { | ||
4366 | ret = wait_event_interruptible(dev->dev_queue_obj.thread_wq, | ||
4367 | atomic_read(&dev->dev_queue_obj.queue_cnt) || | ||
4368 | kthread_should_stop()); | ||
4369 | if (ret < 0) | ||
4370 | goto out; | ||
4371 | |||
4372 | get_cmd: | ||
4373 | cmd = transport_get_cmd_from_queue(&dev->dev_queue_obj); | ||
4374 | if (!cmd) | ||
4375 | continue; | ||
4376 | |||
4377 | switch (cmd->t_state) { | ||
4378 | case TRANSPORT_NEW_CMD: | ||
4379 | BUG(); | ||
4380 | break; | ||
4381 | case TRANSPORT_NEW_CMD_MAP: | ||
4382 | if (!cmd->se_tfo->new_cmd_map) { | ||
4383 | pr_err("cmd->se_tfo->new_cmd_map is" | ||
4384 | " NULL for TRANSPORT_NEW_CMD_MAP\n"); | ||
4385 | BUG(); | ||
4386 | } | ||
4387 | ret = cmd->se_tfo->new_cmd_map(cmd); | ||
4388 | if (ret < 0) { | ||
4389 | transport_generic_request_failure(cmd); | ||
4390 | break; | ||
4391 | } | ||
4392 | ret = transport_generic_new_cmd(cmd); | ||
4393 | if (ret < 0) { | ||
4394 | transport_generic_request_failure(cmd); | ||
4395 | break; | ||
4396 | } | ||
4397 | break; | ||
4398 | case TRANSPORT_PROCESS_WRITE: | ||
4399 | transport_generic_process_write(cmd); | ||
4400 | break; | ||
4401 | case TRANSPORT_PROCESS_TMR: | ||
4402 | transport_generic_do_tmr(cmd); | ||
4403 | break; | ||
4404 | case TRANSPORT_COMPLETE_QF_WP: | ||
4405 | transport_write_pending_qf(cmd); | ||
4406 | break; | ||
4407 | case TRANSPORT_COMPLETE_QF_OK: | ||
4408 | transport_complete_qf(cmd); | ||
4409 | break; | ||
4410 | default: | ||
4411 | pr_err("Unknown t_state: %d for ITT: 0x%08x " | ||
4412 | "i_state: %d on SE LUN: %u\n", | ||
4413 | cmd->t_state, | ||
4414 | cmd->se_tfo->get_task_tag(cmd), | ||
4415 | cmd->se_tfo->get_cmd_state(cmd), | ||
4416 | cmd->se_lun->unpacked_lun); | ||
4417 | BUG(); | ||
4418 | } | ||
4419 | |||
4420 | goto get_cmd; | ||
4421 | } | ||
4422 | |||
4423 | out: | ||
4424 | WARN_ON(!list_empty(&dev->state_list)); | ||
4425 | WARN_ON(!list_empty(&dev->dev_queue_obj.qobj_list)); | ||
4426 | dev->process_thread = NULL; | ||
4427 | return 0; | 3130 | return 0; |
4428 | } | 3131 | } |
3132 | EXPORT_SYMBOL(transport_generic_handle_tmr); | ||
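With the processing thread gone, transport_generic_handle_tmr() defers to a per-device workqueue, so fabrics may submit TMRs from atomic context while the actual abort logic runs in process context, where the backend can sleep. The idiom, reduced to its two halves:

    /* Submission side: bounce the TMR into process context. */
    INIT_WORK(&cmd->work, target_tmr_work);
    queue_work(cmd->se_dev->tmr_wq, &cmd->work);

    /* Handler side: map the work item back to its owning command. */
    struct se_cmd *cmd = container_of(work, struct se_cmd, work);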
diff --git a/drivers/target/tcm_fc/tfc_cmd.c b/drivers/target/tcm_fc/tfc_cmd.c index 5b65f33939a8..b9cb5006177e 100644 --- a/drivers/target/tcm_fc/tfc_cmd.c +++ b/drivers/target/tcm_fc/tfc_cmd.c | |||
@@ -215,7 +215,7 @@ int ft_write_pending(struct se_cmd *se_cmd) | |||
215 | */ | 215 | */ |
216 | if ((ep->xid <= lport->lro_xid) && | 216 | if ((ep->xid <= lport->lro_xid) && |
217 | (fh->fh_r_ctl == FC_RCTL_DD_DATA_DESC)) { | 217 | (fh->fh_r_ctl == FC_RCTL_DD_DATA_DESC)) { |
218 | if ((se_cmd->se_cmd_flags & SCF_SCSI_DATA_SG_IO_CDB) && | 218 | if ((se_cmd->se_cmd_flags & SCF_SCSI_DATA_CDB) && |
219 | lport->tt.ddp_target(lport, ep->xid, | 219 | lport->tt.ddp_target(lport, ep->xid, |
220 | se_cmd->t_data_sg, | 220 | se_cmd->t_data_sg, |
221 | se_cmd->t_data_nents)) | 221 | se_cmd->t_data_nents)) |
@@ -543,9 +543,11 @@ static void ft_send_work(struct work_struct *work) | |||
543 | * Use a single se_cmd->cmd_kref as we expect to release se_cmd | 543 | * Use a single se_cmd->cmd_kref as we expect to release se_cmd |
544 | * directly from ft_check_stop_free callback in response path. | 544 | * directly from ft_check_stop_free callback in response path. |
545 | */ | 545 | */ |
546 | target_submit_cmd(&cmd->se_cmd, cmd->sess->se_sess, fcp->fc_cdb, | 546 | if (target_submit_cmd(&cmd->se_cmd, cmd->sess->se_sess, fcp->fc_cdb, |
547 | &cmd->ft_sense_buffer[0], scsilun_to_int(&fcp->fc_lun), | 547 | &cmd->ft_sense_buffer[0], scsilun_to_int(&fcp->fc_lun), |
548 | ntohl(fcp->fc_dl), task_attr, data_dir, 0); | 548 | ntohl(fcp->fc_dl), task_attr, data_dir, 0)) |
549 | goto err; | ||
550 | |||
549 | pr_debug("r_ctl %x alloc target_submit_cmd\n", fh->fh_r_ctl); | 551 | pr_debug("r_ctl %x alloc target_submit_cmd\n", fh->fh_r_ctl); |
550 | return; | 552 | return; |
551 | 553 | ||
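target_submit_cmd() can now fail — notably with -ESHUTDOWN from the sess_tearing_down check added to target_get_sess_cmd() earlier in this diff — so ft_send_work() checks the return and unwinds instead of waiting for a completion that will never arrive; the usb-gadget hunks below adopt the same pattern. The caller contract, sketched with illustrative argument names:

    /* A non-zero return means the command was never queued (the
     * session is being torn down); the fabric must release its own
     * resources rather than expect a response callback. */
    if (target_submit_cmd(se_cmd, se_sess, cdb, sense_buf, unpacked_lun,
                          data_length, task_attr, data_dir, 0) < 0)
            goto release_cmd;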
diff --git a/drivers/target/tcm_fc/tfc_io.c b/drivers/target/tcm_fc/tfc_io.c index 071a505f98fc..ad36ede1a1ea 100644 --- a/drivers/target/tcm_fc/tfc_io.c +++ b/drivers/target/tcm_fc/tfc_io.c | |||
@@ -183,6 +183,13 @@ int ft_queue_data_in(struct se_cmd *se_cmd) | |||
183 | return ft_queue_status(se_cmd); | 183 | return ft_queue_status(se_cmd); |
184 | } | 184 | } |
185 | 185 | ||
186 | static void ft_execute_work(struct work_struct *work) | ||
187 | { | ||
188 | struct ft_cmd *cmd = container_of(work, struct ft_cmd, work); | ||
189 | |||
190 | target_execute_cmd(&cmd->se_cmd); | ||
191 | } | ||
192 | |||
186 | /* | 193 | /* |
187 | * Receive write data frame. | 194 | * Receive write data frame. |
188 | */ | 195 | */ |
@@ -307,8 +314,10 @@ void ft_recv_write_data(struct ft_cmd *cmd, struct fc_frame *fp) | |||
307 | cmd->write_data_len += tlen; | 314 | cmd->write_data_len += tlen; |
308 | } | 315 | } |
309 | last_frame: | 316 | last_frame: |
310 | if (cmd->write_data_len == se_cmd->data_length) | 317 | if (cmd->write_data_len == se_cmd->data_length) { |
311 | transport_generic_handle_data(se_cmd); | 318 | INIT_WORK(&cmd->work, ft_execute_work); |
319 | queue_work(cmd->sess->tport->tpg->workqueue, &cmd->work); | ||
320 | } | ||
312 | drop: | 321 | drop: |
313 | fc_frame_free(fp); | 322 | fc_frame_free(fp); |
314 | } | 323 | } |
diff --git a/drivers/usb/gadget/tcm_usb_gadget.c b/drivers/usb/gadget/tcm_usb_gadget.c index c46439c8dd74..5444866e13ef 100644 --- a/drivers/usb/gadget/tcm_usb_gadget.c +++ b/drivers/usb/gadget/tcm_usb_gadget.c | |||
@@ -294,7 +294,7 @@ static int bot_send_write_request(struct usbg_cmd *cmd) | |||
294 | pr_err("%s(%d)\n", __func__, __LINE__); | 294 | pr_err("%s(%d)\n", __func__, __LINE__); |
295 | 295 | ||
296 | wait_for_completion(&cmd->write_complete); | 296 | wait_for_completion(&cmd->write_complete); |
297 | transport_generic_process_write(se_cmd); | 297 | target_execute_cmd(se_cmd); |
298 | cleanup: | 298 | cleanup: |
299 | return ret; | 299 | return ret; |
300 | } | 300 | } |
@@ -725,7 +725,7 @@ static int uasp_send_write_request(struct usbg_cmd *cmd) | |||
725 | } | 725 | } |
726 | 726 | ||
727 | wait_for_completion(&cmd->write_complete); | 727 | wait_for_completion(&cmd->write_complete); |
728 | transport_generic_process_write(se_cmd); | 728 | target_execute_cmd(se_cmd); |
729 | cleanup: | 729 | cleanup: |
730 | return ret; | 730 | return ret; |
731 | } | 731 | } |
@@ -1065,16 +1065,20 @@ static void usbg_cmd_work(struct work_struct *work) | |||
1065 | tv_nexus->tvn_se_sess->se_tpg->se_tpg_tfo, | 1065 | tv_nexus->tvn_se_sess->se_tpg->se_tpg_tfo, |
1066 | tv_nexus->tvn_se_sess, cmd->data_len, DMA_NONE, | 1066 | tv_nexus->tvn_se_sess, cmd->data_len, DMA_NONE, |
1067 | cmd->prio_attr, cmd->sense_iu.sense); | 1067 | cmd->prio_attr, cmd->sense_iu.sense); |
1068 | 1068 | goto out; | |
1069 | transport_send_check_condition_and_sense(se_cmd, | ||
1070 | TCM_UNSUPPORTED_SCSI_OPCODE, 1); | ||
1071 | usbg_cleanup_cmd(cmd); | ||
1072 | return; | ||
1073 | } | 1069 | } |
1074 | 1070 | ||
1075 | target_submit_cmd(se_cmd, tv_nexus->tvn_se_sess, | 1071 | if (target_submit_cmd(se_cmd, tv_nexus->tvn_se_sess, |
1076 | cmd->cmd_buf, cmd->sense_iu.sense, cmd->unpacked_lun, | 1072 | cmd->cmd_buf, cmd->sense_iu.sense, cmd->unpacked_lun, |
1077 | 0, cmd->prio_attr, dir, TARGET_SCF_UNKNOWN_SIZE); | 1073 | 0, cmd->prio_attr, dir, TARGET_SCF_UNKNOWN_SIZE) < 0) |
1074 | goto out; | ||
1075 | |||
1076 | return; | ||
1077 | |||
1078 | out: | ||
1079 | transport_send_check_condition_and_sense(se_cmd, | ||
1080 | TCM_UNSUPPORTED_SCSI_OPCODE, 1); | ||
1081 | usbg_cleanup_cmd(cmd); | ||
1078 | } | 1082 | } |
1079 | 1083 | ||
1080 | static int usbg_submit_command(struct f_uas *fu, | 1084 | static int usbg_submit_command(struct f_uas *fu, |
@@ -1177,16 +1181,20 @@ static void bot_cmd_work(struct work_struct *work) | |||
1177 | tv_nexus->tvn_se_sess->se_tpg->se_tpg_tfo, | 1181 | tv_nexus->tvn_se_sess->se_tpg->se_tpg_tfo, |
1178 | tv_nexus->tvn_se_sess, cmd->data_len, DMA_NONE, | 1182 | tv_nexus->tvn_se_sess, cmd->data_len, DMA_NONE, |
1179 | cmd->prio_attr, cmd->sense_iu.sense); | 1183 | cmd->prio_attr, cmd->sense_iu.sense); |
1180 | 1184 | goto out; | |
1181 | transport_send_check_condition_and_sense(se_cmd, | ||
1182 | TCM_UNSUPPORTED_SCSI_OPCODE, 1); | ||
1183 | usbg_cleanup_cmd(cmd); | ||
1184 | return; | ||
1185 | } | 1185 | } |
1186 | 1186 | ||
1187 | target_submit_cmd(se_cmd, tv_nexus->tvn_se_sess, | 1187 | if (target_submit_cmd(se_cmd, tv_nexus->tvn_se_sess, |
1188 | cmd->cmd_buf, cmd->sense_iu.sense, cmd->unpacked_lun, | 1188 | cmd->cmd_buf, cmd->sense_iu.sense, cmd->unpacked_lun, |
1189 | cmd->data_len, cmd->prio_attr, dir, 0); | 1189 | cmd->data_len, cmd->prio_attr, dir, 0) < 0) |
1190 | goto out; | ||
1191 | |||
1192 | return; | ||
1193 | |||
1194 | out: | ||
1195 | transport_send_check_condition_and_sense(se_cmd, | ||
1196 | TCM_UNSUPPORTED_SCSI_OPCODE, 1); | ||
1197 | usbg_cleanup_cmd(cmd); | ||
1190 | } | 1198 | } |
1191 | 1199 | ||
1192 | static int bot_submit_command(struct f_uas *fu, | 1200 | static int bot_submit_command(struct f_uas *fu, |
@@ -1400,19 +1408,6 @@ static u32 usbg_tpg_get_inst_index(struct se_portal_group *se_tpg) | |||
1400 | return 1; | 1408 | return 1; |
1401 | } | 1409 | } |
1402 | 1410 | ||
1403 | static int usbg_new_cmd(struct se_cmd *se_cmd) | ||
1404 | { | ||
1405 | struct usbg_cmd *cmd = container_of(se_cmd, struct usbg_cmd, | ||
1406 | se_cmd); | ||
1407 | int ret; | ||
1408 | |||
1409 | ret = target_setup_cmd_from_cdb(se_cmd, cmd->cmd_buf); | ||
1410 | if (ret) | ||
1411 | return ret; | ||
1412 | |||
1413 | return transport_generic_map_mem_to_cmd(se_cmd, NULL, 0, NULL, 0); | ||
1414 | } | ||
1415 | |||
1416 | static void usbg_cmd_release(struct kref *ref) | 1411 | static void usbg_cmd_release(struct kref *ref) |
1417 | { | 1412 | { |
1418 | struct usbg_cmd *cmd = container_of(ref, struct usbg_cmd, | 1413 | struct usbg_cmd *cmd = container_of(ref, struct usbg_cmd, |
@@ -1902,7 +1897,6 @@ static struct target_core_fabric_ops usbg_ops = { | |||
1902 | .tpg_alloc_fabric_acl = usbg_alloc_fabric_acl, | 1897 | .tpg_alloc_fabric_acl = usbg_alloc_fabric_acl, |
1903 | .tpg_release_fabric_acl = usbg_release_fabric_acl, | 1898 | .tpg_release_fabric_acl = usbg_release_fabric_acl, |
1904 | .tpg_get_inst_index = usbg_tpg_get_inst_index, | 1899 | .tpg_get_inst_index = usbg_tpg_get_inst_index, |
1905 | .new_cmd_map = usbg_new_cmd, | ||
1906 | .release_cmd = usbg_release_cmd, | 1900 | .release_cmd = usbg_release_cmd, |
1907 | .shutdown_session = usbg_shutdown_session, | 1901 | .shutdown_session = usbg_shutdown_session, |
1908 | .close_session = usbg_close_session, | 1902 | .close_session = usbg_close_session, |
diff --git a/include/target/target_core_backend.h b/include/target/target_core_backend.h index 2d7db85e93ae..f1405d335a96 100644 --- a/include/target/target_core_backend.h +++ b/include/target/target_core_backend.h | |||
@@ -24,10 +24,8 @@ struct se_subsystem_api { | |||
24 | struct se_subsystem_dev *, void *); | 24 | struct se_subsystem_dev *, void *); |
25 | void (*free_device)(void *); | 25 | void (*free_device)(void *); |
26 | int (*transport_complete)(struct se_cmd *cmd, struct scatterlist *); | 26 | int (*transport_complete)(struct se_cmd *cmd, struct scatterlist *); |
27 | int (*execute_cmd)(struct se_cmd *, struct scatterlist *, u32, | 27 | |
28 | enum dma_data_direction); | 28 | int (*parse_cdb)(struct se_cmd *cmd); |
29 | int (*do_discard)(struct se_device *, sector_t, u32); | ||
30 | void (*do_sync_cache)(struct se_cmd *); | ||
31 | ssize_t (*check_configfs_dev_params)(struct se_hba *, | 29 | ssize_t (*check_configfs_dev_params)(struct se_hba *, |
32 | struct se_subsystem_dev *); | 30 | struct se_subsystem_dev *); |
33 | ssize_t (*set_configfs_dev_params)(struct se_hba *, | 31 | ssize_t (*set_configfs_dev_params)(struct se_hba *, |
@@ -40,6 +38,13 @@ struct se_subsystem_api { | |||
40 | unsigned char *(*get_sense_buffer)(struct se_cmd *); | 38 | unsigned char *(*get_sense_buffer)(struct se_cmd *); |
41 | }; | 39 | }; |
42 | 40 | ||
41 | struct spc_ops { | ||
42 | int (*execute_rw)(struct se_cmd *cmd); | ||
43 | int (*execute_sync_cache)(struct se_cmd *cmd); | ||
44 | int (*execute_write_same)(struct se_cmd *cmd); | ||
45 | int (*execute_unmap)(struct se_cmd *cmd); | ||
46 | }; | ||
47 | |||
43 | int transport_subsystem_register(struct se_subsystem_api *); | 48 | int transport_subsystem_register(struct se_subsystem_api *); |
44 | void transport_subsystem_release(struct se_subsystem_api *); | 49 | void transport_subsystem_release(struct se_subsystem_api *); |
45 | 50 | ||
@@ -49,6 +54,10 @@ struct se_device *transport_add_device_to_core_hba(struct se_hba *, | |||
49 | 54 | ||
50 | void target_complete_cmd(struct se_cmd *, u8); | 55 | void target_complete_cmd(struct se_cmd *, u8); |
51 | 56 | ||
57 | int sbc_parse_cdb(struct se_cmd *cmd, struct spc_ops *ops); | ||
58 | int spc_parse_cdb(struct se_cmd *cmd, unsigned int *size); | ||
59 | int spc_get_write_same_sectors(struct se_cmd *cmd); | ||
60 | |||
52 | void transport_set_vpd_proto_id(struct t10_vpd *, unsigned char *); | 61 | void transport_set_vpd_proto_id(struct t10_vpd *, unsigned char *); |
53 | int transport_set_vpd_assoc(struct t10_vpd *, unsigned char *); | 62 | int transport_set_vpd_assoc(struct t10_vpd *, unsigned char *); |
54 | int transport_set_vpd_ident_type(struct t10_vpd *, unsigned char *); | 63 | int transport_set_vpd_ident_type(struct t10_vpd *, unsigned char *); |
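The parse_cdb split pairs each backend with a table of execute callbacks: the backend's parse_cdb method forwards to the shared SBC/SPC parsers, which fill in cmd->execute_cmd from the spc_ops table. A hedged sketch of the expected wiring; the iblock_* helper names are illustrative, not lifted from this diff:

    /* Hypothetical backend glue, assuming the spc_ops contract above. */
    static struct spc_ops iblock_spc_ops = {
            .execute_rw         = iblock_execute_rw,
            .execute_sync_cache = iblock_execute_sync_cache,
            .execute_write_same = iblock_execute_write_same,
            .execute_unmap      = iblock_execute_unmap,
    };

    static int iblock_parse_cdb(struct se_cmd *cmd)
    {
            return sbc_parse_cdb(cmd, &iblock_spc_ops);
    }

    static struct se_subsystem_api iblock_template = {
            /* ... */
            .parse_cdb = iblock_parse_cdb,
    };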
diff --git a/include/target/target_core_base.h b/include/target/target_core_base.h index dc35d8660aa6..128ce46fa48a 100644 --- a/include/target/target_core_base.h +++ b/include/target/target_core_base.h | |||
@@ -145,12 +145,9 @@ enum transport_state_table { | |||
145 | TRANSPORT_NO_STATE = 0, | 145 | TRANSPORT_NO_STATE = 0, |
146 | TRANSPORT_NEW_CMD = 1, | 146 | TRANSPORT_NEW_CMD = 1, |
147 | TRANSPORT_WRITE_PENDING = 3, | 147 | TRANSPORT_WRITE_PENDING = 3, |
148 | TRANSPORT_PROCESS_WRITE = 4, | ||
149 | TRANSPORT_PROCESSING = 5, | 148 | TRANSPORT_PROCESSING = 5, |
150 | TRANSPORT_COMPLETE = 6, | 149 | TRANSPORT_COMPLETE = 6, |
151 | TRANSPORT_PROCESS_TMR = 9, | ||
152 | TRANSPORT_ISTATE_PROCESSING = 11, | 150 | TRANSPORT_ISTATE_PROCESSING = 11, |
153 | TRANSPORT_NEW_CMD_MAP = 16, | ||
154 | TRANSPORT_COMPLETE_QF_WP = 18, | 151 | TRANSPORT_COMPLETE_QF_WP = 18, |
155 | TRANSPORT_COMPLETE_QF_OK = 19, | 152 | TRANSPORT_COMPLETE_QF_OK = 19, |
156 | }; | 153 | }; |
@@ -160,25 +157,20 @@ enum se_cmd_flags_table { | |||
160 | SCF_SUPPORTED_SAM_OPCODE = 0x00000001, | 157 | SCF_SUPPORTED_SAM_OPCODE = 0x00000001, |
161 | SCF_TRANSPORT_TASK_SENSE = 0x00000002, | 158 | SCF_TRANSPORT_TASK_SENSE = 0x00000002, |
162 | SCF_EMULATED_TASK_SENSE = 0x00000004, | 159 | SCF_EMULATED_TASK_SENSE = 0x00000004, |
163 | SCF_SCSI_DATA_SG_IO_CDB = 0x00000008, | 160 | SCF_SCSI_DATA_CDB = 0x00000008, |
164 | SCF_SCSI_CONTROL_SG_IO_CDB = 0x00000010, | 161 | SCF_SCSI_TMR_CDB = 0x00000010, |
165 | SCF_SCSI_NON_DATA_CDB = 0x00000020, | 162 | SCF_SCSI_CDB_EXCEPTION = 0x00000020, |
166 | SCF_SCSI_TMR_CDB = 0x00000040, | 163 | SCF_SCSI_RESERVATION_CONFLICT = 0x00000040, |
167 | SCF_SCSI_CDB_EXCEPTION = 0x00000080, | 164 | SCF_FUA = 0x00000080, |
168 | SCF_SCSI_RESERVATION_CONFLICT = 0x00000100, | 165 | SCF_SE_LUN_CMD = 0x00000100, |
169 | SCF_FUA = 0x00000200, | 166 | SCF_BIDI = 0x00000400, |
170 | SCF_SE_LUN_CMD = 0x00000800, | 167 | SCF_SENT_CHECK_CONDITION = 0x00000800, |
171 | SCF_SE_ALLOW_EOO = 0x00001000, | 168 | SCF_OVERFLOW_BIT = 0x00001000, |
172 | SCF_BIDI = 0x00002000, | 169 | SCF_UNDERFLOW_BIT = 0x00002000, |
173 | SCF_SENT_CHECK_CONDITION = 0x00004000, | 170 | SCF_SENT_DELAYED_TAS = 0x00004000, |
174 | SCF_OVERFLOW_BIT = 0x00008000, | 171 | SCF_ALUA_NON_OPTIMIZED = 0x00008000, |
175 | SCF_UNDERFLOW_BIT = 0x00010000, | 172 | SCF_PASSTHROUGH_SG_TO_MEM_NOALLOC = 0x00020000, |
176 | SCF_SENT_DELAYED_TAS = 0x00020000, | 173 | SCF_ACK_KREF = 0x00040000, |
177 | SCF_ALUA_NON_OPTIMIZED = 0x00040000, | ||
178 | SCF_DELAYED_CMD_FROM_SAM_ATTR = 0x00080000, | ||
179 | SCF_UNUSED = 0x00100000, | ||
180 | SCF_PASSTHROUGH_SG_TO_MEM_NOALLOC = 0x00200000, | ||
181 | SCF_ACK_KREF = 0x00400000, | ||
182 | }; | 174 | }; |
183 | 175 | ||
184 | /* struct se_dev_entry->lun_flags and struct se_lun->lun_access */ | 176 | /* struct se_dev_entry->lun_flags and struct se_lun->lun_access */ |
@@ -220,6 +212,7 @@ enum tcm_sense_reason_table { | |||
220 | TCM_CHECK_CONDITION_UNIT_ATTENTION = 0x0e, | 212 | TCM_CHECK_CONDITION_UNIT_ATTENTION = 0x0e, |
221 | TCM_CHECK_CONDITION_NOT_READY = 0x0f, | 213 | TCM_CHECK_CONDITION_NOT_READY = 0x0f, |
222 | TCM_RESERVATION_CONFLICT = 0x10, | 214 | TCM_RESERVATION_CONFLICT = 0x10, |
215 | TCM_ADDRESS_OUT_OF_RANGE = 0x11, | ||
223 | }; | 216 | }; |
224 | 217 | ||
225 | enum target_sc_flags_table { | 218 | enum target_sc_flags_table { |
@@ -471,13 +464,6 @@ struct t10_reservation { | |||
471 | struct t10_reservation_ops pr_ops; | 464 | struct t10_reservation_ops pr_ops; |
472 | }; | 465 | }; |
473 | 466 | ||
474 | struct se_queue_obj { | ||
475 | atomic_t queue_cnt; | ||
476 | spinlock_t cmd_queue_lock; | ||
477 | struct list_head qobj_list; | ||
478 | wait_queue_head_t thread_wq; | ||
479 | }; | ||
480 | |||
481 | struct se_tmr_req { | 467 | struct se_tmr_req { |
482 | /* Task Management function to be performed */ | 468 | /* Task Management function to be performed */ |
483 | u8 function; | 469 | u8 function; |
@@ -486,11 +472,8 @@ struct se_tmr_req { | |||
486 | int call_transport; | 472 | int call_transport; |
487 | /* Reference to ITT that Task Mgmt should be performed */ | 473 | /* Reference to ITT that Task Mgmt should be performed */ |
488 | u32 ref_task_tag; | 474 | u32 ref_task_tag; |
489 | /* 64-bit encoded SAM LUN from $FABRIC_MOD TMR header */ | ||
490 | u64 ref_task_lun; | ||
491 | void *fabric_tmr_ptr; | 475 | void *fabric_tmr_ptr; |
492 | struct se_cmd *task_cmd; | 476 | struct se_cmd *task_cmd; |
493 | struct se_cmd *ref_cmd; | ||
494 | struct se_device *tmr_dev; | 477 | struct se_device *tmr_dev; |
495 | struct se_lun *tmr_lun; | 478 | struct se_lun *tmr_lun; |
496 | struct list_head tmr_list; | 479 | struct list_head tmr_list; |
@@ -537,7 +520,6 @@ struct se_cmd { | |||
537 | /* Only used for internal passthrough and legacy TCM fabric modules */ | 520 | /* Only used for internal passthrough and legacy TCM fabric modules */ |
538 | struct se_session *se_sess; | 521 | struct se_session *se_sess; |
539 | struct se_tmr_req *se_tmr_req; | 522 | struct se_tmr_req *se_tmr_req; |
540 | struct list_head se_queue_node; | ||
541 | struct list_head se_cmd_list; | 523 | struct list_head se_cmd_list; |
542 | struct completion cmd_wait_comp; | 524 | struct completion cmd_wait_comp; |
543 | struct kref cmd_kref; | 525 | struct kref cmd_kref; |
@@ -575,7 +557,6 @@ struct se_cmd { | |||
575 | struct scatterlist *t_bidi_data_sg; | 557 | struct scatterlist *t_bidi_data_sg; |
576 | unsigned int t_bidi_data_nents; | 558 | unsigned int t_bidi_data_nents; |
577 | 559 | ||
578 | struct list_head execute_list; | ||
579 | struct list_head state_list; | 560 | struct list_head state_list; |
580 | bool state_active; | 561 | bool state_active; |
581 | 562 | ||
@@ -633,7 +614,6 @@ struct se_session { | |||
633 | struct list_head sess_list; | 614 | struct list_head sess_list; |
634 | struct list_head sess_acl_list; | 615 | struct list_head sess_acl_list; |
635 | struct list_head sess_cmd_list; | 616 | struct list_head sess_cmd_list; |
636 | struct list_head sess_wait_list; | ||
637 | spinlock_t sess_cmd_lock; | 617 | spinlock_t sess_cmd_lock; |
638 | struct kref sess_kref; | 618 | struct kref sess_kref; |
639 | }; | 619 | }; |
@@ -780,13 +760,11 @@ struct se_device { | |||
780 | /* Active commands on this virtual SE device */ | 760 | /* Active commands on this virtual SE device */ |
781 | atomic_t simple_cmds; | 761 | atomic_t simple_cmds; |
782 | atomic_t dev_ordered_id; | 762 | atomic_t dev_ordered_id; |
783 | atomic_t execute_tasks; | ||
784 | atomic_t dev_ordered_sync; | 763 | atomic_t dev_ordered_sync; |
785 | atomic_t dev_qf_count; | 764 | atomic_t dev_qf_count; |
786 | struct se_obj dev_obj; | 765 | struct se_obj dev_obj; |
787 | struct se_obj dev_access_obj; | 766 | struct se_obj dev_access_obj; |
788 | struct se_obj dev_export_obj; | 767 | struct se_obj dev_export_obj; |
789 | struct se_queue_obj dev_queue_obj; | ||
790 | spinlock_t delayed_cmd_lock; | 768 | spinlock_t delayed_cmd_lock; |
791 | spinlock_t execute_task_lock; | 769 | spinlock_t execute_task_lock; |
792 | spinlock_t dev_reservation_lock; | 770 | spinlock_t dev_reservation_lock; |
@@ -802,11 +780,9 @@ struct se_device { | |||
802 | struct t10_pr_registration *dev_pr_res_holder; | 780 | struct t10_pr_registration *dev_pr_res_holder; |
803 | struct list_head dev_sep_list; | 781 | struct list_head dev_sep_list; |
804 | struct list_head dev_tmr_list; | 782 | struct list_head dev_tmr_list; |
805 | /* Pointer to descriptor for processing thread */ | 783 | struct workqueue_struct *tmr_wq; |
806 | struct task_struct *process_thread; | ||
807 | struct work_struct qf_work_queue; | 784 | struct work_struct qf_work_queue; |
808 | struct list_head delayed_cmd_list; | 785 | struct list_head delayed_cmd_list; |
809 | struct list_head execute_list; | ||
810 | struct list_head state_list; | 786 | struct list_head state_list; |
811 | struct list_head qf_cmd_list; | 787 | struct list_head qf_cmd_list; |
812 | /* Pointer to associated SE HBA */ | 788 | /* Pointer to associated SE HBA */ |
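The new workqueue_struct *tmr_wq member replaces the per-device process_thread. A hedged sketch of its setup at device-configure time (the queue name format and flags here are assumptions, not lifted from this diff):

    dev->tmr_wq = alloc_workqueue("tmr-%s", WQ_MEM_RECLAIM | WQ_UNBOUND,
                                  1, dev->transport->name);
    if (!dev->tmr_wq)
            return -ENOMEM;

    /* ... and torn down alongside the device: */
    destroy_workqueue(dev->tmr_wq);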
diff --git a/include/target/target_core_fabric.h b/include/target/target_core_fabric.h index c78a23333c4f..69fb3cfd02d7 100644 --- a/include/target/target_core_fabric.h +++ b/include/target/target_core_fabric.h | |||
@@ -33,12 +33,6 @@ struct target_core_fabric_ops { | |||
33 | struct se_node_acl *); | 33 | struct se_node_acl *); |
34 | u32 (*tpg_get_inst_index)(struct se_portal_group *); | 34 | u32 (*tpg_get_inst_index)(struct se_portal_group *); |
35 | /* | 35 | /* |
36 | * Optional function pointer for TCM to perform command map | ||
37 | * from TCM processing thread context, for those struct se_cmd | ||
38 | * initially allocated in interrupt context. | ||
39 | */ | ||
40 | int (*new_cmd_map)(struct se_cmd *); | ||
41 | /* | ||
42 | * Optional to release struct se_cmd and fabric dependent allocated | 36 | * Optional to release struct se_cmd and fabric dependent allocated |
43 | * I/O descriptor in transport_cmd_check_stop(). | 37 | * I/O descriptor in transport_cmd_check_stop(). |
44 | * | 38 | * |
@@ -108,20 +102,18 @@ void transport_init_se_cmd(struct se_cmd *, struct target_core_fabric_ops *, | |||
108 | struct se_session *, u32, int, int, unsigned char *); | 102 | struct se_session *, u32, int, int, unsigned char *); |
109 | int transport_lookup_cmd_lun(struct se_cmd *, u32); | 103 | int transport_lookup_cmd_lun(struct se_cmd *, u32); |
110 | int target_setup_cmd_from_cdb(struct se_cmd *, unsigned char *); | 104 | int target_setup_cmd_from_cdb(struct se_cmd *, unsigned char *); |
111 | void target_submit_cmd(struct se_cmd *, struct se_session *, unsigned char *, | 105 | int target_submit_cmd(struct se_cmd *, struct se_session *, unsigned char *, |
112 | unsigned char *, u32, u32, int, int, int); | 106 | unsigned char *, u32, u32, int, int, int); |
113 | int target_submit_tmr(struct se_cmd *se_cmd, struct se_session *se_sess, | 107 | int target_submit_tmr(struct se_cmd *se_cmd, struct se_session *se_sess, |
114 | unsigned char *sense, u32 unpacked_lun, | 108 | unsigned char *sense, u32 unpacked_lun, |
115 | void *fabric_tmr_ptr, unsigned char tm_type, | 109 | void *fabric_tmr_ptr, unsigned char tm_type, |
116 | gfp_t, unsigned int, int); | 110 | gfp_t, unsigned int, int); |
117 | int transport_handle_cdb_direct(struct se_cmd *); | 111 | int transport_handle_cdb_direct(struct se_cmd *); |
118 | int transport_generic_handle_cdb_map(struct se_cmd *); | ||
119 | int transport_generic_handle_data(struct se_cmd *); | ||
120 | int transport_generic_map_mem_to_cmd(struct se_cmd *cmd, | 112 | int transport_generic_map_mem_to_cmd(struct se_cmd *cmd, |
121 | struct scatterlist *, u32, struct scatterlist *, u32); | 113 | struct scatterlist *, u32, struct scatterlist *, u32); |
122 | int transport_generic_new_cmd(struct se_cmd *); | 114 | int transport_generic_new_cmd(struct se_cmd *); |
123 | 115 | ||
124 | void transport_generic_process_write(struct se_cmd *); | 116 | void target_execute_cmd(struct se_cmd *cmd); |
125 | 117 | ||
126 | void transport_generic_free_cmd(struct se_cmd *, int); | 118 | void transport_generic_free_cmd(struct se_cmd *, int); |
127 | 119 | ||
@@ -129,9 +121,8 @@ bool transport_wait_for_tasks(struct se_cmd *); | |||
129 | int transport_check_aborted_status(struct se_cmd *, int); | 121 | int transport_check_aborted_status(struct se_cmd *, int); |
130 | int transport_send_check_condition_and_sense(struct se_cmd *, u8, int); | 122 | int transport_send_check_condition_and_sense(struct se_cmd *, u8, int); |
131 | 123 | ||
132 | void target_get_sess_cmd(struct se_session *, struct se_cmd *, bool); | ||
133 | int target_put_sess_cmd(struct se_session *, struct se_cmd *); | 124 | int target_put_sess_cmd(struct se_session *, struct se_cmd *); |
134 | void target_splice_sess_cmd_list(struct se_session *); | 125 | void target_sess_cmd_list_set_waiting(struct se_session *); |
135 | void target_wait_for_sess_cmds(struct se_session *, int); | 126 | void target_wait_for_sess_cmds(struct se_session *, int); |
136 | 127 | ||
137 | int core_alua_check_nonop_delay(struct se_cmd *); | 128 | int core_alua_check_nonop_delay(struct se_cmd *); |