author     Linus Torvalds <torvalds@linux-foundation.org>  2012-03-22 15:38:04 -0400
committer  Linus Torvalds <torvalds@linux-foundation.org>  2012-03-22 15:38:04 -0400
commit     1ab142d499294b844ecc81e8004db4ce029b0b61 (patch)
tree       9db85a456d0cba3de8b9bd6671b1b52fa939770c
parent     267d7b23dd62f6ec55e0fba777e456495c308fc7 (diff)
parent     187e70a554e0f0717a65998bc9199945cbbd4692 (diff)
Merge branch 'for-next' of git://git.kernel.org/pub/scm/linux/kernel/git/nab/target-pending
Pull SCSI target updates from Nicholas Bellinger:
"This contains the usual set of updates and bugfixes to target-core +
existing fabric module code, along with a handful of the patches
destined for v3.3 stable.
It also contains the necessary target-core infrastructure pieces
required to run using tcm_qla2xxx.ko WWPNs with the new Qlogic Fibre
Channel fabric module currently queued in target-pending/for-next-merge,
and coming for round 2.
The highlights for this series include:
- Add target_submit_tmr() helper function for fabric task management
(andy)
- Convert tcm_fc to use target_submit_tmr() (andy)
- Replace target core various cmd flags with a transport state (hch)
- Convert loopback to use workqueue submission (hch)
- Convert target core to use array_zalloc for tpg_lun_list (joern)
- Convert target core to use array_zalloc for device_list (joern)
- Add target core support for TMR_ABORT_TASK (nab)
- Add target core se_sess->sess_kref + get/put helpers (nab)
- Add target core se_node_acl->acl_kref for ->acl_free_comp usage
(nab)
- Convert iscsi-target to use target_put_session + sess_kref (nab)
- Fix tcm_fc fc_exch memory leak in ft_send_resp_status (nab)
- Fix ib_srpt srpt_handle_cmd send_ioctx->ioctx_kref leak on
exception (nab)
- Fix up target core handling of short INQUIRY buffers (roland)
- Untangle target-core front-end and back-end meanings of max_sectors
attribute (roland)
- Set loopback residual field for SCSI commands (roland)
- Fix target-core 16-bit target ports for SET TARGET PORT GROUPS
emulation (roland)
Thanks again to Andy, Christoph, Joern, Roland, and everyone who has
contributed this round!"
* 'for-next' of git://git.kernel.org/pub/scm/linux/kernel/git/nab/target-pending: (64 commits)
ib_srpt: Fix srpt_handle_cmd send_ioctx->ioctx_kref leak on exception
loopback: Fix transport_generic_allocate_tasks error handling
iscsi-target: remove improper externs
iscsi-target: Remove unused variables in iscsi_target_parameters.c
target: remove obvious warnings
target: Use array_zalloc for device_list
target: Use array_zalloc for tpg_lun_list
target: Fix sense code for unsupported SERVICE ACTION IN
target: Remove hack to make READ CAPACITY(10) lie if thin provisioning is enabled
target: Bump core version to v4.1.0-rc2-ml + fabric versions
tcm_fc: Fix fc_exch memory leak in ft_send_resp_status
target: Drop unused legacy target_core_fabric_ops API callers
iscsi-target: Convert to use target_put_session + sess_kref
target: Convert se_node_acl->acl_group removal to use ->acl_kref
target: Add se_node_acl->acl_kref for ->acl_free_comp usage
target: Add se_node_acl->acl_free_comp for NodeACL release path
target: Add se_sess->sess_kref + get/put helpers
target: Convert session_lock to irqsave
target: Fix typo in drivers/target
iscsi-target: Fix dynamic -> explict NodeACL pointer reference
...
44 files changed, 961 insertions, 1061 deletions
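
A pattern worth flagging before the diffs: the "Replace target core various cmd flags with a transport state" change converts the per-condition atomic_t members of struct se_cmd (t_transport_aborted, t_transport_complete, t_transport_sent, transport_lun_stop, and friends) into bits of a single transport_state word guarded by t_state_lock, which is why so many hunks below swap an atomic_set()/atomic_read() for a locked bit update or a plain mask test. A minimal sketch of both sides of that conversion, using the CMD_T_* names that appear in the hunks (the real definitions live in target_core_base.h; the helper name here is illustrative):

	static void example_mark_aborted(struct se_cmd *cmd)
	{
		unsigned long flags;

		/* Old style: atomic_set(&cmd->t_transport_aborted, 1); */

		/* New style: set the bit while holding t_state_lock. */
		spin_lock_irqsave(&cmd->t_state_lock, flags);
		cmd->transport_state |= CMD_T_ABORTED;
		spin_unlock_irqrestore(&cmd->t_state_lock, flags);
	}

Readers of the flag then use a plain bitmask test, e.g. (cmd->transport_state & CMD_T_COMPLETE), in place of atomic_read().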
diff --git a/drivers/infiniband/ulp/srpt/ib_srpt.c b/drivers/infiniband/ulp/srpt/ib_srpt.c
index ebe33d960d77..69e2ad06e515 100644
--- a/drivers/infiniband/ulp/srpt/ib_srpt.c
+++ b/drivers/infiniband/ulp/srpt/ib_srpt.c
@@ -1378,7 +1378,9 @@ static int srpt_abort_cmd(struct srpt_send_ioctx *ioctx)
 		break;
 	case SRPT_STATE_NEED_DATA:
 		/* DMA_TO_DEVICE (write) - RDMA read error. */
-		atomic_set(&ioctx->cmd.transport_lun_stop, 1);
+		spin_lock_irqsave(&ioctx->cmd.t_state_lock, flags);
+		ioctx->cmd.transport_state |= CMD_T_LUN_STOP;
+		spin_unlock_irqrestore(&ioctx->cmd.t_state_lock, flags);
 		transport_generic_handle_data(&ioctx->cmd);
 		break;
 	case SRPT_STATE_CMD_RSP_SENT:
@@ -1387,7 +1389,9 @@ static int srpt_abort_cmd(struct srpt_send_ioctx *ioctx)
 		 * not been received in time.
 		 */
 		srpt_unmap_sg_to_ib_sge(ioctx->ch, ioctx);
-		atomic_set(&ioctx->cmd.transport_lun_stop, 1);
+		spin_lock_irqsave(&ioctx->cmd.t_state_lock, flags);
+		ioctx->cmd.transport_state |= CMD_T_LUN_STOP;
+		spin_unlock_irqrestore(&ioctx->cmd.t_state_lock, flags);
 		kref_put(&ioctx->kref, srpt_put_send_ioctx_kref);
 		break;
 	case SRPT_STATE_MGMT_RSP_SENT:
@@ -1494,6 +1498,7 @@ static void srpt_handle_rdma_err_comp(struct srpt_rdma_ch *ch,
 {
 	struct se_cmd *cmd;
 	enum srpt_command_state state;
+	unsigned long flags;
 
 	cmd = &ioctx->cmd;
 	state = srpt_get_cmd_state(ioctx);
@@ -1513,7 +1518,9 @@ static void srpt_handle_rdma_err_comp(struct srpt_rdma_ch *ch,
 			__func__, __LINE__, state);
 		break;
 	case SRPT_RDMA_WRITE_LAST:
-		atomic_set(&ioctx->cmd.transport_lun_stop, 1);
+		spin_lock_irqsave(&ioctx->cmd.t_state_lock, flags);
+		ioctx->cmd.transport_state |= CMD_T_LUN_STOP;
+		spin_unlock_irqrestore(&ioctx->cmd.t_state_lock, flags);
 		break;
 	default:
 		printk(KERN_ERR "%s[%d]: opcode = %u\n", __func__,
@@ -1750,6 +1757,7 @@ static int srpt_handle_cmd(struct srpt_rdma_ch *ch,
 			srp_cmd->tag);
 		cmd->se_cmd_flags |= SCF_SCSI_CDB_EXCEPTION;
 		cmd->scsi_sense_reason = TCM_INVALID_CDB_FIELD;
+		kref_put(&send_ioctx->kref, srpt_put_send_ioctx_kref);
 		goto send_sense;
 	}
 
@@ -1757,15 +1765,19 @@
 	cmd->data_direction = dir;
 	unpacked_lun = srpt_unpack_lun((uint8_t *)&srp_cmd->lun,
 				       sizeof(srp_cmd->lun));
-	if (transport_lookup_cmd_lun(cmd, unpacked_lun) < 0)
+	if (transport_lookup_cmd_lun(cmd, unpacked_lun) < 0) {
+		kref_put(&send_ioctx->kref, srpt_put_send_ioctx_kref);
 		goto send_sense;
+	}
 	ret = transport_generic_allocate_tasks(cmd, srp_cmd->cdb);
-	if (cmd->se_cmd_flags & SCF_SCSI_RESERVATION_CONFLICT)
-		srpt_queue_status(cmd);
-	else if (cmd->se_cmd_flags & SCF_SCSI_CDB_EXCEPTION)
-		goto send_sense;
-	else
-		WARN_ON_ONCE(ret);
+	if (ret < 0) {
+		kref_put(&send_ioctx->kref, srpt_put_send_ioctx_kref);
+		if (cmd->se_cmd_flags & SCF_SCSI_RESERVATION_CONFLICT) {
+			srpt_queue_status(cmd);
+			return 0;
+		} else
+			goto send_sense;
+	}
 
 	transport_handle_cdb_direct(cmd);
 	return 0;
@@ -1871,8 +1883,8 @@ static void srpt_handle_tsk_mgmt(struct srpt_rdma_ch *ch,
 			TMR_TASK_MGMT_FUNCTION_NOT_SUPPORTED;
 		goto process_tmr;
 	}
-	cmd->se_tmr_req = core_tmr_alloc_req(cmd, NULL, tcm_tmr, GFP_KERNEL);
-	if (!cmd->se_tmr_req) {
+	res = core_tmr_alloc_req(cmd, NULL, tcm_tmr, GFP_KERNEL);
+	if (res < 0) {
 		send_ioctx->cmd.se_cmd_flags |= SCF_SCSI_CDB_EXCEPTION;
 		send_ioctx->cmd.se_tmr_req->response = TMR_FUNCTION_REJECTED;
 		goto process_tmr;
@@ -3514,25 +3526,6 @@ static void srpt_close_session(struct se_session *se_sess)
 }
 
 /**
- * To do: Find out whether stop_session() has a meaning for transports
- * other than iSCSI.
- */
-static void srpt_stop_session(struct se_session *se_sess, int sess_sleep,
-			      int conn_sleep)
-{
-}
-
-static void srpt_reset_nexus(struct se_session *sess)
-{
-	printk(KERN_ERR "This is the SRP protocol, not iSCSI\n");
-}
-
-static int srpt_sess_logged_in(struct se_session *se_sess)
-{
-	return true;
-}
-
-/**
  * srpt_sess_get_index() - Return the value of scsiAttIntrPortIndex (SCSI-MIB).
  *
  * A quote from RFC 4455 (SCSI-MIB) about this MIB object:
@@ -3576,11 +3569,6 @@ static u16 srpt_get_fabric_sense_len(void)
 	return 0;
 }
 
-static int srpt_is_state_remove(struct se_cmd *se_cmd)
-{
-	return 0;
-}
-
 /**
  * srpt_parse_i_port_id() - Parse an initiator port ID.
  * @name: ASCII representation of a 128-bit initiator port ID.
@@ -3950,9 +3938,6 @@ static struct target_core_fabric_ops srpt_template = {
 	.check_stop_free = srpt_check_stop_free,
 	.shutdown_session = srpt_shutdown_session,
 	.close_session = srpt_close_session,
-	.stop_session = srpt_stop_session,
-	.fall_back_to_erl0 = srpt_reset_nexus,
-	.sess_logged_in = srpt_sess_logged_in,
 	.sess_get_index = srpt_sess_get_index,
 	.sess_get_initiator_sid = NULL,
 	.write_pending = srpt_write_pending,
@@ -3965,7 +3950,6 @@ static struct target_core_fabric_ops srpt_template = {
 	.queue_tm_rsp = srpt_queue_response,
 	.get_fabric_sense_len = srpt_get_fabric_sense_len,
 	.set_fabric_sense_len = srpt_set_fabric_sense_len,
-	.is_state_remove = srpt_is_state_remove,
 	/*
 	 * Setup function pointers for generic logic in
 	 * target_core_fabric_configfs.c
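
One API change surfaces twice in this merge, first in the srpt_handle_tsk_mgmt() hunk above and again in iscsi_target_util.c below: core_tmr_alloc_req() now installs the allocated request into se_cmd->se_tmr_req itself and returns an int, so callers check the return code instead of the pointer. A hedged sketch of the new calling convention (the wrapper name is illustrative; the arguments match the srpt diff):

	static int example_alloc_tmr(struct se_cmd *cmd, u8 function)
	{
		int res;

		res = core_tmr_alloc_req(cmd, NULL, function, GFP_KERNEL);
		if (res < 0)
			return res;	/* allocation failed, se_tmr_req not set */

		/* cmd->se_tmr_req is now valid */
		return 0;
	}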
diff --git a/drivers/scsi/bnx2fc/bnx2fc_io.c b/drivers/scsi/bnx2fc/bnx2fc_io.c
index 84a78af83f90..e897ce975bb8 100644
--- a/drivers/scsi/bnx2fc/bnx2fc_io.c
+++ b/drivers/scsi/bnx2fc/bnx2fc_io.c
@@ -1682,9 +1682,7 @@ void bnx2fc_build_fcp_cmnd(struct bnx2fc_cmd *io_req,
 
 	memset(fcp_cmnd, 0, sizeof(struct fcp_cmnd));
 
-	int_to_scsilun(sc_cmd->device->lun,
-			(struct scsi_lun *) fcp_cmnd->fc_lun);
-
+	int_to_scsilun(sc_cmd->device->lun, &fcp_cmnd->fc_lun);
 
 	fcp_cmnd->fc_dl = htonl(io_req->data_xfer_len);
 	memcpy(fcp_cmnd->fc_cdb, sc_cmd->cmnd, sc_cmd->cmd_len);
diff --git a/drivers/scsi/libfc/fc_fcp.c b/drivers/scsi/libfc/fc_fcp.c
index b577c907b318..f7357308655a 100644
--- a/drivers/scsi/libfc/fc_fcp.c
+++ b/drivers/scsi/libfc/fc_fcp.c
@@ -1074,8 +1074,7 @@ static int fc_fcp_pkt_send(struct fc_lport *lport, struct fc_fcp_pkt *fsp)
 	fsp->cdb_cmd.fc_dl = htonl(fsp->data_len);
 	fsp->cdb_cmd.fc_flags = fsp->req_flags & ~FCP_CFL_LEN_MASK;
 
-	int_to_scsilun(fsp->cmd->device->lun,
-		       (struct scsi_lun *)fsp->cdb_cmd.fc_lun);
+	int_to_scsilun(fsp->cmd->device->lun, &fsp->cdb_cmd.fc_lun);
 	memcpy(fsp->cdb_cmd.fc_cdb, fsp->cmd->cmnd, fsp->cmd->cmd_len);
 
 	spin_lock_irqsave(&si->scsi_queue_lock, flags);
@@ -1257,7 +1256,7 @@ static int fc_lun_reset(struct fc_lport *lport, struct fc_fcp_pkt *fsp,
 
 	fsp->cdb_cmd.fc_dl = htonl(fsp->data_len);
 	fsp->cdb_cmd.fc_tm_flags = FCP_TMF_LUN_RESET;
-	int_to_scsilun(lun, (struct scsi_lun *)fsp->cdb_cmd.fc_lun);
+	int_to_scsilun(lun, &fsp->cdb_cmd.fc_lun);
 
 	fsp->wait_for_comp = 1;
 	init_completion(&fsp->tm_done);
diff --git a/drivers/target/iscsi/iscsi_target.c b/drivers/target/iscsi/iscsi_target.c
index 1c6f700f5faa..8b1d5e62ed40 100644
--- a/drivers/target/iscsi/iscsi_target.c
+++ b/drivers/target/iscsi/iscsi_target.c
@@ -781,7 +781,7 @@ static int iscsit_alloc_buffs(struct iscsi_cmd *cmd)
 	struct scatterlist *sgl;
 	u32 length = cmd->se_cmd.data_length;
 	int nents = DIV_ROUND_UP(length, PAGE_SIZE);
-	int i = 0, ret;
+	int i = 0, j = 0, ret;
 	/*
 	 * If no SCSI payload is present, allocate the default iovecs used for
 	 * iSCSI PDU Header
@@ -822,17 +822,15 @@
 	 */
 	ret = iscsit_allocate_iovecs(cmd);
 	if (ret < 0)
-		goto page_alloc_failed;
+		return -ENOMEM;
 
 	return 0;
 
 page_alloc_failed:
-	while (i >= 0) {
-		__free_page(sg_page(&sgl[i]));
-		i--;
-	}
-	kfree(cmd->t_mem_sg);
-	cmd->t_mem_sg = NULL;
+	while (j < i)
+		__free_page(sg_page(&sgl[j++]));
+
+	kfree(sgl);
 	return -ENOMEM;
 }
 
@@ -1007,8 +1005,8 @@ done:
 	/*
 	 * The CDB is going to an se_device_t.
 	 */
-	ret = iscsit_get_lun_for_cmd(cmd, hdr->cdb,
-			get_unaligned_le64(&hdr->lun));
+	ret = transport_lookup_cmd_lun(&cmd->se_cmd,
+			scsilun_to_int(&hdr->lun));
 	if (ret < 0) {
 		if (cmd->se_cmd.scsi_sense_reason == TCM_NON_EXISTENT_LUN) {
 			pr_debug("Responding to non-acl'ed,"
@@ -1364,7 +1362,7 @@ static int iscsit_handle_data_out(struct iscsi_conn *conn, unsigned char *buf)
 	 * outstanding_r2ts reaches zero, go ahead and send the delayed
 	 * TASK_ABORTED status.
 	 */
-	if (atomic_read(&se_cmd->t_transport_aborted) != 0) {
+	if (se_cmd->transport_state & CMD_T_ABORTED) {
 		if (hdr->flags & ISCSI_FLAG_CMD_FINAL)
 			if (--cmd->outstanding_r2ts < 1) {
 				iscsit_stop_dataout_timer(cmd);
@@ -1472,14 +1470,12 @@ static int iscsit_handle_nop_out(
 	unsigned char *ping_data = NULL;
 	int cmdsn_ret, niov = 0, ret = 0, rx_got, rx_size;
 	u32 checksum, data_crc, padding = 0, payload_length;
-	u64 lun;
 	struct iscsi_cmd *cmd = NULL;
 	struct kvec *iov = NULL;
 	struct iscsi_nopout *hdr;
 
 	hdr = (struct iscsi_nopout *) buf;
 	payload_length = ntoh24(hdr->dlength);
-	lun = get_unaligned_le64(&hdr->lun);
 	hdr->itt = be32_to_cpu(hdr->itt);
 	hdr->ttt = be32_to_cpu(hdr->ttt);
 	hdr->cmdsn = be32_to_cpu(hdr->cmdsn);
@@ -1689,13 +1685,11 @@ static int iscsit_handle_task_mgt_cmd(
 	struct se_tmr_req *se_tmr;
 	struct iscsi_tmr_req *tmr_req;
 	struct iscsi_tm *hdr;
-	u32 payload_length;
 	int out_of_order_cmdsn = 0;
 	int ret;
 	u8 function;
 
 	hdr = (struct iscsi_tm *) buf;
-	payload_length = ntoh24(hdr->dlength);
 	hdr->itt = be32_to_cpu(hdr->itt);
 	hdr->rtt = be32_to_cpu(hdr->rtt);
 	hdr->cmdsn = be32_to_cpu(hdr->cmdsn);
@@ -1747,8 +1741,8 @@
 	 * Locate the struct se_lun for all TMRs not related to ERL=2 TASK_REASSIGN
 	 */
 	if (function != ISCSI_TM_FUNC_TASK_REASSIGN) {
-		ret = iscsit_get_lun_for_tmr(cmd,
-					get_unaligned_le64(&hdr->lun));
+		ret = transport_lookup_tmr_lun(&cmd->se_cmd,
+					scsilun_to_int(&hdr->lun));
 		if (ret < 0) {
 			cmd->se_cmd.se_cmd_flags |= SCF_SCSI_CDB_EXCEPTION;
 			se_tmr->response = ISCSI_TMF_RSP_NO_LUN;
@@ -2207,14 +2201,10 @@ static int iscsit_handle_snack(
 	struct iscsi_conn *conn,
 	unsigned char *buf)
 {
-	u32 unpacked_lun;
-	u64 lun;
 	struct iscsi_snack *hdr;
 
 	hdr = (struct iscsi_snack *) buf;
 	hdr->flags &= ~ISCSI_FLAG_CMD_FINAL;
-	lun = get_unaligned_le64(&hdr->lun);
-	unpacked_lun = scsilun_to_int((struct scsi_lun *)&lun);
 	hdr->itt = be32_to_cpu(hdr->itt);
 	hdr->ttt = be32_to_cpu(hdr->ttt);
 	hdr->exp_statsn = be32_to_cpu(hdr->exp_statsn);
@@ -3514,7 +3504,6 @@ int iscsi_target_tx_thread(void *arg)
 	struct iscsi_cmd *cmd = NULL;
 	struct iscsi_conn *conn;
 	struct iscsi_queue_req *qr = NULL;
-	struct se_cmd *se_cmd;
 	struct iscsi_thread_set *ts = arg;
 	/*
 	 * Allow ourselves to be interrupted by SIGINT so that a
@@ -3697,8 +3686,6 @@ check_rsp_state:
 		goto transport_err;
 	}
 
-	se_cmd = &cmd->se_cmd;
-
 	if (map_sg && !conn->conn_ops->IFMarker) {
 		if (iscsit_fe_sendpage_sg(cmd, conn) < 0) {
 			conn->tx_response_queue = 0;
@@ -4171,7 +4158,7 @@ int iscsit_close_connection(
 	if (!atomic_read(&sess->session_reinstatement) &&
 	     atomic_read(&sess->session_fall_back_to_erl0)) {
 		spin_unlock_bh(&sess->conn_lock);
-		iscsit_close_session(sess);
+		target_put_session(sess->se_sess);
 
 		return 0;
 	} else if (atomic_read(&sess->session_logout)) {
@@ -4292,7 +4279,7 @@ static void iscsit_logout_post_handler_closesession(
 	iscsit_dec_conn_usage_count(conn);
 	iscsit_stop_session(sess, 1, 1);
 	iscsit_dec_session_usage_count(sess);
-	iscsit_close_session(sess);
+	target_put_session(sess->se_sess);
 }
 
 static void iscsit_logout_post_handler_samecid(
@@ -4458,7 +4445,7 @@ int iscsit_free_session(struct iscsi_session *sess)
 	} else
 		spin_unlock_bh(&sess->conn_lock);
 
-	iscsit_close_session(sess);
+	target_put_session(sess->se_sess);
 	return 0;
 }
 
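
Every iscsit_close_session() call in the file above becomes target_put_session(), so the iSCSI session is only torn down when the last reference is dropped. Based on the "Add se_sess->sess_kref + get/put helpers" commit in this series, the helpers look roughly like the sketch below (the actual code lives in target_core_transport.c); the final put invokes the fabric's close_session() callback, which for LIO is lio_tpg_close_session() and, from there, iscsit_close_session():

	static void target_release_session(struct kref *kref)
	{
		struct se_session *se_sess = container_of(kref,
				struct se_session, sess_kref);
		struct se_portal_group *se_tpg = se_sess->se_tpg;

		se_tpg->se_tpg_tfo->close_session(se_sess);
	}

	void target_get_session(struct se_session *se_sess)
	{
		kref_get(&se_sess->sess_kref);
	}

	void target_put_session(struct se_session *se_sess)
	{
		kref_put(&se_sess->sess_kref, target_release_session);
	}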
diff --git a/drivers/target/iscsi/iscsi_target_configfs.c b/drivers/target/iscsi/iscsi_target_configfs.c
index 6b35b37988ed..00c58cc82c85 100644
--- a/drivers/target/iscsi/iscsi_target_configfs.c
+++ b/drivers/target/iscsi/iscsi_target_configfs.c
@@ -812,9 +812,6 @@ static struct se_node_acl *lio_target_make_nodeacl(
 	if (!se_nacl_new)
 		return ERR_PTR(-ENOMEM);
 
-	acl = container_of(se_nacl_new, struct iscsi_node_acl,
-				se_node_acl);
-
 	cmdsn_depth = ISCSI_TPG_ATTRIB(tpg)->default_cmdsn_depth;
 	/*
 	 * se_nacl_new may be released by core_tpg_add_initiator_node_acl()
@@ -825,7 +822,8 @@
 	if (IS_ERR(se_nacl))
 		return se_nacl;
 
-	stats_cg = &acl->se_node_acl.acl_fabric_stat_group;
+	acl = container_of(se_nacl, struct iscsi_node_acl, se_node_acl);
+	stats_cg = &se_nacl->acl_fabric_stat_group;
 
 	stats_cg->default_groups = kzalloc(sizeof(struct config_group) * 2,
 				GFP_KERNEL);
@@ -1505,28 +1503,6 @@ static int iscsi_get_cmd_state(struct se_cmd *se_cmd)
 	return cmd->i_state;
 }
 
-static int iscsi_is_state_remove(struct se_cmd *se_cmd)
-{
-	struct iscsi_cmd *cmd = container_of(se_cmd, struct iscsi_cmd, se_cmd);
-
-	return (cmd->i_state == ISTATE_REMOVE);
-}
-
-static int lio_sess_logged_in(struct se_session *se_sess)
-{
-	struct iscsi_session *sess = se_sess->fabric_sess_ptr;
-	int ret;
-	/*
-	 * Called with spin_lock_bh(&tpg_lock); and
-	 * spin_lock(&se_tpg->session_lock); held.
-	 */
-	spin_lock(&sess->conn_lock);
-	ret = (sess->session_state != TARG_SESS_STATE_LOGGED_IN);
-	spin_unlock(&sess->conn_lock);
-
-	return ret;
-}
-
 static u32 lio_sess_get_index(struct se_session *se_sess)
 {
 	struct iscsi_session *sess = se_sess->fabric_sess_ptr;
@@ -1700,8 +1676,8 @@ static int lio_tpg_shutdown_session(struct se_session *se_sess)
 	atomic_set(&sess->session_reinstatement, 1);
 	spin_unlock(&sess->conn_lock);
 
-	iscsit_inc_session_usage_count(sess);
 	iscsit_stop_time2retain_timer(sess);
+	iscsit_stop_session(sess, 1, 1);
 
 	return 1;
 }
@@ -1717,28 +1693,9 @@ static void lio_tpg_close_session(struct se_session *se_sess)
 	 * If the iSCSI Session for the iSCSI Initiator Node exists,
 	 * forcefully shutdown the iSCSI NEXUS.
 	 */
-	iscsit_stop_session(sess, 1, 1);
-	iscsit_dec_session_usage_count(sess);
 	iscsit_close_session(sess);
 }
 
-static void lio_tpg_stop_session(
-	struct se_session *se_sess,
-	int sess_sleep,
-	int conn_sleep)
-{
-	struct iscsi_session *sess = se_sess->fabric_sess_ptr;
-
-	iscsit_stop_session(sess, sess_sleep, conn_sleep);
-}
-
-static void lio_tpg_fall_back_to_erl0(struct se_session *se_sess)
-{
-	struct iscsi_session *sess = se_sess->fabric_sess_ptr;
-
-	iscsit_fall_back_to_erl0(sess);
-}
-
 static u32 lio_tpg_get_inst_index(struct se_portal_group *se_tpg)
 {
 	struct iscsi_portal_group *tpg = se_tpg->se_tpg_fabric_ptr;
@@ -1802,9 +1759,6 @@ int iscsi_target_register_configfs(void)
 	fabric->tf_ops.release_cmd = &lio_release_cmd;
 	fabric->tf_ops.shutdown_session = &lio_tpg_shutdown_session;
 	fabric->tf_ops.close_session = &lio_tpg_close_session;
-	fabric->tf_ops.stop_session = &lio_tpg_stop_session;
-	fabric->tf_ops.fall_back_to_erl0 = &lio_tpg_fall_back_to_erl0;
-	fabric->tf_ops.sess_logged_in = &lio_sess_logged_in;
 	fabric->tf_ops.sess_get_index = &lio_sess_get_index;
 	fabric->tf_ops.sess_get_initiator_sid = &lio_sess_get_initiator_sid;
 	fabric->tf_ops.write_pending = &lio_write_pending;
@@ -1818,7 +1772,6 @@
 	fabric->tf_ops.queue_tm_rsp = &lio_queue_tm_rsp;
 	fabric->tf_ops.set_fabric_sense_len = &lio_set_fabric_sense_len;
 	fabric->tf_ops.get_fabric_sense_len = &lio_get_fabric_sense_len;
-	fabric->tf_ops.is_state_remove = &iscsi_is_state_remove;
 	/*
 	 * Setup function pointers for generic logic in target_core_fabric_configfs.c
 	 */
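
The lio_target_make_nodeacl() hunks above fix an ordering hazard rather than a cosmetic problem: core_tpg_add_initiator_node_acl() may release the se_nacl_new passed into it, so translating back to the containing iscsi_node_acl is only safe on the pointer that call returns. Condensed to its essentials (a sketch of the sequence shown in the diff):

	se_nacl = core_tpg_add_initiator_node_acl(&tpg->tpg_se_tpg,
				se_nacl_new, name, cmdsn_depth);
	if (IS_ERR(se_nacl))
		return se_nacl;

	/* se_nacl_new must not be dereferenced past this point. */
	acl = container_of(se_nacl, struct iscsi_node_acl, se_node_acl);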
diff --git a/drivers/target/iscsi/iscsi_target_core.h b/drivers/target/iscsi/iscsi_target_core.h
index 0ec3b77a0c27..2aaee7efa683 100644
--- a/drivers/target/iscsi/iscsi_target_core.h
+++ b/drivers/target/iscsi/iscsi_target_core.h
@@ -9,7 +9,7 @@
 #include <scsi/iscsi_proto.h>
 #include <target/target_core_base.h>
 
-#define ISCSIT_VERSION			"v4.1.0-rc1"
+#define ISCSIT_VERSION			"v4.1.0-rc2"
 #define ISCSI_MAX_DATASN_MISSING_COUNT	16
 #define ISCSI_TX_THREAD_TCP_TIMEOUT	2
 #define ISCSI_RX_THREAD_TCP_TIMEOUT	2
diff --git a/drivers/target/iscsi/iscsi_target_device.c b/drivers/target/iscsi/iscsi_target_device.c
index f63ea35bc4ae..bcc409853a67 100644
--- a/drivers/target/iscsi/iscsi_target_device.c
+++ b/drivers/target/iscsi/iscsi_target_device.c
@@ -28,25 +28,6 @@
 #include "iscsi_target_tpg.h"
 #include "iscsi_target_util.h"
 
-int iscsit_get_lun_for_tmr(
-	struct iscsi_cmd *cmd,
-	u64 lun)
-{
-	u32 unpacked_lun = scsilun_to_int((struct scsi_lun *)&lun);
-
-	return transport_lookup_tmr_lun(&cmd->se_cmd, unpacked_lun);
-}
-
-int iscsit_get_lun_for_cmd(
-	struct iscsi_cmd *cmd,
-	unsigned char *cdb,
-	u64 lun)
-{
-	u32 unpacked_lun = scsilun_to_int((struct scsi_lun *)&lun);
-
-	return transport_lookup_cmd_lun(&cmd->se_cmd, unpacked_lun);
-}
-
 void iscsit_determine_maxcmdsn(struct iscsi_session *sess)
 {
 	struct se_node_acl *se_nacl;
diff --git a/drivers/target/iscsi/iscsi_target_device.h b/drivers/target/iscsi/iscsi_target_device.h
index bef1cada15f8..a0e2df9e8090 100644
--- a/drivers/target/iscsi/iscsi_target_device.h
+++ b/drivers/target/iscsi/iscsi_target_device.h
@@ -1,8 +1,6 @@
 #ifndef ISCSI_TARGET_DEVICE_H
 #define ISCSI_TARGET_DEVICE_H
 
-extern int iscsit_get_lun_for_tmr(struct iscsi_cmd *, u64);
-extern int iscsit_get_lun_for_cmd(struct iscsi_cmd *, unsigned char *, u64);
 extern void iscsit_determine_maxcmdsn(struct iscsi_session *);
 extern void iscsit_increment_maxcmdsn(struct iscsi_cmd *, struct iscsi_session *);
 
diff --git a/drivers/target/iscsi/iscsi_target_erl0.c b/drivers/target/iscsi/iscsi_target_erl0.c
index 478451167b62..1ab0560b0924 100644
--- a/drivers/target/iscsi/iscsi_target_erl0.c
+++ b/drivers/target/iscsi/iscsi_target_erl0.c
@@ -783,7 +783,7 @@ static void iscsit_handle_time2retain_timeout(unsigned long data)
 	}
 
 	spin_unlock_bh(&se_tpg->session_lock);
-	iscsit_close_session(sess);
+	target_put_session(sess->se_sess);
 }
 
 extern void iscsit_start_time2retain_handler(struct iscsi_session *sess)
diff --git a/drivers/target/iscsi/iscsi_target_erl1.c b/drivers/target/iscsi/iscsi_target_erl1.c
index 27901e37c125..006f605edb08 100644
--- a/drivers/target/iscsi/iscsi_target_erl1.c
+++ b/drivers/target/iscsi/iscsi_target_erl1.c
@@ -416,7 +416,7 @@ static int iscsit_handle_recovery_datain(
 	struct iscsi_datain_req *dr;
 	struct se_cmd *se_cmd = &cmd->se_cmd;
 
-	if (!atomic_read(&se_cmd->t_transport_complete)) {
+	if (!(se_cmd->transport_state & CMD_T_COMPLETE)) {
 		pr_err("Ignoring ITT: 0x%08x Data SNACK\n",
 			cmd->init_task_tag);
 		return 0;
diff --git a/drivers/target/iscsi/iscsi_target_login.c b/drivers/target/iscsi/iscsi_target_login.c
index 1ee33a8c3fab..a3656c9903a1 100644
--- a/drivers/target/iscsi/iscsi_target_login.c
+++ b/drivers/target/iscsi/iscsi_target_login.c
@@ -181,14 +181,16 @@ int iscsi_check_for_session_reinstatement(struct iscsi_conn *conn)
 	if (sess->session_state == TARG_SESS_STATE_FAILED) {
 		spin_unlock_bh(&sess->conn_lock);
 		iscsit_dec_session_usage_count(sess);
-		return iscsit_close_session(sess);
+		target_put_session(sess->se_sess);
+		return 0;
 	}
 	spin_unlock_bh(&sess->conn_lock);
 
 	iscsit_stop_session(sess, 1, 1);
 	iscsit_dec_session_usage_count(sess);
 
-	return iscsit_close_session(sess);
+	target_put_session(sess->se_sess);
+	return 0;
 }
 
 static void iscsi_login_set_conn_values(
@@ -881,7 +883,7 @@ fail:
 static int __iscsi_target_login_thread(struct iscsi_np *np)
 {
 	u8 buffer[ISCSI_HDR_LEN], iscsi_opcode, zero_tsih = 0;
-	int err, ret = 0, ip_proto, sock_type, set_sctp_conn_flag, stop;
+	int err, ret = 0, set_sctp_conn_flag, stop;
 	struct iscsi_conn *conn = NULL;
 	struct iscsi_login *login;
 	struct iscsi_portal_group *tpg = NULL;
@@ -894,8 +896,6 @@ static int __iscsi_target_login_thread(struct iscsi_np *np)
 	flush_signals(current);
 	set_sctp_conn_flag = 0;
 	sock = np->np_socket;
-	ip_proto = np->np_ip_proto;
-	sock_type = np->np_sock_type;
 
 	spin_lock_bh(&np->np_thread_lock);
 	if (np->np_thread_state == ISCSI_NP_THREAD_RESET) {
diff --git a/drivers/target/iscsi/iscsi_target_nego.c b/drivers/target/iscsi/iscsi_target_nego.c
index e89fa7457254..2dba448cac19 100644
--- a/drivers/target/iscsi/iscsi_target_nego.c
+++ b/drivers/target/iscsi/iscsi_target_nego.c
@@ -90,7 +90,7 @@ int extract_param(
 		return -1;
 
 	if (len > max_length) {
-		pr_err("Length of input: %d exeeds max_length:"
+		pr_err("Length of input: %d exceeds max_length:"
 			" %d\n", len, max_length);
 		return -1;
 	}
@@ -173,13 +173,11 @@ static int iscsi_target_check_login_request(
 	struct iscsi_conn *conn,
 	struct iscsi_login *login)
 {
-	int req_csg, req_nsg, rsp_csg, rsp_nsg;
+	int req_csg, req_nsg;
 	u32 payload_length;
 	struct iscsi_login_req *login_req;
-	struct iscsi_login_rsp *login_rsp;
 
 	login_req = (struct iscsi_login_req *) login->req;
-	login_rsp = (struct iscsi_login_rsp *) login->rsp;
 	payload_length = ntoh24(login_req->dlength);
 
 	switch (login_req->opcode & ISCSI_OPCODE_MASK) {
@@ -203,9 +201,7 @@
 	}
 
 	req_csg = (login_req->flags & ISCSI_FLAG_LOGIN_CURRENT_STAGE_MASK) >> 2;
-	rsp_csg = (login_rsp->flags & ISCSI_FLAG_LOGIN_CURRENT_STAGE_MASK) >> 2;
 	req_nsg = (login_req->flags & ISCSI_FLAG_LOGIN_NEXT_STAGE_MASK);
-	rsp_nsg = (login_rsp->flags & ISCSI_FLAG_LOGIN_NEXT_STAGE_MASK);
 
 	if (req_csg != login->current_stage) {
 		pr_err("Initiator unexpectedly changed login stage"
@@ -753,12 +749,10 @@ static int iscsi_target_locate_portal(
 	struct iscsi_session *sess = conn->sess;
 	struct iscsi_tiqn *tiqn;
 	struct iscsi_login_req *login_req;
-	struct iscsi_targ_login_rsp *login_rsp;
 	u32 payload_length;
 	int sessiontype = 0, ret = 0;
 
 	login_req = (struct iscsi_login_req *) login->req;
-	login_rsp = (struct iscsi_targ_login_rsp *) login->rsp;
 	payload_length = ntoh24(login_req->dlength);
 
 	login->first_request = 1;
diff --git a/drivers/target/iscsi/iscsi_target_nodeattrib.c b/drivers/target/iscsi/iscsi_target_nodeattrib.c
index b3c699c4fe8c..11dc2936af76 100644
--- a/drivers/target/iscsi/iscsi_target_nodeattrib.c
+++ b/drivers/target/iscsi/iscsi_target_nodeattrib.c
@@ -49,7 +49,7 @@ void iscsit_set_default_node_attribues(
 	a->default_erl = NA_DEFAULT_ERL;
 }
 
-extern int iscsit_na_dataout_timeout(
+int iscsit_na_dataout_timeout(
 	struct iscsi_node_acl *acl,
 	u32 dataout_timeout)
 {
@@ -74,7 +74,7 @@ extern int iscsit_na_dataout_timeout(
 	return 0;
 }
 
-extern int iscsit_na_dataout_timeout_retries(
+int iscsit_na_dataout_timeout_retries(
 	struct iscsi_node_acl *acl,
 	u32 dataout_timeout_retries)
 {
@@ -100,7 +100,7 @@ extern int iscsit_na_dataout_timeout_retries(
 	return 0;
 }
 
-extern int iscsit_na_nopin_timeout(
+int iscsit_na_nopin_timeout(
 	struct iscsi_node_acl *acl,
 	u32 nopin_timeout)
 {
@@ -155,7 +155,7 @@ extern int iscsit_na_nopin_timeout(
 	return 0;
 }
 
-extern int iscsit_na_nopin_response_timeout(
+int iscsit_na_nopin_response_timeout(
 	struct iscsi_node_acl *acl,
 	u32 nopin_response_timeout)
 {
@@ -181,7 +181,7 @@ extern int iscsit_na_nopin_response_timeout(
 	return 0;
 }
 
-extern int iscsit_na_random_datain_pdu_offsets(
+int iscsit_na_random_datain_pdu_offsets(
 	struct iscsi_node_acl *acl,
 	u32 random_datain_pdu_offsets)
 {
@@ -201,7 +201,7 @@ extern int iscsit_na_random_datain_pdu_offsets(
 	return 0;
 }
 
-extern int iscsit_na_random_datain_seq_offsets(
+int iscsit_na_random_datain_seq_offsets(
 	struct iscsi_node_acl *acl,
 	u32 random_datain_seq_offsets)
 {
@@ -221,7 +221,7 @@ extern int iscsit_na_random_datain_seq_offsets(
 	return 0;
 }
 
-extern int iscsit_na_random_r2t_offsets(
+int iscsit_na_random_r2t_offsets(
 	struct iscsi_node_acl *acl,
 	u32 random_r2t_offsets)
 {
@@ -241,7 +241,7 @@ extern int iscsit_na_random_r2t_offsets(
 	return 0;
 }
 
-extern int iscsit_na_default_erl(
+int iscsit_na_default_erl(
 	struct iscsi_node_acl *acl,
 	u32 default_erl)
 {
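
All eight hunks above are one mechanical cleanup: the extern storage-class specifier belongs on the declarations in the header, not on the definitions here (GCC tolerates it on a definition, but it is non-idiomatic and defeats sparse checking). In miniature, using one of the functions above and assuming the usual matching declaration in iscsi_target_nodeattrib.h:

	/* iscsi_target_nodeattrib.h: declaration, where extern is conventional */
	extern int iscsit_na_default_erl(struct iscsi_node_acl *, u32);

	/* iscsi_target_nodeattrib.c: definition, with no storage-class specifier */
	int iscsit_na_default_erl(struct iscsi_node_acl *acl, u32 default_erl)
	{
		/* body unchanged by the cleanup */
		return 0;
	}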
diff --git a/drivers/target/iscsi/iscsi_target_parameters.c b/drivers/target/iscsi/iscsi_target_parameters.c
index 5b773160200f..eb05c9d751ea 100644
--- a/drivers/target/iscsi/iscsi_target_parameters.c
+++ b/drivers/target/iscsi/iscsi_target_parameters.c
@@ -874,8 +874,8 @@ static int iscsi_check_numerical_value(struct iscsi_param *param, char *value_pt
 static int iscsi_check_numerical_range_value(struct iscsi_param *param, char *value)
 {
 	char *left_val_ptr = NULL, *right_val_ptr = NULL;
-	char *tilde_ptr = NULL, *tmp_ptr = NULL;
-	u32 left_val, right_val, local_left_val, local_right_val;
+	char *tilde_ptr = NULL;
+	u32 left_val, right_val, local_left_val;
 
 	if (strcmp(param->name, IFMARKINT) &&
 	    strcmp(param->name, OFMARKINT)) {
@@ -903,8 +903,8 @@
 	if (iscsi_check_numerical_value(param, right_val_ptr) < 0)
 		return -1;
 
-	left_val = simple_strtoul(left_val_ptr, &tmp_ptr, 0);
-	right_val = simple_strtoul(right_val_ptr, &tmp_ptr, 0);
+	left_val = simple_strtoul(left_val_ptr, NULL, 0);
+	right_val = simple_strtoul(right_val_ptr, NULL, 0);
 	*tilde_ptr = '~';
 
 	if (right_val < left_val) {
@@ -928,8 +928,7 @@
 	left_val_ptr = param->value;
 	right_val_ptr = param->value + strlen(left_val_ptr) + 1;
 
-	local_left_val = simple_strtoul(left_val_ptr, &tmp_ptr, 0);
-	local_right_val = simple_strtoul(right_val_ptr, &tmp_ptr, 0);
+	local_left_val = simple_strtoul(left_val_ptr, NULL, 0);
 	*tilde_ptr = '~';
 
 	if (param->set_param) {
@@ -1189,7 +1188,7 @@ static int iscsi_check_proposer_state(struct iscsi_param *param, char *value)
 	if (IS_TYPE_NUMBER_RANGE(param)) {
 		u32 left_val = 0, right_val = 0, recieved_value = 0;
 		char *left_val_ptr = NULL, *right_val_ptr = NULL;
-		char *tilde_ptr = NULL, *tmp_ptr = NULL;
+		char *tilde_ptr = NULL;
 
 		if (!strcmp(value, IRRELEVANT) || !strcmp(value, REJECT)) {
 			if (iscsi_update_param_value(param, value) < 0)
@@ -1213,9 +1212,9 @@
 
 		left_val_ptr = param->value;
 		right_val_ptr = param->value + strlen(left_val_ptr) + 1;
-		left_val = simple_strtoul(left_val_ptr, &tmp_ptr, 0);
-		right_val = simple_strtoul(right_val_ptr, &tmp_ptr, 0);
-		recieved_value = simple_strtoul(value, &tmp_ptr, 0);
+		left_val = simple_strtoul(left_val_ptr, NULL, 0);
+		right_val = simple_strtoul(right_val_ptr, NULL, 0);
+		recieved_value = simple_strtoul(value, NULL, 0);
 
 		*tilde_ptr = '~';
 
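
The tmp_ptr removals above rely on the kernel's simple_strtoul() accepting a NULL end pointer (it only writes through endp after checking it), so a caller that never looks at where parsing stopped can pass NULL outright; the never-read local_right_val falls out at the same time. For reference, the signature and the resulting call shape:

	/* lib/vsprintf.c: endp may be NULL if the caller does not need it */
	unsigned long simple_strtoul(const char *cp, char **endp, unsigned int base);

	left_val = simple_strtoul(left_val_ptr, NULL, 0);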
diff --git a/drivers/target/iscsi/iscsi_target_tmr.c b/drivers/target/iscsi/iscsi_target_tmr.c
index 255ed35da815..e01da9d2b37e 100644
--- a/drivers/target/iscsi/iscsi_target_tmr.c
+++ b/drivers/target/iscsi/iscsi_target_tmr.c
@@ -250,7 +250,7 @@ static int iscsit_task_reassign_complete_write(
 	 * so if we have received all DataOUT we can safety ignore Initiator.
 	 */
 	if (cmd->cmd_flags & ICF_GOT_LAST_DATAOUT) {
-		if (!atomic_read(&cmd->se_cmd.t_transport_sent)) {
+		if (!(cmd->se_cmd.transport_state & CMD_T_SENT)) {
 			pr_debug("WRITE ITT: 0x%08x: t_state: %d"
 				" never sent to transport\n",
 				cmd->init_task_tag, cmd->se_cmd.t_state);
@@ -314,7 +314,7 @@ static int iscsit_task_reassign_complete_read(
 		cmd->acked_data_sn = (tmr_req->exp_data_sn - 1);
 	}
 
-	if (!atomic_read(&cmd->se_cmd.t_transport_sent)) {
+	if (!(cmd->se_cmd.transport_state & CMD_T_SENT)) {
 		pr_debug("READ ITT: 0x%08x: t_state: %d never sent to"
 			" transport\n", cmd->init_task_tag,
 			cmd->se_cmd.t_state);
@@ -322,7 +322,7 @@
 		return 0;
 	}
 
-	if (!atomic_read(&se_cmd->t_transport_complete)) {
+	if (!(se_cmd->transport_state & CMD_T_COMPLETE)) {
 		pr_err("READ ITT: 0x%08x: t_state: %d, never returned"
 			" from transport\n", cmd->init_task_tag,
 			cmd->se_cmd.t_state);
diff --git a/drivers/target/iscsi/iscsi_target_tq.c b/drivers/target/iscsi/iscsi_target_tq.c
index 0baac5bcebd4..977e1cf90e83 100644
--- a/drivers/target/iscsi/iscsi_target_tq.c
+++ b/drivers/target/iscsi/iscsi_target_tq.c
@@ -536,12 +536,6 @@ int iscsi_thread_set_init(void)
 		return -ENOMEM;
 	}
 
-	spin_lock_init(&active_ts_lock);
-	spin_lock_init(&inactive_ts_lock);
-	spin_lock_init(&ts_bitmap_lock);
-	INIT_LIST_HEAD(&active_ts_list);
-	INIT_LIST_HEAD(&inactive_ts_list);
-
 	return 0;
 }
 
diff --git a/drivers/target/iscsi/iscsi_target_util.c b/drivers/target/iscsi/iscsi_target_util.c
index 11287e1ece13..4eba86d2bd82 100644
--- a/drivers/target/iscsi/iscsi_target_util.c
+++ b/drivers/target/iscsi/iscsi_target_util.c
@@ -229,6 +229,7 @@ struct iscsi_cmd *iscsit_allocate_se_cmd_for_tmr(
 {
 	struct iscsi_cmd *cmd;
 	struct se_cmd *se_cmd;
+	int rc;
 	u8 tcm_function;
 
 	cmd = iscsit_allocate_cmd(conn, GFP_KERNEL);
@@ -286,10 +287,8 @@
 		goto out;
 	}
 
-	se_cmd->se_tmr_req = core_tmr_alloc_req(se_cmd,
-				cmd->tmr_req, tcm_function,
-				GFP_KERNEL);
-	if (!se_cmd->se_tmr_req)
+	rc = core_tmr_alloc_req(se_cmd, cmd->tmr_req, tcm_function, GFP_KERNEL);
+	if (rc < 0)
 		goto out;
 
 	cmd->tmr_req->se_tmr_req = se_cmd->se_tmr_req;
diff --git a/drivers/target/loopback/tcm_loop.c b/drivers/target/loopback/tcm_loop.c
index c47ff7f59e57..a9b4eeefe9fc 100644
--- a/drivers/target/loopback/tcm_loop.c
+++ b/drivers/target/loopback/tcm_loop.c
@@ -44,138 +44,12 @@
 /* Local pointer to allocated TCM configfs fabric module */
 static struct target_fabric_configfs *tcm_loop_fabric_configfs;
 
+static struct workqueue_struct *tcm_loop_workqueue;
 static struct kmem_cache *tcm_loop_cmd_cache;
 
 static int tcm_loop_hba_no_cnt;
 
-/*
- * Allocate a tcm_loop cmd descriptor from target_core_mod code
- *
- * Can be called from interrupt context in tcm_loop_queuecommand() below
- */
-static struct se_cmd *tcm_loop_allocate_core_cmd(
-	struct tcm_loop_hba *tl_hba,
-	struct se_portal_group *se_tpg,
-	struct scsi_cmnd *sc)
-{
-	struct se_cmd *se_cmd;
-	struct se_session *se_sess;
-	struct tcm_loop_nexus *tl_nexus = tl_hba->tl_nexus;
-	struct tcm_loop_cmd *tl_cmd;
-	int sam_task_attr;
-
-	if (!tl_nexus) {
-		scmd_printk(KERN_ERR, sc, "TCM_Loop I_T Nexus"
-				" does not exist\n");
-		set_host_byte(sc, DID_ERROR);
-		return NULL;
-	}
-	se_sess = tl_nexus->se_sess;
-
-	tl_cmd = kmem_cache_zalloc(tcm_loop_cmd_cache, GFP_ATOMIC);
-	if (!tl_cmd) {
-		pr_err("Unable to allocate struct tcm_loop_cmd\n");
-		set_host_byte(sc, DID_ERROR);
-		return NULL;
-	}
-	se_cmd = &tl_cmd->tl_se_cmd;
-	/*
-	 * Save the pointer to struct scsi_cmnd *sc
-	 */
-	tl_cmd->sc = sc;
-	/*
-	 * Locate the SAM Task Attr from struct scsi_cmnd *
-	 */
-	if (sc->device->tagged_supported) {
-		switch (sc->tag) {
-		case HEAD_OF_QUEUE_TAG:
-			sam_task_attr = MSG_HEAD_TAG;
-			break;
-		case ORDERED_QUEUE_TAG:
-			sam_task_attr = MSG_ORDERED_TAG;
-			break;
-		default:
-			sam_task_attr = MSG_SIMPLE_TAG;
-			break;
-		}
-	} else
-		sam_task_attr = MSG_SIMPLE_TAG;
-
-	/*
-	 * Initialize struct se_cmd descriptor from target_core_mod infrastructure
-	 */
-	transport_init_se_cmd(se_cmd, se_tpg->se_tpg_tfo, se_sess,
-			scsi_bufflen(sc), sc->sc_data_direction, sam_task_attr,
-			&tl_cmd->tl_sense_buf[0]);
-
-	if (scsi_bidi_cmnd(sc))
-		se_cmd->se_cmd_flags |= SCF_BIDI;
-
-	/*
-	 * Locate the struct se_lun pointer and attach it to struct se_cmd
-	 */
-	if (transport_lookup_cmd_lun(se_cmd, tl_cmd->sc->device->lun) < 0) {
-		kmem_cache_free(tcm_loop_cmd_cache, tl_cmd);
-		set_host_byte(sc, DID_NO_CONNECT);
-		return NULL;
-	}
-
-	return se_cmd;
-}
-
-/*
- * Called by struct target_core_fabric_ops->new_cmd_map()
- *
- * Always called in process context.  A non zero return value
- * here will signal to handle an exception based on the return code.
- */
-static int tcm_loop_new_cmd_map(struct se_cmd *se_cmd)
-{
-	struct tcm_loop_cmd *tl_cmd = container_of(se_cmd,
-			struct tcm_loop_cmd, tl_se_cmd);
-	struct scsi_cmnd *sc = tl_cmd->sc;
-	struct scatterlist *sgl_bidi = NULL;
-	u32 sgl_bidi_count = 0;
-	int ret;
-	/*
-	 * Allocate the necessary tasks to complete the received CDB+data
-	 */
-	ret = transport_generic_allocate_tasks(se_cmd, sc->cmnd);
-	if (ret != 0)
-		return ret;
-	/*
-	 * For BIDI commands, pass in the extra READ buffer
-	 * to transport_generic_map_mem_to_cmd() below..
-	 */
-	if (se_cmd->se_cmd_flags & SCF_BIDI) {
-		struct scsi_data_buffer *sdb = scsi_in(sc);
-
-		sgl_bidi = sdb->table.sgl;
-		sgl_bidi_count = sdb->table.nents;
-	}
-	/*
-	 * Because some userspace code via scsi-generic do not memset their
-	 * associated read buffers, go ahead and do that here for type
-	 * SCF_SCSI_CONTROL_SG_IO_CDB.  Also note that this is currently
-	 * guaranteed to be a single SGL for SCF_SCSI_CONTROL_SG_IO_CDB
-	 * by target core in transport_generic_allocate_tasks() ->
-	 * transport_generic_cmd_sequencer().
-	 */
-	if (se_cmd->se_cmd_flags & SCF_SCSI_CONTROL_SG_IO_CDB &&
-	    se_cmd->data_direction == DMA_FROM_DEVICE) {
-		struct scatterlist *sg = scsi_sglist(sc);
-		unsigned char *buf = kmap(sg_page(sg)) + sg->offset;
-
-		if (buf != NULL) {
-			memset(buf, 0, sg->length);
-			kunmap(sg_page(sg));
-		}
-	}
-
-	/* Tell the core about our preallocated memory */
-	return transport_generic_map_mem_to_cmd(se_cmd, scsi_sglist(sc),
-			scsi_sg_count(sc), sgl_bidi, sgl_bidi_count);
-}
+static int tcm_loop_queue_status(struct se_cmd *se_cmd);
 
 /*
  * Called from struct target_core_fabric_ops->check_stop_free()
@@ -187,7 +61,7 @@ static int tcm_loop_check_stop_free(struct se_cmd *se_cmd)
 	 * pointer.  These will be released directly in tcm_loop_device_reset()
 	 * with transport_generic_free_cmd().
 	 */
-	if (se_cmd->se_tmr_req)
+	if (se_cmd->se_cmd_flags & SCF_SCSI_TMR_CDB)
 		return 0;
 	/*
 	 * Release the struct se_cmd, which will make a callback to release
@@ -263,50 +137,152 @@ static int tcm_loop_change_queue_depth(
 }
 
 /*
- * Main entry point from struct scsi_host_template for incoming SCSI CDB+Data
- * from Linux/SCSI subsystem for SCSI low level device drivers (LLDs)
+ * Locate the SAM Task Attr from struct scsi_cmnd *
 */
-static int tcm_loop_queuecommand(
-	struct Scsi_Host *sh,
-	struct scsi_cmnd *sc)
+static int tcm_loop_sam_attr(struct scsi_cmnd *sc)
 {
-	struct se_cmd *se_cmd;
-	struct se_portal_group *se_tpg;
+	if (sc->device->tagged_supported) {
+		switch (sc->tag) {
+		case HEAD_OF_QUEUE_TAG:
+			return MSG_HEAD_TAG;
+		case ORDERED_QUEUE_TAG:
+			return MSG_ORDERED_TAG;
+		default:
+			break;
+		}
+	}
+
+	return MSG_SIMPLE_TAG;
+}
+
+static void tcm_loop_submission_work(struct work_struct *work)
+{
+	struct tcm_loop_cmd *tl_cmd =
+		container_of(work, struct tcm_loop_cmd, work);
+	struct se_cmd *se_cmd = &tl_cmd->tl_se_cmd;
+	struct scsi_cmnd *sc = tl_cmd->sc;
+	struct tcm_loop_nexus *tl_nexus;
 	struct tcm_loop_hba *tl_hba;
 	struct tcm_loop_tpg *tl_tpg;
+	struct scatterlist *sgl_bidi = NULL;
+	u32 sgl_bidi_count = 0;
+	int ret;
 
-	pr_debug("tcm_loop_queuecommand() %d:%d:%d:%d got CDB: 0x%02x"
-		" scsi_buf_len: %u\n", sc->device->host->host_no,
-		sc->device->id, sc->device->channel, sc->device->lun,
-		sc->cmnd[0], scsi_bufflen(sc));
282 | /* | ||
283 | * Locate the tcm_loop_hba_t pointer | ||
284 | */ | ||
285 | tl_hba = *(struct tcm_loop_hba **)shost_priv(sc->device->host); | 171 | tl_hba = *(struct tcm_loop_hba **)shost_priv(sc->device->host); |
286 | tl_tpg = &tl_hba->tl_hba_tpgs[sc->device->id]; | 172 | tl_tpg = &tl_hba->tl_hba_tpgs[sc->device->id]; |
173 | |||
287 | /* | 174 | /* |
288 | * Ensure that this tl_tpg reference from the incoming sc->device->id | 175 | * Ensure that this tl_tpg reference from the incoming sc->device->id |
289 | * has already been configured via tcm_loop_make_naa_tpg(). | 176 | * has already been configured via tcm_loop_make_naa_tpg(). |
290 | */ | 177 | */ |
291 | if (!tl_tpg->tl_hba) { | 178 | if (!tl_tpg->tl_hba) { |
292 | set_host_byte(sc, DID_NO_CONNECT); | 179 | set_host_byte(sc, DID_NO_CONNECT); |
293 | sc->scsi_done(sc); | 180 | goto out_done; |
294 | return 0; | ||
295 | } | 181 | } |
296 | se_tpg = &tl_tpg->tl_se_tpg; | 182 | |
183 | tl_nexus = tl_hba->tl_nexus; | ||
184 | if (!tl_nexus) { | ||
185 | scmd_printk(KERN_ERR, sc, "TCM_Loop I_T Nexus" | ||
186 | " does not exist\n"); | ||
187 | set_host_byte(sc, DID_ERROR); | ||
188 | goto out_done; | ||
189 | } | ||
190 | |||
191 | transport_init_se_cmd(se_cmd, tl_tpg->tl_se_tpg.se_tpg_tfo, | ||
192 | tl_nexus->se_sess, | ||
193 | scsi_bufflen(sc), sc->sc_data_direction, | ||
194 | tcm_loop_sam_attr(sc), &tl_cmd->tl_sense_buf[0]); | ||
195 | |||
196 | if (scsi_bidi_cmnd(sc)) { | ||
197 | struct scsi_data_buffer *sdb = scsi_in(sc); | ||
198 | |||
199 | sgl_bidi = sdb->table.sgl; | ||
200 | sgl_bidi_count = sdb->table.nents; | ||
201 | se_cmd->se_cmd_flags |= SCF_BIDI; | ||
202 | |||
203 | } | ||
204 | |||
205 | if (transport_lookup_cmd_lun(se_cmd, tl_cmd->sc->device->lun) < 0) { | ||
206 | kmem_cache_free(tcm_loop_cmd_cache, tl_cmd); | ||
207 | set_host_byte(sc, DID_NO_CONNECT); | ||
208 | goto out_done; | ||
209 | } | ||
210 | |||
297 | /* | 211 | /* |
298 | * Determine the SAM Task Attribute and allocate tl_cmd and | 212 | * Because some userspace code using scsi-generic does not memset its |
299 | * tl_cmd->tl_se_cmd from TCM infrastructure | 213 | * associated read buffers, go ahead and do that here for type |
214 | * SCF_SCSI_CONTROL_SG_IO_CDB. Also note that this is currently | ||
215 | * guaranteed to be a single SGL for SCF_SCSI_CONTROL_SG_IO_CDB | ||
216 | * by target core in transport_generic_allocate_tasks() -> | ||
217 | * transport_generic_cmd_sequencer(). | ||
300 | */ | 218 | */ |
301 | se_cmd = tcm_loop_allocate_core_cmd(tl_hba, se_tpg, sc); | 219 | if (se_cmd->se_cmd_flags & SCF_SCSI_CONTROL_SG_IO_CDB && |
302 | if (!se_cmd) { | 220 | se_cmd->data_direction == DMA_FROM_DEVICE) { |
221 | struct scatterlist *sg = scsi_sglist(sc); | ||
222 | unsigned char *buf = kmap(sg_page(sg)) + sg->offset; | ||
223 | |||
224 | if (buf != NULL) { | ||
225 | memset(buf, 0, sg->length); | ||
226 | kunmap(sg_page(sg)); | ||
227 | } | ||
228 | } | ||
229 | |||
230 | ret = transport_generic_allocate_tasks(se_cmd, sc->cmnd); | ||
231 | if (ret == -ENOMEM) { | ||
232 | transport_send_check_condition_and_sense(se_cmd, | ||
233 | TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE, 0); | ||
234 | transport_generic_free_cmd(se_cmd, 0); | ||
235 | return; | ||
236 | } else if (ret < 0) { | ||
237 | if (se_cmd->se_cmd_flags & SCF_SCSI_RESERVATION_CONFLICT) | ||
238 | tcm_loop_queue_status(se_cmd); | ||
239 | else | ||
240 | transport_send_check_condition_and_sense(se_cmd, | ||
241 | se_cmd->scsi_sense_reason, 0); | ||
242 | transport_generic_free_cmd(se_cmd, 0); | ||
243 | return; | ||
244 | } | ||
245 | |||
246 | ret = transport_generic_map_mem_to_cmd(se_cmd, scsi_sglist(sc), | ||
247 | scsi_sg_count(sc), sgl_bidi, sgl_bidi_count); | ||
248 | if (ret) { | ||
249 | transport_send_check_condition_and_sense(se_cmd, | ||
250 | se_cmd->scsi_sense_reason, 0); | ||
251 | transport_generic_free_cmd(se_cmd, 0); | ||
252 | return; | ||
253 | } | ||
254 | transport_handle_cdb_direct(se_cmd); | ||
255 | return; | ||
256 | |||
257 | out_done: | ||
258 | sc->scsi_done(sc); | ||
259 | return; | ||
260 | } | ||
261 | |||
262 | /* | ||
263 | * ->queuecommand can be and usually is called from interrupt context, so | ||
264 | * defer the actual submission to a workqueue. | ||
265 | */ | ||
266 | static int tcm_loop_queuecommand(struct Scsi_Host *sh, struct scsi_cmnd *sc) | ||
267 | { | ||
268 | struct tcm_loop_cmd *tl_cmd; | ||
269 | |||
270 | pr_debug("tcm_loop_queuecommand() %d:%d:%d:%d got CDB: 0x%02x" | ||
271 | " scsi_buf_len: %u\n", sc->device->host->host_no, | ||
272 | sc->device->id, sc->device->channel, sc->device->lun, | ||
273 | sc->cmnd[0], scsi_bufflen(sc)); | ||
274 | |||
275 | tl_cmd = kmem_cache_zalloc(tcm_loop_cmd_cache, GFP_ATOMIC); | ||
276 | if (!tl_cmd) { | ||
277 | pr_err("Unable to allocate struct tcm_loop_cmd\n"); | ||
278 | set_host_byte(sc, DID_ERROR); | ||
303 | sc->scsi_done(sc); | 279 | sc->scsi_done(sc); |
304 | return 0; | 280 | return 0; |
305 | } | 281 | } |
306 | /* | 282 | |
307 | * Queue up the newly allocated to be processed in TCM thread context. | 283 | tl_cmd->sc = sc; |
308 | */ | 284 | INIT_WORK(&tl_cmd->work, tcm_loop_submission_work); |
309 | transport_generic_handle_cdb_map(se_cmd); | 285 | queue_work(tcm_loop_workqueue, &tl_cmd->work); |
310 | return 0; | 286 | return 0; |
311 | } | 287 | } |
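The conversion above is the standard kernel recipe for getting out of (soft)irq context: embed a work_struct in the per-command descriptor, queue it from ->queuecommand(), and let the worker recover the descriptor with container_of() where it is free to sleep. A minimal self-contained sketch of that recipe follows; the my_cmd/my_wq/my_submit names are illustrative stand-ins, not code from this driver.

    #include <linux/slab.h>
    #include <linux/workqueue.h>

    /* Stand-in for tcm_loop_workqueue; created once during module
     * init with alloc_workqueue("my_wq", 0, 0). */
    static struct workqueue_struct *my_wq;

    struct my_cmd {
            struct work_struct work;        /* embedded: no second allocation */
            /* ... per-command state ... */
    };

    static void my_cmd_work(struct work_struct *work)
    {
            /* Recover the containing descriptor from its work member */
            struct my_cmd *cmd = container_of(work, struct my_cmd, work);

            /* Process context: may sleep, use GFP_KERNEL, take mutexes */
            kfree(cmd);
    }

    /* Entry point that may run in interrupt context */
    static int my_submit(void)
    {
            struct my_cmd *cmd = kzalloc(sizeof(*cmd), GFP_ATOMIC);

            if (!cmd)
                    return -ENOMEM;
            INIT_WORK(&cmd->work, my_cmd_work);
            queue_work(my_wq, &cmd->work);
            return 0;
    }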
312 | 288 | ||
@@ -324,7 +300,7 @@ static int tcm_loop_device_reset(struct scsi_cmnd *sc) | |||
324 | struct tcm_loop_nexus *tl_nexus; | 300 | struct tcm_loop_nexus *tl_nexus; |
325 | struct tcm_loop_tmr *tl_tmr = NULL; | 301 | struct tcm_loop_tmr *tl_tmr = NULL; |
326 | struct tcm_loop_tpg *tl_tpg; | 302 | struct tcm_loop_tpg *tl_tpg; |
327 | int ret = FAILED; | 303 | int ret = FAILED, rc; |
328 | /* | 304 | /* |
329 | * Locate the tcm_loop_hba_t pointer | 305 | * Locate the tcm_loop_hba_t pointer |
330 | */ | 306 | */ |
@@ -365,12 +341,9 @@ static int tcm_loop_device_reset(struct scsi_cmnd *sc) | |||
365 | transport_init_se_cmd(se_cmd, se_tpg->se_tpg_tfo, se_sess, 0, | 341 | transport_init_se_cmd(se_cmd, se_tpg->se_tpg_tfo, se_sess, 0, |
366 | DMA_NONE, MSG_SIMPLE_TAG, | 342 | DMA_NONE, MSG_SIMPLE_TAG, |
367 | &tl_cmd->tl_sense_buf[0]); | 343 | &tl_cmd->tl_sense_buf[0]); |
368 | /* | 344 | |
369 | * Allocate the LUN_RESET TMR | 345 | rc = core_tmr_alloc_req(se_cmd, tl_tmr, TMR_LUN_RESET, GFP_KERNEL); |
370 | */ | 346 | if (rc < 0) |
371 | se_cmd->se_tmr_req = core_tmr_alloc_req(se_cmd, tl_tmr, | ||
372 | TMR_LUN_RESET, GFP_KERNEL); | ||
373 | if (IS_ERR(se_cmd->se_tmr_req)) | ||
374 | goto release; | 347 | goto release; |
375 | /* | 348 | /* |
376 | * Locate the underlying TCM struct se_lun from sc->device->lun | 349 | * Locate the underlying TCM struct se_lun from sc->device->lun |
@@ -762,22 +735,6 @@ static u32 tcm_loop_get_inst_index(struct se_portal_group *se_tpg) | |||
762 | return 1; | 735 | return 1; |
763 | } | 736 | } |
764 | 737 | ||
765 | static int tcm_loop_is_state_remove(struct se_cmd *se_cmd) | ||
766 | { | ||
767 | /* | ||
768 | * Assume struct scsi_cmnd is not in remove state.. | ||
769 | */ | ||
770 | return 0; | ||
771 | } | ||
772 | |||
773 | static int tcm_loop_sess_logged_in(struct se_session *se_sess) | ||
774 | { | ||
775 | /* | ||
776 | * Assume that TL Nexus is always active | ||
777 | */ | ||
778 | return 1; | ||
779 | } | ||
780 | |||
781 | static u32 tcm_loop_sess_get_index(struct se_session *se_sess) | 738 | static u32 tcm_loop_sess_get_index(struct se_session *se_sess) |
782 | { | 739 | { |
783 | return 1; | 740 | return 1; |
@@ -811,19 +768,6 @@ static void tcm_loop_close_session(struct se_session *se_sess) | |||
811 | return; | 768 | return; |
812 | }; | 769 | }; |
813 | 770 | ||
814 | static void tcm_loop_stop_session( | ||
815 | struct se_session *se_sess, | ||
816 | int sess_sleep, | ||
817 | int conn_sleep) | ||
818 | { | ||
819 | return; | ||
820 | } | ||
821 | |||
822 | static void tcm_loop_fall_back_to_erl0(struct se_session *se_sess) | ||
823 | { | ||
824 | return; | ||
825 | } | ||
826 | |||
827 | static int tcm_loop_write_pending(struct se_cmd *se_cmd) | 771 | static int tcm_loop_write_pending(struct se_cmd *se_cmd) |
828 | { | 772 | { |
829 | /* | 773 | /* |
@@ -855,6 +799,9 @@ static int tcm_loop_queue_data_in(struct se_cmd *se_cmd) | |||
855 | 799 | ||
856 | sc->result = SAM_STAT_GOOD; | 800 | sc->result = SAM_STAT_GOOD; |
857 | set_host_byte(sc, DID_OK); | 801 | set_host_byte(sc, DID_OK); |
802 | if ((se_cmd->se_cmd_flags & SCF_OVERFLOW_BIT) || | ||
803 | (se_cmd->se_cmd_flags & SCF_UNDERFLOW_BIT)) | ||
804 | scsi_set_resid(sc, se_cmd->residual_count); | ||
858 | sc->scsi_done(sc); | 805 | sc->scsi_done(sc); |
859 | return 0; | 806 | return 0; |
860 | } | 807 | } |
@@ -880,6 +827,9 @@ static int tcm_loop_queue_status(struct se_cmd *se_cmd) | |||
880 | sc->result = se_cmd->scsi_status; | 827 | sc->result = se_cmd->scsi_status; |
881 | 828 | ||
882 | set_host_byte(sc, DID_OK); | 829 | set_host_byte(sc, DID_OK); |
830 | if ((se_cmd->se_cmd_flags & SCF_OVERFLOW_BIT) || | ||
831 | (se_cmd->se_cmd_flags & SCF_UNDERFLOW_BIT)) | ||
832 | scsi_set_resid(sc, se_cmd->residual_count); | ||
883 | sc->scsi_done(sc); | 833 | sc->scsi_done(sc); |
884 | return 0; | 834 | return 0; |
885 | } | 835 | } |
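The two residual hunks in this file (here and in tcm_loop_queue_data_in() above) close a gap where the loopback fabric never reported residuals: when target core flags an over- or underflow, se_cmd->residual_count carries the difference between what the initiator asked for and what was actually transferred, and scsi_set_resid() hands that back to the SCSI midlayer. One plausible way such a residual gets derived, purely as an illustration rather than target-core code:

    /* Illustration only: req_len is the initiator-requested length,
     * xfer_len the byte count the backend actually moved. */
    if (xfer_len < req_len) {
            cmd->se_cmd_flags |= SCF_UNDERFLOW_BIT;
            cmd->residual_count = req_len - xfer_len;
    } else if (xfer_len > req_len) {
            cmd->se_cmd_flags |= SCF_OVERFLOW_BIT;
            cmd->residual_count = xfer_len - req_len;
    }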
@@ -1361,7 +1311,6 @@ static struct configfs_attribute *tcm_loop_wwn_attrs[] = { | |||
1361 | static int tcm_loop_register_configfs(void) | 1311 | static int tcm_loop_register_configfs(void) |
1362 | { | 1312 | { |
1363 | struct target_fabric_configfs *fabric; | 1313 | struct target_fabric_configfs *fabric; |
1364 | struct config_group *tf_cg; | ||
1365 | int ret; | 1314 | int ret; |
1366 | /* | 1315 | /* |
1367 | * Set the TCM Loop HBA counter to zero | 1316 | * Set the TCM Loop HBA counter to zero |
@@ -1407,14 +1356,10 @@ static int tcm_loop_register_configfs(void) | |||
1407 | /* | 1356 | /* |
1408 | * Used for setting up remaining TCM resources in process context | 1357 | * Used for setting up remaining TCM resources in process context |
1409 | */ | 1358 | */ |
1410 | fabric->tf_ops.new_cmd_map = &tcm_loop_new_cmd_map; | ||
1411 | fabric->tf_ops.check_stop_free = &tcm_loop_check_stop_free; | 1359 | fabric->tf_ops.check_stop_free = &tcm_loop_check_stop_free; |
1412 | fabric->tf_ops.release_cmd = &tcm_loop_release_cmd; | 1360 | fabric->tf_ops.release_cmd = &tcm_loop_release_cmd; |
1413 | fabric->tf_ops.shutdown_session = &tcm_loop_shutdown_session; | 1361 | fabric->tf_ops.shutdown_session = &tcm_loop_shutdown_session; |
1414 | fabric->tf_ops.close_session = &tcm_loop_close_session; | 1362 | fabric->tf_ops.close_session = &tcm_loop_close_session; |
1415 | fabric->tf_ops.stop_session = &tcm_loop_stop_session; | ||
1416 | fabric->tf_ops.fall_back_to_erl0 = &tcm_loop_fall_back_to_erl0; | ||
1417 | fabric->tf_ops.sess_logged_in = &tcm_loop_sess_logged_in; | ||
1418 | fabric->tf_ops.sess_get_index = &tcm_loop_sess_get_index; | 1363 | fabric->tf_ops.sess_get_index = &tcm_loop_sess_get_index; |
1419 | fabric->tf_ops.sess_get_initiator_sid = NULL; | 1364 | fabric->tf_ops.sess_get_initiator_sid = NULL; |
1420 | fabric->tf_ops.write_pending = &tcm_loop_write_pending; | 1365 | fabric->tf_ops.write_pending = &tcm_loop_write_pending; |
@@ -1431,9 +1376,7 @@ static int tcm_loop_register_configfs(void) | |||
1431 | fabric->tf_ops.queue_tm_rsp = &tcm_loop_queue_tm_rsp; | 1376 | fabric->tf_ops.queue_tm_rsp = &tcm_loop_queue_tm_rsp; |
1432 | fabric->tf_ops.set_fabric_sense_len = &tcm_loop_set_fabric_sense_len; | 1377 | fabric->tf_ops.set_fabric_sense_len = &tcm_loop_set_fabric_sense_len; |
1433 | fabric->tf_ops.get_fabric_sense_len = &tcm_loop_get_fabric_sense_len; | 1378 | fabric->tf_ops.get_fabric_sense_len = &tcm_loop_get_fabric_sense_len; |
1434 | fabric->tf_ops.is_state_remove = &tcm_loop_is_state_remove; | ||
1435 | 1379 | ||
1436 | tf_cg = &fabric->tf_group; | ||
1437 | /* | 1380 | /* |
1438 | * Setup function pointers for generic logic in target_core_fabric_configfs.c | 1381 | * Setup function pointers for generic logic in target_core_fabric_configfs.c |
1439 | */ | 1382 | */ |
@@ -1490,7 +1433,11 @@ static void tcm_loop_deregister_configfs(void) | |||
1490 | 1433 | ||
1491 | static int __init tcm_loop_fabric_init(void) | 1434 | static int __init tcm_loop_fabric_init(void) |
1492 | { | 1435 | { |
1493 | int ret; | 1436 | int ret = -ENOMEM; |
1437 | |||
1438 | tcm_loop_workqueue = alloc_workqueue("tcm_loop", 0, 0); | ||
1439 | if (!tcm_loop_workqueue) | ||
1440 | goto out; | ||
1494 | 1441 | ||
1495 | tcm_loop_cmd_cache = kmem_cache_create("tcm_loop_cmd_cache", | 1442 | tcm_loop_cmd_cache = kmem_cache_create("tcm_loop_cmd_cache", |
1496 | sizeof(struct tcm_loop_cmd), | 1443 | sizeof(struct tcm_loop_cmd), |
@@ -1499,20 +1446,27 @@ static int __init tcm_loop_fabric_init(void) | |||
1499 | if (!tcm_loop_cmd_cache) { | 1446 | if (!tcm_loop_cmd_cache) { |
1500 | pr_debug("kmem_cache_create() for" | 1447 | pr_debug("kmem_cache_create() for" |
1501 | " tcm_loop_cmd_cache failed\n"); | 1448 | " tcm_loop_cmd_cache failed\n"); |
1502 | return -ENOMEM; | 1449 | goto out_destroy_workqueue; |
1503 | } | 1450 | } |
1504 | 1451 | ||
1505 | ret = tcm_loop_alloc_core_bus(); | 1452 | ret = tcm_loop_alloc_core_bus(); |
1506 | if (ret) | 1453 | if (ret) |
1507 | return ret; | 1454 | goto out_destroy_cache; |
1508 | 1455 | ||
1509 | ret = tcm_loop_register_configfs(); | 1456 | ret = tcm_loop_register_configfs(); |
1510 | if (ret) { | 1457 | if (ret) |
1511 | tcm_loop_release_core_bus(); | 1458 | goto out_release_core_bus; |
1512 | return ret; | ||
1513 | } | ||
1514 | 1459 | ||
1515 | return 0; | 1460 | return 0; |
1461 | |||
1462 | out_release_core_bus: | ||
1463 | tcm_loop_release_core_bus(); | ||
1464 | out_destroy_cache: | ||
1465 | kmem_cache_destroy(tcm_loop_cmd_cache); | ||
1466 | out_destroy_workqueue: | ||
1467 | destroy_workqueue(tcm_loop_workqueue); | ||
1468 | out: | ||
1469 | return ret; | ||
1516 | } | 1470 | } |
1517 | 1471 | ||
1518 | static void __exit tcm_loop_fabric_exit(void) | 1472 | static void __exit tcm_loop_fabric_exit(void) |
@@ -1520,6 +1474,7 @@ static void __exit tcm_loop_fabric_exit(void) | |||
1520 | tcm_loop_deregister_configfs(); | 1474 | tcm_loop_deregister_configfs(); |
1521 | tcm_loop_release_core_bus(); | 1475 | tcm_loop_release_core_bus(); |
1522 | kmem_cache_destroy(tcm_loop_cmd_cache); | 1476 | kmem_cache_destroy(tcm_loop_cmd_cache); |
1477 | destroy_workqueue(tcm_loop_workqueue); | ||
1523 | } | 1478 | } |
1524 | 1479 | ||
1525 | MODULE_DESCRIPTION("TCM loopback virtual Linux/SCSI fabric module"); | 1480 | MODULE_DESCRIPTION("TCM loopback virtual Linux/SCSI fabric module"); |
diff --git a/drivers/target/loopback/tcm_loop.h b/drivers/target/loopback/tcm_loop.h index 15a036441471..7b54893db665 100644 --- a/drivers/target/loopback/tcm_loop.h +++ b/drivers/target/loopback/tcm_loop.h | |||
@@ -1,4 +1,4 @@ | |||
1 | #define TCM_LOOP_VERSION "v2.1-rc1" | 1 | #define TCM_LOOP_VERSION "v2.1-rc2" |
2 | #define TL_WWN_ADDR_LEN 256 | 2 | #define TL_WWN_ADDR_LEN 256 |
3 | #define TL_TPGS_PER_HBA 32 | 3 | #define TL_TPGS_PER_HBA 32 |
4 | 4 | ||
@@ -12,9 +12,9 @@ struct tcm_loop_cmd { | |||
12 | u32 sc_cmd_state; | 12 | u32 sc_cmd_state; |
13 | /* Pointer to the CDB+Data descriptor from Linux/SCSI subsystem */ | 13 | /* Pointer to the CDB+Data descriptor from Linux/SCSI subsystem */ |
14 | struct scsi_cmnd *sc; | 14 | struct scsi_cmnd *sc; |
15 | struct list_head *tl_cmd_list; | ||
16 | /* The TCM I/O descriptor that is accessed via container_of() */ | 15 | /* The TCM I/O descriptor that is accessed via container_of() */ |
17 | struct se_cmd tl_se_cmd; | 16 | struct se_cmd tl_se_cmd; |
17 | struct work_struct work; | ||
18 | /* Sense buffer that will be mapped into outgoing status */ | 18 | /* Sense buffer that will be mapped into outgoing status */ |
19 | unsigned char tl_sense_buf[TRANSPORT_SENSE_BUFFER]; | 19 | unsigned char tl_sense_buf[TRANSPORT_SENSE_BUFFER]; |
20 | }; | 20 | }; |
diff --git a/drivers/target/target_core_alua.c b/drivers/target/target_core_alua.c index 01a2691dfb47..c7746a3339d4 100644 --- a/drivers/target/target_core_alua.c +++ b/drivers/target/target_core_alua.c | |||
@@ -30,6 +30,7 @@ | |||
30 | #include <linux/export.h> | 30 | #include <linux/export.h> |
31 | #include <scsi/scsi.h> | 31 | #include <scsi/scsi.h> |
32 | #include <scsi/scsi_cmnd.h> | 32 | #include <scsi/scsi_cmnd.h> |
33 | #include <asm/unaligned.h> | ||
33 | 34 | ||
34 | #include <target/target_core_base.h> | 35 | #include <target/target_core_base.h> |
35 | #include <target/target_core_backend.h> | 36 | #include <target/target_core_backend.h> |
@@ -267,8 +268,7 @@ int target_emulate_set_target_port_groups(struct se_task *task) | |||
267 | * changed. | 268 | * changed. |
268 | */ | 269 | */ |
269 | if (primary) { | 270 | if (primary) { |
270 | tg_pt_id = ((ptr[2] << 8) & 0xff); | 271 | tg_pt_id = get_unaligned_be16(ptr + 2); |
271 | tg_pt_id |= (ptr[3] & 0xff); | ||
272 | /* | 272 | /* |
273 | * Locate the matching target port group ID from | 273 | * Locate the matching target port group ID from |
274 | * the global tg_pt_gp list | 274 | * the global tg_pt_gp list |
@@ -312,8 +312,7 @@ int target_emulate_set_target_port_groups(struct se_task *task) | |||
312 | * the Target Port in question for the incoming | 312 | * the Target Port in question for the incoming |
313 | * SET_TARGET_PORT_GROUPS op. | 313 | * SET_TARGET_PORT_GROUPS op. |
314 | */ | 314 | */ |
315 | rtpi = ((ptr[2] << 8) & 0xff); | 315 | rtpi = get_unaligned_be16(ptr + 2); |
316 | rtpi |= (ptr[3] & 0xff); | ||
317 | /* | 316 | /* |
318 | * Locate the matching relative target port identifier | 317 | * Locate the matching relative target port identifier |
319 | * for the struct se_device storage object. | 318 | * for the struct se_device storage object. |
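The get_unaligned_be16() conversion in both hunks is a real bug fix, not just cleanup: the open-coded form masked with 0xff after shifting left by 8, so the high byte was always discarded and any target port group ID or relative port identifier above 255 parsed as its low byte alone. A side-by-side comparison of the two forms:

    #include <asm/unaligned.h>

    /* Old, buggy: the mask zeroes everything the shift produced. */
    u16 buggy = ((ptr[2] << 8) & 0xff) | (ptr[3] & 0xff);   /* == ptr[3] */

    /* New: reads the big-endian 16-bit field as intended. */
    u16 fixed = get_unaligned_be16(ptr + 2);    /* (ptr[2] << 8) | ptr[3] */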
diff --git a/drivers/target/target_core_cdb.c b/drivers/target/target_core_cdb.c index f3d71fa88a28..30a67707036f 100644 --- a/drivers/target/target_core_cdb.c +++ b/drivers/target/target_core_cdb.c | |||
@@ -66,32 +66,15 @@ target_fill_alua_data(struct se_port *port, unsigned char *buf) | |||
66 | } | 66 | } |
67 | 67 | ||
68 | static int | 68 | static int |
69 | target_emulate_inquiry_std(struct se_cmd *cmd) | 69 | target_emulate_inquiry_std(struct se_cmd *cmd, char *buf) |
70 | { | 70 | { |
71 | struct se_lun *lun = cmd->se_lun; | 71 | struct se_lun *lun = cmd->se_lun; |
72 | struct se_device *dev = cmd->se_dev; | 72 | struct se_device *dev = cmd->se_dev; |
73 | struct se_portal_group *tpg = lun->lun_sep->sep_tpg; | ||
74 | unsigned char *buf; | ||
75 | 73 | ||
76 | /* | 74 | /* Set RMB (removable media) for tape devices */ |
77 | * Make sure we at least have 6 bytes of INQUIRY response | 75 | if (dev->transport->get_device_type(dev) == TYPE_TAPE) |
78 | * payload going back for EVPD=0 | 76 | buf[1] = 0x80; |
79 | */ | ||
80 | if (cmd->data_length < 6) { | ||
81 | pr_err("SCSI Inquiry payload length: %u" | ||
82 | " too small for EVPD=0\n", cmd->data_length); | ||
83 | return -EINVAL; | ||
84 | } | ||
85 | 77 | ||
86 | buf = transport_kmap_data_sg(cmd); | ||
87 | |||
88 | if (dev == tpg->tpg_virt_lun0.lun_se_dev) { | ||
89 | buf[0] = 0x3f; /* Not connected */ | ||
90 | } else { | ||
91 | buf[0] = dev->transport->get_device_type(dev); | ||
92 | if (buf[0] == TYPE_TAPE) | ||
93 | buf[1] = 0x80; | ||
94 | } | ||
95 | buf[2] = dev->transport->get_device_rev(dev); | 78 | buf[2] = dev->transport->get_device_rev(dev); |
96 | 79 | ||
97 | /* | 80 | /* |
@@ -112,29 +95,13 @@ target_emulate_inquiry_std(struct se_cmd *cmd) | |||
112 | if (dev->se_sub_dev->t10_alua.alua_type == SPC3_ALUA_EMULATED) | 95 | if (dev->se_sub_dev->t10_alua.alua_type == SPC3_ALUA_EMULATED) |
113 | target_fill_alua_data(lun->lun_sep, buf); | 96 | target_fill_alua_data(lun->lun_sep, buf); |
114 | 97 | ||
115 | if (cmd->data_length < 8) { | 98 | buf[7] = 0x2; /* CmdQue=1 */ |
116 | buf[4] = 1; /* Set additional length to 1 */ | ||
117 | goto out; | ||
118 | } | ||
119 | |||
120 | buf[7] = 0x32; /* Sync=1 and CmdQue=1 */ | ||
121 | |||
122 | /* | ||
123 | * Do not include vendor, product, revision info in INQUIRY | ||
124 | * response payload for cdbs with a small allocation length. | ||
125 | */ | ||
126 | if (cmd->data_length < 36) { | ||
127 | buf[4] = 3; /* Set additional length to 3 */ | ||
128 | goto out; | ||
129 | } | ||
130 | 99 | ||
131 | snprintf(&buf[8], 8, "LIO-ORG"); | 100 | snprintf(&buf[8], 8, "LIO-ORG"); |
132 | snprintf(&buf[16], 16, "%s", dev->se_sub_dev->t10_wwn.model); | 101 | snprintf(&buf[16], 16, "%s", dev->se_sub_dev->t10_wwn.model); |
133 | snprintf(&buf[32], 4, "%s", dev->se_sub_dev->t10_wwn.revision); | 102 | snprintf(&buf[32], 4, "%s", dev->se_sub_dev->t10_wwn.revision); |
134 | buf[4] = 31; /* Set additional length to 31 */ | 103 | buf[4] = 31; /* Set additional length to 31 */ |
135 | 104 | ||
136 | out: | ||
137 | transport_kunmap_data_sg(cmd); | ||
138 | return 0; | 105 | return 0; |
139 | } | 106 | } |
140 | 107 | ||
@@ -152,12 +119,6 @@ target_emulate_evpd_80(struct se_cmd *cmd, unsigned char *buf) | |||
152 | unit_serial_len = strlen(dev->se_sub_dev->t10_wwn.unit_serial); | 119 | unit_serial_len = strlen(dev->se_sub_dev->t10_wwn.unit_serial); |
153 | unit_serial_len++; /* For NULL Terminator */ | 120 | unit_serial_len++; /* For NULL Terminator */ |
154 | 121 | ||
155 | if (((len + 4) + unit_serial_len) > cmd->data_length) { | ||
156 | len += unit_serial_len; | ||
157 | buf[2] = ((len >> 8) & 0xff); | ||
158 | buf[3] = (len & 0xff); | ||
159 | return 0; | ||
160 | } | ||
161 | len += sprintf(&buf[4], "%s", | 122 | len += sprintf(&buf[4], "%s", |
162 | dev->se_sub_dev->t10_wwn.unit_serial); | 123 | dev->se_sub_dev->t10_wwn.unit_serial); |
163 | len++; /* Extra Byte for NULL Terminator */ | 124 | len++; /* Extra Byte for NULL Terminator */ |
@@ -229,9 +190,6 @@ target_emulate_evpd_83(struct se_cmd *cmd, unsigned char *buf) | |||
229 | if (!(dev->se_sub_dev->su_dev_flags & SDF_EMULATED_VPD_UNIT_SERIAL)) | 190 | if (!(dev->se_sub_dev->su_dev_flags & SDF_EMULATED_VPD_UNIT_SERIAL)) |
230 | goto check_t10_vend_desc; | 191 | goto check_t10_vend_desc; |
231 | 192 | ||
232 | if (off + 20 > cmd->data_length) | ||
233 | goto check_t10_vend_desc; | ||
234 | |||
235 | /* CODE SET == Binary */ | 193 | /* CODE SET == Binary */ |
236 | buf[off++] = 0x1; | 194 | buf[off++] = 0x1; |
237 | 195 | ||
@@ -283,12 +241,6 @@ check_t10_vend_desc: | |||
283 | strlen(&dev->se_sub_dev->t10_wwn.unit_serial[0]); | 241 | strlen(&dev->se_sub_dev->t10_wwn.unit_serial[0]); |
284 | unit_serial_len++; /* For NULL Terminator */ | 242 | unit_serial_len++; /* For NULL Terminator */ |
285 | 243 | ||
286 | if ((len + (id_len + 4) + | ||
287 | (prod_len + unit_serial_len)) > | ||
288 | cmd->data_length) { | ||
289 | len += (prod_len + unit_serial_len); | ||
290 | goto check_port; | ||
291 | } | ||
292 | id_len += sprintf(&buf[off+12], "%s:%s", prod, | 244 | id_len += sprintf(&buf[off+12], "%s:%s", prod, |
293 | &dev->se_sub_dev->t10_wwn.unit_serial[0]); | 245 | &dev->se_sub_dev->t10_wwn.unit_serial[0]); |
294 | } | 246 | } |
@@ -306,7 +258,6 @@ check_t10_vend_desc: | |||
306 | /* | 258 | /* |
307 | * struct se_port is only set for INQUIRY VPD=1 through $FABRIC_MOD | 259 | * struct se_port is only set for INQUIRY VPD=1 through $FABRIC_MOD |
308 | */ | 260 | */ |
309 | check_port: | ||
310 | port = lun->lun_sep; | 261 | port = lun->lun_sep; |
311 | if (port) { | 262 | if (port) { |
312 | struct t10_alua_lu_gp *lu_gp; | 263 | struct t10_alua_lu_gp *lu_gp; |
@@ -323,10 +274,6 @@ check_port: | |||
323 | * Get the PROTOCOL IDENTIFIER as defined by spc4r17 | 274 | * Get the PROTOCOL IDENTIFIER as defined by spc4r17 |
324 | * section 7.5.1 Table 362 | 275 | * section 7.5.1 Table 362 |
325 | */ | 276 | */ |
326 | if (((len + 4) + 8) > cmd->data_length) { | ||
327 | len += 8; | ||
328 | goto check_tpgi; | ||
329 | } | ||
330 | buf[off] = | 277 | buf[off] = |
331 | (tpg->se_tpg_tfo->get_fabric_proto_ident(tpg) << 4); | 278 | (tpg->se_tpg_tfo->get_fabric_proto_ident(tpg) << 4); |
332 | buf[off++] |= 0x1; /* CODE SET == Binary */ | 279 | buf[off++] |= 0x1; /* CODE SET == Binary */ |
@@ -350,15 +297,10 @@ check_port: | |||
350 | * Get the PROTOCOL IDENTIFIER as defined by spc4r17 | 297 | * Get the PROTOCOL IDENTIFIER as defined by spc4r17 |
351 | * section 7.5.1 Table 362 | 298 | * section 7.5.1 Table 362 |
352 | */ | 299 | */ |
353 | check_tpgi: | ||
354 | if (dev->se_sub_dev->t10_alua.alua_type != | 300 | if (dev->se_sub_dev->t10_alua.alua_type != |
355 | SPC3_ALUA_EMULATED) | 301 | SPC3_ALUA_EMULATED) |
356 | goto check_scsi_name; | 302 | goto check_scsi_name; |
357 | 303 | ||
358 | if (((len + 4) + 8) > cmd->data_length) { | ||
359 | len += 8; | ||
360 | goto check_lu_gp; | ||
361 | } | ||
362 | tg_pt_gp_mem = port->sep_alua_tg_pt_gp_mem; | 304 | tg_pt_gp_mem = port->sep_alua_tg_pt_gp_mem; |
363 | if (!tg_pt_gp_mem) | 305 | if (!tg_pt_gp_mem) |
364 | goto check_lu_gp; | 306 | goto check_lu_gp; |
@@ -391,10 +333,6 @@ check_tpgi: | |||
391 | * section 7.7.3.8 | 333 | * section 7.7.3.8 |
392 | */ | 334 | */ |
393 | check_lu_gp: | 335 | check_lu_gp: |
394 | if (((len + 4) + 8) > cmd->data_length) { | ||
395 | len += 8; | ||
396 | goto check_scsi_name; | ||
397 | } | ||
398 | lu_gp_mem = dev->dev_alua_lu_gp_mem; | 336 | lu_gp_mem = dev->dev_alua_lu_gp_mem; |
399 | if (!lu_gp_mem) | 337 | if (!lu_gp_mem) |
400 | goto check_scsi_name; | 338 | goto check_scsi_name; |
@@ -435,10 +373,6 @@ check_scsi_name: | |||
435 | /* Header size + Designation descriptor */ | 373 | /* Header size + Designation descriptor */ |
436 | scsi_name_len += 4; | 374 | scsi_name_len += 4; |
437 | 375 | ||
438 | if (((len + 4) + scsi_name_len) > cmd->data_length) { | ||
439 | len += scsi_name_len; | ||
440 | goto set_len; | ||
441 | } | ||
442 | buf[off] = | 376 | buf[off] = |
443 | (tpg->se_tpg_tfo->get_fabric_proto_ident(tpg) << 4); | 377 | (tpg->se_tpg_tfo->get_fabric_proto_ident(tpg) << 4); |
444 | buf[off++] |= 0x3; /* CODE SET == UTF-8 */ | 378 | buf[off++] |= 0x3; /* CODE SET == UTF-8 */ |
@@ -474,7 +408,6 @@ check_scsi_name: | |||
474 | /* Header size + Designation descriptor */ | 408 | /* Header size + Designation descriptor */ |
475 | len += (scsi_name_len + 4); | 409 | len += (scsi_name_len + 4); |
476 | } | 410 | } |
477 | set_len: | ||
478 | buf[2] = ((len >> 8) & 0xff); | 411 | buf[2] = ((len >> 8) & 0xff); |
479 | buf[3] = (len & 0xff); /* Page Length for VPD 0x83 */ | 412 | buf[3] = (len & 0xff); /* Page Length for VPD 0x83 */ |
480 | return 0; | 413 | return 0; |
@@ -484,9 +417,6 @@ set_len: | |||
484 | static int | 417 | static int |
485 | target_emulate_evpd_86(struct se_cmd *cmd, unsigned char *buf) | 418 | target_emulate_evpd_86(struct se_cmd *cmd, unsigned char *buf) |
486 | { | 419 | { |
487 | if (cmd->data_length < 60) | ||
488 | return 0; | ||
489 | |||
490 | buf[3] = 0x3c; | 420 | buf[3] = 0x3c; |
491 | /* Set HEADSUP, ORDSUP, SIMPSUP */ | 421 | /* Set HEADSUP, ORDSUP, SIMPSUP */ |
492 | buf[5] = 0x07; | 422 | buf[5] = 0x07; |
@@ -512,20 +442,6 @@ target_emulate_evpd_b0(struct se_cmd *cmd, unsigned char *buf) | |||
512 | if (dev->se_sub_dev->se_dev_attrib.emulate_tpu || dev->se_sub_dev->se_dev_attrib.emulate_tpws) | 442 | if (dev->se_sub_dev->se_dev_attrib.emulate_tpu || dev->se_sub_dev->se_dev_attrib.emulate_tpws) |
513 | have_tp = 1; | 443 | have_tp = 1; |
514 | 444 | ||
515 | if (cmd->data_length < (0x10 + 4)) { | ||
516 | pr_debug("Received data_length: %u" | ||
517 | " too small for EVPD 0xb0\n", | ||
518 | cmd->data_length); | ||
519 | return -EINVAL; | ||
520 | } | ||
521 | |||
522 | if (have_tp && cmd->data_length < (0x3c + 4)) { | ||
523 | pr_debug("Received data_length: %u" | ||
524 | " too small for TPE=1 EVPD 0xb0\n", | ||
525 | cmd->data_length); | ||
526 | have_tp = 0; | ||
527 | } | ||
528 | |||
529 | buf[0] = dev->transport->get_device_type(dev); | 445 | buf[0] = dev->transport->get_device_type(dev); |
530 | buf[3] = have_tp ? 0x3c : 0x10; | 446 | buf[3] = have_tp ? 0x3c : 0x10; |
531 | 447 | ||
@@ -540,7 +456,7 @@ target_emulate_evpd_b0(struct se_cmd *cmd, unsigned char *buf) | |||
540 | /* | 456 | /* |
541 | * Set MAXIMUM TRANSFER LENGTH | 457 | * Set MAXIMUM TRANSFER LENGTH |
542 | */ | 458 | */ |
543 | put_unaligned_be32(dev->se_sub_dev->se_dev_attrib.max_sectors, &buf[8]); | 459 | put_unaligned_be32(dev->se_sub_dev->se_dev_attrib.fabric_max_sectors, &buf[8]); |
544 | 460 | ||
545 | /* | 461 | /* |
546 | * Set OPTIMAL TRANSFER LENGTH | 462 | * Set OPTIMAL TRANSFER LENGTH |
@@ -548,10 +464,9 @@ target_emulate_evpd_b0(struct se_cmd *cmd, unsigned char *buf) | |||
548 | put_unaligned_be32(dev->se_sub_dev->se_dev_attrib.optimal_sectors, &buf[12]); | 464 | put_unaligned_be32(dev->se_sub_dev->se_dev_attrib.optimal_sectors, &buf[12]); |
549 | 465 | ||
550 | /* | 466 | /* |
551 | * Exit now if we don't support TP or the initiator sent a too | 467 | * Exit now if we don't support TP. |
552 | * short buffer. | ||
553 | */ | 468 | */ |
554 | if (!have_tp || cmd->data_length < (0x3c + 4)) | 469 | if (!have_tp) |
555 | return 0; | 470 | return 0; |
556 | 471 | ||
557 | /* | 472 | /* |
@@ -589,10 +504,7 @@ target_emulate_evpd_b1(struct se_cmd *cmd, unsigned char *buf) | |||
589 | 504 | ||
590 | buf[0] = dev->transport->get_device_type(dev); | 505 | buf[0] = dev->transport->get_device_type(dev); |
591 | buf[3] = 0x3c; | 506 | buf[3] = 0x3c; |
592 | 507 | buf[5] = dev->se_sub_dev->se_dev_attrib.is_nonrot ? 1 : 0; | |
593 | if (cmd->data_length >= 5 && | ||
594 | dev->se_sub_dev->se_dev_attrib.is_nonrot) | ||
595 | buf[5] = 1; | ||
596 | 508 | ||
597 | return 0; | 509 | return 0; |
598 | } | 510 | } |
@@ -671,8 +583,6 @@ target_emulate_evpd_00(struct se_cmd *cmd, unsigned char *buf) | |||
671 | { | 583 | { |
672 | int p; | 584 | int p; |
673 | 585 | ||
674 | if (cmd->data_length < 8) | ||
675 | return 0; | ||
676 | /* | 586 | /* |
677 | * Only report the INQUIRY EVPD=1 pages after a valid NAA | 587 | * Only report the INQUIRY EVPD=1 pages after a valid NAA |
678 | * Registered Extended LUN WWN has been set via ConfigFS | 588 | * Registered Extended LUN WWN has been set via ConfigFS |
@@ -681,8 +591,7 @@ target_emulate_evpd_00(struct se_cmd *cmd, unsigned char *buf) | |||
681 | if (cmd->se_dev->se_sub_dev->su_dev_flags & | 591 | if (cmd->se_dev->se_sub_dev->su_dev_flags & |
682 | SDF_EMULATED_VPD_UNIT_SERIAL) { | 592 | SDF_EMULATED_VPD_UNIT_SERIAL) { |
683 | buf[3] = ARRAY_SIZE(evpd_handlers); | 593 | buf[3] = ARRAY_SIZE(evpd_handlers); |
684 | for (p = 0; p < min_t(int, ARRAY_SIZE(evpd_handlers), | 594 | for (p = 0; p < ARRAY_SIZE(evpd_handlers); ++p) |
685 | cmd->data_length - 4); ++p) | ||
686 | buf[p + 4] = evpd_handlers[p].page; | 595 | buf[p + 4] = evpd_handlers[p].page; |
687 | } | 596 | } |
688 | 597 | ||
@@ -693,45 +602,54 @@ int target_emulate_inquiry(struct se_task *task) | |||
693 | { | 602 | { |
694 | struct se_cmd *cmd = task->task_se_cmd; | 603 | struct se_cmd *cmd = task->task_se_cmd; |
695 | struct se_device *dev = cmd->se_dev; | 604 | struct se_device *dev = cmd->se_dev; |
696 | unsigned char *buf; | 605 | struct se_portal_group *tpg = cmd->se_lun->lun_sep->sep_tpg; |
606 | unsigned char *buf, *map_buf; | ||
697 | unsigned char *cdb = cmd->t_task_cdb; | 607 | unsigned char *cdb = cmd->t_task_cdb; |
698 | int p, ret; | 608 | int p, ret; |
699 | 609 | ||
610 | map_buf = transport_kmap_data_sg(cmd); | ||
611 | /* | ||
612 | * If SCF_PASSTHROUGH_SG_TO_MEM_NOALLOC is not set, then we | ||
613 | * know we actually allocated a full page. Otherwise, if the | ||
614 | * data buffer is too small, allocate a temporary buffer so we | ||
615 | * don't have to worry about overruns in all our INQUIRY | ||
616 | * emulation handling. | ||
617 | */ | ||
618 | if (cmd->data_length < SE_INQUIRY_BUF && | ||
619 | (cmd->se_cmd_flags & SCF_PASSTHROUGH_SG_TO_MEM_NOALLOC)) { | ||
620 | buf = kzalloc(SE_INQUIRY_BUF, GFP_KERNEL); | ||
621 | if (!buf) { | ||
622 | transport_kunmap_data_sg(cmd); | ||
623 | cmd->scsi_sense_reason = TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE; | ||
624 | return -ENOMEM; | ||
625 | } | ||
626 | } else { | ||
627 | buf = map_buf; | ||
628 | } | ||
629 | |||
630 | if (dev == tpg->tpg_virt_lun0.lun_se_dev) | ||
631 | buf[0] = 0x3f; /* Not connected */ | ||
632 | else | ||
633 | buf[0] = dev->transport->get_device_type(dev); | ||
634 | |||
700 | if (!(cdb[1] & 0x1)) { | 635 | if (!(cdb[1] & 0x1)) { |
701 | if (cdb[2]) { | 636 | if (cdb[2]) { |
702 | pr_err("INQUIRY with EVPD==0 but PAGE CODE=%02x\n", | 637 | pr_err("INQUIRY with EVPD==0 but PAGE CODE=%02x\n", |
703 | cdb[2]); | 638 | cdb[2]); |
704 | cmd->scsi_sense_reason = TCM_INVALID_CDB_FIELD; | 639 | cmd->scsi_sense_reason = TCM_INVALID_CDB_FIELD; |
705 | return -EINVAL; | 640 | ret = -EINVAL; |
641 | goto out; | ||
706 | } | 642 | } |
707 | 643 | ||
708 | ret = target_emulate_inquiry_std(cmd); | 644 | ret = target_emulate_inquiry_std(cmd, buf); |
709 | goto out; | 645 | goto out; |
710 | } | 646 | } |
711 | 647 | ||
712 | /* | ||
713 | * Make sure we at least have 4 bytes of INQUIRY response | ||
714 | * payload for 0x00 going back for EVPD=1. Note that 0x80 | ||
715 | * and 0x83 will check for enough payload data length and | ||
716 | * jump to set_len: label when there is not enough inquiry EVPD | ||
717 | * payload length left for the next outgoing EVPD metadata | ||
718 | */ | ||
719 | if (cmd->data_length < 4) { | ||
720 | pr_err("SCSI Inquiry payload length: %u" | ||
721 | " too small for EVPD=1\n", cmd->data_length); | ||
722 | cmd->scsi_sense_reason = TCM_INVALID_CDB_FIELD; | ||
723 | return -EINVAL; | ||
724 | } | ||
725 | |||
726 | buf = transport_kmap_data_sg(cmd); | ||
727 | |||
728 | buf[0] = dev->transport->get_device_type(dev); | ||
729 | |||
730 | for (p = 0; p < ARRAY_SIZE(evpd_handlers); ++p) { | 648 | for (p = 0; p < ARRAY_SIZE(evpd_handlers); ++p) { |
731 | if (cdb[2] == evpd_handlers[p].page) { | 649 | if (cdb[2] == evpd_handlers[p].page) { |
732 | buf[1] = cdb[2]; | 650 | buf[1] = cdb[2]; |
733 | ret = evpd_handlers[p].emulate(cmd, buf); | 651 | ret = evpd_handlers[p].emulate(cmd, buf); |
734 | goto out_unmap; | 652 | goto out; |
735 | } | 653 | } |
736 | } | 654 | } |
737 | 655 | ||
@@ -739,9 +657,13 @@ int target_emulate_inquiry(struct se_task *task) | |||
739 | cmd->scsi_sense_reason = TCM_INVALID_CDB_FIELD; | 657 | cmd->scsi_sense_reason = TCM_INVALID_CDB_FIELD; |
740 | ret = -EINVAL; | 658 | ret = -EINVAL; |
741 | 659 | ||
742 | out_unmap: | ||
743 | transport_kunmap_data_sg(cmd); | ||
744 | out: | 660 | out: |
661 | if (buf != map_buf) { | ||
662 | memcpy(map_buf, buf, cmd->data_length); | ||
663 | kfree(buf); | ||
664 | } | ||
665 | transport_kunmap_data_sg(cmd); | ||
666 | |||
745 | if (!ret) { | 667 | if (!ret) { |
746 | task->task_scsi_status = GOOD; | 668 | task->task_scsi_status = GOOD; |
747 | transport_complete_task(task, 1); | 669 | transport_complete_task(task, 1); |
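The rework above replaces per-handler allocation-length checks with a bounce buffer: when the mapped data buffer may be shorter than SE_INQUIRY_BUF, the emulation handlers write into a zeroed scratch buffer and only cmd->data_length bytes are copied back, so no handler can overrun a short INQUIRY allocation. A condensed sketch of the shape; emulate_into() and short_buffer are hypothetical placeholders, not names from this file:

    map_buf = transport_kmap_data_sg(cmd);
    buf = short_buffer ? kzalloc(SE_INQUIRY_BUF, GFP_KERNEL) : map_buf;

    emulate_into(buf);              /* handlers may use the full scratch area */

    if (buf != map_buf) {
            memcpy(map_buf, buf, cmd->data_length); /* truncate on copy-out */
            kfree(buf);
    }
    transport_kunmap_data_sg(cmd);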
@@ -772,11 +694,6 @@ int target_emulate_readcapacity(struct se_task *task) | |||
772 | buf[5] = (dev->se_sub_dev->se_dev_attrib.block_size >> 16) & 0xff; | 694 | buf[5] = (dev->se_sub_dev->se_dev_attrib.block_size >> 16) & 0xff; |
773 | buf[6] = (dev->se_sub_dev->se_dev_attrib.block_size >> 8) & 0xff; | 695 | buf[6] = (dev->se_sub_dev->se_dev_attrib.block_size >> 8) & 0xff; |
774 | buf[7] = dev->se_sub_dev->se_dev_attrib.block_size & 0xff; | 696 | buf[7] = dev->se_sub_dev->se_dev_attrib.block_size & 0xff; |
775 | /* | ||
776 | * Set max 32-bit blocks to signal SERVICE ACTION READ_CAPACITY_16 | ||
777 | */ | ||
778 | if (dev->se_sub_dev->se_dev_attrib.emulate_tpu || dev->se_sub_dev->se_dev_attrib.emulate_tpws) | ||
779 | put_unaligned_be32(0xFFFFFFFF, &buf[0]); | ||
780 | 697 | ||
781 | transport_kunmap_data_sg(cmd); | 698 | transport_kunmap_data_sg(cmd); |
782 | 699 | ||
diff --git a/drivers/target/target_core_configfs.c b/drivers/target/target_core_configfs.c index 6e043eeb1db9..cbb66537d230 100644 --- a/drivers/target/target_core_configfs.c +++ b/drivers/target/target_core_configfs.c | |||
@@ -52,8 +52,8 @@ | |||
52 | 52 | ||
53 | extern struct t10_alua_lu_gp *default_lu_gp; | 53 | extern struct t10_alua_lu_gp *default_lu_gp; |
54 | 54 | ||
55 | static struct list_head g_tf_list; | 55 | static LIST_HEAD(g_tf_list); |
56 | static struct mutex g_tf_lock; | 56 | static DEFINE_MUTEX(g_tf_lock); |
57 | 57 | ||
58 | struct target_core_configfs_attribute { | 58 | struct target_core_configfs_attribute { |
59 | struct configfs_attribute attr; | 59 | struct configfs_attribute attr; |
@@ -421,18 +421,6 @@ static int target_fabric_tf_ops_check( | |||
421 | pr_err("Missing tfo->close_session()\n"); | 421 | pr_err("Missing tfo->close_session()\n"); |
422 | return -EINVAL; | 422 | return -EINVAL; |
423 | } | 423 | } |
424 | if (!tfo->stop_session) { | ||
425 | pr_err("Missing tfo->stop_session()\n"); | ||
426 | return -EINVAL; | ||
427 | } | ||
428 | if (!tfo->fall_back_to_erl0) { | ||
429 | pr_err("Missing tfo->fall_back_to_erl0()\n"); | ||
430 | return -EINVAL; | ||
431 | } | ||
432 | if (!tfo->sess_logged_in) { | ||
433 | pr_err("Missing tfo->sess_logged_in()\n"); | ||
434 | return -EINVAL; | ||
435 | } | ||
436 | if (!tfo->sess_get_index) { | 424 | if (!tfo->sess_get_index) { |
437 | pr_err("Missing tfo->sess_get_index()\n"); | 425 | pr_err("Missing tfo->sess_get_index()\n"); |
438 | return -EINVAL; | 426 | return -EINVAL; |
@@ -477,10 +465,6 @@ static int target_fabric_tf_ops_check( | |||
477 | pr_err("Missing tfo->get_fabric_sense_len()\n"); | 465 | pr_err("Missing tfo->get_fabric_sense_len()\n"); |
478 | return -EINVAL; | 466 | return -EINVAL; |
479 | } | 467 | } |
480 | if (!tfo->is_state_remove) { | ||
481 | pr_err("Missing tfo->is_state_remove()\n"); | ||
482 | return -EINVAL; | ||
483 | } | ||
484 | /* | 468 | /* |
485 | * We at least require tfo->fabric_make_wwn(), tfo->fabric_drop_wwn() | 469 | * We at least require tfo->fabric_make_wwn(), tfo->fabric_drop_wwn() |
486 | * tfo->fabric_make_tpg() and tfo->fabric_drop_tpg() in | 470 | * tfo->fabric_make_tpg() and tfo->fabric_drop_tpg() in |
@@ -702,6 +686,9 @@ SE_DEV_ATTR_RO(hw_max_sectors); | |||
702 | DEF_DEV_ATTRIB(max_sectors); | 686 | DEF_DEV_ATTRIB(max_sectors); |
703 | SE_DEV_ATTR(max_sectors, S_IRUGO | S_IWUSR); | 687 | SE_DEV_ATTR(max_sectors, S_IRUGO | S_IWUSR); |
704 | 688 | ||
689 | DEF_DEV_ATTRIB(fabric_max_sectors); | ||
690 | SE_DEV_ATTR(fabric_max_sectors, S_IRUGO | S_IWUSR); | ||
691 | |||
705 | DEF_DEV_ATTRIB(optimal_sectors); | 692 | DEF_DEV_ATTRIB(optimal_sectors); |
706 | SE_DEV_ATTR(optimal_sectors, S_IRUGO | S_IWUSR); | 693 | SE_DEV_ATTR(optimal_sectors, S_IRUGO | S_IWUSR); |
707 | 694 | ||
@@ -741,6 +728,7 @@ static struct configfs_attribute *target_core_dev_attrib_attrs[] = { | |||
741 | &target_core_dev_attrib_block_size.attr, | 728 | &target_core_dev_attrib_block_size.attr, |
742 | &target_core_dev_attrib_hw_max_sectors.attr, | 729 | &target_core_dev_attrib_hw_max_sectors.attr, |
743 | &target_core_dev_attrib_max_sectors.attr, | 730 | &target_core_dev_attrib_max_sectors.attr, |
731 | &target_core_dev_attrib_fabric_max_sectors.attr, | ||
744 | &target_core_dev_attrib_optimal_sectors.attr, | 732 | &target_core_dev_attrib_optimal_sectors.attr, |
745 | &target_core_dev_attrib_hw_queue_depth.attr, | 733 | &target_core_dev_attrib_hw_queue_depth.attr, |
746 | &target_core_dev_attrib_queue_depth.attr, | 734 | &target_core_dev_attrib_queue_depth.attr, |
@@ -2304,7 +2292,7 @@ static ssize_t target_core_alua_tg_pt_gp_store_attr_alua_access_state( | |||
2304 | 2292 | ||
2305 | if (!(tg_pt_gp->tg_pt_gp_alua_access_type & TPGS_IMPLICT_ALUA)) { | 2293 | if (!(tg_pt_gp->tg_pt_gp_alua_access_type & TPGS_IMPLICT_ALUA)) { |
2306 | pr_err("Unable to process implict configfs ALUA" | 2294 | pr_err("Unable to process implict configfs ALUA" |
2307 | " transition while TPGS_IMPLICT_ALUA is diabled\n"); | 2295 | " transition while TPGS_IMPLICT_ALUA is disabled\n"); |
2308 | return -EINVAL; | 2296 | return -EINVAL; |
2309 | } | 2297 | } |
2310 | 2298 | ||
@@ -2865,7 +2853,6 @@ static void target_core_drop_subdev( | |||
2865 | struct se_subsystem_dev *se_dev = container_of(to_config_group(item), | 2853 | struct se_subsystem_dev *se_dev = container_of(to_config_group(item), |
2866 | struct se_subsystem_dev, se_dev_group); | 2854 | struct se_subsystem_dev, se_dev_group); |
2867 | struct se_hba *hba; | 2855 | struct se_hba *hba; |
2868 | struct se_subsystem_api *t; | ||
2869 | struct config_item *df_item; | 2856 | struct config_item *df_item; |
2870 | struct config_group *dev_cg, *tg_pt_gp_cg, *dev_stat_grp; | 2857 | struct config_group *dev_cg, *tg_pt_gp_cg, *dev_stat_grp; |
2871 | int i; | 2858 | int i; |
@@ -2873,7 +2860,6 @@ static void target_core_drop_subdev( | |||
2873 | hba = item_to_hba(&se_dev->se_dev_hba->hba_group.cg_item); | 2860 | hba = item_to_hba(&se_dev->se_dev_hba->hba_group.cg_item); |
2874 | 2861 | ||
2875 | mutex_lock(&hba->hba_access_mutex); | 2862 | mutex_lock(&hba->hba_access_mutex); |
2876 | t = hba->transport; | ||
2877 | 2863 | ||
2878 | dev_stat_grp = &se_dev->dev_stat_grps.stat_group; | 2864 | dev_stat_grp = &se_dev->dev_stat_grps.stat_group; |
2879 | for (i = 0; dev_stat_grp->default_groups[i]; i++) { | 2865 | for (i = 0; dev_stat_grp->default_groups[i]; i++) { |
@@ -3117,8 +3103,6 @@ static int __init target_core_init_configfs(void) | |||
3117 | config_group_init(&subsys->su_group); | 3103 | config_group_init(&subsys->su_group); |
3118 | mutex_init(&subsys->su_mutex); | 3104 | mutex_init(&subsys->su_mutex); |
3119 | 3105 | ||
3120 | INIT_LIST_HEAD(&g_tf_list); | ||
3121 | mutex_init(&g_tf_lock); | ||
3122 | ret = init_se_kmem_caches(); | 3106 | ret = init_se_kmem_caches(); |
3123 | if (ret < 0) | 3107 | if (ret < 0) |
3124 | return ret; | 3108 | return ret; |
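The LIST_HEAD()/DEFINE_MUTEX() conversion at the top of this file swaps runtime initialization for compile-time initializers, which is why the INIT_LIST_HEAD() and mutex_init() calls can be dropped from target_core_init_configfs(). The two spellings are equivalent:

    /* Compile-time initialization, as in this patch: */
    static LIST_HEAD(g_tf_list);            /* list head pointing at itself */
    static DEFINE_MUTEX(g_tf_lock);         /* unlocked mutex */

    /* ...versus declaring the objects and initializing them at
     * runtime inside an init function: */
    static struct list_head g_tf_list;
    static struct mutex g_tf_lock;

    INIT_LIST_HEAD(&g_tf_list);
    mutex_init(&g_tf_lock);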
diff --git a/drivers/target/target_core_device.c b/drivers/target/target_core_device.c index edbcabbf85f7..aa6267746383 100644 --- a/drivers/target/target_core_device.c +++ b/drivers/target/target_core_device.c | |||
@@ -72,7 +72,7 @@ int transport_lookup_cmd_lun(struct se_cmd *se_cmd, u32 unpacked_lun) | |||
72 | } | 72 | } |
73 | 73 | ||
74 | spin_lock_irqsave(&se_sess->se_node_acl->device_list_lock, flags); | 74 | spin_lock_irqsave(&se_sess->se_node_acl->device_list_lock, flags); |
75 | se_cmd->se_deve = &se_sess->se_node_acl->device_list[unpacked_lun]; | 75 | se_cmd->se_deve = se_sess->se_node_acl->device_list[unpacked_lun]; |
76 | if (se_cmd->se_deve->lun_flags & TRANSPORT_LUNFLAGS_INITIATOR_ACCESS) { | 76 | if (se_cmd->se_deve->lun_flags & TRANSPORT_LUNFLAGS_INITIATOR_ACCESS) { |
77 | struct se_dev_entry *deve = se_cmd->se_deve; | 77 | struct se_dev_entry *deve = se_cmd->se_deve; |
78 | 78 | ||
@@ -159,13 +159,8 @@ int transport_lookup_cmd_lun(struct se_cmd *se_cmd, u32 unpacked_lun) | |||
159 | dev->read_bytes += se_cmd->data_length; | 159 | dev->read_bytes += se_cmd->data_length; |
160 | spin_unlock_irqrestore(&dev->stats_lock, flags); | 160 | spin_unlock_irqrestore(&dev->stats_lock, flags); |
161 | 161 | ||
162 | /* | ||
163 | * Add the iscsi_cmd_t to the struct se_lun's cmd list. This list is used | ||
164 | * for tracking state of struct se_cmds during LUN shutdown events. | ||
165 | */ | ||
166 | spin_lock_irqsave(&se_lun->lun_cmd_lock, flags); | 162 | spin_lock_irqsave(&se_lun->lun_cmd_lock, flags); |
167 | list_add_tail(&se_cmd->se_lun_node, &se_lun->lun_cmd_list); | 163 | list_add_tail(&se_cmd->se_lun_node, &se_lun->lun_cmd_list); |
168 | atomic_set(&se_cmd->transport_lun_active, 1); | ||
169 | spin_unlock_irqrestore(&se_lun->lun_cmd_lock, flags); | 164 | spin_unlock_irqrestore(&se_lun->lun_cmd_lock, flags); |
170 | 165 | ||
171 | return 0; | 166 | return 0; |
@@ -187,7 +182,7 @@ int transport_lookup_tmr_lun(struct se_cmd *se_cmd, u32 unpacked_lun) | |||
187 | } | 182 | } |
188 | 183 | ||
189 | spin_lock_irqsave(&se_sess->se_node_acl->device_list_lock, flags); | 184 | spin_lock_irqsave(&se_sess->se_node_acl->device_list_lock, flags); |
190 | se_cmd->se_deve = &se_sess->se_node_acl->device_list[unpacked_lun]; | 185 | se_cmd->se_deve = se_sess->se_node_acl->device_list[unpacked_lun]; |
191 | deve = se_cmd->se_deve; | 186 | deve = se_cmd->se_deve; |
192 | 187 | ||
193 | if (deve->lun_flags & TRANSPORT_LUNFLAGS_INITIATOR_ACCESS) { | 188 | if (deve->lun_flags & TRANSPORT_LUNFLAGS_INITIATOR_ACCESS) { |
@@ -245,7 +240,7 @@ struct se_dev_entry *core_get_se_deve_from_rtpi( | |||
245 | 240 | ||
246 | spin_lock_irq(&nacl->device_list_lock); | 241 | spin_lock_irq(&nacl->device_list_lock); |
247 | for (i = 0; i < TRANSPORT_MAX_LUNS_PER_TPG; i++) { | 242 | for (i = 0; i < TRANSPORT_MAX_LUNS_PER_TPG; i++) { |
248 | deve = &nacl->device_list[i]; | 243 | deve = nacl->device_list[i]; |
249 | 244 | ||
250 | if (!(deve->lun_flags & TRANSPORT_LUNFLAGS_INITIATOR_ACCESS)) | 245 | if (!(deve->lun_flags & TRANSPORT_LUNFLAGS_INITIATOR_ACCESS)) |
251 | continue; | 246 | continue; |
@@ -291,7 +286,7 @@ int core_free_device_list_for_node( | |||
291 | 286 | ||
292 | spin_lock_irq(&nacl->device_list_lock); | 287 | spin_lock_irq(&nacl->device_list_lock); |
293 | for (i = 0; i < TRANSPORT_MAX_LUNS_PER_TPG; i++) { | 288 | for (i = 0; i < TRANSPORT_MAX_LUNS_PER_TPG; i++) { |
294 | deve = &nacl->device_list[i]; | 289 | deve = nacl->device_list[i]; |
295 | 290 | ||
296 | if (!(deve->lun_flags & TRANSPORT_LUNFLAGS_INITIATOR_ACCESS)) | 291 | if (!(deve->lun_flags & TRANSPORT_LUNFLAGS_INITIATOR_ACCESS)) |
297 | continue; | 292 | continue; |
@@ -311,7 +306,7 @@ int core_free_device_list_for_node( | |||
311 | } | 306 | } |
312 | spin_unlock_irq(&nacl->device_list_lock); | 307 | spin_unlock_irq(&nacl->device_list_lock); |
313 | 308 | ||
314 | kfree(nacl->device_list); | 309 | array_free(nacl->device_list, TRANSPORT_MAX_LUNS_PER_TPG); |
315 | nacl->device_list = NULL; | 310 | nacl->device_list = NULL; |
316 | 311 | ||
317 | return 0; | 312 | return 0; |
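The switch from &nacl->device_list[i] to nacl->device_list[i] throughout this file reflects device_list changing from one flat array of struct se_dev_entry to an array of pointers populated by array_zalloc(), so the TRANSPORT_MAX_LUNS_PER_TPG entries become many small allocations instead of one large contiguous one. A hedged sketch of what such a helper pair plausibly looks like; the signatures mirror the calls in this diff but are not copied from the in-tree helpers:

    static void *array_zalloc(int n, size_t size, gfp_t flags)
    {
            void **a;
            int i;

            a = kzalloc(n * sizeof(void *), flags);
            if (!a)
                    return NULL;
            for (i = 0; i < n; i++) {
                    a[i] = kzalloc(size, flags);
                    if (!a[i]) {
                            /* unwind the slots allocated so far */
                            while (--i >= 0)
                                    kfree(a[i]);
                            kfree(a);
                            return NULL;
                    }
            }
            return a;
    }

    static void array_free(void *array, int n)
    {
            void **a = array;
            int i;

            for (i = 0; i < n; i++)
                    kfree(a[i]);
            kfree(a);
    }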
@@ -323,7 +318,7 @@ void core_dec_lacl_count(struct se_node_acl *se_nacl, struct se_cmd *se_cmd) | |||
323 | unsigned long flags; | 318 | unsigned long flags; |
324 | 319 | ||
325 | spin_lock_irqsave(&se_nacl->device_list_lock, flags); | 320 | spin_lock_irqsave(&se_nacl->device_list_lock, flags); |
326 | deve = &se_nacl->device_list[se_cmd->orig_fe_lun]; | 321 | deve = se_nacl->device_list[se_cmd->orig_fe_lun]; |
327 | deve->deve_cmds--; | 322 | deve->deve_cmds--; |
328 | spin_unlock_irqrestore(&se_nacl->device_list_lock, flags); | 323 | spin_unlock_irqrestore(&se_nacl->device_list_lock, flags); |
329 | } | 324 | } |
@@ -336,7 +331,7 @@ void core_update_device_list_access( | |||
336 | struct se_dev_entry *deve; | 331 | struct se_dev_entry *deve; |
337 | 332 | ||
338 | spin_lock_irq(&nacl->device_list_lock); | 333 | spin_lock_irq(&nacl->device_list_lock); |
339 | deve = &nacl->device_list[mapped_lun]; | 334 | deve = nacl->device_list[mapped_lun]; |
340 | if (lun_access & TRANSPORT_LUNFLAGS_READ_WRITE) { | 335 | if (lun_access & TRANSPORT_LUNFLAGS_READ_WRITE) { |
341 | deve->lun_flags &= ~TRANSPORT_LUNFLAGS_READ_ONLY; | 336 | deve->lun_flags &= ~TRANSPORT_LUNFLAGS_READ_ONLY; |
342 | deve->lun_flags |= TRANSPORT_LUNFLAGS_READ_WRITE; | 337 | deve->lun_flags |= TRANSPORT_LUNFLAGS_READ_WRITE; |
@@ -361,7 +356,7 @@ int core_update_device_list_for_node( | |||
361 | int enable) | 356 | int enable) |
362 | { | 357 | { |
363 | struct se_port *port = lun->lun_sep; | 358 | struct se_port *port = lun->lun_sep; |
364 | struct se_dev_entry *deve = &nacl->device_list[mapped_lun]; | 359 | struct se_dev_entry *deve = nacl->device_list[mapped_lun]; |
365 | int trans = 0; | 360 | int trans = 0; |
366 | /* | 361 | /* |
367 | * If the MappedLUN entry is being disabled, the entry in | 362 | * If the MappedLUN entry is being disabled, the entry in |
@@ -475,7 +470,7 @@ void core_clear_lun_from_tpg(struct se_lun *lun, struct se_portal_group *tpg) | |||
475 | 470 | ||
476 | spin_lock_irq(&nacl->device_list_lock); | 471 | spin_lock_irq(&nacl->device_list_lock); |
477 | for (i = 0; i < TRANSPORT_MAX_LUNS_PER_TPG; i++) { | 472 | for (i = 0; i < TRANSPORT_MAX_LUNS_PER_TPG; i++) { |
478 | deve = &nacl->device_list[i]; | 473 | deve = nacl->device_list[i]; |
479 | if (lun != deve->se_lun) | 474 | if (lun != deve->se_lun) |
480 | continue; | 475 | continue; |
481 | spin_unlock_irq(&nacl->device_list_lock); | 476 | spin_unlock_irq(&nacl->device_list_lock); |
@@ -652,12 +647,13 @@ int target_report_luns(struct se_task *se_task) | |||
652 | { | 647 | { |
653 | struct se_cmd *se_cmd = se_task->task_se_cmd; | 648 | struct se_cmd *se_cmd = se_task->task_se_cmd; |
654 | struct se_dev_entry *deve; | 649 | struct se_dev_entry *deve; |
655 | struct se_lun *se_lun; | ||
656 | struct se_session *se_sess = se_cmd->se_sess; | 650 | struct se_session *se_sess = se_cmd->se_sess; |
657 | unsigned char *buf; | 651 | unsigned char *buf; |
658 | u32 cdb_offset = 0, lun_count = 0, offset = 8, i; | 652 | u32 lun_count = 0, offset = 8, i; |
659 | 653 | ||
660 | buf = (unsigned char *) transport_kmap_data_sg(se_cmd); | 654 | buf = transport_kmap_data_sg(se_cmd); |
655 | if (!buf) | ||
656 | return -ENOMEM; | ||
661 | 657 | ||
662 | /* | 658 | /* |
663 | * If no struct se_session pointer is present, this struct se_cmd is | 659 | * If no struct se_session pointer is present, this struct se_cmd is |
@@ -672,22 +668,20 @@ int target_report_luns(struct se_task *se_task) | |||
672 | 668 | ||
673 | spin_lock_irq(&se_sess->se_node_acl->device_list_lock); | 669 | spin_lock_irq(&se_sess->se_node_acl->device_list_lock); |
674 | for (i = 0; i < TRANSPORT_MAX_LUNS_PER_TPG; i++) { | 670 | for (i = 0; i < TRANSPORT_MAX_LUNS_PER_TPG; i++) { |
675 | deve = &se_sess->se_node_acl->device_list[i]; | 671 | deve = se_sess->se_node_acl->device_list[i]; |
676 | if (!(deve->lun_flags & TRANSPORT_LUNFLAGS_INITIATOR_ACCESS)) | 672 | if (!(deve->lun_flags & TRANSPORT_LUNFLAGS_INITIATOR_ACCESS)) |
677 | continue; | 673 | continue; |
678 | se_lun = deve->se_lun; | ||
679 | /* | 674 | /* |
680 | * We determine the correct LUN LIST LENGTH even once we | 675 | * We determine the correct LUN LIST LENGTH even once we |
681 | * have reached the initial allocation length. | 676 | * have reached the initial allocation length. |
682 | * See SPC2-R20 7.19. | 677 | * See SPC2-R20 7.19. |
683 | */ | 678 | */ |
684 | lun_count++; | 679 | lun_count++; |
685 | if ((cdb_offset + 8) >= se_cmd->data_length) | 680 | if ((offset + 8) > se_cmd->data_length) |
686 | continue; | 681 | continue; |
687 | 682 | ||
688 | int_to_scsilun(deve->mapped_lun, (struct scsi_lun *)&buf[offset]); | 683 | int_to_scsilun(deve->mapped_lun, (struct scsi_lun *)&buf[offset]); |
689 | offset += 8; | 684 | offset += 8; |
690 | cdb_offset += 8; | ||
691 | } | 685 | } |
692 | spin_unlock_irq(&se_sess->se_node_acl->device_list_lock); | 686 | spin_unlock_irq(&se_sess->se_node_acl->device_list_lock); |
693 | 687 | ||
@@ -695,12 +689,12 @@ int target_report_luns(struct se_task *se_task) | |||
695 | * See SPC3 r07, page 159. | 689 | * See SPC3 r07, page 159. |
696 | */ | 690 | */ |
697 | done: | 691 | done: |
698 | transport_kunmap_data_sg(se_cmd); | ||
699 | lun_count *= 8; | 692 | lun_count *= 8; |
700 | buf[0] = ((lun_count >> 24) & 0xff); | 693 | buf[0] = ((lun_count >> 24) & 0xff); |
701 | buf[1] = ((lun_count >> 16) & 0xff); | 694 | buf[1] = ((lun_count >> 16) & 0xff); |
702 | buf[2] = ((lun_count >> 8) & 0xff); | 695 | buf[2] = ((lun_count >> 8) & 0xff); |
703 | buf[3] = (lun_count & 0xff); | 696 | buf[3] = (lun_count & 0xff); |
697 | transport_kunmap_data_sg(se_cmd); | ||
704 | 698 | ||
705 | se_task->task_scsi_status = GOOD; | 699 | se_task->task_scsi_status = GOOD; |
706 | transport_complete_task(se_task, 1); | 700 | transport_complete_task(se_task, 1); |
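The loop above implements the SPC rule that LUN LIST LENGTH must reflect every mapped LUN even when the initiator's allocation length truncates the list, which is why lun_count keeps incrementing after entries stop fitting; the header store also now happens before the buffer is unmapped rather than after. The four shift-and-mask assignments encode that length big-endian, equivalent to a single helper call:

    /* Same effect as the buf[0..3] stores above; lun_count here is the
     * entry count, before the *= 8 scaling to bytes (asm/unaligned.h). */
    put_unaligned_be32(lun_count * 8, &buf[0]);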
@@ -894,10 +888,15 @@ void se_dev_set_default_attribs( | |||
894 | limits->logical_block_size); | 888 | limits->logical_block_size); |
895 | dev->se_sub_dev->se_dev_attrib.max_sectors = limits->max_sectors; | 889 | dev->se_sub_dev->se_dev_attrib.max_sectors = limits->max_sectors; |
896 | /* | 890 | /* |
897 | * Set optimal_sectors from max_sectors, which can be lowered via | 891 | * Set fabric_max_sectors, which is reported in block limits |
898 | * configfs. | 892 | * VPD page (B0h). |
899 | */ | 893 | */ |
900 | dev->se_sub_dev->se_dev_attrib.optimal_sectors = limits->max_sectors; | 894 | dev->se_sub_dev->se_dev_attrib.fabric_max_sectors = DA_FABRIC_MAX_SECTORS; |
895 | /* | ||
896 | * Set optimal_sectors from fabric_max_sectors, which can be | ||
897 | * lowered via configfs. | ||
898 | */ | ||
899 | dev->se_sub_dev->se_dev_attrib.optimal_sectors = DA_FABRIC_MAX_SECTORS; | ||
901 | /* | 900 | /* |
902 | * queue_depth is based on subsystem plugin dependent requirements. | 901 | * queue_depth is based on subsystem plugin dependent requirements. |
903 | */ | 902 | */ |
@@ -1229,6 +1228,54 @@ int se_dev_set_max_sectors(struct se_device *dev, u32 max_sectors) | |||
1229 | return 0; | 1228 | return 0; |
1230 | } | 1229 | } |
1231 | 1230 | ||
1231 | int se_dev_set_fabric_max_sectors(struct se_device *dev, u32 fabric_max_sectors) | ||
1232 | { | ||
1233 | if (atomic_read(&dev->dev_export_obj.obj_access_count)) { | ||
1234 | pr_err("dev[%p]: Unable to change SE Device" | ||
1235 | " fabric_max_sectors while dev_export_obj: %d count exists\n", | ||
1236 | dev, atomic_read(&dev->dev_export_obj.obj_access_count)); | ||
1237 | return -EINVAL; | ||
1238 | } | ||
1239 | if (!fabric_max_sectors) { | ||
1240 | pr_err("dev[%p]: Illegal ZERO value for" | ||
1241 | " fabric_max_sectors\n", dev); | ||
1242 | return -EINVAL; | ||
1243 | } | ||
1244 | if (fabric_max_sectors < DA_STATUS_MAX_SECTORS_MIN) { | ||
1245 | pr_err("dev[%p]: Passed fabric_max_sectors: %u less than" | ||
1246 | " DA_STATUS_MAX_SECTORS_MIN: %u\n", dev, fabric_max_sectors, | ||
1247 | DA_STATUS_MAX_SECTORS_MIN); | ||
1248 | return -EINVAL; | ||
1249 | } | ||
1250 | if (dev->transport->transport_type == TRANSPORT_PLUGIN_PHBA_PDEV) { | ||
1251 | if (fabric_max_sectors > dev->se_sub_dev->se_dev_attrib.hw_max_sectors) { | ||
1252 | pr_err("dev[%p]: Passed fabric_max_sectors: %u" | ||
1253 | " greater than TCM/SE_Device max_sectors:" | ||
1254 | " %u\n", dev, fabric_max_sectors, | ||
1255 | dev->se_sub_dev->se_dev_attrib.hw_max_sectors); | ||
1256 | return -EINVAL; | ||
1257 | } | ||
1258 | } else { | ||
1259 | if (fabric_max_sectors > DA_STATUS_MAX_SECTORS_MAX) { | ||
1260 | pr_err("dev[%p]: Passed fabric_max_sectors: %u" | ||
1261 | " greater than DA_STATUS_MAX_SECTORS_MAX:" | ||
1262 | " %u\n", dev, fabric_max_sectors, | ||
1263 | DA_STATUS_MAX_SECTORS_MAX); | ||
1264 | return -EINVAL; | ||
1265 | } | ||
1266 | } | ||
1267 | /* | ||
1268 | * Align max_sectors down to PAGE_SIZE to follow transport_allocate_data_tasks() | ||
1269 | */ | ||
1270 | fabric_max_sectors = se_dev_align_max_sectors(fabric_max_sectors, | ||
1271 | dev->se_sub_dev->se_dev_attrib.block_size); | ||
1272 | |||
1273 | dev->se_sub_dev->se_dev_attrib.fabric_max_sectors = fabric_max_sectors; | ||
1274 | pr_debug("dev[%p]: SE Device max_sectors changed to %u\n", | ||
1275 | dev, fabric_max_sectors); | ||
1276 | return 0; | ||
1277 | } | ||
1278 | |||
1232 | int se_dev_set_optimal_sectors(struct se_device *dev, u32 optimal_sectors) | 1279 | int se_dev_set_optimal_sectors(struct se_device *dev, u32 optimal_sectors) |
1233 | { | 1280 | { |
1234 | if (atomic_read(&dev->dev_export_obj.obj_access_count)) { | 1281 | if (atomic_read(&dev->dev_export_obj.obj_access_count)) { |
@@ -1242,10 +1289,10 @@ int se_dev_set_optimal_sectors(struct se_device *dev, u32 optimal_sectors) | |||
1242 | " changed for TCM/pSCSI\n", dev); | 1289 | " changed for TCM/pSCSI\n", dev); |
1243 | return -EINVAL; | 1290 | return -EINVAL; |
1244 | } | 1291 | } |
1245 | if (optimal_sectors > dev->se_sub_dev->se_dev_attrib.max_sectors) { | 1292 | if (optimal_sectors > dev->se_sub_dev->se_dev_attrib.fabric_max_sectors) { |
1246 | pr_err("dev[%p]: Passed optimal_sectors %u cannot be" | 1293 | pr_err("dev[%p]: Passed optimal_sectors %u cannot be" |
1247 | " greater than max_sectors: %u\n", dev, | 1294 | " greater than fabric_max_sectors: %u\n", dev, |
1248 | optimal_sectors, dev->se_sub_dev->se_dev_attrib.max_sectors); | 1295 | optimal_sectors, dev->se_sub_dev->se_dev_attrib.fabric_max_sectors); |
1249 | return -EINVAL; | 1296 | return -EINVAL; |
1250 | } | 1297 | } |
1251 | 1298 | ||
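The new se_dev_set_fabric_max_sectors() mirrors se_dev_set_max_sectors() but polices the fabric-facing ceiling: changes are refused while the device is exported, a DA_STATUS_MAX_SECTORS_MIN floor applies, the upper bound is hw_max_sectors for pSCSI passthrough but DA_STATUS_MAX_SECTORS_MAX for virtual backends, and the value is finally aligned down to the block size. se_dev_set_optimal_sectors() is correspondingly re-bounded by fabric_max_sectors instead of max_sectors. A compressed userspace model of that validation order (placeholder constants, alignment step elided):

#include <stdio.h>

#define STATUS_MAX_SECTORS_MIN  128    /* stand-ins for the DA_* constants */
#define STATUS_MAX_SECTORS_MAX  8192

static int set_fabric_max_sectors(unsigned exported, unsigned hw_max,
                                  int passthrough, unsigned v)
{
    if (exported)                      /* no changes while exported */
        return -1;
    if (!v || v < STATUS_MAX_SECTORS_MIN)
        return -1;
    if (passthrough ? v > hw_max : v > STATUS_MAX_SECTORS_MAX)
        return -1;                     /* per-backend upper bound */
    return 0;
}

int main(void)
{
    printf("%d\n", set_fabric_max_sectors(0, 4096, 1, 2048));  /* 0: ok */
    printf("%d\n", set_fabric_max_sectors(0, 1024, 1, 2048));  /* -1: > hw_max */
    printf("%d\n", set_fabric_max_sectors(1, 4096, 0, 2048));  /* -1: exported */
    return 0;
}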
@@ -1380,7 +1427,7 @@ struct se_lun *core_get_lun_from_tpg(struct se_portal_group *tpg, u32 unpacked_l | |||
1380 | spin_unlock(&tpg->tpg_lun_lock); | 1427 | spin_unlock(&tpg->tpg_lun_lock); |
1381 | return NULL; | 1428 | return NULL; |
1382 | } | 1429 | } |
1383 | lun = &tpg->tpg_lun_list[unpacked_lun]; | 1430 | lun = tpg->tpg_lun_list[unpacked_lun]; |
1384 | 1431 | ||
1385 | if (lun->lun_status != TRANSPORT_LUN_STATUS_FREE) { | 1432 | if (lun->lun_status != TRANSPORT_LUN_STATUS_FREE) { |
1386 | pr_err("%s Logical Unit Number: %u is not free on" | 1433 | pr_err("%s Logical Unit Number: %u is not free on" |
@@ -1413,7 +1460,7 @@ static struct se_lun *core_dev_get_lun(struct se_portal_group *tpg, u32 unpacked | |||
1413 | spin_unlock(&tpg->tpg_lun_lock); | 1460 | spin_unlock(&tpg->tpg_lun_lock); |
1414 | return NULL; | 1461 | return NULL; |
1415 | } | 1462 | } |
1416 | lun = &tpg->tpg_lun_list[unpacked_lun]; | 1463 | lun = tpg->tpg_lun_list[unpacked_lun]; |
1417 | 1464 | ||
1418 | if (lun->lun_status != TRANSPORT_LUN_STATUS_ACTIVE) { | 1465 | if (lun->lun_status != TRANSPORT_LUN_STATUS_ACTIVE) { |
1419 | pr_err("%s Logical Unit Number: %u is not active on" | 1466 | pr_err("%s Logical Unit Number: %u is not active on" |
diff --git a/drivers/target/target_core_fabric_configfs.c b/drivers/target/target_core_fabric_configfs.c
index 9a2ce11e1a6e..405cc98eaed6 100644
--- a/drivers/target/target_core_fabric_configfs.c
+++ b/drivers/target/target_core_fabric_configfs.c
@@ -108,7 +108,7 @@ static int target_fabric_mappedlun_link( | |||
108 | * tpg_1/attrib/demo_mode_write_protect=1 | 108 | * tpg_1/attrib/demo_mode_write_protect=1 |
109 | */ | 109 | */ |
110 | spin_lock_irq(&lacl->se_lun_nacl->device_list_lock); | 110 | spin_lock_irq(&lacl->se_lun_nacl->device_list_lock); |
111 | deve = &lacl->se_lun_nacl->device_list[lacl->mapped_lun]; | 111 | deve = lacl->se_lun_nacl->device_list[lacl->mapped_lun]; |
112 | if (deve->lun_flags & TRANSPORT_LUNFLAGS_INITIATOR_ACCESS) | 112 | if (deve->lun_flags & TRANSPORT_LUNFLAGS_INITIATOR_ACCESS) |
113 | lun_access = deve->lun_flags; | 113 | lun_access = deve->lun_flags; |
114 | else | 114 | else |
@@ -137,7 +137,7 @@ static int target_fabric_mappedlun_unlink( | |||
137 | struct se_lun_acl *lacl = container_of(to_config_group(lun_acl_ci), | 137 | struct se_lun_acl *lacl = container_of(to_config_group(lun_acl_ci), |
138 | struct se_lun_acl, se_lun_group); | 138 | struct se_lun_acl, se_lun_group); |
139 | struct se_node_acl *nacl = lacl->se_lun_nacl; | 139 | struct se_node_acl *nacl = lacl->se_lun_nacl; |
140 | struct se_dev_entry *deve = &nacl->device_list[lacl->mapped_lun]; | 140 | struct se_dev_entry *deve = nacl->device_list[lacl->mapped_lun]; |
141 | struct se_portal_group *se_tpg; | 141 | struct se_portal_group *se_tpg; |
142 | /* | 142 | /* |
143 | * Determine if the underlying MappedLUN has already been released.. | 143 | * Determine if the underlying MappedLUN has already been released.. |
@@ -168,7 +168,7 @@ static ssize_t target_fabric_mappedlun_show_write_protect( | |||
168 | ssize_t len; | 168 | ssize_t len; |
169 | 169 | ||
170 | spin_lock_irq(&se_nacl->device_list_lock); | 170 | spin_lock_irq(&se_nacl->device_list_lock); |
171 | deve = &se_nacl->device_list[lacl->mapped_lun]; | 171 | deve = se_nacl->device_list[lacl->mapped_lun]; |
172 | len = sprintf(page, "%d\n", | 172 | len = sprintf(page, "%d\n", |
173 | (deve->lun_flags & TRANSPORT_LUNFLAGS_READ_ONLY) ? | 173 | (deve->lun_flags & TRANSPORT_LUNFLAGS_READ_ONLY) ? |
174 | 1 : 0); | 174 | 1 : 0); |
diff --git a/drivers/target/target_core_iblock.c b/drivers/target/target_core_iblock.c
index 8572eae62da7..2ec299e8a73e 100644
--- a/drivers/target/target_core_iblock.c
+++ b/drivers/target/target_core_iblock.c
@@ -46,6 +46,9 @@ | |||
46 | 46 | ||
47 | #include "target_core_iblock.h" | 47 | #include "target_core_iblock.h" |
48 | 48 | ||
49 | #define IBLOCK_MAX_BIO_PER_TASK 32 /* max # of bios to submit at a time */ | ||
50 | #define IBLOCK_BIO_POOL_SIZE 128 | ||
51 | |||
49 | static struct se_subsystem_api iblock_template; | 52 | static struct se_subsystem_api iblock_template; |
50 | 53 | ||
51 | static void iblock_bio_done(struct bio *, int); | 54 | static void iblock_bio_done(struct bio *, int); |
@@ -56,51 +59,25 @@ static void iblock_bio_done(struct bio *, int); | |||
56 | */ | 59 | */ |
57 | static int iblock_attach_hba(struct se_hba *hba, u32 host_id) | 60 | static int iblock_attach_hba(struct se_hba *hba, u32 host_id) |
58 | { | 61 | { |
59 | struct iblock_hba *ib_host; | ||
60 | |||
61 | ib_host = kzalloc(sizeof(struct iblock_hba), GFP_KERNEL); | ||
62 | if (!ib_host) { | ||
63 | pr_err("Unable to allocate memory for" | ||
64 | " struct iblock_hba\n"); | ||
65 | return -ENOMEM; | ||
66 | } | ||
67 | |||
68 | ib_host->iblock_host_id = host_id; | ||
69 | |||
70 | hba->hba_ptr = ib_host; | ||
71 | |||
72 | pr_debug("CORE_HBA[%d] - TCM iBlock HBA Driver %s on" | 62 | pr_debug("CORE_HBA[%d] - TCM iBlock HBA Driver %s on" |
73 | " Generic Target Core Stack %s\n", hba->hba_id, | 63 | " Generic Target Core Stack %s\n", hba->hba_id, |
74 | IBLOCK_VERSION, TARGET_CORE_MOD_VERSION); | 64 | IBLOCK_VERSION, TARGET_CORE_MOD_VERSION); |
75 | |||
76 | pr_debug("CORE_HBA[%d] - Attached iBlock HBA: %u to Generic\n", | ||
77 | hba->hba_id, ib_host->iblock_host_id); | ||
78 | |||
79 | return 0; | 65 | return 0; |
80 | } | 66 | } |
81 | 67 | ||
82 | static void iblock_detach_hba(struct se_hba *hba) | 68 | static void iblock_detach_hba(struct se_hba *hba) |
83 | { | 69 | { |
84 | struct iblock_hba *ib_host = hba->hba_ptr; | ||
85 | |||
86 | pr_debug("CORE_HBA[%d] - Detached iBlock HBA: %u from Generic" | ||
87 | " Target Core\n", hba->hba_id, ib_host->iblock_host_id); | ||
88 | |||
89 | kfree(ib_host); | ||
90 | hba->hba_ptr = NULL; | ||
91 | } | 70 | } |
92 | 71 | ||
93 | static void *iblock_allocate_virtdevice(struct se_hba *hba, const char *name) | 72 | static void *iblock_allocate_virtdevice(struct se_hba *hba, const char *name) |
94 | { | 73 | { |
95 | struct iblock_dev *ib_dev = NULL; | 74 | struct iblock_dev *ib_dev = NULL; |
96 | struct iblock_hba *ib_host = hba->hba_ptr; | ||
97 | 75 | ||
98 | ib_dev = kzalloc(sizeof(struct iblock_dev), GFP_KERNEL); | 76 | ib_dev = kzalloc(sizeof(struct iblock_dev), GFP_KERNEL); |
99 | if (!ib_dev) { | 77 | if (!ib_dev) { |
100 | pr_err("Unable to allocate struct iblock_dev\n"); | 78 | pr_err("Unable to allocate struct iblock_dev\n"); |
101 | return NULL; | 79 | return NULL; |
102 | } | 80 | } |
103 | ib_dev->ibd_host = ib_host; | ||
104 | 81 | ||
105 | pr_debug( "IBLOCK: Allocated ib_dev for %s\n", name); | 82 | pr_debug( "IBLOCK: Allocated ib_dev for %s\n", name); |
106 | 83 | ||
@@ -126,10 +103,8 @@ static struct se_device *iblock_create_virtdevice( | |||
126 | return ERR_PTR(ret); | 103 | return ERR_PTR(ret); |
127 | } | 104 | } |
128 | memset(&dev_limits, 0, sizeof(struct se_dev_limits)); | 105 | memset(&dev_limits, 0, sizeof(struct se_dev_limits)); |
129 | /* | 106 | |
130 | * These settings need to be made tunable.. | 107 | ib_dev->ibd_bio_set = bioset_create(IBLOCK_BIO_POOL_SIZE, 0); |
131 | */ | ||
132 | ib_dev->ibd_bio_set = bioset_create(32, 0); | ||
133 | if (!ib_dev->ibd_bio_set) { | 108 | if (!ib_dev->ibd_bio_set) { |
134 | pr_err("IBLOCK: Unable to create bioset()\n"); | 109 | pr_err("IBLOCK: Unable to create bioset()\n"); |
135 | return ERR_PTR(-ENOMEM); | 110 | return ERR_PTR(-ENOMEM); |
@@ -155,8 +130,8 @@ static struct se_device *iblock_create_virtdevice( | |||
155 | q = bdev_get_queue(bd); | 130 | q = bdev_get_queue(bd); |
156 | limits = &dev_limits.limits; | 131 | limits = &dev_limits.limits; |
157 | limits->logical_block_size = bdev_logical_block_size(bd); | 132 | limits->logical_block_size = bdev_logical_block_size(bd); |
158 | limits->max_hw_sectors = queue_max_hw_sectors(q); | 133 | limits->max_hw_sectors = UINT_MAX; |
159 | limits->max_sectors = queue_max_sectors(q); | 134 | limits->max_sectors = UINT_MAX; |
160 | dev_limits.hw_queue_depth = q->nr_requests; | 135 | dev_limits.hw_queue_depth = q->nr_requests; |
161 | dev_limits.queue_depth = q->nr_requests; | 136 | dev_limits.queue_depth = q->nr_requests; |
162 | 137 | ||
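With fabric_max_sectors introduced above as the initiator-visible cap, the iblock backend stops mirroring the queue limits and advertises effectively unlimited hardware transfer sizes; the clamp now lives in one place instead of being split between backend and fabric. A trivial sketch of the resulting division of labor (all values illustrative):

#include <limits.h>
#include <stdio.h>

int main(void)
{
    unsigned hw_max_sectors = UINT_MAX;   /* backend: no artificial clamp */
    unsigned fabric_max_sectors = 8192;   /* single fabric-facing cap */
    unsigned io = 16384;                  /* requested transfer, in sectors */

    unsigned allowed = io < fabric_max_sectors ? io : fabric_max_sectors;
    printf("capped %u -> %u sectors by the fabric limit alone\n", io, allowed);
    return 0;
}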
@@ -230,7 +205,7 @@ iblock_alloc_task(unsigned char *cdb) | |||
230 | return NULL; | 205 | return NULL; |
231 | } | 206 | } |
232 | 207 | ||
233 | atomic_set(&ib_req->ib_bio_cnt, 0); | 208 | atomic_set(&ib_req->pending, 1); |
234 | return &ib_req->ib_task; | 209 | return &ib_req->ib_task; |
235 | } | 210 | } |
236 | 211 | ||
@@ -510,24 +485,35 @@ iblock_get_bio(struct se_task *task, sector_t lba, u32 sg_num) | |||
510 | bio->bi_destructor = iblock_bio_destructor; | 485 | bio->bi_destructor = iblock_bio_destructor; |
511 | bio->bi_end_io = &iblock_bio_done; | 486 | bio->bi_end_io = &iblock_bio_done; |
512 | bio->bi_sector = lba; | 487 | bio->bi_sector = lba; |
513 | atomic_inc(&ib_req->ib_bio_cnt); | 488 | atomic_inc(&ib_req->pending); |
514 | 489 | ||
515 | pr_debug("Set bio->bi_sector: %llu\n", (unsigned long long)bio->bi_sector); | 490 | pr_debug("Set bio->bi_sector: %llu\n", (unsigned long long)bio->bi_sector); |
516 | pr_debug("Set ib_req->ib_bio_cnt: %d\n", | 491 | pr_debug("Set ib_req->pending: %d\n", atomic_read(&ib_req->pending)); |
517 | atomic_read(&ib_req->ib_bio_cnt)); | ||
518 | return bio; | 492 | return bio; |
519 | } | 493 | } |
520 | 494 | ||
495 | static void iblock_submit_bios(struct bio_list *list, int rw) | ||
496 | { | ||
497 | struct blk_plug plug; | ||
498 | struct bio *bio; | ||
499 | |||
500 | blk_start_plug(&plug); | ||
501 | while ((bio = bio_list_pop(list))) | ||
502 | submit_bio(rw, bio); | ||
503 | blk_finish_plug(&plug); | ||
504 | } | ||
505 | |||
521 | static int iblock_do_task(struct se_task *task) | 506 | static int iblock_do_task(struct se_task *task) |
522 | { | 507 | { |
523 | struct se_cmd *cmd = task->task_se_cmd; | 508 | struct se_cmd *cmd = task->task_se_cmd; |
524 | struct se_device *dev = cmd->se_dev; | 509 | struct se_device *dev = cmd->se_dev; |
510 | struct iblock_req *ibr = IBLOCK_REQ(task); | ||
525 | struct bio *bio; | 511 | struct bio *bio; |
526 | struct bio_list list; | 512 | struct bio_list list; |
527 | struct scatterlist *sg; | 513 | struct scatterlist *sg; |
528 | u32 i, sg_num = task->task_sg_nents; | 514 | u32 i, sg_num = task->task_sg_nents; |
529 | sector_t block_lba; | 515 | sector_t block_lba; |
530 | struct blk_plug plug; | 516 | unsigned bio_cnt; |
531 | int rw; | 517 | int rw; |
532 | 518 | ||
533 | if (task->task_data_direction == DMA_TO_DEVICE) { | 519 | if (task->task_data_direction == DMA_TO_DEVICE) { |
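iblock_submit_bios() factors the pop-and-submit loop out of iblock_do_task() so it can also be called mid-task (see the batching in the next hunk); bracketing the loop with a plug lets the block layer coalesce the whole batch. A userspace model of the helper, with a bare singly linked list standing in for struct bio_list and printouts in place of blk_start_plug()/submit_bio()/blk_finish_plug():

#include <stdio.h>

struct bio { struct bio *next; int sector; };

static struct bio *bio_list_pop(struct bio **list)
{
    struct bio *b = *list;

    if (b)
        *list = b->next;
    return b;
}

static void submit_bios(struct bio **list, int rw)
{
    struct bio *bio;

    printf("start plug\n");                       /* blk_start_plug() */
    while ((bio = bio_list_pop(list)))
        printf("submit %c sector %d\n", rw ? 'W' : 'R', bio->sector);
    printf("finish plug\n");                      /* blk_finish_plug() */
}

int main(void)
{
    struct bio b2 = { .next = NULL, .sector = 16 };
    struct bio b1 = { .next = &b2,  .sector = 8 };
    struct bio b0 = { .next = &b1,  .sector = 0 };
    struct bio *list = &b0;

    submit_bios(&list, 1);
    return 0;
}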
@@ -572,6 +558,7 @@ static int iblock_do_task(struct se_task *task) | |||
572 | 558 | ||
573 | bio_list_init(&list); | 559 | bio_list_init(&list); |
574 | bio_list_add(&list, bio); | 560 | bio_list_add(&list, bio); |
561 | bio_cnt = 1; | ||
575 | 562 | ||
576 | for_each_sg(task->task_sg, sg, task->task_sg_nents, i) { | 563 | for_each_sg(task->task_sg, sg, task->task_sg_nents, i) { |
577 | /* | 564 | /* |
@@ -581,10 +568,16 @@ static int iblock_do_task(struct se_task *task) | |||
581 | */ | 568 | */ |
582 | while (bio_add_page(bio, sg_page(sg), sg->length, sg->offset) | 569 | while (bio_add_page(bio, sg_page(sg), sg->length, sg->offset) |
583 | != sg->length) { | 570 | != sg->length) { |
571 | if (bio_cnt >= IBLOCK_MAX_BIO_PER_TASK) { | ||
572 | iblock_submit_bios(&list, rw); | ||
573 | bio_cnt = 0; | ||
574 | } | ||
575 | |||
584 | bio = iblock_get_bio(task, block_lba, sg_num); | 576 | bio = iblock_get_bio(task, block_lba, sg_num); |
585 | if (!bio) | 577 | if (!bio) |
586 | goto fail; | 578 | goto fail; |
587 | bio_list_add(&list, bio); | 579 | bio_list_add(&list, bio); |
580 | bio_cnt++; | ||
588 | } | 581 | } |
589 | 582 | ||
590 | /* Always in 512 byte units for Linux/Block */ | 583 | /* Always in 512 byte units for Linux/Block */ |
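The IBLOCK_MAX_BIO_PER_TASK cap added here bounds how many bios a single task keeps queued before flushing them through iblock_submit_bios(), rather than accumulating every bio for the task first. A minimal model of the flush-when-full flow; the batch size matches the new define, everything else is illustrative:

#include <stdio.h>

#define MAX_BIO_PER_TASK 32    /* mirrors IBLOCK_MAX_BIO_PER_TASK */

static void submit_batch(int n)
{
    printf("submitting %d bios\n", n);
}

int main(void)
{
    int cnt = 0;

    for (int i = 0; i < 100; i++) {       /* say one task needs 100 bios */
        if (cnt >= MAX_BIO_PER_TASK) {    /* flush before allocating more */
            submit_batch(cnt);
            cnt = 0;
        }
        cnt++;                            /* "allocate" the next bio */
    }
    submit_batch(cnt);                    /* final partial batch: 4 bios */
    return 0;
}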
@@ -592,11 +585,12 @@ static int iblock_do_task(struct se_task *task) | |||
592 | sg_num--; | 585 | sg_num--; |
593 | } | 586 | } |
594 | 587 | ||
595 | blk_start_plug(&plug); | 588 | iblock_submit_bios(&list, rw); |
596 | while ((bio = bio_list_pop(&list))) | ||
597 | submit_bio(rw, bio); | ||
598 | blk_finish_plug(&plug); | ||
599 | 589 | ||
590 | if (atomic_dec_and_test(&ibr->pending)) { | ||
591 | transport_complete_task(task, | ||
592 | !atomic_read(&ibr->ib_bio_err_cnt)); | ||
593 | } | ||
600 | return 0; | 594 | return 0; |
601 | 595 | ||
602 | fail: | 596 | fail: |
@@ -648,7 +642,7 @@ static void iblock_bio_done(struct bio *bio, int err) | |||
648 | 642 | ||
649 | bio_put(bio); | 643 | bio_put(bio); |
650 | 644 | ||
651 | if (!atomic_dec_and_test(&ibr->ib_bio_cnt)) | 645 | if (!atomic_dec_and_test(&ibr->pending)) |
652 | return; | 646 | return; |
653 | 647 | ||
654 | pr_debug("done[%p] bio: %p task_lba: %llu bio_lba: %llu err=%d\n", | 648 | pr_debug("done[%p] bio: %p task_lba: %llu bio_lba: %llu err=%d\n", |
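Renaming ib_bio_cnt to pending goes with a semantic change visible in iblock_alloc_task() and iblock_do_task(): the counter now starts at 1, so the submission path holds its own reference and drops it only after every bio has been sent. Whichever side brings the count to zero, the last bio completion or the submitter, completes the task exactly once, even if all bios finish before submission returns. A runnable userspace model of that biased-counter pattern (C11 atomics in place of the kernel's atomic_t; error accounting elided):

#include <stdatomic.h>
#include <stdio.h>

struct req { atomic_int pending; };

static void complete_task(struct req *r)
{
    printf("task %p complete\n", (void *)r);  /* transport_complete_task() */
}

static void bio_done(struct req *r)           /* per-bio completion callback */
{
    if (atomic_fetch_sub(&r->pending, 1) == 1)
        complete_task(r);
}

static void do_task(struct req *r, int nbios)
{
    atomic_init(&r->pending, 1);              /* the submitter's bias reference */
    for (int i = 0; i < nbios; i++) {
        atomic_fetch_add(&r->pending, 1);
        bio_done(r);                          /* bios may complete immediately */
    }
    /* drop the bias: fires the completion only if all bios are done */
    if (atomic_fetch_sub(&r->pending, 1) == 1)
        complete_task(r);
}

int main(void)
{
    struct req r;

    do_task(&r, 3);                           /* prints "complete" exactly once */
    return 0;
}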
diff --git a/drivers/target/target_core_iblock.h b/drivers/target/target_core_iblock.h
index 5cf1860c10d0..e929370b6fd3 100644
--- a/drivers/target/target_core_iblock.h
+++ b/drivers/target/target_core_iblock.h
@@ -8,7 +8,7 @@ | |||
8 | 8 | ||
9 | struct iblock_req { | 9 | struct iblock_req { |
10 | struct se_task ib_task; | 10 | struct se_task ib_task; |
11 | atomic_t ib_bio_cnt; | 11 | atomic_t pending; |
12 | atomic_t ib_bio_err_cnt; | 12 | atomic_t ib_bio_err_cnt; |
13 | } ____cacheline_aligned; | 13 | } ____cacheline_aligned; |
14 | 14 | ||
@@ -19,11 +19,6 @@ struct iblock_dev { | |||
19 | u32 ibd_flags; | 19 | u32 ibd_flags; |
20 | struct bio_set *ibd_bio_set; | 20 | struct bio_set *ibd_bio_set; |
21 | struct block_device *ibd_bd; | 21 | struct block_device *ibd_bd; |
22 | struct iblock_hba *ibd_host; | ||
23 | } ____cacheline_aligned; | ||
24 | |||
25 | struct iblock_hba { | ||
26 | int iblock_host_id; | ||
27 | } ____cacheline_aligned; | 22 | } ____cacheline_aligned; |
28 | 23 | ||
29 | #endif /* TARGET_CORE_IBLOCK_H */ | 24 | #endif /* TARGET_CORE_IBLOCK_H */ |
diff --git a/drivers/target/target_core_internal.h b/drivers/target/target_core_internal.h
index 45001364788a..21c05638f158 100644
--- a/drivers/target/target_core_internal.h
+++ b/drivers/target/target_core_internal.h
@@ -53,6 +53,7 @@ int se_dev_set_is_nonrot(struct se_device *, int); | |||
53 | int se_dev_set_emulate_rest_reord(struct se_device *dev, int); | 53 | int se_dev_set_emulate_rest_reord(struct se_device *dev, int); |
54 | int se_dev_set_queue_depth(struct se_device *, u32); | 54 | int se_dev_set_queue_depth(struct se_device *, u32); |
55 | int se_dev_set_max_sectors(struct se_device *, u32); | 55 | int se_dev_set_max_sectors(struct se_device *, u32); |
56 | int se_dev_set_fabric_max_sectors(struct se_device *, u32); | ||
56 | int se_dev_set_optimal_sectors(struct se_device *, u32); | 57 | int se_dev_set_optimal_sectors(struct se_device *, u32); |
57 | int se_dev_set_block_size(struct se_device *, u32); | 58 | int se_dev_set_block_size(struct se_device *, u32); |
58 | struct se_lun *core_dev_add_lun(struct se_portal_group *, struct se_hba *, | 59 | struct se_lun *core_dev_add_lun(struct se_portal_group *, struct se_hba *, |
@@ -75,6 +76,8 @@ struct se_hba *core_alloc_hba(const char *, u32, u32); | |||
75 | int core_delete_hba(struct se_hba *); | 76 | int core_delete_hba(struct se_hba *); |
76 | 77 | ||
77 | /* target_core_tmr.c */ | 78 | /* target_core_tmr.c */ |
79 | void core_tmr_abort_task(struct se_device *, struct se_tmr_req *, | ||
80 | struct se_session *); | ||
78 | int core_tmr_lun_reset(struct se_device *, struct se_tmr_req *, | 81 | int core_tmr_lun_reset(struct se_device *, struct se_tmr_req *, |
79 | struct list_head *, struct se_cmd *); | 82 | struct list_head *, struct se_cmd *); |
80 | 83 | ||
diff --git a/drivers/target/target_core_pr.c b/drivers/target/target_core_pr.c
index 63e703bb6ac9..86f0c3b5d500 100644
--- a/drivers/target/target_core_pr.c
+++ b/drivers/target/target_core_pr.c
@@ -338,7 +338,7 @@ static int core_scsi3_pr_seq_non_holder( | |||
338 | return core_scsi2_reservation_seq_non_holder(cmd, | 338 | return core_scsi2_reservation_seq_non_holder(cmd, |
339 | cdb, pr_reg_type); | 339 | cdb, pr_reg_type); |
340 | 340 | ||
341 | se_deve = &se_sess->se_node_acl->device_list[cmd->orig_fe_lun]; | 341 | se_deve = se_sess->se_node_acl->device_list[cmd->orig_fe_lun]; |
342 | /* | 342 | /* |
343 | * Determine if the registration should be ignored due to | 343 | * Determine if the registration should be ignored due to |
344 | * non-matching ISIDs in core_scsi3_pr_reservation_check(). | 344 | * non-matching ISIDs in core_scsi3_pr_reservation_check(). |
@@ -1000,7 +1000,7 @@ int core_scsi3_check_aptpl_registration( | |||
1000 | { | 1000 | { |
1001 | struct se_subsystem_dev *su_dev = dev->se_sub_dev; | 1001 | struct se_subsystem_dev *su_dev = dev->se_sub_dev; |
1002 | struct se_node_acl *nacl = lun_acl->se_lun_nacl; | 1002 | struct se_node_acl *nacl = lun_acl->se_lun_nacl; |
1003 | struct se_dev_entry *deve = &nacl->device_list[lun_acl->mapped_lun]; | 1003 | struct se_dev_entry *deve = nacl->device_list[lun_acl->mapped_lun]; |
1004 | 1004 | ||
1005 | if (su_dev->t10_pr.res_type != SPC3_PERSISTENT_RESERVATIONS) | 1005 | if (su_dev->t10_pr.res_type != SPC3_PERSISTENT_RESERVATIONS) |
1006 | return 0; | 1006 | return 0; |
@@ -1497,7 +1497,7 @@ static int core_scsi3_decode_spec_i_port( | |||
1497 | struct se_dev_entry *dest_se_deve = NULL, *local_se_deve; | 1497 | struct se_dev_entry *dest_se_deve = NULL, *local_se_deve; |
1498 | struct t10_pr_registration *dest_pr_reg, *local_pr_reg, *pr_reg_e; | 1498 | struct t10_pr_registration *dest_pr_reg, *local_pr_reg, *pr_reg_e; |
1499 | struct t10_pr_registration *pr_reg_tmp, *pr_reg_tmp_safe; | 1499 | struct t10_pr_registration *pr_reg_tmp, *pr_reg_tmp_safe; |
1500 | struct list_head tid_dest_list; | 1500 | LIST_HEAD(tid_dest_list); |
1501 | struct pr_transport_id_holder *tidh_new, *tidh, *tidh_tmp; | 1501 | struct pr_transport_id_holder *tidh_new, *tidh, *tidh_tmp; |
1502 | struct target_core_fabric_ops *tmp_tf_ops; | 1502 | struct target_core_fabric_ops *tmp_tf_ops; |
1503 | unsigned char *buf; | 1503 | unsigned char *buf; |
@@ -1508,9 +1508,8 @@ static int core_scsi3_decode_spec_i_port( | |||
1508 | u32 dest_rtpi = 0; | 1508 | u32 dest_rtpi = 0; |
1509 | 1509 | ||
1510 | memset(dest_iport, 0, 64); | 1510 | memset(dest_iport, 0, 64); |
1511 | INIT_LIST_HEAD(&tid_dest_list); | ||
1512 | 1511 | ||
1513 | local_se_deve = &se_sess->se_node_acl->device_list[cmd->orig_fe_lun]; | 1512 | local_se_deve = se_sess->se_node_acl->device_list[cmd->orig_fe_lun]; |
1514 | /* | 1513 | /* |
1515 | * Allocate a struct pr_transport_id_holder and setup the | 1514 | * Allocate a struct pr_transport_id_holder and setup the |
1516 | * local_node_acl and local_se_deve pointers and add to | 1515 | * local_node_acl and local_se_deve pointers and add to |
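The LIST_HEAD() conversions in this function (and in core_scsi3_pro_preempt() below) fold declaration and initialization of the on-stack list into one step, removing the window between declaring tid_dest_list and the later INIT_LIST_HEAD() call. A self-contained model of the two forms, with a simplified list_head rather than the real macros from <linux/list.h>:

#include <stdio.h>

struct list_head { struct list_head *next, *prev; };

#define LIST_HEAD(name) struct list_head name = { &(name), &(name) }

static void INIT_LIST_HEAD(struct list_head *l)
{
    l->next = l->prev = l;
}

int main(void)
{
    LIST_HEAD(a);            /* declared and initialized in one step */

    struct list_head b;      /* the older two-step form */
    INIT_LIST_HEAD(&b);

    printf("a empty: %d, b empty: %d\n", a.next == &a, b.next == &b);
    return 0;
}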
@@ -2127,7 +2126,7 @@ static int core_scsi3_emulate_pro_register( | |||
2127 | return -EINVAL; | 2126 | return -EINVAL; |
2128 | } | 2127 | } |
2129 | se_tpg = se_sess->se_tpg; | 2128 | se_tpg = se_sess->se_tpg; |
2130 | se_deve = &se_sess->se_node_acl->device_list[cmd->orig_fe_lun]; | 2129 | se_deve = se_sess->se_node_acl->device_list[cmd->orig_fe_lun]; |
2131 | 2130 | ||
2132 | if (se_tpg->se_tpg_tfo->sess_get_initiator_sid) { | 2131 | if (se_tpg->se_tpg_tfo->sess_get_initiator_sid) { |
2133 | memset(&isid_buf[0], 0, PR_REG_ISID_LEN); | 2132 | memset(&isid_buf[0], 0, PR_REG_ISID_LEN); |
@@ -2427,9 +2426,7 @@ static int core_scsi3_pro_reserve( | |||
2427 | u64 res_key) | 2426 | u64 res_key) |
2428 | { | 2427 | { |
2429 | struct se_session *se_sess = cmd->se_sess; | 2428 | struct se_session *se_sess = cmd->se_sess; |
2430 | struct se_dev_entry *se_deve; | ||
2431 | struct se_lun *se_lun = cmd->se_lun; | 2429 | struct se_lun *se_lun = cmd->se_lun; |
2432 | struct se_portal_group *se_tpg; | ||
2433 | struct t10_pr_registration *pr_reg, *pr_res_holder; | 2430 | struct t10_pr_registration *pr_reg, *pr_res_holder; |
2434 | struct t10_reservation *pr_tmpl = &dev->se_sub_dev->t10_pr; | 2431 | struct t10_reservation *pr_tmpl = &dev->se_sub_dev->t10_pr; |
2435 | char i_buf[PR_REG_ISID_ID_LEN]; | 2432 | char i_buf[PR_REG_ISID_ID_LEN]; |
@@ -2442,8 +2439,6 @@ static int core_scsi3_pro_reserve( | |||
2442 | cmd->scsi_sense_reason = TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE; | 2439 | cmd->scsi_sense_reason = TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE; |
2443 | return -EINVAL; | 2440 | return -EINVAL; |
2444 | } | 2441 | } |
2445 | se_tpg = se_sess->se_tpg; | ||
2446 | se_deve = &se_sess->se_node_acl->device_list[cmd->orig_fe_lun]; | ||
2447 | /* | 2442 | /* |
2448 | * Locate the existing *pr_reg via struct se_node_acl pointers | 2443 | * Locate the existing *pr_reg via struct se_node_acl pointers |
2449 | */ | 2444 | */ |
@@ -3001,10 +2996,9 @@ static int core_scsi3_pro_preempt( | |||
3001 | int abort) | 2996 | int abort) |
3002 | { | 2997 | { |
3003 | struct se_device *dev = cmd->se_dev; | 2998 | struct se_device *dev = cmd->se_dev; |
3004 | struct se_dev_entry *se_deve; | ||
3005 | struct se_node_acl *pr_reg_nacl; | 2999 | struct se_node_acl *pr_reg_nacl; |
3006 | struct se_session *se_sess = cmd->se_sess; | 3000 | struct se_session *se_sess = cmd->se_sess; |
3007 | struct list_head preempt_and_abort_list; | 3001 | LIST_HEAD(preempt_and_abort_list); |
3008 | struct t10_pr_registration *pr_reg, *pr_reg_tmp, *pr_reg_n, *pr_res_holder; | 3002 | struct t10_pr_registration *pr_reg, *pr_reg_tmp, *pr_reg_n, *pr_res_holder; |
3009 | struct t10_reservation *pr_tmpl = &dev->se_sub_dev->t10_pr; | 3003 | struct t10_reservation *pr_tmpl = &dev->se_sub_dev->t10_pr; |
3010 | u32 pr_res_mapped_lun = 0; | 3004 | u32 pr_res_mapped_lun = 0; |
@@ -3016,7 +3010,6 @@ static int core_scsi3_pro_preempt( | |||
3016 | return -EINVAL; | 3010 | return -EINVAL; |
3017 | } | 3011 | } |
3018 | 3012 | ||
3019 | se_deve = &se_sess->se_node_acl->device_list[cmd->orig_fe_lun]; | ||
3020 | pr_reg_n = core_scsi3_locate_pr_reg(cmd->se_dev, se_sess->se_node_acl, | 3013 | pr_reg_n = core_scsi3_locate_pr_reg(cmd->se_dev, se_sess->se_node_acl, |
3021 | se_sess); | 3014 | se_sess); |
3022 | if (!pr_reg_n) { | 3015 | if (!pr_reg_n) { |
@@ -3037,7 +3030,6 @@ static int core_scsi3_pro_preempt( | |||
3037 | cmd->scsi_sense_reason = TCM_INVALID_PARAMETER_LIST; | 3030 | cmd->scsi_sense_reason = TCM_INVALID_PARAMETER_LIST; |
3038 | return -EINVAL; | 3031 | return -EINVAL; |
3039 | } | 3032 | } |
3040 | INIT_LIST_HEAD(&preempt_and_abort_list); | ||
3041 | 3033 | ||
3042 | spin_lock(&dev->dev_reservation_lock); | 3034 | spin_lock(&dev->dev_reservation_lock); |
3043 | pr_res_holder = dev->dev_pr_res_holder; | 3035 | pr_res_holder = dev->dev_pr_res_holder; |
@@ -3353,7 +3345,7 @@ static int core_scsi3_emulate_pro_register_and_move( | |||
3353 | { | 3345 | { |
3354 | struct se_session *se_sess = cmd->se_sess; | 3346 | struct se_session *se_sess = cmd->se_sess; |
3355 | struct se_device *dev = cmd->se_dev; | 3347 | struct se_device *dev = cmd->se_dev; |
3356 | struct se_dev_entry *se_deve, *dest_se_deve = NULL; | 3348 | struct se_dev_entry *dest_se_deve = NULL; |
3357 | struct se_lun *se_lun = cmd->se_lun; | 3349 | struct se_lun *se_lun = cmd->se_lun; |
3358 | struct se_node_acl *pr_res_nacl, *pr_reg_nacl, *dest_node_acl = NULL; | 3350 | struct se_node_acl *pr_res_nacl, *pr_reg_nacl, *dest_node_acl = NULL; |
3359 | struct se_port *se_port; | 3351 | struct se_port *se_port; |
@@ -3378,7 +3370,6 @@ static int core_scsi3_emulate_pro_register_and_move( | |||
3378 | memset(i_buf, 0, PR_REG_ISID_ID_LEN); | 3370 | memset(i_buf, 0, PR_REG_ISID_ID_LEN); |
3379 | se_tpg = se_sess->se_tpg; | 3371 | se_tpg = se_sess->se_tpg; |
3380 | tf_ops = se_tpg->se_tpg_tfo; | 3372 | tf_ops = se_tpg->se_tpg_tfo; |
3381 | se_deve = &se_sess->se_node_acl->device_list[cmd->orig_fe_lun]; | ||
3382 | /* | 3373 | /* |
3383 | * Follow logic from spc4r17 Section 5.7.8, Table 50 -- | 3374 | * Follow logic from spc4r17 Section 5.7.8, Table 50 -- |
3384 | * Register behaviors for a REGISTER AND MOVE service action | 3375 | * Register behaviors for a REGISTER AND MOVE service action |
diff --git a/drivers/target/target_core_pscsi.c b/drivers/target/target_core_pscsi.c
index 8d4def30e9e8..94c905fcbceb 100644
--- a/drivers/target/target_core_pscsi.c
+++ b/drivers/target/target_core_pscsi.c
@@ -69,7 +69,7 @@ static int pscsi_attach_hba(struct se_hba *hba, u32 host_id) | |||
69 | return -ENOMEM; | 69 | return -ENOMEM; |
70 | } | 70 | } |
71 | phv->phv_host_id = host_id; | 71 | phv->phv_host_id = host_id; |
72 | phv->phv_mode = PHV_VIRUTAL_HOST_ID; | 72 | phv->phv_mode = PHV_VIRTUAL_HOST_ID; |
73 | 73 | ||
74 | hba->hba_ptr = phv; | 74 | hba->hba_ptr = phv; |
75 | 75 | ||
@@ -114,7 +114,7 @@ static int pscsi_pmode_enable_hba(struct se_hba *hba, unsigned long mode_flag) | |||
114 | return 0; | 114 | return 0; |
115 | 115 | ||
116 | phv->phv_lld_host = NULL; | 116 | phv->phv_lld_host = NULL; |
117 | phv->phv_mode = PHV_VIRUTAL_HOST_ID; | 117 | phv->phv_mode = PHV_VIRTUAL_HOST_ID; |
118 | 118 | ||
119 | pr_debug("CORE_HBA[%d] - Disabled pSCSI HBA Passthrough" | 119 | pr_debug("CORE_HBA[%d] - Disabled pSCSI HBA Passthrough" |
120 | " %s\n", hba->hba_id, (sh->hostt->name) ? | 120 | " %s\n", hba->hba_id, (sh->hostt->name) ? |
@@ -531,7 +531,7 @@ static struct se_device *pscsi_create_virtdevice( | |||
531 | return ERR_PTR(-ENODEV); | 531 | return ERR_PTR(-ENODEV); |
532 | } | 532 | } |
533 | /* | 533 | /* |
534 | * For the newer PHV_VIRUTAL_HOST_ID struct scsi_device | 534 | * For the newer PHV_VIRTUAL_HOST_ID struct scsi_device |
535 | * reference, we enforce that udev_path has been set | 535 | * reference, we enforce that udev_path has been set |
536 | */ | 536 | */ |
537 | if (!(se_dev->su_dev_flags & SDF_USING_UDEV_PATH)) { | 537 | if (!(se_dev->su_dev_flags & SDF_USING_UDEV_PATH)) { |
@@ -540,7 +540,7 @@ static struct se_device *pscsi_create_virtdevice( | |||
540 | return ERR_PTR(-EINVAL); | 540 | return ERR_PTR(-EINVAL); |
541 | } | 541 | } |
542 | /* | 542 | /* |
543 | * If no scsi_host_id= was passed for PHV_VIRUTAL_HOST_ID, | 543 | * If no scsi_host_id= was passed for PHV_VIRTUAL_HOST_ID, |
544 | * use the original TCM hba ID to reference Linux/SCSI Host No | 544 | * use the original TCM hba ID to reference Linux/SCSI Host No |
545 | * and enable for PHV_LLD_SCSI_HOST_NO mode. | 545 | * and enable for PHV_LLD_SCSI_HOST_NO mode. |
546 | */ | 546 | */ |
@@ -569,8 +569,8 @@ static struct se_device *pscsi_create_virtdevice( | |||
569 | } | 569 | } |
570 | } | 570 | } |
571 | } else { | 571 | } else { |
572 | if (phv->phv_mode == PHV_VIRUTAL_HOST_ID) { | 572 | if (phv->phv_mode == PHV_VIRTUAL_HOST_ID) { |
573 | pr_err("pSCSI: PHV_VIRUTAL_HOST_ID set while" | 573 | pr_err("pSCSI: PHV_VIRTUAL_HOST_ID set while" |
574 | " struct Scsi_Host exists\n"); | 574 | " struct Scsi_Host exists\n"); |
575 | return ERR_PTR(-EEXIST); | 575 | return ERR_PTR(-EEXIST); |
576 | } | 576 | } |
@@ -600,7 +600,7 @@ static struct se_device *pscsi_create_virtdevice( | |||
600 | } | 600 | } |
601 | 601 | ||
602 | if (!dev) { | 602 | if (!dev) { |
603 | if (phv->phv_mode == PHV_VIRUTAL_HOST_ID) | 603 | if (phv->phv_mode == PHV_VIRTUAL_HOST_ID) |
604 | scsi_host_put(sh); | 604 | scsi_host_put(sh); |
605 | else if (legacy_mode_enable) { | 605 | else if (legacy_mode_enable) { |
606 | pscsi_pmode_enable_hba(hba, 0); | 606 | pscsi_pmode_enable_hba(hba, 0); |
@@ -616,7 +616,7 @@ static struct se_device *pscsi_create_virtdevice( | |||
616 | pr_err("pSCSI: Unable to locate %d:%d:%d:%d\n", sh->host_no, | 616 | pr_err("pSCSI: Unable to locate %d:%d:%d:%d\n", sh->host_no, |
617 | pdv->pdv_channel_id, pdv->pdv_target_id, pdv->pdv_lun_id); | 617 | pdv->pdv_channel_id, pdv->pdv_target_id, pdv->pdv_lun_id); |
618 | 618 | ||
619 | if (phv->phv_mode == PHV_VIRUTAL_HOST_ID) | 619 | if (phv->phv_mode == PHV_VIRTUAL_HOST_ID) |
620 | scsi_host_put(sh); | 620 | scsi_host_put(sh); |
621 | else if (legacy_mode_enable) { | 621 | else if (legacy_mode_enable) { |
622 | pscsi_pmode_enable_hba(hba, 0); | 622 | pscsi_pmode_enable_hba(hba, 0); |
@@ -898,7 +898,7 @@ static ssize_t pscsi_show_configfs_dev_params(struct se_hba *hba, | |||
898 | ssize_t bl; | 898 | ssize_t bl; |
899 | int i; | 899 | int i; |
900 | 900 | ||
901 | if (phv->phv_mode == PHV_VIRUTAL_HOST_ID) | 901 | if (phv->phv_mode == PHV_VIRTUAL_HOST_ID) |
902 | snprintf(host_id, 16, "%d", pdv->pdv_host_id); | 902 | snprintf(host_id, 16, "%d", pdv->pdv_host_id); |
903 | else | 903 | else |
904 | snprintf(host_id, 16, "PHBA Mode"); | 904 | snprintf(host_id, 16, "PHBA Mode"); |
diff --git a/drivers/target/target_core_pscsi.h b/drivers/target/target_core_pscsi.h
index fdc17b6aefb3..43f1c419e8e5 100644
--- a/drivers/target/target_core_pscsi.h
+++ b/drivers/target/target_core_pscsi.h
@@ -49,7 +49,7 @@ struct pscsi_dev_virt { | |||
49 | } ____cacheline_aligned; | 49 | } ____cacheline_aligned; |
50 | 50 | ||
51 | typedef enum phv_modes { | 51 | typedef enum phv_modes { |
52 | PHV_VIRUTAL_HOST_ID, | 52 | PHV_VIRTUAL_HOST_ID, |
53 | PHV_LLD_SCSI_HOST_NO | 53 | PHV_LLD_SCSI_HOST_NO |
54 | } phv_modes_t; | 54 | } phv_modes_t; |
55 | 55 | ||
diff --git a/drivers/target/target_core_stat.c b/drivers/target/target_core_stat.c
index f8c2d2cc3431..3d44beb0cf1f 100644
--- a/drivers/target/target_core_stat.c
+++ b/drivers/target/target_core_stat.c
@@ -954,7 +954,6 @@ static ssize_t target_stat_scsi_tgt_port_show_attr_in_cmds( | |||
954 | { | 954 | { |
955 | struct se_lun *lun = container_of(pgrps, struct se_lun, port_stat_grps); | 955 | struct se_lun *lun = container_of(pgrps, struct se_lun, port_stat_grps); |
956 | struct se_port *sep; | 956 | struct se_port *sep; |
957 | struct se_portal_group *tpg; | ||
958 | ssize_t ret; | 957 | ssize_t ret; |
959 | 958 | ||
960 | spin_lock(&lun->lun_sep_lock); | 959 | spin_lock(&lun->lun_sep_lock); |
@@ -963,7 +962,6 @@ static ssize_t target_stat_scsi_tgt_port_show_attr_in_cmds( | |||
963 | spin_unlock(&lun->lun_sep_lock); | 962 | spin_unlock(&lun->lun_sep_lock); |
964 | return -ENODEV; | 963 | return -ENODEV; |
965 | } | 964 | } |
966 | tpg = sep->sep_tpg; | ||
967 | 965 | ||
968 | ret = snprintf(page, PAGE_SIZE, "%llu\n", sep->sep_stats.cmd_pdus); | 966 | ret = snprintf(page, PAGE_SIZE, "%llu\n", sep->sep_stats.cmd_pdus); |
969 | spin_unlock(&lun->lun_sep_lock); | 967 | spin_unlock(&lun->lun_sep_lock); |
@@ -976,7 +974,6 @@ static ssize_t target_stat_scsi_tgt_port_show_attr_write_mbytes( | |||
976 | { | 974 | { |
977 | struct se_lun *lun = container_of(pgrps, struct se_lun, port_stat_grps); | 975 | struct se_lun *lun = container_of(pgrps, struct se_lun, port_stat_grps); |
978 | struct se_port *sep; | 976 | struct se_port *sep; |
979 | struct se_portal_group *tpg; | ||
980 | ssize_t ret; | 977 | ssize_t ret; |
981 | 978 | ||
982 | spin_lock(&lun->lun_sep_lock); | 979 | spin_lock(&lun->lun_sep_lock); |
@@ -985,7 +982,6 @@ static ssize_t target_stat_scsi_tgt_port_show_attr_write_mbytes( | |||
985 | spin_unlock(&lun->lun_sep_lock); | 982 | spin_unlock(&lun->lun_sep_lock); |
986 | return -ENODEV; | 983 | return -ENODEV; |
987 | } | 984 | } |
988 | tpg = sep->sep_tpg; | ||
989 | 985 | ||
990 | ret = snprintf(page, PAGE_SIZE, "%u\n", | 986 | ret = snprintf(page, PAGE_SIZE, "%u\n", |
991 | (u32)(sep->sep_stats.rx_data_octets >> 20)); | 987 | (u32)(sep->sep_stats.rx_data_octets >> 20)); |
@@ -999,7 +995,6 @@ static ssize_t target_stat_scsi_tgt_port_show_attr_read_mbytes( | |||
999 | { | 995 | { |
1000 | struct se_lun *lun = container_of(pgrps, struct se_lun, port_stat_grps); | 996 | struct se_lun *lun = container_of(pgrps, struct se_lun, port_stat_grps); |
1001 | struct se_port *sep; | 997 | struct se_port *sep; |
1002 | struct se_portal_group *tpg; | ||
1003 | ssize_t ret; | 998 | ssize_t ret; |
1004 | 999 | ||
1005 | spin_lock(&lun->lun_sep_lock); | 1000 | spin_lock(&lun->lun_sep_lock); |
@@ -1008,7 +1003,6 @@ static ssize_t target_stat_scsi_tgt_port_show_attr_read_mbytes( | |||
1008 | spin_unlock(&lun->lun_sep_lock); | 1003 | spin_unlock(&lun->lun_sep_lock); |
1009 | return -ENODEV; | 1004 | return -ENODEV; |
1010 | } | 1005 | } |
1011 | tpg = sep->sep_tpg; | ||
1012 | 1006 | ||
1013 | ret = snprintf(page, PAGE_SIZE, "%u\n", | 1007 | ret = snprintf(page, PAGE_SIZE, "%u\n", |
1014 | (u32)(sep->sep_stats.tx_data_octets >> 20)); | 1008 | (u32)(sep->sep_stats.tx_data_octets >> 20)); |
@@ -1022,7 +1016,6 @@ static ssize_t target_stat_scsi_tgt_port_show_attr_hs_in_cmds( | |||
1022 | { | 1016 | { |
1023 | struct se_lun *lun = container_of(pgrps, struct se_lun, port_stat_grps); | 1017 | struct se_lun *lun = container_of(pgrps, struct se_lun, port_stat_grps); |
1024 | struct se_port *sep; | 1018 | struct se_port *sep; |
1025 | struct se_portal_group *tpg; | ||
1026 | ssize_t ret; | 1019 | ssize_t ret; |
1027 | 1020 | ||
1028 | spin_lock(&lun->lun_sep_lock); | 1021 | spin_lock(&lun->lun_sep_lock); |
@@ -1031,7 +1024,6 @@ static ssize_t target_stat_scsi_tgt_port_show_attr_hs_in_cmds( | |||
1031 | spin_unlock(&lun->lun_sep_lock); | 1024 | spin_unlock(&lun->lun_sep_lock); |
1032 | return -ENODEV; | 1025 | return -ENODEV; |
1033 | } | 1026 | } |
1034 | tpg = sep->sep_tpg; | ||
1035 | 1027 | ||
1036 | /* FIXME: scsiTgtPortHsInCommands */ | 1028 | /* FIXME: scsiTgtPortHsInCommands */ |
1037 | ret = snprintf(page, PAGE_SIZE, "%u\n", 0); | 1029 | ret = snprintf(page, PAGE_SIZE, "%u\n", 0); |
@@ -1253,7 +1245,7 @@ static ssize_t target_stat_scsi_auth_intr_show_attr_inst( | |||
1253 | ssize_t ret; | 1245 | ssize_t ret; |
1254 | 1246 | ||
1255 | spin_lock_irq(&nacl->device_list_lock); | 1247 | spin_lock_irq(&nacl->device_list_lock); |
1256 | deve = &nacl->device_list[lacl->mapped_lun]; | 1248 | deve = nacl->device_list[lacl->mapped_lun]; |
1257 | if (!deve->se_lun || !deve->se_lun_acl) { | 1249 | if (!deve->se_lun || !deve->se_lun_acl) { |
1258 | spin_unlock_irq(&nacl->device_list_lock); | 1250 | spin_unlock_irq(&nacl->device_list_lock); |
1259 | return -ENODEV; | 1251 | return -ENODEV; |
@@ -1275,16 +1267,14 @@ static ssize_t target_stat_scsi_auth_intr_show_attr_dev( | |||
1275 | struct se_node_acl *nacl = lacl->se_lun_nacl; | 1267 | struct se_node_acl *nacl = lacl->se_lun_nacl; |
1276 | struct se_dev_entry *deve; | 1268 | struct se_dev_entry *deve; |
1277 | struct se_lun *lun; | 1269 | struct se_lun *lun; |
1278 | struct se_portal_group *tpg; | ||
1279 | ssize_t ret; | 1270 | ssize_t ret; |
1280 | 1271 | ||
1281 | spin_lock_irq(&nacl->device_list_lock); | 1272 | spin_lock_irq(&nacl->device_list_lock); |
1282 | deve = &nacl->device_list[lacl->mapped_lun]; | 1273 | deve = nacl->device_list[lacl->mapped_lun]; |
1283 | if (!deve->se_lun || !deve->se_lun_acl) { | 1274 | if (!deve->se_lun || !deve->se_lun_acl) { |
1284 | spin_unlock_irq(&nacl->device_list_lock); | 1275 | spin_unlock_irq(&nacl->device_list_lock); |
1285 | return -ENODEV; | 1276 | return -ENODEV; |
1286 | } | 1277 | } |
1287 | tpg = nacl->se_tpg; | ||
1288 | lun = deve->se_lun; | 1278 | lun = deve->se_lun; |
1289 | /* scsiDeviceIndex */ | 1279 | /* scsiDeviceIndex */ |
1290 | ret = snprintf(page, PAGE_SIZE, "%u\n", lun->lun_se_dev->dev_index); | 1280 | ret = snprintf(page, PAGE_SIZE, "%u\n", lun->lun_se_dev->dev_index); |
@@ -1304,7 +1294,7 @@ static ssize_t target_stat_scsi_auth_intr_show_attr_port( | |||
1304 | ssize_t ret; | 1294 | ssize_t ret; |
1305 | 1295 | ||
1306 | spin_lock_irq(&nacl->device_list_lock); | 1296 | spin_lock_irq(&nacl->device_list_lock); |
1307 | deve = &nacl->device_list[lacl->mapped_lun]; | 1297 | deve = nacl->device_list[lacl->mapped_lun]; |
1308 | if (!deve->se_lun || !deve->se_lun_acl) { | 1298 | if (!deve->se_lun || !deve->se_lun_acl) { |
1309 | spin_unlock_irq(&nacl->device_list_lock); | 1299 | spin_unlock_irq(&nacl->device_list_lock); |
1310 | return -ENODEV; | 1300 | return -ENODEV; |
@@ -1327,7 +1317,7 @@ static ssize_t target_stat_scsi_auth_intr_show_attr_indx( | |||
1327 | ssize_t ret; | 1317 | ssize_t ret; |
1328 | 1318 | ||
1329 | spin_lock_irq(&nacl->device_list_lock); | 1319 | spin_lock_irq(&nacl->device_list_lock); |
1330 | deve = &nacl->device_list[lacl->mapped_lun]; | 1320 | deve = nacl->device_list[lacl->mapped_lun]; |
1331 | if (!deve->se_lun || !deve->se_lun_acl) { | 1321 | if (!deve->se_lun || !deve->se_lun_acl) { |
1332 | spin_unlock_irq(&nacl->device_list_lock); | 1322 | spin_unlock_irq(&nacl->device_list_lock); |
1333 | return -ENODEV; | 1323 | return -ENODEV; |
@@ -1349,7 +1339,7 @@ static ssize_t target_stat_scsi_auth_intr_show_attr_dev_or_port( | |||
1349 | ssize_t ret; | 1339 | ssize_t ret; |
1350 | 1340 | ||
1351 | spin_lock_irq(&nacl->device_list_lock); | 1341 | spin_lock_irq(&nacl->device_list_lock); |
1352 | deve = &nacl->device_list[lacl->mapped_lun]; | 1342 | deve = nacl->device_list[lacl->mapped_lun]; |
1353 | if (!deve->se_lun || !deve->se_lun_acl) { | 1343 | if (!deve->se_lun || !deve->se_lun_acl) { |
1354 | spin_unlock_irq(&nacl->device_list_lock); | 1344 | spin_unlock_irq(&nacl->device_list_lock); |
1355 | return -ENODEV; | 1345 | return -ENODEV; |
@@ -1371,7 +1361,7 @@ static ssize_t target_stat_scsi_auth_intr_show_attr_intr_name( | |||
1371 | ssize_t ret; | 1361 | ssize_t ret; |
1372 | 1362 | ||
1373 | spin_lock_irq(&nacl->device_list_lock); | 1363 | spin_lock_irq(&nacl->device_list_lock); |
1374 | deve = &nacl->device_list[lacl->mapped_lun]; | 1364 | deve = nacl->device_list[lacl->mapped_lun]; |
1375 | if (!deve->se_lun || !deve->se_lun_acl) { | 1365 | if (!deve->se_lun || !deve->se_lun_acl) { |
1376 | spin_unlock_irq(&nacl->device_list_lock); | 1366 | spin_unlock_irq(&nacl->device_list_lock); |
1377 | return -ENODEV; | 1367 | return -ENODEV; |
@@ -1393,7 +1383,7 @@ static ssize_t target_stat_scsi_auth_intr_show_attr_map_indx( | |||
1393 | ssize_t ret; | 1383 | ssize_t ret; |
1394 | 1384 | ||
1395 | spin_lock_irq(&nacl->device_list_lock); | 1385 | spin_lock_irq(&nacl->device_list_lock); |
1396 | deve = &nacl->device_list[lacl->mapped_lun]; | 1386 | deve = nacl->device_list[lacl->mapped_lun]; |
1397 | if (!deve->se_lun || !deve->se_lun_acl) { | 1387 | if (!deve->se_lun || !deve->se_lun_acl) { |
1398 | spin_unlock_irq(&nacl->device_list_lock); | 1388 | spin_unlock_irq(&nacl->device_list_lock); |
1399 | return -ENODEV; | 1389 | return -ENODEV; |
@@ -1415,7 +1405,7 @@ static ssize_t target_stat_scsi_auth_intr_show_attr_att_count( | |||
1415 | ssize_t ret; | 1405 | ssize_t ret; |
1416 | 1406 | ||
1417 | spin_lock_irq(&nacl->device_list_lock); | 1407 | spin_lock_irq(&nacl->device_list_lock); |
1418 | deve = &nacl->device_list[lacl->mapped_lun]; | 1408 | deve = nacl->device_list[lacl->mapped_lun]; |
1419 | if (!deve->se_lun || !deve->se_lun_acl) { | 1409 | if (!deve->se_lun || !deve->se_lun_acl) { |
1420 | spin_unlock_irq(&nacl->device_list_lock); | 1410 | spin_unlock_irq(&nacl->device_list_lock); |
1421 | return -ENODEV; | 1411 | return -ENODEV; |
@@ -1437,7 +1427,7 @@ static ssize_t target_stat_scsi_auth_intr_show_attr_num_cmds( | |||
1437 | ssize_t ret; | 1427 | ssize_t ret; |
1438 | 1428 | ||
1439 | spin_lock_irq(&nacl->device_list_lock); | 1429 | spin_lock_irq(&nacl->device_list_lock); |
1440 | deve = &nacl->device_list[lacl->mapped_lun]; | 1430 | deve = nacl->device_list[lacl->mapped_lun]; |
1441 | if (!deve->se_lun || !deve->se_lun_acl) { | 1431 | if (!deve->se_lun || !deve->se_lun_acl) { |
1442 | spin_unlock_irq(&nacl->device_list_lock); | 1432 | spin_unlock_irq(&nacl->device_list_lock); |
1443 | return -ENODEV; | 1433 | return -ENODEV; |
@@ -1459,7 +1449,7 @@ static ssize_t target_stat_scsi_auth_intr_show_attr_read_mbytes( | |||
1459 | ssize_t ret; | 1449 | ssize_t ret; |
1460 | 1450 | ||
1461 | spin_lock_irq(&nacl->device_list_lock); | 1451 | spin_lock_irq(&nacl->device_list_lock); |
1462 | deve = &nacl->device_list[lacl->mapped_lun]; | 1452 | deve = nacl->device_list[lacl->mapped_lun]; |
1463 | if (!deve->se_lun || !deve->se_lun_acl) { | 1453 | if (!deve->se_lun || !deve->se_lun_acl) { |
1464 | spin_unlock_irq(&nacl->device_list_lock); | 1454 | spin_unlock_irq(&nacl->device_list_lock); |
1465 | return -ENODEV; | 1455 | return -ENODEV; |
@@ -1481,7 +1471,7 @@ static ssize_t target_stat_scsi_auth_intr_show_attr_write_mbytes( | |||
1481 | ssize_t ret; | 1471 | ssize_t ret; |
1482 | 1472 | ||
1483 | spin_lock_irq(&nacl->device_list_lock); | 1473 | spin_lock_irq(&nacl->device_list_lock); |
1484 | deve = &nacl->device_list[lacl->mapped_lun]; | 1474 | deve = nacl->device_list[lacl->mapped_lun]; |
1485 | if (!deve->se_lun || !deve->se_lun_acl) { | 1475 | if (!deve->se_lun || !deve->se_lun_acl) { |
1486 | spin_unlock_irq(&nacl->device_list_lock); | 1476 | spin_unlock_irq(&nacl->device_list_lock); |
1487 | return -ENODEV; | 1477 | return -ENODEV; |
@@ -1503,7 +1493,7 @@ static ssize_t target_stat_scsi_auth_intr_show_attr_hs_num_cmds( | |||
1503 | ssize_t ret; | 1493 | ssize_t ret; |
1504 | 1494 | ||
1505 | spin_lock_irq(&nacl->device_list_lock); | 1495 | spin_lock_irq(&nacl->device_list_lock); |
1506 | deve = &nacl->device_list[lacl->mapped_lun]; | 1496 | deve = nacl->device_list[lacl->mapped_lun]; |
1507 | if (!deve->se_lun || !deve->se_lun_acl) { | 1497 | if (!deve->se_lun || !deve->se_lun_acl) { |
1508 | spin_unlock_irq(&nacl->device_list_lock); | 1498 | spin_unlock_irq(&nacl->device_list_lock); |
1509 | return -ENODEV; | 1499 | return -ENODEV; |
@@ -1525,7 +1515,7 @@ static ssize_t target_stat_scsi_auth_intr_show_attr_creation_time( | |||
1525 | ssize_t ret; | 1515 | ssize_t ret; |
1526 | 1516 | ||
1527 | spin_lock_irq(&nacl->device_list_lock); | 1517 | spin_lock_irq(&nacl->device_list_lock); |
1528 | deve = &nacl->device_list[lacl->mapped_lun]; | 1518 | deve = nacl->device_list[lacl->mapped_lun]; |
1529 | if (!deve->se_lun || !deve->se_lun_acl) { | 1519 | if (!deve->se_lun || !deve->se_lun_acl) { |
1530 | spin_unlock_irq(&nacl->device_list_lock); | 1520 | spin_unlock_irq(&nacl->device_list_lock); |
1531 | return -ENODEV; | 1521 | return -ENODEV; |
@@ -1548,7 +1538,7 @@ static ssize_t target_stat_scsi_auth_intr_show_attr_row_status( | |||
1548 | ssize_t ret; | 1538 | ssize_t ret; |
1549 | 1539 | ||
1550 | spin_lock_irq(&nacl->device_list_lock); | 1540 | spin_lock_irq(&nacl->device_list_lock); |
1551 | deve = &nacl->device_list[lacl->mapped_lun]; | 1541 | deve = nacl->device_list[lacl->mapped_lun]; |
1552 | if (!deve->se_lun || !deve->se_lun_acl) { | 1542 | if (!deve->se_lun || !deve->se_lun_acl) { |
1553 | spin_unlock_irq(&nacl->device_list_lock); | 1543 | spin_unlock_irq(&nacl->device_list_lock); |
1554 | return -ENODEV; | 1544 | return -ENODEV; |
@@ -1621,7 +1611,7 @@ static ssize_t target_stat_scsi_att_intr_port_show_attr_inst( | |||
1621 | ssize_t ret; | 1611 | ssize_t ret; |
1622 | 1612 | ||
1623 | spin_lock_irq(&nacl->device_list_lock); | 1613 | spin_lock_irq(&nacl->device_list_lock); |
1624 | deve = &nacl->device_list[lacl->mapped_lun]; | 1614 | deve = nacl->device_list[lacl->mapped_lun]; |
1625 | if (!deve->se_lun || !deve->se_lun_acl) { | 1615 | if (!deve->se_lun || !deve->se_lun_acl) { |
1626 | spin_unlock_irq(&nacl->device_list_lock); | 1616 | spin_unlock_irq(&nacl->device_list_lock); |
1627 | return -ENODEV; | 1617 | return -ENODEV; |
@@ -1643,16 +1633,14 @@ static ssize_t target_stat_scsi_att_intr_port_show_attr_dev( | |||
1643 | struct se_node_acl *nacl = lacl->se_lun_nacl; | 1633 | struct se_node_acl *nacl = lacl->se_lun_nacl; |
1644 | struct se_dev_entry *deve; | 1634 | struct se_dev_entry *deve; |
1645 | struct se_lun *lun; | 1635 | struct se_lun *lun; |
1646 | struct se_portal_group *tpg; | ||
1647 | ssize_t ret; | 1636 | ssize_t ret; |
1648 | 1637 | ||
1649 | spin_lock_irq(&nacl->device_list_lock); | 1638 | spin_lock_irq(&nacl->device_list_lock); |
1650 | deve = &nacl->device_list[lacl->mapped_lun]; | 1639 | deve = nacl->device_list[lacl->mapped_lun]; |
1651 | if (!deve->se_lun || !deve->se_lun_acl) { | 1640 | if (!deve->se_lun || !deve->se_lun_acl) { |
1652 | spin_unlock_irq(&nacl->device_list_lock); | 1641 | spin_unlock_irq(&nacl->device_list_lock); |
1653 | return -ENODEV; | 1642 | return -ENODEV; |
1654 | } | 1643 | } |
1655 | tpg = nacl->se_tpg; | ||
1656 | lun = deve->se_lun; | 1644 | lun = deve->se_lun; |
1657 | /* scsiDeviceIndex */ | 1645 | /* scsiDeviceIndex */ |
1658 | ret = snprintf(page, PAGE_SIZE, "%u\n", lun->lun_se_dev->dev_index); | 1646 | ret = snprintf(page, PAGE_SIZE, "%u\n", lun->lun_se_dev->dev_index); |
@@ -1672,7 +1660,7 @@ static ssize_t target_stat_scsi_att_intr_port_show_attr_port( | |||
1672 | ssize_t ret; | 1660 | ssize_t ret; |
1673 | 1661 | ||
1674 | spin_lock_irq(&nacl->device_list_lock); | 1662 | spin_lock_irq(&nacl->device_list_lock); |
1675 | deve = &nacl->device_list[lacl->mapped_lun]; | 1663 | deve = nacl->device_list[lacl->mapped_lun]; |
1676 | if (!deve->se_lun || !deve->se_lun_acl) { | 1664 | if (!deve->se_lun || !deve->se_lun_acl) { |
1677 | spin_unlock_irq(&nacl->device_list_lock); | 1665 | spin_unlock_irq(&nacl->device_list_lock); |
1678 | return -ENODEV; | 1666 | return -ENODEV; |
@@ -1721,7 +1709,7 @@ static ssize_t target_stat_scsi_att_intr_port_show_attr_port_auth_indx( | |||
1721 | ssize_t ret; | 1709 | ssize_t ret; |
1722 | 1710 | ||
1723 | spin_lock_irq(&nacl->device_list_lock); | 1711 | spin_lock_irq(&nacl->device_list_lock); |
1724 | deve = &nacl->device_list[lacl->mapped_lun]; | 1712 | deve = nacl->device_list[lacl->mapped_lun]; |
1725 | if (!deve->se_lun || !deve->se_lun_acl) { | 1713 | if (!deve->se_lun || !deve->se_lun_acl) { |
1726 | spin_unlock_irq(&nacl->device_list_lock); | 1714 | spin_unlock_irq(&nacl->device_list_lock); |
1727 | return -ENODEV; | 1715 | return -ENODEV; |
diff --git a/drivers/target/target_core_tmr.c b/drivers/target/target_core_tmr.c
index dcb0618c9388..f015839aef89 100644
--- a/drivers/target/target_core_tmr.c
+++ b/drivers/target/target_core_tmr.c
@@ -40,7 +40,7 @@ | |||
40 | #include "target_core_alua.h" | 40 | #include "target_core_alua.h" |
41 | #include "target_core_pr.h" | 41 | #include "target_core_pr.h" |
42 | 42 | ||
43 | struct se_tmr_req *core_tmr_alloc_req( | 43 | int core_tmr_alloc_req( |
44 | struct se_cmd *se_cmd, | 44 | struct se_cmd *se_cmd, |
45 | void *fabric_tmr_ptr, | 45 | void *fabric_tmr_ptr, |
46 | u8 function, | 46 | u8 function, |
@@ -48,17 +48,20 @@ struct se_tmr_req *core_tmr_alloc_req( | |||
48 | { | 48 | { |
49 | struct se_tmr_req *tmr; | 49 | struct se_tmr_req *tmr; |
50 | 50 | ||
51 | tmr = kmem_cache_zalloc(se_tmr_req_cache, gfp_flags); | 51 | tmr = kzalloc(sizeof(struct se_tmr_req), gfp_flags); |
52 | if (!tmr) { | 52 | if (!tmr) { |
53 | pr_err("Unable to allocate struct se_tmr_req\n"); | 53 | pr_err("Unable to allocate struct se_tmr_req\n"); |
54 | return ERR_PTR(-ENOMEM); | 54 | return -ENOMEM; |
55 | } | 55 | } |
56 | |||
57 | se_cmd->se_cmd_flags |= SCF_SCSI_TMR_CDB; | ||
58 | se_cmd->se_tmr_req = tmr; | ||
56 | tmr->task_cmd = se_cmd; | 59 | tmr->task_cmd = se_cmd; |
57 | tmr->fabric_tmr_ptr = fabric_tmr_ptr; | 60 | tmr->fabric_tmr_ptr = fabric_tmr_ptr; |
58 | tmr->function = function; | 61 | tmr->function = function; |
59 | INIT_LIST_HEAD(&tmr->tmr_list); | 62 | INIT_LIST_HEAD(&tmr->tmr_list); |
60 | 63 | ||
61 | return tmr; | 64 | return 0; |
62 | } | 65 | } |
63 | EXPORT_SYMBOL(core_tmr_alloc_req); | 66 | EXPORT_SYMBOL(core_tmr_alloc_req); |
64 | 67 | ||
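core_tmr_alloc_req() switches from returning an ERR_PTR that every fabric had to unwrap to returning an int and attaching the request (plus the SCF_SCSI_TMR_CDB flag) to the command itself; the dedicated kmem_cache is also dropped for plain kzalloc()/kfree(). A userspace model of the new calling convention, with calloc in place of kzalloc; the names mirror the diff but the caller is invented for illustration:

#include <stdio.h>
#include <stdlib.h>

struct se_tmr_req { int function; };
struct se_cmd { struct se_tmr_req *se_tmr_req; };

static int core_tmr_alloc_req(struct se_cmd *cmd, int function)
{
    struct se_tmr_req *tmr = calloc(1, sizeof(*tmr));

    if (!tmr)
        return -1;                    /* -ENOMEM in the kernel */
    tmr->function = function;
    cmd->se_tmr_req = tmr;            /* attached here, not by the caller */
    return 0;
}

int main(void)
{
    struct se_cmd cmd = { 0 };

    if (core_tmr_alloc_req(&cmd, 1) < 0)    /* 1: some TMR function code */
        return 1;
    printf("allocated tmr, function %d\n", cmd.se_tmr_req->function);
    free(cmd.se_tmr_req);
    return 0;
}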
@@ -69,7 +72,7 @@ void core_tmr_release_req( | |||
69 | unsigned long flags; | 72 | unsigned long flags; |
70 | 73 | ||
71 | if (!dev) { | 74 | if (!dev) { |
72 | kmem_cache_free(se_tmr_req_cache, tmr); | 75 | kfree(tmr); |
73 | return; | 76 | return; |
74 | } | 77 | } |
75 | 78 | ||
@@ -77,7 +80,7 @@ void core_tmr_release_req( | |||
77 | list_del(&tmr->tmr_list); | 80 | list_del(&tmr->tmr_list); |
78 | spin_unlock_irqrestore(&dev->se_tmr_lock, flags); | 81 | spin_unlock_irqrestore(&dev->se_tmr_lock, flags); |
79 | 82 | ||
80 | kmem_cache_free(se_tmr_req_cache, tmr); | 83 | kfree(tmr); |
81 | } | 84 | } |
82 | 85 | ||
83 | static void core_tmr_handle_tas_abort( | 86 | static void core_tmr_handle_tas_abort( |
@@ -115,6 +118,70 @@ static int target_check_cdb_and_preempt(struct list_head *list, | |||
115 | return 1; | 118 | return 1; |
116 | } | 119 | } |
117 | 120 | ||
121 | void core_tmr_abort_task( | ||
122 | struct se_device *dev, | ||
123 | struct se_tmr_req *tmr, | ||
124 | struct se_session *se_sess) | ||
125 | { | ||
126 | struct se_cmd *se_cmd, *tmp_cmd; | ||
127 | unsigned long flags; | ||
128 | int ref_tag; | ||
129 | |||
130 | spin_lock_irqsave(&se_sess->sess_cmd_lock, flags); | ||
131 | list_for_each_entry_safe(se_cmd, tmp_cmd, | ||
132 | &se_sess->sess_cmd_list, se_cmd_list) { | ||
133 | |||
134 | if (dev != se_cmd->se_dev) | ||
135 | continue; | ||
136 | ref_tag = se_cmd->se_tfo->get_task_tag(se_cmd); | ||
137 | if (tmr->ref_task_tag != ref_tag) | ||
138 | continue; | ||
139 | |||
140 | printk("ABORT_TASK: Found referenced %s task_tag: %u\n", | ||
141 | se_cmd->se_tfo->get_fabric_name(), ref_tag); | ||
142 | |||
143 | spin_lock_irq(&se_cmd->t_state_lock); | ||
144 | if (se_cmd->transport_state & CMD_T_COMPLETE) { | ||
145 | printk("ABORT_TASK: ref_tag: %u already complete, skipping\n", ref_tag); | ||
146 | spin_unlock_irq(&se_cmd->t_state_lock); | ||
147 | spin_unlock_irqrestore(&se_sess->sess_cmd_lock, flags); | ||
148 | goto out; | ||
149 | } | ||
150 | se_cmd->transport_state |= CMD_T_ABORTED; | ||
151 | spin_unlock_irq(&se_cmd->t_state_lock); | ||
152 | |||
153 | list_del_init(&se_cmd->se_cmd_list); | ||
154 | kref_get(&se_cmd->cmd_kref); | ||
155 | spin_unlock_irqrestore(&se_sess->sess_cmd_lock, flags); | ||
156 | |||
157 | cancel_work_sync(&se_cmd->work); | ||
158 | transport_wait_for_tasks(se_cmd); | ||
159 | /* | ||
160 | * Now send SAM_STAT_TASK_ABORTED status for the referenced | ||
161 | * se_cmd descriptor.. | ||
162 | */ | ||
163 | transport_send_task_abort(se_cmd); | ||
164 | /* | ||
165 | * Also deal with possible extra acknowledge reference.. | ||
166 | */ | ||
167 | if (se_cmd->se_cmd_flags & SCF_ACK_KREF) | ||
168 | target_put_sess_cmd(se_sess, se_cmd); | ||
169 | |||
170 | target_put_sess_cmd(se_sess, se_cmd); | ||
171 | |||
172 | printk("ABORT_TASK: Sending TMR_FUNCTION_COMPLETE for" | ||
173 | " ref_tag: %d\n", ref_tag); | ||
174 | tmr->response = TMR_FUNCTION_COMPLETE; | ||
175 | return; | ||
176 | } | ||
177 | spin_unlock_irqrestore(&se_sess->sess_cmd_lock, flags); | ||
178 | |||
179 | out: | ||
180 | printk("ABORT_TASK: Sending TMR_TASK_DOES_NOT_EXIST for ref_tag: %d\n", | ||
181 | tmr->ref_task_tag); | ||
182 | tmr->response = TMR_TASK_DOES_NOT_EXIST; | ||
183 | } | ||
184 | |||
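core_tmr_abort_task() above implements the new TMR_ABORT_TASK support: walk the session's command list under sess_cmd_lock, match on the fabric task tag, bail out to TMR_TASK_DOES_NOT_EXIST if the command already completed, otherwise mark it CMD_T_ABORTED, pin it with an extra kref before dropping the lock, quiesce it, and answer TMR_FUNCTION_COMPLETE. A compressed userspace model of that walk; locking, the work cancellation, and the SCF_ACK_KREF bookkeeping are elided:

#include <stdbool.h>
#include <stdio.h>

enum tmr_resp { TMR_FUNCTION_COMPLETE, TMR_TASK_DOES_NOT_EXIST };

struct cmd { unsigned tag; bool complete; int refs; };

static enum tmr_resp abort_task(struct cmd *cmds, int n, unsigned ref_tag)
{
    for (int i = 0; i < n; i++) {
        struct cmd *c = &cmds[i];

        if (c->tag != ref_tag)
            continue;
        if (c->complete)              /* raced with normal completion */
            return TMR_TASK_DOES_NOT_EXIST;
        c->refs++;                    /* pin before any sleeping work */
        /* ... mark aborted, wait for tasks, send TASK ABORTED status ... */
        c->refs--;                    /* drop the pin taken above */
        return TMR_FUNCTION_COMPLETE;
    }
    return TMR_TASK_DOES_NOT_EXIST;   /* no such tag on this session */
}

int main(void)
{
    struct cmd cmds[] = { { .tag = 7 }, { .tag = 9, .complete = true } };

    printf("%d %d %d\n",
           abort_task(cmds, 2, 7),    /* 0: TMR_FUNCTION_COMPLETE */
           abort_task(cmds, 2, 9),    /* 1: already completed */
           abort_task(cmds, 2, 3));   /* 1: tag not found */
    return 0;
}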
118 | static void core_tmr_drain_tmr_list( | 185 | static void core_tmr_drain_tmr_list( |
119 | struct se_device *dev, | 186 | struct se_device *dev, |
120 | struct se_tmr_req *tmr, | 187 | struct se_tmr_req *tmr, |
@@ -150,7 +217,7 @@ static void core_tmr_drain_tmr_list( | |||
150 | continue; | 217 | continue; |
151 | 218 | ||
152 | spin_lock(&cmd->t_state_lock); | 219 | spin_lock(&cmd->t_state_lock); |
153 | if (!atomic_read(&cmd->t_transport_active)) { | 220 | if (!(cmd->transport_state & CMD_T_ACTIVE)) { |
154 | spin_unlock(&cmd->t_state_lock); | 221 | spin_unlock(&cmd->t_state_lock); |
155 | continue; | 222 | continue; |
156 | } | 223 | } |
@@ -255,15 +322,15 @@ static void core_tmr_drain_task_list( | |||
255 | cmd->t_task_cdb[0]); | 322 | cmd->t_task_cdb[0]); |
256 | pr_debug("LUN_RESET: ITT[0x%08x] - pr_res_key: 0x%016Lx" | 323 | pr_debug("LUN_RESET: ITT[0x%08x] - pr_res_key: 0x%016Lx" |
257 | " t_task_cdbs: %d t_task_cdbs_left: %d" | 324 | " t_task_cdbs: %d t_task_cdbs_left: %d" |
258 | " t_task_cdbs_sent: %d -- t_transport_active: %d" | 325 | " t_task_cdbs_sent: %d -- CMD_T_ACTIVE: %d" |
259 | " t_transport_stop: %d t_transport_sent: %d\n", | 326 | " CMD_T_STOP: %d CMD_T_SENT: %d\n", |
260 | cmd->se_tfo->get_task_tag(cmd), cmd->pr_res_key, | 327 | cmd->se_tfo->get_task_tag(cmd), cmd->pr_res_key, |
261 | cmd->t_task_list_num, | 328 | cmd->t_task_list_num, |
262 | atomic_read(&cmd->t_task_cdbs_left), | 329 | atomic_read(&cmd->t_task_cdbs_left), |
263 | atomic_read(&cmd->t_task_cdbs_sent), | 330 | atomic_read(&cmd->t_task_cdbs_sent), |
264 | atomic_read(&cmd->t_transport_active), | 331 | (cmd->transport_state & CMD_T_ACTIVE) != 0, |
265 | atomic_read(&cmd->t_transport_stop), | 332 | (cmd->transport_state & CMD_T_STOP) != 0, |
266 | atomic_read(&cmd->t_transport_sent)); | 333 | (cmd->transport_state & CMD_T_SENT) != 0); |
267 | 334 | ||
268 | /* | 335 | /* |
269 | * If the command may be queued onto a workqueue cancel it now. | 336 | * If the command may be queued onto a workqueue cancel it now. |
@@ -287,19 +354,19 @@ static void core_tmr_drain_task_list( | |||
287 | } | 354 | } |
288 | fe_count = atomic_read(&cmd->t_fe_count); | 355 | fe_count = atomic_read(&cmd->t_fe_count); |
289 | 356 | ||
290 | if (atomic_read(&cmd->t_transport_active)) { | 357 | if (!(cmd->transport_state & CMD_T_ACTIVE)) { |
291 | pr_debug("LUN_RESET: got t_transport_active = 1 for" | 358 | pr_debug("LUN_RESET: got CMD_T_ACTIVE for" |
292 | " task: %p, t_fe_count: %d dev: %p\n", task, | 359 | " task: %p, t_fe_count: %d dev: %p\n", task, |
293 | fe_count, dev); | 360 | fe_count, dev); |
294 | atomic_set(&cmd->t_transport_aborted, 1); | 361 | cmd->transport_state |= CMD_T_ABORTED; |
295 | spin_unlock_irqrestore(&cmd->t_state_lock, flags); | 362 | spin_unlock_irqrestore(&cmd->t_state_lock, flags); |
296 | 363 | ||
297 | core_tmr_handle_tas_abort(tmr_nacl, cmd, tas, fe_count); | 364 | core_tmr_handle_tas_abort(tmr_nacl, cmd, tas, fe_count); |
298 | continue; | 365 | continue; |
299 | } | 366 | } |
300 | pr_debug("LUN_RESET: Got t_transport_active = 0 for task: %p," | 367 | pr_debug("LUN_RESET: Got !CMD_T_ACTIVE for task: %p," |
301 | " t_fe_count: %d dev: %p\n", task, fe_count, dev); | 368 | " t_fe_count: %d dev: %p\n", task, fe_count, dev); |
302 | atomic_set(&cmd->t_transport_aborted, 1); | 369 | cmd->transport_state |= CMD_T_ABORTED; |
303 | spin_unlock_irqrestore(&cmd->t_state_lock, flags); | 370 | spin_unlock_irqrestore(&cmd->t_state_lock, flags); |
304 | 371 | ||
305 | core_tmr_handle_tas_abort(tmr_nacl, cmd, tas, fe_count); | 372 | core_tmr_handle_tas_abort(tmr_nacl, cmd, tas, fe_count); |
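Across these hunks the per-command atomic_t flags (t_transport_active, t_transport_stop, t_transport_sent, ...) collapse into the single cmd->transport_state bitmask guarded by t_state_lock. The conversion pattern, with the CMD_T_* values assumed to be one-bit masks from target_core_base.h:

    unsigned long flags;

    spin_lock_irqsave(&cmd->t_state_lock, flags);
    if (!(cmd->transport_state & CMD_T_ACTIVE)) {      /* was atomic_read() */
        spin_unlock_irqrestore(&cmd->t_state_lock, flags);
        return;
    }
    cmd->transport_state |= CMD_T_ABORTED;             /* was atomic_set(.., 1) */
    cmd->transport_state &= ~CMD_T_QUEUED;             /* was atomic_set(.., 0) */
    spin_unlock_irqrestore(&cmd->t_state_lock, flags);

In the pr_debug() strings the flags read back as 0/1 via (cmd->transport_state & CMD_T_FOO) != 0, which keeps the existing %d conversions working.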
@@ -339,7 +406,7 @@ static void core_tmr_drain_cmd_list( | |||
339 | if (prout_cmd == cmd) | 406 | if (prout_cmd == cmd) |
340 | continue; | 407 | continue; |
341 | 408 | ||
342 | atomic_set(&cmd->t_transport_queue_active, 0); | 409 | cmd->transport_state &= ~CMD_T_QUEUED; |
343 | atomic_dec(&qobj->queue_cnt); | 410 | atomic_dec(&qobj->queue_cnt); |
344 | list_move_tail(&cmd->se_queue_node, &drain_cmd_list); | 411 | list_move_tail(&cmd->se_queue_node, &drain_cmd_list); |
345 | } | 412 | } |
diff --git a/drivers/target/target_core_tpg.c b/drivers/target/target_core_tpg.c index 06336ecd872d..70c3ffb981e7 100644 --- a/drivers/target/target_core_tpg.c +++ b/drivers/target/target_core_tpg.c | |||
@@ -64,7 +64,7 @@ static void core_clear_initiator_node_from_tpg( | |||
64 | 64 | ||
65 | spin_lock_irq(&nacl->device_list_lock); | 65 | spin_lock_irq(&nacl->device_list_lock); |
66 | for (i = 0; i < TRANSPORT_MAX_LUNS_PER_TPG; i++) { | 66 | for (i = 0; i < TRANSPORT_MAX_LUNS_PER_TPG; i++) { |
67 | deve = &nacl->device_list[i]; | 67 | deve = nacl->device_list[i]; |
68 | 68 | ||
69 | if (!(deve->lun_flags & TRANSPORT_LUNFLAGS_INITIATOR_ACCESS)) | 69 | if (!(deve->lun_flags & TRANSPORT_LUNFLAGS_INITIATOR_ACCESS)) |
70 | continue; | 70 | continue; |
@@ -163,7 +163,7 @@ void core_tpg_add_node_to_devs( | |||
163 | 163 | ||
164 | spin_lock(&tpg->tpg_lun_lock); | 164 | spin_lock(&tpg->tpg_lun_lock); |
165 | for (i = 0; i < TRANSPORT_MAX_LUNS_PER_TPG; i++) { | 165 | for (i = 0; i < TRANSPORT_MAX_LUNS_PER_TPG; i++) { |
166 | lun = &tpg->tpg_lun_list[i]; | 166 | lun = tpg->tpg_lun_list[i]; |
167 | if (lun->lun_status != TRANSPORT_LUN_STATUS_ACTIVE) | 167 | if (lun->lun_status != TRANSPORT_LUN_STATUS_ACTIVE) |
168 | continue; | 168 | continue; |
169 | 169 | ||
@@ -222,6 +222,34 @@ static int core_set_queue_depth_for_node( | |||
222 | return 0; | 222 | return 0; |
223 | } | 223 | } |
224 | 224 | ||
225 | void array_free(void *array, int n) | ||
226 | { | ||
227 | void **a = array; | ||
228 | int i; | ||
229 | |||
230 | for (i = 0; i < n; i++) | ||
231 | kfree(a[i]); | ||
232 | kfree(a); | ||
233 | } | ||
234 | |||
235 | static void *array_zalloc(int n, size_t size, gfp_t flags) | ||
236 | { | ||
237 | void **a; | ||
238 | int i; | ||
239 | |||
240 | a = kzalloc(n * sizeof(void*), flags); | ||
241 | if (!a) | ||
242 | return NULL; | ||
243 | for (i = 0; i < n; i++) { | ||
244 | a[i] = kzalloc(size, flags); | ||
245 | if (!a[i]) { | ||
246 | array_free(a, n); | ||
247 | return NULL; | ||
248 | } | ||
249 | } | ||
250 | return a; | ||
251 | } | ||
252 | |||
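array_zalloc() replaces the former single kzalloc() of TRANSPORT_MAX_LUNS_PER_TPG contiguous structs with an array of individually allocated entries, which is why the users below switch from &list[i] to list[i]. Since kfree(NULL) is a no-op, array_free(a, n) is also safe on the partially populated array left behind when an allocation fails mid-loop. A hedged usage sketch, with struct foo as a stand-in type:

    struct foo **table;

    table = array_zalloc(16, sizeof(struct foo), GFP_KERNEL);
    if (!table)
        return -ENOMEM;
    table[3]->bar = 1;   /* elements are pointers now: table[i], not &table[i] */
    array_free(table, 16);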
225 | /* core_create_device_list_for_node(): | 253 | /* core_create_device_list_for_node(): |
226 | * | 254 | * |
227 | * | 255 | * |
@@ -231,15 +259,15 @@ static int core_create_device_list_for_node(struct se_node_acl *nacl) | |||
231 | struct se_dev_entry *deve; | 259 | struct se_dev_entry *deve; |
232 | int i; | 260 | int i; |
233 | 261 | ||
234 | nacl->device_list = kzalloc(sizeof(struct se_dev_entry) * | 262 | nacl->device_list = array_zalloc(TRANSPORT_MAX_LUNS_PER_TPG, |
235 | TRANSPORT_MAX_LUNS_PER_TPG, GFP_KERNEL); | 263 | sizeof(struct se_dev_entry), GFP_KERNEL); |
236 | if (!nacl->device_list) { | 264 | if (!nacl->device_list) { |
237 | pr_err("Unable to allocate memory for" | 265 | pr_err("Unable to allocate memory for" |
238 | " struct se_node_acl->device_list\n"); | 266 | " struct se_node_acl->device_list\n"); |
239 | return -ENOMEM; | 267 | return -ENOMEM; |
240 | } | 268 | } |
241 | for (i = 0; i < TRANSPORT_MAX_LUNS_PER_TPG; i++) { | 269 | for (i = 0; i < TRANSPORT_MAX_LUNS_PER_TPG; i++) { |
242 | deve = &nacl->device_list[i]; | 270 | deve = nacl->device_list[i]; |
243 | 271 | ||
244 | atomic_set(&deve->ua_count, 0); | 272 | atomic_set(&deve->ua_count, 0); |
245 | atomic_set(&deve->pr_ref_count, 0); | 273 | atomic_set(&deve->pr_ref_count, 0); |
@@ -274,6 +302,8 @@ struct se_node_acl *core_tpg_check_initiator_node_acl( | |||
274 | 302 | ||
275 | INIT_LIST_HEAD(&acl->acl_list); | 303 | INIT_LIST_HEAD(&acl->acl_list); |
276 | INIT_LIST_HEAD(&acl->acl_sess_list); | 304 | INIT_LIST_HEAD(&acl->acl_sess_list); |
305 | kref_init(&acl->acl_kref); | ||
306 | init_completion(&acl->acl_free_comp); | ||
277 | spin_lock_init(&acl->device_list_lock); | 307 | spin_lock_init(&acl->device_list_lock); |
278 | spin_lock_init(&acl->nacl_sess_lock); | 308 | spin_lock_init(&acl->nacl_sess_lock); |
279 | atomic_set(&acl->acl_pr_ref_count, 0); | 309 | atomic_set(&acl->acl_pr_ref_count, 0); |
@@ -329,19 +359,19 @@ void core_tpg_wait_for_nacl_pr_ref(struct se_node_acl *nacl) | |||
329 | 359 | ||
330 | void core_tpg_clear_object_luns(struct se_portal_group *tpg) | 360 | void core_tpg_clear_object_luns(struct se_portal_group *tpg) |
331 | { | 361 | { |
332 | int i, ret; | 362 | int i; |
333 | struct se_lun *lun; | 363 | struct se_lun *lun; |
334 | 364 | ||
335 | spin_lock(&tpg->tpg_lun_lock); | 365 | spin_lock(&tpg->tpg_lun_lock); |
336 | for (i = 0; i < TRANSPORT_MAX_LUNS_PER_TPG; i++) { | 366 | for (i = 0; i < TRANSPORT_MAX_LUNS_PER_TPG; i++) { |
337 | lun = &tpg->tpg_lun_list[i]; | 367 | lun = tpg->tpg_lun_list[i]; |
338 | 368 | ||
339 | if ((lun->lun_status != TRANSPORT_LUN_STATUS_ACTIVE) || | 369 | if ((lun->lun_status != TRANSPORT_LUN_STATUS_ACTIVE) || |
340 | (lun->lun_se_dev == NULL)) | 370 | (lun->lun_se_dev == NULL)) |
341 | continue; | 371 | continue; |
342 | 372 | ||
343 | spin_unlock(&tpg->tpg_lun_lock); | 373 | spin_unlock(&tpg->tpg_lun_lock); |
344 | ret = core_dev_del_lun(tpg, lun->unpacked_lun); | 374 | core_dev_del_lun(tpg, lun->unpacked_lun); |
345 | spin_lock(&tpg->tpg_lun_lock); | 375 | spin_lock(&tpg->tpg_lun_lock); |
346 | } | 376 | } |
347 | spin_unlock(&tpg->tpg_lun_lock); | 377 | spin_unlock(&tpg->tpg_lun_lock); |
@@ -402,6 +432,8 @@ struct se_node_acl *core_tpg_add_initiator_node_acl( | |||
402 | 432 | ||
403 | INIT_LIST_HEAD(&acl->acl_list); | 433 | INIT_LIST_HEAD(&acl->acl_list); |
404 | INIT_LIST_HEAD(&acl->acl_sess_list); | 434 | INIT_LIST_HEAD(&acl->acl_sess_list); |
435 | kref_init(&acl->acl_kref); | ||
436 | init_completion(&acl->acl_free_comp); | ||
405 | spin_lock_init(&acl->device_list_lock); | 437 | spin_lock_init(&acl->device_list_lock); |
406 | spin_lock_init(&acl->nacl_sess_lock); | 438 | spin_lock_init(&acl->nacl_sess_lock); |
407 | atomic_set(&acl->acl_pr_ref_count, 0); | 439 | atomic_set(&acl->acl_pr_ref_count, 0); |
@@ -448,39 +480,47 @@ int core_tpg_del_initiator_node_acl( | |||
448 | struct se_node_acl *acl, | 480 | struct se_node_acl *acl, |
449 | int force) | 481 | int force) |
450 | { | 482 | { |
483 | LIST_HEAD(sess_list); | ||
451 | struct se_session *sess, *sess_tmp; | 484 | struct se_session *sess, *sess_tmp; |
452 | int dynamic_acl = 0; | 485 | unsigned long flags; |
486 | int rc; | ||
453 | 487 | ||
454 | spin_lock_irq(&tpg->acl_node_lock); | 488 | spin_lock_irq(&tpg->acl_node_lock); |
455 | if (acl->dynamic_node_acl) { | 489 | if (acl->dynamic_node_acl) { |
456 | acl->dynamic_node_acl = 0; | 490 | acl->dynamic_node_acl = 0; |
457 | dynamic_acl = 1; | ||
458 | } | 491 | } |
459 | list_del(&acl->acl_list); | 492 | list_del(&acl->acl_list); |
460 | tpg->num_node_acls--; | 493 | tpg->num_node_acls--; |
461 | spin_unlock_irq(&tpg->acl_node_lock); | 494 | spin_unlock_irq(&tpg->acl_node_lock); |
462 | 495 | ||
463 | spin_lock_bh(&tpg->session_lock); | 496 | spin_lock_irqsave(&acl->nacl_sess_lock, flags); |
464 | list_for_each_entry_safe(sess, sess_tmp, | 497 | acl->acl_stop = 1; |
465 | &tpg->tpg_sess_list, sess_list) { | 498 | |
466 | if (sess->se_node_acl != acl) | 499 | list_for_each_entry_safe(sess, sess_tmp, &acl->acl_sess_list, |
467 | continue; | 500 | sess_acl_list) { |
468 | /* | 501 | if (sess->sess_tearing_down != 0) |
469 | * Determine if the session needs to be closed by our context. | ||
470 | */ | ||
471 | if (!tpg->se_tpg_tfo->shutdown_session(sess)) | ||
472 | continue; | 502 | continue; |
473 | 503 | ||
474 | spin_unlock_bh(&tpg->session_lock); | 504 | target_get_session(sess); |
475 | /* | 505 | list_move(&sess->sess_acl_list, &sess_list); |
476 | * If the $FABRIC_MOD session for the Initiator Node ACL exists, | 506 | } |
477 | * forcefully shutdown the $FABRIC_MOD session/nexus. | 507 | spin_unlock_irqrestore(&acl->nacl_sess_lock, flags); |
478 | */ | 508 | |
479 | tpg->se_tpg_tfo->close_session(sess); | 509 | list_for_each_entry_safe(sess, sess_tmp, &sess_list, sess_acl_list) { |
510 | list_del(&sess->sess_acl_list); | ||
480 | 511 | ||
481 | spin_lock_bh(&tpg->session_lock); | 512 | rc = tpg->se_tpg_tfo->shutdown_session(sess); |
513 | target_put_session(sess); | ||
514 | if (!rc) | ||
515 | continue; | ||
516 | target_put_session(sess); | ||
482 | } | 517 | } |
483 | spin_unlock_bh(&tpg->session_lock); | 518 | target_put_nacl(acl); |
519 | /* | ||
520 | * Wait for last target_put_nacl() to complete in target_complete_nacl() | ||
521 | * for active fabric session transport_deregister_session() callbacks. | ||
522 | */ | ||
523 | wait_for_completion(&acl->acl_free_comp); | ||
484 | 524 | ||
485 | core_tpg_wait_for_nacl_pr_ref(acl); | 525 | core_tpg_wait_for_nacl_pr_ref(acl); |
486 | core_clear_initiator_node_from_tpg(acl, tpg); | 526 | core_clear_initiator_node_from_tpg(acl, tpg); |
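The reworked teardown is two-phase: sessions move to a private sess_list under nacl_sess_lock with a reference pinned, get shut down outside the lock, and the caller then blocks on acl_free_comp until the final target_put_nacl() fires target_complete_nacl(). The reference choreography in the loop above, spelled out (an illustrative trace, assuming sess_kref starts at 1 from transport_init_session()):

    /*
     * target_get_session(sess);          sess_kref 1 -> 2, under nacl_sess_lock
     * rc = ->shutdown_session(sess);     fabric reports whether it shut down
     * target_put_session(sess);          2 -> 1, balances the get above
     * if (rc)
     *         target_put_session(sess);  1 -> 0, final put -> ->close_session()
     */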
@@ -507,6 +547,7 @@ int core_tpg_set_initiator_node_queue_depth( | |||
507 | { | 547 | { |
508 | struct se_session *sess, *init_sess = NULL; | 548 | struct se_session *sess, *init_sess = NULL; |
509 | struct se_node_acl *acl; | 549 | struct se_node_acl *acl; |
550 | unsigned long flags; | ||
510 | int dynamic_acl = 0; | 551 | int dynamic_acl = 0; |
511 | 552 | ||
512 | spin_lock_irq(&tpg->acl_node_lock); | 553 | spin_lock_irq(&tpg->acl_node_lock); |
@@ -525,7 +566,7 @@ int core_tpg_set_initiator_node_queue_depth( | |||
525 | } | 566 | } |
526 | spin_unlock_irq(&tpg->acl_node_lock); | 567 | spin_unlock_irq(&tpg->acl_node_lock); |
527 | 568 | ||
528 | spin_lock_bh(&tpg->session_lock); | 569 | spin_lock_irqsave(&tpg->session_lock, flags); |
529 | list_for_each_entry(sess, &tpg->tpg_sess_list, sess_list) { | 570 | list_for_each_entry(sess, &tpg->tpg_sess_list, sess_list) { |
530 | if (sess->se_node_acl != acl) | 571 | if (sess->se_node_acl != acl) |
531 | continue; | 572 | continue; |
@@ -537,7 +578,7 @@ int core_tpg_set_initiator_node_queue_depth( | |||
537 | " depth and force session reinstatement" | 578 | " depth and force session reinstatement" |
538 | " use the \"force=1\" parameter.\n", | 579 | " use the \"force=1\" parameter.\n", |
539 | tpg->se_tpg_tfo->get_fabric_name(), initiatorname); | 580 | tpg->se_tpg_tfo->get_fabric_name(), initiatorname); |
540 | spin_unlock_bh(&tpg->session_lock); | 581 | spin_unlock_irqrestore(&tpg->session_lock, flags); |
541 | 582 | ||
542 | spin_lock_irq(&tpg->acl_node_lock); | 583 | spin_lock_irq(&tpg->acl_node_lock); |
543 | if (dynamic_acl) | 584 | if (dynamic_acl) |
@@ -567,7 +608,7 @@ int core_tpg_set_initiator_node_queue_depth( | |||
567 | acl->queue_depth = queue_depth; | 608 | acl->queue_depth = queue_depth; |
568 | 609 | ||
569 | if (core_set_queue_depth_for_node(tpg, acl) < 0) { | 610 | if (core_set_queue_depth_for_node(tpg, acl) < 0) { |
570 | spin_unlock_bh(&tpg->session_lock); | 611 | spin_unlock_irqrestore(&tpg->session_lock, flags); |
571 | /* | 612 | /* |
572 | * Force session reinstatement if | 613 | * Force session reinstatement if |
573 | * core_set_queue_depth_for_node() failed, because we assume | 614 | * core_set_queue_depth_for_node() failed, because we assume |
@@ -583,7 +624,7 @@ int core_tpg_set_initiator_node_queue_depth( | |||
583 | spin_unlock_irq(&tpg->acl_node_lock); | 624 | spin_unlock_irq(&tpg->acl_node_lock); |
584 | return -EINVAL; | 625 | return -EINVAL; |
585 | } | 626 | } |
586 | spin_unlock_bh(&tpg->session_lock); | 627 | spin_unlock_irqrestore(&tpg->session_lock, flags); |
587 | /* | 628 | /* |
588 | * If the $FABRIC_MOD session for the Initiator Node ACL exists, | 629 | * If the $FABRIC_MOD session for the Initiator Node ACL exists, |
589 | * forcefully shutdown the $FABRIC_MOD session/nexus. | 630 | * forcefully shutdown the $FABRIC_MOD session/nexus. |
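The spin_lock_bh() -> spin_lock_irqsave() conversions running through core_tpg_set_initiator_node_queue_depth() are needed because session_lock may now be taken from hard-IRQ completion paths in HW fabric drivers, where a _bh lock is insufficient. The pattern is uniform at every site:

    unsigned long flags;

    spin_lock_irqsave(&tpg->session_lock, flags);   /* was spin_lock_bh() */
    /* ... walk tpg->tpg_sess_list ... */
    spin_unlock_irqrestore(&tpg->session_lock, flags);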
@@ -647,8 +688,8 @@ int core_tpg_register( | |||
647 | struct se_lun *lun; | 688 | struct se_lun *lun; |
648 | u32 i; | 689 | u32 i; |
649 | 690 | ||
650 | se_tpg->tpg_lun_list = kzalloc((sizeof(struct se_lun) * | 691 | se_tpg->tpg_lun_list = array_zalloc(TRANSPORT_MAX_LUNS_PER_TPG, |
651 | TRANSPORT_MAX_LUNS_PER_TPG), GFP_KERNEL); | 692 | sizeof(struct se_lun), GFP_KERNEL); |
652 | if (!se_tpg->tpg_lun_list) { | 693 | if (!se_tpg->tpg_lun_list) { |
653 | pr_err("Unable to allocate struct se_portal_group->" | 694 | pr_err("Unable to allocate struct se_portal_group->" |
654 | "tpg_lun_list\n"); | 695 | "tpg_lun_list\n"); |
@@ -656,7 +697,7 @@ int core_tpg_register( | |||
656 | } | 697 | } |
657 | 698 | ||
658 | for (i = 0; i < TRANSPORT_MAX_LUNS_PER_TPG; i++) { | 699 | for (i = 0; i < TRANSPORT_MAX_LUNS_PER_TPG; i++) { |
659 | lun = &se_tpg->tpg_lun_list[i]; | 700 | lun = se_tpg->tpg_lun_list[i]; |
660 | lun->unpacked_lun = i; | 701 | lun->unpacked_lun = i; |
661 | lun->lun_status = TRANSPORT_LUN_STATUS_FREE; | 702 | lun->lun_status = TRANSPORT_LUN_STATUS_FREE; |
662 | atomic_set(&lun->lun_acl_count, 0); | 703 | atomic_set(&lun->lun_acl_count, 0); |
@@ -742,7 +783,7 @@ int core_tpg_deregister(struct se_portal_group *se_tpg) | |||
742 | core_tpg_release_virtual_lun0(se_tpg); | 783 | core_tpg_release_virtual_lun0(se_tpg); |
743 | 784 | ||
744 | se_tpg->se_tpg_fabric_ptr = NULL; | 785 | se_tpg->se_tpg_fabric_ptr = NULL; |
745 | kfree(se_tpg->tpg_lun_list); | 786 | array_free(se_tpg->tpg_lun_list, TRANSPORT_MAX_LUNS_PER_TPG); |
746 | return 0; | 787 | return 0; |
747 | } | 788 | } |
748 | EXPORT_SYMBOL(core_tpg_deregister); | 789 | EXPORT_SYMBOL(core_tpg_deregister); |
@@ -763,7 +804,7 @@ struct se_lun *core_tpg_pre_addlun( | |||
763 | } | 804 | } |
764 | 805 | ||
765 | spin_lock(&tpg->tpg_lun_lock); | 806 | spin_lock(&tpg->tpg_lun_lock); |
766 | lun = &tpg->tpg_lun_list[unpacked_lun]; | 807 | lun = tpg->tpg_lun_list[unpacked_lun]; |
767 | if (lun->lun_status == TRANSPORT_LUN_STATUS_ACTIVE) { | 808 | if (lun->lun_status == TRANSPORT_LUN_STATUS_ACTIVE) { |
768 | pr_err("TPG Logical Unit Number: %u is already active" | 809 | pr_err("TPG Logical Unit Number: %u is already active" |
769 | " on %s Target Portal Group: %u, ignoring request.\n", | 810 | " on %s Target Portal Group: %u, ignoring request.\n", |
@@ -821,7 +862,7 @@ struct se_lun *core_tpg_pre_dellun( | |||
821 | } | 862 | } |
822 | 863 | ||
823 | spin_lock(&tpg->tpg_lun_lock); | 864 | spin_lock(&tpg->tpg_lun_lock); |
824 | lun = &tpg->tpg_lun_list[unpacked_lun]; | 865 | lun = tpg->tpg_lun_list[unpacked_lun]; |
825 | if (lun->lun_status != TRANSPORT_LUN_STATUS_ACTIVE) { | 866 | if (lun->lun_status != TRANSPORT_LUN_STATUS_ACTIVE) { |
826 | pr_err("%s Logical Unit Number: %u is not active on" | 867 | pr_err("%s Logical Unit Number: %u is not active on" |
827 | " Target Portal Group: %u, ignoring request.\n", | 868 | " Target Portal Group: %u, ignoring request.\n", |
diff --git a/drivers/target/target_core_transport.c b/drivers/target/target_core_transport.c index 929cc9364c8a..443704f84fd5 100644 --- a/drivers/target/target_core_transport.c +++ b/drivers/target/target_core_transport.c | |||
@@ -37,6 +37,7 @@ | |||
37 | #include <linux/in.h> | 37 | #include <linux/in.h> |
38 | #include <linux/cdrom.h> | 38 | #include <linux/cdrom.h> |
39 | #include <linux/module.h> | 39 | #include <linux/module.h> |
40 | #include <linux/ratelimit.h> | ||
40 | #include <asm/unaligned.h> | 41 | #include <asm/unaligned.h> |
41 | #include <net/sock.h> | 42 | #include <net/sock.h> |
42 | #include <net/tcp.h> | 43 | #include <net/tcp.h> |
@@ -58,7 +59,6 @@ static int sub_api_initialized; | |||
58 | 59 | ||
59 | static struct workqueue_struct *target_completion_wq; | 60 | static struct workqueue_struct *target_completion_wq; |
60 | static struct kmem_cache *se_sess_cache; | 61 | static struct kmem_cache *se_sess_cache; |
61 | struct kmem_cache *se_tmr_req_cache; | ||
62 | struct kmem_cache *se_ua_cache; | 62 | struct kmem_cache *se_ua_cache; |
63 | struct kmem_cache *t10_pr_reg_cache; | 63 | struct kmem_cache *t10_pr_reg_cache; |
64 | struct kmem_cache *t10_alua_lu_gp_cache; | 64 | struct kmem_cache *t10_alua_lu_gp_cache; |
@@ -77,26 +77,17 @@ static int transport_generic_get_mem(struct se_cmd *cmd); | |||
77 | static void transport_put_cmd(struct se_cmd *cmd); | 77 | static void transport_put_cmd(struct se_cmd *cmd); |
78 | static void transport_remove_cmd_from_queue(struct se_cmd *cmd); | 78 | static void transport_remove_cmd_from_queue(struct se_cmd *cmd); |
79 | static int transport_set_sense_codes(struct se_cmd *cmd, u8 asc, u8 ascq); | 79 | static int transport_set_sense_codes(struct se_cmd *cmd, u8 asc, u8 ascq); |
80 | static void transport_generic_request_failure(struct se_cmd *); | ||
81 | static void target_complete_ok_work(struct work_struct *work); | 80 | static void target_complete_ok_work(struct work_struct *work); |
82 | 81 | ||
83 | int init_se_kmem_caches(void) | 82 | int init_se_kmem_caches(void) |
84 | { | 83 | { |
85 | se_tmr_req_cache = kmem_cache_create("se_tmr_cache", | ||
86 | sizeof(struct se_tmr_req), __alignof__(struct se_tmr_req), | ||
87 | 0, NULL); | ||
88 | if (!se_tmr_req_cache) { | ||
89 | pr_err("kmem_cache_create() for struct se_tmr_req" | ||
90 | " failed\n"); | ||
91 | goto out; | ||
92 | } | ||
93 | se_sess_cache = kmem_cache_create("se_sess_cache", | 84 | se_sess_cache = kmem_cache_create("se_sess_cache", |
94 | sizeof(struct se_session), __alignof__(struct se_session), | 85 | sizeof(struct se_session), __alignof__(struct se_session), |
95 | 0, NULL); | 86 | 0, NULL); |
96 | if (!se_sess_cache) { | 87 | if (!se_sess_cache) { |
97 | pr_err("kmem_cache_create() for struct se_session" | 88 | pr_err("kmem_cache_create() for struct se_session" |
98 | " failed\n"); | 89 | " failed\n"); |
99 | goto out_free_tmr_req_cache; | 90 | goto out; |
100 | } | 91 | } |
101 | se_ua_cache = kmem_cache_create("se_ua_cache", | 92 | se_ua_cache = kmem_cache_create("se_ua_cache", |
102 | sizeof(struct se_ua), __alignof__(struct se_ua), | 93 | sizeof(struct se_ua), __alignof__(struct se_ua), |
@@ -169,8 +160,6 @@ out_free_ua_cache: | |||
169 | kmem_cache_destroy(se_ua_cache); | 160 | kmem_cache_destroy(se_ua_cache); |
170 | out_free_sess_cache: | 161 | out_free_sess_cache: |
171 | kmem_cache_destroy(se_sess_cache); | 162 | kmem_cache_destroy(se_sess_cache); |
172 | out_free_tmr_req_cache: | ||
173 | kmem_cache_destroy(se_tmr_req_cache); | ||
174 | out: | 163 | out: |
175 | return -ENOMEM; | 164 | return -ENOMEM; |
176 | } | 165 | } |
@@ -178,7 +167,6 @@ out: | |||
178 | void release_se_kmem_caches(void) | 167 | void release_se_kmem_caches(void) |
179 | { | 168 | { |
180 | destroy_workqueue(target_completion_wq); | 169 | destroy_workqueue(target_completion_wq); |
181 | kmem_cache_destroy(se_tmr_req_cache); | ||
182 | kmem_cache_destroy(se_sess_cache); | 170 | kmem_cache_destroy(se_sess_cache); |
183 | kmem_cache_destroy(se_ua_cache); | 171 | kmem_cache_destroy(se_ua_cache); |
184 | kmem_cache_destroy(t10_pr_reg_cache); | 172 | kmem_cache_destroy(t10_pr_reg_cache); |
@@ -258,13 +246,14 @@ struct se_session *transport_init_session(void) | |||
258 | INIT_LIST_HEAD(&se_sess->sess_cmd_list); | 246 | INIT_LIST_HEAD(&se_sess->sess_cmd_list); |
259 | INIT_LIST_HEAD(&se_sess->sess_wait_list); | 247 | INIT_LIST_HEAD(&se_sess->sess_wait_list); |
260 | spin_lock_init(&se_sess->sess_cmd_lock); | 248 | spin_lock_init(&se_sess->sess_cmd_lock); |
249 | kref_init(&se_sess->sess_kref); | ||
261 | 250 | ||
262 | return se_sess; | 251 | return se_sess; |
263 | } | 252 | } |
264 | EXPORT_SYMBOL(transport_init_session); | 253 | EXPORT_SYMBOL(transport_init_session); |
265 | 254 | ||
266 | /* | 255 | /* |
267 | * Called with spin_lock_bh(&struct se_portal_group->session_lock called. | 256 | * Called with spin_lock_irqsave(&struct se_portal_group->session_lock) held. |
268 | */ | 257 | */ |
269 | void __transport_register_session( | 258 | void __transport_register_session( |
270 | struct se_portal_group *se_tpg, | 259 | struct se_portal_group *se_tpg, |
@@ -293,6 +282,8 @@ void __transport_register_session( | |||
293 | &buf[0], PR_REG_ISID_LEN); | 282 | &buf[0], PR_REG_ISID_LEN); |
294 | se_sess->sess_bin_isid = get_unaligned_be64(&buf[0]); | 283 | se_sess->sess_bin_isid = get_unaligned_be64(&buf[0]); |
295 | } | 284 | } |
285 | kref_get(&se_nacl->acl_kref); | ||
286 | |||
296 | spin_lock_irq(&se_nacl->nacl_sess_lock); | 287 | spin_lock_irq(&se_nacl->nacl_sess_lock); |
297 | /* | 288 | /* |
298 | * The se_nacl->nacl_sess pointer will be set to the | 289 | * The se_nacl->nacl_sess pointer will be set to the |
@@ -317,12 +308,48 @@ void transport_register_session( | |||
317 | struct se_session *se_sess, | 308 | struct se_session *se_sess, |
318 | void *fabric_sess_ptr) | 309 | void *fabric_sess_ptr) |
319 | { | 310 | { |
320 | spin_lock_bh(&se_tpg->session_lock); | 311 | unsigned long flags; |
312 | |||
313 | spin_lock_irqsave(&se_tpg->session_lock, flags); | ||
321 | __transport_register_session(se_tpg, se_nacl, se_sess, fabric_sess_ptr); | 314 | __transport_register_session(se_tpg, se_nacl, se_sess, fabric_sess_ptr); |
322 | spin_unlock_bh(&se_tpg->session_lock); | 315 | spin_unlock_irqrestore(&se_tpg->session_lock, flags); |
323 | } | 316 | } |
324 | EXPORT_SYMBOL(transport_register_session); | 317 | EXPORT_SYMBOL(transport_register_session); |
325 | 318 | ||
319 | static void target_release_session(struct kref *kref) | ||
320 | { | ||
321 | struct se_session *se_sess = container_of(kref, | ||
322 | struct se_session, sess_kref); | ||
323 | struct se_portal_group *se_tpg = se_sess->se_tpg; | ||
324 | |||
325 | se_tpg->se_tpg_tfo->close_session(se_sess); | ||
326 | } | ||
327 | |||
328 | void target_get_session(struct se_session *se_sess) | ||
329 | { | ||
330 | kref_get(&se_sess->sess_kref); | ||
331 | } | ||
332 | EXPORT_SYMBOL(target_get_session); | ||
333 | |||
334 | int target_put_session(struct se_session *se_sess) | ||
335 | { | ||
336 | return kref_put(&se_sess->sess_kref, target_release_session); | ||
337 | } | ||
338 | EXPORT_SYMBOL(target_put_session); | ||
339 | |||
340 | static void target_complete_nacl(struct kref *kref) | ||
341 | { | ||
342 | struct se_node_acl *nacl = container_of(kref, | ||
343 | struct se_node_acl, acl_kref); | ||
344 | |||
345 | complete(&nacl->acl_free_comp); | ||
346 | } | ||
347 | |||
348 | void target_put_nacl(struct se_node_acl *nacl) | ||
349 | { | ||
350 | kref_put(&nacl->acl_kref, target_complete_nacl); | ||
351 | } | ||
352 | |||
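target_get_session()/target_put_session() let fabric code pin a session across asynchronous work; the final put reaches the fabric's ->close_session() via target_release_session(). A hedged fabric-side sketch (both function names are hypothetical):

    static void example_defer(struct se_session *se_sess)
    {
        target_get_session(se_sess);   /* pin across the deferred work */
        /* ... queue work that carries se_sess ... */
    }

    static void example_work_fn(struct se_session *se_sess)
    {
        /* ... use the session ... */
        target_put_session(se_sess);   /* last put -> ->close_session() */
    }

target_put_nacl() follows the same shape but completes acl_free_comp rather than freeing, so the configfs removal path can sleep until every session reference on the NodeACL is gone.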
326 | void transport_deregister_session_configfs(struct se_session *se_sess) | 353 | void transport_deregister_session_configfs(struct se_session *se_sess) |
327 | { | 354 | { |
328 | struct se_node_acl *se_nacl; | 355 | struct se_node_acl *se_nacl; |
@@ -333,7 +360,8 @@ void transport_deregister_session_configfs(struct se_session *se_sess) | |||
333 | se_nacl = se_sess->se_node_acl; | 360 | se_nacl = se_sess->se_node_acl; |
334 | if (se_nacl) { | 361 | if (se_nacl) { |
335 | spin_lock_irqsave(&se_nacl->nacl_sess_lock, flags); | 362 | spin_lock_irqsave(&se_nacl->nacl_sess_lock, flags); |
336 | list_del(&se_sess->sess_acl_list); | 363 | if (se_nacl->acl_stop == 0) |
364 | list_del(&se_sess->sess_acl_list); | ||
337 | /* | 365 | /* |
338 | * If the session list is empty, then clear the pointer. | 366 | * If the session list is empty, then clear the pointer. |
339 | * Otherwise, set the struct se_session pointer from the tail | 367 | * Otherwise, set the struct se_session pointer from the tail |
@@ -360,13 +388,16 @@ EXPORT_SYMBOL(transport_free_session); | |||
360 | void transport_deregister_session(struct se_session *se_sess) | 388 | void transport_deregister_session(struct se_session *se_sess) |
361 | { | 389 | { |
362 | struct se_portal_group *se_tpg = se_sess->se_tpg; | 390 | struct se_portal_group *se_tpg = se_sess->se_tpg; |
391 | struct target_core_fabric_ops *se_tfo; | ||
363 | struct se_node_acl *se_nacl; | 392 | struct se_node_acl *se_nacl; |
364 | unsigned long flags; | 393 | unsigned long flags; |
394 | bool comp_nacl = true; | ||
365 | 395 | ||
366 | if (!se_tpg) { | 396 | if (!se_tpg) { |
367 | transport_free_session(se_sess); | 397 | transport_free_session(se_sess); |
368 | return; | 398 | return; |
369 | } | 399 | } |
400 | se_tfo = se_tpg->se_tpg_tfo; | ||
370 | 401 | ||
371 | spin_lock_irqsave(&se_tpg->session_lock, flags); | 402 | spin_lock_irqsave(&se_tpg->session_lock, flags); |
372 | list_del(&se_sess->sess_list); | 403 | list_del(&se_sess->sess_list); |
@@ -379,29 +410,34 @@ void transport_deregister_session(struct se_session *se_sess) | |||
379 | * struct se_node_acl if it had been previously dynamically generated. | 410 | * struct se_node_acl if it had been previously dynamically generated. |
380 | */ | 411 | */ |
381 | se_nacl = se_sess->se_node_acl; | 412 | se_nacl = se_sess->se_node_acl; |
382 | if (se_nacl) { | 413 | |
383 | spin_lock_irqsave(&se_tpg->acl_node_lock, flags); | 414 | spin_lock_irqsave(&se_tpg->acl_node_lock, flags); |
384 | if (se_nacl->dynamic_node_acl) { | 415 | if (se_nacl && se_nacl->dynamic_node_acl) { |
385 | if (!se_tpg->se_tpg_tfo->tpg_check_demo_mode_cache( | 416 | if (!se_tfo->tpg_check_demo_mode_cache(se_tpg)) { |
386 | se_tpg)) { | 417 | list_del(&se_nacl->acl_list); |
387 | list_del(&se_nacl->acl_list); | 418 | se_tpg->num_node_acls--; |
388 | se_tpg->num_node_acls--; | 419 | spin_unlock_irqrestore(&se_tpg->acl_node_lock, flags); |
389 | spin_unlock_irqrestore(&se_tpg->acl_node_lock, flags); | 420 | core_tpg_wait_for_nacl_pr_ref(se_nacl); |
390 | 421 | core_free_device_list_for_node(se_nacl, se_tpg); | |
391 | core_tpg_wait_for_nacl_pr_ref(se_nacl); | 422 | se_tfo->tpg_release_fabric_acl(se_tpg, se_nacl); |
392 | core_free_device_list_for_node(se_nacl, se_tpg); | 423 | |
393 | se_tpg->se_tpg_tfo->tpg_release_fabric_acl(se_tpg, | 424 | comp_nacl = false; |
394 | se_nacl); | 425 | spin_lock_irqsave(&se_tpg->acl_node_lock, flags); |
395 | spin_lock_irqsave(&se_tpg->acl_node_lock, flags); | ||
396 | } | ||
397 | } | 426 | } |
398 | spin_unlock_irqrestore(&se_tpg->acl_node_lock, flags); | ||
399 | } | 427 | } |
400 | 428 | spin_unlock_irqrestore(&se_tpg->acl_node_lock, flags); | |
401 | transport_free_session(se_sess); | ||
402 | 429 | ||
403 | pr_debug("TARGET_CORE[%s]: Deregistered fabric_sess\n", | 430 | pr_debug("TARGET_CORE[%s]: Deregistered fabric_sess\n", |
404 | se_tpg->se_tpg_tfo->get_fabric_name()); | 431 | se_tpg->se_tpg_tfo->get_fabric_name()); |
432 | /* | ||
433 | * If the last kref is dropping now for an explicit NodeACL, wake the | ||
434 | * sleeping ->acl_free_comp caller in the configfs se_node_acl->acl_group | ||
435 | * removal context. | ||
436 | */ | ||
437 | if (se_nacl && comp_nacl == true) | ||
438 | target_put_nacl(se_nacl); | ||
439 | |||
440 | transport_free_session(se_sess); | ||
405 | } | 441 | } |
406 | EXPORT_SYMBOL(transport_deregister_session); | 442 | EXPORT_SYMBOL(transport_deregister_session); |
407 | 443 | ||
@@ -437,7 +473,7 @@ static void transport_all_task_dev_remove_state(struct se_cmd *cmd) | |||
437 | 473 | ||
438 | /* transport_cmd_check_stop(): | 474 | /* transport_cmd_check_stop(): |
439 | * | 475 | * |
440 | * 'transport_off = 1' determines if t_transport_active should be cleared. | 476 | * 'transport_off = 1' determines if CMD_T_ACTIVE should be cleared. |
441 | * 'transport_off = 2' determines if task_dev_state should be removed. | 477 | * 'transport_off = 2' determines if task_dev_state should be removed. |
442 | * | 478 | * |
443 | * A non-zero u8 t_state sets cmd->t_state. | 479 | * A non-zero u8 t_state sets cmd->t_state. |
@@ -455,12 +491,11 @@ static int transport_cmd_check_stop( | |||
455 | * Determine if the IOCTL context caller is requesting the stopping of this | 491 | * Determine if the IOCTL context caller is requesting the stopping of this |
456 | * command for LUN shutdown purposes. | 492 | * command for LUN shutdown purposes. |
457 | */ | 493 | */ |
458 | if (atomic_read(&cmd->transport_lun_stop)) { | 494 | if (cmd->transport_state & CMD_T_LUN_STOP) { |
459 | pr_debug("%s:%d atomic_read(&cmd->transport_lun_stop)" | 495 | pr_debug("%s:%d CMD_T_LUN_STOP for ITT: 0x%08x\n", |
460 | " == TRUE for ITT: 0x%08x\n", __func__, __LINE__, | 496 | __func__, __LINE__, cmd->se_tfo->get_task_tag(cmd)); |
461 | cmd->se_tfo->get_task_tag(cmd)); | ||
462 | 497 | ||
463 | atomic_set(&cmd->t_transport_active, 0); | 498 | cmd->transport_state &= ~CMD_T_ACTIVE; |
464 | if (transport_off == 2) | 499 | if (transport_off == 2) |
465 | transport_all_task_dev_remove_state(cmd); | 500 | transport_all_task_dev_remove_state(cmd); |
466 | spin_unlock_irqrestore(&cmd->t_state_lock, flags); | 501 | spin_unlock_irqrestore(&cmd->t_state_lock, flags); |
@@ -472,9 +507,9 @@ static int transport_cmd_check_stop( | |||
472 | * Determine if frontend context caller is requesting the stopping of | 507 | * Determine if frontend context caller is requesting the stopping of |
473 | * this command for frontend exceptions. | 508 | * this command for frontend exceptions. |
474 | */ | 509 | */ |
475 | if (atomic_read(&cmd->t_transport_stop)) { | 510 | if (cmd->transport_state & CMD_T_STOP) { |
476 | pr_debug("%s:%d atomic_read(&cmd->t_transport_stop) ==" | 511 | pr_debug("%s:%d CMD_T_STOP for ITT: 0x%08x\n", |
477 | " TRUE for ITT: 0x%08x\n", __func__, __LINE__, | 512 | __func__, __LINE__, |
478 | cmd->se_tfo->get_task_tag(cmd)); | 513 | cmd->se_tfo->get_task_tag(cmd)); |
479 | 514 | ||
480 | if (transport_off == 2) | 515 | if (transport_off == 2) |
@@ -492,7 +527,7 @@ static int transport_cmd_check_stop( | |||
492 | return 1; | 527 | return 1; |
493 | } | 528 | } |
494 | if (transport_off) { | 529 | if (transport_off) { |
495 | atomic_set(&cmd->t_transport_active, 0); | 530 | cmd->transport_state &= ~CMD_T_ACTIVE; |
496 | if (transport_off == 2) { | 531 | if (transport_off == 2) { |
497 | transport_all_task_dev_remove_state(cmd); | 532 | transport_all_task_dev_remove_state(cmd); |
498 | /* | 533 | /* |
@@ -540,31 +575,21 @@ static void transport_lun_remove_cmd(struct se_cmd *cmd) | |||
540 | return; | 575 | return; |
541 | 576 | ||
542 | spin_lock_irqsave(&cmd->t_state_lock, flags); | 577 | spin_lock_irqsave(&cmd->t_state_lock, flags); |
543 | if (!atomic_read(&cmd->transport_dev_active)) { | 578 | if (cmd->transport_state & CMD_T_DEV_ACTIVE) { |
544 | spin_unlock_irqrestore(&cmd->t_state_lock, flags); | 579 | cmd->transport_state &= ~CMD_T_DEV_ACTIVE; |
545 | goto check_lun; | 580 | transport_all_task_dev_remove_state(cmd); |
546 | } | 581 | } |
547 | atomic_set(&cmd->transport_dev_active, 0); | ||
548 | transport_all_task_dev_remove_state(cmd); | ||
549 | spin_unlock_irqrestore(&cmd->t_state_lock, flags); | 582 | spin_unlock_irqrestore(&cmd->t_state_lock, flags); |
550 | 583 | ||
551 | |||
552 | check_lun: | ||
553 | spin_lock_irqsave(&lun->lun_cmd_lock, flags); | 584 | spin_lock_irqsave(&lun->lun_cmd_lock, flags); |
554 | if (atomic_read(&cmd->transport_lun_active)) { | 585 | if (!list_empty(&cmd->se_lun_node)) |
555 | list_del(&cmd->se_lun_node); | 586 | list_del_init(&cmd->se_lun_node); |
556 | atomic_set(&cmd->transport_lun_active, 0); | ||
557 | #if 0 | ||
558 | pr_debug("Removed ITT: 0x%08x from LUN LIST[%d]\n" | ||
559 | cmd->se_tfo->get_task_tag(cmd), lun->unpacked_lun); | ||
560 | #endif | ||
561 | } | ||
562 | spin_unlock_irqrestore(&lun->lun_cmd_lock, flags); | 587 | spin_unlock_irqrestore(&lun->lun_cmd_lock, flags); |
563 | } | 588 | } |
564 | 589 | ||
565 | void transport_cmd_finish_abort(struct se_cmd *cmd, int remove) | 590 | void transport_cmd_finish_abort(struct se_cmd *cmd, int remove) |
566 | { | 591 | { |
567 | if (!cmd->se_tmr_req) | 592 | if (!(cmd->se_cmd_flags & SCF_SCSI_TMR_CDB)) |
568 | transport_lun_remove_cmd(cmd); | 593 | transport_lun_remove_cmd(cmd); |
569 | 594 | ||
570 | if (transport_cmd_check_stop_to_fabric(cmd)) | 595 | if (transport_cmd_check_stop_to_fabric(cmd)) |
@@ -585,7 +610,7 @@ static void transport_add_cmd_to_queue(struct se_cmd *cmd, int t_state, | |||
585 | if (t_state) { | 610 | if (t_state) { |
586 | spin_lock_irqsave(&cmd->t_state_lock, flags); | 611 | spin_lock_irqsave(&cmd->t_state_lock, flags); |
587 | cmd->t_state = t_state; | 612 | cmd->t_state = t_state; |
588 | atomic_set(&cmd->t_transport_active, 1); | 613 | cmd->transport_state |= CMD_T_ACTIVE; |
589 | spin_unlock_irqrestore(&cmd->t_state_lock, flags); | 614 | spin_unlock_irqrestore(&cmd->t_state_lock, flags); |
590 | } | 615 | } |
591 | 616 | ||
@@ -601,7 +626,7 @@ static void transport_add_cmd_to_queue(struct se_cmd *cmd, int t_state, | |||
601 | list_add(&cmd->se_queue_node, &qobj->qobj_list); | 626 | list_add(&cmd->se_queue_node, &qobj->qobj_list); |
602 | else | 627 | else |
603 | list_add_tail(&cmd->se_queue_node, &qobj->qobj_list); | 628 | list_add_tail(&cmd->se_queue_node, &qobj->qobj_list); |
604 | atomic_set(&cmd->t_transport_queue_active, 1); | 629 | cmd->transport_state |= CMD_T_QUEUED; |
605 | spin_unlock_irqrestore(&qobj->cmd_queue_lock, flags); | 630 | spin_unlock_irqrestore(&qobj->cmd_queue_lock, flags); |
606 | 631 | ||
607 | wake_up_interruptible(&qobj->thread_wq); | 632 | wake_up_interruptible(&qobj->thread_wq); |
@@ -620,8 +645,7 @@ transport_get_cmd_from_queue(struct se_queue_obj *qobj) | |||
620 | } | 645 | } |
621 | cmd = list_first_entry(&qobj->qobj_list, struct se_cmd, se_queue_node); | 646 | cmd = list_first_entry(&qobj->qobj_list, struct se_cmd, se_queue_node); |
622 | 647 | ||
623 | atomic_set(&cmd->t_transport_queue_active, 0); | 648 | cmd->transport_state &= ~CMD_T_QUEUED; |
624 | |||
625 | list_del_init(&cmd->se_queue_node); | 649 | list_del_init(&cmd->se_queue_node); |
626 | atomic_dec(&qobj->queue_cnt); | 650 | atomic_dec(&qobj->queue_cnt); |
627 | spin_unlock_irqrestore(&qobj->cmd_queue_lock, flags); | 651 | spin_unlock_irqrestore(&qobj->cmd_queue_lock, flags); |
@@ -635,20 +659,14 @@ static void transport_remove_cmd_from_queue(struct se_cmd *cmd) | |||
635 | unsigned long flags; | 659 | unsigned long flags; |
636 | 660 | ||
637 | spin_lock_irqsave(&qobj->cmd_queue_lock, flags); | 661 | spin_lock_irqsave(&qobj->cmd_queue_lock, flags); |
638 | if (!atomic_read(&cmd->t_transport_queue_active)) { | 662 | if (!(cmd->transport_state & CMD_T_QUEUED)) { |
639 | spin_unlock_irqrestore(&qobj->cmd_queue_lock, flags); | 663 | spin_unlock_irqrestore(&qobj->cmd_queue_lock, flags); |
640 | return; | 664 | return; |
641 | } | 665 | } |
642 | atomic_set(&cmd->t_transport_queue_active, 0); | 666 | cmd->transport_state &= ~CMD_T_QUEUED; |
643 | atomic_dec(&qobj->queue_cnt); | 667 | atomic_dec(&qobj->queue_cnt); |
644 | list_del_init(&cmd->se_queue_node); | 668 | list_del_init(&cmd->se_queue_node); |
645 | spin_unlock_irqrestore(&qobj->cmd_queue_lock, flags); | 669 | spin_unlock_irqrestore(&qobj->cmd_queue_lock, flags); |
646 | |||
647 | if (atomic_read(&cmd->t_transport_queue_active)) { | ||
648 | pr_err("ITT: 0x%08x t_transport_queue_active: %d\n", | ||
649 | cmd->se_tfo->get_task_tag(cmd), | ||
650 | atomic_read(&cmd->t_transport_queue_active)); | ||
651 | } | ||
652 | } | 670 | } |
653 | 671 | ||
654 | /* | 672 | /* |
@@ -719,7 +737,7 @@ void transport_complete_task(struct se_task *task, int success) | |||
719 | } | 737 | } |
720 | 738 | ||
721 | if (!success) | 739 | if (!success) |
722 | cmd->t_tasks_failed = 1; | 740 | cmd->transport_state |= CMD_T_FAILED; |
723 | 741 | ||
724 | /* | 742 | /* |
725 | * Decrement the outstanding t_task_cdbs_left count. The last | 743 | * Decrement the outstanding t_task_cdbs_left count. The last |
@@ -730,17 +748,24 @@ void transport_complete_task(struct se_task *task, int success) | |||
730 | spin_unlock_irqrestore(&cmd->t_state_lock, flags); | 748 | spin_unlock_irqrestore(&cmd->t_state_lock, flags); |
731 | return; | 749 | return; |
732 | } | 750 | } |
733 | 751 | /* | |
734 | if (cmd->t_tasks_failed) { | 752 | * Check for the case where an explicit ABORT_TASK has been received |
753 | * and transport_wait_for_tasks() will be waiting for completion. | ||
754 | */ | ||
755 | if (cmd->transport_state & CMD_T_ABORTED && | ||
756 | cmd->transport_state & CMD_T_STOP) { | ||
757 | spin_unlock_irqrestore(&cmd->t_state_lock, flags); | ||
758 | complete(&cmd->t_transport_stop_comp); | ||
759 | return; | ||
760 | } else if (cmd->transport_state & CMD_T_FAILED) { | ||
735 | cmd->scsi_sense_reason = TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE; | 761 | cmd->scsi_sense_reason = TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE; |
736 | INIT_WORK(&cmd->work, target_complete_failure_work); | 762 | INIT_WORK(&cmd->work, target_complete_failure_work); |
737 | } else { | 763 | } else { |
738 | atomic_set(&cmd->t_transport_complete, 1); | ||
739 | INIT_WORK(&cmd->work, target_complete_ok_work); | 764 | INIT_WORK(&cmd->work, target_complete_ok_work); |
740 | } | 765 | } |
741 | 766 | ||
742 | cmd->t_state = TRANSPORT_COMPLETE; | 767 | cmd->t_state = TRANSPORT_COMPLETE; |
743 | atomic_set(&cmd->t_transport_active, 1); | 768 | cmd->transport_state |= (CMD_T_COMPLETE | CMD_T_ACTIVE); |
744 | spin_unlock_irqrestore(&cmd->t_state_lock, flags); | 769 | spin_unlock_irqrestore(&cmd->t_state_lock, flags); |
745 | 770 | ||
746 | queue_work(target_completion_wq, &cmd->work); | 771 | queue_work(target_completion_wq, &cmd->work); |
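The new CMD_T_ABORTED && CMD_T_STOP branch is the completion half of the ABORT_TASK handshake: the aborting context sets CMD_T_STOP and sleeps, and the last backend task completion wakes it instead of queueing normal status. The waiter side looks roughly like this (a sketch mirroring transport_wait_for_tasks(), not a literal copy):

    spin_lock_irqsave(&cmd->t_state_lock, flags);
    cmd->transport_state |= CMD_T_STOP;
    spin_unlock_irqrestore(&cmd->t_state_lock, flags);

    wait_for_completion(&cmd->t_transport_stop_comp);   /* woken by the branch above */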
@@ -1488,7 +1513,7 @@ void transport_init_se_cmd( | |||
1488 | init_completion(&cmd->t_transport_stop_comp); | 1513 | init_completion(&cmd->t_transport_stop_comp); |
1489 | init_completion(&cmd->cmd_wait_comp); | 1514 | init_completion(&cmd->cmd_wait_comp); |
1490 | spin_lock_init(&cmd->t_state_lock); | 1515 | spin_lock_init(&cmd->t_state_lock); |
1491 | atomic_set(&cmd->transport_dev_active, 1); | 1516 | cmd->transport_state = CMD_T_DEV_ACTIVE; |
1492 | 1517 | ||
1493 | cmd->se_tfo = tfo; | 1518 | cmd->se_tfo = tfo; |
1494 | cmd->se_sess = se_sess; | 1519 | cmd->se_sess = se_sess; |
@@ -1618,7 +1643,7 @@ int transport_handle_cdb_direct( | |||
1618 | return -EINVAL; | 1643 | return -EINVAL; |
1619 | } | 1644 | } |
1620 | /* | 1645 | /* |
1621 | * Set TRANSPORT_NEW_CMD state and cmd->t_transport_active=1 following | 1646 | * Set TRANSPORT_NEW_CMD state and CMD_T_ACTIVE following |
1622 | * transport_generic_handle_cdb*() -> transport_add_cmd_to_queue() | 1647 | * transport_generic_handle_cdb*() -> transport_add_cmd_to_queue() |
1623 | * in existing usage to ensure that outstanding descriptors are handled | 1648 | * in existing usage to ensure that outstanding descriptors are handled |
1624 | * correctly during shutdown via transport_wait_for_tasks() | 1649 | * correctly during shutdown via transport_wait_for_tasks() |
@@ -1627,7 +1652,8 @@ int transport_handle_cdb_direct( | |||
1627 | * this to be called for initial descriptor submission. | 1652 | * this to be called for initial descriptor submission. |
1628 | */ | 1653 | */ |
1629 | cmd->t_state = TRANSPORT_NEW_CMD; | 1654 | cmd->t_state = TRANSPORT_NEW_CMD; |
1630 | atomic_set(&cmd->t_transport_active, 1); | 1655 | cmd->transport_state |= CMD_T_ACTIVE; |
1656 | |||
1631 | /* | 1657 | /* |
1632 | * transport_generic_new_cmd() is already handling QUEUE_FULL, | 1658 | * transport_generic_new_cmd() is already handling QUEUE_FULL, |
1633 | * so follow TRANSPORT_NEW_CMD processing thread context usage | 1659 | * so follow TRANSPORT_NEW_CMD processing thread context usage |
@@ -1716,6 +1742,74 @@ void target_submit_cmd(struct se_cmd *se_cmd, struct se_session *se_sess, | |||
1716 | } | 1742 | } |
1717 | EXPORT_SYMBOL(target_submit_cmd); | 1743 | EXPORT_SYMBOL(target_submit_cmd); |
1718 | 1744 | ||
1745 | static void target_complete_tmr_failure(struct work_struct *work) | ||
1746 | { | ||
1747 | struct se_cmd *se_cmd = container_of(work, struct se_cmd, work); | ||
1748 | |||
1749 | se_cmd->se_tmr_req->response = TMR_LUN_DOES_NOT_EXIST; | ||
1750 | se_cmd->se_tfo->queue_tm_rsp(se_cmd); | ||
1751 | transport_generic_free_cmd(se_cmd, 0); | ||
1752 | } | ||
1753 | |||
1754 | /** | ||
1755 | * target_submit_tmr - lookup unpacked lun and submit uninitialized se_cmd | ||
1756 | * for TMR CDBs | ||
1757 | * | ||
1758 | * @se_cmd: command descriptor to submit | ||
1759 | * @se_sess: associated se_sess for endpoint | ||
1760 | * @sense: pointer to SCSI sense buffer | ||
1761 | * @unpacked_lun: unpacked LUN to reference for struct se_lun | ||
1762 | * @fabric_tmr_ptr: fabric context for TMR req | ||
1763 | * @tm_type: Type of TM request | ||
1764 | * @gfp: gfp type for caller | ||
1765 | * @tag: referenced task tag for TMR_ABORT_TASK | ||
1766 | * @flags: submit cmd flags | ||
1767 | * | ||
1768 | * Callable from all contexts. | ||
1769 | **/ | ||
1770 | |||
1771 | int target_submit_tmr(struct se_cmd *se_cmd, struct se_session *se_sess, | ||
1772 | unsigned char *sense, u32 unpacked_lun, | ||
1773 | void *fabric_tmr_ptr, unsigned char tm_type, | ||
1774 | gfp_t gfp, unsigned int tag, int flags) | ||
1775 | { | ||
1776 | struct se_portal_group *se_tpg; | ||
1777 | int ret; | ||
1778 | |||
1779 | se_tpg = se_sess->se_tpg; | ||
1780 | BUG_ON(!se_tpg); | ||
1781 | |||
1782 | transport_init_se_cmd(se_cmd, se_tpg->se_tpg_tfo, se_sess, | ||
1783 | 0, DMA_NONE, MSG_SIMPLE_TAG, sense); | ||
1784 | /* | ||
1785 | * FIXME: Currently expect caller to handle se_cmd->se_tmr_req | ||
1786 | * allocation failure. | ||
1787 | */ | ||
1788 | ret = core_tmr_alloc_req(se_cmd, fabric_tmr_ptr, tm_type, gfp); | ||
1789 | if (ret < 0) | ||
1790 | return -ENOMEM; | ||
1791 | |||
1792 | if (tm_type == TMR_ABORT_TASK) | ||
1793 | se_cmd->se_tmr_req->ref_task_tag = tag; | ||
1794 | |||
1795 | /* See target_submit_cmd for commentary */ | ||
1796 | target_get_sess_cmd(se_sess, se_cmd, (flags & TARGET_SCF_ACK_KREF)); | ||
1797 | |||
1798 | ret = transport_lookup_tmr_lun(se_cmd, unpacked_lun); | ||
1799 | if (ret) { | ||
1800 | /* | ||
1801 | * For callback during failure handling, push this work off | ||
1802 | * to process context with TMR_LUN_DOES_NOT_EXIST status. | ||
1803 | */ | ||
1804 | INIT_WORK(&se_cmd->work, target_complete_tmr_failure); | ||
1805 | schedule_work(&se_cmd->work); | ||
1806 | return 0; | ||
1807 | } | ||
1808 | transport_generic_handle_tmr(se_cmd); | ||
1809 | return 0; | ||
1810 | } | ||
1811 | EXPORT_SYMBOL(target_submit_tmr); | ||
1812 | |||
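A fabric module can now convert an incoming task-management frame into a TMR submission with a single call; roughly (the function and parameter names below are hypothetical glue, only the target_submit_tmr() signature comes from this patch):

    static int example_handle_abort(struct se_session *se_sess,
                                    struct se_cmd *se_cmd, u32 lun,
                                    u32 ref_tag, void *fabric_tmr)
    {
        return target_submit_tmr(se_cmd, se_sess, NULL, lun, fabric_tmr,
                                 TMR_ABORT_TASK, GFP_KERNEL, ref_tag, 0);
    }

Passing a NULL sense buffer is assumed acceptable here since a TMR generates no sense payload, and a failed LUN lookup is reported asynchronously as TMR_LUN_DOES_NOT_EXIST through the target_complete_tmr_failure() worker above.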
1719 | /* | 1813 | /* |
1720 | * Used by fabric module frontends defining a TFO->new_cmd_map() caller | 1814 | * Used by fabric module frontends defining a TFO->new_cmd_map() caller |
1721 | * to queue up a newly setup se_cmd w/ TRANSPORT_NEW_CMD_MAP in order to | 1815 | * to queue up a newly setup se_cmd w/ TRANSPORT_NEW_CMD_MAP in order to |
@@ -1847,7 +1941,7 @@ static int transport_stop_tasks_for_cmd(struct se_cmd *cmd) | |||
1847 | /* | 1941 | /* |
1848 | * Handle SAM-esque emulation for generic transport request failures. | 1942 | * Handle SAM-esque emulation for generic transport request failures. |
1849 | */ | 1943 | */ |
1850 | static void transport_generic_request_failure(struct se_cmd *cmd) | 1944 | void transport_generic_request_failure(struct se_cmd *cmd) |
1851 | { | 1945 | { |
1852 | int ret = 0; | 1946 | int ret = 0; |
1853 | 1947 | ||
@@ -1859,14 +1953,14 @@ static void transport_generic_request_failure(struct se_cmd *cmd) | |||
1859 | cmd->t_state, cmd->scsi_sense_reason); | 1953 | cmd->t_state, cmd->scsi_sense_reason); |
1860 | pr_debug("-----[ t_tasks: %d t_task_cdbs_left: %d" | 1954 | pr_debug("-----[ t_tasks: %d t_task_cdbs_left: %d" |
1861 | " t_task_cdbs_sent: %d t_task_cdbs_ex_left: %d --" | 1955 | " t_task_cdbs_sent: %d t_task_cdbs_ex_left: %d --" |
1862 | " t_transport_active: %d t_transport_stop: %d" | 1956 | " CMD_T_ACTIVE: %d CMD_T_STOP: %d CMD_T_SENT: %d\n", |
1863 | " t_transport_sent: %d\n", cmd->t_task_list_num, | 1957 | cmd->t_task_list_num, |
1864 | atomic_read(&cmd->t_task_cdbs_left), | 1958 | atomic_read(&cmd->t_task_cdbs_left), |
1865 | atomic_read(&cmd->t_task_cdbs_sent), | 1959 | atomic_read(&cmd->t_task_cdbs_sent), |
1866 | atomic_read(&cmd->t_task_cdbs_ex_left), | 1960 | atomic_read(&cmd->t_task_cdbs_ex_left), |
1867 | atomic_read(&cmd->t_transport_active), | 1961 | (cmd->transport_state & CMD_T_ACTIVE) != 0, |
1868 | atomic_read(&cmd->t_transport_stop), | 1962 | (cmd->transport_state & CMD_T_STOP) != 0, |
1869 | atomic_read(&cmd->t_transport_sent)); | 1963 | (cmd->transport_state & CMD_T_SENT) != 0); |
1870 | 1964 | ||
1871 | /* | 1965 | /* |
1872 | * For SAM Task Attribute emulation for failed struct se_cmd | 1966 | * For SAM Task Attribute emulation for failed struct se_cmd |
@@ -1939,6 +2033,7 @@ queue_full: | |||
1939 | cmd->t_state = TRANSPORT_COMPLETE_QF_OK; | 2033 | cmd->t_state = TRANSPORT_COMPLETE_QF_OK; |
1940 | transport_handle_queue_full(cmd, cmd->se_dev); | 2034 | transport_handle_queue_full(cmd, cmd->se_dev); |
1941 | } | 2035 | } |
2036 | EXPORT_SYMBOL(transport_generic_request_failure); | ||
1942 | 2037 | ||
1943 | static inline u32 transport_lba_21(unsigned char *cdb) | 2038 | static inline u32 transport_lba_21(unsigned char *cdb) |
1944 | { | 2039 | { |
@@ -2125,7 +2220,7 @@ check_depth: | |||
2125 | 2220 | ||
2126 | if (atomic_read(&cmd->t_task_cdbs_sent) == | 2221 | if (atomic_read(&cmd->t_task_cdbs_sent) == |
2127 | cmd->t_task_list_num) | 2222 | cmd->t_task_list_num) |
2128 | atomic_set(&cmd->t_transport_sent, 1); | 2223 | cmd->transport_state |= CMD_T_SENT; |
2129 | 2224 | ||
2130 | spin_unlock_irqrestore(&cmd->t_state_lock, flags); | 2225 | spin_unlock_irqrestore(&cmd->t_state_lock, flags); |
2131 | 2226 | ||
@@ -2136,8 +2231,9 @@ check_depth: | |||
2136 | if (error != 0) { | 2231 | if (error != 0) { |
2137 | spin_lock_irqsave(&cmd->t_state_lock, flags); | 2232 | spin_lock_irqsave(&cmd->t_state_lock, flags); |
2138 | task->task_flags &= ~TF_ACTIVE; | 2233 | task->task_flags &= ~TF_ACTIVE; |
2234 | cmd->transport_state &= ~CMD_T_SENT; | ||
2139 | spin_unlock_irqrestore(&cmd->t_state_lock, flags); | 2235 | spin_unlock_irqrestore(&cmd->t_state_lock, flags); |
2140 | atomic_set(&cmd->t_transport_sent, 0); | 2236 | |
2141 | transport_stop_tasks_for_cmd(cmd); | 2237 | transport_stop_tasks_for_cmd(cmd); |
2142 | transport_generic_request_failure(cmd); | 2238 | transport_generic_request_failure(cmd); |
2143 | } | 2239 | } |
@@ -2847,7 +2943,7 @@ static int transport_generic_cmd_sequencer( | |||
2847 | 2943 | ||
2848 | pr_err("Unsupported SA: 0x%02x\n", | 2944 | pr_err("Unsupported SA: 0x%02x\n", |
2849 | cmd->t_task_cdb[1] & 0x1f); | 2945 | cmd->t_task_cdb[1] & 0x1f); |
2850 | goto out_unsupported_cdb; | 2946 | goto out_invalid_cdb_field; |
2851 | } | 2947 | } |
2852 | /*FALLTHROUGH*/ | 2948 | /*FALLTHROUGH*/ |
2853 | case ACCESS_CONTROL_IN: | 2949 | case ACCESS_CONTROL_IN: |
@@ -2929,7 +3025,7 @@ static int transport_generic_cmd_sequencer( | |||
2929 | cmd->se_cmd_flags |= SCF_SCSI_NON_DATA_CDB; | 3025 | cmd->se_cmd_flags |= SCF_SCSI_NON_DATA_CDB; |
2930 | break; | 3026 | break; |
2931 | case SYNCHRONIZE_CACHE: | 3027 | case SYNCHRONIZE_CACHE: |
2932 | case 0x91: /* SYNCHRONIZE_CACHE_16: */ | 3028 | case SYNCHRONIZE_CACHE_16: |
2933 | /* | 3029 | /* |
2934 | * Extract LBA and range to be flushed for emulated SYNCHRONIZE_CACHE | 3030 | * Extract LBA and range to be flushed for emulated SYNCHRONIZE_CACHE |
2935 | */ | 3031 | */ |
@@ -3081,6 +3177,13 @@ static int transport_generic_cmd_sequencer( | |||
3081 | cmd->data_length = size; | 3177 | cmd->data_length = size; |
3082 | } | 3178 | } |
3083 | 3179 | ||
3180 | if (cmd->se_cmd_flags & SCF_SCSI_DATA_SG_IO_CDB && | ||
3181 | sectors > dev->se_sub_dev->se_dev_attrib.fabric_max_sectors) { | ||
3182 | printk_ratelimited(KERN_ERR "SCSI OP %02xh with too big sectors %u\n", | ||
3183 | cdb[0], sectors); | ||
3184 | goto out_invalid_cdb_field; | ||
3185 | } | ||
3186 | |||
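Concretely: with a backend advertising fabric_max_sectors = 8192, a READ_16 asking for 16384 sectors now fails up front through out_invalid_cdb_field instead of handing an oversized request to the backend.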
3084 | /* reject any command that we don't have a handler for */ | 3187 | /* reject any command that we don't have a handler for */ |
3085 | if (!(passthrough || cmd->execute_task || | 3188 | if (!(passthrough || cmd->execute_task || |
3086 | (cmd->se_cmd_flags & SCF_SCSI_DATA_SG_IO_CDB))) | 3189 | (cmd->se_cmd_flags & SCF_SCSI_DATA_SG_IO_CDB))) |
@@ -3384,7 +3487,7 @@ static void transport_release_cmd(struct se_cmd *cmd) | |||
3384 | { | 3487 | { |
3385 | BUG_ON(!cmd->se_tfo); | 3488 | BUG_ON(!cmd->se_tfo); |
3386 | 3489 | ||
3387 | if (cmd->se_tmr_req) | 3490 | if (cmd->se_cmd_flags & SCF_SCSI_TMR_CDB) |
3388 | core_tmr_release_req(cmd->se_tmr_req); | 3491 | core_tmr_release_req(cmd->se_tmr_req); |
3389 | if (cmd->t_task_cdb != cmd->__t_task_cdb) | 3492 | if (cmd->t_task_cdb != cmd->__t_task_cdb) |
3390 | kfree(cmd->t_task_cdb); | 3493 | kfree(cmd->t_task_cdb); |
@@ -3421,8 +3524,8 @@ static void transport_put_cmd(struct se_cmd *cmd) | |||
3421 | goto out_busy; | 3524 | goto out_busy; |
3422 | } | 3525 | } |
3423 | 3526 | ||
3424 | if (atomic_read(&cmd->transport_dev_active)) { | 3527 | if (cmd->transport_state & CMD_T_DEV_ACTIVE) { |
3425 | atomic_set(&cmd->transport_dev_active, 0); | 3528 | cmd->transport_state &= ~CMD_T_DEV_ACTIVE; |
3426 | transport_all_task_dev_remove_state(cmd); | 3529 | transport_all_task_dev_remove_state(cmd); |
3427 | free_tasks = 1; | 3530 | free_tasks = 1; |
3428 | } | 3531 | } |
@@ -3527,10 +3630,12 @@ EXPORT_SYMBOL(transport_kmap_data_sg); | |||
3527 | 3630 | ||
3528 | void transport_kunmap_data_sg(struct se_cmd *cmd) | 3631 | void transport_kunmap_data_sg(struct se_cmd *cmd) |
3529 | { | 3632 | { |
3530 | if (!cmd->t_data_nents) | 3633 | if (!cmd->t_data_nents) { |
3531 | return; | 3634 | return; |
3532 | else if (cmd->t_data_nents == 1) | 3635 | } else if (cmd->t_data_nents == 1) { |
3533 | kunmap(sg_page(cmd->t_data_sg)); | 3636 | kunmap(sg_page(cmd->t_data_sg)); |
3637 | return; | ||
3638 | } | ||
3534 | 3639 | ||
3535 | vunmap(cmd->t_data_vmap); | 3640 | vunmap(cmd->t_data_vmap); |
3536 | cmd->t_data_vmap = NULL; | 3641 | cmd->t_data_vmap = NULL; |
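The early return added for the single-entry case fixes an unmap mismatch: one scatterlist entry is mapped with kmap(), so falling through to vunmap() operated on t_data_vmap, which the kmap path never set. Each unmap now pairs with the primitive that created the mapping.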
@@ -3860,8 +3965,10 @@ int transport_generic_new_cmd(struct se_cmd *cmd) | |||
3860 | if (task_cdbs < 0) | 3965 | if (task_cdbs < 0) |
3861 | goto out_fail; | 3966 | goto out_fail; |
3862 | else if (!task_cdbs && (cmd->se_cmd_flags & SCF_SCSI_DATA_SG_IO_CDB)) { | 3967 | else if (!task_cdbs && (cmd->se_cmd_flags & SCF_SCSI_DATA_SG_IO_CDB)) { |
3968 | spin_lock_irq(&cmd->t_state_lock); | ||
3863 | cmd->t_state = TRANSPORT_COMPLETE; | 3969 | cmd->t_state = TRANSPORT_COMPLETE; |
3864 | atomic_set(&cmd->t_transport_active, 1); | 3970 | cmd->transport_state |= CMD_T_ACTIVE; |
3971 | spin_unlock_irq(&cmd->t_state_lock); | ||
3865 | 3972 | ||
3866 | if (cmd->t_task_cdb[0] == REQUEST_SENSE) { | 3973 | if (cmd->t_task_cdb[0] == REQUEST_SENSE) { |
3867 | u8 ua_asc = 0, ua_ascq = 0; | 3974 | u8 ua_asc = 0, ua_ascq = 0; |
@@ -3942,9 +4049,9 @@ static int transport_generic_write_pending(struct se_cmd *cmd) | |||
3942 | 4049 | ||
3943 | /* | 4050 | /* |
3944 | * Clear the se_cmd for WRITE_PENDING status in order to set | 4051 | * Clear the se_cmd for WRITE_PENDING status in order to clear |
3945 | * cmd->t_transport_active=0 so that transport_generic_handle_data | 4052 | * CMD_T_ACTIVE so that transport_generic_handle_data can be called |
3946 | * can be called from HW target mode interrupt code. This is safe | 4053 | * from HW target mode interrupt code. This is safe to be called |
3947 | * to be called with transport_off=1 before the cmd->se_tfo->write_pending | 4054 | * with transport_off=1 before the cmd->se_tfo->write_pending |
3948 | * because the se_cmd->se_lun pointer is not being cleared. | 4055 | * because the se_cmd->se_lun pointer is not being cleared. |
3949 | */ | 4056 | */ |
3950 | transport_cmd_check_stop(cmd, 1, 0); | 4057 | transport_cmd_check_stop(cmd, 1, 0); |
@@ -3971,7 +4078,7 @@ queue_full: | |||
3971 | void transport_generic_free_cmd(struct se_cmd *cmd, int wait_for_tasks) | 4078 | void transport_generic_free_cmd(struct se_cmd *cmd, int wait_for_tasks) |
3972 | { | 4079 | { |
3973 | if (!(cmd->se_cmd_flags & SCF_SE_LUN_CMD)) { | 4080 | if (!(cmd->se_cmd_flags & SCF_SE_LUN_CMD)) { |
3974 | if (wait_for_tasks && cmd->se_tmr_req) | 4081 | if (wait_for_tasks && (cmd->se_cmd_flags & SCF_SCSI_TMR_CDB)) |
3975 | transport_wait_for_tasks(cmd); | 4082 | transport_wait_for_tasks(cmd); |
3976 | 4083 | ||
3977 | transport_release_cmd(cmd); | 4084 | transport_release_cmd(cmd); |
@@ -4007,8 +4114,10 @@ void target_get_sess_cmd(struct se_session *se_sess, struct se_cmd *se_cmd, | |||
4007 | * fabric acknowledgement that requires two target_put_sess_cmd() | 4114 | * fabric acknowledgement that requires two target_put_sess_cmd() |
4008 | * invocations before se_cmd descriptor release. | 4115 | * invocations before se_cmd descriptor release. |
4009 | */ | 4116 | */ |
4010 | if (ack_kref == true) | 4117 | if (ack_kref == true) { |
4011 | kref_get(&se_cmd->cmd_kref); | 4118 | kref_get(&se_cmd->cmd_kref); |
4119 | se_cmd->se_cmd_flags |= SCF_ACK_KREF; | ||
4120 | } | ||
4012 | 4121 | ||
4013 | spin_lock_irqsave(&se_sess->sess_cmd_lock, flags); | 4122 | spin_lock_irqsave(&se_sess->sess_cmd_lock, flags); |
4014 | list_add_tail(&se_cmd->se_cmd_list, &se_sess->sess_cmd_list); | 4123 | list_add_tail(&se_cmd->se_cmd_list, &se_sess->sess_cmd_list); |
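With SCF_ACK_KREF recorded on the command, release paths can tell acknowledged completions apart: the fabric takes an extra cmd_kref up front and must drop it twice, once when the initiator acks the response and once at final release. As a comment-ladder sketch (assuming the matching target_put_sess_cmd() helper and a cmd_kref initialized to 1 at command setup):

    /*
     * target_get_sess_cmd(se_sess, se_cmd, true);  cmd_kref 1 -> 2, SCF_ACK_KREF set
     * ... command completes, response sent ...
     * target_put_sess_cmd(se_sess, se_cmd);        2 -> 1, on fabric response ack
     * target_put_sess_cmd(se_sess, se_cmd);        1 -> 0, final ->release_cmd()
     */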
@@ -4026,7 +4135,7 @@ static void target_release_cmd_kref(struct kref *kref) | |||
4026 | spin_lock_irqsave(&se_sess->sess_cmd_lock, flags); | 4135 | spin_lock_irqsave(&se_sess->sess_cmd_lock, flags); |
4027 | if (list_empty(&se_cmd->se_cmd_list)) { | 4136 | if (list_empty(&se_cmd->se_cmd_list)) { |
4028 | spin_unlock_irqrestore(&se_sess->sess_cmd_lock, flags); | 4137 | spin_unlock_irqrestore(&se_sess->sess_cmd_lock, flags); |
4029 | WARN_ON(1); | 4138 | se_cmd->se_tfo->release_cmd(se_cmd); |
4030 | return; | 4139 | return; |
4031 | } | 4140 | } |
4032 | if (se_sess->sess_tearing_down && se_cmd->cmd_wait_set) { | 4141 | if (se_sess->sess_tearing_down && se_cmd->cmd_wait_set) { |
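Replacing WARN_ON(1) with ->release_cmd() means a descriptor whose se_cmd_list entry is already empty is still handed back to the fabric for release, rather than being leaked with only a warning.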
@@ -4130,15 +4239,16 @@ static int transport_lun_wait_for_tasks(struct se_cmd *cmd, struct se_lun *lun) | |||
4130 | * be stopped, we can safely ignore this struct se_cmd. | 4239 | * be stopped, we can safely ignore this struct se_cmd. |
4131 | */ | 4240 | */ |
4132 | spin_lock_irqsave(&cmd->t_state_lock, flags); | 4241 | spin_lock_irqsave(&cmd->t_state_lock, flags); |
4133 | if (atomic_read(&cmd->t_transport_stop)) { | 4242 | if (cmd->transport_state & CMD_T_STOP) { |
4134 | atomic_set(&cmd->transport_lun_stop, 0); | 4243 | cmd->transport_state &= ~CMD_T_LUN_STOP; |
4135 | pr_debug("ConfigFS ITT[0x%08x] - t_transport_stop ==" | 4244 | |
4136 | " TRUE, skipping\n", cmd->se_tfo->get_task_tag(cmd)); | 4245 | pr_debug("ConfigFS ITT[0x%08x] - CMD_T_STOP, skipping\n", |
4246 | cmd->se_tfo->get_task_tag(cmd)); | ||
4137 | spin_unlock_irqrestore(&cmd->t_state_lock, flags); | 4247 | spin_unlock_irqrestore(&cmd->t_state_lock, flags); |
4138 | transport_cmd_check_stop(cmd, 1, 0); | 4248 | transport_cmd_check_stop(cmd, 1, 0); |
4139 | return -EPERM; | 4249 | return -EPERM; |
4140 | } | 4250 | } |
4141 | atomic_set(&cmd->transport_lun_fe_stop, 1); | 4251 | cmd->transport_state |= CMD_T_LUN_FE_STOP; |
4142 | spin_unlock_irqrestore(&cmd->t_state_lock, flags); | 4252 | spin_unlock_irqrestore(&cmd->t_state_lock, flags); |
4143 | 4253 | ||
4144 | wake_up_interruptible(&cmd->se_dev->dev_queue_obj.thread_wq); | 4254 | wake_up_interruptible(&cmd->se_dev->dev_queue_obj.thread_wq); |
@@ -4171,9 +4281,8 @@ static void __transport_clear_lun_from_sessions(struct se_lun *lun) | |||
4171 | while (!list_empty(&lun->lun_cmd_list)) { | 4281 | while (!list_empty(&lun->lun_cmd_list)) { |
4172 | cmd = list_first_entry(&lun->lun_cmd_list, | 4282 | cmd = list_first_entry(&lun->lun_cmd_list, |
4173 | struct se_cmd, se_lun_node); | 4283 | struct se_cmd, se_lun_node); |
4174 | list_del(&cmd->se_lun_node); | 4284 | list_del_init(&cmd->se_lun_node); |
4175 | 4285 | ||
4176 | atomic_set(&cmd->transport_lun_active, 0); | ||
4177 | /* | 4286 | /* |
4178 | * This will notify iscsi_target_transport.c: | 4287 | * This will notify iscsi_target_transport.c: |
4179 | * transport_cmd_check_stop() that a LUN shutdown is in | 4288 | * transport_cmd_check_stop() that a LUN shutdown is in |
@@ -4184,7 +4293,7 @@ static void __transport_clear_lun_from_sessions(struct se_lun *lun) | |||
4184 | "_lun_stop for ITT: 0x%08x\n", | 4293 | "_lun_stop for ITT: 0x%08x\n", |
4185 | cmd->se_lun->unpacked_lun, | 4294 | cmd->se_lun->unpacked_lun, |
4186 | cmd->se_tfo->get_task_tag(cmd)); | 4295 | cmd->se_tfo->get_task_tag(cmd)); |
4187 | atomic_set(&cmd->transport_lun_stop, 1); | 4296 | cmd->transport_state |= CMD_T_LUN_STOP; |
4188 | spin_unlock(&cmd->t_state_lock); | 4297 | spin_unlock(&cmd->t_state_lock); |
4189 | 4298 | ||
4190 | spin_unlock_irqrestore(&lun->lun_cmd_lock, lun_flags); | 4299 | spin_unlock_irqrestore(&lun->lun_cmd_lock, lun_flags); |
@@ -4214,11 +4323,11 @@ static void __transport_clear_lun_from_sessions(struct se_lun *lun) | |||
4214 | cmd->se_tfo->get_task_tag(cmd)); | 4323 | cmd->se_tfo->get_task_tag(cmd)); |
4215 | 4324 | ||
4216 | spin_lock_irqsave(&cmd->t_state_lock, cmd_flags); | 4325 | spin_lock_irqsave(&cmd->t_state_lock, cmd_flags); |
4217 | if (!atomic_read(&cmd->transport_dev_active)) { | 4326 | if (!(cmd->transport_state & CMD_T_DEV_ACTIVE)) { |
4218 | spin_unlock_irqrestore(&cmd->t_state_lock, cmd_flags); | 4327 | spin_unlock_irqrestore(&cmd->t_state_lock, cmd_flags); |
4219 | goto check_cond; | 4328 | goto check_cond; |
4220 | } | 4329 | } |
4221 | atomic_set(&cmd->transport_dev_active, 0); | 4330 | cmd->transport_state &= ~CMD_T_DEV_ACTIVE; |
4222 | transport_all_task_dev_remove_state(cmd); | 4331 | transport_all_task_dev_remove_state(cmd); |
4223 | spin_unlock_irqrestore(&cmd->t_state_lock, cmd_flags); | 4332 | spin_unlock_irqrestore(&cmd->t_state_lock, cmd_flags); |
4224 | 4333 | ||
@@ -4238,7 +4347,7 @@ check_cond: | |||
4238 | * finished accessing it. | 4347 | * finished accessing it. |
4239 | */ | 4348 | */ |
4240 | spin_lock_irqsave(&cmd->t_state_lock, cmd_flags); | 4349 | spin_lock_irqsave(&cmd->t_state_lock, cmd_flags); |
4241 | if (atomic_read(&cmd->transport_lun_fe_stop)) { | 4350 | if (cmd->transport_state & CMD_T_LUN_FE_STOP) { |
4242 | pr_debug("SE_LUN[%d] - Detected FE stop for" | 4351 | pr_debug("SE_LUN[%d] - Detected FE stop for" |
4243 | " struct se_cmd: %p ITT: 0x%08x\n", | 4352 | " struct se_cmd: %p ITT: 0x%08x\n", |
4244 | lun->unpacked_lun, | 4353 | lun->unpacked_lun, |
@@ -4297,7 +4406,8 @@ bool transport_wait_for_tasks(struct se_cmd *cmd) | |||
4297 | unsigned long flags; | 4406 | unsigned long flags; |
4298 | 4407 | ||
4299 | spin_lock_irqsave(&cmd->t_state_lock, flags); | 4408 | spin_lock_irqsave(&cmd->t_state_lock, flags); |
4300 | if (!(cmd->se_cmd_flags & SCF_SE_LUN_CMD) && !(cmd->se_tmr_req)) { | 4409 | if (!(cmd->se_cmd_flags & SCF_SE_LUN_CMD) && |
4410 | !(cmd->se_cmd_flags & SCF_SCSI_TMR_CDB)) { | ||
4301 | spin_unlock_irqrestore(&cmd->t_state_lock, flags); | 4411 | spin_unlock_irqrestore(&cmd->t_state_lock, flags); |
4302 | return false; | 4412 | return false; |
4303 | } | 4413 | } |
@@ -4305,7 +4415,8 @@ bool transport_wait_for_tasks(struct se_cmd *cmd) | |||
4305 | * Only perform a possible wait_for_tasks if SCF_SUPPORTED_SAM_OPCODE | 4415 | * Only perform a possible wait_for_tasks if SCF_SUPPORTED_SAM_OPCODE |
4306 | * has been set in transport_set_supported_SAM_opcode(). | 4416 | * has been set in transport_set_supported_SAM_opcode(). |
4307 | */ | 4417 | */ |
4308 | if (!(cmd->se_cmd_flags & SCF_SUPPORTED_SAM_OPCODE) && !cmd->se_tmr_req) { | 4418 | if (!(cmd->se_cmd_flags & SCF_SUPPORTED_SAM_OPCODE) && |
4419 | !(cmd->se_cmd_flags & SCF_SCSI_TMR_CDB)) { | ||
4309 | spin_unlock_irqrestore(&cmd->t_state_lock, flags); | 4420 | spin_unlock_irqrestore(&cmd->t_state_lock, flags); |
4310 | return false; | 4421 | return false; |
4311 | } | 4422 | } |
@@ -4316,8 +4427,7 @@ bool transport_wait_for_tasks(struct se_cmd *cmd) | |||
4316 | * transport_clear_lun_from_sessions() once the ConfigFS context caller | 4427 | * transport_clear_lun_from_sessions() once the ConfigFS context caller |
4317 | * has completed its operation on the struct se_cmd. | 4428 | * has completed its operation on the struct se_cmd. |
4318 | */ | 4429 | */ |
4319 | if (atomic_read(&cmd->transport_lun_stop)) { | 4430 | if (cmd->transport_state & CMD_T_LUN_STOP) { |
4320 | |||
4321 | pr_debug("wait_for_tasks: Stopping" | 4431 | pr_debug("wait_for_tasks: Stopping" |
4322 | " wait_for_completion(&cmd->t_tasktransport_lun_fe" | 4432 | " wait_for_completion(&cmd->t_tasktransport_lun_fe" |
4323 | "_stop_comp); for ITT: 0x%08x\n", | 4433 | "_stop_comp); for ITT: 0x%08x\n", |
@@ -4345,18 +4455,18 @@ bool transport_wait_for_tasks(struct se_cmd *cmd) | |||
4345 | "stop_comp); for ITT: 0x%08x\n", | 4455 | "stop_comp); for ITT: 0x%08x\n", |
4346 | cmd->se_tfo->get_task_tag(cmd)); | 4456 | cmd->se_tfo->get_task_tag(cmd)); |
4347 | 4457 | ||
4348 | atomic_set(&cmd->transport_lun_stop, 0); | 4458 | cmd->transport_state &= ~CMD_T_LUN_STOP; |
4349 | } | 4459 | } |
4350 | if (!atomic_read(&cmd->t_transport_active) || | 4460 | |
4351 | atomic_read(&cmd->t_transport_aborted)) { | 4461 | if (!(cmd->transport_state & CMD_T_ACTIVE)) { |
4352 | spin_unlock_irqrestore(&cmd->t_state_lock, flags); | 4462 | spin_unlock_irqrestore(&cmd->t_state_lock, flags); |
4353 | return false; | 4463 | return false; |
4354 | } | 4464 | } |
4355 | 4465 | ||
4356 | atomic_set(&cmd->t_transport_stop, 1); | 4466 | cmd->transport_state |= CMD_T_STOP; |
4357 | 4467 | ||
4358 | pr_debug("wait_for_tasks: Stopping %p ITT: 0x%08x" | 4468 | pr_debug("wait_for_tasks: Stopping %p ITT: 0x%08x" |
4359 | " i_state: %d, t_state: %d, t_transport_stop = TRUE\n", | 4469 | " i_state: %d, t_state: %d, CMD_T_STOP\n", |
4360 | cmd, cmd->se_tfo->get_task_tag(cmd), | 4470 | cmd, cmd->se_tfo->get_task_tag(cmd), |
4361 | cmd->se_tfo->get_cmd_state(cmd), cmd->t_state); | 4471 | cmd->se_tfo->get_cmd_state(cmd), cmd->t_state); |
4362 | 4472 | ||
@@ -4367,8 +4477,7 @@ bool transport_wait_for_tasks(struct se_cmd *cmd) | |||
4367 | wait_for_completion(&cmd->t_transport_stop_comp); | 4477 | wait_for_completion(&cmd->t_transport_stop_comp); |
4368 | 4478 | ||
4369 | spin_lock_irqsave(&cmd->t_state_lock, flags); | 4479 | spin_lock_irqsave(&cmd->t_state_lock, flags); |
4370 | atomic_set(&cmd->t_transport_active, 0); | 4480 | cmd->transport_state &= ~(CMD_T_ACTIVE | CMD_T_STOP); |
4371 | atomic_set(&cmd->t_transport_stop, 0); | ||
4372 | 4481 | ||
4373 | pr_debug("wait_for_tasks: Stopped wait_for_compltion(" | 4482 | pr_debug("wait_for_tasks: Stopped wait_for_compltion(" |
4374 | "&cmd->t_transport_stop_comp) for ITT: 0x%08x\n", | 4483 | "&cmd->t_transport_stop_comp) for ITT: 0x%08x\n", |
@@ -4597,7 +4706,7 @@ int transport_check_aborted_status(struct se_cmd *cmd, int send_status) | |||
4597 | { | 4706 | { |
4598 | int ret = 0; | 4707 | int ret = 0; |
4599 | 4708 | ||
4600 | if (atomic_read(&cmd->t_transport_aborted) != 0) { | 4709 | if (cmd->transport_state & CMD_T_ABORTED) { |
4601 | if (!send_status || | 4710 | if (!send_status || |
4602 | (cmd->se_cmd_flags & SCF_SENT_DELAYED_TAS)) | 4711 | (cmd->se_cmd_flags & SCF_SENT_DELAYED_TAS)) |
4603 | return 1; | 4712 | return 1; |
@@ -4634,7 +4743,7 @@ void transport_send_task_abort(struct se_cmd *cmd) | |||
4634 | */ | 4743 | */ |
4635 | if (cmd->data_direction == DMA_TO_DEVICE) { | 4744 | if (cmd->data_direction == DMA_TO_DEVICE) { |
4636 | if (cmd->se_tfo->write_pending_status(cmd) != 0) { | 4745 | if (cmd->se_tfo->write_pending_status(cmd) != 0) { |
4637 | atomic_inc(&cmd->t_transport_aborted); | 4746 | cmd->transport_state |= CMD_T_ABORTED; |
4638 | smp_mb__after_atomic_inc(); | 4747 | smp_mb__after_atomic_inc(); |
4639 | } | 4748 | } |
4640 | } | 4749 | } |
@@ -4655,7 +4764,7 @@ static int transport_generic_do_tmr(struct se_cmd *cmd) | |||
4655 | 4764 | ||
4656 | switch (tmr->function) { | 4765 | switch (tmr->function) { |
4657 | case TMR_ABORT_TASK: | 4766 | case TMR_ABORT_TASK: |
4658 | tmr->response = TMR_FUNCTION_REJECTED; | 4767 | core_tmr_abort_task(dev, tmr, cmd->se_sess); |
4659 | break; | 4768 | break; |
4660 | case TMR_ABORT_TASK_SET: | 4769 | case TMR_ABORT_TASK_SET: |
4661 | case TMR_CLEAR_ACA: | 4770 | case TMR_CLEAR_ACA: |
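[Editor's note] With this hunk, ABORT_TASK stops being summarily rejected and is dispatched to the new core_tmr_abort_task(), the "Add target core support for TMR_ABORT_TASK" item from the pull request. Going by the ref_task_tag plumbing in struct se_tmr_req and the per-session command list used elsewhere in this diff, its job is roughly the following; a loose sketch, not the kernel function:

        /* Loose sketch: find the command matching tmr->ref_task_tag on the
         * session command list, pin it, and mark it aborted under its lock. */
        static void tmr_abort_task_sketch(struct se_device *dev,
                                          struct se_tmr_req *tmr,
                                          struct se_session *se_sess)
        {
                struct se_cmd *se_cmd;
                unsigned long flags;

                spin_lock_irqsave(&se_sess->sess_cmd_lock, flags);
                list_for_each_entry(se_cmd, &se_sess->sess_cmd_list, se_cmd_list) {
                        if (dev != se_cmd->se_dev)
                                continue;
                        if (tmr->ref_task_tag !=
                            se_cmd->se_tfo->get_task_tag(se_cmd))
                                continue;

                        kref_get(&se_cmd->cmd_kref);    /* keep it alive while aborting */
                        spin_unlock_irqrestore(&se_sess->sess_cmd_lock, flags);

                        spin_lock_irqsave(&se_cmd->t_state_lock, flags);
                        se_cmd->transport_state |= CMD_T_ABORTED;
                        spin_unlock_irqrestore(&se_cmd->t_state_lock, flags);

                        /* ...the real code then quiesces and completes the
                         * command and drops the kref taken above... */
                        tmr->response = TMR_FUNCTION_COMPLETE;
                        return;
                }
                spin_unlock_irqrestore(&se_sess->sess_cmd_lock, flags);

                tmr->response = TMR_TASK_DOES_NOT_EXIST;
        }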
diff --git a/drivers/target/target_core_ua.c b/drivers/target/target_core_ua.c index 3e12f6bcfa10..6666a0c74f60 100644 --- a/drivers/target/target_core_ua.c +++ b/drivers/target/target_core_ua.c | |||
@@ -53,7 +53,7 @@ int core_scsi3_ua_check( | |||
53 | if (!nacl) | 53 | if (!nacl) |
54 | return 0; | 54 | return 0; |
55 | 55 | ||
56 | deve = &nacl->device_list[cmd->orig_fe_lun]; | 56 | deve = nacl->device_list[cmd->orig_fe_lun]; |
57 | if (!atomic_read(&deve->ua_count)) | 57 | if (!atomic_read(&deve->ua_count)) |
58 | return 0; | 58 | return 0; |
59 | /* | 59 | /* |
@@ -110,7 +110,7 @@ int core_scsi3_ua_allocate( | |||
110 | ua->ua_ascq = ascq; | 110 | ua->ua_ascq = ascq; |
111 | 111 | ||
112 | spin_lock_irq(&nacl->device_list_lock); | 112 | spin_lock_irq(&nacl->device_list_lock); |
113 | deve = &nacl->device_list[unpacked_lun]; | 113 | deve = nacl->device_list[unpacked_lun]; |
114 | 114 | ||
115 | spin_lock(&deve->ua_lock); | 115 | spin_lock(&deve->ua_lock); |
116 | list_for_each_entry_safe(ua_p, ua_tmp, &deve->ua_list, ua_nacl_list) { | 116 | list_for_each_entry_safe(ua_p, ua_tmp, &deve->ua_list, ua_nacl_list) { |
@@ -220,7 +220,7 @@ void core_scsi3_ua_for_check_condition( | |||
220 | return; | 220 | return; |
221 | 221 | ||
222 | spin_lock_irq(&nacl->device_list_lock); | 222 | spin_lock_irq(&nacl->device_list_lock); |
223 | deve = &nacl->device_list[cmd->orig_fe_lun]; | 223 | deve = nacl->device_list[cmd->orig_fe_lun]; |
224 | if (!atomic_read(&deve->ua_count)) { | 224 | if (!atomic_read(&deve->ua_count)) { |
225 | spin_unlock_irq(&nacl->device_list_lock); | 225 | spin_unlock_irq(&nacl->device_list_lock); |
226 | return; | 226 | return; |
@@ -289,7 +289,7 @@ int core_scsi3_ua_clear_for_request_sense( | |||
289 | return -EINVAL; | 289 | return -EINVAL; |
290 | 290 | ||
291 | spin_lock_irq(&nacl->device_list_lock); | 291 | spin_lock_irq(&nacl->device_list_lock); |
292 | deve = &nacl->device_list[cmd->orig_fe_lun]; | 292 | deve = nacl->device_list[cmd->orig_fe_lun]; |
293 | if (!atomic_read(&deve->ua_count)) { | 293 | if (!atomic_read(&deve->ua_count)) { |
294 | spin_unlock_irq(&nacl->device_list_lock); | 294 | spin_unlock_irq(&nacl->device_list_lock); |
295 | return -EPERM; | 295 | return -EPERM; |
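[Editor's note] All four hunks in target_core_ua.c are mechanical fallout from the device_list conversion shown later in target_core_base.h: nacl->device_list is now an array of pointers populated via the new array_zalloc() helper, so call sites index it directly instead of taking the address of an element in one flat allocation. Schematically:

        /* Before: one large kzalloc(); entries addressed in place. */
        struct se_dev_entry *device_list;       /* flat table of entries */
        deve = &nacl->device_list[cmd->orig_fe_lun];

        /* After: an array of pointers, each entry its own small allocation,
         * trading one high-order allocation for many page-sized ones. */
        struct se_dev_entry **device_list;
        deve = nacl->device_list[cmd->orig_fe_lun];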
diff --git a/drivers/target/tcm_fc/tcm_fc.h b/drivers/target/tcm_fc/tcm_fc.h index e05c55100ec6..830657908db8 100644 --- a/drivers/target/tcm_fc/tcm_fc.h +++ b/drivers/target/tcm_fc/tcm_fc.h | |||
@@ -17,7 +17,7 @@ | |||
17 | #ifndef __TCM_FC_H__ | 17 | #ifndef __TCM_FC_H__ |
18 | #define __TCM_FC_H__ | 18 | #define __TCM_FC_H__ |
19 | 19 | ||
20 | #define FT_VERSION "0.3" | 20 | #define FT_VERSION "0.4" |
21 | 21 | ||
22 | #define FT_NAMELEN 32 /* length of ASCII WWPNs including pad */ | 22 | #define FT_NAMELEN 32 /* length of ASCII WWPNs including pad */ |
23 | #define FT_TPG_NAMELEN 32 /* max length of TPG name */ | 23 | #define FT_TPG_NAMELEN 32 /* max length of TPG name */ |
@@ -113,12 +113,10 @@ struct ft_lport_acl { | |||
113 | * Commands | 113 | * Commands |
114 | */ | 114 | */ |
115 | struct ft_cmd { | 115 | struct ft_cmd { |
116 | u32 lun; /* LUN from request */ | ||
117 | struct ft_sess *sess; /* session held for cmd */ | 116 | struct ft_sess *sess; /* session held for cmd */ |
118 | struct fc_seq *seq; /* sequence in exchange mgr */ | 117 | struct fc_seq *seq; /* sequence in exchange mgr */ |
119 | struct se_cmd se_cmd; /* Local TCM I/O descriptor */ | 118 | struct se_cmd se_cmd; /* Local TCM I/O descriptor */ |
120 | struct fc_frame *req_frame; | 119 | struct fc_frame *req_frame; |
121 | unsigned char *cdb; /* pointer to CDB inside frame */ | ||
122 | u32 write_data_len; /* data received on writes */ | 120 | u32 write_data_len; /* data received on writes */ |
123 | struct work_struct work; | 121 | struct work_struct work; |
124 | /* Local sense buffer */ | 122 | /* Local sense buffer */ |
@@ -143,11 +141,8 @@ extern struct target_fabric_configfs *ft_configfs; | |||
143 | void ft_sess_put(struct ft_sess *); | 141 | void ft_sess_put(struct ft_sess *); |
144 | int ft_sess_shutdown(struct se_session *); | 142 | int ft_sess_shutdown(struct se_session *); |
145 | void ft_sess_close(struct se_session *); | 143 | void ft_sess_close(struct se_session *); |
146 | void ft_sess_stop(struct se_session *, int, int); | ||
147 | int ft_sess_logged_in(struct se_session *); | ||
148 | u32 ft_sess_get_index(struct se_session *); | 144 | u32 ft_sess_get_index(struct se_session *); |
149 | u32 ft_sess_get_port_name(struct se_session *, unsigned char *, u32); | 145 | u32 ft_sess_get_port_name(struct se_session *, unsigned char *, u32); |
150 | void ft_sess_set_erl0(struct se_session *); | ||
151 | 146 | ||
152 | void ft_lport_add(struct fc_lport *, void *); | 147 | void ft_lport_add(struct fc_lport *, void *); |
153 | void ft_lport_del(struct fc_lport *, void *); | 148 | void ft_lport_del(struct fc_lport *, void *); |
@@ -165,7 +160,6 @@ int ft_write_pending_status(struct se_cmd *); | |||
165 | u32 ft_get_task_tag(struct se_cmd *); | 160 | u32 ft_get_task_tag(struct se_cmd *); |
166 | int ft_get_cmd_state(struct se_cmd *); | 161 | int ft_get_cmd_state(struct se_cmd *); |
167 | int ft_queue_tm_resp(struct se_cmd *); | 162 | int ft_queue_tm_resp(struct se_cmd *); |
168 | int ft_is_state_remove(struct se_cmd *); | ||
169 | 163 | ||
170 | /* | 164 | /* |
171 | * other internal functions. | 165 | * other internal functions. |
diff --git a/drivers/target/tcm_fc/tfc_cmd.c b/drivers/target/tcm_fc/tfc_cmd.c index 9e7e26c74c79..62dec9715ce5 100644 --- a/drivers/target/tcm_fc/tfc_cmd.c +++ b/drivers/target/tcm_fc/tfc_cmd.c | |||
@@ -59,9 +59,6 @@ void ft_dump_cmd(struct ft_cmd *cmd, const char *caller) | |||
59 | se_cmd = &cmd->se_cmd; | 59 | se_cmd = &cmd->se_cmd; |
60 | pr_debug("%s: cmd %p sess %p seq %p se_cmd %p\n", | 60 | pr_debug("%s: cmd %p sess %p seq %p se_cmd %p\n", |
61 | caller, cmd, cmd->sess, cmd->seq, se_cmd); | 61 | caller, cmd, cmd->sess, cmd->seq, se_cmd); |
62 | pr_debug("%s: cmd %p cdb %p\n", | ||
63 | caller, cmd, cmd->cdb); | ||
64 | pr_debug("%s: cmd %p lun %d\n", caller, cmd, cmd->lun); | ||
65 | 62 | ||
66 | pr_debug("%s: cmd %p data_nents %u len %u se_cmd_flags <0x%x>\n", | 63 | pr_debug("%s: cmd %p data_nents %u len %u se_cmd_flags <0x%x>\n", |
67 | caller, cmd, se_cmd->t_data_nents, | 64 | caller, cmd, se_cmd->t_data_nents, |
@@ -81,8 +78,6 @@ void ft_dump_cmd(struct ft_cmd *cmd, const char *caller) | |||
81 | caller, cmd, ep->sid, ep->did, ep->oxid, ep->rxid, | 78 | caller, cmd, ep->sid, ep->did, ep->oxid, ep->rxid, |
82 | sp->id, ep->esb_stat); | 79 | sp->id, ep->esb_stat); |
83 | } | 80 | } |
84 | print_hex_dump(KERN_INFO, "ft_dump_cmd ", DUMP_PREFIX_NONE, | ||
85 | 16, 4, cmd->cdb, MAX_COMMAND_SIZE, 0); | ||
86 | } | 81 | } |
87 | 82 | ||
88 | static void ft_free_cmd(struct ft_cmd *cmd) | 83 | static void ft_free_cmd(struct ft_cmd *cmd) |
@@ -249,11 +244,6 @@ int ft_get_cmd_state(struct se_cmd *se_cmd) | |||
249 | return 0; | 244 | return 0; |
250 | } | 245 | } |
251 | 246 | ||
252 | int ft_is_state_remove(struct se_cmd *se_cmd) | ||
253 | { | ||
254 | return 0; /* XXX TBD */ | ||
255 | } | ||
256 | |||
257 | /* | 247 | /* |
258 | * FC sequence response handler for follow-on sequences (data) and aborts. | 248 | * FC sequence response handler for follow-on sequences (data) and aborts. |
259 | */ | 249 | */ |
@@ -325,10 +315,12 @@ static void ft_send_resp_status(struct fc_lport *lport, | |||
325 | 315 | ||
326 | fc_fill_reply_hdr(fp, rx_fp, FC_RCTL_DD_CMD_STATUS, 0); | 316 | fc_fill_reply_hdr(fp, rx_fp, FC_RCTL_DD_CMD_STATUS, 0); |
327 | sp = fr_seq(fp); | 317 | sp = fr_seq(fp); |
328 | if (sp) | 318 | if (sp) { |
329 | lport->tt.seq_send(lport, sp, fp); | 319 | lport->tt.seq_send(lport, sp, fp); |
330 | else | 320 | lport->tt.exch_done(sp); |
321 | } else { | ||
331 | lport->tt.frame_send(lport, fp); | 322 | lport->tt.frame_send(lport, fp); |
323 | } | ||
332 | } | 324 | } |
333 | 325 | ||
334 | /* | 326 | /* |
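[Editor's note] This two-line change is the "tcm_fc: Fix fc_exch memory leak in ft_send_resp_status" patch from the series: when fr_seq() yields a sequence, the response is sent on a held exchange, and nothing afterwards released that exchange, so every status-only response leaked one fc_exch. Annotated shape of the fix, with the libfc template ops exactly as in the hunk:

        sp = fr_seq(fp);
        if (sp) {
                /* Response rides an existing exchange: send the final
                 * sequence, then explicitly complete the exchange so
                 * libfc can free it. */
                lport->tt.seq_send(lport, sp, fp);
                lport->tt.exch_done(sp);
        } else {
                /* No exchange held: a plain frame send suffices. */
                lport->tt.frame_send(lport, fp);
        }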
@@ -358,16 +350,10 @@ static void ft_send_resp_code_and_free(struct ft_cmd *cmd, | |||
358 | */ | 350 | */ |
359 | static void ft_send_tm(struct ft_cmd *cmd) | 351 | static void ft_send_tm(struct ft_cmd *cmd) |
360 | { | 352 | { |
361 | struct se_tmr_req *tmr; | ||
362 | struct fcp_cmnd *fcp; | 353 | struct fcp_cmnd *fcp; |
363 | struct ft_sess *sess; | 354 | int rc; |
364 | u8 tm_func; | 355 | u8 tm_func; |
365 | 356 | ||
366 | transport_init_se_cmd(&cmd->se_cmd, &ft_configfs->tf_ops, | ||
367 | cmd->sess->se_sess, 0, DMA_NONE, 0, | ||
368 | &cmd->ft_sense_buffer[0]); | ||
369 | target_get_sess_cmd(cmd->sess->se_sess, &cmd->se_cmd, false); | ||
370 | |||
371 | fcp = fc_frame_payload_get(cmd->req_frame, sizeof(*fcp)); | 357 | fcp = fc_frame_payload_get(cmd->req_frame, sizeof(*fcp)); |
372 | 358 | ||
373 | switch (fcp->fc_tm_flags) { | 359 | switch (fcp->fc_tm_flags) { |
@@ -396,44 +382,12 @@ static void ft_send_tm(struct ft_cmd *cmd) | |||
396 | return; | 382 | return; |
397 | } | 383 | } |
398 | 384 | ||
399 | pr_debug("alloc tm cmd fn %d\n", tm_func); | 385 | /* FIXME: Add referenced task tag for ABORT_TASK */ |
400 | tmr = core_tmr_alloc_req(&cmd->se_cmd, cmd, tm_func, GFP_KERNEL); | 386 | rc = target_submit_tmr(&cmd->se_cmd, cmd->sess->se_sess, |
401 | if (!tmr) { | 387 | &cmd->ft_sense_buffer[0], scsilun_to_int(&fcp->fc_lun), |
402 | pr_debug("alloc failed\n"); | 388 | cmd, tm_func, GFP_KERNEL, 0, 0); |
389 | if (rc < 0) | ||
403 | ft_send_resp_code_and_free(cmd, FCP_TMF_FAILED); | 390 | ft_send_resp_code_and_free(cmd, FCP_TMF_FAILED); |
404 | return; | ||
405 | } | ||
406 | cmd->se_cmd.se_tmr_req = tmr; | ||
407 | |||
408 | switch (fcp->fc_tm_flags) { | ||
409 | case FCP_TMF_LUN_RESET: | ||
410 | cmd->lun = scsilun_to_int((struct scsi_lun *)fcp->fc_lun); | ||
411 | if (transport_lookup_tmr_lun(&cmd->se_cmd, cmd->lun) < 0) { | ||
412 | /* | ||
413 | * Make sure to clean up newly allocated TMR request | ||
414 | * since "unable to handle TMR request because failed | ||
415 | * to get to LUN" | ||
416 | */ | ||
417 | pr_debug("Failed to get LUN for TMR func %d, " | ||
418 | "se_cmd %p, unpacked_lun %d\n", | ||
419 | tm_func, &cmd->se_cmd, cmd->lun); | ||
420 | ft_dump_cmd(cmd, __func__); | ||
421 | sess = cmd->sess; | ||
422 | transport_send_check_condition_and_sense(&cmd->se_cmd, | ||
423 | cmd->se_cmd.scsi_sense_reason, 0); | ||
424 | ft_sess_put(sess); | ||
425 | return; | ||
426 | } | ||
427 | break; | ||
428 | case FCP_TMF_TGT_RESET: | ||
429 | case FCP_TMF_CLR_TASK_SET: | ||
430 | case FCP_TMF_ABT_TASK_SET: | ||
431 | case FCP_TMF_CLR_ACA: | ||
432 | break; | ||
433 | default: | ||
434 | return; | ||
435 | } | ||
436 | transport_generic_handle_tmr(&cmd->se_cmd); | ||
437 | } | 391 | } |
438 | 392 | ||
439 | /* | 393 | /* |
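[Editor's note] ft_send_tm() drops its open-coded setup sequence in favor of the new target_submit_tmr() helper, whose prototype appears later in this diff (target_core_fabric.h). Judging by the code it deletes, the helper bundles transport_init_se_cmd(), target_get_sess_cmd(), core_tmr_alloc_req(), transport_lookup_tmr_lun() and transport_generic_handle_tmr() behind one error-returning call. The tcm_fc call site, with the trailing parameters spelled out:

        rc = target_submit_tmr(&cmd->se_cmd, cmd->sess->se_sess,
                               &cmd->ft_sense_buffer[0],
                               scsilun_to_int(&fcp->fc_lun),
                               cmd,             /* fabric_tmr_ptr */
                               tm_func, GFP_KERNEL,
                               0,               /* ref task tag; FIXME for ABORT_TASK */
                               0);              /* flags */
        if (rc < 0)
                ft_send_resp_code_and_free(cmd, FCP_TMF_FAILED);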
@@ -538,7 +492,6 @@ static void ft_send_work(struct work_struct *work) | |||
538 | struct fc_frame_header *fh = fc_frame_header_get(cmd->req_frame); | 492 | struct fc_frame_header *fh = fc_frame_header_get(cmd->req_frame); |
539 | struct fcp_cmnd *fcp; | 493 | struct fcp_cmnd *fcp; |
540 | int data_dir = 0; | 494 | int data_dir = 0; |
541 | u32 data_len; | ||
542 | int task_attr; | 495 | int task_attr; |
543 | 496 | ||
544 | fcp = fc_frame_payload_get(cmd->req_frame, sizeof(*fcp)); | 497 | fcp = fc_frame_payload_get(cmd->req_frame, sizeof(*fcp)); |
@@ -548,47 +501,6 @@ static void ft_send_work(struct work_struct *work) | |||
548 | if (fcp->fc_flags & FCP_CFL_LEN_MASK) | 501 | if (fcp->fc_flags & FCP_CFL_LEN_MASK) |
549 | goto err; /* not handling longer CDBs yet */ | 502 | goto err; /* not handling longer CDBs yet */ |
550 | 503 | ||
551 | if (fcp->fc_tm_flags) { | ||
552 | task_attr = FCP_PTA_SIMPLE; | ||
553 | data_dir = DMA_NONE; | ||
554 | data_len = 0; | ||
555 | } else { | ||
556 | switch (fcp->fc_flags & (FCP_CFL_RDDATA | FCP_CFL_WRDATA)) { | ||
557 | case 0: | ||
558 | data_dir = DMA_NONE; | ||
559 | break; | ||
560 | case FCP_CFL_RDDATA: | ||
561 | data_dir = DMA_FROM_DEVICE; | ||
562 | break; | ||
563 | case FCP_CFL_WRDATA: | ||
564 | data_dir = DMA_TO_DEVICE; | ||
565 | break; | ||
566 | case FCP_CFL_WRDATA | FCP_CFL_RDDATA: | ||
567 | goto err; /* TBD not supported by tcm_fc yet */ | ||
568 | } | ||
569 | /* | ||
570 | * Locate the SAM Task Attr from fc_pri_ta | ||
571 | */ | ||
572 | switch (fcp->fc_pri_ta & FCP_PTA_MASK) { | ||
573 | case FCP_PTA_HEADQ: | ||
574 | task_attr = MSG_HEAD_TAG; | ||
575 | break; | ||
576 | case FCP_PTA_ORDERED: | ||
577 | task_attr = MSG_ORDERED_TAG; | ||
578 | break; | ||
579 | case FCP_PTA_ACA: | ||
580 | task_attr = MSG_ACA_TAG; | ||
581 | break; | ||
582 | case FCP_PTA_SIMPLE: /* Fallthrough */ | ||
583 | default: | ||
584 | task_attr = MSG_SIMPLE_TAG; | ||
585 | } | ||
586 | |||
587 | |||
588 | task_attr = fcp->fc_pri_ta & FCP_PTA_MASK; | ||
589 | data_len = ntohl(fcp->fc_dl); | ||
590 | cmd->cdb = fcp->fc_cdb; | ||
591 | } | ||
592 | /* | 504 | /* |
593 | * Check for FCP task management flags | 505 | * Check for FCP task management flags |
594 | */ | 506 | */ |
@@ -596,15 +508,46 @@ static void ft_send_work(struct work_struct *work) | |||
596 | ft_send_tm(cmd); | 508 | ft_send_tm(cmd); |
597 | return; | 509 | return; |
598 | } | 510 | } |
511 | |||
512 | switch (fcp->fc_flags & (FCP_CFL_RDDATA | FCP_CFL_WRDATA)) { | ||
513 | case 0: | ||
514 | data_dir = DMA_NONE; | ||
515 | break; | ||
516 | case FCP_CFL_RDDATA: | ||
517 | data_dir = DMA_FROM_DEVICE; | ||
518 | break; | ||
519 | case FCP_CFL_WRDATA: | ||
520 | data_dir = DMA_TO_DEVICE; | ||
521 | break; | ||
522 | case FCP_CFL_WRDATA | FCP_CFL_RDDATA: | ||
523 | goto err; /* TBD not supported by tcm_fc yet */ | ||
524 | } | ||
525 | /* | ||
526 | * Locate the SAM Task Attr from fc_pri_ta | ||
527 | */ | ||
528 | switch (fcp->fc_pri_ta & FCP_PTA_MASK) { | ||
529 | case FCP_PTA_HEADQ: | ||
530 | task_attr = MSG_HEAD_TAG; | ||
531 | break; | ||
532 | case FCP_PTA_ORDERED: | ||
533 | task_attr = MSG_ORDERED_TAG; | ||
534 | break; | ||
535 | case FCP_PTA_ACA: | ||
536 | task_attr = MSG_ACA_TAG; | ||
537 | break; | ||
538 | case FCP_PTA_SIMPLE: /* Fallthrough */ | ||
539 | default: | ||
540 | task_attr = MSG_SIMPLE_TAG; | ||
541 | } | ||
542 | |||
599 | fc_seq_exch(cmd->seq)->lp->tt.seq_set_resp(cmd->seq, ft_recv_seq, cmd); | 543 | fc_seq_exch(cmd->seq)->lp->tt.seq_set_resp(cmd->seq, ft_recv_seq, cmd); |
600 | cmd->lun = scsilun_to_int((struct scsi_lun *)fcp->fc_lun); | ||
601 | /* | 544 | /* |
602 | * Use a single se_cmd->cmd_kref as we expect to release se_cmd | 545 | * Use a single se_cmd->cmd_kref as we expect to release se_cmd |
603 | * directly from ft_check_stop_free callback in response path. | 546 | * directly from ft_check_stop_free callback in response path. |
604 | */ | 547 | */ |
605 | target_submit_cmd(&cmd->se_cmd, cmd->sess->se_sess, cmd->cdb, | 548 | target_submit_cmd(&cmd->se_cmd, cmd->sess->se_sess, fcp->fc_cdb, |
606 | &cmd->ft_sense_buffer[0], cmd->lun, data_len, | 549 | &cmd->ft_sense_buffer[0], scsilun_to_int(&fcp->fc_lun), |
607 | task_attr, data_dir, 0); | 550 | ntohl(fcp->fc_dl), task_attr, data_dir, 0); |
608 | pr_debug("r_ctl %x alloc target_submit_cmd\n", fh->fh_r_ctl); | 551 | pr_debug("r_ctl %x alloc target_submit_cmd\n", fh->fh_r_ctl); |
609 | return; | 552 | return; |
610 | 553 | ||
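[Editor's note] Moving the data-direction and task-attribute switches out of the deleted else-branch also quietly retires a bug visible in the removed lines: old line 588, task_attr = fcp->fc_pri_ta & FCP_PTA_MASK;, immediately clobbered the MSG_*_TAG value the switch above it had just computed, so the raw FCP priority/attribute byte was passed where a SAM task-attribute constant was expected. In the relocated code the switch result is what actually reaches submission:

        /* One call now carries everything the core needs: the CDB straight
         * from the frame, sense buffer, unpacked LUN, expected data length,
         * SAM task attribute, and data direction. */
        target_submit_cmd(&cmd->se_cmd, cmd->sess->se_sess, fcp->fc_cdb,
                          &cmd->ft_sense_buffer[0], scsilun_to_int(&fcp->fc_lun),
                          ntohl(fcp->fc_dl), task_attr, data_dir, 0);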
diff --git a/drivers/target/tcm_fc/tfc_conf.c b/drivers/target/tcm_fc/tfc_conf.c index 73852fbc857b..f357039349ba 100644 --- a/drivers/target/tcm_fc/tfc_conf.c +++ b/drivers/target/tcm_fc/tfc_conf.c | |||
@@ -529,9 +529,6 @@ static struct target_core_fabric_ops ft_fabric_ops = { | |||
529 | .release_cmd = ft_release_cmd, | 529 | .release_cmd = ft_release_cmd, |
530 | .shutdown_session = ft_sess_shutdown, | 530 | .shutdown_session = ft_sess_shutdown, |
531 | .close_session = ft_sess_close, | 531 | .close_session = ft_sess_close, |
532 | .stop_session = ft_sess_stop, | ||
533 | .fall_back_to_erl0 = ft_sess_set_erl0, | ||
534 | .sess_logged_in = ft_sess_logged_in, | ||
535 | .sess_get_index = ft_sess_get_index, | 532 | .sess_get_index = ft_sess_get_index, |
536 | .sess_get_initiator_sid = NULL, | 533 | .sess_get_initiator_sid = NULL, |
537 | .write_pending = ft_write_pending, | 534 | .write_pending = ft_write_pending, |
@@ -544,7 +541,6 @@ static struct target_core_fabric_ops ft_fabric_ops = { | |||
544 | .queue_tm_rsp = ft_queue_tm_resp, | 541 | .queue_tm_rsp = ft_queue_tm_resp, |
545 | .get_fabric_sense_len = ft_get_fabric_sense_len, | 542 | .get_fabric_sense_len = ft_get_fabric_sense_len, |
546 | .set_fabric_sense_len = ft_set_fabric_sense_len, | 543 | .set_fabric_sense_len = ft_set_fabric_sense_len, |
547 | .is_state_remove = ft_is_state_remove, | ||
548 | /* | 544 | /* |
549 | * Setup function pointers for generic logic in | 545 | * Setup function pointers for generic logic in |
550 | * target_core_fabric_configfs.c | 546 | * target_core_fabric_configfs.c |
diff --git a/drivers/target/tcm_fc/tfc_sess.c b/drivers/target/tcm_fc/tfc_sess.c index eff512b5a2a0..cb99da920068 100644 --- a/drivers/target/tcm_fc/tfc_sess.c +++ b/drivers/target/tcm_fc/tfc_sess.c | |||
@@ -309,11 +309,9 @@ int ft_sess_shutdown(struct se_session *se_sess) | |||
309 | void ft_sess_close(struct se_session *se_sess) | 309 | void ft_sess_close(struct se_session *se_sess) |
310 | { | 310 | { |
311 | struct ft_sess *sess = se_sess->fabric_sess_ptr; | 311 | struct ft_sess *sess = se_sess->fabric_sess_ptr; |
312 | struct fc_lport *lport; | ||
313 | u32 port_id; | 312 | u32 port_id; |
314 | 313 | ||
315 | mutex_lock(&ft_lport_lock); | 314 | mutex_lock(&ft_lport_lock); |
316 | lport = sess->tport->lport; | ||
317 | port_id = sess->port_id; | 315 | port_id = sess->port_id; |
318 | if (port_id == -1) { | 316 | if (port_id == -1) { |
319 | mutex_unlock(&ft_lport_lock); | 317 | mutex_unlock(&ft_lport_lock); |
@@ -328,20 +326,6 @@ void ft_sess_close(struct se_session *se_sess) | |||
328 | synchronize_rcu(); /* let transport deregister happen */ | 326 | synchronize_rcu(); /* let transport deregister happen */ |
329 | } | 327 | } |
330 | 328 | ||
331 | void ft_sess_stop(struct se_session *se_sess, int sess_sleep, int conn_sleep) | ||
332 | { | ||
333 | struct ft_sess *sess = se_sess->fabric_sess_ptr; | ||
334 | |||
335 | pr_debug("port_id %x\n", sess->port_id); | ||
336 | } | ||
337 | |||
338 | int ft_sess_logged_in(struct se_session *se_sess) | ||
339 | { | ||
340 | struct ft_sess *sess = se_sess->fabric_sess_ptr; | ||
341 | |||
342 | return sess->port_id != -1; | ||
343 | } | ||
344 | |||
345 | u32 ft_sess_get_index(struct se_session *se_sess) | 329 | u32 ft_sess_get_index(struct se_session *se_sess) |
346 | { | 330 | { |
347 | struct ft_sess *sess = se_sess->fabric_sess_ptr; | 331 | struct ft_sess *sess = se_sess->fabric_sess_ptr; |
@@ -357,11 +341,6 @@ u32 ft_sess_get_port_name(struct se_session *se_sess, | |||
357 | return ft_format_wwn(buf, len, sess->port_name); | 341 | return ft_format_wwn(buf, len, sess->port_name); |
358 | } | 342 | } |
359 | 343 | ||
360 | void ft_sess_set_erl0(struct se_session *se_sess) | ||
361 | { | ||
362 | /* XXX TBD called when out of memory */ | ||
363 | } | ||
364 | |||
365 | /* | 344 | /* |
366 | * libfc ops involving sessions. | 345 | * libfc ops involving sessions. |
367 | */ | 346 | */ |
diff --git a/include/scsi/fc/fc_fcp.h b/include/scsi/fc/fc_fcp.h index 652dec230514..0d7d67e96d43 100644 --- a/include/scsi/fc/fc_fcp.h +++ b/include/scsi/fc/fc_fcp.h | |||
@@ -20,6 +20,8 @@ | |||
20 | #ifndef _FC_FCP_H_ | 20 | #ifndef _FC_FCP_H_ |
21 | #define _FC_FCP_H_ | 21 | #define _FC_FCP_H_ |
22 | 22 | ||
23 | #include <scsi/scsi.h> | ||
24 | |||
23 | /* | 25 | /* |
24 | * Fibre Channel Protocol for SCSI. | 26 | * Fibre Channel Protocol for SCSI. |
25 | * From T10 FCP-3, T10 project 1560-D Rev 4, Sept. 13, 2005. | 27 | * From T10 FCP-3, T10 project 1560-D Rev 4, Sept. 13, 2005. |
@@ -45,7 +47,7 @@ | |||
45 | * FCP_CMND IU Payload. | 47 | * FCP_CMND IU Payload. |
46 | */ | 48 | */ |
47 | struct fcp_cmnd { | 49 | struct fcp_cmnd { |
48 | __u8 fc_lun[8]; /* logical unit number */ | 50 | struct scsi_lun fc_lun; /* logical unit number */ |
49 | __u8 fc_cmdref; /* command reference number */ | 51 | __u8 fc_cmdref; /* command reference number */ |
50 | __u8 fc_pri_ta; /* priority and task attribute */ | 52 | __u8 fc_pri_ta; /* priority and task attribute */ |
51 | __u8 fc_tm_flags; /* task management flags */ | 53 | __u8 fc_tm_flags; /* task management flags */ |
@@ -57,7 +59,7 @@ struct fcp_cmnd { | |||
57 | #define FCP_CMND_LEN 32 /* expected length of structure */ | 59 | #define FCP_CMND_LEN 32 /* expected length of structure */ |
58 | 60 | ||
59 | struct fcp_cmnd32 { | 61 | struct fcp_cmnd32 { |
60 | __u8 fc_lun[8]; /* logical unit number */ | 62 | struct scsi_lun fc_lun; /* logical unit number */ |
61 | __u8 fc_cmdref; /* command reference number */ | 63 | __u8 fc_cmdref; /* command reference number */ |
62 | __u8 fc_pri_ta; /* priority and task attribute */ | 64 | __u8 fc_pri_ta; /* priority and task attribute */ |
63 | __u8 fc_tm_flags; /* task management flags */ | 65 | __u8 fc_tm_flags; /* task management flags */ |
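[Editor's note] Retyping fc_lun from __u8[8] to struct scsi_lun (hence the new <scsi/scsi.h> include above) removes the cast every consumer previously needed and lets the compiler check the pointer type. The tcm_fc call sites earlier in this diff show the before and after:

        /* before: opaque byte array, cast required at every call site */
        cmd->lun = scsilun_to_int((struct scsi_lun *)fcp->fc_lun);

        /* after: typed field; passing the wrong pointer is now a build error */
        u32 lun = scsilun_to_int(&fcp->fc_lun);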
diff --git a/include/scsi/scsi.h b/include/scsi/scsi.h index 8001ae4cd7ba..f34a5a87af38 100644 --- a/include/scsi/scsi.h +++ b/include/scsi/scsi.h | |||
@@ -143,6 +143,7 @@ struct scsi_cmnd; | |||
143 | #define READ_ATTRIBUTE 0x8c | 143 | #define READ_ATTRIBUTE 0x8c |
144 | #define WRITE_ATTRIBUTE 0x8d | 144 | #define WRITE_ATTRIBUTE 0x8d |
145 | #define VERIFY_16 0x8f | 145 | #define VERIFY_16 0x8f |
146 | #define SYNCHRONIZE_CACHE_16 0x91 | ||
146 | #define WRITE_SAME_16 0x93 | 147 | #define WRITE_SAME_16 0x93 |
147 | #define SERVICE_ACTION_IN 0x9e | 148 | #define SERVICE_ACTION_IN 0x9e |
148 | /* values for service action in */ | 149 | /* values for service action in */ |
diff --git a/include/target/target_core_backend.h b/include/target/target_core_backend.h index e5e6ff98f0fa..8c9ff1b14396 100644 --- a/include/target/target_core_backend.h +++ b/include/target/target_core_backend.h | |||
@@ -62,4 +62,6 @@ int transport_set_vpd_ident(struct t10_vpd *, unsigned char *); | |||
62 | void *transport_kmap_data_sg(struct se_cmd *); | 62 | void *transport_kmap_data_sg(struct se_cmd *); |
63 | void transport_kunmap_data_sg(struct se_cmd *); | 63 | void transport_kunmap_data_sg(struct se_cmd *); |
64 | 64 | ||
65 | void array_free(void *array, int n); | ||
66 | |||
65 | #endif /* TARGET_CORE_BACKEND_H */ | 67 | #endif /* TARGET_CORE_BACKEND_H */ |
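[Editor's note] Only array_free() is exported here; the allocating half stays private to target-core. A plausible sketch of the pair, assuming the usual pattern for an array-of-pointers table (the in-tree implementation may differ in detail; needs <linux/slab.h>):

        static void *array_zalloc(int n, size_t size, gfp_t flags)
        {
                void **a;
                int i;

                a = kzalloc(n * sizeof(void *), flags);
                if (!a)
                        return NULL;
                for (i = 0; i < n; i++) {
                        a[i] = kzalloc(size, flags);
                        if (!a[i]) {
                                array_free(a, n);       /* kfree(NULL) is a no-op */
                                return NULL;
                        }
                }
                return a;
        }

        void array_free(void *array, int n)
        {
                void **a = array;
                int i;

                for (i = 0; i < n; i++)
                        kfree(a[i]);
                kfree(a);
        }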
diff --git a/include/target/target_core_base.h b/include/target/target_core_base.h index dc4e345a0163..aaccc5f5fc9f 100644 --- a/include/target/target_core_base.h +++ b/include/target/target_core_base.h | |||
@@ -9,7 +9,7 @@ | |||
9 | #include <net/sock.h> | 9 | #include <net/sock.h> |
10 | #include <net/tcp.h> | 10 | #include <net/tcp.h> |
11 | 11 | ||
12 | #define TARGET_CORE_MOD_VERSION "v4.1.0-rc1-ml" | 12 | #define TARGET_CORE_MOD_VERSION "v4.1.0-rc2-ml" |
13 | #define TARGET_CORE_VERSION TARGET_CORE_MOD_VERSION | 13 | #define TARGET_CORE_VERSION TARGET_CORE_MOD_VERSION |
14 | 14 | ||
15 | /* Maximum Number of LUNs per Target Portal Group */ | 15 | /* Maximum Number of LUNs per Target Portal Group */ |
@@ -86,6 +86,8 @@ | |||
86 | #define DA_UNMAP_GRANULARITY_DEFAULT 0 | 86 | #define DA_UNMAP_GRANULARITY_DEFAULT 0 |
87 | /* Default unmap_granularity_alignment */ | 87 | /* Default unmap_granularity_alignment */ |
88 | #define DA_UNMAP_GRANULARITY_ALIGNMENT_DEFAULT 0 | 88 | #define DA_UNMAP_GRANULARITY_ALIGNMENT_DEFAULT 0 |
89 | /* Default max transfer length */ | ||
90 | #define DA_FABRIC_MAX_SECTORS 8192 | ||
89 | /* Emulation for Direct Page Out */ | 91 | /* Emulation for Direct Page Out */ |
90 | #define DA_EMULATE_DPO 0 | 92 | #define DA_EMULATE_DPO 0 |
91 | /* Emulation for Forced Unit Access WRITEs */ | 93 | /* Emulation for Forced Unit Access WRITEs */ |
@@ -118,9 +120,9 @@ | |||
118 | /* Queue Algorithm Modifier default for restricted reordering in control mode page */ | 120 | /* Queue Algorithm Modifier default for restricted reordering in control mode page */ |
119 | #define DA_EMULATE_REST_REORD 0 | 121 | #define DA_EMULATE_REST_REORD 0 |
120 | 122 | ||
123 | #define SE_INQUIRY_BUF 512 | ||
121 | #define SE_MODE_PAGE_BUF 512 | 124 | #define SE_MODE_PAGE_BUF 512 |
122 | 125 | ||
123 | |||
124 | /* struct se_hba->hba_flags */ | 126 | /* struct se_hba->hba_flags */ |
125 | enum hba_flags_table { | 127 | enum hba_flags_table { |
126 | HBA_FLAGS_INTERNAL_USE = 0x01, | 128 | HBA_FLAGS_INTERNAL_USE = 0x01, |
@@ -169,7 +171,8 @@ enum se_cmd_flags_table { | |||
169 | SCF_EMULATED_TASK_SENSE = 0x00000004, | 171 | SCF_EMULATED_TASK_SENSE = 0x00000004, |
170 | SCF_SCSI_DATA_SG_IO_CDB = 0x00000008, | 172 | SCF_SCSI_DATA_SG_IO_CDB = 0x00000008, |
171 | SCF_SCSI_CONTROL_SG_IO_CDB = 0x00000010, | 173 | SCF_SCSI_CONTROL_SG_IO_CDB = 0x00000010, |
172 | SCF_SCSI_NON_DATA_CDB = 0x00000040, | 174 | SCF_SCSI_NON_DATA_CDB = 0x00000020, |
175 | SCF_SCSI_TMR_CDB = 0x00000040, | ||
173 | SCF_SCSI_CDB_EXCEPTION = 0x00000080, | 176 | SCF_SCSI_CDB_EXCEPTION = 0x00000080, |
174 | SCF_SCSI_RESERVATION_CONFLICT = 0x00000100, | 177 | SCF_SCSI_RESERVATION_CONFLICT = 0x00000100, |
175 | SCF_FUA = 0x00000200, | 178 | SCF_FUA = 0x00000200, |
@@ -183,7 +186,8 @@ enum se_cmd_flags_table { | |||
183 | SCF_ALUA_NON_OPTIMIZED = 0x00040000, | 186 | SCF_ALUA_NON_OPTIMIZED = 0x00040000, |
184 | SCF_DELAYED_CMD_FROM_SAM_ATTR = 0x00080000, | 187 | SCF_DELAYED_CMD_FROM_SAM_ATTR = 0x00080000, |
185 | SCF_UNUSED = 0x00100000, | 188 | SCF_UNUSED = 0x00100000, |
186 | SCF_PASSTHROUGH_SG_TO_MEM_NOALLOC = 0x00400000, | 189 | SCF_PASSTHROUGH_SG_TO_MEM_NOALLOC = 0x00200000, |
190 | SCF_ACK_KREF = 0x00400000, | ||
187 | }; | 191 | }; |
188 | 192 | ||
189 | /* struct se_dev_entry->lun_flags and struct se_lun->lun_access */ | 193 | /* struct se_dev_entry->lun_flags and struct se_lun->lun_access */ |
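[Editor's note] Note the renumbering in the two enum hunks above: SCF_SCSI_NON_DATA_CDB drops from 0x40 to the previously unused 0x20 so that 0x40 can become SCF_SCSI_TMR_CDB, and SCF_PASSTHROUGH_SG_TO_MEM_NOALLOC moves to 0x200000 to free 0x400000 for SCF_ACK_KREF. TMR status is thereby visible in se_cmd_flags itself, which is what the transport_wait_for_tasks() hunks earlier in this diff test instead of dereferencing cmd->se_tmr_req. A trivial helper capturing the new test (hypothetical, for illustration only):

        static inline bool se_cmd_is_tmr(struct se_cmd *cmd)
        {
                return cmd->se_cmd_flags & SCF_SCSI_TMR_CDB;
        }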
@@ -474,12 +478,6 @@ struct t10_reservation { | |||
474 | struct t10_reservation_ops pr_ops; | 478 | struct t10_reservation_ops pr_ops; |
475 | }; | 479 | }; |
476 | 480 | ||
477 | struct se_queue_req { | ||
478 | int state; | ||
479 | struct se_cmd *cmd; | ||
480 | struct list_head qr_list; | ||
481 | }; | ||
482 | |||
483 | struct se_queue_obj { | 481 | struct se_queue_obj { |
484 | atomic_t queue_cnt; | 482 | atomic_t queue_cnt; |
485 | spinlock_t cmd_queue_lock; | 483 | spinlock_t cmd_queue_lock; |
@@ -504,6 +502,24 @@ struct se_task { | |||
504 | struct completion task_stop_comp; | 502 | struct completion task_stop_comp; |
505 | }; | 503 | }; |
506 | 504 | ||
505 | struct se_tmr_req { | ||
506 | /* Task Management function to be performed */ | ||
507 | u8 function; | ||
508 | /* Task Management response to send */ | ||
509 | u8 response; | ||
510 | int call_transport; | ||
511 | /* Reference to ITT that Task Mgmt should be performed */ | ||
512 | u32 ref_task_tag; | ||
513 | /* 64-bit encoded SAM LUN from $FABRIC_MOD TMR header */ | ||
514 | u64 ref_task_lun; | ||
515 | void *fabric_tmr_ptr; | ||
516 | struct se_cmd *task_cmd; | ||
517 | struct se_cmd *ref_cmd; | ||
518 | struct se_device *tmr_dev; | ||
519 | struct se_lun *tmr_lun; | ||
520 | struct list_head tmr_list; | ||
521 | }; | ||
522 | |||
507 | struct se_cmd { | 523 | struct se_cmd { |
508 | /* SAM response code being sent to initiator */ | 524 | /* SAM response code being sent to initiator */ |
509 | u8 scsi_status; | 525 | u8 scsi_status; |
@@ -555,23 +571,23 @@ struct se_cmd { | |||
555 | unsigned char *t_task_cdb; | 571 | unsigned char *t_task_cdb; |
556 | unsigned char __t_task_cdb[TCM_MAX_COMMAND_SIZE]; | 572 | unsigned char __t_task_cdb[TCM_MAX_COMMAND_SIZE]; |
557 | unsigned long long t_task_lba; | 573 | unsigned long long t_task_lba; |
558 | int t_tasks_failed; | ||
559 | u32 t_tasks_sg_chained_no; | 574 | u32 t_tasks_sg_chained_no; |
560 | atomic_t t_fe_count; | 575 | atomic_t t_fe_count; |
561 | atomic_t t_se_count; | 576 | atomic_t t_se_count; |
562 | atomic_t t_task_cdbs_left; | 577 | atomic_t t_task_cdbs_left; |
563 | atomic_t t_task_cdbs_ex_left; | 578 | atomic_t t_task_cdbs_ex_left; |
564 | atomic_t t_task_cdbs_sent; | 579 | atomic_t t_task_cdbs_sent; |
565 | atomic_t t_transport_aborted; | 580 | unsigned int transport_state; |
566 | atomic_t t_transport_active; | 581 | #define CMD_T_ABORTED (1 << 0) |
567 | atomic_t t_transport_complete; | 582 | #define CMD_T_ACTIVE (1 << 1) |
568 | atomic_t t_transport_queue_active; | 583 | #define CMD_T_COMPLETE (1 << 2) |
569 | atomic_t t_transport_sent; | 584 | #define CMD_T_QUEUED (1 << 3) |
570 | atomic_t t_transport_stop; | 585 | #define CMD_T_SENT (1 << 4) |
571 | atomic_t transport_dev_active; | 586 | #define CMD_T_STOP (1 << 5) |
572 | atomic_t transport_lun_active; | 587 | #define CMD_T_FAILED (1 << 6) |
573 | atomic_t transport_lun_fe_stop; | 588 | #define CMD_T_LUN_STOP (1 << 7) |
574 | atomic_t transport_lun_stop; | 589 | #define CMD_T_LUN_FE_STOP (1 << 8) |
590 | #define CMD_T_DEV_ACTIVE (1 << 9) | ||
575 | spinlock_t t_state_lock; | 591 | spinlock_t t_state_lock; |
576 | struct completion t_transport_stop_comp; | 592 | struct completion t_transport_stop_comp; |
577 | struct completion transport_lun_fe_stop_comp; | 593 | struct completion transport_lun_fe_stop_comp; |
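[Editor's note] For reference, the new bits appear to map onto the removed members one-for-one, with one deliberate omission: transport_lun_active has no CMD_T_* counterpart, since the list_del_init() conversion in __transport_clear_lun_from_sessions() earlier in this diff lets se_lun_node list membership carry that information instead.

        /* old member (mostly atomic_t)    ->  new transport_state bit
         * t_transport_aborted             ->  CMD_T_ABORTED
         * t_transport_active              ->  CMD_T_ACTIVE
         * t_transport_complete            ->  CMD_T_COMPLETE
         * t_transport_queue_active        ->  CMD_T_QUEUED
         * t_transport_sent                ->  CMD_T_SENT
         * t_transport_stop                ->  CMD_T_STOP
         * t_tasks_failed (int)            ->  CMD_T_FAILED
         * transport_dev_active            ->  CMD_T_DEV_ACTIVE
         * transport_lun_stop              ->  CMD_T_LUN_STOP
         * transport_lun_fe_stop           ->  CMD_T_LUN_FE_STOP
         * transport_lun_active            ->  (dropped; see list_del_init() above)
         */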
@@ -592,24 +608,6 @@ struct se_cmd { | |||
592 | 608 | ||
593 | }; | 609 | }; |
594 | 610 | ||
595 | struct se_tmr_req { | ||
596 | /* Task Management function to be preformed */ | ||
597 | u8 function; | ||
598 | /* Task Management response to send */ | ||
599 | u8 response; | ||
600 | int call_transport; | ||
601 | /* Reference to ITT that Task Mgmt should be preformed */ | ||
602 | u32 ref_task_tag; | ||
603 | /* 64-bit encoded SAM LUN from $FABRIC_MOD TMR header */ | ||
604 | u64 ref_task_lun; | ||
605 | void *fabric_tmr_ptr; | ||
606 | struct se_cmd *task_cmd; | ||
607 | struct se_cmd *ref_cmd; | ||
608 | struct se_device *tmr_dev; | ||
609 | struct se_lun *tmr_lun; | ||
610 | struct list_head tmr_list; | ||
611 | }; | ||
612 | |||
613 | struct se_ua { | 611 | struct se_ua { |
614 | u8 ua_asc; | 612 | u8 ua_asc; |
615 | u8 ua_ascq; | 613 | u8 ua_ascq; |
@@ -622,6 +620,7 @@ struct se_node_acl { | |||
622 | char initiatorname[TRANSPORT_IQN_LEN]; | 620 | char initiatorname[TRANSPORT_IQN_LEN]; |
623 | /* Used to signal demo mode created ACL, disabled by default */ | 621 | /* Used to signal demo mode created ACL, disabled by default */ |
624 | bool dynamic_node_acl; | 622 | bool dynamic_node_acl; |
623 | bool acl_stop:1; | ||
625 | u32 queue_depth; | 624 | u32 queue_depth; |
626 | u32 acl_index; | 625 | u32 acl_index; |
627 | u64 num_cmds; | 626 | u64 num_cmds; |
@@ -630,7 +629,7 @@ struct se_node_acl { | |||
630 | spinlock_t stats_lock; | 629 | spinlock_t stats_lock; |
631 | /* Used for PR SPEC_I_PT=1 and REGISTER_AND_MOVE */ | 630 | /* Used for PR SPEC_I_PT=1 and REGISTER_AND_MOVE */ |
632 | atomic_t acl_pr_ref_count; | 631 | atomic_t acl_pr_ref_count; |
633 | struct se_dev_entry *device_list; | 632 | struct se_dev_entry **device_list; |
634 | struct se_session *nacl_sess; | 633 | struct se_session *nacl_sess; |
635 | struct se_portal_group *se_tpg; | 634 | struct se_portal_group *se_tpg; |
636 | spinlock_t device_list_lock; | 635 | spinlock_t device_list_lock; |
@@ -643,6 +642,8 @@ struct se_node_acl { | |||
643 | struct config_group *acl_default_groups[5]; | 642 | struct config_group *acl_default_groups[5]; |
644 | struct list_head acl_list; | 643 | struct list_head acl_list; |
645 | struct list_head acl_sess_list; | 644 | struct list_head acl_sess_list; |
645 | struct completion acl_free_comp; | ||
646 | struct kref acl_kref; | ||
646 | }; | 647 | }; |
647 | 648 | ||
648 | struct se_session { | 649 | struct se_session { |
@@ -656,6 +657,7 @@ struct se_session { | |||
656 | struct list_head sess_cmd_list; | 657 | struct list_head sess_cmd_list; |
657 | struct list_head sess_wait_list; | 658 | struct list_head sess_wait_list; |
658 | spinlock_t sess_cmd_lock; | 659 | spinlock_t sess_cmd_lock; |
660 | struct kref sess_kref; | ||
659 | }; | 661 | }; |
660 | 662 | ||
661 | struct se_device; | 663 | struct se_device; |
@@ -730,6 +732,7 @@ struct se_dev_attrib { | |||
730 | u32 block_size; | 732 | u32 block_size; |
731 | u32 hw_max_sectors; | 733 | u32 hw_max_sectors; |
732 | u32 max_sectors; | 734 | u32 max_sectors; |
735 | u32 fabric_max_sectors; | ||
733 | u32 optimal_sectors; | 736 | u32 optimal_sectors; |
734 | u32 hw_queue_depth; | 737 | u32 hw_queue_depth; |
735 | u32 queue_depth; | 738 | u32 queue_depth; |
@@ -931,7 +934,7 @@ struct se_portal_group { | |||
931 | struct list_head se_tpg_node; | 934 | struct list_head se_tpg_node; |
932 | /* linked list for initiator ACL list */ | 935 | /* linked list for initiator ACL list */ |
933 | struct list_head acl_node_list; | 936 | struct list_head acl_node_list; |
934 | struct se_lun *tpg_lun_list; | 937 | struct se_lun **tpg_lun_list; |
935 | struct se_lun tpg_virt_lun0; | 938 | struct se_lun tpg_virt_lun0; |
936 | /* List of TCM sessions associated wth this TPG */ | 939 | /* List of TCM sessions associated wth this TPG */ |
937 | struct list_head tpg_sess_list; | 940 | struct list_head tpg_sess_list; |
diff --git a/include/target/target_core_fabric.h b/include/target/target_core_fabric.h index d36fad317e78..10c690809601 100644 --- a/include/target/target_core_fabric.h +++ b/include/target/target_core_fabric.h | |||
@@ -58,9 +58,6 @@ struct target_core_fabric_ops { | |||
58 | */ | 58 | */ |
59 | int (*shutdown_session)(struct se_session *); | 59 | int (*shutdown_session)(struct se_session *); |
60 | void (*close_session)(struct se_session *); | 60 | void (*close_session)(struct se_session *); |
61 | void (*stop_session)(struct se_session *, int, int); | ||
62 | void (*fall_back_to_erl0)(struct se_session *); | ||
63 | int (*sess_logged_in)(struct se_session *); | ||
64 | u32 (*sess_get_index)(struct se_session *); | 61 | u32 (*sess_get_index)(struct se_session *); |
65 | /* | 62 | /* |
66 | * Used only for SCSI fabrics that contain multi-value TransportIDs | 63 | * Used only for SCSI fabrics that contain multi-value TransportIDs |
@@ -78,7 +75,6 @@ struct target_core_fabric_ops { | |||
78 | int (*queue_tm_rsp)(struct se_cmd *); | 75 | int (*queue_tm_rsp)(struct se_cmd *); |
79 | u16 (*set_fabric_sense_len)(struct se_cmd *, u32); | 76 | u16 (*set_fabric_sense_len)(struct se_cmd *, u32); |
80 | u16 (*get_fabric_sense_len)(void); | 77 | u16 (*get_fabric_sense_len)(void); |
81 | int (*is_state_remove)(struct se_cmd *); | ||
82 | /* | 78 | /* |
83 | * fabric module calls for target_core_fabric_configfs.c | 79 | * fabric module calls for target_core_fabric_configfs.c |
84 | */ | 80 | */ |
@@ -105,7 +101,10 @@ void __transport_register_session(struct se_portal_group *, | |||
105 | struct se_node_acl *, struct se_session *, void *); | 101 | struct se_node_acl *, struct se_session *, void *); |
106 | void transport_register_session(struct se_portal_group *, | 102 | void transport_register_session(struct se_portal_group *, |
107 | struct se_node_acl *, struct se_session *, void *); | 103 | struct se_node_acl *, struct se_session *, void *); |
104 | void target_get_session(struct se_session *); | ||
105 | int target_put_session(struct se_session *); | ||
108 | void transport_free_session(struct se_session *); | 106 | void transport_free_session(struct se_session *); |
107 | void target_put_nacl(struct se_node_acl *); | ||
109 | void transport_deregister_session_configfs(struct se_session *); | 108 | void transport_deregister_session_configfs(struct se_session *); |
110 | void transport_deregister_session(struct se_session *); | 109 | void transport_deregister_session(struct se_session *); |
111 | 110 | ||
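[Editor's note] The new se_sess->sess_kref plus target_get_session()/target_put_session() give fabrics a reference-counted session lifetime, matching the "Add target core se_sess->sess_kref + get/put helpers" item from the series. Given the kref member added to struct se_session above, the helpers are presumably thin kref wrappers, roughly:

        void target_get_session(struct se_session *se_sess)
        {
                kref_get(&se_sess->sess_kref);
        }

        /* Assumed release path: the last put hands the session back to the
         * fabric through its close_session() op. */
        static void target_release_session(struct kref *kref)
        {
                struct se_session *se_sess = container_of(kref,
                                struct se_session, sess_kref);
                struct se_portal_group *se_tpg = se_sess->se_tpg;

                se_tpg->se_tpg_tfo->close_session(se_sess);
        }

        int target_put_session(struct se_session *se_sess)
        {
                return kref_put(&se_sess->sess_kref, target_release_session);
        }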
@@ -116,6 +115,10 @@ int transport_lookup_cmd_lun(struct se_cmd *, u32); | |||
116 | int transport_generic_allocate_tasks(struct se_cmd *, unsigned char *); | 115 | int transport_generic_allocate_tasks(struct se_cmd *, unsigned char *); |
117 | void target_submit_cmd(struct se_cmd *, struct se_session *, unsigned char *, | 116 | void target_submit_cmd(struct se_cmd *, struct se_session *, unsigned char *, |
118 | unsigned char *, u32, u32, int, int, int); | 117 | unsigned char *, u32, u32, int, int, int); |
118 | int target_submit_tmr(struct se_cmd *se_cmd, struct se_session *se_sess, | ||
119 | unsigned char *sense, u32 unpacked_lun, | ||
120 | void *fabric_tmr_ptr, unsigned char tm_type, | ||
121 | gfp_t, unsigned int, int); | ||
119 | int transport_handle_cdb_direct(struct se_cmd *); | 122 | int transport_handle_cdb_direct(struct se_cmd *); |
120 | int transport_generic_handle_cdb_map(struct se_cmd *); | 123 | int transport_generic_handle_cdb_map(struct se_cmd *); |
121 | int transport_generic_handle_data(struct se_cmd *); | 124 | int transport_generic_handle_data(struct se_cmd *); |
@@ -139,9 +142,10 @@ void target_wait_for_sess_cmds(struct se_session *, int); | |||
139 | 142 | ||
140 | int core_alua_check_nonop_delay(struct se_cmd *); | 143 | int core_alua_check_nonop_delay(struct se_cmd *); |
141 | 144 | ||
142 | struct se_tmr_req *core_tmr_alloc_req(struct se_cmd *, void *, u8, gfp_t); | 145 | int core_tmr_alloc_req(struct se_cmd *, void *, u8, gfp_t); |
143 | void core_tmr_release_req(struct se_tmr_req *); | 146 | void core_tmr_release_req(struct se_tmr_req *); |
144 | int transport_generic_handle_tmr(struct se_cmd *); | 147 | int transport_generic_handle_tmr(struct se_cmd *); |
148 | void transport_generic_request_failure(struct se_cmd *); | ||
145 | int transport_lookup_tmr_lun(struct se_cmd *, u32); | 149 | int transport_lookup_tmr_lun(struct se_cmd *, u32); |
146 | 150 | ||
147 | struct se_node_acl *core_tpg_check_initiator_node_acl(struct se_portal_group *, | 151 | struct se_node_acl *core_tpg_check_initiator_node_acl(struct se_portal_group *, |
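[Editor's note] Finally, note the changed return type in this last hunk: core_tmr_alloc_req() now returns an int status rather than a struct se_tmr_req pointer, consistent with the ft_send_tm() conversion earlier in the diff where the core attaches the allocated request to se_cmd->se_tmr_req itself. Before and after at a caller, schematically:

        /* old: caller receives the request and must wire it up */
        tmr = core_tmr_alloc_req(&cmd->se_cmd, cmd, tm_func, GFP_KERNEL);
        if (!tmr)
                return -ENOMEM;
        cmd->se_cmd.se_tmr_req = tmr;

        /* new: allocation and attachment happen inside the core */
        if (core_tmr_alloc_req(&cmd->se_cmd, cmd, tm_func, GFP_KERNEL) < 0)
                return -ENOMEM;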