path: root/drivers/target
author     Linus Torvalds <torvalds@linux-foundation.org>  2017-11-25 00:19:20 -0500
committer  Linus Torvalds <torvalds@linux-foundation.org>  2017-11-25 00:19:20 -0500
commit     eda5d47134b385813b36eddb6d82320dc57e1e53 (patch)
tree       d64fb6a0afc8b632cff3f6521f2a7084ba1702a4 /drivers/target
parent     1d3b78bbc6e983fabb3fbf91b76339bf66e4a12c (diff)
parent     97488c73190bb785cba818bf31e7361a27aded41 (diff)
Merge branch 'for-next' of git://git.kernel.org/pub/scm/linux/kernel/git/nab/target-pending
Pull SCSI target updates from Nicholas Bellinger:
 "This series is predominantly bug-fixes, with a few small improvements
  that have been outstanding over the last release cycle. As usual, the
  associated bug-fixes have CC' tags for stable.

  Also, things have been particularly quiet wrt new developments the
  last months, with most folks continuing to focus on stability atop 4.x
  stable kernels for their respective production configurations.

  Also at this point, the stable trees have been synced up with
  mainline. This will continue to be a priority, as production users
  tend to run exclusively atop stable kernels, a few releases behind
  mainline.

  The highlights include:

   - Fix PR PREEMPT_AND_ABORT null pointer dereference regression in
     v4.11+ (tangwenji)

   - Fix OOPs during removing TCMU device (Xiubo Li + Zhang Zhuoyu)

   - Add netlink command reply supported option for each device
     (Kenjiro Nakayama)

   - cxgbit: Abort the TCP connection in case of data out timeout
     (Varun Prakash)

   - Fix PR/ALUA file path truncation (David Disseldorp)

   - Fix double se_cmd completion during ->cmd_time_out (Mike Christie)

   - Fix QUEUE_FULL + SCSI task attribute handling in 4.1+
     (Bryant Ly + nab)

   - Fix quiese during transport_write_pending_qf endless loop (nab)

   - Avoid early CMD_T_PRE_EXECUTE failures during ABORT_TASK in 3.14+
     (Don White + nab)"

* 'for-next' of git://git.kernel.org/pub/scm/linux/kernel/git/nab/target-pending: (35 commits)
  tcmu: Add a missing unlock on an error path
  tcmu: Fix some memory corruption
  iscsi-target: Fix non-immediate TMR reference leak
  iscsi-target: Make TASK_REASSIGN use proper se_cmd->cmd_kref
  target: Avoid early CMD_T_PRE_EXECUTE failures during ABORT_TASK
  target: Fix quiese during transport_write_pending_qf endless loop
  target: Fix caw_sem leak in transport_generic_request_failure
  target: Fix QUEUE_FULL + SCSI task attribute handling
  iSCSI-target: Use common error handling code in iscsi_decode_text_input()
  target/iscsi: Detect conn_cmd_list corruption early
  target/iscsi: Fix a race condition in iscsit_add_reject_from_cmd()
  target/iscsi: Modify iscsit_do_crypto_hash_buf() prototype
  target/iscsi: Fix endianness in an error message
  target/iscsi: Use min() in iscsit_dump_data_payload() instead of open-coding it
  target/iscsi: Define OFFLOAD_BUF_SIZE once
  target: Inline transport_put_cmd()
  target: Suppress gcc 7 fallthrough warnings
  target: Move a declaration of a global variable into a header file
  tcmu: fix double se_cmd completion
  target: return SAM_STAT_TASK_SET_FULL for TCM_OUT_OF_RESOURCES
  ...
Diffstat (limited to 'drivers/target')
-rw-r--r--  drivers/target/iscsi/cxgbit/cxgbit.h              |   2
-rw-r--r--  drivers/target/iscsi/cxgbit/cxgbit_cm.c           |  45
-rw-r--r--  drivers/target/iscsi/cxgbit/cxgbit_ddp.c          |   8
-rw-r--r--  drivers/target/iscsi/cxgbit/cxgbit_main.c         |   1
-rw-r--r--  drivers/target/iscsi/iscsi_target.c               |  80
-rw-r--r--  drivers/target/iscsi/iscsi_target_configfs.c      |   3
-rw-r--r--  drivers/target/iscsi/iscsi_target_erl1.c          |   7
-rw-r--r--  drivers/target/iscsi/iscsi_target_parameters.c    |  39
-rw-r--r--  drivers/target/iscsi/iscsi_target_seq_pdu_list.c  |   2
-rw-r--r--  drivers/target/iscsi/iscsi_target_tpg.c           |   7
-rw-r--r--  drivers/target/iscsi/iscsi_target_util.c          |   4
-rw-r--r--  drivers/target/target_core_alua.c                 |  51
-rw-r--r--  drivers/target/target_core_alua.h                 |   9
-rw-r--r--  drivers/target/target_core_configfs.c             |  14
-rw-r--r--  drivers/target/target_core_fabric_configfs.c      |   2
-rw-r--r--  drivers/target/target_core_file.c                 |   4
-rw-r--r--  drivers/target/target_core_internal.h             |   1
-rw-r--r--  drivers/target/target_core_pr.c                   |  41
-rw-r--r--  drivers/target/target_core_tmr.c                  |  12
-rw-r--r--  drivers/target/target_core_transport.c            |  84
-rw-r--r--  drivers/target/target_core_user.c                 | 208
21 files changed, 385 insertions, 239 deletions
diff --git a/drivers/target/iscsi/cxgbit/cxgbit.h b/drivers/target/iscsi/cxgbit/cxgbit.h
index 90388698c222..417b9e66b0cd 100644
--- a/drivers/target/iscsi/cxgbit/cxgbit.h
+++ b/drivers/target/iscsi/cxgbit/cxgbit.h
@@ -165,6 +165,7 @@ enum cxgbit_csk_flags {
 	CSK_LOGIN_PDU_DONE,
 	CSK_LOGIN_DONE,
 	CSK_DDP_ENABLE,
+	CSK_ABORT_RPL_WAIT,
 };
 
 struct cxgbit_sock_common {
@@ -321,6 +322,7 @@ int cxgbit_setup_np(struct iscsi_np *, struct sockaddr_storage *);
 int cxgbit_setup_conn_digest(struct cxgbit_sock *);
 int cxgbit_accept_np(struct iscsi_np *, struct iscsi_conn *);
 void cxgbit_free_np(struct iscsi_np *);
+void cxgbit_abort_conn(struct cxgbit_sock *csk);
 void cxgbit_free_conn(struct iscsi_conn *);
 extern cxgbit_cplhandler_func cxgbit_cplhandlers[NUM_CPL_CMDS];
 int cxgbit_get_login_rx(struct iscsi_conn *, struct iscsi_login *);
diff --git a/drivers/target/iscsi/cxgbit/cxgbit_cm.c b/drivers/target/iscsi/cxgbit/cxgbit_cm.c
index d4fa41be80f9..92eb57e2adaf 100644
--- a/drivers/target/iscsi/cxgbit/cxgbit_cm.c
+++ b/drivers/target/iscsi/cxgbit/cxgbit_cm.c
@@ -665,6 +665,46 @@ static int cxgbit_send_abort_req(struct cxgbit_sock *csk)
 	return cxgbit_l2t_send(csk->com.cdev, skb, csk->l2t);
 }
 
+static void
+__cxgbit_abort_conn(struct cxgbit_sock *csk, struct sk_buff *skb)
+{
+	__kfree_skb(skb);
+
+	if (csk->com.state != CSK_STATE_ESTABLISHED)
+		goto no_abort;
+
+	set_bit(CSK_ABORT_RPL_WAIT, &csk->com.flags);
+	csk->com.state = CSK_STATE_ABORTING;
+
+	cxgbit_send_abort_req(csk);
+
+	return;
+
+no_abort:
+	cxgbit_wake_up(&csk->com.wr_wait, __func__, CPL_ERR_NONE);
+	cxgbit_put_csk(csk);
+}
+
+void cxgbit_abort_conn(struct cxgbit_sock *csk)
+{
+	struct sk_buff *skb = alloc_skb(0, GFP_KERNEL | __GFP_NOFAIL);
+
+	cxgbit_get_csk(csk);
+	cxgbit_init_wr_wait(&csk->com.wr_wait);
+
+	spin_lock_bh(&csk->lock);
+	if (csk->lock_owner) {
+		cxgbit_skcb_rx_backlog_fn(skb) = __cxgbit_abort_conn;
+		__skb_queue_tail(&csk->backlogq, skb);
+	} else {
+		__cxgbit_abort_conn(csk, skb);
+	}
+	spin_unlock_bh(&csk->lock);
+
+	cxgbit_wait_for_reply(csk->com.cdev, &csk->com.wr_wait,
+			      csk->tid, 600, __func__);
+}
+
 void cxgbit_free_conn(struct iscsi_conn *conn)
 {
 	struct cxgbit_sock *csk = conn->context;
@@ -1709,12 +1749,17 @@ rel_skb:
 
 static void cxgbit_abort_rpl_rss(struct cxgbit_sock *csk, struct sk_buff *skb)
 {
+	struct cpl_abort_rpl_rss *rpl = cplhdr(skb);
+
 	pr_debug("%s: csk %p; tid %u; state %d\n",
 		 __func__, csk, csk->tid, csk->com.state);
 
 	switch (csk->com.state) {
 	case CSK_STATE_ABORTING:
 		csk->com.state = CSK_STATE_DEAD;
+		if (test_bit(CSK_ABORT_RPL_WAIT, &csk->com.flags))
+			cxgbit_wake_up(&csk->com.wr_wait, __func__,
+				       rpl->status);
 		cxgbit_put_csk(csk);
 		break;
 	default:
diff --git a/drivers/target/iscsi/cxgbit/cxgbit_ddp.c b/drivers/target/iscsi/cxgbit/cxgbit_ddp.c
index 5fdb57cac968..768cce0ccb80 100644
--- a/drivers/target/iscsi/cxgbit/cxgbit_ddp.c
+++ b/drivers/target/iscsi/cxgbit/cxgbit_ddp.c
@@ -275,6 +275,14 @@ void cxgbit_release_cmd(struct iscsi_conn *conn, struct iscsi_cmd *cmd)
 	struct cxgbit_device *cdev = csk->com.cdev;
 	struct cxgbi_ppm *ppm = cdev2ppm(cdev);
 
+	/* Abort the TCP conn if DDP is not complete to
+	 * avoid any possibility of DDP after freeing
+	 * the cmd.
+	 */
+	if (unlikely(cmd->write_data_done !=
+		     cmd->se_cmd.data_length))
+		cxgbit_abort_conn(csk);
+
 	cxgbi_ppm_ppod_release(ppm, ttinfo->idx);
 
 	dma_unmap_sg(&ppm->pdev->dev, ttinfo->sgl,
diff --git a/drivers/target/iscsi/cxgbit/cxgbit_main.c b/drivers/target/iscsi/cxgbit/cxgbit_main.c
index 4fd775ace541..f3f8856bfb68 100644
--- a/drivers/target/iscsi/cxgbit/cxgbit_main.c
+++ b/drivers/target/iscsi/cxgbit/cxgbit_main.c
@@ -446,6 +446,7 @@ cxgbit_uld_lro_rx_handler(void *hndl, const __be64 *rsp,
 	case CPL_RX_ISCSI_DDP:
 	case CPL_FW4_ACK:
 		lro_flush = false;
+		/* fall through */
 	case CPL_ABORT_RPL_RSS:
 	case CPL_PASS_ESTABLISH:
 	case CPL_PEER_CLOSE:
diff --git a/drivers/target/iscsi/iscsi_target.c b/drivers/target/iscsi/iscsi_target.c
index 9e67c7678c86..9eb10d34682c 100644
--- a/drivers/target/iscsi/iscsi_target.c
+++ b/drivers/target/iscsi/iscsi_target.c
@@ -502,7 +502,7 @@ void iscsit_aborted_task(struct iscsi_conn *conn, struct iscsi_cmd *cmd)
 EXPORT_SYMBOL(iscsit_aborted_task);
 
 static void iscsit_do_crypto_hash_buf(struct ahash_request *, const void *,
-				      u32, u32, u8 *, u8 *);
+				      u32, u32, const void *, void *);
 static void iscsit_tx_thread_wait_for_tcp(struct iscsi_conn *);
 
 static int
@@ -523,7 +523,7 @@ iscsit_xmit_nondatain_pdu(struct iscsi_conn *conn, struct iscsi_cmd *cmd,
 
 		iscsit_do_crypto_hash_buf(conn->conn_tx_hash, hdr,
 					  ISCSI_HDR_LEN, 0, NULL,
-					  (u8 *)header_digest);
+					  header_digest);
 
 		iov[0].iov_len += ISCSI_CRC_LEN;
 		tx_size += ISCSI_CRC_LEN;
@@ -550,9 +550,8 @@ iscsit_xmit_nondatain_pdu(struct iscsi_conn *conn, struct iscsi_cmd *cmd,
 		if (conn->conn_ops->DataDigest) {
 			iscsit_do_crypto_hash_buf(conn->conn_tx_hash,
 						  data_buf, data_buf_len,
-						  padding,
-						  (u8 *)&cmd->pad_bytes,
-						  (u8 *)&cmd->data_crc);
+						  padding, &cmd->pad_bytes,
+						  &cmd->data_crc);
 
 			iov[niov].iov_base = &cmd->data_crc;
 			iov[niov++].iov_len = ISCSI_CRC_LEN;
@@ -597,7 +596,7 @@ iscsit_xmit_datain_pdu(struct iscsi_conn *conn, struct iscsi_cmd *cmd,
 
 		iscsit_do_crypto_hash_buf(conn->conn_tx_hash, cmd->pdu,
 					  ISCSI_HDR_LEN, 0, NULL,
-					  (u8 *)header_digest);
+					  header_digest);
 
 		iov[0].iov_len += ISCSI_CRC_LEN;
 		tx_size += ISCSI_CRC_LEN;
@@ -836,6 +835,7 @@ static int iscsit_add_reject_from_cmd(
 	unsigned char *buf)
 {
 	struct iscsi_conn *conn;
+	const bool do_put = cmd->se_cmd.se_tfo != NULL;
 
 	if (!cmd->conn) {
 		pr_err("cmd->conn is NULL for ITT: 0x%08x\n",
@@ -866,7 +866,7 @@ static int iscsit_add_reject_from_cmd(
 	 * Perform the kref_put now if se_cmd has already been setup by
 	 * scsit_setup_scsi_cmd()
 	 */
-	if (cmd->se_cmd.se_tfo != NULL) {
+	if (do_put) {
 		pr_debug("iscsi reject: calling target_put_sess_cmd >>>>>>\n");
 		target_put_sess_cmd(&cmd->se_cmd);
 	}
@@ -1410,13 +1410,9 @@ static u32 iscsit_do_crypto_hash_sg(
 	return data_crc;
 }
 
-static void iscsit_do_crypto_hash_buf(
-	struct ahash_request *hash,
-	const void *buf,
-	u32 payload_length,
-	u32 padding,
-	u8 *pad_bytes,
-	u8 *data_crc)
+static void iscsit_do_crypto_hash_buf(struct ahash_request *hash,
+	const void *buf, u32 payload_length, u32 padding,
+	const void *pad_bytes, void *data_crc)
 {
 	struct scatterlist sg[2];
 
@@ -1462,9 +1458,9 @@ __iscsit_check_dataout_hdr(struct iscsi_conn *conn, void *buf,
 	iscsit_mod_dataout_timer(cmd);
 
 	if ((be32_to_cpu(hdr->offset) + payload_length) > cmd->se_cmd.data_length) {
-		pr_err("DataOut Offset: %u, Length %u greater than"
-			" iSCSI Command EDTL %u, protocol error.\n",
-			hdr->offset, payload_length, cmd->se_cmd.data_length);
+		pr_err("DataOut Offset: %u, Length %u greater than iSCSI Command EDTL %u, protocol error.\n",
+		       be32_to_cpu(hdr->offset), payload_length,
+		       cmd->se_cmd.data_length);
 		return iscsit_reject_cmd(cmd, ISCSI_REASON_BOOKMARK_INVALID, buf);
 	}
 
@@ -1878,10 +1874,9 @@ static int iscsit_handle_nop_out(struct iscsi_conn *conn, struct iscsi_cmd *cmd,
 	}
 
 	if (conn->conn_ops->DataDigest) {
-		iscsit_do_crypto_hash_buf(conn->conn_rx_hash,
-					  ping_data, payload_length,
-					  padding, cmd->pad_bytes,
-					  (u8 *)&data_crc);
+		iscsit_do_crypto_hash_buf(conn->conn_rx_hash, ping_data,
+					  payload_length, padding,
+					  cmd->pad_bytes, &data_crc);
 
 		if (checksum != data_crc) {
 			pr_err("Ping data CRC32C DataDigest"
@@ -1962,7 +1957,6 @@ iscsit_handle_task_mgt_cmd(struct iscsi_conn *conn, struct iscsi_cmd *cmd,
 	struct iscsi_tmr_req *tmr_req;
 	struct iscsi_tm *hdr;
 	int out_of_order_cmdsn = 0, ret;
-	bool sess_ref = false;
 	u8 function, tcm_function = TMR_UNKNOWN;
 
 	hdr = (struct iscsi_tm *) buf;
@@ -1995,22 +1989,23 @@ iscsit_handle_task_mgt_cmd(struct iscsi_conn *conn, struct iscsi_cmd *cmd,
 
 	cmd->data_direction = DMA_NONE;
 	cmd->tmr_req = kzalloc(sizeof(*cmd->tmr_req), GFP_KERNEL);
-	if (!cmd->tmr_req)
+	if (!cmd->tmr_req) {
 		return iscsit_add_reject_cmd(cmd,
 					     ISCSI_REASON_BOOKMARK_NO_RESOURCES,
 					     buf);
+	}
+
+	transport_init_se_cmd(&cmd->se_cmd, &iscsi_ops,
+			      conn->sess->se_sess, 0, DMA_NONE,
+			      TCM_SIMPLE_TAG, cmd->sense_buffer + 2);
+
+	target_get_sess_cmd(&cmd->se_cmd, true);
 
 	/*
 	 * TASK_REASSIGN for ERL=2 / connection stays inside of
 	 * LIO-Target $FABRIC_MOD
 	 */
 	if (function != ISCSI_TM_FUNC_TASK_REASSIGN) {
-		transport_init_se_cmd(&cmd->se_cmd, &iscsi_ops,
-				      conn->sess->se_sess, 0, DMA_NONE,
-				      TCM_SIMPLE_TAG, cmd->sense_buffer + 2);
-
-		target_get_sess_cmd(&cmd->se_cmd, true);
-		sess_ref = true;
 		tcm_function = iscsit_convert_tmf(function);
 		if (tcm_function == TMR_UNKNOWN) {
 			pr_err("Unknown iSCSI TMR Function:"
@@ -2101,12 +2096,14 @@ attach:
 
 	if (!(hdr->opcode & ISCSI_OP_IMMEDIATE)) {
 		int cmdsn_ret = iscsit_sequence_cmd(conn, cmd, buf, hdr->cmdsn);
-		if (cmdsn_ret == CMDSN_HIGHER_THAN_EXP)
+		if (cmdsn_ret == CMDSN_HIGHER_THAN_EXP) {
 			out_of_order_cmdsn = 1;
-		else if (cmdsn_ret == CMDSN_LOWER_THAN_EXP)
+		} else if (cmdsn_ret == CMDSN_LOWER_THAN_EXP) {
+			target_put_sess_cmd(&cmd->se_cmd);
 			return 0;
-		else if (cmdsn_ret == CMDSN_ERROR_CANNOT_RECOVER)
+		} else if (cmdsn_ret == CMDSN_ERROR_CANNOT_RECOVER) {
 			return -1;
+		}
 	}
 	iscsit_ack_from_expstatsn(conn, be32_to_cpu(hdr->exp_statsn));
 
@@ -2126,12 +2123,8 @@ attach:
 	 * For connection recovery, this is also the default action for
 	 * TMR TASK_REASSIGN.
 	 */
-	if (sess_ref) {
-		pr_debug("Handle TMR, using sess_ref=true check\n");
-		target_put_sess_cmd(&cmd->se_cmd);
-	}
-
 	iscsit_add_cmd_to_response_queue(cmd, conn, cmd->i_state);
+	target_put_sess_cmd(&cmd->se_cmd);
 	return 0;
 }
 EXPORT_SYMBOL(iscsit_handle_task_mgt_cmd);
@@ -2287,10 +2280,9 @@ iscsit_handle_text_cmd(struct iscsi_conn *conn, struct iscsi_cmd *cmd,
 		goto reject;
 
 	if (conn->conn_ops->DataDigest) {
-		iscsit_do_crypto_hash_buf(conn->conn_rx_hash,
-					  text_in, payload_length,
-					  padding, (u8 *)&pad_bytes,
-					  (u8 *)&data_crc);
+		iscsit_do_crypto_hash_buf(conn->conn_rx_hash, text_in,
+					  payload_length, padding,
+					  &pad_bytes, &data_crc);
 
 		if (checksum != data_crc) {
 			pr_err("Text data CRC32C DataDigest"
@@ -3978,9 +3970,9 @@ static void iscsit_get_rx_pdu(struct iscsi_conn *conn)
 			return;
 		}
 
-		iscsit_do_crypto_hash_buf(conn->conn_rx_hash,
-					  buffer, ISCSI_HDR_LEN,
-					  0, NULL, (u8 *)&checksum);
+		iscsit_do_crypto_hash_buf(conn->conn_rx_hash, buffer,
					  ISCSI_HDR_LEN, 0, NULL,
+					  &checksum);
 
 		if (digest != checksum) {
 			pr_err("HeaderDigest CRC32C failed,"
diff --git a/drivers/target/iscsi/iscsi_target_configfs.c b/drivers/target/iscsi/iscsi_target_configfs.c
index 0dd4c45f7575..0ebc4818e132 100644
--- a/drivers/target/iscsi/iscsi_target_configfs.c
+++ b/drivers/target/iscsi/iscsi_target_configfs.c
@@ -1123,7 +1123,7 @@ static struct se_portal_group *lio_target_tiqn_addtpg(
 
 	ret = core_tpg_register(wwn, &tpg->tpg_se_tpg, SCSI_PROTOCOL_ISCSI);
 	if (ret < 0)
-		return NULL;
+		goto free_out;
 
 	ret = iscsit_tpg_add_portal_group(tiqn, tpg);
 	if (ret != 0)
@@ -1135,6 +1135,7 @@ static struct se_portal_group *lio_target_tiqn_addtpg(
 	return &tpg->tpg_se_tpg;
 out:
 	core_tpg_deregister(&tpg->tpg_se_tpg);
+free_out:
 	kfree(tpg);
 	return NULL;
 }
diff --git a/drivers/target/iscsi/iscsi_target_erl1.c b/drivers/target/iscsi/iscsi_target_erl1.c
index 76184094a0cf..5efa42b939a1 100644
--- a/drivers/target/iscsi/iscsi_target_erl1.c
+++ b/drivers/target/iscsi/iscsi_target_erl1.c
@@ -34,7 +34,7 @@
 #include "iscsi_target_erl2.h"
 #include "iscsi_target.h"
 
-#define OFFLOAD_BUF_SIZE	32768
+#define OFFLOAD_BUF_SIZE	32768U
 
 /*
  * Used to dump excess datain payload for certain error recovery
@@ -56,7 +56,7 @@ int iscsit_dump_data_payload(
 	if (conn->sess->sess_ops->RDMAExtensions)
 		return 0;
 
-	length = (buf_len > OFFLOAD_BUF_SIZE) ? OFFLOAD_BUF_SIZE : buf_len;
+	length = min(buf_len, OFFLOAD_BUF_SIZE);
 
 	buf = kzalloc(length, GFP_ATOMIC);
 	if (!buf) {
@@ -67,8 +67,7 @@ int iscsit_dump_data_payload(
 	memset(&iov, 0, sizeof(struct kvec));
 
 	while (offset < buf_len) {
-		size = ((offset + length) > buf_len) ?
-				(buf_len - offset) : length;
+		size = min(buf_len - offset, length);
 
 		iov.iov_len = size;
 		iov.iov_base = buf;
diff --git a/drivers/target/iscsi/iscsi_target_parameters.c b/drivers/target/iscsi/iscsi_target_parameters.c
index caab1045742d..29a37b242d30 100644
--- a/drivers/target/iscsi/iscsi_target_parameters.c
+++ b/drivers/target/iscsi/iscsi_target_parameters.c
@@ -1380,10 +1380,8 @@ int iscsi_decode_text_input(
 		char *key, *value;
 		struct iscsi_param *param;
 
-		if (iscsi_extract_key_value(start, &key, &value) < 0) {
-			kfree(tmpbuf);
-			return -1;
-		}
+		if (iscsi_extract_key_value(start, &key, &value) < 0)
+			goto free_buffer;
 
 		pr_debug("Got key: %s=%s\n", key, value);
 
@@ -1396,38 +1394,37 @@ int iscsi_decode_text_input(
 
 		param = iscsi_check_key(key, phase, sender, param_list);
 		if (!param) {
-			if (iscsi_add_notunderstood_response(key,
-					value, param_list) < 0) {
-				kfree(tmpbuf);
-				return -1;
-			}
+			if (iscsi_add_notunderstood_response(key, value,
+							     param_list) < 0)
+				goto free_buffer;
+
 			start += strlen(key) + strlen(value) + 2;
 			continue;
 		}
-		if (iscsi_check_value(param, value) < 0) {
-			kfree(tmpbuf);
-			return -1;
-		}
+		if (iscsi_check_value(param, value) < 0)
+			goto free_buffer;
 
 		start += strlen(key) + strlen(value) + 2;
 
 		if (IS_PSTATE_PROPOSER(param)) {
-			if (iscsi_check_proposer_state(param, value) < 0) {
-				kfree(tmpbuf);
-				return -1;
-			}
+			if (iscsi_check_proposer_state(param, value) < 0)
+				goto free_buffer;
+
 			SET_PSTATE_RESPONSE_GOT(param);
 		} else {
-			if (iscsi_check_acceptor_state(param, value, conn) < 0) {
-				kfree(tmpbuf);
-				return -1;
-			}
+			if (iscsi_check_acceptor_state(param, value, conn) < 0)
+				goto free_buffer;
+
 			SET_PSTATE_ACCEPTOR(param);
 		}
 	}
 
 	kfree(tmpbuf);
 	return 0;
+
+free_buffer:
+	kfree(tmpbuf);
+	return -1;
 }
 
 int iscsi_encode_text_output(
diff --git a/drivers/target/iscsi/iscsi_target_seq_pdu_list.c b/drivers/target/iscsi/iscsi_target_seq_pdu_list.c
index e446a09c886b..f65e5e584212 100644
--- a/drivers/target/iscsi/iscsi_target_seq_pdu_list.c
+++ b/drivers/target/iscsi/iscsi_target_seq_pdu_list.c
@@ -25,8 +25,6 @@
 #include "iscsi_target_tpg.h"
 #include "iscsi_target_seq_pdu_list.h"
 
-#define OFFLOAD_BUF_SIZE	32768
-
 #ifdef DEBUG
 static void iscsit_dump_seq_list(struct iscsi_cmd *cmd)
 {
diff --git a/drivers/target/iscsi/iscsi_target_tpg.c b/drivers/target/iscsi/iscsi_target_tpg.c
index 594d07a1e995..4b34f71547c6 100644
--- a/drivers/target/iscsi/iscsi_target_tpg.c
+++ b/drivers/target/iscsi/iscsi_target_tpg.c
@@ -90,10 +90,10 @@ int iscsit_load_discovery_tpg(void)
 	 */
 	param = iscsi_find_param_from_key(AUTHMETHOD, tpg->param_list);
 	if (!param)
-		goto out;
+		goto free_pl_out;
 
 	if (iscsi_update_param_value(param, "CHAP,None") < 0)
-		goto out;
+		goto free_pl_out;
 
 	tpg->tpg_attrib.authentication = 0;
 
@@ -105,6 +105,8 @@ int iscsit_load_discovery_tpg(void)
 	pr_debug("CORE[0] - Allocated Discovery TPG\n");
 
 	return 0;
+free_pl_out:
+	iscsi_release_param_list(tpg->param_list);
 out:
 	if (tpg->sid == 1)
 		core_tpg_deregister(&tpg->tpg_se_tpg);
@@ -119,6 +121,7 @@ void iscsit_release_discovery_tpg(void)
 	if (!tpg)
 		return;
 
+	iscsi_release_param_list(tpg->param_list);
 	core_tpg_deregister(&tpg->tpg_se_tpg);
 
 	kfree(tpg);
diff --git a/drivers/target/iscsi/iscsi_target_util.c b/drivers/target/iscsi/iscsi_target_util.c
index 54f20f184dd6..4435bf374d2d 100644
--- a/drivers/target/iscsi/iscsi_target_util.c
+++ b/drivers/target/iscsi/iscsi_target_util.c
@@ -695,6 +695,8 @@ void iscsit_release_cmd(struct iscsi_cmd *cmd)
 	struct iscsi_session *sess;
 	struct se_cmd *se_cmd = &cmd->se_cmd;
 
+	WARN_ON(!list_empty(&cmd->i_conn_node));
+
 	if (cmd->conn)
 		sess = cmd->conn->sess;
 	else
@@ -717,6 +719,8 @@ void __iscsit_free_cmd(struct iscsi_cmd *cmd, bool check_queues)
 {
 	struct iscsi_conn *conn = cmd->conn;
 
+	WARN_ON(!list_empty(&cmd->i_conn_node));
+
 	if (cmd->data_direction == DMA_TO_DEVICE) {
 		iscsit_stop_dataout_timer(cmd);
 		iscsit_free_r2ts_from_list(cmd);
diff --git a/drivers/target/target_core_alua.c b/drivers/target/target_core_alua.c
index 928127642574..e46ca968009c 100644
--- a/drivers/target/target_core_alua.c
+++ b/drivers/target/target_core_alua.c
@@ -918,7 +918,7 @@ static int core_alua_update_tpg_primary_metadata(
 {
 	unsigned char *md_buf;
 	struct t10_wwn *wwn = &tg_pt_gp->tg_pt_gp_dev->t10_wwn;
-	char path[ALUA_METADATA_PATH_LEN];
+	char *path;
 	int len, rc;
 
 	md_buf = kzalloc(ALUA_MD_BUF_LEN, GFP_KERNEL);
@@ -927,8 +927,6 @@ static int core_alua_update_tpg_primary_metadata(
 		return -ENOMEM;
 	}
 
-	memset(path, 0, ALUA_METADATA_PATH_LEN);
-
 	len = snprintf(md_buf, ALUA_MD_BUF_LEN,
 			"tg_pt_gp_id=%hu\n"
 			"alua_access_state=0x%02x\n"
@@ -937,11 +935,14 @@ static int core_alua_update_tpg_primary_metadata(
 			tg_pt_gp->tg_pt_gp_alua_access_state,
 			tg_pt_gp->tg_pt_gp_alua_access_status);
 
-	snprintf(path, ALUA_METADATA_PATH_LEN,
-		"%s/alua/tpgs_%s/%s", db_root, &wwn->unit_serial[0],
-		config_item_name(&tg_pt_gp->tg_pt_gp_group.cg_item));
-
-	rc = core_alua_write_tpg_metadata(path, md_buf, len);
+	rc = -ENOMEM;
+	path = kasprintf(GFP_KERNEL, "%s/alua/tpgs_%s/%s", db_root,
+			&wwn->unit_serial[0],
+			config_item_name(&tg_pt_gp->tg_pt_gp_group.cg_item));
+	if (path) {
+		rc = core_alua_write_tpg_metadata(path, md_buf, len);
+		kfree(path);
+	}
 	kfree(md_buf);
 	return rc;
 }
@@ -1209,7 +1210,7 @@ static int core_alua_update_tpg_secondary_metadata(struct se_lun *lun)
 {
 	struct se_portal_group *se_tpg = lun->lun_tpg;
 	unsigned char *md_buf;
-	char path[ALUA_METADATA_PATH_LEN], wwn[ALUA_SECONDARY_METADATA_WWN_LEN];
+	char *path;
 	int len, rc;
 
 	mutex_lock(&lun->lun_tg_pt_md_mutex);
@@ -1221,28 +1222,32 @@ static int core_alua_update_tpg_secondary_metadata(struct se_lun *lun)
 		goto out_unlock;
 	}
 
-	memset(path, 0, ALUA_METADATA_PATH_LEN);
-	memset(wwn, 0, ALUA_SECONDARY_METADATA_WWN_LEN);
-
-	len = snprintf(wwn, ALUA_SECONDARY_METADATA_WWN_LEN, "%s",
-			se_tpg->se_tpg_tfo->tpg_get_wwn(se_tpg));
-
-	if (se_tpg->se_tpg_tfo->tpg_get_tag != NULL)
-		snprintf(wwn+len, ALUA_SECONDARY_METADATA_WWN_LEN-len, "+%hu",
-				se_tpg->se_tpg_tfo->tpg_get_tag(se_tpg));
-
 	len = snprintf(md_buf, ALUA_MD_BUF_LEN, "alua_tg_pt_offline=%d\n"
 			"alua_tg_pt_status=0x%02x\n",
 			atomic_read(&lun->lun_tg_pt_secondary_offline),
 			lun->lun_tg_pt_secondary_stat);
 
-	snprintf(path, ALUA_METADATA_PATH_LEN, "%s/alua/%s/%s/lun_%llu",
-		db_root, se_tpg->se_tpg_tfo->get_fabric_name(), wwn,
-		lun->unpacked_lun);
+	if (se_tpg->se_tpg_tfo->tpg_get_tag != NULL) {
+		path = kasprintf(GFP_KERNEL, "%s/alua/%s/%s+%hu/lun_%llu",
+				db_root, se_tpg->se_tpg_tfo->get_fabric_name(),
+				se_tpg->se_tpg_tfo->tpg_get_wwn(se_tpg),
+				se_tpg->se_tpg_tfo->tpg_get_tag(se_tpg),
+				lun->unpacked_lun);
+	} else {
+		path = kasprintf(GFP_KERNEL, "%s/alua/%s/%s/lun_%llu",
+				db_root, se_tpg->se_tpg_tfo->get_fabric_name(),
+				se_tpg->se_tpg_tfo->tpg_get_wwn(se_tpg),
+				lun->unpacked_lun);
+	}
+	if (!path) {
+		rc = -ENOMEM;
+		goto out_free;
+	}
 
 	rc = core_alua_write_tpg_metadata(path, md_buf, len);
+	kfree(path);
+out_free:
 	kfree(md_buf);
-
 out_unlock:
 	mutex_unlock(&lun->lun_tg_pt_md_mutex);
 	return rc;
diff --git a/drivers/target/target_core_alua.h b/drivers/target/target_core_alua.h
index 1902cb5c3b52..fc9637cce825 100644
--- a/drivers/target/target_core_alua.h
+++ b/drivers/target/target_core_alua.h
@@ -72,15 +72,6 @@
  */
 #define ALUA_DEFAULT_IMPLICIT_TRANS_SECS 0
 #define ALUA_MAX_IMPLICIT_TRANS_SECS 255
-/*
- * Used by core_alua_update_tpg_primary_metadata() and
- * core_alua_update_tpg_secondary_metadata()
- */
-#define ALUA_METADATA_PATH_LEN		512
-/*
- * Used by core_alua_update_tpg_secondary_metadata()
- */
-#define ALUA_SECONDARY_METADATA_WWN_LEN	256
 
 /* Used by core_alua_update_tpg_(primary,secondary)_metadata */
 #define ALUA_MD_BUF_LEN			1024
diff --git a/drivers/target/target_core_configfs.c b/drivers/target/target_core_configfs.c
index bd87cc26c6e5..72b1cd1bf9d9 100644
--- a/drivers/target/target_core_configfs.c
+++ b/drivers/target/target_core_configfs.c
@@ -1611,12 +1611,12 @@ static match_table_t tokens = {
 	{Opt_res_type, "res_type=%d"},
 	{Opt_res_scope, "res_scope=%d"},
 	{Opt_res_all_tg_pt, "res_all_tg_pt=%d"},
-	{Opt_mapped_lun, "mapped_lun=%lld"},
+	{Opt_mapped_lun, "mapped_lun=%u"},
 	{Opt_target_fabric, "target_fabric=%s"},
 	{Opt_target_node, "target_node=%s"},
 	{Opt_tpgt, "tpgt=%d"},
 	{Opt_port_rtpi, "port_rtpi=%d"},
-	{Opt_target_lun, "target_lun=%lld"},
+	{Opt_target_lun, "target_lun=%u"},
 	{Opt_err, NULL}
 };
 
@@ -1693,7 +1693,7 @@ static ssize_t target_pr_res_aptpl_metadata_store(struct config_item *item,
 			}
 			break;
 		case Opt_sa_res_key:
-			ret = kstrtoull(args->from, 0, &tmp_ll);
+			ret = match_u64(args, &tmp_ll);
 			if (ret < 0) {
 				pr_err("kstrtoull() failed for sa_res_key=\n");
 				goto out;
@@ -1727,10 +1727,10 @@ static ssize_t target_pr_res_aptpl_metadata_store(struct config_item *item,
 			all_tg_pt = (int)arg;
 			break;
 		case Opt_mapped_lun:
-			ret = match_int(args, &arg);
+			ret = match_u64(args, &tmp_ll);
 			if (ret)
 				goto out;
-			mapped_lun = (u64)arg;
+			mapped_lun = (u64)tmp_ll;
 			break;
 		/*
 		 * PR APTPL Metadata for Target Port
@@ -1768,10 +1768,10 @@ static ssize_t target_pr_res_aptpl_metadata_store(struct config_item *item,
 				goto out;
 			break;
 		case Opt_target_lun:
-			ret = match_int(args, &arg);
+			ret = match_u64(args, &tmp_ll);
 			if (ret)
 				goto out;
-			target_lun = (u64)arg;
+			target_lun = (u64)tmp_ll;
 			break;
 		default:
 			break;
diff --git a/drivers/target/target_core_fabric_configfs.c b/drivers/target/target_core_fabric_configfs.c
index e9e917cc6441..e1416b007aa4 100644
--- a/drivers/target/target_core_fabric_configfs.c
+++ b/drivers/target/target_core_fabric_configfs.c
@@ -623,8 +623,6 @@ static struct configfs_attribute *target_fabric_port_attrs[] = {
 	NULL,
 };
 
-extern struct configfs_item_operations target_core_dev_item_ops;
-
 static int target_fabric_port_link(
 	struct config_item *lun_ci,
 	struct config_item *se_dev_ci)
diff --git a/drivers/target/target_core_file.c b/drivers/target/target_core_file.c
index c629817a8854..9b2c0c773022 100644
--- a/drivers/target/target_core_file.c
+++ b/drivers/target/target_core_file.c
@@ -482,6 +482,10 @@ fd_execute_unmap(struct se_cmd *cmd, sector_t lba, sector_t nolb)
 	struct inode *inode = file->f_mapping->host;
 	int ret;
 
+	if (!nolb) {
+		return 0;
+	}
+
 	if (cmd->se_dev->dev_attrib.pi_prot_type) {
 		ret = fd_do_prot_unmap(cmd, lba, nolb);
 		if (ret)
diff --git a/drivers/target/target_core_internal.h b/drivers/target/target_core_internal.h
index 18e3eb16e756..9384d19a7326 100644
--- a/drivers/target/target_core_internal.h
+++ b/drivers/target/target_core_internal.h
@@ -89,6 +89,7 @@ int target_for_each_device(int (*fn)(struct se_device *dev, void *data),
 		void *data);
 
 /* target_core_configfs.c */
+extern struct configfs_item_operations target_core_dev_item_ops;
 void target_setup_backend_cits(struct target_backend *);
 
 /* target_core_fabric_configfs.c */
diff --git a/drivers/target/target_core_pr.c b/drivers/target/target_core_pr.c
index dd2cd8048582..b024613f9217 100644
--- a/drivers/target/target_core_pr.c
+++ b/drivers/target/target_core_pr.c
@@ -58,8 +58,10 @@ void core_pr_dump_initiator_port(
 	char *buf,
 	u32 size)
 {
-	if (!pr_reg->isid_present_at_reg)
+	if (!pr_reg->isid_present_at_reg) {
 		buf[0] = '\0';
+		return;
+	}
 
 	snprintf(buf, size, ",i,0x%s", pr_reg->pr_reg_isid);
 }
@@ -351,6 +353,7 @@ static int core_scsi3_pr_seq_non_holder(struct se_cmd *cmd, u32 pr_reg_type,
 		break;
 	case PR_TYPE_WRITE_EXCLUSIVE_REGONLY:
 		we = 1;
+		/* fall through */
 	case PR_TYPE_EXCLUSIVE_ACCESS_REGONLY:
 		/*
 		 * Some commands are only allowed for registered I_T Nexuses.
@@ -359,6 +362,7 @@ static int core_scsi3_pr_seq_non_holder(struct se_cmd *cmd, u32 pr_reg_type,
 		break;
 	case PR_TYPE_WRITE_EXCLUSIVE_ALLREG:
 		we = 1;
+		/* fall through */
 	case PR_TYPE_EXCLUSIVE_ACCESS_ALLREG:
 		/*
 		 * Each registered I_T Nexus is a reservation holder.
@@ -1521,7 +1525,7 @@ core_scsi3_decode_spec_i_port(
 	tidh_new = kzalloc(sizeof(struct pr_transport_id_holder), GFP_KERNEL);
 	if (!tidh_new) {
 		pr_err("Unable to allocate tidh_new\n");
-		return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
+		return TCM_INSUFFICIENT_REGISTRATION_RESOURCES;
 	}
 	INIT_LIST_HEAD(&tidh_new->dest_list);
 	tidh_new->dest_tpg = tpg;
@@ -1533,7 +1537,7 @@ core_scsi3_decode_spec_i_port(
 			sa_res_key, all_tg_pt, aptpl);
 	if (!local_pr_reg) {
 		kfree(tidh_new);
-		return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
+		return TCM_INSUFFICIENT_REGISTRATION_RESOURCES;
 	}
 	tidh_new->dest_pr_reg = local_pr_reg;
 	/*
@@ -1553,7 +1557,7 @@ core_scsi3_decode_spec_i_port(
 
 	buf = transport_kmap_data_sg(cmd);
 	if (!buf) {
-		ret = TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
+		ret = TCM_INSUFFICIENT_REGISTRATION_RESOURCES;
 		goto out;
 	}
 
@@ -1767,7 +1771,7 @@ core_scsi3_decode_spec_i_port(
 			core_scsi3_nodeacl_undepend_item(dest_node_acl);
 			core_scsi3_tpg_undepend_item(dest_tpg);
 			kfree(tidh_new);
-			ret = TCM_INVALID_PARAMETER_LIST;
+			ret = TCM_INSUFFICIENT_REGISTRATION_RESOURCES;
 			goto out_unmap;
 		}
 		tidh_new->dest_pr_reg = dest_pr_reg;
@@ -1971,24 +1975,21 @@ static int __core_scsi3_write_aptpl_to_file(
 	struct t10_wwn *wwn = &dev->t10_wwn;
 	struct file *file;
 	int flags = O_RDWR | O_CREAT | O_TRUNC;
-	char path[512];
+	char *path;
 	u32 pr_aptpl_buf_len;
 	int ret;
 	loff_t pos = 0;
 
-	memset(path, 0, 512);
-
-	if (strlen(&wwn->unit_serial[0]) >= 512) {
-		pr_err("WWN value for struct se_device does not fit"
-			" into path buffer\n");
-		return -EMSGSIZE;
-	}
+	path = kasprintf(GFP_KERNEL, "%s/pr/aptpl_%s", db_root,
+			&wwn->unit_serial[0]);
+	if (!path)
+		return -ENOMEM;
 
-	snprintf(path, 512, "%s/pr/aptpl_%s", db_root, &wwn->unit_serial[0]);
 	file = filp_open(path, flags, 0600);
 	if (IS_ERR(file)) {
 		pr_err("filp_open(%s) for APTPL metadata"
 			" failed\n", path);
+		kfree(path);
 		return PTR_ERR(file);
 	}
 
@@ -1999,6 +2000,7 @@ static int __core_scsi3_write_aptpl_to_file(
 	if (ret < 0)
 		pr_debug("Error writing APTPL metadata file: %s\n", path);
 	fput(file);
+	kfree(path);
 
 	return (ret < 0) ? -EIO : 0;
 }
@@ -2103,7 +2105,7 @@ core_scsi3_emulate_pro_register(struct se_cmd *cmd, u64 res_key, u64 sa_res_key,
 				register_type, 0)) {
 			pr_err("Unable to allocate"
 				" struct t10_pr_registration\n");
-			return TCM_INVALID_PARAMETER_LIST;
+			return TCM_INSUFFICIENT_REGISTRATION_RESOURCES;
 		}
 	} else {
 		/*
@@ -3215,7 +3217,7 @@ core_scsi3_emulate_pro_register_and_move(struct se_cmd *cmd, u64 res_key,
 	 */
 	buf = transport_kmap_data_sg(cmd);
 	if (!buf) {
-		ret = TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
+		ret = TCM_INSUFFICIENT_REGISTRATION_RESOURCES;
 		goto out_put_pr_reg;
 	}
 
@@ -3267,7 +3269,7 @@ core_scsi3_emulate_pro_register_and_move(struct se_cmd *cmd, u64 res_key,
 
 	buf = transport_kmap_data_sg(cmd);
 	if (!buf) {
-		ret = TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
+		ret = TCM_INSUFFICIENT_REGISTRATION_RESOURCES;
 		goto out_put_pr_reg;
 	}
 	proto_ident = (buf[24] & 0x0f);
@@ -3466,7 +3468,7 @@ after_iport_check:
 	if (core_scsi3_alloc_registration(cmd->se_dev, dest_node_acl,
 				dest_lun, dest_se_deve, dest_se_deve->mapped_lun,
 				iport_ptr, sa_res_key, 0, aptpl, 2, 1)) {
-		ret = TCM_INVALID_PARAMETER_LIST;
+		ret = TCM_INSUFFICIENT_REGISTRATION_RESOURCES;
 		goto out;
 	}
 	spin_lock(&dev->dev_reservation_lock);
@@ -3528,8 +3530,6 @@ after_iport_check:
 
 	core_scsi3_update_and_write_aptpl(cmd->se_dev, aptpl);
 
-	transport_kunmap_data_sg(cmd);
-
 	core_scsi3_put_pr_reg(dest_pr_reg);
 	return 0;
 out:
@@ -4011,6 +4011,7 @@ core_scsi3_pri_read_full_status(struct se_cmd *cmd)
 	 * Set the ADDITIONAL DESCRIPTOR LENGTH
 	 */
 	put_unaligned_be32(desc_len, &buf[off]);
+	off += 4;
 	/*
 	 * Size of full desctipor header minus TransportID
 	 * containing $FABRIC_MOD specific) initiator device/port
diff --git a/drivers/target/target_core_tmr.c b/drivers/target/target_core_tmr.c
index e22847bd79b9..9c7bc1ca341a 100644
--- a/drivers/target/target_core_tmr.c
+++ b/drivers/target/target_core_tmr.c
@@ -133,6 +133,15 @@ static bool __target_check_io_state(struct se_cmd *se_cmd,
 		spin_unlock(&se_cmd->t_state_lock);
 		return false;
 	}
+	if (se_cmd->transport_state & CMD_T_PRE_EXECUTE) {
+		if (se_cmd->scsi_status) {
+			pr_debug("Attempted to abort io tag: %llu early failure"
+				" status: 0x%02x\n", se_cmd->tag,
+				se_cmd->scsi_status);
+			spin_unlock(&se_cmd->t_state_lock);
+			return false;
+		}
+	}
 	if (sess->sess_tearing_down || se_cmd->cmd_wait_set) {
 		pr_debug("Attempted to abort io tag: %llu already shutdown,"
 			" skipping\n", se_cmd->tag);
@@ -217,7 +226,8 @@ static void core_tmr_drain_tmr_list(
 	 * LUN_RESET tmr..
 	 */
 	spin_lock_irqsave(&dev->se_tmr_lock, flags);
-	list_del_init(&tmr->tmr_list);
+	if (tmr)
+		list_del_init(&tmr->tmr_list);
 	list_for_each_entry_safe(tmr_p, tmr_pp, &dev->dev_tmr_list, tmr_list) {
 		cmd = tmr_p->task_cmd;
 		if (!cmd) {
diff --git a/drivers/target/target_core_transport.c b/drivers/target/target_core_transport.c
index 836d552b0385..58caacd54a3b 100644
--- a/drivers/target/target_core_transport.c
+++ b/drivers/target/target_core_transport.c
@@ -67,7 +67,6 @@ static void transport_complete_task_attr(struct se_cmd *cmd);
67static int translate_sense_reason(struct se_cmd *cmd, sense_reason_t reason); 67static int translate_sense_reason(struct se_cmd *cmd, sense_reason_t reason);
68static void transport_handle_queue_full(struct se_cmd *cmd, 68static void transport_handle_queue_full(struct se_cmd *cmd,
69 struct se_device *dev, int err, bool write_pending); 69 struct se_device *dev, int err, bool write_pending);
70static int transport_put_cmd(struct se_cmd *cmd);
71static void target_complete_ok_work(struct work_struct *work); 70static void target_complete_ok_work(struct work_struct *work);
72 71
73int init_se_kmem_caches(void) 72int init_se_kmem_caches(void)
@@ -668,7 +667,7 @@ int transport_cmd_finish_abort(struct se_cmd *cmd, int remove)
668 if (transport_cmd_check_stop_to_fabric(cmd)) 667 if (transport_cmd_check_stop_to_fabric(cmd))
669 return 1; 668 return 1;
670 if (remove && ack_kref) 669 if (remove && ack_kref)
671 ret = transport_put_cmd(cmd); 670 ret = target_put_sess_cmd(cmd);
672 671
673 return ret; 672 return ret;
674} 673}
@@ -1730,9 +1729,6 @@ void transport_generic_request_failure(struct se_cmd *cmd,
1730{ 1729{
1731 int ret = 0, post_ret = 0; 1730 int ret = 0, post_ret = 0;
1732 1731
1733 if (transport_check_aborted_status(cmd, 1))
1734 return;
1735
1736 pr_debug("-----[ Storage Engine Exception; sense_reason %d\n", 1732 pr_debug("-----[ Storage Engine Exception; sense_reason %d\n",
1737 sense_reason); 1733 sense_reason);
1738 target_show_cmd("-----[ ", cmd); 1734 target_show_cmd("-----[ ", cmd);
@@ -1741,6 +1737,7 @@ void transport_generic_request_failure(struct se_cmd *cmd,
1741 * For SAM Task Attribute emulation for failed struct se_cmd 1737 * For SAM Task Attribute emulation for failed struct se_cmd
1742 */ 1738 */
1743 transport_complete_task_attr(cmd); 1739 transport_complete_task_attr(cmd);
1740
1744 /* 1741 /*
1745 * Handle special case for COMPARE_AND_WRITE failure, where the 1742 * Handle special case for COMPARE_AND_WRITE failure, where the
1746 * callback is expected to drop the per device ->caw_sem. 1743 * callback is expected to drop the per device ->caw_sem.
@@ -1749,6 +1746,9 @@ void transport_generic_request_failure(struct se_cmd *cmd,
1749 cmd->transport_complete_callback) 1746 cmd->transport_complete_callback)
1750 cmd->transport_complete_callback(cmd, false, &post_ret); 1747 cmd->transport_complete_callback(cmd, false, &post_ret);
1751 1748
1749 if (transport_check_aborted_status(cmd, 1))
1750 return;
1751
1752 switch (sense_reason) { 1752 switch (sense_reason) {
1753 case TCM_NON_EXISTENT_LUN: 1753 case TCM_NON_EXISTENT_LUN:
1754 case TCM_UNSUPPORTED_SCSI_OPCODE: 1754 case TCM_UNSUPPORTED_SCSI_OPCODE:
@@ -1772,8 +1772,8 @@ void transport_generic_request_failure(struct se_cmd *cmd,
1772 case TCM_UNSUPPORTED_SEGMENT_DESC_TYPE_CODE: 1772 case TCM_UNSUPPORTED_SEGMENT_DESC_TYPE_CODE:
1773 break; 1773 break;
1774 case TCM_OUT_OF_RESOURCES: 1774 case TCM_OUT_OF_RESOURCES:
1775 sense_reason = TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE; 1775 cmd->scsi_status = SAM_STAT_TASK_SET_FULL;
1776 break; 1776 goto queue_status;
1777 case TCM_RESERVATION_CONFLICT: 1777 case TCM_RESERVATION_CONFLICT:
1778 /* 1778 /*
1779 * No SENSE Data payload for this case, set SCSI Status 1779 * No SENSE Data payload for this case, set SCSI Status
@@ -1795,11 +1795,8 @@ void transport_generic_request_failure(struct se_cmd *cmd,
1795 cmd->orig_fe_lun, 0x2C, 1795 cmd->orig_fe_lun, 0x2C,
1796 ASCQ_2CH_PREVIOUS_RESERVATION_CONFLICT_STATUS); 1796 ASCQ_2CH_PREVIOUS_RESERVATION_CONFLICT_STATUS);
1797 } 1797 }
1798 trace_target_cmd_complete(cmd); 1798
1799 ret = cmd->se_tfo->queue_status(cmd); 1799 goto queue_status;
1800 if (ret)
1801 goto queue_full;
1802 goto check_stop;
1803 default: 1800 default:
1804 pr_err("Unknown transport error for CDB 0x%02x: %d\n", 1801 pr_err("Unknown transport error for CDB 0x%02x: %d\n",
1805 cmd->t_task_cdb[0], sense_reason); 1802 cmd->t_task_cdb[0], sense_reason);
@@ -1816,6 +1813,11 @@ check_stop:
1816 transport_cmd_check_stop_to_fabric(cmd); 1813 transport_cmd_check_stop_to_fabric(cmd);
1817 return; 1814 return;
1818 1815
1816queue_status:
1817 trace_target_cmd_complete(cmd);
1818 ret = cmd->se_tfo->queue_status(cmd);
1819 if (!ret)
1820 goto check_stop;
1819queue_full: 1821queue_full:
1820 transport_handle_queue_full(cmd, cmd->se_dev, ret, false); 1822 transport_handle_queue_full(cmd, cmd->se_dev, ret, false);
1821} 1823}
@@ -1973,6 +1975,7 @@ void target_execute_cmd(struct se_cmd *cmd)
1973 } 1975 }
1974 1976
1975 cmd->t_state = TRANSPORT_PROCESSING; 1977 cmd->t_state = TRANSPORT_PROCESSING;
1978 cmd->transport_state &= ~CMD_T_PRE_EXECUTE;
1976 cmd->transport_state |= CMD_T_ACTIVE | CMD_T_SENT; 1979 cmd->transport_state |= CMD_T_ACTIVE | CMD_T_SENT;
1977 spin_unlock_irq(&cmd->t_state_lock); 1980 spin_unlock_irq(&cmd->t_state_lock);
1978 1981
@@ -2010,6 +2013,8 @@ static void target_restart_delayed_cmds(struct se_device *dev)
2010 list_del(&cmd->se_delayed_node); 2013 list_del(&cmd->se_delayed_node);
2011 spin_unlock(&dev->delayed_cmd_lock); 2014 spin_unlock(&dev->delayed_cmd_lock);
2012 2015
2016 cmd->transport_state |= CMD_T_SENT;
2017
2013 __target_execute_cmd(cmd, true); 2018 __target_execute_cmd(cmd, true);
2014 2019
2015 if (cmd->sam_task_attr == TCM_ORDERED_TAG) 2020 if (cmd->sam_task_attr == TCM_ORDERED_TAG)
@@ -2045,6 +2050,8 @@ static void transport_complete_task_attr(struct se_cmd *cmd)
2045 pr_debug("Incremented dev_cur_ordered_id: %u for ORDERED\n", 2050 pr_debug("Incremented dev_cur_ordered_id: %u for ORDERED\n",
2046 dev->dev_cur_ordered_id); 2051 dev->dev_cur_ordered_id);
2047 } 2052 }
2053 cmd->se_cmd_flags &= ~SCF_TASK_ATTR_SET;
2054
2048restart: 2055restart:
2049 target_restart_delayed_cmds(dev); 2056 target_restart_delayed_cmds(dev);
2050} 2057}
@@ -2090,7 +2097,7 @@ static void transport_complete_qf(struct se_cmd *cmd)
2090 ret = cmd->se_tfo->queue_data_in(cmd); 2097 ret = cmd->se_tfo->queue_data_in(cmd);
2091 break; 2098 break;
2092 } 2099 }
2093 /* Fall through for DMA_TO_DEVICE */ 2100 /* fall through */
2094 case DMA_NONE: 2101 case DMA_NONE:
2095queue_status: 2102queue_status:
2096 trace_target_cmd_complete(cmd); 2103 trace_target_cmd_complete(cmd);
@@ -2268,7 +2275,7 @@ queue_rsp:
2268 goto queue_full; 2275 goto queue_full;
2269 break; 2276 break;
2270 } 2277 }
2271 /* Fall through for DMA_TO_DEVICE */ 2278 /* fall through */
2272 case DMA_NONE: 2279 case DMA_NONE:
2273queue_status: 2280queue_status:
2274 trace_target_cmd_complete(cmd); 2281 trace_target_cmd_complete(cmd);
@@ -2352,22 +2359,6 @@ static inline void transport_free_pages(struct se_cmd *cmd)
2352 cmd->t_bidi_data_nents = 0; 2359 cmd->t_bidi_data_nents = 0;
2353} 2360}
2354 2361
2355/**
2356 * transport_put_cmd - release a reference to a command
2357 * @cmd: command to release
2358 *
2359 * This routine releases our reference to the command and frees it if possible.
2360 */
2361static int transport_put_cmd(struct se_cmd *cmd)
2362{
2363 BUG_ON(!cmd->se_tfo);
2364 /*
2365 * If this cmd has been setup with target_get_sess_cmd(), drop
2366 * the kref and call ->release_cmd() in kref callback.
2367 */
2368 return target_put_sess_cmd(cmd);
2369}
2370
2371void *transport_kmap_data_sg(struct se_cmd *cmd) 2362void *transport_kmap_data_sg(struct se_cmd *cmd)
2372{ 2363{
2373 struct scatterlist *sg = cmd->t_data_sg; 2364 struct scatterlist *sg = cmd->t_data_sg;
@@ -2570,7 +2561,20 @@ EXPORT_SYMBOL(transport_generic_new_cmd);
2570 2561
2571static void transport_write_pending_qf(struct se_cmd *cmd) 2562static void transport_write_pending_qf(struct se_cmd *cmd)
2572{ 2563{
2564 unsigned long flags;
2573 int ret; 2565 int ret;
2566 bool stop;
2567
2568 spin_lock_irqsave(&cmd->t_state_lock, flags);
2569 stop = (cmd->transport_state & (CMD_T_STOP | CMD_T_ABORTED));
2570 spin_unlock_irqrestore(&cmd->t_state_lock, flags);
2571
2572 if (stop) {
2573 pr_debug("%s:%d CMD_T_STOP|CMD_T_ABORTED for ITT: 0x%08llx\n",
2574 __func__, __LINE__, cmd->tag);
2575 complete_all(&cmd->t_transport_stop_comp);
2576 return;
2577 }
2574 2578
2575 ret = cmd->se_tfo->write_pending(cmd); 2579 ret = cmd->se_tfo->write_pending(cmd);
2576 if (ret) { 2580 if (ret) {
@@ -2603,7 +2607,7 @@ int transport_generic_free_cmd(struct se_cmd *cmd, int wait_for_tasks)
2603 target_wait_free_cmd(cmd, &aborted, &tas); 2607 target_wait_free_cmd(cmd, &aborted, &tas);
2604 2608
2605 if (!aborted || tas) 2609 if (!aborted || tas)
2606 ret = transport_put_cmd(cmd); 2610 ret = target_put_sess_cmd(cmd);
2607 } else { 2611 } else {
2608 if (wait_for_tasks) 2612 if (wait_for_tasks)
2609 target_wait_free_cmd(cmd, &aborted, &tas); 2613 target_wait_free_cmd(cmd, &aborted, &tas);
@@ -2619,7 +2623,7 @@ int transport_generic_free_cmd(struct se_cmd *cmd, int wait_for_tasks)
2619 transport_lun_remove_cmd(cmd); 2623 transport_lun_remove_cmd(cmd);
2620 2624
2621 if (!aborted || tas) 2625 if (!aborted || tas)
2622 ret = transport_put_cmd(cmd); 2626 ret = target_put_sess_cmd(cmd);
2623 } 2627 }
2624 /* 2628 /*
2625 * If the task has been internally aborted due to TMR ABORT_TASK 2629 * If the task has been internally aborted due to TMR ABORT_TASK
@@ -2664,6 +2668,7 @@ int target_get_sess_cmd(struct se_cmd *se_cmd, bool ack_kref)
2664 ret = -ESHUTDOWN; 2668 ret = -ESHUTDOWN;
2665 goto out; 2669 goto out;
2666 } 2670 }
2671 se_cmd->transport_state |= CMD_T_PRE_EXECUTE;
2667 list_add_tail(&se_cmd->se_cmd_list, &se_sess->sess_cmd_list); 2672 list_add_tail(&se_cmd->se_cmd_list, &se_sess->sess_cmd_list);
2668out: 2673out:
2669 spin_unlock_irqrestore(&se_sess->sess_cmd_lock, flags); 2674 spin_unlock_irqrestore(&se_sess->sess_cmd_lock, flags);
@@ -3145,6 +3150,21 @@ static const struct sense_info sense_info_table[] = {
3145 .key = NOT_READY, 3150 .key = NOT_READY,
3146 .asc = 0x08, /* LOGICAL UNIT COMMUNICATION FAILURE */ 3151 .asc = 0x08, /* LOGICAL UNIT COMMUNICATION FAILURE */
3147 }, 3152 },
3153 [TCM_INSUFFICIENT_REGISTRATION_RESOURCES] = {
3154 /*
3155  * From spc4r22 sections 5.7.7 and 5.7.8:
3156  * If a PERSISTENT RESERVE OUT command with a REGISTER service action
3157  * or a REGISTER AND IGNORE EXISTING KEY service action or
3158  * REGISTER AND MOVE service action is attempted,
3159  * but there are insufficient device server resources to complete the
3160  * operation, then the command shall be terminated with CHECK CONDITION
3161  * status, with the sense key set to ILLEGAL REQUEST, and the additional
3162 * sense code set to INSUFFICIENT REGISTRATION RESOURCES.
3163 */
3164 .key = ILLEGAL_REQUEST,
3165 .asc = 0x55,
3166 .ascq = 0x04, /* INSUFFICIENT REGISTRATION RESOURCES */
3167 },
3148}; 3168};
3149 3169
3150static int translate_sense_reason(struct se_cmd *cmd, sense_reason_t reason) 3170static int translate_sense_reason(struct se_cmd *cmd, sense_reason_t reason)
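
The comment in the new sense_info_table entry quotes SPC-4; concretely, an entry of { ILLEGAL_REQUEST, 0x55, 0x04 } ends up encoded as fixed-format sense data returned with CHECK CONDITION status. The standalone sketch below shows that byte layout; the helper names are invented for illustration and this is not the function the target core actually uses.

#include <stdint.h>
#include <string.h>

#define SENSE_KEY_ILLEGAL_REQUEST	0x05

/* Fill an 18-byte fixed-format sense buffer (SPC-4, response code 0x70). */
static void build_fixed_sense(uint8_t *buf, uint8_t key, uint8_t asc, uint8_t ascq)
{
	memset(buf, 0, 18);
	buf[0]  = 0x70;		/* current error, fixed format */
	buf[2]  = key & 0x0f;	/* sense key */
	buf[7]  = 0x0a;		/* additional sense length: bytes 8..17 */
	buf[12] = asc;		/* additional sense code */
	buf[13] = ascq;		/* additional sense code qualifier */
}

/* INSUFFICIENT REGISTRATION RESOURCES: ILLEGAL REQUEST / ASC 0x55 / ASCQ 0x04 */
static void build_insufficient_registration_sense(uint8_t *buf)
{
	build_fixed_sense(buf, SENSE_KEY_ILLEGAL_REQUEST, 0x55, 0x04);
}
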
diff --git a/drivers/target/target_core_user.c b/drivers/target/target_core_user.c
index 9469695f5871..cc2468a299d3 100644
--- a/drivers/target/target_core_user.c
+++ b/drivers/target/target_core_user.c
@@ -150,6 +150,8 @@ struct tcmu_dev {
150 wait_queue_head_t nl_cmd_wq; 150 wait_queue_head_t nl_cmd_wq;
151 151
152 char dev_config[TCMU_CONFIG_LEN]; 152 char dev_config[TCMU_CONFIG_LEN];
153
154 int nl_reply_supported;
153}; 155};
154 156
155#define TCMU_DEV(_se_dev) container_of(_se_dev, struct tcmu_dev, se_dev) 157#define TCMU_DEV(_se_dev) container_of(_se_dev, struct tcmu_dev, se_dev)
@@ -430,7 +432,6 @@ static struct tcmu_cmd *tcmu_alloc_cmd(struct se_cmd *se_cmd)
430 struct se_device *se_dev = se_cmd->se_dev; 432 struct se_device *se_dev = se_cmd->se_dev;
431 struct tcmu_dev *udev = TCMU_DEV(se_dev); 433 struct tcmu_dev *udev = TCMU_DEV(se_dev);
432 struct tcmu_cmd *tcmu_cmd; 434 struct tcmu_cmd *tcmu_cmd;
433 int cmd_id;
434 435
435 tcmu_cmd = kmem_cache_zalloc(tcmu_cmd_cache, GFP_KERNEL); 436 tcmu_cmd = kmem_cache_zalloc(tcmu_cmd_cache, GFP_KERNEL);
436 if (!tcmu_cmd) 437 if (!tcmu_cmd)
@@ -438,9 +439,6 @@ static struct tcmu_cmd *tcmu_alloc_cmd(struct se_cmd *se_cmd)
438 439
439 tcmu_cmd->se_cmd = se_cmd; 440 tcmu_cmd->se_cmd = se_cmd;
440 tcmu_cmd->tcmu_dev = udev; 441 tcmu_cmd->tcmu_dev = udev;
441 if (udev->cmd_time_out)
442 tcmu_cmd->deadline = jiffies +
443 msecs_to_jiffies(udev->cmd_time_out);
444 442
445 tcmu_cmd_reset_dbi_cur(tcmu_cmd); 443 tcmu_cmd_reset_dbi_cur(tcmu_cmd);
446 tcmu_cmd->dbi_cnt = tcmu_cmd_get_block_cnt(tcmu_cmd); 444 tcmu_cmd->dbi_cnt = tcmu_cmd_get_block_cnt(tcmu_cmd);
@@ -451,19 +449,6 @@ static struct tcmu_cmd *tcmu_alloc_cmd(struct se_cmd *se_cmd)
451 return NULL; 449 return NULL;
452 } 450 }
453 451
454 idr_preload(GFP_KERNEL);
455 spin_lock_irq(&udev->commands_lock);
456 cmd_id = idr_alloc(&udev->commands, tcmu_cmd, 0,
457 USHRT_MAX, GFP_NOWAIT);
458 spin_unlock_irq(&udev->commands_lock);
459 idr_preload_end();
460
461 if (cmd_id < 0) {
462 tcmu_free_cmd(tcmu_cmd);
463 return NULL;
464 }
465 tcmu_cmd->cmd_id = cmd_id;
466
467 return tcmu_cmd; 452 return tcmu_cmd;
468} 453}
469 454
@@ -746,6 +731,30 @@ static inline size_t tcmu_cmd_get_cmd_size(struct tcmu_cmd *tcmu_cmd,
746 return command_size; 731 return command_size;
747} 732}
748 733
734static int tcmu_setup_cmd_timer(struct tcmu_cmd *tcmu_cmd)
735{
736 struct tcmu_dev *udev = tcmu_cmd->tcmu_dev;
737 unsigned long tmo = udev->cmd_time_out;
738 int cmd_id;
739
740 if (tcmu_cmd->cmd_id)
741 return 0;
742
743 cmd_id = idr_alloc(&udev->commands, tcmu_cmd, 1, USHRT_MAX, GFP_NOWAIT);
744 if (cmd_id < 0) {
745 pr_err("tcmu: Could not allocate cmd id.\n");
746 return cmd_id;
747 }
748 tcmu_cmd->cmd_id = cmd_id;
749
750 if (!tmo)
751 return 0;
752
753 tcmu_cmd->deadline = round_jiffies_up(jiffies + msecs_to_jiffies(tmo));
754 mod_timer(&udev->timeout, tcmu_cmd->deadline);
755 return 0;
756}
757
749static sense_reason_t 758static sense_reason_t
750tcmu_queue_cmd_ring(struct tcmu_cmd *tcmu_cmd) 759tcmu_queue_cmd_ring(struct tcmu_cmd *tcmu_cmd)
751{ 760{
@@ -839,7 +848,6 @@ tcmu_queue_cmd_ring(struct tcmu_cmd *tcmu_cmd)
839 entry = (void *) mb + CMDR_OFF + cmd_head; 848 entry = (void *) mb + CMDR_OFF + cmd_head;
840 memset(entry, 0, command_size); 849 memset(entry, 0, command_size);
841 tcmu_hdr_set_op(&entry->hdr.len_op, TCMU_OP_CMD); 850 tcmu_hdr_set_op(&entry->hdr.len_op, TCMU_OP_CMD);
842 entry->hdr.cmd_id = tcmu_cmd->cmd_id;
843 851
844 /* Handle allocating space from the data area */ 852 /* Handle allocating space from the data area */
845 tcmu_cmd_reset_dbi_cur(tcmu_cmd); 853 tcmu_cmd_reset_dbi_cur(tcmu_cmd);
@@ -877,6 +885,14 @@ tcmu_queue_cmd_ring(struct tcmu_cmd *tcmu_cmd)
877 } 885 }
878 entry->req.iov_bidi_cnt = iov_cnt; 886 entry->req.iov_bidi_cnt = iov_cnt;
879 887
888 ret = tcmu_setup_cmd_timer(tcmu_cmd);
889 if (ret) {
890 tcmu_cmd_free_data(tcmu_cmd, tcmu_cmd->dbi_cnt);
891 mutex_unlock(&udev->cmdr_lock);
892 return TCM_OUT_OF_RESOURCES;
893 }
894 entry->hdr.cmd_id = tcmu_cmd->cmd_id;
895
880 /* 896 /*
881 * Recalculate the command's base size and size according 897 * Recalculate the command's base size and size according
882 * to the actual needs 898 * to the actual needs
@@ -910,8 +926,6 @@ tcmu_queue_cmd_ring(struct tcmu_cmd *tcmu_cmd)
910static sense_reason_t 926static sense_reason_t
911tcmu_queue_cmd(struct se_cmd *se_cmd) 927tcmu_queue_cmd(struct se_cmd *se_cmd)
912{ 928{
913 struct se_device *se_dev = se_cmd->se_dev;
914 struct tcmu_dev *udev = TCMU_DEV(se_dev);
915 struct tcmu_cmd *tcmu_cmd; 929 struct tcmu_cmd *tcmu_cmd;
916 sense_reason_t ret; 930 sense_reason_t ret;
917 931
@@ -922,9 +936,6 @@ tcmu_queue_cmd(struct se_cmd *se_cmd)
922 ret = tcmu_queue_cmd_ring(tcmu_cmd); 936 ret = tcmu_queue_cmd_ring(tcmu_cmd);
923 if (ret != TCM_NO_SENSE) { 937 if (ret != TCM_NO_SENSE) {
924 pr_err("TCMU: Could not queue command\n"); 938 pr_err("TCMU: Could not queue command\n");
925 spin_lock_irq(&udev->commands_lock);
926 idr_remove(&udev->commands, tcmu_cmd->cmd_id);
927 spin_unlock_irq(&udev->commands_lock);
928 939
929 tcmu_free_cmd(tcmu_cmd); 940 tcmu_free_cmd(tcmu_cmd);
930 } 941 }
@@ -1112,6 +1123,8 @@ static struct se_device *tcmu_alloc_device(struct se_hba *hba, const char *name)
1112 init_waitqueue_head(&udev->nl_cmd_wq); 1123 init_waitqueue_head(&udev->nl_cmd_wq);
1113 spin_lock_init(&udev->nl_cmd_lock); 1124 spin_lock_init(&udev->nl_cmd_lock);
1114 1125
1126 INIT_RADIX_TREE(&udev->data_blocks, GFP_KERNEL);
1127
1115 return &udev->se_dev; 1128 return &udev->se_dev;
1116} 1129}
1117 1130
@@ -1280,10 +1293,54 @@ static void tcmu_dev_call_rcu(struct rcu_head *p)
1280 kfree(udev); 1293 kfree(udev);
1281} 1294}
1282 1295
1296static int tcmu_check_and_free_pending_cmd(struct tcmu_cmd *cmd)
1297{
1298 if (test_bit(TCMU_CMD_BIT_EXPIRED, &cmd->flags)) {
1299 kmem_cache_free(tcmu_cmd_cache, cmd);
1300 return 0;
1301 }
1302 return -EINVAL;
1303}
1304
1305static void tcmu_blocks_release(struct tcmu_dev *udev)
1306{
1307 int i;
1308 struct page *page;
1309
1310 /* Try to release all block pages */
1311 mutex_lock(&udev->cmdr_lock);
1312 for (i = 0; i <= udev->dbi_max; i++) {
1313 page = radix_tree_delete(&udev->data_blocks, i);
1314 if (page) {
1315 __free_page(page);
1316 atomic_dec(&global_db_count);
1317 }
1318 }
1319 mutex_unlock(&udev->cmdr_lock);
1320}
1321
1283static void tcmu_dev_kref_release(struct kref *kref) 1322static void tcmu_dev_kref_release(struct kref *kref)
1284{ 1323{
1285 struct tcmu_dev *udev = container_of(kref, struct tcmu_dev, kref); 1324 struct tcmu_dev *udev = container_of(kref, struct tcmu_dev, kref);
1286 struct se_device *dev = &udev->se_dev; 1325 struct se_device *dev = &udev->se_dev;
1326 struct tcmu_cmd *cmd;
1327 bool all_expired = true;
1328 int i;
1329
1330 vfree(udev->mb_addr);
1331 udev->mb_addr = NULL;
1332
1333 /* Upper layer should drain all requests before calling this */
1334 spin_lock_irq(&udev->commands_lock);
1335 idr_for_each_entry(&udev->commands, cmd, i) {
1336 if (tcmu_check_and_free_pending_cmd(cmd) != 0)
1337 all_expired = false;
1338 }
1339 idr_destroy(&udev->commands);
1340 spin_unlock_irq(&udev->commands_lock);
1341 WARN_ON(!all_expired);
1342
1343 tcmu_blocks_release(udev);
1287 1344
1288 call_rcu(&dev->rcu_head, tcmu_dev_call_rcu); 1345 call_rcu(&dev->rcu_head, tcmu_dev_call_rcu);
1289} 1346}
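
Together with the later removal from tcmu_destroy_device(), the effect of this hunk is to split teardown in two: device removal only stops new work and notifies userspace, while freeing the mailbox, draining the command idr (where only expired entries may remain) and releasing the data-area pages is deferred to the final kref put. A condensed, illustrative overview of that split, collapsed into one hypothetical function for readability (it is not the literal code; locking and error handling follow the hunks above and below):

static void tcmu_teardown_overview(struct tcmu_dev *udev)
{
	/* tcmu_destroy_device(): stop new work, tell userspace */
	del_timer_sync(&udev->timeout);
	tcmu_netlink_event(udev, TCMU_CMD_REMOVED_DEVICE, 0, NULL);
	uio_unregister_device(&udev->uio_info);

	/* tcmu_dev_kref_release(): last reference gone, free memory */
	vfree(udev->mb_addr);
	/* drain udev->commands; anything left must be TCMU_CMD_BIT_EXPIRED */
	/* tcmu_blocks_release(udev): free data-area pages */
}
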
@@ -1306,6 +1363,10 @@ static void tcmu_init_genl_cmd_reply(struct tcmu_dev *udev, int cmd)
1306 1363
1307 if (!tcmu_kern_cmd_reply_supported) 1364 if (!tcmu_kern_cmd_reply_supported)
1308 return; 1365 return;
1366
1367 if (udev->nl_reply_supported <= 0)
1368 return;
1369
1309relock: 1370relock:
1310 spin_lock(&udev->nl_cmd_lock); 1371 spin_lock(&udev->nl_cmd_lock);
1311 1372
@@ -1332,6 +1393,9 @@ static int tcmu_wait_genl_cmd_reply(struct tcmu_dev *udev)
1332 if (!tcmu_kern_cmd_reply_supported) 1393 if (!tcmu_kern_cmd_reply_supported)
1333 return 0; 1394 return 0;
1334 1395
1396 if (udev->nl_reply_supported <= 0)
1397 return 0;
1398
1335 pr_debug("sleeping for nl reply\n"); 1399 pr_debug("sleeping for nl reply\n");
1336 wait_for_completion(&nl_cmd->complete); 1400 wait_for_completion(&nl_cmd->complete);
1337 1401
@@ -1476,8 +1540,6 @@ static int tcmu_configure_device(struct se_device *dev)
1476 WARN_ON(udev->data_size % PAGE_SIZE); 1540 WARN_ON(udev->data_size % PAGE_SIZE);
1477 WARN_ON(udev->data_size % DATA_BLOCK_SIZE); 1541 WARN_ON(udev->data_size % DATA_BLOCK_SIZE);
1478 1542
1479 INIT_RADIX_TREE(&udev->data_blocks, GFP_KERNEL);
1480
1481 info->version = __stringify(TCMU_MAILBOX_VERSION); 1543 info->version = __stringify(TCMU_MAILBOX_VERSION);
1482 1544
1483 info->mem[0].name = "tcm-user command & data buffer"; 1545 info->mem[0].name = "tcm-user command & data buffer";
@@ -1506,6 +1568,12 @@ static int tcmu_configure_device(struct se_device *dev)
1506 dev->dev_attrib.emulate_write_cache = 0; 1568 dev->dev_attrib.emulate_write_cache = 0;
1507 dev->dev_attrib.hw_queue_depth = 128; 1569 dev->dev_attrib.hw_queue_depth = 128;
1508 1570
1571 /* If the user didn't explicitly disable netlink reply support, use
1572  * the module-scope setting.
1573 */
1574 if (udev->nl_reply_supported >= 0)
1575 udev->nl_reply_supported = tcmu_kern_cmd_reply_supported;
1576
1509 /* 1577 /*
1510 * Get a ref in case userspace does a close on the uio device before 1578 * Get a ref in case userspace does a close on the uio device before
1511 * LIO has initiated tcmu_free_device. 1579 * LIO has initiated tcmu_free_device.
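
Reading this hunk together with the earlier checks added to tcmu_init_genl_cmd_reply() and tcmu_wait_genl_cmd_reply(), the precedence at device configure time appears to be: a negative per-device nl_reply_supported opts the device out of waiting for userspace netlink replies, while zero or a positive value inherits the module-scope tcmu_kern_cmd_reply_supported default. The helper below is hypothetical and merely restates that reading.

#include <stdbool.h>

/* Hypothetical restatement, not part of the patch. */
static bool waits_for_nl_reply_after_configure(int per_dev_setting,
					       int module_setting)
{
	if (per_dev_setting < 0)	/* e.g. created with nl_reply_supported=-1 */
		return false;

	return module_setting > 0;	/* inherit module-scope default */
}

The per-device value can still be changed afterwards through the new nl_reply_supported configfs attribute or supplied at creation via the nl_reply_supported=%d parameter shown further down in this diff.
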
@@ -1527,6 +1595,7 @@ err_netlink:
1527 uio_unregister_device(&udev->uio_info); 1595 uio_unregister_device(&udev->uio_info);
1528err_register: 1596err_register:
1529 vfree(udev->mb_addr); 1597 vfree(udev->mb_addr);
1598 udev->mb_addr = NULL;
1530err_vzalloc: 1599err_vzalloc:
1531 kfree(info->name); 1600 kfree(info->name);
1532 info->name = NULL; 1601 info->name = NULL;
@@ -1534,37 +1603,11 @@ err_vzalloc:
1534 return ret; 1603 return ret;
1535} 1604}
1536 1605
1537static int tcmu_check_and_free_pending_cmd(struct tcmu_cmd *cmd)
1538{
1539 if (test_bit(TCMU_CMD_BIT_EXPIRED, &cmd->flags)) {
1540 kmem_cache_free(tcmu_cmd_cache, cmd);
1541 return 0;
1542 }
1543 return -EINVAL;
1544}
1545
1546static bool tcmu_dev_configured(struct tcmu_dev *udev) 1606static bool tcmu_dev_configured(struct tcmu_dev *udev)
1547{ 1607{
1548 return udev->uio_info.uio_dev ? true : false; 1608 return udev->uio_info.uio_dev ? true : false;
1549} 1609}
1550 1610
1551static void tcmu_blocks_release(struct tcmu_dev *udev)
1552{
1553 int i;
1554 struct page *page;
1555
1556 /* Try to release all block pages */
1557 mutex_lock(&udev->cmdr_lock);
1558 for (i = 0; i <= udev->dbi_max; i++) {
1559 page = radix_tree_delete(&udev->data_blocks, i);
1560 if (page) {
1561 __free_page(page);
1562 atomic_dec(&global_db_count);
1563 }
1564 }
1565 mutex_unlock(&udev->cmdr_lock);
1566}
1567
1568static void tcmu_free_device(struct se_device *dev) 1611static void tcmu_free_device(struct se_device *dev)
1569{ 1612{
1570 struct tcmu_dev *udev = TCMU_DEV(dev); 1613 struct tcmu_dev *udev = TCMU_DEV(dev);
@@ -1576,9 +1619,6 @@ static void tcmu_free_device(struct se_device *dev)
1576static void tcmu_destroy_device(struct se_device *dev) 1619static void tcmu_destroy_device(struct se_device *dev)
1577{ 1620{
1578 struct tcmu_dev *udev = TCMU_DEV(dev); 1621 struct tcmu_dev *udev = TCMU_DEV(dev);
1579 struct tcmu_cmd *cmd;
1580 bool all_expired = true;
1581 int i;
1582 1622
1583 del_timer_sync(&udev->timeout); 1623 del_timer_sync(&udev->timeout);
1584 1624
@@ -1586,20 +1626,6 @@ static void tcmu_destroy_device(struct se_device *dev)
1586 list_del(&udev->node); 1626 list_del(&udev->node);
1587 mutex_unlock(&root_udev_mutex); 1627 mutex_unlock(&root_udev_mutex);
1588 1628
1589 vfree(udev->mb_addr);
1590
1591 /* Upper layer should drain all requests before calling this */
1592 spin_lock_irq(&udev->commands_lock);
1593 idr_for_each_entry(&udev->commands, cmd, i) {
1594 if (tcmu_check_and_free_pending_cmd(cmd) != 0)
1595 all_expired = false;
1596 }
1597 idr_destroy(&udev->commands);
1598 spin_unlock_irq(&udev->commands_lock);
1599 WARN_ON(!all_expired);
1600
1601 tcmu_blocks_release(udev);
1602
1603 tcmu_netlink_event(udev, TCMU_CMD_REMOVED_DEVICE, 0, NULL); 1629 tcmu_netlink_event(udev, TCMU_CMD_REMOVED_DEVICE, 0, NULL);
1604 1630
1605 uio_unregister_device(&udev->uio_info); 1631 uio_unregister_device(&udev->uio_info);
@@ -1610,7 +1636,7 @@ static void tcmu_destroy_device(struct se_device *dev)
1610 1636
1611enum { 1637enum {
1612 Opt_dev_config, Opt_dev_size, Opt_hw_block_size, Opt_hw_max_sectors, 1638 Opt_dev_config, Opt_dev_size, Opt_hw_block_size, Opt_hw_max_sectors,
1613 Opt_err, 1639 Opt_nl_reply_supported, Opt_err,
1614}; 1640};
1615 1641
1616static match_table_t tokens = { 1642static match_table_t tokens = {
@@ -1618,6 +1644,7 @@ static match_table_t tokens = {
1618 {Opt_dev_size, "dev_size=%u"}, 1644 {Opt_dev_size, "dev_size=%u"},
1619 {Opt_hw_block_size, "hw_block_size=%u"}, 1645 {Opt_hw_block_size, "hw_block_size=%u"},
1620 {Opt_hw_max_sectors, "hw_max_sectors=%u"}, 1646 {Opt_hw_max_sectors, "hw_max_sectors=%u"},
1647 {Opt_nl_reply_supported, "nl_reply_supported=%d"},
1621 {Opt_err, NULL} 1648 {Opt_err, NULL}
1622}; 1649};
1623 1650
@@ -1692,6 +1719,17 @@ static ssize_t tcmu_set_configfs_dev_params(struct se_device *dev,
1692 ret = tcmu_set_dev_attrib(&args[0], 1719 ret = tcmu_set_dev_attrib(&args[0],
1693 &(dev->dev_attrib.hw_max_sectors)); 1720 &(dev->dev_attrib.hw_max_sectors));
1694 break; 1721 break;
1722 case Opt_nl_reply_supported:
1723 arg_p = match_strdup(&args[0]);
1724 if (!arg_p) {
1725 ret = -ENOMEM;
1726 break;
1727 }
1728 ret = kstrtoint(arg_p, 0, &udev->nl_reply_supported);
1729 kfree(arg_p);
1730 if (ret < 0)
1731 pr_err("kstrtoint() failed for nl_reply_supported=\n");
1732 break;
1695 default: 1733 default:
1696 break; 1734 break;
1697 } 1735 }
@@ -1734,8 +1772,7 @@ static ssize_t tcmu_cmd_time_out_show(struct config_item *item, char *page)
1734{ 1772{
1735 struct se_dev_attrib *da = container_of(to_config_group(item), 1773 struct se_dev_attrib *da = container_of(to_config_group(item),
1736 struct se_dev_attrib, da_group); 1774 struct se_dev_attrib, da_group);
1737 struct tcmu_dev *udev = container_of(da->da_dev, 1775 struct tcmu_dev *udev = TCMU_DEV(da->da_dev);
1738 struct tcmu_dev, se_dev);
1739 1776
1740 return snprintf(page, PAGE_SIZE, "%lu\n", udev->cmd_time_out / MSEC_PER_SEC); 1777 return snprintf(page, PAGE_SIZE, "%lu\n", udev->cmd_time_out / MSEC_PER_SEC);
1741} 1778}
@@ -1842,6 +1879,34 @@ static ssize_t tcmu_dev_size_store(struct config_item *item, const char *page,
1842} 1879}
1843CONFIGFS_ATTR(tcmu_, dev_size); 1880CONFIGFS_ATTR(tcmu_, dev_size);
1844 1881
1882static ssize_t tcmu_nl_reply_supported_show(struct config_item *item,
1883 char *page)
1884{
1885 struct se_dev_attrib *da = container_of(to_config_group(item),
1886 struct se_dev_attrib, da_group);
1887 struct tcmu_dev *udev = TCMU_DEV(da->da_dev);
1888
1889 return snprintf(page, PAGE_SIZE, "%d\n", udev->nl_reply_supported);
1890}
1891
1892static ssize_t tcmu_nl_reply_supported_store(struct config_item *item,
1893 const char *page, size_t count)
1894{
1895 struct se_dev_attrib *da = container_of(to_config_group(item),
1896 struct se_dev_attrib, da_group);
1897 struct tcmu_dev *udev = TCMU_DEV(da->da_dev);
1898 s8 val;
1899 int ret;
1900
1901 ret = kstrtos8(page, 0, &val);
1902 if (ret < 0)
1903 return ret;
1904
1905 udev->nl_reply_supported = val;
1906 return count;
1907}
1908CONFIGFS_ATTR(tcmu_, nl_reply_supported);
1909
1845static ssize_t tcmu_emulate_write_cache_show(struct config_item *item, 1910static ssize_t tcmu_emulate_write_cache_show(struct config_item *item,
1846 char *page) 1911 char *page)
1847{ 1912{
@@ -1884,6 +1949,7 @@ static struct configfs_attribute *tcmu_attrib_attrs[] = {
1884 &tcmu_attr_dev_config, 1949 &tcmu_attr_dev_config,
1885 &tcmu_attr_dev_size, 1950 &tcmu_attr_dev_size,
1886 &tcmu_attr_emulate_write_cache, 1951 &tcmu_attr_emulate_write_cache,
1952 &tcmu_attr_nl_reply_supported,
1887 NULL, 1953 NULL,
1888}; 1954};
1889 1955