author		Mike Christie <michaelc@cs.wisc.edu>	2007-05-30 13:57:18 -0400
committer	James Bottomley <jejb@mulgrave.il.steeleye.com>	2007-06-02 15:34:14 -0400
commit		77a23c21aaa723f6b0ffc4a701be8c8e5a32346d (patch)
tree		5b51b8299a8deede4c91dffde032899ab76e331a /drivers
parent		218432c68085d6c2b04df57daaf105d2ffa2aa61 (diff)
[SCSI] libiscsi: fix iscsi cmdsn allocation
The cmdsn allocation and pdu transmit code can race, and we can end up
sending a pdu with cmdsn 10 before a pdu with 5. The target will then fail
the connection/session. This patch fixes the problem by delaying the cmdsn
allocation until we are about to send the pdu.

This also removes the xmitmutex. We were using the connection xmitmutex
during error handling to handle races with mtask and ctask cleanup and
completion. For ctasks we now have nice refcounting, and for the mtask, if
we hit the case where the mtask times out and is floating around somewhere
in the driver, we end up dropping the session. To handle session-level
cleanup, we use the xmit suspend bit along with scsi_flush_work and the
session lock to make sure that the xmit thread is not transmitting a task
while we are trying to kill it.

Signed-off-by: Mike Christie <michaelc@cs.wisc.edu>
Cc: Roland Dreier <rdreier@cisco.com>
Signed-off-by: James Bottomley <James.Bottomley@SteelEye.com>
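For context, the window check added below relies on RFC 1982 serial number
arithmetic so that CmdSN/MaxCmdSN comparisons survive 32-bit wraparound. The
following standalone C sketch is illustrative only and is not part of the
patch; the helper names sna_lt, sna_lte and window_closed merely mirror the
patch's iscsi_sna_lt, iscsi_sna_lte and iscsi_check_cmdsn_window_closed.

#include <stdint.h>
#include <stdio.h>

/* RFC 1982 serial number arithmetic on 32 bits, as used in the patch. */
#define SNA32_CHECK 2147483648UL

/* "n1 is less than n2", allowing for 32-bit wraparound. */
static int sna_lt(uint32_t n1, uint32_t n2)
{
	return n1 != n2 && ((n1 < n2 && (n2 - n1 < SNA32_CHECK)) ||
			    (n1 > n2 && (n2 - n1 < SNA32_CHECK)));
}

/* "n1 is less than or equal to n2". */
static int sna_lte(uint32_t n1, uint32_t n2)
{
	return n1 == n2 || sna_lt(n1, n2);
}

/* The window is open while CmdSN <= MaxCmdSN in serial-number order. */
static int window_closed(uint32_t cmdsn, uint32_t max_cmdsn)
{
	return !sna_lte(cmdsn, max_cmdsn);
}

int main(void)
{
	/* Near the 32-bit wrap: MaxCmdSN has wrapped past 0, CmdSN has not. */
	uint32_t cmdsn = 0xfffffffeU, max_cmdsn = 2;

	printf("window closed: %d\n", window_closed(cmdsn, max_cmdsn)); /* 0: open */
	printf("window closed: %d\n", window_closed(5, 2));             /* 1: closed */
	return 0;
}

Because the subtraction wraps modulo 2^32, a MaxCmdSN that has already
wrapped past zero still compares as being ahead of a CmdSN near 0xffffffff,
so the window correctly stays open across the wrap.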
Diffstat (limited to 'drivers')
-rw-r--r--	drivers/infiniband/ulp/iser/iscsi_iser.c |  18
-rw-r--r--	drivers/scsi/iscsi_tcp.c                 |  33
-rw-r--r--	drivers/scsi/libiscsi.c                  | 491
3 files changed, 247 insertions(+), 295 deletions(-)
diff --git a/drivers/infiniband/ulp/iser/iscsi_iser.c b/drivers/infiniband/ulp/iser/iscsi_iser.c
index 6c8cd09c58f0..9782190a9ee5 100644
--- a/drivers/infiniband/ulp/iser/iscsi_iser.c
+++ b/drivers/infiniband/ulp/iser/iscsi_iser.c
@@ -134,19 +134,9 @@ iscsi_iser_cmd_init(struct iscsi_cmd_task *ctask)
 {
 	struct iscsi_iser_conn *iser_conn = ctask->conn->dd_data;
 	struct iscsi_iser_cmd_task *iser_ctask = ctask->dd_data;
-	struct scsi_cmnd *sc = ctask->sc;
 
 	iser_ctask->command_sent = 0;
 	iser_ctask->iser_conn = iser_conn;
-
-	if (sc->sc_data_direction == DMA_TO_DEVICE) {
-		BUG_ON(sc->request_bufflen == 0);
-
-		debug_scsi("cmd [itt %x total %d imm %d unsol_data %d\n",
-			   ctask->itt, sc->request_bufflen, ctask->imm_count,
-			   ctask->unsol_count);
-	}
-
 	iser_ctask_rdma_init(iser_ctask);
 }
 
@@ -219,6 +209,14 @@ iscsi_iser_ctask_xmit(struct iscsi_conn *conn,
 	struct iscsi_iser_cmd_task *iser_ctask = ctask->dd_data;
 	int error = 0;
 
+	if (ctask->sc->sc_data_direction == DMA_TO_DEVICE) {
+		BUG_ON(ctask->sc->request_bufflen == 0);
+
+		debug_scsi("cmd [itt %x total %d imm %d unsol_data %d\n",
+			   ctask->itt, ctask->sc->request_bufflen,
+			   ctask->imm_count, ctask->unsol_count);
+	}
+
 	debug_scsi("ctask deq [cid %d itt 0x%x]\n",
 		   conn->id, ctask->itt);
 
diff --git a/drivers/scsi/iscsi_tcp.c b/drivers/scsi/iscsi_tcp.c
index 0afdca2224c2..8edcfddc0baf 100644
--- a/drivers/scsi/iscsi_tcp.c
+++ b/drivers/scsi/iscsi_tcp.c
@@ -34,7 +34,6 @@
 #include <linux/delay.h>
 #include <linux/kfifo.h>
 #include <linux/scatterlist.h>
-#include <linux/mutex.h>
 #include <net/tcp.h>
 #include <scsi/scsi_cmnd.h>
 #include <scsi/scsi_host.h>
@@ -211,7 +210,6 @@ iscsi_tcp_cleanup_ctask(struct iscsi_conn *conn, struct iscsi_cmd_task *ctask)
 static int
 iscsi_data_rsp(struct iscsi_conn *conn, struct iscsi_cmd_task *ctask)
 {
-	int rc;
 	struct iscsi_tcp_conn *tcp_conn = conn->dd_data;
 	struct iscsi_tcp_cmd_task *tcp_ctask = ctask->dd_data;
 	struct iscsi_data_rsp *rhdr = (struct iscsi_data_rsp *)tcp_conn->in.hdr;
@@ -219,9 +217,7 @@ iscsi_data_rsp(struct iscsi_conn *conn, struct iscsi_cmd_task *ctask)
 	struct scsi_cmnd *sc = ctask->sc;
 	int datasn = be32_to_cpu(rhdr->datasn);
 
-	rc = iscsi_check_assign_cmdsn(session, (struct iscsi_nopin*)rhdr);
-	if (rc)
-		return rc;
+	iscsi_update_cmdsn(session, (struct iscsi_nopin*)rhdr);
 	/*
 	 * setup Data-In byte counter (gets decremented..)
 	 */
@@ -377,12 +373,10 @@ iscsi_r2t_rsp(struct iscsi_conn *conn, struct iscsi_cmd_task *ctask)
 		return ISCSI_ERR_R2TSN;
 	}
 
-	rc = iscsi_check_assign_cmdsn(session, (struct iscsi_nopin*)rhdr);
-	if (rc)
-		return rc;
-
 	/* fill-in new R2T associated with the task */
 	spin_lock(&session->lock);
+	iscsi_update_cmdsn(session, (struct iscsi_nopin*)rhdr);
+
 	if (!ctask->sc || ctask->mtask ||
 	    session->state != ISCSI_STATE_LOGGED_IN) {
 		printk(KERN_INFO "iscsi_tcp: dropping R2T itt %d in "
@@ -1762,12 +1756,6 @@ iscsi_tcp_ctask_xmit(struct iscsi_conn *conn, struct iscsi_cmd_task *ctask)
 	debug_scsi("ctask deq [cid %d xmstate %x itt 0x%x]\n",
 		   conn->id, tcp_ctask->xmstate, ctask->itt);
 
-	/*
-	 * serialize with TMF AbortTask
-	 */
-	if (ctask->mtask)
-		return rc;
-
 	rc = iscsi_send_cmd_hdr(conn, ctask);
 	if (rc)
 		return rc;
@@ -1949,8 +1937,7 @@ iscsi_tcp_conn_bind(struct iscsi_cls_session *cls_session,
 
 /* called with host lock */
 static void
-iscsi_tcp_mgmt_init(struct iscsi_conn *conn, struct iscsi_mgmt_task *mtask,
-		    char *data, uint32_t data_size)
+iscsi_tcp_mgmt_init(struct iscsi_conn *conn, struct iscsi_mgmt_task *mtask)
 {
 	struct iscsi_tcp_mgmt_task *tcp_mtask = mtask->dd_data;
 	tcp_mtask->xmstate = XMSTATE_IMM_HDR_INIT;
@@ -2073,22 +2060,15 @@ iscsi_tcp_conn_get_param(struct iscsi_cls_conn *cls_conn,
 
 	switch(param) {
 	case ISCSI_PARAM_CONN_PORT:
-		mutex_lock(&conn->xmitmutex);
-		if (!tcp_conn->sock) {
-			mutex_unlock(&conn->xmitmutex);
+		if (!tcp_conn->sock)
 			return -EINVAL;
-		}
 
 		inet = inet_sk(tcp_conn->sock->sk);
 		len = sprintf(buf, "%hu\n", be16_to_cpu(inet->dport));
-		mutex_unlock(&conn->xmitmutex);
 		break;
 	case ISCSI_PARAM_CONN_ADDRESS:
-		mutex_lock(&conn->xmitmutex);
-		if (!tcp_conn->sock) {
-			mutex_unlock(&conn->xmitmutex);
+		if (!tcp_conn->sock)
 			return -EINVAL;
-		}
 
 		sk = tcp_conn->sock->sk;
 		if (sk->sk_family == PF_INET) {
@@ -2099,7 +2079,6 @@ iscsi_tcp_conn_get_param(struct iscsi_cls_conn *cls_conn,
 			np = inet6_sk(sk);
 			len = sprintf(buf, NIP6_FMT "\n", NIP6(np->daddr));
 		}
-		mutex_unlock(&conn->xmitmutex);
 		break;
 	default:
 		return iscsi_conn_get_param(cls_conn, param, buf);
diff --git a/drivers/scsi/libiscsi.c b/drivers/scsi/libiscsi.c
index 63f0a15d9887..938f527cd81a 100644
--- a/drivers/scsi/libiscsi.c
+++ b/drivers/scsi/libiscsi.c
@@ -22,7 +22,6 @@
  * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
  */
 #include <linux/types.h>
-#include <linux/mutex.h>
 #include <linux/kfifo.h>
 #include <linux/delay.h>
 #include <asm/unaligned.h>
@@ -46,27 +45,53 @@ class_to_transport_session(struct iscsi_cls_session *cls_session)
 }
 EXPORT_SYMBOL_GPL(class_to_transport_session);
 
-#define INVALID_SN_DELTA	0xffff
+/* Serial Number Arithmetic, 32 bits, less than, RFC1982 */
+#define SNA32_CHECK 2147483648UL
 
-int
-iscsi_check_assign_cmdsn(struct iscsi_session *session, struct iscsi_nopin *hdr)
+static int iscsi_sna_lt(u32 n1, u32 n2)
+{
+	return n1 != n2 && ((n1 < n2 && (n2 - n1 < SNA32_CHECK)) ||
+			    (n1 > n2 && (n2 - n1 < SNA32_CHECK)));
+}
+
+/* Serial Number Arithmetic, 32 bits, less than, RFC1982 */
+static int iscsi_sna_lte(u32 n1, u32 n2)
+{
+	return n1 == n2 || ((n1 < n2 && (n2 - n1 < SNA32_CHECK)) ||
+			    (n1 > n2 && (n2 - n1 < SNA32_CHECK)));
+}
+
+void
+iscsi_update_cmdsn(struct iscsi_session *session, struct iscsi_nopin *hdr)
 {
 	uint32_t max_cmdsn = be32_to_cpu(hdr->max_cmdsn);
 	uint32_t exp_cmdsn = be32_to_cpu(hdr->exp_cmdsn);
 
-	if (max_cmdsn < exp_cmdsn -1 &&
-	    max_cmdsn > exp_cmdsn - INVALID_SN_DELTA)
-		return ISCSI_ERR_MAX_CMDSN;
-	if (max_cmdsn > session->max_cmdsn ||
-	    max_cmdsn < session->max_cmdsn - INVALID_SN_DELTA)
-		session->max_cmdsn = max_cmdsn;
-	if (exp_cmdsn > session->exp_cmdsn ||
-	    exp_cmdsn < session->exp_cmdsn - INVALID_SN_DELTA)
+	/*
+	 * standard specifies this check for when to update expected and
+	 * max sequence numbers
+	 */
+	if (iscsi_sna_lt(max_cmdsn, exp_cmdsn - 1))
+		return;
+
+	if (exp_cmdsn != session->exp_cmdsn &&
+	    !iscsi_sna_lt(exp_cmdsn, session->exp_cmdsn))
 		session->exp_cmdsn = exp_cmdsn;
 
-	return 0;
+	if (max_cmdsn != session->max_cmdsn &&
+	    !iscsi_sna_lt(max_cmdsn, session->max_cmdsn)) {
+		session->max_cmdsn = max_cmdsn;
+		/*
+		 * if the window closed with IO queued, then kick the
+		 * xmit thread
+		 */
+		if (!list_empty(&session->leadconn->xmitqueue) ||
+		    __kfifo_len(session->leadconn->mgmtqueue))
+			scsi_queue_work(session->host,
+					&session->leadconn->xmitwork);
+	}
 }
-EXPORT_SYMBOL_GPL(iscsi_check_assign_cmdsn);
+EXPORT_SYMBOL_GPL(iscsi_update_cmdsn);
 
 void iscsi_prep_unsolicit_data_pdu(struct iscsi_cmd_task *ctask,
 				   struct iscsi_data *hdr)
@@ -175,8 +200,13 @@ static void iscsi_prep_scsi_cmd_pdu(struct iscsi_cmd_task *ctask)
 	}
 
 	conn->scsicmd_pdus_cnt++;
+
+	debug_scsi("iscsi prep [%s cid %d sc %p cdb 0x%x itt 0x%x len %d "
+		   "cmdsn %d win %d]\n",
+		   sc->sc_data_direction == DMA_TO_DEVICE ? "write" : "read",
+		   conn->id, sc, sc->cmnd[0], ctask->itt, sc->request_bufflen,
+		   session->cmdsn, session->max_cmdsn - session->exp_cmdsn + 1);
 }
-EXPORT_SYMBOL_GPL(iscsi_prep_scsi_cmd_pdu);
 
 /**
  * iscsi_complete_command - return command back to scsi-ml
@@ -205,26 +235,12 @@ static void __iscsi_get_ctask(struct iscsi_cmd_task *ctask)
 	atomic_inc(&ctask->refcount);
 }
 
-static void iscsi_get_ctask(struct iscsi_cmd_task *ctask)
-{
-	spin_lock_bh(&ctask->conn->session->lock);
-	__iscsi_get_ctask(ctask);
-	spin_unlock_bh(&ctask->conn->session->lock);
-}
-
 static void __iscsi_put_ctask(struct iscsi_cmd_task *ctask)
 {
 	if (atomic_dec_and_test(&ctask->refcount))
 		iscsi_complete_command(ctask);
 }
 
-static void iscsi_put_ctask(struct iscsi_cmd_task *ctask)
-{
-	spin_lock_bh(&ctask->conn->session->lock);
-	__iscsi_put_ctask(ctask);
-	spin_unlock_bh(&ctask->conn->session->lock);
-}
-
 /**
  * iscsi_cmd_rsp - SCSI Command Response processing
  * @conn: iscsi connection
@@ -236,21 +252,15 @@ static void iscsi_put_ctask(struct iscsi_cmd_task *ctask)
  * iscsi_cmd_rsp sets up the scsi_cmnd fields based on the PDU and
  * then completes the command and task.
  **/
-static int iscsi_scsi_cmd_rsp(struct iscsi_conn *conn, struct iscsi_hdr *hdr,
+static void iscsi_scsi_cmd_rsp(struct iscsi_conn *conn, struct iscsi_hdr *hdr,
 			       struct iscsi_cmd_task *ctask, char *data,
 			       int datalen)
 {
-	int rc;
 	struct iscsi_cmd_rsp *rhdr = (struct iscsi_cmd_rsp *)hdr;
 	struct iscsi_session *session = conn->session;
 	struct scsi_cmnd *sc = ctask->sc;
 
-	rc = iscsi_check_assign_cmdsn(session, (struct iscsi_nopin*)rhdr);
-	if (rc) {
-		sc->result = DID_ERROR << 16;
-		goto out;
-	}
-
+	iscsi_update_cmdsn(session, (struct iscsi_nopin*)rhdr);
 	conn->exp_statsn = be32_to_cpu(rhdr->statsn) + 1;
 
 	sc->result = (DID_OK << 16) | rhdr->cmd_status;
@@ -302,7 +312,6 @@ out:
 	conn->scsirsp_pdus_cnt++;
 
 	__iscsi_put_ctask(ctask);
-	return rc;
 }
 
 static void iscsi_tmf_rsp(struct iscsi_conn *conn, struct iscsi_hdr *hdr)
@@ -382,8 +391,8 @@ int __iscsi_complete_pdu(struct iscsi_conn *conn, struct iscsi_hdr *hdr,
 	switch(opcode) {
 	case ISCSI_OP_SCSI_CMD_RSP:
 		BUG_ON((void*)ctask != ctask->sc->SCp.ptr);
-		rc = iscsi_scsi_cmd_rsp(conn, hdr, ctask, data,
+		iscsi_scsi_cmd_rsp(conn, hdr, ctask, data,
 				   datalen);
 		break;
 	case ISCSI_OP_SCSI_DATA_IN:
 		BUG_ON((void*)ctask != ctask->sc->SCp.ptr);
@@ -406,11 +415,7 @@ int __iscsi_complete_pdu(struct iscsi_conn *conn, struct iscsi_hdr *hdr,
 		debug_scsi("immrsp [op 0x%x cid %d itt 0x%x len %d]\n",
 			   opcode, conn->id, mtask->itt, datalen);
 
-		rc = iscsi_check_assign_cmdsn(session,
-					      (struct iscsi_nopin*)hdr);
-		if (rc)
-			goto done;
-
+		iscsi_update_cmdsn(session, (struct iscsi_nopin*)hdr);
 		switch(opcode) {
 		case ISCSI_OP_LOGOUT_RSP:
 			if (datalen) {
@@ -459,10 +464,7 @@ int __iscsi_complete_pdu(struct iscsi_conn *conn, struct iscsi_hdr *hdr,
 			break;
 		}
 	} else if (itt == ~0U) {
-		rc = iscsi_check_assign_cmdsn(session,
-					      (struct iscsi_nopin*)hdr);
-		if (rc)
-			goto done;
+		iscsi_update_cmdsn(session, (struct iscsi_nopin*)hdr);
 
 		switch(opcode) {
 		case ISCSI_OP_NOOP_IN:
@@ -492,7 +494,6 @@ int __iscsi_complete_pdu(struct iscsi_conn *conn, struct iscsi_hdr *hdr,
 	} else
 		rc = ISCSI_ERR_BAD_ITT;
 
-done:
 	return rc;
 }
 EXPORT_SYMBOL_GPL(__iscsi_complete_pdu);
@@ -579,17 +580,47 @@ void iscsi_conn_failure(struct iscsi_conn *conn, enum iscsi_err err)
 }
 EXPORT_SYMBOL_GPL(iscsi_conn_failure);
 
+static void iscsi_prep_mtask(struct iscsi_conn *conn,
+			     struct iscsi_mgmt_task *mtask)
+{
+	struct iscsi_session *session = conn->session;
+	struct iscsi_hdr *hdr = mtask->hdr;
+	struct iscsi_nopout *nop = (struct iscsi_nopout *)hdr;
+
+	if (hdr->opcode != (ISCSI_OP_LOGIN | ISCSI_OP_IMMEDIATE) &&
+	    hdr->opcode != (ISCSI_OP_TEXT | ISCSI_OP_IMMEDIATE))
+		nop->exp_statsn = cpu_to_be32(conn->exp_statsn);
+	/*
+	 * pre-format CmdSN for outgoing PDU.
+	 */
+	nop->cmdsn = cpu_to_be32(session->cmdsn);
+	if (hdr->itt != RESERVED_ITT) {
+		hdr->itt = build_itt(mtask->itt, conn->id, session->age);
+		if (conn->c_stage == ISCSI_CONN_STARTED &&
+		    !(hdr->opcode & ISCSI_OP_IMMEDIATE))
+			session->cmdsn++;
+	}
+
+	if (session->tt->init_mgmt_task)
+		session->tt->init_mgmt_task(conn, mtask);
+
+	debug_scsi("mgmtpdu [op 0x%x hdr->itt 0x%x datalen %d]\n",
+		   hdr->opcode, hdr->itt, mtask->data_count);
+}
+
 static int iscsi_xmit_mtask(struct iscsi_conn *conn)
 {
 	struct iscsi_hdr *hdr = conn->mtask->hdr;
 	int rc, was_logout = 0;
 
+	spin_unlock_bh(&conn->session->lock);
 	if ((hdr->opcode & ISCSI_OPCODE_MASK) == ISCSI_OP_LOGOUT) {
 		conn->session->state = ISCSI_STATE_IN_RECOVERY;
 		iscsi_block_session(session_to_cls(conn->session));
 		was_logout = 1;
 	}
 	rc = conn->session->tt->xmit_mgmt_task(conn, conn->mtask);
+	spin_lock_bh(&conn->session->lock);
 	if (rc)
 		return rc;
 
@@ -603,6 +634,45 @@ static int iscsi_xmit_mtask(struct iscsi_conn *conn)
 	return 0;
 }
 
+static int iscsi_check_cmdsn_window_closed(struct iscsi_conn *conn)
+{
+	struct iscsi_session *session = conn->session;
+
+	/*
+	 * Check for iSCSI window and take care of CmdSN wrap-around
+	 */
+	if (!iscsi_sna_lte(session->cmdsn, session->max_cmdsn)) {
+		debug_scsi("iSCSI CmdSN closed. MaxCmdSN %u CmdSN %u\n",
+			   session->max_cmdsn, session->cmdsn);
+		return -ENOSPC;
+	}
+	return 0;
+}
+
+static int iscsi_xmit_ctask(struct iscsi_conn *conn)
+{
+	struct iscsi_cmd_task *ctask = conn->ctask;
+	int rc = 0;
+
+	/*
+	 * serialize with TMF AbortTask
+	 */
+	if (ctask->state == ISCSI_TASK_ABORTING)
+		goto done;
+
+	__iscsi_get_ctask(ctask);
+	spin_unlock_bh(&conn->session->lock);
+	rc = conn->session->tt->xmit_cmd_task(conn, ctask);
+	spin_lock_bh(&conn->session->lock);
+	__iscsi_put_ctask(ctask);
+
+done:
+	if (!rc)
+		/* done with this ctask */
+		conn->ctask = NULL;
+	return rc;
+}
+
 /**
  * iscsi_data_xmit - xmit any command into the scheduled connection
  * @conn: iscsi connection
@@ -614,106 +684,79 @@ static int iscsi_xmit_mtask(struct iscsi_conn *conn)
  **/
 static int iscsi_data_xmit(struct iscsi_conn *conn)
 {
-	struct iscsi_transport *tt;
 	int rc = 0;
 
+	spin_lock_bh(&conn->session->lock);
 	if (unlikely(conn->suspend_tx)) {
 		debug_scsi("conn %d Tx suspended!\n", conn->id);
+		spin_unlock_bh(&conn->session->lock);
 		return -ENODATA;
 	}
-	tt = conn->session->tt;
-
-	/*
-	 * Transmit in the following order:
-	 *
-	 * 1) un-finished xmit (ctask or mtask)
-	 * 2) immediate control PDUs
-	 * 3) write data
-	 * 4) SCSI commands
-	 * 5) non-immediate control PDUs
-	 *
-	 * No need to lock around __kfifo_get as long as
-	 * there's one producer and one consumer.
-	 */
-
-	BUG_ON(conn->ctask && conn->mtask);
 
 	if (conn->ctask) {
-		iscsi_get_ctask(conn->ctask);
-		rc = tt->xmit_cmd_task(conn, conn->ctask);
-		iscsi_put_ctask(conn->ctask);
+		rc = iscsi_xmit_ctask(conn);
 		if (rc)
 			goto again;
-		/* done with this in-progress ctask */
-		conn->ctask = NULL;
 	}
+
 	if (conn->mtask) {
 		rc = iscsi_xmit_mtask(conn);
 		if (rc)
 			goto again;
 	}
 
-	/* process immediate first */
-	if (unlikely(__kfifo_len(conn->immqueue))) {
-		while (__kfifo_get(conn->immqueue, (void*)&conn->mtask,
-				   sizeof(void*))) {
-			spin_lock_bh(&conn->session->lock);
-			list_add_tail(&conn->mtask->running,
-				      &conn->mgmt_run_list);
-			spin_unlock_bh(&conn->session->lock);
-			rc = iscsi_xmit_mtask(conn);
-			if (rc)
-				goto again;
-		}
+	/*
+	 * process mgmt pdus like nops before commands since we should
+	 * only have one nop-out as a ping from us and targets should not
+	 * overflow us with nop-ins
+	 */
+check_mgmt:
+	while (__kfifo_get(conn->mgmtqueue, (void*)&conn->mtask,
+			   sizeof(void*))) {
+		iscsi_prep_mtask(conn, conn->mtask);
+		list_add_tail(&conn->mtask->running, &conn->mgmt_run_list);
+		rc = iscsi_xmit_mtask(conn);
+		if (rc)
+			goto again;
 	}
 
 	/* process command queue */
-	spin_lock_bh(&conn->session->lock);
 	while (!list_empty(&conn->xmitqueue)) {
+		rc = iscsi_check_cmdsn_window_closed(conn);
+		if (rc) {
+			spin_unlock_bh(&conn->session->lock);
+			return rc;
+		}
 		/*
 		 * iscsi tcp may readd the task to the xmitqueue to send
 		 * write data
 		 */
 		conn->ctask = list_entry(conn->xmitqueue.next,
					 struct iscsi_cmd_task, running);
+		if (conn->ctask->state == ISCSI_TASK_PENDING) {
+			iscsi_prep_scsi_cmd_pdu(conn->ctask);
+			conn->session->tt->init_cmd_task(conn->ctask);
+		}
 		conn->ctask->state = ISCSI_TASK_RUNNING;
 		list_move_tail(conn->xmitqueue.next, &conn->run_list);
-		__iscsi_get_ctask(conn->ctask);
-		spin_unlock_bh(&conn->session->lock);
-
-		rc = tt->xmit_cmd_task(conn, conn->ctask);
-
-		spin_lock_bh(&conn->session->lock);
-		__iscsi_put_ctask(conn->ctask);
-		if (rc) {
-			spin_unlock_bh(&conn->session->lock);
+		rc = iscsi_xmit_ctask(conn);
+		if (rc)
 			goto again;
-		}
+		/*
+		 * we could continuously get new ctask requests so
+		 * we need to check the mgmt queue for nops that need to
+		 * be sent to aviod starvation
+		 */
+		if (__kfifo_len(conn->mgmtqueue))
+			goto check_mgmt;
 	}
 	spin_unlock_bh(&conn->session->lock);
-	/* done with this ctask */
-	conn->ctask = NULL;
-
-	/* process the rest control plane PDUs, if any */
-	if (unlikely(__kfifo_len(conn->mgmtqueue))) {
-		while (__kfifo_get(conn->mgmtqueue, (void*)&conn->mtask,
-				   sizeof(void*))) {
-			spin_lock_bh(&conn->session->lock);
-			list_add_tail(&conn->mtask->running,
-				      &conn->mgmt_run_list);
-			spin_unlock_bh(&conn->session->lock);
-			rc = iscsi_xmit_mtask(conn);
-			if (rc)
-				goto again;
-		}
-	}
-
 	return -ENODATA;
 
 again:
 	if (unlikely(conn->suspend_tx))
-		return -ENODATA;
-
+		rc = -ENODATA;
+	spin_unlock_bh(&conn->session->lock);
 	return rc;
 }
 
@@ -725,11 +768,9 @@ static void iscsi_xmitworker(struct work_struct *work)
 	/*
 	 * serialize Xmit worker on a per-connection basis.
 	 */
-	mutex_lock(&conn->xmitmutex);
 	do {
 		rc = iscsi_data_xmit(conn);
 	} while (rc >= 0 || rc == -EAGAIN);
-	mutex_unlock(&conn->xmitmutex);
 }
 
 enum {
@@ -787,20 +828,23 @@ int iscsi_queuecommand(struct scsi_cmnd *sc, void (*done)(struct scsi_cmnd *))
 		goto fault;
 	}
 
-	/*
-	 * Check for iSCSI window and take care of CmdSN wrap-around
-	 */
-	if ((int)(session->max_cmdsn - session->cmdsn) < 0) {
-		reason = FAILURE_WINDOW_CLOSED;
-		goto reject;
-	}
-
 	conn = session->leadconn;
 	if (!conn) {
 		reason = FAILURE_SESSION_FREED;
 		goto fault;
 	}
 
+	/*
+	 * We check this here and in data xmit, because if we get to the point
+	 * that this check is hitting the window then we have enough IO in
+	 * flight and enough IO waiting to be transmitted it is better
+	 * to let the scsi/block layer queue up.
+	 */
+	if (iscsi_check_cmdsn_window_closed(conn)) {
+		reason = FAILURE_WINDOW_CLOSED;
+		goto reject;
+	}
+
 	if (!__kfifo_get(session->cmdpool.queue, (void*)&ctask,
 			 sizeof(void*))) {
 		reason = FAILURE_OOM;
@@ -815,17 +859,8 @@ int iscsi_queuecommand(struct scsi_cmnd *sc, void (*done)(struct scsi_cmnd *))
 	ctask->conn = conn;
 	ctask->sc = sc;
 	INIT_LIST_HEAD(&ctask->running);
-	iscsi_prep_scsi_cmd_pdu(ctask);
-
-	session->tt->init_cmd_task(ctask);
 
 	list_add_tail(&ctask->running, &conn->xmitqueue);
-	debug_scsi(
-	       "ctask enq [%s cid %d sc %p cdb 0x%x itt 0x%x len %d cmdsn %d "
-	       "win %d]\n",
-		sc->sc_data_direction == DMA_TO_DEVICE ? "write" : "read",
-		conn->id, sc, sc->cmnd[0], ctask->itt, sc->request_bufflen,
-		session->cmdsn, session->max_cmdsn - session->exp_cmdsn + 1);
 	spin_unlock(&session->lock);
 
 	scsi_queue_work(host, &conn->xmitwork);
@@ -856,19 +891,16 @@ int iscsi_change_queue_depth(struct scsi_device *sdev, int depth)
 }
 EXPORT_SYMBOL_GPL(iscsi_change_queue_depth);
 
-static int
-iscsi_conn_send_generic(struct iscsi_conn *conn, struct iscsi_hdr *hdr,
+static struct iscsi_mgmt_task *
+__iscsi_conn_send_pdu(struct iscsi_conn *conn, struct iscsi_hdr *hdr,
 			char *data, uint32_t data_size)
 {
 	struct iscsi_session *session = conn->session;
-	struct iscsi_nopout *nop = (struct iscsi_nopout *)hdr;
 	struct iscsi_mgmt_task *mtask;
 
-	spin_lock_bh(&session->lock);
-	if (session->state == ISCSI_STATE_TERMINATE) {
-		spin_unlock_bh(&session->lock);
-		return -EPERM;
-	}
+	if (session->state == ISCSI_STATE_TERMINATE)
+		return NULL;
+
 	if (hdr->opcode == (ISCSI_OP_LOGIN | ISCSI_OP_IMMEDIATE) ||
 	    hdr->opcode == (ISCSI_OP_TEXT | ISCSI_OP_IMMEDIATE))
 		/*
@@ -882,27 +914,11 @@ iscsi_conn_send_generic(struct iscsi_conn *conn, struct iscsi_hdr *hdr,
 		BUG_ON(conn->c_stage == ISCSI_CONN_INITIAL_STAGE);
 		BUG_ON(conn->c_stage == ISCSI_CONN_STOPPED);
 
-		nop->exp_statsn = cpu_to_be32(conn->exp_statsn);
 		if (!__kfifo_get(session->mgmtpool.queue,
-				 (void*)&mtask, sizeof(void*))) {
-			spin_unlock_bh(&session->lock);
-			return -ENOSPC;
-		}
+				 (void*)&mtask, sizeof(void*)))
+			return NULL;
 	}
 
-	/*
-	 * pre-format CmdSN for outgoing PDU.
-	 */
-	if (hdr->itt != RESERVED_ITT) {
-		hdr->itt = build_itt(mtask->itt, conn->id, session->age);
-		nop->cmdsn = cpu_to_be32(session->cmdsn);
-		if (conn->c_stage == ISCSI_CONN_STARTED &&
-		    !(hdr->opcode & ISCSI_OP_IMMEDIATE))
-			session->cmdsn++;
-	} else
-		/* do not advance CmdSN */
-		nop->cmdsn = cpu_to_be32(session->cmdsn);
-
 	if (data_size) {
 		memcpy(mtask->data, data, data_size);
 		mtask->data_count = data_size;
@@ -911,38 +927,23 @@ iscsi_conn_send_generic(struct iscsi_conn *conn, struct iscsi_hdr *hdr,
 
 	INIT_LIST_HEAD(&mtask->running);
 	memcpy(mtask->hdr, hdr, sizeof(struct iscsi_hdr));
-	if (session->tt->init_mgmt_task)
-		session->tt->init_mgmt_task(conn, mtask, data, data_size);
-	spin_unlock_bh(&session->lock);
-
-	debug_scsi("mgmtpdu [op 0x%x hdr->itt 0x%x datalen %d]\n",
-		   hdr->opcode, hdr->itt, data_size);
-
-	/*
-	 * since send_pdu() could be called at least from two contexts,
-	 * we need to serialize __kfifo_put, so we don't have to take
-	 * additional lock on fast data-path
-	 */
-	if (hdr->opcode & ISCSI_OP_IMMEDIATE)
-		__kfifo_put(conn->immqueue, (void*)&mtask, sizeof(void*));
-	else
-		__kfifo_put(conn->mgmtqueue, (void*)&mtask, sizeof(void*));
-
-	scsi_queue_work(session->host, &conn->xmitwork);
-	return 0;
+	__kfifo_put(conn->mgmtqueue, (void*)&mtask, sizeof(void*));
+	return mtask;
 }
 
 int iscsi_conn_send_pdu(struct iscsi_cls_conn *cls_conn, struct iscsi_hdr *hdr,
 			char *data, uint32_t data_size)
 {
 	struct iscsi_conn *conn = cls_conn->dd_data;
-	int rc;
-
-	mutex_lock(&conn->xmitmutex);
-	rc = iscsi_conn_send_generic(conn, hdr, data, data_size);
-	mutex_unlock(&conn->xmitmutex);
+	struct iscsi_session *session = conn->session;
+	int err = 0;
 
-	return rc;
+	spin_lock_bh(&session->lock);
+	if (!__iscsi_conn_send_pdu(conn, hdr, data, data_size))
+		err = -EPERM;
+	spin_unlock_bh(&session->lock);
+	scsi_queue_work(session->host, &conn->xmitwork);
+	return err;
 }
 EXPORT_SYMBOL_GPL(iscsi_conn_send_pdu);
 
@@ -1027,14 +1028,12 @@ static void iscsi_tmabort_timedout(unsigned long data)
 	spin_unlock(&session->lock);
 }
 
-/* must be called with the mutex lock */
 static int iscsi_exec_abort_task(struct scsi_cmnd *sc,
 				 struct iscsi_cmd_task *ctask)
 {
 	struct iscsi_conn *conn = ctask->conn;
 	struct iscsi_session *session = conn->session;
 	struct iscsi_tm *hdr = &conn->tmhdr;
-	int rc;
 
 	/*
 	 * ctask timed out but session is OK requests must be serialized.
@@ -1047,32 +1046,27 @@ static int iscsi_exec_abort_task(struct scsi_cmnd *sc,
 	hdr->rtt = ctask->hdr->itt;
 	hdr->refcmdsn = ctask->hdr->cmdsn;
 
-	rc = iscsi_conn_send_generic(conn, (struct iscsi_hdr *)hdr,
+	ctask->mtask = __iscsi_conn_send_pdu(conn, (struct iscsi_hdr *)hdr,
 				     NULL, 0);
-	if (rc) {
+	if (!ctask->mtask) {
 		iscsi_conn_failure(conn, ISCSI_ERR_CONN_FAILED);
-		debug_scsi("abort sent failure [itt 0x%x] %d\n", ctask->itt,
-			   rc);
-		return rc;
+		debug_scsi("abort sent failure [itt 0x%x]\n", ctask->itt);
+		return -EPERM;
 	}
+	ctask->state = ISCSI_TASK_ABORTING;
 
 	debug_scsi("abort sent [itt 0x%x]\n", ctask->itt);
 
-	spin_lock_bh(&session->lock);
-	ctask->mtask = (struct iscsi_mgmt_task *)
-			session->mgmt_cmds[get_itt(hdr->itt) -
-					ISCSI_MGMT_ITT_OFFSET];
-
 	if (conn->tmabort_state == TMABORT_INITIAL) {
 		conn->tmfcmd_pdus_cnt++;
-		conn->tmabort_timer.expires = 10*HZ + jiffies;
+		conn->tmabort_timer.expires = 20*HZ + jiffies;
 		conn->tmabort_timer.function = iscsi_tmabort_timedout;
 		conn->tmabort_timer.data = (unsigned long)ctask;
 		add_timer(&conn->tmabort_timer);
 		debug_scsi("abort set timeout [itt 0x%x]\n", ctask->itt);
 	}
 	spin_unlock_bh(&session->lock);
-	mutex_unlock(&conn->xmitmutex);
+	scsi_queue_work(session->host, &conn->xmitwork);
 
 	/*
 	 * block eh thread until:
@@ -1089,13 +1083,12 @@ static int iscsi_exec_abort_task(struct scsi_cmnd *sc,
 	if (signal_pending(current))
 		flush_signals(current);
 	del_timer_sync(&conn->tmabort_timer);
-
-	mutex_lock(&conn->xmitmutex);
+	spin_lock_bh(&session->lock);
 	return 0;
 }
 
 /*
- * xmit mutex and session lock must be held
+ * session lock must be held
  */
 static struct iscsi_mgmt_task *
 iscsi_remove_mgmt_task(struct kfifo *fifo, uint32_t itt)
@@ -1127,7 +1120,7 @@ static int iscsi_ctask_mtask_cleanup(struct iscsi_cmd_task *ctask)
 	if (!ctask->mtask)
 		return -EINVAL;
 
-	if (!iscsi_remove_mgmt_task(conn->immqueue, ctask->mtask->itt))
+	if (!iscsi_remove_mgmt_task(conn->mgmtqueue, ctask->mtask->itt))
 		list_del(&ctask->mtask->running);
 	__kfifo_put(session->mgmtpool.queue, (void*)&ctask->mtask,
 		    sizeof(void*));
@@ -1136,7 +1129,7 @@ static int iscsi_ctask_mtask_cleanup(struct iscsi_cmd_task *ctask)
 }
 
 /*
- * session lock and xmitmutex must be held
+ * session lock must be held
  */
 static void fail_command(struct iscsi_conn *conn, struct iscsi_cmd_task *ctask,
 			 int err)
@@ -1147,11 +1140,14 @@ static void fail_command(struct iscsi_conn *conn, struct iscsi_cmd_task *ctask,
 	if (!sc)
 		return;
 
-	conn->session->tt->cleanup_cmd_task(conn, ctask);
+	if (ctask->state != ISCSI_TASK_PENDING)
+		conn->session->tt->cleanup_cmd_task(conn, ctask);
 	iscsi_ctask_mtask_cleanup(ctask);
 
 	sc->result = err;
 	sc->resid = sc->request_bufflen;
+	if (conn->ctask == ctask)
+		conn->ctask = NULL;
 	/* release ref from queuecommand */
 	__iscsi_put_ctask(ctask);
 }
@@ -1179,7 +1175,6 @@ int iscsi_eh_abort(struct scsi_cmnd *sc)
 	conn->eh_abort_cnt++;
 	debug_scsi("aborting [sc %p itt 0x%x]\n", sc, ctask->itt);
 
-	mutex_lock(&conn->xmitmutex);
 	spin_lock_bh(&session->lock);
 
 	/*
@@ -1192,9 +1187,8 @@ int iscsi_eh_abort(struct scsi_cmnd *sc)
 
 	/* ctask completed before time out */
 	if (!ctask->sc) {
-		spin_unlock_bh(&session->lock);
 		debug_scsi("sc completed while abort in progress\n");
-		goto success_rel_mutex;
+		goto success;
 	}
 
 	/* what should we do here ? */
@@ -1204,15 +1198,13 @@ int iscsi_eh_abort(struct scsi_cmnd *sc)
 		goto failed;
 	}
 
-	if (ctask->state == ISCSI_TASK_PENDING)
-		goto success_cleanup;
+	if (ctask->state == ISCSI_TASK_PENDING) {
+		fail_command(conn, ctask, DID_ABORT << 16);
+		goto success;
+	}
 
 	conn->tmabort_state = TMABORT_INITIAL;
-
-	spin_unlock_bh(&session->lock);
 	rc = iscsi_exec_abort_task(sc, ctask);
-	spin_lock_bh(&session->lock);
-
 	if (rc || sc->SCp.phase != session->age ||
 	    session->state != ISCSI_STATE_LOGGED_IN)
 		goto failed;
@@ -1220,45 +1212,44 @@ int iscsi_eh_abort(struct scsi_cmnd *sc)
 
 	switch (conn->tmabort_state) {
 	case TMABORT_SUCCESS:
-		goto success_cleanup;
+		spin_unlock_bh(&session->lock);
+		/*
+		 * clean up task if aborted. grab the recv lock as a writer
+		 */
+		write_lock_bh(conn->recv_lock);
+		spin_lock(&session->lock);
+		fail_command(conn, ctask, DID_ABORT << 16);
+		spin_unlock(&session->lock);
+		write_unlock_bh(conn->recv_lock);
+		/*
+		 * make sure xmit thread is not still touching the
+		 * ctask/scsi_cmnd
+		 */
+		scsi_flush_work(session->host);
+		goto success_unlocked;
 	case TMABORT_NOT_FOUND:
 		if (!ctask->sc) {
 			/* ctask completed before tmf abort response */
-			spin_unlock_bh(&session->lock);
 			debug_scsi("sc completed while abort in progress\n");
-			goto success_rel_mutex;
+			goto success;
 		}
 		/* fall through */
 	default:
 		/* timedout or failed */
 		spin_unlock_bh(&session->lock);
 		iscsi_conn_failure(conn, ISCSI_ERR_CONN_FAILED);
-		spin_lock_bh(&session->lock);
-		goto failed;
+		goto failed_unlocked;
 	}
 
-success_cleanup:
-	debug_scsi("abort success [sc %lx itt 0x%x]\n", (long)sc, ctask->itt);
+success:
 	spin_unlock_bh(&session->lock);
-
-	/*
-	 * clean up task if aborted. we have the xmitmutex so grab
-	 * the recv lock as a writer
-	 */
-	write_lock_bh(conn->recv_lock);
-	spin_lock(&session->lock);
-	fail_command(conn, ctask, DID_ABORT << 16);
-	spin_unlock(&session->lock);
-	write_unlock_bh(conn->recv_lock);
-
-success_rel_mutex:
-	mutex_unlock(&conn->xmitmutex);
+success_unlocked:
+	debug_scsi("abort success [sc %lx itt 0x%x]\n", (long)sc, ctask->itt);
 	return SUCCESS;
 
 failed:
 	spin_unlock_bh(&session->lock);
-	mutex_unlock(&conn->xmitmutex);
-
+failed_unlocked:
 	debug_scsi("abort failed [sc %lx itt 0x%x]\n", (long)sc, ctask->itt);
 	return FAILED;
 }
@@ -1505,11 +1496,6 @@ iscsi_conn_setup(struct iscsi_cls_session *cls_session, uint32_t conn_idx)
 	INIT_LIST_HEAD(&conn->xmitqueue);
 
 	/* initialize general immediate & non-immediate PDU commands queue */
-	conn->immqueue = kfifo_alloc(session->mgmtpool_max * sizeof(void*),
-			GFP_KERNEL, NULL);
-	if (conn->immqueue == ERR_PTR(-ENOMEM))
-		goto immqueue_alloc_fail;
-
 	conn->mgmtqueue = kfifo_alloc(session->mgmtpool_max * sizeof(void*),
 			GFP_KERNEL, NULL);
 	if (conn->mgmtqueue == ERR_PTR(-ENOMEM))
@@ -1533,7 +1519,6 @@ iscsi_conn_setup(struct iscsi_cls_session *cls_session, uint32_t conn_idx)
 	conn->login_mtask->data = conn->data = data;
 
 	init_timer(&conn->tmabort_timer);
-	mutex_init(&conn->xmitmutex);
 	init_waitqueue_head(&conn->ehwait);
 
 	return cls_conn;
@@ -1544,8 +1529,6 @@ login_mtask_data_alloc_fail:
 login_mtask_alloc_fail:
 	kfifo_free(conn->mgmtqueue);
 mgmtqueue_alloc_fail:
-	kfifo_free(conn->immqueue);
-immqueue_alloc_fail:
 	iscsi_destroy_conn(cls_conn);
 	return NULL;
 }
@@ -1564,10 +1547,8 @@ void iscsi_conn_teardown(struct iscsi_cls_conn *cls_conn)
 	struct iscsi_session *session = conn->session;
 	unsigned long flags;
 
-	set_bit(ISCSI_SUSPEND_BIT, &conn->suspend_tx);
-	mutex_lock(&conn->xmitmutex);
-
 	spin_lock_bh(&session->lock);
+	set_bit(ISCSI_SUSPEND_BIT, &conn->suspend_tx);
 	conn->c_stage = ISCSI_CONN_CLEANUP_WAIT;
 	if (session->leadconn == conn) {
 		/*
@@ -1578,8 +1559,6 @@ void iscsi_conn_teardown(struct iscsi_cls_conn *cls_conn)
 	}
 	spin_unlock_bh(&session->lock);
 
-	mutex_unlock(&conn->xmitmutex);
-
 	/*
 	 * Block until all in-progress commands for this connection
 	 * time out or fail.
@@ -1616,7 +1595,6 @@ void iscsi_conn_teardown(struct iscsi_cls_conn *cls_conn)
 	}
 	spin_unlock_bh(&session->lock);
 
-	kfifo_free(conn->immqueue);
 	kfifo_free(conn->mgmtqueue);
 
 	iscsi_destroy_conn(cls_conn);
@@ -1677,8 +1655,7 @@ flush_control_queues(struct iscsi_session *session, struct iscsi_conn *conn)
 	struct iscsi_mgmt_task *mtask, *tmp;
 
 	/* handle pending */
-	while (__kfifo_get(conn->immqueue, (void*)&mtask, sizeof(void*)) ||
-	       __kfifo_get(conn->mgmtqueue, (void*)&mtask, sizeof(void*))) {
+	while (__kfifo_get(conn->mgmtqueue, (void*)&mtask, sizeof(void*))) {
 		if (mtask == conn->login_mtask)
 			continue;
 		debug_scsi("flushing pending mgmt task itt 0x%x\n", mtask->itt);
@@ -1748,12 +1725,12 @@ static void iscsi_start_session_recovery(struct iscsi_session *session,
 	conn->c_stage = ISCSI_CONN_STOPPED;
 	set_bit(ISCSI_SUSPEND_BIT, &conn->suspend_tx);
 	spin_unlock_bh(&session->lock);
+	scsi_flush_work(session->host);
 
 	write_lock_bh(conn->recv_lock);
 	set_bit(ISCSI_SUSPEND_BIT, &conn->suspend_rx);
 	write_unlock_bh(conn->recv_lock);
 
-	mutex_lock(&conn->xmitmutex);
 	/*
 	 * for connection level recovery we should not calculate
 	 * header digest. conn->hdr_size used for optimization
@@ -1777,8 +1754,6 @@ static void iscsi_start_session_recovery(struct iscsi_session *session,
 	fail_all_commands(conn);
 	flush_control_queues(session, conn);
 	spin_unlock_bh(&session->lock);
-
-	mutex_unlock(&conn->xmitmutex);
 }
 
 void iscsi_conn_stop(struct iscsi_cls_conn *cls_conn, int flag)