author     Hiral Patel <hiralpat@cisco.com>             2013-02-12 20:01:01 -0500
committer  James Bottomley <JBottomley@Parallels.com>   2013-02-22 12:31:09 -0500
commit     14eb5d905d16ecd33e5e3113eb44cfa2bb47e7d7 (patch)
tree       1f2b548b0d70db43afc389caa4151cbd2fa8f242
parent     a0bf1ca27b644c1c4b1f0ea2d81f99471b2549e8 (diff)
[SCSI] fnic: New debug flags and debug log messages
Added new fnic debug flags for identifying the IO state at every stage of
an IO while debugging, and added more log messages for better debugging
capability.

Signed-off-by: Sesidhar Baddela <sebaddel@cisco.com>
Signed-off-by: Hiral Patel <hiralpat@cisco.com>
Signed-off-by: James Bottomley <JBottomley@Parallels.com>
-rw-r--r--  drivers/scsi/fnic/fnic.h       |  31
-rw-r--r--  drivers/scsi/fnic/fnic_io.h    |   4
-rw-r--r--  drivers/scsi/fnic/fnic_scsi.c  | 118
3 files changed, 132 insertions, 21 deletions
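For reference only (not part of the patch): a minimal user-space sketch, assuming kernel-style BIT() semantics, of how the cumulative per-command flag bits added below are OR'd in at each stage and can later be decoded from a debug log. The decode_io_flags() helper is hypothetical and only illustrates reading the bits; in the driver the flags live in the scsi_cmnd scratchpad via CMD_FLAGS(sc).

#include <stdio.h>

#define BIT(n)                  (1UL << (n))
#define FNIC_NO_FLAGS           0
#define FNIC_IO_INITIALIZED     BIT(0)
#define FNIC_IO_ISSUED          BIT(1)
#define FNIC_IO_DONE            BIT(2)
#define FNIC_IO_ABTS_ISSUED     BIT(6)

/* Hypothetical decoder: print which milestones a flags word records. */
static void decode_io_flags(unsigned long flags)
{
        printf("flags 0x%lx:%s%s%s%s\n", flags,
               (flags & FNIC_IO_INITIALIZED) ? " INITIALIZED" : "",
               (flags & FNIC_IO_ISSUED)      ? " ISSUED"      : "",
               (flags & FNIC_IO_DONE)        ? " DONE"        : "",
               (flags & FNIC_IO_ABTS_ISSUED) ? " ABTS_ISSUED" : "");
}

int main(void)
{
        unsigned long flags = FNIC_NO_FLAGS;

        /* Each stage ORs in its bit, as the driver does with CMD_FLAGS(sc). */
        flags |= FNIC_IO_INITIALIZED;   /* io_req set up in queuecommand */
        flags |= FNIC_IO_ISSUED;        /* copy WQ descriptor posted to firmware */
        flags |= FNIC_IO_DONE;          /* firmware completion processed */

        decode_io_flags(flags);         /* prints: flags 0x7: INITIALIZED ISSUED DONE */
        return 0;
}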
diff --git a/drivers/scsi/fnic/fnic.h b/drivers/scsi/fnic/fnic.h
index b8e6644ad237..9c95a1ad56b9 100644
--- a/drivers/scsi/fnic/fnic.h
+++ b/drivers/scsi/fnic/fnic.h
@@ -59,14 +59,29 @@
  * Command flags to identify the type of command and for other future
  * use.
  */
 #define FNIC_NO_FLAGS                   0
-#define FNIC_CDB_REQ              BIT(1)       /* All IOs with a valid CDB */
-#define FNIC_BLOCKING_REQ         BIT(2)       /* All blocking Requests */
-#define FNIC_DEVICE_RESET         BIT(3)       /* Device reset request */
-#define FNIC_DEV_RST_PENDING      BIT(4)       /* Device reset pending */
-#define FNIC_DEV_RST_TIMED_OUT    BIT(5)       /* Device reset timed out */
-#define FNIC_DEV_RST_TERM_ISSUED  BIT(6)       /* Device reset terminate */
-#define FNIC_DEV_RST_DONE         BIT(7)       /* Device reset done */
+#define FNIC_IO_INITIALIZED             BIT(0)
+#define FNIC_IO_ISSUED                  BIT(1)
+#define FNIC_IO_DONE                    BIT(2)
+#define FNIC_IO_REQ_NULL                BIT(3)
+#define FNIC_IO_ABTS_PENDING            BIT(4)
+#define FNIC_IO_ABORTED                 BIT(5)
+#define FNIC_IO_ABTS_ISSUED             BIT(6)
+#define FNIC_IO_TERM_ISSUED             BIT(7)
+#define FNIC_IO_INTERNAL_TERM_ISSUED    BIT(8)
+#define FNIC_IO_ABT_TERM_DONE           BIT(9)
+#define FNIC_IO_ABT_TERM_REQ_NULL       BIT(10)
+#define FNIC_IO_ABT_TERM_TIMED_OUT      BIT(11)
+#define FNIC_DEVICE_RESET               BIT(12)  /* Device reset request */
+#define FNIC_DEV_RST_ISSUED             BIT(13)
+#define FNIC_DEV_RST_TIMED_OUT          BIT(14)
+#define FNIC_DEV_RST_ABTS_ISSUED        BIT(15)
+#define FNIC_DEV_RST_TERM_ISSUED        BIT(16)
+#define FNIC_DEV_RST_DONE               BIT(17)
+#define FNIC_DEV_RST_REQ_NULL           BIT(18)
+#define FNIC_DEV_RST_ABTS_DONE          BIT(19)
+#define FNIC_DEV_RST_TERM_DONE          BIT(20)
+#define FNIC_DEV_RST_ABTS_PENDING       BIT(21)
 
 /*
  * Usage of the scsi_cmnd scratchpad.
diff --git a/drivers/scsi/fnic/fnic_io.h b/drivers/scsi/fnic/fnic_io.h
index 3455c34ada48..c35b8f1889ea 100644
--- a/drivers/scsi/fnic/fnic_io.h
+++ b/drivers/scsi/fnic/fnic_io.h
@@ -45,7 +45,8 @@ enum fnic_sgl_list_type {
 };
 
 enum fnic_ioreq_state {
-        FNIC_IOREQ_CMD_PENDING = 0,
+        FNIC_IOREQ_NOT_INITED = 0,
+        FNIC_IOREQ_CMD_PENDING,
         FNIC_IOREQ_ABTS_PENDING,
         FNIC_IOREQ_ABTS_COMPLETE,
         FNIC_IOREQ_CMD_COMPLETE,
@@ -60,6 +61,7 @@ struct fnic_io_req {
         u8 sgl_type; /* device DMA descriptor list type */
         u8 io_completed:1; /* set to 1 when fw completes IO */
         u32 port_id; /* remote port DID */
+        unsigned long start_time; /* in jiffies */
         struct completion *abts_done; /* completion for abts */
         struct completion *dr_done; /* completion for device reset */
 };
diff --git a/drivers/scsi/fnic/fnic_scsi.c b/drivers/scsi/fnic/fnic_scsi.c
index 64830814da0d..7cb653309125 100644
--- a/drivers/scsi/fnic/fnic_scsi.c
+++ b/drivers/scsi/fnic/fnic_scsi.c
@@ -47,6 +47,7 @@ const char *fnic_state_str[] = {
 };
 
 static const char *fnic_ioreq_state_str[] = {
+        [FNIC_IOREQ_NOT_INITED] = "FNIC_IOREQ_NOT_INITED",
         [FNIC_IOREQ_CMD_PENDING] = "FNIC_IOREQ_CMD_PENDING",
         [FNIC_IOREQ_ABTS_PENDING] = "FNIC_IOREQ_ABTS_PENDING",
         [FNIC_IOREQ_ABTS_COMPLETE] = "FNIC_IOREQ_ABTS_COMPLETE",
@@ -349,6 +350,8 @@ static inline int fnic_queue_wq_copy_desc(struct fnic *fnic,
 
         if (unlikely(!vnic_wq_copy_desc_avail(wq))) {
                 spin_unlock_irqrestore(&fnic->wq_copy_lock[0], intr_flags);
+                FNIC_SCSI_DBG(KERN_INFO, fnic->lport->host,
+                          "fnic_queue_wq_copy_desc failure - no descriptors\n");
                 return SCSI_MLQUEUE_HOST_BUSY;
         }
 
@@ -420,7 +423,8 @@ static int fnic_queuecommand_lck(struct scsi_cmnd *sc, void (*done)(struct scsi_
          * caller disabling them.
          */
         spin_unlock(lp->host->host_lock);
-        CMD_FLAGS(sc) = FNIC_CDB_REQ;
+        CMD_STATE(sc) = FNIC_IOREQ_NOT_INITED;
+        CMD_FLAGS(sc) = FNIC_NO_FLAGS;
 
         /* Get a new io_req for this SCSI IO */
         io_req = mempool_alloc(fnic->io_req_pool, GFP_ATOMIC);
@@ -467,8 +471,10 @@ static int fnic_queuecommand_lck(struct scsi_cmnd *sc, void (*done)(struct scsi_
 
         /* initialize rest of io_req */
         io_req->port_id = rport->port_id;
+        io_req->start_time = jiffies;
         CMD_STATE(sc) = FNIC_IOREQ_CMD_PENDING;
         CMD_SP(sc) = (char *)io_req;
+        CMD_FLAGS(sc) |= FNIC_IO_INITIALIZED;
         sc->scsi_done = done;
 
         /* create copy wq desc and enqueue it */
@@ -490,6 +496,9 @@ static int fnic_queuecommand_lck(struct scsi_cmnd *sc, void (*done)(struct scsi_
                         fnic_release_ioreq_buf(fnic, io_req, sc);
                         mempool_free(io_req, fnic->io_req_pool);
                 }
+        } else {
+                /* REVISIT: Use per IO lock in the final code */
+                CMD_FLAGS(sc) |= FNIC_IO_ISSUED;
         }
 out:
         atomic_dec(&fnic->in_flight);
@@ -694,10 +703,12 @@ static void fnic_fcpio_icmnd_cmpl_handler(struct fnic *fnic,
         struct scsi_cmnd *sc;
         unsigned long flags;
         spinlock_t *io_lock;
+        unsigned long start_time;
 
         /* Decode the cmpl description to get the io_req id */
         fcpio_header_dec(&desc->hdr, &type, &hdr_status, &tag);
         fcpio_tag_id_dec(&tag, &id);
+        icmnd_cmpl = &desc->u.icmnd_cmpl;
 
         if (id >= FNIC_MAX_IO_REQ) {
                 shost_printk(KERN_ERR, fnic->lport->host,
@@ -708,17 +719,28 @@ static void fnic_fcpio_icmnd_cmpl_handler(struct fnic *fnic,
 
         sc = scsi_host_find_tag(fnic->lport->host, id);
         WARN_ON_ONCE(!sc);
-        if (!sc)
+        if (!sc) {
+                shost_printk(KERN_ERR, fnic->lport->host,
+                          "icmnd_cmpl sc is null - "
+                          "hdr status = %s tag = 0x%x desc = 0x%p\n",
+                          fnic_fcpio_status_to_str(hdr_status), id, desc);
                 return;
+        }
 
         io_lock = fnic_io_lock_hash(fnic, sc);
         spin_lock_irqsave(io_lock, flags);
         io_req = (struct fnic_io_req *)CMD_SP(sc);
         WARN_ON_ONCE(!io_req);
         if (!io_req) {
+                CMD_FLAGS(sc) |= FNIC_IO_REQ_NULL;
                 spin_unlock_irqrestore(io_lock, flags);
+                shost_printk(KERN_ERR, fnic->lport->host,
+                          "icmnd_cmpl io_req is null - "
+                          "hdr status = %s tag = 0x%x sc 0x%p\n",
+                          fnic_fcpio_status_to_str(hdr_status), id, sc);
                 return;
         }
+        start_time = io_req->start_time;
 
         /* firmware completed the io */
         io_req->io_completed = 1;
@@ -729,6 +751,28 @@ static void fnic_fcpio_icmnd_cmpl_handler(struct fnic *fnic,
          */
         if (CMD_STATE(sc) == FNIC_IOREQ_ABTS_PENDING) {
                 spin_unlock_irqrestore(io_lock, flags);
+                CMD_FLAGS(sc) |= FNIC_IO_ABTS_PENDING;
+                switch (hdr_status) {
+                case FCPIO_SUCCESS:
+                        CMD_FLAGS(sc) |= FNIC_IO_DONE;
+                        FNIC_SCSI_DBG(KERN_INFO, fnic->lport->host,
+                                  "icmnd_cmpl ABTS pending hdr status = %s "
+                                  "sc 0x%p scsi_status %x residual %d\n",
+                                  fnic_fcpio_status_to_str(hdr_status), sc,
+                                  icmnd_cmpl->scsi_status,
+                                  icmnd_cmpl->residual);
+                        break;
+                case FCPIO_ABORTED:
+                        CMD_FLAGS(sc) |= FNIC_IO_ABORTED;
+                        break;
+                default:
+                        FNIC_SCSI_DBG(KERN_INFO, fnic->lport->host,
+                                  "icmnd_cmpl abts pending "
+                                  "hdr status = %s tag = 0x%x sc = 0x%p\n",
+                                  fnic_fcpio_status_to_str(hdr_status),
+                                  id, sc);
+                        break;
+                }
                 return;
         }
 
@@ -812,6 +856,7 @@ static void fnic_fcpio_icmnd_cmpl_handler(struct fnic *fnic,
 
         /* Break link with the SCSI command */
         CMD_SP(sc) = NULL;
+        CMD_FLAGS(sc) |= FNIC_IO_DONE;
 
         spin_unlock_irqrestore(io_lock, flags);
 
@@ -848,6 +893,7 @@ static void fnic_fcpio_itmf_cmpl_handler(struct fnic *fnic,
         struct fnic_io_req *io_req;
         unsigned long flags;
         spinlock_t *io_lock;
+        unsigned long start_time;
 
         fcpio_header_dec(&desc->hdr, &type, &hdr_status, &tag);
         fcpio_tag_id_dec(&tag, &id);
@@ -861,17 +907,26 @@ static void fnic_fcpio_itmf_cmpl_handler(struct fnic *fnic,
 
         sc = scsi_host_find_tag(fnic->lport->host, id & FNIC_TAG_MASK);
         WARN_ON_ONCE(!sc);
-        if (!sc)
+        if (!sc) {
+                shost_printk(KERN_ERR, fnic->lport->host,
+                          "itmf_cmpl sc is null - hdr status = %s tag = 0x%x\n",
+                          fnic_fcpio_status_to_str(hdr_status), id);
                 return;
-
+        }
         io_lock = fnic_io_lock_hash(fnic, sc);
         spin_lock_irqsave(io_lock, flags);
         io_req = (struct fnic_io_req *)CMD_SP(sc);
         WARN_ON_ONCE(!io_req);
         if (!io_req) {
                 spin_unlock_irqrestore(io_lock, flags);
+                CMD_FLAGS(sc) |= FNIC_IO_ABT_TERM_REQ_NULL;
+                shost_printk(KERN_ERR, fnic->lport->host,
+                          "itmf_cmpl io_req is null - "
+                          "hdr status = %s tag = 0x%x sc 0x%p\n",
+                          fnic_fcpio_status_to_str(hdr_status), id, sc);
                 return;
         }
+        start_time = io_req->start_time;
 
         if ((id & FNIC_TAG_ABORT) && (id & FNIC_TAG_DEV_RST)) {
                 /* Abort and terminate completion of device reset req */
@@ -895,6 +950,7 @@ static void fnic_fcpio_itmf_cmpl_handler(struct fnic *fnic,
                 CMD_STATE(sc) = FNIC_IOREQ_ABTS_COMPLETE;
                 CMD_ABTS_STATUS(sc) = hdr_status;
 
+                CMD_FLAGS(sc) |= FNIC_IO_ABT_TERM_DONE;
                 FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host,
                               "abts cmpl recd. id %d status %s\n",
                               (int)(id & FNIC_TAG_MASK),
@@ -927,6 +983,7 @@ static void fnic_fcpio_itmf_cmpl_handler(struct fnic *fnic,
                 CMD_LR_STATUS(sc) = hdr_status;
                 if (CMD_STATE(sc) == FNIC_IOREQ_ABTS_PENDING) {
                         spin_unlock_irqrestore(io_lock, flags);
+                        CMD_FLAGS(sc) |= FNIC_DEV_RST_ABTS_PENDING;
                         FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host,
                                 "Terminate pending "
                                 "dev reset cmpl recd. id %d status %s\n",
@@ -1032,6 +1089,7 @@ static void fnic_cleanup_io(struct fnic *fnic, int exclude_id)
         unsigned long flags = 0;
         struct scsi_cmnd *sc;
         spinlock_t *io_lock;
+        unsigned long start_time = 0;
 
         for (i = 0; i < FNIC_MAX_IO_REQ; i++) {
                 if (i == exclude_id)
@@ -1074,6 +1132,7 @@ static void fnic_cleanup_io(struct fnic *fnic, int exclude_id)
                  * If there is a scsi_cmnd associated with this io_req, then
                  * free the corresponding state
                  */
+                start_time = io_req->start_time;
                 fnic_release_ioreq_buf(fnic, io_req, sc);
                 mempool_free(io_req, fnic->io_req_pool);
 
@@ -1097,6 +1156,7 @@ void fnic_wq_copy_cleanup_handler(struct vnic_wq_copy *wq,
         struct scsi_cmnd *sc;
         unsigned long flags;
         spinlock_t *io_lock;
+        unsigned long start_time = 0;
 
         /* get the tag reference */
         fcpio_tag_id_dec(&desc->hdr.tag, &id);
@@ -1126,6 +1186,7 @@ void fnic_wq_copy_cleanup_handler(struct vnic_wq_copy *wq,
 
         spin_unlock_irqrestore(io_lock, flags);
 
+        start_time = io_req->start_time;
         fnic_release_ioreq_buf(fnic, io_req, sc);
         mempool_free(io_req, fnic->io_req_pool);
 
@@ -1163,7 +1224,7 @@ static inline int fnic_queue_abort_io_req(struct fnic *fnic, int tag,
         if (!vnic_wq_copy_desc_avail(wq)) {
                 spin_unlock_irqrestore(&fnic->wq_copy_lock[0], flags);
                 atomic_dec(&fnic->in_flight);
-                shost_printk(KERN_DEBUG, fnic->lport->host,
+                FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host,
                         "fnic_queue_abort_io_req: failure: no descriptors\n");
                 return 1;
         }
@@ -1213,7 +1274,7 @@ static void fnic_rport_exch_reset(struct fnic *fnic, u32 port_id)
                 }
 
                 if ((CMD_FLAGS(sc) & FNIC_DEVICE_RESET) &&
-                    (!(CMD_FLAGS(sc) & FNIC_DEV_RST_PENDING))) {
+                    (!(CMD_FLAGS(sc) & FNIC_DEV_RST_ISSUED))) {
                         FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host,
                         "fnic_rport_exch_reset dev rst not pending sc 0x%p\n",
                         sc);
@@ -1236,6 +1297,13 @@ static void fnic_rport_exch_reset(struct fnic *fnic, u32 port_id)
                                 fnic_ioreq_state_to_str(CMD_STATE(sc)));
                 }
 
+                if (!(CMD_FLAGS(sc) & FNIC_IO_ISSUED)) {
+                        shost_printk(KERN_ERR, fnic->lport->host,
+                                  "rport_exch_reset "
+                                  "IO not yet issued %p tag 0x%x flags "
+                                  "%x state %d\n",
+                                  sc, tag, CMD_FLAGS(sc), CMD_STATE(sc));
+                }
                 old_ioreq_state = CMD_STATE(sc);
                 CMD_STATE(sc) = FNIC_IOREQ_ABTS_PENDING;
                 CMD_ABTS_STATUS(sc) = FCPIO_INVALID_CODE;
@@ -1273,6 +1341,8 @@ static void fnic_rport_exch_reset(struct fnic *fnic, u32 port_id)
                         spin_lock_irqsave(io_lock, flags);
                         if (CMD_FLAGS(sc) & FNIC_DEVICE_RESET)
                                 CMD_FLAGS(sc) |= FNIC_DEV_RST_TERM_ISSUED;
+                        else
+                                CMD_FLAGS(sc) |= FNIC_IO_INTERNAL_TERM_ISSUED;
                         spin_unlock_irqrestore(io_lock, flags);
                 }
         }
@@ -1324,7 +1394,7 @@ void fnic_terminate_rport_io(struct fc_rport *rport)
                 }
 
                 if ((CMD_FLAGS(sc) & FNIC_DEVICE_RESET) &&
-                    (!(CMD_FLAGS(sc) & FNIC_DEV_RST_PENDING))) {
+                    (!(CMD_FLAGS(sc) & FNIC_DEV_RST_ISSUED))) {
                         FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host,
                         "fnic_terminate_rport_io dev rst not pending sc 0x%p\n",
                         sc);
@@ -1345,6 +1415,13 @@ void fnic_terminate_rport_io(struct fc_rport *rport)
1345 "state is %s\n", 1415 "state is %s\n",
1346 fnic_ioreq_state_to_str(CMD_STATE(sc))); 1416 fnic_ioreq_state_to_str(CMD_STATE(sc)));
1347 } 1417 }
1418 if (!(CMD_FLAGS(sc) & FNIC_IO_ISSUED)) {
1419 FNIC_SCSI_DBG(KERN_INFO, fnic->lport->host,
1420 "fnic_terminate_rport_io "
1421 "IO not yet issued %p tag 0x%x flags "
1422 "%x state %d\n",
1423 sc, tag, CMD_FLAGS(sc), CMD_STATE(sc));
1424 }
1348 old_ioreq_state = CMD_STATE(sc); 1425 old_ioreq_state = CMD_STATE(sc);
1349 CMD_STATE(sc) = FNIC_IOREQ_ABTS_PENDING; 1426 CMD_STATE(sc) = FNIC_IOREQ_ABTS_PENDING;
1350 CMD_ABTS_STATUS(sc) = FCPIO_INVALID_CODE; 1427 CMD_ABTS_STATUS(sc) = FCPIO_INVALID_CODE;
@@ -1382,6 +1459,8 @@ void fnic_terminate_rport_io(struct fc_rport *rport)
                         spin_lock_irqsave(io_lock, flags);
                         if (CMD_FLAGS(sc) & FNIC_DEVICE_RESET)
                                 CMD_FLAGS(sc) |= FNIC_DEV_RST_TERM_ISSUED;
+                        else
+                                CMD_FLAGS(sc) |= FNIC_IO_INTERNAL_TERM_ISSUED;
                         spin_unlock_irqrestore(io_lock, flags);
                 }
         }
@@ -1401,8 +1480,9 @@ int fnic_abort_cmd(struct scsi_cmnd *sc)
         struct fc_rport *rport;
         spinlock_t *io_lock;
         unsigned long flags;
+        unsigned long start_time = 0;
         int ret = SUCCESS;
-        u32 task_req;
+        u32 task_req = 0;
         struct scsi_lun fc_lun;
         int tag;
         DECLARE_COMPLETION_ONSTACK(tm_done);
@@ -1489,6 +1569,10 @@ int fnic_abort_cmd(struct scsi_cmnd *sc)
                 ret = FAILED;
                 goto fnic_abort_cmd_end;
         }
+        if (task_req == FCPIO_ITMF_ABT_TASK)
+                CMD_FLAGS(sc) |= FNIC_IO_ABTS_ISSUED;
+        else
+                CMD_FLAGS(sc) |= FNIC_IO_TERM_ISSUED;
 
         /*
          * We queued an abort IO, wait for its completion.
@@ -1507,6 +1591,7 @@ int fnic_abort_cmd(struct scsi_cmnd *sc)
         io_req = (struct fnic_io_req *)CMD_SP(sc);
         if (!io_req) {
                 spin_unlock_irqrestore(io_lock, flags);
+                CMD_FLAGS(sc) |= FNIC_IO_ABT_TERM_REQ_NULL;
                 ret = FAILED;
                 goto fnic_abort_cmd_end;
         }
@@ -1515,6 +1600,7 @@ int fnic_abort_cmd(struct scsi_cmnd *sc)
         /* fw did not complete abort, timed out */
         if (CMD_STATE(sc) == FNIC_IOREQ_ABTS_PENDING) {
                 spin_unlock_irqrestore(io_lock, flags);
+                CMD_FLAGS(sc) |= FNIC_IO_ABT_TERM_TIMED_OUT;
                 ret = FAILED;
                 goto fnic_abort_cmd_end;
         }
@@ -1530,12 +1616,13 @@ int fnic_abort_cmd(struct scsi_cmnd *sc)
 
         spin_unlock_irqrestore(io_lock, flags);
 
+        start_time = io_req->start_time;
         fnic_release_ioreq_buf(fnic, io_req, sc);
         mempool_free(io_req, fnic->io_req_pool);
 
 fnic_abort_cmd_end:
         FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host,
-                      "Returning from abort cmd %s\n",
+                      "Returning from abort cmd type %x %s\n", task_req,
                       (ret == SUCCESS) ?
                       "SUCCESS" : "FAILED");
         return ret;
@@ -1566,6 +1653,8 @@ static inline int fnic_queue_dr_io_req(struct fnic *fnic,
                 free_wq_copy_descs(fnic, wq);
 
         if (!vnic_wq_copy_desc_avail(wq)) {
+                FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host,
+                          "queue_dr_io_req failure - no descriptors\n");
                 ret = -EAGAIN;
                 goto lr_io_req_end;
         }
@@ -1637,7 +1726,7 @@ static int fnic_clean_pending_aborts(struct fnic *fnic,
                         continue;
                 }
                 if ((CMD_FLAGS(sc) & FNIC_DEVICE_RESET) &&
-                    (!(CMD_FLAGS(sc) & FNIC_DEV_RST_PENDING))) {
+                    (!(CMD_FLAGS(sc) & FNIC_DEV_RST_ISSUED))) {
                         FNIC_SCSI_DBG(KERN_INFO, fnic->lport->host,
                                 "%s dev rst not pending sc 0x%p\n", __func__,
                                 sc);
@@ -1693,6 +1782,7 @@ static int fnic_clean_pending_aborts(struct fnic *fnic,
                         CMD_FLAGS(sc) |= FNIC_DEV_RST_TERM_ISSUED;
                         spin_unlock_irqrestore(io_lock, flags);
                 }
+                CMD_FLAGS(sc) |= FNIC_IO_INTERNAL_TERM_ISSUED;
 
                 wait_for_completion_timeout(&tm_done,
                                             msecs_to_jiffies
@@ -1703,6 +1793,7 @@ static int fnic_clean_pending_aborts(struct fnic *fnic,
                 io_req = (struct fnic_io_req *)CMD_SP(sc);
                 if (!io_req) {
                         spin_unlock_irqrestore(io_lock, flags);
+                        CMD_FLAGS(sc) |= FNIC_IO_ABT_TERM_REQ_NULL;
                         continue;
                 }
 
@@ -1711,6 +1802,7 @@ static int fnic_clean_pending_aborts(struct fnic *fnic,
                 /* if abort is still pending with fw, fail */
                 if (CMD_STATE(sc) == FNIC_IOREQ_ABTS_PENDING) {
                         spin_unlock_irqrestore(io_lock, flags);
+                        CMD_FLAGS(sc) |= FNIC_IO_ABT_TERM_DONE;
                         ret = 1;
                         goto clean_pending_aborts_end;
                 }
@@ -1805,6 +1897,7 @@ int fnic_device_reset(struct scsi_cmnd *sc)
         int ret = FAILED;
         spinlock_t *io_lock;
         unsigned long flags;
+        unsigned long start_time = 0;
         struct scsi_lun fc_lun;
         int tag;
         DECLARE_COMPLETION_ONSTACK(tm_done);
@@ -1830,7 +1923,7 @@ int fnic_device_reset(struct scsi_cmnd *sc)
         if (fc_remote_port_chkready(rport))
                 goto fnic_device_reset_end;
 
-        CMD_FLAGS(sc) = (FNIC_DEVICE_RESET | FNIC_BLOCKING_REQ);
+        CMD_FLAGS(sc) = FNIC_DEVICE_RESET;
         /* Allocate tag if not present */
 
         tag = sc->request->tag;
@@ -1877,7 +1970,7 @@ int fnic_device_reset(struct scsi_cmnd *sc)
                 goto fnic_device_reset_clean;
         }
         spin_lock_irqsave(io_lock, flags);
-        CMD_FLAGS(sc) |= FNIC_DEV_RST_PENDING;
+        CMD_FLAGS(sc) |= FNIC_DEV_RST_ISSUED;
         spin_unlock_irqrestore(io_lock, flags);
 
         /*
@@ -1995,6 +2088,7 @@ fnic_device_reset_clean:
         spin_unlock_irqrestore(io_lock, flags);
 
         if (io_req) {
+                start_time = io_req->start_time;
                 fnic_release_ioreq_buf(fnic, io_req, sc);
                 mempool_free(io_req, fnic->io_req_pool);
         }