author     Hiral Patel <hiralpat@cisco.com>            2013-02-12 20:00:58 -0500
committer  James Bottomley <JBottomley@Parallels.com>  2013-02-22 12:28:19 -0500
commit     03298552cba38f7c805ed338826dc76c405465c7 (patch)
tree       c7a5fd280335ad68e8c69f4cbab36658fb616620 /drivers/scsi
parent     bfb4809f7fff2f2db3d0de41ea4f49fd3f2f0aa4 (diff)
[SCSI] fnic: fixing issues in device and firmware reset code
1. Handling overlapped firmware resets

   This fix serializes multiple firmware resets to avoid a situation where the
   fnic device fails to come up for a link-up event when firmware resets are
   issued back to back. If overlapped firmware resets are issued, the firmware
   reset operation checks whether a firmware reset is already in progress and,
   if so, polls for its completion in a loop with a 100ms delay.

2. Handling device reset timeout

   The fnic_device_reset code has been modified to handle device reset timeout:
   - Issue a terminate on device reset timeout.
   - Introduce a flags field (one of the scratch fields in scsi_cmnd). With
     this, a device reset request carries the DEVICE_RESET flag so that other
     routines can determine the type of the request. fnic_terminate_rport_io,
     fnic_rport_exch_reset, and the completion routines have also been modified
     to handle SCSI commands with the DEVICE_RESET flag set.

3. LUN/device reset hangs when issued through an IOCTL using utilities such as
   sg_reset

   Each SCSI command is associated with a valid tag, and fnic uses this tag to
   retrieve the associated SCSI command on completion. A LUN/device reset
   issued through an IOCTL results in a SCSI command that is not associated
   with a valid tag, so fnic fails to retrieve the associated SCSI command on
   completion, which causes the hang. This fix allocates a tag, associates it
   with the SCSI command, and frees the tag when the operation completes.

4. Preventing IOs during firmware reset

   The current fnic implementation allows IO submissions during firmware
   reset. This fix synchronizes IO submissions and firmware reset operations;
   it ensures that IOs issued to fnic prior to the reset are issued to the
   firmware before the firmware reset.

Signed-off-by: Narsimhulu Musini <nmusini@cisco.com>
Signed-off-by: Hiral Patel <hiralpat@cisco.com>
Signed-off-by: James Bottomley <JBottomley@Parallels.com>
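Points 1 and 4 rely on one handshake between the IO path and the reset path: the reset side raises a state flag under the host lock, waits for the in-flight IO counter to drain, and the IO side refuses new commands while the flag is set. The stand-alone C sketch below only illustrates that pattern under stated assumptions; a pthread mutex stands in for the SCSI host lock, usleep() stands in for schedule_timeout(), and names such as demo_host and demo_queue_io are illustrative, not part of the driver. The real code is in the __fnic_set_state_flags(), fnic_fw_reset_handler() and fnic_queuecommand_lck() hunks further down; the device-reset-timeout handling (point 2) and the tag helpers (point 3) are likewise visible directly in the fnic_device_reset() and fnic_scsi_host_start_tag()/fnic_scsi_host_end_tag() hunks.

/* Simplified, user-space sketch of the state-flag/in-flight handshake. */
#include <pthread.h>
#include <stdatomic.h>
#include <stdio.h>
#include <unistd.h>

#define DEMO_FLAGS_FWRESET   (1UL << 0)  /* firmware reset in progress */
#define DEMO_FLAGS_BLOCK_IO  (1UL << 1)  /* IO submissions are blocked */

struct demo_host {
        pthread_mutex_t lock;      /* stands in for the SCSI host lock */
        unsigned long state_flags; /* protected by lock */
        atomic_int in_flight;      /* IOs currently handed to the "firmware" */
};

/* Analogue of __fnic_set_state_flags(): set or clear bits under the lock. */
static void demo_set_flags(struct demo_host *h, unsigned long bits, int clear)
{
        pthread_mutex_lock(&h->lock);
        if (clear)
                h->state_flags &= ~bits;
        else
                h->state_flags |= bits;
        pthread_mutex_unlock(&h->lock);
}

/* IO path: refuse new work while a reset has blocked IO, else count it. */
static int demo_queue_io(struct demo_host *h)
{
        int blocked;

        pthread_mutex_lock(&h->lock);
        blocked = (h->state_flags & DEMO_FLAGS_BLOCK_IO) != 0;
        if (!blocked)
                atomic_fetch_add(&h->in_flight, 1);
        pthread_mutex_unlock(&h->lock);
        if (blocked)
                return -1;              /* caller must retry later */

        /* ... build and post the IO descriptor to the firmware queue ... */

        atomic_fetch_sub(&h->in_flight, 1);
        return 0;
}

/* Reset path: block new IO, drain in-flight IO, reset, then unblock. */
static void demo_fw_reset(struct demo_host *h)
{
        demo_set_flags(h, DEMO_FLAGS_FWRESET | DEMO_FLAGS_BLOCK_IO, 0);
        while (atomic_load(&h->in_flight))
                usleep(1000);           /* the driver uses schedule_timeout() */

        /* ... issue the reset to the firmware here ... */

        /* in the driver the reset completion handler clears the flags */
        demo_set_flags(h, DEMO_FLAGS_FWRESET | DEMO_FLAGS_BLOCK_IO, 1);
}

int main(void)
{
        struct demo_host h = { PTHREAD_MUTEX_INITIALIZER, 0, 0 };

        demo_queue_io(&h);   /* accepted: no reset in progress */
        demo_fw_reset(&h);   /* blocks IO, drains, resets, unblocks */
        printf("state_flags after reset: %#lx\n", h.state_flags);
        return 0;
}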
Diffstat (limited to 'drivers/scsi')
-rw-r--r--  drivers/scsi/fnic/fnic.h       |  42
-rw-r--r--  drivers/scsi/fnic/fnic_main.c  |   3
-rw-r--r--  drivers/scsi/fnic/fnic_scsi.c  | 384
3 files changed, 397 insertions(+), 32 deletions(-)
diff --git a/drivers/scsi/fnic/fnic.h b/drivers/scsi/fnic/fnic.h
index 95a5ba29320d..63b35c8e40bd 100644
--- a/drivers/scsi/fnic/fnic.h
+++ b/drivers/scsi/fnic/fnic.h
@@ -56,6 +56,19 @@
 #define FNIC_NO_TAG             -1
 
 /*
+ * Command flags to identify the type of command and for other future
+ * use.
+ */
+#define FNIC_NO_FLAGS                   0
+#define FNIC_CDB_REQ                    BIT(1)  /* All IOs with a valid CDB */
+#define FNIC_BLOCKING_REQ               BIT(2)  /* All blocking Requests */
+#define FNIC_DEVICE_RESET               BIT(3)  /* Device reset request */
+#define FNIC_DEV_RST_PENDING            BIT(4)  /* Device reset pending */
+#define FNIC_DEV_RST_TIMED_OUT          BIT(5)  /* Device reset timed out */
+#define FNIC_DEV_RST_TERM_ISSUED        BIT(6)  /* Device reset terminate */
+#define FNIC_DEV_RST_DONE               BIT(7)  /* Device reset done */
+
+/*
  * Usage of the scsi_cmnd scratchpad.
  * These fields are locked by the hashed io_req_lock.
  */
@@ -64,6 +77,7 @@
 #define CMD_ABTS_STATUS(Cmnd)   ((Cmnd)->SCp.Message)
 #define CMD_LR_STATUS(Cmnd)     ((Cmnd)->SCp.have_data_in)
 #define CMD_TAG(Cmnd)           ((Cmnd)->SCp.sent_command)
+#define CMD_FLAGS(Cmnd)         ((Cmnd)->SCp.Status)
 
 #define FCPIO_INVALID_CODE 0x100 /* hdr_status value unused by firmware */
 
@@ -71,9 +85,28 @@
 #define FNIC_HOST_RESET_TIMEOUT      10000  /* mSec */
 #define FNIC_RMDEVICE_TIMEOUT        1000   /* mSec */
 #define FNIC_HOST_RESET_SETTLE_TIME  30     /* Sec */
+#define FNIC_ABT_TERM_DELAY_TIMEOUT  500    /* mSec */
 
 #define FNIC_MAX_FCP_TARGET     256
 
+/**
+ * state_flags to identify host state along with fnic's state
+ **/
+#define __FNIC_FLAGS_FWRESET    BIT(0) /* fwreset in progress */
+#define __FNIC_FLAGS_BLOCK_IO   BIT(1) /* IOs are blocked */
+
+#define FNIC_FLAGS_NONE         (0)
+#define FNIC_FLAGS_FWRESET      (__FNIC_FLAGS_FWRESET | \
+                                 __FNIC_FLAGS_BLOCK_IO)
+
+#define FNIC_FLAGS_IO_BLOCKED   (__FNIC_FLAGS_BLOCK_IO)
+
+#define fnic_set_state_flags(fnicp, st_flags)   \
+        __fnic_set_state_flags(fnicp, st_flags, 0)
+
+#define fnic_clear_state_flags(fnicp, st_flags) \
+        __fnic_set_state_flags(fnicp, st_flags, 1)
+
 extern unsigned int fnic_log_level;
 
 #define FNIC_MAIN_LOGGING 0x01
@@ -170,6 +203,9 @@ struct fnic {
 
         struct completion *remove_wait; /* device remove thread blocks */
 
+        atomic_t in_flight;             /* io counter */
+        u32 _reserved;                  /* fill hole */
+        unsigned long state_flags;      /* protected by host lock */
         enum fnic_state state;
         spinlock_t fnic_lock;
 
@@ -267,4 +303,10 @@ const char *fnic_state_to_str(unsigned int state);
 void fnic_log_q_error(struct fnic *fnic);
 void fnic_handle_link_event(struct fnic *fnic);
 
+static inline int
+fnic_chk_state_flags_locked(struct fnic *fnic, unsigned long st_flags)
+{
+        return ((fnic->state_flags & st_flags) == st_flags);
+}
+void __fnic_set_state_flags(struct fnic *, unsigned long, unsigned long);
 #endif /* _FNIC_H_ */
diff --git a/drivers/scsi/fnic/fnic_main.c b/drivers/scsi/fnic/fnic_main.c
index fbf3ac6e0c55..fcecbb7281aa 100644
--- a/drivers/scsi/fnic/fnic_main.c
+++ b/drivers/scsi/fnic/fnic_main.c
@@ -624,6 +624,9 @@ static int fnic_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
         }
         fnic->state = FNIC_IN_FC_MODE;
 
+        atomic_set(&fnic->in_flight, 0);
+        fnic->state_flags = FNIC_FLAGS_NONE;
+
         /* Enable hardware stripping of vlan header on ingress */
         fnic_set_nic_config(fnic, 0, 0, 0, 0, 0, 0, 1);
 
diff --git a/drivers/scsi/fnic/fnic_scsi.c b/drivers/scsi/fnic/fnic_scsi.c
index c40ce52ed7c6..2f46509f5b5a 100644
--- a/drivers/scsi/fnic/fnic_scsi.c
+++ b/drivers/scsi/fnic/fnic_scsi.c
@@ -165,6 +165,33 @@ static int free_wq_copy_descs(struct fnic *fnic, struct vnic_wq_copy *wq)
 }
 
 
+/**
+ * __fnic_set_state_flags
+ * Sets/Clears bits in fnic's state_flags
+ **/
+void
+__fnic_set_state_flags(struct fnic *fnic, unsigned long st_flags,
+                       unsigned long clearbits)
+{
+        struct Scsi_Host *host = fnic->lport->host;
+        int sh_locked = spin_is_locked(host->host_lock);
+        unsigned long flags = 0;
+
+        if (!sh_locked)
+                spin_lock_irqsave(host->host_lock, flags);
+
+        if (clearbits)
+                fnic->state_flags &= ~st_flags;
+        else
+                fnic->state_flags |= st_flags;
+
+        if (!sh_locked)
+                spin_unlock_irqrestore(host->host_lock, flags);
+
+        return;
+}
+
+
 /*
  * fnic_fw_reset_handler
  * Routine to send reset msg to fw
@@ -175,9 +202,16 @@ int fnic_fw_reset_handler(struct fnic *fnic)
         int ret = 0;
         unsigned long flags;
 
+        /* indicate fwreset to io path */
+        fnic_set_state_flags(fnic, FNIC_FLAGS_FWRESET);
+
         skb_queue_purge(&fnic->frame_queue);
         skb_queue_purge(&fnic->tx_queue);
 
+        /* wait for io cmpl */
+        while (atomic_read(&fnic->in_flight))
+                schedule_timeout(msecs_to_jiffies(1));
+
         spin_lock_irqsave(&fnic->wq_copy_lock[0], flags);
 
         if (vnic_wq_copy_desc_avail(wq) <= fnic->wq_copy_desc_low[0])
@@ -193,9 +227,12 @@ int fnic_fw_reset_handler(struct fnic *fnic)
         if (!ret)
                 FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host,
                               "Issued fw reset\n");
-        else
+        else {
+                fnic_clear_state_flags(fnic, FNIC_FLAGS_FWRESET);
                 FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host,
                               "Failed to issue fw reset\n");
+        }
+
         return ret;
 }
 
@@ -351,16 +388,19 @@ static inline int fnic_queue_wq_copy_desc(struct fnic *fnic,
  */
 static int fnic_queuecommand_lck(struct scsi_cmnd *sc, void (*done)(struct scsi_cmnd *))
 {
-        struct fc_lport *lp;
+        struct fc_lport *lp = shost_priv(sc->device->host);
         struct fc_rport *rport;
         struct fnic_io_req *io_req;
-        struct fnic *fnic;
+        struct fnic *fnic = lport_priv(lp);
         struct vnic_wq_copy *wq;
         int ret;
         int sg_count;
         unsigned long flags;
         unsigned long ptr;
 
+        if (unlikely(fnic_chk_state_flags_locked(fnic, FNIC_FLAGS_IO_BLOCKED)))
+                return SCSI_MLQUEUE_HOST_BUSY;
+
         rport = starget_to_rport(scsi_target(sc->device));
         ret = fc_remote_port_chkready(rport);
         if (ret) {
@@ -369,20 +409,20 @@ static int fnic_queuecommand_lck(struct scsi_cmnd *sc, void (*done)(struct scsi_
                 return 0;
         }
 
-        lp = shost_priv(sc->device->host);
         if (lp->state != LPORT_ST_READY || !(lp->link_up))
                 return SCSI_MLQUEUE_HOST_BUSY;
 
+        atomic_inc(&fnic->in_flight);
+
         /*
          * Release host lock, use driver resource specific locks from here.
          * Don't re-enable interrupts in case they were disabled prior to the
          * caller disabling them.
          */
         spin_unlock(lp->host->host_lock);
+        CMD_FLAGS(sc) = FNIC_CDB_REQ;
 
         /* Get a new io_req for this SCSI IO */
-        fnic = lport_priv(lp);
-
         io_req = mempool_alloc(fnic->io_req_pool, GFP_ATOMIC);
         if (!io_req) {
                 ret = SCSI_MLQUEUE_HOST_BUSY;
@@ -452,6 +492,7 @@ static int fnic_queuecommand_lck(struct scsi_cmnd *sc, void (*done)(struct scsi_
                 }
         }
 out:
+        atomic_dec(&fnic->in_flight);
         /* acquire host lock before returning to SCSI */
         spin_lock(lp->host->host_lock);
         return ret;
@@ -529,6 +570,8 @@ static int fnic_fcpio_fw_reset_cmpl_handler(struct fnic *fnic,
         fnic_flush_tx(fnic);
 
  reset_cmpl_handler_end:
+        fnic_clear_state_flags(fnic, FNIC_FLAGS_FWRESET);
+
         return ret;
 }
 
@@ -656,8 +699,12 @@ static void fnic_fcpio_icmnd_cmpl_handler(struct fnic *fnic,
         fcpio_header_dec(&desc->hdr, &type, &hdr_status, &tag);
         fcpio_tag_id_dec(&tag, &id);
 
-        if (id >= FNIC_MAX_IO_REQ)
+        if (id >= FNIC_MAX_IO_REQ) {
+                shost_printk(KERN_ERR, fnic->lport->host,
+                             "Tag out of range tag %x hdr status = %s\n",
+                             id, fnic_fcpio_status_to_str(hdr_status));
                 return;
+        }
 
         sc = scsi_host_find_tag(fnic->lport->host, id);
         WARN_ON_ONCE(!sc);
@@ -805,8 +852,12 @@ static void fnic_fcpio_itmf_cmpl_handler(struct fnic *fnic,
         fcpio_header_dec(&desc->hdr, &type, &hdr_status, &tag);
         fcpio_tag_id_dec(&tag, &id);
 
-        if ((id & FNIC_TAG_MASK) >= FNIC_MAX_IO_REQ)
+        if ((id & FNIC_TAG_MASK) >= FNIC_MAX_IO_REQ) {
+                shost_printk(KERN_ERR, fnic->lport->host,
+                             "Tag out of range tag %x hdr status = %s\n",
+                             id, fnic_fcpio_status_to_str(hdr_status));
                 return;
+        }
 
         sc = scsi_host_find_tag(fnic->lport->host, id & FNIC_TAG_MASK);
         WARN_ON_ONCE(!sc);
@@ -822,7 +873,19 @@ static void fnic_fcpio_itmf_cmpl_handler(struct fnic *fnic,
                 return;
         }
 
-        if (id & FNIC_TAG_ABORT) {
+        if ((id & FNIC_TAG_ABORT) && (id & FNIC_TAG_DEV_RST)) {
+                /* Abort and terminate completion of device reset req */
+                /* REVISIT : Add asserts about various flags */
+                FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host,
+                              "dev reset abts cmpl recd. id %x status %s\n",
+                              id, fnic_fcpio_status_to_str(hdr_status));
+                CMD_STATE(sc) = FNIC_IOREQ_ABTS_COMPLETE;
+                CMD_ABTS_STATUS(sc) = hdr_status;
+                CMD_FLAGS(sc) |= FNIC_DEV_RST_DONE;
+                if (io_req->abts_done)
+                        complete(io_req->abts_done);
+                spin_unlock_irqrestore(io_lock, flags);
+        } else if (id & FNIC_TAG_ABORT) {
                 /* Completion of abort cmd */
                 if (CMD_STATE(sc) != FNIC_IOREQ_ABTS_PENDING) {
                         /* This is a late completion. Ignore it */
@@ -862,7 +925,27 @@ static void fnic_fcpio_itmf_cmpl_handler(struct fnic *fnic,
         } else if (id & FNIC_TAG_DEV_RST) {
                 /* Completion of device reset */
                 CMD_LR_STATUS(sc) = hdr_status;
+                if (CMD_STATE(sc) == FNIC_IOREQ_ABTS_PENDING) {
+                        spin_unlock_irqrestore(io_lock, flags);
+                        FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host,
+                                      "Terminate pending "
+                                      "dev reset cmpl recd. id %d status %s\n",
+                                      (int)(id & FNIC_TAG_MASK),
+                                      fnic_fcpio_status_to_str(hdr_status));
+                        return;
+                }
+                if (CMD_FLAGS(sc) & FNIC_DEV_RST_TIMED_OUT) {
+                        /* Need to wait for terminate completion */
+                        spin_unlock_irqrestore(io_lock, flags);
+                        FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host,
+                                      "dev reset cmpl recd after time out. "
+                                      "id %d status %s\n",
+                                      (int)(id & FNIC_TAG_MASK),
+                                      fnic_fcpio_status_to_str(hdr_status));
+                        return;
+                }
                 CMD_STATE(sc) = FNIC_IOREQ_CMD_COMPLETE;
+                CMD_FLAGS(sc) |= FNIC_DEV_RST_DONE;
                 FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host,
                               "dev reset cmpl recd. id %d status %s\n",
                               (int)(id & FNIC_TAG_MASK),
@@ -889,7 +972,6 @@ static int fnic_fcpio_cmpl_handler(struct vnic_dev *vdev,
                                    struct fcpio_fw_req *desc)
 {
         struct fnic *fnic = vnic_dev_priv(vdev);
-        int ret = 0;
 
         switch (desc->hdr.type) {
         case FCPIO_ACK: /* fw copied copy wq desc to its queue */
@@ -906,11 +988,11 @@ static int fnic_fcpio_cmpl_handler(struct vnic_dev *vdev,
 
         case FCPIO_FLOGI_REG_CMPL: /* fw completed flogi_reg */
         case FCPIO_FLOGI_FIP_REG_CMPL: /* fw completed flogi_fip_reg */
-                ret = fnic_fcpio_flogi_reg_cmpl_handler(fnic, desc);
+                fnic_fcpio_flogi_reg_cmpl_handler(fnic, desc);
                 break;
 
         case FCPIO_RESET_CMPL: /* fw completed reset */
-                ret = fnic_fcpio_fw_reset_cmpl_handler(fnic, desc);
+                fnic_fcpio_fw_reset_cmpl_handler(fnic, desc);
                 break;
 
         default:
@@ -920,7 +1002,7 @@ static int fnic_fcpio_cmpl_handler(struct vnic_dev *vdev,
                 break;
         }
 
-        return ret;
+        return 0;
 }
 
 /*
@@ -962,6 +1044,23 @@ static void fnic_cleanup_io(struct fnic *fnic, int exclude_id)
                 io_lock = fnic_io_lock_hash(fnic, sc);
                 spin_lock_irqsave(io_lock, flags);
                 io_req = (struct fnic_io_req *)CMD_SP(sc);
+                if ((CMD_FLAGS(sc) & FNIC_DEVICE_RESET) &&
+                    !(CMD_FLAGS(sc) & FNIC_DEV_RST_DONE)) {
+                        /*
+                         * We will be here only when FW completes reset
+                         * without sending completions for outstanding ios.
+                         */
+                        CMD_FLAGS(sc) |= FNIC_DEV_RST_DONE;
+                        if (io_req && io_req->dr_done)
+                                complete(io_req->dr_done);
+                        else if (io_req && io_req->abts_done)
+                                complete(io_req->abts_done);
+                        spin_unlock_irqrestore(io_lock, flags);
+                        continue;
+                } else if (CMD_FLAGS(sc) & FNIC_DEVICE_RESET) {
+                        spin_unlock_irqrestore(io_lock, flags);
+                        continue;
+                }
                 if (!io_req) {
                         spin_unlock_irqrestore(io_lock, flags);
                         goto cleanup_scsi_cmd;
@@ -1044,8 +1143,18 @@ static inline int fnic_queue_abort_io_req(struct fnic *fnic, int tag,
                                           struct fnic_io_req *io_req)
 {
         struct vnic_wq_copy *wq = &fnic->wq_copy[0];
+        struct Scsi_Host *host = fnic->lport->host;
         unsigned long flags;
 
+        spin_lock_irqsave(host->host_lock, flags);
+        if (unlikely(fnic_chk_state_flags_locked(fnic,
+                                                 FNIC_FLAGS_IO_BLOCKED))) {
+                spin_unlock_irqrestore(host->host_lock, flags);
+                return 1;
+        } else
+                atomic_inc(&fnic->in_flight);
+        spin_unlock_irqrestore(host->host_lock, flags);
+
         spin_lock_irqsave(&fnic->wq_copy_lock[0], flags);
 
         if (vnic_wq_copy_desc_avail(wq) <= fnic->wq_copy_desc_low[0])
@@ -1053,6 +1162,9 @@ static inline int fnic_queue_abort_io_req(struct fnic *fnic, int tag,
 
         if (!vnic_wq_copy_desc_avail(wq)) {
                 spin_unlock_irqrestore(&fnic->wq_copy_lock[0], flags);
+                atomic_dec(&fnic->in_flight);
+                shost_printk(KERN_DEBUG, fnic->lport->host,
+                             "fnic_queue_abort_io_req: failure: no descriptors\n");
                 return 1;
         }
         fnic_queue_wq_copy_desc_itmf(wq, tag | FNIC_TAG_ABORT,
@@ -1060,12 +1172,15 @@ static inline int fnic_queue_abort_io_req(struct fnic *fnic, int tag,
                                      fnic->config.ra_tov, fnic->config.ed_tov);
 
         spin_unlock_irqrestore(&fnic->wq_copy_lock[0], flags);
+        atomic_dec(&fnic->in_flight);
+
         return 0;
 }
 
-void fnic_rport_exch_reset(struct fnic *fnic, u32 port_id)
+static void fnic_rport_exch_reset(struct fnic *fnic, u32 port_id)
 {
         int tag;
+        int abt_tag;
         struct fnic_io_req *io_req;
         spinlock_t *io_lock;
         unsigned long flags;
@@ -1075,13 +1190,14 @@ void fnic_rport_exch_reset(struct fnic *fnic, u32 port_id)
 
         FNIC_SCSI_DBG(KERN_DEBUG,
                       fnic->lport->host,
-                      "fnic_rport_reset_exch called portid 0x%06x\n",
+                      "fnic_rport_exch_reset called portid 0x%06x\n",
                       port_id);
 
         if (fnic->in_remove)
                 return;
 
         for (tag = 0; tag < FNIC_MAX_IO_REQ; tag++) {
+                abt_tag = tag;
                 sc = scsi_host_find_tag(fnic->lport->host, tag);
                 if (!sc)
                         continue;
@@ -1096,6 +1212,15 @@ void fnic_rport_exch_reset(struct fnic *fnic, u32 port_id)
                         continue;
                 }
 
+                if ((CMD_FLAGS(sc) & FNIC_DEVICE_RESET) &&
+                    (!(CMD_FLAGS(sc) & FNIC_DEV_RST_PENDING))) {
+                        FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host,
+                                      "fnic_rport_exch_reset dev rst not pending sc 0x%p\n",
+                                      sc);
+                        spin_unlock_irqrestore(io_lock, flags);
+                        continue;
+                }
+
                 /*
                  * Found IO that is still pending with firmware and
                  * belongs to rport that went away
@@ -1104,9 +1229,22 @@ void fnic_rport_exch_reset(struct fnic *fnic, u32 port_id)
                         spin_unlock_irqrestore(io_lock, flags);
                         continue;
                 }
+                if (io_req->abts_done) {
+                        shost_printk(KERN_ERR, fnic->lport->host,
+                                     "fnic_rport_exch_reset: io_req->abts_done is set "
+                                     "state is %s\n",
+                                     fnic_ioreq_state_to_str(CMD_STATE(sc)));
+                }
+
                 old_ioreq_state = CMD_STATE(sc);
                 CMD_STATE(sc) = FNIC_IOREQ_ABTS_PENDING;
                 CMD_ABTS_STATUS(sc) = FCPIO_INVALID_CODE;
+                if (CMD_FLAGS(sc) & FNIC_DEVICE_RESET) {
+                        abt_tag = (tag | FNIC_TAG_DEV_RST);
+                        FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host,
+                                      "fnic_rport_exch_reset dev rst sc 0x%p\n",
+                                      sc);
+                }
 
                 BUG_ON(io_req->abts_done);
 
@@ -1118,7 +1256,7 @@ void fnic_rport_exch_reset(struct fnic *fnic, u32 port_id)
                 /* Now queue the abort command to firmware */
                 int_to_scsilun(sc->device->lun, &fc_lun);
 
-                if (fnic_queue_abort_io_req(fnic, tag,
+                if (fnic_queue_abort_io_req(fnic, abt_tag,
                                             FCPIO_ITMF_ABT_TASK_TERM,
                                             fc_lun.scsi_lun, io_req)) {
                         /*
@@ -1127,12 +1265,14 @@ void fnic_rport_exch_reset(struct fnic *fnic, u32 port_id)
                          * aborted later by scsi_eh, or cleaned up during
                          * lun reset
                          */
-                        io_lock = fnic_io_lock_hash(fnic, sc);
-
                         spin_lock_irqsave(io_lock, flags);
                         if (CMD_STATE(sc) == FNIC_IOREQ_ABTS_PENDING)
                                 CMD_STATE(sc) = old_ioreq_state;
                         spin_unlock_irqrestore(io_lock, flags);
+                } else {
+                        spin_lock_irqsave(io_lock, flags);
+                        CMD_FLAGS(sc) |= FNIC_DEV_RST_TERM_ISSUED;
+                        spin_unlock_irqrestore(io_lock, flags);
                 }
         }
 
@@ -1141,6 +1281,7 @@ void fnic_rport_exch_reset(struct fnic *fnic, u32 port_id)
 void fnic_terminate_rport_io(struct fc_rport *rport)
 {
         int tag;
+        int abt_tag;
         struct fnic_io_req *io_req;
         spinlock_t *io_lock;
         unsigned long flags;
@@ -1154,14 +1295,15 @@ void fnic_terminate_rport_io(struct fc_rport *rport)
 
         FNIC_SCSI_DBG(KERN_DEBUG,
                       fnic->lport->host, "fnic_terminate_rport_io called"
-                      " wwpn 0x%llx, wwnn0x%llx, portid 0x%06x\n",
-                      rport->port_name, rport->node_name,
+                      " wwpn 0x%llx, wwnn0x%llx, rport 0x%p, portid 0x%06x\n",
+                      rport->port_name, rport->node_name, rport,
                       rport->port_id);
 
         if (fnic->in_remove)
                 return;
 
         for (tag = 0; tag < FNIC_MAX_IO_REQ; tag++) {
+                abt_tag = tag;
                 sc = scsi_host_find_tag(fnic->lport->host, tag);
                 if (!sc)
                         continue;
@@ -1180,6 +1322,14 @@ void fnic_terminate_rport_io(struct fc_rport *rport)
                         continue;
                 }
 
+                if ((CMD_FLAGS(sc) & FNIC_DEVICE_RESET) &&
+                    (!(CMD_FLAGS(sc) & FNIC_DEV_RST_PENDING))) {
+                        FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host,
+                                      "fnic_terminate_rport_io dev rst not pending sc 0x%p\n",
+                                      sc);
+                        spin_unlock_irqrestore(io_lock, flags);
+                        continue;
+                }
                 /*
                  * Found IO that is still pending with firmware and
                  * belongs to rport that went away
@@ -1188,9 +1338,20 @@ void fnic_terminate_rport_io(struct fc_rport *rport)
                         spin_unlock_irqrestore(io_lock, flags);
                         continue;
                 }
+                if (io_req->abts_done) {
+                        shost_printk(KERN_ERR, fnic->lport->host,
+                                     "fnic_terminate_rport_io: io_req->abts_done is set "
+                                     "state is %s\n",
+                                     fnic_ioreq_state_to_str(CMD_STATE(sc)));
+                }
                 old_ioreq_state = CMD_STATE(sc);
                 CMD_STATE(sc) = FNIC_IOREQ_ABTS_PENDING;
                 CMD_ABTS_STATUS(sc) = FCPIO_INVALID_CODE;
+                if (CMD_FLAGS(sc) & FNIC_DEVICE_RESET) {
+                        abt_tag = (tag | FNIC_TAG_DEV_RST);
+                        FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host,
+                                      "fnic_terminate_rport_io dev rst sc 0x%p\n", sc);
+                }
 
                 BUG_ON(io_req->abts_done);
 
@@ -1203,7 +1364,7 @@ void fnic_terminate_rport_io(struct fc_rport *rport)
                 /* Now queue the abort command to firmware */
                 int_to_scsilun(sc->device->lun, &fc_lun);
 
-                if (fnic_queue_abort_io_req(fnic, tag,
+                if (fnic_queue_abort_io_req(fnic, abt_tag,
                                             FCPIO_ITMF_ABT_TASK_TERM,
                                             fc_lun.scsi_lun, io_req)) {
                         /*
@@ -1212,12 +1373,14 @@ void fnic_terminate_rport_io(struct fc_rport *rport)
                          * aborted later by scsi_eh, or cleaned up during
                          * lun reset
                          */
-                        io_lock = fnic_io_lock_hash(fnic, sc);
-
                         spin_lock_irqsave(io_lock, flags);
                         if (CMD_STATE(sc) == FNIC_IOREQ_ABTS_PENDING)
                                 CMD_STATE(sc) = old_ioreq_state;
                         spin_unlock_irqrestore(io_lock, flags);
+                } else {
+                        spin_lock_irqsave(io_lock, flags);
+                        CMD_FLAGS(sc) |= FNIC_DEV_RST_TERM_ISSUED;
+                        spin_unlock_irqrestore(io_lock, flags);
                 }
         }
 
@@ -1239,6 +1402,7 @@ int fnic_abort_cmd(struct scsi_cmnd *sc)
         int ret = SUCCESS;
         u32 task_req;
         struct scsi_lun fc_lun;
+        int tag;
         DECLARE_COMPLETION_ONSTACK(tm_done);
 
         /* Wait for rport to unblock */
@@ -1249,9 +1413,14 @@ int fnic_abort_cmd(struct scsi_cmnd *sc)
 
         fnic = lport_priv(lp);
         rport = starget_to_rport(scsi_target(sc->device));
-        FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host,
-                      "Abort Cmd called FCID 0x%x, LUN 0x%x TAG %d\n",
-                      rport->port_id, sc->device->lun, sc->request->tag);
+        tag = sc->request->tag;
+        FNIC_SCSI_DBG(KERN_DEBUG,
+                      fnic->lport->host,
+                      "Abort Cmd called FCID 0x%x, LUN 0x%x TAG %x flags %x\n",
+                      rport->port_id, sc->device->lun, tag, CMD_FLAGS(sc));
+
+        CMD_FLAGS(sc) = FNIC_NO_FLAGS;
+
 
         if (lp->state != LPORT_ST_READY || !(lp->link_up)) {
                 ret = FAILED;
@@ -1375,10 +1544,20 @@ static inline int fnic_queue_dr_io_req(struct fnic *fnic,
                                        struct fnic_io_req *io_req)
 {
         struct vnic_wq_copy *wq = &fnic->wq_copy[0];
+        struct Scsi_Host *host = fnic->lport->host;
         struct scsi_lun fc_lun;
         int ret = 0;
         unsigned long intr_flags;
 
+        spin_lock_irqsave(host->host_lock, intr_flags);
+        if (unlikely(fnic_chk_state_flags_locked(fnic,
+                                                 FNIC_FLAGS_IO_BLOCKED))) {
+                spin_unlock_irqrestore(host->host_lock, intr_flags);
+                return FAILED;
+        } else
+                atomic_inc(&fnic->in_flight);
+        spin_unlock_irqrestore(host->host_lock, intr_flags);
+
         spin_lock_irqsave(&fnic->wq_copy_lock[0], intr_flags);
 
         if (vnic_wq_copy_desc_avail(wq) <= fnic->wq_copy_desc_low[0])
@@ -1399,6 +1578,7 @@ static inline int fnic_queue_dr_io_req(struct fnic *fnic,
 
 lr_io_req_end:
         spin_unlock_irqrestore(&fnic->wq_copy_lock[0], intr_flags);
+        atomic_dec(&fnic->in_flight);
 
         return ret;
 }
@@ -1502,6 +1682,65 @@ clean_pending_aborts_end:
         return ret;
 }
 
+/**
+ * fnic_scsi_host_start_tag
+ * Allocates tagid from host's tag list
+ **/
+static inline int
+fnic_scsi_host_start_tag(struct fnic *fnic, struct scsi_cmnd *sc)
+{
+        struct blk_queue_tag *bqt = fnic->lport->host->bqt;
+        int tag, ret = SCSI_NO_TAG;
+
+        BUG_ON(!bqt);
+        if (!bqt) {
+                pr_err("Tags are not supported\n");
+                goto end;
+        }
+
+        do {
+                tag = find_next_zero_bit(bqt->tag_map, bqt->max_depth, 1);
+                if (tag >= bqt->max_depth) {
+                        pr_err("Tag allocation failure\n");
+                        goto end;
+                }
+        } while (test_and_set_bit(tag, bqt->tag_map));
+
+        bqt->tag_index[tag] = sc->request;
+        sc->request->tag = tag;
+        sc->tag = tag;
+        if (!sc->request->special)
+                sc->request->special = sc;
+
+        ret = tag;
+
+end:
+        return ret;
+}
+
+/**
+ * fnic_scsi_host_end_tag
+ * frees tag allocated by fnic_scsi_host_start_tag.
+ **/
+static inline void
+fnic_scsi_host_end_tag(struct fnic *fnic, struct scsi_cmnd *sc)
+{
+        struct blk_queue_tag *bqt = fnic->lport->host->bqt;
+        int tag = sc->request->tag;
+
+        if (tag == SCSI_NO_TAG)
+                return;
+
+        BUG_ON(!bqt || !bqt->tag_index[tag]);
+        if (!bqt)
+                return;
+
+        bqt->tag_index[tag] = NULL;
+        clear_bit(tag, bqt->tag_map);
+
+        return;
+}
+
 /*
  * SCSI Eh thread issues a Lun Reset when one or more commands on a LUN
  * fail to get aborted. It calls driver's eh_device_reset with a SCSI command
@@ -1517,7 +1756,10 @@ int fnic_device_reset(struct scsi_cmnd *sc)
         int ret = FAILED;
         spinlock_t *io_lock;
         unsigned long flags;
+        struct scsi_lun fc_lun;
+        int tag;
         DECLARE_COMPLETION_ONSTACK(tm_done);
+        int tag_gen_flag = 0;   /* to track tags allocated by fnic driver */
 
         /* Wait for rport to unblock */
         fc_block_scsi_eh(sc);
@@ -1529,8 +1771,8 @@ int fnic_device_reset(struct scsi_cmnd *sc)
 
         rport = starget_to_rport(scsi_target(sc->device));
         FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host,
-                      "Device reset called FCID 0x%x, LUN 0x%x\n",
-                      rport->port_id, sc->device->lun);
+                      "Device reset called FCID 0x%x, LUN 0x%x sc 0x%p\n",
+                      rport->port_id, sc->device->lun, sc);
 
         if (lp->state != LPORT_ST_READY || !(lp->link_up))
                 goto fnic_device_reset_end;
@@ -1539,6 +1781,16 @@ int fnic_device_reset(struct scsi_cmnd *sc)
         if (fc_remote_port_chkready(rport))
                 goto fnic_device_reset_end;
 
+        CMD_FLAGS(sc) = (FNIC_DEVICE_RESET | FNIC_BLOCKING_REQ);
+        /* Allocate tag if not present */
+
+        tag = sc->request->tag;
+        if (unlikely(tag < 0)) {
+                tag = fnic_scsi_host_start_tag(fnic, sc);
+                if (unlikely(tag == SCSI_NO_TAG))
+                        goto fnic_device_reset_end;
+                tag_gen_flag = 1;
+        }
         io_lock = fnic_io_lock_hash(fnic, sc);
         spin_lock_irqsave(io_lock, flags);
         io_req = (struct fnic_io_req *)CMD_SP(sc);
@@ -1562,8 +1814,7 @@ int fnic_device_reset(struct scsi_cmnd *sc)
         CMD_LR_STATUS(sc) = FCPIO_INVALID_CODE;
         spin_unlock_irqrestore(io_lock, flags);
 
-        FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host, "TAG %d\n",
-                      sc->request->tag);
+        FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host, "TAG %x\n", tag);
 
         /*
          * issue the device reset, if enqueue failed, clean up the ioreq
@@ -1576,6 +1827,9 @@ int fnic_device_reset(struct scsi_cmnd *sc)
                 io_req->dr_done = NULL;
                 goto fnic_device_reset_clean;
         }
+        spin_lock_irqsave(io_lock, flags);
+        CMD_FLAGS(sc) |= FNIC_DEV_RST_PENDING;
+        spin_unlock_irqrestore(io_lock, flags);
 
         /*
          * Wait on the local completion for LUN reset. The io_req may be
@@ -1588,12 +1842,13 @@ int fnic_device_reset(struct scsi_cmnd *sc)
         io_req = (struct fnic_io_req *)CMD_SP(sc);
         if (!io_req) {
                 spin_unlock_irqrestore(io_lock, flags);
+                FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host,
+                              "io_req is null tag 0x%x sc 0x%p\n", tag, sc);
                 goto fnic_device_reset_end;
         }
         io_req->dr_done = NULL;
 
         status = CMD_LR_STATUS(sc);
-        spin_unlock_irqrestore(io_lock, flags);
 
         /*
          * If lun reset not completed, bail out with failed. io_req
@@ -1602,7 +1857,53 @@ int fnic_device_reset(struct scsi_cmnd *sc)
         if (status == FCPIO_INVALID_CODE) {
                 FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host,
                               "Device reset timed out\n");
-                goto fnic_device_reset_end;
+                CMD_FLAGS(sc) |= FNIC_DEV_RST_TIMED_OUT;
+                spin_unlock_irqrestore(io_lock, flags);
+                int_to_scsilun(sc->device->lun, &fc_lun);
+                /*
+                 * Issue abort and terminate on the device reset request.
+                 * If q'ing of the abort fails, retry issue it after a delay.
+                 */
+                while (1) {
+                        spin_lock_irqsave(io_lock, flags);
+                        if (CMD_FLAGS(sc) & FNIC_DEV_RST_TERM_ISSUED) {
+                                spin_unlock_irqrestore(io_lock, flags);
+                                break;
+                        }
+                        spin_unlock_irqrestore(io_lock, flags);
+                        if (fnic_queue_abort_io_req(fnic,
+                                                    tag | FNIC_TAG_DEV_RST,
+                                                    FCPIO_ITMF_ABT_TASK_TERM,
+                                                    fc_lun.scsi_lun, io_req)) {
+                                wait_for_completion_timeout(&tm_done,
+                                        msecs_to_jiffies(FNIC_ABT_TERM_DELAY_TIMEOUT));
+                        } else {
+                                spin_lock_irqsave(io_lock, flags);
+                                CMD_FLAGS(sc) |= FNIC_DEV_RST_TERM_ISSUED;
+                                CMD_STATE(sc) = FNIC_IOREQ_ABTS_PENDING;
+                                io_req->abts_done = &tm_done;
+                                spin_unlock_irqrestore(io_lock, flags);
+                                FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host,
+                                              "Abort and terminate issued on Device reset "
+                                              "tag 0x%x sc 0x%p\n", tag, sc);
+                                break;
+                        }
+                }
+                while (1) {
+                        spin_lock_irqsave(io_lock, flags);
+                        if (!(CMD_FLAGS(sc) & FNIC_DEV_RST_DONE)) {
+                                spin_unlock_irqrestore(io_lock, flags);
+                                wait_for_completion_timeout(&tm_done,
+                                        msecs_to_jiffies(FNIC_LUN_RESET_TIMEOUT));
+                                break;
+                        } else {
+                                io_req = (struct fnic_io_req *)CMD_SP(sc);
+                                io_req->abts_done = NULL;
+                                goto fnic_device_reset_clean;
+                        }
+                }
+        } else {
+                spin_unlock_irqrestore(io_lock, flags);
         }
 
         /* Completed, but not successful, clean up the io_req, return fail */
@@ -1650,6 +1951,10 @@ fnic_device_reset_clean:
         }
 
 fnic_device_reset_end:
+        /* free tag if it is allocated */
+        if (unlikely(tag_gen_flag))
+                fnic_scsi_host_end_tag(fnic, sc);
+
         FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host,
                       "Returning from device reset %s\n",
                       (ret == SUCCESS) ?
@@ -1735,7 +2040,15 @@ void fnic_scsi_abort_io(struct fc_lport *lp)
         DECLARE_COMPLETION_ONSTACK(remove_wait);
 
         /* Issue firmware reset for fnic, wait for reset to complete */
+retry_fw_reset:
         spin_lock_irqsave(&fnic->fnic_lock, flags);
+        if (unlikely(fnic->state == FNIC_IN_FC_TRANS_ETH_MODE)) {
+                /* fw reset is in progress, poll for its completion */
+                spin_unlock_irqrestore(&fnic->fnic_lock, flags);
+                schedule_timeout(msecs_to_jiffies(100));
+                goto retry_fw_reset;
+        }
+
         fnic->remove_wait = &remove_wait;
         old_state = fnic->state;
         fnic->state = FNIC_IN_FC_TRANS_ETH_MODE;
@@ -1776,7 +2089,14 @@ void fnic_scsi_cleanup(struct fc_lport *lp)
         struct fnic *fnic = lport_priv(lp);
 
         /* issue fw reset */
+retry_fw_reset:
         spin_lock_irqsave(&fnic->fnic_lock, flags);
+        if (unlikely(fnic->state == FNIC_IN_FC_TRANS_ETH_MODE)) {
+                /* fw reset is in progress, poll for its completion */
+                spin_unlock_irqrestore(&fnic->fnic_lock, flags);
+                schedule_timeout(msecs_to_jiffies(100));
+                goto retry_fw_reset;
+        }
         old_state = fnic->state;
         fnic->state = FNIC_IN_FC_TRANS_ETH_MODE;
         fnic_update_mac_locked(fnic, fnic->ctlr.ctl_src_addr);