Diffstat (limited to 'drivers/scsi/fnic/fnic_scsi.c')
-rw-r--r--  drivers/scsi/fnic/fnic_scsi.c | 253
 1 file changed, 236 insertions(+), 17 deletions(-)
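All of the counters touched by this patch hang off fnic->fnic_stats. The stats structures themselves are declared in fnic_stats.h, which is outside this diff; the sketch below only summarizes the members referenced in the hunks that follow, and the io_stats/fw_stats struct tags are assumptions rather than confirmed names:

	#include <linux/types.h>	/* atomic64_t */

	/* Hypothetical, abridged sketch; the real definitions live in fnic_stats.h. */
	struct io_path_stats {			/* "io_path_stats" tag is a guess */
		atomic64_t active_ios;		/* incremented on issue, decremented on completion */
		atomic64_t max_active_ios;	/* high-water mark of active_ios */
		atomic64_t num_ios;
		atomic64_t io_completions;
		atomic64_t io_failures;
		atomic64_t ioreq_null;
		atomic64_t sc_null;
		atomic64_t io_not_found;
		atomic64_t alloc_failures;
	};

Similar groups exist for the other counters used below (struct abort_stats, struct terminate_stats, struct reset_stats, struct misc_stats are the types the patch itself declares pointers to; a firmware-request group holds active_fw_reqs/max_fw_reqs). Every counter is updated with atomic64_inc()/atomic64_dec()/atomic64_set(); the one exception visible here is misc_stats.last_ack_time, which is assigned jiffies directly and so is presumably a plain 64-bit timestamp rather than an atomic64_t.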
diff --git a/drivers/scsi/fnic/fnic_scsi.c b/drivers/scsi/fnic/fnic_scsi.c
index d014aae19134..0521436d05d6 100644
--- a/drivers/scsi/fnic/fnic_scsi.c
+++ b/drivers/scsi/fnic/fnic_scsi.c
@@ -226,15 +226,23 @@ int fnic_fw_reset_handler(struct fnic *fnic)
 
 	if (!vnic_wq_copy_desc_avail(wq))
 		ret = -EAGAIN;
-	else
+	else {
 		fnic_queue_wq_copy_desc_fw_reset(wq, SCSI_NO_TAG);
+		atomic64_inc(&fnic->fnic_stats.fw_stats.active_fw_reqs);
+		if (atomic64_read(&fnic->fnic_stats.fw_stats.active_fw_reqs) >
+			  atomic64_read(&fnic->fnic_stats.fw_stats.max_fw_reqs))
+			atomic64_set(&fnic->fnic_stats.fw_stats.max_fw_reqs,
+				atomic64_read(
+				&fnic->fnic_stats.fw_stats.active_fw_reqs));
+	}
 
 	spin_unlock_irqrestore(&fnic->wq_copy_lock[0], flags);
 
-	if (!ret)
+	if (!ret) {
+		atomic64_inc(&fnic->fnic_stats.reset_stats.fw_resets);
 		FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host,
 			      "Issued fw reset\n");
-	else {
+	} else {
 		fnic_clear_state_flags(fnic, FNIC_FLAGS_FWRESET);
 		FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host,
 			      "Failed to issue fw reset\n");
@@ -291,6 +299,12 @@ int fnic_flogi_reg_handler(struct fnic *fnic, u32 fc_id)
 			fc_id, fnic->ctlr.map_dest, gw_mac);
 	}
 
+	atomic64_inc(&fnic->fnic_stats.fw_stats.active_fw_reqs);
+	if (atomic64_read(&fnic->fnic_stats.fw_stats.active_fw_reqs) >
+		  atomic64_read(&fnic->fnic_stats.fw_stats.max_fw_reqs))
+		atomic64_set(&fnic->fnic_stats.fw_stats.max_fw_reqs,
+		  atomic64_read(&fnic->fnic_stats.fw_stats.active_fw_reqs));
+
 flogi_reg_ioreq_end:
 	spin_unlock_irqrestore(&fnic->wq_copy_lock[0], flags);
 	return ret;
@@ -310,6 +324,7 @@ static inline int fnic_queue_wq_copy_desc(struct fnic *fnic,
 	struct fc_rport *rport = starget_to_rport(scsi_target(sc->device));
 	struct fc_rport_libfc_priv *rp = rport->dd_data;
 	struct host_sg_desc *desc;
+	struct misc_stats *misc_stats = &fnic->fnic_stats.misc_stats;
 	u8 pri_tag = 0;
 	unsigned int i;
 	unsigned long intr_flags;
@@ -358,6 +373,7 @@ static inline int fnic_queue_wq_copy_desc(struct fnic *fnic,
 		spin_unlock_irqrestore(&fnic->wq_copy_lock[0], intr_flags);
 		FNIC_SCSI_DBG(KERN_INFO, fnic->lport->host,
 			  "fnic_queue_wq_copy_desc failure - no descriptors\n");
+		atomic64_inc(&misc_stats->io_cpwq_alloc_failures);
 		return SCSI_MLQUEUE_HOST_BUSY;
 	}
 
@@ -386,6 +402,12 @@ static inline int fnic_queue_wq_copy_desc(struct fnic *fnic,
 					 rport->maxframe_size, rp->r_a_tov,
 					 rp->e_d_tov);
 
+	atomic64_inc(&fnic->fnic_stats.fw_stats.active_fw_reqs);
+	if (atomic64_read(&fnic->fnic_stats.fw_stats.active_fw_reqs) >
+		  atomic64_read(&fnic->fnic_stats.fw_stats.max_fw_reqs))
+		atomic64_set(&fnic->fnic_stats.fw_stats.max_fw_reqs,
+		  atomic64_read(&fnic->fnic_stats.fw_stats.active_fw_reqs));
+
 	spin_unlock_irqrestore(&fnic->wq_copy_lock[0], intr_flags);
 	return 0;
 }
@@ -401,6 +423,7 @@ static int fnic_queuecommand_lck(struct scsi_cmnd *sc, void (*done)(struct scsi_
 	struct fc_rport *rport;
 	struct fnic_io_req *io_req = NULL;
 	struct fnic *fnic = lport_priv(lp);
+	struct fnic_stats *fnic_stats = &fnic->fnic_stats;
 	struct vnic_wq_copy *wq;
 	int ret;
 	u64 cmd_trace;
@@ -414,6 +437,7 @@ static int fnic_queuecommand_lck(struct scsi_cmnd *sc, void (*done)(struct scsi_
 	rport = starget_to_rport(scsi_target(sc->device));
 	ret = fc_remote_port_chkready(rport);
 	if (ret) {
+		atomic64_inc(&fnic_stats->misc_stats.rport_not_ready);
 		sc->result = ret;
 		done(sc);
 		return 0;
@@ -436,6 +460,7 @@ static int fnic_queuecommand_lck(struct scsi_cmnd *sc, void (*done)(struct scsi_
 	/* Get a new io_req for this SCSI IO */
 	io_req = mempool_alloc(fnic->io_req_pool, GFP_ATOMIC);
 	if (!io_req) {
+		atomic64_inc(&fnic_stats->io_stats.alloc_failures);
 		ret = SCSI_MLQUEUE_HOST_BUSY;
 		goto out;
 	}
@@ -462,6 +487,7 @@ static int fnic_queuecommand_lck(struct scsi_cmnd *sc, void (*done)(struct scsi_
 			mempool_alloc(fnic->io_sgl_pool[io_req->sgl_type],
 				      GFP_ATOMIC);
 		if (!io_req->sgl_list) {
+			atomic64_inc(&fnic_stats->io_stats.alloc_failures);
 			ret = SCSI_MLQUEUE_HOST_BUSY;
 			scsi_dma_unmap(sc);
 			mempool_free(io_req, fnic->io_req_pool);
@@ -509,6 +535,13 @@ static int fnic_queuecommand_lck(struct scsi_cmnd *sc, void (*done)(struct scsi_
 			mempool_free(io_req, fnic->io_req_pool);
 		}
 	} else {
+		atomic64_inc(&fnic_stats->io_stats.active_ios);
+		atomic64_inc(&fnic_stats->io_stats.num_ios);
+		if (atomic64_read(&fnic_stats->io_stats.active_ios) >
+			  atomic64_read(&fnic_stats->io_stats.max_active_ios))
+			atomic64_set(&fnic_stats->io_stats.max_active_ios,
+			  atomic64_read(&fnic_stats->io_stats.active_ios));
+
 		/* REVISIT: Use per IO lock in the final code */
 		CMD_FLAGS(sc) |= FNIC_IO_ISSUED;
 	}
@@ -542,12 +575,18 @@ static int fnic_fcpio_fw_reset_cmpl_handler(struct fnic *fnic,
 	struct fcpio_tag tag;
 	int ret = 0;
 	unsigned long flags;
+	struct reset_stats *reset_stats = &fnic->fnic_stats.reset_stats;
 
 	fcpio_header_dec(&desc->hdr, &type, &hdr_status, &tag);
 
+	atomic64_inc(&reset_stats->fw_reset_completions);
+
 	/* Clean up all outstanding io requests */
 	fnic_cleanup_io(fnic, SCSI_NO_TAG);
 
+	atomic64_set(&fnic->fnic_stats.fw_stats.active_fw_reqs, 0);
+	atomic64_set(&fnic->fnic_stats.io_stats.active_ios, 0);
+
 	spin_lock_irqsave(&fnic->fnic_lock, flags);
 
 	/* fnic should be in FC_TRANS_ETH_MODE */
@@ -571,6 +610,7 @@ static int fnic_fcpio_fw_reset_cmpl_handler(struct fnic *fnic,
 			 * reset the firmware. Free the cached flogi
 			 */
 			fnic->state = FNIC_IN_FC_MODE;
+			atomic64_inc(&reset_stats->fw_reset_failures);
 			ret = -1;
 		}
 	} else {
@@ -578,6 +618,7 @@ static int fnic_fcpio_fw_reset_cmpl_handler(struct fnic *fnic,
 			      fnic->lport->host,
 			      "Unexpected state %s while processing"
 			      " reset cmpl\n", fnic_state_to_str(fnic->state));
+		atomic64_inc(&reset_stats->fw_reset_failures);
 		ret = -1;
 	}
 
@@ -701,10 +742,14 @@ static inline void fnic_fcpio_ack_handler(struct fnic *fnic,
 	wq = &fnic->wq_copy[cq_index - fnic->raw_wq_count - fnic->rq_count];
 	spin_lock_irqsave(&fnic->wq_copy_lock[0], flags);
 
+	fnic->fnic_stats.misc_stats.last_ack_time = jiffies;
 	if (is_ack_index_in_range(wq, request_out)) {
 		fnic->fw_ack_index[0] = request_out;
 		fnic->fw_ack_recd[0] = 1;
-	}
+	} else
+		atomic64_inc(
+		    &fnic->fnic_stats.misc_stats.ack_index_out_of_range);
+
 	spin_unlock_irqrestore(&fnic->wq_copy_lock[0], flags);
 	FNIC_TRACE(fnic_fcpio_ack_handler,
 		  fnic->lport->host->host_no, 0, 0, ox_id_tag[2], ox_id_tag[3],
@@ -726,6 +771,7 @@ static void fnic_fcpio_icmnd_cmpl_handler(struct fnic *fnic,
 	struct fcpio_icmnd_cmpl *icmnd_cmpl;
 	struct fnic_io_req *io_req;
 	struct scsi_cmnd *sc;
+	struct fnic_stats *fnic_stats = &fnic->fnic_stats;
 	unsigned long flags;
 	spinlock_t *io_lock;
 	u64 cmd_trace;
@@ -746,6 +792,7 @@ static void fnic_fcpio_icmnd_cmpl_handler(struct fnic *fnic,
 	sc = scsi_host_find_tag(fnic->lport->host, id);
 	WARN_ON_ONCE(!sc);
 	if (!sc) {
+		atomic64_inc(&fnic_stats->io_stats.sc_null);
 		shost_printk(KERN_ERR, fnic->lport->host,
 			  "icmnd_cmpl sc is null - "
 			  "hdr status = %s tag = 0x%x desc = 0x%p\n",
@@ -766,6 +813,7 @@ static void fnic_fcpio_icmnd_cmpl_handler(struct fnic *fnic,
 	io_req = (struct fnic_io_req *)CMD_SP(sc);
 	WARN_ON_ONCE(!io_req);
 	if (!io_req) {
+		atomic64_inc(&fnic_stats->io_stats.ioreq_null);
 		CMD_FLAGS(sc) |= FNIC_IO_REQ_NULL;
 		spin_unlock_irqrestore(io_lock, flags);
 		shost_printk(KERN_ERR, fnic->lport->host,
@@ -824,31 +872,54 @@ static void fnic_fcpio_icmnd_cmpl_handler(struct fnic *fnic,
 		if (icmnd_cmpl->flags & FCPIO_ICMND_CMPL_RESID_UNDER)
 			xfer_len -= icmnd_cmpl->residual;
 
+		if (icmnd_cmpl->scsi_status == SAM_STAT_TASK_SET_FULL)
+			atomic64_inc(&fnic_stats->misc_stats.queue_fulls);
 		break;
 
 	case FCPIO_TIMEOUT:          /* request was timed out */
+		atomic64_inc(&fnic_stats->misc_stats.fcpio_timeout);
 		sc->result = (DID_TIME_OUT << 16) | icmnd_cmpl->scsi_status;
 		break;
 
 	case FCPIO_ABORTED:          /* request was aborted */
+		atomic64_inc(&fnic_stats->misc_stats.fcpio_aborted);
 		sc->result = (DID_ERROR << 16) | icmnd_cmpl->scsi_status;
 		break;
 
 	case FCPIO_DATA_CNT_MISMATCH: /* recv/sent more/less data than exp. */
+		atomic64_inc(&fnic_stats->misc_stats.data_count_mismatch);
 		scsi_set_resid(sc, icmnd_cmpl->residual);
 		sc->result = (DID_ERROR << 16) | icmnd_cmpl->scsi_status;
 		break;
 
 	case FCPIO_OUT_OF_RESOURCE:  /* out of resources to complete request */
+		atomic64_inc(&fnic_stats->fw_stats.fw_out_of_resources);
 		sc->result = (DID_REQUEUE << 16) | icmnd_cmpl->scsi_status;
 		break;
-	case FCPIO_INVALID_HEADER:   /* header contains invalid data */
-	case FCPIO_INVALID_PARAM:    /* some parameter in request invalid */
-	case FCPIO_REQ_NOT_SUPPORTED:/* request type is not supported */
+
 	case FCPIO_IO_NOT_FOUND:     /* requested I/O was not found */
+		atomic64_inc(&fnic_stats->io_stats.io_not_found);
+		sc->result = (DID_ERROR << 16) | icmnd_cmpl->scsi_status;
+		break;
+
 	case FCPIO_SGL_INVALID:      /* request was aborted due to sgl error */
-	case FCPIO_MSS_INVALID:      /* request was aborted due to mss error */
+		atomic64_inc(&fnic_stats->misc_stats.sgl_invalid);
+		sc->result = (DID_ERROR << 16) | icmnd_cmpl->scsi_status;
+		break;
+
 	case FCPIO_FW_ERR:           /* request was terminated due fw error */
+		atomic64_inc(&fnic_stats->fw_stats.io_fw_errs);
+		sc->result = (DID_ERROR << 16) | icmnd_cmpl->scsi_status;
+		break;
+
+	case FCPIO_MSS_INVALID:      /* request was aborted due to mss error */
+		atomic64_inc(&fnic_stats->misc_stats.mss_invalid);
+		sc->result = (DID_ERROR << 16) | icmnd_cmpl->scsi_status;
+		break;
+
+	case FCPIO_INVALID_HEADER:   /* header contains invalid data */
+	case FCPIO_INVALID_PARAM:    /* some parameter in request invalid */
+	case FCPIO_REQ_NOT_SUPPORTED:/* request type is not supported */
 	default:
 		shost_printk(KERN_ERR, fnic->lport->host, "hdr status = %s\n",
 			     fnic_fcpio_status_to_str(hdr_status));
@@ -856,6 +927,11 @@ static void fnic_fcpio_icmnd_cmpl_handler(struct fnic *fnic,
 		break;
 	}
 
+	if (hdr_status != FCPIO_SUCCESS) {
+		atomic64_inc(&fnic_stats->io_stats.io_failures);
+		shost_printk(KERN_ERR, fnic->lport->host, "hdr status = %s\n",
+			     fnic_fcpio_status_to_str(hdr_status));
+	}
 	/* Break link with the SCSI command */
 	CMD_SP(sc) = NULL;
 	CMD_FLAGS(sc) |= FNIC_IO_DONE;
@@ -889,6 +965,12 @@ static void fnic_fcpio_icmnd_cmpl_handler(struct fnic *fnic,
 	} else
 		fnic->lport->host_stats.fcp_control_requests++;
 
+	atomic64_dec(&fnic_stats->io_stats.active_ios);
+	if (atomic64_read(&fnic->io_cmpl_skip))
+		atomic64_dec(&fnic->io_cmpl_skip);
+	else
+		atomic64_inc(&fnic_stats->io_stats.io_completions);
+
 	/* Call SCSI completion function to complete the IO */
 	if (sc->scsi_done)
 		sc->scsi_done(sc);
@@ -906,6 +988,10 @@ static void fnic_fcpio_itmf_cmpl_handler(struct fnic *fnic,
 	u32 id;
 	struct scsi_cmnd *sc;
 	struct fnic_io_req *io_req;
+	struct fnic_stats *fnic_stats = &fnic->fnic_stats;
+	struct abort_stats *abts_stats = &fnic->fnic_stats.abts_stats;
+	struct terminate_stats *term_stats = &fnic->fnic_stats.term_stats;
+	struct misc_stats *misc_stats = &fnic->fnic_stats.misc_stats;
 	unsigned long flags;
 	spinlock_t *io_lock;
 	unsigned long start_time;
@@ -923,6 +1009,7 @@ static void fnic_fcpio_itmf_cmpl_handler(struct fnic *fnic,
 	sc = scsi_host_find_tag(fnic->lport->host, id & FNIC_TAG_MASK);
 	WARN_ON_ONCE(!sc);
 	if (!sc) {
+		atomic64_inc(&fnic_stats->io_stats.sc_null);
 		shost_printk(KERN_ERR, fnic->lport->host,
 			  "itmf_cmpl sc is null - hdr status = %s tag = 0x%x\n",
 			  fnic_fcpio_status_to_str(hdr_status), id);
@@ -933,6 +1020,7 @@ static void fnic_fcpio_itmf_cmpl_handler(struct fnic *fnic,
 	io_req = (struct fnic_io_req *)CMD_SP(sc);
 	WARN_ON_ONCE(!io_req);
 	if (!io_req) {
+		atomic64_inc(&fnic_stats->io_stats.ioreq_null);
 		spin_unlock_irqrestore(io_lock, flags);
 		CMD_FLAGS(sc) |= FNIC_IO_ABT_TERM_REQ_NULL;
 		shost_printk(KERN_ERR, fnic->lport->host,
@@ -957,6 +1045,31 @@ static void fnic_fcpio_itmf_cmpl_handler(struct fnic *fnic,
 		spin_unlock_irqrestore(io_lock, flags);
 	} else if (id & FNIC_TAG_ABORT) {
 		/* Completion of abort cmd */
+		switch (hdr_status) {
+		case FCPIO_SUCCESS:
+			break;
+		case FCPIO_TIMEOUT:
+			if (CMD_FLAGS(sc) & FNIC_IO_ABTS_ISSUED)
+				atomic64_inc(&abts_stats->abort_fw_timeouts);
+			else
+				atomic64_inc(
+					&term_stats->terminate_fw_timeouts);
+			break;
+		case FCPIO_IO_NOT_FOUND:
+			if (CMD_FLAGS(sc) & FNIC_IO_ABTS_ISSUED)
+				atomic64_inc(&abts_stats->abort_io_not_found);
+			else
+				atomic64_inc(
+					&term_stats->terminate_io_not_found);
+			break;
+		default:
+			if (CMD_FLAGS(sc) & FNIC_IO_ABTS_ISSUED)
+				atomic64_inc(&abts_stats->abort_failures);
+			else
+				atomic64_inc(
+					&term_stats->terminate_failures);
+			break;
+		}
 		if (CMD_STATE(sc) != FNIC_IOREQ_ABTS_PENDING) {
 			/* This is a late completion. Ignore it */
 			spin_unlock_irqrestore(io_lock, flags);
@@ -964,6 +1077,16 @@ static void fnic_fcpio_itmf_cmpl_handler(struct fnic *fnic,
 		}
 		CMD_ABTS_STATUS(sc) = hdr_status;
 		CMD_FLAGS(sc) |= FNIC_IO_ABT_TERM_DONE;
+
+		atomic64_dec(&fnic_stats->io_stats.active_ios);
+		if (atomic64_read(&fnic->io_cmpl_skip))
+			atomic64_dec(&fnic->io_cmpl_skip);
+		else
+			atomic64_inc(&fnic_stats->io_stats.io_completions);
+
+		if (!(CMD_FLAGS(sc) & (FNIC_IO_ABORTED | FNIC_IO_DONE)))
+			atomic64_inc(&misc_stats->no_icmnd_itmf_cmpls);
+
 		FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host,
 			      "abts cmpl recd. id %d status %s\n",
 			      (int)(id & FNIC_TAG_MASK),
@@ -1067,6 +1190,18 @@ static int fnic_fcpio_cmpl_handler(struct vnic_dev *vdev,
 	struct fnic *fnic = vnic_dev_priv(vdev);
 
 	switch (desc->hdr.type) {
+	case FCPIO_ICMND_CMPL: /* fw completed a command */
+	case FCPIO_ITMF_CMPL: /* fw completed itmf (abort cmd, lun reset)*/
+	case FCPIO_FLOGI_REG_CMPL: /* fw completed flogi_reg */
+	case FCPIO_FLOGI_FIP_REG_CMPL: /* fw completed flogi_fip_reg */
+	case FCPIO_RESET_CMPL: /* fw completed reset */
+		atomic64_dec(&fnic->fnic_stats.fw_stats.active_fw_reqs);
+		break;
+	default:
+		break;
+	}
+
+	switch (desc->hdr.type) {
 	case FCPIO_ACK: /* fw copied copy wq desc to its queue */
 		fnic_fcpio_ack_handler(fnic, cq_index, desc);
 		break;
@@ -1126,6 +1261,7 @@ static void fnic_cleanup_io(struct fnic *fnic, int exclude_id)
 	struct scsi_cmnd *sc;
 	spinlock_t *io_lock;
 	unsigned long start_time = 0;
+	struct fnic_stats *fnic_stats = &fnic->fnic_stats;
 
 	for (i = 0; i < fnic->fnic_max_tag_id; i++) {
 		if (i == exclude_id)
@@ -1179,6 +1315,11 @@ cleanup_scsi_cmd:
 		FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host, "fnic_cleanup_io:"
 			      " DID_TRANSPORT_DISRUPTED\n");
 
+		if (atomic64_read(&fnic->io_cmpl_skip))
+			atomic64_dec(&fnic->io_cmpl_skip);
+		else
+			atomic64_inc(&fnic_stats->io_stats.io_completions);
+
 		/* Complete the command to SCSI */
 		if (sc->scsi_done) {
 			FNIC_TRACE(fnic_cleanup_io,
@@ -1262,6 +1403,7 @@ static inline int fnic_queue_abort_io_req(struct fnic *fnic, int tag,
 {
 	struct vnic_wq_copy *wq = &fnic->wq_copy[0];
 	struct Scsi_Host *host = fnic->lport->host;
+	struct misc_stats *misc_stats = &fnic->fnic_stats.misc_stats;
 	unsigned long flags;
 
 	spin_lock_irqsave(host->host_lock, flags);
@@ -1283,12 +1425,19 @@ static inline int fnic_queue_abort_io_req(struct fnic *fnic, int tag,
 		atomic_dec(&fnic->in_flight);
 		FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host,
 			"fnic_queue_abort_io_req: failure: no descriptors\n");
+		atomic64_inc(&misc_stats->abts_cpwq_alloc_failures);
 		return 1;
 	}
 	fnic_queue_wq_copy_desc_itmf(wq, tag | FNIC_TAG_ABORT,
 				     0, task_req, tag, fc_lun, io_req->port_id,
 				     fnic->config.ra_tov, fnic->config.ed_tov);
 
+	atomic64_inc(&fnic->fnic_stats.fw_stats.active_fw_reqs);
+	if (atomic64_read(&fnic->fnic_stats.fw_stats.active_fw_reqs) >
+		  atomic64_read(&fnic->fnic_stats.fw_stats.max_fw_reqs))
+		atomic64_set(&fnic->fnic_stats.fw_stats.max_fw_reqs,
+		  atomic64_read(&fnic->fnic_stats.fw_stats.active_fw_reqs));
+
 	spin_unlock_irqrestore(&fnic->wq_copy_lock[0], flags);
 	atomic_dec(&fnic->in_flight);
 
@@ -1299,10 +1448,13 @@ static void fnic_rport_exch_reset(struct fnic *fnic, u32 port_id)
 {
 	int tag;
 	int abt_tag;
+	int term_cnt = 0;
 	struct fnic_io_req *io_req;
 	spinlock_t *io_lock;
 	unsigned long flags;
 	struct scsi_cmnd *sc;
+	struct reset_stats *reset_stats = &fnic->fnic_stats.reset_stats;
+	struct terminate_stats *term_stats = &fnic->fnic_stats.term_stats;
 	struct scsi_lun fc_lun;
 	enum fnic_ioreq_state old_ioreq_state;
 
@@ -1366,6 +1518,7 @@ static void fnic_rport_exch_reset(struct fnic *fnic, u32 port_id)
 		CMD_STATE(sc) = FNIC_IOREQ_ABTS_PENDING;
 		CMD_ABTS_STATUS(sc) = FCPIO_INVALID_CODE;
 		if (CMD_FLAGS(sc) & FNIC_DEVICE_RESET) {
+			atomic64_inc(&reset_stats->device_reset_terminates);
 			abt_tag = (tag | FNIC_TAG_DEV_RST);
 			FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host,
 			"fnic_rport_exch_reset dev rst sc 0x%p\n",
@@ -1402,8 +1555,12 @@ static void fnic_rport_exch_reset(struct fnic *fnic, u32 port_id)
 			else
 				CMD_FLAGS(sc) |= FNIC_IO_INTERNAL_TERM_ISSUED;
 			spin_unlock_irqrestore(io_lock, flags);
+			atomic64_inc(&term_stats->terminates);
+			term_cnt++;
 		}
 	}
+	if (term_cnt > atomic64_read(&term_stats->max_terminates))
+		atomic64_set(&term_stats->max_terminates, term_cnt);
 
 }
 
@@ -1411,6 +1568,7 @@ void fnic_terminate_rport_io(struct fc_rport *rport)
 {
 	int tag;
 	int abt_tag;
+	int term_cnt = 0;
 	struct fnic_io_req *io_req;
 	spinlock_t *io_lock;
 	unsigned long flags;
@@ -1420,6 +1578,8 @@ void fnic_terminate_rport_io(struct fc_rport *rport)
 	struct fc_lport *lport;
 	struct fnic *fnic;
 	struct fc_rport *cmd_rport;
+	struct reset_stats *reset_stats;
+	struct terminate_stats *term_stats;
 	enum fnic_ioreq_state old_ioreq_state;
 
 	if (!rport) {
@@ -1448,6 +1608,9 @@ void fnic_terminate_rport_io(struct fc_rport *rport)
 	if (fnic->in_remove)
 		return;
 
+	reset_stats = &fnic->fnic_stats.reset_stats;
+	term_stats = &fnic->fnic_stats.term_stats;
+
 	for (tag = 0; tag < fnic->fnic_max_tag_id; tag++) {
 		abt_tag = tag;
 		io_lock = fnic_io_lock_tag(fnic, tag);
@@ -1504,6 +1667,7 @@ void fnic_terminate_rport_io(struct fc_rport *rport)
 		CMD_STATE(sc) = FNIC_IOREQ_ABTS_PENDING;
 		CMD_ABTS_STATUS(sc) = FCPIO_INVALID_CODE;
 		if (CMD_FLAGS(sc) & FNIC_DEVICE_RESET) {
+			atomic64_inc(&reset_stats->device_reset_terminates);
 			abt_tag = (tag | FNIC_TAG_DEV_RST);
 			FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host,
 			"fnic_terminate_rport_io dev rst sc 0x%p\n", sc);
@@ -1540,8 +1704,12 @@ void fnic_terminate_rport_io(struct fc_rport *rport)
 			else
 				CMD_FLAGS(sc) |= FNIC_IO_INTERNAL_TERM_ISSUED;
 			spin_unlock_irqrestore(io_lock, flags);
+			atomic64_inc(&term_stats->terminates);
+			term_cnt++;
 		}
 	}
+	if (term_cnt > atomic64_read(&term_stats->max_terminates))
+		atomic64_set(&term_stats->max_terminates, term_cnt);
 
 }
 
@@ -1562,6 +1730,9 @@ int fnic_abort_cmd(struct scsi_cmnd *sc)
 	int ret = SUCCESS;
 	u32 task_req = 0;
 	struct scsi_lun fc_lun;
+	struct fnic_stats *fnic_stats;
+	struct abort_stats *abts_stats;
+	struct terminate_stats *term_stats;
 	int tag;
 	DECLARE_COMPLETION_ONSTACK(tm_done);
 
@@ -1572,6 +1743,10 @@ int fnic_abort_cmd(struct scsi_cmnd *sc)
 	lp = shost_priv(sc->device->host);
 
 	fnic = lport_priv(lp);
+	fnic_stats = &fnic->fnic_stats;
+	abts_stats = &fnic->fnic_stats.abts_stats;
+	term_stats = &fnic->fnic_stats.term_stats;
+
 	rport = starget_to_rport(scsi_target(sc->device));
 	tag = sc->request->tag;
 	FNIC_SCSI_DBG(KERN_DEBUG,
@@ -1630,8 +1805,10 @@ int fnic_abort_cmd(struct scsi_cmnd *sc)
 	 */
 	if (fc_remote_port_chkready(rport) == 0)
 		task_req = FCPIO_ITMF_ABT_TASK;
-	else
+	else {
+		atomic64_inc(&fnic_stats->misc_stats.rport_not_ready);
 		task_req = FCPIO_ITMF_ABT_TASK_TERM;
+	}
 
 	/* Now queue the abort command to firmware */
 	int_to_scsilun(sc->device->lun, &fc_lun);
@@ -1646,10 +1823,13 @@ int fnic_abort_cmd(struct scsi_cmnd *sc)
 		ret = FAILED;
 		goto fnic_abort_cmd_end;
 	}
-	if (task_req == FCPIO_ITMF_ABT_TASK)
+	if (task_req == FCPIO_ITMF_ABT_TASK) {
 		CMD_FLAGS(sc) |= FNIC_IO_ABTS_ISSUED;
-	else
+		atomic64_inc(&fnic_stats->abts_stats.aborts);
+	} else {
 		CMD_FLAGS(sc) |= FNIC_IO_TERM_ISSUED;
+		atomic64_inc(&fnic_stats->term_stats.terminates);
+	}
 
 	/*
 	 * We queued an abort IO, wait for its completion.
@@ -1667,6 +1847,7 @@ int fnic_abort_cmd(struct scsi_cmnd *sc)
 
 	io_req = (struct fnic_io_req *)CMD_SP(sc);
 	if (!io_req) {
+		atomic64_inc(&fnic_stats->io_stats.ioreq_null);
 		spin_unlock_irqrestore(io_lock, flags);
 		CMD_FLAGS(sc) |= FNIC_IO_ABT_TERM_REQ_NULL;
 		ret = FAILED;
@@ -1677,6 +1858,15 @@ int fnic_abort_cmd(struct scsi_cmnd *sc)
 	/* fw did not complete abort, timed out */
 	if (CMD_ABTS_STATUS(sc) == FCPIO_INVALID_CODE) {
 		spin_unlock_irqrestore(io_lock, flags);
+		if (task_req == FCPIO_ITMF_ABT_TASK) {
+			FNIC_SCSI_DBG(KERN_INFO,
+				fnic->lport->host, "Abort Driver Timeout\n");
+			atomic64_inc(&abts_stats->abort_drv_timeouts);
+		} else {
+			FNIC_SCSI_DBG(KERN_INFO, fnic->lport->host,
+				"Terminate Driver Timeout\n");
+			atomic64_inc(&term_stats->terminate_drv_timeouts);
+		}
 		CMD_FLAGS(sc) |= FNIC_IO_ABT_TERM_TIMED_OUT;
 		ret = FAILED;
 		goto fnic_abort_cmd_end;
@@ -1721,6 +1911,7 @@ static inline int fnic_queue_dr_io_req(struct fnic *fnic,
 {
 	struct vnic_wq_copy *wq = &fnic->wq_copy[0];
 	struct Scsi_Host *host = fnic->lport->host;
+	struct misc_stats *misc_stats = &fnic->fnic_stats.misc_stats;
 	struct scsi_lun fc_lun;
 	int ret = 0;
 	unsigned long intr_flags;
@@ -1742,6 +1933,7 @@ static inline int fnic_queue_dr_io_req(struct fnic *fnic,
 	if (!vnic_wq_copy_desc_avail(wq)) {
 		FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host,
 			  "queue_dr_io_req failure - no descriptors\n");
+		atomic64_inc(&misc_stats->devrst_cpwq_alloc_failures);
 		ret = -EAGAIN;
 		goto lr_io_req_end;
 	}
@@ -1754,6 +1946,12 @@ static inline int fnic_queue_dr_io_req(struct fnic *fnic,
 				     fc_lun.scsi_lun, io_req->port_id,
 				     fnic->config.ra_tov, fnic->config.ed_tov);
 
+	atomic64_inc(&fnic->fnic_stats.fw_stats.active_fw_reqs);
+	if (atomic64_read(&fnic->fnic_stats.fw_stats.active_fw_reqs) >
+		  atomic64_read(&fnic->fnic_stats.fw_stats.max_fw_reqs))
+		atomic64_set(&fnic->fnic_stats.fw_stats.max_fw_reqs,
+		  atomic64_read(&fnic->fnic_stats.fw_stats.active_fw_reqs));
+
 lr_io_req_end:
 	spin_unlock_irqrestore(&fnic->wq_copy_lock[0], intr_flags);
 	atomic_dec(&fnic->in_flight);
@@ -1988,6 +2186,8 @@ int fnic_device_reset(struct scsi_cmnd *sc)
 	unsigned long flags;
 	unsigned long start_time = 0;
 	struct scsi_lun fc_lun;
+	struct fnic_stats *fnic_stats;
+	struct reset_stats *reset_stats;
 	int tag = 0;
 	DECLARE_COMPLETION_ONSTACK(tm_done);
 	int tag_gen_flag = 0;   /*to track tags allocated by fnic driver*/
@@ -1999,6 +2199,10 @@ int fnic_device_reset(struct scsi_cmnd *sc)
 	lp = shost_priv(sc->device->host);
 
 	fnic = lport_priv(lp);
+	fnic_stats = &fnic->fnic_stats;
+	reset_stats = &fnic->fnic_stats.reset_stats;
+
+	atomic64_inc(&reset_stats->device_resets);
 
 	rport = starget_to_rport(scsi_target(sc->device));
 	FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host,
@@ -2009,8 +2213,10 @@ int fnic_device_reset(struct scsi_cmnd *sc)
 		goto fnic_device_reset_end;
 
 	/* Check if remote port up */
-	if (fc_remote_port_chkready(rport))
+	if (fc_remote_port_chkready(rport)) {
+		atomic64_inc(&fnic_stats->misc_stats.rport_not_ready);
 		goto fnic_device_reset_end;
+	}
 
 	CMD_FLAGS(sc) = FNIC_DEVICE_RESET;
 	/* Allocate tag if not present */
@@ -2086,6 +2292,7 @@ int fnic_device_reset(struct scsi_cmnd *sc)
 	 * gets cleaned up during higher levels of EH
 	 */
 	if (status == FCPIO_INVALID_CODE) {
+		atomic64_inc(&reset_stats->device_reset_timeouts);
 		FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host,
 			      "Device reset timed out\n");
 		CMD_FLAGS(sc) |= FNIC_DEV_RST_TIMED_OUT;
@@ -2199,6 +2406,10 @@ fnic_device_reset_end:
 		  "Returning from device reset %s\n",
 		  (ret == SUCCESS) ?
 		  "SUCCESS" : "FAILED");
+
+	if (ret == FAILED)
+		atomic64_inc(&reset_stats->device_reset_failures);
+
 	return ret;
 }
 
@@ -2207,26 +2418,34 @@ int fnic_reset(struct Scsi_Host *shost)
 {
 	struct fc_lport *lp;
 	struct fnic *fnic;
-	int ret = SUCCESS;
+	int ret = 0;
+	struct reset_stats *reset_stats;
 
 	lp = shost_priv(shost);
 	fnic = lport_priv(lp);
+	reset_stats = &fnic->fnic_stats.reset_stats;
 
 	FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host,
 		      "fnic_reset called\n");
 
+	atomic64_inc(&reset_stats->fnic_resets);
+
 	/*
 	 * Reset local port, this will clean up libFC exchanges,
 	 * reset remote port sessions, and if link is up, begin flogi
 	 */
-	if (lp->tt.lport_reset(lp))
-		ret = FAILED;
+	ret = lp->tt.lport_reset(lp);
 
 	FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host,
 		      "Returning from fnic reset %s\n",
-		      (ret == SUCCESS) ?
+		      (ret == 0) ?
 		      "SUCCESS" : "FAILED");
 
+	if (ret == 0)
+		atomic64_inc(&reset_stats->fnic_reset_completions);
+	else
+		atomic64_inc(&reset_stats->fnic_reset_failures);
+
 	return ret;
 }
 
@@ -2251,7 +2470,7 @@ int fnic_host_reset(struct scsi_cmnd *sc)
 	 * scsi-ml tries to send a TUR to every device if host reset is
 	 * successful, so before returning to scsi, fabric should be up
 	 */
-	ret = fnic_reset(shost);
+	ret = (fnic_reset(shost) == 0) ? SUCCESS : FAILED;
 	if (ret == SUCCESS) {
 		wait_host_tmo = jiffies + FNIC_HOST_RESET_SETTLE_TIME * HZ;
 		ret = FAILED;
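A note on the recurring pattern: every path that queues a copy-WQ request bumps fw_stats.active_fw_reqs and then refreshes fw_stats.max_fw_reqs with a read-compare-set sequence, and the same idiom is used for io_stats.active_ios/max_active_ios. A minimal sketch of that idiom as a standalone helper; the helper name is hypothetical and not part of the patch, and because the read and the set are separate atomic operations the recorded maximum is best-effort under concurrency rather than exact:

	#include <linux/atomic.h>

	/* Hypothetical helper illustrating the high-water-mark idiom used above. */
	static inline void fnic_track_max(atomic64_t *active, atomic64_t *max)
	{
		if (atomic64_read(active) > atomic64_read(max))
			atomic64_set(max, atomic64_read(active));
	}

In the patch the sequence is open-coded at each call site, which keeps the change mechanical but means the same few lines repeat in fnic_fw_reset_handler(), fnic_flogi_reg_handler(), fnic_queue_wq_copy_desc(), fnic_queue_abort_io_req() and fnic_queue_dr_io_req().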