about summary refs log tree commit diff stats
path: root/drivers/scsi/lpfc
diff options
context:
space:
mode:
author: James Smart <james.smart@emulex.com> 2012-08-03 12:35:13 -0400
committer: James Bottomley <JBottomley@Parallels.com> 2012-09-14 09:35:32 -0400
commit: b84daac9dce4c87b83668d6790f3b092a2e906ae (patch)
tree: d94f8bd3900f32eb91f8ae1aa2d8cfbf655cc1c2 /drivers/scsi/lpfc
parent: 34f5ad8bddeda2a6b8d0dd1b61a015d06e62e1a4 (diff)
[SCSI] lpfc 8.3.33: Add debugfs interface to display SLI queue information
Signed-off-by: James Smart <james.smart@emulex.com>
Signed-off-by: James Bottomley <JBottomley@Parallels.com>
Diffstat (limited to 'drivers/scsi/lpfc')
-rw-r--r--drivers/scsi/lpfc/lpfc_debugfs.h3
-rw-r--r--drivers/scsi/lpfc/lpfc_sli.c35
-rw-r--r--drivers/scsi/lpfc/lpfc_sli4.h29
3 files changed, 66 insertions(+), 1 deletion(-)
diff --git a/drivers/scsi/lpfc/lpfc_debugfs.h b/drivers/scsi/lpfc/lpfc_debugfs.h
index afe368fd1b98..a9593ac9c134 100644
--- a/drivers/scsi/lpfc/lpfc_debugfs.h
+++ b/drivers/scsi/lpfc/lpfc_debugfs.h
@@ -36,6 +36,9 @@
36/* dumpHostSlim output buffer size */ 36/* dumpHostSlim output buffer size */
37#define LPFC_DUMPHOSTSLIM_SIZE 4096 37#define LPFC_DUMPHOSTSLIM_SIZE 4096
38 38
39/* dumpSLIqinfo output buffer size */
40#define LPFC_DUMPSLIQINFO_SIZE 4096
41
39/* hbqinfo output buffer size */ 42/* hbqinfo output buffer size */
40#define LPFC_HBQINFO_SIZE 8192 43#define LPFC_HBQINFO_SIZE 8192
41 44
diff --git a/drivers/scsi/lpfc/lpfc_sli.c b/drivers/scsi/lpfc/lpfc_sli.c
index 9cbd20b1328b..296a6f8473e0 100644
--- a/drivers/scsi/lpfc/lpfc_sli.c
+++ b/drivers/scsi/lpfc/lpfc_sli.c
@@ -101,8 +101,11 @@ lpfc_sli4_wq_put(struct lpfc_queue *q, union lpfc_wqe *wqe)
101 temp_wqe = q->qe[q->host_index].wqe; 101 temp_wqe = q->qe[q->host_index].wqe;
102 102
103 /* If the host has not yet processed the next entry then we are done */ 103 /* If the host has not yet processed the next entry then we are done */
104 if (((q->host_index + 1) % q->entry_count) == q->hba_index) 104 if (((q->host_index + 1) % q->entry_count) == q->hba_index) {
105 q->WQ_overflow++;
105 return -ENOMEM; 106 return -ENOMEM;
107 }
108 q->WQ_posted++;
106 /* set consumption flag every once in a while */ 109 /* set consumption flag every once in a while */
107 if (!((q->host_index + 1) % q->entry_repost)) 110 if (!((q->host_index + 1) % q->entry_repost))
108 bf_set(wqe_wqec, &wqe->generic.wqe_com, 1); 111 bf_set(wqe_wqec, &wqe->generic.wqe_com, 1);
@@ -11311,14 +11314,17 @@ lpfc_sli4_sp_handle_rcqe(struct lpfc_hba *phba, struct lpfc_rcqe *rcqe)
11311 case FC_STATUS_RQ_BUF_LEN_EXCEEDED: 11314 case FC_STATUS_RQ_BUF_LEN_EXCEEDED:
11312 lpfc_printf_log(phba, KERN_ERR, LOG_SLI, 11315 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
11313 "2537 Receive Frame Truncated!!\n"); 11316 "2537 Receive Frame Truncated!!\n");
11317 hrq->RQ_buf_trunc++;
11314 case FC_STATUS_RQ_SUCCESS: 11318 case FC_STATUS_RQ_SUCCESS:
11315 lpfc_sli4_rq_release(hrq, drq); 11319 lpfc_sli4_rq_release(hrq, drq);
11316 spin_lock_irqsave(&phba->hbalock, iflags); 11320 spin_lock_irqsave(&phba->hbalock, iflags);
11317 dma_buf = lpfc_sli_hbqbuf_get(&phba->hbqs[0].hbq_buffer_list); 11321 dma_buf = lpfc_sli_hbqbuf_get(&phba->hbqs[0].hbq_buffer_list);
11318 if (!dma_buf) { 11322 if (!dma_buf) {
11323 hrq->RQ_no_buf_found++;
11319 spin_unlock_irqrestore(&phba->hbalock, iflags); 11324 spin_unlock_irqrestore(&phba->hbalock, iflags);
11320 goto out; 11325 goto out;
11321 } 11326 }
11327 hrq->RQ_rcv_buf++;
11322 memcpy(&dma_buf->cq_event.cqe.rcqe_cmpl, rcqe, sizeof(*rcqe)); 11328 memcpy(&dma_buf->cq_event.cqe.rcqe_cmpl, rcqe, sizeof(*rcqe));
11323 /* save off the frame for the word thread to process */ 11329 /* save off the frame for the word thread to process */
11324 list_add_tail(&dma_buf->cq_event.list, 11330 list_add_tail(&dma_buf->cq_event.list,
@@ -11330,6 +11336,7 @@ lpfc_sli4_sp_handle_rcqe(struct lpfc_hba *phba, struct lpfc_rcqe *rcqe)
11330 break; 11336 break;
11331 case FC_STATUS_INSUFF_BUF_NEED_BUF: 11337 case FC_STATUS_INSUFF_BUF_NEED_BUF:
11332 case FC_STATUS_INSUFF_BUF_FRM_DISC: 11338 case FC_STATUS_INSUFF_BUF_FRM_DISC:
11339 hrq->RQ_no_posted_buf++;
11333 /* Post more buffers if possible */ 11340 /* Post more buffers if possible */
11334 spin_lock_irqsave(&phba->hbalock, iflags); 11341 spin_lock_irqsave(&phba->hbalock, iflags);
11335 phba->hba_flag |= HBA_POST_RECEIVE_BUFFER; 11342 phba->hba_flag |= HBA_POST_RECEIVE_BUFFER;
@@ -11457,6 +11464,7 @@ lpfc_sli4_sp_handle_eqe(struct lpfc_hba *phba, struct lpfc_eqe *eqe)
11457 workposted |= lpfc_sli4_sp_handle_mcqe(phba, cqe); 11464 workposted |= lpfc_sli4_sp_handle_mcqe(phba, cqe);
11458 if (!(++ecount % cq->entry_repost)) 11465 if (!(++ecount % cq->entry_repost))
11459 lpfc_sli4_cq_release(cq, LPFC_QUEUE_NOARM); 11466 lpfc_sli4_cq_release(cq, LPFC_QUEUE_NOARM);
11467 cq->CQ_mbox++;
11460 } 11468 }
11461 break; 11469 break;
11462 case LPFC_WCQ: 11470 case LPFC_WCQ:
@@ -11470,6 +11478,10 @@ lpfc_sli4_sp_handle_eqe(struct lpfc_hba *phba, struct lpfc_eqe *eqe)
11470 if (!(++ecount % cq->entry_repost)) 11478 if (!(++ecount % cq->entry_repost))
11471 lpfc_sli4_cq_release(cq, LPFC_QUEUE_NOARM); 11479 lpfc_sli4_cq_release(cq, LPFC_QUEUE_NOARM);
11472 } 11480 }
11481
11482 /* Track the max number of CQEs processed in 1 EQ */
11483 if (ecount > cq->CQ_max_cqe)
11484 cq->CQ_max_cqe = ecount;
11473 break; 11485 break;
11474 default: 11486 default:
11475 lpfc_printf_log(phba, KERN_ERR, LOG_SLI, 11487 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
@@ -11621,17 +11633,20 @@ lpfc_sli4_fp_handle_wcqe(struct lpfc_hba *phba, struct lpfc_queue *cq,
11621 /* Check and process for different type of WCQE and dispatch */ 11633 /* Check and process for different type of WCQE and dispatch */
11622 switch (bf_get(lpfc_wcqe_c_code, &wcqe)) { 11634 switch (bf_get(lpfc_wcqe_c_code, &wcqe)) {
11623 case CQE_CODE_COMPL_WQE: 11635 case CQE_CODE_COMPL_WQE:
11636 cq->CQ_wq++;
11624 /* Process the WQ complete event */ 11637 /* Process the WQ complete event */
11625 phba->last_completion_time = jiffies; 11638 phba->last_completion_time = jiffies;
11626 lpfc_sli4_fp_handle_fcp_wcqe(phba, 11639 lpfc_sli4_fp_handle_fcp_wcqe(phba,
11627 (struct lpfc_wcqe_complete *)&wcqe); 11640 (struct lpfc_wcqe_complete *)&wcqe);
11628 break; 11641 break;
11629 case CQE_CODE_RELEASE_WQE: 11642 case CQE_CODE_RELEASE_WQE:
11643 cq->CQ_release_wqe++;
11630 /* Process the WQ release event */ 11644 /* Process the WQ release event */
11631 lpfc_sli4_fp_handle_rel_wcqe(phba, cq, 11645 lpfc_sli4_fp_handle_rel_wcqe(phba, cq,
11632 (struct lpfc_wcqe_release *)&wcqe); 11646 (struct lpfc_wcqe_release *)&wcqe);
11633 break; 11647 break;
11634 case CQE_CODE_XRI_ABORTED: 11648 case CQE_CODE_XRI_ABORTED:
11649 cq->CQ_xri_aborted++;
11635 /* Process the WQ XRI abort event */ 11650 /* Process the WQ XRI abort event */
11636 phba->last_completion_time = jiffies; 11651 phba->last_completion_time = jiffies;
11637 workposted = lpfc_sli4_sp_handle_abort_xri_wcqe(phba, cq, 11652 workposted = lpfc_sli4_sp_handle_abort_xri_wcqe(phba, cq,
@@ -11709,6 +11724,10 @@ lpfc_sli4_fp_handle_eqe(struct lpfc_hba *phba, struct lpfc_eqe *eqe,
11709 lpfc_sli4_cq_release(cq, LPFC_QUEUE_NOARM); 11724 lpfc_sli4_cq_release(cq, LPFC_QUEUE_NOARM);
11710 } 11725 }
11711 11726
11727 /* Track the max number of CQEs processed in 1 EQ */
11728 if (ecount > cq->CQ_max_cqe)
11729 cq->CQ_max_cqe = ecount;
11730
11712 /* Catch the no cq entry condition */ 11731 /* Catch the no cq entry condition */
11713 if (unlikely(ecount == 0)) 11732 if (unlikely(ecount == 0))
11714 lpfc_printf_log(phba, KERN_ERR, LOG_SLI, 11733 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
@@ -11780,6 +11799,7 @@ lpfc_sli4_sp_intr_handler(int irq, void *dev_id)
11780 11799
11781 /* Check device state for handling interrupt */ 11800 /* Check device state for handling interrupt */
11782 if (unlikely(lpfc_intr_state_check(phba))) { 11801 if (unlikely(lpfc_intr_state_check(phba))) {
11802 speq->EQ_badstate++;
11783 /* Check again for link_state with lock held */ 11803 /* Check again for link_state with lock held */
11784 spin_lock_irqsave(&phba->hbalock, iflag); 11804 spin_lock_irqsave(&phba->hbalock, iflag);
11785 if (phba->link_state < LPFC_LINK_DOWN) 11805 if (phba->link_state < LPFC_LINK_DOWN)
@@ -11796,13 +11816,19 @@ lpfc_sli4_sp_intr_handler(int irq, void *dev_id)
11796 lpfc_sli4_sp_handle_eqe(phba, eqe); 11816 lpfc_sli4_sp_handle_eqe(phba, eqe);
11797 if (!(++ecount % speq->entry_repost)) 11817 if (!(++ecount % speq->entry_repost))
11798 lpfc_sli4_eq_release(speq, LPFC_QUEUE_NOARM); 11818 lpfc_sli4_eq_release(speq, LPFC_QUEUE_NOARM);
11819 speq->EQ_processed++;
11799 } 11820 }
11800 11821
11822 /* Track the max number of EQEs processed in 1 intr */
11823 if (ecount > speq->EQ_max_eqe)
11824 speq->EQ_max_eqe = ecount;
11825
11801 /* Always clear and re-arm the slow-path EQ */ 11826 /* Always clear and re-arm the slow-path EQ */
11802 lpfc_sli4_eq_release(speq, LPFC_QUEUE_REARM); 11827 lpfc_sli4_eq_release(speq, LPFC_QUEUE_REARM);
11803 11828
11804 /* Catch the no cq entry condition */ 11829 /* Catch the no cq entry condition */
11805 if (unlikely(ecount == 0)) { 11830 if (unlikely(ecount == 0)) {
11831 speq->EQ_no_entry++;
11806 if (phba->intr_type == MSIX) 11832 if (phba->intr_type == MSIX)
11807 /* MSI-X treated interrupt served as no EQ share INT */ 11833 /* MSI-X treated interrupt served as no EQ share INT */
11808 lpfc_printf_log(phba, KERN_WARNING, LOG_SLI, 11834 lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
@@ -11864,6 +11890,7 @@ lpfc_sli4_fp_intr_handler(int irq, void *dev_id)
11864 11890
11865 /* Check device state for handling interrupt */ 11891 /* Check device state for handling interrupt */
11866 if (unlikely(lpfc_intr_state_check(phba))) { 11892 if (unlikely(lpfc_intr_state_check(phba))) {
11893 fpeq->EQ_badstate++;
11867 /* Check again for link_state with lock held */ 11894 /* Check again for link_state with lock held */
11868 spin_lock_irqsave(&phba->hbalock, iflag); 11895 spin_lock_irqsave(&phba->hbalock, iflag);
11869 if (phba->link_state < LPFC_LINK_DOWN) 11896 if (phba->link_state < LPFC_LINK_DOWN)
@@ -11880,12 +11907,18 @@ lpfc_sli4_fp_intr_handler(int irq, void *dev_id)
11880 lpfc_sli4_fp_handle_eqe(phba, eqe, fcp_eqidx); 11907 lpfc_sli4_fp_handle_eqe(phba, eqe, fcp_eqidx);
11881 if (!(++ecount % fpeq->entry_repost)) 11908 if (!(++ecount % fpeq->entry_repost))
11882 lpfc_sli4_eq_release(fpeq, LPFC_QUEUE_NOARM); 11909 lpfc_sli4_eq_release(fpeq, LPFC_QUEUE_NOARM);
11910 fpeq->EQ_processed++;
11883 } 11911 }
11884 11912
11913 /* Track the max number of EQEs processed in 1 intr */
11914 if (ecount > fpeq->EQ_max_eqe)
11915 fpeq->EQ_max_eqe = ecount;
11916
11885 /* Always clear and re-arm the fast-path EQ */ 11917 /* Always clear and re-arm the fast-path EQ */
11886 lpfc_sli4_eq_release(fpeq, LPFC_QUEUE_REARM); 11918 lpfc_sli4_eq_release(fpeq, LPFC_QUEUE_REARM);
11887 11919
11888 if (unlikely(ecount == 0)) { 11920 if (unlikely(ecount == 0)) {
11921 fpeq->EQ_no_entry++;
11889 if (phba->intr_type == MSIX) 11922 if (phba->intr_type == MSIX)
11890 /* MSI-X treated interrupt served as no EQ share INT */ 11923 /* MSI-X treated interrupt served as no EQ share INT */
11891 lpfc_printf_log(phba, KERN_WARNING, LOG_SLI, 11924 lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
diff --git a/drivers/scsi/lpfc/lpfc_sli4.h b/drivers/scsi/lpfc/lpfc_sli4.h
index ec756118c5c1..f4b57654787b 100644
--- a/drivers/scsi/lpfc/lpfc_sli4.h
+++ b/drivers/scsi/lpfc/lpfc_sli4.h
@@ -141,6 +141,35 @@ struct lpfc_queue {
141 uint32_t page_count; /* Number of pages allocated for this queue */ 141 uint32_t page_count; /* Number of pages allocated for this queue */
142 uint32_t host_index; /* The host's index for putting or getting */ 142 uint32_t host_index; /* The host's index for putting or getting */
143 uint32_t hba_index; /* The last known hba index for get or put */ 143 uint32_t hba_index; /* The last known hba index for get or put */
144
145 /* For q stats */
146 uint32_t q_cnt_1;
147 uint32_t q_cnt_2;
148 uint32_t q_cnt_3;
149 uint64_t q_cnt_4;
150/* defines for EQ stats */
151#define EQ_max_eqe q_cnt_1
152#define EQ_no_entry q_cnt_2
153#define EQ_badstate q_cnt_3
154#define EQ_processed q_cnt_4
155
156/* defines for CQ stats */
157#define CQ_mbox q_cnt_1
158#define CQ_max_cqe q_cnt_1
159#define CQ_release_wqe q_cnt_2
160#define CQ_xri_aborted q_cnt_3
161#define CQ_wq q_cnt_4
162
163/* defines for WQ stats */
164#define WQ_overflow q_cnt_1
165#define WQ_posted q_cnt_4
166
167/* defines for RQ stats */
168#define RQ_no_posted_buf q_cnt_1
169#define RQ_no_buf_found q_cnt_2
170#define RQ_buf_trunc q_cnt_3
171#define RQ_rcv_buf q_cnt_4
172
144 union sli4_qe qe[1]; /* array to index entries (must be last) */ 173 union sli4_qe qe[1]; /* array to index entries (must be last) */
145}; 174};
146 175