-rw-r--r--	drivers/net/ethernet/chelsio/cxgb4/cxgb4.h	|  10
-rw-r--r--	drivers/net/ethernet/chelsio/cxgb4/sge.c	| 122
-rw-r--r--	drivers/net/ethernet/chelsio/cxgb4/t4_hw.c	| 106
-rw-r--r--	drivers/net/ethernet/chelsio/cxgb4/t4_regs.h	|   9
4 files changed, 219 insertions(+), 28 deletions(-)
diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4.h b/drivers/net/ethernet/chelsio/cxgb4/cxgb4.h
index 944f2cbc1795..50abe1d61287 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4.h
+++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4.h
@@ -556,8 +556,13 @@ struct sge {
 	u32 pktshift;               /* padding between CPL & packet data */
 	u32 fl_align;               /* response queue message alignment */
 	u32 fl_starve_thres;        /* Free List starvation threshold */
-	unsigned int starve_thres;
-	u8 idma_state[2];
+
+	/* State variables for detecting an SGE Ingress DMA hang */
+	unsigned int idma_1s_thresh;  /* SGE same State Counter 1s threshold */
+	unsigned int idma_stalled[2]; /* SGE synthesized stalled timers in HZ */
+	unsigned int idma_state[2];   /* SGE IDMA Hang detect state */
+	unsigned int idma_qid[2];     /* SGE IDMA Hung Ingress Queue ID */
+
 	unsigned int egr_start;
 	unsigned int ingr_start;
 	void *egr_map[MAX_EGRQ];    /* qid->queue egress queue map */
@@ -1032,4 +1037,5 @@ void t4_db_dropped(struct adapter *adapter);
 int t4_mem_win_read_len(struct adapter *adap, u32 addr, __be32 *data, int len);
 int t4_fwaddrspace_write(struct adapter *adap, unsigned int mbox,
 			 u32 addr, u32 val);
+void t4_sge_decode_idma_state(struct adapter *adapter, int state);
 #endif /* __CXGB4_H__ */
diff --git a/drivers/net/ethernet/chelsio/cxgb4/sge.c b/drivers/net/ethernet/chelsio/cxgb4/sge.c
index af76b25bb606..46429f9d0592 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/sge.c
+++ b/drivers/net/ethernet/chelsio/cxgb4/sge.c
@@ -93,6 +93,16 @@
  */
 #define TX_QCHECK_PERIOD (HZ / 2)
 
+/* SGE Hung Ingress DMA Threshold Warning time (in Hz) and Warning Repeat Rate
+ * (in RX_QCHECK_PERIOD multiples).  If we find one of the SGE Ingress DMA
+ * State Machines in the same state for this amount of time (in HZ) then we'll
+ * issue a warning about a potential hang.  We'll repeat the warning as the
+ * SGE Ingress DMA Channel appears to be hung every N RX_QCHECK_PERIODs till
+ * the situation clears.  If the situation clears, we'll note that as well.
+ */
+#define SGE_IDMA_WARN_THRESH (1 * HZ)
+#define SGE_IDMA_WARN_REPEAT (20 * RX_QCHECK_PERIOD)
+
 /*
  * Max number of Tx descriptors to be reclaimed by the Tx timer.
  */
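Worked example of the cadence these constants produce. This is a standalone sketch, not driver code; CONFIG_HZ=1000 and RX_QCHECK_PERIOD=(HZ / 2) are assumptions here (RX_QCHECK_PERIOD is defined elsewhere in sge.c and is not part of this patch):

#include <stdio.h>

#define HZ 1000				/* assumed kernel tick rate */
#define RX_QCHECK_PERIOD (HZ / 2)	/* assumed to match sge.c */
#define SGE_IDMA_WARN_THRESH (1 * HZ)
#define SGE_IDMA_WARN_REPEAT (20 * RX_QCHECK_PERIOD)

int main(void)
{
	/* First warning after ~1s in the same state, repeating every ~10s. */
	printf("warn after %d s, repeat every %d s\n",
	       SGE_IDMA_WARN_THRESH / HZ, SGE_IDMA_WARN_REPEAT / HZ);
	return 0;
}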
@@ -1041,7 +1051,6 @@ out_free:	dev_kfree_skb(skb);
 	end = (u64 *)wr + flits;
 
 	len = immediate ? skb->len : 0;
-	len += sizeof(*cpl);
 	ssi = skb_shinfo(skb);
 	if (ssi->gso_size) {
 		struct cpl_tx_pkt_lso *lso = (void *)wr;
@@ -1069,6 +1078,7 @@ out_free:	dev_kfree_skb(skb);
 		q->tso++;
 		q->tx_cso += ssi->gso_segs;
 	} else {
+		len += sizeof(*cpl);
 		wr->op_immdlen = htonl(FW_WR_OP(FW_ETH_TX_PKT_WR) |
 				       FW_WR_IMMDLEN(len));
 		cpl = (void *)(wr + 1);
@@ -2008,7 +2018,7 @@ irq_handler_t t4_intr_handler(struct adapter *adap)
 static void sge_rx_timer_cb(unsigned long data)
 {
 	unsigned long m;
-	unsigned int i, cnt[2];
+	unsigned int i, idma_same_state_cnt[2];
 	struct adapter *adap = (struct adapter *)data;
 	struct sge *s = &adap->sge;
 
@@ -2031,21 +2041,64 @@ static void sge_rx_timer_cb(unsigned long data)
 	}
 
 	t4_write_reg(adap, SGE_DEBUG_INDEX, 13);
-	cnt[0] = t4_read_reg(adap, SGE_DEBUG_DATA_HIGH);
-	cnt[1] = t4_read_reg(adap, SGE_DEBUG_DATA_LOW);
+	idma_same_state_cnt[0] = t4_read_reg(adap, SGE_DEBUG_DATA_HIGH);
+	idma_same_state_cnt[1] = t4_read_reg(adap, SGE_DEBUG_DATA_LOW);
 
-	for (i = 0; i < 2; i++)
-		if (cnt[i] >= s->starve_thres) {
-			if (s->idma_state[i] || cnt[i] == 0xffffffff)
-				continue;
-			s->idma_state[i] = 1;
-			t4_write_reg(adap, SGE_DEBUG_INDEX, 11);
-			m = t4_read_reg(adap, SGE_DEBUG_DATA_LOW) >> (i * 16);
-			dev_warn(adap->pdev_dev,
-				 "SGE idma%u starvation detected for "
-				 "queue %lu\n", i, m & 0xffff);
-		} else if (s->idma_state[i])
-			s->idma_state[i] = 0;
+	for (i = 0; i < 2; i++) {
+		u32 debug0, debug11;
+
+		/* If the Ingress DMA Same State Counter ("timer") is less
+		 * than 1s, then we can reset our synthesized Stall Timer and
+		 * continue.  If we have previously emitted warnings about a
+		 * potential stalled Ingress Queue, issue a note indicating
+		 * that the Ingress Queue has resumed forward progress.
+		 */
+		if (idma_same_state_cnt[i] < s->idma_1s_thresh) {
+			if (s->idma_stalled[i] >= SGE_IDMA_WARN_THRESH)
+				CH_WARN(adap, "SGE idma%u, queue%u, resumed after %d sec\n",
+					i, s->idma_qid[i],
+					s->idma_stalled[i]/HZ);
+			s->idma_stalled[i] = 0;
+			continue;
+		}
+
+		/* Synthesize an SGE Ingress DMA Same State Timer in the Hz
+		 * domain.  The first time we get here it'll be because we
+		 * passed the 1s Threshold; each additional time it'll be
+		 * because the RX Timer Callback is being fired on its regular
+		 * schedule.
+		 *
+		 * If the stall is below our Potential Hung Ingress Queue
+		 * Warning Threshold, continue.
+		 */
+		if (s->idma_stalled[i] == 0)
+			s->idma_stalled[i] = HZ;
+		else
+			s->idma_stalled[i] += RX_QCHECK_PERIOD;
+
+		if (s->idma_stalled[i] < SGE_IDMA_WARN_THRESH)
+			continue;
+
+		/* We'll issue a warning every SGE_IDMA_WARN_REPEAT Hz */
+		if (((s->idma_stalled[i] - HZ) % SGE_IDMA_WARN_REPEAT) != 0)
+			continue;
+
+		/* Read and save the SGE IDMA State and Queue ID information.
+		 * We do this every time in case it changes across time ...
+		 */
+		t4_write_reg(adap, SGE_DEBUG_INDEX, 0);
+		debug0 = t4_read_reg(adap, SGE_DEBUG_DATA_LOW);
+		s->idma_state[i] = (debug0 >> (i * 9)) & 0x3f;
+
+		t4_write_reg(adap, SGE_DEBUG_INDEX, 11);
+		debug11 = t4_read_reg(adap, SGE_DEBUG_DATA_LOW);
+		s->idma_qid[i] = (debug11 >> (i * 16)) & 0xffff;
+
+		CH_WARN(adap, "SGE idma%u, queue%u, maybe stuck state%u %dsecs (debug0=%#x, debug11=%#x)\n",
+			i, s->idma_qid[i], s->idma_state[i],
+			s->idma_stalled[i]/HZ, debug0, debug11);
+		t4_sge_decode_idma_state(adap, s->idma_state[i]);
+	}
 
 	mod_timer(&s->rx_timer, jiffies + RX_QCHECK_PERIOD);
 }
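The loop above distills to a small per-channel state machine: reset on forward progress, start the synthesized timer at 1s on the first stalled observation, advance it by the callback period thereafter, and warn at the first threshold crossing and then once per SGE_IDMA_WARN_REPEAT. A minimal user-space sketch of just that logic (struct and function names are illustrative, not driver API):

#include <stdbool.h>

struct idma_chan {
	unsigned int stalled;	/* synthesized stall timer, in jiffies */
};

/* One rx-timer tick for one channel; returns true when a warning is due.
 * hz, qcheck_period, warn_thresh and warn_repeat stand in for HZ,
 * RX_QCHECK_PERIOD, SGE_IDMA_WARN_THRESH and SGE_IDMA_WARN_REPEAT. */
static bool idma_tick(struct idma_chan *c, bool same_state_over_1s,
		      unsigned int hz, unsigned int qcheck_period,
		      unsigned int warn_thresh, unsigned int warn_repeat)
{
	if (!same_state_over_1s) {
		c->stalled = 0;		/* forward progress: reset timer */
		return false;
	}
	/* First tick past the 1s threshold starts the timer at 1s;
	 * later ticks advance it by the callback period. */
	c->stalled = c->stalled ? c->stalled + qcheck_period : hz;
	if (c->stalled < warn_thresh)
		return false;
	/* Warn at the first crossing, then once per warn_repeat. */
	return (c->stalled - hz) % warn_repeat == 0;
}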
@@ -2596,11 +2649,19 @@ static int t4_sge_init_soft(struct adapter *adap)
 	fl_small_mtu = READ_FL_BUF(RX_SMALL_MTU_BUF);
 	fl_large_mtu = READ_FL_BUF(RX_LARGE_MTU_BUF);
 
+	/* We only bother using the Large Page logic if the Large Page Buffer
+	 * is larger than our Page Size Buffer.
+	 */
+	if (fl_large_pg <= fl_small_pg)
+		fl_large_pg = 0;
+
 #undef READ_FL_BUF
 
+	/* The Page Size Buffer must be exactly equal to our Page Size and the
+	 * Large Page Size Buffer should be 0 (per above) or a power of 2.
+	 */
 	if (fl_small_pg != PAGE_SIZE ||
-	    (fl_large_pg != 0 && (fl_large_pg < fl_small_pg ||
-				  (fl_large_pg & (fl_large_pg-1)) != 0))) {
+	    (fl_large_pg & (fl_large_pg-1)) != 0) {
 		dev_err(adap->pdev_dev, "bad SGE FL page buffer sizes [%d, %d]\n",
 			fl_small_pg, fl_large_pg);
 		return -EINVAL;
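The simplified validation leans on the classic power-of-two test: x & (x - 1) is zero exactly when x has at most one bit set, so it accepts both a power of two and the fl_large_pg == 0 value produced by the clamp above. A standalone sketch (function name hypothetical):

#include <assert.h>
#include <stdbool.h>

static bool zero_or_pow2(unsigned int x)
{
	/* x & (x - 1) clears the lowest set bit of x. */
	return (x & (x - 1)) == 0;
}

int main(void)
{
	assert(zero_or_pow2(0));	/* large-page buffer disabled */
	assert(zero_or_pow2(65536));	/* e.g. a 64KB large-page buffer */
	assert(!zero_or_pow2(12288));	/* 12KB is not a power of two */
	return 0;
}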
@@ -2715,8 +2776,8 @@ static int t4_sge_init_hard(struct adapter *adap)
 int t4_sge_init(struct adapter *adap)
 {
 	struct sge *s = &adap->sge;
-	u32 sge_control;
-	int ret;
+	u32 sge_control, sge_conm_ctrl;
+	int ret, egress_threshold;
 
 	/*
 	 * Ingress Padding Boundary and Egress Status Page Size are set up by
@@ -2741,15 +2802,24 @@ int t4_sge_init(struct adapter *adap)
 	 * SGE's Egress Congestion Threshold.  If it isn't, then we can get
 	 * stuck waiting for new packets while the SGE is waiting for us to
 	 * give it more Free List entries.  (Note that the SGE's Egress
-	 * Congestion Threshold is in units of 2 Free List pointers.)
+	 * Congestion Threshold is in units of 2 Free List pointers.)  For T4,
+	 * there was only a single field to control this.  For T5 there's the
+	 * original field which now only applies to Unpacked Mode Free List
+	 * buffers and a new field which only applies to Packed Mode Free List
+	 * buffers.
 	 */
-	s->fl_starve_thres
-		= EGRTHRESHOLD_GET(t4_read_reg(adap, SGE_CONM_CTRL))*2 + 1;
+	sge_conm_ctrl = t4_read_reg(adap, SGE_CONM_CTRL);
+	if (is_t4(adap->params.chip))
+		egress_threshold = EGRTHRESHOLD_GET(sge_conm_ctrl);
+	else
+		egress_threshold = EGRTHRESHOLDPACKING_GET(sge_conm_ctrl);
+	s->fl_starve_thres = 2*egress_threshold + 1;
 
 	setup_timer(&s->rx_timer, sge_rx_timer_cb, (unsigned long)adap);
 	setup_timer(&s->tx_timer, sge_tx_timer_cb, (unsigned long)adap);
-	s->starve_thres = core_ticks_per_usec(adap) * 1000000;	/* 1 s */
-	s->idma_state[0] = s->idma_state[1] = 0;
+	s->idma_1s_thresh = core_ticks_per_usec(adap) * 1000000; /* 1 s */
+	s->idma_stalled[0] = 0;
+	s->idma_stalled[1] = 0;
 	spin_lock_init(&s->intrq_lock);
 
 	return 0;
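A self-contained sketch of the T5 (Packed Mode) branch of the derivation above, reusing the field macros this patch adds to t4_regs.h; the SGE_CONM_CTRL readback value is hypothetical:

#include <stdio.h>

#define EGRTHRESHOLDPACKING_MASK	0x3fU
#define EGRTHRESHOLDPACKING_SHIFT	14
#define EGRTHRESHOLDPACKING_GET(x)	(((x) >> EGRTHRESHOLDPACKING_SHIFT) & \
					 EGRTHRESHOLDPACKING_MASK)

int main(void)
{
	/* Hypothetical SGE_CONM_CTRL readback: packed-mode egress
	 * congestion threshold field set to 8, i.e. 16 Free List
	 * pointers (the field is in units of 2 FL pointers). */
	unsigned int sge_conm_ctrl = 8u << EGRTHRESHOLDPACKING_SHIFT;
	unsigned int egress_threshold = EGRTHRESHOLDPACKING_GET(sge_conm_ctrl);

	/* 2*threshold converts units of 2 FL pointers into FL entries;
	 * the +1 keeps the driver's threshold strictly above the SGE's. */
	unsigned int fl_starve_thres = 2 * egress_threshold + 1;

	printf("egress_threshold=%u fl_starve_thres=%u\n",
	       egress_threshold, fl_starve_thres);	/* 8 -> 17 */
	return 0;
}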
diff --git a/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c b/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c
index d3c2a516fa88..fb2fe65903c2 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c
+++ b/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c
@@ -2597,6 +2597,112 @@ int t4_mdio_wr(struct adapter *adap, unsigned int mbox, unsigned int phy_addr,
 }
 
 /**
+ *	t4_sge_decode_idma_state - decode the idma state
+ *	@adapter: the adapter
+ *	@state: the state idma is stuck in
+ */
+void t4_sge_decode_idma_state(struct adapter *adapter, int state)
+{
+	static const char * const t4_decode[] = {
+		"IDMA_IDLE",
+		"IDMA_PUSH_MORE_CPL_FIFO",
+		"IDMA_PUSH_CPL_MSG_HEADER_TO_FIFO",
+		"Not used",
+		"IDMA_PHYSADDR_SEND_PCIEHDR",
+		"IDMA_PHYSADDR_SEND_PAYLOAD_FIRST",
+		"IDMA_PHYSADDR_SEND_PAYLOAD",
+		"IDMA_SEND_FIFO_TO_IMSG",
+		"IDMA_FL_REQ_DATA_FL_PREP",
+		"IDMA_FL_REQ_DATA_FL",
+		"IDMA_FL_DROP",
+		"IDMA_FL_H_REQ_HEADER_FL",
+		"IDMA_FL_H_SEND_PCIEHDR",
+		"IDMA_FL_H_PUSH_CPL_FIFO",
+		"IDMA_FL_H_SEND_CPL",
+		"IDMA_FL_H_SEND_IP_HDR_FIRST",
+		"IDMA_FL_H_SEND_IP_HDR",
+		"IDMA_FL_H_REQ_NEXT_HEADER_FL",
+		"IDMA_FL_H_SEND_NEXT_PCIEHDR",
+		"IDMA_FL_H_SEND_IP_HDR_PADDING",
+		"IDMA_FL_D_SEND_PCIEHDR",
+		"IDMA_FL_D_SEND_CPL_AND_IP_HDR",
+		"IDMA_FL_D_REQ_NEXT_DATA_FL",
+		"IDMA_FL_SEND_PCIEHDR",
+		"IDMA_FL_PUSH_CPL_FIFO",
+		"IDMA_FL_SEND_CPL",
+		"IDMA_FL_SEND_PAYLOAD_FIRST",
+		"IDMA_FL_SEND_PAYLOAD",
+		"IDMA_FL_REQ_NEXT_DATA_FL",
+		"IDMA_FL_SEND_NEXT_PCIEHDR",
+		"IDMA_FL_SEND_PADDING",
+		"IDMA_FL_SEND_COMPLETION_TO_IMSG",
+		"IDMA_FL_SEND_FIFO_TO_IMSG",
+		"IDMA_FL_REQ_DATAFL_DONE",
+		"IDMA_FL_REQ_HEADERFL_DONE",
+	};
+	static const char * const t5_decode[] = {
+		"IDMA_IDLE",
+		"IDMA_ALMOST_IDLE",
+		"IDMA_PUSH_MORE_CPL_FIFO",
+		"IDMA_PUSH_CPL_MSG_HEADER_TO_FIFO",
+		"IDMA_SGEFLRFLUSH_SEND_PCIEHDR",
+		"IDMA_PHYSADDR_SEND_PCIEHDR",
+		"IDMA_PHYSADDR_SEND_PAYLOAD_FIRST",
+		"IDMA_PHYSADDR_SEND_PAYLOAD",
+		"IDMA_SEND_FIFO_TO_IMSG",
+		"IDMA_FL_REQ_DATA_FL",
+		"IDMA_FL_DROP",
+		"IDMA_FL_DROP_SEND_INC",
+		"IDMA_FL_H_REQ_HEADER_FL",
+		"IDMA_FL_H_SEND_PCIEHDR",
+		"IDMA_FL_H_PUSH_CPL_FIFO",
+		"IDMA_FL_H_SEND_CPL",
+		"IDMA_FL_H_SEND_IP_HDR_FIRST",
+		"IDMA_FL_H_SEND_IP_HDR",
+		"IDMA_FL_H_REQ_NEXT_HEADER_FL",
+		"IDMA_FL_H_SEND_NEXT_PCIEHDR",
+		"IDMA_FL_H_SEND_IP_HDR_PADDING",
+		"IDMA_FL_D_SEND_PCIEHDR",
+		"IDMA_FL_D_SEND_CPL_AND_IP_HDR",
+		"IDMA_FL_D_REQ_NEXT_DATA_FL",
+		"IDMA_FL_SEND_PCIEHDR",
+		"IDMA_FL_PUSH_CPL_FIFO",
+		"IDMA_FL_SEND_CPL",
+		"IDMA_FL_SEND_PAYLOAD_FIRST",
+		"IDMA_FL_SEND_PAYLOAD",
+		"IDMA_FL_REQ_NEXT_DATA_FL",
+		"IDMA_FL_SEND_NEXT_PCIEHDR",
+		"IDMA_FL_SEND_PADDING",
+		"IDMA_FL_SEND_COMPLETION_TO_IMSG",
+	};
+	static const u32 sge_regs[] = {
+		SGE_DEBUG_DATA_LOW_INDEX_2,
+		SGE_DEBUG_DATA_LOW_INDEX_3,
+		SGE_DEBUG_DATA_HIGH_INDEX_10,
+	};
+	const char **sge_idma_decode;
+	int sge_idma_decode_nstates;
+	int i;
+
+	if (is_t4(adapter->params.chip)) {
+		sge_idma_decode = (const char **)t4_decode;
+		sge_idma_decode_nstates = ARRAY_SIZE(t4_decode);
+	} else {
+		sge_idma_decode = (const char **)t5_decode;
+		sge_idma_decode_nstates = ARRAY_SIZE(t5_decode);
+	}
+
+	if (state < sge_idma_decode_nstates)
+		CH_WARN(adapter, "idma state %s\n", sge_idma_decode[state]);
+	else
+		CH_WARN(adapter, "idma state %d unknown\n", state);
+
+	for (i = 0; i < ARRAY_SIZE(sge_regs); i++)
+		CH_WARN(adapter, "SGE register %#x value %#x\n",
+			sge_regs[i], t4_read_reg(adapter, sge_regs[i]));
+}
+
+/**
  *	t4_fw_hello - establish communication with FW
  *	@adap: the adapter
  *	@mbox: mailbox to use for the FW command
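The decode routine is a plain bounds-checked table lookup keyed by the 6-bit state field captured in sge_rx_timer_cb. A reduced standalone sketch of the same pattern (table truncated, names illustrative):

#include <stdio.h>

#define ARRAY_SIZE(a) (sizeof(a) / sizeof((a)[0]))

static const char * const demo_decode[] = {
	"IDMA_IDLE",
	"IDMA_PUSH_MORE_CPL_FIFO",
	"IDMA_PUSH_CPL_MSG_HEADER_TO_FIFO",
	/* ... remaining states elided ... */
};

static void decode_state(int state)
{
	/* Index the table only after a bounds check: the hardware field
	 * can hold values the per-chip table doesn't cover. */
	if (state >= 0 && state < (int)ARRAY_SIZE(demo_decode))
		printf("idma state %s\n", demo_decode[state]);
	else
		printf("idma state %d unknown\n", state);
}

int main(void)
{
	decode_state(1);	/* IDMA_PUSH_MORE_CPL_FIFO */
	decode_state(63);	/* unknown */
	return 0;
}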
diff --git a/drivers/net/ethernet/chelsio/cxgb4/t4_regs.h b/drivers/net/ethernet/chelsio/cxgb4/t4_regs.h
index 4082522d8140..225ad8a5722d 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/t4_regs.h
+++ b/drivers/net/ethernet/chelsio/cxgb4/t4_regs.h
@@ -230,6 +230,12 @@
 #define  EGRTHRESHOLD(x)     ((x) << EGRTHRESHOLDshift)
 #define  EGRTHRESHOLD_GET(x) (((x) & EGRTHRESHOLD_MASK) >> EGRTHRESHOLDshift)
 
+#define EGRTHRESHOLDPACKING_MASK	0x3fU
+#define EGRTHRESHOLDPACKING_SHIFT	14
+#define EGRTHRESHOLDPACKING(x)		((x) << EGRTHRESHOLDPACKING_SHIFT)
+#define EGRTHRESHOLDPACKING_GET(x)	(((x) >> EGRTHRESHOLDPACKING_SHIFT) & \
+					 EGRTHRESHOLDPACKING_MASK)
+
 #define SGE_DBFIFO_STATUS 0x10a4
 #define  HP_INT_THRESH_SHIFT 28
 #define  HP_INT_THRESH_MASK  0xfU
@@ -278,6 +284,9 @@
 #define SGE_DEBUG_INDEX 0x10cc
 #define SGE_DEBUG_DATA_HIGH 0x10d0
 #define SGE_DEBUG_DATA_LOW 0x10d4
+#define SGE_DEBUG_DATA_LOW_INDEX_2 0x12c8
+#define SGE_DEBUG_DATA_LOW_INDEX_3 0x12cc
+#define SGE_DEBUG_DATA_HIGH_INDEX_10 0x12a8
 #define SGE_INGRESS_QUEUES_PER_PAGE_PF 0x10f4
 
 #define S_HP_INT_THRESH 28
