aboutsummaryrefslogtreecommitdiffstats
diff options
context:
space:
mode:
authorHariprasad Shenai <hariprasad@chelsio.com>2015-05-11 19:13:43 -0400
committerDavid S. Miller <davem@davemloft.net>2015-05-12 23:11:40 -0400
commit1ecc7b7a5998eb8fc4e9f79979638e77436b0b0b (patch)
tree4977b5551f46293ec2ca79bcdbe4cc355fbe5264
parent82fa3c776e5abba7ed6e4b4f4983d14731c37d6a (diff)
cxgb4/cxgb4vf: Cleanup macros, add comments and add new MACROS
Clean up a few MACROS left out in t4_hw.h to be consistent with the existing ones. Also replace a few hardcoded values with MACROS, and update comments for some code. Signed-off-by: Hariprasad Shenai <hariprasad@chelsio.com> Signed-off-by: David S. Miller <davem@davemloft.net>
-rw-r--r--drivers/net/ethernet/chelsio/cxgb4/cxgb4_ethtool.c2
-rw-r--r--drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c2
-rw-r--r--drivers/net/ethernet/chelsio/cxgb4/sge.c160
-rw-r--r--drivers/net/ethernet/chelsio/cxgb4/t4_hw.h34
-rw-r--r--drivers/net/ethernet/chelsio/cxgb4/t4_msg.h83
-rw-r--r--drivers/net/ethernet/chelsio/cxgb4/t4_values.h14
-rw-r--r--drivers/net/ethernet/chelsio/cxgb4/t4fw_api.h1
-rw-r--r--drivers/net/ethernet/chelsio/cxgb4vf/cxgb4vf_main.c13
-rw-r--r--drivers/net/ethernet/chelsio/cxgb4vf/sge.c73
9 files changed, 235 insertions, 147 deletions
diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_ethtool.c b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_ethtool.c
index 10d82b51d7ef..401272a2691e 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_ethtool.c
+++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_ethtool.c
@@ -578,7 +578,7 @@ static int get_coalesce(struct net_device *dev, struct ethtool_coalesce *c)
578 const struct sge_rspq *rq = &adap->sge.ethrxq[pi->first_qset].rspq; 578 const struct sge_rspq *rq = &adap->sge.ethrxq[pi->first_qset].rspq;
579 579
580 c->rx_coalesce_usecs = qtimer_val(adap, rq); 580 c->rx_coalesce_usecs = qtimer_val(adap, rq);
581 c->rx_max_coalesced_frames = (rq->intr_params & QINTR_CNT_EN) ? 581 c->rx_max_coalesced_frames = (rq->intr_params & QINTR_CNT_EN_F) ?
582 adap->sge.counter_val[rq->pktcnt_idx] : 0; 582 adap->sge.counter_val[rq->pktcnt_idx] : 0;
583 c->use_adaptive_rx_coalesce = get_adaptive_rx_setting(dev); 583 c->use_adaptive_rx_coalesce = get_adaptive_rx_setting(dev);
584 return 0; 584 return 0;
diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c
index 223995e7b643..5aecf69efe56 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c
+++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c
@@ -1420,7 +1420,7 @@ int cxgb4_set_rspq_intr_params(struct sge_rspq *q,
1420 } 1420 }
1421 1421
1422 us = us == 0 ? 6 : closest_timer(&adap->sge, us); 1422 us = us == 0 ? 6 : closest_timer(&adap->sge, us);
1423 q->intr_params = QINTR_TIMER_IDX(us) | (cnt > 0 ? QINTR_CNT_EN : 0); 1423 q->intr_params = QINTR_TIMER_IDX_V(us) | QINTR_CNT_EN_V(cnt > 0);
1424 return 0; 1424 return 0;
1425} 1425}
1426 1426
diff --git a/drivers/net/ethernet/chelsio/cxgb4/sge.c b/drivers/net/ethernet/chelsio/cxgb4/sge.c
index 898842df38fc..dd18fcb644f9 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/sge.c
+++ b/drivers/net/ethernet/chelsio/cxgb4/sge.c
@@ -530,6 +530,10 @@ static inline void ring_fl_db(struct adapter *adap, struct sge_fl *q)
530 val = PIDX_T5_V(q->pend_cred / 8) | 530 val = PIDX_T5_V(q->pend_cred / 8) |
531 DBTYPE_F; 531 DBTYPE_F;
532 val |= DBPRIO_F; 532 val |= DBPRIO_F;
533
534 /* Make sure all memory writes to the Free List queue are
535 * committed before we tell the hardware about them.
536 */
533 wmb(); 537 wmb();
534 538
535 /* If we don't have access to the new User Doorbell (T5+), use 539 /* If we don't have access to the new User Doorbell (T5+), use
@@ -920,7 +924,10 @@ static void cxgb_pio_copy(u64 __iomem *dst, u64 *src)
920 */ 924 */
921static inline void ring_tx_db(struct adapter *adap, struct sge_txq *q, int n) 925static inline void ring_tx_db(struct adapter *adap, struct sge_txq *q, int n)
922{ 926{
923 wmb(); /* write descriptors before telling HW */ 927 /* Make sure that all writes to the TX Descriptors are committed
928 * before we tell the hardware about them.
929 */
930 wmb();
924 931
925 /* If we don't have access to the new User Doorbell (T5+), use the old 932 /* If we don't have access to the new User Doorbell (T5+), use the old
926 * doorbell mechanism; otherwise use the new BAR2 mechanism. 933 * doorbell mechanism; otherwise use the new BAR2 mechanism.
@@ -1037,7 +1044,7 @@ nocsum: /*
1037 * unknown protocol, disable HW csum 1044 * unknown protocol, disable HW csum
1038 * and hope a bad packet is detected 1045 * and hope a bad packet is detected
1039 */ 1046 */
1040 return TXPKT_L4CSUM_DIS; 1047 return TXPKT_L4CSUM_DIS_F;
1041 } 1048 }
1042 } else { 1049 } else {
1043 /* 1050 /*
@@ -1054,14 +1061,15 @@ nocsum: /*
1054 } 1061 }
1055 1062
1056 if (likely(csum_type >= TX_CSUM_TCPIP)) 1063 if (likely(csum_type >= TX_CSUM_TCPIP))
1057 return TXPKT_CSUM_TYPE(csum_type) | 1064 return TXPKT_CSUM_TYPE_V(csum_type) |
1058 TXPKT_IPHDR_LEN(skb_network_header_len(skb)) | 1065 TXPKT_IPHDR_LEN_V(skb_network_header_len(skb)) |
1059 TXPKT_ETHHDR_LEN(skb_network_offset(skb) - ETH_HLEN); 1066 TXPKT_ETHHDR_LEN_V(skb_network_offset(skb) - ETH_HLEN);
1060 else { 1067 else {
1061 int start = skb_transport_offset(skb); 1068 int start = skb_transport_offset(skb);
1062 1069
1063 return TXPKT_CSUM_TYPE(csum_type) | TXPKT_CSUM_START(start) | 1070 return TXPKT_CSUM_TYPE_V(csum_type) |
1064 TXPKT_CSUM_LOC(start + skb->csum_offset); 1071 TXPKT_CSUM_START_V(start) |
1072 TXPKT_CSUM_LOC_V(start + skb->csum_offset);
1065 } 1073 }
1066} 1074}
1067 1075
@@ -1102,11 +1110,11 @@ cxgb_fcoe_offload(struct sk_buff *skb, struct adapter *adap,
1102 return -ENOTSUPP; 1110 return -ENOTSUPP;
1103 1111
1104 /* FC CRC offload */ 1112 /* FC CRC offload */
1105 *cntrl = TXPKT_CSUM_TYPE(TX_CSUM_FCOE) | 1113 *cntrl = TXPKT_CSUM_TYPE_V(TX_CSUM_FCOE) |
1106 TXPKT_L4CSUM_DIS | TXPKT_IPCSUM_DIS | 1114 TXPKT_L4CSUM_DIS_F | TXPKT_IPCSUM_DIS_F |
1107 TXPKT_CSUM_START(CXGB_FCOE_TXPKT_CSUM_START) | 1115 TXPKT_CSUM_START_V(CXGB_FCOE_TXPKT_CSUM_START) |
1108 TXPKT_CSUM_END(CXGB_FCOE_TXPKT_CSUM_END) | 1116 TXPKT_CSUM_END_V(CXGB_FCOE_TXPKT_CSUM_END) |
1109 TXPKT_CSUM_LOC(CXGB_FCOE_TXPKT_CSUM_END); 1117 TXPKT_CSUM_LOC_V(CXGB_FCOE_TXPKT_CSUM_END);
1110 return 0; 1118 return 0;
1111} 1119}
1112#endif /* CONFIG_CHELSIO_T4_FCOE */ 1120#endif /* CONFIG_CHELSIO_T4_FCOE */
@@ -1159,7 +1167,7 @@ out_free: dev_kfree_skb_any(skb);
1159 q = &adap->sge.ethtxq[qidx + pi->first_qset]; 1167 q = &adap->sge.ethtxq[qidx + pi->first_qset];
1160 1168
1161 reclaim_completed_tx(adap, &q->q, true); 1169 reclaim_completed_tx(adap, &q->q, true);
1162 cntrl = TXPKT_L4CSUM_DIS | TXPKT_IPCSUM_DIS; 1170 cntrl = TXPKT_L4CSUM_DIS_F | TXPKT_IPCSUM_DIS_F;
1163 1171
1164#ifdef CONFIG_CHELSIO_T4_FCOE 1172#ifdef CONFIG_CHELSIO_T4_FCOE
1165 err = cxgb_fcoe_offload(skb, adap, pi, &cntrl); 1173 err = cxgb_fcoe_offload(skb, adap, pi, &cntrl);
@@ -1210,23 +1218,23 @@ out_free: dev_kfree_skb_any(skb);
1210 len += sizeof(*lso); 1218 len += sizeof(*lso);
1211 wr->op_immdlen = htonl(FW_WR_OP_V(FW_ETH_TX_PKT_WR) | 1219 wr->op_immdlen = htonl(FW_WR_OP_V(FW_ETH_TX_PKT_WR) |
1212 FW_WR_IMMDLEN_V(len)); 1220 FW_WR_IMMDLEN_V(len));
1213 lso->c.lso_ctrl = htonl(LSO_OPCODE(CPL_TX_PKT_LSO) | 1221 lso->c.lso_ctrl = htonl(LSO_OPCODE_V(CPL_TX_PKT_LSO) |
1214 LSO_FIRST_SLICE | LSO_LAST_SLICE | 1222 LSO_FIRST_SLICE_F | LSO_LAST_SLICE_F |
1215 LSO_IPV6(v6) | 1223 LSO_IPV6_V(v6) |
1216 LSO_ETHHDR_LEN(eth_xtra_len / 4) | 1224 LSO_ETHHDR_LEN_V(eth_xtra_len / 4) |
1217 LSO_IPHDR_LEN(l3hdr_len / 4) | 1225 LSO_IPHDR_LEN_V(l3hdr_len / 4) |
1218 LSO_TCPHDR_LEN(tcp_hdr(skb)->doff)); 1226 LSO_TCPHDR_LEN_V(tcp_hdr(skb)->doff));
1219 lso->c.ipid_ofst = htons(0); 1227 lso->c.ipid_ofst = htons(0);
1220 lso->c.mss = htons(ssi->gso_size); 1228 lso->c.mss = htons(ssi->gso_size);
1221 lso->c.seqno_offset = htonl(0); 1229 lso->c.seqno_offset = htonl(0);
1222 if (is_t4(adap->params.chip)) 1230 if (is_t4(adap->params.chip))
1223 lso->c.len = htonl(skb->len); 1231 lso->c.len = htonl(skb->len);
1224 else 1232 else
1225 lso->c.len = htonl(LSO_T5_XFER_SIZE(skb->len)); 1233 lso->c.len = htonl(LSO_T5_XFER_SIZE_V(skb->len));
1226 cpl = (void *)(lso + 1); 1234 cpl = (void *)(lso + 1);
1227 cntrl = TXPKT_CSUM_TYPE(v6 ? TX_CSUM_TCPIP6 : TX_CSUM_TCPIP) | 1235 cntrl = TXPKT_CSUM_TYPE_V(v6 ? TX_CSUM_TCPIP6 : TX_CSUM_TCPIP) |
1228 TXPKT_IPHDR_LEN(l3hdr_len) | 1236 TXPKT_IPHDR_LEN_V(l3hdr_len) |
1229 TXPKT_ETHHDR_LEN(eth_xtra_len); 1237 TXPKT_ETHHDR_LEN_V(eth_xtra_len);
1230 q->tso++; 1238 q->tso++;
1231 q->tx_cso += ssi->gso_segs; 1239 q->tx_cso += ssi->gso_segs;
1232 } else { 1240 } else {
@@ -1235,23 +1243,24 @@ out_free: dev_kfree_skb_any(skb);
1235 FW_WR_IMMDLEN_V(len)); 1243 FW_WR_IMMDLEN_V(len));
1236 cpl = (void *)(wr + 1); 1244 cpl = (void *)(wr + 1);
1237 if (skb->ip_summed == CHECKSUM_PARTIAL) { 1245 if (skb->ip_summed == CHECKSUM_PARTIAL) {
1238 cntrl = hwcsum(skb) | TXPKT_IPCSUM_DIS; 1246 cntrl = hwcsum(skb) | TXPKT_IPCSUM_DIS_F;
1239 q->tx_cso++; 1247 q->tx_cso++;
1240 } 1248 }
1241 } 1249 }
1242 1250
1243 if (skb_vlan_tag_present(skb)) { 1251 if (skb_vlan_tag_present(skb)) {
1244 q->vlan_ins++; 1252 q->vlan_ins++;
1245 cntrl |= TXPKT_VLAN_VLD | TXPKT_VLAN(skb_vlan_tag_get(skb)); 1253 cntrl |= TXPKT_VLAN_VLD_F | TXPKT_VLAN_V(skb_vlan_tag_get(skb));
1246#ifdef CONFIG_CHELSIO_T4_FCOE 1254#ifdef CONFIG_CHELSIO_T4_FCOE
1247 if (skb->protocol == htons(ETH_P_FCOE)) 1255 if (skb->protocol == htons(ETH_P_FCOE))
1248 cntrl |= TXPKT_VLAN( 1256 cntrl |= TXPKT_VLAN_V(
1249 ((skb->priority & 0x7) << VLAN_PRIO_SHIFT)); 1257 ((skb->priority & 0x7) << VLAN_PRIO_SHIFT));
1250#endif /* CONFIG_CHELSIO_T4_FCOE */ 1258#endif /* CONFIG_CHELSIO_T4_FCOE */
1251 } 1259 }
1252 1260
1253 cpl->ctrl0 = htonl(TXPKT_OPCODE(CPL_TX_PKT_XT) | 1261 cpl->ctrl0 = htonl(TXPKT_OPCODE_V(CPL_TX_PKT_XT) |
1254 TXPKT_INTF(pi->tx_chan) | TXPKT_PF(adap->fn)); 1262 TXPKT_INTF_V(pi->tx_chan) |
1263 TXPKT_PF_V(adap->fn));
1255 cpl->pack = htons(0); 1264 cpl->pack = htons(0);
1256 cpl->len = htons(skb->len); 1265 cpl->len = htons(skb->len);
1257 cpl->ctrl1 = cpu_to_be64(cntrl); 1266 cpl->ctrl1 = cpu_to_be64(cntrl);
@@ -1961,7 +1970,7 @@ static void restore_rx_bufs(const struct pkt_gl *si, struct sge_fl *q,
1961static inline bool is_new_response(const struct rsp_ctrl *r, 1970static inline bool is_new_response(const struct rsp_ctrl *r,
1962 const struct sge_rspq *q) 1971 const struct sge_rspq *q)
1963{ 1972{
1964 return RSPD_GEN(r->type_gen) == q->gen; 1973 return (r->type_gen >> RSPD_GEN_S) == q->gen;
1965} 1974}
1966 1975
1967/** 1976/**
@@ -2008,19 +2017,19 @@ static int process_responses(struct sge_rspq *q, int budget)
2008 break; 2017 break;
2009 2018
2010 dma_rmb(); 2019 dma_rmb();
2011 rsp_type = RSPD_TYPE(rc->type_gen); 2020 rsp_type = RSPD_TYPE_G(rc->type_gen);
2012 if (likely(rsp_type == RSP_TYPE_FLBUF)) { 2021 if (likely(rsp_type == RSPD_TYPE_FLBUF_X)) {
2013 struct page_frag *fp; 2022 struct page_frag *fp;
2014 struct pkt_gl si; 2023 struct pkt_gl si;
2015 const struct rx_sw_desc *rsd; 2024 const struct rx_sw_desc *rsd;
2016 u32 len = ntohl(rc->pldbuflen_qid), bufsz, frags; 2025 u32 len = ntohl(rc->pldbuflen_qid), bufsz, frags;
2017 2026
2018 if (len & RSPD_NEWBUF) { 2027 if (len & RSPD_NEWBUF_F) {
2019 if (likely(q->offset > 0)) { 2028 if (likely(q->offset > 0)) {
2020 free_rx_bufs(q->adap, &rxq->fl, 1); 2029 free_rx_bufs(q->adap, &rxq->fl, 1);
2021 q->offset = 0; 2030 q->offset = 0;
2022 } 2031 }
2023 len = RSPD_LEN(len); 2032 len = RSPD_LEN_G(len);
2024 } 2033 }
2025 si.tot_len = len; 2034 si.tot_len = len;
2026 2035
@@ -2055,7 +2064,7 @@ static int process_responses(struct sge_rspq *q, int budget)
2055 q->offset += ALIGN(fp->size, s->fl_align); 2064 q->offset += ALIGN(fp->size, s->fl_align);
2056 else 2065 else
2057 restore_rx_bufs(&si, &rxq->fl, frags); 2066 restore_rx_bufs(&si, &rxq->fl, frags);
2058 } else if (likely(rsp_type == RSP_TYPE_CPL)) { 2067 } else if (likely(rsp_type == RSPD_TYPE_CPL_X)) {
2059 ret = q->handler(q, q->cur_desc, NULL); 2068 ret = q->handler(q, q->cur_desc, NULL);
2060 } else { 2069 } else {
2061 ret = q->handler(q, (const __be64 *)rc, CXGB4_MSG_AN); 2070 ret = q->handler(q, (const __be64 *)rc, CXGB4_MSG_AN);
@@ -2063,7 +2072,7 @@ static int process_responses(struct sge_rspq *q, int budget)
2063 2072
2064 if (unlikely(ret)) { 2073 if (unlikely(ret)) {
2065 /* couldn't process descriptor, back off for recovery */ 2074 /* couldn't process descriptor, back off for recovery */
2066 q->next_intr_params = QINTR_TIMER_IDX(NOMEM_TMR_IDX); 2075 q->next_intr_params = QINTR_TIMER_IDX_V(NOMEM_TMR_IDX);
2067 break; 2076 break;
2068 } 2077 }
2069 2078
@@ -2087,7 +2096,7 @@ int cxgb_busy_poll(struct napi_struct *napi)
2087 return LL_FLUSH_BUSY; 2096 return LL_FLUSH_BUSY;
2088 2097
2089 work_done = process_responses(q, 4); 2098 work_done = process_responses(q, 4);
2090 params = QINTR_TIMER_IDX(TIMERREG_COUNTER0_X) | QINTR_CNT_EN; 2099 params = QINTR_TIMER_IDX_V(TIMERREG_COUNTER0_X) | QINTR_CNT_EN_V(1);
2091 q->next_intr_params = params; 2100 q->next_intr_params = params;
2092 val = CIDXINC_V(work_done) | SEINTARM_V(params); 2101 val = CIDXINC_V(work_done) | SEINTARM_V(params);
2093 2102
@@ -2134,7 +2143,7 @@ static int napi_rx_handler(struct napi_struct *napi, int budget)
2134 int timer_index; 2143 int timer_index;
2135 2144
2136 napi_complete(napi); 2145 napi_complete(napi);
2137 timer_index = QINTR_TIMER_IDX_GET(q->next_intr_params); 2146 timer_index = QINTR_TIMER_IDX_G(q->next_intr_params);
2138 2147
2139 if (q->adaptive_rx) { 2148 if (q->adaptive_rx) {
2140 if (work_done > max(timer_pkt_quota[timer_index], 2149 if (work_done > max(timer_pkt_quota[timer_index],
@@ -2144,15 +2153,16 @@ static int napi_rx_handler(struct napi_struct *napi, int budget)
2144 timer_index = timer_index - 1; 2153 timer_index = timer_index - 1;
2145 2154
2146 timer_index = clamp(timer_index, 0, SGE_TIMERREGS - 1); 2155 timer_index = clamp(timer_index, 0, SGE_TIMERREGS - 1);
2147 q->next_intr_params = QINTR_TIMER_IDX(timer_index) | 2156 q->next_intr_params =
2148 V_QINTR_CNT_EN; 2157 QINTR_TIMER_IDX_V(timer_index) |
2158 QINTR_CNT_EN_V(0);
2149 params = q->next_intr_params; 2159 params = q->next_intr_params;
2150 } else { 2160 } else {
2151 params = q->next_intr_params; 2161 params = q->next_intr_params;
2152 q->next_intr_params = q->intr_params; 2162 q->next_intr_params = q->intr_params;
2153 } 2163 }
2154 } else 2164 } else
2155 params = QINTR_TIMER_IDX(7); 2165 params = QINTR_TIMER_IDX_V(7);
2156 2166
2157 val = CIDXINC_V(work_done) | SEINTARM_V(params); 2167 val = CIDXINC_V(work_done) | SEINTARM_V(params);
2158 2168
@@ -2200,7 +2210,7 @@ static unsigned int process_intrq(struct adapter *adap)
2200 break; 2210 break;
2201 2211
2202 dma_rmb(); 2212 dma_rmb();
2203 if (RSPD_TYPE(rc->type_gen) == RSP_TYPE_INTR) { 2213 if (RSPD_TYPE_G(rc->type_gen) == RSPD_TYPE_INTR_X) {
2204 unsigned int qid = ntohl(rc->pldbuflen_qid); 2214 unsigned int qid = ntohl(rc->pldbuflen_qid);
2205 2215
2206 qid -= adap->sge.ingr_start; 2216 qid -= adap->sge.ingr_start;
@@ -2411,7 +2421,8 @@ int t4_sge_alloc_rxq(struct adapter *adap, struct sge_rspq *iq, bool fwevtq,
2411 FW_LEN16(c)); 2421 FW_LEN16(c));
2412 c.type_to_iqandstindex = htonl(FW_IQ_CMD_TYPE_V(FW_IQ_TYPE_FL_INT_CAP) | 2422 c.type_to_iqandstindex = htonl(FW_IQ_CMD_TYPE_V(FW_IQ_TYPE_FL_INT_CAP) |
2413 FW_IQ_CMD_IQASYNCH_V(fwevtq) | FW_IQ_CMD_VIID_V(pi->viid) | 2423 FW_IQ_CMD_IQASYNCH_V(fwevtq) | FW_IQ_CMD_VIID_V(pi->viid) |
2414 FW_IQ_CMD_IQANDST_V(intr_idx < 0) | FW_IQ_CMD_IQANUD_V(1) | 2424 FW_IQ_CMD_IQANDST_V(intr_idx < 0) |
2425 FW_IQ_CMD_IQANUD_V(UPDATEDELIVERY_INTERRUPT_X) |
2415 FW_IQ_CMD_IQANDSTINDEX_V(intr_idx >= 0 ? intr_idx : 2426 FW_IQ_CMD_IQANDSTINDEX_V(intr_idx >= 0 ? intr_idx :
2416 -intr_idx - 1)); 2427 -intr_idx - 1));
2417 c.iqdroprss_to_iqesize = htons(FW_IQ_CMD_IQPCIECH_V(pi->tx_chan) | 2428 c.iqdroprss_to_iqesize = htons(FW_IQ_CMD_IQPCIECH_V(pi->tx_chan) |
@@ -2450,8 +2461,9 @@ int t4_sge_alloc_rxq(struct adapter *adap, struct sge_rspq *iq, bool fwevtq,
2450 htonl(FW_IQ_CMD_FL0CNGCHMAP_V(cong) | 2461 htonl(FW_IQ_CMD_FL0CNGCHMAP_V(cong) |
2451 FW_IQ_CMD_FL0CONGCIF_F | 2462 FW_IQ_CMD_FL0CONGCIF_F |
2452 FW_IQ_CMD_FL0CONGEN_F); 2463 FW_IQ_CMD_FL0CONGEN_F);
2453 c.fl0dcaen_to_fl0cidxfthresh = htons(FW_IQ_CMD_FL0FBMIN_V(2) | 2464 c.fl0dcaen_to_fl0cidxfthresh =
2454 FW_IQ_CMD_FL0FBMAX_V(3)); 2465 htons(FW_IQ_CMD_FL0FBMIN_V(FETCHBURSTMIN_64B_X) |
2466 FW_IQ_CMD_FL0FBMAX_V(FETCHBURSTMAX_512B_X));
2455 c.fl0size = htons(flsz); 2467 c.fl0size = htons(flsz);
2456 c.fl0addr = cpu_to_be64(fl->addr); 2468 c.fl0addr = cpu_to_be64(fl->addr);
2457 } 2469 }
@@ -2595,14 +2607,15 @@ int t4_sge_alloc_eth_txq(struct adapter *adap, struct sge_eth_txq *txq,
2595 FW_EQ_ETH_CMD_EQSTART_F | FW_LEN16(c)); 2607 FW_EQ_ETH_CMD_EQSTART_F | FW_LEN16(c));
2596 c.viid_pkd = htonl(FW_EQ_ETH_CMD_AUTOEQUEQE_F | 2608 c.viid_pkd = htonl(FW_EQ_ETH_CMD_AUTOEQUEQE_F |
2597 FW_EQ_ETH_CMD_VIID_V(pi->viid)); 2609 FW_EQ_ETH_CMD_VIID_V(pi->viid));
2598 c.fetchszm_to_iqid = htonl(FW_EQ_ETH_CMD_HOSTFCMODE_V(2) | 2610 c.fetchszm_to_iqid =
2599 FW_EQ_ETH_CMD_PCIECHN_V(pi->tx_chan) | 2611 htonl(FW_EQ_ETH_CMD_HOSTFCMODE_V(HOSTFCMODE_STATUS_PAGE_X) |
2600 FW_EQ_ETH_CMD_FETCHRO_V(1) | 2612 FW_EQ_ETH_CMD_PCIECHN_V(pi->tx_chan) |
2601 FW_EQ_ETH_CMD_IQID_V(iqid)); 2613 FW_EQ_ETH_CMD_FETCHRO_F | FW_EQ_ETH_CMD_IQID_V(iqid));
2602 c.dcaen_to_eqsize = htonl(FW_EQ_ETH_CMD_FBMIN_V(2) | 2614 c.dcaen_to_eqsize =
2603 FW_EQ_ETH_CMD_FBMAX_V(3) | 2615 htonl(FW_EQ_ETH_CMD_FBMIN_V(FETCHBURSTMIN_64B_X) |
2604 FW_EQ_ETH_CMD_CIDXFTHRESH_V(5) | 2616 FW_EQ_ETH_CMD_FBMAX_V(FETCHBURSTMAX_512B_X) |
2605 FW_EQ_ETH_CMD_EQSIZE_V(nentries)); 2617 FW_EQ_ETH_CMD_CIDXFTHRESH_V(CIDXFLUSHTHRESH_32_X) |
2618 FW_EQ_ETH_CMD_EQSIZE_V(nentries));
2606 c.eqaddr = cpu_to_be64(txq->q.phys_addr); 2619 c.eqaddr = cpu_to_be64(txq->q.phys_addr);
2607 2620
2608 ret = t4_wr_mbox(adap, adap->fn, &c, sizeof(c), &c); 2621 ret = t4_wr_mbox(adap, adap->fn, &c, sizeof(c), &c);
@@ -2649,14 +2662,15 @@ int t4_sge_alloc_ctrl_txq(struct adapter *adap, struct sge_ctrl_txq *txq,
2649 FW_EQ_CTRL_CMD_EQSTART_F | FW_LEN16(c)); 2662 FW_EQ_CTRL_CMD_EQSTART_F | FW_LEN16(c));
2650 c.cmpliqid_eqid = htonl(FW_EQ_CTRL_CMD_CMPLIQID_V(cmplqid)); 2663 c.cmpliqid_eqid = htonl(FW_EQ_CTRL_CMD_CMPLIQID_V(cmplqid));
2651 c.physeqid_pkd = htonl(0); 2664 c.physeqid_pkd = htonl(0);
2652 c.fetchszm_to_iqid = htonl(FW_EQ_CTRL_CMD_HOSTFCMODE_V(2) | 2665 c.fetchszm_to_iqid =
2653 FW_EQ_CTRL_CMD_PCIECHN_V(pi->tx_chan) | 2666 htonl(FW_EQ_CTRL_CMD_HOSTFCMODE_V(HOSTFCMODE_STATUS_PAGE_X) |
2654 FW_EQ_CTRL_CMD_FETCHRO_F | 2667 FW_EQ_CTRL_CMD_PCIECHN_V(pi->tx_chan) |
2655 FW_EQ_CTRL_CMD_IQID_V(iqid)); 2668 FW_EQ_CTRL_CMD_FETCHRO_F | FW_EQ_CTRL_CMD_IQID_V(iqid));
2656 c.dcaen_to_eqsize = htonl(FW_EQ_CTRL_CMD_FBMIN_V(2) | 2669 c.dcaen_to_eqsize =
2657 FW_EQ_CTRL_CMD_FBMAX_V(3) | 2670 htonl(FW_EQ_CTRL_CMD_FBMIN_V(FETCHBURSTMIN_64B_X) |
2658 FW_EQ_CTRL_CMD_CIDXFTHRESH_V(5) | 2671 FW_EQ_CTRL_CMD_FBMAX_V(FETCHBURSTMAX_512B_X) |
2659 FW_EQ_CTRL_CMD_EQSIZE_V(nentries)); 2672 FW_EQ_CTRL_CMD_CIDXFTHRESH_V(CIDXFLUSHTHRESH_32_X) |
2673 FW_EQ_CTRL_CMD_EQSIZE_V(nentries));
2660 c.eqaddr = cpu_to_be64(txq->q.phys_addr); 2674 c.eqaddr = cpu_to_be64(txq->q.phys_addr);
2661 2675
2662 ret = t4_wr_mbox(adap, adap->fn, &c, sizeof(c), &c); 2676 ret = t4_wr_mbox(adap, adap->fn, &c, sizeof(c), &c);
@@ -2701,14 +2715,15 @@ int t4_sge_alloc_ofld_txq(struct adapter *adap, struct sge_ofld_txq *txq,
2701 FW_EQ_OFLD_CMD_VFN_V(0)); 2715 FW_EQ_OFLD_CMD_VFN_V(0));
2702 c.alloc_to_len16 = htonl(FW_EQ_OFLD_CMD_ALLOC_F | 2716 c.alloc_to_len16 = htonl(FW_EQ_OFLD_CMD_ALLOC_F |
2703 FW_EQ_OFLD_CMD_EQSTART_F | FW_LEN16(c)); 2717 FW_EQ_OFLD_CMD_EQSTART_F | FW_LEN16(c));
2704 c.fetchszm_to_iqid = htonl(FW_EQ_OFLD_CMD_HOSTFCMODE_V(2) | 2718 c.fetchszm_to_iqid =
2705 FW_EQ_OFLD_CMD_PCIECHN_V(pi->tx_chan) | 2719 htonl(FW_EQ_OFLD_CMD_HOSTFCMODE_V(HOSTFCMODE_STATUS_PAGE_X) |
2706 FW_EQ_OFLD_CMD_FETCHRO_F | 2720 FW_EQ_OFLD_CMD_PCIECHN_V(pi->tx_chan) |
2707 FW_EQ_OFLD_CMD_IQID_V(iqid)); 2721 FW_EQ_OFLD_CMD_FETCHRO_F | FW_EQ_OFLD_CMD_IQID_V(iqid));
2708 c.dcaen_to_eqsize = htonl(FW_EQ_OFLD_CMD_FBMIN_V(2) | 2722 c.dcaen_to_eqsize =
2709 FW_EQ_OFLD_CMD_FBMAX_V(3) | 2723 htonl(FW_EQ_OFLD_CMD_FBMIN_V(FETCHBURSTMIN_64B_X) |
2710 FW_EQ_OFLD_CMD_CIDXFTHRESH_V(5) | 2724 FW_EQ_OFLD_CMD_FBMAX_V(FETCHBURSTMAX_512B_X) |
2711 FW_EQ_OFLD_CMD_EQSIZE_V(nentries)); 2725 FW_EQ_OFLD_CMD_CIDXFTHRESH_V(CIDXFLUSHTHRESH_32_X) |
2726 FW_EQ_OFLD_CMD_EQSIZE_V(nentries));
2712 c.eqaddr = cpu_to_be64(txq->q.phys_addr); 2727 c.eqaddr = cpu_to_be64(txq->q.phys_addr);
2713 2728
2714 ret = t4_wr_mbox(adap, adap->fn, &c, sizeof(c), &c); 2729 ret = t4_wr_mbox(adap, adap->fn, &c, sizeof(c), &c);
@@ -3023,7 +3038,11 @@ int t4_sge_init(struct adapter *adap)
3023 * Packing Boundary. T5 introduced the ability to specify these 3038 * Packing Boundary. T5 introduced the ability to specify these
3024 * separately. The actual Ingress Packet Data alignment boundary 3039 * separately. The actual Ingress Packet Data alignment boundary
3025 * within Packed Buffer Mode is the maximum of these two 3040 * within Packed Buffer Mode is the maximum of these two
3026 * specifications. 3041 * specifications. (Note that it makes no real practical sense to
 3042 * have the Padding Boundary be larger than the Packing Boundary but you
3043 * could set the chip up that way and, in fact, legacy T4 code would
 3044 * end up doing this because it would initialize the Padding Boundary and
3045 * leave the Packing Boundary initialized to 0 (16 bytes).)
3027 */ 3046 */
3028 ingpadboundary = 1 << (INGPADBOUNDARY_G(sge_control) + 3047 ingpadboundary = 1 << (INGPADBOUNDARY_G(sge_control) +
3029 INGPADBOUNDARY_SHIFT_X); 3048 INGPADBOUNDARY_SHIFT_X);
@@ -3069,6 +3088,9 @@ int t4_sge_init(struct adapter *adap)
3069 3088
3070 t4_idma_monitor_init(adap, &s->idma_monitor); 3089 t4_idma_monitor_init(adap, &s->idma_monitor);
3071 3090
 3091 /* Set up timers used for recurring callbacks to process RX and TX
3092 * administrative tasks.
3093 */
3072 setup_timer(&s->rx_timer, sge_rx_timer_cb, (unsigned long)adap); 3094 setup_timer(&s->rx_timer, sge_rx_timer_cb, (unsigned long)adap);
3073 setup_timer(&s->tx_timer, sge_tx_timer_cb, (unsigned long)adap); 3095 setup_timer(&s->tx_timer, sge_tx_timer_cb, (unsigned long)adap);
3074 3096
diff --git a/drivers/net/ethernet/chelsio/cxgb4/t4_hw.h b/drivers/net/ethernet/chelsio/cxgb4/t4_hw.h
index 380b15c0417a..88067d90121c 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/t4_hw.h
+++ b/drivers/net/ethernet/chelsio/cxgb4/t4_hw.h
@@ -152,17 +152,33 @@ struct rsp_ctrl {
152 }; 152 };
153}; 153};
154 154
155#define RSPD_NEWBUF 0x80000000U 155#define RSPD_NEWBUF_S 31
156#define RSPD_LEN(x) (((x) >> 0) & 0x7fffffffU) 156#define RSPD_NEWBUF_V(x) ((x) << RSPD_NEWBUF_S)
157#define RSPD_QID(x) RSPD_LEN(x) 157#define RSPD_NEWBUF_F RSPD_NEWBUF_V(1U)
158 158
159#define RSPD_GEN(x) ((x) >> 7) 159#define RSPD_LEN_S 0
160#define RSPD_TYPE(x) (((x) >> 4) & 3) 160#define RSPD_LEN_M 0x7fffffff
161#define RSPD_LEN_G(x) (((x) >> RSPD_LEN_S) & RSPD_LEN_M)
161 162
162#define V_QINTR_CNT_EN 0x0 163#define RSPD_QID_S RSPD_LEN_S
163#define QINTR_CNT_EN 0x1 164#define RSPD_QID_M RSPD_LEN_M
164#define QINTR_TIMER_IDX(x) ((x) << 1) 165#define RSPD_QID_G(x) RSPD_LEN_G(x)
165#define QINTR_TIMER_IDX_GET(x) (((x) >> 1) & 0x7) 166
167#define RSPD_GEN_S 7
168
169#define RSPD_TYPE_S 4
170#define RSPD_TYPE_M 0x3
171#define RSPD_TYPE_G(x) (((x) >> RSPD_TYPE_S) & RSPD_TYPE_M)
172
173/* Rx queue interrupt deferral fields: counter enable and timer index */
174#define QINTR_CNT_EN_S 0
175#define QINTR_CNT_EN_V(x) ((x) << QINTR_CNT_EN_S)
176#define QINTR_CNT_EN_F QINTR_CNT_EN_V(1U)
177
178#define QINTR_TIMER_IDX_S 1
179#define QINTR_TIMER_IDX_M 0x7
180#define QINTR_TIMER_IDX_V(x) ((x) << QINTR_TIMER_IDX_S)
181#define QINTR_TIMER_IDX_G(x) (((x) >> QINTR_TIMER_IDX_S) & QINTR_TIMER_IDX_M)
166 182
167/* 183/*
168 * Flash layout. 184 * Flash layout.
diff --git a/drivers/net/ethernet/chelsio/cxgb4/t4_msg.h b/drivers/net/ethernet/chelsio/cxgb4/t4_msg.h
index 30a2f56e99c2..d90f8a03e378 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/t4_msg.h
+++ b/drivers/net/ethernet/chelsio/cxgb4/t4_msg.h
@@ -634,26 +634,9 @@ struct cpl_tid_release {
634 634
635struct cpl_tx_pkt_core { 635struct cpl_tx_pkt_core {
636 __be32 ctrl0; 636 __be32 ctrl0;
637#define TXPKT_VF(x) ((x) << 0)
638#define TXPKT_PF(x) ((x) << 8)
639#define TXPKT_VF_VLD (1 << 11)
640#define TXPKT_OVLAN_IDX(x) ((x) << 12)
641#define TXPKT_INTF(x) ((x) << 16)
642#define TXPKT_INS_OVLAN (1 << 21)
643#define TXPKT_OPCODE(x) ((x) << 24)
644 __be16 pack; 637 __be16 pack;
645 __be16 len; 638 __be16 len;
646 __be64 ctrl1; 639 __be64 ctrl1;
647#define TXPKT_CSUM_END(x) ((x) << 12)
648#define TXPKT_CSUM_START(x) ((x) << 20)
649#define TXPKT_IPHDR_LEN(x) ((u64)(x) << 20)
650#define TXPKT_CSUM_LOC(x) ((u64)(x) << 30)
651#define TXPKT_ETHHDR_LEN(x) ((u64)(x) << 34)
652#define TXPKT_CSUM_TYPE(x) ((u64)(x) << 40)
653#define TXPKT_VLAN(x) ((u64)(x) << 44)
654#define TXPKT_VLAN_VLD (1ULL << 60)
655#define TXPKT_IPCSUM_DIS (1ULL << 62)
656#define TXPKT_L4CSUM_DIS (1ULL << 63)
657}; 640};
658 641
659struct cpl_tx_pkt { 642struct cpl_tx_pkt {
@@ -663,16 +646,66 @@ struct cpl_tx_pkt {
663 646
664#define cpl_tx_pkt_xt cpl_tx_pkt 647#define cpl_tx_pkt_xt cpl_tx_pkt
665 648
649/* cpl_tx_pkt_core.ctrl0 fields */
650#define TXPKT_VF_S 0
651#define TXPKT_VF_V(x) ((x) << TXPKT_VF_S)
652
653#define TXPKT_PF_S 8
654#define TXPKT_PF_V(x) ((x) << TXPKT_PF_S)
655
656#define TXPKT_VF_VLD_S 11
657#define TXPKT_VF_VLD_V(x) ((x) << TXPKT_VF_VLD_S)
658#define TXPKT_VF_VLD_F TXPKT_VF_VLD_V(1U)
659
660#define TXPKT_OVLAN_IDX_S 12
661#define TXPKT_OVLAN_IDX_V(x) ((x) << TXPKT_OVLAN_IDX_S)
662
663#define TXPKT_INTF_S 16
664#define TXPKT_INTF_V(x) ((x) << TXPKT_INTF_S)
665
666#define TXPKT_INS_OVLAN_S 21
667#define TXPKT_INS_OVLAN_V(x) ((x) << TXPKT_INS_OVLAN_S)
668#define TXPKT_INS_OVLAN_F TXPKT_INS_OVLAN_V(1U)
669
670#define TXPKT_OPCODE_S 24
671#define TXPKT_OPCODE_V(x) ((x) << TXPKT_OPCODE_S)
672
673/* cpl_tx_pkt_core.ctrl1 fields */
674#define TXPKT_CSUM_END_S 12
675#define TXPKT_CSUM_END_V(x) ((x) << TXPKT_CSUM_END_S)
676
677#define TXPKT_CSUM_START_S 20
678#define TXPKT_CSUM_START_V(x) ((x) << TXPKT_CSUM_START_S)
679
680#define TXPKT_IPHDR_LEN_S 20
681#define TXPKT_IPHDR_LEN_V(x) ((__u64)(x) << TXPKT_IPHDR_LEN_S)
682
683#define TXPKT_CSUM_LOC_S 30
684#define TXPKT_CSUM_LOC_V(x) ((__u64)(x) << TXPKT_CSUM_LOC_S)
685
686#define TXPKT_ETHHDR_LEN_S 34
687#define TXPKT_ETHHDR_LEN_V(x) ((__u64)(x) << TXPKT_ETHHDR_LEN_S)
688
689#define TXPKT_CSUM_TYPE_S 40
690#define TXPKT_CSUM_TYPE_V(x) ((__u64)(x) << TXPKT_CSUM_TYPE_S)
691
692#define TXPKT_VLAN_S 44
693#define TXPKT_VLAN_V(x) ((__u64)(x) << TXPKT_VLAN_S)
694
695#define TXPKT_VLAN_VLD_S 60
696#define TXPKT_VLAN_VLD_V(x) ((__u64)(x) << TXPKT_VLAN_VLD_S)
697#define TXPKT_VLAN_VLD_F TXPKT_VLAN_VLD_V(1ULL)
698
699#define TXPKT_IPCSUM_DIS_S 62
700#define TXPKT_IPCSUM_DIS_V(x) ((__u64)(x) << TXPKT_IPCSUM_DIS_S)
701#define TXPKT_IPCSUM_DIS_F TXPKT_IPCSUM_DIS_V(1ULL)
702
703#define TXPKT_L4CSUM_DIS_S 63
704#define TXPKT_L4CSUM_DIS_V(x) ((__u64)(x) << TXPKT_L4CSUM_DIS_S)
705#define TXPKT_L4CSUM_DIS_F TXPKT_L4CSUM_DIS_V(1ULL)
706
666struct cpl_tx_pkt_lso_core { 707struct cpl_tx_pkt_lso_core {
667 __be32 lso_ctrl; 708 __be32 lso_ctrl;
668#define LSO_TCPHDR_LEN(x) ((x) << 0)
669#define LSO_IPHDR_LEN(x) ((x) << 4)
670#define LSO_ETHHDR_LEN(x) ((x) << 16)
671#define LSO_IPV6(x) ((x) << 20)
672#define LSO_LAST_SLICE (1 << 22)
673#define LSO_FIRST_SLICE (1 << 23)
674#define LSO_OPCODE(x) ((x) << 24)
675#define LSO_T5_XFER_SIZE(x) ((x) << 0)
676 __be16 ipid_ofst; 709 __be16 ipid_ofst;
677 __be16 mss; 710 __be16 mss;
678 __be32 seqno_offset; 711 __be32 seqno_offset;
diff --git a/drivers/net/ethernet/chelsio/cxgb4/t4_values.h b/drivers/net/ethernet/chelsio/cxgb4/t4_values.h
index c4d9952f814b..72ec1f91d29f 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/t4_values.h
+++ b/drivers/net/ethernet/chelsio/cxgb4/t4_values.h
@@ -61,6 +61,20 @@
61#define SGE_TIMERREGS 6 61#define SGE_TIMERREGS 6
62#define TIMERREG_COUNTER0_X 0 62#define TIMERREG_COUNTER0_X 0
63 63
64#define FETCHBURSTMIN_64B_X 2
65
66#define FETCHBURSTMAX_512B_X 3
67
68#define HOSTFCMODE_STATUS_PAGE_X 2
69
70#define CIDXFLUSHTHRESH_32_X 5
71
72#define UPDATEDELIVERY_INTERRUPT_X 1
73
74#define RSPD_TYPE_FLBUF_X 0
75#define RSPD_TYPE_CPL_X 1
76#define RSPD_TYPE_INTR_X 2
77
64/* Congestion Manager Definitions. 78/* Congestion Manager Definitions.
65 */ 79 */
66#define CONMCTXT_CNGTPMODE_S 19 80#define CONMCTXT_CNGTPMODE_S 19
diff --git a/drivers/net/ethernet/chelsio/cxgb4/t4fw_api.h b/drivers/net/ethernet/chelsio/cxgb4/t4fw_api.h
index d75fca7695eb..16c6d67370ee 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/t4fw_api.h
+++ b/drivers/net/ethernet/chelsio/cxgb4/t4fw_api.h
@@ -1592,6 +1592,7 @@ struct fw_eq_eth_cmd {
1592 1592
1593#define FW_EQ_ETH_CMD_FETCHRO_S 22 1593#define FW_EQ_ETH_CMD_FETCHRO_S 22
1594#define FW_EQ_ETH_CMD_FETCHRO_V(x) ((x) << FW_EQ_ETH_CMD_FETCHRO_S) 1594#define FW_EQ_ETH_CMD_FETCHRO_V(x) ((x) << FW_EQ_ETH_CMD_FETCHRO_S)
1595#define FW_EQ_ETH_CMD_FETCHRO_F FW_EQ_ETH_CMD_FETCHRO_V(1U)
1595 1596
1596#define FW_EQ_ETH_CMD_HOSTFCMODE_S 20 1597#define FW_EQ_ETH_CMD_HOSTFCMODE_S 20
1597#define FW_EQ_ETH_CMD_HOSTFCMODE_V(x) ((x) << FW_EQ_ETH_CMD_HOSTFCMODE_S) 1598#define FW_EQ_ETH_CMD_HOSTFCMODE_V(x) ((x) << FW_EQ_ETH_CMD_HOSTFCMODE_S)
diff --git a/drivers/net/ethernet/chelsio/cxgb4vf/cxgb4vf_main.c b/drivers/net/ethernet/chelsio/cxgb4vf/cxgb4vf_main.c
index 1d893b0b7ddf..b2b5e5bbe04c 100644
--- a/drivers/net/ethernet/chelsio/cxgb4vf/cxgb4vf_main.c
+++ b/drivers/net/ethernet/chelsio/cxgb4vf/cxgb4vf_main.c
@@ -1021,7 +1021,7 @@ static int closest_thres(const struct sge *s, int thres)
1021static unsigned int qtimer_val(const struct adapter *adapter, 1021static unsigned int qtimer_val(const struct adapter *adapter,
1022 const struct sge_rspq *rspq) 1022 const struct sge_rspq *rspq)
1023{ 1023{
1024 unsigned int timer_idx = QINTR_TIMER_IDX_GET(rspq->intr_params); 1024 unsigned int timer_idx = QINTR_TIMER_IDX_G(rspq->intr_params);
1025 1025
1026 return timer_idx < SGE_NTIMERS 1026 return timer_idx < SGE_NTIMERS
1027 ? adapter->sge.timer_val[timer_idx] 1027 ? adapter->sge.timer_val[timer_idx]
@@ -1086,8 +1086,8 @@ static int set_rxq_intr_params(struct adapter *adapter, struct sge_rspq *rspq,
1086 * Update the response queue's interrupt coalescing parameters and 1086 * Update the response queue's interrupt coalescing parameters and
1087 * return success. 1087 * return success.
1088 */ 1088 */
1089 rspq->intr_params = (QINTR_TIMER_IDX(timer_idx) | 1089 rspq->intr_params = (QINTR_TIMER_IDX_V(timer_idx) |
1090 (cnt > 0 ? QINTR_CNT_EN : 0)); 1090 QINTR_CNT_EN_V(cnt > 0));
1091 return 0; 1091 return 0;
1092} 1092}
1093 1093
@@ -1439,7 +1439,7 @@ static int cxgb4vf_get_coalesce(struct net_device *dev,
1439 1439
1440 coalesce->rx_coalesce_usecs = qtimer_val(adapter, rspq); 1440 coalesce->rx_coalesce_usecs = qtimer_val(adapter, rspq);
1441 coalesce->rx_max_coalesced_frames = 1441 coalesce->rx_max_coalesced_frames =
1442 ((rspq->intr_params & QINTR_CNT_EN) 1442 ((rspq->intr_params & QINTR_CNT_EN_F)
1443 ? adapter->sge.counter_val[rspq->pktcnt_idx] 1443 ? adapter->sge.counter_val[rspq->pktcnt_idx]
1444 : 0); 1444 : 0);
1445 return 0; 1445 return 0;
@@ -2393,8 +2393,9 @@ static inline void init_rspq(struct sge_rspq *rspq, u8 timer_idx,
2393 u8 pkt_cnt_idx, unsigned int size, 2393 u8 pkt_cnt_idx, unsigned int size,
2394 unsigned int iqe_size) 2394 unsigned int iqe_size)
2395{ 2395{
2396 rspq->intr_params = (QINTR_TIMER_IDX(timer_idx) | 2396 rspq->intr_params = (QINTR_TIMER_IDX_V(timer_idx) |
2397 (pkt_cnt_idx < SGE_NCOUNTERS ? QINTR_CNT_EN : 0)); 2397 (pkt_cnt_idx < SGE_NCOUNTERS ?
2398 QINTR_CNT_EN_F : 0));
2398 rspq->pktcnt_idx = (pkt_cnt_idx < SGE_NCOUNTERS 2399 rspq->pktcnt_idx = (pkt_cnt_idx < SGE_NCOUNTERS
2399 ? pkt_cnt_idx 2400 ? pkt_cnt_idx
2400 : 0); 2401 : 0);
diff --git a/drivers/net/ethernet/chelsio/cxgb4vf/sge.c b/drivers/net/ethernet/chelsio/cxgb4vf/sge.c
index 98cd47c373c5..2e41d1541d73 100644
--- a/drivers/net/ethernet/chelsio/cxgb4vf/sge.c
+++ b/drivers/net/ethernet/chelsio/cxgb4vf/sge.c
@@ -1100,7 +1100,7 @@ nocsum:
1100 * unknown protocol, disable HW csum 1100 * unknown protocol, disable HW csum
1101 * and hope a bad packet is detected 1101 * and hope a bad packet is detected
1102 */ 1102 */
1103 return TXPKT_L4CSUM_DIS; 1103 return TXPKT_L4CSUM_DIS_F;
1104 } 1104 }
1105 } else { 1105 } else {
1106 /* 1106 /*
@@ -1117,15 +1117,15 @@ nocsum:
1117 } 1117 }
1118 1118
1119 if (likely(csum_type >= TX_CSUM_TCPIP)) 1119 if (likely(csum_type >= TX_CSUM_TCPIP))
1120 return TXPKT_CSUM_TYPE(csum_type) | 1120 return TXPKT_CSUM_TYPE_V(csum_type) |
1121 TXPKT_IPHDR_LEN(skb_network_header_len(skb)) | 1121 TXPKT_IPHDR_LEN_V(skb_network_header_len(skb)) |
1122 TXPKT_ETHHDR_LEN(skb_network_offset(skb) - ETH_HLEN); 1122 TXPKT_ETHHDR_LEN_V(skb_network_offset(skb) - ETH_HLEN);
1123 else { 1123 else {
1124 int start = skb_transport_offset(skb); 1124 int start = skb_transport_offset(skb);
1125 1125
1126 return TXPKT_CSUM_TYPE(csum_type) | 1126 return TXPKT_CSUM_TYPE_V(csum_type) |
1127 TXPKT_CSUM_START(start) | 1127 TXPKT_CSUM_START_V(start) |
1128 TXPKT_CSUM_LOC(start + skb->csum_offset); 1128 TXPKT_CSUM_LOC_V(start + skb->csum_offset);
1129 } 1129 }
1130} 1130}
1131 1131
@@ -1288,29 +1288,30 @@ int t4vf_eth_xmit(struct sk_buff *skb, struct net_device *dev)
1288 * Fill in the LSO CPL message. 1288 * Fill in the LSO CPL message.
1289 */ 1289 */
1290 lso->lso_ctrl = 1290 lso->lso_ctrl =
1291 cpu_to_be32(LSO_OPCODE(CPL_TX_PKT_LSO) | 1291 cpu_to_be32(LSO_OPCODE_V(CPL_TX_PKT_LSO) |
1292 LSO_FIRST_SLICE | 1292 LSO_FIRST_SLICE_F |
1293 LSO_LAST_SLICE | 1293 LSO_LAST_SLICE_F |
1294 LSO_IPV6(v6) | 1294 LSO_IPV6_V(v6) |
1295 LSO_ETHHDR_LEN(eth_xtra_len/4) | 1295 LSO_ETHHDR_LEN_V(eth_xtra_len / 4) |
1296 LSO_IPHDR_LEN(l3hdr_len/4) | 1296 LSO_IPHDR_LEN_V(l3hdr_len / 4) |
1297 LSO_TCPHDR_LEN(tcp_hdr(skb)->doff)); 1297 LSO_TCPHDR_LEN_V(tcp_hdr(skb)->doff));
1298 lso->ipid_ofst = cpu_to_be16(0); 1298 lso->ipid_ofst = cpu_to_be16(0);
1299 lso->mss = cpu_to_be16(ssi->gso_size); 1299 lso->mss = cpu_to_be16(ssi->gso_size);
1300 lso->seqno_offset = cpu_to_be32(0); 1300 lso->seqno_offset = cpu_to_be32(0);
1301 if (is_t4(adapter->params.chip)) 1301 if (is_t4(adapter->params.chip))
1302 lso->len = cpu_to_be32(skb->len); 1302 lso->len = cpu_to_be32(skb->len);
1303 else 1303 else
1304 lso->len = cpu_to_be32(LSO_T5_XFER_SIZE(skb->len)); 1304 lso->len = cpu_to_be32(LSO_T5_XFER_SIZE_V(skb->len));
1305 1305
1306 /* 1306 /*
1307 * Set up TX Packet CPL pointer, control word and perform 1307 * Set up TX Packet CPL pointer, control word and perform
1308 * accounting. 1308 * accounting.
1309 */ 1309 */
1310 cpl = (void *)(lso + 1); 1310 cpl = (void *)(lso + 1);
1311 cntrl = (TXPKT_CSUM_TYPE(v6 ? TX_CSUM_TCPIP6 : TX_CSUM_TCPIP) | 1311 cntrl = (TXPKT_CSUM_TYPE_V(v6 ?
1312 TXPKT_IPHDR_LEN(l3hdr_len) | 1312 TX_CSUM_TCPIP6 : TX_CSUM_TCPIP) |
1313 TXPKT_ETHHDR_LEN(eth_xtra_len)); 1313 TXPKT_IPHDR_LEN_V(l3hdr_len) |
1314 TXPKT_ETHHDR_LEN_V(eth_xtra_len));
1314 txq->tso++; 1315 txq->tso++;
1315 txq->tx_cso += ssi->gso_segs; 1316 txq->tx_cso += ssi->gso_segs;
1316 } else { 1317 } else {
@@ -1327,10 +1328,10 @@ int t4vf_eth_xmit(struct sk_buff *skb, struct net_device *dev)
1327 */ 1328 */
1328 cpl = (void *)(wr + 1); 1329 cpl = (void *)(wr + 1);
1329 if (skb->ip_summed == CHECKSUM_PARTIAL) { 1330 if (skb->ip_summed == CHECKSUM_PARTIAL) {
1330 cntrl = hwcsum(skb) | TXPKT_IPCSUM_DIS; 1331 cntrl = hwcsum(skb) | TXPKT_IPCSUM_DIS_F;
1331 txq->tx_cso++; 1332 txq->tx_cso++;
1332 } else 1333 } else
1333 cntrl = TXPKT_L4CSUM_DIS | TXPKT_IPCSUM_DIS; 1334 cntrl = TXPKT_L4CSUM_DIS_F | TXPKT_IPCSUM_DIS_F;
1334 } 1335 }
1335 1336
1336 /* 1337 /*
@@ -1339,15 +1340,15 @@ int t4vf_eth_xmit(struct sk_buff *skb, struct net_device *dev)
1339 */ 1340 */
1340 if (skb_vlan_tag_present(skb)) { 1341 if (skb_vlan_tag_present(skb)) {
1341 txq->vlan_ins++; 1342 txq->vlan_ins++;
1342 cntrl |= TXPKT_VLAN_VLD | TXPKT_VLAN(skb_vlan_tag_get(skb)); 1343 cntrl |= TXPKT_VLAN_VLD_F | TXPKT_VLAN_V(skb_vlan_tag_get(skb));
1343 } 1344 }
1344 1345
1345 /* 1346 /*
1346 * Fill in the TX Packet CPL message header. 1347 * Fill in the TX Packet CPL message header.
1347 */ 1348 */
1348 cpl->ctrl0 = cpu_to_be32(TXPKT_OPCODE(CPL_TX_PKT_XT) | 1349 cpl->ctrl0 = cpu_to_be32(TXPKT_OPCODE_V(CPL_TX_PKT_XT) |
1349 TXPKT_INTF(pi->port_id) | 1350 TXPKT_INTF_V(pi->port_id) |
1350 TXPKT_PF(0)); 1351 TXPKT_PF_V(0));
1351 cpl->pack = cpu_to_be16(0); 1352 cpl->pack = cpu_to_be16(0);
1352 cpl->len = cpu_to_be16(skb->len); 1353 cpl->len = cpu_to_be16(skb->len);
1353 cpl->ctrl1 = cpu_to_be64(cntrl); 1354 cpl->ctrl1 = cpu_to_be64(cntrl);
@@ -1670,7 +1671,7 @@ int t4vf_ethrx_handler(struct sge_rspq *rspq, const __be64 *rsp,
1670static inline bool is_new_response(const struct rsp_ctrl *rc, 1671static inline bool is_new_response(const struct rsp_ctrl *rc,
1671 const struct sge_rspq *rspq) 1672 const struct sge_rspq *rspq)
1672{ 1673{
1673 return RSPD_GEN(rc->type_gen) == rspq->gen; 1674 return ((rc->type_gen >> RSPD_GEN_S) & 0x1) == rspq->gen;
1674} 1675}
1675 1676
1676/** 1677/**
@@ -1759,8 +1760,8 @@ static int process_responses(struct sge_rspq *rspq, int budget)
1759 * SGE. 1760 * SGE.
1760 */ 1761 */
1761 dma_rmb(); 1762 dma_rmb();
1762 rsp_type = RSPD_TYPE(rc->type_gen); 1763 rsp_type = RSPD_TYPE_G(rc->type_gen);
1763 if (likely(rsp_type == RSP_TYPE_FLBUF)) { 1764 if (likely(rsp_type == RSPD_TYPE_FLBUF_X)) {
1764 struct page_frag *fp; 1765 struct page_frag *fp;
1765 struct pkt_gl gl; 1766 struct pkt_gl gl;
1766 const struct rx_sw_desc *sdesc; 1767 const struct rx_sw_desc *sdesc;
@@ -1771,7 +1772,7 @@ static int process_responses(struct sge_rspq *rspq, int budget)
1771 * If we get a "new buffer" message from the SGE we 1772 * If we get a "new buffer" message from the SGE we
1772 * need to move on to the next Free List buffer. 1773 * need to move on to the next Free List buffer.
1773 */ 1774 */
1774 if (len & RSPD_NEWBUF) { 1775 if (len & RSPD_NEWBUF_F) {
1775 /* 1776 /*
1776 * We get one "new buffer" message when we 1777 * We get one "new buffer" message when we
1777 * first start up a queue so we need to ignore 1778 * first start up a queue so we need to ignore
@@ -1782,7 +1783,7 @@ static int process_responses(struct sge_rspq *rspq, int budget)
1782 1); 1783 1);
1783 rspq->offset = 0; 1784 rspq->offset = 0;
1784 } 1785 }
1785 len = RSPD_LEN(len); 1786 len = RSPD_LEN_G(len);
1786 } 1787 }
1787 gl.tot_len = len; 1788 gl.tot_len = len;
1788 1789
@@ -1825,10 +1826,10 @@ static int process_responses(struct sge_rspq *rspq, int budget)
1825 rspq->offset += ALIGN(fp->size, s->fl_align); 1826 rspq->offset += ALIGN(fp->size, s->fl_align);
1826 else 1827 else
1827 restore_rx_bufs(&gl, &rxq->fl, frag); 1828 restore_rx_bufs(&gl, &rxq->fl, frag);
1828 } else if (likely(rsp_type == RSP_TYPE_CPL)) { 1829 } else if (likely(rsp_type == RSPD_TYPE_CPL_X)) {
1829 ret = rspq->handler(rspq, rspq->cur_desc, NULL); 1830 ret = rspq->handler(rspq, rspq->cur_desc, NULL);
1830 } else { 1831 } else {
1831 WARN_ON(rsp_type > RSP_TYPE_CPL); 1832 WARN_ON(rsp_type > RSPD_TYPE_CPL_X);
1832 ret = 0; 1833 ret = 0;
1833 } 1834 }
1834 1835
@@ -1840,7 +1841,7 @@ static int process_responses(struct sge_rspq *rspq, int budget)
1840 */ 1841 */
1841 const int NOMEM_TIMER_IDX = SGE_NTIMERS-1; 1842 const int NOMEM_TIMER_IDX = SGE_NTIMERS-1;
1842 rspq->next_intr_params = 1843 rspq->next_intr_params =
1843 QINTR_TIMER_IDX(NOMEM_TIMER_IDX); 1844 QINTR_TIMER_IDX_V(NOMEM_TIMER_IDX);
1844 break; 1845 break;
1845 } 1846 }
1846 1847
@@ -1882,7 +1883,7 @@ static int napi_rx_handler(struct napi_struct *napi, int budget)
1882 intr_params = rspq->next_intr_params; 1883 intr_params = rspq->next_intr_params;
1883 rspq->next_intr_params = rspq->intr_params; 1884 rspq->next_intr_params = rspq->intr_params;
1884 } else 1885 } else
1885 intr_params = QINTR_TIMER_IDX(SGE_TIMER_UPD_CIDX); 1886 intr_params = QINTR_TIMER_IDX_V(SGE_TIMER_UPD_CIDX);
1886 1887
1887 if (unlikely(work_done == 0)) 1888 if (unlikely(work_done == 0))
1888 rspq->unhandled_irqs++; 1889 rspq->unhandled_irqs++;
@@ -1943,10 +1944,10 @@ static unsigned int process_intrq(struct adapter *adapter)
1943 * never happen ... 1944 * never happen ...
1944 */ 1945 */
1945 dma_rmb(); 1946 dma_rmb();
1946 if (unlikely(RSPD_TYPE(rc->type_gen) != RSP_TYPE_INTR)) { 1947 if (unlikely(RSPD_TYPE_G(rc->type_gen) != RSPD_TYPE_INTR_X)) {
1947 dev_err(adapter->pdev_dev, 1948 dev_err(adapter->pdev_dev,
1948 "Unexpected INTRQ response type %d\n", 1949 "Unexpected INTRQ response type %d\n",
1949 RSPD_TYPE(rc->type_gen)); 1950 RSPD_TYPE_G(rc->type_gen));
1950 continue; 1951 continue;
1951 } 1952 }
1952 1953
@@ -1958,7 +1959,7 @@ static unsigned int process_intrq(struct adapter *adapter)
1958 * want to either make them fatal and/or conditionalized under 1959 * want to either make them fatal and/or conditionalized under
1959 * DEBUG. 1960 * DEBUG.
1960 */ 1961 */
1961 qid = RSPD_QID(be32_to_cpu(rc->pldbuflen_qid)); 1962 qid = RSPD_QID_G(be32_to_cpu(rc->pldbuflen_qid));
1962 iq_idx = IQ_IDX(s, qid); 1963 iq_idx = IQ_IDX(s, qid);
1963 if (unlikely(iq_idx >= MAX_INGQ)) { 1964 if (unlikely(iq_idx >= MAX_INGQ)) {
1964 dev_err(adapter->pdev_dev, 1965 dev_err(adapter->pdev_dev,