Diffstat (limited to 'drivers/net/cxgb3/sge.c')
-rw-r--r--  drivers/net/cxgb3/sge.c  63
1 file changed, 15 insertions(+), 48 deletions(-)
diff --git a/drivers/net/cxgb3/sge.c b/drivers/net/cxgb3/sge.c
index 8ff96c6f6de5..76bf5892b962 100644
--- a/drivers/net/cxgb3/sge.c
+++ b/drivers/net/cxgb3/sge.c
@@ -37,6 +37,7 @@
 #include <linux/tcp.h>
 #include <linux/dma-mapping.h>
 #include <linux/slab.h>
+#include <linux/prefetch.h>
 #include <net/arp.h>
 #include "common.h"
 #include "regs.h"
@@ -199,7 +200,7 @@ static inline void refill_rspq(struct adapter *adapter,
  *	need_skb_unmap - does the platform need unmapping of sk_buffs?
  *
  *	Returns true if the platform needs sk_buff unmapping.  The compiler
- *	optimizes away unecessary code if this returns true.
+ *	optimizes away unnecessary code if this returns true.
  */
 static inline int need_skb_unmap(void)
 {
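The kernel-doc in this hunk notes that need_skb_unmap() exists so the compiler can discard unmapping code on platforms that do not need it. A minimal sketch of how such a constant predicate is typically written, assuming CONFIG_NEED_DMA_MAP_STATE is the deciding config symbol (the actual function body is outside this hunk):

/* Sketch only, not the driver's actual body: a compile-time constant
 * predicate lets dead-code elimination drop the unmap path entirely
 * on platforms where DMA unmapping is a no-op. */
static inline int need_skb_unmap(void)
{
#ifdef CONFIG_NEED_DMA_MAP_STATE
	return 1;	/* platform really tracks DMA mappings */
#else
	return 0;	/* dma_unmap_*() are no-ops; callers get elided */
#endif
}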
@@ -296,8 +297,10 @@ static void free_tx_desc(struct adapter *adapter, struct sge_txq *q,
 		if (d->skb) {	/* an SGL is present */
 			if (need_unmap)
 				unmap_skb(d->skb, q, cidx, pdev);
-			if (d->eop)
+			if (d->eop) {
 				kfree_skb(d->skb);
+				d->skb = NULL;
+			}
 		}
 		++d;
 		if (++cidx == q->size) {
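The added braces let free_tx_desc() clear d->skb once the skb has been freed, so no stale pointer is left behind in the descriptor ring for a later cleanup pass to trip over. A minimal sketch of the free-and-clear idiom, with a hypothetical descriptor type standing in for the driver's tx descriptor state:

#include <linux/skbuff.h>

/* Sketch only: free an owned skb exactly once and leave an unambiguous
 * NULL marker behind, so repeated cleanup passes are harmless. */
struct demo_desc {
	struct sk_buff *skb;
	int eop;			/* set on the last descriptor of a packet */
};

static void demo_release(struct demo_desc *d)
{
	if (d->skb && d->eop) {
		kfree_skb(d->skb);	/* drop our reference */
		d->skb = NULL;		/* a second pass now does nothing */
	}
}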
@@ -1145,7 +1148,7 @@ static void write_tx_pkt_wr(struct adapter *adap, struct sk_buff *skb,
 	cpl->len = htonl(skb->len);
 	cntrl = V_TXPKT_INTF(pi->port_id);
 
-	if (vlan_tx_tag_present(skb) && pi->vlan_grp)
+	if (vlan_tx_tag_present(skb))
 		cntrl |= F_TXPKT_VLAN_VLD | V_TXPKT_VLAN(vlan_tx_tag_get(skb));
 
 	tso_info = V_LSO_MSS(skb_shinfo(skb)->gso_size);
@@ -1279,7 +1282,7 @@ netdev_tx_t t3_eth_xmit(struct sk_buff *skb, struct net_device *dev)
 		qs->port_stats[SGE_PSTAT_TX_CSUM]++;
 	if (skb_shinfo(skb)->gso_size)
 		qs->port_stats[SGE_PSTAT_TSO]++;
-	if (vlan_tx_tag_present(skb) && pi->vlan_grp)
+	if (vlan_tx_tag_present(skb))
 		qs->port_stats[SGE_PSTAT_VLANINS]++;
 
 	/*
@@ -2017,13 +2020,13 @@ static void rx_eth(struct adapter *adap, struct sge_rspq *rq,
 	skb_pull(skb, sizeof(*p) + pad);
 	skb->protocol = eth_type_trans(skb, adap->port[p->iff]);
 	pi = netdev_priv(skb->dev);
-	if ((pi->rx_offload & T3_RX_CSUM) && p->csum_valid &&
+	if ((skb->dev->features & NETIF_F_RXCSUM) && p->csum_valid &&
 	    p->csum == htons(0xffff) && !p->fragment) {
 		qs->port_stats[SGE_PSTAT_RX_CSUM_GOOD]++;
 		skb->ip_summed = CHECKSUM_UNNECESSARY;
 	} else
-		skb->ip_summed = CHECKSUM_NONE;
-	skb_record_rx_queue(skb, qs - &adap->sge.qs[0]);
+		skb_checksum_none_assert(skb);
+	skb_record_rx_queue(skb, qs - &adap->sge.qs[pi->first_qset]);
 
 	if (unlikely(p->vlan_valid)) {
 		struct vlan_group *grp = pi->vlan_grp;
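This hunk and the lro_add_page() hunk below replace the driver-private pi->rx_offload & T3_RX_CSUM test with the generic NETIF_F_RXCSUM bit in the net device's features, so RX checksum offload follows the standard features path (for example ethtool -K). A minimal sketch of the pattern, assuming a hypothetical hw_csum_ok flag that summarizes the hardware's csum_valid/csum result:

#include <linux/netdevice.h>
#include <linux/skbuff.h>

/* Sketch only: gate RX checksum offload on the netdev feature bit rather
 * than a private driver flag; hw_csum_ok is a hypothetical summary of the
 * hardware's "checksum verified" indication. */
static void demo_rx_csum(struct net_device *dev, struct sk_buff *skb,
			 bool hw_csum_ok)
{
	if ((dev->features & NETIF_F_RXCSUM) && hw_csum_ok)
		skb->ip_summed = CHECKSUM_UNNECESSARY;	/* stack skips verification */
	else
		skb_checksum_none_assert(skb);		/* CHECKSUM_NONE plus debug check */
}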
@@ -2118,7 +2121,7 @@ static void lro_add_page(struct adapter *adap, struct sge_qset *qs,
 	offset = 2 + sizeof(struct cpl_rx_pkt);
 	cpl = qs->lro_va = sd->pg_chunk.va + 2;
 
-	if ((pi->rx_offload & T3_RX_CSUM) &&
+	if ((qs->netdev->features & NETIF_F_RXCSUM) &&
 	    cpl->csum_valid && cpl->csum == htons(0xffff)) {
 		skb->ip_summed = CHECKSUM_UNNECESSARY;
 		qs->port_stats[SGE_PSTAT_RX_CSUM_GOOD]++;
@@ -2142,7 +2145,7 @@ static void lro_add_page(struct adapter *adap, struct sge_qset *qs,
 	if (!complete)
 		return;
 
-	skb_record_rx_queue(skb, qs - &adap->sge.qs[0]);
+	skb_record_rx_queue(skb, qs - &adap->sge.qs[pi->first_qset]);
 
 	if (unlikely(cpl->vlan_valid)) {
 		struct vlan_group *grp = pi->vlan_grp;
@@ -2283,7 +2286,8 @@ static int process_responses(struct adapter *adap, struct sge_qset *qs,
 	q->next_holdoff = q->holdoff_tmr;
 
 	while (likely(budget_left && is_new_response(r, q))) {
-		int packet_complete, eth, ethpad = 2, lro = qs->lro_enabled;
+		int packet_complete, eth, ethpad = 2;
+		int lro = !!(qs->netdev->features & NETIF_F_GRO);
 		struct sk_buff *skb = NULL;
 		u32 len, flags;
 		__be32 rss_hi, rss_lo;
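Here the per-iteration lro flag is derived from NETIF_F_GRO in the net device's features instead of a cached qs->lro_enabled field, so toggling GRO (for example with ethtool -K <dev> gro off) takes effect without driver-specific bookkeeping. A minimal sketch of a delivery helper keyed off the same bit; deliver_rx() is a hypothetical name, not the driver's function:

#include <linux/netdevice.h>
#include <linux/skbuff.h>

/* Sketch only: pick the GRO path when the device currently advertises
 * NETIF_F_GRO; otherwise hand the skb straight to the stack. */
static void deliver_rx(struct napi_struct *napi, struct sk_buff *skb)
{
	if (napi->dev->features & NETIF_F_GRO)
		napi_gro_receive(napi, skb);	/* may aggregate with earlier segments */
	else
		netif_receive_skb(skb);		/* plain per-packet delivery */
}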
@@ -2554,7 +2558,7 @@ static inline int handle_responses(struct adapter *adap, struct sge_rspq *q)
  * The MSI-X interrupt handler for an SGE response queue for the non-NAPI case
  * (i.e., response queue serviced in hard interrupt).
  */
-irqreturn_t t3_sge_intr_msix(int irq, void *cookie)
+static irqreturn_t t3_sge_intr_msix(int irq, void *cookie)
 {
 	struct sge_qset *qs = cookie;
 	struct adapter *adap = qs->adap;
@@ -3320,40 +3324,3 @@ void t3_sge_prep(struct adapter *adap, struct sge_params *p)
 
 	spin_lock_init(&adap->sge.reg_lock);
 }
-
-/**
- *	t3_get_desc - dump an SGE descriptor for debugging purposes
- *	@qs: the queue set
- *	@qnum: identifies the specific queue (0..2: Tx, 3:response, 4..5: Rx)
- *	@idx: the descriptor index in the queue
- *	@data: where to dump the descriptor contents
- *
- *	Dumps the contents of a HW descriptor of an SGE queue.  Returns the
- *	size of the descriptor.
- */
-int t3_get_desc(const struct sge_qset *qs, unsigned int qnum, unsigned int idx,
-		unsigned char *data)
-{
-	if (qnum >= 6)
-		return -EINVAL;
-
-	if (qnum < 3) {
-		if (!qs->txq[qnum].desc || idx >= qs->txq[qnum].size)
-			return -EINVAL;
-		memcpy(data, &qs->txq[qnum].desc[idx], sizeof(struct tx_desc));
-		return sizeof(struct tx_desc);
-	}
-
-	if (qnum == 3) {
-		if (!qs->rspq.desc || idx >= qs->rspq.size)
-			return -EINVAL;
-		memcpy(data, &qs->rspq.desc[idx], sizeof(struct rsp_desc));
-		return sizeof(struct rsp_desc);
-	}
-
-	qnum -= 4;
-	if (!qs->fl[qnum].desc || idx >= qs->fl[qnum].size)
-		return -EINVAL;
-	memcpy(data, &qs->fl[qnum].desc[idx], sizeof(struct rx_desc));
-	return sizeof(struct rx_desc);
-}