Diffstat (limited to 'drivers/net/chelsio/sge.c')
-rw-r--r--  drivers/net/chelsio/sge.c | 32
1 file changed, 16 insertions(+), 16 deletions(-)
diff --git a/drivers/net/chelsio/sge.c b/drivers/net/chelsio/sge.c
index f01cfdb995de..58380d240619 100644
--- a/drivers/net/chelsio/sge.c
+++ b/drivers/net/chelsio/sge.c
@@ -54,6 +54,7 @@
 #include <linux/in.h>
 #include <linux/if_arp.h>
 #include <linux/slab.h>
+#include <linux/prefetch.h>
 
 #include "cpl5_cmd.h"
 #include "sge.h"
@@ -273,6 +274,10 @@ struct sge {
 	struct cmdQ cmdQ[SGE_CMDQ_N] ____cacheline_aligned_in_smp;
 };
 
+static const u8 ch_mac_addr[ETH_ALEN] = {
+	0x0, 0x7, 0x43, 0x0, 0x0, 0x0
+};
+
 /*
  * stop tasklet and free all pending skb's
  */
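
This hunk hoists the ESPI-workaround source MAC (00:07:43:00:00:00) into a single read-only, file-scope constant; the duplicate on-stack copies disappear in the two espibug hunks further down. A minimal sketch of how the constant is consumed there, assuming the trailing length argument (which falls outside the visible hunk context) is ETH_ALEN:

	/* overwrite the Ethernet source address that sits just past the
	 * cpl_tx_pkt header; ch_mac_addr is the new file-scope constant */
	skb_copy_to_linear_data_offset(skb, sizeof(struct cpl_tx_pkt),
				       ch_mac_addr, ETH_ALEN);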
@@ -925,7 +930,7 @@ void t1_sge_intr_enable(struct sge *sge)
 	u32 en = SGE_INT_ENABLE;
 	u32 val = readl(sge->adapter->regs + A_PL_ENABLE);
 
-	if (sge->adapter->flags & TSO_CAPABLE)
+	if (sge->adapter->port[0].dev->hw_features & NETIF_F_TSO)
 		en &= ~F_PACKET_TOO_BIG;
 	writel(en, sge->adapter->regs + A_SG_INT_ENABLE);
 	writel(val | SGE_PL_INTR_MASK, sge->adapter->regs + A_PL_ENABLE);
@@ -948,7 +953,7 @@ int t1_sge_intr_error_handler(struct sge *sge)
 	struct adapter *adapter = sge->adapter;
 	u32 cause = readl(adapter->regs + A_SG_INT_CAUSE);
 
-	if (adapter->flags & TSO_CAPABLE)
+	if (adapter->port[0].dev->hw_features & NETIF_F_TSO)
 		cause &= ~F_PACKET_TOO_BIG;
 	if (cause & F_RESPQ_EXHAUSTED)
 		sge->stats.respQ_empty++;
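
Both interrupt-path hunks above replace the driver-private TSO_CAPABLE adapter flag with the standard hw_features test: the PACKET_TOO_BIG interrupt must stay masked whenever oversized TSO frames are legitimate. A minimal sketch of the pattern, assuming (as the patch does) that port[0].dev is initialised before SGE interrupts are touched; the helper name is hypothetical:

/* hypothetical helper, not part of this patch: hw_features holds what
 * the hardware can do, independent of what is currently enabled */
static inline bool t1_is_tso_capable(const struct adapter *adapter)
{
	return adapter->port[0].dev->hw_features & NETIF_F_TSO;
}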
@@ -1365,6 +1370,7 @@ static void sge_rx(struct sge *sge, struct freelQ *fl, unsigned int len)
 	const struct cpl_rx_pkt *p;
 	struct adapter *adapter = sge->adapter;
 	struct sge_port_stats *st;
+	struct net_device *dev;
 
 	skb = get_packet(adapter->pdev, fl, len - sge->rx_pkt_pad);
 	if (unlikely(!skb)) {
@@ -1380,15 +1386,16 @@ static void sge_rx(struct sge *sge, struct freelQ *fl, unsigned int len)
 	__skb_pull(skb, sizeof(*p));
 
 	st = this_cpu_ptr(sge->port_stats[p->iff]);
+	dev = adapter->port[p->iff].dev;
 
-	skb->protocol = eth_type_trans(skb, adapter->port[p->iff].dev);
-	if ((adapter->flags & RX_CSUM_ENABLED) && p->csum == 0xffff &&
+	skb->protocol = eth_type_trans(skb, dev);
+	if ((dev->features & NETIF_F_RXCSUM) && p->csum == 0xffff &&
 	    skb->protocol == htons(ETH_P_IP) &&
 	    (skb->data[9] == IPPROTO_TCP || skb->data[9] == IPPROTO_UDP)) {
 		++st->rx_cso_good;
 		skb->ip_summed = CHECKSUM_UNNECESSARY;
 	} else
-		skb->ip_summed = CHECKSUM_NONE;
+		skb_checksum_none_assert(skb);
 
 	if (unlikely(adapter->vlan_grp && p->vlan_valid)) {
 		st->vlan_xtract++;
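
Two conversions meet in this receive hunk: the private RX_CSUM_ENABLED adapter flag gives way to the per-device NETIF_F_RXCSUM runtime feature, and the explicit CHECKSUM_NONE store becomes skb_checksum_none_assert(), which merely asserts that ip_summed still holds its freshly-allocated default rather than writing it again. A condensed sketch of the resulting logic, with csum_ok standing in for the hardware-checksum and protocol tests performed in sge_rx():

	if ((dev->features & NETIF_F_RXCSUM) && csum_ok)
		skb->ip_summed = CHECKSUM_UNNECESSARY;
	else
		skb_checksum_none_assert(skb);	/* already CHECKSUM_NONE */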
@@ -1551,7 +1558,7 @@ static inline int responses_pending(const struct adapter *adapter)
 	const struct respQ *Q = &adapter->sge->respQ;
 	const struct respQ_e *e = &Q->entries[Q->cidx];
 
-	return (e->GenerationBit == Q->genbit);
+	return e->GenerationBit == Q->genbit;
 }
 
 /*
@@ -1658,7 +1665,7 @@ irqreturn_t t1_interrupt(int irq, void *data)
  * The code figures out how many entries the sk_buff will require in the
  * cmdQ and updates the cmdQ data structure with the state once the enqueue
  * has complete. Then, it doesn't access the global structure anymore, but
- * uses the corresponding fields on the stack. In conjuction with a spinlock
+ * uses the corresponding fields on the stack. In conjunction with a spinlock
  * around that code, we can make the function reentrant without holding the
  * lock when we actually enqueue (which might be expensive, especially on
  * architectures with IO MMUs).
@@ -1834,8 +1841,7 @@ netdev_tx_t t1_start_xmit(struct sk_buff *skb, struct net_device *dev)
 		return NETDEV_TX_OK;
 	}
 
-	if (!(adapter->flags & UDP_CSUM_CAPABLE) &&
-	    skb->ip_summed == CHECKSUM_PARTIAL &&
+	if (skb->ip_summed == CHECKSUM_PARTIAL &&
 	    ip_hdr(skb)->protocol == IPPROTO_UDP) {
 		if (unlikely(skb_checksum_help(skb))) {
 			pr_debug("%s: unable to do udp checksum\n", dev->name);
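
With the UDP_CSUM_CAPABLE flag gone, the transmit path no longer asks whether the board can offload UDP checksums: any UDP packet arriving with CHECKSUM_PARTIAL is checksummed in software via skb_checksum_help(). A hedged sketch of the fallback shape:

	if (skb->ip_summed == CHECKSUM_PARTIAL &&
	    ip_hdr(skb)->protocol == IPPROTO_UDP) {
		/* non-zero return means the skb could not be checksummed */
		if (unlikely(skb_checksum_help(skb)))
			goto drop;	/* error path elided in this sketch */
	}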
@@ -1870,7 +1876,7 @@ netdev_tx_t t1_start_xmit(struct sk_buff *skb, struct net_device *dev)
 	cpl->iff = dev->if_port;
 
 #if defined(CONFIG_VLAN_8021Q) || defined(CONFIG_VLAN_8021Q_MODULE)
-	if (adapter->vlan_grp && vlan_tx_tag_present(skb)) {
+	if (vlan_tx_tag_present(skb)) {
 		cpl->vlan_valid = 1;
 		cpl->vlan = htons(vlan_tx_tag_get(skb));
 		st->vlan_insert++;
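
Dropping the vlan_grp test on transmit is safe under the feature-flag model: the core only hands the driver a VLAN-tagged skb when TX VLAN acceleration is enabled, so vlan_tx_tag_present() alone suffices. A hypothetical probe-time line, not part of this hunk, showing where that capability would be advertised (flag name as of this kernel era; later renamed NETIF_F_HW_VLAN_CTAG_TX):

	netdev->features |= NETIF_F_HW_VLAN_TX;	/* hypothetical, for illustration */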
@@ -2012,10 +2018,6 @@ static void espibug_workaround_t204(unsigned long data)
 			continue;
 
 		if (!skb->cb[0]) {
-			u8 ch_mac_addr[ETH_ALEN] = {
-				0x0, 0x7, 0x43, 0x0, 0x0, 0x0
-			};
-
 			skb_copy_to_linear_data_offset(skb,
 					    sizeof(struct cpl_tx_pkt),
 					    ch_mac_addr,
@@ -2048,8 +2050,6 @@ static void espibug_workaround(unsigned long data)
 
 		if ((seop & 0xfff0fff) == 0xfff && skb) {
 			if (!skb->cb[0]) {
-				u8 ch_mac_addr[ETH_ALEN] =
-					{0x0, 0x7, 0x43, 0x0, 0x0, 0x0};
 				skb_copy_to_linear_data_offset(skb,
 					    sizeof(struct cpl_tx_pkt),
 					    ch_mac_addr,
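
Both espibug workaround variants now share the file-scope ch_mac_addr introduced near the top of the patch; behaviour is unchanged, the constant is simply no longer rebuilt on the stack per invocation. A minimal sketch of the resulting pattern, assuming the driver's cpl5_cmd.h for struct cpl_tx_pkt, with fix_espibug() as a hypothetical stand-in for either workaround:

#include <linux/skbuff.h>
#include <linux/if_ether.h>	/* ETH_ALEN */
#include "cpl5_cmd.h"		/* struct cpl_tx_pkt (driver-private) */

static const u8 ch_mac_addr[ETH_ALEN] = {
	0x0, 0x7, 0x43, 0x0, 0x0, 0x0	/* one shared read-only copy */
};

static void fix_espibug(struct sk_buff *skb)	/* hypothetical wrapper */
{
	skb_copy_to_linear_data_offset(skb, sizeof(struct cpl_tx_pkt),
				       ch_mac_addr, ETH_ALEN);
}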