 drivers/net/ethernet/qlogic/qlge/qlge.h      |   2 +-
 drivers/net/ethernet/qlogic/qlge/qlge_main.c | 126 ++++++++++++++++++++------
 2 files changed, 102 insertions(+), 26 deletions(-)
diff --git a/drivers/net/ethernet/qlogic/qlge/qlge.h b/drivers/net/ethernet/qlogic/qlge/qlge.h
index cc62272d485b..0c9c4e895595 100644
--- a/drivers/net/ethernet/qlogic/qlge/qlge.h
+++ b/drivers/net/ethernet/qlogic/qlge/qlge.h
@@ -18,7 +18,7 @@
  */
 #define DRV_NAME "qlge"
 #define DRV_STRING "QLogic 10 Gigabit PCI-E Ethernet Driver "
-#define DRV_VERSION "v1.00.00.32"
+#define DRV_VERSION "1.00.00.33"
 
 #define WQ_ADDR_ALIGN 0x3 /* 4 byte alignment */
 
diff --git a/drivers/net/ethernet/qlogic/qlge/qlge_main.c b/drivers/net/ethernet/qlogic/qlge/qlge_main.c
index 2553cf4503b9..64f94098bc02 100644
--- a/drivers/net/ethernet/qlogic/qlge/qlge_main.c
+++ b/drivers/net/ethernet/qlogic/qlge/qlge_main.c
@@ -96,8 +96,10 @@ static DEFINE_PCI_DEVICE_TABLE(qlge_pci_tbl) = {
 
 MODULE_DEVICE_TABLE(pci, qlge_pci_tbl);
 
 static int ql_wol(struct ql_adapter *);
 static void qlge_set_multicast_list(struct net_device *);
+static int ql_adapter_down(struct ql_adapter *);
+static int ql_adapter_up(struct ql_adapter *);
 
 /* This hardware semaphore causes exclusive access to
  * resources shared between the NIC driver, MPI firmware,
@@ -1464,6 +1466,29 @@ static void ql_categorize_rx_err(struct ql_adapter *qdev, u8 rx_err,
         }
 }
 
+/**
+ * ql_update_mac_hdr_len - helper routine to update the mac header length
+ * based on vlan tags if present
+ */
+static void ql_update_mac_hdr_len(struct ql_adapter *qdev,
+                                  struct ib_mac_iocb_rsp *ib_mac_rsp,
+                                  void *page, size_t *len)
+{
+        u16 *tags;
+
+        if (qdev->ndev->features & NETIF_F_HW_VLAN_CTAG_RX)
+                return;
+        if (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_V) {
+                tags = (u16 *)page;
+                /* Look for stacked vlan tags in ethertype field */
+                if (tags[6] == ETH_P_8021Q &&
+                    tags[8] == ETH_P_8021Q)
+                        *len += 2 * VLAN_HLEN;
+                else
+                        *len += VLAN_HLEN;
+        }
+}
+
 /* Process an inbound completion from an rx ring. */
 static void ql_process_mac_rx_gro_page(struct ql_adapter *qdev,
                                        struct rx_ring *rx_ring,
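
For reference, the byte layout the new helper is probing: tags[6] is the 16-bit word at byte offset 12 (the outer EtherType/TPID slot) and tags[8] is at offset 16, where a second TPID sits when the frame carries stacked (QinQ) tags. Below is a minimal, hedged user-space sketch of the same length computation; the constant values mirror the well-known ones from if_ether.h/if_vlan.h, the frame contents are hypothetical, and ntohs() is used here for clarity while the driver compares the raw 16-bit words in place.

    #include <arpa/inet.h>   /* ntohs() */
    #include <stddef.h>
    #include <stdint.h>
    #include <stdio.h>
    #include <string.h>

    #define ETH_HLEN    14      /* dst MAC + src MAC + EtherType */
    #define VLAN_HLEN   4       /* one 802.1Q tag: TPID + TCI */
    #define ETH_P_8021Q 0x8100  /* 802.1Q TPID */

    /* Return the MAC header length of a raw frame: 14, 18 or 22 bytes
     * depending on whether zero, one or two VLAN tags are present. */
    static size_t mac_hdr_len(const uint8_t *frame)
    {
            uint16_t outer, inner;

            memcpy(&outer, frame + 12, sizeof(outer)); /* same slot as tags[6] */
            memcpy(&inner, frame + 16, sizeof(inner)); /* same slot as tags[8] */

            if (ntohs(outer) != ETH_P_8021Q)
                    return ETH_HLEN;
            if (ntohs(inner) == ETH_P_8021Q)
                    return ETH_HLEN + 2 * VLAN_HLEN;    /* stacked tags */
            return ETH_HLEN + VLAN_HLEN;                /* single tag */
    }

    int main(void)
    {
            /* Hypothetical double-tagged header: MACs, TPID/TCI, TPID/TCI, IPv4 */
            const uint8_t hdr[22] = {
                    [12] = 0x81, [13] = 0x00, [14] = 0x00, [15] = 0x64,
                    [16] = 0x81, [17] = 0x00, [18] = 0x00, [19] = 0xc8,
                    [20] = 0x08, [21] = 0x00,
            };

            printf("mac header length = %zu\n", mac_hdr_len(hdr)); /* prints 22 */
            return 0;
    }
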
@@ -1523,6 +1548,7 @@ static void ql_process_mac_rx_page(struct ql_adapter *qdev,
         void *addr;
         struct bq_desc *lbq_desc = ql_get_curr_lchunk(qdev, rx_ring);
         struct napi_struct *napi = &rx_ring->napi;
+        size_t hlen = ETH_HLEN;
 
         skb = netdev_alloc_skb(ndev, length);
         if (!skb) {
@@ -1540,25 +1566,28 @@ static void ql_process_mac_rx_page(struct ql_adapter *qdev,
                 goto err_out;
         }
 
+        /* Update the MAC header length*/
+        ql_update_mac_hdr_len(qdev, ib_mac_rsp, addr, &hlen);
+
         /* The max framesize filter on this chip is set higher than
          * MTU since FCoE uses 2k frames.
          */
-        if (skb->len > ndev->mtu + ETH_HLEN) {
+        if (skb->len > ndev->mtu + hlen) {
                 netif_err(qdev, drv, qdev->ndev,
                           "Segment too small, dropping.\n");
                 rx_ring->rx_dropped++;
                 goto err_out;
         }
-        memcpy(skb_put(skb, ETH_HLEN), addr, ETH_HLEN);
+        memcpy(skb_put(skb, hlen), addr, hlen);
         netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
                      "%d bytes of headers and data in large. Chain page to new skb and pull tail.\n",
                      length);
         skb_fill_page_desc(skb, 0, lbq_desc->p.pg_chunk.page,
-                           lbq_desc->p.pg_chunk.offset+ETH_HLEN,
-                           length-ETH_HLEN);
-        skb->len += length-ETH_HLEN;
-        skb->data_len += length-ETH_HLEN;
-        skb->truesize += length-ETH_HLEN;
+                           lbq_desc->p.pg_chunk.offset + hlen,
+                           length - hlen);
+        skb->len += length - hlen;
+        skb->data_len += length - hlen;
+        skb->truesize += length - hlen;
 
         rx_ring->rx_packets++;
         rx_ring->rx_bytes += skb->len;
@@ -1576,7 +1605,7 @@ static void ql_process_mac_rx_page(struct ql_adapter *qdev,
             (ib_mac_rsp->flags3 & IB_MAC_IOCB_RSP_V4)) {
                 /* Unfragmented ipv4 UDP frame. */
                 struct iphdr *iph =
-                        (struct iphdr *) ((u8 *)addr + ETH_HLEN);
+                        (struct iphdr *)((u8 *)addr + hlen);
                 if (!(iph->frag_off &
                       htons(IP_MF|IP_OFFSET))) {
                         skb->ip_summed = CHECKSUM_UNNECESSARY;
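
The two hunks above thread the computed header length through the copy path: hlen bytes of MAC header are copied into the skb's linear area, the remaining length - hlen bytes stay in the page fragment, and the IPv4 header is then expected at offset hlen rather than at a fixed ETH_HLEN. A small stand-alone model of that arithmetic, with purely hypothetical names, is sketched below.

    #include <stddef.h>
    #include <stdio.h>

    #define ETH_HLEN  14
    #define VLAN_HLEN 4

    /* Hypothetical split of one received frame, mirroring the bookkeeping:
     * the MAC header goes into the linear area, the rest stays in the page. */
    struct rx_split {
            size_t linear;     /* bytes copied into the linear area */
            size_t frag;       /* bytes left in the page fragment */
            size_t l3_offset;  /* where the IP header starts */
    };

    static struct rx_split split_frame(size_t frame_len, size_t mac_hlen)
    {
            struct rx_split s = {
                    .linear    = mac_hlen,
                    .frag      = frame_len - mac_hlen,
                    .l3_offset = mac_hlen,
            };
            return s;
    }

    int main(void)
    {
            /* 18-byte single-tagged MAC header + 1500-byte IP packet */
            struct rx_split s = split_frame(1518, ETH_HLEN + VLAN_HLEN);

            printf("linear=%zu frag=%zu l3 at offset %zu\n",
                   s.linear, s.frag, s.l3_offset);
            return 0;
    }
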
@@ -1726,7 +1755,8 @@ static struct sk_buff *ql_build_rx_skb(struct ql_adapter *qdev,
         struct bq_desc *sbq_desc;
         struct sk_buff *skb = NULL;
         u32 length = le32_to_cpu(ib_mac_rsp->data_len);
         u32 hdr_len = le32_to_cpu(ib_mac_rsp->hdr_len);
+        size_t hlen = ETH_HLEN;
 
         /*
          * Handle the header buffer if present.
@@ -1853,9 +1883,10 @@ static struct sk_buff *ql_build_rx_skb(struct ql_adapter *qdev,
                         skb->data_len += length;
                         skb->truesize += length;
                         length -= length;
-                        __pskb_pull_tail(skb,
-                                         (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_V) ?
-                                         VLAN_ETH_HLEN : ETH_HLEN);
+                        ql_update_mac_hdr_len(qdev, ib_mac_rsp,
+                                              lbq_desc->p.pg_chunk.va,
+                                              &hlen);
+                        __pskb_pull_tail(skb, hlen);
                 }
         } else {
                 /*
@@ -1910,8 +1941,9 @@ static struct sk_buff *ql_build_rx_skb(struct ql_adapter *qdev,
                         length -= size;
                         i++;
                 }
-                __pskb_pull_tail(skb, (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_V) ?
-                                 VLAN_ETH_HLEN : ETH_HLEN);
+                ql_update_mac_hdr_len(qdev, ib_mac_rsp, lbq_desc->p.pg_chunk.va,
+                                      &hlen);
+                __pskb_pull_tail(skb, hlen);
         }
         return skb;
 }
@@ -2003,7 +2035,7 @@ static void ql_process_mac_split_rx_intr(struct ql_adapter *qdev,
         rx_ring->rx_packets++;
         rx_ring->rx_bytes += skb->len;
         skb_record_rx_queue(skb, rx_ring->cq_id);
-        if ((ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_V) && (vlan_id != 0))
+        if (vlan_id != 0xffff)
                 __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), vlan_id);
         if (skb->ip_summed == CHECKSUM_UNNECESSARY)
                 napi_gro_receive(&rx_ring->napi, skb);
@@ -2017,7 +2049,8 @@ static unsigned long ql_process_mac_rx_intr(struct ql_adapter *qdev,
                                           struct ib_mac_iocb_rsp *ib_mac_rsp)
 {
         u32 length = le32_to_cpu(ib_mac_rsp->data_len);
-        u16 vlan_id = (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_V) ?
+        u16 vlan_id = ((ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_V) &&
+                       (qdev->ndev->features & NETIF_F_HW_VLAN_CTAG_RX)) ?
                         ((le16_to_cpu(ib_mac_rsp->vlan_id) &
                           IB_MAC_IOCB_RSP_VLAN_MASK)) : 0xffff;
 
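
The 0xffff value acts as a "no tag to report" sentinel: it is used both when the completion carries no VLAN tag and, with this patch, when the user has turned CTAG_RX stripping off, so the later __vlan_hwaccel_put_tag() call is skipped and the tag stays in the packet data. A compact user-space model of that decision follows; the flag names and the 12-bit mask are hypothetical stand-ins for the driver's IB_MAC_IOCB_RSP_* values.

    #include <stdbool.h>
    #include <stdint.h>
    #include <stdio.h>

    #define NO_VLAN_TAG   0xffffu  /* sentinel: nothing to hand to the stack */
    #define VLAN_VID_MASK 0x0fffu  /* 12-bit VLAN id, stand-in for the rsp mask */

    /* Hypothetical mirror of the decision above: only report a tag when the
     * completion flagged one AND rx tag stripping is enabled. */
    static uint16_t effective_vlan_id(bool rsp_has_vlan, bool ctag_rx_enabled,
                                      uint16_t raw_vlan_field)
    {
            if (rsp_has_vlan && ctag_rx_enabled)
                    return raw_vlan_field & VLAN_VID_MASK;
            return NO_VLAN_TAG;
    }

    int main(void)
    {
            /* Stripping on: tag 100 is reported; stripping off: sentinel. */
            printf("%#x\n", (unsigned)effective_vlan_id(true, true, 100));   /* 0x64   */
            printf("%#x\n", (unsigned)effective_vlan_id(true, false, 100));  /* 0xffff */
            return 0;
    }
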
@@ -2310,9 +2343,39 @@ static void qlge_vlan_mode(struct net_device *ndev, netdev_features_t features)
         }
 }
 
+/**
+ * qlge_update_hw_vlan_features - helper routine to reinitialize the adapter
+ * based on the features to enable/disable hardware vlan accel
+ */
+static int qlge_update_hw_vlan_features(struct net_device *ndev,
+                                        netdev_features_t features)
+{
+        struct ql_adapter *qdev = netdev_priv(ndev);
+        int status = 0;
+
+        status = ql_adapter_down(qdev);
+        if (status) {
+                netif_err(qdev, link, qdev->ndev,
+                          "Failed to bring down the adapter\n");
+                return status;
+        }
+
+        /* update the features with resent change */
+        ndev->features = features;
+
+        status = ql_adapter_up(qdev);
+        if (status) {
+                netif_err(qdev, link, qdev->ndev,
+                          "Failed to bring up the adapter\n");
+                return status;
+        }
+        return status;
+}
+
 static netdev_features_t qlge_fix_features(struct net_device *ndev,
                                            netdev_features_t features)
 {
+        int err;
         /*
          * Since there is no support for separate rx/tx vlan accel
          * enable/disable make sure tx flag is always in same state as rx.
@@ -2322,6 +2385,11 @@ static netdev_features_t qlge_fix_features(struct net_device *ndev,
         else
                 features &= ~NETIF_F_HW_VLAN_CTAG_TX;
 
+        /* Update the behavior of vlan accel in the adapter */
+        err = qlge_update_hw_vlan_features(ndev, features);
+        if (err)
+                return err;
+
         return features;
 }
 
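
qlge_update_hw_vlan_features() above bounces the interface and applies the requested feature mask wholesale. For context, netdev feature masks are plain bit sets, so a caller can also check whether a specific offload bit actually differs before deciding to do expensive work; the sketch below shows that XOR-based test with hypothetical flag values, and is an illustration rather than the driver's code.

    #include <stdbool.h>
    #include <stdint.h>
    #include <stdio.h>

    /* Hypothetical stand-ins for netdev_features_t bits. */
    #define F_HW_VLAN_CTAG_RX (1ull << 0)
    #define F_HW_VLAN_CTAG_TX (1ull << 1)

    /* True when the CTAG_RX bit differs between the current and the
     * requested feature masks, i.e. a reconfiguration would be needed. */
    static bool ctag_rx_changed(uint64_t cur, uint64_t wanted)
    {
            uint64_t changed = cur ^ wanted;

            return (changed & F_HW_VLAN_CTAG_RX) != 0;
    }

    int main(void)
    {
            uint64_t cur = F_HW_VLAN_CTAG_RX | F_HW_VLAN_CTAG_TX;

            /* The moral equivalent of "ethtool -K ethX rxvlan off". */
            printf("%d\n", ctag_rx_changed(cur, F_HW_VLAN_CTAG_TX));  /* 1 */
            printf("%d\n", ctag_rx_changed(cur, cur));                /* 0 */
            return 0;
    }
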
@@ -3704,8 +3772,12 @@ static int ql_adapter_initialize(struct ql_adapter *qdev)
         ql_write32(qdev, SYS, mask | value);
 
         /* Set the default queue, and VLAN behavior. */
-        value = NIC_RCV_CFG_DFQ | NIC_RCV_CFG_RV;
-        mask = NIC_RCV_CFG_DFQ_MASK | (NIC_RCV_CFG_RV << 16);
+        value = NIC_RCV_CFG_DFQ;
+        mask = NIC_RCV_CFG_DFQ_MASK;
+        if (qdev->ndev->features & NETIF_F_HW_VLAN_CTAG_RX) {
+                value |= NIC_RCV_CFG_RV;
+                mask |= (NIC_RCV_CFG_RV << 16);
+        }
         ql_write32(qdev, NIC_RCV_CFG, (mask | value));
 
         /* Set the MPI interrupt to enabled. */
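
The NIC_RCV_CFG write above follows the same pattern as the SYS write earlier in this function: the value bits sit in the low halfword and the matching enable bits are shifted into the high halfword, which reads as a per-bit write-enable scheme, so only the bits whose enable is set take effect. A short sketch of composing such a 32-bit write follows; the helper and bit names are hypothetical and the real bit positions live in qlge.h.

    #include <stdint.h>
    #include <stdio.h>

    /* Hypothetical stand-in for NIC_RCV_CFG_RV ("strip the VLAN tag"). */
    #define CFG_RV (1u << 3)

    /* Compose a 32-bit write where the high halfword enables writes to the
     * corresponding low-halfword bits: only enabled bits are modified. */
    static uint32_t masked_write(uint16_t enable, uint16_t value)
    {
            return ((uint32_t)enable << 16) | value;
    }

    int main(void)
    {
            /* Enable VLAN stripping: set CFG_RV and its write-enable bit. */
            uint32_t w = masked_write(CFG_RV, CFG_RV);

            printf("reg write = %#010x\n", (unsigned)w);  /* 0x00080008 */
            return 0;
    }
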
@@ -4692,11 +4764,15 @@ static int qlge_probe(struct pci_dev *pdev,
 
         qdev = netdev_priv(ndev);
         SET_NETDEV_DEV(ndev, &pdev->dev);
-        ndev->hw_features = NETIF_F_SG | NETIF_F_IP_CSUM |
-                            NETIF_F_TSO | NETIF_F_TSO_ECN |
-                            NETIF_F_HW_VLAN_CTAG_TX | NETIF_F_RXCSUM;
-        ndev->features = ndev->hw_features |
-                         NETIF_F_HW_VLAN_CTAG_RX | NETIF_F_HW_VLAN_CTAG_FILTER;
+        ndev->hw_features = NETIF_F_SG |
+                            NETIF_F_IP_CSUM |
+                            NETIF_F_TSO |
+                            NETIF_F_TSO_ECN |
+                            NETIF_F_HW_VLAN_CTAG_TX |
+                            NETIF_F_HW_VLAN_CTAG_RX |
+                            NETIF_F_HW_VLAN_CTAG_FILTER |
+                            NETIF_F_RXCSUM;
+        ndev->features = ndev->hw_features;
         ndev->vlan_features = ndev->hw_features;
 
         if (test_bit(QL_DMA64, &qdev->flags))
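
With this probe change the CTAG_RX and CTAG_FILTER bits move into hw_features, the set a user may toggle at runtime, while features = hw_features keeps them enabled by default. A toy model of that relationship, with hypothetical bit names, is sketched below.

    #include <stdint.h>
    #include <stdio.h>

    /* Hypothetical stand-ins for a few NETIF_F_* bits. */
    #define F_SG              (1ull << 0)
    #define F_HW_VLAN_CTAG_RX (1ull << 1)

    struct toy_netdev {
            uint64_t hw_features;  /* what the user is allowed to toggle */
            uint64_t features;     /* what is currently enabled */
    };

    int main(void)
    {
            struct toy_netdev dev = {
                    /* Advertise the offloads as toggleable... */
                    .hw_features = F_SG | F_HW_VLAN_CTAG_RX,
            };

            /* ...and enable them all by default, as the probe hunk now does. */
            dev.features = dev.hw_features;

            /* A later "rxvlan off" request clears the bit in features only;
             * hw_features is what makes the request legal in the first place. */
            dev.features &= ~F_HW_VLAN_CTAG_RX;

            printf("rx vlan stripping %s\n",
                   (dev.features & F_HW_VLAN_CTAG_RX) ? "on" : "off");
            return 0;
    }
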