Diffstat (limited to 'drivers/net')
 drivers/net/8139cp.c | 83
 1 file changed, 27 insertions(+), 56 deletions(-)

diff --git a/drivers/net/8139cp.c b/drivers/net/8139cp.c
index 73b10b07f9b5..cc4c210a91f8 100644
--- a/drivers/net/8139cp.c
+++ b/drivers/net/8139cp.c
@@ -78,17 +78,6 @@
 #include <asm/irq.h>
 #include <asm/uaccess.h>
 
-/* VLAN tagging feature enable/disable */
-#if defined(CONFIG_VLAN_8021Q) || defined(CONFIG_VLAN_8021Q_MODULE)
-#define CP_VLAN_TAG_USED 1
-#define CP_VLAN_TX_TAG(tx_desc,vlan_tag_value) \
-	do { (tx_desc)->opts2 = cpu_to_le32(vlan_tag_value); } while (0)
-#else
-#define CP_VLAN_TAG_USED 0
-#define CP_VLAN_TX_TAG(tx_desc,vlan_tag_value) \
-	do { (tx_desc)->opts2 = 0; } while (0)
-#endif
-
 /* These identify the driver base version and may not be removed. */
 static char version[] =
 DRV_NAME ": 10/100 PCI Ethernet driver v" DRV_VERSION " (" DRV_RELDATE ")\n";
@@ -356,9 +345,6 @@ struct cp_private {
 	unsigned		rx_buf_sz;
 	unsigned		wol_enabled : 1; /* Is Wake-on-LAN enabled? */
 
-#if CP_VLAN_TAG_USED
-	struct vlan_group	*vlgrp;
-#endif
 	dma_addr_t		ring_dma;
 
 	struct mii_if_info	mii_if;
@@ -423,24 +409,6 @@ static struct {
 };
 
 
-#if CP_VLAN_TAG_USED
-static void cp_vlan_rx_register(struct net_device *dev, struct vlan_group *grp)
-{
-	struct cp_private *cp = netdev_priv(dev);
-	unsigned long flags;
-
-	spin_lock_irqsave(&cp->lock, flags);
-	cp->vlgrp = grp;
-	if (grp)
-		cp->cpcmd |= RxVlanOn;
-	else
-		cp->cpcmd &= ~RxVlanOn;
-
-	cpw16(CpCmd, cp->cpcmd);
-	spin_unlock_irqrestore(&cp->lock, flags);
-}
-#endif /* CP_VLAN_TAG_USED */
-
 static inline void cp_set_rxbufsize (struct cp_private *cp)
 {
 	unsigned int mtu = cp->dev->mtu;
@@ -455,18 +423,17 @@ static inline void cp_set_rxbufsize (struct cp_private *cp)
 static inline void cp_rx_skb (struct cp_private *cp, struct sk_buff *skb,
 			      struct cp_desc *desc)
 {
+	u32 opts2 = le32_to_cpu(desc->opts2);
+
 	skb->protocol = eth_type_trans (skb, cp->dev);
 
 	cp->dev->stats.rx_packets++;
 	cp->dev->stats.rx_bytes += skb->len;
 
-#if CP_VLAN_TAG_USED
-	if (cp->vlgrp && (desc->opts2 & cpu_to_le32(RxVlanTagged))) {
-		vlan_hwaccel_receive_skb(skb, cp->vlgrp,
-					 swab16(le32_to_cpu(desc->opts2) & 0xffff));
-	} else
-#endif
-		netif_receive_skb(skb);
+	if (opts2 & RxVlanTagged)
+		__vlan_hwaccel_put_tag(skb, swab16(opts2 & 0xffff));
+
+	napi_gro_receive(&cp->napi, skb);
 }
 
 static void cp_rx_err_acct (struct cp_private *cp, unsigned rx_tail,
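Annotation (not part of the patch): with the vlan_group gone, the receive path simply stamps the tag onto the skb and hands it to GRO. A minimal sketch of the TCI recovery, restating only what the hunk above already does; the helper name is hypothetical:

/* Illustration only (hypothetical helper): the 8139C+ reports the 802.1Q
 * TCI in network byte order in the low 16 bits of opts2, so after
 * le32_to_cpu() it still needs a swab16() before the stack sees it.
 */
static inline u16 cp_example_rx_tci(u32 opts2)
{
	return swab16(opts2 & 0xffff);
}

It would be used exactly as in the hunk: if (opts2 & RxVlanTagged) __vlan_hwaccel_put_tag(skb, cp_example_rx_tci(opts2));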
@@ -730,6 +697,12 @@ static void cp_tx (struct cp_private *cp)
 		netif_wake_queue(cp->dev);
 }
 
+static inline u32 cp_tx_vlan_tag(struct sk_buff *skb)
+{
+	return vlan_tx_tag_present(skb) ?
+		TxVlanTag | swab16(vlan_tx_tag_get(skb)) : 0x00;
+}
+
 static netdev_tx_t cp_start_xmit (struct sk_buff *skb,
 				  struct net_device *dev)
 {
@@ -737,9 +710,7 @@ static netdev_tx_t cp_start_xmit (struct sk_buff *skb,
 	unsigned entry;
 	u32 eor, flags;
 	unsigned long intr_flags;
-#if CP_VLAN_TAG_USED
-	u32 vlan_tag = 0;
-#endif
+	__le32 opts2;
 	int mss = 0;
 
 	spin_lock_irqsave(&cp->lock, intr_flags);
@@ -752,15 +723,12 @@ static netdev_tx_t cp_start_xmit (struct sk_buff *skb,
 		return NETDEV_TX_BUSY;
 	}
 
-#if CP_VLAN_TAG_USED
-	if (vlan_tx_tag_present(skb))
-		vlan_tag = TxVlanTag | swab16(vlan_tx_tag_get(skb));
-#endif
-
 	entry = cp->tx_head;
 	eor = (entry == (CP_TX_RING_SIZE - 1)) ? RingEnd : 0;
 	mss = skb_shinfo(skb)->gso_size;
 
+	opts2 = cpu_to_le32(cp_tx_vlan_tag(skb));
+
 	if (skb_shinfo(skb)->nr_frags == 0) {
 		struct cp_desc *txd = &cp->tx_ring[entry];
 		u32 len;
@@ -768,7 +736,7 @@ static netdev_tx_t cp_start_xmit (struct sk_buff *skb,
 
 		len = skb->len;
 		mapping = dma_map_single(&cp->pdev->dev, skb->data, len, PCI_DMA_TODEVICE);
-		CP_VLAN_TX_TAG(txd, vlan_tag);
+		txd->opts2 = opts2;
 		txd->addr = cpu_to_le64(mapping);
 		wmb();
 
@@ -839,7 +807,7 @@ static netdev_tx_t cp_start_xmit (struct sk_buff *skb,
 				ctrl |= LastFrag;
 
 			txd = &cp->tx_ring[entry];
-			CP_VLAN_TX_TAG(txd, vlan_tag);
+			txd->opts2 = opts2;
 			txd->addr = cpu_to_le64(mapping);
 			wmb();
 
@@ -851,7 +819,7 @@ static netdev_tx_t cp_start_xmit (struct sk_buff *skb,
 		}
 
 		txd = &cp->tx_ring[first_entry];
-		CP_VLAN_TX_TAG(txd, vlan_tag);
+		txd->opts2 = opts2;
 		txd->addr = cpu_to_le64(first_mapping);
 		wmb();
 
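Annotation (not part of the patch): the transmit hunks above all perform the same substitution: the CP_VLAN_TX_TAG() macro is replaced by one opts2 word computed per packet via cp_tx_vlan_tag() and written into every descriptor that packet occupies. A hedged sketch, with a hypothetical helper name:

/* Illustration only (hypothetical helper): one opts2 value per packet,
 * reused for each descriptor.  cp_tx_vlan_tag() yields 0 for untagged
 * skbs, or TxVlanTag plus the byte-swapped TCI for tagged ones.
 */
static inline void cp_example_fill_opts2(struct cp_desc *txd, struct sk_buff *skb)
{
	__le32 opts2 = cpu_to_le32(cp_tx_vlan_tag(skb));

	txd->opts2 = opts2;	/* stored before opts1 hands the descriptor to the NIC */
}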
@@ -1431,6 +1399,11 @@ static int cp_set_features(struct net_device *dev, u32 features)
 	else
 		cp->cpcmd &= ~RxChkSum;
 
+	if (features & NETIF_F_HW_VLAN_RX)
+		cp->cpcmd |= RxVlanOn;
+	else
+		cp->cpcmd &= ~RxVlanOn;
+
 	cpw16_f(CpCmd, cp->cpcmd);
 	spin_unlock_irqrestore(&cp->lock, flags);
 
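Annotation (not part of the patch): this runtime toggle only has an effect because the probe-time hunk at the end of the diff also advertises the VLAN bits as user-changeable. A hedged sketch of the interplay, using only flags from this patch (the helper name is hypothetical):

/* Illustration only (hypothetical helper): dev->features turns hardware
 * VLAN acceleration on by default, dev->hw_features makes the same bits
 * user-toggleable, and every toggle is routed to cp_set_features(),
 * which flips RxVlanOn in CpCmd as shown above.
 */
static void cp_example_advertise_vlan(struct net_device *dev)
{
	dev->features    |= NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX;
	dev->hw_features |= NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX;
}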
@@ -1818,9 +1791,6 @@ static const struct net_device_ops cp_netdev_ops = {
 	.ndo_start_xmit		= cp_start_xmit,
 	.ndo_tx_timeout		= cp_tx_timeout,
 	.ndo_set_features	= cp_set_features,
-#if CP_VLAN_TAG_USED
-	.ndo_vlan_rx_register	= cp_vlan_rx_register,
-#endif
 #ifdef BROKEN
 	.ndo_change_mtu		= cp_change_mtu,
 #endif
@@ -1949,15 +1919,16 @@ static int cp_init_one (struct pci_dev *pdev, const struct pci_device_id *ent)
 	dev->ethtool_ops = &cp_ethtool_ops;
 	dev->watchdog_timeo = TX_TIMEOUT;
 
-#if CP_VLAN_TAG_USED
 	dev->features |= NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX;
-#endif
 
 	if (pci_using_dac)
 		dev->features |= NETIF_F_HIGHDMA;
 
 	/* disabled by default until verified */
-	dev->hw_features |= NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_TSO;
+	dev->hw_features |= NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_TSO |
+		NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX;
+	dev->vlan_features = NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_TSO |
+		NETIF_F_HIGHDMA;
 
 	dev->irq = pdev->irq;
 
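Annotation (not part of the patch): the new dev->vlan_features assignment matters for stacked 802.1Q devices, whose effective feature set is derived from the real device's vlan_features; without it, tagged traffic would lose these offloads. A sketch restating the new line (hypothetical helper name):

/* Illustration only (hypothetical helper): 802.1Q interfaces created on
 * top of the 8139C+ keep scatter-gather, IP checksum, TSO and high-DMA.
 */
static void cp_example_set_vlan_features(struct net_device *dev)
{
	dev->vlan_features = NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_TSO |
			     NETIF_F_HIGHDMA;
}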