Diffstat (limited to 'drivers/net/8139cp.c')
-rw-r--r--  drivers/net/8139cp.c  100
1 file changed, 69 insertions, 31 deletions
diff --git a/drivers/net/8139cp.c b/drivers/net/8139cp.c
index d639cb8dc461..72cdf19e1be1 100644
--- a/drivers/net/8139cp.c
+++ b/drivers/net/8139cp.c
@@ -54,6 +54,7 @@
 
 #include <linux/config.h>
 #include <linux/module.h>
+#include <linux/moduleparam.h>
 #include <linux/kernel.h>
 #include <linux/compiler.h>
 #include <linux/netdevice.h>
@@ -91,16 +92,17 @@ KERN_INFO DRV_NAME ": 10/100 PCI Ethernet driver v" DRV_VERSION " (" DRV_RELDATE
 
 MODULE_AUTHOR("Jeff Garzik <jgarzik@pobox.com>");
 MODULE_DESCRIPTION("RealTek RTL-8139C+ series 10/100 PCI Ethernet driver");
+MODULE_VERSION(DRV_VERSION);
 MODULE_LICENSE("GPL");
 
 static int debug = -1;
-MODULE_PARM (debug, "i");
+module_param(debug, int, 0);
 MODULE_PARM_DESC (debug, "8139cp: bitmapped message enable number");
 
 /* Maximum number of multicast addresses to filter (vs. Rx-all-multicast).
    The RTL chips use a 64 element hash table based on the Ethernet CRC. */
 static int multicast_filter_limit = 32;
-MODULE_PARM (multicast_filter_limit, "i");
+module_param(multicast_filter_limit, int, 0);
 MODULE_PARM_DESC (multicast_filter_limit, "8139cp: maximum number of filtered multicast addresses");
 
 #define PFX DRV_NAME ": "
@@ -186,6 +188,9 @@ enum {
 	RingEnd		= (1 << 30), /* End of descriptor ring */
 	FirstFrag	= (1 << 29), /* First segment of a packet */
 	LastFrag	= (1 << 28), /* Final segment of a packet */
+	LargeSend	= (1 << 27), /* TCP Large Send Offload (TSO) */
+	MSSShift	= 16,	     /* MSS value position */
+	MSSMask		= 0xfff,     /* MSS value: 11 bits */
 	TxError		= (1 << 23), /* Tx error summary */
 	RxError		= (1 << 20), /* Rx error summary */
 	IPCS		= (1 << 18), /* Calculate IP checksum */
@@ -312,7 +317,7 @@ struct cp_desc {
 struct ring_info {
 	struct sk_buff		*skb;
 	dma_addr_t		mapping;
-	unsigned		frag;
+	u32			len;
 };
 
 struct cp_dma_stats {
@@ -394,6 +399,9 @@ struct cp_private {
 static void __cp_set_rx_mode (struct net_device *dev);
 static void cp_tx (struct cp_private *cp);
 static void cp_clean_rings (struct cp_private *cp);
+#ifdef CONFIG_NET_POLL_CONTROLLER
+static void cp_poll_controller(struct net_device *dev);
+#endif
 
 static struct pci_device_id cp_pci_tbl[] = {
 	{ PCI_VENDOR_ID_REALTEK, PCI_DEVICE_ID_REALTEK_8139,
@@ -688,6 +696,19 @@ cp_interrupt (int irq, void *dev_instance, struct pt_regs *regs)
 	return IRQ_HANDLED;
 }
 
+#ifdef CONFIG_NET_POLL_CONTROLLER
+/*
+ * Polling receive - used by netconsole and other diagnostic tools
+ * to allow network i/o with interrupts disabled.
+ */
+static void cp_poll_controller(struct net_device *dev)
+{
+	disable_irq(dev->irq);
+	cp_interrupt(dev->irq, dev, NULL);
+	enable_irq(dev->irq);
+}
+#endif
+
 static void cp_tx (struct cp_private *cp)
 {
 	unsigned tx_head = cp->tx_head;
@@ -707,7 +728,7 @@ static void cp_tx (struct cp_private *cp)
 			BUG();
 
 		pci_unmap_single(cp->pdev, cp->tx_skb[tx_tail].mapping,
-				 skb->len, PCI_DMA_TODEVICE);
+				 cp->tx_skb[tx_tail].len, PCI_DMA_TODEVICE);
 
 		if (status & LastFrag) {
 			if (status & (TxError | TxFIFOUnder)) {
@@ -749,10 +770,11 @@ static int cp_start_xmit (struct sk_buff *skb, struct net_device *dev)
 {
 	struct cp_private *cp = netdev_priv(dev);
 	unsigned entry;
-	u32 eor;
+	u32 eor, flags;
 #if CP_VLAN_TAG_USED
 	u32 vlan_tag = 0;
 #endif
+	int mss = 0;
 
 	spin_lock_irq(&cp->lock);
 
@@ -772,6 +794,9 @@ static int cp_start_xmit (struct sk_buff *skb, struct net_device *dev)
 
 	entry = cp->tx_head;
 	eor = (entry == (CP_TX_RING_SIZE - 1)) ? RingEnd : 0;
+	if (dev->features & NETIF_F_TSO)
+		mss = skb_shinfo(skb)->tso_size;
+
 	if (skb_shinfo(skb)->nr_frags == 0) {
 		struct cp_desc *txd = &cp->tx_ring[entry];
 		u32 len;
@@ -783,26 +808,26 @@ static int cp_start_xmit (struct sk_buff *skb, struct net_device *dev)
 		txd->addr = cpu_to_le64(mapping);
 		wmb();
 
-		if (skb->ip_summed == CHECKSUM_HW) {
+		flags = eor | len | DescOwn | FirstFrag | LastFrag;
+
+		if (mss)
+			flags |= LargeSend | ((mss & MSSMask) << MSSShift);
+		else if (skb->ip_summed == CHECKSUM_HW) {
 			const struct iphdr *ip = skb->nh.iph;
 			if (ip->protocol == IPPROTO_TCP)
-				txd->opts1 = cpu_to_le32(eor | len | DescOwn |
-							 FirstFrag | LastFrag |
-							 IPCS | TCPCS);
+				flags |= IPCS | TCPCS;
 			else if (ip->protocol == IPPROTO_UDP)
-				txd->opts1 = cpu_to_le32(eor | len | DescOwn |
-							 FirstFrag | LastFrag |
-							 IPCS | UDPCS);
+				flags |= IPCS | UDPCS;
 			else
-				BUG();
-		} else
-			txd->opts1 = cpu_to_le32(eor | len | DescOwn |
-						 FirstFrag | LastFrag);
+				WARN_ON(1);	/* we need a WARN() */
+		}
+
+		txd->opts1 = cpu_to_le32(flags);
 		wmb();
 
 		cp->tx_skb[entry].skb = skb;
 		cp->tx_skb[entry].mapping = mapping;
-		cp->tx_skb[entry].frag = 0;
+		cp->tx_skb[entry].len = len;
 		entry = NEXT_TX(entry);
 	} else {
 		struct cp_desc *txd;
@@ -820,7 +845,7 @@ static int cp_start_xmit (struct sk_buff *skb, struct net_device *dev)
 				       first_len, PCI_DMA_TODEVICE);
 		cp->tx_skb[entry].skb = skb;
 		cp->tx_skb[entry].mapping = first_mapping;
-		cp->tx_skb[entry].frag = 1;
+		cp->tx_skb[entry].len = first_len;
 		entry = NEXT_TX(entry);
 
 		for (frag = 0; frag < skb_shinfo(skb)->nr_frags; frag++) {
@@ -836,16 +861,19 @@ static int cp_start_xmit (struct sk_buff *skb, struct net_device *dev)
 					       len, PCI_DMA_TODEVICE);
 			eor = (entry == (CP_TX_RING_SIZE - 1)) ? RingEnd : 0;
 
-			if (skb->ip_summed == CHECKSUM_HW) {
-				ctrl = eor | len | DescOwn | IPCS;
+			ctrl = eor | len | DescOwn;
+
+			if (mss)
+				ctrl |= LargeSend |
+					((mss & MSSMask) << MSSShift);
+			else if (skb->ip_summed == CHECKSUM_HW) {
 				if (ip->protocol == IPPROTO_TCP)
-					ctrl |= TCPCS;
+					ctrl |= IPCS | TCPCS;
 				else if (ip->protocol == IPPROTO_UDP)
-					ctrl |= UDPCS;
+					ctrl |= IPCS | UDPCS;
 				else
 					BUG();
-			} else
-				ctrl = eor | len | DescOwn;
+			}
 
 			if (frag == skb_shinfo(skb)->nr_frags - 1)
 				ctrl |= LastFrag;
@@ -860,7 +888,7 @@ static int cp_start_xmit (struct sk_buff *skb, struct net_device *dev)
 
 			cp->tx_skb[entry].skb = skb;
 			cp->tx_skb[entry].mapping = mapping;
-			cp->tx_skb[entry].frag = frag + 2;
+			cp->tx_skb[entry].len = len;
 			entry = NEXT_TX(entry);
 		}
 
@@ -1074,7 +1102,6 @@ static int cp_refill_rx (struct cp_private *cp)
 		cp->rx_skb[i].mapping = pci_map_single(cp->pdev,
 			skb->tail, cp->rx_buf_sz, PCI_DMA_FROMDEVICE);
 		cp->rx_skb[i].skb = skb;
-		cp->rx_skb[i].frag = 0;
 
 		cp->rx_ring[i].opts2 = 0;
 		cp->rx_ring[i].addr = cpu_to_le64(cp->rx_skb[i].mapping);
@@ -1126,9 +1153,6 @@
 {
 	unsigned i;
 
-	memset(cp->rx_ring, 0, sizeof(struct cp_desc) * CP_RX_RING_SIZE);
-	memset(cp->tx_ring, 0, sizeof(struct cp_desc) * CP_TX_RING_SIZE);
-
 	for (i = 0; i < CP_RX_RING_SIZE; i++) {
 		if (cp->rx_skb[i].skb) {
 			pci_unmap_single(cp->pdev, cp->rx_skb[i].mapping,
@@ -1140,13 +1164,18 @@ static void cp_clean_rings (struct cp_private *cp)
 	for (i = 0; i < CP_TX_RING_SIZE; i++) {
 		if (cp->tx_skb[i].skb) {
 			struct sk_buff *skb = cp->tx_skb[i].skb;
+
 			pci_unmap_single(cp->pdev, cp->tx_skb[i].mapping,
-					 skb->len, PCI_DMA_TODEVICE);
-			dev_kfree_skb(skb);
+					 cp->tx_skb[i].len, PCI_DMA_TODEVICE);
+			if (le32_to_cpu(cp->tx_ring[i].opts1) & LastFrag)
+				dev_kfree_skb(skb);
 			cp->net_stats.tx_dropped++;
 		}
 	}
 
+	memset(cp->rx_ring, 0, sizeof(struct cp_desc) * CP_RX_RING_SIZE);
+	memset(cp->tx_ring, 0, sizeof(struct cp_desc) * CP_TX_RING_SIZE);
+
 	memset(&cp->rx_skb, 0, sizeof(struct ring_info) * CP_RX_RING_SIZE);
 	memset(&cp->tx_skb, 0, sizeof(struct ring_info) * CP_TX_RING_SIZE);
 }
@@ -1538,6 +1567,8 @@ static struct ethtool_ops cp_ethtool_ops = {
 	.set_tx_csum		= ethtool_op_set_tx_csum, /* local! */
 	.get_sg			= ethtool_op_get_sg,
 	.set_sg			= ethtool_op_set_sg,
+	.get_tso		= ethtool_op_get_tso,
+	.set_tso		= ethtool_op_set_tso,
 	.get_regs		= cp_get_regs,
 	.get_wol		= cp_get_wol,
 	.set_wol		= cp_set_wol,
@@ -1749,6 +1780,9 @@ static int cp_init_one (struct pci_dev *pdev, const struct pci_device_id *ent)
 	dev->get_stats = cp_get_stats;
 	dev->do_ioctl = cp_ioctl;
 	dev->poll = cp_rx_poll;
+#ifdef CONFIG_NET_POLL_CONTROLLER
+	dev->poll_controller = cp_poll_controller;
+#endif
 	dev->weight = 16;	/* arbitrary? from NAPI_HOWTO.txt. */
 #ifdef BROKEN
 	dev->change_mtu = cp_change_mtu;
@@ -1768,6 +1802,10 @@ static int cp_init_one (struct pci_dev *pdev, const struct pci_device_id *ent)
 	if (pci_using_dac)
 		dev->features |= NETIF_F_HIGHDMA;
 
+#if 0 /* disabled by default until verified */
+	dev->features |= NETIF_F_TSO;
+#endif
+
 	dev->irq = pdev->irq;
 
 	rc = register_netdev(dev);
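
The core of this change is how cp_start_xmit() now folds everything into a single flags/ctrl value for the opts1 descriptor word: the TSO path (LargeSend plus the MSS field) and the checksum-offload path are mutually exclusive, and the MSS field at MSSShift overlaps the checksum-control bits such as IPCS, which is why the mss test comes first. The standalone sketch below is not part of the patch; the bit values for RingEnd, FirstFrag, LastFrag, LargeSend, IPCS, MSSShift and MSSMask mirror the enum hunk (@@ -186,6 +188,9 @@), DESC_OWN and TCPCS are assumed from the rest of the driver's enum, and the helper name build_tx_opts1() is hypothetical, for illustration only.

/*
 * Standalone sketch (not from the patch): composing a Tx descriptor's
 * opts1 word the way the patched cp_start_xmit() does for a
 * single-fragment packet. DESC_OWN and TCPCS values are assumptions
 * taken from the rest of 8139cp.c; build_tx_opts1() is a hypothetical
 * helper used only for illustration.
 */
#include <stdint.h>
#include <stdio.h>

#define DESC_OWN	(1u << 31)	/* descriptor owned by the NIC */
#define RING_END	(1u << 30)	/* last descriptor in the ring */
#define FIRST_FRAG	(1u << 29)	/* first segment of a packet */
#define LAST_FRAG	(1u << 28)	/* final segment of a packet */
#define LARGE_SEND	(1u << 27)	/* enable TCP large send (TSO) */
#define IPCS		(1u << 18)	/* offload IP checksum */
#define TCPCS		(1u << 16)	/* offload TCP checksum */
#define MSS_SHIFT	16
#define MSS_MASK	0xfff

/* Compose opts1 for a single-fragment packet. */
static uint32_t build_tx_opts1(uint32_t len, int ring_end, int mss, int tcp_csum)
{
	uint32_t flags = len | DESC_OWN | FIRST_FRAG | LAST_FRAG;

	if (ring_end)
		flags |= RING_END;
	if (mss)		/* TSO: the NIC segments the packet using this MSS */
		flags |= LARGE_SEND | ((uint32_t)(mss & MSS_MASK) << MSS_SHIFT);
	else if (tcp_csum)	/* otherwise fall back to checksum offload */
		flags |= IPCS | TCPCS;

	return flags;
}

int main(void)
{
	/* e.g. a 60000-byte super-packet handed down with a 1448-byte MSS */
	printf("opts1 = 0x%08x\n", build_tx_opts1(60000, 0, 1448, 0));
	return 0;
}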