about summary refs log tree commit diff stats
path: root/drivers/net/8139cp.c
diff options
context:
space:
mode:
Diffstat (limited to 'drivers/net/8139cp.c')
-rw-r--r-- drivers/net/8139cp.c 100
1 files changed, 69 insertions, 31 deletions
diff --git a/drivers/net/8139cp.c b/drivers/net/8139cp.c
index ca4c9ac7e115..ca7746dd164f 100644
--- a/drivers/net/8139cp.c
+++ b/drivers/net/8139cp.c
@@ -54,6 +54,7 @@
54 54
55#include <linux/config.h> 55#include <linux/config.h>
56#include <linux/module.h> 56#include <linux/module.h>
57#include <linux/moduleparam.h>
57#include <linux/kernel.h> 58#include <linux/kernel.h>
58#include <linux/compiler.h> 59#include <linux/compiler.h>
59#include <linux/netdevice.h> 60#include <linux/netdevice.h>
@@ -92,16 +93,17 @@ KERN_INFO DRV_NAME ": 10/100 PCI Ethernet driver v" DRV_VERSION " (" DRV_RELDATE
92 93
93MODULE_AUTHOR("Jeff Garzik <jgarzik@pobox.com>"); 94MODULE_AUTHOR("Jeff Garzik <jgarzik@pobox.com>");
94MODULE_DESCRIPTION("RealTek RTL-8139C+ series 10/100 PCI Ethernet driver"); 95MODULE_DESCRIPTION("RealTek RTL-8139C+ series 10/100 PCI Ethernet driver");
96MODULE_VERSION(DRV_VERSION);
95MODULE_LICENSE("GPL"); 97MODULE_LICENSE("GPL");
96 98
97static int debug = -1; 99static int debug = -1;
98MODULE_PARM (debug, "i"); 100module_param(debug, int, 0);
99MODULE_PARM_DESC (debug, "8139cp: bitmapped message enable number"); 101MODULE_PARM_DESC (debug, "8139cp: bitmapped message enable number");
100 102
101/* Maximum number of multicast addresses to filter (vs. Rx-all-multicast). 103/* Maximum number of multicast addresses to filter (vs. Rx-all-multicast).
102 The RTL chips use a 64 element hash table based on the Ethernet CRC. */ 104 The RTL chips use a 64 element hash table based on the Ethernet CRC. */
103static int multicast_filter_limit = 32; 105static int multicast_filter_limit = 32;
104MODULE_PARM (multicast_filter_limit, "i"); 106module_param(multicast_filter_limit, int, 0);
105MODULE_PARM_DESC (multicast_filter_limit, "8139cp: maximum number of filtered multicast addresses"); 107MODULE_PARM_DESC (multicast_filter_limit, "8139cp: maximum number of filtered multicast addresses");
106 108
107#define PFX DRV_NAME ": " 109#define PFX DRV_NAME ": "
@@ -187,6 +189,9 @@ enum {
187 RingEnd = (1 << 30), /* End of descriptor ring */ 189 RingEnd = (1 << 30), /* End of descriptor ring */
188 FirstFrag = (1 << 29), /* First segment of a packet */ 190 FirstFrag = (1 << 29), /* First segment of a packet */
189 LastFrag = (1 << 28), /* Final segment of a packet */ 191 LastFrag = (1 << 28), /* Final segment of a packet */
192 LargeSend = (1 << 27), /* TCP Large Send Offload (TSO) */
193 MSSShift = 16, /* MSS value position */
194 MSSMask = 0xfff, /* MSS value: 11 bits */
190 TxError = (1 << 23), /* Tx error summary */ 195 TxError = (1 << 23), /* Tx error summary */
191 RxError = (1 << 20), /* Rx error summary */ 196 RxError = (1 << 20), /* Rx error summary */
192 IPCS = (1 << 18), /* Calculate IP checksum */ 197 IPCS = (1 << 18), /* Calculate IP checksum */
@@ -313,7 +318,7 @@ struct cp_desc {
313struct ring_info { 318struct ring_info {
314 struct sk_buff *skb; 319 struct sk_buff *skb;
315 dma_addr_t mapping; 320 dma_addr_t mapping;
316 unsigned frag; 321 u32 len;
317}; 322};
318 323
319struct cp_dma_stats { 324struct cp_dma_stats {
@@ -395,6 +400,9 @@ struct cp_private {
395static void __cp_set_rx_mode (struct net_device *dev); 400static void __cp_set_rx_mode (struct net_device *dev);
396static void cp_tx (struct cp_private *cp); 401static void cp_tx (struct cp_private *cp);
397static void cp_clean_rings (struct cp_private *cp); 402static void cp_clean_rings (struct cp_private *cp);
403#ifdef CONFIG_NET_POLL_CONTROLLER
404static void cp_poll_controller(struct net_device *dev);
405#endif
398 406
399static struct pci_device_id cp_pci_tbl[] = { 407static struct pci_device_id cp_pci_tbl[] = {
400 { PCI_VENDOR_ID_REALTEK, PCI_DEVICE_ID_REALTEK_8139, 408 { PCI_VENDOR_ID_REALTEK, PCI_DEVICE_ID_REALTEK_8139,
@@ -689,6 +697,19 @@ cp_interrupt (int irq, void *dev_instance, struct pt_regs *regs)
689 return IRQ_HANDLED; 697 return IRQ_HANDLED;
690} 698}
691 699
700#ifdef CONFIG_NET_POLL_CONTROLLER
701/*
702 * Polling receive - used by netconsole and other diagnostic tools
703 * to allow network i/o with interrupts disabled.
704 */
705static void cp_poll_controller(struct net_device *dev)
706{
707 disable_irq(dev->irq);
708 cp_interrupt(dev->irq, dev, NULL);
709 enable_irq(dev->irq);
710}
711#endif
712
692static void cp_tx (struct cp_private *cp) 713static void cp_tx (struct cp_private *cp)
693{ 714{
694 unsigned tx_head = cp->tx_head; 715 unsigned tx_head = cp->tx_head;
@@ -708,7 +729,7 @@ static void cp_tx (struct cp_private *cp)
708 BUG(); 729 BUG();
709 730
710 pci_unmap_single(cp->pdev, cp->tx_skb[tx_tail].mapping, 731 pci_unmap_single(cp->pdev, cp->tx_skb[tx_tail].mapping,
711 skb->len, PCI_DMA_TODEVICE); 732 cp->tx_skb[tx_tail].len, PCI_DMA_TODEVICE);
712 733
713 if (status & LastFrag) { 734 if (status & LastFrag) {
714 if (status & (TxError | TxFIFOUnder)) { 735 if (status & (TxError | TxFIFOUnder)) {
@@ -750,10 +771,11 @@ static int cp_start_xmit (struct sk_buff *skb, struct net_device *dev)
750{ 771{
751 struct cp_private *cp = netdev_priv(dev); 772 struct cp_private *cp = netdev_priv(dev);
752 unsigned entry; 773 unsigned entry;
753 u32 eor; 774 u32 eor, flags;
754#if CP_VLAN_TAG_USED 775#if CP_VLAN_TAG_USED
755 u32 vlan_tag = 0; 776 u32 vlan_tag = 0;
756#endif 777#endif
778 int mss = 0;
757 779
758 spin_lock_irq(&cp->lock); 780 spin_lock_irq(&cp->lock);
759 781
@@ -773,6 +795,9 @@ static int cp_start_xmit (struct sk_buff *skb, struct net_device *dev)
773 795
774 entry = cp->tx_head; 796 entry = cp->tx_head;
775 eor = (entry == (CP_TX_RING_SIZE - 1)) ? RingEnd : 0; 797 eor = (entry == (CP_TX_RING_SIZE - 1)) ? RingEnd : 0;
798 if (dev->features & NETIF_F_TSO)
799 mss = skb_shinfo(skb)->tso_size;
800
776 if (skb_shinfo(skb)->nr_frags == 0) { 801 if (skb_shinfo(skb)->nr_frags == 0) {
777 struct cp_desc *txd = &cp->tx_ring[entry]; 802 struct cp_desc *txd = &cp->tx_ring[entry];
778 u32 len; 803 u32 len;
@@ -784,26 +809,26 @@ static int cp_start_xmit (struct sk_buff *skb, struct net_device *dev)
784 txd->addr = cpu_to_le64(mapping); 809 txd->addr = cpu_to_le64(mapping);
785 wmb(); 810 wmb();
786 811
787 if (skb->ip_summed == CHECKSUM_HW) { 812 flags = eor | len | DescOwn | FirstFrag | LastFrag;
813
814 if (mss)
815 flags |= LargeSend | ((mss & MSSMask) << MSSShift);
816 else if (skb->ip_summed == CHECKSUM_HW) {
788 const struct iphdr *ip = skb->nh.iph; 817 const struct iphdr *ip = skb->nh.iph;
789 if (ip->protocol == IPPROTO_TCP) 818 if (ip->protocol == IPPROTO_TCP)
790 txd->opts1 = cpu_to_le32(eor | len | DescOwn | 819 flags |= IPCS | TCPCS;
791 FirstFrag | LastFrag |
792 IPCS | TCPCS);
793 else if (ip->protocol == IPPROTO_UDP) 820 else if (ip->protocol == IPPROTO_UDP)
794 txd->opts1 = cpu_to_le32(eor | len | DescOwn | 821 flags |= IPCS | UDPCS;
795 FirstFrag | LastFrag |
796 IPCS | UDPCS);
797 else 822 else
798 BUG(); 823 WARN_ON(1); /* we need a WARN() */
799 } else 824 }
800 txd->opts1 = cpu_to_le32(eor | len | DescOwn | 825
801 FirstFrag | LastFrag); 826 txd->opts1 = cpu_to_le32(flags);
802 wmb(); 827 wmb();
803 828
804 cp->tx_skb[entry].skb = skb; 829 cp->tx_skb[entry].skb = skb;
805 cp->tx_skb[entry].mapping = mapping; 830 cp->tx_skb[entry].mapping = mapping;
806 cp->tx_skb[entry].frag = 0; 831 cp->tx_skb[entry].len = len;
807 entry = NEXT_TX(entry); 832 entry = NEXT_TX(entry);
808 } else { 833 } else {
809 struct cp_desc *txd; 834 struct cp_desc *txd;
@@ -821,7 +846,7 @@ static int cp_start_xmit (struct sk_buff *skb, struct net_device *dev)
821 first_len, PCI_DMA_TODEVICE); 846 first_len, PCI_DMA_TODEVICE);
822 cp->tx_skb[entry].skb = skb; 847 cp->tx_skb[entry].skb = skb;
823 cp->tx_skb[entry].mapping = first_mapping; 848 cp->tx_skb[entry].mapping = first_mapping;
824 cp->tx_skb[entry].frag = 1; 849 cp->tx_skb[entry].len = first_len;
825 entry = NEXT_TX(entry); 850 entry = NEXT_TX(entry);
826 851
827 for (frag = 0; frag < skb_shinfo(skb)->nr_frags; frag++) { 852 for (frag = 0; frag < skb_shinfo(skb)->nr_frags; frag++) {
@@ -837,16 +862,19 @@ static int cp_start_xmit (struct sk_buff *skb, struct net_device *dev)
837 len, PCI_DMA_TODEVICE); 862 len, PCI_DMA_TODEVICE);
838 eor = (entry == (CP_TX_RING_SIZE - 1)) ? RingEnd : 0; 863 eor = (entry == (CP_TX_RING_SIZE - 1)) ? RingEnd : 0;
839 864
840 if (skb->ip_summed == CHECKSUM_HW) { 865 ctrl = eor | len | DescOwn;
841 ctrl = eor | len | DescOwn | IPCS; 866
867 if (mss)
868 ctrl |= LargeSend |
869 ((mss & MSSMask) << MSSShift);
870 else if (skb->ip_summed == CHECKSUM_HW) {
842 if (ip->protocol == IPPROTO_TCP) 871 if (ip->protocol == IPPROTO_TCP)
843 ctrl |= TCPCS; 872 ctrl |= IPCS | TCPCS;
844 else if (ip->protocol == IPPROTO_UDP) 873 else if (ip->protocol == IPPROTO_UDP)
845 ctrl |= UDPCS; 874 ctrl |= IPCS | UDPCS;
846 else 875 else
847 BUG(); 876 BUG();
848 } else 877 }
849 ctrl = eor | len | DescOwn;
850 878
851 if (frag == skb_shinfo(skb)->nr_frags - 1) 879 if (frag == skb_shinfo(skb)->nr_frags - 1)
852 ctrl |= LastFrag; 880 ctrl |= LastFrag;
@@ -861,7 +889,7 @@ static int cp_start_xmit (struct sk_buff *skb, struct net_device *dev)
861 889
862 cp->tx_skb[entry].skb = skb; 890 cp->tx_skb[entry].skb = skb;
863 cp->tx_skb[entry].mapping = mapping; 891 cp->tx_skb[entry].mapping = mapping;
864 cp->tx_skb[entry].frag = frag + 2; 892 cp->tx_skb[entry].len = len;
865 entry = NEXT_TX(entry); 893 entry = NEXT_TX(entry);
866 } 894 }
867 895
@@ -1075,7 +1103,6 @@ static int cp_refill_rx (struct cp_private *cp)
1075 cp->rx_skb[i].mapping = pci_map_single(cp->pdev, 1103 cp->rx_skb[i].mapping = pci_map_single(cp->pdev,
1076 skb->tail, cp->rx_buf_sz, PCI_DMA_FROMDEVICE); 1104 skb->tail, cp->rx_buf_sz, PCI_DMA_FROMDEVICE);
1077 cp->rx_skb[i].skb = skb; 1105 cp->rx_skb[i].skb = skb;
1078 cp->rx_skb[i].frag = 0;
1079 1106
1080 cp->rx_ring[i].opts2 = 0; 1107 cp->rx_ring[i].opts2 = 0;
1081 cp->rx_ring[i].addr = cpu_to_le64(cp->rx_skb[i].mapping); 1108 cp->rx_ring[i].addr = cpu_to_le64(cp->rx_skb[i].mapping);
@@ -1127,9 +1154,6 @@ static void cp_clean_rings (struct cp_private *cp)
1127{ 1154{
1128 unsigned i; 1155 unsigned i;
1129 1156
1130 memset(cp->rx_ring, 0, sizeof(struct cp_desc) * CP_RX_RING_SIZE);
1131 memset(cp->tx_ring, 0, sizeof(struct cp_desc) * CP_TX_RING_SIZE);
1132
1133 for (i = 0; i < CP_RX_RING_SIZE; i++) { 1157 for (i = 0; i < CP_RX_RING_SIZE; i++) {
1134 if (cp->rx_skb[i].skb) { 1158 if (cp->rx_skb[i].skb) {
1135 pci_unmap_single(cp->pdev, cp->rx_skb[i].mapping, 1159 pci_unmap_single(cp->pdev, cp->rx_skb[i].mapping,
@@ -1141,13 +1165,18 @@ static void cp_clean_rings (struct cp_private *cp)
1141 for (i = 0; i < CP_TX_RING_SIZE; i++) { 1165 for (i = 0; i < CP_TX_RING_SIZE; i++) {
1142 if (cp->tx_skb[i].skb) { 1166 if (cp->tx_skb[i].skb) {
1143 struct sk_buff *skb = cp->tx_skb[i].skb; 1167 struct sk_buff *skb = cp->tx_skb[i].skb;
1168
1144 pci_unmap_single(cp->pdev, cp->tx_skb[i].mapping, 1169 pci_unmap_single(cp->pdev, cp->tx_skb[i].mapping,
1145 skb->len, PCI_DMA_TODEVICE); 1170 cp->tx_skb[i].len, PCI_DMA_TODEVICE);
1146 dev_kfree_skb(skb); 1171 if (le32_to_cpu(cp->tx_ring[i].opts1) & LastFrag)
1172 dev_kfree_skb(skb);
1147 cp->net_stats.tx_dropped++; 1173 cp->net_stats.tx_dropped++;
1148 } 1174 }
1149 } 1175 }
1150 1176
1177 memset(cp->rx_ring, 0, sizeof(struct cp_desc) * CP_RX_RING_SIZE);
1178 memset(cp->tx_ring, 0, sizeof(struct cp_desc) * CP_TX_RING_SIZE);
1179
1151 memset(&cp->rx_skb, 0, sizeof(struct ring_info) * CP_RX_RING_SIZE); 1180 memset(&cp->rx_skb, 0, sizeof(struct ring_info) * CP_RX_RING_SIZE);
1152 memset(&cp->tx_skb, 0, sizeof(struct ring_info) * CP_TX_RING_SIZE); 1181 memset(&cp->tx_skb, 0, sizeof(struct ring_info) * CP_TX_RING_SIZE);
1153} 1182}
@@ -1539,6 +1568,8 @@ static struct ethtool_ops cp_ethtool_ops = {
1539 .set_tx_csum = ethtool_op_set_tx_csum, /* local! */ 1568 .set_tx_csum = ethtool_op_set_tx_csum, /* local! */
1540 .get_sg = ethtool_op_get_sg, 1569 .get_sg = ethtool_op_get_sg,
1541 .set_sg = ethtool_op_set_sg, 1570 .set_sg = ethtool_op_set_sg,
1571 .get_tso = ethtool_op_get_tso,
1572 .set_tso = ethtool_op_set_tso,
1542 .get_regs = cp_get_regs, 1573 .get_regs = cp_get_regs,
1543 .get_wol = cp_get_wol, 1574 .get_wol = cp_get_wol,
1544 .set_wol = cp_set_wol, 1575 .set_wol = cp_set_wol,
@@ -1750,6 +1781,9 @@ static int cp_init_one (struct pci_dev *pdev, const struct pci_device_id *ent)
1750 dev->get_stats = cp_get_stats; 1781 dev->get_stats = cp_get_stats;
1751 dev->do_ioctl = cp_ioctl; 1782 dev->do_ioctl = cp_ioctl;
1752 dev->poll = cp_rx_poll; 1783 dev->poll = cp_rx_poll;
1784#ifdef CONFIG_NET_POLL_CONTROLLER
1785 dev->poll_controller = cp_poll_controller;
1786#endif
1753 dev->weight = 16; /* arbitrary? from NAPI_HOWTO.txt. */ 1787 dev->weight = 16; /* arbitrary? from NAPI_HOWTO.txt. */
1754#ifdef BROKEN 1788#ifdef BROKEN
1755 dev->change_mtu = cp_change_mtu; 1789 dev->change_mtu = cp_change_mtu;
@@ -1769,6 +1803,10 @@ static int cp_init_one (struct pci_dev *pdev, const struct pci_device_id *ent)
1769 if (pci_using_dac) 1803 if (pci_using_dac)
1770 dev->features |= NETIF_F_HIGHDMA; 1804 dev->features |= NETIF_F_HIGHDMA;
1771 1805
1806#if 0 /* disabled by default until verified */
1807 dev->features |= NETIF_F_TSO;
1808#endif
1809
1772 dev->irq = pdev->irq; 1810 dev->irq = pdev->irq;
1773 1811
1774 rc = register_netdev(dev); 1812 rc = register_netdev(dev);