Diffstat (limited to 'drivers/net/8139cp.c')
-rw-r--r--  drivers/net/8139cp.c  121
1 file changed, 80 insertions(+), 41 deletions(-)
diff --git a/drivers/net/8139cp.c b/drivers/net/8139cp.c
index d639cb8dc461..e4b3c5c88542 100644
--- a/drivers/net/8139cp.c
+++ b/drivers/net/8139cp.c
@@ -54,12 +54,14 @@
 
 #include <linux/config.h>
 #include <linux/module.h>
+#include <linux/moduleparam.h>
 #include <linux/kernel.h>
 #include <linux/compiler.h>
 #include <linux/netdevice.h>
 #include <linux/etherdevice.h>
 #include <linux/init.h>
 #include <linux/pci.h>
+#include <linux/dma-mapping.h>
 #include <linux/delay.h>
 #include <linux/ethtool.h>
 #include <linux/mii.h>
@@ -91,16 +93,17 @@ KERN_INFO DRV_NAME ": 10/100 PCI Ethernet driver v" DRV_VERSION " (" DRV_RELDATE
 
 MODULE_AUTHOR("Jeff Garzik <jgarzik@pobox.com>");
 MODULE_DESCRIPTION("RealTek RTL-8139C+ series 10/100 PCI Ethernet driver");
+MODULE_VERSION(DRV_VERSION);
 MODULE_LICENSE("GPL");
 
 static int debug = -1;
-MODULE_PARM (debug, "i");
+module_param(debug, int, 0);
 MODULE_PARM_DESC (debug, "8139cp: bitmapped message enable number");
 
 /* Maximum number of multicast addresses to filter (vs. Rx-all-multicast).
    The RTL chips use a 64 element hash table based on the Ethernet CRC. */
 static int multicast_filter_limit = 32;
-MODULE_PARM (multicast_filter_limit, "i");
+module_param(multicast_filter_limit, int, 0);
 MODULE_PARM_DESC (multicast_filter_limit, "8139cp: maximum number of filtered multicast addresses");
 
 #define PFX DRV_NAME ": "
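
The 2.6 module_param() interface replaces MODULE_PARM's format strings with a real C type plus a sysfs permission mask; the 0 used here keeps both parameters out of sysfs. A minimal sketch of the visible variant (the 0644 mode is illustrative, not part of this patch):

	/* hypothetical: would appear as /sys/module/8139cp/parameters/debug */
	module_param(debug, int, 0644);
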
@@ -186,6 +189,9 @@ enum {
 	RingEnd		= (1 << 30), /* End of descriptor ring */
 	FirstFrag	= (1 << 29), /* First segment of a packet */
 	LastFrag	= (1 << 28), /* Final segment of a packet */
+	LargeSend	= (1 << 27), /* TCP Large Send Offload (TSO) */
+	MSSShift	= 16,	     /* MSS value position */
+	MSSMask		= 0xfff,     /* MSS value: 11 bits */
 	TxError		= (1 << 23), /* Tx error summary */
 	RxError		= (1 << 20), /* Rx error summary */
 	IPCS		= (1 << 18), /* Calculate IP checksum */
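
The three new bits define the TSO descriptor format: LargeSend arms the hardware segmentation engine, and the MSS is packed into opts1 above the 16-bit length field. A sketch of how the transmit path below assembles the word for a single-descriptor TSO frame (names as in this diff; mss comes from skb_shinfo(skb)->tso_size):

	u32 opts1 = eor | len | DescOwn | FirstFrag | LastFrag;

	if (mss)	/* hardware cuts the frame into MSS-sized segments */
		opts1 |= LargeSend | ((mss & MSSMask) << MSSShift);

	txd->opts1 = cpu_to_le32(opts1);	/* descriptors are little-endian */
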
@@ -312,7 +318,7 @@ struct cp_desc {
 struct ring_info {
 	struct sk_buff		*skb;
 	dma_addr_t		mapping;
-	unsigned		frag;
+	u32			len;
 };
 
 struct cp_dma_stats {
@@ -394,6 +400,9 @@ struct cp_private {
 static void __cp_set_rx_mode (struct net_device *dev);
 static void cp_tx (struct cp_private *cp);
 static void cp_clean_rings (struct cp_private *cp);
+#ifdef CONFIG_NET_POLL_CONTROLLER
+static void cp_poll_controller(struct net_device *dev);
+#endif
 
 static struct pci_device_id cp_pci_tbl[] = {
 	{ PCI_VENDOR_ID_REALTEK, PCI_DEVICE_ID_REALTEK_8139,
@@ -688,6 +697,19 @@ cp_interrupt (int irq, void *dev_instance, struct pt_regs *regs)
 	return IRQ_HANDLED;
 }
 
+#ifdef CONFIG_NET_POLL_CONTROLLER
+/*
+ * Polling receive - used by netconsole and other diagnostic tools
+ * to allow network i/o with interrupts disabled.
+ */
+static void cp_poll_controller(struct net_device *dev)
+{
+	disable_irq(dev->irq);
+	cp_interrupt(dev->irq, dev, NULL);
+	enable_irq(dev->irq);
+}
+#endif
+
 static void cp_tx (struct cp_private *cp)
 {
 	unsigned tx_head = cp->tx_head;
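
netpoll may run with interrupts disabled, so ->poll_controller masks the NIC's own line and invokes the regular ISR by hand; cp_interrupt() tolerates the NULL pt_regs. Netconsole is the typical consumer; as a usage sketch (all addresses are placeholders, not from this patch):

	modprobe netconsole netconsole=6665@10.0.0.1/eth0,6666@10.0.0.2/00:16:3e:12:34:56
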
@@ -707,7 +729,7 @@ static void cp_tx (struct cp_private *cp)
 			BUG();
 
 		pci_unmap_single(cp->pdev, cp->tx_skb[tx_tail].mapping,
-				 skb->len, PCI_DMA_TODEVICE);
+				 cp->tx_skb[tx_tail].len, PCI_DMA_TODEVICE);
 
 		if (status & LastFrag) {
 			if (status & (TxError | TxFIFOUnder)) {
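
This is the bug the new ring_info.len field exists to fix: a multi-descriptor skb used to be unmapped with skb->len, the whole packet length, for every fragment, instead of the length actually mapped into that slot. The pairing now looks like this (condensed from this diff; buf stands in for the fragment address):

	/* map: record exactly what this descriptor covers */
	cp->tx_skb[entry].mapping = pci_map_single(cp->pdev, buf, len,
						   PCI_DMA_TODEVICE);
	cp->tx_skb[entry].len = len;

	/* unmap: use the recorded length, never skb->len */
	pci_unmap_single(cp->pdev, cp->tx_skb[tx_tail].mapping,
			 cp->tx_skb[tx_tail].len, PCI_DMA_TODEVICE);
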
@@ -749,10 +771,11 @@ static int cp_start_xmit (struct sk_buff *skb, struct net_device *dev)
 {
 	struct cp_private *cp = netdev_priv(dev);
 	unsigned entry;
-	u32 eor;
+	u32 eor, flags;
 #if CP_VLAN_TAG_USED
 	u32 vlan_tag = 0;
 #endif
+	int mss = 0;
 
 	spin_lock_irq(&cp->lock);
 
@@ -772,6 +795,9 @@ static int cp_start_xmit (struct sk_buff *skb, struct net_device *dev)
 
 	entry = cp->tx_head;
 	eor = (entry == (CP_TX_RING_SIZE - 1)) ? RingEnd : 0;
+	if (dev->features & NETIF_F_TSO)
+		mss = skb_shinfo(skb)->tso_size;
+
 	if (skb_shinfo(skb)->nr_frags == 0) {
 		struct cp_desc *txd = &cp->tx_ring[entry];
 		u32 len;
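
skb_shinfo(skb)->tso_size is the per-packet MSS the stack requests; it is consulted only when the device actually advertises NETIF_F_TSO. As an aside, later kernels (2.6.18 onward) renamed the field to gso_size, so the modern spelling of this guard would be roughly:

	if (skb_is_gso(skb))
		mss = skb_shinfo(skb)->gso_size;
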
@@ -783,26 +809,26 @@ static int cp_start_xmit (struct sk_buff *skb, struct net_device *dev)
 		txd->addr = cpu_to_le64(mapping);
 		wmb();
 
-		if (skb->ip_summed == CHECKSUM_HW) {
+		flags = eor | len | DescOwn | FirstFrag | LastFrag;
+
+		if (mss)
+			flags |= LargeSend | ((mss & MSSMask) << MSSShift);
+		else if (skb->ip_summed == CHECKSUM_HW) {
 			const struct iphdr *ip = skb->nh.iph;
 			if (ip->protocol == IPPROTO_TCP)
-				txd->opts1 = cpu_to_le32(eor | len | DescOwn |
-							 FirstFrag | LastFrag |
-							 IPCS | TCPCS);
+				flags |= IPCS | TCPCS;
 			else if (ip->protocol == IPPROTO_UDP)
-				txd->opts1 = cpu_to_le32(eor | len | DescOwn |
-							 FirstFrag | LastFrag |
-							 IPCS | UDPCS);
+				flags |= IPCS | UDPCS;
 			else
-				BUG();
-		} else
-			txd->opts1 = cpu_to_le32(eor | len | DescOwn |
-						 FirstFrag | LastFrag);
+				WARN_ON(1);	/* we need a WARN() */
+		}
+
+		txd->opts1 = cpu_to_le32(flags);
 		wmb();
 
 		cp->tx_skb[entry].skb = skb;
 		cp->tx_skb[entry].mapping = mapping;
-		cp->tx_skb[entry].frag = 0;
+		cp->tx_skb[entry].len = len;
 		entry = NEXT_TX(entry);
 	} else {
 		struct cp_desc *txd;
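
Folding the three near-identical cpu_to_le32() calls into a single flags word is the readability half of this hunk; the behavioral change is that an unknown protocol with ip_summed == CHECKSUM_HW now triggers WARN_ON(1) — a logged stack trace, after which the frame still goes out, just without hardware checksumming — instead of BUG(), which would halt the machine over one malformed packet. The fragmented path below still BUG()s on the same condition, presumably left for a later pass.
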
@@ -820,7 +846,7 @@ static int cp_start_xmit (struct sk_buff *skb, struct net_device *dev)
 						   first_len, PCI_DMA_TODEVICE);
 		cp->tx_skb[entry].skb = skb;
 		cp->tx_skb[entry].mapping = first_mapping;
-		cp->tx_skb[entry].frag = 1;
+		cp->tx_skb[entry].len = first_len;
 		entry = NEXT_TX(entry);
 
 		for (frag = 0; frag < skb_shinfo(skb)->nr_frags; frag++) {
@@ -836,16 +862,19 @@ static int cp_start_xmit (struct sk_buff *skb, struct net_device *dev)
 						 len, PCI_DMA_TODEVICE);
 			eor = (entry == (CP_TX_RING_SIZE - 1)) ? RingEnd : 0;
 
-			if (skb->ip_summed == CHECKSUM_HW) {
-				ctrl = eor | len | DescOwn | IPCS;
+			ctrl = eor | len | DescOwn;
+
+			if (mss)
+				ctrl |= LargeSend |
+					((mss & MSSMask) << MSSShift);
+			else if (skb->ip_summed == CHECKSUM_HW) {
 				if (ip->protocol == IPPROTO_TCP)
-					ctrl |= TCPCS;
+					ctrl |= IPCS | TCPCS;
 				else if (ip->protocol == IPPROTO_UDP)
-					ctrl |= UDPCS;
+					ctrl |= IPCS | UDPCS;
 				else
 					BUG();
-			} else
-				ctrl = eor | len | DescOwn;
+			}
 
 			if (frag == skb_shinfo(skb)->nr_frags - 1)
 				ctrl |= LastFrag;
@@ -860,7 +889,7 @@ static int cp_start_xmit (struct sk_buff *skb, struct net_device *dev)
 
 			cp->tx_skb[entry].skb = skb;
 			cp->tx_skb[entry].mapping = mapping;
-			cp->tx_skb[entry].frag = frag + 2;
+			cp->tx_skb[entry].len = len;
 			entry = NEXT_TX(entry);
 		}
 
@@ -1074,7 +1103,6 @@ static int cp_refill_rx (struct cp_private *cp)
 		cp->rx_skb[i].mapping = pci_map_single(cp->pdev,
 			skb->tail, cp->rx_buf_sz, PCI_DMA_FROMDEVICE);
 		cp->rx_skb[i].skb = skb;
-		cp->rx_skb[i].frag = 0;
 
 		cp->rx_ring[i].opts2 = 0;
 		cp->rx_ring[i].addr = cpu_to_le64(cp->rx_skb[i].mapping);
@@ -1126,9 +1154,6 @@ static void cp_clean_rings (struct cp_private *cp)
 {
 	unsigned i;
 
-	memset(cp->rx_ring, 0, sizeof(struct cp_desc) * CP_RX_RING_SIZE);
-	memset(cp->tx_ring, 0, sizeof(struct cp_desc) * CP_TX_RING_SIZE);
-
 	for (i = 0; i < CP_RX_RING_SIZE; i++) {
 		if (cp->rx_skb[i].skb) {
 			pci_unmap_single(cp->pdev, cp->rx_skb[i].mapping,
@@ -1140,13 +1165,18 @@ static void cp_clean_rings (struct cp_private *cp)
 	for (i = 0; i < CP_TX_RING_SIZE; i++) {
 		if (cp->tx_skb[i].skb) {
 			struct sk_buff *skb = cp->tx_skb[i].skb;
+
 			pci_unmap_single(cp->pdev, cp->tx_skb[i].mapping,
-					 skb->len, PCI_DMA_TODEVICE);
-			dev_kfree_skb(skb);
+					 cp->tx_skb[i].len, PCI_DMA_TODEVICE);
+			if (le32_to_cpu(cp->tx_ring[i].opts1) & LastFrag)
+				dev_kfree_skb(skb);
 			cp->net_stats.tx_dropped++;
 		}
 	}
 
+	memset(cp->rx_ring, 0, sizeof(struct cp_desc) * CP_RX_RING_SIZE);
+	memset(cp->tx_ring, 0, sizeof(struct cp_desc) * CP_TX_RING_SIZE);
+
 	memset(&cp->rx_skb, 0, sizeof(struct ring_info) * CP_RX_RING_SIZE);
 	memset(&cp->tx_skb, 0, sizeof(struct ring_info) * CP_TX_RING_SIZE);
 }
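
Two orderings matter here: the skb is freed only at its LastFrag descriptor, since one skb may now own several ring slots, and the ring memset()s move below the loop because the loop reads opts1 back out of the descriptors it is cleaning. Condensed:

	/* unmap every slot; only the LastFrag slot owns (and frees) the skb */
	for (i = 0; i < CP_TX_RING_SIZE; i++) {
		if (!cp->tx_skb[i].skb)
			continue;
		pci_unmap_single(cp->pdev, cp->tx_skb[i].mapping,
				 cp->tx_skb[i].len, PCI_DMA_TODEVICE);
		if (le32_to_cpu(cp->tx_ring[i].opts1) & LastFrag)
			dev_kfree_skb(cp->tx_skb[i].skb);
	}
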
@@ -1486,22 +1516,22 @@ static void cp_get_ethtool_stats (struct net_device *dev,
 				  struct ethtool_stats *estats, u64 *tmp_stats)
 {
 	struct cp_private *cp = netdev_priv(dev);
-	unsigned int work = 100;
 	int i;
 
+	memset(cp->nic_stats, 0, sizeof(struct cp_dma_stats));
+
 	/* begin NIC statistics dump */
 	cpw32(StatsAddr + 4, (cp->nic_stats_dma >> 16) >> 16);
 	cpw32(StatsAddr, (cp->nic_stats_dma & 0xffffffff) | DumpStats);
 	cpr32(StatsAddr);
 
-	while (work-- > 0) {
+	for (i = 0; i < 1000; i++) {
 		if ((cpr32(StatsAddr) & DumpStats) == 0)
 			break;
-		cpu_relax();
+		udelay(10);
 	}
-
-	if (cpr32(StatsAddr) & DumpStats)
-		return /* -EIO */;
+	cpw32(StatsAddr, 0);
+	cpw32(StatsAddr + 4, 0);
 
 	i = 0;
 	tmp_stats[i++] = le64_to_cpu(cp->nic_stats->tx_ok);
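
The wait grows from 100 bare cpu_relax() spins to a bounded poll of up to 1000 x udelay(10), roughly 10 ms, and the silent early return on timeout is dropped: because nic_stats is zeroed up front, a timed-out dump now reports zeros instead of stale memory, and clearing StatsAddr afterwards stops the chip from DMAing into the buffer later. The loop is the usual poll-a-self-clearing-bit idiom:

	for (i = 0; i < 1000; i++) {
		if (!(cpr32(StatsAddr) & DumpStats))
			break;		/* chip cleared the bit: dump complete */
		udelay(10);		/* bounded wait, ~10 ms worst case */
	}
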
@@ -1538,6 +1568,8 @@ static struct ethtool_ops cp_ethtool_ops = {
 	.set_tx_csum		= ethtool_op_set_tx_csum, /* local! */
 	.get_sg			= ethtool_op_get_sg,
 	.set_sg			= ethtool_op_set_sg,
+	.get_tso		= ethtool_op_get_tso,
+	.set_tso		= ethtool_op_set_tso,
 	.get_regs		= cp_get_regs,
 	.get_wol		= cp_get_wol,
 	.set_wol		= cp_set_wol,
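
ethtool_op_get_tso/ethtool_op_set_tso are the stock helpers that read and flip NETIF_F_TSO in dev->features, so once the feature bit is actually set (see the #if 0 at the end of this patch) TSO becomes switchable from userspace, assuming the interface is eth0:

	ethtool -K eth0 tso on	# enable TCP segmentation offload
	ethtool -k eth0		# verify the offload settings
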
@@ -1701,19 +1733,19 @@ static int cp_init_one (struct pci_dev *pdev, const struct pci_device_id *ent)
 
 	/* Configure DMA attributes. */
 	if ((sizeof(dma_addr_t) > 4) &&
-	    !pci_set_consistent_dma_mask(pdev, 0xffffffffffffffffULL) &&
-	    !pci_set_dma_mask(pdev, 0xffffffffffffffffULL)) {
+	    !pci_set_consistent_dma_mask(pdev, DMA_64BIT_MASK) &&
+	    !pci_set_dma_mask(pdev, DMA_64BIT_MASK)) {
 		pci_using_dac = 1;
 	} else {
 		pci_using_dac = 0;
 
-		rc = pci_set_dma_mask(pdev, 0xffffffffULL);
+		rc = pci_set_dma_mask(pdev, DMA_32BIT_MASK);
 		if (rc) {
 			printk(KERN_ERR PFX "No usable DMA configuration, "
 			       "aborting.\n");
 			goto err_out_res;
 		}
-		rc = pci_set_consistent_dma_mask(pdev, 0xffffffffULL);
+		rc = pci_set_consistent_dma_mask(pdev, DMA_32BIT_MASK);
 		if (rc) {
 			printk(KERN_ERR PFX "No usable consistent DMA configuration, "
 			       "aborting.\n");
@@ -1749,6 +1781,9 @@ static int cp_init_one (struct pci_dev *pdev, const struct pci_device_id *ent)
 	dev->get_stats = cp_get_stats;
 	dev->do_ioctl = cp_ioctl;
 	dev->poll = cp_rx_poll;
+#ifdef CONFIG_NET_POLL_CONTROLLER
+	dev->poll_controller = cp_poll_controller;
+#endif
 	dev->weight = 16;	/* arbitrary? from NAPI_HOWTO.txt. */
 #ifdef BROKEN
 	dev->change_mtu = cp_change_mtu;
@@ -1768,6 +1803,10 @@ static int cp_init_one (struct pci_dev *pdev, const struct pci_device_id *ent)
 	if (pci_using_dac)
 		dev->features |= NETIF_F_HIGHDMA;
 
+#if 0 /* disabled by default until verified */
+	dev->features |= NETIF_F_TSO;
+#endif
+
 	dev->irq = pdev->irq;
 
 	rc = register_netdev(dev);