path: root/drivers/net/dl2k.c
author     Linus Torvalds <torvalds@g5.osdl.org>  2006-09-24 13:15:13 -0400
committer  Linus Torvalds <torvalds@g5.osdl.org>  2006-09-24 13:15:13 -0400
commit     a319a2773a13bab56a0d0b3744ba8703324313b5 (patch)
tree       f02c86acabd1031439fd422a167784007e84ebb1 /drivers/net/dl2k.c
parent     e18fa700c9a31360bc8f193aa543b7ef7b39a06b (diff)
parent     183798799216fad36c7219fe8d4d6dee6b8fa755 (diff)
Merge branch 'upstream-linus' of master.kernel.org:/pub/scm/linux/kernel/git/jgarzik/netdev-2.6
* 'upstream-linus' of master.kernel.org:/pub/scm/linux/kernel/git/jgarzik/netdev-2.6: (217 commits)
  net/ieee80211: fix more crypto-related build breakage
  [PATCH] Spidernet: add ethtool -S (show statistics)
  [NET] GT96100: Delete bitrotting ethernet driver
  [PATCH] mv643xx_eth: restrict to 32-bit PPC_MULTIPLATFORM
  [PATCH] Cirrus Logic ep93xx ethernet driver
  r8169: the MMIO region of the 8167 stands behin BAR#1
  e1000, ixgb: Remove pointless wrappers
  [PATCH] Remove powerpc specific parts of 3c509 driver
  [PATCH] s2io: Switch to pci_get_device
  [PATCH] gt96100: move to pci_get_device API
  [PATCH] ehea: bugfix for register access functions
  [PATCH] e1000 disable device on PCI error
  drivers/net/phy/fixed: #if 0 some incomplete code
  drivers/net: const-ify ethtool_ops declarations
  [PATCH] ethtool: allow const ethtool_ops
  [PATCH] sky2: big endian
  [PATCH] sky2: fiber support
  [PATCH] sky2: tx pause bug fix
  drivers/net: Trim trailing whitespace
  [PATCH] ehea: IBM eHEA Ethernet Device Driver
  ...

Manually resolved conflicts in drivers/net/ixgb/ixgb_main.c and drivers/net/sky2.c related to CHECKSUM_HW/CHECKSUM_PARTIAL changes by commit 84fa7933a33f806bbbaae6775e87459b1ec584c0 that just happened to be next to unrelated changes in this update.
Diffstat (limited to 'drivers/net/dl2k.c')
-rw-r--r--  drivers/net/dl2k.c  164
1 file changed, 82 insertions(+), 82 deletions(-)
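Beyond the tree-wide "drivers/net: Trim trailing whitespace" cleanup, the visible changes to dl2k.c in this merge are the const-ification of its ethtool_ops table (from "drivers/net: const-ify ethtool_ops declarations", enabled by "[PATCH] ethtool: allow const ethtool_ops") and the switch from pci_module_init() to pci_register_driver(). A minimal sketch of the const ethtool_ops pattern follows; the foo_* names are hypothetical placeholders for illustration, not dl2k symbols.

/* Minimal sketch of a const ethtool_ops table, the pattern the
 * const-ify change applies to dl2k.  The foo_* names are hypothetical
 * placeholders, not dl2k symbols. */
#include <linux/ethtool.h>
#include <linux/netdevice.h>
#include <linux/string.h>

static void foo_get_drvinfo(struct net_device *dev,
                            struct ethtool_drvinfo *info)
{
        strcpy(info->driver, "foo");
        strcpy(info->version, "1.0");
}

/* const: the "allow const ethtool_ops" patch lets net_device hold a
 * pointer to const, so the table can live in read-only data. */
static const struct ethtool_ops foo_ethtool_ops = {
        .get_drvinfo = foo_get_drvinfo,
        .get_link    = ethtool_op_get_link,     /* generic helper */
};

static void foo_setup_ethtool(struct net_device *dev)
{
        SET_ETHTOOL_OPS(dev, &foo_ethtool_ops);
}

dl2k itself only needs to add the const qualifier to its existing ethtool_ops declaration and definition, as the hunks below show.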
diff --git a/drivers/net/dl2k.c b/drivers/net/dl2k.c
index b74e6765476..7e95cf1a487 100644
--- a/drivers/net/dl2k.c
+++ b/drivers/net/dl2k.c
@@ -17,7 +17,7 @@
 #include <linux/dma-mapping.h>

 static char version[] __devinitdata =
     KERN_INFO DRV_NAME " " DRV_VERSION " " DRV_RELDATE "\n";
 #define MAX_UNITS 8
 static int mtu[MAX_UNITS];
 static int vlan[MAX_UNITS];
@@ -83,7 +83,7 @@ static int mii_read (struct net_device *dev, int phy_addr, int reg_num);
 static int mii_write (struct net_device *dev, int phy_addr, int reg_num,
                       u16 data);

-static struct ethtool_ops ethtool_ops;
+static const struct ethtool_ops ethtool_ops;

 static int __devinit
 rio_probe1 (struct pci_dev *pdev, const struct pci_device_id *ent)
@@ -144,9 +144,9 @@ rio_probe1 (struct pci_dev *pdev, const struct pci_device_id *ent)
     if (media[card_idx] != NULL) {
         np->an_enable = 0;
         if (strcmp (media[card_idx], "auto") == 0 ||
             strcmp (media[card_idx], "autosense") == 0 ||
             strcmp (media[card_idx], "0") == 0 ) {
             np->an_enable = 2;
         } else if (strcmp (media[card_idx], "100mbps_fd") == 0 ||
                    strcmp (media[card_idx], "4") == 0) {
             np->speed = 100;
@@ -232,7 +232,7 @@ rio_probe1 (struct pci_dev *pdev, const struct pci_device_id *ent)
     err = find_miiphy (dev);
     if (err)
         goto err_out_unmap_rx;

     /* Fiber device? */
     np->phy_media = (readw(ioaddr + ASICCtrl) & PhyMedia) ? 1 : 0;
     np->link_status = 0;
@@ -263,11 +263,11 @@ rio_probe1 (struct pci_dev *pdev, const struct pci_device_id *ent)
         dev->dev_addr[0], dev->dev_addr[1], dev->dev_addr[2],
         dev->dev_addr[3], dev->dev_addr[4], dev->dev_addr[5], irq);
     if (tx_coalesce > 1)
         printk(KERN_INFO "tx_coalesce:\t%d packets\n",
                tx_coalesce);
     if (np->coalesce)
         printk(KERN_INFO "rx_coalesce:\t%d packets\n"
                KERN_INFO "rx_timeout: \t%d ns\n",
                np->rx_coalesce, np->rx_timeout*640);
     if (np->vlan)
         printk(KERN_INFO "vlan(id):\t%d\n", np->vlan);
@@ -339,7 +339,7 @@ parse_eeprom (struct net_device *dev)
     }
 #ifdef MEM_MAPPING
     ioaddr = dev->base_addr;
 #endif
     /* Check CRC */
     crc = ~ether_crc_le (256 - 4, sromdata);
     if (psrom->crc != crc) {
@@ -400,16 +400,16 @@ rio_open (struct net_device *dev)
     long ioaddr = dev->base_addr;
     int i;
     u16 macctrl;

     i = request_irq (dev->irq, &rio_interrupt, IRQF_SHARED, dev->name, dev);
     if (i)
         return i;

     /* Reset all logic functions */
     writew (GlobalReset | DMAReset | FIFOReset | NetworkReset | HostReset,
             ioaddr + ASICCtrl + 2);
     mdelay(10);

     /* DebugCtrl bit 4, 5, 9 must set */
     writel (readl (ioaddr + DebugCtrl) | 0x0230, ioaddr + DebugCtrl);

@@ -440,7 +440,7 @@ rio_open (struct net_device *dev)
     /* VLAN supported */
     if (np->vlan) {
         /* priority field in RxDMAIntCtrl */
         writel (readl(ioaddr + RxDMAIntCtrl) | 0x7 << 10,
                 ioaddr + RxDMAIntCtrl);
         /* VLANId */
         writew (np->vlan, ioaddr + VLANId);
@@ -459,9 +459,9 @@ rio_open (struct net_device *dev)
     add_timer (&np->timer);

     /* Start Tx/Rx */
     writel (readl (ioaddr + MACCtrl) | StatsEnable | RxEnable | TxEnable,
             ioaddr + MACCtrl);

     macctrl = 0;
     macctrl |= (np->vlan) ? AutoVLANuntagging : 0;
     macctrl |= (np->full_duplex) ? DuplexSelect : 0;
@@ -470,13 +470,13 @@ rio_open (struct net_device *dev)
     writew(macctrl, ioaddr + MACCtrl);

     netif_start_queue (dev);

     /* Enable default interrupts */
     EnableInt ();
     return 0;
 }

 static void
 rio_timer (unsigned long data)
 {
     struct net_device *dev = (struct net_device *)data;
@@ -521,7 +521,7 @@ rio_timer (unsigned long data)
     np->timer.expires = jiffies + next_tick;
     add_timer(&np->timer);
 }

 static void
 rio_tx_timeout (struct net_device *dev)
 {
@@ -632,12 +632,12 @@ start_xmit (struct sk_buff *skb, struct net_device *dev)
      * Work around: Always use 1 descriptor in 10Mbps mode */
     if (entry % np->tx_coalesce == 0 || np->speed == 10)
         txdesc->status = cpu_to_le64 (entry | tfc_vlan_tag |
                                       WordAlignDisable |
                                       TxDMAIndicate |
                                       (1 << FragCountShift));
     else
         txdesc->status = cpu_to_le64 (entry | tfc_vlan_tag |
                                       WordAlignDisable |
                                       (1 << FragCountShift));

     /* TxDMAPollNow */
@@ -658,7 +658,7 @@ start_xmit (struct sk_buff *skb, struct net_device *dev)
                 dev->base_addr + TFDListPtr0);
         writel (0, dev->base_addr + TFDListPtr1);
     }

     /* NETDEV WATCHDOG timer */
     dev->trans_start = jiffies;
     return 0;
@@ -677,7 +677,7 @@ rio_interrupt (int irq, void *dev_instance, struct pt_regs *rgs)
     ioaddr = dev->base_addr;
     np = netdev_priv(dev);
     while (1) {
         int_status = readw (ioaddr + IntStatus);
         writew (int_status, ioaddr + IntStatus);
         int_status &= DEFAULT_INTR;
         if (int_status == 0 || --cnt < 0)
@@ -693,7 +693,7 @@ rio_interrupt (int irq, void *dev_instance, struct pt_regs *rgs)
             if (tx_status & 0x01)
                 tx_error (dev, tx_status);
             /* Free used tx skbuffs */
             rio_free_tx (dev, 1);
         }

         /* Handle uncommon events */
@@ -706,19 +706,19 @@ rio_interrupt (int irq, void *dev_instance, struct pt_regs *rgs)
     return IRQ_RETVAL(handled);
 }

 static void
 rio_free_tx (struct net_device *dev, int irq)
 {
     struct netdev_private *np = netdev_priv(dev);
     int entry = np->old_tx % TX_RING_SIZE;
     int tx_use = 0;
     unsigned long flag = 0;

     if (irq)
         spin_lock(&np->tx_lock);
     else
         spin_lock_irqsave(&np->tx_lock, flag);

     /* Free used tx skbuffs */
     while (entry != np->cur_tx) {
         struct sk_buff *skb;
@@ -744,11 +744,11 @@ rio_free_tx (struct net_device *dev, int irq)
         spin_unlock_irqrestore(&np->tx_lock, flag);
     np->old_tx = entry;

     /* If the ring is no longer full, clear tx_full and
        call netif_wake_queue() */

     if (netif_queue_stopped(dev) &&
         ((np->cur_tx - np->old_tx + TX_RING_SIZE) % TX_RING_SIZE
         < TX_QUEUE_LEN - 1 || np->speed == 10)) {
         netif_wake_queue (dev);
     }
@@ -805,11 +805,11 @@ tx_error (struct net_device *dev, int tx_status)
         /* Let TxStartThresh stay default value */
     }
     /* Maximum Collisions */
 #ifdef ETHER_STATS
     if (tx_status & 0x08)
         np->stats.collisions16++;
 #else
     if (tx_status & 0x08)
         np->stats.collisions++;
 #endif
     /* Restart the Tx */
@@ -862,7 +862,7 @@ receive_packet (struct net_device *dev)
             np->rx_skbuff[entry] = NULL;
         } else if ((skb = dev_alloc_skb (pkt_len + 2)) != NULL) {
             pci_dma_sync_single_for_cpu(np->pdev,
                                         desc->fraginfo &
                                         DMA_48BIT_MASK,
                                         np->rx_buf_sz,
                                         PCI_DMA_FROMDEVICE);
@@ -880,12 +880,12 @@ receive_packet (struct net_device *dev)
                              PCI_DMA_FROMDEVICE);
         }
         skb->protocol = eth_type_trans (skb, dev);
 #if 0
         /* Checksum done by hw, but csum value unavailable. */
         if (np->pci_rev_id >= 0x0c &&
             !(frame_status & (TCPError | UDPError | IPError))) {
             skb->ip_summed = CHECKSUM_UNNECESSARY;
         }
 #endif
         netif_rx (skb);
         dev->last_rx = jiffies;
@@ -945,14 +945,14 @@ rio_error (struct net_device *dev, int int_status)
             mii_get_media (dev);
             if (np->speed == 1000)
                 np->tx_coalesce = tx_coalesce;
             else
                 np->tx_coalesce = 1;
             macctrl = 0;
             macctrl |= (np->vlan) ? AutoVLANuntagging : 0;
             macctrl |= (np->full_duplex) ? DuplexSelect : 0;
             macctrl |= (np->tx_flow) ?
                 TxFlowControlEnable : 0;
             macctrl |= (np->rx_flow) ?
                 RxFlowControlEnable : 0;
             writew(macctrl, ioaddr + MACCtrl);
             np->link_status = 1;
@@ -969,7 +969,7 @@ rio_error (struct net_device *dev, int int_status)
         get_stats (dev);
     }

     /* PCI Error, a catastronphic error related to the bus interface
        occurs, set GlobalReset and HostReset to reset. */
     if (int_status & HostError) {
         printk (KERN_ERR "%s: HostError! IntStatus %4.4x.\n",
@@ -991,16 +991,16 @@ get_stats (struct net_device *dev)

     /* All statistics registers need to be acknowledged,
        else statistic overflow could cause problems */

     np->stats.rx_packets += readl (ioaddr + FramesRcvOk);
     np->stats.tx_packets += readl (ioaddr + FramesXmtOk);
     np->stats.rx_bytes += readl (ioaddr + OctetRcvOk);
     np->stats.tx_bytes += readl (ioaddr + OctetXmtOk);

     np->stats.multicast = readl (ioaddr + McstFramesRcvdOk);
     np->stats.collisions += readl (ioaddr + SingleColFrames)
                          + readl (ioaddr + MultiColFrames);

     /* detailed tx errors */
     stat_reg = readw (ioaddr + FramesAbortXSColls);
     np->stats.tx_aborted_errors += stat_reg;
@@ -1047,7 +1047,7 @@ clear_stats (struct net_device *dev)
     long ioaddr = dev->base_addr;
 #ifdef MEM_MAPPING
     int i;
 #endif

     /* All statistics registers need to be acknowledged,
        else statistic overflow could cause problems */
@@ -1060,7 +1060,7 @@ clear_stats (struct net_device *dev)
     readl (ioaddr + SingleColFrames);
     readl (ioaddr + MultiColFrames);
     readl (ioaddr + LateCollisions);
     /* detailed rx errors */
     readw (ioaddr + FrameTooLongErrors);
     readw (ioaddr + InRangeLengthErrors);
     readw (ioaddr + FramesCheckSeqErrors);
@@ -1086,7 +1086,7 @@ clear_stats (struct net_device *dev)
 #ifdef MEM_MAPPING
     for (i = 0x100; i <= 0x150; i += 4)
         readl (ioaddr + i);
 #endif
     readw (ioaddr + TxJumboFrames);
     readw (ioaddr + RxJumboFrames);
     readw (ioaddr + TCPCheckSumErrors);
@@ -1118,26 +1118,26 @@ set_multicast (struct net_device *dev)
     u32 hash_table[2];
     u16 rx_mode = 0;
     struct netdev_private *np = netdev_priv(dev);

     hash_table[0] = hash_table[1] = 0;
     /* RxFlowcontrol DA: 01-80-C2-00-00-01. Hash index=0x39 */
     hash_table[1] |= cpu_to_le32(0x02000000);
     if (dev->flags & IFF_PROMISC) {
         /* Receive all frames promiscuously. */
         rx_mode = ReceiveAllFrames;
     } else if ((dev->flags & IFF_ALLMULTI) ||
             (dev->mc_count > multicast_filter_limit)) {
         /* Receive broadcast and multicast frames */
         rx_mode = ReceiveBroadcast | ReceiveMulticast | ReceiveUnicast;
     } else if (dev->mc_count > 0) {
         int i;
         struct dev_mc_list *mclist;
         /* Receive broadcast frames and multicast frames filtering
            by Hashtable */
         rx_mode =
             ReceiveBroadcast | ReceiveMulticastHash | ReceiveUnicast;
         for (i=0, mclist = dev->mc_list; mclist && i < dev->mc_count;
                 i++, mclist=mclist->next)
         {
             int bit, index = 0;
             int crc = ether_crc_le (ETH_ALEN, mclist->dmi_addr);
@@ -1167,7 +1167,7 @@ static void rio_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info
     strcpy(info->driver, "dl2k");
     strcpy(info->version, DRV_VERSION);
     strcpy(info->bus_info, pci_name(np->pdev));
 }

 static int rio_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
 {
@@ -1177,10 +1177,10 @@ static int rio_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
         cmd->supported = SUPPORTED_Autoneg | SUPPORTED_FIBRE;
         cmd->advertising= ADVERTISED_Autoneg | ADVERTISED_FIBRE;
         cmd->port = PORT_FIBRE;
         cmd->transceiver = XCVR_INTERNAL;
     } else {
         /* copper device */
         cmd->supported = SUPPORTED_10baseT_Half |
             SUPPORTED_10baseT_Full | SUPPORTED_100baseT_Half
             | SUPPORTED_100baseT_Full | SUPPORTED_1000baseT_Full |
             SUPPORTED_Autoneg | SUPPORTED_MII;
@@ -1191,7 +1191,7 @@ static int rio_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
         cmd->port = PORT_MII;
         cmd->transceiver = XCVR_INTERNAL;
     }
     if ( np->link_status ) {
         cmd->speed = np->speed;
         cmd->duplex = np->full_duplex ? DUPLEX_FULL : DUPLEX_HALF;
     } else {
@@ -1202,9 +1202,9 @@ static int rio_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
         cmd->autoneg = AUTONEG_ENABLE;
     else
         cmd->autoneg = AUTONEG_DISABLE;

     cmd->phy_address = np->phy_addr;
     return 0;
 }

 static int rio_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
@@ -1217,22 +1217,22 @@ static int rio_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
         else {
             np->an_enable = 1;
             mii_set_media(dev);
             return 0;
         }
     } else {
         np->an_enable = 0;
         if (np->speed == 1000) {
             cmd->speed = SPEED_100;
             cmd->duplex = DUPLEX_FULL;
             printk("Warning!! Can't disable Auto negotiation in 1000Mbps, change to Manual 100Mbps, Full duplex.\n");
         }
         switch(cmd->speed + cmd->duplex) {

         case SPEED_10 + DUPLEX_HALF:
             np->speed = 10;
             np->full_duplex = 0;
             break;

         case SPEED_10 + DUPLEX_FULL:
             np->speed = 10;
             np->full_duplex = 1;
@@ -1248,7 +1248,7 @@ static int rio_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
         case SPEED_1000 + DUPLEX_HALF:/* not supported */
         case SPEED_1000 + DUPLEX_FULL:/* not supported */
         default:
             return -EINVAL;
         }
         mii_set_media(dev);
     }
@@ -1261,7 +1261,7 @@ static u32 rio_get_link(struct net_device *dev)
     return np->link_status;
 }

-static struct ethtool_ops ethtool_ops = {
+static const struct ethtool_ops ethtool_ops = {
     .get_drvinfo = rio_get_drvinfo,
     .get_settings = rio_get_settings,
     .set_settings = rio_set_settings,
@@ -1274,7 +1274,7 @@ rio_ioctl (struct net_device *dev, struct ifreq *rq, int cmd)
     int phy_addr;
     struct netdev_private *np = netdev_priv(dev);
     struct mii_data *miidata = (struct mii_data *) &rq->ifr_ifru;

     struct netdev_desc *desc;
     int i;

@@ -1282,7 +1282,7 @@ rio_ioctl (struct net_device *dev, struct ifreq *rq, int cmd)
     switch (cmd) {
     case SIOCDEVPRIVATE:
         break;

     case SIOCDEVPRIVATE + 1:
         miidata->out_value = mii_read (dev, phy_addr, miidata->reg_num);
         break;
@@ -1467,7 +1467,7 @@ mii_get_media (struct net_device *dev)
         /* Auto-Negotiation not completed */
         return -1;
     }
     negotiate.image = mii_read (dev, phy_addr, MII_ANAR) &
         mii_read (dev, phy_addr, MII_ANLPAR);
     mscr.image = mii_read (dev, phy_addr, MII_MSCR);
     mssr.image = mii_read (dev, phy_addr, MII_MSSR);
@@ -1519,9 +1519,9 @@ mii_get_media (struct net_device *dev)
1519 printk ("Half duplex\n"); 1519 printk ("Half duplex\n");
1520 } 1520 }
1521 } 1521 }
1522 if (np->tx_flow) 1522 if (np->tx_flow)
1523 printk(KERN_INFO "Enable Tx Flow Control\n"); 1523 printk(KERN_INFO "Enable Tx Flow Control\n");
1524 else 1524 else
1525 printk(KERN_INFO "Disable Tx Flow Control\n"); 1525 printk(KERN_INFO "Disable Tx Flow Control\n");
1526 if (np->rx_flow) 1526 if (np->rx_flow)
1527 printk(KERN_INFO "Enable Rx Flow Control\n"); 1527 printk(KERN_INFO "Enable Rx Flow Control\n");
@@ -1561,7 +1561,7 @@ mii_set_media (struct net_device *dev)
     pscr.image = mii_read (dev, phy_addr, MII_PHY_SCR);
     pscr.bits.mdi_crossover_mode = 3;   /* 11'b */
     mii_write (dev, phy_addr, MII_PHY_SCR, pscr.image);

     /* Soft reset PHY */
     mii_write (dev, phy_addr, MII_BMCR, MII_BMCR_RESET);
     bmcr.image = 0;
@@ -1639,7 +1639,7 @@ mii_get_media_pcs (struct net_device *dev)
         /* Auto-Negotiation not completed */
         return -1;
     }
     negotiate.image = mii_read (dev, phy_addr, PCS_ANAR) &
         mii_read (dev, phy_addr, PCS_ANLPAR);
     np->speed = 1000;
     if (negotiate.bits.full_duplex) {
@@ -1666,9 +1666,9 @@ mii_get_media_pcs (struct net_device *dev)
1666 printk ("Half duplex\n"); 1666 printk ("Half duplex\n");
1667 } 1667 }
1668 } 1668 }
1669 if (np->tx_flow) 1669 if (np->tx_flow)
1670 printk(KERN_INFO "Enable Tx Flow Control\n"); 1670 printk(KERN_INFO "Enable Tx Flow Control\n");
1671 else 1671 else
1672 printk(KERN_INFO "Disable Tx Flow Control\n"); 1672 printk(KERN_INFO "Disable Tx Flow Control\n");
1673 if (np->rx_flow) 1673 if (np->rx_flow)
1674 printk(KERN_INFO "Enable Rx Flow Control\n"); 1674 printk(KERN_INFO "Enable Rx Flow Control\n");
@@ -1694,9 +1694,9 @@ mii_set_media_pcs (struct net_device *dev)
     /* Advertise capabilities */
     esr.image = mii_read (dev, phy_addr, PCS_ESR);
     anar.image = mii_read (dev, phy_addr, MII_ANAR);
     anar.bits.half_duplex =
         esr.bits.media_1000BT_HD | esr.bits.media_1000BX_HD;
     anar.bits.full_duplex =
         esr.bits.media_1000BT_FD | esr.bits.media_1000BX_FD;
     anar.bits.pause = 1;
     anar.bits.asymmetric = 1;
@@ -1754,14 +1754,14 @@ rio_close (struct net_device *dev)
     synchronize_irq (dev->irq);
     free_irq (dev->irq, dev);
     del_timer_sync (&np->timer);

     /* Free all the skbuffs in the queue. */
     for (i = 0; i < RX_RING_SIZE; i++) {
         np->rx_ring[i].status = 0;
         np->rx_ring[i].fraginfo = 0;
         skb = np->rx_skbuff[i];
         if (skb) {
             pci_unmap_single(np->pdev,
                              np->rx_ring[i].fraginfo & DMA_48BIT_MASK,
                              skb->len, PCI_DMA_FROMDEVICE);
             dev_kfree_skb (skb);
@@ -1771,7 +1771,7 @@ rio_close (struct net_device *dev)
     for (i = 0; i < TX_RING_SIZE; i++) {
         skb = np->tx_skbuff[i];
         if (skb) {
             pci_unmap_single(np->pdev,
                              np->tx_ring[i].fraginfo & DMA_48BIT_MASK,
                              skb->len, PCI_DMA_TODEVICE);
             dev_kfree_skb (skb);
@@ -1815,7 +1815,7 @@ static struct pci_driver rio_driver = {
 static int __init
 rio_init (void)
 {
-    return pci_module_init (&rio_driver);
+    return pci_register_driver(&rio_driver);
 }

 static void __exit
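The hunk above is the pci_module_init() to pci_register_driver() conversion; by this point pci_module_init() was a deprecated compatibility alias for pci_register_driver() that was being removed tree-wide. A minimal registration skeleton using the replacement API is sketched below; the foo_* driver, the example PCI ID, and the stub probe/remove bodies are illustrative assumptions, not dl2k code.

/* Sketch of PCI driver registration with pci_register_driver().
 * All foo_* names and the ID below are hypothetical, for illustration. */
#include <linux/init.h>
#include <linux/module.h>
#include <linux/pci.h>

static struct pci_device_id foo_pci_tbl[] = {
        { PCI_DEVICE(0x1186, 0x4000) }, /* example vendor/device ID only */
        { }
};
MODULE_DEVICE_TABLE(pci, foo_pci_tbl);

static int __devinit foo_probe(struct pci_dev *pdev,
                               const struct pci_device_id *ent)
{
        /* Stub: a real probe would map BARs and register a netdev. */
        return pci_enable_device(pdev);
}

static void __devexit foo_remove(struct pci_dev *pdev)
{
        pci_disable_device(pdev);
}

static struct pci_driver foo_driver = {
        .name     = "foo",
        .id_table = foo_pci_tbl,
        .probe    = foo_probe,
        .remove   = __devexit_p(foo_remove),
};

static int __init foo_init(void)
{
        /* Returns 0 on success or a negative errno. */
        return pci_register_driver(&foo_driver);
}

static void __exit foo_exit(void)
{
        pci_unregister_driver(&foo_driver);
}

module_init(foo_init);
module_exit(foo_exit);
MODULE_LICENSE("GPL");

Since pci_register_driver() returns 0 on success or a negative errno, returning it directly from the module init function, as rio_init() now does, is the idiomatic form.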
@@ -1828,9 +1828,9 @@ module_init (rio_init);
 module_exit (rio_exit);

 /*

 Compile command:

 gcc -D__KERNEL__ -DMODULE -I/usr/src/linux/include -Wall -Wstrict-prototypes -O2 -c dl2k.c

 Read Documentation/networking/dl2k.txt for details.