-rw-r--r--  Documentation/networking/generic-hdlc.txt |   51
-rw-r--r--  Documentation/networking/multicast.txt |    1
-rw-r--r--  Documentation/networking/net-modules.txt |    3
-rw-r--r--  drivers/net/8139cp.c |  100
-rw-r--r--  drivers/net/8139too.c |  194
-rw-r--r--  drivers/net/Kconfig |   47
-rw-r--r--  drivers/net/Makefile |    4
-rw-r--r--  drivers/net/Space.c |    6
-rw-r--r--  drivers/net/arm/etherh.c |   16
-rw-r--r--  drivers/net/au1000_eth.c |   10
-rw-r--r--  drivers/net/bmac.c |    2
-rw-r--r--  drivers/net/dm9000.c | 1219
-rw-r--r--  drivers/net/dm9000.h |  135
-rw-r--r--  drivers/net/fmv18x.c |  689
-rw-r--r--  drivers/net/pcmcia/pcnet_cs.c |   17
-rw-r--r--  drivers/net/ppp_generic.c |  177
-rw-r--r--  drivers/net/r8169.c |  320
-rw-r--r--  drivers/net/sk98lin/skge.c |    8
-rw-r--r--  drivers/net/sk_g16.c | 2066
-rw-r--r--  drivers/net/sk_g16.h |  165
-rw-r--r--  drivers/net/skge.c | 3386
-rw-r--r--  drivers/net/skge.h | 3005
-rw-r--r--  drivers/net/smc91x.c |   58
-rw-r--r--  drivers/net/smc91x.h |   15
-rw-r--r--  drivers/net/starfire.c |  142
-rw-r--r--  drivers/net/starfire_firmware.h |  346
-rw-r--r--  drivers/net/tlan.c |    4
-rw-r--r--  drivers/net/tokenring/ibmtr.c |   11
-rw-r--r--  drivers/net/wan/hdlc_fr.c |  320
-rw-r--r--  drivers/net/wan/hdlc_generic.c |   16
-rw-r--r--  drivers/net/wan/lmc/lmc_main.c |    8
-rw-r--r--  drivers/net/wireless/orinoco.c |  332
-rw-r--r--  drivers/net/wireless/orinoco.h |    1
-rw-r--r--  include/linux/dm9000.h |   36
-rw-r--r--  include/linux/hdlc.h |    4
-rw-r--r--  include/linux/if.h |    2
-rw-r--r--  include/linux/wireless.h |  283
-rw-r--r--  net/core/wireless.c |   74
38 files changed, 9416 insertions, 3857 deletions
diff --git a/Documentation/networking/generic-hdlc.txt b/Documentation/networking/generic-hdlc.txt
index 7d1dc6b884f3..31bc8b759b75 100644
--- a/Documentation/networking/generic-hdlc.txt
+++ b/Documentation/networking/generic-hdlc.txt
@@ -1,21 +1,21 @@
1Generic HDLC layer 1Generic HDLC layer
2Krzysztof Halasa <khc@pm.waw.pl> 2Krzysztof Halasa <khc@pm.waw.pl>
3January, 2003
4 3
5 4
6Generic HDLC layer currently supports: 5Generic HDLC layer currently supports:
7- Frame Relay (ANSI, CCITT and no LMI), with ARP support (no InARP). 61. Frame Relay (ANSI, CCITT, Cisco and no LMI).
8 Normal (routed) and Ethernet-bridged (Ethernet device emulation) 7 - Normal (routed) and Ethernet-bridged (Ethernet device emulation)
9 interfaces can share a single PVC. 8 interfaces can share a single PVC.
10- raw HDLC - either IP (IPv4) interface or Ethernet device emulation. 9 - ARP support (no InARP support in the kernel - there is an
11- Cisco HDLC, 10 experimental InARP user-space daemon available on:
12- PPP (uses syncppp.c), 11 http://www.kernel.org/pub/linux/utils/net/hdlc/).
13- X.25 (uses X.25 routines). 122. raw HDLC - either IP (IPv4) interface or Ethernet device emulation.
14 133. Cisco HDLC.
15There are hardware drivers for the following cards: 144. PPP (uses syncppp.c).
16- C101 by Moxa Technologies Co., Ltd. 155. X.25 (uses X.25 routines).
17- RISCom/N2 by SDL Communications Inc. 16
18- and others, some not in the official kernel. 17Generic HDLC is a protocol driver only - it needs a low-level driver
18for your particular hardware.
19 19
20Ethernet device emulation (using HDLC or Frame-Relay PVC) is compatible 20Ethernet device emulation (using HDLC or Frame-Relay PVC) is compatible
21with IEEE 802.1Q (VLANs) and 802.1D (Ethernet bridging). 21with IEEE 802.1Q (VLANs) and 802.1D (Ethernet bridging).
@@ -24,7 +24,7 @@ with IEEE 802.1Q (VLANs) and 802.1D (Ethernet bridging).
24Make sure the hdlc.o and the hardware driver are loaded. It should 24Make sure the hdlc.o and the hardware driver are loaded. It should
25create a number of "hdlc" (hdlc0 etc) network devices, one for each 25create a number of "hdlc" (hdlc0 etc) network devices, one for each
26WAN port. You'll need the "sethdlc" utility, get it from: 26WAN port. You'll need the "sethdlc" utility, get it from:
27 http://hq.pm.waw.pl/hdlc/ 27 http://www.kernel.org/pub/linux/utils/net/hdlc/
28 28
29Compile sethdlc.c utility: 29Compile sethdlc.c utility:
30 gcc -O2 -Wall -o sethdlc sethdlc.c 30 gcc -O2 -Wall -o sethdlc sethdlc.c
@@ -52,12 +52,12 @@ Setting interface:
52* v35 | rs232 | x21 | t1 | e1 - sets physical interface for a given port 52* v35 | rs232 | x21 | t1 | e1 - sets physical interface for a given port
53 if the card has software-selectable interfaces 53 if the card has software-selectable interfaces
54 loopback - activate hardware loopback (for testing only) 54 loopback - activate hardware loopback (for testing only)
55* clock ext - external clock (uses DTE RX and TX clock) 55* clock ext - both RX clock and TX clock external
56* clock int - internal clock (provides clock signal on DCE clock output) 56* clock int - both RX clock and TX clock internal
57* clock txint - TX internal, RX external (provides TX clock on DCE output) 57* clock txint - RX clock external, TX clock internal
58* clock txfromrx - TX clock derived from RX clock (TX clock on DCE output) 58* clock txfromrx - RX clock external, TX clock derived from RX clock
59* rate - sets clock rate in bps (not required for external clock or 59* rate - sets clock rate in bps (for "int" or "txint" clock only)
60 for txfromrx) 60
61 61
62Setting protocol: 62Setting protocol:
63 63
@@ -79,7 +79,7 @@ Setting protocol:
79* x25 - sets X.25 mode 79* x25 - sets X.25 mode
80 80
81* fr - Frame Relay mode 81* fr - Frame Relay mode
82 lmi ansi / ccitt / none - LMI (link management) type 82 lmi ansi / ccitt / cisco / none - LMI (link management) type
83 dce - Frame Relay DCE (network) side LMI instead of default DTE (user). 83 dce - Frame Relay DCE (network) side LMI instead of default DTE (user).
84 It has nothing to do with clocks! 84 It has nothing to do with clocks!
85 t391 - link integrity verification polling timer (in seconds) - user 85 t391 - link integrity verification polling timer (in seconds) - user
@@ -119,13 +119,14 @@ or
119 119
120 120
121 121
122If you have a problem with N2 or C101 card, you can issue the "private" 122If you have a problem with N2, C101 or PLX200SYN card, you can issue the
123command to see port's packet descriptor rings (in kernel logs): 123"private" command to see port's packet descriptor rings (in kernel logs):
124 124
125 sethdlc hdlc0 private 125 sethdlc hdlc0 private
126 126
127The hardware driver has to be build with CONFIG_HDLC_DEBUG_RINGS. 127The hardware driver has to be build with #define DEBUG_RINGS.
128Attaching this info to bug reports would be helpful. Anyway, let me know 128Attaching this info to bug reports would be helpful. Anyway, let me know
129if you have problems using this. 129if you have problems using this.
130 130
131For patches and other info look at http://hq.pm.waw.pl/hdlc/ 131For patches and other info look at:
132<http://www.kernel.org/pub/linux/utils/net/hdlc/>.
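
For orientation, the settings documented above combine into short sethdlc invocations. The session below is illustrative only (the interface name and clock rate are examples): it selects an internal clock, enables Frame Relay with ANSI LMI, and dumps the port's descriptor rings to the kernel log, which requires a low-level driver built with #define DEBUG_RINGS:

	sethdlc hdlc0 clock int rate 128000
	sethdlc hdlc0 fr lmi ansi
	sethdlc hdlc0 private
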
diff --git a/Documentation/networking/multicast.txt b/Documentation/networking/multicast.txt
index 5049a64313d1..b06c8c69266f 100644
--- a/Documentation/networking/multicast.txt
+++ b/Documentation/networking/multicast.txt
@@ -47,7 +47,6 @@ ni52 <------------------ Buggy ------------------>
47ni65 YES YES YES Software(#) 47ni65 YES YES YES Software(#)
48seeq NO NO NO N/A 48seeq NO NO NO N/A
49sgiseek <------------------ Buggy ------------------> 49sgiseek <------------------ Buggy ------------------>
50sk_g16 NO NO YES N/A
51smc-ultra YES YES YES Hardware 50smc-ultra YES YES YES Hardware
52sunlance YES YES YES Hardware 51sunlance YES YES YES Hardware
53tulip YES YES YES Hardware 52tulip YES YES YES Hardware
diff --git a/Documentation/networking/net-modules.txt b/Documentation/networking/net-modules.txt
index 3830a83513d2..0b27863f155c 100644
--- a/Documentation/networking/net-modules.txt
+++ b/Documentation/networking/net-modules.txt
@@ -284,9 +284,6 @@ ppp.c:
284seeq8005.c: *Not modularized* 284seeq8005.c: *Not modularized*
285 (Probes ports: 0x300, 0x320, 0x340, 0x360) 285 (Probes ports: 0x300, 0x320, 0x340, 0x360)
286 286
287sk_g16.c: *Not modularized*
288 (Probes ports: 0x100, 0x180, 0x208, 0x220m 0x288, 0x320, 0x328, 0x390)
289
290skeleton.c: *Skeleton* 287skeleton.c: *Skeleton*
291 288
292slhc.c: 289slhc.c:
diff --git a/drivers/net/8139cp.c b/drivers/net/8139cp.c
index d639cb8dc461..72cdf19e1be1 100644
--- a/drivers/net/8139cp.c
+++ b/drivers/net/8139cp.c
@@ -54,6 +54,7 @@
54 54
55#include <linux/config.h> 55#include <linux/config.h>
56#include <linux/module.h> 56#include <linux/module.h>
57#include <linux/moduleparam.h>
57#include <linux/kernel.h> 58#include <linux/kernel.h>
58#include <linux/compiler.h> 59#include <linux/compiler.h>
59#include <linux/netdevice.h> 60#include <linux/netdevice.h>
@@ -91,16 +92,17 @@ KERN_INFO DRV_NAME ": 10/100 PCI Ethernet driver v" DRV_VERSION " (" DRV_RELDATE
91 92
92MODULE_AUTHOR("Jeff Garzik <jgarzik@pobox.com>"); 93MODULE_AUTHOR("Jeff Garzik <jgarzik@pobox.com>");
93MODULE_DESCRIPTION("RealTek RTL-8139C+ series 10/100 PCI Ethernet driver"); 94MODULE_DESCRIPTION("RealTek RTL-8139C+ series 10/100 PCI Ethernet driver");
95MODULE_VERSION(DRV_VERSION);
94MODULE_LICENSE("GPL"); 96MODULE_LICENSE("GPL");
95 97
96static int debug = -1; 98static int debug = -1;
97MODULE_PARM (debug, "i"); 99module_param(debug, int, 0);
98MODULE_PARM_DESC (debug, "8139cp: bitmapped message enable number"); 100MODULE_PARM_DESC (debug, "8139cp: bitmapped message enable number");
99 101
100/* Maximum number of multicast addresses to filter (vs. Rx-all-multicast). 102/* Maximum number of multicast addresses to filter (vs. Rx-all-multicast).
101 The RTL chips use a 64 element hash table based on the Ethernet CRC. */ 103 The RTL chips use a 64 element hash table based on the Ethernet CRC. */
102static int multicast_filter_limit = 32; 104static int multicast_filter_limit = 32;
103MODULE_PARM (multicast_filter_limit, "i"); 105module_param(multicast_filter_limit, int, 0);
104MODULE_PARM_DESC (multicast_filter_limit, "8139cp: maximum number of filtered multicast addresses"); 106MODULE_PARM_DESC (multicast_filter_limit, "8139cp: maximum number of filtered multicast addresses");
105 107
106#define PFX DRV_NAME ": " 108#define PFX DRV_NAME ": "
@@ -186,6 +188,9 @@ enum {
186 RingEnd = (1 << 30), /* End of descriptor ring */ 188 RingEnd = (1 << 30), /* End of descriptor ring */
187 FirstFrag = (1 << 29), /* First segment of a packet */ 189 FirstFrag = (1 << 29), /* First segment of a packet */
188 LastFrag = (1 << 28), /* Final segment of a packet */ 190 LastFrag = (1 << 28), /* Final segment of a packet */
191 LargeSend = (1 << 27), /* TCP Large Send Offload (TSO) */
192 MSSShift = 16, /* MSS value position */
193 MSSMask = 0xfff, /* MSS value: 11 bits */
189 TxError = (1 << 23), /* Tx error summary */ 194 TxError = (1 << 23), /* Tx error summary */
190 RxError = (1 << 20), /* Rx error summary */ 195 RxError = (1 << 20), /* Rx error summary */
191 IPCS = (1 << 18), /* Calculate IP checksum */ 196 IPCS = (1 << 18), /* Calculate IP checksum */
@@ -312,7 +317,7 @@ struct cp_desc {
312struct ring_info { 317struct ring_info {
313 struct sk_buff *skb; 318 struct sk_buff *skb;
314 dma_addr_t mapping; 319 dma_addr_t mapping;
315 unsigned frag; 320 u32 len;
316}; 321};
317 322
318struct cp_dma_stats { 323struct cp_dma_stats {
@@ -394,6 +399,9 @@ struct cp_private {
394static void __cp_set_rx_mode (struct net_device *dev); 399static void __cp_set_rx_mode (struct net_device *dev);
395static void cp_tx (struct cp_private *cp); 400static void cp_tx (struct cp_private *cp);
396static void cp_clean_rings (struct cp_private *cp); 401static void cp_clean_rings (struct cp_private *cp);
402#ifdef CONFIG_NET_POLL_CONTROLLER
403static void cp_poll_controller(struct net_device *dev);
404#endif
397 405
398static struct pci_device_id cp_pci_tbl[] = { 406static struct pci_device_id cp_pci_tbl[] = {
399 { PCI_VENDOR_ID_REALTEK, PCI_DEVICE_ID_REALTEK_8139, 407 { PCI_VENDOR_ID_REALTEK, PCI_DEVICE_ID_REALTEK_8139,
@@ -688,6 +696,19 @@ cp_interrupt (int irq, void *dev_instance, struct pt_regs *regs)
688 return IRQ_HANDLED; 696 return IRQ_HANDLED;
689} 697}
690 698
699#ifdef CONFIG_NET_POLL_CONTROLLER
700/*
701 * Polling receive - used by netconsole and other diagnostic tools
702 * to allow network i/o with interrupts disabled.
703 */
704static void cp_poll_controller(struct net_device *dev)
705{
706 disable_irq(dev->irq);
707 cp_interrupt(dev->irq, dev, NULL);
708 enable_irq(dev->irq);
709}
710#endif
711
691static void cp_tx (struct cp_private *cp) 712static void cp_tx (struct cp_private *cp)
692{ 713{
693 unsigned tx_head = cp->tx_head; 714 unsigned tx_head = cp->tx_head;
@@ -707,7 +728,7 @@ static void cp_tx (struct cp_private *cp)
707 BUG(); 728 BUG();
708 729
709 pci_unmap_single(cp->pdev, cp->tx_skb[tx_tail].mapping, 730 pci_unmap_single(cp->pdev, cp->tx_skb[tx_tail].mapping,
710 skb->len, PCI_DMA_TODEVICE); 731 cp->tx_skb[tx_tail].len, PCI_DMA_TODEVICE);
711 732
712 if (status & LastFrag) { 733 if (status & LastFrag) {
713 if (status & (TxError | TxFIFOUnder)) { 734 if (status & (TxError | TxFIFOUnder)) {
@@ -749,10 +770,11 @@ static int cp_start_xmit (struct sk_buff *skb, struct net_device *dev)
749{ 770{
750 struct cp_private *cp = netdev_priv(dev); 771 struct cp_private *cp = netdev_priv(dev);
751 unsigned entry; 772 unsigned entry;
752 u32 eor; 773 u32 eor, flags;
753#if CP_VLAN_TAG_USED 774#if CP_VLAN_TAG_USED
754 u32 vlan_tag = 0; 775 u32 vlan_tag = 0;
755#endif 776#endif
777 int mss = 0;
756 778
757 spin_lock_irq(&cp->lock); 779 spin_lock_irq(&cp->lock);
758 780
@@ -772,6 +794,9 @@ static int cp_start_xmit (struct sk_buff *skb, struct net_device *dev)
772 794
773 entry = cp->tx_head; 795 entry = cp->tx_head;
774 eor = (entry == (CP_TX_RING_SIZE - 1)) ? RingEnd : 0; 796 eor = (entry == (CP_TX_RING_SIZE - 1)) ? RingEnd : 0;
797 if (dev->features & NETIF_F_TSO)
798 mss = skb_shinfo(skb)->tso_size;
799
775 if (skb_shinfo(skb)->nr_frags == 0) { 800 if (skb_shinfo(skb)->nr_frags == 0) {
776 struct cp_desc *txd = &cp->tx_ring[entry]; 801 struct cp_desc *txd = &cp->tx_ring[entry];
777 u32 len; 802 u32 len;
@@ -783,26 +808,26 @@ static int cp_start_xmit (struct sk_buff *skb, struct net_device *dev)
783 txd->addr = cpu_to_le64(mapping); 808 txd->addr = cpu_to_le64(mapping);
784 wmb(); 809 wmb();
785 810
786 if (skb->ip_summed == CHECKSUM_HW) { 811 flags = eor | len | DescOwn | FirstFrag | LastFrag;
812
813 if (mss)
814 flags |= LargeSend | ((mss & MSSMask) << MSSShift);
815 else if (skb->ip_summed == CHECKSUM_HW) {
787 const struct iphdr *ip = skb->nh.iph; 816 const struct iphdr *ip = skb->nh.iph;
788 if (ip->protocol == IPPROTO_TCP) 817 if (ip->protocol == IPPROTO_TCP)
789 txd->opts1 = cpu_to_le32(eor | len | DescOwn | 818 flags |= IPCS | TCPCS;
790 FirstFrag | LastFrag |
791 IPCS | TCPCS);
792 else if (ip->protocol == IPPROTO_UDP) 819 else if (ip->protocol == IPPROTO_UDP)
793 txd->opts1 = cpu_to_le32(eor | len | DescOwn | 820 flags |= IPCS | UDPCS;
794 FirstFrag | LastFrag |
795 IPCS | UDPCS);
796 else 821 else
797 BUG(); 822 WARN_ON(1); /* we need a WARN() */
798 } else 823 }
799 txd->opts1 = cpu_to_le32(eor | len | DescOwn | 824
800 FirstFrag | LastFrag); 825 txd->opts1 = cpu_to_le32(flags);
801 wmb(); 826 wmb();
802 827
803 cp->tx_skb[entry].skb = skb; 828 cp->tx_skb[entry].skb = skb;
804 cp->tx_skb[entry].mapping = mapping; 829 cp->tx_skb[entry].mapping = mapping;
805 cp->tx_skb[entry].frag = 0; 830 cp->tx_skb[entry].len = len;
806 entry = NEXT_TX(entry); 831 entry = NEXT_TX(entry);
807 } else { 832 } else {
808 struct cp_desc *txd; 833 struct cp_desc *txd;
@@ -820,7 +845,7 @@ static int cp_start_xmit (struct sk_buff *skb, struct net_device *dev)
820 first_len, PCI_DMA_TODEVICE); 845 first_len, PCI_DMA_TODEVICE);
821 cp->tx_skb[entry].skb = skb; 846 cp->tx_skb[entry].skb = skb;
822 cp->tx_skb[entry].mapping = first_mapping; 847 cp->tx_skb[entry].mapping = first_mapping;
823 cp->tx_skb[entry].frag = 1; 848 cp->tx_skb[entry].len = first_len;
824 entry = NEXT_TX(entry); 849 entry = NEXT_TX(entry);
825 850
826 for (frag = 0; frag < skb_shinfo(skb)->nr_frags; frag++) { 851 for (frag = 0; frag < skb_shinfo(skb)->nr_frags; frag++) {
@@ -836,16 +861,19 @@ static int cp_start_xmit (struct sk_buff *skb, struct net_device *dev)
836 len, PCI_DMA_TODEVICE); 861 len, PCI_DMA_TODEVICE);
837 eor = (entry == (CP_TX_RING_SIZE - 1)) ? RingEnd : 0; 862 eor = (entry == (CP_TX_RING_SIZE - 1)) ? RingEnd : 0;
838 863
839 if (skb->ip_summed == CHECKSUM_HW) { 864 ctrl = eor | len | DescOwn;
840 ctrl = eor | len | DescOwn | IPCS; 865
866 if (mss)
867 ctrl |= LargeSend |
868 ((mss & MSSMask) << MSSShift);
869 else if (skb->ip_summed == CHECKSUM_HW) {
841 if (ip->protocol == IPPROTO_TCP) 870 if (ip->protocol == IPPROTO_TCP)
842 ctrl |= TCPCS; 871 ctrl |= IPCS | TCPCS;
843 else if (ip->protocol == IPPROTO_UDP) 872 else if (ip->protocol == IPPROTO_UDP)
844 ctrl |= UDPCS; 873 ctrl |= IPCS | UDPCS;
845 else 874 else
846 BUG(); 875 BUG();
847 } else 876 }
848 ctrl = eor | len | DescOwn;
849 877
850 if (frag == skb_shinfo(skb)->nr_frags - 1) 878 if (frag == skb_shinfo(skb)->nr_frags - 1)
851 ctrl |= LastFrag; 879 ctrl |= LastFrag;
@@ -860,7 +888,7 @@ static int cp_start_xmit (struct sk_buff *skb, struct net_device *dev)
860 888
861 cp->tx_skb[entry].skb = skb; 889 cp->tx_skb[entry].skb = skb;
862 cp->tx_skb[entry].mapping = mapping; 890 cp->tx_skb[entry].mapping = mapping;
863 cp->tx_skb[entry].frag = frag + 2; 891 cp->tx_skb[entry].len = len;
864 entry = NEXT_TX(entry); 892 entry = NEXT_TX(entry);
865 } 893 }
866 894
@@ -1074,7 +1102,6 @@ static int cp_refill_rx (struct cp_private *cp)
1074 cp->rx_skb[i].mapping = pci_map_single(cp->pdev, 1102 cp->rx_skb[i].mapping = pci_map_single(cp->pdev,
1075 skb->tail, cp->rx_buf_sz, PCI_DMA_FROMDEVICE); 1103 skb->tail, cp->rx_buf_sz, PCI_DMA_FROMDEVICE);
1076 cp->rx_skb[i].skb = skb; 1104 cp->rx_skb[i].skb = skb;
1077 cp->rx_skb[i].frag = 0;
1078 1105
1079 cp->rx_ring[i].opts2 = 0; 1106 cp->rx_ring[i].opts2 = 0;
1080 cp->rx_ring[i].addr = cpu_to_le64(cp->rx_skb[i].mapping); 1107 cp->rx_ring[i].addr = cpu_to_le64(cp->rx_skb[i].mapping);
@@ -1126,9 +1153,6 @@ static void cp_clean_rings (struct cp_private *cp)
1126{ 1153{
1127 unsigned i; 1154 unsigned i;
1128 1155
1129 memset(cp->rx_ring, 0, sizeof(struct cp_desc) * CP_RX_RING_SIZE);
1130 memset(cp->tx_ring, 0, sizeof(struct cp_desc) * CP_TX_RING_SIZE);
1131
1132 for (i = 0; i < CP_RX_RING_SIZE; i++) { 1156 for (i = 0; i < CP_RX_RING_SIZE; i++) {
1133 if (cp->rx_skb[i].skb) { 1157 if (cp->rx_skb[i].skb) {
1134 pci_unmap_single(cp->pdev, cp->rx_skb[i].mapping, 1158 pci_unmap_single(cp->pdev, cp->rx_skb[i].mapping,
@@ -1140,13 +1164,18 @@ static void cp_clean_rings (struct cp_private *cp)
1140 for (i = 0; i < CP_TX_RING_SIZE; i++) { 1164 for (i = 0; i < CP_TX_RING_SIZE; i++) {
1141 if (cp->tx_skb[i].skb) { 1165 if (cp->tx_skb[i].skb) {
1142 struct sk_buff *skb = cp->tx_skb[i].skb; 1166 struct sk_buff *skb = cp->tx_skb[i].skb;
1167
1143 pci_unmap_single(cp->pdev, cp->tx_skb[i].mapping, 1168 pci_unmap_single(cp->pdev, cp->tx_skb[i].mapping,
1144 skb->len, PCI_DMA_TODEVICE); 1169 cp->tx_skb[i].len, PCI_DMA_TODEVICE);
1145 dev_kfree_skb(skb); 1170 if (le32_to_cpu(cp->tx_ring[i].opts1) & LastFrag)
1171 dev_kfree_skb(skb);
1146 cp->net_stats.tx_dropped++; 1172 cp->net_stats.tx_dropped++;
1147 } 1173 }
1148 } 1174 }
1149 1175
1176 memset(cp->rx_ring, 0, sizeof(struct cp_desc) * CP_RX_RING_SIZE);
1177 memset(cp->tx_ring, 0, sizeof(struct cp_desc) * CP_TX_RING_SIZE);
1178
1150 memset(&cp->rx_skb, 0, sizeof(struct ring_info) * CP_RX_RING_SIZE); 1179 memset(&cp->rx_skb, 0, sizeof(struct ring_info) * CP_RX_RING_SIZE);
1151 memset(&cp->tx_skb, 0, sizeof(struct ring_info) * CP_TX_RING_SIZE); 1180 memset(&cp->tx_skb, 0, sizeof(struct ring_info) * CP_TX_RING_SIZE);
1152} 1181}
@@ -1538,6 +1567,8 @@ static struct ethtool_ops cp_ethtool_ops = {
1538 .set_tx_csum = ethtool_op_set_tx_csum, /* local! */ 1567 .set_tx_csum = ethtool_op_set_tx_csum, /* local! */
1539 .get_sg = ethtool_op_get_sg, 1568 .get_sg = ethtool_op_get_sg,
1540 .set_sg = ethtool_op_set_sg, 1569 .set_sg = ethtool_op_set_sg,
1570 .get_tso = ethtool_op_get_tso,
1571 .set_tso = ethtool_op_set_tso,
1541 .get_regs = cp_get_regs, 1572 .get_regs = cp_get_regs,
1542 .get_wol = cp_get_wol, 1573 .get_wol = cp_get_wol,
1543 .set_wol = cp_set_wol, 1574 .set_wol = cp_set_wol,
@@ -1749,6 +1780,9 @@ static int cp_init_one (struct pci_dev *pdev, const struct pci_device_id *ent)
1749 dev->get_stats = cp_get_stats; 1780 dev->get_stats = cp_get_stats;
1750 dev->do_ioctl = cp_ioctl; 1781 dev->do_ioctl = cp_ioctl;
1751 dev->poll = cp_rx_poll; 1782 dev->poll = cp_rx_poll;
1783#ifdef CONFIG_NET_POLL_CONTROLLER
1784 dev->poll_controller = cp_poll_controller;
1785#endif
1752 dev->weight = 16; /* arbitrary? from NAPI_HOWTO.txt. */ 1786 dev->weight = 16; /* arbitrary? from NAPI_HOWTO.txt. */
1753#ifdef BROKEN 1787#ifdef BROKEN
1754 dev->change_mtu = cp_change_mtu; 1788 dev->change_mtu = cp_change_mtu;
@@ -1768,6 +1802,10 @@ static int cp_init_one (struct pci_dev *pdev, const struct pci_device_id *ent)
1768 if (pci_using_dac) 1802 if (pci_using_dac)
1769 dev->features |= NETIF_F_HIGHDMA; 1803 dev->features |= NETIF_F_HIGHDMA;
1770 1804
1805#if 0 /* disabled by default until verified */
1806 dev->features |= NETIF_F_TSO;
1807#endif
1808
1771 dev->irq = pdev->irq; 1809 dev->irq = pdev->irq;
1772 1810
1773 rc = register_netdev(dev); 1811 rc = register_netdev(dev);
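
The 8139cp hunks above add TCP segmentation offload and fix the transmit unmapping size by recording each descriptor's own mapped length. Condensed into one place, the per-descriptor flag assembly looks roughly like the sketch below; cp_desc_flags() is a hypothetical helper name, and the bit names (DescOwn, LargeSend, MSSShift, MSSMask, IPCS, TCPCS, UDPCS) are the ones the patch defines or already uses.

static u32 cp_desc_flags(const struct sk_buff *skb, u32 eor, u32 len, int mss)
{
	/* Sketch only: assumes the driver's existing includes and enum bits. */
	u32 flags = eor | len | DescOwn;

	if (mss)		/* hardware TCP segmentation (TSO) */
		flags |= LargeSend | ((mss & MSSMask) << MSSShift);
	else if (skb->ip_summed == CHECKSUM_HW) {
		const struct iphdr *ip = skb->nh.iph;

		if (ip->protocol == IPPROTO_TCP)
			flags |= IPCS | TCPCS;
		else if (ip->protocol == IPPROTO_UDP)
			flags |= IPCS | UDPCS;
	}
	return flags;
}

The single-buffer path additionally ORs in FirstFrag | LastFrag, and because cp->tx_skb[entry].len now holds the length actually mapped for that descriptor, cp_tx() and cp_clean_rings() can pass the correct size to pci_unmap_single() instead of skb->len.
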
diff --git a/drivers/net/8139too.c b/drivers/net/8139too.c
index d4bd20c21a1f..047202c4d9a8 100644
--- a/drivers/net/8139too.c
+++ b/drivers/net/8139too.c
@@ -569,7 +569,7 @@ struct rtl_extra_stats {
569}; 569};
570 570
571struct rtl8139_private { 571struct rtl8139_private {
572 void *mmio_addr; 572 void __iomem *mmio_addr;
573 int drv_flags; 573 int drv_flags;
574 struct pci_dev *pci_dev; 574 struct pci_dev *pci_dev;
575 u32 msg_enable; 575 u32 msg_enable;
@@ -614,7 +614,7 @@ MODULE_PARM_DESC (multicast_filter_limit, "8139too maximum number of filtered mu
614MODULE_PARM_DESC (media, "8139too: Bits 4+9: force full duplex, bit 5: 100Mbps"); 614MODULE_PARM_DESC (media, "8139too: Bits 4+9: force full duplex, bit 5: 100Mbps");
615MODULE_PARM_DESC (full_duplex, "8139too: Force full duplex for board(s) (1)"); 615MODULE_PARM_DESC (full_duplex, "8139too: Force full duplex for board(s) (1)");
616 616
617static int read_eeprom (void *ioaddr, int location, int addr_len); 617static int read_eeprom (void __iomem *ioaddr, int location, int addr_len);
618static int rtl8139_open (struct net_device *dev); 618static int rtl8139_open (struct net_device *dev);
619static int mdio_read (struct net_device *dev, int phy_id, int location); 619static int mdio_read (struct net_device *dev, int phy_id, int location);
620static void mdio_write (struct net_device *dev, int phy_id, int location, 620static void mdio_write (struct net_device *dev, int phy_id, int location,
@@ -638,46 +638,20 @@ static void __set_rx_mode (struct net_device *dev);
638static void rtl8139_hw_start (struct net_device *dev); 638static void rtl8139_hw_start (struct net_device *dev);
639static struct ethtool_ops rtl8139_ethtool_ops; 639static struct ethtool_ops rtl8139_ethtool_ops;
640 640
641#ifdef USE_IO_OPS
642
643#define RTL_R8(reg) inb (((unsigned long)ioaddr) + (reg))
644#define RTL_R16(reg) inw (((unsigned long)ioaddr) + (reg))
645#define RTL_R32(reg) ((unsigned long) inl (((unsigned long)ioaddr) + (reg)))
646#define RTL_W8(reg, val8) outb ((val8), ((unsigned long)ioaddr) + (reg))
647#define RTL_W16(reg, val16) outw ((val16), ((unsigned long)ioaddr) + (reg))
648#define RTL_W32(reg, val32) outl ((val32), ((unsigned long)ioaddr) + (reg))
649#define RTL_W8_F RTL_W8
650#define RTL_W16_F RTL_W16
651#define RTL_W32_F RTL_W32
652#undef readb
653#undef readw
654#undef readl
655#undef writeb
656#undef writew
657#undef writel
658#define readb(addr) inb((unsigned long)(addr))
659#define readw(addr) inw((unsigned long)(addr))
660#define readl(addr) inl((unsigned long)(addr))
661#define writeb(val,addr) outb((val),(unsigned long)(addr))
662#define writew(val,addr) outw((val),(unsigned long)(addr))
663#define writel(val,addr) outl((val),(unsigned long)(addr))
664
665#else
666
667/* write MMIO register, with flush */ 641/* write MMIO register, with flush */
668/* Flush avoids rtl8139 bug w/ posted MMIO writes */ 642/* Flush avoids rtl8139 bug w/ posted MMIO writes */
669#define RTL_W8_F(reg, val8) do { writeb ((val8), ioaddr + (reg)); readb (ioaddr + (reg)); } while (0) 643#define RTL_W8_F(reg, val8) do { iowrite8 ((val8), ioaddr + (reg)); ioread8 (ioaddr + (reg)); } while (0)
670#define RTL_W16_F(reg, val16) do { writew ((val16), ioaddr + (reg)); readw (ioaddr + (reg)); } while (0) 644#define RTL_W16_F(reg, val16) do { iowrite16 ((val16), ioaddr + (reg)); ioread16 (ioaddr + (reg)); } while (0)
671#define RTL_W32_F(reg, val32) do { writel ((val32), ioaddr + (reg)); readl (ioaddr + (reg)); } while (0) 645#define RTL_W32_F(reg, val32) do { iowrite32 ((val32), ioaddr + (reg)); ioread32 (ioaddr + (reg)); } while (0)
672 646
673 647
674#define MMIO_FLUSH_AUDIT_COMPLETE 1 648#define MMIO_FLUSH_AUDIT_COMPLETE 1
675#if MMIO_FLUSH_AUDIT_COMPLETE 649#if MMIO_FLUSH_AUDIT_COMPLETE
676 650
677/* write MMIO register */ 651/* write MMIO register */
678#define RTL_W8(reg, val8) writeb ((val8), ioaddr + (reg)) 652#define RTL_W8(reg, val8) iowrite8 ((val8), ioaddr + (reg))
679#define RTL_W16(reg, val16) writew ((val16), ioaddr + (reg)) 653#define RTL_W16(reg, val16) iowrite16 ((val16), ioaddr + (reg))
680#define RTL_W32(reg, val32) writel ((val32), ioaddr + (reg)) 654#define RTL_W32(reg, val32) iowrite32 ((val32), ioaddr + (reg))
681 655
682#else 656#else
683 657
@@ -689,11 +663,9 @@ static struct ethtool_ops rtl8139_ethtool_ops;
689#endif /* MMIO_FLUSH_AUDIT_COMPLETE */ 663#endif /* MMIO_FLUSH_AUDIT_COMPLETE */
690 664
691/* read MMIO register */ 665/* read MMIO register */
692#define RTL_R8(reg) readb (ioaddr + (reg)) 666#define RTL_R8(reg) ioread8 (ioaddr + (reg))
693#define RTL_R16(reg) readw (ioaddr + (reg)) 667#define RTL_R16(reg) ioread16 (ioaddr + (reg))
694#define RTL_R32(reg) ((unsigned long) readl (ioaddr + (reg))) 668#define RTL_R32(reg) ((unsigned long) ioread32 (ioaddr + (reg)))
695
696#endif /* USE_IO_OPS */
697 669
698 670
699static const u16 rtl8139_intr_mask = 671static const u16 rtl8139_intr_mask =
@@ -740,10 +712,13 @@ static void __rtl8139_cleanup_dev (struct net_device *dev)
740 assert (tp->pci_dev != NULL); 712 assert (tp->pci_dev != NULL);
741 pdev = tp->pci_dev; 713 pdev = tp->pci_dev;
742 714
743#ifndef USE_IO_OPS 715#ifdef USE_IO_OPS
716 if (tp->mmio_addr)
717 ioport_unmap (tp->mmio_addr);
718#else
744 if (tp->mmio_addr) 719 if (tp->mmio_addr)
745 iounmap (tp->mmio_addr); 720 pci_iounmap (pdev, tp->mmio_addr);
746#endif /* !USE_IO_OPS */ 721#endif /* USE_IO_OPS */
747 722
748 /* it's ok to call this even if we have no regions to free */ 723 /* it's ok to call this even if we have no regions to free */
749 pci_release_regions (pdev); 724 pci_release_regions (pdev);
@@ -753,7 +728,7 @@ static void __rtl8139_cleanup_dev (struct net_device *dev)
753} 728}
754 729
755 730
756static void rtl8139_chip_reset (void *ioaddr) 731static void rtl8139_chip_reset (void __iomem *ioaddr)
757{ 732{
758 int i; 733 int i;
759 734
@@ -773,7 +748,7 @@ static void rtl8139_chip_reset (void *ioaddr)
773static int __devinit rtl8139_init_board (struct pci_dev *pdev, 748static int __devinit rtl8139_init_board (struct pci_dev *pdev,
774 struct net_device **dev_out) 749 struct net_device **dev_out)
775{ 750{
776 void *ioaddr; 751 void __iomem *ioaddr;
777 struct net_device *dev; 752 struct net_device *dev;
778 struct rtl8139_private *tp; 753 struct rtl8139_private *tp;
779 u8 tmp8; 754 u8 tmp8;
@@ -855,13 +830,18 @@ static int __devinit rtl8139_init_board (struct pci_dev *pdev,
855 pci_set_master (pdev); 830 pci_set_master (pdev);
856 831
857#ifdef USE_IO_OPS 832#ifdef USE_IO_OPS
858 ioaddr = (void *) pio_start; 833 ioaddr = ioport_map(pio_start, pio_len);
834 if (!ioaddr) {
835 printk (KERN_ERR PFX "%s: cannot map PIO, aborting\n", pci_name(pdev));
836 rc = -EIO;
837 goto err_out;
838 }
859 dev->base_addr = pio_start; 839 dev->base_addr = pio_start;
860 tp->mmio_addr = ioaddr; 840 tp->mmio_addr = ioaddr;
861 tp->regs_len = pio_len; 841 tp->regs_len = pio_len;
862#else 842#else
863 /* ioremap MMIO region */ 843 /* ioremap MMIO region */
864 ioaddr = ioremap (mmio_start, mmio_len); 844 ioaddr = pci_iomap(pdev, 1, 0);
865 if (ioaddr == NULL) { 845 if (ioaddr == NULL) {
866 printk (KERN_ERR PFX "%s: cannot remap MMIO, aborting\n", pci_name(pdev)); 846 printk (KERN_ERR PFX "%s: cannot remap MMIO, aborting\n", pci_name(pdev));
867 rc = -EIO; 847 rc = -EIO;
@@ -947,7 +927,7 @@ static int __devinit rtl8139_init_one (struct pci_dev *pdev,
947 struct net_device *dev = NULL; 927 struct net_device *dev = NULL;
948 struct rtl8139_private *tp; 928 struct rtl8139_private *tp;
949 int i, addr_len, option; 929 int i, addr_len, option;
950 void *ioaddr; 930 void __iomem *ioaddr;
951 static int board_idx = -1; 931 static int board_idx = -1;
952 u8 pci_rev; 932 u8 pci_rev;
953 933
@@ -1147,47 +1127,46 @@ static void __devexit rtl8139_remove_one (struct pci_dev *pdev)
1147 No extra delay is needed with 33Mhz PCI, but 66Mhz may change this. 1127 No extra delay is needed with 33Mhz PCI, but 66Mhz may change this.
1148 */ 1128 */
1149 1129
1150#define eeprom_delay() readl(ee_addr) 1130#define eeprom_delay() RTL_R32(Cfg9346)
1151 1131
1152/* The EEPROM commands include the alway-set leading bit. */ 1132/* The EEPROM commands include the alway-set leading bit. */
1153#define EE_WRITE_CMD (5) 1133#define EE_WRITE_CMD (5)
1154#define EE_READ_CMD (6) 1134#define EE_READ_CMD (6)
1155#define EE_ERASE_CMD (7) 1135#define EE_ERASE_CMD (7)
1156 1136
1157static int __devinit read_eeprom (void *ioaddr, int location, int addr_len) 1137static int __devinit read_eeprom (void __iomem *ioaddr, int location, int addr_len)
1158{ 1138{
1159 int i; 1139 int i;
1160 unsigned retval = 0; 1140 unsigned retval = 0;
1161 void *ee_addr = ioaddr + Cfg9346;
1162 int read_cmd = location | (EE_READ_CMD << addr_len); 1141 int read_cmd = location | (EE_READ_CMD << addr_len);
1163 1142
1164 writeb (EE_ENB & ~EE_CS, ee_addr); 1143 RTL_W8 (Cfg9346, EE_ENB & ~EE_CS);
1165 writeb (EE_ENB, ee_addr); 1144 RTL_W8 (Cfg9346, EE_ENB);
1166 eeprom_delay (); 1145 eeprom_delay ();
1167 1146
1168 /* Shift the read command bits out. */ 1147 /* Shift the read command bits out. */
1169 for (i = 4 + addr_len; i >= 0; i--) { 1148 for (i = 4 + addr_len; i >= 0; i--) {
1170 int dataval = (read_cmd & (1 << i)) ? EE_DATA_WRITE : 0; 1149 int dataval = (read_cmd & (1 << i)) ? EE_DATA_WRITE : 0;
1171 writeb (EE_ENB | dataval, ee_addr); 1150 RTL_W8 (Cfg9346, EE_ENB | dataval);
1172 eeprom_delay (); 1151 eeprom_delay ();
1173 writeb (EE_ENB | dataval | EE_SHIFT_CLK, ee_addr); 1152 RTL_W8 (Cfg9346, EE_ENB | dataval | EE_SHIFT_CLK);
1174 eeprom_delay (); 1153 eeprom_delay ();
1175 } 1154 }
1176 writeb (EE_ENB, ee_addr); 1155 RTL_W8 (Cfg9346, EE_ENB);
1177 eeprom_delay (); 1156 eeprom_delay ();
1178 1157
1179 for (i = 16; i > 0; i--) { 1158 for (i = 16; i > 0; i--) {
1180 writeb (EE_ENB | EE_SHIFT_CLK, ee_addr); 1159 RTL_W8 (Cfg9346, EE_ENB | EE_SHIFT_CLK);
1181 eeprom_delay (); 1160 eeprom_delay ();
1182 retval = 1161 retval =
1183 (retval << 1) | ((readb (ee_addr) & EE_DATA_READ) ? 1 : 1162 (retval << 1) | ((RTL_R8 (Cfg9346) & EE_DATA_READ) ? 1 :
1184 0); 1163 0);
1185 writeb (EE_ENB, ee_addr); 1164 RTL_W8 (Cfg9346, EE_ENB);
1186 eeprom_delay (); 1165 eeprom_delay ();
1187 } 1166 }
1188 1167
1189 /* Terminate the EEPROM access. */ 1168 /* Terminate the EEPROM access. */
1190 writeb (~EE_CS, ee_addr); 1169 RTL_W8 (Cfg9346, ~EE_CS);
1191 eeprom_delay (); 1170 eeprom_delay ();
1192 1171
1193 return retval; 1172 return retval;
@@ -1206,7 +1185,7 @@ static int __devinit read_eeprom (void *ioaddr, int location, int addr_len)
1206#define MDIO_WRITE0 (MDIO_DIR) 1185#define MDIO_WRITE0 (MDIO_DIR)
1207#define MDIO_WRITE1 (MDIO_DIR | MDIO_DATA_OUT) 1186#define MDIO_WRITE1 (MDIO_DIR | MDIO_DATA_OUT)
1208 1187
1209#define mdio_delay(mdio_addr) readb(mdio_addr) 1188#define mdio_delay() RTL_R8(Config4)
1210 1189
1211 1190
1212static char mii_2_8139_map[8] = { 1191static char mii_2_8139_map[8] = {
@@ -1223,15 +1202,15 @@ static char mii_2_8139_map[8] = {
1223 1202
1224#ifdef CONFIG_8139TOO_8129 1203#ifdef CONFIG_8139TOO_8129
1225/* Syncronize the MII management interface by shifting 32 one bits out. */ 1204/* Syncronize the MII management interface by shifting 32 one bits out. */
1226static void mdio_sync (void *mdio_addr) 1205static void mdio_sync (void __iomem *ioaddr)
1227{ 1206{
1228 int i; 1207 int i;
1229 1208
1230 for (i = 32; i >= 0; i--) { 1209 for (i = 32; i >= 0; i--) {
1231 writeb (MDIO_WRITE1, mdio_addr); 1210 RTL_W8 (Config4, MDIO_WRITE1);
1232 mdio_delay (mdio_addr); 1211 mdio_delay ();
1233 writeb (MDIO_WRITE1 | MDIO_CLK, mdio_addr); 1212 RTL_W8 (Config4, MDIO_WRITE1 | MDIO_CLK);
1234 mdio_delay (mdio_addr); 1213 mdio_delay ();
1235 } 1214 }
1236} 1215}
1237#endif 1216#endif
@@ -1241,35 +1220,36 @@ static int mdio_read (struct net_device *dev, int phy_id, int location)
1241 struct rtl8139_private *tp = netdev_priv(dev); 1220 struct rtl8139_private *tp = netdev_priv(dev);
1242 int retval = 0; 1221 int retval = 0;
1243#ifdef CONFIG_8139TOO_8129 1222#ifdef CONFIG_8139TOO_8129
1244 void *mdio_addr = tp->mmio_addr + Config4; 1223 void __iomem *ioaddr = tp->mmio_addr;
1245 int mii_cmd = (0xf6 << 10) | (phy_id << 5) | location; 1224 int mii_cmd = (0xf6 << 10) | (phy_id << 5) | location;
1246 int i; 1225 int i;
1247#endif 1226#endif
1248 1227
1249 if (phy_id > 31) { /* Really a 8139. Use internal registers. */ 1228 if (phy_id > 31) { /* Really a 8139. Use internal registers. */
1229 void __iomem *ioaddr = tp->mmio_addr;
1250 return location < 8 && mii_2_8139_map[location] ? 1230 return location < 8 && mii_2_8139_map[location] ?
1251 readw (tp->mmio_addr + mii_2_8139_map[location]) : 0; 1231 RTL_R16 (mii_2_8139_map[location]) : 0;
1252 } 1232 }
1253 1233
1254#ifdef CONFIG_8139TOO_8129 1234#ifdef CONFIG_8139TOO_8129
1255 mdio_sync (mdio_addr); 1235 mdio_sync (ioaddr);
1256 /* Shift the read command bits out. */ 1236 /* Shift the read command bits out. */
1257 for (i = 15; i >= 0; i--) { 1237 for (i = 15; i >= 0; i--) {
1258 int dataval = (mii_cmd & (1 << i)) ? MDIO_DATA_OUT : 0; 1238 int dataval = (mii_cmd & (1 << i)) ? MDIO_DATA_OUT : 0;
1259 1239
1260 writeb (MDIO_DIR | dataval, mdio_addr); 1240 RTL_W8 (Config4, MDIO_DIR | dataval);
1261 mdio_delay (mdio_addr); 1241 mdio_delay ();
1262 writeb (MDIO_DIR | dataval | MDIO_CLK, mdio_addr); 1242 RTL_W8 (Config4, MDIO_DIR | dataval | MDIO_CLK);
1263 mdio_delay (mdio_addr); 1243 mdio_delay ();
1264 } 1244 }
1265 1245
1266 /* Read the two transition, 16 data, and wire-idle bits. */ 1246 /* Read the two transition, 16 data, and wire-idle bits. */
1267 for (i = 19; i > 0; i--) { 1247 for (i = 19; i > 0; i--) {
1268 writeb (0, mdio_addr); 1248 RTL_W8 (Config4, 0);
1269 mdio_delay (mdio_addr); 1249 mdio_delay ();
1270 retval = (retval << 1) | ((readb (mdio_addr) & MDIO_DATA_IN) ? 1 : 0); 1250 retval = (retval << 1) | ((RTL_R8 (Config4) & MDIO_DATA_IN) ? 1 : 0);
1271 writeb (MDIO_CLK, mdio_addr); 1251 RTL_W8 (Config4, MDIO_CLK);
1272 mdio_delay (mdio_addr); 1252 mdio_delay ();
1273 } 1253 }
1274#endif 1254#endif
1275 1255
@@ -1282,13 +1262,13 @@ static void mdio_write (struct net_device *dev, int phy_id, int location,
1282{ 1262{
1283 struct rtl8139_private *tp = netdev_priv(dev); 1263 struct rtl8139_private *tp = netdev_priv(dev);
1284#ifdef CONFIG_8139TOO_8129 1264#ifdef CONFIG_8139TOO_8129
1285 void *mdio_addr = tp->mmio_addr + Config4; 1265 void __iomem *ioaddr = tp->mmio_addr;
1286 int mii_cmd = (0x5002 << 16) | (phy_id << 23) | (location << 18) | value; 1266 int mii_cmd = (0x5002 << 16) | (phy_id << 23) | (location << 18) | value;
1287 int i; 1267 int i;
1288#endif 1268#endif
1289 1269
1290 if (phy_id > 31) { /* Really a 8139. Use internal registers. */ 1270 if (phy_id > 31) { /* Really a 8139. Use internal registers. */
1291 void *ioaddr = tp->mmio_addr; 1271 void __iomem *ioaddr = tp->mmio_addr;
1292 if (location == 0) { 1272 if (location == 0) {
1293 RTL_W8 (Cfg9346, Cfg9346_Unlock); 1273 RTL_W8 (Cfg9346, Cfg9346_Unlock);
1294 RTL_W16 (BasicModeCtrl, value); 1274 RTL_W16 (BasicModeCtrl, value);
@@ -1299,23 +1279,23 @@ static void mdio_write (struct net_device *dev, int phy_id, int location,
1299 } 1279 }
1300 1280
1301#ifdef CONFIG_8139TOO_8129 1281#ifdef CONFIG_8139TOO_8129
1302 mdio_sync (mdio_addr); 1282 mdio_sync (ioaddr);
1303 1283
1304 /* Shift the command bits out. */ 1284 /* Shift the command bits out. */
1305 for (i = 31; i >= 0; i--) { 1285 for (i = 31; i >= 0; i--) {
1306 int dataval = 1286 int dataval =
1307 (mii_cmd & (1 << i)) ? MDIO_WRITE1 : MDIO_WRITE0; 1287 (mii_cmd & (1 << i)) ? MDIO_WRITE1 : MDIO_WRITE0;
1308 writeb (dataval, mdio_addr); 1288 RTL_W8 (Config4, dataval);
1309 mdio_delay (mdio_addr); 1289 mdio_delay ();
1310 writeb (dataval | MDIO_CLK, mdio_addr); 1290 RTL_W8 (Config4, dataval | MDIO_CLK);
1311 mdio_delay (mdio_addr); 1291 mdio_delay ();
1312 } 1292 }
1313 /* Clear out extra bits. */ 1293 /* Clear out extra bits. */
1314 for (i = 2; i > 0; i--) { 1294 for (i = 2; i > 0; i--) {
1315 writeb (0, mdio_addr); 1295 RTL_W8 (Config4, 0);
1316 mdio_delay (mdio_addr); 1296 mdio_delay ();
1317 writeb (MDIO_CLK, mdio_addr); 1297 RTL_W8 (Config4, MDIO_CLK);
1318 mdio_delay (mdio_addr); 1298 mdio_delay ();
1319 } 1299 }
1320#endif 1300#endif
1321} 1301}
@@ -1325,7 +1305,7 @@ static int rtl8139_open (struct net_device *dev)
1325{ 1305{
1326 struct rtl8139_private *tp = netdev_priv(dev); 1306 struct rtl8139_private *tp = netdev_priv(dev);
1327 int retval; 1307 int retval;
1328 void *ioaddr = tp->mmio_addr; 1308 void __iomem *ioaddr = tp->mmio_addr;
1329 1309
1330 retval = request_irq (dev->irq, rtl8139_interrupt, SA_SHIRQ, dev->name, dev); 1310 retval = request_irq (dev->irq, rtl8139_interrupt, SA_SHIRQ, dev->name, dev);
1331 if (retval) 1311 if (retval)
@@ -1382,7 +1362,7 @@ static void rtl_check_media (struct net_device *dev, unsigned int init_media)
1382static void rtl8139_hw_start (struct net_device *dev) 1362static void rtl8139_hw_start (struct net_device *dev)
1383{ 1363{
1384 struct rtl8139_private *tp = netdev_priv(dev); 1364 struct rtl8139_private *tp = netdev_priv(dev);
1385 void *ioaddr = tp->mmio_addr; 1365 void __iomem *ioaddr = tp->mmio_addr;
1386 u32 i; 1366 u32 i;
1387 u8 tmp; 1367 u8 tmp;
1388 1368
@@ -1484,7 +1464,7 @@ static void rtl8139_tune_twister (struct net_device *dev,
1484 struct rtl8139_private *tp) 1464 struct rtl8139_private *tp)
1485{ 1465{
1486 int linkcase; 1466 int linkcase;
1487 void *ioaddr = tp->mmio_addr; 1467 void __iomem *ioaddr = tp->mmio_addr;
1488 1468
1489 /* This is a complicated state machine to configure the "twister" for 1469 /* This is a complicated state machine to configure the "twister" for
1490 impedance/echos based on the cable length. 1470 impedance/echos based on the cable length.
@@ -1568,7 +1548,7 @@ static void rtl8139_tune_twister (struct net_device *dev,
1568 1548
1569static inline void rtl8139_thread_iter (struct net_device *dev, 1549static inline void rtl8139_thread_iter (struct net_device *dev,
1570 struct rtl8139_private *tp, 1550 struct rtl8139_private *tp,
1571 void *ioaddr) 1551 void __iomem *ioaddr)
1572{ 1552{
1573 int mii_lpa; 1553 int mii_lpa;
1574 1554
@@ -1676,7 +1656,7 @@ static inline void rtl8139_tx_clear (struct rtl8139_private *tp)
1676static void rtl8139_tx_timeout (struct net_device *dev) 1656static void rtl8139_tx_timeout (struct net_device *dev)
1677{ 1657{
1678 struct rtl8139_private *tp = netdev_priv(dev); 1658 struct rtl8139_private *tp = netdev_priv(dev);
1679 void *ioaddr = tp->mmio_addr; 1659 void __iomem *ioaddr = tp->mmio_addr;
1680 int i; 1660 int i;
1681 u8 tmp8; 1661 u8 tmp8;
1682 unsigned long flags; 1662 unsigned long flags;
@@ -1721,7 +1701,7 @@ static void rtl8139_tx_timeout (struct net_device *dev)
1721static int rtl8139_start_xmit (struct sk_buff *skb, struct net_device *dev) 1701static int rtl8139_start_xmit (struct sk_buff *skb, struct net_device *dev)
1722{ 1702{
1723 struct rtl8139_private *tp = netdev_priv(dev); 1703 struct rtl8139_private *tp = netdev_priv(dev);
1724 void *ioaddr = tp->mmio_addr; 1704 void __iomem *ioaddr = tp->mmio_addr;
1725 unsigned int entry; 1705 unsigned int entry;
1726 unsigned int len = skb->len; 1706 unsigned int len = skb->len;
1727 1707
@@ -1763,7 +1743,7 @@ static int rtl8139_start_xmit (struct sk_buff *skb, struct net_device *dev)
1763 1743
1764static void rtl8139_tx_interrupt (struct net_device *dev, 1744static void rtl8139_tx_interrupt (struct net_device *dev,
1765 struct rtl8139_private *tp, 1745 struct rtl8139_private *tp,
1766 void *ioaddr) 1746 void __iomem *ioaddr)
1767{ 1747{
1768 unsigned long dirty_tx, tx_left; 1748 unsigned long dirty_tx, tx_left;
1769 1749
@@ -1833,7 +1813,7 @@ static void rtl8139_tx_interrupt (struct net_device *dev,
1833 1813
1834/* TODO: clean this up! Rx reset need not be this intensive */ 1814/* TODO: clean this up! Rx reset need not be this intensive */
1835static void rtl8139_rx_err (u32 rx_status, struct net_device *dev, 1815static void rtl8139_rx_err (u32 rx_status, struct net_device *dev,
1836 struct rtl8139_private *tp, void *ioaddr) 1816 struct rtl8139_private *tp, void __iomem *ioaddr)
1837{ 1817{
1838 u8 tmp8; 1818 u8 tmp8;
1839#ifdef CONFIG_8139_OLD_RX_RESET 1819#ifdef CONFIG_8139_OLD_RX_RESET
@@ -1930,7 +1910,7 @@ static __inline__ void wrap_copy(struct sk_buff *skb, const unsigned char *ring,
1930 1910
1931static void rtl8139_isr_ack(struct rtl8139_private *tp) 1911static void rtl8139_isr_ack(struct rtl8139_private *tp)
1932{ 1912{
1933 void *ioaddr = tp->mmio_addr; 1913 void __iomem *ioaddr = tp->mmio_addr;
1934 u16 status; 1914 u16 status;
1935 1915
1936 status = RTL_R16 (IntrStatus) & RxAckBits; 1916 status = RTL_R16 (IntrStatus) & RxAckBits;
@@ -1949,7 +1929,7 @@ static void rtl8139_isr_ack(struct rtl8139_private *tp)
1949static int rtl8139_rx(struct net_device *dev, struct rtl8139_private *tp, 1929static int rtl8139_rx(struct net_device *dev, struct rtl8139_private *tp,
1950 int budget) 1930 int budget)
1951{ 1931{
1952 void *ioaddr = tp->mmio_addr; 1932 void __iomem *ioaddr = tp->mmio_addr;
1953 int received = 0; 1933 int received = 0;
1954 unsigned char *rx_ring = tp->rx_ring; 1934 unsigned char *rx_ring = tp->rx_ring;
1955 unsigned int cur_rx = tp->cur_rx; 1935 unsigned int cur_rx = tp->cur_rx;
@@ -2087,7 +2067,7 @@ out:
2087 2067
2088static void rtl8139_weird_interrupt (struct net_device *dev, 2068static void rtl8139_weird_interrupt (struct net_device *dev,
2089 struct rtl8139_private *tp, 2069 struct rtl8139_private *tp,
2090 void *ioaddr, 2070 void __iomem *ioaddr,
2091 int status, int link_changed) 2071 int status, int link_changed)
2092{ 2072{
2093 DPRINTK ("%s: Abnormal interrupt, status %8.8x.\n", 2073 DPRINTK ("%s: Abnormal interrupt, status %8.8x.\n",
@@ -2127,7 +2107,7 @@ static void rtl8139_weird_interrupt (struct net_device *dev,
2127static int rtl8139_poll(struct net_device *dev, int *budget) 2107static int rtl8139_poll(struct net_device *dev, int *budget)
2128{ 2108{
2129 struct rtl8139_private *tp = netdev_priv(dev); 2109 struct rtl8139_private *tp = netdev_priv(dev);
2130 void *ioaddr = tp->mmio_addr; 2110 void __iomem *ioaddr = tp->mmio_addr;
2131 int orig_budget = min(*budget, dev->quota); 2111 int orig_budget = min(*budget, dev->quota);
2132 int done = 1; 2112 int done = 1;
2133 2113
@@ -2165,7 +2145,7 @@ static irqreturn_t rtl8139_interrupt (int irq, void *dev_instance,
2165{ 2145{
2166 struct net_device *dev = (struct net_device *) dev_instance; 2146 struct net_device *dev = (struct net_device *) dev_instance;
2167 struct rtl8139_private *tp = netdev_priv(dev); 2147 struct rtl8139_private *tp = netdev_priv(dev);
2168 void *ioaddr = tp->mmio_addr; 2148 void __iomem *ioaddr = tp->mmio_addr;
2169 u16 status, ackstat; 2149 u16 status, ackstat;
2170 int link_changed = 0; /* avoid bogus "uninit" warning */ 2150 int link_changed = 0; /* avoid bogus "uninit" warning */
2171 int handled = 0; 2151 int handled = 0;
@@ -2241,7 +2221,7 @@ static void rtl8139_poll_controller(struct net_device *dev)
2241static int rtl8139_close (struct net_device *dev) 2221static int rtl8139_close (struct net_device *dev)
2242{ 2222{
2243 struct rtl8139_private *tp = netdev_priv(dev); 2223 struct rtl8139_private *tp = netdev_priv(dev);
2244 void *ioaddr = tp->mmio_addr; 2224 void __iomem *ioaddr = tp->mmio_addr;
2245 int ret = 0; 2225 int ret = 0;
2246 unsigned long flags; 2226 unsigned long flags;
2247 2227
@@ -2304,7 +2284,7 @@ static int rtl8139_close (struct net_device *dev)
2304static void rtl8139_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol) 2284static void rtl8139_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
2305{ 2285{
2306 struct rtl8139_private *np = netdev_priv(dev); 2286 struct rtl8139_private *np = netdev_priv(dev);
2307 void *ioaddr = np->mmio_addr; 2287 void __iomem *ioaddr = np->mmio_addr;
2308 2288
2309 spin_lock_irq(&np->lock); 2289 spin_lock_irq(&np->lock);
2310 if (rtl_chip_info[np->chipset].flags & HasLWake) { 2290 if (rtl_chip_info[np->chipset].flags & HasLWake) {
@@ -2338,7 +2318,7 @@ static void rtl8139_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
2338static int rtl8139_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol) 2318static int rtl8139_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
2339{ 2319{
2340 struct rtl8139_private *np = netdev_priv(dev); 2320 struct rtl8139_private *np = netdev_priv(dev);
2341 void *ioaddr = np->mmio_addr; 2321 void __iomem *ioaddr = np->mmio_addr;
2342 u32 support; 2322 u32 support;
2343 u8 cfg3, cfg5; 2323 u8 cfg3, cfg5;
2344 2324
@@ -2506,7 +2486,7 @@ static int netdev_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
2506static struct net_device_stats *rtl8139_get_stats (struct net_device *dev) 2486static struct net_device_stats *rtl8139_get_stats (struct net_device *dev)
2507{ 2487{
2508 struct rtl8139_private *tp = netdev_priv(dev); 2488 struct rtl8139_private *tp = netdev_priv(dev);
2509 void *ioaddr = tp->mmio_addr; 2489 void __iomem *ioaddr = tp->mmio_addr;
2510 unsigned long flags; 2490 unsigned long flags;
2511 2491
2512 if (netif_running(dev)) { 2492 if (netif_running(dev)) {
@@ -2525,7 +2505,7 @@ static struct net_device_stats *rtl8139_get_stats (struct net_device *dev)
2525static void __set_rx_mode (struct net_device *dev) 2505static void __set_rx_mode (struct net_device *dev)
2526{ 2506{
2527 struct rtl8139_private *tp = netdev_priv(dev); 2507 struct rtl8139_private *tp = netdev_priv(dev);
2528 void *ioaddr = tp->mmio_addr; 2508 void __iomem *ioaddr = tp->mmio_addr;
2529 u32 mc_filter[2]; /* Multicast hash filter */ 2509 u32 mc_filter[2]; /* Multicast hash filter */
2530 int i, rx_mode; 2510 int i, rx_mode;
2531 u32 tmp; 2511 u32 tmp;
@@ -2586,7 +2566,7 @@ static int rtl8139_suspend (struct pci_dev *pdev, pm_message_t state)
2586{ 2566{
2587 struct net_device *dev = pci_get_drvdata (pdev); 2567 struct net_device *dev = pci_get_drvdata (pdev);
2588 struct rtl8139_private *tp = netdev_priv(dev); 2568 struct rtl8139_private *tp = netdev_priv(dev);
2589 void *ioaddr = tp->mmio_addr; 2569 void __iomem *ioaddr = tp->mmio_addr;
2590 unsigned long flags; 2570 unsigned long flags;
2591 2571
2592 pci_save_state (pdev); 2572 pci_save_state (pdev);
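
Most of the 8139too churn above replaces the bare void * register pointer and the inb/outb macro block with a void __iomem * cookie accessed through the ioreadN/iowriteN accessors, so a single set of RTL_R8/16/32 and RTL_W8/16/32 macros serves both the PIO and MMIO builds. A minimal sketch of the mapping half of that pattern, under the assumption that rtl_map_regs() and rtl_unmap_regs() are illustrative helper names rather than functions in the driver:

static void __iomem *rtl_map_regs(struct pci_dev *pdev,
				  unsigned long pio_start, unsigned long pio_len)
{
#ifdef USE_IO_OPS
	return ioport_map(pio_start, pio_len);	/* I/O-port region */
#else
	return pci_iomap(pdev, 1, 0);		/* MMIO BAR 1, full length */
#endif
}

static void rtl_unmap_regs(struct pci_dev *pdev, void __iomem *ioaddr)
{
#ifdef USE_IO_OPS
	ioport_unmap(ioaddr);
#else
	pci_iounmap(pdev, ioaddr);
#endif
}

Either way the caller only has to check the returned cookie for NULL, and the EEPROM and MDIO bit-banging helpers can take the same cookie and use RTL_R8/RTL_W8 on Cfg9346 and Config4 instead of raw readb/writeb on a precomputed address.
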
diff --git a/drivers/net/Kconfig b/drivers/net/Kconfig
index f08e01b2fd19..fa9f76c953dd 100644
--- a/drivers/net/Kconfig
+++ b/drivers/net/Kconfig
@@ -824,6 +824,18 @@ config SMC9194
824 <file:Documentation/networking/net-modules.txt>. The module 824 <file:Documentation/networking/net-modules.txt>. The module
825 will be called smc9194. 825 will be called smc9194.
826 826
827config DM9000
828 tristate "DM9000 support"
829 depends on ARM && NET_ETHERNET
830 select CRC32
831 select MII
832 ---help---
833 Support for DM9000 chipset.
834
835 To compile this driver as a module, choose M here and read
836 <file:Documentation/networking/net-modules.txt>. The module will be
837 called dm9000.
838
827config NET_VENDOR_RACAL 839config NET_VENDOR_RACAL
828 bool "Racal-Interlan (Micom) NI cards" 840 bool "Racal-Interlan (Micom) NI cards"
829 depends on NET_ETHERNET && ISA 841 depends on NET_ETHERNET && ISA
@@ -989,21 +1001,6 @@ config EEXPRESS_PRO
989 <file:Documentation/networking/net-modules.txt>. The module 1001 <file:Documentation/networking/net-modules.txt>. The module
990 will be called eepro. 1002 will be called eepro.
991 1003
992config FMV18X
993 tristate "FMV-181/182/183/184 support (OBSOLETE)"
994 depends on NET_ISA && OBSOLETE
995 ---help---
996 If you have a Fujitsu FMV-181/182/183/184 network (Ethernet) card,
997 say Y and read the Ethernet-HOWTO, available from
998 <http://www.tldp.org/docs.html#howto>.
999
1000 If you use an FMV-183 or FMV-184 and it is not working, you may need
1001 to disable Plug & Play mode of the card.
1002
1003 To compile this driver as a module, choose M here and read
1004 <file:Documentation/networking/net-modules.txt>. The module
1005 will be called fmv18x.
1006
1007config HPLAN_PLUS 1004config HPLAN_PLUS
1008 tristate "HP PCLAN+ (27247B and 27252A) support" 1005 tristate "HP PCLAN+ (27247B and 27252A) support"
1009 depends on NET_ISA 1006 depends on NET_ISA
@@ -1092,14 +1089,6 @@ config SEEQ8005
1092 <file:Documentation/networking/net-modules.txt>. The module 1089 <file:Documentation/networking/net-modules.txt>. The module
1093 will be called seeq8005. 1090 will be called seeq8005.
1094 1091
1095config SK_G16
1096 tristate "SK_G16 support (OBSOLETE)"
1097 depends on NET_ISA && OBSOLETE
1098 help
1099 If you have a network (Ethernet) card of this type, say Y and read
1100 the Ethernet-HOWTO, available from
1101 <http://www.tldp.org/docs.html#howto>.
1102
1103config SKMC 1092config SKMC
1104 tristate "SKnet MCA support" 1093 tristate "SKnet MCA support"
1105 depends on NET_ETHERNET && MCA && BROKEN 1094 depends on NET_ETHERNET && MCA && BROKEN
@@ -1932,6 +1921,18 @@ config R8169_VLAN
1932 1921
1933 If in doubt, say Y. 1922 If in doubt, say Y.
1934 1923
1924config SKGE
1925 tristate "New SysKonnect GigaEthernet support (EXPERIMENTAL)"
1926 depends on PCI && EXPERIMENTAL
1927 select CRC32
1928 ---help---
1929 This driver support the Marvell Yukon or SysKonnect SK-98xx/SK-95xx
1930 and related Gigabit Ethernet adapters. It is a new smaller driver
1931 driver with better performance and more complete ethtool support.
1932
1933 It does not support the link failover and network management
1934 features that "portable" vendor supplied sk98lin driver does.
1935
1935config SK98LIN 1936config SK98LIN
1936 tristate "Marvell Yukon Chipset / SysKonnect SK-98xx Support" 1937 tristate "Marvell Yukon Chipset / SysKonnect SK-98xx Support"
1937 depends on PCI 1938 depends on PCI
diff --git a/drivers/net/Makefile b/drivers/net/Makefile
index 30c7567001fe..63c6d1e6d4d9 100644
--- a/drivers/net/Makefile
+++ b/drivers/net/Makefile
@@ -53,6 +53,7 @@ obj-$(CONFIG_FEALNX) += fealnx.o
53obj-$(CONFIG_TIGON3) += tg3.o 53obj-$(CONFIG_TIGON3) += tg3.o
54obj-$(CONFIG_BNX2) += bnx2.o 54obj-$(CONFIG_BNX2) += bnx2.o
55obj-$(CONFIG_TC35815) += tc35815.o 55obj-$(CONFIG_TC35815) += tc35815.o
56obj-$(CONFIG_SKGE) += skge.o
56obj-$(CONFIG_SK98LIN) += sk98lin/ 57obj-$(CONFIG_SK98LIN) += sk98lin/
57obj-$(CONFIG_SKFP) += skfp/ 58obj-$(CONFIG_SKFP) += skfp/
58obj-$(CONFIG_VIA_RHINE) += via-rhine.o 59obj-$(CONFIG_VIA_RHINE) += via-rhine.o
@@ -74,7 +75,6 @@ obj-$(CONFIG_MAC8390) += mac8390.o 8390.o
74obj-$(CONFIG_APNE) += apne.o 8390.o 75obj-$(CONFIG_APNE) += apne.o 8390.o
75obj-$(CONFIG_PCMCIA_PCNET) += 8390.o 76obj-$(CONFIG_PCMCIA_PCNET) += 8390.o
76obj-$(CONFIG_SHAPER) += shaper.o 77obj-$(CONFIG_SHAPER) += shaper.o
77obj-$(CONFIG_SK_G16) += sk_g16.o
78obj-$(CONFIG_HP100) += hp100.o 78obj-$(CONFIG_HP100) += hp100.o
79obj-$(CONFIG_SMC9194) += smc9194.o 79obj-$(CONFIG_SMC9194) += smc9194.o
80obj-$(CONFIG_FEC) += fec.o 80obj-$(CONFIG_FEC) += fec.o
@@ -122,7 +122,6 @@ obj-$(CONFIG_DEFXX) += defxx.o
122obj-$(CONFIG_SGISEEQ) += sgiseeq.o 122obj-$(CONFIG_SGISEEQ) += sgiseeq.o
123obj-$(CONFIG_SGI_O2MACE_ETH) += meth.o 123obj-$(CONFIG_SGI_O2MACE_ETH) += meth.o
124obj-$(CONFIG_AT1700) += at1700.o 124obj-$(CONFIG_AT1700) += at1700.o
125obj-$(CONFIG_FMV18X) += fmv18x.o
126obj-$(CONFIG_EL1) += 3c501.o 125obj-$(CONFIG_EL1) += 3c501.o
127obj-$(CONFIG_EL16) += 3c507.o 126obj-$(CONFIG_EL16) += 3c507.o
128obj-$(CONFIG_ELMC) += 3c523.o 127obj-$(CONFIG_ELMC) += 3c523.o
@@ -180,6 +179,7 @@ obj-$(CONFIG_AMD8111_ETH) += amd8111e.o
180obj-$(CONFIG_IBMVETH) += ibmveth.o 179obj-$(CONFIG_IBMVETH) += ibmveth.o
181obj-$(CONFIG_S2IO) += s2io.o 180obj-$(CONFIG_S2IO) += s2io.o
182obj-$(CONFIG_SMC91X) += smc91x.o 181obj-$(CONFIG_SMC91X) += smc91x.o
182obj-$(CONFIG_DM9000) += dm9000.o
183obj-$(CONFIG_FEC_8XX) += fec_8xx/ 183obj-$(CONFIG_FEC_8XX) += fec_8xx/
184 184
185obj-$(CONFIG_ARM) += arm/ 185obj-$(CONFIG_ARM) += arm/
diff --git a/drivers/net/Space.c b/drivers/net/Space.c
index fb433325aa27..3707df6b0cfa 100644
--- a/drivers/net/Space.c
+++ b/drivers/net/Space.c
@@ -210,9 +210,6 @@ static struct devprobe2 isa_probes[] __initdata = {
210#ifdef CONFIG_AT1700 210#ifdef CONFIG_AT1700
211 {at1700_probe, 0}, 211 {at1700_probe, 0},
212#endif 212#endif
213#ifdef CONFIG_FMV18X /* Fujitsu FMV-181/182 */
214 {fmv18x_probe, 0},
215#endif
216#ifdef CONFIG_ETH16I 213#ifdef CONFIG_ETH16I
217 {eth16i_probe, 0}, /* ICL EtherTeam 16i/32 */ 214 {eth16i_probe, 0}, /* ICL EtherTeam 16i/32 */
218#endif 215#endif
@@ -243,9 +240,6 @@ static struct devprobe2 isa_probes[] __initdata = {
243#ifdef CONFIG_ELPLUS /* 3c505 */ 240#ifdef CONFIG_ELPLUS /* 3c505 */
244 {elplus_probe, 0}, 241 {elplus_probe, 0},
245#endif 242#endif
246#ifdef CONFIG_SK_G16
247 {SK_init, 0},
248#endif
249#ifdef CONFIG_NI5010 243#ifdef CONFIG_NI5010
250 {ni5010_probe, 0}, 244 {ni5010_probe, 0},
251#endif 245#endif
diff --git a/drivers/net/arm/etherh.c b/drivers/net/arm/etherh.c
index 942a2819576c..2e28c201dcc0 100644
--- a/drivers/net/arm/etherh.c
+++ b/drivers/net/arm/etherh.c
@@ -68,6 +68,7 @@ struct etherh_priv {
68 void __iomem *dma_base; 68 void __iomem *dma_base;
69 unsigned int id; 69 unsigned int id;
70 void __iomem *ctrl_port; 70 void __iomem *ctrl_port;
71 void __iomem *base;
71 unsigned char ctrl; 72 unsigned char ctrl;
72 u32 supported; 73 u32 supported;
73}; 74};
@@ -177,7 +178,7 @@ etherh_setif(struct net_device *dev)
177 switch (etherh_priv(dev)->id) { 178 switch (etherh_priv(dev)->id) {
178 case PROD_I3_ETHERLAN600: 179 case PROD_I3_ETHERLAN600:
179 case PROD_I3_ETHERLAN600A: 180 case PROD_I3_ETHERLAN600A:
180 addr = (void *)dev->base_addr + EN0_RCNTHI; 181 addr = etherh_priv(dev)->base + EN0_RCNTHI;
181 182
182 switch (dev->if_port) { 183 switch (dev->if_port) {
183 case IF_PORT_10BASE2: 184 case IF_PORT_10BASE2:
@@ -218,7 +219,7 @@ etherh_getifstat(struct net_device *dev)
218 switch (etherh_priv(dev)->id) { 219 switch (etherh_priv(dev)->id) {
219 case PROD_I3_ETHERLAN600: 220 case PROD_I3_ETHERLAN600:
220 case PROD_I3_ETHERLAN600A: 221 case PROD_I3_ETHERLAN600A:
221 addr = (void *)dev->base_addr + EN0_RCNTHI; 222 addr = etherh_priv(dev)->base + EN0_RCNTHI;
222 switch (dev->if_port) { 223 switch (dev->if_port) {
223 case IF_PORT_10BASE2: 224 case IF_PORT_10BASE2:
224 stat = 1; 225 stat = 1;
@@ -281,7 +282,7 @@ static void
281etherh_reset(struct net_device *dev) 282etherh_reset(struct net_device *dev)
282{ 283{
283 struct ei_device *ei_local = netdev_priv(dev); 284 struct ei_device *ei_local = netdev_priv(dev);
284 void __iomem *addr = (void *)dev->base_addr; 285 void __iomem *addr = etherh_priv(dev)->base;
285 286
286 writeb(E8390_NODMA+E8390_PAGE0+E8390_STOP, addr); 287 writeb(E8390_NODMA+E8390_PAGE0+E8390_STOP, addr);
287 288
@@ -327,7 +328,7 @@ etherh_block_output (struct net_device *dev, int count, const unsigned char *buf
327 328
328 ei_local->dmaing = 1; 329 ei_local->dmaing = 1;
329 330
330 addr = (void *)dev->base_addr; 331 addr = etherh_priv(dev)->base;
331 dma_base = etherh_priv(dev)->dma_base; 332 dma_base = etherh_priv(dev)->dma_base;
332 333
333 count = (count + 1) & ~1; 334 count = (count + 1) & ~1;
@@ -387,7 +388,7 @@ etherh_block_input (struct net_device *dev, int count, struct sk_buff *skb, int
387 388
388 ei_local->dmaing = 1; 389 ei_local->dmaing = 1;
389 390
390 addr = (void *)dev->base_addr; 391 addr = etherh_priv(dev)->base;
391 dma_base = etherh_priv(dev)->dma_base; 392 dma_base = etherh_priv(dev)->dma_base;
392 393
393 buf = skb->data; 394 buf = skb->data;
@@ -427,7 +428,7 @@ etherh_get_header (struct net_device *dev, struct e8390_pkt_hdr *hdr, int ring_p
427 428
428 ei_local->dmaing = 1; 429 ei_local->dmaing = 1;
429 430
430 addr = (void *)dev->base_addr; 431 addr = etherh_priv(dev)->base;
431 dma_base = etherh_priv(dev)->dma_base; 432 dma_base = etherh_priv(dev)->dma_base;
432 433
433 writeb (E8390_NODMA | E8390_PAGE0 | E8390_START, addr + E8390_CMD); 434 writeb (E8390_NODMA | E8390_PAGE0 | E8390_START, addr + E8390_CMD);
@@ -696,7 +697,8 @@ etherh_probe(struct expansion_card *ec, const struct ecard_id *id)
696 eh->ctrl_port = eh->ioc_fast; 697 eh->ctrl_port = eh->ioc_fast;
697 } 698 }
698 699
699 dev->base_addr = (unsigned long)eh->memc + data->ns8390_offset; 700 eh->base = eh->memc + data->ns8390_offset;
701 dev->base_addr = (unsigned long)eh->base;
700 eh->dma_base = eh->memc + data->dataport_offset; 702 eh->dma_base = eh->memc + data->dataport_offset;
701 eh->ctrl_port += data->ctrlport_offset; 703 eh->ctrl_port += data->ctrlport_offset;
702 704
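The etherh changes above stop deriving the 8390 register base by casting dev->base_addr (an unsigned long) and instead carry a real void __iomem pointer in the driver's private data. A condensed sketch of that pattern with hypothetical mydrv_* names; only the __iomem handling mirrors the hunks above.

#include <linux/netdevice.h>
#include <asm/io.h>
#include "../8390.h"		/* E8390_* command bits, path as seen from drivers/net/arm */

struct mydrv_priv {
	void __iomem *base;	/* ioremapped 8390 register window */
};

static void mydrv_stop_chip(struct net_device *dev)
{
	struct mydrv_priv *priv = netdev_priv(dev);

	/* same access style as etherh_reset() above, no casts of dev->base_addr */
	writeb(E8390_NODMA + E8390_PAGE0 + E8390_STOP, priv->base + E8390_CMD);
}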
diff --git a/drivers/net/au1000_eth.c b/drivers/net/au1000_eth.c
index 5a2efd343db4..c82b9cd1c924 100644
--- a/drivers/net/au1000_eth.c
+++ b/drivers/net/au1000_eth.c
@@ -1681,10 +1681,6 @@ static int au1000_init(struct net_device *dev)
1681 control |= MAC_FULL_DUPLEX; 1681 control |= MAC_FULL_DUPLEX;
1682 } 1682 }
1683 1683
1684 /* fix for startup without cable */
1685 if (!link)
1686 dev->flags &= ~IFF_RUNNING;
1687
1688 aup->mac->control = control; 1684 aup->mac->control = control;
1689 aup->mac->vlan1_tag = 0x8100; /* activate vlan support */ 1685 aup->mac->vlan1_tag = 0x8100; /* activate vlan support */
1690 au_sync(); 1686 au_sync();
@@ -1709,16 +1705,14 @@ static void au1000_timer(unsigned long data)
1709 if_port = dev->if_port; 1705 if_port = dev->if_port;
1710 if (aup->phy_ops->phy_status(dev, aup->phy_addr, &link, &speed) == 0) { 1706 if (aup->phy_ops->phy_status(dev, aup->phy_addr, &link, &speed) == 0) {
1711 if (link) { 1707 if (link) {
1712 if (!(dev->flags & IFF_RUNNING)) { 1708 if (!netif_carrier_ok(dev)) {
1713 netif_carrier_on(dev); 1709 netif_carrier_on(dev);
1714 dev->flags |= IFF_RUNNING;
1715 printk(KERN_INFO "%s: link up\n", dev->name); 1710 printk(KERN_INFO "%s: link up\n", dev->name);
1716 } 1711 }
1717 } 1712 }
1718 else { 1713 else {
1719 if (dev->flags & IFF_RUNNING) { 1714 if (netif_carrier_ok(dev)) {
1720 netif_carrier_off(dev); 1715 netif_carrier_off(dev);
1721 dev->flags &= ~IFF_RUNNING;
1722 dev->if_port = 0; 1716 dev->if_port = 0;
1723 printk(KERN_INFO "%s: link down\n", dev->name); 1717 printk(KERN_INFO "%s: link down\n", dev->name);
1724 } 1718 }
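This hunk (and the bmac.c hunk just below) stops toggling IFF_RUNNING by hand; link state is now tracked only through netif_carrier_on()/netif_carrier_off() and tested with netif_carrier_ok(). A condensed sketch of the polling pattern, assuming a hypothetical mydrv_read_link() PHY helper:

#include <linux/kernel.h>
#include <linux/netdevice.h>

static int mydrv_read_link(struct net_device *dev);	/* hypothetical PHY query, non-zero = link up */

static void mydrv_check_link(struct net_device *dev)
{
	int link = mydrv_read_link(dev);

	if (link && !netif_carrier_ok(dev)) {
		netif_carrier_on(dev);
		printk(KERN_INFO "%s: link up\n", dev->name);
	} else if (!link && netif_carrier_ok(dev)) {
		netif_carrier_off(dev);
		printk(KERN_INFO "%s: link down\n", dev->name);
	}
}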
diff --git a/drivers/net/bmac.c b/drivers/net/bmac.c
index 734bd4ee3f9b..00e5257b176f 100644
--- a/drivers/net/bmac.c
+++ b/drivers/net/bmac.c
@@ -1412,7 +1412,6 @@ static int bmac_open(struct net_device *dev)
1412 bp->opened = 1; 1412 bp->opened = 1;
1413 bmac_reset_and_enable(dev); 1413 bmac_reset_and_enable(dev);
1414 enable_irq(dev->irq); 1414 enable_irq(dev->irq);
1415 dev->flags |= IFF_RUNNING;
1416 return 0; 1415 return 0;
1417} 1416}
1418 1417
@@ -1425,7 +1424,6 @@ static int bmac_close(struct net_device *dev)
1425 int i; 1424 int i;
1426 1425
1427 bp->sleeping = 1; 1426 bp->sleeping = 1;
1428 dev->flags &= ~(IFF_UP | IFF_RUNNING);
1429 1427
1430 /* disable rx and tx */ 1428 /* disable rx and tx */
1431 config = bmread(dev, RXCFG); 1429 config = bmread(dev, RXCFG);
diff --git a/drivers/net/dm9000.c b/drivers/net/dm9000.c
new file mode 100644
index 000000000000..f4ba0ffb8637
--- /dev/null
+++ b/drivers/net/dm9000.c
@@ -0,0 +1,1219 @@
1/*
2 * dm9000.c: Version 1.2 03/18/2003
3 *
4 * A Davicom DM9000 ISA NIC fast Ethernet driver for Linux.
5 * Copyright (C) 1997 Sten Wang
6 *
7 * This program is free software; you can redistribute it and/or
8 * modify it under the terms of the GNU General Public License
9 * as published by the Free Software Foundation; either version 2
10 * of the License, or (at your option) any later version.
11 *
12 * This program is distributed in the hope that it will be useful,
13 * but WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
15 * GNU General Public License for more details.
16 *
17 * (C)Copyright 1997-1998 DAVICOM Semiconductor,Inc. All Rights Reserved.
18 *
19 * V0.11 06/20/2001 REG_0A bit3=1, default enable BP with DA match
 20 * 06/22/2001 Support DM9801 programming
21 * E3: R25 = ((R24 + NF) & 0x00ff) | 0xf000
22 * E4: R25 = ((R24 + NF) & 0x00ff) | 0xc200
23 * R17 = (R17 & 0xfff0) | NF + 3
24 * E5: R25 = ((R24 + NF - 3) & 0x00ff) | 0xc200
25 * R17 = (R17 & 0xfff0) | NF
26 *
27 * v1.00 modify by simon 2001.9.5
28 * change for kernel 2.4.x
29 *
30 * v1.1 11/09/2001 fix force mode bug
31 *
32 * v1.2 03/18/2003 Weilun Huang <weilun_huang@davicom.com.tw>:
33 * Fixed phy reset.
34 * Added tx/rx 32 bit mode.
35 * Cleaned up for kernel merge.
36 *
37 * 03/03/2004 Sascha Hauer <s.hauer@pengutronix.de>
38 * Port to 2.6 kernel
39 *
40 * 24-Sep-2004 Ben Dooks <ben@simtec.co.uk>
41 * Cleanup of code to remove ifdefs
42 * Allowed platform device data to influence access width
43 * Reformatting areas of code
44 *
45 * 17-Mar-2005 Sascha Hauer <s.hauer@pengutronix.de>
46 * * removed 2.4 style module parameters
 47 * * removed unused stat counter and fixed
48 * net_device_stats
49 * * introduced tx_timeout function
50 * * reworked locking
51 */
52
53#include <linux/module.h>
54#include <linux/ioport.h>
55#include <linux/netdevice.h>
56#include <linux/etherdevice.h>
57#include <linux/init.h>
58#include <linux/skbuff.h>
59#include <linux/version.h>
60#include <linux/spinlock.h>
61#include <linux/crc32.h>
62#include <linux/mii.h>
63#include <linux/dm9000.h>
64#include <linux/delay.h>
65
66#include <asm/delay.h>
67#include <asm/irq.h>
68#include <asm/io.h>
69
70#include "dm9000.h"
71
72/* Board/System/Debug information/definition ---------------- */
73
74#define DM9000_PHY 0x40 /* PHY address 0x01 */
75
76#define TRUE 1
77#define FALSE 0
78
79#define CARDNAME "dm9000"
80#define PFX CARDNAME ": "
81
82#define DM9000_TIMER_WUT jiffies+(HZ*2) /* timer wakeup time : 2 second */
83
84#define DM9000_DEBUG 0
85
86#if DM9000_DEBUG > 2
87#define PRINTK3(args...) printk(CARDNAME ": " args)
88#else
89#define PRINTK3(args...) do { } while(0)
90#endif
91
92#if DM9000_DEBUG > 1
93#define PRINTK2(args...) printk(CARDNAME ": " args)
94#else
95#define PRINTK2(args...) do { } while(0)
96#endif
97
98#if DM9000_DEBUG > 0
99#define PRINTK1(args...) printk(CARDNAME ": " args)
100#define PRINTK(args...) printk(CARDNAME ": " args)
101#else
102#define PRINTK1(args...) do { } while(0)
103#define PRINTK(args...) printk(KERN_DEBUG args)
104#endif
105
106/*
107 * Transmit timeout, default 5 seconds.
108 */
109static int watchdog = 5000;
110module_param(watchdog, int, 0400);
111MODULE_PARM_DESC(watchdog, "transmit timeout in milliseconds");
112
113/* Structure/enum declaration ------------------------------- */
114typedef struct board_info {
115
116 void __iomem *io_addr; /* Register I/O base address */
117 void __iomem *io_data; /* Data I/O address */
118 u16 irq; /* IRQ */
119
120 u16 tx_pkt_cnt;
121 u16 queue_pkt_len;
122 u16 queue_start_addr;
123 u16 dbug_cnt;
124 u8 io_mode; /* 0:word, 2:byte */
125 u8 phy_addr;
126
127 void (*inblk)(void __iomem *port, void *data, int length);
128 void (*outblk)(void __iomem *port, void *data, int length);
129 void (*dumpblk)(void __iomem *port, int length);
130
131 struct resource *addr_res; /* resources found */
132 struct resource *data_res;
133 struct resource *addr_req; /* resources requested */
134 struct resource *data_req;
135 struct resource *irq_res;
136
137 struct timer_list timer;
138 struct net_device_stats stats;
139 unsigned char srom[128];
140 spinlock_t lock;
141
142 struct mii_if_info mii;
143 u32 msg_enable;
144} board_info_t;
145
146/* function declaration ------------------------------------- */
147static int dm9000_probe(struct device *);
148static int dm9000_open(struct net_device *);
149static int dm9000_start_xmit(struct sk_buff *, struct net_device *);
150static int dm9000_stop(struct net_device *);
151static int dm9000_do_ioctl(struct net_device *, struct ifreq *, int);
152
153
154static void dm9000_timer(unsigned long);
155static void dm9000_init_dm9000(struct net_device *);
156
157static struct net_device_stats *dm9000_get_stats(struct net_device *);
158
159static irqreturn_t dm9000_interrupt(int, void *, struct pt_regs *);
160
161static int dm9000_phy_read(struct net_device *dev, int phyaddr_unsused, int reg);
162static void dm9000_phy_write(struct net_device *dev, int phyaddr_unused, int reg,
163 int value);
164static u16 read_srom_word(board_info_t *, int);
165static void dm9000_rx(struct net_device *);
166static void dm9000_hash_table(struct net_device *);
167
168//#define DM9000_PROGRAM_EEPROM
169#ifdef DM9000_PROGRAM_EEPROM
170static void program_eeprom(board_info_t * db);
171#endif
172/* DM9000 network board routine ---------------------------- */
173
174static void
175dm9000_reset(board_info_t * db)
176{
177 PRINTK1("dm9000x: resetting\n");
178 /* RESET device */
179 writeb(DM9000_NCR, db->io_addr);
180 udelay(200);
181 writeb(NCR_RST, db->io_data);
182 udelay(200);
183}
184
185/*
186 * Read a byte from I/O port
187 */
188static u8
189ior(board_info_t * db, int reg)
190{
191 writeb(reg, db->io_addr);
192 return readb(db->io_data);
193}
194
195/*
196 * Write a byte to I/O port
197 */
198
199static void
200iow(board_info_t * db, int reg, int value)
201{
202 writeb(reg, db->io_addr);
203 writeb(value, db->io_data);
204}
205
206/* routines for sending block to chip */
207
208static void dm9000_outblk_8bit(void __iomem *reg, void *data, int count)
209{
210 writesb(reg, data, count);
211}
212
213static void dm9000_outblk_16bit(void __iomem *reg, void *data, int count)
214{
215 writesw(reg, data, (count+1) >> 1);
216}
217
218static void dm9000_outblk_32bit(void __iomem *reg, void *data, int count)
219{
220 writesl(reg, data, (count+3) >> 2);
221}
222
223/* input block from chip to memory */
224
225static void dm9000_inblk_8bit(void __iomem *reg, void *data, int count)
226{
 227 readsb(reg, data, count);
228}
229
230
231static void dm9000_inblk_16bit(void __iomem *reg, void *data, int count)
232{
233 readsw(reg, data, (count+1) >> 1);
234}
235
236static void dm9000_inblk_32bit(void __iomem *reg, void *data, int count)
237{
238 readsl(reg, data, (count+3) >> 2);
239}
240
241/* dump block from chip to null */
242
243static void dm9000_dumpblk_8bit(void __iomem *reg, int count)
244{
245 int i;
246 int tmp;
247
248 for (i = 0; i < count; i++)
249 tmp = readb(reg);
250}
251
252static void dm9000_dumpblk_16bit(void __iomem *reg, int count)
253{
254 int i;
255 int tmp;
256
257 count = (count + 1) >> 1;
258
259 for (i = 0; i < count; i++)
260 tmp = readw(reg);
261}
262
263static void dm9000_dumpblk_32bit(void __iomem *reg, int count)
264{
265 int i;
266 int tmp;
267
268 count = (count + 3) >> 2;
269
270 for (i = 0; i < count; i++)
271 tmp = readl(reg);
272}
273
274/* dm9000_set_io
275 *
276 * select the specified set of io routines to use with the
277 * device
278 */
279
280static void dm9000_set_io(struct board_info *db, int byte_width)
281{
282 /* use the size of the data resource to work out what IO
283 * routines we want to use
284 */
285
286 switch (byte_width) {
287 case 1:
288 db->dumpblk = dm9000_dumpblk_8bit;
289 db->outblk = dm9000_outblk_8bit;
290 db->inblk = dm9000_inblk_8bit;
291 break;
292
293 case 2:
294 db->dumpblk = dm9000_dumpblk_16bit;
295 db->outblk = dm9000_outblk_16bit;
296 db->inblk = dm9000_inblk_16bit;
297 break;
298
299 case 3:
 300 printk(KERN_ERR PFX "3 byte IO, falling back to 16bit\n");
301 db->dumpblk = dm9000_dumpblk_16bit;
302 db->outblk = dm9000_outblk_16bit;
303 db->inblk = dm9000_inblk_16bit;
304 break;
305
306 case 4:
307 default:
308 db->dumpblk = dm9000_dumpblk_32bit;
309 db->outblk = dm9000_outblk_32bit;
310 db->inblk = dm9000_inblk_32bit;
311 break;
312 }
313}
314
315
316/* Our watchdog timed out. Called by the networking layer */
317static void dm9000_timeout(struct net_device *dev)
318{
319 board_info_t *db = (board_info_t *) dev->priv;
320 u8 reg_save;
321 unsigned long flags;
322
323 /* Save previous register address */
324 reg_save = readb(db->io_addr);
 325 spin_lock_irqsave(&db->lock, flags);
326
327 netif_stop_queue(dev);
328 dm9000_reset(db);
329 dm9000_init_dm9000(dev);
330 /* We can accept TX packets again */
331 dev->trans_start = jiffies;
332 netif_wake_queue(dev);
333
334 /* Restore previous register address */
335 writeb(reg_save, db->io_addr);
 336 spin_unlock_irqrestore(&db->lock, flags);
337}
338
339
340/* dm9000_release_board
341 *
342 * release a board, and any mapped resources
343 */
344
345static void
346dm9000_release_board(struct platform_device *pdev, struct board_info *db)
347{
348 if (db->data_res == NULL) {
349 if (db->addr_res != NULL)
350 release_mem_region((unsigned long)db->io_addr, 4);
351 return;
352 }
353
354 /* unmap our resources */
355
356 iounmap(db->io_addr);
357 iounmap(db->io_data);
358
359 /* release the resources */
360
361 if (db->data_req != NULL) {
362 release_resource(db->data_req);
363 kfree(db->data_req);
364 }
365
366 if (db->addr_res != NULL) {
 367 release_resource(db->addr_req);
368 kfree(db->addr_req);
369 }
370}
371
372#define res_size(_r) (((_r)->end - (_r)->start) + 1)
373
374/*
375 * Search DM9000 board, allocate space and register it
376 */
377static int
378dm9000_probe(struct device *dev)
379{
380 struct platform_device *pdev = to_platform_device(dev);
381 struct dm9000_plat_data *pdata = pdev->dev.platform_data;
382 struct board_info *db; /* Point a board information structure */
383 struct net_device *ndev;
384 unsigned long base;
385 int ret = 0;
386 int iosize;
387 int i;
388 u32 id_val;
389
390 printk(KERN_INFO "%s Ethernet Driver\n", CARDNAME);
391
392 /* Init network device */
393 ndev = alloc_etherdev(sizeof (struct board_info));
394 if (!ndev) {
395 printk("%s: could not allocate device.\n", CARDNAME);
396 return -ENOMEM;
397 }
398
399 SET_MODULE_OWNER(ndev);
400 SET_NETDEV_DEV(ndev, dev);
401
402 PRINTK2("dm9000_probe()");
403
404 /* setup board info structure */
405 db = (struct board_info *) ndev->priv;
406 memset(db, 0, sizeof (*db));
407
408 if (pdev->num_resources < 2) {
409 ret = -ENODEV;
410 goto out;
411 }
412
413 switch (pdev->num_resources) {
414 case 2:
415 base = pdev->resource[0].start;
416
417 if (!request_mem_region(base, 4, ndev->name)) {
418 ret = -EBUSY;
419 goto out;
420 }
421
422 ndev->base_addr = base;
423 ndev->irq = pdev->resource[1].start;
424 db->io_addr = (void *)base;
425 db->io_data = (void *)(base + 4);
426
427 break;
428
429 case 3:
430 db->addr_res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
431 db->data_res = platform_get_resource(pdev, IORESOURCE_MEM, 1);
432 db->irq_res = platform_get_resource(pdev, IORESOURCE_IRQ, 0);
433
434 if (db->addr_res == NULL || db->data_res == NULL) {
435 printk(KERN_ERR PFX "insufficient resources\n");
436 ret = -ENOENT;
437 goto out;
438 }
439
440 i = res_size(db->addr_res);
441 db->addr_req = request_mem_region(db->addr_res->start, i,
442 pdev->name);
443
444 if (db->addr_req == NULL) {
445 printk(KERN_ERR PFX "cannot claim address reg area\n");
446 ret = -EIO;
447 goto out;
448 }
449
450 db->io_addr = ioremap(db->addr_res->start, i);
451
452 if (db->io_addr == NULL) {
453 printk(KERN_ERR "failed to ioremap address reg\n");
454 ret = -EINVAL;
455 goto out;
456 }
457
458 iosize = res_size(db->data_res);
459 db->data_req = request_mem_region(db->data_res->start, iosize,
460 pdev->name);
461
462 if (db->data_req == NULL) {
463 printk(KERN_ERR PFX "cannot claim data reg area\n");
464 ret = -EIO;
465 goto out;
466 }
467
468 db->io_data = ioremap(db->data_res->start, iosize);
469
470 if (db->io_data == NULL) {
471 printk(KERN_ERR "failed to ioremap data reg\n");
472 ret = -EINVAL;
473 goto out;
474 }
475
476 /* fill in parameters for net-dev structure */
477
478 ndev->base_addr = (unsigned long)db->io_addr;
479 ndev->irq = db->irq_res->start;
480
481 /* ensure at least we have a default set of IO routines */
482 dm9000_set_io(db, iosize);
483
484 }
485
486 /* check to see if anything is being over-ridden */
487 if (pdata != NULL) {
488 /* check to see if the driver wants to over-ride the
489 * default IO width */
490
491 if (pdata->flags & DM9000_PLATF_8BITONLY)
492 dm9000_set_io(db, 1);
493
494 if (pdata->flags & DM9000_PLATF_16BITONLY)
495 dm9000_set_io(db, 2);
496
497 if (pdata->flags & DM9000_PLATF_32BITONLY)
498 dm9000_set_io(db, 4);
499
500 /* check to see if there are any IO routine
501 * over-rides */
502
503 if (pdata->inblk != NULL)
504 db->inblk = pdata->inblk;
505
506 if (pdata->outblk != NULL)
507 db->outblk = pdata->outblk;
508
509 if (pdata->dumpblk != NULL)
510 db->dumpblk = pdata->dumpblk;
511 }
512
513 dm9000_reset(db);
514
515 /* try two times, DM9000 sometimes gets the first read wrong */
516 for (i = 0; i < 2; i++) {
517 id_val = ior(db, DM9000_VIDL);
518 id_val |= (u32)ior(db, DM9000_VIDH) << 8;
519 id_val |= (u32)ior(db, DM9000_PIDL) << 16;
520 id_val |= (u32)ior(db, DM9000_PIDH) << 24;
521
522 if (id_val == DM9000_ID)
523 break;
524 printk("%s: read wrong id 0x%08x\n", CARDNAME, id_val);
525 }
526
527 if (id_val != DM9000_ID) {
528 printk("%s: wrong id: 0x%08x\n", CARDNAME, id_val);
529 goto release;
530 }
531
532 /* from this point we assume that we have found a DM9000 */
533
534 /* driver system function */
535 ether_setup(ndev);
536
537 ndev->open = &dm9000_open;
538 ndev->hard_start_xmit = &dm9000_start_xmit;
539 ndev->tx_timeout = &dm9000_timeout;
540 ndev->watchdog_timeo = msecs_to_jiffies(watchdog);
541 ndev->stop = &dm9000_stop;
542 ndev->get_stats = &dm9000_get_stats;
543 ndev->set_multicast_list = &dm9000_hash_table;
544 ndev->do_ioctl = &dm9000_do_ioctl;
545
546#ifdef DM9000_PROGRAM_EEPROM
547 program_eeprom(db);
548#endif
549 db->msg_enable = NETIF_MSG_LINK;
550 db->mii.phy_id_mask = 0x1f;
551 db->mii.reg_num_mask = 0x1f;
552 db->mii.force_media = 0;
553 db->mii.full_duplex = 0;
554 db->mii.dev = ndev;
555 db->mii.mdio_read = dm9000_phy_read;
556 db->mii.mdio_write = dm9000_phy_write;
557
558 /* Read SROM content */
559 for (i = 0; i < 64; i++)
560 ((u16 *) db->srom)[i] = read_srom_word(db, i);
561
562 /* Set Node Address */
563 for (i = 0; i < 6; i++)
564 ndev->dev_addr[i] = db->srom[i];
565
566 if (!is_valid_ether_addr(ndev->dev_addr))
567 printk("%s: Invalid ethernet MAC address. Please "
568 "set using ifconfig\n", ndev->name);
569
570 dev_set_drvdata(dev, ndev);
571 ret = register_netdev(ndev);
572
573 if (ret == 0) {
574 printk("%s: dm9000 at %p,%p IRQ %d MAC: ",
575 ndev->name, db->io_addr, db->io_data, ndev->irq);
576 for (i = 0; i < 5; i++)
577 printk("%02x:", ndev->dev_addr[i]);
578 printk("%02x\n", ndev->dev_addr[5]);
579 }
580 return 0;
581
582 release:
583 out:
584 printk("%s: not found (%d).\n", CARDNAME, ret);
585
586 dm9000_release_board(pdev, db);
 587 free_netdev(ndev);
588
589 return ret;
590}
591
592/*
593 * Open the interface.
 594 * The interface is opened whenever "ifconfig" activates it.
595 */
596static int
597dm9000_open(struct net_device *dev)
598{
599 board_info_t *db = (board_info_t *) dev->priv;
600
601 PRINTK2("entering dm9000_open\n");
602
603 if (request_irq(dev->irq, &dm9000_interrupt, SA_SHIRQ, dev->name, dev))
604 return -EAGAIN;
605
606 /* Initialize DM9000 board */
607 dm9000_reset(db);
608 dm9000_init_dm9000(dev);
609
610 /* Init driver variable */
611 db->dbug_cnt = 0;
612
613 /* set and active a timer process */
614 init_timer(&db->timer);
615 db->timer.expires = DM9000_TIMER_WUT * 2;
616 db->timer.data = (unsigned long) dev;
617 db->timer.function = &dm9000_timer;
618 add_timer(&db->timer);
619
620 mii_check_media(&db->mii, netif_msg_link(db), 1);
621 netif_start_queue(dev);
622
623 return 0;
624}
625
626/*
 627 * Initialize dm9000 board
628 */
629static void
630dm9000_init_dm9000(struct net_device *dev)
631{
632 board_info_t *db = (board_info_t *) dev->priv;
633
634 PRINTK1("entering %s\n",__FUNCTION__);
635
636 /* I/O mode */
637 db->io_mode = ior(db, DM9000_ISR) >> 6; /* ISR bit7:6 keeps I/O mode */
638
639 /* GPIO0 on pre-activate PHY */
640 iow(db, DM9000_GPR, 0); /* REG_1F bit0 activate phyxcer */
641 iow(db, DM9000_GPCR, GPCR_GEP_CNTL); /* Let GPIO0 output */
642 iow(db, DM9000_GPR, 0); /* Enable PHY */
643
644 /* Program operating register */
645 iow(db, DM9000_TCR, 0); /* TX Polling clear */
646 iow(db, DM9000_BPTR, 0x3f); /* Less 3Kb, 200us */
647 iow(db, DM9000_FCR, 0xff); /* Flow Control */
648 iow(db, DM9000_SMCR, 0); /* Special Mode */
649 /* clear TX status */
650 iow(db, DM9000_NSR, NSR_WAKEST | NSR_TX2END | NSR_TX1END);
651 iow(db, DM9000_ISR, ISR_CLR_STATUS); /* Clear interrupt status */
652
653 /* Set address filter table */
654 dm9000_hash_table(dev);
655
656 /* Activate DM9000 */
657 iow(db, DM9000_RCR, RCR_DIS_LONG | RCR_DIS_CRC | RCR_RXEN);
658 /* Enable TX/RX interrupt mask */
659 iow(db, DM9000_IMR, IMR_PAR | IMR_PTM | IMR_PRM);
660
661 /* Init Driver variable */
662 db->tx_pkt_cnt = 0;
663 db->queue_pkt_len = 0;
664 dev->trans_start = 0;
665 spin_lock_init(&db->lock);
666}
667
668/*
669 * Hardware start transmission.
670 * Send a packet to media from the upper layer.
671 */
672static int
673dm9000_start_xmit(struct sk_buff *skb, struct net_device *dev)
674{
675 board_info_t *db = (board_info_t *) dev->priv;
676
677 PRINTK3("dm9000_start_xmit\n");
678
679 if (db->tx_pkt_cnt > 1)
680 return 1;
681
682 netif_stop_queue(dev);
683
684 /* Disable all interrupts */
685 iow(db, DM9000_IMR, IMR_PAR);
686
687 /* Move data to DM9000 TX RAM */
688 writeb(DM9000_MWCMD, db->io_addr);
689
690 (db->outblk)(db->io_data, skb->data, skb->len);
691 db->stats.tx_bytes += skb->len;
692
693 /* TX control: First packet immediately send, second packet queue */
694 if (db->tx_pkt_cnt == 0) {
695
696 /* First Packet */
697 db->tx_pkt_cnt++;
698
699 /* Set TX length to DM9000 */
700 iow(db, DM9000_TXPLL, skb->len & 0xff);
701 iow(db, DM9000_TXPLH, (skb->len >> 8) & 0xff);
702
703 /* Issue TX polling command */
704 iow(db, DM9000_TCR, TCR_TXREQ); /* Cleared after TX complete */
705
706 dev->trans_start = jiffies; /* save the time stamp */
707
708 } else {
709 /* Second packet */
710 db->tx_pkt_cnt++;
711 db->queue_pkt_len = skb->len;
712 }
713
714 /* free this SKB */
715 dev_kfree_skb(skb);
716
717 /* Re-enable resource check */
718 if (db->tx_pkt_cnt == 1)
719 netif_wake_queue(dev);
720
721 /* Re-enable interrupt */
722 iow(db, DM9000_IMR, IMR_PAR | IMR_PTM | IMR_PRM);
723
724 return 0;
725}
726
727static void
728dm9000_shutdown(struct net_device *dev)
729{
730 board_info_t *db = (board_info_t *) dev->priv;
731
732 /* RESET device */
733 dm9000_phy_write(dev, 0, MII_BMCR, BMCR_RESET); /* PHY RESET */
734 iow(db, DM9000_GPR, 0x01); /* Power-Down PHY */
735 iow(db, DM9000_IMR, IMR_PAR); /* Disable all interrupt */
736 iow(db, DM9000_RCR, 0x00); /* Disable RX */
737}
738
739/*
740 * Stop the interface.
 741 * The interface is stopped when it is brought down.
742 */
743static int
744dm9000_stop(struct net_device *ndev)
745{
746 board_info_t *db = (board_info_t *) ndev->priv;
747
748 PRINTK1("entering %s\n",__FUNCTION__);
749
 750 /* delete timer */
751 del_timer(&db->timer);
752
753 netif_stop_queue(ndev);
754 netif_carrier_off(ndev);
755
756 /* free interrupt */
757 free_irq(ndev->irq, ndev);
758
759 dm9000_shutdown(ndev);
760
761 return 0;
762}
763
764/*
765 * DM9000 interrupt handler
766 * receive the packet to upper layer, free the transmitted packet
767 */
768
 769static void
770dm9000_tx_done(struct net_device *dev, board_info_t * db)
771{
772 int tx_status = ior(db, DM9000_NSR); /* Got TX status */
773
774 if (tx_status & (NSR_TX2END | NSR_TX1END)) {
775 /* One packet sent complete */
776 db->tx_pkt_cnt--;
777 db->stats.tx_packets++;
778
779 /* Queue packet check & send */
780 if (db->tx_pkt_cnt > 0) {
781 iow(db, DM9000_TXPLL, db->queue_pkt_len & 0xff);
782 iow(db, DM9000_TXPLH, (db->queue_pkt_len >> 8) & 0xff);
783 iow(db, DM9000_TCR, TCR_TXREQ);
784 dev->trans_start = jiffies;
785 }
786 netif_wake_queue(dev);
787 }
788}
789
790static irqreturn_t
791dm9000_interrupt(int irq, void *dev_id, struct pt_regs *regs)
792{
793 struct net_device *dev = dev_id;
794 board_info_t *db;
795 int int_status;
796 u8 reg_save;
797
798 PRINTK3("entering %s\n",__FUNCTION__);
799
800 if (!dev) {
801 PRINTK1("dm9000_interrupt() without DEVICE arg\n");
802 return IRQ_HANDLED;
803 }
804
805 /* A real interrupt coming */
806 db = (board_info_t *) dev->priv;
807 spin_lock(&db->lock);
808
809 /* Save previous register address */
810 reg_save = readb(db->io_addr);
811
812 /* Disable all interrupts */
813 iow(db, DM9000_IMR, IMR_PAR);
814
815 /* Got DM9000 interrupt status */
816 int_status = ior(db, DM9000_ISR); /* Got ISR */
817 iow(db, DM9000_ISR, int_status); /* Clear ISR status */
818
 819 /* Receive the incoming packet */
820 if (int_status & ISR_PRS)
821 dm9000_rx(dev);
822
 823 /* Transmit interrupt check */
824 if (int_status & ISR_PTS)
825 dm9000_tx_done(dev, db);
826
827 /* Re-enable interrupt mask */
828 iow(db, DM9000_IMR, IMR_PAR | IMR_PTM | IMR_PRM);
829
830 /* Restore previous register address */
831 writeb(reg_save, db->io_addr);
832
833 spin_unlock(&db->lock);
834
835 return IRQ_HANDLED;
836}
837
838/*
839 * Get statistics from driver.
840 */
841static struct net_device_stats *
842dm9000_get_stats(struct net_device *dev)
843{
844 board_info_t *db = (board_info_t *) dev->priv;
845 return &db->stats;
846}
847
848/*
849 * Process the upper socket ioctl command
850 */
851static int
852dm9000_do_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
853{
854 PRINTK1("entering %s\n",__FUNCTION__);
855 return 0;
856}
857
858/*
859 * A periodic timer routine
 860 * Dynamic media sense, allocate Rx buffer...
861 */
862static void
863dm9000_timer(unsigned long data)
864{
865 struct net_device *dev = (struct net_device *) data;
866 board_info_t *db = (board_info_t *) dev->priv;
867 u8 reg_save;
868 unsigned long flags;
869
870 PRINTK3("dm9000_timer()\n");
871
 872 spin_lock_irqsave(&db->lock, flags);
873 /* Save previous register address */
874 reg_save = readb(db->io_addr);
875
876 mii_check_media(&db->mii, netif_msg_link(db), 0);
877
878 /* Restore previous register address */
879 writeb(reg_save, db->io_addr);
 880 spin_unlock_irqrestore(&db->lock, flags);
881
882 /* Set timer again */
883 db->timer.expires = DM9000_TIMER_WUT;
884 add_timer(&db->timer);
885}
886
887struct dm9000_rxhdr {
888 u16 RxStatus;
889 u16 RxLen;
890} __attribute__((__packed__));
891
892/*
893 * Received a packet and pass to upper layer
894 */
895static void
896dm9000_rx(struct net_device *dev)
897{
898 board_info_t *db = (board_info_t *) dev->priv;
899 struct dm9000_rxhdr rxhdr;
900 struct sk_buff *skb;
901 u8 rxbyte, *rdptr;
902 int GoodPacket;
903 int RxLen;
904
905 /* Check packet ready or not */
906 do {
907 ior(db, DM9000_MRCMDX); /* Dummy read */
908
909 /* Get most updated data */
910 rxbyte = readb(db->io_data);
911
912 /* Status check: this byte must be 0 or 1 */
913 if (rxbyte > DM9000_PKT_RDY) {
914 printk("status check failed: %d\n", rxbyte);
915 iow(db, DM9000_RCR, 0x00); /* Stop Device */
916 iow(db, DM9000_ISR, IMR_PAR); /* Stop INT request */
917 return;
918 }
919
920 if (rxbyte != DM9000_PKT_RDY)
921 return;
922
923 /* A packet ready now & Get status/length */
924 GoodPacket = TRUE;
925 writeb(DM9000_MRCMD, db->io_addr);
926
927 (db->inblk)(db->io_data, &rxhdr, sizeof(rxhdr));
928
929 RxLen = rxhdr.RxLen;
930
931 /* Packet Status check */
932 if (RxLen < 0x40) {
933 GoodPacket = FALSE;
934 PRINTK1("Bad Packet received (runt)\n");
935 }
936
937 if (RxLen > DM9000_PKT_MAX) {
938 PRINTK1("RST: RX Len:%x\n", RxLen);
939 }
940
941 if (rxhdr.RxStatus & 0xbf00) {
942 GoodPacket = FALSE;
943 if (rxhdr.RxStatus & 0x100) {
944 PRINTK1("fifo error\n");
945 db->stats.rx_fifo_errors++;
946 }
947 if (rxhdr.RxStatus & 0x200) {
948 PRINTK1("crc error\n");
949 db->stats.rx_crc_errors++;
950 }
951 if (rxhdr.RxStatus & 0x8000) {
952 PRINTK1("length error\n");
953 db->stats.rx_length_errors++;
954 }
955 }
956
957 /* Move data from DM9000 */
958 if (GoodPacket
959 && ((skb = dev_alloc_skb(RxLen + 4)) != NULL)) {
960 skb->dev = dev;
961 skb_reserve(skb, 2);
962 rdptr = (u8 *) skb_put(skb, RxLen - 4);
963
964 /* Read received packet from RX SRAM */
965
966 (db->inblk)(db->io_data, rdptr, RxLen);
967 db->stats.rx_bytes += RxLen;
968
969 /* Pass to upper layer */
970 skb->protocol = eth_type_trans(skb, dev);
971 netif_rx(skb);
972 db->stats.rx_packets++;
973
974 } else {
975 /* need to dump the packet's data */
976
977 (db->dumpblk)(db->io_data, RxLen);
978 }
979 } while (rxbyte == DM9000_PKT_RDY);
980}
981
982/*
983 * Read a word data from SROM
984 */
985static u16
986read_srom_word(board_info_t * db, int offset)
987{
988 iow(db, DM9000_EPAR, offset);
989 iow(db, DM9000_EPCR, EPCR_ERPRR);
990 mdelay(8); /* according to the datasheet 200us should be enough,
991 but it doesn't work */
992 iow(db, DM9000_EPCR, 0x0);
993 return (ior(db, DM9000_EPDRL) + (ior(db, DM9000_EPDRH) << 8));
994}
995
996#ifdef DM9000_PROGRAM_EEPROM
997/*
998 * Write a word data to SROM
999 */
1000static void
1001write_srom_word(board_info_t * db, int offset, u16 val)
1002{
1003 iow(db, DM9000_EPAR, offset);
1004 iow(db, DM9000_EPDRH, ((val >> 8) & 0xff));
1005 iow(db, DM9000_EPDRL, (val & 0xff));
1006 iow(db, DM9000_EPCR, EPCR_WEP | EPCR_ERPRW);
 1007 mdelay(8); /* same delay as for reading */
1008 iow(db, DM9000_EPCR, 0);
1009}
1010
1011/*
1012 * Only for development:
1013 * Here we write static data to the eeprom in case
1014 * we don't have valid content on a new board
1015 */
1016static void
1017program_eeprom(board_info_t * db)
1018{
1019 u16 eeprom[] = { 0x0c00, 0x007f, 0x1300, /* MAC Address */
1020 0x0000, /* Autoload: accept nothing */
1021 0x0a46, 0x9000, /* Vendor / Product ID */
1022 0x0000, /* pin control */
1023 0x0000,
1024 }; /* Wake-up mode control */
1025 int i;
1026 for (i = 0; i < 8; i++)
1027 write_srom_word(db, i, eeprom[i]);
1028}
1029#endif
1030
1031
1032/*
 1033 * Calculate the CRC value of the Rx packet
1034 * flag = 1 : return the reverse CRC (for the received packet CRC)
1035 * 0 : return the normal CRC (for Hash Table index)
1036 */
1037
1038static unsigned long
1039cal_CRC(unsigned char *Data, unsigned int Len, u8 flag)
1040{
1041
1042 u32 crc = ether_crc_le(Len, Data);
1043
1044 if (flag)
1045 return ~crc;
1046
1047 return crc;
1048}
1049
1050/*
1051 * Set DM9000 multicast address
1052 */
1053static void
1054dm9000_hash_table(struct net_device *dev)
1055{
1056 board_info_t *db = (board_info_t *) dev->priv;
1057 struct dev_mc_list *mcptr = dev->mc_list;
1058 int mc_cnt = dev->mc_count;
1059 u32 hash_val;
1060 u16 i, oft, hash_table[4];
1061 unsigned long flags;
1062
1063 PRINTK2("dm9000_hash_table()\n");
1064
1065 spin_lock_irqsave(&db->lock,flags);
1066
1067 for (i = 0, oft = 0x10; i < 6; i++, oft++)
1068 iow(db, oft, dev->dev_addr[i]);
1069
1070 /* Clear Hash Table */
1071 for (i = 0; i < 4; i++)
1072 hash_table[i] = 0x0;
1073
1074 /* broadcast address */
1075 hash_table[3] = 0x8000;
1076
1077 /* the multicast address in Hash Table : 64 bits */
1078 for (i = 0; i < mc_cnt; i++, mcptr = mcptr->next) {
1079 hash_val = cal_CRC((char *) mcptr->dmi_addr, 6, 0) & 0x3f;
1080 hash_table[hash_val / 16] |= (u16) 1 << (hash_val % 16);
1081 }
1082
1083 /* Write the hash table to MAC MD table */
1084 for (i = 0, oft = 0x16; i < 4; i++) {
1085 iow(db, oft++, hash_table[i] & 0xff);
1086 iow(db, oft++, (hash_table[i] >> 8) & 0xff);
1087 }
1088
1089 spin_unlock_irqrestore(&db->lock,flags);
1090}
1091
1092
1093/*
1094 * Read a word from phyxcer
1095 */
1096static int
1097dm9000_phy_read(struct net_device *dev, int phy_reg_unused, int reg)
1098{
1099 board_info_t *db = (board_info_t *) dev->priv;
1100 unsigned long flags;
1101 int ret;
1102
1103 spin_lock_irqsave(&db->lock,flags);
1104 /* Fill the phyxcer register into REG_0C */
1105 iow(db, DM9000_EPAR, DM9000_PHY | reg);
1106
1107 iow(db, DM9000_EPCR, 0xc); /* Issue phyxcer read command */
1108 udelay(100); /* Wait read complete */
1109 iow(db, DM9000_EPCR, 0x0); /* Clear phyxcer read command */
1110
1111 /* The read data keeps on REG_0D & REG_0E */
1112 ret = (ior(db, DM9000_EPDRH) << 8) | ior(db, DM9000_EPDRL);
1113
1114 spin_unlock_irqrestore(&db->lock,flags);
1115
1116 return ret;
1117}
1118
1119/*
1120 * Write a word to phyxcer
1121 */
1122static void
1123dm9000_phy_write(struct net_device *dev, int phyaddr_unused, int reg, int value)
1124{
1125 board_info_t *db = (board_info_t *) dev->priv;
1126 unsigned long flags;
1127
1128 spin_lock_irqsave(&db->lock,flags);
1129
1130 /* Fill the phyxcer register into REG_0C */
1131 iow(db, DM9000_EPAR, DM9000_PHY | reg);
1132
1133 /* Fill the written data into REG_0D & REG_0E */
1134 iow(db, DM9000_EPDRL, (value & 0xff));
1135 iow(db, DM9000_EPDRH, ((value >> 8) & 0xff));
1136
1137 iow(db, DM9000_EPCR, 0xa); /* Issue phyxcer write command */
1138 udelay(500); /* Wait write complete */
1139 iow(db, DM9000_EPCR, 0x0); /* Clear phyxcer write command */
1140
1141 spin_unlock_irqrestore(&db->lock,flags);
1142}
1143
1144static int
1145dm9000_drv_suspend(struct device *dev, u32 state, u32 level)
1146{
1147 struct net_device *ndev = dev_get_drvdata(dev);
1148
1149 if (ndev && level == SUSPEND_DISABLE) {
1150 if (netif_running(ndev)) {
1151 netif_device_detach(ndev);
1152 dm9000_shutdown(ndev);
1153 }
1154 }
1155 return 0;
1156}
1157
1158static int
1159dm9000_drv_resume(struct device *dev, u32 level)
1160{
1161 struct net_device *ndev = dev_get_drvdata(dev);
1162 board_info_t *db = (board_info_t *) ndev->priv;
1163
1164 if (ndev && level == RESUME_ENABLE) {
1165
1166 if (netif_running(ndev)) {
1167 dm9000_reset(db);
1168 dm9000_init_dm9000(ndev);
1169
1170 netif_device_attach(ndev);
1171 }
1172 }
1173 return 0;
1174}
1175
1176static int
1177dm9000_drv_remove(struct device *dev)
1178{
1179 struct platform_device *pdev = to_platform_device(dev);
1180 struct net_device *ndev = dev_get_drvdata(dev);
1181
1182 dev_set_drvdata(dev, NULL);
1183
1184 unregister_netdev(ndev);
1185 dm9000_release_board(pdev, (board_info_t *) ndev->priv);
 1186 free_netdev(ndev); /* free device structure */
1187
 1188 PRINTK1("dm9000_drv_remove() exit\n");
1189
1190 return 0;
1191}
1192
1193static struct device_driver dm9000_driver = {
1194 .name = "dm9000",
1195 .bus = &platform_bus_type,
1196 .probe = dm9000_probe,
1197 .remove = dm9000_drv_remove,
1198 .suspend = dm9000_drv_suspend,
1199 .resume = dm9000_drv_resume,
1200};
1201
1202static int __init
1203dm9000_init(void)
1204{
1205 return driver_register(&dm9000_driver); /* search board and register */
1206}
1207
1208static void __exit
1209dm9000_cleanup(void)
1210{
1211 driver_unregister(&dm9000_driver);
1212}
1213
1214module_init(dm9000_init);
1215module_exit(dm9000_cleanup);
1216
1217MODULE_AUTHOR("Sascha Hauer, Ben Dooks");
1218MODULE_DESCRIPTION("Davicom DM9000 network driver");
1219MODULE_LICENSE("GPL");
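As dm9000_probe() above shows, the driver binds to a "dm9000" platform device and accepts either a two-resource legacy layout or a three-resource form (address window, data window, IRQ), plus optional dm9000_plat_data flags from the new include/linux/dm9000.h. A minimal board-file sketch of the three-resource form; the addresses, the IRQ number and the 16-bit-only flag are placeholders for whatever a given board actually wires up.

#include <linux/kernel.h>
#include <linux/ioport.h>
#include <linux/device.h>	/* struct platform_device lives here in this kernel era */
#include <linux/dm9000.h>

static struct resource dm9000_resources[] = {
	[0] = {				/* address port */
		.start	= 0x20000000,	/* placeholder */
		.end	= 0x20000003,
		.flags	= IORESOURCE_MEM,
	},
	[1] = {				/* data port */
		.start	= 0x20000004,	/* placeholder */
		.end	= 0x20000007,
		.flags	= IORESOURCE_MEM,
	},
	[2] = {				/* board interrupt line */
		.start	= 42,		/* placeholder IRQ */
		.end	= 42,
		.flags	= IORESOURCE_IRQ,
	},
};

static struct dm9000_plat_data dm9000_platdata = {
	.flags	= DM9000_PLATF_16BITONLY,	/* only if the bus really is 16 bits wide */
};

static struct platform_device dm9000_device = {
	.name		= "dm9000",
	.id		= 0,
	.num_resources	= ARRAY_SIZE(dm9000_resources),
	.resource	= dm9000_resources,
	.dev		= {
		.platform_data	= &dm9000_platdata,
	},
};

Board setup code would then call platform_device_register(&dm9000_device) before the dm9000 driver is loaded.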
diff --git a/drivers/net/dm9000.h b/drivers/net/dm9000.h
new file mode 100644
index 000000000000..82cad360bafc
--- /dev/null
+++ b/drivers/net/dm9000.h
@@ -0,0 +1,135 @@
1/*
2 * dm9000 Ethernet
3 */
4
5#ifndef _DM9000X_H_
6#define _DM9000X_H_
7
8#define DM9000_ID 0x90000A46
9
10/* although the registers are 16 bit, they are 32-bit aligned.
11 */
12
13#define DM9000_NCR 0x00
14#define DM9000_NSR 0x01
15#define DM9000_TCR 0x02
16#define DM9000_TSR1 0x03
17#define DM9000_TSR2 0x04
18#define DM9000_RCR 0x05
19#define DM9000_RSR 0x06
20#define DM9000_ROCR 0x07
21#define DM9000_BPTR 0x08
22#define DM9000_FCTR 0x09
23#define DM9000_FCR 0x0A
24#define DM9000_EPCR 0x0B
25#define DM9000_EPAR 0x0C
26#define DM9000_EPDRL 0x0D
27#define DM9000_EPDRH 0x0E
28#define DM9000_WCR 0x0F
29
30#define DM9000_PAR 0x10
31#define DM9000_MAR 0x16
32
33#define DM9000_GPCR 0x1e
34#define DM9000_GPR 0x1f
35#define DM9000_TRPAL 0x22
36#define DM9000_TRPAH 0x23
37#define DM9000_RWPAL 0x24
38#define DM9000_RWPAH 0x25
39
40#define DM9000_VIDL 0x28
41#define DM9000_VIDH 0x29
42#define DM9000_PIDL 0x2A
43#define DM9000_PIDH 0x2B
44
45#define DM9000_CHIPR 0x2C
46#define DM9000_SMCR 0x2F
47
48#define DM9000_MRCMDX 0xF0
49#define DM9000_MRCMD 0xF2
50#define DM9000_MRRL 0xF4
51#define DM9000_MRRH 0xF5
52#define DM9000_MWCMDX 0xF6
53#define DM9000_MWCMD 0xF8
54#define DM9000_MWRL 0xFA
55#define DM9000_MWRH 0xFB
56#define DM9000_TXPLL 0xFC
57#define DM9000_TXPLH 0xFD
58#define DM9000_ISR 0xFE
59#define DM9000_IMR 0xFF
60
61#define NCR_EXT_PHY (1<<7)
62#define NCR_WAKEEN (1<<6)
63#define NCR_FCOL (1<<4)
64#define NCR_FDX (1<<3)
65#define NCR_LBK (3<<1)
66#define NCR_RST (1<<0)
67
68#define NSR_SPEED (1<<7)
69#define NSR_LINKST (1<<6)
70#define NSR_WAKEST (1<<5)
71#define NSR_TX2END (1<<3)
72#define NSR_TX1END (1<<2)
73#define NSR_RXOV (1<<1)
74
75#define TCR_TJDIS (1<<6)
76#define TCR_EXCECM (1<<5)
77#define TCR_PAD_DIS2 (1<<4)
78#define TCR_CRC_DIS2 (1<<3)
79#define TCR_PAD_DIS1 (1<<2)
80#define TCR_CRC_DIS1 (1<<1)
81#define TCR_TXREQ (1<<0)
82
83#define TSR_TJTO (1<<7)
84#define TSR_LC (1<<6)
85#define TSR_NC (1<<5)
86#define TSR_LCOL (1<<4)
87#define TSR_COL (1<<3)
88#define TSR_EC (1<<2)
89
90#define RCR_WTDIS (1<<6)
91#define RCR_DIS_LONG (1<<5)
92#define RCR_DIS_CRC (1<<4)
93#define RCR_ALL (1<<3)
94#define RCR_RUNT (1<<2)
95#define RCR_PRMSC (1<<1)
96#define RCR_RXEN (1<<0)
97
98#define RSR_RF (1<<7)
99#define RSR_MF (1<<6)
100#define RSR_LCS (1<<5)
101#define RSR_RWTO (1<<4)
102#define RSR_PLE (1<<3)
103#define RSR_AE (1<<2)
104#define RSR_CE (1<<1)
105#define RSR_FOE (1<<0)
106
107#define FCTR_HWOT(ot) (( ot & 0xf ) << 4 )
108#define FCTR_LWOT(ot) ( ot & 0xf )
109
110#define IMR_PAR (1<<7)
111#define IMR_ROOM (1<<3)
112#define IMR_ROM (1<<2)
113#define IMR_PTM (1<<1)
114#define IMR_PRM (1<<0)
115
116#define ISR_ROOS (1<<3)
117#define ISR_ROS (1<<2)
118#define ISR_PTS (1<<1)
119#define ISR_PRS (1<<0)
120#define ISR_CLR_STATUS (ISR_ROOS | ISR_ROS | ISR_PTS | ISR_PRS)
121
122#define EPCR_REEP (1<<5)
123#define EPCR_WEP (1<<4)
124#define EPCR_EPOS (1<<3)
125#define EPCR_ERPRR (1<<2)
126#define EPCR_ERPRW (1<<1)
127#define EPCR_ERRE (1<<0)
128
129#define GPCR_GEP_CNTL (1<<0)
130
131#define DM9000_PKT_RDY 0x01 /* Packet ready to receive */
132#define DM9000_PKT_MAX 1536 /* Received packet max size */
133
134#endif /* _DM9000X_H_ */
135
diff --git a/drivers/net/fmv18x.c b/drivers/net/fmv18x.c
deleted file mode 100644
index 04c748523471..000000000000
--- a/drivers/net/fmv18x.c
+++ /dev/null
@@ -1,689 +0,0 @@
1/* fmv18x.c: A network device driver for the Fujitsu FMV-181/182/183/184.
2
3 Original: at1700.c (1993-94 by Donald Becker).
4 Copyright 1993 United States Government as represented by the
5 Director, National Security Agency.
6 The author may be reached as becker@scyld.com, or C/O
7 Scyld Computing Corporation
8 410 Severn Ave., Suite 210
9 Annapolis MD 21403
10
11 Modified by Yutaka TAMIYA (tamy@flab.fujitsu.co.jp)
12 Copyright 1994 Fujitsu Laboratories Ltd.
13 Special thanks to:
14 Masayoshi UTAKA (utaka@ace.yk.fujitsu.co.jp)
15 for testing this driver.
16 H. NEGISHI (agy, negishi@sun45.psd.cs.fujitsu.co.jp)
17 for suggestion of some program modification.
18 Masahiro SEKIGUCHI <seki@sysrap.cs.fujitsu.co.jp>
19 for suggestion of some program modification.
20 Kazutoshi MORIOKA (morioka@aurora.oaks.cs.fujitsu.co.jp)
21 for testing this driver.
22
23 This software may be used and distributed according to the terms
24 of the GNU General Public License, incorporated herein by reference.
25
26 This is a device driver for the Fujitsu FMV-181/182/183/184, which
27 is a straight-forward Fujitsu MB86965 implementation.
28
29 Sources:
30 at1700.c
31 The Fujitsu MB86965 datasheet.
32 The Fujitsu FMV-181/182 user's guide
33*/
34
35static const char version[] =
36 "fmv18x.c:v2.2.0 09/24/98 Yutaka TAMIYA (tamy@flab.fujitsu.co.jp)\n";
37
38#include <linux/module.h>
39#include <linux/kernel.h>
40#include <linux/types.h>
41#include <linux/fcntl.h>
42#include <linux/interrupt.h>
43#include <linux/ioport.h>
44#include <linux/in.h>
45#include <linux/slab.h>
46#include <linux/string.h>
47#include <linux/init.h>
48#include <linux/errno.h>
49#include <linux/spinlock.h>
50#include <linux/netdevice.h>
51#include <linux/etherdevice.h>
52#include <linux/skbuff.h>
53#include <linux/delay.h>
54#include <linux/bitops.h>
55
56#include <asm/system.h>
57#include <asm/io.h>
58#include <asm/dma.h>
59
60#define DRV_NAME "fmv18x"
61
62static unsigned fmv18x_probe_list[] __initdata = {
63 0x220, 0x240, 0x260, 0x280, 0x2a0, 0x2c0, 0x300, 0x340, 0
64};
65
66/* use 0 for production, 1 for verification, >2 for debug */
67#ifndef NET_DEBUG
68#define NET_DEBUG 1
69#endif
70static unsigned int net_debug = NET_DEBUG;
71
72typedef unsigned char uchar;
73
74/* Information that need to be kept for each board. */
75struct net_local {
76 struct net_device_stats stats;
77 long open_time; /* Useless example local info. */
78 uint tx_started:1; /* Number of packet on the Tx queue. */
79 uint tx_queue_ready:1; /* Tx queue is ready to be sent. */
80 uint rx_started:1; /* Packets are Rxing. */
81 uchar tx_queue; /* Number of packet on the Tx queue. */
82 ushort tx_queue_len; /* Current length of the Tx queue. */
83 spinlock_t lock;
84};
85
86
87/* Offsets from the base address. */
88#define STATUS 0
89#define TX_STATUS 0
90#define RX_STATUS 1
91#define TX_INTR 2 /* Bit-mapped interrupt enable registers. */
92#define RX_INTR 3
93#define TX_MODE 4
94#define RX_MODE 5
95#define CONFIG_0 6 /* Misc. configuration settings. */
96#define CONFIG_1 7
97/* Run-time register bank 2 definitions. */
98#define DATAPORT 8 /* Word-wide DMA or programmed-I/O dataport. */
99#define TX_START 10
100#define COL16CNTL 11 /* Controll Reg for 16 collisions */
101#define MODE13 13
102/* Fujitsu FMV-18x Card Configuration */
103#define FJ_STATUS0 0x10
104#define FJ_STATUS1 0x11
105#define FJ_CONFIG0 0x12
106#define FJ_CONFIG1 0x13
107#define FJ_MACADDR 0x14 /* 0x14 - 0x19 */
108#define FJ_BUFCNTL 0x1A
109#define FJ_BUFDATA 0x1C
110#define FMV18X_IO_EXTENT 32
111
112/* Index to functions, as function prototypes. */
113
114static int fmv18x_probe1(struct net_device *dev, short ioaddr);
115static int net_open(struct net_device *dev);
116static int net_send_packet(struct sk_buff *skb, struct net_device *dev);
117static irqreturn_t net_interrupt(int irq, void *dev_id, struct pt_regs *regs);
118static void net_rx(struct net_device *dev);
119static void net_timeout(struct net_device *dev);
120static int net_close(struct net_device *dev);
121static struct net_device_stats *net_get_stats(struct net_device *dev);
122static void set_multicast_list(struct net_device *dev);
123
124
125/* Check for a network adaptor of this type, and return '0' iff one exists.
126 If dev->base_addr == 0, probe all likely locations.
127 If dev->base_addr == 1, always return failure.
128 If dev->base_addr == 2, allocate space for the device and return success
129 (detachable devices only).
130 */
131
132static int io = 0x220;
133static int irq;
134
135struct net_device * __init fmv18x_probe(int unit)
136{
137 struct net_device *dev = alloc_etherdev(sizeof(struct net_local));
138 unsigned *port;
139 int err = 0;
140
141 if (!dev)
142 return ERR_PTR(-ENODEV);
143
144 if (unit >= 0) {
145 sprintf(dev->name, "eth%d", unit);
146 netdev_boot_setup_check(dev);
147 io = dev->base_addr;
148 irq = dev->irq;
149 }
150
151 SET_MODULE_OWNER(dev);
152
153 if (io > 0x1ff) { /* Check a single specified location. */
154 err = fmv18x_probe1(dev, io);
155 } else if (io != 0) { /* Don't probe at all. */
156 err = -ENXIO;
157 } else {
158 for (port = fmv18x_probe_list; *port; port++)
159 if (fmv18x_probe1(dev, *port) == 0)
160 break;
161 if (!*port)
162 err = -ENODEV;
163 }
164 if (err)
165 goto out;
166 err = register_netdev(dev);
167 if (err)
168 goto out1;
169 return dev;
170out1:
171 free_irq(dev->irq, dev);
172 release_region(dev->base_addr, FMV18X_IO_EXTENT);
173out:
174 free_netdev(dev);
175 return ERR_PTR(err);
176}
177
178/* The Fujitsu datasheet suggests that the NIC be probed for by checking its
179 "signature", the default bit pattern after a reset. This *doesn't* work --
180 there is no way to reset the bus interface without a complete power-cycle!
181
182 It turns out that ATI came to the same conclusion I did: the only thing
183 that can be done is checking a few bits and then diving right into MAC
184 address check. */
185
186static int __init fmv18x_probe1(struct net_device *dev, short ioaddr)
187{
188 char irqmap[4] = {3, 7, 10, 15};
189 char irqmap_pnp[8] = {3, 4, 5, 7, 9, 10, 11, 15};
190 unsigned int i, retval;
191 struct net_local *lp;
192
193 /* Resetting the chip doesn't reset the ISA interface, so don't bother.
194 That means we have to be careful with the register values we probe for.
195 */
196
197 if (!request_region(ioaddr, FMV18X_IO_EXTENT, DRV_NAME))
198 return -EBUSY;
199
200 dev->irq = irq;
201 dev->base_addr = ioaddr;
202
203 /* Check I/O address configuration and Fujitsu vendor code */
204 if (inb(ioaddr+FJ_MACADDR ) != 0x00
205 || inb(ioaddr+FJ_MACADDR+1) != 0x00
206 || inb(ioaddr+FJ_MACADDR+2) != 0x0e) {
207 retval = -ENODEV;
208 goto out;
209 }
210
211 /* Check PnP mode for FMV-183/184/183A/184A. */
212 /* This PnP routine is very poor. IO and IRQ should be known. */
213 if (inb(ioaddr + FJ_STATUS1) & 0x20) {
214 for (i = 0; i < 8; i++) {
215 if (dev->irq == irqmap_pnp[i])
216 break;
217 }
218 if (i == 8) {
219 retval = -ENODEV;
220 goto out;
221 }
222 } else {
223 if (fmv18x_probe_list[inb(ioaddr + FJ_CONFIG0) & 0x07] != ioaddr)
224 return -ENODEV;
225 dev->irq = irqmap[(inb(ioaddr + FJ_CONFIG0)>>6) & 0x03];
226 }
227
228 /* Snarf the interrupt vector now. */
229 retval = request_irq(dev->irq, &net_interrupt, 0, DRV_NAME, dev);
230 if (retval) {
231 printk ("FMV-18x found at %#3x, but it's unusable due to a conflict on"
232 "IRQ %d.\n", ioaddr, dev->irq);
233 goto out;
234 }
235
236 printk("%s: FMV-18x found at %#3x, IRQ %d, address ", dev->name,
237 ioaddr, dev->irq);
238
239 for(i = 0; i < 6; i++) {
240 unsigned char val = inb(ioaddr + FJ_MACADDR + i);
241 printk("%02x", val);
242 dev->dev_addr[i] = val;
243 }
244
245 /* "FJ_STATUS0" 12 bit 0x0400 means use regular 100 ohm 10baseT signals,
246 rather than 150 ohm shielded twisted pair compensation.
247 0x0000 == auto-sense the interface
248 0x0800 == use TP interface
249 0x1800 == use coax interface
250 */
251 {
252 const char *porttype[] = {"auto-sense", "10baseT", "auto-sense", "10base2/5"};
253 ushort setup_value = inb(ioaddr + FJ_STATUS0);
254
255 switch( setup_value & 0x07 ){
256 case 0x01 /* 10base5 */:
257 case 0x02 /* 10base2 */: dev->if_port = 0x18; break;
258 case 0x04 /* 10baseT */: dev->if_port = 0x08; break;
259 default /* auto-sense*/: dev->if_port = 0x00; break;
260 }
261 printk(" %s interface.\n", porttype[(dev->if_port>>3) & 3]);
262 }
263
264 /* Initialize LAN Controller and LAN Card */
265 outb(0xda, ioaddr + CONFIG_0); /* Initialize LAN Controller */
266 outb(0x00, ioaddr + CONFIG_1); /* Stand by mode */
267 outb(0x00, ioaddr + FJ_CONFIG1); /* Disable IRQ of LAN Card */
268 outb(0x00, ioaddr + FJ_BUFCNTL); /* Reset ? I'm not sure (TAMIYA) */
269
270 /* wait for a while */
271 udelay(200);
272
273 /* Set the station address in bank zero. */
274 outb(0x00, ioaddr + CONFIG_1);
275 for (i = 0; i < 6; i++)
276 outb(dev->dev_addr[i], ioaddr + 8 + i);
277
278 /* Switch to bank 1 and set the multicast table to accept none. */
279 outb(0x04, ioaddr + CONFIG_1);
280 for (i = 0; i < 8; i++)
281 outb(0x00, ioaddr + 8 + i);
282
283 /* Switch to bank 2 and lock our I/O address. */
284 outb(0x08, ioaddr + CONFIG_1);
285 outb(dev->if_port, ioaddr + MODE13);
286 outb(0x00, ioaddr + COL16CNTL);
287
288 if (net_debug)
289 printk(version);
290
291 /* Initialize the device structure. */
292 dev->priv = kmalloc(sizeof(struct net_local), GFP_KERNEL);
293 if (!dev->priv) {
294 retval = -ENOMEM;
295 goto out_irq;
296 }
297 memset(dev->priv, 0, sizeof(struct net_local));
298 lp = dev->priv;
299 spin_lock_init(&lp->lock);
300
301 dev->open = net_open;
302 dev->stop = net_close;
303 dev->hard_start_xmit = net_send_packet;
304 dev->tx_timeout = net_timeout;
305 dev->watchdog_timeo = HZ/10;
306 dev->get_stats = net_get_stats;
307 dev->set_multicast_list = set_multicast_list;
308 return 0;
309
310out_irq:
311 free_irq(dev->irq, dev);
312out:
313 release_region(ioaddr, FMV18X_IO_EXTENT);
314 return retval;
315}
316
317
318static int net_open(struct net_device *dev)
319{
320 struct net_local *lp = dev->priv;
321 int ioaddr = dev->base_addr;
322
323 /* Set the configuration register 0 to 32K 100ns. byte-wide memory,
324 16 bit bus access, and two 4K Tx, enable the Rx and Tx. */
325 outb(0x5a, ioaddr + CONFIG_0);
326
327 /* Powerup and switch to register bank 2 for the run-time registers. */
328 outb(0xe8, ioaddr + CONFIG_1);
329
330 lp->tx_started = 0;
331 lp->tx_queue_ready = 1;
332 lp->rx_started = 0;
333 lp->tx_queue = 0;
334 lp->tx_queue_len = 0;
335
336 /* Clear Tx and Rx Status */
337 outb(0xff, ioaddr + TX_STATUS);
338 outb(0xff, ioaddr + RX_STATUS);
339 lp->open_time = jiffies;
340
341 netif_start_queue(dev);
342
343 /* Enable the IRQ of the LAN Card */
344 outb(0x80, ioaddr + FJ_CONFIG1);
345
346 /* Enable both Tx and Rx interrupts */
347 outw(0x8182, ioaddr+TX_INTR);
348
349 return 0;
350}
351
352static void net_timeout(struct net_device *dev)
353{
354 struct net_local *lp = dev->priv;
355 int ioaddr = dev->base_addr;
356 unsigned long flags;
357
358
359 printk(KERN_WARNING "%s: transmit timed out with status %04x, %s?\n", dev->name,
360 htons(inw(ioaddr + TX_STATUS)),
361 inb(ioaddr + TX_STATUS) & 0x80
362 ? "IRQ conflict" : "network cable problem");
363 printk(KERN_WARNING "%s: timeout registers: %04x %04x %04x %04x %04x %04x %04x %04x.\n",
364 dev->name, htons(inw(ioaddr + 0)),
365 htons(inw(ioaddr + 2)), htons(inw(ioaddr + 4)),
366 htons(inw(ioaddr + 6)), htons(inw(ioaddr + 8)),
367 htons(inw(ioaddr +10)), htons(inw(ioaddr +12)),
368 htons(inw(ioaddr +14)));
369 printk(KERN_WARNING "eth card: %04x %04x\n",
370 htons(inw(ioaddr+FJ_STATUS0)),
371 htons(inw(ioaddr+FJ_CONFIG0)));
372 lp->stats.tx_errors++;
373 /* ToDo: We should try to restart the adaptor... */
374 spin_lock_irqsave(&lp->lock, flags);
375
376 /* Initialize LAN Controller and LAN Card */
377 outb(0xda, ioaddr + CONFIG_0); /* Initialize LAN Controller */
378 outb(0x00, ioaddr + CONFIG_1); /* Stand by mode */
379 outb(0x00, ioaddr + FJ_CONFIG1); /* Disable IRQ of LAN Card */
380 outb(0x00, ioaddr + FJ_BUFCNTL); /* Reset ? I'm not sure */
381 net_open(dev);
382 spin_unlock_irqrestore(&lp->lock, flags);
383
384 netif_wake_queue(dev);
385}
386
387static int net_send_packet(struct sk_buff *skb, struct net_device *dev)
388{
389 struct net_local *lp = dev->priv;
390 int ioaddr = dev->base_addr;
391 short length = skb->len;
392 unsigned char *buf;
393 unsigned long flags;
394
395 /* Block a transmit from overlapping. */
396
397 if (length > ETH_FRAME_LEN) {
398 if (net_debug)
399 printk("%s: Attempting to send a large packet (%d bytes).\n",
400 dev->name, length);
401 return 1;
402 }
403
404 if (length < ETH_ZLEN) {
405 skb = skb_padto(skb, ETH_ZLEN);
406 if (skb == NULL)
407 return 0;
408 length = ETH_ZLEN;
409 }
410 buf = skb->data;
411
412 if (net_debug > 4)
413 printk("%s: Transmitting a packet of length %lu.\n", dev->name,
414 (unsigned long)skb->len);
415 /* We may not start transmitting unless we finish transferring
416 a packet into the Tx queue. During executing the following
417 codes we possibly catch a Tx interrupt. Thus we flag off
418 tx_queue_ready, so that we prevent the interrupt routine
419 (net_interrupt) to start transmitting. */
420 spin_lock_irqsave(&lp->lock, flags);
421 lp->tx_queue_ready = 0;
422 {
423 outw(length, ioaddr + DATAPORT);
424 outsw(ioaddr + DATAPORT, buf, (length + 1) >> 1);
425 lp->tx_queue++;
426 lp->tx_queue_len += length + 2;
427 }
428 lp->tx_queue_ready = 1;
429 spin_unlock_irqrestore(&lp->lock, flags);
430
431 if (lp->tx_started == 0) {
432 /* If the Tx is idle, always trigger a transmit. */
433 outb(0x80 | lp->tx_queue, ioaddr + TX_START);
434 lp->tx_queue = 0;
435 lp->tx_queue_len = 0;
436 dev->trans_start = jiffies;
437 lp->tx_started = 1;
438 } else if (lp->tx_queue_len >= 4096 - 1502) /* No room for a packet */
439 netif_stop_queue(dev);
440
441 dev_kfree_skb(skb);
442 return 0;
443}
444
445/* The typical workload of the driver:
446 Handle the network interface interrupts. */
447static irqreturn_t
448net_interrupt(int irq, void *dev_id, struct pt_regs *regs)
449{
450 struct net_device *dev = dev_id;
451 struct net_local *lp;
452 int ioaddr, status;
453
454 ioaddr = dev->base_addr;
455 lp = dev->priv;
456 status = inw(ioaddr + TX_STATUS);
457 outw(status, ioaddr + TX_STATUS);
458
459 if (net_debug > 4)
460 printk("%s: Interrupt with status %04x.\n", dev->name, status);
461 if (lp->rx_started == 0 &&
462 (status & 0xff00 || (inb(ioaddr + RX_MODE) & 0x40) == 0)) {
463 /* Got a packet(s).
464 We cannot execute net_rx more than once at the same time for
465 the same device. During executing net_rx, we possibly catch a
466 Tx interrupt. Thus we flag on rx_started, so that we prevent
467 the interrupt routine (net_interrupt) to dive into net_rx
468 again. */
469 lp->rx_started = 1;
470 outb(0x00, ioaddr + RX_INTR); /* Disable RX intr. */
471 net_rx(dev);
472 outb(0x81, ioaddr + RX_INTR); /* Enable RX intr. */
473 lp->rx_started = 0;
474 }
475 if (status & 0x00ff) {
476 if (status & 0x02) {
477 /* More than 16 collisions occurred */
478 if (net_debug > 4)
479 printk("%s: 16 Collision occur during Txing.\n", dev->name);
480 /* Cancel sending a packet. */
481 outb(0x03, ioaddr + COL16CNTL);
482 lp->stats.collisions++;
483 }
484 if (status & 0x82) {
485 spin_lock(&lp->lock);
486 lp->stats.tx_packets++;
487 if (lp->tx_queue && lp->tx_queue_ready) {
488 outb(0x80 | lp->tx_queue, ioaddr + TX_START);
489 lp->tx_queue = 0;
490 lp->tx_queue_len = 0;
491 dev->trans_start = jiffies;
492 netif_wake_queue(dev); /* Inform upper layers. */
493 } else {
494 lp->tx_started = 0;
495 netif_wake_queue(dev); /* Inform upper layers. */
496 }
497 spin_unlock(&lp->lock);
498 }
499 }
500 return IRQ_RETVAL(status);
501}
502
503/* We have a good packet(s), get it/them out of the buffers. */
504static void net_rx(struct net_device *dev)
505{
506 struct net_local *lp = dev->priv;
507 int ioaddr = dev->base_addr;
508 int boguscount = 5;
509
510 while ((inb(ioaddr + RX_MODE) & 0x40) == 0) {
511 /* Clear PKT_RDY bit: by agy 19940922 */
512 /* outb(0x80, ioaddr + RX_STATUS); */
513 ushort status = inw(ioaddr + DATAPORT);
514
515 if (net_debug > 4)
516 printk("%s: Rxing packet mode %02x status %04x.\n",
517 dev->name, inb(ioaddr + RX_MODE), status);
518#ifndef final_version
519 if (status == 0) {
520 outb(0x05, ioaddr + 14);
521 break;
522 }
523#endif
524
525 if ((status & 0xF0) != 0x20) { /* There was an error. */
526 lp->stats.rx_errors++;
527 if (status & 0x08) lp->stats.rx_length_errors++;
528 if (status & 0x04) lp->stats.rx_frame_errors++;
529 if (status & 0x02) lp->stats.rx_crc_errors++;
530 if (status & 0x01) lp->stats.rx_over_errors++;
531 } else {
532 ushort pkt_len = inw(ioaddr + DATAPORT);
533 /* Malloc up new buffer. */
534 struct sk_buff *skb;
535
536 if (pkt_len > 1550) {
537 printk("%s: The FMV-18x claimed a very large packet, size %d.\n",
538 dev->name, pkt_len);
539 outb(0x05, ioaddr + 14);
540 lp->stats.rx_errors++;
541 break;
542 }
543 skb = dev_alloc_skb(pkt_len+3);
544 if (skb == NULL) {
545 printk("%s: Memory squeeze, dropping packet (len %d).\n",
546 dev->name, pkt_len);
547 outb(0x05, ioaddr + 14);
548 lp->stats.rx_dropped++;
549 break;
550 }
551 skb->dev = dev;
552 skb_reserve(skb,2);
553
554 insw(ioaddr + DATAPORT, skb_put(skb,pkt_len), (pkt_len + 1) >> 1);
555
556 if (net_debug > 5) {
557 int i;
558 printk("%s: Rxed packet of length %d: ", dev->name, pkt_len);
559 for (i = 0; i < 14; i++)
560 printk(" %02x", skb->data[i]);
561 printk(".\n");
562 }
563
564 skb->protocol=eth_type_trans(skb, dev);
565 netif_rx(skb);
566 dev->last_rx = jiffies;
567 lp->stats.rx_packets++;
568 lp->stats.rx_bytes += pkt_len;
569 }
570 if (--boguscount <= 0)
571 break;
572 }
573
574 /* If any worth-while packets have been received, dev_rint()
575 has done a mark_bh(NET_BH) for us and will work on them
576 when we get to the bottom-half routine. */
577 {
578 int i;
579 for (i = 0; i < 20; i++) {
580 if ((inb(ioaddr + RX_MODE) & 0x40) == 0x40)
581 break;
582 (void)inw(ioaddr + DATAPORT); /* dummy status read */
583 outb(0x05, ioaddr + 14);
584 }
585
586 if (net_debug > 5 && i > 0)
587 printk("%s: Exint Rx packet with mode %02x after %d ticks.\n",
588 dev->name, inb(ioaddr + RX_MODE), i);
589 }
590
591 return;
592}
593
594/* The inverse routine to net_open(). */
595static int net_close(struct net_device *dev)
596{
597 int ioaddr = dev->base_addr;
598
599 ((struct net_local *)dev->priv)->open_time = 0;
600
601 netif_stop_queue(dev);
602
603 /* Set configuration register 0 to disable Tx and Rx. */
604 outb(0xda, ioaddr + CONFIG_0);
605
606 /* Update the statistics -- ToDo. */
607
608 /* Power-down the chip. Green, green, green! */
609 outb(0x00, ioaddr + CONFIG_1);
610
611	/* Tell the Ethernet adaptor to disable its IRQ. */
612 outb(0x00, ioaddr + FJ_CONFIG1);
613
614 return 0;
615}
616
617/* Get the current statistics. This may be called with the card open or
618 closed. */
619static struct net_device_stats *net_get_stats(struct net_device *dev)
620{
621 struct net_local *lp = dev->priv;
622 return &lp->stats;
623}
624
625/* Set or clear the multicast filter for this adaptor.
626 num_addrs == -1 Promiscuous mode, receive all packets
627 num_addrs == 0 Normal mode, clear multicast list
628 num_addrs > 0 Multicast mode, receive normal and MC packets, and do
629 best-effort filtering.
630 */
631
632static void set_multicast_list(struct net_device *dev)
633{
634 short ioaddr = dev->base_addr;
635 if (dev->mc_count || dev->flags&(IFF_PROMISC|IFF_ALLMULTI))
636 {
637 /*
638 * We must make the kernel realise we had to move
639 * into promisc mode or we start all out war on
640 * the cable. - AC
641 */
642 dev->flags|=IFF_PROMISC;
643
644 outb(3, ioaddr + RX_MODE); /* Enable promiscuous mode */
645 }
646 else
647 outb(2, ioaddr + RX_MODE); /* Disable promiscuous, use normal mode */
648}
649
650#ifdef MODULE
651static struct net_device *dev_fmv18x;
652
653MODULE_PARM(io, "i");
654MODULE_PARM(irq, "i");
655MODULE_PARM(net_debug, "i");
656MODULE_PARM_DESC(io, "FMV-18X I/O address");
657MODULE_PARM_DESC(irq, "FMV-18X IRQ number");
658MODULE_PARM_DESC(net_debug, "FMV-18X debug level (0-1,5-6)");
659MODULE_LICENSE("GPL");
660
661int init_module(void)
662{
663 if (io == 0)
664 printk("fmv18x: You should not use auto-probing with insmod!\n");
665 dev_fmv18x = fmv18x_probe(-1);
666 if (IS_ERR(dev_fmv18x))
667 return PTR_ERR(dev_fmv18x);
668 return 0;
669}
670
671void
672cleanup_module(void)
673{
674 unregister_netdev(dev_fmv18x);
675 free_irq(dev_fmv18x->irq, dev_fmv18x);
676 release_region(dev_fmv18x->base_addr, FMV18X_IO_EXTENT);
677 free_netdev(dev_fmv18x);
678}
679#endif /* MODULE */
680
681/*
682 * Local variables:
683 * compile-command: "gcc -D__KERNEL__ -I/usr/src/linux/net/inet -Wall -Wstrict-prototypes -O6 -m486 -c fmv18x.c"
684 * version-control: t
685 * kept-new-versions: 5
686 * tab-width: 4
687 * c-indent-level: 4
688 * End:
689 */
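
For comparison, the MODULE_PARM()/MODULE_PARM_DESC() block above predates the
module_param() interface that the r8169 changes below already use; the same
three options would look roughly like this (a sketch only, reusing the
driver's existing variable names):

static int io;
static int irq;
static int net_debug;

module_param(io, int, 0);
module_param(irq, int, 0);
module_param(net_debug, int, 0);
MODULE_PARM_DESC(io, "FMV-18X I/O address");
MODULE_PARM_DESC(irq, "FMV-18X IRQ number");
MODULE_PARM_DESC(net_debug, "FMV-18X debug level (0-1,5-6)");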
diff --git a/drivers/net/pcmcia/pcnet_cs.c b/drivers/net/pcmcia/pcnet_cs.c
index b0126304ca08..181b6ed55003 100644
--- a/drivers/net/pcmcia/pcnet_cs.c
+++ b/drivers/net/pcmcia/pcnet_cs.c
@@ -1537,20 +1537,20 @@ static void shmem_get_8390_hdr(struct net_device *dev,
1537static void shmem_block_input(struct net_device *dev, int count, 1537static void shmem_block_input(struct net_device *dev, int count,
1538 struct sk_buff *skb, int ring_offset) 1538 struct sk_buff *skb, int ring_offset)
1539{ 1539{
1540 void __iomem *xfer_start = ei_status.mem + (TX_PAGES<<8) 1540 void __iomem *base = ei_status.mem;
1541 + ring_offset 1541 unsigned long offset = (TX_PAGES<<8) + ring_offset
1542 - (ei_status.rx_start_page << 8); 1542 - (ei_status.rx_start_page << 8);
1543 char *buf = skb->data; 1543 char *buf = skb->data;
1544 1544
1545 if (xfer_start + count > (void __iomem *)ei_status.rmem_end) { 1545 if (offset + count > ei_status.priv) {
1546 /* We must wrap the input move. */ 1546 /* We must wrap the input move. */
1547 int semi_count = (void __iomem *)ei_status.rmem_end - xfer_start; 1547 int semi_count = ei_status.priv - offset;
1548 copyin(buf, xfer_start, semi_count); 1548 copyin(buf, base + offset, semi_count);
1549 buf += semi_count; 1549 buf += semi_count;
1550 xfer_start = ei_status.mem + (TX_PAGES<<8); 1550 offset = TX_PAGES<<8;
1551 count -= semi_count; 1551 count -= semi_count;
1552 } 1552 }
1553 copyin(buf, xfer_start, count); 1553 copyin(buf, base + offset, count);
1554} 1554}
1555 1555
1556/*====================================================================*/ 1556/*====================================================================*/
@@ -1611,8 +1611,9 @@ static int setup_shmem_window(dev_link_t *link, int start_pg,
1611 } 1611 }
1612 1612
1613 ei_status.mem = info->base + offset; 1613 ei_status.mem = info->base + offset;
1614 ei_status.priv = req.Size;
1614 dev->mem_start = (u_long)ei_status.mem; 1615 dev->mem_start = (u_long)ei_status.mem;
1615 dev->mem_end = ei_status.rmem_end = (u_long)info->base + req.Size; 1616 dev->mem_end = dev->mem_start + req.Size;
1616 1617
1617 ei_status.tx_start_page = start_pg; 1618 ei_status.tx_start_page = start_pg;
1618 ei_status.rx_start_page = start_pg + TX_PAGES; 1619 ei_status.rx_start_page = start_pg + TX_PAGES;
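
The shmem_block_input() change above keeps a plain offset into the mapped
window and checks it against the window size saved in ei_status.priv, rather
than comparing __iomem pointers against rmem_end. The wrap-and-copy pattern in
isolation, as a rough sketch (copy_wrapped, win_size and wrap_to are made-up
names; the driver's copyin is a memcpy_fromio-style helper):

static void copy_wrapped(char *dst, void __iomem *base,
			 unsigned long offset, unsigned long wrap_to,
			 unsigned long win_size, int count)
{
	if (offset + count > win_size) {
		/* First chunk: everything up to the end of the window. */
		int semi_count = win_size - offset;

		memcpy_fromio(dst, base + offset, semi_count);
		dst += semi_count;
		count -= semi_count;
		offset = wrap_to;	/* wrap back past the Tx pages */
	}
	memcpy_fromio(dst, base + offset, count);
}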
diff --git a/drivers/net/ppp_generic.c b/drivers/net/ppp_generic.c
index 3b377f6cd4a0..ad4b58af6b76 100644
--- a/drivers/net/ppp_generic.c
+++ b/drivers/net/ppp_generic.c
@@ -1217,36 +1217,43 @@ ppp_push(struct ppp *ppp)
1217 */ 1217 */
1218static int ppp_mp_explode(struct ppp *ppp, struct sk_buff *skb) 1218static int ppp_mp_explode(struct ppp *ppp, struct sk_buff *skb)
1219{ 1219{
1220 int nch, len, fragsize; 1220 int len, fragsize;
1221 int i, bits, hdrlen, mtu; 1221 int i, bits, hdrlen, mtu;
1222 int flen, fnb; 1222 int flen;
1223 int navail, nfree;
1224 int nbigger;
1223 unsigned char *p, *q; 1225 unsigned char *p, *q;
1224 struct list_head *list; 1226 struct list_head *list;
1225 struct channel *pch; 1227 struct channel *pch;
1226 struct sk_buff *frag; 1228 struct sk_buff *frag;
1227 struct ppp_channel *chan; 1229 struct ppp_channel *chan;
1228 1230
1229 nch = 0; 1231 nfree = 0; /* # channels which have no packet already queued */
1232 navail = 0; /* total # of usable channels (not deregistered) */
1230 hdrlen = (ppp->flags & SC_MP_XSHORTSEQ)? MPHDRLEN_SSN: MPHDRLEN; 1233 hdrlen = (ppp->flags & SC_MP_XSHORTSEQ)? MPHDRLEN_SSN: MPHDRLEN;
1234 i = 0;
1231 list = &ppp->channels; 1235 list = &ppp->channels;
1232 while ((list = list->next) != &ppp->channels) { 1236 while ((list = list->next) != &ppp->channels) {
1233 pch = list_entry(list, struct channel, clist); 1237 pch = list_entry(list, struct channel, clist);
1234 nch += pch->avail = (skb_queue_len(&pch->file.xq) == 0); 1238 navail += pch->avail = (pch->chan != NULL);
1235 /* 1239 if (pch->avail) {
1236 * If a channel hasn't had a fragment yet, it has to get 1240 if (skb_queue_len(&pch->file.xq) == 0
1237 * one before we send any fragments on later channels. 1241 || !pch->had_frag) {
1238 * If it can't take a fragment now, don't give any 1242 pch->avail = 2;
1239 * to subsequent channels. 1243 ++nfree;
1240 */
1241 if (!pch->had_frag && !pch->avail) {
1242 while ((list = list->next) != &ppp->channels) {
1243 pch = list_entry(list, struct channel, clist);
1244 pch->avail = 0;
1245 } 1244 }
1246 break; 1245 if (!pch->had_frag && i < ppp->nxchan)
1246 ppp->nxchan = i;
1247 } 1247 }
1248 ++i;
1248 } 1249 }
1249 if (nch == 0) 1250
1251 /*
1252 * Don't start sending this packet unless at least half of
1253 * the channels are free. This gives much better TCP
1254 * performance if we have a lot of channels.
1255 */
1256 if (nfree == 0 || nfree < navail / 2)
1250 return 0; /* can't take now, leave it in xmit_pending */ 1257 return 0; /* can't take now, leave it in xmit_pending */
1251 1258
1252 /* Do protocol field compression (XXX this should be optional) */ 1259 /* Do protocol field compression (XXX this should be optional) */
@@ -1257,14 +1264,19 @@ static int ppp_mp_explode(struct ppp *ppp, struct sk_buff *skb)
1257 --len; 1264 --len;
1258 } 1265 }
1259 1266
1260 /* decide on fragment size */ 1267 /*
1268 * Decide on fragment size.
1269 * We create a fragment for each free channel regardless of
1270 * how small they are (i.e. even 0 length) in order to minimize
1271 * the time that it will take to detect when a channel drops
1272 * a fragment.
1273 */
1261 fragsize = len; 1274 fragsize = len;
1262 if (nch > 1) { 1275 if (nfree > 1)
1263 int maxch = ROUNDUP(len, MIN_FRAG_SIZE); 1276 fragsize = ROUNDUP(fragsize, nfree);
1264 if (nch > maxch) 1277 /* nbigger channels get fragsize bytes, the rest get fragsize-1,
1265 nch = maxch; 1278 except if nbigger==0, then they all get fragsize. */
1266 fragsize = ROUNDUP(fragsize, nch); 1279 nbigger = len % nfree;
1267 }
1268 1280
1269 /* skip to the channel after the one we last used 1281 /* skip to the channel after the one we last used
1270 and start at that one */ 1282 and start at that one */
@@ -1278,7 +1290,7 @@ static int ppp_mp_explode(struct ppp *ppp, struct sk_buff *skb)
1278 1290
1279 /* create a fragment for each channel */ 1291 /* create a fragment for each channel */
1280 bits = B; 1292 bits = B;
1281 do { 1293 while (nfree > 0 || len > 0) {
1282 list = list->next; 1294 list = list->next;
1283 if (list == &ppp->channels) { 1295 if (list == &ppp->channels) {
1284 i = 0; 1296 i = 0;
@@ -1289,61 +1301,92 @@ static int ppp_mp_explode(struct ppp *ppp, struct sk_buff *skb)
1289 if (!pch->avail) 1301 if (!pch->avail)
1290 continue; 1302 continue;
1291 1303
1304 /*
1305 * Skip this channel if it has a fragment pending already and
1306 * we haven't given a fragment to all of the free channels.
1307 */
1308 if (pch->avail == 1) {
1309 if (nfree > 0)
1310 continue;
1311 } else {
1312 --nfree;
1313 pch->avail = 1;
1314 }
1315
1292 /* check the channel's mtu and whether it is still attached. */ 1316 /* check the channel's mtu and whether it is still attached. */
1293 spin_lock_bh(&pch->downl); 1317 spin_lock_bh(&pch->downl);
1294 if (pch->chan == 0 || (mtu = pch->chan->mtu) < hdrlen) { 1318 if (pch->chan == NULL) {
1295 /* can't use this channel */ 1319 /* can't use this channel, it's being deregistered */
1296 spin_unlock_bh(&pch->downl); 1320 spin_unlock_bh(&pch->downl);
1297 pch->avail = 0; 1321 pch->avail = 0;
1298 if (--nch == 0) 1322 if (--navail == 0)
1299 break; 1323 break;
1300 continue; 1324 continue;
1301 } 1325 }
1302 1326
1303 /* 1327 /*
1304 * We have to create multiple fragments for this channel 1328 * Create a fragment for this channel of
1305 * if fragsize is greater than the channel's mtu. 1329 * min(max(mtu+2-hdrlen, 4), fragsize, len) bytes.
1330 * If mtu+2-hdrlen < 4, that is a ridiculously small
1331 * MTU, so we use mtu = 2 + hdrlen.
1306 */ 1332 */
1307 if (fragsize > len) 1333 if (fragsize > len)
1308 fragsize = len; 1334 fragsize = len;
1309 for (flen = fragsize; flen > 0; flen -= fnb) { 1335 flen = fragsize;
1310 fnb = flen; 1336 mtu = pch->chan->mtu + 2 - hdrlen;
1311 if (fnb > mtu + 2 - hdrlen) 1337 if (mtu < 4)
1312 fnb = mtu + 2 - hdrlen; 1338 mtu = 4;
1313 if (fnb >= len) 1339 if (flen > mtu)
1314 bits |= E; 1340 flen = mtu;
1315 frag = alloc_skb(fnb + hdrlen, GFP_ATOMIC); 1341 if (flen == len && nfree == 0)
1316 if (frag == 0) 1342 bits |= E;
1317 goto noskb; 1343 frag = alloc_skb(flen + hdrlen + (flen == 0), GFP_ATOMIC);
1318 q = skb_put(frag, fnb + hdrlen); 1344 if (frag == 0)
1319 /* make the MP header */ 1345 goto noskb;
1320 q[0] = PPP_MP >> 8; 1346 q = skb_put(frag, flen + hdrlen);
1321 q[1] = PPP_MP; 1347
1322 if (ppp->flags & SC_MP_XSHORTSEQ) { 1348 /* make the MP header */
1323 q[2] = bits + ((ppp->nxseq >> 8) & 0xf); 1349 q[0] = PPP_MP >> 8;
1324 q[3] = ppp->nxseq; 1350 q[1] = PPP_MP;
1325 } else { 1351 if (ppp->flags & SC_MP_XSHORTSEQ) {
1326 q[2] = bits; 1352 q[2] = bits + ((ppp->nxseq >> 8) & 0xf);
1327 q[3] = ppp->nxseq >> 16; 1353 q[3] = ppp->nxseq;
1328 q[4] = ppp->nxseq >> 8; 1354 } else {
1329 q[5] = ppp->nxseq; 1355 q[2] = bits;
1330 } 1356 q[3] = ppp->nxseq >> 16;
1331 1357 q[4] = ppp->nxseq >> 8;
1332 /* copy the data in */ 1358 q[5] = ppp->nxseq;
1333 memcpy(q + hdrlen, p, fnb);
1334
1335 /* try to send it down the channel */
1336 chan = pch->chan;
1337 if (!chan->ops->start_xmit(chan, frag))
1338 skb_queue_tail(&pch->file.xq, frag);
1339 pch->had_frag = 1;
1340 p += fnb;
1341 len -= fnb;
1342 ++ppp->nxseq;
1343 bits = 0;
1344 } 1359 }
1360
1361 /*
1362 * Copy the data in.
1363 * Unfortunately there is a bug in older versions of
1364 * the Linux PPP multilink reconstruction code where it
1365 * drops 0-length fragments. Therefore we make sure the
1366 * fragment has at least one byte of data. Any bytes
1367 * we add in this situation will end up as padding on the
1368 * end of the reconstructed packet.
1369 */
1370 if (flen == 0)
1371 *skb_put(frag, 1) = 0;
1372 else
1373 memcpy(q + hdrlen, p, flen);
1374
1375 /* try to send it down the channel */
1376 chan = pch->chan;
1377 if (skb_queue_len(&pch->file.xq)
1378 || !chan->ops->start_xmit(chan, frag))
1379 skb_queue_tail(&pch->file.xq, frag);
1380 pch->had_frag = 1;
1381 p += flen;
1382 len -= flen;
1383 ++ppp->nxseq;
1384 bits = 0;
1345 spin_unlock_bh(&pch->downl); 1385 spin_unlock_bh(&pch->downl);
1346 } while (len > 0); 1386
1387 if (--nbigger == 0 && fragsize > 0)
1388 --fragsize;
1389 }
1347 ppp->nxchan = i; 1390 ppp->nxchan = i;
1348 1391
1349 return 1; 1392 return 1;
@@ -1422,7 +1465,7 @@ ppp_input(struct ppp_channel *chan, struct sk_buff *skb)
1422 kfree_skb(skb); 1465 kfree_skb(skb);
1423 return; 1466 return;
1424 } 1467 }
1425 1468
1426 proto = PPP_PROTO(skb); 1469 proto = PPP_PROTO(skb);
1427 read_lock_bh(&pch->upl); 1470 read_lock_bh(&pch->upl);
1428 if (pch->ppp == 0 || proto >= 0xc000 || proto == PPP_CCPFRAG) { 1471 if (pch->ppp == 0 || proto >= 0xc000 || proto == PPP_CCPFRAG) {
@@ -1691,7 +1734,7 @@ ppp_receive_mp_frame(struct ppp *ppp, struct sk_buff *skb, struct channel *pch)
1691 struct list_head *l; 1734 struct list_head *l;
1692 int mphdrlen = (ppp->flags & SC_MP_SHORTSEQ)? MPHDRLEN_SSN: MPHDRLEN; 1735 int mphdrlen = (ppp->flags & SC_MP_SHORTSEQ)? MPHDRLEN_SSN: MPHDRLEN;
1693 1736
1694 if (!pskb_may_pull(skb, mphdrlen + 1) || ppp->mrru == 0) 1737 if (!pskb_may_pull(skb, mphdrlen) || ppp->mrru == 0)
1695 goto err; /* no good, throw it away */ 1738 goto err; /* no good, throw it away */
1696 1739
1697 /* Decode sequence number and begin/end bits */ 1740 /* Decode sequence number and begin/end bits */
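
The rewritten ppp_mp_explode() above sizes fragments so that, before the
per-channel MTU capping, the first len % nfree fragments are one byte larger
than the rest. That arithmetic on its own, as a sketch (mp_split_sizes is a
hypothetical helper, not part of the patch):

/* Split 'len' bytes over 'nfree' channels the way ppp_mp_explode() does,
 * ignoring MTU capping: the first (len % nfree) fragments get the rounded-up
 * size, the remainder one byte less, so the sizes always sum to 'len'. */
static void mp_split_sizes(int len, int nfree, int *flen)
{
	int fragsize = len;
	int nbigger, i;

	if (nfree > 1)
		fragsize = (len + nfree - 1) / nfree;	/* i.e. ROUNDUP(len, nfree) */
	nbigger = len % nfree;

	for (i = 0; i < nfree; i++) {
		flen[i] = fragsize;
		if (--nbigger == 0 && fragsize > 0)
			--fragsize;
	}
}

/* Examples: len = 10, nfree = 4  ->  3 + 3 + 2 + 2 = 10
 *           len =  8, nfree = 4  ->  2 + 2 + 2 + 2 =  8  (nbigger == 0) */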
diff --git a/drivers/net/r8169.c b/drivers/net/r8169.c
index d6d0e43dab65..ce449fe90e6d 100644
--- a/drivers/net/r8169.c
+++ b/drivers/net/r8169.c
@@ -69,7 +69,13 @@ VERSION 2.2LK <2005/01/25>
69#include <asm/io.h> 69#include <asm/io.h>
70#include <asm/irq.h> 70#include <asm/irq.h>
71 71
72#define RTL8169_VERSION "2.2LK" 72#ifdef CONFIG_R8169_NAPI
73#define NAPI_SUFFIX "-NAPI"
74#else
75#define NAPI_SUFFIX ""
76#endif
77
78#define RTL8169_VERSION "2.2LK" NAPI_SUFFIX
73#define MODULENAME "r8169" 79#define MODULENAME "r8169"
74#define PFX MODULENAME ": " 80#define PFX MODULENAME ": "
75 81
@@ -85,6 +91,10 @@ VERSION 2.2LK <2005/01/25>
85#define dprintk(fmt, args...) do {} while (0) 91#define dprintk(fmt, args...) do {} while (0)
86#endif /* RTL8169_DEBUG */ 92#endif /* RTL8169_DEBUG */
87 93
94#define R8169_MSG_DEFAULT \
95 (NETIF_MSG_DRV | NETIF_MSG_PROBE | NETIF_MSG_LINK | NETIF_MSG_IFUP | \
96 NETIF_MSG_IFDOWN)
97
88#define TX_BUFFS_AVAIL(tp) \ 98#define TX_BUFFS_AVAIL(tp) \
89 (tp->dirty_tx + NUM_TX_DESC - tp->cur_tx - 1) 99 (tp->dirty_tx + NUM_TX_DESC - tp->cur_tx - 1)
90 100
@@ -174,8 +184,9 @@ const static struct {
174#undef _R 184#undef _R
175 185
176static struct pci_device_id rtl8169_pci_tbl[] = { 186static struct pci_device_id rtl8169_pci_tbl[] = {
177 {0x10ec, 0x8169, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, 187 { PCI_DEVICE(PCI_VENDOR_ID_REALTEK, 0x8169), },
178 {0x1186, 0x4300, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, 188 { PCI_DEVICE(PCI_VENDOR_ID_DLINK, 0x4300), },
189 { PCI_DEVICE(0x16ec, 0x0116), },
179 {0,}, 190 {0,},
180}; 191};
181 192
@@ -183,10 +194,15 @@ MODULE_DEVICE_TABLE(pci, rtl8169_pci_tbl);
183 194
184static int rx_copybreak = 200; 195static int rx_copybreak = 200;
185static int use_dac; 196static int use_dac;
197static struct {
198 u32 msg_enable;
199} debug = { -1 };
186 200
187enum RTL8169_registers { 201enum RTL8169_registers {
188 MAC0 = 0, /* Ethernet hardware address. */ 202 MAC0 = 0, /* Ethernet hardware address. */
189 MAR0 = 8, /* Multicast filter. */ 203 MAR0 = 8, /* Multicast filter. */
204 CounterAddrLow = 0x10,
205 CounterAddrHigh = 0x14,
190 TxDescStartAddrLow = 0x20, 206 TxDescStartAddrLow = 0x20,
191 TxDescStartAddrHigh = 0x24, 207 TxDescStartAddrHigh = 0x24,
192 TxHDescStartAddrLow = 0x28, 208 TxHDescStartAddrLow = 0x28,
@@ -328,6 +344,9 @@ enum RTL8169_register_content {
328 344
329 /* _TBICSRBit */ 345 /* _TBICSRBit */
330 TBILinkOK = 0x02000000, 346 TBILinkOK = 0x02000000,
347
348 /* DumpCounterCommand */
349 CounterDump = 0x8,
331}; 350};
332 351
333enum _DescStatusBit { 352enum _DescStatusBit {
@@ -385,6 +404,7 @@ struct rtl8169_private {
385 struct pci_dev *pci_dev; /* Index of PCI device */ 404 struct pci_dev *pci_dev; /* Index of PCI device */
386 struct net_device_stats stats; /* statistics of net device */ 405 struct net_device_stats stats; /* statistics of net device */
387 spinlock_t lock; /* spin lock flag */ 406 spinlock_t lock; /* spin lock flag */
407 u32 msg_enable;
388 int chipset; 408 int chipset;
389 int mac_version; 409 int mac_version;
390 int phy_version; 410 int phy_version;
@@ -418,9 +438,13 @@ struct rtl8169_private {
418MODULE_AUTHOR("Realtek and the Linux r8169 crew <netdev@vger.kernel.org>"); 438MODULE_AUTHOR("Realtek and the Linux r8169 crew <netdev@vger.kernel.org>");
419MODULE_DESCRIPTION("RealTek RTL-8169 Gigabit Ethernet driver"); 439MODULE_DESCRIPTION("RealTek RTL-8169 Gigabit Ethernet driver");
420module_param_array(media, int, &num_media, 0); 440module_param_array(media, int, &num_media, 0);
441MODULE_PARM_DESC(media, "force phy operation. Deprecated by ethtool (8).");
421module_param(rx_copybreak, int, 0); 442module_param(rx_copybreak, int, 0);
443MODULE_PARM_DESC(rx_copybreak, "Copy breakpoint for copy-only-tiny-frames");
422module_param(use_dac, int, 0); 444module_param(use_dac, int, 0);
423MODULE_PARM_DESC(use_dac, "Enable PCI DAC. Unsafe on 32 bit PCI slot."); 445MODULE_PARM_DESC(use_dac, "Enable PCI DAC. Unsafe on 32 bit PCI slot.");
446module_param_named(debug, debug.msg_enable, int, 0);
447MODULE_PARM_DESC(debug, "Debug verbosity level (0=none, ..., 16=all)");
424MODULE_LICENSE("GPL"); 448MODULE_LICENSE("GPL");
425MODULE_VERSION(RTL8169_VERSION); 449MODULE_VERSION(RTL8169_VERSION);
426 450
@@ -433,10 +457,10 @@ static void rtl8169_hw_start(struct net_device *dev);
433static int rtl8169_close(struct net_device *dev); 457static int rtl8169_close(struct net_device *dev);
434static void rtl8169_set_rx_mode(struct net_device *dev); 458static void rtl8169_set_rx_mode(struct net_device *dev);
435static void rtl8169_tx_timeout(struct net_device *dev); 459static void rtl8169_tx_timeout(struct net_device *dev);
436static struct net_device_stats *rtl8169_get_stats(struct net_device *netdev); 460static struct net_device_stats *rtl8169_get_stats(struct net_device *dev);
437static int rtl8169_rx_interrupt(struct net_device *, struct rtl8169_private *, 461static int rtl8169_rx_interrupt(struct net_device *, struct rtl8169_private *,
438 void __iomem *); 462 void __iomem *);
439static int rtl8169_change_mtu(struct net_device *netdev, int new_mtu); 463static int rtl8169_change_mtu(struct net_device *dev, int new_mtu);
440static void rtl8169_down(struct net_device *dev); 464static void rtl8169_down(struct net_device *dev);
441 465
442#ifdef CONFIG_R8169_NAPI 466#ifdef CONFIG_R8169_NAPI
@@ -543,9 +567,13 @@ static void rtl8169_check_link_status(struct net_device *dev,
543 spin_lock_irqsave(&tp->lock, flags); 567 spin_lock_irqsave(&tp->lock, flags);
544 if (tp->link_ok(ioaddr)) { 568 if (tp->link_ok(ioaddr)) {
545 netif_carrier_on(dev); 569 netif_carrier_on(dev);
546 printk(KERN_INFO PFX "%s: link up\n", dev->name); 570 if (netif_msg_ifup(tp))
547 } else 571 printk(KERN_INFO PFX "%s: link up\n", dev->name);
572 } else {
573 if (netif_msg_ifdown(tp))
574 printk(KERN_INFO PFX "%s: link down\n", dev->name);
548 netif_carrier_off(dev); 575 netif_carrier_off(dev);
576 }
549 spin_unlock_irqrestore(&tp->lock, flags); 577 spin_unlock_irqrestore(&tp->lock, flags);
550} 578}
551 579
@@ -569,7 +597,7 @@ static void rtl8169_link_option(int idx, u8 *autoneg, u16 *speed, u8 *duplex)
569 597
570 option = ((idx < MAX_UNITS) && (idx >= 0)) ? media[idx] : 0xff; 598 option = ((idx < MAX_UNITS) && (idx >= 0)) ? media[idx] : 0xff;
571 599
572 if ((option != 0xff) && !idx) 600 if ((option != 0xff) && !idx && netif_msg_drv(&debug))
573 printk(KERN_WARNING PFX "media option is deprecated.\n"); 601 printk(KERN_WARNING PFX "media option is deprecated.\n");
574 602
575 for (p = link_settings; p->media != 0xff; p++) { 603 for (p = link_settings; p->media != 0xff; p++) {
@@ -611,9 +639,11 @@ static int rtl8169_set_speed_tbi(struct net_device *dev,
611 } else if (autoneg == AUTONEG_ENABLE) 639 } else if (autoneg == AUTONEG_ENABLE)
612 RTL_W32(TBICSR, reg | TBINwEnable | TBINwRestart); 640 RTL_W32(TBICSR, reg | TBINwEnable | TBINwRestart);
613 else { 641 else {
614 printk(KERN_WARNING PFX 642 if (netif_msg_link(tp)) {
615 "%s: incorrect speed setting refused in TBI mode\n", 643 printk(KERN_WARNING "%s: "
616 dev->name); 644 "incorrect speed setting refused in TBI mode\n",
645 dev->name);
646 }
617 ret = -EOPNOTSUPP; 647 ret = -EOPNOTSUPP;
618 } 648 }
619 649
@@ -871,12 +901,120 @@ static void rtl8169_get_regs(struct net_device *dev, struct ethtool_regs *regs,
871 spin_unlock_irqrestore(&tp->lock, flags); 901 spin_unlock_irqrestore(&tp->lock, flags);
872} 902}
873 903
904static u32 rtl8169_get_msglevel(struct net_device *dev)
905{
906 struct rtl8169_private *tp = netdev_priv(dev);
907
908 return tp->msg_enable;
909}
910
911static void rtl8169_set_msglevel(struct net_device *dev, u32 value)
912{
913 struct rtl8169_private *tp = netdev_priv(dev);
914
915 tp->msg_enable = value;
916}
917
918static const char rtl8169_gstrings[][ETH_GSTRING_LEN] = {
919 "tx_packets",
920 "rx_packets",
921 "tx_errors",
922 "rx_errors",
923 "rx_missed",
924 "align_errors",
925 "tx_single_collisions",
926 "tx_multi_collisions",
927 "unicast",
928 "broadcast",
929 "multicast",
930 "tx_aborted",
931 "tx_underrun",
932};
933
934struct rtl8169_counters {
935 u64 tx_packets;
936 u64 rx_packets;
937 u64 tx_errors;
938 u32 rx_errors;
939 u16 rx_missed;
940 u16 align_errors;
941 u32 tx_one_collision;
942 u32 tx_multi_collision;
943 u64 rx_unicast;
944 u64 rx_broadcast;
945 u32 rx_multicast;
946 u16 tx_aborted;
947 u16 tx_underun;
948};
949
950static int rtl8169_get_stats_count(struct net_device *dev)
951{
952 return ARRAY_SIZE(rtl8169_gstrings);
953}
954
955static void rtl8169_get_ethtool_stats(struct net_device *dev,
956 struct ethtool_stats *stats, u64 *data)
957{
958 struct rtl8169_private *tp = netdev_priv(dev);
959 void __iomem *ioaddr = tp->mmio_addr;
960 struct rtl8169_counters *counters;
961 dma_addr_t paddr;
962 u32 cmd;
963
964 ASSERT_RTNL();
965
966 counters = pci_alloc_consistent(tp->pci_dev, sizeof(*counters), &paddr);
967 if (!counters)
968 return;
969
970 RTL_W32(CounterAddrHigh, (u64)paddr >> 32);
971 cmd = (u64)paddr & DMA_32BIT_MASK;
972 RTL_W32(CounterAddrLow, cmd);
973 RTL_W32(CounterAddrLow, cmd | CounterDump);
974
975 while (RTL_R32(CounterAddrLow) & CounterDump) {
976 if (msleep_interruptible(1))
977 break;
978 }
979
980 RTL_W32(CounterAddrLow, 0);
981 RTL_W32(CounterAddrHigh, 0);
982
983 data[0] = le64_to_cpu(counters->tx_packets);
984 data[1] = le64_to_cpu(counters->rx_packets);
985 data[2] = le64_to_cpu(counters->tx_errors);
986 data[3] = le32_to_cpu(counters->rx_errors);
987 data[4] = le16_to_cpu(counters->rx_missed);
988 data[5] = le16_to_cpu(counters->align_errors);
989 data[6] = le32_to_cpu(counters->tx_one_collision);
990 data[7] = le32_to_cpu(counters->tx_multi_collision);
991 data[8] = le64_to_cpu(counters->rx_unicast);
992 data[9] = le64_to_cpu(counters->rx_broadcast);
993 data[10] = le32_to_cpu(counters->rx_multicast);
994 data[11] = le16_to_cpu(counters->tx_aborted);
995 data[12] = le16_to_cpu(counters->tx_underun);
996
997 pci_free_consistent(tp->pci_dev, sizeof(*counters), counters, paddr);
998}
999
1000static void rtl8169_get_strings(struct net_device *dev, u32 stringset, u8 *data)
1001{
1002 switch(stringset) {
1003 case ETH_SS_STATS:
1004 memcpy(data, *rtl8169_gstrings, sizeof(rtl8169_gstrings));
1005 break;
1006 }
1007}
1008
1009
874static struct ethtool_ops rtl8169_ethtool_ops = { 1010static struct ethtool_ops rtl8169_ethtool_ops = {
875 .get_drvinfo = rtl8169_get_drvinfo, 1011 .get_drvinfo = rtl8169_get_drvinfo,
876 .get_regs_len = rtl8169_get_regs_len, 1012 .get_regs_len = rtl8169_get_regs_len,
877 .get_link = ethtool_op_get_link, 1013 .get_link = ethtool_op_get_link,
878 .get_settings = rtl8169_get_settings, 1014 .get_settings = rtl8169_get_settings,
879 .set_settings = rtl8169_set_settings, 1015 .set_settings = rtl8169_set_settings,
1016 .get_msglevel = rtl8169_get_msglevel,
1017 .set_msglevel = rtl8169_set_msglevel,
880 .get_rx_csum = rtl8169_get_rx_csum, 1018 .get_rx_csum = rtl8169_get_rx_csum,
881 .set_rx_csum = rtl8169_set_rx_csum, 1019 .set_rx_csum = rtl8169_set_rx_csum,
882 .get_tx_csum = ethtool_op_get_tx_csum, 1020 .get_tx_csum = ethtool_op_get_tx_csum,
@@ -886,6 +1024,9 @@ static struct ethtool_ops rtl8169_ethtool_ops = {
886 .get_tso = ethtool_op_get_tso, 1024 .get_tso = ethtool_op_get_tso,
887 .set_tso = ethtool_op_set_tso, 1025 .set_tso = ethtool_op_set_tso,
888 .get_regs = rtl8169_get_regs, 1026 .get_regs = rtl8169_get_regs,
1027 .get_strings = rtl8169_get_strings,
1028 .get_stats_count = rtl8169_get_stats_count,
1029 .get_ethtool_stats = rtl8169_get_ethtool_stats,
889}; 1030};
890 1031
891static void rtl8169_write_gmii_reg_bit(void __iomem *ioaddr, int reg, int bitnum, 1032static void rtl8169_write_gmii_reg_bit(void __iomem *ioaddr, int reg, int bitnum,
@@ -1091,7 +1232,8 @@ static void rtl8169_phy_timer(unsigned long __opaque)
1091 if (tp->link_ok(ioaddr)) 1232 if (tp->link_ok(ioaddr))
1092 goto out_unlock; 1233 goto out_unlock;
1093 1234
1094 printk(KERN_WARNING PFX "%s: PHY reset until link up\n", dev->name); 1235 if (netif_msg_link(tp))
1236 printk(KERN_WARNING "%s: PHY reset until link up\n", dev->name);
1095 1237
1096 tp->phy_reset_enable(ioaddr); 1238 tp->phy_reset_enable(ioaddr);
1097 1239
@@ -1169,18 +1311,23 @@ rtl8169_init_board(struct pci_dev *pdev, struct net_device **dev_out,
1169 /* dev zeroed in alloc_etherdev */ 1311 /* dev zeroed in alloc_etherdev */
1170 dev = alloc_etherdev(sizeof (*tp)); 1312 dev = alloc_etherdev(sizeof (*tp));
1171 if (dev == NULL) { 1313 if (dev == NULL) {
1172 printk(KERN_ERR PFX "unable to alloc new ethernet\n"); 1314 if (netif_msg_drv(&debug))
1315 printk(KERN_ERR PFX "unable to alloc new ethernet\n");
1173 goto err_out; 1316 goto err_out;
1174 } 1317 }
1175 1318
1176 SET_MODULE_OWNER(dev); 1319 SET_MODULE_OWNER(dev);
1177 SET_NETDEV_DEV(dev, &pdev->dev); 1320 SET_NETDEV_DEV(dev, &pdev->dev);
1178 tp = netdev_priv(dev); 1321 tp = netdev_priv(dev);
1322 tp->msg_enable = netif_msg_init(debug.msg_enable, R8169_MSG_DEFAULT);
1179 1323
1180 /* enable device (incl. PCI PM wakeup and hotplug setup) */ 1324 /* enable device (incl. PCI PM wakeup and hotplug setup) */
1181 rc = pci_enable_device(pdev); 1325 rc = pci_enable_device(pdev);
1182 if (rc) { 1326 if (rc < 0) {
1183 printk(KERN_ERR PFX "%s: enable failure\n", pci_name(pdev)); 1327 if (netif_msg_probe(tp)) {
1328 printk(KERN_ERR PFX "%s: enable failure\n",
1329 pci_name(pdev));
1330 }
1184 goto err_out_free_dev; 1331 goto err_out_free_dev;
1185 } 1332 }
1186 1333
@@ -1196,29 +1343,39 @@ rtl8169_init_board(struct pci_dev *pdev, struct net_device **dev_out,
1196 pci_read_config_word(pdev, pm_cap + PCI_PM_CTRL, &pwr_command); 1343 pci_read_config_word(pdev, pm_cap + PCI_PM_CTRL, &pwr_command);
1197 acpi_idle_state = pwr_command & PCI_PM_CTRL_STATE_MASK; 1344 acpi_idle_state = pwr_command & PCI_PM_CTRL_STATE_MASK;
1198 } else { 1345 } else {
1199 printk(KERN_ERR PFX 1346 if (netif_msg_probe(tp)) {
1200 "Cannot find PowerManagement capability, aborting.\n"); 1347 printk(KERN_ERR PFX
1348 "Cannot find PowerManagement capability. "
1349 "Aborting.\n");
1350 }
1201 goto err_out_mwi; 1351 goto err_out_mwi;
1202 } 1352 }
1203 1353
1204 /* make sure PCI base addr 1 is MMIO */ 1354 /* make sure PCI base addr 1 is MMIO */
1205 if (!(pci_resource_flags(pdev, 1) & IORESOURCE_MEM)) { 1355 if (!(pci_resource_flags(pdev, 1) & IORESOURCE_MEM)) {
1206 printk(KERN_ERR PFX 1356 if (netif_msg_probe(tp)) {
1207 "region #1 not an MMIO resource, aborting\n"); 1357 printk(KERN_ERR PFX
1358 "region #1 not an MMIO resource, aborting\n");
1359 }
1208 rc = -ENODEV; 1360 rc = -ENODEV;
1209 goto err_out_mwi; 1361 goto err_out_mwi;
1210 } 1362 }
1211 /* check for weird/broken PCI region reporting */ 1363 /* check for weird/broken PCI region reporting */
1212 if (pci_resource_len(pdev, 1) < R8169_REGS_SIZE) { 1364 if (pci_resource_len(pdev, 1) < R8169_REGS_SIZE) {
1213 printk(KERN_ERR PFX "Invalid PCI region size(s), aborting\n"); 1365 if (netif_msg_probe(tp)) {
1366 printk(KERN_ERR PFX
1367 "Invalid PCI region size(s), aborting\n");
1368 }
1214 rc = -ENODEV; 1369 rc = -ENODEV;
1215 goto err_out_mwi; 1370 goto err_out_mwi;
1216 } 1371 }
1217 1372
1218 rc = pci_request_regions(pdev, MODULENAME); 1373 rc = pci_request_regions(pdev, MODULENAME);
1219 if (rc) { 1374 if (rc < 0) {
1220 printk(KERN_ERR PFX "%s: could not request regions.\n", 1375 if (netif_msg_probe(tp)) {
1221 pci_name(pdev)); 1376 printk(KERN_ERR PFX "%s: could not request regions.\n",
1377 pci_name(pdev));
1378 }
1222 goto err_out_mwi; 1379 goto err_out_mwi;
1223 } 1380 }
1224 1381
@@ -1231,7 +1388,10 @@ rtl8169_init_board(struct pci_dev *pdev, struct net_device **dev_out,
1231 } else { 1388 } else {
1232 rc = pci_set_dma_mask(pdev, DMA_32BIT_MASK); 1389 rc = pci_set_dma_mask(pdev, DMA_32BIT_MASK);
1233 if (rc < 0) { 1390 if (rc < 0) {
1234 printk(KERN_ERR PFX "DMA configuration failed.\n"); 1391 if (netif_msg_probe(tp)) {
1392 printk(KERN_ERR PFX
1393 "DMA configuration failed.\n");
1394 }
1235 goto err_out_free_res; 1395 goto err_out_free_res;
1236 } 1396 }
1237 } 1397 }
@@ -1241,7 +1401,8 @@ rtl8169_init_board(struct pci_dev *pdev, struct net_device **dev_out,
1241 /* ioremap MMIO region */ 1401 /* ioremap MMIO region */
1242 ioaddr = ioremap(pci_resource_start(pdev, 1), R8169_REGS_SIZE); 1402 ioaddr = ioremap(pci_resource_start(pdev, 1), R8169_REGS_SIZE);
1243 if (ioaddr == NULL) { 1403 if (ioaddr == NULL) {
1244 printk(KERN_ERR PFX "cannot remap MMIO, aborting\n"); 1404 if (netif_msg_probe(tp))
1405 printk(KERN_ERR PFX "cannot remap MMIO, aborting\n");
1245 rc = -EIO; 1406 rc = -EIO;
1246 goto err_out_free_res; 1407 goto err_out_free_res;
1247 } 1408 }
@@ -1272,9 +1433,11 @@ rtl8169_init_board(struct pci_dev *pdev, struct net_device **dev_out,
1272 } 1433 }
1273 if (i < 0) { 1434 if (i < 0) {
1274 /* Unknown chip: assume array element #0, original RTL-8169 */ 1435 /* Unknown chip: assume array element #0, original RTL-8169 */
1275 printk(KERN_DEBUG PFX 1436 if (netif_msg_probe(tp)) {
1276 "PCI device %s: unknown chip version, assuming %s\n", 1437 printk(KERN_DEBUG PFX "PCI device %s: "
1277 pci_name(pdev), rtl_chip_info[0].name); 1438 "unknown chip version, assuming %s\n",
1439 pci_name(pdev), rtl_chip_info[0].name);
1440 }
1278 i++; 1441 i++;
1279 } 1442 }
1280 tp->chipset = i; 1443 tp->chipset = i;
@@ -1308,7 +1471,6 @@ rtl8169_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
1308 struct rtl8169_private *tp; 1471 struct rtl8169_private *tp;
1309 void __iomem *ioaddr = NULL; 1472 void __iomem *ioaddr = NULL;
1310 static int board_idx = -1; 1473 static int board_idx = -1;
1311 static int printed_version = 0;
1312 u8 autoneg, duplex; 1474 u8 autoneg, duplex;
1313 u16 speed; 1475 u16 speed;
1314 int i, rc; 1476 int i, rc;
@@ -1318,10 +1480,9 @@ rtl8169_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
1318 1480
1319 board_idx++; 1481 board_idx++;
1320 1482
1321 if (!printed_version) { 1483 if (netif_msg_drv(&debug)) {
1322 printk(KERN_INFO "%s Gigabit Ethernet driver %s loaded\n", 1484 printk(KERN_INFO "%s Gigabit Ethernet driver %s loaded\n",
1323 MODULENAME, RTL8169_VERSION); 1485 MODULENAME, RTL8169_VERSION);
1324 printed_version = 1;
1325 } 1486 }
1326 1487
1327 rc = rtl8169_init_board(pdev, &dev, &ioaddr); 1488 rc = rtl8169_init_board(pdev, &dev, &ioaddr);
@@ -1366,7 +1527,6 @@ rtl8169_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
1366#ifdef CONFIG_R8169_NAPI 1527#ifdef CONFIG_R8169_NAPI
1367 dev->poll = rtl8169_poll; 1528 dev->poll = rtl8169_poll;
1368 dev->weight = R8169_NAPI_WEIGHT; 1529 dev->weight = R8169_NAPI_WEIGHT;
1369 printk(KERN_INFO PFX "NAPI enabled\n");
1370#endif 1530#endif
1371 1531
1372#ifdef CONFIG_R8169_VLAN 1532#ifdef CONFIG_R8169_VLAN
@@ -1391,20 +1551,24 @@ rtl8169_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
1391 return rc; 1551 return rc;
1392 } 1552 }
1393 1553
1394 printk(KERN_DEBUG "%s: Identified chip type is '%s'.\n", dev->name, 1554 if (netif_msg_probe(tp)) {
1395 rtl_chip_info[tp->chipset].name); 1555 printk(KERN_DEBUG "%s: Identified chip type is '%s'.\n",
1556 dev->name, rtl_chip_info[tp->chipset].name);
1557 }
1396 1558
1397 pci_set_drvdata(pdev, dev); 1559 pci_set_drvdata(pdev, dev);
1398 1560
1399 printk(KERN_INFO "%s: %s at 0x%lx, " 1561 if (netif_msg_probe(tp)) {
1400 "%2.2x:%2.2x:%2.2x:%2.2x:%2.2x:%2.2x, " 1562 printk(KERN_INFO "%s: %s at 0x%lx, "
1401 "IRQ %d\n", 1563 "%2.2x:%2.2x:%2.2x:%2.2x:%2.2x:%2.2x, "
1402 dev->name, 1564 "IRQ %d\n",
1403 rtl_chip_info[ent->driver_data].name, 1565 dev->name,
1404 dev->base_addr, 1566 rtl_chip_info[ent->driver_data].name,
1405 dev->dev_addr[0], dev->dev_addr[1], 1567 dev->base_addr,
1406 dev->dev_addr[2], dev->dev_addr[3], 1568 dev->dev_addr[0], dev->dev_addr[1],
1407 dev->dev_addr[4], dev->dev_addr[5], dev->irq); 1569 dev->dev_addr[2], dev->dev_addr[3],
1570 dev->dev_addr[4], dev->dev_addr[5], dev->irq);
1571 }
1408 1572
1409 rtl8169_hw_phy_config(dev); 1573 rtl8169_hw_phy_config(dev);
1410 1574
@@ -1427,7 +1591,7 @@ rtl8169_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
1427 1591
1428 rtl8169_set_speed(dev, autoneg, speed, duplex); 1592 rtl8169_set_speed(dev, autoneg, speed, duplex);
1429 1593
1430 if (RTL_R8(PHYstatus) & TBI_Enable) 1594 if ((RTL_R8(PHYstatus) & TBI_Enable) && netif_msg_link(tp))
1431 printk(KERN_INFO PFX "%s: TBI auto-negotiating\n", dev->name); 1595 printk(KERN_INFO PFX "%s: TBI auto-negotiating\n", dev->name);
1432 1596
1433 return 0; 1597 return 0;
@@ -1860,8 +2024,13 @@ static void rtl8169_reinit_task(void *_data)
1860 ret = rtl8169_open(dev); 2024 ret = rtl8169_open(dev);
1861 if (unlikely(ret < 0)) { 2025 if (unlikely(ret < 0)) {
1862 if (net_ratelimit()) { 2026 if (net_ratelimit()) {
1863 printk(PFX KERN_ERR "%s: reinit failure (status = %d)." 2027 struct rtl8169_private *tp = netdev_priv(dev);
1864 " Rescheduling.\n", dev->name, ret); 2028
2029 if (netif_msg_drv(tp)) {
2030 printk(PFX KERN_ERR
2031 "%s: reinit failure (status = %d)."
2032 " Rescheduling.\n", dev->name, ret);
2033 }
1865 } 2034 }
1866 rtl8169_schedule_work(dev, rtl8169_reinit_task); 2035 rtl8169_schedule_work(dev, rtl8169_reinit_task);
1867 } 2036 }
@@ -1886,8 +2055,12 @@ static void rtl8169_reset_task(void *_data)
1886 netif_wake_queue(dev); 2055 netif_wake_queue(dev);
1887 } else { 2056 } else {
1888 if (net_ratelimit()) { 2057 if (net_ratelimit()) {
1889 printk(PFX KERN_EMERG "%s: Rx buffers shortage\n", 2058 struct rtl8169_private *tp = netdev_priv(dev);
1890 dev->name); 2059
2060 if (netif_msg_intr(tp)) {
2061 printk(PFX KERN_EMERG
2062 "%s: Rx buffers shortage\n", dev->name);
2063 }
1891 } 2064 }
1892 rtl8169_schedule_work(dev, rtl8169_reset_task); 2065 rtl8169_schedule_work(dev, rtl8169_reset_task);
1893 } 2066 }
@@ -1973,8 +2146,11 @@ static int rtl8169_start_xmit(struct sk_buff *skb, struct net_device *dev)
1973 int ret = 0; 2146 int ret = 0;
1974 2147
1975 if (unlikely(TX_BUFFS_AVAIL(tp) < skb_shinfo(skb)->nr_frags)) { 2148 if (unlikely(TX_BUFFS_AVAIL(tp) < skb_shinfo(skb)->nr_frags)) {
1976 printk(KERN_ERR PFX "%s: BUG! Tx Ring full when queue awake!\n", 2149 if (netif_msg_drv(tp)) {
1977 dev->name); 2150 printk(KERN_ERR
2151 "%s: BUG! Tx Ring full when queue awake!\n",
2152 dev->name);
2153 }
1978 goto err_stop; 2154 goto err_stop;
1979 } 2155 }
1980 2156
@@ -2049,8 +2225,11 @@ static void rtl8169_pcierr_interrupt(struct net_device *dev)
2049 pci_read_config_word(pdev, PCI_COMMAND, &pci_cmd); 2225 pci_read_config_word(pdev, PCI_COMMAND, &pci_cmd);
2050 pci_read_config_word(pdev, PCI_STATUS, &pci_status); 2226 pci_read_config_word(pdev, PCI_STATUS, &pci_status);
2051 2227
2052 printk(KERN_ERR PFX "%s: PCI error (cmd = 0x%04x, status = 0x%04x).\n", 2228 if (netif_msg_intr(tp)) {
2053 dev->name, pci_cmd, pci_status); 2229 printk(KERN_ERR
2230 "%s: PCI error (cmd = 0x%04x, status = 0x%04x).\n",
2231 dev->name, pci_cmd, pci_status);
2232 }
2054 2233
2055 /* 2234 /*
2056 * The recovery sequence below admits a very elaborated explanation: 2235 * The recovery sequence below admits a very elaborated explanation:
@@ -2069,7 +2248,8 @@ static void rtl8169_pcierr_interrupt(struct net_device *dev)
2069 2248
2070 /* The infamous DAC f*ckup only happens at boot time */ 2249 /* The infamous DAC f*ckup only happens at boot time */
2071 if ((tp->cp_cmd & PCIDAC) && !tp->dirty_rx && !tp->cur_rx) { 2250 if ((tp->cp_cmd & PCIDAC) && !tp->dirty_rx && !tp->cur_rx) {
2072 printk(KERN_INFO PFX "%s: disabling PCI DAC.\n", dev->name); 2251 if (netif_msg_intr(tp))
2252 printk(KERN_INFO "%s: disabling PCI DAC.\n", dev->name);
2073 tp->cp_cmd &= ~PCIDAC; 2253 tp->cp_cmd &= ~PCIDAC;
2074 RTL_W16(CPlusCmd, tp->cp_cmd); 2254 RTL_W16(CPlusCmd, tp->cp_cmd);
2075 dev->features &= ~NETIF_F_HIGHDMA; 2255 dev->features &= ~NETIF_F_HIGHDMA;
@@ -2180,7 +2360,7 @@ rtl8169_rx_interrupt(struct net_device *dev, struct rtl8169_private *tp,
2180 rx_left = NUM_RX_DESC + tp->dirty_rx - cur_rx; 2360 rx_left = NUM_RX_DESC + tp->dirty_rx - cur_rx;
2181 rx_left = rtl8169_rx_quota(rx_left, (u32) dev->quota); 2361 rx_left = rtl8169_rx_quota(rx_left, (u32) dev->quota);
2182 2362
2183 while (rx_left > 0) { 2363 for (; rx_left > 0; rx_left--, cur_rx++) {
2184 unsigned int entry = cur_rx % NUM_RX_DESC; 2364 unsigned int entry = cur_rx % NUM_RX_DESC;
2185 struct RxDesc *desc = tp->RxDescArray + entry; 2365 struct RxDesc *desc = tp->RxDescArray + entry;
2186 u32 status; 2366 u32 status;
@@ -2190,9 +2370,12 @@ rtl8169_rx_interrupt(struct net_device *dev, struct rtl8169_private *tp,
2190 2370
2191 if (status & DescOwn) 2371 if (status & DescOwn)
2192 break; 2372 break;
2193 if (status & RxRES) { 2373 if (unlikely(status & RxRES)) {
2194 printk(KERN_INFO "%s: Rx ERROR. status = %08x\n", 2374 if (netif_msg_rx_err(tp)) {
2195 dev->name, status); 2375 printk(KERN_INFO
2376 "%s: Rx ERROR. status = %08x\n",
2377 dev->name, status);
2378 }
2196 tp->stats.rx_errors++; 2379 tp->stats.rx_errors++;
2197 if (status & (RxRWT | RxRUNT)) 2380 if (status & (RxRWT | RxRUNT))
2198 tp->stats.rx_length_errors++; 2381 tp->stats.rx_length_errors++;
@@ -2214,7 +2397,7 @@ rtl8169_rx_interrupt(struct net_device *dev, struct rtl8169_private *tp,
2214 tp->stats.rx_dropped++; 2397 tp->stats.rx_dropped++;
2215 tp->stats.rx_length_errors++; 2398 tp->stats.rx_length_errors++;
2216 rtl8169_mark_to_asic(desc, tp->rx_buf_sz); 2399 rtl8169_mark_to_asic(desc, tp->rx_buf_sz);
2217 goto move_on; 2400 continue;
2218 } 2401 }
2219 2402
2220 rtl8169_rx_csum(skb, desc); 2403 rtl8169_rx_csum(skb, desc);
@@ -2243,16 +2426,13 @@ rtl8169_rx_interrupt(struct net_device *dev, struct rtl8169_private *tp,
2243 tp->stats.rx_bytes += pkt_size; 2426 tp->stats.rx_bytes += pkt_size;
2244 tp->stats.rx_packets++; 2427 tp->stats.rx_packets++;
2245 } 2428 }
2246move_on:
2247 cur_rx++;
2248 rx_left--;
2249 } 2429 }
2250 2430
2251 count = cur_rx - tp->cur_rx; 2431 count = cur_rx - tp->cur_rx;
2252 tp->cur_rx = cur_rx; 2432 tp->cur_rx = cur_rx;
2253 2433
2254 delta = rtl8169_rx_fill(tp, dev, tp->dirty_rx, tp->cur_rx); 2434 delta = rtl8169_rx_fill(tp, dev, tp->dirty_rx, tp->cur_rx);
2255 if (!delta && count) 2435 if (!delta && count && netif_msg_intr(tp))
2256 printk(KERN_INFO "%s: no Rx buffer allocated\n", dev->name); 2436 printk(KERN_INFO "%s: no Rx buffer allocated\n", dev->name);
2257 tp->dirty_rx += delta; 2437 tp->dirty_rx += delta;
2258 2438
@@ -2263,7 +2443,7 @@ move_on:
2263 * after refill ? 2443 * after refill ?
2264 * - how do others driver handle this condition (Uh oh...). 2444 * - how do others driver handle this condition (Uh oh...).
2265 */ 2445 */
2266 if (tp->dirty_rx + NUM_RX_DESC == tp->cur_rx) 2446 if ((tp->dirty_rx + NUM_RX_DESC == tp->cur_rx) && netif_msg_intr(tp))
2267 printk(KERN_EMERG "%s: Rx buffers exhausted\n", dev->name); 2447 printk(KERN_EMERG "%s: Rx buffers exhausted\n", dev->name);
2268 2448
2269 return count; 2449 return count;
@@ -2315,7 +2495,7 @@ rtl8169_interrupt(int irq, void *dev_instance, struct pt_regs *regs)
2315 2495
2316 if (likely(netif_rx_schedule_prep(dev))) 2496 if (likely(netif_rx_schedule_prep(dev)))
2317 __netif_rx_schedule(dev); 2497 __netif_rx_schedule(dev);
2318 else { 2498 else if (netif_msg_intr(tp)) {
2319 printk(KERN_INFO "%s: interrupt %04x taken in poll\n", 2499 printk(KERN_INFO "%s: interrupt %04x taken in poll\n",
2320 dev->name, status); 2500 dev->name, status);
2321 } 2501 }
@@ -2334,8 +2514,10 @@ rtl8169_interrupt(int irq, void *dev_instance, struct pt_regs *regs)
2334 } while (boguscnt > 0); 2514 } while (boguscnt > 0);
2335 2515
2336 if (boguscnt <= 0) { 2516 if (boguscnt <= 0) {
2337 printk(KERN_WARNING "%s: Too much work at interrupt!\n", 2517 if (net_ratelimit() && netif_msg_intr(tp)) {
2338 dev->name); 2518 printk(KERN_WARNING
2519 "%s: Too much work at interrupt!\n", dev->name);
2520 }
2339 /* Clear all interrupt sources. */ 2521 /* Clear all interrupt sources. */
2340 RTL_W16(IntrStatus, 0xffff); 2522 RTL_W16(IntrStatus, 0xffff);
2341 } 2523 }
@@ -2458,8 +2640,10 @@ rtl8169_set_rx_mode(struct net_device *dev)
2458 2640
2459 if (dev->flags & IFF_PROMISC) { 2641 if (dev->flags & IFF_PROMISC) {
2460 /* Unconditionally log net taps. */ 2642 /* Unconditionally log net taps. */
2461 printk(KERN_NOTICE "%s: Promiscuous mode enabled.\n", 2643 if (netif_msg_link(tp)) {
2462 dev->name); 2644 printk(KERN_NOTICE "%s: Promiscuous mode enabled.\n",
2645 dev->name);
2646 }
2463 rx_mode = 2647 rx_mode =
2464 AcceptBroadcast | AcceptMulticast | AcceptMyPhys | 2648 AcceptBroadcast | AcceptMulticast | AcceptMyPhys |
2465 AcceptAllPhys; 2649 AcceptAllPhys;
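
Most of the r8169 hunks above funnel printk() calls through a per-device
message mask. Pulled out of the diff, the pattern looks like this (tp, debug
and R8169_MSG_DEFAULT are the names the patch itself introduces):

static void example_link_report(struct net_device *dev)
{
	struct rtl8169_private *tp = netdev_priv(dev);

	/* tp->msg_enable was seeded once at probe time with
	 * netif_msg_init(debug.msg_enable, R8169_MSG_DEFAULT); */
	if (netif_msg_link(tp))
		printk(KERN_INFO "%s: link up\n", dev->name);
}

Because the mask is also exposed through .get_msglevel/.set_msglevel, it can
be inspected and changed at run time through ethtool's message-level
operations as well as through the new "debug" module parameter.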
diff --git a/drivers/net/sk98lin/skge.c b/drivers/net/sk98lin/skge.c
index 05b827f79f54..1ccb2989001c 100644
--- a/drivers/net/sk98lin/skge.c
+++ b/drivers/net/sk98lin/skge.c
@@ -4212,7 +4212,7 @@ SK_BOOL DualNet;
4212 Flags); 4212 Flags);
4213 4213
4214 SkGeStopPort(pAC, IoC, FromPort, SK_STOP_ALL, SK_HARD_RST); 4214 SkGeStopPort(pAC, IoC, FromPort, SK_STOP_ALL, SK_HARD_RST);
4215 pAC->dev[Param.Para32[0]]->flags &= ~IFF_RUNNING; 4215 netif_carrier_off(pAC->dev[Param.Para32[0]]);
4216 spin_unlock_irqrestore( 4216 spin_unlock_irqrestore(
4217 &pAC->TxPort[FromPort][TX_PRIO_LOW].TxDesRingLock, 4217 &pAC->TxPort[FromPort][TX_PRIO_LOW].TxDesRingLock,
4218 Flags); 4218 Flags);
@@ -4355,7 +4355,7 @@ SK_BOOL DualNet;
4355 } 4355 }
4356 4356
4357 /* Inform the world that link protocol is up. */ 4357 /* Inform the world that link protocol is up. */
4358 pAC->dev[Param.Para32[0]]->flags |= IFF_RUNNING; 4358 netif_carrier_on(pAC->dev[Param.Para32[0]]);
4359 4359
4360 break; 4360 break;
4361 case SK_DRV_NET_DOWN: /* SK_U32 Reason */ 4361 case SK_DRV_NET_DOWN: /* SK_U32 Reason */
@@ -4368,7 +4368,7 @@ SK_BOOL DualNet;
4368 } else { 4368 } else {
4369 DoPrintInterfaceChange = SK_TRUE; 4369 DoPrintInterfaceChange = SK_TRUE;
4370 } 4370 }
4371 pAC->dev[Param.Para32[1]]->flags &= ~IFF_RUNNING; 4371 netif_carrier_off(pAC->dev[Param.Para32[1]]);
4372 break; 4372 break;
4373 case SK_DRV_SWITCH_HARD: /* SK_U32 FromPortIdx SK_U32 ToPortIdx */ 4373 case SK_DRV_SWITCH_HARD: /* SK_U32 FromPortIdx SK_U32 ToPortIdx */
4374 SK_DBG_MSG(NULL, SK_DBGMOD_DRV, SK_DBGCAT_DRV_EVENT, 4374 SK_DBG_MSG(NULL, SK_DBGMOD_DRV, SK_DBGCAT_DRV_EVENT,
@@ -4961,7 +4961,6 @@ static int __devinit skge_probe_one(struct pci_dev *pdev,
4961#ifdef CONFIG_NET_POLL_CONTROLLER 4961#ifdef CONFIG_NET_POLL_CONTROLLER
4962 dev->poll_controller = &SkGePollController; 4962 dev->poll_controller = &SkGePollController;
4963#endif 4963#endif
4964 dev->flags &= ~IFF_RUNNING;
4965 SET_NETDEV_DEV(dev, &pdev->dev); 4964 SET_NETDEV_DEV(dev, &pdev->dev);
4966 SET_ETHTOOL_OPS(dev, &SkGeEthtoolOps); 4965 SET_ETHTOOL_OPS(dev, &SkGeEthtoolOps);
4967 4966
@@ -5035,7 +5034,6 @@ static int __devinit skge_probe_one(struct pci_dev *pdev,
5035 dev->set_mac_address = &SkGeSetMacAddr; 5034 dev->set_mac_address = &SkGeSetMacAddr;
5036 dev->do_ioctl = &SkGeIoctl; 5035 dev->do_ioctl = &SkGeIoctl;
5037 dev->change_mtu = &SkGeChangeMtu; 5036 dev->change_mtu = &SkGeChangeMtu;
5038 dev->flags &= ~IFF_RUNNING;
5039 SET_NETDEV_DEV(dev, &pdev->dev); 5037 SET_NETDEV_DEV(dev, &pdev->dev);
5040 SET_ETHTOOL_OPS(dev, &SkGeEthtoolOps); 5038 SET_ETHTOOL_OPS(dev, &SkGeEthtoolOps);
5041 5039
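
The sk98lin hunks above replace direct IFF_RUNNING fiddling with the carrier
helpers, which is how link state is normally reported to the stack. In
generic terms (a sketch, not code from this driver):

static void report_link(struct net_device *dev, int link_is_up)
{
	if (link_is_up)			/* however the hardware reports it */
		netif_carrier_on(dev);
	else
		netif_carrier_off(dev);
}

Consumers then test netif_carrier_ok(dev) instead of peeking at dev->flags.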
diff --git a/drivers/net/sk_g16.c b/drivers/net/sk_g16.c
deleted file mode 100644
index 134ae0e6495b..000000000000
--- a/drivers/net/sk_g16.c
+++ /dev/null
@@ -1,2066 +0,0 @@
1/*-
2 * Copyright (C) 1994 by PJD Weichmann & SWS Bern, Switzerland
3 *
4 * This software may be used and distributed according to the terms
5 * of the GNU General Public License, incorporated herein by reference.
6 *
7 * Module : sk_g16.c
8 *
9 * Version : $Revision: 1.1 $
10 *
11 * Author : Patrick J.D. Weichmann
12 *
13 * Date Created : 94/05/26
14 * Last Updated : $Date: 1994/06/30 16:25:15 $
15 *
16 * Description : Schneider & Koch G16 Ethernet Device Driver for
17 * Linux Kernel >= 1.1.22
18 * Update History :
19 * Paul Gortmaker, 03/97: Fix for v2.1.x to use read{b,w}
20 * write{b,w} and memcpy -> memcpy_{to,from}io
21 *
22 * Jeff Garzik, 06/2000, Modularize
23 *
24-*/
25
26static const char rcsid[] = "$Id: sk_g16.c,v 1.1 1994/06/30 16:25:15 root Exp $";
27
28/*
29 * The Schneider & Koch (SK) G16 Network device driver is based
30 * on the 'ni6510' driver from Michael Hipp which can be found at
31 * ftp://sunsite.unc.edu/pub/Linux/system/Network/drivers/nidrivers.tar.gz
32 *
33 * Sources: 1) ni6510.c by M. Hipp
34 * 2) depca.c by D.C. Davies
35 * 3) skeleton.c by D. Becker
36 * 4) Am7990 Local Area Network Controller for Ethernet (LANCE),
37 * AMD, Pub. #05698, June 1989
38 *
 39 * Many thanks to the following for helping me to get things working:
40 *
41 * A. Cox (A.Cox@swansea.ac.uk)
42 * M. Hipp (mhipp@student.uni-tuebingen.de)
43 * R. Bolz (Schneider & Koch, Germany)
44 *
45 * To Do:
46 * - Support of SK_G8 and other SK Network Cards.
47 * - Autoset memory mapped RAM. Check for free memory and then
48 * configure RAM correctly.
 49 *	- SK_close should really set the card into its initial state.
50 * - Test if IRQ 3 is not switched off. Use autoirq() functionality.
51 * (as in /drivers/net/skeleton.c)
52 * - Implement Multicast addressing. At minimum something like
53 * in depca.c.
54 * - Redo the statistics part.
55 * - Try to find out if the board is in 8 Bit or 16 Bit slot.
56 * If in 8 Bit mode don't use IRQ 11.
57 * - (Try to make it slightly faster.)
58 * - Power management support
59 */
60
61#include <linux/module.h>
62#include <linux/kernel.h>
63#include <linux/fcntl.h>
64#include <linux/ioport.h>
65#include <linux/interrupt.h>
66#include <linux/slab.h>
67#include <linux/string.h>
68#include <linux/delay.h>
69#include <linux/errno.h>
70#include <linux/init.h>
71#include <linux/spinlock.h>
72#include <linux/netdevice.h>
73#include <linux/etherdevice.h>
74#include <linux/skbuff.h>
75#include <linux/bitops.h>
76
77#include <asm/system.h>
78#include <asm/io.h>
79
80#include "sk_g16.h"
81
82/*
83 * Schneider & Koch Card Definitions
84 * =================================
85 */
86
87#define SK_NAME "SK_G16"
88
89/*
90 * SK_G16 Configuration
91 * --------------------
92 */
93
94/*
95 * Abbreviations
96 * -------------
97 *
98 * RAM - used for the 16KB shared memory
99 * Boot_ROM, ROM - are used for referencing the BootEPROM
100 *
101 * SK_BOOT_ROM and SK_ADDR are symbolic constants used to configure
102 * the behaviour of the driver and the SK_G16.
103 *
104 * ! See sk_g16.install on how to install and configure the driver !
105 *
106 * SK_BOOT_ROM defines if the Boot_ROM should be switched off or not.
107 *
108 * SK_ADDR defines the address where the RAM will be mapped into the real
109 * host memory.
110 * valid addresses are from 0xa0000 to 0xfc000 in 16Kbyte steps.
111 * Valid addresses range from 0xa0000 to 0xfc000 in 16KB steps.
112
113#define SK_BOOT_ROM 1 /* 1=BootROM on 0=off */
114
115#define SK_ADDR 0xcc000
116
117/*
118 * POS3 holds bits A14-A19 of the address bus. These bits can be set
119 * to choose the RAM address; that is why the RAM address can only be
120 * chosen in 16KB steps.
121 */
122
123#define POS_ADDR (rom_addr>>14) /* Do not change this line */
124
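/*
 * Worked example: with the default SK_ADDR of 0xcc000 above, POS_ADDR
 * evaluates to 0xcc000 >> 14 = 0x33, i.e. exactly address bits A14-A19
 * of the chosen RAM base, which is what gets written into POS3.
 */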
125/*
126 * SK_G16 I/O PORT's + IRQ's + Boot_ROM locations
127 * ----------------------------------------------
128 */
129
130/*
131 * Like nearly every other card, the SK_G16 has a specified I/O port
132 * region and only a few possible IRQs.
133 * The Installation Guide from Schneider & Koch lists IRQ2 as a possible
134 * interrupt. IRQ2 is always IRQ9 on boards with two cascaded interrupt
135 * controllers, so SK_IRQS uses IRQ9.
136 */
137
138/* Don't touch any of the following #defines. */
139
140#define SK_IO_PORTS { 0x100, 0x180, 0x208, 0x220, 0x288, 0x320, 0x328, 0x390, 0 }
141
142#define SK_IRQS { 3, 5, 9, 11, 0 }
143
144#define SK_BOOT_ROM_LOCATIONS { 0xc0000, 0xc4000, 0xc8000, 0xcc000, 0xd0000, 0xd4000, 0xd8000, 0xdc000, 0 }
145
146#define SK_BOOT_ROM_ID { 0x55, 0xaa, 0x10, 0x50, 0x06, 0x33 }
147
148/*
149 * SK_G16 POS REGISTERS
150 * --------------------
151 */
152
153/*
154 * SK_G16 has a Programmable Option Select (POS) Register.
155 * The POS is composed of 8 separate registers (POS0-7) which
156 * are I/O mapped on an address set by the W1 switch.
157 *
158 */
159
160#define SK_POS_SIZE 8 /* 8 I/O Ports are used by SK_G16 */
161
162#define SK_POS0 ioaddr /* Card-ID Low (R) */
163#define SK_POS1 ioaddr+1 /* Card-ID High (R) */
164#define SK_POS2 ioaddr+2 /* Card-Enable, Boot-ROM Disable (RW) */
165#define SK_POS3 ioaddr+3 /* Base address of RAM */
166#define SK_POS4 ioaddr+4 /* IRQ */
167
168/* POS5 - POS7 are unused */
169
170/*
171 * SK_G16 MAC PREFIX
172 * -----------------
173 */
174
175/*
176 * Schneider & Koch manufacturer code (00:00:a5).
177 * This must be checked to make sure it really is an SK card.
178 */
179
180#define SK_MAC0 0x00
181#define SK_MAC1 0x00
182#define SK_MAC2 0x5a
183
184/*
185 * SK_G16 ID
186 * ---------
187 */
188
189/*
190 * If POS0 and POS1 contain the following ID, then we know
191 * which I/O port address we are at.
192 */
193
194#define SK_IDLOW 0xfd
195#define SK_IDHIGH 0x6a
196
197
198/*
199 * LANCE POS Bit definitions
200 * -------------------------
201 */
202
203#define SK_ROM_RAM_ON (POS2_CARD)
204#define SK_ROM_RAM_OFF (POS2_EPROM)
205#define SK_ROM_ON (inb(SK_POS2) & POS2_CARD)
206#define SK_ROM_OFF (inb(SK_POS2) | POS2_EPROM)
207#define SK_RAM_ON (inb(SK_POS2) | POS2_CARD)
208#define SK_RAM_OFF (inb(SK_POS2) & POS2_EPROM)
209
210#define POS2_CARD 0x0001 /* 1 = SK_G16 on 0 = off */
211#define POS2_EPROM 0x0002 /* 1 = Boot EPROM off 0 = on */
212
213/*
214 * SK_G16 Memory mapped Registers
215 * ------------------------------
216 *
217 */
218
219#define SK_IOREG (&board->ioreg) /* LANCE data registers. */
220#define SK_PORT (&board->port) /* Control, Status register */
221#define SK_IOCOM (&board->iocom) /* I/O Command */
222
223/*
224 * SK_G16 Status/Control Register bits
225 * -----------------------------------
226 *
227 * (C) Controlreg (S) Statusreg
228 */
229
230/*
231 * Register transfer: 0 = no transfer
232 * 1 = transferring data between LANCE and I/O reg
233 */
234#define SK_IORUN 0x20
235
236/*
237 * LANCE interrupt: 0 = LANCE interrupt occurred
238 * 1 = no LANCE interrupt occurred
239 */
240#define SK_IRQ 0x10
241
242#define SK_RESET 0x08 /* Reset SK_CARD: 0 = RESET 1 = normal */
243#define SK_RW 0x02 /* 0 = write to 1 = read from */
244#define SK_ADR 0x01 /* 0 = REG DataPort 1 = RAP Reg addr port */
245
246
247#define SK_RREG	SK_RW	/* Transfer direction: read from the LANCE */
248#define SK_WREG	0	/* Transfer direction: write to the LANCE */
249#define SK_RAP SK_ADR /* Destination Register RAP */
250#define SK_RDATA 0 /* Destination Register REG DataPort */
251
252/*
253 * SK_G16 I/O Command
254 * ------------------
255 */
256
257/*
258 * Writing any bit combination to the I/O Command register sets the
259 * internal I/O bit and starts the transfer.
260 */
261
262#define SK_DOIO 0x80 /* Do Transfer */
263
264/*
265 * LANCE RAP (Register Address Port).
266 * ---------------------------------
267 */
268
269/*
270 * The LANCE internal registers are selected through the RAP.
271 * The Registers are:
272 *
273 * CSR0 - Status and Control flags
274 * CSR1 - Low order bits of initialize block (bits 15:00)
275 * CSR2 - High order bits of initialize block (bits 07:00, 15:08 are reserved)
276 * CSR3 - Allows redefinition of the Bus Master Interface.
277 * This register must be set to 0x0002, which means BSWAP = 0,
278 * ACON = 1, BCON = 0;
279 *
280 */
281
282#define CSR0 0x00
283#define CSR1 0x01
284#define CSR2 0x02
285#define CSR3 0x03
286
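/*
 * Illustration only - a plausible CSR write sequence pieced together from
 * the Status/Control register and I/O Command descriptions above. The
 * driver's real SK_set_RAP()/SK_write_reg() appear further down in the
 * file and may differ in detail.
 */
static void sk_csr_write_sketch(int csr, int value)
{
	writeb(SK_RESET | SK_RAP | SK_WREG, SK_PORT);	/* address the RAP, direction = write */
	writew(csr, SK_IOREG);				/* CSR number to select */
	writeb(SK_DOIO, SK_IOCOM);			/* start the transfer */
	while (readb(SK_PORT) & SK_IORUN)		/* wait until it finishes */
		barrier();

	writeb(SK_RESET | SK_RDATA | SK_WREG, SK_PORT);	/* now the REG DataPort */
	writew(value, SK_IOREG);
	writeb(SK_DOIO, SK_IOCOM);
	while (readb(SK_PORT) & SK_IORUN)
		barrier();
}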
287/*
288 * General Definitions
289 * ===================
290 */
291
292/*
293 * Set the number of Tx and Rx buffers, using Log_2(# buffers).
294 * We have 16KB of RAM which the LANCE can access. This memory holds
295 * not only the buffers but also the ring descriptors and the
296 * initialize block.
297 * Don't change anything unless you really know what you are doing.
298 */
299
300#define LC_LOG_TX_BUFFERS 1 /* (2 == 2^^1) 2 Transmit buffers */
301#define LC_LOG_RX_BUFFERS 3 /* (8 == 2^^3) 8 Receive buffers */
302
303/* Descriptor ring sizes */
304
305#define TMDNUM (1 << (LC_LOG_TX_BUFFERS)) /* 2 Transmit descriptor rings */
306#define RMDNUM (1 << (LC_LOG_RX_BUFFERS)) /* 8 Receive Buffers */
307
308/* Define Mask for setting RMD, TMD length in the LANCE init_block */
309
310#define TMDNUMMASK (LC_LOG_TX_BUFFERS << 29)
311#define RMDNUMMASK (LC_LOG_RX_BUFFERS << 29)
312
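/*
 * Worked example: with LC_LOG_RX_BUFFERS = 3, RMDNUMMASK = 3 << 29 =
 * 0x60000000. The shift by 29 places log2(ring length) in the top three
 * bits of the 32-bit ring-address word of the LANCE initialization block,
 * which is where the Am7990 expects it (8 receive descriptors here).
 */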
313/*
314 * Data Buffer size is set to maximum packet length.
315 */
316
317#define PKT_BUF_SZ 1518
318
319/*
320 * The number of low I/O ports used by the ethercard.
321 */
322
323#define ETHERCARD_TOTAL_SIZE SK_POS_SIZE
324
325/*
326 * SK_DEBUG
327 *
328 * Here you can choose what level of debugging is wanted.
329 *
330 * If SK_DEBUG and SK_DEBUG2 are undefined, then only the
331 * necessary messages will be printed.
332 *
333 * If SK_DEBUG is defined, there will be many debugging prints
334 * which can help to find some mistakes in configuration or even
335 * in the driver code.
336 *
337 * If SK_DEBUG2 is defined, many many messages will be printed
338 * which normally you don't need. I used this to check the interrupt
339 * routine.
340 *
341 * (If you define only SK_DEBUG2 then only the messages for
342 * checking interrupts will be printed!)
343 *
344 * The normal way of life is:
345 *
346 * To get the whole thing going, leave both symbolic constants
347 * undefined. If you face any problems and you know what's going
348 * on (you know something about the card and can interpret some
349 * hex LANCE register output), then define SK_DEBUG.
350 *
351 */
352
353#undef SK_DEBUG /* debugging */
354#undef SK_DEBUG2 /* debugging with more verbose report */
355
356#ifdef SK_DEBUG
357#define PRINTK(x) printk x
358#else
359#define PRINTK(x) /**/
360#endif
361
362#ifdef SK_DEBUG2
363#define PRINTK2(x) printk x
364#else
365#define PRINTK2(x) /**/
366#endif
367
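/*
 * Usage note: since PRINTK(x) expands to "printk x", call sites need
 * double parentheses, e.g.
 *
 *	PRINTK(("%s: CSR0 = %#04x\n", dev->name, SK_read_reg(CSR0)));
 */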
368/*
369 * SK_G16 RAM
370 *
371 * The components are memory mapped and can be set in a region from
372 * 0x00000 through 0xfc000 in 16KB steps.
373 *
374 * The network components are: dual-ported RAM, PROM, I/O register,
375 * status and control registers, and the I/O command register.
376 *
377 * dual ported RAM: This is the only memory region which the LANCE chip
378 * has access to. From the Lance it is addressed from 0x0000 to
379 * 0x3fbf. The host accesses it normally.
380 *
381 * PROM: The PROM holds the Ethernet MAC address. It is realised as an
382 * 8-bit PROM, which means that only the 16 even addresses of the
383 * 32-byte address region are used. Access to an odd address results in
384 * invalid data.
385 *
386 * LANCE I/O Reg: The I/O Reg is built of 4 single registers: Low-Byte
387 * Write, Hi-Byte Write, Low-Byte Read and Hi-Byte Read.
388 * Transfers from or to the LANCE are always 16 bits wide, so the Low and
389 * High registers are always relevant.
390 *
391 * The Data from the Readregister is not the data in the Writeregister!!
392 *
393 * Port: Status and control register.
394 *       Two different registers which share the same address; Status is
395 *       read-only, Control is write-only.
396 *
397 * I/O Command:
398 *       Any bit combination written in here starts the transfer between
399 *       host and LANCE.
400 */
401
402typedef struct
403{
404 unsigned char ram[0x3fc0]; /* 16KB dual ported ram */
405 unsigned char rom[0x0020]; /* 32Byte PROM containing 6Byte MAC */
406 unsigned char res1[0x0010]; /* reserved */
407 unsigned volatile short ioreg;/* LANCE I/O Register */
408 unsigned volatile char port; /* Statusregister and Controlregister */
409 unsigned char iocom; /* I/O Command Register */
410} SK_RAM;
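
As noted above, the PROM is only 8 bits wide, so just the even offsets of the 32-byte window hold valid data; SK_probe() below reads the MAC with a stride of two. A standalone sketch of that access pattern, with fabricated PROM contents:

/* Standalone sketch (userspace, fabricated PROM contents) of reading a
 * 6-byte MAC address from an 8-bit PROM mapped into a 32-byte window
 * where only the even offsets are valid. */
#include <stdio.h>

#define ETH_ALEN 6

int main(void)
{
    /* Hypothetical dump of the 32-byte PROM window; odd bytes are junk. */
    unsigned char prom[32] = {
        0x00, 0xff, 0x00, 0xff, 0x5a, 0xff,   /* vendor bytes at offsets 0, 2, 4 */
        0x12, 0xff, 0x34, 0xff, 0x56, 0xff,   /* board-specific bytes            */
    };
    unsigned char mac[ETH_ALEN];
    int i;

    for (i = 0; i < ETH_ALEN; i++)
        mac[i] = prom[2 * i];                 /* skip the invalid odd bytes */

    printf("MAC: %02x:%02x:%02x:%02x:%02x:%02x\n",
           mac[0], mac[1], mac[2], mac[3], mac[4], mac[5]);
    return 0;
}
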
411
412/* struct */
413
414/*
415 * This is the structure of the dual ported RAM. We
416 * have exactly 16320 bytes. It must contain:
417 *
418 * - Initialize Block (starting at a word boundary)
419 * - Receive and Transmit Descriptor Rings (quadword boundary)
420 * - Data Buffers (arbitrary boundary)
421 *
422 * This is because on the SK_G16 the LANCE has access only to the dual
423 * ported RAM and nowhere else.
424 */
425
426struct SK_ram
427{
428 struct init_block ib;
429 struct tmd tmde[TMDNUM];
430 struct rmd rmde[RMDNUM];
431 char tmdbuf[TMDNUM][PKT_BUF_SZ];
432 char rmdbuf[RMDNUM][PKT_BUF_SZ];
433};
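
A quick standalone check that this layout really fits into the 0x3fc0 bytes of dual ported RAM; the per-structure sizes below are assumptions based on the 32-bit layout of the structures in sk_g16.h:

/* Standalone arithmetic check (userspace) that the SK_ram layout fits in
 * the 16320 (0x3fc0) bytes of dual ported RAM.  The per-structure sizes
 * are assumptions based on the 32-bit layout of sk_g16.h. */
#include <stdio.h>

#define RAM_SIZE        0x3fc0   /* dual ported RAM visible to the LANCE     */
#define INIT_BLOCK_SIZE 24       /* mode + paddr[6] + laddr[8] + rdrp + tdrp */
#define DESC_SIZE       8        /* one tmd or rmd entry                     */
#define PKT_BUF_SZ      1518
#define TMDNUM          2
#define RMDNUM          8

int main(void)
{
    int used = INIT_BLOCK_SIZE
             + (TMDNUM + RMDNUM) * DESC_SIZE
             + (TMDNUM + RMDNUM) * PKT_BUF_SZ;

    printf("used %d of %d bytes, %d left\n", used, RAM_SIZE, RAM_SIZE - used);
    return 0;
}
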
434
435/*
436 * Structure where all necessary information is for ring buffer
437 * management and statistics.
438 */
439
440struct priv
441{
442 struct SK_ram *ram; /* dual ported ram structure */
443 struct rmd *rmdhead; /* start of receive ring descriptors */
444 struct tmd *tmdhead; /* start of transmit ring descriptors */
445    int            rmdnum;         /* currently used receive ring descriptor */
446    int            tmdnum;         /* current transmit descriptor for transmitting data */
447 int tmdlast; /* last sent descriptor used for error handling, etc */
448 void *rmdbufs[RMDNUM]; /* pointer to the receive buffers */
449 void *tmdbufs[TMDNUM]; /* pointer to the transmit buffers */
450 struct net_device_stats stats; /* Device driver statistics */
451};
452
453/* global variable declaration */
454
455/* IRQ map used to reserve an IRQ (see SK_open()) */
456
457/* static variables */
458
459static SK_RAM *board; /* pointer to our memory mapped board components */
460static DEFINE_SPINLOCK(SK_lock);
461
462/* Macros */
463
464
465/* Function Prototypes */
466
467/*
468 * Device Driver functions
469 * -----------------------
470 * See each function's definition header for a short explanation.
471 */
472
473static int SK_probe(struct net_device *dev, short ioaddr);
474
475static void SK_timeout(struct net_device *dev);
476static int SK_open(struct net_device *dev);
477static int SK_send_packet(struct sk_buff *skb, struct net_device *dev);
478static irqreturn_t SK_interrupt(int irq, void *dev_id, struct pt_regs * regs);
479static void SK_rxintr(struct net_device *dev);
480static void SK_txintr(struct net_device *dev);
481static int SK_close(struct net_device *dev);
482
483static struct net_device_stats *SK_get_stats(struct net_device *dev);
484
485unsigned int SK_rom_addr(void);
486
487static void set_multicast_list(struct net_device *dev);
488
489/*
490 * LANCE Functions
491 * ---------------
492 */
493
494static int SK_lance_init(struct net_device *dev, unsigned short mode);
495void SK_reset_board(void);
496void SK_set_RAP(int reg_number);
497int SK_read_reg(int reg_number);
498int SK_rread_reg(void);
499void SK_write_reg(int reg_number, int value);
500
501/*
502 * Debugging functions
503 * -------------------
504 */
505
506void SK_print_pos(struct net_device *dev, char *text);
507void SK_print_dev(struct net_device *dev, char *text);
508void SK_print_ram(struct net_device *dev);
509
510
511/*-
512 * Function : SK_init
513 * Author : Patrick J.D. Weichmann
514 * Date Created : 94/05/26
515 *
516 * Description : Check for a SK_G16 network adaptor and initialize it.
517 * This function gets called by dev_init which initializes
518 * all Network devices.
519 *
520 * Parameters : I : struct net_device *dev - structure preconfigured
521 * from Space.c
522 * Return Value : 0 = Driver Found and initialized
523 * Errors : ENODEV - no device found
524 * ENXIO - not probed
525 * Globals : None
526 * Update History :
527 * YY/MM/DD uid Description
528-*/
529
530static int io; /* 0 == probe */
531
532/*
533 * Check for a network adaptor of this type, and return '0' if one exists.
534 * If dev->base_addr == 0, probe all likely locations.
535 * If dev->base_addr == 1, always return failure.
536 */
537
538struct net_device * __init SK_init(int unit)
539{
540 int *port, ports[] = SK_IO_PORTS; /* SK_G16 supported ports */
541 static unsigned version_printed;
542 struct net_device *dev = alloc_etherdev(sizeof(struct priv));
543 int err = -ENODEV;
544
545 if (!dev)
546 return ERR_PTR(-ENOMEM);
547
548 if (unit >= 0) {
549 sprintf(dev->name, "eth%d", unit);
550 netdev_boot_setup_check(dev);
551 io = dev->base_addr;
552 }
553
554 if (version_printed++ == 0)
555 PRINTK(("%s: %s", SK_NAME, rcsid));
556
557 if (io > 0xff) { /* Check a single specified address */
558 err = -EBUSY;
559		/* Check if an SK_G16 is at the specified address */
560 if (request_region(io, ETHERCARD_TOTAL_SIZE, "sk_g16")) {
561 err = SK_probe(dev, io);
562 if (!err)
563 goto got_it;
564 release_region(io, ETHERCARD_TOTAL_SIZE);
565 }
566 } else if (io > 0) { /* Don't probe at all */
567 err = -ENXIO;
568 } else {
569 /* Autoprobe base_addr */
570 for (port = &ports[0]; *port; port++) {
571 io = *port;
572
573 /* Check if I/O Port region is used by another board */
574 if (!request_region(io, ETHERCARD_TOTAL_SIZE, "sk_g16"))
575 continue; /* Try next Port address */
576
577			/* Check if an SK_G16 is at ioaddr */
578 if (SK_probe(dev, io) == 0)
579 goto got_it;
580
581 release_region(io, ETHERCARD_TOTAL_SIZE);
582 }
583 }
584err_out:
585 free_netdev(dev);
586 return ERR_PTR(err);
587
588got_it:
589 err = register_netdev(dev);
590 if (err) {
591 release_region(dev->base_addr, ETHERCARD_TOTAL_SIZE);
592 goto err_out;
593 }
594 return dev;
595
596} /* End of SK_init */
597
598
599MODULE_AUTHOR("Patrick J.D. Weichmann");
600MODULE_DESCRIPTION("Schneider & Koch G16 Ethernet Device Driver");
601MODULE_LICENSE("GPL");
602MODULE_PARM(io, "i");
603MODULE_PARM_DESC(io, "0 to probe common ports (unsafe), or the I/O base of the board");
604
605
606#ifdef MODULE
607
608static struct net_device *SK_dev;
609
610static int __init SK_init_module (void)
611{
612 SK_dev = SK_init(-1);
613 return IS_ERR(SK_dev) ? PTR_ERR(SK_dev) : 0;
614}
615
616static void __exit SK_cleanup_module (void)
617{
618 unregister_netdev(SK_dev);
619 release_region(SK_dev->base_addr, ETHERCARD_TOTAL_SIZE);
620 free_netdev(SK_dev);
621}
622
623module_init(SK_init_module);
624module_exit(SK_cleanup_module);
625#endif
626
627
628/*-
629 * Function : SK_probe
630 * Author : Patrick J.D. Weichmann
631 * Date Created : 94/05/26
632 *
633 * Description : This function is called by SK_init and
634 * does the main part of initialization.
635 *
636 * Parameters : I : struct net_device *dev - SK_G16 device structure
637 * I : short ioaddr - I/O Port address where POS is.
638 * Return Value : 0 = Initialization done
639 * Errors : ENODEV - No SK_G16 found
640 * -1 - Configuration problem
641 * Globals : board - pointer to SK_RAM
642 * Update History :
643 * YY/MM/DD uid Description
644 * 94/06/30 pwe SK_ADDR now checked and at the correct place
645-*/
646
647int __init SK_probe(struct net_device *dev, short ioaddr)
648{
649 int i,j; /* Counters */
650 int sk_addr_flag = 0; /* SK ADDR correct? 1 - no, 0 - yes */
651 unsigned int rom_addr; /* used to store RAM address used for POS_ADDR */
652
653 struct priv *p = netdev_priv(dev); /* SK_G16 private structure */
654
655 if (inb(SK_POS0) != SK_IDLOW || inb(SK_POS1) != SK_IDHIGH)
656 return -ENODEV;
657 dev->base_addr = ioaddr;
658
659 if (SK_ADDR & 0x3fff || SK_ADDR < 0xa0000)
660 {
661
662 sk_addr_flag = 1;
663
664 /*
665	 * Now here we could use a routine which searches for a free
666	 * place in the RAM and sets SK_ADDR if found. TODO.
667 */
668 }
669
670 if (SK_BOOT_ROM) /* Shall we keep Boot_ROM on ? */
671 {
672 PRINTK(("## %s: SK_BOOT_ROM is set.\n", SK_NAME));
673
674 rom_addr = SK_rom_addr();
675
676 if (rom_addr == 0) /* No Boot_ROM found */
677 {
678 if (sk_addr_flag) /* No or Invalid SK_ADDR is defined */
679 {
680 printk("%s: SK_ADDR %#08x is not valid. Check configuration.\n",
681 dev->name, SK_ADDR);
682 return -1;
683 }
684
685 rom_addr = SK_ADDR; /* assign predefined address */
686
687 PRINTK(("## %s: NO Bootrom found \n", SK_NAME));
688
689 outb(SK_ROM_RAM_OFF, SK_POS2); /* Boot_ROM + RAM off */
690 outb(POS_ADDR, SK_POS3); /* Set RAM address */
691 outb(SK_RAM_ON, SK_POS2); /* enable RAM */
692 }
693 else if (rom_addr == SK_ADDR)
694 {
695 printk("%s: RAM + ROM are set to the same address %#08x\n"
696 " Check configuration. Now switching off Boot_ROM\n",
697 SK_NAME, rom_addr);
698
699 outb(SK_ROM_RAM_OFF, SK_POS2); /* Boot_ROM + RAM off*/
700 outb(POS_ADDR, SK_POS3); /* Set RAM address */
701 outb(SK_RAM_ON, SK_POS2); /* enable RAM */
702 }
703 else
704 {
705 PRINTK(("## %s: Found ROM at %#08x\n", SK_NAME, rom_addr));
706 PRINTK(("## %s: Keeping Boot_ROM on\n", SK_NAME));
707
708 if (sk_addr_flag) /* No or Invalid SK_ADDR is defined */
709 {
710 printk("%s: SK_ADDR %#08x is not valid. Check configuration.\n",
711 dev->name, SK_ADDR);
712 return -1;
713 }
714
715 rom_addr = SK_ADDR;
716
717 outb(SK_ROM_RAM_OFF, SK_POS2); /* Boot_ROM + RAM off */
718 outb(POS_ADDR, SK_POS3); /* Set RAM address */
719 outb(SK_ROM_RAM_ON, SK_POS2); /* RAM on, BOOT_ROM on */
720 }
721 }
722 else /* Don't keep Boot_ROM */
723 {
724 PRINTK(("## %s: SK_BOOT_ROM is not set.\n", SK_NAME));
725
726 if (sk_addr_flag) /* No or Invalid SK_ADDR is defined */
727 {
728 printk("%s: SK_ADDR %#08x is not valid. Check configuration.\n",
729 dev->name, SK_ADDR);
730 return -1;
731 }
732
733 rom_addr = SK_rom_addr(); /* Try to find a Boot_ROM */
734
735	/* If we find a Boot_ROM, disable it */
736
737 outb(SK_ROM_RAM_OFF, SK_POS2); /* Boot_ROM + RAM off */
738
739	/* If we found a Boot_ROM it is now disabled. Set the RAM address
740	 * to the Boot_ROM address.
741 */
742
743 if (rom_addr)
744 {
745	    printk("%s: We found Boot_ROM at %#08x. Now setting RAM on "
746 "that address\n", SK_NAME, rom_addr);
747
748 outb(POS_ADDR, SK_POS3); /* Set RAM on Boot_ROM address */
749 }
750 else /* We did not find a Boot_ROM, use predefined SK_ADDR for ram */
751 {
752 if (sk_addr_flag) /* No or Invalid SK_ADDR is defined */
753 {
754 printk("%s: SK_ADDR %#08x is not valid. Check configuration.\n",
755 dev->name, SK_ADDR);
756 return -1;
757 }
758
759 rom_addr = SK_ADDR;
760
761 outb(POS_ADDR, SK_POS3); /* Set RAM address */
762 }
763 outb(SK_RAM_ON, SK_POS2); /* enable RAM */
764 }
765
766#ifdef SK_DEBUG
767 SK_print_pos(dev, "POS registers after ROM, RAM config");
768#endif
769
770 board = (SK_RAM *) isa_bus_to_virt(rom_addr);
771
772 /* Read in station address */
773 for (i = 0, j = 0; i < ETH_ALEN; i++, j+=2)
774 {
775 dev->dev_addr[i] = readb(board->rom+j);
776 }
777
778 /* Check for manufacturer code */
779 if (!(dev->dev_addr[0] == SK_MAC0 &&
780 dev->dev_addr[1] == SK_MAC1 &&
781 dev->dev_addr[2] == SK_MAC2) )
782 {
783 PRINTK(("## %s: We did not find SK_G16 at RAM location.\n",
784 SK_NAME));
785 return -ENODEV; /* NO SK_G16 found */
786 }
787
788 printk("%s: %s found at %#3x, HW addr: %#04x:%02x:%02x:%02x:%02x:%02x\n",
789 dev->name,
790 "Schneider & Koch Netcard",
791 (unsigned int) dev->base_addr,
792 dev->dev_addr[0],
793 dev->dev_addr[1],
794 dev->dev_addr[2],
795 dev->dev_addr[3],
796 dev->dev_addr[4],
797 dev->dev_addr[5]);
798
799 memset((char *) dev->priv, 0, sizeof(struct priv)); /* clear memory */
800
801 /* Assign our Device Driver functions */
802
803 dev->open = SK_open;
804 dev->stop = SK_close;
805 dev->hard_start_xmit = SK_send_packet;
806 dev->get_stats = SK_get_stats;
807 dev->set_multicast_list = set_multicast_list;
808 dev->tx_timeout = SK_timeout;
809 dev->watchdog_timeo = HZ/7;
810
811
812 dev->flags &= ~IFF_MULTICAST;
813
814 /* Initialize private structure */
815
816 p->ram = (struct SK_ram *) rom_addr; /* Set dual ported RAM addr */
817 p->tmdhead = &(p->ram)->tmde[0]; /* Set TMD head */
818 p->rmdhead = &(p->ram)->rmde[0]; /* Set RMD head */
819
820 /* Initialize buffer pointers */
821
822 for (i = 0; i < TMDNUM; i++)
823 {
824 p->tmdbufs[i] = &(p->ram)->tmdbuf[i];
825 }
826
827 for (i = 0; i < RMDNUM; i++)
828 {
829 p->rmdbufs[i] = &(p->ram)->rmdbuf[i];
830 }
831
832#ifdef SK_DEBUG
833 SK_print_pos(dev, "End of SK_probe");
834 SK_print_ram(dev);
835#endif
836 return 0; /* Initialization done */
837} /* End of SK_probe() */
838
839
840/*-
841 * Function : SK_open
842 * Author : Patrick J.D. Weichmann
843 * Date Created : 94/05/26
844 *
845 * Description  : This function is called some time after booting,
846 *                when the ifconfig program is run.
847 *
848 *                This function requests an IRQ and sets the corresponding
849 *                IRQ in the card, then calls SK_lance_init() to
850 *                initialize and start the LANCE chip. If everything is
851 *                OK it returns 0, which means the SK_G16 is now
852 *                opened and operational.
853 *
854 * (Called by dev_open() /net/inet/dev.c)
855 *
856 * Parameters : I : struct net_device *dev - SK_G16 device structure
857 * Return Value : 0 - Device opened
858 * Errors : -EAGAIN - Open failed
859 * Side Effects : None
860 * Update History :
861 * YY/MM/DD uid Description
862-*/
863
864static int SK_open(struct net_device *dev)
865{
866 int i = 0;
867 int irqval = 0;
868 int ioaddr = dev->base_addr;
869
870 int irqtab[] = SK_IRQS;
871
872 struct priv *p = netdev_priv(dev);
873
874 PRINTK(("## %s: At beginning of SK_open(). CSR0: %#06x\n",
875 SK_NAME, SK_read_reg(CSR0)));
876
877 if (dev->irq == 0) /* Autoirq */
878 {
879 i = 0;
880
881 /*
882 * Check if one IRQ out of SK_IRQS is free and install
883 * interrupt handler.
884	 * Most of this is done by request_irq().
885 * irqval: 0 - interrupt handler installed for IRQ irqtab[i]
886 * -EBUSY - interrupt busy
887 * -EINVAL - irq > 15 or handler = NULL
888 */
889
890 do
891 {
892 irqval = request_irq(irqtab[i], &SK_interrupt, 0, "sk_g16", dev);
893 i++;
894 } while (irqval && irqtab[i]);
895
896 if (irqval) /* We tried every possible IRQ but no success */
897 {
898 printk("%s: unable to get an IRQ\n", dev->name);
899 return -EAGAIN;
900 }
901
902 dev->irq = irqtab[--i];
903
904 outb(i<<2, SK_POS4); /* Set Card on probed IRQ */
905
906 }
907 else if (dev->irq == 2) /* IRQ2 is always IRQ9 */
908 {
909 if (request_irq(9, &SK_interrupt, 0, "sk_g16", dev))
910 {
911 printk("%s: unable to get IRQ 9\n", dev->name);
912 return -EAGAIN;
913 }
914 dev->irq = 9;
915
916 /*
917 * Now we set card on IRQ2.
918 * This can be confusing, but remember that IRQ2 on the network
919 * card is in reality IRQ9
920 */
921 outb(0x08, SK_POS4); /* set card to IRQ2 */
922
923 }
924 else /* Check IRQ as defined in Space.c */
925 {
926 int i = 0;
927
928 /* check if IRQ free and valid. Then install Interrupt handler */
929
930 if (request_irq(dev->irq, &SK_interrupt, 0, "sk_g16", dev))
931 {
932 printk("%s: unable to get selected IRQ\n", dev->name);
933 return -EAGAIN;
934 }
935
936 switch(dev->irq)
937 {
938 case 3: i = 0;
939 break;
940 case 5: i = 1;
941 break;
942 case 2: i = 2;
943 break;
944 case 11:i = 3;
945 break;
946 default:
947			printk("%s: Preselected IRQ %d is invalid for %s boards\n",
948 dev->name,
949 dev->irq,
950 SK_NAME);
951 return -EAGAIN;
952 }
953
954 outb(i<<2, SK_POS4); /* Set IRQ on card */
955 }
956
957 printk("%s: Schneider & Koch G16 at %#3x, IRQ %d, shared mem at %#08x\n",
958 dev->name, (unsigned int)dev->base_addr,
959 (int) dev->irq, (unsigned int) p->ram);
960
961 if (!(i = SK_lance_init(dev, 0))) /* LANCE init OK? */
962 {
963 netif_start_queue(dev);
964
965#ifdef SK_DEBUG
966
967 /*
968 * This debug block tries to stop LANCE,
969 * reinit LANCE with transmitter and receiver disabled,
970 * then stop again and reinit with NORMAL_MODE
971 */
972
973 printk("## %s: After lance init. CSR0: %#06x\n",
974 SK_NAME, SK_read_reg(CSR0));
975 SK_write_reg(CSR0, CSR0_STOP);
976 printk("## %s: LANCE stopped. CSR0: %#06x\n",
977 SK_NAME, SK_read_reg(CSR0));
978 SK_lance_init(dev, MODE_DTX | MODE_DRX);
979 printk("## %s: Reinit with DTX + DRX off. CSR0: %#06x\n",
980 SK_NAME, SK_read_reg(CSR0));
981 SK_write_reg(CSR0, CSR0_STOP);
982 printk("## %s: LANCE stopped. CSR0: %#06x\n",
983 SK_NAME, SK_read_reg(CSR0));
984 SK_lance_init(dev, MODE_NORMAL);
985 printk("## %s: LANCE back to normal mode. CSR0: %#06x\n",
986 SK_NAME, SK_read_reg(CSR0));
987 SK_print_pos(dev, "POS regs before returning OK");
988
989#endif /* SK_DEBUG */
990
991 return 0; /* SK_open() is successful */
992 }
993 else /* LANCE init failed */
994 {
995
996 PRINTK(("## %s: LANCE init failed: CSR0: %#06x\n",
997 SK_NAME, SK_read_reg(CSR0)));
998
999 return -EAGAIN;
1000 }
1001
1002} /* End of SK_open() */
1003
1004
1005/*-
1006 * Function : SK_lance_init
1007 * Author : Patrick J.D. Weichmann
1008 * Date Created : 94/05/26
1009 *
1010 * Description : Reset LANCE chip, fill RMD, TMD structures with
1011 * start values and Start LANCE.
1012 *
1013 * Parameters : I : struct net_device *dev - SK_G16 device structure
1014 * I : int mode - put LANCE into "mode" see data-sheet for
1015 * more info.
1016 * Return Value : 0 - Init done
1017 * Errors : -1 - Init failed
1018 * Update History :
1019 * YY/MM/DD uid Description
1020-*/
1021
1022static int SK_lance_init(struct net_device *dev, unsigned short mode)
1023{
1024 int i;
1025 unsigned long flags;
1026 struct priv *p = netdev_priv(dev);
1027 struct tmd *tmdp;
1028 struct rmd *rmdp;
1029
1030 PRINTK(("## %s: At beginning of LANCE init. CSR0: %#06x\n",
1031 SK_NAME, SK_read_reg(CSR0)));
1032
1033 /* Reset LANCE */
1034 SK_reset_board();
1035
1036 /* Initialize TMD's with start values */
1037 p->tmdnum = 0; /* First descriptor for transmitting */
1038 p->tmdlast = 0; /* First descriptor for reading stats */
1039
1040 for (i = 0; i < TMDNUM; i++) /* Init all TMD's */
1041 {
1042 tmdp = p->tmdhead + i;
1043
1044 writel((unsigned long) p->tmdbufs[i], tmdp->u.buffer); /* assign buffer */
1045
1046 /* Mark TMD as start and end of packet */
1047 writeb(TX_STP | TX_ENP, &tmdp->u.s.status);
1048 }
1049
1050
1051 /* Initialize RMD's with start values */
1052
1053 p->rmdnum = 0; /* First RMD which will be used */
1054
1055 for (i = 0; i < RMDNUM; i++) /* Init all RMD's */
1056 {
1057 rmdp = p->rmdhead + i;
1058
1059
1060 writel((unsigned long) p->rmdbufs[i], rmdp->u.buffer); /* assign buffer */
1061
1062 /*
1063	 * LANCE must be the owner at the beginning so that it can fill in
1064	 * received packets, set the status and release the RMD
1065 */
1066
1067 writeb(RX_OWN, &rmdp->u.s.status);
1068
1069 writew(-PKT_BUF_SZ, &rmdp->blen); /* Buffer Size (two's complement) */
1070
1071 writeb(0, &rmdp->mlen); /* init message length */
1072
1073 }
1074
1075 /* Fill LANCE Initialize Block */
1076
1077 writew(mode, (&((p->ram)->ib.mode))); /* Set operation mode */
1078
1079 for (i = 0; i < ETH_ALEN; i++) /* Set physical address */
1080 {
1081 writeb(dev->dev_addr[i], (&((p->ram)->ib.paddr[i])));
1082 }
1083
1084 for (i = 0; i < 8; i++) /* Set multicast, logical address */
1085 {
1086 writeb(0, (&((p->ram)->ib.laddr[i]))); /* We do not use logical addressing */
1087 }
1088
1089 /* Set ring descriptor pointers and set number of descriptors */
1090
1091 writel((int)p->rmdhead | RMDNUMMASK, (&((p->ram)->ib.rdrp)));
1092 writel((int)p->tmdhead | TMDNUMMASK, (&((p->ram)->ib.tdrp)));
1093
1094 /* Prepare LANCE Control and Status Registers */
1095
1096 spin_lock_irqsave(&SK_lock, flags);
1097
1098 SK_write_reg(CSR3, CSR3_ACON); /* Ale Control !!!THIS MUST BE SET!!!! */
1099
1100 /*
1101 * LANCE addresses the RAM from 0x0000 to 0x3fbf and has no access to
1102 * PC Memory locations.
1103 *
1104     * Structure SK_ram defines that the first thing in RAM
1105     * is the initialization block, so its address, as seen by the
1106     * LANCE, is always 0x0000.
1107 *
1108 * CSR1 contains low order bits 15:0 of initialization block address
1109 * CSR2 is built of:
1110 * 7:0 High order bits 23:16 of initialization block address
1111 * 15:8 reserved, must be 0
1112 */
1113
1114 /* Set initialization block address (must be on word boundary) */
1115 SK_write_reg(CSR1, 0); /* Set low order bits 15:0 */
1116 SK_write_reg(CSR2, 0); /* Set high order bits 23:16 */
1117
1118
1119 PRINTK(("## %s: After setting CSR1-3. CSR0: %#06x\n",
1120 SK_NAME, SK_read_reg(CSR0)));
1121
1122 /* Initialize LANCE */
1123
1124 /*
1125 * INIT = Initialize, when set, causes the LANCE to begin the
1126 * initialization procedure and access the Init Block.
1127 */
1128
1129 SK_write_reg(CSR0, CSR0_INIT);
1130
1131 spin_unlock_irqrestore(&SK_lock, flags);
1132
1133 /* Wait until LANCE finished initialization */
1134
1135 SK_set_RAP(CSR0); /* Register Address Pointer to CSR0 */
1136
1137 for (i = 0; (i < 100) && !(SK_rread_reg() & CSR0_IDON); i++)
1138 ; /* Wait until init done or go ahead if problems (i>=100) */
1139
1140 if (i >= 100) /* Something is wrong ! */
1141 {
1142 printk("%s: can't init am7990, status: %04x "
1143 "init_block: %#08x\n",
1144 dev->name, (int) SK_read_reg(CSR0),
1145 (unsigned int) &(p->ram)->ib);
1146
1147#ifdef SK_DEBUG
1148 SK_print_pos(dev, "LANCE INIT failed");
1149 SK_print_dev(dev,"Device Structure:");
1150#endif
1151
1152 return -1; /* LANCE init failed */
1153 }
1154
1155 PRINTK(("## %s: init done after %d ticks\n", SK_NAME, i));
1156
1157 /* Clear Initialize done, enable Interrupts, start LANCE */
1158
1159 SK_write_reg(CSR0, CSR0_IDON | CSR0_INEA | CSR0_STRT);
1160
1161 PRINTK(("## %s: LANCE started. CSR0: %#06x\n", SK_NAME,
1162 SK_read_reg(CSR0)));
1163
1164 return 0; /* LANCE is up and running */
1165
1166} /* End of SK_lance_init() */
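
The CSR1/CSR2 writes above are trivial here only because the initialization block sits at LANCE address 0x0000; in general CSR1 takes bits 15:0 and CSR2 bits 23:16 of the 24-bit initialization block address. A standalone sketch of that split, using the real SK_G16 case and one arbitrary example address:

/* Standalone sketch (userspace) of how a 24-bit LANCE initialization
 * block address is split across CSR1 (bits 15:0) and CSR2 (bits 23:16). */
#include <stdio.h>
#include <stdint.h>

static void split_init_addr(uint32_t addr, uint16_t *csr1, uint16_t *csr2)
{
    *csr1 = addr & 0xffff;          /* low order bits 15:0              */
    *csr2 = (addr >> 16) & 0x00ff;  /* high order bits 23:16, 15:8 zero */
}

int main(void)
{
    uint16_t csr1, csr2;

    split_init_addr(0x000000, &csr1, &csr2);   /* the SK_G16 case      */
    printf("addr 0x000000 -> CSR1=%#06x CSR2=%#06x\n", csr1, csr2);

    split_init_addr(0x03fa40, &csr1, &csr2);   /* arbitrary example    */
    printf("addr 0x03fa40 -> CSR1=%#06x CSR2=%#06x\n", csr1, csr2);
    return 0;
}
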
1167
1168
1169
1170/*-
1171 * Function : SK_send_packet
1172 * Author : Patrick J.D. Weichmann
1173 * Date Created : 94/05/27
1174 *
1175 * Description  : Writes a socket buffer into a transmit descriptor
1176 * and starts transmission.
1177 *
1178 * Parameters : I : struct sk_buff *skb - packet to transfer
1179 * I : struct net_device *dev - SK_G16 device structure
1180 * Return Value : 0 - OK
1181 * 1 - Could not transmit (dev_queue_xmit will queue it)
1182 *                    and try to send it later
1183 * Globals : None
1184 * Side Effects : None
1185 * Update History :
1186 * YY/MM/DD uid Description
1187-*/
1188
1189static void SK_timeout(struct net_device *dev)
1190{
1191 printk(KERN_WARNING "%s: xmitter timed out, try to restart!\n", dev->name);
1192 SK_lance_init(dev, MODE_NORMAL); /* Reinit LANCE */
1193 netif_wake_queue(dev); /* Clear Transmitter flag */
1194 dev->trans_start = jiffies; /* Mark Start of transmission */
1195}
1196
1197static int SK_send_packet(struct sk_buff *skb, struct net_device *dev)
1198{
1199 struct priv *p = netdev_priv(dev);
1200 struct tmd *tmdp;
1201 static char pad[64];
1202
1203 PRINTK2(("## %s: SK_send_packet() called, CSR0 %#04x.\n",
1204 SK_NAME, SK_read_reg(CSR0)));
1205
1206
1207 /*
1208 * Block a timer-based transmit from overlapping.
1209     * That is, check whether we are already transmitting.
1210 */
1211
1212 netif_stop_queue (dev);
1213
1214 {
1215
1216 /* Evaluate Packet length */
1217 short len = ETH_ZLEN < skb->len ? skb->len : ETH_ZLEN;
1218
1219 tmdp = p->tmdhead + p->tmdnum; /* Which descriptor for transmitting */
1220
1221 /* Fill in Transmit Message Descriptor */
1222
1223 /* Copy data into dual ported ram */
1224
1225 memcpy_toio((tmdp->u.buffer & 0x00ffffff), skb->data, skb->len);
1226 if (len != skb->len)
1227 memcpy_toio((tmdp->u.buffer & 0x00ffffff) + skb->len, pad, len-skb->len);
1228
1229 writew(-len, &tmdp->blen); /* set length to transmit */
1230
1231 /*
1232	 * Packet start and end are always set because we use the maximum
1233	 * packet length as the buffer length.
1234 * Relinquish ownership to LANCE
1235 */
1236
1237 writeb(TX_OWN | TX_STP | TX_ENP, &tmdp->u.s.status);
1238
1239 /* Start Demand Transmission */
1240 SK_write_reg(CSR0, CSR0_TDMD | CSR0_INEA);
1241
1242 dev->trans_start = jiffies; /* Mark start of transmission */
1243
1244 /* Set pointer to next transmit buffer */
1245 p->tmdnum++;
1246 p->tmdnum &= TMDNUM-1;
1247
1248 /* Do we own the next transmit buffer ? */
1249 if (! (readb(&((p->tmdhead + p->tmdnum)->u.s.status)) & TX_OWN) )
1250 {
1251 /*
1252 * We own next buffer and are ready to transmit, so
1253 * clear busy flag
1254 */
1255 netif_start_queue(dev);
1256 }
1257
1258 p->stats.tx_bytes += skb->len;
1259
1260 }
1261
1262 dev_kfree_skb(skb);
1263 return 0;
1264} /* End of SK_send_packet */
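
SK_send_packet() wraps the transmit index with 'p->tmdnum &= TMDNUM-1', which relies on TMDNUM being a power of two, while the receive path uses the equivalent '% RMDNUM'. A standalone sketch comparing the two wrap-around idioms:

/* Standalone sketch (userspace) of the two ring-index wrap idioms used by
 * this driver; the mask form requires a power-of-two ring size. */
#include <stdio.h>

#define TMDNUM 2   /* power of two, so (i & (TMDNUM-1)) wraps correctly */
#define RMDNUM 8

int main(void)
{
    int i, tx = 0, rx = 0;

    for (i = 0; i < 10; i++) {
        printf("step %d: tx index %d, rx index %d\n", i, tx, rx);
        tx = (tx + 1) & (TMDNUM - 1);   /* as in SK_send_packet()/SK_txintr() */
        rx = (rx + 1) % RMDNUM;         /* as in SK_rxintr()                  */
    }
    return 0;
}
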
1265
1266
1267/*-
1268 * Function : SK_interrupt
1269 * Author : Patrick J.D. Weichmann
1270 * Date Created : 94/05/27
1271 *
1272 * Description : SK_G16 interrupt handler which checks for LANCE
1273 * Errors, handles transmit and receive interrupts
1274 *
1275 * Parameters : I : int irq, void *dev_id, struct pt_regs * regs -
1276 * Return Value : None
1277 * Errors : None
1278 * Globals : None
1279 * Side Effects : None
1280 * Update History :
1281 * YY/MM/DD uid Description
1282-*/
1283
1284static irqreturn_t SK_interrupt(int irq, void *dev_id, struct pt_regs * regs)
1285{
1286 int csr0;
1287 struct net_device *dev = dev_id;
1288 struct priv *p = netdev_priv(dev);
1289
1290
1291 PRINTK2(("## %s: SK_interrupt(). status: %#06x\n",
1292 SK_NAME, SK_read_reg(CSR0)));
1293
1294 if (dev == NULL)
1295 {
1296 printk("SK_interrupt(): IRQ %d for unknown device.\n", irq);
1297 }
1298
1299 spin_lock (&SK_lock);
1300
1301 csr0 = SK_read_reg(CSR0); /* store register for checking */
1302
1303 /*
1304 * Acknowledge all of the current interrupt sources, disable
1305 * Interrupts (INEA = 0)
1306 */
1307
1308 SK_write_reg(CSR0, csr0 & CSR0_CLRALL);
1309
1310 if (csr0 & CSR0_ERR) /* LANCE Error */
1311 {
1312 printk("%s: error: %04x\n", dev->name, csr0);
1313
1314 if (csr0 & CSR0_MISS) /* No place to store packet ? */
1315 {
1316 p->stats.rx_dropped++;
1317 }
1318 }
1319
1320 if (csr0 & CSR0_RINT) /* Receive Interrupt (packet arrived) */
1321 {
1322 SK_rxintr(dev);
1323 }
1324
1325 if (csr0 & CSR0_TINT) /* Transmit interrupt (packet sent) */
1326 {
1327 SK_txintr(dev);
1328 }
1329
1330 SK_write_reg(CSR0, CSR0_INEA); /* Enable Interrupts */
1331
1332 spin_unlock (&SK_lock);
1333 return IRQ_HANDLED;
1334} /* End of SK_interrupt() */
1335
1336
1337/*-
1338 * Function : SK_txintr
1339 * Author : Patrick J.D. Weichmann
1340 * Date Created : 94/05/27
1341 *
1342 * Description : After sending a packet we check status, update
1343 * statistics and relinquish ownership of transmit
1344 * descriptor ring.
1345 *
1346 * Parameters : I : struct net_device *dev - SK_G16 device structure
1347 * Return Value : None
1348 * Errors : None
1349 * Globals : None
1350 * Update History :
1351 * YY/MM/DD uid Description
1352-*/
1353
1354static void SK_txintr(struct net_device *dev)
1355{
1356 int tmdstat;
1357 struct tmd *tmdp;
1358 struct priv *p = netdev_priv(dev);
1359
1360
1361 PRINTK2(("## %s: SK_txintr() status: %#06x\n",
1362 SK_NAME, SK_read_reg(CSR0)));
1363
1364	tmdp = p->tmdhead + p->tmdlast;	/* Which buffer did we send last? */
1365
1366 /* Set next buffer */
1367 p->tmdlast++;
1368 p->tmdlast &= TMDNUM-1;
1369
1370 tmdstat = readb(&tmdp->u.s.status);
1371
1372 /*
1373 * We check status of transmitted packet.
1374 * see LANCE data-sheet for error explanation
1375 */
1376 if (tmdstat & TX_ERR) /* Error occurred */
1377 {
1378 int stat2 = readw(&tmdp->status2);
1379
1380 printk("%s: TX error: %04x %04x\n", dev->name, tmdstat, stat2);
1381
1382 if (stat2 & TX_TDR) /* TDR problems? */
1383 {
1384 printk("%s: tdr-problems \n", dev->name);
1385 }
1386
1387 if (stat2 & TX_RTRY) /* Failed in 16 attempts to transmit ? */
1388 p->stats.tx_aborted_errors++;
1389 if (stat2 & TX_LCOL) /* Late collision ? */
1390 p->stats.tx_window_errors++;
1391 if (stat2 & TX_LCAR) /* Loss of Carrier ? */
1392 p->stats.tx_carrier_errors++;
1393 if (stat2 & TX_UFLO) /* Underflow error ? */
1394 {
1395 p->stats.tx_fifo_errors++;
1396
1397 /*
1398	     * If a UFLO error occurs it will turn the transmitter off,
1399	     * so we must reinitialize the LANCE
1400 */
1401
1402 SK_lance_init(dev, MODE_NORMAL);
1403 }
1404
1405 p->stats.tx_errors++;
1406
1407 writew(0, &tmdp->status2); /* Clear error flags */
1408 }
1409 else if (tmdstat & TX_MORE) /* Collisions occurred ? */
1410 {
1411 /*
1412	 * Here I have a problem:
1413	 * I only know that there were between 1 and 15 collisions.
1414	 * That's why TX_MORE is set; after 16 attempts TX_RTRY
1415	 * would be set instead, which means the packet could not be
1416	 * sent and the transfer was aborted.
1417	 * At first I did not count this at all, but at least this way
1418	 * we see that something was not OK.
1419	 * If anyone knows a better way to handle this,
1420	 * please report it.
1421 */
1422
1423 p->stats.collisions++;
1424 }
1425 else /* Packet sent without any problems */
1426 {
1427 p->stats.tx_packets++;
1428 }
1429
1430 /*
1431     * We mark the transmitter as not busy anymore, because now we have a
1432     * free transmit descriptor which can be filled by SK_send_packet and
1433     * afterwards sent by the LANCE.
1434     *
1435     * The function which handles the slow IRQ parts is do_bottom_half(),
1436     * which runs at normal kernel priority, meaning all interrupts are
1437     * enabled (see kernel/irq.c).
1438 *
1439 * net_bh does something like this:
1440 * - check if already in net_bh
1441 * - try to transmit something from the send queue
1442 * - if something is in the receive queue send it up to higher
1443 * levels if it is a known protocol
1444 * - try to transmit something from the send queue
1445 */
1446
1447 netif_wake_queue(dev);
1448
1449} /* End of SK_txintr() */
1450
1451
1452/*-
1453 * Function : SK_rxintr
1454 * Author : Patrick J.D. Weichmann
1455 * Date Created : 94/05/27
1456 *
1457 * Description  : A packet has been received; check it for errors and pass
1458 *                it up, then relinquish ownership of the receive message descriptor.
1459 *
1460 * Parameters : I : SK_G16 device structure
1461 * Return Value : None
1462 * Globals : None
1463 * Update History :
1464 * YY/MM/DD uid Description
1465-*/
1466
1467static void SK_rxintr(struct net_device *dev)
1468{
1469
1470 struct rmd *rmdp;
1471 int rmdstat;
1472 struct priv *p = netdev_priv(dev);
1473
1474 PRINTK2(("## %s: SK_rxintr(). CSR0: %#06x\n",
1475 SK_NAME, SK_read_reg(CSR0)));
1476
1477 rmdp = p->rmdhead + p->rmdnum;
1478
1479 /* As long as we own the next entry, check status and send
1480 * it up to higher layer
1481 */
1482
1483 while (!( (rmdstat = readb(&rmdp->u.s.status)) & RX_OWN))
1484 {
1485 /*
1486 * Start and end of packet must be set, because we use
1487 * the ethernet maximum packet length (1518) as buffer size.
1488 *
1489	 * Because our buffers are of maximum size, OFLO and BUFF errors
1490	 * are of no concern (see data sheet).
1491 */
1492
1493 if ((rmdstat & (RX_STP | RX_ENP)) != (RX_STP | RX_ENP))
1494 {
1495 /* Start of a frame > 1518 Bytes ? */
1496
1497 if (rmdstat & RX_STP)
1498 {
1499 p->stats.rx_errors++; /* bad packet received */
1500 p->stats.rx_length_errors++; /* packet too long */
1501
1502 printk("%s: packet too long\n", dev->name);
1503 }
1504
1505 /*
1506 * All other packets will be ignored until a new frame with
1507 * start (RX_STP) set follows.
1508 *
1509	     * What we do is just free the descriptor for new incoming
1510 * packets.
1511 */
1512
1513 writeb(RX_OWN, &rmdp->u.s.status); /* Relinquish ownership to LANCE */
1514
1515 }
1516 else if (rmdstat & RX_ERR) /* Receive Error ? */
1517 {
1518 printk("%s: RX error: %04x\n", dev->name, (int) rmdstat);
1519
1520 p->stats.rx_errors++;
1521
1522 if (rmdstat & RX_FRAM) p->stats.rx_frame_errors++;
1523 if (rmdstat & RX_CRC) p->stats.rx_crc_errors++;
1524
1525 writeb(RX_OWN, &rmdp->u.s.status); /* Relinquish ownership to LANCE */
1526
1527 }
1528 else /* We have a packet which can be queued for the upper layers */
1529 {
1530
1531 int len = readw(&rmdp->mlen) & 0x0fff; /* extract message length from receive buffer */
1532 struct sk_buff *skb;
1533
1534 skb = dev_alloc_skb(len+2); /* allocate socket buffer */
1535
1536 if (skb == NULL) /* Could not get mem ? */
1537 {
1538
1539 /*
1540		 * Couldn't allocate an sk_buff, so we give the descriptor back
1541		 * to the LANCE, update statistics and go ahead.
1542 */
1543
1544 writeb(RX_OWN, &rmdp->u.s.status); /* Relinquish ownership to LANCE */
1545 printk("%s: Couldn't allocate sk_buff, deferring packet.\n",
1546 dev->name);
1547 p->stats.rx_dropped++;
1548
1549 break; /* Jump out */
1550 }
1551
1552 /* Prepare sk_buff to queue for upper layers */
1553
1554 skb->dev = dev;
1555 skb_reserve(skb,2); /* Align IP header on 16 byte boundary */
1556
1557 /*
1558 * Copy data out of our receive descriptor into sk_buff.
1559 *
1560	     * (rmdp->u.buffer & 0x00ffffff) -> get the address of the buffer
1561	     * (and ignore the status fields)
1562 */
1563
1564 memcpy_fromio(skb_put(skb,len), (rmdp->u.buffer & 0x00ffffff), len);
1565
1566
1567 /*
1568 * Notify the upper protocol layers that there is another packet
1569 * to handle
1570 *
1571 * netif_rx() always succeeds. see /net/inet/dev.c for more.
1572 */
1573
1574 skb->protocol=eth_type_trans(skb,dev);
1575 netif_rx(skb); /* queue packet and mark it for processing */
1576
1577 /*
1578 * Packet is queued and marked for processing so we
1579 * free our descriptor and update statistics
1580 */
1581
1582 writeb(RX_OWN, &rmdp->u.s.status);
1583 dev->last_rx = jiffies;
1584 p->stats.rx_packets++;
1585 p->stats.rx_bytes += len;
1586
1587
1588 p->rmdnum++;
1589 p->rmdnum %= RMDNUM;
1590
1591 rmdp = p->rmdhead + p->rmdnum;
1592 }
1593 }
1594} /* End of SK_rxintr() */
1595
1596
1597/*-
1598 * Function : SK_close
1599 * Author : Patrick J.D. Weichmann
1600 * Date Created : 94/05/26
1601 *
1602 * Description : close gets called from dev_close() and should
1603 *                shut the card down (free_irq, memory etc.).
1604 *
1605 * Parameters : I : struct net_device *dev - our device structure
1606 * Return Value : 0 - closed device driver
1607 * Errors : None
1608 * Globals : None
1609 * Update History :
1610 * YY/MM/DD uid Description
1611-*/
1612
1613/* I have tried to set BOOT_ROM on and RAM off but then, after an 'ifconfig
1614 * down' the system stops. So I don't set the card back to its initial state.
1615 */
1616
1617static int SK_close(struct net_device *dev)
1618{
1619
1620 PRINTK(("## %s: SK_close(). CSR0: %#06x\n",
1621 SK_NAME, SK_read_reg(CSR0)));
1622
1623 netif_stop_queue(dev); /* Transmitter busy */
1624
1625 printk("%s: Shutting %s down CSR0 %#06x\n", dev->name, SK_NAME,
1626 (int) SK_read_reg(CSR0));
1627
1628 SK_write_reg(CSR0, CSR0_STOP); /* STOP the LANCE */
1629
1630 free_irq(dev->irq, dev); /* Free IRQ */
1631
1632 return 0; /* always succeed */
1633
1634} /* End of SK_close() */
1635
1636
1637/*-
1638 * Function : SK_get_stats
1639 * Author : Patrick J.D. Weichmann
1640 * Date Created : 94/05/26
1641 *
1642 * Description : Return current status structure to upper layers.
1643 * It is called by sprintf_stats (dev.c).
1644 *
1645 * Parameters : I : struct net_device *dev - our device structure
1646 * Return Value : struct net_device_stats * - our current statistics
1647 * Errors : None
1648 * Side Effects : None
1649 * Update History :
1650 * YY/MM/DD uid Description
1651-*/
1652
1653static struct net_device_stats *SK_get_stats(struct net_device *dev)
1654{
1655
1656 struct priv *p = netdev_priv(dev);
1657
1658 PRINTK(("## %s: SK_get_stats(). CSR0: %#06x\n",
1659 SK_NAME, SK_read_reg(CSR0)));
1660
1661 return &p->stats; /* Return Device status */
1662
1663} /* End of SK_get_stats() */
1664
1665
1666/*-
1667 * Function : set_multicast_list
1668 * Author : Patrick J.D. Weichmann
1669 * Date Created : 94/05/26
1670 *
1671 * Description : This function gets called when a program performs
1672 * a SIOCSIFFLAGS call. Ifconfig does this if you call
1673 * 'ifconfig [-]allmulti' which enables or disables the
1674 * Promiscuous mode.
1675 *                Promiscuous mode is when the network card accepts all
1676 *                packets, not only the packets which match our MAC
1677 *                address. It is useful for writing a network monitor,
1678 *                but it is also a security problem. You have to remember
1679 *                that information on the net is generally not encrypted.
1680 *
1681 * Parameters : I : struct net_device *dev - SK_G16 device Structure
1682 * Return Value : None
1683 * Errors : None
1684 * Globals : None
1685 * Update History :
1686 * YY/MM/DD uid Description
1687 * 95/10/18 ACox New multicast calling scheme
1688-*/
1689
1690
1691/* Set or clear the multicast filter for SK_G16.
1692 */
1693
1694static void set_multicast_list(struct net_device *dev)
1695{
1696
1697 if (dev->flags&IFF_PROMISC)
1698 {
1699 /* Reinitialize LANCE with MODE_PROM set */
1700 SK_lance_init(dev, MODE_PROM);
1701 }
1702 else if (dev->mc_count==0 && !(dev->flags&IFF_ALLMULTI))
1703 {
1704 /* Reinitialize LANCE without MODE_PROM */
1705 SK_lance_init(dev, MODE_NORMAL);
1706 }
1707 else
1708 {
1709 /* Multicast with logical address filter on */
1710 /* Reinitialize LANCE without MODE_PROM */
1711 SK_lance_init(dev, MODE_NORMAL);
1712
1713 /* Not implemented yet. */
1714 }
1715} /* End of set_multicast_list() */
1716
1717
1718
1719/*-
1720 * Function : SK_rom_addr
1721 * Author : Patrick J.D. Weichmann
1722 * Date Created : 94/06/01
1723 *
1724 * Description : Try to find a Boot_ROM at all possible locations
1725 *
1726 * Parameters : None
1727 * Return Value : Address where Boot_ROM is
1728 * Errors : 0 - Did not find Boot_ROM
1729 * Globals : None
1730 * Update History :
1731 * YY/MM/DD uid Description
1732-*/
1733
1734unsigned int __init SK_rom_addr(void)
1735{
1736 int i,j;
1737 int rom_found = 0;
1738 unsigned int rom_location[] = SK_BOOT_ROM_LOCATIONS;
1739 unsigned char rom_id[] = SK_BOOT_ROM_ID;
1740 unsigned char test_byte;
1741
1742 /* Autodetect Boot_ROM */
1743 PRINTK(("## %s: Autodetection of Boot_ROM\n", SK_NAME));
1744
1745 for (i = 0; (rom_location[i] != 0) && (rom_found == 0); i++)
1746 {
1747
1748 PRINTK(("## Trying ROM location %#08x", rom_location[i]));
1749
1750 rom_found = 1;
1751 for (j = 0; j < 6; j++)
1752 {
1753 test_byte = readb(rom_location[i]+j);
1754			PRINTK((" %02x ", test_byte));
1755
1756 if(test_byte != rom_id[j])
1757 {
1758 rom_found = 0;
1759 }
1760 }
1761 PRINTK(("\n"));
1762 }
1763
1764 if (rom_found == 1)
1765 {
1766 PRINTK(("## %s: Boot_ROM found at %#08x\n",
1767 SK_NAME, rom_location[(i-1)]));
1768
1769 return (rom_location[--i]);
1770 }
1771 else
1772 {
1773 PRINTK(("%s: No Boot_ROM found\n", SK_NAME));
1774 return 0;
1775 }
1776} /* End of SK_rom_addr() */
1777
1778
1779
1780/* LANCE access functions
1781 *
1782 * ! CSR1-3 can only be accessed when in CSR0 the STOP bit is set !
1783 */
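
All of the access functions below follow the same pattern: first latch a CSR number into the LANCE Register Address Port (RAP), then move the data through the I/O register. A standalone model of that indirection with a fake four-CSR chip (purely illustrative, no real hardware access):

/* Standalone model (userspace) of RAP-indexed register access as used by
 * SK_set_RAP()/SK_read_reg()/SK_write_reg() below.  The 'chip' here is
 * just a struct, not real hardware. */
#include <stdio.h>
#include <stdint.h>

struct fake_lance {
    int      rap;      /* Register Address Pointer (which CSR is selected) */
    uint16_t csr[4];   /* CSR0..CSR3 */
};

static void set_rap(struct fake_lance *chip, int reg) { chip->rap = reg; }

static uint16_t read_reg(struct fake_lance *chip, int reg)
{
    set_rap(chip, reg);               /* select the register ...            */
    return chip->csr[chip->rap];      /* ... then read through the data port */
}

static void write_reg(struct fake_lance *chip, int reg, uint16_t val)
{
    set_rap(chip, reg);               /* select the register ...             */
    chip->csr[chip->rap] = val;       /* ... then write through the data port */
}

int main(void)
{
    struct fake_lance chip = { 0, { 0 } };

    write_reg(&chip, 0, 0x0004);      /* e.g. set the STOP bit in CSR0 */
    write_reg(&chip, 3, 0x0002);      /* CSR3 access is now allowed    */
    printf("CSR0=%#06x CSR3=%#06x\n", read_reg(&chip, 0), read_reg(&chip, 3));
    return 0;
}
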
1784
1785
1786/*-
1787 * Function : SK_reset_board
1788 *
1789 * Author : Patrick J.D. Weichmann
1790 *
1791 * Date Created : 94/05/25
1792 *
1793 * Description : This function resets SK_G16 and all components, but
1794 * POS registers are not changed
1795 *
1796 * Parameters : None
1797 * Return Value : None
1798 * Errors : None
1799 * Globals : SK_RAM *board - SK_RAM structure pointer
1800 *
1801 * Update History :
1802 * YY/MM/DD uid Description
1803-*/
1804
1805void SK_reset_board(void)
1806{
1807 writeb(0x00, SK_PORT); /* Reset active */
1808 mdelay(5); /* Delay min 5ms */
1809 writeb(SK_RESET, SK_PORT); /* Set back to normal operation */
1810
1811} /* End of SK_reset_board() */
1812
1813
1814/*-
1815 * Function : SK_set_RAP
1816 * Author : Patrick J.D. Weichmann
1817 * Date Created : 94/05/25
1818 *
1819 * Description : Set LANCE Register Address Port to register
1820 * for later data transfer.
1821 *
1822 * Parameters : I : reg_number - which CSR to read/write from/to
1823 * Return Value : None
1824 * Errors : None
1825 * Globals : SK_RAM *board - SK_RAM structure pointer
1826 * Update History :
1827 * YY/MM/DD uid Description
1828-*/
1829
1830void SK_set_RAP(int reg_number)
1831{
1832 writew(reg_number, SK_IOREG);
1833 writeb(SK_RESET | SK_RAP | SK_WREG, SK_PORT);
1834 writeb(SK_DOIO, SK_IOCOM);
1835
1836 while (readb(SK_PORT) & SK_IORUN)
1837 barrier();
1838} /* End of SK_set_RAP() */
1839
1840
1841/*-
1842 * Function : SK_read_reg
1843 * Author : Patrick J.D. Weichmann
1844 * Date Created : 94/05/25
1845 *
1846 * Description : Set RAP and read data from a LANCE CSR register
1847 *
1848 * Parameters : I : reg_number - which CSR to read from
1849 * Return Value : Register contents
1850 * Errors : None
1851 * Globals : SK_RAM *board - SK_RAM structure pointer
1852 * Update History :
1853 * YY/MM/DD uid Description
1854-*/
1855
1856int SK_read_reg(int reg_number)
1857{
1858 SK_set_RAP(reg_number);
1859
1860 writeb(SK_RESET | SK_RDATA | SK_RREG, SK_PORT);
1861 writeb(SK_DOIO, SK_IOCOM);
1862
1863 while (readb(SK_PORT) & SK_IORUN)
1864 barrier();
1865 return (readw(SK_IOREG));
1866
1867} /* End of SK_read_reg() */
1868
1869
1870/*-
1871 * Function : SK_rread_reg
1872 * Author : Patrick J.D. Weichmann
1873 * Date Created : 94/05/28
1874 *
1875 * Description  : Read data from the previously selected register.
1876 *                This function requires that you know which
1877 *                register is currently selected. Be aware that CSR1-3
1878 *                can only be accessed while the STOP bit is set in CSR0.
1879 *
1880 * Return Value : Register contents
1881 * Errors : None
1882 * Globals : SK_RAM *board - SK_RAM structure pointer
1883 * Update History :
1884 * YY/MM/DD uid Description
1885-*/
1886
1887int SK_rread_reg(void)
1888{
1889 writeb(SK_RESET | SK_RDATA | SK_RREG, SK_PORT);
1890
1891 writeb(SK_DOIO, SK_IOCOM);
1892
1893 while (readb(SK_PORT) & SK_IORUN)
1894 barrier();
1895 return (readw(SK_IOREG));
1896
1897} /* End of SK_rread_reg() */
1898
1899
1900/*-
1901 * Function : SK_write_reg
1902 * Author : Patrick J.D. Weichmann
1903 * Date Created : 94/05/25
1904 *
1905 * Description : This function sets the RAP then fills in the
1906 *                LANCE I/O register and starts the transfer to the LANCE.
1907 *                It waits until the transfer has ended (at most 7 ms)
1908 *                and then returns.
1909 *
1910 * Parameters : I : reg_number - which CSR to write to
1911 * I : value - what value to fill into register
1912 * Return Value : None
1913 * Errors : None
1914 * Globals : SK_RAM *board - SK_RAM structure pointer
1915 * Update History :
1916 * YY/MM/DD uid Description
1917-*/
1918
1919void SK_write_reg(int reg_number, int value)
1920{
1921 SK_set_RAP(reg_number);
1922
1923 writew(value, SK_IOREG);
1924 writeb(SK_RESET | SK_RDATA | SK_WREG, SK_PORT);
1925 writeb(SK_DOIO, SK_IOCOM);
1926
1927 while (readb(SK_PORT) & SK_IORUN)
1928 barrier();
1929} /* End of SK_write_reg */
1930
1931
1932
1933/*
1934 * Debugging functions
1935 * -------------------
1936 */
1937
1938/*-
1939 * Function : SK_print_pos
1940 * Author : Patrick J.D. Weichmann
1941 * Date Created : 94/05/25
1942 *
1943 * Description  : This function prints out the five POS (Programmable
1944 *                Option Select) registers. Used mainly to debug operation.
1945 *
1946 * Parameters : I : struct net_device *dev - SK_G16 device structure
1947 * I : char * - Text which will be printed as title
1948 * Return Value : None
1949 * Errors : None
1950 * Update History :
1951 * YY/MM/DD uid Description
1952-*/
1953
1954void SK_print_pos(struct net_device *dev, char *text)
1955{
1956 int ioaddr = dev->base_addr;
1957
1958 unsigned char pos0 = inb(SK_POS0),
1959 pos1 = inb(SK_POS1),
1960 pos2 = inb(SK_POS2),
1961 pos3 = inb(SK_POS3),
1962 pos4 = inb(SK_POS4);
1963
1964
1965 printk("## %s: %s.\n"
1966 "## pos0=%#4x pos1=%#4x pos2=%#04x pos3=%#08x pos4=%#04x\n",
1967 SK_NAME, text, pos0, pos1, pos2, (pos3<<14), pos4);
1968
1969} /* End of SK_print_pos() */
1970
1971
1972
1973/*-
1974 * Function : SK_print_dev
1975 * Author : Patrick J.D. Weichmann
1976 * Date Created : 94/05/25
1977 *
1978 * Description : This function simply prints out the important fields
1979 * of the device structure.
1980 *
1981 * Parameters : I : struct net_device *dev - SK_G16 device structure
1982 * I : char *text - Title for printing
1983 * Return Value : None
1984 * Errors : None
1985 * Update History :
1986 * YY/MM/DD uid Description
1987-*/
1988
1989void SK_print_dev(struct net_device *dev, char *text)
1990{
1991 if (dev == NULL)
1992 {
1993 printk("## %s: Device Structure. %s\n", SK_NAME, text);
1994 printk("## DEVICE == NULL\n");
1995 }
1996 else
1997 {
1998 printk("## %s: Device Structure. %s\n", SK_NAME, text);
1999 printk("## Device Name: %s Base Address: %#06lx IRQ: %d\n",
2000 dev->name, dev->base_addr, dev->irq);
2001
2002 printk("## next device: %#08x init function: %#08x\n",
2003 (int) dev->next, (int) dev->init);
2004 }
2005
2006} /* End of SK_print_dev() */
2007
2008
2009
2010/*-
2011 * Function : SK_print_ram
2012 * Author : Patrick J.D. Weichmann
2013 * Date Created : 94/06/02
2014 *
2015 * Description  : This function is used to check how things are set up
2016 *                in the 16KB RAM, including the pointers to the receive and
2017 *                transmit descriptor rings and the rx and tx buffer locations.
2018 *                It contains a minor bug in the printing, but it has no effect
2019 *                on the values; only the newlines are not correct.
2020 *
2021 * Parameters : I : struct net_device *dev - SK_G16 device structure
2022 * Return Value : None
2023 * Errors : None
2024 * Globals : None
2025 * Update History :
2026 * YY/MM/DD uid Description
2027-*/
2028
2029void __init SK_print_ram(struct net_device *dev)
2030{
2031
2032 int i;
2033 struct priv *p = netdev_priv(dev);
2034
2035 printk("## %s: RAM Details.\n"
2036 "## RAM at %#08x tmdhead: %#08x rmdhead: %#08x initblock: %#08x\n",
2037 SK_NAME,
2038 (unsigned int) p->ram,
2039 (unsigned int) p->tmdhead,
2040 (unsigned int) p->rmdhead,
2041 (unsigned int) &(p->ram)->ib);
2042
2043 printk("## ");
2044
2045 for(i = 0; i < TMDNUM; i++)
2046 {
2047 if (!(i % 3)) /* Every third line do a newline */
2048 {
2049 printk("\n## ");
2050 }
2051 printk("tmdbufs%d: %#08x ", (i+1), (int) p->tmdbufs[i]);
2052 }
2053 printk("## ");
2054
2055 for(i = 0; i < RMDNUM; i++)
2056 {
2057 if (!(i % 3)) /* Every third line do a newline */
2058 {
2059 printk("\n## ");
2060 }
2061 printk("rmdbufs%d: %#08x ", (i+1), (int) p->rmdbufs[i]);
2062 }
2063 printk("\n");
2064
2065} /* End of SK_print_ram() */
2066
diff --git a/drivers/net/sk_g16.h b/drivers/net/sk_g16.h
deleted file mode 100644
index 0a5dc0908a04..000000000000
--- a/drivers/net/sk_g16.h
+++ /dev/null
@@ -1,165 +0,0 @@
1/*-
2 *
3 * This software may be used and distributed according to the terms
4 * of the GNU General Public License, incorporated herein by reference.
5 *
6 * Module : sk_g16.h
7 * Version : $Revision$
8 *
9 * Author : M.Hipp (mhipp@student.uni-tuebingen.de)
10 * changes by : Patrick J.D. Weichmann
11 *
12 * Date Created : 94/05/25
13 *
14 * Description : In here are all necessary definitions of
15 * the am7990 (LANCE) chip used for writing a
16 * network device driver which uses this chip
17 *
18 * $Log$
19-*/
20
21#ifndef SK_G16_H
22
23#define SK_G16_H
24
25
26/*
27 * Control and Status Register 0 (CSR0) bit definitions
28 *
29 * (R=Readable) (W=Writeable) (S=Set on write) (C-Clear on write)
30 *
31 */
32
33#define CSR0_ERR 0x8000 /* Error summary (R) */
34#define CSR0_BABL 0x4000 /* Babble transmitter timeout error (RC) */
35#define CSR0_CERR 0x2000 /* Collision Error (RC) */
36#define CSR0_MISS 0x1000 /* Missed packet (RC) */
37#define CSR0_MERR 0x0800 /* Memory Error (RC) */
38#define CSR0_RINT 0x0400 /* Receiver Interrupt (RC) */
39#define CSR0_TINT 0x0200 /* Transmit Interrupt (RC) */
40#define CSR0_IDON 0x0100 /* Initialization Done (RC) */
41#define CSR0_INTR 0x0080 /* Interrupt Flag (R) */
42#define CSR0_INEA 0x0040 /* Interrupt Enable (RW) */
43#define CSR0_RXON 0x0020 /* Receiver on (R) */
44#define CSR0_TXON 0x0010 /* Transmitter on (R) */
45#define CSR0_TDMD 0x0008 /* Transmit Demand (RS) */
46#define CSR0_STOP 0x0004 /* Stop (RS) */
47#define CSR0_STRT 0x0002 /* Start (RS) */
48#define CSR0_INIT 0x0001 /* Initialize (RS) */
49
50#define CSR0_CLRALL 0x7f00 /* mask for all clearable bits */
51
52/*
53 * Control and Status Register 3 (CSR3) bit definitions
54 *
55 */
56
57#define CSR3_BSWAP 0x0004 /* Byte Swap (RW) */
58#define CSR3_ACON 0x0002 /* ALE Control (RW) */
59#define CSR3_BCON 0x0001 /* Byte Control (RW) */
60
61/*
62 * Initialization Block Mode operation Bit Definitions.
63 */
64
65#define MODE_PROM 0x8000 /* Promiscuous Mode */
66#define MODE_INTL 0x0040 /* Internal Loopback */
67#define MODE_DRTY 0x0020 /* Disable Retry */
68#define MODE_COLL 0x0010 /* Force Collision */
69#define MODE_DTCR 0x0008 /* Disable Transmit CRC) */
70#define MODE_LOOP 0x0004 /* Loopback */
71#define MODE_DTX 0x0002 /* Disable the Transmitter */
72#define MODE_DRX 0x0001 /* Disable the Receiver */
73
74#define MODE_NORMAL 0x0000 /* Normal operation mode */
75
76/*
77 * Receive message descriptor status bit definitions.
78 */
79
80#define RX_OWN 0x80 /* Owner bit 0 = host, 1 = lance */
81#define RX_ERR 0x40 /* Error Summary */
82#define RX_FRAM 0x20 /* Framing Error */
83#define RX_OFLO 0x10 /* Overflow Error */
84#define RX_CRC 0x08 /* CRC Error */
85#define RX_BUFF 0x04 /* Buffer Error */
86#define RX_STP 0x02 /* Start of Packet */
87#define RX_ENP 0x01 /* End of Packet */
88
89
90/*
91 * Transmit message descriptor status bit definitions.
92 */
93
94#define TX_OWN 0x80 /* Owner bit 0 = host, 1 = lance */
95#define TX_ERR 0x40 /* Error Summary */
96#define TX_MORE 0x10 /* More the 1 retry needed to Xmit */
97#define TX_ONE 0x08 /* One retry needed to Xmit */
98#define TX_DEF 0x04 /* Deferred */
99#define TX_STP 0x02 /* Start of Packet */
100#define TX_ENP 0x01 /* End of Packet */
101
102/*
103 * Transmit status (2) (valid if TX_ERR == 1)
104 */
105
106#define TX_BUFF 0x8000 /* Buffering error (no ENP) */
107#define TX_UFLO 0x4000 /* Underflow (late memory) */
108#define TX_LCOL 0x1000 /* Late collision */
109#define TX_LCAR 0x0400 /* Loss of Carrier */
110#define TX_RTRY 0x0200 /* Failed after 16 retransmissions */
111#define TX_TDR 0x003f /* Time-domain-reflectometer-value */
112
113
114/*
115 * Structures used for Communication with the LANCE
116 */
117
118/* LANCE Initialize Block */
119
120struct init_block
121{
122 unsigned short mode; /* Mode Register */
123 unsigned char paddr[6]; /* Physical Address (MAC) */
124 unsigned char laddr[8]; /* Logical Filter Address (not used) */
125 unsigned int rdrp; /* Receive Descriptor Ring pointer */
126 unsigned int tdrp; /* Transmit Descriptor Ring pointer */
127};
128
129
130/* Receive Message Descriptor Entry */
131
132struct rmd
133{
134 union
135 {
136 unsigned long buffer; /* Address of buffer */
137 struct
138 {
139 unsigned char unused[3];
140 unsigned volatile char status; /* Status Bits */
141 } s;
142 } u;
143 volatile short blen; /* Buffer Length (two's complement) */
144 unsigned short mlen; /* Message Byte Count */
145};
146
147
148/* Transmit Message Descriptor Entry */
149
150struct tmd
151{
152 union
153 {
154 unsigned long buffer; /* Address of buffer */
155 struct
156 {
157 unsigned char unused[3];
158 unsigned volatile char status; /* Status Bits */
159 } s;
160 } u;
161 unsigned short blen; /* Buffer Length (two's complement) */
162 unsigned volatile short status2; /* Error Status Bits */
163};
164
165#endif /* End of SK_G16_H */
diff --git a/drivers/net/skge.c b/drivers/net/skge.c
new file mode 100644
index 000000000000..30e8d589d167
--- /dev/null
+++ b/drivers/net/skge.c
@@ -0,0 +1,3386 @@
1/*
2 * New driver for Marvell Yukon chipset and SysKonnect Gigabit
3 * Ethernet adapters. Based on earlier sk98lin, e100 and
4 * FreeBSD if_sk drivers.
5 *
6 * This driver intentionally does not support all the features
7 * of the original driver such as link fail-over and link management because
8 * those should be done at higher levels.
9 *
10 * Copyright (C) 2004, Stephen Hemminger <shemminger@osdl.org>
11 *
12 * This program is free software; you can redistribute it and/or modify
13 * it under the terms of the GNU General Public License as published by
14 * the Free Software Foundation; either version 2 of the License, or
15 * (at your option) any later version.
16 *
17 * This program is distributed in the hope that it will be useful,
18 * but WITHOUT ANY WARRANTY; without even the implied warranty of
19 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
20 * GNU General Public License for more details.
21 *
22 * You should have received a copy of the GNU General Public License
23 * along with this program; if not, write to the Free Software
24 * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
25 */
26
27#include <linux/config.h>
28#include <linux/kernel.h>
29#include <linux/module.h>
30#include <linux/moduleparam.h>
31#include <linux/netdevice.h>
32#include <linux/etherdevice.h>
33#include <linux/ethtool.h>
34#include <linux/pci.h>
35#include <linux/if_vlan.h>
36#include <linux/ip.h>
37#include <linux/delay.h>
38#include <linux/crc32.h>
39#include <linux/dma-mapping.h>
40#include <asm/irq.h>
41
42#include "skge.h"
43
44#define DRV_NAME "skge"
45#define DRV_VERSION "0.6"
46#define PFX DRV_NAME " "
47
48#define DEFAULT_TX_RING_SIZE 128
49#define DEFAULT_RX_RING_SIZE 512
50#define MAX_TX_RING_SIZE 1024
51#define MAX_RX_RING_SIZE 4096
52#define PHY_RETRIES 1000
53#define ETH_JUMBO_MTU 9000
54#define TX_WATCHDOG (5 * HZ)
55#define NAPI_WEIGHT 64
56#define BLINK_HZ (HZ/4)
57#define LINK_POLL_HZ (HZ/10)
58
59MODULE_DESCRIPTION("SysKonnect Gigabit Ethernet driver");
60MODULE_AUTHOR("Stephen Hemminger <shemminger@osdl.org>");
61MODULE_LICENSE("GPL");
62MODULE_VERSION(DRV_VERSION);
63
64static const u32 default_msg
65 = NETIF_MSG_DRV| NETIF_MSG_PROBE| NETIF_MSG_LINK
66 | NETIF_MSG_IFUP| NETIF_MSG_IFDOWN;
67
68static int debug = -1; /* defaults above */
69module_param(debug, int, 0);
70MODULE_PARM_DESC(debug, "Debug level (0=none,...,16=all)");
71
72static const struct pci_device_id skge_id_table[] = {
73 { PCI_VENDOR_ID_3COM, PCI_DEVICE_ID_3COM_3C940,
74 PCI_ANY_ID, PCI_ANY_ID },
75 { PCI_VENDOR_ID_3COM, PCI_DEVICE_ID_3COM_3C940B,
76 PCI_ANY_ID, PCI_ANY_ID },
77 { PCI_VENDOR_ID_SYSKONNECT, PCI_DEVICE_ID_SYSKONNECT_GE,
78 PCI_ANY_ID, PCI_ANY_ID },
79 { PCI_VENDOR_ID_SYSKONNECT, PCI_DEVICE_ID_SYSKONNECT_YU,
80 PCI_ANY_ID, PCI_ANY_ID },
81 { PCI_VENDOR_ID_SYSKONNECT, 0x9E00, /* SK-9Exx */
82 PCI_ANY_ID, PCI_ANY_ID },
83 { PCI_VENDOR_ID_DLINK, PCI_DEVICE_ID_DLINK_DGE510T,
84 PCI_ANY_ID, PCI_ANY_ID },
85 { PCI_VENDOR_ID_MARVELL, 0x4320, /* Gigabit Ethernet Controller */
86 PCI_ANY_ID, PCI_ANY_ID },
87 { PCI_VENDOR_ID_MARVELL, 0x5005, /* Marvell (11ab), Belkin */
88 PCI_ANY_ID, PCI_ANY_ID },
89 { PCI_VENDOR_ID_CNET, PCI_DEVICE_ID_CNET_GIGACARD,
90 PCI_ANY_ID, PCI_ANY_ID },
91 { PCI_VENDOR_ID_LINKSYS, PCI_DEVICE_ID_LINKSYS_EG1032,
92 PCI_ANY_ID, PCI_ANY_ID },
93 { PCI_VENDOR_ID_LINKSYS, PCI_DEVICE_ID_LINKSYS_EG1064,
94 PCI_ANY_ID, PCI_ANY_ID },
95 { 0 }
96};
97MODULE_DEVICE_TABLE(pci, skge_id_table);
98
99static int skge_up(struct net_device *dev);
100static int skge_down(struct net_device *dev);
101static void skge_tx_clean(struct skge_port *skge);
102static void skge_xm_phy_write(struct skge_hw *hw, int port, u16 reg, u16 val);
103static void skge_gm_phy_write(struct skge_hw *hw, int port, u16 reg, u16 val);
104static void genesis_get_stats(struct skge_port *skge, u64 *data);
105static void yukon_get_stats(struct skge_port *skge, u64 *data);
106static void yukon_init(struct skge_hw *hw, int port);
107static void yukon_reset(struct skge_hw *hw, int port);
108static void genesis_mac_init(struct skge_hw *hw, int port);
109static void genesis_reset(struct skge_hw *hw, int port);
110
111static const int txqaddr[] = { Q_XA1, Q_XA2 };
112static const int rxqaddr[] = { Q_R1, Q_R2 };
113static const u32 rxirqmask[] = { IS_R1_F, IS_R2_F };
114static const u32 txirqmask[] = { IS_XA1_F, IS_XA2_F };
115
116/* We don't need to look at the whole 16K;
117 * the last interesting register is the descriptor poll timer.
118 */
119#define SKGE_REGS_LEN (29*128)
120
121static int skge_get_regs_len(struct net_device *dev)
122{
123 return SKGE_REGS_LEN;
124}
125
126/*
127 * Returns a copy of the control register region.
128 * The I/O region is divided into banks and certain regions are unreadable.
129 */
130static void skge_get_regs(struct net_device *dev, struct ethtool_regs *regs,
131 void *p)
132{
133 const struct skge_port *skge = netdev_priv(dev);
134 unsigned long offs;
135 const void __iomem *io = skge->hw->regs;
136 static const unsigned long bankmap
137 = (1<<0) | (1<<2) | (1<<8) | (1<<9)
138 | (1<<12) | (1<<13) | (1<<14) | (1<<15) | (1<<16)
139 | (1<<17) | (1<<20) | (1<<21) | (1<<22) | (1<<23)
140 | (1<<24) | (1<<25) | (1<<26) | (1<<27) | (1<<28);
141
142 regs->version = 1;
143 for (offs = 0; offs < regs->len; offs += 128) {
144 u32 len = min_t(u32, 128, regs->len - offs);
145
146 if (bankmap & (1<<(offs/128)))
147 memcpy_fromio(p + offs, io + offs, len);
148 else
149 memset(p + offs, 0, len);
150 }
151}
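/*
 * Worked example for the bank map above: each bit covers one 128-byte bank,
 * so an offset such as 0x300 lies in bank 6 (0x300 / 128), whose bit is not
 * set, and that slice of the dump is zero-filled instead of being read from
 * the unreadable hardware window.
 */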
152
153/* Wake on LAN is only supported on Yukon chips with rev 1 or above */
154static int wol_supported(const struct skge_hw *hw)
155{
156 return !((hw->chip_id == CHIP_ID_GENESIS ||
157 (hw->chip_id == CHIP_ID_YUKON && chip_rev(hw) == 0)));
158}
159
160static void skge_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
161{
162 struct skge_port *skge = netdev_priv(dev);
163
164 wol->supported = wol_supported(skge->hw) ? WAKE_MAGIC : 0;
165 wol->wolopts = skge->wol ? WAKE_MAGIC : 0;
166}
167
168static int skge_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
169{
170 struct skge_port *skge = netdev_priv(dev);
171 struct skge_hw *hw = skge->hw;
172
173	if (wol->wolopts != WAKE_MAGIC && wol->wolopts != 0)
174 return -EOPNOTSUPP;
175
176 if (wol->wolopts == WAKE_MAGIC && !wol_supported(hw))
177 return -EOPNOTSUPP;
178
179 skge->wol = wol->wolopts == WAKE_MAGIC;
180
181 if (skge->wol) {
182 memcpy_toio(hw->regs + WOL_MAC_ADDR, dev->dev_addr, ETH_ALEN);
183
184 skge_write16(hw, WOL_CTRL_STAT,
185 WOL_CTL_ENA_PME_ON_MAGIC_PKT |
186 WOL_CTL_ENA_MAGIC_PKT_UNIT);
187 } else
188 skge_write16(hw, WOL_CTRL_STAT, WOL_CTL_DEFAULT);
189
190 return 0;
191}
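/*
 * Illustration only (the interface name is a placeholder): from user space
 * this corresponds to
 *	ethtool -s eth0 wol g
 * to arm magic-packet wake-up, or "wol d" to disable it again; any other
 * wake option is rejected above with -EOPNOTSUPP.
 */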
192
193
194static int skge_get_settings(struct net_device *dev,
195 struct ethtool_cmd *ecmd)
196{
197 struct skge_port *skge = netdev_priv(dev);
198 struct skge_hw *hw = skge->hw;
199
200 ecmd->transceiver = XCVR_INTERNAL;
201
202 if (iscopper(hw)) {
203 if (hw->chip_id == CHIP_ID_GENESIS)
204 ecmd->supported = SUPPORTED_1000baseT_Full
205 | SUPPORTED_1000baseT_Half
206 | SUPPORTED_Autoneg | SUPPORTED_TP;
207 else {
208 ecmd->supported = SUPPORTED_10baseT_Half
209 | SUPPORTED_10baseT_Full
210 | SUPPORTED_100baseT_Half
211 | SUPPORTED_100baseT_Full
212 | SUPPORTED_1000baseT_Half
213 | SUPPORTED_1000baseT_Full
214 | SUPPORTED_Autoneg| SUPPORTED_TP;
215
216 if (hw->chip_id == CHIP_ID_YUKON)
217 ecmd->supported &= ~SUPPORTED_1000baseT_Half;
218
219 else if (hw->chip_id == CHIP_ID_YUKON_FE)
220 ecmd->supported &= ~(SUPPORTED_1000baseT_Half
221 | SUPPORTED_1000baseT_Full);
222 }
223
224 ecmd->port = PORT_TP;
225 ecmd->phy_address = hw->phy_addr;
226 } else {
227 ecmd->supported = SUPPORTED_1000baseT_Full
228 | SUPPORTED_FIBRE
229 | SUPPORTED_Autoneg;
230
231 ecmd->port = PORT_FIBRE;
232 }
233
234 ecmd->advertising = skge->advertising;
235 ecmd->autoneg = skge->autoneg;
236 ecmd->speed = skge->speed;
237 ecmd->duplex = skge->duplex;
238 return 0;
239}
240
241static u32 skge_modes(const struct skge_hw *hw)
242{
243 u32 modes = ADVERTISED_Autoneg
244 | ADVERTISED_1000baseT_Full | ADVERTISED_1000baseT_Half
245 | ADVERTISED_100baseT_Full | ADVERTISED_100baseT_Half
246 | ADVERTISED_10baseT_Full | ADVERTISED_10baseT_Half;
247
248 if (iscopper(hw)) {
249 modes |= ADVERTISED_TP;
250		switch (hw->chip_id) {
251 case CHIP_ID_GENESIS:
252 modes &= ~(ADVERTISED_100baseT_Full
253 | ADVERTISED_100baseT_Half
254 | ADVERTISED_10baseT_Full
255 | ADVERTISED_10baseT_Half);
256 break;
257
258 case CHIP_ID_YUKON:
259 modes &= ~ADVERTISED_1000baseT_Half;
260 break;
261
262 case CHIP_ID_YUKON_FE:
263 modes &= ~(ADVERTISED_1000baseT_Half|ADVERTISED_1000baseT_Full);
264 break;
265 }
266 } else {
267 modes |= ADVERTISED_FIBRE;
268 modes &= ~ADVERTISED_1000baseT_Half;
269 }
270 return modes;
271}
272
273static int skge_set_settings(struct net_device *dev, struct ethtool_cmd *ecmd)
274{
275 struct skge_port *skge = netdev_priv(dev);
276 const struct skge_hw *hw = skge->hw;
277
278 if (ecmd->autoneg == AUTONEG_ENABLE) {
279		if (ecmd->advertising & ~skge_modes(hw))
280 return -EINVAL;
281 } else {
282		switch (ecmd->speed) {
283 case SPEED_1000:
284 if (hw->chip_id == CHIP_ID_YUKON_FE)
285 return -EINVAL;
286 break;
287 case SPEED_100:
288 case SPEED_10:
289			if (!iscopper(hw) || hw->chip_id == CHIP_ID_GENESIS)
290 return -EINVAL;
291 break;
292 default:
293 return -EINVAL;
294 }
295 }
296
297 skge->autoneg = ecmd->autoneg;
298 skge->speed = ecmd->speed;
299 skge->duplex = ecmd->duplex;
300 skge->advertising = ecmd->advertising;
301
302 if (netif_running(dev)) {
303 skge_down(dev);
304 skge_up(dev);
305 }
306	return 0;
307}
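/*
 * Illustration only (placeholder interface name): a forced-mode request such
 * as
 *	ethtool -s eth0 autoneg off speed 100 duplex full
 * lands here with autoneg == AUTONEG_DISABLE, while "autoneg on" requests are
 * validated against skge_modes() before the port is bounced.
 */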
308
309static void skge_get_drvinfo(struct net_device *dev,
310 struct ethtool_drvinfo *info)
311{
312 struct skge_port *skge = netdev_priv(dev);
313
314 strcpy(info->driver, DRV_NAME);
315 strcpy(info->version, DRV_VERSION);
316 strcpy(info->fw_version, "N/A");
317 strcpy(info->bus_info, pci_name(skge->hw->pdev));
318}
319
320static const struct skge_stat {
321 char name[ETH_GSTRING_LEN];
322 u16 xmac_offset;
323 u16 gma_offset;
324} skge_stats[] = {
325 { "tx_bytes", XM_TXO_OK_HI, GM_TXO_OK_HI },
326 { "rx_bytes", XM_RXO_OK_HI, GM_RXO_OK_HI },
327
328 { "tx_broadcast", XM_TXF_BC_OK, GM_TXF_BC_OK },
329 { "rx_broadcast", XM_RXF_BC_OK, GM_RXF_BC_OK },
330 { "tx_multicast", XM_TXF_MC_OK, GM_TXF_MC_OK },
331 { "rx_multicast", XM_RXF_MC_OK, GM_RXF_MC_OK },
332 { "tx_unicast", XM_TXF_UC_OK, GM_TXF_UC_OK },
333 { "rx_unicast", XM_RXF_UC_OK, GM_RXF_UC_OK },
334 { "tx_mac_pause", XM_TXF_MPAUSE, GM_TXF_MPAUSE },
335 { "rx_mac_pause", XM_RXF_MPAUSE, GM_RXF_MPAUSE },
336
337 { "collisions", XM_TXF_SNG_COL, GM_TXF_SNG_COL },
338 { "multi_collisions", XM_TXF_MUL_COL, GM_TXF_MUL_COL },
339 { "aborted", XM_TXF_ABO_COL, GM_TXF_ABO_COL },
340 { "late_collision", XM_TXF_LAT_COL, GM_TXF_LAT_COL },
341 { "fifo_underrun", XM_TXE_FIFO_UR, GM_TXE_FIFO_UR },
342 { "fifo_overflow", XM_RXE_FIFO_OV, GM_RXE_FIFO_OV },
343
344 { "rx_toolong", XM_RXF_LNG_ERR, GM_RXF_LNG_ERR },
345 { "rx_jabber", XM_RXF_JAB_PKT, GM_RXF_JAB_PKT },
346 { "rx_runt", XM_RXE_RUNT, GM_RXE_FRAG },
347 { "rx_too_long", XM_RXF_LNG_ERR, GM_RXF_LNG_ERR },
348 { "rx_fcs_error", XM_RXF_FCS_ERR, GM_RXF_FCS_ERR },
349};
350
351static int skge_get_stats_count(struct net_device *dev)
352{
353 return ARRAY_SIZE(skge_stats);
354}
355
356static void skge_get_ethtool_stats(struct net_device *dev,
357 struct ethtool_stats *stats, u64 *data)
358{
359 struct skge_port *skge = netdev_priv(dev);
360
361 if (skge->hw->chip_id == CHIP_ID_GENESIS)
362 genesis_get_stats(skge, data);
363 else
364 yukon_get_stats(skge, data);
365}
366
367/* Use hardware MIB variables for critical path statistics and
368 * transmit feedback not reported at interrupt.
369 * Other errors are accounted for in interrupt handler.
370 */
371static struct net_device_stats *skge_get_stats(struct net_device *dev)
372{
373 struct skge_port *skge = netdev_priv(dev);
374 u64 data[ARRAY_SIZE(skge_stats)];
375
376 if (skge->hw->chip_id == CHIP_ID_GENESIS)
377 genesis_get_stats(skge, data);
378 else
379 yukon_get_stats(skge, data);
380
381 skge->net_stats.tx_bytes = data[0];
382 skge->net_stats.rx_bytes = data[1];
383 skge->net_stats.tx_packets = data[2] + data[4] + data[6];
384 skge->net_stats.rx_packets = data[3] + data[5] + data[7];
385 skge->net_stats.multicast = data[5] + data[7];
386 skge->net_stats.collisions = data[10];
387 skge->net_stats.tx_aborted_errors = data[12];
388
389 return &skge->net_stats;
390}
391
392static void skge_get_strings(struct net_device *dev, u32 stringset, u8 *data)
393{
394 int i;
395
396	switch (stringset) {
397 case ETH_SS_STATS:
398 for (i = 0; i < ARRAY_SIZE(skge_stats); i++)
399 memcpy(data + i * ETH_GSTRING_LEN,
400 skge_stats[i].name, ETH_GSTRING_LEN);
401 break;
402 }
403}
404
405static void skge_get_ring_param(struct net_device *dev,
406 struct ethtool_ringparam *p)
407{
408 struct skge_port *skge = netdev_priv(dev);
409
410 p->rx_max_pending = MAX_RX_RING_SIZE;
411 p->tx_max_pending = MAX_TX_RING_SIZE;
412 p->rx_mini_max_pending = 0;
413 p->rx_jumbo_max_pending = 0;
414
415 p->rx_pending = skge->rx_ring.count;
416 p->tx_pending = skge->tx_ring.count;
417 p->rx_mini_pending = 0;
418 p->rx_jumbo_pending = 0;
419}
420
421static int skge_set_ring_param(struct net_device *dev,
422 struct ethtool_ringparam *p)
423{
424 struct skge_port *skge = netdev_priv(dev);
425
426 if (p->rx_pending == 0 || p->rx_pending > MAX_RX_RING_SIZE ||
427 p->tx_pending == 0 || p->tx_pending > MAX_TX_RING_SIZE)
428 return -EINVAL;
429
430 skge->rx_ring.count = p->rx_pending;
431 skge->tx_ring.count = p->tx_pending;
432
433 if (netif_running(dev)) {
434 skge_down(dev);
435 skge_up(dev);
436 }
437
438 return 0;
439}
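/*
 * Illustration only: resizing the rings from user space looks like
 *	ethtool -G eth0 rx 256 tx 256
 * (eth0 is a placeholder); out-of-range values are rejected above and a
 * running interface is restarted so the new ring sizes take effect.
 */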
440
441static u32 skge_get_msglevel(struct net_device *netdev)
442{
443 struct skge_port *skge = netdev_priv(netdev);
444 return skge->msg_enable;
445}
446
447static void skge_set_msglevel(struct net_device *netdev, u32 value)
448{
449 struct skge_port *skge = netdev_priv(netdev);
450 skge->msg_enable = value;
451}
452
453static int skge_nway_reset(struct net_device *dev)
454{
455 struct skge_port *skge = netdev_priv(dev);
456 struct skge_hw *hw = skge->hw;
457 int port = skge->port;
458
459 if (skge->autoneg != AUTONEG_ENABLE || !netif_running(dev))
460 return -EINVAL;
461
462 spin_lock_bh(&hw->phy_lock);
463 if (hw->chip_id == CHIP_ID_GENESIS) {
464 genesis_reset(hw, port);
465 genesis_mac_init(hw, port);
466 } else {
467 yukon_reset(hw, port);
468 yukon_init(hw, port);
469 }
470 spin_unlock_bh(&hw->phy_lock);
471 return 0;
472}
473
474static int skge_set_sg(struct net_device *dev, u32 data)
475{
476 struct skge_port *skge = netdev_priv(dev);
477 struct skge_hw *hw = skge->hw;
478
479 if (hw->chip_id == CHIP_ID_GENESIS && data)
480 return -EOPNOTSUPP;
481 return ethtool_op_set_sg(dev, data);
482}
483
484static int skge_set_tx_csum(struct net_device *dev, u32 data)
485{
486 struct skge_port *skge = netdev_priv(dev);
487 struct skge_hw *hw = skge->hw;
488
489 if (hw->chip_id == CHIP_ID_GENESIS && data)
490 return -EOPNOTSUPP;
491
492 return ethtool_op_set_tx_csum(dev, data);
493}
494
495static u32 skge_get_rx_csum(struct net_device *dev)
496{
497 struct skge_port *skge = netdev_priv(dev);
498
499 return skge->rx_csum;
500}
501
502/* Only Yukon supports checksum offload. */
503static int skge_set_rx_csum(struct net_device *dev, u32 data)
504{
505 struct skge_port *skge = netdev_priv(dev);
506
507 if (skge->hw->chip_id == CHIP_ID_GENESIS && data)
508 return -EOPNOTSUPP;
509
510 skge->rx_csum = data;
511 return 0;
512}
513
514/* Only Yukon II supports TSO (not implemented yet) */
515static int skge_set_tso(struct net_device *dev, u32 data)
516{
517 if (data)
518 return -EOPNOTSUPP;
519 return 0;
520}
521
522static void skge_get_pauseparam(struct net_device *dev,
523 struct ethtool_pauseparam *ecmd)
524{
525 struct skge_port *skge = netdev_priv(dev);
526
527 ecmd->tx_pause = (skge->flow_control == FLOW_MODE_LOC_SEND)
528 || (skge->flow_control == FLOW_MODE_SYMMETRIC);
529 ecmd->rx_pause = (skge->flow_control == FLOW_MODE_REM_SEND)
530 || (skge->flow_control == FLOW_MODE_SYMMETRIC);
531
532 ecmd->autoneg = skge->autoneg;
533}
534
535static int skge_set_pauseparam(struct net_device *dev,
536 struct ethtool_pauseparam *ecmd)
537{
538 struct skge_port *skge = netdev_priv(dev);
539
540 skge->autoneg = ecmd->autoneg;
541 if (ecmd->rx_pause && ecmd->tx_pause)
542 skge->flow_control = FLOW_MODE_SYMMETRIC;
543	else if (ecmd->rx_pause && !ecmd->tx_pause)
544		skge->flow_control = FLOW_MODE_REM_SEND;
545	else if (!ecmd->rx_pause && ecmd->tx_pause)
546 skge->flow_control = FLOW_MODE_LOC_SEND;
547 else
548 skge->flow_control = FLOW_MODE_NONE;
549
550 if (netif_running(dev)) {
551 skge_down(dev);
552 skge_up(dev);
553 }
554 return 0;
555}
556
557/* Chip internal frequency for clock calculations */
558static inline u32 hwkhz(const struct skge_hw *hw)
559{
560 if (hw->chip_id == CHIP_ID_GENESIS)
561		return 53125; /* or: 53.125 MHz */
562 else if (hw->chip_id == CHIP_ID_YUKON_EC)
563 return 125000; /* or: 125.000 MHz */
564 else
565		return 78125; /* or: 78.125 MHz */
566}
567
568/* Chip hz to microseconds */
569static inline u32 skge_clk2usec(const struct skge_hw *hw, u32 ticks)
570{
571 return (ticks * 1000) / hwkhz(hw);
572}
573
574/* Microseconds to chip hz */
575static inline u32 skge_usecs2clk(const struct skge_hw *hw, u32 usec)
576{
577 return hwkhz(hw) * usec / 1000;
578}
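/*
 * Worked example for the conversions above: on a Yukon EC running at
 * 125,000 kHz, a 25 us interrupt moderation setting becomes
 * 125000 * 25 / 1000 = 3125 clock ticks, and skge_clk2usec() maps those
 * 3125 ticks back to 25 us.
 */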
579
580static int skge_get_coalesce(struct net_device *dev,
581 struct ethtool_coalesce *ecmd)
582{
583 struct skge_port *skge = netdev_priv(dev);
584 struct skge_hw *hw = skge->hw;
585 int port = skge->port;
586
587 ecmd->rx_coalesce_usecs = 0;
588 ecmd->tx_coalesce_usecs = 0;
589
590 if (skge_read32(hw, B2_IRQM_CTRL) & TIM_START) {
591 u32 delay = skge_clk2usec(hw, skge_read32(hw, B2_IRQM_INI));
592 u32 msk = skge_read32(hw, B2_IRQM_MSK);
593
594 if (msk & rxirqmask[port])
595 ecmd->rx_coalesce_usecs = delay;
596 if (msk & txirqmask[port])
597 ecmd->tx_coalesce_usecs = delay;
598 }
599
600 return 0;
601}
602
603/* Note: interrupt timer is per board, but can turn on/off per port */
604static int skge_set_coalesce(struct net_device *dev,
605 struct ethtool_coalesce *ecmd)
606{
607 struct skge_port *skge = netdev_priv(dev);
608 struct skge_hw *hw = skge->hw;
609 int port = skge->port;
610 u32 msk = skge_read32(hw, B2_IRQM_MSK);
611 u32 delay = 25;
612
613 if (ecmd->rx_coalesce_usecs == 0)
614 msk &= ~rxirqmask[port];
615 else if (ecmd->rx_coalesce_usecs < 25 ||
616 ecmd->rx_coalesce_usecs > 33333)
617 return -EINVAL;
618 else {
619 msk |= rxirqmask[port];
620 delay = ecmd->rx_coalesce_usecs;
621 }
622
623 if (ecmd->tx_coalesce_usecs == 0)
624 msk &= ~txirqmask[port];
625 else if (ecmd->tx_coalesce_usecs < 25 ||
626 ecmd->tx_coalesce_usecs > 33333)
627 return -EINVAL;
628 else {
629 msk |= txirqmask[port];
630		delay = min(delay, ecmd->tx_coalesce_usecs);
631 }
632
633 skge_write32(hw, B2_IRQM_MSK, msk);
634 if (msk == 0)
635 skge_write32(hw, B2_IRQM_CTRL, TIM_STOP);
636 else {
637 skge_write32(hw, B2_IRQM_INI, skge_usecs2clk(hw, delay));
638 skge_write32(hw, B2_IRQM_CTRL, TIM_START);
639 }
640 return 0;
641}
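/*
 * Illustration only (placeholder interface name):
 *	ethtool -C eth0 rx-usecs 50 tx-usecs 50
 * programs the shared board timer to 50 us for both directions of this port;
 * a value of 0 drops that direction from the IRQ moderation mask, and values
 * outside 25..33333 us are rejected above.
 */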
642
643static void skge_led_on(struct skge_hw *hw, int port)
644{
645 if (hw->chip_id == CHIP_ID_GENESIS) {
646 skge_write8(hw, SKGEMAC_REG(port, LNK_LED_REG), LINKLED_ON);
647 skge_write8(hw, B0_LED, LED_STAT_ON);
648
649 skge_write8(hw, SKGEMAC_REG(port, RX_LED_TST), LED_T_ON);
650 skge_write32(hw, SKGEMAC_REG(port, RX_LED_VAL), 100);
651 skge_write8(hw, SKGEMAC_REG(port, RX_LED_CTRL), LED_START);
652
653 switch (hw->phy_type) {
654 case SK_PHY_BCOM:
655 skge_xm_phy_write(hw, port, PHY_BCOM_P_EXT_CTRL,
656 PHY_B_PEC_LED_ON);
657 break;
658 case SK_PHY_LONE:
659 skge_xm_phy_write(hw, port, PHY_LONE_LED_CFG,
660 0x0800);
661 break;
662 default:
663 skge_write8(hw, SKGEMAC_REG(port, TX_LED_TST), LED_T_ON);
664 skge_write32(hw, SKGEMAC_REG(port, TX_LED_VAL), 100);
665 skge_write8(hw, SKGEMAC_REG(port, TX_LED_CTRL), LED_START);
666 }
667 } else {
668 skge_gm_phy_write(hw, port, PHY_MARV_LED_CTRL, 0);
669 skge_gm_phy_write(hw, port, PHY_MARV_LED_OVER,
670 PHY_M_LED_MO_DUP(MO_LED_ON) |
671 PHY_M_LED_MO_10(MO_LED_ON) |
672 PHY_M_LED_MO_100(MO_LED_ON) |
673 PHY_M_LED_MO_1000(MO_LED_ON) |
674 PHY_M_LED_MO_RX(MO_LED_ON));
675 }
676}
677
678static void skge_led_off(struct skge_hw *hw, int port)
679{
680 if (hw->chip_id == CHIP_ID_GENESIS) {
681 skge_write8(hw, SKGEMAC_REG(port, LNK_LED_REG), LINKLED_OFF);
682 skge_write8(hw, B0_LED, LED_STAT_OFF);
683
684 skge_write32(hw, SKGEMAC_REG(port, RX_LED_VAL), 0);
685 skge_write8(hw, SKGEMAC_REG(port, RX_LED_CTRL), LED_T_OFF);
686
687 switch (hw->phy_type) {
688 case SK_PHY_BCOM:
689 skge_xm_phy_write(hw, port, PHY_BCOM_P_EXT_CTRL,
690 PHY_B_PEC_LED_OFF);
691 break;
692 case SK_PHY_LONE:
693 skge_xm_phy_write(hw, port, PHY_LONE_LED_CFG,
694 PHY_L_LC_LEDT);
695 break;
696 default:
697 skge_write32(hw, SKGEMAC_REG(port, TX_LED_VAL), 0);
698 skge_write8(hw, SKGEMAC_REG(port, TX_LED_CTRL), LED_T_OFF);
699 }
700 } else {
701 skge_gm_phy_write(hw, port, PHY_MARV_LED_CTRL, 0);
702 skge_gm_phy_write(hw, port, PHY_MARV_LED_OVER,
703 PHY_M_LED_MO_DUP(MO_LED_OFF) |
704 PHY_M_LED_MO_10(MO_LED_OFF) |
705 PHY_M_LED_MO_100(MO_LED_OFF) |
706 PHY_M_LED_MO_1000(MO_LED_OFF) |
707 PHY_M_LED_MO_RX(MO_LED_OFF));
708 }
709}
710
711static void skge_blink_timer(unsigned long data)
712{
713 struct skge_port *skge = (struct skge_port *) data;
714 struct skge_hw *hw = skge->hw;
715 unsigned long flags;
716
717 spin_lock_irqsave(&hw->phy_lock, flags);
718 if (skge->blink_on)
719 skge_led_on(hw, skge->port);
720 else
721 skge_led_off(hw, skge->port);
722 spin_unlock_irqrestore(&hw->phy_lock, flags);
723
724 skge->blink_on = !skge->blink_on;
725 mod_timer(&skge->led_blink, jiffies + BLINK_HZ);
726}
727
728/* blink LEDs to locate the board */
729static int skge_phys_id(struct net_device *dev, u32 data)
730{
731 struct skge_port *skge = netdev_priv(dev);
732
733	if (!data || data > (u32)(MAX_SCHEDULE_TIMEOUT / HZ))
734 data = (u32)(MAX_SCHEDULE_TIMEOUT / HZ);
735
736 /* start blinking */
737 skge->blink_on = 1;
738 mod_timer(&skge->led_blink, jiffies+1);
739
740 msleep_interruptible(data * 1000);
741 del_timer_sync(&skge->led_blink);
742
743 skge_led_off(skge->hw, skge->port);
744
745 return 0;
746}
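/*
 * Illustration only:
 *	ethtool -p eth0 5
 * blinks the port LEDs for five seconds (eth0 is a placeholder); with no
 * duration given, the blinking runs until interrupted, capped at
 * MAX_SCHEDULE_TIMEOUT / HZ seconds.
 */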
747
748static struct ethtool_ops skge_ethtool_ops = {
749 .get_settings = skge_get_settings,
750 .set_settings = skge_set_settings,
751 .get_drvinfo = skge_get_drvinfo,
752 .get_regs_len = skge_get_regs_len,
753 .get_regs = skge_get_regs,
754 .get_wol = skge_get_wol,
755 .set_wol = skge_set_wol,
756 .get_msglevel = skge_get_msglevel,
757 .set_msglevel = skge_set_msglevel,
758 .nway_reset = skge_nway_reset,
759 .get_link = ethtool_op_get_link,
760 .get_ringparam = skge_get_ring_param,
761 .set_ringparam = skge_set_ring_param,
762 .get_pauseparam = skge_get_pauseparam,
763 .set_pauseparam = skge_set_pauseparam,
764 .get_coalesce = skge_get_coalesce,
765 .set_coalesce = skge_set_coalesce,
766 .get_tso = ethtool_op_get_tso,
767 .set_tso = skge_set_tso,
768 .get_sg = ethtool_op_get_sg,
769 .set_sg = skge_set_sg,
770 .get_tx_csum = ethtool_op_get_tx_csum,
771 .set_tx_csum = skge_set_tx_csum,
772 .get_rx_csum = skge_get_rx_csum,
773 .set_rx_csum = skge_set_rx_csum,
774 .get_strings = skge_get_strings,
775 .phys_id = skge_phys_id,
776 .get_stats_count = skge_get_stats_count,
777 .get_ethtool_stats = skge_get_ethtool_stats,
778};
779
780/*
781 * Allocate ring elements and chain them together
782 * One-to-one association of board descriptors with ring elements
783 */
784static int skge_ring_alloc(struct skge_ring *ring, void *vaddr, u64 base)
785{
786 struct skge_tx_desc *d;
787 struct skge_element *e;
788 int i;
789
790 ring->start = kmalloc(sizeof(*e)*ring->count, GFP_KERNEL);
791 if (!ring->start)
792 return -ENOMEM;
793
794 for (i = 0, e = ring->start, d = vaddr; i < ring->count; i++, e++, d++) {
795 e->desc = d;
796 if (i == ring->count - 1) {
797 e->next = ring->start;
798 d->next_offset = base;
799 } else {
800 e->next = e + 1;
801 d->next_offset = base + (i+1) * sizeof(*d);
802 }
803 }
804 ring->to_use = ring->to_clean = ring->start;
805
806 return 0;
807}
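/*
 * Layout sketch: element i carries a pointer to descriptor i, and descriptor
 * i's next_offset is the bus address base + (i+1) * sizeof(desc); the last
 * descriptor wraps back to base, so software (via e->next) and hardware (via
 * next_offset) walk the same circular list.
 */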
808
809/* Setup buffer for receiving */
810static inline int skge_rx_alloc(struct skge_port *skge,
811 struct skge_element *e)
812{
813 unsigned long bufsize = skge->netdev->mtu + ETH_HLEN; /* VLAN? */
814 struct skge_rx_desc *rd = e->desc;
815 struct sk_buff *skb;
816 u64 map;
817
818 skb = dev_alloc_skb(bufsize + NET_IP_ALIGN);
819 if (unlikely(!skb)) {
820 printk(KERN_DEBUG PFX "%s: out of memory for receive\n",
821 skge->netdev->name);
822 return -ENOMEM;
823 }
824
825 skb->dev = skge->netdev;
826 skb_reserve(skb, NET_IP_ALIGN);
827
828 map = pci_map_single(skge->hw->pdev, skb->data, bufsize,
829 PCI_DMA_FROMDEVICE);
830
831 rd->dma_lo = map;
832 rd->dma_hi = map >> 32;
833 e->skb = skb;
834 rd->csum1_start = ETH_HLEN;
835 rd->csum2_start = ETH_HLEN;
836 rd->csum1 = 0;
837 rd->csum2 = 0;
838
839 wmb();
840
841 rd->control = BMU_OWN | BMU_STF | BMU_IRQ_EOF | BMU_TCP_CHECK | bufsize;
842 pci_unmap_addr_set(e, mapaddr, map);
843 pci_unmap_len_set(e, maplen, bufsize);
844 return 0;
845}
846
847/* Free all unused buffers in receive ring, assumes receiver stopped */
848static void skge_rx_clean(struct skge_port *skge)
849{
850 struct skge_hw *hw = skge->hw;
851 struct skge_ring *ring = &skge->rx_ring;
852 struct skge_element *e;
853
854 for (e = ring->to_clean; e != ring->to_use; e = e->next) {
855 struct skge_rx_desc *rd = e->desc;
856 rd->control = 0;
857
858 pci_unmap_single(hw->pdev,
859 pci_unmap_addr(e, mapaddr),
860 pci_unmap_len(e, maplen),
861 PCI_DMA_FROMDEVICE);
862 dev_kfree_skb(e->skb);
863 e->skb = NULL;
864 }
865 ring->to_clean = e;
866}
867
868/* Allocate buffers for receive ring
869 * For receive: to_use is refill location
870 * to_clean is next received frame.
871 *
872 * if (to_use == to_clean)
873 * then all frames in ring need buffers
874 * if (to_use->next == to_clean)
875 * then all frames in ring have buffers
876 */
877static int skge_rx_fill(struct skge_port *skge)
878{
879 struct skge_ring *ring = &skge->rx_ring;
880 struct skge_element *e;
881 int ret = 0;
882
883 for (e = ring->to_use; e->next != ring->to_clean; e = e->next) {
884 if (skge_rx_alloc(skge, e)) {
885 ret = 1;
886 break;
887 }
888
889 }
890 ring->to_use = e;
891
892 return ret;
893}
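/*
 * Note on the loop bound above: refilling stops one element short of
 * to_clean, so a fully stocked ring (to_use->next == to_clean) stays
 * distinguishable from an exhausted one (to_use == to_clean), matching the
 * convention described in the comment block before this function.
 */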
894
895static void skge_link_up(struct skge_port *skge)
896{
897 netif_carrier_on(skge->netdev);
898 if (skge->tx_avail > MAX_SKB_FRAGS + 1)
899 netif_wake_queue(skge->netdev);
900
901 if (netif_msg_link(skge))
902 printk(KERN_INFO PFX
903 "%s: Link is up at %d Mbps, %s duplex, flow control %s\n",
904 skge->netdev->name, skge->speed,
905 skge->duplex == DUPLEX_FULL ? "full" : "half",
906 (skge->flow_control == FLOW_MODE_NONE) ? "none" :
907 (skge->flow_control == FLOW_MODE_LOC_SEND) ? "tx only" :
908 (skge->flow_control == FLOW_MODE_REM_SEND) ? "rx only" :
909 (skge->flow_control == FLOW_MODE_SYMMETRIC) ? "tx and rx" :
910 "unknown");
911}
912
913static void skge_link_down(struct skge_port *skge)
914{
915 netif_carrier_off(skge->netdev);
916 netif_stop_queue(skge->netdev);
917
918 if (netif_msg_link(skge))
919 printk(KERN_INFO PFX "%s: Link is down.\n", skge->netdev->name);
920}
921
922static u16 skge_xm_phy_read(struct skge_hw *hw, int port, u16 reg)
923{
924 int i;
925 u16 v;
926
927 skge_xm_write16(hw, port, XM_PHY_ADDR, reg | hw->phy_addr);
928 v = skge_xm_read16(hw, port, XM_PHY_DATA);
929 if (hw->phy_type != SK_PHY_XMAC) {
930 for (i = 0; i < PHY_RETRIES; i++) {
931 udelay(1);
932 if (skge_xm_read16(hw, port, XM_MMU_CMD)
933 & XM_MMU_PHY_RDY)
934 goto ready;
935 }
936
937 printk(KERN_WARNING PFX "%s: phy read timed out\n",
938 hw->dev[port]->name);
939 return 0;
940 ready:
941 v = skge_xm_read16(hw, port, XM_PHY_DATA);
942 }
943
944 return v;
945}
946
947static void skge_xm_phy_write(struct skge_hw *hw, int port, u16 reg, u16 val)
948{
949 int i;
950
951 skge_xm_write16(hw, port, XM_PHY_ADDR, reg | hw->phy_addr);
952 for (i = 0; i < PHY_RETRIES; i++) {
953 if (!(skge_xm_read16(hw, port, XM_MMU_CMD) & XM_MMU_PHY_BUSY))
954 goto ready;
955 cpu_relax();
956 }
957 printk(KERN_WARNING PFX "%s: phy write failed to come ready\n",
958 hw->dev[port]->name);
959
960
961 ready:
962 skge_xm_write16(hw, port, XM_PHY_DATA, val);
963 for (i = 0; i < PHY_RETRIES; i++) {
964 udelay(1);
965 if (!(skge_xm_read16(hw, port, XM_MMU_CMD) & XM_MMU_PHY_BUSY))
966 return;
967 }
968 printk(KERN_WARNING PFX "%s: phy write timed out\n",
969 hw->dev[port]->name);
970}
971
972static void genesis_init(struct skge_hw *hw)
973{
974 /* set blink source counter */
975 skge_write32(hw, B2_BSC_INI, (SK_BLK_DUR * SK_FACT_53) / 100);
976 skge_write8(hw, B2_BSC_CTRL, BSC_START);
977
978 /* configure mac arbiter */
979 skge_write16(hw, B3_MA_TO_CTRL, MA_RST_CLR);
980
981 /* configure mac arbiter timeout values */
982 skge_write8(hw, B3_MA_TOINI_RX1, SK_MAC_TO_53);
983 skge_write8(hw, B3_MA_TOINI_RX2, SK_MAC_TO_53);
984 skge_write8(hw, B3_MA_TOINI_TX1, SK_MAC_TO_53);
985 skge_write8(hw, B3_MA_TOINI_TX2, SK_MAC_TO_53);
986
987 skge_write8(hw, B3_MA_RCINI_RX1, 0);
988 skge_write8(hw, B3_MA_RCINI_RX2, 0);
989 skge_write8(hw, B3_MA_RCINI_TX1, 0);
990 skge_write8(hw, B3_MA_RCINI_TX2, 0);
991
992 /* configure packet arbiter timeout */
993 skge_write16(hw, B3_PA_CTRL, PA_RST_CLR);
994 skge_write16(hw, B3_PA_TOINI_RX1, SK_PKT_TO_MAX);
995 skge_write16(hw, B3_PA_TOINI_TX1, SK_PKT_TO_MAX);
996 skge_write16(hw, B3_PA_TOINI_RX2, SK_PKT_TO_MAX);
997 skge_write16(hw, B3_PA_TOINI_TX2, SK_PKT_TO_MAX);
998}
999
1000static void genesis_reset(struct skge_hw *hw, int port)
1001{
1002 int i;
1003 u64 zero = 0;
1004
1005 /* reset the statistics module */
1006 skge_xm_write32(hw, port, XM_GP_PORT, XM_GP_RES_STAT);
1007 skge_xm_write16(hw, port, XM_IMSK, 0xffff); /* disable XMAC IRQs */
1008 skge_xm_write32(hw, port, XM_MODE, 0); /* clear Mode Reg */
1009 skge_xm_write16(hw, port, XM_TX_CMD, 0); /* reset TX CMD Reg */
1010 skge_xm_write16(hw, port, XM_RX_CMD, 0); /* reset RX CMD Reg */
1011
1012 /* disable all PHY IRQs */
1013 if (hw->phy_type == SK_PHY_BCOM)
1014 skge_xm_write16(hw, port, PHY_BCOM_INT_MASK, 0xffff);
1015
1016 skge_xm_outhash(hw, port, XM_HSM, (u8 *) &zero);
1017 for (i = 0; i < 15; i++)
1018 skge_xm_outaddr(hw, port, XM_EXM(i), (u8 *) &zero);
1019 skge_xm_outhash(hw, port, XM_SRC_CHK, (u8 *) &zero);
1020}
1021
1022
1023static void genesis_mac_init(struct skge_hw *hw, int port)
1024{
1025 struct skge_port *skge = netdev_priv(hw->dev[port]);
1026 int i;
1027 u32 r;
1028 u16 id1;
1029 u16 ctrl1, ctrl2, ctrl3, ctrl4, ctrl5;
1030
1031 /* magic workaround patterns for Broadcom */
1032 static const struct {
1033 u16 reg;
1034 u16 val;
1035 } A1hack[] = {
1036 { 0x18, 0x0c20 }, { 0x17, 0x0012 }, { 0x15, 0x1104 },
1037 { 0x17, 0x0013 }, { 0x15, 0x0404 }, { 0x17, 0x8006 },
1038 { 0x15, 0x0132 }, { 0x17, 0x8006 }, { 0x15, 0x0232 },
1039 { 0x17, 0x800D }, { 0x15, 0x000F }, { 0x18, 0x0420 },
1040 }, C0hack[] = {
1041 { 0x18, 0x0c20 }, { 0x17, 0x0012 }, { 0x15, 0x1204 },
1042 { 0x17, 0x0013 }, { 0x15, 0x0A04 }, { 0x18, 0x0420 },
1043 };
1044
1045
1046 /* initialize Rx, Tx and Link LED */
1047 skge_write8(hw, SKGEMAC_REG(port, LNK_LED_REG), LINKLED_ON);
1048 skge_write8(hw, SKGEMAC_REG(port, LNK_LED_REG), LINKLED_LINKSYNC_ON);
1049
1050 skge_write8(hw, SKGEMAC_REG(port, RX_LED_CTRL), LED_START);
1051 skge_write8(hw, SKGEMAC_REG(port, TX_LED_CTRL), LED_START);
1052
1053 /* Unreset the XMAC. */
1054 skge_write16(hw, SKGEMAC_REG(port, TX_MFF_CTRL1), MFF_CLR_MAC_RST);
1055
1056 /*
1057 * Perform additional initialization for external PHYs,
1058 * namely for the 1000baseTX cards that use the XMAC's
1059 * GMII mode.
1060 */
1061 spin_lock_bh(&hw->phy_lock);
1062 if (hw->phy_type != SK_PHY_XMAC) {
1063 /* Take PHY out of reset. */
1064 r = skge_read32(hw, B2_GP_IO);
1065 if (port == 0)
1066 r |= GP_DIR_0|GP_IO_0;
1067 else
1068 r |= GP_DIR_2|GP_IO_2;
1069
1070 skge_write32(hw, B2_GP_IO, r);
1071 skge_read32(hw, B2_GP_IO);
1072
1073 /* Enable GMII mode on the XMAC. */
1074 skge_xm_write16(hw, port, XM_HW_CFG, XM_HW_GMII_MD);
1075
1076 id1 = skge_xm_phy_read(hw, port, PHY_XMAC_ID1);
1077
1078 /* Optimize MDIO transfer by suppressing preamble. */
1079 skge_xm_write16(hw, port, XM_MMU_CMD,
1080 skge_xm_read16(hw, port, XM_MMU_CMD)
1081 | XM_MMU_NO_PRE);
1082
1083 if (id1 == PHY_BCOM_ID1_C0) {
1084 /*
1085 * Workaround BCOM Errata for the C0 type.
1086 * Write magic patterns to reserved registers.
1087 */
1088 for (i = 0; i < ARRAY_SIZE(C0hack); i++)
1089 skge_xm_phy_write(hw, port,
1090 C0hack[i].reg, C0hack[i].val);
1091
1092 } else if (id1 == PHY_BCOM_ID1_A1) {
1093 /*
1094 * Workaround BCOM Errata for the A1 type.
1095 * Write magic patterns to reserved registers.
1096 */
1097 for (i = 0; i < ARRAY_SIZE(A1hack); i++)
1098 skge_xm_phy_write(hw, port,
1099 A1hack[i].reg, A1hack[i].val);
1100 }
1101
1102 /*
1103 * Workaround BCOM Errata (#10523) for all BCom PHYs.
1104 * Disable Power Management after reset.
1105 */
1106 r = skge_xm_phy_read(hw, port, PHY_BCOM_AUX_CTRL);
1107 skge_xm_phy_write(hw, port, PHY_BCOM_AUX_CTRL, r | PHY_B_AC_DIS_PM);
1108 }
1109
1110 /* Dummy read */
1111 skge_xm_read16(hw, port, XM_ISRC);
1112
1113 r = skge_xm_read32(hw, port, XM_MODE);
1114 skge_xm_write32(hw, port, XM_MODE, r|XM_MD_CSA);
1115
1116 /* We don't need the FCS appended to the packet. */
1117 r = skge_xm_read16(hw, port, XM_RX_CMD);
1118 skge_xm_write16(hw, port, XM_RX_CMD, r | XM_RX_STRIP_FCS);
1119
1120 /* We want short frames padded to 60 bytes. */
1121 r = skge_xm_read16(hw, port, XM_TX_CMD);
1122 skge_xm_write16(hw, port, XM_TX_CMD, r | XM_TX_AUTO_PAD);
1123
1124 /*
1125	 * Enable the reception of all error frames. This is
1126 * a necessary evil due to the design of the XMAC. The
1127 * XMAC's receive FIFO is only 8K in size, however jumbo
1128 * frames can be up to 9000 bytes in length. When bad
1129 * frame filtering is enabled, the XMAC's RX FIFO operates
1130 * in 'store and forward' mode. For this to work, the
1131 * entire frame has to fit into the FIFO, but that means
1132 * that jumbo frames larger than 8192 bytes will be
1133 * truncated. Disabling all bad frame filtering causes
1134 * the RX FIFO to operate in streaming mode, in which
1135	 * case the XMAC will start transferring frames out of the
1136 * RX FIFO as soon as the FIFO threshold is reached.
1137 */
1138 r = skge_xm_read32(hw, port, XM_MODE);
1139 skge_xm_write32(hw, port, XM_MODE,
1140 XM_MD_RX_CRCE|XM_MD_RX_LONG|XM_MD_RX_RUNT|
1141 XM_MD_RX_ERR|XM_MD_RX_IRLE);
1142
1143 skge_xm_outaddr(hw, port, XM_SA, hw->dev[port]->dev_addr);
1144 skge_xm_outaddr(hw, port, XM_EXM(0), hw->dev[port]->dev_addr);
1145
1146 /*
1147 * Bump up the transmit threshold. This helps hold off transmit
1148 * underruns when we're blasting traffic from both ports at once.
1149 */
1150 skge_xm_write16(hw, port, XM_TX_THR, 512);
1151
1152 /* Configure MAC arbiter */
1153 skge_write16(hw, B3_MA_TO_CTRL, MA_RST_CLR);
1154
1155 /* configure timeout values */
1156 skge_write8(hw, B3_MA_TOINI_RX1, 72);
1157 skge_write8(hw, B3_MA_TOINI_RX2, 72);
1158 skge_write8(hw, B3_MA_TOINI_TX1, 72);
1159 skge_write8(hw, B3_MA_TOINI_TX2, 72);
1160
1161 skge_write8(hw, B3_MA_RCINI_RX1, 0);
1162 skge_write8(hw, B3_MA_RCINI_RX2, 0);
1163 skge_write8(hw, B3_MA_RCINI_TX1, 0);
1164 skge_write8(hw, B3_MA_RCINI_TX2, 0);
1165
1166 /* Configure Rx MAC FIFO */
1167 skge_write8(hw, SKGEMAC_REG(port, RX_MFF_CTRL2), MFF_RST_CLR);
1168 skge_write16(hw, SKGEMAC_REG(port, RX_MFF_CTRL1), MFF_ENA_TIM_PAT);
1169 skge_write8(hw, SKGEMAC_REG(port, RX_MFF_CTRL2), MFF_ENA_OP_MD);
1170
1171 /* Configure Tx MAC FIFO */
1172 skge_write8(hw, SKGEMAC_REG(port, TX_MFF_CTRL2), MFF_RST_CLR);
1173 skge_write16(hw, SKGEMAC_REG(port, TX_MFF_CTRL1), MFF_TX_CTRL_DEF);
1174 skge_write8(hw, SKGEMAC_REG(port, TX_MFF_CTRL2), MFF_ENA_OP_MD);
1175
1176 if (hw->dev[port]->mtu > ETH_DATA_LEN) {
1177 /* Enable frame flushing if jumbo frames used */
1178 skge_write16(hw, SKGEMAC_REG(port,RX_MFF_CTRL1), MFF_ENA_FLUSH);
1179 } else {
1180 /* enable timeout timers if normal frames */
1181 skge_write16(hw, B3_PA_CTRL,
1182 port == 0 ? PA_ENA_TO_TX1 : PA_ENA_TO_TX2);
1183 }
1184
1185
1186 r = skge_xm_read16(hw, port, XM_RX_CMD);
1187 if (hw->dev[port]->mtu > ETH_DATA_LEN)
1188 skge_xm_write16(hw, port, XM_RX_CMD, r | XM_RX_BIG_PK_OK);
1189 else
1190 skge_xm_write16(hw, port, XM_RX_CMD, r & ~(XM_RX_BIG_PK_OK));
1191
1192 switch (hw->phy_type) {
1193 case SK_PHY_XMAC:
1194 if (skge->autoneg == AUTONEG_ENABLE) {
1195 ctrl1 = PHY_X_AN_FD | PHY_X_AN_HD;
1196
1197 switch (skge->flow_control) {
1198 case FLOW_MODE_NONE:
1199 ctrl1 |= PHY_X_P_NO_PAUSE;
1200 break;
1201 case FLOW_MODE_LOC_SEND:
1202 ctrl1 |= PHY_X_P_ASYM_MD;
1203 break;
1204 case FLOW_MODE_SYMMETRIC:
1205 ctrl1 |= PHY_X_P_SYM_MD;
1206 break;
1207 case FLOW_MODE_REM_SEND:
1208 ctrl1 |= PHY_X_P_BOTH_MD;
1209 break;
1210 }
1211
1212 skge_xm_phy_write(hw, port, PHY_XMAC_AUNE_ADV, ctrl1);
1213 ctrl2 = PHY_CT_ANE | PHY_CT_RE_CFG;
1214 } else {
1215 ctrl2 = 0;
1216 if (skge->duplex == DUPLEX_FULL)
1217 ctrl2 |= PHY_CT_DUP_MD;
1218 }
1219
1220 skge_xm_phy_write(hw, port, PHY_XMAC_CTRL, ctrl2);
1221 break;
1222
1223 case SK_PHY_BCOM:
1224 ctrl1 = PHY_CT_SP1000;
1225 ctrl2 = 0;
1226 ctrl3 = PHY_SEL_TYPE;
1227 ctrl4 = PHY_B_PEC_EN_LTR;
1228 ctrl5 = PHY_B_AC_TX_TST;
1229
1230 if (skge->autoneg == AUTONEG_ENABLE) {
1231 /*
1232 * Workaround BCOM Errata #1 for the C5 type.
1233 * 1000Base-T Link Acquisition Failure in Slave Mode
1234 * Set Repeater/DTE bit 10 of the 1000Base-T Control Register
1235 */
1236 ctrl2 |= PHY_B_1000C_RD;
1237 if (skge->advertising & ADVERTISED_1000baseT_Half)
1238 ctrl2 |= PHY_B_1000C_AHD;
1239 if (skge->advertising & ADVERTISED_1000baseT_Full)
1240 ctrl2 |= PHY_B_1000C_AFD;
1241
1242 /* Set Flow-control capabilities */
1243 switch (skge->flow_control) {
1244 case FLOW_MODE_NONE:
1245 ctrl3 |= PHY_B_P_NO_PAUSE;
1246 break;
1247 case FLOW_MODE_LOC_SEND:
1248 ctrl3 |= PHY_B_P_ASYM_MD;
1249 break;
1250 case FLOW_MODE_SYMMETRIC:
1251 ctrl3 |= PHY_B_P_SYM_MD;
1252 break;
1253 case FLOW_MODE_REM_SEND:
1254 ctrl3 |= PHY_B_P_BOTH_MD;
1255 break;
1256 }
1257
1258 /* Restart Auto-negotiation */
1259 ctrl1 |= PHY_CT_ANE | PHY_CT_RE_CFG;
1260 } else {
1261 if (skge->duplex == DUPLEX_FULL)
1262 ctrl1 |= PHY_CT_DUP_MD;
1263
1264 ctrl2 |= PHY_B_1000C_MSE; /* set it to Slave */
1265 }
1266
1267 skge_xm_phy_write(hw, port, PHY_BCOM_1000T_CTRL, ctrl2);
1268 skge_xm_phy_write(hw, port, PHY_BCOM_AUNE_ADV, ctrl3);
1269
1270 if (skge->netdev->mtu > ETH_DATA_LEN) {
1271 ctrl4 |= PHY_B_PEC_HIGH_LA;
1272 ctrl5 |= PHY_B_AC_LONG_PACK;
1273
1274 skge_xm_phy_write(hw, port,PHY_BCOM_AUX_CTRL, ctrl5);
1275 }
1276
1277 skge_xm_phy_write(hw, port, PHY_BCOM_P_EXT_CTRL, ctrl4);
1278 skge_xm_phy_write(hw, port, PHY_BCOM_CTRL, ctrl1);
1279 break;
1280 }
1281 spin_unlock_bh(&hw->phy_lock);
1282
1283 /* Clear MIB counters */
1284 skge_xm_write16(hw, port, XM_STAT_CMD,
1285 XM_SC_CLR_RXC | XM_SC_CLR_TXC);
1286 /* Clear two times according to Errata #3 */
1287 skge_xm_write16(hw, port, XM_STAT_CMD,
1288 XM_SC_CLR_RXC | XM_SC_CLR_TXC);
1289
1290 /* Start polling for link status */
1291 mod_timer(&skge->link_check, jiffies + LINK_POLL_HZ);
1292}
1293
1294static void genesis_stop(struct skge_port *skge)
1295{
1296 struct skge_hw *hw = skge->hw;
1297 int port = skge->port;
1298
1299 /* Clear Tx packet arbiter timeout IRQ */
1300 skge_write16(hw, B3_PA_CTRL,
1301 port == 0 ? PA_CLR_TO_TX1 : PA_CLR_TO_TX2);
1302
1303 /*
1304	 * If the transfer gets stuck at the MAC, the STOP command will not
1305	 * terminate unless we flush the XMAC's transmit FIFO!
1306 */
1307 skge_xm_write32(hw, port, XM_MODE,
1308 skge_xm_read32(hw, port, XM_MODE)|XM_MD_FTF);
1309
1310
1311 /* Reset the MAC */
1312 skge_write16(hw, SKGEMAC_REG(port, TX_MFF_CTRL1), MFF_SET_MAC_RST);
1313
1314 /* For external PHYs there must be special handling */
1315 if (hw->phy_type != SK_PHY_XMAC) {
1316 u32 reg = skge_read32(hw, B2_GP_IO);
1317
1318 if (port == 0) {
1319 reg |= GP_DIR_0;
1320 reg &= ~GP_IO_0;
1321 } else {
1322 reg |= GP_DIR_2;
1323 reg &= ~GP_IO_2;
1324 }
1325 skge_write32(hw, B2_GP_IO, reg);
1326 skge_read32(hw, B2_GP_IO);
1327 }
1328
1329 skge_xm_write16(hw, port, XM_MMU_CMD,
1330 skge_xm_read16(hw, port, XM_MMU_CMD)
1331 & ~(XM_MMU_ENA_RX | XM_MMU_ENA_TX));
1332
1333 skge_xm_read16(hw, port, XM_MMU_CMD);
1334}
1335
1336
1337static void genesis_get_stats(struct skge_port *skge, u64 *data)
1338{
1339 struct skge_hw *hw = skge->hw;
1340 int port = skge->port;
1341 int i;
1342 unsigned long timeout = jiffies + HZ;
1343
1344 skge_xm_write16(hw, port,
1345 XM_STAT_CMD, XM_SC_SNP_TXC | XM_SC_SNP_RXC);
1346
1347 /* wait for update to complete */
1348 while (skge_xm_read16(hw, port, XM_STAT_CMD)
1349 & (XM_SC_SNP_TXC | XM_SC_SNP_RXC)) {
1350 if (time_after(jiffies, timeout))
1351 break;
1352 udelay(10);
1353 }
1354
1355 /* special case for 64 bit octet counter */
1356 data[0] = (u64) skge_xm_read32(hw, port, XM_TXO_OK_HI) << 32
1357 | skge_xm_read32(hw, port, XM_TXO_OK_LO);
1358 data[1] = (u64) skge_xm_read32(hw, port, XM_RXO_OK_HI) << 32
1359 | skge_xm_read32(hw, port, XM_RXO_OK_LO);
1360
1361 for (i = 2; i < ARRAY_SIZE(skge_stats); i++)
1362 data[i] = skge_xm_read32(hw, port, skge_stats[i].xmac_offset);
1363}
1364
1365static void genesis_mac_intr(struct skge_hw *hw, int port)
1366{
1367 struct skge_port *skge = netdev_priv(hw->dev[port]);
1368 u16 status = skge_xm_read16(hw, port, XM_ISRC);
1369
1370 pr_debug("genesis_intr status %x\n", status);
1371 if (hw->phy_type == SK_PHY_XMAC) {
1372		/* Link down, start polling for state change */
1373 if (status & XM_IS_INP_ASS) {
1374 skge_xm_write16(hw, port, XM_IMSK,
1375 skge_xm_read16(hw, port, XM_IMSK) | XM_IS_INP_ASS);
1376 mod_timer(&skge->link_check, jiffies + LINK_POLL_HZ);
1377 }
1378 else if (status & XM_IS_AND)
1379 mod_timer(&skge->link_check, jiffies + LINK_POLL_HZ);
1380 }
1381
1382 if (status & XM_IS_TXF_UR) {
1383 skge_xm_write32(hw, port, XM_MODE, XM_MD_FTF);
1384 ++skge->net_stats.tx_fifo_errors;
1385 }
1386 if (status & XM_IS_RXF_OV) {
1387 skge_xm_write32(hw, port, XM_MODE, XM_MD_FRF);
1388 ++skge->net_stats.rx_fifo_errors;
1389 }
1390}
1391
1392static void skge_gm_phy_write(struct skge_hw *hw, int port, u16 reg, u16 val)
1393{
1394 int i;
1395
1396 skge_gma_write16(hw, port, GM_SMI_DATA, val);
1397 skge_gma_write16(hw, port, GM_SMI_CTRL,
1398 GM_SMI_CT_PHY_AD(hw->phy_addr) | GM_SMI_CT_REG_AD(reg));
1399 for (i = 0; i < PHY_RETRIES; i++) {
1400 udelay(1);
1401
1402 if (!(skge_gma_read16(hw, port, GM_SMI_CTRL) & GM_SMI_CT_BUSY))
1403 break;
1404 }
1405}
1406
1407static u16 skge_gm_phy_read(struct skge_hw *hw, int port, u16 reg)
1408{
1409 int i;
1410
1411 skge_gma_write16(hw, port, GM_SMI_CTRL,
1412 GM_SMI_CT_PHY_AD(hw->phy_addr)
1413 | GM_SMI_CT_REG_AD(reg) | GM_SMI_CT_OP_RD);
1414
1415 for (i = 0; i < PHY_RETRIES; i++) {
1416 udelay(1);
1417 if (skge_gma_read16(hw, port, GM_SMI_CTRL) & GM_SMI_CT_RD_VAL)
1418 goto ready;
1419 }
1420
1421 printk(KERN_WARNING PFX "%s: phy read timeout\n",
1422 hw->dev[port]->name);
1423 return 0;
1424 ready:
1425 return skge_gma_read16(hw, port, GM_SMI_DATA);
1426}
1427
1428static void genesis_link_down(struct skge_port *skge)
1429{
1430 struct skge_hw *hw = skge->hw;
1431 int port = skge->port;
1432
1433 pr_debug("genesis_link_down\n");
1434
1435 skge_xm_write16(hw, port, XM_MMU_CMD,
1436 skge_xm_read16(hw, port, XM_MMU_CMD)
1437 & ~(XM_MMU_ENA_RX | XM_MMU_ENA_TX));
1438
1439 /* dummy read to ensure writing */
1440 (void) skge_xm_read16(hw, port, XM_MMU_CMD);
1441
1442 skge_link_down(skge);
1443}
1444
1445static void genesis_link_up(struct skge_port *skge)
1446{
1447 struct skge_hw *hw = skge->hw;
1448 int port = skge->port;
1449 u16 cmd;
1450 u32 mode, msk;
1451
1452 pr_debug("genesis_link_up\n");
1453 cmd = skge_xm_read16(hw, port, XM_MMU_CMD);
1454
1455 /*
1456 * enabling pause frame reception is required for 1000BT
1457	 * because the XMAC is not reset when the link goes down
1458 */
1459 if (skge->flow_control == FLOW_MODE_NONE ||
1460 skge->flow_control == FLOW_MODE_LOC_SEND)
1461 cmd |= XM_MMU_IGN_PF;
1462 else
1463 /* Enable Pause Frame Reception */
1464 cmd &= ~XM_MMU_IGN_PF;
1465
1466 skge_xm_write16(hw, port, XM_MMU_CMD, cmd);
1467
1468 mode = skge_xm_read32(hw, port, XM_MODE);
1469 if (skge->flow_control == FLOW_MODE_SYMMETRIC ||
1470 skge->flow_control == FLOW_MODE_LOC_SEND) {
1471 /*
1472 * Configure Pause Frame Generation
1473 * Use internal and external Pause Frame Generation.
1474 * Sending pause frames is edge triggered.
1475 * Send a Pause frame with the maximum pause time if
1476		 * internal or external FIFO full condition occurs.
1477 * Send a zero pause time frame to re-start transmission.
1478 */
1479 /* XM_PAUSE_DA = '010000C28001' (default) */
1480 /* XM_MAC_PTIME = 0xffff (maximum) */
1481 /* remember this value is defined in big endian (!) */
1482 skge_xm_write16(hw, port, XM_MAC_PTIME, 0xffff);
1483
1484 mode |= XM_PAUSE_MODE;
1485 skge_write16(hw, SKGEMAC_REG(port, RX_MFF_CTRL1), MFF_ENA_PAUSE);
1486 } else {
1487 /*
1488		 * disabling pause frame generation is required for 1000BT
1489		 * because the XMAC is not reset when the link goes down
1490 */
1491 /* Disable Pause Mode in Mode Register */
1492 mode &= ~XM_PAUSE_MODE;
1493
1494 skge_write16(hw, SKGEMAC_REG(port, RX_MFF_CTRL1), MFF_DIS_PAUSE);
1495 }
1496
1497 skge_xm_write32(hw, port, XM_MODE, mode);
1498
1499 msk = XM_DEF_MSK;
1500 if (hw->phy_type != SK_PHY_XMAC)
1501 msk |= XM_IS_INP_ASS; /* disable GP0 interrupt bit */
1502
1503 skge_xm_write16(hw, port, XM_IMSK, msk);
1504 skge_xm_read16(hw, port, XM_ISRC);
1505
1506 /* get MMU Command Reg. */
1507 cmd = skge_xm_read16(hw, port, XM_MMU_CMD);
1508 if (hw->phy_type != SK_PHY_XMAC && skge->duplex == DUPLEX_FULL)
1509 cmd |= XM_MMU_GMII_FD;
1510
1511 if (hw->phy_type == SK_PHY_BCOM) {
1512 /*
1513 * Workaround BCOM Errata (#10523) for all BCom Phys
1514 * Enable Power Management after link up
1515 */
1516 skge_xm_phy_write(hw, port, PHY_BCOM_AUX_CTRL,
1517 skge_xm_phy_read(hw, port, PHY_BCOM_AUX_CTRL)
1518 & ~PHY_B_AC_DIS_PM);
1519 skge_xm_phy_write(hw, port, PHY_BCOM_INT_MASK,
1520 PHY_B_DEF_MSK);
1521 }
1522
1523 /* enable Rx/Tx */
1524 skge_xm_write16(hw, port, XM_MMU_CMD,
1525 cmd | XM_MMU_ENA_RX | XM_MMU_ENA_TX);
1526 skge_link_up(skge);
1527}
1528
1529
1530static void genesis_bcom_intr(struct skge_port *skge)
1531{
1532 struct skge_hw *hw = skge->hw;
1533 int port = skge->port;
1534 u16 stat = skge_xm_phy_read(hw, port, PHY_BCOM_INT_STAT);
1535
1536 pr_debug("genesis_bcom intr stat=%x\n", stat);
1537
1538 /* Workaround BCom Errata:
1539 * enable and disable loopback mode if "NO HCD" occurs.
1540 */
1541 if (stat & PHY_B_IS_NO_HDCL) {
1542 u16 ctrl = skge_xm_phy_read(hw, port, PHY_BCOM_CTRL);
1543 skge_xm_phy_write(hw, port, PHY_BCOM_CTRL,
1544 ctrl | PHY_CT_LOOP);
1545 skge_xm_phy_write(hw, port, PHY_BCOM_CTRL,
1546 ctrl & ~PHY_CT_LOOP);
1547 }
1548
1549 stat = skge_xm_phy_read(hw, port, PHY_BCOM_STAT);
1550 if (stat & (PHY_B_IS_AN_PR | PHY_B_IS_LST_CHANGE)) {
1551 u16 aux = skge_xm_phy_read(hw, port, PHY_BCOM_AUX_STAT);
1552 if ( !(aux & PHY_B_AS_LS) && netif_carrier_ok(skge->netdev))
1553 genesis_link_down(skge);
1554
1555 else if (stat & PHY_B_IS_LST_CHANGE) {
1556 if (aux & PHY_B_AS_AN_C) {
1557 switch (aux & PHY_B_AS_AN_RES_MSK) {
1558 case PHY_B_RES_1000FD:
1559 skge->duplex = DUPLEX_FULL;
1560 break;
1561 case PHY_B_RES_1000HD:
1562 skge->duplex = DUPLEX_HALF;
1563 break;
1564 }
1565
1566 switch (aux & PHY_B_AS_PAUSE_MSK) {
1567 case PHY_B_AS_PAUSE_MSK:
1568 skge->flow_control = FLOW_MODE_SYMMETRIC;
1569 break;
1570 case PHY_B_AS_PRR:
1571 skge->flow_control = FLOW_MODE_REM_SEND;
1572 break;
1573 case PHY_B_AS_PRT:
1574 skge->flow_control = FLOW_MODE_LOC_SEND;
1575 break;
1576 default:
1577 skge->flow_control = FLOW_MODE_NONE;
1578 }
1579 skge->speed = SPEED_1000;
1580 }
1581 genesis_link_up(skge);
1582 }
1583 else
1584 mod_timer(&skge->link_check, jiffies + LINK_POLL_HZ);
1585 }
1586}
1587
1588/* Periodic poll of PHY status to check for link transition */
1589static void skge_link_timer(unsigned long __arg)
1590{
1591 struct skge_port *skge = (struct skge_port *) __arg;
1592 struct skge_hw *hw = skge->hw;
1593 int port = skge->port;
1594
1595 if (hw->chip_id != CHIP_ID_GENESIS || !netif_running(skge->netdev))
1596 return;
1597
1598 spin_lock_bh(&hw->phy_lock);
1599 if (hw->phy_type == SK_PHY_BCOM)
1600 genesis_bcom_intr(skge);
1601 else {
1602 int i;
1603 for (i = 0; i < 3; i++)
1604 if (skge_xm_read16(hw, port, XM_ISRC) & XM_IS_INP_ASS)
1605 break;
1606
1607 if (i == 3)
1608 mod_timer(&skge->link_check, jiffies + LINK_POLL_HZ);
1609 else
1610 genesis_link_up(skge);
1611 }
1612 spin_unlock_bh(&hw->phy_lock);
1613}
1614
1615/* Marvell PHY initialization */
1616static void yukon_init(struct skge_hw *hw, int port)
1617{
1618 struct skge_port *skge = netdev_priv(hw->dev[port]);
1619 u16 ctrl, ct1000, adv;
1620 u16 ledctrl, ledover;
1621
1622 pr_debug("yukon_init\n");
1623 if (skge->autoneg == AUTONEG_ENABLE) {
1624 u16 ectrl = skge_gm_phy_read(hw, port, PHY_MARV_EXT_CTRL);
1625
1626 ectrl &= ~(PHY_M_EC_M_DSC_MSK | PHY_M_EC_S_DSC_MSK |
1627 PHY_M_EC_MAC_S_MSK);
1628 ectrl |= PHY_M_EC_MAC_S(MAC_TX_CLK_25_MHZ);
1629
1630 /* on PHY 88E1111 there is a change for downshift control */
1631 if (hw->chip_id == CHIP_ID_YUKON_EC)
1632 ectrl |= PHY_M_EC_M_DSC_2(0) | PHY_M_EC_DOWN_S_ENA;
1633 else
1634 ectrl |= PHY_M_EC_M_DSC(0) | PHY_M_EC_S_DSC(1);
1635
1636 skge_gm_phy_write(hw, port, PHY_MARV_EXT_CTRL, ectrl);
1637 }
1638
1639 ctrl = skge_gm_phy_read(hw, port, PHY_MARV_CTRL);
1640 if (skge->autoneg == AUTONEG_DISABLE)
1641 ctrl &= ~PHY_CT_ANE;
1642
1643 ctrl |= PHY_CT_RESET;
1644 skge_gm_phy_write(hw, port, PHY_MARV_CTRL, ctrl);
1645
1646 ctrl = 0;
1647 ct1000 = 0;
1648 adv = PHY_SEL_TYPE;
1649
1650 if (skge->autoneg == AUTONEG_ENABLE) {
1651 if (iscopper(hw)) {
1652 if (skge->advertising & ADVERTISED_1000baseT_Full)
1653 ct1000 |= PHY_M_1000C_AFD;
1654 if (skge->advertising & ADVERTISED_1000baseT_Half)
1655 ct1000 |= PHY_M_1000C_AHD;
1656 if (skge->advertising & ADVERTISED_100baseT_Full)
1657 adv |= PHY_M_AN_100_FD;
1658 if (skge->advertising & ADVERTISED_100baseT_Half)
1659 adv |= PHY_M_AN_100_HD;
1660 if (skge->advertising & ADVERTISED_10baseT_Full)
1661 adv |= PHY_M_AN_10_FD;
1662 if (skge->advertising & ADVERTISED_10baseT_Half)
1663 adv |= PHY_M_AN_10_HD;
1664
1665 /* Set Flow-control capabilities */
1666 switch (skge->flow_control) {
1667 case FLOW_MODE_NONE:
1668 adv |= PHY_B_P_NO_PAUSE;
1669 break;
1670 case FLOW_MODE_LOC_SEND:
1671 adv |= PHY_B_P_ASYM_MD;
1672 break;
1673 case FLOW_MODE_SYMMETRIC:
1674 adv |= PHY_B_P_SYM_MD;
1675 break;
1676 case FLOW_MODE_REM_SEND:
1677 adv |= PHY_B_P_BOTH_MD;
1678 break;
1679 }
1680 } else { /* special defines for FIBER (88E1011S only) */
1681 adv |= PHY_M_AN_1000X_AHD | PHY_M_AN_1000X_AFD;
1682
1683 /* Set Flow-control capabilities */
1684 switch (skge->flow_control) {
1685 case FLOW_MODE_NONE:
1686 adv |= PHY_M_P_NO_PAUSE_X;
1687 break;
1688 case FLOW_MODE_LOC_SEND:
1689 adv |= PHY_M_P_ASYM_MD_X;
1690 break;
1691 case FLOW_MODE_SYMMETRIC:
1692 adv |= PHY_M_P_SYM_MD_X;
1693 break;
1694 case FLOW_MODE_REM_SEND:
1695 adv |= PHY_M_P_BOTH_MD_X;
1696 break;
1697 }
1698 }
1699 /* Restart Auto-negotiation */
1700 ctrl |= PHY_CT_ANE | PHY_CT_RE_CFG;
1701 } else {
1702 /* forced speed/duplex settings */
1703 ct1000 = PHY_M_1000C_MSE;
1704
1705 if (skge->duplex == DUPLEX_FULL)
1706 ctrl |= PHY_CT_DUP_MD;
1707
1708 switch (skge->speed) {
1709 case SPEED_1000:
1710 ctrl |= PHY_CT_SP1000;
1711 break;
1712 case SPEED_100:
1713 ctrl |= PHY_CT_SP100;
1714 break;
1715 }
1716
1717 ctrl |= PHY_CT_RESET;
1718 }
1719
1720 if (hw->chip_id != CHIP_ID_YUKON_FE)
1721 skge_gm_phy_write(hw, port, PHY_MARV_1000T_CTRL, ct1000);
1722
1723 skge_gm_phy_write(hw, port, PHY_MARV_AUNE_ADV, adv);
1724 skge_gm_phy_write(hw, port, PHY_MARV_CTRL, ctrl);
1725
1726	/* Set up PHY LEDs */
1727 ledctrl = PHY_M_LED_PULS_DUR(PULS_170MS);
1728 ledover = 0;
1729
1730 if (hw->chip_id == CHIP_ID_YUKON_FE) {
1731 /* on 88E3082 these bits are at 11..9 (shifted left) */
1732 ledctrl |= PHY_M_LED_BLINK_RT(BLINK_84MS) << 1;
1733
1734 skge_gm_phy_write(hw, port, PHY_MARV_FE_LED_PAR,
1735 ((skge_gm_phy_read(hw, port, PHY_MARV_FE_LED_PAR)
1736
1737 & ~PHY_M_FELP_LED1_MSK)
1738 | PHY_M_FELP_LED1_CTRL(LED_PAR_CTRL_ACT_BL)));
1739 } else {
1740 /* set Tx LED (LED_TX) to blink mode on Rx OR Tx activity */
1741 ledctrl |= PHY_M_LED_BLINK_RT(BLINK_84MS) | PHY_M_LEDC_TX_CTRL;
1742
1743 /* turn off the Rx LED (LED_RX) */
1744 ledover |= PHY_M_LED_MO_RX(MO_LED_OFF);
1745 }
1746
1747 /* disable blink mode (LED_DUPLEX) on collisions */
1748 ctrl |= PHY_M_LEDC_DP_CTRL;
1749 skge_gm_phy_write(hw, port, PHY_MARV_LED_CTRL, ledctrl);
1750
1751 if (skge->autoneg == AUTONEG_DISABLE || skge->speed == SPEED_100) {
1752 /* turn on 100 Mbps LED (LED_LINK100) */
1753 ledover |= PHY_M_LED_MO_100(MO_LED_ON);
1754 }
1755
1756 if (ledover)
1757 skge_gm_phy_write(hw, port, PHY_MARV_LED_OVER, ledover);
1758
1759 /* Enable phy interrupt on autonegotiation complete (or link up) */
1760 if (skge->autoneg == AUTONEG_ENABLE)
1761 skge_gm_phy_write(hw, port, PHY_MARV_INT_MASK, PHY_M_IS_AN_COMPL);
1762 else
1763 skge_gm_phy_write(hw, port, PHY_MARV_INT_MASK, PHY_M_DEF_MSK);
1764}
1765
1766static void yukon_reset(struct skge_hw *hw, int port)
1767{
1768 skge_gm_phy_write(hw, port, PHY_MARV_INT_MASK, 0);/* disable PHY IRQs */
1769 skge_gma_write16(hw, port, GM_MC_ADDR_H1, 0); /* clear MC hash */
1770 skge_gma_write16(hw, port, GM_MC_ADDR_H2, 0);
1771 skge_gma_write16(hw, port, GM_MC_ADDR_H3, 0);
1772 skge_gma_write16(hw, port, GM_MC_ADDR_H4, 0);
1773
1774 skge_gma_write16(hw, port, GM_RX_CTRL,
1775 skge_gma_read16(hw, port, GM_RX_CTRL)
1776 | GM_RXCR_UCF_ENA | GM_RXCR_MCF_ENA);
1777}
1778
1779static void yukon_mac_init(struct skge_hw *hw, int port)
1780{
1781 struct skge_port *skge = netdev_priv(hw->dev[port]);
1782 int i;
1783 u32 reg;
1784 const u8 *addr = hw->dev[port]->dev_addr;
1785
1786 /* WA code for COMA mode -- set PHY reset */
1787 if (hw->chip_id == CHIP_ID_YUKON_LITE &&
1788 chip_rev(hw) == CHIP_REV_YU_LITE_A3)
1789 skge_write32(hw, B2_GP_IO,
1790 (skge_read32(hw, B2_GP_IO) | GP_DIR_9 | GP_IO_9));
1791
1792 /* hard reset */
1793 skge_write32(hw, SKGEMAC_REG(port, GPHY_CTRL), GPC_RST_SET);
1794 skge_write32(hw, SKGEMAC_REG(port, GMAC_CTRL), GMC_RST_SET);
1795
1796 /* WA code for COMA mode -- clear PHY reset */
1797 if (hw->chip_id == CHIP_ID_YUKON_LITE &&
1798 chip_rev(hw) == CHIP_REV_YU_LITE_A3)
1799 skge_write32(hw, B2_GP_IO,
1800 (skge_read32(hw, B2_GP_IO) | GP_DIR_9)
1801 & ~GP_IO_9);
1802
1803 /* Set hardware config mode */
1804 reg = GPC_INT_POL_HI | GPC_DIS_FC | GPC_DIS_SLEEP |
1805 GPC_ENA_XC | GPC_ANEG_ADV_ALL_M | GPC_ENA_PAUSE;
1806 reg |= iscopper(hw) ? GPC_HWCFG_GMII_COP : GPC_HWCFG_GMII_FIB;
1807
1808 /* Clear GMC reset */
1809 skge_write32(hw, SKGEMAC_REG(port, GPHY_CTRL), reg | GPC_RST_SET);
1810 skge_write32(hw, SKGEMAC_REG(port, GPHY_CTRL), reg | GPC_RST_CLR);
1811 skge_write32(hw, SKGEMAC_REG(port, GMAC_CTRL), GMC_PAUSE_ON | GMC_RST_CLR);
1812 if (skge->autoneg == AUTONEG_DISABLE) {
1813 reg = GM_GPCR_AU_ALL_DIS;
1814 skge_gma_write16(hw, port, GM_GP_CTRL,
1815 skge_gma_read16(hw, port, GM_GP_CTRL) | reg);
1816
1817 switch (skge->speed) {
1818 case SPEED_1000:
1819 reg |= GM_GPCR_SPEED_1000;
1820 /* fallthru */
1821 case SPEED_100:
1822 reg |= GM_GPCR_SPEED_100;
1823 }
1824
1825 if (skge->duplex == DUPLEX_FULL)
1826 reg |= GM_GPCR_DUP_FULL;
1827 } else
1828 reg = GM_GPCR_SPEED_1000 | GM_GPCR_SPEED_100 | GM_GPCR_DUP_FULL;
1829 switch (skge->flow_control) {
1830 case FLOW_MODE_NONE:
1831 skge_write32(hw, SKGEMAC_REG(port, GMAC_CTRL), GMC_PAUSE_OFF);
1832 reg |= GM_GPCR_FC_TX_DIS | GM_GPCR_FC_RX_DIS | GM_GPCR_AU_FCT_DIS;
1833 break;
1834 case FLOW_MODE_LOC_SEND:
1835 /* disable Rx flow-control */
1836 reg |= GM_GPCR_FC_RX_DIS | GM_GPCR_AU_FCT_DIS;
1837 }
1838
1839 skge_gma_write16(hw, port, GM_GP_CTRL, reg);
1840 skge_read16(hw, GMAC_IRQ_SRC);
1841
1842 spin_lock_bh(&hw->phy_lock);
1843 yukon_init(hw, port);
1844 spin_unlock_bh(&hw->phy_lock);
1845
1846 /* MIB clear */
1847 reg = skge_gma_read16(hw, port, GM_PHY_ADDR);
1848 skge_gma_write16(hw, port, GM_PHY_ADDR, reg | GM_PAR_MIB_CLR);
1849
1850 for (i = 0; i < GM_MIB_CNT_SIZE; i++)
1851 skge_gma_read16(hw, port, GM_MIB_CNT_BASE + 8*i);
1852 skge_gma_write16(hw, port, GM_PHY_ADDR, reg);
1853
1854 /* transmit control */
1855 skge_gma_write16(hw, port, GM_TX_CTRL, TX_COL_THR(TX_COL_DEF));
1856
1857 /* receive control reg: unicast + multicast + no FCS */
1858 skge_gma_write16(hw, port, GM_RX_CTRL,
1859 GM_RXCR_UCF_ENA | GM_RXCR_CRC_DIS | GM_RXCR_MCF_ENA);
1860
1861 /* transmit flow control */
1862 skge_gma_write16(hw, port, GM_TX_FLOW_CTRL, 0xffff);
1863
1864 /* transmit parameter */
1865 skge_gma_write16(hw, port, GM_TX_PARAM,
1866 TX_JAM_LEN_VAL(TX_JAM_LEN_DEF) |
1867 TX_JAM_IPG_VAL(TX_JAM_IPG_DEF) |
1868 TX_IPG_JAM_DATA(TX_IPG_JAM_DEF));
1869
1870 /* serial mode register */
1871 reg = GM_SMOD_VLAN_ENA | IPG_DATA_VAL(IPG_DATA_DEF);
1872 if (hw->dev[port]->mtu > 1500)
1873 reg |= GM_SMOD_JUMBO_ENA;
1874
1875 skge_gma_write16(hw, port, GM_SERIAL_MODE, reg);
1876
1877 /* physical address: used for pause frames */
1878 skge_gm_set_addr(hw, port, GM_SRC_ADDR_1L, addr);
1879 /* virtual address for data */
1880 skge_gm_set_addr(hw, port, GM_SRC_ADDR_2L, addr);
1881
1882 /* enable interrupt mask for counter overflows */
1883 skge_gma_write16(hw, port, GM_TX_IRQ_MSK, 0);
1884 skge_gma_write16(hw, port, GM_RX_IRQ_MSK, 0);
1885 skge_gma_write16(hw, port, GM_TR_IRQ_MSK, 0);
1886
1887 /* Initialize Mac Fifo */
1888
1889 /* Configure Rx MAC FIFO */
1890 skge_write16(hw, SKGEMAC_REG(port, RX_GMF_FL_MSK), RX_FF_FL_DEF_MSK);
1891 reg = GMF_OPER_ON | GMF_RX_F_FL_ON;
1892 if (hw->chip_id == CHIP_ID_YUKON_LITE &&
1893 chip_rev(hw) == CHIP_REV_YU_LITE_A3)
1894 reg &= ~GMF_RX_F_FL_ON;
1895 skge_write8(hw, SKGEMAC_REG(port, RX_GMF_CTRL_T), GMF_RST_CLR);
1896 skge_write16(hw, SKGEMAC_REG(port, RX_GMF_CTRL_T), reg);
1897 skge_write16(hw, SKGEMAC_REG(port, RX_GMF_FL_THR), RX_GMF_FL_THR_DEF);
1898
1899 /* Configure Tx MAC FIFO */
1900 skge_write8(hw, SKGEMAC_REG(port, TX_GMF_CTRL_T), GMF_RST_CLR);
1901 skge_write16(hw, SKGEMAC_REG(port, TX_GMF_CTRL_T), GMF_OPER_ON);
1902}
1903
1904static void yukon_stop(struct skge_port *skge)
1905{
1906 struct skge_hw *hw = skge->hw;
1907 int port = skge->port;
1908
1909 if (hw->chip_id == CHIP_ID_YUKON_LITE &&
1910 chip_rev(hw) == CHIP_REV_YU_LITE_A3) {
1911 skge_write32(hw, B2_GP_IO,
1912 skge_read32(hw, B2_GP_IO) | GP_DIR_9 | GP_IO_9);
1913 }
1914
1915 skge_gma_write16(hw, port, GM_GP_CTRL,
1916 skge_gma_read16(hw, port, GM_GP_CTRL)
1917			 & ~(GM_GPCR_RX_ENA | GM_GPCR_TX_ENA));
1918 skge_gma_read16(hw, port, GM_GP_CTRL);
1919
1920 /* set GPHY Control reset */
1921 skge_gma_write32(hw, port, GPHY_CTRL, GPC_RST_SET);
1922 skge_gma_write32(hw, port, GMAC_CTRL, GMC_RST_SET);
1923}
1924
1925static void yukon_get_stats(struct skge_port *skge, u64 *data)
1926{
1927 struct skge_hw *hw = skge->hw;
1928 int port = skge->port;
1929 int i;
1930
1931 data[0] = (u64) skge_gma_read32(hw, port, GM_TXO_OK_HI) << 32
1932 | skge_gma_read32(hw, port, GM_TXO_OK_LO);
1933 data[1] = (u64) skge_gma_read32(hw, port, GM_RXO_OK_HI) << 32
1934 | skge_gma_read32(hw, port, GM_RXO_OK_LO);
1935
1936 for (i = 2; i < ARRAY_SIZE(skge_stats); i++)
1937 data[i] = skge_gma_read32(hw, port,
1938 skge_stats[i].gma_offset);
1939}
1940
1941static void yukon_mac_intr(struct skge_hw *hw, int port)
1942{
1943 struct skge_port *skge = netdev_priv(hw->dev[port]);
1944 u8 status = skge_read8(hw, SKGEMAC_REG(port, GMAC_IRQ_SRC));
1945
1946 pr_debug("yukon_intr status %x\n", status);
1947 if (status & GM_IS_RX_FF_OR) {
1948 ++skge->net_stats.rx_fifo_errors;
1949 skge_gma_write8(hw, port, RX_GMF_CTRL_T, GMF_CLI_RX_FO);
1950 }
1951 if (status & GM_IS_TX_FF_UR) {
1952 ++skge->net_stats.tx_fifo_errors;
1953 skge_gma_write8(hw, port, TX_GMF_CTRL_T, GMF_CLI_TX_FU);
1954 }
1955
1956}
1957
1958static u16 yukon_speed(const struct skge_hw *hw, u16 aux)
1959{
1960 if (hw->chip_id == CHIP_ID_YUKON_FE)
1961 return (aux & PHY_M_PS_SPEED_100) ? SPEED_100 : SPEED_10;
1962
1963	switch (aux & PHY_M_PS_SPEED_MSK) {
1964 case PHY_M_PS_SPEED_1000:
1965 return SPEED_1000;
1966 case PHY_M_PS_SPEED_100:
1967 return SPEED_100;
1968 default:
1969 return SPEED_10;
1970 }
1971}
1972
1973static void yukon_link_up(struct skge_port *skge)
1974{
1975 struct skge_hw *hw = skge->hw;
1976 int port = skge->port;
1977 u16 reg;
1978
1979 pr_debug("yukon_link_up\n");
1980
1981 /* Enable Transmit FIFO Underrun */
1982 skge_write8(hw, GMAC_IRQ_MSK, GMAC_DEF_MSK);
1983
1984 reg = skge_gma_read16(hw, port, GM_GP_CTRL);
1985 if (skge->duplex == DUPLEX_FULL || skge->autoneg == AUTONEG_ENABLE)
1986 reg |= GM_GPCR_DUP_FULL;
1987
1988 /* enable Rx/Tx */
1989 reg |= GM_GPCR_RX_ENA | GM_GPCR_TX_ENA;
1990 skge_gma_write16(hw, port, GM_GP_CTRL, reg);
1991
1992 skge_gm_phy_write(hw, port, PHY_MARV_INT_MASK, PHY_M_DEF_MSK);
1993 skge_link_up(skge);
1994}
1995
1996static void yukon_link_down(struct skge_port *skge)
1997{
1998 struct skge_hw *hw = skge->hw;
1999 int port = skge->port;
2000
2001 pr_debug("yukon_link_down\n");
2002 skge_gm_phy_write(hw, port, PHY_MARV_INT_MASK, 0);
2003	skge_gma_write16(hw, port, GM_GP_CTRL,
2004			 skge_gma_read16(hw, port, GM_GP_CTRL)
2005 & ~(GM_GPCR_RX_ENA | GM_GPCR_TX_ENA));
2006
2007 if (hw->chip_id != CHIP_ID_YUKON_FE &&
2008 skge->flow_control == FLOW_MODE_REM_SEND) {
2009 /* restore Asymmetric Pause bit */
2010 skge_gm_phy_write(hw, port, PHY_MARV_AUNE_ADV,
2011 skge_gm_phy_read(hw, port,
2012 PHY_MARV_AUNE_ADV)
2013 | PHY_M_AN_ASP);
2014
2015 }
2016
2017 yukon_reset(hw, port);
2018 skge_link_down(skge);
2019
2020 yukon_init(hw, port);
2021}
2022
2023static void yukon_phy_intr(struct skge_port *skge)
2024{
2025 struct skge_hw *hw = skge->hw;
2026 int port = skge->port;
2027 const char *reason = NULL;
2028 u16 istatus, phystat;
2029
2030 istatus = skge_gm_phy_read(hw, port, PHY_MARV_INT_STAT);
2031 phystat = skge_gm_phy_read(hw, port, PHY_MARV_PHY_STAT);
2032 pr_debug("yukon phy intr istat=%x phy_stat=%x\n", istatus, phystat);
2033
2034 if (istatus & PHY_M_IS_AN_COMPL) {
2035 if (skge_gm_phy_read(hw, port, PHY_MARV_AUNE_LP)
2036 & PHY_M_AN_RF) {
2037 reason = "remote fault";
2038 goto failed;
2039 }
2040
2041 if (!(hw->chip_id == CHIP_ID_YUKON_FE || hw->chip_id == CHIP_ID_YUKON_EC)
2042 && (skge_gm_phy_read(hw, port, PHY_MARV_1000T_STAT)
2043 & PHY_B_1000S_MSF)) {
2044 reason = "master/slave fault";
2045 goto failed;
2046 }
2047
2048 if (!(phystat & PHY_M_PS_SPDUP_RES)) {
2049 reason = "speed/duplex";
2050 goto failed;
2051 }
2052
2053 skge->duplex = (phystat & PHY_M_PS_FULL_DUP)
2054 ? DUPLEX_FULL : DUPLEX_HALF;
2055 skge->speed = yukon_speed(hw, phystat);
2056
2057 /* Tx & Rx Pause Enabled bits are at 9..8 */
2058 if (hw->chip_id == CHIP_ID_YUKON_XL)
2059 phystat >>= 6;
2060
2061 /* We are using IEEE 802.3z/D5.0 Table 37-4 */
2062 switch (phystat & PHY_M_PS_PAUSE_MSK) {
2063 case PHY_M_PS_PAUSE_MSK:
2064 skge->flow_control = FLOW_MODE_SYMMETRIC;
2065 break;
2066 case PHY_M_PS_RX_P_EN:
2067 skge->flow_control = FLOW_MODE_REM_SEND;
2068 break;
2069 case PHY_M_PS_TX_P_EN:
2070 skge->flow_control = FLOW_MODE_LOC_SEND;
2071 break;
2072 default:
2073 skge->flow_control = FLOW_MODE_NONE;
2074 }
2075
2076 if (skge->flow_control == FLOW_MODE_NONE ||
2077 (skge->speed < SPEED_1000 && skge->duplex == DUPLEX_HALF))
2078 skge_write8(hw, SKGEMAC_REG(port, GMAC_CTRL), GMC_PAUSE_OFF);
2079 else
2080 skge_write8(hw, SKGEMAC_REG(port, GMAC_CTRL), GMC_PAUSE_ON);
2081 yukon_link_up(skge);
2082 return;
2083 }
2084
2085 if (istatus & PHY_M_IS_LSP_CHANGE)
2086 skge->speed = yukon_speed(hw, phystat);
2087
2088 if (istatus & PHY_M_IS_DUP_CHANGE)
2089 skge->duplex = (phystat & PHY_M_PS_FULL_DUP) ? DUPLEX_FULL : DUPLEX_HALF;
2090 if (istatus & PHY_M_IS_LST_CHANGE) {
2091 if (phystat & PHY_M_PS_LINK_UP)
2092 yukon_link_up(skge);
2093 else
2094 yukon_link_down(skge);
2095 }
2096 return;
2097 failed:
2098 printk(KERN_ERR PFX "%s: autonegotiation failed (%s)\n",
2099 skge->netdev->name, reason);
2100
2101 /* XXX restart autonegotiation? */
2102}
2103
2104static void skge_ramset(struct skge_hw *hw, u16 q, u32 start, size_t len)
2105{
2106 u32 end;
2107
2108 start /= 8;
2109 len /= 8;
2110 end = start + len - 1;
2111
2112 skge_write8(hw, RB_ADDR(q, RB_CTRL), RB_RST_CLR);
2113 skge_write32(hw, RB_ADDR(q, RB_START), start);
2114 skge_write32(hw, RB_ADDR(q, RB_WP), start);
2115 skge_write32(hw, RB_ADDR(q, RB_RP), start);
2116 skge_write32(hw, RB_ADDR(q, RB_END), end);
2117
2118 if (q == Q_R1 || q == Q_R2) {
2119		/* Set thresholds on receive queues */
2120 skge_write32(hw, RB_ADDR(q, RB_RX_UTPP),
2121 start + (2*len)/3);
2122 skge_write32(hw, RB_ADDR(q, RB_RX_LTPP),
2123 start + (len/3));
2124 } else {
2125		/* Enable store & forward on Tx queues because
2126 * Tx FIFO is only 4K on Genesis and 1K on Yukon
2127 */
2128 skge_write8(hw, RB_ADDR(q, RB_CTRL), RB_ENA_STFWD);
2129 }
2130
2131 skge_write8(hw, RB_ADDR(q, RB_CTRL), RB_ENA_OP_MD);
2132}
2133
2134/* Setup Bus Memory Interface */
2135static void skge_qset(struct skge_port *skge, u16 q,
2136 const struct skge_element *e)
2137{
2138 struct skge_hw *hw = skge->hw;
2139 u32 watermark = 0x600;
2140 u64 base = skge->dma + (e->desc - skge->mem);
2141
2142	/* optimization to reduce window on 32-bit/33 MHz */
2143 if ((skge_read16(hw, B0_CTST) & (CS_BUS_CLOCK | CS_BUS_SLOT_SZ)) == 0)
2144 watermark /= 2;
2145
2146 skge_write32(hw, Q_ADDR(q, Q_CSR), CSR_CLR_RESET);
2147 skge_write32(hw, Q_ADDR(q, Q_F), watermark);
2148 skge_write32(hw, Q_ADDR(q, Q_DA_H), (u32)(base >> 32));
2149 skge_write32(hw, Q_ADDR(q, Q_DA_L), (u32)base);
2150}
2151
2152static int skge_up(struct net_device *dev)
2153{
2154 struct skge_port *skge = netdev_priv(dev);
2155 struct skge_hw *hw = skge->hw;
2156 int port = skge->port;
2157 u32 chunk, ram_addr;
2158 size_t rx_size, tx_size;
2159 int err;
2160
2161 if (netif_msg_ifup(skge))
2162 printk(KERN_INFO PFX "%s: enabling interface\n", dev->name);
2163
2164 rx_size = skge->rx_ring.count * sizeof(struct skge_rx_desc);
2165 tx_size = skge->tx_ring.count * sizeof(struct skge_tx_desc);
2166 skge->mem_size = tx_size + rx_size;
2167 skge->mem = pci_alloc_consistent(hw->pdev, skge->mem_size, &skge->dma);
2168 if (!skge->mem)
2169 return -ENOMEM;
2170
2171 memset(skge->mem, 0, skge->mem_size);
2172
2173 if ((err = skge_ring_alloc(&skge->rx_ring, skge->mem, skge->dma)))
2174 goto free_pci_mem;
2175
2176 if (skge_rx_fill(skge))
2177 goto free_rx_ring;
2178
2179 if ((err = skge_ring_alloc(&skge->tx_ring, skge->mem + rx_size,
2180 skge->dma + rx_size)))
2181 goto free_rx_ring;
2182
2183 skge->tx_avail = skge->tx_ring.count - 1;
2184
2185	/* Initialize MAC */
2186 if (hw->chip_id == CHIP_ID_GENESIS)
2187 genesis_mac_init(hw, port);
2188 else
2189 yukon_mac_init(hw, port);
2190
2191 /* Configure RAMbuffers */
2192 chunk = hw->ram_size / (isdualport(hw) ? 4 : 2);
2193 ram_addr = hw->ram_offset + 2 * chunk * port;
2194
2195 skge_ramset(hw, rxqaddr[port], ram_addr, chunk);
2196 skge_qset(skge, rxqaddr[port], skge->rx_ring.to_clean);
2197
2198 BUG_ON(skge->tx_ring.to_use != skge->tx_ring.to_clean);
2199 skge_ramset(hw, txqaddr[port], ram_addr+chunk, chunk);
2200 skge_qset(skge, txqaddr[port], skge->tx_ring.to_use);
2201
2202 /* Start receiver BMU */
2203 wmb();
2204 skge_write8(hw, Q_ADDR(rxqaddr[port], Q_CSR), CSR_START | CSR_IRQ_CL_F);
2205
2206 pr_debug("skge_up completed\n");
2207 return 0;
2208
2209 free_rx_ring:
2210 skge_rx_clean(skge);
2211 kfree(skge->rx_ring.start);
2212 free_pci_mem:
2213 pci_free_consistent(hw->pdev, skge->mem_size, skge->mem, skge->dma);
2214
2215 return err;
2216}
2217
2218static int skge_down(struct net_device *dev)
2219{
2220 struct skge_port *skge = netdev_priv(dev);
2221 struct skge_hw *hw = skge->hw;
2222 int port = skge->port;
2223
2224 if (netif_msg_ifdown(skge))
2225 printk(KERN_INFO PFX "%s: disabling interface\n", dev->name);
2226
2227 netif_stop_queue(dev);
2228
2229 del_timer_sync(&skge->led_blink);
2230 del_timer_sync(&skge->link_check);
2231
2232 /* Stop transmitter */
2233 skge_write8(hw, Q_ADDR(txqaddr[port], Q_CSR), CSR_STOP);
2234 skge_write32(hw, RB_ADDR(txqaddr[port], RB_CTRL),
2235 RB_RST_SET|RB_DIS_OP_MD);
2236
2237 if (hw->chip_id == CHIP_ID_GENESIS)
2238 genesis_stop(skge);
2239 else
2240 yukon_stop(skge);
2241
2242 /* Disable Force Sync bit and Enable Alloc bit */
2243 skge_write8(hw, SKGEMAC_REG(port, TXA_CTRL),
2244 TXA_DIS_FSYNC | TXA_DIS_ALLOC | TXA_STOP_RC);
2245
2246 /* Stop Interval Timer and Limit Counter of Tx Arbiter */
2247 skge_write32(hw, SKGEMAC_REG(port, TXA_ITI_INI), 0L);
2248 skge_write32(hw, SKGEMAC_REG(port, TXA_LIM_INI), 0L);
2249
2250 /* Reset PCI FIFO */
2251 skge_write32(hw, Q_ADDR(txqaddr[port], Q_CSR), CSR_SET_RESET);
2252 skge_write32(hw, RB_ADDR(txqaddr[port], RB_CTRL), RB_RST_SET);
2253
2254 /* Reset the RAM Buffer async Tx queue */
2255 skge_write8(hw, RB_ADDR(port == 0 ? Q_XA1 : Q_XA2, RB_CTRL), RB_RST_SET);
2256 /* stop receiver */
2257 skge_write8(hw, Q_ADDR(rxqaddr[port], Q_CSR), CSR_STOP);
2258 skge_write32(hw, RB_ADDR(port ? Q_R2 : Q_R1, RB_CTRL),
2259 RB_RST_SET|RB_DIS_OP_MD);
2260 skge_write32(hw, Q_ADDR(rxqaddr[port], Q_CSR), CSR_SET_RESET);
2261
2262 if (hw->chip_id == CHIP_ID_GENESIS) {
2263 skge_write8(hw, SKGEMAC_REG(port, TX_MFF_CTRL2), MFF_RST_SET);
2264 skge_write8(hw, SKGEMAC_REG(port, RX_MFF_CTRL2), MFF_RST_SET);
2265 skge_write8(hw, SKGEMAC_REG(port, TX_LED_CTRL), LED_STOP);
2266 skge_write8(hw, SKGEMAC_REG(port, RX_LED_CTRL), LED_STOP);
2267 } else {
2268 skge_write8(hw, SKGEMAC_REG(port, RX_GMF_CTRL_T), GMF_RST_SET);
2269 skge_write8(hw, SKGEMAC_REG(port, TX_GMF_CTRL_T), GMF_RST_SET);
2270 }
2271
2272	/* turn off LEDs */
2273 skge_write16(hw, B0_LED, LED_STAT_OFF);
2274
2275 skge_tx_clean(skge);
2276 skge_rx_clean(skge);
2277
2278 kfree(skge->rx_ring.start);
2279 kfree(skge->tx_ring.start);
2280 pci_free_consistent(hw->pdev, skge->mem_size, skge->mem, skge->dma);
2281 return 0;
2282}
2283
2284static int skge_xmit_frame(struct sk_buff *skb, struct net_device *dev)
2285{
2286 struct skge_port *skge = netdev_priv(dev);
2287 struct skge_hw *hw = skge->hw;
2288 struct skge_ring *ring = &skge->tx_ring;
2289 struct skge_element *e;
2290 struct skge_tx_desc *td;
2291 int i;
2292 u32 control, len;
2293 u64 map;
2294 unsigned long flags;
2295
2296 skb = skb_padto(skb, ETH_ZLEN);
2297 if (!skb)
2298 return NETDEV_TX_OK;
2299
2300 local_irq_save(flags);
2301 if (!spin_trylock(&skge->tx_lock)) {
2302 /* Collision - tell upper layer to requeue */
2303 local_irq_restore(flags);
2304 return NETDEV_TX_LOCKED;
2305 }
2306
2307	if (unlikely(skge->tx_avail < skb_shinfo(skb)->nr_frags + 1)) {
2308 netif_stop_queue(dev);
2309 spin_unlock_irqrestore(&skge->tx_lock, flags);
2310
2311 printk(KERN_WARNING PFX "%s: ring full when queue awake!\n",
2312 dev->name);
2313 return NETDEV_TX_BUSY;
2314 }
2315
2316 e = ring->to_use;
2317 td = e->desc;
2318 e->skb = skb;
2319 len = skb_headlen(skb);
2320 map = pci_map_single(hw->pdev, skb->data, len, PCI_DMA_TODEVICE);
2321 pci_unmap_addr_set(e, mapaddr, map);
2322 pci_unmap_len_set(e, maplen, len);
2323
2324 td->dma_lo = map;
2325 td->dma_hi = map >> 32;
2326
2327 if (skb->ip_summed == CHECKSUM_HW) {
2328 const struct iphdr *ip
2329 = (const struct iphdr *) (skb->data + ETH_HLEN);
2330 int offset = skb->h.raw - skb->data;
2331
2332 /* This seems backwards, but it is what the sk98lin
2333 * does. Looks like hardware is wrong?
2334 */
2335 if (ip->protocol == IPPROTO_UDP
2336 && chip_rev(hw) == 0 && hw->chip_id == CHIP_ID_YUKON)
2337 control = BMU_TCP_CHECK;
2338 else
2339 control = BMU_UDP_CHECK;
2340
2341 td->csum_offs = 0;
2342 td->csum_start = offset;
2343 td->csum_write = offset + skb->csum;
2344 } else
2345 control = BMU_CHECK;
2346
2347 if (!skb_shinfo(skb)->nr_frags) /* single buffer i.e. no fragments */
2348		control |= BMU_EOF | BMU_IRQ_EOF;
2349 else {
2350 struct skge_tx_desc *tf = td;
2351
2352 control |= BMU_STFWD;
2353 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
2354 skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
2355
2356 map = pci_map_page(hw->pdev, frag->page, frag->page_offset,
2357 frag->size, PCI_DMA_TODEVICE);
2358
2359 e = e->next;
2360 e->skb = NULL;
2361 tf = e->desc;
2362 tf->dma_lo = map;
2363 tf->dma_hi = (u64) map >> 32;
2364 pci_unmap_addr_set(e, mapaddr, map);
2365 pci_unmap_len_set(e, maplen, frag->size);
2366
2367 tf->control = BMU_OWN | BMU_SW | control | frag->size;
2368 }
2369 tf->control |= BMU_EOF | BMU_IRQ_EOF;
2370 }
2371	/* Make sure all the descriptors are written */
2372 wmb();
2373 td->control = BMU_OWN | BMU_SW | BMU_STF | control | len;
2374 wmb();
2375
2376 skge_write8(hw, Q_ADDR(txqaddr[skge->port], Q_CSR), CSR_START);
2377
2378 if (netif_msg_tx_queued(skge))
2379 printk(KERN_DEBUG "%s: tx queued, slot %td, len %d\n",
2380 dev->name, e - ring->start, skb->len);
2381
2382 ring->to_use = e->next;
2383 skge->tx_avail -= skb_shinfo(skb)->nr_frags + 1;
2384 if (skge->tx_avail <= MAX_SKB_FRAGS + 1) {
2385 pr_debug("%s: transmit queue full\n", dev->name);
2386 netif_stop_queue(dev);
2387 }
2388
2389 dev->trans_start = jiffies;
2390 spin_unlock_irqrestore(&skge->tx_lock, flags);
2391
2392 return NETDEV_TX_OK;
2393}
2394
2395static inline void skge_tx_free(struct skge_hw *hw, struct skge_element *e)
2396{
2397 if (e->skb) {
2398 pci_unmap_single(hw->pdev,
2399 pci_unmap_addr(e, mapaddr),
2400 pci_unmap_len(e, maplen),
2401 PCI_DMA_TODEVICE);
2402 dev_kfree_skb_any(e->skb);
2403 e->skb = NULL;
2404 } else {
2405 pci_unmap_page(hw->pdev,
2406 pci_unmap_addr(e, mapaddr),
2407 pci_unmap_len(e, maplen),
2408 PCI_DMA_TODEVICE);
2409 }
2410}
2411
2412static void skge_tx_clean(struct skge_port *skge)
2413{
2414 struct skge_ring *ring = &skge->tx_ring;
2415 struct skge_element *e;
2416 unsigned long flags;
2417
2418 spin_lock_irqsave(&skge->tx_lock, flags);
2419 for (e = ring->to_clean; e != ring->to_use; e = e->next) {
2420 ++skge->tx_avail;
2421 skge_tx_free(skge->hw, e);
2422 }
2423 ring->to_clean = e;
2424 spin_unlock_irqrestore(&skge->tx_lock, flags);
2425}
2426
2427static void skge_tx_timeout(struct net_device *dev)
2428{
2429 struct skge_port *skge = netdev_priv(dev);
2430
2431 if (netif_msg_timer(skge))
2432 printk(KERN_DEBUG PFX "%s: tx timeout\n", dev->name);
2433
2434 skge_write8(skge->hw, Q_ADDR(txqaddr[skge->port], Q_CSR), CSR_STOP);
2435 skge_tx_clean(skge);
2436}
2437
2438static int skge_change_mtu(struct net_device *dev, int new_mtu)
2439{
2440 int err = 0;
2441
2442	if (new_mtu < ETH_ZLEN || new_mtu > ETH_JUMBO_MTU)
2443 return -EINVAL;
2444
2445 dev->mtu = new_mtu;
2446
2447 if (netif_running(dev)) {
2448 skge_down(dev);
2449 skge_up(dev);
2450 }
2451
2452 return err;
2453}
2454
2455static void genesis_set_multicast(struct net_device *dev)
2456{
2457 struct skge_port *skge = netdev_priv(dev);
2458 struct skge_hw *hw = skge->hw;
2459 int port = skge->port;
2460 int i, count = dev->mc_count;
2461 struct dev_mc_list *list = dev->mc_list;
2462 u32 mode;
2463 u8 filter[8];
2464
2465 mode = skge_xm_read32(hw, port, XM_MODE);
2466 mode |= XM_MD_ENA_HASH;
2467 if (dev->flags & IFF_PROMISC)
2468 mode |= XM_MD_ENA_PROM;
2469 else
2470 mode &= ~XM_MD_ENA_PROM;
2471
2472 if (dev->flags & IFF_ALLMULTI)
2473 memset(filter, 0xff, sizeof(filter));
2474 else {
2475 memset(filter, 0, sizeof(filter));
2476		for (i = 0; list && i < count; i++, list = list->next) {
2477 u32 crc = crc32_le(~0, list->dmi_addr, ETH_ALEN);
2478 u8 bit = 63 - (crc & 63);
2479
2480 filter[bit/8] |= 1 << (bit%8);
2481 }
2482 }
2483
2484 skge_xm_outhash(hw, port, XM_HSM, filter);
2485
2486 skge_xm_write32(hw, port, XM_MODE, mode);
2487}
2488
2489static void yukon_set_multicast(struct net_device *dev)
2490{
2491 struct skge_port *skge = netdev_priv(dev);
2492 struct skge_hw *hw = skge->hw;
2493 int port = skge->port;
2494 struct dev_mc_list *list = dev->mc_list;
2495 u16 reg;
2496 u8 filter[8];
2497
2498 memset(filter, 0, sizeof(filter));
2499
2500 reg = skge_gma_read16(hw, port, GM_RX_CTRL);
2501 reg |= GM_RXCR_UCF_ENA;
2502
2503	if (dev->flags & IFF_PROMISC) 	/* promiscuous */
2504 reg &= ~(GM_RXCR_UCF_ENA | GM_RXCR_MCF_ENA);
2505 else if (dev->flags & IFF_ALLMULTI) /* all multicast */
2506 memset(filter, 0xff, sizeof(filter));
2507 else if (dev->mc_count == 0) /* no multicast */
2508 reg &= ~GM_RXCR_MCF_ENA;
2509 else {
2510 int i;
2511 reg |= GM_RXCR_MCF_ENA;
2512
2513		for (i = 0; list && i < dev->mc_count; i++, list = list->next) {
2514 u32 bit = ether_crc(ETH_ALEN, list->dmi_addr) & 0x3f;
2515 filter[bit/8] |= 1 << (bit%8);
2516 }
2517 }
2518
2519
2520 skge_gma_write16(hw, port, GM_MC_ADDR_H1,
2521 (u16)filter[0] | ((u16)filter[1] << 8));
2522 skge_gma_write16(hw, port, GM_MC_ADDR_H2,
2523 (u16)filter[2] | ((u16)filter[3] << 8));
2524 skge_gma_write16(hw, port, GM_MC_ADDR_H3,
2525 (u16)filter[4] | ((u16)filter[5] << 8));
2526 skge_gma_write16(hw, port, GM_MC_ADDR_H4,
2527 (u16)filter[6] | ((u16)filter[7] << 8));
2528
2529 skge_gma_write16(hw, port, GM_RX_CTRL, reg);
2530}
2531
2532static inline int bad_phy_status(const struct skge_hw *hw, u32 status)
2533{
2534 if (hw->chip_id == CHIP_ID_GENESIS)
2535 return (status & (XMR_FS_ERR | XMR_FS_2L_VLAN)) != 0;
2536 else
2537 return (status & GMR_FS_ANY_ERR) ||
2538 (status & GMR_FS_RX_OK) == 0;
2539}
2540
2541static void skge_rx_error(struct skge_port *skge, int slot,
2542 u32 control, u32 status)
2543{
2544 if (netif_msg_rx_err(skge))
2545 printk(KERN_DEBUG PFX "%s: rx err, slot %d control 0x%x status 0x%x\n",
2546 skge->netdev->name, slot, control, status);
2547
2548 if ((control & (BMU_EOF|BMU_STF)) != (BMU_STF|BMU_EOF)
2549 || (control & BMU_BBC) > skge->netdev->mtu + VLAN_ETH_HLEN)
2550 skge->net_stats.rx_length_errors++;
2551 else {
2552 if (skge->hw->chip_id == CHIP_ID_GENESIS) {
2553 if (status & (XMR_FS_RUNT|XMR_FS_LNG_ERR))
2554 skge->net_stats.rx_length_errors++;
2555 if (status & XMR_FS_FRA_ERR)
2556 skge->net_stats.rx_frame_errors++;
2557 if (status & XMR_FS_FCS_ERR)
2558 skge->net_stats.rx_crc_errors++;
2559 } else {
2560 if (status & (GMR_FS_LONG_ERR|GMR_FS_UN_SIZE))
2561 skge->net_stats.rx_length_errors++;
2562 if (status & GMR_FS_FRAGMENT)
2563 skge->net_stats.rx_frame_errors++;
2564 if (status & GMR_FS_CRC_ERR)
2565 skge->net_stats.rx_crc_errors++;
2566 }
2567 }
2568}
2569
2570static int skge_poll(struct net_device *dev, int *budget)
2571{
2572 struct skge_port *skge = netdev_priv(dev);
2573 struct skge_hw *hw = skge->hw;
2574 struct skge_ring *ring = &skge->rx_ring;
2575 struct skge_element *e;
2576 unsigned int to_do = min(dev->quota, *budget);
2577 unsigned int work_done = 0;
2578 int done;
2579 static const u32 irqmask[] = { IS_PORT_1, IS_PORT_2 };
2580
2581 for (e = ring->to_clean; e != ring->to_use && work_done < to_do;
2582 e = e->next) {
2583 struct skge_rx_desc *rd = e->desc;
2584 struct sk_buff *skb = e->skb;
2585 u32 control, len, status;
2586
2587 rmb();
2588 control = rd->control;
2589 if (control & BMU_OWN)
2590 break;
2591
2592 len = control & BMU_BBC;
2593 e->skb = NULL;
2594
2595 pci_unmap_single(hw->pdev,
2596 pci_unmap_addr(e, mapaddr),
2597 pci_unmap_len(e, maplen),
2598 PCI_DMA_FROMDEVICE);
2599
2600 status = rd->status;
2601 if ((control & (BMU_EOF|BMU_STF)) != (BMU_STF|BMU_EOF)
2602 || len > dev->mtu + VLAN_ETH_HLEN
2603 || bad_phy_status(hw, status)) {
2604 skge_rx_error(skge, e - ring->start, control, status);
2605 dev_kfree_skb(skb);
2606 continue;
2607 }
2608
2609 if (netif_msg_rx_status(skge))
2610 printk(KERN_DEBUG PFX "%s: rx slot %td status 0x%x len %d\n",
2611 dev->name, e - ring->start, rd->status, len);
2612
2613 skb_put(skb, len);
2614 skb->protocol = eth_type_trans(skb, dev);
2615
2616 if (skge->rx_csum) {
2617 skb->csum = le16_to_cpu(rd->csum2);
2618 skb->ip_summed = CHECKSUM_HW;
2619 }
2620
2621 dev->last_rx = jiffies;
2622 netif_receive_skb(skb);
2623
2624 ++work_done;
2625 }
2626 ring->to_clean = e;
2627
2628 *budget -= work_done;
2629 dev->quota -= work_done;
2630 done = work_done < to_do;
2631
2632 if (skge_rx_fill(skge))
2633 done = 0;
2634
2635 /* restart receiver */
2636 wmb();
2637 skge_write8(hw, Q_ADDR(rxqaddr[skge->port], Q_CSR),
2638 CSR_START | CSR_IRQ_CL_F);
2639
2640 if (done) {
2641 local_irq_disable();
2642 hw->intr_mask |= irqmask[skge->port];
2643 /* Order is important since data can get interrupted */
2644 skge_write32(hw, B0_IMSK, hw->intr_mask);
2645 __netif_rx_complete(dev);
2646 local_irq_enable();
2647 }
2648
2649 return !done;
2650}
2651
2652static inline void skge_tx_intr(struct net_device *dev)
2653{
2654 struct skge_port *skge = netdev_priv(dev);
2655 struct skge_hw *hw = skge->hw;
2656 struct skge_ring *ring = &skge->tx_ring;
2657 struct skge_element *e;
2658
2659 spin_lock(&skge->tx_lock);
2660	for (e = ring->to_clean; e != ring->to_use; e = e->next) {
2661 struct skge_tx_desc *td = e->desc;
2662 u32 control;
2663
2664 rmb();
2665 control = td->control;
2666 if (control & BMU_OWN)
2667 break;
2668
2669 if (unlikely(netif_msg_tx_done(skge)))
2670 printk(KERN_DEBUG PFX "%s: tx done slot %td status 0x%x\n",
2671 dev->name, e - ring->start, td->status);
2672
2673 skge_tx_free(hw, e);
2674 e->skb = NULL;
2675 ++skge->tx_avail;
2676 }
2677 ring->to_clean = e;
2678 skge_write8(hw, Q_ADDR(txqaddr[skge->port], Q_CSR), CSR_IRQ_CL_F);
2679
2680 if (skge->tx_avail > MAX_SKB_FRAGS + 1)
2681 netif_wake_queue(dev);
2682
2683 spin_unlock(&skge->tx_lock);
2684}
2685
2686static void skge_mac_parity(struct skge_hw *hw, int port)
2687{
2688 printk(KERN_ERR PFX "%s: mac data parity error\n",
2689 hw->dev[port] ? hw->dev[port]->name
2690	       : (port == 0 ? "(port A)" : "(port B)"));
2691
2692 if (hw->chip_id == CHIP_ID_GENESIS)
2693 skge_write16(hw, SKGEMAC_REG(port, TX_MFF_CTRL1),
2694 MFF_CLR_PERR);
2695 else
2696 /* HW-Bug #8: cleared by GMF_CLI_TX_FC instead of GMF_CLI_TX_PE */
2697 skge_write8(hw, SKGEMAC_REG(port, TX_GMF_CTRL_T),
2698 (hw->chip_id == CHIP_ID_YUKON && chip_rev(hw) == 0)
2699 ? GMF_CLI_TX_FC : GMF_CLI_TX_PE);
2700}
2701
2702static void skge_pci_clear(struct skge_hw *hw)
2703{
2704 u16 status;
2705
2706 status = skge_read16(hw, SKGEPCI_REG(PCI_STATUS));
2707 skge_write8(hw, B2_TST_CTRL1, TST_CFG_WRITE_ON);
2708 skge_write16(hw, SKGEPCI_REG(PCI_STATUS),
2709 status | PCI_STATUS_ERROR_BITS);
2710 skge_write8(hw, B2_TST_CTRL1, TST_CFG_WRITE_OFF);
2711}
2712
2713static void skge_mac_intr(struct skge_hw *hw, int port)
2714{
2715 if (hw->chip_id == CHIP_ID_GENESIS)
2716 genesis_mac_intr(hw, port);
2717 else
2718 yukon_mac_intr(hw, port);
2719}
2720
2721/* Handle hardware error interrupts (parity, PCI and sensor errors) */
2722static void skge_error_irq(struct skge_hw *hw)
2723{
2724 u32 hwstatus = skge_read32(hw, B0_HWE_ISRC);
2725
2726 if (hw->chip_id == CHIP_ID_GENESIS) {
2727 /* clear xmac errors */
2728 if (hwstatus & (IS_NO_STAT_M1|IS_NO_TIST_M1))
2729 skge_write16(hw, SKGEMAC_REG(0, RX_MFF_CTRL1), MFF_CLR_INSTAT);
2730 if (hwstatus & (IS_NO_STAT_M2|IS_NO_TIST_M2))
2731 skge_write16(hw, SKGEMAC_REG(0, RX_MFF_CTRL2), MFF_CLR_INSTAT);
2732 } else {
2733 /* Timestamp (unused) overflow */
2734 if (hwstatus & IS_IRQ_TIST_OV)
2735 skge_write8(hw, GMAC_TI_ST_CTRL, GMT_ST_CLR_IRQ);
2736
2737 if (hwstatus & IS_IRQ_SENSOR) {
2738 /* no sensors on 32-bit Yukon */
2739 if (!(skge_read16(hw, B0_CTST) & CS_BUS_SLOT_SZ)) {
2740			printk(KERN_ERR PFX "ignoring bogus sensor interrupts\n");
2741 skge_write32(hw, B0_HWE_IMSK,
2742 IS_ERR_MSK & ~IS_IRQ_SENSOR);
2743 } else
2744 printk(KERN_WARNING PFX "sensor interrupt\n");
2745 }
2746
2747
2748 }
2749
2750 if (hwstatus & IS_RAM_RD_PAR) {
2751 printk(KERN_ERR PFX "Ram read data parity error\n");
2752 skge_write16(hw, B3_RI_CTRL, RI_CLR_RD_PERR);
2753 }
2754
2755 if (hwstatus & IS_RAM_WR_PAR) {
2756 printk(KERN_ERR PFX "Ram write data parity error\n");
2757 skge_write16(hw, B3_RI_CTRL, RI_CLR_WR_PERR);
2758 }
2759
2760 if (hwstatus & IS_M1_PAR_ERR)
2761 skge_mac_parity(hw, 0);
2762
2763 if (hwstatus & IS_M2_PAR_ERR)
2764 skge_mac_parity(hw, 1);
2765
2766 if (hwstatus & IS_R1_PAR_ERR)
2767 skge_write32(hw, B0_R1_CSR, CSR_IRQ_CL_P);
2768
2769 if (hwstatus & IS_R2_PAR_ERR)
2770 skge_write32(hw, B0_R2_CSR, CSR_IRQ_CL_P);
2771
2772 if (hwstatus & (IS_IRQ_MST_ERR|IS_IRQ_STAT)) {
2773 printk(KERN_ERR PFX "hardware error detected (status 0x%x)\n",
2774 hwstatus);
2775
2776 skge_pci_clear(hw);
2777
2778 hwstatus = skge_read32(hw, B0_HWE_ISRC);
2779 if (hwstatus & IS_IRQ_STAT) {
2780			printk(KERN_WARNING PFX "IRQ status %x: still set, ignoring hardware errors\n",
2781 hwstatus);
2782 hw->intr_mask &= ~IS_HW_ERR;
2783 }
2784 }
2785}
2786
2787/*
2788 * Interrupts from the PHY are handled in a tasklet (soft irq)
2789 * because accessing PHY registers requires a spin wait, which might
2790 * cause excess interrupt latency.
2791 */
2792static void skge_extirq(unsigned long data)
2793{
2794 struct skge_hw *hw = (struct skge_hw *) data;
2795 int port;
2796
2797 spin_lock(&hw->phy_lock);
2798 for (port = 0; port < 2; port++) {
2799 struct net_device *dev = hw->dev[port];
2800
2801 if (dev && netif_running(dev)) {
2802 struct skge_port *skge = netdev_priv(dev);
2803
2804 if (hw->chip_id != CHIP_ID_GENESIS)
2805 yukon_phy_intr(skge);
2806 else if (hw->phy_type == SK_PHY_BCOM)
2807 genesis_bcom_intr(skge);
2808 }
2809 }
2810 spin_unlock(&hw->phy_lock);
2811
2812 local_irq_disable();
2813 hw->intr_mask |= IS_EXT_REG;
2814 skge_write32(hw, B0_IMSK, hw->intr_mask);
2815 local_irq_enable();
2816}
2817
2818static irqreturn_t skge_intr(int irq, void *dev_id, struct pt_regs *regs)
2819{
2820 struct skge_hw *hw = dev_id;
2821 u32 status = skge_read32(hw, B0_SP_ISRC);
2822
2823 if (status == 0 || status == ~0) /* hotplug or shared irq */
2824 return IRQ_NONE;
2825
2826 status &= hw->intr_mask;
2827
2828 if ((status & IS_R1_F) && netif_rx_schedule_prep(hw->dev[0])) {
2829 status &= ~IS_R1_F;
2830 hw->intr_mask &= ~IS_R1_F;
2831 skge_write32(hw, B0_IMSK, hw->intr_mask);
2832 __netif_rx_schedule(hw->dev[0]);
2833 }
2834
2835 if ((status & IS_R2_F) && netif_rx_schedule_prep(hw->dev[1])) {
2836 status &= ~IS_R2_F;
2837 hw->intr_mask &= ~IS_R2_F;
2838 skge_write32(hw, B0_IMSK, hw->intr_mask);
2839 __netif_rx_schedule(hw->dev[1]);
2840 }
2841
2842 if (status & IS_XA1_F)
2843 skge_tx_intr(hw->dev[0]);
2844
2845 if (status & IS_XA2_F)
2846 skge_tx_intr(hw->dev[1]);
2847
2848 if (status & IS_MAC1)
2849 skge_mac_intr(hw, 0);
2850
2851 if (status & IS_MAC2)
2852 skge_mac_intr(hw, 1);
2853
2854 if (status & IS_HW_ERR)
2855 skge_error_irq(hw);
2856
2857 if (status & IS_EXT_REG) {
2858 hw->intr_mask &= ~IS_EXT_REG;
2859 tasklet_schedule(&hw->ext_tasklet);
2860 }
2861
2862 if (status)
2863 skge_write32(hw, B0_IMSK, hw->intr_mask);
2864
2865 return IRQ_HANDLED;
2866}
2867
2868#ifdef CONFIG_NET_POLL_CONTROLLER
2869static void skge_netpoll(struct net_device *dev)
2870{
2871 struct skge_port *skge = netdev_priv(dev);
2872
2873 disable_irq(dev->irq);
2874 skge_intr(dev->irq, skge->hw, NULL);
2875 enable_irq(dev->irq);
2876}
2877#endif
2878
2879static int skge_set_mac_address(struct net_device *dev, void *p)
2880{
2881 struct skge_port *skge = netdev_priv(dev);
2882 struct sockaddr *addr = p;
2883 int err = 0;
2884
2885 if (!is_valid_ether_addr(addr->sa_data))
2886 return -EADDRNOTAVAIL;
2887
2888 skge_down(dev);
2889 memcpy(dev->dev_addr, addr->sa_data, ETH_ALEN);
2890 memcpy_toio(skge->hw->regs + B2_MAC_1 + skge->port*8,
2891 dev->dev_addr, ETH_ALEN);
2892 memcpy_toio(skge->hw->regs + B2_MAC_2 + skge->port*8,
2893 dev->dev_addr, ETH_ALEN);
2894 if (dev->flags & IFF_UP)
2895 err = skge_up(dev);
2896 return err;
2897}
2898
2899static const struct {
2900 u8 id;
2901 const char *name;
2902} skge_chips[] = {
2903 { CHIP_ID_GENESIS, "Genesis" },
2904 { CHIP_ID_YUKON, "Yukon" },
2905 { CHIP_ID_YUKON_LITE, "Yukon-Lite"},
2906 { CHIP_ID_YUKON_LP, "Yukon-LP"},
2907 { CHIP_ID_YUKON_XL, "Yukon-2 XL"},
2908	{ CHIP_ID_YUKON_EC,	"Yukon-2 EC"},
2909	{ CHIP_ID_YUKON_FE,	"Yukon-2 FE"},
2910};
2911
2912static const char *skge_board_name(const struct skge_hw *hw)
2913{
2914 int i;
2915 static char buf[16];
2916
2917 for (i = 0; i < ARRAY_SIZE(skge_chips); i++)
2918 if (skge_chips[i].id == hw->chip_id)
2919 return skge_chips[i].name;
2920
2921 snprintf(buf, sizeof buf, "chipid 0x%x", hw->chip_id);
2922 return buf;
2923}
2924
2925
2926/*
2927 * Setup the board data structure, but don't bring up
2928 * the port(s)
2929 */
2930static int skge_reset(struct skge_hw *hw)
2931{
2932 u16 ctst;
2933 u8 t8;
2934 int i, ports;
2935
2936 ctst = skge_read16(hw, B0_CTST);
2937
2938 /* do a SW reset */
2939 skge_write8(hw, B0_CTST, CS_RST_SET);
2940 skge_write8(hw, B0_CTST, CS_RST_CLR);
2941
2942 /* clear PCI errors, if any */
2943 skge_pci_clear(hw);
2944
2945 skge_write8(hw, B0_CTST, CS_MRST_CLR);
2946
2947 /* restore CLK_RUN bits (for Yukon-Lite) */
2948 skge_write16(hw, B0_CTST,
2949 ctst & (CS_CLK_RUN_HOT|CS_CLK_RUN_RST|CS_CLK_RUN_ENA));
2950
2951 hw->chip_id = skge_read8(hw, B2_CHIP_ID);
2952 hw->phy_type = skge_read8(hw, B2_E_1) & 0xf;
2953 hw->pmd_type = skge_read8(hw, B2_PMD_TYP);
2954
2955	switch (hw->chip_id) {
2956 case CHIP_ID_GENESIS:
2957 switch (hw->phy_type) {
2958 case SK_PHY_XMAC:
2959 hw->phy_addr = PHY_ADDR_XMAC;
2960 break;
2961 case SK_PHY_BCOM:
2962 hw->phy_addr = PHY_ADDR_BCOM;
2963 break;
2964 default:
2965 printk(KERN_ERR PFX "%s: unsupported phy type 0x%x\n",
2966 pci_name(hw->pdev), hw->phy_type);
2967 return -EOPNOTSUPP;
2968 }
2969 break;
2970
2971 case CHIP_ID_YUKON:
2972 case CHIP_ID_YUKON_LITE:
2973 case CHIP_ID_YUKON_LP:
2974 if (hw->phy_type < SK_PHY_MARV_COPPER && hw->pmd_type != 'S')
2975 hw->phy_type = SK_PHY_MARV_COPPER;
2976
2977 hw->phy_addr = PHY_ADDR_MARV;
2978 if (!iscopper(hw))
2979 hw->phy_type = SK_PHY_MARV_FIBER;
2980
2981 break;
2982
2983 default:
2984 printk(KERN_ERR PFX "%s: unsupported chip type 0x%x\n",
2985 pci_name(hw->pdev), hw->chip_id);
2986 return -EOPNOTSUPP;
2987 }
2988
2989 hw->mac_cfg = skge_read8(hw, B2_MAC_CFG);
2990 ports = isdualport(hw) ? 2 : 1;
2991
2992	/* read the adapter's RAM size */
2993 t8 = skge_read8(hw, B2_E_0);
2994 if (hw->chip_id == CHIP_ID_GENESIS) {
2995 if (t8 == 3) {
2996 /* special case: 4 x 64k x 36, offset = 0x80000 */
2997 hw->ram_size = 0x100000;
2998 hw->ram_offset = 0x80000;
2999 } else
3000 hw->ram_size = t8 * 512;
3001 }
3002 else if (t8 == 0)
3003 hw->ram_size = 0x20000;
3004 else
3005 hw->ram_size = t8 * 4096;
3006
3007 if (hw->chip_id == CHIP_ID_GENESIS)
3008 genesis_init(hw);
3009 else {
3010 /* switch power to VCC (WA for VAUX problem) */
3011 skge_write8(hw, B0_POWER_CTRL,
3012 PC_VAUX_ENA | PC_VCC_ENA | PC_VAUX_OFF | PC_VCC_ON);
3013 for (i = 0; i < ports; i++) {
3014 skge_write16(hw, SKGEMAC_REG(i, GMAC_LINK_CTRL), GMLC_RST_SET);
3015 skge_write16(hw, SKGEMAC_REG(i, GMAC_LINK_CTRL), GMLC_RST_CLR);
3016 }
3017 }
3018
3019 /* turn off hardware timer (unused) */
3020 skge_write8(hw, B2_TI_CTRL, TIM_STOP);
3021 skge_write8(hw, B2_TI_CTRL, TIM_CLR_IRQ);
3022 skge_write8(hw, B0_LED, LED_STAT_ON);
3023
3024 /* enable the Tx Arbiters */
3025 for (i = 0; i < ports; i++)
3026 skge_write8(hw, SKGEMAC_REG(i, TXA_CTRL), TXA_ENA_ARB);
3027
3028 /* Initialize ram interface */
3029 skge_write16(hw, B3_RI_CTRL, RI_RST_CLR);
3030
3031 skge_write8(hw, B3_RI_WTO_R1, SK_RI_TO_53);
3032 skge_write8(hw, B3_RI_WTO_XA1, SK_RI_TO_53);
3033 skge_write8(hw, B3_RI_WTO_XS1, SK_RI_TO_53);
3034 skge_write8(hw, B3_RI_RTO_R1, SK_RI_TO_53);
3035 skge_write8(hw, B3_RI_RTO_XA1, SK_RI_TO_53);
3036 skge_write8(hw, B3_RI_RTO_XS1, SK_RI_TO_53);
3037 skge_write8(hw, B3_RI_WTO_R2, SK_RI_TO_53);
3038 skge_write8(hw, B3_RI_WTO_XA2, SK_RI_TO_53);
3039 skge_write8(hw, B3_RI_WTO_XS2, SK_RI_TO_53);
3040 skge_write8(hw, B3_RI_RTO_R2, SK_RI_TO_53);
3041 skge_write8(hw, B3_RI_RTO_XA2, SK_RI_TO_53);
3042 skge_write8(hw, B3_RI_RTO_XS2, SK_RI_TO_53);
3043
3044 skge_write32(hw, B0_HWE_IMSK, IS_ERR_MSK);
3045
3046 /* Set interrupt moderation for Transmit only
3047	 * Receive interrupts are avoided by NAPI
3048 */
3049 skge_write32(hw, B2_IRQM_MSK, IS_XA1_F|IS_XA2_F);
3050 skge_write32(hw, B2_IRQM_INI, skge_usecs2clk(hw, 100));
3051 skge_write32(hw, B2_IRQM_CTRL, TIM_START);
3052
3053 hw->intr_mask = IS_HW_ERR | IS_EXT_REG | IS_PORT_1;
3054 if (isdualport(hw))
3055 hw->intr_mask |= IS_PORT_2;
3056 skge_write32(hw, B0_IMSK, hw->intr_mask);
3057
3058 if (hw->chip_id != CHIP_ID_GENESIS)
3059 skge_write8(hw, GMAC_IRQ_MSK, 0);
3060
3061 spin_lock_bh(&hw->phy_lock);
3062 for (i = 0; i < ports; i++) {
3063 if (hw->chip_id == CHIP_ID_GENESIS)
3064 genesis_reset(hw, i);
3065 else
3066 yukon_reset(hw, i);
3067 }
3068 spin_unlock_bh(&hw->phy_lock);
3069
3070 return 0;
3071}
3072
3073/* Initialize network device */
3074static struct net_device *skge_devinit(struct skge_hw *hw, int port)
3075{
3076 struct skge_port *skge;
3077 struct net_device *dev = alloc_etherdev(sizeof(*skge));
3078
3079 if (!dev) {
3080		printk(KERN_ERR "skge etherdev alloc failed\n");
3081 return NULL;
3082 }
3083
3084 SET_MODULE_OWNER(dev);
3085 SET_NETDEV_DEV(dev, &hw->pdev->dev);
3086 dev->open = skge_up;
3087 dev->stop = skge_down;
3088 dev->hard_start_xmit = skge_xmit_frame;
3089 dev->get_stats = skge_get_stats;
3090 if (hw->chip_id == CHIP_ID_GENESIS)
3091 dev->set_multicast_list = genesis_set_multicast;
3092 else
3093 dev->set_multicast_list = yukon_set_multicast;
3094
3095 dev->set_mac_address = skge_set_mac_address;
3096 dev->change_mtu = skge_change_mtu;
3097 SET_ETHTOOL_OPS(dev, &skge_ethtool_ops);
3098 dev->tx_timeout = skge_tx_timeout;
3099 dev->watchdog_timeo = TX_WATCHDOG;
3100 dev->poll = skge_poll;
3101 dev->weight = NAPI_WEIGHT;
3102#ifdef CONFIG_NET_POLL_CONTROLLER
3103 dev->poll_controller = skge_netpoll;
3104#endif
3105 dev->irq = hw->pdev->irq;
3106 dev->features = NETIF_F_LLTX;
3107
3108 skge = netdev_priv(dev);
3109 skge->netdev = dev;
3110 skge->hw = hw;
3111 skge->msg_enable = netif_msg_init(debug, default_msg);
3112 skge->tx_ring.count = DEFAULT_TX_RING_SIZE;
3113 skge->rx_ring.count = DEFAULT_RX_RING_SIZE;
3114
3115 /* Auto speed and flow control */
3116 skge->autoneg = AUTONEG_ENABLE;
3117 skge->flow_control = FLOW_MODE_SYMMETRIC;
3118 skge->duplex = -1;
3119 skge->speed = -1;
3120 skge->advertising = skge_modes(hw);
3121
3122 hw->dev[port] = dev;
3123
3124 skge->port = port;
3125
3126 spin_lock_init(&skge->tx_lock);
3127
3128 init_timer(&skge->link_check);
3129 skge->link_check.function = skge_link_timer;
3130 skge->link_check.data = (unsigned long) skge;
3131
3132 init_timer(&skge->led_blink);
3133 skge->led_blink.function = skge_blink_timer;
3134 skge->led_blink.data = (unsigned long) skge;
3135
3136 if (hw->chip_id != CHIP_ID_GENESIS) {
3137 dev->features |= NETIF_F_IP_CSUM | NETIF_F_SG;
3138 skge->rx_csum = 1;
3139 }
3140
3141 /* read the mac address */
3142 memcpy_fromio(dev->dev_addr, hw->regs + B2_MAC_1 + port*8, ETH_ALEN);
3143
3144 /* device is off until link detection */
3145 netif_carrier_off(dev);
3146 netif_stop_queue(dev);
3147
3148 return dev;
3149}
3150
3151static void __devinit skge_show_addr(struct net_device *dev)
3152{
3153 const struct skge_port *skge = netdev_priv(dev);
3154
3155 if (netif_msg_probe(skge))
3156 printk(KERN_INFO PFX "%s: addr %02x:%02x:%02x:%02x:%02x:%02x\n",
3157 dev->name,
3158 dev->dev_addr[0], dev->dev_addr[1], dev->dev_addr[2],
3159 dev->dev_addr[3], dev->dev_addr[4], dev->dev_addr[5]);
3160}
3161
3162static int __devinit skge_probe(struct pci_dev *pdev,
3163 const struct pci_device_id *ent)
3164{
3165 struct net_device *dev, *dev1;
3166 struct skge_hw *hw;
3167 int err, using_dac = 0;
3168
3169 if ((err = pci_enable_device(pdev))) {
3170 printk(KERN_ERR PFX "%s cannot enable PCI device\n",
3171 pci_name(pdev));
3172 goto err_out;
3173 }
3174
3175 if ((err = pci_request_regions(pdev, DRV_NAME))) {
3176 printk(KERN_ERR PFX "%s cannot obtain PCI resources\n",
3177 pci_name(pdev));
3178 goto err_out_disable_pdev;
3179 }
3180
3181 pci_set_master(pdev);
3182
3183 if (!(err = pci_set_dma_mask(pdev, DMA_64BIT_MASK)))
3184 using_dac = 1;
3185 else if (!(err = pci_set_dma_mask(pdev, DMA_32BIT_MASK))) {
3186 printk(KERN_ERR PFX "%s no usable DMA configuration\n",
3187 pci_name(pdev));
3188 goto err_out_free_regions;
3189 }
3190
3191#ifdef __BIG_ENDIAN
3192	/* byte swap descriptors in hardware */
3193 {
3194 u32 reg;
3195
3196 pci_read_config_dword(pdev, PCI_DEV_REG2, &reg);
3197 reg |= PCI_REV_DESC;
3198 pci_write_config_dword(pdev, PCI_DEV_REG2, reg);
3199 }
3200#endif
3201
3202 err = -ENOMEM;
3203 hw = kmalloc(sizeof(*hw), GFP_KERNEL);
3204 if (!hw) {
3205 printk(KERN_ERR PFX "%s: cannot allocate hardware struct\n",
3206 pci_name(pdev));
3207 goto err_out_free_regions;
3208 }
3209
3210 memset(hw, 0, sizeof(*hw));
3211 hw->pdev = pdev;
3212 spin_lock_init(&hw->phy_lock);
3213 tasklet_init(&hw->ext_tasklet, skge_extirq, (unsigned long) hw);
3214
3215 hw->regs = ioremap_nocache(pci_resource_start(pdev, 0), 0x4000);
3216 if (!hw->regs) {
3217 printk(KERN_ERR PFX "%s: cannot map device registers\n",
3218 pci_name(pdev));
3219 goto err_out_free_hw;
3220 }
3221
3222 if ((err = request_irq(pdev->irq, skge_intr, SA_SHIRQ, DRV_NAME, hw))) {
3223 printk(KERN_ERR PFX "%s: cannot assign irq %d\n",
3224 pci_name(pdev), pdev->irq);
3225 goto err_out_iounmap;
3226 }
3227 pci_set_drvdata(pdev, hw);
3228
3229 err = skge_reset(hw);
3230 if (err)
3231 goto err_out_free_irq;
3232
3233 printk(KERN_INFO PFX "addr 0x%lx irq %d chip %s rev %d\n",
3234 pci_resource_start(pdev, 0), pdev->irq,
3235 skge_board_name(hw), chip_rev(hw));
3236
3237 if ((dev = skge_devinit(hw, 0)) == NULL)
3238 goto err_out_led_off;
3239
3240 if (using_dac)
3241 dev->features |= NETIF_F_HIGHDMA;
3242
3243 if ((err = register_netdev(dev))) {
3244 printk(KERN_ERR PFX "%s: cannot register net device\n",
3245 pci_name(pdev));
3246 goto err_out_free_netdev;
3247 }
3248
3249 skge_show_addr(dev);
3250
3251 if (isdualport(hw) && (dev1 = skge_devinit(hw, 1))) {
3252 if (using_dac)
3253 dev1->features |= NETIF_F_HIGHDMA;
3254
3255 if (register_netdev(dev1) == 0)
3256 skge_show_addr(dev1);
3257 else {
3258 /* Failure to register second port need not be fatal */
3259 printk(KERN_WARNING PFX "register of second port failed\n");
3260 hw->dev[1] = NULL;
3261 free_netdev(dev1);
3262 }
3263 }
3264
3265 return 0;
3266
3267err_out_free_netdev:
3268 free_netdev(dev);
3269err_out_led_off:
3270 skge_write16(hw, B0_LED, LED_STAT_OFF);
3271err_out_free_irq:
3272 free_irq(pdev->irq, hw);
3273err_out_iounmap:
3274 iounmap(hw->regs);
3275err_out_free_hw:
3276 kfree(hw);
3277err_out_free_regions:
3278 pci_release_regions(pdev);
3279err_out_disable_pdev:
3280 pci_disable_device(pdev);
3281 pci_set_drvdata(pdev, NULL);
3282err_out:
3283 return err;
3284}
3285
3286static void __devexit skge_remove(struct pci_dev *pdev)
3287{
3288 struct skge_hw *hw = pci_get_drvdata(pdev);
3289 struct net_device *dev0, *dev1;
3290
3291	if (!hw)
3292 return;
3293
3294 if ((dev1 = hw->dev[1]))
3295 unregister_netdev(dev1);
3296 dev0 = hw->dev[0];
3297 unregister_netdev(dev0);
3298
3299 tasklet_kill(&hw->ext_tasklet);
3300
3301 free_irq(pdev->irq, hw);
3302 pci_release_regions(pdev);
3303 pci_disable_device(pdev);
3304 if (dev1)
3305 free_netdev(dev1);
3306 free_netdev(dev0);
3307 skge_write16(hw, B0_LED, LED_STAT_OFF);
3308 iounmap(hw->regs);
3309 kfree(hw);
3310 pci_set_drvdata(pdev, NULL);
3311}
3312
3313#ifdef CONFIG_PM
3314static int skge_suspend(struct pci_dev *pdev, u32 state)
3315{
3316 struct skge_hw *hw = pci_get_drvdata(pdev);
3317 int i, wol = 0;
3318
3319	for (i = 0; i < 2; i++) {
3320 struct net_device *dev = hw->dev[i];
3321
3322 if (dev) {
3323 struct skge_port *skge = netdev_priv(dev);
3324 if (netif_running(dev)) {
3325 netif_carrier_off(dev);
3326 skge_down(dev);
3327 }
3328 netif_device_detach(dev);
3329 wol |= skge->wol;
3330 }
3331 }
3332
3333 pci_save_state(pdev);
3334 pci_enable_wake(pdev, state, wol);
3335 pci_disable_device(pdev);
3336 pci_set_power_state(pdev, pci_choose_state(pdev, state));
3337
3338 return 0;
3339}
3340
3341static int skge_resume(struct pci_dev *pdev)
3342{
3343 struct skge_hw *hw = pci_get_drvdata(pdev);
3344 int i;
3345
3346 pci_set_power_state(pdev, PCI_D0);
3347 pci_restore_state(pdev);
3348 pci_enable_wake(pdev, PCI_D0, 0);
3349
3350 skge_reset(hw);
3351
3352	for (i = 0; i < 2; i++) {
3353 struct net_device *dev = hw->dev[i];
3354 if (dev) {
3355 netif_device_attach(dev);
3356			if (netif_running(dev))
3357 skge_up(dev);
3358 }
3359 }
3360 return 0;
3361}
3362#endif
3363
3364static struct pci_driver skge_driver = {
3365 .name = DRV_NAME,
3366 .id_table = skge_id_table,
3367 .probe = skge_probe,
3368 .remove = __devexit_p(skge_remove),
3369#ifdef CONFIG_PM
3370 .suspend = skge_suspend,
3371 .resume = skge_resume,
3372#endif
3373};
3374
3375static int __init skge_init_module(void)
3376{
3377 return pci_module_init(&skge_driver);
3378}
3379
3380static void __exit skge_cleanup_module(void)
3381{
3382 pci_unregister_driver(&skge_driver);
3383}
3384
3385module_init(skge_init_module);
3386module_exit(skge_cleanup_module);
diff --git a/drivers/net/skge.h b/drivers/net/skge.h
new file mode 100644
index 000000000000..36c62b68fab4
--- /dev/null
+++ b/drivers/net/skge.h
@@ -0,0 +1,3005 @@
1/*
2 * Definitions for the new Marvell Yukon / SysKonnect driver.
3 */
4#ifndef _SKGE_H
5#define _SKGE_H
6
7/* PCI config registers */
8#define PCI_DEV_REG1 0x40
9#define PCI_DEV_REG2 0x44
10#ifndef PCI_VPD
11#define PCI_VPD 0x50
12#endif
13
14/* PCI_OUR_REG_2 32 bit Our Register 2 */
15enum {
16 PCI_VPD_WR_THR = 0xff<<24, /* Bit 31..24: VPD Write Threshold */
17 PCI_DEV_SEL = 0x7f<<17, /* Bit 23..17: EEPROM Device Select */
18 PCI_VPD_ROM_SZ = 7 <<14, /* Bit 16..14: VPD ROM Size */
19 /* Bit 13..12: reserved */
20 PCI_EN_DUMMY_RD = 1<<3, /* Enable Dummy Read */
21 PCI_REV_DESC = 1<<2, /* Reverse Desc. Bytes */
22 PCI_USEDATA64 = 1<<0, /* Use 64Bit Data bus ext */
23};
24
25/* PCI_VPD_ADR_REG 16 bit VPD Address Register */
26enum {
27 PCI_VPD_FLAG = 1<<15, /* starts VPD rd/wr cycle */
28	PCI_VPD_ADR_MSK	= 0x7fffL,	/* Bit 14.. 0:	VPD Address Mask */
29 VPD_RES_ID = 0x82,
30 VPD_RES_READ = 0x90,
31 VPD_RES_WRITE = 0x81,
32 VPD_RES_END = 0x78,
33};
34
35
36#define PCI_STATUS_ERROR_BITS (PCI_STATUS_DETECTED_PARITY | \
37 PCI_STATUS_SIG_SYSTEM_ERROR | \
38 PCI_STATUS_REC_MASTER_ABORT | \
39 PCI_STATUS_REC_TARGET_ABORT | \
40 PCI_STATUS_PARITY)
41
42
43enum csr_regs {
44 B0_RAP = 0x0000,
45 B0_CTST = 0x0004,
46 B0_LED = 0x0006,
47 B0_POWER_CTRL = 0x0007,
48 B0_ISRC = 0x0008,
49 B0_IMSK = 0x000c,
50 B0_HWE_ISRC = 0x0010,
51 B0_HWE_IMSK = 0x0014,
52 B0_SP_ISRC = 0x0018,
53 B0_XM1_IMSK = 0x0020,
54 B0_XM1_ISRC = 0x0028,
55 B0_XM1_PHY_ADDR = 0x0030,
56 B0_XM1_PHY_DATA = 0x0034,
57 B0_XM2_IMSK = 0x0040,
58 B0_XM2_ISRC = 0x0048,
59 B0_XM2_PHY_ADDR = 0x0050,
60 B0_XM2_PHY_DATA = 0x0054,
61 B0_R1_CSR = 0x0060,
62 B0_R2_CSR = 0x0064,
63 B0_XS1_CSR = 0x0068,
64 B0_XA1_CSR = 0x006c,
65 B0_XS2_CSR = 0x0070,
66 B0_XA2_CSR = 0x0074,
67
68 B2_MAC_1 = 0x0100,
69 B2_MAC_2 = 0x0108,
70 B2_MAC_3 = 0x0110,
71 B2_CONN_TYP = 0x0118,
72 B2_PMD_TYP = 0x0119,
73 B2_MAC_CFG = 0x011a,
74 B2_CHIP_ID = 0x011b,
75 B2_E_0 = 0x011c,
76 B2_E_1 = 0x011d,
77 B2_E_2 = 0x011e,
78 B2_E_3 = 0x011f,
79 B2_FAR = 0x0120,
80 B2_FDP = 0x0124,
81 B2_LD_CTRL = 0x0128,
82 B2_LD_TEST = 0x0129,
83 B2_TI_INI = 0x0130,
84 B2_TI_VAL = 0x0134,
85 B2_TI_CTRL = 0x0138,
86 B2_TI_TEST = 0x0139,
87 B2_IRQM_INI = 0x0140,
88 B2_IRQM_VAL = 0x0144,
89 B2_IRQM_CTRL = 0x0148,
90 B2_IRQM_TEST = 0x0149,
91 B2_IRQM_MSK = 0x014c,
92 B2_IRQM_HWE_MSK = 0x0150,
93 B2_TST_CTRL1 = 0x0158,
94 B2_TST_CTRL2 = 0x0159,
95 B2_GP_IO = 0x015c,
96 B2_I2C_CTRL = 0x0160,
97 B2_I2C_DATA = 0x0164,
98 B2_I2C_IRQ = 0x0168,
99 B2_I2C_SW = 0x016c,
100 B2_BSC_INI = 0x0170,
101 B2_BSC_VAL = 0x0174,
102 B2_BSC_CTRL = 0x0178,
103 B2_BSC_STAT = 0x0179,
104 B2_BSC_TST = 0x017a,
105
106 B3_RAM_ADDR = 0x0180,
107 B3_RAM_DATA_LO = 0x0184,
108 B3_RAM_DATA_HI = 0x0188,
109 B3_RI_WTO_R1 = 0x0190,
110 B3_RI_WTO_XA1 = 0x0191,
111 B3_RI_WTO_XS1 = 0x0192,
112 B3_RI_RTO_R1 = 0x0193,
113 B3_RI_RTO_XA1 = 0x0194,
114 B3_RI_RTO_XS1 = 0x0195,
115 B3_RI_WTO_R2 = 0x0196,
116 B3_RI_WTO_XA2 = 0x0197,
117 B3_RI_WTO_XS2 = 0x0198,
118 B3_RI_RTO_R2 = 0x0199,
119 B3_RI_RTO_XA2 = 0x019a,
120 B3_RI_RTO_XS2 = 0x019b,
121 B3_RI_TO_VAL = 0x019c,
122 B3_RI_CTRL = 0x01a0,
123 B3_RI_TEST = 0x01a2,
124 B3_MA_TOINI_RX1 = 0x01b0,
125 B3_MA_TOINI_RX2 = 0x01b1,
126 B3_MA_TOINI_TX1 = 0x01b2,
127 B3_MA_TOINI_TX2 = 0x01b3,
128 B3_MA_TOVAL_RX1 = 0x01b4,
129 B3_MA_TOVAL_RX2 = 0x01b5,
130 B3_MA_TOVAL_TX1 = 0x01b6,
131 B3_MA_TOVAL_TX2 = 0x01b7,
132 B3_MA_TO_CTRL = 0x01b8,
133 B3_MA_TO_TEST = 0x01ba,
134 B3_MA_RCINI_RX1 = 0x01c0,
135 B3_MA_RCINI_RX2 = 0x01c1,
136 B3_MA_RCINI_TX1 = 0x01c2,
137 B3_MA_RCINI_TX2 = 0x01c3,
138 B3_MA_RCVAL_RX1 = 0x01c4,
139 B3_MA_RCVAL_RX2 = 0x01c5,
140 B3_MA_RCVAL_TX1 = 0x01c6,
141 B3_MA_RCVAL_TX2 = 0x01c7,
142 B3_MA_RC_CTRL = 0x01c8,
143 B3_MA_RC_TEST = 0x01ca,
144 B3_PA_TOINI_RX1 = 0x01d0,
145 B3_PA_TOINI_RX2 = 0x01d4,
146 B3_PA_TOINI_TX1 = 0x01d8,
147 B3_PA_TOINI_TX2 = 0x01dc,
148 B3_PA_TOVAL_RX1 = 0x01e0,
149 B3_PA_TOVAL_RX2 = 0x01e4,
150 B3_PA_TOVAL_TX1 = 0x01e8,
151 B3_PA_TOVAL_TX2 = 0x01ec,
152 B3_PA_CTRL = 0x01f0,
153 B3_PA_TEST = 0x01f2,
154};
155
156/* B0_CTST 16 bit Control/Status register */
157enum {
158 CS_CLK_RUN_HOT = 1<<13,/* CLK_RUN hot m. (YUKON-Lite only) */
159 CS_CLK_RUN_RST = 1<<12,/* CLK_RUN reset (YUKON-Lite only) */
160 CS_CLK_RUN_ENA = 1<<11,/* CLK_RUN enable (YUKON-Lite only) */
161 CS_VAUX_AVAIL = 1<<10,/* VAUX available (YUKON only) */
162 CS_BUS_CLOCK = 1<<9, /* Bus Clock 0/1 = 33/66 MHz */
163 CS_BUS_SLOT_SZ = 1<<8, /* Slot Size 0/1 = 32/64 bit slot */
164 CS_ST_SW_IRQ = 1<<7, /* Set IRQ SW Request */
165 CS_CL_SW_IRQ = 1<<6, /* Clear IRQ SW Request */
166 CS_STOP_DONE = 1<<5, /* Stop Master is finished */
167 CS_STOP_MAST = 1<<4, /* Command Bit to stop the master */
168 CS_MRST_CLR = 1<<3, /* Clear Master reset */
169 CS_MRST_SET = 1<<2, /* Set Master reset */
170 CS_RST_CLR = 1<<1, /* Clear Software reset */
171 CS_RST_SET = 1, /* Set Software reset */
172
173/* B0_LED 8 Bit LED register */
174/* Bit 7.. 2: reserved */
175 LED_STAT_ON = 1<<1, /* Status LED on */
176 LED_STAT_OFF = 1, /* Status LED off */
177
178/* B0_POWER_CTRL 8 Bit Power Control reg (YUKON only) */
179 PC_VAUX_ENA = 1<<7, /* Switch VAUX Enable */
180 PC_VAUX_DIS = 1<<6, /* Switch VAUX Disable */
181 PC_VCC_ENA = 1<<5, /* Switch VCC Enable */
182 PC_VCC_DIS = 1<<4, /* Switch VCC Disable */
183 PC_VAUX_ON = 1<<3, /* Switch VAUX On */
184 PC_VAUX_OFF = 1<<2, /* Switch VAUX Off */
185 PC_VCC_ON = 1<<1, /* Switch VCC On */
186 PC_VCC_OFF = 1<<0, /* Switch VCC Off */
187};
188
189/* B2_IRQM_MSK 32 bit IRQ Moderation Mask */
190enum {
191 IS_ALL_MSK = 0xbffffffful, /* All Interrupt bits */
192 IS_HW_ERR = 1<<31, /* Interrupt HW Error */
193 /* Bit 30: reserved */
194 IS_PA_TO_RX1 = 1<<29, /* Packet Arb Timeout Rx1 */
195 IS_PA_TO_RX2 = 1<<28, /* Packet Arb Timeout Rx2 */
196 IS_PA_TO_TX1 = 1<<27, /* Packet Arb Timeout Tx1 */
197 IS_PA_TO_TX2 = 1<<26, /* Packet Arb Timeout Tx2 */
198 IS_I2C_READY = 1<<25, /* IRQ on end of I2C Tx */
199 IS_IRQ_SW = 1<<24, /* SW forced IRQ */
200 IS_EXT_REG = 1<<23, /* IRQ from LM80 or PHY (GENESIS only) */
201 /* IRQ from PHY (YUKON only) */
202 IS_TIMINT = 1<<22, /* IRQ from Timer */
203 IS_MAC1 = 1<<21, /* IRQ from MAC 1 */
204 IS_LNK_SYNC_M1 = 1<<20, /* Link Sync Cnt wrap MAC 1 */
205 IS_MAC2 = 1<<19, /* IRQ from MAC 2 */
206 IS_LNK_SYNC_M2 = 1<<18, /* Link Sync Cnt wrap MAC 2 */
207/* Receive Queue 1 */
208 IS_R1_B = 1<<17, /* Q_R1 End of Buffer */
209 IS_R1_F = 1<<16, /* Q_R1 End of Frame */
210 IS_R1_C = 1<<15, /* Q_R1 Encoding Error */
211/* Receive Queue 2 */
212 IS_R2_B = 1<<14, /* Q_R2 End of Buffer */
213 IS_R2_F = 1<<13, /* Q_R2 End of Frame */
214 IS_R2_C = 1<<12, /* Q_R2 Encoding Error */
215/* Synchronous Transmit Queue 1 */
216 IS_XS1_B = 1<<11, /* Q_XS1 End of Buffer */
217 IS_XS1_F = 1<<10, /* Q_XS1 End of Frame */
218 IS_XS1_C = 1<<9, /* Q_XS1 Encoding Error */
219/* Asynchronous Transmit Queue 1 */
220 IS_XA1_B = 1<<8, /* Q_XA1 End of Buffer */
221 IS_XA1_F = 1<<7, /* Q_XA1 End of Frame */
222 IS_XA1_C = 1<<6, /* Q_XA1 Encoding Error */
223/* Synchronous Transmit Queue 2 */
224 IS_XS2_B = 1<<5, /* Q_XS2 End of Buffer */
225 IS_XS2_F = 1<<4, /* Q_XS2 End of Frame */
226 IS_XS2_C = 1<<3, /* Q_XS2 Encoding Error */
227/* Asynchronous Transmit Queue 2 */
228 IS_XA2_B = 1<<2, /* Q_XA2 End of Buffer */
229 IS_XA2_F = 1<<1, /* Q_XA2 End of Frame */
230 IS_XA2_C = 1<<0, /* Q_XA2 Encoding Error */
231
232 IS_PORT_1 = IS_XA1_F| IS_R1_F| IS_MAC1,
233 IS_PORT_2 = IS_XA2_F| IS_R2_F| IS_MAC2,
234};
235
236
237/* B2_IRQM_HWE_MSK 32 bit IRQ Moderation HW Error Mask */
238enum {
239 IS_ERR_MSK = 0x00003fff,/* All Error bits */
240
241 IS_IRQ_TIST_OV = 1<<13, /* Time Stamp Timer Overflow (YUKON only) */
242 IS_IRQ_SENSOR = 1<<12, /* IRQ from Sensor (YUKON only) */
243 IS_IRQ_MST_ERR = 1<<11, /* IRQ master error detected */
244 IS_IRQ_STAT = 1<<10, /* IRQ status exception */
245 IS_NO_STAT_M1 = 1<<9, /* No Rx Status from MAC 1 */
246 IS_NO_STAT_M2 = 1<<8, /* No Rx Status from MAC 2 */
247 IS_NO_TIST_M1 = 1<<7, /* No Time Stamp from MAC 1 */
248 IS_NO_TIST_M2 = 1<<6, /* No Time Stamp from MAC 2 */
249 IS_RAM_RD_PAR = 1<<5, /* RAM Read Parity Error */
250 IS_RAM_WR_PAR = 1<<4, /* RAM Write Parity Error */
251 IS_M1_PAR_ERR = 1<<3, /* MAC 1 Parity Error */
252 IS_M2_PAR_ERR = 1<<2, /* MAC 2 Parity Error */
253 IS_R1_PAR_ERR = 1<<1, /* Queue R1 Parity Error */
254 IS_R2_PAR_ERR = 1<<0, /* Queue R2 Parity Error */
255};
256
257/* B2_TST_CTRL1 8 bit Test Control Register 1 */
258enum {
259 TST_FRC_DPERR_MR = 1<<7, /* force DATAPERR on MST RD */
260 TST_FRC_DPERR_MW = 1<<6, /* force DATAPERR on MST WR */
261 TST_FRC_DPERR_TR = 1<<5, /* force DATAPERR on TRG RD */
262 TST_FRC_DPERR_TW = 1<<4, /* force DATAPERR on TRG WR */
263 TST_FRC_APERR_M = 1<<3, /* force ADDRPERR on MST */
264 TST_FRC_APERR_T = 1<<2, /* force ADDRPERR on TRG */
265 TST_CFG_WRITE_ON = 1<<1, /* Enable Config Reg WR */
266 TST_CFG_WRITE_OFF= 1<<0, /* Disable Config Reg WR */
267};
268
269/* B2_MAC_CFG 8 bit MAC Configuration / Chip Revision */
270enum {
271 CFG_CHIP_R_MSK = 0xf<<4, /* Bit 7.. 4: Chip Revision */
272 /* Bit 3.. 2: reserved */
273 CFG_DIS_M2_CLK = 1<<1, /* Disable Clock for 2nd MAC */
274 CFG_SNG_MAC = 1<<0, /* MAC Config: 0=2 MACs / 1=1 MAC*/
275};
276
277/* B2_CHIP_ID 8 bit Chip Identification Number */
278enum {
279 CHIP_ID_GENESIS = 0x0a, /* Chip ID for GENESIS */
280 CHIP_ID_YUKON = 0xb0, /* Chip ID for YUKON */
281 CHIP_ID_YUKON_LITE = 0xb1, /* Chip ID for YUKON-Lite (Rev. A1-A3) */
282 CHIP_ID_YUKON_LP = 0xb2, /* Chip ID for YUKON-LP */
283 CHIP_ID_YUKON_XL = 0xb3, /* Chip ID for YUKON-2 XL */
284 CHIP_ID_YUKON_EC = 0xb6, /* Chip ID for YUKON-2 EC */
285 CHIP_ID_YUKON_FE = 0xb7, /* Chip ID for YUKON-2 FE */
286
287 CHIP_REV_YU_LITE_A1 = 3, /* Chip Rev. for YUKON-Lite A1,A2 */
288 CHIP_REV_YU_LITE_A3 = 7, /* Chip Rev. for YUKON-Lite A3 */
289};
290
291/* B2_LD_TEST 8 bit EPROM loader test register */
292enum {
293 LD_T_ON = 1<<3, /* Loader Test mode on */
294 LD_T_OFF = 1<<2, /* Loader Test mode off */
295 LD_T_STEP = 1<<1, /* Decrement FPROM addr. Counter */
296 LD_START = 1<<0, /* Start loading FPROM */
297};
298
299/* B2_TI_CTRL 8 bit Timer control */
300/* B2_IRQM_CTRL 8 bit IRQ Moderation Timer Control */
301enum {
302 TIM_START = 1<<2, /* Start Timer */
303 TIM_STOP = 1<<1, /* Stop Timer */
304 TIM_CLR_IRQ = 1<<0, /* Clear Timer IRQ (!IRQM) */
305};
306
307/* B2_TI_TEST 8 Bit Timer Test */
308/* B2_IRQM_TEST 8 bit IRQ Moderation Timer Test */
309/* B28_DPT_TST 8 bit Descriptor Poll Timer Test Reg */
310enum {
311 TIM_T_ON = 1<<2, /* Test mode on */
312 TIM_T_OFF = 1<<1, /* Test mode off */
313 TIM_T_STEP = 1<<0, /* Test step */
314};
315
316/* B28_DPT_INI 32 bit Descriptor Poll Timer Init Val */
317/* B28_DPT_VAL 32 bit Descriptor Poll Timer Curr Val */
318/* B28_DPT_CTRL 8 bit Descriptor Poll Timer Ctrl Reg */
319enum {
320 DPT_MSK = 0x00ffffffL, /* Bit 23.. 0: Desc Poll Timer Bits */
321
322 DPT_START = 1<<1, /* Start Descriptor Poll Timer */
323 DPT_STOP = 1<<0, /* Stop Descriptor Poll Timer */
324};
325
326/* B2_GP_IO 32 bit General Purpose I/O Register */
327enum {
328 GP_DIR_9 = 1<<25, /* IO_9 direct, 0=In/1=Out */
329 GP_DIR_8 = 1<<24, /* IO_8 direct, 0=In/1=Out */
330 GP_DIR_7 = 1<<23, /* IO_7 direct, 0=In/1=Out */
331 GP_DIR_6 = 1<<22, /* IO_6 direct, 0=In/1=Out */
332 GP_DIR_5 = 1<<21, /* IO_5 direct, 0=In/1=Out */
333 GP_DIR_4 = 1<<20, /* IO_4 direct, 0=In/1=Out */
334 GP_DIR_3 = 1<<19, /* IO_3 direct, 0=In/1=Out */
335 GP_DIR_2 = 1<<18, /* IO_2 direct, 0=In/1=Out */
336 GP_DIR_1 = 1<<17, /* IO_1 direct, 0=In/1=Out */
337 GP_DIR_0 = 1<<16, /* IO_0 direct, 0=In/1=Out */
338
339 GP_IO_9 = 1<<9, /* IO_9 pin */
340 GP_IO_8 = 1<<8, /* IO_8 pin */
341 GP_IO_7 = 1<<7, /* IO_7 pin */
342 GP_IO_6 = 1<<6, /* IO_6 pin */
343 GP_IO_5 = 1<<5, /* IO_5 pin */
344 GP_IO_4 = 1<<4, /* IO_4 pin */
345 GP_IO_3 = 1<<3, /* IO_3 pin */
346 GP_IO_2 = 1<<2, /* IO_2 pin */
347 GP_IO_1 = 1<<1, /* IO_1 pin */
348 GP_IO_0 = 1<<0, /* IO_0 pin */
349};
350
351/* Rx/Tx Path related Arbiter Test Registers */
352/* B3_MA_TO_TEST 16 bit MAC Arbiter Timeout Test Reg */
353/* B3_MA_RC_TEST 16 bit MAC Arbiter Recovery Test Reg */
354/* B3_PA_TEST 16 bit Packet Arbiter Test Register */
355/* Bit 15, 11, 7, and 3 are reserved in B3_PA_TEST */
356enum {
357	TX2_T_EV	= 1<<15,/* TX2 Timeout/Recv Event occurred */
358 TX2_T_ON = 1<<14,/* TX2 Timeout/Recv Timer Test On */
359 TX2_T_OFF = 1<<13,/* TX2 Timeout/Recv Timer Tst Off */
360 TX2_T_STEP = 1<<12,/* TX2 Timeout/Recv Timer Step */
361	TX1_T_EV	= 1<<11,/* TX1 Timeout/Recv Event occurred */
362 TX1_T_ON = 1<<10,/* TX1 Timeout/Recv Timer Test On */
363 TX1_T_OFF = 1<<9, /* TX1 Timeout/Recv Timer Tst Off */
364 TX1_T_STEP = 1<<8, /* TX1 Timeout/Recv Timer Step */
365	RX2_T_EV	= 1<<7,	/* RX2 Timeout/Recv Event occurred */
366 RX2_T_ON = 1<<6, /* RX2 Timeout/Recv Timer Test On */
367 RX2_T_OFF = 1<<5, /* RX2 Timeout/Recv Timer Tst Off */
368 RX2_T_STEP = 1<<4, /* RX2 Timeout/Recv Timer Step */
369	RX1_T_EV	= 1<<3,	/* RX1 Timeout/Recv Event occurred */
370 RX1_T_ON = 1<<2, /* RX1 Timeout/Recv Timer Test On */
371 RX1_T_OFF = 1<<1, /* RX1 Timeout/Recv Timer Tst Off */
372 RX1_T_STEP = 1<<0, /* RX1 Timeout/Recv Timer Step */
373};
374
375/* Descriptor Bit Definition */
376/* TxCtrl Transmit Buffer Control Field */
377/* RxCtrl Receive Buffer Control Field */
378enum {
379 BMU_OWN = 1<<31, /* OWN bit: 0=host/1=BMU */
380 BMU_STF = 1<<30, /* Start of Frame */
381 BMU_EOF = 1<<29, /* End of Frame */
382 BMU_IRQ_EOB = 1<<28, /* Req "End of Buffer" IRQ */
383 BMU_IRQ_EOF = 1<<27, /* Req "End of Frame" IRQ */
384 /* TxCtrl specific bits */
385 BMU_STFWD = 1<<26, /* (Tx) Store & Forward Frame */
386 BMU_NO_FCS = 1<<25, /* (Tx) Disable MAC FCS (CRC) generation */
387 BMU_SW = 1<<24, /* (Tx) 1 bit res. for SW use */
388 /* RxCtrl specific bits */
389 BMU_DEV_0 = 1<<26, /* (Rx) Transfer data to Dev0 */
390 BMU_STAT_VAL = 1<<25, /* (Rx) Rx Status Valid */
391 BMU_TIST_VAL = 1<<24, /* (Rx) Rx TimeStamp Valid */
392 /* Bit 23..16: BMU Check Opcodes */
393 BMU_CHECK = 0x55<<16, /* Default BMU check */
394 BMU_TCP_CHECK = 0x56<<16, /* Descr with TCP ext */
395 BMU_UDP_CHECK = 0x57<<16, /* Descr with UDP ext (YUKON only) */
396 BMU_BBC = 0xffffL, /* Bit 15.. 0: Buffer Byte Counter */
397};
398
399/* B2_BSC_CTRL 8 bit Blink Source Counter Control */
400enum {
401 BSC_START = 1<<1, /* Start Blink Source Counter */
402 BSC_STOP = 1<<0, /* Stop Blink Source Counter */
403};
404
405/* B2_BSC_STAT 8 bit Blink Source Counter Status */
406enum {
407 BSC_SRC = 1<<0, /* Blink Source, 0=Off / 1=On */
408};
409
410/* B2_BSC_TST 16 bit Blink Source Counter Test Reg */
411enum {
412 BSC_T_ON = 1<<2, /* Test mode on */
413 BSC_T_OFF = 1<<1, /* Test mode off */
414 BSC_T_STEP = 1<<0, /* Test step */
415};
416
417/* B3_RAM_ADDR 32 bit RAM Address, to read or write */
418 /* Bit 31..19: reserved */
419#define RAM_ADR_RAN 0x0007ffffL /* Bit 18.. 0: RAM Address Range */
420/* RAM Interface Registers */
421
422/* B3_RI_CTRL 16 bit RAM Iface Control Register */
423enum {
424 RI_CLR_RD_PERR = 1<<9, /* Clear IRQ RAM Read Parity Err */
425 RI_CLR_WR_PERR = 1<<8, /* Clear IRQ RAM Write Parity Err*/
426
427 RI_RST_CLR = 1<<1, /* Clear RAM Interface Reset */
428 RI_RST_SET = 1<<0, /* Set RAM Interface Reset */
429};
430
431/* B3_RI_TEST 8 bit RAM Iface Test Register */
432enum {
433	RI_T_EV	= 1<<3,	/* Timeout Event occurred */
434 RI_T_ON = 1<<2, /* Timeout Timer Test On */
435 RI_T_OFF = 1<<1, /* Timeout Timer Test Off */
436 RI_T_STEP = 1<<0, /* Timeout Timer Step */
437};
438
439/* MAC Arbiter Registers */
440/* B3_MA_TO_CTRL 16 bit MAC Arbiter Timeout Ctrl Reg */
441enum {
442 MA_FOE_ON = 1<<3, /* XMAC Fast Output Enable ON */
443 MA_FOE_OFF = 1<<2, /* XMAC Fast Output Enable OFF */
444 MA_RST_CLR = 1<<1, /* Clear MAC Arbiter Reset */
445 MA_RST_SET = 1<<0, /* Set MAC Arbiter Reset */
446
447};
448
449/* Timeout values */
450#define SK_MAC_TO_53 72 /* MAC arbiter timeout */
451#define SK_PKT_TO_53 0x2000 /* Packet arbiter timeout */
452#define SK_PKT_TO_MAX 0xffff /* Maximum value */
453#define SK_RI_TO_53 36 /* RAM interface timeout */
454
455
456/* B3_MA_RC_CTRL 16 bit MAC Arbiter Recovery Ctrl Reg */
457enum {
458 MA_ENA_REC_TX2 = 1<<7, /* Enable Recovery Timer TX2 */
459 MA_DIS_REC_TX2 = 1<<6, /* Disable Recovery Timer TX2 */
460 MA_ENA_REC_TX1 = 1<<5, /* Enable Recovery Timer TX1 */
461 MA_DIS_REC_TX1 = 1<<4, /* Disable Recovery Timer TX1 */
462 MA_ENA_REC_RX2 = 1<<3, /* Enable Recovery Timer RX2 */
463 MA_DIS_REC_RX2 = 1<<2, /* Disable Recovery Timer RX2 */
464 MA_ENA_REC_RX1 = 1<<1, /* Enable Recovery Timer RX1 */
465 MA_DIS_REC_RX1 = 1<<0, /* Disable Recovery Timer RX1 */
466};
467
468/* Packet Arbiter Registers */
469/* B3_PA_CTRL 16 bit Packet Arbiter Ctrl Register */
470enum {
471 PA_CLR_TO_TX2 = 1<<13, /* Clear IRQ Packet Timeout TX2 */
472 PA_CLR_TO_TX1 = 1<<12, /* Clear IRQ Packet Timeout TX1 */
473 PA_CLR_TO_RX2 = 1<<11, /* Clear IRQ Packet Timeout RX2 */
474 PA_CLR_TO_RX1 = 1<<10, /* Clear IRQ Packet Timeout RX1 */
475 PA_ENA_TO_TX2 = 1<<9, /* Enable Timeout Timer TX2 */
476 PA_DIS_TO_TX2 = 1<<8, /* Disable Timeout Timer TX2 */
477 PA_ENA_TO_TX1 = 1<<7, /* Enable Timeout Timer TX1 */
478 PA_DIS_TO_TX1 = 1<<6, /* Disable Timeout Timer TX1 */
479 PA_ENA_TO_RX2 = 1<<5, /* Enable Timeout Timer RX2 */
480 PA_DIS_TO_RX2 = 1<<4, /* Disable Timeout Timer RX2 */
481 PA_ENA_TO_RX1 = 1<<3, /* Enable Timeout Timer RX1 */
482 PA_DIS_TO_RX1 = 1<<2, /* Disable Timeout Timer RX1 */
483 PA_RST_CLR = 1<<1, /* Clear MAC Arbiter Reset */
484 PA_RST_SET = 1<<0, /* Set MAC Arbiter Reset */
485};
486
487#define PA_ENA_TO_ALL (PA_ENA_TO_RX1 | PA_ENA_TO_RX2 |\
488 PA_ENA_TO_TX1 | PA_ENA_TO_TX2)
489
490
491/* Transmit Arbiter Registers MAC 1 and 2, use MR_ADDR() to access */
492/* TXA_ITI_INI 32 bit Tx Arb Interval Timer Init Val */
493/* TXA_ITI_VAL 32 bit Tx Arb Interval Timer Value */
494/* TXA_LIM_INI 32 bit Tx Arb Limit Counter Init Val */
495/* TXA_LIM_VAL 32 bit Tx Arb Limit Counter Value */
496
497#define TXA_MAX_VAL 0x00ffffffUL /* Bit 23.. 0: Max TXA Timer/Cnt Val */
498
499/* TXA_CTRL 8 bit Tx Arbiter Control Register */
500enum {
501 TXA_ENA_FSYNC = 1<<7, /* Enable force of sync Tx queue */
502 TXA_DIS_FSYNC = 1<<6, /* Disable force of sync Tx queue */
503 TXA_ENA_ALLOC = 1<<5, /* Enable alloc of free bandwidth */
504 TXA_DIS_ALLOC = 1<<4, /* Disable alloc of free bandwidth */
505 TXA_START_RC = 1<<3, /* Start sync Rate Control */
506 TXA_STOP_RC = 1<<2, /* Stop sync Rate Control */
507 TXA_ENA_ARB = 1<<1, /* Enable Tx Arbiter */
508 TXA_DIS_ARB = 1<<0, /* Disable Tx Arbiter */
509};
510
511/*
512 * Bank 4 - 5
513 */
514/* Transmit Arbiter Registers MAC 1 and 2, use MR_ADDR() to access */
515enum {
516 TXA_ITI_INI = 0x0200,/* 32 bit Tx Arb Interval Timer Init Val*/
517 TXA_ITI_VAL = 0x0204,/* 32 bit Tx Arb Interval Timer Value */
518 TXA_LIM_INI = 0x0208,/* 32 bit Tx Arb Limit Counter Init Val */
519 TXA_LIM_VAL = 0x020c,/* 32 bit Tx Arb Limit Counter Value */
520 TXA_CTRL = 0x0210,/* 8 bit Tx Arbiter Control Register */
521 TXA_TEST = 0x0211,/* 8 bit Tx Arbiter Test Register */
522 TXA_STAT = 0x0212,/* 8 bit Tx Arbiter Status Register */
523};
524
525
526enum {
527 B6_EXT_REG = 0x0300,/* External registers (GENESIS only) */
528 B7_CFG_SPC = 0x0380,/* copy of the Configuration register */
529 B8_RQ1_REGS = 0x0400,/* Receive Queue 1 */
530 B8_RQ2_REGS = 0x0480,/* Receive Queue 2 */
531 B8_TS1_REGS = 0x0600,/* Transmit sync queue 1 */
532 B8_TA1_REGS = 0x0680,/* Transmit async queue 1 */
533 B8_TS2_REGS = 0x0700,/* Transmit sync queue 2 */
534	B8_TA2_REGS	= 0x0780,/* Transmit async queue 2 */
535 B16_RAM_REGS = 0x0800,/* RAM Buffer Registers */
536};
537
538/* Queue Register Offsets, use Q_ADDR() to access */
539enum {
540 B8_Q_REGS = 0x0400, /* base of Queue registers */
541 Q_D = 0x00, /* 8*32 bit Current Descriptor */
542 Q_DA_L = 0x20, /* 32 bit Current Descriptor Address Low dWord */
543 Q_DA_H = 0x24, /* 32 bit Current Descriptor Address High dWord */
544 Q_AC_L = 0x28, /* 32 bit Current Address Counter Low dWord */
545 Q_AC_H = 0x2c, /* 32 bit Current Address Counter High dWord */
546 Q_BC = 0x30, /* 32 bit Current Byte Counter */
547 Q_CSR = 0x34, /* 32 bit BMU Control/Status Register */
548 Q_F = 0x38, /* 32 bit Flag Register */
549 Q_T1 = 0x3c, /* 32 bit Test Register 1 */
550 Q_T1_TR = 0x3c, /* 8 bit Test Register 1 Transfer SM */
551 Q_T1_WR = 0x3d, /* 8 bit Test Register 1 Write Descriptor SM */
552 Q_T1_RD = 0x3e, /* 8 bit Test Register 1 Read Descriptor SM */
553 Q_T1_SV = 0x3f, /* 8 bit Test Register 1 Supervisor SM */
554 Q_T2 = 0x40, /* 32 bit Test Register 2 */
555 Q_T3 = 0x44, /* 32 bit Test Register 3 */
556
557/* Yukon-2 */
558 Q_DONE = 0x24, /* 16 bit Done Index (Yukon-2 only) */
559 Q_WM = 0x40, /* 16 bit FIFO Watermark */
560 Q_AL = 0x42, /* 8 bit FIFO Alignment */
561 Q_RSP = 0x44, /* 16 bit FIFO Read Shadow Pointer */
562 Q_RSL = 0x46, /* 8 bit FIFO Read Shadow Level */
563 Q_RP = 0x48, /* 8 bit FIFO Read Pointer */
564 Q_RL = 0x4a, /* 8 bit FIFO Read Level */
565 Q_WP = 0x4c, /* 8 bit FIFO Write Pointer */
566 Q_WSP = 0x4d, /* 8 bit FIFO Write Shadow Pointer */
567 Q_WL = 0x4e, /* 8 bit FIFO Write Level */
568 Q_WSL = 0x4f, /* 8 bit FIFO Write Shadow Level */
569};
570#define Q_ADDR(reg, offs) (B8_Q_REGS + (reg) + (offs))
571
572/* RAM Buffer Register Offsets */
573enum {
574
575 RB_START = 0x00,/* 32 bit RAM Buffer Start Address */
576 RB_END = 0x04,/* 32 bit RAM Buffer End Address */
577 RB_WP = 0x08,/* 32 bit RAM Buffer Write Pointer */
578 RB_RP = 0x0c,/* 32 bit RAM Buffer Read Pointer */
579 RB_RX_UTPP = 0x10,/* 32 bit Rx Upper Threshold, Pause Packet */
580 RB_RX_LTPP = 0x14,/* 32 bit Rx Lower Threshold, Pause Packet */
581 RB_RX_UTHP = 0x18,/* 32 bit Rx Upper Threshold, High Prio */
582 RB_RX_LTHP = 0x1c,/* 32 bit Rx Lower Threshold, High Prio */
583 /* 0x10 - 0x1f: reserved at Tx RAM Buffer Registers */
584 RB_PC = 0x20,/* 32 bit RAM Buffer Packet Counter */
585 RB_LEV = 0x24,/* 32 bit RAM Buffer Level Register */
586 RB_CTRL = 0x28,/* 32 bit RAM Buffer Control Register */
587 RB_TST1 = 0x29,/* 8 bit RAM Buffer Test Register 1 */
588 RB_TST2 = 0x2a,/* 8 bit RAM Buffer Test Register 2 */
589};
590
591/* Receive and Transmit Queues */
592enum {
593 Q_R1 = 0x0000, /* Receive Queue 1 */
594 Q_R2 = 0x0080, /* Receive Queue 2 */
595 Q_XS1 = 0x0200, /* Synchronous Transmit Queue 1 */
596 Q_XA1 = 0x0280, /* Asynchronous Transmit Queue 1 */
597 Q_XS2 = 0x0300, /* Synchronous Transmit Queue 2 */
598 Q_XA2 = 0x0380, /* Asynchronous Transmit Queue 2 */
599};
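/*
 * Usage sketch (editor's example, not part of the register spec): a
 * per-queue register is reached by adding the queue base above and the
 * register offset to B8_Q_REGS, which is exactly what Q_ADDR() does.
 * For instance, the BMU Control/Status register of Receive Queue 1 is
 * Q_ADDR(Q_R1, Q_CSR) = 0x0400 + 0x0000 + 0x34 = 0x0434.
 */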
600
601/* Different MAC Types */
602enum {
603 SK_MAC_XMAC = 0, /* Xaqti XMAC II */
604 SK_MAC_GMAC = 1, /* Marvell GMAC */
605};
606
607/* Different PHY Types */
608enum {
609 SK_PHY_XMAC = 0,/* integrated in XMAC II */
610 SK_PHY_BCOM = 1,/* Broadcom BCM5400 */
611 SK_PHY_LONE = 2,/* Level One LXT1000 [not supported]*/
612 SK_PHY_NAT = 3,/* National DP83891 [not supported] */
613 SK_PHY_MARV_COPPER= 4,/* Marvell 88E1011S */
614 SK_PHY_MARV_FIBER = 5,/* Marvell 88E1011S working on fiber */
615};
616
617/* PHY addresses (bits 12..8 of PHY address reg) */
618enum {
619 PHY_ADDR_XMAC = 0<<8,
620 PHY_ADDR_BCOM = 1<<8,
621 PHY_ADDR_LONE = 3<<8,
622 PHY_ADDR_NAT = 0<<8,
623/* GPHY address (bits 15..11 of SMI control reg) */
624 PHY_ADDR_MARV = 0,
625};
626
627#define RB_ADDR(offs, queue) (B16_RAM_REGS + (queue) + (offs))
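/*
 * Usage sketch (editor's example): RAM buffer registers live at
 * B16_RAM_REGS plus the queue base plus the register offset, e.g.
 * RB_ADDR(RB_END, Q_R2) = 0x0800 + 0x0080 + 0x04 = 0x0884.
 */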
628
629/* Receive MAC FIFO, Receive LED, and Link_Sync regs (GENESIS only) */
630enum {
631 RX_MFF_EA = 0x0c00,/* 32 bit Receive MAC FIFO End Address */
632 RX_MFF_WP = 0x0c04,/* 32 bit Receive MAC FIFO Write Pointer */
633
634 RX_MFF_RP = 0x0c0c,/* 32 bit Receive MAC FIFO Read Pointer */
635 RX_MFF_PC = 0x0c10,/* 32 bit Receive MAC FIFO Packet Cnt */
636 RX_MFF_LEV = 0x0c14,/* 32 bit Receive MAC FIFO Level */
637 RX_MFF_CTRL1 = 0x0c18,/* 16 bit Receive MAC FIFO Control Reg 1*/
638 RX_MFF_STAT_TO = 0x0c1a,/* 8 bit Receive MAC Status Timeout */
639 RX_MFF_TIST_TO = 0x0c1b,/* 8 bit Receive MAC Time Stamp Timeout */
640 RX_MFF_CTRL2 = 0x0c1c,/* 8 bit Receive MAC FIFO Control Reg 2*/
641 RX_MFF_TST1 = 0x0c1d,/* 8 bit Receive MAC FIFO Test Reg 1 */
642 RX_MFF_TST2 = 0x0c1e,/* 8 bit Receive MAC FIFO Test Reg 2 */
643
644 RX_LED_INI = 0x0c20,/* 32 bit Receive LED Cnt Init Value */
645 RX_LED_VAL = 0x0c24,/* 32 bit Receive LED Cnt Current Value */
646 RX_LED_CTRL = 0x0c28,/* 8 bit Receive LED Cnt Control Reg */
647 RX_LED_TST = 0x0c29,/* 8 bit Receive LED Cnt Test Register */
648
649 LNK_SYNC_INI = 0x0c30,/* 32 bit Link Sync Cnt Init Value */
650 LNK_SYNC_VAL = 0x0c34,/* 32 bit Link Sync Cnt Current Value */
651 LNK_SYNC_CTRL = 0x0c38,/* 8 bit Link Sync Cnt Control Register */
652 LNK_SYNC_TST = 0x0c39,/* 8 bit Link Sync Cnt Test Register */
653 LNK_LED_REG = 0x0c3c,/* 8 bit Link LED Register */
654};
655
656/* Receive and Transmit MAC FIFO Registers (GENESIS only) */
657/* RX_MFF_CTRL1 16 bit Receive MAC FIFO Control Reg 1 */
658enum {
659 MFF_ENA_RDY_PAT = 1<<13, /* Enable Ready Patch */
660 MFF_DIS_RDY_PAT = 1<<12, /* Disable Ready Patch */
661 MFF_ENA_TIM_PAT = 1<<11, /* Enable Timing Patch */
662 MFF_DIS_TIM_PAT = 1<<10, /* Disable Timing Patch */
663	MFF_ENA_ALM_FUL	= 1<<9,	/* Enable Almost Full Signal */
664	MFF_DIS_ALM_FUL	= 1<<8,	/* Disable Almost Full Signal */
665 MFF_ENA_PAUSE = 1<<7, /* Enable Pause Signaling */
666 MFF_DIS_PAUSE = 1<<6, /* Disable Pause Signaling */
667 MFF_ENA_FLUSH = 1<<5, /* Enable Frame Flushing */
668 MFF_DIS_FLUSH = 1<<4, /* Disable Frame Flushing */
669 MFF_ENA_TIST = 1<<3, /* Enable Time Stamp Gener */
670 MFF_DIS_TIST = 1<<2, /* Disable Time Stamp Gener */
671 MFF_CLR_INTIST = 1<<1, /* Clear IRQ No Time Stamp */
672 MFF_CLR_INSTAT = 1<<0, /* Clear IRQ No Status */
673#define MFF_RX_CTRL_DEF MFF_ENA_TIM_PAT
674};
675
676/* TX_MFF_CTRL1 16 bit Transmit MAC FIFO Control Reg 1 */
677enum {
678 MFF_CLR_PERR = 1<<15, /* Clear Parity Error IRQ */
679 /* Bit 14: reserved */
680 MFF_ENA_PKT_REC = 1<<13, /* Enable Packet Recovery */
681 MFF_DIS_PKT_REC = 1<<12, /* Disable Packet Recovery */
682
683 MFF_ENA_W4E = 1<<7, /* Enable Wait for Empty */
684 MFF_DIS_W4E = 1<<6, /* Disable Wait for Empty */
685
686 MFF_ENA_LOOPB = 1<<3, /* Enable Loopback */
687 MFF_DIS_LOOPB = 1<<2, /* Disable Loopback */
688 MFF_CLR_MAC_RST = 1<<1, /* Clear XMAC Reset */
689 MFF_SET_MAC_RST = 1<<0, /* Set XMAC Reset */
690};
691
692#define MFF_TX_CTRL_DEF (MFF_ENA_PKT_REC | MFF_ENA_TIM_PAT | MFF_ENA_FLUSH)
693
694/* RX_MFF_TST2 8 bit Receive MAC FIFO Test Register 2 */
695/* TX_MFF_TST2 8 bit Transmit MAC FIFO Test Register 2 */
696enum {
697 MFF_WSP_T_ON = 1<<6, /* Tx: Write Shadow Ptr TestOn */
698 MFF_WSP_T_OFF = 1<<5, /* Tx: Write Shadow Ptr TstOff */
699 MFF_WSP_INC = 1<<4, /* Tx: Write Shadow Ptr Increment */
700 MFF_PC_DEC = 1<<3, /* Packet Counter Decrement */
701 MFF_PC_T_ON = 1<<2, /* Packet Counter Test On */
702 MFF_PC_T_OFF = 1<<1, /* Packet Counter Test Off */
703 MFF_PC_INC = 1<<0, /* Packet Counter Increment */
704};
705
706/* RX_MFF_TST1 8 bit Receive MAC FIFO Test Register 1 */
707/* TX_MFF_TST1 8 bit Transmit MAC FIFO Test Register 1 */
708enum {
709 MFF_WP_T_ON = 1<<6, /* Write Pointer Test On */
710 MFF_WP_T_OFF = 1<<5, /* Write Pointer Test Off */
711 MFF_WP_INC = 1<<4, /* Write Pointer Increm */
712
713 MFF_RP_T_ON = 1<<2, /* Read Pointer Test On */
714 MFF_RP_T_OFF = 1<<1, /* Read Pointer Test Off */
715 MFF_RP_DEC = 1<<0, /* Read Pointer Decrement */
716};
717
718/* RX_MFF_CTRL2 8 bit Receive MAC FIFO Control Reg 2 */
719/* TX_MFF_CTRL2 8 bit Transmit MAC FIFO Control Reg 2 */
720enum {
721 MFF_ENA_OP_MD = 1<<3, /* Enable Operation Mode */
722 MFF_DIS_OP_MD = 1<<2, /* Disable Operation Mode */
723 MFF_RST_CLR = 1<<1, /* Clear MAC FIFO Reset */
724 MFF_RST_SET = 1<<0, /* Set MAC FIFO Reset */
725};
726
727
728/* Link LED Counter Registers (GENESIS only) */
729
730/* RX_LED_CTRL 8 bit Receive LED Cnt Control Reg */
731/* TX_LED_CTRL 8 bit Transmit LED Cnt Control Reg */
732/* LNK_SYNC_CTRL 8 bit Link Sync Cnt Control Register */
733enum {
734 LED_START = 1<<2, /* Start Timer */
735 LED_STOP = 1<<1, /* Stop Timer */
736 LED_STATE = 1<<0, /* Rx/Tx: LED State, 1=LED on */
737};
738
739/* RX_LED_TST 8 bit Receive LED Cnt Test Register */
740/* TX_LED_TST 8 bit Transmit LED Cnt Test Register */
741/* LNK_SYNC_TST 8 bit Link Sync Cnt Test Register */
742enum {
743 LED_T_ON = 1<<2, /* LED Counter Test mode On */
744 LED_T_OFF = 1<<1, /* LED Counter Test mode Off */
745 LED_T_STEP = 1<<0, /* LED Counter Step */
746};
747
748/* LNK_LED_REG 8 bit Link LED Register */
749enum {
750 LED_BLK_ON = 1<<5, /* Link LED Blinking On */
751 LED_BLK_OFF = 1<<4, /* Link LED Blinking Off */
752 LED_SYNC_ON = 1<<3, /* Use Sync Wire to switch LED */
753 LED_SYNC_OFF = 1<<2, /* Disable Sync Wire Input */
754 LED_ON = 1<<1, /* switch LED on */
755 LED_OFF = 1<<0, /* switch LED off */
756};
757
758/* Receive GMAC FIFO (YUKON and Yukon-2) */
759enum {
760 RX_GMF_EA = 0x0c40,/* 32 bit Rx GMAC FIFO End Address */
761 RX_GMF_AF_THR = 0x0c44,/* 32 bit Rx GMAC FIFO Almost Full Thresh. */
762 RX_GMF_CTRL_T = 0x0c48,/* 32 bit Rx GMAC FIFO Control/Test */
763 RX_GMF_FL_MSK = 0x0c4c,/* 32 bit Rx GMAC FIFO Flush Mask */
764 RX_GMF_FL_THR = 0x0c50,/* 32 bit Rx GMAC FIFO Flush Threshold */
765 RX_GMF_TR_THR = 0x0c54,/* 32 bit Rx Truncation Threshold (Yukon-2) */
766
767 RX_GMF_VLAN = 0x0c5c,/* 32 bit Rx VLAN Type Register (Yukon-2) */
768 RX_GMF_WP = 0x0c60,/* 32 bit Rx GMAC FIFO Write Pointer */
769
770 RX_GMF_WLEV = 0x0c68,/* 32 bit Rx GMAC FIFO Write Level */
771
772 RX_GMF_RP = 0x0c70,/* 32 bit Rx GMAC FIFO Read Pointer */
773
774 RX_GMF_RLEV = 0x0c78,/* 32 bit Rx GMAC FIFO Read Level */
775};
776
777
778/* TXA_TEST 8 bit Tx Arbiter Test Register */
779enum {
780 TXA_INT_T_ON = 1<<5, /* Tx Arb Interval Timer Test On */
781 TXA_INT_T_OFF = 1<<4, /* Tx Arb Interval Timer Test Off */
782 TXA_INT_T_STEP = 1<<3, /* Tx Arb Interval Timer Step */
783 TXA_LIM_T_ON = 1<<2, /* Tx Arb Limit Timer Test On */
784 TXA_LIM_T_OFF = 1<<1, /* Tx Arb Limit Timer Test Off */
785 TXA_LIM_T_STEP = 1<<0, /* Tx Arb Limit Timer Step */
786};
787
788/* TXA_STAT 8 bit Tx Arbiter Status Register */
789enum {
790 TXA_PRIO_XS = 1<<0, /* sync queue has prio to send */
791};
792
793
794/* Q_BC 32 bit Current Byte Counter */
795
796/* BMU Control Status Registers */
797/* B0_R1_CSR 32 bit BMU Ctrl/Stat Rx Queue 1 */
798/* B0_R2_CSR 32 bit BMU Ctrl/Stat Rx Queue 2 */
799/* B0_XA1_CSR	32 bit	BMU Ctrl/Stat Async Tx Queue 1 */
800/* B0_XS1_CSR	32 bit	BMU Ctrl/Stat Sync Tx Queue 1 */
801/* B0_XA2_CSR	32 bit	BMU Ctrl/Stat Async Tx Queue 2 */
802/* B0_XS2_CSR	32 bit	BMU Ctrl/Stat Sync Tx Queue 2 */
803/* Q_CSR 32 bit BMU Control/Status Register */
804
805enum {
806 CSR_SV_IDLE = 1<<24, /* BMU SM Idle */
807
808 CSR_DESC_CLR = 1<<21, /* Clear Reset for Descr */
809 CSR_DESC_SET = 1<<20, /* Set Reset for Descr */
810 CSR_FIFO_CLR = 1<<19, /* Clear Reset for FIFO */
811 CSR_FIFO_SET = 1<<18, /* Set Reset for FIFO */
812 CSR_HPI_RUN = 1<<17, /* Release HPI SM */
813 CSR_HPI_RST = 1<<16, /* Reset HPI SM to Idle */
814 CSR_SV_RUN = 1<<15, /* Release Supervisor SM */
815 CSR_SV_RST = 1<<14, /* Reset Supervisor SM */
816 CSR_DREAD_RUN = 1<<13, /* Release Descr Read SM */
817 CSR_DREAD_RST = 1<<12, /* Reset Descr Read SM */
818 CSR_DWRITE_RUN = 1<<11, /* Release Descr Write SM */
819 CSR_DWRITE_RST = 1<<10, /* Reset Descr Write SM */
820 CSR_TRANS_RUN = 1<<9, /* Release Transfer SM */
821 CSR_TRANS_RST = 1<<8, /* Reset Transfer SM */
822 CSR_ENA_POL = 1<<7, /* Enable Descr Polling */
823 CSR_DIS_POL = 1<<6, /* Disable Descr Polling */
824 CSR_STOP = 1<<5, /* Stop Rx/Tx Queue */
825 CSR_START = 1<<4, /* Start Rx/Tx Queue */
826 CSR_IRQ_CL_P = 1<<3, /* (Rx) Clear Parity IRQ */
827 CSR_IRQ_CL_B = 1<<2, /* Clear EOB IRQ */
828 CSR_IRQ_CL_F = 1<<1, /* Clear EOF IRQ */
829 CSR_IRQ_CL_C = 1<<0, /* Clear ERR IRQ */
830};
831
832#define CSR_SET_RESET (CSR_DESC_SET | CSR_FIFO_SET | CSR_HPI_RST |\
833 CSR_SV_RST | CSR_DREAD_RST | CSR_DWRITE_RST |\
834 CSR_TRANS_RST)
835#define CSR_CLR_RESET (CSR_DESC_CLR | CSR_FIFO_CLR | CSR_HPI_RUN |\
836 CSR_SV_RUN | CSR_DREAD_RUN | CSR_DWRITE_RUN |\
837 CSR_TRANS_RUN)
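/*
 * Typical BMU bring-up sketch (an assumption based on the bit meanings
 * above, not a definitive sequence; skge_write32() and the hw handle are
 * assumed to be the driver's 32-bit register write helper declared
 * elsewhere):
 *
 *	skge_write32(hw, Q_ADDR(Q_R1, Q_CSR), CSR_CLR_RESET);
 *	skge_write32(hw, Q_ADDR(Q_R1, Q_CSR), CSR_START | CSR_IRQ_CL_F);
 *
 * i.e. release all state machines first, then start the queue.
 */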
838
839/* Q_F 32 bit Flag Register */
840enum {
841 F_ALM_FULL = 1<<27, /* Rx FIFO: almost full */
842 F_EMPTY = 1<<27, /* Tx FIFO: empty flag */
843 F_FIFO_EOF = 1<<26, /* Tag (EOF Flag) bit in FIFO */
844 F_WM_REACHED = 1<<25, /* Watermark reached */
845
846 F_FIFO_LEVEL = 0x1fL<<16, /* Bit 23..16: # of Qwords in FIFO */
847 F_WATER_MARK = 0x0007ffL, /* Bit 10.. 0: Watermark */
848};
849
850/* RAM Buffer Register Offsets, use RB_ADDR(Queue, Offs) to access */
851/* RB_START 32 bit RAM Buffer Start Address */
852/* RB_END 32 bit RAM Buffer End Address */
853/* RB_WP 32 bit RAM Buffer Write Pointer */
854/* RB_RP 32 bit RAM Buffer Read Pointer */
855/* RB_RX_UTPP 32 bit Rx Upper Threshold, Pause Pack */
856/* RB_RX_LTPP 32 bit Rx Lower Threshold, Pause Pack */
857/* RB_RX_UTHP 32 bit Rx Upper Threshold, High Prio */
858/* RB_RX_LTHP 32 bit Rx Lower Threshold, High Prio */
859/* RB_PC 32 bit RAM Buffer Packet Counter */
860/* RB_LEV 32 bit RAM Buffer Level Register */
861
862#define RB_MSK 0x0007ffff /* Bit 18.. 0: RAM Buffer Pointer Bits */
863/* RB_TST2 8 bit RAM Buffer Test Register 2 */
864/* RB_TST1 8 bit RAM Buffer Test Register 1 */
865
866/* RB_CTRL 8 bit RAM Buffer Control Register */
867enum {
868 RB_ENA_STFWD = 1<<5, /* Enable Store & Forward */
869 RB_DIS_STFWD = 1<<4, /* Disable Store & Forward */
870 RB_ENA_OP_MD = 1<<3, /* Enable Operation Mode */
871 RB_DIS_OP_MD = 1<<2, /* Disable Operation Mode */
872 RB_RST_CLR = 1<<1, /* Clear RAM Buf STM Reset */
873 RB_RST_SET = 1<<0, /* Set RAM Buf STM Reset */
874};
875
876/* Transmit MAC FIFO and Transmit LED Registers (GENESIS only), */
877enum {
878 TX_MFF_EA = 0x0d00,/* 32 bit Transmit MAC FIFO End Address */
879 TX_MFF_WP = 0x0d04,/* 32 bit Transmit MAC FIFO WR Pointer */
880 TX_MFF_WSP = 0x0d08,/* 32 bit Transmit MAC FIFO WR Shadow Ptr */
881 TX_MFF_RP = 0x0d0c,/* 32 bit Transmit MAC FIFO RD Pointer */
882 TX_MFF_PC = 0x0d10,/* 32 bit Transmit MAC FIFO Packet Cnt */
883 TX_MFF_LEV = 0x0d14,/* 32 bit Transmit MAC FIFO Level */
884 TX_MFF_CTRL1 = 0x0d18,/* 16 bit Transmit MAC FIFO Ctrl Reg 1 */
885 TX_MFF_WAF = 0x0d1a,/* 8 bit Transmit MAC Wait after flush */
886
887 TX_MFF_CTRL2 = 0x0d1c,/* 8 bit Transmit MAC FIFO Ctrl Reg 2 */
888 TX_MFF_TST1 = 0x0d1d,/* 8 bit Transmit MAC FIFO Test Reg 1 */
889 TX_MFF_TST2 = 0x0d1e,/* 8 bit Transmit MAC FIFO Test Reg 2 */
890
891 TX_LED_INI = 0x0d20,/* 32 bit Transmit LED Cnt Init Value */
892 TX_LED_VAL = 0x0d24,/* 32 bit Transmit LED Cnt Current Val */
893 TX_LED_CTRL = 0x0d28,/* 8 bit Transmit LED Cnt Control Reg */
894 TX_LED_TST = 0x0d29,/* 8 bit Transmit LED Cnt Test Reg */
895};
896
897/* Counter and Timer constants, for a host clock of 62.5 MHz */
898#define SK_XMIT_DUR 0x002faf08UL /* 50 ms */
899#define SK_BLK_DUR 0x01dcd650UL /* 500 ms */
900
901#define SK_DPOLL_DEF 0x00ee6b28UL /* 250 ms at 62.5 MHz */
902
903#define SK_DPOLL_MAX 0x00ffffffUL /* 268 ms at 62.5 MHz */
904 /* 215 ms at 78.12 MHz */
905
906#define SK_FACT_62 100 /* is given in percent */
907#define SK_FACT_53 85 /* on GENESIS: 53.12 MHz */
908#define SK_FACT_78 125 /* on YUKON: 78.12 MHz */
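/*
 * Worked example (editor's note): SK_XMIT_DUR = 0x002faf08 = 3,125,000
 * host clocks, which is 50 ms at 62.5 MHz.  On a 78.12 MHz YUKON the
 * same 50 ms takes 3,125,000 * SK_FACT_78 / 100 = 3,906,250 clocks;
 * on a 53.12 MHz GENESIS it is scaled by SK_FACT_53 instead.
 */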
909
910
911/* Transmit GMAC FIFO (YUKON only) */
912enum {
913 TX_GMF_EA = 0x0d40,/* 32 bit Tx GMAC FIFO End Address */
914 TX_GMF_AE_THR = 0x0d44,/* 32 bit Tx GMAC FIFO Almost Empty Thresh.*/
915 TX_GMF_CTRL_T = 0x0d48,/* 32 bit Tx GMAC FIFO Control/Test */
916
917 TX_GMF_WP = 0x0d60,/* 32 bit Tx GMAC FIFO Write Pointer */
918 TX_GMF_WSP = 0x0d64,/* 32 bit Tx GMAC FIFO Write Shadow Ptr. */
919 TX_GMF_WLEV = 0x0d68,/* 32 bit Tx GMAC FIFO Write Level */
920
921 TX_GMF_RP = 0x0d70,/* 32 bit Tx GMAC FIFO Read Pointer */
922 TX_GMF_RSTP = 0x0d74,/* 32 bit Tx GMAC FIFO Restart Pointer */
923 TX_GMF_RLEV = 0x0d78,/* 32 bit Tx GMAC FIFO Read Level */
924
925 /* Descriptor Poll Timer Registers */
926 B28_DPT_INI = 0x0e00,/* 24 bit Descriptor Poll Timer Init Val */
927 B28_DPT_VAL = 0x0e04,/* 24 bit Descriptor Poll Timer Curr Val */
928 B28_DPT_CTRL = 0x0e08,/* 8 bit Descriptor Poll Timer Ctrl Reg */
929
930 B28_DPT_TST = 0x0e0a,/* 8 bit Descriptor Poll Timer Test Reg */
931
932 /* Time Stamp Timer Registers (YUKON only) */
933 GMAC_TI_ST_VAL = 0x0e14,/* 32 bit Time Stamp Timer Curr Val */
934 GMAC_TI_ST_CTRL = 0x0e18,/* 8 bit Time Stamp Timer Ctrl Reg */
935 GMAC_TI_ST_TST = 0x0e1a,/* 8 bit Time Stamp Timer Test Reg */
936};
937
938/* Status BMU Registers (Yukon-2 only)*/
939enum {
940 STAT_CTRL = 0x0e80,/* 32 bit Status BMU Control Reg */
941 STAT_LAST_IDX = 0x0e84,/* 16 bit Status BMU Last Index */
942 /* 0x0e85 - 0x0e86: reserved */
943 STAT_LIST_ADDR_LO = 0x0e88,/* 32 bit Status List Start Addr (low) */
944 STAT_LIST_ADDR_HI = 0x0e8c,/* 32 bit Status List Start Addr (high) */
945 STAT_TXA1_RIDX = 0x0e90,/* 16 bit Status TxA1 Report Index Reg */
946 STAT_TXS1_RIDX = 0x0e92,/* 16 bit Status TxS1 Report Index Reg */
947 STAT_TXA2_RIDX = 0x0e94,/* 16 bit Status TxA2 Report Index Reg */
948 STAT_TXS2_RIDX = 0x0e96,/* 16 bit Status TxS2 Report Index Reg */
949 STAT_TX_IDX_TH = 0x0e98,/* 16 bit Status Tx Index Threshold Reg */
950 STAT_PUT_IDX = 0x0e9c,/* 16 bit Status Put Index Reg */
951
952/* FIFO Control/Status Registers (Yukon-2 only)*/
953 STAT_FIFO_WP = 0x0ea0,/* 8 bit Status FIFO Write Pointer Reg */
954 STAT_FIFO_RP = 0x0ea4,/* 8 bit Status FIFO Read Pointer Reg */
955 STAT_FIFO_RSP = 0x0ea6,/* 8 bit Status FIFO Read Shadow Ptr */
956 STAT_FIFO_LEVEL = 0x0ea8,/* 8 bit Status FIFO Level Reg */
957 STAT_FIFO_SHLVL = 0x0eaa,/* 8 bit Status FIFO Shadow Level Reg */
958 STAT_FIFO_WM = 0x0eac,/* 8 bit Status FIFO Watermark Reg */
959 STAT_FIFO_ISR_WM = 0x0ead,/* 8 bit Status FIFO ISR Watermark Reg */
960
961/* Level and ISR Timer Registers (Yukon-2 only)*/
962 STAT_LEV_TIMER_INI = 0x0eb0,/* 32 bit Level Timer Init. Value Reg */
963 STAT_LEV_TIMER_CNT = 0x0eb4,/* 32 bit Level Timer Counter Reg */
964 STAT_LEV_TIMER_CTRL = 0x0eb8,/* 8 bit Level Timer Control Reg */
965 STAT_LEV_TIMER_TEST = 0x0eb9,/* 8 bit Level Timer Test Reg */
966 STAT_TX_TIMER_INI = 0x0ec0,/* 32 bit Tx Timer Init. Value Reg */
967 STAT_TX_TIMER_CNT = 0x0ec4,/* 32 bit Tx Timer Counter Reg */
968 STAT_TX_TIMER_CTRL = 0x0ec8,/* 8 bit Tx Timer Control Reg */
969 STAT_TX_TIMER_TEST = 0x0ec9,/* 8 bit Tx Timer Test Reg */
970 STAT_ISR_TIMER_INI = 0x0ed0,/* 32 bit ISR Timer Init. Value Reg */
971 STAT_ISR_TIMER_CNT = 0x0ed4,/* 32 bit ISR Timer Counter Reg */
972 STAT_ISR_TIMER_CTRL = 0x0ed8,/* 8 bit ISR Timer Control Reg */
973 STAT_ISR_TIMER_TEST = 0x0ed9,/* 8 bit ISR Timer Test Reg */
974
975 ST_LAST_IDX_MASK = 0x007f,/* Last Index Mask */
976 ST_TXRP_IDX_MASK = 0x0fff,/* Tx Report Index Mask */
977 ST_TXTH_IDX_MASK = 0x0fff,/* Tx Threshold Index Mask */
978 ST_WM_IDX_MASK = 0x3f,/* FIFO Watermark Index Mask */
979};
980
981enum {
982 LINKLED_OFF = 0x01,
983 LINKLED_ON = 0x02,
984 LINKLED_LINKSYNC_OFF = 0x04,
985 LINKLED_LINKSYNC_ON = 0x08,
986 LINKLED_BLINK_OFF = 0x10,
987 LINKLED_BLINK_ON = 0x20,
988};
989
990/* GMAC and GPHY Control Registers (YUKON only) */
991enum {
992 GMAC_CTRL = 0x0f00,/* 32 bit GMAC Control Reg */
993 GPHY_CTRL = 0x0f04,/* 32 bit GPHY Control Reg */
994 GMAC_IRQ_SRC = 0x0f08,/* 8 bit GMAC Interrupt Source Reg */
995 GMAC_IRQ_MSK = 0x0f0c,/* 8 bit GMAC Interrupt Mask Reg */
996 GMAC_LINK_CTRL = 0x0f10,/* 16 bit Link Control Reg */
997
998/* Wake-up Frame Pattern Match Control Registers (YUKON only) */
999
1000 WOL_REG_OFFS = 0x20,/* HW-Bug: Address is + 0x20 against spec. */
1001
1002 WOL_CTRL_STAT = 0x0f20,/* 16 bit WOL Control/Status Reg */
1003 WOL_MATCH_CTL = 0x0f22,/* 8 bit WOL Match Control Reg */
1004 WOL_MATCH_RES = 0x0f23,/* 8 bit WOL Match Result Reg */
1005 WOL_MAC_ADDR = 0x0f24,/* 32 bit WOL MAC Address */
1006 WOL_PATT_PME = 0x0f2a,/* 8 bit WOL PME Match Enable (Yukon-2) */
1007 WOL_PATT_ASFM = 0x0f2b,/* 8 bit WOL ASF Match Enable (Yukon-2) */
1008 WOL_PATT_RPTR = 0x0f2c,/* 8 bit WOL Pattern Read Pointer */
1009
1010/* WOL Pattern Length Registers (YUKON only) */
1011
1012 WOL_PATT_LEN_LO = 0x0f30,/* 32 bit WOL Pattern Length 3..0 */
1013 WOL_PATT_LEN_HI = 0x0f34,/* 24 bit WOL Pattern Length 6..4 */
1014
1015/* WOL Pattern Counter Registers (YUKON only) */
1016
1017 WOL_PATT_CNT_0 = 0x0f38,/* 32 bit WOL Pattern Counter 3..0 */
1018 WOL_PATT_CNT_4 = 0x0f3c,/* 24 bit WOL Pattern Counter 6..4 */
1019};
1020
1021enum {
1022 WOL_PATT_RAM_1 = 0x1000,/* WOL Pattern RAM Link 1 */
1023 WOL_PATT_RAM_2 = 0x1400,/* WOL Pattern RAM Link 2 */
1024};
1025
1026enum {
1027 BASE_XMAC_1 = 0x2000,/* XMAC 1 registers */
1028 BASE_GMAC_1 = 0x2800,/* GMAC 1 registers */
1029 BASE_XMAC_2 = 0x3000,/* XMAC 2 registers */
1030 BASE_GMAC_2 = 0x3800,/* GMAC 2 registers */
1031};
1032
1033/*
1034 * Receive Frame Status Encoding
1035 */
1036enum {
1037 XMR_FS_LEN = 0x3fff<<18, /* Bit 31..18: Rx Frame Length */
1038	XMR_FS_2L_VLAN	= 1<<17, /* Bit 17:	tagged with 2-level VLAN ID */
1039	XMR_FS_1_VLAN	= 1<<16, /* Bit 16:	tagged with 1-level VLAN ID */
1040 XMR_FS_BC = 1<<15, /* Bit 15: Broadcast Frame */
1041 XMR_FS_MC = 1<<14, /* Bit 14: Multicast Frame */
1042 XMR_FS_UC = 1<<13, /* Bit 13: Unicast Frame */
1043
1044 XMR_FS_BURST = 1<<11, /* Bit 11: Burst Mode */
1045 XMR_FS_CEX_ERR = 1<<10, /* Bit 10: Carrier Ext. Error */
1046 XMR_FS_802_3 = 1<<9, /* Bit 9: 802.3 Frame */
1047 XMR_FS_COL_ERR = 1<<8, /* Bit 8: Collision Error */
1048 XMR_FS_CAR_ERR = 1<<7, /* Bit 7: Carrier Event Error */
1049 XMR_FS_LEN_ERR = 1<<6, /* Bit 6: In-Range Length Error */
1050 XMR_FS_FRA_ERR = 1<<5, /* Bit 5: Framing Error */
1051 XMR_FS_RUNT = 1<<4, /* Bit 4: Runt Frame */
1052 XMR_FS_LNG_ERR = 1<<3, /* Bit 3: Giant (Jumbo) Frame */
1053 XMR_FS_FCS_ERR = 1<<2, /* Bit 2: Frame Check Sequ Err */
1054 XMR_FS_ERR = 1<<1, /* Bit 1: Frame Error */
1055 XMR_FS_MCTRL = 1<<0, /* Bit 0: MAC Control Packet */
1056
1057/*
1058 * XMR_FS_ERR will be set if
1059 * XMR_FS_FCS_ERR, XMR_FS_LNG_ERR, XMR_FS_RUNT,
1060 * XMR_FS_FRA_ERR, XMR_FS_LEN_ERR, or XMR_FS_CEX_ERR
1061 * is set. XMR_FS_LNG_ERR and XMR_FS_LEN_ERR will issue
1062 * XMR_FS_ERR unless the corresponding bit in the Receive Command
1063 * Register is set.
1064 */
1065};
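/*
 * Rx status usage sketch (editor's example, assuming a status word read
 * from the receive descriptor): a driver would typically test
 * XMR_FS_ERR first and only then extract the length field, e.g.
 *
 *	if (status & XMR_FS_ERR)
 *		goto error;
 *	len = (status & XMR_FS_LEN) >> 18;
 */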
1066
1067/*
1068 * XMAC-PHY Registers, indirect addressed over the XMAC
1069 */
1070enum {
1071 PHY_XMAC_CTRL = 0x00,/* 16 bit r/w PHY Control Register */
1072 PHY_XMAC_STAT = 0x01,/* 16 bit r/w PHY Status Register */
1073 PHY_XMAC_ID0 = 0x02,/* 16 bit r/o PHY ID0 Register */
1074 PHY_XMAC_ID1 = 0x03,/* 16 bit r/o PHY ID1 Register */
1075 PHY_XMAC_AUNE_ADV = 0x04,/* 16 bit r/w Auto-Neg. Advertisement */
1076 PHY_XMAC_AUNE_LP = 0x05,/* 16 bit r/o Link Partner Abi Reg */
1077 PHY_XMAC_AUNE_EXP = 0x06,/* 16 bit r/o Auto-Neg. Expansion Reg */
1078 PHY_XMAC_NEPG = 0x07,/* 16 bit r/w Next Page Register */
1079 PHY_XMAC_NEPG_LP = 0x08,/* 16 bit r/o Next Page Link Partner */
1080
1081 PHY_XMAC_EXT_STAT = 0x0f,/* 16 bit r/o Ext Status Register */
1082 PHY_XMAC_RES_ABI = 0x10,/* 16 bit r/o PHY Resolved Ability */
1083};
1084/*
1085 * Broadcom-PHY Registers, indirect addressed over XMAC
1086 */
1087enum {
1088 PHY_BCOM_CTRL = 0x00,/* 16 bit r/w PHY Control Register */
1089 PHY_BCOM_STAT = 0x01,/* 16 bit r/o PHY Status Register */
1090 PHY_BCOM_ID0 = 0x02,/* 16 bit r/o PHY ID0 Register */
1091 PHY_BCOM_ID1 = 0x03,/* 16 bit r/o PHY ID1 Register */
1092 PHY_BCOM_AUNE_ADV = 0x04,/* 16 bit r/w Auto-Neg. Advertisement */
1093 PHY_BCOM_AUNE_LP = 0x05,/* 16 bit r/o Link Part Ability Reg */
1094 PHY_BCOM_AUNE_EXP = 0x06,/* 16 bit r/o Auto-Neg. Expansion Reg */
1095 PHY_BCOM_NEPG = 0x07,/* 16 bit r/w Next Page Register */
1096 PHY_BCOM_NEPG_LP = 0x08,/* 16 bit r/o Next Page Link Partner */
1097 /* Broadcom-specific registers */
1098 PHY_BCOM_1000T_CTRL = 0x09,/* 16 bit r/w 1000Base-T Control Reg */
1099 PHY_BCOM_1000T_STAT = 0x0a,/* 16 bit r/o 1000Base-T Status Reg */
1100 PHY_BCOM_EXT_STAT = 0x0f,/* 16 bit r/o Extended Status Reg */
1101 PHY_BCOM_P_EXT_CTRL = 0x10,/* 16 bit r/w PHY Extended Ctrl Reg */
1102 PHY_BCOM_P_EXT_STAT = 0x11,/* 16 bit r/o PHY Extended Stat Reg */
1103 PHY_BCOM_RE_CTR = 0x12,/* 16 bit r/w Receive Error Counter */
1104 PHY_BCOM_FC_CTR = 0x13,/* 16 bit r/w False Carrier Sense Cnt */
1105 PHY_BCOM_RNO_CTR = 0x14,/* 16 bit r/w Receiver NOT_OK Cnt */
1106
1107 PHY_BCOM_AUX_CTRL = 0x18,/* 16 bit r/w Auxiliary Control Reg */
1108 PHY_BCOM_AUX_STAT = 0x19,/* 16 bit r/o Auxiliary Stat Summary */
1109 PHY_BCOM_INT_STAT = 0x1a,/* 16 bit r/o Interrupt Status Reg */
1110 PHY_BCOM_INT_MASK = 0x1b,/* 16 bit r/w Interrupt Mask Reg */
1111};
1112
1113/*
1114 * Marvell-PHY Registers, indirect addressed over GMAC
1115 */
1116enum {
1117 PHY_MARV_CTRL = 0x00,/* 16 bit r/w PHY Control Register */
1118 PHY_MARV_STAT = 0x01,/* 16 bit r/o PHY Status Register */
1119 PHY_MARV_ID0 = 0x02,/* 16 bit r/o PHY ID0 Register */
1120 PHY_MARV_ID1 = 0x03,/* 16 bit r/o PHY ID1 Register */
1121 PHY_MARV_AUNE_ADV = 0x04,/* 16 bit r/w Auto-Neg. Advertisement */
1122 PHY_MARV_AUNE_LP = 0x05,/* 16 bit r/o Link Part Ability Reg */
1123 PHY_MARV_AUNE_EXP = 0x06,/* 16 bit r/o Auto-Neg. Expansion Reg */
1124 PHY_MARV_NEPG = 0x07,/* 16 bit r/w Next Page Register */
1125 PHY_MARV_NEPG_LP = 0x08,/* 16 bit r/o Next Page Link Partner */
1126	/* Marvell-specific registers */
1127 PHY_MARV_1000T_CTRL = 0x09,/* 16 bit r/w 1000Base-T Control Reg */
1128 PHY_MARV_1000T_STAT = 0x0a,/* 16 bit r/o 1000Base-T Status Reg */
1129 PHY_MARV_EXT_STAT = 0x0f,/* 16 bit r/o Extended Status Reg */
1130 PHY_MARV_PHY_CTRL = 0x10,/* 16 bit r/w PHY Specific Ctrl Reg */
1131 PHY_MARV_PHY_STAT = 0x11,/* 16 bit r/o PHY Specific Stat Reg */
1132 PHY_MARV_INT_MASK = 0x12,/* 16 bit r/w Interrupt Mask Reg */
1133 PHY_MARV_INT_STAT = 0x13,/* 16 bit r/o Interrupt Status Reg */
1134 PHY_MARV_EXT_CTRL = 0x14,/* 16 bit r/w Ext. PHY Specific Ctrl */
1135 PHY_MARV_RXE_CNT = 0x15,/* 16 bit r/w Receive Error Counter */
1136 PHY_MARV_EXT_ADR = 0x16,/* 16 bit r/w Ext. Ad. for Cable Diag. */
1137 PHY_MARV_PORT_IRQ = 0x17,/* 16 bit r/o Port 0 IRQ (88E1111 only) */
1138 PHY_MARV_LED_CTRL = 0x18,/* 16 bit r/w LED Control Reg */
1139 PHY_MARV_LED_OVER = 0x19,/* 16 bit r/w Manual LED Override Reg */
1140 PHY_MARV_EXT_CTRL_2 = 0x1a,/* 16 bit r/w Ext. PHY Specific Ctrl 2 */
1141 PHY_MARV_EXT_P_STAT = 0x1b,/* 16 bit r/w Ext. PHY Spec. Stat Reg */
1142 PHY_MARV_CABLE_DIAG = 0x1c,/* 16 bit r/o Cable Diagnostic Reg */
1143 PHY_MARV_PAGE_ADDR = 0x1d,/* 16 bit r/w Extended Page Address Reg */
1144 PHY_MARV_PAGE_DATA = 0x1e,/* 16 bit r/w Extended Page Data Reg */
1145
1146/* for 10/100 Fast Ethernet PHY (88E3082 only) */
1147 PHY_MARV_FE_LED_PAR = 0x16,/* 16 bit r/w LED Parallel Select Reg. */
1148 PHY_MARV_FE_LED_SER = 0x17,/* 16 bit r/w LED Stream Select S. LED */
1149 PHY_MARV_FE_VCT_TX = 0x1a,/* 16 bit r/w VCT Reg. for TXP/N Pins */
1150 PHY_MARV_FE_VCT_RX = 0x1b,/* 16 bit r/o VCT Reg. for RXP/N Pins */
1151 PHY_MARV_FE_SPEC_2 = 0x1c,/* 16 bit r/w Specific Control Reg. 2 */
1152};
1153
1154/* Level One-PHY Registers, indirect addressed over XMAC */
1155enum {
1156 PHY_LONE_CTRL = 0x00,/* 16 bit r/w PHY Control Register */
1157 PHY_LONE_STAT = 0x01,/* 16 bit r/o PHY Status Register */
1158 PHY_LONE_ID0 = 0x02,/* 16 bit r/o PHY ID0 Register */
1159 PHY_LONE_ID1 = 0x03,/* 16 bit r/o PHY ID1 Register */
1160 PHY_LONE_AUNE_ADV = 0x04,/* 16 bit r/w Auto-Neg. Advertisement */
1161 PHY_LONE_AUNE_LP = 0x05,/* 16 bit r/o Link Part Ability Reg */
1162 PHY_LONE_AUNE_EXP = 0x06,/* 16 bit r/o Auto-Neg. Expansion Reg */
1163 PHY_LONE_NEPG = 0x07,/* 16 bit r/w Next Page Register */
1164 PHY_LONE_NEPG_LP = 0x08,/* 16 bit r/o Next Page Link Partner */
1165 /* Level One-specific registers */
1166 PHY_LONE_1000T_CTRL = 0x09,/* 16 bit r/w 1000Base-T Control Reg */
1167 PHY_LONE_1000T_STAT = 0x0a,/* 16 bit r/o 1000Base-T Status Reg */
1168 PHY_LONE_EXT_STAT = 0x0f,/* 16 bit r/o Extended Status Reg */
1169 PHY_LONE_PORT_CFG = 0x10,/* 16 bit r/w Port Configuration Reg*/
1170 PHY_LONE_Q_STAT = 0x11,/* 16 bit r/o Quick Status Reg */
1171 PHY_LONE_INT_ENAB = 0x12,/* 16 bit r/w Interrupt Enable Reg */
1172 PHY_LONE_INT_STAT = 0x13,/* 16 bit r/o Interrupt Status Reg */
1173 PHY_LONE_LED_CFG = 0x14,/* 16 bit r/w LED Configuration Reg */
1174 PHY_LONE_PORT_CTRL = 0x15,/* 16 bit r/w Port Control Reg */
1175 PHY_LONE_CIM = 0x16,/* 16 bit r/o CIM Reg */
1176};
1177
1178/* National-PHY Registers, indirect addressed over XMAC */
1179enum {
1180 PHY_NAT_CTRL = 0x00,/* 16 bit r/w PHY Control Register */
1181 PHY_NAT_STAT = 0x01,/* 16 bit r/w PHY Status Register */
1182 PHY_NAT_ID0 = 0x02,/* 16 bit r/o PHY ID0 Register */
1183 PHY_NAT_ID1 = 0x03,/* 16 bit r/o PHY ID1 Register */
1184 PHY_NAT_AUNE_ADV = 0x04,/* 16 bit r/w Auto-Neg. Advertisement */
1185 PHY_NAT_AUNE_LP = 0x05,/* 16 bit r/o Link Partner Ability Reg */
1186 PHY_NAT_AUNE_EXP = 0x06,/* 16 bit r/o Auto-Neg. Expansion Reg */
1187 PHY_NAT_NEPG = 0x07,/* 16 bit r/w Next Page Register */
1188 PHY_NAT_NEPG_LP = 0x08,/* 16 bit r/o Next Page Link Partner Reg */
1189 /* National-specific registers */
1190 PHY_NAT_1000T_CTRL = 0x09,/* 16 bit r/w 1000Base-T Control Reg */
1191 PHY_NAT_1000T_STAT = 0x0a,/* 16 bit r/o 1000Base-T Status Reg */
1192 PHY_NAT_EXT_STAT = 0x0f,/* 16 bit r/o Extended Status Register */
1193 PHY_NAT_EXT_CTRL1 = 0x10,/* 16 bit r/o Extended Control Reg1 */
1194 PHY_NAT_Q_STAT1 = 0x11,/* 16 bit r/o Quick Status Reg1 */
1195 PHY_NAT_10B_OP = 0x12,/* 16 bit r/o 10Base-T Operations Reg */
1196	PHY_NAT_EXT_CTRL2	= 0x13,/* 16 bit r/o	Extended Control Reg2 */
1197 PHY_NAT_Q_STAT2 = 0x14,/* 16 bit r/o Quick Status Reg2 */
1198
1199 PHY_NAT_PHY_ADDR = 0x19,/* 16 bit r/o PHY Address Register */
1200};
1201
1202enum {
1203 PHY_CT_RESET = 1<<15, /* Bit 15: (sc) clear all PHY related regs */
1204 PHY_CT_LOOP = 1<<14, /* Bit 14: enable Loopback over PHY */
1205 PHY_CT_SPS_LSB = 1<<13, /* Bit 13: Speed select, lower bit */
1206 PHY_CT_ANE = 1<<12, /* Bit 12: Auto-Negotiation Enabled */
1207 PHY_CT_PDOWN = 1<<11, /* Bit 11: Power Down Mode */
1208 PHY_CT_ISOL = 1<<10, /* Bit 10: Isolate Mode */
1209 PHY_CT_RE_CFG = 1<<9, /* Bit 9: (sc) Restart Auto-Negotiation */
1210 PHY_CT_DUP_MD = 1<<8, /* Bit 8: Duplex Mode */
1211 PHY_CT_COL_TST = 1<<7, /* Bit 7: Collision Test enabled */
1212 PHY_CT_SPS_MSB = 1<<6, /* Bit 6: Speed select, upper bit */
1213};
1214
1215enum {
1216 PHY_CT_SP1000 = PHY_CT_SPS_MSB, /* enable speed of 1000 Mbps */
1217 PHY_CT_SP100 = PHY_CT_SPS_LSB, /* enable speed of 100 Mbps */
1218 PHY_CT_SP10 = 0, /* enable speed of 10 Mbps */
1219};
1220
1221enum {
1222 PHY_ST_EXT_ST = 1<<8, /* Bit 8: Extended Status Present */
1223
1224 PHY_ST_PRE_SUP = 1<<6, /* Bit 6: Preamble Suppression */
1225 PHY_ST_AN_OVER = 1<<5, /* Bit 5: Auto-Negotiation Over */
1226	PHY_ST_REM_FLT	= 1<<4, /* Bit 4:	Remote Fault Condition Occurred */
1227 PHY_ST_AN_CAP = 1<<3, /* Bit 3: Auto-Negotiation Capability */
1228 PHY_ST_LSYNC = 1<<2, /* Bit 2: Link Synchronized */
1229 PHY_ST_JAB_DET = 1<<1, /* Bit 1: Jabber Detected */
1230 PHY_ST_EXT_REG = 1<<0, /* Bit 0: Extended Register available */
1231};
1232
1233enum {
1234 PHY_I1_OUI_MSK = 0x3f<<10, /* Bit 15..10: Organization Unique ID */
1235 PHY_I1_MOD_NUM = 0x3f<<4, /* Bit 9.. 4: Model Number */
1236 PHY_I1_REV_MSK = 0xf, /* Bit 3.. 0: Revision Number */
1237};
1238
1239/* different Broadcom PHY Ids */
1240enum {
1241 PHY_BCOM_ID1_A1 = 0x6041,
1242 PHY_BCOM_ID1_B2 = 0x6043,
1243 PHY_BCOM_ID1_C0 = 0x6044,
1244 PHY_BCOM_ID1_C5 = 0x6047,
1245};
1246
1247/* different Marvell PHY Ids */
1248enum {
1249 PHY_MARV_ID0_VAL= 0x0141, /* Marvell Unique Identifier */
1250 PHY_MARV_ID1_B0 = 0x0C23, /* Yukon (PHY 88E1011) */
1251 PHY_MARV_ID1_B2 = 0x0C25, /* Yukon-Plus (PHY 88E1011) */
1252 PHY_MARV_ID1_C2 = 0x0CC2, /* Yukon-EC (PHY 88E1111) */
1253 PHY_MARV_ID1_Y2 = 0x0C91, /* Yukon-2 (PHY 88E1112) */
1254};
1255
1256enum {
1257 PHY_AN_NXT_PG = 1<<15, /* Bit 15: Request Next Page */
1258 PHY_X_AN_ACK = 1<<14, /* Bit 14: (ro) Acknowledge Received */
1259 PHY_X_AN_RFB = 3<<12,/* Bit 13..12: Remote Fault Bits */
1260
1261 PHY_X_AN_PAUSE = 3<<7,/* Bit 8.. 7: Pause Bits */
1262 PHY_X_AN_HD = 1<<6, /* Bit 6: Half Duplex */
1263 PHY_X_AN_FD = 1<<5, /* Bit 5: Full Duplex */
1264};
1265
1266enum {
1267 PHY_B_AN_RF = 1<<13, /* Bit 13: Remote Fault */
1268
1269 PHY_B_AN_ASP = 1<<11, /* Bit 11: Asymmetric Pause */
1270 PHY_B_AN_PC = 1<<10, /* Bit 10: Pause Capable */
1271 PHY_B_AN_SEL = 0x1f, /* Bit 4..0: Selector Field, 00001=Ethernet*/
1272};
1273
1274enum {
1275 PHY_L_AN_RF = 1<<13, /* Bit 13: Remote Fault */
1276 /* Bit 12: reserved */
1277 PHY_L_AN_ASP = 1<<11, /* Bit 11: Asymmetric Pause */
1278 PHY_L_AN_PC = 1<<10, /* Bit 10: Pause Capable */
1279
1280 PHY_L_AN_SEL = 0x1f, /* Bit 4..0: Selector Field, 00001=Ethernet*/
1281};
1282
1283/* PHY_NAT_AUNE_ADV 16 bit r/w Auto-Negotiation Advertisement */
1284/* PHY_NAT_AUNE_LP 16 bit r/o Link Partner Ability Reg *****/
1285/* PHY_AN_NXT_PG (see XMAC) Bit 15: Request Next Page */
1286enum {
1287 PHY_N_AN_RF = 1<<13, /* Bit 13: Remote Fault */
1288
1289 PHY_N_AN_100F = 1<<11, /* Bit 11: 100Base-T2 FD Support */
1290 PHY_N_AN_100H = 1<<10, /* Bit 10: 100Base-T2 HD Support */
1291
1292 PHY_N_AN_SEL = 0x1f, /* Bit 4..0: Selector Field, 00001=Ethernet*/
1293};
1294
1295/* field type definition for PHY_x_AN_SEL */
1296enum {
1297 PHY_SEL_TYPE = 1, /* 00001 = Ethernet */
1298};
1299
1300enum {
1301 PHY_ANE_LP_NP = 1<<3, /* Bit 3: Link Partner can Next Page */
1302 PHY_ANE_LOC_NP = 1<<2, /* Bit 2: Local PHY can Next Page */
1303 PHY_ANE_RX_PG = 1<<1, /* Bit 1: Page Received */
1304};
1305
1306enum {
1307 PHY_ANE_PAR_DF = 1<<4, /* Bit 4: Parallel Detection Fault */
1308
1309 PHY_ANE_LP_CAP = 1<<0, /* Bit 0: Link Partner Auto-Neg. Cap. */
1310};
1311
1312enum {
1313 PHY_NP_MORE = 1<<15, /* Bit 15: More, Next Pages to follow */
1314 PHY_NP_ACK1 = 1<<14, /* Bit 14: (ro) Ack1, for receiving a message */
1315 PHY_NP_MSG_VAL = 1<<13, /* Bit 13: Message Page valid */
1316 PHY_NP_ACK2 = 1<<12, /* Bit 12: Ack2, comply with msg content */
1317 PHY_NP_TOG = 1<<11, /* Bit 11: Toggle Bit, ensure sync */
1318 PHY_NP_MSG = 0x07ff, /* Bit 10..0: Message from/to Link Partner */
1319};
1320
1321enum {
1322 PHY_X_EX_FD = 1<<15, /* Bit 15: Device Supports Full Duplex */
1323 PHY_X_EX_HD = 1<<14, /* Bit 14: Device Supports Half Duplex */
1324};
1325
1326enum {
1327 PHY_X_RS_PAUSE = 3<<7,/* Bit 8..7: selected Pause Mode */
1328 PHY_X_RS_HD = 1<<6, /* Bit 6: Half Duplex Mode selected */
1329 PHY_X_RS_FD = 1<<5, /* Bit 5: Full Duplex Mode selected */
1330 PHY_X_RS_ABLMIS = 1<<4, /* Bit 4: duplex or pause cap mismatch */
1331 PHY_X_RS_PAUMIS = 1<<3, /* Bit 3: pause capability mismatch */
1332};
1333
1334/** Remote Fault Bits (PHY_X_AN_RFB) encoding */
1335enum {
1336 X_RFB_OK = 0<<12,/* Bit 13..12 No errors, Link OK */
1337 X_RFB_LF = 1<<12, /* Bit 13..12 Link Failure */
1338 X_RFB_OFF = 2<<12,/* Bit 13..12 Offline */
1339 X_RFB_AN_ERR = 3<<12,/* Bit 13..12 Auto-Negotiation Error */
1340};
1341
1342/* Pause Bits (PHY_X_AN_PAUSE and PHY_X_RS_PAUSE) encoding */
1343enum {
1344 PHY_X_P_NO_PAUSE = 0<<7,/* Bit 8..7: no Pause Mode */
1345 PHY_X_P_SYM_MD = 1<<7, /* Bit 8..7: symmetric Pause Mode */
1346 PHY_X_P_ASYM_MD = 2<<7,/* Bit 8..7: asymmetric Pause Mode */
1347 PHY_X_P_BOTH_MD = 3<<7,/* Bit 8..7: both Pause Mode */
1348};
1349
1350
1351/* Broadcom-Specific */
1352/***** PHY_BCOM_1000T_CTRL 16 bit r/w 1000Base-T Control Reg *****/
1353enum {
1354 PHY_B_1000C_TEST = 7<<13,/* Bit 15..13: Test Modes */
1355 PHY_B_1000C_MSE = 1<<12, /* Bit 12: Master/Slave Enable */
1356 PHY_B_1000C_MSC = 1<<11, /* Bit 11: M/S Configuration */
1357 PHY_B_1000C_RD = 1<<10, /* Bit 10: Repeater/DTE */
1358 PHY_B_1000C_AFD = 1<<9, /* Bit 9: Advertise Full Duplex */
1359 PHY_B_1000C_AHD = 1<<8, /* Bit 8: Advertise Half Duplex */
1360};
1361
1362/***** PHY_BCOM_1000T_STAT 16 bit r/o 1000Base-T Status Reg *****/
1363/***** PHY_MARV_1000T_STAT 16 bit r/o 1000Base-T Status Reg *****/
1364enum {
1365 PHY_B_1000S_MSF = 1<<15, /* Bit 15: Master/Slave Fault */
1366 PHY_B_1000S_MSR = 1<<14, /* Bit 14: Master/Slave Result */
1367 PHY_B_1000S_LRS = 1<<13, /* Bit 13: Local Receiver Status */
1368 PHY_B_1000S_RRS = 1<<12, /* Bit 12: Remote Receiver Status */
1369 PHY_B_1000S_LP_FD = 1<<11, /* Bit 11: Link Partner can FD */
1370 PHY_B_1000S_LP_HD = 1<<10, /* Bit 10: Link Partner can HD */
1371 /* Bit 9..8: reserved */
1372 PHY_B_1000S_IEC = 0xff, /* Bit 7..0: Idle Error Count */
1373};
1374
1375/***** PHY_BCOM_EXT_STAT 16 bit r/o Extended Status Register *****/
1376enum {
1377 PHY_B_ES_X_FD_CAP = 1<<15, /* Bit 15: 1000Base-X FD capable */
1378 PHY_B_ES_X_HD_CAP = 1<<14, /* Bit 14: 1000Base-X HD capable */
1379 PHY_B_ES_T_FD_CAP = 1<<13, /* Bit 13: 1000Base-T FD capable */
1380 PHY_B_ES_T_HD_CAP = 1<<12, /* Bit 12: 1000Base-T HD capable */
1381};
1382
1383/***** PHY_BCOM_P_EXT_CTRL 16 bit r/w PHY Extended Control Reg *****/
1384enum {
1385	PHY_B_PEC_MAC_PHY	= 1<<15, /* Bit 15:	10BIT/GMII-Interface */
1386 PHY_B_PEC_DIS_CROSS = 1<<14, /* Bit 14: Disable MDI Crossover */
1387 PHY_B_PEC_TX_DIS = 1<<13, /* Bit 13: Tx output Disabled */
1388 PHY_B_PEC_INT_DIS = 1<<12, /* Bit 12: Interrupts Disabled */
1389 PHY_B_PEC_F_INT = 1<<11, /* Bit 11: Force Interrupt */
1390 PHY_B_PEC_BY_45 = 1<<10, /* Bit 10: Bypass 4B5B-Decoder */
1391 PHY_B_PEC_BY_SCR = 1<<9, /* Bit 9: Bypass Scrambler */
1392 PHY_B_PEC_BY_MLT3 = 1<<8, /* Bit 8: Bypass MLT3 Encoder */
1393 PHY_B_PEC_BY_RXA = 1<<7, /* Bit 7: Bypass Rx Alignm. */
1394 PHY_B_PEC_RES_SCR = 1<<6, /* Bit 6: Reset Scrambler */
1395 PHY_B_PEC_EN_LTR = 1<<5, /* Bit 5: Ena LED Traffic Mode */
1396 PHY_B_PEC_LED_ON = 1<<4, /* Bit 4: Force LED's on */
1397 PHY_B_PEC_LED_OFF = 1<<3, /* Bit 3: Force LED's off */
1398 PHY_B_PEC_EX_IPG = 1<<2, /* Bit 2: Extend Tx IPG Mode */
1399 PHY_B_PEC_3_LED = 1<<1, /* Bit 1: Three Link LED mode */
1400	PHY_B_PEC_HIGH_LA	= 1<<0, /* Bit 0:	GMII FIFO Elasticity */
1401};
1402
1403/***** PHY_BCOM_P_EXT_STAT 16 bit r/o PHY Extended Status Reg *****/
1404enum {
1405 PHY_B_PES_CROSS_STAT = 1<<13, /* Bit 13: MDI Crossover Status */
1406 PHY_B_PES_INT_STAT = 1<<12, /* Bit 12: Interrupt Status */
1407 PHY_B_PES_RRS = 1<<11, /* Bit 11: Remote Receiver Stat. */
1408 PHY_B_PES_LRS = 1<<10, /* Bit 10: Local Receiver Stat. */
1409 PHY_B_PES_LOCKED = 1<<9, /* Bit 9: Locked */
1410 PHY_B_PES_LS = 1<<8, /* Bit 8: Link Status */
1411 PHY_B_PES_RF = 1<<7, /* Bit 7: Remote Fault */
1412 PHY_B_PES_CE_ER = 1<<6, /* Bit 6: Carrier Ext Error */
1413 PHY_B_PES_BAD_SSD = 1<<5, /* Bit 5: Bad SSD */
1414 PHY_B_PES_BAD_ESD = 1<<4, /* Bit 4: Bad ESD */
1415 PHY_B_PES_RX_ER = 1<<3, /* Bit 3: Receive Error */
1416 PHY_B_PES_TX_ER = 1<<2, /* Bit 2: Transmit Error */
1417 PHY_B_PES_LOCK_ER = 1<<1, /* Bit 1: Lock Error */
1418 PHY_B_PES_MLT3_ER = 1<<0, /* Bit 0: MLT3 code Error */
1419};
1420
1421/***** PHY_BCOM_FC_CTR 16 bit r/w False Carrier Counter *****/
1422enum {
1423 PHY_B_FC_CTR = 0xff, /* Bit 7..0: False Carrier Counter */
1424
1425/***** PHY_BCOM_RNO_CTR 16 bit r/w Receive NOT_OK Counter *****/
1426 PHY_B_RC_LOC_MSK = 0xff00, /* Bit 15..8: Local Rx NOT_OK cnt */
1427 PHY_B_RC_REM_MSK = 0x00ff, /* Bit 7..0: Remote Rx NOT_OK cnt */
1428
1429/***** PHY_BCOM_AUX_CTRL 16 bit r/w Auxiliary Control Reg *****/
1430 PHY_B_AC_L_SQE = 1<<15, /* Bit 15: Low Squelch */
1431 PHY_B_AC_LONG_PACK = 1<<14, /* Bit 14: Rx Long Packets */
1432 PHY_B_AC_ER_CTRL = 3<<12,/* Bit 13..12: Edgerate Control */
1433 /* Bit 11: reserved */
1434 PHY_B_AC_TX_TST = 1<<10, /* Bit 10: Tx test bit, always 1 */
1435 /* Bit 9.. 8: reserved */
1436 PHY_B_AC_DIS_PRF = 1<<7, /* Bit 7: dis part resp filter */
1437 /* Bit 6: reserved */
1438 PHY_B_AC_DIS_PM = 1<<5, /* Bit 5: dis power management */
1439 /* Bit 4: reserved */
1440 PHY_B_AC_DIAG = 1<<3, /* Bit 3: Diagnostic Mode */
1441};
1442
1443/***** PHY_BCOM_AUX_STAT 16 bit r/o Auxiliary Status Reg *****/
1444enum {
1445 PHY_B_AS_AN_C = 1<<15, /* Bit 15: AutoNeg complete */
1446 PHY_B_AS_AN_CA = 1<<14, /* Bit 14: AN Complete Ack */
1447 PHY_B_AS_ANACK_D = 1<<13, /* Bit 13: AN Ack Detect */
1448 PHY_B_AS_ANAB_D = 1<<12, /* Bit 12: AN Ability Detect */
1449 PHY_B_AS_NPW = 1<<11, /* Bit 11: AN Next Page Wait */
1450	PHY_B_AS_AN_RES_MSK = 7<<8,/* Bit 10..8:	AN HCD */
1451 PHY_B_AS_PDF = 1<<7, /* Bit 7: Parallel Detect. Fault */
1452 PHY_B_AS_RF = 1<<6, /* Bit 6: Remote Fault */
1453 PHY_B_AS_ANP_R = 1<<5, /* Bit 5: AN Page Received */
1454 PHY_B_AS_LP_ANAB = 1<<4, /* Bit 4: LP AN Ability */
1455 PHY_B_AS_LP_NPAB = 1<<3, /* Bit 3: LP Next Page Ability */
1456 PHY_B_AS_LS = 1<<2, /* Bit 2: Link Status */
1457 PHY_B_AS_PRR = 1<<1, /* Bit 1: Pause Resolution-Rx */
1458 PHY_B_AS_PRT = 1<<0, /* Bit 0: Pause Resolution-Tx */
1459};
1460#define PHY_B_AS_PAUSE_MSK (PHY_B_AS_PRR | PHY_B_AS_PRT)
1461
1462/***** PHY_BCOM_INT_STAT 16 bit r/o Interrupt Status Reg *****/
1463/***** PHY_BCOM_INT_MASK 16 bit r/w Interrupt Mask Reg *****/
1464enum {
1465 PHY_B_IS_PSE = 1<<14, /* Bit 14: Pair Swap Error */
1466 PHY_B_IS_MDXI_SC = 1<<13, /* Bit 13: MDIX Status Change */
1467 PHY_B_IS_HCT = 1<<12, /* Bit 12: counter above 32k */
1468 PHY_B_IS_LCT = 1<<11, /* Bit 11: counter above 128 */
1469 PHY_B_IS_AN_PR = 1<<10, /* Bit 10: Page Received */
1470 PHY_B_IS_NO_HDCL = 1<<9, /* Bit 9: No HCD Link */
1471 PHY_B_IS_NO_HDC = 1<<8, /* Bit 8: No HCD */
1472 PHY_B_IS_NEG_USHDC = 1<<7, /* Bit 7: Negotiated Unsup. HCD */
1473 PHY_B_IS_SCR_S_ER = 1<<6, /* Bit 6: Scrambler Sync Error */
1474 PHY_B_IS_RRS_CHANGE = 1<<5, /* Bit 5: Remote Rx Stat Change */
1475 PHY_B_IS_LRS_CHANGE = 1<<4, /* Bit 4: Local Rx Stat Change */
1476 PHY_B_IS_DUP_CHANGE = 1<<3, /* Bit 3: Duplex Mode Change */
1477 PHY_B_IS_LSP_CHANGE = 1<<2, /* Bit 2: Link Speed Change */
1478 PHY_B_IS_LST_CHANGE = 1<<1, /* Bit 1: Link Status Changed */
1479 PHY_B_IS_CRC_ER = 1<<0, /* Bit 0: CRC Error */
1480};
1481#define PHY_B_DEF_MSK (~(PHY_B_IS_AN_PR | PHY_B_IS_LST_CHANGE))
1482
1483/* Pause Bits (PHY_B_AN_ASP and PHY_B_AN_PC) encoding */
1484enum {
1485 PHY_B_P_NO_PAUSE = 0<<10,/* Bit 11..10: no Pause Mode */
1486 PHY_B_P_SYM_MD = 1<<10, /* Bit 11..10: symmetric Pause Mode */
1487 PHY_B_P_ASYM_MD = 2<<10,/* Bit 11..10: asymmetric Pause Mode */
1488 PHY_B_P_BOTH_MD = 3<<10,/* Bit 11..10: both Pause Mode */
1489};
1490/*
1491 * Resolved Duplex mode and Capabilities (Aux Status Summary Reg)
1492 */
1493enum {
1494 PHY_B_RES_1000FD = 7<<8,/* Bit 10..8: 1000Base-T Full Dup. */
1495 PHY_B_RES_1000HD = 6<<8,/* Bit 10..8: 1000Base-T Half Dup. */
1496};
1497
1498/*
1499 * Level One-Specific
1500 */
1501/***** PHY_LONE_1000T_CTRL 16 bit r/w 1000Base-T Control Reg *****/
1502enum {
1503 PHY_L_1000C_TEST = 7<<13,/* Bit 15..13: Test Modes */
1504 PHY_L_1000C_MSE = 1<<12, /* Bit 12: Master/Slave Enable */
1505 PHY_L_1000C_MSC = 1<<11, /* Bit 11: M/S Configuration */
1506 PHY_L_1000C_RD = 1<<10, /* Bit 10: Repeater/DTE */
1507 PHY_L_1000C_AFD = 1<<9, /* Bit 9: Advertise Full Duplex */
1508 PHY_L_1000C_AHD = 1<<8, /* Bit 8: Advertise Half Duplex */
1509};
1510
1511/***** PHY_LONE_1000T_STAT 16 bit r/o 1000Base-T Status Reg *****/
1512enum {
1513 PHY_L_1000S_MSF = 1<<15, /* Bit 15: Master/Slave Fault */
1514 PHY_L_1000S_MSR = 1<<14, /* Bit 14: Master/Slave Result */
1515 PHY_L_1000S_LRS = 1<<13, /* Bit 13: Local Receiver Status */
1516 PHY_L_1000S_RRS = 1<<12, /* Bit 12: Remote Receiver Status */
1517 PHY_L_1000S_LP_FD = 1<<11, /* Bit 11: Link Partner can FD */
1518 PHY_L_1000S_LP_HD = 1<<10, /* Bit 10: Link Partner can HD */
1519
1520 PHY_L_1000S_IEC = 0xff, /* Bit 7..0: Idle Error Count */
1521
1522/***** PHY_LONE_EXT_STAT 16 bit r/o Extended Status Register *****/
1523 PHY_L_ES_X_FD_CAP = 1<<15, /* Bit 15: 1000Base-X FD capable */
1524 PHY_L_ES_X_HD_CAP = 1<<14, /* Bit 14: 1000Base-X HD capable */
1525 PHY_L_ES_T_FD_CAP = 1<<13, /* Bit 13: 1000Base-T FD capable */
1526 PHY_L_ES_T_HD_CAP = 1<<12, /* Bit 12: 1000Base-T HD capable */
1527};
1528
1529/***** PHY_LONE_PORT_CFG 16 bit r/w Port Configuration Reg *****/
1530enum {
1531 PHY_L_PC_REP_MODE = 1<<15, /* Bit 15: Repeater Mode */
1532
1533 PHY_L_PC_TX_DIS = 1<<13, /* Bit 13: Tx output Disabled */
1534 PHY_L_PC_BY_SCR = 1<<12, /* Bit 12: Bypass Scrambler */
1535 PHY_L_PC_BY_45 = 1<<11, /* Bit 11: Bypass 4B5B-Decoder */
1536 PHY_L_PC_JAB_DIS = 1<<10, /* Bit 10: Jabber Disabled */
1537 PHY_L_PC_SQE = 1<<9, /* Bit 9: Enable Heartbeat */
1538 PHY_L_PC_TP_LOOP = 1<<8, /* Bit 8: TP Loopback */
1539 PHY_L_PC_SSS = 1<<7, /* Bit 7: Smart Speed Selection */
1540 PHY_L_PC_FIFO_SIZE = 1<<6, /* Bit 6: FIFO Size */
1541 PHY_L_PC_PRE_EN = 1<<5, /* Bit 5: Preamble Enable */
1542 PHY_L_PC_CIM = 1<<4, /* Bit 4: Carrier Integrity Mon */
1543 PHY_L_PC_10_SER = 1<<3, /* Bit 3: Use Serial Output */
1544 PHY_L_PC_ANISOL = 1<<2, /* Bit 2: Unisolate Port */
1545 PHY_L_PC_TEN_BIT = 1<<1, /* Bit 1: 10bit iface mode on */
1546 PHY_L_PC_ALTCLOCK = 1<<0, /* Bit 0: (ro) ALTCLOCK Mode on */
1547};
1548
1549/***** PHY_LONE_Q_STAT 16 bit r/o Quick Status Reg *****/
1550enum {
1551 PHY_L_QS_D_RATE = 3<<14,/* Bit 15..14: Data Rate */
1552 PHY_L_QS_TX_STAT = 1<<13, /* Bit 13: Transmitting */
1553 PHY_L_QS_RX_STAT = 1<<12, /* Bit 12: Receiving */
1554 PHY_L_QS_COL_STAT = 1<<11, /* Bit 11: Collision */
1555 PHY_L_QS_L_STAT = 1<<10, /* Bit 10: Link is up */
1556 PHY_L_QS_DUP_MOD = 1<<9, /* Bit 9: Full/Half Duplex */
1557 PHY_L_QS_AN = 1<<8, /* Bit 8: AutoNeg is On */
1558 PHY_L_QS_AN_C = 1<<7, /* Bit 7: AN is Complete */
1559 PHY_L_QS_LLE = 7<<4,/* Bit 6..4: Line Length Estim. */
1560 PHY_L_QS_PAUSE = 1<<3, /* Bit 3: LP advertised Pause */
1561 PHY_L_QS_AS_PAUSE = 1<<2, /* Bit 2: LP adv. asym. Pause */
1562 PHY_L_QS_ISOLATE = 1<<1, /* Bit 1: CIM Isolated */
1563 PHY_L_QS_EVENT = 1<<0, /* Bit 0: Event has occurred */
1564};
1565
1566/***** PHY_LONE_INT_ENAB 16 bit r/w Interrupt Enable Reg *****/
1567/***** PHY_LONE_INT_STAT 16 bit r/o Interrupt Status Reg *****/
1568enum {
1569 PHY_L_IS_AN_F = 1<<13, /* Bit 13: Auto-Negotiation fault */
1570 PHY_L_IS_CROSS = 1<<11, /* Bit 11: Crossover used */
1571 PHY_L_IS_POL = 1<<10, /* Bit 10: Polarity correct. used */
1572 PHY_L_IS_SS = 1<<9, /* Bit 9: Smart Speed Downgrade */
1573 PHY_L_IS_CFULL = 1<<8, /* Bit 8: Counter Full */
1574 PHY_L_IS_AN_C = 1<<7, /* Bit 7: AutoNeg Complete */
1575 PHY_L_IS_SPEED = 1<<6, /* Bit 6: Speed Changed */
1576 PHY_L_IS_DUP = 1<<5, /* Bit 5: Duplex Changed */
1577 PHY_L_IS_LS = 1<<4, /* Bit 4: Link Status Changed */
1578	PHY_L_IS_ISOL	= 1<<3, /* Bit 3:	Isolate Occurred */
1579 PHY_L_IS_MDINT = 1<<2, /* Bit 2: (ro) STAT: MII Int Pending */
1580 PHY_L_IS_INTEN = 1<<1, /* Bit 1: ENAB: Enable IRQs */
1581 PHY_L_IS_FORCE = 1<<0, /* Bit 0: ENAB: Force Interrupt */
1582};
1583
1584/* int. mask */
1585#define PHY_L_DEF_MSK (PHY_L_IS_LS | PHY_L_IS_ISOL | PHY_L_IS_INTEN)
1586
1587/***** PHY_LONE_LED_CFG 16 bit r/w LED Configuration Reg *****/
1588enum {
1589 PHY_L_LC_LEDC = 3<<14,/* Bit 15..14: Col/Blink/On/Off */
1590 PHY_L_LC_LEDR = 3<<12,/* Bit 13..12: Rx/Blink/On/Off */
1591 PHY_L_LC_LEDT = 3<<10,/* Bit 11..10: Tx/Blink/On/Off */
1592 PHY_L_LC_LEDG = 3<<8,/* Bit 9..8: Giga/Blink/On/Off */
1593 PHY_L_LC_LEDS = 3<<6,/* Bit 7..6: 10-100/Blink/On/Off */
1594 PHY_L_LC_LEDL = 3<<4,/* Bit 5..4: Link/Blink/On/Off */
1595 PHY_L_LC_LEDF = 3<<2,/* Bit 3..2: Duplex/Blink/On/Off */
1596	PHY_L_LC_PSTRECH= 1<<1, /* Bit 1:	Stretch LED Pulses */
1597 PHY_L_LC_FREQ = 1<<0, /* Bit 0: 30/100 ms */
1598};
1599
1600/***** PHY_LONE_PORT_CTRL 16 bit r/w Port Control Reg *****/
1601enum {
1602 PHY_L_PC_TX_TCLK = 1<<15, /* Bit 15: Enable TX_TCLK */
1603 PHY_L_PC_ALT_NP = 1<<13, /* Bit 14: Alternate Next Page */
1604 PHY_L_PC_GMII_ALT= 1<<12, /* Bit 13: Alternate GMII driver */
1605 PHY_L_PC_TEN_CRS = 1<<10, /* Bit 10: Extend CRS*/
1606};
1607
1608/***** PHY_LONE_CIM 16 bit r/o CIM Reg *****/
1609enum {
1610 PHY_L_CIM_ISOL = 0xff<<8,/* Bit 15..8: Isolate Count */
1611 PHY_L_CIM_FALSE_CAR = 0xff, /* Bit 7..0: False Carrier Count */
1612};
1613
1614/*
1615 * Pause Bits (PHY_L_AN_ASP and PHY_L_AN_PC) encoding
1616 */
1617enum {
1618 PHY_L_P_NO_PAUSE= 0<<10,/* Bit 11..10: no Pause Mode */
1619 PHY_L_P_SYM_MD = 1<<10, /* Bit 11..10: symmetric Pause Mode */
1620 PHY_L_P_ASYM_MD = 2<<10,/* Bit 11..10: asymmetric Pause Mode */
1621 PHY_L_P_BOTH_MD = 3<<10,/* Bit 11..10: both Pause Mode */
1622};
1623
1624/*
1625 * National-Specific
1626 */
1627/***** PHY_NAT_1000T_CTRL 16 bit r/w 1000Base-T Control Reg *****/
1628enum {
1629 PHY_N_1000C_TEST= 7<<13,/* Bit 15..13: Test Modes */
1630 PHY_N_1000C_MSE = 1<<12, /* Bit 12: Master/Slave Enable */
1631 PHY_N_1000C_MSC = 1<<11, /* Bit 11: M/S Configuration */
1632 PHY_N_1000C_RD = 1<<10, /* Bit 10: Repeater/DTE */
1633 PHY_N_1000C_AFD = 1<<9, /* Bit 9: Advertise Full Duplex */
1634 PHY_N_1000C_AHD = 1<<8, /* Bit 8: Advertise Half Duplex */
1635 PHY_N_1000C_APC = 1<<7, /* Bit 7: Asymmetric Pause Cap. */};
1636
1637
1638/***** PHY_NAT_1000T_STAT 16 bit r/o 1000Base-T Status Reg *****/
1639enum {
1640 PHY_N_1000S_MSF = 1<<15, /* Bit 15: Master/Slave Fault */
1641 PHY_N_1000S_MSR = 1<<14, /* Bit 14: Master/Slave Result */
1642 PHY_N_1000S_LRS = 1<<13, /* Bit 13: Local Receiver Status */
1643 PHY_N_1000S_RRS = 1<<12, /* Bit 12: Remote Receiver Status*/
1644 PHY_N_1000S_LP_FD= 1<<11, /* Bit 11: Link Partner can FD */
1645 PHY_N_1000S_LP_HD= 1<<10, /* Bit 10: Link Partner can HD */
1646 PHY_N_1000C_LP_APC= 1<<9, /* Bit 9: LP Asym. Pause Cap. */
1647 PHY_N_1000S_IEC = 0xff, /* Bit 7..0: Idle Error Count */
1648};
1649
1650/***** PHY_NAT_EXT_STAT 16 bit r/o Extended Status Register *****/
1651enum {
1652 PHY_N_ES_X_FD_CAP= 1<<15, /* Bit 15: 1000Base-X FD capable */
1653 PHY_N_ES_X_HD_CAP= 1<<14, /* Bit 14: 1000Base-X HD capable */
1654 PHY_N_ES_T_FD_CAP= 1<<13, /* Bit 13: 1000Base-T FD capable */
1655 PHY_N_ES_T_HD_CAP= 1<<12, /* Bit 12: 1000Base-T HD capable */
1656};
1657
1658/** Marvell-Specific */
1659enum {
1660 PHY_M_AN_NXT_PG = 1<<15, /* Request Next Page */
1661 PHY_M_AN_ACK = 1<<14, /* (ro) Acknowledge Received */
1662 PHY_M_AN_RF = 1<<13, /* Remote Fault */
1663
1664 PHY_M_AN_ASP = 1<<11, /* Asymmetric Pause */
1665 PHY_M_AN_PC = 1<<10, /* MAC Pause implemented */
1666 PHY_M_AN_100_T4 = 1<<9, /* Not cap. 100Base-T4 (always 0) */
1667 PHY_M_AN_100_FD = 1<<8, /* Advertise 100Base-TX Full Duplex */
1668 PHY_M_AN_100_HD = 1<<7, /* Advertise 100Base-TX Half Duplex */
1669	PHY_M_AN_10_FD	= 1<<6, /* Advertise 10Base-T Full Duplex */
1670	PHY_M_AN_10_HD	= 1<<5, /* Advertise 10Base-T Half Duplex */
1671 PHY_M_AN_SEL_MSK =0x1f<<4, /* Bit 4.. 0: Selector Field Mask */
1672};
1673
1674/* special defines for FIBER (88E1011S only) */
1675enum {
1676 PHY_M_AN_ASP_X = 1<<8, /* Asymmetric Pause */
1677 PHY_M_AN_PC_X = 1<<7, /* MAC Pause implemented */
1678	PHY_M_AN_1000X_AHD	= 1<<6, /* Advertise 1000Base-X Half Duplex */
1679	PHY_M_AN_1000X_AFD	= 1<<5, /* Advertise 1000Base-X Full Duplex */
1680};
1681
1682/* Pause Bits (PHY_M_AN_ASP_X and PHY_M_AN_PC_X) encoding */
1683enum {
1684 PHY_M_P_NO_PAUSE_X = 0<<7,/* Bit 8.. 7: no Pause Mode */
1685 PHY_M_P_SYM_MD_X = 1<<7, /* Bit 8.. 7: symmetric Pause Mode */
1686 PHY_M_P_ASYM_MD_X = 2<<7,/* Bit 8.. 7: asymmetric Pause Mode */
1687 PHY_M_P_BOTH_MD_X = 3<<7,/* Bit 8.. 7: both Pause Mode */
1688};
1689
1690/***** PHY_MARV_1000T_CTRL 16 bit r/w 1000Base-T Control Reg *****/
1691enum {
1692 PHY_M_1000C_TEST = 7<<13,/* Bit 15..13: Test Modes */
1693 PHY_M_1000C_MSE = 1<<12, /* Manual Master/Slave Enable */
1694 PHY_M_1000C_MSC = 1<<11, /* M/S Configuration (1=Master) */
1695 PHY_M_1000C_MPD = 1<<10, /* Multi-Port Device */
1696 PHY_M_1000C_AFD = 1<<9, /* Advertise Full Duplex */
1697 PHY_M_1000C_AHD = 1<<8, /* Advertise Half Duplex */
1698};
1699
1700/***** PHY_MARV_PHY_CTRL 16 bit r/w PHY Specific Ctrl Reg *****/
1701enum {
1702 PHY_M_PC_TX_FFD_MSK = 3<<14,/* Bit 15..14: Tx FIFO Depth Mask */
1703 PHY_M_PC_RX_FFD_MSK = 3<<12,/* Bit 13..12: Rx FIFO Depth Mask */
1704 PHY_M_PC_ASS_CRS_TX = 1<<11, /* Assert CRS on Transmit */
1705 PHY_M_PC_FL_GOOD = 1<<10, /* Force Link Good */
1706 PHY_M_PC_EN_DET_MSK = 3<<8,/* Bit 9.. 8: Energy Detect Mask */
1707 PHY_M_PC_ENA_EXT_D = 1<<7, /* Enable Ext. Distance (10BT) */
1708 PHY_M_PC_MDIX_MSK = 3<<5,/* Bit 6.. 5: MDI/MDIX Config. Mask */
1709 PHY_M_PC_DIS_125CLK = 1<<4, /* Disable 125 CLK */
1710 PHY_M_PC_MAC_POW_UP = 1<<3, /* MAC Power up */
1711 PHY_M_PC_SQE_T_ENA = 1<<2, /* SQE Test Enabled */
1712 PHY_M_PC_POL_R_DIS = 1<<1, /* Polarity Reversal Disabled */
1713 PHY_M_PC_DIS_JABBER = 1<<0, /* Disable Jabber */
1714};
1715
1716enum {
1717 PHY_M_PC_EN_DET = 2<<8, /* Energy Detect (Mode 1) */
1718 PHY_M_PC_EN_DET_PLUS = 3<<8, /* Energy Detect Plus (Mode 2) */
1719};
1720
1721#define PHY_M_PC_MDI_XMODE(x) (((x)<<5) & PHY_M_PC_MDIX_MSK)
1722
1723enum {
1724 PHY_M_PC_MAN_MDI = 0, /* 00 = Manual MDI configuration */
1725 PHY_M_PC_MAN_MDIX = 1, /* 01 = Manual MDIX configuration */
1726 PHY_M_PC_ENA_AUTO = 3, /* 11 = Enable Automatic Crossover */
1727};
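/*
 * Usage sketch (editor's example): automatic MDI/MDIX crossover is
 * selected by writing PHY_M_PC_MDI_XMODE(PHY_M_PC_ENA_AUTO), i.e.
 * 3<<5 = 0x0060, into the field of PHY_MARV_PHY_CTRL masked by
 * PHY_M_PC_MDIX_MSK; the PHY write helper itself is not shown here.
 */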
1728
1729/* for 10/100 Fast Ethernet PHY (88E3082 only) */
1730enum {
1731 PHY_M_PC_ENA_DTE_DT = 1<<15, /* Enable Data Terminal Equ. (DTE) Detect */
1732 PHY_M_PC_ENA_ENE_DT = 1<<14, /* Enable Energy Detect (sense & pulse) */
1733	PHY_M_PC_DIS_NLP_CK	= 1<<13, /* Disable Normal Link Pulse (NLP) Check */
1734	PHY_M_PC_ENA_LIP_NP	= 1<<12, /* Enable Link Partner Next Page Reg. */
1735	PHY_M_PC_DIS_NLP_GN	= 1<<11, /* Disable Normal Link Pulse Generation */
1736
1737 PHY_M_PC_DIS_SCRAMB = 1<<9, /* Disable Scrambler */
1738 PHY_M_PC_DIS_FEFI = 1<<8, /* Disable Far End Fault Indic. (FEFI) */
1739
1740 PHY_M_PC_SH_TP_SEL = 1<<6, /* Shielded Twisted Pair Select */
1741 PHY_M_PC_RX_FD_MSK = 3<<2,/* Bit 3.. 2: Rx FIFO Depth Mask */
1742};
1743
1744/***** PHY_MARV_PHY_STAT 16 bit r/o PHY Specific Status Reg *****/
1745enum {
1746 PHY_M_PS_SPEED_MSK = 3<<14, /* Bit 15..14: Speed Mask */
1747 PHY_M_PS_SPEED_1000 = 1<<15, /* 10 = 1000 Mbps */
1748 PHY_M_PS_SPEED_100 = 1<<14, /* 01 = 100 Mbps */
1749 PHY_M_PS_SPEED_10 = 0, /* 00 = 10 Mbps */
1750 PHY_M_PS_FULL_DUP = 1<<13, /* Full Duplex */
1751 PHY_M_PS_PAGE_REC = 1<<12, /* Page Received */
1752 PHY_M_PS_SPDUP_RES = 1<<11, /* Speed & Duplex Resolved */
1753 PHY_M_PS_LINK_UP = 1<<10, /* Link Up */
1754 PHY_M_PS_CABLE_MSK = 7<<7, /* Bit 9.. 7: Cable Length Mask */
1755 PHY_M_PS_MDI_X_STAT = 1<<6, /* MDI Crossover Stat (1=MDIX) */
1756 PHY_M_PS_DOWNS_STAT = 1<<5, /* Downshift Status (1=downsh.) */
1757 PHY_M_PS_ENDET_STAT = 1<<4, /* Energy Detect Status (1=act) */
1758 PHY_M_PS_TX_P_EN = 1<<3, /* Tx Pause Enabled */
1759 PHY_M_PS_RX_P_EN = 1<<2, /* Rx Pause Enabled */
1760 PHY_M_PS_POL_REV = 1<<1, /* Polarity Reversed */
1761 PHY_M_PS_JABBER = 1<<0, /* Jabber */
1762};
1763
1764#define PHY_M_PS_PAUSE_MSK (PHY_M_PS_TX_P_EN | PHY_M_PS_RX_P_EN)
1765
1766/* for 10/100 Fast Ethernet PHY (88E3082 only) */
1767enum {
1768 PHY_M_PS_DTE_DETECT = 1<<15, /* Data Terminal Equipment (DTE) Detected */
1769 PHY_M_PS_RES_SPEED = 1<<14, /* Resolved Speed (1=100 Mbps, 0=10 Mbps) */
1770};
1771
1772enum {
1773 PHY_M_IS_AN_ERROR = 1<<15, /* Auto-Negotiation Error */
1774 PHY_M_IS_LSP_CHANGE = 1<<14, /* Link Speed Changed */
1775 PHY_M_IS_DUP_CHANGE = 1<<13, /* Duplex Mode Changed */
1776 PHY_M_IS_AN_PR = 1<<12, /* Page Received */
1777 PHY_M_IS_AN_COMPL = 1<<11, /* Auto-Negotiation Completed */
1778 PHY_M_IS_LST_CHANGE = 1<<10, /* Link Status Changed */
1779 PHY_M_IS_SYMB_ERROR = 1<<9, /* Symbol Error */
1780 PHY_M_IS_FALSE_CARR = 1<<8, /* False Carrier */
1781 PHY_M_IS_FIFO_ERROR = 1<<7, /* FIFO Overflow/Underrun Error */
1782 PHY_M_IS_MDI_CHANGE = 1<<6, /* MDI Crossover Changed */
1783 PHY_M_IS_DOWNSH_DET = 1<<5, /* Downshift Detected */
1784 PHY_M_IS_END_CHANGE = 1<<4, /* Energy Detect Changed */
1785
1786 PHY_M_IS_DTE_CHANGE = 1<<2, /* DTE Power Det. Status Changed */
1787 PHY_M_IS_POL_CHANGE = 1<<1, /* Polarity Changed */
1788 PHY_M_IS_JABBER = 1<<0, /* Jabber */
1789};
1790
1791#define PHY_M_DEF_MSK ( PHY_M_IS_AN_ERROR | PHY_M_IS_LSP_CHANGE | \
1792 PHY_M_IS_LST_CHANGE | PHY_M_IS_FIFO_ERROR)
1793
1794/***** PHY_MARV_EXT_CTRL 16 bit r/w Ext. PHY Specific Ctrl *****/
1795enum {
1796 PHY_M_EC_ENA_BC_EXT = 1<<15, /* Enable Block Carr. Ext. (88E1111 only) */
1797 PHY_M_EC_ENA_LIN_LB = 1<<14, /* Enable Line Loopback (88E1111 only) */
1798
1799 PHY_M_EC_DIS_LINK_P = 1<<12, /* Disable Link Pulses (88E1111 only) */
1800 PHY_M_EC_M_DSC_MSK = 3<<10, /* Bit 11..10: Master Downshift Counter */
1801 /* (88E1011 only) */
1802 PHY_M_EC_S_DSC_MSK = 3<<8,/* Bit 9.. 8: Slave Downshift Counter */
1803 /* (88E1011 only) */
1804 PHY_M_EC_M_DSC_MSK2 = 7<<9,/* Bit 11.. 9: Master Downshift Counter */
1805 /* (88E1111 only) */
1806 PHY_M_EC_DOWN_S_ENA = 1<<8, /* Downshift Enable (88E1111 only) */
1807 /* !!! Errata in spec. (1 = disable) */
1808 PHY_M_EC_RX_TIM_CT = 1<<7, /* RGMII Rx Timing Control*/
1809 PHY_M_EC_MAC_S_MSK = 7<<4,/* Bit 6.. 4: Def. MAC interface speed */
1810 PHY_M_EC_FIB_AN_ENA = 1<<3, /* Fiber Auto-Neg. Enable (88E1011S only) */
1811 PHY_M_EC_DTE_D_ENA = 1<<2, /* DTE Detect Enable (88E1111 only) */
1812 PHY_M_EC_TX_TIM_CT = 1<<1, /* RGMII Tx Timing Control */
1813 PHY_M_EC_TRANS_DIS = 1<<0, /* Transmitter Disable (88E1111 only) */
};
1814
1815#define PHY_M_EC_M_DSC(x) ((x)<<10) /* 00=1x; 01=2x; 10=3x; 11=4x */
1816#define PHY_M_EC_S_DSC(x) ((x)<<8) /* 00=dis; 01=1x; 10=2x; 11=3x */
1817#define PHY_M_EC_MAC_S(x) ((x)<<4) /* 01X=0; 110=2.5; 111=25 (MHz) */
1818
1819#define PHY_M_EC_M_DSC_2(x) ((x)<<9) /* 000=1x; 001=2x; 010=3x; 011=4x */
1820 /* 100=5x; 101=6x; 110=7x; 111=8x */
1821enum {
1822 MAC_TX_CLK_0_MHZ = 2,
1823 MAC_TX_CLK_2_5_MHZ = 6,
1824 MAC_TX_CLK_25_MHZ = 7,
1825};
1826
1827/***** PHY_MARV_LED_CTRL 16 bit r/w LED Control Reg *****/
1828enum {
1829 PHY_M_LEDC_DIS_LED = 1<<15, /* Disable LED */
1830 PHY_M_LEDC_PULS_MSK = 7<<12,/* Bit 14..12: Pulse Stretch Mask */
1831 PHY_M_LEDC_F_INT = 1<<11, /* Force Interrupt */
1832 PHY_M_LEDC_BL_R_MSK = 7<<8,/* Bit 10.. 8: Blink Rate Mask */
1833 PHY_M_LEDC_DP_C_LSB = 1<<7, /* Duplex Control (LSB, 88E1111 only) */
1834 PHY_M_LEDC_TX_C_LSB = 1<<6, /* Tx Control (LSB, 88E1111 only) */
1835 PHY_M_LEDC_LK_C_MSK = 7<<3,/* Bit 5.. 3: Link Control Mask */
1836 /* (88E1111 only) */
1837};
1838
1839enum {
1840 PHY_M_LEDC_LINK_MSK = 3<<3,/* Bit 4.. 3: Link Control Mask */
1841 /* (88E1011 only) */
1842 PHY_M_LEDC_DP_CTRL = 1<<2, /* Duplex Control */
1843 PHY_M_LEDC_DP_C_MSB = 1<<2, /* Duplex Control (MSB, 88E1111 only) */
1844 PHY_M_LEDC_RX_CTRL = 1<<1, /* Rx Activity / Link */
1845 PHY_M_LEDC_TX_CTRL = 1<<0, /* Tx Activity / Link */
1846 PHY_M_LEDC_TX_C_MSB = 1<<0, /* Tx Control (MSB, 88E1111 only) */
1847};
1848
1849#define PHY_M_LED_PULS_DUR(x) ( ((x)<<12) & PHY_M_LEDC_PULS_MSK)
1850
1851enum {
1852 PULS_NO_STR = 0,/* no pulse stretching */
1853 PULS_21MS = 1,/* 21 ms to 42 ms */
1854 PULS_42MS = 2,/* 42 ms to 84 ms */
1855 PULS_84MS = 3,/* 84 ms to 170 ms */
1856 PULS_170MS = 4,/* 170 ms to 340 ms */
1857 PULS_340MS = 5,/* 340 ms to 670 ms */
1858 PULS_670MS = 6,/* 670 ms to 1.3 s */
1859 PULS_1300MS = 7,/* 1.3 s to 2.7 s */
1860};
1861
1862#define PHY_M_LED_BLINK_RT(x) ( ((x)<<8) & PHY_M_LEDC_BL_R_MSK)
1863
1864enum {
1865 BLINK_42MS = 0,/* 42 ms */
1866 BLINK_84MS = 1,/* 84 ms */
1867 BLINK_170MS = 2,/* 170 ms */
1868 BLINK_340MS = 3,/* 340 ms */
1869 BLINK_670MS = 4,/* 670 ms */
1870};
1871
1872/***** PHY_MARV_LED_OVER 16 bit r/w Manual LED Override Reg *****/
1873#define PHY_M_LED_MO_SGMII(x) ((x)<<14) /* Bit 15..14: SGMII AN Timer */
1874 /* Bit 13..12: reserved */
1875#define PHY_M_LED_MO_DUP(x) ((x)<<10) /* Bit 11..10: Duplex */
1876#define PHY_M_LED_MO_10(x) ((x)<<8) /* Bit 9.. 8: Link 10 */
1877#define PHY_M_LED_MO_100(x) ((x)<<6) /* Bit 7.. 6: Link 100 */
1878#define PHY_M_LED_MO_1000(x) ((x)<<4) /* Bit 5.. 4: Link 1000 */
1879#define PHY_M_LED_MO_RX(x) ((x)<<2) /* Bit 3.. 2: Rx */
1880#define PHY_M_LED_MO_TX(x) ((x)<<0) /* Bit 1.. 0: Tx */
1881
1882enum {
1883 MO_LED_NORM = 0,
1884 MO_LED_BLINK = 1,
1885 MO_LED_OFF = 2,
1886 MO_LED_ON = 3,
1887};
1888
1889/***** PHY_MARV_EXT_CTRL_2 16 bit r/w Ext. PHY Specific Ctrl 2 *****/
1890enum {
1891 PHY_M_EC2_FI_IMPED = 1<<6, /* Fiber Input Impedance */
1892 PHY_M_EC2_FO_IMPED = 1<<5, /* Fiber Output Impedance */
1893 PHY_M_EC2_FO_M_CLK = 1<<4, /* Fiber Mode Clock Enable */
1894 PHY_M_EC2_FO_BOOST = 1<<3, /* Fiber Output Boost */
1895 PHY_M_EC2_FO_AM_MSK = 7,/* Bit 2.. 0: Fiber Output Amplitude */
1896};
1897
1898/***** PHY_MARV_EXT_P_STAT 16 bit r/w Ext. PHY Specific Status *****/
1899enum {
1900 PHY_M_FC_AUTO_SEL = 1<<15, /* Fiber/Copper Auto Sel. Dis. */
1901 PHY_M_FC_AN_REG_ACC = 1<<14, /* Fiber/Copper AN Reg. Access */
1902 PHY_M_FC_RESOLUTION = 1<<13, /* Fiber/Copper Resolution */
1903 PHY_M_SER_IF_AN_BP = 1<<12, /* Ser. IF AN Bypass Enable */
1904 PHY_M_SER_IF_BP_ST = 1<<11, /* Ser. IF AN Bypass Status */
1905 PHY_M_IRQ_POLARITY = 1<<10, /* IRQ polarity */
1906 PHY_M_DIS_AUT_MED = 1<<9, /* Disable Aut. Medium Reg. Selection */
1907 /* (88E1111 only) */
1908 /* Bit 9.. 4: reserved (88E1011 only) */
1909 PHY_M_UNDOC1 = 1<<7, /* undocumented bit !! */
1910 PHY_M_DTE_POW_STAT = 1<<4, /* DTE Power Status (88E1111 only) */
1911 PHY_M_MODE_MASK = 0xf, /* Bit 3.. 0: copy of HWCFG MODE[3:0] */
1912};
1913
1914/***** PHY_MARV_CABLE_DIAG 16 bit r/o Cable Diagnostic Reg *****/
1915enum {
1916 PHY_M_CABD_ENA_TEST = 1<<15, /* Enable Test (Page 0) */
1917 PHY_M_CABD_DIS_WAIT = 1<<15, /* Disable Waiting Period (Page 1) */
1918 /* (88E1111 only) */
1919 PHY_M_CABD_STAT_MSK = 3<<13, /* Bit 14..13: Status Mask */
1920 PHY_M_CABD_AMPL_MSK = 0x1f<<8,/* Bit 12.. 8: Amplitude Mask */
1921 /* (88E1111 only) */
1922 PHY_M_CABD_DIST_MSK = 0xff, /* Bit 7.. 0: Distance Mask */
1923};
1924
1925/* values for Cable Diagnostic Status (11=fail; 00=OK; 10=open; 01=short) */
1926enum {
1927 CABD_STAT_NORMAL= 0,
1928 CABD_STAT_SHORT = 1,
1929 CABD_STAT_OPEN = 2,
1930 CABD_STAT_FAIL = 3,
1931};
1932
1933/* for 10/100 Fast Ethernet PHY (88E3082 only) */
1934/***** PHY_MARV_FE_LED_PAR 16 bit r/w LED Parallel Select Reg. *****/
1935 /* Bit 15..12: reserved (used internally) */
1936enum {
1937 PHY_M_FELP_LED2_MSK = 0xf<<8, /* Bit 11.. 8: LED2 Mask (LINK) */
1938 PHY_M_FELP_LED1_MSK = 0xf<<4, /* Bit 7.. 4: LED1 Mask (ACT) */
1939 PHY_M_FELP_LED0_MSK = 0xf, /* Bit 3.. 0: LED0 Mask (SPEED) */
1940};
1941
1942#define PHY_M_FELP_LED2_CTRL(x) ( ((x)<<8) & PHY_M_FELP_LED2_MSK)
1943#define PHY_M_FELP_LED1_CTRL(x) ( ((x)<<4) & PHY_M_FELP_LED1_MSK)
1944#define PHY_M_FELP_LED0_CTRL(x) ( ((x)<<0) & PHY_M_FELP_LED0_MSK)
1945
1946enum {
1947 LED_PAR_CTRL_COLX = 0x00,
1948 LED_PAR_CTRL_ERROR = 0x01,
1949 LED_PAR_CTRL_DUPLEX = 0x02,
1950 LED_PAR_CTRL_DP_COL = 0x03,
1951 LED_PAR_CTRL_SPEED = 0x04,
1952 LED_PAR_CTRL_LINK = 0x05,
1953 LED_PAR_CTRL_TX = 0x06,
1954 LED_PAR_CTRL_RX = 0x07,
1955 LED_PAR_CTRL_ACT = 0x08,
1956 LED_PAR_CTRL_LNK_RX = 0x09,
1957 LED_PAR_CTRL_LNK_AC = 0x0a,
1958 LED_PAR_CTRL_ACT_BL = 0x0b,
1959 LED_PAR_CTRL_TX_BL = 0x0c,
1960 LED_PAR_CTRL_RX_BL = 0x0d,
1961 LED_PAR_CTRL_COL_BL = 0x0e,
1962 LED_PAR_CTRL_INACT = 0x0f
1963};
1964
1965/***** PHY_MARV_FE_SPEC_2 16 bit r/w Specific Control Reg. 2 *****/
1966enum {
1967 PHY_M_FESC_DIS_WAIT = 1<<2, /* Disable TDR Waiting Period */
1968 PHY_M_FESC_ENA_MCLK = 1<<1, /* Enable MAC Rx Clock in sleep mode */
1969 PHY_M_FESC_SEL_CL_A = 1<<0, /* Select Class A driver (100B-TX) */
1970};
1971
1972/* for Yukon-2 Gigabit Ethernet PHY (88E1112 only) */
1973/***** PHY_MARV_PHY_CTRL (page 2) 16 bit r/w MAC Specific Ctrl *****/
1974enum {
1975 PHY_M_MAC_MD_MSK = 7<<7, /* Bit 9.. 7: Mode Select Mask */
1976 PHY_M_MAC_MD_AUTO = 3,/* Auto Copper/1000Base-X */
1977 PHY_M_MAC_MD_COPPER = 5,/* Copper only */
1978 PHY_M_MAC_MD_1000BX = 7,/* 1000Base-X only */
1979};
1980#define PHY_M_MAC_MODE_SEL(x) ( ((x)<<7) & PHY_M_MAC_MD_MSK)
1981
1982/***** PHY_MARV_PHY_CTRL (page 3) 16 bit r/w LED Control Reg. *****/
1983enum {
1984 PHY_M_LEDC_LOS_MSK = 0xf<<12,/* Bit 15..12: LOS LED Ctrl. Mask */
1985 PHY_M_LEDC_INIT_MSK = 0xf<<8, /* Bit 11.. 8: INIT LED Ctrl. Mask */
1986 PHY_M_LEDC_STA1_MSK = 0xf<<4,/* Bit 7.. 4: STAT1 LED Ctrl. Mask */
1987 PHY_M_LEDC_STA0_MSK = 0xf, /* Bit 3.. 0: STAT0 LED Ctrl. Mask */
1988};
1989
1990#define PHY_M_LEDC_LOS_CTRL(x) ( ((x)<<12) & PHY_M_LEDC_LOS_MSK)
1991#define PHY_M_LEDC_INIT_CTRL(x) ( ((x)<<8) & PHY_M_LEDC_INIT_MSK)
1992#define PHY_M_LEDC_STA1_CTRL(x) ( ((x)<<4) & PHY_M_LEDC_STA1_MSK)
1993#define PHY_M_LEDC_STA0_CTRL(x) ( ((x)<<0) & PHY_M_LEDC_STA0_MSK)
1994
1995/* GMAC registers */
1996/* Port Registers */
1997enum {
1998 GM_GP_STAT = 0x0000, /* 16 bit r/o General Purpose Status */
1999 GM_GP_CTRL = 0x0004, /* 16 bit r/w General Purpose Control */
2000 GM_TX_CTRL = 0x0008, /* 16 bit r/w Transmit Control Reg. */
2001 GM_RX_CTRL = 0x000c, /* 16 bit r/w Receive Control Reg. */
2002 GM_TX_FLOW_CTRL = 0x0010, /* 16 bit r/w Transmit Flow-Control */
2003 GM_TX_PARAM = 0x0014, /* 16 bit r/w Transmit Parameter Reg. */
2004 GM_SERIAL_MODE = 0x0018, /* 16 bit r/w Serial Mode Register */
2005/* Source Address Registers */
2006 GM_SRC_ADDR_1L = 0x001c, /* 16 bit r/w Source Address 1 (low) */
2007 GM_SRC_ADDR_1M = 0x0020, /* 16 bit r/w Source Address 1 (middle) */
2008 GM_SRC_ADDR_1H = 0x0024, /* 16 bit r/w Source Address 1 (high) */
2009 GM_SRC_ADDR_2L = 0x0028, /* 16 bit r/w Source Address 2 (low) */
2010 GM_SRC_ADDR_2M = 0x002c, /* 16 bit r/w Source Address 2 (middle) */
2011 GM_SRC_ADDR_2H = 0x0030, /* 16 bit r/w Source Address 2 (high) */
2012
2013/* Multicast Address Hash Registers */
2014 GM_MC_ADDR_H1 = 0x0034, /* 16 bit r/w Multicast Address Hash 1 */
2015 GM_MC_ADDR_H2 = 0x0038, /* 16 bit r/w Multicast Address Hash 2 */
2016 GM_MC_ADDR_H3 = 0x003c, /* 16 bit r/w Multicast Address Hash 3 */
2017 GM_MC_ADDR_H4 = 0x0040, /* 16 bit r/w Multicast Address Hash 4 */
2018
2019/* Interrupt Source Registers */
2020 GM_TX_IRQ_SRC = 0x0044, /* 16 bit r/o Tx Overflow IRQ Source */
2021 GM_RX_IRQ_SRC = 0x0048, /* 16 bit r/o Rx Overflow IRQ Source */
2022 GM_TR_IRQ_SRC = 0x004c, /* 16 bit r/o Tx/Rx Over. IRQ Source */
2023
2024/* Interrupt Mask Registers */
2025 GM_TX_IRQ_MSK = 0x0050, /* 16 bit r/w Tx Overflow IRQ Mask */
2026 GM_RX_IRQ_MSK = 0x0054, /* 16 bit r/w Rx Overflow IRQ Mask */
2027 GM_TR_IRQ_MSK = 0x0058, /* 16 bit r/w Tx/Rx Over. IRQ Mask */
2028
2029/* Serial Management Interface (SMI) Registers */
2030 GM_SMI_CTRL = 0x0080, /* 16 bit r/w SMI Control Register */
2031 GM_SMI_DATA = 0x0084, /* 16 bit r/w SMI Data Register */
2032 GM_PHY_ADDR = 0x0088, /* 16 bit r/w GPHY Address Register */
2033};
2034
2035/* MIB Counters */
2036#define GM_MIB_CNT_BASE 0x0100 /* Base Address of MIB Counters */
2037#define GM_MIB_CNT_SIZE 44 /* Number of MIB Counters */
2038
2039/*
2040 * MIB Counters base address definitions (low word) -
2041 * use offset 4 for access to high word (32 bit r/o)
2042 */
2043enum {
2044 GM_RXF_UC_OK = GM_MIB_CNT_BASE + 0, /* Unicast Frames Received OK */
2045 GM_RXF_BC_OK = GM_MIB_CNT_BASE + 8, /* Broadcast Frames Received OK */
2046 GM_RXF_MPAUSE = GM_MIB_CNT_BASE + 16, /* Pause MAC Ctrl Frames Received */
2047 GM_RXF_MC_OK = GM_MIB_CNT_BASE + 24, /* Multicast Frames Received OK */
2048 GM_RXF_FCS_ERR = GM_MIB_CNT_BASE + 32, /* Rx Frame Check Seq. Error */
2049 /* GM_MIB_CNT_BASE + 40: reserved */
2050 GM_RXO_OK_LO = GM_MIB_CNT_BASE + 48, /* Octets Received OK Low */
2051 GM_RXO_OK_HI = GM_MIB_CNT_BASE + 56, /* Octets Received OK High */
2052 GM_RXO_ERR_LO = GM_MIB_CNT_BASE + 64, /* Octets Received Invalid Low */
2053 GM_RXO_ERR_HI = GM_MIB_CNT_BASE + 72, /* Octets Received Invalid High */
2054 GM_RXF_SHT = GM_MIB_CNT_BASE + 80, /* Frames <64 Byte Received OK */
2055 GM_RXE_FRAG = GM_MIB_CNT_BASE + 88, /* Frames <64 Byte Received with FCS Err */
2056 GM_RXF_64B = GM_MIB_CNT_BASE + 96, /* 64 Byte Rx Frame */
2057 GM_RXF_127B = GM_MIB_CNT_BASE + 104, /* 65-127 Byte Rx Frame */
2058 GM_RXF_255B = GM_MIB_CNT_BASE + 112, /* 128-255 Byte Rx Frame */
2059 GM_RXF_511B = GM_MIB_CNT_BASE + 120, /* 256-511 Byte Rx Frame */
2060 GM_RXF_1023B = GM_MIB_CNT_BASE + 128, /* 512-1023 Byte Rx Frame */
2061 GM_RXF_1518B = GM_MIB_CNT_BASE + 136, /* 1024-1518 Byte Rx Frame */
2062 GM_RXF_MAX_SZ = GM_MIB_CNT_BASE + 144, /* 1519-MaxSize Byte Rx Frame */
2063 GM_RXF_LNG_ERR = GM_MIB_CNT_BASE + 152, /* Rx Frame too Long Error */
2064 GM_RXF_JAB_PKT = GM_MIB_CNT_BASE + 160, /* Rx Jabber Packet Frame */
2065 /* GM_MIB_CNT_BASE + 168: reserved */
2066 GM_RXE_FIFO_OV = GM_MIB_CNT_BASE + 176, /* Rx FIFO overflow Event */
2067 /* GM_MIB_CNT_BASE + 184: reserved */
2068 GM_TXF_UC_OK = GM_MIB_CNT_BASE + 192, /* Unicast Frames Xmitted OK */
2069 GM_TXF_BC_OK = GM_MIB_CNT_BASE + 200, /* Broadcast Frames Xmitted OK */
2070 GM_TXF_MPAUSE = GM_MIB_CNT_BASE + 208, /* Pause MAC Ctrl Frames Xmitted */
2071 GM_TXF_MC_OK = GM_MIB_CNT_BASE + 216, /* Multicast Frames Xmitted OK */
2072 GM_TXO_OK_LO = GM_MIB_CNT_BASE + 224, /* Octets Transmitted OK Low */
2073 GM_TXO_OK_HI = GM_MIB_CNT_BASE + 232, /* Octets Transmitted OK High */
2074 GM_TXF_64B = GM_MIB_CNT_BASE + 240, /* 64 Byte Tx Frame */
2075 GM_TXF_127B = GM_MIB_CNT_BASE + 248, /* 65-127 Byte Tx Frame */
2076 GM_TXF_255B = GM_MIB_CNT_BASE + 256, /* 128-255 Byte Tx Frame */
2077 GM_TXF_511B = GM_MIB_CNT_BASE + 264, /* 256-511 Byte Tx Frame */
2078 GM_TXF_1023B = GM_MIB_CNT_BASE + 272, /* 512-1023 Byte Tx Frame */
2079 GM_TXF_1518B = GM_MIB_CNT_BASE + 280, /* 1024-1518 Byte Tx Frame */
2080 GM_TXF_MAX_SZ = GM_MIB_CNT_BASE + 288, /* 1519-MaxSize Byte Tx Frame */
2081
2082 GM_TXF_COL = GM_MIB_CNT_BASE + 304, /* Tx Collision */
2083 GM_TXF_LAT_COL = GM_MIB_CNT_BASE + 312, /* Tx Late Collision */
2084 GM_TXF_ABO_COL = GM_MIB_CNT_BASE + 320, /* Tx aborted due to Exces. Col. */
2085 GM_TXF_MUL_COL = GM_MIB_CNT_BASE + 328, /* Tx Multiple Collision */
2086 GM_TXF_SNG_COL = GM_MIB_CNT_BASE + 336, /* Tx Single Collision */
2087 GM_TXE_FIFO_UR = GM_MIB_CNT_BASE + 344, /* Tx FIFO Underrun Event */
2088};
2089
2090/* GMAC Bit Definitions */
2091/* GM_GP_STAT 16 bit r/o General Purpose Status Register */
2092enum {
2093 GM_GPSR_SPEED = 1<<15, /* Bit 15: Port Speed (1 = 100 Mbps) */
2094 GM_GPSR_DUPLEX = 1<<14, /* Bit 14: Duplex Mode (1 = Full) */
2095 GM_GPSR_FC_TX_DIS = 1<<13, /* Bit 13: Tx Flow-Control Mode Disabled */
2096 GM_GPSR_LINK_UP = 1<<12, /* Bit 12: Link Up Status */
2097 GM_GPSR_PAUSE = 1<<11, /* Bit 11: Pause State */
2098 GM_GPSR_TX_ACTIVE = 1<<10, /* Bit 10: Tx in Progress */
2099 GM_GPSR_EXC_COL = 1<<9, /* Bit 9: Excessive Collisions Occurred */
2100 GM_GPSR_LAT_COL = 1<<8, /* Bit 8: Late Collisions Occurred */
2101
2102 GM_GPSR_PHY_ST_CH = 1<<5, /* Bit 5: PHY Status Change */
2103 GM_GPSR_GIG_SPEED = 1<<4, /* Bit 4: Gigabit Speed (1 = 1000 Mbps) */
2104 GM_GPSR_PART_MODE = 1<<3, /* Bit 3: Partition mode */
2105 GM_GPSR_FC_RX_DIS = 1<<2, /* Bit 2: Rx Flow-Control Mode Disabled */
2106 GM_GPSR_PROM_EN = 1<<1, /* Bit 1: Promiscuous Mode Enabled */
2107};
2108
2109/* GM_GP_CTRL 16 bit r/w General Purpose Control Register */
2110enum {
2111 GM_GPCR_PROM_ENA = 1<<14, /* Bit 14: Enable Promiscuous Mode */
2112 GM_GPCR_FC_TX_DIS = 1<<13, /* Bit 13: Disable Tx Flow-Control Mode */
2113 GM_GPCR_TX_ENA = 1<<12, /* Bit 12: Enable Transmit */
2114 GM_GPCR_RX_ENA = 1<<11, /* Bit 11: Enable Receive */
2115 GM_GPCR_BURST_ENA = 1<<10, /* Bit 10: Enable Burst Mode */
2116 GM_GPCR_LOOP_ENA = 1<<9, /* Bit 9: Enable MAC Loopback Mode */
2117 GM_GPCR_PART_ENA = 1<<8, /* Bit 8: Enable Partition Mode */
2118 GM_GPCR_GIGS_ENA = 1<<7, /* Bit 7: Gigabit Speed (1000 Mbps) */
2119 GM_GPCR_FL_PASS = 1<<6, /* Bit 6: Force Link Pass */
2120 GM_GPCR_DUP_FULL = 1<<5, /* Bit 5: Full Duplex Mode */
2121 GM_GPCR_FC_RX_DIS = 1<<4, /* Bit 4: Disable Rx Flow-Control Mode */
2122 GM_GPCR_SPEED_100 = 1<<3, /* Bit 3: Port Speed 100 Mbps */
2123 GM_GPCR_AU_DUP_DIS = 1<<2, /* Bit 2: Disable Auto-Update Duplex */
2124 GM_GPCR_AU_FCT_DIS = 1<<1, /* Bit 1: Disable Auto-Update Flow-C. */
2125 GM_GPCR_AU_SPD_DIS = 1<<0, /* Bit 0: Disable Auto-Update Speed */
2126};
2127
2128#define GM_GPCR_SPEED_1000 (GM_GPCR_GIGS_ENA | GM_GPCR_SPEED_100)
2129#define GM_GPCR_AU_ALL_DIS (GM_GPCR_AU_DUP_DIS | GM_GPCR_AU_FCT_DIS|GM_GPCR_AU_SPD_DIS)
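
A hedged sketch of forcing the GMAC itself to 1000 Mbps full duplex: disable the auto-update functions, set speed and duplex, then enable receiver and transmitter. Real link bring-up also programs the PHY; this shows only the MAC side.

static void yukon_force_1000fd(const struct skge_hw *hw, int port)
{
        u16 ctrl = GM_GPCR_AU_ALL_DIS | GM_GPCR_SPEED_1000 | GM_GPCR_DUP_FULL;

        skge_gma_write16(hw, port, GM_GP_CTRL,
                         ctrl | GM_GPCR_RX_ENA | GM_GPCR_TX_ENA);
}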
2130
2131/* GM_TX_CTRL 16 bit r/w Transmit Control Register */
2132enum {
2133 GM_TXCR_FORCE_JAM = 1<<15, /* Bit 15: Force Jam / Flow-Control */
2134 GM_TXCR_CRC_DIS = 1<<14, /* Bit 14: Disable insertion of CRC */
2135 GM_TXCR_PAD_DIS = 1<<13, /* Bit 13: Disable padding of packets */
2136 GM_TXCR_COL_THR_MSK = 7<<10, /* Bit 12..10: Collision Threshold */
2137};
2138
2139#define TX_COL_THR(x) (((x)<<10) & GM_TXCR_COL_THR_MSK)
2140#define TX_COL_DEF 0x04
2141
2142/* GM_RX_CTRL 16 bit r/w Receive Control Register */
2143enum {
2144 GM_RXCR_UCF_ENA = 1<<15, /* Bit 15: Enable Unicast filtering */
2145 GM_RXCR_MCF_ENA = 1<<14, /* Bit 14: Enable Multicast filtering */
2146 GM_RXCR_CRC_DIS = 1<<13, /* Bit 13: Remove 4-byte CRC */
2147 GM_RXCR_PASS_FC = 1<<12, /* Bit 12: Pass FC packets to FIFO */
2148};
2149
2150/* GM_TX_PARAM 16 bit r/w Transmit Parameter Register */
2151enum {
2152 GM_TXPA_JAMLEN_MSK = 0x03<<14, /* Bit 15..14: Jam Length */
2153 GM_TXPA_JAMIPG_MSK = 0x1f<<9, /* Bit 13..9: Jam IPG */
2154 GM_TXPA_JAMDAT_MSK = 0x1f<<4, /* Bit 8..4: IPG Jam to Data */
2155
2156 TX_JAM_LEN_DEF = 0x03,
2157 TX_JAM_IPG_DEF = 0x0b,
2158 TX_IPG_JAM_DEF = 0x1c,
2159};
2160
2161#define TX_JAM_LEN_VAL(x) (((x)<<14) & GM_TXPA_JAMLEN_MSK)
2162#define TX_JAM_IPG_VAL(x) (((x)<<9) & GM_TXPA_JAMIPG_MSK)
2163#define TX_IPG_JAM_DATA(x) (((x)<<4) & GM_TXPA_JAMDAT_MSK)
2164
2165
2166/* GM_SERIAL_MODE 16 bit r/w Serial Mode Register */
2167enum {
2168 GM_SMOD_DATABL_MSK = 0x1f<<11, /* Bit 15..11: Data Blinder (r/o) */
2169 GM_SMOD_LIMIT_4 = 1<<10, /* Bit 10: 4 consecutive Tx trials */
2170 GM_SMOD_VLAN_ENA = 1<<9, /* Bit 9: Enable VLAN (Max. Frame Len) */
2171 GM_SMOD_JUMBO_ENA = 1<<8, /* Bit 8: Enable Jumbo (Max. Frame Len) */
2172 GM_SMOD_IPG_MSK = 0x1f /* Bit 4..0: Inter-Packet Gap (IPG) */
2173};
2174
2175#define DATA_BLIND_VAL(x) (((x)<<11) & GM_SMOD_DATABL_MSK)
2176#define DATA_BLIND_DEF 0x04
2177
2178#define IPG_DATA_VAL(x) (x & GM_SMOD_IPG_MSK)
2179#define IPG_DATA_DEF 0x1e
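
A hedged sketch of how the jam and serial-mode defaults above are combined with their helper macros during port setup; allowing VLAN-sized frames here is an illustrative choice, not a requirement.

static void yukon_set_tx_defaults(const struct skge_hw *hw, int port)
{
        skge_gma_write16(hw, port, GM_TX_PARAM,
                         TX_JAM_LEN_VAL(TX_JAM_LEN_DEF) |
                         TX_JAM_IPG_VAL(TX_JAM_IPG_DEF) |
                         TX_IPG_JAM_DATA(TX_IPG_JAM_DEF));

        skge_gma_write16(hw, port, GM_SERIAL_MODE,
                         DATA_BLIND_VAL(DATA_BLIND_DEF) |
                         GM_SMOD_VLAN_ENA |              /* illustrative */
                         IPG_DATA_VAL(IPG_DATA_DEF));
}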
2180
2181/* GM_SMI_CTRL 16 bit r/w SMI Control Register */
2182enum {
2183 GM_SMI_CT_PHY_A_MSK = 0x1f<<11,/* Bit 15..11: PHY Device Address */
2184 GM_SMI_CT_REG_A_MSK = 0x1f<<6,/* Bit 10.. 6: PHY Register Address */
2185 GM_SMI_CT_OP_RD = 1<<5, /* Bit 5: OpCode Read (0=Write)*/
2186 GM_SMI_CT_RD_VAL = 1<<4, /* Bit 4: Read Valid (Read completed) */
2187 GM_SMI_CT_BUSY = 1<<3, /* Bit 3: Busy (Operation in progress) */
2188};
2189
2190#define GM_SMI_CT_PHY_AD(x) (((x)<<11) & GM_SMI_CT_PHY_A_MSK)
2191#define GM_SMI_CT_REG_AD(x) (((x)<<6) & GM_SMI_CT_REG_A_MSK)
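
A hedged sketch of a PHY register read through the SMI: start a read cycle, poll GM_SMI_CT_RD_VAL, then fetch GM_SMI_DATA. The retry count is arbitrary, locking and error handling are omitted, and <linux/delay.h> plus <linux/errno.h> are assumed to be included.

static int yukon_phy_read(const struct skge_hw *hw, int port, int reg, u16 *val)
{
        int i;

        skge_gma_write16(hw, port, GM_SMI_CTRL,
                         GM_SMI_CT_PHY_AD(hw->phy_addr) |
                         GM_SMI_CT_REG_AD(reg) | GM_SMI_CT_OP_RD);

        for (i = 0; i < 100; i++) {
                if (skge_gma_read16(hw, port, GM_SMI_CTRL) & GM_SMI_CT_RD_VAL) {
                        *val = skge_gma_read16(hw, port, GM_SMI_DATA);
                        return 0;
                }
                udelay(10);
        }
        return -ETIMEDOUT;
}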
2192
2193/* GM_PHY_ADDR 16 bit r/w GPHY Address Register */
2194enum {
2195 GM_PAR_MIB_CLR = 1<<5, /* Bit 5: Set MIB Clear Counter Mode */
2196 GM_PAR_MIB_TST = 1<<4, /* Bit 4: MIB Load Counter (Test Mode) */
2197};
2198
2199/* Receive Frame Status Encoding */
2200enum {
2201 GMR_FS_LEN = 0xffff<<16, /* Bit 31..16: Rx Frame Length */
2202 GMR_FS_VLAN = 1<<13, /* Bit 13: VLAN Packet */
2203 GMR_FS_JABBER = 1<<12, /* Bit 12: Jabber Packet */
2204 GMR_FS_UN_SIZE = 1<<11, /* Bit 11: Undersize Packet */
2205 GMR_FS_MC = 1<<10, /* Bit 10: Multicast Packet */
2206 GMR_FS_BC = 1<<9, /* Bit 9: Broadcast Packet */
2207 GMR_FS_RX_OK = 1<<8, /* Bit 8: Receive OK (Good Packet) */
2208 GMR_FS_GOOD_FC = 1<<7, /* Bit 7: Good Flow-Control Packet */
2209 GMR_FS_BAD_FC = 1<<6, /* Bit 6: Bad Flow-Control Packet */
2210 GMR_FS_MII_ERR = 1<<5, /* Bit 5: MII Error */
2211 GMR_FS_LONG_ERR = 1<<4, /* Bit 4: Too Long Packet */
2212 GMR_FS_FRAGMENT = 1<<3, /* Bit 3: Fragment */
2213
2214 GMR_FS_CRC_ERR = 1<<1, /* Bit 1: CRC Error */
2215 GMR_FS_RX_FF_OV = 1<<0, /* Bit 0: Rx FIFO Overflow */
2216
2217/*
2218 * GMR_FS_ANY_ERR (analogous to XMR_FS_ANY_ERR)
2219 */
2220 GMR_FS_ANY_ERR = GMR_FS_CRC_ERR | GMR_FS_LONG_ERR |
2221 GMR_FS_MII_ERR | GMR_FS_BAD_FC | GMR_FS_GOOD_FC |
2222 GMR_FS_JABBER,
2223/* Rx GMAC FIFO Flush Mask (default) */
2224 RX_FF_FL_DEF_MSK = GMR_FS_CRC_ERR | GMR_FS_RX_FF_OV |GMR_FS_MII_ERR |
2225 GMR_FS_BAD_FC | GMR_FS_GOOD_FC | GMR_FS_UN_SIZE |
2226 GMR_FS_JABBER,
2227};
2228
2229/* RX_GMF_CTRL_T 32 bit Rx GMAC FIFO Control/Test */
2230enum {
2231 GMF_WP_TST_ON = 1<<14, /* Write Pointer Test On */
2232 GMF_WP_TST_OFF = 1<<13, /* Write Pointer Test Off */
2233 GMF_WP_STEP = 1<<12, /* Write Pointer Step/Increment */
2234
2235 GMF_RP_TST_ON = 1<<10, /* Read Pointer Test On */
2236 GMF_RP_TST_OFF = 1<<9, /* Read Pointer Test Off */
2237 GMF_RP_STEP = 1<<8, /* Read Pointer Step/Increment */
2238 GMF_RX_F_FL_ON = 1<<7, /* Rx FIFO Flush Mode On */
2239 GMF_RX_F_FL_OFF = 1<<6, /* Rx FIFO Flush Mode Off */
2240 GMF_CLI_RX_FO = 1<<5, /* Clear IRQ Rx FIFO Overrun */
2241 GMF_CLI_RX_FC = 1<<4, /* Clear IRQ Rx Frame Complete */
2242 GMF_OPER_ON = 1<<3, /* Operational Mode On */
2243 GMF_OPER_OFF = 1<<2, /* Operational Mode Off */
2244 GMF_RST_CLR = 1<<1, /* Clear GMAC FIFO Reset */
2245 GMF_RST_SET = 1<<0, /* Set GMAC FIFO Reset */
2246
2247 RX_GMF_FL_THR_DEF = 0xa, /* flush threshold (default) */
2248};
2249
2250
2251/* TX_GMF_CTRL_T 32 bit Tx GMAC FIFO Control/Test */
2252enum {
2253 GMF_WSP_TST_ON = 1<<18,/* Write Shadow Pointer Test On */
2254 GMF_WSP_TST_OFF = 1<<17,/* Write Shadow Pointer Test Off */
2255 GMF_WSP_STEP = 1<<16,/* Write Shadow Pointer Step/Increment */
2256
2257 GMF_CLI_TX_FU = 1<<6, /* Clear IRQ Tx FIFO Underrun */
2258 GMF_CLI_TX_FC = 1<<5, /* Clear IRQ Tx Frame Complete */
2259 GMF_CLI_TX_PE = 1<<4, /* Clear IRQ Tx Parity Error */
2260};
2261
2262/* GMAC_TI_ST_CTRL 8 bit Time Stamp Timer Ctrl Reg (YUKON only) */
2263enum {
2264 GMT_ST_START = 1<<2, /* Start Time Stamp Timer */
2265 GMT_ST_STOP = 1<<1, /* Stop Time Stamp Timer */
2266 GMT_ST_CLR_IRQ = 1<<0, /* Clear Time Stamp Timer IRQ */
2267};
2268
2269/* GMAC_CTRL 32 bit GMAC Control Reg (YUKON only) */
2270enum {
2271 GMC_H_BURST_ON = 1<<7, /* Half Duplex Burst Mode On */
2272 GMC_H_BURST_OFF = 1<<6, /* Half Duplex Burst Mode Off */
2273 GMC_F_LOOPB_ON = 1<<5, /* FIFO Loopback On */
2274 GMC_F_LOOPB_OFF = 1<<4, /* FIFO Loopback Off */
2275 GMC_PAUSE_ON = 1<<3, /* Pause On */
2276 GMC_PAUSE_OFF = 1<<2, /* Pause Off */
2277 GMC_RST_CLR = 1<<1, /* Clear GMAC Reset */
2278 GMC_RST_SET = 1<<0, /* Set GMAC Reset */
2279};
2280
2281/* GPHY_CTRL 32 bit GPHY Control Reg (YUKON only) */
2282enum {
2283 GPC_SEL_BDT = 1<<28, /* Select Bi-Dir. Transfer for MDC/MDIO */
2284 GPC_INT_POL_HI = 1<<27, /* IRQ Polarity is Active HIGH */
2285 GPC_75_OHM = 1<<26, /* Use 75 Ohm Termination instead of 50 */
2286 GPC_DIS_FC = 1<<25, /* Disable Automatic Fiber/Copper Detection */
2287 GPC_DIS_SLEEP = 1<<24, /* Disable Energy Detect */
2288 GPC_HWCFG_M_3 = 1<<23, /* HWCFG_MODE[3] */
2289 GPC_HWCFG_M_2 = 1<<22, /* HWCFG_MODE[2] */
2290 GPC_HWCFG_M_1 = 1<<21, /* HWCFG_MODE[1] */
2291 GPC_HWCFG_M_0 = 1<<20, /* HWCFG_MODE[0] */
2292 GPC_ANEG_0 = 1<<19, /* ANEG[0] */
2293 GPC_ENA_XC = 1<<18, /* Enable MDI crossover */
2294 GPC_DIS_125 = 1<<17, /* Disable 125 MHz clock */
2295 GPC_ANEG_3 = 1<<16, /* ANEG[3] */
2296 GPC_ANEG_2 = 1<<15, /* ANEG[2] */
2297 GPC_ANEG_1 = 1<<14, /* ANEG[1] */
2298 GPC_ENA_PAUSE = 1<<13, /* Enable Pause (SYM_OR_REM) */
2299 GPC_PHYADDR_4 = 1<<12, /* Bit 4 of Phy Addr */
2300 GPC_PHYADDR_3 = 1<<11, /* Bit 3 of Phy Addr */
2301 GPC_PHYADDR_2 = 1<<10, /* Bit 2 of Phy Addr */
2302 GPC_PHYADDR_1 = 1<<9, /* Bit 1 of Phy Addr */
2303 GPC_PHYADDR_0 = 1<<8, /* Bit 0 of Phy Addr */
2304 /* Bits 7..2: reserved */
2305 GPC_RST_CLR = 1<<1, /* Clear GPHY Reset */
2306 GPC_RST_SET = 1<<0, /* Set GPHY Reset */
2307};
2308
2309#define GPC_HWCFG_GMII_COP (GPC_HWCFG_M_3|GPC_HWCFG_M_2 | GPC_HWCFG_M_1 | GPC_HWCFG_M_0)
2310#define GPC_HWCFG_GMII_FIB (GPC_HWCFG_M_2 | GPC_HWCFG_M_1 | GPC_HWCFG_M_0)
2311#define GPC_ANEG_ADV_ALL_M (GPC_ANEG_3 | GPC_ANEG_2 | GPC_ANEG_1 | GPC_ANEG_0)
2312
2313/* forced speed and duplex mode (don't mix with other ANEG bits) */
2314#define GPC_FRC10MBIT_HALF 0
2315#define GPC_FRC10MBIT_FULL GPC_ANEG_0
2316#define GPC_FRC100MBIT_HALF GPC_ANEG_1
2317#define GPC_FRC100MBIT_FULL (GPC_ANEG_0 | GPC_ANEG_1)
2318
2319/* auto-negotiation with limited advertised speeds */
2320/* mix only with master/slave settings (for copper) */
2321#define GPC_ADV_1000_HALF GPC_ANEG_2
2322#define GPC_ADV_1000_FULL GPC_ANEG_3
2323#define GPC_ADV_ALL (GPC_ANEG_2 | GPC_ANEG_3)
2324
2325/* master/slave settings */
2326/* only for copper with 1000 Mbps */
2327#define GPC_FORCE_MASTER 0
2328#define GPC_FORCE_SLAVE GPC_ANEG_0
2329#define GPC_PREF_MASTER GPC_ANEG_1
2330#define GPC_PREF_SLAVE (GPC_ANEG_1 | GPC_ANEG_0)
2331
2332/* GMAC_IRQ_SRC 8 bit GMAC Interrupt Source Reg (YUKON only) */
2333/* GMAC_IRQ_MSK 8 bit GMAC Interrupt Mask Reg (YUKON only) */
2334enum {
2335 GM_IS_TX_CO_OV = 1<<5, /* Transmit Counter Overflow IRQ */
2336 GM_IS_RX_CO_OV = 1<<4, /* Receive Counter Overflow IRQ */
2337 GM_IS_TX_FF_UR = 1<<3, /* Transmit FIFO Underrun */
2338 GM_IS_TX_COMPL = 1<<2, /* Frame Transmission Complete */
2339 GM_IS_RX_FF_OR = 1<<1, /* Receive FIFO Overrun */
2340 GM_IS_RX_COMPL = 1<<0, /* Frame Reception Complete */
2341
2342#define GMAC_DEF_MSK (GM_IS_TX_CO_OV | GM_IS_RX_CO_OV | GM_IS_TX_FF_UR)
2343
2344/* GMAC_LINK_CTRL 16 bit GMAC Link Control Reg (YUKON only) */
2345 /* Bits 15.. 2: reserved */
2346 GMLC_RST_CLR = 1<<1, /* Clear GMAC Link Reset */
2347 GMLC_RST_SET = 1<<0, /* Set GMAC Link Reset */
2348
2349
2350/* WOL_CTRL_STAT 16 bit WOL Control/Status Reg */
2351 WOL_CTL_LINK_CHG_OCC = 1<<15,
2352 WOL_CTL_MAGIC_PKT_OCC = 1<<14,
2353 WOL_CTL_PATTERN_OCC = 1<<13,
2354 WOL_CTL_CLEAR_RESULT = 1<<12,
2355 WOL_CTL_ENA_PME_ON_LINK_CHG = 1<<11,
2356 WOL_CTL_DIS_PME_ON_LINK_CHG = 1<<10,
2357 WOL_CTL_ENA_PME_ON_MAGIC_PKT = 1<<9,
2358 WOL_CTL_DIS_PME_ON_MAGIC_PKT = 1<<8,
2359 WOL_CTL_ENA_PME_ON_PATTERN = 1<<7,
2360 WOL_CTL_DIS_PME_ON_PATTERN = 1<<6,
2361 WOL_CTL_ENA_LINK_CHG_UNIT = 1<<5,
2362 WOL_CTL_DIS_LINK_CHG_UNIT = 1<<4,
2363 WOL_CTL_ENA_MAGIC_PKT_UNIT = 1<<3,
2364 WOL_CTL_DIS_MAGIC_PKT_UNIT = 1<<2,
2365 WOL_CTL_ENA_PATTERN_UNIT = 1<<1,
2366 WOL_CTL_DIS_PATTERN_UNIT = 1<<0,
2367};
2368
2369#define WOL_CTL_DEFAULT \
2370 (WOL_CTL_DIS_PME_ON_LINK_CHG | \
2371 WOL_CTL_DIS_PME_ON_PATTERN | \
2372 WOL_CTL_DIS_PME_ON_MAGIC_PKT | \
2373 WOL_CTL_DIS_LINK_CHG_UNIT | \
2374 WOL_CTL_DIS_PATTERN_UNIT | \
2375 WOL_CTL_DIS_MAGIC_PKT_UNIT)
2376
2377/* WOL_MATCH_CTL 8 bit WOL Match Control Reg */
2378#define WOL_CTL_PATT_ENA(x) (1 << (x))
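
A hedged sketch of arming magic-packet wake-up: start from the all-disabled default, clear the relevant disable bits and set the enable bits. The result would then be written to the adapter's WOL control/status register, whose block offset is not shown here.

u16 wol = WOL_CTL_DEFAULT;

wol &= ~(WOL_CTL_DIS_MAGIC_PKT_UNIT | WOL_CTL_DIS_PME_ON_MAGIC_PKT);
wol |= WOL_CTL_ENA_MAGIC_PKT_UNIT | WOL_CTL_ENA_PME_ON_MAGIC_PKT;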
2379
2380
2381/* XMAC II registers */
2382enum {
2383 XM_MMU_CMD = 0x0000, /* 16 bit r/w MMU Command Register */
2384 XM_POFF = 0x0008, /* 32 bit r/w Packet Offset Register */
2385 XM_BURST = 0x000c, /* 32 bit r/w Burst Register for half duplex*/
2386 XM_1L_VLAN_TAG = 0x0010, /* 16 bit r/w One Level VLAN Tag ID */
2387 XM_2L_VLAN_TAG = 0x0014, /* 16 bit r/w Two Level VLAN Tag ID */
2388 XM_TX_CMD = 0x0020, /* 16 bit r/w Transmit Command Register */
2389 XM_TX_RT_LIM = 0x0024, /* 16 bit r/w Transmit Retry Limit Register */
2390 XM_TX_STIME = 0x0028, /* 16 bit r/w Transmit Slottime Register */
2391 XM_TX_IPG = 0x002c, /* 16 bit r/w Transmit Inter Packet Gap */
2392 XM_RX_CMD = 0x0030, /* 16 bit r/w Receive Command Register */
2393 XM_PHY_ADDR = 0x0034, /* 16 bit r/w PHY Address Register */
2394 XM_PHY_DATA = 0x0038, /* 16 bit r/w PHY Data Register */
2395 XM_GP_PORT = 0x0040, /* 32 bit r/w General Purpose Port Register */
2396 XM_IMSK = 0x0044, /* 16 bit r/w Interrupt Mask Register */
2397 XM_ISRC = 0x0048, /* 16 bit r/o Interrupt Status Register */
2398 XM_HW_CFG = 0x004c, /* 16 bit r/w Hardware Config Register */
2399 XM_TX_LO_WM = 0x0060, /* 16 bit r/w Tx FIFO Low Water Mark */
2400 XM_TX_HI_WM = 0x0062, /* 16 bit r/w Tx FIFO High Water Mark */
2401 XM_TX_THR = 0x0064, /* 16 bit r/w Tx Request Threshold */
2402 XM_HT_THR = 0x0066, /* 16 bit r/w Host Request Threshold */
2403 XM_PAUSE_DA = 0x0068, /* NA reg r/w Pause Destination Address */
2404 XM_CTL_PARA = 0x0070, /* 32 bit r/w Control Parameter Register */
2405 XM_MAC_OPCODE = 0x0074, /* 16 bit r/w Opcode for MAC control frames */
2406 XM_MAC_PTIME = 0x0076, /* 16 bit r/w Pause time for MAC ctrl frames*/
2407 XM_TX_STAT = 0x0078, /* 32 bit r/o Tx Status LIFO Register */
2408
2409 XM_EXM_START = 0x0080, /* r/w Start Address of the EXM Regs */
2410#define XM_EXM(reg) (XM_EXM_START + ((reg) << 3))
2411};
2412
2413enum {
2414 XM_SRC_CHK = 0x0100, /* NA reg r/w Source Check Address Register */
2415 XM_SA = 0x0108, /* NA reg r/w Station Address Register */
2416 XM_HSM = 0x0110, /* 64 bit r/w Hash Match Address Registers */
2417 XM_RX_LO_WM = 0x0118, /* 16 bit r/w Receive Low Water Mark */
2418 XM_RX_HI_WM = 0x011a, /* 16 bit r/w Receive High Water Mark */
2419 XM_RX_THR = 0x011c, /* 32 bit r/w Receive Request Threshold */
2420 XM_DEV_ID = 0x0120, /* 32 bit r/o Device ID Register */
2421 XM_MODE = 0x0124, /* 32 bit r/w Mode Register */
2422 XM_LSA = 0x0128, /* NA reg r/o Last Source Register */
2423 XM_TS_READ = 0x0130, /* 32 bit r/o Time Stamp Read Register */
2424 XM_TS_LOAD = 0x0134, /* 32 bit r/o Time Stamp Load Value */
2425 XM_STAT_CMD = 0x0200, /* 16 bit r/w Statistics Command Register */
2426 XM_RX_CNT_EV = 0x0204, /* 32 bit r/o Rx Counter Event Register */
2427 XM_TX_CNT_EV = 0x0208, /* 32 bit r/o Tx Counter Event Register */
2428 XM_RX_EV_MSK = 0x020c, /* 32 bit r/w Rx Counter Event Mask */
2429 XM_TX_EV_MSK = 0x0210, /* 32 bit r/w Tx Counter Event Mask */
2430 XM_TXF_OK = 0x0280, /* 32 bit r/o Frames Transmitted OK Counter */
2431 XM_TXO_OK_HI = 0x0284, /* 32 bit r/o Octets Transmitted OK High Cnt*/
2432 XM_TXO_OK_LO = 0x0288, /* 32 bit r/o Octets Transmitted OK Low Cnt */
2433 XM_TXF_BC_OK = 0x028c, /* 32 bit r/o Broadcast Frames Xmitted OK */
2434 XM_TXF_MC_OK = 0x0290, /* 32 bit r/o Multicast Frames Xmitted OK */
2435 XM_TXF_UC_OK = 0x0294, /* 32 bit r/o Unicast Frames Xmitted OK */
2436 XM_TXF_LONG = 0x0298, /* 32 bit r/o Tx Long Frame Counter */
2437 XM_TXE_BURST = 0x029c, /* 32 bit r/o Tx Burst Event Counter */
2438 XM_TXF_MPAUSE = 0x02a0, /* 32 bit r/o Tx Pause MAC Ctrl Frame Cnt */
2439 XM_TXF_MCTRL = 0x02a4, /* 32 bit r/o Tx MAC Ctrl Frame Counter */
2440 XM_TXF_SNG_COL = 0x02a8, /* 32 bit r/o Tx Single Collision Counter */
2441 XM_TXF_MUL_COL = 0x02ac, /* 32 bit r/o Tx Multiple Collision Counter */
2442 XM_TXF_ABO_COL = 0x02b0, /* 32 bit r/o Tx aborted due to Exces. Col. */
2443 XM_TXF_LAT_COL = 0x02b4, /* 32 bit r/o Tx Late Collision Counter */
2444 XM_TXF_DEF = 0x02b8, /* 32 bit r/o Tx Deferred Frame Counter */
2445 XM_TXF_EX_DEF = 0x02bc, /* 32 bit r/o Tx Excessive Deferral Counter */
2446 XM_TXE_FIFO_UR = 0x02c0, /* 32 bit r/o Tx FIFO Underrun Event Cnt */
2447 XM_TXE_CS_ERR = 0x02c4, /* 32 bit r/o Tx Carrier Sense Error Cnt */
2448 XM_TXP_UTIL = 0x02c8, /* 32 bit r/o Tx Utilization in % */
2449 XM_TXF_64B = 0x02d0, /* 32 bit r/o 64 Byte Tx Frame Counter */
2450 XM_TXF_127B = 0x02d4, /* 32 bit r/o 65-127 Byte Tx Frame Counter */
2451 XM_TXF_255B = 0x02d8, /* 32 bit r/o 128-255 Byte Tx Frame Counter */
2452 XM_TXF_511B = 0x02dc, /* 32 bit r/o 256-511 Byte Tx Frame Counter */
2453 XM_TXF_1023B = 0x02e0, /* 32 bit r/o 512-1023 Byte Tx Frame Counter*/
2454 XM_TXF_MAX_SZ = 0x02e4, /* 32 bit r/o 1024-MaxSize Byte Tx Frame Cnt*/
2455 XM_RXF_OK = 0x0300, /* 32 bit r/o Frames Received OK */
2456 XM_RXO_OK_HI = 0x0304, /* 32 bit r/o Octets Received OK High Cnt */
2457 XM_RXO_OK_LO = 0x0308, /* 32 bit r/o Octets Received OK Low Counter*/
2458 XM_RXF_BC_OK = 0x030c, /* 32 bit r/o Broadcast Frames Received OK */
2459 XM_RXF_MC_OK = 0x0310, /* 32 bit r/o Multicast Frames Received OK */
2460 XM_RXF_UC_OK = 0x0314, /* 32 bit r/o Unicast Frames Received OK */
2461 XM_RXF_MPAUSE = 0x0318, /* 32 bit r/o Rx Pause MAC Ctrl Frame Cnt */
2462 XM_RXF_MCTRL = 0x031c, /* 32 bit r/o Rx MAC Ctrl Frame Counter */
2463 XM_RXF_INV_MP = 0x0320, /* 32 bit r/o Rx invalid Pause Frame Cnt */
2464 XM_RXF_INV_MOC = 0x0324, /* 32 bit r/o Rx Frames with inv. MAC Opcode*/
2465 XM_RXE_BURST = 0x0328, /* 32 bit r/o Rx Burst Event Counter */
2466 XM_RXE_FMISS = 0x032c, /* 32 bit r/o Rx Missed Frames Event Cnt */
2467 XM_RXF_FRA_ERR = 0x0330, /* 32 bit r/o Rx Framing Error Counter */
2468 XM_RXE_FIFO_OV = 0x0334, /* 32 bit r/o Rx FIFO overflow Event Cnt */
2469 XM_RXF_JAB_PKT = 0x0338, /* 32 bit r/o Rx Jabber Packet Frame Cnt */
2470 XM_RXE_CAR_ERR = 0x033c, /* 32 bit r/o Rx Carrier Event Error Cnt */
2471 XM_RXF_LEN_ERR = 0x0340, /* 32 bit r/o Rx in Range Length Error */
2472 XM_RXE_SYM_ERR = 0x0344, /* 32 bit r/o Rx Symbol Error Counter */
2473 XM_RXE_SHT_ERR = 0x0348, /* 32 bit r/o Rx Short Event Error Cnt */
2474 XM_RXE_RUNT = 0x034c, /* 32 bit r/o Rx Runt Event Counter */
2475 XM_RXF_LNG_ERR = 0x0350, /* 32 bit r/o Rx Frame too Long Error Cnt */
2476 XM_RXF_FCS_ERR = 0x0354, /* 32 bit r/o Rx Frame Check Seq. Error Cnt */
2477 XM_RXF_CEX_ERR = 0x035c, /* 32 bit r/o Rx Carrier Ext Error Frame Cnt*/
2478 XM_RXP_UTIL = 0x0360, /* 32 bit r/o Rx Utilization in % */
2479 XM_RXF_64B = 0x0368, /* 32 bit r/o 64 Byte Rx Frame Counter */
2480 XM_RXF_127B = 0x036c, /* 32 bit r/o 65-127 Byte Rx Frame Counter */
2481 XM_RXF_255B = 0x0370, /* 32 bit r/o 128-255 Byte Rx Frame Counter */
2482 XM_RXF_511B = 0x0374, /* 32 bit r/o 256-511 Byte Rx Frame Counter */
2483 XM_RXF_1023B = 0x0378, /* 32 bit r/o 512-1023 Byte Rx Frame Counter*/
2484 XM_RXF_MAX_SZ = 0x037c, /* 32 bit r/o 1024-MaxSize Byte Rx Frame Cnt*/
2485};
2486
2487/* XM_MMU_CMD 16 bit r/w MMU Command Register */
2488enum {
2489 XM_MMU_PHY_RDY = 1<<12,/* Bit 12: PHY Read Ready */
2490 XM_MMU_PHY_BUSY = 1<<11,/* Bit 11: PHY Busy */
2491 XM_MMU_IGN_PF = 1<<10,/* Bit 10: Ignore Pause Frame */
2492 XM_MMU_MAC_LB = 1<<9, /* Bit 9: Enable MAC Loopback */
2493 XM_MMU_FRC_COL = 1<<7, /* Bit 7: Force Collision */
2494 XM_MMU_SIM_COL = 1<<6, /* Bit 6: Simulate Collision */
2495 XM_MMU_NO_PRE = 1<<5, /* Bit 5: No MDIO Preamble */
2496 XM_MMU_GMII_FD = 1<<4, /* Bit 4: GMII uses Full Duplex */
2497 XM_MMU_RAT_CTRL = 1<<3, /* Bit 3: Enable Rate Control */
2498 XM_MMU_GMII_LOOP= 1<<2, /* Bit 2: PHY is in Loopback Mode */
2499 XM_MMU_ENA_RX = 1<<1, /* Bit 1: Enable Receiver */
2500 XM_MMU_ENA_TX = 1<<0, /* Bit 0: Enable Transmitter */
2501};
2502
2503
2504/* XM_TX_CMD 16 bit r/w Transmit Command Register */
2505enum {
2506 XM_TX_BK2BK = 1<<6, /* Bit 6: Ignore Carrier Sense (Tx Bk2Bk) */
2507 XM_TX_ENC_BYP = 1<<5, /* Bit 5: Set Encoder in Bypass Mode */
2508 XM_TX_SAM_LINE = 1<<4, /* Bit 4: (sc) Start utilization calculation */
2509 XM_TX_NO_GIG_MD = 1<<3, /* Bit 3: Disable Carrier Extension */
2510 XM_TX_NO_PRE = 1<<2, /* Bit 2: Disable Preamble Generation */
2511 XM_TX_NO_CRC = 1<<1, /* Bit 1: Disable CRC Generation */
2512 XM_TX_AUTO_PAD = 1<<0, /* Bit 0: Enable Automatic Padding */
2513};
2514
2515/* XM_TX_RT_LIM 16 bit r/w Transmit Retry Limit Register */
2516#define XM_RT_LIM_MSK 0x1f /* Bit 4..0: Tx Retry Limit */
2517
2518
2519/* XM_TX_STIME 16 bit r/w Transmit Slottime Register */
2520#define XM_STIME_MSK 0x7f /* Bit 6..0: Tx Slottime bits */
2521
2522
2523/* XM_TX_IPG 16 bit r/w Transmit Inter Packet Gap */
2524#define XM_IPG_MSK 0xff /* Bit 7..0: IPG value bits */
2525
2526
2527/* XM_RX_CMD 16 bit r/w Receive Command Register */
2528enum {
2529 XM_RX_LENERR_OK = 1<<8, /* Bit 8 don't set Rx Err bit for */
2530 /* in-range error packets */
2531 XM_RX_BIG_PK_OK = 1<<7, /* Bit 7 don't set Rx Err bit for */
2532 /* jumbo packets */
2533 XM_RX_IPG_CAP = 1<<6, /* Bit 6 repl. type field with IPG */
2534 XM_RX_TP_MD = 1<<5, /* Bit 5: Enable transparent Mode */
2535 XM_RX_STRIP_FCS = 1<<4, /* Bit 4: Enable FCS Stripping */
2536 XM_RX_SELF_RX = 1<<3, /* Bit 3: Enable Rx of own packets */
2537 XM_RX_SAM_LINE = 1<<2, /* Bit 2: (sc) Start utilization calculation */
2538 XM_RX_STRIP_PAD = 1<<1, /* Bit 1: Strip pad bytes of Rx frames */
2539 XM_RX_DIS_CEXT = 1<<0, /* Bit 0: Disable carrier ext. check */
2540};
2541
2542
2543/* XM_PHY_ADDR 16 bit r/w PHY Address Register */
2544#define XM_PHY_ADDR_SZ 0x1f /* Bit 4..0: PHY Address bits */
2545
2546
2547/* XM_GP_PORT 32 bit r/w General Purpose Port Register */
2548enum {
2549 XM_GP_ANIP = 1<<6, /* Bit 6: (ro) Auto-Neg. in progress */
2550 XM_GP_FRC_INT = 1<<5, /* Bit 5: (sc) Force Interrupt */
2551 XM_GP_RES_MAC = 1<<3, /* Bit 3: (sc) Reset MAC and FIFOs */
2552 XM_GP_RES_STAT = 1<<2, /* Bit 2: (sc) Reset the statistics module */
2553 XM_GP_INP_ASS = 1<<0, /* Bit 0: (ro) GP Input Pin asserted */
2554};
2555
2556
2557/* XM_IMSK 16 bit r/w Interrupt Mask Register */
2558/* XM_ISRC 16 bit r/o Interrupt Status Register */
2559enum {
2560 XM_IS_LNK_AE = 1<<14, /* Bit 14: Link Asynchronous Event */
2561 XM_IS_TX_ABORT = 1<<13, /* Bit 13: Transmit Abort, late Col. etc */
2562 XM_IS_FRC_INT = 1<<12, /* Bit 12: Force INT bit set in GP */
2563 XM_IS_INP_ASS = 1<<11, /* Bit 11: Input Asserted, GP bit 0 set */
2564 XM_IS_LIPA_RC = 1<<10, /* Bit 10: Link Partner requests config */
2565 XM_IS_RX_PAGE = 1<<9, /* Bit 9: Page Received */
2566 XM_IS_TX_PAGE = 1<<8, /* Bit 8: Next Page Loaded for Transmit */
2567 XM_IS_AND = 1<<7, /* Bit 7: Auto-Negotiation Done */
2568 XM_IS_TSC_OV = 1<<6, /* Bit 6: Time Stamp Counter Overflow */
2569 XM_IS_RXC_OV = 1<<5, /* Bit 5: Rx Counter Event Overflow */
2570 XM_IS_TXC_OV = 1<<4, /* Bit 4: Tx Counter Event Overflow */
2571 XM_IS_RXF_OV = 1<<3, /* Bit 3: Receive FIFO Overflow */
2572 XM_IS_TXF_UR = 1<<2, /* Bit 2: Transmit FIFO Underrun */
2573 XM_IS_TX_COMP = 1<<1, /* Bit 1: Frame Tx Complete */
2574 XM_IS_RX_COMP = 1<<0, /* Bit 0: Frame Rx Complete */
2575};
2576
2577#define XM_DEF_MSK (~(XM_IS_INP_ASS | XM_IS_LIPA_RC | XM_IS_RX_PAGE | \
2578 XM_IS_AND | XM_IS_RXC_OV | XM_IS_TXC_OV | \
2579 XM_IS_RXF_OV | XM_IS_TXF_UR))
2580
2581
2582/* XM_HW_CFG 16 bit r/w Hardware Config Register */
2583enum {
2584 XM_HW_GEN_EOP = 1<<3, /* Bit 3: generate End of Packet pulse */
2585 XM_HW_COM4SIG = 1<<2, /* Bit 2: use Comma Detect for Sig. Det.*/
2586 XM_HW_GMII_MD = 1<<0, /* Bit 0: GMII Interface selected */
2587};
2588
2589
2590/* XM_TX_LO_WM 16 bit r/w Tx FIFO Low Water Mark */
2591/* XM_TX_HI_WM 16 bit r/w Tx FIFO High Water Mark */
2592#define XM_TX_WM_MSK 0x01ff /* Bit 9.. 0 Tx FIFO Watermark bits */
2593
2594/* XM_TX_THR 16 bit r/w Tx Request Threshold */
2595/* XM_HT_THR 16 bit r/w Host Request Threshold */
2596/* XM_RX_THR 16 bit r/w Rx Request Threshold */
2597#define XM_THR_MSK 0x03ff /* Bit 10.. 0 Rx/Tx Request Threshold bits */
2598
2599
2600/* XM_TX_STAT 32 bit r/o Tx Status LIFO Register */
2601enum {
2602 XM_ST_VALID = (1UL<<31), /* Bit 31: Status Valid */
2603 XM_ST_BYTE_CNT = (0x3fffL<<17), /* Bit 30..17: Tx frame Length */
2604 XM_ST_RETRY_CNT = (0x1fL<<12), /* Bit 16..12: Retry Count */
2605 XM_ST_EX_COL = 1<<11, /* Bit 11: Excessive Collisions */
2606 XM_ST_EX_DEF = 1<<10, /* Bit 10: Excessive Deferral */
2607 XM_ST_BURST = 1<<9, /* Bit 9: p. xmitted in burst md*/
2608 XM_ST_DEFER = 1<<8, /* Bit 8: packet was deferred */
2609 XM_ST_BC = 1<<7, /* Bit 7: Broadcast packet */
2610 XM_ST_MC = 1<<6, /* Bit 6: Multicast packet */
2611 XM_ST_UC = 1<<5, /* Bit 5: Unicast packet */
2612 XM_ST_TX_UR = 1<<4, /* Bit 4: FIFO Underrun occurred */
2613 XM_ST_CS_ERR = 1<<3, /* Bit 3: Carrier Sense Error */
2614 XM_ST_LAT_COL = 1<<2, /* Bit 2: Late Collision Error */
2615 XM_ST_MUL_COL = 1<<1, /* Bit 1: Multiple Collisions */
2616 XM_ST_SGN_COL = 1<<0, /* Bit 0: Single Collision */
2617};
2618
2619/* XM_RX_LO_WM 16 bit r/w Receive Low Water Mark */
2620/* XM_RX_HI_WM 16 bit r/w Receive High Water Mark */
2621#define XM_RX_WM_MSK 0x03ff /* Bit 11.. 0: Rx FIFO Watermark bits */
2622
2623
2624/* XM_DEV_ID 32 bit r/o Device ID Register */
2625#define XM_DEV_OUI (0x00ffffffUL<<8) /* Bit 31..8: Device OUI */
2626#define XM_DEV_REV (0x07L << 5) /* Bit 7..5: Chip Rev Num */
2627
2628
2629/* XM_MODE 32 bit r/w Mode Register */
2630enum {
2631 XM_MD_ENA_REJ = 1<<26, /* Bit 26: Enable Frame Reject */
2632 XM_MD_SPOE_E = 1<<25, /* Bit 25: Send Pause on Edge */
2633 /* extern generated */
2634 XM_MD_TX_REP = 1<<24, /* Bit 24: Transmit Repeater Mode */
2635 XM_MD_SPOFF_I = 1<<23, /* Bit 23: Send Pause on FIFO full */
2636 /* intern generated */
2637 XM_MD_LE_STW = 1<<22, /* Bit 22: Rx Stat Word in Little Endian */
2638 XM_MD_TX_CONT = 1<<21, /* Bit 21: Send Continuous */
2639 XM_MD_TX_PAUSE = 1<<20, /* Bit 20: (sc) Send Pause Frame */
2640 XM_MD_ATS = 1<<19, /* Bit 19: Append Time Stamp */
2641 XM_MD_SPOL_I = 1<<18, /* Bit 18: Send Pause on Low */
2642 /* intern generated */
2643 XM_MD_SPOH_I = 1<<17, /* Bit 17: Send Pause on High */
2644 /* intern generated */
2645 XM_MD_CAP = 1<<16, /* Bit 16: Check Address Pair */
2646 XM_MD_ENA_HASH = 1<<15, /* Bit 15: Enable Hashing */
2647 XM_MD_CSA = 1<<14, /* Bit 14: Check Station Address */
2648 XM_MD_CAA = 1<<13, /* Bit 13: Check Address Array */
2649 XM_MD_RX_MCTRL = 1<<12, /* Bit 12: Rx MAC Control Frame */
2650 XM_MD_RX_RUNT = 1<<11, /* Bit 11: Rx Runt Frames */
2651 XM_MD_RX_IRLE = 1<<10, /* Bit 10: Rx in Range Len Err Frame */
2652 XM_MD_RX_LONG = 1<<9, /* Bit 9: Rx Long Frame */
2653 XM_MD_RX_CRCE = 1<<8, /* Bit 8: Rx CRC Error Frame */
2654 XM_MD_RX_ERR = 1<<7, /* Bit 7: Rx Error Frame */
2655 XM_MD_DIS_UC = 1<<6, /* Bit 6: Disable Rx Unicast */
2656 XM_MD_DIS_MC = 1<<5, /* Bit 5: Disable Rx Multicast */
2657 XM_MD_DIS_BC = 1<<4, /* Bit 4: Disable Rx Broadcast */
2658 XM_MD_ENA_PROM = 1<<3, /* Bit 3: Enable Promiscuous */
2659 XM_MD_ENA_BE = 1<<2, /* Bit 2: Enable Big Endian */
2660 XM_MD_FTF = 1<<1, /* Bit 1: (sc) Flush Tx FIFO */
2661 XM_MD_FRF = 1<<0, /* Bit 0: (sc) Flush Rx FIFO */
2662};
2663
2664#define XM_PAUSE_MODE (XM_MD_SPOE_E | XM_MD_SPOL_I | XM_MD_SPOH_I)
2665#define XM_DEF_MODE (XM_MD_RX_RUNT | XM_MD_RX_IRLE | XM_MD_RX_LONG |\
2666 XM_MD_RX_CRCE | XM_MD_RX_ERR | XM_MD_CSA | XM_MD_CAA)
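
A hedged sketch of XMAC mode programming: load the receive defaults above and optionally add the pause bits when flow control has been negotiated.

static void genesis_set_mode(const struct skge_hw *hw, int port, int pause)
{
        u32 mode = XM_DEF_MODE;

        if (pause)
                mode |= XM_PAUSE_MODE;

        skge_xm_write32(hw, port, XM_MODE, mode);
}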
2667
2668/* XM_STAT_CMD 16 bit r/w Statistics Command Register */
2669enum {
2670 XM_SC_SNP_RXC = 1<<5, /* Bit 5: (sc) Snap Rx Counters */
2671 XM_SC_SNP_TXC = 1<<4, /* Bit 4: (sc) Snap Tx Counters */
2672 XM_SC_CP_RXC = 1<<3, /* Bit 3: Copy Rx Counters Continuously */
2673 XM_SC_CP_TXC = 1<<2, /* Bit 2: Copy Tx Counters Continuously */
2674 XM_SC_CLR_RXC = 1<<1, /* Bit 1: (sc) Clear Rx Counters */
2675 XM_SC_CLR_TXC = 1<<0, /* Bit 0: (sc) Clear Tx Counters */
2676};
2677
2678
2679/* XM_RX_CNT_EV 32 bit r/o Rx Counter Event Register */
2680/* XM_RX_EV_MSK 32 bit r/w Rx Counter Event Mask */
2681enum {
2682 XMR_MAX_SZ_OV = 1<<31, /* Bit 31: 1024-MaxSize Rx Cnt Ov*/
2683 XMR_1023B_OV = 1<<30, /* Bit 30: 512-1023Byte Rx Cnt Ov*/
2684 XMR_511B_OV = 1<<29, /* Bit 29: 256-511 Byte Rx Cnt Ov*/
2685 XMR_255B_OV = 1<<28, /* Bit 28: 128-255 Byte Rx Cnt Ov*/
2686 XMR_127B_OV = 1<<27, /* Bit 27: 65-127 Byte Rx Cnt Ov */
2687 XMR_64B_OV = 1<<26, /* Bit 26: 64 Byte Rx Cnt Ov */
2688 XMR_UTIL_OV = 1<<25, /* Bit 25: Rx Util Cnt Overflow */
2689 XMR_UTIL_UR = 1<<24, /* Bit 24: Rx Util Cnt Underrun */
2690 XMR_CEX_ERR_OV = 1<<23, /* Bit 23: CEXT Err Cnt Ov */
2691 XMR_FCS_ERR_OV = 1<<21, /* Bit 21: Rx FCS Error Cnt Ov */
2692 XMR_LNG_ERR_OV = 1<<20, /* Bit 20: Rx too Long Err Cnt Ov*/
2693 XMR_RUNT_OV = 1<<19, /* Bit 19: Runt Event Cnt Ov */
2694 XMR_SHT_ERR_OV = 1<<18, /* Bit 18: Rx Short Ev Err Cnt Ov*/
2695 XMR_SYM_ERR_OV = 1<<17, /* Bit 17: Rx Sym Err Cnt Ov */
2696 XMR_CAR_ERR_OV = 1<<15, /* Bit 15: Rx Carr Ev Err Cnt Ov */
2697 XMR_JAB_PKT_OV = 1<<14, /* Bit 14: Rx Jabb Packet Cnt Ov */
2698 XMR_FIFO_OV = 1<<13, /* Bit 13: Rx FIFO Ov Ev Cnt Ov */
2699 XMR_FRA_ERR_OV = 1<<12, /* Bit 12: Rx Framing Err Cnt Ov */
2700 XMR_FMISS_OV = 1<<11, /* Bit 11: Rx Missed Ev Cnt Ov */
2701 XMR_BURST = 1<<10, /* Bit 10: Rx Burst Event Cnt Ov */
2702 XMR_INV_MOC = 1<<9, /* Bit 9: Rx with inv. MAC OC Ov*/
2703 XMR_INV_MP = 1<<8, /* Bit 8: Rx inv Pause Frame Ov */
2704 XMR_MCTRL_OV = 1<<7, /* Bit 7: Rx MAC Ctrl-F Cnt Ov */
2705 XMR_MPAUSE_OV = 1<<6, /* Bit 6: Rx Pause MAC Ctrl-F Ov*/
2706 XMR_UC_OK_OV = 1<<5, /* Bit 5: Rx Unicast Frame CntOv*/
2707 XMR_MC_OK_OV = 1<<4, /* Bit 4: Rx Multicast Cnt Ov */
2708 XMR_BC_OK_OV = 1<<3, /* Bit 3: Rx Broadcast Cnt Ov */
2709 XMR_OK_LO_OV = 1<<2, /* Bit 2: Octets Rx OK Low CntOv*/
2710 XMR_OK_HI_OV = 1<<1, /* Bit 1: Octets Rx OK Hi Cnt Ov*/
2711 XMR_OK_OV = 1<<0, /* Bit 0: Frames Received Ok Ov */
2712};
2713
2714#define XMR_DEF_MSK (XMR_OK_LO_OV | XMR_OK_HI_OV)
2715
2716/* XM_TX_CNT_EV 32 bit r/o Tx Counter Event Register */
2717/* XM_TX_EV_MSK 32 bit r/w Tx Counter Event Mask */
2718enum {
2719 XMT_MAX_SZ_OV = 1<<25, /* Bit 25: 1024-MaxSize Tx Cnt Ov*/
2720 XMT_1023B_OV = 1<<24, /* Bit 24: 512-1023Byte Tx Cnt Ov*/
2721 XMT_511B_OV = 1<<23, /* Bit 23: 256-511 Byte Tx Cnt Ov*/
2722 XMT_255B_OV = 1<<22, /* Bit 22: 128-255 Byte Tx Cnt Ov*/
2723 XMT_127B_OV = 1<<21, /* Bit 21: 65-127 Byte Tx Cnt Ov */
2724 XMT_64B_OV = 1<<20, /* Bit 20: 64 Byte Tx Cnt Ov */
2725 XMT_UTIL_OV = 1<<19, /* Bit 19: Tx Util Cnt Overflow */
2726 XMT_UTIL_UR = 1<<18, /* Bit 18: Tx Util Cnt Underrun */
2727 XMT_CS_ERR_OV = 1<<17, /* Bit 17: Tx Carr Sen Err Cnt Ov*/
2728 XMT_FIFO_UR_OV = 1<<16, /* Bit 16: Tx FIFO Ur Ev Cnt Ov */
2729 XMT_EX_DEF_OV = 1<<15, /* Bit 15: Tx Ex Deferral Cnt Ov */
2730 XMT_DEF = 1<<14, /* Bit 14: Tx Deferred Cnt Ov */
2731 XMT_LAT_COL_OV = 1<<13, /* Bit 13: Tx Late Col Cnt Ov */
2732 XMT_ABO_COL_OV = 1<<12, /* Bit 12: Tx abo due to Ex Col Ov */
2733 XMT_MUL_COL_OV = 1<<11, /* Bit 11: Tx Mult Col Cnt Ov */
2734 XMT_SNG_COL = 1<<10, /* Bit 10: Tx Single Col Cnt Ov */
2735 XMT_MCTRL_OV = 1<<9, /* Bit 9: Tx MAC Ctrl Counter Ov*/
2736 XMT_MPAUSE = 1<<8, /* Bit 8: Tx Pause MAC Ctrl-F Ov*/
2737 XMT_BURST = 1<<7, /* Bit 7: Tx Burst Event Cnt Ov */
2738 XMT_LONG = 1<<6, /* Bit 6: Tx Long Frame Cnt Ov */
2739 XMT_UC_OK_OV = 1<<5, /* Bit 5: Tx Unicast Cnt Ov */
2740 XMT_MC_OK_OV = 1<<4, /* Bit 4: Tx Multicast Cnt Ov */
2741 XMT_BC_OK_OV = 1<<3, /* Bit 3: Tx Broadcast Cnt Ov */
2742 XMT_OK_LO_OV = 1<<2, /* Bit 2: Octets Tx OK Low CntOv*/
2743 XMT_OK_HI_OV = 1<<1, /* Bit 1: Octets Tx OK Hi Cnt Ov*/
2744 XMT_OK_OV = 1<<0, /* Bit 0: Frames Tx Ok Ov */
2745};
2746
2747#define XMT_DEF_MSK (XMT_OK_LO_OV | XMT_OK_HI_OV)
2748
2749struct skge_rx_desc {
2750 u32 control;
2751 u32 next_offset;
2752 u32 dma_lo;
2753 u32 dma_hi;
2754 u32 status;
2755 u32 timestamp;
2756 u16 csum2;
2757 u16 csum1;
2758 u16 csum2_start;
2759 u16 csum1_start;
2760};
2761
2762struct skge_tx_desc {
2763 u32 control;
2764 u32 next_offset;
2765 u32 dma_lo;
2766 u32 dma_hi;
2767 u32 status;
2768 u32 csum_offs;
2769 u16 csum_write;
2770 u16 csum_start;
2771 u32 rsvd;
2772};
2773
2774struct skge_element {
2775 struct skge_element *next;
2776 void *desc;
2777 struct sk_buff *skb;
2778 DECLARE_PCI_UNMAP_ADDR(mapaddr);
2779 DECLARE_PCI_UNMAP_LEN(maplen);
2780};
2781
2782struct skge_ring {
2783 struct skge_element *to_clean;
2784 struct skge_element *to_use;
2785 struct skge_element *start;
2786 unsigned long count;
2787};
2788
2789
2790struct skge_hw {
2791 void __iomem *regs;
2792 struct pci_dev *pdev;
2793 u32 intr_mask;
2794 struct net_device *dev[2];
2795
2796 u8 mac_cfg;
2797 u8 chip_id;
2798 u8 phy_type;
2799 u8 pmd_type;
2800 u16 phy_addr;
2801
2802 u32 ram_size;
2803 u32 ram_offset;
2804
2805 struct tasklet_struct ext_tasklet;
2806 spinlock_t phy_lock;
2807};
2808
2809static inline int isdualport(const struct skge_hw *hw)
2810{
2811 return !(hw->mac_cfg & CFG_SNG_MAC);
2812}
2813
2814static inline u8 chip_rev(const struct skge_hw *hw)
2815{
2816 return (hw->mac_cfg & CFG_CHIP_R_MSK) >> 4;
2817}
2818
2819static inline int iscopper(const struct skge_hw *hw)
2820{
2821 return (hw->pmd_type == 'T');
2822}
2823
2824enum {
2825 FLOW_MODE_NONE = 0, /* No Flow-Control */
2826 FLOW_MODE_LOC_SEND = 1, /* Local station sends PAUSE */
2827 FLOW_MODE_REM_SEND = 2, /* Symmetric or just remote */
2828 FLOW_MODE_SYMMETRIC = 3, /* Both stations may send PAUSE */
2829};
2830
2831struct skge_port {
2832 u32 msg_enable;
2833 struct skge_hw *hw;
2834 struct net_device *netdev;
2835 int port;
2836
2837 spinlock_t tx_lock;
2838 u32 tx_avail;
2839 struct skge_ring tx_ring;
2840 struct skge_ring rx_ring;
2841
2842 struct net_device_stats net_stats;
2843
2844 u8 rx_csum;
2845 u8 blink_on;
2846 u8 flow_control;
2847 u8 wol;
2848 u8 autoneg; /* AUTONEG_ENABLE, AUTONEG_DISABLE */
2849 u8 duplex; /* DUPLEX_HALF, DUPLEX_FULL */
2850 u16 speed; /* SPEED_1000, SPEED_100, ... */
2851 u32 advertising;
2852
2853 void *mem; /* PCI memory for rings */
2854 dma_addr_t dma;
2855 unsigned long mem_size;
2856
2857 struct timer_list link_check;
2858 struct timer_list led_blink;
2859};
2860
2861
2862/* Register accessor for memory mapped device */
2863static inline u32 skge_read32(const struct skge_hw *hw, int reg)
2864{
2865 return readl(hw->regs + reg);
2866
2867}
2868
2869static inline u16 skge_read16(const struct skge_hw *hw, int reg)
2870{
2871 return readw(hw->regs + reg);
2872}
2873
2874static inline u8 skge_read8(const struct skge_hw *hw, int reg)
2875{
2876 return readb(hw->regs + reg);
2877}
2878
2879static inline void skge_write32(const struct skge_hw *hw, int reg, u32 val)
2880{
2881 writel(val, hw->regs + reg);
2882}
2883
2884static inline void skge_write16(const struct skge_hw *hw, int reg, u16 val)
2885{
2886 writew(val, hw->regs + reg);
2887}
2888
2889static inline void skge_write8(const struct skge_hw *hw, int reg, u8 val)
2890{
2891 writeb(val, hw->regs + reg);
2892}
2893
2894/* MAC Related Registers inside the device. */
2895#define SKGEMAC_REG(port,reg) (((port)<<7)+(reg))
2896
2897/* PCI config space can be accessed via memory mapped space */
2898#define SKGEPCI_REG(reg) ((reg)+ 0x380)
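
A hedged one-liner, assuming <linux/pci.h> is included and hw is in scope: because the PCI configuration space is mirrored at offset 0x380 of the register window, standard config registers can be read with the plain accessors.

u16 pci_status = skge_read16(hw, SKGEPCI_REG(PCI_STATUS));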
2899
2900#define SKGEXM_REG(port, reg) \
2901 ((BASE_XMAC_1 + (port) * (BASE_XMAC_2 - BASE_XMAC_1)) | (reg) << 1)
2902
2903static inline u32 skge_xm_read32(const struct skge_hw *hw, int port, int reg)
2904{
2905 return skge_read32(hw, SKGEXM_REG(port,reg));
2906}
2907
2908static inline u16 skge_xm_read16(const struct skge_hw *hw, int port, int reg)
2909{
2910 return skge_read16(hw, SKGEXM_REG(port,reg));
2911}
2912
2913static inline u8 skge_xm_read8(const struct skge_hw *hw, int port, int reg)
2914{
2915 return skge_read8(hw, SKGEXM_REG(port,reg));
2916}
2917
2918static inline void skge_xm_write32(const struct skge_hw *hw, int port, int r, u32 v)
2919{
2920 skge_write32(hw, SKGEXM_REG(port,r), v);
2921}
2922
2923static inline void skge_xm_write16(const struct skge_hw *hw, int port, int r, u16 v)
2924{
2925 skge_write16(hw, SKGEXM_REG(port,r), v);
2926}
2927
2928static inline void skge_xm_write8(const struct skge_hw *hw, int port, int r, u8 v)
2929{
2930 skge_write8(hw, SKGEXM_REG(port,r), v);
2931}
2932
2933static inline void skge_xm_outhash(const struct skge_hw *hw, int port, int reg,
2934 const u8 *hash)
2935{
2936 skge_xm_write16(hw, port, reg,
2937 (u16)hash[0] | ((u16)hash[1] << 8));
2938 skge_xm_write16(hw, port, reg+2,
2939 (u16)hash[2] | ((u16)hash[3] << 8));
2940 skge_xm_write16(hw, port, reg+4,
2941 (u16)hash[4] | ((u16)hash[5] << 8));
2942 skge_xm_write16(hw, port, reg+6,
2943 (u16)hash[6] | ((u16)hash[7] << 8));
2944}
2945
2946static inline void skge_xm_outaddr(const struct skge_hw *hw, int port, int reg,
2947 const u8 *addr)
2948{
2949 skge_xm_write16(hw, port, reg,
2950 (u16)addr[0] | ((u16)addr[1] << 8));
2951 skge_xm_write16(hw, port, reg+2,
2952 (u16)addr[2] | ((u16)addr[3] << 8));
2953 skge_xm_write16(hw, port, reg+4,
2954 (u16)addr[4] | ((u16)addr[5] << 8));
2955}
2956
2957
2958#define SKGEGMA_REG(port,reg) \
2959 ((reg) + BASE_GMAC_1 + \
2960 (port) * (BASE_GMAC_2-BASE_GMAC_1))
2961
2962static inline u16 skge_gma_read16(const struct skge_hw *hw, int port, int reg)
2963{
2964 return skge_read16(hw, SKGEGMA_REG(port,reg));
2965}
2966
2967static inline u32 skge_gma_read32(const struct skge_hw *hw, int port, int reg)
2968{
2969 return (u32) skge_read16(hw, SKGEGMA_REG(port,reg))
2970 | ((u32)skge_read16(hw, SKGEGMA_REG(port,reg+4)) << 16);
2971}
2972
2973static inline u8 skge_gma_read8(const struct skge_hw *hw, int port, int reg)
2974{
2975 return skge_read8(hw, SKGEGMA_REG(port,reg));
2976}
2977
2978static inline void skge_gma_write16(const struct skge_hw *hw, int port, int r, u16 v)
2979{
2980 skge_write16(hw, SKGEGMA_REG(port,r), v);
2981}
2982
2983static inline void skge_gma_write32(const struct skge_hw *hw, int port, int r, u32 v)
2984{
2985 skge_write16(hw, SKGEGMA_REG(port, r), (u16) v);
2986 skge_write16(hw, SKGEGMA_REG(port, r+4), (u16)(v >> 16));
2987}
2988
2989static inline void skge_gma_write8(const struct skge_hw *hw, int port, int r, u8 v)
2990{
2991 skge_write8(hw, SKGEGMA_REG(port,r), v);
2992}
2993
2994static inline void skge_gm_set_addr(struct skge_hw *hw, int port, int reg,
2995 const u8 *addr)
2996{
2997 skge_gma_write16(hw, port, reg,
2998 (u16) addr[0] | ((u16) addr[1] << 8));
2999 skge_gma_write16(hw, port, reg+4,
3000 (u16) addr[2] | ((u16) addr[3] << 8));
3001 skge_gma_write16(hw, port, reg+8,
3002 (u16) addr[4] | ((u16) addr[5] << 8));
3003}
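
A hedged sketch, with dev, hw and port in scope: a port-initialization path would load the station address from the net_device into the GMAC source-address slots like this.

skge_gm_set_addr(hw, port, GM_SRC_ADDR_1L, dev->dev_addr);
skge_gm_set_addr(hw, port, GM_SRC_ADDR_2L, dev->dev_addr);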
3004
3005#endif
diff --git a/drivers/net/smc91x.c b/drivers/net/smc91x.c
index 5e561ba44333..fd80048f7f7a 100644
--- a/drivers/net/smc91x.c
+++ b/drivers/net/smc91x.c
@@ -129,7 +129,7 @@ MODULE_PARM_DESC(nowait, "set to 1 for no wait state");
129/* 129/*
130 * Transmit timeout, default 5 seconds. 130 * Transmit timeout, default 5 seconds.
131 */ 131 */
132static int watchdog = 5000; 132static int watchdog = 1000;
133module_param(watchdog, int, 0400); 133module_param(watchdog, int, 0400);
134MODULE_PARM_DESC(watchdog, "transmit timeout in milliseconds"); 134MODULE_PARM_DESC(watchdog, "transmit timeout in milliseconds");
135 135
@@ -660,15 +660,14 @@ static void smc_hardware_send_pkt(unsigned long data)
660 SMC_outw(((len & 1) ? (0x2000 | buf[len-1]) : 0), ioaddr, DATA_REG); 660 SMC_outw(((len & 1) ? (0x2000 | buf[len-1]) : 0), ioaddr, DATA_REG);
661 661
662 /* 662 /*
663 * If THROTTLE_TX_PKTS is set, we look at the TX_EMPTY flag 663 * If THROTTLE_TX_PKTS is set, we stop the queue here. This will
664 * before queueing this packet for TX, and if it's clear then 664 * have the effect of having at most one packet queued for TX
665 * we stop the queue here. This will have the effect of 665 * in the chip's memory at all time.
666 * having at most 2 packets queued for TX in the chip's memory 666 *
667 * at all time. If THROTTLE_TX_PKTS is not set then the queue 667 * If THROTTLE_TX_PKTS is not set then the queue is stopped only
668 * is stopped only when memory allocation (MC_ALLOC) does not 668 * when memory allocation (MC_ALLOC) does not succeed right away.
669 * succeed right away.
670 */ 669 */
671 if (THROTTLE_TX_PKTS && !(SMC_GET_INT() & IM_TX_EMPTY_INT)) 670 if (THROTTLE_TX_PKTS)
672 netif_stop_queue(dev); 671 netif_stop_queue(dev);
673 672
674 /* queue the packet for TX */ 673 /* queue the packet for TX */
@@ -792,17 +791,20 @@ static void smc_tx(struct net_device *dev)
792 DBG(2, "%s: TX STATUS 0x%04x PNR 0x%02x\n", 791 DBG(2, "%s: TX STATUS 0x%04x PNR 0x%02x\n",
793 dev->name, tx_status, packet_no); 792 dev->name, tx_status, packet_no);
794 793
795 if (!(tx_status & TS_SUCCESS)) 794 if (!(tx_status & ES_TX_SUC))
796 lp->stats.tx_errors++; 795 lp->stats.tx_errors++;
797 if (tx_status & TS_LOSTCAR) 796
797 if (tx_status & ES_LOSTCARR)
798 lp->stats.tx_carrier_errors++; 798 lp->stats.tx_carrier_errors++;
799 799
800 if (tx_status & TS_LATCOL) { 800 if (tx_status & (ES_LATCOL | ES_16COL)) {
801 PRINTK("%s: late collision occurred on last xmit\n", dev->name); 801 PRINTK("%s: %s occurred on last xmit\n", dev->name,
802 (tx_status & ES_LATCOL) ?
803 "late collision" : "too many collisions");
802 lp->stats.tx_window_errors++; 804 lp->stats.tx_window_errors++;
803 if (!(lp->stats.tx_window_errors & 63) && net_ratelimit()) { 805 if (!(lp->stats.tx_window_errors & 63) && net_ratelimit()) {
804 printk(KERN_INFO "%s: unexpectedly large numbers of " 806 printk(KERN_INFO "%s: unexpectedly large number of "
805 "late collisions. Please check duplex " 807 "bad collisions. Please check duplex "
806 "setting.\n", dev->name); 808 "setting.\n", dev->name);
807 } 809 }
808 } 810 }
@@ -1236,7 +1238,7 @@ static void smc_10bt_check_media(struct net_device *dev, int init)
1236 old_carrier = netif_carrier_ok(dev) ? 1 : 0; 1238 old_carrier = netif_carrier_ok(dev) ? 1 : 0;
1237 1239
1238 SMC_SELECT_BANK(0); 1240 SMC_SELECT_BANK(0);
1239 new_carrier = SMC_inw(ioaddr, EPH_STATUS_REG) & ES_LINK_OK ? 1 : 0; 1241 new_carrier = (SMC_GET_EPH_STATUS() & ES_LINK_OK) ? 1 : 0;
1240 SMC_SELECT_BANK(2); 1242 SMC_SELECT_BANK(2);
1241 1243
1242 if (init || (old_carrier != new_carrier)) { 1244 if (init || (old_carrier != new_carrier)) {
@@ -1308,15 +1310,16 @@ static irqreturn_t smc_interrupt(int irq, void *dev_id, struct pt_regs *regs)
1308 if (!status) 1310 if (!status)
1309 break; 1311 break;
1310 1312
1311 if (status & IM_RCV_INT) { 1313 if (status & IM_TX_INT) {
1312 DBG(3, "%s: RX irq\n", dev->name); 1314 /* do this before RX as it will free memory quickly */
1313 smc_rcv(dev);
1314 } else if (status & IM_TX_INT) {
1315 DBG(3, "%s: TX int\n", dev->name); 1315 DBG(3, "%s: TX int\n", dev->name);
1316 smc_tx(dev); 1316 smc_tx(dev);
1317 SMC_ACK_INT(IM_TX_INT); 1317 SMC_ACK_INT(IM_TX_INT);
1318 if (THROTTLE_TX_PKTS) 1318 if (THROTTLE_TX_PKTS)
1319 netif_wake_queue(dev); 1319 netif_wake_queue(dev);
1320 } else if (status & IM_RCV_INT) {
1321 DBG(3, "%s: RX irq\n", dev->name);
1322 smc_rcv(dev);
1320 } else if (status & IM_ALLOC_INT) { 1323 } else if (status & IM_ALLOC_INT) {
1321 DBG(3, "%s: Allocation irq\n", dev->name); 1324 DBG(3, "%s: Allocation irq\n", dev->name);
1322 tasklet_hi_schedule(&lp->tx_task); 1325 tasklet_hi_schedule(&lp->tx_task);
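
Read as one piece, the reordered interrupt chain now services events in the order that frees chip memory fastest. The fragment below restates the new branch order without the diff interleaving; the SMC_*() macros, lp and dev are assumed in scope exactly as in the driver:

    /* Condensed restatement of the new branch order in smc_interrupt(). */
    if (status & IM_TX_INT) {              /* first: reclaim the finished TX packet */
        smc_tx(dev);
        SMC_ACK_INT(IM_TX_INT);
        if (THROTTLE_TX_PKTS)
            netif_wake_queue(dev);
    } else if (status & IM_RCV_INT) {      /* then: receive */
        smc_rcv(dev);
    } else if (status & IM_ALLOC_INT) {    /* then: retry the pending TX allocation */
        tasklet_hi_schedule(&lp->tx_task);
    }
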
@@ -1337,7 +1340,10 @@ static irqreturn_t smc_interrupt(int irq, void *dev_id, struct pt_regs *regs)
1337 /* multiple collisions */ 1340 /* multiple collisions */
1338 lp->stats.collisions += card_stats & 0xF; 1341 lp->stats.collisions += card_stats & 0xF;
1339 } else if (status & IM_RX_OVRN_INT) { 1342 } else if (status & IM_RX_OVRN_INT) {
1340 DBG(1, "%s: RX overrun\n", dev->name); 1343 DBG(1, "%s: RX overrun (EPH_ST 0x%04x)\n", dev->name,
1344 ({ int eph_st; SMC_SELECT_BANK(0);
1345 eph_st = SMC_GET_EPH_STATUS();
1346 SMC_SELECT_BANK(2); eph_st; }) );
1341 SMC_ACK_INT(IM_RX_OVRN_INT); 1347 SMC_ACK_INT(IM_RX_OVRN_INT);
1342 lp->stats.rx_errors++; 1348 lp->stats.rx_errors++;
1343 lp->stats.rx_fifo_errors++; 1349 lp->stats.rx_fifo_errors++;
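
The new overrun message reads the EPH status inline through a GCC statement expression, so the bank switch, register read, and bank restore all happen inside the printk argument. A standalone demonstration of the ({ ... }) construct (a GCC/clang extension), independent of the driver:

    /* Standalone demo of the statement-expression idiom used above: the
     * value of the whole ({ ... }) is its last expression. gcc/clang only. */
    #include <stdio.h>

    static int bank;    /* stands in for the SMC bank-select register */

    static int fake_eph_status(void)
    {
        return bank == 0 ? 0x1234 : -1;    /* readable only from bank 0 */
    }

    int main(void)
    {
        bank = 2;    /* the driver normally sits in bank 2 */
        printf("EPH_ST 0x%04x\n",
               ({ int st; bank = 0; st = fake_eph_status(); bank = 2; st; }));
        return 0;    /* prints: EPH_ST 0x1234 */
    }
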
@@ -1389,7 +1395,7 @@ static void smc_timeout(struct net_device *dev)
1389{ 1395{
1390 struct smc_local *lp = netdev_priv(dev); 1396 struct smc_local *lp = netdev_priv(dev);
1391 void __iomem *ioaddr = lp->base; 1397 void __iomem *ioaddr = lp->base;
1392 int status, mask, meminfo, fifo; 1398 int status, mask, eph_st, meminfo, fifo;
1393 1399
1394 DBG(2, "%s: %s\n", dev->name, __FUNCTION__); 1400 DBG(2, "%s: %s\n", dev->name, __FUNCTION__);
1395 1401
@@ -1398,11 +1404,13 @@ static void smc_timeout(struct net_device *dev)
1398 mask = SMC_GET_INT_MASK(); 1404 mask = SMC_GET_INT_MASK();
1399 fifo = SMC_GET_FIFO(); 1405 fifo = SMC_GET_FIFO();
1400 SMC_SELECT_BANK(0); 1406 SMC_SELECT_BANK(0);
1407 eph_st = SMC_GET_EPH_STATUS();
1401 meminfo = SMC_GET_MIR(); 1408 meminfo = SMC_GET_MIR();
1402 SMC_SELECT_BANK(2); 1409 SMC_SELECT_BANK(2);
1403 spin_unlock_irq(&lp->lock); 1410 spin_unlock_irq(&lp->lock);
1404 PRINTK( "%s: INT 0x%02x MASK 0x%02x MEM 0x%04x FIFO 0x%04x\n", 1411 PRINTK( "%s: TX timeout (INT 0x%02x INTMASK 0x%02x "
1405 dev->name, status, mask, meminfo, fifo ); 1412 "MEM 0x%04x FIFO 0x%04x EPH_ST 0x%04x)\n",
1413 dev->name, status, mask, meminfo, fifo, eph_st );
1406 1414
1407 smc_reset(dev); 1415 smc_reset(dev);
1408 smc_enable(dev); 1416 smc_enable(dev);
@@ -1863,7 +1871,7 @@ static int __init smc_probe(struct net_device *dev, void __iomem *ioaddr)
1863 SMC_SELECT_BANK(1); 1871 SMC_SELECT_BANK(1);
1864 val = SMC_GET_BASE(); 1872 val = SMC_GET_BASE();
1865 val = ((val & 0x1F00) >> 3) << SMC_IO_SHIFT; 1873 val = ((val & 0x1F00) >> 3) << SMC_IO_SHIFT;
1866 if (((unsigned long)ioaddr & ((PAGE_SIZE-1)<<SMC_IO_SHIFT)) != val) { /*XXX: WTF? */ 1874 if (((unsigned int)ioaddr & (0x3e0 << SMC_IO_SHIFT)) != val) {
1867 printk("%s: IOADDR %p doesn't match configuration (%x).\n", 1875 printk("%s: IOADDR %p doesn't match configuration (%x).\n",
1868 CARDNAME, ioaddr, val); 1876 CARDNAME, ioaddr, val);
1869 } 1877 }
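
The probe check above compares the configured base address against the address the driver was actually handed, using only the bits both encodings share. A standalone sketch of that arithmetic with made-up values; the interpretation of the BASE register bits is an assumption inferred from the masks in the code:

    /* Illustration of the address-vs-configuration comparison; the sample
     * BASE register value and I/O address are hypothetical. */
    #include <stdio.h>

    #define SMC_IO_SHIFT 0    /* byte-addressed bus for this example */

    int main(void)
    {
        unsigned int base_reg = 0x1867;    /* hypothetical BASE register */
        unsigned int ioaddr   = 0x300;     /* hypothetical mapped address */

        unsigned int cfg  = ((base_reg & 0x1F00) >> 3) << SMC_IO_SHIFT;
        unsigned int mine = ioaddr & (0x3e0 << SMC_IO_SHIFT);

        printf("cfg=0x%x mine=0x%x -> %s\n",
               cfg, mine, cfg == mine ? "match" : "mismatch");
        return 0;    /* prints: cfg=0x300 mine=0x300 -> match */
    }
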
diff --git a/drivers/net/smc91x.h b/drivers/net/smc91x.h
index ddd2688e7d33..946528e6b742 100644
--- a/drivers/net/smc91x.h
+++ b/drivers/net/smc91x.h
@@ -151,7 +151,7 @@
151 151
152/* We actually can't write halfwords properly if not word aligned */ 152/* We actually can't write halfwords properly if not word aligned */
153static inline void 153static inline void
154SMC_outw(u16 val, unsigned long ioaddr, int reg) 154SMC_outw(u16 val, void __iomem *ioaddr, int reg)
155{ 155{
156 if (reg & 2) { 156 if (reg & 2) {
157 unsigned int v = val << 16; 157 unsigned int v = val << 16;
@@ -317,7 +317,7 @@ static inline void SMC_outsw (unsigned long a, int r, unsigned char* p, int l)
317#define SMC_insl(a, r, p, l) \ 317#define SMC_insl(a, r, p, l) \
318 smc_pxa_dma_insl(a, lp->physaddr, r, dev->dma, p, l) 318 smc_pxa_dma_insl(a, lp->physaddr, r, dev->dma, p, l)
319static inline void 319static inline void
320smc_pxa_dma_insl(u_long ioaddr, u_long physaddr, int reg, int dma, 320smc_pxa_dma_insl(void __iomem *ioaddr, u_long physaddr, int reg, int dma,
321 u_char *buf, int len) 321 u_char *buf, int len)
322{ 322{
323 dma_addr_t dmabuf; 323 dma_addr_t dmabuf;
@@ -355,7 +355,7 @@ smc_pxa_dma_insl(u_long ioaddr, u_long physaddr, int reg, int dma,
355#define SMC_insw(a, r, p, l) \ 355#define SMC_insw(a, r, p, l) \
356 smc_pxa_dma_insw(a, lp->physaddr, r, dev->dma, p, l) 356 smc_pxa_dma_insw(a, lp->physaddr, r, dev->dma, p, l)
357static inline void 357static inline void
358smc_pxa_dma_insw(u_long ioaddr, u_long physaddr, int reg, int dma, 358smc_pxa_dma_insw(void __iomem *ioaddr, u_long physaddr, int reg, int dma,
359 u_char *buf, int len) 359 u_char *buf, int len)
360{ 360{
361 dma_addr_t dmabuf; 361 dma_addr_t dmabuf;
@@ -681,14 +681,6 @@ static const char * chip_ids[ 16 ] = {
681 681
682 682
683/* 683/*
684 . Transmit status bits
685*/
686#define TS_SUCCESS 0x0001
687#define TS_LOSTCAR 0x0400
688#define TS_LATCOL 0x0200
689#define TS_16COL 0x0010
690
691/*
692 . Receive status bits 684 . Receive status bits
693*/ 685*/
694#define RS_ALGNERR 0x8000 686#define RS_ALGNERR 0x8000
@@ -845,6 +837,7 @@ static const char * chip_ids[ 16 ] = {
845#define SMC_GET_FIFO() SMC_inw( ioaddr, FIFO_REG ) 837#define SMC_GET_FIFO() SMC_inw( ioaddr, FIFO_REG )
846#define SMC_GET_PTR() SMC_inw( ioaddr, PTR_REG ) 838#define SMC_GET_PTR() SMC_inw( ioaddr, PTR_REG )
847#define SMC_SET_PTR(x) SMC_outw( x, ioaddr, PTR_REG ) 839#define SMC_SET_PTR(x) SMC_outw( x, ioaddr, PTR_REG )
840#define SMC_GET_EPH_STATUS() SMC_inw( ioaddr, EPH_STATUS_REG )
848#define SMC_GET_RCR() SMC_inw( ioaddr, RCR_REG ) 841#define SMC_GET_RCR() SMC_inw( ioaddr, RCR_REG )
849#define SMC_SET_RCR(x) SMC_outw( x, ioaddr, RCR_REG ) 842#define SMC_SET_RCR(x) SMC_outw( x, ioaddr, RCR_REG )
850#define SMC_GET_REV() SMC_inw( ioaddr, REV_REG ) 843#define SMC_GET_REV() SMC_inw( ioaddr, REV_REG )
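
The header changes retype the accessor helpers from unsigned long to void __iomem *, matching the ioremap'd cookie the driver passes around and letting sparse catch address-space mixups. A minimal sketch of the pattern, with kernel types and the <asm/io.h> accessors assumed:

    /* Sketch of an __iomem-typed accessor pair; writew()/readw() and the
     * __iomem annotation come from the kernel's I/O headers. */
    static inline void example_outw(u16 val, void __iomem *ioaddr, int reg)
    {
        writew(val, ioaddr + reg);    /* sparse checks the address space */
    }

    static inline u16 example_inw(void __iomem *ioaddr, int reg)
    {
        return readw(ioaddr + reg);
    }
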
diff --git a/drivers/net/starfire.c b/drivers/net/starfire.c
index 236bdd3f6ba0..12e2b6826fa3 100644
--- a/drivers/net/starfire.c
+++ b/drivers/net/starfire.c
@@ -2,7 +2,7 @@
2/* 2/*
3 Written 1998-2000 by Donald Becker. 3 Written 1998-2000 by Donald Becker.
4 4
5 Current maintainer is Ion Badulescu <ionut@cs.columbia.edu>. Please 5 Current maintainer is Ion Badulescu <ionut ta badula tod org>. Please
6 send all bug reports to me, and not to Donald Becker, as this code 6 send all bug reports to me, and not to Donald Becker, as this code
7 has been heavily modified from Donald's original version. 7 has been heavily modified from Donald's original version.
8 8
@@ -129,12 +129,18 @@
129 - put the chip to a D3 slumber on driver unload 129 - put the chip to a D3 slumber on driver unload
130 - added config option to enable/disable NAPI 130 - added config option to enable/disable NAPI
131 131
132TODO: bugfixes (no bugs known as of right now) 132 LK1.4.2 (Ion Badulescu)
133 - finally added firmware (GPL'ed by Adaptec)
134 - removed compatibility code for 2.2.x
135
136TODO: - fix forced speed/duplexing code (broken a long time ago, when
137 somebody converted the driver to use the generic MII code)
138 - fix VLAN support
133*/ 139*/
134 140
135#define DRV_NAME "starfire" 141#define DRV_NAME "starfire"
136#define DRV_VERSION "1.03+LK1.4.1" 142#define DRV_VERSION "1.03+LK1.4.2"
137#define DRV_RELDATE "February 10, 2002" 143#define DRV_RELDATE "January 19, 2005"
138 144
139#include <linux/config.h> 145#include <linux/config.h>
140#include <linux/version.h> 146#include <linux/version.h>
@@ -145,25 +151,15 @@ TODO: bugfixes (no bugs known as of right now)
145#include <linux/etherdevice.h> 151#include <linux/etherdevice.h>
146#include <linux/init.h> 152#include <linux/init.h>
147#include <linux/delay.h> 153#include <linux/delay.h>
154#include <linux/crc32.h>
155#include <linux/ethtool.h>
156#include <linux/mii.h>
157#include <linux/if_vlan.h>
148#include <asm/processor.h> /* Processor type for cache alignment. */ 158#include <asm/processor.h> /* Processor type for cache alignment. */
149#include <asm/uaccess.h> 159#include <asm/uaccess.h>
150#include <asm/io.h> 160#include <asm/io.h>
151 161
152/* 162#include "starfire_firmware.h"
153 * Adaptec's license for their drivers (which is where I got the
154 * firmware files) does not allow one to redistribute them. Thus, we can't
155 * include the firmware with this driver.
156 *
157 * However, should a legal-to-distribute firmware become available,
158 * the driver developer would need only to obtain the firmware in the
159 * form of a C header file.
160 * Once that's done, the #undef below must be changed into a #define
161 * for this driver to really use the firmware. Note that Rx/Tx
162 * hardware TCP checksumming is not possible without the firmware.
163 *
164 * WANTED: legal firmware to include with this GPL'd driver.
165 */
166#undef HAS_FIRMWARE
167/* 163/*
168 * The current frame processor firmware fails to checksum a fragment 164 * The current frame processor firmware fails to checksum a fragment
169 * of length 1. If and when this is fixed, the #define below can be removed. 165 * of length 1. If and when this is fixed, the #define below can be removed.
@@ -172,13 +168,7 @@ TODO: bugfixes (no bugs known as of right now)
172/* 168/*
173 * Define this if using the driver with the zero-copy patch 169 * Define this if using the driver with the zero-copy patch
174 */ 170 */
175#if defined(HAS_FIRMWARE) && defined(MAX_SKB_FRAGS)
176#define ZEROCOPY 171#define ZEROCOPY
177#endif
178
179#ifdef HAS_FIRMWARE
180#include "starfire_firmware.h"
181#endif /* HAS_FIRMWARE */
182 172
183#if defined(CONFIG_VLAN_8021Q) || defined(CONFIG_VLAN_8021Q_MODULE) 173#if defined(CONFIG_VLAN_8021Q) || defined(CONFIG_VLAN_8021Q_MODULE)
184#define VLAN_SUPPORT 174#define VLAN_SUPPORT
@@ -202,11 +192,7 @@ static int mtu;
202 The Starfire has a 512 element hash table based on the Ethernet CRC. */ 192 The Starfire has a 512 element hash table based on the Ethernet CRC. */
203static int multicast_filter_limit = 512; 193static int multicast_filter_limit = 512;
204/* Whether to do TCP/UDP checksums in hardware */ 194/* Whether to do TCP/UDP checksums in hardware */
205#ifdef HAS_FIRMWARE
206static int enable_hw_cksum = 1; 195static int enable_hw_cksum = 1;
207#else
208static int enable_hw_cksum = 0;
209#endif
210 196
211#define PKT_BUF_SZ 1536 /* Size of each temporary Rx buffer.*/ 197#define PKT_BUF_SZ 1536 /* Size of each temporary Rx buffer.*/
212/* 198/*
@@ -291,43 +277,15 @@ static int full_duplex[MAX_UNITS] = {0, };
291#define RX_DESC_ADDR_SIZE RxDescAddr32bit 277#define RX_DESC_ADDR_SIZE RxDescAddr32bit
292#endif 278#endif
293 279
294#ifdef MAX_SKB_FRAGS
295#define skb_first_frag_len(skb) skb_headlen(skb) 280#define skb_first_frag_len(skb) skb_headlen(skb)
296#define skb_num_frags(skb) (skb_shinfo(skb)->nr_frags + 1) 281#define skb_num_frags(skb) (skb_shinfo(skb)->nr_frags + 1)
297#else /* not MAX_SKB_FRAGS */
298#define skb_first_frag_len(skb) (skb->len)
299#define skb_num_frags(skb) 1
300#endif /* not MAX_SKB_FRAGS */
301
302/* 2.2.x compatibility code */
303#if LINUX_VERSION_CODE < 0x20300
304
305#include "starfire-kcomp22.h"
306
307#else /* LINUX_VERSION_CODE > 0x20300 */
308
309#include <linux/crc32.h>
310#include <linux/ethtool.h>
311#include <linux/mii.h>
312
313#include <linux/if_vlan.h>
314
315#define init_tx_timer(dev, func, timeout) \
316 dev->tx_timeout = func; \
317 dev->watchdog_timeo = timeout;
318#define kick_tx_timer(dev, func, timeout)
319
320#define netif_start_if(dev)
321#define netif_stop_if(dev)
322
323#define PCI_SLOT_NAME(pci_dev) pci_name(pci_dev)
324
325#endif /* LINUX_VERSION_CODE > 0x20300 */
326 282
327#ifdef HAVE_NETDEV_POLL 283#ifdef HAVE_NETDEV_POLL
328#define init_poll(dev) \ 284#define init_poll(dev) \
285do { \
329 dev->poll = &netdev_poll; \ 286 dev->poll = &netdev_poll; \
330 dev->weight = max_interrupt_work; 287 dev->weight = max_interrupt_work; \
288} while (0)
331#define netdev_rx(dev, ioaddr) \ 289#define netdev_rx(dev, ioaddr) \
332do { \ 290do { \
333 u32 intr_enable; \ 291 u32 intr_enable; \
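
Besides dropping the 2.2.x compatibility layer, the hunk above wraps the multi-statement init_poll() macro in do { ... } while (0) so it expands to a single statement. A standalone demonstration of the pitfall that wrapper avoids:

    /* Standalone demo of why multi-statement macros get a do/while(0)
     * wrapper: the BAD() form breaks under an un-braced if/else. */
    #include <stdio.h>

    struct s { int a, b; };

    #define BAD(x)  (x)->a = 1; (x)->b = 2
    #define GOOD(x) do { (x)->a = 1; (x)->b = 2; } while (0)

    int main(void)
    {
        struct s v = { 0, 0 };

        if (0)
            GOOD(&v);    /* one statement: skipped as a whole, else pairs up */
        else
            printf("a=%d b=%d\n", v.a, v.b);    /* prints: a=0 b=0 */

        /* With BAD() in the same position, only "(x)->a = 1;" would be
         * guarded by the if, and the else would no longer compile. */
        return 0;
    }
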
@@ -341,7 +299,7 @@ do { \
341 /* Paranoia check */ \ 299 /* Paranoia check */ \
342 intr_enable = readl(ioaddr + IntrEnable); \ 300 intr_enable = readl(ioaddr + IntrEnable); \
343 if (intr_enable & (IntrRxDone | IntrRxEmpty)) { \ 301 if (intr_enable & (IntrRxDone | IntrRxEmpty)) { \
344 printk("%s: interrupt while in polling mode!\n", dev->name); \ 302 printk(KERN_INFO "%s: interrupt while in polling mode!\n", dev->name); \
345 intr_enable &= ~(IntrRxDone | IntrRxEmpty); \ 303 intr_enable &= ~(IntrRxDone | IntrRxEmpty); \
346 writel(intr_enable, ioaddr + IntrEnable); \ 304 writel(intr_enable, ioaddr + IntrEnable); \
347 } \ 305 } \
@@ -371,6 +329,7 @@ KERN_INFO " (unofficial 2.2/2.4 kernel port, version " DRV_VERSION ", " DRV_RELD
371MODULE_AUTHOR("Donald Becker <becker@scyld.com>"); 329MODULE_AUTHOR("Donald Becker <becker@scyld.com>");
372MODULE_DESCRIPTION("Adaptec Starfire Ethernet driver"); 330MODULE_DESCRIPTION("Adaptec Starfire Ethernet driver");
373MODULE_LICENSE("GPL"); 331MODULE_LICENSE("GPL");
332MODULE_VERSION(DRV_VERSION);
374 333
375module_param(max_interrupt_work, int, 0); 334module_param(max_interrupt_work, int, 0);
376module_param(mtu, int, 0); 335module_param(mtu, int, 0);
@@ -425,7 +384,7 @@ on the 32/64 bitness of the architecture), and relies on automatic
425minimum-length padding. It does not use the completion queue 384minimum-length padding. It does not use the completion queue
426consumer index, but instead checks for non-zero status entries. 385consumer index, but instead checks for non-zero status entries.
427 386
428For receive this driver uses type 0/1/2/3 receive descriptors. The driver 387For receive this driver uses type 2/3 receive descriptors. The driver
429allocates full frame size skbuffs for the Rx ring buffers, so all frames 388allocates full frame size skbuffs for the Rx ring buffers, so all frames
430should fit in a single descriptor. The driver does not use the completion 389should fit in a single descriptor. The driver does not use the completion
431queue consumer index, but instead checks for non-zero status entries. 390queue consumer index, but instead checks for non-zero status entries.
@@ -476,7 +435,7 @@ IVc. Errata
476 435
477*/ 436*/
478 437
479 438
480 439
481enum chip_capability_flags {CanHaveMII=1, }; 440enum chip_capability_flags {CanHaveMII=1, };
482 441
@@ -670,7 +629,6 @@ struct full_rx_done_desc {
670 u32 timestamp; 629 u32 timestamp;
671}; 630};
672/* XXX: this is ugly and I'm not sure it's worth the trouble -Ion */ 631/* XXX: this is ugly and I'm not sure it's worth the trouble -Ion */
673#ifdef HAS_FIRMWARE
674#ifdef VLAN_SUPPORT 632#ifdef VLAN_SUPPORT
675typedef struct full_rx_done_desc rx_done_desc; 633typedef struct full_rx_done_desc rx_done_desc;
676#define RxComplType RxComplType3 634#define RxComplType RxComplType3
@@ -678,15 +636,6 @@ typedef struct full_rx_done_desc rx_done_desc;
678typedef struct csum_rx_done_desc rx_done_desc; 636typedef struct csum_rx_done_desc rx_done_desc;
679#define RxComplType RxComplType2 637#define RxComplType RxComplType2
680#endif /* not VLAN_SUPPORT */ 638#endif /* not VLAN_SUPPORT */
681#else /* not HAS_FIRMWARE */
682#ifdef VLAN_SUPPORT
683typedef struct basic_rx_done_desc rx_done_desc;
684#define RxComplType RxComplType1
685#else /* not VLAN_SUPPORT */
686typedef struct short_rx_done_desc rx_done_desc;
687#define RxComplType RxComplType0
688#endif /* not VLAN_SUPPORT */
689#endif /* not HAS_FIRMWARE */
690 639
691enum rx_done_bits { 640enum rx_done_bits {
692 RxOK=0x20000000, RxFIFOErr=0x10000000, RxBufQ2=0x08000000, 641 RxOK=0x20000000, RxFIFOErr=0x10000000, RxBufQ2=0x08000000,
@@ -898,13 +847,10 @@ static int __devinit starfire_init_one(struct pci_dev *pdev,
898 /* enable MWI -- it vastly improves Rx performance on sparc64 */ 847 /* enable MWI -- it vastly improves Rx performance on sparc64 */
899 pci_set_mwi(pdev); 848 pci_set_mwi(pdev);
900 849
901#ifdef MAX_SKB_FRAGS
902 dev->features |= NETIF_F_SG;
903#endif /* MAX_SKB_FRAGS */
904#ifdef ZEROCOPY 850#ifdef ZEROCOPY
905 /* Starfire can do TCP/UDP checksumming */ 851 /* Starfire can do TCP/UDP checksumming */
906 if (enable_hw_cksum) 852 if (enable_hw_cksum)
907 dev->features |= NETIF_F_IP_CSUM; 853 dev->features |= NETIF_F_IP_CSUM | NETIF_F_SG;
908#endif /* ZEROCOPY */ 854#endif /* ZEROCOPY */
909#ifdef VLAN_SUPPORT 855#ifdef VLAN_SUPPORT
910 dev->features |= NETIF_F_HW_VLAN_RX | NETIF_F_HW_VLAN_FILTER; 856 dev->features |= NETIF_F_HW_VLAN_RX | NETIF_F_HW_VLAN_FILTER;
@@ -1008,7 +954,8 @@ static int __devinit starfire_init_one(struct pci_dev *pdev,
1008 /* The chip-specific entries in the device structure. */ 954 /* The chip-specific entries in the device structure. */
1009 dev->open = &netdev_open; 955 dev->open = &netdev_open;
1010 dev->hard_start_xmit = &start_tx; 956 dev->hard_start_xmit = &start_tx;
1011 init_tx_timer(dev, tx_timeout, TX_TIMEOUT); 957 dev->tx_timeout = tx_timeout;
958 dev->watchdog_timeo = TX_TIMEOUT;
1012 init_poll(dev); 959 init_poll(dev);
1013 dev->stop = &netdev_close; 960 dev->stop = &netdev_close;
1014 dev->get_stats = &get_stats; 961 dev->get_stats = &get_stats;
@@ -1039,7 +986,7 @@ static int __devinit starfire_init_one(struct pci_dev *pdev,
1039 if ((mdio_read(dev, phy, MII_BMCR) & BMCR_RESET) == 0) 986 if ((mdio_read(dev, phy, MII_BMCR) & BMCR_RESET) == 0)
1040 break; 987 break;
1041 if (boguscnt == 0) { 988 if (boguscnt == 0) {
1042 printk("%s: PHY reset never completed!\n", dev->name); 989 printk("%s: PHY#%d reset never completed!\n", dev->name, phy);
1043 continue; 990 continue;
1044 } 991 }
1045 mii_status = mdio_read(dev, phy, MII_BMSR); 992 mii_status = mdio_read(dev, phy, MII_BMSR);
@@ -1110,6 +1057,7 @@ static int netdev_open(struct net_device *dev)
1110 size_t tx_done_q_size, rx_done_q_size, tx_ring_size, rx_ring_size; 1057 size_t tx_done_q_size, rx_done_q_size, tx_ring_size, rx_ring_size;
1111 1058
1112 /* Do we ever need to reset the chip??? */ 1059 /* Do we ever need to reset the chip??? */
1060
1113 retval = request_irq(dev->irq, &intr_handler, SA_SHIRQ, dev->name, dev); 1061 retval = request_irq(dev->irq, &intr_handler, SA_SHIRQ, dev->name, dev);
1114 if (retval) 1062 if (retval)
1115 return retval; 1063 return retval;
@@ -1211,7 +1159,6 @@ static int netdev_open(struct net_device *dev)
1211 1159
1212 writel(np->intr_timer_ctrl, ioaddr + IntrTimerCtrl); 1160 writel(np->intr_timer_ctrl, ioaddr + IntrTimerCtrl);
1213 1161
1214 netif_start_if(dev);
1215 netif_start_queue(dev); 1162 netif_start_queue(dev);
1216 1163
1217 if (debug > 1) 1164 if (debug > 1)
@@ -1238,13 +1185,11 @@ static int netdev_open(struct net_device *dev)
1238 writel(ETH_P_8021Q, ioaddr + VlanType); 1185 writel(ETH_P_8021Q, ioaddr + VlanType);
1239#endif /* VLAN_SUPPORT */ 1186#endif /* VLAN_SUPPORT */
1240 1187
1241#ifdef HAS_FIRMWARE
1242 /* Load Rx/Tx firmware into the frame processors */ 1188 /* Load Rx/Tx firmware into the frame processors */
1243 for (i = 0; i < FIRMWARE_RX_SIZE * 2; i++) 1189 for (i = 0; i < FIRMWARE_RX_SIZE * 2; i++)
1244 writel(firmware_rx[i], ioaddr + RxGfpMem + i * 4); 1190 writel(firmware_rx[i], ioaddr + RxGfpMem + i * 4);
1245 for (i = 0; i < FIRMWARE_TX_SIZE * 2; i++) 1191 for (i = 0; i < FIRMWARE_TX_SIZE * 2; i++)
1246 writel(firmware_tx[i], ioaddr + TxGfpMem + i * 4); 1192 writel(firmware_tx[i], ioaddr + TxGfpMem + i * 4);
1247#endif /* HAS_FIRMWARE */
1248 if (enable_hw_cksum) 1193 if (enable_hw_cksum)
1249 /* Enable the Rx and Tx units, and the Rx/Tx frame processors. */ 1194 /* Enable the Rx and Tx units, and the Rx/Tx frame processors. */
1250 writel(TxEnable|TxGFPEnable|RxEnable|RxGFPEnable, ioaddr + GenCtrl); 1195 writel(TxEnable|TxGFPEnable|RxEnable|RxGFPEnable, ioaddr + GenCtrl);
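
With the firmware now bundled in starfire_firmware.h, netdev_open() always downloads it: each frame-processor instruction occupies two consecutive u32 entries in the arrays (hence the * 2 bounds), and each word goes to the next 32-bit slot of the Rx/Tx GFP windows. The loop, isolated here for readability, with symbols as in the driver and header:

    /* The GFP download loop from netdev_open(), isolated; firmware_rx[],
     * firmware_tx[] and FIRMWARE_*_SIZE come from starfire_firmware.h,
     * ioaddr/RxGfpMem/TxGfpMem from the driver. */
    static void load_gfp_firmware(void __iomem *ioaddr)
    {
        int i;

        for (i = 0; i < FIRMWARE_RX_SIZE * 2; i++)    /* 2 words/instruction */
            writel(firmware_rx[i], ioaddr + RxGfpMem + i * 4);
        for (i = 0; i < FIRMWARE_TX_SIZE * 2; i++)
            writel(firmware_tx[i], ioaddr + TxGfpMem + i * 4);
    }
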
@@ -1378,8 +1323,6 @@ static int start_tx(struct sk_buff *skb, struct net_device *dev)
1378 u32 status; 1323 u32 status;
1379 int i; 1324 int i;
1380 1325
1381 kick_tx_timer(dev, tx_timeout, TX_TIMEOUT);
1382
1383 /* 1326 /*
1384 * be cautious here, wrapping the queue has weird semantics 1327 * be cautious here, wrapping the queue has weird semantics
1385 * and we may not have enough slots even when it seems we do. 1328 * and we may not have enough slots even when it seems we do.
@@ -1404,7 +1347,7 @@ static int start_tx(struct sk_buff *skb, struct net_device *dev)
1404 } 1347 }
1405 1348
1406 if (has_bad_length) 1349 if (has_bad_length)
1407 skb_checksum_help(skb); 1350 skb_checksum_help(skb, 0);
1408 } 1351 }
1409#endif /* ZEROCOPY && HAS_BROKEN_FIRMWARE */ 1352#endif /* ZEROCOPY && HAS_BROKEN_FIRMWARE */
1410 1353
@@ -1433,12 +1376,10 @@ static int start_tx(struct sk_buff *skb, struct net_device *dev)
1433 np->tx_info[entry].mapping = 1376 np->tx_info[entry].mapping =
1434 pci_map_single(np->pci_dev, skb->data, skb_first_frag_len(skb), PCI_DMA_TODEVICE); 1377 pci_map_single(np->pci_dev, skb->data, skb_first_frag_len(skb), PCI_DMA_TODEVICE);
1435 } else { 1378 } else {
1436#ifdef MAX_SKB_FRAGS
1437 skb_frag_t *this_frag = &skb_shinfo(skb)->frags[i - 1]; 1379 skb_frag_t *this_frag = &skb_shinfo(skb)->frags[i - 1];
1438 status |= this_frag->size; 1380 status |= this_frag->size;
1439 np->tx_info[entry].mapping = 1381 np->tx_info[entry].mapping =
1440 pci_map_single(np->pci_dev, page_address(this_frag->page) + this_frag->page_offset, this_frag->size, PCI_DMA_TODEVICE); 1382 pci_map_single(np->pci_dev, page_address(this_frag->page) + this_frag->page_offset, this_frag->size, PCI_DMA_TODEVICE);
1441#endif /* MAX_SKB_FRAGS */
1442 } 1383 }
1443 1384
1444 np->tx_ring[entry].addr = cpu_to_dma(np->tx_info[entry].mapping); 1385 np->tx_ring[entry].addr = cpu_to_dma(np->tx_info[entry].mapping);
@@ -1531,7 +1472,6 @@ static irqreturn_t intr_handler(int irq, void *dev_instance, struct pt_regs *rgs
1531 np->tx_info[entry].mapping = 0; 1472 np->tx_info[entry].mapping = 0;
1532 np->dirty_tx += np->tx_info[entry].used_slots; 1473 np->dirty_tx += np->tx_info[entry].used_slots;
1533 entry = (entry + np->tx_info[entry].used_slots) % TX_RING_SIZE; 1474 entry = (entry + np->tx_info[entry].used_slots) % TX_RING_SIZE;
1534#ifdef MAX_SKB_FRAGS
1535 { 1475 {
1536 int i; 1476 int i;
1537 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) { 1477 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
@@ -1543,7 +1483,7 @@ static irqreturn_t intr_handler(int irq, void *dev_instance, struct pt_regs *rgs
1543 entry++; 1483 entry++;
1544 } 1484 }
1545 } 1485 }
1546#endif /* MAX_SKB_FRAGS */ 1486
1547 dev_kfree_skb_irq(skb); 1487 dev_kfree_skb_irq(skb);
1548 } 1488 }
1549 np->tx_done_q[np->tx_done].status = 0; 1489 np->tx_done_q[np->tx_done].status = 0;
@@ -1603,7 +1543,7 @@ static int __netdev_rx(struct net_device *dev, int *quota)
1603 if (debug > 4) 1543 if (debug > 4)
1604 printk(KERN_DEBUG " netdev_rx() status of %d was %#8.8x.\n", np->rx_done, desc_status); 1544 printk(KERN_DEBUG " netdev_rx() status of %d was %#8.8x.\n", np->rx_done, desc_status);
1605 if (!(desc_status & RxOK)) { 1545 if (!(desc_status & RxOK)) {
1606 /* There was a error. */ 1546 /* There was an error. */
1607 if (debug > 2) 1547 if (debug > 2)
1608 printk(KERN_DEBUG " netdev_rx() Rx error was %#8.8x.\n", desc_status); 1548 printk(KERN_DEBUG " netdev_rx() Rx error was %#8.8x.\n", desc_status);
1609 np->stats.rx_errors++; 1549 np->stats.rx_errors++;
@@ -1656,11 +1596,10 @@ static int __netdev_rx(struct net_device *dev, int *quota)
1656#endif 1596#endif
1657 1597
1658 skb->protocol = eth_type_trans(skb, dev); 1598 skb->protocol = eth_type_trans(skb, dev);
1659#if defined(HAS_FIRMWARE) || defined(VLAN_SUPPORT) 1599#ifdef VLAN_SUPPORT
1660 if (debug > 4) 1600 if (debug > 4)
1661 printk(KERN_DEBUG " netdev_rx() status2 of %d was %#4.4x.\n", np->rx_done, le16_to_cpu(desc->status2)); 1601 printk(KERN_DEBUG " netdev_rx() status2 of %d was %#4.4x.\n", np->rx_done, le16_to_cpu(desc->status2));
1662#endif 1602#endif
1663#ifdef HAS_FIRMWARE
1664 if (le16_to_cpu(desc->status2) & 0x0100) { 1603 if (le16_to_cpu(desc->status2) & 0x0100) {
1665 skb->ip_summed = CHECKSUM_UNNECESSARY; 1604 skb->ip_summed = CHECKSUM_UNNECESSARY;
1666 np->stats.rx_compressed++; 1605 np->stats.rx_compressed++;
@@ -1679,7 +1618,6 @@ static int __netdev_rx(struct net_device *dev, int *quota)
1679 skb->csum = le16_to_cpu(desc->csum); 1618 skb->csum = le16_to_cpu(desc->csum);
1680 printk(KERN_DEBUG "%s: checksum_hw, status2 = %#x\n", dev->name, le16_to_cpu(desc->status2)); 1619 printk(KERN_DEBUG "%s: checksum_hw, status2 = %#x\n", dev->name, le16_to_cpu(desc->status2));
1681 } 1620 }
1682#endif /* HAS_FIRMWARE */
1683#ifdef VLAN_SUPPORT 1621#ifdef VLAN_SUPPORT
1684 if (np->vlgrp && le16_to_cpu(desc->status2) & 0x0200) { 1622 if (np->vlgrp && le16_to_cpu(desc->status2) & 0x0200) {
1685 if (debug > 4) 1623 if (debug > 4)
@@ -1900,9 +1838,6 @@ static struct net_device_stats *get_stats(struct net_device *dev)
1900} 1838}
1901 1839
1902 1840
1903/* Chips may use the upper or lower CRC bits, and may reverse and/or invert
1904 them. Select the endian-ness that results in minimal calculations.
1905*/
1906static void set_rx_mode(struct net_device *dev) 1841static void set_rx_mode(struct net_device *dev)
1907{ 1842{
1908 struct netdev_private *np = netdev_priv(dev); 1843 struct netdev_private *np = netdev_priv(dev);
@@ -1969,6 +1904,8 @@ static void set_rx_mode(struct net_device *dev)
1969 memset(mc_filter, 0, sizeof(mc_filter)); 1904 memset(mc_filter, 0, sizeof(mc_filter));
1970 for (i = 0, mclist = dev->mc_list; mclist && i < dev->mc_count; 1905 for (i = 0, mclist = dev->mc_list; mclist && i < dev->mc_count;
1971 i++, mclist = mclist->next) { 1906 i++, mclist = mclist->next) {
1907 /* The chip uses the upper 9 CRC bits
1908 as index into the hash table */
1972 int bit_nr = ether_crc_le(ETH_ALEN, mclist->dmi_addr) >> 23; 1909 int bit_nr = ether_crc_le(ETH_ALEN, mclist->dmi_addr) >> 23;
1973 __u32 *fptr = (__u32 *) &mc_filter[(bit_nr >> 4) & ~1]; 1910 __u32 *fptr = (__u32 *) &mc_filter[(bit_nr >> 4) & ~1];
1974 1911
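
The new comment pins down the hash: the top 9 bits of the little-endian Ethernet CRC pick one of the 512 filter bits, hence the >> 23. A standalone sketch of the index computation, with a plain bitwise CRC-32 standing in for the kernel's ether_crc_le():

    /* Standalone sketch of the 9-bit multicast hash index; the CRC routine
     * below is a bitwise stand-in for ether_crc_le() (reflected CRC-32,
     * init ~0, no final inversion). */
    #include <stdint.h>
    #include <stdio.h>

    static uint32_t crc32_le(int len, const uint8_t *p)
    {
        uint32_t crc = 0xFFFFFFFF;
        int i, bit;

        for (i = 0; i < len; i++) {
            crc ^= p[i];
            for (bit = 0; bit < 8; bit++)
                crc = (crc >> 1) ^ ((crc & 1) ? 0xEDB88320 : 0);
        }
        return crc;
    }

    int main(void)
    {
        const uint8_t mc[6] = { 0x01, 0x00, 0x5e, 0x00, 0x00, 0xfb };
        unsigned int bit_nr = crc32_le(6, mc) >> 23;    /* 0..511 */

        printf("filter bit %u -> halfword %u, bit %u\n",
               bit_nr, bit_nr >> 4, bit_nr & 15);
        return 0;
    }
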
@@ -2001,7 +1938,7 @@ static void get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
2001 struct netdev_private *np = netdev_priv(dev); 1938 struct netdev_private *np = netdev_priv(dev);
2002 strcpy(info->driver, DRV_NAME); 1939 strcpy(info->driver, DRV_NAME);
2003 strcpy(info->version, DRV_VERSION); 1940 strcpy(info->version, DRV_VERSION);
2004 strcpy(info->bus_info, PCI_SLOT_NAME(np->pci_dev)); 1941 strcpy(info->bus_info, pci_name(np->pci_dev));
2005} 1942}
2006 1943
2007static int get_settings(struct net_device *dev, struct ethtool_cmd *ecmd) 1944static int get_settings(struct net_device *dev, struct ethtool_cmd *ecmd)
@@ -2083,7 +2020,6 @@ static int netdev_close(struct net_device *dev)
2083 int i; 2020 int i;
2084 2021
2085 netif_stop_queue(dev); 2022 netif_stop_queue(dev);
2086 netif_stop_if(dev);
2087 2023
2088 if (debug > 1) { 2024 if (debug > 1) {
2089 printk(KERN_DEBUG "%s: Shutting down ethercard, Intr status %#8.8x.\n", 2025 printk(KERN_DEBUG "%s: Shutting down ethercard, Intr status %#8.8x.\n",
@@ -2184,7 +2120,13 @@ static int __init starfire_init (void)
2184/* when a module, this is printed whether or not devices are found in probe */ 2120/* when a module, this is printed whether or not devices are found in probe */
2185#ifdef MODULE 2121#ifdef MODULE
2186 printk(version); 2122 printk(version);
2123#ifdef HAVE_NETDEV_POLL
2124 printk(KERN_INFO DRV_NAME ": polling (NAPI) enabled\n");
2125#else
2126 printk(KERN_INFO DRV_NAME ": polling (NAPI) disabled\n");
2187#endif 2127#endif
2128#endif
2129
2188#ifndef ADDR_64BITS 2130#ifndef ADDR_64BITS
2189 /* we can do this test only at run-time... sigh */ 2131 /* we can do this test only at run-time... sigh */
2190 if (sizeof(dma_addr_t) == sizeof(u64)) { 2132 if (sizeof(dma_addr_t) == sizeof(u64)) {
@@ -2192,10 +2134,6 @@ static int __init starfire_init (void)
2192 return -ENODEV; 2134 return -ENODEV;
2193 } 2135 }
2194#endif /* not ADDR_64BITS */ 2136#endif /* not ADDR_64BITS */
2195#ifndef HAS_FIRMWARE
2196 /* unconditionally disable hw cksums if firmware is not present */
2197 enable_hw_cksum = 0;
2198#endif /* not HAS_FIRMWARE */
2199 return pci_module_init (&starfire_driver); 2137 return pci_module_init (&starfire_driver);
2200} 2138}
2201 2139
diff --git a/drivers/net/starfire_firmware.h b/drivers/net/starfire_firmware.h
new file mode 100644
index 000000000000..0a668528955d
--- /dev/null
+++ b/drivers/net/starfire_firmware.h
@@ -0,0 +1,346 @@
1/*
2 * Copyright 2003 Adaptec, Inc.
3 *
4 * Please read the following license before using the Adaptec Software
5 * ("Program"). If you do not agree to the license terms, do not use the
6 * Program:
7 *
8 * You agree to be bound by version 2 of the General Public License ("GPL")
9 * dated June 1991, which can be found at http://www.fsf.org/licenses/gpl.html.
10 * If the link is broken, write to Free Software Foundation, 59 Temple Place,
11 * Boston, Massachusetts 02111-1307.
12 *
13 * BECAUSE THE PROGRAM IS LICENSED FREE OF CHARGE IT IS LICENSED "AS IS" AND
14 * THERE IS NO WARRANTY FOR THE PROGRAM, INCLUDING BUT NOT LIMITED TO THE
15 * IMPLIED WARRANTIES OF MERCHANTIBILITY OR FITNESS FOR A PARTICULAR PURPOSE
16 * (TO THE EXTENT PERMITTED BY APPLICABLE LAW). USE OF THE PROGRAM IS AT YOUR
17 * OWN RISK. IN NO EVENT WILL ADAPTEC OR ITS LICENSORS BE LIABLE TO YOU FOR
18 * DAMAGES, INCLUDING ANY GENERAL, SPECIAL, INCIDENTAL OR CONSEQUENTIAL DAMAGES
19 * ARISING OUT OF THE USE OR INABILITY TO USE THE PROGRAM.
20 *
21 */
22
23static const u32 firmware_rx[] = {
24 0x010003dc, 0x00000000,
25 0x04000421, 0x00000086,
26 0x80000015, 0x0000180e,
27 0x81000015, 0x00006664,
28 0x1a0040ab, 0x00000b06,
29 0x14200011, 0x00000000,
30 0x14204022, 0x0000aaaa,
31 0x14204022, 0x00000300,
32 0x14204022, 0x00000000,
33 0x1a0040ab, 0x00000b14,
34 0x14200011, 0x00000000,
35 0x83000015, 0x00000002,
36 0x04000021, 0x00000000,
37 0x00000010, 0x00000000,
38 0x04000421, 0x00000087,
39 0x00000010, 0x00000000,
40 0x00000010, 0x00000000,
41 0x00008015, 0x00000000,
42 0x0000003e, 0x00000000,
43 0x00000010, 0x00000000,
44 0x82000015, 0x00004000,
45 0x009e8050, 0x00000000,
46 0x03008015, 0x00000000,
47 0x86008015, 0x00000000,
48 0x82000015, 0x00008000,
49 0x0100001c, 0x00000000,
50 0x000050a0, 0x0000010c,
51 0x4e20d011, 0x00006008,
52 0x1420d012, 0x00004008,
53 0x0000f090, 0x00007000,
54 0x0000c8b0, 0x00003000,
55 0x00004040, 0x00000000,
56 0x00108015, 0x00000000,
57 0x00a2c150, 0x00004000,
58 0x00a400b0, 0x00000014,
59 0x00000020, 0x00000000,
60 0x2500400d, 0x00002525,
61 0x00047220, 0x00003100,
62 0x00934070, 0x00000000,
63 0x00000020, 0x00000000,
64 0x00924460, 0x00000184,
65 0x2b20c011, 0x00000000,
66 0x0000c420, 0x00000540,
67 0x36014018, 0x0000422d,
68 0x14200011, 0x00000000,
69 0x00924460, 0x00000183,
70 0x3200001f, 0x00000034,
71 0x02ac0015, 0x00000002,
72 0x00a60110, 0x00000008,
73 0x42200011, 0x00000000,
74 0x00924060, 0x00000103,
75 0x0000001e, 0x00000000,
76 0x00000020, 0x00000100,
77 0x0000001e, 0x00000000,
78 0x00924460, 0x00000086,
79 0x00004080, 0x00000000,
80 0x0092c070, 0x00000000,
81 0x00924060, 0x00000100,
82 0x0000c890, 0x00005000,
83 0x00a6c110, 0x00000000,
84 0x00b0c090, 0x00000012,
85 0x021c0015, 0x00000000,
86 0x3200001f, 0x00000034,
87 0x00924460, 0x00000510,
88 0x44210011, 0x00000000,
89 0x42000011, 0x00000000,
90 0x83000015, 0x00000040,
91 0x00924460, 0x00000508,
92 0x45014018, 0x00004545,
93 0x00808050, 0x00000000,
94 0x62208012, 0x00000000,
95 0x82000015, 0x00000800,
96 0x15200011, 0x00000000,
97 0x00000010, 0x00000000,
98 0x00000010, 0x00000000,
99 0x00000010, 0x00000000,
100 0x00000010, 0x00000000,
101 0x00000010, 0x00000000,
102 0x80000015, 0x0000eea4,
103 0x81000015, 0x0000005f,
104 0x00000060, 0x00000000,
105 0x00004120, 0x00000000,
106 0x00004a00, 0x00004000,
107 0x00924460, 0x00000190,
108 0x5601401a, 0x00005956,
109 0x14000011, 0x00000000,
110 0x00934050, 0x00000018,
111 0x00930050, 0x00000018,
112 0x3601403a, 0x0000002d,
113 0x000643a9, 0x00000000,
114 0x0000c420, 0x00000140,
115 0x5601401a, 0x00005956,
116 0x14000011, 0x00000000,
117 0x00000010, 0x00000000,
118 0x00000010, 0x00000000,
119 0x000642a9, 0x00000000,
120 0x00024420, 0x00000183,
121 0x5601401a, 0x00005956,
122 0x82000015, 0x00002000,
123 0x15200011, 0x00000000,
124 0x82000015, 0x00000010,
125 0x15200011, 0x00000000,
126 0x82000015, 0x00000010,
127 0x15200011, 0x00000000,
128}; /* 104 Rx instructions */
129#define FIRMWARE_RX_SIZE 104
130
131static const u32 firmware_tx[] = {
132 0x010003dc, 0x00000000,
133 0x04000421, 0x00000086,
134 0x80000015, 0x0000180e,
135 0x81000015, 0x00006664,
136 0x1a0040ab, 0x00000b06,
137 0x14200011, 0x00000000,
138 0x14204022, 0x0000aaaa,
139 0x14204022, 0x00000300,
140 0x14204022, 0x00000000,
141 0x1a0040ab, 0x00000b14,
142 0x14200011, 0x00000000,
143 0x83000015, 0x00000002,
144 0x04000021, 0x00000000,
145 0x00000010, 0x00000000,
146 0x04000421, 0x00000087,
147 0x00000010, 0x00000000,
148 0x00000010, 0x00000000,
149 0x00008015, 0x00000000,
150 0x0000003e, 0x00000000,
151 0x00000010, 0x00000000,
152 0x82000015, 0x00004000,
153 0x009e8050, 0x00000000,
154 0x03008015, 0x00000000,
155 0x86008015, 0x00000000,
156 0x82000015, 0x00008000,
157 0x0100001c, 0x00000000,
158 0x000050a0, 0x0000010c,
159 0x4e20d011, 0x00006008,
160 0x1420d012, 0x00004008,
161 0x0000f090, 0x00007000,
162 0x0000c8b0, 0x00003000,
163 0x00004040, 0x00000000,
164 0x00108015, 0x00000000,
165 0x00a2c150, 0x00004000,
166 0x00a400b0, 0x00000014,
167 0x00000020, 0x00000000,
168 0x2500400d, 0x00002525,
169 0x00047220, 0x00003100,
170 0x00934070, 0x00000000,
171 0x00000020, 0x00000000,
172 0x00924460, 0x00000184,
173 0x2b20c011, 0x00000000,
174 0x0000c420, 0x00000540,
175 0x36014018, 0x0000422d,
176 0x14200011, 0x00000000,
177 0x00924460, 0x00000183,
178 0x3200001f, 0x00000034,
179 0x02ac0015, 0x00000002,
180 0x00a60110, 0x00000008,
181 0x42200011, 0x00000000,
182 0x00924060, 0x00000103,
183 0x0000001e, 0x00000000,
184 0x00000020, 0x00000100,
185 0x0000001e, 0x00000000,
186 0x00924460, 0x00000086,
187 0x00004080, 0x00000000,
188 0x0092c070, 0x00000000,
189 0x00924060, 0x00000100,
190 0x0000c890, 0x00005000,
191 0x00a6c110, 0x00000000,
192 0x00b0c090, 0x00000012,
193 0x021c0015, 0x00000000,
194 0x3200001f, 0x00000034,
195 0x00924460, 0x00000510,
196 0x44210011, 0x00000000,
197 0x42000011, 0x00000000,
198 0x83000015, 0x00000040,
199 0x00924460, 0x00000508,
200 0x45014018, 0x00004545,
201 0x00808050, 0x00000000,
202 0x62208012, 0x00000000,
203 0x82000015, 0x00000800,
204 0x15200011, 0x00000000,
205 0x00000010, 0x00000000,
206 0x00000010, 0x00000000,
207 0x00000010, 0x00000000,
208 0x00000010, 0x00000000,
209 0x00000010, 0x00000000,
210 0x80000015, 0x0000eea4,
211 0x81000015, 0x0000005f,
212 0x00000060, 0x00000000,
213 0x00004120, 0x00000000,
214 0x00004a00, 0x00004000,
215 0x00924460, 0x00000190,
216 0x5601401a, 0x00005956,
217 0x14000011, 0x00000000,
218 0x00934050, 0x00000018,
219 0x00930050, 0x00000018,
220 0x3601403a, 0x0000002d,
221 0x000643a9, 0x00000000,
222 0x0000c420, 0x00000140,
223 0x5601401a, 0x00005956,
224 0x14000011, 0x00000000,
225 0x00000010, 0x00000000,
226 0x00000010, 0x00000000,
227 0x000642a9, 0x00000000,
228 0x00024420, 0x00000183,
229 0x5601401a, 0x00005956,
230 0x82000015, 0x00002000,
231 0x15200011, 0x00000000,
232 0x82000015, 0x00000010,
233 0x15200011, 0x00000000,
234 0x82000015, 0x00000010,
235 0x15200011, 0x00000000,
236}; /* 104 Tx instructions */
237#define FIRMWARE_TX_SIZE 104
238#if 0
239static const u32 firmware_wol[] = {
240 0x010003dc, 0x00000000,
241 0x19000421, 0x00000087,
242 0x80000015, 0x00001a1a,
243 0x81000015, 0x00001a1a,
244 0x1a0040ab, 0x00000b06,
245 0x15200011, 0x00000000,
246 0x15204022, 0x0000aaaa,
247 0x15204022, 0x00000300,
248 0x15204022, 0x00000000,
249 0x1a0040ab, 0x00000b15,
250 0x15200011, 0x00000000,
251 0x83000015, 0x00000002,
252 0x04000021, 0x00000000,
253 0x00000010, 0x00000000,
254 0x04000421, 0x00000087,
255 0x00000010, 0x00000000,
256 0x00000010, 0x00000000,
257 0x00008015, 0x00000000,
258 0x0000003e, 0x00000000,
259 0x00000010, 0x00000000,
260 0x00000010, 0x00000000,
261 0x82000015, 0x00004000,
262 0x82000015, 0x00008000,
263 0x0000000c, 0x00000000,
264 0x00000010, 0x00000000,
265 0x00004080, 0x00000100,
266 0x1f20c011, 0x00001122,
267 0x2720f011, 0x00003011,
268 0x19200071, 0x00000000,
269 0x1a200051, 0x00000000,
270 0x00000010, 0x00000000,
271 0x00000010, 0x00000000,
272 0x1d2040a4, 0x00003344,
273 0x1d2040a2, 0x00005566,
274 0x000040a0, 0x00000100,
275 0x00108050, 0x00000001,
276 0x1a208012, 0x00000006,
277 0x82000015, 0x00008080,
278 0x010003dc, 0x00000000,
279 0x1d2040a4, 0x00002233,
280 0x1d2040a4, 0x00004455,
281 0x2d208011, 0x00000005,
282 0x1d2040a4, 0x00006611,
283 0x00108050, 0x00000001,
284 0x27200011, 0x00000000,
285 0x1d2050a4, 0x00006600,
286 0x82000015, 0x00008080,
287 0x010003dc, 0x00000000,
288 0x00000050, 0x00000000,
289 0x1b200031, 0x00000000,
290 0x0000001e, 0x00000000,
291 0x0000001e, 0x00000000,
292 0x0000001e, 0x00000000,
293 0x0000001e, 0x00000000,
294 0x00924460, 0x00000086,
295 0x00004080, 0x00000000,
296 0x0092c070, 0x00000000,
297 0x00924060, 0x00000100,
298 0x0000c890, 0x00005000,
299 0x00a6c110, 0x00000000,
300 0x00b0c090, 0x00000012,
301 0x021c0015, 0x00000000,
302 0x3200001f, 0x00000034,
303 0x00924460, 0x00000510,
304 0x44210011, 0x00000000,
305 0x42000011, 0x00000000,
306 0x83000015, 0x00000040,
307 0x00924460, 0x00000508,
308 0x476a0012, 0x00000100,
309 0x83000015, 0x00000008,
310 0x16200011, 0x00000000,
311 0x001e8050, 0x00000000,
312 0x001e8050, 0x00000000,
313 0x00808050, 0x00000000,
314 0x03008015, 0x00000000,
315 0x62208012, 0x00000000,
316 0x82000015, 0x00000800,
317 0x16200011, 0x00000000,
318 0x80000015, 0x0000eea4,
319 0x81000015, 0x0000005f,
320 0x00000020, 0x00000000,
321 0x00004120, 0x00000000,
322 0x00004a00, 0x00004000,
323 0x00924460, 0x00000190,
324 0x5c01401a, 0x0000595c,
325 0x15000011, 0x00000000,
326 0x00934050, 0x00000018,
327 0x00930050, 0x00000018,
328 0x3601403a, 0x0000002d,
329 0x00064029, 0x00000000,
330 0x0000c420, 0x00000140,
331 0x5c01401a, 0x0000595c,
332 0x15000011, 0x00000000,
333 0x00000010, 0x00000000,
334 0x00000010, 0x00000000,
335 0x00064029, 0x00000000,
336 0x00024420, 0x00000183,
337 0x5c01401a, 0x0000595c,
338 0x82000015, 0x00002000,
339 0x16200011, 0x00000000,
340 0x82000015, 0x00000010,
341 0x16200011, 0x00000000,
342 0x82000015, 0x00000010,
343 0x16200011, 0x00000000,
344}; /* 104 WoL instructions */
345#define FIRMWARE_WOL_SIZE 104
346#endif
diff --git a/drivers/net/tlan.c b/drivers/net/tlan.c
index 9680a308c62b..cf31c0629852 100644
--- a/drivers/net/tlan.c
+++ b/drivers/net/tlan.c
@@ -2819,7 +2819,7 @@ void TLan_PhyMonitor( struct net_device *dev )
2819 if (priv->link) { 2819 if (priv->link) {
2820 priv->link = 0; 2820 priv->link = 0;
2821 printk(KERN_DEBUG "TLAN: %s has lost link\n", dev->name); 2821 printk(KERN_DEBUG "TLAN: %s has lost link\n", dev->name);
2822 dev->flags &= ~IFF_RUNNING; 2822 netif_carrier_off(dev);
2823 TLan_SetTimer( dev, (2*HZ), TLAN_TIMER_LINK_BEAT ); 2823 TLan_SetTimer( dev, (2*HZ), TLAN_TIMER_LINK_BEAT );
2824 return; 2824 return;
2825 } 2825 }
@@ -2829,7 +2829,7 @@ void TLan_PhyMonitor( struct net_device *dev )
2829 if ((phy_status & MII_GS_LINK) && !priv->link) { 2829 if ((phy_status & MII_GS_LINK) && !priv->link) {
2830 priv->link = 1; 2830 priv->link = 1;
2831 printk(KERN_DEBUG "TLAN: %s has reestablished link\n", dev->name); 2831 printk(KERN_DEBUG "TLAN: %s has reestablished link\n", dev->name);
2832 dev->flags |= IFF_RUNNING; 2832 netif_carrier_on(dev);
2833 } 2833 }
2834 2834
2835 /* Setup a new monitor */ 2835 /* Setup a new monitor */
diff --git a/drivers/net/tokenring/ibmtr.c b/drivers/net/tokenring/ibmtr.c
index c098863bdd9d..3873917a9c22 100644
--- a/drivers/net/tokenring/ibmtr.c
+++ b/drivers/net/tokenring/ibmtr.c
@@ -888,11 +888,6 @@ static int tok_open(struct net_device *dev)
888 ti->sap_status = CLOSED; /* CLOSED or OPEN */ 888 ti->sap_status = CLOSED; /* CLOSED or OPEN */
889 ti->open_failure = NO; /* NO or YES */ 889 ti->open_failure = NO; /* NO or YES */
890 ti->open_mode = MANUAL; /* MANUAL or AUTOMATIC */ 890 ti->open_mode = MANUAL; /* MANUAL or AUTOMATIC */
891 /* 12/2000 not typical Linux, but we can use RUNNING to let us know when
892 the network has crapped out or cables are disconnected. Useful because
893 the IFF_UP flag stays up the whole time, until ifconfig tr0 down.
894 */
895 dev->flags &= ~IFF_RUNNING;
896 891
897 ti->sram_phys &= ~1; /* to reverse what we do in tok_close */ 892 ti->sram_phys &= ~1; /* to reverse what we do in tok_close */
898 /* init the spinlock */ 893 /* init the spinlock */
@@ -1242,7 +1237,7 @@ irqreturn_t tok_interrupt(int irq, void *dev_id, struct pt_regs *regs)
1242 ti->open_status = CLOSED; 1237 ti->open_status = CLOSED;
1243 ti->sap_status = CLOSED; 1238 ti->sap_status = CLOSED;
1244 ti->open_mode = AUTOMATIC; 1239 ti->open_mode = AUTOMATIC;
1245 dev->flags &= ~IFF_RUNNING; 1240 netif_carrier_off(dev);
1246 netif_stop_queue(dev); 1241 netif_stop_queue(dev);
1247 ti->open_action = RESTART; 1242 ti->open_action = RESTART;
1248 outb(0, dev->base_addr + ADAPTRESET); 1243 outb(0, dev->base_addr + ADAPTRESET);
@@ -1323,7 +1318,7 @@ irqreturn_t tok_interrupt(int irq, void *dev_id, struct pt_regs *regs)
1323 break; 1318 break;
1324 } 1319 }
1325 netif_wake_queue(dev); 1320 netif_wake_queue(dev);
1326 dev->flags |= IFF_RUNNING;/*BMS 12/2000*/ 1321 netif_carrier_on(dev);
1327 break; 1322 break;
1328 case DIR_INTERRUPT: 1323 case DIR_INTERRUPT:
1329 case DIR_MOD_OPEN_PARAMS: 1324 case DIR_MOD_OPEN_PARAMS:
@@ -1427,7 +1422,7 @@ irqreturn_t tok_interrupt(int irq, void *dev_id, struct pt_regs *regs)
1427 ring_status); 1422 ring_status);
1428 if(ring_status& (REMOVE_RECV|AUTO_REMOVAL|LOBE_FAULT)){ 1423 if(ring_status& (REMOVE_RECV|AUTO_REMOVAL|LOBE_FAULT)){
1429 netif_stop_queue(dev); 1424 netif_stop_queue(dev);
1430 dev->flags &= ~IFF_RUNNING;/*not typical Linux*/ 1425 netif_carrier_off(dev);
1431 DPRINTK("Remove received, or Auto-removal error" 1426 DPRINTK("Remove received, or Auto-removal error"
1432 ", or Lobe fault\n"); 1427 ", or Lobe fault\n");
1433 DPRINTK("We'll try to reopen the closed adapter" 1428 DPRINTK("We'll try to reopen the closed adapter"
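
Both the TLAN and the token-ring hunks replace hand-rolled IFF_RUNNING flag twiddling with netif_carrier_off()/netif_carrier_on(), the stack's standard link-state reporting. A minimal sketch of a link monitor built on those helpers; the has_link argument stands in for the driver's PHY or ring-status check:

    /* Sketch only: link reporting via the carrier helpers; has_link is a
     * placeholder for whatever the hardware says about the link. */
    static void example_check_link(struct net_device *dev, int has_link)
    {
        if (has_link && !netif_carrier_ok(dev))
            netif_carrier_on(dev);     /* link came up */
        else if (!has_link && netif_carrier_ok(dev))
            netif_carrier_off(dev);    /* link went down */
    }
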
diff --git a/drivers/net/wan/hdlc_fr.c b/drivers/net/wan/hdlc_fr.c
index 7f450b51a6cb..a5d6891c9d4c 100644
--- a/drivers/net/wan/hdlc_fr.c
+++ b/drivers/net/wan/hdlc_fr.c
@@ -2,7 +2,7 @@
2 * Generic HDLC support routines for Linux 2 * Generic HDLC support routines for Linux
3 * Frame Relay support 3 * Frame Relay support
4 * 4 *
5 * Copyright (C) 1999 - 2003 Krzysztof Halasa <khc@pm.waw.pl> 5 * Copyright (C) 1999 - 2005 Krzysztof Halasa <khc@pm.waw.pl>
6 * 6 *
7 * This program is free software; you can redistribute it and/or modify it 7 * This program is free software; you can redistribute it and/or modify it
8 * under the terms of version 2 of the GNU General Public License 8 * under the terms of version 2 of the GNU General Public License
@@ -27,6 +27,10 @@
27 active = open and "link reliable" 27 active = open and "link reliable"
28 exist = new = not used 28 exist = new = not used
29 29
30 CCITT LMI: ITU-T Q.933 Annex A
31 ANSI LMI: ANSI T1.617 Annex D
32 CISCO LMI: the original, aka "Gang of Four" LMI
33
30*/ 34*/
31 35
32#include <linux/module.h> 36#include <linux/module.h>
@@ -49,45 +53,41 @@
49#undef DEBUG_ECN 53#undef DEBUG_ECN
50#undef DEBUG_LINK 54#undef DEBUG_LINK
51 55
52#define MAXLEN_LMISTAT 20 /* max size of status enquiry frame */ 56#define FR_UI 0x03
53 57#define FR_PAD 0x00
54#define PVC_STATE_NEW 0x01 58
55#define PVC_STATE_ACTIVE 0x02 59#define NLPID_IP 0xCC
56#define PVC_STATE_FECN 0x08 /* FECN condition */ 60#define NLPID_IPV6 0x8E
57#define PVC_STATE_BECN 0x10 /* BECN condition */ 61#define NLPID_SNAP 0x80
58 62#define NLPID_PAD 0x00
59 63#define NLPID_CCITT_ANSI_LMI 0x08
60#define FR_UI 0x03 64#define NLPID_CISCO_LMI 0x09
61#define FR_PAD 0x00 65
62 66
63#define NLPID_IP 0xCC 67#define LMI_CCITT_ANSI_DLCI 0 /* LMI DLCI */
64#define NLPID_IPV6 0x8E 68#define LMI_CISCO_DLCI 1023
65#define NLPID_SNAP 0x80 69
66#define NLPID_PAD 0x00 70#define LMI_CALLREF 0x00 /* Call Reference */
67#define NLPID_Q933 0x08 71#define LMI_ANSI_LOCKSHIFT 0x95 /* ANSI locking shift */
68 72#define LMI_ANSI_CISCO_REPTYPE 0x01 /* report type */
69 73#define LMI_CCITT_REPTYPE 0x51
70#define LMI_DLCI 0 /* LMI DLCI */ 74#define LMI_ANSI_CISCO_ALIVE 0x03 /* keep alive */
71#define LMI_PROTO 0x08 75#define LMI_CCITT_ALIVE 0x53
72#define LMI_CALLREF 0x00 /* Call Reference */ 76#define LMI_ANSI_CISCO_PVCSTAT 0x07 /* PVC status */
73#define LMI_ANSI_LOCKSHIFT 0x95 /* ANSI lockshift */ 77#define LMI_CCITT_PVCSTAT 0x57
74#define LMI_REPTYPE 1 /* report type */ 78
75#define LMI_CCITT_REPTYPE 0x51 79#define LMI_FULLREP 0x00 /* full report */
76#define LMI_ALIVE 3 /* keep alive */ 80#define LMI_INTEGRITY 0x01 /* link integrity report */
77#define LMI_CCITT_ALIVE 0x53 81#define LMI_SINGLE 0x02 /* single PVC report */
78#define LMI_PVCSTAT 7 /* pvc status */ 82
79#define LMI_CCITT_PVCSTAT 0x57
80#define LMI_FULLREP 0 /* full report */
81#define LMI_INTEGRITY 1 /* link integrity report */
82#define LMI_SINGLE 2 /* single pvc report */
83#define LMI_STATUS_ENQUIRY 0x75 83#define LMI_STATUS_ENQUIRY 0x75
84#define LMI_STATUS 0x7D /* reply */ 84#define LMI_STATUS 0x7D /* reply */
85 85
86#define LMI_REPT_LEN 1 /* report type element length */ 86#define LMI_REPT_LEN 1 /* report type element length */
87#define LMI_INTEG_LEN 2 /* link integrity element length */ 87#define LMI_INTEG_LEN 2 /* link integrity element length */
88 88
89#define LMI_LENGTH 13 /* standard LMI frame length */ 89#define LMI_CCITT_CISCO_LENGTH 13 /* LMI frame lengths */
90#define LMI_ANSI_LENGTH 14 90#define LMI_ANSI_LENGTH 14
91 91
92 92
93typedef struct { 93typedef struct {
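
The reworked constants make the three LMI dialects explicit: CCITT (Q.933 Annex A) and ANSI (T1.617 Annex D) share NLPID 0x08 on DLCI 0 but use different IE codepoints (and ANSI adds a locking shift), while Cisco LMI uses NLPID 0x09 on DLCI 1023 with the ANSI codepoints. A small sketch of per-dialect parameter selection using the defines above; LMI_CCITT, LMI_ANSI and LMI_CISCO are assumed to be the mode constants from <linux/hdlc.h>, with LMI_CISCO added elsewhere in this series:

    /* Illustrative per-dialect parameter selection; constants as defined
     * in the hunk above, mode values assumed from <linux/hdlc.h>. */
    struct lmi_params {
        u16 dlci;       /* DLCI that carries LMI */
        u8  nlpid;      /* protocol id after the Q.922 header */
        u8  reptype;    /* Report type IE codepoint */
        u8  alive;      /* Link integrity verification IE codepoint */
    };

    static struct lmi_params lmi_params_for(int lmi)
    {
        struct lmi_params p;

        p.dlci    = (lmi == LMI_CISCO) ? LMI_CISCO_DLCI : LMI_CCITT_ANSI_DLCI;
        p.nlpid   = (lmi == LMI_CISCO) ? NLPID_CISCO_LMI : NLPID_CCITT_ANSI_LMI;
        p.reptype = (lmi == LMI_CCITT) ? LMI_CCITT_REPTYPE : LMI_ANSI_CISCO_REPTYPE;
        p.alive   = (lmi == LMI_CCITT) ? LMI_CCITT_ALIVE : LMI_ANSI_CISCO_ALIVE;
        return p;
    }
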
@@ -223,51 +223,34 @@ static inline struct net_device** get_dev_p(pvc_device *pvc, int type)
223} 223}
224 224
225 225
226static inline u16 status_to_dlci(u8 *status, int *active, int *new)
227{
228 *new = (status[2] & 0x08) ? 1 : 0;
229 *active = (status[2] & 0x02) ? 1 : 0;
230
231 return ((status[0] & 0x3F) << 4) | ((status[1] & 0x78) >> 3);
232}
233
234
235static inline void dlci_to_status(u16 dlci, u8 *status, int active, int new)
236{
237 status[0] = (dlci >> 4) & 0x3F;
238 status[1] = ((dlci << 3) & 0x78) | 0x80;
239 status[2] = 0x80;
240
241 if (new)
242 status[2] |= 0x08;
243 else if (active)
244 status[2] |= 0x02;
245}
246
247
248
249static int fr_hard_header(struct sk_buff **skb_p, u16 dlci) 226static int fr_hard_header(struct sk_buff **skb_p, u16 dlci)
250{ 227{
251 u16 head_len; 228 u16 head_len;
252 struct sk_buff *skb = *skb_p; 229 struct sk_buff *skb = *skb_p;
253 230
254 switch (skb->protocol) { 231 switch (skb->protocol) {
255 case __constant_ntohs(ETH_P_IP): 232 case __constant_ntohs(NLPID_CCITT_ANSI_LMI):
256 head_len = 4; 233 head_len = 4;
257 skb_push(skb, head_len); 234 skb_push(skb, head_len);
258 skb->data[3] = NLPID_IP; 235 skb->data[3] = NLPID_CCITT_ANSI_LMI;
259 break; 236 break;
260 237
261 case __constant_ntohs(ETH_P_IPV6): 238 case __constant_ntohs(NLPID_CISCO_LMI):
262 head_len = 4; 239 head_len = 4;
263 skb_push(skb, head_len); 240 skb_push(skb, head_len);
264 skb->data[3] = NLPID_IPV6; 241 skb->data[3] = NLPID_CISCO_LMI;
265 break; 242 break;
266 243
267 case __constant_ntohs(LMI_PROTO): 244 case __constant_ntohs(ETH_P_IP):
245 head_len = 4;
246 skb_push(skb, head_len);
247 skb->data[3] = NLPID_IP;
248 break;
249
250 case __constant_ntohs(ETH_P_IPV6):
268 head_len = 4; 251 head_len = 4;
269 skb_push(skb, head_len); 252 skb_push(skb, head_len);
270 skb->data[3] = LMI_PROTO; 253 skb->data[3] = NLPID_IPV6;
271 break; 254 break;
272 255
273 case __constant_ntohs(ETH_P_802_3): 256 case __constant_ntohs(ETH_P_802_3):
@@ -461,13 +444,14 @@ static void fr_lmi_send(struct net_device *dev, int fullrep)
461 hdlc_device *hdlc = dev_to_hdlc(dev); 444 hdlc_device *hdlc = dev_to_hdlc(dev);
462 struct sk_buff *skb; 445 struct sk_buff *skb;
463 pvc_device *pvc = hdlc->state.fr.first_pvc; 446 pvc_device *pvc = hdlc->state.fr.first_pvc;
464 int len = (hdlc->state.fr.settings.lmi == LMI_ANSI) ? LMI_ANSI_LENGTH 447 int lmi = hdlc->state.fr.settings.lmi;
465 : LMI_LENGTH; 448 int dce = hdlc->state.fr.settings.dce;
466 int stat_len = 3; 449 int len = lmi == LMI_ANSI ? LMI_ANSI_LENGTH : LMI_CCITT_CISCO_LENGTH;
450 int stat_len = (lmi == LMI_CISCO) ? 6 : 3;
467 u8 *data; 451 u8 *data;
468 int i = 0; 452 int i = 0;
469 453
470 if (hdlc->state.fr.settings.dce && fullrep) { 454 if (dce && fullrep) {
471 len += hdlc->state.fr.dce_pvc_count * (2 + stat_len); 455 len += hdlc->state.fr.dce_pvc_count * (2 + stat_len);
472 if (len > HDLC_MAX_MRU) { 456 if (len > HDLC_MAX_MRU) {
473 printk(KERN_WARNING "%s: Too many PVCs while sending " 457 printk(KERN_WARNING "%s: Too many PVCs while sending "
@@ -484,29 +468,31 @@ static void fr_lmi_send(struct net_device *dev, int fullrep)
484 } 468 }
485 memset(skb->data, 0, len); 469 memset(skb->data, 0, len);
486 skb_reserve(skb, 4); 470 skb_reserve(skb, 4);
487 skb->protocol = __constant_htons(LMI_PROTO); 471 if (lmi == LMI_CISCO) {
488 fr_hard_header(&skb, LMI_DLCI); 472 skb->protocol = __constant_htons(NLPID_CISCO_LMI);
473 fr_hard_header(&skb, LMI_CISCO_DLCI);
474 } else {
475 skb->protocol = __constant_htons(NLPID_CCITT_ANSI_LMI);
476 fr_hard_header(&skb, LMI_CCITT_ANSI_DLCI);
477 }
489 data = skb->tail; 478 data = skb->tail;
490 data[i++] = LMI_CALLREF; 479 data[i++] = LMI_CALLREF;
491 data[i++] = hdlc->state.fr.settings.dce 480 data[i++] = dce ? LMI_STATUS : LMI_STATUS_ENQUIRY;
492 ? LMI_STATUS : LMI_STATUS_ENQUIRY; 481 if (lmi == LMI_ANSI)
493 if (hdlc->state.fr.settings.lmi == LMI_ANSI)
494 data[i++] = LMI_ANSI_LOCKSHIFT; 482 data[i++] = LMI_ANSI_LOCKSHIFT;
495 data[i++] = (hdlc->state.fr.settings.lmi == LMI_CCITT) 483 data[i++] = lmi == LMI_CCITT ? LMI_CCITT_REPTYPE :
496 ? LMI_CCITT_REPTYPE : LMI_REPTYPE; 484 LMI_ANSI_CISCO_REPTYPE;
497 data[i++] = LMI_REPT_LEN; 485 data[i++] = LMI_REPT_LEN;
498 data[i++] = fullrep ? LMI_FULLREP : LMI_INTEGRITY; 486 data[i++] = fullrep ? LMI_FULLREP : LMI_INTEGRITY;
499 487 data[i++] = lmi == LMI_CCITT ? LMI_CCITT_ALIVE : LMI_ANSI_CISCO_ALIVE;
500 data[i++] = (hdlc->state.fr.settings.lmi == LMI_CCITT)
501 ? LMI_CCITT_ALIVE : LMI_ALIVE;
502 data[i++] = LMI_INTEG_LEN; 488 data[i++] = LMI_INTEG_LEN;
503 data[i++] = hdlc->state.fr.txseq =fr_lmi_nextseq(hdlc->state.fr.txseq); 489 data[i++] = hdlc->state.fr.txseq =fr_lmi_nextseq(hdlc->state.fr.txseq);
504 data[i++] = hdlc->state.fr.rxseq; 490 data[i++] = hdlc->state.fr.rxseq;
505 491
506 if (hdlc->state.fr.settings.dce && fullrep) { 492 if (dce && fullrep) {
507 while (pvc) { 493 while (pvc) {
508 data[i++] = (hdlc->state.fr.settings.lmi == LMI_CCITT) 494 data[i++] = lmi == LMI_CCITT ? LMI_CCITT_PVCSTAT :
509 ? LMI_CCITT_PVCSTAT : LMI_PVCSTAT; 495 LMI_ANSI_CISCO_PVCSTAT;
510 data[i++] = stat_len; 496 data[i++] = stat_len;
511 497
512 /* LMI start/restart */ 498 /* LMI start/restart */
@@ -523,8 +509,20 @@ static void fr_lmi_send(struct net_device *dev, int fullrep)
523 fr_log_dlci_active(pvc); 509 fr_log_dlci_active(pvc);
524 } 510 }
525 511
526 dlci_to_status(pvc->dlci, data + i, 512 if (lmi == LMI_CISCO) {
527 pvc->state.active, pvc->state.new); 513 data[i] = pvc->dlci >> 8;
514 data[i + 1] = pvc->dlci & 0xFF;
515 } else {
516 data[i] = (pvc->dlci >> 4) & 0x3F;
517 data[i + 1] = ((pvc->dlci << 3) & 0x78) | 0x80;
518 data[i + 2] = 0x80;
519 }
520
521 if (pvc->state.new)
522 data[i + 2] |= 0x08;
523 else if (pvc->state.active)
524 data[i + 2] |= 0x02;
525
528 i += stat_len; 526 i += stat_len;
529 pvc = pvc->next; 527 pvc = pvc->next;
530 } 528 }
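
The inlined encoding replaces the old dlci_to_status() helper: for CCITT and ANSI the PVC status IE carries the 10-bit DLCI split as six high bits, then four low bits shifted left with the extension bit (0x80) set, followed by a byte holding 0x80 plus the new/active flags; for Cisco the DLCI is simply stored big-endian in two bytes, with the flags ORed into the byte that follows (which starts at zero there). A standalone sketch of both encodings with an example DLCI:

    /* Standalone sketch of the two PVC-status encodings inlined above;
     * the sample DLCI is illustrative. */
    #include <stdint.h>
    #include <stdio.h>

    static void encode_ccitt_ansi(uint16_t dlci, int new, int active, uint8_t s[3])
    {
        s[0] = (dlci >> 4) & 0x3F;              /* DLCI bits 9..4 */
        s[1] = ((dlci << 3) & 0x78) | 0x80;     /* DLCI bits 3..0, EA set */
        s[2] = 0x80 | (new ? 0x08 : (active ? 0x02 : 0));
    }

    static void encode_cisco(uint16_t dlci, int new, int active, uint8_t s[3])
    {
        s[0] = dlci >> 8;                       /* big-endian DLCI */
        s[1] = dlci & 0xFF;
        s[2] = new ? 0x08 : (active ? 0x02 : 0);
    }

    int main(void)
    {
        uint8_t a[3], c[3];

        encode_ccitt_ansi(16, 0, 1, a);         /* DLCI 16, active */
        encode_cisco(16, 0, 1, c);
        printf("ccitt/ansi: %02x %02x %02x   cisco: %02x %02x %02x\n",
               a[0], a[1], a[2], c[0], c[1], c[2]);
        return 0;    /* prints: ccitt/ansi: 01 80 82   cisco: 00 10 02 */
    }
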
@@ -569,6 +567,8 @@ static void fr_set_link_state(int reliable, struct net_device *dev)
569 pvc_carrier(0, pvc); 567 pvc_carrier(0, pvc);
570 pvc->state.exist = pvc->state.active = 0; 568 pvc->state.exist = pvc->state.active = 0;
571 pvc->state.new = 0; 569 pvc->state.new = 0;
570 if (!hdlc->state.fr.settings.dce)
571 pvc->state.bandwidth = 0;
572 pvc = pvc->next; 572 pvc = pvc->next;
573 } 573 }
574 } 574 }
@@ -583,11 +583,12 @@ static void fr_timer(unsigned long arg)
583 int i, cnt = 0, reliable; 583 int i, cnt = 0, reliable;
584 u32 list; 584 u32 list;
585 585
586 if (hdlc->state.fr.settings.dce) 586 if (hdlc->state.fr.settings.dce) {
587 reliable = hdlc->state.fr.request && 587 reliable = hdlc->state.fr.request &&
588 time_before(jiffies, hdlc->state.fr.last_poll + 588 time_before(jiffies, hdlc->state.fr.last_poll +
589 hdlc->state.fr.settings.t392 * HZ); 589 hdlc->state.fr.settings.t392 * HZ);
590 else { 590 hdlc->state.fr.request = 0;
591 } else {
591 hdlc->state.fr.last_errors <<= 1; /* Shift the list */ 592 hdlc->state.fr.last_errors <<= 1; /* Shift the list */
592 if (hdlc->state.fr.request) { 593 if (hdlc->state.fr.request) {
593 if (hdlc->state.fr.reliable) 594 if (hdlc->state.fr.reliable)
@@ -634,65 +635,88 @@ static void fr_timer(unsigned long arg)
634static int fr_lmi_recv(struct net_device *dev, struct sk_buff *skb) 635static int fr_lmi_recv(struct net_device *dev, struct sk_buff *skb)
635{ 636{
636 hdlc_device *hdlc = dev_to_hdlc(dev); 637 hdlc_device *hdlc = dev_to_hdlc(dev);
637 int stat_len;
638 pvc_device *pvc; 638 pvc_device *pvc;
639 int reptype = -1, error, no_ram;
640 u8 rxseq, txseq; 639 u8 rxseq, txseq;
641 int i; 640 int lmi = hdlc->state.fr.settings.lmi;
641 int dce = hdlc->state.fr.settings.dce;
642 int stat_len = (lmi == LMI_CISCO) ? 6 : 3, reptype, error, no_ram, i;
642 643
643 if (skb->len < ((hdlc->state.fr.settings.lmi == LMI_ANSI) 644 if (skb->len < (lmi == LMI_ANSI ? LMI_ANSI_LENGTH :
644 ? LMI_ANSI_LENGTH : LMI_LENGTH)) { 645 LMI_CCITT_CISCO_LENGTH)) {
645 printk(KERN_INFO "%s: Short LMI frame\n", dev->name); 646 printk(KERN_INFO "%s: Short LMI frame\n", dev->name);
646 return 1; 647 return 1;
647 } 648 }
648 649
649 if (skb->data[5] != (!hdlc->state.fr.settings.dce ? 650 if (skb->data[3] != (lmi == LMI_CISCO ? NLPID_CISCO_LMI :
650 LMI_STATUS : LMI_STATUS_ENQUIRY)) { 651 NLPID_CCITT_ANSI_LMI)) {
651 printk(KERN_INFO "%s: LMI msgtype=%x, Not LMI status %s\n", 652 printk(KERN_INFO "%s: Received non-LMI frame with LMI"
652 dev->name, skb->data[2], 653 " DLCI\n", dev->name);
653 hdlc->state.fr.settings.dce ? "enquiry" : "reply"); 654 return 1;
655 }
656
657 if (skb->data[4] != LMI_CALLREF) {
658 printk(KERN_INFO "%s: Invalid LMI Call reference (0x%02X)\n",
659 dev->name, skb->data[4]);
660 return 1;
661 }
662
663 if (skb->data[5] != (dce ? LMI_STATUS_ENQUIRY : LMI_STATUS)) {
664 printk(KERN_INFO "%s: Invalid LMI Message type (0x%02X)\n",
665 dev->name, skb->data[5]);
654 return 1; 666 return 1;
655 } 667 }
656 668
657 i = (hdlc->state.fr.settings.lmi == LMI_ANSI) ? 7 : 6; 669 if (lmi == LMI_ANSI) {
670 if (skb->data[6] != LMI_ANSI_LOCKSHIFT) {
671 printk(KERN_INFO "%s: Not ANSI locking shift in LMI"
672 " message (0x%02X)\n", dev->name, skb->data[6]);
673 return 1;
674 }
675 i = 7;
676 } else
677 i = 6;
658 678
659 if (skb->data[i] != 679 if (skb->data[i] != (lmi == LMI_CCITT ? LMI_CCITT_REPTYPE :
660 ((hdlc->state.fr.settings.lmi == LMI_CCITT) 680 LMI_ANSI_CISCO_REPTYPE)) {
661 ? LMI_CCITT_REPTYPE : LMI_REPTYPE)) { 681 printk(KERN_INFO "%s: Not an LMI Report type IE (0x%02X)\n",
662 printk(KERN_INFO "%s: Not a report type=%x\n",
663 dev->name, skb->data[i]); 682 dev->name, skb->data[i]);
664 return 1; 683 return 1;
665 } 684 }
666 i++;
667 685
668 i++; /* Skip length field */ 686 if (skb->data[++i] != LMI_REPT_LEN) {
687 printk(KERN_INFO "%s: Invalid LMI Report type IE length"
688 " (%u)\n", dev->name, skb->data[i]);
689 return 1;
690 }
669 691
670 reptype = skb->data[i++]; 692 reptype = skb->data[++i];
693 if (reptype != LMI_INTEGRITY && reptype != LMI_FULLREP) {
694 printk(KERN_INFO "%s: Unsupported LMI Report type (0x%02X)\n",
695 dev->name, reptype);
696 return 1;
697 }
671 698
672 if (skb->data[i]!= 699 if (skb->data[++i] != (lmi == LMI_CCITT ? LMI_CCITT_ALIVE :
673 ((hdlc->state.fr.settings.lmi == LMI_CCITT) 700 LMI_ANSI_CISCO_ALIVE)) {
674 ? LMI_CCITT_ALIVE : LMI_ALIVE)) { 701 printk(KERN_INFO "%s: Not an LMI Link integrity verification"
675 printk(KERN_INFO "%s: Unsupported status element=%x\n", 702 " IE (0x%02X)\n", dev->name, skb->data[i]);
676 dev->name, skb->data[i]);
677 return 1; 703 return 1;
678 } 704 }
679 i++;
680 705
681 i++; /* Skip length field */ 706 if (skb->data[++i] != LMI_INTEG_LEN) {
707 printk(KERN_INFO "%s: Invalid LMI Link integrity verification"
708 " IE length (%u)\n", dev->name, skb->data[i]);
709 return 1;
710 }
711 i++;
682 712
683 hdlc->state.fr.rxseq = skb->data[i++]; /* TX sequence from peer */ 713 hdlc->state.fr.rxseq = skb->data[i++]; /* TX sequence from peer */
684 rxseq = skb->data[i++]; /* Should confirm our sequence */ 714 rxseq = skb->data[i++]; /* Should confirm our sequence */
685 715
686 txseq = hdlc->state.fr.txseq; 716 txseq = hdlc->state.fr.txseq;
687 717
688 if (hdlc->state.fr.settings.dce) { 718 if (dce)
689 if (reptype != LMI_FULLREP && reptype != LMI_INTEGRITY) {
690 printk(KERN_INFO "%s: Unsupported report type=%x\n",
691 dev->name, reptype);
692 return 1;
693 }
694 hdlc->state.fr.last_poll = jiffies; 719 hdlc->state.fr.last_poll = jiffies;
695 }
696 720
697 error = 0; 721 error = 0;
698 if (!hdlc->state.fr.reliable) 722 if (!hdlc->state.fr.reliable)
@@ -703,7 +727,7 @@ static int fr_lmi_recv(struct net_device *dev, struct sk_buff *skb)
703 error = 1; 727 error = 1;
704 } 728 }
705 729
706 if (hdlc->state.fr.settings.dce) { 730 if (dce) {
707 if (hdlc->state.fr.fullrep_sent && !error) { 731 if (hdlc->state.fr.fullrep_sent && !error) {
708/* Stop sending full report - the last one has been confirmed by DTE */ 732/* Stop sending full report - the last one has been confirmed by DTE */
709 hdlc->state.fr.fullrep_sent = 0; 733 hdlc->state.fr.fullrep_sent = 0;
@@ -725,6 +749,7 @@ static int fr_lmi_recv(struct net_device *dev, struct sk_buff *skb)
725 hdlc->state.fr.dce_changed = 0; 749 hdlc->state.fr.dce_changed = 0;
726 } 750 }
727 751
752 hdlc->state.fr.request = 1; /* got request */
728 fr_lmi_send(dev, reptype == LMI_FULLREP ? 1 : 0); 753 fr_lmi_send(dev, reptype == LMI_FULLREP ? 1 : 0);
729 return 0; 754 return 0;
730 } 755 }
@@ -739,7 +764,6 @@ static int fr_lmi_recv(struct net_device *dev, struct sk_buff *skb)
739 if (reptype != LMI_FULLREP) 764 if (reptype != LMI_FULLREP)
740 return 0; 765 return 0;
741 766
742 stat_len = 3;
743 pvc = hdlc->state.fr.first_pvc; 767 pvc = hdlc->state.fr.first_pvc;
744 768
745 while (pvc) { 769 while (pvc) {
@@ -750,24 +774,35 @@ static int fr_lmi_recv(struct net_device *dev, struct sk_buff *skb)
750 no_ram = 0; 774 no_ram = 0;
751 while (skb->len >= i + 2 + stat_len) { 775 while (skb->len >= i + 2 + stat_len) {
752 u16 dlci; 776 u16 dlci;
777 u32 bw;
753 unsigned int active, new; 778 unsigned int active, new;
754 779
755 if (skb->data[i] != ((hdlc->state.fr.settings.lmi == LMI_CCITT) 780 if (skb->data[i] != (lmi == LMI_CCITT ? LMI_CCITT_PVCSTAT :
756 ? LMI_CCITT_PVCSTAT : LMI_PVCSTAT)) { 781 LMI_ANSI_CISCO_PVCSTAT)) {
757 printk(KERN_WARNING "%s: Invalid PVCSTAT ID: %x\n", 782 printk(KERN_INFO "%s: Not an LMI PVC status IE"
758 dev->name, skb->data[i]); 783 " (0x%02X)\n", dev->name, skb->data[i]);
759 return 1; 784 return 1;
760 } 785 }
761 i++;
762 786
763 if (skb->data[i] != stat_len) { 787 if (skb->data[++i] != stat_len) {
764 printk(KERN_WARNING "%s: Invalid PVCSTAT length: %x\n", 788 printk(KERN_INFO "%s: Invalid LMI PVC status IE length"
765 dev->name, skb->data[i]); 789 " (%u)\n", dev->name, skb->data[i]);
766 return 1; 790 return 1;
767 } 791 }
768 i++; 792 i++;
769 793
770 dlci = status_to_dlci(skb->data + i, &active, &new); 794 new = !! (skb->data[i + 2] & 0x08);
795 active = !! (skb->data[i + 2] & 0x02);
796 if (lmi == LMI_CISCO) {
797 dlci = (skb->data[i] << 8) | skb->data[i + 1];
798 bw = (skb->data[i + 3] << 16) |
799 (skb->data[i + 4] << 8) |
800 (skb->data[i + 5]);
801 } else {
802 dlci = ((skb->data[i] & 0x3F) << 4) |
803 ((skb->data[i + 1] & 0x78) >> 3);
804 bw = 0;
805 }
771 806
772 pvc = add_pvc(dev, dlci); 807 pvc = add_pvc(dev, dlci);
773 808
@@ -783,9 +818,11 @@ static int fr_lmi_recv(struct net_device *dev, struct sk_buff *skb)
783 pvc->state.deleted = 0; 818 pvc->state.deleted = 0;
784 if (active != pvc->state.active || 819 if (active != pvc->state.active ||
785 new != pvc->state.new || 820 new != pvc->state.new ||
821 bw != pvc->state.bandwidth ||
786 !pvc->state.exist) { 822 !pvc->state.exist) {
787 pvc->state.new = new; 823 pvc->state.new = new;
788 pvc->state.active = active; 824 pvc->state.active = active;
825 pvc->state.bandwidth = bw;
789 pvc_carrier(active, pvc); 826 pvc_carrier(active, pvc);
790 fr_log_dlci_active(pvc); 827 fr_log_dlci_active(pvc);
791 } 828 }
@@ -801,6 +838,7 @@ static int fr_lmi_recv(struct net_device *dev, struct sk_buff *skb)
801 pvc_carrier(0, pvc); 838 pvc_carrier(0, pvc);
802 pvc->state.active = pvc->state.new = 0; 839 pvc->state.active = pvc->state.new = 0;
803 pvc->state.exist = 0; 840 pvc->state.exist = 0;
841 pvc->state.bandwidth = 0;
804 fr_log_dlci_active(pvc); 842 fr_log_dlci_active(pvc);
805 } 843 }
806 pvc = pvc->next; 844 pvc = pvc->next;
@@ -829,22 +867,15 @@ static int fr_rx(struct sk_buff *skb)
829 867
830 dlci = q922_to_dlci(skb->data); 868 dlci = q922_to_dlci(skb->data);
831 869
832 if (dlci == LMI_DLCI) { 870 if ((dlci == LMI_CCITT_ANSI_DLCI &&
833 if (hdlc->state.fr.settings.lmi == LMI_NONE) 871 (hdlc->state.fr.settings.lmi == LMI_ANSI ||
834 goto rx_error; /* LMI packet with no LMI? */ 872 hdlc->state.fr.settings.lmi == LMI_CCITT)) ||
835 873 (dlci == LMI_CISCO_DLCI &&
836 if (data[3] == LMI_PROTO) { 874 hdlc->state.fr.settings.lmi == LMI_CISCO)) {
837 if (fr_lmi_recv(ndev, skb)) 875 if (fr_lmi_recv(ndev, skb))
838 goto rx_error; 876 goto rx_error;
839 else { 877 dev_kfree_skb_any(skb);
840 dev_kfree_skb_any(skb); 878 return NET_RX_SUCCESS;
841 return NET_RX_SUCCESS;
842 }
843 }
844
845 printk(KERN_INFO "%s: Received non-LMI frame with LMI DLCI\n",
846 ndev->name);
847 goto rx_error;
848 } 879 }
849 880
850 pvc = find_pvc(hdlc, dlci); 881 pvc = find_pvc(hdlc, dlci);
@@ -1170,7 +1201,8 @@ int hdlc_fr_ioctl(struct net_device *dev, struct ifreq *ifr)
1170 1201
1171 if ((new_settings.lmi != LMI_NONE && 1202 if ((new_settings.lmi != LMI_NONE &&
1172 new_settings.lmi != LMI_ANSI && 1203 new_settings.lmi != LMI_ANSI &&
1173 new_settings.lmi != LMI_CCITT) || 1204 new_settings.lmi != LMI_CCITT &&
1205 new_settings.lmi != LMI_CISCO) ||
1174 new_settings.t391 < 1 || 1206 new_settings.t391 < 1 ||
1175 new_settings.t392 < 2 || 1207 new_settings.t392 < 2 ||
1176 new_settings.n391 < 1 || 1208 new_settings.n391 < 1 ||
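The PVC status parsing above handles two report formats: ANSI/CCITT LMI packs a 10-bit DLCI into the first two octets of a 3-byte IE (6 high bits, then 4 low bits), while Cisco LMI carries a plain 16-bit DLCI followed by a 24-bit bandwidth value in a 6-byte IE; the "new" and "active" bits sit in the third octet in both cases. A minimal stand-alone sketch of that decoding (user-space C rather than kernel code, with made-up test values):

/*
 * Stand-alone sketch of the PVC status IE decoding fr_lmi_recv() now
 * performs.  "ie" points just past the IE id and length octets.
 */
#include <stdint.h>
#include <stdio.h>

struct pvc_report {
	uint16_t dlci;
	uint32_t bandwidth;	/* reported by Cisco LMI only */
	int active, new_bit;
};

static struct pvc_report decode_pvc_status(const uint8_t *ie, int cisco)
{
	struct pvc_report r;

	r.new_bit = !!(ie[2] & 0x08);
	r.active  = !!(ie[2] & 0x02);
	if (cisco) {
		r.dlci = (ie[0] << 8) | ie[1];			/* plain 16-bit DLCI */
		r.bandwidth = (ie[3] << 16) | (ie[4] << 8) | ie[5];
	} else {
		/* Q.933 Annex A/D packing: 6 high DLCI bits, then 4 low bits */
		r.dlci = ((ie[0] & 0x3F) << 4) | ((ie[1] & 0x78) >> 3);
		r.bandwidth = 0;
	}
	return r;
}

int main(void)
{
	const uint8_t ansi_ie[3] = { 0x04, 0x08, 0x02 };	/* DLCI 65, active */
	struct pvc_report r = decode_pvc_status(ansi_ie, 0);

	printf("dlci=%u active=%d new=%d bw=%u\n",
	       r.dlci, r.active, r.new_bit, r.bandwidth);
	return 0;
}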
diff --git a/drivers/net/wan/hdlc_generic.c b/drivers/net/wan/hdlc_generic.c
index 6ed064cb4469..a63f6a2cc4f7 100644
--- a/drivers/net/wan/hdlc_generic.c
+++ b/drivers/net/wan/hdlc_generic.c
@@ -1,7 +1,7 @@
1/* 1/*
2 * Generic HDLC support routines for Linux 2 * Generic HDLC support routines for Linux
3 * 3 *
4 * Copyright (C) 1999 - 2003 Krzysztof Halasa <khc@pm.waw.pl> 4 * Copyright (C) 1999 - 2005 Krzysztof Halasa <khc@pm.waw.pl>
5 * 5 *
6 * This program is free software; you can redistribute it and/or modify it 6 * This program is free software; you can redistribute it and/or modify it
7 * under the terms of version 2 of the GNU General Public License 7 * under the terms of version 2 of the GNU General Public License
@@ -38,7 +38,7 @@
38#include <linux/hdlc.h> 38#include <linux/hdlc.h>
39 39
40 40
41static const char* version = "HDLC support module revision 1.17"; 41static const char* version = "HDLC support module revision 1.18";
42 42
43#undef DEBUG_LINK 43#undef DEBUG_LINK
44 44
@@ -126,10 +126,13 @@ void hdlc_set_carrier(int on, struct net_device *dev)
126 if (!hdlc->open) 126 if (!hdlc->open)
127 goto carrier_exit; 127 goto carrier_exit;
128 128
129 if (hdlc->carrier) 129 if (hdlc->carrier) {
130 printk(KERN_INFO "%s: Carrier detected\n", dev->name);
130 __hdlc_set_carrier_on(dev); 131 __hdlc_set_carrier_on(dev);
131 else 132 } else {
133 printk(KERN_INFO "%s: Carrier lost\n", dev->name);
132 __hdlc_set_carrier_off(dev); 134 __hdlc_set_carrier_off(dev);
135 }
133 136
134carrier_exit: 137carrier_exit:
135 spin_unlock_irqrestore(&hdlc->state_lock, flags); 138 spin_unlock_irqrestore(&hdlc->state_lock, flags);
@@ -157,8 +160,11 @@ int hdlc_open(struct net_device *dev)
157 160
158 spin_lock_irq(&hdlc->state_lock); 161 spin_lock_irq(&hdlc->state_lock);
159 162
160 if (hdlc->carrier) 163 if (hdlc->carrier) {
164 printk(KERN_INFO "%s: Carrier detected\n", dev->name);
161 __hdlc_set_carrier_on(dev); 165 __hdlc_set_carrier_on(dev);
166 } else
167 printk(KERN_INFO "%s: No carrier\n", dev->name);
162 168
163 hdlc->open = 1; 169 hdlc->open = 1;
164 170
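For context, hdlc_set_carrier() is called by the low-level hardware drivers, so the new "Carrier detected" / "Carrier lost" messages fire whenever a driver reports a DCD change. A hedged sketch of such a call site; my_card_dcd_irq() and check_dcd() are placeholders, not functions from any real driver:

#include <linux/hdlc.h>

static int check_dcd(struct net_device *dev);	/* placeholder: read DCD from the hardware */

static void my_card_dcd_irq(struct net_device *dev)
{
	/* Propagate the modem's carrier state; generic HDLC logs the
	 * change and raises/lowers carrier on the protocol interface. */
	hdlc_set_carrier(check_dcd(dev), dev);
}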
diff --git a/drivers/net/wan/lmc/lmc_main.c b/drivers/net/wan/lmc/lmc_main.c
index 15e545f66cd7..2b948ea397d5 100644
--- a/drivers/net/wan/lmc/lmc_main.c
+++ b/drivers/net/wan/lmc/lmc_main.c
@@ -723,7 +723,7 @@ static void lmc_watchdog (unsigned long data) /*fold00*/
723 /* lmc_reset (sc); Why reset??? The link can go down ok */ 723 /* lmc_reset (sc); Why reset??? The link can go down ok */
724 724
725 /* Inform the world that link has been lost */ 725 /* Inform the world that link has been lost */
726 dev->flags &= ~IFF_RUNNING; 726 netif_carrier_off(dev);
727 } 727 }
728 728
729 /* 729 /*
@@ -736,7 +736,7 @@ static void lmc_watchdog (unsigned long data) /*fold00*/
736 /* lmc_reset (sc); Again why reset??? */ 736 /* lmc_reset (sc); Again why reset??? */
737 737
738 /* Inform the world that link protocol is back up. */ 738 /* Inform the world that link protocol is back up. */
739 dev->flags |= IFF_RUNNING; 739 netif_carrier_on(dev);
740 740
741 /* Now we have to tell the syncppp that we had an outage 741 /* Now we have to tell the syncppp that we had an outage
742 * and that it should deal. Calling sppp_reopen here 742 * and that it should deal. Calling sppp_reopen here
@@ -1168,8 +1168,6 @@ static void lmc_running_reset (struct net_device *dev) /*fold00*/
1168 sc->lmc_media->set_link_status (sc, 1); 1168 sc->lmc_media->set_link_status (sc, 1);
1169 sc->lmc_media->set_status (sc, NULL); 1169 sc->lmc_media->set_status (sc, NULL);
1170 1170
1171 //dev->flags |= IFF_RUNNING;
1172
1173 netif_wake_queue(dev); 1171 netif_wake_queue(dev);
1174 1172
1175 sc->lmc_txfull = 0; 1173 sc->lmc_txfull = 0;
@@ -1233,8 +1231,6 @@ static int lmc_ifdown (struct net_device *dev) /*fold00*/
1233 csr6 &= ~LMC_DEC_SR; /* Turn off the Receive bit */ 1231 csr6 &= ~LMC_DEC_SR; /* Turn off the Receive bit */
1234 LMC_CSR_WRITE (sc, csr_command, csr6); 1232 LMC_CSR_WRITE (sc, csr_command, csr6);
1235 1233
1236 dev->flags &= ~IFF_RUNNING;
1237
1238 sc->stats.rx_missed_errors += 1234 sc->stats.rx_missed_errors +=
1239 LMC_CSR_READ (sc, csr_missed_frames) & 0xffff; 1235 LMC_CSR_READ (sc, csr_missed_frames) & 0xffff;
1240 1236
diff --git a/drivers/net/wireless/orinoco.c b/drivers/net/wireless/orinoco.c
index a3a32430ae9d..b1078baa1d5e 100644
--- a/drivers/net/wireless/orinoco.c
+++ b/drivers/net/wireless/orinoco.c
@@ -492,6 +492,9 @@ EXPORT_SYMBOL(orinoco_debug);
492static int suppress_linkstatus; /* = 0 */ 492static int suppress_linkstatus; /* = 0 */
493module_param(suppress_linkstatus, bool, 0644); 493module_param(suppress_linkstatus, bool, 0644);
494MODULE_PARM_DESC(suppress_linkstatus, "Don't log link status changes"); 494MODULE_PARM_DESC(suppress_linkstatus, "Don't log link status changes");
495static int ignore_disconnect; /* = 0 */
496module_param(ignore_disconnect, int, 0644);
497MODULE_PARM_DESC(ignore_disconnect, "Don't report lost link to the network layer");
495 498
496/********************************************************************/ 499/********************************************************************/
497/* Compile time configuration and compatibility stuff */ 500/* Compile time configuration and compatibility stuff */
@@ -604,7 +607,6 @@ struct hermes_rx_descriptor {
604static int orinoco_ioctl(struct net_device *dev, struct ifreq *rq, int cmd); 607static int orinoco_ioctl(struct net_device *dev, struct ifreq *rq, int cmd);
605static int __orinoco_program_rids(struct net_device *dev); 608static int __orinoco_program_rids(struct net_device *dev);
606static void __orinoco_set_multicast_list(struct net_device *dev); 609static void __orinoco_set_multicast_list(struct net_device *dev);
607static int orinoco_debug_dump_recs(struct net_device *dev);
608 610
609/********************************************************************/ 611/********************************************************************/
610/* Internal helper functions */ 612/* Internal helper functions */
@@ -655,7 +657,7 @@ static int orinoco_open(struct net_device *dev)
655 return err; 657 return err;
656} 658}
657 659
658int orinoco_stop(struct net_device *dev) 660static int orinoco_stop(struct net_device *dev)
659{ 661{
660 struct orinoco_private *priv = netdev_priv(dev); 662 struct orinoco_private *priv = netdev_priv(dev);
661 int err = 0; 663 int err = 0;
@@ -686,7 +688,7 @@ static struct iw_statistics *orinoco_get_wireless_stats(struct net_device *dev)
686 struct orinoco_private *priv = netdev_priv(dev); 688 struct orinoco_private *priv = netdev_priv(dev);
687 hermes_t *hw = &priv->hw; 689 hermes_t *hw = &priv->hw;
688 struct iw_statistics *wstats = &priv->wstats; 690 struct iw_statistics *wstats = &priv->wstats;
689 int err = 0; 691 int err;
690 unsigned long flags; 692 unsigned long flags;
691 693
692 if (! netif_device_present(dev)) { 694 if (! netif_device_present(dev)) {
@@ -695,9 +697,21 @@ static struct iw_statistics *orinoco_get_wireless_stats(struct net_device *dev)
695 return NULL; /* FIXME: Can we do better than this? */ 697 return NULL; /* FIXME: Can we do better than this? */
696 } 698 }
697 699
700 /* If busy, return the old stats. Returning NULL may cause
701 * the interface to disappear from /proc/net/wireless */
698 if (orinoco_lock(priv, &flags) != 0) 702 if (orinoco_lock(priv, &flags) != 0)
699 return NULL; /* FIXME: Erg, we've been signalled, how 703 return wstats;
700 * do we propagate this back up? */ 704
705 /* We can't really wait for the tallies inquiry command to
706 * complete, so we just use the previous results and trigger
707 * a new tallies inquiry command for next time - Jean II */
708 /* FIXME: Really we should wait for the inquiry to come back -
 709 * as it is, the stats we give don't make a whole lot of sense.
710 * Unfortunately, it's not clear how to do that within the
711 * wireless extensions framework: I think we're in user
712 * context, but a lock seems to be held by the time we get in
713 * here so we're not safe to sleep here. */
714 hermes_inquire(hw, HERMES_INQ_TALLIES);
701 715
702 if (priv->iw_mode == IW_MODE_ADHOC) { 716 if (priv->iw_mode == IW_MODE_ADHOC) {
703 memset(&wstats->qual, 0, sizeof(wstats->qual)); 717 memset(&wstats->qual, 0, sizeof(wstats->qual));
@@ -716,25 +730,16 @@ static struct iw_statistics *orinoco_get_wireless_stats(struct net_device *dev)
716 730
717 err = HERMES_READ_RECORD(hw, USER_BAP, 731 err = HERMES_READ_RECORD(hw, USER_BAP,
718 HERMES_RID_COMMSQUALITY, &cq); 732 HERMES_RID_COMMSQUALITY, &cq);
719 733
720 wstats->qual.qual = (int)le16_to_cpu(cq.qual); 734 if (!err) {
721 wstats->qual.level = (int)le16_to_cpu(cq.signal) - 0x95; 735 wstats->qual.qual = (int)le16_to_cpu(cq.qual);
722 wstats->qual.noise = (int)le16_to_cpu(cq.noise) - 0x95; 736 wstats->qual.level = (int)le16_to_cpu(cq.signal) - 0x95;
723 wstats->qual.updated = 7; 737 wstats->qual.noise = (int)le16_to_cpu(cq.noise) - 0x95;
738 wstats->qual.updated = 7;
739 }
724 } 740 }
725 741
726 /* We can't really wait for the tallies inquiry command to
727 * complete, so we just use the previous results and trigger
728 * a new tallies inquiry command for next time - Jean II */
729 /* FIXME: We're in user context (I think?), so we should just
730 wait for the tallies to come through */
731 err = hermes_inquire(hw, HERMES_INQ_TALLIES);
732
733 orinoco_unlock(priv, &flags); 742 orinoco_unlock(priv, &flags);
734
735 if (err)
736 return NULL;
737
738 return wstats; 743 return wstats;
739} 744}
740 745
@@ -1275,9 +1280,10 @@ static void __orinoco_ev_info(struct net_device *dev, hermes_t *hw)
1275 len = sizeof(tallies); 1280 len = sizeof(tallies);
1276 } 1281 }
1277 1282
1278 /* Read directly the data (no seek) */ 1283 err = hermes_bap_pread(hw, IRQ_BAP, &tallies, len,
1279 hermes_read_words(hw, HERMES_DATA1, (void *) &tallies, 1284 infofid, sizeof(info));
1280 len / 2); /* FIXME: blech! */ 1285 if (err)
1286 break;
1281 1287
1282 /* Increment our various counters */ 1288 /* Increment our various counters */
1283 /* wstats->discard.nwid - no wrong BSSID stuff */ 1289 /* wstats->discard.nwid - no wrong BSSID stuff */
@@ -1307,8 +1313,10 @@ static void __orinoco_ev_info(struct net_device *dev, hermes_t *hw)
1307 break; 1313 break;
1308 } 1314 }
1309 1315
1310 hermes_read_words(hw, HERMES_DATA1, (void *) &linkstatus, 1316 err = hermes_bap_pread(hw, IRQ_BAP, &linkstatus, len,
1311 len / 2); 1317 infofid, sizeof(info));
1318 if (err)
1319 break;
1312 newstatus = le16_to_cpu(linkstatus.linkstatus); 1320 newstatus = le16_to_cpu(linkstatus.linkstatus);
1313 1321
1314 connected = (newstatus == HERMES_LINKSTATUS_CONNECTED) 1322 connected = (newstatus == HERMES_LINKSTATUS_CONNECTED)
@@ -1317,7 +1325,7 @@ static void __orinoco_ev_info(struct net_device *dev, hermes_t *hw)
1317 1325
1318 if (connected) 1326 if (connected)
1319 netif_carrier_on(dev); 1327 netif_carrier_on(dev);
1320 else 1328 else if (!ignore_disconnect)
1321 netif_carrier_off(dev); 1329 netif_carrier_off(dev);
1322 1330
1323 if (newstatus != priv->last_linkstatus) 1331 if (newstatus != priv->last_linkstatus)
@@ -1350,6 +1358,8 @@ int __orinoco_up(struct net_device *dev)
1350 struct hermes *hw = &priv->hw; 1358 struct hermes *hw = &priv->hw;
1351 int err; 1359 int err;
1352 1360
1361 netif_carrier_off(dev); /* just to make sure */
1362
1353 err = __orinoco_program_rids(dev); 1363 err = __orinoco_program_rids(dev);
1354 if (err) { 1364 if (err) {
1355 printk(KERN_ERR "%s: Error %d configuring card\n", 1365 printk(KERN_ERR "%s: Error %d configuring card\n",
@@ -1413,7 +1423,7 @@ int orinoco_reinit_firmware(struct net_device *dev)
1413 return err; 1423 return err;
1414 1424
1415 err = hermes_allocate(hw, priv->nicbuf_size, &priv->txfid); 1425 err = hermes_allocate(hw, priv->nicbuf_size, &priv->txfid);
1416 if (err == -EIO) { 1426 if (err == -EIO && priv->nicbuf_size > TX_NICBUF_SIZE_BUG) {
1417 /* Try workaround for old Symbol firmware bug */ 1427 /* Try workaround for old Symbol firmware bug */
1418 printk(KERN_WARNING "%s: firmware ALLOC bug detected " 1428 printk(KERN_WARNING "%s: firmware ALLOC bug detected "
1419 "(old Symbol firmware?). Trying to work around... ", 1429 "(old Symbol firmware?). Trying to work around... ",
@@ -1610,17 +1620,15 @@ static int __orinoco_program_rids(struct net_device *dev)
1610 return err; 1620 return err;
1611 } 1621 }
1612 /* Set the channel/frequency */ 1622 /* Set the channel/frequency */
1613 if (priv->channel == 0) { 1623 if (priv->channel != 0 && priv->iw_mode != IW_MODE_INFRA) {
1614 printk(KERN_DEBUG "%s: Channel is 0 in __orinoco_program_rids()\n", dev->name); 1624 err = hermes_write_wordrec(hw, USER_BAP,
1615 if (priv->createibss) 1625 HERMES_RID_CNFOWNCHANNEL,
1616 priv->channel = 10; 1626 priv->channel);
1617 } 1627 if (err) {
1618 err = hermes_write_wordrec(hw, USER_BAP, HERMES_RID_CNFOWNCHANNEL, 1628 printk(KERN_ERR "%s: Error %d setting channel %d\n",
1619 priv->channel); 1629 dev->name, err, priv->channel);
1620 if (err) { 1630 return err;
1621 printk(KERN_ERR "%s: Error %d setting channel\n", 1631 }
1622 dev->name, err);
1623 return err;
1624 } 1632 }
1625 1633
1626 if (priv->has_ibss) { 1634 if (priv->has_ibss) {
@@ -1916,7 +1924,7 @@ static void orinoco_reset(struct net_device *dev)
1916{ 1924{
1917 struct orinoco_private *priv = netdev_priv(dev); 1925 struct orinoco_private *priv = netdev_priv(dev);
1918 struct hermes *hw = &priv->hw; 1926 struct hermes *hw = &priv->hw;
1919 int err = 0; 1927 int err;
1920 unsigned long flags; 1928 unsigned long flags;
1921 1929
1922 if (orinoco_lock(priv, &flags) != 0) 1930 if (orinoco_lock(priv, &flags) != 0)
@@ -1938,20 +1946,20 @@ static void orinoco_reset(struct net_device *dev)
1938 1946
1939 orinoco_unlock(priv, &flags); 1947 orinoco_unlock(priv, &flags);
1940 1948
1941 if (priv->hard_reset) 1949 if (priv->hard_reset) {
1942 err = (*priv->hard_reset)(priv); 1950 err = (*priv->hard_reset)(priv);
1943 if (err) { 1951 if (err) {
1944 printk(KERN_ERR "%s: orinoco_reset: Error %d " 1952 printk(KERN_ERR "%s: orinoco_reset: Error %d "
1945 "performing hard reset\n", dev->name, err); 1953 "performing hard reset\n", dev->name, err);
1946 /* FIXME: shutdown of some sort */ 1954 goto disable;
1947 return; 1955 }
1948 } 1956 }
1949 1957
1950 err = orinoco_reinit_firmware(dev); 1958 err = orinoco_reinit_firmware(dev);
1951 if (err) { 1959 if (err) {
1952 printk(KERN_ERR "%s: orinoco_reset: Error %d re-initializing firmware\n", 1960 printk(KERN_ERR "%s: orinoco_reset: Error %d re-initializing firmware\n",
1953 dev->name, err); 1961 dev->name, err);
1954 return; 1962 goto disable;
1955 } 1963 }
1956 1964
1957 spin_lock_irq(&priv->lock); /* This has to be called from user context */ 1965 spin_lock_irq(&priv->lock); /* This has to be called from user context */
@@ -1972,6 +1980,10 @@ static void orinoco_reset(struct net_device *dev)
1972 spin_unlock_irq(&priv->lock); 1980 spin_unlock_irq(&priv->lock);
1973 1981
1974 return; 1982 return;
1983 disable:
1984 hermes_set_irqmask(hw, 0);
1985 netif_device_detach(dev);
1986 printk(KERN_ERR "%s: Device has been disabled!\n", dev->name);
1975} 1987}
1976 1988
1977/********************************************************************/ 1989/********************************************************************/
@@ -2056,7 +2068,7 @@ irqreturn_t orinoco_interrupt(int irq, void *dev_id, struct pt_regs *regs)
2056 if (events & HERMES_EV_ALLOC) 2068 if (events & HERMES_EV_ALLOC)
2057 __orinoco_ev_alloc(dev, hw); 2069 __orinoco_ev_alloc(dev, hw);
2058 2070
2059 hermes_write_regn(hw, EVACK, events); 2071 hermes_write_regn(hw, EVACK, evstat);
2060 2072
2061 evstat = hermes_read_regn(hw, EVSTAT); 2073 evstat = hermes_read_regn(hw, EVSTAT);
2062 events = evstat & hw->inten; 2074 events = evstat & hw->inten;
@@ -2215,6 +2227,8 @@ static int determine_firmware(struct net_device *dev)
2215 firmver >= 0x31000; 2227 firmver >= 0x31000;
2216 priv->has_preamble = (firmver >= 0x20000); 2228 priv->has_preamble = (firmver >= 0x20000);
2217 priv->ibss_port = 4; 2229 priv->ibss_port = 4;
2230 priv->broken_disableport = (firmver == 0x25013) ||
2231 (firmver >= 0x30000 && firmver <= 0x31000);
2218 /* Tested with Intel firmware : 0x20015 => Jean II */ 2232 /* Tested with Intel firmware : 0x20015 => Jean II */
2219 /* Tested with 3Com firmware : 0x15012 & 0x22001 => Jean II */ 2233 /* Tested with 3Com firmware : 0x15012 & 0x22001 => Jean II */
2220 break; 2234 break;
@@ -2267,7 +2281,7 @@ static int orinoco_init(struct net_device *dev)
2267 priv->nicbuf_size = IEEE802_11_FRAME_LEN + ETH_HLEN; 2281 priv->nicbuf_size = IEEE802_11_FRAME_LEN + ETH_HLEN;
2268 2282
2269 /* Initialize the firmware */ 2283 /* Initialize the firmware */
2270 err = hermes_init(hw); 2284 err = orinoco_reinit_firmware(dev);
2271 if (err != 0) { 2285 if (err != 0) {
2272 printk(KERN_ERR "%s: failed to initialize firmware (err = %d)\n", 2286 printk(KERN_ERR "%s: failed to initialize firmware (err = %d)\n",
2273 dev->name, err); 2287 dev->name, err);
@@ -2400,31 +2414,12 @@ static int orinoco_init(struct net_device *dev)
2400 /* By default use IEEE/IBSS ad-hoc mode if we have it */ 2414 /* By default use IEEE/IBSS ad-hoc mode if we have it */
2401 priv->prefer_port3 = priv->has_port3 && (! priv->has_ibss); 2415 priv->prefer_port3 = priv->has_port3 && (! priv->has_ibss);
2402 set_port_type(priv); 2416 set_port_type(priv);
2403 priv->channel = 10; /* default channel, more-or-less arbitrary */ 2417 priv->channel = 0; /* use firmware default */
2404 2418
2405 priv->promiscuous = 0; 2419 priv->promiscuous = 0;
2406 priv->wep_on = 0; 2420 priv->wep_on = 0;
2407 priv->tx_key = 0; 2421 priv->tx_key = 0;
2408 2422
2409 err = hermes_allocate(hw, priv->nicbuf_size, &priv->txfid);
2410 if (err == -EIO) {
2411 /* Try workaround for old Symbol firmware bug */
2412 printk(KERN_WARNING "%s: firmware ALLOC bug detected "
2413 "(old Symbol firmware?). Trying to work around... ",
2414 dev->name);
2415
2416 priv->nicbuf_size = TX_NICBUF_SIZE_BUG;
2417 err = hermes_allocate(hw, priv->nicbuf_size, &priv->txfid);
2418 if (err)
2419 printk("failed!\n");
2420 else
2421 printk("ok.\n");
2422 }
2423 if (err) {
2424 printk("%s: Error %d allocating Tx buffer\n", dev->name, err);
2425 goto out;
2426 }
2427
2428 /* Make the hardware available, as long as it hasn't been 2423 /* Make the hardware available, as long as it hasn't been
2429 * removed elsewhere (e.g. by PCMCIA hot unplug) */ 2424 * removed elsewhere (e.g. by PCMCIA hot unplug) */
2430 spin_lock_irq(&priv->lock); 2425 spin_lock_irq(&priv->lock);
@@ -2450,7 +2445,7 @@ struct net_device *alloc_orinocodev(int sizeof_card,
2450 priv = netdev_priv(dev); 2445 priv = netdev_priv(dev);
2451 priv->ndev = dev; 2446 priv->ndev = dev;
2452 if (sizeof_card) 2447 if (sizeof_card)
2453 priv->card = (void *)((unsigned long)netdev_priv(dev) 2448 priv->card = (void *)((unsigned long)priv
2454 + sizeof(struct orinoco_private)); 2449 + sizeof(struct orinoco_private));
2455 else 2450 else
2456 priv->card = NULL; 2451 priv->card = NULL;
@@ -2555,6 +2550,7 @@ static int orinoco_hw_get_essid(struct orinoco_private *priv, int *active,
2555 } 2550 }
2556 2551
2557 len = le16_to_cpu(essidbuf.len); 2552 len = le16_to_cpu(essidbuf.len);
2553 BUG_ON(len > IW_ESSID_MAX_SIZE);
2558 2554
2559 memset(buf, 0, IW_ESSID_MAX_SIZE+1); 2555 memset(buf, 0, IW_ESSID_MAX_SIZE+1);
2560 memcpy(buf, p, len); 2556 memcpy(buf, p, len);
@@ -2923,13 +2919,14 @@ static int orinoco_ioctl_setessid(struct net_device *dev, struct iw_point *erq)
2923 memset(&essidbuf, 0, sizeof(essidbuf)); 2919 memset(&essidbuf, 0, sizeof(essidbuf));
2924 2920
2925 if (erq->flags) { 2921 if (erq->flags) {
2926 if (erq->length > IW_ESSID_MAX_SIZE) 2922 /* iwconfig includes the NUL in the specified length */
2923 if (erq->length > IW_ESSID_MAX_SIZE+1)
2927 return -E2BIG; 2924 return -E2BIG;
2928 2925
2929 if (copy_from_user(&essidbuf, erq->pointer, erq->length)) 2926 if (copy_from_user(&essidbuf, erq->pointer, erq->length))
2930 return -EFAULT; 2927 return -EFAULT;
2931 2928
2932 essidbuf[erq->length] = '\0'; 2929 essidbuf[IW_ESSID_MAX_SIZE] = '\0';
2933 } 2930 }
2934 2931
2935 if (orinoco_lock(priv, &flags) != 0) 2932 if (orinoco_lock(priv, &flags) != 0)
@@ -3855,7 +3852,6 @@ orinoco_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
3855 { SIOCIWFIRSTPRIV + 0x7, 0, 3852 { SIOCIWFIRSTPRIV + 0x7, 0,
3856 IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 1, 3853 IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 1,
3857 "get_ibssport" }, 3854 "get_ibssport" },
3858 { SIOCIWLASTPRIV, 0, 0, "dump_recs" },
3859 }; 3855 };
3860 3856
3861 wrq->u.data.length = sizeof(privtab) / sizeof(privtab[0]); 3857 wrq->u.data.length = sizeof(privtab) / sizeof(privtab[0]);
@@ -3943,14 +3939,6 @@ orinoco_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
3943 err = orinoco_ioctl_getibssport(dev, wrq); 3939 err = orinoco_ioctl_getibssport(dev, wrq);
3944 break; 3940 break;
3945 3941
3946 case SIOCIWLASTPRIV:
3947 err = orinoco_debug_dump_recs(dev);
3948 if (err)
3949 printk(KERN_ERR "%s: Unable to dump records (%d)\n",
3950 dev->name, err);
3951 break;
3952
3953
3954 default: 3942 default:
3955 err = -EOPNOTSUPP; 3943 err = -EOPNOTSUPP;
3956 } 3944 }
@@ -3964,187 +3952,6 @@ orinoco_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
3964 return err; 3952 return err;
3965} 3953}
3966 3954
3967struct {
3968 u16 rid;
3969 char *name;
3970 int displaytype;
3971#define DISPLAY_WORDS 0
3972#define DISPLAY_BYTES 1
3973#define DISPLAY_STRING 2
3974#define DISPLAY_XSTRING 3
3975} record_table[] = {
3976#define DEBUG_REC(name,type) { HERMES_RID_##name, #name, DISPLAY_##type }
3977 DEBUG_REC(CNFPORTTYPE,WORDS),
3978 DEBUG_REC(CNFOWNMACADDR,BYTES),
3979 DEBUG_REC(CNFDESIREDSSID,STRING),
3980 DEBUG_REC(CNFOWNCHANNEL,WORDS),
3981 DEBUG_REC(CNFOWNSSID,STRING),
3982 DEBUG_REC(CNFOWNATIMWINDOW,WORDS),
3983 DEBUG_REC(CNFSYSTEMSCALE,WORDS),
3984 DEBUG_REC(CNFMAXDATALEN,WORDS),
3985 DEBUG_REC(CNFPMENABLED,WORDS),
3986 DEBUG_REC(CNFPMEPS,WORDS),
3987 DEBUG_REC(CNFMULTICASTRECEIVE,WORDS),
3988 DEBUG_REC(CNFMAXSLEEPDURATION,WORDS),
3989 DEBUG_REC(CNFPMHOLDOVERDURATION,WORDS),
3990 DEBUG_REC(CNFOWNNAME,STRING),
3991 DEBUG_REC(CNFOWNDTIMPERIOD,WORDS),
3992 DEBUG_REC(CNFMULTICASTPMBUFFERING,WORDS),
3993 DEBUG_REC(CNFWEPENABLED_AGERE,WORDS),
3994 DEBUG_REC(CNFMANDATORYBSSID_SYMBOL,WORDS),
3995 DEBUG_REC(CNFWEPDEFAULTKEYID,WORDS),
3996 DEBUG_REC(CNFDEFAULTKEY0,BYTES),
3997 DEBUG_REC(CNFDEFAULTKEY1,BYTES),
3998 DEBUG_REC(CNFMWOROBUST_AGERE,WORDS),
3999 DEBUG_REC(CNFDEFAULTKEY2,BYTES),
4000 DEBUG_REC(CNFDEFAULTKEY3,BYTES),
4001 DEBUG_REC(CNFWEPFLAGS_INTERSIL,WORDS),
4002 DEBUG_REC(CNFWEPKEYMAPPINGTABLE,WORDS),
4003 DEBUG_REC(CNFAUTHENTICATION,WORDS),
4004 DEBUG_REC(CNFMAXASSOCSTA,WORDS),
4005 DEBUG_REC(CNFKEYLENGTH_SYMBOL,WORDS),
4006 DEBUG_REC(CNFTXCONTROL,WORDS),
4007 DEBUG_REC(CNFROAMINGMODE,WORDS),
4008 DEBUG_REC(CNFHOSTAUTHENTICATION,WORDS),
4009 DEBUG_REC(CNFRCVCRCERROR,WORDS),
4010 DEBUG_REC(CNFMMLIFE,WORDS),
4011 DEBUG_REC(CNFALTRETRYCOUNT,WORDS),
4012 DEBUG_REC(CNFBEACONINT,WORDS),
4013 DEBUG_REC(CNFAPPCFINFO,WORDS),
4014 DEBUG_REC(CNFSTAPCFINFO,WORDS),
4015 DEBUG_REC(CNFPRIORITYQUSAGE,WORDS),
4016 DEBUG_REC(CNFTIMCTRL,WORDS),
4017 DEBUG_REC(CNFTHIRTY2TALLY,WORDS),
4018 DEBUG_REC(CNFENHSECURITY,WORDS),
4019 DEBUG_REC(CNFGROUPADDRESSES,BYTES),
4020 DEBUG_REC(CNFCREATEIBSS,WORDS),
4021 DEBUG_REC(CNFFRAGMENTATIONTHRESHOLD,WORDS),
4022 DEBUG_REC(CNFRTSTHRESHOLD,WORDS),
4023 DEBUG_REC(CNFTXRATECONTROL,WORDS),
4024 DEBUG_REC(CNFPROMISCUOUSMODE,WORDS),
4025 DEBUG_REC(CNFBASICRATES_SYMBOL,WORDS),
4026 DEBUG_REC(CNFPREAMBLE_SYMBOL,WORDS),
4027 DEBUG_REC(CNFSHORTPREAMBLE,WORDS),
4028 DEBUG_REC(CNFWEPKEYS_AGERE,BYTES),
4029 DEBUG_REC(CNFEXCLUDELONGPREAMBLE,WORDS),
4030 DEBUG_REC(CNFTXKEY_AGERE,WORDS),
4031 DEBUG_REC(CNFAUTHENTICATIONRSPTO,WORDS),
4032 DEBUG_REC(CNFBASICRATES,WORDS),
4033 DEBUG_REC(CNFSUPPORTEDRATES,WORDS),
4034 DEBUG_REC(CNFTICKTIME,WORDS),
4035 DEBUG_REC(CNFSCANREQUEST,WORDS),
4036 DEBUG_REC(CNFJOINREQUEST,WORDS),
4037 DEBUG_REC(CNFAUTHENTICATESTATION,WORDS),
4038 DEBUG_REC(CNFCHANNELINFOREQUEST,WORDS),
4039 DEBUG_REC(MAXLOADTIME,WORDS),
4040 DEBUG_REC(DOWNLOADBUFFER,WORDS),
4041 DEBUG_REC(PRIID,WORDS),
4042 DEBUG_REC(PRISUPRANGE,WORDS),
4043 DEBUG_REC(CFIACTRANGES,WORDS),
4044 DEBUG_REC(NICSERNUM,XSTRING),
4045 DEBUG_REC(NICID,WORDS),
4046 DEBUG_REC(MFISUPRANGE,WORDS),
4047 DEBUG_REC(CFISUPRANGE,WORDS),
4048 DEBUG_REC(CHANNELLIST,WORDS),
4049 DEBUG_REC(REGULATORYDOMAINS,WORDS),
4050 DEBUG_REC(TEMPTYPE,WORDS),
4051/* DEBUG_REC(CIS,BYTES), */
4052 DEBUG_REC(STAID,WORDS),
4053 DEBUG_REC(CURRENTSSID,STRING),
4054 DEBUG_REC(CURRENTBSSID,BYTES),
4055 DEBUG_REC(COMMSQUALITY,WORDS),
4056 DEBUG_REC(CURRENTTXRATE,WORDS),
4057 DEBUG_REC(CURRENTBEACONINTERVAL,WORDS),
4058 DEBUG_REC(CURRENTSCALETHRESHOLDS,WORDS),
4059 DEBUG_REC(PROTOCOLRSPTIME,WORDS),
4060 DEBUG_REC(SHORTRETRYLIMIT,WORDS),
4061 DEBUG_REC(LONGRETRYLIMIT,WORDS),
4062 DEBUG_REC(MAXTRANSMITLIFETIME,WORDS),
4063 DEBUG_REC(MAXRECEIVELIFETIME,WORDS),
4064 DEBUG_REC(CFPOLLABLE,WORDS),
4065 DEBUG_REC(AUTHENTICATIONALGORITHMS,WORDS),
4066 DEBUG_REC(PRIVACYOPTIONIMPLEMENTED,WORDS),
4067 DEBUG_REC(OWNMACADDR,BYTES),
4068 DEBUG_REC(SCANRESULTSTABLE,WORDS),
4069 DEBUG_REC(PHYTYPE,WORDS),
4070 DEBUG_REC(CURRENTCHANNEL,WORDS),
4071 DEBUG_REC(CURRENTPOWERSTATE,WORDS),
4072 DEBUG_REC(CCAMODE,WORDS),
4073 DEBUG_REC(SUPPORTEDDATARATES,WORDS),
4074 DEBUG_REC(BUILDSEQ,BYTES),
4075 DEBUG_REC(FWID,XSTRING)
4076#undef DEBUG_REC
4077};
4078
4079#define DEBUG_LTV_SIZE 128
4080
4081static int orinoco_debug_dump_recs(struct net_device *dev)
4082{
4083 struct orinoco_private *priv = netdev_priv(dev);
4084 hermes_t *hw = &priv->hw;
4085 u8 *val8;
4086 u16 *val16;
4087 int i,j;
4088 u16 length;
4089 int err;
4090
4091 /* I'm not sure: we might have a lock here, so we'd better go
4092 atomic, just in case. */
4093 val8 = kmalloc(DEBUG_LTV_SIZE + 2, GFP_ATOMIC);
4094 if (! val8)
4095 return -ENOMEM;
4096 val16 = (u16 *)val8;
4097
4098 for (i = 0; i < ARRAY_SIZE(record_table); i++) {
4099 u16 rid = record_table[i].rid;
4100 int len;
4101
4102 memset(val8, 0, DEBUG_LTV_SIZE + 2);
4103
4104 err = hermes_read_ltv(hw, USER_BAP, rid, DEBUG_LTV_SIZE,
4105 &length, val8);
4106 if (err) {
4107 DEBUG(0, "Error %d reading RID 0x%04x\n", err, rid);
4108 continue;
4109 }
4110 val16 = (u16 *)val8;
4111 if (length == 0)
4112 continue;
4113
4114 printk(KERN_DEBUG "%-15s (0x%04x): length=%d (%d bytes)\tvalue=",
4115 record_table[i].name,
4116 rid, length, (length-1)*2);
4117 len = min(((int)length-1)*2, DEBUG_LTV_SIZE);
4118
4119 switch (record_table[i].displaytype) {
4120 case DISPLAY_WORDS:
4121 for (j = 0; j < len / 2; j++)
4122 printk("%04X-", le16_to_cpu(val16[j]));
4123 break;
4124
4125 case DISPLAY_BYTES:
4126 default:
4127 for (j = 0; j < len; j++)
4128 printk("%02X:", val8[j]);
4129 break;
4130
4131 case DISPLAY_STRING:
4132 len = min(len, le16_to_cpu(val16[0])+2);
4133 val8[len] = '\0';
4134 printk("\"%s\"", (char *)&val16[1]);
4135 break;
4136
4137 case DISPLAY_XSTRING:
4138 printk("'%s'", (char *)val8);
4139 }
4140
4141 printk("\n");
4142 }
4143
4144 kfree(val8);
4145
4146 return 0;
4147}
4148 3955
4149/********************************************************************/ 3956/********************************************************************/
4150/* Debugging */ 3957/* Debugging */
@@ -4218,7 +4025,6 @@ EXPORT_SYMBOL(free_orinocodev);
4218 4025
4219EXPORT_SYMBOL(__orinoco_up); 4026EXPORT_SYMBOL(__orinoco_up);
4220EXPORT_SYMBOL(__orinoco_down); 4027EXPORT_SYMBOL(__orinoco_down);
4221EXPORT_SYMBOL(orinoco_stop);
4222EXPORT_SYMBOL(orinoco_reinit_firmware); 4028EXPORT_SYMBOL(orinoco_reinit_firmware);
4223 4029
4224EXPORT_SYMBOL(orinoco_interrupt); 4030EXPORT_SYMBOL(orinoco_interrupt);
diff --git a/drivers/net/wireless/orinoco.h b/drivers/net/wireless/orinoco.h
index 13e42c2afb27..f749b50d1088 100644
--- a/drivers/net/wireless/orinoco.h
+++ b/drivers/net/wireless/orinoco.h
@@ -119,7 +119,6 @@ extern struct net_device *alloc_orinocodev(int sizeof_card,
119extern void free_orinocodev(struct net_device *dev); 119extern void free_orinocodev(struct net_device *dev);
120extern int __orinoco_up(struct net_device *dev); 120extern int __orinoco_up(struct net_device *dev);
121extern int __orinoco_down(struct net_device *dev); 121extern int __orinoco_down(struct net_device *dev);
122extern int orinoco_stop(struct net_device *dev);
123extern int orinoco_reinit_firmware(struct net_device *dev); 122extern int orinoco_reinit_firmware(struct net_device *dev);
124extern irqreturn_t orinoco_interrupt(int irq, void * dev_id, struct pt_regs *regs); 123extern irqreturn_t orinoco_interrupt(int irq, void * dev_id, struct pt_regs *regs);
125 124
diff --git a/include/linux/dm9000.h b/include/linux/dm9000.h
new file mode 100644
index 000000000000..0008e2ad0c9f
--- /dev/null
+++ b/include/linux/dm9000.h
@@ -0,0 +1,36 @@
1/* include/linux/dm9000.h
2 *
3 * Copyright (c) 2004 Simtec Electronics
4 * Ben Dooks <ben@simtec.co.uk>
5 *
6 * Header file for dm9000 platform data
7 *
8 * This program is free software; you can redistribute it and/or modify
9 * it under the terms of the GNU General Public License version 2 as
10 * published by the Free Software Foundation.
11 *
12*/
13
14#ifndef __DM9000_PLATFORM_DATA
15#define __DM9000_PLATFORM_DATA __FILE__
16
17/* IO control flags */
18
19#define DM9000_PLATF_8BITONLY (0x0001)
20#define DM9000_PLATF_16BITONLY (0x0002)
21#define DM9000_PLATF_32BITONLY (0x0004)
22
 23/* platform data for the platform device structure's platform_data field */
24
25struct dm9000_plat_data {
26 unsigned int flags;
27
28 /* allow replacement IO routines */
29
30 void (*inblk)(void __iomem *reg, void *data, int len);
31 void (*outblk)(void __iomem *reg, void *data, int len);
32 void (*dumpblk)(void __iomem *reg, int len);
33};
34
35#endif /* __DM9000_PLATFORM_DATA */
36
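A hypothetical board-file sketch of how this platform data would be handed to the driver. The base addresses, IRQ number, resource layout and bus-width flag below are illustrative assumptions only and have to match the actual board wiring and what dm9000.c expects; on a kernel of this vintage struct platform_device comes from <linux/device.h>:

#include <linux/kernel.h>
#include <linux/device.h>
#include <linux/ioport.h>
#include <linux/dm9000.h>

static struct resource my_dm9000_resources[] = {
	[0] = {		/* address port (example base address) */
		.start	= 0x20000000,
		.end	= 0x20000003,
		.flags	= IORESOURCE_MEM,
	},
	[1] = {		/* data port (example base address) */
		.start	= 0x20000004,
		.end	= 0x20000007,
		.flags	= IORESOURCE_MEM,
	},
	[2] = {		/* interrupt line (example IRQ number) */
		.start	= 42,
		.end	= 42,
		.flags	= IORESOURCE_IRQ,
	},
};

static struct dm9000_plat_data my_dm9000_pdata = {
	.flags	= DM9000_PLATF_16BITONLY,	/* bus wired 16 bits wide */
};

static struct platform_device my_dm9000_device = {
	.name		= "dm9000",
	.id		= 0,
	.num_resources	= ARRAY_SIZE(my_dm9000_resources),
	.resource	= my_dm9000_resources,
	.dev		= {
		.platform_data	= &my_dm9000_pdata,
	},
};

/* registered from the board's init code, e.g.
 * platform_device_register(&my_dm9000_device); */

The DM9000_PLATF_*BITONLY flags advertise the usable bus width, while the inblk/outblk/dumpblk hooks let a board replace the I/O routines entirely.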
diff --git a/include/linux/hdlc.h b/include/linux/hdlc.h
index 503194e62fe1..ed2927ef1ff7 100644
--- a/include/linux/hdlc.h
+++ b/include/linux/hdlc.h
@@ -1,7 +1,7 @@
1/* 1/*
2 * Generic HDLC support routines for Linux 2 * Generic HDLC support routines for Linux
3 * 3 *
4 * Copyright (C) 1999-2003 Krzysztof Halasa <khc@pm.waw.pl> 4 * Copyright (C) 1999-2005 Krzysztof Halasa <khc@pm.waw.pl>
5 * 5 *
6 * This program is free software; you can redistribute it and/or modify it 6 * This program is free software; you can redistribute it and/or modify it
7 * under the terms of version 2 of the GNU General Public License 7 * under the terms of version 2 of the GNU General Public License
@@ -41,6 +41,7 @@
41#define LMI_NONE 1 /* No LMI, all PVCs are static */ 41#define LMI_NONE 1 /* No LMI, all PVCs are static */
42#define LMI_ANSI 2 /* ANSI Annex D */ 42#define LMI_ANSI 2 /* ANSI Annex D */
43#define LMI_CCITT 3 /* ITU-T Annex A */ 43#define LMI_CCITT 3 /* ITU-T Annex A */
44#define LMI_CISCO 4 /* The "original" LMI, aka Gang of Four */
44 45
45#define HDLC_MAX_MTU 1500 /* Ethernet 1500 bytes */ 46#define HDLC_MAX_MTU 1500 /* Ethernet 1500 bytes */
46#define HDLC_MAX_MRU (HDLC_MAX_MTU + 10 + 14 + 4) /* for ETH+VLAN over FR */ 47#define HDLC_MAX_MRU (HDLC_MAX_MTU + 10 + 14 + 4) /* for ETH+VLAN over FR */
@@ -89,6 +90,7 @@ typedef struct pvc_device_struct {
89 unsigned int deleted: 1; 90 unsigned int deleted: 1;
90 unsigned int fecn: 1; 91 unsigned int fecn: 1;
91 unsigned int becn: 1; 92 unsigned int becn: 1;
93 unsigned int bandwidth; /* Cisco LMI reporting only */
92 }state; 94 }state;
93}pvc_device; 95}pvc_device;
94 96
diff --git a/include/linux/if.h b/include/linux/if.h
index d73a9d62f208..ce627d9092ef 100644
--- a/include/linux/if.h
+++ b/include/linux/if.h
@@ -33,7 +33,7 @@
33#define IFF_LOOPBACK 0x8 /* is a loopback net */ 33#define IFF_LOOPBACK 0x8 /* is a loopback net */
34#define IFF_POINTOPOINT 0x10 /* interface is has p-p link */ 34#define IFF_POINTOPOINT 0x10 /* interface is has p-p link */
35#define IFF_NOTRAILERS 0x20 /* avoid use of trailers */ 35#define IFF_NOTRAILERS 0x20 /* avoid use of trailers */
36#define IFF_RUNNING 0x40 /* resources allocated */ 36#define IFF_RUNNING 0x40 /* interface running and carrier ok */
37#define IFF_NOARP 0x80 /* no ARP protocol */ 37#define IFF_NOARP 0x80 /* no ARP protocol */
38#define IFF_PROMISC 0x100 /* receive all packets */ 38#define IFF_PROMISC 0x100 /* receive all packets */
39#define IFF_ALLMULTI 0x200 /* receive all multicast packets*/ 39#define IFF_ALLMULTI 0x200 /* receive all multicast packets*/
diff --git a/include/linux/wireless.h b/include/linux/wireless.h
index 2f51f2b6562e..ae485f9c916e 100644
--- a/include/linux/wireless.h
+++ b/include/linux/wireless.h
@@ -1,10 +1,10 @@
1/* 1/*
2 * This file define a set of standard wireless extensions 2 * This file define a set of standard wireless extensions
3 * 3 *
4 * Version : 17 21.6.04 4 * Version : 18 12.3.05
5 * 5 *
6 * Authors : Jean Tourrilhes - HPL - <jt@hpl.hp.com> 6 * Authors : Jean Tourrilhes - HPL - <jt@hpl.hp.com>
7 * Copyright (c) 1997-2004 Jean Tourrilhes, All Rights Reserved. 7 * Copyright (c) 1997-2005 Jean Tourrilhes, All Rights Reserved.
8 */ 8 */
9 9
10#ifndef _LINUX_WIRELESS_H 10#ifndef _LINUX_WIRELESS_H
@@ -82,7 +82,7 @@
82 * (there is some stuff that will be added in the future...) 82 * (there is some stuff that will be added in the future...)
83 * I just plan to increment with each new version. 83 * I just plan to increment with each new version.
84 */ 84 */
85#define WIRELESS_EXT 17 85#define WIRELESS_EXT 18
86 86
87/* 87/*
88 * Changes : 88 * Changes :
@@ -182,6 +182,21 @@
182 * - Document (struct iw_quality *)->updated, add new flags (INVALID) 182 * - Document (struct iw_quality *)->updated, add new flags (INVALID)
183 * - Wireless Event capability in struct iw_range 183 * - Wireless Event capability in struct iw_range
184 * - Add support for relative TxPower (yick !) 184 * - Add support for relative TxPower (yick !)
185 *
186 * V17 to V18 (From Jouni Malinen <jkmaline@cc.hut.fi>)
187 * ----------
188 * - Add support for WPA/WPA2
189 * - Add extended encoding configuration (SIOCSIWENCODEEXT and
190 * SIOCGIWENCODEEXT)
191 * - Add SIOCSIWGENIE/SIOCGIWGENIE
192 * - Add SIOCSIWMLME
193 * - Add SIOCSIWPMKSA
194 * - Add struct iw_range bit field for supported encoding capabilities
195 * - Add optional scan request parameters for SIOCSIWSCAN
196 * - Add SIOCSIWAUTH/SIOCGIWAUTH for setting authentication and WPA
197 * related parameters (extensible up to 4096 parameter values)
198 * - Add wireless events: IWEVGENIE, IWEVMICHAELMICFAILURE,
199 * IWEVASSOCREQIE, IWEVASSOCRESPIE, IWEVPMKIDCAND
185 */ 200 */
186 201
187/**************************** CONSTANTS ****************************/ 202/**************************** CONSTANTS ****************************/
@@ -256,6 +271,30 @@
256#define SIOCSIWPOWER 0x8B2C /* set Power Management settings */ 271#define SIOCSIWPOWER 0x8B2C /* set Power Management settings */
257#define SIOCGIWPOWER 0x8B2D /* get Power Management settings */ 272#define SIOCGIWPOWER 0x8B2D /* get Power Management settings */
258 273
 274/* WPA : Generic IEEE 802.11 information element (e.g., for WPA/RSN/WMM).
275 * This ioctl uses struct iw_point and data buffer that includes IE id and len
276 * fields. More than one IE may be included in the request. Setting the generic
277 * IE to empty buffer (len=0) removes the generic IE from the driver. Drivers
278 * are allowed to generate their own WPA/RSN IEs, but in these cases, drivers
279 * are required to report the used IE as a wireless event, e.g., when
280 * associating with an AP. */
281#define SIOCSIWGENIE 0x8B30 /* set generic IE */
282#define SIOCGIWGENIE 0x8B31 /* get generic IE */
283
284/* WPA : IEEE 802.11 MLME requests */
285#define SIOCSIWMLME 0x8B16 /* request MLME operation; uses
286 * struct iw_mlme */
287/* WPA : Authentication mode parameters */
288#define SIOCSIWAUTH 0x8B32 /* set authentication mode params */
289#define SIOCGIWAUTH 0x8B33 /* get authentication mode params */
290
291/* WPA : Extended version of encoding configuration */
292#define SIOCSIWENCODEEXT 0x8B34 /* set encoding token & mode */
293#define SIOCGIWENCODEEXT 0x8B35 /* get encoding token & mode */
294
295/* WPA2 : PMKSA cache management */
296#define SIOCSIWPMKSA 0x8B36 /* PMKSA cache operation */
297
259/* -------------------- DEV PRIVATE IOCTL LIST -------------------- */ 298/* -------------------- DEV PRIVATE IOCTL LIST -------------------- */
260 299
261/* These 32 ioctl are wireless device private, for 16 commands. 300/* These 32 ioctl are wireless device private, for 16 commands.
@@ -297,6 +336,34 @@
297#define IWEVCUSTOM 0x8C02 /* Driver specific ascii string */ 336#define IWEVCUSTOM 0x8C02 /* Driver specific ascii string */
298#define IWEVREGISTERED 0x8C03 /* Discovered a new node (AP mode) */ 337#define IWEVREGISTERED 0x8C03 /* Discovered a new node (AP mode) */
299#define IWEVEXPIRED 0x8C04 /* Expired a node (AP mode) */ 338#define IWEVEXPIRED 0x8C04 /* Expired a node (AP mode) */
339#define IWEVGENIE 0x8C05 /* Generic IE (WPA, RSN, WMM, ..)
340 * (scan results); This includes id and
341 * length fields. One IWEVGENIE may
342 * contain more than one IE. Scan
343 * results may contain one or more
344 * IWEVGENIE events. */
345#define IWEVMICHAELMICFAILURE 0x8C06 /* Michael MIC failure
346 * (struct iw_michaelmicfailure)
347 */
348#define IWEVASSOCREQIE 0x8C07 /* IEs used in (Re)Association Request.
349 * The data includes id and length
350 * fields and may contain more than one
351 * IE. This event is required in
352 * Managed mode if the driver
353 * generates its own WPA/RSN IE. This
354 * should be sent just before
355 * IWEVREGISTERED event for the
356 * association. */
357#define IWEVASSOCRESPIE 0x8C08 /* IEs used in (Re)Association
358 * Response. The data includes id and
359 * length fields and may contain more
360 * than one IE. This may be sent
361 * between IWEVASSOCREQIE and
362 * IWEVREGISTERED events for the
363 * association. */
364#define IWEVPMKIDCAND 0x8C09 /* PMKID candidate for RSN
365 * pre-authentication
366 * (struct iw_pmkid_cand) */
300 367
301#define IWEVFIRST 0x8C00 368#define IWEVFIRST 0x8C00
302 369
@@ -432,12 +499,94 @@
432#define IW_SCAN_THIS_MODE 0x0020 /* Scan only this Mode */ 499#define IW_SCAN_THIS_MODE 0x0020 /* Scan only this Mode */
433#define IW_SCAN_ALL_RATE 0x0040 /* Scan all Bit-Rates */ 500#define IW_SCAN_ALL_RATE 0x0040 /* Scan all Bit-Rates */
434#define IW_SCAN_THIS_RATE 0x0080 /* Scan only this Bit-Rate */ 501#define IW_SCAN_THIS_RATE 0x0080 /* Scan only this Bit-Rate */
502/* struct iw_scan_req scan_type */
503#define IW_SCAN_TYPE_ACTIVE 0
504#define IW_SCAN_TYPE_PASSIVE 1
435/* Maximum size of returned data */ 505/* Maximum size of returned data */
436#define IW_SCAN_MAX_DATA 4096 /* In bytes */ 506#define IW_SCAN_MAX_DATA 4096 /* In bytes */
437 507
438/* Max number of char in custom event - use multiple of them if needed */ 508/* Max number of char in custom event - use multiple of them if needed */
439#define IW_CUSTOM_MAX 256 /* In bytes */ 509#define IW_CUSTOM_MAX 256 /* In bytes */
440 510
511/* Generic information element */
512#define IW_GENERIC_IE_MAX 1024
513
514/* MLME requests (SIOCSIWMLME / struct iw_mlme) */
515#define IW_MLME_DEAUTH 0
516#define IW_MLME_DISASSOC 1
517
518/* SIOCSIWAUTH/SIOCGIWAUTH struct iw_param flags */
519#define IW_AUTH_INDEX 0x0FFF
520#define IW_AUTH_FLAGS 0xF000
521/* SIOCSIWAUTH/SIOCGIWAUTH parameters (0 .. 4095)
522 * (IW_AUTH_INDEX mask in struct iw_param flags; this is the index of the
523 * parameter that is being set/get to; value will be read/written to
524 * struct iw_param value field) */
525#define IW_AUTH_WPA_VERSION 0
526#define IW_AUTH_CIPHER_PAIRWISE 1
527#define IW_AUTH_CIPHER_GROUP 2
528#define IW_AUTH_KEY_MGMT 3
529#define IW_AUTH_TKIP_COUNTERMEASURES 4
530#define IW_AUTH_DROP_UNENCRYPTED 5
531#define IW_AUTH_80211_AUTH_ALG 6
532#define IW_AUTH_WPA_ENABLED 7
533#define IW_AUTH_RX_UNENCRYPTED_EAPOL 8
534#define IW_AUTH_ROAMING_CONTROL 9
535#define IW_AUTH_PRIVACY_INVOKED 10
536
537/* IW_AUTH_WPA_VERSION values (bit field) */
538#define IW_AUTH_WPA_VERSION_DISABLED 0x00000001
539#define IW_AUTH_WPA_VERSION_WPA 0x00000002
540#define IW_AUTH_WPA_VERSION_WPA2 0x00000004
541
 542/* IW_AUTH_CIPHER_PAIRWISE and IW_AUTH_CIPHER_GROUP values (bit field) */
543#define IW_AUTH_CIPHER_NONE 0x00000001
544#define IW_AUTH_CIPHER_WEP40 0x00000002
545#define IW_AUTH_CIPHER_TKIP 0x00000004
546#define IW_AUTH_CIPHER_CCMP 0x00000008
547#define IW_AUTH_CIPHER_WEP104 0x00000010
548
549/* IW_AUTH_KEY_MGMT values (bit field) */
550#define IW_AUTH_KEY_MGMT_802_1X 1
551#define IW_AUTH_KEY_MGMT_PSK 2
552
553/* IW_AUTH_80211_AUTH_ALG values (bit field) */
554#define IW_AUTH_ALG_OPEN_SYSTEM 0x00000001
555#define IW_AUTH_ALG_SHARED_KEY 0x00000002
556#define IW_AUTH_ALG_LEAP 0x00000004
557
558/* IW_AUTH_ROAMING_CONTROL values */
559#define IW_AUTH_ROAMING_ENABLE 0 /* driver/firmware based roaming */
560#define IW_AUTH_ROAMING_DISABLE 1 /* user space program used for roaming
561 * control */
562
563/* SIOCSIWENCODEEXT definitions */
564#define IW_ENCODE_SEQ_MAX_SIZE 8
565/* struct iw_encode_ext ->alg */
566#define IW_ENCODE_ALG_NONE 0
567#define IW_ENCODE_ALG_WEP 1
568#define IW_ENCODE_ALG_TKIP 2
569#define IW_ENCODE_ALG_CCMP 3
570/* struct iw_encode_ext ->ext_flags */
571#define IW_ENCODE_EXT_TX_SEQ_VALID 0x00000001
572#define IW_ENCODE_EXT_RX_SEQ_VALID 0x00000002
573#define IW_ENCODE_EXT_GROUP_KEY 0x00000004
574#define IW_ENCODE_EXT_SET_TX_KEY 0x00000008
575
576/* IWEVMICHAELMICFAILURE : struct iw_michaelmicfailure ->flags */
577#define IW_MICFAILURE_KEY_ID 0x00000003 /* Key ID 0..3 */
578#define IW_MICFAILURE_GROUP 0x00000004
579#define IW_MICFAILURE_PAIRWISE 0x00000008
580#define IW_MICFAILURE_STAKEY 0x00000010
581#define IW_MICFAILURE_COUNT 0x00000060 /* 1 or 2 (0 = count not supported)
582 */
583
584/* Bit field values for enc_capa in struct iw_range */
585#define IW_ENC_CAPA_WPA 0x00000001
586#define IW_ENC_CAPA_WPA2 0x00000002
587#define IW_ENC_CAPA_CIPHER_TKIP 0x00000004
588#define IW_ENC_CAPA_CIPHER_CCMP 0x00000008
589
441/* Event capability macros - in (struct iw_range *)->event_capa 590/* Event capability macros - in (struct iw_range *)->event_capa
442 * Because we have more than 32 possible events, we use an array of 591 * Because we have more than 32 possible events, we use an array of
443 * 32 bit bitmasks. Note : 32 bits = 0x20 = 2^5. */ 592 * 32 bit bitmasks. Note : 32 bits = 0x20 = 2^5. */
@@ -546,6 +695,132 @@ struct iw_thrspy
546 struct iw_quality high; /* High threshold */ 695 struct iw_quality high; /* High threshold */
547}; 696};
548 697
698/*
699 * Optional data for scan request
700 *
 701 * Note: these optional parameters control the scanning behavior;
 702 * they do not apply to getting scan results (SIOCGIWSCAN). Drivers
 703 * are expected to keep a local BSS table and provide merged results
 704 * covering all BSSes, even if the previous scan request limited
 705 * scanning to a subset, e.g., by specifying an SSID. In particular,
 706 * scan results are required to include an entry for the
707 * current BSS if the driver is in Managed mode and associated with an AP.
708 */
709struct iw_scan_req
710{
711 __u8 scan_type; /* IW_SCAN_TYPE_{ACTIVE,PASSIVE} */
712 __u8 essid_len;
713 __u8 num_channels; /* num entries in channel_list;
714 * 0 = scan all allowed channels */
715 __u8 flags; /* reserved as padding; use zero, this may
716 * be used in the future for adding flags
717 * to request different scan behavior */
718 struct sockaddr bssid; /* ff:ff:ff:ff:ff:ff for broadcast BSSID or
719 * individual address of a specific BSS */
720
721 /*
722 * Use this ESSID if IW_SCAN_THIS_ESSID flag is used instead of using
 723 * the current ESSID. This allows scan requests for a specific ESSID
724 * without having to change the current ESSID and potentially breaking
725 * the current association.
726 */
727 __u8 essid[IW_ESSID_MAX_SIZE];
728
729 /*
730 * Optional parameters for changing the default scanning behavior.
731 * These are based on the MLME-SCAN.request from IEEE Std 802.11.
732 * TU is 1.024 ms. If these are set to 0, driver is expected to use
733 * reasonable default values. min_channel_time defines the time that
734 * will be used to wait for the first reply on each channel. If no
735 * replies are received, next channel will be scanned after this. If
736 * replies are received, total time waited on the channel is defined by
737 * max_channel_time.
738 */
739 __u32 min_channel_time; /* in TU */
740 __u32 max_channel_time; /* in TU */
741
742 struct iw_freq channel_list[IW_MAX_FREQUENCIES];
743};
744
745/* ------------------------- WPA SUPPORT ------------------------- */
746
747/*
748 * Extended data structure for get/set encoding (this is used with
749 * SIOCSIWENCODEEXT/SIOCGIWENCODEEXT. struct iw_point and IW_ENCODE_*
750 * flags are used in the same way as with SIOCSIWENCODE/SIOCGIWENCODE and
751 * only the data contents changes (key data -> this structure, including
752 * key data).
753 *
754 * If the new key is the first group key, it will be set as the default
755 * TX key. Otherwise, default TX key index is only changed if
756 * IW_ENCODE_EXT_SET_TX_KEY flag is set.
757 *
758 * Key will be changed with SIOCSIWENCODEEXT in all cases except for
759 * special "change TX key index" operation which is indicated by setting
760 * key_len = 0 and ext_flags |= IW_ENCODE_EXT_SET_TX_KEY.
761 *
762 * tx_seq/rx_seq are only used when respective
763 * IW_ENCODE_EXT_{TX,RX}_SEQ_VALID flag is set in ext_flags. Normal
764 * TKIP/CCMP operation is to set RX seq with SIOCSIWENCODEEXT and start
765 * TX seq from zero whenever key is changed. SIOCGIWENCODEEXT is normally
766 * used only by an Authenticator (AP or an IBSS station) to get the
767 * current TX sequence number. Using TX_SEQ_VALID for SIOCSIWENCODEEXT and
768 * RX_SEQ_VALID for SIOCGIWENCODEEXT are optional, but can be useful for
769 * debugging/testing.
770 */
771struct iw_encode_ext
772{
773 __u32 ext_flags; /* IW_ENCODE_EXT_* */
774 __u8 tx_seq[IW_ENCODE_SEQ_MAX_SIZE]; /* LSB first */
775 __u8 rx_seq[IW_ENCODE_SEQ_MAX_SIZE]; /* LSB first */
776 struct sockaddr addr; /* ff:ff:ff:ff:ff:ff for broadcast/multicast
777 * (group) keys or unicast address for
778 * individual keys */
779 __u16 alg; /* IW_ENCODE_ALG_* */
780 __u16 key_len;
781 __u8 key[0];
782};
783
784/* SIOCSIWMLME data */
785struct iw_mlme
786{
787 __u16 cmd; /* IW_MLME_* */
788 __u16 reason_code;
789 struct sockaddr addr;
790};
791
792/* SIOCSIWPMKSA data */
793#define IW_PMKSA_ADD 1
794#define IW_PMKSA_REMOVE 2
795#define IW_PMKSA_FLUSH 3
796
797#define IW_PMKID_LEN 16
798
799struct iw_pmksa
800{
801 __u32 cmd; /* IW_PMKSA_* */
802 struct sockaddr bssid;
803 __u8 pmkid[IW_PMKID_LEN];
804};
805
806/* IWEVMICHAELMICFAILURE data */
807struct iw_michaelmicfailure
808{
809 __u32 flags;
810 struct sockaddr src_addr;
811 __u8 tsc[IW_ENCODE_SEQ_MAX_SIZE]; /* LSB first */
812};
813
814/* IWEVPMKIDCAND data */
 815#define IW_PMKID_CAND_PREAUTH 0x00000001 /* RSN pre-authentication enabled */
816struct iw_pmkid_cand
817{
818 __u32 flags; /* IW_PMKID_CAND_* */
819 __u32 index; /* the smaller the index, the higher the
820 * priority */
821 struct sockaddr bssid;
822};
823
549/* ------------------------ WIRELESS STATS ------------------------ */ 824/* ------------------------ WIRELESS STATS ------------------------ */
550/* 825/*
551 * Wireless statistics (used for /proc/net/wireless) 826 * Wireless statistics (used for /proc/net/wireless)
@@ -725,6 +1000,8 @@ struct iw_range
725 struct iw_freq freq[IW_MAX_FREQUENCIES]; /* list */ 1000 struct iw_freq freq[IW_MAX_FREQUENCIES]; /* list */
726 /* Note : this frequency list doesn't need to fit channel numbers, 1001 /* Note : this frequency list doesn't need to fit channel numbers,
727 * because each entry contain its channel index */ 1002 * because each entry contain its channel index */
1003
1004 __u32 enc_capa; /* IW_ENC_CAPA_* bit field */
728}; 1005};
729 1006
730/* 1007/*
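To illustrate how the new WE-18 encoding interface is meant to be driven, here is a hedged user-space sketch, loosely modelled on what a WPA supplicant would do to install a pairwise TKIP key with SIOCSIWENCODEEXT. set_tkip_key(), the interface name handling and the key/peer values are illustrative only, error handling is minimal, and a userspace-usable copy of the WE-18 definitions is assumed (supplicants of this period ship their own copy of wireless.h for exactly that reason):

#include <string.h>
#include <stdlib.h>
#include <sys/ioctl.h>
#include <sys/socket.h>
#include <net/if_arp.h>
#include <linux/wireless.h>

static int set_tkip_key(int sock, const char *ifname,
			const unsigned char *peer,	/* 6-byte MAC */
			const unsigned char *key, size_t key_len, int idx)
{
	struct iwreq iwr;
	struct iw_encode_ext *ext;
	int ret;

	ext = calloc(1, sizeof(*ext) + key_len);
	if (!ext)
		return -1;

	ext->alg = IW_ENCODE_ALG_TKIP;
	ext->key_len = key_len;
	ext->ext_flags = IW_ENCODE_EXT_SET_TX_KEY;	/* make this the TX key */
	ext->addr.sa_family = ARPHRD_ETHER;
	memcpy(ext->addr.sa_data, peer, 6);		/* unicast = pairwise key */
	memcpy(ext->key, key, key_len);

	memset(&iwr, 0, sizeof(iwr));
	strncpy(iwr.ifr_name, ifname, IFNAMSIZ);
	iwr.u.encoding.flags = idx + 1;			/* key index is 1-based */
	iwr.u.encoding.pointer = ext;
	iwr.u.encoding.length = sizeof(*ext) + key_len;

	ret = ioctl(sock, SIOCSIWENCODEEXT, &iwr);
	free(ext);
	return ret;
}

As the header comment above notes, the iw_point and IW_ENCODE_* flags keep the SIOCSIWENCODE semantics; only the data payload changes, with the algorithm, peer address and key material carried in struct iw_encode_ext.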
diff --git a/net/core/wireless.c b/net/core/wireless.c
index 750cc5daeb03..b2fe378dfbf8 100644
--- a/net/core/wireless.c
+++ b/net/core/wireless.c
@@ -2,7 +2,7 @@
2 * This file implement the Wireless Extensions APIs. 2 * This file implement the Wireless Extensions APIs.
3 * 3 *
4 * Authors : Jean Tourrilhes - HPL - <jt@hpl.hp.com> 4 * Authors : Jean Tourrilhes - HPL - <jt@hpl.hp.com>
5 * Copyright (c) 1997-2004 Jean Tourrilhes, All Rights Reserved. 5 * Copyright (c) 1997-2005 Jean Tourrilhes, All Rights Reserved.
6 * 6 *
7 * (As all part of the Linux kernel, this file is GPL) 7 * (As all part of the Linux kernel, this file is GPL)
8 */ 8 */
@@ -187,6 +187,12 @@ static const struct iw_ioctl_description standard_ioctl[] = {
187 .header_type = IW_HEADER_TYPE_ADDR, 187 .header_type = IW_HEADER_TYPE_ADDR,
188 .flags = IW_DESCR_FLAG_DUMP, 188 .flags = IW_DESCR_FLAG_DUMP,
189 }, 189 },
190 [SIOCSIWMLME - SIOCIWFIRST] = {
191 .header_type = IW_HEADER_TYPE_POINT,
192 .token_size = 1,
193 .min_tokens = sizeof(struct iw_mlme),
194 .max_tokens = sizeof(struct iw_mlme),
195 },
190 [SIOCGIWAPLIST - SIOCIWFIRST] = { 196 [SIOCGIWAPLIST - SIOCIWFIRST] = {
191 .header_type = IW_HEADER_TYPE_POINT, 197 .header_type = IW_HEADER_TYPE_POINT,
192 .token_size = sizeof(struct sockaddr) + 198 .token_size = sizeof(struct sockaddr) +
@@ -195,7 +201,10 @@ static const struct iw_ioctl_description standard_ioctl[] = {
195 .flags = IW_DESCR_FLAG_NOMAX, 201 .flags = IW_DESCR_FLAG_NOMAX,
196 }, 202 },
197 [SIOCSIWSCAN - SIOCIWFIRST] = { 203 [SIOCSIWSCAN - SIOCIWFIRST] = {
198 .header_type = IW_HEADER_TYPE_PARAM, 204 .header_type = IW_HEADER_TYPE_POINT,
205 .token_size = 1,
206 .min_tokens = 0,
207 .max_tokens = sizeof(struct iw_scan_req),
199 }, 208 },
200 [SIOCGIWSCAN - SIOCIWFIRST] = { 209 [SIOCGIWSCAN - SIOCIWFIRST] = {
201 .header_type = IW_HEADER_TYPE_POINT, 210 .header_type = IW_HEADER_TYPE_POINT,
@@ -273,6 +282,42 @@ static const struct iw_ioctl_description standard_ioctl[] = {
273 [SIOCGIWPOWER - SIOCIWFIRST] = { 282 [SIOCGIWPOWER - SIOCIWFIRST] = {
274 .header_type = IW_HEADER_TYPE_PARAM, 283 .header_type = IW_HEADER_TYPE_PARAM,
275 }, 284 },
285 [SIOCSIWGENIE - SIOCIWFIRST] = {
286 .header_type = IW_HEADER_TYPE_POINT,
287 .token_size = 1,
288 .max_tokens = IW_GENERIC_IE_MAX,
289 },
290 [SIOCGIWGENIE - SIOCIWFIRST] = {
291 .header_type = IW_HEADER_TYPE_POINT,
292 .token_size = 1,
293 .max_tokens = IW_GENERIC_IE_MAX,
294 },
295 [SIOCSIWAUTH - SIOCIWFIRST] = {
296 .header_type = IW_HEADER_TYPE_PARAM,
297 },
298 [SIOCGIWAUTH - SIOCIWFIRST] = {
299 .header_type = IW_HEADER_TYPE_PARAM,
300 },
301 [SIOCSIWENCODEEXT - SIOCIWFIRST] = {
302 .header_type = IW_HEADER_TYPE_POINT,
303 .token_size = 1,
304 .min_tokens = sizeof(struct iw_encode_ext),
305 .max_tokens = sizeof(struct iw_encode_ext) +
306 IW_ENCODING_TOKEN_MAX,
307 },
308 [SIOCGIWENCODEEXT - SIOCIWFIRST] = {
309 .header_type = IW_HEADER_TYPE_POINT,
310 .token_size = 1,
311 .min_tokens = sizeof(struct iw_encode_ext),
312 .max_tokens = sizeof(struct iw_encode_ext) +
313 IW_ENCODING_TOKEN_MAX,
314 },
315 [SIOCSIWPMKSA - SIOCIWFIRST] = {
316 .header_type = IW_HEADER_TYPE_POINT,
317 .token_size = 1,
318 .min_tokens = sizeof(struct iw_pmksa),
319 .max_tokens = sizeof(struct iw_pmksa),
320 },
276}; 321};
277static const int standard_ioctl_num = (sizeof(standard_ioctl) / 322static const int standard_ioctl_num = (sizeof(standard_ioctl) /
278 sizeof(struct iw_ioctl_description)); 323 sizeof(struct iw_ioctl_description));
@@ -299,6 +344,31 @@ static const struct iw_ioctl_description standard_event[] = {
299 [IWEVEXPIRED - IWEVFIRST] = { 344 [IWEVEXPIRED - IWEVFIRST] = {
300 .header_type = IW_HEADER_TYPE_ADDR, 345 .header_type = IW_HEADER_TYPE_ADDR,
301 }, 346 },
347 [IWEVGENIE - IWEVFIRST] = {
348 .header_type = IW_HEADER_TYPE_POINT,
349 .token_size = 1,
350 .max_tokens = IW_GENERIC_IE_MAX,
351 },
352 [IWEVMICHAELMICFAILURE - IWEVFIRST] = {
353 .header_type = IW_HEADER_TYPE_POINT,
354 .token_size = 1,
355 .max_tokens = sizeof(struct iw_michaelmicfailure),
356 },
357 [IWEVASSOCREQIE - IWEVFIRST] = {
358 .header_type = IW_HEADER_TYPE_POINT,
359 .token_size = 1,
360 .max_tokens = IW_GENERIC_IE_MAX,
361 },
362 [IWEVASSOCRESPIE - IWEVFIRST] = {
363 .header_type = IW_HEADER_TYPE_POINT,
364 .token_size = 1,
365 .max_tokens = IW_GENERIC_IE_MAX,
366 },
367 [IWEVPMKIDCAND - IWEVFIRST] = {
368 .header_type = IW_HEADER_TYPE_POINT,
369 .token_size = 1,
370 .max_tokens = sizeof(struct iw_pmkid_cand),
371 },
302}; 372};
303static const int standard_event_num = (sizeof(standard_event) / 373static const int standard_event_num = (sizeof(standard_event) /
304 sizeof(struct iw_ioctl_description)); 374 sizeof(struct iw_ioctl_description));
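The SIOCSIWSCAN descriptor change above (from IW_HEADER_TYPE_PARAM to a POINT request of up to sizeof(struct iw_scan_req) bytes) is what allows user space to pass the optional scan parameters defined in wireless.h. A hedged user-space sketch of a directed scan request, with the same header caveat as the previous example; request_scan(), the interface name and the SSID handling are placeholders:

#include <string.h>
#include <sys/ioctl.h>
#include <sys/socket.h>
#include <net/if_arp.h>
#include <linux/wireless.h>

static int request_scan(int sock, const char *ifname,
			const char *ssid, size_t ssid_len)
{
	struct iwreq iwr;
	struct iw_scan_req req;

	if (ssid_len > IW_ESSID_MAX_SIZE)
		return -1;

	memset(&req, 0, sizeof(req));
	req.scan_type = IW_SCAN_TYPE_ACTIVE;
	req.essid_len = ssid_len;
	memcpy(req.essid, ssid, ssid_len);
	req.bssid.sa_family = ARPHRD_ETHER;
	memset(req.bssid.sa_data, 0xff, 6);	/* broadcast BSSID */

	memset(&iwr, 0, sizeof(iwr));
	strncpy(iwr.ifr_name, ifname, IFNAMSIZ);
	iwr.u.data.pointer = (void *) &req;
	iwr.u.data.length = sizeof(req);
	iwr.u.data.flags = IW_SCAN_THIS_ESSID;	/* scan only the given ESSID */

	return ioctl(sock, SIOCSIWSCAN, &iwr);
}

A plain "iwlist scan"-style request still works by passing no payload at all (min_tokens is 0), so older tools remain compatible with the new descriptor.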