aboutsummaryrefslogtreecommitdiffstats
path: root/drivers/net/ethernet
diff options
context:
space:
mode:
authorJeff Kirsher <jeffrey.t.kirsher@intel.com>2011-05-20 02:27:55 -0400
committerJeff Kirsher <jeffrey.t.kirsher@intel.com>2011-08-11 19:29:42 -0400
commita8fe65b8f031c5c0a7414059773eaa962e5243cb (patch)
tree7deefb4ca57299d315cf2e3f97b9919857eaacb4 /drivers/net/ethernet
parentbaf0fbfe7ea34cd676e3362a62033d8ca1c52d99 (diff)
8139*/atp/r8169/sc92031: Move the Realtek drivers
Move the Realtek drivers into drivers/net/ethernet/realtek/ and make the necessary Kconfig and Makefile changes. CC: Realtek linux nic maintainers <nic_swsd@realtek.com> CC: Francois Romieu <romieu@fr.zoreil.com> CC: Jeff Garzik <jgarzik@pobox.com> CC: Donald Becker <becker@scyld.com> Signed-off-by: Jeff Kirsher <jeffrey.t.kirsher@intel.com>
Diffstat (limited to 'drivers/net/ethernet')
-rw-r--r--drivers/net/ethernet/Kconfig1
-rw-r--r--drivers/net/ethernet/Makefile1
-rw-r--r--drivers/net/ethernet/realtek/8139cp.c2064
-rw-r--r--drivers/net/ethernet/realtek/8139too.c2622
-rw-r--r--drivers/net/ethernet/realtek/Kconfig126
-rw-r--r--drivers/net/ethernet/realtek/Makefile9
-rw-r--r--drivers/net/ethernet/realtek/atp.c940
-rw-r--r--drivers/net/ethernet/realtek/atp.h259
-rw-r--r--drivers/net/ethernet/realtek/r8169.c5824
-rw-r--r--drivers/net/ethernet/realtek/sc92031.c1615
10 files changed, 13461 insertions, 0 deletions
diff --git a/drivers/net/ethernet/Kconfig b/drivers/net/ethernet/Kconfig
index 110071ec4ce6..fecac79b009b 100644
--- a/drivers/net/ethernet/Kconfig
+++ b/drivers/net/ethernet/Kconfig
@@ -32,6 +32,7 @@ source "drivers/net/ethernet/8390/Kconfig"
32source "drivers/net/ethernet/pasemi/Kconfig" 32source "drivers/net/ethernet/pasemi/Kconfig"
33source "drivers/net/ethernet/qlogic/Kconfig" 33source "drivers/net/ethernet/qlogic/Kconfig"
34source "drivers/net/ethernet/racal/Kconfig" 34source "drivers/net/ethernet/racal/Kconfig"
35source "drivers/net/ethernet/realtek/Kconfig"
35source "drivers/net/ethernet/sfc/Kconfig" 36source "drivers/net/ethernet/sfc/Kconfig"
36source "drivers/net/ethernet/smsc/Kconfig" 37source "drivers/net/ethernet/smsc/Kconfig"
37source "drivers/net/ethernet/stmicro/Kconfig" 38source "drivers/net/ethernet/stmicro/Kconfig"
diff --git a/drivers/net/ethernet/Makefile b/drivers/net/ethernet/Makefile
index 4a6edf7141d2..0092c30db18f 100644
--- a/drivers/net/ethernet/Makefile
+++ b/drivers/net/ethernet/Makefile
@@ -23,6 +23,7 @@ obj-$(CONFIG_NET_VENDOR_NATSEMI) += natsemi/
23obj-$(CONFIG_NET_VENDOR_PASEMI) += pasemi/ 23obj-$(CONFIG_NET_VENDOR_PASEMI) += pasemi/
24obj-$(CONFIG_NET_VENDOR_QLOGIC) += qlogic/ 24obj-$(CONFIG_NET_VENDOR_QLOGIC) += qlogic/
25obj-$(CONFIG_NET_VENDOR_RACAL) += racal/ 25obj-$(CONFIG_NET_VENDOR_RACAL) += racal/
26obj-$(CONFIG_NET_VENDOR_REALTEK) += realtek/
26obj-$(CONFIG_SFC) += sfc/ 27obj-$(CONFIG_SFC) += sfc/
27obj-$(CONFIG_NET_VENDOR_SMSC) += smsc/ 28obj-$(CONFIG_NET_VENDOR_SMSC) += smsc/
28obj-$(CONFIG_NET_VENDOR_STMICRO) += stmicro/ 29obj-$(CONFIG_NET_VENDOR_STMICRO) += stmicro/
diff --git a/drivers/net/ethernet/realtek/8139cp.c b/drivers/net/ethernet/realtek/8139cp.c
new file mode 100644
index 000000000000..cc4c210a91f8
--- /dev/null
+++ b/drivers/net/ethernet/realtek/8139cp.c
@@ -0,0 +1,2064 @@
1/* 8139cp.c: A Linux PCI Ethernet driver for the RealTek 8139C+ chips. */
2/*
3 Copyright 2001-2004 Jeff Garzik <jgarzik@pobox.com>
4
5 Copyright (C) 2001, 2002 David S. Miller (davem@redhat.com) [tg3.c]
6 Copyright (C) 2000, 2001 David S. Miller (davem@redhat.com) [sungem.c]
7 Copyright 2001 Manfred Spraul [natsemi.c]
8 Copyright 1999-2001 by Donald Becker. [natsemi.c]
9 Written 1997-2001 by Donald Becker. [8139too.c]
10 Copyright 1998-2001 by Jes Sorensen, <jes@trained-monkey.org>. [acenic.c]
11
12 This software may be used and distributed according to the terms of
13 the GNU General Public License (GPL), incorporated herein by reference.
14 Drivers based on or derived from this code fall under the GPL and must
15 retain the authorship, copyright and license notice. This file is not
16 a complete program and may only be used when the entire operating
17 system is licensed under the GPL.
18
19 See the file COPYING in this distribution for more information.
20
21 Contributors:
22
23 Wake-on-LAN support - Felipe Damasio <felipewd@terra.com.br>
24 PCI suspend/resume - Felipe Damasio <felipewd@terra.com.br>
25 LinkChg interrupt - Felipe Damasio <felipewd@terra.com.br>
26
27 TODO:
28 * Test Tx checksumming thoroughly
29
30 Low priority TODO:
31 * Complete reset on PciErr
32 * Consider Rx interrupt mitigation using TimerIntr
33 * Investigate using skb->priority with h/w VLAN priority
34 * Investigate using High Priority Tx Queue with skb->priority
35 * Adjust Rx FIFO threshold and Max Rx DMA burst on Rx FIFO error
36 * Adjust Tx FIFO threshold and Max Tx DMA burst on Tx FIFO error
37 * Implement Tx software interrupt mitigation via
38 Tx descriptor bit
39 * The real minimum of CP_MIN_MTU is 4 bytes. However,
40 for this to be supported, one must(?) turn on packet padding.
41 * Support external MII transceivers (patch available)
42
43 NOTES:
44 * TX checksumming is considered experimental. It is off by
45 default, use ethtool to turn it on.
46
47 */
48
49#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
50
51#define DRV_NAME "8139cp"
52#define DRV_VERSION "1.3"
53#define DRV_RELDATE "Mar 22, 2004"
54
55
56#include <linux/module.h>
57#include <linux/moduleparam.h>
58#include <linux/kernel.h>
59#include <linux/compiler.h>
60#include <linux/netdevice.h>
61#include <linux/etherdevice.h>
62#include <linux/init.h>
63#include <linux/interrupt.h>
64#include <linux/pci.h>
65#include <linux/dma-mapping.h>
66#include <linux/delay.h>
67#include <linux/ethtool.h>
68#include <linux/gfp.h>
69#include <linux/mii.h>
70#include <linux/if_vlan.h>
71#include <linux/crc32.h>
72#include <linux/in.h>
73#include <linux/ip.h>
74#include <linux/tcp.h>
75#include <linux/udp.h>
76#include <linux/cache.h>
77#include <asm/io.h>
78#include <asm/irq.h>
79#include <asm/uaccess.h>
80
81/* These identify the driver base version and may not be removed. */
82static char version[] =
83DRV_NAME ": 10/100 PCI Ethernet driver v" DRV_VERSION " (" DRV_RELDATE ")\n";
84
85MODULE_AUTHOR("Jeff Garzik <jgarzik@pobox.com>");
86MODULE_DESCRIPTION("RealTek RTL-8139C+ series 10/100 PCI Ethernet driver");
87MODULE_VERSION(DRV_VERSION);
88MODULE_LICENSE("GPL");
89
90static int debug = -1;
91module_param(debug, int, 0);
92MODULE_PARM_DESC (debug, "8139cp: bitmapped message enable number");
93
94/* Maximum number of multicast addresses to filter (vs. Rx-all-multicast).
95 The RTL chips use a 64 element hash table based on the Ethernet CRC. */
96static int multicast_filter_limit = 32;
97module_param(multicast_filter_limit, int, 0);
98MODULE_PARM_DESC (multicast_filter_limit, "8139cp: maximum number of filtered multicast addresses");
99
100#define CP_DEF_MSG_ENABLE (NETIF_MSG_DRV | \
101 NETIF_MSG_PROBE | \
102 NETIF_MSG_LINK)
103#define CP_NUM_STATS 14 /* struct cp_dma_stats, plus one */
104#define CP_STATS_SIZE 64 /* size in bytes of DMA stats block */
105#define CP_REGS_SIZE (0xff + 1)
106#define CP_REGS_VER 1 /* version 1 */
107#define CP_RX_RING_SIZE 64
108#define CP_TX_RING_SIZE 64
109#define CP_RING_BYTES \
110 ((sizeof(struct cp_desc) * CP_RX_RING_SIZE) + \
111 (sizeof(struct cp_desc) * CP_TX_RING_SIZE) + \
112 CP_STATS_SIZE)
113#define NEXT_TX(N) (((N) + 1) & (CP_TX_RING_SIZE - 1))
114#define NEXT_RX(N) (((N) + 1) & (CP_RX_RING_SIZE - 1))
115#define TX_BUFFS_AVAIL(CP) \
116 (((CP)->tx_tail <= (CP)->tx_head) ? \
117 (CP)->tx_tail + (CP_TX_RING_SIZE - 1) - (CP)->tx_head : \
118 (CP)->tx_tail - (CP)->tx_head - 1)
119
120#define PKT_BUF_SZ 1536 /* Size of each temporary Rx buffer.*/
121#define CP_INTERNAL_PHY 32
122
123/* The following settings are log_2(bytes)-4: 0 == 16 bytes .. 6==1024, 7==end of packet. */
124#define RX_FIFO_THRESH 5 /* Rx buffer level before first PCI xfer. */
125#define RX_DMA_BURST 4 /* Maximum PCI burst, '4' is 256 */
126#define TX_DMA_BURST 6 /* Maximum PCI burst, '6' is 1024 */
127#define TX_EARLY_THRESH 256 /* Early Tx threshold, in bytes */
128
129/* Time in jiffies before concluding the transmitter is hung. */
130#define TX_TIMEOUT (6*HZ)
131
132/* hardware minimum and maximum for a single frame's data payload */
133#define CP_MIN_MTU 60 /* TODO: allow lower, but pad */
134#define CP_MAX_MTU 4096
135
136enum {
137 /* NIC register offsets */
138 MAC0 = 0x00, /* Ethernet hardware address. */
139 MAR0 = 0x08, /* Multicast filter. */
140 StatsAddr = 0x10, /* 64-bit start addr of 64-byte DMA stats blk */
141 TxRingAddr = 0x20, /* 64-bit start addr of Tx ring */
142 HiTxRingAddr = 0x28, /* 64-bit start addr of high priority Tx ring */
143 Cmd = 0x37, /* Command register */
144 IntrMask = 0x3C, /* Interrupt mask */
145 IntrStatus = 0x3E, /* Interrupt status */
146 TxConfig = 0x40, /* Tx configuration */
147 ChipVersion = 0x43, /* 8-bit chip version, inside TxConfig */
148 RxConfig = 0x44, /* Rx configuration */
149 RxMissed = 0x4C, /* 24 bits valid, write clears */
150 Cfg9346 = 0x50, /* EEPROM select/control; Cfg reg [un]lock */
151 Config1 = 0x52, /* Config1 */
152 Config3 = 0x59, /* Config3 */
153 Config4 = 0x5A, /* Config4 */
154 MultiIntr = 0x5C, /* Multiple interrupt select */
155 BasicModeCtrl = 0x62, /* MII BMCR */
156 BasicModeStatus = 0x64, /* MII BMSR */
157 NWayAdvert = 0x66, /* MII ADVERTISE */
158 NWayLPAR = 0x68, /* MII LPA */
159 NWayExpansion = 0x6A, /* MII Expansion */
160 Config5 = 0xD8, /* Config5 */
161 TxPoll = 0xD9, /* Tell chip to check Tx descriptors for work */
162 RxMaxSize = 0xDA, /* Max size of an Rx packet (8169 only) */
163 CpCmd = 0xE0, /* C+ Command register (C+ mode only) */
164 IntrMitigate = 0xE2, /* rx/tx interrupt mitigation control */
165 RxRingAddr = 0xE4, /* 64-bit start addr of Rx ring */
166 TxThresh = 0xEC, /* Early Tx threshold */
167 OldRxBufAddr = 0x30, /* DMA address of Rx ring buffer (C mode) */
168 OldTSD0 = 0x10, /* DMA address of first Tx desc (C mode) */
169
170 /* Tx and Rx status descriptors */
171 DescOwn = (1 << 31), /* Descriptor is owned by NIC */
172 RingEnd = (1 << 30), /* End of descriptor ring */
173 FirstFrag = (1 << 29), /* First segment of a packet */
174 LastFrag = (1 << 28), /* Final segment of a packet */
175 LargeSend = (1 << 27), /* TCP Large Send Offload (TSO) */
176 MSSShift = 16, /* MSS value position */
177 MSSMask = 0xfff, /* MSS value: 11 bits */
178 TxError = (1 << 23), /* Tx error summary */
179 RxError = (1 << 20), /* Rx error summary */
180 IPCS = (1 << 18), /* Calculate IP checksum */
181 UDPCS = (1 << 17), /* Calculate UDP/IP checksum */
182 TCPCS = (1 << 16), /* Calculate TCP/IP checksum */
183 TxVlanTag = (1 << 17), /* Add VLAN tag */
184 RxVlanTagged = (1 << 16), /* Rx VLAN tag available */
185 IPFail = (1 << 15), /* IP checksum failed */
186 UDPFail = (1 << 14), /* UDP/IP checksum failed */
187 TCPFail = (1 << 13), /* TCP/IP checksum failed */
188 NormalTxPoll = (1 << 6), /* One or more normal Tx packets to send */
189 PID1 = (1 << 17), /* 2 protocol id bits: 0==non-IP, */
190 PID0 = (1 << 16), /* 1==UDP/IP, 2==TCP/IP, 3==IP */
191 RxProtoTCP = 1,
192 RxProtoUDP = 2,
193 RxProtoIP = 3,
194 TxFIFOUnder = (1 << 25), /* Tx FIFO underrun */
195 TxOWC = (1 << 22), /* Tx Out-of-window collision */
196 TxLinkFail = (1 << 21), /* Link failed during Tx of packet */
197 TxMaxCol = (1 << 20), /* Tx aborted due to excessive collisions */
198 TxColCntShift = 16, /* Shift, to get 4-bit Tx collision cnt */
199 TxColCntMask = 0x01 | 0x02 | 0x04 | 0x08, /* 4-bit collision count */
200 RxErrFrame = (1 << 27), /* Rx frame alignment error */
201 RxMcast = (1 << 26), /* Rx multicast packet rcv'd */
202 RxErrCRC = (1 << 18), /* Rx CRC error */
203 RxErrRunt = (1 << 19), /* Rx error, packet < 64 bytes */
204 RxErrLong = (1 << 21), /* Rx error, packet > 4096 bytes */
205 RxErrFIFO = (1 << 22), /* Rx error, FIFO overflowed, pkt bad */
206
207 /* StatsAddr register */
208 DumpStats = (1 << 3), /* Begin stats dump */
209
210 /* RxConfig register */
211 RxCfgFIFOShift = 13, /* Shift, to get Rx FIFO thresh value */
212 RxCfgDMAShift = 8, /* Shift, to get Rx Max DMA value */
213 AcceptErr = 0x20, /* Accept packets with CRC errors */
214 AcceptRunt = 0x10, /* Accept runt (<64 bytes) packets */
215 AcceptBroadcast = 0x08, /* Accept broadcast packets */
216 AcceptMulticast = 0x04, /* Accept multicast packets */
217 AcceptMyPhys = 0x02, /* Accept pkts with our MAC as dest */
218 AcceptAllPhys = 0x01, /* Accept all pkts w/ physical dest */
219
220 /* IntrMask / IntrStatus registers */
221 PciErr = (1 << 15), /* System error on the PCI bus */
222 TimerIntr = (1 << 14), /* Asserted when TCTR reaches TimerInt value */
223 LenChg = (1 << 13), /* Cable length change */
224 SWInt = (1 << 8), /* Software-requested interrupt */
225 TxEmpty = (1 << 7), /* No Tx descriptors available */
226 RxFIFOOvr = (1 << 6), /* Rx FIFO Overflow */
227 LinkChg = (1 << 5), /* Packet underrun, or link change */
228 RxEmpty = (1 << 4), /* No Rx descriptors available */
229 TxErr = (1 << 3), /* Tx error */
230 TxOK = (1 << 2), /* Tx packet sent */
231 RxErr = (1 << 1), /* Rx error */
232 RxOK = (1 << 0), /* Rx packet received */
233 IntrResvd = (1 << 10), /* reserved, according to RealTek engineers,
234 but hardware likes to raise it */
235
236 IntrAll = PciErr | TimerIntr | LenChg | SWInt | TxEmpty |
237 RxFIFOOvr | LinkChg | RxEmpty | TxErr | TxOK |
238 RxErr | RxOK | IntrResvd,
239
240 /* C mode command register */
241 CmdReset = (1 << 4), /* Enable to reset; self-clearing */
242 RxOn = (1 << 3), /* Rx mode enable */
243 TxOn = (1 << 2), /* Tx mode enable */
244
245 /* C+ mode command register */
246 RxVlanOn = (1 << 6), /* Rx VLAN de-tagging enable */
247 RxChkSum = (1 << 5), /* Rx checksum offload enable */
248 PCIDAC = (1 << 4), /* PCI Dual Address Cycle (64-bit PCI) */
249 PCIMulRW = (1 << 3), /* Enable PCI read/write multiple */
250 CpRxOn = (1 << 1), /* Rx mode enable */
251 CpTxOn = (1 << 0), /* Tx mode enable */
252
253 /* Cfg9436 EEPROM control register */
254 Cfg9346_Lock = 0x00, /* Lock ConfigX/MII register access */
255 Cfg9346_Unlock = 0xC0, /* Unlock ConfigX/MII register access */
256
257 /* TxConfig register */
258 IFG = (1 << 25) | (1 << 24), /* standard IEEE interframe gap */
259 TxDMAShift = 8, /* DMA burst value (0-7) is shift this many bits */
260
261 /* Early Tx Threshold register */
262 TxThreshMask = 0x3f, /* Mask bits 5-0 */
263 TxThreshMax = 2048, /* Max early Tx threshold */
264
265 /* Config1 register */
266 DriverLoaded = (1 << 5), /* Software marker, driver is loaded */
267 LWACT = (1 << 4), /* LWAKE active mode */
268 PMEnable = (1 << 0), /* Enable various PM features of chip */
269
270 /* Config3 register */
271 PARMEnable = (1 << 6), /* Enable auto-loading of PHY parms */
272 MagicPacket = (1 << 5), /* Wake up when receives a Magic Packet */
273 LinkUp = (1 << 4), /* Wake up when the cable connection is re-established */
274
275 /* Config4 register */
276 LWPTN = (1 << 1), /* LWAKE Pattern */
277 LWPME = (1 << 4), /* LANWAKE vs PMEB */
278
279 /* Config5 register */
280 BWF = (1 << 6), /* Accept Broadcast wakeup frame */
281 MWF = (1 << 5), /* Accept Multicast wakeup frame */
282 UWF = (1 << 4), /* Accept Unicast wakeup frame */
283 LANWake = (1 << 1), /* Enable LANWake signal */
284 PMEStatus = (1 << 0), /* PME status can be reset by PCI RST# */
285
286 cp_norx_intr_mask = PciErr | LinkChg | TxOK | TxErr | TxEmpty,
287 cp_rx_intr_mask = RxOK | RxErr | RxEmpty | RxFIFOOvr,
288 cp_intr_mask = cp_rx_intr_mask | cp_norx_intr_mask,
289};
290
/* Fixed part of the RxConfig register value: Rx FIFO threshold and max
 * DMA burst.  The Accept* mode bits are OR'd in by __cp_set_rx_mode(). */
static const unsigned int cp_rx_config =
	  (RX_FIFO_THRESH << RxCfgFIFOShift) |
	  (RX_DMA_BURST << RxCfgDMAShift);
294
/* Hardware Tx/Rx descriptor, shared with the NIC via DMA (little-endian).
 * opts1 carries the status/command bits and the buffer/frame length,
 * opts2 the VLAN tag, addr the DMA address of the data buffer. */
struct cp_desc {
	__le32		opts1;
	__le32		opts2;
	__le64		addr;
};
300
/* Layout of the hardware statistics block the chip DMAs to the address
 * programmed in StatsAddr (dump triggered by the DumpStats bit).
 * Field order must match the silicon; hence __packed. */
struct cp_dma_stats {
	__le64			tx_ok;
	__le64			rx_ok;
	__le64			tx_err;
	__le32			rx_err;
	__le16			rx_fifo;
	__le16			frame_align;
	__le32			tx_ok_1col;
	__le32			tx_ok_mcol;
	__le64			rx_ok_phys;
	__le64			rx_ok_bcast;
	__le32			rx_ok_mcast;
	__le16			tx_abort;
	__le16			tx_underrun;
} __packed;
316
/* Driver-side counters with no hardware equivalent. */
struct cp_extra_stats {
	unsigned long		rx_frags;	/* fragmented Rx frames dropped */
};
320
/* Per-adapter driver state.  `lock' serializes register access and the
 * Tx ring between the xmit path and the interrupt handler; the Rx side
 * runs only from NAPI poll context. */
struct cp_private {
	void			__iomem *regs;	/* mapped MMIO register window */
	struct net_device	*dev;
	spinlock_t		lock;
	u32			msg_enable;

	struct napi_struct	napi;

	struct pci_dev		*pdev;
	u32			rx_config;	/* last value written to RxConfig */
	u16			cpcmd;		/* cached CpCmd register value */

	struct cp_extra_stats	cp_stats;

	/* Rx and Tx ring state live on separate cache lines to avoid
	 * false sharing between the poll and xmit paths. */
	unsigned		rx_head		____cacheline_aligned;
	unsigned		rx_tail;
	struct cp_desc		*rx_ring;
	struct sk_buff		*rx_skb[CP_RX_RING_SIZE];

	unsigned		tx_head		____cacheline_aligned;
	unsigned		tx_tail;
	struct cp_desc		*tx_ring;
	struct sk_buff		*tx_skb[CP_TX_RING_SIZE];

	unsigned		rx_buf_sz;	/* size of each Rx buffer */
	unsigned		wol_enabled : 1; /* Is Wake-on-LAN enabled? */

	dma_addr_t		ring_dma;	/* DMA address of the ring block */

	struct mii_if_info	mii_if;
};
352
/* MMIO register accessors; all expect a local `cp' (struct cp_private *)
 * in scope.  The *_f variants read the register back after writing to
 * flush posted PCI writes. */
#define cpr8(reg)	readb(cp->regs + (reg))
#define cpr16(reg)	readw(cp->regs + (reg))
#define cpr32(reg)	readl(cp->regs + (reg))
#define cpw8(reg,val)	writeb((val), cp->regs + (reg))
#define cpw16(reg,val)	writew((val), cp->regs + (reg))
#define cpw32(reg,val)	writel((val), cp->regs + (reg))
#define cpw8_f(reg,val) do {			\
	writeb((val), cp->regs + (reg));	\
	readb(cp->regs + (reg));		\
	} while (0)
#define cpw16_f(reg,val) do {			\
	writew((val), cp->regs + (reg));	\
	readw(cp->regs + (reg));		\
	} while (0)
#define cpw32_f(reg,val) do {			\
	writel((val), cp->regs + (reg));	\
	readl(cp->regs + (reg));		\
	} while (0)
371
372
373static void __cp_set_rx_mode (struct net_device *dev);
374static void cp_tx (struct cp_private *cp);
375static void cp_clean_rings (struct cp_private *cp);
376#ifdef CONFIG_NET_POLL_CONTROLLER
377static void cp_poll_controller(struct net_device *dev);
378#endif
379static int cp_get_eeprom_len(struct net_device *dev);
380static int cp_get_eeprom(struct net_device *dev,
381 struct ethtool_eeprom *eeprom, u8 *data);
382static int cp_set_eeprom(struct net_device *dev,
383 struct ethtool_eeprom *eeprom, u8 *data);
384
/* PCI IDs this driver binds to: the RealTek 8139 (C+ capable variants)
 * and the TTTech MC322, an 8139C+-based board. */
static DEFINE_PCI_DEVICE_TABLE(cp_pci_tbl) = {
	{ PCI_DEVICE(PCI_VENDOR_ID_REALTEK,	PCI_DEVICE_ID_REALTEK_8139), },
	{ PCI_DEVICE(PCI_VENDOR_ID_TTTECH,	PCI_DEVICE_ID_TTTECH_MC322), },
	{ },
};
MODULE_DEVICE_TABLE(pci, cp_pci_tbl);
391
/* Names reported for `ethtool -S'.  The first 13 entries mirror the
 * fields of struct cp_dma_stats in order, followed by the driver-side
 * rx_frags counter (CP_NUM_STATS total). */
static struct {
	const char str[ETH_GSTRING_LEN];
} ethtool_stats_keys[] = {
	{ "tx_ok" },
	{ "rx_ok" },
	{ "tx_err" },
	{ "rx_err" },
	{ "rx_fifo" },
	{ "frame_align" },
	{ "tx_ok_1col" },
	{ "tx_ok_mcol" },
	{ "rx_ok_phys" },
	{ "rx_ok_bcast" },
	{ "rx_ok_mcast" },
	{ "tx_abort" },
	{ "tx_underrun" },
	{ "rx_frags" },
};
410
411
412static inline void cp_set_rxbufsize (struct cp_private *cp)
413{
414 unsigned int mtu = cp->dev->mtu;
415
416 if (mtu > ETH_DATA_LEN)
417 /* MTU + ethernet header + FCS + optional VLAN tag */
418 cp->rx_buf_sz = mtu + ETH_HLEN + 8;
419 else
420 cp->rx_buf_sz = PKT_BUF_SZ;
421}
422
/* Deliver one fully-received frame to the stack: bump Rx counters,
 * restore the hardware-extracted VLAN tag (if present) and hand the skb
 * up through GRO.  Runs in NAPI poll context. */
static inline void cp_rx_skb (struct cp_private *cp, struct sk_buff *skb,
			      struct cp_desc *desc)
{
	u32 opts2 = le32_to_cpu(desc->opts2);

	skb->protocol = eth_type_trans (skb, cp->dev);

	cp->dev->stats.rx_packets++;
	cp->dev->stats.rx_bytes += skb->len;

	/* tag sits byte-swapped in the low 16 bits of opts2 */
	if (opts2 & RxVlanTagged)
		__vlan_hwaccel_put_tag(skb, swab16(opts2 & 0xffff));

	napi_gro_receive(&cp->napi, skb);
}
438
439static void cp_rx_err_acct (struct cp_private *cp, unsigned rx_tail,
440 u32 status, u32 len)
441{
442 netif_dbg(cp, rx_err, cp->dev, "rx err, slot %d status 0x%x len %d\n",
443 rx_tail, status, len);
444 cp->dev->stats.rx_errors++;
445 if (status & RxErrFrame)
446 cp->dev->stats.rx_frame_errors++;
447 if (status & RxErrCRC)
448 cp->dev->stats.rx_crc_errors++;
449 if ((status & RxErrRunt) || (status & RxErrLong))
450 cp->dev->stats.rx_length_errors++;
451 if ((status & (FirstFrag | LastFrag)) != (FirstFrag | LastFrag))
452 cp->dev->stats.rx_length_errors++;
453 if (status & RxErrFIFO)
454 cp->dev->stats.rx_fifo_errors++;
455}
456
457static inline unsigned int cp_rx_csum_ok (u32 status)
458{
459 unsigned int protocol = (status >> 16) & 0x3;
460
461 if (((protocol == RxProtoTCP) && !(status & TCPFail)) ||
462 ((protocol == RxProtoUDP) && !(status & UDPFail)))
463 return 1;
464 else
465 return 0;
466}
467
/* NAPI poll callback: reap up to @budget frames from the Rx ring,
 * refilling each consumed slot with a freshly allocated skb before
 * returning the descriptor to the NIC.  Rx interrupt sources are masked
 * while a poll is scheduled and re-enabled on completion. */
static int cp_rx_poll(struct napi_struct *napi, int budget)
{
	struct cp_private *cp = container_of(napi, struct cp_private, napi);
	struct net_device *dev = cp->dev;
	unsigned int rx_tail = cp->rx_tail;
	int rx;

rx_status_loop:
	rx = 0;
	/* ack the Rx interrupt causes we are about to service */
	cpw16(IntrStatus, cp_rx_intr_mask);

	while (1) {
		u32 status, len;
		dma_addr_t mapping;
		struct sk_buff *skb, *new_skb;
		struct cp_desc *desc;
		const unsigned buflen = cp->rx_buf_sz;

		skb = cp->rx_skb[rx_tail];
		BUG_ON(!skb);

		desc = &cp->rx_ring[rx_tail];
		status = le32_to_cpu(desc->opts1);
		if (status & DescOwn)		/* still owned by the NIC */
			break;

		len = (status & 0x1fff) - 4;	/* frame length less 4-byte CRC */
		mapping = le64_to_cpu(desc->addr);

		if ((status & (FirstFrag | LastFrag)) != (FirstFrag | LastFrag)) {
			/* we don't support incoming fragmented frames.
			 * instead, we attempt to ensure that the
			 * pre-allocated RX skbs are properly sized such
			 * that RX fragments are never encountered
			 */
			cp_rx_err_acct(cp, rx_tail, status, len);
			dev->stats.rx_dropped++;
			cp->cp_stats.rx_frags++;
			goto rx_next;
		}

		if (status & (RxError | RxErrFIFO)) {
			cp_rx_err_acct(cp, rx_tail, status, len);
			goto rx_next;
		}

		netif_dbg(cp, rx_status, dev, "rx slot %d status 0x%x len %d\n",
			  rx_tail, status, len);

		/* allocate the replacement buffer first; on failure keep
		 * the old skb in the ring and drop this frame instead */
		new_skb = netdev_alloc_skb_ip_align(dev, buflen);
		if (!new_skb) {
			dev->stats.rx_dropped++;
			goto rx_next;
		}

		dma_unmap_single(&cp->pdev->dev, mapping,
				 buflen, PCI_DMA_FROMDEVICE);

		/* Handle checksum offloading for incoming packets. */
		if (cp_rx_csum_ok(status))
			skb->ip_summed = CHECKSUM_UNNECESSARY;
		else
			skb_checksum_none_assert(skb);

		skb_put(skb, len);

		mapping = dma_map_single(&cp->pdev->dev, new_skb->data, buflen,
					 PCI_DMA_FROMDEVICE);
		cp->rx_skb[rx_tail] = new_skb;

		cp_rx_skb(cp, skb, desc);
		rx++;

rx_next:
		/* hand the slot back to the NIC; the last slot also
		 * carries the RingEnd marker */
		cp->rx_ring[rx_tail].opts2 = 0;
		cp->rx_ring[rx_tail].addr = cpu_to_le64(mapping);
		if (rx_tail == (CP_RX_RING_SIZE - 1))
			desc->opts1 = cpu_to_le32(DescOwn | RingEnd |
						  cp->rx_buf_sz);
		else
			desc->opts1 = cpu_to_le32(DescOwn | cp->rx_buf_sz);
		rx_tail = NEXT_RX(rx_tail);

		if (rx >= budget)
			break;
	}

	cp->rx_tail = rx_tail;

	/* if we did not reach work limit, then we're done with
	 * this round of polling
	 */
	if (rx < budget) {
		unsigned long flags;

		/* more Rx work raced in while we were polling? go again */
		if (cpr16(IntrStatus) & cp_rx_intr_mask)
			goto rx_status_loop;

		spin_lock_irqsave(&cp->lock, flags);
		__napi_complete(napi);
		cpw16_f(IntrMask, cp_intr_mask);	/* re-enable Rx irqs */
		spin_unlock_irqrestore(&cp->lock, flags);
	}

	return rx;
}
574
/* Interrupt handler: ack non-Rx causes, defer Rx work to NAPI, run Tx
 * completion and link-change handling inline under cp->lock, and report
 * (but not yet recover from) PCI bus errors. */
static irqreturn_t cp_interrupt (int irq, void *dev_instance)
{
	struct net_device *dev = dev_instance;
	struct cp_private *cp;
	u16 status;

	if (unlikely(dev == NULL))
		return IRQ_NONE;
	cp = netdev_priv(dev);

	status = cpr16(IntrStatus);
	if (!status || (status == 0xFFFF))	/* not ours, or card gone */
		return IRQ_NONE;

	netif_dbg(cp, intr, dev, "intr, status %04x cmd %02x cpcmd %04x\n",
		  status, cpr8(Cmd), cpr16(CpCmd));

	/* ack everything except the Rx bits, which the NAPI poll acks */
	cpw16(IntrStatus, status & ~cp_rx_intr_mask);

	spin_lock(&cp->lock);

	/* close possible race's with dev_close */
	if (unlikely(!netif_running(dev))) {
		cpw16(IntrMask, 0);
		spin_unlock(&cp->lock);
		return IRQ_HANDLED;
	}

	if (status & (RxOK | RxErr | RxEmpty | RxFIFOOvr))
		if (napi_schedule_prep(&cp->napi)) {
			/* mask Rx sources until the poll completes */
			cpw16_f(IntrMask, cp_norx_intr_mask);
			__napi_schedule(&cp->napi);
		}

	if (status & (TxOK | TxErr | TxEmpty | SWInt))
		cp_tx(cp);
	if (status & LinkChg)
		mii_check_media(&cp->mii_if, netif_msg_link(cp), false);

	spin_unlock(&cp->lock);

	if (status & PciErr) {
		u16 pci_status;

		/* write the status bits back to clear them */
		pci_read_config_word(cp->pdev, PCI_STATUS, &pci_status);
		pci_write_config_word(cp->pdev, PCI_STATUS, pci_status);
		netdev_err(dev, "PCI bus error, status=%04x, PCI status=%04x\n",
			   status, pci_status);

		/* TODO: reset hardware */
	}

	return IRQ_HANDLED;
}
629
#ifdef CONFIG_NET_POLL_CONTROLLER
/*
 * Polling receive - used by netconsole and other diagnostic tools
 * to allow network i/o with interrupts disabled.
 * Simply runs the normal interrupt handler with the device IRQ masked.
 */
static void cp_poll_controller(struct net_device *dev)
{
	disable_irq(dev->irq);
	cp_interrupt(dev->irq, dev);
	enable_irq(dev->irq);
}
#endif
642
/* Reclaim completed Tx descriptors: unmap each buffer, account errors or
 * packet/byte counters on the frame's final fragment, free the skb, and
 * wake the queue once enough ring space is free again.  Called from the
 * interrupt handler with cp->lock held. */
static void cp_tx (struct cp_private *cp)
{
	unsigned tx_head = cp->tx_head;
	unsigned tx_tail = cp->tx_tail;

	while (tx_tail != tx_head) {
		struct cp_desc *txd = cp->tx_ring + tx_tail;
		struct sk_buff *skb;
		u32 status;

		rmb();		/* order the DescOwn read vs. descriptor contents */
		status = le32_to_cpu(txd->opts1);
		if (status & DescOwn)	/* NIC not done with this one yet */
			break;

		skb = cp->tx_skb[tx_tail];
		BUG_ON(!skb);

		dma_unmap_single(&cp->pdev->dev, le64_to_cpu(txd->addr),
				 le32_to_cpu(txd->opts1) & 0xffff,
				 PCI_DMA_TODEVICE);

		/* only the last fragment of a frame carries the Tx status
		 * and owns the skb */
		if (status & LastFrag) {
			if (status & (TxError | TxFIFOUnder)) {
				netif_dbg(cp, tx_err, cp->dev,
					  "tx err, status 0x%x\n", status);
				cp->dev->stats.tx_errors++;
				if (status & TxOWC)
					cp->dev->stats.tx_window_errors++;
				if (status & TxMaxCol)
					cp->dev->stats.tx_aborted_errors++;
				if (status & TxLinkFail)
					cp->dev->stats.tx_carrier_errors++;
				if (status & TxFIFOUnder)
					cp->dev->stats.tx_fifo_errors++;
			} else {
				cp->dev->stats.collisions +=
					((status >> TxColCntShift) & TxColCntMask);
				cp->dev->stats.tx_packets++;
				cp->dev->stats.tx_bytes += skb->len;
				netif_dbg(cp, tx_done, cp->dev,
					  "tx done, slot %d\n", tx_tail);
			}
			dev_kfree_skb_irq(skb);
		}

		cp->tx_skb[tx_tail] = NULL;

		tx_tail = NEXT_TX(tx_tail);
	}

	cp->tx_tail = tx_tail;

	/* wake the queue when a maximally-fragmented skb would now fit */
	if (TX_BUFFS_AVAIL(cp) > (MAX_SKB_FRAGS + 1))
		netif_wake_queue(cp->dev);
}
699
700static inline u32 cp_tx_vlan_tag(struct sk_buff *skb)
701{
702 return vlan_tx_tag_present(skb) ?
703 TxVlanTag | swab16(vlan_tx_tag_get(skb)) : 0x00;
704}
705
/* Queue one skb for transmission.  Linear skbs take a single descriptor;
 * fragmented skbs take one descriptor per fragment, with the head
 * descriptor handed to the NIC last to avoid racing the device.  Sets up
 * TSO or checksum-offload bits as requested, stops the queue when the
 * ring can no longer hold a maximally-fragmented skb, and finally pokes
 * TxPoll to start the transmitter. */
static netdev_tx_t cp_start_xmit (struct sk_buff *skb,
				  struct net_device *dev)
{
	struct cp_private *cp = netdev_priv(dev);
	unsigned entry;
	u32 eor, flags;
	unsigned long intr_flags;
	__le32 opts2;
	int mss = 0;

	spin_lock_irqsave(&cp->lock, intr_flags);

	/* This is a hard error, log it. */
	if (TX_BUFFS_AVAIL(cp) <= (skb_shinfo(skb)->nr_frags + 1)) {
		netif_stop_queue(dev);
		spin_unlock_irqrestore(&cp->lock, intr_flags);
		netdev_err(dev, "BUG! Tx Ring full when queue awake!\n");
		return NETDEV_TX_BUSY;
	}

	entry = cp->tx_head;
	eor = (entry == (CP_TX_RING_SIZE - 1)) ? RingEnd : 0;
	mss = skb_shinfo(skb)->gso_size;	/* non-zero => TSO frame */

	opts2 = cpu_to_le32(cp_tx_vlan_tag(skb));

	if (skb_shinfo(skb)->nr_frags == 0) {
		/* linear skb: one descriptor carries the whole frame */
		struct cp_desc *txd = &cp->tx_ring[entry];
		u32 len;
		dma_addr_t mapping;

		len = skb->len;
		mapping = dma_map_single(&cp->pdev->dev, skb->data, len, PCI_DMA_TODEVICE);
		txd->opts2 = opts2;
		txd->addr = cpu_to_le64(mapping);
		wmb();

		flags = eor | len | DescOwn | FirstFrag | LastFrag;

		if (mss)
			flags |= LargeSend | ((mss & MSSMask) << MSSShift);
		else if (skb->ip_summed == CHECKSUM_PARTIAL) {
			const struct iphdr *ip = ip_hdr(skb);
			if (ip->protocol == IPPROTO_TCP)
				flags |= IPCS | TCPCS;
			else if (ip->protocol == IPPROTO_UDP)
				flags |= IPCS | UDPCS;
			else
				WARN_ON(1);	/* we need a WARN() */
		}

		/* publish opts1 (with DescOwn) only after the rest of the
		 * descriptor is visible to the device */
		txd->opts1 = cpu_to_le32(flags);
		wmb();

		cp->tx_skb[entry] = skb;
		entry = NEXT_TX(entry);
	} else {
		struct cp_desc *txd;
		u32 first_len, first_eor;
		dma_addr_t first_mapping;
		int frag, first_entry = entry;
		const struct iphdr *ip = ip_hdr(skb);

		/* We must give this initial chunk to the device last.
		 * Otherwise we could race with the device.
		 */
		first_eor = eor;
		first_len = skb_headlen(skb);
		first_mapping = dma_map_single(&cp->pdev->dev, skb->data,
					       first_len, PCI_DMA_TODEVICE);
		cp->tx_skb[entry] = skb;
		entry = NEXT_TX(entry);

		for (frag = 0; frag < skb_shinfo(skb)->nr_frags; frag++) {
			skb_frag_t *this_frag = &skb_shinfo(skb)->frags[frag];
			u32 len;
			u32 ctrl;
			dma_addr_t mapping;

			len = this_frag->size;
			mapping = dma_map_single(&cp->pdev->dev,
						 ((void *) page_address(this_frag->page) +
						  this_frag->page_offset),
						 len, PCI_DMA_TODEVICE);
			eor = (entry == (CP_TX_RING_SIZE - 1)) ? RingEnd : 0;

			ctrl = eor | len | DescOwn;

			if (mss)
				ctrl |= LargeSend |
					((mss & MSSMask) << MSSShift);
			else if (skb->ip_summed == CHECKSUM_PARTIAL) {
				if (ip->protocol == IPPROTO_TCP)
					ctrl |= IPCS | TCPCS;
				else if (ip->protocol == IPPROTO_UDP)
					ctrl |= IPCS | UDPCS;
				else
					BUG();
			}

			if (frag == skb_shinfo(skb)->nr_frags - 1)
				ctrl |= LastFrag;

			txd = &cp->tx_ring[entry];
			txd->opts2 = opts2;
			txd->addr = cpu_to_le64(mapping);
			wmb();

			txd->opts1 = cpu_to_le32(ctrl);
			wmb();

			cp->tx_skb[entry] = skb;
			entry = NEXT_TX(entry);
		}

		/* now arm the head descriptor (FirstFrag) — done last so
		 * the NIC cannot start before all fragments are set up */
		txd = &cp->tx_ring[first_entry];
		txd->opts2 = opts2;
		txd->addr = cpu_to_le64(first_mapping);
		wmb();

		if (skb->ip_summed == CHECKSUM_PARTIAL) {
			if (ip->protocol == IPPROTO_TCP)
				txd->opts1 = cpu_to_le32(first_eor | first_len |
							 FirstFrag | DescOwn |
							 IPCS | TCPCS);
			else if (ip->protocol == IPPROTO_UDP)
				txd->opts1 = cpu_to_le32(first_eor | first_len |
							 FirstFrag | DescOwn |
							 IPCS | UDPCS);
			else
				BUG();
		} else
			txd->opts1 = cpu_to_le32(first_eor | first_len |
						 FirstFrag | DescOwn);
		wmb();
	}
	cp->tx_head = entry;
	netif_dbg(cp, tx_queued, cp->dev, "tx queued, slot %d, skblen %d\n",
		  entry, skb->len);
	if (TX_BUFFS_AVAIL(cp) <= (MAX_SKB_FRAGS + 1))
		netif_stop_queue(dev);

	spin_unlock_irqrestore(&cp->lock, intr_flags);

	cpw8(TxPoll, NormalTxPoll);	/* kick the transmitter */

	return NETDEV_TX_OK;
}
854
/* Set or clear the multicast filter for this adaptor.
   This routine is not state sensitive and need not be SMP locked.
   Chooses between promiscuous, all-multicast and hash-filtered modes
   based on dev->flags and the multicast list length, then programs
   RxConfig (only if changed) and the MAR0 hash registers. */

static void __cp_set_rx_mode (struct net_device *dev)
{
	struct cp_private *cp = netdev_priv(dev);
	u32 mc_filter[2];	/* Multicast hash filter */
	int rx_mode;
	u32 tmp;

	/* Note: do not reorder, GCC is clever about common statements. */
	if (dev->flags & IFF_PROMISC) {
		/* Unconditionally log net taps. */
		rx_mode =
		    AcceptBroadcast | AcceptMulticast | AcceptMyPhys |
		    AcceptAllPhys;
		mc_filter[1] = mc_filter[0] = 0xffffffff;
	} else if ((netdev_mc_count(dev) > multicast_filter_limit) ||
		   (dev->flags & IFF_ALLMULTI)) {
		/* Too many to filter perfectly -- accept all multicasts. */
		rx_mode = AcceptBroadcast | AcceptMulticast | AcceptMyPhys;
		mc_filter[1] = mc_filter[0] = 0xffffffff;
	} else {
		struct netdev_hw_addr *ha;
		rx_mode = AcceptBroadcast | AcceptMyPhys;
		mc_filter[1] = mc_filter[0] = 0;
		/* 64-bin hash filter indexed by the top 6 CRC bits */
		netdev_for_each_mc_addr(ha, dev) {
			int bit_nr = ether_crc(ETH_ALEN, ha->addr) >> 26;

			mc_filter[bit_nr >> 5] |= 1 << (bit_nr & 31);
			rx_mode |= AcceptMulticast;
		}
	}

	/* We can safely update without stopping the chip. */
	tmp = cp_rx_config | rx_mode;
	if (cp->rx_config != tmp) {
		cpw32_f (RxConfig, tmp);
		cp->rx_config = tmp;
	}
	cpw32_f (MAR0 + 0, mc_filter[0]);
	cpw32_f (MAR0 + 4, mc_filter[1]);
}
898
899static void cp_set_rx_mode (struct net_device *dev)
900{
901 unsigned long flags;
902 struct cp_private *cp = netdev_priv(dev);
903
904 spin_lock_irqsave (&cp->lock, flags);
905 __cp_set_rx_mode(dev);
906 spin_unlock_irqrestore (&cp->lock, flags);
907}
908
909static void __cp_get_stats(struct cp_private *cp)
910{
911 /* only lower 24 bits valid; write any value to clear */
912 cp->dev->stats.rx_missed_errors += (cpr32 (RxMissed) & 0xffffff);
913 cpw32 (RxMissed, 0);
914}
915
916static struct net_device_stats *cp_get_stats(struct net_device *dev)
917{
918 struct cp_private *cp = netdev_priv(dev);
919 unsigned long flags;
920
921 /* The chip only need report frame silently dropped. */
922 spin_lock_irqsave(&cp->lock, flags);
923 if (netif_running(dev) && netif_device_present(dev))
924 __cp_get_stats(cp);
925 spin_unlock_irqrestore(&cp->lock, flags);
926
927 return &dev->stats;
928}
929
/* Quiesce the chip: ack and mask all interrupts, stop the RX/TX engines,
 * and reset the driver's software ring cursors.
 */
static void cp_stop_hw (struct cp_private *cp)
{
	/* Ack anything pending, then mask every interrupt source. */
	cpw16(IntrStatus, ~(cpr16(IntrStatus)));
	cpw16_f(IntrMask, 0);
	cpw8(Cmd, 0);
	cpw16_f(CpCmd, 0);
	/* Ack again: stopping the engines may have latched new status. */
	cpw16_f(IntrStatus, ~(cpr16(IntrStatus)));

	cp->rx_tail = 0;
	cp->tx_head = cp->tx_tail = 0;
}
941
942static void cp_reset_hw (struct cp_private *cp)
943{
944 unsigned work = 1000;
945
946 cpw8(Cmd, CmdReset);
947
948 while (work--) {
949 if (!(cpr8(Cmd) & CmdReset))
950 return;
951
952 schedule_timeout_uninterruptible(10);
953 }
954
955 netdev_err(cp->dev, "hardware reset timeout\n");
956}
957
/* Re-enable the RX/TX engines using the cached CpCmd mode bits. */
static inline void cp_start_hw (struct cp_private *cp)
{
	cpw16(CpCmd, cp->cpcmd);
	cpw8(Cmd, RxOn | TxOn);
}
963
/* Full hardware (re)initialization: reset, program MAC address, RX
 * filter, TX config, power-management bits, and the ring base addresses,
 * then unmask interrupts.  Register write order is deliberate.
 */
static void cp_init_hw (struct cp_private *cp)
{
	struct net_device *dev = cp->dev;
	dma_addr_t ring_dma;

	cp_reset_hw(cp);

	/* Config registers are write-protected until unlocked. */
	cpw8_f (Cfg9346, Cfg9346_Unlock);

	/* Restore our idea of the MAC address. */
	cpw32_f (MAC0 + 0, le32_to_cpu (*(__le32 *) (dev->dev_addr + 0)));
	cpw32_f (MAC0 + 4, le32_to_cpu (*(__le32 *) (dev->dev_addr + 4)));

	cp_start_hw(cp);
	cpw8(TxThresh, 0x06); /* XXX convert magic num to a constant */

	__cp_set_rx_mode(dev);
	cpw32_f (TxConfig, IFG | (TX_DMA_BURST << TxDMAShift));

	cpw8(Config1, cpr8(Config1) | DriverLoaded | PMEnable);
	/* Disable Wake-on-LAN. Can be turned on with ETHTOOL_SWOL */
	cpw8(Config3, PARMEnable);
	cp->wol_enabled = 0;

	cpw8(Config5, cpr8(Config5) & PMEStatus);

	/* High TX ring address is unused by this driver. */
	cpw32_f(HiTxRingAddr, 0);
	cpw32_f(HiTxRingAddr + 4, 0);

	/* RX descriptors start the coherent block; TX descriptors follow. */
	ring_dma = cp->ring_dma;
	cpw32_f(RxRingAddr, ring_dma & 0xffffffff);
	cpw32_f(RxRingAddr + 4, (ring_dma >> 16) >> 16);

	ring_dma += sizeof(struct cp_desc) * CP_RX_RING_SIZE;
	cpw32_f(TxRingAddr, ring_dma & 0xffffffff);
	cpw32_f(TxRingAddr + 4, (ring_dma >> 16) >> 16);

	cpw16(MultiIntr, 0);

	cpw16_f(IntrMask, cp_intr_mask);

	cpw8_f(Cfg9346, Cfg9346_Lock);
}
1007
1008static int cp_refill_rx(struct cp_private *cp)
1009{
1010 struct net_device *dev = cp->dev;
1011 unsigned i;
1012
1013 for (i = 0; i < CP_RX_RING_SIZE; i++) {
1014 struct sk_buff *skb;
1015 dma_addr_t mapping;
1016
1017 skb = netdev_alloc_skb_ip_align(dev, cp->rx_buf_sz);
1018 if (!skb)
1019 goto err_out;
1020
1021 mapping = dma_map_single(&cp->pdev->dev, skb->data,
1022 cp->rx_buf_sz, PCI_DMA_FROMDEVICE);
1023 cp->rx_skb[i] = skb;
1024
1025 cp->rx_ring[i].opts2 = 0;
1026 cp->rx_ring[i].addr = cpu_to_le64(mapping);
1027 if (i == (CP_RX_RING_SIZE - 1))
1028 cp->rx_ring[i].opts1 =
1029 cpu_to_le32(DescOwn | RingEnd | cp->rx_buf_sz);
1030 else
1031 cp->rx_ring[i].opts1 =
1032 cpu_to_le32(DescOwn | cp->rx_buf_sz);
1033 }
1034
1035 return 0;
1036
1037err_out:
1038 cp_clean_rings(cp);
1039 return -ENOMEM;
1040}
1041
/* Reset the software ring cursors; descriptor contents are untouched. */
static void cp_init_rings_index (struct cp_private *cp)
{
	cp->rx_tail = 0;
	cp->tx_head = cp->tx_tail = 0;
}
1047
1048static int cp_init_rings (struct cp_private *cp)
1049{
1050 memset(cp->tx_ring, 0, sizeof(struct cp_desc) * CP_TX_RING_SIZE);
1051 cp->tx_ring[CP_TX_RING_SIZE - 1].opts1 = cpu_to_le32(RingEnd);
1052
1053 cp_init_rings_index(cp);
1054
1055 return cp_refill_rx (cp);
1056}
1057
1058static int cp_alloc_rings (struct cp_private *cp)
1059{
1060 void *mem;
1061
1062 mem = dma_alloc_coherent(&cp->pdev->dev, CP_RING_BYTES,
1063 &cp->ring_dma, GFP_KERNEL);
1064 if (!mem)
1065 return -ENOMEM;
1066
1067 cp->rx_ring = mem;
1068 cp->tx_ring = &cp->rx_ring[CP_RX_RING_SIZE];
1069
1070 return cp_init_rings(cp);
1071}
1072
/* Unmap and free every buffer still attached to the RX/TX rings, then
 * zero both descriptor arrays and the skb bookkeeping tables.  The
 * hardware must already be stopped.
 */
static void cp_clean_rings (struct cp_private *cp)
{
	struct cp_desc *desc;
	unsigned i;

	for (i = 0; i < CP_RX_RING_SIZE; i++) {
		if (cp->rx_skb[i]) {
			desc = cp->rx_ring + i;
			dma_unmap_single(&cp->pdev->dev,le64_to_cpu(desc->addr),
					 cp->rx_buf_sz, PCI_DMA_FROMDEVICE);
			dev_kfree_skb(cp->rx_skb[i]);
		}
	}

	for (i = 0; i < CP_TX_RING_SIZE; i++) {
		if (cp->tx_skb[i]) {
			struct sk_buff *skb = cp->tx_skb[i];

			/* Low 16 bits of opts1 hold the fragment length. */
			desc = cp->tx_ring + i;
			dma_unmap_single(&cp->pdev->dev,le64_to_cpu(desc->addr),
					 le32_to_cpu(desc->opts1) & 0xffff,
					 PCI_DMA_TODEVICE);
			/* The skb is shared across all its fragments; free it
			 * only on the LastFrag slot.  NOTE(review): tx_dropped
			 * is bumped once per descriptor, not per packet —
			 * confirm whether that over-counts multi-frag skbs.
			 */
			if (le32_to_cpu(desc->opts1) & LastFrag)
				dev_kfree_skb(skb);
			cp->dev->stats.tx_dropped++;
		}
	}

	memset(cp->rx_ring, 0, sizeof(struct cp_desc) * CP_RX_RING_SIZE);
	memset(cp->tx_ring, 0, sizeof(struct cp_desc) * CP_TX_RING_SIZE);

	memset(cp->rx_skb, 0, sizeof(struct sk_buff *) * CP_RX_RING_SIZE);
	memset(cp->tx_skb, 0, sizeof(struct sk_buff *) * CP_TX_RING_SIZE);
}
1107
/* Release all ring buffers, then free the coherent descriptor block. */
static void cp_free_rings (struct cp_private *cp)
{
	cp_clean_rings(cp);
	dma_free_coherent(&cp->pdev->dev, CP_RING_BYTES, cp->rx_ring,
			  cp->ring_dma);
	cp->rx_ring = NULL;
	cp->tx_ring = NULL;
}
1116
/* ndo_open: allocate rings, enable NAPI, bring up the chip, and only
 * then request the (shared) IRQ.  On IRQ failure everything is unwound.
 */
static int cp_open (struct net_device *dev)
{
	struct cp_private *cp = netdev_priv(dev);
	int rc;

	netif_dbg(cp, ifup, dev, "enabling interface\n");

	rc = cp_alloc_rings(cp);
	if (rc)
		return rc;

	napi_enable(&cp->napi);

	cp_init_hw(cp);

	rc = request_irq(dev->irq, cp_interrupt, IRQF_SHARED, dev->name, dev);
	if (rc)
		goto err_out_hw;

	/* Start with carrier off; mii_check_media() raises it if link is up. */
	netif_carrier_off(dev);
	mii_check_media(&cp->mii_if, netif_msg_link(cp), true);
	netif_start_queue(dev);

	return 0;

err_out_hw:
	napi_disable(&cp->napi);
	cp_stop_hw(cp);
	cp_free_rings(cp);
	return rc;
}
1148
/* ndo_stop: disable NAPI, quiesce the chip under the lock, then release
 * the IRQ and free the rings.
 */
static int cp_close (struct net_device *dev)
{
	struct cp_private *cp = netdev_priv(dev);
	unsigned long flags;

	/* NAPI must be disabled outside the spinlock (it may sleep). */
	napi_disable(&cp->napi);

	netif_dbg(cp, ifdown, dev, "disabling interface\n");

	spin_lock_irqsave(&cp->lock, flags);

	netif_stop_queue(dev);
	netif_carrier_off(dev);

	cp_stop_hw(cp);

	spin_unlock_irqrestore(&cp->lock, flags);

	free_irq(dev->irq, dev);

	cp_free_rings(cp);
	return 0;
}
1172
1173static void cp_tx_timeout(struct net_device *dev)
1174{
1175 struct cp_private *cp = netdev_priv(dev);
1176 unsigned long flags;
1177 int rc;
1178
1179 netdev_warn(dev, "Transmit timeout, status %2x %4x %4x %4x\n",
1180 cpr8(Cmd), cpr16(CpCmd),
1181 cpr16(IntrStatus), cpr16(IntrMask));
1182
1183 spin_lock_irqsave(&cp->lock, flags);
1184
1185 cp_stop_hw(cp);
1186 cp_clean_rings(cp);
1187 rc = cp_init_rings(cp);
1188 cp_start_hw(cp);
1189
1190 netif_wake_queue(dev);
1191
1192 spin_unlock_irqrestore(&cp->lock, flags);
1193}
1194
#ifdef BROKEN
/* ndo_change_mtu: validate against hardware limits and, if the interface
 * is running, rebuild the rings with the new RX buffer size.
 * Compiled out (BROKEN) — see the matching #ifdef in cp_netdev_ops.
 */
static int cp_change_mtu(struct net_device *dev, int new_mtu)
{
	struct cp_private *cp = netdev_priv(dev);
	int rc;
	unsigned long flags;

	/* check for invalid MTU, according to hardware limits */
	if (new_mtu < CP_MIN_MTU || new_mtu > CP_MAX_MTU)
		return -EINVAL;

	/* if network interface not up, no need for complexity */
	if (!netif_running(dev)) {
		dev->mtu = new_mtu;
		cp_set_rxbufsize(cp);	/* set new rx buf size */
		return 0;
	}

	spin_lock_irqsave(&cp->lock, flags);

	cp_stop_hw(cp);			/* stop h/w and free rings */
	cp_clean_rings(cp);

	dev->mtu = new_mtu;
	cp_set_rxbufsize(cp);		/* set new rx buf size */

	rc = cp_init_rings(cp);		/* realloc and restart h/w */
	cp_start_hw(cp);

	spin_unlock_irqrestore(&cp->lock, flags);

	return rc;
}
#endif /* BROKEN */
1229
/* Map generic MII register numbers 0-7 to 8139C+ register offsets;
 * zero entries have no hardware equivalent.
 */
static const char mii_2_8139_map[8] = {
	BasicModeCtrl,
	BasicModeStatus,
	0,
	0,
	NWayAdvert,
	NWayLPAR,
	NWayExpansion,
	0
};
1240
1241static int mdio_read(struct net_device *dev, int phy_id, int location)
1242{
1243 struct cp_private *cp = netdev_priv(dev);
1244
1245 return location < 8 && mii_2_8139_map[location] ?
1246 readw(cp->regs + mii_2_8139_map[location]) : 0;
1247}
1248
1249
1250static void mdio_write(struct net_device *dev, int phy_id, int location,
1251 int value)
1252{
1253 struct cp_private *cp = netdev_priv(dev);
1254
1255 if (location == 0) {
1256 cpw8(Cfg9346, Cfg9346_Unlock);
1257 cpw16(BasicModeCtrl, value);
1258 cpw8(Cfg9346, Cfg9346_Lock);
1259 } else if (location < 8 && mii_2_8139_map[location])
1260 cpw16(mii_2_8139_map[location], value);
1261}
1262
1263/* Set the ethtool Wake-on-LAN settings */
1264static int netdev_set_wol (struct cp_private *cp,
1265 const struct ethtool_wolinfo *wol)
1266{
1267 u8 options;
1268
1269 options = cpr8 (Config3) & ~(LinkUp | MagicPacket);
1270 /* If WOL is being disabled, no need for complexity */
1271 if (wol->wolopts) {
1272 if (wol->wolopts & WAKE_PHY) options |= LinkUp;
1273 if (wol->wolopts & WAKE_MAGIC) options |= MagicPacket;
1274 }
1275
1276 cpw8 (Cfg9346, Cfg9346_Unlock);
1277 cpw8 (Config3, options);
1278 cpw8 (Cfg9346, Cfg9346_Lock);
1279
1280 options = 0; /* Paranoia setting */
1281 options = cpr8 (Config5) & ~(UWF | MWF | BWF);
1282 /* If WOL is being disabled, no need for complexity */
1283 if (wol->wolopts) {
1284 if (wol->wolopts & WAKE_UCAST) options |= UWF;
1285 if (wol->wolopts & WAKE_BCAST) options |= BWF;
1286 if (wol->wolopts & WAKE_MCAST) options |= MWF;
1287 }
1288
1289 cpw8 (Config5, options);
1290
1291 cp->wol_enabled = (wol->wolopts) ? 1 : 0;
1292
1293 return 0;
1294}
1295
1296/* Get the ethtool Wake-on-LAN settings */
1297static void netdev_get_wol (struct cp_private *cp,
1298 struct ethtool_wolinfo *wol)
1299{
1300 u8 options;
1301
1302 wol->wolopts = 0; /* Start from scratch */
1303 wol->supported = WAKE_PHY | WAKE_BCAST | WAKE_MAGIC |
1304 WAKE_MCAST | WAKE_UCAST;
1305 /* We don't need to go on if WOL is disabled */
1306 if (!cp->wol_enabled) return;
1307
1308 options = cpr8 (Config3);
1309 if (options & LinkUp) wol->wolopts |= WAKE_PHY;
1310 if (options & MagicPacket) wol->wolopts |= WAKE_MAGIC;
1311
1312 options = 0; /* Paranoia setting */
1313 options = cpr8 (Config5);
1314 if (options & UWF) wol->wolopts |= WAKE_UCAST;
1315 if (options & BWF) wol->wolopts |= WAKE_BCAST;
1316 if (options & MWF) wol->wolopts |= WAKE_MCAST;
1317}
1318
1319static void cp_get_drvinfo (struct net_device *dev, struct ethtool_drvinfo *info)
1320{
1321 struct cp_private *cp = netdev_priv(dev);
1322
1323 strcpy (info->driver, DRV_NAME);
1324 strcpy (info->version, DRV_VERSION);
1325 strcpy (info->bus_info, pci_name(cp->pdev));
1326}
1327
/* ethtool regs_len: size of the register dump made by cp_get_regs(). */
static int cp_get_regs_len(struct net_device *dev)
{
	return CP_REGS_SIZE;
}
1332
1333static int cp_get_sset_count (struct net_device *dev, int sset)
1334{
1335 switch (sset) {
1336 case ETH_SS_STATS:
1337 return CP_NUM_STATS;
1338 default:
1339 return -EOPNOTSUPP;
1340 }
1341}
1342
1343static int cp_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
1344{
1345 struct cp_private *cp = netdev_priv(dev);
1346 int rc;
1347 unsigned long flags;
1348
1349 spin_lock_irqsave(&cp->lock, flags);
1350 rc = mii_ethtool_gset(&cp->mii_if, cmd);
1351 spin_unlock_irqrestore(&cp->lock, flags);
1352
1353 return rc;
1354}
1355
1356static int cp_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
1357{
1358 struct cp_private *cp = netdev_priv(dev);
1359 int rc;
1360 unsigned long flags;
1361
1362 spin_lock_irqsave(&cp->lock, flags);
1363 rc = mii_ethtool_sset(&cp->mii_if, cmd);
1364 spin_unlock_irqrestore(&cp->lock, flags);
1365
1366 return rc;
1367}
1368
/* ethtool nway_reset: restart PHY autonegotiation. */
static int cp_nway_reset(struct net_device *dev)
{
	struct cp_private *cp = netdev_priv(dev);
	return mii_nway_restart(&cp->mii_if);
}
1374
/* ethtool msglevel getter for the netif_msg_* debug bitmap. */
static u32 cp_get_msglevel(struct net_device *dev)
{
	struct cp_private *cp = netdev_priv(dev);
	return cp->msg_enable;
}
1380
/* ethtool msglevel setter for the netif_msg_* debug bitmap. */
static void cp_set_msglevel(struct net_device *dev, u32 value)
{
	struct cp_private *cp = netdev_priv(dev);
	cp->msg_enable = value;
}
1386
1387static int cp_set_features(struct net_device *dev, u32 features)
1388{
1389 struct cp_private *cp = netdev_priv(dev);
1390 unsigned long flags;
1391
1392 if (!((dev->features ^ features) & NETIF_F_RXCSUM))
1393 return 0;
1394
1395 spin_lock_irqsave(&cp->lock, flags);
1396
1397 if (features & NETIF_F_RXCSUM)
1398 cp->cpcmd |= RxChkSum;
1399 else
1400 cp->cpcmd &= ~RxChkSum;
1401
1402 if (features & NETIF_F_HW_VLAN_RX)
1403 cp->cpcmd |= RxVlanOn;
1404 else
1405 cp->cpcmd &= ~RxVlanOn;
1406
1407 cpw16_f(CpCmd, cp->cpcmd);
1408 spin_unlock_irqrestore(&cp->lock, flags);
1409
1410 return 0;
1411}
1412
/* ethtool get_regs: snapshot the full MMIO register window into @p.
 * Silently does nothing if the caller's buffer is too small.
 */
static void cp_get_regs(struct net_device *dev, struct ethtool_regs *regs,
		        void *p)
{
	struct cp_private *cp = netdev_priv(dev);
	unsigned long flags;

	if (regs->len < CP_REGS_SIZE)
		return /* -EINVAL */;

	regs->version = CP_REGS_VER;

	spin_lock_irqsave(&cp->lock, flags);
	memcpy_fromio(p, cp->regs, CP_REGS_SIZE);
	spin_unlock_irqrestore(&cp->lock, flags);
}
1428
1429static void cp_get_wol (struct net_device *dev, struct ethtool_wolinfo *wol)
1430{
1431 struct cp_private *cp = netdev_priv(dev);
1432 unsigned long flags;
1433
1434 spin_lock_irqsave (&cp->lock, flags);
1435 netdev_get_wol (cp, wol);
1436 spin_unlock_irqrestore (&cp->lock, flags);
1437}
1438
1439static int cp_set_wol (struct net_device *dev, struct ethtool_wolinfo *wol)
1440{
1441 struct cp_private *cp = netdev_priv(dev);
1442 unsigned long flags;
1443 int rc;
1444
1445 spin_lock_irqsave (&cp->lock, flags);
1446 rc = netdev_set_wol (cp, wol);
1447 spin_unlock_irqrestore (&cp->lock, flags);
1448
1449 return rc;
1450}
1451
/* ethtool get_strings: copy out the statistics key names.  Any other
 * string set is a kernel bug (cp_get_sset_count rejects them first).
 */
static void cp_get_strings (struct net_device *dev, u32 stringset, u8 *buf)
{
	switch (stringset) {
	case ETH_SS_STATS:
		memcpy(buf, &ethtool_stats_keys, sizeof(ethtool_stats_keys));
		break;
	default:
		BUG();
		break;
	}
}
1463
/* ethtool get_ethtool_stats: ask the chip to DMA its statistics block
 * into a temporary coherent buffer, poll (bounded) for completion, and
 * unpack the counters in ethtool_stats_keys order.
 */
static void cp_get_ethtool_stats (struct net_device *dev,
				  struct ethtool_stats *estats, u64 *tmp_stats)
{
	struct cp_private *cp = netdev_priv(dev);
	struct cp_dma_stats *nic_stats;
	dma_addr_t dma;
	int i;

	nic_stats = dma_alloc_coherent(&cp->pdev->dev, sizeof(*nic_stats),
				       &dma, GFP_KERNEL);
	if (!nic_stats)
		return;

	/* begin NIC statistics dump */
	cpw32(StatsAddr + 4, (u64)dma >> 32);
	cpw32(StatsAddr, ((u64)dma & DMA_BIT_MASK(32)) | DumpStats);
	cpr32(StatsAddr);

	/* The chip clears DumpStats when the transfer is done; give up
	 * after ~10ms.  NOTE(review): a timeout still unpacks the buffer —
	 * confirm stale/partial data is acceptable here.
	 */
	for (i = 0; i < 1000; i++) {
		if ((cpr32(StatsAddr) & DumpStats) == 0)
			break;
		udelay(10);
	}
	cpw32(StatsAddr, 0);
	cpw32(StatsAddr + 4, 0);
	cpr32(StatsAddr);

	i = 0;
	tmp_stats[i++] = le64_to_cpu(nic_stats->tx_ok);
	tmp_stats[i++] = le64_to_cpu(nic_stats->rx_ok);
	tmp_stats[i++] = le64_to_cpu(nic_stats->tx_err);
	tmp_stats[i++] = le32_to_cpu(nic_stats->rx_err);
	tmp_stats[i++] = le16_to_cpu(nic_stats->rx_fifo);
	tmp_stats[i++] = le16_to_cpu(nic_stats->frame_align);
	tmp_stats[i++] = le32_to_cpu(nic_stats->tx_ok_1col);
	tmp_stats[i++] = le32_to_cpu(nic_stats->tx_ok_mcol);
	tmp_stats[i++] = le64_to_cpu(nic_stats->rx_ok_phys);
	tmp_stats[i++] = le64_to_cpu(nic_stats->rx_ok_bcast);
	tmp_stats[i++] = le32_to_cpu(nic_stats->rx_ok_mcast);
	tmp_stats[i++] = le16_to_cpu(nic_stats->tx_abort);
	tmp_stats[i++] = le16_to_cpu(nic_stats->tx_underrun);
	tmp_stats[i++] = cp->cp_stats.rx_frags;
	BUG_ON(i != CP_NUM_STATS);

	dma_free_coherent(&cp->pdev->dev, sizeof(*nic_stats), nic_stats, dma);
}
1510
/* ethtool entry points.  The EEPROM callbacks are defined later in this
 * file (after the serial-EEPROM bit-bang helpers).
 */
static const struct ethtool_ops cp_ethtool_ops = {
	.get_drvinfo		= cp_get_drvinfo,
	.get_regs_len		= cp_get_regs_len,
	.get_sset_count		= cp_get_sset_count,
	.get_settings		= cp_get_settings,
	.set_settings		= cp_set_settings,
	.nway_reset		= cp_nway_reset,
	.get_link		= ethtool_op_get_link,
	.get_msglevel		= cp_get_msglevel,
	.set_msglevel		= cp_set_msglevel,
	.get_regs		= cp_get_regs,
	.get_wol		= cp_get_wol,
	.set_wol		= cp_set_wol,
	.get_strings		= cp_get_strings,
	.get_ethtool_stats	= cp_get_ethtool_stats,
	.get_eeprom_len		= cp_get_eeprom_len,
	.get_eeprom		= cp_get_eeprom,
	.set_eeprom		= cp_set_eeprom,
};
1530
1531static int cp_ioctl (struct net_device *dev, struct ifreq *rq, int cmd)
1532{
1533 struct cp_private *cp = netdev_priv(dev);
1534 int rc;
1535 unsigned long flags;
1536
1537 if (!netif_running(dev))
1538 return -EINVAL;
1539
1540 spin_lock_irqsave(&cp->lock, flags);
1541 rc = generic_mii_ioctl(&cp->mii_if, if_mii(rq), cmd, NULL);
1542 spin_unlock_irqrestore(&cp->lock, flags);
1543 return rc;
1544}
1545
1546static int cp_set_mac_address(struct net_device *dev, void *p)
1547{
1548 struct cp_private *cp = netdev_priv(dev);
1549 struct sockaddr *addr = p;
1550
1551 if (!is_valid_ether_addr(addr->sa_data))
1552 return -EADDRNOTAVAIL;
1553
1554 memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
1555
1556 spin_lock_irq(&cp->lock);
1557
1558 cpw8_f(Cfg9346, Cfg9346_Unlock);
1559 cpw32_f(MAC0 + 0, le32_to_cpu (*(__le32 *) (dev->dev_addr + 0)));
1560 cpw32_f(MAC0 + 4, le32_to_cpu (*(__le32 *) (dev->dev_addr + 4)));
1561 cpw8_f(Cfg9346, Cfg9346_Lock);
1562
1563 spin_unlock_irq(&cp->lock);
1564
1565 return 0;
1566}
1567
1568/* Serial EEPROM section. */
1569
1570/* EEPROM_Ctrl bits. */
1571#define EE_SHIFT_CLK 0x04 /* EEPROM shift clock. */
1572#define EE_CS 0x08 /* EEPROM chip select. */
1573#define EE_DATA_WRITE 0x02 /* EEPROM chip data in. */
1574#define EE_WRITE_0 0x00
1575#define EE_WRITE_1 0x02
1576#define EE_DATA_READ 0x01 /* EEPROM chip data out. */
1577#define EE_ENB (0x80 | EE_CS)
1578
1579/* Delay between EEPROM clock transitions.
1580 No extra delay is needed with 33Mhz PCI, but 66Mhz may change this.
1581 */
1582
1583#define eeprom_delay() readl(ee_addr)
1584
1585/* The EEPROM commands include the alway-set leading bit. */
1586#define EE_EXTEND_CMD (4)
1587#define EE_WRITE_CMD (5)
1588#define EE_READ_CMD (6)
1589#define EE_ERASE_CMD (7)
1590
1591#define EE_EWDS_ADDR (0)
1592#define EE_WRAL_ADDR (1)
1593#define EE_ERAL_ADDR (2)
1594#define EE_EWEN_ADDR (3)
1595
1596#define CP_EEPROM_MAGIC PCI_DEVICE_ID_REALTEK_8139
1597
/* Raise EEPROM chip-select to begin a new serial command frame. */
static void eeprom_cmd_start(void __iomem *ee_addr)
{
	writeb (EE_ENB & ~EE_CS, ee_addr);
	writeb (EE_ENB, ee_addr);
	eeprom_delay ();
}
1604
1605static void eeprom_cmd(void __iomem *ee_addr, int cmd, int cmd_len)
1606{
1607 int i;
1608
1609 /* Shift the command bits out. */
1610 for (i = cmd_len - 1; i >= 0; i--) {
1611 int dataval = (cmd & (1 << i)) ? EE_DATA_WRITE : 0;
1612 writeb (EE_ENB | dataval, ee_addr);
1613 eeprom_delay ();
1614 writeb (EE_ENB | dataval | EE_SHIFT_CLK, ee_addr);
1615 eeprom_delay ();
1616 }
1617 writeb (EE_ENB, ee_addr);
1618 eeprom_delay ();
1619}
1620
/* Drop EEPROM chip-select, terminating the current command frame. */
static void eeprom_cmd_end(void __iomem *ee_addr)
{
	writeb (~EE_CS, ee_addr);
	eeprom_delay ();
}
1626
/* Issue one of the extended commands (EWEN/EWDS/ERAL/WRAL), which encode
 * the sub-opcode in the top bits of the address field.
 */
static void eeprom_extend_cmd(void __iomem *ee_addr, int extend_cmd,
			      int addr_len)
{
	int cmd = (EE_EXTEND_CMD << addr_len) | (extend_cmd << (addr_len - 2));

	eeprom_cmd_start(ee_addr);
	eeprom_cmd(ee_addr, cmd, 3 + addr_len);
	eeprom_cmd_end(ee_addr);
}
1636
/* Bit-bang a READ command to the serial EEPROM and clock back one 16-bit
 * word from @location.  @addr_len is 6 or 8 depending on the EEPROM part.
 */
static u16 read_eeprom (void __iomem *ioaddr, int location, int addr_len)
{
	int i;
	u16 retval = 0;
	void __iomem *ee_addr = ioaddr + Cfg9346;
	int read_cmd = location | (EE_READ_CMD << addr_len);

	eeprom_cmd_start(ee_addr);
	eeprom_cmd(ee_addr, read_cmd, 3 + addr_len);

	/* Clock in 16 data bits, MSB first. */
	for (i = 16; i > 0; i--) {
		writeb (EE_ENB | EE_SHIFT_CLK, ee_addr);
		eeprom_delay ();
		retval =
		    (retval << 1) | ((readb (ee_addr) & EE_DATA_READ) ? 1 :
				     0);
		writeb (EE_ENB, ee_addr);
		eeprom_delay ();
	}

	eeprom_cmd_end(ee_addr);

	return retval;
}
1661
/* Bit-bang a WRITE of @val to @location: enable writes (EWEN), send the
 * address and data, poll the data-out pin for completion (bounded loop),
 * then disable writes again (EWDS).
 */
static void write_eeprom(void __iomem *ioaddr, int location, u16 val,
			 int addr_len)
{
	int i;
	void __iomem *ee_addr = ioaddr + Cfg9346;
	int write_cmd = location | (EE_WRITE_CMD << addr_len);

	eeprom_extend_cmd(ee_addr, EE_EWEN_ADDR, addr_len);

	eeprom_cmd_start(ee_addr);
	eeprom_cmd(ee_addr, write_cmd, 3 + addr_len);
	eeprom_cmd(ee_addr, val, 16);
	eeprom_cmd_end(ee_addr);

	/* The part raises DO when the internal write cycle finishes. */
	eeprom_cmd_start(ee_addr);
	for (i = 0; i < 20000; i++)
		if (readb(ee_addr) & EE_DATA_READ)
			break;
	eeprom_cmd_end(ee_addr);

	eeprom_extend_cmd(ee_addr, EE_EWDS_ADDR, addr_len);
}
1684
1685static int cp_get_eeprom_len(struct net_device *dev)
1686{
1687 struct cp_private *cp = netdev_priv(dev);
1688 int size;
1689
1690 spin_lock_irq(&cp->lock);
1691 size = read_eeprom(cp->regs, 0, 8) == 0x8129 ? 256 : 128;
1692 spin_unlock_irq(&cp->lock);
1693
1694 return size;
1695}
1696
/* ethtool get_eeprom: read @eeprom->len bytes starting at a possibly
 * byte-odd offset from the word-addressed EEPROM.  Odd head and tail
 * bytes are handled by splitting the 16-bit words.
 */
static int cp_get_eeprom(struct net_device *dev,
			 struct ethtool_eeprom *eeprom, u8 *data)
{
	struct cp_private *cp = netdev_priv(dev);
	unsigned int addr_len;
	u16 val;
	u32 offset = eeprom->offset >> 1;
	u32 len = eeprom->len;
	u32 i = 0;

	eeprom->magic = CP_EEPROM_MAGIC;

	spin_lock_irq(&cp->lock);

	/* ID word 0x8129 means the larger part with 8-bit addressing. */
	addr_len = read_eeprom(cp->regs, 0, 8) == 0x8129 ? 8 : 6;

	/* Odd starting offset: emit the high byte of the first word. */
	if (eeprom->offset & 1) {
		val = read_eeprom(cp->regs, offset, addr_len);
		data[i++] = (u8)(val >> 8);
		offset++;
	}

	while (i < len - 1) {
		val = read_eeprom(cp->regs, offset, addr_len);
		data[i++] = (u8)val;
		data[i++] = (u8)(val >> 8);
		offset++;
	}

	/* Odd trailing byte: emit only the low byte of the last word. */
	if (i < len) {
		val = read_eeprom(cp->regs, offset, addr_len);
		data[i] = (u8)val;
	}

	spin_unlock_irq(&cp->lock);
	return 0;
}
1734
/* ethtool set_eeprom: write @eeprom->len bytes at a possibly byte-odd
 * offset.  Partial words at either end are read-modify-written so the
 * untouched half of the word is preserved.
 */
static int cp_set_eeprom(struct net_device *dev,
			 struct ethtool_eeprom *eeprom, u8 *data)
{
	struct cp_private *cp = netdev_priv(dev);
	unsigned int addr_len;
	u16 val;
	u32 offset = eeprom->offset >> 1;
	u32 len = eeprom->len;
	u32 i = 0;

	/* Guard against blind writes from the wrong tool/device. */
	if (eeprom->magic != CP_EEPROM_MAGIC)
		return -EINVAL;

	spin_lock_irq(&cp->lock);

	addr_len = read_eeprom(cp->regs, 0, 8) == 0x8129 ? 8 : 6;

	/* Odd starting offset: keep the low byte, replace the high byte. */
	if (eeprom->offset & 1) {
		val = read_eeprom(cp->regs, offset, addr_len) & 0xff;
		val |= (u16)data[i++] << 8;
		write_eeprom(cp->regs, offset, val, addr_len);
		offset++;
	}

	while (i < len - 1) {
		val = (u16)data[i++];
		val |= (u16)data[i++] << 8;
		write_eeprom(cp->regs, offset, val, addr_len);
		offset++;
	}

	/* Odd trailing byte: keep the high byte, replace the low byte. */
	if (i < len) {
		val = read_eeprom(cp->regs, offset, addr_len) & 0xff00;
		val |= (u16)data[i];
		write_eeprom(cp->regs, offset, val, addr_len);
	}

	spin_unlock_irq(&cp->lock);
	return 0;
}
1775
/* Arm PME# generation and power the board down so WOL can wake it.
 * NOTE(review): the original comment said "D3cold" but the code requests
 * PCI_D3hot — confirm which state is actually intended.
 */
static void cp_set_d3_state (struct cp_private *cp)
{
	pci_enable_wake (cp->pdev, 0, 1); /* Enable PME# generation */
	pci_set_power_state (cp->pdev, PCI_D3hot);
}
1782
/* Network device callbacks.  cp_change_mtu is compiled out together
 * with its definition (see the BROKEN #ifdef above it).
 */
static const struct net_device_ops cp_netdev_ops = {
	.ndo_open		= cp_open,
	.ndo_stop		= cp_close,
	.ndo_validate_addr	= eth_validate_addr,
	.ndo_set_mac_address 	= cp_set_mac_address,
	.ndo_set_multicast_list	= cp_set_rx_mode,
	.ndo_get_stats		= cp_get_stats,
	.ndo_do_ioctl		= cp_ioctl,
	.ndo_start_xmit		= cp_start_xmit,
	.ndo_tx_timeout		= cp_tx_timeout,
	.ndo_set_features	= cp_set_features,
#ifdef BROKEN
	.ndo_change_mtu		= cp_change_mtu,
#endif

#ifdef CONFIG_NET_POLL_CONTROLLER
	.ndo_poll_controller	= cp_poll_controller,
#endif
};
1802
/* PCI probe: reject pre-C+ RTL8139 revisions, set up DMA masks, map the
 * BAR-1 MMIO window, read the MAC from EEPROM, and register the netdev.
 * All failure paths unwind via the goto ladder at the bottom.
 */
static int cp_init_one (struct pci_dev *pdev, const struct pci_device_id *ent)
{
	struct net_device *dev;
	struct cp_private *cp;
	int rc;
	void __iomem *regs;
	resource_size_t pciaddr;
	unsigned int addr_len, i, pci_using_dac;

#ifndef MODULE
	static int version_printed;
	if (version_printed++ == 0)
		pr_info("%s", version);
#endif

	/* Revisions below 0x20 lack the C+ descriptor engine. */
	if (pdev->vendor == PCI_VENDOR_ID_REALTEK &&
	    pdev->device == PCI_DEVICE_ID_REALTEK_8139 && pdev->revision < 0x20) {
		dev_info(&pdev->dev,
			 "This (id %04x:%04x rev %02x) is not an 8139C+ compatible chip, use 8139too\n",
			 pdev->vendor, pdev->device, pdev->revision);
		return -ENODEV;
	}

	dev = alloc_etherdev(sizeof(struct cp_private));
	if (!dev)
		return -ENOMEM;
	SET_NETDEV_DEV(dev, &pdev->dev);

	cp = netdev_priv(dev);
	cp->pdev = pdev;
	cp->dev = dev;
	cp->msg_enable = (debug < 0 ? CP_DEF_MSG_ENABLE : debug);
	spin_lock_init (&cp->lock);
	cp->mii_if.dev = dev;
	cp->mii_if.mdio_read = mdio_read;
	cp->mii_if.mdio_write = mdio_write;
	cp->mii_if.phy_id = CP_INTERNAL_PHY;
	cp->mii_if.phy_id_mask = 0x1f;
	cp->mii_if.reg_num_mask = 0x1f;
	cp_set_rxbufsize(cp);

	rc = pci_enable_device(pdev);
	if (rc)
		goto err_out_free;

	rc = pci_set_mwi(pdev);
	if (rc)
		goto err_out_disable;

	rc = pci_request_regions(pdev, DRV_NAME);
	if (rc)
		goto err_out_mwi;

	/* BAR 1 is the memory-mapped register window. */
	pciaddr = pci_resource_start(pdev, 1);
	if (!pciaddr) {
		rc = -EIO;
		dev_err(&pdev->dev, "no MMIO resource\n");
		goto err_out_res;
	}
	if (pci_resource_len(pdev, 1) < CP_REGS_SIZE) {
		rc = -EIO;
		dev_err(&pdev->dev, "MMIO resource (%llx) too small\n",
			(unsigned long long)pci_resource_len(pdev, 1));
		goto err_out_res;
	}

	/* Configure DMA attributes: try 64-bit masks, fall back to 32. */
	if ((sizeof(dma_addr_t) > 4) &&
	    !pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64)) &&
	    !pci_set_dma_mask(pdev, DMA_BIT_MASK(64))) {
		pci_using_dac = 1;
	} else {
		pci_using_dac = 0;

		rc = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
		if (rc) {
			dev_err(&pdev->dev,
				"No usable DMA configuration, aborting\n");
			goto err_out_res;
		}
		rc = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32));
		if (rc) {
			dev_err(&pdev->dev,
				"No usable consistent DMA configuration, aborting\n");
			goto err_out_res;
		}
	}

	cp->cpcmd = (pci_using_dac ? PCIDAC : 0) |
		    PCIMulRW | RxChkSum | CpRxOn | CpTxOn;

	dev->features |= NETIF_F_RXCSUM;
	dev->hw_features |= NETIF_F_RXCSUM;

	regs = ioremap(pciaddr, CP_REGS_SIZE);
	if (!regs) {
		rc = -EIO;
		dev_err(&pdev->dev, "Cannot map PCI MMIO (%Lx@%Lx)\n",
			(unsigned long long)pci_resource_len(pdev, 1),
			(unsigned long long)pciaddr);
		goto err_out_res;
	}
	dev->base_addr = (unsigned long) regs;
	cp->regs = regs;

	cp_stop_hw(cp);

	/* read MAC address from EEPROM */
	addr_len = read_eeprom (regs, 0, 8) == 0x8129 ? 8 : 6;
	for (i = 0; i < 3; i++)
		((__le16 *) (dev->dev_addr))[i] =
		    cpu_to_le16(read_eeprom (regs, i + 7, addr_len));
	memcpy(dev->perm_addr, dev->dev_addr, dev->addr_len);

	dev->netdev_ops = &cp_netdev_ops;
	netif_napi_add(dev, &cp->napi, cp_rx_poll, 16);
	dev->ethtool_ops = &cp_ethtool_ops;
	dev->watchdog_timeo = TX_TIMEOUT;

	dev->features |= NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX;

	if (pci_using_dac)
		dev->features |= NETIF_F_HIGHDMA;

	/* disabled by default until verified */
	dev->hw_features |= NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_TSO |
		NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX;
	dev->vlan_features = NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_TSO |
		NETIF_F_HIGHDMA;

	dev->irq = pdev->irq;

	rc = register_netdev(dev);
	if (rc)
		goto err_out_iomap;

	netdev_info(dev, "RTL-8139C+ at 0x%lx, %pM, IRQ %d\n",
		    dev->base_addr, dev->dev_addr, dev->irq);

	pci_set_drvdata(pdev, dev);

	/* enable busmastering and memory-write-invalidate */
	pci_set_master(pdev);

	if (cp->wol_enabled)
		cp_set_d3_state (cp);

	return 0;

err_out_iomap:
	iounmap(regs);
err_out_res:
	pci_release_regions(pdev);
err_out_mwi:
	pci_clear_mwi(pdev);
err_out_disable:
	pci_disable_device(pdev);
err_out_free:
	free_netdev(dev);
	return rc;
}
1964
/* PCI remove: unregister the netdev and release every resource taken in
 * cp_init_one(), in reverse order.
 */
static void cp_remove_one (struct pci_dev *pdev)
{
	struct net_device *dev = pci_get_drvdata(pdev);
	struct cp_private *cp = netdev_priv(dev);

	unregister_netdev(dev);
	iounmap(cp->regs);
	/* A WOL-armed chip was left in D3; bring it back before disabling. */
	if (cp->wol_enabled)
		pci_set_power_state (pdev, PCI_D0);
	pci_release_regions(pdev);
	pci_clear_mwi(pdev);
	pci_disable_device(pdev);
	pci_set_drvdata(pdev, NULL);
	free_netdev(dev);
}
1980
1981#ifdef CONFIG_PM
1982static int cp_suspend (struct pci_dev *pdev, pm_message_t state)
1983{
1984 struct net_device *dev = pci_get_drvdata(pdev);
1985 struct cp_private *cp = netdev_priv(dev);
1986 unsigned long flags;
1987
1988 if (!netif_running(dev))
1989 return 0;
1990
1991 netif_device_detach (dev);
1992 netif_stop_queue (dev);
1993
1994 spin_lock_irqsave (&cp->lock, flags);
1995
1996 /* Disable Rx and Tx */
1997 cpw16 (IntrMask, 0);
1998 cpw8 (Cmd, cpr8 (Cmd) & (~RxOn | ~TxOn));
1999
2000 spin_unlock_irqrestore (&cp->lock, flags);
2001
2002 pci_save_state(pdev);
2003 pci_enable_wake(pdev, pci_choose_state(pdev, state), cp->wol_enabled);
2004 pci_set_power_state(pdev, pci_choose_state(pdev, state));
2005
2006 return 0;
2007}
2008
/* PCI resume: restore power/config state, reinitialize the hardware and
 * ring indices, and re-check link state.
 */
static int cp_resume (struct pci_dev *pdev)
{
	struct net_device *dev = pci_get_drvdata (pdev);
	struct cp_private *cp = netdev_priv(dev);
	unsigned long flags;

	if (!netif_running(dev))
		return 0;

	netif_device_attach (dev);

	pci_set_power_state(pdev, PCI_D0);
	pci_restore_state(pdev);
	pci_enable_wake(pdev, PCI_D0, 0);

	/* FIXME: sh*t may happen if the Rx ring buffer is depleted */
	cp_init_rings_index (cp);
	cp_init_hw (cp);
	netif_start_queue (dev);

	spin_lock_irqsave (&cp->lock, flags);

	mii_check_media(&cp->mii_if, netif_msg_link(cp), false);

	spin_unlock_irqrestore (&cp->lock, flags);

	return 0;
}
2037#endif /* CONFIG_PM */
2038
/* PCI driver glue; suspend/resume are only wired up under CONFIG_PM. */
static struct pci_driver cp_driver = {
	.name         = DRV_NAME,
	.id_table     = cp_pci_tbl,
	.probe        =	cp_init_one,
	.remove       = cp_remove_one,
#ifdef CONFIG_PM
	.resume       = cp_resume,
	.suspend      = cp_suspend,
#endif
};
2049
/* Module entry: print the version banner (module builds only; built-in
 * builds print it from cp_init_one) and register the PCI driver.
 */
static int __init cp_init (void)
{
#ifdef MODULE
	pr_info("%s", version);
#endif
	return pci_register_driver(&cp_driver);
}
2057
/* Module exit: unregister the PCI driver. */
static void __exit cp_exit (void)
{
	pci_unregister_driver (&cp_driver);
}
2062
2063module_init(cp_init);
2064module_exit(cp_exit);
diff --git a/drivers/net/ethernet/realtek/8139too.c b/drivers/net/ethernet/realtek/8139too.c
new file mode 100644
index 000000000000..c2672c692d6f
--- /dev/null
+++ b/drivers/net/ethernet/realtek/8139too.c
@@ -0,0 +1,2622 @@
1/*
2
3 8139too.c: A RealTek RTL-8139 Fast Ethernet driver for Linux.
4
5 Maintained by Jeff Garzik <jgarzik@pobox.com>
6 Copyright 2000-2002 Jeff Garzik
7
8 Much code comes from Donald Becker's rtl8139.c driver,
9 versions 1.13 and older. This driver was originally based
10 on rtl8139.c version 1.07. Header of rtl8139.c version 1.13:
11
12 -----<snip>-----
13
14 Written 1997-2001 by Donald Becker.
15 This software may be used and distributed according to the
16 terms of the GNU General Public License (GPL), incorporated
17 herein by reference. Drivers based on or derived from this
18 code fall under the GPL and must retain the authorship,
19 copyright and license notice. This file is not a complete
20 program and may only be used when the entire operating
21 system is licensed under the GPL.
22
23 This driver is for boards based on the RTL8129 and RTL8139
24 PCI ethernet chips.
25
26 The author may be reached as becker@scyld.com, or C/O Scyld
27 Computing Corporation 410 Severn Ave., Suite 210 Annapolis
28 MD 21403
29
30 Support and updates available at
31 http://www.scyld.com/network/rtl8139.html
32
33 Twister-tuning table provided by Kinston
34 <shangh@realtek.com.tw>.
35
36 -----<snip>-----
37
38 This software may be used and distributed according to the terms
39 of the GNU General Public License, incorporated herein by reference.
40
41 Contributors:
42
43 Donald Becker - he wrote the original driver, kudos to him!
44 (but please don't e-mail him for support, this isn't his driver)
45
46 Tigran Aivazian - bug fixes, skbuff free cleanup
47
48 Martin Mares - suggestions for PCI cleanup
49
50 David S. Miller - PCI DMA and softnet updates
51
52 Ernst Gill - fixes ported from BSD driver
53
54 Daniel Kobras - identified specific locations of
55 posted MMIO write bugginess
56
57 Gerard Sharp - bug fix, testing and feedback
58
59 David Ford - Rx ring wrap fix
60
61 Dan DeMaggio - swapped RTL8139 cards with me, and allowed me
62 to find and fix a crucial bug on older chipsets.
63
64 Donald Becker/Chris Butterworth/Marcus Westergren -
65 Noticed various Rx packet size-related buglets.
66
67 Santiago Garcia Mantinan - testing and feedback
68
69 Jens David - 2.2.x kernel backports
70
71 Martin Dennett - incredibly helpful insight on undocumented
72 features of the 8139 chips
73
74 Jean-Jacques Michel - bug fix
75
76 Tobias Ringström - Rx interrupt status checking suggestion
77
78 Andrew Morton - Clear blocked signals, avoid
79 buffer overrun setting current->comm.
80
81 Kalle Olavi Niemitalo - Wake-on-LAN ioctls
82
83 Robert Kuebel - Save kernel thread from dying on any signal.
84
85 Submitting bug reports:
86
87 "rtl8139-diag -mmmaaavvveefN" output
88 enable RTL8139_DEBUG below, and look at 'dmesg' or kernel log
89
90*/
91
92#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
93
94#define DRV_NAME "8139too"
95#define DRV_VERSION "0.9.28"
96
97
98#include <linux/module.h>
99#include <linux/kernel.h>
100#include <linux/compiler.h>
101#include <linux/pci.h>
102#include <linux/init.h>
103#include <linux/interrupt.h>
104#include <linux/netdevice.h>
105#include <linux/etherdevice.h>
106#include <linux/rtnetlink.h>
107#include <linux/delay.h>
108#include <linux/ethtool.h>
109#include <linux/mii.h>
110#include <linux/completion.h>
111#include <linux/crc32.h>
112#include <linux/io.h>
113#include <linux/uaccess.h>
114#include <linux/gfp.h>
115#include <asm/irq.h>
116
117#define RTL8139_DRIVER_NAME DRV_NAME " Fast Ethernet driver " DRV_VERSION
118
119/* Default Message level */
120#define RTL8139_DEF_MSG_ENABLE (NETIF_MSG_DRV | \
121 NETIF_MSG_PROBE | \
122 NETIF_MSG_LINK)
123
124
125/* define to 1, 2 or 3 to enable copious debugging info */
126#define RTL8139_DEBUG 0
127
128/* define to 1 to disable lightweight runtime debugging checks */
129#undef RTL8139_NDEBUG
130
131
#ifdef RTL8139_NDEBUG
/* Checks compiled out: keep a statement-shaped no-op so `assert(x);`
 * is still valid syntax everywhere. */
# define assert(expr) do {} while (0)
#else
/*
 * Lightweight runtime check: logs (but does not halt) when EXPR is
 * false.  Wrapped in do { } while (0) so the trailing semicolon is
 * consumed and the macro is safe inside unbraced if/else bodies --
 * the previous bare-if form was a dangling-else hazard and was
 * inconsistent with the NDEBUG variant above.
 */
# define assert(expr) \
	do { \
		if (unlikely(!(expr))) { \
			pr_err("Assertion failed! %s,%s,%s,line=%d\n", \
			       #expr, __FILE__, __func__, __LINE__); \
		} \
	} while (0)
#endif
141
142
143/* A few user-configurable values. */
144/* media options */
145#define MAX_UNITS 8
146static int media[MAX_UNITS] = {-1, -1, -1, -1, -1, -1, -1, -1};
147static int full_duplex[MAX_UNITS] = {-1, -1, -1, -1, -1, -1, -1, -1};
148
149/* Whether to use MMIO or PIO. Default to MMIO. */
150#ifdef CONFIG_8139TOO_PIO
151static int use_io = 1;
152#else
153static int use_io = 0;
154#endif
155
156/* Maximum number of multicast addresses to filter (vs. Rx-all-multicast).
157 The RTL chips use a 64 element hash table based on the Ethernet CRC. */
158static int multicast_filter_limit = 32;
159
160/* bitmapped message enable number */
161static int debug = -1;
162
163/*
164 * Receive ring size
165 * Warning: 64K ring has hardware issues and may lock up.
166 */
167#if defined(CONFIG_SH_DREAMCAST)
168#define RX_BUF_IDX 0 /* 8K ring */
169#else
170#define RX_BUF_IDX 2 /* 32K ring */
171#endif
172#define RX_BUF_LEN (8192 << RX_BUF_IDX)
173#define RX_BUF_PAD 16
174#define RX_BUF_WRAP_PAD 2048 /* spare padding to handle lack of packet wrap */
175
176#if RX_BUF_LEN == 65536
177#define RX_BUF_TOT_LEN RX_BUF_LEN
178#else
179#define RX_BUF_TOT_LEN (RX_BUF_LEN + RX_BUF_PAD + RX_BUF_WRAP_PAD)
180#endif
181
182/* Number of Tx descriptor registers. */
183#define NUM_TX_DESC 4
184
185/* max supported ethernet frame size -- must be at least (dev->mtu+14+4).*/
186#define MAX_ETH_FRAME_SIZE 1536
187
188/* Size of the Tx bounce buffers -- must be at least (dev->mtu+14+4). */
189#define TX_BUF_SIZE MAX_ETH_FRAME_SIZE
190#define TX_BUF_TOT_LEN (TX_BUF_SIZE * NUM_TX_DESC)
191
192/* PCI Tuning Parameters
193 Threshold is bytes transferred to chip before transmission starts. */
194#define TX_FIFO_THRESH 256 /* In bytes, rounded down to 32 byte units. */
195
196/* The following settings are log_2(bytes)-4: 0 == 16 bytes .. 6==1024, 7==end of packet. */
197#define RX_FIFO_THRESH 7 /* Rx buffer level before first PCI xfer. */
198#define RX_DMA_BURST 7 /* Maximum PCI burst, '6' is 1024 */
199#define TX_DMA_BURST 6 /* Maximum PCI burst, '6' is 1024 */
200#define TX_RETRY 8 /* 0-15. retries = 16 + (TX_RETRY * 16) */
201
202/* Operational parameters that usually are not changed. */
203/* Time in jiffies before concluding the transmitter is hung. */
204#define TX_TIMEOUT (6*HZ)
205
206
207enum {
208 HAS_MII_XCVR = 0x010000,
209 HAS_CHIP_XCVR = 0x020000,
210 HAS_LNK_CHNG = 0x040000,
211};
212
213#define RTL_NUM_STATS 4 /* number of ETHTOOL_GSTATS u64's */
214#define RTL_REGS_VER 1 /* version of reg. data in ETHTOOL_GREGS */
215#define RTL_MIN_IO_SIZE 0x80
216#define RTL8139B_IO_SIZE 256
217
218#define RTL8129_CAPS HAS_MII_XCVR
219#define RTL8139_CAPS (HAS_CHIP_XCVR|HAS_LNK_CHNG)
220
221typedef enum {
222 RTL8139 = 0,
223 RTL8129,
224} board_t;
225
226
227/* indexed by board_t, above */
228static const struct {
229 const char *name;
230 u32 hw_flags;
231} board_info[] __devinitdata = {
232 { "RealTek RTL8139", RTL8139_CAPS },
233 { "RealTek RTL8129", RTL8129_CAPS },
234};
235
236
237static DEFINE_PCI_DEVICE_TABLE(rtl8139_pci_tbl) = {
238 {0x10ec, 0x8139, PCI_ANY_ID, PCI_ANY_ID, 0, 0, RTL8139 },
239 {0x10ec, 0x8138, PCI_ANY_ID, PCI_ANY_ID, 0, 0, RTL8139 },
240 {0x1113, 0x1211, PCI_ANY_ID, PCI_ANY_ID, 0, 0, RTL8139 },
241 {0x1500, 0x1360, PCI_ANY_ID, PCI_ANY_ID, 0, 0, RTL8139 },
242 {0x4033, 0x1360, PCI_ANY_ID, PCI_ANY_ID, 0, 0, RTL8139 },
243 {0x1186, 0x1300, PCI_ANY_ID, PCI_ANY_ID, 0, 0, RTL8139 },
244 {0x1186, 0x1340, PCI_ANY_ID, PCI_ANY_ID, 0, 0, RTL8139 },
245 {0x13d1, 0xab06, PCI_ANY_ID, PCI_ANY_ID, 0, 0, RTL8139 },
246 {0x1259, 0xa117, PCI_ANY_ID, PCI_ANY_ID, 0, 0, RTL8139 },
247 {0x1259, 0xa11e, PCI_ANY_ID, PCI_ANY_ID, 0, 0, RTL8139 },
248 {0x14ea, 0xab06, PCI_ANY_ID, PCI_ANY_ID, 0, 0, RTL8139 },
249 {0x14ea, 0xab07, PCI_ANY_ID, PCI_ANY_ID, 0, 0, RTL8139 },
250 {0x11db, 0x1234, PCI_ANY_ID, PCI_ANY_ID, 0, 0, RTL8139 },
251 {0x1432, 0x9130, PCI_ANY_ID, PCI_ANY_ID, 0, 0, RTL8139 },
252 {0x02ac, 0x1012, PCI_ANY_ID, PCI_ANY_ID, 0, 0, RTL8139 },
253 {0x018a, 0x0106, PCI_ANY_ID, PCI_ANY_ID, 0, 0, RTL8139 },
254 {0x126c, 0x1211, PCI_ANY_ID, PCI_ANY_ID, 0, 0, RTL8139 },
255 {0x1743, 0x8139, PCI_ANY_ID, PCI_ANY_ID, 0, 0, RTL8139 },
256 {0x021b, 0x8139, PCI_ANY_ID, PCI_ANY_ID, 0, 0, RTL8139 },
257
258#ifdef CONFIG_SH_SECUREEDGE5410
259 /* Bogus 8139 silicon reports 8129 without external PROM :-( */
260 {0x10ec, 0x8129, PCI_ANY_ID, PCI_ANY_ID, 0, 0, RTL8139 },
261#endif
262#ifdef CONFIG_8139TOO_8129
263 {0x10ec, 0x8129, PCI_ANY_ID, PCI_ANY_ID, 0, 0, RTL8129 },
264#endif
265
266 /* some crazy cards report invalid vendor ids like
267 * 0x0001 here. The other ids are valid and constant,
268 * so we simply don't match on the main vendor id.
269 */
270 {PCI_ANY_ID, 0x8139, 0x10ec, 0x8139, 0, 0, RTL8139 },
271 {PCI_ANY_ID, 0x8139, 0x1186, 0x1300, 0, 0, RTL8139 },
272 {PCI_ANY_ID, 0x8139, 0x13d1, 0xab06, 0, 0, RTL8139 },
273
274 {0,}
275};
276MODULE_DEVICE_TABLE (pci, rtl8139_pci_tbl);
277
278static struct {
279 const char str[ETH_GSTRING_LEN];
280} ethtool_stats_keys[] = {
281 { "early_rx" },
282 { "tx_buf_mapped" },
283 { "tx_timeouts" },
284 { "rx_lost_in_ring" },
285};
286
287/* The rest of these values should never change. */
288
289/* Symbolic offsets to registers. */
290enum RTL8139_registers {
291 MAC0 = 0, /* Ethernet hardware address. */
292 MAR0 = 8, /* Multicast filter. */
293 TxStatus0 = 0x10, /* Transmit status (Four 32bit registers). */
294 TxAddr0 = 0x20, /* Tx descriptors (also four 32bit). */
295 RxBuf = 0x30,
296 ChipCmd = 0x37,
297 RxBufPtr = 0x38,
298 RxBufAddr = 0x3A,
299 IntrMask = 0x3C,
300 IntrStatus = 0x3E,
301 TxConfig = 0x40,
302 RxConfig = 0x44,
303 Timer = 0x48, /* A general-purpose counter. */
304 RxMissed = 0x4C, /* 24 bits valid, write clears. */
305 Cfg9346 = 0x50,
306 Config0 = 0x51,
307 Config1 = 0x52,
308 TimerInt = 0x54,
309 MediaStatus = 0x58,
310 Config3 = 0x59,
311 Config4 = 0x5A, /* absent on RTL-8139A */
312 HltClk = 0x5B,
313 MultiIntr = 0x5C,
314 TxSummary = 0x60,
315 BasicModeCtrl = 0x62,
316 BasicModeStatus = 0x64,
317 NWayAdvert = 0x66,
318 NWayLPAR = 0x68,
319 NWayExpansion = 0x6A,
320 /* Undocumented registers, but required for proper operation. */
321 FIFOTMS = 0x70, /* FIFO Control and test. */
322 CSCR = 0x74, /* Chip Status and Configuration Register. */
323 PARA78 = 0x78,
324 FlashReg = 0xD4, /* Communication with Flash ROM, four bytes. */
325 PARA7c = 0x7c, /* Magic transceiver parameter register. */
326 Config5 = 0xD8, /* absent on RTL-8139A */
327};
328
329enum ClearBitMasks {
330 MultiIntrClear = 0xF000,
331 ChipCmdClear = 0xE2,
332 Config1Clear = (1<<7)|(1<<6)|(1<<3)|(1<<2)|(1<<1),
333};
334
335enum ChipCmdBits {
336 CmdReset = 0x10,
337 CmdRxEnb = 0x08,
338 CmdTxEnb = 0x04,
339 RxBufEmpty = 0x01,
340};
341
342/* Interrupt register bits, using my own meaningful names. */
343enum IntrStatusBits {
344 PCIErr = 0x8000,
345 PCSTimeout = 0x4000,
346 RxFIFOOver = 0x40,
347 RxUnderrun = 0x20,
348 RxOverflow = 0x10,
349 TxErr = 0x08,
350 TxOK = 0x04,
351 RxErr = 0x02,
352 RxOK = 0x01,
353
354 RxAckBits = RxFIFOOver | RxOverflow | RxOK,
355};
356
357enum TxStatusBits {
358 TxHostOwns = 0x2000,
359 TxUnderrun = 0x4000,
360 TxStatOK = 0x8000,
361 TxOutOfWindow = 0x20000000,
362 TxAborted = 0x40000000,
363 TxCarrierLost = 0x80000000,
364};
365enum RxStatusBits {
366 RxMulticast = 0x8000,
367 RxPhysical = 0x4000,
368 RxBroadcast = 0x2000,
369 RxBadSymbol = 0x0020,
370 RxRunt = 0x0010,
371 RxTooLong = 0x0008,
372 RxCRCErr = 0x0004,
373 RxBadAlign = 0x0002,
374 RxStatusOK = 0x0001,
375};
376
377/* Bits in RxConfig. */
378enum rx_mode_bits {
379 AcceptErr = 0x20,
380 AcceptRunt = 0x10,
381 AcceptBroadcast = 0x08,
382 AcceptMulticast = 0x04,
383 AcceptMyPhys = 0x02,
384 AcceptAllPhys = 0x01,
385};
386
387/* Bits in TxConfig. */
388enum tx_config_bits {
389 /* Interframe Gap Time. Only TxIFG96 doesn't violate IEEE 802.3 */
390 TxIFGShift = 24,
391 TxIFG84 = (0 << TxIFGShift), /* 8.4us / 840ns (10 / 100Mbps) */
392 TxIFG88 = (1 << TxIFGShift), /* 8.8us / 880ns (10 / 100Mbps) */
393 TxIFG92 = (2 << TxIFGShift), /* 9.2us / 920ns (10 / 100Mbps) */
394 TxIFG96 = (3 << TxIFGShift), /* 9.6us / 960ns (10 / 100Mbps) */
395
396 TxLoopBack = (1 << 18) | (1 << 17), /* enable loopback test mode */
397 TxCRC = (1 << 16), /* DISABLE Tx pkt CRC append */
398 TxClearAbt = (1 << 0), /* Clear abort (WO) */
399 TxDMAShift = 8, /* DMA burst value (0-7) is shifted X many bits */
400 TxRetryShift = 4, /* TXRR value (0-15) is shifted X many bits */
401
402 TxVersionMask = 0x7C800000, /* mask out version bits 30-26, 23 */
403};
404
405/* Bits in Config1 */
406enum Config1Bits {
407 Cfg1_PM_Enable = 0x01,
408 Cfg1_VPD_Enable = 0x02,
409 Cfg1_PIO = 0x04,
410 Cfg1_MMIO = 0x08,
411 LWAKE = 0x10, /* not on 8139, 8139A */
412 Cfg1_Driver_Load = 0x20,
413 Cfg1_LED0 = 0x40,
414 Cfg1_LED1 = 0x80,
415 SLEEP = (1 << 1), /* only on 8139, 8139A */
416 PWRDN = (1 << 0), /* only on 8139, 8139A */
417};
418
419/* Bits in Config3 */
420enum Config3Bits {
421 Cfg3_FBtBEn = (1 << 0), /* 1 = Fast Back to Back */
422 Cfg3_FuncRegEn = (1 << 1), /* 1 = enable CardBus Function registers */
423 Cfg3_CLKRUN_En = (1 << 2), /* 1 = enable CLKRUN */
424 Cfg3_CardB_En = (1 << 3), /* 1 = enable CardBus registers */
425 Cfg3_LinkUp = (1 << 4), /* 1 = wake up on link up */
426 Cfg3_Magic = (1 << 5), /* 1 = wake up on Magic Packet (tm) */
427 Cfg3_PARM_En = (1 << 6), /* 0 = software can set twister parameters */
428 Cfg3_GNTSel = (1 << 7), /* 1 = delay 1 clock from PCI GNT signal */
429};
430
431/* Bits in Config4 */
432enum Config4Bits {
433 LWPTN = (1 << 2), /* not on 8139, 8139A */
434};
435
436/* Bits in Config5 */
437enum Config5Bits {
438 Cfg5_PME_STS = (1 << 0), /* 1 = PCI reset resets PME_Status */
439 Cfg5_LANWake = (1 << 1), /* 1 = enable LANWake signal */
440 Cfg5_LDPS = (1 << 2), /* 0 = save power when link is down */
441 Cfg5_FIFOAddrPtr= (1 << 3), /* Realtek internal SRAM testing */
442 Cfg5_UWF = (1 << 4), /* 1 = accept unicast wakeup frame */
443 Cfg5_MWF = (1 << 5), /* 1 = accept multicast wakeup frame */
444 Cfg5_BWF = (1 << 6), /* 1 = accept broadcast wakeup frame */
445};
446
447enum RxConfigBits {
448 /* rx fifo threshold */
449 RxCfgFIFOShift = 13,
450 RxCfgFIFONone = (7 << RxCfgFIFOShift),
451
452 /* Max DMA burst */
453 RxCfgDMAShift = 8,
454 RxCfgDMAUnlimited = (7 << RxCfgDMAShift),
455
456 /* rx ring buffer length */
457 RxCfgRcv8K = 0,
458 RxCfgRcv16K = (1 << 11),
459 RxCfgRcv32K = (1 << 12),
460 RxCfgRcv64K = (1 << 11) | (1 << 12),
461
462 /* Disable packet wrap at end of Rx buffer. (not possible with 64k) */
463 RxNoWrap = (1 << 7),
464};
465
466/* Twister tuning parameters from RealTek.
467 Completely undocumented, but required to tune bad links on some boards. */
468enum CSCRBits {
469 CSCR_LinkOKBit = 0x0400,
470 CSCR_LinkChangeBit = 0x0800,
471 CSCR_LinkStatusBits = 0x0f000,
472 CSCR_LinkDownOffCmd = 0x003c0,
473 CSCR_LinkDownCmd = 0x0f3c0,
474};
475
476enum Cfg9346Bits {
477 Cfg9346_Lock = 0x00,
478 Cfg9346_Unlock = 0xC0,
479};
480
481typedef enum {
482 CH_8139 = 0,
483 CH_8139_K,
484 CH_8139A,
485 CH_8139A_G,
486 CH_8139B,
487 CH_8130,
488 CH_8139C,
489 CH_8100,
490 CH_8100B_8139D,
491 CH_8101,
492} chip_t;
493
494enum chip_flags {
495 HasHltClk = (1 << 0),
496 HasLWake = (1 << 1),
497};
498
499#define HW_REVID(b30, b29, b28, b27, b26, b23, b22) \
500 (b30<<30 | b29<<29 | b28<<28 | b27<<27 | b26<<26 | b23<<23 | b22<<22)
501#define HW_REVID_MASK HW_REVID(1, 1, 1, 1, 1, 1, 1)
502
503/* directly indexed by chip_t, above */
504static const struct {
505 const char *name;
506 u32 version; /* from RTL8139C/RTL8139D docs */
507 u32 flags;
508} rtl_chip_info[] = {
509 { "RTL-8139",
510 HW_REVID(1, 0, 0, 0, 0, 0, 0),
511 HasHltClk,
512 },
513
514 { "RTL-8139 rev K",
515 HW_REVID(1, 1, 0, 0, 0, 0, 0),
516 HasHltClk,
517 },
518
519 { "RTL-8139A",
520 HW_REVID(1, 1, 1, 0, 0, 0, 0),
521 HasHltClk, /* XXX undocumented? */
522 },
523
524 { "RTL-8139A rev G",
525 HW_REVID(1, 1, 1, 0, 0, 1, 0),
526 HasHltClk, /* XXX undocumented? */
527 },
528
529 { "RTL-8139B",
530 HW_REVID(1, 1, 1, 1, 0, 0, 0),
531 HasLWake,
532 },
533
534 { "RTL-8130",
535 HW_REVID(1, 1, 1, 1, 1, 0, 0),
536 HasLWake,
537 },
538
539 { "RTL-8139C",
540 HW_REVID(1, 1, 1, 0, 1, 0, 0),
541 HasLWake,
542 },
543
544 { "RTL-8100",
545 HW_REVID(1, 1, 1, 1, 0, 1, 0),
546 HasLWake,
547 },
548
549 { "RTL-8100B/8139D",
550 HW_REVID(1, 1, 1, 0, 1, 0, 1),
551 HasHltClk /* XXX undocumented? */
552 | HasLWake,
553 },
554
555 { "RTL-8101",
556 HW_REVID(1, 1, 1, 0, 1, 1, 1),
557 HasLWake,
558 },
559};
560
561struct rtl_extra_stats {
562 unsigned long early_rx;
563 unsigned long tx_buf_mapped;
564 unsigned long tx_timeouts;
565 unsigned long rx_lost_in_ring;
566};
567
568struct rtl8139_private {
569 void __iomem *mmio_addr;
570 int drv_flags;
571 struct pci_dev *pci_dev;
572 u32 msg_enable;
573 struct napi_struct napi;
574 struct net_device *dev;
575
576 unsigned char *rx_ring;
577 unsigned int cur_rx; /* RX buf index of next pkt */
578 dma_addr_t rx_ring_dma;
579
580 unsigned int tx_flag;
581 unsigned long cur_tx;
582 unsigned long dirty_tx;
583 unsigned char *tx_buf[NUM_TX_DESC]; /* Tx bounce buffers */
584 unsigned char *tx_bufs; /* Tx bounce buffer region. */
585 dma_addr_t tx_bufs_dma;
586
587 signed char phys[4]; /* MII device addresses. */
588
589 /* Twister tune state. */
590 char twistie, twist_row, twist_col;
591
592 unsigned int watchdog_fired : 1;
593 unsigned int default_port : 4; /* Last dev->if_port value. */
594 unsigned int have_thread : 1;
595
596 spinlock_t lock;
597 spinlock_t rx_lock;
598
599 chip_t chipset;
600 u32 rx_config;
601 struct rtl_extra_stats xstats;
602
603 struct delayed_work thread;
604
605 struct mii_if_info mii;
606 unsigned int regs_len;
607 unsigned long fifo_copy_timeout;
608};
609
610MODULE_AUTHOR ("Jeff Garzik <jgarzik@pobox.com>");
611MODULE_DESCRIPTION ("RealTek RTL-8139 Fast Ethernet driver");
612MODULE_LICENSE("GPL");
613MODULE_VERSION(DRV_VERSION);
614
615module_param(use_io, int, 0);
616MODULE_PARM_DESC(use_io, "Force use of I/O access mode. 0=MMIO 1=PIO");
617module_param(multicast_filter_limit, int, 0);
618module_param_array(media, int, NULL, 0);
619module_param_array(full_duplex, int, NULL, 0);
620module_param(debug, int, 0);
621MODULE_PARM_DESC (debug, "8139too bitmapped message enable number");
622MODULE_PARM_DESC (multicast_filter_limit, "8139too maximum number of filtered multicast addresses");
623MODULE_PARM_DESC (media, "8139too: Bits 4+9: force full duplex, bit 5: 100Mbps");
624MODULE_PARM_DESC (full_duplex, "8139too: Force full duplex for board(s) (1)");
625
626static int read_eeprom (void __iomem *ioaddr, int location, int addr_len);
627static int rtl8139_open (struct net_device *dev);
628static int mdio_read (struct net_device *dev, int phy_id, int location);
629static void mdio_write (struct net_device *dev, int phy_id, int location,
630 int val);
631static void rtl8139_start_thread(struct rtl8139_private *tp);
632static void rtl8139_tx_timeout (struct net_device *dev);
633static void rtl8139_init_ring (struct net_device *dev);
634static netdev_tx_t rtl8139_start_xmit (struct sk_buff *skb,
635 struct net_device *dev);
636#ifdef CONFIG_NET_POLL_CONTROLLER
637static void rtl8139_poll_controller(struct net_device *dev);
638#endif
639static int rtl8139_set_mac_address(struct net_device *dev, void *p);
640static int rtl8139_poll(struct napi_struct *napi, int budget);
641static irqreturn_t rtl8139_interrupt (int irq, void *dev_instance);
642static int rtl8139_close (struct net_device *dev);
643static int netdev_ioctl (struct net_device *dev, struct ifreq *rq, int cmd);
644static struct net_device_stats *rtl8139_get_stats (struct net_device *dev);
645static void rtl8139_set_rx_mode (struct net_device *dev);
646static void __set_rx_mode (struct net_device *dev);
647static void rtl8139_hw_start (struct net_device *dev);
648static void rtl8139_thread (struct work_struct *work);
649static void rtl8139_tx_timeout_task(struct work_struct *work);
650static const struct ethtool_ops rtl8139_ethtool_ops;
651
652/* write MMIO register, with flush */
653/* Flush avoids rtl8139 bug w/ posted MMIO writes */
654#define RTL_W8_F(reg, val8) do { iowrite8 ((val8), ioaddr + (reg)); ioread8 (ioaddr + (reg)); } while (0)
655#define RTL_W16_F(reg, val16) do { iowrite16 ((val16), ioaddr + (reg)); ioread16 (ioaddr + (reg)); } while (0)
656#define RTL_W32_F(reg, val32) do { iowrite32 ((val32), ioaddr + (reg)); ioread32 (ioaddr + (reg)); } while (0)
657
658/* write MMIO register */
659#define RTL_W8(reg, val8) iowrite8 ((val8), ioaddr + (reg))
660#define RTL_W16(reg, val16) iowrite16 ((val16), ioaddr + (reg))
661#define RTL_W32(reg, val32) iowrite32 ((val32), ioaddr + (reg))
662
663/* read MMIO register */
664#define RTL_R8(reg) ioread8 (ioaddr + (reg))
665#define RTL_R16(reg) ioread16 (ioaddr + (reg))
666#define RTL_R32(reg) ioread32 (ioaddr + (reg))
667
668
669static const u16 rtl8139_intr_mask =
670 PCIErr | PCSTimeout | RxUnderrun | RxOverflow | RxFIFOOver |
671 TxErr | TxOK | RxErr | RxOK;
672
673static const u16 rtl8139_norx_intr_mask =
674 PCIErr | PCSTimeout | RxUnderrun |
675 TxErr | TxOK | RxErr ;
676
677#if RX_BUF_IDX == 0
678static const unsigned int rtl8139_rx_config =
679 RxCfgRcv8K | RxNoWrap |
680 (RX_FIFO_THRESH << RxCfgFIFOShift) |
681 (RX_DMA_BURST << RxCfgDMAShift);
682#elif RX_BUF_IDX == 1
683static const unsigned int rtl8139_rx_config =
684 RxCfgRcv16K | RxNoWrap |
685 (RX_FIFO_THRESH << RxCfgFIFOShift) |
686 (RX_DMA_BURST << RxCfgDMAShift);
687#elif RX_BUF_IDX == 2
688static const unsigned int rtl8139_rx_config =
689 RxCfgRcv32K | RxNoWrap |
690 (RX_FIFO_THRESH << RxCfgFIFOShift) |
691 (RX_DMA_BURST << RxCfgDMAShift);
692#elif RX_BUF_IDX == 3
693static const unsigned int rtl8139_rx_config =
694 RxCfgRcv64K |
695 (RX_FIFO_THRESH << RxCfgFIFOShift) |
696 (RX_DMA_BURST << RxCfgDMAShift);
697#else
698#error "Invalid configuration for 8139_RXBUF_IDX"
699#endif
700
701static const unsigned int rtl8139_tx_config =
702 TxIFG96 | (TX_DMA_BURST << TxDMAShift) | (TX_RETRY << TxRetryShift);
703
/*
 * __rtl8139_cleanup_dev - release everything rtl8139_init_board acquired.
 *
 * Unmaps the chip registers (if mapped), releases the PCI regions,
 * frees the net_device and clears the PCI driver-data pointer.  Safe
 * to call on a partially initialised device.
 */
static void __rtl8139_cleanup_dev (struct net_device *dev)
{
	struct rtl8139_private *tp = netdev_priv(dev);
	struct pci_dev *pdev;

	assert (dev != NULL);
	assert (tp->pci_dev != NULL);
	/* Save the pdev pointer: tp lives inside dev and dies with it. */
	pdev = tp->pci_dev;

	if (tp->mmio_addr)
		pci_iounmap (pdev, tp->mmio_addr);

	/* it's ok to call this even if we have no regions to free */
	pci_release_regions (pdev);

	free_netdev(dev);
	pci_set_drvdata (pdev, NULL);
}
722
723
724static void rtl8139_chip_reset (void __iomem *ioaddr)
725{
726 int i;
727
728 /* Soft reset the chip. */
729 RTL_W8 (ChipCmd, CmdReset);
730
731 /* Check that the chip has finished the reset. */
732 for (i = 1000; i > 0; i--) {
733 barrier();
734 if ((RTL_R8 (ChipCmd) & CmdReset) == 0)
735 break;
736 udelay (10);
737 }
738}
739
740
741static __devinit struct net_device * rtl8139_init_board (struct pci_dev *pdev)
742{
743 void __iomem *ioaddr;
744 struct net_device *dev;
745 struct rtl8139_private *tp;
746 u8 tmp8;
747 int rc, disable_dev_on_err = 0;
748 unsigned int i;
749 unsigned long pio_start, pio_end, pio_flags, pio_len;
750 unsigned long mmio_start, mmio_end, mmio_flags, mmio_len;
751 u32 version;
752
753 assert (pdev != NULL);
754
755 /* dev and priv zeroed in alloc_etherdev */
756 dev = alloc_etherdev (sizeof (*tp));
757 if (dev == NULL) {
758 dev_err(&pdev->dev, "Unable to alloc new net device\n");
759 return ERR_PTR(-ENOMEM);
760 }
761 SET_NETDEV_DEV(dev, &pdev->dev);
762
763 tp = netdev_priv(dev);
764 tp->pci_dev = pdev;
765
766 /* enable device (incl. PCI PM wakeup and hotplug setup) */
767 rc = pci_enable_device (pdev);
768 if (rc)
769 goto err_out;
770
771 pio_start = pci_resource_start (pdev, 0);
772 pio_end = pci_resource_end (pdev, 0);
773 pio_flags = pci_resource_flags (pdev, 0);
774 pio_len = pci_resource_len (pdev, 0);
775
776 mmio_start = pci_resource_start (pdev, 1);
777 mmio_end = pci_resource_end (pdev, 1);
778 mmio_flags = pci_resource_flags (pdev, 1);
779 mmio_len = pci_resource_len (pdev, 1);
780
781 /* set this immediately, we need to know before
782 * we talk to the chip directly */
783 pr_debug("PIO region size == 0x%02lX\n", pio_len);
784 pr_debug("MMIO region size == 0x%02lX\n", mmio_len);
785
786retry:
787 if (use_io) {
788 /* make sure PCI base addr 0 is PIO */
789 if (!(pio_flags & IORESOURCE_IO)) {
790 dev_err(&pdev->dev, "region #0 not a PIO resource, aborting\n");
791 rc = -ENODEV;
792 goto err_out;
793 }
794 /* check for weird/broken PCI region reporting */
795 if (pio_len < RTL_MIN_IO_SIZE) {
796 dev_err(&pdev->dev, "Invalid PCI I/O region size(s), aborting\n");
797 rc = -ENODEV;
798 goto err_out;
799 }
800 } else {
801 /* make sure PCI base addr 1 is MMIO */
802 if (!(mmio_flags & IORESOURCE_MEM)) {
803 dev_err(&pdev->dev, "region #1 not an MMIO resource, aborting\n");
804 rc = -ENODEV;
805 goto err_out;
806 }
807 if (mmio_len < RTL_MIN_IO_SIZE) {
808 dev_err(&pdev->dev, "Invalid PCI mem region size(s), aborting\n");
809 rc = -ENODEV;
810 goto err_out;
811 }
812 }
813
814 rc = pci_request_regions (pdev, DRV_NAME);
815 if (rc)
816 goto err_out;
817 disable_dev_on_err = 1;
818
819 /* enable PCI bus-mastering */
820 pci_set_master (pdev);
821
822 if (use_io) {
823 ioaddr = pci_iomap(pdev, 0, 0);
824 if (!ioaddr) {
825 dev_err(&pdev->dev, "cannot map PIO, aborting\n");
826 rc = -EIO;
827 goto err_out;
828 }
829 dev->base_addr = pio_start;
830 tp->regs_len = pio_len;
831 } else {
832 /* ioremap MMIO region */
833 ioaddr = pci_iomap(pdev, 1, 0);
834 if (ioaddr == NULL) {
835 dev_err(&pdev->dev, "cannot remap MMIO, trying PIO\n");
836 pci_release_regions(pdev);
837 use_io = 1;
838 goto retry;
839 }
840 dev->base_addr = (long) ioaddr;
841 tp->regs_len = mmio_len;
842 }
843 tp->mmio_addr = ioaddr;
844
845 /* Bring old chips out of low-power mode. */
846 RTL_W8 (HltClk, 'R');
847
848 /* check for missing/broken hardware */
849 if (RTL_R32 (TxConfig) == 0xFFFFFFFF) {
850 dev_err(&pdev->dev, "Chip not responding, ignoring board\n");
851 rc = -EIO;
852 goto err_out;
853 }
854
855 /* identify chip attached to board */
856 version = RTL_R32 (TxConfig) & HW_REVID_MASK;
857 for (i = 0; i < ARRAY_SIZE (rtl_chip_info); i++)
858 if (version == rtl_chip_info[i].version) {
859 tp->chipset = i;
860 goto match;
861 }
862
863 /* if unknown chip, assume array element #0, original RTL-8139 in this case */
864 i = 0;
865 dev_dbg(&pdev->dev, "unknown chip version, assuming RTL-8139\n");
866 dev_dbg(&pdev->dev, "TxConfig = 0x%x\n", RTL_R32 (TxConfig));
867 tp->chipset = 0;
868
869match:
870 pr_debug("chipset id (%d) == index %d, '%s'\n",
871 version, i, rtl_chip_info[i].name);
872
873 if (tp->chipset >= CH_8139B) {
874 u8 new_tmp8 = tmp8 = RTL_R8 (Config1);
875 pr_debug("PCI PM wakeup\n");
876 if ((rtl_chip_info[tp->chipset].flags & HasLWake) &&
877 (tmp8 & LWAKE))
878 new_tmp8 &= ~LWAKE;
879 new_tmp8 |= Cfg1_PM_Enable;
880 if (new_tmp8 != tmp8) {
881 RTL_W8 (Cfg9346, Cfg9346_Unlock);
882 RTL_W8 (Config1, tmp8);
883 RTL_W8 (Cfg9346, Cfg9346_Lock);
884 }
885 if (rtl_chip_info[tp->chipset].flags & HasLWake) {
886 tmp8 = RTL_R8 (Config4);
887 if (tmp8 & LWPTN) {
888 RTL_W8 (Cfg9346, Cfg9346_Unlock);
889 RTL_W8 (Config4, tmp8 & ~LWPTN);
890 RTL_W8 (Cfg9346, Cfg9346_Lock);
891 }
892 }
893 } else {
894 pr_debug("Old chip wakeup\n");
895 tmp8 = RTL_R8 (Config1);
896 tmp8 &= ~(SLEEP | PWRDN);
897 RTL_W8 (Config1, tmp8);
898 }
899
900 rtl8139_chip_reset (ioaddr);
901
902 return dev;
903
904err_out:
905 __rtl8139_cleanup_dev (dev);
906 if (disable_dev_on_err)
907 pci_disable_device (pdev);
908 return ERR_PTR(rc);
909}
910
/* net_device callback table; MTU changes and MAC address validation
 * go through the generic eth_* helpers, everything else is driver
 * specific. */
static const struct net_device_ops rtl8139_netdev_ops = {
	.ndo_open		= rtl8139_open,
	.ndo_stop		= rtl8139_close,
	.ndo_get_stats		= rtl8139_get_stats,
	.ndo_change_mtu		= eth_change_mtu,
	.ndo_validate_addr	= eth_validate_addr,
	.ndo_set_mac_address 	= rtl8139_set_mac_address,
	.ndo_start_xmit		= rtl8139_start_xmit,
	.ndo_set_multicast_list	= rtl8139_set_rx_mode,
	.ndo_do_ioctl		= netdev_ioctl,
	.ndo_tx_timeout		= rtl8139_tx_timeout,
#ifdef CONFIG_NET_POLL_CONTROLLER
	.ndo_poll_controller	= rtl8139_poll_controller,
#endif
};
926
/*
 * rtl8139_init_one - PCI probe entry point.
 *
 * Rejects 8139C+ parts (served by the 8139cp driver), maps the board
 * via rtl8139_init_board(), reads the MAC address from the EEPROM,
 * wires up netdev/ethtool ops and NAPI, applies per-board media and
 * duplex module parameters, registers the net_device and finally
 * drops the chip into low-power mode until open().  Returns 0 on
 * success or a negative errno.
 */
static int __devinit rtl8139_init_one (struct pci_dev *pdev,
				       const struct pci_device_id *ent)
{
	struct net_device *dev = NULL;
	struct rtl8139_private *tp;
	int i, addr_len, option;
	void __iomem *ioaddr;
	/* board_idx counts probed boards to index the media[] and
	 * full_duplex[] module-parameter arrays. */
	static int board_idx = -1;

	assert (pdev != NULL);
	assert (ent != NULL);

	board_idx++;

	/* when we're built into the kernel, the driver version message
	 * is only printed if at least one 8139 board has been found
	 */
#ifndef MODULE
	{
		static int printed_version;
		if (!printed_version++)
			pr_info(RTL8139_DRIVER_NAME "\n");
	}
#endif

	/* Revision >= 0x20 parts are 8139C+ chips: punt to 8139cp. */
	if (pdev->vendor == PCI_VENDOR_ID_REALTEK &&
	    pdev->device == PCI_DEVICE_ID_REALTEK_8139 && pdev->revision >= 0x20) {
		dev_info(&pdev->dev,
			 "This (id %04x:%04x rev %02x) is an enhanced 8139C+ chip, use 8139cp\n",
			 pdev->vendor, pdev->device, pdev->revision);
		return -ENODEV;
	}

	/* Known-broken MMIO on this subsystem: force port I/O. */
	if (pdev->vendor == PCI_VENDOR_ID_REALTEK &&
	    pdev->device == PCI_DEVICE_ID_REALTEK_8139 &&
	    pdev->subsystem_vendor == PCI_VENDOR_ID_ATHEROS &&
	    pdev->subsystem_device == PCI_DEVICE_ID_REALTEK_8139) {
		pr_info("OQO Model 2 detected. Forcing PIO\n");
		use_io = 1;
	}

	dev = rtl8139_init_board (pdev);
	if (IS_ERR(dev))
		return PTR_ERR(dev);

	assert (dev != NULL);
	tp = netdev_priv(dev);
	tp->dev = dev;

	ioaddr = tp->mmio_addr;
	assert (ioaddr != NULL);

	/* An EEPROM id of 0x8129 means a 256-word (8-bit address) part. */
	addr_len = read_eeprom (ioaddr, 0, 8) == 0x8129 ? 8 : 6;
	for (i = 0; i < 3; i++)
		((__le16 *) (dev->dev_addr))[i] =
		    cpu_to_le16(read_eeprom (ioaddr, i + 7, addr_len));
	memcpy(dev->perm_addr, dev->dev_addr, dev->addr_len);

	/* The Rtl8139-specific entries in the device structure. */
	dev->netdev_ops = &rtl8139_netdev_ops;
	dev->ethtool_ops = &rtl8139_ethtool_ops;
	dev->watchdog_timeo = TX_TIMEOUT;
	netif_napi_add(dev, &tp->napi, rtl8139_poll, 64);

	/* note: the hardware is not capable of sg/csum/highdma, however
	 * through the use of skb_copy_and_csum_dev we enable these
	 * features
	 */
	dev->features |= NETIF_F_SG | NETIF_F_HW_CSUM | NETIF_F_HIGHDMA;
	dev->vlan_features = dev->features;

	dev->irq = pdev->irq;

	/* tp zeroed and aligned in alloc_etherdev */
	/* NOTE(review): tp already points at netdev_priv(dev) from the
	 * assignment above; this reassignment is redundant. */
	tp = netdev_priv(dev);

	/* note: tp->chipset set in rtl8139_init_board */
	tp->drv_flags = board_info[ent->driver_data].hw_flags;
	tp->mmio_addr = ioaddr;
	tp->msg_enable =
		(debug < 0 ? RTL8139_DEF_MSG_ENABLE : ((1 << debug) - 1));
	spin_lock_init (&tp->lock);
	spin_lock_init (&tp->rx_lock);
	INIT_DELAYED_WORK(&tp->thread, rtl8139_thread);
	tp->mii.dev = dev;
	tp->mii.mdio_read = mdio_read;
	tp->mii.mdio_write = mdio_write;
	tp->mii.phy_id_mask = 0x3f;
	tp->mii.reg_num_mask = 0x1f;

	/* dev is fully set up and ready to use now */
	pr_debug("about to register device named %s (%p)...\n",
		 dev->name, dev);
	i = register_netdev (dev);
	if (i) goto err_out;

	pci_set_drvdata (pdev, dev);

	netdev_info(dev, "%s at 0x%lx, %pM, IRQ %d\n",
		    board_info[ent->driver_data].name,
		    dev->base_addr, dev->dev_addr, dev->irq);

	netdev_dbg(dev, "Identified 8139 chip type '%s'\n",
		   rtl_chip_info[tp->chipset].name);

	/* Find the connected MII xcvrs.
	   Doing this in open() would allow detecting external xcvrs later, but
	   takes too much time. */
#ifdef CONFIG_8139TOO_8129
	if (tp->drv_flags & HAS_MII_XCVR) {
		int phy, phy_idx = 0;
		for (phy = 0; phy < 32 && phy_idx < sizeof(tp->phys); phy++) {
			int mii_status = mdio_read(dev, phy, 1);
			if (mii_status != 0xffff && mii_status != 0x0000) {
				u16 advertising = mdio_read(dev, phy, 4);
				tp->phys[phy_idx++] = phy;
				netdev_info(dev, "MII transceiver %d status 0x%04x advertising %04x\n",
					    phy, mii_status, advertising);
			}
		}
		if (phy_idx == 0) {
			netdev_info(dev, "No MII transceivers found! Assuming SYM transceiver\n");
			tp->phys[0] = 32;
		}
	} else
#endif
		/* phy id 32 selects the chip's internal SYM transceiver. */
		tp->phys[0] = 32;
	tp->mii.phy_id = tp->phys[0];

	/* The lower four bits are the media type. */
	option = (board_idx >= MAX_UNITS) ? 0 : media[board_idx];
	if (option > 0) {
		tp->mii.full_duplex = (option & 0x210) ? 1 : 0;
		tp->default_port = option & 0xFF;
		if (tp->default_port)
			tp->mii.force_media = 1;
	}
	if (board_idx < MAX_UNITS && full_duplex[board_idx] > 0)
		tp->mii.full_duplex = full_duplex[board_idx];
	if (tp->mii.full_duplex) {
		netdev_info(dev, "Media type forced to Full Duplex\n");
		/* Changing the MII-advertised media because might prevent
		   re-connection. */
		tp->mii.force_media = 1;
	}
	if (tp->default_port) {
		netdev_info(dev, " Forcing %dMbps %s-duplex operation\n",
			    (option & 0x20 ? 100 : 10),
			    (option & 0x10 ? "full" : "half"));
		mdio_write(dev, tp->phys[0], 0,
			   ((option & 0x20) ? 0x2000 : 0) |	/* 100Mbps? */
			   ((option & 0x10) ? 0x0100 : 0));	/* Full duplex? */
	}

	/* Put the chip into low-power mode. */
	if (rtl_chip_info[tp->chipset].flags & HasHltClk)
		RTL_W8 (HltClk, 'H');	/* 'R' would leave the clock running. */

	return 0;

err_out:
	__rtl8139_cleanup_dev (dev);
	pci_disable_device (pdev);
	return i;
}
1092
1093
/* PCI removal callback: undo everything rtl8139_init_one set up.
 * The delayed-work thread is cancelled *before* unregister_netdev so
 * it cannot reschedule itself against a device being torn down. */
static void __devexit rtl8139_remove_one (struct pci_dev *pdev)
{
	struct net_device *dev = pci_get_drvdata (pdev);
	struct rtl8139_private *tp = netdev_priv(dev);

	assert (dev != NULL);

	/* Wait for any in-flight link-watch / tx-timeout work to finish. */
	cancel_delayed_work_sync(&tp->thread);

	unregister_netdev (dev);

	__rtl8139_cleanup_dev (dev);
	pci_disable_device (pdev);
}
1108
1109
/* Serial EEPROM section. */

/* EEPROM_Ctrl bits.  These drive the 93C46 via the Cfg9346 register. */
#define EE_SHIFT_CLK	0x04	/* EEPROM shift clock. */
#define EE_CS			0x08	/* EEPROM chip select. */
#define EE_DATA_WRITE	0x02	/* EEPROM chip data in. */
#define EE_WRITE_0		0x00	/* Shift out a 0 bit. */
#define EE_WRITE_1		0x02	/* Shift out a 1 bit (same as EE_DATA_WRITE). */
#define EE_DATA_READ	0x01	/* EEPROM chip data out. */
#define EE_ENB			(0x80 | EE_CS)	/* Programming-mode enable + chip select. */

/* Delay between EEPROM clock transitions.
   No extra delay is needed with 33Mhz PCI, but 66Mhz may change this.
 */

/* A dummy read of Cfg9346 flushes the preceding PCI write and spaces
 * the clock edges. */
#define eeprom_delay()	(void)RTL_R32(Cfg9346)

/* The EEPROM commands include the alway-set leading bit. */
#define EE_WRITE_CMD	(5)	/* 0b101 */
#define EE_READ_CMD		(6)	/* 0b110 */
#define EE_ERASE_CMD	(7)	/* 0b111 */
1131
/* Bit-bang one 16-bit word out of the serial EEPROM at @location.
 * @addr_len is the address width in bits (6 for 93C46, 8 for 93C66).
 * Probe-time only (__devinit); returns the word read. */
static int __devinit read_eeprom (void __iomem *ioaddr, int location, int addr_len)
{
	int i;
	unsigned retval = 0;
	int read_cmd = location | (EE_READ_CMD << addr_len);

	/* Raise chip select with the EEPROM in programming mode. */
	RTL_W8 (Cfg9346, EE_ENB & ~EE_CS);
	RTL_W8 (Cfg9346, EE_ENB);
	eeprom_delay ();

	/* Shift the read command bits out, MSB first, clocking each bit. */
	for (i = 4 + addr_len; i >= 0; i--) {
		int dataval = (read_cmd & (1 << i)) ? EE_DATA_WRITE : 0;
		RTL_W8 (Cfg9346, EE_ENB | dataval);
		eeprom_delay ();
		RTL_W8 (Cfg9346, EE_ENB | dataval | EE_SHIFT_CLK);
		eeprom_delay ();
	}
	RTL_W8 (Cfg9346, EE_ENB);
	eeprom_delay ();

	/* Clock in the 16 data bits, MSB first. */
	for (i = 16; i > 0; i--) {
		RTL_W8 (Cfg9346, EE_ENB | EE_SHIFT_CLK);
		eeprom_delay ();
		retval =
		    (retval << 1) | ((RTL_R8 (Cfg9346) & EE_DATA_READ) ? 1 :
				     0);
		RTL_W8 (Cfg9346, EE_ENB);
		eeprom_delay ();
	}

	/* Terminate the EEPROM access (drop chip select). */
	RTL_W8 (Cfg9346, ~EE_CS);
	eeprom_delay ();

	return retval;
}
1169
1170/* MII serial management: mostly bogus for now. */
1171/* Read and write the MII management registers using software-generated
1172 serial MDIO protocol.
1173 The maximum data clock rate is 2.5 Mhz. The minimum timing is usually
1174 met by back-to-back PCI I/O cycles, but we insert a delay to avoid
1175 "overclocking" issues. */
/* Bit positions in the Config4 register used for bit-banged MDIO. */
#define MDIO_DIR		0x80	/* Direction: set = driving MDIO line. */
#define MDIO_DATA_OUT	0x04
#define MDIO_DATA_IN	0x02
#define MDIO_CLK		0x01
#define MDIO_WRITE0 (MDIO_DIR)
#define MDIO_WRITE1 (MDIO_DIR | MDIO_DATA_OUT)

/* Dummy read of Config4 to pace MDIO clock transitions. */
#define mdio_delay()	RTL_R8(Config4)


/* Map MII register numbers (array index 0..7) to the equivalent
 * internal 8139 register; 0 means "no equivalent, reads as 0". */
static const char mii_2_8139_map[8] = {
	BasicModeCtrl,		/* MII 0: BMCR */
	BasicModeStatus,	/* MII 1: BMSR */
	0,
	0,
	NWayAdvert,		/* MII 4: autoneg advertisement */
	NWayLPAR,		/* MII 5: link partner ability */
	NWayExpansion,		/* MII 6: autoneg expansion */
	0
};
1196
1197
#ifdef CONFIG_8139TOO_8129
/* Syncronize the MII management interface by shifting 32 one bits out.
 * (The loop actually clocks 33 bits; harmless, the preamble only needs
 * to be *at least* 32 ones.) */
static void mdio_sync (void __iomem *ioaddr)
{
	int i;

	for (i = 32; i >= 0; i--) {
		RTL_W8 (Config4, MDIO_WRITE1);
		mdio_delay ();
		RTL_W8 (Config4, MDIO_WRITE1 | MDIO_CLK);
		mdio_delay ();
	}
}
#endif
1212
/* Read MII register @location of PHY @phy_id.
 * phy_id > 31 denotes the 8139's internal transceiver: the access is
 * redirected to the equivalent chip register via mii_2_8139_map.
 * External PHYs (8129 only) are reached by bit-banging Config4. */
static int mdio_read (struct net_device *dev, int phy_id, int location)
{
	struct rtl8139_private *tp = netdev_priv(dev);
	int retval = 0;
#ifdef CONFIG_8139TOO_8129
	void __iomem *ioaddr = tp->mmio_addr;
	/* 14-bit MDIO frame: start/opcode (0xf6 pattern), PHY addr, reg. */
	int mii_cmd = (0xf6 << 10) | (phy_id << 5) | location;
	int i;
#endif

	if (phy_id > 31) {	/* Really a 8139.  Use internal registers. */
		void __iomem *ioaddr = tp->mmio_addr;
		return location < 8 && mii_2_8139_map[location] ?
		    RTL_R16 (mii_2_8139_map[location]) : 0;
	}

#ifdef CONFIG_8139TOO_8129
	mdio_sync (ioaddr);
	/* Shift the read command bits out. */
	for (i = 15; i >= 0; i--) {
		int dataval = (mii_cmd & (1 << i)) ? MDIO_DATA_OUT : 0;

		RTL_W8 (Config4, MDIO_DIR | dataval);
		mdio_delay ();
		RTL_W8 (Config4, MDIO_DIR | dataval | MDIO_CLK);
		mdio_delay ();
	}

	/* Read the two transition, 16 data, and wire-idle bits. */
	for (i = 19; i > 0; i--) {
		RTL_W8 (Config4, 0);
		mdio_delay ();
		retval = (retval << 1) | ((RTL_R8 (Config4) & MDIO_DATA_IN) ? 1 : 0);
		RTL_W8 (Config4, MDIO_CLK);
		mdio_delay ();
	}
#endif

	/* Drop the trailing idle bit, keep the 16 data bits. */
	return (retval >> 1) & 0xffff;
}
1253
1254
/* Write @value to MII register @location of PHY @phy_id.
 * As in mdio_read(), phy_id > 31 means the internal transceiver and
 * maps onto chip registers; writes to BMCR additionally require the
 * Cfg9346 unlock/lock dance.  External PHYs bit-bang Config4. */
static void mdio_write (struct net_device *dev, int phy_id, int location,
			int value)
{
	struct rtl8139_private *tp = netdev_priv(dev);
#ifdef CONFIG_8139TOO_8129
	void __iomem *ioaddr = tp->mmio_addr;
	/* 32-bit MDIO write frame: start/opcode, PHY addr, reg, data. */
	int mii_cmd = (0x5002 << 16) | (phy_id << 23) | (location << 18) | value;
	int i;
#endif

	if (phy_id > 31) {	/* Really a 8139.  Use internal registers. */
		void __iomem *ioaddr = tp->mmio_addr;
		if (location == 0) {
			/* BMCR writes are protected by the config lock. */
			RTL_W8 (Cfg9346, Cfg9346_Unlock);
			RTL_W16 (BasicModeCtrl, value);
			RTL_W8 (Cfg9346, Cfg9346_Lock);
		} else if (location < 8 && mii_2_8139_map[location])
			RTL_W16 (mii_2_8139_map[location], value);
		return;
	}

#ifdef CONFIG_8139TOO_8129
	mdio_sync (ioaddr);

	/* Shift the command bits out. */
	for (i = 31; i >= 0; i--) {
		int dataval =
		    (mii_cmd & (1 << i)) ? MDIO_WRITE1 : MDIO_WRITE0;
		RTL_W8 (Config4, dataval);
		mdio_delay ();
		RTL_W8 (Config4, dataval | MDIO_CLK);
		mdio_delay ();
	}
	/* Clear out extra bits. */
	for (i = 2; i > 0; i--) {
		RTL_W8 (Config4, 0);
		mdio_delay ();
		RTL_W8 (Config4, MDIO_CLK);
		mdio_delay ();
	}
#endif
}
1297
1298
1299static int rtl8139_open (struct net_device *dev)
1300{
1301 struct rtl8139_private *tp = netdev_priv(dev);
1302 int retval;
1303 void __iomem *ioaddr = tp->mmio_addr;
1304
1305 retval = request_irq (dev->irq, rtl8139_interrupt, IRQF_SHARED, dev->name, dev);
1306 if (retval)
1307 return retval;
1308
1309 tp->tx_bufs = dma_alloc_coherent(&tp->pci_dev->dev, TX_BUF_TOT_LEN,
1310 &tp->tx_bufs_dma, GFP_KERNEL);
1311 tp->rx_ring = dma_alloc_coherent(&tp->pci_dev->dev, RX_BUF_TOT_LEN,
1312 &tp->rx_ring_dma, GFP_KERNEL);
1313 if (tp->tx_bufs == NULL || tp->rx_ring == NULL) {
1314 free_irq(dev->irq, dev);
1315
1316 if (tp->tx_bufs)
1317 dma_free_coherent(&tp->pci_dev->dev, TX_BUF_TOT_LEN,
1318 tp->tx_bufs, tp->tx_bufs_dma);
1319 if (tp->rx_ring)
1320 dma_free_coherent(&tp->pci_dev->dev, RX_BUF_TOT_LEN,
1321 tp->rx_ring, tp->rx_ring_dma);
1322
1323 return -ENOMEM;
1324
1325 }
1326
1327 napi_enable(&tp->napi);
1328
1329 tp->mii.full_duplex = tp->mii.force_media;
1330 tp->tx_flag = (TX_FIFO_THRESH << 11) & 0x003f0000;
1331
1332 rtl8139_init_ring (dev);
1333 rtl8139_hw_start (dev);
1334 netif_start_queue (dev);
1335
1336 netif_dbg(tp, ifup, dev,
1337 "%s() ioaddr %#llx IRQ %d GP Pins %02x %s-duplex\n",
1338 __func__,
1339 (unsigned long long)pci_resource_start (tp->pci_dev, 1),
1340 dev->irq, RTL_R8 (MediaStatus),
1341 tp->mii.full_duplex ? "full" : "half");
1342
1343 rtl8139_start_thread(tp);
1344
1345 return 0;
1346}
1347
1348
1349static void rtl_check_media (struct net_device *dev, unsigned int init_media)
1350{
1351 struct rtl8139_private *tp = netdev_priv(dev);
1352
1353 if (tp->phys[0] >= 0) {
1354 mii_check_media(&tp->mii, netif_msg_link(tp), init_media);
1355 }
1356}
1357
/* Start the hardware at open or resume.
 * Register programming order below matters: Rx/Tx must be enabled
 * before the transfer-threshold configuration registers are written. */
static void rtl8139_hw_start (struct net_device *dev)
{
	struct rtl8139_private *tp = netdev_priv(dev);
	void __iomem *ioaddr = tp->mmio_addr;
	u32 i;
	u8 tmp;

	/* Bring old chips out of low-power mode. */
	if (rtl_chip_info[tp->chipset].flags & HasHltClk)
		RTL_W8 (HltClk, 'R');

	rtl8139_chip_reset (ioaddr);

	/* unlock Config[01234] and BMCR register writes */
	RTL_W8_F (Cfg9346, Cfg9346_Unlock);
	/* Restore our idea of the MAC address. */
	RTL_W32_F (MAC0 + 0, le32_to_cpu (*(__le32 *) (dev->dev_addr + 0)));
	/* NOTE(review): the upper MAC half is written as a 32-bit access
	 * although only 16 bits of address remain -- presumably a chip
	 * access-width requirement; confirm against the datasheet. */
	RTL_W32_F (MAC0 + 4, le16_to_cpu (*(__le16 *) (dev->dev_addr + 4)));

	tp->cur_rx = 0;

	/* init Rx ring buffer DMA address */
	RTL_W32_F (RxBuf, tp->rx_ring_dma);

	/* Must enable Tx/Rx before setting transfer thresholds! */
	RTL_W8 (ChipCmd, CmdRxEnb | CmdTxEnb);

	tp->rx_config = rtl8139_rx_config | AcceptBroadcast | AcceptMyPhys;
	RTL_W32 (RxConfig, tp->rx_config);
	RTL_W32 (TxConfig, rtl8139_tx_config);

	rtl_check_media (dev, 1);

	if (tp->chipset >= CH_8139B) {
		/* Disable magic packet scanning, which is enabled
		 * when PM is enabled in Config1.  It can be reenabled
		 * via ETHTOOL_SWOL if desired.  */
		RTL_W8 (Config3, RTL_R8 (Config3) & ~Cfg3_Magic);
	}

	netdev_dbg(dev, "init buffer addresses\n");

	/* Lock Config[01234] and BMCR register writes */
	RTL_W8 (Cfg9346, Cfg9346_Lock);

	/* init Tx buffer DMA addresses: each slot's bus address is the
	 * base DMA address plus the slot's offset into the buffer area. */
	for (i = 0; i < NUM_TX_DESC; i++)
		RTL_W32_F (TxAddr0 + (i * 4), tp->tx_bufs_dma + (tp->tx_buf[i] - tp->tx_bufs));

	RTL_W32 (RxMissed, 0);

	rtl8139_set_rx_mode (dev);

	/* no early-rx interrupts */
	RTL_W16 (MultiIntr, RTL_R16 (MultiIntr) & MultiIntrClear);

	/* make sure RxTx has started */
	tmp = RTL_R8 (ChipCmd);
	if ((!(tmp & CmdRxEnb)) || (!(tmp & CmdTxEnb)))
		RTL_W8 (ChipCmd, CmdRxEnb | CmdTxEnb);

	/* Enable all known interrupts by setting the interrupt mask. */
	RTL_W16 (IntrMask, rtl8139_intr_mask);
}
1423
1424
1425/* Initialize the Rx and Tx rings, along with various 'dev' bits. */
1426static void rtl8139_init_ring (struct net_device *dev)
1427{
1428 struct rtl8139_private *tp = netdev_priv(dev);
1429 int i;
1430
1431 tp->cur_rx = 0;
1432 tp->cur_tx = 0;
1433 tp->dirty_tx = 0;
1434
1435 for (i = 0; i < NUM_TX_DESC; i++)
1436 tp->tx_buf[i] = &tp->tx_bufs[i * TX_BUF_SIZE];
1437}
1438
1439
/* This must be global for CONFIG_8139TOO_TUNE_TWISTER case.
 * Interval (in jiffies) until the next run of the driver thread;
 * the twister state machine shortens it to HZ/10 while tuning. */
static int next_tick = 3 * HZ;

#ifndef CONFIG_8139TOO_TUNE_TWISTER
static inline void rtl8139_tune_twister (struct net_device *dev,
				  struct rtl8139_private *tp) {}
#else
/* Undocumented analog tuning magic numbers for PARA78/PARA7c. */
enum TwisterParamVals {
	PARA78_default	= 0x78fa8388,
	PARA7c_default	= 0xcb38de43,	/* param[0][3] */
	PARA7c_xxx	= 0xcb38de43,
};

/* PARA7c tuning table: rows selected by echo-time (cable length),
 * columns stepped through one per 100ms. */
static const unsigned long param[4][4] = {
	{0xcb39de43, 0xcb39ce43, 0xfb38de03, 0xcb38de43},
	{0xcb39de43, 0xcb39ce43, 0xcb39ce83, 0xcb39ce83},
	{0xcb39de43, 0xcb39ce43, 0xcb39ce83, 0xcb39ce83},
	{0xbb39de43, 0xbb39ce43, 0xbb39ce83, 0xbb39ce83}
};

/* One step of the twister-tuning state machine (tp->twistie holds the
 * current state; 0 = inactive).  Called from the driver thread. */
static void rtl8139_tune_twister (struct net_device *dev,
				  struct rtl8139_private *tp)
{
	int linkcase;
	void __iomem *ioaddr = tp->mmio_addr;

	/* This is a complicated state machine to configure the "twister" for
	   impedance/echos based on the cable length.
	   All of this is magic and undocumented.
	 */
	switch (tp->twistie) {
	case 1:
		if (RTL_R16 (CSCR) & CSCR_LinkOKBit) {
			/* We have link beat, let us tune the twister. */
			RTL_W16 (CSCR, CSCR_LinkDownOffCmd);
			tp->twistie = 2;	/* Change to state 2. */
			next_tick = HZ / 10;
		} else {
			/* Just put in some reasonable defaults for when beat returns. */
			RTL_W16 (CSCR, CSCR_LinkDownCmd);
			RTL_W32 (FIFOTMS, 0x20);	/* Turn on cable test mode. */
			RTL_W32 (PARA78, PARA78_default);
			RTL_W32 (PARA7c, PARA7c_default);
			tp->twistie = 0;	/* Bail from future actions. */
		}
		break;
	case 2:
		/* Read how long it took to hear the echo. */
		linkcase = RTL_R16 (CSCR) & CSCR_LinkStatusBits;
		if (linkcase == 0x7000)
			tp->twist_row = 3;
		else if (linkcase == 0x3000)
			tp->twist_row = 2;
		else if (linkcase == 0x1000)
			tp->twist_row = 1;
		else
			tp->twist_row = 0;
		tp->twist_col = 0;
		tp->twistie = 3;	/* Change to state 3. */
		next_tick = HZ / 10;
		break;
	case 3:
		/* Put out four tuning parameters, one per 100msec. */
		if (tp->twist_col == 0)
			RTL_W16 (FIFOTMS, 0);
		RTL_W32 (PARA7c, param[(int) tp->twist_row]
			 [(int) tp->twist_col]);
		next_tick = HZ / 10;
		if (++tp->twist_col >= 4) {
			/* For short cables we are done.
			   For long cables (row == 3) check for mistune. */
			tp->twistie =
			    (tp->twist_row == 3) ? 4 : 0;
		}
		break;
	case 4:
		/* Special case for long cables: check for mistune. */
		if ((RTL_R16 (CSCR) &
		     CSCR_LinkStatusBits) == 0x7000) {
			tp->twistie = 0;
			break;
		} else {
			RTL_W32 (PARA7c, 0xfb38de03);
			tp->twistie = 5;
			next_tick = HZ / 10;
		}
		break;
	case 5:
		/* Retune for shorter cable (column 2). */
		RTL_W32 (FIFOTMS, 0x20);
		RTL_W32 (PARA78, PARA78_default);
		RTL_W32 (PARA7c, PARA7c_default);
		RTL_W32 (FIFOTMS, 0x00);
		tp->twist_row = 2;
		tp->twist_col = 0;
		tp->twistie = 3;
		next_tick = HZ / 10;
		break;

	default:
		/* do nothing */
		break;
	}
}
#endif /* CONFIG_8139TOO_TUNE_TWISTER */
1545
/* One periodic pass of the driver thread: track the link partner's
 * duplex via MII and run the twister tuner.  Also resets next_tick to
 * the slow 60s poll interval (the tuner may shorten it again). */
static inline void rtl8139_thread_iter (struct net_device *dev,
				 struct rtl8139_private *tp,
				 void __iomem *ioaddr)
{
	int mii_lpa;

	mii_lpa = mdio_read (dev, tp->phys[0], MII_LPA);

	/* 0xffff means the PHY did not answer; skip duplex tracking then,
	 * and always when the user forced the media type. */
	if (!tp->mii.force_media && mii_lpa != 0xffff) {
		int duplex = ((mii_lpa & LPA_100FULL) ||
			      (mii_lpa & 0x01C0) == 0x0040);
		if (tp->mii.full_duplex != duplex) {
			tp->mii.full_duplex = duplex;

			if (mii_lpa) {
				netdev_info(dev, "Setting %s-duplex based on MII #%d link partner ability of %04x\n",
					    tp->mii.full_duplex ? "full" : "half",
					    tp->phys[0], mii_lpa);
			} else {
				netdev_info(dev, "media is unconnected, link down, or incompatible connection\n");
			}
#if 0
			RTL_W8 (Cfg9346, Cfg9346_Unlock);
			RTL_W8 (Config1, tp->mii.full_duplex ? 0x60 : 0x20);
			RTL_W8 (Cfg9346, Cfg9346_Lock);
#endif
		}
	}

	next_tick = HZ * 60;

	rtl8139_tune_twister (dev, tp);

	netdev_dbg(dev, "Media selection tick, Link partner %04x\n",
		   RTL_R16(NWayLPAR));
	netdev_dbg(dev, "Other registers are IntMask %04x IntStatus %04x\n",
		   RTL_R16(IntrMask), RTL_R16(IntrStatus));
	netdev_dbg(dev, "Chip config %02x %02x\n",
		   RTL_R8(Config0), RTL_R8(Config1));
}
1586
/* Delayed-work handler: dispatches either the Tx-timeout recovery
 * (when rtl8139_tx_timeout flagged it) or the periodic link/twister
 * iteration, then reschedules itself.  Runs under rtnl_lock, which
 * also serializes it against dev_close. */
static void rtl8139_thread (struct work_struct *work)
{
	struct rtl8139_private *tp =
		container_of(work, struct rtl8139_private, thread.work);
	struct net_device *dev = tp->mii.dev;
	/* Snapshot the interval before the iteration can change it. */
	unsigned long thr_delay = next_tick;

	rtnl_lock();

	if (!netif_running(dev))
		goto out_unlock;

	if (tp->watchdog_fired) {
		tp->watchdog_fired = 0;
		rtl8139_tx_timeout_task(work);
	} else
		rtl8139_thread_iter(dev, tp, tp->mmio_addr);

	if (tp->have_thread)
		schedule_delayed_work(&tp->thread, thr_delay);
out_unlock:
	rtnl_unlock ();
}
1610
1611static void rtl8139_start_thread(struct rtl8139_private *tp)
1612{
1613 tp->twistie = 0;
1614 if (tp->chipset == CH_8139_K)
1615 tp->twistie = 1;
1616 else if (tp->drv_flags & HAS_LNK_CHNG)
1617 return;
1618
1619 tp->have_thread = 1;
1620 tp->watchdog_fired = 0;
1621
1622 schedule_delayed_work(&tp->thread, next_tick);
1623}
1624
/* Discard all pending Tx work by resetting the ring indices.
 * Used from the Tx-timeout path before the hardware is restarted. */
static inline void rtl8139_tx_clear (struct rtl8139_private *tp)
{
	tp->cur_tx = 0;
	tp->dirty_tx = 0;

	/* XXX account for unsent Tx packets in tp->stats.tx_dropped */
}
1632
/* Tx-watchdog recovery, run from the driver thread (process context):
 * dump diagnostic state, stop the transmitter, flush the Tx ring and
 * re-start the hardware.  Lock order here is rx_lock (BH) outside,
 * tp->lock (irq) inside -- matching rtl8139_poll. */
static void rtl8139_tx_timeout_task (struct work_struct *work)
{
	struct rtl8139_private *tp =
		container_of(work, struct rtl8139_private, thread.work);
	struct net_device *dev = tp->mii.dev;
	void __iomem *ioaddr = tp->mmio_addr;
	int i;
	u8 tmp8;

	netdev_dbg(dev, "Transmit timeout, status %02x %04x %04x media %02x\n",
		   RTL_R8(ChipCmd), RTL_R16(IntrStatus),
		   RTL_R16(IntrMask), RTL_R8(MediaStatus));
	/* Emit info to figure out what went wrong. */
	netdev_dbg(dev, "Tx queue start entry %ld  dirty entry %ld\n",
		   tp->cur_tx, tp->dirty_tx);
	for (i = 0; i < NUM_TX_DESC; i++)
		netdev_dbg(dev, "Tx descriptor %d is %08x%s\n",
			   i, RTL_R32(TxStatus0 + (i * 4)),
			   i == tp->dirty_tx % NUM_TX_DESC ?
			   " (queue head)" : "");

	tp->xstats.tx_timeouts++;

	/* disable Tx ASAP, if not already */
	tmp8 = RTL_R8 (ChipCmd);
	if (tmp8 & CmdTxEnb)
		RTL_W8 (ChipCmd, CmdRxEnb);

	spin_lock_bh(&tp->rx_lock);
	/* Disable interrupts by clearing the interrupt mask. */
	RTL_W16 (IntrMask, 0x0000);

	/* Stop a shared interrupt from scavenging while we are. */
	spin_lock_irq(&tp->lock);
	rtl8139_tx_clear (tp);
	spin_unlock_irq(&tp->lock);

	/* ...and finally, reset everything */
	if (netif_running(dev)) {
		rtl8139_hw_start (dev);
		netif_wake_queue (dev);
	}
	spin_unlock_bh(&tp->rx_lock);
}
1677
/* ndo_tx_timeout callback (softirq context): flag the condition and
 * let the driver thread perform the actual recovery in process context.
 * NOTE(review): when no thread is running, the delayed work is
 * re-INIT'ed here -- presumably safe because have_thread == 0 implies
 * the work is idle, but worth confirming against cancel_delayed_work
 * users. */
static void rtl8139_tx_timeout (struct net_device *dev)
{
	struct rtl8139_private *tp = netdev_priv(dev);

	tp->watchdog_fired = 1;
	if (!tp->have_thread) {
		INIT_DELAYED_WORK(&tp->thread, rtl8139_thread);
		schedule_delayed_work(&tp->thread, next_tick);
	}
}
1688
/* ndo_start_xmit: copy the skb into the next static Tx slot (the chip
 * has no scatter/gather) and kick DMA by writing the slot's TxStatus
 * register.  Always returns NETDEV_TX_OK -- oversized frames are
 * dropped, never requeued. */
static netdev_tx_t rtl8139_start_xmit (struct sk_buff *skb,
					     struct net_device *dev)
{
	struct rtl8139_private *tp = netdev_priv(dev);
	void __iomem *ioaddr = tp->mmio_addr;
	unsigned int entry;
	unsigned int len = skb->len;
	unsigned long flags;

	/* Calculate the next Tx descriptor entry. */
	entry = tp->cur_tx % NUM_TX_DESC;

	/* Note: the chip doesn't have auto-pad! */
	if (likely(len < TX_BUF_SIZE)) {
		if (len < ETH_ZLEN)
			memset(tp->tx_buf[entry], 0, ETH_ZLEN);
		/* Copy (and checksum, if requested) into the DMA slot;
		 * this is what makes the advertised SG/CSUM features work. */
		skb_copy_and_csum_dev(skb, tp->tx_buf[entry]);
		dev_kfree_skb(skb);
	} else {
		/* Frame cannot fit in a slot: drop it. */
		dev_kfree_skb(skb);
		dev->stats.tx_dropped++;
		return NETDEV_TX_OK;
	}

	spin_lock_irqsave(&tp->lock, flags);
	/*
	 * Writing to TxStatus triggers a DMA transfer of the data
	 * copied to tp->tx_buf[entry] above. Use a memory barrier
	 * to make sure that the device sees the updated data.
	 */
	wmb();
	RTL_W32_F (TxStatus0 + (entry * sizeof (u32)),
		   tp->tx_flag | max(len, (unsigned int)ETH_ZLEN));

	tp->cur_tx++;

	/* Ring full: stop the queue until tx_interrupt reclaims a slot. */
	if ((tp->cur_tx - NUM_TX_DESC) == tp->dirty_tx)
		netif_stop_queue (dev);
	spin_unlock_irqrestore(&tp->lock, flags);

	netif_dbg(tp, tx_queued, dev, "Queued Tx packet size %u to slot %d\n",
		  len, entry);

	return NETDEV_TX_OK;
}
1734
1735
/* Reclaim completed Tx slots between dirty_tx and cur_tx, updating
 * error/byte/packet statistics from each slot's TxStatus, and wake
 * the queue if any slot was freed.  Called with tp->lock held from
 * the interrupt handler. */
static void rtl8139_tx_interrupt (struct net_device *dev,
				  struct rtl8139_private *tp,
				  void __iomem *ioaddr)
{
	unsigned long dirty_tx, tx_left;

	assert (dev != NULL);
	assert (ioaddr != NULL);

	dirty_tx = tp->dirty_tx;
	tx_left = tp->cur_tx - dirty_tx;
	while (tx_left > 0) {
		int entry = dirty_tx % NUM_TX_DESC;
		int txstatus;

		txstatus = RTL_R32 (TxStatus0 + (entry * sizeof (u32)));

		if (!(txstatus & (TxStatOK | TxUnderrun | TxAborted)))
			break;	/* It still hasn't been Txed */

		/* Note: TxCarrierLost is always asserted at 100mbps. */
		if (txstatus & (TxOutOfWindow | TxAborted)) {
			/* There was an major error, log it. */
			netif_dbg(tp, tx_err, dev, "Transmit error, Tx status %08x\n",
				  txstatus);
			dev->stats.tx_errors++;
			if (txstatus & TxAborted) {
				dev->stats.tx_aborted_errors++;
				/* Clear the abort so transmission resumes. */
				RTL_W32 (TxConfig, TxClearAbt);
				RTL_W16 (IntrStatus, TxErr);
				wmb();
			}
			if (txstatus & TxCarrierLost)
				dev->stats.tx_carrier_errors++;
			if (txstatus & TxOutOfWindow)
				dev->stats.tx_window_errors++;
		} else {
			if (txstatus & TxUnderrun) {
				/* Add 64 to the Tx FIFO threshold. */
				if (tp->tx_flag < 0x00300000)
					tp->tx_flag += 0x00020000;
				dev->stats.tx_fifo_errors++;
			}
			/* Collision count is in bits 27:24 of TxStatus. */
			dev->stats.collisions += (txstatus >> 24) & 15;
			dev->stats.tx_bytes += txstatus & 0x7ff;
			dev->stats.tx_packets++;
		}

		dirty_tx++;
		tx_left--;
	}

#ifndef RTL8139_NDEBUG
	if (tp->cur_tx - dirty_tx > NUM_TX_DESC) {
		netdev_err(dev, "Out-of-sync dirty pointer, %ld vs. %ld\n",
			   dirty_tx, tp->cur_tx);
		dirty_tx += NUM_TX_DESC;
	}
#endif /* RTL8139_NDEBUG */

	/* only wake the queue if we did work, and the queue is stopped */
	if (tp->dirty_tx != dirty_tx) {
		tp->dirty_tx = dirty_tx;
		/* Publish dirty_tx before the queue can be restarted. */
		mb();
		netif_wake_queue (dev);
	}
}
1803
1804
/* TODO: clean this up! Rx reset need not be this intensive */
/* Handle an errored (or out-of-sync) Rx frame: account the error in
 * the statistics and reset the receiver.  Two reset strategies exist:
 * the default quick Rx-disable/re-enable, and the older full
 * RealTek-recommended sequence behind CONFIG_8139_OLD_RX_RESET. */
static void rtl8139_rx_err (u32 rx_status, struct net_device *dev,
			    struct rtl8139_private *tp, void __iomem *ioaddr)
{
	u8 tmp8;
#ifdef CONFIG_8139_OLD_RX_RESET
	int tmp_work;
#endif

	netif_dbg(tp, rx_err, dev, "Ethernet frame had errors, status %08x\n",
		  rx_status);
	dev->stats.rx_errors++;
	if (!(rx_status & RxStatusOK)) {
		if (rx_status & RxTooLong) {
			netdev_dbg(dev, "Oversized Ethernet frame, status %04x!\n",
				   rx_status);
			/* A.C.: The chip hangs here. */
		}
		if (rx_status & (RxBadSymbol | RxBadAlign))
			dev->stats.rx_frame_errors++;
		if (rx_status & (RxRunt | RxTooLong))
			dev->stats.rx_length_errors++;
		if (rx_status & RxCRCErr)
			dev->stats.rx_crc_errors++;
	} else {
		/* Status said OK but the size/ring state was bogus: we lost
		 * our place in the ring. */
		tp->xstats.rx_lost_in_ring++;
	}

#ifndef CONFIG_8139_OLD_RX_RESET
	/* Quick reset: toggling CmdRxEnb rewinds the Rx ring to offset 0. */
	tmp8 = RTL_R8 (ChipCmd);
	RTL_W8 (ChipCmd, tmp8 & ~CmdRxEnb);
	RTL_W8 (ChipCmd, tmp8);
	RTL_W32 (RxConfig, tp->rx_config);
	tp->cur_rx = 0;
#else
	/* Reset the receiver, based on RealTek recommendation. (Bug?) */

	/* disable receive */
	RTL_W8_F (ChipCmd, CmdTxEnb);
	tmp_work = 200;
	while (--tmp_work > 0) {
		udelay(1);
		tmp8 = RTL_R8 (ChipCmd);
		if (!(tmp8 & CmdRxEnb))
			break;
	}
	if (tmp_work <= 0)
		netdev_warn(dev, "rx stop wait too long\n");
	/* restart receive */
	tmp_work = 200;
	while (--tmp_work > 0) {
		RTL_W8_F (ChipCmd, CmdRxEnb | CmdTxEnb);
		udelay(1);
		tmp8 = RTL_R8 (ChipCmd);
		if ((tmp8 & CmdRxEnb) && (tmp8 & CmdTxEnb))
			break;
	}
	if (tmp_work <= 0)
		netdev_warn(dev, "tx/rx enable wait too long\n");

	/* and reinitialize all rx related registers */
	RTL_W8_F (Cfg9346, Cfg9346_Unlock);
	/* Must enable Tx/Rx before setting transfer thresholds! */
	RTL_W8 (ChipCmd, CmdRxEnb | CmdTxEnb);

	tp->rx_config = rtl8139_rx_config | AcceptBroadcast | AcceptMyPhys;
	RTL_W32 (RxConfig, tp->rx_config);
	tp->cur_rx = 0;

	netdev_dbg(dev, "init buffer addresses\n");

	/* Lock Config[01234] and BMCR register writes */
	RTL_W8 (Cfg9346, Cfg9346_Lock);

	/* init Rx ring buffer DMA address */
	RTL_W32_F (RxBuf, tp->rx_ring_dma);

	/* A.C.: Reset the multicast list. */
	__set_rx_mode (dev);
#endif
}
1886
#if RX_BUF_IDX == 3
/* Copy @size bytes starting at @offset out of the circular Rx @ring
 * into the skb's linear data area, handling a wrap past RX_BUF_LEN.
 * Only needed for the largest ring size (RX_BUF_IDX == 3), which has
 * no mirrored overflow area. */
static inline void wrap_copy(struct sk_buff *skb, const unsigned char *ring,
			     u32 offset, unsigned int size)
{
	u32 tail = RX_BUF_LEN - offset;

	if (size <= tail) {
		/* Frame is contiguous in the ring. */
		skb_copy_to_linear_data(skb, ring + offset, size);
	} else {
		/* Frame wraps: copy up to the end, then from the start. */
		skb_copy_to_linear_data(skb, ring + offset, tail);
		skb_copy_to_linear_data_offset(skb, tail, ring, size - tail);
	}
}
#endif
1900
/* Acknowledge pending Rx interrupt sources, accounting FIFO/ring
 * overflows in the statistics first.  Called from the NAPI Rx loop
 * (the main ISR deliberately leaves RxAckBits unacked for us). */
static void rtl8139_isr_ack(struct rtl8139_private *tp)
{
	void __iomem *ioaddr = tp->mmio_addr;
	u16 status;

	status = RTL_R16 (IntrStatus) & RxAckBits;

	/* Clear out errors and receive interrupts */
	if (likely(status != 0)) {
		if (unlikely(status & (RxFIFOOver | RxOverflow))) {
			tp->dev->stats.rx_errors++;
			if (status & RxFIFOOver)
				tp->dev->stats.rx_fifo_errors++;
		}
		RTL_W16_F (IntrStatus, RxAckBits);
	}
}
1918
/* NAPI Rx worker: drain up to @budget frames from the circular Rx
 * ring into fresh skbs and hand them to the stack.  Returns the number
 * received, @budget when a hung FIFO forces a re-poll, or -1 after an
 * Rx error reset (caller must not touch the ring state further). */
static int rtl8139_rx(struct net_device *dev, struct rtl8139_private *tp,
		      int budget)
{
	void __iomem *ioaddr = tp->mmio_addr;
	int received = 0;
	unsigned char *rx_ring = tp->rx_ring;
	unsigned int cur_rx = tp->cur_rx;
	unsigned int rx_size = 0;

	netdev_dbg(dev, "In %s(), current %04x BufAddr %04x, free to %04x, Cmd %02x\n",
		   __func__, (u16)cur_rx,
		   RTL_R16(RxBufAddr), RTL_R16(RxBufPtr), RTL_R8(ChipCmd));

	while (netif_running(dev) && received < budget &&
	       (RTL_R8 (ChipCmd) & RxBufEmpty) == 0) {
		u32 ring_offset = cur_rx % RX_BUF_LEN;
		u32 rx_status;
		unsigned int pkt_size;
		struct sk_buff *skb;

		/* Read the header only after the DMA'd data is visible. */
		rmb();

		/* read size+status of next frame from DMA ring buffer.
		 * Layout: status in the low 16 bits, size (incl. CRC) in
		 * the high 16 bits, frame data follows. */
		rx_status = le32_to_cpu (*(__le32 *) (rx_ring + ring_offset));
		rx_size = rx_status >> 16;
		pkt_size = rx_size - 4;

		netif_dbg(tp, rx_status, dev, "%s() status %04x, size %04x, cur %04x\n",
			  __func__, rx_status, rx_size, cur_rx);
#if RTL8139_DEBUG > 2
		print_hex_dump(KERN_DEBUG, "Frame contents: ",
			       DUMP_PREFIX_OFFSET, 16, 1,
			       &rx_ring[ring_offset], 70, true);
#endif

		/* Packet copy from FIFO still in progress.
		 * Theoretically, this should never happen
		 * since EarlyRx is disabled.
		 */
		if (unlikely(rx_size == 0xfff0)) {
			/* Give the copy ~2 jiffies to complete; if it never
			 * does, force an Rx reset via the size<8 check below
			 * (rx_size = 0 falls through to rtl8139_rx_err). */
			if (!tp->fifo_copy_timeout)
				tp->fifo_copy_timeout = jiffies + 2;
			else if (time_after(jiffies, tp->fifo_copy_timeout)) {
				netdev_dbg(dev, "hung FIFO. Reset\n");
				rx_size = 0;
				goto no_early_rx;
			}
			netif_dbg(tp, intr, dev, "fifo copy in progress\n");
			tp->xstats.early_rx++;
			break;
		}

no_early_rx:
		tp->fifo_copy_timeout = 0;

		/* If Rx err or invalid rx_size/rx_status received
		 * (which happens if we get lost in the ring),
		 * Rx process gets reset, so we abort any further
		 * Rx processing.
		 */
		if (unlikely((rx_size > (MAX_ETH_FRAME_SIZE+4)) ||
			     (rx_size < 8) ||
			     (!(rx_status & RxStatusOK)))) {
			rtl8139_rx_err (rx_status, dev, tp, ioaddr);
			received = -1;
			goto out;
		}

		/* Malloc up new buffer, compatible with net-2e. */
		/* Omit the four octet CRC from the length. */

		skb = netdev_alloc_skb_ip_align(dev, pkt_size);
		if (likely(skb)) {
#if RX_BUF_IDX == 3
			wrap_copy(skb, rx_ring, ring_offset+4, pkt_size);
#else
			skb_copy_to_linear_data (skb, &rx_ring[ring_offset + 4], pkt_size);
#endif
			skb_put (skb, pkt_size);

			skb->protocol = eth_type_trans (skb, dev);

			dev->stats.rx_bytes += pkt_size;
			dev->stats.rx_packets++;

			netif_receive_skb (skb);
		} else {
			if (net_ratelimit())
				netdev_warn(dev, "Memory squeeze, dropping packet\n");
			dev->stats.rx_dropped++;
		}
		received++;

		/* Advance past header+frame, dword-aligned; the -16 offset
		 * when writing RxBufPtr is a chip requirement. */
		cur_rx = (cur_rx + rx_size + 4 + 3) & ~3;
		RTL_W16 (RxBufPtr, (u16) (cur_rx - 16));

		rtl8139_isr_ack(tp);
	}

	if (unlikely(!received || rx_size == 0xfff0))
		rtl8139_isr_ack(tp);

	netdev_dbg(dev, "Done %s(), current %04x BufAddr %04x, free to %04x, Cmd %02x\n",
		   __func__, cur_rx,
		   RTL_R16(RxBufAddr), RTL_R16(RxBufPtr), RTL_R8(ChipCmd));

	tp->cur_rx = cur_rx;

	/*
	 * The receive buffer should be mostly empty.
	 * Tell NAPI to reenable the Rx irq.
	 */
	if (tp->fifo_copy_timeout)
		received = budget;

out:
	return received;
}
2037
2038
/* Handle the uncommon interrupt sources (PCI error, timeout, Rx
 * underrun/error): update error statistics, re-check the media on a
 * link-change-capable chip, and clear a latched PCI bus error.
 * Called with tp->lock held from the interrupt handler. */
static void rtl8139_weird_interrupt (struct net_device *dev,
				     struct rtl8139_private *tp,
				     void __iomem *ioaddr,
				     int status, int link_changed)
{
	netdev_dbg(dev, "Abnormal interrupt, status %08x\n", status);

	assert (dev != NULL);
	assert (tp != NULL);
	assert (ioaddr != NULL);

	/* Update the error count.  RxMissed clears on write. */
	dev->stats.rx_missed_errors += RTL_R32 (RxMissed);
	RTL_W32 (RxMissed, 0);

	/* On these chips RxUnderrun doubles as the link-change event. */
	if ((status & RxUnderrun) && link_changed &&
	    (tp->drv_flags & HAS_LNK_CHNG)) {
		rtl_check_media(dev, 0);
		status &= ~RxUnderrun;
	}

	if (status & (RxUnderrun | RxErr))
		dev->stats.rx_errors++;

	if (status & PCSTimeout)
		dev->stats.rx_length_errors++;
	if (status & RxUnderrun)
		dev->stats.rx_fifo_errors++;
	if (status & PCIErr) {
		u16 pci_cmd_status;
		/* Writing the status word back clears the error bits. */
		pci_read_config_word (tp->pci_dev, PCI_STATUS, &pci_cmd_status);
		pci_write_config_word (tp->pci_dev, PCI_STATUS, pci_cmd_status);

		netdev_err(dev, "PCI Bus error %04x\n", pci_cmd_status);
	}
}
2075
/* NAPI poll callback: process up to @budget Rx frames, and when done,
 * complete NAPI and re-enable the full interrupt mask.  The completion
 * and mask write happen under tp->lock with irqs off so a racing
 * interrupt cannot slip in between them. */
static int rtl8139_poll(struct napi_struct *napi, int budget)
{
	struct rtl8139_private *tp = container_of(napi, struct rtl8139_private, napi);
	struct net_device *dev = tp->dev;
	void __iomem *ioaddr = tp->mmio_addr;
	int work_done;

	spin_lock(&tp->rx_lock);
	work_done = 0;
	if (likely(RTL_R16(IntrStatus) & RxAckBits))
		work_done += rtl8139_rx(dev, tp, budget);

	if (work_done < budget) {
		unsigned long flags;
		/*
		 * Order is important since data can get interrupted
		 * again when we think we are done.
		 */
		spin_lock_irqsave(&tp->lock, flags);
		__napi_complete(napi);
		RTL_W16_F(IntrMask, rtl8139_intr_mask);
		spin_unlock_irqrestore(&tp->lock, flags);
	}
	spin_unlock(&tp->rx_lock);

	return work_done;
}
2103
/* The interrupt handler does all of the Rx thread work and cleans up
   after the Tx thread. */
/* Shared-IRQ handler: acknowledge non-Rx sources, hand Rx work to NAPI
 * (masking Rx interrupts until the poll completes), reclaim finished
 * Tx slots, and dispatch rare events to rtl8139_weird_interrupt(). */
static irqreturn_t rtl8139_interrupt (int irq, void *dev_instance)
{
	struct net_device *dev = (struct net_device *) dev_instance;
	struct rtl8139_private *tp = netdev_priv(dev);
	void __iomem *ioaddr = tp->mmio_addr;
	u16 status, ackstat;
	int link_changed = 0; /* avoid bogus "uninit" warning */
	int handled = 0;

	spin_lock (&tp->lock);
	status = RTL_R16 (IntrStatus);

	/* shared irq? */
	if (unlikely((status & rtl8139_intr_mask) == 0))
		goto out;

	handled = 1;

	/* h/w no longer present (hotplug?) or major error, bail */
	if (unlikely(status == 0xFFFF))
		goto out;

	/* close possible race's with dev_close */
	if (unlikely(!netif_running(dev))) {
		RTL_W16 (IntrMask, 0);
		goto out;
	}

	/* Acknowledge all of the current interrupt sources ASAP, but
	   an first get an additional status bit from CSCR. */
	if (unlikely(status & RxUnderrun))
		link_changed = RTL_R16 (CSCR) & CSCR_LinkChangeBit;

	/* Rx sources stay pending for rtl8139_isr_ack() in the poll loop;
	 * TxErr is acked after tx_interrupt has seen it. */
	ackstat = status & ~(RxAckBits | TxErr);
	if (ackstat)
		RTL_W16 (IntrStatus, ackstat);

	/* Receive packets are processed by poll routine.
	   If not running start it now. */
	if (status & RxAckBits){
		if (napi_schedule_prep(&tp->napi)) {
			RTL_W16_F (IntrMask, rtl8139_norx_intr_mask);
			__napi_schedule(&tp->napi);
		}
	}

	/* Check uncommon events with one test. */
	if (unlikely(status & (PCIErr | PCSTimeout | RxUnderrun | RxErr)))
		rtl8139_weird_interrupt (dev, tp, ioaddr,
					 status, link_changed);

	if (status & (TxOK | TxErr)) {
		rtl8139_tx_interrupt (dev, tp, ioaddr);
		if (status & TxErr)
			RTL_W16 (IntrStatus, TxErr);
	}
 out:
	spin_unlock (&tp->lock);

	netdev_dbg(dev, "exiting interrupt, intr_status=%#4.4x\n",
		   RTL_R16(IntrStatus));
	return IRQ_RETVAL(handled);
}
2169
2170#ifdef CONFIG_NET_POLL_CONTROLLER
2171/*
2172 * Polling receive - used by netconsole and other diagnostic tools
2173 * to allow network i/o with interrupts disabled.
2174 */
2175static void rtl8139_poll_controller(struct net_device *dev)
2176{
2177 disable_irq(dev->irq);
2178 rtl8139_interrupt(dev->irq, dev);
2179 enable_irq(dev->irq);
2180}
2181#endif
2182
/* Set a new station (MAC) address.  Validates the address, copies it
 * into dev->dev_addr, and programs it into the chip's MAC0 registers
 * (which are write-protected behind the Cfg9346 unlock sequence).
 * Returns 0 or -EADDRNOTAVAIL for an invalid ethernet address.
 */
static int rtl8139_set_mac_address(struct net_device *dev, void *p)
{
	struct rtl8139_private *tp = netdev_priv(dev);
	void __iomem *ioaddr = tp->mmio_addr;
	struct sockaddr *addr = p;

	if (!is_valid_ether_addr(addr->sa_data))
		return -EADDRNOTAVAIL;

	memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);

	spin_lock_irq(&tp->lock);

	/* NOTE(review): the second write reads a u32 at dev_addr+4, i.e.
	 * 2 bytes past the 6-byte MAC -- presumably safe because netdev
	 * address storage is larger than 6 bytes and the device ignores
	 * the excess, but worth confirming.  The cpu_to_le32 also looks
	 * questionable on big-endian if RTL_W32_F already byte-swaps;
	 * verify against the register accessor definitions. */
	RTL_W8_F(Cfg9346, Cfg9346_Unlock);
	RTL_W32_F(MAC0 + 0, cpu_to_le32 (*(u32 *) (dev->dev_addr + 0)));
	RTL_W32_F(MAC0 + 4, cpu_to_le32 (*(u32 *) (dev->dev_addr + 4)));
	RTL_W8_F(Cfg9346, Cfg9346_Lock);

	spin_unlock_irq(&tp->lock);

	return 0;
}
2205
/* ndo_stop: shut the device down.  Stops the queue and NAPI, halts the
 * chip's DMA engines, disables interrupts, frees the IRQ and DMA rings,
 * and finally drops the chip into low-power mode.  Always returns 0.
 */
static int rtl8139_close (struct net_device *dev)
{
	struct rtl8139_private *tp = netdev_priv(dev);
	void __iomem *ioaddr = tp->mmio_addr;
	unsigned long flags;

	netif_stop_queue(dev);
	napi_disable(&tp->napi);

	netif_dbg(tp, ifdown, dev, "Shutting down ethercard, status was 0x%04x\n",
		  RTL_R16(IntrStatus));

	spin_lock_irqsave (&tp->lock, flags);

	/* Stop the chip's Tx and Rx DMA processes. */
	RTL_W8 (ChipCmd, 0);

	/* Disable interrupts by clearing the interrupt mask. */
	RTL_W16 (IntrMask, 0);

	/* Update the error counts.  RxMissed is a hardware counter that
	   is cleared by the write below. */
	dev->stats.rx_missed_errors += RTL_R32 (RxMissed);
	RTL_W32 (RxMissed, 0);

	spin_unlock_irqrestore (&tp->lock, flags);

	/* Safe now: interrupts are masked and the handler bails when
	   !netif_running. */
	free_irq (dev->irq, dev);

	rtl8139_tx_clear (tp);

	/* Release the coherent DMA areas allocated at open time. */
	dma_free_coherent(&tp->pci_dev->dev, RX_BUF_TOT_LEN,
			  tp->rx_ring, tp->rx_ring_dma);
	dma_free_coherent(&tp->pci_dev->dev, TX_BUF_TOT_LEN,
			  tp->tx_bufs, tp->tx_bufs_dma);
	tp->rx_ring = NULL;
	tp->tx_bufs = NULL;

	/* Green! Put the chip in low-power mode. */
	RTL_W8 (Cfg9346, Cfg9346_Unlock);

	if (rtl_chip_info[tp->chipset].flags & HasHltClk)
		RTL_W8 (HltClk, 'H');	/* 'R' would leave the clock running. */

	return 0;
}
2251
2252
2253/* Get the ethtool Wake-on-LAN settings. Assumes that wol points to
2254 kernel memory, *wol has been initialized as {ETHTOOL_GWOL}, and
2255 other threads or interrupts aren't messing with the 8139. */
2256static void rtl8139_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
2257{
2258 struct rtl8139_private *tp = netdev_priv(dev);
2259 void __iomem *ioaddr = tp->mmio_addr;
2260
2261 spin_lock_irq(&tp->lock);
2262 if (rtl_chip_info[tp->chipset].flags & HasLWake) {
2263 u8 cfg3 = RTL_R8 (Config3);
2264 u8 cfg5 = RTL_R8 (Config5);
2265
2266 wol->supported = WAKE_PHY | WAKE_MAGIC
2267 | WAKE_UCAST | WAKE_MCAST | WAKE_BCAST;
2268
2269 wol->wolopts = 0;
2270 if (cfg3 & Cfg3_LinkUp)
2271 wol->wolopts |= WAKE_PHY;
2272 if (cfg3 & Cfg3_Magic)
2273 wol->wolopts |= WAKE_MAGIC;
2274 /* (KON)FIXME: See how netdev_set_wol() handles the
2275 following constants. */
2276 if (cfg5 & Cfg5_UWF)
2277 wol->wolopts |= WAKE_UCAST;
2278 if (cfg5 & Cfg5_MWF)
2279 wol->wolopts |= WAKE_MCAST;
2280 if (cfg5 & Cfg5_BWF)
2281 wol->wolopts |= WAKE_BCAST;
2282 }
2283 spin_unlock_irq(&tp->lock);
2284}
2285
2286
2287/* Set the ethtool Wake-on-LAN settings. Return 0 or -errno. Assumes
2288 that wol points to kernel memory and other threads or interrupts
2289 aren't messing with the 8139. */
2290static int rtl8139_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
2291{
2292 struct rtl8139_private *tp = netdev_priv(dev);
2293 void __iomem *ioaddr = tp->mmio_addr;
2294 u32 support;
2295 u8 cfg3, cfg5;
2296
2297 support = ((rtl_chip_info[tp->chipset].flags & HasLWake)
2298 ? (WAKE_PHY | WAKE_MAGIC
2299 | WAKE_UCAST | WAKE_MCAST | WAKE_BCAST)
2300 : 0);
2301 if (wol->wolopts & ~support)
2302 return -EINVAL;
2303
2304 spin_lock_irq(&tp->lock);
2305 cfg3 = RTL_R8 (Config3) & ~(Cfg3_LinkUp | Cfg3_Magic);
2306 if (wol->wolopts & WAKE_PHY)
2307 cfg3 |= Cfg3_LinkUp;
2308 if (wol->wolopts & WAKE_MAGIC)
2309 cfg3 |= Cfg3_Magic;
2310 RTL_W8 (Cfg9346, Cfg9346_Unlock);
2311 RTL_W8 (Config3, cfg3);
2312 RTL_W8 (Cfg9346, Cfg9346_Lock);
2313
2314 cfg5 = RTL_R8 (Config5) & ~(Cfg5_UWF | Cfg5_MWF | Cfg5_BWF);
2315 /* (KON)FIXME: These are untested. We may have to set the
2316 CRC0, Wakeup0 and LSBCRC0 registers too, but I have no
2317 documentation. */
2318 if (wol->wolopts & WAKE_UCAST)
2319 cfg5 |= Cfg5_UWF;
2320 if (wol->wolopts & WAKE_MCAST)
2321 cfg5 |= Cfg5_MWF;
2322 if (wol->wolopts & WAKE_BCAST)
2323 cfg5 |= Cfg5_BWF;
2324 RTL_W8 (Config5, cfg5); /* need not unlock via Cfg9346 */
2325 spin_unlock_irq(&tp->lock);
2326
2327 return 0;
2328}
2329
2330static void rtl8139_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
2331{
2332 struct rtl8139_private *tp = netdev_priv(dev);
2333 strcpy(info->driver, DRV_NAME);
2334 strcpy(info->version, DRV_VERSION);
2335 strcpy(info->bus_info, pci_name(tp->pci_dev));
2336 info->regdump_len = tp->regs_len;
2337}
2338
2339static int rtl8139_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
2340{
2341 struct rtl8139_private *tp = netdev_priv(dev);
2342 spin_lock_irq(&tp->lock);
2343 mii_ethtool_gset(&tp->mii, cmd);
2344 spin_unlock_irq(&tp->lock);
2345 return 0;
2346}
2347
2348static int rtl8139_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
2349{
2350 struct rtl8139_private *tp = netdev_priv(dev);
2351 int rc;
2352 spin_lock_irq(&tp->lock);
2353 rc = mii_ethtool_sset(&tp->mii, cmd);
2354 spin_unlock_irq(&tp->lock);
2355 return rc;
2356}
2357
2358static int rtl8139_nway_reset(struct net_device *dev)
2359{
2360 struct rtl8139_private *tp = netdev_priv(dev);
2361 return mii_nway_restart(&tp->mii);
2362}
2363
2364static u32 rtl8139_get_link(struct net_device *dev)
2365{
2366 struct rtl8139_private *tp = netdev_priv(dev);
2367 return mii_link_ok(&tp->mii);
2368}
2369
2370static u32 rtl8139_get_msglevel(struct net_device *dev)
2371{
2372 struct rtl8139_private *tp = netdev_priv(dev);
2373 return tp->msg_enable;
2374}
2375
2376static void rtl8139_set_msglevel(struct net_device *dev, u32 datum)
2377{
2378 struct rtl8139_private *tp = netdev_priv(dev);
2379 tp->msg_enable = datum;
2380}
2381
2382static int rtl8139_get_regs_len(struct net_device *dev)
2383{
2384 struct rtl8139_private *tp;
2385 /* TODO: we are too slack to do reg dumping for pio, for now */
2386 if (use_io)
2387 return 0;
2388 tp = netdev_priv(dev);
2389 return tp->regs_len;
2390}
2391
2392static void rtl8139_get_regs(struct net_device *dev, struct ethtool_regs *regs, void *regbuf)
2393{
2394 struct rtl8139_private *tp;
2395
2396 /* TODO: we are too slack to do reg dumping for pio, for now */
2397 if (use_io)
2398 return;
2399 tp = netdev_priv(dev);
2400
2401 regs->version = RTL_REGS_VER;
2402
2403 spin_lock_irq(&tp->lock);
2404 memcpy_fromio(regbuf, tp->mmio_addr, regs->len);
2405 spin_unlock_irq(&tp->lock);
2406}
2407
2408static int rtl8139_get_sset_count(struct net_device *dev, int sset)
2409{
2410 switch (sset) {
2411 case ETH_SS_STATS:
2412 return RTL_NUM_STATS;
2413 default:
2414 return -EOPNOTSUPP;
2415 }
2416}
2417
2418static void rtl8139_get_ethtool_stats(struct net_device *dev, struct ethtool_stats *stats, u64 *data)
2419{
2420 struct rtl8139_private *tp = netdev_priv(dev);
2421
2422 data[0] = tp->xstats.early_rx;
2423 data[1] = tp->xstats.tx_buf_mapped;
2424 data[2] = tp->xstats.tx_timeouts;
2425 data[3] = tp->xstats.rx_lost_in_ring;
2426}
2427
/* Copy out the statistics key names; only ETH_SS_STATS exists here
 * (get_sset_count rejects everything else, so stringset is unused). */
static void rtl8139_get_strings(struct net_device *dev, u32 stringset, u8 *data)
{
	memcpy(data, ethtool_stats_keys, sizeof(ethtool_stats_keys));
}
2432
/* ethtool operations table wired into the netdev at probe time. */
static const struct ethtool_ops rtl8139_ethtool_ops = {
	.get_drvinfo		= rtl8139_get_drvinfo,
	.get_settings		= rtl8139_get_settings,
	.set_settings		= rtl8139_set_settings,
	.get_regs_len		= rtl8139_get_regs_len,
	.get_regs		= rtl8139_get_regs,
	.nway_reset		= rtl8139_nway_reset,
	.get_link		= rtl8139_get_link,
	.get_msglevel		= rtl8139_get_msglevel,
	.set_msglevel		= rtl8139_set_msglevel,
	.get_wol		= rtl8139_get_wol,
	.set_wol		= rtl8139_set_wol,
	.get_strings		= rtl8139_get_strings,
	.get_sset_count		= rtl8139_get_sset_count,
	.get_ethtool_stats	= rtl8139_get_ethtool_stats,
};
2449
2450static int netdev_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
2451{
2452 struct rtl8139_private *tp = netdev_priv(dev);
2453 int rc;
2454
2455 if (!netif_running(dev))
2456 return -EINVAL;
2457
2458 spin_lock_irq(&tp->lock);
2459 rc = generic_mii_ioctl(&tp->mii, if_mii(rq), cmd, NULL);
2460 spin_unlock_irq(&tp->lock);
2461
2462 return rc;
2463}
2464
2465
2466static struct net_device_stats *rtl8139_get_stats (struct net_device *dev)
2467{
2468 struct rtl8139_private *tp = netdev_priv(dev);
2469 void __iomem *ioaddr = tp->mmio_addr;
2470 unsigned long flags;
2471
2472 if (netif_running(dev)) {
2473 spin_lock_irqsave (&tp->lock, flags);
2474 dev->stats.rx_missed_errors += RTL_R32 (RxMissed);
2475 RTL_W32 (RxMissed, 0);
2476 spin_unlock_irqrestore (&tp->lock, flags);
2477 }
2478
2479 return &dev->stats;
2480}
2481
/* Set or clear the multicast filter for this adaptor.
   This routine is not state sensitive and need not be SMP locked
   (callers hold tp->lock anyway; see rtl8139_set_rx_mode). */

static void __set_rx_mode (struct net_device *dev)
{
	struct rtl8139_private *tp = netdev_priv(dev);
	void __iomem *ioaddr = tp->mmio_addr;
	u32 mc_filter[2];	/* Multicast hash filter */
	int rx_mode;
	u32 tmp;

	netdev_dbg(dev, "rtl8139_set_rx_mode(%04x) done -- Rx config %08x\n",
		   dev->flags, RTL_R32(RxConfig));

	/* Note: do not reorder, GCC is clever about common statements. */
	if (dev->flags & IFF_PROMISC) {
		/* Promiscuous: accept everything, hash filter wide open. */
		rx_mode =
		    AcceptBroadcast | AcceptMulticast | AcceptMyPhys |
		    AcceptAllPhys;
		mc_filter[1] = mc_filter[0] = 0xffffffff;
	} else if ((netdev_mc_count(dev) > multicast_filter_limit) ||
		   (dev->flags & IFF_ALLMULTI)) {
		/* Too many to filter perfectly -- accept all multicasts. */
		rx_mode = AcceptBroadcast | AcceptMulticast | AcceptMyPhys;
		mc_filter[1] = mc_filter[0] = 0xffffffff;
	} else {
		/* Build the 64-bit hash filter from the top 6 CRC bits
		   of each subscribed multicast address. */
		struct netdev_hw_addr *ha;
		rx_mode = AcceptBroadcast | AcceptMyPhys;
		mc_filter[1] = mc_filter[0] = 0;
		netdev_for_each_mc_addr(ha, dev) {
			int bit_nr = ether_crc(ETH_ALEN, ha->addr) >> 26;

			mc_filter[bit_nr >> 5] |= 1 << (bit_nr & 31);
			rx_mode |= AcceptMulticast;
		}
	}

	/* We can safely update without stopping the chip. */
	tmp = rtl8139_rx_config | rx_mode;
	if (tp->rx_config != tmp) {
		RTL_W32_F (RxConfig, tmp);
		tp->rx_config = tmp;
	}
	RTL_W32_F (MAR0 + 0, mc_filter[0]);
	RTL_W32_F (MAR0 + 4, mc_filter[1]);
}
2528
2529static void rtl8139_set_rx_mode (struct net_device *dev)
2530{
2531 unsigned long flags;
2532 struct rtl8139_private *tp = netdev_priv(dev);
2533
2534 spin_lock_irqsave (&tp->lock, flags);
2535 __set_rx_mode(dev);
2536 spin_unlock_irqrestore (&tp->lock, flags);
2537}
2538
2539#ifdef CONFIG_PM
2540
/* Legacy PCI suspend hook: save config space, detach the netdev, quiesce
 * the chip, harvest the RxMissed counter, and enter D3hot.  A device
 * that is not up only needs its config space saved. */
static int rtl8139_suspend (struct pci_dev *pdev, pm_message_t state)
{
	struct net_device *dev = pci_get_drvdata (pdev);
	struct rtl8139_private *tp = netdev_priv(dev);
	void __iomem *ioaddr = tp->mmio_addr;
	unsigned long flags;

	pci_save_state (pdev);

	if (!netif_running (dev))
		return 0;

	netif_device_detach (dev);

	spin_lock_irqsave (&tp->lock, flags);

	/* Disable interrupts, stop Tx and Rx. */
	RTL_W16 (IntrMask, 0);
	RTL_W8 (ChipCmd, 0);

	/* Update the error counts (RxMissed is clear-on-write). */
	dev->stats.rx_missed_errors += RTL_R32 (RxMissed);
	RTL_W32 (RxMissed, 0);

	spin_unlock_irqrestore (&tp->lock, flags);

	pci_set_power_state (pdev, PCI_D3hot);

	return 0;
}
2571
2572
/* Legacy PCI resume hook: restore config space and, if the interface
 * was up, power the chip back to D0 and reinitialise it from scratch
 * (rings rebuilt and registers reprogrammed rather than restored). */
static int rtl8139_resume (struct pci_dev *pdev)
{
	struct net_device *dev = pci_get_drvdata (pdev);

	pci_restore_state (pdev);
	if (!netif_running (dev))
		return 0;
	pci_set_power_state (pdev, PCI_D0);
	rtl8139_init_ring (dev);
	rtl8139_hw_start (dev);
	netif_device_attach (dev);
	return 0;
}
2586
2587#endif /* CONFIG_PM */
2588
2589
/* PCI driver glue: probe/remove plus (optional) legacy PM callbacks. */
static struct pci_driver rtl8139_pci_driver = {
	.name		= DRV_NAME,
	.id_table	= rtl8139_pci_tbl,
	.probe		= rtl8139_init_one,
	.remove		= __devexit_p(rtl8139_remove_one),
#ifdef CONFIG_PM
	.suspend	= rtl8139_suspend,
	.resume		= rtl8139_resume,
#endif /* CONFIG_PM */
};
2600
2601
/* Module entry point: announce the driver (module builds only) and
 * register with the PCI core. */
static int __init rtl8139_init_module (void)
{
	/* when we're a module, we always print a version message,
	 * even if no 8139 board is found.
	 */
#ifdef MODULE
	pr_info(RTL8139_DRIVER_NAME "\n");
#endif

	return pci_register_driver(&rtl8139_pci_driver);
}
2613
2614
/* Module exit point: unregister from the PCI core, which detaches and
 * removes all bound devices. */
static void __exit rtl8139_cleanup_module (void)
{
	pci_unregister_driver (&rtl8139_pci_driver);
}
2619
2620
2621module_init(rtl8139_init_module);
2622module_exit(rtl8139_cleanup_module);
diff --git a/drivers/net/ethernet/realtek/Kconfig b/drivers/net/ethernet/realtek/Kconfig
new file mode 100644
index 000000000000..a5f67a091c4d
--- /dev/null
+++ b/drivers/net/ethernet/realtek/Kconfig
@@ -0,0 +1,126 @@
1#
2# Realtek device configuration
3#
4
5config NET_VENDOR_REALTEK
6 bool "Realtek devices"
7 depends on PCI || (PARPORT && X86)
8 ---help---
9 If you have a network (Ethernet) card belonging to this class, say Y
10 and read the Ethernet-HOWTO, available from
11 <http://www.tldp.org/docs.html#howto>.
12
13 Note that the answer to this question doesn't directly affect the
14 kernel: saying N will just cause the configurator to skip all
15 the questions about Realtek devices. If you say Y, you will be asked for
16 your specific card in the following questions.
17
18if NET_VENDOR_REALTEK
19
20config ATP
21 tristate "AT-LAN-TEC/RealTek pocket adapter support"
22 depends on PARPORT && X86
23 select CRC32
24 ---help---
25 This is a network (Ethernet) device which attaches to your parallel
26 port. Read <file:drivers/net/ethernet/realtek/atp.c> as well as the Ethernet-HOWTO,
27 available from <http://www.tldp.org/docs.html#howto>, if you
28 want to use this. If you intend to use this driver, you should have
29 said N to the "Parallel printer support", because the two drivers
30 don't like each other.
31
32 To compile this driver as a module, choose M here: the module
33 will be called atp.
34
35config 8139CP
36 tristate "RealTek RTL-8139 C+ PCI Fast Ethernet Adapter support (EXPERIMENTAL)"
37 depends on PCI && EXPERIMENTAL
38 select CRC32
39 select MII
40 ---help---
41 This is a driver for the Fast Ethernet PCI network cards based on
42 the RTL8139C+ chips. If you have one of those, say Y and read
43 the Ethernet-HOWTO, available from
44 <http://www.tldp.org/docs.html#howto>.
45
46 To compile this driver as a module, choose M here: the module
47 will be called 8139cp. This is recommended.
48
49config 8139TOO
50 tristate "RealTek RTL-8129/8130/8139 PCI Fast Ethernet Adapter support"
51 depends on PCI
52 select CRC32
53 select MII
54 ---help---
55 This is a driver for the Fast Ethernet PCI network cards based on
56 the RTL 8129/8130/8139 chips. If you have one of those, say Y and
57 read the Ethernet-HOWTO <http://www.tldp.org/docs.html#howto>.
58
59 To compile this driver as a module, choose M here: the module
60 will be called 8139too. This is recommended.
61
62config 8139TOO_PIO
63 bool "Use PIO instead of MMIO"
64 default y
65 depends on 8139TOO
66 ---help---
67 This instructs the driver to use programmed I/O ports (PIO) instead
68 of PCI shared memory (MMIO). This can possibly solve some problems
69 in case your mainboard has memory consistency issues. If unsure,
70 say N.
71
72config 8139TOO_TUNE_TWISTER
73 bool "Support for uncommon RTL-8139 rev. K (automatic channel equalization)"
74 depends on 8139TOO
75 ---help---
76 This implements a function which might come in handy in case you
77 are using low quality on long cabling. It is required for RealTek
78 RTL-8139 revision K boards, and totally unused otherwise. It tries
79 to match the transceiver to the cable characteristics. This is
80 experimental since hardly documented by the manufacturer.
81 If unsure, say Y.
82
83config 8139TOO_8129
84 bool "Support for older RTL-8129/8130 boards"
85 depends on 8139TOO
86 ---help---
87 This enables support for the older and uncommon RTL-8129 and
88 RTL-8130 chips, which support MII via an external transceiver,
89 instead of an internal one. Disabling this option will save some
90 memory by making the code size smaller. If unsure, say Y.
91
92config 8139_OLD_RX_RESET
93 bool "Use older RX-reset method"
94 depends on 8139TOO
95 ---help---
96 The 8139too driver was recently updated to contain a more rapid
97 reset sequence, in the face of severe receive errors. This "new"
98 RX-reset method should be adequate for all boards. But if you
99 experience problems, you can enable this option to restore the
100 old RX-reset behavior. If unsure, say N.
101
102config R8169
103 tristate "Realtek 8169 gigabit ethernet support"
104 depends on PCI
105 select FW_LOADER
106 select CRC32
107 select MII
108 ---help---
109 Say Y here if you have a Realtek 8169 PCI Gigabit Ethernet adapter.
110
111 To compile this driver as a module, choose M here: the module
112 will be called r8169. This is recommended.
113
114config SC92031
115 tristate "Silan SC92031 PCI Fast Ethernet Adapter driver (EXPERIMENTAL)"
116 depends on PCI && EXPERIMENTAL
117 select CRC32
118 ---help---
119 This is a driver for the Fast Ethernet PCI network cards based on
120 the Silan SC92031 chip (sometimes also called Realtek 8139D). If you
121 have one of these, say Y here.
122
123 To compile this driver as a module, choose M here: the module
124 will be called sc92031. This is recommended.
125
126endif # NET_VENDOR_REALTEK
diff --git a/drivers/net/ethernet/realtek/Makefile b/drivers/net/ethernet/realtek/Makefile
new file mode 100644
index 000000000000..e48cfb6ac42d
--- /dev/null
+++ b/drivers/net/ethernet/realtek/Makefile
@@ -0,0 +1,9 @@
1#
2# Makefile for the Realtek network device drivers.
3#
4
5obj-$(CONFIG_8139CP) += 8139cp.o
6obj-$(CONFIG_8139TOO) += 8139too.o
7obj-$(CONFIG_ATP) += atp.o
8obj-$(CONFIG_R8169) += r8169.o
9obj-$(CONFIG_SC92031) += sc92031.o
diff --git a/drivers/net/ethernet/realtek/atp.c b/drivers/net/ethernet/realtek/atp.c
new file mode 100644
index 000000000000..f3459798b0e9
--- /dev/null
+++ b/drivers/net/ethernet/realtek/atp.c
@@ -0,0 +1,940 @@
1/* atp.c: Attached (pocket) ethernet adapter driver for linux. */
2/*
3 This is a driver for commonly OEM pocket (parallel port)
4 ethernet adapters based on the Realtek RTL8002 and RTL8012 chips.
5
6 Written 1993-2000 by Donald Becker.
7
8 This software may be used and distributed according to the terms of
9 the GNU General Public License (GPL), incorporated herein by reference.
10 Drivers based on or derived from this code fall under the GPL and must
11 retain the authorship, copyright and license notice. This file is not
12 a complete program and may only be used when the entire operating
13 system is licensed under the GPL.
14
15 Copyright 1993 United States Government as represented by the Director,
16 National Security Agency. Copyright 1994-2000 retained by the original
17 author, Donald Becker. The timer-based reset code was supplied in 1995
18 by Bill Carlson, wwc@super.org.
19
20 The author may be reached as becker@scyld.com, or C/O
21 Scyld Computing Corporation
22 410 Severn Ave., Suite 210
23 Annapolis MD 21403
24
25 Support information and updates available at
26 http://www.scyld.com/network/atp.html
27
28
29 Modular support/softnet added by Alan Cox.
30 _bit abuse fixed up by Alan Cox
31
32*/
33
34static const char version[] =
35"atp.c:v1.09=ac 2002/10/01 Donald Becker <becker@scyld.com>\n";
36
37/* The user-configurable values.
38 These may be modified when a driver module is loaded.*/
39
40static int debug = 1; /* 1 normal messages, 0 quiet .. 7 verbose. */
41#define net_debug debug
42
43/* Maximum events (Rx packets, etc.) to handle at each interrupt. */
44static int max_interrupt_work = 15;
45
46#define NUM_UNITS 2
47/* The standard set of ISA module parameters. */
48static int io[NUM_UNITS];
49static int irq[NUM_UNITS];
50static int xcvr[NUM_UNITS]; /* The data transfer mode. */
51
52/* Operational parameters that are set at compile time. */
53
54/* Time in jiffies before concluding the transmitter is hung. */
55#define TX_TIMEOUT (400*HZ/1000)
56
57/*
58 This file is a device driver for the RealTek (aka AT-Lan-Tec) pocket
59 ethernet adapter. This is a common low-cost OEM pocket ethernet
60 adapter, sold under many names.
61
62 Sources:
63 This driver was written from the packet driver assembly code provided by
64 Vincent Bono of AT-Lan-Tec. Ever try to figure out how a complicated
65 device works just from the assembly code? It ain't pretty. The following
66 description is written based on guesses and writing lots of special-purpose
67 code to test my theorized operation.
68
69 In 1997 Realtek made available the documentation for the second generation
70 RTL8012 chip, which has lead to several driver improvements.
71 http://www.realtek.com.tw/
72
73 Theory of Operation
74
75 The RTL8002 adapter seems to be built around a custom spin of the SEEQ
76 controller core. It probably has a 16K or 64K internal packet buffer, of
77 which the first 4K is devoted to transmit and the rest to receive.
78 The controller maintains the queue of received packet and the packet buffer
79 access pointer internally, with only 'reset to beginning' and 'skip to next
80 packet' commands visible. The transmit packet queue holds two (or more?)
81 packets: both 'retransmit this packet' (due to collision) and 'transmit next
82 packet' commands must be started by hand.
83
84 The station address is stored in a standard bit-serial EEPROM which must be
85 read (ughh) by the device driver. (Provisions have been made for
86 substituting a 74S288 PROM, but I haven't gotten reports of any models
87 using it.) Unlike built-in devices, a pocket adapter can temporarily lose
88 power without indication to the device driver. The major effect is that
89 the station address, receive filter (promiscuous, etc.) and transceiver
90 must be reset.
91
92 The controller itself has 16 registers, some of which use only the lower
93 bits. The registers are read and written 4 bits at a time. The four bit
94 register address is presented on the data lines along with a few additional
95 timing and control bits. The data is then read from status port or written
96 to the data port.
97
98 Correction: the controller has two banks of 16 registers. The second
99 bank contains only the multicast filter table (now used) and the EEPROM
100 access registers.
101
102 Since the bulk data transfer of the actual packets through the slow
103 parallel port dominates the driver's running time, four distinct data
104 (non-register) transfer modes are provided by the adapter, two in each
105 direction. In the first mode timing for the nibble transfers is
106 provided through the data port. In the second mode the same timing is
107 provided through the control port. In either case the data is read from
108 the status port and written to the data port, just as it is accessing
109 registers.
110
111 In addition to the basic data transfer methods, several more are modes are
112 created by adding some delay by doing multiple reads of the data to allow
113 it to stabilize. This delay seems to be needed on most machines.
114
115 The data transfer mode is stored in the 'dev->if_port' field. Its default
116 value is '4'. It may be overridden at boot-time using the third parameter
117 to the "ether=..." initialization.
118
119 The header file <atp.h> provides inline functions that encapsulate the
120 register and data access methods. These functions are hand-tuned to
121 generate reasonable object code. This header file also documents my
122 interpretations of the device registers.
123*/
124
125#include <linux/kernel.h>
126#include <linux/module.h>
127#include <linux/types.h>
128#include <linux/fcntl.h>
129#include <linux/interrupt.h>
130#include <linux/ioport.h>
131#include <linux/in.h>
132#include <linux/string.h>
133#include <linux/errno.h>
134#include <linux/init.h>
135#include <linux/crc32.h>
136#include <linux/netdevice.h>
137#include <linux/etherdevice.h>
138#include <linux/skbuff.h>
139#include <linux/spinlock.h>
140#include <linux/delay.h>
141#include <linux/bitops.h>
142
143#include <asm/system.h>
144#include <asm/io.h>
145#include <asm/dma.h>
146
147#include "atp.h"
148
149MODULE_AUTHOR("Donald Becker <becker@scyld.com>");
150MODULE_DESCRIPTION("RealTek RTL8002/8012 parallel port Ethernet driver");
151MODULE_LICENSE("GPL");
152
153module_param(max_interrupt_work, int, 0);
154module_param(debug, int, 0);
155module_param_array(io, int, NULL, 0);
156module_param_array(irq, int, NULL, 0);
157module_param_array(xcvr, int, NULL, 0);
158MODULE_PARM_DESC(max_interrupt_work, "ATP maximum events handled per interrupt");
159MODULE_PARM_DESC(debug, "ATP debug level (0-7)");
160MODULE_PARM_DESC(io, "ATP I/O base address(es)");
161MODULE_PARM_DESC(irq, "ATP IRQ number(s)");
162MODULE_PARM_DESC(xcvr, "ATP transceiver(s) (0=internal, 1=external)");
163
164/* The number of low I/O ports used by the ethercard. */
165#define ETHERCARD_TOTAL_SIZE 3
166
167/* Sequence to switch an 8012 from printer mux to ethernet mode. */
168static char mux_8012[] = { 0xff, 0xf7, 0xff, 0xfb, 0xf3, 0xfb, 0xff, 0xf7,};
169
/* Per-device private state for an ATP pocket adapter. */
struct net_local {
    spinlock_t lock;
    struct net_device *next_module;	/* Link in the driver's device list. */
    struct timer_list timer;	/* Media selection timer. */
    long last_rx_time;		/* Last Rx, in jiffies, to handle Rx hang. */
    int saved_tx_size;		/* presumably size of the last Tx packet -- TODO confirm */
    unsigned int tx_unit_busy:1;	/* Tx engine currently has a packet queued. */
    unsigned char re_tx,	/* Number of packet retransmissions. */
	addr_mode,		/* Current Rx filter e.g. promiscuous, etc. */
	pac_cnt_in_tx_buf,	/* Packets queued in the Tx buffer. */
	chip_type;		/* e.g. RTL8002 (set by atp_probe1). */
};
182
183/* This code, written by wwc@super.org, resets the adapter every
184 TIMED_CHECKER ticks. This recovers from an unknown error which
185 hangs the device. */
186#define TIMED_CHECKER (HZ/4)
187#ifdef TIMED_CHECKER
188#include <linux/timer.h>
189static void atp_timed_checker(unsigned long ignored);
190#endif
191
192/* Index to functions, as function prototypes. */
193
194static int atp_probe1(long ioaddr);
195static void get_node_ID(struct net_device *dev);
196static unsigned short eeprom_op(long ioaddr, unsigned int cmd);
197static int net_open(struct net_device *dev);
198static void hardware_init(struct net_device *dev);
199static void write_packet(long ioaddr, int length, unsigned char *packet, int pad, int mode);
200static void trigger_send(long ioaddr, int length);
201static netdev_tx_t atp_send_packet(struct sk_buff *skb,
202 struct net_device *dev);
203static irqreturn_t atp_interrupt(int irq, void *dev_id);
204static void net_rx(struct net_device *dev);
205static void read_block(long ioaddr, int length, unsigned char *buffer, int data_mode);
206static int net_close(struct net_device *dev);
207static void set_rx_mode(struct net_device *dev);
208static void tx_timeout(struct net_device *dev);
209
210
211/* A list of all installed ATP devices, for removing the driver module. */
212static struct net_device *root_atp_dev;
213
214/* Check for a network adapter of this type, and return '0' iff one exists.
215 If dev->base_addr == 0, probe all likely locations.
216 If dev->base_addr == 1, always return failure.
217 If dev->base_addr == 2, allocate space for the device and return success
218 (detachable devices only).
219
220 FIXME: we should use the parport layer for this
221 */
222static int __init atp_init(void)
223{
224 int *port, ports[] = {0x378, 0x278, 0x3bc, 0};
225 int base_addr = io[0];
226
227 if (base_addr > 0x1ff) /* Check a single specified location. */
228 return atp_probe1(base_addr);
229 else if (base_addr == 1) /* Don't probe at all. */
230 return -ENXIO;
231
232 for (port = ports; *port; port++) {
233 long ioaddr = *port;
234 outb(0x57, ioaddr + PAR_DATA);
235 if (inb(ioaddr + PAR_DATA) != 0x57)
236 continue;
237 if (atp_probe1(ioaddr) == 0)
238 return 0;
239 }
240
241 return -ENODEV;
242}
243
/* Netdev operations table; standard ethernet helpers fill the gaps. */
static const struct net_device_ops atp_netdev_ops = {
	.ndo_open		= net_open,
	.ndo_stop		= net_close,
	.ndo_start_xmit		= atp_send_packet,
	.ndo_set_multicast_list = set_rx_mode,
	.ndo_tx_timeout		= tx_timeout,
	.ndo_change_mtu		= eth_change_mtu,
	.ndo_set_mac_address 	= eth_mac_addr,
	.ndo_validate_addr	= eth_validate_addr,
};
254
/* Probe a single parallel-port address for an RTL8002/8012 pocket
 * adapter.  On success a netdev is allocated, configured and
 * registered, and 0 is returned; otherwise a negative errno is
 * returned and the port's control register is restored. */
static int __init atp_probe1(long ioaddr)
{
	struct net_device *dev = NULL;
	struct net_local *lp;
	int saved_ctrl_reg, status, i;
	int res;

	outb(0xff, ioaddr + PAR_DATA);
	/* Save the original value of the Control register, in case we guessed
	   wrong. */
	saved_ctrl_reg = inb(ioaddr + PAR_CONTROL);
	if (net_debug > 3)
		printk("atp: Control register was %#2.2x.\n", saved_ctrl_reg);
	/* IRQEN=0, SLCTB=high INITB=high, AUTOFDB=high, STBB=high. */
	outb(0x04, ioaddr + PAR_CONTROL);
#ifndef final_version
	if (net_debug > 3) {
		/* Turn off the printer multiplexer on the 8012. */
		for (i = 0; i < 8; i++)
			outb(mux_8012[i], ioaddr + PAR_DATA);
		write_reg(ioaddr, MODSEL, 0x00);
		printk("atp: Registers are ");
		for (i = 0; i < 32; i++)
			printk(" %2.2x", read_nibble(ioaddr, i));
		printk(".\n");
	}
#endif
	/* Turn off the printer multiplexer on the 8012. */
	for (i = 0; i < 8; i++)
		outb(mux_8012[i], ioaddr + PAR_DATA);
	write_reg_high(ioaddr, CMR1, CMR1h_RESET);
	/* udelay() here? */
	status = read_nibble(ioaddr, CMR1);

	if (net_debug > 3) {
		printk(KERN_DEBUG "atp: Status nibble was %#2.2x..", status);
		for (i = 0; i < 32; i++)
			printk(" %2.2x", read_nibble(ioaddr, i));
		printk("\n");
	}

	/* Sanity-check the CMR1 and CMR2 status nibbles to confirm the
	   adapter really answered. */
	if ((status & 0x78) != 0x08) {
		/* The pocket adapter probe failed, restore the control register. */
		outb(saved_ctrl_reg, ioaddr + PAR_CONTROL);
		return -ENODEV;
	}
	status = read_nibble(ioaddr, CMR2_h);
	if ((status & 0x78) != 0x10) {
		outb(saved_ctrl_reg, ioaddr + PAR_CONTROL);
		return -ENODEV;
	}

	dev = alloc_etherdev(sizeof(struct net_local));
	if (!dev)
		return -ENOMEM;

	/* Find the IRQ used by triggering an interrupt. */
	write_reg_byte(ioaddr, CMR2, 0x01); /* No accept mode, IRQ out. */
	write_reg_high(ioaddr, CMR1, CMR1h_RxENABLE | CMR1h_TxENABLE); /* Enable Tx and Rx. */

	/* Omit autoIRQ routine for now. Use "table lookup" instead. Uhgggh. */
	if (irq[0])
		dev->irq = irq[0];
	else if (ioaddr == 0x378)
		dev->irq = 7;
	else
		dev->irq = 5;
	write_reg_high(ioaddr, CMR1, CMR1h_TxRxOFF); /* Disable Tx and Rx units. */
	write_reg(ioaddr, CMR2, CMR2_NULL);

	dev->base_addr = ioaddr;

	/* Read the station address PROM. */
	get_node_ID(dev);

#ifndef MODULE
	if (net_debug)
		printk(KERN_INFO "%s", version);
#endif

	printk(KERN_NOTICE "%s: Pocket adapter found at %#3lx, IRQ %d, "
	       "SAPROM %pM.\n",
	       dev->name, dev->base_addr, dev->irq, dev->dev_addr);

	/* Reset the ethernet hardware and activate the printer pass-through. */
	write_reg_high(ioaddr, CMR1, CMR1h_RESET | CMR1h_MUX);

	lp = netdev_priv(dev);
	lp->chip_type = RTL8002;
	lp->addr_mode = CMR2h_Normal;
	spin_lock_init(&lp->lock);

	/* For the ATP adapter the "if_port" is really the data transfer mode. */
	if (xcvr[0])
		dev->if_port = xcvr[0];
	else
		dev->if_port = (dev->mem_start & 0xf) ? (dev->mem_start & 0x7) : 4;
	/* A low nibble in mem_end overrides the module debug level. */
	if (dev->mem_end & 0xf)
		net_debug = dev->mem_end & 7;

	dev->netdev_ops 	= &atp_netdev_ops;
	dev->watchdog_timeo	= TX_TIMEOUT;

	res = register_netdev(dev);
	if (res) {
		free_netdev(dev);
		return res;
	}

	/* Push onto the list of installed devices for module unload. */
	lp->next_module = root_atp_dev;
	root_atp_dev = dev;

	return 0;
}
369
370/* Read the station address PROM, usually a word-wide EEPROM. */
371static void __init get_node_ID(struct net_device *dev)
372{
373 long ioaddr = dev->base_addr;
374 int sa_offset = 0;
375 int i;
376
377 write_reg(ioaddr, CMR2, CMR2_EEPROM); /* Point to the EEPROM control registers. */
378
379 /* Some adapters have the station address at offset 15 instead of offset
380 zero. Check for it, and fix it if needed. */
381 if (eeprom_op(ioaddr, EE_READ(0)) == 0xffff)
382 sa_offset = 15;
383
384 for (i = 0; i < 3; i++)
385 ((__be16 *)dev->dev_addr)[i] =
386 cpu_to_be16(eeprom_op(ioaddr, EE_READ(sa_offset + i)));
387
388 write_reg(ioaddr, CMR2, CMR2_NULL);
389}
390
391/*
392 An EEPROM read command starts by shifting out 0x60+address, and then
393 shifting in the serial data. See the NatSemi databook for details.
394 * ________________
395 * CS : __|
396 * ___ ___
397 * CLK: ______| |___| |
398 * __ _______ _______
399 * DI : __X_______X_______X
400 * DO : _________X_______X
401 */
402
403static unsigned short __init eeprom_op(long ioaddr, u32 cmd)
404{
405 unsigned eedata_out = 0;
406 int num_bits = EE_CMD_SIZE;
407
408 while (--num_bits >= 0) {
409 char outval = (cmd & (1<<num_bits)) ? EE_DATA_WRITE : 0;
410 write_reg_high(ioaddr, PROM_CMD, outval | EE_CLK_LOW);
411 write_reg_high(ioaddr, PROM_CMD, outval | EE_CLK_HIGH);
412 eedata_out <<= 1;
413 if (read_nibble(ioaddr, PROM_DATA) & EE_DATA_READ)
414 eedata_out++;
415 }
416 write_reg_high(ioaddr, PROM_CMD, EE_CLK_LOW & ~EE_CS);
417 return eedata_out;
418}
419
420
421/* Open/initialize the board. This is called (in the current kernel)
422 sometime after booting when the 'ifconfig' program is run.
423
424 This routine sets everything up anew at each open, even
425 registers that "should" only need to be set once at boot, so that
426 there is non-reboot way to recover if something goes wrong.
427
428 This is an attachable device: if there is no private entry then it wasn't
429 probed for at boot-time, and we need to probe for it again.
430 */
431static int net_open(struct net_device *dev)
432{
433 struct net_local *lp = netdev_priv(dev);
434 int ret;
435
436 /* The interrupt line is turned off (tri-stated) when the device isn't in
437 use. That's especially important for "attached" interfaces where the
438 port or interrupt may be shared. */
439 ret = request_irq(dev->irq, atp_interrupt, 0, dev->name, dev);
440 if (ret)
441 return ret;
442
443 hardware_init(dev);
444
445 init_timer(&lp->timer);
446 lp->timer.expires = jiffies + TIMED_CHECKER;
447 lp->timer.data = (unsigned long)dev;
448 lp->timer.function = atp_timed_checker; /* timer handler */
449 add_timer(&lp->timer);
450
451 netif_start_queue(dev);
452 return 0;
453}
454
/* This routine resets the hardware. We initialize everything, assuming that
   the hardware may have been temporarily detached. */
static void hardware_init(struct net_device *dev)
{
	struct net_local *lp = netdev_priv(dev);
	long ioaddr = dev->base_addr;
	int i;

	/* Turn off the printer multiplexer on the 8012. */
	for (i = 0; i < 8; i++)
		outb(mux_8012[i], ioaddr + PAR_DATA);
	write_reg_high(ioaddr, CMR1, CMR1h_RESET);

	/* Reload the station address into PAR0..PAR5. */
	for (i = 0; i < 6; i++)
		write_reg_byte(ioaddr, PAR0 + i, dev->dev_addr[i]);

	/* Restore the saved Rx accept mode (normal/promiscuous/off). */
	write_reg_high(ioaddr, CMR2, lp->addr_mode);

	if (net_debug > 2) {
		printk(KERN_DEBUG "%s: Reset: current Rx mode %d.\n", dev->name,
		       (read_nibble(ioaddr, CMR2_h) >> 3) & 0x0f);
	}

	/* Route the adapter output to the IRQ line and enable Tx/Rx units. */
	write_reg(ioaddr, CMR2, CMR2_IRQOUT);
	write_reg_high(ioaddr, CMR1, CMR1h_RxENABLE | CMR1h_TxENABLE);

	/* Enable the interrupt line from the serial port. */
	outb(Ctrl_SelData + Ctrl_IRQEN, ioaddr + PAR_CONTROL);

	/* Unmask the interesting interrupts. */
	write_reg(ioaddr, IMR, ISR_RxOK | ISR_TxErr | ISR_TxOK);
	write_reg_high(ioaddr, IMR, ISRh_RxErr);

	/* Reset the driver's software Tx bookkeeping to match the chip. */
	lp->tx_unit_busy = 0;
	lp->pac_cnt_in_tx_buf = 0;
	lp->saved_tx_size = 0;
}
492
/* Latch LENGTH into the Tx byte-count registers and kick off transmission
   of the frame already copied into the adapter buffer. */
static void trigger_send(long ioaddr, int length)
{
	write_reg_byte(ioaddr, TxCNT0, length & 0xff);
	write_reg(ioaddr, TxCNT1, length >> 8);
	write_reg(ioaddr, CMR1, CMR1_Xmit);
}
499
/* Copy LENGTH bytes of PACKET (of which the last PAD_LEN are zero padding)
   into the adapter's Tx buffer, using the transfer mode in DATA_MODE
   (even modes use the fast dataport path, odd modes the slow
   data+control path). */
static void write_packet(long ioaddr, int length, unsigned char *packet, int pad_len, int data_mode)
{
	/* Round odd lengths up — presumably the buffer is word-oriented;
	   TODO(review): confirm against the RTL8002 datasheet. */
	if (length & 1)
	{
		length++;
		pad_len++;
	}

	outb(EOC+MAR, ioaddr + PAR_DATA);
	if ((data_mode & 1) == 0) {
		/* Write the packet out, starting with the write addr. */
		outb(WrAddr+MAR, ioaddr + PAR_DATA);
		do {
			write_byte_mode0(ioaddr, *packet++);
		} while (--length > pad_len) ;
		/* Zero-fill the padding region. */
		do {
			write_byte_mode0(ioaddr, 0);
		} while (--length > 0) ;
	} else {
		/* Write the packet out in slow mode. */
		unsigned char outbyte = *packet++;

		outb(Ctrl_LNibWrite + Ctrl_IRQEN, ioaddr + PAR_CONTROL);
		outb(WrAddr+MAR, ioaddr + PAR_DATA);

		/* The first byte is clocked out nibble-by-nibble by hand. */
		outb((outbyte & 0x0f)|0x40, ioaddr + PAR_DATA);
		outb(outbyte & 0x0f, ioaddr + PAR_DATA);
		outbyte >>= 4;
		outb(outbyte & 0x0f, ioaddr + PAR_DATA);
		outb(Ctrl_HNibWrite + Ctrl_IRQEN, ioaddr + PAR_CONTROL);
		while (--length > pad_len)
			write_byte_mode1(ioaddr, *packet++);
		while (--length > 0)
			write_byte_mode1(ioaddr, 0);
	}
	/* Terminate the Tx frame. End of write: ECB. */
	outb(0xff, ioaddr + PAR_DATA);
	outb(Ctrl_HNibWrite | Ctrl_SelData | Ctrl_IRQEN, ioaddr + PAR_CONTROL);
}
539
540static void tx_timeout(struct net_device *dev)
541{
542 long ioaddr = dev->base_addr;
543
544 printk(KERN_WARNING "%s: Transmit timed out, %s?\n", dev->name,
545 inb(ioaddr + PAR_CONTROL) & 0x10 ? "network cable problem"
546 : "IRQ conflict");
547 dev->stats.tx_errors++;
548 /* Try to restart the adapter. */
549 hardware_init(dev);
550 dev->trans_start = jiffies; /* prevent tx timeout */
551 netif_wake_queue(dev);
552 dev->stats.tx_errors++;
553}
554
/* ndo_start_xmit: copy the frame into the adapter buffer over the parallel
   port; the skb is consumed here, so it is freed before returning. */
static netdev_tx_t atp_send_packet(struct sk_buff *skb,
				   struct net_device *dev)
{
	struct net_local *lp = netdev_priv(dev);
	long ioaddr = dev->base_addr;
	int length;
	unsigned long flags;

	/* Pad short frames up to the ethernet minimum length. */
	length = ETH_ZLEN < skb->len ? skb->len : ETH_ZLEN;

	netif_stop_queue(dev);

	/* Disable interrupts by writing 0x00 to the Interrupt Mask Register.
	   This sequence must not be interrupted by an incoming packet. */

	spin_lock_irqsave(&lp->lock, flags);
	write_reg(ioaddr, IMR, 0);
	write_reg_high(ioaddr, IMR, 0);
	spin_unlock_irqrestore(&lp->lock, flags);

	/* length - skb->len is the number of trailing pad bytes. */
	write_packet(ioaddr, length, skb->data, length-skb->len, dev->if_port);

	lp->pac_cnt_in_tx_buf++;
	if (lp->tx_unit_busy == 0) {
		/* Tx unit idle: start this frame immediately. */
		trigger_send(ioaddr, length);
		lp->saved_tx_size = 0;				/* Redundant */
		lp->re_tx = 0;
		lp->tx_unit_busy = 1;
	} else
		/* A frame is in flight; the interrupt handler will start
		   this one (using saved_tx_size) on Tx completion. */
		lp->saved_tx_size = length;
	/* Re-enable the LPT interrupts. */
	write_reg(ioaddr, IMR, ISR_RxOK | ISR_TxErr | ISR_TxOK);
	write_reg_high(ioaddr, IMR, ISRh_RxErr);

	dev_kfree_skb (skb);
	return NETDEV_TX_OK;
}
592
593
/* The typical workload of the driver:
   Handle the network interface interrupts. */
static irqreturn_t atp_interrupt(int irq, void *dev_instance)
{
	struct net_device *dev = dev_instance;
	struct net_local *lp;
	long ioaddr;
	/* Function-static: shared across invocations (single-adapter state). */
	static int num_tx_since_rx;
	int boguscount = max_interrupt_work;
	int handled = 0;

	ioaddr = dev->base_addr;
	lp = netdev_priv(dev);

	spin_lock(&lp->lock);

	/* Disable additional spurious interrupts. */
	outb(Ctrl_SelData, ioaddr + PAR_CONTROL);

	/* The adapter's output is currently the IRQ line, switch it to data. */
	write_reg(ioaddr, CMR2, CMR2_NULL);
	write_reg(ioaddr, IMR, 0);

	if (net_debug > 5) printk(KERN_DEBUG "%s: In interrupt ", dev->name);
	/* Service events until none remain or the work budget is spent. */
	while (--boguscount > 0) {
		int status = read_nibble(ioaddr, ISR);
		if (net_debug > 5) printk("loop status %02x..", status);

		if (status & (ISR_RxOK<<3)) {
			handled = 1;
			write_reg(ioaddr, ISR, ISR_RxOK); /* Clear the Rx interrupt. */
			do {
				int read_status = read_nibble(ioaddr, CMR1);
				if (net_debug > 6)
					printk("handling Rx packet %02x..", read_status);
				/* We acknowledged the normal Rx interrupt, so if the interrupt
				   is still outstanding we must have a Rx error. */
				if (read_status & (CMR1_IRQ << 3)) { /* Overrun. */
					dev->stats.rx_over_errors++;
					/* Set to no-accept mode long enough to remove a packet. */
					write_reg_high(ioaddr, CMR2, CMR2h_OFF);
					net_rx(dev);
					/* Clear the interrupt and return to normal Rx mode. */
					write_reg_high(ioaddr, ISR, ISRh_RxErr);
					write_reg_high(ioaddr, CMR2, lp->addr_mode);
				} else if ((read_status & (CMR1_BufEnb << 3)) == 0) {
					net_rx(dev);
					num_tx_since_rx = 0;
				} else
					break;
			} while (--boguscount > 0);
		} else if (status & ((ISR_TxErr + ISR_TxOK)<<3)) {
			handled = 1;
			if (net_debug > 6) printk("handling Tx done..");
			/* Clear the Tx interrupt. We should check for too many failures
			   and reinitialize the adapter. */
			write_reg(ioaddr, ISR, ISR_TxErr + ISR_TxOK);
			if (status & (ISR_TxErr<<3)) {
				dev->stats.collisions++;
				/* Give up and reset after 16 failed attempts. */
				if (++lp->re_tx > 15) {
					dev->stats.tx_aborted_errors++;
					hardware_init(dev);
					break;
				}
				/* Attempt to retransmit. */
				if (net_debug > 6) printk("attempting to ReTx");
				write_reg(ioaddr, CMR1, CMR1_ReXmit + CMR1_Xmit);
			} else {
				/* Finish up the transmit. */
				dev->stats.tx_packets++;
				lp->pac_cnt_in_tx_buf--;
				/* Start the queued frame, if one is waiting. */
				if ( lp->saved_tx_size) {
					trigger_send(ioaddr, lp->saved_tx_size);
					lp->saved_tx_size = 0;
					lp->re_tx = 0;
				} else
					lp->tx_unit_busy = 0;
				netif_wake_queue(dev);	/* Inform upper layers. */
			}
			num_tx_since_rx++;
		} else if (num_tx_since_rx > 8 &&
			   time_after(jiffies, dev->last_rx + HZ)) {
			/* Many Tx with no Rx for over a second: the adapter may
			   have wedged; reset it. */
			if (net_debug > 2)
				printk(KERN_DEBUG "%s: Missed packet? No Rx after %d Tx and "
				       "%ld jiffies status %02x CMR1 %02x.\n", dev->name,
				       num_tx_since_rx, jiffies - dev->last_rx, status,
				       (read_nibble(ioaddr, CMR1) >> 3) & 15);
			dev->stats.rx_missed_errors++;
			hardware_init(dev);
			num_tx_since_rx = 0;
			break;
		} else
			break;
	}

	/* This following code fixes a rare (and very difficult to track down)
	   problem where the adapter forgets its ethernet address. */
	{
		int i;
		for (i = 0; i < 6; i++)
			write_reg_byte(ioaddr, PAR0 + i, dev->dev_addr[i]);
#if 0 && defined(TIMED_CHECKER)
		mod_timer(&lp->timer, jiffies + TIMED_CHECKER);
#endif
	}

	/* Tell the adapter that it can go back to using the output line as IRQ. */
	write_reg(ioaddr, CMR2, CMR2_IRQOUT);
	/* Enable the physical interrupt line, which is sure to be low until.. */
	outb(Ctrl_SelData + Ctrl_IRQEN, ioaddr + PAR_CONTROL);
	/* .. we enable the interrupt sources. */
	write_reg(ioaddr, IMR, ISR_RxOK | ISR_TxErr | ISR_TxOK);
	write_reg_high(ioaddr, IMR, ISRh_RxErr);	/* Hmmm, really needed? */

	spin_unlock(&lp->lock);

	if (net_debug > 5) printk("exiting interrupt.\n");
	return IRQ_RETVAL(handled);
}
713
#ifdef TIMED_CHECKER
/* This following code fixes a rare (and very difficult to track down)
   problem where the adapter forgets its ethernet address: if no packet
   was received for two seconds, rewrite the station address registers. */
static void atp_timed_checker(unsigned long data)
{
	struct net_device *dev = (struct net_device *)data;
	long ioaddr = dev->base_addr;
	struct net_local *lp = netdev_priv(dev);
	int tickssofar = jiffies - lp->last_rx_time;
	int i;

	spin_lock(&lp->lock);
	if (tickssofar > 2*HZ) {
		/* Rewrite PAR0..PAR5 unconditionally.  (The old read-back
		   verification variant was dead code and has been removed.) */
		for (i = 0; i < 6; i++)
			write_reg_byte(ioaddr, PAR0 + i, dev->dev_addr[i]);
		lp->last_rx_time = jiffies;
	}
	spin_unlock(&lp->lock);
	/* Re-arm with mod_timer() — the idiomatic replacement for poking
	   timer.expires and calling add_timer() on an expired timer. */
	mod_timer(&lp->timer, jiffies + TIMED_CHECKER);
}
#endif
753
/* We have a good packet(s), get it/them out of the buffers. */
static void net_rx(struct net_device *dev)
{
	struct net_local *lp = netdev_priv(dev);
	long ioaddr = dev->base_addr;
	struct rx_header rx_head;

	/* Process the received packet: fetch the 8-byte Rx header first. */
	outb(EOC+MAR, ioaddr + PAR_DATA);
	read_block(ioaddr, 8, (unsigned char*)&rx_head, dev->if_port);
	if (net_debug > 5)
		printk(KERN_DEBUG " rx_count %04x %04x %04x %04x..", rx_head.pad,
		       rx_head.rx_count, rx_head.rx_status, rx_head.cur_addr);
	if ((rx_head.rx_status & 0x77) != 0x01) {
		/* Bad frame: classify the error and recover. */
		dev->stats.rx_errors++;
		if (rx_head.rx_status & 0x0004) dev->stats.rx_frame_errors++;
		else if (rx_head.rx_status & 0x0002) dev->stats.rx_crc_errors++;
		if (net_debug > 3)
			printk(KERN_DEBUG "%s: Unknown ATP Rx error %04x.\n",
			       dev->name, rx_head.rx_status);
		if (rx_head.rx_status & 0x0020) {
			/* FIFO overrun: bounce the Rx unit to recover. */
			dev->stats.rx_fifo_errors++;
			write_reg_high(ioaddr, CMR1, CMR1h_TxENABLE);
			write_reg_high(ioaddr, CMR1, CMR1h_RxENABLE | CMR1h_TxENABLE);
		} else if (rx_head.rx_status & 0x0050)
			hardware_init(dev);
		return;
	} else {
		/* Malloc up new buffer. The "-4" omits the FCS (CRC). */
		int pkt_len = (rx_head.rx_count & 0x7ff) - 4;
		struct sk_buff *skb;

		skb = dev_alloc_skb(pkt_len + 2);
		if (skb == NULL) {
			printk(KERN_ERR "%s: Memory squeeze, dropping packet.\n",
			       dev->name);
			dev->stats.rx_dropped++;
			goto done;
		}

		skb_reserve(skb, 2);	/* Align IP on 16 byte boundaries */
		read_block(ioaddr, pkt_len, skb_put(skb,pkt_len), dev->if_port);
		skb->protocol = eth_type_trans(skb, dev);
		netif_rx(skb);
		dev->last_rx = jiffies;
		dev->stats.rx_packets++;
		dev->stats.rx_bytes += pkt_len;
	}
 done:
	/* Advance the adapter to the next packet in its ring. */
	write_reg(ioaddr, CMR1, CMR1_NextPkt);
	lp->last_rx_time = jiffies;
}
806
/* Bulk-read LENGTH bytes from the adapter into *p, using the transfer
   mode in DATA_MODE (same encoding as dev->if_port). */
static void read_block(long ioaddr, int length, unsigned char *p, int data_mode)
{
	if (data_mode <= 3) { /* Mode 0 or 1 */
		outb(Ctrl_LNibRead, ioaddr + PAR_CONTROL);
		/* An 8-byte read is the Rx header, addressed via HNib. */
		outb(length == 8 ? RdAddr | HNib | MAR : RdAddr | MAR,
		     ioaddr + PAR_DATA);
		if (data_mode <= 1) { /* Mode 0 or 1 */
			do { *p++ = read_byte_mode0(ioaddr); } while (--length > 0);
		} else { /* Mode 2 or 3 */
			do { *p++ = read_byte_mode2(ioaddr); } while (--length > 0);
		}
	} else if (data_mode <= 5) {
		/* Modes 4 and 5: read through the data register. */
		do { *p++ = read_byte_mode4(ioaddr); } while (--length > 0);
	} else {
		/* Modes 6 and 7: as mode 4, with extra settling reads. */
		do { *p++ = read_byte_mode6(ioaddr); } while (--length > 0);
	}

	/* Terminate the read and return the port to data mode. */
	outb(EOC+HNib+MAR, ioaddr + PAR_DATA);
	outb(Ctrl_SelData, ioaddr + PAR_CONTROL);
}
827
/* The inverse routine to net_open(): stop the queue and timer, disable
   Rx, release the IRQ and hand the port back to the printer. */
static int
net_close(struct net_device *dev)
{
	struct net_local *lp = netdev_priv(dev);
	long ioaddr = dev->base_addr;

	netif_stop_queue(dev);

	/* Waits for a running atp_timed_checker() to finish. */
	del_timer_sync(&lp->timer);

	/* Flush the Tx and disable Rx here. */
	lp->addr_mode = CMR2h_OFF;
	write_reg_high(ioaddr, CMR2, CMR2h_OFF);

	/* Free the IRQ line. */
	outb(0x00, ioaddr + PAR_CONTROL);
	free_irq(dev->irq, dev);

	/* Reset the ethernet hardware and activate the printer pass-through. */
	write_reg_high(ioaddr, CMR1, CMR1h_RESET | CMR1h_MUX);
	return 0;
}
851
852/*
853 * Set or clear the multicast filter for this adapter.
854 */
855
856static void set_rx_mode_8002(struct net_device *dev)
857{
858 struct net_local *lp = netdev_priv(dev);
859 long ioaddr = dev->base_addr;
860
861 if (!netdev_mc_empty(dev) || (dev->flags & (IFF_ALLMULTI|IFF_PROMISC)))
862 lp->addr_mode = CMR2h_PROMISC;
863 else
864 lp->addr_mode = CMR2h_Normal;
865 write_reg_high(ioaddr, CMR2, lp->addr_mode);
866}
867
868static void set_rx_mode_8012(struct net_device *dev)
869{
870 struct net_local *lp = netdev_priv(dev);
871 long ioaddr = dev->base_addr;
872 unsigned char new_mode, mc_filter[8]; /* Multicast hash filter */
873 int i;
874
875 if (dev->flags & IFF_PROMISC) { /* Set promiscuous. */
876 new_mode = CMR2h_PROMISC;
877 } else if ((netdev_mc_count(dev) > 1000) ||
878 (dev->flags & IFF_ALLMULTI)) {
879 /* Too many to filter perfectly -- accept all multicasts. */
880 memset(mc_filter, 0xff, sizeof(mc_filter));
881 new_mode = CMR2h_Normal;
882 } else {
883 struct netdev_hw_addr *ha;
884
885 memset(mc_filter, 0, sizeof(mc_filter));
886 netdev_for_each_mc_addr(ha, dev) {
887 int filterbit = ether_crc_le(ETH_ALEN, ha->addr) & 0x3f;
888 mc_filter[filterbit >> 5] |= 1 << (filterbit & 31);
889 }
890 new_mode = CMR2h_Normal;
891 }
892 lp->addr_mode = new_mode;
893 write_reg(ioaddr, CMR2, CMR2_IRQOUT | 0x04); /* Switch to page 1. */
894 for (i = 0; i < 8; i++)
895 write_reg_byte(ioaddr, i, mc_filter[i]);
896 if (net_debug > 2 || 1) {
897 lp->addr_mode = 1;
898 printk(KERN_DEBUG "%s: Mode %d, setting multicast filter to",
899 dev->name, lp->addr_mode);
900 for (i = 0; i < 8; i++)
901 printk(" %2.2x", mc_filter[i]);
902 printk(".\n");
903 }
904
905 write_reg_high(ioaddr, CMR2, lp->addr_mode);
906 write_reg(ioaddr, CMR2, CMR2_IRQOUT); /* Switch back to page 0 */
907}
908
909static void set_rx_mode(struct net_device *dev)
910{
911 struct net_local *lp = netdev_priv(dev);
912
913 if (lp->chip_type == RTL8002)
914 return set_rx_mode_8002(dev);
915 else
916 return set_rx_mode_8012(dev);
917}
918
919
920static int __init atp_init_module(void) {
921 if (debug) /* Emit version even if no cards detected. */
922 printk(KERN_INFO "%s", version);
923 return atp_init();
924}
925
926static void __exit atp_cleanup_module(void) {
927 struct net_device *next_dev;
928
929 while (root_atp_dev) {
930 struct net_local *atp_local = netdev_priv(root_atp_dev);
931 next_dev = atp_local->next_module;
932 unregister_netdev(root_atp_dev);
933 /* No need to release_region(), since we never snarf it. */
934 free_netdev(root_atp_dev);
935 root_atp_dev = next_dev;
936 }
937}
938
939module_init(atp_init_module);
940module_exit(atp_cleanup_module);
diff --git a/drivers/net/ethernet/realtek/atp.h b/drivers/net/ethernet/realtek/atp.h
new file mode 100644
index 000000000000..0edc642c2c2f
--- /dev/null
+++ b/drivers/net/ethernet/realtek/atp.h
@@ -0,0 +1,259 @@
1/* Linux header file for the ATP pocket ethernet adapter. */
2/* v1.09 8/9/2000 becker@scyld.com. */
3
4#include <linux/if_ether.h>
5#include <linux/types.h>
6
/* The header prepended to received packets. */
struct rx_header {
	ushort pad;		/* Pad. */
	ushort rx_count;	/* Frame byte count; low 11 bits used, includes FCS. */
	ushort rx_status;	/* Unknown bit assignments :-<. */
	ushort cur_addr;	/* Apparently the current buffer address(?) */
};
14
15#define PAR_DATA 0
16#define PAR_STATUS 1
17#define PAR_CONTROL 2
18
19enum chip_type { RTL8002, RTL8012 };
20
21#define Ctrl_LNibRead 0x08 /* LP_PSELECP */
22#define Ctrl_HNibRead 0
23#define Ctrl_LNibWrite 0x08 /* LP_PSELECP */
24#define Ctrl_HNibWrite 0
25#define Ctrl_SelData 0x04 /* LP_PINITP */
26#define Ctrl_IRQEN 0x10 /* LP_PINTEN */
27
28#define EOW 0xE0
29#define EOC 0xE0
30#define WrAddr 0x40 /* Set address of EPLC read, write register. */
31#define RdAddr 0xC0
32#define HNib 0x10
33
/* Register map of page 0 of the adapter's register file.  Registers are
   accessed nibble-at-a-time through the parallel port helpers below. */
enum page0_regs
{
	/* The first six registers hold the ethernet physical station address. */
	PAR0 = 0, PAR1 = 1, PAR2 = 2, PAR3 = 3, PAR4 = 4, PAR5 = 5,
	TxCNT0 = 6, TxCNT1 = 7,		/* The transmit byte count. */
	TxSTAT = 8, RxSTAT = 9,		/* Tx and Rx status. */
	ISR = 10, IMR = 11,		/* Interrupt status and mask. */
	CMR1 = 12,			/* Command register 1. */
	CMR2 = 13,			/* Command register 2. */
	MODSEL = 14,			/* Mode select register. */
	MAR = 14,			/* Memory address register (?). */
	CMR2_h = 0x1d, };		/* High nibble of CMR2. */
46
47enum eepage_regs
48{ PROM_CMD = 6, PROM_DATA = 7 }; /* Note that PROM_CMD is in the "high" bits. */
49
50
51#define ISR_TxOK 0x01
52#define ISR_RxOK 0x04
53#define ISR_TxErr 0x02
54#define ISRh_RxErr 0x11 /* ISR, high nibble */
55
56#define CMR1h_MUX 0x08 /* Select printer multiplexor on 8012. */
57#define CMR1h_RESET 0x04 /* Reset. */
58#define CMR1h_RxENABLE 0x02 /* Rx unit enable. */
59#define CMR1h_TxENABLE 0x01 /* Tx unit enable. */
60#define CMR1h_TxRxOFF 0x00
61#define CMR1_ReXmit 0x08 /* Trigger a retransmit. */
62#define CMR1_Xmit 0x04 /* Trigger a transmit. */
63#define CMR1_IRQ 0x02 /* Interrupt active. */
64#define CMR1_BufEnb 0x01 /* Enable the buffer(?). */
65#define CMR1_NextPkt 0x01 /* Enable the buffer(?). */
66
67#define CMR2_NULL 8
68#define CMR2_IRQOUT 9
69#define CMR2_RAMTEST 10
70#define CMR2_EEPROM 12 /* Set to page 1, for reading the EEPROM. */
71
72#define CMR2h_OFF 0 /* No accept mode. */
73#define CMR2h_Physical 1 /* Accept a physical address match only. */
74#define CMR2h_Normal 2 /* Accept physical and broadcast address. */
75#define CMR2h_PROMISC 3 /* Promiscuous mode. */
76
/* An inline function used below: it differs from inb() by explicitly
   returning an unsigned char, saving a truncation.
   NOTE(review): raw x86 inline assembly — this header is x86-only. */
static inline unsigned char inbyte(unsigned short port)
{
	unsigned char _v;
	__asm__ __volatile__ ("inb %w1,%b0" :"=a" (_v):"d" (port));
	return _v;
}
85
/* Read register OFFSET.
   This command should always be terminated with read_end().
   The nibble comes back in the parallel-port status byte; callers
   typically shift the result right by 3 to extract it. */
static inline unsigned char read_nibble(short port, unsigned char offset)
{
	unsigned char retval;
	outb(EOC+offset, port + PAR_DATA);
	outb(RdAddr+offset, port + PAR_DATA);
	inbyte(port + PAR_STATUS);	/* Settling time delay */
	retval = inbyte(port + PAR_STATUS);
	outb(EOC+offset, port + PAR_DATA);

	return retval;
}
99
/* Functions for bulk data read. The interrupt line is always disabled. */
/* Get a byte using read mode 0, reading data from the control lines.
   The low nibble is fetched first, then the control register is flipped
   to expose the high nibble. */
static inline unsigned char read_byte_mode0(short ioaddr)
{
	unsigned char low_nib;

	outb(Ctrl_LNibRead, ioaddr + PAR_CONTROL);
	inbyte(ioaddr + PAR_STATUS);
	low_nib = (inbyte(ioaddr + PAR_STATUS) >> 3) & 0x0f;
	outb(Ctrl_HNibRead, ioaddr + PAR_CONTROL);
	inbyte(ioaddr + PAR_STATUS);	/* Settling time delay -- needed! */
	inbyte(ioaddr + PAR_STATUS);	/* Settling time delay -- needed! */
	return low_nib | ((inbyte(ioaddr + PAR_STATUS) << 1) & 0xf0);
}
114
/* The same as read_byte_mode0(), but does multiple inb()s for stability.
   (Only one settling read on the high nibble, versus two in mode 0.) */
static inline unsigned char read_byte_mode2(short ioaddr)
{
	unsigned char low_nib;

	outb(Ctrl_LNibRead, ioaddr + PAR_CONTROL);
	inbyte(ioaddr + PAR_STATUS);
	low_nib = (inbyte(ioaddr + PAR_STATUS) >> 3) & 0x0f;
	outb(Ctrl_HNibRead, ioaddr + PAR_CONTROL);
	inbyte(ioaddr + PAR_STATUS);	/* Settling time delay -- needed! */
	return low_nib | ((inbyte(ioaddr + PAR_STATUS) << 1) & 0xf0);
}
127
/* Read a byte through the data register: nibble select is driven via
   PAR_DATA rather than PAR_CONTROL. */
static inline unsigned char read_byte_mode4(short ioaddr)
{
	unsigned char low_nib;

	outb(RdAddr | MAR, ioaddr + PAR_DATA);
	low_nib = (inbyte(ioaddr + PAR_STATUS) >> 3) & 0x0f;
	outb(RdAddr | HNib | MAR, ioaddr + PAR_DATA);
	return low_nib | ((inbyte(ioaddr + PAR_STATUS) << 1) & 0xf0);
}
138
/* Read a byte through the data register, double reading to allow settling.
   Same as mode 4 plus one discarded status read per nibble. */
static inline unsigned char read_byte_mode6(short ioaddr)
{
	unsigned char low_nib;

	outb(RdAddr | MAR, ioaddr + PAR_DATA);
	inbyte(ioaddr + PAR_STATUS);
	low_nib = (inbyte(ioaddr + PAR_STATUS) >> 3) & 0x0f;
	outb(RdAddr | HNib | MAR, ioaddr + PAR_DATA);
	inbyte(ioaddr + PAR_STATUS);
	return low_nib | ((inbyte(ioaddr + PAR_STATUS) << 1) & 0xf0);
}
151
/* Write the low nibble of VALUE to register REG on page 0.  The write
   address is latched first (doubled for PS/2-style ports), then the data
   nibble is clocked in. */
static inline void
write_reg(short port, unsigned char reg, unsigned char value)
{
	unsigned char outval;
	outb(EOC | reg, port + PAR_DATA);
	outval = WrAddr | reg;
	outb(outval, port + PAR_DATA);
	outb(outval, port + PAR_DATA);	/* Double write for PS/2. */

	outval &= 0xf0;
	outval |= value;
	outb(outval, port + PAR_DATA);
	outval &= 0x1f;
	outb(outval, port + PAR_DATA);
	outb(outval, port + PAR_DATA);

	outb(EOC | outval, port + PAR_DATA);
}
170
/* Write the low nibble of VALUE to the HIGH nibble of register REG —
   same protocol as write_reg() with the HNib select bit set throughout. */
static inline void
write_reg_high(short port, unsigned char reg, unsigned char value)
{
	unsigned char outval = EOC | HNib | reg;

	outb(outval, port + PAR_DATA);
	outval &= WrAddr | HNib | 0x0f;
	outb(outval, port + PAR_DATA);
	outb(outval, port + PAR_DATA);	/* Double write for PS/2. */

	outval = WrAddr | HNib | value;
	outb(outval, port + PAR_DATA);
	outval &= HNib | 0x0f;		/* HNib | value */
	outb(outval, port + PAR_DATA);
	outb(outval, port + PAR_DATA);

	outb(EOC | HNib | outval, port + PAR_DATA);
}
189
/* Write a full byte to register REG using nibble mode.
   The low nibble is written first, then the high nibble. */
static inline void
write_reg_byte(short port, unsigned char reg, unsigned char value)
{
	unsigned char outval;
	outb(EOC | reg, port + PAR_DATA);	/* Reset the address register. */
	outval = WrAddr | reg;
	outb(outval, port + PAR_DATA);
	outb(outval, port + PAR_DATA);	/* Double write for PS/2. */

	outb((outval & 0xf0) | (value & 0x0f), port + PAR_DATA);
	outb(value & 0x0f, port + PAR_DATA);
	value >>= 4;
	outb(value, port + PAR_DATA);
	outb(0x10 | value, port + PAR_DATA);
	outb(0x10 | value, port + PAR_DATA);

	outb(EOC | value, port + PAR_DATA);	/* Reset the address register. */
}
209
210/*
211 * Bulk data writes to the packet buffer. The interrupt line remains enabled.
212 * The first, faster method uses only the dataport (data modes 0, 2 & 4).
213 * The second (backup) method uses data and control regs (modes 1, 3 & 5).
214 * It should only be needed when there is skew between the individual data
215 * lines.
216 */
/* Fast path: clock a byte into the packet buffer through the dataport
   only (modes 0, 2 and 4).  Low nibble first; 0x10 flags the high half. */
static inline void write_byte_mode0(short ioaddr, unsigned char value)
{
	outb(value & 0x0f, ioaddr + PAR_DATA);
	outb((value>>4) | 0x10, ioaddr + PAR_DATA);
}
222
/* Slow path: clock a byte in via data AND control registers (modes 1, 3
   and 5), for ports with skew between individual data lines. */
static inline void write_byte_mode1(short ioaddr, unsigned char value)
{
	outb(value & 0x0f, ioaddr + PAR_DATA);
	outb(Ctrl_IRQEN | Ctrl_LNibWrite, ioaddr + PAR_CONTROL);
	outb((value>>4) | 0x10, ioaddr + PAR_DATA);
	outb(Ctrl_IRQEN | Ctrl_HNibWrite, ioaddr + PAR_CONTROL);
}
230
/* Write 16bit VALUE to the packet buffer: the same as write_byte_mode0()
   just doubled — four nibble writes, low nibble of the low byte first. */
static inline void write_word_mode0(short ioaddr, unsigned short value)
{
	outb(value & 0x0f, ioaddr + PAR_DATA);
	value >>= 4;
	outb((value & 0x0f) | 0x10, ioaddr + PAR_DATA);
	value >>= 4;
	outb(value & 0x0f, ioaddr + PAR_DATA);
	value >>= 4;
	outb((value & 0x0f) | 0x10, ioaddr + PAR_DATA);
}
242
243/* EEPROM_Ctrl bits. */
244#define EE_SHIFT_CLK 0x04 /* EEPROM shift clock. */
245#define EE_CS 0x02 /* EEPROM chip select. */
246#define EE_CLK_HIGH 0x12
247#define EE_CLK_LOW 0x16
248#define EE_DATA_WRITE 0x01 /* EEPROM chip data in. */
249#define EE_DATA_READ 0x08 /* EEPROM chip data out. */
250
251/* Delay between EEPROM clock transitions. */
252#define eeprom_delay(ticks) \
253do { int _i = 40; while (--_i > 0) { __SLOW_DOWN_IO; }} while (0)
254
/* The EEPROM commands include the always-set leading bit. */
256#define EE_WRITE_CMD(offset) (((5 << 6) + (offset)) << 17)
257#define EE_READ(offset) (((6 << 6) + (offset)) << 17)
258#define EE_ERASE(offset) (((7 << 6) + (offset)) << 17)
259#define EE_CMD_SIZE 27 /* The command+address+data size. */
diff --git a/drivers/net/ethernet/realtek/r8169.c b/drivers/net/ethernet/realtek/r8169.c
new file mode 100644
index 000000000000..02339b3352e7
--- /dev/null
+++ b/drivers/net/ethernet/realtek/r8169.c
@@ -0,0 +1,5824 @@
1/*
2 * r8169.c: RealTek 8169/8168/8101 ethernet driver.
3 *
4 * Copyright (c) 2002 ShuChen <shuchen@realtek.com.tw>
5 * Copyright (c) 2003 - 2007 Francois Romieu <romieu@fr.zoreil.com>
6 * Copyright (c) a lot of people too. Please respect their work.
7 *
8 * See MAINTAINERS file for support contact information.
9 */
10
11#include <linux/module.h>
12#include <linux/moduleparam.h>
13#include <linux/pci.h>
14#include <linux/netdevice.h>
15#include <linux/etherdevice.h>
16#include <linux/delay.h>
17#include <linux/ethtool.h>
18#include <linux/mii.h>
19#include <linux/if_vlan.h>
20#include <linux/crc32.h>
21#include <linux/in.h>
22#include <linux/ip.h>
23#include <linux/tcp.h>
24#include <linux/init.h>
25#include <linux/interrupt.h>
26#include <linux/dma-mapping.h>
27#include <linux/pm_runtime.h>
28#include <linux/firmware.h>
29#include <linux/pci-aspm.h>
30#include <linux/prefetch.h>
31
32#include <asm/system.h>
33#include <asm/io.h>
34#include <asm/irq.h>
35
36#define RTL8169_VERSION "2.3LK-NAPI"
37#define MODULENAME "r8169"
38#define PFX MODULENAME ": "
39
40#define FIRMWARE_8168D_1 "rtl_nic/rtl8168d-1.fw"
41#define FIRMWARE_8168D_2 "rtl_nic/rtl8168d-2.fw"
42#define FIRMWARE_8168E_1 "rtl_nic/rtl8168e-1.fw"
43#define FIRMWARE_8168E_2 "rtl_nic/rtl8168e-2.fw"
44#define FIRMWARE_8168E_3 "rtl_nic/rtl8168e-3.fw"
45#define FIRMWARE_8105E_1 "rtl_nic/rtl8105e-1.fw"
46
47#ifdef RTL8169_DEBUG
48#define assert(expr) \
49 if (!(expr)) { \
50 printk( "Assertion failed! %s,%s,%s,line=%d\n", \
51 #expr,__FILE__,__func__,__LINE__); \
52 }
53#define dprintk(fmt, args...) \
54 do { printk(KERN_DEBUG PFX fmt, ## args); } while (0)
55#else
56#define assert(expr) do {} while (0)
57#define dprintk(fmt, args...) do {} while (0)
58#endif /* RTL8169_DEBUG */
59
60#define R8169_MSG_DEFAULT \
61 (NETIF_MSG_DRV | NETIF_MSG_PROBE | NETIF_MSG_IFUP | NETIF_MSG_IFDOWN)
62
63#define TX_BUFFS_AVAIL(tp) \
64 (tp->dirty_tx + NUM_TX_DESC - tp->cur_tx - 1)
65
66/* Maximum number of multicast addresses to filter (vs. Rx-all-multicast).
67 The RTL chips use a 64 element hash table based on the Ethernet CRC. */
68static const int multicast_filter_limit = 32;
69
70/* MAC address length */
71#define MAC_ADDR_LEN 6
72
73#define MAX_READ_REQUEST_SHIFT 12
74#define TX_DMA_BURST 6 /* Maximum PCI burst, '6' is 1024 */
75#define SafeMtu 0x1c20 /* ... actually life sucks beyond ~7k */
76#define InterFrameGap 0x03 /* 3 means InterFrameGap = the shortest one */
77
78#define R8169_REGS_SIZE 256
79#define R8169_NAPI_WEIGHT 64
80#define NUM_TX_DESC 64 /* Number of Tx descriptor registers */
81#define NUM_RX_DESC 256 /* Number of Rx descriptor registers */
82#define RX_BUF_SIZE 1536 /* Rx Buffer size */
83#define R8169_TX_RING_BYTES (NUM_TX_DESC * sizeof(struct TxDesc))
84#define R8169_RX_RING_BYTES (NUM_RX_DESC * sizeof(struct RxDesc))
85
86#define RTL8169_TX_TIMEOUT (6*HZ)
87#define RTL8169_PHY_TIMEOUT (10*HZ)
88
89#define RTL_EEPROM_SIG cpu_to_le32(0x8129)
90#define RTL_EEPROM_SIG_MASK cpu_to_le32(0xffff)
91#define RTL_EEPROM_SIG_ADDR 0x0000
92
93/* write/read MMIO register */
94#define RTL_W8(reg, val8) writeb ((val8), ioaddr + (reg))
95#define RTL_W16(reg, val16) writew ((val16), ioaddr + (reg))
96#define RTL_W32(reg, val32) writel ((val32), ioaddr + (reg))
97#define RTL_R8(reg) readb (ioaddr + (reg))
98#define RTL_R16(reg) readw (ioaddr + (reg))
99#define RTL_R32(reg) readl (ioaddr + (reg))
100
101enum mac_version {
102 RTL_GIGA_MAC_VER_01 = 0,
103 RTL_GIGA_MAC_VER_02,
104 RTL_GIGA_MAC_VER_03,
105 RTL_GIGA_MAC_VER_04,
106 RTL_GIGA_MAC_VER_05,
107 RTL_GIGA_MAC_VER_06,
108 RTL_GIGA_MAC_VER_07,
109 RTL_GIGA_MAC_VER_08,
110 RTL_GIGA_MAC_VER_09,
111 RTL_GIGA_MAC_VER_10,
112 RTL_GIGA_MAC_VER_11,
113 RTL_GIGA_MAC_VER_12,
114 RTL_GIGA_MAC_VER_13,
115 RTL_GIGA_MAC_VER_14,
116 RTL_GIGA_MAC_VER_15,
117 RTL_GIGA_MAC_VER_16,
118 RTL_GIGA_MAC_VER_17,
119 RTL_GIGA_MAC_VER_18,
120 RTL_GIGA_MAC_VER_19,
121 RTL_GIGA_MAC_VER_20,
122 RTL_GIGA_MAC_VER_21,
123 RTL_GIGA_MAC_VER_22,
124 RTL_GIGA_MAC_VER_23,
125 RTL_GIGA_MAC_VER_24,
126 RTL_GIGA_MAC_VER_25,
127 RTL_GIGA_MAC_VER_26,
128 RTL_GIGA_MAC_VER_27,
129 RTL_GIGA_MAC_VER_28,
130 RTL_GIGA_MAC_VER_29,
131 RTL_GIGA_MAC_VER_30,
132 RTL_GIGA_MAC_VER_31,
133 RTL_GIGA_MAC_VER_32,
134 RTL_GIGA_MAC_VER_33,
135 RTL_GIGA_MAC_VER_34,
136 RTL_GIGA_MAC_NONE = 0xff,
137};
138
139enum rtl_tx_desc_version {
140 RTL_TD_0 = 0,
141 RTL_TD_1 = 1,
142};
143
144#define _R(NAME,TD,FW) \
145 { .name = NAME, .txd_version = TD, .fw_name = FW }
146
147static const struct {
148 const char *name;
149 enum rtl_tx_desc_version txd_version;
150 const char *fw_name;
151} rtl_chip_infos[] = {
152 /* PCI devices. */
153 [RTL_GIGA_MAC_VER_01] =
154 _R("RTL8169", RTL_TD_0, NULL),
155 [RTL_GIGA_MAC_VER_02] =
156 _R("RTL8169s", RTL_TD_0, NULL),
157 [RTL_GIGA_MAC_VER_03] =
158 _R("RTL8110s", RTL_TD_0, NULL),
159 [RTL_GIGA_MAC_VER_04] =
160 _R("RTL8169sb/8110sb", RTL_TD_0, NULL),
161 [RTL_GIGA_MAC_VER_05] =
162 _R("RTL8169sc/8110sc", RTL_TD_0, NULL),
163 [RTL_GIGA_MAC_VER_06] =
164 _R("RTL8169sc/8110sc", RTL_TD_0, NULL),
165 /* PCI-E devices. */
166 [RTL_GIGA_MAC_VER_07] =
167 _R("RTL8102e", RTL_TD_1, NULL),
168 [RTL_GIGA_MAC_VER_08] =
169 _R("RTL8102e", RTL_TD_1, NULL),
170 [RTL_GIGA_MAC_VER_09] =
171 _R("RTL8102e", RTL_TD_1, NULL),
172 [RTL_GIGA_MAC_VER_10] =
173 _R("RTL8101e", RTL_TD_0, NULL),
174 [RTL_GIGA_MAC_VER_11] =
175 _R("RTL8168b/8111b", RTL_TD_0, NULL),
176 [RTL_GIGA_MAC_VER_12] =
177 _R("RTL8168b/8111b", RTL_TD_0, NULL),
178 [RTL_GIGA_MAC_VER_13] =
179 _R("RTL8101e", RTL_TD_0, NULL),
180 [RTL_GIGA_MAC_VER_14] =
181 _R("RTL8100e", RTL_TD_0, NULL),
182 [RTL_GIGA_MAC_VER_15] =
183 _R("RTL8100e", RTL_TD_0, NULL),
184 [RTL_GIGA_MAC_VER_16] =
185 _R("RTL8101e", RTL_TD_0, NULL),
186 [RTL_GIGA_MAC_VER_17] =
187 _R("RTL8168b/8111b", RTL_TD_0, NULL),
188 [RTL_GIGA_MAC_VER_18] =
189 _R("RTL8168cp/8111cp", RTL_TD_1, NULL),
190 [RTL_GIGA_MAC_VER_19] =
191 _R("RTL8168c/8111c", RTL_TD_1, NULL),
192 [RTL_GIGA_MAC_VER_20] =
193 _R("RTL8168c/8111c", RTL_TD_1, NULL),
194 [RTL_GIGA_MAC_VER_21] =
195 _R("RTL8168c/8111c", RTL_TD_1, NULL),
196 [RTL_GIGA_MAC_VER_22] =
197 _R("RTL8168c/8111c", RTL_TD_1, NULL),
198 [RTL_GIGA_MAC_VER_23] =
199 _R("RTL8168cp/8111cp", RTL_TD_1, NULL),
200 [RTL_GIGA_MAC_VER_24] =
201 _R("RTL8168cp/8111cp", RTL_TD_1, NULL),
202 [RTL_GIGA_MAC_VER_25] =
203 _R("RTL8168d/8111d", RTL_TD_1, FIRMWARE_8168D_1),
204 [RTL_GIGA_MAC_VER_26] =
205 _R("RTL8168d/8111d", RTL_TD_1, FIRMWARE_8168D_2),
206 [RTL_GIGA_MAC_VER_27] =
207 _R("RTL8168dp/8111dp", RTL_TD_1, NULL),
208 [RTL_GIGA_MAC_VER_28] =
209 _R("RTL8168dp/8111dp", RTL_TD_1, NULL),
210 [RTL_GIGA_MAC_VER_29] =
211 _R("RTL8105e", RTL_TD_1, FIRMWARE_8105E_1),
212 [RTL_GIGA_MAC_VER_30] =
213 _R("RTL8105e", RTL_TD_1, FIRMWARE_8105E_1),
214 [RTL_GIGA_MAC_VER_31] =
215 _R("RTL8168dp/8111dp", RTL_TD_1, NULL),
216 [RTL_GIGA_MAC_VER_32] =
217 _R("RTL8168e/8111e", RTL_TD_1, FIRMWARE_8168E_1),
218 [RTL_GIGA_MAC_VER_33] =
219 _R("RTL8168e/8111e", RTL_TD_1, FIRMWARE_8168E_2),
220 [RTL_GIGA_MAC_VER_34] =
221 _R("RTL8168evl/8111evl",RTL_TD_1, FIRMWARE_8168E_3)
222};
223#undef _R
224
225enum cfg_version {
226 RTL_CFG_0 = 0x00,
227 RTL_CFG_1,
228 RTL_CFG_2
229};
230
231static void rtl_hw_start_8169(struct net_device *);
232static void rtl_hw_start_8168(struct net_device *);
233static void rtl_hw_start_8101(struct net_device *);
234
235static DEFINE_PCI_DEVICE_TABLE(rtl8169_pci_tbl) = {
236 { PCI_DEVICE(PCI_VENDOR_ID_REALTEK, 0x8129), 0, 0, RTL_CFG_0 },
237 { PCI_DEVICE(PCI_VENDOR_ID_REALTEK, 0x8136), 0, 0, RTL_CFG_2 },
238 { PCI_DEVICE(PCI_VENDOR_ID_REALTEK, 0x8167), 0, 0, RTL_CFG_0 },
239 { PCI_DEVICE(PCI_VENDOR_ID_REALTEK, 0x8168), 0, 0, RTL_CFG_1 },
240 { PCI_DEVICE(PCI_VENDOR_ID_REALTEK, 0x8169), 0, 0, RTL_CFG_0 },
241 { PCI_DEVICE(PCI_VENDOR_ID_DLINK, 0x4300), 0, 0, RTL_CFG_0 },
242 { PCI_DEVICE(PCI_VENDOR_ID_DLINK, 0x4302), 0, 0, RTL_CFG_0 },
243 { PCI_DEVICE(PCI_VENDOR_ID_AT, 0xc107), 0, 0, RTL_CFG_0 },
244 { PCI_DEVICE(0x16ec, 0x0116), 0, 0, RTL_CFG_0 },
245 { PCI_VENDOR_ID_LINKSYS, 0x1032,
246 PCI_ANY_ID, 0x0024, 0, 0, RTL_CFG_0 },
247 { 0x0001, 0x8168,
248 PCI_ANY_ID, 0x2410, 0, 0, RTL_CFG_2 },
249 {0,},
250};
251
252MODULE_DEVICE_TABLE(pci, rtl8169_pci_tbl);
253
254static int rx_buf_sz = 16383;
255static int use_dac;
256static struct {
257 u32 msg_enable;
258} debug = { -1 };
259
260enum rtl_registers {
261 MAC0 = 0, /* Ethernet hardware address. */
262 MAC4 = 4,
263 MAR0 = 8, /* Multicast filter. */
264 CounterAddrLow = 0x10,
265 CounterAddrHigh = 0x14,
266 TxDescStartAddrLow = 0x20,
267 TxDescStartAddrHigh = 0x24,
268 TxHDescStartAddrLow = 0x28,
269 TxHDescStartAddrHigh = 0x2c,
270 FLASH = 0x30,
271 ERSR = 0x36,
272 ChipCmd = 0x37,
273 TxPoll = 0x38,
274 IntrMask = 0x3c,
275 IntrStatus = 0x3e,
276
277 TxConfig = 0x40,
278#define TXCFG_AUTO_FIFO (1 << 7) /* 8111e-vl */
279#define TXCFG_EMPTY (1 << 11) /* 8111e-vl */
280
281 RxConfig = 0x44,
282#define RX128_INT_EN (1 << 15) /* 8111c and later */
283#define RX_MULTI_EN (1 << 14) /* 8111c only */
284#define RXCFG_FIFO_SHIFT 13
285 /* No threshold before first PCI xfer */
286#define RX_FIFO_THRESH (7 << RXCFG_FIFO_SHIFT)
287#define RXCFG_DMA_SHIFT 8
288 /* Unlimited maximum PCI burst. */
289#define RX_DMA_BURST (7 << RXCFG_DMA_SHIFT)
290
291 RxMissed = 0x4c,
292 Cfg9346 = 0x50,
293 Config0 = 0x51,
294 Config1 = 0x52,
295 Config2 = 0x53,
296 Config3 = 0x54,
297 Config4 = 0x55,
298 Config5 = 0x56,
299 MultiIntr = 0x5c,
300 PHYAR = 0x60,
301 PHYstatus = 0x6c,
302 RxMaxSize = 0xda,
303 CPlusCmd = 0xe0,
304 IntrMitigate = 0xe2,
305 RxDescAddrLow = 0xe4,
306 RxDescAddrHigh = 0xe8,
307 EarlyTxThres = 0xec, /* 8169. Unit of 32 bytes. */
308
309#define NoEarlyTx 0x3f /* Max value : no early transmit. */
310
311 MaxTxPacketSize = 0xec, /* 8101/8168. Unit of 128 bytes. */
312
313#define TxPacketMax (8064 >> 7)
314
315 FuncEvent = 0xf0,
316 FuncEventMask = 0xf4,
317 FuncPresetState = 0xf8,
318 FuncForceEvent = 0xfc,
319};
320
321enum rtl8110_registers {
322 TBICSR = 0x64,
323 TBI_ANAR = 0x68,
324 TBI_LPAR = 0x6a,
325};
326
327enum rtl8168_8101_registers {
328 CSIDR = 0x64,
329 CSIAR = 0x68,
330#define CSIAR_FLAG 0x80000000
331#define CSIAR_WRITE_CMD 0x80000000
332#define CSIAR_BYTE_ENABLE 0x0f
333#define CSIAR_BYTE_ENABLE_SHIFT 12
334#define CSIAR_ADDR_MASK 0x0fff
335 PMCH = 0x6f,
336 EPHYAR = 0x80,
337#define EPHYAR_FLAG 0x80000000
338#define EPHYAR_WRITE_CMD 0x80000000
339#define EPHYAR_REG_MASK 0x1f
340#define EPHYAR_REG_SHIFT 16
341#define EPHYAR_DATA_MASK 0xffff
342 DLLPR = 0xd0,
343#define PFM_EN (1 << 6)
344 DBG_REG = 0xd1,
345#define FIX_NAK_1 (1 << 4)
346#define FIX_NAK_2 (1 << 3)
347 TWSI = 0xd2,
348 MCU = 0xd3,
349#define NOW_IS_OOB (1 << 7)
350#define EN_NDP (1 << 3)
351#define EN_OOB_RESET (1 << 2)
352 EFUSEAR = 0xdc,
353#define EFUSEAR_FLAG 0x80000000
354#define EFUSEAR_WRITE_CMD 0x80000000
355#define EFUSEAR_READ_CMD 0x00000000
356#define EFUSEAR_REG_MASK 0x03ff
357#define EFUSEAR_REG_SHIFT 8
358#define EFUSEAR_DATA_MASK 0xff
359};
360
361enum rtl8168_registers {
362 LED_FREQ = 0x1a,
363 EEE_LED = 0x1b,
364 ERIDR = 0x70,
365 ERIAR = 0x74,
366#define ERIAR_FLAG 0x80000000
367#define ERIAR_WRITE_CMD 0x80000000
368#define ERIAR_READ_CMD 0x00000000
369#define ERIAR_ADDR_BYTE_ALIGN 4
370#define ERIAR_TYPE_SHIFT 16
371#define ERIAR_EXGMAC (0x00 << ERIAR_TYPE_SHIFT)
372#define ERIAR_MSIX (0x01 << ERIAR_TYPE_SHIFT)
373#define ERIAR_ASF (0x02 << ERIAR_TYPE_SHIFT)
374#define ERIAR_MASK_SHIFT 12
375#define ERIAR_MASK_0001 (0x1 << ERIAR_MASK_SHIFT)
376#define ERIAR_MASK_0011 (0x3 << ERIAR_MASK_SHIFT)
377#define ERIAR_MASK_1111 (0xf << ERIAR_MASK_SHIFT)
378 EPHY_RXER_NUM = 0x7c,
379 OCPDR = 0xb0, /* OCP GPHY access */
380#define OCPDR_WRITE_CMD 0x80000000
381#define OCPDR_READ_CMD 0x00000000
382#define OCPDR_REG_MASK 0x7f
383#define OCPDR_GPHY_REG_SHIFT 16
384#define OCPDR_DATA_MASK 0xffff
385 OCPAR = 0xb4,
386#define OCPAR_FLAG 0x80000000
387#define OCPAR_GPHY_WRITE_CMD 0x8000f060
388#define OCPAR_GPHY_READ_CMD 0x0000f060
389 RDSAR1 = 0xd0, /* 8168c only. Undocumented on 8168dp */
390 MISC = 0xf0, /* 8168e only. */
391#define TXPLA_RST (1 << 29)
392#define PWM_EN (1 << 22)
393};
394
395enum rtl_register_content {
396 /* InterruptStatusBits */
397 SYSErr = 0x8000,
398 PCSTimeout = 0x4000,
399 SWInt = 0x0100,
400 TxDescUnavail = 0x0080,
401 RxFIFOOver = 0x0040,
402 LinkChg = 0x0020,
403 RxOverflow = 0x0010,
404 TxErr = 0x0008,
405 TxOK = 0x0004,
406 RxErr = 0x0002,
407 RxOK = 0x0001,
408
409 /* RxStatusDesc */
410 RxFOVF = (1 << 23),
411 RxRWT = (1 << 22),
412 RxRES = (1 << 21),
413 RxRUNT = (1 << 20),
414 RxCRC = (1 << 19),
415
416 /* ChipCmdBits */
417 StopReq = 0x80,
418 CmdReset = 0x10,
419 CmdRxEnb = 0x08,
420 CmdTxEnb = 0x04,
421 RxBufEmpty = 0x01,
422
423 /* TXPoll register p.5 */
424 HPQ = 0x80, /* Poll cmd on the high prio queue */
425 NPQ = 0x40, /* Poll cmd on the low prio queue */
426 FSWInt = 0x01, /* Forced software interrupt */
427
428 /* Cfg9346Bits */
429 Cfg9346_Lock = 0x00,
430 Cfg9346_Unlock = 0xc0,
431
432 /* rx_mode_bits */
433 AcceptErr = 0x20,
434 AcceptRunt = 0x10,
435 AcceptBroadcast = 0x08,
436 AcceptMulticast = 0x04,
437 AcceptMyPhys = 0x02,
438 AcceptAllPhys = 0x01,
439#define RX_CONFIG_ACCEPT_MASK 0x3f
440
441 /* TxConfigBits */
442 TxInterFrameGapShift = 24,
443 TxDMAShift = 8, /* DMA burst value (0-7) is shift this many bits */
444
445 /* Config1 register p.24 */
446 LEDS1 = (1 << 7),
447 LEDS0 = (1 << 6),
448 MSIEnable = (1 << 5), /* Enable Message Signaled Interrupt */
449 Speed_down = (1 << 4),
450 MEMMAP = (1 << 3),
451 IOMAP = (1 << 2),
452 VPD = (1 << 1),
453 PMEnable = (1 << 0), /* Power Management Enable */
454
455 /* Config2 register p. 25 */
456 PCI_Clock_66MHz = 0x01,
457 PCI_Clock_33MHz = 0x00,
458
459 /* Config3 register p.25 */
460 MagicPacket = (1 << 5), /* Wake up when receives a Magic Packet */
461 LinkUp = (1 << 4), /* Wake up when the cable connection is re-established */
462 Beacon_en = (1 << 0), /* 8168 only. Reserved in the 8168b */
463
464 /* Config5 register p.27 */
465 BWF = (1 << 6), /* Accept Broadcast wakeup frame */
466 MWF = (1 << 5), /* Accept Multicast wakeup frame */
467 UWF = (1 << 4), /* Accept Unicast wakeup frame */
468 Spi_en = (1 << 3),
469 LanWake = (1 << 1), /* LanWake enable/disable */
470 PMEStatus = (1 << 0), /* PME status can be reset by PCI RST# */
471
472 /* TBICSR p.28 */
473 TBIReset = 0x80000000,
474 TBILoopback = 0x40000000,
475 TBINwEnable = 0x20000000,
476 TBINwRestart = 0x10000000,
477 TBILinkOk = 0x02000000,
478 TBINwComplete = 0x01000000,
479
480 /* CPlusCmd p.31 */
481 EnableBist = (1 << 15), // 8168 8101
482 Mac_dbgo_oe = (1 << 14), // 8168 8101
483 Normal_mode = (1 << 13), // unused
484 Force_half_dup = (1 << 12), // 8168 8101
485 Force_rxflow_en = (1 << 11), // 8168 8101
486 Force_txflow_en = (1 << 10), // 8168 8101
487 Cxpl_dbg_sel = (1 << 9), // 8168 8101
488 ASF = (1 << 8), // 8168 8101
489 PktCntrDisable = (1 << 7), // 8168 8101
490 Mac_dbgo_sel = 0x001c, // 8168
491 RxVlan = (1 << 6),
492 RxChkSum = (1 << 5),
493 PCIDAC = (1 << 4),
494 PCIMulRW = (1 << 3),
495 INTT_0 = 0x0000, // 8168
496 INTT_1 = 0x0001, // 8168
497 INTT_2 = 0x0002, // 8168
498 INTT_3 = 0x0003, // 8168
499
500 /* rtl8169_PHYstatus */
501 TBI_Enable = 0x80,
502 TxFlowCtrl = 0x40,
503 RxFlowCtrl = 0x20,
504 _1000bpsF = 0x10,
505 _100bps = 0x08,
506 _10bps = 0x04,
507 LinkStatus = 0x02,
508 FullDup = 0x01,
509
510 /* _TBICSRBit */
511 TBILinkOK = 0x02000000,
512
513 /* DumpCounterCommand */
514 CounterDump = 0x8,
515};
516
517enum rtl_desc_bit {
518 /* First doubleword. */
519 DescOwn = (1 << 31), /* Descriptor is owned by NIC */
520 RingEnd = (1 << 30), /* End of descriptor ring */
521 FirstFrag = (1 << 29), /* First segment of a packet */
522 LastFrag = (1 << 28), /* Final segment of a packet */
523};
524
525/* Generic case. */
526enum rtl_tx_desc_bit {
527 /* First doubleword. */
528 TD_LSO = (1 << 27), /* Large Send Offload */
529#define TD_MSS_MAX 0x07ffu /* MSS value */
530
531 /* Second doubleword. */
532 TxVlanTag = (1 << 17), /* Add VLAN tag */
533};
534
535/* 8169, 8168b and 810x except 8102e. */
536enum rtl_tx_desc_bit_0 {
537 /* First doubleword. */
538#define TD0_MSS_SHIFT 16 /* MSS position (11 bits) */
539 TD0_TCP_CS = (1 << 16), /* Calculate TCP/IP checksum */
540 TD0_UDP_CS = (1 << 17), /* Calculate UDP/IP checksum */
541 TD0_IP_CS = (1 << 18), /* Calculate IP checksum */
542};
543
544/* 8102e, 8168c and beyond. */
545enum rtl_tx_desc_bit_1 {
546 /* Second doubleword. */
547#define TD1_MSS_SHIFT 18 /* MSS position (11 bits) */
548 TD1_IP_CS = (1 << 29), /* Calculate IP checksum */
549 TD1_TCP_CS = (1 << 30), /* Calculate TCP/IP checksum */
550 TD1_UDP_CS = (1 << 31), /* Calculate UDP/IP checksum */
551};
552
553static const struct rtl_tx_desc_info {
554 struct {
555 u32 udp;
556 u32 tcp;
557 } checksum;
558 u16 mss_shift;
559 u16 opts_offset;
560} tx_desc_info [] = {
561 [RTL_TD_0] = {
562 .checksum = {
563 .udp = TD0_IP_CS | TD0_UDP_CS,
564 .tcp = TD0_IP_CS | TD0_TCP_CS
565 },
566 .mss_shift = TD0_MSS_SHIFT,
567 .opts_offset = 0
568 },
569 [RTL_TD_1] = {
570 .checksum = {
571 .udp = TD1_IP_CS | TD1_UDP_CS,
572 .tcp = TD1_IP_CS | TD1_TCP_CS
573 },
574 .mss_shift = TD1_MSS_SHIFT,
575 .opts_offset = 1
576 }
577};
578
579enum rtl_rx_desc_bit {
580 /* Rx private */
581 PID1 = (1 << 18), /* Protocol ID bit 1/2 */
582 PID0 = (1 << 17), /* Protocol ID bit 2/2 */
583
584#define RxProtoUDP (PID1)
585#define RxProtoTCP (PID0)
586#define RxProtoIP (PID1 | PID0)
587#define RxProtoMask RxProtoIP
588
589 IPFail = (1 << 16), /* IP checksum failed */
590 UDPFail = (1 << 15), /* UDP/IP checksum failed */
591 TCPFail = (1 << 14), /* TCP/IP checksum failed */
592 RxVlanTag = (1 << 16), /* VLAN tag available */
593};
594
595#define RsvdMask 0x3fffc000
596
597struct TxDesc {
598 __le32 opts1;
599 __le32 opts2;
600 __le64 addr;
601};
602
603struct RxDesc {
604 __le32 opts1;
605 __le32 opts2;
606 __le64 addr;
607};
608
609struct ring_info {
610 struct sk_buff *skb;
611 u32 len;
612 u8 __pad[sizeof(void *) - sizeof(u32)];
613};
614
615enum features {
616 RTL_FEATURE_WOL = (1 << 0),
617 RTL_FEATURE_MSI = (1 << 1),
618 RTL_FEATURE_GMII = (1 << 2),
619};
620
621struct rtl8169_counters {
622 __le64 tx_packets;
623 __le64 rx_packets;
624 __le64 tx_errors;
625 __le32 rx_errors;
626 __le16 rx_missed;
627 __le16 align_errors;
628 __le32 tx_one_collision;
629 __le32 tx_multi_collision;
630 __le64 rx_unicast;
631 __le64 rx_broadcast;
632 __le32 rx_multicast;
633 __le16 tx_aborted;
634 __le16 tx_underun;
635};
636
637struct rtl8169_private {
638 void __iomem *mmio_addr; /* memory map physical address */
639 struct pci_dev *pci_dev;
640 struct net_device *dev;
641 struct napi_struct napi;
642 spinlock_t lock;
643 u32 msg_enable;
644 u16 txd_version;
645 u16 mac_version;
646 u32 cur_rx; /* Index into the Rx descriptor buffer of next Rx pkt. */
647 u32 cur_tx; /* Index into the Tx descriptor buffer of next Tx pkt. */
648 u32 dirty_rx;
649 u32 dirty_tx;
650 struct TxDesc *TxDescArray; /* 256-aligned Tx descriptor ring */
651 struct RxDesc *RxDescArray; /* 256-aligned Rx descriptor ring */
652 dma_addr_t TxPhyAddr;
653 dma_addr_t RxPhyAddr;
654 void *Rx_databuff[NUM_RX_DESC]; /* Rx data buffers */
655 struct ring_info tx_skb[NUM_TX_DESC]; /* Tx data buffers */
656 struct timer_list timer;
657 u16 cp_cmd;
658 u16 intr_event;
659 u16 napi_event;
660 u16 intr_mask;
661
662 struct mdio_ops {
663 void (*write)(void __iomem *, int, int);
664 int (*read)(void __iomem *, int);
665 } mdio_ops;
666
667 struct pll_power_ops {
668 void (*down)(struct rtl8169_private *);
669 void (*up)(struct rtl8169_private *);
670 } pll_power_ops;
671
672 int (*set_speed)(struct net_device *, u8 aneg, u16 sp, u8 dpx, u32 adv);
673 int (*get_settings)(struct net_device *, struct ethtool_cmd *);
674 void (*phy_reset_enable)(struct rtl8169_private *tp);
675 void (*hw_start)(struct net_device *);
676 unsigned int (*phy_reset_pending)(struct rtl8169_private *tp);
677 unsigned int (*link_ok)(void __iomem *);
678 int (*do_ioctl)(struct rtl8169_private *tp, struct mii_ioctl_data *data, int cmd);
679 struct delayed_work task;
680 unsigned features;
681
682 struct mii_if_info mii;
683 struct rtl8169_counters counters;
684 u32 saved_wolopts;
685
686 struct rtl_fw {
687 const struct firmware *fw;
688
689#define RTL_VER_SIZE 32
690
691 char version[RTL_VER_SIZE];
692
693 struct rtl_fw_phy_action {
694 __le32 *code;
695 size_t size;
696 } phy_action;
697 } *rtl_fw;
698#define RTL_FIRMWARE_UNKNOWN ERR_PTR(-EAGAIN)
699};
700
701MODULE_AUTHOR("Realtek and the Linux r8169 crew <netdev@vger.kernel.org>");
702MODULE_DESCRIPTION("RealTek RTL-8169 Gigabit Ethernet driver");
703module_param(use_dac, int, 0);
704MODULE_PARM_DESC(use_dac, "Enable PCI DAC. Unsafe on 32 bit PCI slot.");
705module_param_named(debug, debug.msg_enable, int, 0);
706MODULE_PARM_DESC(debug, "Debug verbosity level (0=none, ..., 16=all)");
707MODULE_LICENSE("GPL");
708MODULE_VERSION(RTL8169_VERSION);
709MODULE_FIRMWARE(FIRMWARE_8168D_1);
710MODULE_FIRMWARE(FIRMWARE_8168D_2);
711MODULE_FIRMWARE(FIRMWARE_8168E_1);
712MODULE_FIRMWARE(FIRMWARE_8168E_2);
713MODULE_FIRMWARE(FIRMWARE_8105E_1);
714
715static int rtl8169_open(struct net_device *dev);
716static netdev_tx_t rtl8169_start_xmit(struct sk_buff *skb,
717 struct net_device *dev);
718static irqreturn_t rtl8169_interrupt(int irq, void *dev_instance);
719static int rtl8169_init_ring(struct net_device *dev);
720static void rtl_hw_start(struct net_device *dev);
721static int rtl8169_close(struct net_device *dev);
722static void rtl_set_rx_mode(struct net_device *dev);
723static void rtl8169_tx_timeout(struct net_device *dev);
724static struct net_device_stats *rtl8169_get_stats(struct net_device *dev);
725static int rtl8169_rx_interrupt(struct net_device *, struct rtl8169_private *,
726 void __iomem *, u32 budget);
727static int rtl8169_change_mtu(struct net_device *dev, int new_mtu);
728static void rtl8169_down(struct net_device *dev);
729static void rtl8169_rx_clear(struct rtl8169_private *tp);
730static int rtl8169_poll(struct napi_struct *napi, int budget);
731
732static u32 ocp_read(struct rtl8169_private *tp, u8 mask, u16 reg)
733{
734 void __iomem *ioaddr = tp->mmio_addr;
735 int i;
736
737 RTL_W32(OCPAR, ((u32)mask & 0x0f) << 12 | (reg & 0x0fff));
738 for (i = 0; i < 20; i++) {
739 udelay(100);
740 if (RTL_R32(OCPAR) & OCPAR_FLAG)
741 break;
742 }
743 return RTL_R32(OCPDR);
744}
745
746static void ocp_write(struct rtl8169_private *tp, u8 mask, u16 reg, u32 data)
747{
748 void __iomem *ioaddr = tp->mmio_addr;
749 int i;
750
751 RTL_W32(OCPDR, data);
752 RTL_W32(OCPAR, OCPAR_FLAG | ((u32)mask & 0x0f) << 12 | (reg & 0x0fff));
753 for (i = 0; i < 20; i++) {
754 udelay(100);
755 if ((RTL_R32(OCPAR) & OCPAR_FLAG) == 0)
756 break;
757 }
758}
759
760static void rtl8168_oob_notify(struct rtl8169_private *tp, u8 cmd)
761{
762 void __iomem *ioaddr = tp->mmio_addr;
763 int i;
764
765 RTL_W8(ERIDR, cmd);
766 RTL_W32(ERIAR, 0x800010e8);
767 msleep(2);
768 for (i = 0; i < 5; i++) {
769 udelay(100);
770 if (!(RTL_R32(ERIAR) & ERIAR_FLAG))
771 break;
772 }
773
774 ocp_write(tp, 0x1, 0x30, 0x00000001);
775}
776
777#define OOB_CMD_RESET 0x00
778#define OOB_CMD_DRIVER_START 0x05
779#define OOB_CMD_DRIVER_STOP 0x06
780
781static u16 rtl8168_get_ocp_reg(struct rtl8169_private *tp)
782{
783 return (tp->mac_version == RTL_GIGA_MAC_VER_31) ? 0xb8 : 0x10;
784}
785
786static void rtl8168_driver_start(struct rtl8169_private *tp)
787{
788 u16 reg;
789 int i;
790
791 rtl8168_oob_notify(tp, OOB_CMD_DRIVER_START);
792
793 reg = rtl8168_get_ocp_reg(tp);
794
795 for (i = 0; i < 10; i++) {
796 msleep(10);
797 if (ocp_read(tp, 0x0f, reg) & 0x00000800)
798 break;
799 }
800}
801
802static void rtl8168_driver_stop(struct rtl8169_private *tp)
803{
804 u16 reg;
805 int i;
806
807 rtl8168_oob_notify(tp, OOB_CMD_DRIVER_STOP);
808
809 reg = rtl8168_get_ocp_reg(tp);
810
811 for (i = 0; i < 10; i++) {
812 msleep(10);
813 if ((ocp_read(tp, 0x0f, reg) & 0x00000800) == 0)
814 break;
815 }
816}
817
818static int r8168dp_check_dash(struct rtl8169_private *tp)
819{
820 u16 reg = rtl8168_get_ocp_reg(tp);
821
822 return (ocp_read(tp, 0x0f, reg) & 0x00008000) ? 1 : 0;
823}
824
825static void r8169_mdio_write(void __iomem *ioaddr, int reg_addr, int value)
826{
827 int i;
828
829 RTL_W32(PHYAR, 0x80000000 | (reg_addr & 0x1f) << 16 | (value & 0xffff));
830
831 for (i = 20; i > 0; i--) {
832 /*
833 * Check if the RTL8169 has completed writing to the specified
834 * MII register.
835 */
836 if (!(RTL_R32(PHYAR) & 0x80000000))
837 break;
838 udelay(25);
839 }
840 /*
841 * According to hardware specs a 20us delay is required after write
842 * complete indication, but before sending next command.
843 */
844 udelay(20);
845}
846
847static int r8169_mdio_read(void __iomem *ioaddr, int reg_addr)
848{
849 int i, value = -1;
850
851 RTL_W32(PHYAR, 0x0 | (reg_addr & 0x1f) << 16);
852
853 for (i = 20; i > 0; i--) {
854 /*
855 * Check if the RTL8169 has completed retrieving data from
856 * the specified MII register.
857 */
858 if (RTL_R32(PHYAR) & 0x80000000) {
859 value = RTL_R32(PHYAR) & 0xffff;
860 break;
861 }
862 udelay(25);
863 }
864 /*
865 * According to hardware specs a 20us delay is required after read
866 * complete indication, but before sending next command.
867 */
868 udelay(20);
869
870 return value;
871}
872
873static void r8168dp_1_mdio_access(void __iomem *ioaddr, int reg_addr, u32 data)
874{
875 int i;
876
877 RTL_W32(OCPDR, data |
878 ((reg_addr & OCPDR_REG_MASK) << OCPDR_GPHY_REG_SHIFT));
879 RTL_W32(OCPAR, OCPAR_GPHY_WRITE_CMD);
880 RTL_W32(EPHY_RXER_NUM, 0);
881
882 for (i = 0; i < 100; i++) {
883 mdelay(1);
884 if (!(RTL_R32(OCPAR) & OCPAR_FLAG))
885 break;
886 }
887}
888
889static void r8168dp_1_mdio_write(void __iomem *ioaddr, int reg_addr, int value)
890{
891 r8168dp_1_mdio_access(ioaddr, reg_addr, OCPDR_WRITE_CMD |
892 (value & OCPDR_DATA_MASK));
893}
894
895static int r8168dp_1_mdio_read(void __iomem *ioaddr, int reg_addr)
896{
897 int i;
898
899 r8168dp_1_mdio_access(ioaddr, reg_addr, OCPDR_READ_CMD);
900
901 mdelay(1);
902 RTL_W32(OCPAR, OCPAR_GPHY_READ_CMD);
903 RTL_W32(EPHY_RXER_NUM, 0);
904
905 for (i = 0; i < 100; i++) {
906 mdelay(1);
907 if (RTL_R32(OCPAR) & OCPAR_FLAG)
908 break;
909 }
910
911 return RTL_R32(OCPDR) & OCPDR_DATA_MASK;
912}
913
914#define R8168DP_1_MDIO_ACCESS_BIT 0x00020000
915
916static void r8168dp_2_mdio_start(void __iomem *ioaddr)
917{
918 RTL_W32(0xd0, RTL_R32(0xd0) & ~R8168DP_1_MDIO_ACCESS_BIT);
919}
920
921static void r8168dp_2_mdio_stop(void __iomem *ioaddr)
922{
923 RTL_W32(0xd0, RTL_R32(0xd0) | R8168DP_1_MDIO_ACCESS_BIT);
924}
925
926static void r8168dp_2_mdio_write(void __iomem *ioaddr, int reg_addr, int value)
927{
928 r8168dp_2_mdio_start(ioaddr);
929
930 r8169_mdio_write(ioaddr, reg_addr, value);
931
932 r8168dp_2_mdio_stop(ioaddr);
933}
934
935static int r8168dp_2_mdio_read(void __iomem *ioaddr, int reg_addr)
936{
937 int value;
938
939 r8168dp_2_mdio_start(ioaddr);
940
941 value = r8169_mdio_read(ioaddr, reg_addr);
942
943 r8168dp_2_mdio_stop(ioaddr);
944
945 return value;
946}
947
948static void rtl_writephy(struct rtl8169_private *tp, int location, u32 val)
949{
950 tp->mdio_ops.write(tp->mmio_addr, location, val);
951}
952
953static int rtl_readphy(struct rtl8169_private *tp, int location)
954{
955 return tp->mdio_ops.read(tp->mmio_addr, location);
956}
957
/* OR VALUE into PHY register REG_ADDR (read-modify-write). */
static void rtl_patchphy(struct rtl8169_private *tp, int reg_addr, int value)
{
	int cur = rtl_readphy(tp, reg_addr);

	rtl_writephy(tp, reg_addr, cur | value);
}
962
/* Read-modify-write a PHY register: set bits P, then clear bits M. */
static void rtl_w1w0_phy(struct rtl8169_private *tp, int reg_addr, int p, int m)
{
	int cur = rtl_readphy(tp, reg_addr);

	rtl_writephy(tp, reg_addr, (cur | p) & ~m);
}
970
/* mii_if_info glue: write hook for the generic MII layer.
 * phy_id is ignored — the chip has a single internal PHY.
 */
static void rtl_mdio_write(struct net_device *dev, int phy_id, int location,
			   int val)
{
	rtl_writephy(netdev_priv(dev), location, val);
}
978
/* mii_if_info glue: read hook for the generic MII layer.
 * phy_id is ignored — the chip has a single internal PHY.
 */
static int rtl_mdio_read(struct net_device *dev, int phy_id, int location)
{
	return rtl_readphy(netdev_priv(dev), location);
}
985
986static void rtl_ephy_write(void __iomem *ioaddr, int reg_addr, int value)
987{
988 unsigned int i;
989
990 RTL_W32(EPHYAR, EPHYAR_WRITE_CMD | (value & EPHYAR_DATA_MASK) |
991 (reg_addr & EPHYAR_REG_MASK) << EPHYAR_REG_SHIFT);
992
993 for (i = 0; i < 100; i++) {
994 if (!(RTL_R32(EPHYAR) & EPHYAR_FLAG))
995 break;
996 udelay(10);
997 }
998}
999
1000static u16 rtl_ephy_read(void __iomem *ioaddr, int reg_addr)
1001{
1002 u16 value = 0xffff;
1003 unsigned int i;
1004
1005 RTL_W32(EPHYAR, (reg_addr & EPHYAR_REG_MASK) << EPHYAR_REG_SHIFT);
1006
1007 for (i = 0; i < 100; i++) {
1008 if (RTL_R32(EPHYAR) & EPHYAR_FLAG) {
1009 value = RTL_R32(EPHYAR) & EPHYAR_DATA_MASK;
1010 break;
1011 }
1012 udelay(10);
1013 }
1014
1015 return value;
1016}
1017
1018static void rtl_csi_write(void __iomem *ioaddr, int addr, int value)
1019{
1020 unsigned int i;
1021
1022 RTL_W32(CSIDR, value);
1023 RTL_W32(CSIAR, CSIAR_WRITE_CMD | (addr & CSIAR_ADDR_MASK) |
1024 CSIAR_BYTE_ENABLE << CSIAR_BYTE_ENABLE_SHIFT);
1025
1026 for (i = 0; i < 100; i++) {
1027 if (!(RTL_R32(CSIAR) & CSIAR_FLAG))
1028 break;
1029 udelay(10);
1030 }
1031}
1032
1033static u32 rtl_csi_read(void __iomem *ioaddr, int addr)
1034{
1035 u32 value = ~0x00;
1036 unsigned int i;
1037
1038 RTL_W32(CSIAR, (addr & CSIAR_ADDR_MASK) |
1039 CSIAR_BYTE_ENABLE << CSIAR_BYTE_ENABLE_SHIFT);
1040
1041 for (i = 0; i < 100; i++) {
1042 if (RTL_R32(CSIAR) & CSIAR_FLAG) {
1043 value = RTL_R32(CSIDR);
1044 break;
1045 }
1046 udelay(10);
1047 }
1048
1049 return value;
1050}
1051
1052static
1053void rtl_eri_write(void __iomem *ioaddr, int addr, u32 mask, u32 val, int type)
1054{
1055 unsigned int i;
1056
1057 BUG_ON((addr & 3) || (mask == 0));
1058 RTL_W32(ERIDR, val);
1059 RTL_W32(ERIAR, ERIAR_WRITE_CMD | type | mask | addr);
1060
1061 for (i = 0; i < 100; i++) {
1062 if (!(RTL_R32(ERIAR) & ERIAR_FLAG))
1063 break;
1064 udelay(100);
1065 }
1066}
1067
1068static u32 rtl_eri_read(void __iomem *ioaddr, int addr, int type)
1069{
1070 u32 value = ~0x00;
1071 unsigned int i;
1072
1073 RTL_W32(ERIAR, ERIAR_READ_CMD | type | ERIAR_MASK_1111 | addr);
1074
1075 for (i = 0; i < 100; i++) {
1076 if (RTL_R32(ERIAR) & ERIAR_FLAG) {
1077 value = RTL_R32(ERIDR);
1078 break;
1079 }
1080 udelay(100);
1081 }
1082
1083 return value;
1084}
1085
1086static void
1087rtl_w1w0_eri(void __iomem *ioaddr, int addr, u32 mask, u32 p, u32 m, int type)
1088{
1089 u32 val;
1090
1091 val = rtl_eri_read(ioaddr, addr, type);
1092 rtl_eri_write(ioaddr, addr, mask, (val & ~m) | p, type);
1093}
1094
1095struct exgmac_reg {
1096 u16 addr;
1097 u16 mask;
1098 u32 val;
1099};
1100
1101static void rtl_write_exgmac_batch(void __iomem *ioaddr,
1102 const struct exgmac_reg *r, int len)
1103{
1104 while (len-- > 0) {
1105 rtl_eri_write(ioaddr, r->addr, r->mask, r->val, ERIAR_EXGMAC);
1106 r++;
1107 }
1108}
1109
1110static u8 rtl8168d_efuse_read(void __iomem *ioaddr, int reg_addr)
1111{
1112 u8 value = 0xff;
1113 unsigned int i;
1114
1115 RTL_W32(EFUSEAR, (reg_addr & EFUSEAR_REG_MASK) << EFUSEAR_REG_SHIFT);
1116
1117 for (i = 0; i < 300; i++) {
1118 if (RTL_R32(EFUSEAR) & EFUSEAR_FLAG) {
1119 value = RTL_R32(EFUSEAR) & EFUSEAR_DATA_MASK;
1120 break;
1121 }
1122 udelay(100);
1123 }
1124
1125 return value;
1126}
1127
1128static void rtl8169_irq_mask_and_ack(void __iomem *ioaddr)
1129{
1130 RTL_W16(IntrMask, 0x0000);
1131
1132 RTL_W16(IntrStatus, 0xffff);
1133}
1134
1135static unsigned int rtl8169_tbi_reset_pending(struct rtl8169_private *tp)
1136{
1137 void __iomem *ioaddr = tp->mmio_addr;
1138
1139 return RTL_R32(TBICSR) & TBIReset;
1140}
1141
1142static unsigned int rtl8169_xmii_reset_pending(struct rtl8169_private *tp)
1143{
1144 return rtl_readphy(tp, MII_BMCR) & BMCR_RESET;
1145}
1146
1147static unsigned int rtl8169_tbi_link_ok(void __iomem *ioaddr)
1148{
1149 return RTL_R32(TBICSR) & TBILinkOk;
1150}
1151
1152static unsigned int rtl8169_xmii_link_ok(void __iomem *ioaddr)
1153{
1154 return RTL_R8(PHYstatus) & LinkStatus;
1155}
1156
1157static void rtl8169_tbi_reset_enable(struct rtl8169_private *tp)
1158{
1159 void __iomem *ioaddr = tp->mmio_addr;
1160
1161 RTL_W32(TBICSR, RTL_R32(TBICSR) | TBIReset);
1162}
1163
1164static void rtl8169_xmii_reset_enable(struct rtl8169_private *tp)
1165{
1166 unsigned int val;
1167
1168 val = rtl_readphy(tp, MII_BMCR) | BMCR_RESET;
1169 rtl_writephy(tp, MII_BMCR, val & 0xffff);
1170}
1171
/*
 * Apply chip-specific fixups on link change. For the 8168E-VL (VER_34)
 * the EXGMAC 0x1bc/0x1dc registers are reprogrammed per negotiated speed
 * and the packet filter is pulsed (bit cleared then set) to reset it.
 * The specific values are vendor magic; do not reorder the writes.
 */
static void rtl_link_chg_patch(struct rtl8169_private *tp)
{
	void __iomem *ioaddr = tp->mmio_addr;
	struct net_device *dev = tp->dev;

	if (!netif_running(dev))
		return;

	if (tp->mac_version == RTL_GIGA_MAC_VER_34) {
		if (RTL_R8(PHYstatus) & _1000bpsF) {
			rtl_eri_write(ioaddr, 0x1bc, ERIAR_MASK_1111,
				      0x00000011, ERIAR_EXGMAC);
			rtl_eri_write(ioaddr, 0x1dc, ERIAR_MASK_1111,
				      0x00000005, ERIAR_EXGMAC);
		} else if (RTL_R8(PHYstatus) & _100bps) {
			rtl_eri_write(ioaddr, 0x1bc, ERIAR_MASK_1111,
				      0x0000001f, ERIAR_EXGMAC);
			rtl_eri_write(ioaddr, 0x1dc, ERIAR_MASK_1111,
				      0x00000005, ERIAR_EXGMAC);
		} else {
			/* 10 Mbps or link down. */
			rtl_eri_write(ioaddr, 0x1bc, ERIAR_MASK_1111,
				      0x0000001f, ERIAR_EXGMAC);
			rtl_eri_write(ioaddr, 0x1dc, ERIAR_MASK_1111,
				      0x0000003f, ERIAR_EXGMAC);
		}
		/* Reset packet filter */
		rtl_w1w0_eri(ioaddr, 0xdc, ERIAR_MASK_0001, 0x00, 0x01,
			     ERIAR_EXGMAC);
		rtl_w1w0_eri(ioaddr, 0xdc, ERIAR_MASK_0001, 0x01, 0x00,
			     ERIAR_EXGMAC);
	}
}
1204
/*
 * Update carrier state from the hardware link bit, under tp->lock.
 * When @pm is true, a link-up cancels any scheduled runtime suspend
 * and a link-down schedules one (100 ms grace period).
 */
static void __rtl8169_check_link_status(struct net_device *dev,
					struct rtl8169_private *tp,
					void __iomem *ioaddr, bool pm)
{
	unsigned long flags;

	spin_lock_irqsave(&tp->lock, flags);
	if (tp->link_ok(ioaddr)) {
		rtl_link_chg_patch(tp);
		/* This is to cancel a scheduled suspend if there's one. */
		if (pm)
			pm_request_resume(&tp->pci_dev->dev);
		netif_carrier_on(dev);
		/* Rate-limit the message, not the carrier transition. */
		if (net_ratelimit())
			netif_info(tp, ifup, dev, "link up\n");
	} else {
		netif_carrier_off(dev);
		netif_info(tp, ifdown, dev, "link down\n");
		if (pm)
			pm_schedule_suspend(&tp->pci_dev->dev, 100);
	}
	spin_unlock_irqrestore(&tp->lock, flags);
}
1228
/* Link-status check without runtime-PM side effects. */
static void rtl8169_check_link_status(struct net_device *dev,
				      struct rtl8169_private *tp,
				      void __iomem *ioaddr)
{
	__rtl8169_check_link_status(dev, tp, ioaddr, false);
}
1235
1236#define WAKE_ANY (WAKE_PHY | WAKE_MAGIC | WAKE_UCAST | WAKE_BCAST | WAKE_MCAST)
1237
1238static u32 __rtl8169_get_wol(struct rtl8169_private *tp)
1239{
1240 void __iomem *ioaddr = tp->mmio_addr;
1241 u8 options;
1242 u32 wolopts = 0;
1243
1244 options = RTL_R8(Config1);
1245 if (!(options & PMEnable))
1246 return 0;
1247
1248 options = RTL_R8(Config3);
1249 if (options & LinkUp)
1250 wolopts |= WAKE_PHY;
1251 if (options & MagicPacket)
1252 wolopts |= WAKE_MAGIC;
1253
1254 options = RTL_R8(Config5);
1255 if (options & UWF)
1256 wolopts |= WAKE_UCAST;
1257 if (options & BWF)
1258 wolopts |= WAKE_BCAST;
1259 if (options & MWF)
1260 wolopts |= WAKE_MCAST;
1261
1262 return wolopts;
1263}
1264
/* ethtool get_wol: report supported and currently enabled wake options. */
static void rtl8169_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
{
	struct rtl8169_private *tp = netdev_priv(dev);

	spin_lock_irq(&tp->lock);

	wol->supported = WAKE_ANY;
	wol->wolopts = __rtl8169_get_wol(tp);

	spin_unlock_irq(&tp->lock);
}
1276
/*
 * Program the chip's wake-on-LAN configuration bits from ethtool WAKE_*
 * flags, using a table of (flag, config register, bit) mappings.
 * Config registers are write-protected, hence the Cfg9346 unlock/lock
 * bracket around the writes.
 */
static void __rtl8169_set_wol(struct rtl8169_private *tp, u32 wolopts)
{
	void __iomem *ioaddr = tp->mmio_addr;
	unsigned int i;
	static const struct {
		u32 opt;
		u16 reg;
		u8  mask;
	} cfg[] = {
		{ WAKE_ANY,   Config1, PMEnable },
		{ WAKE_PHY,   Config3, LinkUp },
		{ WAKE_MAGIC, Config3, MagicPacket },
		{ WAKE_UCAST, Config5, UWF },
		{ WAKE_BCAST, Config5, BWF },
		{ WAKE_MCAST, Config5, MWF },
		{ WAKE_ANY,   Config5, LanWake }
	};

	RTL_W8(Cfg9346, Cfg9346_Unlock);

	for (i = 0; i < ARRAY_SIZE(cfg); i++) {
		/* Clear the bit, then set it only if the option is wanted. */
		u8 options = RTL_R8(cfg[i].reg) & ~cfg[i].mask;
		if (wolopts & cfg[i].opt)
			options |= cfg[i].mask;
		RTL_W8(cfg[i].reg, options);
	}

	RTL_W8(Cfg9346, Cfg9346_Lock);
}
1306
/*
 * ethtool set_wol: record whether WoL is active in tp->features,
 * program the hardware, and tell the PM core about wakeup capability.
 */
static int rtl8169_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
{
	struct rtl8169_private *tp = netdev_priv(dev);

	spin_lock_irq(&tp->lock);

	if (wol->wolopts)
		tp->features |= RTL_FEATURE_WOL;
	else
		tp->features &= ~RTL_FEATURE_WOL;
	__rtl8169_set_wol(tp, wol->wolopts);
	spin_unlock_irq(&tp->lock);

	/* Wakeup enable follows whether any WAKE_* option is set. */
	device_set_wakeup_enable(&tp->pci_dev->dev, wol->wolopts);

	return 0;
}
1324
/* Firmware file name for the detected chip, or NULL if none is needed. */
static const char *rtl_lookup_firmware_name(struct rtl8169_private *tp)
{
	return rtl_chip_infos[tp->mac_version].fw_name;
}
1329
1330static void rtl8169_get_drvinfo(struct net_device *dev,
1331 struct ethtool_drvinfo *info)
1332{
1333 struct rtl8169_private *tp = netdev_priv(dev);
1334 struct rtl_fw *rtl_fw = tp->rtl_fw;
1335
1336 strcpy(info->driver, MODULENAME);
1337 strcpy(info->version, RTL8169_VERSION);
1338 strcpy(info->bus_info, pci_name(tp->pci_dev));
1339 BUILD_BUG_ON(sizeof(info->fw_version) < sizeof(rtl_fw->version));
1340 strcpy(info->fw_version, IS_ERR_OR_NULL(rtl_fw) ? "N/A" :
1341 rtl_fw->version);
1342}
1343
/* ethtool get_regs_len: size of the register dump returned by get_regs. */
static int rtl8169_get_regs_len(struct net_device *dev)
{
	return R8169_REGS_SIZE;
}
1348
1349static int rtl8169_set_speed_tbi(struct net_device *dev,
1350 u8 autoneg, u16 speed, u8 duplex, u32 ignored)
1351{
1352 struct rtl8169_private *tp = netdev_priv(dev);
1353 void __iomem *ioaddr = tp->mmio_addr;
1354 int ret = 0;
1355 u32 reg;
1356
1357 reg = RTL_R32(TBICSR);
1358 if ((autoneg == AUTONEG_DISABLE) && (speed == SPEED_1000) &&
1359 (duplex == DUPLEX_FULL)) {
1360 RTL_W32(TBICSR, reg & ~(TBINwEnable | TBINwRestart));
1361 } else if (autoneg == AUTONEG_ENABLE)
1362 RTL_W32(TBICSR, reg | TBINwEnable | TBINwRestart);
1363 else {
1364 netif_warn(tp, link, dev,
1365 "incorrect speed setting refused in TBI mode\n");
1366 ret = -EOPNOTSUPP;
1367 }
1368
1369 return ret;
1370}
1371
/*
 * Speed setting for copper (MII) chips. With autoneg, builds the
 * 10/100 advertisement word and, on gigabit-capable PHYs, the 1000BASE-T
 * control word. Forced mode only supports 10/100. Returns -EINVAL for
 * unsupported combinations. The trailing VER_02/03 writes are a vendor
 * workaround for forced-100 operation.
 */
static int rtl8169_set_speed_xmii(struct net_device *dev,
				  u8 autoneg, u16 speed, u8 duplex, u32 adv)
{
	struct rtl8169_private *tp = netdev_priv(dev);
	int giga_ctrl, bmcr;
	int rc = -EINVAL;

	/* Select PHY register page 0. */
	rtl_writephy(tp, 0x1f, 0x0000);

	if (autoneg == AUTONEG_ENABLE) {
		int auto_nego;

		auto_nego = rtl_readphy(tp, MII_ADVERTISE);
		auto_nego &= ~(ADVERTISE_10HALF | ADVERTISE_10FULL |
			       ADVERTISE_100HALF | ADVERTISE_100FULL);

		if (adv & ADVERTISED_10baseT_Half)
			auto_nego |= ADVERTISE_10HALF;
		if (adv & ADVERTISED_10baseT_Full)
			auto_nego |= ADVERTISE_10FULL;
		if (adv & ADVERTISED_100baseT_Half)
			auto_nego |= ADVERTISE_100HALF;
		if (adv & ADVERTISED_100baseT_Full)
			auto_nego |= ADVERTISE_100FULL;

		/* Always advertise symmetric + asymmetric pause. */
		auto_nego |= ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM;

		giga_ctrl = rtl_readphy(tp, MII_CTRL1000);
		giga_ctrl &= ~(ADVERTISE_1000FULL | ADVERTISE_1000HALF);

		/* The 8100e/8101e/8102e do Fast Ethernet only. */
		if (tp->mii.supports_gmii) {
			if (adv & ADVERTISED_1000baseT_Half)
				giga_ctrl |= ADVERTISE_1000HALF;
			if (adv & ADVERTISED_1000baseT_Full)
				giga_ctrl |= ADVERTISE_1000FULL;
		} else if (adv & (ADVERTISED_1000baseT_Half |
				  ADVERTISED_1000baseT_Full)) {
			netif_info(tp, link, dev,
				   "PHY does not support 1000Mbps\n");
			goto out;
		}

		bmcr = BMCR_ANENABLE | BMCR_ANRESTART;

		rtl_writephy(tp, MII_ADVERTISE, auto_nego);
		rtl_writephy(tp, MII_CTRL1000, giga_ctrl);
	} else {
		giga_ctrl = 0;

		if (speed == SPEED_10)
			bmcr = 0;
		else if (speed == SPEED_100)
			bmcr = BMCR_SPEED100;
		else
			goto out;	/* forced gigabit not supported */

		if (duplex == DUPLEX_FULL)
			bmcr |= BMCR_FULLDPLX;
	}

	rtl_writephy(tp, MII_BMCR, bmcr);

	if (tp->mac_version == RTL_GIGA_MAC_VER_02 ||
	    tp->mac_version == RTL_GIGA_MAC_VER_03) {
		/* Vendor magic for forced-100 vs. other modes. */
		if ((speed == SPEED_100) && (autoneg != AUTONEG_ENABLE)) {
			rtl_writephy(tp, 0x17, 0x2138);
			rtl_writephy(tp, 0x0e, 0x0260);
		} else {
			rtl_writephy(tp, 0x17, 0x2108);
			rtl_writephy(tp, 0x0e, 0x0000);
		}
	}

	rc = 0;
out:
	return rc;
}
1450
/*
 * Dispatch to the TBI or XMII speed setter. When autonegotiating
 * gigabit on a running interface, (re)arm the PHY timer which works
 * around chips that occasionally fail 1000 Mbps negotiation.
 */
static int rtl8169_set_speed(struct net_device *dev,
			     u8 autoneg, u16 speed, u8 duplex, u32 advertising)
{
	struct rtl8169_private *tp = netdev_priv(dev);
	int ret;

	ret = tp->set_speed(dev, autoneg, speed, duplex, advertising);
	if (ret < 0)
		goto out;

	if (netif_running(dev) && (autoneg == AUTONEG_ENABLE) &&
	    (advertising & ADVERTISED_1000baseT_Full)) {
		mod_timer(&tp->timer, jiffies + RTL8169_PHY_TIMEOUT);
	}
out:
	return ret;
}
1468
/*
 * ethtool set_settings: stop the PHY timer first (it may rearm in
 * rtl8169_set_speed), then apply the new link parameters under the lock.
 */
static int rtl8169_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
{
	struct rtl8169_private *tp = netdev_priv(dev);
	unsigned long flags;
	int ret;

	del_timer_sync(&tp->timer);

	spin_lock_irqsave(&tp->lock, flags);
	ret = rtl8169_set_speed(dev, cmd->autoneg, ethtool_cmd_speed(cmd),
				cmd->duplex, cmd->advertising);
	spin_unlock_irqrestore(&tp->lock, flags);

	return ret;
}
1484
1485static u32 rtl8169_fix_features(struct net_device *dev, u32 features)
1486{
1487 if (dev->mtu > TD_MSS_MAX)
1488 features &= ~NETIF_F_ALL_TSO;
1489
1490 return features;
1491}
1492
1493static int rtl8169_set_features(struct net_device *dev, u32 features)
1494{
1495 struct rtl8169_private *tp = netdev_priv(dev);
1496 void __iomem *ioaddr = tp->mmio_addr;
1497 unsigned long flags;
1498
1499 spin_lock_irqsave(&tp->lock, flags);
1500
1501 if (features & NETIF_F_RXCSUM)
1502 tp->cp_cmd |= RxChkSum;
1503 else
1504 tp->cp_cmd &= ~RxChkSum;
1505
1506 if (dev->features & NETIF_F_HW_VLAN_RX)
1507 tp->cp_cmd |= RxVlan;
1508 else
1509 tp->cp_cmd &= ~RxVlan;
1510
1511 RTL_W16(CPlusCmd, tp->cp_cmd);
1512 RTL_R16(CPlusCmd);
1513
1514 spin_unlock_irqrestore(&tp->lock, flags);
1515
1516 return 0;
1517}
1518
/*
 * Build the opts2 VLAN field for a tx descriptor: tag-present flag plus
 * the byte-swapped tag (the chip expects the tag big-endian), or 0 when
 * the skb carries no tag.
 */
static inline u32 rtl8169_tx_vlan_tag(struct rtl8169_private *tp,
				      struct sk_buff *skb)
{
	return (vlan_tx_tag_present(skb)) ?
		TxVlanTag | swab16(vlan_tx_tag_get(skb)) : 0x00;
}
1525
/*
 * Extract a hardware-stripped VLAN tag from an rx descriptor into the
 * skb (tag is stored byte-swapped by the chip), then clear opts2 so the
 * recycled descriptor starts clean.
 */
static void rtl8169_rx_vlan_tag(struct RxDesc *desc, struct sk_buff *skb)
{
	u32 opts2 = le32_to_cpu(desc->opts2);

	if (opts2 & RxVlanTag)
		__vlan_hwaccel_put_tag(skb, swab16(opts2 & 0xffff));

	desc->opts2 = 0;
}
1535
/*
 * ethtool get_settings for TBI (fiber) chips: always 1000/full over
 * fibre; only the autoneg state is read from TBICSR.
 */
static int rtl8169_gset_tbi(struct net_device *dev, struct ethtool_cmd *cmd)
{
	struct rtl8169_private *tp = netdev_priv(dev);
	void __iomem *ioaddr = tp->mmio_addr;
	u32 status;

	cmd->supported =
		SUPPORTED_1000baseT_Full | SUPPORTED_Autoneg | SUPPORTED_FIBRE;
	cmd->port = PORT_FIBRE;
	cmd->transceiver = XCVR_INTERNAL;

	status = RTL_R32(TBICSR);
	cmd->advertising = (status & TBINwEnable) ?  ADVERTISED_Autoneg : 0;
	cmd->autoneg = !!(status & TBINwEnable);

	ethtool_cmd_speed_set(cmd, SPEED_1000);
	cmd->duplex = DUPLEX_FULL; /* Always set */

	return 0;
}
1556
/* ethtool get_settings for copper chips: delegate to the generic MII layer. */
static int rtl8169_gset_xmii(struct net_device *dev, struct ethtool_cmd *cmd)
{
	struct rtl8169_private *tp = netdev_priv(dev);

	return mii_ethtool_gset(&tp->mii, cmd);
}
1563
/* ethtool get_settings: call the chip-specific getter under tp->lock. */
static int rtl8169_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
{
	struct rtl8169_private *tp = netdev_priv(dev);
	unsigned long flags;
	int rc;

	spin_lock_irqsave(&tp->lock, flags);

	rc = tp->get_settings(dev, cmd);

	spin_unlock_irqrestore(&tp->lock, flags);
	return rc;
}
1577
/*
 * ethtool get_regs: copy the MMIO register window into @p, clamped to
 * R8169_REGS_SIZE, under the lock so the snapshot is consistent.
 */
static void rtl8169_get_regs(struct net_device *dev, struct ethtool_regs *regs,
			     void *p)
{
	struct rtl8169_private *tp = netdev_priv(dev);
	unsigned long flags;

	if (regs->len > R8169_REGS_SIZE)
		regs->len = R8169_REGS_SIZE;

	spin_lock_irqsave(&tp->lock, flags);
	memcpy_fromio(p, tp->mmio_addr, regs->len);
	spin_unlock_irqrestore(&tp->lock, flags);
}
1591
/* ethtool get_msglevel: report the driver's message-enable mask. */
static u32 rtl8169_get_msglevel(struct net_device *dev)
{
	struct rtl8169_private *tp = netdev_priv(dev);

	return tp->msg_enable;
}
1598
/* ethtool set_msglevel: update the driver's message-enable mask. */
static void rtl8169_set_msglevel(struct net_device *dev, u32 value)
{
	struct rtl8169_private *tp = netdev_priv(dev);

	tp->msg_enable = value;
}
1605
/*
 * ethtool statistic names. Order must match the data[] indices filled
 * in by rtl8169_get_ethtool_stats().
 */
static const char rtl8169_gstrings[][ETH_GSTRING_LEN] = {
	"tx_packets",
	"rx_packets",
	"tx_errors",
	"rx_errors",
	"rx_missed",
	"align_errors",
	"tx_single_collisions",
	"tx_multi_collisions",
	"unicast",
	"broadcast",
	"multicast",
	"tx_aborted",
	"tx_underrun",
};
1621
1622static int rtl8169_get_sset_count(struct net_device *dev, int sset)
1623{
1624 switch (sset) {
1625 case ETH_SS_STATS:
1626 return ARRAY_SIZE(rtl8169_gstrings);
1627 default:
1628 return -EOPNOTSUPP;
1629 }
1630}
1631
/*
 * Dump the hardware tally counters into tp->counters via a DMA buffer.
 * The chip DMAs the counter block to the address programmed in
 * CounterAddrHigh/Low when CounterDump is set; we poll (up to 10 ms)
 * for the chip to clear the dump bit, then copy the result.
 */
static void rtl8169_update_counters(struct net_device *dev)
{
	struct rtl8169_private *tp = netdev_priv(dev);
	void __iomem *ioaddr = tp->mmio_addr;
	struct device *d = &tp->pci_dev->dev;
	struct rtl8169_counters *counters;
	dma_addr_t paddr;
	u32 cmd;
	int wait = 1000;

	/*
	 * Some chips are unable to dump tally counters when the receiver
	 * is disabled.
	 */
	if ((RTL_R8(ChipCmd) & CmdRxEnb) == 0)
		return;

	counters = dma_alloc_coherent(d, sizeof(*counters), &paddr, GFP_KERNEL);
	if (!counters)
		return;

	RTL_W32(CounterAddrHigh, (u64)paddr >> 32);
	cmd = (u64)paddr & DMA_BIT_MASK(32);
	RTL_W32(CounterAddrLow, cmd);
	/* Writing the dump bit starts the hardware counter DMA. */
	RTL_W32(CounterAddrLow, cmd | CounterDump);

	while (wait--) {
		if ((RTL_R32(CounterAddrLow) & CounterDump) == 0) {
			memcpy(&tp->counters, counters, sizeof(*counters));
			break;
		}
		udelay(10);
	}

	/* Clear the counter address so the chip stops using the buffer. */
	RTL_W32(CounterAddrLow, 0);
	RTL_W32(CounterAddrHigh, 0);

	dma_free_coherent(d, sizeof(*counters), counters, paddr);
}
1671
/*
 * ethtool get_ethtool_stats: refresh the hardware counters, then export
 * them in the same order as rtl8169_gstrings[]. Counter fields are
 * little-endian in the DMA block, hence the le*_to_cpu conversions.
 */
static void rtl8169_get_ethtool_stats(struct net_device *dev,
				      struct ethtool_stats *stats, u64 *data)
{
	struct rtl8169_private *tp = netdev_priv(dev);

	ASSERT_RTNL();

	rtl8169_update_counters(dev);

	data[0] = le64_to_cpu(tp->counters.tx_packets);
	data[1] = le64_to_cpu(tp->counters.rx_packets);
	data[2] = le64_to_cpu(tp->counters.tx_errors);
	data[3] = le32_to_cpu(tp->counters.rx_errors);
	data[4] = le16_to_cpu(tp->counters.rx_missed);
	data[5] = le16_to_cpu(tp->counters.align_errors);
	data[6] = le32_to_cpu(tp->counters.tx_one_collision);
	data[7] = le32_to_cpu(tp->counters.tx_multi_collision);
	data[8] = le64_to_cpu(tp->counters.rx_unicast);
	data[9] = le64_to_cpu(tp->counters.rx_broadcast);
	data[10] = le32_to_cpu(tp->counters.rx_multicast);
	data[11] = le16_to_cpu(tp->counters.tx_aborted);
	data[12] = le16_to_cpu(tp->counters.tx_underun);
}
1695
1696static void rtl8169_get_strings(struct net_device *dev, u32 stringset, u8 *data)
1697{
1698 switch(stringset) {
1699 case ETH_SS_STATS:
1700 memcpy(data, *rtl8169_gstrings, sizeof(rtl8169_gstrings));
1701 break;
1702 }
1703}
1704
/* ethtool operations exported by this driver. */
static const struct ethtool_ops rtl8169_ethtool_ops = {
	.get_drvinfo		= rtl8169_get_drvinfo,
	.get_regs_len		= rtl8169_get_regs_len,
	.get_link		= ethtool_op_get_link,
	.get_settings		= rtl8169_get_settings,
	.set_settings		= rtl8169_set_settings,
	.get_msglevel		= rtl8169_get_msglevel,
	.set_msglevel		= rtl8169_set_msglevel,
	.get_regs		= rtl8169_get_regs,
	.get_wol		= rtl8169_get_wol,
	.set_wol		= rtl8169_set_wol,
	.get_strings		= rtl8169_get_strings,
	.get_sset_count		= rtl8169_get_sset_count,
	.get_ethtool_stats	= rtl8169_get_ethtool_stats,
};
1720
/*
 * Identify the chip from the TxConfig ID bits. The table is scanned in
 * order, so more specific masks must precede the broader family
 * catch-alls — do not reorder entries. Falls back to @default_version
 * (with a notice) when nothing but the terminator matches.
 */
static void rtl8169_get_mac_version(struct rtl8169_private *tp,
				    struct net_device *dev, u8 default_version)
{
	void __iomem *ioaddr = tp->mmio_addr;
	/*
	 * The driver currently handles the 8168Bf and the 8168Be identically
	 * but they can be identified more specifically through the test below
	 * if needed:
	 *
	 * (RTL_R32(TxConfig) & 0x700000) == 0x500000 ? 8168Bf : 8168Be
	 *
	 * Same thing for the 8101Eb and the 8101Ec:
	 *
	 * (RTL_R32(TxConfig) & 0x700000) == 0x200000 ? 8101Eb : 8101Ec
	 */
	static const struct rtl_mac_info {
		u32 mask;
		u32 val;
		int mac_version;
	} mac_info[] = {
		/* 8168E family. */
		{ 0x7c800000, 0x2c800000,	RTL_GIGA_MAC_VER_34 },
		{ 0x7cf00000, 0x2c200000,	RTL_GIGA_MAC_VER_33 },
		{ 0x7cf00000, 0x2c100000,	RTL_GIGA_MAC_VER_32 },
		{ 0x7c800000, 0x2c000000,	RTL_GIGA_MAC_VER_33 },

		/* 8168D family. */
		{ 0x7cf00000, 0x28300000,	RTL_GIGA_MAC_VER_26 },
		{ 0x7cf00000, 0x28100000,	RTL_GIGA_MAC_VER_25 },
		{ 0x7c800000, 0x28000000,	RTL_GIGA_MAC_VER_26 },

		/* 8168DP family. */
		{ 0x7cf00000, 0x28800000,	RTL_GIGA_MAC_VER_27 },
		{ 0x7cf00000, 0x28a00000,	RTL_GIGA_MAC_VER_28 },
		{ 0x7cf00000, 0x28b00000,	RTL_GIGA_MAC_VER_31 },

		/* 8168C family. */
		{ 0x7cf00000, 0x3cb00000,	RTL_GIGA_MAC_VER_24 },
		{ 0x7cf00000, 0x3c900000,	RTL_GIGA_MAC_VER_23 },
		{ 0x7cf00000, 0x3c800000,	RTL_GIGA_MAC_VER_18 },
		{ 0x7c800000, 0x3c800000,	RTL_GIGA_MAC_VER_24 },
		{ 0x7cf00000, 0x3c000000,	RTL_GIGA_MAC_VER_19 },
		{ 0x7cf00000, 0x3c200000,	RTL_GIGA_MAC_VER_20 },
		{ 0x7cf00000, 0x3c300000,	RTL_GIGA_MAC_VER_21 },
		{ 0x7cf00000, 0x3c400000,	RTL_GIGA_MAC_VER_22 },
		{ 0x7c800000, 0x3c000000,	RTL_GIGA_MAC_VER_22 },

		/* 8168B family. */
		{ 0x7cf00000, 0x38000000,	RTL_GIGA_MAC_VER_12 },
		{ 0x7cf00000, 0x38500000,	RTL_GIGA_MAC_VER_17 },
		{ 0x7c800000, 0x38000000,	RTL_GIGA_MAC_VER_17 },
		{ 0x7c800000, 0x30000000,	RTL_GIGA_MAC_VER_11 },

		/* 8101 family. */
		{ 0x7cf00000, 0x40b00000,	RTL_GIGA_MAC_VER_30 },
		{ 0x7cf00000, 0x40a00000,	RTL_GIGA_MAC_VER_30 },
		{ 0x7cf00000, 0x40900000,	RTL_GIGA_MAC_VER_29 },
		{ 0x7c800000, 0x40800000,	RTL_GIGA_MAC_VER_30 },
		{ 0x7cf00000, 0x34a00000,	RTL_GIGA_MAC_VER_09 },
		{ 0x7cf00000, 0x24a00000,	RTL_GIGA_MAC_VER_09 },
		{ 0x7cf00000, 0x34900000,	RTL_GIGA_MAC_VER_08 },
		{ 0x7cf00000, 0x24900000,	RTL_GIGA_MAC_VER_08 },
		{ 0x7cf00000, 0x34800000,	RTL_GIGA_MAC_VER_07 },
		{ 0x7cf00000, 0x24800000,	RTL_GIGA_MAC_VER_07 },
		{ 0x7cf00000, 0x34000000,	RTL_GIGA_MAC_VER_13 },
		{ 0x7cf00000, 0x34300000,	RTL_GIGA_MAC_VER_10 },
		{ 0x7cf00000, 0x34200000,	RTL_GIGA_MAC_VER_16 },
		{ 0x7c800000, 0x34800000,	RTL_GIGA_MAC_VER_09 },
		{ 0x7c800000, 0x24800000,	RTL_GIGA_MAC_VER_09 },
		{ 0x7c800000, 0x34000000,	RTL_GIGA_MAC_VER_16 },
		/* FIXME: where did these entries come from ? -- FR */
		{ 0xfc800000, 0x38800000,	RTL_GIGA_MAC_VER_15 },
		{ 0xfc800000, 0x30800000,	RTL_GIGA_MAC_VER_14 },

		/* 8110 family. */
		{ 0xfc800000, 0x98000000,	RTL_GIGA_MAC_VER_06 },
		{ 0xfc800000, 0x18000000,	RTL_GIGA_MAC_VER_05 },
		{ 0xfc800000, 0x10000000,	RTL_GIGA_MAC_VER_04 },
		{ 0xfc800000, 0x04000000,	RTL_GIGA_MAC_VER_03 },
		{ 0xfc800000, 0x00800000,	RTL_GIGA_MAC_VER_02 },
		{ 0xfc800000, 0x00000000,	RTL_GIGA_MAC_VER_01 },

		/* Catch-all */
		{ 0x00000000, 0x00000000,	RTL_GIGA_MAC_NONE   }
	};
	const struct rtl_mac_info *p = mac_info;
	u32 reg;

	reg = RTL_R32(TxConfig);
	/* The terminator entry matches everything, so this always stops. */
	while ((reg & p->mask) != p->val)
		p++;
	tp->mac_version = p->mac_version;

	if (tp->mac_version == RTL_GIGA_MAC_NONE) {
		netif_notice(tp, probe, dev,
			     "unknown MAC, using family default\n");
		tp->mac_version = default_version;
	}
}
1820
/* Debug helper: log the detected MAC version. */
static void rtl8169_print_mac_version(struct rtl8169_private *tp)
{
	dprintk("mac_version = 0x%02x\n", tp->mac_version);
}
1825
/* One (register, value) pair of a batched PHY write sequence. */
struct phy_reg {
	u16 reg;	/* PHY register number (0x1f selects the page) */
	u16 val;	/* value to write */
};
1830
1831static void rtl_writephy_batch(struct rtl8169_private *tp,
1832 const struct phy_reg *regs, int len)
1833{
1834 while (len-- > 0) {
1835 rtl_writephy(tp, regs->reg, regs->val);
1836 regs++;
1837 }
1838}
1839
/*
 * Opcodes of the PHY firmware bytecode interpreted by rtl_phy_write_fw().
 * The opcode lives in the top nibble of each 32-bit instruction; the
 * register number occupies bits 27:16 and immediate data bits 15:0.
 */
#define PHY_READ		0x00000000
#define PHY_DATA_OR		0x10000000
#define PHY_DATA_AND		0x20000000
#define PHY_BJMPN		0x30000000
#define PHY_READ_EFUSE		0x40000000
#define PHY_READ_MAC_BYTE	0x50000000
#define PHY_WRITE_MAC_BYTE	0x60000000
#define PHY_CLEAR_READCOUNT	0x70000000
#define PHY_WRITE		0x80000000
#define PHY_READCOUNT_EQ_SKIP	0x90000000
#define PHY_COMP_EQ_SKIPN	0xa0000000
#define PHY_COMP_NEQ_SKIPN	0xb0000000
#define PHY_WRITE_PREVIOUS	0xc0000000
#define PHY_SKIPN		0xd0000000
#define PHY_DELAY_MS		0xe0000000
#define PHY_WRITE_ERI_WORD	0xf0000000
1856
/* On-disk header of the "new format" PHY firmware files. */
struct fw_info {
	u32	magic;			/* 0 identifies the new format */
	char	version[RTL_VER_SIZE];	/* human-readable firmware version */
	__le32	fw_start;		/* byte offset of the bytecode */
	__le32	fw_len;			/* bytecode length, in opcodes */
	u8	chksum;			/* makes the whole file sum to 0 */
} __packed;

/* Size of one firmware bytecode instruction. */
#define FW_OPCODE_SIZE	sizeof(typeof(*((struct rtl_fw_phy_action *)0)->code))
1866
/*
 * Validate a loaded firmware image and locate its bytecode.
 *
 * Two formats exist: the "new" format (fw_info header with magic == 0,
 * checksummed, with explicit start/length) and the legacy format (raw
 * bytecode, only required to be a multiple of the opcode size). On
 * success, rtl_fw->phy_action points into fw->data and the version
 * string is filled in; returns false on any malformed input.
 */
static bool rtl_fw_format_ok(struct rtl8169_private *tp, struct rtl_fw *rtl_fw)
{
	const struct firmware *fw = rtl_fw->fw;
	struct fw_info *fw_info = (struct fw_info *)fw->data;
	struct rtl_fw_phy_action *pa = &rtl_fw->phy_action;
	char *version = rtl_fw->version;
	bool rc = false;

	if (fw->size < FW_OPCODE_SIZE)
		goto out;

	if (!fw_info->magic) {
		size_t i, size, start;
		u8 checksum = 0;

		if (fw->size < sizeof(*fw_info))
			goto out;

		/* Whole-file byte sum must be zero (chksum field included). */
		for (i = 0; i < fw->size; i++)
			checksum += fw->data[i];
		if (checksum != 0)
			goto out;

		start = le32_to_cpu(fw_info->fw_start);
		if (start > fw->size)
			goto out;

		/* Guards against fw_len overrunning the file. */
		size = le32_to_cpu(fw_info->fw_len);
		if (size > (fw->size - start) / FW_OPCODE_SIZE)
			goto out;

		memcpy(version, fw_info->version, RTL_VER_SIZE);

		pa->code = (__le32 *)(fw->data + start);
		pa->size = size;
	} else {
		/* Legacy format: raw opcodes, no header. */
		if (fw->size % FW_OPCODE_SIZE)
			goto out;

		strlcpy(version, rtl_lookup_firmware_name(tp), RTL_VER_SIZE);

		pa->code = (__le32 *)fw->data;
		pa->size = fw->size / FW_OPCODE_SIZE;
	}
	version[RTL_VER_SIZE - 1] = 0;

	rc = true;
out:
	return rc;
}
1917
/*
 * Statically verify the firmware bytecode before running it: every
 * opcode must be known, and every jump/skip target must stay inside
 * the program, so the interpreter cannot be driven out of bounds.
 */
static bool rtl_fw_data_ok(struct rtl8169_private *tp, struct net_device *dev,
			   struct rtl_fw_phy_action *pa)
{
	bool rc = false;
	size_t index;

	for (index = 0; index < pa->size; index++) {
		u32 action = le32_to_cpu(pa->code[index]);
		u32 regno = (action & 0x0fff0000) >> 16;

		switch(action & 0xf0000000) {
		/* Opcodes with no control-flow effect need no range check. */
		case PHY_READ:
		case PHY_DATA_OR:
		case PHY_DATA_AND:
		case PHY_READ_EFUSE:
		case PHY_CLEAR_READCOUNT:
		case PHY_WRITE:
		case PHY_WRITE_PREVIOUS:
		case PHY_DELAY_MS:
			break;

		case PHY_BJMPN:
			/* Backward jump by regno must not precede index 0. */
			if (regno > index) {
				netif_err(tp, ifup, tp->dev,
					  "Out of range of firmware\n");
				goto out;
			}
			break;
		case PHY_READCOUNT_EQ_SKIP:
			/* May skip two instructions forward. */
			if (index + 2 >= pa->size) {
				netif_err(tp, ifup, tp->dev,
					  "Out of range of firmware\n");
				goto out;
			}
			break;
		case PHY_COMP_EQ_SKIPN:
		case PHY_COMP_NEQ_SKIPN:
		case PHY_SKIPN:
			/* May skip regno instructions forward. */
			if (index + 1 + regno >= pa->size) {
				netif_err(tp, ifup, tp->dev,
					  "Out of range of firmware\n");
				goto out;
			}
			break;

		case PHY_READ_MAC_BYTE:
		case PHY_WRITE_MAC_BYTE:
		case PHY_WRITE_ERI_WORD:
			/* Defined in the format but not implemented here. */
		default:
			netif_err(tp, ifup, tp->dev,
				  "Invalid action 0x%08x\n", action);
			goto out;
		}
	}
	rc = true;
out:
	return rc;
}
1976
1977static int rtl_check_firmware(struct rtl8169_private *tp, struct rtl_fw *rtl_fw)
1978{
1979 struct net_device *dev = tp->dev;
1980 int rc = -EINVAL;
1981
1982 if (!rtl_fw_format_ok(tp, rtl_fw)) {
1983 netif_err(tp, ifup, dev, "invalid firwmare\n");
1984 goto out;
1985 }
1986
1987 if (rtl_fw_data_ok(tp, dev, &rtl_fw->phy_action))
1988 rc = 0;
1989out:
1990 return rc;
1991}
1992
/*
 * Execute the firmware bytecode against the PHY. The program has been
 * validated by rtl_fw_data_ok(), so jump/skip targets are trusted here
 * (unknown opcodes BUG()). State: @predata holds the last value read,
 * @count the number of PHY_READs since the last PHY_CLEAR_READCOUNT.
 * An all-zero instruction terminates the program early.
 */
static void rtl_phy_write_fw(struct rtl8169_private *tp, struct rtl_fw *rtl_fw)
{
	struct rtl_fw_phy_action *pa = &rtl_fw->phy_action;
	u32 predata, count;
	size_t index;

	predata = count = 0;

	for (index = 0; index < pa->size; ) {
		u32 action = le32_to_cpu(pa->code[index]);
		u32 data = action & 0x0000ffff;
		u32 regno = (action & 0x0fff0000) >> 16;

		if (!action)
			break;

		switch(action & 0xf0000000) {
		case PHY_READ:
			predata = rtl_readphy(tp, regno);
			count++;
			index++;
			break;
		case PHY_DATA_OR:
			predata |= data;
			index++;
			break;
		case PHY_DATA_AND:
			predata &= data;
			index++;
			break;
		case PHY_BJMPN:
			/* Backward jump by regno instructions. */
			index -= regno;
			break;
		case PHY_READ_EFUSE:
			predata = rtl8168d_efuse_read(tp->mmio_addr, regno);
			index++;
			break;
		case PHY_CLEAR_READCOUNT:
			count = 0;
			index++;
			break;
		case PHY_WRITE:
			rtl_writephy(tp, regno, data);
			index++;
			break;
		case PHY_READCOUNT_EQ_SKIP:
			/* Skip the next instruction when count matches. */
			index += (count == data) ? 2 : 1;
			break;
		case PHY_COMP_EQ_SKIPN:
			if (predata == data)
				index += regno;
			index++;
			break;
		case PHY_COMP_NEQ_SKIPN:
			if (predata != data)
				index += regno;
			index++;
			break;
		case PHY_WRITE_PREVIOUS:
			rtl_writephy(tp, regno, predata);
			index++;
			break;
		case PHY_SKIPN:
			index += regno + 1;
			break;
		case PHY_DELAY_MS:
			mdelay(data);
			index++;
			break;

		case PHY_READ_MAC_BYTE:
		case PHY_WRITE_MAC_BYTE:
		case PHY_WRITE_ERI_WORD:
		default:
			/* Rejected by rtl_check_firmware(); cannot happen. */
			BUG();
		}
	}
}
2071
/*
 * Drop the loaded firmware (if any) and reset tp->rtl_fw to the
 * "not yet requested" sentinel. Safe to call with the error/NULL
 * sentinels in place.
 */
static void rtl_release_firmware(struct rtl8169_private *tp)
{
	if (!IS_ERR_OR_NULL(tp->rtl_fw)) {
		release_firmware(tp->rtl_fw->fw);
		kfree(tp->rtl_fw);
	}
	tp->rtl_fw = RTL_FIRMWARE_UNKNOWN;
}
2080
/* Run the PHY firmware if one was successfully loaded; no-op otherwise. */
static void rtl_apply_firmware(struct rtl8169_private *tp)
{
	struct rtl_fw *rtl_fw = tp->rtl_fw;

	/* TODO: release firmware once rtl_phy_write_fw signals failures. */
	if (!IS_ERR_OR_NULL(rtl_fw))
		rtl_phy_write_fw(tp, rtl_fw);
}
2089
2090static void rtl_apply_firmware_cond(struct rtl8169_private *tp, u8 reg, u16 val)
2091{
2092 if (rtl_readphy(tp, reg) != val)
2093 netif_warn(tp, hw, tp->dev, "chipset not ready for firmware\n");
2094 else
2095 rtl_apply_firmware(tp);
2096}
2097
/*
 * PHY setup for the RTL8169S. The values are vendor-provided magic;
 * the 0x1f writes select PHY register pages. Keep the sequence order
 * and values exactly as supplied.
 */
static void rtl8169s_hw_phy_config(struct rtl8169_private *tp)
{
	static const struct phy_reg phy_reg_init[] = {
		{ 0x1f, 0x0001 },
		{ 0x06, 0x006e },
		{ 0x08, 0x0708 },
		{ 0x15, 0x4000 },
		{ 0x18, 0x65c7 },

		{ 0x1f, 0x0001 },
		{ 0x03, 0x00a1 },
		{ 0x02, 0x0008 },
		{ 0x01, 0x0120 },
		{ 0x00, 0x1000 },
		{ 0x04, 0x0800 },
		{ 0x04, 0x0000 },

		{ 0x03, 0xff41 },
		{ 0x02, 0xdf60 },
		{ 0x01, 0x0140 },
		{ 0x00, 0x0077 },
		{ 0x04, 0x7800 },
		{ 0x04, 0x7000 },

		{ 0x03, 0x802f },
		{ 0x02, 0x4f02 },
		{ 0x01, 0x0409 },
		{ 0x00, 0xf0f9 },
		{ 0x04, 0x9800 },
		{ 0x04, 0x9000 },

		{ 0x03, 0xdf01 },
		{ 0x02, 0xdf20 },
		{ 0x01, 0xff95 },
		{ 0x00, 0xba00 },
		{ 0x04, 0xa800 },
		{ 0x04, 0xa000 },

		{ 0x03, 0xff41 },
		{ 0x02, 0xdf20 },
		{ 0x01, 0x0140 },
		{ 0x00, 0x00bb },
		{ 0x04, 0xb800 },
		{ 0x04, 0xb000 },

		{ 0x03, 0xdf41 },
		{ 0x02, 0xdc60 },
		{ 0x01, 0x6340 },
		{ 0x00, 0x007d },
		{ 0x04, 0xd800 },
		{ 0x04, 0xd000 },

		{ 0x03, 0xdf01 },
		{ 0x02, 0xdf20 },
		{ 0x01, 0x100a },
		{ 0x00, 0xa0ff },
		{ 0x04, 0xf800 },
		{ 0x04, 0xf000 },

		{ 0x1f, 0x0000 },
		{ 0x0b, 0x0000 },
		{ 0x00, 0x9200 }
	};

	rtl_writephy_batch(tp, phy_reg_init, ARRAY_SIZE(phy_reg_init));
}
2164
/* PHY setup for the RTL8169SB: single vendor-magic write on page 2. */
static void rtl8169sb_hw_phy_config(struct rtl8169_private *tp)
{
	static const struct phy_reg phy_reg_init[] = {
		{ 0x1f, 0x0002 },
		{ 0x01, 0x90d0 },
		{ 0x1f, 0x0000 }
	};

	rtl_writephy_batch(tp, phy_reg_init, ARRAY_SIZE(phy_reg_init));
}
2175
2176static void rtl8169scd_hw_phy_config_quirk(struct rtl8169_private *tp)
2177{
2178 struct pci_dev *pdev = tp->pci_dev;
2179
2180 if ((pdev->subsystem_vendor != PCI_VENDOR_ID_GIGABYTE) ||
2181 (pdev->subsystem_device != 0xe000))
2182 return;
2183
2184 rtl_writephy(tp, 0x1f, 0x0001);
2185 rtl_writephy(tp, 0x10, 0xf01b);
2186 rtl_writephy(tp, 0x1f, 0x0000);
2187}
2188
/*
 * PHY setup for the RTL8169SCd (vendor magic, page-switched via 0x1f),
 * followed by the Gigabyte board quirk.
 */
static void rtl8169scd_hw_phy_config(struct rtl8169_private *tp)
{
	static const struct phy_reg phy_reg_init[] = {
		{ 0x1f, 0x0001 },
		{ 0x04, 0x0000 },
		{ 0x03, 0x00a1 },
		{ 0x02, 0x0008 },
		{ 0x01, 0x0120 },
		{ 0x00, 0x1000 },
		{ 0x04, 0x0800 },
		{ 0x04, 0x9000 },
		{ 0x03, 0x802f },
		{ 0x02, 0x4f02 },
		{ 0x01, 0x0409 },
		{ 0x00, 0xf099 },
		{ 0x04, 0x9800 },
		{ 0x04, 0xa000 },
		{ 0x03, 0xdf01 },
		{ 0x02, 0xdf20 },
		{ 0x01, 0xff95 },
		{ 0x00, 0xba00 },
		{ 0x04, 0xa800 },
		{ 0x04, 0xf000 },
		{ 0x03, 0xdf01 },
		{ 0x02, 0xdf20 },
		{ 0x01, 0x101a },
		{ 0x00, 0xa0ff },
		{ 0x04, 0xf800 },
		{ 0x04, 0x0000 },
		{ 0x1f, 0x0000 },

		{ 0x1f, 0x0001 },
		{ 0x10, 0xf41b },
		{ 0x14, 0xfb54 },
		{ 0x18, 0xf5c7 },
		{ 0x1f, 0x0000 },

		{ 0x1f, 0x0001 },
		{ 0x17, 0x0cc0 },
		{ 0x1f, 0x0000 }
	};

	rtl_writephy_batch(tp, phy_reg_init, ARRAY_SIZE(phy_reg_init));

	rtl8169scd_hw_phy_config_quirk(tp);
}
2235
/* PHY setup for the RTL8169SCe (vendor magic, page-switched via 0x1f). */
static void rtl8169sce_hw_phy_config(struct rtl8169_private *tp)
{
	static const struct phy_reg phy_reg_init[] = {
		{ 0x1f, 0x0001 },
		{ 0x04, 0x0000 },
		{ 0x03, 0x00a1 },
		{ 0x02, 0x0008 },
		{ 0x01, 0x0120 },
		{ 0x00, 0x1000 },
		{ 0x04, 0x0800 },
		{ 0x04, 0x9000 },
		{ 0x03, 0x802f },
		{ 0x02, 0x4f02 },
		{ 0x01, 0x0409 },
		{ 0x00, 0xf099 },
		{ 0x04, 0x9800 },
		{ 0x04, 0xa000 },
		{ 0x03, 0xdf01 },
		{ 0x02, 0xdf20 },
		{ 0x01, 0xff95 },
		{ 0x00, 0xba00 },
		{ 0x04, 0xa800 },
		{ 0x04, 0xf000 },
		{ 0x03, 0xdf01 },
		{ 0x02, 0xdf20 },
		{ 0x01, 0x101a },
		{ 0x00, 0xa0ff },
		{ 0x04, 0xf800 },
		{ 0x04, 0x0000 },
		{ 0x1f, 0x0000 },

		{ 0x1f, 0x0001 },
		{ 0x0b, 0x8480 },
		{ 0x1f, 0x0000 },

		{ 0x1f, 0x0001 },
		{ 0x18, 0x67c7 },
		{ 0x04, 0x2000 },
		{ 0x03, 0x002f },
		{ 0x02, 0x4360 },
		{ 0x01, 0x0109 },
		{ 0x00, 0x3022 },
		{ 0x04, 0x2800 },
		{ 0x1f, 0x0000 },

		{ 0x1f, 0x0001 },
		{ 0x17, 0x0cc0 },
		{ 0x1f, 0x0000 }
	};

	rtl_writephy_batch(tp, phy_reg_init, ARRAY_SIZE(phy_reg_init));
}
2288
/*
 * PHY setup for the RTL8168Bb: set bit 0 of reg 0x16 on page 1 first,
 * then apply the vendor-magic table.
 */
static void rtl8168bb_hw_phy_config(struct rtl8169_private *tp)
{
	static const struct phy_reg phy_reg_init[] = {
		{ 0x10, 0xf41b },
		{ 0x1f, 0x0000 }
	};

	rtl_writephy(tp, 0x1f, 0x0001);
	rtl_patchphy(tp, 0x16, 1 << 0);

	rtl_writephy_batch(tp, phy_reg_init, ARRAY_SIZE(phy_reg_init));
}
2301
/* PHY setup for the RTL8168Be/Bf: single vendor-magic write on page 1. */
static void rtl8168bef_hw_phy_config(struct rtl8169_private *tp)
{
	static const struct phy_reg phy_reg_init[] = {
		{ 0x1f, 0x0001 },
		{ 0x10, 0xf41b },
		{ 0x1f, 0x0000 }
	};

	rtl_writephy_batch(tp, phy_reg_init, ARRAY_SIZE(phy_reg_init));
}
2312
/* PHY setup for the RTL8168CP (variant 1): vendor-magic table. */
static void rtl8168cp_1_hw_phy_config(struct rtl8169_private *tp)
{
	static const struct phy_reg phy_reg_init[] = {
		{ 0x1f, 0x0000 },
		{ 0x1d, 0x0f00 },
		{ 0x1f, 0x0002 },
		{ 0x0c, 0x1ec8 },
		{ 0x1f, 0x0000 }
	};

	rtl_writephy_batch(tp, phy_reg_init, ARRAY_SIZE(phy_reg_init));
}
2325
/*
 * PHY setup for the later RTL8168CP revisions (VER_23/VER_24): set a
 * bit each in regs 0x14 and 0x0d on page 0, then write reg 0x1d on
 * page 1 and return to page 0.
 */
static void rtl8168cp_2_hw_phy_config(struct rtl8169_private *tp)
{
	static const struct phy_reg phy_reg_init[] = {
		{ 0x1f, 0x0001 },
		{ 0x1d, 0x3d98 },
		{ 0x1f, 0x0000 }
	};

	rtl_writephy(tp, 0x1f, 0x0000);
	rtl_patchphy(tp, 0x14, 1 << 5);
	rtl_patchphy(tp, 0x0d, 1 << 5);

	rtl_writephy_batch(tp, phy_reg_init, ARRAY_SIZE(phy_reg_init));
}
2340
/*
 * PHY setup for the RTL8168C rev. 1 (RTL_GIGA_MAC_VER_19): batch the
 * vendor sequence first, then set single bits in regs 0x14 and 0x0d
 * and leave the PHY on page 0.
 */
static void rtl8168c_1_hw_phy_config(struct rtl8169_private *tp)
{
	static const struct phy_reg phy_reg_init[] = {
		{ 0x1f, 0x0001 },
		{ 0x12, 0x2300 },
		{ 0x1f, 0x0002 },
		{ 0x00, 0x88d4 },
		{ 0x01, 0x82b1 },
		{ 0x03, 0x7002 },
		{ 0x08, 0x9e30 },
		{ 0x09, 0x01f0 },
		{ 0x0a, 0x5500 },
		{ 0x0c, 0x00c8 },
		{ 0x1f, 0x0003 },
		{ 0x12, 0xc096 },
		{ 0x16, 0x000a },
		{ 0x1f, 0x0000 },
		{ 0x1f, 0x0000 },
		{ 0x09, 0x2000 },
		{ 0x09, 0x0000 }
	};

	rtl_writephy_batch(tp, phy_reg_init, ARRAY_SIZE(phy_reg_init));

	rtl_patchphy(tp, 0x14, 1 << 5);
	rtl_patchphy(tp, 0x0d, 1 << 5);
	rtl_writephy(tp, 0x1f, 0x0000);
}
2369
/*
 * PHY setup for the RTL8168C rev. 2 (RTL_GIGA_MAC_VER_20): batch the
 * vendor sequence, then set single bits in regs 0x16, 0x14 and 0x0d
 * and leave the PHY on page 0.
 */
static void rtl8168c_2_hw_phy_config(struct rtl8169_private *tp)
{
	static const struct phy_reg phy_reg_init[] = {
		{ 0x1f, 0x0001 },
		{ 0x12, 0x2300 },
		{ 0x03, 0x802f },
		{ 0x02, 0x4f02 },
		{ 0x01, 0x0409 },
		{ 0x00, 0xf099 },
		{ 0x04, 0x9800 },
		{ 0x04, 0x9000 },
		{ 0x1d, 0x3d98 },
		{ 0x1f, 0x0002 },
		{ 0x0c, 0x7eb8 },
		{ 0x06, 0x0761 },
		{ 0x1f, 0x0003 },
		{ 0x16, 0x0f0a },
		{ 0x1f, 0x0000 }
	};

	rtl_writephy_batch(tp, phy_reg_init, ARRAY_SIZE(phy_reg_init));

	rtl_patchphy(tp, 0x16, 1 << 0);
	rtl_patchphy(tp, 0x14, 1 << 5);
	rtl_patchphy(tp, 0x0d, 1 << 5);
	rtl_writephy(tp, 0x1f, 0x0000);
}
2397
/*
 * PHY setup for the RTL8168C rev. 3 (RTL_GIGA_MAC_VER_21); also
 * reused unchanged by rev. 4 via rtl8168c_4_hw_phy_config().
 */
static void rtl8168c_3_hw_phy_config(struct rtl8169_private *tp)
{
	static const struct phy_reg phy_reg_init[] = {
		{ 0x1f, 0x0001 },
		{ 0x12, 0x2300 },
		{ 0x1d, 0x3d98 },
		{ 0x1f, 0x0002 },
		{ 0x0c, 0x7eb8 },
		{ 0x06, 0x5461 },
		{ 0x1f, 0x0003 },
		{ 0x16, 0x0f0a },
		{ 0x1f, 0x0000 }
	};

	rtl_writephy_batch(tp, phy_reg_init, ARRAY_SIZE(phy_reg_init));

	rtl_patchphy(tp, 0x16, 1 << 0);
	rtl_patchphy(tp, 0x14, 1 << 5);
	rtl_patchphy(tp, 0x0d, 1 << 5);
	rtl_writephy(tp, 0x1f, 0x0000);
}
2419
/* RTL8168C rev. 4 uses the same PHY parameters as rev. 3. */
static void rtl8168c_4_hw_phy_config(struct rtl8169_private *tp)
{
	rtl8168c_3_hw_phy_config(tp);
}
2424
/*
 * PHY setup for the RTL8168D rev. 1 (RTL_GIGA_MAC_VER_25).
 *
 * After the common init table, the switching-regulator trim depends on
 * an efuse byte: value 0xb1 selects one parameter set and may require
 * walking reg 0x0d through a ramp of values; anything else gets the
 * alternate parameter set.  Finishes with PLL fine tuning and an
 * optional firmware load keyed on MII_EXPANSION.
 */
static void rtl8168d_1_hw_phy_config(struct rtl8169_private *tp)
{
	static const struct phy_reg phy_reg_init_0[] = {
		/* Channel Estimation */
		{ 0x1f, 0x0001 },
		{ 0x06, 0x4064 },
		{ 0x07, 0x2863 },
		{ 0x08, 0x059c },
		{ 0x09, 0x26b4 },
		{ 0x0a, 0x6a19 },
		{ 0x0b, 0xdcc8 },
		{ 0x10, 0xf06d },
		{ 0x14, 0x7f68 },
		{ 0x18, 0x7fd9 },
		{ 0x1c, 0xf0ff },
		{ 0x1d, 0x3d9c },
		{ 0x1f, 0x0003 },
		{ 0x12, 0xf49f },
		{ 0x13, 0x070b },
		{ 0x1a, 0x05ad },
		{ 0x14, 0x94c0 },

		/*
		 * Tx Error Issue
		 * Enhance line driver power
		 */
		{ 0x1f, 0x0002 },
		{ 0x06, 0x5561 },
		{ 0x1f, 0x0005 },
		{ 0x05, 0x8332 },
		{ 0x06, 0x5561 },

		/*
		 * Can not link to 1Gbps with bad cable
		 * Decrease SNR threshold form 21.07dB to 19.04dB
		 */
		{ 0x1f, 0x0001 },
		{ 0x17, 0x0cc0 },

		{ 0x1f, 0x0000 },
		{ 0x0d, 0xf880 }
	};
	void __iomem *ioaddr = tp->mmio_addr;

	rtl_writephy_batch(tp, phy_reg_init_0, ARRAY_SIZE(phy_reg_init_0));

	/*
	 * Rx Error Issue
	 * Fine Tune Switching regulator parameter
	 */
	rtl_writephy(tp, 0x1f, 0x0002);
	rtl_w1w0_phy(tp, 0x0b, 0x0010, 0x00ef);
	rtl_w1w0_phy(tp, 0x0c, 0xa200, 0x5d00);

	if (rtl8168d_efuse_read(ioaddr, 0x01) == 0xb1) {
		static const struct phy_reg phy_reg_init[] = {
			{ 0x1f, 0x0002 },
			{ 0x05, 0x669a },
			{ 0x1f, 0x0005 },
			{ 0x05, 0x8330 },
			{ 0x06, 0x669a },
			{ 0x1f, 0x0002 }
		};
		int val;

		rtl_writephy_batch(tp, phy_reg_init, ARRAY_SIZE(phy_reg_init));

		val = rtl_readphy(tp, 0x0d);

		/* Step reg 0x0d's low byte up to 0x6c if it isn't there yet. */
		if ((val & 0x00ff) != 0x006c) {
			static const u32 set[] = {
				0x0065, 0x0066, 0x0067, 0x0068,
				0x0069, 0x006a, 0x006b, 0x006c
			};
			int i;

			rtl_writephy(tp, 0x1f, 0x0002);

			val &= 0xff00;
			for (i = 0; i < ARRAY_SIZE(set); i++)
				rtl_writephy(tp, 0x0d, val | set[i]);
		}
	} else {
		static const struct phy_reg phy_reg_init[] = {
			{ 0x1f, 0x0002 },
			{ 0x05, 0x6662 },
			{ 0x1f, 0x0005 },
			{ 0x05, 0x8330 },
			{ 0x06, 0x6662 }
		};

		rtl_writephy_batch(tp, phy_reg_init, ARRAY_SIZE(phy_reg_init));
	}

	/* RSET couple improve */
	rtl_writephy(tp, 0x1f, 0x0002);
	rtl_patchphy(tp, 0x0d, 0x0300);
	rtl_patchphy(tp, 0x0f, 0x0010);

	/* Fine tune PLL performance */
	rtl_writephy(tp, 0x1f, 0x0002);
	rtl_w1w0_phy(tp, 0x02, 0x0100, 0x0600);
	rtl_w1w0_phy(tp, 0x03, 0x0000, 0xe000);

	rtl_writephy(tp, 0x1f, 0x0005);
	rtl_writephy(tp, 0x05, 0x001b);

	rtl_apply_firmware_cond(tp, MII_EXPANSION, 0xbf00);

	rtl_writephy(tp, 0x1f, 0x0000);
}
2536
/*
 * PHY setup for the RTL8168D rev. 2 (RTL_GIGA_MAC_VER_26).
 *
 * Same structure as rtl8168d_1_hw_phy_config() but with different
 * efuse-selected regulator parameters (0x2642 in the non-0xb1 branch),
 * no Rx-error regulator tweak, and a different firmware signature
 * (0xb300 instead of 0xbf00).
 */
static void rtl8168d_2_hw_phy_config(struct rtl8169_private *tp)
{
	static const struct phy_reg phy_reg_init_0[] = {
		/* Channel Estimation */
		{ 0x1f, 0x0001 },
		{ 0x06, 0x4064 },
		{ 0x07, 0x2863 },
		{ 0x08, 0x059c },
		{ 0x09, 0x26b4 },
		{ 0x0a, 0x6a19 },
		{ 0x0b, 0xdcc8 },
		{ 0x10, 0xf06d },
		{ 0x14, 0x7f68 },
		{ 0x18, 0x7fd9 },
		{ 0x1c, 0xf0ff },
		{ 0x1d, 0x3d9c },
		{ 0x1f, 0x0003 },
		{ 0x12, 0xf49f },
		{ 0x13, 0x070b },
		{ 0x1a, 0x05ad },
		{ 0x14, 0x94c0 },

		/*
		 * Tx Error Issue
		 * Enhance line driver power
		 */
		{ 0x1f, 0x0002 },
		{ 0x06, 0x5561 },
		{ 0x1f, 0x0005 },
		{ 0x05, 0x8332 },
		{ 0x06, 0x5561 },

		/*
		 * Can not link to 1Gbps with bad cable
		 * Decrease SNR threshold form 21.07dB to 19.04dB
		 */
		{ 0x1f, 0x0001 },
		{ 0x17, 0x0cc0 },

		{ 0x1f, 0x0000 },
		{ 0x0d, 0xf880 }
	};
	void __iomem *ioaddr = tp->mmio_addr;

	rtl_writephy_batch(tp, phy_reg_init_0, ARRAY_SIZE(phy_reg_init_0));

	if (rtl8168d_efuse_read(ioaddr, 0x01) == 0xb1) {
		static const struct phy_reg phy_reg_init[] = {
			{ 0x1f, 0x0002 },
			{ 0x05, 0x669a },
			{ 0x1f, 0x0005 },
			{ 0x05, 0x8330 },
			{ 0x06, 0x669a },

			{ 0x1f, 0x0002 }
		};
		int val;

		rtl_writephy_batch(tp, phy_reg_init, ARRAY_SIZE(phy_reg_init));

		val = rtl_readphy(tp, 0x0d);
		/* Step reg 0x0d's low byte up to 0x6c if it isn't there yet. */
		if ((val & 0x00ff) != 0x006c) {
			static const u32 set[] = {
				0x0065, 0x0066, 0x0067, 0x0068,
				0x0069, 0x006a, 0x006b, 0x006c
			};
			int i;

			rtl_writephy(tp, 0x1f, 0x0002);

			val &= 0xff00;
			for (i = 0; i < ARRAY_SIZE(set); i++)
				rtl_writephy(tp, 0x0d, val | set[i]);
		}
	} else {
		static const struct phy_reg phy_reg_init[] = {
			{ 0x1f, 0x0002 },
			{ 0x05, 0x2642 },
			{ 0x1f, 0x0005 },
			{ 0x05, 0x8330 },
			{ 0x06, 0x2642 }
		};

		rtl_writephy_batch(tp, phy_reg_init, ARRAY_SIZE(phy_reg_init));
	}

	/* Fine tune PLL performance */
	rtl_writephy(tp, 0x1f, 0x0002);
	rtl_w1w0_phy(tp, 0x02, 0x0100, 0x0600);
	rtl_w1w0_phy(tp, 0x03, 0x0000, 0xe000);

	/* Switching regulator Slew rate */
	rtl_writephy(tp, 0x1f, 0x0002);
	rtl_patchphy(tp, 0x0f, 0x0017);

	rtl_writephy(tp, 0x1f, 0x0005);
	rtl_writephy(tp, 0x05, 0x001b);

	rtl_apply_firmware_cond(tp, MII_EXPANSION, 0xb300);

	rtl_writephy(tp, 0x1f, 0x0000);
}
2639
/*
 * PHY setup for the RTL8168D rev. 3 (RTL_GIGA_MAC_VER_27): a single
 * vendor-supplied page/register sequence with no efuse dependency.
 */
static void rtl8168d_3_hw_phy_config(struct rtl8169_private *tp)
{
	static const struct phy_reg phy_reg_init[] = {
		{ 0x1f, 0x0002 },
		{ 0x10, 0x0008 },
		{ 0x0d, 0x006c },

		{ 0x1f, 0x0000 },
		{ 0x0d, 0xf880 },

		{ 0x1f, 0x0001 },
		{ 0x17, 0x0cc0 },

		{ 0x1f, 0x0001 },
		{ 0x0b, 0xa4d8 },
		{ 0x09, 0x281c },
		{ 0x07, 0x2883 },
		{ 0x0a, 0x6b35 },
		{ 0x1d, 0x3da4 },
		{ 0x1c, 0xeffd },
		{ 0x14, 0x7f52 },
		{ 0x18, 0x7fc6 },
		{ 0x08, 0x0601 },
		{ 0x06, 0x4063 },
		{ 0x10, 0xf074 },
		{ 0x1f, 0x0003 },
		{ 0x13, 0x0789 },
		{ 0x12, 0xf4bd },
		{ 0x1a, 0x04fd },
		{ 0x14, 0x84b0 },
		{ 0x1f, 0x0000 },
		{ 0x00, 0x9200 },

		{ 0x1f, 0x0005 },
		{ 0x01, 0x0340 },
		{ 0x1f, 0x0001 },
		{ 0x04, 0x4000 },
		{ 0x03, 0x1d21 },
		{ 0x02, 0x0c32 },
		{ 0x01, 0x0200 },
		{ 0x00, 0x5554 },
		{ 0x04, 0x4800 },
		{ 0x04, 0x4000 },
		{ 0x04, 0xf000 },
		{ 0x03, 0xdf01 },
		{ 0x02, 0xdf20 },
		{ 0x01, 0x101a },
		{ 0x00, 0xa0ff },
		{ 0x04, 0xf800 },
		{ 0x04, 0xf000 },
		{ 0x1f, 0x0000 },

		{ 0x1f, 0x0007 },
		{ 0x1e, 0x0023 },
		{ 0x16, 0x0000 },
		{ 0x1f, 0x0000 }
	};

	rtl_writephy_batch(tp, phy_reg_init, ARRAY_SIZE(phy_reg_init));
}
2700
/*
 * PHY setup for the RTL8168D rev. 4 (RTL_GIGA_MAC_VER_28): short init
 * sequence followed by setting bit 5 of reg 0x0d.
 */
static void rtl8168d_4_hw_phy_config(struct rtl8169_private *tp)
{
	static const struct phy_reg phy_reg_init[] = {
		{ 0x1f, 0x0001 },
		{ 0x17, 0x0cc0 },

		{ 0x1f, 0x0007 },
		{ 0x1e, 0x002d },
		{ 0x18, 0x0040 },
		{ 0x1f, 0x0000 }
	};

	rtl_writephy_batch(tp, phy_reg_init, ARRAY_SIZE(phy_reg_init));
	rtl_patchphy(tp, 0x0d, 1 << 5);
}
2716
/*
 * PHY setup for the RTL8168E rev. 1 (VER_32/VER_33): loads PHY
 * firmware first, then applies the init table and a series of
 * read-modify-write tweaks (rtl_w1w0_phy sets/clears bit masks).
 * The existing section comments describe each vendor tweak.
 */
static void rtl8168e_1_hw_phy_config(struct rtl8169_private *tp)
{
	static const struct phy_reg phy_reg_init[] = {
		/* Enable Delay cap */
		{ 0x1f, 0x0005 },
		{ 0x05, 0x8b80 },
		{ 0x06, 0xc896 },
		{ 0x1f, 0x0000 },

		/* Channel estimation fine tune */
		{ 0x1f, 0x0001 },
		{ 0x0b, 0x6c20 },
		{ 0x07, 0x2872 },
		{ 0x1c, 0xefff },
		{ 0x1f, 0x0003 },
		{ 0x14, 0x6420 },
		{ 0x1f, 0x0000 },

		/* Update PFM & 10M TX idle timer */
		{ 0x1f, 0x0007 },
		{ 0x1e, 0x002f },
		{ 0x15, 0x1919 },
		{ 0x1f, 0x0000 },

		{ 0x1f, 0x0007 },
		{ 0x1e, 0x00ac },
		{ 0x18, 0x0006 },
		{ 0x1f, 0x0000 }
	};

	rtl_apply_firmware(tp);

	rtl_writephy_batch(tp, phy_reg_init, ARRAY_SIZE(phy_reg_init));

	/* DCO enable for 10M IDLE Power */
	rtl_writephy(tp, 0x1f, 0x0007);
	rtl_writephy(tp, 0x1e, 0x0023);
	rtl_w1w0_phy(tp, 0x17, 0x0006, 0x0000);
	rtl_writephy(tp, 0x1f, 0x0000);

	/* For impedance matching */
	rtl_writephy(tp, 0x1f, 0x0002);
	rtl_w1w0_phy(tp, 0x08, 0x8000, 0x7f00);
	rtl_writephy(tp, 0x1f, 0x0000);

	/* PHY auto speed down */
	rtl_writephy(tp, 0x1f, 0x0007);
	rtl_writephy(tp, 0x1e, 0x002d);
	rtl_w1w0_phy(tp, 0x18, 0x0050, 0x0000);
	rtl_writephy(tp, 0x1f, 0x0000);
	rtl_w1w0_phy(tp, 0x14, 0x8000, 0x0000);

	rtl_writephy(tp, 0x1f, 0x0005);
	rtl_writephy(tp, 0x05, 0x8b86);
	rtl_w1w0_phy(tp, 0x06, 0x0001, 0x0000);
	rtl_writephy(tp, 0x1f, 0x0000);

	rtl_writephy(tp, 0x1f, 0x0005);
	rtl_writephy(tp, 0x05, 0x8b85);
	rtl_w1w0_phy(tp, 0x06, 0x0000, 0x2000);
	rtl_writephy(tp, 0x1f, 0x0007);
	rtl_writephy(tp, 0x1e, 0x0020);
	rtl_w1w0_phy(tp, 0x15, 0x0000, 0x1100);
	rtl_writephy(tp, 0x1f, 0x0006);
	rtl_writephy(tp, 0x00, 0x5a00);
	rtl_writephy(tp, 0x1f, 0x0000);
	rtl_writephy(tp, 0x0d, 0x0007);
	rtl_writephy(tp, 0x0e, 0x003c);
	rtl_writephy(tp, 0x0d, 0x4007);
	rtl_writephy(tp, 0x0e, 0x0000);
	rtl_writephy(tp, 0x0d, 0x0000);
}
2789
/*
 * PHY setup for the RTL8168E rev. 2 (RTL_GIGA_MAC_VER_34): firmware
 * load, init table, then vendor tweaks including EEE configuration
 * (partly via an extended-GMAC ERI write) and the "green" power
 * feature bits.  The existing section comments label each tweak.
 */
static void rtl8168e_2_hw_phy_config(struct rtl8169_private *tp)
{
	static const struct phy_reg phy_reg_init[] = {
		/* Enable Delay cap */
		{ 0x1f, 0x0004 },
		{ 0x1f, 0x0007 },
		{ 0x1e, 0x00ac },
		{ 0x18, 0x0006 },
		{ 0x1f, 0x0002 },
		{ 0x1f, 0x0000 },
		{ 0x1f, 0x0000 },

		/* Channel estimation fine tune */
		{ 0x1f, 0x0003 },
		{ 0x09, 0xa20f },
		{ 0x1f, 0x0000 },
		{ 0x1f, 0x0000 },

		/* Green Setting */
		{ 0x1f, 0x0005 },
		{ 0x05, 0x8b5b },
		{ 0x06, 0x9222 },
		{ 0x05, 0x8b6d },
		{ 0x06, 0x8000 },
		{ 0x05, 0x8b76 },
		{ 0x06, 0x8000 },
		{ 0x1f, 0x0000 }
	};

	rtl_apply_firmware(tp);

	rtl_writephy_batch(tp, phy_reg_init, ARRAY_SIZE(phy_reg_init));

	/* For 4-corner performance improve */
	rtl_writephy(tp, 0x1f, 0x0005);
	rtl_writephy(tp, 0x05, 0x8b80);
	rtl_w1w0_phy(tp, 0x17, 0x0006, 0x0000);
	rtl_writephy(tp, 0x1f, 0x0000);

	/* PHY auto speed down */
	rtl_writephy(tp, 0x1f, 0x0004);
	rtl_writephy(tp, 0x1f, 0x0007);
	rtl_writephy(tp, 0x1e, 0x002d);
	rtl_w1w0_phy(tp, 0x18, 0x0010, 0x0000);
	rtl_writephy(tp, 0x1f, 0x0002);
	rtl_writephy(tp, 0x1f, 0x0000);
	rtl_w1w0_phy(tp, 0x14, 0x8000, 0x0000);

	/* improve 10M EEE waveform */
	rtl_writephy(tp, 0x1f, 0x0005);
	rtl_writephy(tp, 0x05, 0x8b86);
	rtl_w1w0_phy(tp, 0x06, 0x0001, 0x0000);
	rtl_writephy(tp, 0x1f, 0x0000);

	/* Improve 2-pair detection performance */
	rtl_writephy(tp, 0x1f, 0x0005);
	rtl_writephy(tp, 0x05, 0x8b85);
	rtl_w1w0_phy(tp, 0x06, 0x4000, 0x0000);
	rtl_writephy(tp, 0x1f, 0x0000);

	/* EEE setting */
	rtl_w1w0_eri(tp->mmio_addr, 0x1b0, ERIAR_MASK_1111, 0x0000, 0x0003,
		     ERIAR_EXGMAC);
	rtl_writephy(tp, 0x1f, 0x0005);
	rtl_writephy(tp, 0x05, 0x8b85);
	rtl_w1w0_phy(tp, 0x06, 0x0000, 0x2000);
	rtl_writephy(tp, 0x1f, 0x0004);
	rtl_writephy(tp, 0x1f, 0x0007);
	rtl_writephy(tp, 0x1e, 0x0020);
	rtl_w1w0_phy(tp, 0x06, 0x0000, 0x0100);
	rtl_writephy(tp, 0x1f, 0x0002);
	rtl_writephy(tp, 0x1f, 0x0000);
	rtl_writephy(tp, 0x0d, 0x0007);
	rtl_writephy(tp, 0x0e, 0x003c);
	rtl_writephy(tp, 0x0d, 0x4007);
	rtl_writephy(tp, 0x0e, 0x0000);
	rtl_writephy(tp, 0x0d, 0x0000);

	/* Green feature */
	rtl_writephy(tp, 0x1f, 0x0003);
	rtl_w1w0_phy(tp, 0x19, 0x0000, 0x0001);
	rtl_w1w0_phy(tp, 0x10, 0x0000, 0x0400);
	rtl_writephy(tp, 0x1f, 0x0000);
}
2874
/*
 * PHY setup for the RTL8102E family (VER_07/08/09): set single bits
 * in regs 0x11, 0x19 and 0x10 on page 0, then apply the short init
 * sequence on page 3.
 */
static void rtl8102e_hw_phy_config(struct rtl8169_private *tp)
{
	static const struct phy_reg phy_reg_init[] = {
		{ 0x1f, 0x0003 },
		{ 0x08, 0x441d },
		{ 0x01, 0x9100 },
		{ 0x1f, 0x0000 }
	};

	rtl_writephy(tp, 0x1f, 0x0000);
	rtl_patchphy(tp, 0x11, 1 << 12);
	rtl_patchphy(tp, 0x19, 1 << 13);
	rtl_patchphy(tp, 0x10, 1 << 15);

	rtl_writephy_batch(tp, phy_reg_init, ARRAY_SIZE(phy_reg_init));
}
2891
/*
 * PHY setup for the RTL8105E (VER_29/VER_30): ALDPS must be disabled
 * (reg 0x18 = 0x0310 plus a 100 ms settle) before the firmware "ram
 * code" is loaded, then the init table is applied.
 */
static void rtl8105e_hw_phy_config(struct rtl8169_private *tp)
{
	static const struct phy_reg phy_reg_init[] = {
		{ 0x1f, 0x0005 },
		{ 0x1a, 0x0000 },
		{ 0x1f, 0x0000 },

		{ 0x1f, 0x0004 },
		{ 0x1c, 0x0000 },
		{ 0x1f, 0x0000 },

		{ 0x1f, 0x0001 },
		{ 0x15, 0x7701 },
		{ 0x1f, 0x0000 }
	};

	/* Disable ALDPS before ram code */
	rtl_writephy(tp, 0x1f, 0x0000);
	rtl_writephy(tp, 0x18, 0x0310);
	msleep(100);

	rtl_apply_firmware(tp);

	rtl_writephy_batch(tp, phy_reg_init, ARRAY_SIZE(phy_reg_init));
}
2917
2918static void rtl_hw_phy_config(struct net_device *dev)
2919{
2920 struct rtl8169_private *tp = netdev_priv(dev);
2921
2922 rtl8169_print_mac_version(tp);
2923
2924 switch (tp->mac_version) {
2925 case RTL_GIGA_MAC_VER_01:
2926 break;
2927 case RTL_GIGA_MAC_VER_02:
2928 case RTL_GIGA_MAC_VER_03:
2929 rtl8169s_hw_phy_config(tp);
2930 break;
2931 case RTL_GIGA_MAC_VER_04:
2932 rtl8169sb_hw_phy_config(tp);
2933 break;
2934 case RTL_GIGA_MAC_VER_05:
2935 rtl8169scd_hw_phy_config(tp);
2936 break;
2937 case RTL_GIGA_MAC_VER_06:
2938 rtl8169sce_hw_phy_config(tp);
2939 break;
2940 case RTL_GIGA_MAC_VER_07:
2941 case RTL_GIGA_MAC_VER_08:
2942 case RTL_GIGA_MAC_VER_09:
2943 rtl8102e_hw_phy_config(tp);
2944 break;
2945 case RTL_GIGA_MAC_VER_11:
2946 rtl8168bb_hw_phy_config(tp);
2947 break;
2948 case RTL_GIGA_MAC_VER_12:
2949 rtl8168bef_hw_phy_config(tp);
2950 break;
2951 case RTL_GIGA_MAC_VER_17:
2952 rtl8168bef_hw_phy_config(tp);
2953 break;
2954 case RTL_GIGA_MAC_VER_18:
2955 rtl8168cp_1_hw_phy_config(tp);
2956 break;
2957 case RTL_GIGA_MAC_VER_19:
2958 rtl8168c_1_hw_phy_config(tp);
2959 break;
2960 case RTL_GIGA_MAC_VER_20:
2961 rtl8168c_2_hw_phy_config(tp);
2962 break;
2963 case RTL_GIGA_MAC_VER_21:
2964 rtl8168c_3_hw_phy_config(tp);
2965 break;
2966 case RTL_GIGA_MAC_VER_22:
2967 rtl8168c_4_hw_phy_config(tp);
2968 break;
2969 case RTL_GIGA_MAC_VER_23:
2970 case RTL_GIGA_MAC_VER_24:
2971 rtl8168cp_2_hw_phy_config(tp);
2972 break;
2973 case RTL_GIGA_MAC_VER_25:
2974 rtl8168d_1_hw_phy_config(tp);
2975 break;
2976 case RTL_GIGA_MAC_VER_26:
2977 rtl8168d_2_hw_phy_config(tp);
2978 break;
2979 case RTL_GIGA_MAC_VER_27:
2980 rtl8168d_3_hw_phy_config(tp);
2981 break;
2982 case RTL_GIGA_MAC_VER_28:
2983 rtl8168d_4_hw_phy_config(tp);
2984 break;
2985 case RTL_GIGA_MAC_VER_29:
2986 case RTL_GIGA_MAC_VER_30:
2987 rtl8105e_hw_phy_config(tp);
2988 break;
2989 case RTL_GIGA_MAC_VER_31:
2990 /* None. */
2991 break;
2992 case RTL_GIGA_MAC_VER_32:
2993 case RTL_GIGA_MAC_VER_33:
2994 rtl8168e_1_hw_phy_config(tp);
2995 break;
2996 case RTL_GIGA_MAC_VER_34:
2997 rtl8168e_2_hw_phy_config(tp);
2998 break;
2999
3000 default:
3001 break;
3002 }
3003}
3004
/*
 * Periodic timer callback: while the link is down, keep resetting the
 * PHY until it comes up.  Runs under tp->lock with IRQs disabled; the
 * timer is only re-armed when more work remains (reset pending or
 * link still down).
 */
static void rtl8169_phy_timer(unsigned long __opaque)
{
	struct net_device *dev = (struct net_device *)__opaque;
	struct rtl8169_private *tp = netdev_priv(dev);
	struct timer_list *timer = &tp->timer;
	void __iomem *ioaddr = tp->mmio_addr;
	unsigned long timeout = RTL8169_PHY_TIMEOUT;

	assert(tp->mac_version > RTL_GIGA_MAC_VER_01);

	spin_lock_irq(&tp->lock);

	if (tp->phy_reset_pending(tp)) {
		/*
		 * A busy loop could burn quite a few cycles on nowadays CPU.
		 * Let's delay the execution of the timer for a few ticks.
		 */
		timeout = HZ/10;
		goto out_mod_timer;
	}

	if (tp->link_ok(ioaddr))
		goto out_unlock;

	netif_warn(tp, link, dev, "PHY reset until link up\n");

	tp->phy_reset_enable(tp);

out_mod_timer:
	mod_timer(timer, jiffies + timeout);
out_unlock:
	spin_unlock_irq(&tp->lock);
}
3038
#ifdef CONFIG_NET_POLL_CONTROLLER
/*
 * Polling 'interrupt' - used by things like netconsole to send skbs
 * without having to re-enable interrupts. It's not called while
 * the interrupt routine is executing.
 */
static void rtl8169_netpoll(struct net_device *dev)
{
	struct rtl8169_private *tp = netdev_priv(dev);
	struct pci_dev *pdev = tp->pci_dev;

	/* Mask the IRQ line so the real handler cannot race with us. */
	disable_irq(pdev->irq);
	rtl8169_interrupt(pdev->irq, dev);
	enable_irq(pdev->irq);
}
#endif
3055
/*
 * Undo probe-time board setup in reverse order of acquisition:
 * unmap MMIO, release PCI regions, clear MWI, disable the device,
 * and finally free the netdev itself.
 */
static void rtl8169_release_board(struct pci_dev *pdev, struct net_device *dev,
				  void __iomem *ioaddr)
{
	iounmap(ioaddr);
	pci_release_regions(pdev);
	pci_clear_mwi(pdev);
	pci_disable_device(pdev);
	free_netdev(dev);
}
3065
3066static void rtl8169_phy_reset(struct net_device *dev,
3067 struct rtl8169_private *tp)
3068{
3069 unsigned int i;
3070
3071 tp->phy_reset_enable(tp);
3072 for (i = 0; i < 100; i++) {
3073 if (!tp->phy_reset_pending(tp))
3074 return;
3075 msleep(1);
3076 }
3077 netif_err(tp, link, dev, "PHY reset failed\n");
3078}
3079
/*
 * Full PHY bring-up at open time: chip-specific quirks, PCI latency /
 * cache-line tuning for old (<= VER_06) chips, a PHY reset, and an
 * autoneg restart advertising all rates the PHY supports.
 */
static void rtl8169_init_phy(struct net_device *dev, struct rtl8169_private *tp)
{
	void __iomem *ioaddr = tp->mmio_addr;

	rtl_hw_phy_config(dev);

	if (tp->mac_version <= RTL_GIGA_MAC_VER_06) {
		dprintk("Set MAC Reg C+CR Offset 0x82h = 0x01h\n");
		RTL_W8(0x82, 0x01);
	}

	pci_write_config_byte(tp->pci_dev, PCI_LATENCY_TIMER, 0x40);

	if (tp->mac_version <= RTL_GIGA_MAC_VER_06)
		pci_write_config_byte(tp->pci_dev, PCI_CACHE_LINE_SIZE, 0x08);

	if (tp->mac_version == RTL_GIGA_MAC_VER_02) {
		dprintk("Set MAC Reg C+CR Offset 0x82h = 0x01h\n");
		RTL_W8(0x82, 0x01);
		dprintk("Set PHY Reg 0x0bh = 0x00h\n");
		rtl_writephy(tp, 0x0b, 0x0000); //w 0x0b 15 0 0
	}

	rtl8169_phy_reset(dev, tp);

	/* Advertise gigabit rates only when the PHY supports GMII. */
	rtl8169_set_speed(dev, AUTONEG_ENABLE, SPEED_1000, DUPLEX_FULL,
			  ADVERTISED_10baseT_Half | ADVERTISED_10baseT_Full |
			  ADVERTISED_100baseT_Half | ADVERTISED_100baseT_Full |
			  (tp->mii.supports_gmii ?
			   ADVERTISED_1000baseT_Half |
			   ADVERTISED_1000baseT_Full : 0));

	if (RTL_R8(PHYstatus) & TBI_Enable)
		netif_info(tp, link, dev, "TBI auto-negotiating\n");
}
3115
/*
 * Program the unicast MAC address filter registers (MAC0/MAC4) from a
 * 6-byte address.  Requires unlocking the config registers (Cfg9346)
 * and, on VER_34, mirroring the address into the extended-GMAC
 * registers as well.  Serialised against the IRQ path via tp->lock.
 */
static void rtl_rar_set(struct rtl8169_private *tp, u8 *addr)
{
	void __iomem *ioaddr = tp->mmio_addr;
	u32 high;
	u32 low;

	/* Pack the address little-endian: bytes 0-3 in low, 4-5 in high. */
	low  = addr[0] | (addr[1] << 8) | (addr[2] << 16) | (addr[3] << 24);
	high = addr[4] | (addr[5] << 8);

	spin_lock_irq(&tp->lock);

	RTL_W8(Cfg9346, Cfg9346_Unlock);

	/* Reads after each write flush the posted MMIO write. */
	RTL_W32(MAC4, high);
	RTL_R32(MAC4);

	RTL_W32(MAC0, low);
	RTL_R32(MAC0);

	if (tp->mac_version == RTL_GIGA_MAC_VER_34) {
		const struct exgmac_reg e[] = {
			{ .addr = 0xe0, ERIAR_MASK_1111, .val = low },
			{ .addr = 0xe4, ERIAR_MASK_1111, .val = high },
			{ .addr = 0xf0, ERIAR_MASK_1111, .val = low << 16 },
			{ .addr = 0xf4, ERIAR_MASK_1111, .val = high << 16 |
								low  >> 16 },
		};

		rtl_write_exgmac_batch(ioaddr, e, ARRAY_SIZE(e));
	}

	RTL_W8(Cfg9346, Cfg9346_Lock);

	spin_unlock_irq(&tp->lock);
}
3151
3152static int rtl_set_mac_address(struct net_device *dev, void *p)
3153{
3154 struct rtl8169_private *tp = netdev_priv(dev);
3155 struct sockaddr *addr = p;
3156
3157 if (!is_valid_ether_addr(addr->sa_data))
3158 return -EADDRNOTAVAIL;
3159
3160 memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
3161
3162 rtl_rar_set(tp, dev->dev_addr);
3163
3164 return 0;
3165}
3166
3167static int rtl8169_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
3168{
3169 struct rtl8169_private *tp = netdev_priv(dev);
3170 struct mii_ioctl_data *data = if_mii(ifr);
3171
3172 return netif_running(dev) ? tp->do_ioctl(tp, data, cmd) : -ENODEV;
3173}
3174
3175static int rtl_xmii_ioctl(struct rtl8169_private *tp,
3176 struct mii_ioctl_data *data, int cmd)
3177{
3178 switch (cmd) {
3179 case SIOCGMIIPHY:
3180 data->phy_id = 32; /* Internal PHY */
3181 return 0;
3182
3183 case SIOCGMIIREG:
3184 data->val_out = rtl_readphy(tp, data->reg_num & 0x1f);
3185 return 0;
3186
3187 case SIOCSMIIREG:
3188 rtl_writephy(tp, data->reg_num & 0x1f, data->val_in);
3189 return 0;
3190 }
3191 return -EOPNOTSUPP;
3192}
3193
/* TBI-attached chips have no MII to expose: all MII ioctls fail. */
static int rtl_tbi_ioctl(struct rtl8169_private *tp, struct mii_ioctl_data *data, int cmd)
{
	return -EOPNOTSUPP;
}
3198
/*
 * Per-family configuration table, indexed by RTL_CFG_* from the PCI
 * ID table: hw_start hook, BAR number, Rx buffer alignment, interrupt
 * masks for the IRQ and NAPI paths, feature flags, and the MAC
 * version to fall back on when detection fails.
 */
static const struct rtl_cfg_info {
	void (*hw_start)(struct net_device *);
	unsigned int region;	/* PCI BAR holding the MMIO registers */
	unsigned int align;	/* Rx buffer alignment requirement */
	u16 intr_event;
	u16 napi_event;
	unsigned features;
	u8 default_ver;		/* mac_version used when probe can't tell */
} rtl_cfg_infos [] = {
	[RTL_CFG_0] = {
		.hw_start	= rtl_hw_start_8169,
		.region		= 1,
		.align		= 0,
		.intr_event	= SYSErr | LinkChg | RxOverflow |
				  RxFIFOOver | TxErr | TxOK | RxOK | RxErr,
		.napi_event	= RxFIFOOver | TxErr | TxOK | RxOK | RxOverflow,
		.features	= RTL_FEATURE_GMII,
		.default_ver	= RTL_GIGA_MAC_VER_01,
	},
	[RTL_CFG_1] = {
		.hw_start	= rtl_hw_start_8168,
		.region		= 2,
		.align		= 8,
		.intr_event	= SYSErr | LinkChg | RxOverflow |
				  TxErr | TxOK | RxOK | RxErr,
		.napi_event	= TxErr | TxOK | RxOK | RxOverflow,
		.features	= RTL_FEATURE_GMII | RTL_FEATURE_MSI,
		.default_ver	= RTL_GIGA_MAC_VER_11,
	},
	[RTL_CFG_2] = {
		.hw_start	= rtl_hw_start_8101,
		.region		= 2,
		.align		= 8,
		.intr_event	= SYSErr | LinkChg | RxOverflow | PCSTimeout |
				  RxFIFOOver | TxErr | TxOK | RxOK | RxErr,
		.napi_event	= RxFIFOOver | TxErr | TxOK | RxOK | RxOverflow,
		.features	= RTL_FEATURE_MSI,
		.default_ver	= RTL_GIGA_MAC_VER_13,
	}
};
3239
/*
 * Try to enable MSI when the config supports it, mirroring the result
 * into the chip's Config2 MSIEnable bit.  Falls back to INTx (legacy
 * interrupts) when pci_enable_msi() fails.  Returns RTL_FEATURE_MSI
 * on success, 0 otherwise.
 *
 * Cfg9346_Unlock assumed.
 */
static unsigned rtl_try_msi(struct pci_dev *pdev, void __iomem *ioaddr,
			    const struct rtl_cfg_info *cfg)
{
	unsigned msi = 0;
	u8 cfg2;

	cfg2 = RTL_R8(Config2) & ~MSIEnable;
	if (cfg->features & RTL_FEATURE_MSI) {
		if (pci_enable_msi(pdev)) {
			dev_info(&pdev->dev, "no MSI. Back to INTx.\n");
		} else {
			cfg2 |= MSIEnable;
			msi = RTL_FEATURE_MSI;
		}
	}
	/* Written unconditionally so a previously-set bit gets cleared. */
	RTL_W8(Config2, cfg2);
	return msi;
}
3259
3260static void rtl_disable_msi(struct pci_dev *pdev, struct rtl8169_private *tp)
3261{
3262 if (tp->features & RTL_FEATURE_MSI) {
3263 pci_disable_msi(pdev);
3264 tp->features &= ~RTL_FEATURE_MSI;
3265 }
3266}
3267
/* net_device_ops shared by all chip variants handled by this driver. */
static const struct net_device_ops rtl8169_netdev_ops = {
	.ndo_open		= rtl8169_open,
	.ndo_stop		= rtl8169_close,
	.ndo_get_stats		= rtl8169_get_stats,
	.ndo_start_xmit		= rtl8169_start_xmit,
	.ndo_tx_timeout		= rtl8169_tx_timeout,
	.ndo_validate_addr	= eth_validate_addr,
	.ndo_change_mtu		= rtl8169_change_mtu,
	.ndo_fix_features	= rtl8169_fix_features,
	.ndo_set_features	= rtl8169_set_features,
	.ndo_set_mac_address	= rtl_set_mac_address,
	.ndo_do_ioctl		= rtl8169_ioctl,
	.ndo_set_multicast_list	= rtl_set_rx_mode,
#ifdef CONFIG_NET_POLL_CONTROLLER
	.ndo_poll_controller	= rtl8169_netpoll,
#endif

};
3286
/*
 * Select the MDIO accessor pair for this chip: the 8168DP variants
 * (VER_27 and VER_28/VER_31) use indirect access schemes; everything
 * else uses the plain r8169 MDIO registers.
 */
static void __devinit rtl_init_mdio_ops(struct rtl8169_private *tp)
{
	struct mdio_ops *ops = &tp->mdio_ops;

	switch (tp->mac_version) {
	case RTL_GIGA_MAC_VER_27:
		ops->write	= r8168dp_1_mdio_write;
		ops->read	= r8168dp_1_mdio_read;
		break;
	case RTL_GIGA_MAC_VER_28:
	case RTL_GIGA_MAC_VER_31:
		ops->write	= r8168dp_2_mdio_write;
		ops->read	= r8168dp_2_mdio_read;
		break;
	default:
		ops->write	= r8169_mdio_write;
		ops->read	= r8169_mdio_read;
		break;
	}
}
3307
/* Power the RTL810x PHY down via the BMCR power-down bit (page 0). */
static void r810x_phy_power_down(struct rtl8169_private *tp)
{
	rtl_writephy(tp, 0x1f, 0x0000);
	rtl_writephy(tp, MII_BMCR, BMCR_PDOWN);
}
3313
/* Power the RTL810x PHY up: clearing BMCR_PDOWN by writing ANENABLE. */
static void r810x_phy_power_up(struct rtl8169_private *tp)
{
	rtl_writephy(tp, 0x1f, 0x0000);
	rtl_writephy(tp, MII_BMCR, BMCR_ANENABLE);
}
3319
/*
 * PLL power-down hook for RTL810x chips.  If any Wake-on-LAN source
 * is armed the PHY must stay awake (BMCR cleared but not powered
 * down); otherwise the PHY is fully powered down.
 */
static void r810x_pll_power_down(struct rtl8169_private *tp)
{
	if (__rtl8169_get_wol(tp) & WAKE_ANY) {
		rtl_writephy(tp, 0x1f, 0x0000);
		rtl_writephy(tp, MII_BMCR, 0x0000);
		return;
	}

	r810x_phy_power_down(tp);
}
3330
/* PLL power-up hook for RTL810x: just wake the PHY back up. */
static void r810x_pll_power_up(struct rtl8169_private *tp)
{
	r810x_phy_power_up(tp);
}
3335
/*
 * Power the RTL8168 PHY up.  The pre-E versions listed below need
 * reg 0x0e cleared first (undoing the 0x0200 written on power-down);
 * all versions then get BMCR_ANENABLE to clear power-down and
 * restart autoneg.
 */
static void r8168_phy_power_up(struct rtl8169_private *tp)
{
	rtl_writephy(tp, 0x1f, 0x0000);
	switch (tp->mac_version) {
	case RTL_GIGA_MAC_VER_11:
	case RTL_GIGA_MAC_VER_12:
	case RTL_GIGA_MAC_VER_17:
	case RTL_GIGA_MAC_VER_18:
	case RTL_GIGA_MAC_VER_19:
	case RTL_GIGA_MAC_VER_20:
	case RTL_GIGA_MAC_VER_21:
	case RTL_GIGA_MAC_VER_22:
	case RTL_GIGA_MAC_VER_23:
	case RTL_GIGA_MAC_VER_24:
	case RTL_GIGA_MAC_VER_25:
	case RTL_GIGA_MAC_VER_26:
	case RTL_GIGA_MAC_VER_27:
	case RTL_GIGA_MAC_VER_28:
	case RTL_GIGA_MAC_VER_31:
		rtl_writephy(tp, 0x0e, 0x0000);
		break;
	default:
		break;
	}
	rtl_writephy(tp, MII_BMCR, BMCR_ANENABLE);
}
3362
/*
 * Power the RTL8168 PHY down.  8168E (VER_32/33) needs ANENABLE kept
 * alongside PDOWN; the older versions listed additionally write
 * 0x0200 to reg 0x0e and then fall through to the plain BMCR_PDOWN
 * write used by everything else.
 */
static void r8168_phy_power_down(struct rtl8169_private *tp)
{
	rtl_writephy(tp, 0x1f, 0x0000);
	switch (tp->mac_version) {
	case RTL_GIGA_MAC_VER_32:
	case RTL_GIGA_MAC_VER_33:
		rtl_writephy(tp, MII_BMCR, BMCR_ANENABLE | BMCR_PDOWN);
		break;

	case RTL_GIGA_MAC_VER_11:
	case RTL_GIGA_MAC_VER_12:
	case RTL_GIGA_MAC_VER_17:
	case RTL_GIGA_MAC_VER_18:
	case RTL_GIGA_MAC_VER_19:
	case RTL_GIGA_MAC_VER_20:
	case RTL_GIGA_MAC_VER_21:
	case RTL_GIGA_MAC_VER_22:
	case RTL_GIGA_MAC_VER_23:
	case RTL_GIGA_MAC_VER_24:
	case RTL_GIGA_MAC_VER_25:
	case RTL_GIGA_MAC_VER_26:
	case RTL_GIGA_MAC_VER_27:
	case RTL_GIGA_MAC_VER_28:
	case RTL_GIGA_MAC_VER_31:
		rtl_writephy(tp, 0x0e, 0x0200);
		/* fall through - these versions also need BMCR_PDOWN */
	default:
		rtl_writephy(tp, MII_BMCR, BMCR_PDOWN);
		break;
	}
}
3393
/*
 * PLL power-down hook for RTL8168 chips.  Bails out when the 8168DP
 * DASH management firmware is active or when ASF is enabled on
 * VER_23/24 (the management engine needs the link).  With WoL armed,
 * the PHY is kept awake and (on 8168E) broadcast/multicast/unicast
 * receive stays enabled for wake packets; otherwise the PHY and the
 * PLL power bit (PMCH) are shut off.
 */
static void r8168_pll_power_down(struct rtl8169_private *tp)
{
	void __iomem *ioaddr = tp->mmio_addr;

	if ((tp->mac_version == RTL_GIGA_MAC_VER_27 ||
	     tp->mac_version == RTL_GIGA_MAC_VER_28 ||
	     tp->mac_version == RTL_GIGA_MAC_VER_31) &&
	    r8168dp_check_dash(tp)) {
		return;
	}

	if ((tp->mac_version == RTL_GIGA_MAC_VER_23 ||
	     tp->mac_version == RTL_GIGA_MAC_VER_24) &&
	    (RTL_R16(CPlusCmd) & ASF)) {
		return;
	}

	if (tp->mac_version == RTL_GIGA_MAC_VER_32 ||
	    tp->mac_version == RTL_GIGA_MAC_VER_33)
		rtl_ephy_write(ioaddr, 0x19, 0xff64);

	if (__rtl8169_get_wol(tp) & WAKE_ANY) {
		rtl_writephy(tp, 0x1f, 0x0000);
		rtl_writephy(tp, MII_BMCR, 0x0000);

		if (tp->mac_version == RTL_GIGA_MAC_VER_32 ||
		    tp->mac_version == RTL_GIGA_MAC_VER_33)
			RTL_W32(RxConfig, RTL_R32(RxConfig) | AcceptBroadcast |
				AcceptMulticast | AcceptMyPhys);
		return;
	}

	r8168_phy_power_down(tp);

	switch (tp->mac_version) {
	case RTL_GIGA_MAC_VER_25:
	case RTL_GIGA_MAC_VER_26:
	case RTL_GIGA_MAC_VER_27:
	case RTL_GIGA_MAC_VER_28:
	case RTL_GIGA_MAC_VER_31:
	case RTL_GIGA_MAC_VER_32:
	case RTL_GIGA_MAC_VER_33:
		RTL_W8(PMCH, RTL_R8(PMCH) & ~0x80);
		break;
	}
}
3440
/*
 * PLL power-up hook for RTL8168 chips: skipped when DASH firmware is
 * managing the NIC; otherwise re-set the PMCH power bit on versions
 * that have one, then wake the PHY.
 */
static void r8168_pll_power_up(struct rtl8169_private *tp)
{
	void __iomem *ioaddr = tp->mmio_addr;

	if ((tp->mac_version == RTL_GIGA_MAC_VER_27 ||
	     tp->mac_version == RTL_GIGA_MAC_VER_28 ||
	     tp->mac_version == RTL_GIGA_MAC_VER_31) &&
	    r8168dp_check_dash(tp)) {
		return;
	}

	switch (tp->mac_version) {
	case RTL_GIGA_MAC_VER_25:
	case RTL_GIGA_MAC_VER_26:
	case RTL_GIGA_MAC_VER_27:
	case RTL_GIGA_MAC_VER_28:
	case RTL_GIGA_MAC_VER_31:
	case RTL_GIGA_MAC_VER_32:
	case RTL_GIGA_MAC_VER_33:
		RTL_W8(PMCH, RTL_R8(PMCH) | 0x80);
		break;
	}

	r8168_phy_power_up(tp);
}
3466
/*
 * Invoke a PLL power hook; chip versions without one have NULL hooks,
 * which is treated as a no-op.
 */
static void rtl_pll_power_op(struct rtl8169_private *tp,
			     void (*op)(struct rtl8169_private *))
{
	if (!op)
		return;

	op(tp);
}
3473
3474static void rtl_pll_power_down(struct rtl8169_private *tp)
3475{
3476 rtl_pll_power_op(tp, tp->pll_power_ops.down);
3477}
3478
3479static void rtl_pll_power_up(struct rtl8169_private *tp)
3480{
3481 rtl_pll_power_op(tp, tp->pll_power_ops.up);
3482}
3483
/*
 * Install the PLL power hooks for this chip: the RTL810x family gets
 * the r810x pair, the RTL8168 family the r8168 pair, and the original
 * 8169 versions get no hooks (NULL => no-op via rtl_pll_power_op()).
 */
static void __devinit rtl_init_pll_power_ops(struct rtl8169_private *tp)
{
	struct pll_power_ops *ops = &tp->pll_power_ops;

	switch (tp->mac_version) {
	case RTL_GIGA_MAC_VER_07:
	case RTL_GIGA_MAC_VER_08:
	case RTL_GIGA_MAC_VER_09:
	case RTL_GIGA_MAC_VER_10:
	case RTL_GIGA_MAC_VER_16:
	case RTL_GIGA_MAC_VER_29:
	case RTL_GIGA_MAC_VER_30:
		ops->down	= r810x_pll_power_down;
		ops->up		= r810x_pll_power_up;
		break;

	case RTL_GIGA_MAC_VER_11:
	case RTL_GIGA_MAC_VER_12:
	case RTL_GIGA_MAC_VER_17:
	case RTL_GIGA_MAC_VER_18:
	case RTL_GIGA_MAC_VER_19:
	case RTL_GIGA_MAC_VER_20:
	case RTL_GIGA_MAC_VER_21:
	case RTL_GIGA_MAC_VER_22:
	case RTL_GIGA_MAC_VER_23:
	case RTL_GIGA_MAC_VER_24:
	case RTL_GIGA_MAC_VER_25:
	case RTL_GIGA_MAC_VER_26:
	case RTL_GIGA_MAC_VER_27:
	case RTL_GIGA_MAC_VER_28:
	case RTL_GIGA_MAC_VER_31:
	case RTL_GIGA_MAC_VER_32:
	case RTL_GIGA_MAC_VER_33:
	case RTL_GIGA_MAC_VER_34:
		ops->down	= r8168_pll_power_down;
		ops->up		= r8168_pll_power_up;
		break;

	default:
		ops->down	= NULL;
		ops->up		= NULL;
		break;
	}
}
3528
/*
 * Program the baseline RxConfig value for the detected chip version.
 * Three groups are distinguished: the early chips take a FIFO threshold,
 * the VER_18..24 group additionally enables multi-descriptor receive,
 * and everything newer uses the 128-byte interrupt coalescing setting.
 */
static void rtl_init_rxcfg(struct rtl8169_private *tp)
{
	void __iomem *ioaddr = tp->mmio_addr;

	switch (tp->mac_version) {
	case RTL_GIGA_MAC_VER_01:
	case RTL_GIGA_MAC_VER_02:
	case RTL_GIGA_MAC_VER_03:
	case RTL_GIGA_MAC_VER_04:
	case RTL_GIGA_MAC_VER_05:
	case RTL_GIGA_MAC_VER_06:
	case RTL_GIGA_MAC_VER_10:
	case RTL_GIGA_MAC_VER_11:
	case RTL_GIGA_MAC_VER_12:
	case RTL_GIGA_MAC_VER_13:
	case RTL_GIGA_MAC_VER_14:
	case RTL_GIGA_MAC_VER_15:
	case RTL_GIGA_MAC_VER_16:
	case RTL_GIGA_MAC_VER_17:
		RTL_W32(RxConfig, RX_FIFO_THRESH | RX_DMA_BURST);
		break;
	case RTL_GIGA_MAC_VER_18:
	case RTL_GIGA_MAC_VER_19:
	case RTL_GIGA_MAC_VER_20:
	case RTL_GIGA_MAC_VER_21:
	case RTL_GIGA_MAC_VER_22:
	case RTL_GIGA_MAC_VER_23:
	case RTL_GIGA_MAC_VER_24:
		RTL_W32(RxConfig, RX128_INT_EN | RX_MULTI_EN | RX_DMA_BURST);
		break;
	default:
		RTL_W32(RxConfig, RX128_INT_EN | RX_DMA_BURST);
		break;
	}
}
3564
3565static void rtl8169_init_ring_indexes(struct rtl8169_private *tp)
3566{
3567 tp->dirty_tx = tp->dirty_rx = tp->cur_tx = tp->cur_rx = 0;
3568}
3569
/*
 * Issue a chip soft reset, wait for it to complete and rewind the ring
 * indexes to match the freshly reset hardware.  The completion poll is
 * bounded: at most 100 iterations of 100us each (~10ms total).
 */
static void rtl_hw_reset(struct rtl8169_private *tp)
{
	void __iomem *ioaddr = tp->mmio_addr;
	int i;

	/* Soft reset the chip. */
	RTL_W8(ChipCmd, CmdReset);

	/* Check that the chip has finished the reset. */
	for (i = 0; i < 100; i++) {
		if ((RTL_R8(ChipCmd) & CmdReset) == 0)
			break;
		udelay(100);
	}

	rtl8169_init_ring_indexes(tp);
}
3587
/*
 * PCI probe entry point: allocate the net_device, bring up the PCI
 * function, map the register BAR, identify the chip, latch WoL/MSI
 * configuration, select TBI or XMII operations and register the netdev.
 *
 * Returns 0 on success or a negative errno; on failure every resource
 * acquired so far is released via the numbered goto ladder at the end.
 */
static int __devinit
rtl8169_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
{
	const struct rtl_cfg_info *cfg = rtl_cfg_infos + ent->driver_data;
	const unsigned int region = cfg->region;
	struct rtl8169_private *tp;
	struct mii_if_info *mii;
	struct net_device *dev;
	void __iomem *ioaddr;
	int chipset, i;
	int rc;

	if (netif_msg_drv(&debug)) {
		printk(KERN_INFO "%s Gigabit Ethernet driver %s loaded\n",
		       MODULENAME, RTL8169_VERSION);
	}

	dev = alloc_etherdev(sizeof (*tp));
	if (!dev) {
		if (netif_msg_drv(&debug))
			dev_err(&pdev->dev, "unable to alloc new ethernet\n");
		rc = -ENOMEM;
		goto out;
	}

	SET_NETDEV_DEV(dev, &pdev->dev);
	dev->netdev_ops = &rtl8169_netdev_ops;
	tp = netdev_priv(dev);
	tp->dev = dev;
	tp->pci_dev = pdev;
	tp->msg_enable = netif_msg_init(debug.msg_enable, R8169_MSG_DEFAULT);

	/* Wire up the generic MII helper used by the ethtool/ioctl paths. */
	mii = &tp->mii;
	mii->dev = dev;
	mii->mdio_read = rtl_mdio_read;
	mii->mdio_write = rtl_mdio_write;
	mii->phy_id_mask = 0x1f;
	mii->reg_num_mask = 0x1f;
	mii->supports_gmii = !!(cfg->features & RTL_FEATURE_GMII);

	/* disable ASPM completely as that cause random device stop working
	 * problems as well as full system hangs for some PCIe devices users */
	pci_disable_link_state(pdev, PCIE_LINK_STATE_L0S | PCIE_LINK_STATE_L1 |
				     PCIE_LINK_STATE_CLKPM);

	/* enable device (incl. PCI PM wakeup and hotplug setup) */
	rc = pci_enable_device(pdev);
	if (rc < 0) {
		netif_err(tp, probe, dev, "enable failure\n");
		goto err_out_free_dev_1;
	}

	/* Memory-Write-Invalidate is best effort only. */
	if (pci_set_mwi(pdev) < 0)
		netif_info(tp, probe, dev, "Mem-Wr-Inval unavailable\n");

	/* make sure PCI base addr 1 is MMIO */
	if (!(pci_resource_flags(pdev, region) & IORESOURCE_MEM)) {
		netif_err(tp, probe, dev,
			  "region #%d not an MMIO resource, aborting\n",
			  region);
		rc = -ENODEV;
		goto err_out_mwi_2;
	}

	/* check for weird/broken PCI region reporting */
	if (pci_resource_len(pdev, region) < R8169_REGS_SIZE) {
		netif_err(tp, probe, dev,
			  "Invalid PCI region size(s), aborting\n");
		rc = -ENODEV;
		goto err_out_mwi_2;
	}

	rc = pci_request_regions(pdev, MODULENAME);
	if (rc < 0) {
		netif_err(tp, probe, dev, "could not request regions\n");
		goto err_out_mwi_2;
	}

	tp->cp_cmd = RxChkSum;

	/* Prefer 64-bit DMA (with PCIDAC) when available and requested. */
	if ((sizeof(dma_addr_t) > 4) &&
	    !pci_set_dma_mask(pdev, DMA_BIT_MASK(64)) && use_dac) {
		tp->cp_cmd |= PCIDAC;
		dev->features |= NETIF_F_HIGHDMA;
	} else {
		rc = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
		if (rc < 0) {
			netif_err(tp, probe, dev, "DMA configuration failed\n");
			goto err_out_free_res_3;
		}
	}

	/* ioremap MMIO region */
	ioaddr = ioremap(pci_resource_start(pdev, region), R8169_REGS_SIZE);
	if (!ioaddr) {
		netif_err(tp, probe, dev, "cannot remap MMIO, aborting\n");
		rc = -EIO;
		goto err_out_free_res_3;
	}
	tp->mmio_addr = ioaddr;

	if (!pci_is_pcie(pdev))
		netif_info(tp, probe, dev, "not PCI Express\n");

	/* Identify chip attached to board */
	rtl8169_get_mac_version(tp, dev, cfg->default_ver);

	rtl_init_rxcfg(tp);

	/* Quiesce the chip before registering anything. */
	RTL_W16(IntrMask, 0x0000);

	rtl_hw_reset(tp);

	RTL_W16(IntrStatus, 0xffff);

	pci_set_master(pdev);

	/*
	 * Pretend we are using VLANs; This bypasses a nasty bug where
	 * Interrupts stop flowing on high load on 8110SCd controllers.
	 */
	if (tp->mac_version == RTL_GIGA_MAC_VER_05)
		tp->cp_cmd |= RxVlan;

	rtl_init_mdio_ops(tp);
	rtl_init_pll_power_ops(tp);

	rtl8169_print_mac_version(tp);

	chipset = tp->mac_version;
	tp->txd_version = rtl_chip_infos[chipset].txd_version;

	/* Read WoL capability and probe MSI with the config lock released. */
	RTL_W8(Cfg9346, Cfg9346_Unlock);
	RTL_W8(Config1, RTL_R8(Config1) | PMEnable);
	RTL_W8(Config5, RTL_R8(Config5) & PMEStatus);
	if ((RTL_R8(Config3) & (LinkUp | MagicPacket)) != 0)
		tp->features |= RTL_FEATURE_WOL;
	if ((RTL_R8(Config5) & (UWF | BWF | MWF)) != 0)
		tp->features |= RTL_FEATURE_WOL;
	tp->features |= rtl_try_msi(pdev, ioaddr, cfg);
	RTL_W8(Cfg9346, Cfg9346_Lock);

	/* Select TBI (fiber) or XMII (copper) operations for this board. */
	if ((tp->mac_version <= RTL_GIGA_MAC_VER_06) &&
	    (RTL_R8(PHYstatus) & TBI_Enable)) {
		tp->set_speed = rtl8169_set_speed_tbi;
		tp->get_settings = rtl8169_gset_tbi;
		tp->phy_reset_enable = rtl8169_tbi_reset_enable;
		tp->phy_reset_pending = rtl8169_tbi_reset_pending;
		tp->link_ok = rtl8169_tbi_link_ok;
		tp->do_ioctl = rtl_tbi_ioctl;
	} else {
		tp->set_speed = rtl8169_set_speed_xmii;
		tp->get_settings = rtl8169_gset_xmii;
		tp->phy_reset_enable = rtl8169_xmii_reset_enable;
		tp->phy_reset_pending = rtl8169_xmii_reset_pending;
		tp->link_ok = rtl8169_xmii_link_ok;
		tp->do_ioctl = rtl_xmii_ioctl;
	}

	spin_lock_init(&tp->lock);

	/* Get MAC address */
	for (i = 0; i < MAC_ADDR_LEN; i++)
		dev->dev_addr[i] = RTL_R8(MAC0 + i);
	memcpy(dev->perm_addr, dev->dev_addr, dev->addr_len);

	SET_ETHTOOL_OPS(dev, &rtl8169_ethtool_ops);
	dev->watchdog_timeo = RTL8169_TX_TIMEOUT;
	dev->irq = pdev->irq;
	dev->base_addr = (unsigned long) ioaddr;

	netif_napi_add(dev, &tp->napi, rtl8169_poll, R8169_NAPI_WEIGHT);

	/* don't enable SG, IP_CSUM and TSO by default - it might not work
	 * properly for all devices */
	dev->features |= NETIF_F_RXCSUM |
		NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX;

	dev->hw_features = NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_TSO |
		NETIF_F_RXCSUM | NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX;
	dev->vlan_features = NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_TSO |
		NETIF_F_HIGHDMA;

	if (tp->mac_version == RTL_GIGA_MAC_VER_05)
		/* 8110SCd requires hardware Rx VLAN - disallow toggling */
		dev->hw_features &= ~NETIF_F_HW_VLAN_RX;

	tp->intr_mask = 0xffff;
	tp->hw_start = cfg->hw_start;
	tp->intr_event = cfg->intr_event;
	tp->napi_event = cfg->napi_event;

	init_timer(&tp->timer);
	tp->timer.data = (unsigned long) dev;
	tp->timer.function = rtl8169_phy_timer;

	tp->rtl_fw = RTL_FIRMWARE_UNKNOWN;

	rc = register_netdev(dev);
	if (rc < 0)
		goto err_out_msi_4;

	pci_set_drvdata(pdev, dev);

	netif_info(tp, probe, dev, "%s at 0x%lx, %pM, XID %08x IRQ %d\n",
		   rtl_chip_infos[chipset].name, dev->base_addr, dev->dev_addr,
		   (u32)(RTL_R32(TxConfig) & 0x9cf0f8ff), dev->irq);

	/* Hand the device over to the firmware on DP/D-class chips. */
	if (tp->mac_version == RTL_GIGA_MAC_VER_27 ||
	    tp->mac_version == RTL_GIGA_MAC_VER_28 ||
	    tp->mac_version == RTL_GIGA_MAC_VER_31) {
		rtl8168_driver_start(tp);
	}

	device_set_wakeup_enable(&pdev->dev, tp->features & RTL_FEATURE_WOL);

	if (pci_dev_run_wake(pdev))
		pm_runtime_put_noidle(&pdev->dev);

	netif_carrier_off(dev);

out:
	return rc;

err_out_msi_4:
	rtl_disable_msi(pdev, tp);
	iounmap(ioaddr);
err_out_free_res_3:
	pci_release_regions(pdev);
err_out_mwi_2:
	pci_clear_mwi(pdev);
	pci_disable_device(pdev);
err_out_free_dev_1:
	free_netdev(dev);
	goto out;
}
3824
/*
 * PCI remove entry point: mirror of rtl8169_init_one().  Stops the
 * firmware handover on DP/D-class chips, unregisters the netdev,
 * releases the firmware and undoes the probe-time runtime-PM and MSI
 * setup before freeing the board resources.
 */
static void __devexit rtl8169_remove_one(struct pci_dev *pdev)
{
	struct net_device *dev = pci_get_drvdata(pdev);
	struct rtl8169_private *tp = netdev_priv(dev);

	if (tp->mac_version == RTL_GIGA_MAC_VER_27 ||
	    tp->mac_version == RTL_GIGA_MAC_VER_28 ||
	    tp->mac_version == RTL_GIGA_MAC_VER_31) {
		rtl8168_driver_stop(tp);
	}

	cancel_delayed_work_sync(&tp->task);

	unregister_netdev(dev);

	rtl_release_firmware(tp);

	/* Rebalance the pm_runtime_put_noidle() done at probe time. */
	if (pci_dev_run_wake(pdev))
		pm_runtime_get_noresume(&pdev->dev);

	/* restore original MAC address */
	rtl_rar_set(tp, dev->perm_addr);

	rtl_disable_msi(pdev, tp);
	rtl8169_release_board(pdev, dev, tp->mmio_addr);
	pci_set_drvdata(pdev, NULL);
}
3852
/*
 * Load and validate the chip's PHY firmware patch, storing the result in
 * tp->rtl_fw.  On any failure (no name for this chip, allocation,
 * request_firmware() or validation failure) tp->rtl_fw ends up NULL and,
 * except in the "no firmware name" case, a warning is logged.
 */
static void rtl_request_uncached_firmware(struct rtl8169_private *tp)
{
	struct rtl_fw *rtl_fw;
	const char *name;
	int rc = -ENOMEM;

	name = rtl_lookup_firmware_name(tp);
	if (!name)
		goto out_no_firmware;

	rtl_fw = kzalloc(sizeof(*rtl_fw), GFP_KERNEL);
	if (!rtl_fw)
		goto err_warn;

	rc = request_firmware(&rtl_fw->fw, name, &tp->pci_dev->dev);
	if (rc < 0)
		goto err_free;

	rc = rtl_check_firmware(tp, rtl_fw);
	if (rc < 0)
		goto err_release_firmware;

	tp->rtl_fw = rtl_fw;
out:
	return;

err_release_firmware:
	release_firmware(rtl_fw->fw);
err_free:
	kfree(rtl_fw);
err_warn:
	netif_warn(tp, ifup, tp->dev, "unable to load firmware patch %s (%d)\n",
		   name, rc);
out_no_firmware:
	tp->rtl_fw = NULL;
	goto out;
}
3890
3891static void rtl_request_firmware(struct rtl8169_private *tp)
3892{
3893 if (IS_ERR(tp->rtl_fw))
3894 rtl_request_uncached_firmware(tp);
3895}
3896
/*
 * net_device open callback: allocate the DMA descriptor rings, populate
 * the Rx ring, load firmware, grab the interrupt and start the hardware.
 * Returns 0 or a negative errno; partially acquired resources are rolled
 * back through the goto ladder.
 */
static int rtl8169_open(struct net_device *dev)
{
	struct rtl8169_private *tp = netdev_priv(dev);
	void __iomem *ioaddr = tp->mmio_addr;
	struct pci_dev *pdev = tp->pci_dev;
	int retval = -ENOMEM;

	pm_runtime_get_sync(&pdev->dev);

	/*
	 * Rx and Tx descriptors need 256-byte alignment.
	 * dma_alloc_coherent provides more than that.
	 */
	tp->TxDescArray = dma_alloc_coherent(&pdev->dev, R8169_TX_RING_BYTES,
					     &tp->TxPhyAddr, GFP_KERNEL);
	if (!tp->TxDescArray)
		goto err_pm_runtime_put;

	tp->RxDescArray = dma_alloc_coherent(&pdev->dev, R8169_RX_RING_BYTES,
					     &tp->RxPhyAddr, GFP_KERNEL);
	if (!tp->RxDescArray)
		goto err_free_tx_0;

	retval = rtl8169_init_ring(dev);
	if (retval < 0)
		goto err_free_rx_1;

	INIT_DELAYED_WORK(&tp->task, NULL);

	smp_mb();

	rtl_request_firmware(tp);

	/* IRQ is shared unless MSI was successfully enabled at probe. */
	retval = request_irq(dev->irq, rtl8169_interrupt,
			     (tp->features & RTL_FEATURE_MSI) ? 0 : IRQF_SHARED,
			     dev->name, dev);
	if (retval < 0)
		goto err_release_fw_2;

	napi_enable(&tp->napi);

	rtl8169_init_phy(dev, tp);

	rtl8169_set_features(dev, dev->features);

	rtl_pll_power_up(tp);

	rtl_hw_start(dev);

	tp->saved_wolopts = 0;
	pm_runtime_put_noidle(&pdev->dev);

	rtl8169_check_link_status(dev, tp, ioaddr);
out:
	return retval;

err_release_fw_2:
	rtl_release_firmware(tp);
	rtl8169_rx_clear(tp);
err_free_rx_1:
	dma_free_coherent(&pdev->dev, R8169_RX_RING_BYTES, tp->RxDescArray,
			  tp->RxPhyAddr);
	tp->RxDescArray = NULL;
err_free_tx_0:
	dma_free_coherent(&pdev->dev, R8169_TX_RING_BYTES, tp->TxDescArray,
			  tp->TxPhyAddr);
	tp->TxDescArray = NULL;
err_pm_runtime_put:
	pm_runtime_put_noidle(&pdev->dev);
	goto out;
}
3968
/* Stop accepting packets by clearing all accept bits in RxConfig. */
static void rtl_rx_close(struct rtl8169_private *tp)
{
	void __iomem *ioaddr = tp->mmio_addr;

	RTL_W32(RxConfig, RTL_R32(RxConfig) & ~RX_CONFIG_ACCEPT_MASK);
}
3975
/*
 * Quiesce and reset the chip: mask interrupts, close the receiver, drain
 * pending transmit activity in a chip-specific way, then soft-reset.
 * NOTE(review): the VER_27/28/31 and VER_34 drain loops have no upper
 * bound — they rely on the hardware eventually clearing the bit.
 */
static void rtl8169_hw_reset(struct rtl8169_private *tp)
{
	void __iomem *ioaddr = tp->mmio_addr;

	/* Disable interrupts */
	rtl8169_irq_mask_and_ack(ioaddr);

	rtl_rx_close(tp);

	if (tp->mac_version == RTL_GIGA_MAC_VER_27 ||
	    tp->mac_version == RTL_GIGA_MAC_VER_28 ||
	    tp->mac_version == RTL_GIGA_MAC_VER_31) {
		/* Wait for the pending-queue bit to clear. */
		while (RTL_R8(TxPoll) & NPQ)
			udelay(20);
	} else if (tp->mac_version == RTL_GIGA_MAC_VER_34) {
		/* Wait for the transmit FIFO to drain. */
		while (!(RTL_R32(TxConfig) & TXCFG_EMPTY))
			udelay(100);
	} else {
		RTL_W8(ChipCmd, RTL_R8(ChipCmd) | StopReq);
		udelay(100);
	}

	rtl_hw_reset(tp);
}
4000
/* Program TxConfig with the DMA burst size and interframe gap. */
static void rtl_set_rx_tx_config_registers(struct rtl8169_private *tp)
{
	void __iomem *ioaddr = tp->mmio_addr;

	/* Set DMA burst size and Interframe Gap Time */
	RTL_W32(TxConfig, (TX_DMA_BURST << TxDMAShift) |
		(InterFrameGap << TxInterFrameGapShift));
}
4009
/*
 * Run the chip-family hw_start hook selected at probe time, then open
 * the transmit queue.
 */
static void rtl_hw_start(struct net_device *dev)
{
	struct rtl8169_private *tp = netdev_priv(dev);

	tp->hw_start(dev);

	netif_start_queue(dev);
}
4018
/*
 * Load the 64-bit DMA addresses of the Tx and Rx descriptor rings into
 * the chip.  The high-then-low write order is deliberate — see below.
 */
static void rtl_set_rx_tx_desc_registers(struct rtl8169_private *tp,
					 void __iomem *ioaddr)
{
	/*
	 * Magic spell: some iop3xx ARM board needs the TxDescAddrHigh
	 * register to be written before TxDescAddrLow to work.
	 * Switching from MMIO to I/O access fixes the issue as well.
	 */
	RTL_W32(TxDescStartAddrHigh, ((u64) tp->TxPhyAddr) >> 32);
	RTL_W32(TxDescStartAddrLow, ((u64) tp->TxPhyAddr) & DMA_BIT_MASK(32));
	RTL_W32(RxDescAddrHigh, ((u64) tp->RxPhyAddr) >> 32);
	RTL_W32(RxDescAddrLow, ((u64) tp->RxPhyAddr) & DMA_BIT_MASK(32));
}
4032
/*
 * Read CPlusCmd and write the same value straight back, returning it.
 * NOTE(review): the write-back of an unmodified value looks like a
 * deliberate hardware quirk (latching the register) — confirm before
 * simplifying to a plain read.
 */
static u16 rtl_rw_cpluscmd(void __iomem *ioaddr)
{
	u16 cmd;

	cmd = RTL_R16(CPlusCmd);
	RTL_W16(CPlusCmd, cmd);
	return cmd;
}
4041
/* Set the receive size filter just above the buffer size. */
static void rtl_set_rx_max_size(void __iomem *ioaddr, unsigned int rx_buf_sz)
{
	/* Low hurts. Let's disable the filtering. */
	RTL_W16(RxMaxSize, rx_buf_sz + 1);
}
4047
4048static void rtl8169_set_magic_reg(void __iomem *ioaddr, unsigned mac_version)
4049{
4050 static const struct rtl_cfg2_info {
4051 u32 mac_version;
4052 u32 clk;
4053 u32 val;
4054 } cfg2_info [] = {
4055 { RTL_GIGA_MAC_VER_05, PCI_Clock_33MHz, 0x000fff00 }, // 8110SCd
4056 { RTL_GIGA_MAC_VER_05, PCI_Clock_66MHz, 0x000fffff },
4057 { RTL_GIGA_MAC_VER_06, PCI_Clock_33MHz, 0x00ffff00 }, // 8110SCe
4058 { RTL_GIGA_MAC_VER_06, PCI_Clock_66MHz, 0x00ffffff }
4059 };
4060 const struct rtl_cfg2_info *p = cfg2_info;
4061 unsigned int i;
4062 u32 clk;
4063
4064 clk = RTL_R8(Config2) & PCI_Clock_66MHz;
4065 for (i = 0; i < ARRAY_SIZE(cfg2_info); i++, p++) {
4066 if ((p->mac_version == mac_version) && (p->clk == clk)) {
4067 RTL_W32(0x7c, p->val);
4068 break;
4069 }
4070 }
4071}
4072
/*
 * hw_start hook for the original 8169/8110 family: program RxConfig,
 * CPlusCmd quirks, descriptor ring addresses, the receive filter and
 * finally unmask interrupts.  The ChipCmd enable is done either before
 * or after CPlusCmd depending on chip version — keep the order intact.
 */
static void rtl_hw_start_8169(struct net_device *dev)
{
	struct rtl8169_private *tp = netdev_priv(dev);
	void __iomem *ioaddr = tp->mmio_addr;
	struct pci_dev *pdev = tp->pci_dev;

	if (tp->mac_version == RTL_GIGA_MAC_VER_05) {
		RTL_W16(CPlusCmd, RTL_R16(CPlusCmd) | PCIMulRW);
		pci_write_config_byte(pdev, PCI_CACHE_LINE_SIZE, 0x08);
	}

	RTL_W8(Cfg9346, Cfg9346_Unlock);
	/* The earliest chips enable Tx/Rx before the config writes below. */
	if (tp->mac_version == RTL_GIGA_MAC_VER_01 ||
	    tp->mac_version == RTL_GIGA_MAC_VER_02 ||
	    tp->mac_version == RTL_GIGA_MAC_VER_03 ||
	    tp->mac_version == RTL_GIGA_MAC_VER_04)
		RTL_W8(ChipCmd, CmdTxEnb | CmdRxEnb);

	rtl_init_rxcfg(tp);

	RTL_W8(EarlyTxThres, NoEarlyTx);

	rtl_set_rx_max_size(ioaddr, rx_buf_sz);

	if (tp->mac_version == RTL_GIGA_MAC_VER_01 ||
	    tp->mac_version == RTL_GIGA_MAC_VER_02 ||
	    tp->mac_version == RTL_GIGA_MAC_VER_03 ||
	    tp->mac_version == RTL_GIGA_MAC_VER_04)
		rtl_set_rx_tx_config_registers(tp);

	tp->cp_cmd |= rtl_rw_cpluscmd(ioaddr) | PCIMulRW;

	if (tp->mac_version == RTL_GIGA_MAC_VER_02 ||
	    tp->mac_version == RTL_GIGA_MAC_VER_03) {
		dprintk("Set MAC Reg C+CR Offset 0xE0. "
			"Bit-3 and bit-14 MUST be 1\n");
		tp->cp_cmd |= (1 << 14);
	}

	RTL_W16(CPlusCmd, tp->cp_cmd);

	rtl8169_set_magic_reg(ioaddr, tp->mac_version);

	/*
	 * Undocumented corner. Supposedly:
	 * (TxTimer << 12) | (TxPackets << 8) | (RxTimer << 4) | RxPackets
	 */
	RTL_W16(IntrMitigate, 0x0000);

	rtl_set_rx_tx_desc_registers(tp, ioaddr);

	/* Later chips enable Tx/Rx only after the descriptor setup. */
	if (tp->mac_version != RTL_GIGA_MAC_VER_01 &&
	    tp->mac_version != RTL_GIGA_MAC_VER_02 &&
	    tp->mac_version != RTL_GIGA_MAC_VER_03 &&
	    tp->mac_version != RTL_GIGA_MAC_VER_04) {
		RTL_W8(ChipCmd, CmdTxEnb | CmdRxEnb);
		rtl_set_rx_tx_config_registers(tp);
	}

	RTL_W8(Cfg9346, Cfg9346_Lock);

	/* Initially a 10 us delay. Turned it into a PCI commit. - FR */
	RTL_R8(IntrMask);

	RTL_W32(RxMissed, 0);

	rtl_set_rx_mode(dev);

	/* no early-rx interrupts */
	RTL_W16(MultiIntr, RTL_R16(MultiIntr) & 0xF000);

	/* Enable all known interrupts by setting the interrupt mask. */
	RTL_W16(IntrMask, tp->intr_event);
}
4147
/*
 * Replace the Max_Read_Request_Size field (and any other bits present in
 * 'force') of the PCIe Device Control register.  No-op on non-PCIe parts.
 */
static void rtl_tx_performance_tweak(struct pci_dev *pdev, u16 force)
{
	int cap = pci_pcie_cap(pdev);

	if (cap) {
		u16 ctl;

		pci_read_config_word(pdev, cap + PCI_EXP_DEVCTL, &ctl);
		ctl = (ctl & ~PCI_EXP_DEVCTL_READRQ) | force;
		pci_write_config_word(pdev, cap + PCI_EXP_DEVCTL, ctl);
	}
}
4160
/*
 * Read-modify-write CSI register 0x070c: keep the low 24 bits, replace
 * the top byte(s) with 'bits'.
 */
static void rtl_csi_access_enable(void __iomem *ioaddr, u32 bits)
{
	u32 csi;

	csi = rtl_csi_read(ioaddr, 0x070c) & 0x00ffffff;
	rtl_csi_write(ioaddr, 0x070c, csi | bits);
}
4168
/* CSI access enable, variant writing 0x17 to the top byte. */
static void rtl_csi_access_enable_1(void __iomem *ioaddr)
{
	rtl_csi_access_enable(ioaddr, 0x17000000);
}
4173
/* CSI access enable, variant writing 0x27 to the top byte. */
static void rtl_csi_access_enable_2(void __iomem *ioaddr)
{
	rtl_csi_access_enable(ioaddr, 0x27000000);
}
4178
/* One ephy register patch: clear 'mask' bits, then set 'bits'. */
struct ephy_info {
	unsigned int offset;	/* ephy register number */
	u16 mask;		/* bits cleared by rtl_ephy_init() */
	u16 bits;		/* bits set after masking */
};
4184
4185static void rtl_ephy_init(void __iomem *ioaddr, const struct ephy_info *e, int len)
4186{
4187 u16 w;
4188
4189 while (len-- > 0) {
4190 w = (rtl_ephy_read(ioaddr, e->offset) & ~e->mask) | e->bits;
4191 rtl_ephy_write(ioaddr, e->offset, w);
4192 e++;
4193 }
4194}
4195
/*
 * Clear the CLKREQ enable bit in the PCIe Link Control register.
 * No-op on non-PCIe parts.
 */
static void rtl_disable_clock_request(struct pci_dev *pdev)
{
	int cap = pci_pcie_cap(pdev);

	if (cap) {
		u16 ctl;

		pci_read_config_word(pdev, cap + PCI_EXP_LNKCTL, &ctl);
		ctl &= ~PCI_EXP_LNKCTL_CLKREQ_EN;
		pci_write_config_word(pdev, cap + PCI_EXP_LNKCTL, ctl);
	}
}
4208
4209static void rtl_enable_clock_request(struct pci_dev *pdev)
4210{
4211 int cap = pci_pcie_cap(pdev);
4212
4213 if (cap) {
4214 u16 ctl;
4215
4216 pci_read_config_word(pdev, cap + PCI_EXP_LNKCTL, &ctl);
4217 ctl |= PCI_EXP_LNKCTL_CLKREQ_EN;
4218 pci_write_config_word(pdev, cap + PCI_EXP_LNKCTL, ctl);
4219 }
4220}
4221
/*
 * CPlusCmd bits that must be cleared on 8168-class chips: debug, BIST
 * and forced flow-control controls that must not be active in normal
 * operation.
 */
#define R8168_CPCMD_QUIRK_MASK (\
	EnableBist | \
	Mac_dbgo_oe | \
	Force_half_dup | \
	Force_rxflow_en | \
	Force_txflow_en | \
	Cxpl_dbg_sel | \
	ASF | \
	PktCntrDisable | \
	Mac_dbgo_sel)
4232
/* Chip-specific init for RTL8168B/B (VER_11). */
static void rtl_hw_start_8168bb(void __iomem *ioaddr, struct pci_dev *pdev)
{
	RTL_W8(Config3, RTL_R8(Config3) & ~Beacon_en);

	RTL_W16(CPlusCmd, RTL_R16(CPlusCmd) & ~R8168_CPCMD_QUIRK_MASK);

	/* Also disables PCIe no-snoop on this chip. */
	rtl_tx_performance_tweak(pdev,
		(0x5 << MAX_READ_REQUEST_SHIFT) | PCI_EXP_DEVCTL_NOSNOOP_EN);
}
4242
/* Chip-specific init for RTL8168B-e/f (VER_12/17): 8168bb plus extras. */
static void rtl_hw_start_8168bef(void __iomem *ioaddr, struct pci_dev *pdev)
{
	rtl_hw_start_8168bb(ioaddr, pdev);

	RTL_W8(MaxTxPacketSize, TxPacketMax);

	/* Clear bit 0 of Config4 (meaning undocumented here). */
	RTL_W8(Config4, RTL_R8(Config4) & ~(1 << 0));
}
4251
/* Common tail shared by the 8168C/CP start variants. */
static void __rtl_hw_start_8168cp(void __iomem *ioaddr, struct pci_dev *pdev)
{
	RTL_W8(Config1, RTL_R8(Config1) | Speed_down);

	RTL_W8(Config3, RTL_R8(Config3) & ~Beacon_en);

	rtl_tx_performance_tweak(pdev, 0x5 << MAX_READ_REQUEST_SHIFT);

	rtl_disable_clock_request(pdev);

	RTL_W16(CPlusCmd, RTL_R16(CPlusCmd) & ~R8168_CPCMD_QUIRK_MASK);
}
4264
/* Chip-specific init for RTL8168CP variant 1 (VER_18). */
static void rtl_hw_start_8168cp_1(void __iomem *ioaddr, struct pci_dev *pdev)
{
	static const struct ephy_info e_info_8168cp[] = {
		{ 0x01, 0, 0x0001 },
		{ 0x02, 0x0800, 0x1000 },
		{ 0x03, 0, 0x0042 },
		{ 0x06, 0x0080, 0x0000 },
		{ 0x07, 0, 0x2000 }
	};

	rtl_csi_access_enable_2(ioaddr);

	rtl_ephy_init(ioaddr, e_info_8168cp, ARRAY_SIZE(e_info_8168cp));

	__rtl_hw_start_8168cp(ioaddr, pdev);
}
4281
/* Chip-specific init for RTL8168CP variant 2 (VER_23). */
static void rtl_hw_start_8168cp_2(void __iomem *ioaddr, struct pci_dev *pdev)
{
	rtl_csi_access_enable_2(ioaddr);

	RTL_W8(Config3, RTL_R8(Config3) & ~Beacon_en);

	rtl_tx_performance_tweak(pdev, 0x5 << MAX_READ_REQUEST_SHIFT);

	RTL_W16(CPlusCmd, RTL_R16(CPlusCmd) & ~R8168_CPCMD_QUIRK_MASK);
}
4292
/* Chip-specific init for RTL8168CP variant 3 (VER_24). */
static void rtl_hw_start_8168cp_3(void __iomem *ioaddr, struct pci_dev *pdev)
{
	rtl_csi_access_enable_2(ioaddr);

	RTL_W8(Config3, RTL_R8(Config3) & ~Beacon_en);

	/* Magic. */
	RTL_W8(DBG_REG, 0x20);

	RTL_W8(MaxTxPacketSize, TxPacketMax);

	rtl_tx_performance_tweak(pdev, 0x5 << MAX_READ_REQUEST_SHIFT);

	RTL_W16(CPlusCmd, RTL_R16(CPlusCmd) & ~R8168_CPCMD_QUIRK_MASK);
}
4308
/* Chip-specific init for RTL8168C variant 1 (VER_19). */
static void rtl_hw_start_8168c_1(void __iomem *ioaddr, struct pci_dev *pdev)
{
	static const struct ephy_info e_info_8168c_1[] = {
		{ 0x02, 0x0800, 0x1000 },
		{ 0x03, 0, 0x0002 },
		{ 0x06, 0x0080, 0x0000 }
	};

	rtl_csi_access_enable_2(ioaddr);

	RTL_W8(DBG_REG, 0x06 | FIX_NAK_1 | FIX_NAK_2);

	rtl_ephy_init(ioaddr, e_info_8168c_1, ARRAY_SIZE(e_info_8168c_1));

	__rtl_hw_start_8168cp(ioaddr, pdev);
}
4325
/* Chip-specific init for RTL8168C variant 2 (VER_20). */
static void rtl_hw_start_8168c_2(void __iomem *ioaddr, struct pci_dev *pdev)
{
	static const struct ephy_info e_info_8168c_2[] = {
		{ 0x01, 0, 0x0001 },
		{ 0x03, 0x0400, 0x0220 }
	};

	rtl_csi_access_enable_2(ioaddr);

	rtl_ephy_init(ioaddr, e_info_8168c_2, ARRAY_SIZE(e_info_8168c_2));

	__rtl_hw_start_8168cp(ioaddr, pdev);
}
4339
/* Variant 3 (VER_21) uses the exact same sequence as variant 2. */
static void rtl_hw_start_8168c_3(void __iomem *ioaddr, struct pci_dev *pdev)
{
	rtl_hw_start_8168c_2(ioaddr, pdev);
}
4344
/* Chip-specific init for RTL8168C variant 4 (VER_22): no ephy patches. */
static void rtl_hw_start_8168c_4(void __iomem *ioaddr, struct pci_dev *pdev)
{
	rtl_csi_access_enable_2(ioaddr);

	__rtl_hw_start_8168cp(ioaddr, pdev);
}
4351
/* Chip-specific init for RTL8168D (VER_25/26/27). */
static void rtl_hw_start_8168d(void __iomem *ioaddr, struct pci_dev *pdev)
{
	rtl_csi_access_enable_2(ioaddr);

	rtl_disable_clock_request(pdev);

	RTL_W8(MaxTxPacketSize, TxPacketMax);

	rtl_tx_performance_tweak(pdev, 0x5 << MAX_READ_REQUEST_SHIFT);

	RTL_W16(CPlusCmd, RTL_R16(CPlusCmd) & ~R8168_CPCMD_QUIRK_MASK);
}
4364
/* Chip-specific init for RTL8168DP (VER_31). */
static void rtl_hw_start_8168dp(void __iomem *ioaddr, struct pci_dev *pdev)
{
	rtl_csi_access_enable_1(ioaddr);

	rtl_tx_performance_tweak(pdev, 0x5 << MAX_READ_REQUEST_SHIFT);

	RTL_W8(MaxTxPacketSize, TxPacketMax);

	rtl_disable_clock_request(pdev);
}
4375
4376static void rtl_hw_start_8168d_4(void __iomem *ioaddr, struct pci_dev *pdev)
4377{
4378 static const struct ephy_info e_info_8168d_4[] = {
4379 { 0x0b, ~0, 0x48 },
4380 { 0x19, 0x20, 0x50 },
4381 { 0x0c, ~0, 0x20 }
4382 };
4383 int i;
4384
4385 rtl_csi_access_enable_1(ioaddr);
4386
4387 rtl_tx_performance_tweak(pdev, 0x5 << MAX_READ_REQUEST_SHIFT);
4388
4389 RTL_W8(MaxTxPacketSize, TxPacketMax);
4390
4391 for (i = 0; i < ARRAY_SIZE(e_info_8168d_4); i++) {
4392 const struct ephy_info *e = e_info_8168d_4 + i;
4393 u16 w;
4394
4395 w = rtl_ephy_read(ioaddr, e->offset);
4396 rtl_ephy_write(ioaddr, 0x03, (w & e->mask) | e->bits);
4397 }
4398
4399 rtl_enable_clock_request(pdev);
4400}
4401
/* Chip-specific init for RTL8168E variant 1 (VER_32/33). */
static void rtl_hw_start_8168e_1(void __iomem *ioaddr, struct pci_dev *pdev)
{
	static const struct ephy_info e_info_8168e_1[] = {
		{ 0x00, 0x0200, 0x0100 },
		{ 0x00, 0x0000, 0x0004 },
		{ 0x06, 0x0002, 0x0001 },
		{ 0x06, 0x0000, 0x0030 },
		{ 0x07, 0x0000, 0x2000 },
		{ 0x00, 0x0000, 0x0020 },
		{ 0x03, 0x5800, 0x2000 },
		{ 0x03, 0x0000, 0x0001 },
		{ 0x01, 0x0800, 0x1000 },
		{ 0x07, 0x0000, 0x4000 },
		{ 0x1e, 0x0000, 0x2000 },
		{ 0x19, 0xffff, 0xfe6c },
		{ 0x0a, 0x0000, 0x0040 }
	};

	rtl_csi_access_enable_2(ioaddr);

	rtl_ephy_init(ioaddr, e_info_8168e_1, ARRAY_SIZE(e_info_8168e_1));

	rtl_tx_performance_tweak(pdev, 0x5 << MAX_READ_REQUEST_SHIFT);

	RTL_W8(MaxTxPacketSize, TxPacketMax);

	rtl_disable_clock_request(pdev);

	/* Reset tx FIFO pointer */
	RTL_W32(MISC, RTL_R32(MISC) | TXPLA_RST);
	RTL_W32(MISC, RTL_R32(MISC) & ~TXPLA_RST);

	RTL_W8(Config5, RTL_R8(Config5) & ~Spi_en);
}
4436
/*
 * Chip-specific init for RTL8168E variant 2 (VER_34): ephy patches plus
 * a series of extended GMAC (ERI) register writes.
 */
static void rtl_hw_start_8168e_2(void __iomem *ioaddr, struct pci_dev *pdev)
{
	static const struct ephy_info e_info_8168e_2[] = {
		{ 0x09, 0x0000, 0x0080 },
		{ 0x19, 0x0000, 0x0224 }
	};

	rtl_csi_access_enable_1(ioaddr);

	rtl_ephy_init(ioaddr, e_info_8168e_2, ARRAY_SIZE(e_info_8168e_2));

	rtl_tx_performance_tweak(pdev, 0x5 << MAX_READ_REQUEST_SHIFT);

	rtl_eri_write(ioaddr, 0xc0, ERIAR_MASK_0011, 0x0000, ERIAR_EXGMAC);
	rtl_eri_write(ioaddr, 0xb8, ERIAR_MASK_0011, 0x0000, ERIAR_EXGMAC);
	rtl_eri_write(ioaddr, 0xc8, ERIAR_MASK_1111, 0x00100002, ERIAR_EXGMAC);
	rtl_eri_write(ioaddr, 0xe8, ERIAR_MASK_1111, 0x00100006, ERIAR_EXGMAC);
	rtl_eri_write(ioaddr, 0xcc, ERIAR_MASK_1111, 0x00000050, ERIAR_EXGMAC);
	rtl_eri_write(ioaddr, 0xd0, ERIAR_MASK_1111, 0x07ff0060, ERIAR_EXGMAC);
	rtl_w1w0_eri(ioaddr, 0x1b0, ERIAR_MASK_0001, 0x10, 0x00, ERIAR_EXGMAC);
	rtl_w1w0_eri(ioaddr, 0x0d4, ERIAR_MASK_0011, 0x0c00, 0xff00,
		     ERIAR_EXGMAC);

	RTL_W8(MaxTxPacketSize, 0x27);

	rtl_disable_clock_request(pdev);

	RTL_W32(TxConfig, RTL_R32(TxConfig) | TXCFG_AUTO_FIFO);
	RTL_W8(MCU, RTL_R8(MCU) & ~NOW_IS_OOB);

	/* Adjust EEE LED frequency */
	RTL_W8(EEE_LED, RTL_R8(EEE_LED) & ~0x07);

	RTL_W8(DLLPR, RTL_R8(DLLPR) | PFM_EN);
	RTL_W32(MISC, RTL_R32(MISC) | PWM_EN);
	RTL_W8(Config5, RTL_R8(Config5) & ~Spi_en);
}
4474
/*
 * hw_start hook for the 8168 family: common register setup followed by a
 * dispatch to the exact chip variant's init routine, then Tx/Rx enable
 * and interrupt unmasking.
 */
static void rtl_hw_start_8168(struct net_device *dev)
{
	struct rtl8169_private *tp = netdev_priv(dev);
	void __iomem *ioaddr = tp->mmio_addr;
	struct pci_dev *pdev = tp->pci_dev;

	RTL_W8(Cfg9346, Cfg9346_Unlock);

	RTL_W8(MaxTxPacketSize, TxPacketMax);

	rtl_set_rx_max_size(ioaddr, rx_buf_sz);

	tp->cp_cmd |= RTL_R16(CPlusCmd) | PktCntrDisable | INTT_1;

	RTL_W16(CPlusCmd, tp->cp_cmd);

	RTL_W16(IntrMitigate, 0x5151);

	/* Work around for RxFIFO overflow. */
	if (tp->mac_version == RTL_GIGA_MAC_VER_11 ||
	    tp->mac_version == RTL_GIGA_MAC_VER_22) {
		tp->intr_event |= RxFIFOOver | PCSTimeout;
		tp->intr_event &= ~RxOverflow;
	}

	rtl_set_rx_tx_desc_registers(tp, ioaddr);

	rtl_set_rx_mode(dev);

	RTL_W32(TxConfig, (TX_DMA_BURST << TxDMAShift) |
		(InterFrameGap << TxInterFrameGapShift));

	/* PCI commit of the writes above. */
	RTL_R8(IntrMask);

	/* Per-variant register setup. */
	switch (tp->mac_version) {
	case RTL_GIGA_MAC_VER_11:
		rtl_hw_start_8168bb(ioaddr, pdev);
		break;

	case RTL_GIGA_MAC_VER_12:
	case RTL_GIGA_MAC_VER_17:
		rtl_hw_start_8168bef(ioaddr, pdev);
		break;

	case RTL_GIGA_MAC_VER_18:
		rtl_hw_start_8168cp_1(ioaddr, pdev);
		break;

	case RTL_GIGA_MAC_VER_19:
		rtl_hw_start_8168c_1(ioaddr, pdev);
		break;

	case RTL_GIGA_MAC_VER_20:
		rtl_hw_start_8168c_2(ioaddr, pdev);
		break;

	case RTL_GIGA_MAC_VER_21:
		rtl_hw_start_8168c_3(ioaddr, pdev);
		break;

	case RTL_GIGA_MAC_VER_22:
		rtl_hw_start_8168c_4(ioaddr, pdev);
		break;

	case RTL_GIGA_MAC_VER_23:
		rtl_hw_start_8168cp_2(ioaddr, pdev);
		break;

	case RTL_GIGA_MAC_VER_24:
		rtl_hw_start_8168cp_3(ioaddr, pdev);
		break;

	case RTL_GIGA_MAC_VER_25:
	case RTL_GIGA_MAC_VER_26:
	case RTL_GIGA_MAC_VER_27:
		rtl_hw_start_8168d(ioaddr, pdev);
		break;

	case RTL_GIGA_MAC_VER_28:
		rtl_hw_start_8168d_4(ioaddr, pdev);
		break;

	case RTL_GIGA_MAC_VER_31:
		rtl_hw_start_8168dp(ioaddr, pdev);
		break;

	case RTL_GIGA_MAC_VER_32:
	case RTL_GIGA_MAC_VER_33:
		rtl_hw_start_8168e_1(ioaddr, pdev);
		break;
	case RTL_GIGA_MAC_VER_34:
		rtl_hw_start_8168e_2(ioaddr, pdev);
		break;

	default:
		printk(KERN_ERR PFX "%s: unknown chipset (mac_version = %d).\n",
			dev->name, tp->mac_version);
		break;
	}

	RTL_W8(ChipCmd, CmdTxEnb | CmdRxEnb);

	RTL_W8(Cfg9346, Cfg9346_Lock);

	/* no early-rx interrupts */
	RTL_W16(MultiIntr, RTL_R16(MultiIntr) & 0xF000);

	RTL_W16(IntrMask, tp->intr_event);
}
4583
/*
 * CPlusCmd bits that must be cleared on 810x-class chips — same debug /
 * BIST / forced-flow-control set as R8168_CPCMD_QUIRK_MASK.
 */
#define R810X_CPCMD_QUIRK_MASK (\
	EnableBist | \
	Mac_dbgo_oe | \
	Force_half_dup | \
	Force_rxflow_en | \
	Force_txflow_en | \
	Cxpl_dbg_sel | \
	ASF | \
	PktCntrDisable | \
	Mac_dbgo_sel)
4594
/* Chip-specific init for RTL8102E variant 1 (VER_07). */
static void rtl_hw_start_8102e_1(void __iomem *ioaddr, struct pci_dev *pdev)
{
	static const struct ephy_info e_info_8102e_1[] = {
		{ 0x01, 0, 0x6e65 },
		{ 0x02, 0, 0x091f },
		{ 0x03, 0, 0xc2f9 },
		{ 0x06, 0, 0xafb5 },
		{ 0x07, 0, 0x0e00 },
		{ 0x19, 0, 0xec80 },
		{ 0x01, 0, 0x2e65 },
		{ 0x01, 0, 0x6e65 }
	};
	u8 cfg1;

	rtl_csi_access_enable_2(ioaddr);

	RTL_W8(DBG_REG, FIX_NAK_1);

	rtl_tx_performance_tweak(pdev, 0x5 << MAX_READ_REQUEST_SHIFT);

	RTL_W8(Config1,
	       LEDS1 | LEDS0 | Speed_down | MEMMAP | IOMAP | VPD | PMEnable);
	RTL_W8(Config3, RTL_R8(Config3) & ~Beacon_en);

	/* Drop LEDS0 again if the chip latched both LED bits. */
	cfg1 = RTL_R8(Config1);
	if ((cfg1 & LEDS0) && (cfg1 & LEDS1))
		RTL_W8(Config1, cfg1 & ~LEDS0);

	rtl_ephy_init(ioaddr, e_info_8102e_1, ARRAY_SIZE(e_info_8102e_1));
}
4625
/* Chip-specific init for RTL8102E variant 2 (VER_09). */
static void rtl_hw_start_8102e_2(void __iomem *ioaddr, struct pci_dev *pdev)
{
	rtl_csi_access_enable_2(ioaddr);

	rtl_tx_performance_tweak(pdev, 0x5 << MAX_READ_REQUEST_SHIFT);

	RTL_W8(Config1, MEMMAP | IOMAP | VPD | PMEnable);
	RTL_W8(Config3, RTL_R8(Config3) & ~Beacon_en);
}
4635
/* Variant 3 (VER_08): variant 2 plus one ephy register write. */
static void rtl_hw_start_8102e_3(void __iomem *ioaddr, struct pci_dev *pdev)
{
	rtl_hw_start_8102e_2(ioaddr, pdev);

	rtl_ephy_write(ioaddr, 0x03, 0xc2f9);
}
4642
/* Chip-specific bring-up for the 8105e, variant 1 (RTL_GIGA_MAC_VER_29). */
static void rtl_hw_start_8105e_1(void __iomem *ioaddr, struct pci_dev *pdev)
{
	/* ephy fixup table: { register, bits-to-clear, bits-to-set }. */
	static const struct ephy_info e_info_8105e_1[] = {
		{ 0x07, 0, 0x4000 },
		{ 0x19, 0, 0x0200 },
		{ 0x19, 0, 0x0020 },
		{ 0x1e, 0, 0x2000 },
		{ 0x03, 0, 0x0001 },
		{ 0x19, 0, 0x0100 },
		{ 0x19, 0, 0x0004 },
		{ 0x0a, 0, 0x0020 }
	};

	/* Force LAN exit from ASPM if Rx/Tx are not idle */
	RTL_W32(FuncEvent, RTL_R32(FuncEvent) | 0x002800);

	/* Disable Early Tally Counter */
	RTL_W32(FuncEvent, RTL_R32(FuncEvent) & ~0x010000);

	RTL_W8(MCU, RTL_R8(MCU) | EN_NDP | EN_OOB_RESET);
	RTL_W8(DLLPR, RTL_R8(DLLPR) | PFM_EN);

	rtl_ephy_init(ioaddr, e_info_8105e_1, ARRAY_SIZE(e_info_8105e_1));
}
4667
/* Variant 2 (RTL_GIGA_MAC_VER_30): variant 1 setup plus ephy 0x1e bit 15. */
static void rtl_hw_start_8105e_2(void __iomem *ioaddr, struct pci_dev *pdev)
{
	rtl_hw_start_8105e_1(ioaddr, pdev);
	rtl_ephy_write(ioaddr, 0x1e, rtl_ephy_read(ioaddr, 0x1e) | 0x8000);
}
4673
/*
 * Start the 8101/810x family: apply the per-mac-version chip setup,
 * then program ring addresses, Rx limits, interrupt mitigation and
 * finally unmask interrupts.
 */
static void rtl_hw_start_8101(struct net_device *dev)
{
	struct rtl8169_private *tp = netdev_priv(dev);
	void __iomem *ioaddr = tp->mmio_addr;
	struct pci_dev *pdev = tp->pci_dev;

	/* Disable PCIe no-snoop on the two versions that need it. */
	if (tp->mac_version == RTL_GIGA_MAC_VER_13 ||
	    tp->mac_version == RTL_GIGA_MAC_VER_16) {
		int cap = pci_pcie_cap(pdev);

		if (cap) {
			pci_write_config_word(pdev, cap + PCI_EXP_DEVCTL,
					      PCI_EXP_DEVCTL_NOSNOOP_EN);
		}
	}

	/* Config registers are writable only between Unlock and Lock. */
	RTL_W8(Cfg9346, Cfg9346_Unlock);

	switch (tp->mac_version) {
	case RTL_GIGA_MAC_VER_07:
		rtl_hw_start_8102e_1(ioaddr, pdev);
		break;

	case RTL_GIGA_MAC_VER_08:
		rtl_hw_start_8102e_3(ioaddr, pdev);
		break;

	case RTL_GIGA_MAC_VER_09:
		rtl_hw_start_8102e_2(ioaddr, pdev);
		break;

	case RTL_GIGA_MAC_VER_29:
		rtl_hw_start_8105e_1(ioaddr, pdev);
		break;
	case RTL_GIGA_MAC_VER_30:
		rtl_hw_start_8105e_2(ioaddr, pdev);
		break;
	}

	RTL_W8(Cfg9346, Cfg9346_Lock);

	RTL_W8(MaxTxPacketSize, TxPacketMax);

	rtl_set_rx_max_size(ioaddr, rx_buf_sz);

	/* Clear debug/test bits that must not be set at runtime. */
	tp->cp_cmd &= ~R810X_CPCMD_QUIRK_MASK;
	RTL_W16(CPlusCmd, tp->cp_cmd);

	RTL_W16(IntrMitigate, 0x0000);

	rtl_set_rx_tx_desc_registers(tp, ioaddr);

	RTL_W8(ChipCmd, CmdTxEnb | CmdRxEnb);
	rtl_set_rx_tx_config_registers(tp);

	/* Read flushes the posted writes above before enabling irqs. */
	RTL_R8(IntrMask);

	rtl_set_rx_mode(dev);

	RTL_W16(MultiIntr, RTL_R16(MultiIntr) & 0xf000);

	RTL_W16(IntrMask, tp->intr_event);
}
4737
4738static int rtl8169_change_mtu(struct net_device *dev, int new_mtu)
4739{
4740 if (new_mtu < ETH_ZLEN || new_mtu > SafeMtu)
4741 return -EINVAL;
4742
4743 dev->mtu = new_mtu;
4744 netdev_update_features(dev);
4745
4746 return 0;
4747}
4748
/*
 * Poison an Rx descriptor so the NIC will never DMA into it: plant a
 * recognizable bogus address and clear DescOwn so the chip skips it.
 */
static inline void rtl8169_make_unusable_by_asic(struct RxDesc *desc)
{
	desc->addr = cpu_to_le64(0x0badbadbadbadbadull);
	desc->opts1 &= ~cpu_to_le32(DescOwn | RsvdMask);
}
4754
/*
 * Release one Rx buffer: unmap its DMA mapping, free the kmalloc'ed
 * memory, NULL the slot and poison the descriptor for the NIC.
 */
static void rtl8169_free_rx_databuff(struct rtl8169_private *tp,
				     void **data_buff, struct RxDesc *desc)
{
	dma_unmap_single(&tp->pci_dev->dev, le64_to_cpu(desc->addr), rx_buf_sz,
			 DMA_FROM_DEVICE);

	kfree(*data_buff);
	*data_buff = NULL;
	rtl8169_make_unusable_by_asic(desc);
}
4765
/*
 * Hand an Rx descriptor (back) to the NIC: set DescOwn and the buffer
 * size while preserving the descriptor's RingEnd marker.
 */
static inline void rtl8169_mark_to_asic(struct RxDesc *desc, u32 rx_buf_sz)
{
	u32 eor = le32_to_cpu(desc->opts1) & RingEnd;

	desc->opts1 = cpu_to_le32(DescOwn | eor | rx_buf_sz);
}
4772
/*
 * Publish a freshly mapped buffer to the NIC.  The wmb() guarantees the
 * DMA address is visible before DescOwn is set — the chip may fetch the
 * descriptor the moment ownership is transferred.
 */
static inline void rtl8169_map_to_asic(struct RxDesc *desc, dma_addr_t mapping,
				       u32 rx_buf_sz)
{
	desc->addr = cpu_to_le64(mapping);
	wmb();
	rtl8169_mark_to_asic(desc, rx_buf_sz);
}
4780
/* Round a buffer pointer up to the next 16-byte boundary. */
static inline void *rtl8169_align(void *data)
{
	long aligned = ALIGN((long)data, 16);

	return (void *)aligned;
}
4785
4786static struct sk_buff *rtl8169_alloc_rx_data(struct rtl8169_private *tp,
4787 struct RxDesc *desc)
4788{
4789 void *data;
4790 dma_addr_t mapping;
4791 struct device *d = &tp->pci_dev->dev;
4792 struct net_device *dev = tp->dev;
4793 int node = dev->dev.parent ? dev_to_node(dev->dev.parent) : -1;
4794
4795 data = kmalloc_node(rx_buf_sz, GFP_KERNEL, node);
4796 if (!data)
4797 return NULL;
4798
4799 if (rtl8169_align(data) != data) {
4800 kfree(data);
4801 data = kmalloc_node(rx_buf_sz + 15, GFP_KERNEL, node);
4802 if (!data)
4803 return NULL;
4804 }
4805
4806 mapping = dma_map_single(d, rtl8169_align(data), rx_buf_sz,
4807 DMA_FROM_DEVICE);
4808 if (unlikely(dma_mapping_error(d, mapping))) {
4809 if (net_ratelimit())
4810 netif_err(tp, drv, tp->dev, "Failed to map RX DMA!\n");
4811 goto err_out;
4812 }
4813
4814 rtl8169_map_to_asic(desc, mapping, rx_buf_sz);
4815 return data;
4816
4817err_out:
4818 kfree(data);
4819 return NULL;
4820}
4821
4822static void rtl8169_rx_clear(struct rtl8169_private *tp)
4823{
4824 unsigned int i;
4825
4826 for (i = 0; i < NUM_RX_DESC; i++) {
4827 if (tp->Rx_databuff[i]) {
4828 rtl8169_free_rx_databuff(tp, tp->Rx_databuff + i,
4829 tp->RxDescArray + i);
4830 }
4831 }
4832}
4833
/* Set RingEnd so the NIC wraps back to the first descriptor after this one. */
static inline void rtl8169_mark_as_last_descriptor(struct RxDesc *desc)
{
	desc->opts1 |= cpu_to_le32(RingEnd);
}
4838
/*
 * Populate every empty Rx slot with a fresh mapped buffer and mark the
 * last descriptor as ring end.  On any allocation failure the whole
 * ring is torn down and -ENOMEM returned (all-or-nothing).
 */
static int rtl8169_rx_fill(struct rtl8169_private *tp)
{
	unsigned int i;

	for (i = 0; i < NUM_RX_DESC; i++) {
		void *data;

		/* Slot already has a buffer (refill case): keep it. */
		if (tp->Rx_databuff[i])
			continue;

		data = rtl8169_alloc_rx_data(tp, tp->RxDescArray + i);
		if (!data) {
			rtl8169_make_unusable_by_asic(tp->RxDescArray + i);
			goto err_out;
		}
		tp->Rx_databuff[i] = data;
	}

	rtl8169_mark_as_last_descriptor(tp->RxDescArray + NUM_RX_DESC - 1);
	return 0;

err_out:
	rtl8169_rx_clear(tp);
	return -ENOMEM;
}
4864
/*
 * Reset ring indexes, zero the Tx/Rx bookkeeping arrays and fill the
 * Rx ring with buffers.  Returns 0 or -ENOMEM from rtl8169_rx_fill().
 */
static int rtl8169_init_ring(struct net_device *dev)
{
	struct rtl8169_private *tp = netdev_priv(dev);

	rtl8169_init_ring_indexes(tp);

	memset(tp->tx_skb, 0x0, NUM_TX_DESC * sizeof(struct ring_info));
	memset(tp->Rx_databuff, 0x0, NUM_RX_DESC * sizeof(void *));

	return rtl8169_rx_fill(tp);
}
4876
/*
 * Undo the DMA mapping of one Tx descriptor and scrub both the
 * descriptor and its ring_info bookkeeping (len == 0 marks the slot
 * free for rtl8169_tx_clear_range()).
 */
static void rtl8169_unmap_tx_skb(struct device *d, struct ring_info *tx_skb,
				 struct TxDesc *desc)
{
	unsigned int len = tx_skb->len;

	dma_unmap_single(d, le64_to_cpu(desc->addr), len, DMA_TO_DEVICE);

	desc->opts1 = 0x00;
	desc->opts2 = 0x00;
	desc->addr = 0x00;
	tx_skb->len = 0;
}
4889
/*
 * Drop @n in-flight Tx entries starting at ring position @start:
 * unmap each used slot and free the skb attached to the slot that
 * carried it (only one slot per packet holds the skb pointer).
 * Freed skbs are accounted as tx_dropped.
 */
static void rtl8169_tx_clear_range(struct rtl8169_private *tp, u32 start,
				   unsigned int n)
{
	unsigned int i;

	for (i = 0; i < n; i++) {
		unsigned int entry = (start + i) % NUM_TX_DESC;
		struct ring_info *tx_skb = tp->tx_skb + entry;
		unsigned int len = tx_skb->len;

		if (len) {
			struct sk_buff *skb = tx_skb->skb;

			rtl8169_unmap_tx_skb(&tp->pci_dev->dev, tx_skb,
					     tp->TxDescArray + entry);
			if (skb) {
				tp->dev->stats.tx_dropped++;
				dev_kfree_skb(skb);
				tx_skb->skb = NULL;
			}
		}
	}
}
4913
/* Drop every pending Tx packet and reset the Tx ring indexes. */
static void rtl8169_tx_clear(struct rtl8169_private *tp)
{
	rtl8169_tx_clear_range(tp, tp->dirty_tx, NUM_TX_DESC);
	tp->cur_tx = tp->dirty_tx = 0;
}
4919
/*
 * (Re)target the driver's single delayed work item at @task and queue
 * it with a 4-jiffy delay.
 */
static void rtl8169_schedule_work(struct net_device *dev, work_func_t task)
{
	struct rtl8169_private *tp = netdev_priv(dev);

	PREPARE_DELAYED_WORK(&tp->task, task);
	schedule_delayed_work(&tp->task, 4);
}
4927
/*
 * Quiesce interrupt and NAPI activity: wait out any running hard irq
 * handler, drain NAPI, mask and ack the chip, then re-arm both with a
 * fully open software interrupt mask.
 */
static void rtl8169_wait_for_quiescence(struct net_device *dev)
{
	struct rtl8169_private *tp = netdev_priv(dev);
	void __iomem *ioaddr = tp->mmio_addr;

	synchronize_irq(dev->irq);

	/* Wait for any pending NAPI task to complete */
	napi_disable(&tp->napi);

	rtl8169_irq_mask_and_ack(ioaddr);

	tp->intr_mask = 0xffff;
	RTL_W16(IntrMask, tp->intr_event);
	napi_enable(&tp->napi);
}
4944
/*
 * Delayed-work handler: fully close and reopen the interface under
 * rtnl_lock.  If reopening fails the work is rescheduled to retry.
 */
static void rtl8169_reinit_task(struct work_struct *work)
{
	struct rtl8169_private *tp =
		container_of(work, struct rtl8169_private, task.work);
	struct net_device *dev = tp->dev;
	int ret;

	rtnl_lock();

	/* Interface went down since the work was queued: nothing to do. */
	if (!netif_running(dev))
		goto out_unlock;

	rtl8169_wait_for_quiescence(dev);
	rtl8169_close(dev);

	ret = rtl8169_open(dev);
	if (unlikely(ret < 0)) {
		if (net_ratelimit())
			netif_err(tp, drv, dev,
				  "reinit failure (status = %d). Rescheduling\n",
				  ret);
		rtl8169_schedule_work(dev, rtl8169_reinit_task);
	}

out_unlock:
	rtnl_unlock();
}
4972
/*
 * Delayed-work handler: soft-recover the chip without a full
 * close/open — give every Rx descriptor back to the NIC, drop pending
 * Tx, reset and restart the hardware, then re-check the link.
 */
static void rtl8169_reset_task(struct work_struct *work)
{
	struct rtl8169_private *tp =
		container_of(work, struct rtl8169_private, task.work);
	struct net_device *dev = tp->dev;
	int i;

	rtnl_lock();

	if (!netif_running(dev))
		goto out_unlock;

	rtl8169_wait_for_quiescence(dev);

	for (i = 0; i < NUM_RX_DESC; i++)
		rtl8169_mark_to_asic(tp->RxDescArray + i, rx_buf_sz);

	rtl8169_tx_clear(tp);

	rtl8169_hw_reset(tp);
	rtl_hw_start(dev);
	netif_wake_queue(dev);
	rtl8169_check_link_status(dev, tp, tp->mmio_addr);

out_unlock:
	rtnl_unlock();
}
5000
/*
 * ndo_tx_timeout handler: stop the chip immediately and defer the
 * actual recovery to rtl8169_reset_task.
 */
static void rtl8169_tx_timeout(struct net_device *dev)
{
	struct rtl8169_private *tp = netdev_priv(dev);

	rtl8169_hw_reset(tp);

	/* Let's wait a bit while any (async) irq lands on */
	rtl8169_schedule_work(dev, rtl8169_reset_task);
}
5010
/*
 * Map every page fragment of @skb into consecutive Tx descriptors
 * following tp->cur_tx.  @opts carries the opts1/opts2 template built
 * by the caller.  The skb pointer is recorded on the LAST fragment's
 * slot, and that descriptor gets the LastFrag bit.
 *
 * Returns the number of fragments mapped (0 for a linear skb), or
 * -EIO after unmapping the partial run if a DMA mapping fails.
 */
static int rtl8169_xmit_frags(struct rtl8169_private *tp, struct sk_buff *skb,
			      u32 *opts)
{
	struct skb_shared_info *info = skb_shinfo(skb);
	unsigned int cur_frag, entry;
	struct TxDesc * uninitialized_var(txd);
	struct device *d = &tp->pci_dev->dev;

	entry = tp->cur_tx;
	for (cur_frag = 0; cur_frag < info->nr_frags; cur_frag++) {
		skb_frag_t *frag = info->frags + cur_frag;
		dma_addr_t mapping;
		u32 status, len;
		void *addr;

		entry = (entry + 1) % NUM_TX_DESC;

		txd = tp->TxDescArray + entry;
		len = frag->size;
		addr = ((void *) page_address(frag->page)) + frag->page_offset;
		mapping = dma_map_single(d, addr, len, DMA_TO_DEVICE);
		if (unlikely(dma_mapping_error(d, mapping))) {
			if (net_ratelimit())
				netif_err(tp, drv, tp->dev,
					  "Failed to map TX fragments DMA!\n");
			goto err_out;
		}

		/* Anti gcc 2.95.3 bugware (sic) */
		status = opts[0] | len |
			(RingEnd * !((entry + 1) % NUM_TX_DESC));

		txd->opts1 = cpu_to_le32(status);
		txd->opts2 = cpu_to_le32(opts[1]);
		txd->addr = cpu_to_le64(mapping);

		tp->tx_skb[entry].len = len;
	}

	if (cur_frag) {
		tp->tx_skb[entry].skb = skb;
		txd->opts1 |= cpu_to_le32(LastFrag);
	}

	return cur_frag;

err_out:
	/* Unwind only the fragments mapped so far; the caller still owns
	 * the head descriptor at tp->cur_tx.
	 */
	rtl8169_tx_clear_range(tp, tp->cur_tx + 1, cur_frag);
	return -EIO;
}
5061
/*
 * Fill in the TSO / checksum-offload bits of the descriptor option
 * words.  The descriptor layout differs between chip generations, so
 * the field offsets come from the tx_desc_info table indexed by
 * tp->txd_version.
 */
static inline void rtl8169_tso_csum(struct rtl8169_private *tp,
				    struct sk_buff *skb, u32 *opts)
{
	const struct rtl_tx_desc_info *info = tx_desc_info + tp->txd_version;
	u32 mss = skb_shinfo(skb)->gso_size;
	int offset = info->opts_offset;

	if (mss) {
		/* Large send: clamp MSS to what the hardware field holds. */
		opts[0] |= TD_LSO;
		opts[offset] |= min(mss, TD_MSS_MAX) << info->mss_shift;
	} else if (skb->ip_summed == CHECKSUM_PARTIAL) {
		const struct iphdr *ip = ip_hdr(skb);

		if (ip->protocol == IPPROTO_TCP)
			opts[offset] |= info->checksum.tcp;
		else if (ip->protocol == IPPROTO_UDP)
			opts[offset] |= info->checksum.udp;
		else
			WARN_ON_ONCE(1);
	}
}
5083
/*
 * ndo_start_xmit: map the linear head into the current descriptor, let
 * rtl8169_xmit_frags() map any page fragments, then publish the head
 * descriptor last (DescOwn set after the fragments are visible) and
 * kick the chip with a TxPoll write.
 */
static netdev_tx_t rtl8169_start_xmit(struct sk_buff *skb,
				      struct net_device *dev)
{
	struct rtl8169_private *tp = netdev_priv(dev);
	unsigned int entry = tp->cur_tx % NUM_TX_DESC;
	struct TxDesc *txd = tp->TxDescArray + entry;
	void __iomem *ioaddr = tp->mmio_addr;
	struct device *d = &tp->pci_dev->dev;
	dma_addr_t mapping;
	u32 status, len;
	u32 opts[2];
	int frags;

	/* The stack should have stopped the queue before this happens. */
	if (unlikely(TX_BUFFS_AVAIL(tp) < skb_shinfo(skb)->nr_frags)) {
		netif_err(tp, drv, dev, "BUG! Tx Ring full when queue awake!\n");
		goto err_stop_0;
	}

	if (unlikely(le32_to_cpu(txd->opts1) & DescOwn))
		goto err_stop_0;

	len = skb_headlen(skb);
	mapping = dma_map_single(d, skb->data, len, DMA_TO_DEVICE);
	if (unlikely(dma_mapping_error(d, mapping))) {
		if (net_ratelimit())
			netif_err(tp, drv, dev, "Failed to map TX DMA!\n");
		goto err_dma_0;
	}

	tp->tx_skb[entry].len = len;
	txd->addr = cpu_to_le64(mapping);

	/* NOTE(review): opts[] is host-order u32, yet cpu_to_le32 is
	 * applied both here and again when opts[1] is stored into
	 * txd->opts2 below — on big-endian the two swaps cancel out.
	 * Flagged for confirmation, left as-is.
	 */
	opts[1] = cpu_to_le32(rtl8169_tx_vlan_tag(tp, skb));
	opts[0] = DescOwn;

	rtl8169_tso_csum(tp, skb, opts);

	frags = rtl8169_xmit_frags(tp, skb, opts);
	if (frags < 0)
		goto err_dma_1;
	else if (frags)
		opts[0] |= FirstFrag;
	else {
		opts[0] |= FirstFrag | LastFrag;
		/* Linear skb: this slot owns the skb pointer. */
		tp->tx_skb[entry].skb = skb;
	}

	txd->opts2 = cpu_to_le32(opts[1]);

	/* Fragment descriptors must be visible before DescOwn is set. */
	wmb();

	/* Anti gcc 2.95.3 bugware (sic) */
	status = opts[0] | len | (RingEnd * !((entry + 1) % NUM_TX_DESC));
	txd->opts1 = cpu_to_le32(status);

	tp->cur_tx += frags + 1;

	wmb();

	RTL_W8(TxPoll, NPQ);

	if (TX_BUFFS_AVAIL(tp) < MAX_SKB_FRAGS) {
		netif_stop_queue(dev);
		/* Re-check after the stop: the irq path may have freed
		 * descriptors in between.
		 */
		smp_rmb();
		if (TX_BUFFS_AVAIL(tp) >= MAX_SKB_FRAGS)
			netif_wake_queue(dev);
	}

	return NETDEV_TX_OK;

err_dma_1:
	rtl8169_unmap_tx_skb(d, tp->tx_skb + entry, txd);
err_dma_0:
	dev_kfree_skb(skb);
	dev->stats.tx_dropped++;
	return NETDEV_TX_OK;

err_stop_0:
	netif_stop_queue(dev);
	dev->stats.tx_dropped++;
	return NETDEV_TX_BUSY;
}
5166
/*
 * SYSErr recovery: log the PCI command/status words, clear the sticky
 * error bits, optionally disable 64-bit DMA addressing if the error
 * hit before any traffic, then reset and schedule a full reinit.
 */
static void rtl8169_pcierr_interrupt(struct net_device *dev)
{
	struct rtl8169_private *tp = netdev_priv(dev);
	struct pci_dev *pdev = tp->pci_dev;
	u16 pci_status, pci_cmd;

	pci_read_config_word(pdev, PCI_COMMAND, &pci_cmd);
	pci_read_config_word(pdev, PCI_STATUS, &pci_status);

	netif_err(tp, intr, dev, "PCI error (cmd = 0x%04x, status = 0x%04x)\n",
		  pci_cmd, pci_status);

	/*
	 * The recovery sequence below admits a very elaborated explanation:
	 * - it seems to work;
	 * - I did not see what else could be done;
	 * - it makes iop3xx happy.
	 *
	 * Feel free to adjust to your needs.
	 */
	if (pdev->broken_parity_status)
		pci_cmd &= ~PCI_COMMAND_PARITY;
	else
		pci_cmd |= PCI_COMMAND_SERR | PCI_COMMAND_PARITY;

	pci_write_config_word(pdev, PCI_COMMAND, pci_cmd);

	/* Error bits in PCI_STATUS are write-one-to-clear. */
	pci_write_config_word(pdev, PCI_STATUS,
		pci_status & (PCI_STATUS_DETECTED_PARITY |
		PCI_STATUS_SIG_SYSTEM_ERROR | PCI_STATUS_REC_MASTER_ABORT |
		PCI_STATUS_REC_TARGET_ABORT | PCI_STATUS_SIG_TARGET_ABORT));

	/* The infamous DAC f*ckup only happens at boot time */
	if ((tp->cp_cmd & PCIDAC) && !tp->dirty_rx && !tp->cur_rx) {
		void __iomem *ioaddr = tp->mmio_addr;

		netif_info(tp, intr, dev, "disabling PCI DAC\n");
		tp->cp_cmd &= ~PCIDAC;
		RTL_W16(CPlusCmd, tp->cp_cmd);
		dev->features &= ~NETIF_F_HIGHDMA;
	}

	rtl8169_hw_reset(tp);

	rtl8169_schedule_work(dev, rtl8169_reinit_task);
}
5213
/*
 * Reclaim completed Tx descriptors between dirty_tx and cur_tx: unmap
 * each one, account and free the skb on LastFrag slots, then wake the
 * queue if enough room opened up.
 */
static void rtl8169_tx_interrupt(struct net_device *dev,
				 struct rtl8169_private *tp,
				 void __iomem *ioaddr)
{
	unsigned int dirty_tx, tx_left;

	dirty_tx = tp->dirty_tx;
	/* Pair with the smp_wmb()-style ordering in the xmit path before
	 * reading cur_tx.
	 */
	smp_rmb();
	tx_left = tp->cur_tx - dirty_tx;

	while (tx_left > 0) {
		unsigned int entry = dirty_tx % NUM_TX_DESC;
		struct ring_info *tx_skb = tp->tx_skb + entry;
		u32 status;

		rmb();
		status = le32_to_cpu(tp->TxDescArray[entry].opts1);
		/* Chip still owns this descriptor: stop reclaiming. */
		if (status & DescOwn)
			break;

		rtl8169_unmap_tx_skb(&tp->pci_dev->dev, tx_skb,
				     tp->TxDescArray + entry);
		if (status & LastFrag) {
			dev->stats.tx_packets++;
			dev->stats.tx_bytes += tx_skb->skb->len;
			dev_kfree_skb(tx_skb->skb);
			tx_skb->skb = NULL;
		}
		dirty_tx++;
		tx_left--;
	}

	if (tp->dirty_tx != dirty_tx) {
		tp->dirty_tx = dirty_tx;
		smp_wmb();
		if (netif_queue_stopped(dev) &&
		    (TX_BUFFS_AVAIL(tp) >= MAX_SKB_FRAGS)) {
			netif_wake_queue(dev);
		}
		/*
		 * 8168 hack: TxPoll requests are lost when the Tx packets are
		 * too close. Let's kick an extra TxPoll request when a burst
		 * of start_xmit activity is detected (if it is not detected,
		 * it is slow enough). -- FR
		 */
		smp_rmb();
		if (tp->cur_tx != dirty_tx)
			RTL_W8(TxPoll, NPQ);
	}
}
5264
5265static inline int rtl8169_fragmented_frame(u32 status)
5266{
5267 return (status & (FirstFrag | LastFrag)) != (FirstFrag | LastFrag);
5268}
5269
5270static inline void rtl8169_rx_csum(struct sk_buff *skb, u32 opts1)
5271{
5272 u32 status = opts1 & RxProtoMask;
5273
5274 if (((status == RxProtoTCP) && !(opts1 & TCPFail)) ||
5275 ((status == RxProtoUDP) && !(opts1 & UDPFail)))
5276 skb->ip_summed = CHECKSUM_UNNECESSARY;
5277 else
5278 skb_checksum_none_assert(skb);
5279}
5280
/*
 * Copy a received packet out of the (still mapped) DMA buffer into a
 * freshly allocated skb, syncing the buffer for CPU access around the
 * copy so the original buffer can be handed straight back to the NIC.
 * Returns NULL if the skb allocation fails.
 */
static struct sk_buff *rtl8169_try_rx_copy(void *data,
					   struct rtl8169_private *tp,
					   int pkt_size,
					   dma_addr_t addr)
{
	struct sk_buff *skb;
	struct device *d = &tp->pci_dev->dev;

	data = rtl8169_align(data);
	dma_sync_single_for_cpu(d, addr, pkt_size, DMA_FROM_DEVICE);
	prefetch(data);
	skb = netdev_alloc_skb_ip_align(tp->dev, pkt_size);
	if (skb)
		memcpy(skb->data, data, pkt_size);
	dma_sync_single_for_device(d, addr, pkt_size, DMA_FROM_DEVICE);

	return skb;
}
5299
/*
 * NAPI Rx path: walk up to @budget descriptors from tp->cur_rx,
 * copying each good frame into a new skb and giving the buffer back to
 * the NIC immediately.  Error frames are counted and recycled; a FIFO
 * overflow additionally schedules a chip reset.
 * Returns the number of descriptors consumed.
 */
static int rtl8169_rx_interrupt(struct net_device *dev,
				struct rtl8169_private *tp,
				void __iomem *ioaddr, u32 budget)
{
	unsigned int cur_rx, rx_left;
	unsigned int count;

	cur_rx = tp->cur_rx;
	rx_left = NUM_RX_DESC + tp->dirty_rx - cur_rx;
	rx_left = min(rx_left, budget);

	for (; rx_left > 0; rx_left--, cur_rx++) {
		unsigned int entry = cur_rx % NUM_RX_DESC;
		struct RxDesc *desc = tp->RxDescArray + entry;
		u32 status;

		rmb();
		status = le32_to_cpu(desc->opts1);

		/* Chip still owns this descriptor: no more packets. */
		if (status & DescOwn)
			break;
		if (unlikely(status & RxRES)) {
			netif_info(tp, rx_err, dev, "Rx ERROR. status = %08x\n",
				   status);
			dev->stats.rx_errors++;
			if (status & (RxRWT | RxRUNT))
				dev->stats.rx_length_errors++;
			if (status & RxCRC)
				dev->stats.rx_crc_errors++;
			if (status & RxFOVF) {
				rtl8169_schedule_work(dev, rtl8169_reset_task);
				dev->stats.rx_fifo_errors++;
			}
			rtl8169_mark_to_asic(desc, rx_buf_sz);
		} else {
			struct sk_buff *skb;
			dma_addr_t addr = le64_to_cpu(desc->addr);
			/* Hardware length includes the 4-byte FCS. */
			int pkt_size = (status & 0x00001FFF) - 4;

			/*
			 * The driver does not support incoming fragmented
			 * frames. They are seen as a symptom of over-mtu
			 * sized frames.
			 */
			if (unlikely(rtl8169_fragmented_frame(status))) {
				dev->stats.rx_dropped++;
				dev->stats.rx_length_errors++;
				rtl8169_mark_to_asic(desc, rx_buf_sz);
				continue;
			}

			skb = rtl8169_try_rx_copy(tp->Rx_databuff[entry],
						  tp, pkt_size, addr);
			rtl8169_mark_to_asic(desc, rx_buf_sz);
			if (!skb) {
				dev->stats.rx_dropped++;
				continue;
			}

			rtl8169_rx_csum(skb, status);
			skb_put(skb, pkt_size);
			skb->protocol = eth_type_trans(skb, dev);

			rtl8169_rx_vlan_tag(desc, skb);

			napi_gro_receive(&tp->napi, skb);

			dev->stats.rx_bytes += pkt_size;
			dev->stats.rx_packets++;
		}

		/* Work around for AMD platform. */
		if ((desc->opts2 & cpu_to_le32(0xfffe000)) &&
		    (tp->mac_version == RTL_GIGA_MAC_VER_05)) {
			desc->opts2 = 0;
			cur_rx++;
		}
	}

	count = cur_rx - tp->cur_rx;
	tp->cur_rx = cur_rx;

	/* Buffers are recycled in place, so dirty_rx tracks cur_rx. */
	tp->dirty_rx += count;

	return count;
}
5386
/*
 * Hard interrupt handler (shared between INTx and MSI).  Error events
 * are handled inline; Rx/Tx work is deferred to NAPI by masking the
 * napi_event sources and scheduling the poll.
 */
static irqreturn_t rtl8169_interrupt(int irq, void *dev_instance)
{
	struct net_device *dev = dev_instance;
	struct rtl8169_private *tp = netdev_priv(dev);
	void __iomem *ioaddr = tp->mmio_addr;
	int handled = 0;
	int status;

	/* loop handling interrupts until we have no new ones or
	 * we hit a invalid/hotplug case.
	 */
	status = RTL_R16(IntrStatus);
	while (status && status != 0xffff) {
		handled = 1;

		/* Handle all of the error cases first. These will reset
		 * the chip, so just exit the loop.
		 */
		if (unlikely(!netif_running(dev))) {
			rtl8169_hw_reset(tp);
			break;
		}

		if (unlikely(status & RxFIFOOver)) {
			switch (tp->mac_version) {
			/* Work around for rx fifo overflow */
			case RTL_GIGA_MAC_VER_11:
			case RTL_GIGA_MAC_VER_22:
			case RTL_GIGA_MAC_VER_26:
				netif_stop_queue(dev);
				rtl8169_tx_timeout(dev);
				goto done;
			/* Testers needed. */
			case RTL_GIGA_MAC_VER_17:
			case RTL_GIGA_MAC_VER_19:
			case RTL_GIGA_MAC_VER_20:
			case RTL_GIGA_MAC_VER_21:
			case RTL_GIGA_MAC_VER_23:
			case RTL_GIGA_MAC_VER_24:
			case RTL_GIGA_MAC_VER_27:
			case RTL_GIGA_MAC_VER_28:
			case RTL_GIGA_MAC_VER_31:
			/* Experimental science. Pktgen proof. */
			case RTL_GIGA_MAC_VER_12:
			case RTL_GIGA_MAC_VER_25:
				if (status == RxFIFOOver)
					goto done;
				break;
			default:
				break;
			}
		}

		if (unlikely(status & SYSErr)) {
			rtl8169_pcierr_interrupt(dev);
			break;
		}

		if (status & LinkChg)
			__rtl8169_check_link_status(dev, tp, ioaddr, true);

		/* We need to see the latest version of tp->intr_mask to
		 * avoid ignoring an MSI interrupt and having to wait for
		 * another event which may never come.
		 */
		smp_rmb();
		if (status & tp->intr_mask & tp->napi_event) {
			RTL_W16(IntrMask, tp->intr_event & ~tp->napi_event);
			tp->intr_mask = ~tp->napi_event;

			if (likely(napi_schedule_prep(&tp->napi)))
				__napi_schedule(&tp->napi);
			else
				netif_info(tp, intr, dev,
					   "interrupt %04x in poll\n", status);
		}

		/* We only get a new MSI interrupt when all active irq
		 * sources on the chip have been acknowledged. So, ack
		 * everything we've seen and check if new sources have become
		 * active to avoid blocking all interrupts from the chip.
		 */
		RTL_W16(IntrStatus,
			(status & RxFIFOOver) ? (status | RxOverflow) : status);
		status = RTL_R16(IntrStatus);
	}
done:
	return IRQ_RETVAL(handled);
}
5476
/*
 * NAPI poll callback: process Rx up to @budget, reclaim Tx, and when
 * all Rx work is done re-enable the interrupt sources that
 * rtl8169_interrupt() masked off.
 */
static int rtl8169_poll(struct napi_struct *napi, int budget)
{
	struct rtl8169_private *tp = container_of(napi, struct rtl8169_private, napi);
	struct net_device *dev = tp->dev;
	void __iomem *ioaddr = tp->mmio_addr;
	int work_done;

	work_done = rtl8169_rx_interrupt(dev, tp, ioaddr, (u32) budget);
	rtl8169_tx_interrupt(dev, tp, ioaddr);

	if (work_done < budget) {
		napi_complete(napi);

		/* We need to force the visibility of tp->intr_mask
		 * for other CPUs, as we can lose an MSI interrupt
		 * and potentially wait for a retransmit timeout if we don't.
		 * The posted write to IntrMask is safe, as it will
		 * eventually make it to the chip and we won't lose anything
		 * until it does.
		 */
		tp->intr_mask = 0xffff;
		wmb();
		RTL_W16(IntrMask, tp->intr_event);
	}

	return work_done;
}
5504
/*
 * Fold the chip's 24-bit RxMissed counter into rx_missed_errors and
 * reset it.  Only the early (<= VER_06) chips expose this register.
 */
static void rtl8169_rx_missed(struct net_device *dev, void __iomem *ioaddr)
{
	struct rtl8169_private *tp = netdev_priv(dev);

	if (tp->mac_version > RTL_GIGA_MAC_VER_06)
		return;

	dev->stats.rx_missed_errors += (RTL_R32(RxMissed) & 0xffffff);
	RTL_W32(RxMissed, 0);
}
5515
/*
 * Bring the interface down: stop the timer, queue and NAPI, reset the
 * chip under the irq lock, wait out any in-flight irq/xmit, then free
 * both rings and power the PLL down.
 */
static void rtl8169_down(struct net_device *dev)
{
	struct rtl8169_private *tp = netdev_priv(dev);
	void __iomem *ioaddr = tp->mmio_addr;

	del_timer_sync(&tp->timer);

	netif_stop_queue(dev);

	napi_disable(&tp->napi);

	spin_lock_irq(&tp->lock);

	rtl8169_hw_reset(tp);
	/*
	 * At this point device interrupts can not be enabled in any function,
	 * as netif_running is not true (rtl8169_interrupt, rtl8169_reset_task,
	 * rtl8169_reinit_task) and napi is disabled (rtl8169_poll).
	 */
	rtl8169_rx_missed(dev, ioaddr);

	spin_unlock_irq(&tp->lock);

	synchronize_irq(dev->irq);

	/* Give a racing hard_start_xmit a few cycles to complete. */
	synchronize_sched();  /* FIXME: should this be synchronize_irq()? */

	rtl8169_tx_clear(tp);

	rtl8169_rx_clear(tp);

	rtl_pll_power_down(tp);
}
5550
/*
 * ndo_stop handler: snapshot the hardware counters, shut the device
 * down, release the irq and free the descriptor rings.  Runtime PM is
 * held across the sequence so the device stays accessible.
 */
static int rtl8169_close(struct net_device *dev)
{
	struct rtl8169_private *tp = netdev_priv(dev);
	struct pci_dev *pdev = tp->pci_dev;

	pm_runtime_get_sync(&pdev->dev);

	/* Update counters before going down */
	rtl8169_update_counters(dev);

	rtl8169_down(dev);

	free_irq(dev->irq, dev);

	dma_free_coherent(&pdev->dev, R8169_RX_RING_BYTES, tp->RxDescArray,
			  tp->RxPhyAddr);
	dma_free_coherent(&pdev->dev, R8169_TX_RING_BYTES, tp->TxDescArray,
			  tp->TxPhyAddr);
	tp->TxDescArray = NULL;
	tp->RxDescArray = NULL;

	pm_runtime_put_sync(&pdev->dev);

	return 0;
}
5576
/*
 * ndo_set_rx_mode handler: translate the device flags and multicast
 * list into the chip's accept mask and 64-bit multicast hash filter.
 */
static void rtl_set_rx_mode(struct net_device *dev)
{
	struct rtl8169_private *tp = netdev_priv(dev);
	void __iomem *ioaddr = tp->mmio_addr;
	unsigned long flags;
	u32 mc_filter[2];	/* Multicast hash filter */
	int rx_mode;
	u32 tmp = 0;

	if (dev->flags & IFF_PROMISC) {
		/* Unconditionally log net taps. */
		netif_notice(tp, link, dev, "Promiscuous mode enabled\n");
		rx_mode =
		    AcceptBroadcast | AcceptMulticast | AcceptMyPhys |
		    AcceptAllPhys;
		mc_filter[1] = mc_filter[0] = 0xffffffff;
	} else if ((netdev_mc_count(dev) > multicast_filter_limit) ||
		   (dev->flags & IFF_ALLMULTI)) {
		/* Too many to filter perfectly -- accept all multicasts. */
		rx_mode = AcceptBroadcast | AcceptMulticast | AcceptMyPhys;
		mc_filter[1] = mc_filter[0] = 0xffffffff;
	} else {
		struct netdev_hw_addr *ha;

		rx_mode = AcceptBroadcast | AcceptMyPhys;
		mc_filter[1] = mc_filter[0] = 0;
		/* Hash each address into one of the 64 filter bits. */
		netdev_for_each_mc_addr(ha, dev) {
			int bit_nr = ether_crc(ETH_ALEN, ha->addr) >> 26;
			mc_filter[bit_nr >> 5] |= 1 << (bit_nr & 31);
			rx_mode |= AcceptMulticast;
		}
	}

	spin_lock_irqsave(&tp->lock, flags);

	tmp = (RTL_R32(RxConfig) & ~RX_CONFIG_ACCEPT_MASK) | rx_mode;

	/* Newer chips expect the two filter words swapped and byteswapped. */
	if (tp->mac_version > RTL_GIGA_MAC_VER_06) {
		u32 data = mc_filter[0];

		mc_filter[0] = swab32(mc_filter[1]);
		mc_filter[1] = swab32(data);
	}

	RTL_W32(MAR0 + 4, mc_filter[1]);
	RTL_W32(MAR0 + 0, mc_filter[0]);

	RTL_W32(RxConfig, tmp);

	spin_unlock_irqrestore(&tp->lock, flags);
}
5628
/**
 * rtl8169_get_stats - Get rtl8169 read/write statistics
 * @dev: The Ethernet Device to get statistics for
 *
 * Get TX/RX statistics for rtl8169.  Refreshes the rx-missed counter
 * from the chip (when running) and returns the in-kernel stats block.
 */
static struct net_device_stats *rtl8169_get_stats(struct net_device *dev)
{
	struct rtl8169_private *tp = netdev_priv(dev);
	void __iomem *ioaddr = tp->mmio_addr;
	unsigned long flags;

	if (netif_running(dev)) {
		spin_lock_irqsave(&tp->lock, flags);
		rtl8169_rx_missed(dev, ioaddr);
		spin_unlock_irqrestore(&tp->lock, flags);
	}

	return &dev->stats;
}
5649
/*
 * Common suspend-side netdev teardown used by system sleep, runtime PM
 * and shutdown: power down the PLL and detach/stop the interface.
 * No-op if the interface is not up.
 */
static void rtl8169_net_suspend(struct net_device *dev)
{
	struct rtl8169_private *tp = netdev_priv(dev);

	if (!netif_running(dev))
		return;

	rtl_pll_power_down(tp);

	netif_device_detach(dev);
	netif_stop_queue(dev);
}
5662
5663#ifdef CONFIG_PM
5664
/* dev_pm_ops system-sleep callback (also used for freeze/poweroff). */
static int rtl8169_suspend(struct device *device)
{
	struct pci_dev *pdev = to_pci_dev(device);
	struct net_device *dev = pci_get_drvdata(pdev);

	rtl8169_net_suspend(dev);

	return 0;
}
5674
/*
 * Resume-side counterpart of rtl8169_net_suspend(): reattach the
 * device, power the PLL back up and schedule a chip reset to restart
 * traffic.
 */
static void __rtl8169_resume(struct net_device *dev)
{
	struct rtl8169_private *tp = netdev_priv(dev);

	netif_device_attach(dev);

	rtl_pll_power_up(tp);

	rtl8169_schedule_work(dev, rtl8169_reset_task);
}
5685
/* dev_pm_ops system-resume callback: reinit the PHY, then restart if up. */
static int rtl8169_resume(struct device *device)
{
	struct pci_dev *pdev = to_pci_dev(device);
	struct net_device *dev = pci_get_drvdata(pdev);
	struct rtl8169_private *tp = netdev_priv(dev);

	rtl8169_init_phy(dev, tp);

	if (netif_running(dev))
		__rtl8169_resume(dev);

	return 0;
}
5699
/*
 * Runtime-PM suspend: save the current WoL configuration, arm
 * wake-on-anything so the link can wake us, then suspend the netdev.
 * Skipped entirely while the interface is closed (no rings allocated).
 */
static int rtl8169_runtime_suspend(struct device *device)
{
	struct pci_dev *pdev = to_pci_dev(device);
	struct net_device *dev = pci_get_drvdata(pdev);
	struct rtl8169_private *tp = netdev_priv(dev);

	if (!tp->TxDescArray)
		return 0;

	spin_lock_irq(&tp->lock);
	tp->saved_wolopts = __rtl8169_get_wol(tp);
	__rtl8169_set_wol(tp, WAKE_ANY);
	spin_unlock_irq(&tp->lock);

	rtl8169_net_suspend(dev);

	return 0;
}
5718
/*
 * Runtime-PM resume: restore the WoL settings saved at runtime
 * suspend, reinit the PHY and restart the interface.
 */
static int rtl8169_runtime_resume(struct device *device)
{
	struct pci_dev *pdev = to_pci_dev(device);
	struct net_device *dev = pci_get_drvdata(pdev);
	struct rtl8169_private *tp = netdev_priv(dev);

	if (!tp->TxDescArray)
		return 0;

	spin_lock_irq(&tp->lock);
	__rtl8169_set_wol(tp, tp->saved_wolopts);
	tp->saved_wolopts = 0;
	spin_unlock_irq(&tp->lock);

	rtl8169_init_phy(dev, tp);

	__rtl8169_resume(dev);

	return 0;
}
5739
/*
 * Runtime-PM idle check: only allow runtime suspend while the
 * interface is closed (rings freed); -EBUSY keeps an open device awake.
 */
static int rtl8169_runtime_idle(struct device *device)
{
	struct pci_dev *pdev = to_pci_dev(device);
	struct net_device *dev = pci_get_drvdata(pdev);
	struct rtl8169_private *tp = netdev_priv(dev);

	return tp->TxDescArray ? -EBUSY : 0;
}
5748
/* System-sleep (suspend/hibernate) and runtime PM callbacks. */
static const struct dev_pm_ops rtl8169_pm_ops = {
	.suspend		= rtl8169_suspend,
	.resume			= rtl8169_resume,
	.freeze			= rtl8169_suspend,
	.thaw			= rtl8169_resume,
	.poweroff		= rtl8169_suspend,
	.restore		= rtl8169_resume,
	.runtime_suspend	= rtl8169_runtime_suspend,
	.runtime_resume		= rtl8169_runtime_resume,
	.runtime_idle		= rtl8169_runtime_idle,
};
5760
5761#define RTL8169_PM_OPS (&rtl8169_pm_ops)
5762
5763#else /* !CONFIG_PM */
5764
5765#define RTL8169_PM_OPS NULL
5766
5767#endif /* !CONFIG_PM */
5768
/*
 * PCI shutdown hook: quiesce the device, restore the permanent MAC
 * address, reset the chip, and — when actually powering off — arm
 * wake-from-D3 and drop into D3hot (keeping the receiver alive on the
 * 8168b variants where WoL otherwise fails).
 */
static void rtl_shutdown(struct pci_dev *pdev)
{
	struct net_device *dev = pci_get_drvdata(pdev);
	struct rtl8169_private *tp = netdev_priv(dev);
	void __iomem *ioaddr = tp->mmio_addr;

	rtl8169_net_suspend(dev);

	/* Restore original MAC address */
	rtl_rar_set(tp, dev->perm_addr);

	spin_lock_irq(&tp->lock);

	rtl8169_hw_reset(tp);

	spin_unlock_irq(&tp->lock);

	if (system_state == SYSTEM_POWER_OFF) {
		/* WoL fails with 8168b when the receiver is disabled. */
		if ((tp->mac_version == RTL_GIGA_MAC_VER_11 ||
		     tp->mac_version == RTL_GIGA_MAC_VER_12 ||
		     tp->mac_version == RTL_GIGA_MAC_VER_17) &&
		    (tp->features & RTL_FEATURE_WOL)) {
			pci_clear_master(pdev);

			RTL_W8(ChipCmd, CmdRxEnb);
			/* PCI commit */
			RTL_R8(ChipCmd);
		}

		pci_wake_from_d3(pdev, true);
		pci_set_power_state(pdev, PCI_D3hot);
	}
}
5803
/* PCI driver glue: probe/remove/shutdown plus the PM callbacks above. */
static struct pci_driver rtl8169_pci_driver = {
	.name		= MODULENAME,
	.id_table	= rtl8169_pci_tbl,
	.probe		= rtl8169_init_one,
	.remove		= __devexit_p(rtl8169_remove_one),
	.shutdown	= rtl_shutdown,
	.driver.pm	= RTL8169_PM_OPS,
};
5812
/* Module entry point: register the PCI driver. */
static int __init rtl8169_init_module(void)
{
	return pci_register_driver(&rtl8169_pci_driver);
}
5817
/* Module exit point: unregister the PCI driver. */
static void __exit rtl8169_cleanup_module(void)
{
	pci_unregister_driver(&rtl8169_pci_driver);
}

module_init(rtl8169_init_module);
module_exit(rtl8169_cleanup_module);
diff --git a/drivers/net/ethernet/realtek/sc92031.c b/drivers/net/ethernet/realtek/sc92031.c
new file mode 100644
index 000000000000..9da47337b7c3
--- /dev/null
+++ b/drivers/net/ethernet/realtek/sc92031.c
@@ -0,0 +1,1615 @@
1/* Silan SC92031 PCI Fast Ethernet Adapter driver
2 *
3 * Based on vendor drivers:
4 * Silan Fast Ethernet Netcard Driver:
5 * MODULE_AUTHOR ("gaoyonghong");
6 * MODULE_DESCRIPTION ("SILAN Fast Ethernet driver");
7 * MODULE_LICENSE("GPL");
8 * 8139D Fast Ethernet driver:
9 * (C) 2002 by gaoyonghong
10 * MODULE_AUTHOR ("gaoyonghong");
11 * MODULE_DESCRIPTION ("Rsltek 8139D PCI Fast Ethernet Adapter driver");
12 * MODULE_LICENSE("GPL");
13 * Both are almost identical and seem to be based on pci-skeleton.c
14 *
15 * Rewritten for 2.6 by Cesar Eduardo Barros
16 *
17 * A datasheet for this chip can be found at
18 * http://www.silan.com.cn/english/product/pdf/SC92031AY.pdf
19 */
20
21/* Note about set_mac_address: I don't know how to change the hardware
22 * matching, so you need to enable IFF_PROMISC when using it.
23 */
24
25#include <linux/interrupt.h>
26#include <linux/module.h>
27#include <linux/kernel.h>
28#include <linux/delay.h>
29#include <linux/pci.h>
30#include <linux/dma-mapping.h>
31#include <linux/netdevice.h>
32#include <linux/etherdevice.h>
33#include <linux/ethtool.h>
34#include <linux/crc32.h>
35
36#include <asm/irq.h>
37
#define SC92031_NAME "sc92031"

/* BAR 0 is MMIO, BAR 1 is PIO */
#ifndef SC92031_USE_BAR
#define SC92031_USE_BAR 0
#endif

/* Maximum number of multicast addresses to filter (vs. Rx-all-multicast). */
static int multicast_filter_limit = 64;
module_param(multicast_filter_limit, int, 0);
MODULE_PARM_DESC(multicast_filter_limit,
	"Maximum number of filtered multicast addresses");

/* Forced media selection; 0 (autodetect) unless overridden at load time. */
static int media;
module_param(media, int, 0);
MODULE_PARM_DESC(media, "Media type (0x00 = autodetect,"
			" 0x01 = 10M half, 0x02 = 10M full,"
			" 0x04 = 100M half, 0x08 = 100M full)");

/* Size of the in-memory receive ring. */
#define  RX_BUF_LEN_IDX  3 /* 0==8K, 1==16K, 2==32K, 3==64K ,4==128K*/
#define  RX_BUF_LEN	(8192 << RX_BUF_LEN_IDX)

/* Number of Tx descriptor registers. */
#define  NUM_TX_DESC	   4

/* max supported ethernet frame size -- must be at least (dev->mtu+14+4).*/
#define  MAX_ETH_FRAME_SIZE	  1536

/* Size of the Tx bounce buffers -- must be at least (dev->mtu+14+4). */
#define  TX_BUF_SIZE	MAX_ETH_FRAME_SIZE
#define  TX_BUF_TOT_LEN	(TX_BUF_SIZE * NUM_TX_DESC)

/* The following settings are log_2(bytes)-4: 0 == 16 bytes .. 6==1024, 7==end of packet. */
#define  RX_FIFO_THRESH	   7	 /* Rx buffer level before first PCI xfer. */

/* Time in jiffies before concluding the transmitter is hung. */
#define  TX_TIMEOUT	(4*HZ)

#define  SILAN_STATS_NUM	2	/* number of ETHTOOL_GSTATS */

/* media options */
#define  AUTOSELECT	0x00
#define  M10_HALF	0x01
#define  M10_FULL	0x02
#define  M100_HALF	0x04
#define  M100_FULL	0x08
86 /* Symbolic offsets to registers. */
enum silan_registers {
	Config0		= 0x00,	/* Config0 */
	Config1		= 0x04,	/* Config1 */
	RxBufWPtr	= 0x08,	/* Rx buffer write pointer */
	IntrStatus	= 0x0C,	/* Interrupt status */
	IntrMask	= 0x10,	/* Interrupt mask */
	RxbufAddr	= 0x14,	/* Rx buffer start address */
	RxBufRPtr	= 0x18,	/* Rx buffer read pointer */
	Txstatusall	= 0x1C,	/* Transmit status of all descriptors */
	TxStatus0	= 0x20,	/* Transmit status (Four 32bit registers). */
	TxAddr0		= 0x30,	/* Tx descriptors (also four 32bit). */
	RxConfig	= 0x40,	/* Rx configuration */
	MAC0		= 0x44,	/* Ethernet hardware address. */
	MAR0		= 0x4C,	/* Multicast filter. */
	RxStatus0	= 0x54,	/* Rx status */
	TxConfig	= 0x5C,	/* Tx configuration */
	PhyCtrl		= 0x60,	/* physical control */
	FlowCtrlConfig	= 0x64,	/* flow control */
	Miicmd0		= 0x68,	/* Mii command0 register */
	Miicmd1		= 0x6C,	/* Mii command1 register */
	Miistatus	= 0x70,	/* Mii status register */
	Timercnt	= 0x74,	/* Timer counter register */
	TimerIntr	= 0x78,	/* Timer interrupt register */
	PMConfig	= 0x7C,	/* Power Manager configuration */
	CRC0		= 0x80,	/* Power Manager CRC (two 32bit registers) */
	Wakeup0		= 0x88,	/* Power Manager wakeup (eight 64bit registers) */
	LSBCRC0		= 0xC8,	/* Power Manager LSBCRC (two 32bit registers) */
	TestD0		= 0xD0,
	TestD4		= 0xD4,
	TestD8		= 0xD8,
};
118
/* MII register numbers and bit masks used by this driver. */
#define MII_BMCR	0	/* Basic mode control register */
#define MII_BMSR	1	/* Basic mode status register */
#define MII_JAB		16	/* vendor-specific jabber control */
#define MII_OutputStatus 24	/* vendor-specific negotiated speed/duplex */

#define BMCR_FULLDPLX	0x0100	/* Full duplex */
#define BMCR_ANRESTART	0x0200	/* Auto negotiation restart */
#define BMCR_ANENABLE	0x1000	/* Enable auto negotiation */
#define BMCR_SPEED100	0x2000	/* Select 100Mbps */
#define BMSR_LSTATUS	0x0004	/* Link status */
#define PHY_16_JAB_ENB	0x1000
#define PHY_16_PORT_ENB	0x1
131
/* IntrStatus / IntrMask register bits. */
enum IntrStatusBits {
	LinkFail	= 0x80000000,
	LinkOK		= 0x40000000,
	TimeOut		= 0x20000000,
	RxOverflow	= 0x0040,
	RxOK		= 0x0020,
	TxOK		= 0x0001,
	/* All causes this driver handles. */
	IntrBits = LinkFail|LinkOK|TimeOut|RxOverflow|RxOK|TxOK,
};

/* Per-descriptor TxStatus0..3 bits. */
enum TxStatusBits {
	TxCarrierLost	= 0x20000000,
	TxAborted	= 0x10000000,
	TxOutOfWindow	= 0x08000000,
	TxNccShift	= 22,	/* collision-count field shift */
	EarlyTxThresShift = 16,	/* early-Tx threshold field shift */
	TxStatOK	= 0x8000,
	TxUnderrun	= 0x4000,
	TxOwn		= 0x2000,
};

/* Per-frame Rx status word bits (prepended to each frame in the ring). */
enum RxStatusBits {
	RxStatesOK	= 0x80000,
	RxBadAlign	= 0x40000,
	RxHugeFrame	= 0x20000,
	RxSmallFrame	= 0x10000,
	RxCRCOK		= 0x8000,
	RxCrlFrame	= 0x4000,
	Rx_Broadcast	= 0x2000,
	Rx_Multicast	= 0x1000,
	RxAddrMatch	= 0x0800,
	MiiErr		= 0x0400,
};

enum RxConfigBits {
	RxFullDx	= 0x80000000,
	RxEnb		= 0x40000000,
	RxSmall		= 0x20000000,
	RxHuge		= 0x10000000,
	RxErr		= 0x08000000,
	RxAllphys	= 0x04000000,
	RxMulticast	= 0x02000000,
	RxBroadcast	= 0x01000000,
	RxLoopBack	= (1 << 23) | (1 << 22),
	LowThresholdShift  = 12,
	HighThresholdShift = 2,
};

enum TxConfigBits {
	TxFullDx	= 0x80000000,
	TxEnb		= 0x40000000,
	TxEnbPad	= 0x20000000,
	TxEnbHuge	= 0x10000000,
	TxEnbFCS	= 0x08000000,
	TxNoBackOff	= 0x04000000,
	TxEnbPrem	= 0x02000000,
	TxCareLostCrs	= 0x1000000,
	TxExdCollNum	= 0xf00000,
	TxDataRate	= 0x80000,
};

enum PhyCtrlconfigbits {
	PhyCtrlAne		= 0x80000000,
	PhyCtrlSpd100		= 0x40000000,
	PhyCtrlSpd10		= 0x20000000,
	PhyCtrlPhyBaseAddr	= 0x1f000000,
	PhyCtrlDux		= 0x800000,
	PhyCtrlReset		= 0x400000,
};

enum FlowCtrlConfigBits {
	FlowCtrlFullDX	= 0x80000000,
	FlowCtrlEnb	= 0x40000000,
};

enum Config0Bits {
	Cfg0_Reset	= 0x80000000,
	Cfg0_Anaoff	= 0x40000000,
	Cfg0_LDPS	= 0x20000000,
};

enum Config1Bits {
	Cfg1_EarlyRx	= 1 << 31,
	Cfg1_EarlyTx	= 1 << 30,

	/* rx buffer size */
	Cfg1_Rcv8K	= 0x0,
	Cfg1_Rcv16K	= 0x1,
	Cfg1_Rcv32K	= 0x3,
	Cfg1_Rcv64K	= 0x7,
	Cfg1_Rcv128K	= 0xf,
};

enum MiiCmd0Bits {
	Mii_Divider	= 0x20000000,
	Mii_WRITE	= 0x400000,
	Mii_READ	= 0x200000,
	Mii_SCAN	= 0x100000,
	Mii_Tamod	= 0x80000,
	Mii_Drvmod	= 0x40000,
	Mii_mdc		= 0x20000,
	Mii_mdoen	= 0x10000,
	Mii_mdo		= 0x8000,
	Mii_mdi		= 0x4000,
};

enum MiiStatusBits {
	Mii_StatusBusy	= 0x80000000,
};

enum PMConfigBits {
	PM_Enable	= 1 << 31,
	PM_LongWF	= 1 << 30,
	PM_Magic	= 1 << 29,
	PM_LANWake	= 1 << 28,
	PM_LWPTN	= (1 << 27 | 1<< 26),
	PM_LinkUp	= 1 << 25,
	PM_WakeUp	= 1 << 24,
};
251
252/* Locking rules:
253 * priv->lock protects most of the fields of priv and most of the
254 * hardware registers. It does not have to protect against softirqs
255 * between sc92031_disable_interrupts and sc92031_enable_interrupts;
256 * it also does not need to be used in ->open and ->stop while the
257 * device interrupts are off.
258 * Not having to protect against softirqs is very useful due to heavy
259 * use of mdelay() at _sc92031_reset.
260 * Functions prefixed with _sc92031_ must be called with the lock held;
261 * functions prefixed with sc92031_ must be called without the lock held.
262 * Use mmiowb() before unlocking if the hardware was written to.
263 */
264
265/* Locking rules for the interrupt:
266 * - the interrupt and the tasklet never run at the same time
267 * - neither run between sc92031_disable_interrupts and
268 * sc92031_enable_interrupt
269 */
270
/* Per-device private state; see the locking rules comment above. */
struct sc92031_priv {
	spinlock_t		lock;
	/* iomap.h cookie */
	void __iomem		*port_base;
	/* pci device structure */
	struct pci_dev		*pdev;
	/* tasklet */
	struct tasklet_struct	tasklet;

	/* CPU address of rx ring */
	void			*rx_ring;
	/* PCI address of rx ring */
	dma_addr_t		rx_ring_dma_addr;
	/* PCI address of rx ring read pointer */
	dma_addr_t		rx_ring_tail;

	/* tx ring write index */
	unsigned		tx_head;
	/* tx ring read index */
	unsigned		tx_tail;
	/* CPU address of tx bounce buffer */
	void			*tx_bufs;
	/* PCI address of tx bounce buffer */
	dma_addr_t		tx_bufs_dma_addr;

	/* copies of some hardware registers */
	u32			intr_status;
	/* mask to restore after ISR/tasklet; 0 while interrupts disabled */
	atomic_t		intr_mask;
	u32			rx_config;
	u32			tx_config;
	u32			pm_config;

	/* copy of some flags from dev->flags */
	unsigned int		mc_flags;

	/* for ETHTOOL_GSTATS */
	u64			tx_timeouts;
	u64			rx_loss;

	/* for dev->get_stats */
	long			rx_value;
};
313
/* I don't know which registers can be safely read; however, I can guess
 * MAC0 is one of them.
 * NOTE(review): called right after IntrMask writes — presumably acts as
 * a read-back to flush the posted MMIO write; confirm against datasheet.
 */
static inline void _sc92031_dummy_read(void __iomem *port_base)
{
	ioread32(port_base + MAC0);
}
320
321static u32 _sc92031_mii_wait(void __iomem *port_base)
322{
323 u32 mii_status;
324
325 do {
326 udelay(10);
327 mii_status = ioread32(port_base + Miistatus);
328 } while (mii_status & Mii_StatusBusy);
329
330 return mii_status;
331}
332
/*
 * Issue one MII command: idle the engine (divider only), wait, program
 * the argument register, then kick the command and wait for completion.
 * Returns the final Miistatus value.
 */
static u32 _sc92031_mii_cmd(void __iomem *port_base, u32 cmd0, u32 cmd1)
{
	iowrite32(Mii_Divider, port_base + Miicmd0);

	_sc92031_mii_wait(port_base);

	iowrite32(cmd1, port_base + Miicmd1);
	iowrite32(Mii_Divider | cmd0, port_base + Miicmd0);

	return _sc92031_mii_wait(port_base);
}
344
/* Start MII scan mode; 0x1 << 6 matches the reg-field encoding used by
 * _sc92031_mii_read, i.e. presumably register 1 (BMSR) — confirm. */
static void _sc92031_mii_scan(void __iomem *port_base)
{
	_sc92031_mii_cmd(port_base, Mii_SCAN, 0x1 << 6);
}
349
/* Read a PHY register: register index goes in bits [10:6] of the
 * argument; the 16-bit result is the command status shifted down 13. */
static u16 _sc92031_mii_read(void __iomem *port_base, unsigned reg)
{
	return _sc92031_mii_cmd(port_base, Mii_READ, reg << 6) >> 13;
}
354
/* Write a PHY register: register index at bit 6, data value at bit 11. */
static void _sc92031_mii_write(void __iomem *port_base, unsigned reg, u16 val)
{
	_sc92031_mii_cmd(port_base, Mii_WRITE, (reg << 6) | ((u32)val << 11));
}
359
/*
 * Mask all device interrupts and wait until neither the ISR nor the
 * tasklet can still be running.  Per the locking rules above, called
 * WITHOUT priv->lock held.
 */
static void sc92031_disable_interrupts(struct net_device *dev)
{
	struct sc92031_priv *priv = netdev_priv(dev);
	void __iomem *port_base = priv->port_base;

	/* tell the tasklet/interrupt not to enable interrupts */
	atomic_set(&priv->intr_mask, 0);
	wmb();

	/* stop interrupts */
	iowrite32(0, port_base + IntrMask);
	_sc92031_dummy_read(port_base);
	mmiowb();

	/* wait for any concurrent interrupt/tasklet to finish */
	synchronize_irq(dev->irq);
	tasklet_disable(&priv->tasklet);
}
378
/*
 * Re-enable the tasklet and unmask all handled interrupt causes.
 * Mirror image of sc92031_disable_interrupts(); called without
 * priv->lock held.
 */
static void sc92031_enable_interrupts(struct net_device *dev)
{
	struct sc92031_priv *priv = netdev_priv(dev);
	void __iomem *port_base = priv->port_base;

	tasklet_enable(&priv->tasklet);

	/* publish the mask copy before the hardware sees it */
	atomic_set(&priv->intr_mask, IntrBits);
	wmb();

	iowrite32(IntrBits, port_base + IntrMask);
	mmiowb();
}
392
393static void _sc92031_disable_tx_rx(struct net_device *dev)
394{
395 struct sc92031_priv *priv = netdev_priv(dev);
396 void __iomem *port_base = priv->port_base;
397
398 priv->rx_config &= ~RxEnb;
399 priv->tx_config &= ~TxEnb;
400 iowrite32(priv->rx_config, port_base + RxConfig);
401 iowrite32(priv->tx_config, port_base + TxConfig);
402}
403
404static void _sc92031_enable_tx_rx(struct net_device *dev)
405{
406 struct sc92031_priv *priv = netdev_priv(dev);
407 void __iomem *port_base = priv->port_base;
408
409 priv->rx_config |= RxEnb;
410 priv->tx_config |= TxEnb;
411 iowrite32(priv->rx_config, port_base + RxConfig);
412 iowrite32(priv->tx_config, port_base + TxConfig);
413}
414
415static void _sc92031_tx_clear(struct net_device *dev)
416{
417 struct sc92031_priv *priv = netdev_priv(dev);
418
419 while (priv->tx_head - priv->tx_tail > 0) {
420 priv->tx_tail++;
421 dev->stats.tx_dropped++;
422 }
423 priv->tx_head = priv->tx_tail = 0;
424}
425
426static void _sc92031_set_mar(struct net_device *dev)
427{
428 struct sc92031_priv *priv = netdev_priv(dev);
429 void __iomem *port_base = priv->port_base;
430 u32 mar0 = 0, mar1 = 0;
431
432 if ((dev->flags & IFF_PROMISC) ||
433 netdev_mc_count(dev) > multicast_filter_limit ||
434 (dev->flags & IFF_ALLMULTI))
435 mar0 = mar1 = 0xffffffff;
436 else if (dev->flags & IFF_MULTICAST) {
437 struct netdev_hw_addr *ha;
438
439 netdev_for_each_mc_addr(ha, dev) {
440 u32 crc;
441 unsigned bit = 0;
442
443 crc = ~ether_crc(ETH_ALEN, ha->addr);
444 crc >>= 24;
445
446 if (crc & 0x01) bit |= 0x02;
447 if (crc & 0x02) bit |= 0x01;
448 if (crc & 0x10) bit |= 0x20;
449 if (crc & 0x20) bit |= 0x10;
450 if (crc & 0x40) bit |= 0x08;
451 if (crc & 0x80) bit |= 0x04;
452
453 if (bit > 31)
454 mar0 |= 0x1 << (bit - 32);
455 else
456 mar1 |= 0x1 << bit;
457 }
458 }
459
460 iowrite32(mar0, port_base + MAR0);
461 iowrite32(mar1, port_base + MAR0 + 4);
462}
463
/*
 * Recompute the Rx filter bits in priv->rx_config from dev->flags
 * (promiscuous / multicast / broadcast) and push them to RxConfig —
 * but only when the carrier is up and the relevant flags changed.
 */
static void _sc92031_set_rx_config(struct net_device *dev)
{
	struct sc92031_priv *priv = netdev_priv(dev);
	void __iomem *port_base = priv->port_base;
	unsigned int old_mc_flags;
	u32 rx_config_bits = 0;

	old_mc_flags = priv->mc_flags;

	/* promiscuous: accept everything, including errored frames */
	if (dev->flags & IFF_PROMISC)
		rx_config_bits |= RxSmall | RxHuge | RxErr | RxBroadcast
				| RxMulticast | RxAllphys;

	if (dev->flags & (IFF_ALLMULTI | IFF_MULTICAST))
		rx_config_bits |= RxMulticast;

	if (dev->flags & IFF_BROADCAST)
		rx_config_bits |= RxBroadcast;

	priv->rx_config &= ~(RxSmall | RxHuge | RxErr | RxBroadcast
			| RxMulticast | RxAllphys);
	priv->rx_config |= rx_config_bits;

	/* remember which flags produced this config for change detection */
	priv->mc_flags = dev->flags & (IFF_PROMISC | IFF_ALLMULTI
			| IFF_MULTICAST | IFF_BROADCAST);

	if (netif_carrier_ok(dev) && priv->mc_flags != old_mc_flags)
		iowrite32(priv->rx_config, port_base + RxConfig);
}
493
/*
 * Probe the PHY for link state and (re)program Tx/Rx configuration for
 * the negotiated speed/duplex.  Returns true when the link is up.
 * Updates carrier state and logs the transition (rate-limited).
 */
static bool _sc92031_check_media(struct net_device *dev)
{
	struct sc92031_priv *priv = netdev_priv(dev);
	void __iomem *port_base = priv->port_base;
	u16 bmsr;

	bmsr = _sc92031_mii_read(port_base, MII_BMSR);
	rmb();
	if (bmsr & BMSR_LSTATUS) {
		bool speed_100, duplex_full;
		u32 flow_ctrl_config = 0;
		/* vendor-specific status: bit1 = 100Mbps, bit2 = full dup */
		u16 output_status = _sc92031_mii_read(port_base,
				MII_OutputStatus);
		_sc92031_mii_scan(port_base);

		speed_100 = output_status & 0x2;
		duplex_full = output_status & 0x4;

		/* Initial Tx/Rx configuration */
		priv->rx_config = (0x40 << LowThresholdShift) | (0x1c0 << HighThresholdShift);
		priv->tx_config = 0x48800000;

		/* NOTE: vendor driver had dead code here to enable tx padding */

		if (!speed_100)
			priv->tx_config |= 0x80000;

		/* configure rx mode */
		_sc92031_set_rx_config(dev);

		if (duplex_full) {
			priv->rx_config |= RxFullDx;
			priv->tx_config |= TxFullDx;
			flow_ctrl_config = FlowCtrlFullDX | FlowCtrlEnb;
		} else {
			priv->rx_config &= ~RxFullDx;
			priv->tx_config &= ~TxFullDx;
		}

		_sc92031_set_mar(dev);
		_sc92031_set_rx_config(dev);
		_sc92031_enable_tx_rx(dev);
		iowrite32(flow_ctrl_config, port_base + FlowCtrlConfig);

		netif_carrier_on(dev);

		if (printk_ratelimit())
			printk(KERN_INFO "%s: link up, %sMbps, %s-duplex\n",
				dev->name,
				speed_100 ? "100" : "10",
				duplex_full ? "full" : "half");
		return true;
	} else {
		_sc92031_mii_scan(port_base);

		netif_carrier_off(dev);

		_sc92031_disable_tx_rx(dev);

		if (printk_ratelimit())
			printk(KERN_INFO "%s: link down\n", dev->name);
		return false;
	}
}
558
/*
 * Reset the PHY and program speed/duplex/autoneg from the "media"
 * module parameter, then restart MII scanning.  Leaves the carrier off
 * and the Tx queue stopped; the next link check re-enables them.
 */
static void _sc92031_phy_reset(struct net_device *dev)
{
	struct sc92031_priv *priv = netdev_priv(dev);
	void __iomem *port_base = priv->port_base;
	u32 phy_ctrl;

	phy_ctrl = ioread32(port_base + PhyCtrl);
	phy_ctrl &= ~(PhyCtrlDux | PhyCtrlSpd100 | PhyCtrlSpd10);
	phy_ctrl |= PhyCtrlAne | PhyCtrlReset;

	switch (media) {
	default:
	case AUTOSELECT:
		/* advertise everything */
		phy_ctrl |= PhyCtrlDux | PhyCtrlSpd100 | PhyCtrlSpd10;
		break;
	case M10_HALF:
		phy_ctrl |= PhyCtrlSpd10;
		break;
	case M10_FULL:
		phy_ctrl |= PhyCtrlDux | PhyCtrlSpd10;
		break;
	case M100_HALF:
		phy_ctrl |= PhyCtrlSpd100;
		break;
	case M100_FULL:
		phy_ctrl |= PhyCtrlDux | PhyCtrlSpd100;
		break;
	}

	/* assert reset, then release it */
	iowrite32(phy_ctrl, port_base + PhyCtrl);
	mdelay(10);

	phy_ctrl &= ~PhyCtrlReset;
	iowrite32(phy_ctrl, port_base + PhyCtrl);
	mdelay(1);

	_sc92031_mii_write(port_base, MII_JAB,
			PHY_16_JAB_ENB | PHY_16_PORT_ENB);
	_sc92031_mii_scan(port_base);

	netif_carrier_off(dev);
	netif_stop_queue(dev);
}
602
/*
 * Full chip re-initialization: soft reset, clear filters, rearm the Rx
 * ring, drop any queued Tx, reset the PHY and re-check the link.
 * Must be called with priv->lock held (leading-underscore convention);
 * note the heavy mdelay() use mentioned in the locking rules above.
 */
static void _sc92031_reset(struct net_device *dev)
{
	struct sc92031_priv *priv = netdev_priv(dev);
	void __iomem *port_base = priv->port_base;

	/* disable PM */
	iowrite32(0, port_base + PMConfig);

	/* soft reset the chip */
	iowrite32(Cfg0_Reset, port_base + Config0);
	mdelay(200);

	iowrite32(0, port_base + Config0);
	mdelay(10);

	/* disable interrupts */
	iowrite32(0, port_base + IntrMask);

	/* clear multicast address */
	iowrite32(0, port_base + MAR0);
	iowrite32(0, port_base + MAR0 + 4);

	/* init rx ring */
	iowrite32(priv->rx_ring_dma_addr, port_base + RxbufAddr);
	priv->rx_ring_tail = priv->rx_ring_dma_addr;

	/* init tx ring */
	_sc92031_tx_clear(dev);

	/* clear old register values */
	priv->intr_status = 0;
	atomic_set(&priv->intr_mask, 0);
	priv->rx_config = 0;
	priv->tx_config = 0;
	priv->mc_flags = 0;

	/* configure rx buffer size */
	/* NOTE: vendor driver had dead code here to enable early tx/rx */
	iowrite32(Cfg1_Rcv64K, port_base + Config1);

	_sc92031_phy_reset(dev);
	_sc92031_check_media(dev);

	/* calculate rx fifo overflow */
	priv->rx_value = 0;

	/* enable PM */
	iowrite32(priv->pm_config, port_base + PMConfig);

	/* clear intr register */
	ioread32(port_base + IntrStatus);
}
655
656static void _sc92031_tx_tasklet(struct net_device *dev)
657{
658 struct sc92031_priv *priv = netdev_priv(dev);
659 void __iomem *port_base = priv->port_base;
660
661 unsigned old_tx_tail;
662 unsigned entry;
663 u32 tx_status;
664
665 old_tx_tail = priv->tx_tail;
666 while (priv->tx_head - priv->tx_tail > 0) {
667 entry = priv->tx_tail % NUM_TX_DESC;
668 tx_status = ioread32(port_base + TxStatus0 + entry * 4);
669
670 if (!(tx_status & (TxStatOK | TxUnderrun | TxAborted)))
671 break;
672
673 priv->tx_tail++;
674
675 if (tx_status & TxStatOK) {
676 dev->stats.tx_bytes += tx_status & 0x1fff;
677 dev->stats.tx_packets++;
678 /* Note: TxCarrierLost is always asserted at 100mbps. */
679 dev->stats.collisions += (tx_status >> 22) & 0xf;
680 }
681
682 if (tx_status & (TxOutOfWindow | TxAborted)) {
683 dev->stats.tx_errors++;
684
685 if (tx_status & TxAborted)
686 dev->stats.tx_aborted_errors++;
687
688 if (tx_status & TxCarrierLost)
689 dev->stats.tx_carrier_errors++;
690
691 if (tx_status & TxOutOfWindow)
692 dev->stats.tx_window_errors++;
693 }
694
695 if (tx_status & TxUnderrun)
696 dev->stats.tx_fifo_errors++;
697 }
698
699 if (priv->tx_tail != old_tx_tail)
700 if (netif_queue_stopped(dev))
701 netif_wake_queue(dev);
702}
703
704static void _sc92031_rx_tasklet_error(struct net_device *dev,
705 u32 rx_status, unsigned rx_size)
706{
707 if(rx_size > (MAX_ETH_FRAME_SIZE + 4) || rx_size < 16) {
708 dev->stats.rx_errors++;
709 dev->stats.rx_length_errors++;
710 }
711
712 if (!(rx_status & RxStatesOK)) {
713 dev->stats.rx_errors++;
714
715 if (rx_status & (RxHugeFrame | RxSmallFrame))
716 dev->stats.rx_length_errors++;
717
718 if (rx_status & RxBadAlign)
719 dev->stats.rx_frame_errors++;
720
721 if (!(rx_status & RxCRCOK))
722 dev->stats.rx_crc_errors++;
723 } else {
724 struct sc92031_priv *priv = netdev_priv(dev);
725 priv->rx_loss++;
726 }
727}
728
/*
 * Drain newly received frames from the single circular Rx buffer.
 * The hardware write pointer (RxBufWPtr) says how far the chip has
 * written; each frame is prefixed by a 4-byte status word containing
 * its length.  Frames are copied into fresh skbs (handling wrap-around)
 * and RxBufRPtr is advanced when done.
 */
static void _sc92031_rx_tasklet(struct net_device *dev)
{
	struct sc92031_priv *priv = netdev_priv(dev);
	void __iomem *port_base = priv->port_base;

	dma_addr_t rx_ring_head;
	unsigned rx_len;
	unsigned rx_ring_offset;
	void *rx_ring = priv->rx_ring;

	rx_ring_head = ioread32(port_base + RxBufWPtr);
	rmb();

	/* rx_ring_head is only 17 bits in the RxBufWPtr register.
	 * we need to change it to 32 bits physical address
	 */
	rx_ring_head &= (dma_addr_t)(RX_BUF_LEN - 1);
	rx_ring_head |= priv->rx_ring_dma_addr & ~(dma_addr_t)(RX_BUF_LEN - 1);
	if (rx_ring_head < priv->rx_ring_dma_addr)
		rx_ring_head += RX_BUF_LEN;

	/* bytes pending between our read pointer and the hw write pointer */
	if (rx_ring_head >= priv->rx_ring_tail)
		rx_len = rx_ring_head - priv->rx_ring_tail;
	else
		rx_len = RX_BUF_LEN - (priv->rx_ring_tail - rx_ring_head);

	if (!rx_len)
		return;

	if (unlikely(rx_len > RX_BUF_LEN)) {
		if (printk_ratelimit())
			printk(KERN_ERR "%s: rx packets length > rx buffer\n",
					dev->name);
		return;
	}

	rx_ring_offset = (priv->rx_ring_tail - priv->rx_ring_dma_addr) % RX_BUF_LEN;

	while (rx_len) {
		u32 rx_status;
		unsigned rx_size, rx_size_align, pkt_size;
		struct sk_buff *skb;

		rx_status = le32_to_cpup((__le32 *)(rx_ring + rx_ring_offset));
		rmb();

		rx_size = rx_status >> 20;
		rx_size_align = (rx_size + 3) & ~3;	/* for 4 bytes aligned */
		pkt_size = rx_size - 4;	/* Omit the four octet CRC from the length. */

		rx_ring_offset = (rx_ring_offset + 4) % RX_BUF_LEN;

		if (unlikely(rx_status == 0 ||
				rx_size > (MAX_ETH_FRAME_SIZE + 4) ||
				rx_size < 16 ||
				!(rx_status & RxStatesOK))) {
			_sc92031_rx_tasklet_error(dev, rx_status, rx_size);
			break;
		}

		if (unlikely(rx_size_align + 4 > rx_len)) {
			if (printk_ratelimit())
				printk(KERN_ERR "%s: rx_len is too small\n", dev->name);
			break;
		}

		rx_len -= rx_size_align + 4;

		skb = netdev_alloc_skb_ip_align(dev, pkt_size);
		if (unlikely(!skb)) {
			if (printk_ratelimit())
				printk(KERN_ERR "%s: Couldn't allocate a skb_buff for a packet of size %u\n",
						dev->name, pkt_size);
			goto next;
		}

		/* copy in two pieces when the frame wraps the ring end */
		if ((rx_ring_offset + pkt_size) > RX_BUF_LEN) {
			memcpy(skb_put(skb, RX_BUF_LEN - rx_ring_offset),
				rx_ring + rx_ring_offset, RX_BUF_LEN - rx_ring_offset);
			memcpy(skb_put(skb, pkt_size - (RX_BUF_LEN - rx_ring_offset)),
				rx_ring, pkt_size - (RX_BUF_LEN - rx_ring_offset));
		} else {
			memcpy(skb_put(skb, pkt_size), rx_ring + rx_ring_offset, pkt_size);
		}

		skb->protocol = eth_type_trans(skb, dev);
		netif_rx(skb);

		dev->stats.rx_bytes += pkt_size;
		dev->stats.rx_packets++;

		if (rx_status & Rx_Multicast)
			dev->stats.multicast++;

	next:
		rx_ring_offset = (rx_ring_offset + rx_size_align) % RX_BUF_LEN;
	}
	mb();

	/* tell the hardware how far we have consumed */
	priv->rx_ring_tail = rx_ring_head;
	iowrite32(priv->rx_ring_tail, port_base + RxBufRPtr);
}
831
832static void _sc92031_link_tasklet(struct net_device *dev)
833{
834 if (_sc92031_check_media(dev))
835 netif_wake_queue(dev);
836 else {
837 netif_stop_queue(dev);
838 dev->stats.tx_carrier_errors++;
839 }
840}
841
/*
 * Bottom half: dispatch the interrupt causes latched in
 * priv->intr_status by sc92031_interrupt(), then restore the cached
 * interrupt mask (0 if interrupts were disabled meanwhile).
 */
static void sc92031_tasklet(unsigned long data)
{
	struct net_device *dev = (struct net_device *)data;
	struct sc92031_priv *priv = netdev_priv(dev);
	void __iomem *port_base = priv->port_base;
	u32 intr_status, intr_mask;

	intr_status = priv->intr_status;

	spin_lock(&priv->lock);

	if (unlikely(!netif_running(dev)))
		goto out;

	if (intr_status & TxOK)
		_sc92031_tx_tasklet(dev);

	if (intr_status & RxOK)
		_sc92031_rx_tasklet(dev);

	if (intr_status & RxOverflow)
		dev->stats.rx_errors++;

	if (intr_status & TimeOut) {
		dev->stats.rx_errors++;
		dev->stats.rx_length_errors++;
	}

	if (intr_status & (LinkFail | LinkOK))
		_sc92031_link_tasklet(dev);

out:
	/* re-enable whatever mask is currently published */
	intr_mask = atomic_read(&priv->intr_mask);
	rmb();

	iowrite32(intr_mask, port_base + IntrMask);
	mmiowb();

	spin_unlock(&priv->lock);
}
882
/*
 * Hard interrupt handler: mask interrupts, read-and-latch IntrStatus,
 * and defer all real work to the tasklet (which re-enables the mask).
 * A read of 0xffffffff means the device has disappeared.
 */
static irqreturn_t sc92031_interrupt(int irq, void *dev_id)
{
	struct net_device *dev = dev_id;
	struct sc92031_priv *priv = netdev_priv(dev);
	void __iomem *port_base = priv->port_base;
	u32 intr_status, intr_mask;

	/* mask interrupts before clearing IntrStatus */
	iowrite32(0, port_base + IntrMask);
	_sc92031_dummy_read(port_base);

	intr_status = ioread32(port_base + IntrStatus);
	if (unlikely(intr_status == 0xffffffff))
		return IRQ_NONE;	/* hardware has gone missing */

	intr_status &= IntrBits;
	if (!intr_status)
		goto out_none;

	priv->intr_status = intr_status;
	tasklet_schedule(&priv->tasklet);

	return IRQ_HANDLED;

out_none:
	/* not ours: restore the published mask before returning */
	intr_mask = atomic_read(&priv->intr_mask);
	rmb();

	iowrite32(intr_mask, port_base + IntrMask);
	mmiowb();

	return IRQ_NONE;
}
916
/*
 * ndo_get_stats: return the accumulated statistics, folding the
 * hardware Rx FIFO error counter (high half of RxStatus0) into
 * rx_fifo_errors while the interface is up.
 */
static struct net_device_stats *sc92031_get_stats(struct net_device *dev)
{
	struct sc92031_priv *priv = netdev_priv(dev);
	void __iomem *port_base = priv->port_base;

	/* FIXME I do not understand what is this trying to do. */
	if (netif_running(dev)) {
		int temp;

		spin_lock_bh(&priv->lock);

		/* Update the error count. */
		temp = (ioread32(port_base + RxStatus0) >> 16) & 0xffff;

		/* counter saturated: fold it into the running base value */
		if (temp == 0xffff) {
			priv->rx_value += temp;
			dev->stats.rx_fifo_errors = priv->rx_value;
		} else
			dev->stats.rx_fifo_errors = temp + priv->rx_value;

		spin_unlock_bh(&priv->lock);
	}

	return &dev->stats;
}
942
/*
 * ndo_start_xmit: queue one frame for transmission.  The packet is
 * copied (with checksum completion) into a fixed per-descriptor bounce
 * buffer, padded to the minimum Ethernet length, and handed to the
 * hardware by writing TxStatus0[entry].  Always consumes the skb.
 */
static netdev_tx_t sc92031_start_xmit(struct sk_buff *skb,
				      struct net_device *dev)
{
	struct sc92031_priv *priv = netdev_priv(dev);
	void __iomem *port_base = priv->port_base;
	unsigned len;
	unsigned entry;
	u32 tx_status;

	if (unlikely(skb->len > TX_BUF_SIZE)) {
		dev->stats.tx_dropped++;
		goto out;
	}

	spin_lock(&priv->lock);

	if (unlikely(!netif_carrier_ok(dev))) {
		dev->stats.tx_dropped++;
		goto out_unlock;
	}

	BUG_ON(priv->tx_head - priv->tx_tail >= NUM_TX_DESC);

	entry = priv->tx_head++ % NUM_TX_DESC;

	skb_copy_and_csum_dev(skb, priv->tx_bufs + entry * TX_BUF_SIZE);

	len = skb->len;
	if (len < ETH_ZLEN) {
		/* pad short frames with zeroes up to the minimum length */
		memset(priv->tx_bufs + entry * TX_BUF_SIZE + len,
				0, ETH_ZLEN - len);
		len = ETH_ZLEN;
	}

	wmb();

	/* 0x30000 == 3 << EarlyTxThresShift, 0x50000 == 5 << EarlyTxThresShift:
	 * presumably the early-Tx threshold scaled with frame length — confirm
	 * against the SC92031 datasheet. */
	if (len < 100)
		tx_status = len;
	else if (len < 300)
		tx_status = 0x30000 | len;
	else
		tx_status = 0x50000 | len;

	iowrite32(priv->tx_bufs_dma_addr + entry * TX_BUF_SIZE,
			port_base + TxAddr0 + entry * 4);
	iowrite32(tx_status, port_base + TxStatus0 + entry * 4);
	mmiowb();

	if (priv->tx_head - priv->tx_tail >= NUM_TX_DESC)
		netif_stop_queue(dev);

out_unlock:
	spin_unlock(&priv->lock);

out:
	dev_kfree_skb(skb);

	return NETDEV_TX_OK;
}
1002
/*
 * ndo_open: allocate the coherent Rx ring and Tx bounce buffers, grab
 * the (shared) interrupt line, reset the chip under the lock and then
 * enable interrupts.  Errors unwind through the goto chain below.
 */
static int sc92031_open(struct net_device *dev)
{
	int err;
	struct sc92031_priv *priv = netdev_priv(dev);
	struct pci_dev *pdev = priv->pdev;

	priv->rx_ring = pci_alloc_consistent(pdev, RX_BUF_LEN,
			&priv->rx_ring_dma_addr);
	if (unlikely(!priv->rx_ring)) {
		err = -ENOMEM;
		goto out_alloc_rx_ring;
	}

	priv->tx_bufs = pci_alloc_consistent(pdev, TX_BUF_TOT_LEN,
			&priv->tx_bufs_dma_addr);
	if (unlikely(!priv->tx_bufs)) {
		err = -ENOMEM;
		goto out_alloc_tx_bufs;
	}
	priv->tx_head = priv->tx_tail = 0;

	err = request_irq(pdev->irq, sc92031_interrupt,
			IRQF_SHARED, dev->name, dev);
	if (unlikely(err < 0))
		goto out_request_irq;

	priv->pm_config = 0;

	/* Interrupts already disabled by sc92031_stop or sc92031_probe */
	spin_lock_bh(&priv->lock);

	_sc92031_reset(dev);
	mmiowb();

	spin_unlock_bh(&priv->lock);
	sc92031_enable_interrupts(dev);

	/* only start transmitting once the link is actually up */
	if (netif_carrier_ok(dev))
		netif_start_queue(dev);
	else
		netif_tx_disable(dev);

	return 0;

out_request_irq:
	pci_free_consistent(pdev, TX_BUF_TOT_LEN, priv->tx_bufs,
			priv->tx_bufs_dma_addr);
out_alloc_tx_bufs:
	pci_free_consistent(pdev, RX_BUF_LEN, priv->rx_ring,
			priv->rx_ring_dma_addr);
out_alloc_rx_ring:
	return err;
}
1056
/*
 * ndo_stop: quiesce the hardware (interrupts first, then Tx/Rx), drop
 * pending Tx, and release the IRQ and DMA memory acquired in open.
 */
static int sc92031_stop(struct net_device *dev)
{
	struct sc92031_priv *priv = netdev_priv(dev);
	struct pci_dev *pdev = priv->pdev;

	netif_tx_disable(dev);

	/* Disable interrupts, stop Tx and Rx. */
	sc92031_disable_interrupts(dev);

	spin_lock_bh(&priv->lock);

	_sc92031_disable_tx_rx(dev);
	_sc92031_tx_clear(dev);
	mmiowb();

	spin_unlock_bh(&priv->lock);

	free_irq(pdev->irq, dev);
	pci_free_consistent(pdev, TX_BUF_TOT_LEN, priv->tx_bufs,
			priv->tx_bufs_dma_addr);
	pci_free_consistent(pdev, RX_BUF_LEN, priv->rx_ring,
			priv->rx_ring_dma_addr);

	return 0;
}
1083
/*
 * ndo_set_multicast_list: reprogram the multicast hash filter and the
 * Rx filter configuration under the lock.
 */
static void sc92031_set_multicast_list(struct net_device *dev)
{
	struct sc92031_priv *priv = netdev_priv(dev);

	spin_lock_bh(&priv->lock);

	_sc92031_set_mar(dev);
	_sc92031_set_rx_config(dev);
	mmiowb();

	spin_unlock_bh(&priv->lock);
}
1096
/*
 * ndo_tx_timeout: recover a hung transmitter with a full chip reset,
 * counting the event for ethtool statistics.
 */
static void sc92031_tx_timeout(struct net_device *dev)
{
	struct sc92031_priv *priv = netdev_priv(dev);

	/* Disable interrupts by clearing the interrupt mask.*/
	sc92031_disable_interrupts(dev);

	spin_lock(&priv->lock);

	priv->tx_timeouts++;

	_sc92031_reset(dev);
	mmiowb();

	spin_unlock(&priv->lock);

	/* enable interrupts */
	sc92031_enable_interrupts(dev);

	if (netif_carrier_ok(dev))
		netif_wake_queue(dev);
}
1119
#ifdef CONFIG_NET_POLL_CONTROLLER
/*
 * Netpoll hook: run the interrupt handler and, if it latched work,
 * the tasklet synchronously with the IRQ line disabled.
 */
static void sc92031_poll_controller(struct net_device *dev)
{
	disable_irq(dev->irq);
	if (sc92031_interrupt(dev->irq, dev) != IRQ_NONE)
		sc92031_tasklet((unsigned long)dev);
	enable_irq(dev->irq);
}
#endif
1129
/*
 * ethtool get_settings: report supported/advertised modes from the
 * PhyCtrl register bits and the current speed/duplex from the PHY's
 * vendor-specific output status register.
 */
static int sc92031_ethtool_get_settings(struct net_device *dev,
		struct ethtool_cmd *cmd)
{
	struct sc92031_priv *priv = netdev_priv(dev);
	void __iomem *port_base = priv->port_base;
	u8 phy_address;
	u32 phy_ctrl;
	u16 output_status;

	spin_lock_bh(&priv->lock);

	phy_address = ioread32(port_base + Miicmd1) >> 27;
	phy_ctrl = ioread32(port_base + PhyCtrl);

	output_status = _sc92031_mii_read(port_base, MII_OutputStatus);
	_sc92031_mii_scan(port_base);
	mmiowb();

	spin_unlock_bh(&priv->lock);

	cmd->supported = SUPPORTED_10baseT_Half | SUPPORTED_10baseT_Full
			| SUPPORTED_100baseT_Half | SUPPORTED_100baseT_Full
			| SUPPORTED_Autoneg | SUPPORTED_TP | SUPPORTED_MII;

	cmd->advertising = ADVERTISED_TP | ADVERTISED_MII;

	/* all three mode bits set means autoneg over all modes */
	if ((phy_ctrl & (PhyCtrlDux | PhyCtrlSpd100 | PhyCtrlSpd10))
			== (PhyCtrlDux | PhyCtrlSpd100 | PhyCtrlSpd10))
		cmd->advertising |= ADVERTISED_Autoneg;

	if ((phy_ctrl & PhyCtrlSpd10) == PhyCtrlSpd10)
		cmd->advertising |= ADVERTISED_10baseT_Half;

	if ((phy_ctrl & (PhyCtrlSpd10 | PhyCtrlDux))
			== (PhyCtrlSpd10 | PhyCtrlDux))
		cmd->advertising |= ADVERTISED_10baseT_Full;

	if ((phy_ctrl & PhyCtrlSpd100) == PhyCtrlSpd100)
		cmd->advertising |= ADVERTISED_100baseT_Half;

	if ((phy_ctrl & (PhyCtrlSpd100 | PhyCtrlDux))
			== (PhyCtrlSpd100 | PhyCtrlDux))
		cmd->advertising |= ADVERTISED_100baseT_Full;

	if (phy_ctrl & PhyCtrlAne)
		cmd->advertising |= ADVERTISED_Autoneg;

	ethtool_cmd_speed_set(cmd,
			      (output_status & 0x2) ? SPEED_100 : SPEED_10);
	cmd->duplex = (output_status & 0x4) ? DUPLEX_FULL : DUPLEX_HALF;
	cmd->port = PORT_MII;
	cmd->phy_address = phy_address;
	cmd->transceiver = XCVR_INTERNAL;
	cmd->autoneg = (phy_ctrl & PhyCtrlAne) ? AUTONEG_ENABLE : AUTONEG_DISABLE;

	return 0;
}
1187
/*
 * ethtool ->set_settings: program speed/duplex/autoneg.
 *
 * Only the fixed values this hardware supports are accepted (MII port,
 * internal transceiver, PHY address 0x1f, 10/100 Mb/s).  The requested
 * mode is translated into PhyCtrl bits; the register is rewritten only
 * if the relevant bits actually changed.
 *
 * Returns 0 on success, -EINVAL on an unsupported combination.
 */
static int sc92031_ethtool_set_settings(struct net_device *dev,
		struct ethtool_cmd *cmd)
{
	struct sc92031_priv *priv = netdev_priv(dev);
	void __iomem *port_base = priv->port_base;
	u32 speed = ethtool_cmd_speed(cmd);
	u32 phy_ctrl;
	u32 old_phy_ctrl;

	/* reject anything outside the chip's fixed capabilities */
	if (!(speed == SPEED_10 || speed == SPEED_100))
		return -EINVAL;
	if (!(cmd->duplex == DUPLEX_HALF || cmd->duplex == DUPLEX_FULL))
		return -EINVAL;
	if (!(cmd->port == PORT_MII))
		return -EINVAL;
	if (!(cmd->phy_address == 0x1f))
		return -EINVAL;
	if (!(cmd->transceiver == XCVR_INTERNAL))
		return -EINVAL;
	if (!(cmd->autoneg == AUTONEG_DISABLE || cmd->autoneg == AUTONEG_ENABLE))
		return -EINVAL;

	if (cmd->autoneg == AUTONEG_ENABLE) {
		/* at least one advertisable mode must be requested */
		if (!(cmd->advertising & (ADVERTISED_Autoneg
				| ADVERTISED_100baseT_Full
				| ADVERTISED_100baseT_Half
				| ADVERTISED_10baseT_Full
				| ADVERTISED_10baseT_Half)))
			return -EINVAL;

		phy_ctrl = PhyCtrlAne;

		// FIXME: I'm not sure what the original code was trying to do
		if (cmd->advertising & ADVERTISED_Autoneg)
			phy_ctrl |= PhyCtrlDux | PhyCtrlSpd100 | PhyCtrlSpd10;
		if (cmd->advertising & ADVERTISED_100baseT_Full)
			phy_ctrl |= PhyCtrlDux | PhyCtrlSpd100;
		if (cmd->advertising & ADVERTISED_100baseT_Half)
			phy_ctrl |= PhyCtrlSpd100;
		if (cmd->advertising & ADVERTISED_10baseT_Full)
			phy_ctrl |= PhyCtrlSpd10 | PhyCtrlDux;
		if (cmd->advertising & ADVERTISED_10baseT_Half)
			phy_ctrl |= PhyCtrlSpd10;
	} else {
		// FIXME: Whole branch guessed
		phy_ctrl = 0;

		if (speed == SPEED_10)
			phy_ctrl |= PhyCtrlSpd10;
		else /* cmd->speed == SPEED_100 */
			phy_ctrl |= PhyCtrlSpd100;

		if (cmd->duplex == DUPLEX_FULL)
			phy_ctrl |= PhyCtrlDux;
	}

	spin_lock_bh(&priv->lock);

	/* preserve unrelated PhyCtrl bits, replace only mode/autoneg bits */
	old_phy_ctrl = ioread32(port_base + PhyCtrl);
	phy_ctrl |= old_phy_ctrl & ~(PhyCtrlAne | PhyCtrlDux
			| PhyCtrlSpd100 | PhyCtrlSpd10);
	/* skip the register write entirely when nothing changed */
	if (phy_ctrl != old_phy_ctrl)
		iowrite32(phy_ctrl, port_base + PhyCtrl);

	spin_unlock_bh(&priv->lock);

	return 0;
}
1256
/*
 * ethtool ->get_wol: report Wake-on-LAN capabilities and current
 * configuration, decoded from the PMConfig register (PM_LinkUp =>
 * WAKE_PHY, PM_Magic => WAKE_MAGIC, PM_WakeUp => packet wake -- the
 * exact packet classes covered by PM_WakeUp are guessed, see FIXMEs).
 */
static void sc92031_ethtool_get_wol(struct net_device *dev,
		struct ethtool_wolinfo *wolinfo)
{
	struct sc92031_priv *priv = netdev_priv(dev);
	void __iomem *port_base = priv->port_base;
	u32 pm_config;

	spin_lock_bh(&priv->lock);
	pm_config = ioread32(port_base + PMConfig);
	spin_unlock_bh(&priv->lock);

	// FIXME: Guessed
	wolinfo->supported = WAKE_PHY | WAKE_MAGIC
			| WAKE_UCAST | WAKE_MCAST | WAKE_BCAST;
	wolinfo->wolopts = 0;

	if (pm_config & PM_LinkUp)
		wolinfo->wolopts |= WAKE_PHY;

	if (pm_config & PM_Magic)
		wolinfo->wolopts |= WAKE_MAGIC;

	if (pm_config & PM_WakeUp)
		// FIXME: Guessed
		wolinfo->wolopts |= WAKE_UCAST | WAKE_MCAST | WAKE_BCAST;
}
1283
/*
 * ethtool ->set_wol: translate the requested wake options into PMConfig
 * bits (inverse mapping of sc92031_ethtool_get_wol), write the register
 * and cache the value in priv->pm_config for later restore.
 *
 * Always returns 0.
 */
static int sc92031_ethtool_set_wol(struct net_device *dev,
		struct ethtool_wolinfo *wolinfo)
{
	struct sc92031_priv *priv = netdev_priv(dev);
	void __iomem *port_base = priv->port_base;
	u32 pm_config;

	spin_lock_bh(&priv->lock);

	/* start from current register value with wake bits cleared */
	pm_config = ioread32(port_base + PMConfig)
			& ~(PM_LinkUp | PM_Magic | PM_WakeUp);

	if (wolinfo->wolopts & WAKE_PHY)
		pm_config |= PM_LinkUp;

	if (wolinfo->wolopts & WAKE_MAGIC)
		pm_config |= PM_Magic;

	// FIXME: Guessed
	if (wolinfo->wolopts & (WAKE_UCAST | WAKE_MCAST | WAKE_BCAST))
		pm_config |= PM_WakeUp;

	priv->pm_config = pm_config;
	iowrite32(pm_config, port_base + PMConfig);
	mmiowb();

	spin_unlock_bh(&priv->lock);

	return 0;
}
1314
/*
 * ethtool ->nway_reset: restart autonegotiation by setting BMCR_ANRESTART
 * in the PHY's BMCR register.
 *
 * Returns 0 on success, -EINVAL if autonegotiation is not enabled.
 */
static int sc92031_ethtool_nway_reset(struct net_device *dev)
{
	int err = 0;
	struct sc92031_priv *priv = netdev_priv(dev);
	void __iomem *port_base = priv->port_base;
	u16 bmcr;

	spin_lock_bh(&priv->lock);

	bmcr = _sc92031_mii_read(port_base, MII_BMCR);
	/* restarting autoneg only makes sense if it is enabled */
	if (!(bmcr & BMCR_ANENABLE)) {
		err = -EINVAL;
		goto out;
	}

	_sc92031_mii_write(port_base, MII_BMCR, bmcr | BMCR_ANRESTART);

out:
	/* re-arm the MII scan on both success and failure paths */
	_sc92031_mii_scan(port_base);
	mmiowb();

	spin_unlock_bh(&priv->lock);

	return err;
}
1340
/* Names for the driver-private statistics exposed via `ethtool -S`;
 * order must match sc92031_ethtool_get_ethtool_stats() (data[0], data[1]). */
static const char sc92031_ethtool_stats_strings[SILAN_STATS_NUM][ETH_GSTRING_LEN] = {
	"tx_timeout",
	"rx_loss",
};
1345
1346static void sc92031_ethtool_get_strings(struct net_device *dev,
1347 u32 stringset, u8 *data)
1348{
1349 if (stringset == ETH_SS_STATS)
1350 memcpy(data, sc92031_ethtool_stats_strings,
1351 SILAN_STATS_NUM * ETH_GSTRING_LEN);
1352}
1353
1354static int sc92031_ethtool_get_sset_count(struct net_device *dev, int sset)
1355{
1356 switch (sset) {
1357 case ETH_SS_STATS:
1358 return SILAN_STATS_NUM;
1359 default:
1360 return -EOPNOTSUPP;
1361 }
1362}
1363
1364static void sc92031_ethtool_get_ethtool_stats(struct net_device *dev,
1365 struct ethtool_stats *stats, u64 *data)
1366{
1367 struct sc92031_priv *priv = netdev_priv(dev);
1368
1369 spin_lock_bh(&priv->lock);
1370 data[0] = priv->tx_timeouts;
1371 data[1] = priv->rx_loss;
1372 spin_unlock_bh(&priv->lock);
1373}
1374
/* ethtool entry points; installed on the net_device in sc92031_probe() */
static const struct ethtool_ops sc92031_ethtool_ops = {
	.get_settings		= sc92031_ethtool_get_settings,
	.set_settings		= sc92031_ethtool_set_settings,
	.get_wol		= sc92031_ethtool_get_wol,
	.set_wol		= sc92031_ethtool_set_wol,
	.nway_reset		= sc92031_ethtool_nway_reset,
	.get_link		= ethtool_op_get_link,
	.get_strings		= sc92031_ethtool_get_strings,
	.get_sset_count		= sc92031_ethtool_get_sset_count,
	.get_ethtool_stats	= sc92031_ethtool_get_ethtool_stats,
};
1386
1387
/* net_device callbacks; generic eth_* helpers for MTU/address handling */
static const struct net_device_ops sc92031_netdev_ops = {
	.ndo_get_stats		= sc92031_get_stats,
	.ndo_start_xmit		= sc92031_start_xmit,
	.ndo_open		= sc92031_open,
	.ndo_stop		= sc92031_stop,
	.ndo_set_multicast_list	= sc92031_set_multicast_list,
	.ndo_change_mtu		= eth_change_mtu,
	.ndo_validate_addr	= eth_validate_addr,
	.ndo_set_mac_address	= eth_mac_addr,
	.ndo_tx_timeout		= sc92031_tx_timeout,
#ifdef CONFIG_NET_POLL_CONTROLLER
	.ndo_poll_controller	= sc92031_poll_controller,
#endif
};
1402
/*
 * PCI probe: enable and map the device, allocate the net_device, read
 * the permanent MAC address out of the MAC0 register pair and register
 * with the networking core.
 *
 * Returns 0 on success, a negative errno otherwise; the goto chain at
 * the bottom unwinds exactly the resources acquired before the failure.
 */
static int __devinit sc92031_probe(struct pci_dev *pdev,
		const struct pci_device_id *id)
{
	int err;
	void __iomem* port_base;
	struct net_device *dev;
	struct sc92031_priv *priv;
	u32 mac0, mac1;
	unsigned long base_addr;

	err = pci_enable_device(pdev);
	if (unlikely(err < 0))
		goto out_enable_device;

	pci_set_master(pdev);

	/* 32-bit DMA only, for both streaming and coherent mappings */
	err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
	if (unlikely(err < 0))
		goto out_set_dma_mask;

	err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32));
	if (unlikely(err < 0))
		goto out_set_dma_mask;

	err = pci_request_regions(pdev, SC92031_NAME);
	if (unlikely(err < 0))
		goto out_request_regions;

	/* map the whole BAR (length 0 = full resource size) */
	port_base = pci_iomap(pdev, SC92031_USE_BAR, 0);
	if (unlikely(!port_base)) {
		err = -EIO;
		goto out_iomap;
	}

	dev = alloc_etherdev(sizeof(struct sc92031_priv));
	if (unlikely(!dev)) {
		err = -ENOMEM;
		goto out_alloc_etherdev;
	}

	pci_set_drvdata(pdev, dev);
	SET_NETDEV_DEV(dev, &pdev->dev);

#if SC92031_USE_BAR == 0
	dev->mem_start = pci_resource_start(pdev, SC92031_USE_BAR);
	dev->mem_end = pci_resource_end(pdev, SC92031_USE_BAR);
#elif SC92031_USE_BAR == 1
	dev->base_addr = pci_resource_start(pdev, SC92031_USE_BAR);
#endif
	dev->irq = pdev->irq;

	/* faked with skb_copy_and_csum_dev */
	dev->features = NETIF_F_SG | NETIF_F_HIGHDMA |
			NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM;

	dev->netdev_ops = &sc92031_netdev_ops;
	dev->watchdog_timeo = TX_TIMEOUT;
	dev->ethtool_ops = &sc92031_ethtool_ops;

	priv = netdev_priv(dev);
	spin_lock_init(&priv->lock);
	priv->port_base = port_base;
	priv->pdev = pdev;
	tasklet_init(&priv->tasklet, sc92031_tasklet, (unsigned long)dev);
	/* Fudge tasklet count so the call to sc92031_enable_interrupts at
	 * sc92031_open will work correctly */
	tasklet_disable_nosync(&priv->tasklet);

	/* PCI PM Wakeup */
	iowrite32((~PM_LongWF & ~PM_LWPTN) | PM_Enable, port_base + PMConfig);

	/* permanent MAC address is spread across MAC0 and MAC0+4,
	 * most significant byte first */
	mac0 = ioread32(port_base + MAC0);
	mac1 = ioread32(port_base + MAC0 + 4);
	dev->dev_addr[0] = dev->perm_addr[0] = mac0 >> 24;
	dev->dev_addr[1] = dev->perm_addr[1] = mac0 >> 16;
	dev->dev_addr[2] = dev->perm_addr[2] = mac0 >> 8;
	dev->dev_addr[3] = dev->perm_addr[3] = mac0;
	dev->dev_addr[4] = dev->perm_addr[4] = mac1 >> 8;
	dev->dev_addr[5] = dev->perm_addr[5] = mac1;

	err = register_netdev(dev);
	if (err < 0)
		goto out_register_netdev;

#if SC92031_USE_BAR == 0
	base_addr = dev->mem_start;
#elif SC92031_USE_BAR == 1
	base_addr = dev->base_addr;
#endif
	printk(KERN_INFO "%s: SC92031 at 0x%lx, %pM, IRQ %d\n", dev->name,
	       base_addr, dev->dev_addr, dev->irq);

	return 0;

out_register_netdev:
	free_netdev(dev);
out_alloc_etherdev:
	pci_iounmap(pdev, port_base);
out_iomap:
	pci_release_regions(pdev);
out_request_regions:
out_set_dma_mask:
	pci_disable_device(pdev);
out_enable_device:
	return err;
}
1509
/*
 * PCI remove: tear down in strict reverse order of sc92031_probe() --
 * unregister from the network stack before freeing the net_device and
 * releasing the PCI resources.
 */
static void __devexit sc92031_remove(struct pci_dev *pdev)
{
	struct net_device *dev = pci_get_drvdata(pdev);
	struct sc92031_priv *priv = netdev_priv(dev);
	void __iomem* port_base = priv->port_base;

	unregister_netdev(dev);
	free_netdev(dev);
	pci_iounmap(pdev, port_base);
	pci_release_regions(pdev);
	pci_disable_device(pdev);
}
1522
/*
 * Legacy PCI suspend hook: save PCI config space, quiesce the interface
 * if it is up (detach, mask interrupts, stop TX/RX, drop queued TX
 * buffers) and put the device into the requested low-power state.
 *
 * Always returns 0.
 */
static int sc92031_suspend(struct pci_dev *pdev, pm_message_t state)
{
	struct net_device *dev = pci_get_drvdata(pdev);
	struct sc92031_priv *priv = netdev_priv(dev);

	pci_save_state(pdev);

	/* nothing else to quiesce if the interface is down */
	if (!netif_running(dev))
		goto out;

	netif_device_detach(dev);

	/* Disable interrupts, stop Tx and Rx. */
	sc92031_disable_interrupts(dev);

	spin_lock_bh(&priv->lock);

	_sc92031_disable_tx_rx(dev);
	_sc92031_tx_clear(dev);
	mmiowb();

	spin_unlock_bh(&priv->lock);

out:
	pci_set_power_state(pdev, pci_choose_state(pdev, state));

	return 0;
}
1551
/*
 * Legacy PCI resume hook: restore config space and power state, then --
 * if the interface was up -- reset the chip, re-enable interrupts and
 * reattach, restoring the TX queue state from the current carrier.
 *
 * Always returns 0.
 */
static int sc92031_resume(struct pci_dev *pdev)
{
	struct net_device *dev = pci_get_drvdata(pdev);
	struct sc92031_priv *priv = netdev_priv(dev);

	pci_restore_state(pdev);
	pci_set_power_state(pdev, PCI_D0);

	if (!netif_running(dev))
		goto out;

	/* Interrupts already disabled by sc92031_suspend */
	spin_lock_bh(&priv->lock);

	_sc92031_reset(dev);
	mmiowb();

	spin_unlock_bh(&priv->lock);
	sc92031_enable_interrupts(dev);

	netif_device_attach(dev);

	/* resume or stop the TX queue depending on link state */
	if (netif_carrier_ok(dev))
		netif_wake_queue(dev);
	else
		netif_tx_disable(dev);

out:
	return 0;
}
1582
/* PCI IDs this driver binds to: Silan SC92031 / 8139-compatible parts,
 * plus one entry under vendor 0x1088 */
static DEFINE_PCI_DEVICE_TABLE(sc92031_pci_device_id_table) = {
	{ PCI_DEVICE(PCI_VENDOR_ID_SILAN, 0x2031) },
	{ PCI_DEVICE(PCI_VENDOR_ID_SILAN, 0x8139) },
	{ PCI_DEVICE(0x1088, 0x2031) },
	{ 0, }
};
MODULE_DEVICE_TABLE(pci, sc92031_pci_device_id_table);
1590
/* PCI driver glue; legacy suspend/resume hooks (pre-dev_pm_ops style) */
static struct pci_driver sc92031_pci_driver = {
	.name		= SC92031_NAME,
	.id_table	= sc92031_pci_device_id_table,
	.probe		= sc92031_probe,
	.remove		= __devexit_p(sc92031_remove),
	.suspend	= sc92031_suspend,
	.resume		= sc92031_resume,
};
1599
/* Module init: register the PCI driver; probing happens per-device. */
static int __init sc92031_init(void)
{
	return pci_register_driver(&sc92031_pci_driver);
}
1604
/* Module exit: unregister the driver, triggering remove on bound devices. */
static void __exit sc92031_exit(void)
{
	pci_unregister_driver(&sc92031_pci_driver);
}
1609
module_init(sc92031_init);
module_exit(sc92031_exit);

/* Module metadata */
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Cesar Eduardo Barros <cesarb@cesarb.net>");
MODULE_DESCRIPTION("Silan SC92031 PCI Fast Ethernet Adapter driver");