author    Linus Torvalds <torvalds@linux-foundation.org>  2011-10-25 07:25:22 -0400
committer Linus Torvalds <torvalds@linux-foundation.org>  2011-10-25 07:25:22 -0400
commit    8a9ea3237e7eb5c25f09e429ad242ae5a3d5ea22
tree      a0a63398a9983667d52cbbbf4e2405b4f22b1d83  /drivers/net/ethernet/marvell
parent    1be025d3cb40cd295123af2c394f7229ef9b30ca
parent    8b3408f8ee994973869d8ba32c5bf482bc4ddca4
Merge git://git.kernel.org/pub/scm/linux/kernel/git/davem/net-next
* git://git.kernel.org/pub/scm/linux/kernel/git/davem/net-next: (1745 commits)
  dp83640: free packet queues on remove
  dp83640: use proper function to free transmit time stamping packets
  ipv6: Do not use routes from locally generated RAs
  [PATCH net-next] tg3: add tx_dropped counter
  be2net: don't create multiple RX/TX rings in multi channel mode
  be2net: don't create multiple TXQs in BE2
  be2net: refactor VF setup/teardown code into be_vf_setup/clear()
  be2net: add vlan/rx-mode/flow-control config to be_setup()
  net_sched: cls_flow: use skb_header_pointer()
  ipv4: avoid useless call of the function check_peer_pmtu
  TCP: remove TCP_DEBUG
  net: Fix driver name for mdio-gpio.c
  ipv4: tcp: fix TOS value in ACK messages sent from TIME_WAIT
  rtnetlink: Add missing manual netlink notification in dev_change_net_namespaces
  ipv4: fix ipsec forward performance regression
  jme: fix irq storm after suspend/resume
  route: fix ICMP redirect validation
  net: hold sock reference while processing tx timestamps
  tcp: md5: add more const attributes
  Add ethtool -g support to virtio_net
  ...

Fix up conflicts in:
 - drivers/net/Kconfig:
   The split-up generated a trivial conflict with removal of a stale
   reference to Documentation/networking/net-modules.txt.  Remove it
   from the new location instead.
 - fs/sysfs/dir.c:
   Fairly nasty conflicts with the sysfs rb-tree usage, conflicting
   with Eric Biederman's changes for tagged directories.
Diffstat (limited to 'drivers/net/ethernet/marvell')
-rw-r--r--  drivers/net/ethernet/marvell/Kconfig          111
-rw-r--r--  drivers/net/ethernet/marvell/Makefile           8
-rw-r--r--  drivers/net/ethernet/marvell/mv643xx_eth.c   3019
-rw-r--r--  drivers/net/ethernet/marvell/pxa168_eth.c    1663
-rw-r--r--  drivers/net/ethernet/marvell/skge.c          4161
-rw-r--r--  drivers/net/ethernet/marvell/skge.h          2584
-rw-r--r--  drivers/net/ethernet/marvell/sky2.c          5158
-rw-r--r--  drivers/net/ethernet/marvell/sky2.h          2427
8 files changed, 19131 insertions, 0 deletions
diff --git a/drivers/net/ethernet/marvell/Kconfig b/drivers/net/ethernet/marvell/Kconfig
new file mode 100644
index 000000000000..0029934748bc
--- /dev/null
+++ b/drivers/net/ethernet/marvell/Kconfig
@@ -0,0 +1,111 @@
#
# Marvell device configuration
#

config NET_VENDOR_MARVELL
        bool "Marvell devices"
        default y
        depends on PCI || CPU_PXA168 || MV64X60 || PPC32 || PLAT_ORION || INET
        ---help---
          If you have a network (Ethernet) card belonging to this class, say Y
          and read the Ethernet-HOWTO, available from
          <http://www.tldp.org/docs.html#howto>.

          Note that the answer to this question doesn't directly affect the
          kernel: saying N will just cause the configurator to skip all
          the questions about Marvell devices. If you say Y, you will be
          asked for your specific card in the following questions.

if NET_VENDOR_MARVELL

config MV643XX_ETH
        tristate "Marvell Discovery (643XX) and Orion ethernet support"
        depends on (MV64X60 || PPC32 || PLAT_ORION) && INET
        select INET_LRO
        select PHYLIB
        ---help---
          This driver supports the gigabit ethernet MACs in the
          Marvell Discovery PPC/MIPS chipset family (MV643XX) and
          in the Marvell Orion ARM SoC family.

          Some boards that use the Discovery chipset are the Momenco
          Ocelot C and Jaguar ATX and Pegasos II.

config PXA168_ETH
        tristate "Marvell pxa168 ethernet support"
        depends on CPU_PXA168
        select PHYLIB
        ---help---
          This driver supports the pxa168 Ethernet ports.

          To compile this driver as a module, choose M here. The module
          will be called pxa168_eth.

config SKGE
        tristate "Marvell Yukon Gigabit Ethernet support"
        depends on PCI
        select CRC32
        ---help---
          This driver supports the Marvell Yukon or SysKonnect SK-98xx/SK-95xx
          and related Gigabit Ethernet adapters. It is a new smaller driver
          with better performance and more complete ethtool support.

          It does not support the link failover and network management
          features that the "portable" vendor-supplied sk98lin driver does.

          This driver supports adapters based on the original Yukon chipset:
          Marvell 88E8001, Belkin F5D5005, CNet GigaCard, DLink DGE-530T,
          Linksys EG1032/EG1064, 3Com 3C940/3C940B, SysKonnect SK-9871/9872.

          It does not support the newer Yukon2 chipset: a separate driver,
          sky2, is provided for these adapters.

          To compile this driver as a module, choose M here: the module
          will be called skge. This is recommended.

config SKGE_DEBUG
        bool "Debugging interface"
        depends on SKGE && DEBUG_FS
        ---help---
          This option adds the ability to dump driver state for debugging.
          The file /sys/kernel/debug/skge/ethX displays the state of the
          internal transmit and receive rings.

          If unsure, say N.

config SKGE_GENESIS
        bool "Support for older SysKonnect Genesis boards"
        depends on SKGE
        ---help---
          This enables support for the older and uncommon SysKonnect Genesis
          chips, which support MII via an external transceiver, instead of
          an internal one. Disabling this option will save some memory
          by making the code smaller. If unsure, say Y.

config SKY2
        tristate "Marvell Yukon 2 support"
        depends on PCI
        select CRC32
        ---help---
          This driver supports Gigabit Ethernet adapters based on the
          Marvell Yukon 2 chipset:
          Marvell 88E8021/88E8022/88E8035/88E8036/88E8038/88E8050/88E8052/
          88E8053/88E8055/88E8061/88E8062, SysKonnect SK-9E21D/SK-9S21

          There is a companion driver for the older Marvell Yukon and
          SysKonnect Genesis based adapters: skge.

          To compile this driver as a module, choose M here: the module
          will be called sky2. This is recommended.

config SKY2_DEBUG
        bool "Debugging interface"
        depends on SKY2 && DEBUG_FS
        ---help---
          This option adds the ability to dump driver state for debugging.
          The file /sys/kernel/debug/sky2/ethX displays the state of the
          internal transmit and receive rings.

          If unsure, say N.

endif # NET_VENDOR_MARVELL
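
# Example (illustrative, not part of the original file): a .config
# fragment that builds the Yukon 2 driver as a module with its debugfs
# interface enabled would contain:
#
#   CONFIG_NET_VENDOR_MARVELL=y
#   CONFIG_SKY2=m
#   CONFIG_SKY2_DEBUG=y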
diff --git a/drivers/net/ethernet/marvell/Makefile b/drivers/net/ethernet/marvell/Makefile
new file mode 100644
index 000000000000..57e3234a37ba
--- /dev/null
+++ b/drivers/net/ethernet/marvell/Makefile
@@ -0,0 +1,8 @@
#
# Makefile for the Marvell device drivers.
#

obj-$(CONFIG_MV643XX_ETH) += mv643xx_eth.o
obj-$(CONFIG_PXA168_ETH) += pxa168_eth.o
obj-$(CONFIG_SKGE) += skge.o
obj-$(CONFIG_SKY2) += sky2.o
diff --git a/drivers/net/ethernet/marvell/mv643xx_eth.c b/drivers/net/ethernet/marvell/mv643xx_eth.c
new file mode 100644
index 000000000000..194a03113802
--- /dev/null
+++ b/drivers/net/ethernet/marvell/mv643xx_eth.c
@@ -0,0 +1,3019 @@
/*
 * Driver for Marvell Discovery (MV643XX) and Marvell Orion ethernet ports
 * Copyright (C) 2002 Matthew Dharm <mdharm@momenco.com>
 *
 * Based on the 64360 driver from:
 * Copyright (C) 2002 Rabeeh Khoury <rabeeh@galileo.co.il>
 *                    Rabeeh Khoury <rabeeh@marvell.com>
 *
 * Copyright (C) 2003 PMC-Sierra, Inc.,
 *      written by Manish Lachwani
 *
 * Copyright (C) 2003 Ralf Baechle <ralf@linux-mips.org>
 *
 * Copyright (C) 2004-2006 MontaVista Software, Inc.
 *                         Dale Farnsworth <dale@farnsworth.org>
 *
 * Copyright (C) 2004 Steven J. Hill <sjhill1@rockwellcollins.com>
 *                                   <sjhill@realitydiluted.com>
 *
 * Copyright (C) 2007-2008 Marvell Semiconductor
 *                         Lennert Buytenhek <buytenh@marvell.com>
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version 2
 * of the License, or (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/init.h>
#include <linux/dma-mapping.h>
#include <linux/in.h>
#include <linux/ip.h>
#include <linux/tcp.h>
#include <linux/udp.h>
#include <linux/etherdevice.h>
#include <linux/delay.h>
#include <linux/ethtool.h>
#include <linux/platform_device.h>
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/spinlock.h>
#include <linux/workqueue.h>
#include <linux/phy.h>
#include <linux/mv643xx_eth.h>
#include <linux/io.h>
#include <linux/types.h>
#include <linux/inet_lro.h>
#include <linux/slab.h>
#include <asm/system.h>

static char mv643xx_eth_driver_name[] = "mv643xx_eth";
static char mv643xx_eth_driver_version[] = "1.4";


/*
 * Registers shared between all ports.
 */
#define PHY_ADDR                        0x0000
#define SMI_REG                         0x0004
#define  SMI_BUSY                       0x10000000
#define  SMI_READ_VALID                 0x08000000
#define  SMI_OPCODE_READ                0x04000000
#define  SMI_OPCODE_WRITE               0x00000000
#define ERR_INT_CAUSE                   0x0080
#define  ERR_INT_SMI_DONE               0x00000010
#define ERR_INT_MASK                    0x0084
#define WINDOW_BASE(w)                  (0x0200 + ((w) << 3))
#define WINDOW_SIZE(w)                  (0x0204 + ((w) << 3))
#define WINDOW_REMAP_HIGH(w)            (0x0280 + ((w) << 2))
#define WINDOW_BAR_ENABLE               0x0290
#define WINDOW_PROTECT(w)               (0x0294 + ((w) << 4))

/*
 * Main per-port registers. These live at offset 0x0400 for
 * port #0, 0x0800 for port #1, and 0x0c00 for port #2.
 */
#define PORT_CONFIG                     0x0000
#define  UNICAST_PROMISCUOUS_MODE       0x00000001
#define PORT_CONFIG_EXT                 0x0004
#define MAC_ADDR_LOW                    0x0014
#define MAC_ADDR_HIGH                   0x0018
#define SDMA_CONFIG                     0x001c
#define  TX_BURST_SIZE_16_64BIT         0x01000000
#define  TX_BURST_SIZE_4_64BIT          0x00800000
#define  BLM_TX_NO_SWAP                 0x00000020
#define  BLM_RX_NO_SWAP                 0x00000010
#define  RX_BURST_SIZE_16_64BIT         0x00000008
#define  RX_BURST_SIZE_4_64BIT          0x00000004
#define PORT_SERIAL_CONTROL             0x003c
#define  SET_MII_SPEED_TO_100           0x01000000
#define  SET_GMII_SPEED_TO_1000         0x00800000
#define  SET_FULL_DUPLEX_MODE           0x00200000
#define  MAX_RX_PACKET_9700BYTE         0x000a0000
#define  DISABLE_AUTO_NEG_SPEED_GMII    0x00002000
#define  DO_NOT_FORCE_LINK_FAIL         0x00000400
#define  SERIAL_PORT_CONTROL_RESERVED   0x00000200
#define  DISABLE_AUTO_NEG_FOR_FLOW_CTRL 0x00000008
#define  DISABLE_AUTO_NEG_FOR_DUPLEX    0x00000004
#define  FORCE_LINK_PASS                0x00000002
#define  SERIAL_PORT_ENABLE             0x00000001
#define PORT_STATUS                     0x0044
#define  TX_FIFO_EMPTY                  0x00000400
#define  TX_IN_PROGRESS                 0x00000080
#define  PORT_SPEED_MASK                0x00000030
#define  PORT_SPEED_1000                0x00000010
#define  PORT_SPEED_100                 0x00000020
#define  PORT_SPEED_10                  0x00000000
#define  FLOW_CONTROL_ENABLED           0x00000008
#define  FULL_DUPLEX                    0x00000004
#define  LINK_UP                        0x00000002
#define TXQ_COMMAND                     0x0048
#define TXQ_FIX_PRIO_CONF               0x004c
#define TX_BW_RATE                      0x0050
#define TX_BW_MTU                       0x0058
#define TX_BW_BURST                     0x005c
#define INT_CAUSE                       0x0060
#define  INT_TX_END                     0x07f80000
#define  INT_TX_END_0                   0x00080000
#define  INT_RX                         0x000003fc
#define  INT_RX_0                       0x00000004
#define  INT_EXT                        0x00000002
#define INT_CAUSE_EXT                   0x0064
#define  INT_EXT_LINK_PHY               0x00110000
#define  INT_EXT_TX                     0x000000ff
#define INT_MASK                        0x0068
#define INT_MASK_EXT                    0x006c
#define TX_FIFO_URGENT_THRESHOLD        0x0074
#define TXQ_FIX_PRIO_CONF_MOVED         0x00dc
#define TX_BW_RATE_MOVED                0x00e0
#define TX_BW_MTU_MOVED                 0x00e8
#define TX_BW_BURST_MOVED               0x00ec
#define RXQ_CURRENT_DESC_PTR(q)         (0x020c + ((q) << 4))
#define RXQ_COMMAND                     0x0280
#define TXQ_CURRENT_DESC_PTR(q)         (0x02c0 + ((q) << 2))
#define TXQ_BW_TOKENS(q)                (0x0300 + ((q) << 4))
#define TXQ_BW_CONF(q)                  (0x0304 + ((q) << 4))
#define TXQ_BW_WRR_CONF(q)              (0x0308 + ((q) << 4))

/*
 * Misc per-port registers.
 */
#define MIB_COUNTERS(p)                 (0x1000 + ((p) << 7))
#define SPECIAL_MCAST_TABLE(p)          (0x1400 + ((p) << 10))
#define OTHER_MCAST_TABLE(p)            (0x1500 + ((p) << 10))
#define UNICAST_TABLE(p)                (0x1600 + ((p) << 10))


/*
 * SDMA configuration register default value.
 */
#if defined(__BIG_ENDIAN)
#define PORT_SDMA_CONFIG_DEFAULT_VALUE          \
                (RX_BURST_SIZE_4_64BIT  |       \
                 TX_BURST_SIZE_4_64BIT)
#elif defined(__LITTLE_ENDIAN)
#define PORT_SDMA_CONFIG_DEFAULT_VALUE          \
                (RX_BURST_SIZE_4_64BIT  |       \
                 BLM_RX_NO_SWAP         |       \
                 BLM_TX_NO_SWAP         |       \
                 TX_BURST_SIZE_4_64BIT)
#else
#error One of __BIG_ENDIAN or __LITTLE_ENDIAN must be defined
#endif
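
/*
 * Illustrative note (not in the original source): with the bit values
 * above, PORT_SDMA_CONFIG_DEFAULT_VALUE evaluates to 0x00800004 on a
 * big-endian CPU (RX_BURST_SIZE_4_64BIT | TX_BURST_SIZE_4_64BIT) and to
 * 0x00800034 on a little-endian CPU, where the two BLM_*_NO_SWAP bits
 * keep the DMA engine from byte-swapping descriptors and packet data.
 */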


/*
 * Misc definitions.
 */
#define DEFAULT_RX_QUEUE_SIZE   128
#define DEFAULT_TX_QUEUE_SIZE   256
#define SKB_DMA_REALIGN         ((PAGE_SIZE - NET_SKB_PAD) % SMP_CACHE_BYTES)


/*
 * RX/TX descriptors.
 */
#if defined(__BIG_ENDIAN)
struct rx_desc {
        u16 byte_cnt;           /* Descriptor buffer byte count */
        u16 buf_size;           /* Buffer size */
        u32 cmd_sts;            /* Descriptor command status */
        u32 next_desc_ptr;      /* Next descriptor pointer */
        u32 buf_ptr;            /* Descriptor buffer pointer */
};

struct tx_desc {
        u16 byte_cnt;           /* buffer byte count */
        u16 l4i_chk;            /* CPU provided TCP checksum */
        u32 cmd_sts;            /* Command/status field */
        u32 next_desc_ptr;      /* Pointer to next descriptor */
        u32 buf_ptr;            /* pointer to buffer for this descriptor */
};
#elif defined(__LITTLE_ENDIAN)
struct rx_desc {
        u32 cmd_sts;            /* Descriptor command status */
        u16 buf_size;           /* Buffer size */
        u16 byte_cnt;           /* Descriptor buffer byte count */
        u32 buf_ptr;            /* Descriptor buffer pointer */
        u32 next_desc_ptr;      /* Next descriptor pointer */
};

struct tx_desc {
        u32 cmd_sts;            /* Command/status field */
        u16 l4i_chk;            /* CPU provided TCP checksum */
        u16 byte_cnt;           /* buffer byte count */
        u32 buf_ptr;            /* pointer to buffer for this descriptor */
        u32 next_desc_ptr;      /* Pointer to next descriptor */
};
#else
#error One of __BIG_ENDIAN or __LITTLE_ENDIAN must be defined
#endif

/* RX & TX descriptor command */
#define BUFFER_OWNED_BY_DMA             0x80000000

/* RX & TX descriptor status */
#define ERROR_SUMMARY                   0x00000001

/* RX descriptor status */
#define LAYER_4_CHECKSUM_OK             0x40000000
#define RX_ENABLE_INTERRUPT             0x20000000
#define RX_FIRST_DESC                   0x08000000
#define RX_LAST_DESC                    0x04000000
#define RX_IP_HDR_OK                    0x02000000
#define RX_PKT_IS_IPV4                  0x01000000
#define RX_PKT_IS_ETHERNETV2            0x00800000
#define RX_PKT_LAYER4_TYPE_MASK         0x00600000
#define RX_PKT_LAYER4_TYPE_TCP_IPV4     0x00000000
#define RX_PKT_IS_VLAN_TAGGED           0x00080000

/* TX descriptor command */
#define TX_ENABLE_INTERRUPT             0x00800000
#define GEN_CRC                         0x00400000
#define TX_FIRST_DESC                   0x00200000
#define TX_LAST_DESC                    0x00100000
#define ZERO_PADDING                    0x00080000
#define GEN_IP_V4_CHECKSUM              0x00040000
#define GEN_TCP_UDP_CHECKSUM            0x00020000
#define UDP_FRAME                       0x00010000
#define MAC_HDR_EXTRA_4_BYTES           0x00008000
#define MAC_HDR_EXTRA_8_BYTES           0x00000200

#define TX_IHL_SHIFT                    11


/* global *******************************************************************/
struct mv643xx_eth_shared_private {
        /*
         * Ethernet controller base address.
         */
        void __iomem *base;

        /*
         * Points at the right SMI instance to use.
         */
        struct mv643xx_eth_shared_private *smi;

        /*
         * Provides access to local SMI interface.
         */
        struct mii_bus *smi_bus;

        /*
         * If we have access to the error interrupt pin (which is
         * somewhat misnamed as it not only reflects internal errors
         * but also reflects SMI completion), use that to wait for
         * SMI access completion instead of polling the SMI busy bit.
         */
        int err_interrupt;
        wait_queue_head_t smi_busy_wait;

        /*
         * Per-port MBUS window access register value.
         */
        u32 win_protect;

        /*
         * Hardware-specific parameters.
         */
        unsigned int t_clk;
        int extended_rx_coal_limit;
        int tx_bw_control;
        int tx_csum_limit;
};

#define TX_BW_CONTROL_ABSENT            0
#define TX_BW_CONTROL_OLD_LAYOUT        1
#define TX_BW_CONTROL_NEW_LAYOUT        2

static int mv643xx_eth_open(struct net_device *dev);
static int mv643xx_eth_stop(struct net_device *dev);


/* per-port *****************************************************************/
struct mib_counters {
        u64 good_octets_received;
        u32 bad_octets_received;
        u32 internal_mac_transmit_err;
        u32 good_frames_received;
        u32 bad_frames_received;
        u32 broadcast_frames_received;
        u32 multicast_frames_received;
        u32 frames_64_octets;
        u32 frames_65_to_127_octets;
        u32 frames_128_to_255_octets;
        u32 frames_256_to_511_octets;
        u32 frames_512_to_1023_octets;
        u32 frames_1024_to_max_octets;
        u64 good_octets_sent;
        u32 good_frames_sent;
        u32 excessive_collision;
        u32 multicast_frames_sent;
        u32 broadcast_frames_sent;
        u32 unrec_mac_control_received;
        u32 fc_sent;
        u32 good_fc_received;
        u32 bad_fc_received;
        u32 undersize_received;
        u32 fragments_received;
        u32 oversize_received;
        u32 jabber_received;
        u32 mac_receive_error;
        u32 bad_crc_event;
        u32 collision;
        u32 late_collision;
};

struct lro_counters {
        u32 lro_aggregated;
        u32 lro_flushed;
        u32 lro_no_desc;
};

struct rx_queue {
        int index;

        int rx_ring_size;

        int rx_desc_count;
        int rx_curr_desc;
        int rx_used_desc;

        struct rx_desc *rx_desc_area;
        dma_addr_t rx_desc_dma;
        int rx_desc_area_size;
        struct sk_buff **rx_skb;

        struct net_lro_mgr lro_mgr;
        struct net_lro_desc lro_arr[8];
};

struct tx_queue {
        int index;

        int tx_ring_size;

        int tx_desc_count;
        int tx_curr_desc;
        int tx_used_desc;

        struct tx_desc *tx_desc_area;
        dma_addr_t tx_desc_dma;
        int tx_desc_area_size;

        struct sk_buff_head tx_skb;

        unsigned long tx_packets;
        unsigned long tx_bytes;
        unsigned long tx_dropped;
};

struct mv643xx_eth_private {
        struct mv643xx_eth_shared_private *shared;
        void __iomem *base;
        int port_num;

        struct net_device *dev;

        struct phy_device *phy;

        struct timer_list mib_counters_timer;
        spinlock_t mib_counters_lock;
        struct mib_counters mib_counters;

        struct lro_counters lro_counters;

        struct work_struct tx_timeout_task;

        struct napi_struct napi;
        u32 int_mask;
        u8 oom;
        u8 work_link;
        u8 work_tx;
        u8 work_tx_end;
        u8 work_rx;
        u8 work_rx_refill;

        int skb_size;
        struct sk_buff_head rx_recycle;

        /*
         * RX state.
         */
        int rx_ring_size;
        unsigned long rx_desc_sram_addr;
        int rx_desc_sram_size;
        int rxq_count;
        struct timer_list rx_oom;
        struct rx_queue rxq[8];

        /*
         * TX state.
         */
        int tx_ring_size;
        unsigned long tx_desc_sram_addr;
        int tx_desc_sram_size;
        int txq_count;
        struct tx_queue txq[8];
};


/* port register accessors **************************************************/
static inline u32 rdl(struct mv643xx_eth_private *mp, int offset)
{
        return readl(mp->shared->base + offset);
}

static inline u32 rdlp(struct mv643xx_eth_private *mp, int offset)
{
        return readl(mp->base + offset);
}

static inline void wrl(struct mv643xx_eth_private *mp, int offset, u32 data)
{
        writel(data, mp->shared->base + offset);
}

static inline void wrlp(struct mv643xx_eth_private *mp, int offset, u32 data)
{
        writel(data, mp->base + offset);
}
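
/*
 * Worked example (an assumption based on the register-map comment above;
 * the assignment of mp->base happens in probe code outside this section):
 * wrl()/rdl() address registers relative to the shared controller base,
 * while wrlp()/rdlp() use mp->base, which points at the port's own
 * register block (0x0400 for port 0, 0x0800 for port 1, ...). So
 * rdlp(mp, PORT_STATUS) on port 0 would read shared base + 0x0400 + 0x0044.
 */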


/* rxq/txq helper functions *************************************************/
static struct mv643xx_eth_private *rxq_to_mp(struct rx_queue *rxq)
{
        return container_of(rxq, struct mv643xx_eth_private, rxq[rxq->index]);
}

static struct mv643xx_eth_private *txq_to_mp(struct tx_queue *txq)
{
        return container_of(txq, struct mv643xx_eth_private, txq[txq->index]);
}

static void rxq_enable(struct rx_queue *rxq)
{
        struct mv643xx_eth_private *mp = rxq_to_mp(rxq);
        wrlp(mp, RXQ_COMMAND, 1 << rxq->index);
}

static void rxq_disable(struct rx_queue *rxq)
{
        struct mv643xx_eth_private *mp = rxq_to_mp(rxq);
        u8 mask = 1 << rxq->index;

        wrlp(mp, RXQ_COMMAND, mask << 8);
        while (rdlp(mp, RXQ_COMMAND) & mask)
                udelay(10);
}

static void txq_reset_hw_ptr(struct tx_queue *txq)
{
        struct mv643xx_eth_private *mp = txq_to_mp(txq);
        u32 addr;

        addr = (u32)txq->tx_desc_dma;
        addr += txq->tx_curr_desc * sizeof(struct tx_desc);
        wrlp(mp, TXQ_CURRENT_DESC_PTR(txq->index), addr);
}

static void txq_enable(struct tx_queue *txq)
{
        struct mv643xx_eth_private *mp = txq_to_mp(txq);
        wrlp(mp, TXQ_COMMAND, 1 << txq->index);
}

static void txq_disable(struct tx_queue *txq)
{
        struct mv643xx_eth_private *mp = txq_to_mp(txq);
        u8 mask = 1 << txq->index;

        wrlp(mp, TXQ_COMMAND, mask << 8);
        while (rdlp(mp, TXQ_COMMAND) & mask)
                udelay(10);
}

static void txq_maybe_wake(struct tx_queue *txq)
{
        struct mv643xx_eth_private *mp = txq_to_mp(txq);
        struct netdev_queue *nq = netdev_get_tx_queue(mp->dev, txq->index);

        if (netif_tx_queue_stopped(nq)) {
                __netif_tx_lock(nq, smp_processor_id());
                if (txq->tx_ring_size - txq->tx_desc_count >= MAX_SKB_FRAGS + 1)
                        netif_tx_wake_queue(nq);
                __netif_tx_unlock(nq);
        }
}


/* rx napi ******************************************************************/
static int
mv643xx_get_skb_header(struct sk_buff *skb, void **iphdr, void **tcph,
                       u64 *hdr_flags, void *priv)
{
        unsigned long cmd_sts = (unsigned long)priv;

        /*
         * Make sure that this packet is Ethernet II, is not VLAN
         * tagged, is IPv4, has a valid IP header, and is TCP.
         */
        if ((cmd_sts & (RX_IP_HDR_OK | RX_PKT_IS_IPV4 |
                        RX_PKT_IS_ETHERNETV2 | RX_PKT_LAYER4_TYPE_MASK |
                        RX_PKT_IS_VLAN_TAGGED)) !=
            (RX_IP_HDR_OK | RX_PKT_IS_IPV4 |
             RX_PKT_IS_ETHERNETV2 | RX_PKT_LAYER4_TYPE_TCP_IPV4))
                return -1;

        skb_reset_network_header(skb);
        skb_set_transport_header(skb, ip_hdrlen(skb));
        *iphdr = ip_hdr(skb);
        *tcph = tcp_hdr(skb);
        *hdr_flags = LRO_IPV4 | LRO_TCP;

        return 0;
}

static int rxq_process(struct rx_queue *rxq, int budget)
{
        struct mv643xx_eth_private *mp = rxq_to_mp(rxq);
        struct net_device_stats *stats = &mp->dev->stats;
        int lro_flush_needed;
        int rx;

        lro_flush_needed = 0;
        rx = 0;
        while (rx < budget && rxq->rx_desc_count) {
                struct rx_desc *rx_desc;
                unsigned int cmd_sts;
                struct sk_buff *skb;
                u16 byte_cnt;

                rx_desc = &rxq->rx_desc_area[rxq->rx_curr_desc];

                cmd_sts = rx_desc->cmd_sts;
                if (cmd_sts & BUFFER_OWNED_BY_DMA)
                        break;
                rmb();

                skb = rxq->rx_skb[rxq->rx_curr_desc];
                rxq->rx_skb[rxq->rx_curr_desc] = NULL;

                rxq->rx_curr_desc++;
                if (rxq->rx_curr_desc == rxq->rx_ring_size)
                        rxq->rx_curr_desc = 0;

                dma_unmap_single(mp->dev->dev.parent, rx_desc->buf_ptr,
                                 rx_desc->buf_size, DMA_FROM_DEVICE);
                rxq->rx_desc_count--;
                rx++;

                mp->work_rx_refill |= 1 << rxq->index;

                byte_cnt = rx_desc->byte_cnt;

                /*
                 * Update statistics.
                 *
                 * Note that the descriptor byte count includes 2 dummy
                 * bytes automatically inserted by the hardware at the
                 * start of the packet (which we don't count), and a 4
                 * byte CRC at the end of the packet (which we do count).
                 */
                stats->rx_packets++;
                stats->rx_bytes += byte_cnt - 2;

                /*
                 * In case we received a packet without first / last bits
                 * on, or the error summary bit is set, the packet needs
                 * to be dropped.
                 */
                if ((cmd_sts & (RX_FIRST_DESC | RX_LAST_DESC | ERROR_SUMMARY))
                        != (RX_FIRST_DESC | RX_LAST_DESC))
                        goto err;

                /*
                 * The -4 is for the CRC in the trailer of the
                 * received packet
                 */
                skb_put(skb, byte_cnt - 2 - 4);

                if (cmd_sts & LAYER_4_CHECKSUM_OK)
                        skb->ip_summed = CHECKSUM_UNNECESSARY;
                skb->protocol = eth_type_trans(skb, mp->dev);

                if (skb->dev->features & NETIF_F_LRO &&
                    skb->ip_summed == CHECKSUM_UNNECESSARY) {
                        lro_receive_skb(&rxq->lro_mgr, skb, (void *)cmd_sts);
                        lro_flush_needed = 1;
                } else
                        netif_receive_skb(skb);

                continue;

err:
                stats->rx_dropped++;

                if ((cmd_sts & (RX_FIRST_DESC | RX_LAST_DESC)) !=
                    (RX_FIRST_DESC | RX_LAST_DESC)) {
                        if (net_ratelimit())
                                netdev_err(mp->dev,
                                           "received packet spanning multiple descriptors\n");
                }

                if (cmd_sts & ERROR_SUMMARY)
                        stats->rx_errors++;

                dev_kfree_skb(skb);
        }

        if (lro_flush_needed)
                lro_flush_all(&rxq->lro_mgr);

        if (rx < budget)
                mp->work_rx &= ~(1 << rxq->index);

        return rx;
}

static int rxq_refill(struct rx_queue *rxq, int budget)
{
        struct mv643xx_eth_private *mp = rxq_to_mp(rxq);
        int refilled;

        refilled = 0;
        while (refilled < budget && rxq->rx_desc_count < rxq->rx_ring_size) {
                struct sk_buff *skb;
                int rx;
                struct rx_desc *rx_desc;
                int size;

                skb = __skb_dequeue(&mp->rx_recycle);
                if (skb == NULL)
                        skb = dev_alloc_skb(mp->skb_size);

                if (skb == NULL) {
                        mp->oom = 1;
                        goto oom;
                }

                if (SKB_DMA_REALIGN)
                        skb_reserve(skb, SKB_DMA_REALIGN);

                refilled++;
                rxq->rx_desc_count++;

                rx = rxq->rx_used_desc++;
                if (rxq->rx_used_desc == rxq->rx_ring_size)
                        rxq->rx_used_desc = 0;

                rx_desc = rxq->rx_desc_area + rx;

                size = skb->end - skb->data;
                rx_desc->buf_ptr = dma_map_single(mp->dev->dev.parent,
                                                  skb->data, size,
                                                  DMA_FROM_DEVICE);
                rx_desc->buf_size = size;
                rxq->rx_skb[rx] = skb;
                wmb();
                rx_desc->cmd_sts = BUFFER_OWNED_BY_DMA | RX_ENABLE_INTERRUPT;
                wmb();

                /*
                 * The hardware automatically prepends 2 bytes of
                 * dummy data to each received packet, so that the
                 * IP header ends up 16-byte aligned.
                 */
                skb_reserve(skb, 2);
        }

        if (refilled < budget)
                mp->work_rx_refill &= ~(1 << rxq->index);

oom:
        return refilled;
}
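
/*
 * Arithmetic behind the skb_reserve(skb, 2) above (illustrative; it
 * follows from the comment in rxq_refill()): the hardware writes 2 bytes
 * of dummy data in front of the 14-byte Ethernet header, so the IP
 * header begins at offset 2 + 14 = 16 from the buffer start, i.e.
 * 16-byte aligned.
 */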


/* tx ***********************************************************************/
static inline unsigned int has_tiny_unaligned_frags(struct sk_buff *skb)
{
        int frag;

        for (frag = 0; frag < skb_shinfo(skb)->nr_frags; frag++) {
                const skb_frag_t *fragp = &skb_shinfo(skb)->frags[frag];

                if (skb_frag_size(fragp) <= 8 && fragp->page_offset & 7)
                        return 1;
        }

        return 0;
}
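
/*
 * Example (illustrative, not from the original source): a 6-byte fragment
 * at page_offset 4 makes has_tiny_unaligned_frags() return 1 (size <= 8
 * and offset not a multiple of 8), so mv643xx_eth_xmit() below will
 * linearize the skb before handing it to the DMA engine; the same 6-byte
 * fragment at offset 8 passes the check.
 */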

static void txq_submit_frag_skb(struct tx_queue *txq, struct sk_buff *skb)
{
        struct mv643xx_eth_private *mp = txq_to_mp(txq);
        int nr_frags = skb_shinfo(skb)->nr_frags;
        int frag;

        for (frag = 0; frag < nr_frags; frag++) {
                skb_frag_t *this_frag;
                int tx_index;
                struct tx_desc *desc;

                this_frag = &skb_shinfo(skb)->frags[frag];
                tx_index = txq->tx_curr_desc++;
                if (txq->tx_curr_desc == txq->tx_ring_size)
                        txq->tx_curr_desc = 0;
                desc = &txq->tx_desc_area[tx_index];

                /*
                 * The last fragment will generate an interrupt
                 * which will free the skb on TX completion.
                 */
                if (frag == nr_frags - 1) {
                        desc->cmd_sts = BUFFER_OWNED_BY_DMA |
                                        ZERO_PADDING | TX_LAST_DESC |
                                        TX_ENABLE_INTERRUPT;
                } else {
                        desc->cmd_sts = BUFFER_OWNED_BY_DMA;
                }

                desc->l4i_chk = 0;
                desc->byte_cnt = skb_frag_size(this_frag);
                desc->buf_ptr = skb_frag_dma_map(mp->dev->dev.parent,
                                                 this_frag, 0,
                                                 skb_frag_size(this_frag),
                                                 DMA_TO_DEVICE);
        }
}

static inline __be16 sum16_as_be(__sum16 sum)
{
        return (__force __be16)sum;
}

static int txq_submit_skb(struct tx_queue *txq, struct sk_buff *skb)
{
        struct mv643xx_eth_private *mp = txq_to_mp(txq);
        int nr_frags = skb_shinfo(skb)->nr_frags;
        int tx_index;
        struct tx_desc *desc;
        u32 cmd_sts;
        u16 l4i_chk;
        int length;

        cmd_sts = TX_FIRST_DESC | GEN_CRC | BUFFER_OWNED_BY_DMA;
        l4i_chk = 0;

        if (skb->ip_summed == CHECKSUM_PARTIAL) {
                int hdr_len;
                int tag_bytes;

                BUG_ON(skb->protocol != htons(ETH_P_IP) &&
                       skb->protocol != htons(ETH_P_8021Q));

                hdr_len = (void *)ip_hdr(skb) - (void *)skb->data;
                tag_bytes = hdr_len - ETH_HLEN;
                if (skb->len - hdr_len > mp->shared->tx_csum_limit ||
                    unlikely(tag_bytes & ~12)) {
                        if (skb_checksum_help(skb) == 0)
                                goto no_csum;
                        kfree_skb(skb);
                        return 1;
                }

                if (tag_bytes & 4)
                        cmd_sts |= MAC_HDR_EXTRA_4_BYTES;
                if (tag_bytes & 8)
                        cmd_sts |= MAC_HDR_EXTRA_8_BYTES;

                cmd_sts |= GEN_TCP_UDP_CHECKSUM |
                           GEN_IP_V4_CHECKSUM |
                           ip_hdr(skb)->ihl << TX_IHL_SHIFT;

                switch (ip_hdr(skb)->protocol) {
                case IPPROTO_UDP:
                        cmd_sts |= UDP_FRAME;
                        l4i_chk = ntohs(sum16_as_be(udp_hdr(skb)->check));
                        break;
                case IPPROTO_TCP:
                        l4i_chk = ntohs(sum16_as_be(tcp_hdr(skb)->check));
                        break;
                default:
                        BUG();
                }
        } else {
no_csum:
                /* Errata BTS #50, IHL must be 5 if no HW checksum */
                cmd_sts |= 5 << TX_IHL_SHIFT;
        }

        tx_index = txq->tx_curr_desc++;
        if (txq->tx_curr_desc == txq->tx_ring_size)
                txq->tx_curr_desc = 0;
        desc = &txq->tx_desc_area[tx_index];

        if (nr_frags) {
                txq_submit_frag_skb(txq, skb);
                length = skb_headlen(skb);
        } else {
                cmd_sts |= ZERO_PADDING | TX_LAST_DESC | TX_ENABLE_INTERRUPT;
                length = skb->len;
        }

        desc->l4i_chk = l4i_chk;
        desc->byte_cnt = length;
        desc->buf_ptr = dma_map_single(mp->dev->dev.parent, skb->data,
                                       length, DMA_TO_DEVICE);

        __skb_queue_tail(&txq->tx_skb, skb);

        skb_tx_timestamp(skb);

        /* ensure all other descriptors are written before first cmd_sts */
        wmb();
        desc->cmd_sts = cmd_sts;

        /* clear TX_END status */
        mp->work_tx_end &= ~(1 << txq->index);

        /* ensure all descriptors are written before poking hardware */
        wmb();
        txq_enable(txq);

        txq->tx_desc_count += nr_frags + 1;

        return 0;
}
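
/*
 * Worked example for the checksum path above (illustrative): for an IPv4
 * TCP frame with no VLAN tag, hdr_len is 14, so tag_bytes is 0 and
 * (tag_bytes & ~12) is 0; with one 802.1Q tag, tag_bytes is 4 and
 * MAC_HDR_EXTRA_4_BYTES gets set. Any header length other than
 * ETH_HLEN + {0, 4, 8, 12} bytes falls back to software checksumming
 * via skb_checksum_help().
 */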

static netdev_tx_t mv643xx_eth_xmit(struct sk_buff *skb, struct net_device *dev)
{
        struct mv643xx_eth_private *mp = netdev_priv(dev);
        int length, queue;
        struct tx_queue *txq;
        struct netdev_queue *nq;

        queue = skb_get_queue_mapping(skb);
        txq = mp->txq + queue;
        nq = netdev_get_tx_queue(dev, queue);

        if (has_tiny_unaligned_frags(skb) && __skb_linearize(skb)) {
                txq->tx_dropped++;
                netdev_printk(KERN_DEBUG, dev,
                              "failed to linearize skb with tiny unaligned fragment\n");
                return NETDEV_TX_BUSY;
        }

        if (txq->tx_ring_size - txq->tx_desc_count < MAX_SKB_FRAGS + 1) {
                if (net_ratelimit())
                        netdev_err(dev, "tx queue full?!\n");
                kfree_skb(skb);
                return NETDEV_TX_OK;
        }

        length = skb->len;

        if (!txq_submit_skb(txq, skb)) {
                int entries_left;

                txq->tx_bytes += length;
                txq->tx_packets++;

                entries_left = txq->tx_ring_size - txq->tx_desc_count;
                if (entries_left < MAX_SKB_FRAGS + 1)
                        netif_tx_stop_queue(nq);
        }

        return NETDEV_TX_OK;
}


/* tx napi ******************************************************************/
static void txq_kick(struct tx_queue *txq)
{
        struct mv643xx_eth_private *mp = txq_to_mp(txq);
        struct netdev_queue *nq = netdev_get_tx_queue(mp->dev, txq->index);
        u32 hw_desc_ptr;
        u32 expected_ptr;

        __netif_tx_lock(nq, smp_processor_id());

        if (rdlp(mp, TXQ_COMMAND) & (1 << txq->index))
                goto out;

        hw_desc_ptr = rdlp(mp, TXQ_CURRENT_DESC_PTR(txq->index));
        expected_ptr = (u32)txq->tx_desc_dma +
                                txq->tx_curr_desc * sizeof(struct tx_desc);

        if (hw_desc_ptr != expected_ptr)
                txq_enable(txq);

out:
        __netif_tx_unlock(nq);

        mp->work_tx_end &= ~(1 << txq->index);
}

static int txq_reclaim(struct tx_queue *txq, int budget, int force)
{
        struct mv643xx_eth_private *mp = txq_to_mp(txq);
        struct netdev_queue *nq = netdev_get_tx_queue(mp->dev, txq->index);
        int reclaimed;

        __netif_tx_lock(nq, smp_processor_id());

        reclaimed = 0;
        while (reclaimed < budget && txq->tx_desc_count > 0) {
                int tx_index;
                struct tx_desc *desc;
                u32 cmd_sts;
                struct sk_buff *skb;

                tx_index = txq->tx_used_desc;
                desc = &txq->tx_desc_area[tx_index];
                cmd_sts = desc->cmd_sts;

                if (cmd_sts & BUFFER_OWNED_BY_DMA) {
                        if (!force)
                                break;
                        desc->cmd_sts = cmd_sts & ~BUFFER_OWNED_BY_DMA;
                }

                txq->tx_used_desc = tx_index + 1;
                if (txq->tx_used_desc == txq->tx_ring_size)
                        txq->tx_used_desc = 0;

                reclaimed++;
                txq->tx_desc_count--;

                skb = NULL;
                if (cmd_sts & TX_LAST_DESC)
                        skb = __skb_dequeue(&txq->tx_skb);

                if (cmd_sts & ERROR_SUMMARY) {
                        netdev_info(mp->dev, "tx error\n");
                        mp->dev->stats.tx_errors++;
                }

                if (cmd_sts & TX_FIRST_DESC) {
                        dma_unmap_single(mp->dev->dev.parent, desc->buf_ptr,
                                         desc->byte_cnt, DMA_TO_DEVICE);
                } else {
                        dma_unmap_page(mp->dev->dev.parent, desc->buf_ptr,
                                       desc->byte_cnt, DMA_TO_DEVICE);
                }

                if (skb != NULL) {
                        if (skb_queue_len(&mp->rx_recycle) <
                                        mp->rx_ring_size &&
                            skb_recycle_check(skb, mp->skb_size))
                                __skb_queue_head(&mp->rx_recycle, skb);
                        else
                                dev_kfree_skb(skb);
                }
        }

        __netif_tx_unlock(nq);

        if (reclaimed < budget)
                mp->work_tx &= ~(1 << txq->index);

        return reclaimed;
}


/* tx rate control **********************************************************/
/*
 * Set total maximum TX rate (shared by all TX queues for this port)
 * to 'rate' bits per second, with a maximum burst of 'burst' bytes.
 */
static void tx_set_rate(struct mv643xx_eth_private *mp, int rate, int burst)
{
        int token_rate;
        int mtu;
        int bucket_size;

        token_rate = ((rate / 1000) * 64) / (mp->shared->t_clk / 1000);
        if (token_rate > 1023)
                token_rate = 1023;

        mtu = (mp->dev->mtu + 255) >> 8;
        if (mtu > 63)
                mtu = 63;

        bucket_size = (burst + 255) >> 8;
        if (bucket_size > 65535)
                bucket_size = 65535;

        switch (mp->shared->tx_bw_control) {
        case TX_BW_CONTROL_OLD_LAYOUT:
                wrlp(mp, TX_BW_RATE, token_rate);
                wrlp(mp, TX_BW_MTU, mtu);
                wrlp(mp, TX_BW_BURST, bucket_size);
                break;
        case TX_BW_CONTROL_NEW_LAYOUT:
                wrlp(mp, TX_BW_RATE_MOVED, token_rate);
                wrlp(mp, TX_BW_MTU_MOVED, mtu);
                wrlp(mp, TX_BW_BURST_MOVED, bucket_size);
                break;
        }
}
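
/*
 * Worked example (illustrative; assumes t_clk = 133 MHz, one typical
 * platform-supplied value): limiting a port to 10 Mbit/s gives
 * token_rate = ((10000000 / 1000) * 64) / (133000000 / 1000) = 4, and a
 * 64 KB burst gives bucket_size = (65536 + 255) >> 8 = 256.
 */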

static void txq_set_rate(struct tx_queue *txq, int rate, int burst)
{
        struct mv643xx_eth_private *mp = txq_to_mp(txq);
        int token_rate;
        int bucket_size;

        token_rate = ((rate / 1000) * 64) / (mp->shared->t_clk / 1000);
        if (token_rate > 1023)
                token_rate = 1023;

        bucket_size = (burst + 255) >> 8;
        if (bucket_size > 65535)
                bucket_size = 65535;

        wrlp(mp, TXQ_BW_TOKENS(txq->index), token_rate << 14);
        wrlp(mp, TXQ_BW_CONF(txq->index), (bucket_size << 10) | token_rate);
}

static void txq_set_fixed_prio_mode(struct tx_queue *txq)
{
        struct mv643xx_eth_private *mp = txq_to_mp(txq);
        int off;
        u32 val;

        /*
         * Turn on fixed priority mode.
         */
        off = 0;
        switch (mp->shared->tx_bw_control) {
        case TX_BW_CONTROL_OLD_LAYOUT:
                off = TXQ_FIX_PRIO_CONF;
                break;
        case TX_BW_CONTROL_NEW_LAYOUT:
                off = TXQ_FIX_PRIO_CONF_MOVED;
                break;
        }

        if (off) {
                val = rdlp(mp, off);
                val |= 1 << txq->index;
                wrlp(mp, off, val);
        }
}


/* mii management interface *************************************************/
static irqreturn_t mv643xx_eth_err_irq(int irq, void *dev_id)
{
        struct mv643xx_eth_shared_private *msp = dev_id;

        if (readl(msp->base + ERR_INT_CAUSE) & ERR_INT_SMI_DONE) {
                writel(~ERR_INT_SMI_DONE, msp->base + ERR_INT_CAUSE);
                wake_up(&msp->smi_busy_wait);
                return IRQ_HANDLED;
        }

        return IRQ_NONE;
}

static int smi_is_done(struct mv643xx_eth_shared_private *msp)
{
        return !(readl(msp->base + SMI_REG) & SMI_BUSY);
}

static int smi_wait_ready(struct mv643xx_eth_shared_private *msp)
{
        if (msp->err_interrupt == NO_IRQ) {
                int i;

                for (i = 0; !smi_is_done(msp); i++) {
                        if (i == 10)
                                return -ETIMEDOUT;
                        msleep(10);
                }

                return 0;
        }

        if (!smi_is_done(msp)) {
                wait_event_timeout(msp->smi_busy_wait, smi_is_done(msp),
                                   msecs_to_jiffies(100));
                if (!smi_is_done(msp))
                        return -ETIMEDOUT;
        }

        return 0;
}

static int smi_bus_read(struct mii_bus *bus, int addr, int reg)
{
        struct mv643xx_eth_shared_private *msp = bus->priv;
        void __iomem *smi_reg = msp->base + SMI_REG;
        int ret;

        if (smi_wait_ready(msp)) {
                pr_warn("SMI bus busy timeout\n");
                return -ETIMEDOUT;
        }

        writel(SMI_OPCODE_READ | (reg << 21) | (addr << 16), smi_reg);

        if (smi_wait_ready(msp)) {
                pr_warn("SMI bus busy timeout\n");
                return -ETIMEDOUT;
        }

        ret = readl(smi_reg);
        if (!(ret & SMI_READ_VALID)) {
                pr_warn("SMI bus read not valid\n");
                return -ENODEV;
        }

        return ret & 0xffff;
}

static int smi_bus_write(struct mii_bus *bus, int addr, int reg, u16 val)
{
        struct mv643xx_eth_shared_private *msp = bus->priv;
        void __iomem *smi_reg = msp->base + SMI_REG;

        if (smi_wait_ready(msp)) {
                pr_warn("SMI bus busy timeout\n");
                return -ETIMEDOUT;
        }

        writel(SMI_OPCODE_WRITE | (reg << 21) |
                (addr << 16) | (val & 0xffff), smi_reg);

        if (smi_wait_ready(msp)) {
                pr_warn("SMI bus busy timeout\n");
                return -ETIMEDOUT;
        }

        return 0;
}
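
/*
 * SMI register layout as used above (derived from the shifts in
 * smi_bus_read()/smi_bus_write()): bit 26 selects the read opcode
 * (writes use opcode 0), bits 25:21 hold the PHY register number,
 * bits 20:16 the PHY address, and bits 15:0 the data; SMI_BUSY (bit 28)
 * and SMI_READ_VALID (bit 27) report completion status.
 */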


/* statistics ***************************************************************/
static struct net_device_stats *mv643xx_eth_get_stats(struct net_device *dev)
{
        struct mv643xx_eth_private *mp = netdev_priv(dev);
        struct net_device_stats *stats = &dev->stats;
        unsigned long tx_packets = 0;
        unsigned long tx_bytes = 0;
        unsigned long tx_dropped = 0;
        int i;

        for (i = 0; i < mp->txq_count; i++) {
                struct tx_queue *txq = mp->txq + i;

                tx_packets += txq->tx_packets;
                tx_bytes += txq->tx_bytes;
                tx_dropped += txq->tx_dropped;
        }

        stats->tx_packets = tx_packets;
        stats->tx_bytes = tx_bytes;
        stats->tx_dropped = tx_dropped;

        return stats;
}

static void mv643xx_eth_grab_lro_stats(struct mv643xx_eth_private *mp)
{
        u32 lro_aggregated = 0;
        u32 lro_flushed = 0;
        u32 lro_no_desc = 0;
        int i;

        for (i = 0; i < mp->rxq_count; i++) {
                struct rx_queue *rxq = mp->rxq + i;

                lro_aggregated += rxq->lro_mgr.stats.aggregated;
                lro_flushed += rxq->lro_mgr.stats.flushed;
                lro_no_desc += rxq->lro_mgr.stats.no_desc;
        }

        mp->lro_counters.lro_aggregated = lro_aggregated;
        mp->lro_counters.lro_flushed = lro_flushed;
        mp->lro_counters.lro_no_desc = lro_no_desc;
}

static inline u32 mib_read(struct mv643xx_eth_private *mp, int offset)
{
        return rdl(mp, MIB_COUNTERS(mp->port_num) + offset);
}

static void mib_counters_clear(struct mv643xx_eth_private *mp)
{
        int i;

        for (i = 0; i < 0x80; i += 4)
                mib_read(mp, i);
}

static void mib_counters_update(struct mv643xx_eth_private *mp)
{
        struct mib_counters *p = &mp->mib_counters;

        spin_lock_bh(&mp->mib_counters_lock);
        p->good_octets_received += mib_read(mp, 0x00);
        p->bad_octets_received += mib_read(mp, 0x08);
        p->internal_mac_transmit_err += mib_read(mp, 0x0c);
        p->good_frames_received += mib_read(mp, 0x10);
        p->bad_frames_received += mib_read(mp, 0x14);
        p->broadcast_frames_received += mib_read(mp, 0x18);
        p->multicast_frames_received += mib_read(mp, 0x1c);
        p->frames_64_octets += mib_read(mp, 0x20);
        p->frames_65_to_127_octets += mib_read(mp, 0x24);
        p->frames_128_to_255_octets += mib_read(mp, 0x28);
        p->frames_256_to_511_octets += mib_read(mp, 0x2c);
        p->frames_512_to_1023_octets += mib_read(mp, 0x30);
        p->frames_1024_to_max_octets += mib_read(mp, 0x34);
        p->good_octets_sent += mib_read(mp, 0x38);
        p->good_frames_sent += mib_read(mp, 0x40);
        p->excessive_collision += mib_read(mp, 0x44);
        p->multicast_frames_sent += mib_read(mp, 0x48);
        p->broadcast_frames_sent += mib_read(mp, 0x4c);
        p->unrec_mac_control_received += mib_read(mp, 0x50);
        p->fc_sent += mib_read(mp, 0x54);
        p->good_fc_received += mib_read(mp, 0x58);
        p->bad_fc_received += mib_read(mp, 0x5c);
        p->undersize_received += mib_read(mp, 0x60);
        p->fragments_received += mib_read(mp, 0x64);
        p->oversize_received += mib_read(mp, 0x68);
        p->jabber_received += mib_read(mp, 0x6c);
        p->mac_receive_error += mib_read(mp, 0x70);
        p->bad_crc_event += mib_read(mp, 0x74);
        p->collision += mib_read(mp, 0x78);
        p->late_collision += mib_read(mp, 0x7c);
        spin_unlock_bh(&mp->mib_counters_lock);

        mod_timer(&mp->mib_counters_timer, jiffies + 30 * HZ);
}
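
/*
 * Why the 30-second resync matters (illustrative arithmetic): the
 * hardware MIB registers are 32 bits wide, and at 1 Gbit/s line rate an
 * octet counter can wrap in as little as 2^32 / 125000000 = ~34 seconds,
 * so the hardware counters are folded into the software copies well
 * before a second wrap could go unnoticed.
 */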

static void mib_counters_timer_wrapper(unsigned long _mp)
{
        struct mv643xx_eth_private *mp = (void *)_mp;

        mib_counters_update(mp);
}


/* interrupt coalescing *****************************************************/
/*
 * Hardware coalescing parameters are set in units of 64 t_clk
 * cycles.  I.e.:
 *
 *      coal_delay_in_usec = 64000000 * register_value / t_clk_rate
 *
 *      register_value = coal_delay_in_usec * t_clk_rate / 64000000
 *
 * In the ->set*() methods, we round the computed register value
 * to the nearest integer.
 */
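
/*
 * Worked example (illustrative; assumes t_clk = 133 MHz, one typical
 * platform-supplied value): asking for 100 usec of coalescing stores
 * (100 * 133000000 + 31999999) / 64000000 = 208 in the register, which
 * reads back as 208 * 64000000 / 133000000 = ~100 usec.
 */
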
static unsigned int get_rx_coal(struct mv643xx_eth_private *mp)
{
        u32 val = rdlp(mp, SDMA_CONFIG);
        u64 temp;

        if (mp->shared->extended_rx_coal_limit)
                temp = ((val & 0x02000000) >> 10) | ((val & 0x003fff80) >> 7);
        else
                temp = (val & 0x003fff00) >> 8;

        temp *= 64000000;
        do_div(temp, mp->shared->t_clk);

        return (unsigned int)temp;
}

static void set_rx_coal(struct mv643xx_eth_private *mp, unsigned int usec)
{
        u64 temp;
        u32 val;

        temp = (u64)usec * mp->shared->t_clk;
        temp += 31999999;
        do_div(temp, 64000000);

        val = rdlp(mp, SDMA_CONFIG);
        if (mp->shared->extended_rx_coal_limit) {
                if (temp > 0xffff)
                        temp = 0xffff;
                val &= ~0x023fff80;
                val |= (temp & 0x8000) << 10;
                val |= (temp & 0x7fff) << 7;
        } else {
                if (temp > 0x3fff)
                        temp = 0x3fff;
                val &= ~0x003fff00;
                val |= (temp & 0x3fff) << 8;
        }
        wrlp(mp, SDMA_CONFIG, val);
}

static unsigned int get_tx_coal(struct mv643xx_eth_private *mp)
{
        u64 temp;

        temp = (rdlp(mp, TX_FIFO_URGENT_THRESHOLD) & 0x3fff0) >> 4;
        temp *= 64000000;
        do_div(temp, mp->shared->t_clk);

        return (unsigned int)temp;
}

static void set_tx_coal(struct mv643xx_eth_private *mp, unsigned int usec)
{
        u64 temp;

        temp = (u64)usec * mp->shared->t_clk;
        temp += 31999999;
        do_div(temp, 64000000);

        if (temp > 0x3fff)
                temp = 0x3fff;

        wrlp(mp, TX_FIFO_URGENT_THRESHOLD, temp << 4);
}


/* ethtool ******************************************************************/
struct mv643xx_eth_stats {
        char stat_string[ETH_GSTRING_LEN];
        int sizeof_stat;
        int netdev_off;
        int mp_off;
};

#define SSTAT(m)                                                \
        { #m, FIELD_SIZEOF(struct net_device_stats, m),         \
          offsetof(struct net_device, stats.m), -1 }

#define MIBSTAT(m)                                              \
        { #m, FIELD_SIZEOF(struct mib_counters, m),             \
          -1, offsetof(struct mv643xx_eth_private, mib_counters.m) }

#define LROSTAT(m)                                              \
        { #m, FIELD_SIZEOF(struct lro_counters, m),             \
          -1, offsetof(struct mv643xx_eth_private, lro_counters.m) }

static const struct mv643xx_eth_stats mv643xx_eth_stats[] = {
        SSTAT(rx_packets),
        SSTAT(tx_packets),
        SSTAT(rx_bytes),
        SSTAT(tx_bytes),
        SSTAT(rx_errors),
        SSTAT(tx_errors),
        SSTAT(rx_dropped),
        SSTAT(tx_dropped),
        MIBSTAT(good_octets_received),
        MIBSTAT(bad_octets_received),
        MIBSTAT(internal_mac_transmit_err),
        MIBSTAT(good_frames_received),
        MIBSTAT(bad_frames_received),
        MIBSTAT(broadcast_frames_received),
        MIBSTAT(multicast_frames_received),
        MIBSTAT(frames_64_octets),
        MIBSTAT(frames_65_to_127_octets),
        MIBSTAT(frames_128_to_255_octets),
        MIBSTAT(frames_256_to_511_octets),
        MIBSTAT(frames_512_to_1023_octets),
        MIBSTAT(frames_1024_to_max_octets),
        MIBSTAT(good_octets_sent),
        MIBSTAT(good_frames_sent),
        MIBSTAT(excessive_collision),
        MIBSTAT(multicast_frames_sent),
        MIBSTAT(broadcast_frames_sent),
        MIBSTAT(unrec_mac_control_received),
        MIBSTAT(fc_sent),
        MIBSTAT(good_fc_received),
        MIBSTAT(bad_fc_received),
        MIBSTAT(undersize_received),
        MIBSTAT(fragments_received),
        MIBSTAT(oversize_received),
        MIBSTAT(jabber_received),
        MIBSTAT(mac_receive_error),
        MIBSTAT(bad_crc_event),
        MIBSTAT(collision),
        MIBSTAT(late_collision),
        LROSTAT(lro_aggregated),
        LROSTAT(lro_flushed),
        LROSTAT(lro_no_desc),
};

static int
mv643xx_eth_get_settings_phy(struct mv643xx_eth_private *mp,
                             struct ethtool_cmd *cmd)
{
        int err;

        err = phy_read_status(mp->phy);
        if (err == 0)
                err = phy_ethtool_gset(mp->phy, cmd);

        /*
         * The MAC does not support 1000baseT_Half.
         */
        cmd->supported &= ~SUPPORTED_1000baseT_Half;
        cmd->advertising &= ~ADVERTISED_1000baseT_Half;

        return err;
}

static int
mv643xx_eth_get_settings_phyless(struct mv643xx_eth_private *mp,
                                 struct ethtool_cmd *cmd)
{
        u32 port_status;

        port_status = rdlp(mp, PORT_STATUS);

        cmd->supported = SUPPORTED_MII;
        cmd->advertising = ADVERTISED_MII;
        switch (port_status & PORT_SPEED_MASK) {
        case PORT_SPEED_10:
                ethtool_cmd_speed_set(cmd, SPEED_10);
                break;
        case PORT_SPEED_100:
                ethtool_cmd_speed_set(cmd, SPEED_100);
                break;
        case PORT_SPEED_1000:
                ethtool_cmd_speed_set(cmd, SPEED_1000);
                break;
        default:
                cmd->speed = -1;
                break;
        }
        cmd->duplex = (port_status & FULL_DUPLEX) ? DUPLEX_FULL : DUPLEX_HALF;
        cmd->port = PORT_MII;
        cmd->phy_address = 0;
        cmd->transceiver = XCVR_INTERNAL;
        cmd->autoneg = AUTONEG_DISABLE;
        cmd->maxtxpkt = 1;
        cmd->maxrxpkt = 1;

        return 0;
}

static int
mv643xx_eth_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
{
        struct mv643xx_eth_private *mp = netdev_priv(dev);

        if (mp->phy != NULL)
                return mv643xx_eth_get_settings_phy(mp, cmd);
        else
                return mv643xx_eth_get_settings_phyless(mp, cmd);
}

static int
mv643xx_eth_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
{
        struct mv643xx_eth_private *mp = netdev_priv(dev);

        if (mp->phy == NULL)
                return -EINVAL;

        /*
         * The MAC does not support 1000baseT_Half.
         */
        cmd->advertising &= ~ADVERTISED_1000baseT_Half;

        return phy_ethtool_sset(mp->phy, cmd);
}

static void mv643xx_eth_get_drvinfo(struct net_device *dev,
                                    struct ethtool_drvinfo *drvinfo)
{
        strncpy(drvinfo->driver, mv643xx_eth_driver_name, 32);
        strncpy(drvinfo->version, mv643xx_eth_driver_version, 32);
        strncpy(drvinfo->fw_version, "N/A", 32);
        strncpy(drvinfo->bus_info, "platform", 32);
        drvinfo->n_stats = ARRAY_SIZE(mv643xx_eth_stats);
}

static int mv643xx_eth_nway_reset(struct net_device *dev)
{
        struct mv643xx_eth_private *mp = netdev_priv(dev);

        if (mp->phy == NULL)
                return -EINVAL;

        return genphy_restart_aneg(mp->phy);
}

static int
mv643xx_eth_get_coalesce(struct net_device *dev, struct ethtool_coalesce *ec)
{
        struct mv643xx_eth_private *mp = netdev_priv(dev);

        ec->rx_coalesce_usecs = get_rx_coal(mp);
        ec->tx_coalesce_usecs = get_tx_coal(mp);

        return 0;
}

static int
mv643xx_eth_set_coalesce(struct net_device *dev, struct ethtool_coalesce *ec)
{
        struct mv643xx_eth_private *mp = netdev_priv(dev);

        set_rx_coal(mp, ec->rx_coalesce_usecs);
        set_tx_coal(mp, ec->tx_coalesce_usecs);

        return 0;
}

static void
mv643xx_eth_get_ringparam(struct net_device *dev, struct ethtool_ringparam *er)
{
        struct mv643xx_eth_private *mp = netdev_priv(dev);

        er->rx_max_pending = 4096;
        er->tx_max_pending = 4096;

        er->rx_pending = mp->rx_ring_size;
        er->tx_pending = mp->tx_ring_size;
}

static int
mv643xx_eth_set_ringparam(struct net_device *dev, struct ethtool_ringparam *er)
{
        struct mv643xx_eth_private *mp = netdev_priv(dev);

        if (er->rx_mini_pending || er->rx_jumbo_pending)
                return -EINVAL;

        mp->rx_ring_size = er->rx_pending < 4096 ? er->rx_pending : 4096;
        mp->tx_ring_size = er->tx_pending < 4096 ? er->tx_pending : 4096;

        if (netif_running(dev)) {
                mv643xx_eth_stop(dev);
                if (mv643xx_eth_open(dev)) {
                        netdev_err(dev,
                                   "fatal error on re-opening device after ring param change\n");
                        return -ENOMEM;
                }
        }

        return 0;
}


static int
mv643xx_eth_set_features(struct net_device *dev, u32 features)
{
        struct mv643xx_eth_private *mp = netdev_priv(dev);
        u32 rx_csum = features & NETIF_F_RXCSUM;

        wrlp(mp, PORT_CONFIG, rx_csum ? 0x02000000 : 0x00000000);

        return 0;
}

static void mv643xx_eth_get_strings(struct net_device *dev,
                                    uint32_t stringset, uint8_t *data)
{
        int i;

        if (stringset == ETH_SS_STATS) {
                for (i = 0; i < ARRAY_SIZE(mv643xx_eth_stats); i++) {
                        memcpy(data + i * ETH_GSTRING_LEN,
                               mv643xx_eth_stats[i].stat_string,
                               ETH_GSTRING_LEN);
                }
        }
}

static void mv643xx_eth_get_ethtool_stats(struct net_device *dev,
                                          struct ethtool_stats *stats,
                                          uint64_t *data)
{
        struct mv643xx_eth_private *mp = netdev_priv(dev);
        int i;

        mv643xx_eth_get_stats(dev);
        mib_counters_update(mp);
        mv643xx_eth_grab_lro_stats(mp);

        for (i = 0; i < ARRAY_SIZE(mv643xx_eth_stats); i++) {
                const struct mv643xx_eth_stats *stat;
                void *p;

                stat = mv643xx_eth_stats + i;

                if (stat->netdev_off >= 0)
                        p = ((void *)mp->dev) + stat->netdev_off;
                else
                        p = ((void *)mp) + stat->mp_off;

                data[i] = (stat->sizeof_stat == 8) ?
                                *(uint64_t *)p : *(uint32_t *)p;
        }
}

static int mv643xx_eth_get_sset_count(struct net_device *dev, int sset)
{
        if (sset == ETH_SS_STATS)
                return ARRAY_SIZE(mv643xx_eth_stats);

        return -EOPNOTSUPP;
}

static const struct ethtool_ops mv643xx_eth_ethtool_ops = {
        .get_settings           = mv643xx_eth_get_settings,
        .set_settings           = mv643xx_eth_set_settings,
        .get_drvinfo            = mv643xx_eth_get_drvinfo,
        .nway_reset             = mv643xx_eth_nway_reset,
        .get_link               = ethtool_op_get_link,
        .get_coalesce           = mv643xx_eth_get_coalesce,
        .set_coalesce           = mv643xx_eth_set_coalesce,
        .get_ringparam          = mv643xx_eth_get_ringparam,
        .set_ringparam          = mv643xx_eth_set_ringparam,
        .get_strings            = mv643xx_eth_get_strings,
        .get_ethtool_stats      = mv643xx_eth_get_ethtool_stats,
        .get_sset_count         = mv643xx_eth_get_sset_count,
};


/* address handling *********************************************************/
static void uc_addr_get(struct mv643xx_eth_private *mp, unsigned char *addr)
{
        unsigned int mac_h = rdlp(mp, MAC_ADDR_HIGH);
        unsigned int mac_l = rdlp(mp, MAC_ADDR_LOW);

        addr[0] = (mac_h >> 24) & 0xff;
        addr[1] = (mac_h >> 16) & 0xff;
        addr[2] = (mac_h >> 8) & 0xff;
        addr[3] = mac_h & 0xff;
        addr[4] = (mac_l >> 8) & 0xff;
        addr[5] = mac_l & 0xff;
}

static void uc_addr_set(struct mv643xx_eth_private *mp, unsigned char *addr)
{
        wrlp(mp, MAC_ADDR_HIGH,
             (addr[0] << 24) | (addr[1] << 16) | (addr[2] << 8) | addr[3]);
        wrlp(mp, MAC_ADDR_LOW, (addr[4] << 8) | addr[5]);
}

static u32 uc_addr_filter_mask(struct net_device *dev)
{
        struct netdev_hw_addr *ha;
        u32 nibbles;

        if (dev->flags & IFF_PROMISC)
                return 0;

        nibbles = 1 << (dev->dev_addr[5] & 0x0f);
        netdev_for_each_uc_addr(ha, dev) {
                if (memcmp(dev->dev_addr, ha->addr, 5))
                        return 0;
                if ((dev->dev_addr[5] ^ ha->addr[5]) & 0xf0)
                        return 0;

                nibbles |= 1 << (ha->addr[5] & 0x0f);
        }

        return nibbles;
}
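
/*
 * Example of the nibble mask (illustrative; the addresses are made up):
 * the filter can only match unicast addresses that differ from dev_addr
 * in the low 4 bits of the last byte. With dev_addr 00:09:dd:51:52:53,
 * an extra address 00:09:dd:51:52:5a sets bit 0x0a in the mask; anything
 * differing in the first five bytes, or in the high nibble of the last
 * byte, forces a return of 0 and thus unicast promiscuous mode.
 */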

static void mv643xx_eth_program_unicast_filter(struct net_device *dev)
{
        struct mv643xx_eth_private *mp = netdev_priv(dev);
        u32 port_config;
        u32 nibbles;
        int i;

        uc_addr_set(mp, dev->dev_addr);

        port_config = rdlp(mp, PORT_CONFIG) & ~UNICAST_PROMISCUOUS_MODE;

        nibbles = uc_addr_filter_mask(dev);
        if (!nibbles) {
                port_config |= UNICAST_PROMISCUOUS_MODE;
                nibbles = 0xffff;
        }

        for (i = 0; i < 16; i += 4) {
                int off = UNICAST_TABLE(mp->port_num) + i;
                u32 v;

                v = 0;
                if (nibbles & 1)
                        v |= 0x00000001;
                if (nibbles & 2)
                        v |= 0x00000100;
                if (nibbles & 4)
                        v |= 0x00010000;
                if (nibbles & 8)
                        v |= 0x01000000;
                nibbles >>= 4;

                wrl(mp, off, v);
        }

        wrlp(mp, PORT_CONFIG, port_config);
}

static int addr_crc(unsigned char *addr)
{
        int crc = 0;
        int i;

        for (i = 0; i < 6; i++) {
                int j;

                crc = (crc ^ addr[i]) << 8;
                for (j = 7; j >= 0; j--) {
                        if (crc & (0x100 << j))
                                crc ^= 0x107 << j;
                }
        }

        return crc;
}
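
/*
 * Note (illustrative): addr_crc() is a bitwise CRC-8 over the 6-byte MAC
 * address with polynomial x^8 + x^2 + x + 1 (the 0x107 above), producing
 * the 8-bit index into the 256-entry "other" multicast hash table used
 * below.
 */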
1753
1754static void mv643xx_eth_program_multicast_filter(struct net_device *dev)
1755{
1756 struct mv643xx_eth_private *mp = netdev_priv(dev);
1757 u32 *mc_spec;
1758 u32 *mc_other;
1759 struct netdev_hw_addr *ha;
1760 int i;
1761
1762 if (dev->flags & (IFF_PROMISC | IFF_ALLMULTI)) {
1763 int port_num;
1764 u32 accept;
1765
1766oom:
1767 port_num = mp->port_num;
1768 accept = 0x01010101;
1769 for (i = 0; i < 0x100; i += 4) {
1770 wrl(mp, SPECIAL_MCAST_TABLE(port_num) + i, accept);
1771 wrl(mp, OTHER_MCAST_TABLE(port_num) + i, accept);
1772 }
1773 return;
1774 }
1775
1776 mc_spec = kmalloc(0x200, GFP_ATOMIC);
1777 if (mc_spec == NULL)
1778 goto oom;
1779 mc_other = mc_spec + (0x100 >> 2);
1780
1781 memset(mc_spec, 0, 0x100);
1782 memset(mc_other, 0, 0x100);
1783
1784 netdev_for_each_mc_addr(ha, dev) {
1785 u8 *a = ha->addr;
1786 u32 *table;
1787 int entry;
1788
1789 if (memcmp(a, "\x01\x00\x5e\x00\x00", 5) == 0) {
1790 table = mc_spec;
1791 entry = a[5];
1792 } else {
1793 table = mc_other;
1794 entry = addr_crc(a);
1795 }
1796
1797 table[entry >> 2] |= 1 << (8 * (entry & 3));
1798 }
1799
1800 for (i = 0; i < 0x100; i += 4) {
1801 wrl(mp, SPECIAL_MCAST_TABLE(mp->port_num) + i, mc_spec[i >> 2]);
1802 wrl(mp, OTHER_MCAST_TABLE(mp->port_num) + i, mc_other[i >> 2]);
1803 }
1804
1805 kfree(mc_spec);
1806}
1807
1808static void mv643xx_eth_set_rx_mode(struct net_device *dev)
1809{
1810 mv643xx_eth_program_unicast_filter(dev);
1811 mv643xx_eth_program_multicast_filter(dev);
1812}
1813
1814static int mv643xx_eth_set_mac_address(struct net_device *dev, void *addr)
1815{
1816 struct sockaddr *sa = addr;
1817
1818 if (!is_valid_ether_addr(sa->sa_data))
1819 return -EINVAL;
1820
1821 memcpy(dev->dev_addr, sa->sa_data, ETH_ALEN);
1822
1823 netif_addr_lock_bh(dev);
1824 mv643xx_eth_program_unicast_filter(dev);
1825 netif_addr_unlock_bh(dev);
1826
1827 return 0;
1828}
1829
1830
1831/* rx/tx queue initialisation ***********************************************/
1832static int rxq_init(struct mv643xx_eth_private *mp, int index)
1833{
1834 struct rx_queue *rxq = mp->rxq + index;
1835 struct rx_desc *rx_desc;
1836 int size;
1837 int i;
1838
1839 rxq->index = index;
1840
1841 rxq->rx_ring_size = mp->rx_ring_size;
1842
1843 rxq->rx_desc_count = 0;
1844 rxq->rx_curr_desc = 0;
1845 rxq->rx_used_desc = 0;
1846
1847 size = rxq->rx_ring_size * sizeof(struct rx_desc);
1848
1849 if (index == 0 && size <= mp->rx_desc_sram_size) {
1850 rxq->rx_desc_area = ioremap(mp->rx_desc_sram_addr,
1851 mp->rx_desc_sram_size);
1852 rxq->rx_desc_dma = mp->rx_desc_sram_addr;
1853 } else {
1854 rxq->rx_desc_area = dma_alloc_coherent(mp->dev->dev.parent,
1855 size, &rxq->rx_desc_dma,
1856 GFP_KERNEL);
1857 }
1858
1859 if (rxq->rx_desc_area == NULL) {
1860 netdev_err(mp->dev,
1861 "can't allocate rx ring (%d bytes)\n", size);
1862 goto out;
1863 }
1864 memset(rxq->rx_desc_area, 0, size);
1865
1866 rxq->rx_desc_area_size = size;
1867 rxq->rx_skb = kcalloc(rxq->rx_ring_size, sizeof(*rxq->rx_skb),
1868 GFP_KERNEL);
1869 if (rxq->rx_skb == NULL) {
1870 netdev_err(mp->dev, "can't allocate rx skb ring\n");
1871 goto out_free;
1872 }
1873
1874 rx_desc = (struct rx_desc *)rxq->rx_desc_area;
1875 for (i = 0; i < rxq->rx_ring_size; i++) {
1876 int nexti;
1877
1878 nexti = i + 1;
1879 if (nexti == rxq->rx_ring_size)
1880 nexti = 0;
1881
1882 rx_desc[i].next_desc_ptr = rxq->rx_desc_dma +
1883 nexti * sizeof(struct rx_desc);
1884 }
1885
1886 rxq->lro_mgr.dev = mp->dev;
1887 memset(&rxq->lro_mgr.stats, 0, sizeof(rxq->lro_mgr.stats));
1888 rxq->lro_mgr.features = LRO_F_NAPI;
1889 rxq->lro_mgr.ip_summed = CHECKSUM_UNNECESSARY;
1890 rxq->lro_mgr.ip_summed_aggr = CHECKSUM_UNNECESSARY;
1891 rxq->lro_mgr.max_desc = ARRAY_SIZE(rxq->lro_arr);
1892 rxq->lro_mgr.max_aggr = 32;
1893 rxq->lro_mgr.frag_align_pad = 0;
1894 rxq->lro_mgr.lro_arr = rxq->lro_arr;
1895 rxq->lro_mgr.get_skb_header = mv643xx_get_skb_header;
1896
1897 memset(&rxq->lro_arr, 0, sizeof(rxq->lro_arr));
1898
1899 return 0;
1900
1901
1902out_free:
1903 if (index == 0 && size <= mp->rx_desc_sram_size)
1904 iounmap(rxq->rx_desc_area);
1905 else
1906 dma_free_coherent(mp->dev->dev.parent, size,
1907 rxq->rx_desc_area,
1908 rxq->rx_desc_dma);
1909
1910out:
1911 return -ENOMEM;
1912}
1913
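/*
 * The loop above links the descriptors into a hardware-visible ring:
 * each rx_desc's next_desc_ptr holds the DMA (bus) address of the
 * following descriptor, with the last one pointing back at descriptor
 * 0, so the SDMA engine can walk the ring indefinitely without any
 * CPU involvement.
 */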
1914static void rxq_deinit(struct rx_queue *rxq)
1915{
1916 struct mv643xx_eth_private *mp = rxq_to_mp(rxq);
1917 int i;
1918
1919 rxq_disable(rxq);
1920
1921 for (i = 0; i < rxq->rx_ring_size; i++) {
1922 if (rxq->rx_skb[i]) {
1923 dev_kfree_skb(rxq->rx_skb[i]);
1924 rxq->rx_desc_count--;
1925 }
1926 }
1927
1928 if (rxq->rx_desc_count) {
1929 netdev_err(mp->dev, "error freeing rx ring -- %d skbs stuck\n",
1930 rxq->rx_desc_count);
1931 }
1932
1933 if (rxq->index == 0 &&
1934 rxq->rx_desc_area_size <= mp->rx_desc_sram_size)
1935 iounmap(rxq->rx_desc_area);
1936 else
1937 dma_free_coherent(mp->dev->dev.parent, rxq->rx_desc_area_size,
1938 rxq->rx_desc_area, rxq->rx_desc_dma);
1939
1940 kfree(rxq->rx_skb);
1941}
1942
1943static int txq_init(struct mv643xx_eth_private *mp, int index)
1944{
1945 struct tx_queue *txq = mp->txq + index;
1946 struct tx_desc *tx_desc;
1947 int size;
1948 int i;
1949
1950 txq->index = index;
1951
1952 txq->tx_ring_size = mp->tx_ring_size;
1953
1954 txq->tx_desc_count = 0;
1955 txq->tx_curr_desc = 0;
1956 txq->tx_used_desc = 0;
1957
1958 size = txq->tx_ring_size * sizeof(struct tx_desc);
1959
1960 if (index == 0 && size <= mp->tx_desc_sram_size) {
1961 txq->tx_desc_area = ioremap(mp->tx_desc_sram_addr,
1962 mp->tx_desc_sram_size);
1963 txq->tx_desc_dma = mp->tx_desc_sram_addr;
1964 } else {
1965 txq->tx_desc_area = dma_alloc_coherent(mp->dev->dev.parent,
1966 size, &txq->tx_desc_dma,
1967 GFP_KERNEL);
1968 }
1969
1970 if (txq->tx_desc_area == NULL) {
1971 netdev_err(mp->dev,
1972 "can't allocate tx ring (%d bytes)\n", size);
1973 return -ENOMEM;
1974 }
1975 memset(txq->tx_desc_area, 0, size);
1976
1977 txq->tx_desc_area_size = size;
1978
1979 tx_desc = (struct tx_desc *)txq->tx_desc_area;
1980 for (i = 0; i < txq->tx_ring_size; i++) {
1981 struct tx_desc *txd = tx_desc + i;
1982 int nexti;
1983
1984 nexti = i + 1;
1985 if (nexti == txq->tx_ring_size)
1986 nexti = 0;
1987
1988 txd->cmd_sts = 0;
1989 txd->next_desc_ptr = txq->tx_desc_dma +
1990 nexti * sizeof(struct tx_desc);
1991 }
1992
1993 skb_queue_head_init(&txq->tx_skb);
1994
1995 return 0;
1996}
1997
1998static void txq_deinit(struct tx_queue *txq)
1999{
2000 struct mv643xx_eth_private *mp = txq_to_mp(txq);
2001
2002 txq_disable(txq);
2003 txq_reclaim(txq, txq->tx_ring_size, 1);
2004
2005 BUG_ON(txq->tx_used_desc != txq->tx_curr_desc);
2006
2007 if (txq->index == 0 &&
2008 txq->tx_desc_area_size <= mp->tx_desc_sram_size)
2009 iounmap(txq->tx_desc_area);
2010 else
2011 dma_free_coherent(mp->dev->dev.parent, txq->tx_desc_area_size,
2012 txq->tx_desc_area, txq->tx_desc_dma);
2013}
2014
2015
2016/* netdev ops and related ***************************************************/
2017static int mv643xx_eth_collect_events(struct mv643xx_eth_private *mp)
2018{
2019 u32 int_cause;
2020 u32 int_cause_ext;
2021
2022 int_cause = rdlp(mp, INT_CAUSE) & mp->int_mask;
2023 if (int_cause == 0)
2024 return 0;
2025
2026 int_cause_ext = 0;
2027 if (int_cause & INT_EXT) {
2028 int_cause &= ~INT_EXT;
2029 int_cause_ext = rdlp(mp, INT_CAUSE_EXT);
2030 }
2031
2032 if (int_cause) {
2033 wrlp(mp, INT_CAUSE, ~int_cause);
2034 mp->work_tx_end |= ((int_cause & INT_TX_END) >> 19) &
2035 ~(rdlp(mp, TXQ_COMMAND) & 0xff);
2036 mp->work_rx |= (int_cause & INT_RX) >> 2;
2037 }
2038
2039 int_cause_ext &= INT_EXT_LINK_PHY | INT_EXT_TX;
2040 if (int_cause_ext) {
2041 wrlp(mp, INT_CAUSE_EXT, ~int_cause_ext);
2042 if (int_cause_ext & INT_EXT_LINK_PHY)
2043 mp->work_link = 1;
2044 mp->work_tx |= int_cause_ext & INT_EXT_TX;
2045 }
2046
2047 return 1;
2048}
2049
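/*
 * The INT_TX_END handling above packs some register knowledge into
 * one expression: shifting the cause bits right by 19 aligns the
 * per-queue "TX end" bits (apparently bits 26:19) with queue numbers
 * 0..7, and masking with the inverse of the low byte of TXQ_COMMAND
 * keeps only queues whose enable bit the hardware has already
 * cleared. For example, a cause of (1 << 19) with TXQ_COMMAND ==
 * 0x01 yields 0: queue 0 signalled TX end but is still enabled, so
 * no end-of-queue work is recorded.
 */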
2050static irqreturn_t mv643xx_eth_irq(int irq, void *dev_id)
2051{
2052 struct net_device *dev = (struct net_device *)dev_id;
2053 struct mv643xx_eth_private *mp = netdev_priv(dev);
2054
2055 if (unlikely(!mv643xx_eth_collect_events(mp)))
2056 return IRQ_NONE;
2057
2058 wrlp(mp, INT_MASK, 0);
2059 napi_schedule(&mp->napi);
2060
2061 return IRQ_HANDLED;
2062}
2063
2064static void handle_link_event(struct mv643xx_eth_private *mp)
2065{
2066 struct net_device *dev = mp->dev;
2067 u32 port_status;
2068 int speed;
2069 int duplex;
2070 int fc;
2071
2072 port_status = rdlp(mp, PORT_STATUS);
2073 if (!(port_status & LINK_UP)) {
2074 if (netif_carrier_ok(dev)) {
2075 int i;
2076
2077 netdev_info(dev, "link down\n");
2078
2079 netif_carrier_off(dev);
2080
2081 for (i = 0; i < mp->txq_count; i++) {
2082 struct tx_queue *txq = mp->txq + i;
2083
2084 txq_reclaim(txq, txq->tx_ring_size, 1);
2085 txq_reset_hw_ptr(txq);
2086 }
2087 }
2088 return;
2089 }
2090
2091 switch (port_status & PORT_SPEED_MASK) {
2092 case PORT_SPEED_10:
2093 speed = 10;
2094 break;
2095 case PORT_SPEED_100:
2096 speed = 100;
2097 break;
2098 case PORT_SPEED_1000:
2099 speed = 1000;
2100 break;
2101 default:
2102 speed = -1;
2103 break;
2104 }
2105 duplex = (port_status & FULL_DUPLEX) ? 1 : 0;
2106 fc = (port_status & FLOW_CONTROL_ENABLED) ? 1 : 0;
2107
2108 netdev_info(dev, "link up, %d Mb/s, %s duplex, flow control %sabled\n",
2109 speed, duplex ? "full" : "half", fc ? "en" : "dis");
2110
2111 if (!netif_carrier_ok(dev))
2112 netif_carrier_on(dev);
2113}
2114
2115static int mv643xx_eth_poll(struct napi_struct *napi, int budget)
2116{
2117 struct mv643xx_eth_private *mp;
2118 int work_done;
2119
2120 mp = container_of(napi, struct mv643xx_eth_private, napi);
2121
2122 if (unlikely(mp->oom)) {
2123 mp->oom = 0;
2124 del_timer(&mp->rx_oom);
2125 }
2126
2127 work_done = 0;
2128 while (work_done < budget) {
2129 u8 queue_mask;
2130 int queue;
2131 int work_tbd;
2132
2133 if (mp->work_link) {
2134 mp->work_link = 0;
2135 handle_link_event(mp);
2136 work_done++;
2137 continue;
2138 }
2139
2140 queue_mask = mp->work_tx | mp->work_tx_end | mp->work_rx;
2141 if (likely(!mp->oom))
2142 queue_mask |= mp->work_rx_refill;
2143
2144 if (!queue_mask) {
2145 if (mv643xx_eth_collect_events(mp))
2146 continue;
2147 break;
2148 }
2149
2150 queue = fls(queue_mask) - 1;
2151 queue_mask = 1 << queue;
2152
2153 work_tbd = budget - work_done;
2154 if (work_tbd > 16)
2155 work_tbd = 16;
2156
2157 if (mp->work_tx_end & queue_mask) {
2158 txq_kick(mp->txq + queue);
2159 } else if (mp->work_tx & queue_mask) {
2160 work_done += txq_reclaim(mp->txq + queue, work_tbd, 0);
2161 txq_maybe_wake(mp->txq + queue);
2162 } else if (mp->work_rx & queue_mask) {
2163 work_done += rxq_process(mp->rxq + queue, work_tbd);
2164 } else if (!mp->oom && (mp->work_rx_refill & queue_mask)) {
2165 work_done += rxq_refill(mp->rxq + queue, work_tbd);
2166 } else {
2167 BUG();
2168 }
2169 }
2170
2171 if (work_done < budget) {
2172 if (mp->oom)
2173 mod_timer(&mp->rx_oom, jiffies + (HZ / 10));
2174 napi_complete(napi);
2175 wrlp(mp, INT_MASK, mp->int_mask);
2176 }
2177
2178 return work_done;
2179}
2180
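/*
 * Scheduling note for the poll loop above: fls() means the
 * highest-numbered queue with pending work is serviced first, with
 * TX-end kicks preferred over TX reclaim, then RX processing, then
 * RX refill. work_tbd caps each pass at 16 units of work, so a
 * single busy queue cannot burn the whole NAPI budget before the
 * other sources get another look.
 */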
2181static inline void oom_timer_wrapper(unsigned long data)
2182{
2183 struct mv643xx_eth_private *mp = (void *)data;
2184
2185 napi_schedule(&mp->napi);
2186}
2187
2188static void phy_reset(struct mv643xx_eth_private *mp)
2189{
2190 int data;
2191
2192 data = phy_read(mp->phy, MII_BMCR);
2193 if (data < 0)
2194 return;
2195
2196 data |= BMCR_RESET;
2197 if (phy_write(mp->phy, MII_BMCR, data) < 0)
2198 return;
2199
2200 do {
2201 data = phy_read(mp->phy, MII_BMCR);
2202 } while (data >= 0 && data & BMCR_RESET);
2203}
2204
2205static void port_start(struct mv643xx_eth_private *mp)
2206{
2207 u32 pscr;
2208 int i;
2209
2210 /*
2211 * Perform PHY reset, if there is a PHY.
2212 */
2213 if (mp->phy != NULL) {
2214 struct ethtool_cmd cmd;
2215
2216 mv643xx_eth_get_settings(mp->dev, &cmd);
2217 phy_reset(mp);
2218 mv643xx_eth_set_settings(mp->dev, &cmd);
2219 }
2220
2221 /*
2222 * Configure basic link parameters.
2223 */
2224 pscr = rdlp(mp, PORT_SERIAL_CONTROL);
2225
2226 pscr |= SERIAL_PORT_ENABLE;
2227 wrlp(mp, PORT_SERIAL_CONTROL, pscr);
2228
2229 pscr |= DO_NOT_FORCE_LINK_FAIL;
2230 if (mp->phy == NULL)
2231 pscr |= FORCE_LINK_PASS;
2232 wrlp(mp, PORT_SERIAL_CONTROL, pscr);
2233
2234 /*
2235 * Configure TX path and queues.
2236 */
2237 tx_set_rate(mp, 1000000000, 16777216);
2238 for (i = 0; i < mp->txq_count; i++) {
2239 struct tx_queue *txq = mp->txq + i;
2240
2241 txq_reset_hw_ptr(txq);
2242 txq_set_rate(txq, 1000000000, 16777216);
2243 txq_set_fixed_prio_mode(txq);
2244 }
2245
2246 /*
2247 * Receive all unmatched unicast, TCP, UDP, BPDU and broadcast
2248 * frames to RX queue #0, and include the pseudo-header when
2249 * calculating receive checksums.
2250 */
2251 mv643xx_eth_set_features(mp->dev, mp->dev->features);
2252
2253 /*
2254 * Treat BPDUs as normal multicasts, and disable partition mode.
2255 */
2256 wrlp(mp, PORT_CONFIG_EXT, 0x00000000);
2257
2258 /*
2259 * Add configured unicast addresses to address filter table.
2260 */
2261 mv643xx_eth_program_unicast_filter(mp->dev);
2262
2263 /*
2264 * Enable the receive queues.
2265 */
2266 for (i = 0; i < mp->rxq_count; i++) {
2267 struct rx_queue *rxq = mp->rxq + i;
2268 u32 addr;
2269
2270 addr = (u32)rxq->rx_desc_dma;
2271 addr += rxq->rx_curr_desc * sizeof(struct rx_desc);
2272 wrlp(mp, RXQ_CURRENT_DESC_PTR(i), addr);
2273
2274 rxq_enable(rxq);
2275 }
2276}
2277
2278static void mv643xx_eth_recalc_skb_size(struct mv643xx_eth_private *mp)
2279{
2280 int skb_size;
2281
2282 /*
2283 * Reserve 2+14 bytes for an ethernet header (the hardware
2284 * automatically prepends 2 bytes of dummy data to each
2285 * received packet), 16 bytes for up to four VLAN tags, and
2286 * 4 bytes for the trailing FCS -- 36 bytes total.
2287 */
2288 skb_size = mp->dev->mtu + 36;
2289
2290 /*
2291 * Make sure that the skb size is a multiple of 8 bytes, as
2292 * the lower three bits of the receive descriptor's buffer
2293 * size field are ignored by the hardware.
2294 */
2295 mp->skb_size = (skb_size + 7) & ~7;
2296
2297 /*
2298 * If NET_SKB_PAD is smaller than a cache line,
2299 * netdev_alloc_skb() will cause skb->data to be misaligned
2300 * to a cache line boundary. If this is the case, include
2301 * some extra space to allow re-aligning the data area.
2302 */
2303 mp->skb_size += SKB_DMA_REALIGN;
2304}
2305
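/*
 * A standalone sketch of the computation above (skb_size_for() is
 * purely illustrative; the constant 36 and the rounding are taken
 * straight from the function, while SKB_DMA_REALIGN depends on the
 * platform's NET_SKB_PAD and cache line size):
 *
 *	static int skb_size_for(int mtu)
 *	{
 *		int skb_size = mtu + 36;  // 2+14 hdr, 16 VLAN, 4 FCS
 *		return ((skb_size + 7) & ~7) + SKB_DMA_REALIGN;
 *	}
 *
 * For the default MTU of 1500 this gives 1536 + SKB_DMA_REALIGN,
 * since 1500 + 36 = 1536 is already a multiple of 8.
 */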
2306static int mv643xx_eth_open(struct net_device *dev)
2307{
2308 struct mv643xx_eth_private *mp = netdev_priv(dev);
2309 int err;
2310 int i;
2311
2312 wrlp(mp, INT_CAUSE, 0);
2313 wrlp(mp, INT_CAUSE_EXT, 0);
2314 rdlp(mp, INT_CAUSE_EXT);
2315
2316 err = request_irq(dev->irq, mv643xx_eth_irq,
2317 IRQF_SHARED, dev->name, dev);
2318 if (err) {
2319 netdev_err(dev, "can't assign irq\n");
2320 return -EAGAIN;
2321 }
2322
2323 mv643xx_eth_recalc_skb_size(mp);
2324
2325 napi_enable(&mp->napi);
2326
2327 skb_queue_head_init(&mp->rx_recycle);
2328
2329 mp->int_mask = INT_EXT;
2330
2331 for (i = 0; i < mp->rxq_count; i++) {
2332 err = rxq_init(mp, i);
2333 if (err) {
2334 while (--i >= 0)
2335 rxq_deinit(mp->rxq + i);
2336 goto out;
2337 }
2338
2339 rxq_refill(mp->rxq + i, INT_MAX);
2340 mp->int_mask |= INT_RX_0 << i;
2341 }
2342
2343 if (mp->oom) {
2344 mp->rx_oom.expires = jiffies + (HZ / 10);
2345 add_timer(&mp->rx_oom);
2346 }
2347
2348 for (i = 0; i < mp->txq_count; i++) {
2349 err = txq_init(mp, i);
2350 if (err) {
2351 while (--i >= 0)
2352 txq_deinit(mp->txq + i);
2353 goto out_free;
2354 }
2355 mp->int_mask |= INT_TX_END_0 << i;
2356 }
2357
2358 port_start(mp);
2359
2360 wrlp(mp, INT_MASK_EXT, INT_EXT_LINK_PHY | INT_EXT_TX);
2361 wrlp(mp, INT_MASK, mp->int_mask);
2362
2363 return 0;
2364
2365
2366out_free:
2367 for (i = 0; i < mp->rxq_count; i++)
2368 rxq_deinit(mp->rxq + i);
2369out:
2370 free_irq(dev->irq, dev);
2371
2372 return err;
2373}
2374
2375static void port_reset(struct mv643xx_eth_private *mp)
2376{
2377 unsigned int data;
2378 int i;
2379
2380 for (i = 0; i < mp->rxq_count; i++)
2381 rxq_disable(mp->rxq + i);
2382 for (i = 0; i < mp->txq_count; i++)
2383 txq_disable(mp->txq + i);
2384
2385 while (1) {
2386 u32 ps = rdlp(mp, PORT_STATUS);
2387
2388 if ((ps & (TX_IN_PROGRESS | TX_FIFO_EMPTY)) == TX_FIFO_EMPTY)
2389 break;
2390 udelay(10);
2391 }
2392
2393 /* Reset the Enable bit in the Configuration Register */
2394 data = rdlp(mp, PORT_SERIAL_CONTROL);
2395 data &= ~(SERIAL_PORT_ENABLE |
2396 DO_NOT_FORCE_LINK_FAIL |
2397 FORCE_LINK_PASS);
2398 wrlp(mp, PORT_SERIAL_CONTROL, data);
2399}
2400
2401static int mv643xx_eth_stop(struct net_device *dev)
2402{
2403 struct mv643xx_eth_private *mp = netdev_priv(dev);
2404 int i;
2405
2406 wrlp(mp, INT_MASK_EXT, 0x00000000);
2407 wrlp(mp, INT_MASK, 0x00000000);
2408 rdlp(mp, INT_MASK);
2409
2410 napi_disable(&mp->napi);
2411
2412 del_timer_sync(&mp->rx_oom);
2413
2414 netif_carrier_off(dev);
2415
2416 free_irq(dev->irq, dev);
2417
2418 port_reset(mp);
2419 mv643xx_eth_get_stats(dev);
2420 mib_counters_update(mp);
2421 del_timer_sync(&mp->mib_counters_timer);
2422
2423 skb_queue_purge(&mp->rx_recycle);
2424
2425 for (i = 0; i < mp->rxq_count; i++)
2426 rxq_deinit(mp->rxq + i);
2427 for (i = 0; i < mp->txq_count; i++)
2428 txq_deinit(mp->txq + i);
2429
2430 return 0;
2431}
2432
2433static int mv643xx_eth_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
2434{
2435 struct mv643xx_eth_private *mp = netdev_priv(dev);
2436
2437 if (mp->phy != NULL)
2438 return phy_mii_ioctl(mp->phy, ifr, cmd);
2439
2440 return -EOPNOTSUPP;
2441}
2442
2443static int mv643xx_eth_change_mtu(struct net_device *dev, int new_mtu)
2444{
2445 struct mv643xx_eth_private *mp = netdev_priv(dev);
2446
2447 if (new_mtu < 64 || new_mtu > 9500)
2448 return -EINVAL;
2449
2450 dev->mtu = new_mtu;
2451 mv643xx_eth_recalc_skb_size(mp);
2452 tx_set_rate(mp, 1000000000, 16777216);
2453
2454 if (!netif_running(dev))
2455 return 0;
2456
2457 /*
2458 * Stop and then re-open the interface. This will allocate RX
2459 * skbs of the new MTU.
2460 * There is a risk that the re-open will not succeed if memory
2461 * is exhausted, leaving the interface unusable.
2462 */
2463 mv643xx_eth_stop(dev);
2464 if (mv643xx_eth_open(dev)) {
2465 netdev_err(dev,
2466 "fatal error on re-opening device after MTU change\n");
2467 }
2468
2469 return 0;
2470}
2471
2472static void tx_timeout_task(struct work_struct *ugly)
2473{
2474 struct mv643xx_eth_private *mp;
2475
2476 mp = container_of(ugly, struct mv643xx_eth_private, tx_timeout_task);
2477 if (netif_running(mp->dev)) {
2478 netif_tx_stop_all_queues(mp->dev);
2479 port_reset(mp);
2480 port_start(mp);
2481 netif_tx_wake_all_queues(mp->dev);
2482 }
2483}
2484
2485static void mv643xx_eth_tx_timeout(struct net_device *dev)
2486{
2487 struct mv643xx_eth_private *mp = netdev_priv(dev);
2488
2489 netdev_info(dev, "tx timeout\n");
2490
2491 schedule_work(&mp->tx_timeout_task);
2492}
2493
2494#ifdef CONFIG_NET_POLL_CONTROLLER
2495static void mv643xx_eth_netpoll(struct net_device *dev)
2496{
2497 struct mv643xx_eth_private *mp = netdev_priv(dev);
2498
2499 wrlp(mp, INT_MASK, 0x00000000);
2500 rdlp(mp, INT_MASK);
2501
2502 mv643xx_eth_irq(dev->irq, dev);
2503
2504 wrlp(mp, INT_MASK, mp->int_mask);
2505}
2506#endif
2507
2508
2509/* platform glue ************************************************************/
2510static void
2511mv643xx_eth_conf_mbus_windows(struct mv643xx_eth_shared_private *msp,
2512 struct mbus_dram_target_info *dram)
2513{
2514 void __iomem *base = msp->base;
2515 u32 win_enable;
2516 u32 win_protect;
2517 int i;
2518
2519 for (i = 0; i < 6; i++) {
2520 writel(0, base + WINDOW_BASE(i));
2521 writel(0, base + WINDOW_SIZE(i));
2522 if (i < 4)
2523 writel(0, base + WINDOW_REMAP_HIGH(i));
2524 }
2525
2526 win_enable = 0x3f;
2527 win_protect = 0;
2528
2529 for (i = 0; i < dram->num_cs; i++) {
2530 struct mbus_dram_window *cs = dram->cs + i;
2531
2532 writel((cs->base & 0xffff0000) |
2533 (cs->mbus_attr << 8) |
2534 dram->mbus_dram_target_id, base + WINDOW_BASE(i));
2535 writel((cs->size - 1) & 0xffff0000, base + WINDOW_SIZE(i));
2536
2537 win_enable &= ~(1 << i);
2538 win_protect |= 3 << (2 * i);
2539 }
2540
2541 writel(win_enable, base + WINDOW_BAR_ENABLE);
2542 msp->win_protect = win_protect;
2543}
2544
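/*
 * Register convention assumed by the loop above: WINDOW_BAR_ENABLE
 * acts as a disable mask, so win_enable starts at 0x3f (all six
 * windows off) and one bit is cleared for each DRAM chip select that
 * gets a window. win_protect accumulates two bits per used window,
 * which the per-port probe later writes to WINDOW_PROTECT --
 * presumably granting full read/write access to those windows.
 */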
2545static void infer_hw_params(struct mv643xx_eth_shared_private *msp)
2546{
2547 /*
2548 * Check whether we have a 14-bit coal limit field in bits
2549 * [21:8], or a 16-bit coal limit in bits [25,21:7] of the
2550 * SDMA config register.
2551 */
2552 writel(0x02000000, msp->base + 0x0400 + SDMA_CONFIG);
2553 if (readl(msp->base + 0x0400 + SDMA_CONFIG) & 0x02000000)
2554 msp->extended_rx_coal_limit = 1;
2555 else
2556 msp->extended_rx_coal_limit = 0;
2557
2558 /*
2559 * Check whether the MAC supports TX rate control, and if
2560 * yes, whether its associated registers are in the old or
2561 * the new place.
2562 */
2563 writel(1, msp->base + 0x0400 + TX_BW_MTU_MOVED);
2564 if (readl(msp->base + 0x0400 + TX_BW_MTU_MOVED) & 1) {
2565 msp->tx_bw_control = TX_BW_CONTROL_NEW_LAYOUT;
2566 } else {
2567 writel(7, msp->base + 0x0400 + TX_BW_RATE);
2568 if (readl(msp->base + 0x0400 + TX_BW_RATE) & 7)
2569 msp->tx_bw_control = TX_BW_CONTROL_OLD_LAYOUT;
2570 else
2571 msp->tx_bw_control = TX_BW_CONTROL_ABSENT;
2572 }
2573}
2574
2575static int mv643xx_eth_shared_probe(struct platform_device *pdev)
2576{
2577 static int mv643xx_eth_version_printed;
2578 struct mv643xx_eth_shared_platform_data *pd = pdev->dev.platform_data;
2579 struct mv643xx_eth_shared_private *msp;
2580 struct resource *res;
2581 int ret;
2582
2583 if (!mv643xx_eth_version_printed++)
2584 pr_notice("MV-643xx 10/100/1000 ethernet driver version %s\n",
2585 mv643xx_eth_driver_version);
2586
2587 ret = -EINVAL;
2588 res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
2589 if (res == NULL)
2590 goto out;
2591
2592 ret = -ENOMEM;
2593 msp = kzalloc(sizeof(*msp), GFP_KERNEL);
2594 if (msp == NULL)
2595 goto out;
2596
2597 msp->base = ioremap(res->start, resource_size(res));
2598 if (msp->base == NULL)
2599 goto out_free;
2600
2601 /*
2602 * Set up and register SMI bus.
2603 */
2604 if (pd == NULL || pd->shared_smi == NULL) {
2605 msp->smi_bus = mdiobus_alloc();
2606 if (msp->smi_bus == NULL)
2607 goto out_unmap;
2608
2609 msp->smi_bus->priv = msp;
2610 msp->smi_bus->name = "mv643xx_eth smi";
2611 msp->smi_bus->read = smi_bus_read;
2612 msp->smi_bus->write = smi_bus_write;
2613 snprintf(msp->smi_bus->id, MII_BUS_ID_SIZE, "%d", pdev->id);
2614 msp->smi_bus->parent = &pdev->dev;
2615 msp->smi_bus->phy_mask = 0xffffffff;
2616 if (mdiobus_register(msp->smi_bus) < 0)
2617 goto out_free_mii_bus;
2618 msp->smi = msp;
2619 } else {
2620 msp->smi = platform_get_drvdata(pd->shared_smi);
2621 }
2622
2623 msp->err_interrupt = NO_IRQ;
2624 init_waitqueue_head(&msp->smi_busy_wait);
2625
2626 /*
2627 * Check whether the error interrupt is hooked up.
2628 */
2629 res = platform_get_resource(pdev, IORESOURCE_IRQ, 0);
2630 if (res != NULL) {
2631 int err;
2632
2633 err = request_irq(res->start, mv643xx_eth_err_irq,
2634 IRQF_SHARED, "mv643xx_eth", msp);
2635 if (!err) {
2636 writel(ERR_INT_SMI_DONE, msp->base + ERR_INT_MASK);
2637 msp->err_interrupt = res->start;
2638 }
2639 }
2640
2641 /*
2642 * (Re-)program MBUS remapping windows if we are asked to.
2643 */
2644 if (pd != NULL && pd->dram != NULL)
2645 mv643xx_eth_conf_mbus_windows(msp, pd->dram);
2646
2647 /*
2648 * Detect hardware parameters.
2649 */
2650 msp->t_clk = (pd != NULL && pd->t_clk != 0) ? pd->t_clk : 133000000;
2651 msp->tx_csum_limit = (pd != NULL && pd->tx_csum_limit) ?
2652 pd->tx_csum_limit : 9 * 1024;
2653 infer_hw_params(msp);
2654
2655 platform_set_drvdata(pdev, msp);
2656
2657 return 0;
2658
2659out_free_mii_bus:
2660 mdiobus_free(msp->smi_bus);
2661out_unmap:
2662 iounmap(msp->base);
2663out_free:
2664 kfree(msp);
2665out:
2666 return ret;
2667}
2668
2669static int mv643xx_eth_shared_remove(struct platform_device *pdev)
2670{
2671 struct mv643xx_eth_shared_private *msp = platform_get_drvdata(pdev);
2672 struct mv643xx_eth_shared_platform_data *pd = pdev->dev.platform_data;
2673
2674 if (pd == NULL || pd->shared_smi == NULL) {
2675 mdiobus_unregister(msp->smi_bus);
2676 mdiobus_free(msp->smi_bus);
2677 }
2678 if (msp->err_interrupt != NO_IRQ)
2679 free_irq(msp->err_interrupt, msp);
2680 iounmap(msp->base);
2681 kfree(msp);
2682
2683 return 0;
2684}
2685
2686static struct platform_driver mv643xx_eth_shared_driver = {
2687 .probe = mv643xx_eth_shared_probe,
2688 .remove = mv643xx_eth_shared_remove,
2689 .driver = {
2690 .name = MV643XX_ETH_SHARED_NAME,
2691 .owner = THIS_MODULE,
2692 },
2693};
2694
2695static void phy_addr_set(struct mv643xx_eth_private *mp, int phy_addr)
2696{
2697 int addr_shift = 5 * mp->port_num;
2698 u32 data;
2699
2700 data = rdl(mp, PHY_ADDR);
2701 data &= ~(0x1f << addr_shift);
2702 data |= (phy_addr & 0x1f) << addr_shift;
2703 wrl(mp, PHY_ADDR, data);
2704}
2705
2706static int phy_addr_get(struct mv643xx_eth_private *mp)
2707{
2708 unsigned int data;
2709
2710 data = rdl(mp, PHY_ADDR);
2711
2712 return (data >> (5 * mp->port_num)) & 0x1f;
2713}
2714
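/*
 * The PHY_ADDR register holds one 5-bit PHY address per port, packed
 * back to back: port 0 uses bits 4:0, port 1 bits 9:5, and so on --
 * hence the "5 * mp->port_num" shift in both helpers above.
 */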
2715static void set_params(struct mv643xx_eth_private *mp,
2716 struct mv643xx_eth_platform_data *pd)
2717{
2718 struct net_device *dev = mp->dev;
2719
2720 if (is_valid_ether_addr(pd->mac_addr))
2721 memcpy(dev->dev_addr, pd->mac_addr, 6);
2722 else
2723 uc_addr_get(mp, dev->dev_addr);
2724
2725 mp->rx_ring_size = DEFAULT_RX_QUEUE_SIZE;
2726 if (pd->rx_queue_size)
2727 mp->rx_ring_size = pd->rx_queue_size;
2728 mp->rx_desc_sram_addr = pd->rx_sram_addr;
2729 mp->rx_desc_sram_size = pd->rx_sram_size;
2730
2731 mp->rxq_count = pd->rx_queue_count ? : 1;
2732
2733 mp->tx_ring_size = DEFAULT_TX_QUEUE_SIZE;
2734 if (pd->tx_queue_size)
2735 mp->tx_ring_size = pd->tx_queue_size;
2736 mp->tx_desc_sram_addr = pd->tx_sram_addr;
2737 mp->tx_desc_sram_size = pd->tx_sram_size;
2738
2739 mp->txq_count = pd->tx_queue_count ? : 1;
2740}
2741
2742static struct phy_device *phy_scan(struct mv643xx_eth_private *mp,
2743 int phy_addr)
2744{
2745 struct mii_bus *bus = mp->shared->smi->smi_bus;
2746 struct phy_device *phydev;
2747 int start;
2748 int num;
2749 int i;
2750
2751 if (phy_addr == MV643XX_ETH_PHY_ADDR_DEFAULT) {
2752 start = phy_addr_get(mp) & 0x1f;
2753 num = 32;
2754 } else {
2755 start = phy_addr & 0x1f;
2756 num = 1;
2757 }
2758
2759 phydev = NULL;
2760 for (i = 0; i < num; i++) {
2761 int addr = (start + i) & 0x1f;
2762
2763 if (bus->phy_map[addr] == NULL)
2764 mdiobus_scan(bus, addr);
2765
2766 if (phydev == NULL) {
2767 phydev = bus->phy_map[addr];
2768 if (phydev != NULL)
2769 phy_addr_set(mp, addr);
2770 }
2771 }
2772
2773 return phydev;
2774}
2775
2776static void phy_init(struct mv643xx_eth_private *mp, int speed, int duplex)
2777{
2778 struct phy_device *phy = mp->phy;
2779
2780 phy_reset(mp);
2781
2782 phy_attach(mp->dev, dev_name(&phy->dev), 0, PHY_INTERFACE_MODE_GMII);
2783
2784 if (speed == 0) {
2785 phy->autoneg = AUTONEG_ENABLE;
2786 phy->speed = 0;
2787 phy->duplex = 0;
2788 phy->advertising = phy->supported | ADVERTISED_Autoneg;
2789 } else {
2790 phy->autoneg = AUTONEG_DISABLE;
2791 phy->advertising = 0;
2792 phy->speed = speed;
2793 phy->duplex = duplex;
2794 }
2795 phy_start_aneg(phy);
2796}
2797
2798static void init_pscr(struct mv643xx_eth_private *mp, int speed, int duplex)
2799{
2800 u32 pscr;
2801
2802 pscr = rdlp(mp, PORT_SERIAL_CONTROL);
2803 if (pscr & SERIAL_PORT_ENABLE) {
2804 pscr &= ~SERIAL_PORT_ENABLE;
2805 wrlp(mp, PORT_SERIAL_CONTROL, pscr);
2806 }
2807
2808 pscr = MAX_RX_PACKET_9700BYTE | SERIAL_PORT_CONTROL_RESERVED;
2809 if (mp->phy == NULL) {
2810 pscr |= DISABLE_AUTO_NEG_SPEED_GMII;
2811 if (speed == SPEED_1000)
2812 pscr |= SET_GMII_SPEED_TO_1000;
2813 else if (speed == SPEED_100)
2814 pscr |= SET_MII_SPEED_TO_100;
2815
2816 pscr |= DISABLE_AUTO_NEG_FOR_FLOW_CTRL;
2817
2818 pscr |= DISABLE_AUTO_NEG_FOR_DUPLEX;
2819 if (duplex == DUPLEX_FULL)
2820 pscr |= SET_FULL_DUPLEX_MODE;
2821 }
2822
2823 wrlp(mp, PORT_SERIAL_CONTROL, pscr);
2824}
2825
2826static const struct net_device_ops mv643xx_eth_netdev_ops = {
2827 .ndo_open = mv643xx_eth_open,
2828 .ndo_stop = mv643xx_eth_stop,
2829 .ndo_start_xmit = mv643xx_eth_xmit,
2830 .ndo_set_rx_mode = mv643xx_eth_set_rx_mode,
2831 .ndo_set_mac_address = mv643xx_eth_set_mac_address,
2832 .ndo_validate_addr = eth_validate_addr,
2833 .ndo_do_ioctl = mv643xx_eth_ioctl,
2834 .ndo_change_mtu = mv643xx_eth_change_mtu,
2835 .ndo_set_features = mv643xx_eth_set_features,
2836 .ndo_tx_timeout = mv643xx_eth_tx_timeout,
2837 .ndo_get_stats = mv643xx_eth_get_stats,
2838#ifdef CONFIG_NET_POLL_CONTROLLER
2839 .ndo_poll_controller = mv643xx_eth_netpoll,
2840#endif
2841};
2842
2843static int mv643xx_eth_probe(struct platform_device *pdev)
2844{
2845 struct mv643xx_eth_platform_data *pd;
2846 struct mv643xx_eth_private *mp;
2847 struct net_device *dev;
2848 struct resource *res;
2849 int err;
2850
2851 pd = pdev->dev.platform_data;
2852 if (pd == NULL) {
2853 dev_err(&pdev->dev, "no mv643xx_eth_platform_data\n");
2854 return -ENODEV;
2855 }
2856
2857 if (pd->shared == NULL) {
2858 dev_err(&pdev->dev, "no mv643xx_eth_platform_data->shared\n");
2859 return -ENODEV;
2860 }
2861
2862 dev = alloc_etherdev_mq(sizeof(struct mv643xx_eth_private), 8);
2863 if (!dev)
2864 return -ENOMEM;
2865
2866 mp = netdev_priv(dev);
2867 platform_set_drvdata(pdev, mp);
2868
2869 mp->shared = platform_get_drvdata(pd->shared);
2870 mp->base = mp->shared->base + 0x0400 + (pd->port_number << 10);
2871 mp->port_num = pd->port_number;
2872
2873 mp->dev = dev;
2874
2875 set_params(mp, pd);
2876 netif_set_real_num_tx_queues(dev, mp->txq_count);
2877 netif_set_real_num_rx_queues(dev, mp->rxq_count);
2878
2879 if (pd->phy_addr != MV643XX_ETH_PHY_NONE)
2880 mp->phy = phy_scan(mp, pd->phy_addr);
2881
2882 if (mp->phy != NULL)
2883 phy_init(mp, pd->speed, pd->duplex);
2884
2885 SET_ETHTOOL_OPS(dev, &mv643xx_eth_ethtool_ops);
2886
2887 init_pscr(mp, pd->speed, pd->duplex);
2888
2889
2890 mib_counters_clear(mp);
2891
2892 init_timer(&mp->mib_counters_timer);
2893 mp->mib_counters_timer.data = (unsigned long)mp;
2894 mp->mib_counters_timer.function = mib_counters_timer_wrapper;
2895 mp->mib_counters_timer.expires = jiffies + 30 * HZ;
2896 add_timer(&mp->mib_counters_timer);
2897
2898 spin_lock_init(&mp->mib_counters_lock);
2899
2900 INIT_WORK(&mp->tx_timeout_task, tx_timeout_task);
2901
2902 netif_napi_add(dev, &mp->napi, mv643xx_eth_poll, 128);
2903
2904 init_timer(&mp->rx_oom);
2905 mp->rx_oom.data = (unsigned long)mp;
2906 mp->rx_oom.function = oom_timer_wrapper;
2907
2908
2909 res = platform_get_resource(pdev, IORESOURCE_IRQ, 0);
2910 BUG_ON(!res);
2911 dev->irq = res->start;
2912
2913 dev->netdev_ops = &mv643xx_eth_netdev_ops;
2914
2915 dev->watchdog_timeo = 2 * HZ;
2916 dev->base_addr = 0;
2917
2918 dev->hw_features = NETIF_F_SG | NETIF_F_IP_CSUM |
2919 NETIF_F_RXCSUM | NETIF_F_LRO;
2920 dev->features = NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_RXCSUM;
2921 dev->vlan_features = NETIF_F_SG | NETIF_F_IP_CSUM;
2922
2923 dev->priv_flags |= IFF_UNICAST_FLT;
2924
2925 SET_NETDEV_DEV(dev, &pdev->dev);
2926
2927 if (mp->shared->win_protect)
2928 wrl(mp, WINDOW_PROTECT(mp->port_num), mp->shared->win_protect);
2929
2930 netif_carrier_off(dev);
2931
2932 wrlp(mp, SDMA_CONFIG, PORT_SDMA_CONFIG_DEFAULT_VALUE);
2933
2934 set_rx_coal(mp, 250);
2935 set_tx_coal(mp, 0);
2936
2937 err = register_netdev(dev);
2938 if (err)
2939 goto out;
2940
2941 netdev_notice(dev, "port %d with MAC address %pM\n",
2942 mp->port_num, dev->dev_addr);
2943
2944 if (mp->tx_desc_sram_size > 0)
2945 netdev_notice(dev, "configured with sram\n");
2946
2947 return 0;
2948
2949out:
2950 free_netdev(dev);
2951
2952 return err;
2953}
2954
2955static int mv643xx_eth_remove(struct platform_device *pdev)
2956{
2957 struct mv643xx_eth_private *mp = platform_get_drvdata(pdev);
2958
2959 unregister_netdev(mp->dev);
2960 if (mp->phy != NULL)
2961 phy_detach(mp->phy);
2962 cancel_work_sync(&mp->tx_timeout_task);
2963 free_netdev(mp->dev);
2964
2965 platform_set_drvdata(pdev, NULL);
2966
2967 return 0;
2968}
2969
2970static void mv643xx_eth_shutdown(struct platform_device *pdev)
2971{
2972 struct mv643xx_eth_private *mp = platform_get_drvdata(pdev);
2973
2974 /* Mask all interrupts on ethernet port */
2975 wrlp(mp, INT_MASK, 0);
2976 rdlp(mp, INT_MASK);
2977
2978 if (netif_running(mp->dev))
2979 port_reset(mp);
2980}
2981
2982static struct platform_driver mv643xx_eth_driver = {
2983 .probe = mv643xx_eth_probe,
2984 .remove = mv643xx_eth_remove,
2985 .shutdown = mv643xx_eth_shutdown,
2986 .driver = {
2987 .name = MV643XX_ETH_NAME,
2988 .owner = THIS_MODULE,
2989 },
2990};
2991
2992static int __init mv643xx_eth_init_module(void)
2993{
2994 int rc;
2995
2996 rc = platform_driver_register(&mv643xx_eth_shared_driver);
2997 if (!rc) {
2998 rc = platform_driver_register(&mv643xx_eth_driver);
2999 if (rc)
3000 platform_driver_unregister(&mv643xx_eth_shared_driver);
3001 }
3002
3003 return rc;
3004}
3005module_init(mv643xx_eth_init_module);
3006
3007static void __exit mv643xx_eth_cleanup_module(void)
3008{
3009 platform_driver_unregister(&mv643xx_eth_driver);
3010 platform_driver_unregister(&mv643xx_eth_shared_driver);
3011}
3012module_exit(mv643xx_eth_cleanup_module);
3013
3014MODULE_AUTHOR("Rabeeh Khoury, Assaf Hoffman, Matthew Dharm, "
3015 "Manish Lachwani, Dale Farnsworth and Lennert Buytenhek");
3016MODULE_DESCRIPTION("Ethernet driver for Marvell MV643XX");
3017MODULE_LICENSE("GPL");
3018MODULE_ALIAS("platform:" MV643XX_ETH_SHARED_NAME);
3019MODULE_ALIAS("platform:" MV643XX_ETH_NAME);
diff --git a/drivers/net/ethernet/marvell/pxa168_eth.c b/drivers/net/ethernet/marvell/pxa168_eth.c
new file mode 100644
index 000000000000..d17d0624c5e6
--- /dev/null
+++ b/drivers/net/ethernet/marvell/pxa168_eth.c
@@ -0,0 +1,1663 @@
1/*
2 * PXA168 ethernet driver.
3 * Most of the code is derived from mv643xx ethernet driver.
4 *
5 * Copyright (C) 2010 Marvell International Ltd.
6 * Sachin Sanap <ssanap@marvell.com>
7 * Zhangfei Gao <zgao6@marvell.com>
8 * Philip Rakity <prakity@marvell.com>
9 * Mark Brown <markb@marvell.com>
10 *
11 * This program is free software; you can redistribute it and/or
12 * modify it under the terms of the GNU General Public License
13 * as published by the Free Software Foundation; either version 2
14 * of the License, or (at your option) any later version.
15 *
16 * This program is distributed in the hope that it will be useful,
17 * but WITHOUT ANY WARRANTY; without even the implied warranty of
18 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
19 * GNU General Public License for more details.
20 *
21 * You should have received a copy of the GNU General Public License
22 * along with this program; if not, write to the Free Software
23 * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
24 */
25
26#include <linux/init.h>
27#include <linux/dma-mapping.h>
28#include <linux/in.h>
29#include <linux/ip.h>
30#include <linux/tcp.h>
31#include <linux/udp.h>
32#include <linux/etherdevice.h>
33#include <linux/bitops.h>
34#include <linux/delay.h>
35#include <linux/ethtool.h>
36#include <linux/platform_device.h>
37#include <linux/module.h>
38#include <linux/kernel.h>
39#include <linux/workqueue.h>
40#include <linux/clk.h>
41#include <linux/phy.h>
42#include <linux/io.h>
43#include <linux/interrupt.h>
44#include <linux/types.h>
45#include <asm/pgtable.h>
46#include <asm/system.h>
47#include <asm/cacheflush.h>
48#include <linux/pxa168_eth.h>
49
50#define DRIVER_NAME "pxa168-eth"
51#define DRIVER_VERSION "0.3"
52
53/*
54 * Registers
55 */
56
57#define PHY_ADDRESS 0x0000
58#define SMI 0x0010
59#define PORT_CONFIG 0x0400
60#define PORT_CONFIG_EXT 0x0408
61#define PORT_COMMAND 0x0410
62#define PORT_STATUS 0x0418
63#define HTPR 0x0428
64#define SDMA_CONFIG 0x0440
65#define SDMA_CMD 0x0448
66#define INT_CAUSE 0x0450
67#define INT_W_CLEAR 0x0454
68#define INT_MASK 0x0458
69#define ETH_F_RX_DESC_0 0x0480
70#define ETH_C_RX_DESC_0 0x04A0
71#define ETH_C_TX_DESC_1 0x04E4
72
73/* smi register */
74#define SMI_BUSY (1 << 28) /* 0 - Write, 1 - Read */
75#define SMI_R_VALID (1 << 27) /* 0 - Write, 1 - Read */
76#define SMI_OP_W (0 << 26) /* Write operation */
77#define SMI_OP_R (1 << 26) /* Read operation */
78
79#define PHY_WAIT_ITERATIONS 10
80
81#define PXA168_ETH_PHY_ADDR_DEFAULT 0
82/* RX & TX descriptor command */
83#define BUF_OWNED_BY_DMA (1 << 31)
84
85/* RX descriptor status */
86#define RX_EN_INT (1 << 23)
87#define RX_FIRST_DESC (1 << 17)
88#define RX_LAST_DESC (1 << 16)
89#define RX_ERROR (1 << 15)
90
91/* TX descriptor command */
92#define TX_EN_INT (1 << 23)
93#define TX_GEN_CRC (1 << 22)
94#define TX_ZERO_PADDING (1 << 18)
95#define TX_FIRST_DESC (1 << 17)
96#define TX_LAST_DESC (1 << 16)
97#define TX_ERROR (1 << 15)
98
99/* SDMA_CMD */
100#define SDMA_CMD_AT (1 << 31)
101#define SDMA_CMD_TXDL (1 << 24)
102#define SDMA_CMD_TXDH (1 << 23)
103#define SDMA_CMD_AR (1 << 15)
104#define SDMA_CMD_ERD (1 << 7)
105
106/* Bit definitions of the Port Config Reg */
107#define PCR_HS (1 << 12)
108#define PCR_EN (1 << 7)
109#define PCR_PM (1 << 0)
110
111/* Bit definitions of the Port Config Extend Reg */
112#define PCXR_2BSM (1 << 28)
113#define PCXR_DSCP_EN (1 << 21)
114#define PCXR_MFL_1518 (0 << 14)
115#define PCXR_MFL_1536 (1 << 14)
116#define PCXR_MFL_2048 (2 << 14)
117#define PCXR_MFL_64K (3 << 14)
118#define PCXR_FLP (1 << 11)
119#define PCXR_PRIO_TX_OFF 3
120#define PCXR_TX_HIGH_PRI (7 << PCXR_PRIO_TX_OFF)
121
122/* Bit definitions of the SDMA Config Reg */
123#define SDCR_BSZ_OFF 12
124#define SDCR_BSZ8 (3 << SDCR_BSZ_OFF)
125#define SDCR_BSZ4 (2 << SDCR_BSZ_OFF)
126#define SDCR_BSZ2 (1 << SDCR_BSZ_OFF)
127#define SDCR_BSZ1 (0 << SDCR_BSZ_OFF)
128#define SDCR_BLMR (1 << 6)
129#define SDCR_BLMT (1 << 7)
130#define SDCR_RIFB (1 << 9)
131#define SDCR_RC_OFF 2
132#define SDCR_RC_MAX_RETRANS (0xf << SDCR_RC_OFF)
133
134/*
135 * Bit definitions of the Interrupt Cause Reg
136 * and Interrupt MASK Reg is the same
137 */
138#define ICR_RXBUF (1 << 0)
139#define ICR_TXBUF_H (1 << 2)
140#define ICR_TXBUF_L (1 << 3)
141#define ICR_TXEND_H (1 << 6)
142#define ICR_TXEND_L (1 << 7)
143#define ICR_RXERR (1 << 8)
144#define ICR_TXERR_H (1 << 10)
145#define ICR_TXERR_L (1 << 11)
146#define ICR_TX_UDR (1 << 13)
147#define ICR_MII_CH (1 << 28)
148
149#define ALL_INTS (ICR_TXBUF_H | ICR_TXBUF_L | ICR_TX_UDR |\
150 ICR_TXERR_H | ICR_TXERR_L |\
151 ICR_TXEND_H | ICR_TXEND_L |\
152 ICR_RXBUF | ICR_RXERR | ICR_MII_CH)
153
154#define ETH_HW_IP_ALIGN 2 /* hw aligns IP header */
155
156#define NUM_RX_DESCS 64
157#define NUM_TX_DESCS 64
158
159#define HASH_ADD 0
160#define HASH_DELETE 1
161#define HASH_ADDR_TABLE_SIZE 0x4000 /* 16K (1/2K address - PCR_HS == 1) */
162#define HOP_NUMBER 12
163
164/* Bit definitions for Port status */
165#define PORT_SPEED_100 (1 << 0)
166#define FULL_DUPLEX (1 << 1)
167#define FLOW_CONTROL_ENABLED (1 << 2)
168#define LINK_UP (1 << 3)
169
170/* Bit definitions for work to be done */
171#define WORK_LINK (1 << 0)
172#define WORK_TX_DONE (1 << 1)
173
174/*
175 * Misc definitions.
176 */
177#define SKB_DMA_REALIGN ((PAGE_SIZE - NET_SKB_PAD) % SMP_CACHE_BYTES)
178
179struct rx_desc {
180 u32 cmd_sts; /* Descriptor command status */
181 u16 byte_cnt; /* Descriptor buffer byte count */
182 u16 buf_size; /* Buffer size */
183 u32 buf_ptr; /* Descriptor buffer pointer */
184 u32 next_desc_ptr; /* Next descriptor pointer */
185};
186
187struct tx_desc {
188 u32 cmd_sts; /* Command/status field */
189 u16 reserved;
190 u16 byte_cnt; /* buffer byte count */
191 u32 buf_ptr; /* pointer to buffer for this descriptor */
192 u32 next_desc_ptr; /* Pointer to next descriptor */
193};
194
195struct pxa168_eth_private {
196 int port_num; /* User Ethernet port number */
197
198 int rx_resource_err; /* Rx ring resource error flag */
199
200 /* Next available and first returning Rx resource */
201 int rx_curr_desc_q, rx_used_desc_q;
202
203 /* Next available and first returning Tx resource */
204 int tx_curr_desc_q, tx_used_desc_q;
205
206 struct rx_desc *p_rx_desc_area;
207 dma_addr_t rx_desc_dma;
208 int rx_desc_area_size;
209 struct sk_buff **rx_skb;
210
211 struct tx_desc *p_tx_desc_area;
212 dma_addr_t tx_desc_dma;
213 int tx_desc_area_size;
214 struct sk_buff **tx_skb;
215
216 struct work_struct tx_timeout_task;
217
218 struct net_device *dev;
219 struct napi_struct napi;
220 u8 work_todo;
221 int skb_size;
222
223 struct net_device_stats stats;
224 /* Size of Tx Ring per queue */
225 int tx_ring_size;
226 /* Number of tx descriptors in use */
227 int tx_desc_count;
228 /* Size of Rx Ring per queue */
229 int rx_ring_size;
230 /* Number of rx descriptors in use */
231 int rx_desc_count;
232
233 /*
234 * Used in case the RX ring is empty, which can occur when
235 * the system has run out of memory for skbs
236 */
237 struct timer_list timeout;
238 struct mii_bus *smi_bus;
239 struct phy_device *phy;
240
241 /* clock */
242 struct clk *clk;
243 struct pxa168_eth_platform_data *pd;
244 /*
245 * Ethernet controller base address.
246 */
247 void __iomem *base;
248
249 /* Pointer to the hardware address filter table */
250 void *htpr;
251 dma_addr_t htpr_dma;
252};
253
254struct addr_table_entry {
255 __le32 lo;
256 __le32 hi;
257};
258
259/* Bit fields of a Hash Table Entry */
260enum hash_table_entry {
261 HASH_ENTRY_VALID = 1,
262 SKIP = 2,
263 HASH_ENTRY_RECEIVE_DISCARD = 4,
264 HASH_ENTRY_RECEIVE_DISCARD_BIT = 2
265};
266
267static int pxa168_get_settings(struct net_device *dev, struct ethtool_cmd *cmd);
268static int pxa168_set_settings(struct net_device *dev, struct ethtool_cmd *cmd);
269static int pxa168_init_hw(struct pxa168_eth_private *pep);
270static void eth_port_reset(struct net_device *dev);
271static void eth_port_start(struct net_device *dev);
272static int pxa168_eth_open(struct net_device *dev);
273static int pxa168_eth_stop(struct net_device *dev);
274static int ethernet_phy_setup(struct net_device *dev);
275
276static inline u32 rdl(struct pxa168_eth_private *pep, int offset)
277{
278 return readl(pep->base + offset);
279}
280
281static inline void wrl(struct pxa168_eth_private *pep, int offset, u32 data)
282{
283 writel(data, pep->base + offset);
284}
285
286static void abort_dma(struct pxa168_eth_private *pep)
287{
288 int delay;
289 int max_retries = 40;
290
291 do {
292 wrl(pep, SDMA_CMD, SDMA_CMD_AR | SDMA_CMD_AT);
293 udelay(100);
294
295 delay = 10;
296 while ((rdl(pep, SDMA_CMD) & (SDMA_CMD_AR | SDMA_CMD_AT))
297 && delay-- > 0) {
298 udelay(10);
299 }
300 } while (max_retries-- > 0 && delay <= 0);
301
302 if (max_retries <= 0)
303 printk(KERN_ERR "%s : DMA Stuck\n", __func__);
304}
305
306static int ethernet_phy_get(struct pxa168_eth_private *pep)
307{
308 unsigned int reg_data;
309
310 reg_data = rdl(pep, PHY_ADDRESS);
311
312 return (reg_data >> (5 * pep->port_num)) & 0x1f;
313}
314
315static void ethernet_phy_set_addr(struct pxa168_eth_private *pep, int phy_addr)
316{
317 u32 reg_data;
318 int addr_shift = 5 * pep->port_num;
319
320 reg_data = rdl(pep, PHY_ADDRESS);
321 reg_data &= ~(0x1f << addr_shift);
322 reg_data |= (phy_addr & 0x1f) << addr_shift;
323 wrl(pep, PHY_ADDRESS, reg_data);
324}
325
326static void ethernet_phy_reset(struct pxa168_eth_private *pep)
327{
328 int data;
329
330 data = phy_read(pep->phy, MII_BMCR);
331 if (data < 0)
332 return;
333
334 data |= BMCR_RESET;
335 if (phy_write(pep->phy, MII_BMCR, data) < 0)
336 return;
337
338 do {
339 data = phy_read(pep->phy, MII_BMCR);
340 } while (data >= 0 && data & BMCR_RESET);
341}
342
343static void rxq_refill(struct net_device *dev)
344{
345 struct pxa168_eth_private *pep = netdev_priv(dev);
346 struct sk_buff *skb;
347 struct rx_desc *p_used_rx_desc;
348 int used_rx_desc;
349
350 while (pep->rx_desc_count < pep->rx_ring_size) {
351 int size;
352
353 skb = dev_alloc_skb(pep->skb_size);
354 if (!skb)
355 break;
356 if (SKB_DMA_REALIGN)
357 skb_reserve(skb, SKB_DMA_REALIGN);
358 pep->rx_desc_count++;
359 /* Get 'used' Rx descriptor */
360 used_rx_desc = pep->rx_used_desc_q;
361 p_used_rx_desc = &pep->p_rx_desc_area[used_rx_desc];
362 size = skb->end - skb->data;
363 p_used_rx_desc->buf_ptr = dma_map_single(NULL,
364 skb->data,
365 size,
366 DMA_FROM_DEVICE);
367 p_used_rx_desc->buf_size = size;
368 pep->rx_skb[used_rx_desc] = skb;
369
370 /* Return the descriptor to DMA ownership */
371 wmb();
372 p_used_rx_desc->cmd_sts = BUF_OWNED_BY_DMA | RX_EN_INT;
373 wmb();
374
375 /* Move the used descriptor pointer to the next descriptor */
376 pep->rx_used_desc_q = (used_rx_desc + 1) % pep->rx_ring_size;
377
378 /* Any Rx return cancels the Rx resource error status */
379 pep->rx_resource_err = 0;
380
381 skb_reserve(skb, ETH_HW_IP_ALIGN);
382 }
383
384 /*
385 * If the RX ring is left without any skbs, set a timer to try
386 * allocating again at a later time.
387 */
388 if (pep->rx_desc_count == 0) {
389 pep->timeout.expires = jiffies + (HZ / 10);
390 add_timer(&pep->timeout);
391 }
392}
393
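/*
 * Barrier pairing in the refill loop above: the first wmb() makes
 * sure buf_ptr and buf_size are visible to the DMA engine before
 * cmd_sts hands the descriptor over via BUF_OWNED_BY_DMA; the second
 * pushes the ownership write itself out before the software ring
 * state advances.
 */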
394static inline void rxq_refill_timer_wrapper(unsigned long data)
395{
396 struct pxa168_eth_private *pep = (void *)data;
397 napi_schedule(&pep->napi);
398}
399
400static inline u8 flip_8_bits(u8 x)
401{
402 return (((x) & 0x01) << 3) | (((x) & 0x02) << 1)
403 | (((x) & 0x04) >> 1) | (((x) & 0x08) >> 3)
404 | (((x) & 0x10) << 3) | (((x) & 0x20) << 1)
405 | (((x) & 0x40) >> 1) | (((x) & 0x80) >> 3);
406}
407
408static void nibble_swap_every_byte(unsigned char *mac_addr)
409{
410 int i;
411 for (i = 0; i < ETH_ALEN; i++) {
412 mac_addr[i] = ((mac_addr[i] & 0x0f) << 4) |
413 ((mac_addr[i] & 0xf0) >> 4);
414 }
415}
416
417static void inverse_every_nibble(unsigned char *mac_addr)
418{
419 int i;
420 for (i = 0; i < ETH_ALEN; i++)
421 mac_addr[i] = flip_8_bits(mac_addr[i]);
422}
423
424/*
425 * ----------------------------------------------------------------------------
426 * This function calculates the hash table index for the address.
427 * Inputs
428 * mac_addr_orig - MAC address.
429 * Outputs
430 * return the calculated table index.
431 */
432static u32 hash_function(unsigned char *mac_addr_orig)
433{
434 u32 hash_result;
435 u32 addr0;
436 u32 addr1;
437 u32 addr2;
438 u32 addr3;
439 unsigned char mac_addr[ETH_ALEN];
440
441 /* Make a copy of the MAC address since we are going to perform bit
442 * operations on it
443 */
444 memcpy(mac_addr, mac_addr_orig, ETH_ALEN);
445
446 nibble_swap_every_byte(mac_addr);
447 inverse_every_nibble(mac_addr);
448
449 addr0 = (mac_addr[5] >> 2) & 0x3f;
450 addr1 = (mac_addr[5] & 0x03) | (((mac_addr[4] & 0x7f)) << 2);
451 addr2 = ((mac_addr[4] & 0x80) >> 7) | mac_addr[3] << 1;
452 addr3 = (mac_addr[2] & 0xff) | ((mac_addr[1] & 1) << 8);
453
454 hash_result = (addr0 << 9) | (addr1 ^ addr2 ^ addr3);
455 hash_result = hash_result & 0x07ff;
456 return hash_result;
457}
458
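/*
 * Shape of the result above: (addr0 << 9) | (addr1 ^ addr2 ^ addr3)
 * is masked down to 11 bits, giving an index into 2^11 = 2048 hash
 * entries of 8 bytes each -- exactly the 16kB HASH_ADDR_TABLE_SIZE
 * allocated for this mode, and the reason the probe loop in
 * add_del_hash_entry() below wraps at start + 0x7ff.
 */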
459/*
460 * ----------------------------------------------------------------------------
461 * This function will add/delete an entry in the address table.
462 * Inputs
463 * pep - ethernet private data.
464 * mac_addr - MAC address.
465 * skip - if 1, skip this address. Used when deleting an entry which is
466 * part of a chain in the hash table. We can't just delete the entry,
467 * since that would break the chain; instead, the tables need to be
468 * defragmented from time to time.
469 * rd - 0 Discard packet upon match.
470 * - 1 Receive packet upon match.
471 * Outputs
472 * address table entry is added/deleted.
473 * 0 if successful.
474 * -ENOSPC if the table is full.
475 */
476static int add_del_hash_entry(struct pxa168_eth_private *pep,
477 unsigned char *mac_addr,
478 u32 rd, u32 skip, int del)
479{
480 struct addr_table_entry *entry, *start;
481 u32 new_high;
482 u32 new_low;
483 u32 i;
484
485 new_low = (((mac_addr[1] >> 4) & 0xf) << 15)
486 | (((mac_addr[1] >> 0) & 0xf) << 11)
487 | (((mac_addr[0] >> 4) & 0xf) << 7)
488 | (((mac_addr[0] >> 0) & 0xf) << 3)
489 | (((mac_addr[3] >> 4) & 0x1) << 31)
490 | (((mac_addr[3] >> 0) & 0xf) << 27)
491 | (((mac_addr[2] >> 4) & 0xf) << 23)
492 | (((mac_addr[2] >> 0) & 0xf) << 19)
493 | (skip << SKIP) | (rd << HASH_ENTRY_RECEIVE_DISCARD_BIT)
494 | HASH_ENTRY_VALID;
495
496 new_high = (((mac_addr[5] >> 4) & 0xf) << 15)
497 | (((mac_addr[5] >> 0) & 0xf) << 11)
498 | (((mac_addr[4] >> 4) & 0xf) << 7)
499 | (((mac_addr[4] >> 0) & 0xf) << 3)
500 | (((mac_addr[3] >> 5) & 0x7) << 0);
501
502 /*
503 * Pick the appropriate table, start scanning for free/reusable
504 * entries at the index obtained by hashing the specified MAC address
505 */
506 start = pep->htpr;
507 entry = start + hash_function(mac_addr);
508 for (i = 0; i < HOP_NUMBER; i++) {
509 if (!(le32_to_cpu(entry->lo) & HASH_ENTRY_VALID)) {
510 break;
511 } else {
512 /* if same address put in same position */
513 if (((le32_to_cpu(entry->lo) & 0xfffffff8) ==
514 (new_low & 0xfffffff8)) &&
515 (le32_to_cpu(entry->hi) == new_high)) {
516 break;
517 }
518 }
519 if (entry == start + 0x7ff)
520 entry = start;
521 else
522 entry++;
523 }
524
525 if (((le32_to_cpu(entry->lo) & 0xfffffff8) != (new_low & 0xfffffff8)) &&
526 (le32_to_cpu(entry->hi) != new_high) && del)
527 return 0;
528
529 if (i == HOP_NUMBER) {
530 if (!del) {
531 printk(KERN_INFO "%s: table section is full, need to "
532 "move to 16kB implementation?\n",
533 __FILE__);
534 return -ENOSPC;
535 } else
536 return 0;
537 }
538
539 /*
540 * Update the selected entry
541 */
542 if (del) {
543 entry->hi = 0;
544 entry->lo = 0;
545 } else {
546 entry->hi = cpu_to_le32(new_high);
547 entry->lo = cpu_to_le32(new_low);
548 }
549
550 return 0;
551}
552
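/*
 * Collision handling above is open addressing with linear probing:
 * starting at the hashed slot, up to HOP_NUMBER (12) consecutive
 * entries are examined, wrapping from the last entry (start + 0x7ff)
 * back to the first, and the first free or matching slot wins. Note
 * that although the function takes a 'skip' flag for chain-preserving
 * deletes, the callers in this driver always pass skip == 0, and a
 * delete simply zeroes the entry.
 */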
553/*
554 * ----------------------------------------------------------------------------
555 * Create an address table entry from the MAC address info
556 * found in the specified net_device struct
557 *
558 * Input : pointer to ethernet interface network device structure
559 * Output : N/A
560 */
561static void update_hash_table_mac_address(struct pxa168_eth_private *pep,
562 unsigned char *oaddr,
563 unsigned char *addr)
564{
565 /* Delete old entry */
566 if (oaddr)
567 add_del_hash_entry(pep, oaddr, 1, 0, HASH_DELETE);
568 /* Add new entry */
569 add_del_hash_entry(pep, addr, 1, 0, HASH_ADD);
570}
571
572static int init_hash_table(struct pxa168_eth_private *pep)
573{
574 /*
575 * The hardware expects the CPU to build a hash table based on a
576 * predefined hash function and populate it based on the hardware
577 * address. The location of the hash table is identified by a 32-bit
578 * pointer stored in the HTPR internal register. Two possible sizes
579 * exist for the hash table: 8kB (256kB of DRAM required (4 x 64 kB
580 * banks)) and 1/2kB (16kB of DRAM required (4 x 4 kB banks)). We
581 * currently only support the 1/2kB variant.
582 */
583 /* TODO: Add support for the 8kB hash table and the alternative hash
584 * function. The driver can dynamically switch to them if the 1/2kB
585 * hash table is full.
586 */
587 if (pep->htpr == NULL) {
588 pep->htpr = dma_alloc_coherent(pep->dev->dev.parent,
589 HASH_ADDR_TABLE_SIZE,
590 &pep->htpr_dma, GFP_KERNEL);
591 if (pep->htpr == NULL)
592 return -ENOMEM;
593 }
594 memset(pep->htpr, 0, HASH_ADDR_TABLE_SIZE);
595 wrl(pep, HTPR, pep->htpr_dma);
596 return 0;
597}
598
599static void pxa168_eth_set_rx_mode(struct net_device *dev)
600{
601 struct pxa168_eth_private *pep = netdev_priv(dev);
602 struct netdev_hw_addr *ha;
603 u32 val;
604
605 val = rdl(pep, PORT_CONFIG);
606 if (dev->flags & IFF_PROMISC)
607 val |= PCR_PM;
608 else
609 val &= ~PCR_PM;
610 wrl(pep, PORT_CONFIG, val);
611
612 /*
613 * Remove the old list of MAC addresses and re-add dev->dev_addr
614 * plus the current multicast addresses.
615 */
616 memset(pep->htpr, 0, HASH_ADDR_TABLE_SIZE);
617 update_hash_table_mac_address(pep, NULL, dev->dev_addr);
618
619 netdev_for_each_mc_addr(ha, dev)
620 update_hash_table_mac_address(pep, NULL, ha->addr);
621}
622
623static int pxa168_eth_set_mac_address(struct net_device *dev, void *addr)
624{
625 struct sockaddr *sa = addr;
626 struct pxa168_eth_private *pep = netdev_priv(dev);
627 unsigned char oldMac[ETH_ALEN];
628
629 if (!is_valid_ether_addr(sa->sa_data))
630 return -EINVAL;
631 memcpy(oldMac, dev->dev_addr, ETH_ALEN);
632 memcpy(dev->dev_addr, sa->sa_data, ETH_ALEN);
633 netif_addr_lock_bh(dev);
634 update_hash_table_mac_address(pep, oldMac, dev->dev_addr);
635 netif_addr_unlock_bh(dev);
636 return 0;
637}
638
639static void eth_port_start(struct net_device *dev)
640{
641 unsigned int val = 0;
642 struct pxa168_eth_private *pep = netdev_priv(dev);
643 int tx_curr_desc, rx_curr_desc;
644
645 /* Perform PHY reset, if there is a PHY. */
646 if (pep->phy != NULL) {
647 struct ethtool_cmd cmd;
648
649 pxa168_get_settings(pep->dev, &cmd);
650 ethernet_phy_reset(pep);
651 pxa168_set_settings(pep->dev, &cmd);
652 }
653
654 /* Assignment of Tx CTRP of given queue */
655 tx_curr_desc = pep->tx_curr_desc_q;
656 wrl(pep, ETH_C_TX_DESC_1,
657 (u32) (pep->tx_desc_dma + tx_curr_desc * sizeof(struct tx_desc)));
658
659 /* Assignment of Rx CRDP of given queue */
660 rx_curr_desc = pep->rx_curr_desc_q;
661 wrl(pep, ETH_C_RX_DESC_0,
662 (u32) (pep->rx_desc_dma + rx_curr_desc * sizeof(struct rx_desc)));
663
664 wrl(pep, ETH_F_RX_DESC_0,
665 (u32) (pep->rx_desc_dma + rx_curr_desc * sizeof(struct rx_desc)));
666
667 /* Clear all interrupts */
668 wrl(pep, INT_CAUSE, 0);
669
670 /* Enable all interrupts for receive, transmit and error. */
671 wrl(pep, INT_MASK, ALL_INTS);
672
673 val = rdl(pep, PORT_CONFIG);
674 val |= PCR_EN;
675 wrl(pep, PORT_CONFIG, val);
676
677 /* Start RX DMA engine */
678 val = rdl(pep, SDMA_CMD);
679 val |= SDMA_CMD_ERD;
680 wrl(pep, SDMA_CMD, val);
681}
682
683static void eth_port_reset(struct net_device *dev)
684{
685 struct pxa168_eth_private *pep = netdev_priv(dev);
686 unsigned int val = 0;
687
688 /* Stop all interrupts for receive, transmit and error. */
689 wrl(pep, INT_MASK, 0);
690
691 /* Clear all interrupts */
692 wrl(pep, INT_CAUSE, 0);
693
694 /* Stop RX DMA */
695 val = rdl(pep, SDMA_CMD);
696 val &= ~SDMA_CMD_ERD; /* abort dma command */
697
698 /* Abort any transmit and receive operations and put DMA
699 * in idle state.
700 */
701 abort_dma(pep);
702
703 /* Disable port */
704 val = rdl(pep, PORT_CONFIG);
705 val &= ~PCR_EN;
706 wrl(pep, PORT_CONFIG, val);
707}
708
709/*
710 * txq_reclaim - Free the tx desc data for completed descriptors
711 * If force is non-zero, frees uncompleted descriptors as well
712 */
713static int txq_reclaim(struct net_device *dev, int force)
714{
715 struct pxa168_eth_private *pep = netdev_priv(dev);
716 struct tx_desc *desc;
717 u32 cmd_sts;
718 struct sk_buff *skb;
719 int tx_index;
720 dma_addr_t addr;
721 int count;
722 int released = 0;
723
724 netif_tx_lock(dev);
725
726 pep->work_todo &= ~WORK_TX_DONE;
727 while (pep->tx_desc_count > 0) {
728 tx_index = pep->tx_used_desc_q;
729 desc = &pep->p_tx_desc_area[tx_index];
730 cmd_sts = desc->cmd_sts;
731 if (!force && (cmd_sts & BUF_OWNED_BY_DMA)) {
732 if (released > 0) {
733 goto txq_reclaim_end;
734 } else {
735 released = -1;
736 goto txq_reclaim_end;
737 }
738 }
739 pep->tx_used_desc_q = (tx_index + 1) % pep->tx_ring_size;
740 pep->tx_desc_count--;
741 addr = desc->buf_ptr;
742 count = desc->byte_cnt;
743 skb = pep->tx_skb[tx_index];
744 if (skb)
745 pep->tx_skb[tx_index] = NULL;
746
747 if (cmd_sts & TX_ERROR) {
748 if (net_ratelimit())
749 printk(KERN_ERR "%s: Error in TX\n", dev->name);
750 dev->stats.tx_errors++;
751 }
752 dma_unmap_single(NULL, addr, count, DMA_TO_DEVICE);
753 if (skb)
754 dev_kfree_skb_irq(skb);
755 released++;
756 }
757txq_reclaim_end:
758 netif_tx_unlock(dev);
759 return released;
760}
761
762static void pxa168_eth_tx_timeout(struct net_device *dev)
763{
764 struct pxa168_eth_private *pep = netdev_priv(dev);
765
766 printk(KERN_INFO "%s: TX timeout desc_count %d\n",
767 dev->name, pep->tx_desc_count);
768
769 schedule_work(&pep->tx_timeout_task);
770}
771
772static void pxa168_eth_tx_timeout_task(struct work_struct *work)
773{
774 struct pxa168_eth_private *pep = container_of(work,
775 struct pxa168_eth_private,
776 tx_timeout_task);
777 struct net_device *dev = pep->dev;
778 pxa168_eth_stop(dev);
779 pxa168_eth_open(dev);
780}
781
782static int rxq_process(struct net_device *dev, int budget)
783{
784 struct pxa168_eth_private *pep = netdev_priv(dev);
785 struct net_device_stats *stats = &dev->stats;
786 unsigned int received_packets = 0;
787 struct sk_buff *skb;
788
789 while (budget-- > 0) {
790 int rx_next_curr_desc, rx_curr_desc, rx_used_desc;
791 struct rx_desc *rx_desc;
792 unsigned int cmd_sts;
793
794 /* Do not process Rx ring in case of Rx ring resource error */
795 if (pep->rx_resource_err)
796 break;
797 rx_curr_desc = pep->rx_curr_desc_q;
798 rx_used_desc = pep->rx_used_desc_q;
799 rx_desc = &pep->p_rx_desc_area[rx_curr_desc];
800 cmd_sts = rx_desc->cmd_sts;
801 rmb();
802 if (cmd_sts & (BUF_OWNED_BY_DMA))
803 break;
804 skb = pep->rx_skb[rx_curr_desc];
805 pep->rx_skb[rx_curr_desc] = NULL;
806
807 rx_next_curr_desc = (rx_curr_desc + 1) % pep->rx_ring_size;
808 pep->rx_curr_desc_q = rx_next_curr_desc;
809
810 /* Rx descriptors exhausted. */
811 /* Set the Rx ring resource error flag */
812 if (rx_next_curr_desc == rx_used_desc)
813 pep->rx_resource_err = 1;
814 pep->rx_desc_count--;
815 dma_unmap_single(NULL, rx_desc->buf_ptr,
816 rx_desc->buf_size,
817 DMA_FROM_DEVICE);
818 received_packets++;
819 /*
820 * Update statistics.
821 * Note that the byte count includes the 4-byte CRC.
822 */
823 stats->rx_packets++;
824 stats->rx_bytes += rx_desc->byte_cnt;
825 /*
826 * If a packet was received without the first/last bits set, or
827 * with the error summary bit set, it needs to be dropped.
828 */
829 if (((cmd_sts & (RX_FIRST_DESC | RX_LAST_DESC)) !=
830 (RX_FIRST_DESC | RX_LAST_DESC))
831 || (cmd_sts & RX_ERROR)) {
832
833 stats->rx_dropped++;
834 if ((cmd_sts & (RX_FIRST_DESC | RX_LAST_DESC)) !=
835 (RX_FIRST_DESC | RX_LAST_DESC)) {
836 if (net_ratelimit())
837 printk(KERN_ERR
838 "%s: Rx pkt on multiple desc\n",
839 dev->name);
840 }
841 if (cmd_sts & RX_ERROR)
842 stats->rx_errors++;
843 dev_kfree_skb_irq(skb);
844 } else {
845 /*
846 * The -4 is for the CRC in the trailer of the
847 * received packet
848 */
849 skb_put(skb, rx_desc->byte_cnt - 4);
850 skb->protocol = eth_type_trans(skb, dev);
851 netif_receive_skb(skb);
852 }
853 }
854 /* Fill RX ring with skb's */
855 rxq_refill(dev);
856 return received_packets;
857}
858
859static int pxa168_eth_collect_events(struct pxa168_eth_private *pep,
860 struct net_device *dev)
861{
862 u32 icr;
863 int ret = 0;
864
865 icr = rdl(pep, INT_CAUSE);
866 if (icr == 0)
867 return IRQ_NONE;
868
869 wrl(pep, INT_CAUSE, ~icr);
870 if (icr & (ICR_TXBUF_H | ICR_TXBUF_L)) {
871 pep->work_todo |= WORK_TX_DONE;
872 ret = 1;
873 }
874 if (icr & ICR_RXBUF)
875 ret = 1;
876 if (icr & ICR_MII_CH) {
877 pep->work_todo |= WORK_LINK;
878 ret = 1;
879 }
880 return ret;
881}
882
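/*
 * Derive the carrier state from PORT_STATUS: on link loss,
 * reclaim all pending TX descriptors; on link up, log the
 * negotiated speed, duplex and flow-control settings.
 */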
883static void handle_link_event(struct pxa168_eth_private *pep)
884{
885 struct net_device *dev = pep->dev;
886 u32 port_status;
887 int speed;
888 int duplex;
889 int fc;
890
891 port_status = rdl(pep, PORT_STATUS);
892 if (!(port_status & LINK_UP)) {
893 if (netif_carrier_ok(dev)) {
894 printk(KERN_INFO "%s: link down\n", dev->name);
895 netif_carrier_off(dev);
896 txq_reclaim(dev, 1);
897 }
898 return;
899 }
900 if (port_status & PORT_SPEED_100)
901 speed = 100;
902 else
903 speed = 10;
904
905 duplex = (port_status & FULL_DUPLEX) ? 1 : 0;
906 fc = (port_status & FLOW_CONTROL_ENABLED) ? 1 : 0;
907 printk(KERN_INFO "%s: link up, %d Mb/s, %s duplex, "
908 "flow control %sabled\n", dev->name,
909 speed, duplex ? "full" : "half", fc ? "en" : "dis");
910 if (!netif_carrier_ok(dev))
911 netif_carrier_on(dev);
912}
913
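/*
 * Top-half interrupt handler: mask all interrupts and defer the
 * work to NAPI; pxa168_rx_poll() re-enables the mask once the
 * budget is no longer exhausted.
 */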
914static irqreturn_t pxa168_eth_int_handler(int irq, void *dev_id)
915{
916 struct net_device *dev = (struct net_device *)dev_id;
917 struct pxa168_eth_private *pep = netdev_priv(dev);
918
919 if (unlikely(!pxa168_eth_collect_events(pep, dev)))
920 return IRQ_NONE;
921 /* Disable interrupts */
922 wrl(pep, INT_MASK, 0);
923 napi_schedule(&pep->napi);
924 return IRQ_HANDLED;
925}
926
927static void pxa168_eth_recalc_skb_size(struct pxa168_eth_private *pep)
928{
929 int skb_size;
930
931 /*
932 * Reserve 2+14 bytes for an ethernet header (the hardware
933 * automatically prepends 2 bytes of dummy data to each
934 * received packet), 16 bytes for up to four VLAN tags, and
935 * 4 bytes for the trailing FCS -- 36 bytes total.
936 */
937 skb_size = pep->dev->mtu + 36;
938
939 /*
940 * Make sure that the skb size is a multiple of 8 bytes, as
941 * the lower three bits of the receive descriptor's buffer
942 * size field are ignored by the hardware.
943 */
944 pep->skb_size = (skb_size + 7) & ~7;
945
946 /*
947 * If NET_SKB_PAD is smaller than a cache line,
948 * netdev_alloc_skb() will cause skb->data to be misaligned
949 * to a cache line boundary. If this is the case, include
950 * some extra space to allow re-aligning the data area.
951 */
952 pep->skb_size += SKB_DMA_REALIGN;
953
954}
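
/*
 * Worked example: with the default MTU of 1500, skb_size is
 * 1500 + 36 = 1536 bytes, already a multiple of 8, so only the
 * SKB_DMA_REALIGN slack (defined earlier in this driver) is
 * added on top.
 */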
955
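/*
 * Program the extended port configuration, picking the smallest
 * maximum-frame-length bucket (1518/1536/2048/64K) that holds
 * the receive buffer size computed above.
 */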
956static int set_port_config_ext(struct pxa168_eth_private *pep)
957{
958 int skb_size;
959
960 pxa168_eth_recalc_skb_size(pep);
961 if (pep->skb_size <= 1518)
962 skb_size = PCXR_MFL_1518;
963 else if (pep->skb_size <= 1536)
964 skb_size = PCXR_MFL_1536;
965 else if (pep->skb_size <= 2048)
966 skb_size = PCXR_MFL_2048;
967 else
968 skb_size = PCXR_MFL_64K;
969
970 /* Extended Port Configuration */
971 wrl(pep,
972 PORT_CONFIG_EXT, PCXR_2BSM | /* Two byte prefix aligns IP hdr */
973 PCXR_DSCP_EN | /* Enable DSCP in IP */
974 skb_size | PCXR_FLP | /* do not force link pass */
975 PCXR_TX_HIGH_PRI); /* Transmit - high priority queue */
976
977 return 0;
978}
979
980static int pxa168_init_hw(struct pxa168_eth_private *pep)
981{
982 int err = 0;
983
984 /* Disable interrupts */
985 wrl(pep, INT_MASK, 0);
986 wrl(pep, INT_CAUSE, 0);
987 /* Write to ICR to clear interrupts. */
988 wrl(pep, INT_W_CLEAR, 0);
989 /* Abort any transmit and receive operations and put DMA
990 * in idle state.
991 */
992 abort_dma(pep);
993 /* Initialize address hash table */
994 err = init_hash_table(pep);
995 if (err)
996 return err;
997 /* SDMA configuration */
998 wrl(pep, SDMA_CONFIG, SDCR_BSZ8 | /* Burst size = 32 bytes */
999 SDCR_RIFB | /* Rx interrupt on frame */
1000 SDCR_BLMT | /* Little endian transmit */
1001 SDCR_BLMR | /* Little endian receive */
1002 SDCR_RC_MAX_RETRANS); /* Max retransmit count */
1003 /* Port Configuration */
1004 wrl(pep, PORT_CONFIG, PCR_HS); /* Hash size is 1/2kb */
1005 set_port_config_ext(pep);
1006
1007 return err;
1008}
1009
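/*
 * Allocate the receive descriptor ring in coherent DMA memory
 * and chain the descriptors into a circle through next_desc_ptr
 * so the controller can walk the ring on its own.
 */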
1010static int rxq_init(struct net_device *dev)
1011{
1012 struct pxa168_eth_private *pep = netdev_priv(dev);
1013 struct rx_desc *p_rx_desc;
1014 int size = 0, i = 0;
1015 int rx_desc_num = pep->rx_ring_size;
1016
1017 /* Allocate RX skb rings */
1018 pep->rx_skb = kmalloc(sizeof(*pep->rx_skb) * pep->rx_ring_size,
1019 GFP_KERNEL);
1020 if (!pep->rx_skb) {
1021 printk(KERN_ERR "%s: Cannot alloc RX skb ring\n", dev->name);
1022 return -ENOMEM;
1023 }
1024 /* Allocate RX ring */
1025 pep->rx_desc_count = 0;
1026 size = pep->rx_ring_size * sizeof(struct rx_desc);
1027 pep->rx_desc_area_size = size;
1028 pep->p_rx_desc_area = dma_alloc_coherent(pep->dev->dev.parent, size,
1029 &pep->rx_desc_dma, GFP_KERNEL);
1030 if (!pep->p_rx_desc_area) {
1031 printk(KERN_ERR "%s: Cannot alloc RX ring (size %d bytes)\n",
1032 dev->name, size);
1033 goto out;
1034 }
1035 memset((void *)pep->p_rx_desc_area, 0, size);
1036 /* initialize the next_desc_ptr links in the Rx descriptors ring */
1037 p_rx_desc = (struct rx_desc *)pep->p_rx_desc_area;
1038 for (i = 0; i < rx_desc_num; i++) {
1039 p_rx_desc[i].next_desc_ptr = pep->rx_desc_dma +
1040 ((i + 1) % rx_desc_num) * sizeof(struct rx_desc);
1041 }
1042 /* Save Rx desc pointer to driver struct. */
1043 pep->rx_curr_desc_q = 0;
1044 pep->rx_used_desc_q = 0;
1045 pep->rx_desc_area_size = rx_desc_num * sizeof(struct rx_desc);
1046 return 0;
1047out:
1048 kfree(pep->rx_skb);
1049 return -ENOMEM;
1050}
1051
1052static void rxq_deinit(struct net_device *dev)
1053{
1054 struct pxa168_eth_private *pep = netdev_priv(dev);
1055 int curr;
1056
1057 /* Free preallocated skb's on RX rings */
1058 for (curr = 0; pep->rx_desc_count && curr < pep->rx_ring_size; curr++) {
1059 if (pep->rx_skb[curr]) {
1060 dev_kfree_skb(pep->rx_skb[curr]);
1061 pep->rx_desc_count--;
1062 }
1063 }
1064 if (pep->rx_desc_count)
1065 printk(KERN_ERR
1066		       "Error freeing Rx ring: %d skbs still allocated\n",
1067 pep->rx_desc_count);
1068 /* Free RX ring */
1069 if (pep->p_rx_desc_area)
1070 dma_free_coherent(pep->dev->dev.parent, pep->rx_desc_area_size,
1071 pep->p_rx_desc_area, pep->rx_desc_dma);
1072 kfree(pep->rx_skb);
1073}
1074
1075static int txq_init(struct net_device *dev)
1076{
1077 struct pxa168_eth_private *pep = netdev_priv(dev);
1078 struct tx_desc *p_tx_desc;
1079 int size = 0, i = 0;
1080 int tx_desc_num = pep->tx_ring_size;
1081
1082 pep->tx_skb = kmalloc(sizeof(*pep->tx_skb) * pep->tx_ring_size,
1083 GFP_KERNEL);
1084 if (!pep->tx_skb) {
1085 printk(KERN_ERR "%s: Cannot alloc TX skb ring\n", dev->name);
1086 return -ENOMEM;
1087 }
1088 /* Allocate TX ring */
1089 pep->tx_desc_count = 0;
1090 size = pep->tx_ring_size * sizeof(struct tx_desc);
1091 pep->tx_desc_area_size = size;
1092 pep->p_tx_desc_area = dma_alloc_coherent(pep->dev->dev.parent, size,
1093 &pep->tx_desc_dma, GFP_KERNEL);
1094 if (!pep->p_tx_desc_area) {
1095 printk(KERN_ERR "%s: Cannot allocate Tx Ring (size %d bytes)\n",
1096 dev->name, size);
1097 goto out;
1098 }
1099 memset((void *)pep->p_tx_desc_area, 0, pep->tx_desc_area_size);
1100 /* Initialize the next_desc_ptr links in the Tx descriptors ring */
1101 p_tx_desc = (struct tx_desc *)pep->p_tx_desc_area;
1102 for (i = 0; i < tx_desc_num; i++) {
1103 p_tx_desc[i].next_desc_ptr = pep->tx_desc_dma +
1104 ((i + 1) % tx_desc_num) * sizeof(struct tx_desc);
1105 }
1106 pep->tx_curr_desc_q = 0;
1107 pep->tx_used_desc_q = 0;
1108 pep->tx_desc_area_size = tx_desc_num * sizeof(struct tx_desc);
1109 return 0;
1110out:
1111 kfree(pep->tx_skb);
1112 return -ENOMEM;
1113}
1114
1115static void txq_deinit(struct net_device *dev)
1116{
1117 struct pxa168_eth_private *pep = netdev_priv(dev);
1118
1119 /* Free outstanding skb's on TX ring */
1120 txq_reclaim(dev, 1);
1121 BUG_ON(pep->tx_used_desc_q != pep->tx_curr_desc_q);
1122 /* Free TX ring */
1123 if (pep->p_tx_desc_area)
1124 dma_free_coherent(pep->dev->dev.parent, pep->tx_desc_area_size,
1125 pep->p_tx_desc_area, pep->tx_desc_dma);
1126 kfree(pep->tx_skb);
1127}
1128
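/*
 * Bring the interface up: claim the IRQ, build both descriptor
 * rings, pre-fill the RX ring, start the port and enable NAPI.
 */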
1129static int pxa168_eth_open(struct net_device *dev)
1130{
1131 struct pxa168_eth_private *pep = netdev_priv(dev);
1132 int err;
1133
1134 err = request_irq(dev->irq, pxa168_eth_int_handler,
1135 IRQF_DISABLED, dev->name, dev);
1136 if (err) {
1137 dev_printk(KERN_ERR, &dev->dev, "can't assign irq\n");
1138 return -EAGAIN;
1139 }
1140 pep->rx_resource_err = 0;
1141 err = rxq_init(dev);
1142 if (err != 0)
1143 goto out_free_irq;
1144 err = txq_init(dev);
1145 if (err != 0)
1146 goto out_free_rx_skb;
1147 pep->rx_used_desc_q = 0;
1148 pep->rx_curr_desc_q = 0;
1149
1150 /* Fill RX ring with skb's */
1151 rxq_refill(dev);
1152 pep->rx_used_desc_q = 0;
1153 pep->rx_curr_desc_q = 0;
1154 netif_carrier_off(dev);
1155 eth_port_start(dev);
1156 napi_enable(&pep->napi);
1157 return 0;
1158out_free_rx_skb:
1159 rxq_deinit(dev);
1160out_free_irq:
1161 free_irq(dev->irq, dev);
1162 return err;
1163}
1164
1165static int pxa168_eth_stop(struct net_device *dev)
1166{
1167 struct pxa168_eth_private *pep = netdev_priv(dev);
1168 eth_port_reset(dev);
1169
1170 /* Disable interrupts */
1171 wrl(pep, INT_MASK, 0);
1172 wrl(pep, INT_CAUSE, 0);
1173 /* Write to ICR to clear interrupts. */
1174 wrl(pep, INT_W_CLEAR, 0);
1175 napi_disable(&pep->napi);
1176 del_timer_sync(&pep->timeout);
1177 netif_carrier_off(dev);
1178 free_irq(dev->irq, dev);
1179 rxq_deinit(dev);
1180 txq_deinit(dev);
1181
1182 return 0;
1183}
1184
1185static int pxa168_eth_change_mtu(struct net_device *dev, int mtu)
1186{
1187 int retval;
1188 struct pxa168_eth_private *pep = netdev_priv(dev);
1189
1190 if ((mtu > 9500) || (mtu < 68))
1191 return -EINVAL;
1192
1193 dev->mtu = mtu;
1194 retval = set_port_config_ext(pep);
1195
1196 if (!netif_running(dev))
1197 return 0;
1198
1199 /*
1200 * Stop and then re-open the interface. This will allocate RX
1201 * skbs of the new MTU.
1202 * There is a possible danger that the open will not succeed,
1203 * due to memory being full.
1204 */
1205 pxa168_eth_stop(dev);
1206 if (pxa168_eth_open(dev)) {
1207 dev_printk(KERN_ERR, &dev->dev,
1208 "fatal error on re-opening device after "
1209 "MTU change\n");
1210 }
1211
1212 return 0;
1213}
1214
1215static int eth_alloc_tx_desc_index(struct pxa168_eth_private *pep)
1216{
1217 int tx_desc_curr;
1218
1219 tx_desc_curr = pep->tx_curr_desc_q;
1220 pep->tx_curr_desc_q = (tx_desc_curr + 1) % pep->tx_ring_size;
1221 BUG_ON(pep->tx_curr_desc_q == pep->tx_used_desc_q);
1222 pep->tx_desc_count++;
1223
1224 return tx_desc_curr;
1225}
1226
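/*
 * NAPI poll: handle any deferred link event, reclaim finished
 * TX descriptors, then process RX up to the budget. Interrupts
 * are re-enabled only when the budget was not exhausted.
 */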
1227static int pxa168_rx_poll(struct napi_struct *napi, int budget)
1228{
1229 struct pxa168_eth_private *pep =
1230 container_of(napi, struct pxa168_eth_private, napi);
1231 struct net_device *dev = pep->dev;
1232 int work_done = 0;
1233
1234 if (unlikely(pep->work_todo & WORK_LINK)) {
1235 pep->work_todo &= ~(WORK_LINK);
1236 handle_link_event(pep);
1237 }
1238 /*
1239	 * Call txq_reclaim on every poll: while NAPI runs with
1240	 * interrupts disabled, a TX_DONE event can be missed and is
1241	 * not latched in the interrupt status register.
1242 */
1243 txq_reclaim(dev, 0);
1244 if (netif_queue_stopped(dev)
1245 && pep->tx_ring_size - pep->tx_desc_count > 1) {
1246 netif_wake_queue(dev);
1247 }
1248 work_done = rxq_process(dev, budget);
1249 if (work_done < budget) {
1250 napi_complete(napi);
1251 wrl(pep, INT_MASK, ALL_INTS);
1252 }
1253
1254 return work_done;
1255}
1256
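/*
 * Transmit path: each skb occupies exactly one descriptor
 * (TX_FIRST_DESC | TX_LAST_DESC). The first wmb() orders the
 * descriptor contents before the ownership handoff; the second
 * orders the handoff before the SDMA doorbell write.
 */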
1257static int pxa168_eth_start_xmit(struct sk_buff *skb, struct net_device *dev)
1258{
1259 struct pxa168_eth_private *pep = netdev_priv(dev);
1260 struct net_device_stats *stats = &dev->stats;
1261 struct tx_desc *desc;
1262 int tx_index;
1263 int length;
1264
1265 tx_index = eth_alloc_tx_desc_index(pep);
1266 desc = &pep->p_tx_desc_area[tx_index];
1267 length = skb->len;
1268 pep->tx_skb[tx_index] = skb;
1269 desc->byte_cnt = length;
1270 desc->buf_ptr = dma_map_single(NULL, skb->data, length, DMA_TO_DEVICE);
1271
1272 skb_tx_timestamp(skb);
1273
1274 wmb();
1275 desc->cmd_sts = BUF_OWNED_BY_DMA | TX_GEN_CRC | TX_FIRST_DESC |
1276 TX_ZERO_PADDING | TX_LAST_DESC | TX_EN_INT;
1277 wmb();
1278 wrl(pep, SDMA_CMD, SDMA_CMD_TXDH | SDMA_CMD_ERD);
1279
1280 stats->tx_bytes += length;
1281 stats->tx_packets++;
1282 dev->trans_start = jiffies;
1283 if (pep->tx_ring_size - pep->tx_desc_count <= 1) {
1284 /* We handled the current skb, but now we are out of space.*/
1285 netif_stop_queue(dev);
1286 }
1287
1288 return NETDEV_TX_OK;
1289}
1290
1291static int smi_wait_ready(struct pxa168_eth_private *pep)
1292{
1293 int i = 0;
1294
1295 /* wait for the SMI register to become available */
1296 for (i = 0; rdl(pep, SMI) & SMI_BUSY; i++) {
1297 if (i == PHY_WAIT_ITERATIONS)
1298 return -ETIMEDOUT;
1299 msleep(10);
1300 }
1301
1302 return 0;
1303}
1304
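/*
 * MDIO (SMI) accessors for phylib. The SMI register packs the
 * PHY address into bits 16-20 and the register number into bits
 * 21-25; both directions poll with msleep(10) between reads and
 * give up after PHY_WAIT_ITERATIONS attempts.
 */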
1305static int pxa168_smi_read(struct mii_bus *bus, int phy_addr, int regnum)
1306{
1307 struct pxa168_eth_private *pep = bus->priv;
1308 int i = 0;
1309 int val;
1310
1311 if (smi_wait_ready(pep)) {
1312 printk(KERN_WARNING "pxa168_eth: SMI bus busy timeout\n");
1313 return -ETIMEDOUT;
1314 }
1315 wrl(pep, SMI, (phy_addr << 16) | (regnum << 21) | SMI_OP_R);
1316 /* now wait for the data to be valid */
1317 for (i = 0; !((val = rdl(pep, SMI)) & SMI_R_VALID); i++) {
1318 if (i == PHY_WAIT_ITERATIONS) {
1319 printk(KERN_WARNING
1320 "pxa168_eth: SMI bus read not valid\n");
1321 return -ENODEV;
1322 }
1323 msleep(10);
1324 }
1325
1326 return val & 0xffff;
1327}
1328
1329static int pxa168_smi_write(struct mii_bus *bus, int phy_addr, int regnum,
1330 u16 value)
1331{
1332 struct pxa168_eth_private *pep = bus->priv;
1333
1334 if (smi_wait_ready(pep)) {
1335 printk(KERN_WARNING "pxa168_eth: SMI bus busy timeout\n");
1336 return -ETIMEDOUT;
1337 }
1338
1339 wrl(pep, SMI, (phy_addr << 16) | (regnum << 21) |
1340 SMI_OP_W | (value & 0xffff));
1341
1342 if (smi_wait_ready(pep)) {
1343 printk(KERN_ERR "pxa168_eth: SMI bus busy timeout\n");
1344 return -ETIMEDOUT;
1345 }
1346
1347 return 0;
1348}
1349
1350static int pxa168_eth_do_ioctl(struct net_device *dev, struct ifreq *ifr,
1351 int cmd)
1352{
1353 struct pxa168_eth_private *pep = netdev_priv(dev);
1354 if (pep->phy != NULL)
1355 return phy_mii_ioctl(pep->phy, ifr, cmd);
1356
1357 return -EOPNOTSUPP;
1358}
1359
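/*
 * Locate the PHY: probe either all 32 MDIO addresses (default
 * platform data) or only the address supplied by the platform,
 * and record the first PHY found in the port registers.
 */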
1360static struct phy_device *phy_scan(struct pxa168_eth_private *pep, int phy_addr)
1361{
1362 struct mii_bus *bus = pep->smi_bus;
1363 struct phy_device *phydev;
1364 int start;
1365 int num;
1366 int i;
1367
1368 if (phy_addr == PXA168_ETH_PHY_ADDR_DEFAULT) {
1369 /* Scan entire range */
1370 start = ethernet_phy_get(pep);
1371 num = 32;
1372 } else {
1373 /* Use phy addr specific to platform */
1374 start = phy_addr & 0x1f;
1375 num = 1;
1376 }
1377 phydev = NULL;
1378 for (i = 0; i < num; i++) {
1379 int addr = (start + i) & 0x1f;
1380 if (bus->phy_map[addr] == NULL)
1381 mdiobus_scan(bus, addr);
1382
1383 if (phydev == NULL) {
1384 phydev = bus->phy_map[addr];
1385 if (phydev != NULL)
1386 ethernet_phy_set_addr(pep, addr);
1387 }
1388 }
1389
1390 return phydev;
1391}
1392
1393static void phy_init(struct pxa168_eth_private *pep, int speed, int duplex)
1394{
1395 struct phy_device *phy = pep->phy;
1396 ethernet_phy_reset(pep);
1397
1398 phy_attach(pep->dev, dev_name(&phy->dev), 0, PHY_INTERFACE_MODE_MII);
1399
1400 if (speed == 0) {
1401 phy->autoneg = AUTONEG_ENABLE;
1402 phy->speed = 0;
1403 phy->duplex = 0;
1404 phy->supported &= PHY_BASIC_FEATURES;
1405 phy->advertising = phy->supported | ADVERTISED_Autoneg;
1406 } else {
1407 phy->autoneg = AUTONEG_DISABLE;
1408 phy->advertising = 0;
1409 phy->speed = speed;
1410 phy->duplex = duplex;
1411 }
1412 phy_start_aneg(phy);
1413}
1414
1415static int ethernet_phy_setup(struct net_device *dev)
1416{
1417 struct pxa168_eth_private *pep = netdev_priv(dev);
1418
1419 if (pep->pd->init)
1420 pep->pd->init();
1421 pep->phy = phy_scan(pep, pep->pd->phy_addr & 0x1f);
1422 if (pep->phy != NULL)
1423 phy_init(pep, pep->pd->speed, pep->pd->duplex);
1424 update_hash_table_mac_address(pep, NULL, dev->dev_addr);
1425
1426 return 0;
1427}
1428
1429static int pxa168_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
1430{
1431 struct pxa168_eth_private *pep = netdev_priv(dev);
1432 int err;
1433
1434 err = phy_read_status(pep->phy);
1435 if (err == 0)
1436 err = phy_ethtool_gset(pep->phy, cmd);
1437
1438 return err;
1439}
1440
1441static int pxa168_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
1442{
1443 struct pxa168_eth_private *pep = netdev_priv(dev);
1444
1445 return phy_ethtool_sset(pep->phy, cmd);
1446}
1447
1448static void pxa168_get_drvinfo(struct net_device *dev,
1449 struct ethtool_drvinfo *info)
1450{
1451 strncpy(info->driver, DRIVER_NAME, 32);
1452 strncpy(info->version, DRIVER_VERSION, 32);
1453 strncpy(info->fw_version, "N/A", 32);
1454 strncpy(info->bus_info, "N/A", 32);
1455}
1456
1457static const struct ethtool_ops pxa168_ethtool_ops = {
1458 .get_settings = pxa168_get_settings,
1459 .set_settings = pxa168_set_settings,
1460 .get_drvinfo = pxa168_get_drvinfo,
1461 .get_link = ethtool_op_get_link,
1462};
1463
1464static const struct net_device_ops pxa168_eth_netdev_ops = {
1465 .ndo_open = pxa168_eth_open,
1466 .ndo_stop = pxa168_eth_stop,
1467 .ndo_start_xmit = pxa168_eth_start_xmit,
1468 .ndo_set_rx_mode = pxa168_eth_set_rx_mode,
1469 .ndo_set_mac_address = pxa168_eth_set_mac_address,
1470 .ndo_validate_addr = eth_validate_addr,
1471 .ndo_do_ioctl = pxa168_eth_do_ioctl,
1472 .ndo_change_mtu = pxa168_eth_change_mtu,
1473 .ndo_tx_timeout = pxa168_eth_tx_timeout,
1474};
1475
1476static int pxa168_eth_probe(struct platform_device *pdev)
1477{
1478 struct pxa168_eth_private *pep = NULL;
1479 struct net_device *dev = NULL;
1480 struct resource *res;
1481 struct clk *clk;
1482 int err;
1483
1484 printk(KERN_NOTICE "PXA168 10/100 Ethernet Driver\n");
1485
1486 clk = clk_get(&pdev->dev, "MFUCLK");
1487 if (IS_ERR(clk)) {
1488		printk(KERN_ERR "%s: failed to get MFU clock\n",
1489 DRIVER_NAME);
1490 return -ENODEV;
1491 }
1492 clk_enable(clk);
1493
1494 dev = alloc_etherdev(sizeof(struct pxa168_eth_private));
1495 if (!dev) {
1496 err = -ENOMEM;
1497 goto err_clk;
1498 }
1499
1500 platform_set_drvdata(pdev, dev);
1501 pep = netdev_priv(dev);
1502 pep->dev = dev;
1503 pep->clk = clk;
1504 res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
1505 if (res == NULL) {
1506 err = -ENODEV;
1507 goto err_netdev;
1508 }
1509 pep->base = ioremap(res->start, resource_size(res));
1510 if (pep->base == NULL) {
1511 err = -ENOMEM;
1512 goto err_netdev;
1513 }
1514 res = platform_get_resource(pdev, IORESOURCE_IRQ, 0);
1515 BUG_ON(!res);
1516 dev->irq = res->start;
1517 dev->netdev_ops = &pxa168_eth_netdev_ops;
1518 dev->watchdog_timeo = 2 * HZ;
1519 dev->base_addr = 0;
1520 SET_ETHTOOL_OPS(dev, &pxa168_ethtool_ops);
1521
1522 INIT_WORK(&pep->tx_timeout_task, pxa168_eth_tx_timeout_task);
1523
1524	printk(KERN_INFO "%s: Using random MAC address\n", DRIVER_NAME);
1525 random_ether_addr(dev->dev_addr);
1526
1527 pep->pd = pdev->dev.platform_data;
1528 pep->rx_ring_size = NUM_RX_DESCS;
1529 if (pep->pd->rx_queue_size)
1530 pep->rx_ring_size = pep->pd->rx_queue_size;
1531
1532 pep->tx_ring_size = NUM_TX_DESCS;
1533 if (pep->pd->tx_queue_size)
1534 pep->tx_ring_size = pep->pd->tx_queue_size;
1535
1536 pep->port_num = pep->pd->port_number;
1537 /* Hardware supports only 3 ports */
1538 BUG_ON(pep->port_num > 2);
1539 netif_napi_add(dev, &pep->napi, pxa168_rx_poll, pep->rx_ring_size);
1540
1541 memset(&pep->timeout, 0, sizeof(struct timer_list));
1542 init_timer(&pep->timeout);
1543 pep->timeout.function = rxq_refill_timer_wrapper;
1544 pep->timeout.data = (unsigned long)pep;
1545
1546 pep->smi_bus = mdiobus_alloc();
1547 if (pep->smi_bus == NULL) {
1548 err = -ENOMEM;
1549 goto err_base;
1550 }
1551 pep->smi_bus->priv = pep;
1552 pep->smi_bus->name = "pxa168_eth smi";
1553 pep->smi_bus->read = pxa168_smi_read;
1554 pep->smi_bus->write = pxa168_smi_write;
1555 snprintf(pep->smi_bus->id, MII_BUS_ID_SIZE, "%d", pdev->id);
1556 pep->smi_bus->parent = &pdev->dev;
1557 pep->smi_bus->phy_mask = 0xffffffff;
1558 err = mdiobus_register(pep->smi_bus);
1559 if (err)
1560 goto err_free_mdio;
1561
1562 pxa168_init_hw(pep);
1563 err = ethernet_phy_setup(dev);
1564 if (err)
1565 goto err_mdiobus;
1566 SET_NETDEV_DEV(dev, &pdev->dev);
1567 err = register_netdev(dev);
1568 if (err)
1569 goto err_mdiobus;
1570 return 0;
1571
1572err_mdiobus:
1573 mdiobus_unregister(pep->smi_bus);
1574err_free_mdio:
1575 mdiobus_free(pep->smi_bus);
1576err_base:
1577 iounmap(pep->base);
1578err_netdev:
1579 free_netdev(dev);
1580err_clk:
1581 clk_disable(clk);
1582 clk_put(clk);
1583 return err;
1584}
1585
1586static int pxa168_eth_remove(struct platform_device *pdev)
1587{
1588 struct net_device *dev = platform_get_drvdata(pdev);
1589 struct pxa168_eth_private *pep = netdev_priv(dev);
1590
1591 if (pep->htpr) {
1592 dma_free_coherent(pep->dev->dev.parent, HASH_ADDR_TABLE_SIZE,
1593 pep->htpr, pep->htpr_dma);
1594 pep->htpr = NULL;
1595 }
1596 if (pep->clk) {
1597 clk_disable(pep->clk);
1598 clk_put(pep->clk);
1599 pep->clk = NULL;
1600 }
1601 if (pep->phy != NULL)
1602 phy_detach(pep->phy);
1603
1604 iounmap(pep->base);
1605 pep->base = NULL;
1606 mdiobus_unregister(pep->smi_bus);
1607 mdiobus_free(pep->smi_bus);
1608 unregister_netdev(dev);
1609 cancel_work_sync(&pep->tx_timeout_task);
1610 free_netdev(dev);
1611 platform_set_drvdata(pdev, NULL);
1612 return 0;
1613}
1614
1615static void pxa168_eth_shutdown(struct platform_device *pdev)
1616{
1617 struct net_device *dev = platform_get_drvdata(pdev);
1618 eth_port_reset(dev);
1619}
1620
1621#ifdef CONFIG_PM
1622static int pxa168_eth_resume(struct platform_device *pdev)
1623{
1624 return -ENOSYS;
1625}
1626
1627static int pxa168_eth_suspend(struct platform_device *pdev, pm_message_t state)
1628{
1629 return -ENOSYS;
1630}
1631
1632#else
1633#define pxa168_eth_resume NULL
1634#define pxa168_eth_suspend NULL
1635#endif
1636
1637static struct platform_driver pxa168_eth_driver = {
1638 .probe = pxa168_eth_probe,
1639 .remove = pxa168_eth_remove,
1640 .shutdown = pxa168_eth_shutdown,
1641 .resume = pxa168_eth_resume,
1642 .suspend = pxa168_eth_suspend,
1643 .driver = {
1644 .name = DRIVER_NAME,
1645 },
1646};
1647
1648static int __init pxa168_init_module(void)
1649{
1650 return platform_driver_register(&pxa168_eth_driver);
1651}
1652
1653static void __exit pxa168_cleanup_module(void)
1654{
1655 platform_driver_unregister(&pxa168_eth_driver);
1656}
1657
1658module_init(pxa168_init_module);
1659module_exit(pxa168_cleanup_module);
1660
1661MODULE_LICENSE("GPL");
1662MODULE_DESCRIPTION("Ethernet driver for Marvell PXA168");
1663MODULE_ALIAS("platform:pxa168_eth");
diff --git a/drivers/net/ethernet/marvell/skge.c b/drivers/net/ethernet/marvell/skge.c
new file mode 100644
index 000000000000..c7b60839ac99
--- /dev/null
+++ b/drivers/net/ethernet/marvell/skge.c
@@ -0,0 +1,4161 @@
1/*
2 * New driver for Marvell Yukon chipset and SysKonnect Gigabit
3 * Ethernet adapters. Based on earlier sk98lin, e100 and
4 * FreeBSD if_sk drivers.
5 *
6 * This driver intentionally does not support all the features
7 * of the original driver such as link fail-over and link management because
8 * those should be done at higher levels.
9 *
10 * Copyright (C) 2004, 2005 Stephen Hemminger <shemminger@osdl.org>
11 *
12 * This program is free software; you can redistribute it and/or modify
13 * it under the terms of the GNU General Public License as published by
14 * the Free Software Foundation; either version 2 of the License.
15 *
16 * This program is distributed in the hope that it will be useful,
17 * but WITHOUT ANY WARRANTY; without even the implied warranty of
18 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
19 * GNU General Public License for more details.
20 *
21 * You should have received a copy of the GNU General Public License
22 * along with this program; if not, write to the Free Software
23 * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
24 */
25
26#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
27
28#include <linux/in.h>
29#include <linux/kernel.h>
30#include <linux/module.h>
31#include <linux/moduleparam.h>
32#include <linux/netdevice.h>
33#include <linux/etherdevice.h>
34#include <linux/ethtool.h>
35#include <linux/pci.h>
36#include <linux/if_vlan.h>
37#include <linux/ip.h>
38#include <linux/delay.h>
39#include <linux/crc32.h>
40#include <linux/dma-mapping.h>
41#include <linux/debugfs.h>
42#include <linux/sched.h>
43#include <linux/seq_file.h>
44#include <linux/mii.h>
45#include <linux/slab.h>
46#include <linux/dmi.h>
47#include <linux/prefetch.h>
48#include <asm/irq.h>
49
50#include "skge.h"
51
52#define DRV_NAME "skge"
53#define DRV_VERSION "1.14"
54
55#define DEFAULT_TX_RING_SIZE 128
56#define DEFAULT_RX_RING_SIZE 512
57#define MAX_TX_RING_SIZE 1024
58#define TX_LOW_WATER (MAX_SKB_FRAGS + 1)
59#define MAX_RX_RING_SIZE 4096
60#define RX_COPY_THRESHOLD 128
61#define RX_BUF_SIZE 1536
62#define PHY_RETRIES 1000
63#define ETH_JUMBO_MTU 9000
64#define TX_WATCHDOG (5 * HZ)
65#define NAPI_WEIGHT 64
66#define BLINK_MS 250
67#define LINK_HZ HZ
68
69#define SKGE_EEPROM_MAGIC 0x9933aabb
70
71
72MODULE_DESCRIPTION("SysKonnect Gigabit Ethernet driver");
73MODULE_AUTHOR("Stephen Hemminger <shemminger@linux-foundation.org>");
74MODULE_LICENSE("GPL");
75MODULE_VERSION(DRV_VERSION);
76
77static const u32 default_msg = (NETIF_MSG_DRV | NETIF_MSG_PROBE |
78 NETIF_MSG_LINK | NETIF_MSG_IFUP |
79 NETIF_MSG_IFDOWN);
80
81static int debug = -1; /* defaults above */
82module_param(debug, int, 0);
83MODULE_PARM_DESC(debug, "Debug level (0=none,...,16=all)");
84
85static DEFINE_PCI_DEVICE_TABLE(skge_id_table) = {
86 { PCI_DEVICE(PCI_VENDOR_ID_3COM, 0x1700) }, /* 3Com 3C940 */
87 { PCI_DEVICE(PCI_VENDOR_ID_3COM, 0x80EB) }, /* 3Com 3C940B */
88#ifdef CONFIG_SKGE_GENESIS
89 { PCI_DEVICE(PCI_VENDOR_ID_SYSKONNECT, 0x4300) }, /* SK-9xx */
90#endif
91 { PCI_DEVICE(PCI_VENDOR_ID_SYSKONNECT, 0x4320) }, /* SK-98xx V2.0 */
92 { PCI_DEVICE(PCI_VENDOR_ID_DLINK, 0x4b01) }, /* D-Link DGE-530T (rev.B) */
93 { PCI_DEVICE(PCI_VENDOR_ID_DLINK, 0x4c00) }, /* D-Link DGE-530T */
94 { PCI_DEVICE(PCI_VENDOR_ID_DLINK, 0x4302) }, /* D-Link DGE-530T Rev C1 */
95 { PCI_DEVICE(PCI_VENDOR_ID_MARVELL, 0x4320) }, /* Marvell Yukon 88E8001/8003/8010 */
96 { PCI_DEVICE(PCI_VENDOR_ID_MARVELL, 0x5005) }, /* Belkin */
97 { PCI_DEVICE(PCI_VENDOR_ID_CNET, 0x434E) }, /* CNet PowerG-2000 */
98 { PCI_DEVICE(PCI_VENDOR_ID_LINKSYS, 0x1064) }, /* Linksys EG1064 v2 */
99 { PCI_VENDOR_ID_LINKSYS, 0x1032, PCI_ANY_ID, 0x0015 }, /* Linksys EG1032 v2 */
100 { 0 }
101};
102MODULE_DEVICE_TABLE(pci, skge_id_table);
103
104static int skge_up(struct net_device *dev);
105static int skge_down(struct net_device *dev);
106static void skge_phy_reset(struct skge_port *skge);
107static void skge_tx_clean(struct net_device *dev);
108static int xm_phy_write(struct skge_hw *hw, int port, u16 reg, u16 val);
109static int gm_phy_write(struct skge_hw *hw, int port, u16 reg, u16 val);
110static void genesis_get_stats(struct skge_port *skge, u64 *data);
111static void yukon_get_stats(struct skge_port *skge, u64 *data);
112static void yukon_init(struct skge_hw *hw, int port);
113static void genesis_mac_init(struct skge_hw *hw, int port);
114static void genesis_link_up(struct skge_port *skge);
115static void skge_set_multicast(struct net_device *dev);
116static irqreturn_t skge_intr(int irq, void *dev_id);
117
118/* Avoid conditionals by using array */
119static const int txqaddr[] = { Q_XA1, Q_XA2 };
120static const int rxqaddr[] = { Q_R1, Q_R2 };
121static const u32 rxirqmask[] = { IS_R1_F, IS_R2_F };
122static const u32 txirqmask[] = { IS_XA1_F, IS_XA2_F };
123static const u32 napimask[] = { IS_R1_F|IS_XA1_F, IS_R2_F|IS_XA2_F };
124static const u32 portmask[] = { IS_PORT_1, IS_PORT_2 };
125
126static inline bool is_genesis(const struct skge_hw *hw)
127{
128#ifdef CONFIG_SKGE_GENESIS
129 return hw->chip_id == CHIP_ID_GENESIS;
130#else
131 return false;
132#endif
133}
134
135static int skge_get_regs_len(struct net_device *dev)
136{
137 return 0x4000;
138}
139
140/*
141 * Returns copy of whole control register region
142 * Note: skip RAM address register because accessing it will
143 * cause bus hangs!
144 */
145static void skge_get_regs(struct net_device *dev, struct ethtool_regs *regs,
146 void *p)
147{
148 const struct skge_port *skge = netdev_priv(dev);
149 const void __iomem *io = skge->hw->regs;
150
151 regs->version = 1;
152 memset(p, 0, regs->len);
153 memcpy_fromio(p, io, B3_RAM_ADDR);
154
155 memcpy_fromio(p + B3_RI_WTO_R1, io + B3_RI_WTO_R1,
156 regs->len - B3_RI_WTO_R1);
157}
158
159/* Wake-on-LAN is only supported on Yukon chips with rev 1 or above */
160static u32 wol_supported(const struct skge_hw *hw)
161{
162 if (is_genesis(hw))
163 return 0;
164
165 if (hw->chip_id == CHIP_ID_YUKON && hw->chip_rev == 0)
166 return 0;
167
168 return WAKE_MAGIC | WAKE_PHY;
169}
170
171static void skge_wol_init(struct skge_port *skge)
172{
173 struct skge_hw *hw = skge->hw;
174 int port = skge->port;
175 u16 ctrl;
176
177 skge_write16(hw, B0_CTST, CS_RST_CLR);
178 skge_write16(hw, SK_REG(port, GMAC_LINK_CTRL), GMLC_RST_CLR);
179
180 /* Turn on Vaux */
181 skge_write8(hw, B0_POWER_CTRL,
182 PC_VAUX_ENA | PC_VCC_ENA | PC_VAUX_ON | PC_VCC_OFF);
183
184	/* Workaround for COMA mode -- clear PHY reset */
185 if (hw->chip_id == CHIP_ID_YUKON_LITE &&
186 hw->chip_rev >= CHIP_REV_YU_LITE_A3) {
187 u32 reg = skge_read32(hw, B2_GP_IO);
188 reg |= GP_DIR_9;
189 reg &= ~GP_IO_9;
190 skge_write32(hw, B2_GP_IO, reg);
191 }
192
193 skge_write32(hw, SK_REG(port, GPHY_CTRL),
194 GPC_DIS_SLEEP |
195 GPC_HWCFG_M_3 | GPC_HWCFG_M_2 | GPC_HWCFG_M_1 | GPC_HWCFG_M_0 |
196 GPC_ANEG_1 | GPC_RST_SET);
197
198 skge_write32(hw, SK_REG(port, GPHY_CTRL),
199 GPC_DIS_SLEEP |
200 GPC_HWCFG_M_3 | GPC_HWCFG_M_2 | GPC_HWCFG_M_1 | GPC_HWCFG_M_0 |
201 GPC_ANEG_1 | GPC_RST_CLR);
202
203 skge_write32(hw, SK_REG(port, GMAC_CTRL), GMC_RST_CLR);
204
205 /* Force to 10/100 skge_reset will re-enable on resume */
206 gm_phy_write(hw, port, PHY_MARV_AUNE_ADV,
207 (PHY_AN_100FULL | PHY_AN_100HALF |
208 PHY_AN_10FULL | PHY_AN_10HALF | PHY_AN_CSMA));
209 /* no 1000 HD/FD */
210 gm_phy_write(hw, port, PHY_MARV_1000T_CTRL, 0);
211 gm_phy_write(hw, port, PHY_MARV_CTRL,
212 PHY_CT_RESET | PHY_CT_SPS_LSB | PHY_CT_ANE |
213 PHY_CT_RE_CFG | PHY_CT_DUP_MD);
214
215
216 /* Set GMAC to no flow control and auto update for speed/duplex */
217 gma_write16(hw, port, GM_GP_CTRL,
218 GM_GPCR_FC_TX_DIS|GM_GPCR_TX_ENA|GM_GPCR_RX_ENA|
219 GM_GPCR_DUP_FULL|GM_GPCR_FC_RX_DIS|GM_GPCR_AU_FCT_DIS);
220
221 /* Set WOL address */
222 memcpy_toio(hw->regs + WOL_REGS(port, WOL_MAC_ADDR),
223 skge->netdev->dev_addr, ETH_ALEN);
224
225 /* Turn on appropriate WOL control bits */
226 skge_write16(hw, WOL_REGS(port, WOL_CTRL_STAT), WOL_CTL_CLEAR_RESULT);
227 ctrl = 0;
228 if (skge->wol & WAKE_PHY)
229 ctrl |= WOL_CTL_ENA_PME_ON_LINK_CHG|WOL_CTL_ENA_LINK_CHG_UNIT;
230 else
231 ctrl |= WOL_CTL_DIS_PME_ON_LINK_CHG|WOL_CTL_DIS_LINK_CHG_UNIT;
232
233 if (skge->wol & WAKE_MAGIC)
234 ctrl |= WOL_CTL_ENA_PME_ON_MAGIC_PKT|WOL_CTL_ENA_MAGIC_PKT_UNIT;
235 else
236 ctrl |= WOL_CTL_DIS_PME_ON_MAGIC_PKT|WOL_CTL_DIS_MAGIC_PKT_UNIT;
237
238 ctrl |= WOL_CTL_DIS_PME_ON_PATTERN|WOL_CTL_DIS_PATTERN_UNIT;
239 skge_write16(hw, WOL_REGS(port, WOL_CTRL_STAT), ctrl);
240
241 /* block receiver */
242 skge_write8(hw, SK_REG(port, RX_GMF_CTRL_T), GMF_RST_SET);
243}
244
245static void skge_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
246{
247 struct skge_port *skge = netdev_priv(dev);
248
249 wol->supported = wol_supported(skge->hw);
250 wol->wolopts = skge->wol;
251}
252
253static int skge_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
254{
255 struct skge_port *skge = netdev_priv(dev);
256 struct skge_hw *hw = skge->hw;
257
258 if ((wol->wolopts & ~wol_supported(hw)) ||
259 !device_can_wakeup(&hw->pdev->dev))
260 return -EOPNOTSUPP;
261
262 skge->wol = wol->wolopts;
263
264 device_set_wakeup_enable(&hw->pdev->dev, skge->wol);
265
266 return 0;
267}
268
269/* Determine supported/advertised modes based on hardware.
270 * Note: ethtool ADVERTISED_xxx == SUPPORTED_xxx
271 */
272static u32 skge_supported_modes(const struct skge_hw *hw)
273{
274 u32 supported;
275
276 if (hw->copper) {
277 supported = (SUPPORTED_10baseT_Half |
278 SUPPORTED_10baseT_Full |
279 SUPPORTED_100baseT_Half |
280 SUPPORTED_100baseT_Full |
281 SUPPORTED_1000baseT_Half |
282 SUPPORTED_1000baseT_Full |
283 SUPPORTED_Autoneg |
284 SUPPORTED_TP);
285
286 if (is_genesis(hw))
287 supported &= ~(SUPPORTED_10baseT_Half |
288 SUPPORTED_10baseT_Full |
289 SUPPORTED_100baseT_Half |
290 SUPPORTED_100baseT_Full);
291
292 else if (hw->chip_id == CHIP_ID_YUKON)
293 supported &= ~SUPPORTED_1000baseT_Half;
294 } else
295 supported = (SUPPORTED_1000baseT_Full |
296 SUPPORTED_1000baseT_Half |
297 SUPPORTED_FIBRE |
298 SUPPORTED_Autoneg);
299
300 return supported;
301}
302
303static int skge_get_settings(struct net_device *dev,
304 struct ethtool_cmd *ecmd)
305{
306 struct skge_port *skge = netdev_priv(dev);
307 struct skge_hw *hw = skge->hw;
308
309 ecmd->transceiver = XCVR_INTERNAL;
310 ecmd->supported = skge_supported_modes(hw);
311
312 if (hw->copper) {
313 ecmd->port = PORT_TP;
314 ecmd->phy_address = hw->phy_addr;
315 } else
316 ecmd->port = PORT_FIBRE;
317
318 ecmd->advertising = skge->advertising;
319 ecmd->autoneg = skge->autoneg;
320 ethtool_cmd_speed_set(ecmd, skge->speed);
321 ecmd->duplex = skge->duplex;
322 return 0;
323}
324
325static int skge_set_settings(struct net_device *dev, struct ethtool_cmd *ecmd)
326{
327 struct skge_port *skge = netdev_priv(dev);
328 const struct skge_hw *hw = skge->hw;
329 u32 supported = skge_supported_modes(hw);
330 int err = 0;
331
332 if (ecmd->autoneg == AUTONEG_ENABLE) {
333 ecmd->advertising = supported;
334 skge->duplex = -1;
335 skge->speed = -1;
336 } else {
337 u32 setting;
338 u32 speed = ethtool_cmd_speed(ecmd);
339
340 switch (speed) {
341 case SPEED_1000:
342 if (ecmd->duplex == DUPLEX_FULL)
343 setting = SUPPORTED_1000baseT_Full;
344 else if (ecmd->duplex == DUPLEX_HALF)
345 setting = SUPPORTED_1000baseT_Half;
346 else
347 return -EINVAL;
348 break;
349 case SPEED_100:
350 if (ecmd->duplex == DUPLEX_FULL)
351 setting = SUPPORTED_100baseT_Full;
352 else if (ecmd->duplex == DUPLEX_HALF)
353 setting = SUPPORTED_100baseT_Half;
354 else
355 return -EINVAL;
356 break;
357
358 case SPEED_10:
359 if (ecmd->duplex == DUPLEX_FULL)
360 setting = SUPPORTED_10baseT_Full;
361 else if (ecmd->duplex == DUPLEX_HALF)
362 setting = SUPPORTED_10baseT_Half;
363 else
364 return -EINVAL;
365 break;
366 default:
367 return -EINVAL;
368 }
369
370 if ((setting & supported) == 0)
371 return -EINVAL;
372
373 skge->speed = speed;
374 skge->duplex = ecmd->duplex;
375 }
376
377 skge->autoneg = ecmd->autoneg;
378 skge->advertising = ecmd->advertising;
379
380 if (netif_running(dev)) {
381 skge_down(dev);
382 err = skge_up(dev);
383 if (err) {
384 dev_close(dev);
385 return err;
386 }
387 }
388
389 return 0;
390}
391
392static void skge_get_drvinfo(struct net_device *dev,
393 struct ethtool_drvinfo *info)
394{
395 struct skge_port *skge = netdev_priv(dev);
396
397 strcpy(info->driver, DRV_NAME);
398 strcpy(info->version, DRV_VERSION);
399 strcpy(info->fw_version, "N/A");
400 strcpy(info->bus_info, pci_name(skge->hw->pdev));
401}
402
403static const struct skge_stat {
404 char name[ETH_GSTRING_LEN];
405 u16 xmac_offset;
406 u16 gma_offset;
407} skge_stats[] = {
408 { "tx_bytes", XM_TXO_OK_HI, GM_TXO_OK_HI },
409 { "rx_bytes", XM_RXO_OK_HI, GM_RXO_OK_HI },
410
411 { "tx_broadcast", XM_TXF_BC_OK, GM_TXF_BC_OK },
412 { "rx_broadcast", XM_RXF_BC_OK, GM_RXF_BC_OK },
413 { "tx_multicast", XM_TXF_MC_OK, GM_TXF_MC_OK },
414 { "rx_multicast", XM_RXF_MC_OK, GM_RXF_MC_OK },
415 { "tx_unicast", XM_TXF_UC_OK, GM_TXF_UC_OK },
416 { "rx_unicast", XM_RXF_UC_OK, GM_RXF_UC_OK },
417 { "tx_mac_pause", XM_TXF_MPAUSE, GM_TXF_MPAUSE },
418 { "rx_mac_pause", XM_RXF_MPAUSE, GM_RXF_MPAUSE },
419
420 { "collisions", XM_TXF_SNG_COL, GM_TXF_SNG_COL },
421 { "multi_collisions", XM_TXF_MUL_COL, GM_TXF_MUL_COL },
422 { "aborted", XM_TXF_ABO_COL, GM_TXF_ABO_COL },
423 { "late_collision", XM_TXF_LAT_COL, GM_TXF_LAT_COL },
424 { "fifo_underrun", XM_TXE_FIFO_UR, GM_TXE_FIFO_UR },
425 { "fifo_overflow", XM_RXE_FIFO_OV, GM_RXE_FIFO_OV },
426
427 { "rx_toolong", XM_RXF_LNG_ERR, GM_RXF_LNG_ERR },
428 { "rx_jabber", XM_RXF_JAB_PKT, GM_RXF_JAB_PKT },
429 { "rx_runt", XM_RXE_RUNT, GM_RXE_FRAG },
430 { "rx_too_long", XM_RXF_LNG_ERR, GM_RXF_LNG_ERR },
431 { "rx_fcs_error", XM_RXF_FCS_ERR, GM_RXF_FCS_ERR },
432};
433
434static int skge_get_sset_count(struct net_device *dev, int sset)
435{
436 switch (sset) {
437 case ETH_SS_STATS:
438 return ARRAY_SIZE(skge_stats);
439 default:
440 return -EOPNOTSUPP;
441 }
442}
443
444static void skge_get_ethtool_stats(struct net_device *dev,
445 struct ethtool_stats *stats, u64 *data)
446{
447 struct skge_port *skge = netdev_priv(dev);
448
449 if (is_genesis(skge->hw))
450 genesis_get_stats(skge, data);
451 else
452 yukon_get_stats(skge, data);
453}
454
455/* Use hardware MIB variables for critical path statistics and
456 * transmit feedback not reported at interrupt.
457 * Other errors are accounted for in interrupt handler.
458 */
459static struct net_device_stats *skge_get_stats(struct net_device *dev)
460{
461 struct skge_port *skge = netdev_priv(dev);
462 u64 data[ARRAY_SIZE(skge_stats)];
463
464 if (is_genesis(skge->hw))
465 genesis_get_stats(skge, data);
466 else
467 yukon_get_stats(skge, data);
468
469 dev->stats.tx_bytes = data[0];
470 dev->stats.rx_bytes = data[1];
471 dev->stats.tx_packets = data[2] + data[4] + data[6];
472 dev->stats.rx_packets = data[3] + data[5] + data[7];
473 dev->stats.multicast = data[3] + data[5];
474 dev->stats.collisions = data[10];
475 dev->stats.tx_aborted_errors = data[12];
476
477 return &dev->stats;
478}
479
480static void skge_get_strings(struct net_device *dev, u32 stringset, u8 *data)
481{
482 int i;
483
484 switch (stringset) {
485 case ETH_SS_STATS:
486 for (i = 0; i < ARRAY_SIZE(skge_stats); i++)
487 memcpy(data + i * ETH_GSTRING_LEN,
488 skge_stats[i].name, ETH_GSTRING_LEN);
489 break;
490 }
491}
492
493static void skge_get_ring_param(struct net_device *dev,
494 struct ethtool_ringparam *p)
495{
496 struct skge_port *skge = netdev_priv(dev);
497
498 p->rx_max_pending = MAX_RX_RING_SIZE;
499 p->tx_max_pending = MAX_TX_RING_SIZE;
500
501 p->rx_pending = skge->rx_ring.count;
502 p->tx_pending = skge->tx_ring.count;
503}
504
505static int skge_set_ring_param(struct net_device *dev,
506 struct ethtool_ringparam *p)
507{
508 struct skge_port *skge = netdev_priv(dev);
509 int err = 0;
510
511 if (p->rx_pending == 0 || p->rx_pending > MAX_RX_RING_SIZE ||
512 p->tx_pending < TX_LOW_WATER || p->tx_pending > MAX_TX_RING_SIZE)
513 return -EINVAL;
514
515 skge->rx_ring.count = p->rx_pending;
516 skge->tx_ring.count = p->tx_pending;
517
518 if (netif_running(dev)) {
519 skge_down(dev);
520 err = skge_up(dev);
521 if (err)
522 dev_close(dev);
523 }
524
525 return err;
526}
527
528static u32 skge_get_msglevel(struct net_device *netdev)
529{
530 struct skge_port *skge = netdev_priv(netdev);
531 return skge->msg_enable;
532}
533
534static void skge_set_msglevel(struct net_device *netdev, u32 value)
535{
536 struct skge_port *skge = netdev_priv(netdev);
537 skge->msg_enable = value;
538}
539
540static int skge_nway_reset(struct net_device *dev)
541{
542 struct skge_port *skge = netdev_priv(dev);
543
544 if (skge->autoneg != AUTONEG_ENABLE || !netif_running(dev))
545 return -EINVAL;
546
547 skge_phy_reset(skge);
548 return 0;
549}
550
551static void skge_get_pauseparam(struct net_device *dev,
552 struct ethtool_pauseparam *ecmd)
553{
554 struct skge_port *skge = netdev_priv(dev);
555
556 ecmd->rx_pause = ((skge->flow_control == FLOW_MODE_SYMMETRIC) ||
557 (skge->flow_control == FLOW_MODE_SYM_OR_REM));
558 ecmd->tx_pause = (ecmd->rx_pause ||
559 (skge->flow_control == FLOW_MODE_LOC_SEND));
560
561 ecmd->autoneg = ecmd->rx_pause || ecmd->tx_pause;
562}
563
564static int skge_set_pauseparam(struct net_device *dev,
565 struct ethtool_pauseparam *ecmd)
566{
567 struct skge_port *skge = netdev_priv(dev);
568 struct ethtool_pauseparam old;
569 int err = 0;
570
571 skge_get_pauseparam(dev, &old);
572
573 if (ecmd->autoneg != old.autoneg)
574 skge->flow_control = ecmd->autoneg ? FLOW_MODE_NONE : FLOW_MODE_SYMMETRIC;
575 else {
576 if (ecmd->rx_pause && ecmd->tx_pause)
577 skge->flow_control = FLOW_MODE_SYMMETRIC;
578 else if (ecmd->rx_pause && !ecmd->tx_pause)
579 skge->flow_control = FLOW_MODE_SYM_OR_REM;
580 else if (!ecmd->rx_pause && ecmd->tx_pause)
581 skge->flow_control = FLOW_MODE_LOC_SEND;
582 else
583 skge->flow_control = FLOW_MODE_NONE;
584 }
585
586 if (netif_running(dev)) {
587 skge_down(dev);
588 err = skge_up(dev);
589 if (err) {
590 dev_close(dev);
591 return err;
592 }
593 }
594
595 return 0;
596}
597
598/* Chip internal frequency for clock calculations */
599static inline u32 hwkhz(const struct skge_hw *hw)
600{
601 return is_genesis(hw) ? 53125 : 78125;
602}
603
604/* Chip HZ to microseconds */
605static inline u32 skge_clk2usec(const struct skge_hw *hw, u32 ticks)
606{
607 return (ticks * 1000) / hwkhz(hw);
608}
609
610/* Microseconds to chip HZ */
611static inline u32 skge_usecs2clk(const struct skge_hw *hw, u32 usec)
612{
613 return hwkhz(hw) * usec / 1000;
614}
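
/*
 * Example: on Yukon (78125 kHz) a 25 usec coalescing delay maps
 * to skge_usecs2clk(hw, 25) = 78125 * 25 / 1000 = 1953 ticks.
 */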
615
616static int skge_get_coalesce(struct net_device *dev,
617 struct ethtool_coalesce *ecmd)
618{
619 struct skge_port *skge = netdev_priv(dev);
620 struct skge_hw *hw = skge->hw;
621 int port = skge->port;
622
623 ecmd->rx_coalesce_usecs = 0;
624 ecmd->tx_coalesce_usecs = 0;
625
626 if (skge_read32(hw, B2_IRQM_CTRL) & TIM_START) {
627 u32 delay = skge_clk2usec(hw, skge_read32(hw, B2_IRQM_INI));
628 u32 msk = skge_read32(hw, B2_IRQM_MSK);
629
630 if (msk & rxirqmask[port])
631 ecmd->rx_coalesce_usecs = delay;
632 if (msk & txirqmask[port])
633 ecmd->tx_coalesce_usecs = delay;
634 }
635
636 return 0;
637}
638
639/* Note: interrupt timer is per board, but can turn on/off per port */
640static int skge_set_coalesce(struct net_device *dev,
641 struct ethtool_coalesce *ecmd)
642{
643 struct skge_port *skge = netdev_priv(dev);
644 struct skge_hw *hw = skge->hw;
645 int port = skge->port;
646 u32 msk = skge_read32(hw, B2_IRQM_MSK);
647 u32 delay = 25;
648
649 if (ecmd->rx_coalesce_usecs == 0)
650 msk &= ~rxirqmask[port];
651 else if (ecmd->rx_coalesce_usecs < 25 ||
652 ecmd->rx_coalesce_usecs > 33333)
653 return -EINVAL;
654 else {
655 msk |= rxirqmask[port];
656 delay = ecmd->rx_coalesce_usecs;
657 }
658
659 if (ecmd->tx_coalesce_usecs == 0)
660 msk &= ~txirqmask[port];
661 else if (ecmd->tx_coalesce_usecs < 25 ||
662 ecmd->tx_coalesce_usecs > 33333)
663 return -EINVAL;
664 else {
665 msk |= txirqmask[port];
666		delay = min(delay, ecmd->tx_coalesce_usecs);
667 }
668
669 skge_write32(hw, B2_IRQM_MSK, msk);
670 if (msk == 0)
671 skge_write32(hw, B2_IRQM_CTRL, TIM_STOP);
672 else {
673 skge_write32(hw, B2_IRQM_INI, skge_usecs2clk(hw, delay));
674 skge_write32(hw, B2_IRQM_CTRL, TIM_START);
675 }
676 return 0;
677}
678
679enum led_mode { LED_MODE_OFF, LED_MODE_ON, LED_MODE_TST };
680static void skge_led(struct skge_port *skge, enum led_mode mode)
681{
682 struct skge_hw *hw = skge->hw;
683 int port = skge->port;
684
685 spin_lock_bh(&hw->phy_lock);
686 if (is_genesis(hw)) {
687 switch (mode) {
688 case LED_MODE_OFF:
689 if (hw->phy_type == SK_PHY_BCOM)
690 xm_phy_write(hw, port, PHY_BCOM_P_EXT_CTRL, PHY_B_PEC_LED_OFF);
691 else {
692 skge_write32(hw, SK_REG(port, TX_LED_VAL), 0);
693 skge_write8(hw, SK_REG(port, TX_LED_CTRL), LED_T_OFF);
694 }
695 skge_write8(hw, SK_REG(port, LNK_LED_REG), LINKLED_OFF);
696 skge_write32(hw, SK_REG(port, RX_LED_VAL), 0);
697 skge_write8(hw, SK_REG(port, RX_LED_CTRL), LED_T_OFF);
698 break;
699
700 case LED_MODE_ON:
701 skge_write8(hw, SK_REG(port, LNK_LED_REG), LINKLED_ON);
702 skge_write8(hw, SK_REG(port, LNK_LED_REG), LINKLED_LINKSYNC_ON);
703
704 skge_write8(hw, SK_REG(port, RX_LED_CTRL), LED_START);
705 skge_write8(hw, SK_REG(port, TX_LED_CTRL), LED_START);
706
707 break;
708
709 case LED_MODE_TST:
710 skge_write8(hw, SK_REG(port, RX_LED_TST), LED_T_ON);
711 skge_write32(hw, SK_REG(port, RX_LED_VAL), 100);
712 skge_write8(hw, SK_REG(port, RX_LED_CTRL), LED_START);
713
714 if (hw->phy_type == SK_PHY_BCOM)
715 xm_phy_write(hw, port, PHY_BCOM_P_EXT_CTRL, PHY_B_PEC_LED_ON);
716 else {
717 skge_write8(hw, SK_REG(port, TX_LED_TST), LED_T_ON);
718 skge_write32(hw, SK_REG(port, TX_LED_VAL), 100);
719 skge_write8(hw, SK_REG(port, TX_LED_CTRL), LED_START);
720 }
721
722 }
723 } else {
724 switch (mode) {
725 case LED_MODE_OFF:
726 gm_phy_write(hw, port, PHY_MARV_LED_CTRL, 0);
727 gm_phy_write(hw, port, PHY_MARV_LED_OVER,
728 PHY_M_LED_MO_DUP(MO_LED_OFF) |
729 PHY_M_LED_MO_10(MO_LED_OFF) |
730 PHY_M_LED_MO_100(MO_LED_OFF) |
731 PHY_M_LED_MO_1000(MO_LED_OFF) |
732 PHY_M_LED_MO_RX(MO_LED_OFF));
733 break;
734 case LED_MODE_ON:
735 gm_phy_write(hw, port, PHY_MARV_LED_CTRL,
736 PHY_M_LED_PULS_DUR(PULS_170MS) |
737 PHY_M_LED_BLINK_RT(BLINK_84MS) |
738 PHY_M_LEDC_TX_CTRL |
739 PHY_M_LEDC_DP_CTRL);
740
741 gm_phy_write(hw, port, PHY_MARV_LED_OVER,
742 PHY_M_LED_MO_RX(MO_LED_OFF) |
743 (skge->speed == SPEED_100 ?
744 PHY_M_LED_MO_100(MO_LED_ON) : 0));
745 break;
746 case LED_MODE_TST:
747 gm_phy_write(hw, port, PHY_MARV_LED_CTRL, 0);
748 gm_phy_write(hw, port, PHY_MARV_LED_OVER,
749 PHY_M_LED_MO_DUP(MO_LED_ON) |
750 PHY_M_LED_MO_10(MO_LED_ON) |
751 PHY_M_LED_MO_100(MO_LED_ON) |
752 PHY_M_LED_MO_1000(MO_LED_ON) |
753 PHY_M_LED_MO_RX(MO_LED_ON));
754 }
755 }
756 spin_unlock_bh(&hw->phy_lock);
757}
758
759/* blink LEDs for locating the board */
760static int skge_set_phys_id(struct net_device *dev,
761 enum ethtool_phys_id_state state)
762{
763 struct skge_port *skge = netdev_priv(dev);
764
765 switch (state) {
766 case ETHTOOL_ID_ACTIVE:
767 return 2; /* cycle on/off twice per second */
768
769 case ETHTOOL_ID_ON:
770 skge_led(skge, LED_MODE_TST);
771 break;
772
773 case ETHTOOL_ID_OFF:
774 skge_led(skge, LED_MODE_OFF);
775 break;
776
777 case ETHTOOL_ID_INACTIVE:
778 /* back to regular LED state */
779 skge_led(skge, netif_running(dev) ? LED_MODE_ON : LED_MODE_OFF);
780 }
781
782 return 0;
783}
784
785static int skge_get_eeprom_len(struct net_device *dev)
786{
787 struct skge_port *skge = netdev_priv(dev);
788 u32 reg2;
789
790 pci_read_config_dword(skge->hw->pdev, PCI_DEV_REG2, &reg2);
791 return 1 << (((reg2 & PCI_VPD_ROM_SZ) >> 14) + 8);
792}
793
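/*
 * VPD EEPROM access through the PCI VPD capability: a read
 * posts the address and spins until the hardware sets
 * PCI_VPD_ADDR_F; a write does the reverse, spinning until the
 * flag clears.
 */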
794static u32 skge_vpd_read(struct pci_dev *pdev, int cap, u16 offset)
795{
796 u32 val;
797
798 pci_write_config_word(pdev, cap + PCI_VPD_ADDR, offset);
799
800 do {
801 pci_read_config_word(pdev, cap + PCI_VPD_ADDR, &offset);
802 } while (!(offset & PCI_VPD_ADDR_F));
803
804 pci_read_config_dword(pdev, cap + PCI_VPD_DATA, &val);
805 return val;
806}
807
808static void skge_vpd_write(struct pci_dev *pdev, int cap, u16 offset, u32 val)
809{
810 pci_write_config_dword(pdev, cap + PCI_VPD_DATA, val);
811 pci_write_config_word(pdev, cap + PCI_VPD_ADDR,
812 offset | PCI_VPD_ADDR_F);
813
814 do {
815 pci_read_config_word(pdev, cap + PCI_VPD_ADDR, &offset);
816 } while (offset & PCI_VPD_ADDR_F);
817}
818
819static int skge_get_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom,
820 u8 *data)
821{
822 struct skge_port *skge = netdev_priv(dev);
823 struct pci_dev *pdev = skge->hw->pdev;
824 int cap = pci_find_capability(pdev, PCI_CAP_ID_VPD);
825 int length = eeprom->len;
826 u16 offset = eeprom->offset;
827
828 if (!cap)
829 return -EINVAL;
830
831 eeprom->magic = SKGE_EEPROM_MAGIC;
832
833 while (length > 0) {
834 u32 val = skge_vpd_read(pdev, cap, offset);
835 int n = min_t(int, length, sizeof(val));
836
837 memcpy(data, &val, n);
838 length -= n;
839 data += n;
840 offset += n;
841 }
842 return 0;
843}
844
845static int skge_set_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom,
846 u8 *data)
847{
848 struct skge_port *skge = netdev_priv(dev);
849 struct pci_dev *pdev = skge->hw->pdev;
850 int cap = pci_find_capability(pdev, PCI_CAP_ID_VPD);
851 int length = eeprom->len;
852 u16 offset = eeprom->offset;
853
854 if (!cap)
855 return -EINVAL;
856
857 if (eeprom->magic != SKGE_EEPROM_MAGIC)
858 return -EINVAL;
859
860 while (length > 0) {
861 u32 val;
862 int n = min_t(int, length, sizeof(val));
863
864 if (n < sizeof(val))
865 val = skge_vpd_read(pdev, cap, offset);
866 memcpy(&val, data, n);
867
868 skge_vpd_write(pdev, cap, offset, val);
869
870 length -= n;
871 data += n;
872 offset += n;
873 }
874 return 0;
875}
876
877static const struct ethtool_ops skge_ethtool_ops = {
878 .get_settings = skge_get_settings,
879 .set_settings = skge_set_settings,
880 .get_drvinfo = skge_get_drvinfo,
881 .get_regs_len = skge_get_regs_len,
882 .get_regs = skge_get_regs,
883 .get_wol = skge_get_wol,
884 .set_wol = skge_set_wol,
885 .get_msglevel = skge_get_msglevel,
886 .set_msglevel = skge_set_msglevel,
887 .nway_reset = skge_nway_reset,
888 .get_link = ethtool_op_get_link,
889 .get_eeprom_len = skge_get_eeprom_len,
890 .get_eeprom = skge_get_eeprom,
891 .set_eeprom = skge_set_eeprom,
892 .get_ringparam = skge_get_ring_param,
893 .set_ringparam = skge_set_ring_param,
894 .get_pauseparam = skge_get_pauseparam,
895 .set_pauseparam = skge_set_pauseparam,
896 .get_coalesce = skge_get_coalesce,
897 .set_coalesce = skge_set_coalesce,
898 .get_strings = skge_get_strings,
899 .set_phys_id = skge_set_phys_id,
900 .get_sset_count = skge_get_sset_count,
901 .get_ethtool_stats = skge_get_ethtool_stats,
902};
903
904/*
905 * Allocate ring elements and chain them together
906 * One-to-one association of board descriptors with ring elements
907 */
908static int skge_ring_alloc(struct skge_ring *ring, void *vaddr, u32 base)
909{
910 struct skge_tx_desc *d;
911 struct skge_element *e;
912 int i;
913
914 ring->start = kcalloc(ring->count, sizeof(*e), GFP_KERNEL);
915 if (!ring->start)
916 return -ENOMEM;
917
918 for (i = 0, e = ring->start, d = vaddr; i < ring->count; i++, e++, d++) {
919 e->desc = d;
920 if (i == ring->count - 1) {
921 e->next = ring->start;
922 d->next_offset = base;
923 } else {
924 e->next = e + 1;
925 d->next_offset = base + (i+1) * sizeof(*d);
926 }
927 }
928 ring->to_use = ring->to_clean = ring->start;
929
930 return 0;
931}
932
933/* Allocate and setup a new buffer for receiving */
934static void skge_rx_setup(struct skge_port *skge, struct skge_element *e,
935 struct sk_buff *skb, unsigned int bufsize)
936{
937 struct skge_rx_desc *rd = e->desc;
938 u64 map;
939
940 map = pci_map_single(skge->hw->pdev, skb->data, bufsize,
941 PCI_DMA_FROMDEVICE);
942
943 rd->dma_lo = map;
944 rd->dma_hi = map >> 32;
945 e->skb = skb;
946 rd->csum1_start = ETH_HLEN;
947 rd->csum2_start = ETH_HLEN;
948 rd->csum1 = 0;
949 rd->csum2 = 0;
950
951 wmb();
952
953 rd->control = BMU_OWN | BMU_STF | BMU_IRQ_EOF | BMU_TCP_CHECK | bufsize;
954 dma_unmap_addr_set(e, mapaddr, map);
955 dma_unmap_len_set(e, maplen, bufsize);
956}
957
958/* Resume receiving using existing skb,
959 * Note: DMA address is not changed by chip.
960 * MTU not changed while receiver active.
961 */
962static inline void skge_rx_reuse(struct skge_element *e, unsigned int size)
963{
964 struct skge_rx_desc *rd = e->desc;
965
966 rd->csum2 = 0;
967 rd->csum2_start = ETH_HLEN;
968
969 wmb();
970
971 rd->control = BMU_OWN | BMU_STF | BMU_IRQ_EOF | BMU_TCP_CHECK | size;
972}
973
974
975/* Free all buffers in receive ring, assumes receiver stopped */
976static void skge_rx_clean(struct skge_port *skge)
977{
978 struct skge_hw *hw = skge->hw;
979 struct skge_ring *ring = &skge->rx_ring;
980 struct skge_element *e;
981
982 e = ring->start;
983 do {
984 struct skge_rx_desc *rd = e->desc;
985 rd->control = 0;
986 if (e->skb) {
987 pci_unmap_single(hw->pdev,
988 dma_unmap_addr(e, mapaddr),
989 dma_unmap_len(e, maplen),
990 PCI_DMA_FROMDEVICE);
991 dev_kfree_skb(e->skb);
992 e->skb = NULL;
993 }
994 } while ((e = e->next) != ring->start);
995}
996
997
998/* Allocate buffers for receive ring
999 * For receive: to_clean is next received frame.
1000 */
1001static int skge_rx_fill(struct net_device *dev)
1002{
1003 struct skge_port *skge = netdev_priv(dev);
1004 struct skge_ring *ring = &skge->rx_ring;
1005 struct skge_element *e;
1006
1007 e = ring->start;
1008 do {
1009 struct sk_buff *skb;
1010
1011 skb = __netdev_alloc_skb(dev, skge->rx_buf_size + NET_IP_ALIGN,
1012 GFP_KERNEL);
1013 if (!skb)
1014 return -ENOMEM;
1015
1016 skb_reserve(skb, NET_IP_ALIGN);
1017 skge_rx_setup(skge, e, skb, skge->rx_buf_size);
1018 } while ((e = e->next) != ring->start);
1019
1020 ring->to_clean = ring->start;
1021 return 0;
1022}
1023
1024static const char *skge_pause(enum pause_status status)
1025{
1026 switch (status) {
1027 case FLOW_STAT_NONE:
1028 return "none";
1029 case FLOW_STAT_REM_SEND:
1030 return "rx only";
1031 case FLOW_STAT_LOC_SEND:
1032		return "tx only";
1033	case FLOW_STAT_SYMMETRIC:		/* Both stations may send PAUSE */
1034 return "both";
1035 default:
1036		return "indeterminate";
1037 }
1038}
1039
1040
1041static void skge_link_up(struct skge_port *skge)
1042{
1043 skge_write8(skge->hw, SK_REG(skge->port, LNK_LED_REG),
1044 LED_BLK_OFF|LED_SYNC_OFF|LED_ON);
1045
1046 netif_carrier_on(skge->netdev);
1047 netif_wake_queue(skge->netdev);
1048
1049 netif_info(skge, link, skge->netdev,
1050 "Link is up at %d Mbps, %s duplex, flow control %s\n",
1051 skge->speed,
1052 skge->duplex == DUPLEX_FULL ? "full" : "half",
1053 skge_pause(skge->flow_status));
1054}
1055
1056static void skge_link_down(struct skge_port *skge)
1057{
1058 skge_write8(skge->hw, SK_REG(skge->port, LNK_LED_REG), LED_OFF);
1059 netif_carrier_off(skge->netdev);
1060 netif_stop_queue(skge->netdev);
1061
1062 netif_info(skge, link, skge->netdev, "Link is down\n");
1063}
1064
1065static void xm_link_down(struct skge_hw *hw, int port)
1066{
1067 struct net_device *dev = hw->dev[port];
1068 struct skge_port *skge = netdev_priv(dev);
1069
1070 xm_write16(hw, port, XM_IMSK, XM_IMSK_DISABLE);
1071
1072 if (netif_carrier_ok(dev))
1073 skge_link_down(skge);
1074}
1075
1076static int __xm_phy_read(struct skge_hw *hw, int port, u16 reg, u16 *val)
1077{
1078 int i;
1079
1080 xm_write16(hw, port, XM_PHY_ADDR, reg | hw->phy_addr);
1081 *val = xm_read16(hw, port, XM_PHY_DATA);
1082
1083 if (hw->phy_type == SK_PHY_XMAC)
1084 goto ready;
1085
1086 for (i = 0; i < PHY_RETRIES; i++) {
1087 if (xm_read16(hw, port, XM_MMU_CMD) & XM_MMU_PHY_RDY)
1088 goto ready;
1089 udelay(1);
1090 }
1091
1092 return -ETIMEDOUT;
1093 ready:
1094 *val = xm_read16(hw, port, XM_PHY_DATA);
1095
1096 return 0;
1097}
1098
1099static u16 xm_phy_read(struct skge_hw *hw, int port, u16 reg)
1100{
1101 u16 v = 0;
1102 if (__xm_phy_read(hw, port, reg, &v))
1103 pr_warning("%s: phy read timed out\n", hw->dev[port]->name);
1104 return v;
1105}
1106
1107static int xm_phy_write(struct skge_hw *hw, int port, u16 reg, u16 val)
1108{
1109 int i;
1110
1111 xm_write16(hw, port, XM_PHY_ADDR, reg | hw->phy_addr);
1112 for (i = 0; i < PHY_RETRIES; i++) {
1113 if (!(xm_read16(hw, port, XM_MMU_CMD) & XM_MMU_PHY_BUSY))
1114 goto ready;
1115 udelay(1);
1116 }
1117 return -EIO;
1118
1119 ready:
1120 xm_write16(hw, port, XM_PHY_DATA, val);
1121 for (i = 0; i < PHY_RETRIES; i++) {
1122 if (!(xm_read16(hw, port, XM_MMU_CMD) & XM_MMU_PHY_BUSY))
1123 return 0;
1124 udelay(1);
1125 }
1126 return -ETIMEDOUT;
1127}
1128
1129static void genesis_init(struct skge_hw *hw)
1130{
1131 /* set blink source counter */
1132 skge_write32(hw, B2_BSC_INI, (SK_BLK_DUR * SK_FACT_53) / 100);
1133 skge_write8(hw, B2_BSC_CTRL, BSC_START);
1134
1135 /* configure mac arbiter */
1136 skge_write16(hw, B3_MA_TO_CTRL, MA_RST_CLR);
1137
1138 /* configure mac arbiter timeout values */
1139 skge_write8(hw, B3_MA_TOINI_RX1, SK_MAC_TO_53);
1140 skge_write8(hw, B3_MA_TOINI_RX2, SK_MAC_TO_53);
1141 skge_write8(hw, B3_MA_TOINI_TX1, SK_MAC_TO_53);
1142 skge_write8(hw, B3_MA_TOINI_TX2, SK_MAC_TO_53);
1143
1144 skge_write8(hw, B3_MA_RCINI_RX1, 0);
1145 skge_write8(hw, B3_MA_RCINI_RX2, 0);
1146 skge_write8(hw, B3_MA_RCINI_TX1, 0);
1147 skge_write8(hw, B3_MA_RCINI_TX2, 0);
1148
1149 /* configure packet arbiter timeout */
1150 skge_write16(hw, B3_PA_CTRL, PA_RST_CLR);
1151 skge_write16(hw, B3_PA_TOINI_RX1, SK_PKT_TO_MAX);
1152 skge_write16(hw, B3_PA_TOINI_TX1, SK_PKT_TO_MAX);
1153 skge_write16(hw, B3_PA_TOINI_RX2, SK_PKT_TO_MAX);
1154 skge_write16(hw, B3_PA_TOINI_TX2, SK_PKT_TO_MAX);
1155}
1156
1157static void genesis_reset(struct skge_hw *hw, int port)
1158{
1159 static const u8 zero[8] = { 0 };
1160 u32 reg;
1161
1162 skge_write8(hw, SK_REG(port, GMAC_IRQ_MSK), 0);
1163
1164 /* reset the statistics module */
1165 xm_write32(hw, port, XM_GP_PORT, XM_GP_RES_STAT);
1166 xm_write16(hw, port, XM_IMSK, XM_IMSK_DISABLE);
1167 xm_write32(hw, port, XM_MODE, 0); /* clear Mode Reg */
1168 xm_write16(hw, port, XM_TX_CMD, 0); /* reset TX CMD Reg */
1169 xm_write16(hw, port, XM_RX_CMD, 0); /* reset RX CMD Reg */
1170
1171 /* disable Broadcom PHY IRQ */
1172 if (hw->phy_type == SK_PHY_BCOM)
1173 xm_write16(hw, port, PHY_BCOM_INT_MASK, 0xffff);
1174
1175 xm_outhash(hw, port, XM_HSM, zero);
1176
1177 /* Flush TX and RX fifo */
1178 reg = xm_read32(hw, port, XM_MODE);
1179 xm_write32(hw, port, XM_MODE, reg | XM_MD_FTF);
1180 xm_write32(hw, port, XM_MODE, reg | XM_MD_FRF);
1181}
1182
1183/* Convert a flow-control mode to MII advertisement values */
1184static const u16 phy_pause_map[] = {
1185 [FLOW_MODE_NONE] = 0,
1186 [FLOW_MODE_LOC_SEND] = PHY_AN_PAUSE_ASYM,
1187 [FLOW_MODE_SYMMETRIC] = PHY_AN_PAUSE_CAP,
1188 [FLOW_MODE_SYM_OR_REM] = PHY_AN_PAUSE_CAP | PHY_AN_PAUSE_ASYM,
1189};
1190
1191/* special defines for FIBER (88E1011S only) */
1192static const u16 fiber_pause_map[] = {
1193 [FLOW_MODE_NONE] = PHY_X_P_NO_PAUSE,
1194 [FLOW_MODE_LOC_SEND] = PHY_X_P_ASYM_MD,
1195 [FLOW_MODE_SYMMETRIC] = PHY_X_P_SYM_MD,
1196 [FLOW_MODE_SYM_OR_REM] = PHY_X_P_BOTH_MD,
1197};
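/* Both tables are indexed by skge->flow_control (a FLOW_MODE_* value):
 * phy_pause_map is used by bcom_phy_init()/yukon_init() for copper
 * links, fiber_pause_map by xm_phy_init()/yukon_init() for fiber.
 */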
1198
1199
1200/* Check status of Broadcom phy link */
1201static void bcom_check_link(struct skge_hw *hw, int port)
1202{
1203 struct net_device *dev = hw->dev[port];
1204 struct skge_port *skge = netdev_priv(dev);
1205 u16 status;
1206
1207 /* read twice because of latch */
1208 xm_phy_read(hw, port, PHY_BCOM_STAT);
1209 status = xm_phy_read(hw, port, PHY_BCOM_STAT);
1210
1211 if ((status & PHY_ST_LSYNC) == 0) {
1212 xm_link_down(hw, port);
1213 return;
1214 }
1215
1216 if (skge->autoneg == AUTONEG_ENABLE) {
1217 u16 lpa, aux;
1218
1219 if (!(status & PHY_ST_AN_OVER))
1220 return;
1221
1222 lpa = xm_phy_read(hw, port, PHY_XMAC_AUNE_LP);
1223 if (lpa & PHY_B_AN_RF) {
1224 netdev_notice(dev, "remote fault\n");
1225 return;
1226 }
1227
1228 aux = xm_phy_read(hw, port, PHY_BCOM_AUX_STAT);
1229
1230 /* Check Duplex mismatch */
1231 switch (aux & PHY_B_AS_AN_RES_MSK) {
1232 case PHY_B_RES_1000FD:
1233 skge->duplex = DUPLEX_FULL;
1234 break;
1235 case PHY_B_RES_1000HD:
1236 skge->duplex = DUPLEX_HALF;
1237 break;
1238 default:
1239 netdev_notice(dev, "duplex mismatch\n");
1240 return;
1241 }
1242
1243 /* We are using IEEE 802.3z/D5.0 Table 37-4 */
1244 switch (aux & PHY_B_AS_PAUSE_MSK) {
1245 case PHY_B_AS_PAUSE_MSK:
1246 skge->flow_status = FLOW_STAT_SYMMETRIC;
1247 break;
1248 case PHY_B_AS_PRR:
1249 skge->flow_status = FLOW_STAT_REM_SEND;
1250 break;
1251 case PHY_B_AS_PRT:
1252 skge->flow_status = FLOW_STAT_LOC_SEND;
1253 break;
1254 default:
1255 skge->flow_status = FLOW_STAT_NONE;
1256 }
1257 skge->speed = SPEED_1000;
1258 }
1259
1260 if (!netif_carrier_ok(dev))
1261 genesis_link_up(skge);
1262}
1263
1264/* The Broadcom 5400 only supports gigabit; SysKonnect did not put an
1265 * additional PHY on for 100 or 10 Mbit operation.
1266 */
1267static void bcom_phy_init(struct skge_port *skge)
1268{
1269 struct skge_hw *hw = skge->hw;
1270 int port = skge->port;
1271 int i;
1272 u16 id1, r, ext, ctl;
1273
1274 /* magic workaround patterns for Broadcom */
1275 static const struct {
1276 u16 reg;
1277 u16 val;
1278 } A1hack[] = {
1279 { 0x18, 0x0c20 }, { 0x17, 0x0012 }, { 0x15, 0x1104 },
1280 { 0x17, 0x0013 }, { 0x15, 0x0404 }, { 0x17, 0x8006 },
1281 { 0x15, 0x0132 }, { 0x17, 0x8006 }, { 0x15, 0x0232 },
1282 { 0x17, 0x800D }, { 0x15, 0x000F }, { 0x18, 0x0420 },
1283 }, C0hack[] = {
1284 { 0x18, 0x0c20 }, { 0x17, 0x0012 }, { 0x15, 0x1204 },
1285 { 0x17, 0x0013 }, { 0x15, 0x0A04 }, { 0x18, 0x0420 },
1286 };
1287
1288 /* read Id from external PHY (all have the same address) */
1289 id1 = xm_phy_read(hw, port, PHY_XMAC_ID1);
1290
1291 /* Optimize MDIO transfer by suppressing preamble. */
1292 r = xm_read16(hw, port, XM_MMU_CMD);
1293 r |= XM_MMU_NO_PRE;
1294 xm_write16(hw, port, XM_MMU_CMD, r);
1295
1296 switch (id1) {
1297 case PHY_BCOM_ID1_C0:
1298 /*
1299 * Workaround BCOM Errata for the C0 type.
1300 * Write magic patterns to reserved registers.
1301 */
1302 for (i = 0; i < ARRAY_SIZE(C0hack); i++)
1303 xm_phy_write(hw, port,
1304 C0hack[i].reg, C0hack[i].val);
1305
1306 break;
1307 case PHY_BCOM_ID1_A1:
1308 /*
1309 * Workaround BCOM Errata for the A1 type.
1310 * Write magic patterns to reserved registers.
1311 */
1312 for (i = 0; i < ARRAY_SIZE(A1hack); i++)
1313 xm_phy_write(hw, port,
1314 A1hack[i].reg, A1hack[i].val);
1315 break;
1316 }
1317
1318 /*
1319 * Workaround BCOM Errata (#10523) for all BCom PHYs.
1320 * Disable Power Management after reset.
1321 */
1322 r = xm_phy_read(hw, port, PHY_BCOM_AUX_CTRL);
1323 r |= PHY_B_AC_DIS_PM;
1324 xm_phy_write(hw, port, PHY_BCOM_AUX_CTRL, r);
1325
1326 /* Dummy read */
1327 xm_read16(hw, port, XM_ISRC);
1328
1329 ext = PHY_B_PEC_EN_LTR; /* enable tx led */
1330 ctl = PHY_CT_SP1000; /* always 1000mbit */
1331
1332 if (skge->autoneg == AUTONEG_ENABLE) {
1333 /*
1334 * Workaround BCOM Errata #1 for the C5 type.
1335 * 1000Base-T Link Acquisition Failure in Slave Mode
1336 * Set Repeater/DTE bit 10 of the 1000Base-T Control Register
1337 */
1338 u16 adv = PHY_B_1000C_RD;
1339 if (skge->advertising & ADVERTISED_1000baseT_Half)
1340 adv |= PHY_B_1000C_AHD;
1341 if (skge->advertising & ADVERTISED_1000baseT_Full)
1342 adv |= PHY_B_1000C_AFD;
1343 xm_phy_write(hw, port, PHY_BCOM_1000T_CTRL, adv);
1344
1345 ctl |= PHY_CT_ANE | PHY_CT_RE_CFG;
1346 } else {
1347 if (skge->duplex == DUPLEX_FULL)
1348 ctl |= PHY_CT_DUP_MD;
1349 /* Force to slave */
1350 xm_phy_write(hw, port, PHY_BCOM_1000T_CTRL, PHY_B_1000C_MSE);
1351 }
1352
1353 /* Set autonegotiation pause parameters */
1354 xm_phy_write(hw, port, PHY_BCOM_AUNE_ADV,
1355 phy_pause_map[skge->flow_control] | PHY_AN_CSMA);
1356
1357 /* Handle Jumbo frames */
1358 if (hw->dev[port]->mtu > ETH_DATA_LEN) {
1359 xm_phy_write(hw, port, PHY_BCOM_AUX_CTRL,
1360 PHY_B_AC_TX_TST | PHY_B_AC_LONG_PACK);
1361
1362 ext |= PHY_B_PEC_HIGH_LA;
1363
1364 }
1365
1366 xm_phy_write(hw, port, PHY_BCOM_P_EXT_CTRL, ext);
1367 xm_phy_write(hw, port, PHY_BCOM_CTRL, ctl);
1368
1369 /* Use link status change interrupt */
1370 xm_phy_write(hw, port, PHY_BCOM_INT_MASK, PHY_B_DEF_MSK);
1371}
1372
1373static void xm_phy_init(struct skge_port *skge)
1374{
1375 struct skge_hw *hw = skge->hw;
1376 int port = skge->port;
1377 u16 ctrl = 0;
1378
1379 if (skge->autoneg == AUTONEG_ENABLE) {
1380 if (skge->advertising & ADVERTISED_1000baseT_Half)
1381 ctrl |= PHY_X_AN_HD;
1382 if (skge->advertising & ADVERTISED_1000baseT_Full)
1383 ctrl |= PHY_X_AN_FD;
1384
1385 ctrl |= fiber_pause_map[skge->flow_control];
1386
1387 xm_phy_write(hw, port, PHY_XMAC_AUNE_ADV, ctrl);
1388
1389 /* Restart Auto-negotiation */
1390 ctrl = PHY_CT_ANE | PHY_CT_RE_CFG;
1391 } else {
1392 /* Set DuplexMode in Config register */
1393 if (skge->duplex == DUPLEX_FULL)
1394 ctrl |= PHY_CT_DUP_MD;
1395 /*
1396 * Do NOT enable Auto-negotiation here. This would hold
1397 * the link down because no IDLEs are transmitted
1398 */
1399 }
1400
1401 xm_phy_write(hw, port, PHY_XMAC_CTRL, ctrl);
1402
1403 /* Poll PHY for status changes */
1404 mod_timer(&skge->link_timer, jiffies + LINK_HZ);
1405}
1406
1407static int xm_check_link(struct net_device *dev)
1408{
1409 struct skge_port *skge = netdev_priv(dev);
1410 struct skge_hw *hw = skge->hw;
1411 int port = skge->port;
1412 u16 status;
1413
1414 /* read twice because of latch */
1415 xm_phy_read(hw, port, PHY_XMAC_STAT);
1416 status = xm_phy_read(hw, port, PHY_XMAC_STAT);
1417
1418 if ((status & PHY_ST_LSYNC) == 0) {
1419 xm_link_down(hw, port);
1420 return 0;
1421 }
1422
1423 if (skge->autoneg == AUTONEG_ENABLE) {
1424 u16 lpa, res;
1425
1426 if (!(status & PHY_ST_AN_OVER))
1427 return 0;
1428
1429 lpa = xm_phy_read(hw, port, PHY_XMAC_AUNE_LP);
1430 if (lpa & PHY_B_AN_RF) {
1431 netdev_notice(dev, "remote fault\n");
1432 return 0;
1433 }
1434
1435 res = xm_phy_read(hw, port, PHY_XMAC_RES_ABI);
1436
1437 /* Check Duplex mismatch */
1438 switch (res & (PHY_X_RS_HD | PHY_X_RS_FD)) {
1439 case PHY_X_RS_FD:
1440 skge->duplex = DUPLEX_FULL;
1441 break;
1442 case PHY_X_RS_HD:
1443 skge->duplex = DUPLEX_HALF;
1444 break;
1445 default:
1446 netdev_notice(dev, "duplex mismatch\n");
1447 return 0;
1448 }
1449
1450 /* We are using IEEE 802.3z/D5.0 Table 37-4 */
1451 if ((skge->flow_control == FLOW_MODE_SYMMETRIC ||
1452 skge->flow_control == FLOW_MODE_SYM_OR_REM) &&
1453 (lpa & PHY_X_P_SYM_MD))
1454 skge->flow_status = FLOW_STAT_SYMMETRIC;
1455 else if (skge->flow_control == FLOW_MODE_SYM_OR_REM &&
1456 (lpa & PHY_X_RS_PAUSE) == PHY_X_P_ASYM_MD)
1457 /* Enable PAUSE receive, disable PAUSE transmit */
1458 skge->flow_status = FLOW_STAT_REM_SEND;
1459 else if (skge->flow_control == FLOW_MODE_LOC_SEND &&
1460 (lpa & PHY_X_RS_PAUSE) == PHY_X_P_BOTH_MD)
1461 /* Disable PAUSE receive, enable PAUSE transmit */
1462 skge->flow_status = FLOW_STAT_LOC_SEND;
1463 else
1464 skge->flow_status = FLOW_STAT_NONE;
1465
1466 skge->speed = SPEED_1000;
1467 }
1468
1469 if (!netif_carrier_ok(dev))
1470 genesis_link_up(skge);
1471 return 1;
1472}
1473
1474/* Poll to check for the link coming up.
1475 *
1476 * Since the internal PHY is wired to a level-triggered pin, we can't
1477 * get an interrupt when carrier is detected, so we have to poll for
1478 * the link coming up.
1479 */
1480static void xm_link_timer(unsigned long arg)
1481{
1482 struct skge_port *skge = (struct skge_port *) arg;
1483 struct net_device *dev = skge->netdev;
1484 struct skge_hw *hw = skge->hw;
1485 int port = skge->port;
1486 int i;
1487 unsigned long flags;
1488
1489 if (!netif_running(dev))
1490 return;
1491
1492 spin_lock_irqsave(&hw->phy_lock, flags);
1493
1494	 /*
1495	 * Verify the link by checking the GPIO register three times.
1496	 * This GPIO input carries the signal from the link_sync pin.
1497	 */
1498 for (i = 0; i < 3; i++) {
1499 if (xm_read16(hw, port, XM_GP_PORT) & XM_GP_INP_ASS)
1500 goto link_down;
1501 }
1502
1503 /* Re-enable interrupt to detect link down */
1504 if (xm_check_link(dev)) {
1505 u16 msk = xm_read16(hw, port, XM_IMSK);
1506 msk &= ~XM_IS_INP_ASS;
1507 xm_write16(hw, port, XM_IMSK, msk);
1508 xm_read16(hw, port, XM_ISRC);
1509 } else {
1510link_down:
1511 mod_timer(&skge->link_timer,
1512 round_jiffies(jiffies + LINK_HZ));
1513 }
1514 spin_unlock_irqrestore(&hw->phy_lock, flags);
1515}
1516
1517static void genesis_mac_init(struct skge_hw *hw, int port)
1518{
1519 struct net_device *dev = hw->dev[port];
1520 struct skge_port *skge = netdev_priv(dev);
1521 int jumbo = hw->dev[port]->mtu > ETH_DATA_LEN;
1522 int i;
1523 u32 r;
1524 static const u8 zero[6] = { 0 };
1525
1526 for (i = 0; i < 10; i++) {
1527 skge_write16(hw, SK_REG(port, TX_MFF_CTRL1),
1528 MFF_SET_MAC_RST);
1529 if (skge_read16(hw, SK_REG(port, TX_MFF_CTRL1)) & MFF_SET_MAC_RST)
1530 goto reset_ok;
1531 udelay(1);
1532 }
1533
1534 netdev_warn(dev, "genesis reset failed\n");
1535
1536 reset_ok:
1537 /* Unreset the XMAC. */
1538 skge_write16(hw, SK_REG(port, TX_MFF_CTRL1), MFF_CLR_MAC_RST);
1539
1540 /*
1541 * Perform additional initialization for external PHYs,
1542 * namely for the 1000baseTX cards that use the XMAC's
1543 * GMII mode.
1544 */
1545 if (hw->phy_type != SK_PHY_XMAC) {
1546 /* Take external Phy out of reset */
1547 r = skge_read32(hw, B2_GP_IO);
1548 if (port == 0)
1549 r |= GP_DIR_0|GP_IO_0;
1550 else
1551 r |= GP_DIR_2|GP_IO_2;
1552
1553 skge_write32(hw, B2_GP_IO, r);
1554
1555 /* Enable GMII interface */
1556 xm_write16(hw, port, XM_HW_CFG, XM_HW_GMII_MD);
1557 }
1558
1559
1560 switch (hw->phy_type) {
1561 case SK_PHY_XMAC:
1562 xm_phy_init(skge);
1563 break;
1564 case SK_PHY_BCOM:
1565 bcom_phy_init(skge);
1566 bcom_check_link(hw, port);
1567 }
1568
1569 /* Set Station Address */
1570 xm_outaddr(hw, port, XM_SA, dev->dev_addr);
1571
1572	 /* We don't use match addresses, so clear them */
1573 for (i = 1; i < 16; i++)
1574 xm_outaddr(hw, port, XM_EXM(i), zero);
1575
1576 /* Clear MIB counters */
1577 xm_write16(hw, port, XM_STAT_CMD,
1578 XM_SC_CLR_RXC | XM_SC_CLR_TXC);
1579 /* Clear two times according to Errata #3 */
1580 xm_write16(hw, port, XM_STAT_CMD,
1581 XM_SC_CLR_RXC | XM_SC_CLR_TXC);
1582
1583 /* configure Rx High Water Mark (XM_RX_HI_WM) */
1584 xm_write16(hw, port, XM_RX_HI_WM, 1450);
1585
1586 /* We don't need the FCS appended to the packet. */
1587 r = XM_RX_LENERR_OK | XM_RX_STRIP_FCS;
1588 if (jumbo)
1589 r |= XM_RX_BIG_PK_OK;
1590
1591 if (skge->duplex == DUPLEX_HALF) {
1592 /*
1593 * If in manual half duplex mode the other side might be in
1594 * full duplex mode, so ignore if a carrier extension is not seen
1595 * on frames received
1596 */
1597 r |= XM_RX_DIS_CEXT;
1598 }
1599 xm_write16(hw, port, XM_RX_CMD, r);
1600
1601 /* We want short frames padded to 60 bytes. */
1602 xm_write16(hw, port, XM_TX_CMD, XM_TX_AUTO_PAD);
1603
1604 /* Increase threshold for jumbo frames on dual port */
1605 if (hw->ports > 1 && jumbo)
1606 xm_write16(hw, port, XM_TX_THR, 1020);
1607 else
1608 xm_write16(hw, port, XM_TX_THR, 512);
1609
1610 /*
1611	 * Enable the reception of all error frames. This is
1612 * a necessary evil due to the design of the XMAC. The
1613 * XMAC's receive FIFO is only 8K in size, however jumbo
1614 * frames can be up to 9000 bytes in length. When bad
1615 * frame filtering is enabled, the XMAC's RX FIFO operates
1616 * in 'store and forward' mode. For this to work, the
1617 * entire frame has to fit into the FIFO, but that means
1618 * that jumbo frames larger than 8192 bytes will be
1619 * truncated. Disabling all bad frame filtering causes
1620 * the RX FIFO to operate in streaming mode, in which
1621 * case the XMAC will start transferring frames out of the
1622 * RX FIFO as soon as the FIFO threshold is reached.
1623 */
1624 xm_write32(hw, port, XM_MODE, XM_DEF_MODE);
1625
1626
1627 /*
1628 * Initialize the Receive Counter Event Mask (XM_RX_EV_MSK)
1629 * - Enable all bits excepting 'Octets Rx OK Low CntOv'
1630 * and 'Octets Rx OK Hi Cnt Ov'.
1631 */
1632 xm_write32(hw, port, XM_RX_EV_MSK, XMR_DEF_MSK);
1633
1634 /*
1635 * Initialize the Transmit Counter Event Mask (XM_TX_EV_MSK)
1636 * - Enable all bits excepting 'Octets Tx OK Low CntOv'
1637 * and 'Octets Tx OK Hi Cnt Ov'.
1638 */
1639 xm_write32(hw, port, XM_TX_EV_MSK, XMT_DEF_MSK);
1640
1641 /* Configure MAC arbiter */
1642 skge_write16(hw, B3_MA_TO_CTRL, MA_RST_CLR);
1643
1644 /* configure timeout values */
1645 skge_write8(hw, B3_MA_TOINI_RX1, 72);
1646 skge_write8(hw, B3_MA_TOINI_RX2, 72);
1647 skge_write8(hw, B3_MA_TOINI_TX1, 72);
1648 skge_write8(hw, B3_MA_TOINI_TX2, 72);
1649
1650 skge_write8(hw, B3_MA_RCINI_RX1, 0);
1651 skge_write8(hw, B3_MA_RCINI_RX2, 0);
1652 skge_write8(hw, B3_MA_RCINI_TX1, 0);
1653 skge_write8(hw, B3_MA_RCINI_TX2, 0);
1654
1655 /* Configure Rx MAC FIFO */
1656 skge_write8(hw, SK_REG(port, RX_MFF_CTRL2), MFF_RST_CLR);
1657 skge_write16(hw, SK_REG(port, RX_MFF_CTRL1), MFF_ENA_TIM_PAT);
1658 skge_write8(hw, SK_REG(port, RX_MFF_CTRL2), MFF_ENA_OP_MD);
1659
1660 /* Configure Tx MAC FIFO */
1661 skge_write8(hw, SK_REG(port, TX_MFF_CTRL2), MFF_RST_CLR);
1662 skge_write16(hw, SK_REG(port, TX_MFF_CTRL1), MFF_TX_CTRL_DEF);
1663 skge_write8(hw, SK_REG(port, TX_MFF_CTRL2), MFF_ENA_OP_MD);
1664
1665 if (jumbo) {
1666 /* Enable frame flushing if jumbo frames used */
1667 skge_write16(hw, SK_REG(port, RX_MFF_CTRL1), MFF_ENA_FLUSH);
1668 } else {
1669 /* enable timeout timers if normal frames */
1670 skge_write16(hw, B3_PA_CTRL,
1671 (port == 0) ? PA_ENA_TO_TX1 : PA_ENA_TO_TX2);
1672 }
1673}
1674
1675static void genesis_stop(struct skge_port *skge)
1676{
1677 struct skge_hw *hw = skge->hw;
1678 int port = skge->port;
1679 unsigned retries = 1000;
1680 u16 cmd;
1681
1682 /* Disable Tx and Rx */
1683 cmd = xm_read16(hw, port, XM_MMU_CMD);
1684 cmd &= ~(XM_MMU_ENA_RX | XM_MMU_ENA_TX);
1685 xm_write16(hw, port, XM_MMU_CMD, cmd);
1686
1687 genesis_reset(hw, port);
1688
1689 /* Clear Tx packet arbiter timeout IRQ */
1690 skge_write16(hw, B3_PA_CTRL,
1691 port == 0 ? PA_CLR_TO_TX1 : PA_CLR_TO_TX2);
1692
1693 /* Reset the MAC */
1694 skge_write16(hw, SK_REG(port, TX_MFF_CTRL1), MFF_CLR_MAC_RST);
1695 do {
1696 skge_write16(hw, SK_REG(port, TX_MFF_CTRL1), MFF_SET_MAC_RST);
1697 if (!(skge_read16(hw, SK_REG(port, TX_MFF_CTRL1)) & MFF_SET_MAC_RST))
1698 break;
1699 } while (--retries > 0);
1700
1701 /* For external PHYs there must be special handling */
1702 if (hw->phy_type != SK_PHY_XMAC) {
1703 u32 reg = skge_read32(hw, B2_GP_IO);
1704 if (port == 0) {
1705 reg |= GP_DIR_0;
1706 reg &= ~GP_IO_0;
1707 } else {
1708 reg |= GP_DIR_2;
1709 reg &= ~GP_IO_2;
1710 }
1711 skge_write32(hw, B2_GP_IO, reg);
1712 skge_read32(hw, B2_GP_IO);
1713 }
1714
1715 xm_write16(hw, port, XM_MMU_CMD,
1716 xm_read16(hw, port, XM_MMU_CMD)
1717 & ~(XM_MMU_ENA_RX | XM_MMU_ENA_TX));
1718
1719 xm_read16(hw, port, XM_MMU_CMD);
1720}
1721
1722
1723static void genesis_get_stats(struct skge_port *skge, u64 *data)
1724{
1725 struct skge_hw *hw = skge->hw;
1726 int port = skge->port;
1727 int i;
1728 unsigned long timeout = jiffies + HZ;
1729
1730 xm_write16(hw, port,
1731 XM_STAT_CMD, XM_SC_SNP_TXC | XM_SC_SNP_RXC);
1732
1733 /* wait for update to complete */
1734 while (xm_read16(hw, port, XM_STAT_CMD)
1735 & (XM_SC_SNP_TXC | XM_SC_SNP_RXC)) {
1736 if (time_after(jiffies, timeout))
1737 break;
1738 udelay(10);
1739 }
1740
1741	 /* special case for the 64-bit octet counters */
1742 data[0] = (u64) xm_read32(hw, port, XM_TXO_OK_HI) << 32
1743 | xm_read32(hw, port, XM_TXO_OK_LO);
1744 data[1] = (u64) xm_read32(hw, port, XM_RXO_OK_HI) << 32
1745 | xm_read32(hw, port, XM_RXO_OK_LO);
1746
1747 for (i = 2; i < ARRAY_SIZE(skge_stats); i++)
1748 data[i] = xm_read32(hw, port, skge_stats[i].xmac_offset);
1749}
1750
1751static void genesis_mac_intr(struct skge_hw *hw, int port)
1752{
1753 struct net_device *dev = hw->dev[port];
1754 struct skge_port *skge = netdev_priv(dev);
1755 u16 status = xm_read16(hw, port, XM_ISRC);
1756
1757 netif_printk(skge, intr, KERN_DEBUG, skge->netdev,
1758 "mac interrupt status 0x%x\n", status);
1759
1760 if (hw->phy_type == SK_PHY_XMAC && (status & XM_IS_INP_ASS)) {
1761 xm_link_down(hw, port);
1762 mod_timer(&skge->link_timer, jiffies + 1);
1763 }
1764
1765 if (status & XM_IS_TXF_UR) {
1766 xm_write32(hw, port, XM_MODE, XM_MD_FTF);
1767 ++dev->stats.tx_fifo_errors;
1768 }
1769}
1770
1771static void genesis_link_up(struct skge_port *skge)
1772{
1773 struct skge_hw *hw = skge->hw;
1774 int port = skge->port;
1775 u16 cmd, msk;
1776 u32 mode;
1777
1778 cmd = xm_read16(hw, port, XM_MMU_CMD);
1779
1780 /*
1781	 * Enabling pause frame reception is required for 1000BT
1782	 * because the XMAC is not reset when the link goes down.
1783 */
1784 if (skge->flow_status == FLOW_STAT_NONE ||
1785 skge->flow_status == FLOW_STAT_LOC_SEND)
1786 /* Disable Pause Frame Reception */
1787 cmd |= XM_MMU_IGN_PF;
1788 else
1789 /* Enable Pause Frame Reception */
1790 cmd &= ~XM_MMU_IGN_PF;
1791
1792 xm_write16(hw, port, XM_MMU_CMD, cmd);
1793
1794 mode = xm_read32(hw, port, XM_MODE);
1795 if (skge->flow_status == FLOW_STAT_SYMMETRIC ||
1796 skge->flow_status == FLOW_STAT_LOC_SEND) {
1797 /*
1798 * Configure Pause Frame Generation
1799 * Use internal and external Pause Frame Generation.
1800 * Sending pause frames is edge triggered.
1801 * Send a Pause frame with the maximum pause time if
1802	 * an internal or external FIFO full condition occurs.
1803 * Send a zero pause time frame to re-start transmission.
1804 */
1805 /* XM_PAUSE_DA = '010000C28001' (default) */
1806 /* XM_MAC_PTIME = 0xffff (maximum) */
1807 /* remember this value is defined in big endian (!) */
1808 xm_write16(hw, port, XM_MAC_PTIME, 0xffff);
1809
1810 mode |= XM_PAUSE_MODE;
1811 skge_write16(hw, SK_REG(port, RX_MFF_CTRL1), MFF_ENA_PAUSE);
1812 } else {
1813	 /*
1814	 * Disabling pause frame generation is likewise required
1815	 * because the XMAC is not reset when the link goes down.
1816	 */
1817 /* Disable Pause Mode in Mode Register */
1818 mode &= ~XM_PAUSE_MODE;
1819
1820 skge_write16(hw, SK_REG(port, RX_MFF_CTRL1), MFF_DIS_PAUSE);
1821 }
1822
1823 xm_write32(hw, port, XM_MODE, mode);
1824
1825 /* Turn on detection of Tx underrun */
1826 msk = xm_read16(hw, port, XM_IMSK);
1827 msk &= ~XM_IS_TXF_UR;
1828 xm_write16(hw, port, XM_IMSK, msk);
1829
1830 xm_read16(hw, port, XM_ISRC);
1831
1832 /* get MMU Command Reg. */
1833 cmd = xm_read16(hw, port, XM_MMU_CMD);
1834 if (hw->phy_type != SK_PHY_XMAC && skge->duplex == DUPLEX_FULL)
1835 cmd |= XM_MMU_GMII_FD;
1836
1837 /*
1838 * Workaround BCOM Errata (#10523) for all BCom Phys
1839 * Enable Power Management after link up
1840 */
1841 if (hw->phy_type == SK_PHY_BCOM) {
1842 xm_phy_write(hw, port, PHY_BCOM_AUX_CTRL,
1843 xm_phy_read(hw, port, PHY_BCOM_AUX_CTRL)
1844 & ~PHY_B_AC_DIS_PM);
1845 xm_phy_write(hw, port, PHY_BCOM_INT_MASK, PHY_B_DEF_MSK);
1846 }
1847
1848 /* enable Rx/Tx */
1849 xm_write16(hw, port, XM_MMU_CMD,
1850 cmd | XM_MMU_ENA_RX | XM_MMU_ENA_TX);
1851 skge_link_up(skge);
1852}
1853
1854
1855static inline void bcom_phy_intr(struct skge_port *skge)
1856{
1857 struct skge_hw *hw = skge->hw;
1858 int port = skge->port;
1859 u16 isrc;
1860
1861 isrc = xm_phy_read(hw, port, PHY_BCOM_INT_STAT);
1862 netif_printk(skge, intr, KERN_DEBUG, skge->netdev,
1863 "phy interrupt status 0x%x\n", isrc);
1864
1865 if (isrc & PHY_B_IS_PSE)
1866 pr_err("%s: uncorrectable pair swap error\n",
1867 hw->dev[port]->name);
1868
1869 /* Workaround BCom Errata:
1870 * enable and disable loopback mode if "NO HCD" occurs.
1871 */
1872 if (isrc & PHY_B_IS_NO_HDCL) {
1873 u16 ctrl = xm_phy_read(hw, port, PHY_BCOM_CTRL);
1874 xm_phy_write(hw, port, PHY_BCOM_CTRL,
1875 ctrl | PHY_CT_LOOP);
1876 xm_phy_write(hw, port, PHY_BCOM_CTRL,
1877 ctrl & ~PHY_CT_LOOP);
1878 }
1879
1880 if (isrc & (PHY_B_IS_AN_PR | PHY_B_IS_LST_CHANGE))
1881 bcom_check_link(hw, port);
1882
1883}
1884
1885static int gm_phy_write(struct skge_hw *hw, int port, u16 reg, u16 val)
1886{
1887 int i;
1888
1889 gma_write16(hw, port, GM_SMI_DATA, val);
1890 gma_write16(hw, port, GM_SMI_CTRL,
1891 GM_SMI_CT_PHY_AD(hw->phy_addr) | GM_SMI_CT_REG_AD(reg));
1892 for (i = 0; i < PHY_RETRIES; i++) {
1893 udelay(1);
1894
1895 if (!(gma_read16(hw, port, GM_SMI_CTRL) & GM_SMI_CT_BUSY))
1896 return 0;
1897 }
1898
1899 pr_warning("%s: phy write timeout\n", hw->dev[port]->name);
1900 return -EIO;
1901}
1902
1903static int __gm_phy_read(struct skge_hw *hw, int port, u16 reg, u16 *val)
1904{
1905 int i;
1906
1907 gma_write16(hw, port, GM_SMI_CTRL,
1908 GM_SMI_CT_PHY_AD(hw->phy_addr)
1909 | GM_SMI_CT_REG_AD(reg) | GM_SMI_CT_OP_RD);
1910
1911 for (i = 0; i < PHY_RETRIES; i++) {
1912 udelay(1);
1913 if (gma_read16(hw, port, GM_SMI_CTRL) & GM_SMI_CT_RD_VAL)
1914 goto ready;
1915 }
1916
1917 return -ETIMEDOUT;
1918 ready:
1919 *val = gma_read16(hw, port, GM_SMI_DATA);
1920 return 0;
1921}
1922
1923static u16 gm_phy_read(struct skge_hw *hw, int port, u16 reg)
1924{
1925 u16 v = 0;
1926 if (__gm_phy_read(hw, port, reg, &v))
1927 pr_warning("%s: phy read timeout\n", hw->dev[port]->name);
1928 return v;
1929}
1930
1931/* Marvell PHY initialization */
1932static void yukon_init(struct skge_hw *hw, int port)
1933{
1934 struct skge_port *skge = netdev_priv(hw->dev[port]);
1935 u16 ctrl, ct1000, adv;
1936
1937 if (skge->autoneg == AUTONEG_ENABLE) {
1938 u16 ectrl = gm_phy_read(hw, port, PHY_MARV_EXT_CTRL);
1939
1940 ectrl &= ~(PHY_M_EC_M_DSC_MSK | PHY_M_EC_S_DSC_MSK |
1941 PHY_M_EC_MAC_S_MSK);
1942 ectrl |= PHY_M_EC_MAC_S(MAC_TX_CLK_25_MHZ);
1943
1944 ectrl |= PHY_M_EC_M_DSC(0) | PHY_M_EC_S_DSC(1);
1945
1946 gm_phy_write(hw, port, PHY_MARV_EXT_CTRL, ectrl);
1947 }
1948
1949 ctrl = gm_phy_read(hw, port, PHY_MARV_CTRL);
1950 if (skge->autoneg == AUTONEG_DISABLE)
1951 ctrl &= ~PHY_CT_ANE;
1952
1953 ctrl |= PHY_CT_RESET;
1954 gm_phy_write(hw, port, PHY_MARV_CTRL, ctrl);
1955
1956 ctrl = 0;
1957 ct1000 = 0;
1958 adv = PHY_AN_CSMA;
1959
1960 if (skge->autoneg == AUTONEG_ENABLE) {
1961 if (hw->copper) {
1962 if (skge->advertising & ADVERTISED_1000baseT_Full)
1963 ct1000 |= PHY_M_1000C_AFD;
1964 if (skge->advertising & ADVERTISED_1000baseT_Half)
1965 ct1000 |= PHY_M_1000C_AHD;
1966 if (skge->advertising & ADVERTISED_100baseT_Full)
1967 adv |= PHY_M_AN_100_FD;
1968 if (skge->advertising & ADVERTISED_100baseT_Half)
1969 adv |= PHY_M_AN_100_HD;
1970 if (skge->advertising & ADVERTISED_10baseT_Full)
1971 adv |= PHY_M_AN_10_FD;
1972 if (skge->advertising & ADVERTISED_10baseT_Half)
1973 adv |= PHY_M_AN_10_HD;
1974
1975 /* Set Flow-control capabilities */
1976 adv |= phy_pause_map[skge->flow_control];
1977 } else {
1978 if (skge->advertising & ADVERTISED_1000baseT_Full)
1979 adv |= PHY_M_AN_1000X_AFD;
1980 if (skge->advertising & ADVERTISED_1000baseT_Half)
1981 adv |= PHY_M_AN_1000X_AHD;
1982
1983 adv |= fiber_pause_map[skge->flow_control];
1984 }
1985
1986 /* Restart Auto-negotiation */
1987 ctrl |= PHY_CT_ANE | PHY_CT_RE_CFG;
1988 } else {
1989 /* forced speed/duplex settings */
1990 ct1000 = PHY_M_1000C_MSE;
1991
1992 if (skge->duplex == DUPLEX_FULL)
1993 ctrl |= PHY_CT_DUP_MD;
1994
1995 switch (skge->speed) {
1996 case SPEED_1000:
1997 ctrl |= PHY_CT_SP1000;
1998 break;
1999 case SPEED_100:
2000 ctrl |= PHY_CT_SP100;
2001 break;
2002 }
2003
2004 ctrl |= PHY_CT_RESET;
2005 }
2006
2007 gm_phy_write(hw, port, PHY_MARV_1000T_CTRL, ct1000);
2008
2009 gm_phy_write(hw, port, PHY_MARV_AUNE_ADV, adv);
2010 gm_phy_write(hw, port, PHY_MARV_CTRL, ctrl);
2011
2012 /* Enable phy interrupt on autonegotiation complete (or link up) */
2013 if (skge->autoneg == AUTONEG_ENABLE)
2014 gm_phy_write(hw, port, PHY_MARV_INT_MASK, PHY_M_IS_AN_MSK);
2015 else
2016 gm_phy_write(hw, port, PHY_MARV_INT_MASK, PHY_M_IS_DEF_MSK);
2017}
2018
2019static void yukon_reset(struct skge_hw *hw, int port)
2020{
2021 gm_phy_write(hw, port, PHY_MARV_INT_MASK, 0);/* disable PHY IRQs */
2022 gma_write16(hw, port, GM_MC_ADDR_H1, 0); /* clear MC hash */
2023 gma_write16(hw, port, GM_MC_ADDR_H2, 0);
2024 gma_write16(hw, port, GM_MC_ADDR_H3, 0);
2025 gma_write16(hw, port, GM_MC_ADDR_H4, 0);
2026
2027 gma_write16(hw, port, GM_RX_CTRL,
2028 gma_read16(hw, port, GM_RX_CTRL)
2029 | GM_RXCR_UCF_ENA | GM_RXCR_MCF_ENA);
2030}
2031
2032/* Apparently, early versions of Yukon-Lite had wrong chip_id? */
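/* The probe below appears to rely on Rev. A0 being the only variant
 * where the top byte of the Flash Address Register (B2_FAR) latches a
 * write; the original register value is restored afterwards.
 */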
2033static int is_yukon_lite_a0(struct skge_hw *hw)
2034{
2035 u32 reg;
2036 int ret;
2037
2038 if (hw->chip_id != CHIP_ID_YUKON)
2039 return 0;
2040
2041 reg = skge_read32(hw, B2_FAR);
2042 skge_write8(hw, B2_FAR + 3, 0xff);
2043 ret = (skge_read8(hw, B2_FAR + 3) != 0);
2044 skge_write32(hw, B2_FAR, reg);
2045 return ret;
2046}
2047
2048static void yukon_mac_init(struct skge_hw *hw, int port)
2049{
2050 struct skge_port *skge = netdev_priv(hw->dev[port]);
2051 int i;
2052 u32 reg;
2053 const u8 *addr = hw->dev[port]->dev_addr;
2054
2055 /* WA code for COMA mode -- set PHY reset */
2056 if (hw->chip_id == CHIP_ID_YUKON_LITE &&
2057 hw->chip_rev >= CHIP_REV_YU_LITE_A3) {
2058 reg = skge_read32(hw, B2_GP_IO);
2059 reg |= GP_DIR_9 | GP_IO_9;
2060 skge_write32(hw, B2_GP_IO, reg);
2061 }
2062
2063 /* hard reset */
2064 skge_write32(hw, SK_REG(port, GPHY_CTRL), GPC_RST_SET);
2065 skge_write32(hw, SK_REG(port, GMAC_CTRL), GMC_RST_SET);
2066
2067 /* WA code for COMA mode -- clear PHY reset */
2068 if (hw->chip_id == CHIP_ID_YUKON_LITE &&
2069 hw->chip_rev >= CHIP_REV_YU_LITE_A3) {
2070 reg = skge_read32(hw, B2_GP_IO);
2071 reg |= GP_DIR_9;
2072 reg &= ~GP_IO_9;
2073 skge_write32(hw, B2_GP_IO, reg);
2074 }
2075
2076 /* Set hardware config mode */
2077 reg = GPC_INT_POL_HI | GPC_DIS_FC | GPC_DIS_SLEEP |
2078 GPC_ENA_XC | GPC_ANEG_ADV_ALL_M | GPC_ENA_PAUSE;
2079 reg |= hw->copper ? GPC_HWCFG_GMII_COP : GPC_HWCFG_GMII_FIB;
2080
2081 /* Clear GMC reset */
2082 skge_write32(hw, SK_REG(port, GPHY_CTRL), reg | GPC_RST_SET);
2083 skge_write32(hw, SK_REG(port, GPHY_CTRL), reg | GPC_RST_CLR);
2084 skge_write32(hw, SK_REG(port, GMAC_CTRL), GMC_PAUSE_ON | GMC_RST_CLR);
2085
2086 if (skge->autoneg == AUTONEG_DISABLE) {
2087 reg = GM_GPCR_AU_ALL_DIS;
2088 gma_write16(hw, port, GM_GP_CTRL,
2089 gma_read16(hw, port, GM_GP_CTRL) | reg);
2090
2091 switch (skge->speed) {
2092 case SPEED_1000:
2093 reg &= ~GM_GPCR_SPEED_100;
2094 reg |= GM_GPCR_SPEED_1000;
2095 break;
2096 case SPEED_100:
2097 reg &= ~GM_GPCR_SPEED_1000;
2098 reg |= GM_GPCR_SPEED_100;
2099 break;
2100 case SPEED_10:
2101 reg &= ~(GM_GPCR_SPEED_1000 | GM_GPCR_SPEED_100);
2102 break;
2103 }
2104
2105 if (skge->duplex == DUPLEX_FULL)
2106 reg |= GM_GPCR_DUP_FULL;
2107 } else
2108 reg = GM_GPCR_SPEED_1000 | GM_GPCR_SPEED_100 | GM_GPCR_DUP_FULL;
2109
2110 switch (skge->flow_control) {
2111 case FLOW_MODE_NONE:
2112 skge_write32(hw, SK_REG(port, GMAC_CTRL), GMC_PAUSE_OFF);
2113 reg |= GM_GPCR_FC_TX_DIS | GM_GPCR_FC_RX_DIS | GM_GPCR_AU_FCT_DIS;
2114 break;
2115 case FLOW_MODE_LOC_SEND:
2116 /* disable Rx flow-control */
2117 reg |= GM_GPCR_FC_RX_DIS | GM_GPCR_AU_FCT_DIS;
2118 break;
2119 case FLOW_MODE_SYMMETRIC:
2120 case FLOW_MODE_SYM_OR_REM:
2121 /* enable Tx & Rx flow-control */
2122 break;
2123 }
2124
2125 gma_write16(hw, port, GM_GP_CTRL, reg);
2126 skge_read16(hw, SK_REG(port, GMAC_IRQ_SRC));
2127
2128 yukon_init(hw, port);
2129
2130 /* MIB clear */
2131 reg = gma_read16(hw, port, GM_PHY_ADDR);
2132 gma_write16(hw, port, GM_PHY_ADDR, reg | GM_PAR_MIB_CLR);
2133
2134 for (i = 0; i < GM_MIB_CNT_SIZE; i++)
2135 gma_read16(hw, port, GM_MIB_CNT_BASE + 8*i);
2136 gma_write16(hw, port, GM_PHY_ADDR, reg);
2137
2138 /* transmit control */
2139 gma_write16(hw, port, GM_TX_CTRL, TX_COL_THR(TX_COL_DEF));
2140
2141 /* receive control reg: unicast + multicast + no FCS */
2142 gma_write16(hw, port, GM_RX_CTRL,
2143 GM_RXCR_UCF_ENA | GM_RXCR_CRC_DIS | GM_RXCR_MCF_ENA);
2144
2145 /* transmit flow control */
2146 gma_write16(hw, port, GM_TX_FLOW_CTRL, 0xffff);
2147
2148 /* transmit parameter */
2149 gma_write16(hw, port, GM_TX_PARAM,
2150 TX_JAM_LEN_VAL(TX_JAM_LEN_DEF) |
2151 TX_JAM_IPG_VAL(TX_JAM_IPG_DEF) |
2152 TX_IPG_JAM_DATA(TX_IPG_JAM_DEF));
2153
2154 /* configure the Serial Mode Register */
2155 reg = DATA_BLIND_VAL(DATA_BLIND_DEF)
2156 | GM_SMOD_VLAN_ENA
2157 | IPG_DATA_VAL(IPG_DATA_DEF);
2158
2159 if (hw->dev[port]->mtu > ETH_DATA_LEN)
2160 reg |= GM_SMOD_JUMBO_ENA;
2161
2162 gma_write16(hw, port, GM_SERIAL_MODE, reg);
2163
2164 /* physical address: used for pause frames */
2165 gma_set_addr(hw, port, GM_SRC_ADDR_1L, addr);
2166 /* virtual address for data */
2167 gma_set_addr(hw, port, GM_SRC_ADDR_2L, addr);
2168
2169	 /* clear the interrupt masks for counter overflows */
2170 gma_write16(hw, port, GM_TX_IRQ_MSK, 0);
2171 gma_write16(hw, port, GM_RX_IRQ_MSK, 0);
2172 gma_write16(hw, port, GM_TR_IRQ_MSK, 0);
2173
2174 /* Initialize Mac Fifo */
2175
2176 /* Configure Rx MAC FIFO */
2177 skge_write16(hw, SK_REG(port, RX_GMF_FL_MSK), RX_FF_FL_DEF_MSK);
2178 reg = GMF_OPER_ON | GMF_RX_F_FL_ON;
2179
2180 /* disable Rx GMAC FIFO Flush for YUKON-Lite Rev. A0 only */
2181 if (is_yukon_lite_a0(hw))
2182 reg &= ~GMF_RX_F_FL_ON;
2183
2184 skge_write8(hw, SK_REG(port, RX_GMF_CTRL_T), GMF_RST_CLR);
2185 skge_write16(hw, SK_REG(port, RX_GMF_CTRL_T), reg);
2186	 /*
2187	 * Because Pause Packet Truncation in the GMAC is not working,
2188	 * we have to increase the Flush Threshold to 64 bytes in order
2189	 * to flush pause packets from the Rx FIFO on Yukon-1.
2190	 */
2191 skge_write16(hw, SK_REG(port, RX_GMF_FL_THR), RX_GMF_FL_THR_DEF+1);
2192
2193 /* Configure Tx MAC FIFO */
2194 skge_write8(hw, SK_REG(port, TX_GMF_CTRL_T), GMF_RST_CLR);
2195 skge_write16(hw, SK_REG(port, TX_GMF_CTRL_T), GMF_OPER_ON);
2196}
2197
2198/* Go into power down mode */
2199static void yukon_suspend(struct skge_hw *hw, int port)
2200{
2201 u16 ctrl;
2202
2203 ctrl = gm_phy_read(hw, port, PHY_MARV_PHY_CTRL);
2204 ctrl |= PHY_M_PC_POL_R_DIS;
2205 gm_phy_write(hw, port, PHY_MARV_PHY_CTRL, ctrl);
2206
2207 ctrl = gm_phy_read(hw, port, PHY_MARV_CTRL);
2208 ctrl |= PHY_CT_RESET;
2209 gm_phy_write(hw, port, PHY_MARV_CTRL, ctrl);
2210
2211 /* switch IEEE compatible power down mode on */
2212 ctrl = gm_phy_read(hw, port, PHY_MARV_CTRL);
2213 ctrl |= PHY_CT_PDOWN;
2214 gm_phy_write(hw, port, PHY_MARV_CTRL, ctrl);
2215}
2216
2217static void yukon_stop(struct skge_port *skge)
2218{
2219 struct skge_hw *hw = skge->hw;
2220 int port = skge->port;
2221
2222 skge_write8(hw, SK_REG(port, GMAC_IRQ_MSK), 0);
2223 yukon_reset(hw, port);
2224
2225 gma_write16(hw, port, GM_GP_CTRL,
2226 gma_read16(hw, port, GM_GP_CTRL)
2227 & ~(GM_GPCR_TX_ENA|GM_GPCR_RX_ENA));
2228 gma_read16(hw, port, GM_GP_CTRL);
2229
2230 yukon_suspend(hw, port);
2231
2232 /* set GPHY Control reset */
2233 skge_write8(hw, SK_REG(port, GPHY_CTRL), GPC_RST_SET);
2234 skge_write8(hw, SK_REG(port, GMAC_CTRL), GMC_RST_SET);
2235}
2236
2237static void yukon_get_stats(struct skge_port *skge, u64 *data)
2238{
2239 struct skge_hw *hw = skge->hw;
2240 int port = skge->port;
2241 int i;
2242
2243 data[0] = (u64) gma_read32(hw, port, GM_TXO_OK_HI) << 32
2244 | gma_read32(hw, port, GM_TXO_OK_LO);
2245 data[1] = (u64) gma_read32(hw, port, GM_RXO_OK_HI) << 32
2246 | gma_read32(hw, port, GM_RXO_OK_LO);
2247
2248 for (i = 2; i < ARRAY_SIZE(skge_stats); i++)
2249 data[i] = gma_read32(hw, port,
2250 skge_stats[i].gma_offset);
2251}
2252
2253static void yukon_mac_intr(struct skge_hw *hw, int port)
2254{
2255 struct net_device *dev = hw->dev[port];
2256 struct skge_port *skge = netdev_priv(dev);
2257 u8 status = skge_read8(hw, SK_REG(port, GMAC_IRQ_SRC));
2258
2259 netif_printk(skge, intr, KERN_DEBUG, skge->netdev,
2260 "mac interrupt status 0x%x\n", status);
2261
2262 if (status & GM_IS_RX_FF_OR) {
2263 ++dev->stats.rx_fifo_errors;
2264 skge_write8(hw, SK_REG(port, RX_GMF_CTRL_T), GMF_CLI_RX_FO);
2265 }
2266
2267 if (status & GM_IS_TX_FF_UR) {
2268 ++dev->stats.tx_fifo_errors;
2269 skge_write8(hw, SK_REG(port, TX_GMF_CTRL_T), GMF_CLI_TX_FU);
2270 }
2271
2272}
2273
2274static u16 yukon_speed(const struct skge_hw *hw, u16 aux)
2275{
2276 switch (aux & PHY_M_PS_SPEED_MSK) {
2277 case PHY_M_PS_SPEED_1000:
2278 return SPEED_1000;
2279 case PHY_M_PS_SPEED_100:
2280 return SPEED_100;
2281 default:
2282 return SPEED_10;
2283 }
2284}
2285
2286static void yukon_link_up(struct skge_port *skge)
2287{
2288 struct skge_hw *hw = skge->hw;
2289 int port = skge->port;
2290 u16 reg;
2291
2292 /* Enable Transmit FIFO Underrun */
2293 skge_write8(hw, SK_REG(port, GMAC_IRQ_MSK), GMAC_DEF_MSK);
2294
2295 reg = gma_read16(hw, port, GM_GP_CTRL);
2296 if (skge->duplex == DUPLEX_FULL || skge->autoneg == AUTONEG_ENABLE)
2297 reg |= GM_GPCR_DUP_FULL;
2298
2299 /* enable Rx/Tx */
2300 reg |= GM_GPCR_RX_ENA | GM_GPCR_TX_ENA;
2301 gma_write16(hw, port, GM_GP_CTRL, reg);
2302
2303 gm_phy_write(hw, port, PHY_MARV_INT_MASK, PHY_M_IS_DEF_MSK);
2304 skge_link_up(skge);
2305}
2306
2307static void yukon_link_down(struct skge_port *skge)
2308{
2309 struct skge_hw *hw = skge->hw;
2310 int port = skge->port;
2311 u16 ctrl;
2312
2313 ctrl = gma_read16(hw, port, GM_GP_CTRL);
2314 ctrl &= ~(GM_GPCR_RX_ENA | GM_GPCR_TX_ENA);
2315 gma_write16(hw, port, GM_GP_CTRL, ctrl);
2316
2317 if (skge->flow_status == FLOW_STAT_REM_SEND) {
2318 ctrl = gm_phy_read(hw, port, PHY_MARV_AUNE_ADV);
2319 ctrl |= PHY_M_AN_ASP;
2320 /* restore Asymmetric Pause bit */
2321 gm_phy_write(hw, port, PHY_MARV_AUNE_ADV, ctrl);
2322 }
2323
2324 skge_link_down(skge);
2325
2326 yukon_init(hw, port);
2327}
2328
2329static void yukon_phy_intr(struct skge_port *skge)
2330{
2331 struct skge_hw *hw = skge->hw;
2332 int port = skge->port;
2333 const char *reason = NULL;
2334 u16 istatus, phystat;
2335
2336 istatus = gm_phy_read(hw, port, PHY_MARV_INT_STAT);
2337 phystat = gm_phy_read(hw, port, PHY_MARV_PHY_STAT);
2338
2339 netif_printk(skge, intr, KERN_DEBUG, skge->netdev,
2340 "phy interrupt status 0x%x 0x%x\n", istatus, phystat);
2341
2342 if (istatus & PHY_M_IS_AN_COMPL) {
2343 if (gm_phy_read(hw, port, PHY_MARV_AUNE_LP)
2344 & PHY_M_AN_RF) {
2345 reason = "remote fault";
2346 goto failed;
2347 }
2348
2349 if (gm_phy_read(hw, port, PHY_MARV_1000T_STAT) & PHY_B_1000S_MSF) {
2350 reason = "master/slave fault";
2351 goto failed;
2352 }
2353
2354 if (!(phystat & PHY_M_PS_SPDUP_RES)) {
2355 reason = "speed/duplex";
2356 goto failed;
2357 }
2358
2359 skge->duplex = (phystat & PHY_M_PS_FULL_DUP)
2360 ? DUPLEX_FULL : DUPLEX_HALF;
2361 skge->speed = yukon_speed(hw, phystat);
2362
2363 /* We are using IEEE 802.3z/D5.0 Table 37-4 */
2364 switch (phystat & PHY_M_PS_PAUSE_MSK) {
2365 case PHY_M_PS_PAUSE_MSK:
2366 skge->flow_status = FLOW_STAT_SYMMETRIC;
2367 break;
2368 case PHY_M_PS_RX_P_EN:
2369 skge->flow_status = FLOW_STAT_REM_SEND;
2370 break;
2371 case PHY_M_PS_TX_P_EN:
2372 skge->flow_status = FLOW_STAT_LOC_SEND;
2373 break;
2374 default:
2375 skge->flow_status = FLOW_STAT_NONE;
2376 }
2377
2378 if (skge->flow_status == FLOW_STAT_NONE ||
2379 (skge->speed < SPEED_1000 && skge->duplex == DUPLEX_HALF))
2380 skge_write8(hw, SK_REG(port, GMAC_CTRL), GMC_PAUSE_OFF);
2381 else
2382 skge_write8(hw, SK_REG(port, GMAC_CTRL), GMC_PAUSE_ON);
2383 yukon_link_up(skge);
2384 return;
2385 }
2386
2387 if (istatus & PHY_M_IS_LSP_CHANGE)
2388 skge->speed = yukon_speed(hw, phystat);
2389
2390 if (istatus & PHY_M_IS_DUP_CHANGE)
2391 skge->duplex = (phystat & PHY_M_PS_FULL_DUP) ? DUPLEX_FULL : DUPLEX_HALF;
2392 if (istatus & PHY_M_IS_LST_CHANGE) {
2393 if (phystat & PHY_M_PS_LINK_UP)
2394 yukon_link_up(skge);
2395 else
2396 yukon_link_down(skge);
2397 }
2398 return;
2399 failed:
2400 pr_err("%s: autonegotiation failed (%s)\n", skge->netdev->name, reason);
2401
2402 /* XXX restart autonegotiation? */
2403}
2404
2405static void skge_phy_reset(struct skge_port *skge)
2406{
2407 struct skge_hw *hw = skge->hw;
2408 int port = skge->port;
2409 struct net_device *dev = hw->dev[port];
2410
2411 netif_stop_queue(skge->netdev);
2412 netif_carrier_off(skge->netdev);
2413
2414 spin_lock_bh(&hw->phy_lock);
2415 if (is_genesis(hw)) {
2416 genesis_reset(hw, port);
2417 genesis_mac_init(hw, port);
2418 } else {
2419 yukon_reset(hw, port);
2420 yukon_init(hw, port);
2421 }
2422 spin_unlock_bh(&hw->phy_lock);
2423
2424 skge_set_multicast(dev);
2425}
2426
2427/* Basic MII support */
2428static int skge_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
2429{
2430 struct mii_ioctl_data *data = if_mii(ifr);
2431 struct skge_port *skge = netdev_priv(dev);
2432 struct skge_hw *hw = skge->hw;
2433 int err = -EOPNOTSUPP;
2434
2435 if (!netif_running(dev))
2436 return -ENODEV; /* Phy still in reset */
2437
2438 switch (cmd) {
2439 case SIOCGMIIPHY:
2440 data->phy_id = hw->phy_addr;
2441
2442 /* fallthru */
2443 case SIOCGMIIREG: {
2444 u16 val = 0;
2445 spin_lock_bh(&hw->phy_lock);
2446
2447 if (is_genesis(hw))
2448 err = __xm_phy_read(hw, skge->port, data->reg_num & 0x1f, &val);
2449 else
2450 err = __gm_phy_read(hw, skge->port, data->reg_num & 0x1f, &val);
2451 spin_unlock_bh(&hw->phy_lock);
2452 data->val_out = val;
2453 break;
2454 }
2455
2456 case SIOCSMIIREG:
2457 spin_lock_bh(&hw->phy_lock);
2458 if (is_genesis(hw))
2459 err = xm_phy_write(hw, skge->port, data->reg_num & 0x1f,
2460 data->val_in);
2461 else
2462 err = gm_phy_write(hw, skge->port, data->reg_num & 0x1f,
2463 data->val_in);
2464 spin_unlock_bh(&hw->phy_lock);
2465 break;
2466 }
2467 return err;
2468}
2469
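/* Carve out a chunk of the adapter's on-board RAM buffer for queue q.
 * start and len arrive in bytes, but the RAM buffer registers are
 * programmed in 8-byte units, hence the divisions below.  Receive
 * queues additionally get upper/lower flow-control thresholds at 2/3
 * and 1/3 of the buffer.
 */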
2470static void skge_ramset(struct skge_hw *hw, u16 q, u32 start, size_t len)
2471{
2472 u32 end;
2473
2474 start /= 8;
2475 len /= 8;
2476 end = start + len - 1;
2477
2478 skge_write8(hw, RB_ADDR(q, RB_CTRL), RB_RST_CLR);
2479 skge_write32(hw, RB_ADDR(q, RB_START), start);
2480 skge_write32(hw, RB_ADDR(q, RB_WP), start);
2481 skge_write32(hw, RB_ADDR(q, RB_RP), start);
2482 skge_write32(hw, RB_ADDR(q, RB_END), end);
2483
2484 if (q == Q_R1 || q == Q_R2) {
2485	 /* Set thresholds on receive queues */
2486 skge_write32(hw, RB_ADDR(q, RB_RX_UTPP),
2487 start + (2*len)/3);
2488 skge_write32(hw, RB_ADDR(q, RB_RX_LTPP),
2489 start + (len/3));
2490 } else {
2491	 /* Enable store & forward on Tx queues because the
2492	 * Tx FIFO is only 4K on Genesis and 1K on Yukon
2493 */
2494 skge_write8(hw, RB_ADDR(q, RB_CTRL), RB_ENA_STFWD);
2495 }
2496
2497 skge_write8(hw, RB_ADDR(q, RB_CTRL), RB_ENA_OP_MD);
2498}
2499
2500/* Setup Bus Memory Interface */
2501static void skge_qset(struct skge_port *skge, u16 q,
2502 const struct skge_element *e)
2503{
2504 struct skge_hw *hw = skge->hw;
2505 u32 watermark = 0x600;
2506 u64 base = skge->dma + (e->desc - skge->mem);
2507
2508	 /* optimization: halve the watermark on plain 32-bit/33 MHz PCI */
2509 if ((skge_read16(hw, B0_CTST) & (CS_BUS_CLOCK | CS_BUS_SLOT_SZ)) == 0)
2510 watermark /= 2;
2511
2512 skge_write32(hw, Q_ADDR(q, Q_CSR), CSR_CLR_RESET);
2513 skge_write32(hw, Q_ADDR(q, Q_F), watermark);
2514 skge_write32(hw, Q_ADDR(q, Q_DA_H), (u32)(base >> 32));
2515 skge_write32(hw, Q_ADDR(q, Q_DA_L), (u32)base);
2516}
2517
2518static int skge_up(struct net_device *dev)
2519{
2520 struct skge_port *skge = netdev_priv(dev);
2521 struct skge_hw *hw = skge->hw;
2522 int port = skge->port;
2523 u32 chunk, ram_addr;
2524 size_t rx_size, tx_size;
2525 int err;
2526
2527 if (!is_valid_ether_addr(dev->dev_addr))
2528 return -EINVAL;
2529
2530 netif_info(skge, ifup, skge->netdev, "enabling interface\n");
2531
2532 if (dev->mtu > RX_BUF_SIZE)
2533 skge->rx_buf_size = dev->mtu + ETH_HLEN;
2534 else
2535 skge->rx_buf_size = RX_BUF_SIZE;
2536
2537
2538 rx_size = skge->rx_ring.count * sizeof(struct skge_rx_desc);
2539 tx_size = skge->tx_ring.count * sizeof(struct skge_tx_desc);
2540 skge->mem_size = tx_size + rx_size;
2541 skge->mem = pci_alloc_consistent(hw->pdev, skge->mem_size, &skge->dma);
2542 if (!skge->mem)
2543 return -ENOMEM;
2544
2545 BUG_ON(skge->dma & 7);
2546
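	/* Q_DA_H is written only once per queue in skge_qset(), so every
	 * descriptor must share the same upper 32 address bits; reject a
	 * region that straddles a 4 GB boundary.
	 */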
2547 if ((u64)skge->dma >> 32 != ((u64) skge->dma + skge->mem_size) >> 32) {
2548 dev_err(&hw->pdev->dev, "pci_alloc_consistent region crosses 4G boundary\n");
2549 err = -EINVAL;
2550 goto free_pci_mem;
2551 }
2552
2553 memset(skge->mem, 0, skge->mem_size);
2554
2555 err = skge_ring_alloc(&skge->rx_ring, skge->mem, skge->dma);
2556 if (err)
2557 goto free_pci_mem;
2558
2559 err = skge_rx_fill(dev);
2560 if (err)
2561 goto free_rx_ring;
2562
2563 err = skge_ring_alloc(&skge->tx_ring, skge->mem + rx_size,
2564 skge->dma + rx_size);
2565 if (err)
2566 goto free_rx_ring;
2567
2568 if (hw->ports == 1) {
2569 err = request_irq(hw->pdev->irq, skge_intr, IRQF_SHARED,
2570 dev->name, hw);
2571 if (err) {
2572 netdev_err(dev, "Unable to allocate interrupt %d error: %d\n",
2573 hw->pdev->irq, err);
2574 goto free_tx_ring;
2575 }
2576 }
2577
2578 /* Initialize MAC */
2579 spin_lock_bh(&hw->phy_lock);
2580 if (is_genesis(hw))
2581 genesis_mac_init(hw, port);
2582 else
2583 yukon_mac_init(hw, port);
2584 spin_unlock_bh(&hw->phy_lock);
2585
2586	 /* Configure RAM buffers -- split equally between ports and Tx/Rx */
2587 chunk = (hw->ram_size - hw->ram_offset) / (hw->ports * 2);
2588 ram_addr = hw->ram_offset + 2 * chunk * port;
2589
2590 skge_ramset(hw, rxqaddr[port], ram_addr, chunk);
2591 skge_qset(skge, rxqaddr[port], skge->rx_ring.to_clean);
2592
2593 BUG_ON(skge->tx_ring.to_use != skge->tx_ring.to_clean);
2594 skge_ramset(hw, txqaddr[port], ram_addr+chunk, chunk);
2595 skge_qset(skge, txqaddr[port], skge->tx_ring.to_use);
2596
2597 /* Start receiver BMU */
2598 wmb();
2599 skge_write8(hw, Q_ADDR(rxqaddr[port], Q_CSR), CSR_START | CSR_IRQ_CL_F);
2600 skge_led(skge, LED_MODE_ON);
2601
2602 spin_lock_irq(&hw->hw_lock);
2603 hw->intr_mask |= portmask[port];
2604 skge_write32(hw, B0_IMSK, hw->intr_mask);
2605 skge_read32(hw, B0_IMSK);
2606 spin_unlock_irq(&hw->hw_lock);
2607
2608 napi_enable(&skge->napi);
2609 return 0;
2610
2611 free_tx_ring:
2612 kfree(skge->tx_ring.start);
2613 free_rx_ring:
2614 skge_rx_clean(skge);
2615 kfree(skge->rx_ring.start);
2616 free_pci_mem:
2617 pci_free_consistent(hw->pdev, skge->mem_size, skge->mem, skge->dma);
2618 skge->mem = NULL;
2619
2620 return err;
2621}
2622
2623/* stop receiver */
2624static void skge_rx_stop(struct skge_hw *hw, int port)
2625{
2626 skge_write8(hw, Q_ADDR(rxqaddr[port], Q_CSR), CSR_STOP);
2627 skge_write32(hw, RB_ADDR(port ? Q_R2 : Q_R1, RB_CTRL),
2628 RB_RST_SET|RB_DIS_OP_MD);
2629 skge_write32(hw, Q_ADDR(rxqaddr[port], Q_CSR), CSR_SET_RESET);
2630}
2631
2632static int skge_down(struct net_device *dev)
2633{
2634 struct skge_port *skge = netdev_priv(dev);
2635 struct skge_hw *hw = skge->hw;
2636 int port = skge->port;
2637
2638 if (skge->mem == NULL)
2639 return 0;
2640
2641 netif_info(skge, ifdown, skge->netdev, "disabling interface\n");
2642
2643 netif_tx_disable(dev);
2644
2645 if (is_genesis(hw) && hw->phy_type == SK_PHY_XMAC)
2646 del_timer_sync(&skge->link_timer);
2647
2648 napi_disable(&skge->napi);
2649 netif_carrier_off(dev);
2650
2651 spin_lock_irq(&hw->hw_lock);
2652 hw->intr_mask &= ~portmask[port];
2653 skge_write32(hw, B0_IMSK, (hw->ports == 1) ? 0 : hw->intr_mask);
2654 skge_read32(hw, B0_IMSK);
2655 spin_unlock_irq(&hw->hw_lock);
2656
2657 if (hw->ports == 1)
2658 free_irq(hw->pdev->irq, hw);
2659
2660 skge_write8(skge->hw, SK_REG(skge->port, LNK_LED_REG), LED_OFF);
2661 if (is_genesis(hw))
2662 genesis_stop(skge);
2663 else
2664 yukon_stop(skge);
2665
2666 /* Stop transmitter */
2667 skge_write8(hw, Q_ADDR(txqaddr[port], Q_CSR), CSR_STOP);
2668 skge_write32(hw, RB_ADDR(txqaddr[port], RB_CTRL),
2669 RB_RST_SET|RB_DIS_OP_MD);
2670
2671
2672 /* Disable Force Sync bit and Enable Alloc bit */
2673 skge_write8(hw, SK_REG(port, TXA_CTRL),
2674 TXA_DIS_FSYNC | TXA_DIS_ALLOC | TXA_STOP_RC);
2675
2676 /* Stop Interval Timer and Limit Counter of Tx Arbiter */
2677 skge_write32(hw, SK_REG(port, TXA_ITI_INI), 0L);
2678 skge_write32(hw, SK_REG(port, TXA_LIM_INI), 0L);
2679
2680 /* Reset PCI FIFO */
2681 skge_write32(hw, Q_ADDR(txqaddr[port], Q_CSR), CSR_SET_RESET);
2682 skge_write32(hw, RB_ADDR(txqaddr[port], RB_CTRL), RB_RST_SET);
2683
2684 /* Reset the RAM Buffer async Tx queue */
2685 skge_write8(hw, RB_ADDR(port == 0 ? Q_XA1 : Q_XA2, RB_CTRL), RB_RST_SET);
2686
2687 skge_rx_stop(hw, port);
2688
2689 if (is_genesis(hw)) {
2690 skge_write8(hw, SK_REG(port, TX_MFF_CTRL2), MFF_RST_SET);
2691 skge_write8(hw, SK_REG(port, RX_MFF_CTRL2), MFF_RST_SET);
2692 } else {
2693 skge_write8(hw, SK_REG(port, RX_GMF_CTRL_T), GMF_RST_SET);
2694 skge_write8(hw, SK_REG(port, TX_GMF_CTRL_T), GMF_RST_SET);
2695 }
2696
2697 skge_led(skge, LED_MODE_OFF);
2698
2699 netif_tx_lock_bh(dev);
2700 skge_tx_clean(dev);
2701 netif_tx_unlock_bh(dev);
2702
2703 skge_rx_clean(skge);
2704
2705 kfree(skge->rx_ring.start);
2706 kfree(skge->tx_ring.start);
2707 pci_free_consistent(hw->pdev, skge->mem_size, skge->mem, skge->dma);
2708 skge->mem = NULL;
2709 return 0;
2710}
2711
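/* Number of free transmit slots.  One slot is always left unused so
 * that to_use == to_clean unambiguously means "empty" rather than
 * "full": an N-element ring therefore holds at most N - 1 packets'
 * worth of descriptors.
 */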
2712static inline int skge_avail(const struct skge_ring *ring)
2713{
2714 smp_mb();
2715 return ((ring->to_clean > ring->to_use) ? 0 : ring->count)
2716 + (ring->to_clean - ring->to_use) - 1;
2717}
2718
2719static netdev_tx_t skge_xmit_frame(struct sk_buff *skb,
2720 struct net_device *dev)
2721{
2722 struct skge_port *skge = netdev_priv(dev);
2723 struct skge_hw *hw = skge->hw;
2724 struct skge_element *e;
2725 struct skge_tx_desc *td;
2726 int i;
2727 u32 control, len;
2728 u64 map;
2729
2730 if (skb_padto(skb, ETH_ZLEN))
2731 return NETDEV_TX_OK;
2732
2733 if (unlikely(skge_avail(&skge->tx_ring) < skb_shinfo(skb)->nr_frags + 1))
2734 return NETDEV_TX_BUSY;
2735
2736 e = skge->tx_ring.to_use;
2737 td = e->desc;
2738 BUG_ON(td->control & BMU_OWN);
2739 e->skb = skb;
2740 len = skb_headlen(skb);
2741 map = pci_map_single(hw->pdev, skb->data, len, PCI_DMA_TODEVICE);
2742 dma_unmap_addr_set(e, mapaddr, map);
2743 dma_unmap_len_set(e, maplen, len);
2744
2745 td->dma_lo = map;
2746 td->dma_hi = map >> 32;
2747
2748 if (skb->ip_summed == CHECKSUM_PARTIAL) {
2749 const int offset = skb_checksum_start_offset(skb);
2750
2751 /* This seems backwards, but it is what the sk98lin
2752 * does. Looks like hardware is wrong?
2753 */
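		/* (On the first Yukon revision the UDP/TCP checksum-type
		 * bits are apparently swapped: UDP frames get
		 * BMU_TCP_CHECK and everything else BMU_UDP_CHECK.)
		 */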
2754 if (ipip_hdr(skb)->protocol == IPPROTO_UDP &&
2755 hw->chip_rev == 0 && hw->chip_id == CHIP_ID_YUKON)
2756 control = BMU_TCP_CHECK;
2757 else
2758 control = BMU_UDP_CHECK;
2759
2760 td->csum_offs = 0;
2761 td->csum_start = offset;
2762 td->csum_write = offset + skb->csum_offset;
2763 } else
2764 control = BMU_CHECK;
2765
2766 if (!skb_shinfo(skb)->nr_frags) /* single buffer i.e. no fragments */
2767 control |= BMU_EOF | BMU_IRQ_EOF;
2768 else {
2769 struct skge_tx_desc *tf = td;
2770
2771 control |= BMU_STFWD;
2772 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
2773 const skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
2774
2775 map = skb_frag_dma_map(&hw->pdev->dev, frag, 0,
2776 skb_frag_size(frag), DMA_TO_DEVICE);
2777
2778 e = e->next;
2779 e->skb = skb;
2780 tf = e->desc;
2781 BUG_ON(tf->control & BMU_OWN);
2782
2783 tf->dma_lo = map;
2784 tf->dma_hi = (u64) map >> 32;
2785 dma_unmap_addr_set(e, mapaddr, map);
2786 dma_unmap_len_set(e, maplen, skb_frag_size(frag));
2787
2788 tf->control = BMU_OWN | BMU_SW | control | skb_frag_size(frag);
2789 }
2790 tf->control |= BMU_EOF | BMU_IRQ_EOF;
2791 }
2792	 /* Make sure all the descriptors are written */
2793 wmb();
2794 td->control = BMU_OWN | BMU_SW | BMU_STF | control | len;
2795 wmb();
2796
2797 skge_write8(hw, Q_ADDR(txqaddr[skge->port], Q_CSR), CSR_START);
2798
2799 netif_printk(skge, tx_queued, KERN_DEBUG, skge->netdev,
2800 "tx queued, slot %td, len %d\n",
2801 e - skge->tx_ring.start, skb->len);
2802
2803 skge->tx_ring.to_use = e->next;
2804 smp_wmb();
2805
2806 if (skge_avail(&skge->tx_ring) <= TX_LOW_WATER) {
2807 netdev_dbg(dev, "transmit queue full\n");
2808 netif_stop_queue(dev);
2809 }
2810
2811 return NETDEV_TX_OK;
2812}
2813
2814
2815/* Free resources associated with this ring element */
2816static void skge_tx_free(struct skge_port *skge, struct skge_element *e,
2817 u32 control)
2818{
2819 struct pci_dev *pdev = skge->hw->pdev;
2820
2821 /* skb header vs. fragment */
2822 if (control & BMU_STF)
2823 pci_unmap_single(pdev, dma_unmap_addr(e, mapaddr),
2824 dma_unmap_len(e, maplen),
2825 PCI_DMA_TODEVICE);
2826 else
2827 pci_unmap_page(pdev, dma_unmap_addr(e, mapaddr),
2828 dma_unmap_len(e, maplen),
2829 PCI_DMA_TODEVICE);
2830
2831 if (control & BMU_EOF) {
2832 netif_printk(skge, tx_done, KERN_DEBUG, skge->netdev,
2833 "tx done slot %td\n", e - skge->tx_ring.start);
2834
2835 dev_kfree_skb(e->skb);
2836 }
2837}
2838
2839/* Free all buffers in transmit ring */
2840static void skge_tx_clean(struct net_device *dev)
2841{
2842 struct skge_port *skge = netdev_priv(dev);
2843 struct skge_element *e;
2844
2845 for (e = skge->tx_ring.to_clean; e != skge->tx_ring.to_use; e = e->next) {
2846 struct skge_tx_desc *td = e->desc;
2847 skge_tx_free(skge, e, td->control);
2848 td->control = 0;
2849 }
2850
2851 skge->tx_ring.to_clean = e;
2852}
2853
2854static void skge_tx_timeout(struct net_device *dev)
2855{
2856 struct skge_port *skge = netdev_priv(dev);
2857
2858 netif_printk(skge, timer, KERN_DEBUG, skge->netdev, "tx timeout\n");
2859
2860 skge_write8(skge->hw, Q_ADDR(txqaddr[skge->port], Q_CSR), CSR_STOP);
2861 skge_tx_clean(dev);
2862 netif_wake_queue(dev);
2863}
2864
2865static int skge_change_mtu(struct net_device *dev, int new_mtu)
2866{
2867 int err;
2868
2869 if (new_mtu < ETH_ZLEN || new_mtu > ETH_JUMBO_MTU)
2870 return -EINVAL;
2871
2872 if (!netif_running(dev)) {
2873 dev->mtu = new_mtu;
2874 return 0;
2875 }
2876
2877 skge_down(dev);
2878
2879 dev->mtu = new_mtu;
2880
2881 err = skge_up(dev);
2882 if (err)
2883 dev_close(dev);
2884
2885 return err;
2886}
2887
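/* 01:80:c2:00:00:01 is the IEEE 802.3x PAUSE multicast address; it is
 * added to the Rx filter whenever the link partner may send pause
 * frames (FLOW_STAT_REM_SEND or FLOW_STAT_SYMMETRIC).
 */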
2888static const u8 pause_mc_addr[ETH_ALEN] = { 0x1, 0x80, 0xc2, 0x0, 0x0, 0x1 };
2889
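/* XMAC 64-bit hash filter: the bit index is the low six bits of the
 * inverted little-endian CRC of the address (the Yukon variant below
 * uses the plain big-endian CRC instead).
 */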
2890static void genesis_add_filter(u8 filter[8], const u8 *addr)
2891{
2892 u32 crc, bit;
2893
2894 crc = ether_crc_le(ETH_ALEN, addr);
2895 bit = ~crc & 0x3f;
2896 filter[bit/8] |= 1 << (bit%8);
2897}
2898
2899static void genesis_set_multicast(struct net_device *dev)
2900{
2901 struct skge_port *skge = netdev_priv(dev);
2902 struct skge_hw *hw = skge->hw;
2903 int port = skge->port;
2904 struct netdev_hw_addr *ha;
2905 u32 mode;
2906 u8 filter[8];
2907
2908 mode = xm_read32(hw, port, XM_MODE);
2909 mode |= XM_MD_ENA_HASH;
2910 if (dev->flags & IFF_PROMISC)
2911 mode |= XM_MD_ENA_PROM;
2912 else
2913 mode &= ~XM_MD_ENA_PROM;
2914
2915 if (dev->flags & IFF_ALLMULTI)
2916 memset(filter, 0xff, sizeof(filter));
2917 else {
2918 memset(filter, 0, sizeof(filter));
2919
2920 if (skge->flow_status == FLOW_STAT_REM_SEND ||
2921 skge->flow_status == FLOW_STAT_SYMMETRIC)
2922 genesis_add_filter(filter, pause_mc_addr);
2923
2924 netdev_for_each_mc_addr(ha, dev)
2925 genesis_add_filter(filter, ha->addr);
2926 }
2927
2928 xm_write32(hw, port, XM_MODE, mode);
2929 xm_outhash(hw, port, XM_HSM, filter);
2930}
2931
2932static void yukon_add_filter(u8 filter[8], const u8 *addr)
2933{
2934 u32 bit = ether_crc(ETH_ALEN, addr) & 0x3f;
2935 filter[bit/8] |= 1 << (bit%8);
2936}
2937
2938static void yukon_set_multicast(struct net_device *dev)
2939{
2940 struct skge_port *skge = netdev_priv(dev);
2941 struct skge_hw *hw = skge->hw;
2942 int port = skge->port;
2943 struct netdev_hw_addr *ha;
2944 int rx_pause = (skge->flow_status == FLOW_STAT_REM_SEND ||
2945 skge->flow_status == FLOW_STAT_SYMMETRIC);
2946 u16 reg;
2947 u8 filter[8];
2948
2949 memset(filter, 0, sizeof(filter));
2950
2951 reg = gma_read16(hw, port, GM_RX_CTRL);
2952 reg |= GM_RXCR_UCF_ENA;
2953
2954 if (dev->flags & IFF_PROMISC) /* promiscuous */
2955 reg &= ~(GM_RXCR_UCF_ENA | GM_RXCR_MCF_ENA);
2956 else if (dev->flags & IFF_ALLMULTI) /* all multicast */
2957 memset(filter, 0xff, sizeof(filter));
2958 else if (netdev_mc_empty(dev) && !rx_pause)/* no multicast */
2959 reg &= ~GM_RXCR_MCF_ENA;
2960 else {
2961 reg |= GM_RXCR_MCF_ENA;
2962
2963 if (rx_pause)
2964 yukon_add_filter(filter, pause_mc_addr);
2965
2966 netdev_for_each_mc_addr(ha, dev)
2967 yukon_add_filter(filter, ha->addr);
2968 }
2969
2970
2971 gma_write16(hw, port, GM_MC_ADDR_H1,
2972 (u16)filter[0] | ((u16)filter[1] << 8));
2973 gma_write16(hw, port, GM_MC_ADDR_H2,
2974 (u16)filter[2] | ((u16)filter[3] << 8));
2975 gma_write16(hw, port, GM_MC_ADDR_H3,
2976 (u16)filter[4] | ((u16)filter[5] << 8));
2977 gma_write16(hw, port, GM_MC_ADDR_H4,
2978 (u16)filter[6] | ((u16)filter[7] << 8));
2979
2980 gma_write16(hw, port, GM_RX_CTRL, reg);
2981}
2982
2983static inline u16 phy_length(const struct skge_hw *hw, u32 status)
2984{
2985 if (is_genesis(hw))
2986 return status >> XMR_FS_LEN_SHIFT;
2987 else
2988 return status >> GMR_FS_LEN_SHIFT;
2989}
2990
2991static inline int bad_phy_status(const struct skge_hw *hw, u32 status)
2992{
2993 if (is_genesis(hw))
2994 return (status & (XMR_FS_ERR | XMR_FS_2L_VLAN)) != 0;
2995 else
2996 return (status & GMR_FS_ANY_ERR) ||
2997 (status & GMR_FS_RX_OK) == 0;
2998}
2999
3000static void skge_set_multicast(struct net_device *dev)
3001{
3002 struct skge_port *skge = netdev_priv(dev);
3003
3004 if (is_genesis(skge->hw))
3005 genesis_set_multicast(dev);
3006 else
3007 yukon_set_multicast(dev);
3008
3009}
3010
3011
3012/* Get receive buffer from descriptor.
3013 * Handles copy of small buffers and reallocation failures
3014 */
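/* Frames shorter than RX_COPY_THRESHOLD are copied into a freshly
 * allocated skb so the original buffer can be resubmitted as-is;
 * larger frames are passed up whole and replaced with a new buffer.
 * If allocation fails the element is reused and the frame dropped,
 * keeping the ring full rather than stalling the receiver.
 */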
3015static struct sk_buff *skge_rx_get(struct net_device *dev,
3016 struct skge_element *e,
3017 u32 control, u32 status, u16 csum)
3018{
3019 struct skge_port *skge = netdev_priv(dev);
3020 struct sk_buff *skb;
3021 u16 len = control & BMU_BBC;
3022
3023 netif_printk(skge, rx_status, KERN_DEBUG, skge->netdev,
3024 "rx slot %td status 0x%x len %d\n",
3025 e - skge->rx_ring.start, status, len);
3026
3027 if (len > skge->rx_buf_size)
3028 goto error;
3029
3030 if ((control & (BMU_EOF|BMU_STF)) != (BMU_STF|BMU_EOF))
3031 goto error;
3032
3033 if (bad_phy_status(skge->hw, status))
3034 goto error;
3035
3036 if (phy_length(skge->hw, status) != len)
3037 goto error;
3038
3039 if (len < RX_COPY_THRESHOLD) {
3040 skb = netdev_alloc_skb_ip_align(dev, len);
3041 if (!skb)
3042 goto resubmit;
3043
3044 pci_dma_sync_single_for_cpu(skge->hw->pdev,
3045 dma_unmap_addr(e, mapaddr),
3046 len, PCI_DMA_FROMDEVICE);
3047 skb_copy_from_linear_data(e->skb, skb->data, len);
3048 pci_dma_sync_single_for_device(skge->hw->pdev,
3049 dma_unmap_addr(e, mapaddr),
3050 len, PCI_DMA_FROMDEVICE);
3051 skge_rx_reuse(e, skge->rx_buf_size);
3052 } else {
3053 struct sk_buff *nskb;
3054
3055 nskb = netdev_alloc_skb_ip_align(dev, skge->rx_buf_size);
3056 if (!nskb)
3057 goto resubmit;
3058
3059 pci_unmap_single(skge->hw->pdev,
3060 dma_unmap_addr(e, mapaddr),
3061 dma_unmap_len(e, maplen),
3062 PCI_DMA_FROMDEVICE);
3063 skb = e->skb;
3064 prefetch(skb->data);
3065 skge_rx_setup(skge, e, nskb, skge->rx_buf_size);
3066 }
3067
3068 skb_put(skb, len);
3069
3070 if (dev->features & NETIF_F_RXCSUM) {
3071 skb->csum = csum;
3072 skb->ip_summed = CHECKSUM_COMPLETE;
3073 }
3074
3075 skb->protocol = eth_type_trans(skb, dev);
3076
3077 return skb;
3078error:
3079
3080 netif_printk(skge, rx_err, KERN_DEBUG, skge->netdev,
3081 "rx err, slot %td control 0x%x status 0x%x\n",
3082 e - skge->rx_ring.start, control, status);
3083
3084 if (is_genesis(skge->hw)) {
3085 if (status & (XMR_FS_RUNT|XMR_FS_LNG_ERR))
3086 dev->stats.rx_length_errors++;
3087 if (status & XMR_FS_FRA_ERR)
3088 dev->stats.rx_frame_errors++;
3089 if (status & XMR_FS_FCS_ERR)
3090 dev->stats.rx_crc_errors++;
3091 } else {
3092 if (status & (GMR_FS_LONG_ERR|GMR_FS_UN_SIZE))
3093 dev->stats.rx_length_errors++;
3094 if (status & GMR_FS_FRAGMENT)
3095 dev->stats.rx_frame_errors++;
3096 if (status & GMR_FS_CRC_ERR)
3097 dev->stats.rx_crc_errors++;
3098 }
3099
3100resubmit:
3101 skge_rx_reuse(e, skge->rx_buf_size);
3102 return NULL;
3103}
3104
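/* Editor's sketch (not driver code): the copy-vs-swap ("copybreak")
 * policy skge_rx_get() implements above, reduced to plain C. Small
 * frames are copied so the large DMA buffer stays in the ring; large
 * frames hand their buffer up and get a fresh one, and an allocation
 * failure falls back to reusing the old buffer (the "resubmit" path).
 * RX_COPYBREAK and struct rx_slot are assumed names for illustration.
 */
#include <stdlib.h>
#include <string.h>

#define RX_COPYBREAK 256	/* plays the role of RX_COPY_THRESHOLD */

struct rx_slot { void *data; size_t size; };

/* Returns a buffer owned by the caller, or NULL to drop and resubmit. */
static void *rx_copybreak(struct rx_slot *slot, size_t len)
{
	if (len < RX_COPYBREAK) {
		void *copy = malloc(len);
		if (copy)
			memcpy(copy, slot->data, len);	/* ring keeps its buffer */
		return copy;
	} else {
		void *full = slot->data;
		void *fresh = malloc(slot->size);
		if (!fresh)
			return NULL;		/* like "goto resubmit": reuse old buffer */
		slot->data = fresh;		/* replace the buffer in the ring */
		return full;			/* hand the full buffer up the stack */
	}
}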
3105/* Free all buffers in Tx ring which are no longer owned by device */
3106static void skge_tx_done(struct net_device *dev)
3107{
3108 struct skge_port *skge = netdev_priv(dev);
3109 struct skge_ring *ring = &skge->tx_ring;
3110 struct skge_element *e;
3111
3112 skge_write8(skge->hw, Q_ADDR(txqaddr[skge->port], Q_CSR), CSR_IRQ_CL_F);
3113
3114 for (e = ring->to_clean; e != ring->to_use; e = e->next) {
3115 u32 control = ((const struct skge_tx_desc *) e->desc)->control;
3116
3117 if (control & BMU_OWN)
3118 break;
3119
3120 skge_tx_free(skge, e, control);
3121 }
3122 skge->tx_ring.to_clean = e;
3123
3124 /* Can run lockless until we need to synchronize to restart queue. */
3125 smp_mb();
3126
3127 if (unlikely(netif_queue_stopped(dev) &&
3128 skge_avail(&skge->tx_ring) > TX_LOW_WATER)) {
3129 netif_tx_lock(dev);
3130 if (unlikely(netif_queue_stopped(dev) &&
3131 skge_avail(&skge->tx_ring) > TX_LOW_WATER)) {
3132 netif_wake_queue(dev);
3133
3134 }
3135 netif_tx_unlock(dev);
3136 }
3137}
3138
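/* Editor's sketch (assumed names, not driver code): the stop/wake race
 * skge_tx_done() guards against above. The full barrier pairs the
 * consumer's reclaim with the producer's queue stop, and the state is
 * re-checked under the tx lock so a concurrent transmit cannot stop
 * the queue between the lockless check and the wake. */
#include <stdatomic.h>
#include <stdbool.h>

#define TX_LOW_WATER 8			/* stands in for the driver's watermark */

static atomic_bool queue_stopped;
static atomic_int free_slots;

static void reclaim_completed(void)	/* consumer side, cf. skge_tx_done() */
{
	/* ... free completed descriptors, incrementing free_slots ... */
	atomic_thread_fence(memory_order_seq_cst);	/* plays the role of smp_mb() */
	if (atomic_load(&queue_stopped) &&
	    atomic_load(&free_slots) > TX_LOW_WATER) {
		/* take the tx lock here, then re-check before waking */
		if (atomic_load(&queue_stopped) &&
		    atomic_load(&free_slots) > TX_LOW_WATER)
			atomic_store(&queue_stopped, false);	/* netif_wake_queue() */
	}
}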
3139static int skge_poll(struct napi_struct *napi, int to_do)
3140{
3141 struct skge_port *skge = container_of(napi, struct skge_port, napi);
3142 struct net_device *dev = skge->netdev;
3143 struct skge_hw *hw = skge->hw;
3144 struct skge_ring *ring = &skge->rx_ring;
3145 struct skge_element *e;
3146 int work_done = 0;
3147
3148 skge_tx_done(dev);
3149
3150 skge_write8(hw, Q_ADDR(rxqaddr[skge->port], Q_CSR), CSR_IRQ_CL_F);
3151
3152 for (e = ring->to_clean; prefetch(e->next), work_done < to_do; e = e->next) {
3153 struct skge_rx_desc *rd = e->desc;
3154 struct sk_buff *skb;
3155 u32 control;
3156
3157 rmb();
3158 control = rd->control;
3159 if (control & BMU_OWN)
3160 break;
3161
3162 skb = skge_rx_get(dev, e, control, rd->status, rd->csum2);
3163 if (likely(skb)) {
3164 napi_gro_receive(napi, skb);
3165 ++work_done;
3166 }
3167 }
3168 ring->to_clean = e;
3169
3170 /* restart receiver */
3171 wmb();
3172 skge_write8(hw, Q_ADDR(rxqaddr[skge->port], Q_CSR), CSR_START);
3173
3174 if (work_done < to_do) {
3175 unsigned long flags;
3176
3177 napi_gro_flush(napi);
3178 spin_lock_irqsave(&hw->hw_lock, flags);
3179 __napi_complete(napi);
3180 hw->intr_mask |= napimask[skge->port];
3181 skge_write32(hw, B0_IMSK, hw->intr_mask);
3182 skge_read32(hw, B0_IMSK);
3183 spin_unlock_irqrestore(&hw->hw_lock, flags);
3184 }
3185
3186 return work_done;
3187}
3188
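/* Editor's sketch of the NAPI contract skge_poll() follows above (names
 * assumed): consume at most "budget" frames; only when the ring ran dry
 * before the budget was spent may the poller complete NAPI and re-enable
 * the device interrupt, which is what the B0_IMSK write under hw_lock
 * does in the real function. */
static int napi_style_poll(int budget,
			   int (*frame_ready)(void),
			   void (*handle_frame)(void),
			   void (*complete_and_unmask)(void))
{
	int work_done = 0;

	while (work_done < budget && frame_ready()) {
		handle_frame();
		work_done++;
	}

	if (work_done < budget)		/* ring empty: re-arm the interrupt */
		complete_and_unmask();

	return work_done;		/* == budget keeps us on the poll list */
}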
3189/* Parity errors seem to happen when Genesis is connected to a switch
3190 * with no other ports present. Heartbeat error??
3191 */
3192static void skge_mac_parity(struct skge_hw *hw, int port)
3193{
3194 struct net_device *dev = hw->dev[port];
3195
3196 ++dev->stats.tx_heartbeat_errors;
3197
3198 if (is_genesis(hw))
3199 skge_write16(hw, SK_REG(port, TX_MFF_CTRL1),
3200 MFF_CLR_PERR);
3201 else
3202 /* HW-Bug #8: cleared by GMF_CLI_TX_FC instead of GMF_CLI_TX_PE */
3203 skge_write8(hw, SK_REG(port, TX_GMF_CTRL_T),
3204 (hw->chip_id == CHIP_ID_YUKON && hw->chip_rev == 0)
3205 ? GMF_CLI_TX_FC : GMF_CLI_TX_PE);
3206}
3207
3208static void skge_mac_intr(struct skge_hw *hw, int port)
3209{
3210 if (is_genesis(hw))
3211 genesis_mac_intr(hw, port);
3212 else
3213 yukon_mac_intr(hw, port);
3214}
3215
3216/* Handle device specific framing and timeout interrupts */
3217static void skge_error_irq(struct skge_hw *hw)
3218{
3219 struct pci_dev *pdev = hw->pdev;
3220 u32 hwstatus = skge_read32(hw, B0_HWE_ISRC);
3221
3222 if (is_genesis(hw)) {
3223 /* clear xmac errors */
3224 if (hwstatus & (IS_NO_STAT_M1|IS_NO_TIST_M1))
3225 skge_write16(hw, RX_MFF_CTRL1, MFF_CLR_INSTAT);
3226 if (hwstatus & (IS_NO_STAT_M2|IS_NO_TIST_M2))
3227 skge_write16(hw, RX_MFF_CTRL2, MFF_CLR_INSTAT);
3228 } else {
3229 /* Timestamp (unused) overflow */
3230 if (hwstatus & IS_IRQ_TIST_OV)
3231 skge_write8(hw, GMAC_TI_ST_CTRL, GMT_ST_CLR_IRQ);
3232 }
3233
3234 if (hwstatus & IS_RAM_RD_PAR) {
3235 dev_err(&pdev->dev, "Ram read data parity error\n");
3236 skge_write16(hw, B3_RI_CTRL, RI_CLR_RD_PERR);
3237 }
3238
3239 if (hwstatus & IS_RAM_WR_PAR) {
3240 dev_err(&pdev->dev, "Ram write data parity error\n");
3241 skge_write16(hw, B3_RI_CTRL, RI_CLR_WR_PERR);
3242 }
3243
3244 if (hwstatus & IS_M1_PAR_ERR)
3245 skge_mac_parity(hw, 0);
3246
3247 if (hwstatus & IS_M2_PAR_ERR)
3248 skge_mac_parity(hw, 1);
3249
3250 if (hwstatus & IS_R1_PAR_ERR) {
3251 dev_err(&pdev->dev, "%s: receive queue parity error\n",
3252 hw->dev[0]->name);
3253 skge_write32(hw, B0_R1_CSR, CSR_IRQ_CL_P);
3254 }
3255
3256 if (hwstatus & IS_R2_PAR_ERR) {
3257 dev_err(&pdev->dev, "%s: receive queue parity error\n",
3258 hw->dev[1]->name);
3259 skge_write32(hw, B0_R2_CSR, CSR_IRQ_CL_P);
3260 }
3261
3262 if (hwstatus & (IS_IRQ_MST_ERR|IS_IRQ_STAT)) {
3263 u16 pci_status, pci_cmd;
3264
3265 pci_read_config_word(pdev, PCI_COMMAND, &pci_cmd);
3266 pci_read_config_word(pdev, PCI_STATUS, &pci_status);
3267
3268 dev_err(&pdev->dev, "PCI error cmd=%#x status=%#x\n",
3269 pci_cmd, pci_status);
3270
3271 /* Write the error bits back to clear them. */
3272 pci_status &= PCI_STATUS_ERROR_BITS;
3273 skge_write8(hw, B2_TST_CTRL1, TST_CFG_WRITE_ON);
3274 pci_write_config_word(pdev, PCI_COMMAND,
3275 pci_cmd | PCI_COMMAND_SERR | PCI_COMMAND_PARITY);
3276 pci_write_config_word(pdev, PCI_STATUS, pci_status);
3277 skge_write8(hw, B2_TST_CTRL1, TST_CFG_WRITE_OFF);
3278
3279 /* if error still set then just ignore it */
3280 hwstatus = skge_read32(hw, B0_HWE_ISRC);
3281 if (hwstatus & IS_IRQ_STAT) {
3282 dev_warn(&hw->pdev->dev, "unable to clear error (so ignoring it)\n");
3283 hw->intr_mask &= ~IS_HW_ERR;
3284 }
3285 }
3286}
3287
3288/*
3289 * Interrupts from the PHY are handled in a tasklet (softirq)
3290 * because accessing PHY registers requires a spin wait, which might
3291 * cause excess interrupt latency.
3292 */
3293static void skge_extirq(unsigned long arg)
3294{
3295 struct skge_hw *hw = (struct skge_hw *) arg;
3296 int port;
3297
3298 for (port = 0; port < hw->ports; port++) {
3299 struct net_device *dev = hw->dev[port];
3300
3301 if (netif_running(dev)) {
3302 struct skge_port *skge = netdev_priv(dev);
3303
3304 spin_lock(&hw->phy_lock);
3305 if (!is_genesis(hw))
3306 yukon_phy_intr(skge);
3307 else if (hw->phy_type == SK_PHY_BCOM)
3308 bcom_phy_intr(skge);
3309 spin_unlock(&hw->phy_lock);
3310 }
3311 }
3312
3313 spin_lock_irq(&hw->hw_lock);
3314 hw->intr_mask |= IS_EXT_REG;
3315 skge_write32(hw, B0_IMSK, hw->intr_mask);
3316 skge_read32(hw, B0_IMSK);
3317 spin_unlock_irq(&hw->hw_lock);
3318}
3319
3320static irqreturn_t skge_intr(int irq, void *dev_id)
3321{
3322 struct skge_hw *hw = dev_id;
3323 u32 status;
3324 int handled = 0;
3325
3326 spin_lock(&hw->hw_lock);
3327 /* Reading this register masks IRQ */
3328 status = skge_read32(hw, B0_SP_ISRC);
3329 if (status == 0 || status == ~0)
3330 goto out;
3331
3332 handled = 1;
3333 status &= hw->intr_mask;
3334 if (status & IS_EXT_REG) {
3335 hw->intr_mask &= ~IS_EXT_REG;
3336 tasklet_schedule(&hw->phy_task);
3337 }
3338
3339 if (status & (IS_XA1_F|IS_R1_F)) {
3340 struct skge_port *skge = netdev_priv(hw->dev[0]);
3341 hw->intr_mask &= ~(IS_XA1_F|IS_R1_F);
3342 napi_schedule(&skge->napi);
3343 }
3344
3345 if (status & IS_PA_TO_TX1)
3346 skge_write16(hw, B3_PA_CTRL, PA_CLR_TO_TX1);
3347
3348 if (status & IS_PA_TO_RX1) {
3349 ++hw->dev[0]->stats.rx_over_errors;
3350 skge_write16(hw, B3_PA_CTRL, PA_CLR_TO_RX1);
3351 }
3352
3353
3354 if (status & IS_MAC1)
3355 skge_mac_intr(hw, 0);
3356
3357 if (hw->dev[1]) {
3358 struct skge_port *skge = netdev_priv(hw->dev[1]);
3359
3360 if (status & (IS_XA2_F|IS_R2_F)) {
3361 hw->intr_mask &= ~(IS_XA2_F|IS_R2_F);
3362 napi_schedule(&skge->napi);
3363 }
3364
3365 if (status & IS_PA_TO_RX2) {
3366 ++hw->dev[1]->stats.rx_over_errors;
3367 skge_write16(hw, B3_PA_CTRL, PA_CLR_TO_RX2);
3368 }
3369
3370 if (status & IS_PA_TO_TX2)
3371 skge_write16(hw, B3_PA_CTRL, PA_CLR_TO_TX2);
3372
3373 if (status & IS_MAC2)
3374 skge_mac_intr(hw, 1);
3375 }
3376
3377 if (status & IS_HW_ERR)
3378 skge_error_irq(hw);
3379
3380 skge_write32(hw, B0_IMSK, hw->intr_mask);
3381 skge_read32(hw, B0_IMSK);
3382out:
3383 spin_unlock(&hw->hw_lock);
3384
3385 return IRQ_RETVAL(handled);
3386}
3387
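/* Editor's sketch of the shared-IRQ discipline in skge_intr() above:
 * read the interrupt source once, return "not ours" when this device
 * did not raise the line (0) or has died (~0, all-ones reads from a
 * removed PCI device), and report to the core whether any work was
 * claimed. read_isr() is an assumed stand-in for the B0_SP_ISRC read. */
#include <stdint.h>

enum { IRQ_NONE_SKETCH = 0, IRQ_HANDLED_SKETCH = 1 };

static int shared_irq_handler(uint32_t (*read_isr)(void))
{
	uint32_t status = read_isr();	/* reading also masks the IRQ */

	if (status == 0 || status == ~0u)
		return IRQ_NONE_SKETCH;	/* not our device, or it is gone */

	/* ... dispatch per-port and error sources as above ... */
	return IRQ_HANDLED_SKETCH;
}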
3388#ifdef CONFIG_NET_POLL_CONTROLLER
3389static void skge_netpoll(struct net_device *dev)
3390{
3391 struct skge_port *skge = netdev_priv(dev);
3392
3393 disable_irq(dev->irq);
3394 skge_intr(dev->irq, skge->hw);
3395 enable_irq(dev->irq);
3396}
3397#endif
3398
3399static int skge_set_mac_address(struct net_device *dev, void *p)
3400{
3401 struct skge_port *skge = netdev_priv(dev);
3402 struct skge_hw *hw = skge->hw;
3403 unsigned port = skge->port;
3404 const struct sockaddr *addr = p;
3405 u16 ctrl;
3406
3407 if (!is_valid_ether_addr(addr->sa_data))
3408 return -EADDRNOTAVAIL;
3409
3410 memcpy(dev->dev_addr, addr->sa_data, ETH_ALEN);
3411
3412 if (!netif_running(dev)) {
3413 memcpy_toio(hw->regs + B2_MAC_1 + port*8, dev->dev_addr, ETH_ALEN);
3414 memcpy_toio(hw->regs + B2_MAC_2 + port*8, dev->dev_addr, ETH_ALEN);
3415 } else {
3416 /* disable Rx */
3417 spin_lock_bh(&hw->phy_lock);
3418 ctrl = gma_read16(hw, port, GM_GP_CTRL);
3419 gma_write16(hw, port, GM_GP_CTRL, ctrl & ~GM_GPCR_RX_ENA);
3420
3421 memcpy_toio(hw->regs + B2_MAC_1 + port*8, dev->dev_addr, ETH_ALEN);
3422 memcpy_toio(hw->regs + B2_MAC_2 + port*8, dev->dev_addr, ETH_ALEN);
3423
3424 if (is_genesis(hw))
3425 xm_outaddr(hw, port, XM_SA, dev->dev_addr);
3426 else {
3427 gma_set_addr(hw, port, GM_SRC_ADDR_1L, dev->dev_addr);
3428 gma_set_addr(hw, port, GM_SRC_ADDR_2L, dev->dev_addr);
3429 }
3430
3431 gma_write16(hw, port, GM_GP_CTRL, ctrl);
3432 spin_unlock_bh(&hw->phy_lock);
3433 }
3434
3435 return 0;
3436}
3437
3438static const struct {
3439 u8 id;
3440 const char *name;
3441} skge_chips[] = {
3442 { CHIP_ID_GENESIS, "Genesis" },
3443 { CHIP_ID_YUKON, "Yukon" },
3444 { CHIP_ID_YUKON_LITE, "Yukon-Lite"},
3445 { CHIP_ID_YUKON_LP, "Yukon-LP"},
3446};
3447
3448static const char *skge_board_name(const struct skge_hw *hw)
3449{
3450 int i;
3451 static char buf[16];
3452
3453 for (i = 0; i < ARRAY_SIZE(skge_chips); i++)
3454 if (skge_chips[i].id == hw->chip_id)
3455 return skge_chips[i].name;
3456
3457 snprintf(buf, sizeof buf, "chipid 0x%x", hw->chip_id);
3458 return buf;
3459}
3460
3461
3462/*
3463 * Set up the board data structure, but don't bring up
3464 * the port(s).
3465 */
3466static int skge_reset(struct skge_hw *hw)
3467{
3468 u32 reg;
3469 u16 ctst, pci_status;
3470 u8 t8, mac_cfg, pmd_type;
3471 int i;
3472
3473 ctst = skge_read16(hw, B0_CTST);
3474
3475 /* do a SW reset */
3476 skge_write8(hw, B0_CTST, CS_RST_SET);
3477 skge_write8(hw, B0_CTST, CS_RST_CLR);
3478
3479 /* clear PCI errors, if any */
3480 skge_write8(hw, B2_TST_CTRL1, TST_CFG_WRITE_ON);
3481 skge_write8(hw, B2_TST_CTRL2, 0);
3482
3483 pci_read_config_word(hw->pdev, PCI_STATUS, &pci_status);
3484 pci_write_config_word(hw->pdev, PCI_STATUS,
3485 pci_status | PCI_STATUS_ERROR_BITS);
3486 skge_write8(hw, B2_TST_CTRL1, TST_CFG_WRITE_OFF);
3487 skge_write8(hw, B0_CTST, CS_MRST_CLR);
3488
3489 /* restore CLK_RUN bits (for Yukon-Lite) */
3490 skge_write16(hw, B0_CTST,
3491 ctst & (CS_CLK_RUN_HOT|CS_CLK_RUN_RST|CS_CLK_RUN_ENA));
3492
3493 hw->chip_id = skge_read8(hw, B2_CHIP_ID);
3494 hw->phy_type = skge_read8(hw, B2_E_1) & 0xf;
3495 pmd_type = skge_read8(hw, B2_PMD_TYP);
3496 hw->copper = (pmd_type == 'T' || pmd_type == '1');
3497
3498 switch (hw->chip_id) {
3499 case CHIP_ID_GENESIS:
3500#ifdef CONFIG_SKGE_GENESIS
3501 switch (hw->phy_type) {
3502 case SK_PHY_XMAC:
3503 hw->phy_addr = PHY_ADDR_XMAC;
3504 break;
3505 case SK_PHY_BCOM:
3506 hw->phy_addr = PHY_ADDR_BCOM;
3507 break;
3508 default:
3509 dev_err(&hw->pdev->dev, "unsupported phy type 0x%x\n",
3510 hw->phy_type);
3511 return -EOPNOTSUPP;
3512 }
3513 break;
3514#else
3515 dev_err(&hw->pdev->dev, "Genesis chip detected but not configured\n");
3516 return -EOPNOTSUPP;
3517#endif
3518
3519 case CHIP_ID_YUKON:
3520 case CHIP_ID_YUKON_LITE:
3521 case CHIP_ID_YUKON_LP:
3522 if (hw->phy_type < SK_PHY_MARV_COPPER && pmd_type != 'S')
3523 hw->copper = 1;
3524
3525 hw->phy_addr = PHY_ADDR_MARV;
3526 break;
3527
3528 default:
3529 dev_err(&hw->pdev->dev, "unsupported chip type 0x%x\n",
3530 hw->chip_id);
3531 return -EOPNOTSUPP;
3532 }
3533
3534 mac_cfg = skge_read8(hw, B2_MAC_CFG);
3535 hw->ports = (mac_cfg & CFG_SNG_MAC) ? 1 : 2;
3536 hw->chip_rev = (mac_cfg & CFG_CHIP_R_MSK) >> 4;
3537
3538 /* read the adapters RAM size */
3539 t8 = skge_read8(hw, B2_E_0);
3540 if (is_genesis(hw)) {
3541 if (t8 == 3) {
3542 /* special case: 4 x 64k x 36, offset = 0x80000 */
3543 hw->ram_size = 0x100000;
3544 hw->ram_offset = 0x80000;
3545 } else
3546 hw->ram_size = t8 * 512;
3547 } else if (t8 == 0)
3548 hw->ram_size = 0x20000;
3549 else
3550 hw->ram_size = t8 * 4096;
3551
3552 hw->intr_mask = IS_HW_ERR;
3553
3554 /* Use PHY IRQ for all but fiber-based Genesis boards */
3555 if (!(is_genesis(hw) && hw->phy_type == SK_PHY_XMAC))
3556 hw->intr_mask |= IS_EXT_REG;
3557
3558 if (is_genesis(hw))
3559 genesis_init(hw);
3560 else {
3561 /* switch power to VCC (workaround for VAUX problem) */
3562 skge_write8(hw, B0_POWER_CTRL,
3563 PC_VAUX_ENA | PC_VCC_ENA | PC_VAUX_OFF | PC_VCC_ON);
3564
3565 /* avoid boards with stuck Hardware error bits */
3566 if ((skge_read32(hw, B0_ISRC) & IS_HW_ERR) &&
3567 (skge_read32(hw, B0_HWE_ISRC) & IS_IRQ_SENSOR)) {
3568 dev_warn(&hw->pdev->dev, "stuck hardware sensor bit\n");
3569 hw->intr_mask &= ~IS_HW_ERR;
3570 }
3571
3572 /* Clear PHY COMA */
3573 skge_write8(hw, B2_TST_CTRL1, TST_CFG_WRITE_ON);
3574 pci_read_config_dword(hw->pdev, PCI_DEV_REG1, &reg);
3575 reg &= ~PCI_PHY_COMA;
3576 pci_write_config_dword(hw->pdev, PCI_DEV_REG1, reg);
3577 skge_write8(hw, B2_TST_CTRL1, TST_CFG_WRITE_OFF);
3578
3579
3580 for (i = 0; i < hw->ports; i++) {
3581 skge_write16(hw, SK_REG(i, GMAC_LINK_CTRL), GMLC_RST_SET);
3582 skge_write16(hw, SK_REG(i, GMAC_LINK_CTRL), GMLC_RST_CLR);
3583 }
3584 }
3585
3586 /* turn off hardware timer (unused) */
3587 skge_write8(hw, B2_TI_CTRL, TIM_STOP);
3588 skge_write8(hw, B2_TI_CTRL, TIM_CLR_IRQ);
3589 skge_write8(hw, B0_LED, LED_STAT_ON);
3590
3591 /* enable the Tx Arbiters */
3592 for (i = 0; i < hw->ports; i++)
3593 skge_write8(hw, SK_REG(i, TXA_CTRL), TXA_ENA_ARB);
3594
3595 /* Initialize ram interface */
3596 skge_write16(hw, B3_RI_CTRL, RI_RST_CLR);
3597
3598 skge_write8(hw, B3_RI_WTO_R1, SK_RI_TO_53);
3599 skge_write8(hw, B3_RI_WTO_XA1, SK_RI_TO_53);
3600 skge_write8(hw, B3_RI_WTO_XS1, SK_RI_TO_53);
3601 skge_write8(hw, B3_RI_RTO_R1, SK_RI_TO_53);
3602 skge_write8(hw, B3_RI_RTO_XA1, SK_RI_TO_53);
3603 skge_write8(hw, B3_RI_RTO_XS1, SK_RI_TO_53);
3604 skge_write8(hw, B3_RI_WTO_R2, SK_RI_TO_53);
3605 skge_write8(hw, B3_RI_WTO_XA2, SK_RI_TO_53);
3606 skge_write8(hw, B3_RI_WTO_XS2, SK_RI_TO_53);
3607 skge_write8(hw, B3_RI_RTO_R2, SK_RI_TO_53);
3608 skge_write8(hw, B3_RI_RTO_XA2, SK_RI_TO_53);
3609 skge_write8(hw, B3_RI_RTO_XS2, SK_RI_TO_53);
3610
3611 skge_write32(hw, B0_HWE_IMSK, IS_ERR_MSK);
3612
3613 /* Set interrupt moderation for transmit only;
3614 * receive interrupts are avoided by NAPI
3615 */
3616 skge_write32(hw, B2_IRQM_MSK, IS_XA1_F|IS_XA2_F);
3617 skge_write32(hw, B2_IRQM_INI, skge_usecs2clk(hw, 100));
3618 skge_write32(hw, B2_IRQM_CTRL, TIM_START);
3619
3620 /* Leave irq disabled until first port is brought up. */
3621 skge_write32(hw, B0_IMSK, 0);
3622
3623 for (i = 0; i < hw->ports; i++) {
3624 if (is_genesis(hw))
3625 genesis_reset(hw, i);
3626 else
3627 yukon_reset(hw, i);
3628 }
3629
3630 return 0;
3631}
3632
3633
3634#ifdef CONFIG_SKGE_DEBUG
3635
3636static struct dentry *skge_debug;
3637
3638static int skge_debug_show(struct seq_file *seq, void *v)
3639{
3640 struct net_device *dev = seq->private;
3641 const struct skge_port *skge = netdev_priv(dev);
3642 const struct skge_hw *hw = skge->hw;
3643 const struct skge_element *e;
3644
3645 if (!netif_running(dev))
3646 return -ENETDOWN;
3647
3648 seq_printf(seq, "IRQ src=%x mask=%x\n", skge_read32(hw, B0_ISRC),
3649 skge_read32(hw, B0_IMSK));
3650
3651 seq_printf(seq, "Tx Ring: (%d)\n", skge_avail(&skge->tx_ring));
3652 for (e = skge->tx_ring.to_clean; e != skge->tx_ring.to_use; e = e->next) {
3653 const struct skge_tx_desc *t = e->desc;
3654 seq_printf(seq, "%#x dma=%#x%08x %#x csum=%#x/%x/%x\n",
3655 t->control, t->dma_hi, t->dma_lo, t->status,
3656 t->csum_offs, t->csum_write, t->csum_start);
3657 }
3658
3659 seq_printf(seq, "\nRx Ring:\n");
3660 for (e = skge->rx_ring.to_clean; ; e = e->next) {
3661 const struct skge_rx_desc *r = e->desc;
3662
3663 if (r->control & BMU_OWN)
3664 break;
3665
3666 seq_printf(seq, "%#x dma=%#x%08x %#x %#x csum=%#x/%x\n",
3667 r->control, r->dma_hi, r->dma_lo, r->status,
3668 r->timestamp, r->csum1, r->csum1_start);
3669 }
3670
3671 return 0;
3672}
3673
3674static int skge_debug_open(struct inode *inode, struct file *file)
3675{
3676 return single_open(file, skge_debug_show, inode->i_private);
3677}
3678
3679static const struct file_operations skge_debug_fops = {
3680 .owner = THIS_MODULE,
3681 .open = skge_debug_open,
3682 .read = seq_read,
3683 .llseek = seq_lseek,
3684 .release = single_release,
3685};
3686
3687/*
3688 * Use network device events to create/remove/rename
3689 * debugfs file entries
3690 */
3691static int skge_device_event(struct notifier_block *unused,
3692 unsigned long event, void *ptr)
3693{
3694 struct net_device *dev = ptr;
3695 struct skge_port *skge;
3696 struct dentry *d;
3697
3698 if (dev->netdev_ops->ndo_open != &skge_up || !skge_debug)
3699 goto done;
3700
3701 skge = netdev_priv(dev);
3702 switch (event) {
3703 case NETDEV_CHANGENAME:
3704 if (skge->debugfs) {
3705 d = debugfs_rename(skge_debug, skge->debugfs,
3706 skge_debug, dev->name);
3707 if (d)
3708 skge->debugfs = d;
3709 else {
3710 netdev_info(dev, "rename failed\n");
3711 debugfs_remove(skge->debugfs);
3712 }
3713 }
3714 break;
3715
3716 case NETDEV_GOING_DOWN:
3717 if (skge->debugfs) {
3718 debugfs_remove(skge->debugfs);
3719 skge->debugfs = NULL;
3720 }
3721 break;
3722
3723 case NETDEV_UP:
3724 d = debugfs_create_file(dev->name, S_IRUGO,
3725 skge_debug, dev,
3726 &skge_debug_fops);
3727 if (!d || IS_ERR(d))
3728 netdev_info(dev, "debugfs create failed\n");
3729 else
3730 skge->debugfs = d;
3731 break;
3732 }
3733
3734done:
3735 return NOTIFY_DONE;
3736}
3737
3738static struct notifier_block skge_notifier = {
3739 .notifier_call = skge_device_event,
3740};
3741
3742
3743static __init void skge_debug_init(void)
3744{
3745 struct dentry *ent;
3746
3747 ent = debugfs_create_dir("skge", NULL);
3748 if (!ent || IS_ERR(ent)) {
3749 pr_info("debugfs create directory failed\n");
3750 return;
3751 }
3752
3753 skge_debug = ent;
3754 register_netdevice_notifier(&skge_notifier);
3755}
3756
3757static __exit void skge_debug_cleanup(void)
3758{
3759 if (skge_debug) {
3760 unregister_netdevice_notifier(&skge_notifier);
3761 debugfs_remove(skge_debug);
3762 skge_debug = NULL;
3763 }
3764}
3765
3766#else
3767#define skge_debug_init()
3768#define skge_debug_cleanup()
3769#endif
3770
3771static const struct net_device_ops skge_netdev_ops = {
3772 .ndo_open = skge_up,
3773 .ndo_stop = skge_down,
3774 .ndo_start_xmit = skge_xmit_frame,
3775 .ndo_do_ioctl = skge_ioctl,
3776 .ndo_get_stats = skge_get_stats,
3777 .ndo_tx_timeout = skge_tx_timeout,
3778 .ndo_change_mtu = skge_change_mtu,
3779 .ndo_validate_addr = eth_validate_addr,
3780 .ndo_set_rx_mode = skge_set_multicast,
3781 .ndo_set_mac_address = skge_set_mac_address,
3782#ifdef CONFIG_NET_POLL_CONTROLLER
3783 .ndo_poll_controller = skge_netpoll,
3784#endif
3785};
3786
3787
3788/* Initialize network device */
3789static struct net_device *skge_devinit(struct skge_hw *hw, int port,
3790 int highmem)
3791{
3792 struct skge_port *skge;
3793 struct net_device *dev = alloc_etherdev(sizeof(*skge));
3794
3795 if (!dev) {
3796 dev_err(&hw->pdev->dev, "etherdev alloc failed\n");
3797 return NULL;
3798 }
3799
3800 SET_NETDEV_DEV(dev, &hw->pdev->dev);
3801 dev->netdev_ops = &skge_netdev_ops;
3802 dev->ethtool_ops = &skge_ethtool_ops;
3803 dev->watchdog_timeo = TX_WATCHDOG;
3804 dev->irq = hw->pdev->irq;
3805
3806 if (highmem)
3807 dev->features |= NETIF_F_HIGHDMA;
3808
3809 skge = netdev_priv(dev);
3810 netif_napi_add(dev, &skge->napi, skge_poll, NAPI_WEIGHT);
3811 skge->netdev = dev;
3812 skge->hw = hw;
3813 skge->msg_enable = netif_msg_init(debug, default_msg);
3814
3815 skge->tx_ring.count = DEFAULT_TX_RING_SIZE;
3816 skge->rx_ring.count = DEFAULT_RX_RING_SIZE;
3817
3818 /* Auto speed and flow control */
3819 skge->autoneg = AUTONEG_ENABLE;
3820 skge->flow_control = FLOW_MODE_SYM_OR_REM;
3821 skge->duplex = -1;
3822 skge->speed = -1;
3823 skge->advertising = skge_supported_modes(hw);
3824
3825 if (device_can_wakeup(&hw->pdev->dev)) {
3826 skge->wol = wol_supported(hw) & WAKE_MAGIC;
3827 device_set_wakeup_enable(&hw->pdev->dev, skge->wol);
3828 }
3829
3830 hw->dev[port] = dev;
3831
3832 skge->port = port;
3833
3834 /* Only used for Genesis XMAC */
3835 if (is_genesis(hw))
3836 setup_timer(&skge->link_timer, xm_link_timer, (unsigned long) skge);
3837 else {
3838 dev->hw_features = NETIF_F_IP_CSUM | NETIF_F_SG |
3839 NETIF_F_RXCSUM;
3840 dev->features |= dev->hw_features;
3841 }
3842
3843 /* read the mac address */
3844 memcpy_fromio(dev->dev_addr, hw->regs + B2_MAC_1 + port*8, ETH_ALEN);
3845 memcpy(dev->perm_addr, dev->dev_addr, dev->addr_len);
3846
3847 return dev;
3848}
3849
3850static void __devinit skge_show_addr(struct net_device *dev)
3851{
3852 const struct skge_port *skge = netdev_priv(dev);
3853
3854 netif_info(skge, probe, skge->netdev, "addr %pM\n", dev->dev_addr);
3855}
3856
3857static int only_32bit_dma;
3858
3859static int __devinit skge_probe(struct pci_dev *pdev,
3860 const struct pci_device_id *ent)
3861{
3862 struct net_device *dev, *dev1;
3863 struct skge_hw *hw;
3864 int err, using_dac = 0;
3865
3866 err = pci_enable_device(pdev);
3867 if (err) {
3868 dev_err(&pdev->dev, "cannot enable PCI device\n");
3869 goto err_out;
3870 }
3871
3872 err = pci_request_regions(pdev, DRV_NAME);
3873 if (err) {
3874 dev_err(&pdev->dev, "cannot obtain PCI resources\n");
3875 goto err_out_disable_pdev;
3876 }
3877
3878 pci_set_master(pdev);
3879
3880 if (!only_32bit_dma && !pci_set_dma_mask(pdev, DMA_BIT_MASK(64))) {
3881 using_dac = 1;
3882 err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64));
3883 } else if (!(err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32)))) {
3884 using_dac = 0;
3885 err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32));
3886 }
3887
3888 if (err) {
3889 dev_err(&pdev->dev, "no usable DMA configuration\n");
3890 goto err_out_free_regions;
3891 }
3892
3893#ifdef __BIG_ENDIAN
3894 /* byte swap descriptors in hardware */
3895 {
3896 u32 reg;
3897
3898 pci_read_config_dword(pdev, PCI_DEV_REG2, &reg);
3899 reg |= PCI_REV_DESC;
3900 pci_write_config_dword(pdev, PCI_DEV_REG2, reg);
3901 }
3902#endif
3903
3904 err = -ENOMEM;
3905 /* space for skge@pci:0000:04:00.0 */
3906 hw = kzalloc(sizeof(*hw) + strlen(DRV_NAME "@pci:")
3907 + strlen(pci_name(pdev)) + 1, GFP_KERNEL);
3908 if (!hw) {
3909 dev_err(&pdev->dev, "cannot allocate hardware struct\n");
3910 goto err_out_free_regions;
3911 }
3912 sprintf(hw->irq_name, DRV_NAME "@pci:%s", pci_name(pdev));
3913
3914 hw->pdev = pdev;
3915 spin_lock_init(&hw->hw_lock);
3916 spin_lock_init(&hw->phy_lock);
3917 tasklet_init(&hw->phy_task, skge_extirq, (unsigned long) hw);
3918
3919 hw->regs = ioremap_nocache(pci_resource_start(pdev, 0), 0x4000);
3920 if (!hw->regs) {
3921 dev_err(&pdev->dev, "cannot map device registers\n");
3922 goto err_out_free_hw;
3923 }
3924
3925 err = skge_reset(hw);
3926 if (err)
3927 goto err_out_iounmap;
3928
3929 pr_info("%s addr 0x%llx irq %d chip %s rev %d\n",
3930 DRV_VERSION,
3931 (unsigned long long)pci_resource_start(pdev, 0), pdev->irq,
3932 skge_board_name(hw), hw->chip_rev);
3933
3934 dev = skge_devinit(hw, 0, using_dac);
3935 if (!dev)
3936 goto err_out_led_off;
3937
3938 /* Some motherboards are broken and have a zero MAC address in ROM. */
3939 if (!is_valid_ether_addr(dev->dev_addr))
3940 dev_warn(&pdev->dev, "bad (zero?) ethernet address in rom\n");
3941
3942 err = register_netdev(dev);
3943 if (err) {
3944 dev_err(&pdev->dev, "cannot register net device\n");
3945 goto err_out_free_netdev;
3946 }
3947
3948 skge_show_addr(dev);
3949
3950 if (hw->ports > 1) {
3951 dev1 = skge_devinit(hw, 1, using_dac);
3952 if (!dev1) {
3953 err = -ENOMEM;
3954 goto err_out_unregister;
3955 }
3956
3957 err = register_netdev(dev1);
3958 if (err) {
3959 dev_err(&pdev->dev, "cannot register second net device\n");
3960 goto err_out_free_dev1;
3961 }
3962
3963 err = request_irq(pdev->irq, skge_intr, IRQF_SHARED,
3964 hw->irq_name, hw);
3965 if (err) {
3966 dev_err(&pdev->dev, "cannot assign irq %d\n",
3967 pdev->irq);
3968 goto err_out_unregister_dev1;
3969 }
3970
3971 skge_show_addr(dev1);
3972 }
3973 pci_set_drvdata(pdev, hw);
3974
3975 return 0;
3976
3977err_out_unregister_dev1:
3978 unregister_netdev(dev1);
3979err_out_free_dev1:
3980 free_netdev(dev1);
3981err_out_unregister:
3982 unregister_netdev(dev);
3983err_out_free_netdev:
3984 free_netdev(dev);
3985err_out_led_off:
3986 skge_write16(hw, B0_LED, LED_STAT_OFF);
3987err_out_iounmap:
3988 iounmap(hw->regs);
3989err_out_free_hw:
3990 kfree(hw);
3991err_out_free_regions:
3992 pci_release_regions(pdev);
3993err_out_disable_pdev:
3994 pci_disable_device(pdev);
3995 pci_set_drvdata(pdev, NULL);
3996err_out:
3997 return err;
3998}
3999
4000static void __devexit skge_remove(struct pci_dev *pdev)
4001{
4002 struct skge_hw *hw = pci_get_drvdata(pdev);
4003 struct net_device *dev0, *dev1;
4004
4005 if (!hw)
4006 return;
4007
4008 dev1 = hw->dev[1];
4009 if (dev1)
4010 unregister_netdev(dev1);
4011 dev0 = hw->dev[0];
4012 unregister_netdev(dev0);
4013
4014 tasklet_disable(&hw->phy_task);
4015
4016 spin_lock_irq(&hw->hw_lock);
4017 hw->intr_mask = 0;
4018
4019 if (hw->ports > 1) {
4020 skge_write32(hw, B0_IMSK, 0);
4021 skge_read32(hw, B0_IMSK);
4023 }
4024 spin_unlock_irq(&hw->hw_lock);
4025
4026 skge_write16(hw, B0_LED, LED_STAT_OFF);
4027 skge_write8(hw, B0_CTST, CS_RST_SET);
4028
4029 if (hw->ports > 1)
4030 free_irq(pdev->irq, hw);
4031 pci_release_regions(pdev);
4032 pci_disable_device(pdev);
4033 if (dev1)
4034 free_netdev(dev1);
4035 free_netdev(dev0);
4036
4037 iounmap(hw->regs);
4038 kfree(hw);
4039 pci_set_drvdata(pdev, NULL);
4040}
4041
4042#ifdef CONFIG_PM
4043static int skge_suspend(struct device *dev)
4044{
4045 struct pci_dev *pdev = to_pci_dev(dev);
4046 struct skge_hw *hw = pci_get_drvdata(pdev);
4047 int i;
4048
4049 if (!hw)
4050 return 0;
4051
4052 for (i = 0; i < hw->ports; i++) {
4053 struct net_device *dev = hw->dev[i];
4054 struct skge_port *skge = netdev_priv(dev);
4055
4056 if (netif_running(dev))
4057 skge_down(dev);
4058
4059 if (skge->wol)
4060 skge_wol_init(skge);
4061 }
4062
4063 skge_write32(hw, B0_IMSK, 0);
4064
4065 return 0;
4066}
4067
4068static int skge_resume(struct device *dev)
4069{
4070 struct pci_dev *pdev = to_pci_dev(dev);
4071 struct skge_hw *hw = pci_get_drvdata(pdev);
4072 int i, err;
4073
4074 if (!hw)
4075 return 0;
4076
4077 err = skge_reset(hw);
4078 if (err)
4079 goto out;
4080
4081 for (i = 0; i < hw->ports; i++) {
4082 struct net_device *dev = hw->dev[i];
4083
4084 if (netif_running(dev)) {
4085 err = skge_up(dev);
4086
4087 if (err) {
4088 netdev_err(dev, "could not up: %d\n", err);
4089 dev_close(dev);
4090 goto out;
4091 }
4092 }
4093 }
4094out:
4095 return err;
4096}
4097
4098static SIMPLE_DEV_PM_OPS(skge_pm_ops, skge_suspend, skge_resume);
4099#define SKGE_PM_OPS (&skge_pm_ops)
4100
4101#else
4102
4103#define SKGE_PM_OPS NULL
4104#endif
4105
4106static void skge_shutdown(struct pci_dev *pdev)
4107{
4108 struct skge_hw *hw = pci_get_drvdata(pdev);
4109 int i;
4110
4111 if (!hw)
4112 return;
4113
4114 for (i = 0; i < hw->ports; i++) {
4115 struct net_device *dev = hw->dev[i];
4116 struct skge_port *skge = netdev_priv(dev);
4117
4118 if (skge->wol)
4119 skge_wol_init(skge);
4120 }
4121
4122 pci_wake_from_d3(pdev, device_may_wakeup(&pdev->dev));
4123 pci_set_power_state(pdev, PCI_D3hot);
4124}
4125
4126static struct pci_driver skge_driver = {
4127 .name = DRV_NAME,
4128 .id_table = skge_id_table,
4129 .probe = skge_probe,
4130 .remove = __devexit_p(skge_remove),
4131 .shutdown = skge_shutdown,
4132 .driver.pm = SKGE_PM_OPS,
4133};
4134
4135static struct dmi_system_id skge_32bit_dma_boards[] = {
4136 {
4137 .ident = "Gigabyte nForce boards",
4138 .matches = {
4139 DMI_MATCH(DMI_BOARD_VENDOR, "Gigabyte Technology Co"),
4140 DMI_MATCH(DMI_BOARD_NAME, "nForce"),
4141 },
4142 },
4143 {}
4144};
4145
4146static int __init skge_init_module(void)
4147{
4148 if (dmi_check_system(skge_32bit_dma_boards))
4149 only_32bit_dma = 1;
4150 skge_debug_init();
4151 return pci_register_driver(&skge_driver);
4152}
4153
4154static void __exit skge_cleanup_module(void)
4155{
4156 pci_unregister_driver(&skge_driver);
4157 skge_debug_cleanup();
4158}
4159
4160module_init(skge_init_module);
4161module_exit(skge_cleanup_module);
diff --git a/drivers/net/ethernet/marvell/skge.h b/drivers/net/ethernet/marvell/skge.h
new file mode 100644
index 000000000000..a2eb34115844
--- /dev/null
+++ b/drivers/net/ethernet/marvell/skge.h
@@ -0,0 +1,2584 @@
1/*
2 * Definitions for the new Marvell Yukon / SysKonnect driver.
3 */
4#ifndef _SKGE_H
5#define _SKGE_H
6#include <linux/interrupt.h>
7
8/* PCI config registers */
9#define PCI_DEV_REG1 0x40
10#define PCI_PHY_COMA 0x8000000
11#define PCI_VIO 0x2000000
12
13#define PCI_DEV_REG2 0x44
14#define PCI_VPD_ROM_SZ 7L<<14 /* VPD ROM size 0=256, 1=512, ... */
15#define PCI_REV_DESC 1<<2 /* Reverse Descriptor bytes */
16
17#define PCI_STATUS_ERROR_BITS (PCI_STATUS_DETECTED_PARITY | \
18 PCI_STATUS_SIG_SYSTEM_ERROR | \
19 PCI_STATUS_REC_MASTER_ABORT | \
20 PCI_STATUS_REC_TARGET_ABORT | \
21 PCI_STATUS_PARITY)
22
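/* Editor's sketch: the PCI status error bits above are write-one-to-
 * clear, which is why skge_error_irq() and skge_reset() write the
 * masked value they just read straight back into PCI_STATUS. The idiom,
 * with assumed read/write helpers passed in: */
#include <stdint.h>

static uint16_t clear_pci_errors(uint16_t (*read_status)(void),
				 void (*write_status)(uint16_t),
				 uint16_t error_bits)
{
	uint16_t status = read_status();

	/* writing 1 to a set error bit clears it; 0 bits are untouched */
	write_status(status & error_bits);
	return status;
}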
23enum csr_regs {
24 B0_RAP = 0x0000,
25 B0_CTST = 0x0004,
26 B0_LED = 0x0006,
27 B0_POWER_CTRL = 0x0007,
28 B0_ISRC = 0x0008,
29 B0_IMSK = 0x000c,
30 B0_HWE_ISRC = 0x0010,
31 B0_HWE_IMSK = 0x0014,
32 B0_SP_ISRC = 0x0018,
33 B0_XM1_IMSK = 0x0020,
34 B0_XM1_ISRC = 0x0028,
35 B0_XM1_PHY_ADDR = 0x0030,
36 B0_XM1_PHY_DATA = 0x0034,
37 B0_XM2_IMSK = 0x0040,
38 B0_XM2_ISRC = 0x0048,
39 B0_XM2_PHY_ADDR = 0x0050,
40 B0_XM2_PHY_DATA = 0x0054,
41 B0_R1_CSR = 0x0060,
42 B0_R2_CSR = 0x0064,
43 B0_XS1_CSR = 0x0068,
44 B0_XA1_CSR = 0x006c,
45 B0_XS2_CSR = 0x0070,
46 B0_XA2_CSR = 0x0074,
47
48 B2_MAC_1 = 0x0100,
49 B2_MAC_2 = 0x0108,
50 B2_MAC_3 = 0x0110,
51 B2_CONN_TYP = 0x0118,
52 B2_PMD_TYP = 0x0119,
53 B2_MAC_CFG = 0x011a,
54 B2_CHIP_ID = 0x011b,
55 B2_E_0 = 0x011c,
56 B2_E_1 = 0x011d,
57 B2_E_2 = 0x011e,
58 B2_E_3 = 0x011f,
59 B2_FAR = 0x0120,
60 B2_FDP = 0x0124,
61 B2_LD_CTRL = 0x0128,
62 B2_LD_TEST = 0x0129,
63 B2_TI_INI = 0x0130,
64 B2_TI_VAL = 0x0134,
65 B2_TI_CTRL = 0x0138,
66 B2_TI_TEST = 0x0139,
67 B2_IRQM_INI = 0x0140,
68 B2_IRQM_VAL = 0x0144,
69 B2_IRQM_CTRL = 0x0148,
70 B2_IRQM_TEST = 0x0149,
71 B2_IRQM_MSK = 0x014c,
72 B2_IRQM_HWE_MSK = 0x0150,
73 B2_TST_CTRL1 = 0x0158,
74 B2_TST_CTRL2 = 0x0159,
75 B2_GP_IO = 0x015c,
76 B2_I2C_CTRL = 0x0160,
77 B2_I2C_DATA = 0x0164,
78 B2_I2C_IRQ = 0x0168,
79 B2_I2C_SW = 0x016c,
80 B2_BSC_INI = 0x0170,
81 B2_BSC_VAL = 0x0174,
82 B2_BSC_CTRL = 0x0178,
83 B2_BSC_STAT = 0x0179,
84 B2_BSC_TST = 0x017a,
85
86 B3_RAM_ADDR = 0x0180,
87 B3_RAM_DATA_LO = 0x0184,
88 B3_RAM_DATA_HI = 0x0188,
89 B3_RI_WTO_R1 = 0x0190,
90 B3_RI_WTO_XA1 = 0x0191,
91 B3_RI_WTO_XS1 = 0x0192,
92 B3_RI_RTO_R1 = 0x0193,
93 B3_RI_RTO_XA1 = 0x0194,
94 B3_RI_RTO_XS1 = 0x0195,
95 B3_RI_WTO_R2 = 0x0196,
96 B3_RI_WTO_XA2 = 0x0197,
97 B3_RI_WTO_XS2 = 0x0198,
98 B3_RI_RTO_R2 = 0x0199,
99 B3_RI_RTO_XA2 = 0x019a,
100 B3_RI_RTO_XS2 = 0x019b,
101 B3_RI_TO_VAL = 0x019c,
102 B3_RI_CTRL = 0x01a0,
103 B3_RI_TEST = 0x01a2,
104 B3_MA_TOINI_RX1 = 0x01b0,
105 B3_MA_TOINI_RX2 = 0x01b1,
106 B3_MA_TOINI_TX1 = 0x01b2,
107 B3_MA_TOINI_TX2 = 0x01b3,
108 B3_MA_TOVAL_RX1 = 0x01b4,
109 B3_MA_TOVAL_RX2 = 0x01b5,
110 B3_MA_TOVAL_TX1 = 0x01b6,
111 B3_MA_TOVAL_TX2 = 0x01b7,
112 B3_MA_TO_CTRL = 0x01b8,
113 B3_MA_TO_TEST = 0x01ba,
114 B3_MA_RCINI_RX1 = 0x01c0,
115 B3_MA_RCINI_RX2 = 0x01c1,
116 B3_MA_RCINI_TX1 = 0x01c2,
117 B3_MA_RCINI_TX2 = 0x01c3,
118 B3_MA_RCVAL_RX1 = 0x01c4,
119 B3_MA_RCVAL_RX2 = 0x01c5,
120 B3_MA_RCVAL_TX1 = 0x01c6,
121 B3_MA_RCVAL_TX2 = 0x01c7,
122 B3_MA_RC_CTRL = 0x01c8,
123 B3_MA_RC_TEST = 0x01ca,
124 B3_PA_TOINI_RX1 = 0x01d0,
125 B3_PA_TOINI_RX2 = 0x01d4,
126 B3_PA_TOINI_TX1 = 0x01d8,
127 B3_PA_TOINI_TX2 = 0x01dc,
128 B3_PA_TOVAL_RX1 = 0x01e0,
129 B3_PA_TOVAL_RX2 = 0x01e4,
130 B3_PA_TOVAL_TX1 = 0x01e8,
131 B3_PA_TOVAL_TX2 = 0x01ec,
132 B3_PA_CTRL = 0x01f0,
133 B3_PA_TEST = 0x01f2,
134};
135
136/* B0_CTST 16 bit Control/Status register */
137enum {
138 CS_CLK_RUN_HOT = 1<<13,/* CLK_RUN hot mode (YUKON-Lite only) */
139 CS_CLK_RUN_RST = 1<<12,/* CLK_RUN reset (YUKON-Lite only) */
140 CS_CLK_RUN_ENA = 1<<11,/* CLK_RUN enable (YUKON-Lite only) */
141 CS_VAUX_AVAIL = 1<<10,/* VAUX available (YUKON only) */
142 CS_BUS_CLOCK = 1<<9, /* Bus Clock 0/1 = 33/66 MHz */
143 CS_BUS_SLOT_SZ = 1<<8, /* Slot Size 0/1 = 32/64 bit slot */
144 CS_ST_SW_IRQ = 1<<7, /* Set IRQ SW Request */
145 CS_CL_SW_IRQ = 1<<6, /* Clear IRQ SW Request */
146 CS_STOP_DONE = 1<<5, /* Stop Master is finished */
147 CS_STOP_MAST = 1<<4, /* Command Bit to stop the master */
148 CS_MRST_CLR = 1<<3, /* Clear Master reset */
149 CS_MRST_SET = 1<<2, /* Set Master reset */
150 CS_RST_CLR = 1<<1, /* Clear Software reset */
151 CS_RST_SET = 1, /* Set Software reset */
152
153/* B0_LED 8 Bit LED register */
154/* Bit 7.. 2: reserved */
155 LED_STAT_ON = 1<<1, /* Status LED on */
156 LED_STAT_OFF = 1, /* Status LED off */
157
158/* B0_POWER_CTRL 8 Bit Power Control reg (YUKON only) */
159 PC_VAUX_ENA = 1<<7, /* Switch VAUX Enable */
160 PC_VAUX_DIS = 1<<6, /* Switch VAUX Disable */
161 PC_VCC_ENA = 1<<5, /* Switch VCC Enable */
162 PC_VCC_DIS = 1<<4, /* Switch VCC Disable */
163 PC_VAUX_ON = 1<<3, /* Switch VAUX On */
164 PC_VAUX_OFF = 1<<2, /* Switch VAUX Off */
165 PC_VCC_ON = 1<<1, /* Switch VCC On */
166 PC_VCC_OFF = 1<<0, /* Switch VCC Off */
167};
168
169/* B2_IRQM_MSK 32 bit IRQ Moderation Mask */
170enum {
171 IS_ALL_MSK = 0xbffffffful, /* All Interrupt bits */
172 IS_HW_ERR = 1<<31, /* Interrupt HW Error */
173 /* Bit 30: reserved */
174 IS_PA_TO_RX1 = 1<<29, /* Packet Arb Timeout Rx1 */
175 IS_PA_TO_RX2 = 1<<28, /* Packet Arb Timeout Rx2 */
176 IS_PA_TO_TX1 = 1<<27, /* Packet Arb Timeout Tx1 */
177 IS_PA_TO_TX2 = 1<<26, /* Packet Arb Timeout Tx2 */
178 IS_I2C_READY = 1<<25, /* IRQ on end of I2C Tx */
179 IS_IRQ_SW = 1<<24, /* SW forced IRQ */
180 IS_EXT_REG = 1<<23, /* IRQ from LM80 or PHY (GENESIS only) */
181 /* IRQ from PHY (YUKON only) */
182 IS_TIMINT = 1<<22, /* IRQ from Timer */
183 IS_MAC1 = 1<<21, /* IRQ from MAC 1 */
184 IS_LNK_SYNC_M1 = 1<<20, /* Link Sync Cnt wrap MAC 1 */
185 IS_MAC2 = 1<<19, /* IRQ from MAC 2 */
186 IS_LNK_SYNC_M2 = 1<<18, /* Link Sync Cnt wrap MAC 2 */
187/* Receive Queue 1 */
188 IS_R1_B = 1<<17, /* Q_R1 End of Buffer */
189 IS_R1_F = 1<<16, /* Q_R1 End of Frame */
190 IS_R1_C = 1<<15, /* Q_R1 Encoding Error */
191/* Receive Queue 2 */
192 IS_R2_B = 1<<14, /* Q_R2 End of Buffer */
193 IS_R2_F = 1<<13, /* Q_R2 End of Frame */
194 IS_R2_C = 1<<12, /* Q_R2 Encoding Error */
195/* Synchronous Transmit Queue 1 */
196 IS_XS1_B = 1<<11, /* Q_XS1 End of Buffer */
197 IS_XS1_F = 1<<10, /* Q_XS1 End of Frame */
198 IS_XS1_C = 1<<9, /* Q_XS1 Encoding Error */
199/* Asynchronous Transmit Queue 1 */
200 IS_XA1_B = 1<<8, /* Q_XA1 End of Buffer */
201 IS_XA1_F = 1<<7, /* Q_XA1 End of Frame */
202 IS_XA1_C = 1<<6, /* Q_XA1 Encoding Error */
203/* Synchronous Transmit Queue 2 */
204 IS_XS2_B = 1<<5, /* Q_XS2 End of Buffer */
205 IS_XS2_F = 1<<4, /* Q_XS2 End of Frame */
206 IS_XS2_C = 1<<3, /* Q_XS2 Encoding Error */
207/* Asynchronous Transmit Queue 2 */
208 IS_XA2_B = 1<<2, /* Q_XA2 End of Buffer */
209 IS_XA2_F = 1<<1, /* Q_XA2 End of Frame */
210 IS_XA2_C = 1<<0, /* Q_XA2 Encoding Error */
211
212 IS_TO_PORT1 = IS_PA_TO_RX1 | IS_PA_TO_TX1,
213 IS_TO_PORT2 = IS_PA_TO_RX2 | IS_PA_TO_TX2,
214
215 IS_PORT_1 = IS_XA1_F| IS_R1_F | IS_TO_PORT1 | IS_MAC1,
216 IS_PORT_2 = IS_XA2_F| IS_R2_F | IS_TO_PORT2 | IS_MAC2,
217};
218
219
220/* B2_IRQM_HWE_MSK 32 bit IRQ Moderation HW Error Mask */
221enum {
222 IS_IRQ_TIST_OV = 1<<13, /* Time Stamp Timer Overflow (YUKON only) */
223 IS_IRQ_SENSOR = 1<<12, /* IRQ from Sensor (YUKON only) */
224 IS_IRQ_MST_ERR = 1<<11, /* IRQ master error detected */
225 IS_IRQ_STAT = 1<<10, /* IRQ status exception */
226 IS_NO_STAT_M1 = 1<<9, /* No Rx Status from MAC 1 */
227 IS_NO_STAT_M2 = 1<<8, /* No Rx Status from MAC 2 */
228 IS_NO_TIST_M1 = 1<<7, /* No Time Stamp from MAC 1 */
229 IS_NO_TIST_M2 = 1<<6, /* No Time Stamp from MAC 2 */
230 IS_RAM_RD_PAR = 1<<5, /* RAM Read Parity Error */
231 IS_RAM_WR_PAR = 1<<4, /* RAM Write Parity Error */
232 IS_M1_PAR_ERR = 1<<3, /* MAC 1 Parity Error */
233 IS_M2_PAR_ERR = 1<<2, /* MAC 2 Parity Error */
234 IS_R1_PAR_ERR = 1<<1, /* Queue R1 Parity Error */
235 IS_R2_PAR_ERR = 1<<0, /* Queue R2 Parity Error */
236
237 IS_ERR_MSK = IS_IRQ_MST_ERR | IS_IRQ_STAT
238 | IS_RAM_RD_PAR | IS_RAM_WR_PAR
239 | IS_M1_PAR_ERR | IS_M2_PAR_ERR
240 | IS_R1_PAR_ERR | IS_R2_PAR_ERR,
241};
242
243/* B2_TST_CTRL1 8 bit Test Control Register 1 */
244enum {
245 TST_FRC_DPERR_MR = 1<<7, /* force DATAPERR on MST RD */
246 TST_FRC_DPERR_MW = 1<<6, /* force DATAPERR on MST WR */
247 TST_FRC_DPERR_TR = 1<<5, /* force DATAPERR on TRG RD */
248 TST_FRC_DPERR_TW = 1<<4, /* force DATAPERR on TRG WR */
249 TST_FRC_APERR_M = 1<<3, /* force ADDRPERR on MST */
250 TST_FRC_APERR_T = 1<<2, /* force ADDRPERR on TRG */
251 TST_CFG_WRITE_ON = 1<<1, /* Enable Config Reg WR */
252 TST_CFG_WRITE_OFF= 1<<0, /* Disable Config Reg WR */
253};
254
255/* B2_MAC_CFG 8 bit MAC Configuration / Chip Revision */
256enum {
257 CFG_CHIP_R_MSK = 0xf<<4, /* Bit 7.. 4: Chip Revision */
258 /* Bit 3.. 2: reserved */
259 CFG_DIS_M2_CLK = 1<<1, /* Disable Clock for 2nd MAC */
260 CFG_SNG_MAC = 1<<0, /* MAC Config: 0=2 MACs / 1=1 MAC*/
261};
262
263/* B2_CHIP_ID 8 bit Chip Identification Number */
264enum {
265 CHIP_ID_GENESIS = 0x0a, /* Chip ID for GENESIS */
266 CHIP_ID_YUKON = 0xb0, /* Chip ID for YUKON */
267 CHIP_ID_YUKON_LITE = 0xb1, /* Chip ID for YUKON-Lite (Rev. A1-A3) */
268 CHIP_ID_YUKON_LP = 0xb2, /* Chip ID for YUKON-LP */
269 CHIP_ID_YUKON_XL = 0xb3, /* Chip ID for YUKON-2 XL */
270 CHIP_ID_YUKON_EC = 0xb6, /* Chip ID for YUKON-2 EC */
271 CHIP_ID_YUKON_FE = 0xb7, /* Chip ID for YUKON-2 FE */
272
273 CHIP_REV_YU_LITE_A1 = 3, /* Chip Rev. for YUKON-Lite A1,A2 */
274 CHIP_REV_YU_LITE_A3 = 7, /* Chip Rev. for YUKON-Lite A3 */
275};
276
277/* B2_TI_CTRL 8 bit Timer control */
278/* B2_IRQM_CTRL 8 bit IRQ Moderation Timer Control */
279enum {
280 TIM_START = 1<<2, /* Start Timer */
281 TIM_STOP = 1<<1, /* Stop Timer */
282 TIM_CLR_IRQ = 1<<0, /* Clear Timer IRQ (!IRQM) */
283};
284
285/* B2_TI_TEST 8 Bit Timer Test */
286/* B2_IRQM_TEST 8 bit IRQ Moderation Timer Test */
287/* B28_DPT_TST 8 bit Descriptor Poll Timer Test Reg */
288enum {
289 TIM_T_ON = 1<<2, /* Test mode on */
290 TIM_T_OFF = 1<<1, /* Test mode off */
291 TIM_T_STEP = 1<<0, /* Test step */
292};
293
294/* B2_GP_IO 32 bit General Purpose I/O Register */
295enum {
296 GP_DIR_9 = 1<<25, /* IO_9 direct, 0=In/1=Out */
297 GP_DIR_8 = 1<<24, /* IO_8 direct, 0=In/1=Out */
298 GP_DIR_7 = 1<<23, /* IO_7 direct, 0=In/1=Out */
299 GP_DIR_6 = 1<<22, /* IO_6 direct, 0=In/1=Out */
300 GP_DIR_5 = 1<<21, /* IO_5 direct, 0=In/1=Out */
301 GP_DIR_4 = 1<<20, /* IO_4 direct, 0=In/1=Out */
302 GP_DIR_3 = 1<<19, /* IO_3 direct, 0=In/1=Out */
303 GP_DIR_2 = 1<<18, /* IO_2 direct, 0=In/1=Out */
304 GP_DIR_1 = 1<<17, /* IO_1 direct, 0=In/1=Out */
305 GP_DIR_0 = 1<<16, /* IO_0 direct, 0=In/1=Out */
306
307 GP_IO_9 = 1<<9, /* IO_9 pin */
308 GP_IO_8 = 1<<8, /* IO_8 pin */
309 GP_IO_7 = 1<<7, /* IO_7 pin */
310 GP_IO_6 = 1<<6, /* IO_6 pin */
311 GP_IO_5 = 1<<5, /* IO_5 pin */
312 GP_IO_4 = 1<<4, /* IO_4 pin */
313 GP_IO_3 = 1<<3, /* IO_3 pin */
314 GP_IO_2 = 1<<2, /* IO_2 pin */
315 GP_IO_1 = 1<<1, /* IO_1 pin */
316 GP_IO_0 = 1<<0, /* IO_0 pin */
317};
318
319/* Descriptor Bit Definition */
320/* TxCtrl Transmit Buffer Control Field */
321/* RxCtrl Receive Buffer Control Field */
322enum {
323 BMU_OWN = 1<<31, /* OWN bit: 0=host/1=BMU */
324 BMU_STF = 1<<30, /* Start of Frame */
325 BMU_EOF = 1<<29, /* End of Frame */
326 BMU_IRQ_EOB = 1<<28, /* Req "End of Buffer" IRQ */
327 BMU_IRQ_EOF = 1<<27, /* Req "End of Frame" IRQ */
328 /* TxCtrl specific bits */
329 BMU_STFWD = 1<<26, /* (Tx) Store & Forward Frame */
330 BMU_NO_FCS = 1<<25, /* (Tx) Disable MAC FCS (CRC) generation */
331 BMU_SW = 1<<24, /* (Tx) 1 bit res. for SW use */
332 /* RxCtrl specific bits */
333 BMU_DEV_0 = 1<<26, /* (Rx) Transfer data to Dev0 */
334 BMU_STAT_VAL = 1<<25, /* (Rx) Rx Status Valid */
335 BMU_TIST_VAL = 1<<24, /* (Rx) Rx TimeStamp Valid */
336 /* Bit 23..16: BMU Check Opcodes */
337 BMU_CHECK = 0x55<<16, /* Default BMU check */
338 BMU_TCP_CHECK = 0x56<<16, /* Descr with TCP ext */
339 BMU_UDP_CHECK = 0x57<<16, /* Descr with UDP ext (YUKON only) */
340 BMU_BBC = 0xffffL, /* Bit 15.. 0: Buffer Byte Counter */
341};
342
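/* Editor's sketch of how skge_rx_get() decodes a receive control word
 * with the bits above: a frame is usable only once the BMU has released
 * the descriptor and both start- and end-of-frame sit in one buffer;
 * the low 16 bits (BMU_BBC) then give the byte count. */
#include <stdint.h>
#include <stdbool.h>

static bool rx_frame_complete(uint32_t control, uint16_t *len)
{
	const uint32_t own = UINT32_C(1) << 31;	/* BMU_OWN */
	const uint32_t stf = UINT32_C(1) << 30;	/* BMU_STF */
	const uint32_t eof = UINT32_C(1) << 29;	/* BMU_EOF */

	if (control & own)			/* still owned by the BMU */
		return false;
	if ((control & (stf | eof)) != (stf | eof))
		return false;			/* frame spans buffers: error path */
	*len = control & 0xffff;		/* BMU_BBC byte counter */
	return true;
}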
343/* B2_BSC_CTRL 8 bit Blink Source Counter Control */
344enum {
345 BSC_START = 1<<1, /* Start Blink Source Counter */
346 BSC_STOP = 1<<0, /* Stop Blink Source Counter */
347};
348
349/* B2_BSC_STAT 8 bit Blink Source Counter Status */
350enum {
351 BSC_SRC = 1<<0, /* Blink Source, 0=Off / 1=On */
352};
353
354/* B2_BSC_TST 16 bit Blink Source Counter Test Reg */
355enum {
356 BSC_T_ON = 1<<2, /* Test mode on */
357 BSC_T_OFF = 1<<1, /* Test mode off */
358 BSC_T_STEP = 1<<0, /* Test step */
359};
360
361/* B3_RAM_ADDR 32 bit RAM Address, to read or write */
362 /* Bit 31..19: reserved */
363#define RAM_ADR_RAN 0x0007ffffL /* Bit 18.. 0: RAM Address Range */
364/* RAM Interface Registers */
365
366/* B3_RI_CTRL 16 bit RAM Iface Control Register */
367enum {
368 RI_CLR_RD_PERR = 1<<9, /* Clear IRQ RAM Read Parity Err */
369 RI_CLR_WR_PERR = 1<<8, /* Clear IRQ RAM Write Parity Err*/
370
371 RI_RST_CLR = 1<<1, /* Clear RAM Interface Reset */
372 RI_RST_SET = 1<<0, /* Set RAM Interface Reset */
373};
374
375/* MAC Arbiter Registers */
376/* B3_MA_TO_CTRL 16 bit MAC Arbiter Timeout Ctrl Reg */
377enum {
378 MA_FOE_ON = 1<<3, /* XMAC Fast Output Enable ON */
379 MA_FOE_OFF = 1<<2, /* XMAC Fast Output Enable OFF */
380 MA_RST_CLR = 1<<1, /* Clear MAC Arbiter Reset */
381 MA_RST_SET = 1<<0, /* Set MAC Arbiter Reset */
382
383};
384
385/* Timeout values */
386#define SK_MAC_TO_53 72 /* MAC arbiter timeout */
387#define SK_PKT_TO_53 0x2000 /* Packet arbiter timeout */
388#define SK_PKT_TO_MAX 0xffff /* Maximum value */
389#define SK_RI_TO_53 36 /* RAM interface timeout */
390
391/* Packet Arbiter Registers */
392/* B3_PA_CTRL 16 bit Packet Arbiter Ctrl Register */
393enum {
394 PA_CLR_TO_TX2 = 1<<13,/* Clear IRQ Packet Timeout TX2 */
395 PA_CLR_TO_TX1 = 1<<12,/* Clear IRQ Packet Timeout TX1 */
396 PA_CLR_TO_RX2 = 1<<11,/* Clear IRQ Packet Timeout RX2 */
397 PA_CLR_TO_RX1 = 1<<10,/* Clear IRQ Packet Timeout RX1 */
398 PA_ENA_TO_TX2 = 1<<9, /* Enable Timeout Timer TX2 */
399 PA_DIS_TO_TX2 = 1<<8, /* Disable Timeout Timer TX2 */
400 PA_ENA_TO_TX1 = 1<<7, /* Enable Timeout Timer TX1 */
401 PA_DIS_TO_TX1 = 1<<6, /* Disable Timeout Timer TX1 */
402 PA_ENA_TO_RX2 = 1<<5, /* Enable Timeout Timer RX2 */
403 PA_DIS_TO_RX2 = 1<<4, /* Disable Timeout Timer RX2 */
404 PA_ENA_TO_RX1 = 1<<3, /* Enable Timeout Timer RX1 */
405 PA_DIS_TO_RX1 = 1<<2, /* Disable Timeout Timer RX1 */
406 PA_RST_CLR = 1<<1, /* Clear Packet Arbiter Reset */
407 PA_RST_SET = 1<<0, /* Set Packet Arbiter Reset */
408};
409
410#define PA_ENA_TO_ALL (PA_ENA_TO_RX1 | PA_ENA_TO_RX2 |\
411 PA_ENA_TO_TX1 | PA_ENA_TO_TX2)
412
413
414/* Transmit Arbiter Registers MAC 1 and 2, use SK_REG() to access */
415/* TXA_ITI_INI 32 bit Tx Arb Interval Timer Init Val */
416/* TXA_ITI_VAL 32 bit Tx Arb Interval Timer Value */
417/* TXA_LIM_INI 32 bit Tx Arb Limit Counter Init Val */
418/* TXA_LIM_VAL 32 bit Tx Arb Limit Counter Value */
419
420#define TXA_MAX_VAL 0x00ffffffUL /* Bit 23.. 0: Max TXA Timer/Cnt Val */
421
422/* TXA_CTRL 8 bit Tx Arbiter Control Register */
423enum {
424 TXA_ENA_FSYNC = 1<<7, /* Enable force of sync Tx queue */
425 TXA_DIS_FSYNC = 1<<6, /* Disable force of sync Tx queue */
426 TXA_ENA_ALLOC = 1<<5, /* Enable alloc of free bandwidth */
427 TXA_DIS_ALLOC = 1<<4, /* Disable alloc of free bandwidth */
428 TXA_START_RC = 1<<3, /* Start sync Rate Control */
429 TXA_STOP_RC = 1<<2, /* Stop sync Rate Control */
430 TXA_ENA_ARB = 1<<1, /* Enable Tx Arbiter */
431 TXA_DIS_ARB = 1<<0, /* Disable Tx Arbiter */
432};
433
434/*
435 * Bank 4 - 5
436 */
437/* Transmit Arbiter Registers MAC 1 and 2, use SK_REG() to access */
438enum {
439 TXA_ITI_INI = 0x0200,/* 32 bit Tx Arb Interval Timer Init Val*/
440 TXA_ITI_VAL = 0x0204,/* 32 bit Tx Arb Interval Timer Value */
441 TXA_LIM_INI = 0x0208,/* 32 bit Tx Arb Limit Counter Init Val */
442 TXA_LIM_VAL = 0x020c,/* 32 bit Tx Arb Limit Counter Value */
443 TXA_CTRL = 0x0210,/* 8 bit Tx Arbiter Control Register */
444 TXA_TEST = 0x0211,/* 8 bit Tx Arbiter Test Register */
445 TXA_STAT = 0x0212,/* 8 bit Tx Arbiter Status Register */
446};
447
448
449enum {
450 B6_EXT_REG = 0x0300,/* External registers (GENESIS only) */
451 B7_CFG_SPC = 0x0380,/* copy of the Configuration register */
452 B8_RQ1_REGS = 0x0400,/* Receive Queue 1 */
453 B8_RQ2_REGS = 0x0480,/* Receive Queue 2 */
454 B8_TS1_REGS = 0x0600,/* Transmit sync queue 1 */
455 B8_TA1_REGS = 0x0680,/* Transmit async queue 1 */
456 B8_TS2_REGS = 0x0700,/* Transmit sync queue 2 */
457 B8_TA2_REGS = 0x0780,/* Transmit async queue 2 */
458 B16_RAM_REGS = 0x0800,/* RAM Buffer Registers */
459};
460
461/* Queue Register Offsets, use Q_ADDR() to access */
462enum {
463 B8_Q_REGS = 0x0400, /* base of Queue registers */
464 Q_D = 0x00, /* 8*32 bit Current Descriptor */
465 Q_DA_L = 0x20, /* 32 bit Current Descriptor Address Low dWord */
466 Q_DA_H = 0x24, /* 32 bit Current Descriptor Address High dWord */
467 Q_AC_L = 0x28, /* 32 bit Current Address Counter Low dWord */
468 Q_AC_H = 0x2c, /* 32 bit Current Address Counter High dWord */
469 Q_BC = 0x30, /* 32 bit Current Byte Counter */
470 Q_CSR = 0x34, /* 32 bit BMU Control/Status Register */
471 Q_F = 0x38, /* 32 bit Flag Register */
472 Q_T1 = 0x3c, /* 32 bit Test Register 1 */
473 Q_T1_TR = 0x3c, /* 8 bit Test Register 1 Transfer SM */
474 Q_T1_WR = 0x3d, /* 8 bit Test Register 1 Write Descriptor SM */
475 Q_T1_RD = 0x3e, /* 8 bit Test Register 1 Read Descriptor SM */
476 Q_T1_SV = 0x3f, /* 8 bit Test Register 1 Supervisor SM */
477 Q_T2 = 0x40, /* 32 bit Test Register 2 */
478 Q_T3 = 0x44, /* 32 bit Test Register 3 */
479
480};
481#define Q_ADDR(reg, offs) (B8_Q_REGS + (reg) + (offs))
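/* Editor's worked example: Q_ADDR() folds a queue base from the
 * Receive/Transmit Queues enum and a per-queue register offset into a
 * single CSR offset, e.g. the receive-queue-1 CSR polled in skge_poll():
 *
 *	Q_ADDR(Q_R1,  Q_CSR) == 0x0400 + 0x0000 + 0x34 == 0x0434
 *	Q_ADDR(Q_XA1, Q_CSR) == 0x0400 + 0x0280 + 0x34 == 0x06b4
 */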
482
483/* RAM Buffer Register Offsets */
484enum {
485
486 RB_START= 0x00,/* 32 bit RAM Buffer Start Address */
487 RB_END = 0x04,/* 32 bit RAM Buffer End Address */
488 RB_WP = 0x08,/* 32 bit RAM Buffer Write Pointer */
489 RB_RP = 0x0c,/* 32 bit RAM Buffer Read Pointer */
490 RB_RX_UTPP= 0x10,/* 32 bit Rx Upper Threshold, Pause Packet */
491 RB_RX_LTPP= 0x14,/* 32 bit Rx Lower Threshold, Pause Packet */
492 RB_RX_UTHP= 0x18,/* 32 bit Rx Upper Threshold, High Prio */
493 RB_RX_LTHP= 0x1c,/* 32 bit Rx Lower Threshold, High Prio */
494 /* 0x10 - 0x1f: reserved at Tx RAM Buffer Registers */
495 RB_PC = 0x20,/* 32 bit RAM Buffer Packet Counter */
496 RB_LEV = 0x24,/* 32 bit RAM Buffer Level Register */
497 RB_CTRL = 0x28,/* 32 bit RAM Buffer Control Register */
498 RB_TST1 = 0x29,/* 8 bit RAM Buffer Test Register 1 */
499 RB_TST2 = 0x2a,/* 8 bit RAM Buffer Test Register 2 */
500};
501
502/* Receive and Transmit Queues */
503enum {
504 Q_R1 = 0x0000, /* Receive Queue 1 */
505 Q_R2 = 0x0080, /* Receive Queue 2 */
506 Q_XS1 = 0x0200, /* Synchronous Transmit Queue 1 */
507 Q_XA1 = 0x0280, /* Asynchronous Transmit Queue 1 */
508 Q_XS2 = 0x0300, /* Synchronous Transmit Queue 2 */
509 Q_XA2 = 0x0380, /* Asynchronous Transmit Queue 2 */
510};
511
512/* Different MAC Types */
513enum {
514 SK_MAC_XMAC = 0, /* Xaqti XMAC II */
515 SK_MAC_GMAC = 1, /* Marvell GMAC */
516};
517
518/* Different PHY Types */
519enum {
520 SK_PHY_XMAC = 0,/* integrated in XMAC II */
521 SK_PHY_BCOM = 1,/* Broadcom BCM5400 */
522 SK_PHY_LONE = 2,/* Level One LXT1000 [not supported]*/
523 SK_PHY_NAT = 3,/* National DP83891 [not supported] */
524 SK_PHY_MARV_COPPER= 4,/* Marvell 88E1011S */
525 SK_PHY_MARV_FIBER = 5,/* Marvell 88E1011S working on fiber */
526};
527
528/* PHY addresses (bits 12..8 of PHY address reg) */
529enum {
530 PHY_ADDR_XMAC = 0<<8,
531 PHY_ADDR_BCOM = 1<<8,
532
533/* GPHY address (bits 15..11 of SMI control reg) */
534 PHY_ADDR_MARV = 0,
535};
536
537#define RB_ADDR(offs, queue) ((u16)B16_RAM_REGS + (u16)(queue) + (offs))
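/* Editor's worked example: with B16_RAM_REGS = 0x0800 and the queue
 * bases above, RB_ADDR() resolves e.g. the Rx-queue-1 RAM buffer
 * control register to:
 *
 *	RB_ADDR(Q_R1, RB_CTRL) == 0x0800 + 0x0000 + 0x28 == 0x0828
 */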
538
539/* Receive MAC FIFO, Receive LED, and Link_Sync regs (GENESIS only) */
540enum {
541 RX_MFF_EA = 0x0c00,/* 32 bit Receive MAC FIFO End Address */
542 RX_MFF_WP = 0x0c04,/* 32 bit Receive MAC FIFO Write Pointer */
543
544 RX_MFF_RP = 0x0c0c,/* 32 bit Receive MAC FIFO Read Pointer */
545 RX_MFF_PC = 0x0c10,/* 32 bit Receive MAC FIFO Packet Cnt */
546 RX_MFF_LEV = 0x0c14,/* 32 bit Receive MAC FIFO Level */
547 RX_MFF_CTRL1 = 0x0c18,/* 16 bit Receive MAC FIFO Control Reg 1*/
548 RX_MFF_STAT_TO = 0x0c1a,/* 8 bit Receive MAC Status Timeout */
549 RX_MFF_TIST_TO = 0x0c1b,/* 8 bit Receive MAC Time Stamp Timeout */
550 RX_MFF_CTRL2 = 0x0c1c,/* 8 bit Receive MAC FIFO Control Reg 2*/
551 RX_MFF_TST1 = 0x0c1d,/* 8 bit Receive MAC FIFO Test Reg 1 */
552 RX_MFF_TST2 = 0x0c1e,/* 8 bit Receive MAC FIFO Test Reg 2 */
553
554 RX_LED_INI = 0x0c20,/* 32 bit Receive LED Cnt Init Value */
555 RX_LED_VAL = 0x0c24,/* 32 bit Receive LED Cnt Current Value */
556 RX_LED_CTRL = 0x0c28,/* 8 bit Receive LED Cnt Control Reg */
557 RX_LED_TST = 0x0c29,/* 8 bit Receive LED Cnt Test Register */
558
559 LNK_SYNC_INI = 0x0c30,/* 32 bit Link Sync Cnt Init Value */
560 LNK_SYNC_VAL = 0x0c34,/* 32 bit Link Sync Cnt Current Value */
561 LNK_SYNC_CTRL = 0x0c38,/* 8 bit Link Sync Cnt Control Register */
562 LNK_SYNC_TST = 0x0c39,/* 8 bit Link Sync Cnt Test Register */
563 LNK_LED_REG = 0x0c3c,/* 8 bit Link LED Register */
564};
565
566/* Receive and Transmit MAC FIFO Registers (GENESIS only) */
567/* RX_MFF_CTRL1 16 bit Receive MAC FIFO Control Reg 1 */
568enum {
569 MFF_ENA_RDY_PAT = 1<<13, /* Enable Ready Patch */
570 MFF_DIS_RDY_PAT = 1<<12, /* Disable Ready Patch */
571 MFF_ENA_TIM_PAT = 1<<11, /* Enable Timing Patch */
572 MFF_DIS_TIM_PAT = 1<<10, /* Disable Timing Patch */
573 MFF_ENA_ALM_FUL = 1<<9, /* Enable AlmostFull Sign */
574 MFF_DIS_ALM_FUL = 1<<8, /* Disable AlmostFull Sign */
575 MFF_ENA_PAUSE = 1<<7, /* Enable Pause Signaling */
576 MFF_DIS_PAUSE = 1<<6, /* Disable Pause Signaling */
577 MFF_ENA_FLUSH = 1<<5, /* Enable Frame Flushing */
578 MFF_DIS_FLUSH = 1<<4, /* Disable Frame Flushing */
579 MFF_ENA_TIST = 1<<3, /* Enable Time Stamp Gener */
580 MFF_DIS_TIST = 1<<2, /* Disable Time Stamp Gener */
581 MFF_CLR_INTIST = 1<<1, /* Clear IRQ No Time Stamp */
582 MFF_CLR_INSTAT = 1<<0, /* Clear IRQ No Status */
583 MFF_RX_CTRL_DEF = MFF_ENA_TIM_PAT,
584};
585
586/* TX_MFF_CTRL1 16 bit Transmit MAC FIFO Control Reg 1 */
587enum {
588 MFF_CLR_PERR = 1<<15, /* Clear Parity Error IRQ */
589
590 MFF_ENA_PKT_REC = 1<<13, /* Enable Packet Recovery */
591 MFF_DIS_PKT_REC = 1<<12, /* Disable Packet Recovery */
592
593 MFF_ENA_W4E = 1<<7, /* Enable Wait for Empty */
594 MFF_DIS_W4E = 1<<6, /* Disable Wait for Empty */
595
596 MFF_ENA_LOOPB = 1<<3, /* Enable Loopback */
597 MFF_DIS_LOOPB = 1<<2, /* Disable Loopback */
598 MFF_CLR_MAC_RST = 1<<1, /* Clear XMAC Reset */
599 MFF_SET_MAC_RST = 1<<0, /* Set XMAC Reset */
600
601 MFF_TX_CTRL_DEF = MFF_ENA_PKT_REC | (u16) MFF_ENA_TIM_PAT | MFF_ENA_FLUSH,
602};
603
604
605/* RX_MFF_TST2 8 bit Receive MAC FIFO Test Register 2 */
606/* TX_MFF_TST2 8 bit Transmit MAC FIFO Test Register 2 */
607enum {
608 MFF_WSP_T_ON = 1<<6, /* Tx: Write Shadow Ptr TestOn */
609 MFF_WSP_T_OFF = 1<<5, /* Tx: Write Shadow Ptr TstOff */
610 MFF_WSP_INC = 1<<4, /* Tx: Write Shadow Ptr Increment */
611 MFF_PC_DEC = 1<<3, /* Packet Counter Decrement */
612 MFF_PC_T_ON = 1<<2, /* Packet Counter Test On */
613 MFF_PC_T_OFF = 1<<1, /* Packet Counter Test Off */
614 MFF_PC_INC = 1<<0, /* Packet Counter Increment */
615};
616
617/* RX_MFF_TST1 8 bit Receive MAC FIFO Test Register 1 */
618/* TX_MFF_TST1 8 bit Transmit MAC FIFO Test Register 1 */
619enum {
620 MFF_WP_T_ON = 1<<6, /* Write Pointer Test On */
621 MFF_WP_T_OFF = 1<<5, /* Write Pointer Test Off */
622 MFF_WP_INC = 1<<4, /* Write Pointer Increm */
623
624 MFF_RP_T_ON = 1<<2, /* Read Pointer Test On */
625 MFF_RP_T_OFF = 1<<1, /* Read Pointer Test Off */
626 MFF_RP_DEC = 1<<0, /* Read Pointer Decrement */
627};
628
629/* RX_MFF_CTRL2 8 bit Receive MAC FIFO Control Reg 2 */
630/* TX_MFF_CTRL2 8 bit Transmit MAC FIFO Control Reg 2 */
631enum {
632 MFF_ENA_OP_MD = 1<<3, /* Enable Operation Mode */
633 MFF_DIS_OP_MD = 1<<2, /* Disable Operation Mode */
634 MFF_RST_CLR = 1<<1, /* Clear MAC FIFO Reset */
635 MFF_RST_SET = 1<<0, /* Set MAC FIFO Reset */
636};
637
638
639/* Link LED Counter Registers (GENESIS only) */
640
641/* RX_LED_CTRL 8 bit Receive LED Cnt Control Reg */
642/* TX_LED_CTRL 8 bit Transmit LED Cnt Control Reg */
643/* LNK_SYNC_CTRL 8 bit Link Sync Cnt Control Register */
644enum {
645 LED_START = 1<<2, /* Start Timer */
646 LED_STOP = 1<<1, /* Stop Timer */
647 LED_STATE = 1<<0, /* Rx/Tx: LED State, 1=LED on */
648};
649
650/* RX_LED_TST 8 bit Receive LED Cnt Test Register */
651/* TX_LED_TST 8 bit Transmit LED Cnt Test Register */
652/* LNK_SYNC_TST 8 bit Link Sync Cnt Test Register */
653enum {
654 LED_T_ON = 1<<2, /* LED Counter Test mode On */
655 LED_T_OFF = 1<<1, /* LED Counter Test mode Off */
656 LED_T_STEP = 1<<0, /* LED Counter Step */
657};
658
659/* LNK_LED_REG 8 bit Link LED Register */
660enum {
661 LED_BLK_ON = 1<<5, /* Link LED Blinking On */
662 LED_BLK_OFF = 1<<4, /* Link LED Blinking Off */
663 LED_SYNC_ON = 1<<3, /* Use Sync Wire to switch LED */
664 LED_SYNC_OFF = 1<<2, /* Disable Sync Wire Input */
665 LED_ON = 1<<1, /* switch LED on */
666 LED_OFF = 1<<0, /* switch LED off */
667};
668
669/* Receive GMAC FIFO (YUKON) */
670enum {
671 RX_GMF_EA = 0x0c40,/* 32 bit Rx GMAC FIFO End Address */
672 RX_GMF_AF_THR = 0x0c44,/* 32 bit Rx GMAC FIFO Almost Full Thresh. */
673 RX_GMF_CTRL_T = 0x0c48,/* 32 bit Rx GMAC FIFO Control/Test */
674 RX_GMF_FL_MSK = 0x0c4c,/* 32 bit Rx GMAC FIFO Flush Mask */
675 RX_GMF_FL_THR = 0x0c50,/* 32 bit Rx GMAC FIFO Flush Threshold */
676 RX_GMF_WP = 0x0c60,/* 32 bit Rx GMAC FIFO Write Pointer */
677 RX_GMF_WLEV = 0x0c68,/* 32 bit Rx GMAC FIFO Write Level */
678 RX_GMF_RP = 0x0c70,/* 32 bit Rx GMAC FIFO Read Pointer */
679 RX_GMF_RLEV = 0x0c78,/* 32 bit Rx GMAC FIFO Read Level */
680};
681
682
683/* TXA_TEST 8 bit Tx Arbiter Test Register */
684enum {
685 TXA_INT_T_ON = 1<<5, /* Tx Arb Interval Timer Test On */
686 TXA_INT_T_OFF = 1<<4, /* Tx Arb Interval Timer Test Off */
687 TXA_INT_T_STEP = 1<<3, /* Tx Arb Interval Timer Step */
688 TXA_LIM_T_ON = 1<<2, /* Tx Arb Limit Timer Test On */
689 TXA_LIM_T_OFF = 1<<1, /* Tx Arb Limit Timer Test Off */
690 TXA_LIM_T_STEP = 1<<0, /* Tx Arb Limit Timer Step */
691};
692
693/* TXA_STAT 8 bit Tx Arbiter Status Register */
694enum {
695 TXA_PRIO_XS = 1<<0, /* sync queue has prio to send */
696};
697
698
699/* Q_BC 32 bit Current Byte Counter */
700
701/* BMU Control Status Registers */
702/* B0_R1_CSR 32 bit BMU Ctrl/Stat Rx Queue 1 */
703/* B0_R2_CSR 32 bit BMU Ctrl/Stat Rx Queue 2 */
704/* B0_XA1_CSR 32 bit BMU Ctrl/Stat Async Tx Queue 1 */
705/* B0_XS1_CSR 32 bit BMU Ctrl/Stat Sync Tx Queue 1 */
706/* B0_XA2_CSR 32 bit BMU Ctrl/Stat Async Tx Queue 2 */
707/* B0_XS2_CSR 32 bit BMU Ctrl/Stat Sync Tx Queue 2 */
708/* Q_CSR 32 bit BMU Control/Status Register */
709
710enum {
711 CSR_SV_IDLE = 1<<24, /* BMU SM Idle */
712
713 CSR_DESC_CLR = 1<<21, /* Clear Reset for Descr */
714 CSR_DESC_SET = 1<<20, /* Set Reset for Descr */
715 CSR_FIFO_CLR = 1<<19, /* Clear Reset for FIFO */
716 CSR_FIFO_SET = 1<<18, /* Set Reset for FIFO */
717 CSR_HPI_RUN = 1<<17, /* Release HPI SM */
718 CSR_HPI_RST = 1<<16, /* Reset HPI SM to Idle */
719 CSR_SV_RUN = 1<<15, /* Release Supervisor SM */
720 CSR_SV_RST = 1<<14, /* Reset Supervisor SM */
721 CSR_DREAD_RUN = 1<<13, /* Release Descr Read SM */
722 CSR_DREAD_RST = 1<<12, /* Reset Descr Read SM */
723 CSR_DWRITE_RUN = 1<<11, /* Release Descr Write SM */
724 CSR_DWRITE_RST = 1<<10, /* Reset Descr Write SM */
725 CSR_TRANS_RUN = 1<<9, /* Release Transfer SM */
726 CSR_TRANS_RST = 1<<8, /* Reset Transfer SM */
727 CSR_ENA_POL = 1<<7, /* Enable Descr Polling */
728 CSR_DIS_POL = 1<<6, /* Disable Descr Polling */
729 CSR_STOP = 1<<5, /* Stop Rx/Tx Queue */
730 CSR_START = 1<<4, /* Start Rx/Tx Queue */
731 CSR_IRQ_CL_P = 1<<3, /* (Rx) Clear Parity IRQ */
732 CSR_IRQ_CL_B = 1<<2, /* Clear EOB IRQ */
733 CSR_IRQ_CL_F = 1<<1, /* Clear EOF IRQ */
734 CSR_IRQ_CL_C = 1<<0, /* Clear ERR IRQ */
735};
736
737#define CSR_SET_RESET (CSR_DESC_SET | CSR_FIFO_SET | CSR_HPI_RST |\
738 CSR_SV_RST | CSR_DREAD_RST | CSR_DWRITE_RST |\
739 CSR_TRANS_RST)
740#define CSR_CLR_RESET (CSR_DESC_CLR | CSR_FIFO_CLR | CSR_HPI_RUN |\
741 CSR_SV_RUN | CSR_DREAD_RUN | CSR_DWRITE_RUN |\
742 CSR_TRANS_RUN)
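/*
 * Illustrative sketch (editor's example, not part of the original
 * header): bringing a BMU queue out of reset and starting it.  Assumes
 * a pre-computed __iomem address of the queue's Q_CSR register and the
 * standard writel() accessor from <linux/io.h>.
 */
static inline void example_bmu_start(void __iomem *q_csr)
{
	writel(CSR_CLR_RESET, q_csr);	/* release descriptor, FIFO and SM resets */
	writel(CSR_START, q_csr);	/* then start the Rx/Tx queue */
}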
743
744/* Q_F 32 bit Flag Register */
745enum {
746 F_ALM_FULL = 1<<27, /* Rx FIFO: almost full */
747 F_EMPTY = 1<<27, /* Tx FIFO: empty flag */
748 F_FIFO_EOF = 1<<26, /* Tag (EOF Flag) bit in FIFO */
749 F_WM_REACHED = 1<<25, /* Watermark reached */
750
751	F_FIFO_LEVEL	= 0x1fL<<16, /* Bit 20..16:	# of Qwords in FIFO */
752 F_WATER_MARK = 0x0007ffL, /* Bit 10.. 0: Watermark */
753};
754
755/* RAM Buffer Register Offsets, use RB_ADDR(Queue, Offs) to access */
756/* RB_START 32 bit RAM Buffer Start Address */
757/* RB_END 32 bit RAM Buffer End Address */
758/* RB_WP 32 bit RAM Buffer Write Pointer */
759/* RB_RP 32 bit RAM Buffer Read Pointer */
760/* RB_RX_UTPP 32 bit Rx Upper Threshold, Pause Pack */
761/* RB_RX_LTPP 32 bit Rx Lower Threshold, Pause Pack */
762/* RB_RX_UTHP 32 bit Rx Upper Threshold, High Prio */
763/* RB_RX_LTHP 32 bit Rx Lower Threshold, High Prio */
764/* RB_PC 32 bit RAM Buffer Packet Counter */
765/* RB_LEV 32 bit RAM Buffer Level Register */
766
767#define RB_MSK 0x0007ffff /* Bit 18.. 0: RAM Buffer Pointer Bits */
768/* RB_TST2 8 bit RAM Buffer Test Register 2 */
769/* RB_TST1 8 bit RAM Buffer Test Register 1 */
770
771/* RB_CTRL 8 bit RAM Buffer Control Register */
772enum {
773 RB_ENA_STFWD = 1<<5, /* Enable Store & Forward */
774 RB_DIS_STFWD = 1<<4, /* Disable Store & Forward */
775 RB_ENA_OP_MD = 1<<3, /* Enable Operation Mode */
776 RB_DIS_OP_MD = 1<<2, /* Disable Operation Mode */
777 RB_RST_CLR = 1<<1, /* Clear RAM Buf STM Reset */
778 RB_RST_SET = 1<<0, /* Set RAM Buf STM Reset */
779};
780
781/* Transmit MAC FIFO and Transmit LED Registers (GENESIS only) */
782enum {
783 TX_MFF_EA = 0x0d00,/* 32 bit Transmit MAC FIFO End Address */
784 TX_MFF_WP = 0x0d04,/* 32 bit Transmit MAC FIFO WR Pointer */
785 TX_MFF_WSP = 0x0d08,/* 32 bit Transmit MAC FIFO WR Shadow Ptr */
786 TX_MFF_RP = 0x0d0c,/* 32 bit Transmit MAC FIFO RD Pointer */
787 TX_MFF_PC = 0x0d10,/* 32 bit Transmit MAC FIFO Packet Cnt */
788 TX_MFF_LEV = 0x0d14,/* 32 bit Transmit MAC FIFO Level */
789 TX_MFF_CTRL1 = 0x0d18,/* 16 bit Transmit MAC FIFO Ctrl Reg 1 */
790 TX_MFF_WAF = 0x0d1a,/* 8 bit Transmit MAC Wait after flush */
791
792 TX_MFF_CTRL2 = 0x0d1c,/* 8 bit Transmit MAC FIFO Ctrl Reg 2 */
793 TX_MFF_TST1 = 0x0d1d,/* 8 bit Transmit MAC FIFO Test Reg 1 */
794 TX_MFF_TST2 = 0x0d1e,/* 8 bit Transmit MAC FIFO Test Reg 2 */
795
796 TX_LED_INI = 0x0d20,/* 32 bit Transmit LED Cnt Init Value */
797 TX_LED_VAL = 0x0d24,/* 32 bit Transmit LED Cnt Current Val */
798 TX_LED_CTRL = 0x0d28,/* 8 bit Transmit LED Cnt Control Reg */
799 TX_LED_TST = 0x0d29,/* 8 bit Transmit LED Cnt Test Reg */
800};
801
802/* Counter and Timer constants, for a host clock of 62.5 MHz */
803#define SK_XMIT_DUR 0x002faf08UL /* 50 ms */
804#define SK_BLK_DUR 0x01dcd650UL /* 500 ms */
805
806#define SK_DPOLL_DEF 0x00ee6b28UL /* 250 ms at 62.5 MHz */
807
808#define SK_DPOLL_MAX 0x00ffffffUL /* 268 ms at 62.5 MHz */
809 /* 215 ms at 78.12 MHz */
810
811#define SK_FACT_62	100	/* clock factor, given in percent */
812#define SK_FACT_53 85 /* on GENESIS: 53.12 MHz */
813#define SK_FACT_78 125 /* on YUKON: 78.12 MHz */
814
815
816/* Transmit GMAC FIFO (YUKON only) */
817enum {
818 TX_GMF_EA = 0x0d40,/* 32 bit Tx GMAC FIFO End Address */
819 TX_GMF_AE_THR = 0x0d44,/* 32 bit Tx GMAC FIFO Almost Empty Thresh.*/
820 TX_GMF_CTRL_T = 0x0d48,/* 32 bit Tx GMAC FIFO Control/Test */
821
822 TX_GMF_WP = 0x0d60,/* 32 bit Tx GMAC FIFO Write Pointer */
823 TX_GMF_WSP = 0x0d64,/* 32 bit Tx GMAC FIFO Write Shadow Ptr. */
824 TX_GMF_WLEV = 0x0d68,/* 32 bit Tx GMAC FIFO Write Level */
825
826 TX_GMF_RP = 0x0d70,/* 32 bit Tx GMAC FIFO Read Pointer */
827 TX_GMF_RSTP = 0x0d74,/* 32 bit Tx GMAC FIFO Restart Pointer */
828 TX_GMF_RLEV = 0x0d78,/* 32 bit Tx GMAC FIFO Read Level */
829
830 /* Descriptor Poll Timer Registers */
831 B28_DPT_INI = 0x0e00,/* 24 bit Descriptor Poll Timer Init Val */
832 B28_DPT_VAL = 0x0e04,/* 24 bit Descriptor Poll Timer Curr Val */
833 B28_DPT_CTRL = 0x0e08,/* 8 bit Descriptor Poll Timer Ctrl Reg */
834
835 B28_DPT_TST = 0x0e0a,/* 8 bit Descriptor Poll Timer Test Reg */
836
837 /* Time Stamp Timer Registers (YUKON only) */
838 GMAC_TI_ST_VAL = 0x0e14,/* 32 bit Time Stamp Timer Curr Val */
839 GMAC_TI_ST_CTRL = 0x0e18,/* 8 bit Time Stamp Timer Ctrl Reg */
840 GMAC_TI_ST_TST = 0x0e1a,/* 8 bit Time Stamp Timer Test Reg */
841};
842
843
844enum {
845 LINKLED_OFF = 0x01,
846 LINKLED_ON = 0x02,
847 LINKLED_LINKSYNC_OFF = 0x04,
848 LINKLED_LINKSYNC_ON = 0x08,
849 LINKLED_BLINK_OFF = 0x10,
850 LINKLED_BLINK_ON = 0x20,
851};
852
853/* GMAC and GPHY Control Registers (YUKON only) */
854enum {
855 GMAC_CTRL = 0x0f00,/* 32 bit GMAC Control Reg */
856 GPHY_CTRL = 0x0f04,/* 32 bit GPHY Control Reg */
857 GMAC_IRQ_SRC = 0x0f08,/* 8 bit GMAC Interrupt Source Reg */
858 GMAC_IRQ_MSK = 0x0f0c,/* 8 bit GMAC Interrupt Mask Reg */
859 GMAC_LINK_CTRL = 0x0f10,/* 16 bit Link Control Reg */
860
861/* Wake-up Frame Pattern Match Control Registers (YUKON only) */
862
863	WOL_REG_OFFS	= 0x20,/* HW bug: addresses are offset +0x20 from the spec. */
864
865 WOL_CTRL_STAT = 0x0f20,/* 16 bit WOL Control/Status Reg */
866 WOL_MATCH_CTL = 0x0f22,/* 8 bit WOL Match Control Reg */
867 WOL_MATCH_RES = 0x0f23,/* 8 bit WOL Match Result Reg */
868 WOL_MAC_ADDR = 0x0f24,/* 32 bit WOL MAC Address */
869 WOL_PATT_RPTR = 0x0f2c,/* 8 bit WOL Pattern Read Pointer */
870
871/* WOL Pattern Length Registers (YUKON only) */
872
873 WOL_PATT_LEN_LO = 0x0f30,/* 32 bit WOL Pattern Length 3..0 */
874 WOL_PATT_LEN_HI = 0x0f34,/* 24 bit WOL Pattern Length 6..4 */
875
876/* WOL Pattern Counter Registers (YUKON only) */
877
878 WOL_PATT_CNT_0 = 0x0f38,/* 32 bit WOL Pattern Counter 3..0 */
879 WOL_PATT_CNT_4 = 0x0f3c,/* 24 bit WOL Pattern Counter 6..4 */
880};
881#define WOL_REGS(port, x) (x + (port)*0x80)
882
883enum {
884 WOL_PATT_RAM_1 = 0x1000,/* WOL Pattern RAM Link 1 */
885 WOL_PATT_RAM_2 = 0x1400,/* WOL Pattern RAM Link 2 */
886};
887#define WOL_PATT_RAM_BASE(port) (WOL_PATT_RAM_1 + (port)*0x400)
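/*
 * Worked example (editor's illustration): the WOL block is banked per
 * port, 0x80 apart, and each port owns 0x400 bytes of pattern RAM, so
 * for port 1:
 *
 *	WOL_REGS(1, WOL_CTRL_STAT) == 0x0f20 + 0x80  == 0x0fa0
 *	WOL_PATT_RAM_BASE(1)       == 0x1000 + 0x400 == 0x1400
 */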
888
889enum {
890 BASE_XMAC_1 = 0x2000,/* XMAC 1 registers */
891 BASE_GMAC_1 = 0x2800,/* GMAC 1 registers */
892 BASE_XMAC_2 = 0x3000,/* XMAC 2 registers */
893 BASE_GMAC_2 = 0x3800,/* GMAC 2 registers */
894};
895
896/*
897 * Receive Frame Status Encoding
898 */
899enum {
900 XMR_FS_LEN = 0x3fff<<18, /* Bit 31..18: Rx Frame Length */
901 XMR_FS_LEN_SHIFT = 18,
902	XMR_FS_2L_VLAN	= 1<<17, /* Bit 17:	tagged with 2-level VLAN ID */
903	XMR_FS_1_VLAN	= 1<<16, /* Bit 16:	tagged with 1-level VLAN ID */
904 XMR_FS_BC = 1<<15, /* Bit 15: Broadcast Frame */
905 XMR_FS_MC = 1<<14, /* Bit 14: Multicast Frame */
906 XMR_FS_UC = 1<<13, /* Bit 13: Unicast Frame */
907
908 XMR_FS_BURST = 1<<11, /* Bit 11: Burst Mode */
909 XMR_FS_CEX_ERR = 1<<10, /* Bit 10: Carrier Ext. Error */
910 XMR_FS_802_3 = 1<<9, /* Bit 9: 802.3 Frame */
911 XMR_FS_COL_ERR = 1<<8, /* Bit 8: Collision Error */
912 XMR_FS_CAR_ERR = 1<<7, /* Bit 7: Carrier Event Error */
913 XMR_FS_LEN_ERR = 1<<6, /* Bit 6: In-Range Length Error */
914 XMR_FS_FRA_ERR = 1<<5, /* Bit 5: Framing Error */
915 XMR_FS_RUNT = 1<<4, /* Bit 4: Runt Frame */
916 XMR_FS_LNG_ERR = 1<<3, /* Bit 3: Giant (Jumbo) Frame */
917 XMR_FS_FCS_ERR = 1<<2, /* Bit 2: Frame Check Sequ Err */
918 XMR_FS_ERR = 1<<1, /* Bit 1: Frame Error */
919 XMR_FS_MCTRL = 1<<0, /* Bit 0: MAC Control Packet */
920
921/*
922 * XMR_FS_ERR will be set if
923 * XMR_FS_FCS_ERR, XMR_FS_LNG_ERR, XMR_FS_RUNT,
924 * XMR_FS_FRA_ERR, XMR_FS_LEN_ERR, or XMR_FS_CEX_ERR
925 * is set. XMR_FS_LNG_ERR and XMR_FS_LEN_ERR will issue
926 * XMR_FS_ERR unless the corresponding bit in the Receive Command
927 * Register is set.
928 */
929};
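/*
 * Minimal sketch (editor's example): because XMR_FS_ERR summarizes the
 * individual error bits as described above, a receive fast path needs
 * only one test; the frame length sits in the top 14 bits.
 */
static inline unsigned example_xmac_rx_len(u32 status)
{
	return (status & XMR_FS_LEN) >> XMR_FS_LEN_SHIFT;
}

static inline int example_xmac_rx_good(u32 status)
{
	return !(status & XMR_FS_ERR);	/* summary of the error bits above */
}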
930
931/*
932 * XMAC-PHY Registers, indirectly addressed through the XMAC
933 */
934enum {
935 PHY_XMAC_CTRL = 0x00,/* 16 bit r/w PHY Control Register */
936 PHY_XMAC_STAT = 0x01,/* 16 bit r/w PHY Status Register */
937 PHY_XMAC_ID0 = 0x02,/* 16 bit r/o PHY ID0 Register */
938 PHY_XMAC_ID1 = 0x03,/* 16 bit r/o PHY ID1 Register */
939 PHY_XMAC_AUNE_ADV = 0x04,/* 16 bit r/w Auto-Neg. Advertisement */
940 PHY_XMAC_AUNE_LP = 0x05,/* 16 bit r/o Link Partner Abi Reg */
941 PHY_XMAC_AUNE_EXP = 0x06,/* 16 bit r/o Auto-Neg. Expansion Reg */
942 PHY_XMAC_NEPG = 0x07,/* 16 bit r/w Next Page Register */
943 PHY_XMAC_NEPG_LP = 0x08,/* 16 bit r/o Next Page Link Partner */
944
945 PHY_XMAC_EXT_STAT = 0x0f,/* 16 bit r/o Ext Status Register */
946 PHY_XMAC_RES_ABI = 0x10,/* 16 bit r/o PHY Resolved Ability */
947};
948/*
949 * Broadcom-PHY Registers, indirectly addressed through the XMAC
950 */
951enum {
952 PHY_BCOM_CTRL = 0x00,/* 16 bit r/w PHY Control Register */
953 PHY_BCOM_STAT = 0x01,/* 16 bit r/o PHY Status Register */
954 PHY_BCOM_ID0 = 0x02,/* 16 bit r/o PHY ID0 Register */
955 PHY_BCOM_ID1 = 0x03,/* 16 bit r/o PHY ID1 Register */
956 PHY_BCOM_AUNE_ADV = 0x04,/* 16 bit r/w Auto-Neg. Advertisement */
957 PHY_BCOM_AUNE_LP = 0x05,/* 16 bit r/o Link Part Ability Reg */
958 PHY_BCOM_AUNE_EXP = 0x06,/* 16 bit r/o Auto-Neg. Expansion Reg */
959 PHY_BCOM_NEPG = 0x07,/* 16 bit r/w Next Page Register */
960 PHY_BCOM_NEPG_LP = 0x08,/* 16 bit r/o Next Page Link Partner */
961 /* Broadcom-specific registers */
962 PHY_BCOM_1000T_CTRL = 0x09,/* 16 bit r/w 1000Base-T Control Reg */
963 PHY_BCOM_1000T_STAT = 0x0a,/* 16 bit r/o 1000Base-T Status Reg */
964 PHY_BCOM_EXT_STAT = 0x0f,/* 16 bit r/o Extended Status Reg */
965 PHY_BCOM_P_EXT_CTRL = 0x10,/* 16 bit r/w PHY Extended Ctrl Reg */
966 PHY_BCOM_P_EXT_STAT = 0x11,/* 16 bit r/o PHY Extended Stat Reg */
967 PHY_BCOM_RE_CTR = 0x12,/* 16 bit r/w Receive Error Counter */
968 PHY_BCOM_FC_CTR = 0x13,/* 16 bit r/w False Carrier Sense Cnt */
969 PHY_BCOM_RNO_CTR = 0x14,/* 16 bit r/w Receiver NOT_OK Cnt */
970
971 PHY_BCOM_AUX_CTRL = 0x18,/* 16 bit r/w Auxiliary Control Reg */
972 PHY_BCOM_AUX_STAT = 0x19,/* 16 bit r/o Auxiliary Stat Summary */
973 PHY_BCOM_INT_STAT = 0x1a,/* 16 bit r/o Interrupt Status Reg */
974 PHY_BCOM_INT_MASK = 0x1b,/* 16 bit r/w Interrupt Mask Reg */
975};
976
977/*
978 * Marvell-PHY Registers, indirectly addressed through the GMAC
979 */
980enum {
981 PHY_MARV_CTRL = 0x00,/* 16 bit r/w PHY Control Register */
982 PHY_MARV_STAT = 0x01,/* 16 bit r/o PHY Status Register */
983 PHY_MARV_ID0 = 0x02,/* 16 bit r/o PHY ID0 Register */
984 PHY_MARV_ID1 = 0x03,/* 16 bit r/o PHY ID1 Register */
985 PHY_MARV_AUNE_ADV = 0x04,/* 16 bit r/w Auto-Neg. Advertisement */
986 PHY_MARV_AUNE_LP = 0x05,/* 16 bit r/o Link Part Ability Reg */
987 PHY_MARV_AUNE_EXP = 0x06,/* 16 bit r/o Auto-Neg. Expansion Reg */
988 PHY_MARV_NEPG = 0x07,/* 16 bit r/w Next Page Register */
989 PHY_MARV_NEPG_LP = 0x08,/* 16 bit r/o Next Page Link Partner */
990	/* Marvell-specific registers */
991 PHY_MARV_1000T_CTRL = 0x09,/* 16 bit r/w 1000Base-T Control Reg */
992 PHY_MARV_1000T_STAT = 0x0a,/* 16 bit r/o 1000Base-T Status Reg */
993 PHY_MARV_EXT_STAT = 0x0f,/* 16 bit r/o Extended Status Reg */
994 PHY_MARV_PHY_CTRL = 0x10,/* 16 bit r/w PHY Specific Ctrl Reg */
995 PHY_MARV_PHY_STAT = 0x11,/* 16 bit r/o PHY Specific Stat Reg */
996 PHY_MARV_INT_MASK = 0x12,/* 16 bit r/w Interrupt Mask Reg */
997 PHY_MARV_INT_STAT = 0x13,/* 16 bit r/o Interrupt Status Reg */
998 PHY_MARV_EXT_CTRL = 0x14,/* 16 bit r/w Ext. PHY Specific Ctrl */
999 PHY_MARV_RXE_CNT = 0x15,/* 16 bit r/w Receive Error Counter */
1000 PHY_MARV_EXT_ADR = 0x16,/* 16 bit r/w Ext. Ad. for Cable Diag. */
1001 PHY_MARV_PORT_IRQ = 0x17,/* 16 bit r/o Port 0 IRQ (88E1111 only) */
1002 PHY_MARV_LED_CTRL = 0x18,/* 16 bit r/w LED Control Reg */
1003 PHY_MARV_LED_OVER = 0x19,/* 16 bit r/w Manual LED Override Reg */
1004 PHY_MARV_EXT_CTRL_2 = 0x1a,/* 16 bit r/w Ext. PHY Specific Ctrl 2 */
1005 PHY_MARV_EXT_P_STAT = 0x1b,/* 16 bit r/w Ext. PHY Spec. Stat Reg */
1006 PHY_MARV_CABLE_DIAG = 0x1c,/* 16 bit r/o Cable Diagnostic Reg */
1007 PHY_MARV_PAGE_ADDR = 0x1d,/* 16 bit r/w Extended Page Address Reg */
1008 PHY_MARV_PAGE_DATA = 0x1e,/* 16 bit r/w Extended Page Data Reg */
1009
1010/* for 10/100 Fast Ethernet PHY (88E3082 only) */
1011 PHY_MARV_FE_LED_PAR = 0x16,/* 16 bit r/w LED Parallel Select Reg. */
1012 PHY_MARV_FE_LED_SER = 0x17,/* 16 bit r/w LED Stream Select S. LED */
1013 PHY_MARV_FE_VCT_TX = 0x1a,/* 16 bit r/w VCT Reg. for TXP/N Pins */
1014 PHY_MARV_FE_VCT_RX = 0x1b,/* 16 bit r/o VCT Reg. for RXP/N Pins */
1015 PHY_MARV_FE_SPEC_2 = 0x1c,/* 16 bit r/w Specific Control Reg. 2 */
1016};
1017
1018enum {
1019 PHY_CT_RESET = 1<<15, /* Bit 15: (sc) clear all PHY related regs */
1020 PHY_CT_LOOP = 1<<14, /* Bit 14: enable Loopback over PHY */
1021 PHY_CT_SPS_LSB = 1<<13, /* Bit 13: Speed select, lower bit */
1022 PHY_CT_ANE = 1<<12, /* Bit 12: Auto-Negotiation Enabled */
1023 PHY_CT_PDOWN = 1<<11, /* Bit 11: Power Down Mode */
1024 PHY_CT_ISOL = 1<<10, /* Bit 10: Isolate Mode */
1025 PHY_CT_RE_CFG = 1<<9, /* Bit 9: (sc) Restart Auto-Negotiation */
1026 PHY_CT_DUP_MD = 1<<8, /* Bit 8: Duplex Mode */
1027 PHY_CT_COL_TST = 1<<7, /* Bit 7: Collision Test enabled */
1028 PHY_CT_SPS_MSB = 1<<6, /* Bit 6: Speed select, upper bit */
1029};
1030
1031enum {
1032 PHY_CT_SP1000 = PHY_CT_SPS_MSB, /* enable speed of 1000 Mbps */
1033 PHY_CT_SP100 = PHY_CT_SPS_LSB, /* enable speed of 100 Mbps */
1034 PHY_CT_SP10 = 0, /* enable speed of 10 Mbps */
1035};
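/*
 * Illustrative composition (editor's example): a forced, non-autoneg
 * PHY control word combines one speed select value with the duplex bit,
 * e.g. forced 100 Mbps full duplex:
 *
 *	u16 ctrl = PHY_CT_SP100 | PHY_CT_DUP_MD;
 *
 * whereas autonegotiation would use PHY_CT_ANE | PHY_CT_RE_CFG instead.
 */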
1036
1037enum {
1038 PHY_ST_EXT_ST = 1<<8, /* Bit 8: Extended Status Present */
1039
1040 PHY_ST_PRE_SUP = 1<<6, /* Bit 6: Preamble Suppression */
1041 PHY_ST_AN_OVER = 1<<5, /* Bit 5: Auto-Negotiation Over */
1042 PHY_ST_REM_FLT = 1<<4, /* Bit 4: Remote Fault Condition Occurred */
1043 PHY_ST_AN_CAP = 1<<3, /* Bit 3: Auto-Negotiation Capability */
1044 PHY_ST_LSYNC = 1<<2, /* Bit 2: Link Synchronized */
1045 PHY_ST_JAB_DET = 1<<1, /* Bit 1: Jabber Detected */
1046 PHY_ST_EXT_REG = 1<<0, /* Bit 0: Extended Register available */
1047};
1048
1049enum {
1050 PHY_I1_OUI_MSK = 0x3f<<10, /* Bit 15..10: Organization Unique ID */
1051 PHY_I1_MOD_NUM = 0x3f<<4, /* Bit 9.. 4: Model Number */
1052 PHY_I1_REV_MSK = 0xf, /* Bit 3.. 0: Revision Number */
1053};
1054
1055/* different Broadcom PHY Ids */
1056enum {
1057 PHY_BCOM_ID1_A1 = 0x6041,
1058 PHY_BCOM_ID1_B2 = 0x6043,
1059 PHY_BCOM_ID1_C0 = 0x6044,
1060 PHY_BCOM_ID1_C5 = 0x6047,
1061};
1062
1063/* different Marvell PHY Ids */
1064enum {
1065 PHY_MARV_ID0_VAL= 0x0141, /* Marvell Unique Identifier */
1066 PHY_MARV_ID1_B0 = 0x0C23, /* Yukon (PHY 88E1011) */
1067 PHY_MARV_ID1_B2 = 0x0C25, /* Yukon-Plus (PHY 88E1011) */
1068 PHY_MARV_ID1_C2 = 0x0CC2, /* Yukon-EC (PHY 88E1111) */
1069 PHY_MARV_ID1_Y2 = 0x0C91, /* Yukon-2 (PHY 88E1112) */
1070};
1071
1072/* Advertisement register bits */
1073enum {
1074 PHY_AN_NXT_PG = 1<<15, /* Bit 15: Request Next Page */
1075 PHY_AN_ACK = 1<<14, /* Bit 14: (ro) Acknowledge Received */
1076 PHY_AN_RF = 1<<13, /* Bit 13: Remote Fault Bits */
1077
1078 PHY_AN_PAUSE_ASYM = 1<<11,/* Bit 11: Try for asymmetric */
1079 PHY_AN_PAUSE_CAP = 1<<10, /* Bit 10: Try for pause */
1080	PHY_AN_100BASE4	= 1<<9, /* Bit 9:	Try for 100Base-T4 */
1081 PHY_AN_100FULL = 1<<8, /* Bit 8: Try for 100mbps full-duplex */
1082 PHY_AN_100HALF = 1<<7, /* Bit 7: Try for 100mbps half-duplex */
1083 PHY_AN_10FULL = 1<<6, /* Bit 6: Try for 10mbps full-duplex */
1084 PHY_AN_10HALF = 1<<5, /* Bit 5: Try for 10mbps half-duplex */
1085 PHY_AN_CSMA = 1<<0, /* Bit 0: Only selector supported */
1086 PHY_AN_SEL = 0x1f, /* Bit 4..0: Selector Field, 00001=Ethernet*/
1087 PHY_AN_FULL = PHY_AN_100FULL | PHY_AN_10FULL | PHY_AN_CSMA,
1088 PHY_AN_ALL = PHY_AN_10HALF | PHY_AN_10FULL |
1089 PHY_AN_100HALF | PHY_AN_100FULL,
1090};
1091
1092/* Xmac Specific */
1093enum {
1094 PHY_X_AN_NXT_PG = 1<<15, /* Bit 15: Request Next Page */
1095 PHY_X_AN_ACK = 1<<14, /* Bit 14: (ro) Acknowledge Received */
1096 PHY_X_AN_RFB = 3<<12,/* Bit 13..12: Remote Fault Bits */
1097
1098 PHY_X_AN_PAUSE = 3<<7,/* Bit 8.. 7: Pause Bits */
1099 PHY_X_AN_HD = 1<<6, /* Bit 6: Half Duplex */
1100 PHY_X_AN_FD = 1<<5, /* Bit 5: Full Duplex */
1101};
1102
1103/* Pause Bits (PHY_X_AN_PAUSE and PHY_X_RS_PAUSE) encoding */
1104enum {
1105 PHY_X_P_NO_PAUSE= 0<<7,/* Bit 8..7: no Pause Mode */
1106 PHY_X_P_SYM_MD = 1<<7, /* Bit 8..7: symmetric Pause Mode */
1107 PHY_X_P_ASYM_MD = 2<<7,/* Bit 8..7: asymmetric Pause Mode */
1108 PHY_X_P_BOTH_MD = 3<<7,/* Bit 8..7: both Pause Mode */
1109};
1110
1111
1112/***** PHY_XMAC_EXT_STAT 16 bit r/w Extended Status Register *****/
1113enum {
1114 PHY_X_EX_FD = 1<<15, /* Bit 15: Device Supports Full Duplex */
1115 PHY_X_EX_HD = 1<<14, /* Bit 14: Device Supports Half Duplex */
1116};
1117
1118/***** PHY_XMAC_RES_ABI 16 bit r/o PHY Resolved Ability *****/
1119enum {
1120 PHY_X_RS_PAUSE = 3<<7, /* Bit 8..7: selected Pause Mode */
1121 PHY_X_RS_HD = 1<<6, /* Bit 6: Half Duplex Mode selected */
1122 PHY_X_RS_FD = 1<<5, /* Bit 5: Full Duplex Mode selected */
1123 PHY_X_RS_ABLMIS = 1<<4, /* Bit 4: duplex or pause cap mismatch */
1124 PHY_X_RS_PAUMIS = 1<<3, /* Bit 3: pause capability mismatch */
1125};
1126
1127/* Remote Fault Bits (PHY_X_AN_RFB) encoding */
1128enum {
1129 X_RFB_OK = 0<<12,/* Bit 13..12 No errors, Link OK */
1130 X_RFB_LF = 1<<12,/* Bit 13..12 Link Failure */
1131 X_RFB_OFF = 2<<12,/* Bit 13..12 Offline */
1132 X_RFB_AN_ERR = 3<<12,/* Bit 13..12 Auto-Negotiation Error */
1133};
1134
1135/* Broadcom-Specific */
1136/***** PHY_BCOM_1000T_CTRL 16 bit r/w 1000Base-T Control Reg *****/
1137enum {
1138 PHY_B_1000C_TEST = 7<<13,/* Bit 15..13: Test Modes */
1139 PHY_B_1000C_MSE = 1<<12, /* Bit 12: Master/Slave Enable */
1140 PHY_B_1000C_MSC = 1<<11, /* Bit 11: M/S Configuration */
1141 PHY_B_1000C_RD = 1<<10, /* Bit 10: Repeater/DTE */
1142 PHY_B_1000C_AFD = 1<<9, /* Bit 9: Advertise Full Duplex */
1143 PHY_B_1000C_AHD = 1<<8, /* Bit 8: Advertise Half Duplex */
1144};
1145
1146/***** PHY_BCOM_1000T_STAT 16 bit r/o 1000Base-T Status Reg *****/
1147/***** PHY_MARV_1000T_STAT 16 bit r/o 1000Base-T Status Reg *****/
1148enum {
1149 PHY_B_1000S_MSF = 1<<15, /* Bit 15: Master/Slave Fault */
1150 PHY_B_1000S_MSR = 1<<14, /* Bit 14: Master/Slave Result */
1151 PHY_B_1000S_LRS = 1<<13, /* Bit 13: Local Receiver Status */
1152 PHY_B_1000S_RRS = 1<<12, /* Bit 12: Remote Receiver Status */
1153 PHY_B_1000S_LP_FD = 1<<11, /* Bit 11: Link Partner can FD */
1154 PHY_B_1000S_LP_HD = 1<<10, /* Bit 10: Link Partner can HD */
1155 /* Bit 9..8: reserved */
1156 PHY_B_1000S_IEC = 0xff, /* Bit 7..0: Idle Error Count */
1157};
1158
1159/***** PHY_BCOM_EXT_STAT 16 bit r/o Extended Status Register *****/
1160enum {
1161 PHY_B_ES_X_FD_CAP = 1<<15, /* Bit 15: 1000Base-X FD capable */
1162 PHY_B_ES_X_HD_CAP = 1<<14, /* Bit 14: 1000Base-X HD capable */
1163 PHY_B_ES_T_FD_CAP = 1<<13, /* Bit 13: 1000Base-T FD capable */
1164 PHY_B_ES_T_HD_CAP = 1<<12, /* Bit 12: 1000Base-T HD capable */
1165};
1166
1167/***** PHY_BCOM_P_EXT_CTRL 16 bit r/w PHY Extended Control Reg *****/
1168enum {
1169	PHY_B_PEC_MAC_PHY	= 1<<15, /* Bit 15:	10BIT/GMII Interface */
1170 PHY_B_PEC_DIS_CROSS = 1<<14, /* Bit 14: Disable MDI Crossover */
1171 PHY_B_PEC_TX_DIS = 1<<13, /* Bit 13: Tx output Disabled */
1172 PHY_B_PEC_INT_DIS = 1<<12, /* Bit 12: Interrupts Disabled */
1173 PHY_B_PEC_F_INT = 1<<11, /* Bit 11: Force Interrupt */
1174 PHY_B_PEC_BY_45 = 1<<10, /* Bit 10: Bypass 4B5B-Decoder */
1175 PHY_B_PEC_BY_SCR = 1<<9, /* Bit 9: Bypass Scrambler */
1176 PHY_B_PEC_BY_MLT3 = 1<<8, /* Bit 8: Bypass MLT3 Encoder */
1177 PHY_B_PEC_BY_RXA = 1<<7, /* Bit 7: Bypass Rx Alignm. */
1178 PHY_B_PEC_RES_SCR = 1<<6, /* Bit 6: Reset Scrambler */
1179 PHY_B_PEC_EN_LTR = 1<<5, /* Bit 5: Ena LED Traffic Mode */
1180 PHY_B_PEC_LED_ON = 1<<4, /* Bit 4: Force LED's on */
1181 PHY_B_PEC_LED_OFF = 1<<3, /* Bit 3: Force LED's off */
1182 PHY_B_PEC_EX_IPG = 1<<2, /* Bit 2: Extend Tx IPG Mode */
1183 PHY_B_PEC_3_LED = 1<<1, /* Bit 1: Three Link LED mode */
1184	PHY_B_PEC_HIGH_LA	= 1<<0, /* Bit 0:	GMII FIFO Elasticity */
1185};
1186
1187/***** PHY_BCOM_P_EXT_STAT 16 bit r/o PHY Extended Status Reg *****/
1188enum {
1189 PHY_B_PES_CROSS_STAT = 1<<13, /* Bit 13: MDI Crossover Status */
1190 PHY_B_PES_INT_STAT = 1<<12, /* Bit 12: Interrupt Status */
1191 PHY_B_PES_RRS = 1<<11, /* Bit 11: Remote Receiver Stat. */
1192 PHY_B_PES_LRS = 1<<10, /* Bit 10: Local Receiver Stat. */
1193 PHY_B_PES_LOCKED = 1<<9, /* Bit 9: Locked */
1194 PHY_B_PES_LS = 1<<8, /* Bit 8: Link Status */
1195 PHY_B_PES_RF = 1<<7, /* Bit 7: Remote Fault */
1196 PHY_B_PES_CE_ER = 1<<6, /* Bit 6: Carrier Ext Error */
1197 PHY_B_PES_BAD_SSD = 1<<5, /* Bit 5: Bad SSD */
1198 PHY_B_PES_BAD_ESD = 1<<4, /* Bit 4: Bad ESD */
1199 PHY_B_PES_RX_ER = 1<<3, /* Bit 3: Receive Error */
1200 PHY_B_PES_TX_ER = 1<<2, /* Bit 2: Transmit Error */
1201 PHY_B_PES_LOCK_ER = 1<<1, /* Bit 1: Lock Error */
1202 PHY_B_PES_MLT3_ER = 1<<0, /* Bit 0: MLT3 code Error */
1203};
1204
1205/* PHY_BCOM_AUNE_ADV 16 bit r/w Auto-Negotiation Advertisement *****/
1206/* PHY_BCOM_AUNE_LP 16 bit r/o Link Partner Ability Reg *****/
1207enum {
1208 PHY_B_AN_RF = 1<<13, /* Bit 13: Remote Fault */
1209
1210 PHY_B_AN_ASP = 1<<11, /* Bit 11: Asymmetric Pause */
1211 PHY_B_AN_PC = 1<<10, /* Bit 10: Pause Capable */
1212};
1213
1214
1215/***** PHY_BCOM_FC_CTR 16 bit r/w False Carrier Counter *****/
1216enum {
1217 PHY_B_FC_CTR = 0xff, /* Bit 7..0: False Carrier Counter */
1218
1219/***** PHY_BCOM_RNO_CTR 16 bit r/w Receive NOT_OK Counter *****/
1220 PHY_B_RC_LOC_MSK = 0xff00, /* Bit 15..8: Local Rx NOT_OK cnt */
1221 PHY_B_RC_REM_MSK = 0x00ff, /* Bit 7..0: Remote Rx NOT_OK cnt */
1222
1223/***** PHY_BCOM_AUX_CTRL 16 bit r/w Auxiliary Control Reg *****/
1224 PHY_B_AC_L_SQE = 1<<15, /* Bit 15: Low Squelch */
1225 PHY_B_AC_LONG_PACK = 1<<14, /* Bit 14: Rx Long Packets */
1226 PHY_B_AC_ER_CTRL = 3<<12,/* Bit 13..12: Edgerate Control */
1227 /* Bit 11: reserved */
1228 PHY_B_AC_TX_TST = 1<<10, /* Bit 10: Tx test bit, always 1 */
1229 /* Bit 9.. 8: reserved */
1230 PHY_B_AC_DIS_PRF = 1<<7, /* Bit 7: dis part resp filter */
1231 /* Bit 6: reserved */
1232 PHY_B_AC_DIS_PM = 1<<5, /* Bit 5: dis power management */
1233 /* Bit 4: reserved */
1234 PHY_B_AC_DIAG = 1<<3, /* Bit 3: Diagnostic Mode */
1235};
1236
1237/***** PHY_BCOM_AUX_STAT 16 bit r/o Auxiliary Status Reg *****/
1238enum {
1239 PHY_B_AS_AN_C = 1<<15, /* Bit 15: AutoNeg complete */
1240 PHY_B_AS_AN_CA = 1<<14, /* Bit 14: AN Complete Ack */
1241 PHY_B_AS_ANACK_D = 1<<13, /* Bit 13: AN Ack Detect */
1242 PHY_B_AS_ANAB_D = 1<<12, /* Bit 12: AN Ability Detect */
1243 PHY_B_AS_NPW = 1<<11, /* Bit 11: AN Next Page Wait */
1244	PHY_B_AS_AN_RES_MSK = 7<<8,/* Bit 10..8:	AN HCD */
1245 PHY_B_AS_PDF = 1<<7, /* Bit 7: Parallel Detect. Fault */
1246 PHY_B_AS_RF = 1<<6, /* Bit 6: Remote Fault */
1247 PHY_B_AS_ANP_R = 1<<5, /* Bit 5: AN Page Received */
1248 PHY_B_AS_LP_ANAB = 1<<4, /* Bit 4: LP AN Ability */
1249 PHY_B_AS_LP_NPAB = 1<<3, /* Bit 3: LP Next Page Ability */
1250 PHY_B_AS_LS = 1<<2, /* Bit 2: Link Status */
1251 PHY_B_AS_PRR = 1<<1, /* Bit 1: Pause Resolution-Rx */
1252 PHY_B_AS_PRT = 1<<0, /* Bit 0: Pause Resolution-Tx */
1253};
1254#define PHY_B_AS_PAUSE_MSK (PHY_B_AS_PRR | PHY_B_AS_PRT)
1255
1256/***** PHY_BCOM_INT_STAT 16 bit r/o Interrupt Status Reg *****/
1257/***** PHY_BCOM_INT_MASK 16 bit r/w Interrupt Mask Reg *****/
1258enum {
1259 PHY_B_IS_PSE = 1<<14, /* Bit 14: Pair Swap Error */
1260 PHY_B_IS_MDXI_SC = 1<<13, /* Bit 13: MDIX Status Change */
1261 PHY_B_IS_HCT = 1<<12, /* Bit 12: counter above 32k */
1262 PHY_B_IS_LCT = 1<<11, /* Bit 11: counter above 128 */
1263 PHY_B_IS_AN_PR = 1<<10, /* Bit 10: Page Received */
1264 PHY_B_IS_NO_HDCL = 1<<9, /* Bit 9: No HCD Link */
1265 PHY_B_IS_NO_HDC = 1<<8, /* Bit 8: No HCD */
1266 PHY_B_IS_NEG_USHDC = 1<<7, /* Bit 7: Negotiated Unsup. HCD */
1267 PHY_B_IS_SCR_S_ER = 1<<6, /* Bit 6: Scrambler Sync Error */
1268 PHY_B_IS_RRS_CHANGE = 1<<5, /* Bit 5: Remote Rx Stat Change */
1269 PHY_B_IS_LRS_CHANGE = 1<<4, /* Bit 4: Local Rx Stat Change */
1270 PHY_B_IS_DUP_CHANGE = 1<<3, /* Bit 3: Duplex Mode Change */
1271 PHY_B_IS_LSP_CHANGE = 1<<2, /* Bit 2: Link Speed Change */
1272 PHY_B_IS_LST_CHANGE = 1<<1, /* Bit 1: Link Status Changed */
1273 PHY_B_IS_CRC_ER = 1<<0, /* Bit 0: CRC Error */
1274};
1275#define PHY_B_DEF_MSK \
1276 (~(PHY_B_IS_PSE | PHY_B_IS_AN_PR | PHY_B_IS_DUP_CHANGE | \
1277 PHY_B_IS_LSP_CHANGE | PHY_B_IS_LST_CHANGE))
1278
1279/* Pause Bits (PHY_B_AN_ASP and PHY_B_AN_PC) encoding */
1280enum {
1281 PHY_B_P_NO_PAUSE = 0<<10,/* Bit 11..10: no Pause Mode */
1282 PHY_B_P_SYM_MD = 1<<10, /* Bit 11..10: symmetric Pause Mode */
1283 PHY_B_P_ASYM_MD = 2<<10,/* Bit 11..10: asymmetric Pause Mode */
1284 PHY_B_P_BOTH_MD = 3<<10,/* Bit 11..10: both Pause Mode */
1285};
1286/*
1287 * Resolved Duplex mode and Capabilities (Aux Status Summary Reg)
1288 */
1289enum {
1290 PHY_B_RES_1000FD = 7<<8,/* Bit 10..8: 1000Base-T Full Dup. */
1291 PHY_B_RES_1000HD = 6<<8,/* Bit 10..8: 1000Base-T Half Dup. */
1292};
1293
1294/* Marvell-Specific */
1295enum {
1296 PHY_M_AN_NXT_PG = 1<<15, /* Request Next Page */
1297 PHY_M_AN_ACK = 1<<14, /* (ro) Acknowledge Received */
1298 PHY_M_AN_RF = 1<<13, /* Remote Fault */
1299
1300 PHY_M_AN_ASP = 1<<11, /* Asymmetric Pause */
1301 PHY_M_AN_PC = 1<<10, /* MAC Pause implemented */
1302	PHY_M_AN_100_T4		= 1<<9, /* Not capable of 100Base-T4 (always 0) */
1303 PHY_M_AN_100_FD = 1<<8, /* Advertise 100Base-TX Full Duplex */
1304 PHY_M_AN_100_HD = 1<<7, /* Advertise 100Base-TX Half Duplex */
1305	PHY_M_AN_10_FD		= 1<<6, /* Advertise 10Base-T Full Duplex */
1306	PHY_M_AN_10_HD		= 1<<5, /* Advertise 10Base-T Half Duplex */
1307 PHY_M_AN_SEL_MSK =0x1f<<4, /* Bit 4.. 0: Selector Field Mask */
1308};
1309
1310/* special defines for FIBER (88E1011S only) */
1311enum {
1312 PHY_M_AN_ASP_X = 1<<8, /* Asymmetric Pause */
1313 PHY_M_AN_PC_X = 1<<7, /* MAC Pause implemented */
1314	PHY_M_AN_1000X_AHD	= 1<<6, /* Advertise 1000Base-X Half Duplex */
1315	PHY_M_AN_1000X_AFD	= 1<<5, /* Advertise 1000Base-X Full Duplex */
1316};
1317
1318/* Pause Bits (PHY_M_AN_ASP_X and PHY_M_AN_PC_X) encoding */
1319enum {
1320 PHY_M_P_NO_PAUSE_X = 0<<7,/* Bit 8.. 7: no Pause Mode */
1321 PHY_M_P_SYM_MD_X = 1<<7, /* Bit 8.. 7: symmetric Pause Mode */
1322 PHY_M_P_ASYM_MD_X = 2<<7,/* Bit 8.. 7: asymmetric Pause Mode */
1323 PHY_M_P_BOTH_MD_X = 3<<7,/* Bit 8.. 7: both Pause Mode */
1324};
1325
1326/***** PHY_MARV_1000T_CTRL 16 bit r/w 1000Base-T Control Reg *****/
1327enum {
1328 PHY_M_1000C_TEST= 7<<13,/* Bit 15..13: Test Modes */
1329 PHY_M_1000C_MSE = 1<<12, /* Manual Master/Slave Enable */
1330 PHY_M_1000C_MSC = 1<<11, /* M/S Configuration (1=Master) */
1331 PHY_M_1000C_MPD = 1<<10, /* Multi-Port Device */
1332 PHY_M_1000C_AFD = 1<<9, /* Advertise Full Duplex */
1333 PHY_M_1000C_AHD = 1<<8, /* Advertise Half Duplex */
1334};
1335
1336/***** PHY_MARV_PHY_CTRL 16 bit r/w PHY Specific Ctrl Reg *****/
1337enum {
1338 PHY_M_PC_TX_FFD_MSK = 3<<14,/* Bit 15..14: Tx FIFO Depth Mask */
1339 PHY_M_PC_RX_FFD_MSK = 3<<12,/* Bit 13..12: Rx FIFO Depth Mask */
1340 PHY_M_PC_ASS_CRS_TX = 1<<11, /* Assert CRS on Transmit */
1341 PHY_M_PC_FL_GOOD = 1<<10, /* Force Link Good */
1342 PHY_M_PC_EN_DET_MSK = 3<<8,/* Bit 9.. 8: Energy Detect Mask */
1343 PHY_M_PC_ENA_EXT_D = 1<<7, /* Enable Ext. Distance (10BT) */
1344 PHY_M_PC_MDIX_MSK = 3<<5,/* Bit 6.. 5: MDI/MDIX Config. Mask */
1345 PHY_M_PC_DIS_125CLK = 1<<4, /* Disable 125 CLK */
1346 PHY_M_PC_MAC_POW_UP = 1<<3, /* MAC Power up */
1347 PHY_M_PC_SQE_T_ENA = 1<<2, /* SQE Test Enabled */
1348 PHY_M_PC_POL_R_DIS = 1<<1, /* Polarity Reversal Disabled */
1349 PHY_M_PC_DIS_JABBER = 1<<0, /* Disable Jabber */
1350};
1351
1352enum {
1353 PHY_M_PC_EN_DET = 2<<8, /* Energy Detect (Mode 1) */
1354 PHY_M_PC_EN_DET_PLUS = 3<<8, /* Energy Detect Plus (Mode 2) */
1355};
1356
1357enum {
1358 PHY_M_PC_MAN_MDI = 0, /* 00 = Manual MDI configuration */
1359 PHY_M_PC_MAN_MDIX = 1, /* 01 = Manual MDIX configuration */
1360 PHY_M_PC_ENA_AUTO = 3, /* 11 = Enable Automatic Crossover */
1361};
1362
1363/* for 10/100 Fast Ethernet PHY (88E3082 only) */
1364enum {
1365 PHY_M_PC_ENA_DTE_DT = 1<<15, /* Enable Data Terminal Equ. (DTE) Detect */
1366 PHY_M_PC_ENA_ENE_DT = 1<<14, /* Enable Energy Detect (sense & pulse) */
1367	PHY_M_PC_DIS_NLP_CK	= 1<<13, /* Disable Normal Link Pulse (NLP) Check */
1368 PHY_M_PC_ENA_LIP_NP = 1<<12, /* Enable Link Partner Next Page Reg. */
1369	PHY_M_PC_DIS_NLP_GN	= 1<<11, /* Disable Normal Link Pulse Generation */
1370
1371 PHY_M_PC_DIS_SCRAMB = 1<<9, /* Disable Scrambler */
1372 PHY_M_PC_DIS_FEFI = 1<<8, /* Disable Far End Fault Indic. (FEFI) */
1373
1374 PHY_M_PC_SH_TP_SEL = 1<<6, /* Shielded Twisted Pair Select */
1375 PHY_M_PC_RX_FD_MSK = 3<<2,/* Bit 3.. 2: Rx FIFO Depth Mask */
1376};
1377
1378/***** PHY_MARV_PHY_STAT 16 bit r/o PHY Specific Status Reg *****/
1379enum {
1380 PHY_M_PS_SPEED_MSK = 3<<14, /* Bit 15..14: Speed Mask */
1381 PHY_M_PS_SPEED_1000 = 1<<15, /* 10 = 1000 Mbps */
1382 PHY_M_PS_SPEED_100 = 1<<14, /* 01 = 100 Mbps */
1383 PHY_M_PS_SPEED_10 = 0, /* 00 = 10 Mbps */
1384 PHY_M_PS_FULL_DUP = 1<<13, /* Full Duplex */
1385 PHY_M_PS_PAGE_REC = 1<<12, /* Page Received */
1386 PHY_M_PS_SPDUP_RES = 1<<11, /* Speed & Duplex Resolved */
1387 PHY_M_PS_LINK_UP = 1<<10, /* Link Up */
1388 PHY_M_PS_CABLE_MSK = 7<<7, /* Bit 9.. 7: Cable Length Mask */
1389 PHY_M_PS_MDI_X_STAT = 1<<6, /* MDI Crossover Stat (1=MDIX) */
1390 PHY_M_PS_DOWNS_STAT = 1<<5, /* Downshift Status (1=downsh.) */
1391 PHY_M_PS_ENDET_STAT = 1<<4, /* Energy Detect Status (1=act) */
1392 PHY_M_PS_TX_P_EN = 1<<3, /* Tx Pause Enabled */
1393 PHY_M_PS_RX_P_EN = 1<<2, /* Rx Pause Enabled */
1394 PHY_M_PS_POL_REV = 1<<1, /* Polarity Reversed */
1395 PHY_M_PS_JABBER = 1<<0, /* Jabber */
1396};
1397
1398#define PHY_M_PS_PAUSE_MSK (PHY_M_PS_TX_P_EN | PHY_M_PS_RX_P_EN)
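/*
 * Decode sketch (editor's example): once PHY_M_PS_SPDUP_RES signals
 * that speed/duplex resolution is complete, the two-bit field at bits
 * 15..14 selects the link speed; example_marv_speed() is hypothetical.
 */
static inline int example_marv_speed(u16 phy_stat)
{
	if (!(phy_stat & PHY_M_PS_SPDUP_RES))
		return -1;			/* not resolved yet */

	switch (phy_stat & PHY_M_PS_SPEED_MSK) {
	case PHY_M_PS_SPEED_1000:
		return 1000;
	case PHY_M_PS_SPEED_100:
		return 100;
	default:
		return 10;			/* 00 = 10 Mbps */
	}
}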
1399
1400/* for 10/100 Fast Ethernet PHY (88E3082 only) */
1401enum {
1402 PHY_M_PS_DTE_DETECT = 1<<15, /* Data Terminal Equipment (DTE) Detected */
1403	PHY_M_PS_RES_SPEED	= 1<<14, /* Resolved Speed (1=100 Mbps, 0=10 Mbps) */
1404};
1405
1406enum {
1407 PHY_M_IS_AN_ERROR = 1<<15, /* Auto-Negotiation Error */
1408 PHY_M_IS_LSP_CHANGE = 1<<14, /* Link Speed Changed */
1409 PHY_M_IS_DUP_CHANGE = 1<<13, /* Duplex Mode Changed */
1410 PHY_M_IS_AN_PR = 1<<12, /* Page Received */
1411 PHY_M_IS_AN_COMPL = 1<<11, /* Auto-Negotiation Completed */
1412 PHY_M_IS_LST_CHANGE = 1<<10, /* Link Status Changed */
1413 PHY_M_IS_SYMB_ERROR = 1<<9, /* Symbol Error */
1414 PHY_M_IS_FALSE_CARR = 1<<8, /* False Carrier */
1415 PHY_M_IS_FIFO_ERROR = 1<<7, /* FIFO Overflow/Underrun Error */
1416 PHY_M_IS_MDI_CHANGE = 1<<6, /* MDI Crossover Changed */
1417 PHY_M_IS_DOWNSH_DET = 1<<5, /* Downshift Detected */
1418 PHY_M_IS_END_CHANGE = 1<<4, /* Energy Detect Changed */
1419
1420 PHY_M_IS_DTE_CHANGE = 1<<2, /* DTE Power Det. Status Changed */
1421 PHY_M_IS_POL_CHANGE = 1<<1, /* Polarity Changed */
1422 PHY_M_IS_JABBER = 1<<0, /* Jabber */
1423
1424 PHY_M_IS_DEF_MSK = PHY_M_IS_AN_ERROR | PHY_M_IS_LSP_CHANGE |
1425 PHY_M_IS_LST_CHANGE | PHY_M_IS_FIFO_ERROR,
1426
1427 PHY_M_IS_AN_MSK = PHY_M_IS_AN_ERROR | PHY_M_IS_AN_COMPL,
1428};
1429
1430/***** PHY_MARV_EXT_CTRL 16 bit r/w Ext. PHY Specific Ctrl *****/
1431enum {
1432 PHY_M_EC_ENA_BC_EXT = 1<<15, /* Enable Block Carr. Ext. (88E1111 only) */
1433 PHY_M_EC_ENA_LIN_LB = 1<<14, /* Enable Line Loopback (88E1111 only) */
1434
1435 PHY_M_EC_DIS_LINK_P = 1<<12, /* Disable Link Pulses (88E1111 only) */
1436 PHY_M_EC_M_DSC_MSK = 3<<10, /* Bit 11..10: Master Downshift Counter */
1437 /* (88E1011 only) */
1438 PHY_M_EC_S_DSC_MSK = 3<<8, /* Bit 9.. 8: Slave Downshift Counter */
1439 /* (88E1011 only) */
1440 PHY_M_EC_M_DSC_MSK2 = 7<<9, /* Bit 11.. 9: Master Downshift Counter */
1441 /* (88E1111 only) */
1442 PHY_M_EC_DOWN_S_ENA = 1<<8, /* Downshift Enable (88E1111 only) */
1443 /* !!! Errata in spec. (1 = disable) */
1444 PHY_M_EC_RX_TIM_CT = 1<<7, /* RGMII Rx Timing Control*/
1445 PHY_M_EC_MAC_S_MSK = 7<<4, /* Bit 6.. 4: Def. MAC interface speed */
1446 PHY_M_EC_FIB_AN_ENA = 1<<3, /* Fiber Auto-Neg. Enable (88E1011S only) */
1447 PHY_M_EC_DTE_D_ENA = 1<<2, /* DTE Detect Enable (88E1111 only) */
1448 PHY_M_EC_TX_TIM_CT = 1<<1, /* RGMII Tx Timing Control */
1449	PHY_M_EC_TRANS_DIS	= 1<<0, /* Transmitter Disable (88E1111 only) */
};
1450
1451#define PHY_M_EC_M_DSC(x) ((u16)(x)<<10) /* 00=1x; 01=2x; 10=3x; 11=4x */
1452#define PHY_M_EC_S_DSC(x) ((u16)(x)<<8) /* 00=dis; 01=1x; 10=2x; 11=3x */
1453#define PHY_M_EC_MAC_S(x) ((u16)(x)<<4) /* 01X=0; 110=2.5; 111=25 (MHz) */
1454
1455#define PHY_M_EC_M_DSC_2(x) ((u16)(x)<<9) /* 000=1x; 001=2x; 010=3x; 011=4x */
1456 /* 100=5x; 101=6x; 110=7x; 111=8x */
1457enum {
1458 MAC_TX_CLK_0_MHZ = 2,
1459 MAC_TX_CLK_2_5_MHZ = 6,
1460 MAC_TX_CLK_25_MHZ = 7,
1461};
1462
1463/***** PHY_MARV_LED_CTRL 16 bit r/w LED Control Reg *****/
1464enum {
1465 PHY_M_LEDC_DIS_LED = 1<<15, /* Disable LED */
1466 PHY_M_LEDC_PULS_MSK = 7<<12,/* Bit 14..12: Pulse Stretch Mask */
1467 PHY_M_LEDC_F_INT = 1<<11, /* Force Interrupt */
1468 PHY_M_LEDC_BL_R_MSK = 7<<8,/* Bit 10.. 8: Blink Rate Mask */
1469 PHY_M_LEDC_DP_C_LSB = 1<<7, /* Duplex Control (LSB, 88E1111 only) */
1470 PHY_M_LEDC_TX_C_LSB = 1<<6, /* Tx Control (LSB, 88E1111 only) */
1471 PHY_M_LEDC_LK_C_MSK = 7<<3,/* Bit 5.. 3: Link Control Mask */
1472 /* (88E1111 only) */
1473};
1474#define PHY_M_LED_PULS_DUR(x) (((u16)(x)<<12) & PHY_M_LEDC_PULS_MSK)
1475#define PHY_M_LED_BLINK_RT(x) (((u16)(x)<<8) & PHY_M_LEDC_BL_R_MSK)
1476
1477enum {
1478 PHY_M_LEDC_LINK_MSK = 3<<3, /* Bit 4.. 3: Link Control Mask */
1479 /* (88E1011 only) */
1480 PHY_M_LEDC_DP_CTRL = 1<<2, /* Duplex Control */
1481 PHY_M_LEDC_DP_C_MSB = 1<<2, /* Duplex Control (MSB, 88E1111 only) */
1482 PHY_M_LEDC_RX_CTRL = 1<<1, /* Rx Activity / Link */
1483 PHY_M_LEDC_TX_CTRL = 1<<0, /* Tx Activity / Link */
1484 PHY_M_LEDC_TX_C_MSB = 1<<0, /* Tx Control (MSB, 88E1111 only) */
1485};
1486
1487enum {
1488 PULS_NO_STR = 0, /* no pulse stretching */
1489 PULS_21MS = 1, /* 21 ms to 42 ms */
1490 PULS_42MS = 2, /* 42 ms to 84 ms */
1491 PULS_84MS = 3, /* 84 ms to 170 ms */
1492 PULS_170MS = 4, /* 170 ms to 340 ms */
1493 PULS_340MS = 5, /* 340 ms to 670 ms */
1494 PULS_670MS = 6, /* 670 ms to 1.3 s */
1495 PULS_1300MS = 7, /* 1.3 s to 2.7 s */
1496};
1497
1498
1499enum {
1500 BLINK_42MS = 0, /* 42 ms */
1501 BLINK_84MS = 1, /* 84 ms */
1502 BLINK_170MS = 2, /* 170 ms */
1503 BLINK_340MS = 3, /* 340 ms */
1504 BLINK_670MS = 4, /* 670 ms */
1505};
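/*
 * Usage sketch (editor's example): the PULS_* and BLINK_* codes above
 * plug into the PHY_MARV_LED_CTRL field macros, e.g. a 170-340 ms pulse
 * stretch combined with an 84 ms blink rate:
 *
 *	u16 ledc = PHY_M_LED_PULS_DUR(PULS_170MS) |
 *		   PHY_M_LED_BLINK_RT(BLINK_84MS);
 */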
1506
1507/***** PHY_MARV_LED_OVER 16 bit r/w Manual LED Override Reg *****/
1508#define PHY_M_LED_MO_SGMII(x) ((x)<<14) /* Bit 15..14: SGMII AN Timer */
1509 /* Bit 13..12: reserved */
1510#define PHY_M_LED_MO_DUP(x) ((x)<<10) /* Bit 11..10: Duplex */
1511#define PHY_M_LED_MO_10(x) ((x)<<8) /* Bit 9.. 8: Link 10 */
1512#define PHY_M_LED_MO_100(x) ((x)<<6) /* Bit 7.. 6: Link 100 */
1513#define PHY_M_LED_MO_1000(x) ((x)<<4) /* Bit 5.. 4: Link 1000 */
1514#define PHY_M_LED_MO_RX(x) ((x)<<2) /* Bit 3.. 2: Rx */
1515#define PHY_M_LED_MO_TX(x) ((x)<<0) /* Bit 1.. 0: Tx */
1516
1517enum {
1518 MO_LED_NORM = 0,
1519 MO_LED_BLINK = 1,
1520 MO_LED_OFF = 2,
1521 MO_LED_ON = 3,
1522};
1523
1524/***** PHY_MARV_EXT_CTRL_2 16 bit r/w Ext. PHY Specific Ctrl 2 *****/
1525enum {
1526 PHY_M_EC2_FI_IMPED = 1<<6, /* Fiber Input Impedance */
1527 PHY_M_EC2_FO_IMPED = 1<<5, /* Fiber Output Impedance */
1528 PHY_M_EC2_FO_M_CLK = 1<<4, /* Fiber Mode Clock Enable */
1529 PHY_M_EC2_FO_BOOST = 1<<3, /* Fiber Output Boost */
1530 PHY_M_EC2_FO_AM_MSK = 7, /* Bit 2.. 0: Fiber Output Amplitude */
1531};
1532
1533/***** PHY_MARV_EXT_P_STAT 16 bit r/w Ext. PHY Specific Status *****/
1534enum {
1535 PHY_M_FC_AUTO_SEL = 1<<15, /* Fiber/Copper Auto Sel. Dis. */
1536 PHY_M_FC_AN_REG_ACC = 1<<14, /* Fiber/Copper AN Reg. Access */
1537 PHY_M_FC_RESOLUTION = 1<<13, /* Fiber/Copper Resolution */
1538 PHY_M_SER_IF_AN_BP = 1<<12, /* Ser. IF AN Bypass Enable */
1539 PHY_M_SER_IF_BP_ST = 1<<11, /* Ser. IF AN Bypass Status */
1540 PHY_M_IRQ_POLARITY = 1<<10, /* IRQ polarity */
1541 PHY_M_DIS_AUT_MED = 1<<9, /* Disable Aut. Medium Reg. Selection */
1542 /* (88E1111 only) */
1543 /* Bit 9.. 4: reserved (88E1011 only) */
1544 PHY_M_UNDOC1 = 1<<7, /* undocumented bit !! */
1545 PHY_M_DTE_POW_STAT = 1<<4, /* DTE Power Status (88E1111 only) */
1546 PHY_M_MODE_MASK = 0xf, /* Bit 3.. 0: copy of HWCFG MODE[3:0] */
1547};
1548
1549/***** PHY_MARV_CABLE_DIAG 16 bit r/o Cable Diagnostic Reg *****/
1550enum {
1551 PHY_M_CABD_ENA_TEST = 1<<15, /* Enable Test (Page 0) */
1552 PHY_M_CABD_DIS_WAIT = 1<<15, /* Disable Waiting Period (Page 1) */
1553 /* (88E1111 only) */
1554 PHY_M_CABD_STAT_MSK = 3<<13, /* Bit 14..13: Status Mask */
1555 PHY_M_CABD_AMPL_MSK = 0x1f<<8, /* Bit 12.. 8: Amplitude Mask */
1556 /* (88E1111 only) */
1557 PHY_M_CABD_DIST_MSK = 0xff, /* Bit 7.. 0: Distance Mask */
1558};
1559
1560/* values for Cable Diagnostic Status (11=fail; 00=OK; 10=open; 01=short) */
1561enum {
1562 CABD_STAT_NORMAL= 0,
1563 CABD_STAT_SHORT = 1,
1564 CABD_STAT_OPEN = 2,
1565 CABD_STAT_FAIL = 3,
1566};
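/*
 * Decode sketch (editor's example) for the encoding listed above: the
 * status field occupies bits 14..13, the distance the low byte.
 */
static inline int example_cable_diag_ok(u16 cabd)
{
	unsigned stat = (cabd & PHY_M_CABD_STAT_MSK) >> 13;

	return stat == CABD_STAT_NORMAL;	/* 00 = OK per the table */
}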
1567
1568/* for 10/100 Fast Ethernet PHY (88E3082 only) */
1569/***** PHY_MARV_FE_LED_PAR 16 bit r/w LED Parallel Select Reg. *****/
1570 /* Bit 15..12: reserved (used internally) */
1571enum {
1572 PHY_M_FELP_LED2_MSK = 0xf<<8, /* Bit 11.. 8: LED2 Mask (LINK) */
1573 PHY_M_FELP_LED1_MSK = 0xf<<4, /* Bit 7.. 4: LED1 Mask (ACT) */
1574 PHY_M_FELP_LED0_MSK = 0xf, /* Bit 3.. 0: LED0 Mask (SPEED) */
1575};
1576
1577#define PHY_M_FELP_LED2_CTRL(x) (((x)<<8) & PHY_M_FELP_LED2_MSK)
1578#define PHY_M_FELP_LED1_CTRL(x) (((x)<<4) & PHY_M_FELP_LED1_MSK)
1579#define PHY_M_FELP_LED0_CTRL(x) (((x)<<0) & PHY_M_FELP_LED0_MSK)
1580
1581enum {
1582 LED_PAR_CTRL_COLX = 0x00,
1583 LED_PAR_CTRL_ERROR = 0x01,
1584 LED_PAR_CTRL_DUPLEX = 0x02,
1585 LED_PAR_CTRL_DP_COL = 0x03,
1586 LED_PAR_CTRL_SPEED = 0x04,
1587 LED_PAR_CTRL_LINK = 0x05,
1588 LED_PAR_CTRL_TX = 0x06,
1589 LED_PAR_CTRL_RX = 0x07,
1590 LED_PAR_CTRL_ACT = 0x08,
1591 LED_PAR_CTRL_LNK_RX = 0x09,
1592 LED_PAR_CTRL_LNK_AC = 0x0a,
1593 LED_PAR_CTRL_ACT_BL = 0x0b,
1594 LED_PAR_CTRL_TX_BL = 0x0c,
1595 LED_PAR_CTRL_RX_BL = 0x0d,
1596 LED_PAR_CTRL_COL_BL = 0x0e,
1597 LED_PAR_CTRL_INACT = 0x0f
1598};
1599
1600/***** PHY_MARV_FE_SPEC_2 16 bit r/w Specific Control Reg. 2 *****/
1601enum {
1602 PHY_M_FESC_DIS_WAIT = 1<<2, /* Disable TDR Waiting Period */
1603 PHY_M_FESC_ENA_MCLK = 1<<1, /* Enable MAC Rx Clock in sleep mode */
1604 PHY_M_FESC_SEL_CL_A = 1<<0, /* Select Class A driver (100B-TX) */
1605};
1606
1607
1608/***** PHY_MARV_PHY_CTRL (page 3) 16 bit r/w LED Control Reg. *****/
1609enum {
1610 PHY_M_LEDC_LOS_MSK = 0xf<<12, /* Bit 15..12: LOS LED Ctrl. Mask */
1611 PHY_M_LEDC_INIT_MSK = 0xf<<8, /* Bit 11.. 8: INIT LED Ctrl. Mask */
1612 PHY_M_LEDC_STA1_MSK = 0xf<<4, /* Bit 7.. 4: STAT1 LED Ctrl. Mask */
1613 PHY_M_LEDC_STA0_MSK = 0xf, /* Bit 3.. 0: STAT0 LED Ctrl. Mask */
1614};
1615
1616#define PHY_M_LEDC_LOS_CTRL(x) (((x)<<12) & PHY_M_LEDC_LOS_MSK)
1617#define PHY_M_LEDC_INIT_CTRL(x) (((x)<<8) & PHY_M_LEDC_INIT_MSK)
1618#define PHY_M_LEDC_STA1_CTRL(x) (((x)<<4) & PHY_M_LEDC_STA1_MSK)
1619#define PHY_M_LEDC_STA0_CTRL(x) (((x)<<0) & PHY_M_LEDC_STA0_MSK)
1620
1621/* GMAC registers */
1622/* Port Registers */
1623enum {
1624 GM_GP_STAT = 0x0000, /* 16 bit r/o General Purpose Status */
1625 GM_GP_CTRL = 0x0004, /* 16 bit r/w General Purpose Control */
1626 GM_TX_CTRL = 0x0008, /* 16 bit r/w Transmit Control Reg. */
1627 GM_RX_CTRL = 0x000c, /* 16 bit r/w Receive Control Reg. */
1628 GM_TX_FLOW_CTRL = 0x0010, /* 16 bit r/w Transmit Flow-Control */
1629 GM_TX_PARAM = 0x0014, /* 16 bit r/w Transmit Parameter Reg. */
1630 GM_SERIAL_MODE = 0x0018, /* 16 bit r/w Serial Mode Register */
1631/* Source Address Registers */
1632 GM_SRC_ADDR_1L = 0x001c, /* 16 bit r/w Source Address 1 (low) */
1633 GM_SRC_ADDR_1M = 0x0020, /* 16 bit r/w Source Address 1 (middle) */
1634 GM_SRC_ADDR_1H = 0x0024, /* 16 bit r/w Source Address 1 (high) */
1635 GM_SRC_ADDR_2L = 0x0028, /* 16 bit r/w Source Address 2 (low) */
1636 GM_SRC_ADDR_2M = 0x002c, /* 16 bit r/w Source Address 2 (middle) */
1637 GM_SRC_ADDR_2H = 0x0030, /* 16 bit r/w Source Address 2 (high) */
1638
1639/* Multicast Address Hash Registers */
1640 GM_MC_ADDR_H1 = 0x0034, /* 16 bit r/w Multicast Address Hash 1 */
1641 GM_MC_ADDR_H2 = 0x0038, /* 16 bit r/w Multicast Address Hash 2 */
1642 GM_MC_ADDR_H3 = 0x003c, /* 16 bit r/w Multicast Address Hash 3 */
1643 GM_MC_ADDR_H4 = 0x0040, /* 16 bit r/w Multicast Address Hash 4 */
1644
1645/* Interrupt Source Registers */
1646 GM_TX_IRQ_SRC = 0x0044, /* 16 bit r/o Tx Overflow IRQ Source */
1647 GM_RX_IRQ_SRC = 0x0048, /* 16 bit r/o Rx Overflow IRQ Source */
1648 GM_TR_IRQ_SRC = 0x004c, /* 16 bit r/o Tx/Rx Over. IRQ Source */
1649
1650/* Interrupt Mask Registers */
1651 GM_TX_IRQ_MSK = 0x0050, /* 16 bit r/w Tx Overflow IRQ Mask */
1652 GM_RX_IRQ_MSK = 0x0054, /* 16 bit r/w Rx Overflow IRQ Mask */
1653 GM_TR_IRQ_MSK = 0x0058, /* 16 bit r/w Tx/Rx Over. IRQ Mask */
1654
1655/* Serial Management Interface (SMI) Registers */
1656 GM_SMI_CTRL = 0x0080, /* 16 bit r/w SMI Control Register */
1657 GM_SMI_DATA = 0x0084, /* 16 bit r/w SMI Data Register */
1658 GM_PHY_ADDR = 0x0088, /* 16 bit r/w GPHY Address Register */
1659};
1660
1661/* MIB Counters */
1662#define GM_MIB_CNT_BASE 0x0100 /* Base Address of MIB Counters */
1663#define GM_MIB_CNT_SIZE 44 /* Number of MIB Counters */
1664
1665/*
1666 * MIB Counters base address definitions (low word) -
1667 * use offset 4 for access to high word (32 bit r/o)
1668 */
1669enum {
1670 GM_RXF_UC_OK = GM_MIB_CNT_BASE + 0, /* Unicast Frames Received OK */
1671 GM_RXF_BC_OK = GM_MIB_CNT_BASE + 8, /* Broadcast Frames Received OK */
1672 GM_RXF_MPAUSE = GM_MIB_CNT_BASE + 16, /* Pause MAC Ctrl Frames Received */
1673 GM_RXF_MC_OK = GM_MIB_CNT_BASE + 24, /* Multicast Frames Received OK */
1674 GM_RXF_FCS_ERR = GM_MIB_CNT_BASE + 32, /* Rx Frame Check Seq. Error */
1675 /* GM_MIB_CNT_BASE + 40: reserved */
1676 GM_RXO_OK_LO = GM_MIB_CNT_BASE + 48, /* Octets Received OK Low */
1677 GM_RXO_OK_HI = GM_MIB_CNT_BASE + 56, /* Octets Received OK High */
1678 GM_RXO_ERR_LO = GM_MIB_CNT_BASE + 64, /* Octets Received Invalid Low */
1679 GM_RXO_ERR_HI = GM_MIB_CNT_BASE + 72, /* Octets Received Invalid High */
1680 GM_RXF_SHT = GM_MIB_CNT_BASE + 80, /* Frames <64 Byte Received OK */
1681 GM_RXE_FRAG = GM_MIB_CNT_BASE + 88, /* Frames <64 Byte Received with FCS Err */
1682 GM_RXF_64B = GM_MIB_CNT_BASE + 96, /* 64 Byte Rx Frame */
1683 GM_RXF_127B = GM_MIB_CNT_BASE + 104, /* 65-127 Byte Rx Frame */
1684 GM_RXF_255B = GM_MIB_CNT_BASE + 112, /* 128-255 Byte Rx Frame */
1685 GM_RXF_511B = GM_MIB_CNT_BASE + 120, /* 256-511 Byte Rx Frame */
1686 GM_RXF_1023B = GM_MIB_CNT_BASE + 128, /* 512-1023 Byte Rx Frame */
1687 GM_RXF_1518B = GM_MIB_CNT_BASE + 136, /* 1024-1518 Byte Rx Frame */
1688 GM_RXF_MAX_SZ = GM_MIB_CNT_BASE + 144, /* 1519-MaxSize Byte Rx Frame */
1689 GM_RXF_LNG_ERR = GM_MIB_CNT_BASE + 152, /* Rx Frame too Long Error */
1690 GM_RXF_JAB_PKT = GM_MIB_CNT_BASE + 160, /* Rx Jabber Packet Frame */
1691 /* GM_MIB_CNT_BASE + 168: reserved */
1692 GM_RXE_FIFO_OV = GM_MIB_CNT_BASE + 176, /* Rx FIFO overflow Event */
1693 /* GM_MIB_CNT_BASE + 184: reserved */
1694 GM_TXF_UC_OK = GM_MIB_CNT_BASE + 192, /* Unicast Frames Xmitted OK */
1695 GM_TXF_BC_OK = GM_MIB_CNT_BASE + 200, /* Broadcast Frames Xmitted OK */
1696 GM_TXF_MPAUSE = GM_MIB_CNT_BASE + 208, /* Pause MAC Ctrl Frames Xmitted */
1697 GM_TXF_MC_OK = GM_MIB_CNT_BASE + 216, /* Multicast Frames Xmitted OK */
1698 GM_TXO_OK_LO = GM_MIB_CNT_BASE + 224, /* Octets Transmitted OK Low */
1699 GM_TXO_OK_HI = GM_MIB_CNT_BASE + 232, /* Octets Transmitted OK High */
1700 GM_TXF_64B = GM_MIB_CNT_BASE + 240, /* 64 Byte Tx Frame */
1701 GM_TXF_127B = GM_MIB_CNT_BASE + 248, /* 65-127 Byte Tx Frame */
1702 GM_TXF_255B = GM_MIB_CNT_BASE + 256, /* 128-255 Byte Tx Frame */
1703 GM_TXF_511B = GM_MIB_CNT_BASE + 264, /* 256-511 Byte Tx Frame */
1704 GM_TXF_1023B = GM_MIB_CNT_BASE + 272, /* 512-1023 Byte Tx Frame */
1705 GM_TXF_1518B = GM_MIB_CNT_BASE + 280, /* 1024-1518 Byte Tx Frame */
1706 GM_TXF_MAX_SZ = GM_MIB_CNT_BASE + 288, /* 1519-MaxSize Byte Tx Frame */
1707
1708 GM_TXF_COL = GM_MIB_CNT_BASE + 304, /* Tx Collision */
1709 GM_TXF_LAT_COL = GM_MIB_CNT_BASE + 312, /* Tx Late Collision */
1710 GM_TXF_ABO_COL = GM_MIB_CNT_BASE + 320, /* Tx aborted due to Exces. Col. */
1711 GM_TXF_MUL_COL = GM_MIB_CNT_BASE + 328, /* Tx Multiple Collision */
1712 GM_TXF_SNG_COL = GM_MIB_CNT_BASE + 336, /* Tx Single Collision */
1713 GM_TXE_FIFO_UR = GM_MIB_CNT_BASE + 344, /* Tx FIFO Underrun Event */
1714};
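/*
 * Read sketch (editor's example): per the comment above the enum, a
 * 32-bit MIB counter is assembled from two 16-bit words, the high word
 * at the low-word offset + 4 (GMAC registers are 16 bits wide on a
 * 4-byte stride).  The 16-bit access width and the __iomem mapping are
 * assumptions of this illustration.
 */
static inline u32 example_read_mib32(void __iomem *gma_base, unsigned reg)
{
	u32 lo = readw(gma_base + reg);		/* low word, e.g. GM_RXF_UC_OK */
	u32 hi = readw(gma_base + reg + 4);	/* high word at offset + 4 */

	return (hi << 16) | lo;
}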
1715
1716/* GMAC Bit Definitions */
1717/* GM_GP_STAT 16 bit r/o General Purpose Status Register */
1718enum {
1719 GM_GPSR_SPEED = 1<<15, /* Bit 15: Port Speed (1 = 100 Mbps) */
1720 GM_GPSR_DUPLEX = 1<<14, /* Bit 14: Duplex Mode (1 = Full) */
1721 GM_GPSR_FC_TX_DIS = 1<<13, /* Bit 13: Tx Flow-Control Mode Disabled */
1722 GM_GPSR_LINK_UP = 1<<12, /* Bit 12: Link Up Status */
1723 GM_GPSR_PAUSE = 1<<11, /* Bit 11: Pause State */
1724 GM_GPSR_TX_ACTIVE = 1<<10, /* Bit 10: Tx in Progress */
1725 GM_GPSR_EXC_COL = 1<<9, /* Bit 9: Excessive Collisions Occurred */
1726 GM_GPSR_LAT_COL = 1<<8, /* Bit 8: Late Collisions Occurred */
1727
1728 GM_GPSR_PHY_ST_CH = 1<<5, /* Bit 5: PHY Status Change */
1729 GM_GPSR_GIG_SPEED = 1<<4, /* Bit 4: Gigabit Speed (1 = 1000 Mbps) */
1730 GM_GPSR_PART_MODE = 1<<3, /* Bit 3: Partition mode */
1731 GM_GPSR_FC_RX_DIS = 1<<2, /* Bit 2: Rx Flow-Control Mode Disabled */
1732 GM_GPSR_PROM_EN = 1<<1, /* Bit 1: Promiscuous Mode Enabled */
1733};
1734
1735/* GM_GP_CTRL 16 bit r/w General Purpose Control Register */
1736enum {
1737 GM_GPCR_PROM_ENA = 1<<14, /* Bit 14: Enable Promiscuous Mode */
1738 GM_GPCR_FC_TX_DIS = 1<<13, /* Bit 13: Disable Tx Flow-Control Mode */
1739 GM_GPCR_TX_ENA = 1<<12, /* Bit 12: Enable Transmit */
1740 GM_GPCR_RX_ENA = 1<<11, /* Bit 11: Enable Receive */
1741 GM_GPCR_BURST_ENA = 1<<10, /* Bit 10: Enable Burst Mode */
1742 GM_GPCR_LOOP_ENA = 1<<9, /* Bit 9: Enable MAC Loopback Mode */
1743 GM_GPCR_PART_ENA = 1<<8, /* Bit 8: Enable Partition Mode */
1744 GM_GPCR_GIGS_ENA = 1<<7, /* Bit 7: Gigabit Speed (1000 Mbps) */
1745 GM_GPCR_FL_PASS = 1<<6, /* Bit 6: Force Link Pass */
1746 GM_GPCR_DUP_FULL = 1<<5, /* Bit 5: Full Duplex Mode */
1747 GM_GPCR_FC_RX_DIS = 1<<4, /* Bit 4: Disable Rx Flow-Control Mode */
1748 GM_GPCR_SPEED_100 = 1<<3, /* Bit 3: Port Speed 100 Mbps */
1749 GM_GPCR_AU_DUP_DIS = 1<<2, /* Bit 2: Disable Auto-Update Duplex */
1750 GM_GPCR_AU_FCT_DIS = 1<<1, /* Bit 1: Disable Auto-Update Flow-C. */
1751 GM_GPCR_AU_SPD_DIS = 1<<0, /* Bit 0: Disable Auto-Update Speed */
1752};
1753
1754#define GM_GPCR_SPEED_1000 (GM_GPCR_GIGS_ENA | GM_GPCR_SPEED_100)
1755#define GM_GPCR_AU_ALL_DIS (GM_GPCR_AU_DUP_DIS | GM_GPCR_AU_FCT_DIS|GM_GPCR_AU_SPD_DIS)
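/*
 * Illustrative composition (editor's example): forcing 1000 Mbps full
 * duplex with all auto-update paths disabled combines
 *
 *	u16 gpcr = GM_GPCR_SPEED_1000 | GM_GPCR_DUP_FULL |
 *		   GM_GPCR_AU_ALL_DIS;
 *
 * before enabling the MAC with GM_GPCR_RX_ENA | GM_GPCR_TX_ENA.
 */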
1756
1757/* GM_TX_CTRL 16 bit r/w Transmit Control Register */
1758enum {
1759 GM_TXCR_FORCE_JAM = 1<<15, /* Bit 15: Force Jam / Flow-Control */
1760 GM_TXCR_CRC_DIS = 1<<14, /* Bit 14: Disable insertion of CRC */
1761 GM_TXCR_PAD_DIS = 1<<13, /* Bit 13: Disable padding of packets */
1762 GM_TXCR_COL_THR_MSK = 7<<10, /* Bit 12..10: Collision Threshold */
1763};
1764
1765#define TX_COL_THR(x) (((x)<<10) & GM_TXCR_COL_THR_MSK)
1766#define TX_COL_DEF	0x04	/* late collision after 64 bytes */
1767
1768/* GM_RX_CTRL 16 bit r/w Receive Control Register */
1769enum {
1770 GM_RXCR_UCF_ENA = 1<<15, /* Bit 15: Enable Unicast filtering */
1771 GM_RXCR_MCF_ENA = 1<<14, /* Bit 14: Enable Multicast filtering */
1772 GM_RXCR_CRC_DIS = 1<<13, /* Bit 13: Remove 4-byte CRC */
1773 GM_RXCR_PASS_FC = 1<<12, /* Bit 12: Pass FC packets to FIFO */
1774};
1775
1776/* GM_TX_PARAM 16 bit r/w Transmit Parameter Register */
1777enum {
1778 GM_TXPA_JAMLEN_MSK = 0x03<<14, /* Bit 15..14: Jam Length */
1779 GM_TXPA_JAMIPG_MSK = 0x1f<<9, /* Bit 13..9: Jam IPG */
1780 GM_TXPA_JAMDAT_MSK = 0x1f<<4, /* Bit 8..4: IPG Jam to Data */
1781
1782 TX_JAM_LEN_DEF = 0x03,
1783 TX_JAM_IPG_DEF = 0x0b,
1784 TX_IPG_JAM_DEF = 0x1c,
1785};
1786
1787#define TX_JAM_LEN_VAL(x) (((x)<<14) & GM_TXPA_JAMLEN_MSK)
1788#define TX_JAM_IPG_VAL(x) (((x)<<9) & GM_TXPA_JAMIPG_MSK)
1789#define TX_IPG_JAM_DATA(x) (((x)<<4) & GM_TXPA_JAMDAT_MSK)
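/*
 * Worked example (editor's illustration) using the defaults above: the
 * three jam fields pack into a single GM_TX_PARAM value as
 *
 *	u16 txpa = TX_JAM_LEN_VAL(TX_JAM_LEN_DEF) |
 *		   TX_JAM_IPG_VAL(TX_JAM_IPG_DEF) |
 *		   TX_IPG_JAM_DATA(TX_IPG_JAM_DEF);
 */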
1790
1791
1792/* GM_SERIAL_MODE 16 bit r/w Serial Mode Register */
1793enum {
1794 GM_SMOD_DATABL_MSK = 0x1f<<11, /* Bit 15..11: Data Blinder (r/o) */
1795 GM_SMOD_LIMIT_4 = 1<<10, /* Bit 10: 4 consecutive Tx trials */
1796 GM_SMOD_VLAN_ENA = 1<<9, /* Bit 9: Enable VLAN (Max. Frame Len) */
1797 GM_SMOD_JUMBO_ENA = 1<<8, /* Bit 8: Enable Jumbo (Max. Frame Len) */
1798 GM_SMOD_IPG_MSK = 0x1f /* Bit 4..0: Inter-Packet Gap (IPG) */
1799};
1800
1801#define DATA_BLIND_VAL(x) (((x)<<11) & GM_SMOD_DATABL_MSK)
1802#define DATA_BLIND_DEF 0x04
1803
1804#define IPG_DATA_VAL(x)		((x) & GM_SMOD_IPG_MSK)
1805#define IPG_DATA_DEF 0x1e
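/*
 * Illustrative composition (editor's example): a typical GM_SERIAL_MODE
 * value combines the data-blinder and IPG defaults with the max-frame
 * enables, e.g.
 *
 *	u16 smod = DATA_BLIND_VAL(DATA_BLIND_DEF) |
 *		   GM_SMOD_VLAN_ENA | IPG_DATA_VAL(IPG_DATA_DEF);
 */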
1806
1807/* GM_SMI_CTRL 16 bit r/w SMI Control Register */
1808enum {
1809 GM_SMI_CT_PHY_A_MSK = 0x1f<<11, /* Bit 15..11: PHY Device Address */
1810 GM_SMI_CT_REG_A_MSK = 0x1f<<6, /* Bit 10.. 6: PHY Register Address */
1811 GM_SMI_CT_OP_RD = 1<<5, /* Bit 5: OpCode Read (0=Write)*/
1812 GM_SMI_CT_RD_VAL = 1<<4, /* Bit 4: Read Valid (Read completed) */
1813 GM_SMI_CT_BUSY = 1<<3, /* Bit 3: Busy (Operation in progress) */
1814};
1815
1816#define GM_SMI_CT_PHY_AD(x) (((x)<<11) & GM_SMI_CT_PHY_A_MSK)
1817#define GM_SMI_CT_REG_AD(x) (((x)<<6) & GM_SMI_CT_REG_A_MSK)
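/*
 * Hedged sketch (editor's example) of an SMI read cycle with the bits
 * above: write the PHY and register address with the read opcode, poll
 * until the read-valid bit appears, then fetch the data word.  gma_base
 * is an assumed __iomem mapping of the GMAC block; real code would
 * bound the polling loop.
 */
static inline u16 example_smi_read(void __iomem *gma_base, int phy, int reg)
{
	writew(GM_SMI_CT_PHY_AD(phy) | GM_SMI_CT_REG_AD(reg) |
	       GM_SMI_CT_OP_RD, gma_base + GM_SMI_CTRL);

	while (!(readw(gma_base + GM_SMI_CTRL) & GM_SMI_CT_RD_VAL))
		cpu_relax();

	return readw(gma_base + GM_SMI_DATA);
}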
1818
1819/* GM_PHY_ADDR 16 bit r/w GPHY Address Register */
1820enum {
1821 GM_PAR_MIB_CLR = 1<<5, /* Bit 5: Set MIB Clear Counter Mode */
1822 GM_PAR_MIB_TST = 1<<4, /* Bit 4: MIB Load Counter (Test Mode) */
1823};
1824
1825/* Receive Frame Status Encoding */
1826enum {
1827 GMR_FS_LEN = 0xffff<<16, /* Bit 31..16: Rx Frame Length */
1828 GMR_FS_LEN_SHIFT = 16,
1829 GMR_FS_VLAN = 1<<13, /* Bit 13: VLAN Packet */
1830 GMR_FS_JABBER = 1<<12, /* Bit 12: Jabber Packet */
1831 GMR_FS_UN_SIZE = 1<<11, /* Bit 11: Undersize Packet */
1832 GMR_FS_MC = 1<<10, /* Bit 10: Multicast Packet */
1833 GMR_FS_BC = 1<<9, /* Bit 9: Broadcast Packet */
1834 GMR_FS_RX_OK = 1<<8, /* Bit 8: Receive OK (Good Packet) */
1835 GMR_FS_GOOD_FC = 1<<7, /* Bit 7: Good Flow-Control Packet */
1836 GMR_FS_BAD_FC = 1<<6, /* Bit 6: Bad Flow-Control Packet */
1837 GMR_FS_MII_ERR = 1<<5, /* Bit 5: MII Error */
1838 GMR_FS_LONG_ERR = 1<<4, /* Bit 4: Too Long Packet */
1839 GMR_FS_FRAGMENT = 1<<3, /* Bit 3: Fragment */
1840
1841 GMR_FS_CRC_ERR = 1<<1, /* Bit 1: CRC Error */
1842 GMR_FS_RX_FF_OV = 1<<0, /* Bit 0: Rx FIFO Overflow */
1843
1844/*
1845 * GMR_FS_ANY_ERR (analogous to the XMAC's XMR_FS_ERR summary bit)
1846 */
1847 GMR_FS_ANY_ERR = GMR_FS_CRC_ERR | GMR_FS_LONG_ERR |
1848 GMR_FS_MII_ERR | GMR_FS_BAD_FC | GMR_FS_GOOD_FC |
1849 GMR_FS_JABBER,
1850/* Rx GMAC FIFO Flush Mask (default) */
1851 RX_FF_FL_DEF_MSK = GMR_FS_CRC_ERR | GMR_FS_RX_FF_OV |GMR_FS_MII_ERR |
1852 GMR_FS_BAD_FC | GMR_FS_UN_SIZE | GMR_FS_JABBER,
1853};
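/*
 * Minimal sketch (editor's example): with the composite masks above, a
 * Yukon receive path can take the good-frame fast path in one test.
 */
static inline int example_gmac_rx_good(u32 status)
{
	return (status & GMR_FS_RX_OK) && !(status & GMR_FS_ANY_ERR);
}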
1854
1855/* RX_GMF_CTRL_T 32 bit Rx GMAC FIFO Control/Test */
1856enum {
1857 GMF_WP_TST_ON = 1<<14, /* Write Pointer Test On */
1858 GMF_WP_TST_OFF = 1<<13, /* Write Pointer Test Off */
1859 GMF_WP_STEP = 1<<12, /* Write Pointer Step/Increment */
1860
1861 GMF_RP_TST_ON = 1<<10, /* Read Pointer Test On */
1862 GMF_RP_TST_OFF = 1<<9, /* Read Pointer Test Off */
1863 GMF_RP_STEP = 1<<8, /* Read Pointer Step/Increment */
1864 GMF_RX_F_FL_ON = 1<<7, /* Rx FIFO Flush Mode On */
1865 GMF_RX_F_FL_OFF = 1<<6, /* Rx FIFO Flush Mode Off */
1866 GMF_CLI_RX_FO = 1<<5, /* Clear IRQ Rx FIFO Overrun */
1867 GMF_CLI_RX_FC = 1<<4, /* Clear IRQ Rx Frame Complete */
1868 GMF_OPER_ON = 1<<3, /* Operational Mode On */
1869 GMF_OPER_OFF = 1<<2, /* Operational Mode Off */
1870 GMF_RST_CLR = 1<<1, /* Clear GMAC FIFO Reset */
1871 GMF_RST_SET = 1<<0, /* Set GMAC FIFO Reset */
1872
1873 RX_GMF_FL_THR_DEF = 0xa, /* flush threshold (default) */
1874};
1875
1876
1877/* TX_GMF_CTRL_T 32 bit Tx GMAC FIFO Control/Test */
1878enum {
1879 GMF_WSP_TST_ON = 1<<18, /* Write Shadow Pointer Test On */
1880 GMF_WSP_TST_OFF = 1<<17, /* Write Shadow Pointer Test Off */
1881 GMF_WSP_STEP = 1<<16, /* Write Shadow Pointer Step/Increment */
1882
1883 GMF_CLI_TX_FU = 1<<6, /* Clear IRQ Tx FIFO Underrun */
1884 GMF_CLI_TX_FC = 1<<5, /* Clear IRQ Tx Frame Complete */
1885 GMF_CLI_TX_PE = 1<<4, /* Clear IRQ Tx Parity Error */
1886};
1887
1888/* GMAC_TI_ST_CTRL 8 bit Time Stamp Timer Ctrl Reg (YUKON only) */
1889enum {
1890 GMT_ST_START = 1<<2, /* Start Time Stamp Timer */
1891 GMT_ST_STOP = 1<<1, /* Stop Time Stamp Timer */
1892 GMT_ST_CLR_IRQ = 1<<0, /* Clear Time Stamp Timer IRQ */
1893};
1894
1895/* GMAC_CTRL 32 bit GMAC Control Reg (YUKON only) */
1896enum {
1897 GMC_H_BURST_ON = 1<<7, /* Half Duplex Burst Mode On */
1898 GMC_H_BURST_OFF = 1<<6, /* Half Duplex Burst Mode Off */
1899 GMC_F_LOOPB_ON = 1<<5, /* FIFO Loopback On */
1900 GMC_F_LOOPB_OFF = 1<<4, /* FIFO Loopback Off */
1901 GMC_PAUSE_ON = 1<<3, /* Pause On */
1902 GMC_PAUSE_OFF = 1<<2, /* Pause Off */
1903 GMC_RST_CLR = 1<<1, /* Clear GMAC Reset */
1904 GMC_RST_SET = 1<<0, /* Set GMAC Reset */
1905};
1906
1907/* GPHY_CTRL 32 bit GPHY Control Reg (YUKON only) */
1908enum {
1909 GPC_SEL_BDT = 1<<28, /* Select Bi-Dir. Transfer for MDC/MDIO */
1910 GPC_INT_POL_HI = 1<<27, /* IRQ Polarity is Active HIGH */
1911 GPC_75_OHM = 1<<26, /* Use 75 Ohm Termination instead of 50 */
1912 GPC_DIS_FC = 1<<25, /* Disable Automatic Fiber/Copper Detection */
1913 GPC_DIS_SLEEP = 1<<24, /* Disable Energy Detect */
1914 GPC_HWCFG_M_3 = 1<<23, /* HWCFG_MODE[3] */
1915 GPC_HWCFG_M_2 = 1<<22, /* HWCFG_MODE[2] */
1916 GPC_HWCFG_M_1 = 1<<21, /* HWCFG_MODE[1] */
1917 GPC_HWCFG_M_0 = 1<<20, /* HWCFG_MODE[0] */
1918 GPC_ANEG_0 = 1<<19, /* ANEG[0] */
1919 GPC_ENA_XC = 1<<18, /* Enable MDI crossover */
1920 GPC_DIS_125 = 1<<17, /* Disable 125 MHz clock */
1921 GPC_ANEG_3 = 1<<16, /* ANEG[3] */
1922 GPC_ANEG_2 = 1<<15, /* ANEG[2] */
1923 GPC_ANEG_1 = 1<<14, /* ANEG[1] */
1924 GPC_ENA_PAUSE = 1<<13, /* Enable Pause (SYM_OR_REM) */
1925 GPC_PHYADDR_4 = 1<<12, /* Bit 4 of Phy Addr */
1926 GPC_PHYADDR_3 = 1<<11, /* Bit 3 of Phy Addr */
1927 GPC_PHYADDR_2 = 1<<10, /* Bit 2 of Phy Addr */
1928 GPC_PHYADDR_1 = 1<<9, /* Bit 1 of Phy Addr */
1929 GPC_PHYADDR_0 = 1<<8, /* Bit 0 of Phy Addr */
1930 /* Bits 7..2: reserved */
1931 GPC_RST_CLR = 1<<1, /* Clear GPHY Reset */
1932 GPC_RST_SET = 1<<0, /* Set GPHY Reset */
1933};
1934
1935#define GPC_HWCFG_GMII_COP (GPC_HWCFG_M_3|GPC_HWCFG_M_2 | GPC_HWCFG_M_1 | GPC_HWCFG_M_0)
1936#define GPC_HWCFG_GMII_FIB (GPC_HWCFG_M_2 | GPC_HWCFG_M_1 | GPC_HWCFG_M_0)
1937#define GPC_ANEG_ADV_ALL_M (GPC_ANEG_3 | GPC_ANEG_2 | GPC_ANEG_1 | GPC_ANEG_0)
1938
1939/* forced speed and duplex mode (don't mix with other ANEG bits) */
1940#define GPC_FRC10MBIT_HALF 0
1941#define GPC_FRC10MBIT_FULL GPC_ANEG_0
1942#define GPC_FRC100MBIT_HALF GPC_ANEG_1
1943#define GPC_FRC100MBIT_FULL (GPC_ANEG_0 | GPC_ANEG_1)
1944
1945/* auto-negotiation with limited advertised speeds */
1946/* mix only with master/slave settings (for copper) */
1947#define GPC_ADV_1000_HALF GPC_ANEG_2
1948#define GPC_ADV_1000_FULL GPC_ANEG_3
1949#define GPC_ADV_ALL (GPC_ANEG_2 | GPC_ANEG_3)
1950
1951/* master/slave settings */
1952/* only for copper with 1000 Mbps */
1953#define GPC_FORCE_MASTER 0
1954#define GPC_FORCE_SLAVE GPC_ANEG_0
1955#define GPC_PREF_MASTER GPC_ANEG_1
1956#define GPC_PREF_SLAVE (GPC_ANEG_1 | GPC_ANEG_0)
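/*
 * Illustrative composition (editor's example) under the grouping rules
 * in the comments above (forced-speed values must not be mixed with the
 * other ANEG encodings): a copper GMII setup advertising both 1000 Mbps
 * modes and preferring master could shape GPHY_CTRL as
 *
 *	u32 gpc = GPC_HWCFG_GMII_COP | GPC_ADV_ALL |
 *		  GPC_PREF_MASTER | GPC_RST_CLR;
 */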
1957
1958/* GMAC_IRQ_SRC 8 bit GMAC Interrupt Source Reg (YUKON only) */
1959/* GMAC_IRQ_MSK 8 bit GMAC Interrupt Mask Reg (YUKON only) */
1960enum {
1961 GM_IS_TX_CO_OV = 1<<5, /* Transmit Counter Overflow IRQ */
1962 GM_IS_RX_CO_OV = 1<<4, /* Receive Counter Overflow IRQ */
1963 GM_IS_TX_FF_UR = 1<<3, /* Transmit FIFO Underrun */
1964 GM_IS_TX_COMPL = 1<<2, /* Frame Transmission Complete */
1965 GM_IS_RX_FF_OR = 1<<1, /* Receive FIFO Overrun */
1966 GM_IS_RX_COMPL = 1<<0, /* Frame Reception Complete */
1967
1968#define GMAC_DEF_MSK (GM_IS_RX_FF_OR | GM_IS_TX_FF_UR)
1969
1970/* GMAC_LINK_CTRL 16 bit GMAC Link Control Reg (YUKON only) */
1971 /* Bits 15.. 2: reserved */
1972 GMLC_RST_CLR = 1<<1, /* Clear GMAC Link Reset */
1973 GMLC_RST_SET = 1<<0, /* Set GMAC Link Reset */
1974
1975
1976/* WOL_CTRL_STAT 16 bit WOL Control/Status Reg */
1977 WOL_CTL_LINK_CHG_OCC = 1<<15,
1978 WOL_CTL_MAGIC_PKT_OCC = 1<<14,
1979 WOL_CTL_PATTERN_OCC = 1<<13,
1980 WOL_CTL_CLEAR_RESULT = 1<<12,
1981 WOL_CTL_ENA_PME_ON_LINK_CHG = 1<<11,
1982 WOL_CTL_DIS_PME_ON_LINK_CHG = 1<<10,
1983 WOL_CTL_ENA_PME_ON_MAGIC_PKT = 1<<9,
1984 WOL_CTL_DIS_PME_ON_MAGIC_PKT = 1<<8,
1985 WOL_CTL_ENA_PME_ON_PATTERN = 1<<7,
1986 WOL_CTL_DIS_PME_ON_PATTERN = 1<<6,
1987 WOL_CTL_ENA_LINK_CHG_UNIT = 1<<5,
1988 WOL_CTL_DIS_LINK_CHG_UNIT = 1<<4,
1989 WOL_CTL_ENA_MAGIC_PKT_UNIT = 1<<3,
1990 WOL_CTL_DIS_MAGIC_PKT_UNIT = 1<<2,
1991 WOL_CTL_ENA_PATTERN_UNIT = 1<<1,
1992 WOL_CTL_DIS_PATTERN_UNIT = 1<<0,
1993};
1994
1995#define WOL_CTL_DEFAULT \
1996 (WOL_CTL_DIS_PME_ON_LINK_CHG | \
1997 WOL_CTL_DIS_PME_ON_PATTERN | \
1998 WOL_CTL_DIS_PME_ON_MAGIC_PKT | \
1999 WOL_CTL_DIS_LINK_CHG_UNIT | \
2000 WOL_CTL_DIS_PATTERN_UNIT | \
2001 WOL_CTL_DIS_MAGIC_PKT_UNIT)
2002
2003/* WOL_MATCH_CTL 8 bit WOL Match Control Reg */
2004#define WOL_CTL_PATT_ENA(x) (1 << (x))
2005
2006
2007/* XMAC II registers */
2008enum {
2009 XM_MMU_CMD = 0x0000, /* 16 bit r/w MMU Command Register */
2010 XM_POFF = 0x0008, /* 32 bit r/w Packet Offset Register */
2011 XM_BURST = 0x000c, /* 32 bit r/w Burst Register for half duplex*/
2012 XM_1L_VLAN_TAG = 0x0010, /* 16 bit r/w One Level VLAN Tag ID */
2013 XM_2L_VLAN_TAG = 0x0014, /* 16 bit r/w Two Level VLAN Tag ID */
2014 XM_TX_CMD = 0x0020, /* 16 bit r/w Transmit Command Register */
2015 XM_TX_RT_LIM = 0x0024, /* 16 bit r/w Transmit Retry Limit Register */
2016 XM_TX_STIME = 0x0028, /* 16 bit r/w Transmit Slottime Register */
2017 XM_TX_IPG = 0x002c, /* 16 bit r/w Transmit Inter Packet Gap */
2018 XM_RX_CMD = 0x0030, /* 16 bit r/w Receive Command Register */
2019 XM_PHY_ADDR = 0x0034, /* 16 bit r/w PHY Address Register */
2020 XM_PHY_DATA = 0x0038, /* 16 bit r/w PHY Data Register */
2021 XM_GP_PORT = 0x0040, /* 32 bit r/w General Purpose Port Register */
2022 XM_IMSK = 0x0044, /* 16 bit r/w Interrupt Mask Register */
2023 XM_ISRC = 0x0048, /* 16 bit r/o Interrupt Status Register */
2024 XM_HW_CFG = 0x004c, /* 16 bit r/w Hardware Config Register */
2025 XM_TX_LO_WM = 0x0060, /* 16 bit r/w Tx FIFO Low Water Mark */
2026 XM_TX_HI_WM = 0x0062, /* 16 bit r/w Tx FIFO High Water Mark */
2027 XM_TX_THR = 0x0064, /* 16 bit r/w Tx Request Threshold */
2028 XM_HT_THR = 0x0066, /* 16 bit r/w Host Request Threshold */
2029 XM_PAUSE_DA = 0x0068, /* NA reg r/w Pause Destination Address */
2030 XM_CTL_PARA = 0x0070, /* 32 bit r/w Control Parameter Register */
2031 XM_MAC_OPCODE = 0x0074, /* 16 bit r/w Opcode for MAC control frames */
2032 XM_MAC_PTIME = 0x0076, /* 16 bit r/w Pause time for MAC ctrl frames*/
2033 XM_TX_STAT = 0x0078, /* 32 bit r/o Tx Status LIFO Register */
2034
2035 XM_EXM_START = 0x0080, /* r/w Start Address of the EXM Regs */
2036#define XM_EXM(reg) (XM_EXM_START + ((reg) << 3))
2037};
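/* Worked example (illustrative): XM_EXM(3) expands to
 * XM_EXM_START + (3 << 3) == 0x0080 + 0x18 == 0x0098, i.e. the EXM
 * registers sit on 8-byte strides starting at XM_EXM_START.
 */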
2038
2039enum {
2040 XM_SRC_CHK = 0x0100, /* NA reg r/w Source Check Address Register */
2041 XM_SA = 0x0108, /* NA reg r/w Station Address Register */
2042 XM_HSM = 0x0110, /* 64 bit r/w Hash Match Address Registers */
2043 XM_RX_LO_WM = 0x0118, /* 16 bit r/w Receive Low Water Mark */
2044 XM_RX_HI_WM = 0x011a, /* 16 bit r/w Receive High Water Mark */
2045 XM_RX_THR = 0x011c, /* 32 bit r/w Receive Request Threshold */
2046 XM_DEV_ID = 0x0120, /* 32 bit r/o Device ID Register */
2047 XM_MODE = 0x0124, /* 32 bit r/w Mode Register */
2048 XM_LSA = 0x0128, /* NA reg r/o Last Source Register */
2049 XM_TS_READ = 0x0130, /* 32 bit r/o Time Stamp Read Register */
2050 XM_TS_LOAD = 0x0134, /* 32 bit r/o Time Stamp Load Value */
2051 XM_STAT_CMD = 0x0200, /* 16 bit r/w Statistics Command Register */
2052 XM_RX_CNT_EV = 0x0204, /* 32 bit r/o Rx Counter Event Register */
2053 XM_TX_CNT_EV = 0x0208, /* 32 bit r/o Tx Counter Event Register */
2054 XM_RX_EV_MSK = 0x020c, /* 32 bit r/w Rx Counter Event Mask */
2055 XM_TX_EV_MSK = 0x0210, /* 32 bit r/w Tx Counter Event Mask */
2056	XM_TXF_OK	= 0x0280, /* 32 bit r/o Frames Transmitted OK Counter */
2057 XM_TXO_OK_HI = 0x0284, /* 32 bit r/o Octets Transmitted OK High Cnt*/
2058 XM_TXO_OK_LO = 0x0288, /* 32 bit r/o Octets Transmitted OK Low Cnt */
2059 XM_TXF_BC_OK = 0x028c, /* 32 bit r/o Broadcast Frames Xmitted OK */
2060 XM_TXF_MC_OK = 0x0290, /* 32 bit r/o Multicast Frames Xmitted OK */
2061 XM_TXF_UC_OK = 0x0294, /* 32 bit r/o Unicast Frames Xmitted OK */
2062 XM_TXF_LONG = 0x0298, /* 32 bit r/o Tx Long Frame Counter */
2063 XM_TXE_BURST = 0x029c, /* 32 bit r/o Tx Burst Event Counter */
2064 XM_TXF_MPAUSE = 0x02a0, /* 32 bit r/o Tx Pause MAC Ctrl Frame Cnt */
2065 XM_TXF_MCTRL = 0x02a4, /* 32 bit r/o Tx MAC Ctrl Frame Counter */
2066 XM_TXF_SNG_COL = 0x02a8, /* 32 bit r/o Tx Single Collision Counter */
2067 XM_TXF_MUL_COL = 0x02ac, /* 32 bit r/o Tx Multiple Collision Counter */
2068 XM_TXF_ABO_COL = 0x02b0, /* 32 bit r/o Tx aborted due to Exces. Col. */
2069 XM_TXF_LAT_COL = 0x02b4, /* 32 bit r/o Tx Late Collision Counter */
2070 XM_TXF_DEF = 0x02b8, /* 32 bit r/o Tx Deferred Frame Counter */
2071	XM_TXF_EX_DEF	= 0x02bc, /* 32 bit r/o Tx Excessive Deferral Counter */
2072 XM_TXE_FIFO_UR = 0x02c0, /* 32 bit r/o Tx FIFO Underrun Event Cnt */
2073 XM_TXE_CS_ERR = 0x02c4, /* 32 bit r/o Tx Carrier Sense Error Cnt */
2074 XM_TXP_UTIL = 0x02c8, /* 32 bit r/o Tx Utilization in % */
2075 XM_TXF_64B = 0x02d0, /* 32 bit r/o 64 Byte Tx Frame Counter */
2076 XM_TXF_127B = 0x02d4, /* 32 bit r/o 65-127 Byte Tx Frame Counter */
2077 XM_TXF_255B = 0x02d8, /* 32 bit r/o 128-255 Byte Tx Frame Counter */
2078 XM_TXF_511B = 0x02dc, /* 32 bit r/o 256-511 Byte Tx Frame Counter */
2079 XM_TXF_1023B = 0x02e0, /* 32 bit r/o 512-1023 Byte Tx Frame Counter*/
2080 XM_TXF_MAX_SZ = 0x02e4, /* 32 bit r/o 1024-MaxSize Byte Tx Frame Cnt*/
2081 XM_RXF_OK = 0x0300, /* 32 bit r/o Frames Received OK */
2082 XM_RXO_OK_HI = 0x0304, /* 32 bit r/o Octets Received OK High Cnt */
2083 XM_RXO_OK_LO = 0x0308, /* 32 bit r/o Octets Received OK Low Counter*/
2084 XM_RXF_BC_OK = 0x030c, /* 32 bit r/o Broadcast Frames Received OK */
2085 XM_RXF_MC_OK = 0x0310, /* 32 bit r/o Multicast Frames Received OK */
2086 XM_RXF_UC_OK = 0x0314, /* 32 bit r/o Unicast Frames Received OK */
2087 XM_RXF_MPAUSE = 0x0318, /* 32 bit r/o Rx Pause MAC Ctrl Frame Cnt */
2088 XM_RXF_MCTRL = 0x031c, /* 32 bit r/o Rx MAC Ctrl Frame Counter */
2089 XM_RXF_INV_MP = 0x0320, /* 32 bit r/o Rx invalid Pause Frame Cnt */
2090 XM_RXF_INV_MOC = 0x0324, /* 32 bit r/o Rx Frames with inv. MAC Opcode*/
2091 XM_RXE_BURST = 0x0328, /* 32 bit r/o Rx Burst Event Counter */
2092 XM_RXE_FMISS = 0x032c, /* 32 bit r/o Rx Missed Frames Event Cnt */
2093 XM_RXF_FRA_ERR = 0x0330, /* 32 bit r/o Rx Framing Error Counter */
2094 XM_RXE_FIFO_OV = 0x0334, /* 32 bit r/o Rx FIFO overflow Event Cnt */
2095 XM_RXF_JAB_PKT = 0x0338, /* 32 bit r/o Rx Jabber Packet Frame Cnt */
2096 XM_RXE_CAR_ERR = 0x033c, /* 32 bit r/o Rx Carrier Event Error Cnt */
2097 XM_RXF_LEN_ERR = 0x0340, /* 32 bit r/o Rx in Range Length Error */
2098 XM_RXE_SYM_ERR = 0x0344, /* 32 bit r/o Rx Symbol Error Counter */
2099 XM_RXE_SHT_ERR = 0x0348, /* 32 bit r/o Rx Short Event Error Cnt */
2100 XM_RXE_RUNT = 0x034c, /* 32 bit r/o Rx Runt Event Counter */
2101 XM_RXF_LNG_ERR = 0x0350, /* 32 bit r/o Rx Frame too Long Error Cnt */
2102 XM_RXF_FCS_ERR = 0x0354, /* 32 bit r/o Rx Frame Check Seq. Error Cnt */
2103 XM_RXF_CEX_ERR = 0x035c, /* 32 bit r/o Rx Carrier Ext Error Frame Cnt*/
2104 XM_RXP_UTIL = 0x0360, /* 32 bit r/o Rx Utilization in % */
2105 XM_RXF_64B = 0x0368, /* 32 bit r/o 64 Byte Rx Frame Counter */
2106 XM_RXF_127B = 0x036c, /* 32 bit r/o 65-127 Byte Rx Frame Counter */
2107 XM_RXF_255B = 0x0370, /* 32 bit r/o 128-255 Byte Rx Frame Counter */
2108 XM_RXF_511B = 0x0374, /* 32 bit r/o 256-511 Byte Rx Frame Counter */
2109 XM_RXF_1023B = 0x0378, /* 32 bit r/o 512-1023 Byte Rx Frame Counter*/
2110 XM_RXF_MAX_SZ = 0x037c, /* 32 bit r/o 1024-MaxSize Byte Rx Frame Cnt*/
2111};
2112
2113/* XM_MMU_CMD 16 bit r/w MMU Command Register */
2114enum {
2115 XM_MMU_PHY_RDY = 1<<12, /* Bit 12: PHY Read Ready */
2116 XM_MMU_PHY_BUSY = 1<<11, /* Bit 11: PHY Busy */
2117 XM_MMU_IGN_PF = 1<<10, /* Bit 10: Ignore Pause Frame */
2118 XM_MMU_MAC_LB = 1<<9, /* Bit 9: Enable MAC Loopback */
2119 XM_MMU_FRC_COL = 1<<7, /* Bit 7: Force Collision */
2120 XM_MMU_SIM_COL = 1<<6, /* Bit 6: Simulate Collision */
2121 XM_MMU_NO_PRE = 1<<5, /* Bit 5: No MDIO Preamble */
2122 XM_MMU_GMII_FD = 1<<4, /* Bit 4: GMII uses Full Duplex */
2123 XM_MMU_RAT_CTRL = 1<<3, /* Bit 3: Enable Rate Control */
2124 XM_MMU_GMII_LOOP= 1<<2, /* Bit 2: PHY is in Loopback Mode */
2125 XM_MMU_ENA_RX = 1<<1, /* Bit 1: Enable Receiver */
2126 XM_MMU_ENA_TX = 1<<0, /* Bit 0: Enable Transmitter */
2127};
2128
2129
2130/* XM_TX_CMD 16 bit r/w Transmit Command Register */
2131enum {
2132	XM_TX_BK2BK	= 1<<6,	/* Bit 6: Ignore Carrier Sense (Tx Bk2Bk) */
2133 XM_TX_ENC_BYP = 1<<5, /* Bit 5: Set Encoder in Bypass Mode */
2134 XM_TX_SAM_LINE = 1<<4, /* Bit 4: (sc) Start utilization calculation */
2135 XM_TX_NO_GIG_MD = 1<<3, /* Bit 3: Disable Carrier Extension */
2136 XM_TX_NO_PRE = 1<<2, /* Bit 2: Disable Preamble Generation */
2137 XM_TX_NO_CRC = 1<<1, /* Bit 1: Disable CRC Generation */
2138 XM_TX_AUTO_PAD = 1<<0, /* Bit 0: Enable Automatic Padding */
2139};
2140
2141/* XM_TX_RT_LIM 16 bit r/w Transmit Retry Limit Register */
2142#define XM_RT_LIM_MSK 0x1f /* Bit 4..0: Tx Retry Limit */
2143
2144
2145/* XM_TX_STIME 16 bit r/w Transmit Slottime Register */
2146#define XM_STIME_MSK 0x7f /* Bit 6..0: Tx Slottime bits */
2147
2148
2149/* XM_TX_IPG 16 bit r/w Transmit Inter Packet Gap */
2150#define XM_IPG_MSK 0xff /* Bit 7..0: IPG value bits */
2151
2152
2153/* XM_RX_CMD 16 bit r/w Receive Command Register */
2154enum {
2155	XM_RX_LENERR_OK = 1<<8,	/* Bit 8:	don't set Rx Err bit for */
2156				/*		in-range error packets */
2157	XM_RX_BIG_PK_OK	= 1<<7,	/* Bit 7:	don't set Rx Err bit for */
2158				/*		jumbo packets */
2159 XM_RX_IPG_CAP = 1<<6, /* Bit 6 repl. type field with IPG */
2160 XM_RX_TP_MD = 1<<5, /* Bit 5: Enable transparent Mode */
2161 XM_RX_STRIP_FCS = 1<<4, /* Bit 4: Enable FCS Stripping */
2162 XM_RX_SELF_RX = 1<<3, /* Bit 3: Enable Rx of own packets */
2163 XM_RX_SAM_LINE = 1<<2, /* Bit 2: (sc) Start utilization calculation */
2164 XM_RX_STRIP_PAD = 1<<1, /* Bit 1: Strip pad bytes of Rx frames */
2165 XM_RX_DIS_CEXT = 1<<0, /* Bit 0: Disable carrier ext. check */
2166};
2167
2168
2169/* XM_GP_PORT 32 bit r/w General Purpose Port Register */
2170enum {
2171 XM_GP_ANIP = 1<<6, /* Bit 6: (ro) Auto-Neg. in progress */
2172 XM_GP_FRC_INT = 1<<5, /* Bit 5: (sc) Force Interrupt */
2173 XM_GP_RES_MAC = 1<<3, /* Bit 3: (sc) Reset MAC and FIFOs */
2174 XM_GP_RES_STAT = 1<<2, /* Bit 2: (sc) Reset the statistics module */
2175 XM_GP_INP_ASS = 1<<0, /* Bit 0: (ro) GP Input Pin asserted */
2176};
2177
2178
2179/* XM_IMSK 16 bit r/w Interrupt Mask Register */
2180/* XM_ISRC 16 bit r/o Interrupt Status Register */
2181enum {
2182 XM_IS_LNK_AE = 1<<14, /* Bit 14: Link Asynchronous Event */
2183 XM_IS_TX_ABORT = 1<<13, /* Bit 13: Transmit Abort, late Col. etc */
2184 XM_IS_FRC_INT = 1<<12, /* Bit 12: Force INT bit set in GP */
2185 XM_IS_INP_ASS = 1<<11, /* Bit 11: Input Asserted, GP bit 0 set */
2186 XM_IS_LIPA_RC = 1<<10, /* Bit 10: Link Partner requests config */
2187 XM_IS_RX_PAGE = 1<<9, /* Bit 9: Page Received */
2188 XM_IS_TX_PAGE = 1<<8, /* Bit 8: Next Page Loaded for Transmit */
2189 XM_IS_AND = 1<<7, /* Bit 7: Auto-Negotiation Done */
2190 XM_IS_TSC_OV = 1<<6, /* Bit 6: Time Stamp Counter Overflow */
2191 XM_IS_RXC_OV = 1<<5, /* Bit 5: Rx Counter Event Overflow */
2192 XM_IS_TXC_OV = 1<<4, /* Bit 4: Tx Counter Event Overflow */
2193 XM_IS_RXF_OV = 1<<3, /* Bit 3: Receive FIFO Overflow */
2194 XM_IS_TXF_UR = 1<<2, /* Bit 2: Transmit FIFO Underrun */
2195 XM_IS_TX_COMP = 1<<1, /* Bit 1: Frame Tx Complete */
2196 XM_IS_RX_COMP = 1<<0, /* Bit 0: Frame Rx Complete */
2197
2198 XM_IMSK_DISABLE = 0xffff,
2199};
2200
2201/* XM_HW_CFG 16 bit r/w Hardware Config Register */
2202enum {
2203 XM_HW_GEN_EOP = 1<<3, /* Bit 3: generate End of Packet pulse */
2204 XM_HW_COM4SIG = 1<<2, /* Bit 2: use Comma Detect for Sig. Det.*/
2205 XM_HW_GMII_MD = 1<<0, /* Bit 0: GMII Interface selected */
2206};
2207
2208
2209/* XM_TX_LO_WM 16 bit r/w Tx FIFO Low Water Mark */
2210/* XM_TX_HI_WM 16 bit r/w Tx FIFO High Water Mark */
2211#define XM_TX_WM_MSK 0x01ff /* Bit 9.. 0 Tx FIFO Watermark bits */
2212
2213/* XM_TX_THR 16 bit r/w Tx Request Threshold */
2214/* XM_HT_THR 16 bit r/w Host Request Threshold */
2215/* XM_RX_THR 16 bit r/w Rx Request Threshold */
2216#define XM_THR_MSK 0x03ff /* Bit 10.. 0 Rx/Tx Request Threshold bits */
2217
2218
2219/* XM_TX_STAT 32 bit r/o Tx Status LIFO Register */
2220enum {
2221 XM_ST_VALID = (1UL<<31), /* Bit 31: Status Valid */
2222 XM_ST_BYTE_CNT = (0x3fffL<<17), /* Bit 30..17: Tx frame Length */
2223 XM_ST_RETRY_CNT = (0x1fL<<12), /* Bit 16..12: Retry Count */
2224 XM_ST_EX_COL = 1<<11, /* Bit 11: Excessive Collisions */
2225 XM_ST_EX_DEF = 1<<10, /* Bit 10: Excessive Deferral */
2226	XM_ST_BURST	= 1<<9,		/* Bit  9:	pkt xmitted in burst mode */
2227	XM_ST_DEFER	= 1<<8,		/* Bit  8:	packet was deferred */
2228 XM_ST_BC = 1<<7, /* Bit 7: Broadcast packet */
2229 XM_ST_MC = 1<<6, /* Bit 6: Multicast packet */
2230 XM_ST_UC = 1<<5, /* Bit 5: Unicast packet */
2231 XM_ST_TX_UR = 1<<4, /* Bit 4: FIFO Underrun occurred */
2232 XM_ST_CS_ERR = 1<<3, /* Bit 3: Carrier Sense Error */
2233 XM_ST_LAT_COL = 1<<2, /* Bit 2: Late Collision Error */
2234 XM_ST_MUL_COL = 1<<1, /* Bit 1: Multiple Collisions */
2235 XM_ST_SGN_COL = 1<<0, /* Bit 0: Single Collision */
2236};
2237
2238/* XM_RX_LO_WM 16 bit r/w Receive Low Water Mark */
2239/* XM_RX_HI_WM 16 bit r/w Receive High Water Mark */
2240#define XM_RX_WM_MSK 0x03ff /* Bit 11.. 0: Rx FIFO Watermark bits */
2241
2242
2243/* XM_DEV_ID 32 bit r/o Device ID Register */
2244#define XM_DEV_OUI (0x00ffffffUL<<8) /* Bit 31..8: Device OUI */
2245#define XM_DEV_REV (0x07L << 5) /* Bit 7..5: Chip Rev Num */
2246
2247
2248/* XM_MODE 32 bit r/w Mode Register */
2249enum {
2250 XM_MD_ENA_REJ = 1<<26, /* Bit 26: Enable Frame Reject */
2251 XM_MD_SPOE_E = 1<<25, /* Bit 25: Send Pause on Edge */
2252 /* extern generated */
2253 XM_MD_TX_REP = 1<<24, /* Bit 24: Transmit Repeater Mode */
2254 XM_MD_SPOFF_I = 1<<23, /* Bit 23: Send Pause on FIFO full */
2255 /* intern generated */
2256 XM_MD_LE_STW = 1<<22, /* Bit 22: Rx Stat Word in Little Endian */
2257 XM_MD_TX_CONT = 1<<21, /* Bit 21: Send Continuous */
2258 XM_MD_TX_PAUSE = 1<<20, /* Bit 20: (sc) Send Pause Frame */
2259 XM_MD_ATS = 1<<19, /* Bit 19: Append Time Stamp */
2260 XM_MD_SPOL_I = 1<<18, /* Bit 18: Send Pause on Low */
2261 /* intern generated */
2262 XM_MD_SPOH_I = 1<<17, /* Bit 17: Send Pause on High */
2263 /* intern generated */
2264 XM_MD_CAP = 1<<16, /* Bit 16: Check Address Pair */
2265 XM_MD_ENA_HASH = 1<<15, /* Bit 15: Enable Hashing */
2266 XM_MD_CSA = 1<<14, /* Bit 14: Check Station Address */
2267 XM_MD_CAA = 1<<13, /* Bit 13: Check Address Array */
2268 XM_MD_RX_MCTRL = 1<<12, /* Bit 12: Rx MAC Control Frame */
2269 XM_MD_RX_RUNT = 1<<11, /* Bit 11: Rx Runt Frames */
2270 XM_MD_RX_IRLE = 1<<10, /* Bit 10: Rx in Range Len Err Frame */
2271 XM_MD_RX_LONG = 1<<9, /* Bit 9: Rx Long Frame */
2272 XM_MD_RX_CRCE = 1<<8, /* Bit 8: Rx CRC Error Frame */
2273 XM_MD_RX_ERR = 1<<7, /* Bit 7: Rx Error Frame */
2274 XM_MD_DIS_UC = 1<<6, /* Bit 6: Disable Rx Unicast */
2275 XM_MD_DIS_MC = 1<<5, /* Bit 5: Disable Rx Multicast */
2276 XM_MD_DIS_BC = 1<<4, /* Bit 4: Disable Rx Broadcast */
2277 XM_MD_ENA_PROM = 1<<3, /* Bit 3: Enable Promiscuous */
2278 XM_MD_ENA_BE = 1<<2, /* Bit 2: Enable Big Endian */
2279 XM_MD_FTF = 1<<1, /* Bit 1: (sc) Flush Tx FIFO */
2280 XM_MD_FRF = 1<<0, /* Bit 0: (sc) Flush Rx FIFO */
2281};
2282
2283#define XM_PAUSE_MODE (XM_MD_SPOE_E | XM_MD_SPOL_I | XM_MD_SPOH_I)
2284#define XM_DEF_MODE (XM_MD_RX_RUNT | XM_MD_RX_IRLE | XM_MD_RX_LONG |\
2285 XM_MD_RX_CRCE | XM_MD_RX_ERR | XM_MD_CSA)
2286
2287/* XM_STAT_CMD 16 bit r/w Statistics Command Register */
2288enum {
2289 XM_SC_SNP_RXC = 1<<5, /* Bit 5: (sc) Snap Rx Counters */
2290 XM_SC_SNP_TXC = 1<<4, /* Bit 4: (sc) Snap Tx Counters */
2291 XM_SC_CP_RXC = 1<<3, /* Bit 3: Copy Rx Counters Continuously */
2292 XM_SC_CP_TXC = 1<<2, /* Bit 2: Copy Tx Counters Continuously */
2293 XM_SC_CLR_RXC = 1<<1, /* Bit 1: (sc) Clear Rx Counters */
2294 XM_SC_CLR_TXC = 1<<0, /* Bit 0: (sc) Clear Tx Counters */
2295};
2296
2297
2298/* XM_RX_CNT_EV 32 bit r/o Rx Counter Event Register */
2299/* XM_RX_EV_MSK 32 bit r/w Rx Counter Event Mask */
2300enum {
2301 XMR_MAX_SZ_OV = 1<<31, /* Bit 31: 1024-MaxSize Rx Cnt Ov*/
2302 XMR_1023B_OV = 1<<30, /* Bit 30: 512-1023Byte Rx Cnt Ov*/
2303 XMR_511B_OV = 1<<29, /* Bit 29: 256-511 Byte Rx Cnt Ov*/
2304 XMR_255B_OV = 1<<28, /* Bit 28: 128-255 Byte Rx Cnt Ov*/
2305 XMR_127B_OV = 1<<27, /* Bit 27: 65-127 Byte Rx Cnt Ov */
2306 XMR_64B_OV = 1<<26, /* Bit 26: 64 Byte Rx Cnt Ov */
2307 XMR_UTIL_OV = 1<<25, /* Bit 25: Rx Util Cnt Overflow */
2308 XMR_UTIL_UR = 1<<24, /* Bit 24: Rx Util Cnt Underrun */
2309 XMR_CEX_ERR_OV = 1<<23, /* Bit 23: CEXT Err Cnt Ov */
2310 XMR_FCS_ERR_OV = 1<<21, /* Bit 21: Rx FCS Error Cnt Ov */
2311 XMR_LNG_ERR_OV = 1<<20, /* Bit 20: Rx too Long Err Cnt Ov*/
2312 XMR_RUNT_OV = 1<<19, /* Bit 19: Runt Event Cnt Ov */
2313 XMR_SHT_ERR_OV = 1<<18, /* Bit 18: Rx Short Ev Err Cnt Ov*/
2314 XMR_SYM_ERR_OV = 1<<17, /* Bit 17: Rx Sym Err Cnt Ov */
2315 XMR_CAR_ERR_OV = 1<<15, /* Bit 15: Rx Carr Ev Err Cnt Ov */
2316 XMR_JAB_PKT_OV = 1<<14, /* Bit 14: Rx Jabb Packet Cnt Ov */
2317 XMR_FIFO_OV = 1<<13, /* Bit 13: Rx FIFO Ov Ev Cnt Ov */
2318 XMR_FRA_ERR_OV = 1<<12, /* Bit 12: Rx Framing Err Cnt Ov */
2319 XMR_FMISS_OV = 1<<11, /* Bit 11: Rx Missed Ev Cnt Ov */
2320 XMR_BURST = 1<<10, /* Bit 10: Rx Burst Event Cnt Ov */
2321 XMR_INV_MOC = 1<<9, /* Bit 9: Rx with inv. MAC OC Ov*/
2322 XMR_INV_MP = 1<<8, /* Bit 8: Rx inv Pause Frame Ov */
2323 XMR_MCTRL_OV = 1<<7, /* Bit 7: Rx MAC Ctrl-F Cnt Ov */
2324 XMR_MPAUSE_OV = 1<<6, /* Bit 6: Rx Pause MAC Ctrl-F Ov*/
2325 XMR_UC_OK_OV = 1<<5, /* Bit 5: Rx Unicast Frame CntOv*/
2326 XMR_MC_OK_OV = 1<<4, /* Bit 4: Rx Multicast Cnt Ov */
2327 XMR_BC_OK_OV = 1<<3, /* Bit 3: Rx Broadcast Cnt Ov */
2328 XMR_OK_LO_OV = 1<<2, /* Bit 2: Octets Rx OK Low CntOv*/
2329 XMR_OK_HI_OV = 1<<1, /* Bit 1: Octets Rx OK Hi Cnt Ov*/
2330 XMR_OK_OV = 1<<0, /* Bit 0: Frames Received Ok Ov */
2331};
2332
2333#define XMR_DEF_MSK (XMR_OK_LO_OV | XMR_OK_HI_OV)
2334
2335/* XM_TX_CNT_EV 32 bit r/o Tx Counter Event Register */
2336/* XM_TX_EV_MSK 32 bit r/w Tx Counter Event Mask */
2337enum {
2338 XMT_MAX_SZ_OV = 1<<25, /* Bit 25: 1024-MaxSize Tx Cnt Ov*/
2339 XMT_1023B_OV = 1<<24, /* Bit 24: 512-1023Byte Tx Cnt Ov*/
2340 XMT_511B_OV = 1<<23, /* Bit 23: 256-511 Byte Tx Cnt Ov*/
2341 XMT_255B_OV = 1<<22, /* Bit 22: 128-255 Byte Tx Cnt Ov*/
2342 XMT_127B_OV = 1<<21, /* Bit 21: 65-127 Byte Tx Cnt Ov */
2343 XMT_64B_OV = 1<<20, /* Bit 20: 64 Byte Tx Cnt Ov */
2344 XMT_UTIL_OV = 1<<19, /* Bit 19: Tx Util Cnt Overflow */
2345 XMT_UTIL_UR = 1<<18, /* Bit 18: Tx Util Cnt Underrun */
2346 XMT_CS_ERR_OV = 1<<17, /* Bit 17: Tx Carr Sen Err Cnt Ov*/
2347 XMT_FIFO_UR_OV = 1<<16, /* Bit 16: Tx FIFO Ur Ev Cnt Ov */
2348	XMT_EX_DEF_OV	= 1<<15,	/* Bit 15: Tx Ex Deferral Cnt Ov */
2349 XMT_DEF = 1<<14, /* Bit 14: Tx Deferred Cnt Ov */
2350 XMT_LAT_COL_OV = 1<<13, /* Bit 13: Tx Late Col Cnt Ov */
2351	XMT_ABO_COL_OV	= 1<<12,	/* Bit 12: Tx abort due to Ex Col Ov */
2352 XMT_MUL_COL_OV = 1<<11, /* Bit 11: Tx Mult Col Cnt Ov */
2353 XMT_SNG_COL = 1<<10, /* Bit 10: Tx Single Col Cnt Ov */
2354 XMT_MCTRL_OV = 1<<9, /* Bit 9: Tx MAC Ctrl Counter Ov*/
2355 XMT_MPAUSE = 1<<8, /* Bit 8: Tx Pause MAC Ctrl-F Ov*/
2356 XMT_BURST = 1<<7, /* Bit 7: Tx Burst Event Cnt Ov */
2357 XMT_LONG = 1<<6, /* Bit 6: Tx Long Frame Cnt Ov */
2358 XMT_UC_OK_OV = 1<<5, /* Bit 5: Tx Unicast Cnt Ov */
2359 XMT_MC_OK_OV = 1<<4, /* Bit 4: Tx Multicast Cnt Ov */
2360 XMT_BC_OK_OV = 1<<3, /* Bit 3: Tx Broadcast Cnt Ov */
2361 XMT_OK_LO_OV = 1<<2, /* Bit 2: Octets Tx OK Low CntOv*/
2362 XMT_OK_HI_OV = 1<<1, /* Bit 1: Octets Tx OK Hi Cnt Ov*/
2363 XMT_OK_OV = 1<<0, /* Bit 0: Frames Tx Ok Ov */
2364};
2365
2366#define XMT_DEF_MSK (XMT_OK_LO_OV | XMT_OK_HI_OV)
2367
2368struct skge_rx_desc {
2369 u32 control;
2370 u32 next_offset;
2371 u32 dma_lo;
2372 u32 dma_hi;
2373 u32 status;
2374 u32 timestamp;
2375 u16 csum2;
2376 u16 csum1;
2377 u16 csum2_start;
2378 u16 csum1_start;
2379};
2380
2381struct skge_tx_desc {
2382 u32 control;
2383 u32 next_offset;
2384 u32 dma_lo;
2385 u32 dma_hi;
2386 u32 status;
2387 u32 csum_offs;
2388 u16 csum_write;
2389 u16 csum_start;
2390 u32 rsvd;
2391};
2392
2393struct skge_element {
2394 struct skge_element *next;
2395 void *desc;
2396 struct sk_buff *skb;
2397 DEFINE_DMA_UNMAP_ADDR(mapaddr);
2398 DEFINE_DMA_UNMAP_LEN(maplen);
2399};
2400
2401struct skge_ring {
2402 struct skge_element *to_clean;
2403 struct skge_element *to_use;
2404 struct skge_element *start;
2405 unsigned long count;
2406};
2407
2408
2409struct skge_hw {
2410 void __iomem *regs;
2411 struct pci_dev *pdev;
2412 spinlock_t hw_lock;
2413 u32 intr_mask;
2414 struct net_device *dev[2];
2415
2416 u8 chip_id;
2417 u8 chip_rev;
2418 u8 copper;
2419 u8 ports;
2420 u8 phy_type;
2421
2422 u32 ram_size;
2423 u32 ram_offset;
2424 u16 phy_addr;
2425 spinlock_t phy_lock;
2426 struct tasklet_struct phy_task;
2427
2428 char irq_name[0]; /* skge@pci:000:04:00.0 */
2429};
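/*
 * irq_name[] is a zero-length trailing array, so the allocation must
 * reserve room for the formatted string. A minimal sketch of the expected
 * allocation (illustrative, not the verbatim driver code):
 *
 *	hw = kzalloc(sizeof(*hw) + sizeof("skge@pci:")
 *		     + strlen(pci_name(pdev)), GFP_KERNEL);
 *	if (hw)
 *		sprintf(hw->irq_name, "skge@pci:%s", pci_name(pdev));
 */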
2430
2431enum pause_control {
2432 FLOW_MODE_NONE = 1, /* No Flow-Control */
2433 FLOW_MODE_LOC_SEND = 2, /* Local station sends PAUSE */
2434 FLOW_MODE_SYMMETRIC = 3, /* Both stations may send PAUSE */
2435 FLOW_MODE_SYM_OR_REM = 4, /* Both stations may send PAUSE or
2436 * just the remote station may send PAUSE
2437 */
2438};
2439
2440enum pause_status {
2441	FLOW_STAT_INDETERMINATED = 0,	/* indeterminate */
2442 FLOW_STAT_NONE, /* No Flow Control */
2443 FLOW_STAT_REM_SEND, /* Remote Station sends PAUSE */
2444 FLOW_STAT_LOC_SEND, /* Local station sends PAUSE */
2445	FLOW_STAT_SYMMETRIC,	/* Both stations may send PAUSE */
2446};
2447
2448
2449struct skge_port {
2450 struct skge_hw *hw;
2451 struct net_device *netdev;
2452 struct napi_struct napi;
2453 int port;
2454 u32 msg_enable;
2455
2456 struct skge_ring tx_ring;
2457
2458 struct skge_ring rx_ring ____cacheline_aligned_in_smp;
2459 unsigned int rx_buf_size;
2460
2461 struct timer_list link_timer;
2462 enum pause_control flow_control;
2463 enum pause_status flow_status;
2464 u8 blink_on;
2465 u8 wol;
2466 u8 autoneg; /* AUTONEG_ENABLE, AUTONEG_DISABLE */
2467 u8 duplex; /* DUPLEX_HALF, DUPLEX_FULL */
2468 u16 speed; /* SPEED_1000, SPEED_100, ... */
2469 u32 advertising;
2470
2471 void *mem; /* PCI memory for rings */
2472 dma_addr_t dma;
2473 unsigned long mem_size;
2474#ifdef CONFIG_SKGE_DEBUG
2475 struct dentry *debugfs;
2476#endif
2477};
2478
2479
2480/* Register accessor for memory mapped device */
2481static inline u32 skge_read32(const struct skge_hw *hw, int reg)
2482{
2483 return readl(hw->regs + reg);
2484}
2485
2486static inline u16 skge_read16(const struct skge_hw *hw, int reg)
2487{
2488 return readw(hw->regs + reg);
2489}
2490
2491static inline u8 skge_read8(const struct skge_hw *hw, int reg)
2492{
2493 return readb(hw->regs + reg);
2494}
2495
2496static inline void skge_write32(const struct skge_hw *hw, int reg, u32 val)
2497{
2498 writel(val, hw->regs + reg);
2499}
2500
2501static inline void skge_write16(const struct skge_hw *hw, int reg, u16 val)
2502{
2503 writew(val, hw->regs + reg);
2504}
2505
2506static inline void skge_write8(const struct skge_hw *hw, int reg, u8 val)
2507{
2508 writeb(val, hw->regs + reg);
2509}
2510
2511/* MAC Related Registers inside the device. */
2512#define SK_REG(port,reg) (((port)<<7)+(u16)(reg))
2513#define SK_XMAC_REG(port, reg) \
2514 ((BASE_XMAC_1 + (port) * (BASE_XMAC_2 - BASE_XMAC_1)) | (reg) << 1)
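/*
 * Address math, worked through (illustrative): each port's MAC block
 * repeats at a 128-byte stride, so SK_REG(1, reg) == SK_REG(0, reg) + 0x80.
 * SK_XMAC_REG() additionally doubles the 16-bit XMAC word offset into a
 * byte offset, e.g. SK_XMAC_REG(0, XM_MMU_CMD) == BASE_XMAC_1 because
 * XM_MMU_CMD is 0.
 */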
2515
2516static inline u32 xm_read32(const struct skge_hw *hw, int port, int reg)
2517{
2518 u32 v;
2519 v = skge_read16(hw, SK_XMAC_REG(port, reg));
2520 v |= (u32)skge_read16(hw, SK_XMAC_REG(port, reg+2)) << 16;
2521 return v;
2522}
2523
2524static inline u16 xm_read16(const struct skge_hw *hw, int port, int reg)
2525{
2526 return skge_read16(hw, SK_XMAC_REG(port,reg));
2527}
2528
2529static inline void xm_write32(const struct skge_hw *hw, int port, int r, u32 v)
2530{
2531 skge_write16(hw, SK_XMAC_REG(port,r), v & 0xffff);
2532 skge_write16(hw, SK_XMAC_REG(port,r+2), v >> 16);
2533}
2534
2535static inline void xm_write16(const struct skge_hw *hw, int port, int r, u16 v)
2536{
2537 skge_write16(hw, SK_XMAC_REG(port,r), v);
2538}
2539
2540static inline void xm_outhash(const struct skge_hw *hw, int port, int reg,
2541 const u8 *hash)
2542{
2543 xm_write16(hw, port, reg, (u16)hash[0] | ((u16)hash[1] << 8));
2544 xm_write16(hw, port, reg+2, (u16)hash[2] | ((u16)hash[3] << 8));
2545 xm_write16(hw, port, reg+4, (u16)hash[4] | ((u16)hash[5] << 8));
2546 xm_write16(hw, port, reg+6, (u16)hash[6] | ((u16)hash[7] << 8));
2547}
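/*
 * Usage sketch (hypothetical caller): load an 8-byte multicast hash
 * filter, built from the device's multicast list, into the hash match
 * registers of port 0:
 *
 *	u8 filter[8] = { 0 };
 *	...
 *	xm_outhash(hw, 0, XM_HSM, filter);
 */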
2548
2549static inline void xm_outaddr(const struct skge_hw *hw, int port, int reg,
2550 const u8 *addr)
2551{
2552 xm_write16(hw, port, reg, (u16)addr[0] | ((u16)addr[1] << 8));
2553 xm_write16(hw, port, reg+2, (u16)addr[2] | ((u16)addr[3] << 8));
2554 xm_write16(hw, port, reg+4, (u16)addr[4] | ((u16)addr[5] << 8));
2555}
2556
2557#define SK_GMAC_REG(port,reg) \
2558 (BASE_GMAC_1 + (port) * (BASE_GMAC_2-BASE_GMAC_1) + (reg))
2559
2560static inline u16 gma_read16(const struct skge_hw *hw, int port, int reg)
2561{
2562 return skge_read16(hw, SK_GMAC_REG(port,reg));
2563}
2564
2565static inline u32 gma_read32(const struct skge_hw *hw, int port, int reg)
2566{
2567 return (u32) skge_read16(hw, SK_GMAC_REG(port,reg))
2568 | ((u32)skge_read16(hw, SK_GMAC_REG(port,reg+4)) << 16);
2569}
2570
2571static inline void gma_write16(const struct skge_hw *hw, int port, int r, u16 v)
2572{
2573 skge_write16(hw, SK_GMAC_REG(port,r), v);
2574}
2575
2576static inline void gma_set_addr(struct skge_hw *hw, int port, int reg,
2577 const u8 *addr)
2578{
2579 gma_write16(hw, port, reg, (u16) addr[0] | ((u16) addr[1] << 8));
2580	gma_write16(hw, port, reg+4, (u16) addr[2] | ((u16) addr[3] << 8));
2581	gma_write16(hw, port, reg+8, (u16) addr[4] | ((u16) addr[5] << 8));
2582}
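/*
 * Note the differing strides: the GMAC helpers above place 16-bit values
 * on 4-byte boundaries (reg, reg+4, reg+8), while the XMAC helpers step
 * by 2 bytes.
 */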
2583
2584#endif
diff --git a/drivers/net/ethernet/marvell/sky2.c b/drivers/net/ethernet/marvell/sky2.c
new file mode 100644
index 000000000000..cbd026f3bc57
--- /dev/null
+++ b/drivers/net/ethernet/marvell/sky2.c
@@ -0,0 +1,5158 @@
1/*
2 * New driver for Marvell Yukon 2 chipset.
3 * Based on the earlier sk98lin and skge drivers.
4 *
5 * This driver intentionally does not support all the features
6 * of the original driver such as link fail-over and link management because
7 * those should be done at higher levels.
8 *
9 * Copyright (C) 2005 Stephen Hemminger <shemminger@osdl.org>
10 *
11 * This program is free software; you can redistribute it and/or modify
12 * it under the terms of the GNU General Public License as published by
13 * the Free Software Foundation; either version 2 of the License.
14 *
15 * This program is distributed in the hope that it will be useful,
16 * but WITHOUT ANY WARRANTY; without even the implied warranty of
17 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
18 * GNU General Public License for more details.
19 *
20 * You should have received a copy of the GNU General Public License
21 * along with this program; if not, write to the Free Software
22 * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
23 */
24
25#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
26
27#include <linux/crc32.h>
28#include <linux/kernel.h>
29#include <linux/module.h>
30#include <linux/netdevice.h>
31#include <linux/dma-mapping.h>
32#include <linux/etherdevice.h>
33#include <linux/ethtool.h>
34#include <linux/pci.h>
35#include <linux/interrupt.h>
36#include <linux/ip.h>
37#include <linux/slab.h>
38#include <net/ip.h>
39#include <linux/tcp.h>
40#include <linux/in.h>
41#include <linux/delay.h>
42#include <linux/workqueue.h>
43#include <linux/if_vlan.h>
44#include <linux/prefetch.h>
45#include <linux/debugfs.h>
46#include <linux/mii.h>
47
48#include <asm/irq.h>
49
50#include "sky2.h"
51
52#define DRV_NAME "sky2"
53#define DRV_VERSION "1.29"
54
55/*
56 * The Yukon II chipset takes 64 bit command blocks (called list elements)
57 * that are organized into three different rings (receive, transmit,
58 * status), similar to Tigon3.
59 */
60
61#define RX_LE_SIZE 1024
62#define RX_LE_BYTES (RX_LE_SIZE*sizeof(struct sky2_rx_le))
63#define RX_MAX_PENDING (RX_LE_SIZE/6 - 2)
64#define RX_DEF_PENDING RX_MAX_PENDING
65
66/* This is the worst-case number of transmit list elements for a single skb:
67 VLAN:GSO + CKSUM + Data + skb_frags * DMA */
68#define MAX_SKB_TX_LE (2 + (sizeof(dma_addr_t)/sizeof(u32))*(MAX_SKB_FRAGS+1))
69#define TX_MIN_PENDING (MAX_SKB_TX_LE+1)
70#define TX_MAX_PENDING 1024
71#define TX_DEF_PENDING 127
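/* Worked example (assumptions: 64-bit dma_addr_t, so two u32 words per
 * address, and MAX_SKB_FRAGS == 17 -- the real value depends on kernel
 * configuration): MAX_SKB_TX_LE = 2 + 2 * (17 + 1) = 38, which makes
 * TX_MIN_PENDING 39 list elements.
 */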
72
73#define TX_WATCHDOG (5 * HZ)
74#define NAPI_WEIGHT 64
75#define PHY_RETRIES 1000
76
77#define SKY2_EEPROM_MAGIC 0x9955aabb
78
79#define RING_NEXT(x, s) (((x)+1) & ((s)-1))
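/* RING_NEXT() relies on the ring size s being a power of two: the mask
 * wraps the increment for free, e.g. RING_NEXT(1023, 1024) == 0.
 */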
80
81static const u32 default_msg =
82 NETIF_MSG_DRV | NETIF_MSG_PROBE | NETIF_MSG_LINK
83 | NETIF_MSG_TIMER | NETIF_MSG_TX_ERR | NETIF_MSG_RX_ERR
84 | NETIF_MSG_IFUP | NETIF_MSG_IFDOWN;
85
86static int debug = -1; /* defaults above */
87module_param(debug, int, 0);
88MODULE_PARM_DESC(debug, "Debug level (0=none,...,16=all)");
89
90static int copybreak __read_mostly = 128;
91module_param(copybreak, int, 0);
92MODULE_PARM_DESC(copybreak, "Receive copy threshold");
93
94static int disable_msi = 0;
95module_param(disable_msi, int, 0);
96MODULE_PARM_DESC(disable_msi, "Disable Message Signaled Interrupt (MSI)");
97
98static DEFINE_PCI_DEVICE_TABLE(sky2_id_table) = {
99 { PCI_DEVICE(PCI_VENDOR_ID_SYSKONNECT, 0x9000) }, /* SK-9Sxx */
100 { PCI_DEVICE(PCI_VENDOR_ID_SYSKONNECT, 0x9E00) }, /* SK-9Exx */
101 { PCI_DEVICE(PCI_VENDOR_ID_SYSKONNECT, 0x9E01) }, /* SK-9E21M */
102 { PCI_DEVICE(PCI_VENDOR_ID_DLINK, 0x4b00) }, /* DGE-560T */
103 { PCI_DEVICE(PCI_VENDOR_ID_DLINK, 0x4001) }, /* DGE-550SX */
104 { PCI_DEVICE(PCI_VENDOR_ID_DLINK, 0x4B02) }, /* DGE-560SX */
105 { PCI_DEVICE(PCI_VENDOR_ID_DLINK, 0x4B03) }, /* DGE-550T */
106 { PCI_DEVICE(PCI_VENDOR_ID_MARVELL, 0x4340) }, /* 88E8021 */
107 { PCI_DEVICE(PCI_VENDOR_ID_MARVELL, 0x4341) }, /* 88E8022 */
108 { PCI_DEVICE(PCI_VENDOR_ID_MARVELL, 0x4342) }, /* 88E8061 */
109 { PCI_DEVICE(PCI_VENDOR_ID_MARVELL, 0x4343) }, /* 88E8062 */
110 { PCI_DEVICE(PCI_VENDOR_ID_MARVELL, 0x4344) }, /* 88E8021 */
111 { PCI_DEVICE(PCI_VENDOR_ID_MARVELL, 0x4345) }, /* 88E8022 */
112 { PCI_DEVICE(PCI_VENDOR_ID_MARVELL, 0x4346) }, /* 88E8061 */
113 { PCI_DEVICE(PCI_VENDOR_ID_MARVELL, 0x4347) }, /* 88E8062 */
114 { PCI_DEVICE(PCI_VENDOR_ID_MARVELL, 0x4350) }, /* 88E8035 */
115 { PCI_DEVICE(PCI_VENDOR_ID_MARVELL, 0x4351) }, /* 88E8036 */
116 { PCI_DEVICE(PCI_VENDOR_ID_MARVELL, 0x4352) }, /* 88E8038 */
117 { PCI_DEVICE(PCI_VENDOR_ID_MARVELL, 0x4353) }, /* 88E8039 */
118 { PCI_DEVICE(PCI_VENDOR_ID_MARVELL, 0x4354) }, /* 88E8040 */
119 { PCI_DEVICE(PCI_VENDOR_ID_MARVELL, 0x4355) }, /* 88E8040T */
120 { PCI_DEVICE(PCI_VENDOR_ID_MARVELL, 0x4356) }, /* 88EC033 */
121 { PCI_DEVICE(PCI_VENDOR_ID_MARVELL, 0x4357) }, /* 88E8042 */
122 { PCI_DEVICE(PCI_VENDOR_ID_MARVELL, 0x435A) }, /* 88E8048 */
123 { PCI_DEVICE(PCI_VENDOR_ID_MARVELL, 0x4360) }, /* 88E8052 */
124 { PCI_DEVICE(PCI_VENDOR_ID_MARVELL, 0x4361) }, /* 88E8050 */
125 { PCI_DEVICE(PCI_VENDOR_ID_MARVELL, 0x4362) }, /* 88E8053 */
126 { PCI_DEVICE(PCI_VENDOR_ID_MARVELL, 0x4363) }, /* 88E8055 */
127 { PCI_DEVICE(PCI_VENDOR_ID_MARVELL, 0x4364) }, /* 88E8056 */
128 { PCI_DEVICE(PCI_VENDOR_ID_MARVELL, 0x4365) }, /* 88E8070 */
129 { PCI_DEVICE(PCI_VENDOR_ID_MARVELL, 0x4366) }, /* 88EC036 */
130 { PCI_DEVICE(PCI_VENDOR_ID_MARVELL, 0x4367) }, /* 88EC032 */
131 { PCI_DEVICE(PCI_VENDOR_ID_MARVELL, 0x4368) }, /* 88EC034 */
132 { PCI_DEVICE(PCI_VENDOR_ID_MARVELL, 0x4369) }, /* 88EC042 */
133 { PCI_DEVICE(PCI_VENDOR_ID_MARVELL, 0x436A) }, /* 88E8058 */
134 { PCI_DEVICE(PCI_VENDOR_ID_MARVELL, 0x436B) }, /* 88E8071 */
135 { PCI_DEVICE(PCI_VENDOR_ID_MARVELL, 0x436C) }, /* 88E8072 */
136 { PCI_DEVICE(PCI_VENDOR_ID_MARVELL, 0x436D) }, /* 88E8055 */
137 { PCI_DEVICE(PCI_VENDOR_ID_MARVELL, 0x4370) }, /* 88E8075 */
138 { PCI_DEVICE(PCI_VENDOR_ID_MARVELL, 0x4380) }, /* 88E8057 */
139 { PCI_DEVICE(PCI_VENDOR_ID_MARVELL, 0x4381) }, /* 88E8059 */
140 { 0 }
141};
142
143MODULE_DEVICE_TABLE(pci, sky2_id_table);
144
145/* Avoid conditionals by using array */
146static const unsigned txqaddr[] = { Q_XA1, Q_XA2 };
147static const unsigned rxqaddr[] = { Q_R1, Q_R2 };
148static const u32 portirq_msk[] = { Y2_IS_PORT_1, Y2_IS_PORT_2 };
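/* Example of the pattern: instead of "port ? Q_XA2 : Q_XA1", code below
 * simply indexes txqaddr[port]; likewise for rxqaddr[] and portirq_msk[].
 */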
149
150static void sky2_set_multicast(struct net_device *dev);
151static irqreturn_t sky2_intr(int irq, void *dev_id);
152
153/* Access to PHY via serial interconnect */
154static int gm_phy_write(struct sky2_hw *hw, unsigned port, u16 reg, u16 val)
155{
156 int i;
157
158 gma_write16(hw, port, GM_SMI_DATA, val);
159 gma_write16(hw, port, GM_SMI_CTRL,
160 GM_SMI_CT_PHY_AD(PHY_ADDR_MARV) | GM_SMI_CT_REG_AD(reg));
161
162 for (i = 0; i < PHY_RETRIES; i++) {
163 u16 ctrl = gma_read16(hw, port, GM_SMI_CTRL);
164 if (ctrl == 0xffff)
165 goto io_error;
166
167 if (!(ctrl & GM_SMI_CT_BUSY))
168 return 0;
169
170 udelay(10);
171 }
172
173 dev_warn(&hw->pdev->dev, "%s: phy write timeout\n", hw->dev[port]->name);
174 return -ETIMEDOUT;
175
176io_error:
177 dev_err(&hw->pdev->dev, "%s: phy I/O error\n", hw->dev[port]->name);
178 return -EIO;
179}
180
181static int __gm_phy_read(struct sky2_hw *hw, unsigned port, u16 reg, u16 *val)
182{
183 int i;
184
185 gma_write16(hw, port, GM_SMI_CTRL, GM_SMI_CT_PHY_AD(PHY_ADDR_MARV)
186 | GM_SMI_CT_REG_AD(reg) | GM_SMI_CT_OP_RD);
187
188 for (i = 0; i < PHY_RETRIES; i++) {
189 u16 ctrl = gma_read16(hw, port, GM_SMI_CTRL);
190 if (ctrl == 0xffff)
191 goto io_error;
192
193 if (ctrl & GM_SMI_CT_RD_VAL) {
194 *val = gma_read16(hw, port, GM_SMI_DATA);
195 return 0;
196 }
197
198 udelay(10);
199 }
200
201 dev_warn(&hw->pdev->dev, "%s: phy read timeout\n", hw->dev[port]->name);
202 return -ETIMEDOUT;
203io_error:
204 dev_err(&hw->pdev->dev, "%s: phy I/O error\n", hw->dev[port]->name);
205 return -EIO;
206}
207
208static inline u16 gm_phy_read(struct sky2_hw *hw, unsigned port, u16 reg)
209{
210	u16 v = 0;	/* avoid returning uninitialized data if the read fails */
211 __gm_phy_read(hw, port, reg, &v);
212 return v;
213}
214
215
216static void sky2_power_on(struct sky2_hw *hw)
217{
218 /* switch power to VCC (WA for VAUX problem) */
219 sky2_write8(hw, B0_POWER_CTRL,
220 PC_VAUX_ENA | PC_VCC_ENA | PC_VAUX_OFF | PC_VCC_ON);
221
222	/* disable Core Clock Division */
223 sky2_write32(hw, B2_Y2_CLK_CTRL, Y2_CLK_DIV_DIS);
224
225 if (hw->chip_id == CHIP_ID_YUKON_XL && hw->chip_rev > CHIP_REV_YU_XL_A1)
226 /* enable bits are inverted */
227 sky2_write8(hw, B2_Y2_CLK_GATE,
228 Y2_PCI_CLK_LNK1_DIS | Y2_COR_CLK_LNK1_DIS |
229 Y2_CLK_GAT_LNK1_DIS | Y2_PCI_CLK_LNK2_DIS |
230 Y2_COR_CLK_LNK2_DIS | Y2_CLK_GAT_LNK2_DIS);
231 else
232 sky2_write8(hw, B2_Y2_CLK_GATE, 0);
233
234 if (hw->flags & SKY2_HW_ADV_POWER_CTL) {
235 u32 reg;
236
237 sky2_pci_write32(hw, PCI_DEV_REG3, 0);
238
239 reg = sky2_pci_read32(hw, PCI_DEV_REG4);
240 /* set all bits to 0 except bits 15..12 and 8 */
241 reg &= P_ASPM_CONTROL_MSK;
242 sky2_pci_write32(hw, PCI_DEV_REG4, reg);
243
244 reg = sky2_pci_read32(hw, PCI_DEV_REG5);
245 /* set all bits to 0 except bits 28 & 27 */
246 reg &= P_CTL_TIM_VMAIN_AV_MSK;
247 sky2_pci_write32(hw, PCI_DEV_REG5, reg);
248
249 sky2_pci_write32(hw, PCI_CFG_REG_1, 0);
250
251 sky2_write16(hw, B0_CTST, Y2_HW_WOL_ON);
252
253 /* Enable workaround for dev 4.107 on Yukon-Ultra & Extreme */
254 reg = sky2_read32(hw, B2_GP_IO);
255 reg |= GLB_GPIO_STAT_RACE_DIS;
256 sky2_write32(hw, B2_GP_IO, reg);
257
258 sky2_read32(hw, B2_GP_IO);
259 }
260
261 /* Turn on "driver loaded" LED */
262 sky2_write16(hw, B0_CTST, Y2_LED_STAT_ON);
263}
264
265static void sky2_power_aux(struct sky2_hw *hw)
266{
267 if (hw->chip_id == CHIP_ID_YUKON_XL && hw->chip_rev > CHIP_REV_YU_XL_A1)
268 sky2_write8(hw, B2_Y2_CLK_GATE, 0);
269 else
270 /* enable bits are inverted */
271 sky2_write8(hw, B2_Y2_CLK_GATE,
272 Y2_PCI_CLK_LNK1_DIS | Y2_COR_CLK_LNK1_DIS |
273 Y2_CLK_GAT_LNK1_DIS | Y2_PCI_CLK_LNK2_DIS |
274 Y2_COR_CLK_LNK2_DIS | Y2_CLK_GAT_LNK2_DIS);
275
276 /* switch power to VAUX if supported and PME from D3cold */
277	if ((sky2_read32(hw, B0_CTST) & Y2_VAUX_AVAIL) &&
278 pci_pme_capable(hw->pdev, PCI_D3cold))
279 sky2_write8(hw, B0_POWER_CTRL,
280 (PC_VAUX_ENA | PC_VCC_ENA |
281 PC_VAUX_ON | PC_VCC_OFF));
282
283	/* turn off "driver loaded" LED */
284 sky2_write16(hw, B0_CTST, Y2_LED_STAT_OFF);
285}
286
287static void sky2_gmac_reset(struct sky2_hw *hw, unsigned port)
288{
289 u16 reg;
290
291 /* disable all GMAC IRQ's */
292 sky2_write8(hw, SK_REG(port, GMAC_IRQ_MSK), 0);
293
294 gma_write16(hw, port, GM_MC_ADDR_H1, 0); /* clear MC hash */
295 gma_write16(hw, port, GM_MC_ADDR_H2, 0);
296 gma_write16(hw, port, GM_MC_ADDR_H3, 0);
297 gma_write16(hw, port, GM_MC_ADDR_H4, 0);
298
299 reg = gma_read16(hw, port, GM_RX_CTRL);
300 reg |= GM_RXCR_UCF_ENA | GM_RXCR_MCF_ENA;
301 gma_write16(hw, port, GM_RX_CTRL, reg);
302}
303
304/* flow control to advertise bits */
305static const u16 copper_fc_adv[] = {
306 [FC_NONE] = 0,
307 [FC_TX] = PHY_M_AN_ASP,
308 [FC_RX] = PHY_M_AN_PC,
309 [FC_BOTH] = PHY_M_AN_PC | PHY_M_AN_ASP,
310};
311
312/* flow control to advertise bits when using 1000BaseX */
313static const u16 fiber_fc_adv[] = {
314 [FC_NONE] = PHY_M_P_NO_PAUSE_X,
315 [FC_TX] = PHY_M_P_ASYM_MD_X,
316 [FC_RX] = PHY_M_P_SYM_MD_X,
317 [FC_BOTH] = PHY_M_P_BOTH_MD_X,
318};
319
320/* flow control to GMA disable bits */
321static const u16 gm_fc_disable[] = {
322 [FC_NONE] = GM_GPCR_FC_RX_DIS | GM_GPCR_FC_TX_DIS,
323 [FC_TX] = GM_GPCR_FC_RX_DIS,
324 [FC_RX] = GM_GPCR_FC_TX_DIS,
325 [FC_BOTH] = 0,
326};
327
328
329static void sky2_phy_init(struct sky2_hw *hw, unsigned port)
330{
331 struct sky2_port *sky2 = netdev_priv(hw->dev[port]);
332 u16 ctrl, ct1000, adv, pg, ledctrl, ledover, reg;
333
334	if ((sky2->flags & SKY2_FLAG_AUTO_SPEED) &&
335 !(hw->flags & SKY2_HW_NEWER_PHY)) {
336 u16 ectrl = gm_phy_read(hw, port, PHY_MARV_EXT_CTRL);
337
338 ectrl &= ~(PHY_M_EC_M_DSC_MSK | PHY_M_EC_S_DSC_MSK |
339 PHY_M_EC_MAC_S_MSK);
340 ectrl |= PHY_M_EC_MAC_S(MAC_TX_CLK_25_MHZ);
341
342 /* on PHY 88E1040 Rev.D0 (and newer) downshift control changed */
343 if (hw->chip_id == CHIP_ID_YUKON_EC)
344 /* set downshift counter to 3x and enable downshift */
345 ectrl |= PHY_M_EC_DSC_2(2) | PHY_M_EC_DOWN_S_ENA;
346 else
347 /* set master & slave downshift counter to 1x */
348 ectrl |= PHY_M_EC_M_DSC(0) | PHY_M_EC_S_DSC(1);
349
350 gm_phy_write(hw, port, PHY_MARV_EXT_CTRL, ectrl);
351 }
352
353 ctrl = gm_phy_read(hw, port, PHY_MARV_PHY_CTRL);
354 if (sky2_is_copper(hw)) {
355 if (!(hw->flags & SKY2_HW_GIGABIT)) {
356 /* enable automatic crossover */
357 ctrl |= PHY_M_PC_MDI_XMODE(PHY_M_PC_ENA_AUTO) >> 1;
358
359 if (hw->chip_id == CHIP_ID_YUKON_FE_P &&
360 hw->chip_rev == CHIP_REV_YU_FE2_A0) {
361 u16 spec;
362
363 /* Enable Class A driver for FE+ A0 */
364 spec = gm_phy_read(hw, port, PHY_MARV_FE_SPEC_2);
365 spec |= PHY_M_FESC_SEL_CL_A;
366 gm_phy_write(hw, port, PHY_MARV_FE_SPEC_2, spec);
367 }
368 } else {
369 if (hw->chip_id >= CHIP_ID_YUKON_OPT) {
370 u16 ctrl2 = gm_phy_read(hw, port, PHY_MARV_EXT_CTRL_2);
371
372 /* enable PHY Reverse Auto-Negotiation */
373 ctrl2 |= 1u << 13;
374
375 /* Write PHY changes (SW-reset must follow) */
376 gm_phy_write(hw, port, PHY_MARV_EXT_CTRL_2, ctrl2);
377 }
378
379
380 /* disable energy detect */
381 ctrl &= ~PHY_M_PC_EN_DET_MSK;
382
383 /* enable automatic crossover */
384 ctrl |= PHY_M_PC_MDI_XMODE(PHY_M_PC_ENA_AUTO);
385
386 /* downshift on PHY 88E1112 and 88E1149 is changed */
387			if ((sky2->flags & SKY2_FLAG_AUTO_SPEED) &&
388 (hw->flags & SKY2_HW_NEWER_PHY)) {
389 /* set downshift counter to 3x and enable downshift */
390 ctrl &= ~PHY_M_PC_DSC_MSK;
391 ctrl |= PHY_M_PC_DSC(2) | PHY_M_PC_DOWN_S_ENA;
392 }
393 }
394 } else {
395 /* workaround for deviation #4.88 (CRC errors) */
396 /* disable Automatic Crossover */
397
398 ctrl &= ~PHY_M_PC_MDIX_MSK;
399 }
400
401 gm_phy_write(hw, port, PHY_MARV_PHY_CTRL, ctrl);
402
403 /* special setup for PHY 88E1112 Fiber */
404 if (hw->chip_id == CHIP_ID_YUKON_XL && (hw->flags & SKY2_HW_FIBRE_PHY)) {
405 pg = gm_phy_read(hw, port, PHY_MARV_EXT_ADR);
406
407		/* Fiber: select 1000BASE-X only mode in MAC Specific Ctrl Reg. */
408 gm_phy_write(hw, port, PHY_MARV_EXT_ADR, 2);
409 ctrl = gm_phy_read(hw, port, PHY_MARV_PHY_CTRL);
410 ctrl &= ~PHY_M_MAC_MD_MSK;
411 ctrl |= PHY_M_MAC_MODE_SEL(PHY_M_MAC_MD_1000BX);
412 gm_phy_write(hw, port, PHY_MARV_PHY_CTRL, ctrl);
413
414 if (hw->pmd_type == 'P') {
415 /* select page 1 to access Fiber registers */
416 gm_phy_write(hw, port, PHY_MARV_EXT_ADR, 1);
417
418 /* for SFP-module set SIGDET polarity to low */
419 ctrl = gm_phy_read(hw, port, PHY_MARV_PHY_CTRL);
420 ctrl |= PHY_M_FIB_SIGD_POL;
421 gm_phy_write(hw, port, PHY_MARV_PHY_CTRL, ctrl);
422 }
423
424 gm_phy_write(hw, port, PHY_MARV_EXT_ADR, pg);
425 }
426
427 ctrl = PHY_CT_RESET;
428 ct1000 = 0;
429 adv = PHY_AN_CSMA;
430 reg = 0;
431
432 if (sky2->flags & SKY2_FLAG_AUTO_SPEED) {
433 if (sky2_is_copper(hw)) {
434 if (sky2->advertising & ADVERTISED_1000baseT_Full)
435 ct1000 |= PHY_M_1000C_AFD;
436 if (sky2->advertising & ADVERTISED_1000baseT_Half)
437 ct1000 |= PHY_M_1000C_AHD;
438 if (sky2->advertising & ADVERTISED_100baseT_Full)
439 adv |= PHY_M_AN_100_FD;
440 if (sky2->advertising & ADVERTISED_100baseT_Half)
441 adv |= PHY_M_AN_100_HD;
442 if (sky2->advertising & ADVERTISED_10baseT_Full)
443 adv |= PHY_M_AN_10_FD;
444 if (sky2->advertising & ADVERTISED_10baseT_Half)
445 adv |= PHY_M_AN_10_HD;
446
447 } else { /* special defines for FIBER (88E1040S only) */
448 if (sky2->advertising & ADVERTISED_1000baseT_Full)
449 adv |= PHY_M_AN_1000X_AFD;
450 if (sky2->advertising & ADVERTISED_1000baseT_Half)
451 adv |= PHY_M_AN_1000X_AHD;
452 }
453
454 /* Restart Auto-negotiation */
455 ctrl |= PHY_CT_ANE | PHY_CT_RE_CFG;
456 } else {
457 /* forced speed/duplex settings */
458 ct1000 = PHY_M_1000C_MSE;
459
460 /* Disable auto update for duplex flow control and duplex */
461 reg |= GM_GPCR_AU_DUP_DIS | GM_GPCR_AU_SPD_DIS;
462
463 switch (sky2->speed) {
464 case SPEED_1000:
465 ctrl |= PHY_CT_SP1000;
466 reg |= GM_GPCR_SPEED_1000;
467 break;
468 case SPEED_100:
469 ctrl |= PHY_CT_SP100;
470 reg |= GM_GPCR_SPEED_100;
471 break;
472 }
473
474 if (sky2->duplex == DUPLEX_FULL) {
475 reg |= GM_GPCR_DUP_FULL;
476 ctrl |= PHY_CT_DUP_MD;
477 } else if (sky2->speed < SPEED_1000)
478 sky2->flow_mode = FC_NONE;
479 }
480
481 if (sky2->flags & SKY2_FLAG_AUTO_PAUSE) {
482 if (sky2_is_copper(hw))
483 adv |= copper_fc_adv[sky2->flow_mode];
484 else
485 adv |= fiber_fc_adv[sky2->flow_mode];
486 } else {
487 reg |= GM_GPCR_AU_FCT_DIS;
488 reg |= gm_fc_disable[sky2->flow_mode];
489
490 /* Forward pause packets to GMAC? */
491 if (sky2->flow_mode & FC_RX)
492 sky2_write8(hw, SK_REG(port, GMAC_CTRL), GMC_PAUSE_ON);
493 else
494 sky2_write8(hw, SK_REG(port, GMAC_CTRL), GMC_PAUSE_OFF);
495 }
496
497 gma_write16(hw, port, GM_GP_CTRL, reg);
498
499 if (hw->flags & SKY2_HW_GIGABIT)
500 gm_phy_write(hw, port, PHY_MARV_1000T_CTRL, ct1000);
501
502 gm_phy_write(hw, port, PHY_MARV_AUNE_ADV, adv);
503 gm_phy_write(hw, port, PHY_MARV_CTRL, ctrl);
504
505 /* Setup Phy LED's */
506 ledctrl = PHY_M_LED_PULS_DUR(PULS_170MS);
507 ledover = 0;
508
509 switch (hw->chip_id) {
510 case CHIP_ID_YUKON_FE:
511 /* on 88E3082 these bits are at 11..9 (shifted left) */
512 ledctrl |= PHY_M_LED_BLINK_RT(BLINK_84MS) << 1;
513
514 ctrl = gm_phy_read(hw, port, PHY_MARV_FE_LED_PAR);
515
516 /* delete ACT LED control bits */
517 ctrl &= ~PHY_M_FELP_LED1_MSK;
518 /* change ACT LED control to blink mode */
519 ctrl |= PHY_M_FELP_LED1_CTRL(LED_PAR_CTRL_ACT_BL);
520 gm_phy_write(hw, port, PHY_MARV_FE_LED_PAR, ctrl);
521 break;
522
523 case CHIP_ID_YUKON_FE_P:
524 /* Enable Link Partner Next Page */
525 ctrl = gm_phy_read(hw, port, PHY_MARV_PHY_CTRL);
526 ctrl |= PHY_M_PC_ENA_LIP_NP;
527
528 /* disable Energy Detect and enable scrambler */
529 ctrl &= ~(PHY_M_PC_ENA_ENE_DT | PHY_M_PC_DIS_SCRAMB);
530 gm_phy_write(hw, port, PHY_MARV_PHY_CTRL, ctrl);
531
532 /* set LED2 -> ACT, LED1 -> LINK, LED0 -> SPEED */
533 ctrl = PHY_M_FELP_LED2_CTRL(LED_PAR_CTRL_ACT_BL) |
534 PHY_M_FELP_LED1_CTRL(LED_PAR_CTRL_LINK) |
535 PHY_M_FELP_LED0_CTRL(LED_PAR_CTRL_SPEED);
536
537 gm_phy_write(hw, port, PHY_MARV_FE_LED_PAR, ctrl);
538 break;
539
540 case CHIP_ID_YUKON_XL:
541 pg = gm_phy_read(hw, port, PHY_MARV_EXT_ADR);
542
543 /* select page 3 to access LED control register */
544 gm_phy_write(hw, port, PHY_MARV_EXT_ADR, 3);
545
546 /* set LED Function Control register */
547 gm_phy_write(hw, port, PHY_MARV_PHY_CTRL,
548 (PHY_M_LEDC_LOS_CTRL(1) | /* LINK/ACT */
549 PHY_M_LEDC_INIT_CTRL(7) | /* 10 Mbps */
550 PHY_M_LEDC_STA1_CTRL(7) | /* 100 Mbps */
551 PHY_M_LEDC_STA0_CTRL(7))); /* 1000 Mbps */
552
553 /* set Polarity Control register */
554 gm_phy_write(hw, port, PHY_MARV_PHY_STAT,
555 (PHY_M_POLC_LS1_P_MIX(4) |
556 PHY_M_POLC_IS0_P_MIX(4) |
557 PHY_M_POLC_LOS_CTRL(2) |
558 PHY_M_POLC_INIT_CTRL(2) |
559 PHY_M_POLC_STA1_CTRL(2) |
560 PHY_M_POLC_STA0_CTRL(2)));
561
562 /* restore page register */
563 gm_phy_write(hw, port, PHY_MARV_EXT_ADR, pg);
564 break;
565
566 case CHIP_ID_YUKON_EC_U:
567 case CHIP_ID_YUKON_EX:
568 case CHIP_ID_YUKON_SUPR:
569 pg = gm_phy_read(hw, port, PHY_MARV_EXT_ADR);
570
571 /* select page 3 to access LED control register */
572 gm_phy_write(hw, port, PHY_MARV_EXT_ADR, 3);
573
574 /* set LED Function Control register */
575 gm_phy_write(hw, port, PHY_MARV_PHY_CTRL,
576 (PHY_M_LEDC_LOS_CTRL(1) | /* LINK/ACT */
577 PHY_M_LEDC_INIT_CTRL(8) | /* 10 Mbps */
578 PHY_M_LEDC_STA1_CTRL(7) | /* 100 Mbps */
579 PHY_M_LEDC_STA0_CTRL(7)));/* 1000 Mbps */
580
581 /* set Blink Rate in LED Timer Control Register */
582 gm_phy_write(hw, port, PHY_MARV_INT_MASK,
583 ledctrl | PHY_M_LED_BLINK_RT(BLINK_84MS));
584 /* restore page register */
585 gm_phy_write(hw, port, PHY_MARV_EXT_ADR, pg);
586 break;
587
588 default:
589 /* set Tx LED (LED_TX) to blink mode on Rx OR Tx activity */
590 ledctrl |= PHY_M_LED_BLINK_RT(BLINK_84MS) | PHY_M_LEDC_TX_CTRL;
591
592 /* turn off the Rx LED (LED_RX) */
593 ledover |= PHY_M_LED_MO_RX(MO_LED_OFF);
594 }
595
596 if (hw->chip_id == CHIP_ID_YUKON_EC_U || hw->chip_id == CHIP_ID_YUKON_UL_2) {
597 /* apply fixes in PHY AFE */
598 gm_phy_write(hw, port, PHY_MARV_EXT_ADR, 255);
599
600 /* increase differential signal amplitude in 10BASE-T */
601 gm_phy_write(hw, port, 0x18, 0xaa99);
602 gm_phy_write(hw, port, 0x17, 0x2011);
603
604 if (hw->chip_id == CHIP_ID_YUKON_EC_U) {
605 /* fix for IEEE A/B Symmetry failure in 1000BASE-T */
606 gm_phy_write(hw, port, 0x18, 0xa204);
607 gm_phy_write(hw, port, 0x17, 0x2002);
608 }
609
610 /* set page register to 0 */
611 gm_phy_write(hw, port, PHY_MARV_EXT_ADR, 0);
612 } else if (hw->chip_id == CHIP_ID_YUKON_FE_P &&
613 hw->chip_rev == CHIP_REV_YU_FE2_A0) {
614 /* apply workaround for integrated resistors calibration */
615 gm_phy_write(hw, port, PHY_MARV_PAGE_ADDR, 17);
616 gm_phy_write(hw, port, PHY_MARV_PAGE_DATA, 0x3f60);
617 } else if (hw->chip_id == CHIP_ID_YUKON_OPT && hw->chip_rev == 0) {
618 /* apply fixes in PHY AFE */
619 gm_phy_write(hw, port, PHY_MARV_EXT_ADR, 0x00ff);
620
621 /* apply RDAC termination workaround */
622 gm_phy_write(hw, port, 24, 0x2800);
623 gm_phy_write(hw, port, 23, 0x2001);
624
625 /* set page register back to 0 */
626 gm_phy_write(hw, port, PHY_MARV_EXT_ADR, 0);
627 } else if (hw->chip_id != CHIP_ID_YUKON_EX &&
628 hw->chip_id < CHIP_ID_YUKON_SUPR) {
629 /* no effect on Yukon-XL */
630 gm_phy_write(hw, port, PHY_MARV_LED_CTRL, ledctrl);
631
632 if (!(sky2->flags & SKY2_FLAG_AUTO_SPEED) ||
633 sky2->speed == SPEED_100) {
634 /* turn on 100 Mbps LED (LED_LINK100) */
635 ledover |= PHY_M_LED_MO_100(MO_LED_ON);
636 }
637
638 if (ledover)
639 gm_phy_write(hw, port, PHY_MARV_LED_OVER, ledover);
640
641 } else if (hw->chip_id == CHIP_ID_YUKON_PRM &&
642 (sky2_read8(hw, B2_MAC_CFG) & 0xf) == 0x7) {
643 int i;
644		/* This is a PHY register setup workaround copied from the vendor driver. */
645 static const struct {
646 u16 reg, val;
647 } eee_afe[] = {
648 { 0x156, 0x58ce },
649 { 0x153, 0x99eb },
650 { 0x141, 0x8064 },
651 /* { 0x155, 0x130b },*/
652 { 0x000, 0x0000 },
653 { 0x151, 0x8433 },
654 { 0x14b, 0x8c44 },
655 { 0x14c, 0x0f90 },
656 { 0x14f, 0x39aa },
657 /* { 0x154, 0x2f39 },*/
658 { 0x14d, 0xba33 },
659 { 0x144, 0x0048 },
660 { 0x152, 0x2010 },
661 /* { 0x158, 0x1223 },*/
662 { 0x140, 0x4444 },
663 { 0x154, 0x2f3b },
664 { 0x158, 0xb203 },
665 { 0x157, 0x2029 },
666 };
667
668 /* Start Workaround for OptimaEEE Rev.Z0 */
669 gm_phy_write(hw, port, PHY_MARV_EXT_ADR, 0x00fb);
670
671 gm_phy_write(hw, port, 1, 0x4099);
672 gm_phy_write(hw, port, 3, 0x1120);
673 gm_phy_write(hw, port, 11, 0x113c);
674 gm_phy_write(hw, port, 14, 0x8100);
675 gm_phy_write(hw, port, 15, 0x112a);
676 gm_phy_write(hw, port, 17, 0x1008);
677
678 gm_phy_write(hw, port, PHY_MARV_EXT_ADR, 0x00fc);
679 gm_phy_write(hw, port, 1, 0x20b0);
680
681 gm_phy_write(hw, port, PHY_MARV_EXT_ADR, 0x00ff);
682
683 for (i = 0; i < ARRAY_SIZE(eee_afe); i++) {
684 /* apply AFE settings */
685 gm_phy_write(hw, port, 17, eee_afe[i].val);
686 gm_phy_write(hw, port, 16, eee_afe[i].reg | 1u<<13);
687 }
688
689 /* End Workaround for OptimaEEE */
690 gm_phy_write(hw, port, PHY_MARV_EXT_ADR, 0);
691
692 /* Enable 10Base-Te (EEE) */
693 if (hw->chip_id >= CHIP_ID_YUKON_PRM) {
694 reg = gm_phy_read(hw, port, PHY_MARV_EXT_CTRL);
695 gm_phy_write(hw, port, PHY_MARV_EXT_CTRL,
696 reg | PHY_M_10B_TE_ENABLE);
697 }
698 }
699
700 /* Enable phy interrupt on auto-negotiation complete (or link up) */
701 if (sky2->flags & SKY2_FLAG_AUTO_SPEED)
702 gm_phy_write(hw, port, PHY_MARV_INT_MASK, PHY_M_IS_AN_COMPL);
703 else
704 gm_phy_write(hw, port, PHY_MARV_INT_MASK, PHY_M_DEF_MSK);
705}
706
707static const u32 phy_power[] = { PCI_Y2_PHY1_POWD, PCI_Y2_PHY2_POWD };
708static const u32 coma_mode[] = { PCI_Y2_PHY1_COMA, PCI_Y2_PHY2_COMA };
709
710static void sky2_phy_power_up(struct sky2_hw *hw, unsigned port)
711{
712 u32 reg1;
713
714 sky2_write8(hw, B2_TST_CTRL1, TST_CFG_WRITE_ON);
715 reg1 = sky2_pci_read32(hw, PCI_DEV_REG1);
716 reg1 &= ~phy_power[port];
717
718 if (hw->chip_id == CHIP_ID_YUKON_XL && hw->chip_rev > CHIP_REV_YU_XL_A1)
719 reg1 |= coma_mode[port];
720
721 sky2_pci_write32(hw, PCI_DEV_REG1, reg1);
722 sky2_write8(hw, B2_TST_CTRL1, TST_CFG_WRITE_OFF);
723 sky2_pci_read32(hw, PCI_DEV_REG1);
724
725 if (hw->chip_id == CHIP_ID_YUKON_FE)
726 gm_phy_write(hw, port, PHY_MARV_CTRL, PHY_CT_ANE);
727 else if (hw->flags & SKY2_HW_ADV_POWER_CTL)
728 sky2_write8(hw, SK_REG(port, GPHY_CTRL), GPC_RST_CLR);
729}
730
731static void sky2_phy_power_down(struct sky2_hw *hw, unsigned port)
732{
733 u32 reg1;
734 u16 ctrl;
735
736 /* release GPHY Control reset */
737 sky2_write8(hw, SK_REG(port, GPHY_CTRL), GPC_RST_CLR);
738
739 /* release GMAC reset */
740 sky2_write8(hw, SK_REG(port, GMAC_CTRL), GMC_RST_CLR);
741
742 if (hw->flags & SKY2_HW_NEWER_PHY) {
743 /* select page 2 to access MAC control register */
744 gm_phy_write(hw, port, PHY_MARV_EXT_ADR, 2);
745
746 ctrl = gm_phy_read(hw, port, PHY_MARV_PHY_CTRL);
747 /* allow GMII Power Down */
748 ctrl &= ~PHY_M_MAC_GMIF_PUP;
749 gm_phy_write(hw, port, PHY_MARV_PHY_CTRL, ctrl);
750
751 /* set page register back to 0 */
752 gm_phy_write(hw, port, PHY_MARV_EXT_ADR, 0);
753 }
754
755 /* setup General Purpose Control Register */
756 gma_write16(hw, port, GM_GP_CTRL,
757 GM_GPCR_FL_PASS | GM_GPCR_SPEED_100 |
758 GM_GPCR_AU_DUP_DIS | GM_GPCR_AU_FCT_DIS |
759 GM_GPCR_AU_SPD_DIS);
760
761 if (hw->chip_id != CHIP_ID_YUKON_EC) {
762 if (hw->chip_id == CHIP_ID_YUKON_EC_U) {
763 /* select page 2 to access MAC control register */
764 gm_phy_write(hw, port, PHY_MARV_EXT_ADR, 2);
765
766 ctrl = gm_phy_read(hw, port, PHY_MARV_PHY_CTRL);
767 /* enable Power Down */
768 ctrl |= PHY_M_PC_POW_D_ENA;
769 gm_phy_write(hw, port, PHY_MARV_PHY_CTRL, ctrl);
770
771 /* set page register back to 0 */
772 gm_phy_write(hw, port, PHY_MARV_EXT_ADR, 0);
773 }
774
775 /* set IEEE compatible Power Down Mode (dev. #4.99) */
776 gm_phy_write(hw, port, PHY_MARV_CTRL, PHY_CT_PDOWN);
777 }
778
779 sky2_write8(hw, B2_TST_CTRL1, TST_CFG_WRITE_ON);
780 reg1 = sky2_pci_read32(hw, PCI_DEV_REG1);
781 reg1 |= phy_power[port]; /* set PHY to PowerDown/COMA Mode */
782 sky2_pci_write32(hw, PCI_DEV_REG1, reg1);
783 sky2_write8(hw, B2_TST_CTRL1, TST_CFG_WRITE_OFF);
784}
785
786/* configure IPG according to used link speed */
787static void sky2_set_ipg(struct sky2_port *sky2)
788{
789 u16 reg;
790
791 reg = gma_read16(sky2->hw, sky2->port, GM_SERIAL_MODE);
792 reg &= ~GM_SMOD_IPG_MSK;
793 if (sky2->speed > SPEED_100)
794 reg |= IPG_DATA_VAL(IPG_DATA_DEF_1000);
795 else
796 reg |= IPG_DATA_VAL(IPG_DATA_DEF_10_100);
797 gma_write16(sky2->hw, sky2->port, GM_SERIAL_MODE, reg);
798}
799
800/* Enable Rx/Tx */
801static void sky2_enable_rx_tx(struct sky2_port *sky2)
802{
803 struct sky2_hw *hw = sky2->hw;
804 unsigned port = sky2->port;
805 u16 reg;
806
807 reg = gma_read16(hw, port, GM_GP_CTRL);
808 reg |= GM_GPCR_RX_ENA | GM_GPCR_TX_ENA;
809 gma_write16(hw, port, GM_GP_CTRL, reg);
810}
811
812/* Force a renegotiation */
813static void sky2_phy_reinit(struct sky2_port *sky2)
814{
815 spin_lock_bh(&sky2->phy_lock);
816 sky2_phy_init(sky2->hw, sky2->port);
817 sky2_enable_rx_tx(sky2);
818 spin_unlock_bh(&sky2->phy_lock);
819}
820
821/* Put device in state to listen for Wake On Lan */
822static void sky2_wol_init(struct sky2_port *sky2)
823{
824 struct sky2_hw *hw = sky2->hw;
825 unsigned port = sky2->port;
826 enum flow_control save_mode;
827 u16 ctrl;
828
829 /* Bring hardware out of reset */
830 sky2_write16(hw, B0_CTST, CS_RST_CLR);
831 sky2_write16(hw, SK_REG(port, GMAC_LINK_CTRL), GMLC_RST_CLR);
832
833 sky2_write8(hw, SK_REG(port, GPHY_CTRL), GPC_RST_CLR);
834 sky2_write8(hw, SK_REG(port, GMAC_CTRL), GMC_RST_CLR);
835
836 /* Force to 10/100
837 * sky2_reset will re-enable on resume
838 */
839 save_mode = sky2->flow_mode;
840 ctrl = sky2->advertising;
841
842 sky2->advertising &= ~(ADVERTISED_1000baseT_Half|ADVERTISED_1000baseT_Full);
843 sky2->flow_mode = FC_NONE;
844
845 spin_lock_bh(&sky2->phy_lock);
846 sky2_phy_power_up(hw, port);
847 sky2_phy_init(hw, port);
848 spin_unlock_bh(&sky2->phy_lock);
849
850 sky2->flow_mode = save_mode;
851 sky2->advertising = ctrl;
852
853 /* Set GMAC to no flow control and auto update for speed/duplex */
854 gma_write16(hw, port, GM_GP_CTRL,
855 GM_GPCR_FC_TX_DIS|GM_GPCR_TX_ENA|GM_GPCR_RX_ENA|
856 GM_GPCR_DUP_FULL|GM_GPCR_FC_RX_DIS|GM_GPCR_AU_FCT_DIS);
857
858 /* Set WOL address */
859 memcpy_toio(hw->regs + WOL_REGS(port, WOL_MAC_ADDR),
860 sky2->netdev->dev_addr, ETH_ALEN);
861
862 /* Turn on appropriate WOL control bits */
863 sky2_write16(hw, WOL_REGS(port, WOL_CTRL_STAT), WOL_CTL_CLEAR_RESULT);
864 ctrl = 0;
865 if (sky2->wol & WAKE_PHY)
866 ctrl |= WOL_CTL_ENA_PME_ON_LINK_CHG|WOL_CTL_ENA_LINK_CHG_UNIT;
867 else
868 ctrl |= WOL_CTL_DIS_PME_ON_LINK_CHG|WOL_CTL_DIS_LINK_CHG_UNIT;
869
870 if (sky2->wol & WAKE_MAGIC)
871 ctrl |= WOL_CTL_ENA_PME_ON_MAGIC_PKT|WOL_CTL_ENA_MAGIC_PKT_UNIT;
872 else
873 ctrl |= WOL_CTL_DIS_PME_ON_MAGIC_PKT|WOL_CTL_DIS_MAGIC_PKT_UNIT;
874
875 ctrl |= WOL_CTL_DIS_PME_ON_PATTERN|WOL_CTL_DIS_PATTERN_UNIT;
876 sky2_write16(hw, WOL_REGS(port, WOL_CTRL_STAT), ctrl);
877
878 /* Disable PiG firmware */
879 sky2_write16(hw, B0_CTST, Y2_HW_WOL_OFF);
880
881 /* block receiver */
882 sky2_write8(hw, SK_REG(port, RX_GMF_CTRL_T), GMF_RST_SET);
883}
884
885static void sky2_set_tx_stfwd(struct sky2_hw *hw, unsigned port)
886{
887 struct net_device *dev = hw->dev[port];
888
889	if ((hw->chip_id == CHIP_ID_YUKON_EX &&
890 hw->chip_rev != CHIP_REV_YU_EX_A0) ||
891 hw->chip_id >= CHIP_ID_YUKON_FE_P) {
892 /* Yukon-Extreme B0 and further Extreme devices */
893 sky2_write32(hw, SK_REG(port, TX_GMF_CTRL_T), TX_STFW_ENA);
894 } else if (dev->mtu > ETH_DATA_LEN) {
895 /* set Tx GMAC FIFO Almost Empty Threshold */
896 sky2_write32(hw, SK_REG(port, TX_GMF_AE_THR),
897 (ECU_JUMBO_WM << 16) | ECU_AE_THR);
898
899 sky2_write32(hw, SK_REG(port, TX_GMF_CTRL_T), TX_STFW_DIS);
900 } else
901 sky2_write32(hw, SK_REG(port, TX_GMF_CTRL_T), TX_STFW_ENA);
902}
903
904static void sky2_mac_init(struct sky2_hw *hw, unsigned port)
905{
906 struct sky2_port *sky2 = netdev_priv(hw->dev[port]);
907 u16 reg;
908 u32 rx_reg;
909 int i;
910 const u8 *addr = hw->dev[port]->dev_addr;
911
912 sky2_write8(hw, SK_REG(port, GPHY_CTRL), GPC_RST_SET);
913 sky2_write8(hw, SK_REG(port, GPHY_CTRL), GPC_RST_CLR);
914
915 sky2_write8(hw, SK_REG(port, GMAC_CTRL), GMC_RST_CLR);
916
917 if (hw->chip_id == CHIP_ID_YUKON_XL &&
918 hw->chip_rev == CHIP_REV_YU_XL_A0 &&
919 port == 1) {
920 /* WA DEV_472 -- looks like crossed wires on port 2 */
921 /* clear GMAC 1 Control reset */
922 sky2_write8(hw, SK_REG(0, GMAC_CTRL), GMC_RST_CLR);
923 do {
924 sky2_write8(hw, SK_REG(1, GMAC_CTRL), GMC_RST_SET);
925 sky2_write8(hw, SK_REG(1, GMAC_CTRL), GMC_RST_CLR);
926 } while (gm_phy_read(hw, 1, PHY_MARV_ID0) != PHY_MARV_ID0_VAL ||
927 gm_phy_read(hw, 1, PHY_MARV_ID1) != PHY_MARV_ID1_Y2 ||
928 gm_phy_read(hw, 1, PHY_MARV_INT_MASK) != 0);
929 }
930
931 sky2_read16(hw, SK_REG(port, GMAC_IRQ_SRC));
932
933 /* Enable Transmit FIFO Underrun */
934 sky2_write8(hw, SK_REG(port, GMAC_IRQ_MSK), GMAC_DEF_MSK);
935
936 spin_lock_bh(&sky2->phy_lock);
937 sky2_phy_power_up(hw, port);
938 sky2_phy_init(hw, port);
939 spin_unlock_bh(&sky2->phy_lock);
940
941 /* MIB clear */
942 reg = gma_read16(hw, port, GM_PHY_ADDR);
943 gma_write16(hw, port, GM_PHY_ADDR, reg | GM_PAR_MIB_CLR);
944
945 for (i = GM_MIB_CNT_BASE; i <= GM_MIB_CNT_END; i += 4)
946 gma_read16(hw, port, i);
947 gma_write16(hw, port, GM_PHY_ADDR, reg);
948
949 /* transmit control */
950 gma_write16(hw, port, GM_TX_CTRL, TX_COL_THR(TX_COL_DEF));
951
952 /* receive control reg: unicast + multicast + no FCS */
953 gma_write16(hw, port, GM_RX_CTRL,
954 GM_RXCR_UCF_ENA | GM_RXCR_CRC_DIS | GM_RXCR_MCF_ENA);
955
956 /* transmit flow control */
957 gma_write16(hw, port, GM_TX_FLOW_CTRL, 0xffff);
958
959 /* transmit parameter */
960 gma_write16(hw, port, GM_TX_PARAM,
961 TX_JAM_LEN_VAL(TX_JAM_LEN_DEF) |
962 TX_JAM_IPG_VAL(TX_JAM_IPG_DEF) |
963 TX_IPG_JAM_DATA(TX_IPG_JAM_DEF) |
964 TX_BACK_OFF_LIM(TX_BOF_LIM_DEF));
965
966 /* serial mode register */
967 reg = DATA_BLIND_VAL(DATA_BLIND_DEF) |
968 GM_SMOD_VLAN_ENA | IPG_DATA_VAL(IPG_DATA_DEF_1000);
969
970 if (hw->dev[port]->mtu > ETH_DATA_LEN)
971 reg |= GM_SMOD_JUMBO_ENA;
972
973 if (hw->chip_id == CHIP_ID_YUKON_EC_U &&
974 hw->chip_rev == CHIP_REV_YU_EC_U_B1)
975 reg |= GM_NEW_FLOW_CTRL;
976
977 gma_write16(hw, port, GM_SERIAL_MODE, reg);
978
979 /* virtual address for data */
980 gma_set_addr(hw, port, GM_SRC_ADDR_2L, addr);
981
982 /* physical address: used for pause frames */
983 gma_set_addr(hw, port, GM_SRC_ADDR_1L, addr);
984
985 /* ignore counter overflows */
986 gma_write16(hw, port, GM_TX_IRQ_MSK, 0);
987 gma_write16(hw, port, GM_RX_IRQ_MSK, 0);
988 gma_write16(hw, port, GM_TR_IRQ_MSK, 0);
989
990 /* Configure Rx MAC FIFO */
991 sky2_write8(hw, SK_REG(port, RX_GMF_CTRL_T), GMF_RST_CLR);
992 rx_reg = GMF_OPER_ON | GMF_RX_F_FL_ON;
993 if (hw->chip_id == CHIP_ID_YUKON_EX ||
994 hw->chip_id == CHIP_ID_YUKON_FE_P)
995 rx_reg |= GMF_RX_OVER_ON;
996
997 sky2_write32(hw, SK_REG(port, RX_GMF_CTRL_T), rx_reg);
998
999 if (hw->chip_id == CHIP_ID_YUKON_XL) {
1000 /* Hardware errata - clear flush mask */
1001 sky2_write16(hw, SK_REG(port, RX_GMF_FL_MSK), 0);
1002 } else {
1003 /* Flush Rx MAC FIFO on any flow control or error */
1004 sky2_write16(hw, SK_REG(port, RX_GMF_FL_MSK), GMR_FS_ANY_ERR);
1005 }
1006
1007	/* Set threshold to 0xa (64 bytes) + 1 to work around the pause bug */
1008 reg = RX_GMF_FL_THR_DEF + 1;
1009 /* Another magic mystery workaround from sk98lin */
1010 if (hw->chip_id == CHIP_ID_YUKON_FE_P &&
1011 hw->chip_rev == CHIP_REV_YU_FE2_A0)
1012 reg = 0x178;
1013 sky2_write16(hw, SK_REG(port, RX_GMF_FL_THR), reg);
1014
1015 /* Configure Tx MAC FIFO */
1016 sky2_write8(hw, SK_REG(port, TX_GMF_CTRL_T), GMF_RST_CLR);
1017 sky2_write16(hw, SK_REG(port, TX_GMF_CTRL_T), GMF_OPER_ON);
1018
1019	/* On chips without a ram buffer, pause is controlled at the MAC level */
1020 if (!(hw->flags & SKY2_HW_RAM_BUFFER)) {
1021 /* Pause threshold is scaled by 8 in bytes */
1022 if (hw->chip_id == CHIP_ID_YUKON_FE_P &&
1023 hw->chip_rev == CHIP_REV_YU_FE2_A0)
1024 reg = 1568 / 8;
1025 else
1026 reg = 1024 / 8;
1027 sky2_write16(hw, SK_REG(port, RX_GMF_UP_THR), reg);
1028 sky2_write16(hw, SK_REG(port, RX_GMF_LP_THR), 768 / 8);
1029
1030 sky2_set_tx_stfwd(hw, port);
1031 }
1032
1033 if (hw->chip_id == CHIP_ID_YUKON_FE_P &&
1034 hw->chip_rev == CHIP_REV_YU_FE2_A0) {
1035 /* disable dynamic watermark */
1036 reg = sky2_read16(hw, SK_REG(port, TX_GMF_EA));
1037 reg &= ~TX_DYN_WM_ENA;
1038 sky2_write16(hw, SK_REG(port, TX_GMF_EA), reg);
1039 }
1040}
1041
1042/* Assign Ram Buffer allocation to queue */
1043static void sky2_ramset(struct sky2_hw *hw, u16 q, u32 start, u32 space)
1044{
1045 u32 end;
1046
1047	/* convert from Kbytes to the qwords used by the hw registers */
1048 start *= 1024/8;
1049 space *= 1024/8;
1050 end = start + space - 1;
1051
1052 sky2_write8(hw, RB_ADDR(q, RB_CTRL), RB_RST_CLR);
1053 sky2_write32(hw, RB_ADDR(q, RB_START), start);
1054 sky2_write32(hw, RB_ADDR(q, RB_END), end);
1055 sky2_write32(hw, RB_ADDR(q, RB_WP), start);
1056 sky2_write32(hw, RB_ADDR(q, RB_RP), start);
1057
1058 if (q == Q_R1 || q == Q_R2) {
1059 u32 tp = space - space/4;
1060
1061		/* On receive queues, set the thresholds:
1062		 * give the receiver priority when > 3/4 full,
1063		 * send pause when down to 2K
1064 */
1065 sky2_write32(hw, RB_ADDR(q, RB_RX_UTHP), tp);
1066 sky2_write32(hw, RB_ADDR(q, RB_RX_LTHP), space/2);
1067
1068 tp = space - 2048/8;
1069 sky2_write32(hw, RB_ADDR(q, RB_RX_UTPP), tp);
1070 sky2_write32(hw, RB_ADDR(q, RB_RX_LTPP), space/4);
1071 } else {
1072		/* Enable store & forward on Tx queues because
1073		 * the Tx FIFO is only 1K on Yukon
1074 */
1075 sky2_write8(hw, RB_ADDR(q, RB_CTRL), RB_ENA_STFWD);
1076 }
1077
1078 sky2_write8(hw, RB_ADDR(q, RB_CTRL), RB_ENA_OP_MD);
1079 sky2_read8(hw, RB_ADDR(q, RB_CTRL));
1080}
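
/* A worked example (illustrative only): sky2_ramset(hw, Q_R1, 0, 24)
 * converts 24K to 24 * 1024/8 = 3072 qwords, so the queue spans qwords
 * 0..3071; the priority threshold is 3072 - 3072/4 = 2304 qwords (3/4
 * full) and pause is asserted above 3072 - 2048/8 = 2816 qwords, i.e.
 * within 2K of full.
 */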
1081
1082/* Setup Bus Memory Interface */
1083static void sky2_qset(struct sky2_hw *hw, u16 q)
1084{
1085 sky2_write32(hw, Q_ADDR(q, Q_CSR), BMU_CLR_RESET);
1086 sky2_write32(hw, Q_ADDR(q, Q_CSR), BMU_OPER_INIT);
1087 sky2_write32(hw, Q_ADDR(q, Q_CSR), BMU_FIFO_OP_ON);
1088 sky2_write32(hw, Q_ADDR(q, Q_WM), BMU_WM_DEFAULT);
1089}
1090
1091/* Setup prefetch unit registers. This is the interface between
1092 * the hardware and the driver's list elements
1093 */
1094static void sky2_prefetch_init(struct sky2_hw *hw, u32 qaddr,
1095 dma_addr_t addr, u32 last)
1096{
1097 sky2_write32(hw, Y2_QADDR(qaddr, PREF_UNIT_CTRL), PREF_UNIT_RST_SET);
1098 sky2_write32(hw, Y2_QADDR(qaddr, PREF_UNIT_CTRL), PREF_UNIT_RST_CLR);
1099 sky2_write32(hw, Y2_QADDR(qaddr, PREF_UNIT_ADDR_HI), upper_32_bits(addr));
1100 sky2_write32(hw, Y2_QADDR(qaddr, PREF_UNIT_ADDR_LO), lower_32_bits(addr));
1101 sky2_write16(hw, Y2_QADDR(qaddr, PREF_UNIT_LAST_IDX), last);
1102 sky2_write32(hw, Y2_QADDR(qaddr, PREF_UNIT_CTRL), PREF_UNIT_OP_ON);
1103
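	/* Read back, presumably to flush the posted writes before the unit is used */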
1104 sky2_read32(hw, Y2_QADDR(qaddr, PREF_UNIT_CTRL));
1105}
1106
1107static inline struct sky2_tx_le *get_tx_le(struct sky2_port *sky2, u16 *slot)
1108{
1109 struct sky2_tx_le *le = sky2->tx_le + *slot;
1110
1111 *slot = RING_NEXT(*slot, sky2->tx_ring_size);
1112 le->ctrl = 0;
1113 return le;
1114}
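
/* A usage note (assumption about the sky2.h helper): RING_NEXT is taken
 * to advance and wrap the slot within the power-of-2 ring, e.g. with
 * tx_ring_size = 512, slot 511 advances back to 0.
 */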
1115
1116static void tx_init(struct sky2_port *sky2)
1117{
1118 struct sky2_tx_le *le;
1119
1120 sky2->tx_prod = sky2->tx_cons = 0;
1121 sky2->tx_tcpsum = 0;
1122 sky2->tx_last_mss = 0;
1123
1124 le = get_tx_le(sky2, &sky2->tx_prod);
1125 le->addr = 0;
1126 le->opcode = OP_ADDR64 | HW_OWNER;
1127 sky2->tx_last_upper = 0;
1128}
1129
1130/* Update chip's next pointer */
1131static inline void sky2_put_idx(struct sky2_hw *hw, unsigned q, u16 idx)
1132{
1133	/* Make sure writes to descriptors are complete before we tell hardware */
1134 wmb();
1135 sky2_write16(hw, Y2_QADDR(q, PREF_UNIT_PUT_IDX), idx);
1136
1137	/* Synchronize I/O since the next processor may write to the tail */
1138 mmiowb();
1139}
1140
1141
1142static inline struct sky2_rx_le *sky2_next_rx(struct sky2_port *sky2)
1143{
1144 struct sky2_rx_le *le = sky2->rx_le + sky2->rx_put;
1145 sky2->rx_put = RING_NEXT(sky2->rx_put, RX_LE_SIZE);
1146 le->ctrl = 0;
1147 return le;
1148}
1149
1150static unsigned sky2_get_rx_threshold(struct sky2_port *sky2)
1151{
1152 unsigned size;
1153
1154 /* Space needed for frame data + headers rounded up */
1155 size = roundup(sky2->netdev->mtu + ETH_HLEN + VLAN_HLEN, 8);
1156
1157 /* Stopping point for hardware truncation */
1158 return (size - 8) / sizeof(u32);
1159}
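
/* Example (ETH_HLEN = 14, VLAN_HLEN = 4): with a 1500-byte MTU,
 * size = roundup(1518, 8) = 1520, so the hardware truncation threshold
 * is (1520 - 8) / 4 = 378 32-bit words.
 */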
1160
1161static unsigned sky2_get_rx_data_size(struct sky2_port *sky2)
1162{
1163 struct rx_ring_info *re;
1164 unsigned size;
1165
1166 /* Space needed for frame data + headers rounded up */
1167 size = roundup(sky2->netdev->mtu + ETH_HLEN + VLAN_HLEN, 8);
1168
1169 sky2->rx_nfrags = size >> PAGE_SHIFT;
1170 BUG_ON(sky2->rx_nfrags > ARRAY_SIZE(re->frag_addr));
1171
1172 /* Compute residue after pages */
1173 size -= sky2->rx_nfrags << PAGE_SHIFT;
1174
1175 /* Optimize to handle small packets and headers */
1176 if (size < copybreak)
1177 size = copybreak;
1178 if (size < ETH_HLEN)
1179 size = ETH_HLEN;
1180
1181 return size;
1182}
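
/* Example (assuming 4K pages): a 9000-byte MTU gives
 * size = roundup(9018, 8) = 9024, so rx_nfrags = 2 pages with a linear
 * residue of 9024 - 8192 = 832 bytes, which already exceeds copybreak
 * and ETH_HLEN and is used as-is.
 */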
1183
1184/* Build description to hardware for one receive segment */
1185static void sky2_rx_add(struct sky2_port *sky2, u8 op,
1186 dma_addr_t map, unsigned len)
1187{
1188 struct sky2_rx_le *le;
1189
1190 if (sizeof(dma_addr_t) > sizeof(u32)) {
1191 le = sky2_next_rx(sky2);
1192 le->addr = cpu_to_le32(upper_32_bits(map));
1193 le->opcode = OP_ADDR64 | HW_OWNER;
1194 }
1195
1196 le = sky2_next_rx(sky2);
1197 le->addr = cpu_to_le32(lower_32_bits(map));
1198 le->length = cpu_to_le16(len);
1199 le->opcode = op | HW_OWNER;
1200}
1201
1202/* Build description to hardware for one possibly fragmented skb */
1203static void sky2_rx_submit(struct sky2_port *sky2,
1204 const struct rx_ring_info *re)
1205{
1206 int i;
1207
1208 sky2_rx_add(sky2, OP_PACKET, re->data_addr, sky2->rx_data_size);
1209
1210 for (i = 0; i < skb_shinfo(re->skb)->nr_frags; i++)
1211 sky2_rx_add(sky2, OP_BUFFER, re->frag_addr[i], PAGE_SIZE);
1212}
1213
1214
1215static int sky2_rx_map_skb(struct pci_dev *pdev, struct rx_ring_info *re,
1216 unsigned size)
1217{
1218 struct sk_buff *skb = re->skb;
1219 int i;
1220
1221 re->data_addr = pci_map_single(pdev, skb->data, size, PCI_DMA_FROMDEVICE);
1222 if (pci_dma_mapping_error(pdev, re->data_addr))
1223 goto mapping_error;
1224
1225 dma_unmap_len_set(re, data_size, size);
1226
1227 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
1228 const skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
1229
1230 re->frag_addr[i] = skb_frag_dma_map(&pdev->dev, frag, 0,
1231 skb_frag_size(frag),
1232 DMA_FROM_DEVICE);
1233
1234 if (dma_mapping_error(&pdev->dev, re->frag_addr[i]))
1235 goto map_page_error;
1236 }
1237 return 0;
1238
1239map_page_error:
1240 while (--i >= 0) {
1241 pci_unmap_page(pdev, re->frag_addr[i],
1242 skb_frag_size(&skb_shinfo(skb)->frags[i]),
1243 PCI_DMA_FROMDEVICE);
1244 }
1245
1246 pci_unmap_single(pdev, re->data_addr, dma_unmap_len(re, data_size),
1247 PCI_DMA_FROMDEVICE);
1248
1249mapping_error:
1250 if (net_ratelimit())
1251 dev_warn(&pdev->dev, "%s: rx mapping error\n",
1252 skb->dev->name);
1253 return -EIO;
1254}
1255
1256static void sky2_rx_unmap_skb(struct pci_dev *pdev, struct rx_ring_info *re)
1257{
1258 struct sk_buff *skb = re->skb;
1259 int i;
1260
1261 pci_unmap_single(pdev, re->data_addr, dma_unmap_len(re, data_size),
1262 PCI_DMA_FROMDEVICE);
1263
1264 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++)
1265 pci_unmap_page(pdev, re->frag_addr[i],
1266 skb_frag_size(&skb_shinfo(skb)->frags[i]),
1267 PCI_DMA_FROMDEVICE);
1268}
1269
1270/* Tell chip where to start receive checksum.
1271 * Actually has two checksums, but set both the same to avoid possible
1272 * byte-order problems.
1273 */
1274static void rx_set_checksum(struct sky2_port *sky2)
1275{
1276 struct sky2_rx_le *le = sky2_next_rx(sky2);
1277
1278 le->addr = cpu_to_le32((ETH_HLEN << 16) | ETH_HLEN);
1279 le->ctrl = 0;
1280 le->opcode = OP_TCPSTART | HW_OWNER;
1281
1282 sky2_write32(sky2->hw,
1283 Q_ADDR(rxqaddr[sky2->port], Q_CSR),
1284 (sky2->netdev->features & NETIF_F_RXCSUM)
1285 ? BMU_ENA_RX_CHKSUM : BMU_DIS_RX_CHKSUM);
1286}
1287
1288/* Enable/disable receive hash calculation (RSS) */
1289static void rx_set_rss(struct net_device *dev, u32 features)
1290{
1291 struct sky2_port *sky2 = netdev_priv(dev);
1292 struct sky2_hw *hw = sky2->hw;
1293 int i, nkeys = 4;
1294
1295 /* Supports IPv6 and other modes */
1296 if (hw->flags & SKY2_HW_NEW_LE) {
1297 nkeys = 10;
1298 sky2_write32(hw, SK_REG(sky2->port, RSS_CFG), HASH_ALL);
1299 }
1300
1301 /* Program RSS initial values */
1302 if (features & NETIF_F_RXHASH) {
1303 u32 key[nkeys];
1304
1305 get_random_bytes(key, nkeys * sizeof(u32));
1306 for (i = 0; i < nkeys; i++)
1307 sky2_write32(hw, SK_REG(sky2->port, RSS_KEY + i * 4),
1308 key[i]);
1309
1310 /* Need to turn on (undocumented) flag to make hashing work */
1311 sky2_write32(hw, SK_REG(sky2->port, RX_GMF_CTRL_T),
1312 RX_STFW_ENA);
1313
1314 sky2_write32(hw, Q_ADDR(rxqaddr[sky2->port], Q_CSR),
1315 BMU_ENA_RX_RSS_HASH);
1316 } else
1317 sky2_write32(hw, Q_ADDR(rxqaddr[sky2->port], Q_CSR),
1318 BMU_DIS_RX_RSS_HASH);
1319}
1320
1321/*
1322 * The RX Stop command will not work for Yukon-2 if the BMU does not
1323 * reach the end of packet and since we can't make sure that we have
1324 * incoming data, we must reset the BMU while it is not doing a DMA
1325 * transfer. Since it is possible that the RX path is still active,
1326 * the RX RAM buffer will be stopped first, so any possible incoming
1327 * data will not trigger a DMA. After the RAM buffer is stopped, the
1328 * BMU is polled until any DMA in progress has ended, and only then
1329 * is it reset.
1330 */
1331static void sky2_rx_stop(struct sky2_port *sky2)
1332{
1333 struct sky2_hw *hw = sky2->hw;
1334 unsigned rxq = rxqaddr[sky2->port];
1335 int i;
1336
1337 /* disable the RAM Buffer receive queue */
1338 sky2_write8(hw, RB_ADDR(rxq, RB_CTRL), RB_DIS_OP_MD);
1339
1340 for (i = 0; i < 0xffff; i++)
1341 if (sky2_read8(hw, RB_ADDR(rxq, Q_RSL))
1342 == sky2_read8(hw, RB_ADDR(rxq, Q_RL)))
1343 goto stopped;
1344
1345 netdev_warn(sky2->netdev, "receiver stop failed\n");
1346stopped:
1347 sky2_write32(hw, Q_ADDR(rxq, Q_CSR), BMU_RST_SET | BMU_FIFO_RST);
1348
1349 /* reset the Rx prefetch unit */
1350 sky2_write32(hw, Y2_QADDR(rxq, PREF_UNIT_CTRL), PREF_UNIT_RST_SET);
1351 mmiowb();
1352}
1353
1354/* Clean out receive buffer area, assumes receiver hardware stopped */
1355static void sky2_rx_clean(struct sky2_port *sky2)
1356{
1357 unsigned i;
1358
1359 memset(sky2->rx_le, 0, RX_LE_BYTES);
1360 for (i = 0; i < sky2->rx_pending; i++) {
1361 struct rx_ring_info *re = sky2->rx_ring + i;
1362
1363 if (re->skb) {
1364 sky2_rx_unmap_skb(sky2->hw->pdev, re);
1365 kfree_skb(re->skb);
1366 re->skb = NULL;
1367 }
1368 }
1369}
1370
1371/* Basic MII support */
1372static int sky2_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
1373{
1374 struct mii_ioctl_data *data = if_mii(ifr);
1375 struct sky2_port *sky2 = netdev_priv(dev);
1376 struct sky2_hw *hw = sky2->hw;
1377 int err = -EOPNOTSUPP;
1378
1379 if (!netif_running(dev))
1380 return -ENODEV; /* Phy still in reset */
1381
1382 switch (cmd) {
1383 case SIOCGMIIPHY:
1384 data->phy_id = PHY_ADDR_MARV;
1385
1386 /* fallthru */
1387 case SIOCGMIIREG: {
1388 u16 val = 0;
1389
1390 spin_lock_bh(&sky2->phy_lock);
1391 err = __gm_phy_read(hw, sky2->port, data->reg_num & 0x1f, &val);
1392 spin_unlock_bh(&sky2->phy_lock);
1393
1394 data->val_out = val;
1395 break;
1396 }
1397
1398 case SIOCSMIIREG:
1399 spin_lock_bh(&sky2->phy_lock);
1400 err = gm_phy_write(hw, sky2->port, data->reg_num & 0x1f,
1401 data->val_in);
1402 spin_unlock_bh(&sky2->phy_lock);
1403 break;
1404 }
1405 return err;
1406}
1407
1408#define SKY2_VLAN_OFFLOADS (NETIF_F_IP_CSUM | NETIF_F_SG | NETIF_F_TSO)
1409
1410static void sky2_vlan_mode(struct net_device *dev, u32 features)
1411{
1412 struct sky2_port *sky2 = netdev_priv(dev);
1413 struct sky2_hw *hw = sky2->hw;
1414 u16 port = sky2->port;
1415
1416 if (features & NETIF_F_HW_VLAN_RX)
1417 sky2_write32(hw, SK_REG(port, RX_GMF_CTRL_T),
1418 RX_VLAN_STRIP_ON);
1419 else
1420 sky2_write32(hw, SK_REG(port, RX_GMF_CTRL_T),
1421 RX_VLAN_STRIP_OFF);
1422
1423 if (features & NETIF_F_HW_VLAN_TX) {
1424 sky2_write32(hw, SK_REG(port, TX_GMF_CTRL_T),
1425 TX_VLAN_TAG_ON);
1426
1427 dev->vlan_features |= SKY2_VLAN_OFFLOADS;
1428 } else {
1429 sky2_write32(hw, SK_REG(port, TX_GMF_CTRL_T),
1430 TX_VLAN_TAG_OFF);
1431
1432 /* Can't do transmit offload of vlan without hw vlan */
1433 dev->vlan_features &= ~SKY2_VLAN_OFFLOADS;
1434 }
1435}
1436
1437/* Amount of required worst case padding in rx buffer */
1438static inline unsigned sky2_rx_pad(const struct sky2_hw *hw)
1439{
1440 return (hw->flags & SKY2_HW_RAM_BUFFER) ? 8 : 2;
1441}
1442
1443/*
1444 * Allocate an skb for receiving. If the MTU is large enough,
1445 * make the skb non-linear with a fragment list of pages.
1446 */
1447static struct sk_buff *sky2_rx_alloc(struct sky2_port *sky2, gfp_t gfp)
1448{
1449 struct sk_buff *skb;
1450 int i;
1451
1452 skb = __netdev_alloc_skb(sky2->netdev,
1453 sky2->rx_data_size + sky2_rx_pad(sky2->hw),
1454 gfp);
1455 if (!skb)
1456 goto nomem;
1457
1458 if (sky2->hw->flags & SKY2_HW_RAM_BUFFER) {
1459 unsigned char *start;
1460 /*
1461		 * Workaround for a FIFO bug that causes a hang
1462		 * if the receive buffer is not 64-byte aligned.
1463 * The buffer returned from netdev_alloc_skb is
1464 * aligned except if slab debugging is enabled.
1465 */
1466 start = PTR_ALIGN(skb->data, 8);
1467 skb_reserve(skb, start - skb->data);
1468 } else
1469 skb_reserve(skb, NET_IP_ALIGN);
1470
1471 for (i = 0; i < sky2->rx_nfrags; i++) {
1472 struct page *page = alloc_page(gfp);
1473
1474 if (!page)
1475 goto free_partial;
1476 skb_fill_page_desc(skb, i, page, 0, PAGE_SIZE);
1477 }
1478
1479 return skb;
1480free_partial:
1481 kfree_skb(skb);
1482nomem:
1483 return NULL;
1484}
1485
1486static inline void sky2_rx_update(struct sky2_port *sky2, unsigned rxq)
1487{
1488 sky2_put_idx(sky2->hw, rxq, sky2->rx_put);
1489}
1490
1491static int sky2_alloc_rx_skbs(struct sky2_port *sky2)
1492{
1493 struct sky2_hw *hw = sky2->hw;
1494 unsigned i;
1495
1496 sky2->rx_data_size = sky2_get_rx_data_size(sky2);
1497
1498 /* Fill Rx ring */
1499 for (i = 0; i < sky2->rx_pending; i++) {
1500 struct rx_ring_info *re = sky2->rx_ring + i;
1501
1502 re->skb = sky2_rx_alloc(sky2, GFP_KERNEL);
1503 if (!re->skb)
1504 return -ENOMEM;
1505
1506 if (sky2_rx_map_skb(hw->pdev, re, sky2->rx_data_size)) {
1507 dev_kfree_skb(re->skb);
1508 re->skb = NULL;
1509 return -ENOMEM;
1510 }
1511 }
1512 return 0;
1513}
1514
1515/*
1516 * Setup receiver buffer pool.
1517 * In the normal case this ends up creating one list element per skb
1518 * in the receive ring. In the worst case, with a large MTU where each
1519 * allocation falls in a different 64-bit region, that results
1520 * in 6 list elements per ring entry.
1521 * One element is used for checksum enable/disable, and one
1522 * extra to avoid wrap.
1523 */
1524static void sky2_rx_start(struct sky2_port *sky2)
1525{
1526 struct sky2_hw *hw = sky2->hw;
1527 struct rx_ring_info *re;
1528 unsigned rxq = rxqaddr[sky2->port];
1529 unsigned i, thresh;
1530
1531 sky2->rx_put = sky2->rx_next = 0;
1532 sky2_qset(hw, rxq);
1533
1534	/* On PCI Express, lowering the watermark gives better performance */
1535 if (pci_is_pcie(hw->pdev))
1536 sky2_write32(hw, Q_ADDR(rxq, Q_WM), BMU_WM_PEX);
1537
1538	/* These chips have no ram buffer;
1539	 * MAC Rx RAM Read is controlled by hardware */
1540 if (hw->chip_id == CHIP_ID_YUKON_EC_U &&
1541 hw->chip_rev > CHIP_REV_YU_EC_U_A0)
1542 sky2_write32(hw, Q_ADDR(rxq, Q_TEST), F_M_RX_RAM_DIS);
1543
1544 sky2_prefetch_init(hw, rxq, sky2->rx_le_map, RX_LE_SIZE - 1);
1545
1546 if (!(hw->flags & SKY2_HW_NEW_LE))
1547 rx_set_checksum(sky2);
1548
1549 if (!(hw->flags & SKY2_HW_RSS_BROKEN))
1550 rx_set_rss(sky2->netdev, sky2->netdev->features);
1551
1552 /* submit Rx ring */
1553 for (i = 0; i < sky2->rx_pending; i++) {
1554 re = sky2->rx_ring + i;
1555 sky2_rx_submit(sky2, re);
1556 }
1557
1558 /*
1559 * The receiver hangs if it receives frames larger than the
1560 * packet buffer. As a workaround, truncate oversize frames, but
1561 * the register is limited to 9 bits, so if you do frames > 2052
1562	 * the register is limited to 9 bits, so for frames > 2052
1563	 * you had better get the MTU right!
1564 thresh = sky2_get_rx_threshold(sky2);
1565 if (thresh > 0x1ff)
1566 sky2_write32(hw, SK_REG(sky2->port, RX_GMF_CTRL_T), RX_TRUNC_OFF);
1567 else {
1568 sky2_write16(hw, SK_REG(sky2->port, RX_GMF_TR_THR), thresh);
1569 sky2_write32(hw, SK_REG(sky2->port, RX_GMF_CTRL_T), RX_TRUNC_ON);
1570 }
1571
1572 /* Tell chip about available buffers */
1573 sky2_rx_update(sky2, rxq);
1574
1575 if (hw->chip_id == CHIP_ID_YUKON_EX ||
1576 hw->chip_id == CHIP_ID_YUKON_SUPR) {
1577 /*
1578 * Disable flushing of non ASF packets;
1579 * must be done after initializing the BMUs;
1580 * drivers without ASF support should do this too, otherwise
1581 * it may happen that they cannot run on ASF devices;
1582 * remember that the MAC FIFO isn't reset during initialization.
1583 */
1584 sky2_write32(hw, SK_REG(sky2->port, RX_GMF_CTRL_T), RX_MACSEC_FLUSH_OFF);
1585 }
1586
1587 if (hw->chip_id >= CHIP_ID_YUKON_SUPR) {
1588 /* Enable RX Home Address & Routing Header checksum fix */
1589 sky2_write16(hw, SK_REG(sky2->port, RX_GMF_FL_CTRL),
1590 RX_IPV6_SA_MOB_ENA | RX_IPV6_DA_MOB_ENA);
1591
1592 /* Enable TX Home Address & Routing Header checksum fix */
1593 sky2_write32(hw, Q_ADDR(txqaddr[sky2->port], Q_TEST),
1594 TBMU_TEST_HOME_ADD_FIX_EN | TBMU_TEST_ROUTING_ADD_FIX_EN);
1595 }
1596}
1597
1598static int sky2_alloc_buffers(struct sky2_port *sky2)
1599{
1600 struct sky2_hw *hw = sky2->hw;
1601
1602 /* must be power of 2 */
1603 sky2->tx_le = pci_alloc_consistent(hw->pdev,
1604 sky2->tx_ring_size *
1605 sizeof(struct sky2_tx_le),
1606 &sky2->tx_le_map);
1607 if (!sky2->tx_le)
1608 goto nomem;
1609
1610 sky2->tx_ring = kcalloc(sky2->tx_ring_size, sizeof(struct tx_ring_info),
1611 GFP_KERNEL);
1612 if (!sky2->tx_ring)
1613 goto nomem;
1614
1615 sky2->rx_le = pci_alloc_consistent(hw->pdev, RX_LE_BYTES,
1616 &sky2->rx_le_map);
1617 if (!sky2->rx_le)
1618 goto nomem;
1619 memset(sky2->rx_le, 0, RX_LE_BYTES);
1620
1621 sky2->rx_ring = kcalloc(sky2->rx_pending, sizeof(struct rx_ring_info),
1622 GFP_KERNEL);
1623 if (!sky2->rx_ring)
1624 goto nomem;
1625
1626 return sky2_alloc_rx_skbs(sky2);
1627nomem:
1628 return -ENOMEM;
1629}
1630
1631static void sky2_free_buffers(struct sky2_port *sky2)
1632{
1633 struct sky2_hw *hw = sky2->hw;
1634
1635 sky2_rx_clean(sky2);
1636
1637 if (sky2->rx_le) {
1638 pci_free_consistent(hw->pdev, RX_LE_BYTES,
1639 sky2->rx_le, sky2->rx_le_map);
1640 sky2->rx_le = NULL;
1641 }
1642 if (sky2->tx_le) {
1643 pci_free_consistent(hw->pdev,
1644 sky2->tx_ring_size * sizeof(struct sky2_tx_le),
1645 sky2->tx_le, sky2->tx_le_map);
1646 sky2->tx_le = NULL;
1647 }
1648 kfree(sky2->tx_ring);
1649 kfree(sky2->rx_ring);
1650
1651 sky2->tx_ring = NULL;
1652 sky2->rx_ring = NULL;
1653}
1654
1655static void sky2_hw_up(struct sky2_port *sky2)
1656{
1657 struct sky2_hw *hw = sky2->hw;
1658 unsigned port = sky2->port;
1659 u32 ramsize;
1660 int cap;
1661 struct net_device *otherdev = hw->dev[sky2->port^1];
1662
1663 tx_init(sky2);
1664
1665 /*
1666	 * On a dual-port PCI-X card, there is a problem where status
1667	 * can be received out of order due to split transactions
1668 */
1669 if (otherdev && netif_running(otherdev) &&
1670 (cap = pci_find_capability(hw->pdev, PCI_CAP_ID_PCIX))) {
1671 u16 cmd;
1672
1673 cmd = sky2_pci_read16(hw, cap + PCI_X_CMD);
1674 cmd &= ~PCI_X_CMD_MAX_SPLIT;
1675 sky2_pci_write16(hw, cap + PCI_X_CMD, cmd);
1676 }
1677
1678 sky2_mac_init(hw, port);
1679
1680 /* Register is number of 4K blocks on internal RAM buffer. */
1681 ramsize = sky2_read8(hw, B2_E_0) * 4;
1682 if (ramsize > 0) {
1683 u32 rxspace;
1684
1685 netdev_dbg(sky2->netdev, "ram buffer %dK\n", ramsize);
1686 if (ramsize < 16)
1687 rxspace = ramsize / 2;
1688 else
1689 rxspace = 8 + (2*(ramsize - 16))/3;
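		/* e.g. (illustrative): ramsize = 48K gives
		 * rxspace = 8 + (2*32)/3 = 29K for receive, leaving 19K for tx */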
1690
1691 sky2_ramset(hw, rxqaddr[port], 0, rxspace);
1692 sky2_ramset(hw, txqaddr[port], rxspace, ramsize - rxspace);
1693
1694 /* Make sure SyncQ is disabled */
1695 sky2_write8(hw, RB_ADDR(port == 0 ? Q_XS1 : Q_XS2, RB_CTRL),
1696 RB_RST_SET);
1697 }
1698
1699 sky2_qset(hw, txqaddr[port]);
1700
1701	/* This is copied from sk98lin 10.0.5.3; no one tells me about errata */
1702 if (hw->chip_id == CHIP_ID_YUKON_EX && hw->chip_rev == CHIP_REV_YU_EX_B0)
1703 sky2_write32(hw, Q_ADDR(txqaddr[port], Q_TEST), F_TX_CHK_AUTO_OFF);
1704
1705 /* Set almost empty threshold */
1706 if (hw->chip_id == CHIP_ID_YUKON_EC_U &&
1707 hw->chip_rev == CHIP_REV_YU_EC_U_A0)
1708 sky2_write16(hw, Q_ADDR(txqaddr[port], Q_AL), ECU_TXFF_LEV);
1709
1710 sky2_prefetch_init(hw, txqaddr[port], sky2->tx_le_map,
1711 sky2->tx_ring_size - 1);
1712
1713 sky2_vlan_mode(sky2->netdev, sky2->netdev->features);
1714 netdev_update_features(sky2->netdev);
1715
1716 sky2_rx_start(sky2);
1717}
1718
1719/* Setup device IRQ and enable napi to process */
1720static int sky2_setup_irq(struct sky2_hw *hw, const char *name)
1721{
1722 struct pci_dev *pdev = hw->pdev;
1723 int err;
1724
1725 err = request_irq(pdev->irq, sky2_intr,
1726 (hw->flags & SKY2_HW_USE_MSI) ? 0 : IRQF_SHARED,
1727 name, hw);
1728 if (err)
1729 dev_err(&pdev->dev, "cannot assign irq %d\n", pdev->irq);
1730 else {
1731 napi_enable(&hw->napi);
1732 sky2_write32(hw, B0_IMSK, Y2_IS_BASE);
1733 sky2_read32(hw, B0_IMSK);
1734 }
1735
1736 return err;
1737}
1738
1739
1740/* Bring up network interface. */
1741static int sky2_up(struct net_device *dev)
1742{
1743 struct sky2_port *sky2 = netdev_priv(dev);
1744 struct sky2_hw *hw = sky2->hw;
1745 unsigned port = sky2->port;
1746 u32 imask;
1747 int err;
1748
1749 netif_carrier_off(dev);
1750
1751 err = sky2_alloc_buffers(sky2);
1752 if (err)
1753 goto err_out;
1754
1755 /* With single port, IRQ is setup when device is brought up */
1756 if (hw->ports == 1 && (err = sky2_setup_irq(hw, dev->name)))
1757 goto err_out;
1758
1759 sky2_hw_up(sky2);
1760
1761 /* Enable interrupts from phy/mac for port */
1762 imask = sky2_read32(hw, B0_IMSK);
1763 imask |= portirq_msk[port];
1764 sky2_write32(hw, B0_IMSK, imask);
1765 sky2_read32(hw, B0_IMSK);
1766
1767 netif_info(sky2, ifup, dev, "enabling interface\n");
1768
1769 return 0;
1770
1771err_out:
1772 sky2_free_buffers(sky2);
1773 return err;
1774}
1775
1776/* Modular subtraction in ring */
1777static inline int tx_inuse(const struct sky2_port *sky2)
1778{
1779 return (sky2->tx_prod - sky2->tx_cons) & (sky2->tx_ring_size - 1);
1780}
1781
1782/* Number of list elements available for next tx */
1783static inline int tx_avail(const struct sky2_port *sky2)
1784{
1785 return sky2->tx_pending - tx_inuse(sky2);
1786}
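
/* Sketch of the ring arithmetic (tx_ring_size must be a power of 2):
 * with tx_ring_size = 512, tx_prod = 5 and tx_cons = 510,
 * tx_inuse() = (5 - 510) & 511 = 7 elements in use.
 */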
1787
1788/* Estimate of number of transmit list elements required */
1789static unsigned tx_le_req(const struct sk_buff *skb)
1790{
1791 unsigned count;
1792
1793 count = (skb_shinfo(skb)->nr_frags + 1)
1794 * (sizeof(dma_addr_t) / sizeof(u32));
1795
1796 if (skb_is_gso(skb))
1797 ++count;
1798 else if (sizeof(dma_addr_t) == sizeof(u32))
1799 ++count; /* possible vlan */
1800
1801 if (skb->ip_summed == CHECKSUM_PARTIAL)
1802 ++count;
1803
1804 return count;
1805}
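
/* Example (assuming 64-bit dma_addr_t): a TSO skb with 3 fragments needs
 * (3 + 1) * 2 = 8 address/length elements, plus one MSS element and one
 * checksum element (TSO implies CHECKSUM_PARTIAL), so tx_le_req()
 * returns 10.
 */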
1806
1807static void sky2_tx_unmap(struct pci_dev *pdev, struct tx_ring_info *re)
1808{
1809 if (re->flags & TX_MAP_SINGLE)
1810 pci_unmap_single(pdev, dma_unmap_addr(re, mapaddr),
1811 dma_unmap_len(re, maplen),
1812 PCI_DMA_TODEVICE);
1813 else if (re->flags & TX_MAP_PAGE)
1814 pci_unmap_page(pdev, dma_unmap_addr(re, mapaddr),
1815 dma_unmap_len(re, maplen),
1816 PCI_DMA_TODEVICE);
1817 re->flags = 0;
1818}
1819
1820/*
1821 * Put one packet in ring for transmit.
1822 * A single packet can generate multiple list elements, and
1823 * the number of ring elements will probably be less than the number
1824 * of list elements used.
1825 */
1826static netdev_tx_t sky2_xmit_frame(struct sk_buff *skb,
1827 struct net_device *dev)
1828{
1829 struct sky2_port *sky2 = netdev_priv(dev);
1830 struct sky2_hw *hw = sky2->hw;
1831 struct sky2_tx_le *le = NULL;
1832 struct tx_ring_info *re;
1833 unsigned i, len;
1834 dma_addr_t mapping;
1835 u32 upper;
1836 u16 slot;
1837 u16 mss;
1838 u8 ctrl;
1839
1840 if (unlikely(tx_avail(sky2) < tx_le_req(skb)))
1841 return NETDEV_TX_BUSY;
1842
1843 len = skb_headlen(skb);
1844 mapping = pci_map_single(hw->pdev, skb->data, len, PCI_DMA_TODEVICE);
1845
1846 if (pci_dma_mapping_error(hw->pdev, mapping))
1847 goto mapping_error;
1848
1849 slot = sky2->tx_prod;
1850 netif_printk(sky2, tx_queued, KERN_DEBUG, dev,
1851 "tx queued, slot %u, len %d\n", slot, skb->len);
1852
1853 /* Send high bits if needed */
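	/* (The chip latches the last OP_ADDR64 value, so a new element is
	 * only emitted when the upper 32 bits change; tx_last_upper tracks
	 * this across packets.) */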
1854 upper = upper_32_bits(mapping);
1855 if (upper != sky2->tx_last_upper) {
1856 le = get_tx_le(sky2, &slot);
1857 le->addr = cpu_to_le32(upper);
1858 sky2->tx_last_upper = upper;
1859 le->opcode = OP_ADDR64 | HW_OWNER;
1860 }
1861
1862 /* Check for TCP Segmentation Offload */
1863 mss = skb_shinfo(skb)->gso_size;
1864 if (mss != 0) {
1865
1866 if (!(hw->flags & SKY2_HW_NEW_LE))
1867 mss += ETH_HLEN + ip_hdrlen(skb) + tcp_hdrlen(skb);
1868
1869 if (mss != sky2->tx_last_mss) {
1870 le = get_tx_le(sky2, &slot);
1871 le->addr = cpu_to_le32(mss);
1872
1873 if (hw->flags & SKY2_HW_NEW_LE)
1874 le->opcode = OP_MSS | HW_OWNER;
1875 else
1876 le->opcode = OP_LRGLEN | HW_OWNER;
1877 sky2->tx_last_mss = mss;
1878 }
1879 }
1880
1881 ctrl = 0;
1882
1883 /* Add VLAN tag, can piggyback on LRGLEN or ADDR64 */
1884 if (vlan_tx_tag_present(skb)) {
1885 if (!le) {
1886 le = get_tx_le(sky2, &slot);
1887 le->addr = 0;
1888 le->opcode = OP_VLAN|HW_OWNER;
1889 } else
1890 le->opcode |= OP_VLAN;
1891 le->length = cpu_to_be16(vlan_tx_tag_get(skb));
1892 ctrl |= INS_VLAN;
1893 }
1894
1895 /* Handle TCP checksum offload */
1896 if (skb->ip_summed == CHECKSUM_PARTIAL) {
1897		/* On Yukon EX (some versions) the checksum encoding changed. */
1898 if (hw->flags & SKY2_HW_AUTO_TX_SUM)
1899 ctrl |= CALSUM; /* auto checksum */
1900 else {
1901 const unsigned offset = skb_transport_offset(skb);
1902 u32 tcpsum;
1903
1904 tcpsum = offset << 16; /* sum start */
1905 tcpsum |= offset + skb->csum_offset; /* sum write */
1906
1907 ctrl |= CALSUM | WR_SUM | INIT_SUM | LOCK_SUM;
1908 if (ip_hdr(skb)->protocol == IPPROTO_UDP)
1909 ctrl |= UDPTCP;
1910
1911 if (tcpsum != sky2->tx_tcpsum) {
1912 sky2->tx_tcpsum = tcpsum;
1913
1914 le = get_tx_le(sky2, &slot);
1915 le->addr = cpu_to_le32(tcpsum);
1916 le->length = 0; /* initial checksum value */
1917 le->ctrl = 1; /* one packet */
1918 le->opcode = OP_TCPLISW | HW_OWNER;
1919 }
1920 }
1921 }
1922
1923 re = sky2->tx_ring + slot;
1924 re->flags = TX_MAP_SINGLE;
1925 dma_unmap_addr_set(re, mapaddr, mapping);
1926 dma_unmap_len_set(re, maplen, len);
1927
1928 le = get_tx_le(sky2, &slot);
1929 le->addr = cpu_to_le32(lower_32_bits(mapping));
1930 le->length = cpu_to_le16(len);
1931 le->ctrl = ctrl;
1932 le->opcode = mss ? (OP_LARGESEND | HW_OWNER) : (OP_PACKET | HW_OWNER);
1933
1934
1935 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
1936 const skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
1937
1938 mapping = skb_frag_dma_map(&hw->pdev->dev, frag, 0,
1939 skb_frag_size(frag), DMA_TO_DEVICE);
1940
1941 if (dma_mapping_error(&hw->pdev->dev, mapping))
1942 goto mapping_unwind;
1943
1944 upper = upper_32_bits(mapping);
1945 if (upper != sky2->tx_last_upper) {
1946 le = get_tx_le(sky2, &slot);
1947 le->addr = cpu_to_le32(upper);
1948 sky2->tx_last_upper = upper;
1949 le->opcode = OP_ADDR64 | HW_OWNER;
1950 }
1951
1952 re = sky2->tx_ring + slot;
1953 re->flags = TX_MAP_PAGE;
1954 dma_unmap_addr_set(re, mapaddr, mapping);
1955 dma_unmap_len_set(re, maplen, skb_frag_size(frag));
1956
1957 le = get_tx_le(sky2, &slot);
1958 le->addr = cpu_to_le32(lower_32_bits(mapping));
1959 le->length = cpu_to_le16(skb_frag_size(frag));
1960 le->ctrl = ctrl;
1961 le->opcode = OP_BUFFER | HW_OWNER;
1962 }
1963
1964 re->skb = skb;
1965 le->ctrl |= EOP;
1966
1967 sky2->tx_prod = slot;
1968
1969 if (tx_avail(sky2) <= MAX_SKB_TX_LE)
1970 netif_stop_queue(dev);
1971
1972 sky2_put_idx(hw, txqaddr[sky2->port], sky2->tx_prod);
1973
1974 return NETDEV_TX_OK;
1975
1976mapping_unwind:
1977 for (i = sky2->tx_prod; i != slot; i = RING_NEXT(i, sky2->tx_ring_size)) {
1978 re = sky2->tx_ring + i;
1979
1980 sky2_tx_unmap(hw->pdev, re);
1981 }
1982
1983mapping_error:
1984 if (net_ratelimit())
1985 dev_warn(&hw->pdev->dev, "%s: tx mapping error\n", dev->name);
1986 dev_kfree_skb(skb);
1987 return NETDEV_TX_OK;
1988}
1989
1990/*
1991 * Free ring elements starting at tx_cons until "done"
1992 *
1993 * NB:
1994 * 1. The hardware will tell us about partial completion of multi-part
1995 *    buffers so make sure not to free the skb too early.
1996 * 2. This may run in parallel with start_xmit because it only
1997 *    looks at the tail of the queue (tx_cons), not
1998 *    the head (tx_prod)
1999 */
2000static void sky2_tx_complete(struct sky2_port *sky2, u16 done)
2001{
2002 struct net_device *dev = sky2->netdev;
2003 unsigned idx;
2004
2005 BUG_ON(done >= sky2->tx_ring_size);
2006
2007 for (idx = sky2->tx_cons; idx != done;
2008 idx = RING_NEXT(idx, sky2->tx_ring_size)) {
2009 struct tx_ring_info *re = sky2->tx_ring + idx;
2010 struct sk_buff *skb = re->skb;
2011
2012 sky2_tx_unmap(sky2->hw->pdev, re);
2013
2014 if (skb) {
2015 netif_printk(sky2, tx_done, KERN_DEBUG, dev,
2016 "tx done %u\n", idx);
2017
2018 u64_stats_update_begin(&sky2->tx_stats.syncp);
2019 ++sky2->tx_stats.packets;
2020 sky2->tx_stats.bytes += skb->len;
2021 u64_stats_update_end(&sky2->tx_stats.syncp);
2022
2023 re->skb = NULL;
2024 dev_kfree_skb_any(skb);
2025
2026 sky2->tx_next = RING_NEXT(idx, sky2->tx_ring_size);
2027 }
2028 }
2029
2030 sky2->tx_cons = idx;
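	/* Publish the new tx_cons before any queue-wake check; this barrier
	 * presumably pairs with the availability test in the transmit path */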
2031 smp_mb();
2032}
2033
2034static void sky2_tx_reset(struct sky2_hw *hw, unsigned port)
2035{
2036 /* Disable Force Sync bit and Enable Alloc bit */
2037 sky2_write8(hw, SK_REG(port, TXA_CTRL),
2038 TXA_DIS_FSYNC | TXA_DIS_ALLOC | TXA_STOP_RC);
2039
2040 /* Stop Interval Timer and Limit Counter of Tx Arbiter */
2041 sky2_write32(hw, SK_REG(port, TXA_ITI_INI), 0L);
2042 sky2_write32(hw, SK_REG(port, TXA_LIM_INI), 0L);
2043
2044 /* Reset the PCI FIFO of the async Tx queue */
2045 sky2_write32(hw, Q_ADDR(txqaddr[port], Q_CSR),
2046 BMU_RST_SET | BMU_FIFO_RST);
2047
2048 /* Reset the Tx prefetch units */
2049 sky2_write32(hw, Y2_QADDR(txqaddr[port], PREF_UNIT_CTRL),
2050 PREF_UNIT_RST_SET);
2051
2052 sky2_write32(hw, RB_ADDR(txqaddr[port], RB_CTRL), RB_RST_SET);
2053 sky2_write8(hw, SK_REG(port, TX_GMF_CTRL_T), GMF_RST_SET);
2054}
2055
2056static void sky2_hw_down(struct sky2_port *sky2)
2057{
2058 struct sky2_hw *hw = sky2->hw;
2059 unsigned port = sky2->port;
2060 u16 ctrl;
2061
2062 /* Force flow control off */
2063 sky2_write8(hw, SK_REG(port, GMAC_CTRL), GMC_PAUSE_OFF);
2064
2065 /* Stop transmitter */
2066 sky2_write32(hw, Q_ADDR(txqaddr[port], Q_CSR), BMU_STOP);
2067 sky2_read32(hw, Q_ADDR(txqaddr[port], Q_CSR));
2068
2069 sky2_write32(hw, RB_ADDR(txqaddr[port], RB_CTRL),
2070 RB_RST_SET | RB_DIS_OP_MD);
2071
2072 ctrl = gma_read16(hw, port, GM_GP_CTRL);
2073 ctrl &= ~(GM_GPCR_TX_ENA | GM_GPCR_RX_ENA);
2074 gma_write16(hw, port, GM_GP_CTRL, ctrl);
2075
2076 sky2_write8(hw, SK_REG(port, GPHY_CTRL), GPC_RST_SET);
2077
2078 /* Workaround shared GMAC reset */
2079 if (!(hw->chip_id == CHIP_ID_YUKON_XL && hw->chip_rev == 0 &&
2080 port == 0 && hw->dev[1] && netif_running(hw->dev[1])))
2081 sky2_write8(hw, SK_REG(port, GMAC_CTRL), GMC_RST_SET);
2082
2083 sky2_write8(hw, SK_REG(port, RX_GMF_CTRL_T), GMF_RST_SET);
2084
2085 /* Force any delayed status interrupt and NAPI */
2086 sky2_write32(hw, STAT_LEV_TIMER_CNT, 0);
2087 sky2_write32(hw, STAT_TX_TIMER_CNT, 0);
2088 sky2_write32(hw, STAT_ISR_TIMER_CNT, 0);
2089 sky2_read8(hw, STAT_ISR_TIMER_CTRL);
2090
2091 sky2_rx_stop(sky2);
2092
2093 spin_lock_bh(&sky2->phy_lock);
2094 sky2_phy_power_down(hw, port);
2095 spin_unlock_bh(&sky2->phy_lock);
2096
2097 sky2_tx_reset(hw, port);
2098
2099 /* Free any pending frames stuck in HW queue */
2100 sky2_tx_complete(sky2, sky2->tx_prod);
2101}
2102
2103/* Network shutdown */
2104static int sky2_down(struct net_device *dev)
2105{
2106 struct sky2_port *sky2 = netdev_priv(dev);
2107 struct sky2_hw *hw = sky2->hw;
2108
2109 /* Never really got started! */
2110 if (!sky2->tx_le)
2111 return 0;
2112
2113 netif_info(sky2, ifdown, dev, "disabling interface\n");
2114
2115 /* Disable port IRQ */
2116 sky2_write32(hw, B0_IMSK,
2117 sky2_read32(hw, B0_IMSK) & ~portirq_msk[sky2->port]);
2118 sky2_read32(hw, B0_IMSK);
2119
2120 if (hw->ports == 1) {
2121 napi_disable(&hw->napi);
2122 free_irq(hw->pdev->irq, hw);
2123 } else {
2124 synchronize_irq(hw->pdev->irq);
2125 napi_synchronize(&hw->napi);
2126 }
2127
2128 sky2_hw_down(sky2);
2129
2130 sky2_free_buffers(sky2);
2131
2132 return 0;
2133}
2134
2135static u16 sky2_phy_speed(const struct sky2_hw *hw, u16 aux)
2136{
2137 if (hw->flags & SKY2_HW_FIBRE_PHY)
2138 return SPEED_1000;
2139
2140 if (!(hw->flags & SKY2_HW_GIGABIT)) {
2141 if (aux & PHY_M_PS_SPEED_100)
2142 return SPEED_100;
2143 else
2144 return SPEED_10;
2145 }
2146
2147 switch (aux & PHY_M_PS_SPEED_MSK) {
2148 case PHY_M_PS_SPEED_1000:
2149 return SPEED_1000;
2150 case PHY_M_PS_SPEED_100:
2151 return SPEED_100;
2152 default:
2153 return SPEED_10;
2154 }
2155}
2156
2157static void sky2_link_up(struct sky2_port *sky2)
2158{
2159 struct sky2_hw *hw = sky2->hw;
2160 unsigned port = sky2->port;
2161 static const char *fc_name[] = {
2162 [FC_NONE] = "none",
2163 [FC_TX] = "tx",
2164 [FC_RX] = "rx",
2165 [FC_BOTH] = "both",
2166 };
2167
2168 sky2_set_ipg(sky2);
2169
2170 sky2_enable_rx_tx(sky2);
2171
2172 gm_phy_write(hw, port, PHY_MARV_INT_MASK, PHY_M_DEF_MSK);
2173
2174 netif_carrier_on(sky2->netdev);
2175
2176 mod_timer(&hw->watchdog_timer, jiffies + 1);
2177
2178 /* Turn on link LED */
2179 sky2_write8(hw, SK_REG(port, LNK_LED_REG),
2180 LINKLED_ON | LINKLED_BLINK_OFF | LINKLED_LINKSYNC_OFF);
2181
2182 netif_info(sky2, link, sky2->netdev,
2183 "Link is up at %d Mbps, %s duplex, flow control %s\n",
2184 sky2->speed,
2185 sky2->duplex == DUPLEX_FULL ? "full" : "half",
2186 fc_name[sky2->flow_status]);
2187}
2188
2189static void sky2_link_down(struct sky2_port *sky2)
2190{
2191 struct sky2_hw *hw = sky2->hw;
2192 unsigned port = sky2->port;
2193 u16 reg;
2194
2195 gm_phy_write(hw, port, PHY_MARV_INT_MASK, 0);
2196
2197 reg = gma_read16(hw, port, GM_GP_CTRL);
2198 reg &= ~(GM_GPCR_RX_ENA | GM_GPCR_TX_ENA);
2199 gma_write16(hw, port, GM_GP_CTRL, reg);
2200
2201 netif_carrier_off(sky2->netdev);
2202
2203 /* Turn off link LED */
2204 sky2_write8(hw, SK_REG(port, LNK_LED_REG), LINKLED_OFF);
2205
2206 netif_info(sky2, link, sky2->netdev, "Link is down\n");
2207
2208 sky2_phy_init(hw, port);
2209}
2210
2211static enum flow_control sky2_flow(int rx, int tx)
2212{
2213 if (rx)
2214 return tx ? FC_BOTH : FC_RX;
2215 else
2216 return tx ? FC_TX : FC_NONE;
2217}
2218
2219static int sky2_autoneg_done(struct sky2_port *sky2, u16 aux)
2220{
2221 struct sky2_hw *hw = sky2->hw;
2222 unsigned port = sky2->port;
2223 u16 advert, lpa;
2224
2225 advert = gm_phy_read(hw, port, PHY_MARV_AUNE_ADV);
2226 lpa = gm_phy_read(hw, port, PHY_MARV_AUNE_LP);
2227 if (lpa & PHY_M_AN_RF) {
2228 netdev_err(sky2->netdev, "remote fault\n");
2229 return -1;
2230 }
2231
2232 if (!(aux & PHY_M_PS_SPDUP_RES)) {
2233 netdev_err(sky2->netdev, "speed/duplex mismatch\n");
2234 return -1;
2235 }
2236
2237 sky2->speed = sky2_phy_speed(hw, aux);
2238 sky2->duplex = (aux & PHY_M_PS_FULL_DUP) ? DUPLEX_FULL : DUPLEX_HALF;
2239
2240	/* Since the pause result bits seem to be in different positions on
2241	 * different chips, look at the registers instead.
2242 */
2243 if (hw->flags & SKY2_HW_FIBRE_PHY) {
2244 /* Shift for bits in fiber PHY */
2245 advert &= ~(ADVERTISE_PAUSE_CAP|ADVERTISE_PAUSE_ASYM);
2246 lpa &= ~(LPA_PAUSE_CAP|LPA_PAUSE_ASYM);
2247
2248 if (advert & ADVERTISE_1000XPAUSE)
2249 advert |= ADVERTISE_PAUSE_CAP;
2250 if (advert & ADVERTISE_1000XPSE_ASYM)
2251 advert |= ADVERTISE_PAUSE_ASYM;
2252 if (lpa & LPA_1000XPAUSE)
2253 lpa |= LPA_PAUSE_CAP;
2254 if (lpa & LPA_1000XPAUSE_ASYM)
2255 lpa |= LPA_PAUSE_ASYM;
2256 }
2257
2258 sky2->flow_status = FC_NONE;
2259 if (advert & ADVERTISE_PAUSE_CAP) {
2260 if (lpa & LPA_PAUSE_CAP)
2261 sky2->flow_status = FC_BOTH;
2262 else if (advert & ADVERTISE_PAUSE_ASYM)
2263 sky2->flow_status = FC_RX;
2264 } else if (advert & ADVERTISE_PAUSE_ASYM) {
2265 if ((lpa & LPA_PAUSE_CAP) && (lpa & LPA_PAUSE_ASYM))
2266 sky2->flow_status = FC_TX;
2267 }
2268
2269 if (sky2->duplex == DUPLEX_HALF && sky2->speed < SPEED_1000 &&
2270 !(hw->chip_id == CHIP_ID_YUKON_EC_U || hw->chip_id == CHIP_ID_YUKON_EX))
2271 sky2->flow_status = FC_NONE;
2272
2273 if (sky2->flow_status & FC_TX)
2274 sky2_write8(hw, SK_REG(port, GMAC_CTRL), GMC_PAUSE_ON);
2275 else
2276 sky2_write8(hw, SK_REG(port, GMAC_CTRL), GMC_PAUSE_OFF);
2277
2278 return 0;
2279}
2280
2281/* Interrupt from PHY */
2282static void sky2_phy_intr(struct sky2_hw *hw, unsigned port)
2283{
2284 struct net_device *dev = hw->dev[port];
2285 struct sky2_port *sky2 = netdev_priv(dev);
2286 u16 istatus, phystat;
2287
2288 if (!netif_running(dev))
2289 return;
2290
2291 spin_lock(&sky2->phy_lock);
2292 istatus = gm_phy_read(hw, port, PHY_MARV_INT_STAT);
2293 phystat = gm_phy_read(hw, port, PHY_MARV_PHY_STAT);
2294
2295 netif_info(sky2, intr, sky2->netdev, "phy interrupt status 0x%x 0x%x\n",
2296 istatus, phystat);
2297
2298 if (istatus & PHY_M_IS_AN_COMPL) {
2299 if (sky2_autoneg_done(sky2, phystat) == 0 &&
2300 !netif_carrier_ok(dev))
2301 sky2_link_up(sky2);
2302 goto out;
2303 }
2304
2305 if (istatus & PHY_M_IS_LSP_CHANGE)
2306 sky2->speed = sky2_phy_speed(hw, phystat);
2307
2308 if (istatus & PHY_M_IS_DUP_CHANGE)
2309 sky2->duplex =
2310 (phystat & PHY_M_PS_FULL_DUP) ? DUPLEX_FULL : DUPLEX_HALF;
2311
2312 if (istatus & PHY_M_IS_LST_CHANGE) {
2313 if (phystat & PHY_M_PS_LINK_UP)
2314 sky2_link_up(sky2);
2315 else
2316 sky2_link_down(sky2);
2317 }
2318out:
2319 spin_unlock(&sky2->phy_lock);
2320}
2321
2322/* Special quick link interrupt (Yukon-2 Optima only) */
2323static void sky2_qlink_intr(struct sky2_hw *hw)
2324{
2325 struct sky2_port *sky2 = netdev_priv(hw->dev[0]);
2326 u32 imask;
2327 u16 phy;
2328
2329 /* disable irq */
2330 imask = sky2_read32(hw, B0_IMSK);
2331 imask &= ~Y2_IS_PHY_QLNK;
2332 sky2_write32(hw, B0_IMSK, imask);
2333
2334 /* reset PHY Link Detect */
2335 phy = sky2_pci_read16(hw, PSM_CONFIG_REG4);
2336 sky2_write8(hw, B2_TST_CTRL1, TST_CFG_WRITE_ON);
2337 sky2_pci_write16(hw, PSM_CONFIG_REG4, phy | 1);
2338 sky2_write8(hw, B2_TST_CTRL1, TST_CFG_WRITE_OFF);
2339
2340 sky2_link_up(sky2);
2341}
2342
2343/* Transmit timeout is only called if we are running, carrier is up
2344 * and tx queue is full (stopped).
2345 */
2346static void sky2_tx_timeout(struct net_device *dev)
2347{
2348 struct sky2_port *sky2 = netdev_priv(dev);
2349 struct sky2_hw *hw = sky2->hw;
2350
2351 netif_err(sky2, timer, dev, "tx timeout\n");
2352
2353 netdev_printk(KERN_DEBUG, dev, "transmit ring %u .. %u report=%u done=%u\n",
2354 sky2->tx_cons, sky2->tx_prod,
2355 sky2_read16(hw, sky2->port == 0 ? STAT_TXA1_RIDX : STAT_TXA2_RIDX),
2356 sky2_read16(hw, Q_ADDR(txqaddr[sky2->port], Q_DONE)));
2357
2358 /* can't restart safely under softirq */
2359 schedule_work(&hw->restart_work);
2360}
2361
2362static int sky2_change_mtu(struct net_device *dev, int new_mtu)
2363{
2364 struct sky2_port *sky2 = netdev_priv(dev);
2365 struct sky2_hw *hw = sky2->hw;
2366 unsigned port = sky2->port;
2367 int err;
2368 u16 ctl, mode;
2369 u32 imask;
2370
2371 /* MTU size outside the spec */
2372 if (new_mtu < ETH_ZLEN || new_mtu > ETH_JUMBO_MTU)
2373 return -EINVAL;
2374
2375 /* MTU > 1500 on yukon FE and FE+ not allowed */
2376 if (new_mtu > ETH_DATA_LEN &&
2377 (hw->chip_id == CHIP_ID_YUKON_FE ||
2378 hw->chip_id == CHIP_ID_YUKON_FE_P))
2379 return -EINVAL;
2380
2381 if (!netif_running(dev)) {
2382 dev->mtu = new_mtu;
2383 netdev_update_features(dev);
2384 return 0;
2385 }
2386
2387 imask = sky2_read32(hw, B0_IMSK);
2388 sky2_write32(hw, B0_IMSK, 0);
2389
2390 dev->trans_start = jiffies; /* prevent tx timeout */
2391 napi_disable(&hw->napi);
2392 netif_tx_disable(dev);
2393
2394 synchronize_irq(hw->pdev->irq);
2395
2396 if (!(hw->flags & SKY2_HW_RAM_BUFFER))
2397 sky2_set_tx_stfwd(hw, port);
2398
2399 ctl = gma_read16(hw, port, GM_GP_CTRL);
2400 gma_write16(hw, port, GM_GP_CTRL, ctl & ~GM_GPCR_RX_ENA);
2401 sky2_rx_stop(sky2);
2402 sky2_rx_clean(sky2);
2403
2404 dev->mtu = new_mtu;
2405 netdev_update_features(dev);
2406
2407 mode = DATA_BLIND_VAL(DATA_BLIND_DEF) | GM_SMOD_VLAN_ENA;
2408 if (sky2->speed > SPEED_100)
2409 mode |= IPG_DATA_VAL(IPG_DATA_DEF_1000);
2410 else
2411 mode |= IPG_DATA_VAL(IPG_DATA_DEF_10_100);
2412
2413 if (dev->mtu > ETH_DATA_LEN)
2414 mode |= GM_SMOD_JUMBO_ENA;
2415
2416 gma_write16(hw, port, GM_SERIAL_MODE, mode);
2417
2418 sky2_write8(hw, RB_ADDR(rxqaddr[port], RB_CTRL), RB_ENA_OP_MD);
2419
2420 err = sky2_alloc_rx_skbs(sky2);
2421 if (!err)
2422 sky2_rx_start(sky2);
2423 else
2424 sky2_rx_clean(sky2);
2425 sky2_write32(hw, B0_IMSK, imask);
2426
2427 sky2_read32(hw, B0_Y2_SP_LISR);
2428 napi_enable(&hw->napi);
2429
2430 if (err)
2431 dev_close(dev);
2432 else {
2433 gma_write16(hw, port, GM_GP_CTRL, ctl);
2434
2435 netif_wake_queue(dev);
2436 }
2437
2438 return err;
2439}
2440
2441/* For small packets, copy the data out and reuse the existing skb for the next receive */
2442static struct sk_buff *receive_copy(struct sky2_port *sky2,
2443 const struct rx_ring_info *re,
2444 unsigned length)
2445{
2446 struct sk_buff *skb;
2447
2448 skb = netdev_alloc_skb_ip_align(sky2->netdev, length);
2449 if (likely(skb)) {
2450 pci_dma_sync_single_for_cpu(sky2->hw->pdev, re->data_addr,
2451 length, PCI_DMA_FROMDEVICE);
2452 skb_copy_from_linear_data(re->skb, skb->data, length);
2453 skb->ip_summed = re->skb->ip_summed;
2454 skb->csum = re->skb->csum;
2455 pci_dma_sync_single_for_device(sky2->hw->pdev, re->data_addr,
2456 length, PCI_DMA_FROMDEVICE);
2457 re->skb->ip_summed = CHECKSUM_NONE;
2458 skb_put(skb, length);
2459 }
2460 return skb;
2461}
2462
2463/* Adjust length of skb with fragments to match received data */
2464static void skb_put_frags(struct sk_buff *skb, unsigned int hdr_space,
2465 unsigned int length)
2466{
2467 int i, num_frags;
2468 unsigned int size;
2469
2470 /* put header into skb */
2471 size = min(length, hdr_space);
2472 skb->tail += size;
2473 skb->len += size;
2474 length -= size;
2475
2476 num_frags = skb_shinfo(skb)->nr_frags;
2477 for (i = 0; i < num_frags; i++) {
2478 skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
2479
2480 if (length == 0) {
2481 /* don't need this page */
2482 __skb_frag_unref(frag);
2483 --skb_shinfo(skb)->nr_frags;
2484 } else {
2485 size = min(length, (unsigned) PAGE_SIZE);
2486
2487 skb_frag_size_set(frag, size);
2488 skb->data_len += size;
2489 skb->truesize += PAGE_SIZE;
2490 skb->len += size;
2491 length -= size;
2492 }
2493 }
2494}
2495
2496/* Normal packet - take the skb from the ring element and replace it with a new one */
2497static struct sk_buff *receive_new(struct sky2_port *sky2,
2498 struct rx_ring_info *re,
2499 unsigned int length)
2500{
2501 struct sk_buff *skb;
2502 struct rx_ring_info nre;
2503 unsigned hdr_space = sky2->rx_data_size;
2504
2505 nre.skb = sky2_rx_alloc(sky2, GFP_ATOMIC);
2506 if (unlikely(!nre.skb))
2507 goto nobuf;
2508
2509 if (sky2_rx_map_skb(sky2->hw->pdev, &nre, hdr_space))
2510 goto nomap;
2511
2512 skb = re->skb;
2513 sky2_rx_unmap_skb(sky2->hw->pdev, re);
2514 prefetch(skb->data);
2515 *re = nre;
2516
2517 if (skb_shinfo(skb)->nr_frags)
2518 skb_put_frags(skb, hdr_space, length);
2519 else
2520 skb_put(skb, length);
2521 return skb;
2522
2523nomap:
2524 dev_kfree_skb(nre.skb);
2525nobuf:
2526 return NULL;
2527}
2528
2529/*
2530 * Receive one packet.
2531 * For larger packets, get a new buffer.
2532 */
2533static struct sk_buff *sky2_receive(struct net_device *dev,
2534 u16 length, u32 status)
2535{
2536 struct sky2_port *sky2 = netdev_priv(dev);
2537 struct rx_ring_info *re = sky2->rx_ring + sky2->rx_next;
2538 struct sk_buff *skb = NULL;
2539 u16 count = (status & GMR_FS_LEN) >> 16;
2540
2541 if (status & GMR_FS_VLAN)
2542 count -= VLAN_HLEN; /* Account for vlan tag */
2543
2544 netif_printk(sky2, rx_status, KERN_DEBUG, dev,
2545 "rx slot %u status 0x%x len %d\n",
2546 sky2->rx_next, status, length);
2547
2548 sky2->rx_next = (sky2->rx_next + 1) % sky2->rx_pending;
2549 prefetch(sky2->rx_ring + sky2->rx_next);
2550
2551	/* This chip has hardware problems that generate bogus status.
2552 * So do only marginal checking and expect higher level protocols
2553 * to handle crap frames.
2554 */
2555 if (sky2->hw->chip_id == CHIP_ID_YUKON_FE_P &&
2556 sky2->hw->chip_rev == CHIP_REV_YU_FE2_A0 &&
2557 length != count)
2558 goto okay;
2559
2560 if (status & GMR_FS_ANY_ERR)
2561 goto error;
2562
2563 if (!(status & GMR_FS_RX_OK))
2564 goto resubmit;
2565
2566 /* if length reported by DMA does not match PHY, packet was truncated */
2567 if (length != count)
2568 goto error;
2569
2570okay:
2571 if (length < copybreak)
2572 skb = receive_copy(sky2, re, length);
2573 else
2574 skb = receive_new(sky2, re, length);
2575
2576 dev->stats.rx_dropped += (skb == NULL);
2577
2578resubmit:
2579 sky2_rx_submit(sky2, re);
2580
2581 return skb;
2582
2583error:
2584 ++dev->stats.rx_errors;
2585
2586 if (net_ratelimit())
2587 netif_info(sky2, rx_err, dev,
2588 "rx error, status 0x%x length %d\n", status, length);
2589
2590 goto resubmit;
2591}
2592
2593/* Transmit complete */
2594static inline void sky2_tx_done(struct net_device *dev, u16 last)
2595{
2596 struct sky2_port *sky2 = netdev_priv(dev);
2597
2598 if (netif_running(dev)) {
2599 sky2_tx_complete(sky2, last);
2600
2601		/* Wake the queue unless it's detached, e.g. when called from sky2_down() */
2602 if (tx_avail(sky2) > MAX_SKB_TX_LE + 4)
2603 netif_wake_queue(dev);
2604 }
2605}
2606
2607static inline void sky2_skb_rx(const struct sky2_port *sky2,
2608 u32 status, struct sk_buff *skb)
2609{
2610 if (status & GMR_FS_VLAN)
2611 __vlan_hwaccel_put_tag(skb, be16_to_cpu(sky2->rx_tag));
2612
2613 if (skb->ip_summed == CHECKSUM_NONE)
2614 netif_receive_skb(skb);
2615 else
2616 napi_gro_receive(&sky2->hw->napi, skb);
2617}
2618
2619static inline void sky2_rx_done(struct sky2_hw *hw, unsigned port,
2620 unsigned packets, unsigned bytes)
2621{
2622 struct net_device *dev = hw->dev[port];
2623 struct sky2_port *sky2 = netdev_priv(dev);
2624
2625 if (packets == 0)
2626 return;
2627
2628 u64_stats_update_begin(&sky2->rx_stats.syncp);
2629 sky2->rx_stats.packets += packets;
2630 sky2->rx_stats.bytes += bytes;
2631 u64_stats_update_end(&sky2->rx_stats.syncp);
2632
2633 dev->last_rx = jiffies;
2634 sky2_rx_update(netdev_priv(dev), rxqaddr[port]);
2635}
2636
2637static void sky2_rx_checksum(struct sky2_port *sky2, u32 status)
2638{
2639	/* If this happens, the driver is assuming the wrong format for this chip type */
2640 BUG_ON(sky2->hw->flags & SKY2_HW_NEW_LE);
2641
2642 /* Both checksum counters are programmed to start at
2643 * the same offset, so unless there is a problem they
2644 * should match. This failure is an early indication that
2645 * hardware receive checksumming won't work.
2646 */
2647 if (likely((u16)(status >> 16) == (u16)status)) {
2648 struct sk_buff *skb = sky2->rx_ring[sky2->rx_next].skb;
2649 skb->ip_summed = CHECKSUM_COMPLETE;
2650 skb->csum = le16_to_cpu(status);
2651 } else {
2652 dev_notice(&sky2->hw->pdev->dev,
2653 "%s: receive checksum problem (status = %#x)\n",
2654 sky2->netdev->name, status);
2655
2656 /* Disable checksum offload
2657		 * It will be re-enabled on the next ndo_set_features, but if it's
2658		 * really broken, it will get disabled again
2659 */
2660 sky2->netdev->features &= ~NETIF_F_RXCSUM;
2661 sky2_write32(sky2->hw, Q_ADDR(rxqaddr[sky2->port], Q_CSR),
2662 BMU_DIS_RX_CHKSUM);
2663 }
2664}
2665
2666static void sky2_rx_hash(struct sky2_port *sky2, u32 status)
2667{
2668 struct sk_buff *skb;
2669
2670 skb = sky2->rx_ring[sky2->rx_next].skb;
2671 skb->rxhash = le32_to_cpu(status);
2672}
2673
2674/* Process status response ring */
2675static int sky2_status_intr(struct sky2_hw *hw, int to_do, u16 idx)
2676{
2677 int work_done = 0;
2678 unsigned int total_bytes[2] = { 0 };
2679 unsigned int total_packets[2] = { 0 };
2680
2681 rmb();
2682 do {
2683 struct sky2_port *sky2;
2684 struct sky2_status_le *le = hw->st_le + hw->st_idx;
2685 unsigned port;
2686 struct net_device *dev;
2687 struct sk_buff *skb;
2688 u32 status;
2689 u16 length;
2690 u8 opcode = le->opcode;
2691
2692 if (!(opcode & HW_OWNER))
2693 break;
2694
2695 hw->st_idx = RING_NEXT(hw->st_idx, hw->st_size);
2696
2697 port = le->css & CSS_LINK_BIT;
2698 dev = hw->dev[port];
2699 sky2 = netdev_priv(dev);
2700 length = le16_to_cpu(le->length);
2701 status = le32_to_cpu(le->status);
2702
2703 le->opcode = 0;
2704 switch (opcode & ~HW_OWNER) {
2705 case OP_RXSTAT:
2706 total_packets[port]++;
2707 total_bytes[port] += length;
2708
2709 skb = sky2_receive(dev, length, status);
2710 if (!skb)
2711 break;
2712
2713 /* This chip reports checksum status differently */
2714 if (hw->flags & SKY2_HW_NEW_LE) {
2715 if ((dev->features & NETIF_F_RXCSUM) &&
2716 (le->css & (CSS_ISIPV4 | CSS_ISIPV6)) &&
2717 (le->css & CSS_TCPUDPCSOK))
2718 skb->ip_summed = CHECKSUM_UNNECESSARY;
2719 else
2720 skb->ip_summed = CHECKSUM_NONE;
2721 }
2722
2723 skb->protocol = eth_type_trans(skb, dev);
2724
2725 sky2_skb_rx(sky2, status, skb);
2726
2727 /* Stop after net poll weight */
2728 if (++work_done >= to_do)
2729 goto exit_loop;
2730 break;
2731
2732 case OP_RXVLAN:
2733 sky2->rx_tag = length;
2734 break;
2735
2736 case OP_RXCHKSVLAN:
2737 sky2->rx_tag = length;
2738 /* fall through */
2739 case OP_RXCHKS:
2740 if (likely(dev->features & NETIF_F_RXCSUM))
2741 sky2_rx_checksum(sky2, status);
2742 break;
2743
2744 case OP_RSS_HASH:
2745 sky2_rx_hash(sky2, status);
2746 break;
2747
2748 case OP_TXINDEXLE:
2749 /* TX index reports status for both ports */
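			/* (port 0's index is in status[11:0]; port 1's is
			 * rebuilt from status[31:24] as the low byte and
			 * length[3:0] as the high nibble) */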
2750 sky2_tx_done(hw->dev[0], status & 0xfff);
2751 if (hw->dev[1])
2752 sky2_tx_done(hw->dev[1],
2753 ((status >> 24) & 0xff)
2754 | (u16)(length & 0xf) << 8);
2755 break;
2756
2757 default:
2758 if (net_ratelimit())
2759 pr_warning("unknown status opcode 0x%x\n", opcode);
2760 }
2761 } while (hw->st_idx != idx);
2762
2763 /* Fully processed status ring so clear irq */
2764 sky2_write32(hw, STAT_CTRL, SC_STAT_CLR_IRQ);
2765
2766exit_loop:
2767 sky2_rx_done(hw, 0, total_packets[0], total_bytes[0]);
2768 sky2_rx_done(hw, 1, total_packets[1], total_bytes[1]);
2769
2770 return work_done;
2771}
2772
2773static void sky2_hw_error(struct sky2_hw *hw, unsigned port, u32 status)
2774{
2775 struct net_device *dev = hw->dev[port];
2776
2777 if (net_ratelimit())
2778 netdev_info(dev, "hw error interrupt status 0x%x\n", status);
2779
2780 if (status & Y2_IS_PAR_RD1) {
2781 if (net_ratelimit())
2782 netdev_err(dev, "ram data read parity error\n");
2783 /* Clear IRQ */
2784 sky2_write16(hw, RAM_BUFFER(port, B3_RI_CTRL), RI_CLR_RD_PERR);
2785 }
2786
2787 if (status & Y2_IS_PAR_WR1) {
2788 if (net_ratelimit())
2789 netdev_err(dev, "ram data write parity error\n");
2790
2791 sky2_write16(hw, RAM_BUFFER(port, B3_RI_CTRL), RI_CLR_WR_PERR);
2792 }
2793
2794 if (status & Y2_IS_PAR_MAC1) {
2795 if (net_ratelimit())
2796 netdev_err(dev, "MAC parity error\n");
2797 sky2_write8(hw, SK_REG(port, TX_GMF_CTRL_T), GMF_CLI_TX_PE);
2798 }
2799
2800 if (status & Y2_IS_PAR_RX1) {
2801 if (net_ratelimit())
2802 netdev_err(dev, "RX parity error\n");
2803 sky2_write32(hw, Q_ADDR(rxqaddr[port], Q_CSR), BMU_CLR_IRQ_PAR);
2804 }
2805
2806 if (status & Y2_IS_TCP_TXA1) {
2807 if (net_ratelimit())
2808 netdev_err(dev, "TCP segmentation error\n");
2809 sky2_write32(hw, Q_ADDR(txqaddr[port], Q_CSR), BMU_CLR_IRQ_TCP);
2810 }
2811}
2812
2813static void sky2_hw_intr(struct sky2_hw *hw)
2814{
2815 struct pci_dev *pdev = hw->pdev;
2816 u32 status = sky2_read32(hw, B0_HWE_ISRC);
2817 u32 hwmsk = sky2_read32(hw, B0_HWE_IMSK);
2818
2819 status &= hwmsk;
2820
2821 if (status & Y2_IS_TIST_OV)
2822 sky2_write8(hw, GMAC_TI_ST_CTRL, GMT_ST_CLR_IRQ);
2823
2824 if (status & (Y2_IS_MST_ERR | Y2_IS_IRQ_STAT)) {
2825 u16 pci_err;
2826
2827 sky2_write8(hw, B2_TST_CTRL1, TST_CFG_WRITE_ON);
2828 pci_err = sky2_pci_read16(hw, PCI_STATUS);
2829 if (net_ratelimit())
2830 dev_err(&pdev->dev, "PCI hardware error (0x%x)\n",
2831 pci_err);
2832
2833 sky2_pci_write16(hw, PCI_STATUS,
2834 pci_err | PCI_STATUS_ERROR_BITS);
2835 sky2_write8(hw, B2_TST_CTRL1, TST_CFG_WRITE_OFF);
2836 }
2837
2838 if (status & Y2_IS_PCI_EXP) {
2839 /* PCI-Express uncorrectable Error occurred */
2840 u32 err;
2841
2842 sky2_write8(hw, B2_TST_CTRL1, TST_CFG_WRITE_ON);
2843 err = sky2_read32(hw, Y2_CFG_AER + PCI_ERR_UNCOR_STATUS);
2844 sky2_write32(hw, Y2_CFG_AER + PCI_ERR_UNCOR_STATUS,
2845 0xfffffffful);
2846 if (net_ratelimit())
2847 dev_err(&pdev->dev, "PCI Express error (0x%x)\n", err);
2848
2849 sky2_read32(hw, Y2_CFG_AER + PCI_ERR_UNCOR_STATUS);
2850 sky2_write8(hw, B2_TST_CTRL1, TST_CFG_WRITE_OFF);
2851 }
2852
2853 if (status & Y2_HWE_L1_MASK)
2854 sky2_hw_error(hw, 0, status);
2855 status >>= 8;
2856 if (status & Y2_HWE_L1_MASK)
2857 sky2_hw_error(hw, 1, status);
2858}
2859
2860static void sky2_mac_intr(struct sky2_hw *hw, unsigned port)
2861{
2862 struct net_device *dev = hw->dev[port];
2863 struct sky2_port *sky2 = netdev_priv(dev);
2864 u8 status = sky2_read8(hw, SK_REG(port, GMAC_IRQ_SRC));
2865
2866 netif_info(sky2, intr, dev, "mac interrupt status 0x%x\n", status);
2867
2868 if (status & GM_IS_RX_CO_OV)
2869 gma_read16(hw, port, GM_RX_IRQ_SRC);
2870
2871 if (status & GM_IS_TX_CO_OV)
2872 gma_read16(hw, port, GM_TX_IRQ_SRC);
2873
2874 if (status & GM_IS_RX_FF_OR) {
2875 ++dev->stats.rx_fifo_errors;
2876 sky2_write8(hw, SK_REG(port, RX_GMF_CTRL_T), GMF_CLI_RX_FO);
2877 }
2878
2879 if (status & GM_IS_TX_FF_UR) {
2880 ++dev->stats.tx_fifo_errors;
2881 sky2_write8(hw, SK_REG(port, TX_GMF_CTRL_T), GMF_CLI_TX_FU);
2882 }
2883}
2884
2885 /* This should never happen; it is a bug. */
2886static void sky2_le_error(struct sky2_hw *hw, unsigned port, u16 q)
2887{
2888 struct net_device *dev = hw->dev[port];
2889 u16 idx = sky2_read16(hw, Y2_QADDR(q, PREF_UNIT_GET_IDX));
2890
2891 dev_err(&hw->pdev->dev, "%s: descriptor error q=%#x get=%u put=%u\n",
2892 dev->name, (unsigned) q, (unsigned) idx,
2893 (unsigned) sky2_read16(hw, Y2_QADDR(q, PREF_UNIT_PUT_IDX)));
2894
2895 sky2_write32(hw, Q_ADDR(q, Q_CSR), BMU_CLR_IRQ_CHK);
2896}
2897
2898static int sky2_rx_hung(struct net_device *dev)
2899{
2900 struct sky2_port *sky2 = netdev_priv(dev);
2901 struct sky2_hw *hw = sky2->hw;
2902 unsigned port = sky2->port;
2903 unsigned rxq = rxqaddr[port];
2904 u32 mac_rp = sky2_read32(hw, SK_REG(port, RX_GMF_RP));
2905 u8 mac_lev = sky2_read8(hw, SK_REG(port, RX_GMF_RLEV));
2906 u8 fifo_rp = sky2_read8(hw, Q_ADDR(rxq, Q_RP));
2907 u8 fifo_lev = sky2_read8(hw, Q_ADDR(rxq, Q_RL));
2908
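 /* Compare against the snapshot saved on the previous watchdog tick:
 * no new receives while the FIFO level holds steady or rises
 * indicates a hang.
 */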
2909 /* If idle and MAC or PCI is stuck */
2910 if (sky2->check.last == dev->last_rx &&
2911 ((mac_rp == sky2->check.mac_rp &&
2912 mac_lev != 0 && mac_lev >= sky2->check.mac_lev) ||
2913 /* Check if the PCI RX FIFO is hung */
2914 (fifo_rp == sky2->check.fifo_rp &&
2915 fifo_lev != 0 && fifo_lev >= sky2->check.fifo_lev))) {
2916 netdev_printk(KERN_DEBUG, dev,
2917 "hung mac %d:%d fifo %d (%d:%d)\n",
2918 mac_lev, mac_rp, fifo_lev,
2919 fifo_rp, sky2_read8(hw, Q_ADDR(rxq, Q_WP)));
2920 return 1;
2921 } else {
2922 sky2->check.last = dev->last_rx;
2923 sky2->check.mac_rp = mac_rp;
2924 sky2->check.mac_lev = mac_lev;
2925 sky2->check.fifo_rp = fifo_rp;
2926 sky2->check.fifo_lev = fifo_lev;
2927 return 0;
2928 }
2929}
2930
2931static void sky2_watchdog(unsigned long arg)
2932{
2933 struct sky2_hw *hw = (struct sky2_hw *) arg;
2934
2935 /* Check for lost IRQ once a second */
2936 if (sky2_read32(hw, B0_ISRC)) {
2937 napi_schedule(&hw->napi);
2938 } else {
2939 int i, active = 0;
2940
2941 for (i = 0; i < hw->ports; i++) {
2942 struct net_device *dev = hw->dev[i];
2943 if (!netif_running(dev))
2944 continue;
2945 ++active;
2946
2947 /* For chips with Rx FIFO, check if stuck */
2948 if ((hw->flags & SKY2_HW_RAM_BUFFER) &&
2949 sky2_rx_hung(dev)) {
2950 netdev_info(dev, "receiver hang detected\n");
2951 schedule_work(&hw->restart_work);
2952 return;
2953 }
2954 }
2955
2956 if (active == 0)
2957 return;
2958 }
2959
2960 mod_timer(&hw->watchdog_timer, round_jiffies(jiffies + HZ));
2961}
2962
2963/* Hardware/software error handling */
2964static void sky2_err_intr(struct sky2_hw *hw, u32 status)
2965{
2966 if (net_ratelimit())
2967 dev_warn(&hw->pdev->dev, "error interrupt status=%#x\n", status);
2968
2969 if (status & Y2_IS_HW_ERR)
2970 sky2_hw_intr(hw);
2971
2972 if (status & Y2_IS_IRQ_MAC1)
2973 sky2_mac_intr(hw, 0);
2974
2975 if (status & Y2_IS_IRQ_MAC2)
2976 sky2_mac_intr(hw, 1);
2977
2978 if (status & Y2_IS_CHK_RX1)
2979 sky2_le_error(hw, 0, Q_R1);
2980
2981 if (status & Y2_IS_CHK_RX2)
2982 sky2_le_error(hw, 1, Q_R2);
2983
2984 if (status & Y2_IS_CHK_TXA1)
2985 sky2_le_error(hw, 0, Q_XA1);
2986
2987 if (status & Y2_IS_CHK_TXA2)
2988 sky2_le_error(hw, 1, Q_XA2);
2989}
2990
2991static int sky2_poll(struct napi_struct *napi, int work_limit)
2992{
2993 struct sky2_hw *hw = container_of(napi, struct sky2_hw, napi);
2994 u32 status = sky2_read32(hw, B0_Y2_SP_EISR);
2995 int work_done = 0;
2996 u16 idx;
2997
2998 if (unlikely(status & Y2_IS_ERROR))
2999 sky2_err_intr(hw, status);
3000
3001 if (status & Y2_IS_IRQ_PHY1)
3002 sky2_phy_intr(hw, 0);
3003
3004 if (status & Y2_IS_IRQ_PHY2)
3005 sky2_phy_intr(hw, 1);
3006
3007 if (status & Y2_IS_PHY_QLNK)
3008 sky2_qlink_intr(hw);
3009
3010 while ((idx = sky2_read16(hw, STAT_PUT_IDX)) != hw->st_idx) {
3011 work_done += sky2_status_intr(hw, work_limit - work_done, idx);
3012
3013 if (work_done >= work_limit)
3014 goto done;
3015 }
3016
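 /* All work done: reading the last-ISR register re-arms the chip
 * interrupt that the ISRC2 read in sky2_intr() masked.
 */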
3017 napi_complete(napi);
3018 sky2_read32(hw, B0_Y2_SP_LISR);
3019done:
3020
3021 return work_done;
3022}
3023
3024static irqreturn_t sky2_intr(int irq, void *dev_id)
3025{
3026 struct sky2_hw *hw = dev_id;
3027 u32 status;
3028
3029 /* Reading this register masks interrupts as a side effect */
3030 status = sky2_read32(hw, B0_Y2_SP_ISRC2);
3031 if (status == 0 || status == ~0)
3032 return IRQ_NONE;
3033
3034 prefetch(&hw->st_le[hw->st_idx]);
3035
3036 napi_schedule(&hw->napi);
3037
3038 return IRQ_HANDLED;
3039}
3040
3041#ifdef CONFIG_NET_POLL_CONTROLLER
3042static void sky2_netpoll(struct net_device *dev)
3043{
3044 struct sky2_port *sky2 = netdev_priv(dev);
3045
3046 napi_schedule(&sky2->hw->napi);
3047}
3048#endif
3049
3050/* Chip internal frequency for clock calculations */
3051static u32 sky2_mhz(const struct sky2_hw *hw)
3052{
3053 switch (hw->chip_id) {
3054 case CHIP_ID_YUKON_EC:
3055 case CHIP_ID_YUKON_EC_U:
3056 case CHIP_ID_YUKON_EX:
3057 case CHIP_ID_YUKON_SUPR:
3058 case CHIP_ID_YUKON_UL_2:
3059 case CHIP_ID_YUKON_OPT:
3060 case CHIP_ID_YUKON_PRM:
3061 case CHIP_ID_YUKON_OP_2:
3062 return 125;
3063
3064 case CHIP_ID_YUKON_FE:
3065 return 100;
3066
3067 case CHIP_ID_YUKON_FE_P:
3068 return 50;
3069
3070 case CHIP_ID_YUKON_XL:
3071 return 156;
3072
3073 default:
3074 BUG();
3075 }
3076}
3077
3078static inline u32 sky2_us2clk(const struct sky2_hw *hw, u32 us)
3079{
3080 return sky2_mhz(hw) * us;
3081}
3082
3083static inline u32 sky2_clk2us(const struct sky2_hw *hw, u32 clk)
3084{
3085 return clk / sky2_mhz(hw);
3086}
3087
3088
3089static int __devinit sky2_init(struct sky2_hw *hw)
3090{
3091 u8 t8;
3092
3093 /* Enable all clocks and check for bad PCI access */
3094 sky2_pci_write32(hw, PCI_DEV_REG3, 0);
3095
3096 sky2_write8(hw, B0_CTST, CS_RST_CLR);
3097
3098 hw->chip_id = sky2_read8(hw, B2_CHIP_ID);
3099 hw->chip_rev = (sky2_read8(hw, B2_MAC_CFG) & CFG_CHIP_R_MSK) >> 4;
3100
3101 switch (hw->chip_id) {
3102 case CHIP_ID_YUKON_XL:
3103 hw->flags = SKY2_HW_GIGABIT | SKY2_HW_NEWER_PHY;
3104 if (hw->chip_rev < CHIP_REV_YU_XL_A2)
3105 hw->flags |= SKY2_HW_RSS_BROKEN;
3106 break;
3107
3108 case CHIP_ID_YUKON_EC_U:
3109 hw->flags = SKY2_HW_GIGABIT
3110 | SKY2_HW_NEWER_PHY
3111 | SKY2_HW_ADV_POWER_CTL;
3112 break;
3113
3114 case CHIP_ID_YUKON_EX:
3115 hw->flags = SKY2_HW_GIGABIT
3116 | SKY2_HW_NEWER_PHY
3117 | SKY2_HW_NEW_LE
3118 | SKY2_HW_ADV_POWER_CTL
3119 | SKY2_HW_RSS_CHKSUM;
3120
3121 /* New transmit checksum */
3122 if (hw->chip_rev != CHIP_REV_YU_EX_B0)
3123 hw->flags |= SKY2_HW_AUTO_TX_SUM;
3124 break;
3125
3126 case CHIP_ID_YUKON_EC:
3127 /* This rev is really old, and requires untested workarounds */
3128 if (hw->chip_rev == CHIP_REV_YU_EC_A1) {
3129 dev_err(&hw->pdev->dev, "unsupported revision Yukon-EC rev A1\n");
3130 return -EOPNOTSUPP;
3131 }
3132 hw->flags = SKY2_HW_GIGABIT | SKY2_HW_RSS_BROKEN;
3133 break;
3134
3135 case CHIP_ID_YUKON_FE:
3136 hw->flags = SKY2_HW_RSS_BROKEN;
3137 break;
3138
3139 case CHIP_ID_YUKON_FE_P:
3140 hw->flags = SKY2_HW_NEWER_PHY
3141 | SKY2_HW_NEW_LE
3142 | SKY2_HW_AUTO_TX_SUM
3143 | SKY2_HW_ADV_POWER_CTL;
3144
3145 /* The workaround for the status quirk conflicts with VLAN tag detection. */
3146 if (hw->chip_rev == CHIP_REV_YU_FE2_A0)
3147 hw->flags |= SKY2_HW_VLAN_BROKEN | SKY2_HW_RSS_CHKSUM;
3148 break;
3149
3150 case CHIP_ID_YUKON_SUPR:
3151 hw->flags = SKY2_HW_GIGABIT
3152 | SKY2_HW_NEWER_PHY
3153 | SKY2_HW_NEW_LE
3154 | SKY2_HW_AUTO_TX_SUM
3155 | SKY2_HW_ADV_POWER_CTL;
3156
3157 if (hw->chip_rev == CHIP_REV_YU_SU_A0)
3158 hw->flags |= SKY2_HW_RSS_CHKSUM;
3159 break;
3160
3161 case CHIP_ID_YUKON_UL_2:
3162 hw->flags = SKY2_HW_GIGABIT
3163 | SKY2_HW_ADV_POWER_CTL;
3164 break;
3165
3166 case CHIP_ID_YUKON_OPT:
3167 case CHIP_ID_YUKON_PRM:
3168 case CHIP_ID_YUKON_OP_2:
3169 hw->flags = SKY2_HW_GIGABIT
3170 | SKY2_HW_NEW_LE
3171 | SKY2_HW_ADV_POWER_CTL;
3172 break;
3173
3174 default:
3175 dev_err(&hw->pdev->dev, "unsupported chip type 0x%x\n",
3176 hw->chip_id);
3177 return -EOPNOTSUPP;
3178 }
3179
3180 hw->pmd_type = sky2_read8(hw, B2_PMD_TYP);
3181 if (hw->pmd_type == 'L' || hw->pmd_type == 'S' || hw->pmd_type == 'P')
3182 hw->flags |= SKY2_HW_FIBRE_PHY;
3183
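 /* Detect a second MAC: both dual-MAC config bits must be set and
 * link 2 must not be strapped inactive.
 */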
3184 hw->ports = 1;
3185 t8 = sky2_read8(hw, B2_Y2_HW_RES);
3186 if ((t8 & CFG_DUAL_MAC_MSK) == CFG_DUAL_MAC_MSK) {
3187 if (!(sky2_read8(hw, B2_Y2_CLK_GATE) & Y2_STATUS_LNK2_INAC))
3188 ++hw->ports;
3189 }
3190
3191 if (sky2_read8(hw, B2_E_0))
3192 hw->flags |= SKY2_HW_RAM_BUFFER;
3193
3194 return 0;
3195}
3196
3197static void sky2_reset(struct sky2_hw *hw)
3198{
3199 struct pci_dev *pdev = hw->pdev;
3200 u16 status;
3201 int i;
3202 u32 hwe_mask = Y2_HWE_ALL_MASK;
3203
3204 /* disable ASF */
3205 if (hw->chip_id == CHIP_ID_YUKON_EX
3206 || hw->chip_id == CHIP_ID_YUKON_SUPR) {
3207 sky2_write32(hw, CPU_WDOG, 0);
3208 status = sky2_read16(hw, HCU_CCSR);
3209 status &= ~(HCU_CCSR_AHB_RST | HCU_CCSR_CPU_RST_MODE |
3210 HCU_CCSR_UC_STATE_MSK);
3211 /*
3212 * CPU clock divider shouldn't be used because
3213 * - ASF firmware may malfunction
3214 * - Yukon-Supreme: Parallel FLASH doesn't support divided clocks
3215 */
3216 status &= ~HCU_CCSR_CPU_CLK_DIVIDE_MSK;
3217 sky2_write16(hw, HCU_CCSR, status);
3218 sky2_write32(hw, CPU_WDOG, 0);
3219 } else
3220 sky2_write8(hw, B28_Y2_ASF_STAT_CMD, Y2_ASF_RESET);
3221 sky2_write16(hw, B0_CTST, Y2_ASF_DISABLE);
3222
3223 /* do a SW reset */
3224 sky2_write8(hw, B0_CTST, CS_RST_SET);
3225 sky2_write8(hw, B0_CTST, CS_RST_CLR);
3226
3227 /* allow writes to PCI config */
3228 sky2_write8(hw, B2_TST_CTRL1, TST_CFG_WRITE_ON);
3229
3230 /* clear PCI errors, if any */
3231 status = sky2_pci_read16(hw, PCI_STATUS);
3232 status |= PCI_STATUS_ERROR_BITS;
3233 sky2_pci_write16(hw, PCI_STATUS, status);
3234
3235 sky2_write8(hw, B0_CTST, CS_MRST_CLR);
3236
3237 if (pci_is_pcie(pdev)) {
3238 sky2_write32(hw, Y2_CFG_AER + PCI_ERR_UNCOR_STATUS,
3239 0xfffffffful);
3240
3241 /* If error bit is stuck on ignore it */
3242 if (sky2_read32(hw, B0_HWE_ISRC) & Y2_IS_PCI_EXP)
3243 dev_info(&pdev->dev, "ignoring stuck error report bit\n");
3244 else
3245 hwe_mask |= Y2_IS_PCI_EXP;
3246 }
3247
3248 sky2_power_on(hw);
3249 sky2_write8(hw, B2_TST_CTRL1, TST_CFG_WRITE_OFF);
3250
3251 for (i = 0; i < hw->ports; i++) {
3252 sky2_write8(hw, SK_REG(i, GMAC_LINK_CTRL), GMLC_RST_SET);
3253 sky2_write8(hw, SK_REG(i, GMAC_LINK_CTRL), GMLC_RST_CLR);
3254
3255 if (hw->chip_id == CHIP_ID_YUKON_EX ||
3256 hw->chip_id == CHIP_ID_YUKON_SUPR)
3257 sky2_write16(hw, SK_REG(i, GMAC_CTRL),
3258 GMC_BYP_MACSECRX_ON | GMC_BYP_MACSECTX_ON
3259 | GMC_BYP_RETR_ON);
3260
3261 }
3262
3263 if (hw->chip_id == CHIP_ID_YUKON_SUPR && hw->chip_rev > CHIP_REV_YU_SU_B0) {
3264 /* enable MACSec clock gating */
3265 sky2_pci_write32(hw, PCI_DEV_REG3, P_CLK_MACSEC_DIS);
3266 }
3267
3268 if (hw->chip_id == CHIP_ID_YUKON_OPT ||
3269 hw->chip_id == CHIP_ID_YUKON_PRM ||
3270 hw->chip_id == CHIP_ID_YUKON_OP_2) {
3271 u16 reg;
3272 u32 msk;
3273
3274 if (hw->chip_id == CHIP_ID_YUKON_OPT && hw->chip_rev == 0) {
3275 /* disable PCI-E PHY power down (set PHY reg 0x80, bit 7) */
3276 sky2_write32(hw, Y2_PEX_PHY_DATA, (0x80UL << 16) | (1 << 7));
3277
3278 /* set PHY Link Detect Timer to 1.1 second (11x 100ms) */
3279 reg = 10;
3280
3281 /* re-enable PEX PM in PEX PHY debug reg. 8 (clear bit 12) */
3282 sky2_write32(hw, Y2_PEX_PHY_DATA, PEX_DB_ACCESS | (0x08UL << 16));
3283 } else {
3284 /* set PHY Link Detect Timer to 0.4 second (4x 100ms) */
3285 reg = 3;
3286 }
3287
3288 reg <<= PSM_CONFIG_REG4_TIMER_PHY_LINK_DETECT_BASE;
3289 reg |= PSM_CONFIG_REG4_RST_PHY_LINK_DETECT;
3290
3291 /* reset PHY Link Detect */
3292 sky2_write8(hw, B2_TST_CTRL1, TST_CFG_WRITE_ON);
3293 sky2_pci_write16(hw, PSM_CONFIG_REG4, reg);
3294
3295 /* enable PHY Quick Link */
3296 msk = sky2_read32(hw, B0_IMSK);
3297 msk |= Y2_IS_PHY_QLNK;
3298 sky2_write32(hw, B0_IMSK, msk);
3299
3300 /* check if PSMv2 was running before */
3301 reg = sky2_pci_read16(hw, PSM_CONFIG_REG3);
3302 if (reg & PCI_EXP_LNKCTL_ASPMC)
3303 /* restore the PCIe Link Control register */
3304 sky2_pci_write16(hw, pdev->pcie_cap + PCI_EXP_LNKCTL,
3305 reg);
3306
3307 sky2_write8(hw, B2_TST_CTRL1, TST_CFG_WRITE_OFF);
3308
3309 /* re-enable PEX PM in PEX PHY debug reg. 8 (clear bit 12) */
3310 sky2_write32(hw, Y2_PEX_PHY_DATA, PEX_DB_ACCESS | (0x08UL << 16));
3311 }
3312
3313 /* Clear I2C IRQ noise */
3314 sky2_write32(hw, B2_I2C_IRQ, 1);
3315
3316 /* turn off hardware timer (unused) */
3317 sky2_write8(hw, B2_TI_CTRL, TIM_STOP);
3318 sky2_write8(hw, B2_TI_CTRL, TIM_CLR_IRQ);
3319
3320 /* Turn off descriptor polling */
3321 sky2_write32(hw, B28_DPT_CTRL, DPT_STOP);
3322
3323 /* Turn off receive timestamp */
3324 sky2_write8(hw, GMAC_TI_ST_CTRL, GMT_ST_STOP);
3325 sky2_write8(hw, GMAC_TI_ST_CTRL, GMT_ST_CLR_IRQ);
3326
3327 /* enable the Tx Arbiters */
3328 for (i = 0; i < hw->ports; i++)
3329 sky2_write8(hw, SK_REG(i, TXA_CTRL), TXA_ENA_ARB);
3330
3331 /* Initialize ram interface */
3332 for (i = 0; i < hw->ports; i++) {
3333 sky2_write8(hw, RAM_BUFFER(i, B3_RI_CTRL), RI_RST_CLR);
3334
3335 sky2_write8(hw, RAM_BUFFER(i, B3_RI_WTO_R1), SK_RI_TO_53);
3336 sky2_write8(hw, RAM_BUFFER(i, B3_RI_WTO_XA1), SK_RI_TO_53);
3337 sky2_write8(hw, RAM_BUFFER(i, B3_RI_WTO_XS1), SK_RI_TO_53);
3338 sky2_write8(hw, RAM_BUFFER(i, B3_RI_RTO_R1), SK_RI_TO_53);
3339 sky2_write8(hw, RAM_BUFFER(i, B3_RI_RTO_XA1), SK_RI_TO_53);
3340 sky2_write8(hw, RAM_BUFFER(i, B3_RI_RTO_XS1), SK_RI_TO_53);
3341 sky2_write8(hw, RAM_BUFFER(i, B3_RI_WTO_R2), SK_RI_TO_53);
3342 sky2_write8(hw, RAM_BUFFER(i, B3_RI_WTO_XA2), SK_RI_TO_53);
3343 sky2_write8(hw, RAM_BUFFER(i, B3_RI_WTO_XS2), SK_RI_TO_53);
3344 sky2_write8(hw, RAM_BUFFER(i, B3_RI_RTO_R2), SK_RI_TO_53);
3345 sky2_write8(hw, RAM_BUFFER(i, B3_RI_RTO_XA2), SK_RI_TO_53);
3346 sky2_write8(hw, RAM_BUFFER(i, B3_RI_RTO_XS2), SK_RI_TO_53);
3347 }
3348
3349 sky2_write32(hw, B0_HWE_IMSK, hwe_mask);
3350
3351 for (i = 0; i < hw->ports; i++)
3352 sky2_gmac_reset(hw, i);
3353
3354 memset(hw->st_le, 0, hw->st_size * sizeof(struct sky2_status_le));
3355 hw->st_idx = 0;
3356
3357 sky2_write32(hw, STAT_CTRL, SC_STAT_RST_SET);
3358 sky2_write32(hw, STAT_CTRL, SC_STAT_RST_CLR);
3359
3360 sky2_write32(hw, STAT_LIST_ADDR_LO, hw->st_dma);
3361 sky2_write32(hw, STAT_LIST_ADDR_HI, (u64) hw->st_dma >> 32);
3362
3363 /* Set the list last index */
3364 sky2_write16(hw, STAT_LAST_IDX, hw->st_size - 1);
3365
3366 sky2_write16(hw, STAT_TX_IDX_TH, 10);
3367 sky2_write8(hw, STAT_FIFO_WM, 16);
3368
3369 /* set Status-FIFO ISR watermark */
3370 if (hw->chip_id == CHIP_ID_YUKON_XL && hw->chip_rev == 0)
3371 sky2_write8(hw, STAT_FIFO_ISR_WM, 4);
3372 else
3373 sky2_write8(hw, STAT_FIFO_ISR_WM, 16);
3374
3375 sky2_write32(hw, STAT_TX_TIMER_INI, sky2_us2clk(hw, 1000));
3376 sky2_write32(hw, STAT_ISR_TIMER_INI, sky2_us2clk(hw, 20));
3377 sky2_write32(hw, STAT_LEV_TIMER_INI, sky2_us2clk(hw, 100));
3378
3379 /* enable status unit */
3380 sky2_write32(hw, STAT_CTRL, SC_STAT_OP_ON);
3381
3382 sky2_write8(hw, STAT_TX_TIMER_CTRL, TIM_START);
3383 sky2_write8(hw, STAT_LEV_TIMER_CTRL, TIM_START);
3384 sky2_write8(hw, STAT_ISR_TIMER_CTRL, TIM_START);
3385}
3386
3387/* Take device down (offline).
3388 * Equivalent to doing dev_stop() but this does not
3389 * inform upper layers of the transition.
3390 */
3391static void sky2_detach(struct net_device *dev)
3392{
3393 if (netif_running(dev)) {
3394 netif_tx_lock(dev);
3395 netif_device_detach(dev); /* stop txq */
3396 netif_tx_unlock(dev);
3397 sky2_down(dev);
3398 }
3399}
3400
3401/* Bring device back after doing sky2_detach */
3402static int sky2_reattach(struct net_device *dev)
3403{
3404 int err = 0;
3405
3406 if (netif_running(dev)) {
3407 err = sky2_up(dev);
3408 if (err) {
3409 netdev_info(dev, "could not restart %d\n", err);
3410 dev_close(dev);
3411 } else {
3412 netif_device_attach(dev);
3413 sky2_set_multicast(dev);
3414 }
3415 }
3416
3417 return err;
3418}
3419
3420static void sky2_all_down(struct sky2_hw *hw)
3421{
3422 int i;
3423
3424 sky2_read32(hw, B0_IMSK);
3425 sky2_write32(hw, B0_IMSK, 0);
3426 synchronize_irq(hw->pdev->irq);
3427 napi_disable(&hw->napi);
3428
3429 for (i = 0; i < hw->ports; i++) {
3430 struct net_device *dev = hw->dev[i];
3431 struct sky2_port *sky2 = netdev_priv(dev);
3432
3433 if (!netif_running(dev))
3434 continue;
3435
3436 netif_carrier_off(dev);
3437 netif_tx_disable(dev);
3438 sky2_hw_down(sky2);
3439 }
3440}
3441
3442static void sky2_all_up(struct sky2_hw *hw)
3443{
3444 u32 imask = Y2_IS_BASE;
3445 int i;
3446
3447 for (i = 0; i < hw->ports; i++) {
3448 struct net_device *dev = hw->dev[i];
3449 struct sky2_port *sky2 = netdev_priv(dev);
3450
3451 if (!netif_running(dev))
3452 continue;
3453
3454 sky2_hw_up(sky2);
3455 sky2_set_multicast(dev);
3456 imask |= portirq_msk[i];
3457 netif_wake_queue(dev);
3458 }
3459
3460 sky2_write32(hw, B0_IMSK, imask);
3461 sky2_read32(hw, B0_IMSK);
3462
3463 sky2_read32(hw, B0_Y2_SP_LISR);
3464 napi_enable(&hw->napi);
3465}
3466
3467static void sky2_restart(struct work_struct *work)
3468{
3469 struct sky2_hw *hw = container_of(work, struct sky2_hw, restart_work);
3470
3471 rtnl_lock();
3472
3473 sky2_all_down(hw);
3474 sky2_reset(hw);
3475 sky2_all_up(hw);
3476
3477 rtnl_unlock();
3478}
3479
3480static inline u8 sky2_wol_supported(const struct sky2_hw *hw)
3481{
3482 return sky2_is_copper(hw) ? (WAKE_PHY | WAKE_MAGIC) : 0;
3483}
3484
3485static void sky2_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
3486{
3487 const struct sky2_port *sky2 = netdev_priv(dev);
3488
3489 wol->supported = sky2_wol_supported(sky2->hw);
3490 wol->wolopts = sky2->wol;
3491}
3492
3493static int sky2_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
3494{
3495 struct sky2_port *sky2 = netdev_priv(dev);
3496 struct sky2_hw *hw = sky2->hw;
3497 bool enable_wakeup = false;
3498 int i;
3499
3500 if ((wol->wolopts & ~sky2_wol_supported(sky2->hw)) ||
3501 !device_can_wakeup(&hw->pdev->dev))
3502 return -EOPNOTSUPP;
3503
3504 sky2->wol = wol->wolopts;
3505
3506 for (i = 0; i < hw->ports; i++) {
3507 struct net_device *dev = hw->dev[i];
3508 struct sky2_port *sky2 = netdev_priv(dev);
3509
3510 if (sky2->wol)
3511 enable_wakeup = true;
3512 }
3513 device_set_wakeup_enable(&hw->pdev->dev, enable_wakeup);
3514
3515 return 0;
3516}
3517
3518static u32 sky2_supported_modes(const struct sky2_hw *hw)
3519{
3520 if (sky2_is_copper(hw)) {
3521 u32 modes = SUPPORTED_10baseT_Half
3522 | SUPPORTED_10baseT_Full
3523 | SUPPORTED_100baseT_Half
3524 | SUPPORTED_100baseT_Full;
3525
3526 if (hw->flags & SKY2_HW_GIGABIT)
3527 modes |= SUPPORTED_1000baseT_Half
3528 | SUPPORTED_1000baseT_Full;
3529 return modes;
3530 } else
3531 return SUPPORTED_1000baseT_Half
3532 | SUPPORTED_1000baseT_Full;
3533}
3534
3535static int sky2_get_settings(struct net_device *dev, struct ethtool_cmd *ecmd)
3536{
3537 struct sky2_port *sky2 = netdev_priv(dev);
3538 struct sky2_hw *hw = sky2->hw;
3539
3540 ecmd->transceiver = XCVR_INTERNAL;
3541 ecmd->supported = sky2_supported_modes(hw);
3542 ecmd->phy_address = PHY_ADDR_MARV;
3543 if (sky2_is_copper(hw)) {
3544 ecmd->port = PORT_TP;
3545 ethtool_cmd_speed_set(ecmd, sky2->speed);
3546 ecmd->supported |= SUPPORTED_Autoneg | SUPPORTED_TP;
3547 } else {
3548 ethtool_cmd_speed_set(ecmd, SPEED_1000);
3549 ecmd->port = PORT_FIBRE;
3550 ecmd->supported |= SUPPORTED_Autoneg | SUPPORTED_FIBRE;
3551 }
3552
3553 ecmd->advertising = sky2->advertising;
3554 ecmd->autoneg = (sky2->flags & SKY2_FLAG_AUTO_SPEED)
3555 ? AUTONEG_ENABLE : AUTONEG_DISABLE;
3556 ecmd->duplex = sky2->duplex;
3557 return 0;
3558}
3559
3560static int sky2_set_settings(struct net_device *dev, struct ethtool_cmd *ecmd)
3561{
3562 struct sky2_port *sky2 = netdev_priv(dev);
3563 const struct sky2_hw *hw = sky2->hw;
3564 u32 supported = sky2_supported_modes(hw);
3565
3566 if (ecmd->autoneg == AUTONEG_ENABLE) {
3567 if (ecmd->advertising & ~supported)
3568 return -EINVAL;
3569
3570 if (sky2_is_copper(hw))
3571 sky2->advertising = ecmd->advertising |
3572 ADVERTISED_TP |
3573 ADVERTISED_Autoneg;
3574 else
3575 sky2->advertising = ecmd->advertising |
3576 ADVERTISED_FIBRE |
3577 ADVERTISED_Autoneg;
3578
3579 sky2->flags |= SKY2_FLAG_AUTO_SPEED;
3580 sky2->duplex = -1;
3581 sky2->speed = -1;
3582 } else {
3583 u32 setting;
3584 u32 speed = ethtool_cmd_speed(ecmd);
3585
3586 switch (speed) {
3587 case SPEED_1000:
3588 if (ecmd->duplex == DUPLEX_FULL)
3589 setting = SUPPORTED_1000baseT_Full;
3590 else if (ecmd->duplex == DUPLEX_HALF)
3591 setting = SUPPORTED_1000baseT_Half;
3592 else
3593 return -EINVAL;
3594 break;
3595 case SPEED_100:
3596 if (ecmd->duplex == DUPLEX_FULL)
3597 setting = SUPPORTED_100baseT_Full;
3598 else if (ecmd->duplex == DUPLEX_HALF)
3599 setting = SUPPORTED_100baseT_Half;
3600 else
3601 return -EINVAL;
3602 break;
3603
3604 case SPEED_10:
3605 if (ecmd->duplex == DUPLEX_FULL)
3606 setting = SUPPORTED_10baseT_Full;
3607 else if (ecmd->duplex == DUPLEX_HALF)
3608 setting = SUPPORTED_10baseT_Half;
3609 else
3610 return -EINVAL;
3611 break;
3612 default:
3613 return -EINVAL;
3614 }
3615
3616 if ((setting & supported) == 0)
3617 return -EINVAL;
3618
3619 sky2->speed = speed;
3620 sky2->duplex = ecmd->duplex;
3621 sky2->flags &= ~SKY2_FLAG_AUTO_SPEED;
3622 }
3623
3624 if (netif_running(dev)) {
3625 sky2_phy_reinit(sky2);
3626 sky2_set_multicast(dev);
3627 }
3628
3629 return 0;
3630}
3631
3632static void sky2_get_drvinfo(struct net_device *dev,
3633 struct ethtool_drvinfo *info)
3634{
3635 struct sky2_port *sky2 = netdev_priv(dev);
3636
3637 strcpy(info->driver, DRV_NAME);
3638 strcpy(info->version, DRV_VERSION);
3639 strcpy(info->fw_version, "N/A");
3640 strcpy(info->bus_info, pci_name(sky2->hw->pdev));
3641}
3642
3643static const struct sky2_stat {
3644 char name[ETH_GSTRING_LEN];
3645 u16 offset;
3646} sky2_stats[] = {
3647 { "tx_bytes", GM_TXO_OK_HI },
3648 { "rx_bytes", GM_RXO_OK_HI },
3649 { "tx_broadcast", GM_TXF_BC_OK },
3650 { "rx_broadcast", GM_RXF_BC_OK },
3651 { "tx_multicast", GM_TXF_MC_OK },
3652 { "rx_multicast", GM_RXF_MC_OK },
3653 { "tx_unicast", GM_TXF_UC_OK },
3654 { "rx_unicast", GM_RXF_UC_OK },
3655 { "tx_mac_pause", GM_TXF_MPAUSE },
3656 { "rx_mac_pause", GM_RXF_MPAUSE },
3657 { "collisions", GM_TXF_COL },
3658 { "late_collision",GM_TXF_LAT_COL },
3659 { "aborted", GM_TXF_ABO_COL },
3660 { "single_collisions", GM_TXF_SNG_COL },
3661 { "multi_collisions", GM_TXF_MUL_COL },
3662
3663 { "rx_short", GM_RXF_SHT },
3664 { "rx_runt", GM_RXE_FRAG },
3665 { "rx_64_byte_packets", GM_RXF_64B },
3666 { "rx_65_to_127_byte_packets", GM_RXF_127B },
3667 { "rx_128_to_255_byte_packets", GM_RXF_255B },
3668 { "rx_256_to_511_byte_packets", GM_RXF_511B },
3669 { "rx_512_to_1023_byte_packets", GM_RXF_1023B },
3670 { "rx_1024_to_1518_byte_packets", GM_RXF_1518B },
3671 { "rx_1518_to_max_byte_packets", GM_RXF_MAX_SZ },
3672 { "rx_too_long", GM_RXF_LNG_ERR },
3673 { "rx_fifo_overflow", GM_RXE_FIFO_OV },
3674 { "rx_jabber", GM_RXF_JAB_PKT },
3675 { "rx_fcs_error", GM_RXF_FCS_ERR },
3676
3677 { "tx_64_byte_packets", GM_TXF_64B },
3678 { "tx_65_to_127_byte_packets", GM_TXF_127B },
3679 { "tx_128_to_255_byte_packets", GM_TXF_255B },
3680 { "tx_256_to_511_byte_packets", GM_TXF_511B },
3681 { "tx_512_to_1023_byte_packets", GM_TXF_1023B },
3682 { "tx_1024_to_1518_byte_packets", GM_TXF_1518B },
3683 { "tx_1519_to_max_byte_packets", GM_TXF_MAX_SZ },
3684 { "tx_fifo_underrun", GM_TXE_FIFO_UR },
3685};
3686
3687static u32 sky2_get_msglevel(struct net_device *netdev)
3688{
3689 struct sky2_port *sky2 = netdev_priv(netdev);
3690 return sky2->msg_enable;
3691}
3692
3693static int sky2_nway_reset(struct net_device *dev)
3694{
3695 struct sky2_port *sky2 = netdev_priv(dev);
3696
3697 if (!netif_running(dev) || !(sky2->flags & SKY2_FLAG_AUTO_SPEED))
3698 return -EINVAL;
3699
3700 sky2_phy_reinit(sky2);
3701 sky2_set_multicast(dev);
3702
3703 return 0;
3704}
3705
3706 static void sky2_phy_stats(struct sky2_port *sky2, u64 *data, unsigned count)
3707{
3708 struct sky2_hw *hw = sky2->hw;
3709 unsigned port = sky2->port;
3710 int i;
3711
3712 data[0] = get_stats64(hw, port, GM_TXO_OK_LO);
3713 data[1] = get_stats64(hw, port, GM_RXO_OK_LO);
3714
3715 for (i = 2; i < count; i++)
3716 data[i] = get_stats32(hw, port, sky2_stats[i].offset);
3717}
3718
3719static void sky2_set_msglevel(struct net_device *netdev, u32 value)
3720{
3721 struct sky2_port *sky2 = netdev_priv(netdev);
3722 sky2->msg_enable = value;
3723}
3724
3725static int sky2_get_sset_count(struct net_device *dev, int sset)
3726{
3727 switch (sset) {
3728 case ETH_SS_STATS:
3729 return ARRAY_SIZE(sky2_stats);
3730 default:
3731 return -EOPNOTSUPP;
3732 }
3733}
3734
3735static void sky2_get_ethtool_stats(struct net_device *dev,
3736 struct ethtool_stats *stats, u64 *data)
3737{
3738 struct sky2_port *sky2 = netdev_priv(dev);
3739
3740 sky2_phy_stats(sky2, data, ARRAY_SIZE(sky2_stats));
3741}
3742
3743 static void sky2_get_strings(struct net_device *dev, u32 stringset, u8 *data)
3744{
3745 int i;
3746
3747 switch (stringset) {
3748 case ETH_SS_STATS:
3749 for (i = 0; i < ARRAY_SIZE(sky2_stats); i++)
3750 memcpy(data + i * ETH_GSTRING_LEN,
3751 sky2_stats[i].name, ETH_GSTRING_LEN);
3752 break;
3753 }
3754}
3755
3756static int sky2_set_mac_address(struct net_device *dev, void *p)
3757{
3758 struct sky2_port *sky2 = netdev_priv(dev);
3759 struct sky2_hw *hw = sky2->hw;
3760 unsigned port = sky2->port;
3761 const struct sockaddr *addr = p;
3762
3763 if (!is_valid_ether_addr(addr->sa_data))
3764 return -EADDRNOTAVAIL;
3765
3766 memcpy(dev->dev_addr, addr->sa_data, ETH_ALEN);
3767 memcpy_toio(hw->regs + B2_MAC_1 + port * 8,
3768 dev->dev_addr, ETH_ALEN);
3769 memcpy_toio(hw->regs + B2_MAC_2 + port * 8,
3770 dev->dev_addr, ETH_ALEN);
3771
3772 /* virtual address for data */
3773 gma_set_addr(hw, port, GM_SRC_ADDR_2L, dev->dev_addr);
3774
3775 /* physical address: used for pause frames */
3776 gma_set_addr(hw, port, GM_SRC_ADDR_1L, dev->dev_addr);
3777
3778 return 0;
3779}
3780
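/* Hash an address into the 64-bit multicast filter: the low six bits
 * of the Ethernet CRC pick which of the 64 filter bits to set.
 */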
3781static inline void sky2_add_filter(u8 filter[8], const u8 *addr)
3782{
3783 u32 bit;
3784
3785 bit = ether_crc(ETH_ALEN, addr) & 63;
3786 filter[bit >> 3] |= 1 << (bit & 7);
3787}
3788
3789static void sky2_set_multicast(struct net_device *dev)
3790{
3791 struct sky2_port *sky2 = netdev_priv(dev);
3792 struct sky2_hw *hw = sky2->hw;
3793 unsigned port = sky2->port;
3794 struct netdev_hw_addr *ha;
3795 u16 reg;
3796 u8 filter[8];
3797 int rx_pause;
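 /* 01:80:c2:00:00:01 is the IEEE 802.3x PAUSE frame destination */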
3798 static const u8 pause_mc_addr[ETH_ALEN] = { 0x1, 0x80, 0xc2, 0x0, 0x0, 0x1 };
3799
3800 rx_pause = (sky2->flow_status == FC_RX || sky2->flow_status == FC_BOTH);
3801 memset(filter, 0, sizeof(filter));
3802
3803 reg = gma_read16(hw, port, GM_RX_CTRL);
3804 reg |= GM_RXCR_UCF_ENA;
3805
3806 if (dev->flags & IFF_PROMISC) /* promiscuous */
3807 reg &= ~(GM_RXCR_UCF_ENA | GM_RXCR_MCF_ENA);
3808 else if (dev->flags & IFF_ALLMULTI)
3809 memset(filter, 0xff, sizeof(filter));
3810 else if (netdev_mc_empty(dev) && !rx_pause)
3811 reg &= ~GM_RXCR_MCF_ENA;
3812 else {
3813 reg |= GM_RXCR_MCF_ENA;
3814
3815 if (rx_pause)
3816 sky2_add_filter(filter, pause_mc_addr);
3817
3818 netdev_for_each_mc_addr(ha, dev)
3819 sky2_add_filter(filter, ha->addr);
3820 }
3821
3822 gma_write16(hw, port, GM_MC_ADDR_H1,
3823 (u16) filter[0] | ((u16) filter[1] << 8));
3824 gma_write16(hw, port, GM_MC_ADDR_H2,
3825 (u16) filter[2] | ((u16) filter[3] << 8));
3826 gma_write16(hw, port, GM_MC_ADDR_H3,
3827 (u16) filter[4] | ((u16) filter[5] << 8));
3828 gma_write16(hw, port, GM_MC_ADDR_H4,
3829 (u16) filter[6] | ((u16) filter[7] << 8));
3830
3831 gma_write16(hw, port, GM_RX_CTRL, reg);
3832}
3833
3834static struct rtnl_link_stats64 *sky2_get_stats(struct net_device *dev,
3835 struct rtnl_link_stats64 *stats)
3836{
3837 struct sky2_port *sky2 = netdev_priv(dev);
3838 struct sky2_hw *hw = sky2->hw;
3839 unsigned port = sky2->port;
3840 unsigned int start;
3841 u64 _bytes, _packets;
3842
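 /* Fetch/retry loop: re-read if a writer updated the 64-bit counters
 * while we were copying them.
 */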
3843 do {
3844 start = u64_stats_fetch_begin_bh(&sky2->rx_stats.syncp);
3845 _bytes = sky2->rx_stats.bytes;
3846 _packets = sky2->rx_stats.packets;
3847 } while (u64_stats_fetch_retry_bh(&sky2->rx_stats.syncp, start));
3848
3849 stats->rx_packets = _packets;
3850 stats->rx_bytes = _bytes;
3851
3852 do {
3853 start = u64_stats_fetch_begin_bh(&sky2->tx_stats.syncp);
3854 _bytes = sky2->tx_stats.bytes;
3855 _packets = sky2->tx_stats.packets;
3856 } while (u64_stats_fetch_retry_bh(&sky2->tx_stats.syncp, start));
3857
3858 stats->tx_packets = _packets;
3859 stats->tx_bytes = _bytes;
3860
3861 stats->multicast = get_stats32(hw, port, GM_RXF_MC_OK)
3862 + get_stats32(hw, port, GM_RXF_BC_OK);
3863
3864 stats->collisions = get_stats32(hw, port, GM_TXF_COL);
3865
3866 stats->rx_length_errors = get_stats32(hw, port, GM_RXF_LNG_ERR);
3867 stats->rx_crc_errors = get_stats32(hw, port, GM_RXF_FCS_ERR);
3868 stats->rx_frame_errors = get_stats32(hw, port, GM_RXF_SHT)
3869 + get_stats32(hw, port, GM_RXE_FRAG);
3870 stats->rx_over_errors = get_stats32(hw, port, GM_RXE_FIFO_OV);
3871
3872 stats->rx_dropped = dev->stats.rx_dropped;
3873 stats->rx_fifo_errors = dev->stats.rx_fifo_errors;
3874 stats->tx_fifo_errors = dev->stats.tx_fifo_errors;
3875
3876 return stats;
3877}
3878
3879 /* We can keep one global blink state because blinking is controlled
3880 * by ethtool, which always runs under the RTNL mutex.
3881 */
3882static void sky2_led(struct sky2_port *sky2, enum led_mode mode)
3883{
3884 struct sky2_hw *hw = sky2->hw;
3885 unsigned port = sky2->port;
3886
3887 spin_lock_bh(&sky2->phy_lock);
3888 if (hw->chip_id == CHIP_ID_YUKON_EC_U ||
3889 hw->chip_id == CHIP_ID_YUKON_EX ||
3890 hw->chip_id == CHIP_ID_YUKON_SUPR) {
3891 u16 pg;
3892 pg = gm_phy_read(hw, port, PHY_MARV_EXT_ADR);
3893 gm_phy_write(hw, port, PHY_MARV_EXT_ADR, 3);
3894
3895 switch (mode) {
3896 case MO_LED_OFF:
3897 gm_phy_write(hw, port, PHY_MARV_PHY_CTRL,
3898 PHY_M_LEDC_LOS_CTRL(8) |
3899 PHY_M_LEDC_INIT_CTRL(8) |
3900 PHY_M_LEDC_STA1_CTRL(8) |
3901 PHY_M_LEDC_STA0_CTRL(8));
3902 break;
3903 case MO_LED_ON:
3904 gm_phy_write(hw, port, PHY_MARV_PHY_CTRL,
3905 PHY_M_LEDC_LOS_CTRL(9) |
3906 PHY_M_LEDC_INIT_CTRL(9) |
3907 PHY_M_LEDC_STA1_CTRL(9) |
3908 PHY_M_LEDC_STA0_CTRL(9));
3909 break;
3910 case MO_LED_BLINK:
3911 gm_phy_write(hw, port, PHY_MARV_PHY_CTRL,
3912 PHY_M_LEDC_LOS_CTRL(0xa) |
3913 PHY_M_LEDC_INIT_CTRL(0xa) |
3914 PHY_M_LEDC_STA1_CTRL(0xa) |
3915 PHY_M_LEDC_STA0_CTRL(0xa));
3916 break;
3917 case MO_LED_NORM:
3918 gm_phy_write(hw, port, PHY_MARV_PHY_CTRL,
3919 PHY_M_LEDC_LOS_CTRL(1) |
3920 PHY_M_LEDC_INIT_CTRL(8) |
3921 PHY_M_LEDC_STA1_CTRL(7) |
3922 PHY_M_LEDC_STA0_CTRL(7));
3923 }
3924
3925 gm_phy_write(hw, port, PHY_MARV_EXT_ADR, pg);
3926 } else
3927 gm_phy_write(hw, port, PHY_MARV_LED_OVER,
3928 PHY_M_LED_MO_DUP(mode) |
3929 PHY_M_LED_MO_10(mode) |
3930 PHY_M_LED_MO_100(mode) |
3931 PHY_M_LED_MO_1000(mode) |
3932 PHY_M_LED_MO_RX(mode) |
3933 PHY_M_LED_MO_TX(mode));
3934
3935 spin_unlock_bh(&sky2->phy_lock);
3936}
3937
3938 /* Blink LEDs to help locate the board */
3939static int sky2_set_phys_id(struct net_device *dev,
3940 enum ethtool_phys_id_state state)
3941{
3942 struct sky2_port *sky2 = netdev_priv(dev);
3943
3944 switch (state) {
3945 case ETHTOOL_ID_ACTIVE:
3946 return 1; /* cycle on/off once per second */
3947 case ETHTOOL_ID_INACTIVE:
3948 sky2_led(sky2, MO_LED_NORM);
3949 break;
3950 case ETHTOOL_ID_ON:
3951 sky2_led(sky2, MO_LED_ON);
3952 break;
3953 case ETHTOOL_ID_OFF:
3954 sky2_led(sky2, MO_LED_OFF);
3955 break;
3956 }
3957
3958 return 0;
3959}
3960
3961static void sky2_get_pauseparam(struct net_device *dev,
3962 struct ethtool_pauseparam *ecmd)
3963{
3964 struct sky2_port *sky2 = netdev_priv(dev);
3965
3966 switch (sky2->flow_mode) {
3967 case FC_NONE:
3968 ecmd->tx_pause = ecmd->rx_pause = 0;
3969 break;
3970 case FC_TX:
3971 ecmd->tx_pause = 1, ecmd->rx_pause = 0;
3972 break;
3973 case FC_RX:
3974 ecmd->tx_pause = 0, ecmd->rx_pause = 1;
3975 break;
3976 case FC_BOTH:
3977 ecmd->tx_pause = ecmd->rx_pause = 1;
3978 }
3979
3980 ecmd->autoneg = (sky2->flags & SKY2_FLAG_AUTO_PAUSE)
3981 ? AUTONEG_ENABLE : AUTONEG_DISABLE;
3982}
3983
3984static int sky2_set_pauseparam(struct net_device *dev,
3985 struct ethtool_pauseparam *ecmd)
3986{
3987 struct sky2_port *sky2 = netdev_priv(dev);
3988
3989 if (ecmd->autoneg == AUTONEG_ENABLE)
3990 sky2->flags |= SKY2_FLAG_AUTO_PAUSE;
3991 else
3992 sky2->flags &= ~SKY2_FLAG_AUTO_PAUSE;
3993
3994 sky2->flow_mode = sky2_flow(ecmd->rx_pause, ecmd->tx_pause);
3995
3996 if (netif_running(dev))
3997 sky2_phy_reinit(sky2);
3998
3999 return 0;
4000}
4001
4002static int sky2_get_coalesce(struct net_device *dev,
4003 struct ethtool_coalesce *ecmd)
4004{
4005 struct sky2_port *sky2 = netdev_priv(dev);
4006 struct sky2_hw *hw = sky2->hw;
4007
4008 if (sky2_read8(hw, STAT_TX_TIMER_CTRL) == TIM_STOP)
4009 ecmd->tx_coalesce_usecs = 0;
4010 else {
4011 u32 clks = sky2_read32(hw, STAT_TX_TIMER_INI);
4012 ecmd->tx_coalesce_usecs = sky2_clk2us(hw, clks);
4013 }
4014 ecmd->tx_max_coalesced_frames = sky2_read16(hw, STAT_TX_IDX_TH);
4015
4016 if (sky2_read8(hw, STAT_LEV_TIMER_CTRL) == TIM_STOP)
4017 ecmd->rx_coalesce_usecs = 0;
4018 else {
4019 u32 clks = sky2_read32(hw, STAT_LEV_TIMER_INI);
4020 ecmd->rx_coalesce_usecs = sky2_clk2us(hw, clks);
4021 }
4022 ecmd->rx_max_coalesced_frames = sky2_read8(hw, STAT_FIFO_WM);
4023
4024 if (sky2_read8(hw, STAT_ISR_TIMER_CTRL) == TIM_STOP)
4025 ecmd->rx_coalesce_usecs_irq = 0;
4026 else {
4027 u32 clks = sky2_read32(hw, STAT_ISR_TIMER_INI);
4028 ecmd->rx_coalesce_usecs_irq = sky2_clk2us(hw, clks);
4029 }
4030
4031 ecmd->rx_max_coalesced_frames_irq = sky2_read8(hw, STAT_FIFO_ISR_WM);
4032
4033 return 0;
4034}
4035
4036 /* Note: this affects both ports */
4037static int sky2_set_coalesce(struct net_device *dev,
4038 struct ethtool_coalesce *ecmd)
4039{
4040 struct sky2_port *sky2 = netdev_priv(dev);
4041 struct sky2_hw *hw = sky2->hw;
4042 const u32 tmax = sky2_clk2us(hw, 0x0ffffff);
4043
4044 if (ecmd->tx_coalesce_usecs > tmax ||
4045 ecmd->rx_coalesce_usecs > tmax ||
4046 ecmd->rx_coalesce_usecs_irq > tmax)
4047 return -EINVAL;
4048
4049 if (ecmd->tx_max_coalesced_frames >= sky2->tx_ring_size-1)
4050 return -EINVAL;
4051 if (ecmd->rx_max_coalesced_frames > RX_MAX_PENDING)
4052 return -EINVAL;
4053 if (ecmd->rx_max_coalesced_frames_irq > RX_MAX_PENDING)
4054 return -EINVAL;
4055
4056 if (ecmd->tx_coalesce_usecs == 0)
4057 sky2_write8(hw, STAT_TX_TIMER_CTRL, TIM_STOP);
4058 else {
4059 sky2_write32(hw, STAT_TX_TIMER_INI,
4060 sky2_us2clk(hw, ecmd->tx_coalesce_usecs));
4061 sky2_write8(hw, STAT_TX_TIMER_CTRL, TIM_START);
4062 }
4063 sky2_write16(hw, STAT_TX_IDX_TH, ecmd->tx_max_coalesced_frames);
4064
4065 if (ecmd->rx_coalesce_usecs == 0)
4066 sky2_write8(hw, STAT_LEV_TIMER_CTRL, TIM_STOP);
4067 else {
4068 sky2_write32(hw, STAT_LEV_TIMER_INI,
4069 sky2_us2clk(hw, ecmd->rx_coalesce_usecs));
4070 sky2_write8(hw, STAT_LEV_TIMER_CTRL, TIM_START);
4071 }
4072 sky2_write8(hw, STAT_FIFO_WM, ecmd->rx_max_coalesced_frames);
4073
4074 if (ecmd->rx_coalesce_usecs_irq == 0)
4075 sky2_write8(hw, STAT_ISR_TIMER_CTRL, TIM_STOP);
4076 else {
4077 sky2_write32(hw, STAT_ISR_TIMER_INI,
4078 sky2_us2clk(hw, ecmd->rx_coalesce_usecs_irq));
4079 sky2_write8(hw, STAT_ISR_TIMER_CTRL, TIM_START);
4080 }
4081 sky2_write8(hw, STAT_FIFO_ISR_WM, ecmd->rx_max_coalesced_frames_irq);
4082 return 0;
4083}
4084
4085static void sky2_get_ringparam(struct net_device *dev,
4086 struct ethtool_ringparam *ering)
4087{
4088 struct sky2_port *sky2 = netdev_priv(dev);
4089
4090 ering->rx_max_pending = RX_MAX_PENDING;
4091 ering->tx_max_pending = TX_MAX_PENDING;
4092
4093 ering->rx_pending = sky2->rx_pending;
4094 ering->tx_pending = sky2->tx_pending;
4095}
4096
4097static int sky2_set_ringparam(struct net_device *dev,
4098 struct ethtool_ringparam *ering)
4099{
4100 struct sky2_port *sky2 = netdev_priv(dev);
4101
4102 if (ering->rx_pending > RX_MAX_PENDING ||
4103 ering->rx_pending < 8 ||
4104 ering->tx_pending < TX_MIN_PENDING ||
4105 ering->tx_pending > TX_MAX_PENDING)
4106 return -EINVAL;
4107
4108 sky2_detach(dev);
4109
4110 sky2->rx_pending = ering->rx_pending;
4111 sky2->tx_pending = ering->tx_pending;
4112 sky2->tx_ring_size = roundup_pow_of_two(sky2->tx_pending+1);
4113
4114 return sky2_reattach(dev);
4115}
4116
4117static int sky2_get_regs_len(struct net_device *dev)
4118{
4119 return 0x4000;
4120}
4121
4122static int sky2_reg_access_ok(struct sky2_hw *hw, unsigned int b)
4123{
4124 /* This complicated switch statement makes sure we only
4125 * access regions that are unreserved.
4126 * Some blocks are only valid on dual port cards.
4127 */
4128 switch (b) {
4129 /* second port */
4130 case 5: /* Tx Arbiter 2 */
4131 case 9: /* RX2 */
4132 case 14 ... 15: /* TX2 */
4133 case 17: case 19: /* Ram Buffer 2 */
4134 case 22 ... 23: /* Tx Ram Buffer 2 */
4135 case 25: /* Rx MAC Fifo 2 */
4136 case 27: /* Tx MAC Fifo 2 */
4137 case 31: /* GPHY 2 */
4138 case 40 ... 47: /* Pattern Ram 2 */
4139 case 52: case 54: /* TCP Segmentation 2 */
4140 case 112 ... 116: /* GMAC 2 */
4141 return hw->ports > 1;
4142
4143 case 0: /* Control */
4144 case 2: /* Mac address */
4145 case 4: /* Tx Arbiter 1 */
4146 case 7: /* PCI express reg */
4147 case 8: /* RX1 */
4148 case 12 ... 13: /* TX1 */
4149 case 16: case 18:/* Rx Ram Buffer 1 */
4150 case 20 ... 21: /* Tx Ram Buffer 1 */
4151 case 24: /* Rx MAC Fifo 1 */
4152 case 26: /* Tx MAC Fifo 1 */
4153 case 28 ... 29: /* Descriptor and status unit */
4154 case 30: /* GPHY 1*/
4155 case 32 ... 39: /* Pattern Ram 1 */
4156 case 48: case 50: /* TCP Segmentation 1 */
4157 case 56 ... 60: /* PCI space */
4158 case 80 ... 84: /* GMAC 1 */
4159 return 1;
4160
4161 default:
4162 return 0;
4163 }
4164}
4165
4166/*
4167 * Returns copy of control register region
4168 * Note: ethtool_get_regs always provides full size (16k) buffer
4169 */
4170static void sky2_get_regs(struct net_device *dev, struct ethtool_regs *regs,
4171 void *p)
4172{
4173 const struct sky2_port *sky2 = netdev_priv(dev);
4174 const void __iomem *io = sky2->hw->regs;
4175 unsigned int b;
4176
4177 regs->version = 1;
4178
4179 for (b = 0; b < 128; b++) {
4180 /* skip poisonous diagnostic ram region in block 3 */
4181 if (b == 3)
4182 memcpy_fromio(p + 0x10, io + 0x10, 128 - 0x10);
4183 else if (sky2_reg_access_ok(sky2->hw, b))
4184 memcpy_fromio(p, io, 128);
4185 else
4186 memset(p, 0, 128);
4187
4188 p += 128;
4189 io += 128;
4190 }
4191}
4192
4193static int sky2_get_eeprom_len(struct net_device *dev)
4194{
4195 struct sky2_port *sky2 = netdev_priv(dev);
4196 struct sky2_hw *hw = sky2->hw;
4197 u16 reg2;
4198
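 /* The VPD ROM size is encoded in bits 15:14 of REG2 as a power of
 * two: 0 -> 256 bytes, 1 -> 512, 2 -> 1 KB, 3 -> 2 KB.
 */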
4199 reg2 = sky2_pci_read16(hw, PCI_DEV_REG2);
4200 return 1 << ( ((reg2 & PCI_VPD_ROM_SZ) >> 14) + 8);
4201}
4202
4203static int sky2_vpd_wait(const struct sky2_hw *hw, int cap, u16 busy)
4204{
4205 unsigned long start = jiffies;
4206
4207 while ( (sky2_pci_read16(hw, cap + PCI_VPD_ADDR) & PCI_VPD_ADDR_F) == busy) {
4208 /* Can take up to 10.6 ms for write */
4209 if (time_after(jiffies, start + HZ/4)) {
4210 dev_err(&hw->pdev->dev, "VPD cycle timed out\n");
4211 return -ETIMEDOUT;
4212 }
4213 mdelay(1);
4214 }
4215
4216 return 0;
4217}
4218
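/* VPD is accessed a 32-bit word at a time: write the word address,
 * wait for the flag bit to flip, then move data through PCI_VPD_DATA.
 */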
4219static int sky2_vpd_read(struct sky2_hw *hw, int cap, void *data,
4220 u16 offset, size_t length)
4221{
4222 int rc = 0;
4223
4224 while (length > 0) {
4225 u32 val;
4226
4227 sky2_pci_write16(hw, cap + PCI_VPD_ADDR, offset);
4228 rc = sky2_vpd_wait(hw, cap, 0);
4229 if (rc)
4230 break;
4231
4232 val = sky2_pci_read32(hw, cap + PCI_VPD_DATA);
4233
4234 memcpy(data, &val, min(sizeof(val), length));
4235 offset += sizeof(u32);
4236 data += sizeof(u32);
4237 length -= sizeof(u32);
4238 }
4239
4240 return rc;
4241}
4242
4243static int sky2_vpd_write(struct sky2_hw *hw, int cap, const void *data,
4244 u16 offset, unsigned int length)
4245{
4246 unsigned int i;
4247 int rc = 0;
4248
4249 for (i = 0; i < length; i += sizeof(u32)) {
4250 u32 val = *(u32 *)(data + i);
4251
4252 sky2_pci_write32(hw, cap + PCI_VPD_DATA, val);
4253 sky2_pci_write32(hw, cap + PCI_VPD_ADDR, offset | PCI_VPD_ADDR_F);
4254
4255 rc = sky2_vpd_wait(hw, cap, PCI_VPD_ADDR_F);
4256 if (rc)
4257 break;
4258 }
4259 return rc;
4260}
4261
4262static int sky2_get_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom,
4263 u8 *data)
4264{
4265 struct sky2_port *sky2 = netdev_priv(dev);
4266 int cap = pci_find_capability(sky2->hw->pdev, PCI_CAP_ID_VPD);
4267
4268 if (!cap)
4269 return -EINVAL;
4270
4271 eeprom->magic = SKY2_EEPROM_MAGIC;
4272
4273 return sky2_vpd_read(sky2->hw, cap, data, eeprom->offset, eeprom->len);
4274}
4275
4276static int sky2_set_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom,
4277 u8 *data)
4278{
4279 struct sky2_port *sky2 = netdev_priv(dev);
4280 int cap = pci_find_capability(sky2->hw->pdev, PCI_CAP_ID_VPD);
4281
4282 if (!cap)
4283 return -EINVAL;
4284
4285 if (eeprom->magic != SKY2_EEPROM_MAGIC)
4286 return -EINVAL;
4287
4288 /* Partial writes not supported */
4289 if ((eeprom->offset & 3) || (eeprom->len & 3))
4290 return -EINVAL;
4291
4292 return sky2_vpd_write(sky2->hw, cap, data, eeprom->offset, eeprom->len);
4293}
4294
4295static u32 sky2_fix_features(struct net_device *dev, u32 features)
4296{
4297 const struct sky2_port *sky2 = netdev_priv(dev);
4298 const struct sky2_hw *hw = sky2->hw;
4299
4300 /* In order to do Jumbo packets on these chips, we need to turn off
4301 * the transmit store/forward. Therefore checksum offload won't work.
4302 */
4303 if (dev->mtu > ETH_DATA_LEN && hw->chip_id == CHIP_ID_YUKON_EC_U) {
4304 netdev_info(dev, "checksum offload not possible with jumbo frames\n");
4305 features &= ~(NETIF_F_TSO|NETIF_F_SG|NETIF_F_ALL_CSUM);
4306 }
4307
4308 /* Some hardware requires receive checksum for RSS to work. */
4309 if ( (features & NETIF_F_RXHASH) &&
4310 !(features & NETIF_F_RXCSUM) &&
4311 (sky2->hw->flags & SKY2_HW_RSS_CHKSUM)) {
4312 netdev_info(dev, "receive hashing forces receive checksum\n");
4313 features |= NETIF_F_RXCSUM;
4314 }
4315
4316 return features;
4317}
4318
4319static int sky2_set_features(struct net_device *dev, u32 features)
4320{
4321 struct sky2_port *sky2 = netdev_priv(dev);
4322 u32 changed = dev->features ^ features;
4323
4324 if (changed & NETIF_F_RXCSUM) {
4325 u32 on = features & NETIF_F_RXCSUM;
4326 sky2_write32(sky2->hw, Q_ADDR(rxqaddr[sky2->port], Q_CSR),
4327 on ? BMU_ENA_RX_CHKSUM : BMU_DIS_RX_CHKSUM);
4328 }
4329
4330 if (changed & NETIF_F_RXHASH)
4331 rx_set_rss(dev, features);
4332
4333 if (changed & (NETIF_F_HW_VLAN_TX|NETIF_F_HW_VLAN_RX))
4334 sky2_vlan_mode(dev, features);
4335
4336 return 0;
4337}
4338
4339static const struct ethtool_ops sky2_ethtool_ops = {
4340 .get_settings = sky2_get_settings,
4341 .set_settings = sky2_set_settings,
4342 .get_drvinfo = sky2_get_drvinfo,
4343 .get_wol = sky2_get_wol,
4344 .set_wol = sky2_set_wol,
4345 .get_msglevel = sky2_get_msglevel,
4346 .set_msglevel = sky2_set_msglevel,
4347 .nway_reset = sky2_nway_reset,
4348 .get_regs_len = sky2_get_regs_len,
4349 .get_regs = sky2_get_regs,
4350 .get_link = ethtool_op_get_link,
4351 .get_eeprom_len = sky2_get_eeprom_len,
4352 .get_eeprom = sky2_get_eeprom,
4353 .set_eeprom = sky2_set_eeprom,
4354 .get_strings = sky2_get_strings,
4355 .get_coalesce = sky2_get_coalesce,
4356 .set_coalesce = sky2_set_coalesce,
4357 .get_ringparam = sky2_get_ringparam,
4358 .set_ringparam = sky2_set_ringparam,
4359 .get_pauseparam = sky2_get_pauseparam,
4360 .set_pauseparam = sky2_set_pauseparam,
4361 .set_phys_id = sky2_set_phys_id,
4362 .get_sset_count = sky2_get_sset_count,
4363 .get_ethtool_stats = sky2_get_ethtool_stats,
4364};
4365
4366#ifdef CONFIG_SKY2_DEBUG
4367
4368static struct dentry *sky2_debug;
4369
4370
4371/*
4372 * Read and parse the first part of Vital Product Data
4373 */
4374#define VPD_SIZE 128
4375#define VPD_MAGIC 0x82
4376
4377static const struct vpd_tag {
4378 char tag[2];
4379 char *label;
4380} vpd_tags[] = {
4381 { "PN", "Part Number" },
4382 { "EC", "Engineering Level" },
4383 { "MN", "Manufacturer" },
4384 { "SN", "Serial Number" },
4385 { "YA", "Asset Tag" },
4386 { "VL", "First Error Log Message" },
4387 { "VF", "Second Error Log Message" },
4388 { "VB", "Boot Agent ROM Configuration" },
4389 { "VE", "EFI UNDI Configuration" },
4390};
4391
4392static void sky2_show_vpd(struct seq_file *seq, struct sky2_hw *hw)
4393{
4394 size_t vpd_size;
4395 loff_t offs;
4396 u8 len;
4397 unsigned char *buf;
4398 u16 reg2;
4399
4400 reg2 = sky2_pci_read16(hw, PCI_DEV_REG2);
4401 vpd_size = 1 << ( ((reg2 & PCI_VPD_ROM_SZ) >> 14) + 8);
4402
4403 seq_printf(seq, "%s Product Data\n", pci_name(hw->pdev));
4404 buf = kmalloc(vpd_size, GFP_KERNEL);
4405 if (!buf) {
4406 seq_puts(seq, "no memory!\n");
4407 return;
4408 }
4409
4410 if (pci_read_vpd(hw->pdev, 0, vpd_size, buf) < 0) {
4411 seq_puts(seq, "VPD read failed\n");
4412 goto out;
4413 }
4414
4415 if (buf[0] != VPD_MAGIC) {
4416 seq_printf(seq, "VPD tag mismatch: %#x\n", buf[0]);
4417 goto out;
4418 }
4419 len = buf[1];
4420 if (len == 0 || len > vpd_size - 4) {
4421 seq_printf(seq, "Invalid id length: %d\n", len);
4422 goto out;
4423 }
4424
4425 seq_printf(seq, "%.*s\n", len, buf + 3);
4426 offs = len + 3;
4427
4428 while (offs < vpd_size - 4) {
4429 int i;
4430
4431 if (!memcmp("RW", buf + offs, 2)) /* end marker */
4432 break;
4433 len = buf[offs + 2];
4434 if (offs + len + 3 >= vpd_size)
4435 break;
4436
4437 for (i = 0; i < ARRAY_SIZE(vpd_tags); i++) {
4438 if (!memcmp(vpd_tags[i].tag, buf + offs, 2)) {
4439 seq_printf(seq, " %s: %.*s\n",
4440 vpd_tags[i].label, len, buf + offs + 3);
4441 break;
4442 }
4443 }
4444 offs += len + 3;
4445 }
4446out:
4447 kfree(buf);
4448}
4449
4450static int sky2_debug_show(struct seq_file *seq, void *v)
4451{
4452 struct net_device *dev = seq->private;
4453 const struct sky2_port *sky2 = netdev_priv(dev);
4454 struct sky2_hw *hw = sky2->hw;
4455 unsigned port = sky2->port;
4456 unsigned idx, last;
4457 int sop;
4458
4459 sky2_show_vpd(seq, hw);
4460
4461 seq_printf(seq, "\nIRQ src=%x mask=%x control=%x\n",
4462 sky2_read32(hw, B0_ISRC),
4463 sky2_read32(hw, B0_IMSK),
4464 sky2_read32(hw, B0_Y2_SP_ICR));
4465
4466 if (!netif_running(dev)) {
4467 seq_printf(seq, "network not running\n");
4468 return 0;
4469 }
4470
4471 napi_disable(&hw->napi);
4472 last = sky2_read16(hw, STAT_PUT_IDX);
4473
4474 seq_printf(seq, "Status ring %u\n", hw->st_size);
4475 if (hw->st_idx == last)
4476 seq_puts(seq, "Status ring (empty)\n");
4477 else {
4478 seq_puts(seq, "Status ring\n");
4479 for (idx = hw->st_idx; idx != last && idx < hw->st_size;
4480 idx = RING_NEXT(idx, hw->st_size)) {
4481 const struct sky2_status_le *le = hw->st_le + idx;
4482 seq_printf(seq, "[%d] %#x %d %#x\n",
4483 idx, le->opcode, le->length, le->status);
4484 }
4485 seq_puts(seq, "\n");
4486 }
4487
4488 seq_printf(seq, "Tx ring pending=%u...%u report=%d done=%d\n",
4489 sky2->tx_cons, sky2->tx_prod,
4490 sky2_read16(hw, port == 0 ? STAT_TXA1_RIDX : STAT_TXA2_RIDX),
4491 sky2_read16(hw, Q_ADDR(txqaddr[port], Q_DONE)));
4492
4493 /* Dump contents of tx ring */
4494 sop = 1;
4495 for (idx = sky2->tx_next; idx != sky2->tx_prod && idx < sky2->tx_ring_size;
4496 idx = RING_NEXT(idx, sky2->tx_ring_size)) {
4497 const struct sky2_tx_le *le = sky2->tx_le + idx;
4498 u32 a = le32_to_cpu(le->addr);
4499
4500 if (sop)
4501 seq_printf(seq, "%u:", idx);
4502 sop = 0;
4503
4504 switch (le->opcode & ~HW_OWNER) {
4505 case OP_ADDR64:
4506 seq_printf(seq, " %#x:", a);
4507 break;
4508 case OP_LRGLEN:
4509 seq_printf(seq, " mtu=%d", a);
4510 break;
4511 case OP_VLAN:
4512 seq_printf(seq, " vlan=%d", be16_to_cpu(le->length));
4513 break;
4514 case OP_TCPLISW:
4515 seq_printf(seq, " csum=%#x", a);
4516 break;
4517 case OP_LARGESEND:
4518 seq_printf(seq, " tso=%#x(%d)", a, le16_to_cpu(le->length));
4519 break;
4520 case OP_PACKET:
4521 seq_printf(seq, " %#x(%d)", a, le16_to_cpu(le->length));
4522 break;
4523 case OP_BUFFER:
4524 seq_printf(seq, " frag=%#x(%d)", a, le16_to_cpu(le->length));
4525 break;
4526 default:
4527 seq_printf(seq, " op=%#x,%#x(%d)", le->opcode,
4528 a, le16_to_cpu(le->length));
4529 }
4530
4531 if (le->ctrl & EOP) {
4532 seq_putc(seq, '\n');
4533 sop = 1;
4534 }
4535 }
4536
4537 seq_printf(seq, "\nRx ring hw get=%d put=%d last=%d\n",
4538 sky2_read16(hw, Y2_QADDR(rxqaddr[port], PREF_UNIT_GET_IDX)),
4539 sky2_read16(hw, Y2_QADDR(rxqaddr[port], PREF_UNIT_PUT_IDX)),
4540 sky2_read16(hw, Y2_QADDR(rxqaddr[port], PREF_UNIT_LAST_IDX)));
4541
4542 sky2_read32(hw, B0_Y2_SP_LISR);
4543 napi_enable(&hw->napi);
4544 return 0;
4545}
4546
4547static int sky2_debug_open(struct inode *inode, struct file *file)
4548{
4549 return single_open(file, sky2_debug_show, inode->i_private);
4550}
4551
4552static const struct file_operations sky2_debug_fops = {
4553 .owner = THIS_MODULE,
4554 .open = sky2_debug_open,
4555 .read = seq_read,
4556 .llseek = seq_lseek,
4557 .release = single_release,
4558};
4559
4560/*
4561 * Use network device events to create/remove/rename
4562 * debugfs file entries
4563 */
4564static int sky2_device_event(struct notifier_block *unused,
4565 unsigned long event, void *ptr)
4566{
4567 struct net_device *dev = ptr;
4568 struct sky2_port *sky2 = netdev_priv(dev);
4569
4570 if (dev->netdev_ops->ndo_open != sky2_up || !sky2_debug)
4571 return NOTIFY_DONE;
4572
4573 switch (event) {
4574 case NETDEV_CHANGENAME:
4575 if (sky2->debugfs) {
4576 sky2->debugfs = debugfs_rename(sky2_debug, sky2->debugfs,
4577 sky2_debug, dev->name);
4578 }
4579 break;
4580
4581 case NETDEV_GOING_DOWN:
4582 if (sky2->debugfs) {
4583 netdev_printk(KERN_DEBUG, dev, "remove debugfs\n");
4584 debugfs_remove(sky2->debugfs);
4585 sky2->debugfs = NULL;
4586 }
4587 break;
4588
4589 case NETDEV_UP:
4590 sky2->debugfs = debugfs_create_file(dev->name, S_IRUGO,
4591 sky2_debug, dev,
4592 &sky2_debug_fops);
4593 if (IS_ERR(sky2->debugfs))
4594 sky2->debugfs = NULL;
4595 }
4596
4597 return NOTIFY_DONE;
4598}
4599
4600static struct notifier_block sky2_notifier = {
4601 .notifier_call = sky2_device_event,
4602};
4603
4604
4605static __init void sky2_debug_init(void)
4606{
4607 struct dentry *ent;
4608
4609 ent = debugfs_create_dir("sky2", NULL);
4610 if (!ent || IS_ERR(ent))
4611 return;
4612
4613 sky2_debug = ent;
4614 register_netdevice_notifier(&sky2_notifier);
4615}
4616
4617static __exit void sky2_debug_cleanup(void)
4618{
4619 if (sky2_debug) {
4620 unregister_netdevice_notifier(&sky2_notifier);
4621 debugfs_remove(sky2_debug);
4622 sky2_debug = NULL;
4623 }
4624}
4625
4626#else
4627#define sky2_debug_init()
4628#define sky2_debug_cleanup()
4629#endif
4630
4631 /* Two copies of the network device operations, to handle the special
4632 case of not allowing netpoll on the second port */
4633static const struct net_device_ops sky2_netdev_ops[2] = {
4634 {
4635 .ndo_open = sky2_up,
4636 .ndo_stop = sky2_down,
4637 .ndo_start_xmit = sky2_xmit_frame,
4638 .ndo_do_ioctl = sky2_ioctl,
4639 .ndo_validate_addr = eth_validate_addr,
4640 .ndo_set_mac_address = sky2_set_mac_address,
4641 .ndo_set_rx_mode = sky2_set_multicast,
4642 .ndo_change_mtu = sky2_change_mtu,
4643 .ndo_fix_features = sky2_fix_features,
4644 .ndo_set_features = sky2_set_features,
4645 .ndo_tx_timeout = sky2_tx_timeout,
4646 .ndo_get_stats64 = sky2_get_stats,
4647#ifdef CONFIG_NET_POLL_CONTROLLER
4648 .ndo_poll_controller = sky2_netpoll,
4649#endif
4650 },
4651 {
4652 .ndo_open = sky2_up,
4653 .ndo_stop = sky2_down,
4654 .ndo_start_xmit = sky2_xmit_frame,
4655 .ndo_do_ioctl = sky2_ioctl,
4656 .ndo_validate_addr = eth_validate_addr,
4657 .ndo_set_mac_address = sky2_set_mac_address,
4658 .ndo_set_rx_mode = sky2_set_multicast,
4659 .ndo_change_mtu = sky2_change_mtu,
4660 .ndo_fix_features = sky2_fix_features,
4661 .ndo_set_features = sky2_set_features,
4662 .ndo_tx_timeout = sky2_tx_timeout,
4663 .ndo_get_stats64 = sky2_get_stats,
4664 },
4665};
4666
4667/* Initialize network device */
4668static __devinit struct net_device *sky2_init_netdev(struct sky2_hw *hw,
4669 unsigned port,
4670 int highmem, int wol)
4671{
4672 struct sky2_port *sky2;
4673 struct net_device *dev = alloc_etherdev(sizeof(*sky2));
4674
4675 if (!dev) {
4676 dev_err(&hw->pdev->dev, "etherdev alloc failed\n");
4677 return NULL;
4678 }
4679
4680 SET_NETDEV_DEV(dev, &hw->pdev->dev);
4681 dev->irq = hw->pdev->irq;
4682 SET_ETHTOOL_OPS(dev, &sky2_ethtool_ops);
4683 dev->watchdog_timeo = TX_WATCHDOG;
4684 dev->netdev_ops = &sky2_netdev_ops[port];
4685
4686 sky2 = netdev_priv(dev);
4687 sky2->netdev = dev;
4688 sky2->hw = hw;
4689 sky2->msg_enable = netif_msg_init(debug, default_msg);
4690
4691 /* Auto speed and flow control */
4692 sky2->flags = SKY2_FLAG_AUTO_SPEED | SKY2_FLAG_AUTO_PAUSE;
4693 if (hw->chip_id != CHIP_ID_YUKON_XL)
4694 dev->hw_features |= NETIF_F_RXCSUM;
4695
4696 sky2->flow_mode = FC_BOTH;
4697
4698 sky2->duplex = -1;
4699 sky2->speed = -1;
4700 sky2->advertising = sky2_supported_modes(hw);
4701 sky2->wol = wol;
4702
4703 spin_lock_init(&sky2->phy_lock);
4704
4705 sky2->tx_pending = TX_DEF_PENDING;
4706 sky2->tx_ring_size = roundup_pow_of_two(TX_DEF_PENDING+1);
4707 sky2->rx_pending = RX_DEF_PENDING;
4708
4709 hw->dev[port] = dev;
4710
4711 sky2->port = port;
4712
4713 dev->hw_features |= NETIF_F_IP_CSUM | NETIF_F_SG | NETIF_F_TSO;
4714
4715 if (highmem)
4716 dev->features |= NETIF_F_HIGHDMA;
4717
4718 /* Enable receive hashing unless hardware is known broken */
4719 if (!(hw->flags & SKY2_HW_RSS_BROKEN))
4720 dev->hw_features |= NETIF_F_RXHASH;
4721
4722 if (!(hw->flags & SKY2_HW_VLAN_BROKEN)) {
4723 dev->hw_features |= NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX;
4724 dev->vlan_features |= SKY2_VLAN_OFFLOADS;
4725 }
4726
4727 dev->features |= dev->hw_features;
4728
4729 /* read the mac address */
4730 memcpy_fromio(dev->dev_addr, hw->regs + B2_MAC_1 + port * 8, ETH_ALEN);
4731 memcpy(dev->perm_addr, dev->dev_addr, dev->addr_len);
4732
4733 return dev;
4734}
4735
4736static void __devinit sky2_show_addr(struct net_device *dev)
4737{
4738 const struct sky2_port *sky2 = netdev_priv(dev);
4739
4740 netif_info(sky2, probe, dev, "addr %pM\n", dev->dev_addr);
4741}
4742
4743/* Handle software interrupt used during MSI test */
4744static irqreturn_t __devinit sky2_test_intr(int irq, void *dev_id)
4745{
4746 struct sky2_hw *hw = dev_id;
4747 u32 status = sky2_read32(hw, B0_Y2_SP_ISRC2);
4748
4749 if (status == 0)
4750 return IRQ_NONE;
4751
4752 if (status & Y2_IS_IRQ_SW) {
4753 hw->flags |= SKY2_HW_USE_MSI;
4754 wake_up(&hw->msi_wait);
4755 sky2_write8(hw, B0_CTST, CS_CL_SW_IRQ);
4756 }
4757 sky2_write32(hw, B0_Y2_SP_ICR, 2);
4758
4759 return IRQ_HANDLED;
4760}
4761
4762 /* Test interrupt path by forcing a software IRQ */
4763static int __devinit sky2_test_msi(struct sky2_hw *hw)
4764{
4765 struct pci_dev *pdev = hw->pdev;
4766 int err;
4767
4768 init_waitqueue_head(&hw->msi_wait);
4769
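 /* Unmask only the software IRQ, fire it via the test control
 * register, and see whether the (possibly MSI) handler runs
 * within 100 ms.
 */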
4770 sky2_write32(hw, B0_IMSK, Y2_IS_IRQ_SW);
4771
4772 err = request_irq(pdev->irq, sky2_test_intr, 0, DRV_NAME, hw);
4773 if (err) {
4774 dev_err(&pdev->dev, "cannot assign irq %d\n", pdev->irq);
4775 return err;
4776 }
4777
4778 sky2_write8(hw, B0_CTST, CS_ST_SW_IRQ);
4779 sky2_read8(hw, B0_CTST);
4780
4781 wait_event_timeout(hw->msi_wait, (hw->flags & SKY2_HW_USE_MSI), HZ/10);
4782
4783 if (!(hw->flags & SKY2_HW_USE_MSI)) {
4784 /* MSI test failed, go back to INTx mode */
4785 dev_info(&pdev->dev, "No interrupt generated using MSI, "
4786 "switching to INTx mode.\n");
4787
4788 err = -EOPNOTSUPP;
4789 sky2_write8(hw, B0_CTST, CS_CL_SW_IRQ);
4790 }
4791
4792 sky2_write32(hw, B0_IMSK, 0);
4793 sky2_read32(hw, B0_IMSK);
4794
4795 free_irq(pdev->irq, hw);
4796
4797 return err;
4798}
4799
4800/* This driver supports the Yukon-2 chipset only */
4801static const char *sky2_name(u8 chipid, char *buf, int sz)
4802{
4803 const char *name[] = {
4804 "XL", /* 0xb3 */
4805 "EC Ultra", /* 0xb4 */
4806 "Extreme", /* 0xb5 */
4807 "EC", /* 0xb6 */
4808 "FE", /* 0xb7 */
4809 "FE+", /* 0xb8 */
4810 "Supreme", /* 0xb9 */
4811 "UL 2", /* 0xba */
4812 "Unknown", /* 0xbb */
4813 "Optima", /* 0xbc */
4814 "Optima Prime", /* 0xbd */
4815 "Optima 2", /* 0xbe */
4816 };
4817
4818 if (chipid >= CHIP_ID_YUKON_XL && chipid <= CHIP_ID_YUKON_OP_2)
4819 strncpy(buf, name[chipid - CHIP_ID_YUKON_XL], sz);
4820 else
4821 snprintf(buf, sz, "(chip %#x)", chipid);
4822 return buf;
4823}
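/*
 * Illustrative usage (not in the original source, consistent with the chip
 * IDs defined in sky2.h): sky2_name(CHIP_ID_YUKON_EC, buf, sizeof(buf))
 * copies "EC" into buf, while an out-of-range id such as 0xc0 yields
 * "(chip 0xc0)".
 */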
4824
4825static int __devinit sky2_probe(struct pci_dev *pdev,
4826 const struct pci_device_id *ent)
4827{
4828 struct net_device *dev, *dev1;
4829 struct sky2_hw *hw;
4830 int err, using_dac = 0, wol_default;
4831 u32 reg;
4832 char buf1[16];
4833
4834 err = pci_enable_device(pdev);
4835 if (err) {
4836 dev_err(&pdev->dev, "cannot enable PCI device\n");
4837 goto err_out;
4838 }
4839
4840	/* Get configuration information.
4841	 * Note: do the regular PCI config access only once, to test for HW
4842	 *	 issues; all other PCI accesses go through shared memory, for
4843	 *	 speed and to avoid MMCONFIG problems.
4844	 */
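	/* For illustration (added sketch, not part of the original flow):
	 * once hw->regs is mapped later in this function, the same config
	 * registers are reachable through the Y2_CFG_SPC window defined in
	 * sky2.h, roughly:
	 *
	 *	reg = sky2_read32(hw, Y2_CFG_SPC + PCI_DEV_REG2);
	 */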
4845 err = pci_read_config_dword(pdev, PCI_DEV_REG2, &reg);
4846 if (err) {
4847 dev_err(&pdev->dev, "PCI read config failed\n");
4848 goto err_out;
4849 }
4850
4851	if (~reg == 0) {
4852		dev_err(&pdev->dev, "PCI configuration read error\n");
		err = -EIO;	/* otherwise the probe would return 0 here */
4853		goto err_out;
4854	}
4855
4856 err = pci_request_regions(pdev, DRV_NAME);
4857 if (err) {
4858 dev_err(&pdev->dev, "cannot obtain PCI resources\n");
4859 goto err_out_disable;
4860 }
4861
4862 pci_set_master(pdev);
4863
4864 if (sizeof(dma_addr_t) > sizeof(u32) &&
4865 !(err = pci_set_dma_mask(pdev, DMA_BIT_MASK(64)))) {
4866 using_dac = 1;
4867 err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64));
4868 if (err < 0) {
4869 dev_err(&pdev->dev, "unable to obtain 64 bit DMA "
4870 "for consistent allocations\n");
4871 goto err_out_free_regions;
4872 }
4873 } else {
4874 err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
4875 if (err) {
4876 dev_err(&pdev->dev, "no usable DMA configuration\n");
4877 goto err_out_free_regions;
4878 }
4879 }
4880
4881
4882#ifdef __BIG_ENDIAN
4883 /* The sk98lin vendor driver uses hardware byte swapping but
4884 * this driver uses software swapping.
4885 */
4886 reg &= ~PCI_REV_DESC;
4887 err = pci_write_config_dword(pdev, PCI_DEV_REG2, reg);
4888 if (err) {
4889 dev_err(&pdev->dev, "PCI write config failed\n");
4890 goto err_out_free_regions;
4891 }
4892#endif
4893
4894 wol_default = device_may_wakeup(&pdev->dev) ? WAKE_MAGIC : 0;
4895
4896 err = -ENOMEM;
4897
4898 hw = kzalloc(sizeof(*hw) + strlen(DRV_NAME "@pci:")
4899 + strlen(pci_name(pdev)) + 1, GFP_KERNEL);
4900 if (!hw) {
4901 dev_err(&pdev->dev, "cannot allocate hardware struct\n");
4902 goto err_out_free_regions;
4903 }
4904
4905 hw->pdev = pdev;
4906 sprintf(hw->irq_name, DRV_NAME "@pci:%s", pci_name(pdev));
4907
4908 hw->regs = ioremap_nocache(pci_resource_start(pdev, 0), 0x4000);
4909 if (!hw->regs) {
4910 dev_err(&pdev->dev, "cannot map device registers\n");
4911 goto err_out_free_hw;
4912 }
4913
4914 err = sky2_init(hw);
4915 if (err)
4916 goto err_out_iounmap;
4917
4918 /* ring for status responses */
4919 hw->st_size = hw->ports * roundup_pow_of_two(3*RX_MAX_PENDING + TX_MAX_PENDING);
4920 hw->st_le = pci_alloc_consistent(pdev, hw->st_size * sizeof(struct sky2_status_le),
4921 &hw->st_dma);
4922 if (!hw->st_le)
4923 goto err_out_reset;
4924
4925 dev_info(&pdev->dev, "Yukon-2 %s chip revision %d\n",
4926 sky2_name(hw->chip_id, buf1, sizeof(buf1)), hw->chip_rev);
4927
4928 sky2_reset(hw);
4929
4930 dev = sky2_init_netdev(hw, 0, using_dac, wol_default);
4931 if (!dev) {
4932 err = -ENOMEM;
4933 goto err_out_free_pci;
4934 }
4935
4936 if (!disable_msi && pci_enable_msi(pdev) == 0) {
4937 err = sky2_test_msi(hw);
4938 if (err == -EOPNOTSUPP)
4939 pci_disable_msi(pdev);
4940 else if (err)
4941 goto err_out_free_netdev;
4942 }
4943
4944 err = register_netdev(dev);
4945 if (err) {
4946 dev_err(&pdev->dev, "cannot register net device\n");
4947 goto err_out_free_netdev;
4948 }
4949
4950 netif_carrier_off(dev);
4951
4952 netif_napi_add(dev, &hw->napi, sky2_poll, NAPI_WEIGHT);
4953
4954 sky2_show_addr(dev);
4955
4956 if (hw->ports > 1) {
4957 dev1 = sky2_init_netdev(hw, 1, using_dac, wol_default);
4958 if (!dev1) {
4959 err = -ENOMEM;
4960 goto err_out_unregister;
4961 }
4962
4963 err = register_netdev(dev1);
4964 if (err) {
4965 dev_err(&pdev->dev, "cannot register second net device\n");
4966 goto err_out_free_dev1;
4967 }
4968
4969 err = sky2_setup_irq(hw, hw->irq_name);
4970 if (err)
4971 goto err_out_unregister_dev1;
4972
4973 sky2_show_addr(dev1);
4974 }
4975
4976 setup_timer(&hw->watchdog_timer, sky2_watchdog, (unsigned long) hw);
4977 INIT_WORK(&hw->restart_work, sky2_restart);
4978
4979 pci_set_drvdata(pdev, hw);
4980 pdev->d3_delay = 150;
4981
4982 return 0;
4983
4984err_out_unregister_dev1:
4985 unregister_netdev(dev1);
4986err_out_free_dev1:
4987 free_netdev(dev1);
4988err_out_unregister:
4989 if (hw->flags & SKY2_HW_USE_MSI)
4990 pci_disable_msi(pdev);
4991 unregister_netdev(dev);
4992err_out_free_netdev:
4993 free_netdev(dev);
4994err_out_free_pci:
4995 pci_free_consistent(pdev, hw->st_size * sizeof(struct sky2_status_le),
4996 hw->st_le, hw->st_dma);
4997err_out_reset:
4998 sky2_write8(hw, B0_CTST, CS_RST_SET);
4999err_out_iounmap:
5000 iounmap(hw->regs);
5001err_out_free_hw:
5002 kfree(hw);
5003err_out_free_regions:
5004 pci_release_regions(pdev);
5005err_out_disable:
5006 pci_disable_device(pdev);
5007err_out:
5008 pci_set_drvdata(pdev, NULL);
5009 return err;
5010}
5011
5012static void __devexit sky2_remove(struct pci_dev *pdev)
5013{
5014 struct sky2_hw *hw = pci_get_drvdata(pdev);
5015 int i;
5016
5017 if (!hw)
5018 return;
5019
5020 del_timer_sync(&hw->watchdog_timer);
5021 cancel_work_sync(&hw->restart_work);
5022
5023 for (i = hw->ports-1; i >= 0; --i)
5024 unregister_netdev(hw->dev[i]);
5025
5026 sky2_write32(hw, B0_IMSK, 0);
5027 sky2_read32(hw, B0_IMSK);
5028
5029 sky2_power_aux(hw);
5030
5031 sky2_write8(hw, B0_CTST, CS_RST_SET);
5032 sky2_read8(hw, B0_CTST);
5033
5034 if (hw->ports > 1) {
5035 napi_disable(&hw->napi);
5036 free_irq(pdev->irq, hw);
5037 }
5038
5039 if (hw->flags & SKY2_HW_USE_MSI)
5040 pci_disable_msi(pdev);
5041 pci_free_consistent(pdev, hw->st_size * sizeof(struct sky2_status_le),
5042 hw->st_le, hw->st_dma);
5043 pci_release_regions(pdev);
5044 pci_disable_device(pdev);
5045
5046 for (i = hw->ports-1; i >= 0; --i)
5047 free_netdev(hw->dev[i]);
5048
5049 iounmap(hw->regs);
5050 kfree(hw);
5051
5052 pci_set_drvdata(pdev, NULL);
5053}
5054
5055static int sky2_suspend(struct device *dev)
5056{
5057 struct pci_dev *pdev = to_pci_dev(dev);
5058 struct sky2_hw *hw = pci_get_drvdata(pdev);
5059 int i;
5060
5061 if (!hw)
5062 return 0;
5063
5064 del_timer_sync(&hw->watchdog_timer);
5065 cancel_work_sync(&hw->restart_work);
5066
5067 rtnl_lock();
5068
5069 sky2_all_down(hw);
5070 for (i = 0; i < hw->ports; i++) {
5071 struct net_device *dev = hw->dev[i];
5072 struct sky2_port *sky2 = netdev_priv(dev);
5073
5074 if (sky2->wol)
5075 sky2_wol_init(sky2);
5076 }
5077
5078 sky2_power_aux(hw);
5079 rtnl_unlock();
5080
5081 return 0;
5082}
5083
5084#ifdef CONFIG_PM_SLEEP
5085static int sky2_resume(struct device *dev)
5086{
5087 struct pci_dev *pdev = to_pci_dev(dev);
5088 struct sky2_hw *hw = pci_get_drvdata(pdev);
5089 int err;
5090
5091 if (!hw)
5092 return 0;
5093
5094 /* Re-enable all clocks */
5095 err = pci_write_config_dword(pdev, PCI_DEV_REG3, 0);
5096 if (err) {
5097 dev_err(&pdev->dev, "PCI write config failed\n");
5098 goto out;
5099 }
5100
5101 rtnl_lock();
5102 sky2_reset(hw);
5103 sky2_all_up(hw);
5104 rtnl_unlock();
5105
5106 return 0;
5107out:
5108
5109 dev_err(&pdev->dev, "resume failed (%d)\n", err);
5110 pci_disable_device(pdev);
5111 return err;
5112}
5113
5114static SIMPLE_DEV_PM_OPS(sky2_pm_ops, sky2_suspend, sky2_resume);
5115#define SKY2_PM_OPS (&sky2_pm_ops)
5116
5117#else
5118
5119#define SKY2_PM_OPS NULL
5120#endif
5121
5122static void sky2_shutdown(struct pci_dev *pdev)
5123{
5124 sky2_suspend(&pdev->dev);
5125 pci_wake_from_d3(pdev, device_may_wakeup(&pdev->dev));
5126 pci_set_power_state(pdev, PCI_D3hot);
5127}
5128
5129static struct pci_driver sky2_driver = {
5130 .name = DRV_NAME,
5131 .id_table = sky2_id_table,
5132 .probe = sky2_probe,
5133 .remove = __devexit_p(sky2_remove),
5134 .shutdown = sky2_shutdown,
5135 .driver.pm = SKY2_PM_OPS,
5136};
5137
5138static int __init sky2_init_module(void)
5139{
5140 pr_info("driver version " DRV_VERSION "\n");
5141
5142 sky2_debug_init();
5143 return pci_register_driver(&sky2_driver);
5144}
5145
5146static void __exit sky2_cleanup_module(void)
5147{
5148 pci_unregister_driver(&sky2_driver);
5149 sky2_debug_cleanup();
5150}
5151
5152module_init(sky2_init_module);
5153module_exit(sky2_cleanup_module);
5154
5155MODULE_DESCRIPTION("Marvell Yukon 2 Gigabit Ethernet driver");
5156MODULE_AUTHOR("Stephen Hemminger <shemminger@linux-foundation.org>");
5157MODULE_LICENSE("GPL");
5158MODULE_VERSION(DRV_VERSION);
diff --git a/drivers/net/ethernet/marvell/sky2.h b/drivers/net/ethernet/marvell/sky2.h
new file mode 100644
index 000000000000..0af31b8b5f10
--- /dev/null
+++ b/drivers/net/ethernet/marvell/sky2.h
@@ -0,0 +1,2427 @@
1/*
2 * Definitions for the new Marvell Yukon 2 driver.
3 */
4#ifndef _SKY2_H
5#define _SKY2_H
6
7#define ETH_JUMBO_MTU 9000 /* Maximum MTU supported */
8
9/* PCI config registers */
10enum {
11 PCI_DEV_REG1 = 0x40,
12 PCI_DEV_REG2 = 0x44,
13 PCI_DEV_STATUS = 0x7c,
14 PCI_DEV_REG3 = 0x80,
15 PCI_DEV_REG4 = 0x84,
16 PCI_DEV_REG5 = 0x88,
17 PCI_CFG_REG_0 = 0x90,
18 PCI_CFG_REG_1 = 0x94,
19
20 PSM_CONFIG_REG0 = 0x98,
21 PSM_CONFIG_REG1 = 0x9C,
22 PSM_CONFIG_REG2 = 0x160,
23 PSM_CONFIG_REG3 = 0x164,
24 PSM_CONFIG_REG4 = 0x168,
25
26};
27
28/* Yukon-2 */
29enum pci_dev_reg_1 {
30 PCI_Y2_PIG_ENA = 1<<31, /* Enable Plug-in-Go (YUKON-2) */
31 PCI_Y2_DLL_DIS = 1<<30, /* Disable PCI DLL (YUKON-2) */
32 PCI_SW_PWR_ON_RST= 1<<30, /* SW Power on Reset (Yukon-EX) */
33 PCI_Y2_PHY2_COMA = 1<<29, /* Set PHY 2 to Coma Mode (YUKON-2) */
34 PCI_Y2_PHY1_COMA = 1<<28, /* Set PHY 1 to Coma Mode (YUKON-2) */
35 PCI_Y2_PHY2_POWD = 1<<27, /* Set PHY 2 to Power Down (YUKON-2) */
36 PCI_Y2_PHY1_POWD = 1<<26, /* Set PHY 1 to Power Down (YUKON-2) */
37 PCI_Y2_PME_LEGACY= 1<<15, /* PCI Express legacy power management mode */
38
39 PCI_PHY_LNK_TIM_MSK= 3L<<8,/* Bit 9.. 8: GPHY Link Trigger Timer */
40 PCI_ENA_L1_EVENT = 1<<7, /* Enable PEX L1 Event */
41 PCI_ENA_GPHY_LNK = 1<<6, /* Enable PEX L1 on GPHY Link down */
42 PCI_FORCE_PEX_L1 = 1<<5, /* Force to PEX L1 */
43};
44
45enum pci_dev_reg_2 {
46 PCI_VPD_WR_THR = 0xffL<<24, /* Bit 31..24: VPD Write Threshold */
47 PCI_DEV_SEL = 0x7fL<<17, /* Bit 23..17: EEPROM Device Select */
48 PCI_VPD_ROM_SZ = 7L<<14, /* Bit 16..14: VPD ROM Size */
49
50 PCI_PATCH_DIR = 0xfL<<8, /* Bit 11.. 8: Ext Patches dir 3..0 */
51 PCI_EXT_PATCHS = 0xfL<<4, /* Bit 7.. 4: Extended Patches 3..0 */
52 PCI_EN_DUMMY_RD = 1<<3, /* Enable Dummy Read */
53 PCI_REV_DESC = 1<<2, /* Reverse Desc. Bytes */
54
55 PCI_USEDATA64 = 1<<0, /* Use 64Bit Data bus ext */
56};
57
58/* PCI_OUR_REG_3 32 bit Our Register 3 (Yukon-ECU only) */
59enum pci_dev_reg_3 {
60 P_CLK_ASF_REGS_DIS = 1<<18,/* Disable Clock ASF (Yukon-Ext.) */
61 P_CLK_COR_REGS_D0_DIS = 1<<17,/* Disable Clock Core Regs D0 */
62 P_CLK_MACSEC_DIS = 1<<17,/* Disable Clock MACSec (Yukon-Ext.) */
63 P_CLK_PCI_REGS_D0_DIS = 1<<16,/* Disable Clock PCI Regs D0 */
64 P_CLK_COR_YTB_ARB_DIS = 1<<15,/* Disable Clock YTB Arbiter */
65 P_CLK_MAC_LNK1_D3_DIS = 1<<14,/* Disable Clock MAC Link1 D3 */
66 P_CLK_COR_LNK1_D0_DIS = 1<<13,/* Disable Clock Core Link1 D0 */
67 P_CLK_MAC_LNK1_D0_DIS = 1<<12,/* Disable Clock MAC Link1 D0 */
68 P_CLK_COR_LNK1_D3_DIS = 1<<11,/* Disable Clock Core Link1 D3 */
69 P_CLK_PCI_MST_ARB_DIS = 1<<10,/* Disable Clock PCI Master Arb. */
70 P_CLK_COR_REGS_D3_DIS = 1<<9, /* Disable Clock Core Regs D3 */
71 P_CLK_PCI_REGS_D3_DIS = 1<<8, /* Disable Clock PCI Regs D3 */
72 P_CLK_REF_LNK1_GM_DIS = 1<<7, /* Disable Clock Ref. Link1 GMAC */
73 P_CLK_COR_LNK1_GM_DIS = 1<<6, /* Disable Clock Core Link1 GMAC */
74 P_CLK_PCI_COMMON_DIS = 1<<5, /* Disable Clock PCI Common */
75 P_CLK_COR_COMMON_DIS = 1<<4, /* Disable Clock Core Common */
76 P_CLK_PCI_LNK1_BMU_DIS = 1<<3, /* Disable Clock PCI Link1 BMU */
77 P_CLK_COR_LNK1_BMU_DIS = 1<<2, /* Disable Clock Core Link1 BMU */
78 P_CLK_PCI_LNK1_BIU_DIS = 1<<1, /* Disable Clock PCI Link1 BIU */
79 P_CLK_COR_LNK1_BIU_DIS = 1<<0, /* Disable Clock Core Link1 BIU */
80 PCIE_OUR3_WOL_D3_COLD_SET = P_CLK_ASF_REGS_DIS |
81 P_CLK_COR_REGS_D0_DIS |
82 P_CLK_COR_LNK1_D0_DIS |
83 P_CLK_MAC_LNK1_D0_DIS |
84 P_CLK_PCI_MST_ARB_DIS |
85 P_CLK_COR_COMMON_DIS |
86 P_CLK_COR_LNK1_BMU_DIS,
87};
88
89/* PCI_OUR_REG_4 32 bit Our Register 4 (Yukon-ECU only) */
90enum pci_dev_reg_4 {
91 /* (Link Training & Status State Machine) */
92 P_PEX_LTSSM_STAT_MSK = 0x7fL<<25, /* Bit 31..25: PEX LTSSM Mask */
93#define P_PEX_LTSSM_STAT(x) ((x << 25) & P_PEX_LTSSM_STAT_MSK)
94 P_PEX_LTSSM_L1_STAT = 0x34,
95 P_PEX_LTSSM_DET_STAT = 0x01,
96 P_TIMER_VALUE_MSK = 0xffL<<16, /* Bit 23..16: Timer Value Mask */
97 /* (Active State Power Management) */
98 P_FORCE_ASPM_REQUEST = 1<<15, /* Force ASPM Request (A1 only) */
99 P_ASPM_GPHY_LINK_DOWN = 1<<14, /* GPHY Link Down (A1 only) */
100 P_ASPM_INT_FIFO_EMPTY = 1<<13, /* Internal FIFO Empty (A1 only) */
101 P_ASPM_CLKRUN_REQUEST = 1<<12, /* CLKRUN Request (A1 only) */
102
103 P_ASPM_FORCE_CLKREQ_ENA = 1<<4, /* Force CLKREQ Enable (A1b only) */
104 P_ASPM_CLKREQ_PAD_CTL = 1<<3, /* CLKREQ PAD Control (A1 only) */
105 P_ASPM_A1_MODE_SELECT = 1<<2, /* A1 Mode Select (A1 only) */
106 P_CLK_GATE_PEX_UNIT_ENA = 1<<1, /* Enable Gate PEX Unit Clock */
107 P_CLK_GATE_ROOT_COR_ENA = 1<<0, /* Enable Gate Root Core Clock */
108 P_ASPM_CONTROL_MSK = P_FORCE_ASPM_REQUEST | P_ASPM_GPHY_LINK_DOWN
109 | P_ASPM_CLKRUN_REQUEST | P_ASPM_INT_FIFO_EMPTY,
110};
111
112/* PCI_OUR_REG_5 32 bit Our Register 5 (Yukon-ECU only) */
113enum pci_dev_reg_5 {
114 /* Bit 31..27: for A3 & later */
115 P_CTL_DIV_CORE_CLK_ENA = 1<<31, /* Divide Core Clock Enable */
116 P_CTL_SRESET_VMAIN_AV = 1<<30, /* Soft Reset for Vmain_av De-Glitch */
117 P_CTL_BYPASS_VMAIN_AV = 1<<29, /* Bypass En. for Vmain_av De-Glitch */
118 P_CTL_TIM_VMAIN_AV_MSK = 3<<27, /* Bit 28..27: Timer Vmain_av Mask */
119 /* Bit 26..16: Release Clock on Event */
120 P_REL_PCIE_RST_DE_ASS = 1<<26, /* PCIe Reset De-Asserted */
121 P_REL_GPHY_REC_PACKET = 1<<25, /* GPHY Received Packet */
122 P_REL_INT_FIFO_N_EMPTY = 1<<24, /* Internal FIFO Not Empty */
123 P_REL_MAIN_PWR_AVAIL = 1<<23, /* Main Power Available */
124 P_REL_CLKRUN_REQ_REL = 1<<22, /* CLKRUN Request Release */
125 P_REL_PCIE_RESET_ASS = 1<<21, /* PCIe Reset Asserted */
126 P_REL_PME_ASSERTED = 1<<20, /* PME Asserted */
127 P_REL_PCIE_EXIT_L1_ST = 1<<19, /* PCIe Exit L1 State */
128 P_REL_LOADER_NOT_FIN = 1<<18, /* EPROM Loader Not Finished */
129 P_REL_PCIE_RX_EX_IDLE = 1<<17, /* PCIe Rx Exit Electrical Idle State */
130 P_REL_GPHY_LINK_UP = 1<<16, /* GPHY Link Up */
131
132 /* Bit 10.. 0: Mask for Gate Clock */
133 P_GAT_PCIE_RST_ASSERTED = 1<<10,/* PCIe Reset Asserted */
134 P_GAT_GPHY_N_REC_PACKET = 1<<9, /* GPHY Not Received Packet */
135 P_GAT_INT_FIFO_EMPTY = 1<<8, /* Internal FIFO Empty */
136 P_GAT_MAIN_PWR_N_AVAIL = 1<<7, /* Main Power Not Available */
137 P_GAT_CLKRUN_REQ_REL = 1<<6, /* CLKRUN Not Requested */
138 P_GAT_PCIE_RESET_ASS = 1<<5, /* PCIe Reset Asserted */
139 P_GAT_PME_DE_ASSERTED = 1<<4, /* PME De-Asserted */
140 P_GAT_PCIE_ENTER_L1_ST = 1<<3, /* PCIe Enter L1 State */
141 P_GAT_LOADER_FINISHED = 1<<2, /* EPROM Loader Finished */
142 P_GAT_PCIE_RX_EL_IDLE = 1<<1, /* PCIe Rx Electrical Idle State */
143 P_GAT_GPHY_LINK_DOWN = 1<<0, /* GPHY Link Down */
144
145 PCIE_OUR5_EVENT_CLK_D3_SET = P_REL_GPHY_REC_PACKET |
146 P_REL_INT_FIFO_N_EMPTY |
147 P_REL_PCIE_EXIT_L1_ST |
148 P_REL_PCIE_RX_EX_IDLE |
149 P_GAT_GPHY_N_REC_PACKET |
150 P_GAT_INT_FIFO_EMPTY |
151 P_GAT_PCIE_ENTER_L1_ST |
152 P_GAT_PCIE_RX_EL_IDLE,
153};
154
155/* PCI_CFG_REG_1 32 bit Config Register 1 (Yukon-Ext only) */
156enum pci_cfg_reg1 {
157 P_CF1_DIS_REL_EVT_RST = 1<<24, /* Dis. Rel. Event during PCIE reset */
158 /* Bit 23..21: Release Clock on Event */
159 P_CF1_REL_LDR_NOT_FIN = 1<<23, /* EEPROM Loader Not Finished */
160 P_CF1_REL_VMAIN_AVLBL = 1<<22, /* Vmain available */
161 P_CF1_REL_PCIE_RESET = 1<<21, /* PCI-E reset */
162 /* Bit 20..18: Gate Clock on Event */
163 P_CF1_GAT_LDR_NOT_FIN = 1<<20, /* EEPROM Loader Finished */
164 P_CF1_GAT_PCIE_RX_IDLE = 1<<19, /* PCI-E Rx Electrical idle */
165 P_CF1_GAT_PCIE_RESET = 1<<18, /* PCI-E Reset */
166 P_CF1_PRST_PHY_CLKREQ = 1<<17, /* Enable PCI-E rst & PM2PHY gen. CLKREQ */
167 P_CF1_PCIE_RST_CLKREQ = 1<<16, /* Enable PCI-E rst generate CLKREQ */
168
169 P_CF1_ENA_CFG_LDR_DONE = 1<<8, /* Enable core level Config loader done */
170
171 P_CF1_ENA_TXBMU_RD_IDLE = 1<<1, /* Enable TX BMU Read IDLE for ASPM */
172 P_CF1_ENA_TXBMU_WR_IDLE = 1<<0, /* Enable TX BMU Write IDLE for ASPM */
173
174 PCIE_CFG1_EVENT_CLK_D3_SET = P_CF1_DIS_REL_EVT_RST |
175 P_CF1_REL_LDR_NOT_FIN |
176 P_CF1_REL_VMAIN_AVLBL |
177 P_CF1_REL_PCIE_RESET |
178 P_CF1_GAT_LDR_NOT_FIN |
179 P_CF1_GAT_PCIE_RESET |
180 P_CF1_PRST_PHY_CLKREQ |
181 P_CF1_ENA_CFG_LDR_DONE |
182 P_CF1_ENA_TXBMU_RD_IDLE |
183 P_CF1_ENA_TXBMU_WR_IDLE,
184};
185
186/* Yukon-Optima */
187enum {
188 PSM_CONFIG_REG1_AC_PRESENT_STATUS = 1<<31, /* AC Present Status */
189
190 PSM_CONFIG_REG1_PTP_CLK_SEL = 1<<29, /* PTP Clock Select */
191 PSM_CONFIG_REG1_PTP_MODE = 1<<28, /* PTP Mode */
192
193 PSM_CONFIG_REG1_MUX_PHY_LINK = 1<<27, /* PHY Energy Detect Event */
194
195 PSM_CONFIG_REG1_EN_PIN63_AC_PRESENT = 1<<26, /* Enable LED_DUPLEX for ac_present */
196 PSM_CONFIG_REG1_EN_PCIE_TIMER = 1<<25, /* Enable PCIe Timer */
197 PSM_CONFIG_REG1_EN_SPU_TIMER = 1<<24, /* Enable SPU Timer */
198 PSM_CONFIG_REG1_POLARITY_AC_PRESENT = 1<<23, /* AC Present Polarity */
199
200 PSM_CONFIG_REG1_EN_AC_PRESENT = 1<<21, /* Enable AC Present */
201
202 PSM_CONFIG_REG1_EN_GPHY_INT_PSM = 1<<20, /* Enable GPHY INT for PSM */
203 PSM_CONFIG_REG1_DIS_PSM_TIMER = 1<<19, /* Disable PSM Timer */
204};
205
206/* Yukon-Supreme */
207enum {
208 PSM_CONFIG_REG1_GPHY_ENERGY_STS = 1<<31, /* GPHY Energy Detect Status */
209
210 PSM_CONFIG_REG1_UART_MODE_MSK = 3<<29, /* UART_Mode */
211 PSM_CONFIG_REG1_CLK_RUN_ASF = 1<<28, /* Enable Clock Free Running for ASF Subsystem */
212 PSM_CONFIG_REG1_UART_CLK_DISABLE= 1<<27, /* Disable UART clock */
213 PSM_CONFIG_REG1_VAUX_ONE = 1<<26, /* Tie internal Vaux to 1'b1 */
214 PSM_CONFIG_REG1_UART_FC_RI_VAL = 1<<25, /* Default value for UART_RI_n */
215 PSM_CONFIG_REG1_UART_FC_DCD_VAL = 1<<24, /* Default value for UART_DCD_n */
216 PSM_CONFIG_REG1_UART_FC_DSR_VAL = 1<<23, /* Default value for UART_DSR_n */
217 PSM_CONFIG_REG1_UART_FC_CTS_VAL = 1<<22, /* Default value for UART_CTS_n */
218 PSM_CONFIG_REG1_LATCH_VAUX = 1<<21, /* Enable Latch current Vaux_avlbl */
219 PSM_CONFIG_REG1_FORCE_TESTMODE_INPUT= 1<<20, /* Force Testmode pin as input PAD */
220 PSM_CONFIG_REG1_UART_RST = 1<<19, /* UART_RST */
221 PSM_CONFIG_REG1_PSM_PCIE_L1_POL = 1<<18, /* PCIE L1 Event Polarity for PSM */
222 PSM_CONFIG_REG1_TIMER_STAT = 1<<17, /* PSM Timer Status */
223 PSM_CONFIG_REG1_GPHY_INT = 1<<16, /* GPHY INT Status */
224 PSM_CONFIG_REG1_FORCE_TESTMODE_ZERO= 1<<15, /* Force internal Testmode as 1'b0 */
225 PSM_CONFIG_REG1_EN_INT_ASPM_CLKREQ = 1<<14, /* ENABLE INT for CLKRUN on ASPM and CLKREQ */
226 PSM_CONFIG_REG1_EN_SND_TASK_ASPM_CLKREQ = 1<<13, /* ENABLE Snd_task for CLKRUN on ASPM and CLKREQ */
227 PSM_CONFIG_REG1_DIS_CLK_GATE_SND_TASK = 1<<12, /* Disable CLK_GATE control snd_task */
228 PSM_CONFIG_REG1_DIS_FF_CHIAN_SND_INTA = 1<<11, /* Disable flip-flop chain for sndmsg_inta */
229
230 PSM_CONFIG_REG1_DIS_LOADER = 1<<9, /* Disable Loader SM after PSM Goes back to IDLE */
231 PSM_CONFIG_REG1_DO_PWDN = 1<<8, /* Do Power Down, Start PSM Scheme */
232 PSM_CONFIG_REG1_DIS_PIG = 1<<7, /* Disable Plug-in-Go SM after PSM Goes back to IDLE */
233 PSM_CONFIG_REG1_DIS_PERST = 1<<6, /* Disable Internal PCIe Reset after PSM Goes back to IDLE */
234 PSM_CONFIG_REG1_EN_REG18_PD = 1<<5, /* Enable REG18 Power Down for PSM */
235 PSM_CONFIG_REG1_EN_PSM_LOAD = 1<<4, /* Disable EEPROM Loader after PSM Goes back to IDLE */
236 PSM_CONFIG_REG1_EN_PSM_HOT_RST = 1<<3, /* Enable PCIe Hot Reset for PSM */
237 PSM_CONFIG_REG1_EN_PSM_PERST = 1<<2, /* Enable PCIe Reset Event for PSM */
238 PSM_CONFIG_REG1_EN_PSM_PCIE_L1 = 1<<1, /* Enable PCIe L1 Event for PSM */
239 PSM_CONFIG_REG1_EN_PSM = 1<<0, /* Enable PSM Scheme */
240};
241
242/* PSM_CONFIG_REG4 0x0168 PSM Config Register 4 */
243enum {
244 /* PHY Link Detect Timer */
245 PSM_CONFIG_REG4_TIMER_PHY_LINK_DETECT_MSK = 0xf<<4,
246 PSM_CONFIG_REG4_TIMER_PHY_LINK_DETECT_BASE = 4,
247
248 PSM_CONFIG_REG4_DEBUG_TIMER = 1<<1, /* Debug Timer */
249 PSM_CONFIG_REG4_RST_PHY_LINK_DETECT = 1<<0, /* Reset GPHY Link Detect */
250};
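/*
 * Example (illustrative): a PHY link detect timer value t is placed in
 * bits 7..4 of PSM_CONFIG_REG4, i.e.
 * (t << PSM_CONFIG_REG4_TIMER_PHY_LINK_DETECT_BASE) &
 * PSM_CONFIG_REG4_TIMER_PHY_LINK_DETECT_MSK, so t == 5 encodes as 0x50.
 */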
251
252
253#define PCI_STATUS_ERROR_BITS (PCI_STATUS_DETECTED_PARITY | \
254 PCI_STATUS_SIG_SYSTEM_ERROR | \
255 PCI_STATUS_REC_MASTER_ABORT | \
256 PCI_STATUS_REC_TARGET_ABORT | \
257 PCI_STATUS_PARITY)
258
259enum csr_regs {
260 B0_RAP = 0x0000,
261 B0_CTST = 0x0004,
262
263 B0_POWER_CTRL = 0x0007,
264 B0_ISRC = 0x0008,
265 B0_IMSK = 0x000c,
266 B0_HWE_ISRC = 0x0010,
267 B0_HWE_IMSK = 0x0014,
268
269 /* Special ISR registers (Yukon-2 only) */
270 B0_Y2_SP_ISRC2 = 0x001c,
271 B0_Y2_SP_ISRC3 = 0x0020,
272 B0_Y2_SP_EISR = 0x0024,
273 B0_Y2_SP_LISR = 0x0028,
274 B0_Y2_SP_ICR = 0x002c,
275
276 B2_MAC_1 = 0x0100,
277 B2_MAC_2 = 0x0108,
278 B2_MAC_3 = 0x0110,
279 B2_CONN_TYP = 0x0118,
280 B2_PMD_TYP = 0x0119,
281 B2_MAC_CFG = 0x011a,
282 B2_CHIP_ID = 0x011b,
283 B2_E_0 = 0x011c,
284
285 B2_Y2_CLK_GATE = 0x011d,
286 B2_Y2_HW_RES = 0x011e,
287 B2_E_3 = 0x011f,
288 B2_Y2_CLK_CTRL = 0x0120,
289
290 B2_TI_INI = 0x0130,
291 B2_TI_VAL = 0x0134,
292 B2_TI_CTRL = 0x0138,
293 B2_TI_TEST = 0x0139,
294
295 B2_TST_CTRL1 = 0x0158,
296 B2_TST_CTRL2 = 0x0159,
297 B2_GP_IO = 0x015c,
298
299 B2_I2C_CTRL = 0x0160,
300 B2_I2C_DATA = 0x0164,
301 B2_I2C_IRQ = 0x0168,
302 B2_I2C_SW = 0x016c,
303
304 Y2_PEX_PHY_DATA = 0x0170,
305 Y2_PEX_PHY_ADDR = 0x0172,
306
307 B3_RAM_ADDR = 0x0180,
308 B3_RAM_DATA_LO = 0x0184,
309 B3_RAM_DATA_HI = 0x0188,
310
311/* RAM Interface Registers */
312/* Yukon-2: use RAM_BUFFER() to access the RAM buffer */
313/*
314 * The HW spec calls these registers Timeout Value 0..11, but those names
315 * are not usable in SW. Note that these are NOT real timeouts; they are
316 * the number of qWords transferred continuously.
317 */
318#define RAM_BUFFER(port, reg) (reg | (port <<6))
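/*
 * Illustration (macro arithmetic only): RAM_BUFFER() folds the port number
 * into bit 6 of the register offset, so RAM_BUFFER(0, reg) == reg and,
 * e.g., RAM_BUFFER(1, 0x0190) == 0x01d0.
 */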
319
320 B3_RI_WTO_R1 = 0x0190,
321 B3_RI_WTO_XA1 = 0x0191,
322 B3_RI_WTO_XS1 = 0x0192,
323 B3_RI_RTO_R1 = 0x0193,
324 B3_RI_RTO_XA1 = 0x0194,
325 B3_RI_RTO_XS1 = 0x0195,
326 B3_RI_WTO_R2 = 0x0196,
327 B3_RI_WTO_XA2 = 0x0197,
328 B3_RI_WTO_XS2 = 0x0198,
329 B3_RI_RTO_R2 = 0x0199,
330 B3_RI_RTO_XA2 = 0x019a,
331 B3_RI_RTO_XS2 = 0x019b,
332 B3_RI_TO_VAL = 0x019c,
333 B3_RI_CTRL = 0x01a0,
334 B3_RI_TEST = 0x01a2,
335 B3_MA_TOINI_RX1 = 0x01b0,
336 B3_MA_TOINI_RX2 = 0x01b1,
337 B3_MA_TOINI_TX1 = 0x01b2,
338 B3_MA_TOINI_TX2 = 0x01b3,
339 B3_MA_TOVAL_RX1 = 0x01b4,
340 B3_MA_TOVAL_RX2 = 0x01b5,
341 B3_MA_TOVAL_TX1 = 0x01b6,
342 B3_MA_TOVAL_TX2 = 0x01b7,
343 B3_MA_TO_CTRL = 0x01b8,
344 B3_MA_TO_TEST = 0x01ba,
345 B3_MA_RCINI_RX1 = 0x01c0,
346 B3_MA_RCINI_RX2 = 0x01c1,
347 B3_MA_RCINI_TX1 = 0x01c2,
348 B3_MA_RCINI_TX2 = 0x01c3,
349 B3_MA_RCVAL_RX1 = 0x01c4,
350 B3_MA_RCVAL_RX2 = 0x01c5,
351 B3_MA_RCVAL_TX1 = 0x01c6,
352 B3_MA_RCVAL_TX2 = 0x01c7,
353 B3_MA_RC_CTRL = 0x01c8,
354 B3_MA_RC_TEST = 0x01ca,
355 B3_PA_TOINI_RX1 = 0x01d0,
356 B3_PA_TOINI_RX2 = 0x01d4,
357 B3_PA_TOINI_TX1 = 0x01d8,
358 B3_PA_TOINI_TX2 = 0x01dc,
359 B3_PA_TOVAL_RX1 = 0x01e0,
360 B3_PA_TOVAL_RX2 = 0x01e4,
361 B3_PA_TOVAL_TX1 = 0x01e8,
362 B3_PA_TOVAL_TX2 = 0x01ec,
363 B3_PA_CTRL = 0x01f0,
364 B3_PA_TEST = 0x01f2,
365
366 Y2_CFG_SPC = 0x1c00, /* PCI config space region */
367 Y2_CFG_AER = 0x1d00, /* PCI Advanced Error Report region */
368};
369
370/* B0_CTST 24 bit Control/Status register */
371enum {
372 Y2_VMAIN_AVAIL = 1<<17,/* VMAIN available (YUKON-2 only) */
373 Y2_VAUX_AVAIL = 1<<16,/* VAUX available (YUKON-2 only) */
374 Y2_HW_WOL_ON = 1<<15,/* HW WOL On (Yukon-EC Ultra A1 only) */
375	Y2_HW_WOL_OFF	= 1<<14,/* HW WOL Off (Yukon-EC Ultra A1 only) */
376 Y2_ASF_ENABLE = 1<<13,/* ASF Unit Enable (YUKON-2 only) */
377 Y2_ASF_DISABLE = 1<<12,/* ASF Unit Disable (YUKON-2 only) */
378 Y2_CLK_RUN_ENA = 1<<11,/* CLK_RUN Enable (YUKON-2 only) */
379 Y2_CLK_RUN_DIS = 1<<10,/* CLK_RUN Disable (YUKON-2 only) */
380 Y2_LED_STAT_ON = 1<<9, /* Status LED On (YUKON-2 only) */
381 Y2_LED_STAT_OFF = 1<<8, /* Status LED Off (YUKON-2 only) */
382
383 CS_ST_SW_IRQ = 1<<7, /* Set IRQ SW Request */
384 CS_CL_SW_IRQ = 1<<6, /* Clear IRQ SW Request */
385 CS_STOP_DONE = 1<<5, /* Stop Master is finished */
386 CS_STOP_MAST = 1<<4, /* Command Bit to stop the master */
387 CS_MRST_CLR = 1<<3, /* Clear Master reset */
388 CS_MRST_SET = 1<<2, /* Set Master reset */
389 CS_RST_CLR = 1<<1, /* Clear Software reset */
390 CS_RST_SET = 1, /* Set Software reset */
391};
392
393/* B0_POWER_CTRL 8 Bit Power Control reg (YUKON only) */
394enum {
395 PC_VAUX_ENA = 1<<7, /* Switch VAUX Enable */
396 PC_VAUX_DIS = 1<<6, /* Switch VAUX Disable */
397 PC_VCC_ENA = 1<<5, /* Switch VCC Enable */
398 PC_VCC_DIS = 1<<4, /* Switch VCC Disable */
399 PC_VAUX_ON = 1<<3, /* Switch VAUX On */
400 PC_VAUX_OFF = 1<<2, /* Switch VAUX Off */
401 PC_VCC_ON = 1<<1, /* Switch VCC On */
402 PC_VCC_OFF = 1<<0, /* Switch VCC Off */
403};
404
405/* B2_IRQM_MSK 32 bit IRQ Moderation Mask */
406
407/* B0_Y2_SP_ISRC2 32 bit Special Interrupt Source Reg 2 */
408/* B0_Y2_SP_ISRC3 32 bit Special Interrupt Source Reg 3 */
409/* B0_Y2_SP_EISR 32 bit Enter ISR Reg */
410/* B0_Y2_SP_LISR 32 bit Leave ISR Reg */
411enum {
412 Y2_IS_HW_ERR = 1<<31, /* Interrupt HW Error */
413 Y2_IS_STAT_BMU = 1<<30, /* Status BMU Interrupt */
414 Y2_IS_ASF = 1<<29, /* ASF subsystem Interrupt */
415 Y2_IS_CPU_TO = 1<<28, /* CPU Timeout */
416 Y2_IS_POLL_CHK = 1<<27, /* Check IRQ from polling unit */
417 Y2_IS_TWSI_RDY = 1<<26, /* IRQ on end of TWSI Tx */
418 Y2_IS_IRQ_SW = 1<<25, /* SW forced IRQ */
419 Y2_IS_TIMINT = 1<<24, /* IRQ from Timer */
420
421 Y2_IS_IRQ_PHY2 = 1<<12, /* Interrupt from PHY 2 */
422 Y2_IS_IRQ_MAC2 = 1<<11, /* Interrupt from MAC 2 */
423 Y2_IS_CHK_RX2 = 1<<10, /* Descriptor error Rx 2 */
424 Y2_IS_CHK_TXS2 = 1<<9, /* Descriptor error TXS 2 */
425 Y2_IS_CHK_TXA2 = 1<<8, /* Descriptor error TXA 2 */
426
427 Y2_IS_PSM_ACK = 1<<7, /* PSM Acknowledge (Yukon-Optima only) */
428 Y2_IS_PTP_TIST = 1<<6, /* PTP Time Stamp (Yukon-Optima only) */
429 Y2_IS_PHY_QLNK = 1<<5, /* PHY Quick Link (Yukon-Optima only) */
430
431 Y2_IS_IRQ_PHY1 = 1<<4, /* Interrupt from PHY 1 */
432 Y2_IS_IRQ_MAC1 = 1<<3, /* Interrupt from MAC 1 */
433 Y2_IS_CHK_RX1 = 1<<2, /* Descriptor error Rx 1 */
434 Y2_IS_CHK_TXS1 = 1<<1, /* Descriptor error TXS 1 */
435 Y2_IS_CHK_TXA1 = 1<<0, /* Descriptor error TXA 1 */
436
437 Y2_IS_BASE = Y2_IS_HW_ERR | Y2_IS_STAT_BMU,
438 Y2_IS_PORT_1 = Y2_IS_IRQ_PHY1 | Y2_IS_IRQ_MAC1
439 | Y2_IS_CHK_TXA1 | Y2_IS_CHK_RX1,
440 Y2_IS_PORT_2 = Y2_IS_IRQ_PHY2 | Y2_IS_IRQ_MAC2
441 | Y2_IS_CHK_TXA2 | Y2_IS_CHK_RX2,
442 Y2_IS_ERROR = Y2_IS_HW_ERR |
443 Y2_IS_IRQ_MAC1 | Y2_IS_CHK_TXA1 | Y2_IS_CHK_RX1 |
444 Y2_IS_IRQ_MAC2 | Y2_IS_CHK_TXA2 | Y2_IS_CHK_RX2,
445};
446
447/* B2_IRQM_HWE_MSK 32 bit IRQ Moderation HW Error Mask */
448enum {
449 IS_ERR_MSK = 0x00003fff,/* All Error bits */
450
451 IS_IRQ_TIST_OV = 1<<13, /* Time Stamp Timer Overflow (YUKON only) */
452 IS_IRQ_SENSOR = 1<<12, /* IRQ from Sensor (YUKON only) */
453 IS_IRQ_MST_ERR = 1<<11, /* IRQ master error detected */
454 IS_IRQ_STAT = 1<<10, /* IRQ status exception */
455 IS_NO_STAT_M1 = 1<<9, /* No Rx Status from MAC 1 */
456 IS_NO_STAT_M2 = 1<<8, /* No Rx Status from MAC 2 */
457 IS_NO_TIST_M1 = 1<<7, /* No Time Stamp from MAC 1 */
458 IS_NO_TIST_M2 = 1<<6, /* No Time Stamp from MAC 2 */
459 IS_RAM_RD_PAR = 1<<5, /* RAM Read Parity Error */
460 IS_RAM_WR_PAR = 1<<4, /* RAM Write Parity Error */
461 IS_M1_PAR_ERR = 1<<3, /* MAC 1 Parity Error */
462 IS_M2_PAR_ERR = 1<<2, /* MAC 2 Parity Error */
463 IS_R1_PAR_ERR = 1<<1, /* Queue R1 Parity Error */
464 IS_R2_PAR_ERR = 1<<0, /* Queue R2 Parity Error */
465};
466
467/* Hardware error interrupt mask for Yukon 2 */
468enum {
469 Y2_IS_TIST_OV = 1<<29,/* Time Stamp Timer overflow interrupt */
470 Y2_IS_SENSOR = 1<<28, /* Sensor interrupt */
471 Y2_IS_MST_ERR = 1<<27, /* Master error interrupt */
472 Y2_IS_IRQ_STAT = 1<<26, /* Status exception interrupt */
473 Y2_IS_PCI_EXP = 1<<25, /* PCI-Express interrupt */
474 Y2_IS_PCI_NEXP = 1<<24, /* PCI-Express error similar to PCI error */
475 /* Link 2 */
476 Y2_IS_PAR_RD2 = 1<<13, /* Read RAM parity error interrupt */
477 Y2_IS_PAR_WR2 = 1<<12, /* Write RAM parity error interrupt */
478 Y2_IS_PAR_MAC2 = 1<<11, /* MAC hardware fault interrupt */
479 Y2_IS_PAR_RX2 = 1<<10, /* Parity Error Rx Queue 2 */
480 Y2_IS_TCP_TXS2 = 1<<9, /* TCP length mismatch sync Tx queue IRQ */
481 Y2_IS_TCP_TXA2 = 1<<8, /* TCP length mismatch async Tx queue IRQ */
482 /* Link 1 */
483 Y2_IS_PAR_RD1 = 1<<5, /* Read RAM parity error interrupt */
484 Y2_IS_PAR_WR1 = 1<<4, /* Write RAM parity error interrupt */
485 Y2_IS_PAR_MAC1 = 1<<3, /* MAC hardware fault interrupt */
486 Y2_IS_PAR_RX1 = 1<<2, /* Parity Error Rx Queue 1 */
487 Y2_IS_TCP_TXS1 = 1<<1, /* TCP length mismatch sync Tx queue IRQ */
488 Y2_IS_TCP_TXA1 = 1<<0, /* TCP length mismatch async Tx queue IRQ */
489
490 Y2_HWE_L1_MASK = Y2_IS_PAR_RD1 | Y2_IS_PAR_WR1 | Y2_IS_PAR_MAC1 |
491 Y2_IS_PAR_RX1 | Y2_IS_TCP_TXS1| Y2_IS_TCP_TXA1,
492 Y2_HWE_L2_MASK = Y2_IS_PAR_RD2 | Y2_IS_PAR_WR2 | Y2_IS_PAR_MAC2 |
493 Y2_IS_PAR_RX2 | Y2_IS_TCP_TXS2| Y2_IS_TCP_TXA2,
494
495 Y2_HWE_ALL_MASK = Y2_IS_TIST_OV | Y2_IS_MST_ERR | Y2_IS_IRQ_STAT |
496 Y2_HWE_L1_MASK | Y2_HWE_L2_MASK,
497};
498
499/* B28_DPT_CTRL 8 bit Descriptor Poll Timer Ctrl Reg */
500enum {
501 DPT_START = 1<<1,
502 DPT_STOP = 1<<0,
503};
504
505/* B2_TST_CTRL1 8 bit Test Control Register 1 */
506enum {
507 TST_FRC_DPERR_MR = 1<<7, /* force DATAPERR on MST RD */
508 TST_FRC_DPERR_MW = 1<<6, /* force DATAPERR on MST WR */
509 TST_FRC_DPERR_TR = 1<<5, /* force DATAPERR on TRG RD */
510 TST_FRC_DPERR_TW = 1<<4, /* force DATAPERR on TRG WR */
511 TST_FRC_APERR_M = 1<<3, /* force ADDRPERR on MST */
512 TST_FRC_APERR_T = 1<<2, /* force ADDRPERR on TRG */
513 TST_CFG_WRITE_ON = 1<<1, /* Enable Config Reg WR */
514 TST_CFG_WRITE_OFF= 1<<0, /* Disable Config Reg WR */
515};
516
517/* B2_GP_IO 32 bit General Purpose I/O register */
518enum {
519 GLB_GPIO_CLK_DEB_ENA = 1<<31, /* Clock Debug Enable */
520 GLB_GPIO_CLK_DBG_MSK = 0xf<<26, /* Clock Debug */
521
522 GLB_GPIO_INT_RST_D3_DIS = 1<<15, /* Disable Internal Reset After D3 to D0 */
523 GLB_GPIO_LED_PAD_SPEED_UP = 1<<14, /* LED PAD Speed Up */
524 GLB_GPIO_STAT_RACE_DIS = 1<<13, /* Status Race Disable */
525 GLB_GPIO_TEST_SEL_MSK = 3<<11, /* Testmode Select */
526 GLB_GPIO_TEST_SEL_BASE = 1<<11,
527 GLB_GPIO_RAND_ENA = 1<<10, /* Random Enable */
528 GLB_GPIO_RAND_BIT_1 = 1<<9, /* Random Bit 1 */
529};
530
531/* B2_MAC_CFG 8 bit MAC Configuration / Chip Revision */
532enum {
533 CFG_CHIP_R_MSK = 0xf<<4, /* Bit 7.. 4: Chip Revision */
534 /* Bit 3.. 2: reserved */
535 CFG_DIS_M2_CLK = 1<<1, /* Disable Clock for 2nd MAC */
536 CFG_SNG_MAC = 1<<0, /* MAC Config: 0=2 MACs / 1=1 MAC*/
537};
538
539/* B2_CHIP_ID 8 bit Chip Identification Number */
540enum {
541 CHIP_ID_YUKON_XL = 0xb3, /* YUKON-2 XL */
542 CHIP_ID_YUKON_EC_U = 0xb4, /* YUKON-2 EC Ultra */
543 CHIP_ID_YUKON_EX = 0xb5, /* YUKON-2 Extreme */
544 CHIP_ID_YUKON_EC = 0xb6, /* YUKON-2 EC */
545 CHIP_ID_YUKON_FE = 0xb7, /* YUKON-2 FE */
546 CHIP_ID_YUKON_FE_P = 0xb8, /* YUKON-2 FE+ */
547 CHIP_ID_YUKON_SUPR = 0xb9, /* YUKON-2 Supreme */
548 CHIP_ID_YUKON_UL_2 = 0xba, /* YUKON-2 Ultra 2 */
549 CHIP_ID_YUKON_OPT = 0xbc, /* YUKON-2 Optima */
550 CHIP_ID_YUKON_PRM = 0xbd, /* YUKON-2 Optima Prime */
551 CHIP_ID_YUKON_OP_2 = 0xbe, /* YUKON-2 Optima 2 */
552};
553
554enum yukon_xl_rev {
555 CHIP_REV_YU_XL_A0 = 0,
556 CHIP_REV_YU_XL_A1 = 1,
557 CHIP_REV_YU_XL_A2 = 2,
558 CHIP_REV_YU_XL_A3 = 3,
559};
560
561enum yukon_ec_rev {
562 CHIP_REV_YU_EC_A1 = 0, /* Chip Rev. for Yukon-EC A1/A0 */
563 CHIP_REV_YU_EC_A2 = 1, /* Chip Rev. for Yukon-EC A2 */
564 CHIP_REV_YU_EC_A3 = 2, /* Chip Rev. for Yukon-EC A3 */
565};
566enum yukon_ec_u_rev {
567 CHIP_REV_YU_EC_U_A0 = 1,
568 CHIP_REV_YU_EC_U_A1 = 2,
569 CHIP_REV_YU_EC_U_B0 = 3,
570 CHIP_REV_YU_EC_U_B1 = 5,
571};
572enum yukon_fe_rev {
573 CHIP_REV_YU_FE_A1 = 1,
574 CHIP_REV_YU_FE_A2 = 2,
575};
576enum yukon_fe_p_rev {
577 CHIP_REV_YU_FE2_A0 = 0,
578};
579enum yukon_ex_rev {
580 CHIP_REV_YU_EX_A0 = 1,
581 CHIP_REV_YU_EX_B0 = 2,
582};
583enum yukon_supr_rev {
584 CHIP_REV_YU_SU_A0 = 0,
585 CHIP_REV_YU_SU_B0 = 1,
586 CHIP_REV_YU_SU_B1 = 3,
587};
588
589
590/* B2_Y2_CLK_GATE 8 bit Clock Gating (Yukon-2 only) */
591enum {
592 Y2_STATUS_LNK2_INAC = 1<<7, /* Status Link 2 inactive (0 = active) */
593 Y2_CLK_GAT_LNK2_DIS = 1<<6, /* Disable clock gating Link 2 */
594 Y2_COR_CLK_LNK2_DIS = 1<<5, /* Disable Core clock Link 2 */
595 Y2_PCI_CLK_LNK2_DIS = 1<<4, /* Disable PCI clock Link 2 */
596 Y2_STATUS_LNK1_INAC = 1<<3, /* Status Link 1 inactive (0 = active) */
597 Y2_CLK_GAT_LNK1_DIS = 1<<2, /* Disable clock gating Link 1 */
598 Y2_COR_CLK_LNK1_DIS = 1<<1, /* Disable Core clock Link 1 */
599 Y2_PCI_CLK_LNK1_DIS = 1<<0, /* Disable PCI clock Link 1 */
600};
601
602/* B2_Y2_HW_RES 8 bit HW Resources (Yukon-2 only) */
603enum {
604 CFG_LED_MODE_MSK = 7<<2, /* Bit 4.. 2: LED Mode Mask */
605 CFG_LINK_2_AVAIL = 1<<1, /* Link 2 available */
606 CFG_LINK_1_AVAIL = 1<<0, /* Link 1 available */
607};
608#define CFG_LED_MODE(x) (((x) & CFG_LED_MODE_MSK) >> 2)
609#define CFG_DUAL_MAC_MSK (CFG_LINK_2_AVAIL | CFG_LINK_1_AVAIL)
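/*
 * Example (illustrative): a B2_Y2_HW_RES readout of 0x07 decodes as
 * CFG_LED_MODE(0x07) == 1 with both CFG_LINK_1_AVAIL and CFG_LINK_2_AVAIL
 * set, i.e. a dual-link chip.
 */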
610
611
612/* B2_Y2_CLK_CTRL 32 bit Clock Frequency Control Register (Yukon-2/EC) */
613enum {
614 Y2_CLK_DIV_VAL_MSK = 0xff<<16,/* Bit 23..16: Clock Divisor Value */
615#define Y2_CLK_DIV_VAL(x) (((x)<<16) & Y2_CLK_DIV_VAL_MSK)
616 Y2_CLK_DIV_VAL2_MSK = 7<<21, /* Bit 23..21: Clock Divisor Value */
617 Y2_CLK_SELECT2_MSK = 0x1f<<16,/* Bit 20..16: Clock Select */
618#define Y2_CLK_DIV_VAL_2(x) (((x)<<21) & Y2_CLK_DIV_VAL2_MSK)
619#define Y2_CLK_SEL_VAL_2(x) (((x)<<16) & Y2_CLK_SELECT2_MSK)
620 Y2_CLK_DIV_ENA = 1<<1, /* Enable Core Clock Division */
621 Y2_CLK_DIV_DIS = 1<<0, /* Disable Core Clock Division */
622};
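/*
 * Example (illustrative): Y2_CLK_DIV_VAL(3) == (3 << 16) & Y2_CLK_DIV_VAL_MSK
 * == 0x00030000, an encoding suitable for writing to B2_Y2_CLK_CTRL.
 */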
623
624/* B2_TI_CTRL 8 bit Timer control */
625/* B2_IRQM_CTRL 8 bit IRQ Moderation Timer Control */
626enum {
627 TIM_START = 1<<2, /* Start Timer */
628 TIM_STOP = 1<<1, /* Stop Timer */
629 TIM_CLR_IRQ = 1<<0, /* Clear Timer IRQ (!IRQM) */
630};
631
632/* B2_TI_TEST 8 Bit Timer Test */
633/* B2_IRQM_TEST 8 bit IRQ Moderation Timer Test */
634/* B28_DPT_TST 8 bit Descriptor Poll Timer Test Reg */
635enum {
636 TIM_T_ON = 1<<2, /* Test mode on */
637 TIM_T_OFF = 1<<1, /* Test mode off */
638 TIM_T_STEP = 1<<0, /* Test step */
639};
640
641/* Y2_PEX_PHY_ADDR/DATA PEX PHY address and data reg (Yukon-2 only) */
642enum {
643 PEX_RD_ACCESS = 1<<31, /* Access Mode Read = 1, Write = 0 */
644 PEX_DB_ACCESS = 1<<30, /* Access to debug register */
645};
646
647/* B3_RAM_ADDR 32 bit RAM Address, to read or write */
648 /* Bit 31..19: reserved */
649#define RAM_ADR_RAN 0x0007ffffL /* Bit 18.. 0: RAM Address Range */
650/* RAM Interface Registers */
651
652/* B3_RI_CTRL 16 bit RAM Interface Control Register */
653enum {
654 RI_CLR_RD_PERR = 1<<9, /* Clear IRQ RAM Read Parity Err */
655 RI_CLR_WR_PERR = 1<<8, /* Clear IRQ RAM Write Parity Err*/
656
657 RI_RST_CLR = 1<<1, /* Clear RAM Interface Reset */
658 RI_RST_SET = 1<<0, /* Set RAM Interface Reset */
659};
660
661#define SK_RI_TO_53 36 /* RAM interface timeout */
662
663
664/* Port related registers FIFO, and Arbiter */
665#define SK_REG(port,reg) (((port)<<7)+(reg))
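/*
 * Example (illustrative): each port's register block is 0x80 bytes apart,
 * so SK_REG(0, TXA_CTRL) == 0x0210 and SK_REG(1, TXA_CTRL) == 0x0290.
 */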
666
667/* Transmit Arbiter Registers MAC 1 and 2, use SK_REG() to access */
668/* TXA_ITI_INI 32 bit Tx Arb Interval Timer Init Val */
669/* TXA_ITI_VAL 32 bit Tx Arb Interval Timer Value */
670/* TXA_LIM_INI 32 bit Tx Arb Limit Counter Init Val */
671/* TXA_LIM_VAL 32 bit Tx Arb Limit Counter Value */
672
673#define TXA_MAX_VAL 0x00ffffffUL /* Bit 23.. 0: Max TXA Timer/Cnt Val */
674
675/* TXA_CTRL 8 bit Tx Arbiter Control Register */
676enum {
677 TXA_ENA_FSYNC = 1<<7, /* Enable force of sync Tx queue */
678 TXA_DIS_FSYNC = 1<<6, /* Disable force of sync Tx queue */
679 TXA_ENA_ALLOC = 1<<5, /* Enable alloc of free bandwidth */
680 TXA_DIS_ALLOC = 1<<4, /* Disable alloc of free bandwidth */
681 TXA_START_RC = 1<<3, /* Start sync Rate Control */
682 TXA_STOP_RC = 1<<2, /* Stop sync Rate Control */
683 TXA_ENA_ARB = 1<<1, /* Enable Tx Arbiter */
684 TXA_DIS_ARB = 1<<0, /* Disable Tx Arbiter */
685};
686
687/*
688 * Bank 4 - 5
689 */
690/* Transmit Arbiter Registers MAC 1 and 2, use SK_REG() to access */
691enum {
692 TXA_ITI_INI = 0x0200,/* 32 bit Tx Arb Interval Timer Init Val*/
693 TXA_ITI_VAL = 0x0204,/* 32 bit Tx Arb Interval Timer Value */
694 TXA_LIM_INI = 0x0208,/* 32 bit Tx Arb Limit Counter Init Val */
695 TXA_LIM_VAL = 0x020c,/* 32 bit Tx Arb Limit Counter Value */
696 TXA_CTRL = 0x0210,/* 8 bit Tx Arbiter Control Register */
697 TXA_TEST = 0x0211,/* 8 bit Tx Arbiter Test Register */
698 TXA_STAT = 0x0212,/* 8 bit Tx Arbiter Status Register */
699
700 RSS_KEY = 0x0220, /* RSS Key setup */
701 RSS_CFG = 0x0248, /* RSS Configuration */
702};
703
704enum {
705 HASH_TCP_IPV6_EX_CTRL = 1<<5,
706 HASH_IPV6_EX_CTRL = 1<<4,
707 HASH_TCP_IPV6_CTRL = 1<<3,
708 HASH_IPV6_CTRL = 1<<2,
709 HASH_TCP_IPV4_CTRL = 1<<1,
710 HASH_IPV4_CTRL = 1<<0,
711
712 HASH_ALL = 0x3f,
713};
714
715enum {
716 B6_EXT_REG = 0x0300,/* External registers (GENESIS only) */
717 B7_CFG_SPC = 0x0380,/* copy of the Configuration register */
718 B8_RQ1_REGS = 0x0400,/* Receive Queue 1 */
719 B8_RQ2_REGS = 0x0480,/* Receive Queue 2 */
720 B8_TS1_REGS = 0x0600,/* Transmit sync queue 1 */
721 B8_TA1_REGS = 0x0680,/* Transmit async queue 1 */
722 B8_TS2_REGS = 0x0700,/* Transmit sync queue 2 */
723	B8_TA2_REGS	= 0x0780,/* Transmit async queue 2 */
724 B16_RAM_REGS = 0x0800,/* RAM Buffer Registers */
725};
726
727/* Queue Register Offsets, use Q_ADDR() to access */
728enum {
729 B8_Q_REGS = 0x0400, /* base of Queue registers */
730 Q_D = 0x00, /* 8*32 bit Current Descriptor */
731 Q_VLAN = 0x20, /* 16 bit Current VLAN Tag */
732 Q_DONE = 0x24, /* 16 bit Done Index */
733 Q_AC_L = 0x28, /* 32 bit Current Address Counter Low dWord */
734 Q_AC_H = 0x2c, /* 32 bit Current Address Counter High dWord */
735 Q_BC = 0x30, /* 32 bit Current Byte Counter */
736 Q_CSR = 0x34, /* 32 bit BMU Control/Status Register */
737 Q_TEST = 0x38, /* 32 bit Test/Control Register */
738
739/* Yukon-2 */
740 Q_WM = 0x40, /* 16 bit FIFO Watermark */
741 Q_AL = 0x42, /* 8 bit FIFO Alignment */
742 Q_RSP = 0x44, /* 16 bit FIFO Read Shadow Pointer */
743 Q_RSL = 0x46, /* 8 bit FIFO Read Shadow Level */
744 Q_RP = 0x48, /* 8 bit FIFO Read Pointer */
745 Q_RL = 0x4a, /* 8 bit FIFO Read Level */
746 Q_WP = 0x4c, /* 8 bit FIFO Write Pointer */
747 Q_WSP = 0x4d, /* 8 bit FIFO Write Shadow Pointer */
748 Q_WL = 0x4e, /* 8 bit FIFO Write Level */
749 Q_WSL = 0x4f, /* 8 bit FIFO Write Shadow Level */
750};
751#define Q_ADDR(reg, offs) (B8_Q_REGS + (reg) + (offs))
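/*
 * Example (illustrative): Q_ADDR(Q_R1, Q_CSR) == 0x0400 + 0x0000 + 0x34
 * == 0x0434, the BMU Control/Status register of Receive Queue 1.
 */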
752
753/* Q_TEST 32 bit Test Register */
754enum {
755 /* Transmit */
756 F_TX_CHK_AUTO_OFF = 1<<31, /* Tx checksum auto calc off (Yukon EX) */
757	F_TX_CHK_AUTO_ON  = 1<<30, /* Tx checksum auto calc on (Yukon EX) */
758
759 /* Receive */
760 F_M_RX_RAM_DIS = 1<<24, /* MAC Rx RAM Read Port disable */
761
762 /* Hardware testbits not used */
763};
764
765/* Queue Prefetch Unit Offsets, use Y2_QADDR() to address (Yukon-2 only)*/
766enum {
767 Y2_B8_PREF_REGS = 0x0450,
768
769 PREF_UNIT_CTRL = 0x00, /* 32 bit Control register */
770 PREF_UNIT_LAST_IDX = 0x04, /* 16 bit Last Index */
771 PREF_UNIT_ADDR_LO = 0x08, /* 32 bit List start addr, low part */
772 PREF_UNIT_ADDR_HI = 0x0c, /* 32 bit List start addr, high part*/
773 PREF_UNIT_GET_IDX = 0x10, /* 16 bit Get Index */
774 PREF_UNIT_PUT_IDX = 0x14, /* 16 bit Put Index */
775 PREF_UNIT_FIFO_WP = 0x20, /* 8 bit FIFO write pointer */
776 PREF_UNIT_FIFO_RP = 0x24, /* 8 bit FIFO read pointer */
777 PREF_UNIT_FIFO_WM = 0x28, /* 8 bit FIFO watermark */
778 PREF_UNIT_FIFO_LEV = 0x2c, /* 8 bit FIFO level */
779
780 PREF_UNIT_MASK_IDX = 0x0fff,
781};
782#define Y2_QADDR(q,reg) (Y2_B8_PREF_REGS + (q) + (reg))
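/*
 * Example (illustrative): Y2_QADDR(Q_R1, PREF_UNIT_GET_IDX) == 0x0450 +
 * 0x0000 + 0x10 == 0x0460, the Get Index of Receive Queue 1's prefetch unit.
 */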
783
784/* RAM Buffer Register Offsets */
785enum {
786
787 RB_START = 0x00,/* 32 bit RAM Buffer Start Address */
788 RB_END = 0x04,/* 32 bit RAM Buffer End Address */
789 RB_WP = 0x08,/* 32 bit RAM Buffer Write Pointer */
790 RB_RP = 0x0c,/* 32 bit RAM Buffer Read Pointer */
791 RB_RX_UTPP = 0x10,/* 32 bit Rx Upper Threshold, Pause Packet */
792 RB_RX_LTPP = 0x14,/* 32 bit Rx Lower Threshold, Pause Packet */
793 RB_RX_UTHP = 0x18,/* 32 bit Rx Upper Threshold, High Prio */
794 RB_RX_LTHP = 0x1c,/* 32 bit Rx Lower Threshold, High Prio */
795 /* 0x10 - 0x1f: reserved at Tx RAM Buffer Registers */
796 RB_PC = 0x20,/* 32 bit RAM Buffer Packet Counter */
797 RB_LEV = 0x24,/* 32 bit RAM Buffer Level Register */
798 RB_CTRL = 0x28,/* 32 bit RAM Buffer Control Register */
799 RB_TST1 = 0x29,/* 8 bit RAM Buffer Test Register 1 */
800 RB_TST2 = 0x2a,/* 8 bit RAM Buffer Test Register 2 */
801};
802
803/* Receive and Transmit Queues */
804enum {
805 Q_R1 = 0x0000, /* Receive Queue 1 */
806 Q_R2 = 0x0080, /* Receive Queue 2 */
807 Q_XS1 = 0x0200, /* Synchronous Transmit Queue 1 */
808 Q_XA1 = 0x0280, /* Asynchronous Transmit Queue 1 */
809 Q_XS2 = 0x0300, /* Synchronous Transmit Queue 2 */
810 Q_XA2 = 0x0380, /* Asynchronous Transmit Queue 2 */
811};
812
813/* Different PHY Types */
814enum {
815 PHY_ADDR_MARV = 0,
816};
817
818#define RB_ADDR(offs, queue) ((u16) B16_RAM_REGS + (queue) + (offs))
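/*
 * Example (illustrative): RB_ADDR(RB_CTRL, Q_R1) == 0x0800 + 0x0000 + 0x28
 * == 0x0828, the RAM Buffer Control register of Receive Queue 1.
 */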
819
820
821enum {
822 LNK_SYNC_INI = 0x0c30,/* 32 bit Link Sync Cnt Init Value */
823 LNK_SYNC_VAL = 0x0c34,/* 32 bit Link Sync Cnt Current Value */
824 LNK_SYNC_CTRL = 0x0c38,/* 8 bit Link Sync Cnt Control Register */
825 LNK_SYNC_TST = 0x0c39,/* 8 bit Link Sync Cnt Test Register */
826
827 LNK_LED_REG = 0x0c3c,/* 8 bit Link LED Register */
828
829/* Receive GMAC FIFO (YUKON and Yukon-2) */
830
831 RX_GMF_EA = 0x0c40,/* 32 bit Rx GMAC FIFO End Address */
832 RX_GMF_AF_THR = 0x0c44,/* 32 bit Rx GMAC FIFO Almost Full Thresh. */
833 RX_GMF_CTRL_T = 0x0c48,/* 32 bit Rx GMAC FIFO Control/Test */
834 RX_GMF_FL_MSK = 0x0c4c,/* 32 bit Rx GMAC FIFO Flush Mask */
835 RX_GMF_FL_THR = 0x0c50,/* 16 bit Rx GMAC FIFO Flush Threshold */
836 RX_GMF_FL_CTRL = 0x0c52,/* 16 bit Rx GMAC FIFO Flush Control */
837 RX_GMF_TR_THR = 0x0c54,/* 32 bit Rx Truncation Threshold (Yukon-2) */
838 RX_GMF_UP_THR = 0x0c58,/* 16 bit Rx Upper Pause Thr (Yukon-EC_U) */
839 RX_GMF_LP_THR = 0x0c5a,/* 16 bit Rx Lower Pause Thr (Yukon-EC_U) */
840 RX_GMF_VLAN = 0x0c5c,/* 32 bit Rx VLAN Type Register (Yukon-2) */
841 RX_GMF_WP = 0x0c60,/* 32 bit Rx GMAC FIFO Write Pointer */
842
843 RX_GMF_WLEV = 0x0c68,/* 32 bit Rx GMAC FIFO Write Level */
844
845 RX_GMF_RP = 0x0c70,/* 32 bit Rx GMAC FIFO Read Pointer */
846
847 RX_GMF_RLEV = 0x0c78,/* 32 bit Rx GMAC FIFO Read Level */
848};
849
850
851/* Q_BC 32 bit Current Byte Counter */
852
853/* BMU Control Status Registers */
854/* B0_R1_CSR 32 bit BMU Ctrl/Stat Rx Queue 1 */
855/* B0_R2_CSR 32 bit BMU Ctrl/Stat Rx Queue 2 */
856/* B0_XA1_CSR	32 bit	BMU Ctrl/Stat Async Tx Queue 1 */
857/* B0_XS1_CSR	32 bit	BMU Ctrl/Stat Sync Tx Queue 1 */
858/* B0_XA2_CSR	32 bit	BMU Ctrl/Stat Async Tx Queue 2 */
859/* B0_XS2_CSR	32 bit	BMU Ctrl/Stat Sync Tx Queue 2 */
860/* Q_CSR 32 bit BMU Control/Status Register */
861
862/* Rx BMU Control / Status Registers (Yukon-2) */
863enum {
864 BMU_IDLE = 1<<31, /* BMU Idle State */
865 BMU_RX_TCP_PKT = 1<<30, /* Rx TCP Packet (when RSS Hash enabled) */
866 BMU_RX_IP_PKT = 1<<29, /* Rx IP Packet (when RSS Hash enabled) */
867
868 BMU_ENA_RX_RSS_HASH = 1<<15, /* Enable Rx RSS Hash */
869 BMU_DIS_RX_RSS_HASH = 1<<14, /* Disable Rx RSS Hash */
870 BMU_ENA_RX_CHKSUM = 1<<13, /* Enable Rx TCP/IP Checksum Check */
871 BMU_DIS_RX_CHKSUM = 1<<12, /* Disable Rx TCP/IP Checksum Check */
872 BMU_CLR_IRQ_PAR = 1<<11, /* Clear IRQ on Parity errors (Rx) */
873 BMU_CLR_IRQ_TCP = 1<<11, /* Clear IRQ on TCP segment. error (Tx) */
874 BMU_CLR_IRQ_CHK = 1<<10, /* Clear IRQ Check */
875 BMU_STOP = 1<<9, /* Stop Rx/Tx Queue */
876 BMU_START = 1<<8, /* Start Rx/Tx Queue */
877 BMU_FIFO_OP_ON = 1<<7, /* FIFO Operational On */
878 BMU_FIFO_OP_OFF = 1<<6, /* FIFO Operational Off */
879 BMU_FIFO_ENA = 1<<5, /* Enable FIFO */
880 BMU_FIFO_RST = 1<<4, /* Reset FIFO */
881 BMU_OP_ON = 1<<3, /* BMU Operational On */
882 BMU_OP_OFF = 1<<2, /* BMU Operational Off */
883 BMU_RST_CLR = 1<<1, /* Clear BMU Reset (Enable) */
884 BMU_RST_SET = 1<<0, /* Set BMU Reset */
885
886 BMU_CLR_RESET = BMU_FIFO_RST | BMU_OP_OFF | BMU_RST_CLR,
887 BMU_OPER_INIT = BMU_CLR_IRQ_PAR | BMU_CLR_IRQ_CHK | BMU_START |
888 BMU_FIFO_ENA | BMU_OP_ON,
889
890 BMU_WM_DEFAULT = 0x600,
891 BMU_WM_PEX = 0x80,
892};
893
894/* Tx BMU Control / Status Registers (Yukon-2) */
895 /* Bit 31: same as for Rx */
896enum {
897 BMU_TX_IPIDINCR_ON = 1<<13, /* Enable IP ID Increment */
898 BMU_TX_IPIDINCR_OFF = 1<<12, /* Disable IP ID Increment */
899 BMU_TX_CLR_IRQ_TCP = 1<<11, /* Clear IRQ on TCP segment length mismatch */
900};
901
902/* TBMU_TEST 0x06B8 Transmit BMU Test Register */
903enum {
904 TBMU_TEST_BMU_TX_CHK_AUTO_OFF = 1<<31, /* BMU Tx Checksum Auto Calculation Disable */
905 TBMU_TEST_BMU_TX_CHK_AUTO_ON = 1<<30, /* BMU Tx Checksum Auto Calculation Enable */
906	TBMU_TEST_HOME_ADD_PAD_FIX1_EN		= 1<<29, /* Home Address Padding FIX1 Enable */
907	TBMU_TEST_HOME_ADD_PAD_FIX1_DIS		= 1<<28, /* Home Address Padding FIX1 Disable */
908 TBMU_TEST_ROUTING_ADD_FIX_EN = 1<<27, /* Routing Address Fix Enable */
909 TBMU_TEST_ROUTING_ADD_FIX_DIS = 1<<26, /* Routing Address Fix Disable */
910 TBMU_TEST_HOME_ADD_FIX_EN = 1<<25, /* Home address checksum fix enable */
911 TBMU_TEST_HOME_ADD_FIX_DIS = 1<<24, /* Home address checksum fix disable */
912
913 TBMU_TEST_TEST_RSPTR_ON = 1<<22, /* Testmode Shadow Read Ptr On */
914 TBMU_TEST_TEST_RSPTR_OFF = 1<<21, /* Testmode Shadow Read Ptr Off */
915 TBMU_TEST_TESTSTEP_RSPTR = 1<<20, /* Teststep Shadow Read Ptr */
916
917 TBMU_TEST_TEST_RPTR_ON = 1<<18, /* Testmode Read Ptr On */
918 TBMU_TEST_TEST_RPTR_OFF = 1<<17, /* Testmode Read Ptr Off */
919 TBMU_TEST_TESTSTEP_RPTR = 1<<16, /* Teststep Read Ptr */
920
921 TBMU_TEST_TEST_WSPTR_ON = 1<<14, /* Testmode Shadow Write Ptr On */
922 TBMU_TEST_TEST_WSPTR_OFF = 1<<13, /* Testmode Shadow Write Ptr Off */
923 TBMU_TEST_TESTSTEP_WSPTR = 1<<12, /* Teststep Shadow Write Ptr */
924
925 TBMU_TEST_TEST_WPTR_ON = 1<<10, /* Testmode Write Ptr On */
926 TBMU_TEST_TEST_WPTR_OFF = 1<<9, /* Testmode Write Ptr Off */
927 TBMU_TEST_TESTSTEP_WPTR = 1<<8, /* Teststep Write Ptr */
928
929 TBMU_TEST_TEST_REQ_NB_ON = 1<<6, /* Testmode Req Nbytes/Addr On */
930 TBMU_TEST_TEST_REQ_NB_OFF = 1<<5, /* Testmode Req Nbytes/Addr Off */
931 TBMU_TEST_TESTSTEP_REQ_NB = 1<<4, /* Teststep Req Nbytes/Addr */
932
933 TBMU_TEST_TEST_DONE_IDX_ON = 1<<2, /* Testmode Done Index On */
934 TBMU_TEST_TEST_DONE_IDX_OFF = 1<<1, /* Testmode Done Index Off */
935 TBMU_TEST_TESTSTEP_DONE_IDX = 1<<0, /* Teststep Done Index */
936};
937
938/* Queue Prefetch Unit Offsets, use Y2_QADDR() to address (Yukon-2 only)*/
939/* PREF_UNIT_CTRL 32 bit Prefetch Control register */
940enum {
941 PREF_UNIT_OP_ON = 1<<3, /* prefetch unit operational */
942 PREF_UNIT_OP_OFF = 1<<2, /* prefetch unit not operational */
943 PREF_UNIT_RST_CLR = 1<<1, /* Clear Prefetch Unit Reset */
944 PREF_UNIT_RST_SET = 1<<0, /* Set Prefetch Unit Reset */
945};
946
947/* RAM Buffer Register Offsets, use RB_ADDR(Queue, Offs) to access */
948/* RB_START 32 bit RAM Buffer Start Address */
949/* RB_END 32 bit RAM Buffer End Address */
950/* RB_WP 32 bit RAM Buffer Write Pointer */
951/* RB_RP 32 bit RAM Buffer Read Pointer */
952/* RB_RX_UTPP 32 bit Rx Upper Threshold, Pause Pack */
953/* RB_RX_LTPP 32 bit Rx Lower Threshold, Pause Pack */
954/* RB_RX_UTHP 32 bit Rx Upper Threshold, High Prio */
955/* RB_RX_LTHP 32 bit Rx Lower Threshold, High Prio */
956/* RB_PC 32 bit RAM Buffer Packet Counter */
957/* RB_LEV 32 bit RAM Buffer Level Register */
958
959#define RB_MSK 0x0007ffff /* Bit 18.. 0: RAM Buffer Pointer Bits */
960/* RB_TST2 8 bit RAM Buffer Test Register 2 */
961/* RB_TST1 8 bit RAM Buffer Test Register 1 */
962
963/* RB_CTRL 8 bit RAM Buffer Control Register */
964enum {
965 RB_ENA_STFWD = 1<<5, /* Enable Store & Forward */
966 RB_DIS_STFWD = 1<<4, /* Disable Store & Forward */
967 RB_ENA_OP_MD = 1<<3, /* Enable Operation Mode */
968 RB_DIS_OP_MD = 1<<2, /* Disable Operation Mode */
969 RB_RST_CLR = 1<<1, /* Clear RAM Buf STM Reset */
970 RB_RST_SET = 1<<0, /* Set RAM Buf STM Reset */
971};
972
973
974/* Transmit GMAC FIFO (YUKON only) */
975enum {
976 TX_GMF_EA = 0x0d40,/* 32 bit Tx GMAC FIFO End Address */
977 TX_GMF_AE_THR = 0x0d44,/* 32 bit Tx GMAC FIFO Almost Empty Thresh.*/
978 TX_GMF_CTRL_T = 0x0d48,/* 32 bit Tx GMAC FIFO Control/Test */
979
980 TX_GMF_WP = 0x0d60,/* 32 bit Tx GMAC FIFO Write Pointer */
981 TX_GMF_WSP = 0x0d64,/* 32 bit Tx GMAC FIFO Write Shadow Ptr. */
982 TX_GMF_WLEV = 0x0d68,/* 32 bit Tx GMAC FIFO Write Level */
983
984 TX_GMF_RP = 0x0d70,/* 32 bit Tx GMAC FIFO Read Pointer */
985 TX_GMF_RSTP = 0x0d74,/* 32 bit Tx GMAC FIFO Restart Pointer */
986 TX_GMF_RLEV = 0x0d78,/* 32 bit Tx GMAC FIFO Read Level */
987
988 /* Threshold values for Yukon-EC Ultra and Extreme */
989 ECU_AE_THR = 0x0070, /* Almost Empty Threshold */
990 ECU_TXFF_LEV = 0x01a0, /* Tx BMU FIFO Level */
991 ECU_JUMBO_WM = 0x0080, /* Jumbo Mode Watermark */
992};
993
994/* Descriptor Poll Timer Registers */
995enum {
996 B28_DPT_INI = 0x0e00,/* 24 bit Descriptor Poll Timer Init Val */
997 B28_DPT_VAL = 0x0e04,/* 24 bit Descriptor Poll Timer Curr Val */
998 B28_DPT_CTRL = 0x0e08,/* 8 bit Descriptor Poll Timer Ctrl Reg */
999
1000 B28_DPT_TST = 0x0e0a,/* 8 bit Descriptor Poll Timer Test Reg */
1001};
1002
1003/* Time Stamp Timer Registers (YUKON only) */
1004enum {
1005 GMAC_TI_ST_VAL = 0x0e14,/* 32 bit Time Stamp Timer Curr Val */
1006 GMAC_TI_ST_CTRL = 0x0e18,/* 8 bit Time Stamp Timer Ctrl Reg */
1007 GMAC_TI_ST_TST = 0x0e1a,/* 8 bit Time Stamp Timer Test Reg */
1008};
1009
1010/* Polling Unit Registers (Yukon-2 only) */
1011enum {
1012 POLL_CTRL = 0x0e20, /* 32 bit Polling Unit Control Reg */
1013 POLL_LAST_IDX = 0x0e24,/* 16 bit Polling Unit List Last Index */
1014
1015 POLL_LIST_ADDR_LO= 0x0e28,/* 32 bit Poll. List Start Addr (low) */
1016 POLL_LIST_ADDR_HI= 0x0e2c,/* 32 bit Poll. List Start Addr (high) */
1017};
1018
1019enum {
1020 SMB_CFG = 0x0e40, /* 32 bit SMBus Config Register */
1021 SMB_CSR = 0x0e44, /* 32 bit SMBus Control/Status Register */
1022};
1023
1024enum {
1025 CPU_WDOG = 0x0e48, /* 32 bit Watchdog Register */
1026 CPU_CNTR = 0x0e4C, /* 32 bit Counter Register */
1027 CPU_TIM = 0x0e50,/* 32 bit Timer Compare Register */
1028 CPU_AHB_ADDR = 0x0e54, /* 32 bit CPU AHB Debug Register */
1029 CPU_AHB_WDATA = 0x0e58, /* 32 bit CPU AHB Debug Register */
1030 CPU_AHB_RDATA = 0x0e5C, /* 32 bit CPU AHB Debug Register */
1031 HCU_MAP_BASE = 0x0e60, /* 32 bit Reset Mapping Base */
1032 CPU_AHB_CTRL = 0x0e64, /* 32 bit CPU AHB Debug Register */
1033 HCU_CCSR = 0x0e68, /* 32 bit CPU Control and Status Register */
1034 HCU_HCSR = 0x0e6C, /* 32 bit Host Control and Status Register */
1035};
1036
1037/* ASF Subsystem Registers (Yukon-2 only) */
1038enum {
1039 B28_Y2_SMB_CONFIG = 0x0e40,/* 32 bit ASF SMBus Config Register */
1040 B28_Y2_SMB_CSD_REG = 0x0e44,/* 32 bit ASF SMB Control/Status/Data */
1041 B28_Y2_ASF_IRQ_V_BASE=0x0e60,/* 32 bit ASF IRQ Vector Base */
1042
1043 B28_Y2_ASF_STAT_CMD= 0x0e68,/* 32 bit ASF Status and Command Reg */
1044 B28_Y2_ASF_HOST_COM= 0x0e6c,/* 32 bit ASF Host Communication Reg */
1045 B28_Y2_DATA_REG_1 = 0x0e70,/* 32 bit ASF/Host Data Register 1 */
1046 B28_Y2_DATA_REG_2 = 0x0e74,/* 32 bit ASF/Host Data Register 2 */
1047 B28_Y2_DATA_REG_3 = 0x0e78,/* 32 bit ASF/Host Data Register 3 */
1048 B28_Y2_DATA_REG_4 = 0x0e7c,/* 32 bit ASF/Host Data Register 4 */
1049};
1050
1051/* Status BMU Registers (Yukon-2 only)*/
1052enum {
1053 STAT_CTRL = 0x0e80,/* 32 bit Status BMU Control Reg */
1054 STAT_LAST_IDX = 0x0e84,/* 16 bit Status BMU Last Index */
1055
1056 STAT_LIST_ADDR_LO= 0x0e88,/* 32 bit Status List Start Addr (low) */
1057 STAT_LIST_ADDR_HI= 0x0e8c,/* 32 bit Status List Start Addr (high) */
1058 STAT_TXA1_RIDX = 0x0e90,/* 16 bit Status TxA1 Report Index Reg */
1059 STAT_TXS1_RIDX = 0x0e92,/* 16 bit Status TxS1 Report Index Reg */
1060 STAT_TXA2_RIDX = 0x0e94,/* 16 bit Status TxA2 Report Index Reg */
1061 STAT_TXS2_RIDX = 0x0e96,/* 16 bit Status TxS2 Report Index Reg */
1062 STAT_TX_IDX_TH = 0x0e98,/* 16 bit Status Tx Index Threshold Reg */
1063 STAT_PUT_IDX = 0x0e9c,/* 16 bit Status Put Index Reg */
1064
1065/* FIFO Control/Status Registers (Yukon-2 only)*/
1066 STAT_FIFO_WP = 0x0ea0,/* 8 bit Status FIFO Write Pointer Reg */
1067 STAT_FIFO_RP = 0x0ea4,/* 8 bit Status FIFO Read Pointer Reg */
1068 STAT_FIFO_RSP = 0x0ea6,/* 8 bit Status FIFO Read Shadow Ptr */
1069 STAT_FIFO_LEVEL = 0x0ea8,/* 8 bit Status FIFO Level Reg */
1070 STAT_FIFO_SHLVL = 0x0eaa,/* 8 bit Status FIFO Shadow Level Reg */
1071 STAT_FIFO_WM = 0x0eac,/* 8 bit Status FIFO Watermark Reg */
1072 STAT_FIFO_ISR_WM= 0x0ead,/* 8 bit Status FIFO ISR Watermark Reg */
1073
1074/* Level and ISR Timer Registers (Yukon-2 only)*/
1075 STAT_LEV_TIMER_INI= 0x0eb0,/* 32 bit Level Timer Init. Value Reg */
1076 STAT_LEV_TIMER_CNT= 0x0eb4,/* 32 bit Level Timer Counter Reg */
1077 STAT_LEV_TIMER_CTRL= 0x0eb8,/* 8 bit Level Timer Control Reg */
1078 STAT_LEV_TIMER_TEST= 0x0eb9,/* 8 bit Level Timer Test Reg */
1079 STAT_TX_TIMER_INI = 0x0ec0,/* 32 bit Tx Timer Init. Value Reg */
1080 STAT_TX_TIMER_CNT = 0x0ec4,/* 32 bit Tx Timer Counter Reg */
1081 STAT_TX_TIMER_CTRL = 0x0ec8,/* 8 bit Tx Timer Control Reg */
1082 STAT_TX_TIMER_TEST = 0x0ec9,/* 8 bit Tx Timer Test Reg */
1083 STAT_ISR_TIMER_INI = 0x0ed0,/* 32 bit ISR Timer Init. Value Reg */
1084 STAT_ISR_TIMER_CNT = 0x0ed4,/* 32 bit ISR Timer Counter Reg */
1085 STAT_ISR_TIMER_CTRL= 0x0ed8,/* 8 bit ISR Timer Control Reg */
1086 STAT_ISR_TIMER_TEST= 0x0ed9,/* 8 bit ISR Timer Test Reg */
1087};
1088
1089enum {
1090 LINKLED_OFF = 0x01,
1091 LINKLED_ON = 0x02,
1092 LINKLED_LINKSYNC_OFF = 0x04,
1093 LINKLED_LINKSYNC_ON = 0x08,
1094 LINKLED_BLINK_OFF = 0x10,
1095 LINKLED_BLINK_ON = 0x20,
1096};
1097
1098/* GMAC and GPHY Control Registers (YUKON only) */
1099enum {
1100 GMAC_CTRL = 0x0f00,/* 32 bit GMAC Control Reg */
1101 GPHY_CTRL = 0x0f04,/* 32 bit GPHY Control Reg */
1102 GMAC_IRQ_SRC = 0x0f08,/* 8 bit GMAC Interrupt Source Reg */
1103 GMAC_IRQ_MSK = 0x0f0c,/* 8 bit GMAC Interrupt Mask Reg */
1104 GMAC_LINK_CTRL = 0x0f10,/* 16 bit Link Control Reg */
1105
1106/* Wake-up Frame Pattern Match Control Registers (YUKON only) */
1107 WOL_CTRL_STAT = 0x0f20,/* 16 bit WOL Control/Status Reg */
1108 WOL_MATCH_CTL = 0x0f22,/* 8 bit WOL Match Control Reg */
1109 WOL_MATCH_RES = 0x0f23,/* 8 bit WOL Match Result Reg */
1110 WOL_MAC_ADDR = 0x0f24,/* 32 bit WOL MAC Address */
1111 WOL_PATT_RPTR = 0x0f2c,/* 8 bit WOL Pattern Read Pointer */
1112
1113/* WOL Pattern Length Registers (YUKON only) */
1114 WOL_PATT_LEN_LO = 0x0f30,/* 32 bit WOL Pattern Length 3..0 */
1115 WOL_PATT_LEN_HI = 0x0f34,/* 24 bit WOL Pattern Length 6..4 */
1116
1117/* WOL Pattern Counter Registers (YUKON only) */
1118 WOL_PATT_CNT_0 = 0x0f38,/* 32 bit WOL Pattern Counter 3..0 */
1119 WOL_PATT_CNT_4 = 0x0f3c,/* 24 bit WOL Pattern Counter 6..4 */
1120};
1121#define WOL_REGS(port, x) (x + (port)*0x80)
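/*
 * Example (illustrative): WOL_REGS(1, WOL_CTRL_STAT) == 0x0f20 + 0x80
 * == 0x0fa0, the WOL Control/Status register of the second port.
 */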
1122
1123enum {
1124 WOL_PATT_RAM_1 = 0x1000,/* WOL Pattern RAM Link 1 */
1125 WOL_PATT_RAM_2 = 0x1400,/* WOL Pattern RAM Link 2 */
1126};
1127#define WOL_PATT_RAM_BASE(port) (WOL_PATT_RAM_1 + (port)*0x400)
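/*
 * Consistency check (illustrative): WOL_PATT_RAM_BASE(1) == 0x1000 + 0x400
 * == 0x1400, which matches WOL_PATT_RAM_2 above.
 */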
1128
1129enum {
1130 BASE_GMAC_1 = 0x2800,/* GMAC 1 registers */
1131 BASE_GMAC_2 = 0x3800,/* GMAC 2 registers */
1132};
1133
1134/*
1135 * Marvell PHY registers, indirectly addressed over the GMAC
1136 */
1137enum {
1138 PHY_MARV_CTRL = 0x00,/* 16 bit r/w PHY Control Register */
1139 PHY_MARV_STAT = 0x01,/* 16 bit r/o PHY Status Register */
1140 PHY_MARV_ID0 = 0x02,/* 16 bit r/o PHY ID0 Register */
1141 PHY_MARV_ID1 = 0x03,/* 16 bit r/o PHY ID1 Register */
1142 PHY_MARV_AUNE_ADV = 0x04,/* 16 bit r/w Auto-Neg. Advertisement */
1143 PHY_MARV_AUNE_LP = 0x05,/* 16 bit r/o Link Partner Ability Reg */
1144 PHY_MARV_AUNE_EXP = 0x06,/* 16 bit r/o Auto-Neg. Expansion Reg */
1145 PHY_MARV_NEPG = 0x07,/* 16 bit r/w Next Page Register */
1146 PHY_MARV_NEPG_LP = 0x08,/* 16 bit r/o Next Page Link Partner */
1147 /* Marvell-specific registers */
1148 PHY_MARV_1000T_CTRL = 0x09,/* 16 bit r/w 1000Base-T Control Reg */
1149 PHY_MARV_1000T_STAT = 0x0a,/* 16 bit r/o 1000Base-T Status Reg */
1150 PHY_MARV_EXT_STAT = 0x0f,/* 16 bit r/o Extended Status Reg */
1151 PHY_MARV_PHY_CTRL = 0x10,/* 16 bit r/w PHY Specific Ctrl Reg */
1152 PHY_MARV_PHY_STAT = 0x11,/* 16 bit r/o PHY Specific Stat Reg */
1153 PHY_MARV_INT_MASK = 0x12,/* 16 bit r/w Interrupt Mask Reg */
1154 PHY_MARV_INT_STAT = 0x13,/* 16 bit r/o Interrupt Status Reg */
1155 PHY_MARV_EXT_CTRL = 0x14,/* 16 bit r/w Ext. PHY Specific Ctrl */
1156 PHY_MARV_RXE_CNT = 0x15,/* 16 bit r/w Receive Error Counter */
1157 PHY_MARV_EXT_ADR = 0x16,/* 16 bit r/w Ext. Ad. for Cable Diag. */
1158 PHY_MARV_PORT_IRQ = 0x17,/* 16 bit r/o Port 0 IRQ (88E1111 only) */
1159 PHY_MARV_LED_CTRL = 0x18,/* 16 bit r/w LED Control Reg */
1160 PHY_MARV_LED_OVER = 0x19,/* 16 bit r/w Manual LED Override Reg */
1161 PHY_MARV_EXT_CTRL_2 = 0x1a,/* 16 bit r/w Ext. PHY Specific Ctrl 2 */
1162 PHY_MARV_EXT_P_STAT = 0x1b,/* 16 bit r/w Ext. PHY Spec. Stat Reg */
1163 PHY_MARV_CABLE_DIAG = 0x1c,/* 16 bit r/o Cable Diagnostic Reg */
1164 PHY_MARV_PAGE_ADDR = 0x1d,/* 16 bit r/w Extended Page Address Reg */
1165 PHY_MARV_PAGE_DATA = 0x1e,/* 16 bit r/w Extended Page Data Reg */
1166
1167/* for 10/100 Fast Ethernet PHY (88E3082 only) */
1168 PHY_MARV_FE_LED_PAR = 0x16,/* 16 bit r/w LED Parallel Select Reg. */
1169 PHY_MARV_FE_LED_SER = 0x17,/* 16 bit r/w LED Stream Select S. LED */
1170 PHY_MARV_FE_VCT_TX = 0x1a,/* 16 bit r/w VCT Reg. for TXP/N Pins */
1171 PHY_MARV_FE_VCT_RX = 0x1b,/* 16 bit r/o VCT Reg. for RXP/N Pins */
1172 PHY_MARV_FE_SPEC_2 = 0x1c,/* 16 bit r/w Specific Control Reg. 2 */
1173};
1174
1175enum {
1176 PHY_CT_RESET = 1<<15, /* Bit 15: (sc) clear all PHY related regs */
1177 PHY_CT_LOOP = 1<<14, /* Bit 14: enable Loopback over PHY */
1178 PHY_CT_SPS_LSB = 1<<13, /* Bit 13: Speed select, lower bit */
1179 PHY_CT_ANE = 1<<12, /* Bit 12: Auto-Negotiation Enabled */
1180 PHY_CT_PDOWN = 1<<11, /* Bit 11: Power Down Mode */
1181 PHY_CT_ISOL = 1<<10, /* Bit 10: Isolate Mode */
1182 PHY_CT_RE_CFG = 1<<9, /* Bit 9: (sc) Restart Auto-Negotiation */
1183 PHY_CT_DUP_MD = 1<<8, /* Bit 8: Duplex Mode */
1184 PHY_CT_COL_TST = 1<<7, /* Bit 7: Collision Test enabled */
1185 PHY_CT_SPS_MSB = 1<<6, /* Bit 6: Speed select, upper bit */
1186};
1187
1188enum {
1189 PHY_CT_SP1000 = PHY_CT_SPS_MSB, /* enable speed of 1000 Mbps */
1190 PHY_CT_SP100 = PHY_CT_SPS_LSB, /* enable speed of 100 Mbps */
1191 PHY_CT_SP10 = 0, /* enable speed of 10 Mbps */
1192};
1193
1194enum {
1195 PHY_ST_EXT_ST = 1<<8, /* Bit 8: Extended Status Present */
1196
1197 PHY_ST_PRE_SUP = 1<<6, /* Bit 6: Preamble Suppression */
1198 PHY_ST_AN_OVER = 1<<5, /* Bit 5: Auto-Negotiation Over */
1199 PHY_ST_REM_FLT = 1<<4, /* Bit 4: Remote Fault Condition Occurred */
1200 PHY_ST_AN_CAP = 1<<3, /* Bit 3: Auto-Negotiation Capability */
1201 PHY_ST_LSYNC = 1<<2, /* Bit 2: Link Synchronized */
1202 PHY_ST_JAB_DET = 1<<1, /* Bit 1: Jabber Detected */
1203 PHY_ST_EXT_REG = 1<<0, /* Bit 0: Extended Register available */
1204};
1205
1206enum {
1207 PHY_I1_OUI_MSK = 0x3f<<10, /* Bit 15..10: Organization Unique ID */
1208 PHY_I1_MOD_NUM = 0x3f<<4, /* Bit 9.. 4: Model Number */
1209 PHY_I1_REV_MSK = 0xf, /* Bit 3.. 0: Revision Number */
1210};
1211
1212/* PHY IDs (Marvell and Broadcom) */
1213enum {
1214 PHY_MARV_ID0_VAL= 0x0141, /* Marvell Unique Identifier */
1215
1216 PHY_BCOM_ID1_A1 = 0x6041,
1217 PHY_BCOM_ID1_B2 = 0x6043,
1218 PHY_BCOM_ID1_C0 = 0x6044,
1219 PHY_BCOM_ID1_C5 = 0x6047,
1220
1221 PHY_MARV_ID1_B0 = 0x0C23, /* Yukon (PHY 88E1011) */
1222 PHY_MARV_ID1_B2 = 0x0C25, /* Yukon-Plus (PHY 88E1011) */
1223 PHY_MARV_ID1_C2 = 0x0CC2, /* Yukon-EC (PHY 88E1111) */
1224 PHY_MARV_ID1_Y2 = 0x0C91, /* Yukon-2 (PHY 88E1112) */
1225 PHY_MARV_ID1_FE = 0x0C83, /* Yukon-FE (PHY 88E3082 Rev.A1) */
1226 PHY_MARV_ID1_ECU= 0x0CB0, /* Yukon-ECU (PHY 88E1149 Rev.B2?) */
1227};
1228
1229/* Advertisement register bits */
1230enum {
1231 PHY_AN_NXT_PG = 1<<15, /* Bit 15: Request Next Page */
1232 PHY_AN_ACK = 1<<14, /* Bit 14: (ro) Acknowledge Received */
1233 PHY_AN_RF = 1<<13, /* Bit 13: Remote Fault Bits */
1234
1235 PHY_AN_PAUSE_ASYM = 1<<11,/* Bit 11: Try for asymmetric */
1236 PHY_AN_PAUSE_CAP = 1<<10, /* Bit 10: Try for pause */
1237 PHY_AN_100BASE4 = 1<<9, /* Bit 9: Try for 100Base-T4 */
1238 PHY_AN_100FULL = 1<<8, /* Bit 8: Try for 100mbps full-duplex */
1239 PHY_AN_100HALF = 1<<7, /* Bit 7: Try for 100mbps half-duplex */
1240 PHY_AN_10FULL = 1<<6, /* Bit 6: Try for 10mbps full-duplex */
1241 PHY_AN_10HALF = 1<<5, /* Bit 5: Try for 10mbps half-duplex */
1242 PHY_AN_CSMA = 1<<0, /* Bit 0: Only selector supported */
1243 PHY_AN_SEL = 0x1f, /* Bit 4..0: Selector Field, 00001=Ethernet*/
1244 PHY_AN_FULL = PHY_AN_100FULL | PHY_AN_10FULL | PHY_AN_CSMA,
1245 PHY_AN_ALL = PHY_AN_10HALF | PHY_AN_10FULL |
1246 PHY_AN_100HALF | PHY_AN_100FULL,
1247};
1248
1249/***** PHY_BCOM_1000T_STAT 16 bit r/o 1000Base-T Status Reg *****/
1250/***** PHY_MARV_1000T_STAT 16 bit r/o 1000Base-T Status Reg *****/
1251enum {
1252 PHY_B_1000S_MSF = 1<<15, /* Bit 15: Master/Slave Fault */
1253 PHY_B_1000S_MSR = 1<<14, /* Bit 14: Master/Slave Result */
1254 PHY_B_1000S_LRS = 1<<13, /* Bit 13: Local Receiver Status */
1255 PHY_B_1000S_RRS = 1<<12, /* Bit 12: Remote Receiver Status */
1256 PHY_B_1000S_LP_FD = 1<<11, /* Bit 11: Link Partner can FD */
1257 PHY_B_1000S_LP_HD = 1<<10, /* Bit 10: Link Partner can HD */
1258 /* Bit 9..8: reserved */
1259 PHY_B_1000S_IEC = 0xff, /* Bit 7..0: Idle Error Count */
1260};
1261
1262/** Marvell-Specific */
1263enum {
1264 PHY_M_AN_NXT_PG = 1<<15, /* Request Next Page */
1265 PHY_M_AN_ACK = 1<<14, /* (ro) Acknowledge Received */
1266 PHY_M_AN_RF = 1<<13, /* Remote Fault */
1267
1268 PHY_M_AN_ASP = 1<<11, /* Asymmetric Pause */
1269 PHY_M_AN_PC = 1<<10, /* MAC Pause implemented */
1270 PHY_M_AN_100_T4 = 1<<9, /* Not cap. 100Base-T4 (always 0) */
1271 PHY_M_AN_100_FD = 1<<8, /* Advertise 100Base-TX Full Duplex */
1272 PHY_M_AN_100_HD = 1<<7, /* Advertise 100Base-TX Half Duplex */
1273 PHY_M_AN_10_FD = 1<<6, /* Advertise 10Base-T Full Duplex */
1274 PHY_M_AN_10_HD = 1<<5, /* Advertise 10Base-T Half Duplex */
1275 PHY_M_AN_SEL_MSK =0x1f<<4, /* Bit 4.. 0: Selector Field Mask */
1276};
1277
1278/* special defines for FIBER (88E1011S only) */
1279enum {
1280 PHY_M_AN_ASP_X = 1<<8, /* Asymmetric Pause */
1281 PHY_M_AN_PC_X = 1<<7, /* MAC Pause implemented */
1282 PHY_M_AN_1000X_AHD = 1<<6, /* Advertise 1000Base-X Half Duplex */
1283 PHY_M_AN_1000X_AFD = 1<<5, /* Advertise 1000Base-X Full Duplex */
1284};
1285
1286/* Pause Bits (PHY_M_AN_ASP_X and PHY_M_AN_PC_X) encoding */
1287enum {
1288 PHY_M_P_NO_PAUSE_X = 0<<7,/* Bit 8.. 7: no Pause Mode */
1289 PHY_M_P_SYM_MD_X = 1<<7, /* Bit 8.. 7: symmetric Pause Mode */
1290 PHY_M_P_ASYM_MD_X = 2<<7,/* Bit 8.. 7: asymmetric Pause Mode */
1291 PHY_M_P_BOTH_MD_X = 3<<7,/* Bit 8.. 7: both Pause Mode */
1292};
1293
1294/***** PHY_MARV_1000T_CTRL 16 bit r/w 1000Base-T Control Reg *****/
1295enum {
1296 PHY_M_1000C_TEST = 7<<13,/* Bit 15..13: Test Modes */
1297 PHY_M_1000C_MSE = 1<<12, /* Manual Master/Slave Enable */
1298 PHY_M_1000C_MSC = 1<<11, /* M/S Configuration (1=Master) */
1299 PHY_M_1000C_MPD = 1<<10, /* Multi-Port Device */
1300 PHY_M_1000C_AFD = 1<<9, /* Advertise Full Duplex */
1301 PHY_M_1000C_AHD = 1<<8, /* Advertise Half Duplex */
1302};
1303
1304/***** PHY_MARV_PHY_CTRL 16 bit r/w PHY Specific Ctrl Reg *****/
1305enum {
1306 PHY_M_PC_TX_FFD_MSK = 3<<14,/* Bit 15..14: Tx FIFO Depth Mask */
1307 PHY_M_PC_RX_FFD_MSK = 3<<12,/* Bit 13..12: Rx FIFO Depth Mask */
1308 PHY_M_PC_ASS_CRS_TX = 1<<11, /* Assert CRS on Transmit */
1309 PHY_M_PC_FL_GOOD = 1<<10, /* Force Link Good */
1310 PHY_M_PC_EN_DET_MSK = 3<<8,/* Bit 9.. 8: Energy Detect Mask */
1311 PHY_M_PC_ENA_EXT_D = 1<<7, /* Enable Ext. Distance (10BT) */
1312 PHY_M_PC_MDIX_MSK = 3<<5,/* Bit 6.. 5: MDI/MDIX Config. Mask */
1313 PHY_M_PC_DIS_125CLK = 1<<4, /* Disable 125 CLK */
1314 PHY_M_PC_MAC_POW_UP = 1<<3, /* MAC Power up */
1315 PHY_M_PC_SQE_T_ENA = 1<<2, /* SQE Test Enabled */
1316 PHY_M_PC_POL_R_DIS = 1<<1, /* Polarity Reversal Disabled */
1317 PHY_M_PC_DIS_JABBER = 1<<0, /* Disable Jabber */
1318};
1319
1320enum {
1321 PHY_M_PC_EN_DET = 2<<8, /* Energy Detect (Mode 1) */
1322 PHY_M_PC_EN_DET_PLUS = 3<<8, /* Energy Detect Plus (Mode 2) */
1323};
1324
1325#define PHY_M_PC_MDI_XMODE(x) (((u16)(x)<<5) & PHY_M_PC_MDIX_MSK)
1326
1327enum {
1328 PHY_M_PC_MAN_MDI = 0, /* 00 = Manual MDI configuration */
1329 PHY_M_PC_MAN_MDIX = 1, /* 01 = Manual MDIX configuration */
1330 PHY_M_PC_ENA_AUTO = 3, /* 11 = Enable Automatic Crossover */
1331};
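
/*
 * Illustrative example (not in the original file): selecting automatic
 * MDI/MDIX crossover as the MDI field of a PHY_MARV_PHY_CTRL value.
 */
enum {
	PHY_CTRL_AUTO_MDIX_SKETCH = PHY_M_PC_MDI_XMODE(PHY_M_PC_ENA_AUTO),
};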
1332
1333/* for Yukon-EC Ultra Gigabit Ethernet PHY (88E1149 only) */
1334enum {
1335 PHY_M_PC_COP_TX_DIS = 1<<3, /* Copper Transmitter Disable */
1336 PHY_M_PC_POW_D_ENA = 1<<2, /* Power Down Enable */
1337};
1338
1339/* for 10/100 Fast Ethernet PHY (88E3082 only) */
1340enum {
1341 PHY_M_PC_ENA_DTE_DT = 1<<15, /* Enable Data Terminal Equ. (DTE) Detect */
1342 PHY_M_PC_ENA_ENE_DT = 1<<14, /* Enable Energy Detect (sense & pulse) */
1343 PHY_M_PC_DIS_NLP_CK = 1<<13, /* Disable Normal Link Pulse (NLP) Check */
1344 PHY_M_PC_ENA_LIP_NP = 1<<12, /* Enable Link Partner Next Page Reg. */
1345 PHY_M_PC_DIS_NLP_GN = 1<<11, /* Disable Normal Link Pulse Generation */
1346
1347 PHY_M_PC_DIS_SCRAMB = 1<<9, /* Disable Scrambler */
1348 PHY_M_PC_DIS_FEFI = 1<<8, /* Disable Far End Fault Indic. (FEFI) */
1349
1350 PHY_M_PC_SH_TP_SEL = 1<<6, /* Shielded Twisted Pair Select */
1351 PHY_M_PC_RX_FD_MSK = 3<<2,/* Bit 3.. 2: Rx FIFO Depth Mask */
1352};
1353
1354/***** PHY_MARV_PHY_STAT 16 bit r/o PHY Specific Status Reg *****/
1355enum {
1356 PHY_M_PS_SPEED_MSK = 3<<14, /* Bit 15..14: Speed Mask */
1357 PHY_M_PS_SPEED_1000 = 1<<15, /* 10 = 1000 Mbps */
1358 PHY_M_PS_SPEED_100 = 1<<14, /* 01 = 100 Mbps */
1359 PHY_M_PS_SPEED_10 = 0, /* 00 = 10 Mbps */
1360 PHY_M_PS_FULL_DUP = 1<<13, /* Full Duplex */
1361 PHY_M_PS_PAGE_REC = 1<<12, /* Page Received */
1362 PHY_M_PS_SPDUP_RES = 1<<11, /* Speed & Duplex Resolved */
1363 PHY_M_PS_LINK_UP = 1<<10, /* Link Up */
1364 PHY_M_PS_CABLE_MSK = 7<<7, /* Bit 9.. 7: Cable Length Mask */
1365 PHY_M_PS_MDI_X_STAT = 1<<6, /* MDI Crossover Stat (1=MDIX) */
1366 PHY_M_PS_DOWNS_STAT = 1<<5, /* Downshift Status (1=downsh.) */
1367 PHY_M_PS_ENDET_STAT = 1<<4, /* Energy Detect Status (1=act) */
1368 PHY_M_PS_TX_P_EN = 1<<3, /* Tx Pause Enabled */
1369 PHY_M_PS_RX_P_EN = 1<<2, /* Rx Pause Enabled */
1370 PHY_M_PS_POL_REV = 1<<1, /* Polarity Reversed */
1371 PHY_M_PS_JABBER = 1<<0, /* Jabber */
1372};
1373
1374#define PHY_M_PS_PAUSE_MSK (PHY_M_PS_TX_P_EN | PHY_M_PS_RX_P_EN)
1375
1376/* for 10/100 Fast Ethernet PHY (88E3082 only) */
1377enum {
1378 PHY_M_PS_DTE_DETECT = 1<<15, /* Data Terminal Equipment (DTE) Detected */
1379 PHY_M_PS_RES_SPEED = 1<<14, /* Resolved Speed (1=100 Mbps, 0=10 Mbps) */
1380};
1381
1382enum {
1383 PHY_M_IS_AN_ERROR = 1<<15, /* Auto-Negotiation Error */
1384 PHY_M_IS_LSP_CHANGE = 1<<14, /* Link Speed Changed */
1385 PHY_M_IS_DUP_CHANGE = 1<<13, /* Duplex Mode Changed */
1386 PHY_M_IS_AN_PR = 1<<12, /* Page Received */
1387 PHY_M_IS_AN_COMPL = 1<<11, /* Auto-Negotiation Completed */
1388 PHY_M_IS_LST_CHANGE = 1<<10, /* Link Status Changed */
1389 PHY_M_IS_SYMB_ERROR = 1<<9, /* Symbol Error */
1390 PHY_M_IS_FALSE_CARR = 1<<8, /* False Carrier */
1391 PHY_M_IS_FIFO_ERROR = 1<<7, /* FIFO Overflow/Underrun Error */
1392 PHY_M_IS_MDI_CHANGE = 1<<6, /* MDI Crossover Changed */
1393 PHY_M_IS_DOWNSH_DET = 1<<5, /* Downshift Detected */
1394 PHY_M_IS_END_CHANGE = 1<<4, /* Energy Detect Changed */
1395
1396 PHY_M_IS_DTE_CHANGE = 1<<2, /* DTE Power Det. Status Changed */
1397 PHY_M_IS_POL_CHANGE = 1<<1, /* Polarity Changed */
1398 PHY_M_IS_JABBER = 1<<0, /* Jabber */
1399
1400 PHY_M_DEF_MSK = PHY_M_IS_LSP_CHANGE | PHY_M_IS_LST_CHANGE
1401 | PHY_M_IS_DUP_CHANGE,
1402 PHY_M_AN_MSK = PHY_M_IS_AN_ERROR | PHY_M_IS_AN_COMPL,
1403};
1404
1405
1406/***** PHY_MARV_EXT_CTRL 16 bit r/w Ext. PHY Specific Ctrl *****/
1407enum {
1408 PHY_M_EC_ENA_BC_EXT = 1<<15, /* Enable Block Carr. Ext. (88E1111 only) */
1409 PHY_M_EC_ENA_LIN_LB = 1<<14, /* Enable Line Loopback (88E1111 only) */
1410
1411 PHY_M_EC_DIS_LINK_P = 1<<12, /* Disable Link Pulses (88E1111 only) */
1412 PHY_M_EC_M_DSC_MSK = 3<<10, /* Bit 11..10: Master Downshift Counter */
1413 /* (88E1011 only) */
1414 PHY_M_EC_S_DSC_MSK = 3<<8,/* Bit 9.. 8: Slave Downshift Counter */
1415 /* (88E1011 only) */
1416 PHY_M_EC_M_DSC_MSK2 = 7<<9,/* Bit 11.. 9: Master Downshift Counter */
1417 /* (88E1111 only) */
1418 PHY_M_EC_DOWN_S_ENA = 1<<8, /* Downshift Enable (88E1111 only) */
1419 /* !!! Errata in spec. (1 = disable) */
1420 PHY_M_EC_RX_TIM_CT = 1<<7, /* RGMII Rx Timing Control */
1421 PHY_M_EC_MAC_S_MSK = 7<<4,/* Bit 6.. 4: Def. MAC interface speed */
1422 PHY_M_EC_FIB_AN_ENA = 1<<3, /* Fiber Auto-Neg. Enable (88E1011S only) */
1423 PHY_M_EC_DTE_D_ENA = 1<<2, /* DTE Detect Enable (88E1111 only) */
1424 PHY_M_EC_TX_TIM_CT = 1<<1, /* RGMII Tx Timing Control */
1425 PHY_M_EC_TRANS_DIS = 1<<0, /* Transmitter Disable (88E1111 only) */
1426
1427 PHY_M_10B_TE_ENABLE = 1<<7, /* 10Base-Te Enable (88E8079 and above) */
1428};
1429#define PHY_M_EC_M_DSC(x) ((u16)(x)<<10 & PHY_M_EC_M_DSC_MSK)
1430 /* 00=1x; 01=2x; 10=3x; 11=4x */
1431#define PHY_M_EC_S_DSC(x) ((u16)(x)<<8 & PHY_M_EC_S_DSC_MSK)
1432 /* 00=dis; 01=1x; 10=2x; 11=3x */
1433#define PHY_M_EC_DSC_2(x) ((u16)(x)<<9 & PHY_M_EC_M_DSC_MSK2)
1434 /* 000=1x; 001=2x; 010=3x; 011=4x */
1435#define PHY_M_EC_MAC_S(x) ((u16)(x)<<4 & PHY_M_EC_MAC_S_MSK)
1436 /* 01X=0; 110=2.5; 111=25 (MHz) */
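
/*
 * Illustrative example (not in the original file): composing
 * PHY_MARV_EXT_CTRL downshift fields with the macros above, here a
 * 2x master and 1x slave downshift counter in the 88E1011 encoding.
 */
enum {
	PHY_EXT_CTRL_DSC_SKETCH = PHY_M_EC_M_DSC(1) | PHY_M_EC_S_DSC(1),
};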
1437
1438/* for Yukon-2 Gigabit Ethernet PHY (88E1112 only) */
1439enum {
1440 PHY_M_PC_DIS_LINK_Pa = 1<<15,/* Disable Link Pulses */
1441 PHY_M_PC_DSC_MSK = 7<<12,/* Bit 14..12: Downshift Counter */
1442 PHY_M_PC_DOWN_S_ENA = 1<<11,/* Downshift Enable */
1443};
1444/* !!! Errata in spec. (1 = disable) */
1445
1446#define PHY_M_PC_DSC(x) (((u16)(x)<<12) & PHY_M_PC_DSC_MSK)
1447 /* 100=5x; 101=6x; 110=7x; 111=8x */
1448enum {
1449 MAC_TX_CLK_0_MHZ = 2,
1450 MAC_TX_CLK_2_5_MHZ = 6,
1451 MAC_TX_CLK_25_MHZ = 7,
1452};
1453
1454/***** PHY_MARV_LED_CTRL 16 bit r/w LED Control Reg *****/
1455enum {
1456 PHY_M_LEDC_DIS_LED = 1<<15, /* Disable LED */
1457 PHY_M_LEDC_PULS_MSK = 7<<12,/* Bit 14..12: Pulse Stretch Mask */
1458 PHY_M_LEDC_F_INT = 1<<11, /* Force Interrupt */
1459 PHY_M_LEDC_BL_R_MSK = 7<<8,/* Bit 10.. 8: Blink Rate Mask */
1460 PHY_M_LEDC_DP_C_LSB = 1<<7, /* Duplex Control (LSB, 88E1111 only) */
1461 PHY_M_LEDC_TX_C_LSB = 1<<6, /* Tx Control (LSB, 88E1111 only) */
1462 PHY_M_LEDC_LK_C_MSK = 7<<3,/* Bit 5.. 3: Link Control Mask */
1463 /* (88E1111 only) */
1464};
1465
1466enum {
1467 PHY_M_LEDC_LINK_MSK = 3<<3,/* Bit 4.. 3: Link Control Mask */
1468 /* (88E1011 only) */
1469 PHY_M_LEDC_DP_CTRL = 1<<2, /* Duplex Control */
1470 PHY_M_LEDC_DP_C_MSB = 1<<2, /* Duplex Control (MSB, 88E1111 only) */
1471 PHY_M_LEDC_RX_CTRL = 1<<1, /* Rx Activity / Link */
1472 PHY_M_LEDC_TX_CTRL = 1<<0, /* Tx Activity / Link */
1473 PHY_M_LEDC_TX_C_MSB = 1<<0, /* Tx Control (MSB, 88E1111 only) */
1474};
1475
1476#define PHY_M_LED_PULS_DUR(x) (((u16)(x)<<12) & PHY_M_LEDC_PULS_MSK)
1477
1478/***** PHY_MARV_PHY_STAT (page 3)16 bit r/w Polarity Control Reg. *****/
1479enum {
1480 PHY_M_POLC_LS1M_MSK = 0xf<<12, /* Bit 15..12: LOS,STAT1 Mix % Mask */
1481 PHY_M_POLC_IS0M_MSK = 0xf<<8, /* Bit 11.. 8: INIT,STAT0 Mix % Mask */
1482 PHY_M_POLC_LOS_MSK = 0x3<<6, /* Bit 7.. 6: LOS Pol. Ctrl. Mask */
1483 PHY_M_POLC_INIT_MSK = 0x3<<4, /* Bit 5.. 4: INIT Pol. Ctrl. Mask */
1484 PHY_M_POLC_STA1_MSK = 0x3<<2, /* Bit 3.. 2: STAT1 Pol. Ctrl. Mask */
1485 PHY_M_POLC_STA0_MSK = 0x3, /* Bit 1.. 0: STAT0 Pol. Ctrl. Mask */
1486};
1487
1488#define PHY_M_POLC_LS1_P_MIX(x) (((x)<<12) & PHY_M_POLC_LS1M_MSK)
1489#define PHY_M_POLC_IS0_P_MIX(x) (((x)<<8) & PHY_M_POLC_IS0M_MSK)
1490#define PHY_M_POLC_LOS_CTRL(x) (((x)<<6) & PHY_M_POLC_LOS_MSK)
1491#define PHY_M_POLC_INIT_CTRL(x) (((x)<<4) & PHY_M_POLC_INIT_MSK)
1492#define PHY_M_POLC_STA1_CTRL(x) (((x)<<2) & PHY_M_POLC_STA1_MSK)
1493#define PHY_M_POLC_STA0_CTRL(x) (((x)<<0) & PHY_M_POLC_STA0_MSK)
1494
1495enum {
1496 PULS_NO_STR = 0,/* no pulse stretching */
1497 PULS_21MS = 1,/* 21 ms to 42 ms */
1498 PULS_42MS = 2,/* 42 ms to 84 ms */
1499 PULS_84MS = 3,/* 84 ms to 170 ms */
1500 PULS_170MS = 4,/* 170 ms to 340 ms */
1501 PULS_340MS = 5,/* 340 ms to 670 ms */
1502 PULS_670MS = 6,/* 670 ms to 1.3 s */
1503 PULS_1300MS = 7,/* 1.3 s to 2.7 s */
1504};
1505
1506#define PHY_M_LED_BLINK_RT(x) (((u16)(x)<<8) & PHY_M_LEDC_BL_R_MSK)
1507
1508enum {
1509 BLINK_42MS = 0,/* 42 ms */
1510 BLINK_84MS = 1,/* 84 ms */
1511 BLINK_170MS = 2,/* 170 ms */
1512 BLINK_340MS = 3,/* 340 ms */
1513 BLINK_670MS = 4,/* 670 ms */
1514};
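
/*
 * Illustrative example (not in the original file): a PHY_MARV_LED_CTRL
 * value requesting 170..340 ms pulse stretching and an 84 ms blink rate.
 */
enum {
	PHY_LED_CTRL_SKETCH = PHY_M_LED_PULS_DUR(PULS_170MS)
			    | PHY_M_LED_BLINK_RT(BLINK_84MS),
};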
1515
1516/***** PHY_MARV_LED_OVER 16 bit r/w Manual LED Override Reg *****/
1517#define PHY_M_LED_MO_SGMII(x) ((x)<<14) /* Bit 15..14: SGMII AN Timer */
1518
1519#define PHY_M_LED_MO_DUP(x) ((x)<<10) /* Bit 11..10: Duplex */
1520#define PHY_M_LED_MO_10(x) ((x)<<8) /* Bit 9.. 8: Link 10 */
1521#define PHY_M_LED_MO_100(x) ((x)<<6) /* Bit 7.. 6: Link 100 */
1522#define PHY_M_LED_MO_1000(x) ((x)<<4) /* Bit 5.. 4: Link 1000 */
1523#define PHY_M_LED_MO_RX(x) ((x)<<2) /* Bit 3.. 2: Rx */
1524#define PHY_M_LED_MO_TX(x) ((x)<<0) /* Bit 1.. 0: Tx */
1525
1526enum led_mode {
1527 MO_LED_NORM = 0,
1528 MO_LED_BLINK = 1,
1529 MO_LED_OFF = 2,
1530 MO_LED_ON = 3,
1531};
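
/*
 * Illustrative example (not in the original file): forcing every LED
 * off via the manual override register -- the kind of value an
 * "identify NIC" blink cycle would alternate with MO_LED_ON.
 */
enum {
	PHY_LED_ALL_OFF_SKETCH = PHY_M_LED_MO_DUP(MO_LED_OFF)
			       | PHY_M_LED_MO_10(MO_LED_OFF)
			       | PHY_M_LED_MO_100(MO_LED_OFF)
			       | PHY_M_LED_MO_1000(MO_LED_OFF)
			       | PHY_M_LED_MO_RX(MO_LED_OFF)
			       | PHY_M_LED_MO_TX(MO_LED_OFF),
};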
1532
1533/***** PHY_MARV_EXT_CTRL_2 16 bit r/w Ext. PHY Specific Ctrl 2 *****/
1534enum {
1535 PHY_M_EC2_FI_IMPED = 1<<6, /* Fiber Input Impedance */
1536 PHY_M_EC2_FO_IMPED = 1<<5, /* Fiber Output Impedance */
1537 PHY_M_EC2_FO_M_CLK = 1<<4, /* Fiber Mode Clock Enable */
1538 PHY_M_EC2_FO_BOOST = 1<<3, /* Fiber Output Boost */
1539 PHY_M_EC2_FO_AM_MSK = 7,/* Bit 2.. 0: Fiber Output Amplitude */
1540};
1541
1542/***** PHY_MARV_EXT_P_STAT 16 bit r/w Ext. PHY Specific Status *****/
1543enum {
1544 PHY_M_FC_AUTO_SEL = 1<<15, /* Fiber/Copper Auto Sel. Dis. */
1545 PHY_M_FC_AN_REG_ACC = 1<<14, /* Fiber/Copper AN Reg. Access */
1546 PHY_M_FC_RESOLUTION = 1<<13, /* Fiber/Copper Resolution */
1547 PHY_M_SER_IF_AN_BP = 1<<12, /* Ser. IF AN Bypass Enable */
1548 PHY_M_SER_IF_BP_ST = 1<<11, /* Ser. IF AN Bypass Status */
1549 PHY_M_IRQ_POLARITY = 1<<10, /* IRQ polarity */
1550 PHY_M_DIS_AUT_MED = 1<<9, /* Disable Aut. Medium Reg. Selection */
1551 /* (88E1111 only) */
1552
1553 PHY_M_UNDOC1 = 1<<7, /* undocumented bit !! */
1554 PHY_M_DTE_POW_STAT = 1<<4, /* DTE Power Status (88E1111 only) */
1555 PHY_M_MODE_MASK = 0xf, /* Bit 3.. 0: copy of HWCFG MODE[3:0] */
1556};
1557
1558/* for 10/100 Fast Ethernet PHY (88E3082 only) */
1559/***** PHY_MARV_FE_LED_PAR 16 bit r/w LED Parallel Select Reg. *****/
1560 /* Bit 15..12: reserved (used internally) */
1561enum {
1562 PHY_M_FELP_LED2_MSK = 0xf<<8, /* Bit 11.. 8: LED2 Mask (LINK) */
1563 PHY_M_FELP_LED1_MSK = 0xf<<4, /* Bit 7.. 4: LED1 Mask (ACT) */
1564 PHY_M_FELP_LED0_MSK = 0xf, /* Bit 3.. 0: LED0 Mask (SPEED) */
1565};
1566
1567#define PHY_M_FELP_LED2_CTRL(x) (((u16)(x)<<8) & PHY_M_FELP_LED2_MSK)
1568#define PHY_M_FELP_LED1_CTRL(x) (((u16)(x)<<4) & PHY_M_FELP_LED1_MSK)
1569#define PHY_M_FELP_LED0_CTRL(x) (((u16)(x)<<0) & PHY_M_FELP_LED0_MSK)
1570
1571enum {
1572 LED_PAR_CTRL_COLX = 0x00,
1573 LED_PAR_CTRL_ERROR = 0x01,
1574 LED_PAR_CTRL_DUPLEX = 0x02,
1575 LED_PAR_CTRL_DP_COL = 0x03,
1576 LED_PAR_CTRL_SPEED = 0x04,
1577 LED_PAR_CTRL_LINK = 0x05,
1578 LED_PAR_CTRL_TX = 0x06,
1579 LED_PAR_CTRL_RX = 0x07,
1580 LED_PAR_CTRL_ACT = 0x08,
1581 LED_PAR_CTRL_LNK_RX = 0x09,
1582 LED_PAR_CTRL_LNK_AC = 0x0a,
1583 LED_PAR_CTRL_ACT_BL = 0x0b,
1584 LED_PAR_CTRL_TX_BL = 0x0c,
1585 LED_PAR_CTRL_RX_BL = 0x0d,
1586 LED_PAR_CTRL_COL_BL = 0x0e,
1587 LED_PAR_CTRL_INACT = 0x0f
1588};
1589
1590/***** PHY_MARV_FE_SPEC_2 16 bit r/w Specific Control Reg. 2 *****/
1591enum {
1592 PHY_M_FESC_DIS_WAIT = 1<<2, /* Disable TDR Waiting Period */
1593 PHY_M_FESC_ENA_MCLK = 1<<1, /* Enable MAC Rx Clock in sleep mode */
1594 PHY_M_FESC_SEL_CL_A = 1<<0, /* Select Class A driver (100B-TX) */
1595};
1596
1597/* for Yukon-2 Gigabit Ethernet PHY (88E1112 only) */
1598/***** PHY_MARV_PHY_CTRL (page 1) 16 bit r/w Fiber Specific Ctrl *****/
1599enum {
1600 PHY_M_FIB_FORCE_LNK = 1<<10,/* Force Link Good */
1601 PHY_M_FIB_SIGD_POL = 1<<9, /* SIGDET Polarity */
1602 PHY_M_FIB_TX_DIS = 1<<3, /* Transmitter Disable */
1603};
1604
1605/* for Yukon-2 Gigabit Ethernet PHY (88E1112 only) */
1606/***** PHY_MARV_PHY_CTRL (page 2) 16 bit r/w MAC Specific Ctrl *****/
1607enum {
1608 PHY_M_MAC_MD_MSK = 7<<7, /* Bit 9.. 7: Mode Select Mask */
1609 PHY_M_MAC_GMIF_PUP = 1<<3, /* GMII Power Up (88E1149 only) */
1610 PHY_M_MAC_MD_AUTO = 3,/* Auto Copper/1000Base-X */
1611 PHY_M_MAC_MD_COPPER = 5,/* Copper only */
1612 PHY_M_MAC_MD_1000BX = 7,/* 1000Base-X only */
1613};
1614#define PHY_M_MAC_MODE_SEL(x) (((x)<<7) & PHY_M_MAC_MD_MSK)
1615
1616/***** PHY_MARV_PHY_CTRL (page 3) 16 bit r/w LED Control Reg. *****/
1617enum {
1618 PHY_M_LEDC_LOS_MSK = 0xf<<12,/* Bit 15..12: LOS LED Ctrl. Mask */
1619 PHY_M_LEDC_INIT_MSK = 0xf<<8, /* Bit 11.. 8: INIT LED Ctrl. Mask */
1620 PHY_M_LEDC_STA1_MSK = 0xf<<4,/* Bit 7.. 4: STAT1 LED Ctrl. Mask */
1621 PHY_M_LEDC_STA0_MSK = 0xf, /* Bit 3.. 0: STAT0 LED Ctrl. Mask */
1622};
1623
1624#define PHY_M_LEDC_LOS_CTRL(x) (((x)<<12) & PHY_M_LEDC_LOS_MSK)
1625#define PHY_M_LEDC_INIT_CTRL(x) (((x)<<8) & PHY_M_LEDC_INIT_MSK)
1626#define PHY_M_LEDC_STA1_CTRL(x) (((x)<<4) & PHY_M_LEDC_STA1_MSK)
1627#define PHY_M_LEDC_STA0_CTRL(x) (((x)<<0) & PHY_M_LEDC_STA0_MSK)
1628
1629/* GMAC registers */
1630/* Port Registers */
1631enum {
1632 GM_GP_STAT = 0x0000, /* 16 bit r/o General Purpose Status */
1633 GM_GP_CTRL = 0x0004, /* 16 bit r/w General Purpose Control */
1634 GM_TX_CTRL = 0x0008, /* 16 bit r/w Transmit Control Reg. */
1635 GM_RX_CTRL = 0x000c, /* 16 bit r/w Receive Control Reg. */
1636 GM_TX_FLOW_CTRL = 0x0010, /* 16 bit r/w Transmit Flow-Control */
1637 GM_TX_PARAM = 0x0014, /* 16 bit r/w Transmit Parameter Reg. */
1638 GM_SERIAL_MODE = 0x0018, /* 16 bit r/w Serial Mode Register */
1639/* Source Address Registers */
1640 GM_SRC_ADDR_1L = 0x001c, /* 16 bit r/w Source Address 1 (low) */
1641 GM_SRC_ADDR_1M = 0x0020, /* 16 bit r/w Source Address 1 (middle) */
1642 GM_SRC_ADDR_1H = 0x0024, /* 16 bit r/w Source Address 1 (high) */
1643 GM_SRC_ADDR_2L = 0x0028, /* 16 bit r/w Source Address 2 (low) */
1644 GM_SRC_ADDR_2M = 0x002c, /* 16 bit r/w Source Address 2 (middle) */
1645 GM_SRC_ADDR_2H = 0x0030, /* 16 bit r/w Source Address 2 (high) */
1646
1647/* Multicast Address Hash Registers */
1648 GM_MC_ADDR_H1 = 0x0034, /* 16 bit r/w Multicast Address Hash 1 */
1649 GM_MC_ADDR_H2 = 0x0038, /* 16 bit r/w Multicast Address Hash 2 */
1650 GM_MC_ADDR_H3 = 0x003c, /* 16 bit r/w Multicast Address Hash 3 */
1651 GM_MC_ADDR_H4 = 0x0040, /* 16 bit r/w Multicast Address Hash 4 */
1652
1653/* Interrupt Source Registers */
1654 GM_TX_IRQ_SRC = 0x0044, /* 16 bit r/o Tx Overflow IRQ Source */
1655 GM_RX_IRQ_SRC = 0x0048, /* 16 bit r/o Rx Overflow IRQ Source */
1656 GM_TR_IRQ_SRC = 0x004c, /* 16 bit r/o Tx/Rx Over. IRQ Source */
1657
1658/* Interrupt Mask Registers */
1659 GM_TX_IRQ_MSK = 0x0050, /* 16 bit r/w Tx Overflow IRQ Mask */
1660 GM_RX_IRQ_MSK = 0x0054, /* 16 bit r/w Rx Overflow IRQ Mask */
1661 GM_TR_IRQ_MSK = 0x0058, /* 16 bit r/w Tx/Rx Over. IRQ Mask */
1662
1663/* Serial Management Interface (SMI) Registers */
1664 GM_SMI_CTRL = 0x0080, /* 16 bit r/w SMI Control Register */
1665 GM_SMI_DATA = 0x0084, /* 16 bit r/w SMI Data Register */
1666 GM_PHY_ADDR = 0x0088, /* 16 bit r/w GPHY Address Register */
1667/* MIB Counters */
1668 GM_MIB_CNT_BASE = 0x0100, /* Base Address of MIB Counters */
1669 GM_MIB_CNT_END = 0x025C, /* Last MIB counter */
1670};
1671
1672
1673/*
1674 * MIB Counters base address definitions (low word) -
1675 * use offset 4 for access to high word (32 bit r/o)
1676 */
1677enum {
1678 GM_RXF_UC_OK = GM_MIB_CNT_BASE + 0, /* Unicast Frames Received OK */
1679 GM_RXF_BC_OK = GM_MIB_CNT_BASE + 8, /* Broadcast Frames Received OK */
1680 GM_RXF_MPAUSE = GM_MIB_CNT_BASE + 16, /* Pause MAC Ctrl Frames Received */
1681 GM_RXF_MC_OK = GM_MIB_CNT_BASE + 24, /* Multicast Frames Received OK */
1682 GM_RXF_FCS_ERR = GM_MIB_CNT_BASE + 32, /* Rx Frame Check Seq. Error */
1683
1684 GM_RXO_OK_LO = GM_MIB_CNT_BASE + 48, /* Octets Received OK Low */
1685 GM_RXO_OK_HI = GM_MIB_CNT_BASE + 56, /* Octets Received OK High */
1686 GM_RXO_ERR_LO = GM_MIB_CNT_BASE + 64, /* Octets Received Invalid Low */
1687 GM_RXO_ERR_HI = GM_MIB_CNT_BASE + 72, /* Octets Received Invalid High */
1688 GM_RXF_SHT = GM_MIB_CNT_BASE + 80, /* Frames <64 Byte Received OK */
1689 GM_RXE_FRAG = GM_MIB_CNT_BASE + 88, /* Frames <64 Byte Received with FCS Err */
1690 GM_RXF_64B = GM_MIB_CNT_BASE + 96, /* 64 Byte Rx Frame */
1691 GM_RXF_127B = GM_MIB_CNT_BASE + 104,/* 65-127 Byte Rx Frame */
1692 GM_RXF_255B = GM_MIB_CNT_BASE + 112,/* 128-255 Byte Rx Frame */
1693 GM_RXF_511B = GM_MIB_CNT_BASE + 120,/* 256-511 Byte Rx Frame */
1694 GM_RXF_1023B = GM_MIB_CNT_BASE + 128,/* 512-1023 Byte Rx Frame */
1695 GM_RXF_1518B = GM_MIB_CNT_BASE + 136,/* 1024-1518 Byte Rx Frame */
1696 GM_RXF_MAX_SZ = GM_MIB_CNT_BASE + 144,/* 1519-MaxSize Byte Rx Frame */
1697 GM_RXF_LNG_ERR = GM_MIB_CNT_BASE + 152,/* Rx Frame too Long Error */
1698 GM_RXF_JAB_PKT = GM_MIB_CNT_BASE + 160,/* Rx Jabber Packet Frame */
1699
1700 GM_RXE_FIFO_OV = GM_MIB_CNT_BASE + 176,/* Rx FIFO overflow Event */
1701 GM_TXF_UC_OK = GM_MIB_CNT_BASE + 192,/* Unicast Frames Xmitted OK */
1702 GM_TXF_BC_OK = GM_MIB_CNT_BASE + 200,/* Broadcast Frames Xmitted OK */
1703 GM_TXF_MPAUSE = GM_MIB_CNT_BASE + 208,/* Pause MAC Ctrl Frames Xmitted */
1704 GM_TXF_MC_OK = GM_MIB_CNT_BASE + 216,/* Multicast Frames Xmitted OK */
1705 GM_TXO_OK_LO = GM_MIB_CNT_BASE + 224,/* Octets Transmitted OK Low */
1706 GM_TXO_OK_HI = GM_MIB_CNT_BASE + 232,/* Octets Transmitted OK High */
1707 GM_TXF_64B = GM_MIB_CNT_BASE + 240,/* 64 Byte Tx Frame */
1708 GM_TXF_127B = GM_MIB_CNT_BASE + 248,/* 65-127 Byte Tx Frame */
1709 GM_TXF_255B = GM_MIB_CNT_BASE + 256,/* 128-255 Byte Tx Frame */
1710 GM_TXF_511B = GM_MIB_CNT_BASE + 264,/* 256-511 Byte Tx Frame */
1711 GM_TXF_1023B = GM_MIB_CNT_BASE + 272,/* 512-1023 Byte Tx Frame */
1712 GM_TXF_1518B = GM_MIB_CNT_BASE + 280,/* 1024-1518 Byte Tx Frame */
1713 GM_TXF_MAX_SZ = GM_MIB_CNT_BASE + 288,/* 1519-MaxSize Byte Tx Frame */
1714
1715 GM_TXF_COL = GM_MIB_CNT_BASE + 304,/* Tx Collision */
1716 GM_TXF_LAT_COL = GM_MIB_CNT_BASE + 312,/* Tx Late Collision */
1717 GM_TXF_ABO_COL = GM_MIB_CNT_BASE + 320,/* Tx aborted due to Exces. Col. */
1718 GM_TXF_MUL_COL = GM_MIB_CNT_BASE + 328,/* Tx Multiple Collision */
1719 GM_TXF_SNG_COL = GM_MIB_CNT_BASE + 336,/* Tx Single Collision */
1720 GM_TXE_FIFO_UR = GM_MIB_CNT_BASE + 344,/* Tx FIFO Underrun Event */
1721};
1722
1723/* GMAC Bit Definitions */
1724/* GM_GP_STAT 16 bit r/o General Purpose Status Register */
1725enum {
1726 GM_GPSR_SPEED = 1<<15, /* Bit 15: Port Speed (1 = 100 Mbps) */
1727 GM_GPSR_DUPLEX = 1<<14, /* Bit 14: Duplex Mode (1 = Full) */
1728 GM_GPSR_FC_TX_DIS = 1<<13, /* Bit 13: Tx Flow-Control Mode Disabled */
1729 GM_GPSR_LINK_UP = 1<<12, /* Bit 12: Link Up Status */
1730 GM_GPSR_PAUSE = 1<<11, /* Bit 11: Pause State */
1731 GM_GPSR_TX_ACTIVE = 1<<10, /* Bit 10: Tx in Progress */
1732 GM_GPSR_EXC_COL = 1<<9, /* Bit 9: Excessive Collisions Occurred */
1733 GM_GPSR_LAT_COL = 1<<8, /* Bit 8: Late Collisions Occurred */
1734
1735 GM_GPSR_PHY_ST_CH = 1<<5, /* Bit 5: PHY Status Change */
1736 GM_GPSR_GIG_SPEED = 1<<4, /* Bit 4: Gigabit Speed (1 = 1000 Mbps) */
1737 GM_GPSR_PART_MODE = 1<<3, /* Bit 3: Partition mode */
1738 GM_GPSR_FC_RX_DIS = 1<<2, /* Bit 2: Rx Flow-Control Mode Disabled */
1739 GM_GPSR_PROM_EN = 1<<1, /* Bit 1: Promiscuous Mode Enabled */
1740};
1741
1742/* GM_GP_CTRL 16 bit r/w General Purpose Control Register */
1743enum {
1744 GM_GPCR_PROM_ENA = 1<<14, /* Bit 14: Enable Promiscuous Mode */
1745 GM_GPCR_FC_TX_DIS = 1<<13, /* Bit 13: Disable Tx Flow-Control Mode */
1746 GM_GPCR_TX_ENA = 1<<12, /* Bit 12: Enable Transmit */
1747 GM_GPCR_RX_ENA = 1<<11, /* Bit 11: Enable Receive */
1748 GM_GPCR_BURST_ENA = 1<<10, /* Bit 10: Enable Burst Mode */
1749 GM_GPCR_LOOP_ENA = 1<<9, /* Bit 9: Enable MAC Loopback Mode */
1750 GM_GPCR_PART_ENA = 1<<8, /* Bit 8: Enable Partition Mode */
1751 GM_GPCR_GIGS_ENA = 1<<7, /* Bit 7: Gigabit Speed (1000 Mbps) */
1752 GM_GPCR_FL_PASS = 1<<6, /* Bit 6: Force Link Pass */
1753 GM_GPCR_DUP_FULL = 1<<5, /* Bit 5: Full Duplex Mode */
1754 GM_GPCR_FC_RX_DIS = 1<<4, /* Bit 4: Disable Rx Flow-Control Mode */
1755 GM_GPCR_SPEED_100 = 1<<3, /* Bit 3: Port Speed 100 Mbps */
1756 GM_GPCR_AU_DUP_DIS = 1<<2, /* Bit 2: Disable Auto-Update Duplex */
1757 GM_GPCR_AU_FCT_DIS = 1<<1, /* Bit 1: Disable Auto-Update Flow-C. */
1758 GM_GPCR_AU_SPD_DIS = 1<<0, /* Bit 0: Disable Auto-Update Speed */
1759};
1760
1761#define GM_GPCR_SPEED_1000 (GM_GPCR_GIGS_ENA | GM_GPCR_SPEED_100)
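
/*
 * Illustrative example (not in the original file): a General Purpose
 * Control value forcing 1000 Mbps full duplex, with the corresponding
 * auto-update bits disabled so the forced settings stick.
 */
enum {
	GM_GPCR_1000FD_SKETCH = GM_GPCR_SPEED_1000 | GM_GPCR_DUP_FULL
			      | GM_GPCR_AU_SPD_DIS | GM_GPCR_AU_DUP_DIS,
};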
1762
1763/* GM_TX_CTRL 16 bit r/w Transmit Control Register */
1764enum {
1765 GM_TXCR_FORCE_JAM = 1<<15, /* Bit 15: Force Jam / Flow-Control */
1766 GM_TXCR_CRC_DIS = 1<<14, /* Bit 14: Disable insertion of CRC */
1767 GM_TXCR_PAD_DIS = 1<<13, /* Bit 13: Disable padding of packets */
1768 GM_TXCR_COL_THR_MSK = 7<<10, /* Bit 12..10: Collision Threshold */
1769};
1770
1771#define TX_COL_THR(x) (((x)<<10) & GM_TXCR_COL_THR_MSK)
1772#define TX_COL_DEF 0x04
1773
1774/* GM_RX_CTRL 16 bit r/w Receive Control Register */
1775enum {
1776 GM_RXCR_UCF_ENA = 1<<15, /* Bit 15: Enable Unicast filtering */
1777 GM_RXCR_MCF_ENA = 1<<14, /* Bit 14: Enable Multicast filtering */
1778 GM_RXCR_CRC_DIS = 1<<13, /* Bit 13: Remove 4-byte CRC */
1779 GM_RXCR_PASS_FC = 1<<12, /* Bit 12: Pass FC packets to FIFO */
1780};
1781
1782/* GM_TX_PARAM 16 bit r/w Transmit Parameter Register */
1783enum {
1784 GM_TXPA_JAMLEN_MSK = 0x03<<14, /* Bit 15..14: Jam Length */
1785 GM_TXPA_JAMIPG_MSK = 0x1f<<9, /* Bit 13..9: Jam IPG */
1786 GM_TXPA_JAMDAT_MSK = 0x1f<<4, /* Bit 8..4: IPG Jam to Data */
1787 GM_TXPA_BO_LIM_MSK = 0x0f, /* Bit 3.. 0: Backoff Limit Mask */
1788
1789 TX_JAM_LEN_DEF = 0x03,
1790 TX_JAM_IPG_DEF = 0x0b,
1791 TX_IPG_JAM_DEF = 0x1c,
1792 TX_BOF_LIM_DEF = 0x04,
1793};
1794
1795#define TX_JAM_LEN_VAL(x) (((x)<<14) & GM_TXPA_JAMLEN_MSK)
1796#define TX_JAM_IPG_VAL(x) (((x)<<9) & GM_TXPA_JAMIPG_MSK)
1797#define TX_IPG_JAM_DATA(x) (((x)<<4) & GM_TXPA_JAMDAT_MSK)
1798#define TX_BACK_OFF_LIM(x) ((x) & GM_TXPA_BO_LIM_MSK)
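
/*
 * Illustrative example (not in the original file): a GM_TX_PARAM word
 * assembled from the defaults above -- the sort of value written once
 * at MAC initialization.
 */
enum {
	GM_TX_PARAM_DEF_SKETCH = TX_JAM_LEN_VAL(TX_JAM_LEN_DEF)
			       | TX_JAM_IPG_VAL(TX_JAM_IPG_DEF)
			       | TX_IPG_JAM_DATA(TX_IPG_JAM_DEF)
			       | TX_BACK_OFF_LIM(TX_BOF_LIM_DEF),
};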
1799
1800
1801/* GM_SERIAL_MODE 16 bit r/w Serial Mode Register */
1802enum {
1803 GM_SMOD_DATABL_MSK = 0x1f<<11, /* Bit 15..11: Data Blinder (r/o) */
1804 GM_SMOD_LIMIT_4 = 1<<10, /* 4 consecutive Tx trials */
1805 GM_SMOD_VLAN_ENA = 1<<9, /* Enable VLAN (Max. Frame Len) */
1806 GM_SMOD_JUMBO_ENA = 1<<8, /* Enable Jumbo (Max. Frame Len) */
1807
1808 GM_NEW_FLOW_CTRL = 1<<6, /* Enable New Flow-Control */
1809
1810 GM_SMOD_IPG_MSK = 0x1f /* Bit 4..0: Inter-Packet Gap (IPG) */
1811};
1812
1813#define DATA_BLIND_VAL(x) (((x)<<11) & GM_SMOD_DATABL_MSK)
1814#define IPG_DATA_VAL(x) ((x) & GM_SMOD_IPG_MSK)
1815
1816#define DATA_BLIND_DEF 0x04
1817#define IPG_DATA_DEF_1000 0x1e
1818#define IPG_DATA_DEF_10_100 0x18
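
/*
 * Illustrative example (an assumption, not a value taken from the
 * driver): one plausible GM_SERIAL_MODE word for a 1000 Mbps link,
 * pairing the default data blinder with the matching IPG default.
 */
enum {
	GM_SMOD_1000_SKETCH = DATA_BLIND_VAL(DATA_BLIND_DEF)
			    | IPG_DATA_VAL(IPG_DATA_DEF_1000),
};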
1819
1820/* GM_SMI_CTRL 16 bit r/w SMI Control Register */
1821enum {
1822 GM_SMI_CT_PHY_A_MSK = 0x1f<<11,/* Bit 15..11: PHY Device Address */
1823 GM_SMI_CT_REG_A_MSK = 0x1f<<6,/* Bit 10.. 6: PHY Register Address */
1824 GM_SMI_CT_OP_RD = 1<<5, /* Bit 5: OpCode Read (0=Write)*/
1825 GM_SMI_CT_RD_VAL = 1<<4, /* Bit 4: Read Valid (Read completed) */
1826 GM_SMI_CT_BUSY = 1<<3, /* Bit 3: Busy (Operation in progress) */
1827};
1828
1829#define GM_SMI_CT_PHY_AD(x) (((u16)(x)<<11) & GM_SMI_CT_PHY_A_MSK)
1830#define GM_SMI_CT_REG_AD(x) (((u16)(x)<<6) & GM_SMI_CT_REG_A_MSK)
1831
1832/* GM_PHY_ADDR 16 bit r/w GPHY Address Register */
1833enum {
1834 GM_PAR_MIB_CLR = 1<<5, /* Bit 5: Set MIB Clear Counter Mode */
1835 GM_PAR_MIB_TST = 1<<4, /* Bit 4: MIB Load Counter (Test Mode) */
1836};
1837
1838/* Receive Frame Status Encoding */
1839enum {
1840 GMR_FS_LEN = 0x7fff<<16, /* Bit 30..16: Rx Frame Length */
1841 GMR_FS_VLAN = 1<<13, /* VLAN Packet */
1842 GMR_FS_JABBER = 1<<12, /* Jabber Packet */
1843 GMR_FS_UN_SIZE = 1<<11, /* Undersize Packet */
1844 GMR_FS_MC = 1<<10, /* Multicast Packet */
1845 GMR_FS_BC = 1<<9, /* Broadcast Packet */
1846 GMR_FS_RX_OK = 1<<8, /* Receive OK (Good Packet) */
1847 GMR_FS_GOOD_FC = 1<<7, /* Good Flow-Control Packet */
1848 GMR_FS_BAD_FC = 1<<6, /* Bad Flow-Control Packet */
1849 GMR_FS_MII_ERR = 1<<5, /* MII Error */
1850 GMR_FS_LONG_ERR = 1<<4, /* Too Long Packet */
1851 GMR_FS_FRAGMENT = 1<<3, /* Fragment */
1852
1853 GMR_FS_CRC_ERR = 1<<1, /* CRC Error */
1854 GMR_FS_RX_FF_OV = 1<<0, /* Rx FIFO Overflow */
1855
1856 GMR_FS_ANY_ERR = GMR_FS_RX_FF_OV | GMR_FS_CRC_ERR |
1857 GMR_FS_FRAGMENT | GMR_FS_LONG_ERR |
1858 GMR_FS_MII_ERR | GMR_FS_BAD_FC |
1859 GMR_FS_UN_SIZE | GMR_FS_JABBER,
1860};
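
/*
 * Illustrative helper (not part of the original header): a receive
 * status word is only trusted when the OK bit is set and none of the
 * summary error bits are.
 */
static inline int gmr_fs_rx_good_sketch(u32 status)
{
	return (status & GMR_FS_RX_OK) && !(status & GMR_FS_ANY_ERR);
}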
1861
1862/* RX_GMF_CTRL_T 32 bit Rx GMAC FIFO Control/Test */
1863enum {
1864 RX_GCLKMAC_ENA = 1<<31, /* RX MAC Clock Gating Enable */
1865 RX_GCLKMAC_OFF = 1<<30,
1866
1867 RX_STFW_DIS = 1<<29, /* RX Store and Forward Disable */
1868 RX_STFW_ENA = 1<<28,
1869
1870 RX_TRUNC_ON = 1<<27, /* enable packet truncation */
1871 RX_TRUNC_OFF = 1<<26, /* disable packet truncation */
1872 RX_VLAN_STRIP_ON = 1<<25, /* enable VLAN stripping */
1873 RX_VLAN_STRIP_OFF = 1<<24, /* disable VLAN stripping */
1874
1875 RX_MACSEC_FLUSH_ON = 1<<23,
1876 RX_MACSEC_FLUSH_OFF = 1<<22,
1877 RX_MACSEC_ASF_FLUSH_ON = 1<<21,
1878 RX_MACSEC_ASF_FLUSH_OFF = 1<<20,
1879
1880 GMF_RX_OVER_ON = 1<<19, /* enable flushing on receive overrun */
1881 GMF_RX_OVER_OFF = 1<<18, /* disable flushing on receive overrun */
1882 GMF_ASF_RX_OVER_ON = 1<<17, /* enable flushing of ASF when overrun */
1883 GMF_ASF_RX_OVER_OFF = 1<<16, /* disable flushing of ASF when overrun */
1884
1885 GMF_WP_TST_ON = 1<<14, /* Write Pointer Test On */
1886 GMF_WP_TST_OFF = 1<<13, /* Write Pointer Test Off */
1887 GMF_WP_STEP = 1<<12, /* Write Pointer Step/Increment */
1888
1889 GMF_RP_TST_ON = 1<<10, /* Read Pointer Test On */
1890 GMF_RP_TST_OFF = 1<<9, /* Read Pointer Test Off */
1891 GMF_RP_STEP = 1<<8, /* Read Pointer Step/Increment */
1892 GMF_RX_F_FL_ON = 1<<7, /* Rx FIFO Flush Mode On */
1893 GMF_RX_F_FL_OFF = 1<<6, /* Rx FIFO Flush Mode Off */
1894 GMF_CLI_RX_FO = 1<<5, /* Clear IRQ Rx FIFO Overrun */
1895 GMF_CLI_RX_C = 1<<4, /* Clear IRQ Rx Frame Complete */
1896
1897 GMF_OPER_ON = 1<<3, /* Operational Mode On */
1898 GMF_OPER_OFF = 1<<2, /* Operational Mode Off */
1899 GMF_RST_CLR = 1<<1, /* Clear GMAC FIFO Reset */
1900 GMF_RST_SET = 1<<0, /* Set GMAC FIFO Reset */
1901
1902 RX_GMF_FL_THR_DEF = 0xa, /* flush threshold (default) */
1903
1904 GMF_RX_CTRL_DEF = GMF_OPER_ON | GMF_RX_F_FL_ON,
1905};
1906
1907/* RX_GMF_FL_CTRL 16 bit Rx GMAC FIFO Flush Control (Yukon-Supreme) */
1908enum {
1909 RX_IPV6_SA_MOB_ENA = 1<<9, /* IPv6 SA Mobility Support Enable */
1910 RX_IPV6_SA_MOB_DIS = 1<<8, /* IPv6 SA Mobility Support Disable */
1911 RX_IPV6_DA_MOB_ENA = 1<<7, /* IPv6 DA Mobility Support Enable */
1912 RX_IPV6_DA_MOB_DIS = 1<<6, /* IPv6 DA Mobility Support Disable */
1913 RX_PTR_SYNCDLY_ENA = 1<<5, /* Pointers Delay Synch Enable */
1914 RX_PTR_SYNCDLY_DIS = 1<<4, /* Pointers Delay Synch Disable */
1915 RX_ASF_NEWFLAG_ENA = 1<<3, /* RX ASF Flag New Logic Enable */
1916 RX_ASF_NEWFLAG_DIS = 1<<2, /* RX ASF Flag New Logic Disable */
1917 RX_FLSH_MISSPKT_ENA = 1<<1, /* RX Flush Miss-Packet Enable */
1918 RX_FLSH_MISSPKT_DIS = 1<<0, /* RX Flush Miss-Packet Disable */
1919};
1920
1921/* TX_GMF_EA 32 bit Tx GMAC FIFO End Address */
1922enum {
1923 TX_DYN_WM_ENA = 3, /* Yukon-FE+ specific */
1924};
1925
1926/* TX_GMF_CTRL_T 32 bit Tx GMAC FIFO Control/Test */
1927enum {
1928 TX_STFW_DIS = 1<<31,/* Disable Store & Forward */
1929 TX_STFW_ENA = 1<<30,/* Enable Store & Forward */
1930
1931 TX_VLAN_TAG_ON = 1<<25,/* enable VLAN tagging */
1932 TX_VLAN_TAG_OFF = 1<<24,/* disable VLAN tagging */
1933
1934 TX_PCI_JUM_ENA = 1<<23,/* PCI Jumbo Mode enable */
1935 TX_PCI_JUM_DIS = 1<<22,/* PCI Jumbo Mode disable */
1936
1937 GMF_WSP_TST_ON = 1<<18,/* Write Shadow Pointer Test On */
1938 GMF_WSP_TST_OFF = 1<<17,/* Write Shadow Pointer Test Off */
1939 GMF_WSP_STEP = 1<<16,/* Write Shadow Pointer Step/Increment */
1940
1941 GMF_CLI_TX_FU = 1<<6, /* Clear IRQ Tx FIFO Underrun */
1942 GMF_CLI_TX_FC = 1<<5, /* Clear IRQ Tx Frame Complete */
1943 GMF_CLI_TX_PE = 1<<4, /* Clear IRQ Tx Parity Error */
1944};
1945
1946/* GMAC_TI_ST_CTRL 8 bit Time Stamp Timer Ctrl Reg (YUKON only) */
1947enum {
1948 GMT_ST_START = 1<<2, /* Start Time Stamp Timer */
1949 GMT_ST_STOP = 1<<1, /* Stop Time Stamp Timer */
1950 GMT_ST_CLR_IRQ = 1<<0, /* Clear Time Stamp Timer IRQ */
1951};
1952
1953/* B28_Y2_ASF_STAT_CMD 32 bit ASF Status and Command Reg */
1954enum {
1955 Y2_ASF_OS_PRES = 1<<4, /* ASF operating system present */
1956 Y2_ASF_RESET = 1<<3, /* ASF system in reset state */
1957 Y2_ASF_RUNNING = 1<<2, /* ASF system operational */
1958 Y2_ASF_CLR_HSTI = 1<<1, /* Clear ASF IRQ */
1959 Y2_ASF_IRQ = 1<<0, /* Issue an IRQ to ASF system */
1960
1961 Y2_ASF_UC_STATE = 3<<2, /* ASF uC State */
1962 Y2_ASF_CLK_HALT = 0, /* ASF system clock stopped */
1963};
1964
1965/* B28_Y2_ASF_HOST_COM 32 bit ASF Host Communication Reg */
1966enum {
1967 Y2_ASF_CLR_ASFI = 1<<1, /* Clear host IRQ */
1968 Y2_ASF_HOST_IRQ = 1<<0, /* Issue an IRQ to HOST system */
1969};
1970/* HCU_CCSR CPU Control and Status Register */
1971enum {
1972 HCU_CCSR_SMBALERT_MONITOR= 1<<27, /* SMBALERT pin monitor */
1973 HCU_CCSR_CPU_SLEEP = 1<<26, /* CPU sleep status */
1974 /* Clock Stretching Timeout */
1975 HCU_CCSR_CS_TO = 1<<25,
1976 HCU_CCSR_WDOG = 1<<24, /* Watchdog Reset */
1977
1978 HCU_CCSR_CLR_IRQ_HOST = 1<<17, /* Clear IRQ_HOST */
1979 HCU_CCSR_SET_IRQ_HCU = 1<<16, /* Set IRQ_HCU */
1980
1981 HCU_CCSR_AHB_RST = 1<<9, /* Reset AHB bridge */
1982 HCU_CCSR_CPU_RST_MODE = 1<<8, /* CPU Reset Mode */
1983
1984 HCU_CCSR_SET_SYNC_CPU = 1<<5,
1985 HCU_CCSR_CPU_CLK_DIVIDE_MSK = 3<<3,/* CPU Clock Divide */
1986 HCU_CCSR_CPU_CLK_DIVIDE_BASE= 1<<3,
1987 HCU_CCSR_OS_PRSNT = 1<<2, /* ASF OS Present */
1988/* Microcontroller State */
1989 HCU_CCSR_UC_STATE_MSK = 3,
1990 HCU_CCSR_UC_STATE_BASE = 1<<0,
1991 HCU_CCSR_ASF_RESET = 0,
1992 HCU_CCSR_ASF_HALTED = 1<<1,
1993 HCU_CCSR_ASF_RUNNING = 1<<0,
1994};
1995
1996/* HCU_HCSR Host Control and Status Register */
1997enum {
1998 HCU_HCSR_SET_IRQ_CPU = 1<<16, /* Set IRQ_CPU */
1999
2000 HCU_HCSR_CLR_IRQ_HCU = 1<<1, /* Clear IRQ_HCU */
2001 HCU_HCSR_SET_IRQ_HOST = 1<<0, /* Set IRQ_HOST */
2002};
2003
2004/* STAT_CTRL 32 bit Status BMU control register (Yukon-2 only) */
2005enum {
2006 SC_STAT_CLR_IRQ = 1<<4, /* Status Burst IRQ clear */
2007 SC_STAT_OP_ON = 1<<3, /* Operational Mode On */
2008 SC_STAT_OP_OFF = 1<<2, /* Operational Mode Off */
2009 SC_STAT_RST_CLR = 1<<1, /* Clear Status Unit Reset (Enable) */
2010 SC_STAT_RST_SET = 1<<0, /* Set Status Unit Reset */
2011};
2012
2013/* GMAC_CTRL 32 bit GMAC Control Reg (YUKON only) */
2014enum {
2015 GMC_SET_RST = 1<<15,/* Set MACsec Reset */
2016 GMC_SEC_RST_OFF = 1<<14,/* Clear MACsec Reset */
2017 GMC_BYP_MACSECRX_ON = 1<<13,/* Bypass macsec RX */
2018 GMC_BYP_MACSECRX_OFF= 1<<12,/* Bypass macsec RX off */
2019 GMC_BYP_MACSECTX_ON = 1<<11,/* Bypass macsec TX */
2020 GMC_BYP_MACSECTX_OFF= 1<<10,/* Bypass macsec TX off*/
2021 GMC_BYP_RETR_ON = 1<<9, /* Bypass retransmit FIFO On */
2022 GMC_BYP_RETR_OFF= 1<<8, /* Bypass retransmit FIFO Off */
2023
2024 GMC_H_BURST_ON = 1<<7, /* Half Duplex Burst Mode On */
2025 GMC_H_BURST_OFF = 1<<6, /* Half Duplex Burst Mode Off */
2026 GMC_F_LOOPB_ON = 1<<5, /* FIFO Loopback On */
2027 GMC_F_LOOPB_OFF = 1<<4, /* FIFO Loopback Off */
2028 GMC_PAUSE_ON = 1<<3, /* Pause On */
2029 GMC_PAUSE_OFF = 1<<2, /* Pause Off */
2030 GMC_RST_CLR = 1<<1, /* Clear GMAC Reset */
2031 GMC_RST_SET = 1<<0, /* Set GMAC Reset */
2032};
2033
2034/* GPHY_CTRL 32 bit GPHY Control Reg (YUKON only) */
2035enum {
2036 GPC_TX_PAUSE = 1<<30, /* Tx pause enabled (ro) */
2037 GPC_RX_PAUSE = 1<<29, /* Rx pause enabled (ro) */
2038 GPC_SPEED = 3<<27, /* PHY speed (ro) */
2039 GPC_LINK = 1<<26, /* Link up (ro) */
2040 GPC_DUPLEX = 1<<25, /* Duplex (ro) */
2041 GPC_CLOCK = 1<<24, /* 125 MHz clock stable (ro) */
2042
2043 GPC_PDOWN = 1<<23, /* Internal 2.5V regulator power down */
2044 GPC_TSTMODE = 1<<22, /* Test mode */
2045 GPC_REG18 = 1<<21, /* Reg18 Power down */
2046 GPC_REG12SEL = 3<<19, /* Reg12 power setting */
2047 GPC_REG18SEL = 3<<17, /* Reg18 power setting */
2048 GPC_SPILOCK = 1<<16, /* SPI lock (ASF) */
2049
2050 GPC_LEDMUX = 3<<14, /* LED Mux */
2051 GPC_INTPOL = 1<<13, /* Interrupt polarity */
2052 GPC_DETECT = 1<<12, /* Energy detect */
2053 GPC_1000HD = 1<<11, /* Enable 1000Mbit HD */
2054 GPC_SLAVE = 1<<10, /* Slave mode */
2055 GPC_PAUSE = 1<<9, /* Pause enable */
2056 GPC_LEDCTL = 3<<6, /* GPHY Leds */
2057
2058 GPC_RST_CLR = 1<<1, /* Clear GPHY Reset */
2059 GPC_RST_SET = 1<<0, /* Set GPHY Reset */
2060};
2061
2062/* GMAC_IRQ_SRC 8 bit GMAC Interrupt Source Reg (YUKON only) */
2063/* GMAC_IRQ_MSK 8 bit GMAC Interrupt Mask Reg (YUKON only) */
2064enum {
2065 GM_IS_TX_CO_OV = 1<<5, /* Transmit Counter Overflow IRQ */
2066 GM_IS_RX_CO_OV = 1<<4, /* Receive Counter Overflow IRQ */
2067 GM_IS_TX_FF_UR = 1<<3, /* Transmit FIFO Underrun */
2068 GM_IS_TX_COMPL = 1<<2, /* Frame Transmission Complete */
2069 GM_IS_RX_FF_OR = 1<<1, /* Receive FIFO Overrun */
2070 GM_IS_RX_COMPL = 1<<0, /* Frame Reception Complete */
2071
2072#define GMAC_DEF_MSK GM_IS_TX_FF_UR
2073};
2074
2075/* GMAC_LINK_CTRL 16 bit GMAC Link Control Reg (YUKON only) */
2076enum { /* Bits 15.. 2: reserved */
2077 GMLC_RST_CLR = 1<<1, /* Clear GMAC Link Reset */
2078 GMLC_RST_SET = 1<<0, /* Set GMAC Link Reset */
2079};
2080
2081
2082/* WOL_CTRL_STAT 16 bit WOL Control/Status Reg */
2083enum {
2084 WOL_CTL_LINK_CHG_OCC = 1<<15,
2085 WOL_CTL_MAGIC_PKT_OCC = 1<<14,
2086 WOL_CTL_PATTERN_OCC = 1<<13,
2087 WOL_CTL_CLEAR_RESULT = 1<<12,
2088 WOL_CTL_ENA_PME_ON_LINK_CHG = 1<<11,
2089 WOL_CTL_DIS_PME_ON_LINK_CHG = 1<<10,
2090 WOL_CTL_ENA_PME_ON_MAGIC_PKT = 1<<9,
2091 WOL_CTL_DIS_PME_ON_MAGIC_PKT = 1<<8,
2092 WOL_CTL_ENA_PME_ON_PATTERN = 1<<7,
2093 WOL_CTL_DIS_PME_ON_PATTERN = 1<<6,
2094 WOL_CTL_ENA_LINK_CHG_UNIT = 1<<5,
2095 WOL_CTL_DIS_LINK_CHG_UNIT = 1<<4,
2096 WOL_CTL_ENA_MAGIC_PKT_UNIT = 1<<3,
2097 WOL_CTL_DIS_MAGIC_PKT_UNIT = 1<<2,
2098 WOL_CTL_ENA_PATTERN_UNIT = 1<<1,
2099 WOL_CTL_DIS_PATTERN_UNIT = 1<<0,
2100};
2101
2102
2103/* Control flags */
2104enum {
2105 UDPTCP = 1<<0,
2106 CALSUM = 1<<1,
2107 WR_SUM = 1<<2,
2108 INIT_SUM= 1<<3,
2109 LOCK_SUM= 1<<4,
2110 INS_VLAN= 1<<5,
2111 EOP = 1<<7,
2112};
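
/*
 * Illustrative example (not in the original file): the control byte of
 * a transmit descriptor asking the MAC to compute, store and lock a
 * TCP checksum; UDPTCP would be OR-ed in for UDP frames and EOP on the
 * final fragment of the packet.
 */
enum {
	TX_CTRL_TCPSUM_SKETCH = CALSUM | WR_SUM | INIT_SUM | LOCK_SUM,
};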
2113
2114enum {
2115 HW_OWNER = 1<<7,
2116 OP_TCPWRITE = 0x11,
2117 OP_TCPSTART = 0x12,
2118 OP_TCPINIT = 0x14,
2119 OP_TCPLCK = 0x18,
2120 OP_TCPCHKSUM = OP_TCPSTART,
2121 OP_TCPIS = OP_TCPINIT | OP_TCPSTART,
2122 OP_TCPLW = OP_TCPLCK | OP_TCPWRITE,
2123 OP_TCPLSW = OP_TCPLCK | OP_TCPSTART | OP_TCPWRITE,
2124 OP_TCPLISW = OP_TCPLCK | OP_TCPINIT | OP_TCPSTART | OP_TCPWRITE,
2125
2126 OP_ADDR64 = 0x21,
2127 OP_VLAN = 0x22,
2128 OP_ADDR64VLAN = OP_ADDR64 | OP_VLAN,
2129 OP_LRGLEN = 0x24,
2130 OP_LRGLENVLAN = OP_LRGLEN | OP_VLAN,
2131 OP_MSS = 0x28,
2132 OP_MSSVLAN = OP_MSS | OP_VLAN,
2133
2134 OP_BUFFER = 0x40,
2135 OP_PACKET = 0x41,
2136 OP_LARGESEND = 0x43,
2137 OP_LSOV2 = 0x45,
2138
2139/* YUKON-2 STATUS opcodes defines */
2140 OP_RXSTAT = 0x60,
2141 OP_RXTIMESTAMP = 0x61,
2142 OP_RXVLAN = 0x62,
2143 OP_RXCHKS = 0x64,
2144 OP_RXCHKSVLAN = OP_RXCHKS | OP_RXVLAN,
2145 OP_RXTIMEVLAN = OP_RXTIMESTAMP | OP_RXVLAN,
2146 OP_RSS_HASH = 0x65,
2147 OP_TXINDEXLE = 0x68,
2148 OP_MACSEC = 0x6c,
2149 OP_PUTIDX = 0x70,
2150};
2151
2152enum status_css {
2153 CSS_TCPUDPCSOK = 1<<7, /* TCP / UDP checksum is ok */
2154 CSS_ISUDP = 1<<6, /* packet is a UDP packet */
2155 CSS_ISTCP = 1<<5, /* packet is a TCP packet */
2156 CSS_ISIPFRAG = 1<<4, /* packet is a TCP/UDP frag, CS calc not done */
2157 CSS_ISIPV6 = 1<<3, /* packet is an IPv6 packet */
2158 CSS_IPV4CSUMOK = 1<<2, /* IPv4 header checksum is ok */
2159 CSS_ISIPV4 = 1<<1, /* packet is an IPv4 packet */
2160 CSS_LINK_BIT = 1<<0, /* port number (legacy) */
2161};
2162
2163/* Yukon 2 hardware interface */
2164struct sky2_tx_le {
2165 __le32 addr;
2166 __le16 length; /* also vlan tag or checksum start */
2167 u8 ctrl;
2168 u8 opcode;
2169} __packed;
2170
2171struct sky2_rx_le {
2172 __le32 addr;
2173 __le16 length;
2174 u8 ctrl;
2175 u8 opcode;
2176} __packed;
2177
2178struct sky2_status_le {
2179 __le32 status; /* also checksum */
2180 __le16 length; /* also vlan tag */
2181 u8 css;
2182 u8 opcode;
2183} __packed;
2184
2185struct tx_ring_info {
2186 struct sk_buff *skb;
2187 unsigned long flags;
2188#define TX_MAP_SINGLE 0x0001
2189#define TX_MAP_PAGE 0x0002
2190 DEFINE_DMA_UNMAP_ADDR(mapaddr);
2191 DEFINE_DMA_UNMAP_LEN(maplen);
2192};
2193
2194struct rx_ring_info {
2195 struct sk_buff *skb;
2196 dma_addr_t data_addr;
2197 DEFINE_DMA_UNMAP_LEN(data_size);
2198 dma_addr_t frag_addr[ETH_JUMBO_MTU >> PAGE_SHIFT];
2199};
2200
2201enum flow_control {
2202 FC_NONE = 0,
2203 FC_TX = 1,
2204 FC_RX = 2,
2205 FC_BOTH = 3,
2206};
2207
2208struct sky2_stats {
2209 struct u64_stats_sync syncp;
2210 u64 packets;
2211 u64 bytes;
2212};
2213
2214struct sky2_port {
2215 struct sky2_hw *hw;
2216 struct net_device *netdev;
2217 unsigned port;
2218 u32 msg_enable;
2219 spinlock_t phy_lock;
2220
2221 struct tx_ring_info *tx_ring;
2222 struct sky2_tx_le *tx_le;
2223 struct sky2_stats tx_stats;
2224
2225 u16 tx_ring_size;
2226 u16 tx_cons; /* next le to check */
2227 u16 tx_prod; /* next le to use */
2228 u16 tx_next; /* debug only */
2229
2230 u16 tx_pending;
2231 u16 tx_last_mss;
2232 u32 tx_last_upper;
2233 u32 tx_tcpsum;
2234
2235 struct rx_ring_info *rx_ring ____cacheline_aligned_in_smp;
2236 struct sky2_rx_le *rx_le;
2237 struct sky2_stats rx_stats;
2238
2239 u16 rx_next; /* next re to check */
2240 u16 rx_put; /* next le index to use */
2241 u16 rx_pending;
2242 u16 rx_data_size;
2243 u16 rx_nfrags;
2244 u16 rx_tag;
2245
2246 struct {
2247 unsigned long last;
2248 u32 mac_rp;
2249 u8 mac_lev;
2250 u8 fifo_rp;
2251 u8 fifo_lev;
2252 } check;
2253
2254 dma_addr_t rx_le_map;
2255 dma_addr_t tx_le_map;
2256
2257 u16 advertising; /* ADVERTISED_ bits */
2258 u16 speed; /* SPEED_1000, SPEED_100, ... */
2259 u8 wol; /* WAKE_ bits */
2260 u8 duplex; /* DUPLEX_HALF, DUPLEX_FULL */
2261 u16 flags;
2262#define SKY2_FLAG_AUTO_SPEED 0x0002
2263#define SKY2_FLAG_AUTO_PAUSE 0x0004
2264
2265 enum flow_control flow_mode;
2266 enum flow_control flow_status;
2267
2268#ifdef CONFIG_SKY2_DEBUG
2269 struct dentry *debugfs;
2270#endif
2271};
2272
2273struct sky2_hw {
2274 void __iomem *regs;
2275 struct pci_dev *pdev;
2276 struct napi_struct napi;
2277 struct net_device *dev[2];
2278 unsigned long flags;
2279#define SKY2_HW_USE_MSI 0x00000001
2280#define SKY2_HW_FIBRE_PHY 0x00000002
2281#define SKY2_HW_GIGABIT 0x00000004
2282#define SKY2_HW_NEWER_PHY 0x00000008
2283#define SKY2_HW_RAM_BUFFER 0x00000010
2284#define SKY2_HW_NEW_LE 0x00000020 /* new LSOv2 format */
2285#define SKY2_HW_AUTO_TX_SUM 0x00000040 /* new IP decode for Tx */
2286#define SKY2_HW_ADV_POWER_CTL 0x00000080 /* additional PHY power regs */
2287#define SKY2_HW_RSS_BROKEN 0x00000100
2288#define SKY2_HW_VLAN_BROKEN 0x00000200
2289#define SKY2_HW_RSS_CHKSUM 0x00000400 /* RSS requires chksum */
2290
2291 u8 chip_id;
2292 u8 chip_rev;
2293 u8 pmd_type;
2294 u8 ports;
2295
2296 struct sky2_status_le *st_le;
2297 u32 st_size;
2298 u32 st_idx;
2299 dma_addr_t st_dma;
2300
2301 struct timer_list watchdog_timer;
2302 struct work_struct restart_work;
2303 wait_queue_head_t msi_wait;
2304
2305 char irq_name[0];
2306};
2307
2308static inline int sky2_is_copper(const struct sky2_hw *hw)
2309{
2310 return !(hw->flags & SKY2_HW_FIBRE_PHY);
2311}
2312
2313/* Register accessor for memory mapped device */
2314static inline u32 sky2_read32(const struct sky2_hw *hw, unsigned reg)
2315{
2316 return readl(hw->regs + reg);
2317}
2318
2319static inline u16 sky2_read16(const struct sky2_hw *hw, unsigned reg)
2320{
2321 return readw(hw->regs + reg);
2322}
2323
2324static inline u8 sky2_read8(const struct sky2_hw *hw, unsigned reg)
2325{
2326 return readb(hw->regs + reg);
2327}
2328
2329static inline void sky2_write32(const struct sky2_hw *hw, unsigned reg, u32 val)
2330{
2331 writel(val, hw->regs + reg);
2332}
2333
2334static inline void sky2_write16(const struct sky2_hw *hw, unsigned reg, u16 val)
2335{
2336 writew(val, hw->regs + reg);
2337}
2338
2339static inline void sky2_write8(const struct sky2_hw *hw, unsigned reg, u8 val)
2340{
2341 writeb(val, hw->regs + reg);
2342}
2343
2344/* Yukon PHY related registers */
2345#define SK_GMAC_REG(port,reg) \
2346 (BASE_GMAC_1 + (port) * (BASE_GMAC_2-BASE_GMAC_1) + (reg))
2347#define GM_PHY_RETRIES 100
2348
2349static inline u16 gma_read16(const struct sky2_hw *hw, unsigned port, unsigned reg)
2350{
2351 return sky2_read16(hw, SK_GMAC_REG(port,reg));
2352}
2353
2354static inline u32 gma_read32(struct sky2_hw *hw, unsigned port, unsigned reg)
2355{
2356 unsigned base = SK_GMAC_REG(port, reg);
2357 return (u32) sky2_read16(hw, base)
2358 | (u32) sky2_read16(hw, base+4) << 16;
2359}
2360
2361static inline u64 gma_read64(struct sky2_hw *hw, unsigned port, unsigned reg)
2362{
2363 unsigned base = SK_GMAC_REG(port, reg);
2364
2365 return (u64) sky2_read16(hw, base)
2366 | (u64) sky2_read16(hw, base+4) << 16
2367 | (u64) sky2_read16(hw, base+8) << 32
2368 | (u64) sky2_read16(hw, base+12) << 48;
2369}
2370
2371/* There is no way to atomically read 32-bit MIB counter values, so retry until two reads match */
2372static inline u32 get_stats32(struct sky2_hw *hw, unsigned port, unsigned reg)
2373{
2374 u32 val;
2375
2376 do {
2377 val = gma_read32(hw, port, reg);
2378 } while (gma_read32(hw, port, reg) != val);
2379
2380 return val;
2381}
2382
2383static inline u64 get_stats64(struct sky2_hw *hw, unsigned port, unsigned reg)
2384{
2385 u64 val;
2386
2387 do {
2388 val = gma_read64(hw, port, reg);
2389 } while (gma_read64(hw, port, reg) != val);
2390
2391 return val;
2392}
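
/*
 * Usage sketch (not part of the original driver): the octet counters
 * are 64 bits wide, spread across four 16-bit words 4 bytes apart,
 * which is exactly the layout gma_read64()/get_stats64() reassemble.
 */
static inline u64 sky2_rx_octets_sketch(struct sky2_hw *hw, unsigned port)
{
	return get_stats64(hw, port, GM_RXO_OK_LO);
}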
2393
2394static inline void gma_write16(const struct sky2_hw *hw, unsigned port, int r, u16 v)
2395{
2396 sky2_write16(hw, SK_GMAC_REG(port,r), v);
2397}
2398
2399static inline void gma_set_addr(struct sky2_hw *hw, unsigned port, unsigned reg,
2400 const u8 *addr)
2401{
2402 gma_write16(hw, port, reg, (u16) addr[0] | ((u16) addr[1] << 8));
2403 gma_write16(hw, port, reg+4,(u16) addr[2] | ((u16) addr[3] << 8));
2404 gma_write16(hw, port, reg+8,(u16) addr[4] | ((u16) addr[5] << 8));
2405}
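
/*
 * Illustrative sketch (not part of the original file) tying the
 * GM_SMI_* bits defined earlier to these accessors: start an SMI read,
 * then poll until the data is valid.  The PHY address 0 here is an
 * assumption; the driver uses its real PHY address.
 */
static inline int gm_phy_read_sketch(struct sky2_hw *hw, unsigned port,
				     u16 reg, u16 *val)
{
	int i;

	gma_write16(hw, port, GM_SMI_CTRL,
		    GM_SMI_CT_PHY_AD(0) | GM_SMI_CT_REG_AD(reg)
		    | GM_SMI_CT_OP_RD);

	for (i = 0; i < GM_PHY_RETRIES; i++) {
		if (gma_read16(hw, port, GM_SMI_CTRL) & GM_SMI_CT_RD_VAL) {
			*val = gma_read16(hw, port, GM_SMI_DATA);
			return 0;
		}
	}
	return -1;	/* timed out */
}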
2406
2407/* PCI config space access */
2408static inline u32 sky2_pci_read32(const struct sky2_hw *hw, unsigned reg)
2409{
2410 return sky2_read32(hw, Y2_CFG_SPC + reg);
2411}
2412
2413static inline u16 sky2_pci_read16(const struct sky2_hw *hw, unsigned reg)
2414{
2415 return sky2_read16(hw, Y2_CFG_SPC + reg);
2416}
2417
2418static inline void sky2_pci_write32(struct sky2_hw *hw, unsigned reg, u32 val)
2419{
2420 sky2_write32(hw, Y2_CFG_SPC + reg, val);
2421}
2422
2423static inline void sky2_pci_write16(struct sky2_hw *hw, unsigned reg, u16 val)
2424{
2425 sky2_write16(hw, Y2_CFG_SPC + reg, val);
2426}
2427#endif