aboutsummaryrefslogtreecommitdiffstats
path: root/drivers
diff options
context:
space:
mode:
authorJeff Garzik <jeff@garzik.org>2007-10-23 20:15:54 -0400
committerJeff Garzik <jeff@garzik.org>2007-10-23 20:15:54 -0400
commit02bae2129710018883f9536969de7e6acf9304ca (patch)
treec2f3b120b4c48288e2735fa7136895da3ff8615a /drivers
parentc7ffb6bb7a1b6fe5912a009d561733911769d32b (diff)
parentc1f395f1c76b115c9691e1546942651fedb08c37 (diff)
Merge branch 'features' of git://farnsworth.org/dale/linux-2.6-mv643xx_eth into upstream
Diffstat (limited to 'drivers')
-rw-r--r--drivers/net/Kconfig13
-rw-r--r--drivers/net/mv643xx_eth.c806
-rw-r--r--drivers/net/mv643xx_eth.h370
3 files changed, 677 insertions, 512 deletions
diff --git a/drivers/net/Kconfig b/drivers/net/Kconfig
index 2538816817aa..86b8641b4664 100644
--- a/drivers/net/Kconfig
+++ b/drivers/net/Kconfig
@@ -2371,13 +2371,16 @@ config UGETH_TX_ON_DEMAND
2371 depends on UCC_GETH 2371 depends on UCC_GETH
2372 2372
2373config MV643XX_ETH 2373config MV643XX_ETH
2374 tristate "MV-643XX Ethernet support" 2374 tristate "Marvell Discovery (643XX) and Orion ethernet support"
2375 depends on MV64360 || MV64X60 || (PPC_MULTIPLATFORM && PPC32) 2375 depends on MV64360 || MV64X60 || (PPC_MULTIPLATFORM && PPC32) || ARCH_ORION
2376 select MII 2376 select MII
2377 help 2377 help
2378 This driver supports the gigabit Ethernet on the Marvell MV643XX 2378 This driver supports the gigabit ethernet MACs in the
2379 chipset which is used in the Momenco Ocelot C and Jaguar ATX and 2379 Marvell Discovery PPC/MIPS chipset family (MV643XX) and
2380 Pegasos II, amongst other PPC and MIPS boards. 2380 in the Marvell Orion ARM SoC family.
2381
2382 Some boards that use the Discovery chipset are the Momenco
2383 Ocelot C and Jaguar ATX and Pegasos II.
2381 2384
2382config QLA3XXX 2385config QLA3XXX
2383 tristate "QLogic QLA3XXX Network Driver Support" 2386 tristate "QLogic QLA3XXX Network Driver Support"
diff --git a/drivers/net/mv643xx_eth.c b/drivers/net/mv643xx_eth.c
index 54e48281c3d4..651c2699d5e1 100644
--- a/drivers/net/mv643xx_eth.c
+++ b/drivers/net/mv643xx_eth.c
@@ -1,5 +1,5 @@
1/* 1/*
2 * drivers/net/mv643xx_eth.c - Driver for MV643XX ethernet ports 2 * Driver for Marvell Discovery (MV643XX) and Marvell Orion ethernet ports
3 * Copyright (C) 2002 Matthew Dharm <mdharm@momenco.com> 3 * Copyright (C) 2002 Matthew Dharm <mdharm@momenco.com>
4 * 4 *
5 * Based on the 64360 driver from: 5 * Based on the 64360 driver from:
@@ -43,14 +43,567 @@
43#include <linux/ethtool.h> 43#include <linux/ethtool.h>
44#include <linux/platform_device.h> 44#include <linux/platform_device.h>
45 45
46#include <linux/module.h>
47#include <linux/kernel.h>
48#include <linux/spinlock.h>
49#include <linux/workqueue.h>
50#include <linux/mii.h>
51
52#include <linux/mv643xx_eth.h>
53
46#include <asm/io.h> 54#include <asm/io.h>
47#include <asm/types.h> 55#include <asm/types.h>
48#include <asm/pgtable.h> 56#include <asm/pgtable.h>
49#include <asm/system.h> 57#include <asm/system.h>
50#include <asm/delay.h> 58#include <asm/delay.h>
51#include "mv643xx_eth.h" 59#include <asm/dma-mapping.h>
60
61#define MV643XX_CHECKSUM_OFFLOAD_TX
62#define MV643XX_NAPI
63#define MV643XX_TX_FAST_REFILL
64#undef MV643XX_COAL
65
66/*
67 * Number of RX / TX descriptors on RX / TX rings.
68 * Note that allocating RX descriptors is done by allocating the RX
69 * ring AND a preallocated RX buffers (skb's) for each descriptor.
70 * The TX descriptors only allocates the TX descriptors ring,
71 * with no pre allocated TX buffers (skb's are allocated by higher layers.
72 */
73
74/* Default TX ring size is 1000 descriptors */
75#define MV643XX_DEFAULT_TX_QUEUE_SIZE 1000
76
77/* Default RX ring size is 400 descriptors */
78#define MV643XX_DEFAULT_RX_QUEUE_SIZE 400
79
80#define MV643XX_TX_COAL 100
81#ifdef MV643XX_COAL
82#define MV643XX_RX_COAL 100
83#endif
84
85#ifdef MV643XX_CHECKSUM_OFFLOAD_TX
86#define MAX_DESCS_PER_SKB (MAX_SKB_FRAGS + 1)
87#else
88#define MAX_DESCS_PER_SKB 1
89#endif
90
91#define ETH_VLAN_HLEN 4
92#define ETH_FCS_LEN 4
93#define ETH_HW_IP_ALIGN 2 /* hw aligns IP header */
94#define ETH_WRAPPER_LEN (ETH_HW_IP_ALIGN + ETH_HLEN + \
95 ETH_VLAN_HLEN + ETH_FCS_LEN)
96#define ETH_RX_SKB_SIZE (dev->mtu + ETH_WRAPPER_LEN + \
97 dma_get_cache_alignment())
98
99/*
100 * Registers shared between all ports.
101 */
102#define PHY_ADDR_REG 0x0000
103#define SMI_REG 0x0004
104
105/*
106 * Per-port registers.
107 */
108#define PORT_CONFIG_REG(p) (0x0400 + ((p) << 10))
109#define PORT_CONFIG_EXTEND_REG(p) (0x0404 + ((p) << 10))
110#define MAC_ADDR_LOW(p) (0x0414 + ((p) << 10))
111#define MAC_ADDR_HIGH(p) (0x0418 + ((p) << 10))
112#define SDMA_CONFIG_REG(p) (0x041c + ((p) << 10))
113#define PORT_SERIAL_CONTROL_REG(p) (0x043c + ((p) << 10))
114#define PORT_STATUS_REG(p) (0x0444 + ((p) << 10))
115#define TRANSMIT_QUEUE_COMMAND_REG(p) (0x0448 + ((p) << 10))
116#define MAXIMUM_TRANSMIT_UNIT(p) (0x0458 + ((p) << 10))
117#define INTERRUPT_CAUSE_REG(p) (0x0460 + ((p) << 10))
118#define INTERRUPT_CAUSE_EXTEND_REG(p) (0x0464 + ((p) << 10))
119#define INTERRUPT_MASK_REG(p) (0x0468 + ((p) << 10))
120#define INTERRUPT_EXTEND_MASK_REG(p) (0x046c + ((p) << 10))
121#define TX_FIFO_URGENT_THRESHOLD_REG(p) (0x0474 + ((p) << 10))
122#define RX_CURRENT_QUEUE_DESC_PTR_0(p) (0x060c + ((p) << 10))
123#define RECEIVE_QUEUE_COMMAND_REG(p) (0x0680 + ((p) << 10))
124#define TX_CURRENT_QUEUE_DESC_PTR_0(p) (0x06c0 + ((p) << 10))
125#define MIB_COUNTERS_BASE(p) (0x1000 + ((p) << 7))
126#define DA_FILTER_SPECIAL_MULTICAST_TABLE_BASE(p) (0x1400 + ((p) << 10))
127#define DA_FILTER_OTHER_MULTICAST_TABLE_BASE(p) (0x1500 + ((p) << 10))
128#define DA_FILTER_UNICAST_TABLE_BASE(p) (0x1600 + ((p) << 10))
129
130/* These macros describe Ethernet Port configuration reg (Px_cR) bits */
131#define UNICAST_NORMAL_MODE (0 << 0)
132#define UNICAST_PROMISCUOUS_MODE (1 << 0)
133#define DEFAULT_RX_QUEUE(queue) ((queue) << 1)
134#define DEFAULT_RX_ARP_QUEUE(queue) ((queue) << 4)
135#define RECEIVE_BC_IF_NOT_IP_OR_ARP (0 << 7)
136#define REJECT_BC_IF_NOT_IP_OR_ARP (1 << 7)
137#define RECEIVE_BC_IF_IP (0 << 8)
138#define REJECT_BC_IF_IP (1 << 8)
139#define RECEIVE_BC_IF_ARP (0 << 9)
140#define REJECT_BC_IF_ARP (1 << 9)
141#define TX_AM_NO_UPDATE_ERROR_SUMMARY (1 << 12)
142#define CAPTURE_TCP_FRAMES_DIS (0 << 14)
143#define CAPTURE_TCP_FRAMES_EN (1 << 14)
144#define CAPTURE_UDP_FRAMES_DIS (0 << 15)
145#define CAPTURE_UDP_FRAMES_EN (1 << 15)
146#define DEFAULT_RX_TCP_QUEUE(queue) ((queue) << 16)
147#define DEFAULT_RX_UDP_QUEUE(queue) ((queue) << 19)
148#define DEFAULT_RX_BPDU_QUEUE(queue) ((queue) << 22)
149
150#define PORT_CONFIG_DEFAULT_VALUE \
151 UNICAST_NORMAL_MODE | \
152 DEFAULT_RX_QUEUE(0) | \
153 DEFAULT_RX_ARP_QUEUE(0) | \
154 RECEIVE_BC_IF_NOT_IP_OR_ARP | \
155 RECEIVE_BC_IF_IP | \
156 RECEIVE_BC_IF_ARP | \
157 CAPTURE_TCP_FRAMES_DIS | \
158 CAPTURE_UDP_FRAMES_DIS | \
159 DEFAULT_RX_TCP_QUEUE(0) | \
160 DEFAULT_RX_UDP_QUEUE(0) | \
161 DEFAULT_RX_BPDU_QUEUE(0)
162
163/* These macros describe Ethernet Port configuration extend reg (Px_cXR) bits*/
164#define CLASSIFY_EN (1 << 0)
165#define SPAN_BPDU_PACKETS_AS_NORMAL (0 << 1)
166#define SPAN_BPDU_PACKETS_TO_RX_QUEUE_7 (1 << 1)
167#define PARTITION_DISABLE (0 << 2)
168#define PARTITION_ENABLE (1 << 2)
169
170#define PORT_CONFIG_EXTEND_DEFAULT_VALUE \
171 SPAN_BPDU_PACKETS_AS_NORMAL | \
172 PARTITION_DISABLE
173
174/* These macros describe Ethernet Port Sdma configuration reg (SDCR) bits */
175#define RIFB (1 << 0)
176#define RX_BURST_SIZE_1_64BIT (0 << 1)
177#define RX_BURST_SIZE_2_64BIT (1 << 1)
178#define RX_BURST_SIZE_4_64BIT (2 << 1)
179#define RX_BURST_SIZE_8_64BIT (3 << 1)
180#define RX_BURST_SIZE_16_64BIT (4 << 1)
181#define BLM_RX_NO_SWAP (1 << 4)
182#define BLM_RX_BYTE_SWAP (0 << 4)
183#define BLM_TX_NO_SWAP (1 << 5)
184#define BLM_TX_BYTE_SWAP (0 << 5)
185#define DESCRIPTORS_BYTE_SWAP (1 << 6)
186#define DESCRIPTORS_NO_SWAP (0 << 6)
187#define IPG_INT_RX(value) (((value) & 0x3fff) << 8)
188#define TX_BURST_SIZE_1_64BIT (0 << 22)
189#define TX_BURST_SIZE_2_64BIT (1 << 22)
190#define TX_BURST_SIZE_4_64BIT (2 << 22)
191#define TX_BURST_SIZE_8_64BIT (3 << 22)
192#define TX_BURST_SIZE_16_64BIT (4 << 22)
193
194#if defined(__BIG_ENDIAN)
195#define PORT_SDMA_CONFIG_DEFAULT_VALUE \
196 RX_BURST_SIZE_4_64BIT | \
197 IPG_INT_RX(0) | \
198 TX_BURST_SIZE_4_64BIT
199#elif defined(__LITTLE_ENDIAN)
200#define PORT_SDMA_CONFIG_DEFAULT_VALUE \
201 RX_BURST_SIZE_4_64BIT | \
202 BLM_RX_NO_SWAP | \
203 BLM_TX_NO_SWAP | \
204 IPG_INT_RX(0) | \
205 TX_BURST_SIZE_4_64BIT
206#else
207#error One of __BIG_ENDIAN or __LITTLE_ENDIAN must be defined
208#endif
209
210/* These macros describe Ethernet Port serial control reg (PSCR) bits */
211#define SERIAL_PORT_DISABLE (0 << 0)
212#define SERIAL_PORT_ENABLE (1 << 0)
213#define DO_NOT_FORCE_LINK_PASS (0 << 1)
214#define FORCE_LINK_PASS (1 << 1)
215#define ENABLE_AUTO_NEG_FOR_DUPLX (0 << 2)
216#define DISABLE_AUTO_NEG_FOR_DUPLX (1 << 2)
217#define ENABLE_AUTO_NEG_FOR_FLOW_CTRL (0 << 3)
218#define DISABLE_AUTO_NEG_FOR_FLOW_CTRL (1 << 3)
219#define ADV_NO_FLOW_CTRL (0 << 4)
220#define ADV_SYMMETRIC_FLOW_CTRL (1 << 4)
221#define FORCE_FC_MODE_NO_PAUSE_DIS_TX (0 << 5)
222#define FORCE_FC_MODE_TX_PAUSE_DIS (1 << 5)
223#define FORCE_BP_MODE_NO_JAM (0 << 7)
224#define FORCE_BP_MODE_JAM_TX (1 << 7)
225#define FORCE_BP_MODE_JAM_TX_ON_RX_ERR (2 << 7)
226#define SERIAL_PORT_CONTROL_RESERVED (1 << 9)
227#define FORCE_LINK_FAIL (0 << 10)
228#define DO_NOT_FORCE_LINK_FAIL (1 << 10)
229#define RETRANSMIT_16_ATTEMPTS (0 << 11)
230#define RETRANSMIT_FOREVER (1 << 11)
231#define ENABLE_AUTO_NEG_SPEED_GMII (0 << 13)
232#define DISABLE_AUTO_NEG_SPEED_GMII (1 << 13)
233#define DTE_ADV_0 (0 << 14)
234#define DTE_ADV_1 (1 << 14)
235#define DISABLE_AUTO_NEG_BYPASS (0 << 15)
236#define ENABLE_AUTO_NEG_BYPASS (1 << 15)
237#define AUTO_NEG_NO_CHANGE (0 << 16)
238#define RESTART_AUTO_NEG (1 << 16)
239#define MAX_RX_PACKET_1518BYTE (0 << 17)
240#define MAX_RX_PACKET_1522BYTE (1 << 17)
241#define MAX_RX_PACKET_1552BYTE (2 << 17)
242#define MAX_RX_PACKET_9022BYTE (3 << 17)
243#define MAX_RX_PACKET_9192BYTE (4 << 17)
244#define MAX_RX_PACKET_9700BYTE (5 << 17)
245#define MAX_RX_PACKET_MASK (7 << 17)
246#define CLR_EXT_LOOPBACK (0 << 20)
247#define SET_EXT_LOOPBACK (1 << 20)
248#define SET_HALF_DUPLEX_MODE (0 << 21)
249#define SET_FULL_DUPLEX_MODE (1 << 21)
250#define DISABLE_FLOW_CTRL_TX_RX_IN_FULL_DUPLEX (0 << 22)
251#define ENABLE_FLOW_CTRL_TX_RX_IN_FULL_DUPLEX (1 << 22)
252#define SET_GMII_SPEED_TO_10_100 (0 << 23)
253#define SET_GMII_SPEED_TO_1000 (1 << 23)
254#define SET_MII_SPEED_TO_10 (0 << 24)
255#define SET_MII_SPEED_TO_100 (1 << 24)
256
257#define PORT_SERIAL_CONTROL_DEFAULT_VALUE \
258 DO_NOT_FORCE_LINK_PASS | \
259 ENABLE_AUTO_NEG_FOR_DUPLX | \
260 DISABLE_AUTO_NEG_FOR_FLOW_CTRL | \
261 ADV_SYMMETRIC_FLOW_CTRL | \
262 FORCE_FC_MODE_NO_PAUSE_DIS_TX | \
263 FORCE_BP_MODE_NO_JAM | \
264 (1 << 9) /* reserved */ | \
265 DO_NOT_FORCE_LINK_FAIL | \
266 RETRANSMIT_16_ATTEMPTS | \
267 ENABLE_AUTO_NEG_SPEED_GMII | \
268 DTE_ADV_0 | \
269 DISABLE_AUTO_NEG_BYPASS | \
270 AUTO_NEG_NO_CHANGE | \
271 MAX_RX_PACKET_9700BYTE | \
272 CLR_EXT_LOOPBACK | \
273 SET_FULL_DUPLEX_MODE | \
274 ENABLE_FLOW_CTRL_TX_RX_IN_FULL_DUPLEX
275
276/* These macros describe Ethernet Serial Status reg (PSR) bits */
277#define PORT_STATUS_MODE_10_BIT (1 << 0)
278#define PORT_STATUS_LINK_UP (1 << 1)
279#define PORT_STATUS_FULL_DUPLEX (1 << 2)
280#define PORT_STATUS_FLOW_CONTROL (1 << 3)
281#define PORT_STATUS_GMII_1000 (1 << 4)
282#define PORT_STATUS_MII_100 (1 << 5)
283/* PSR bit 6 is undocumented */
284#define PORT_STATUS_TX_IN_PROGRESS (1 << 7)
285#define PORT_STATUS_AUTONEG_BYPASSED (1 << 8)
286#define PORT_STATUS_PARTITION (1 << 9)
287#define PORT_STATUS_TX_FIFO_EMPTY (1 << 10)
288/* PSR bits 11-31 are reserved */
289
290#define PORT_DEFAULT_TRANSMIT_QUEUE_SIZE 800
291#define PORT_DEFAULT_RECEIVE_QUEUE_SIZE 400
292
293#define DESC_SIZE 64
294
295#define ETH_RX_QUEUES_ENABLED (1 << 0) /* use only Q0 for receive */
296#define ETH_TX_QUEUES_ENABLED (1 << 0) /* use only Q0 for transmit */
297
298#define ETH_INT_CAUSE_RX_DONE (ETH_RX_QUEUES_ENABLED << 2)
299#define ETH_INT_CAUSE_RX_ERROR (ETH_RX_QUEUES_ENABLED << 9)
300#define ETH_INT_CAUSE_RX (ETH_INT_CAUSE_RX_DONE | ETH_INT_CAUSE_RX_ERROR)
301#define ETH_INT_CAUSE_EXT 0x00000002
302#define ETH_INT_UNMASK_ALL (ETH_INT_CAUSE_RX | ETH_INT_CAUSE_EXT)
303
304#define ETH_INT_CAUSE_TX_DONE (ETH_TX_QUEUES_ENABLED << 0)
305#define ETH_INT_CAUSE_TX_ERROR (ETH_TX_QUEUES_ENABLED << 8)
306#define ETH_INT_CAUSE_TX (ETH_INT_CAUSE_TX_DONE | ETH_INT_CAUSE_TX_ERROR)
307#define ETH_INT_CAUSE_PHY 0x00010000
308#define ETH_INT_CAUSE_STATE 0x00100000
309#define ETH_INT_UNMASK_ALL_EXT (ETH_INT_CAUSE_TX | ETH_INT_CAUSE_PHY | \
310 ETH_INT_CAUSE_STATE)
311
312#define ETH_INT_MASK_ALL 0x00000000
313#define ETH_INT_MASK_ALL_EXT 0x00000000
314
315#define PHY_WAIT_ITERATIONS 1000 /* 1000 iterations * 10uS = 10mS max */
316#define PHY_WAIT_MICRO_SECONDS 10
317
318/* Buffer offset from buffer pointer */
319#define RX_BUF_OFFSET 0x2
320
321/* Gigabit Ethernet Unit Global Registers */
322
323/* MIB Counters register definitions */
324#define ETH_MIB_GOOD_OCTETS_RECEIVED_LOW 0x0
325#define ETH_MIB_GOOD_OCTETS_RECEIVED_HIGH 0x4
326#define ETH_MIB_BAD_OCTETS_RECEIVED 0x8
327#define ETH_MIB_INTERNAL_MAC_TRANSMIT_ERR 0xc
328#define ETH_MIB_GOOD_FRAMES_RECEIVED 0x10
329#define ETH_MIB_BAD_FRAMES_RECEIVED 0x14
330#define ETH_MIB_BROADCAST_FRAMES_RECEIVED 0x18
331#define ETH_MIB_MULTICAST_FRAMES_RECEIVED 0x1c
332#define ETH_MIB_FRAMES_64_OCTETS 0x20
333#define ETH_MIB_FRAMES_65_TO_127_OCTETS 0x24
334#define ETH_MIB_FRAMES_128_TO_255_OCTETS 0x28
335#define ETH_MIB_FRAMES_256_TO_511_OCTETS 0x2c
336#define ETH_MIB_FRAMES_512_TO_1023_OCTETS 0x30
337#define ETH_MIB_FRAMES_1024_TO_MAX_OCTETS 0x34
338#define ETH_MIB_GOOD_OCTETS_SENT_LOW 0x38
339#define ETH_MIB_GOOD_OCTETS_SENT_HIGH 0x3c
340#define ETH_MIB_GOOD_FRAMES_SENT 0x40
341#define ETH_MIB_EXCESSIVE_COLLISION 0x44
342#define ETH_MIB_MULTICAST_FRAMES_SENT 0x48
343#define ETH_MIB_BROADCAST_FRAMES_SENT 0x4c
344#define ETH_MIB_UNREC_MAC_CONTROL_RECEIVED 0x50
345#define ETH_MIB_FC_SENT 0x54
346#define ETH_MIB_GOOD_FC_RECEIVED 0x58
347#define ETH_MIB_BAD_FC_RECEIVED 0x5c
348#define ETH_MIB_UNDERSIZE_RECEIVED 0x60
349#define ETH_MIB_FRAGMENTS_RECEIVED 0x64
350#define ETH_MIB_OVERSIZE_RECEIVED 0x68
351#define ETH_MIB_JABBER_RECEIVED 0x6c
352#define ETH_MIB_MAC_RECEIVE_ERROR 0x70
353#define ETH_MIB_BAD_CRC_EVENT 0x74
354#define ETH_MIB_COLLISION 0x78
355#define ETH_MIB_LATE_COLLISION 0x7c
356
357/* Port serial status reg (PSR) */
358#define ETH_INTERFACE_PCM 0x00000001
359#define ETH_LINK_IS_UP 0x00000002
360#define ETH_PORT_AT_FULL_DUPLEX 0x00000004
361#define ETH_RX_FLOW_CTRL_ENABLED 0x00000008
362#define ETH_GMII_SPEED_1000 0x00000010
363#define ETH_MII_SPEED_100 0x00000020
364#define ETH_TX_IN_PROGRESS 0x00000080
365#define ETH_BYPASS_ACTIVE 0x00000100
366#define ETH_PORT_AT_PARTITION_STATE 0x00000200
367#define ETH_PORT_TX_FIFO_EMPTY 0x00000400
368
369/* SMI reg */
370#define ETH_SMI_BUSY 0x10000000 /* 0 - Write, 1 - Read */
371#define ETH_SMI_READ_VALID 0x08000000 /* 0 - Write, 1 - Read */
372#define ETH_SMI_OPCODE_WRITE 0 /* Completion of Read */
373#define ETH_SMI_OPCODE_READ 0x04000000 /* Operation is in progress */
374
375/* Interrupt Cause Register Bit Definitions */
376
377/* SDMA command status fields macros */
378
379/* Tx & Rx descriptors status */
380#define ETH_ERROR_SUMMARY 0x00000001
381
382/* Tx & Rx descriptors command */
383#define ETH_BUFFER_OWNED_BY_DMA 0x80000000
384
385/* Tx descriptors status */
386#define ETH_LC_ERROR 0
387#define ETH_UR_ERROR 0x00000002
388#define ETH_RL_ERROR 0x00000004
389#define ETH_LLC_SNAP_FORMAT 0x00000200
390
391/* Rx descriptors status */
392#define ETH_OVERRUN_ERROR 0x00000002
393#define ETH_MAX_FRAME_LENGTH_ERROR 0x00000004
394#define ETH_RESOURCE_ERROR 0x00000006
395#define ETH_VLAN_TAGGED 0x00080000
396#define ETH_BPDU_FRAME 0x00100000
397#define ETH_UDP_FRAME_OVER_IP_V_4 0x00200000
398#define ETH_OTHER_FRAME_TYPE 0x00400000
399#define ETH_LAYER_2_IS_ETH_V_2 0x00800000
400#define ETH_FRAME_TYPE_IP_V_4 0x01000000
401#define ETH_FRAME_HEADER_OK 0x02000000
402#define ETH_RX_LAST_DESC 0x04000000
403#define ETH_RX_FIRST_DESC 0x08000000
404#define ETH_UNKNOWN_DESTINATION_ADDR 0x10000000
405#define ETH_RX_ENABLE_INTERRUPT 0x20000000
406#define ETH_LAYER_4_CHECKSUM_OK 0x40000000
407
408/* Rx descriptors byte count */
409#define ETH_FRAME_FRAGMENTED 0x00000004
410
411/* Tx descriptors command */
412#define ETH_LAYER_4_CHECKSUM_FIRST_DESC 0x00000400
413#define ETH_FRAME_SET_TO_VLAN 0x00008000
414#define ETH_UDP_FRAME 0x00010000
415#define ETH_GEN_TCP_UDP_CHECKSUM 0x00020000
416#define ETH_GEN_IP_V_4_CHECKSUM 0x00040000
417#define ETH_ZERO_PADDING 0x00080000
418#define ETH_TX_LAST_DESC 0x00100000
419#define ETH_TX_FIRST_DESC 0x00200000
420#define ETH_GEN_CRC 0x00400000
421#define ETH_TX_ENABLE_INTERRUPT 0x00800000
422#define ETH_AUTO_MODE 0x40000000
423
424#define ETH_TX_IHL_SHIFT 11
425
426/* typedefs */
427
428typedef enum _eth_func_ret_status {
429 ETH_OK, /* Returned as expected. */
430 ETH_ERROR, /* Fundamental error. */
431 ETH_RETRY, /* Could not process request. Try later.*/
432 ETH_END_OF_JOB, /* Ring has nothing to process. */
433 ETH_QUEUE_FULL, /* Ring resource error. */
434 ETH_QUEUE_LAST_RESOURCE /* Ring resources about to exhaust. */
435} ETH_FUNC_RET_STATUS;
436
437typedef enum _eth_target {
438 ETH_TARGET_DRAM,
439 ETH_TARGET_DEVICE,
440 ETH_TARGET_CBS,
441 ETH_TARGET_PCI0,
442 ETH_TARGET_PCI1
443} ETH_TARGET;
444
445/* These are for big-endian machines. Little endian needs different
446 * definitions.
447 */
448#if defined(__BIG_ENDIAN)
449struct eth_rx_desc {
450 u16 byte_cnt; /* Descriptor buffer byte count */
451 u16 buf_size; /* Buffer size */
452 u32 cmd_sts; /* Descriptor command status */
453 u32 next_desc_ptr; /* Next descriptor pointer */
454 u32 buf_ptr; /* Descriptor buffer pointer */
455};
456
457struct eth_tx_desc {
458 u16 byte_cnt; /* buffer byte count */
459 u16 l4i_chk; /* CPU provided TCP checksum */
460 u32 cmd_sts; /* Command/status field */
461 u32 next_desc_ptr; /* Pointer to next descriptor */
462 u32 buf_ptr; /* pointer to buffer for this descriptor*/
463};
464#elif defined(__LITTLE_ENDIAN)
465struct eth_rx_desc {
466 u32 cmd_sts; /* Descriptor command status */
467 u16 buf_size; /* Buffer size */
468 u16 byte_cnt; /* Descriptor buffer byte count */
469 u32 buf_ptr; /* Descriptor buffer pointer */
470 u32 next_desc_ptr; /* Next descriptor pointer */
471};
472
473struct eth_tx_desc {
474 u32 cmd_sts; /* Command/status field */
475 u16 l4i_chk; /* CPU provided TCP checksum */
476 u16 byte_cnt; /* buffer byte count */
477 u32 buf_ptr; /* pointer to buffer for this descriptor*/
478 u32 next_desc_ptr; /* Pointer to next descriptor */
479};
480#else
481#error One of __BIG_ENDIAN or __LITTLE_ENDIAN must be defined
482#endif
483
484/* Unified struct for Rx and Tx operations. The user is not required to */
485/* be familier with neither Tx nor Rx descriptors. */
486struct pkt_info {
487 unsigned short byte_cnt; /* Descriptor buffer byte count */
488 unsigned short l4i_chk; /* Tx CPU provided TCP Checksum */
489 unsigned int cmd_sts; /* Descriptor command status */
490 dma_addr_t buf_ptr; /* Descriptor buffer pointer */
491 struct sk_buff *return_info; /* User resource return information */
492};
493
494/* Ethernet port specific information */
495struct mv643xx_mib_counters {
496 u64 good_octets_received;
497 u32 bad_octets_received;
498 u32 internal_mac_transmit_err;
499 u32 good_frames_received;
500 u32 bad_frames_received;
501 u32 broadcast_frames_received;
502 u32 multicast_frames_received;
503 u32 frames_64_octets;
504 u32 frames_65_to_127_octets;
505 u32 frames_128_to_255_octets;
506 u32 frames_256_to_511_octets;
507 u32 frames_512_to_1023_octets;
508 u32 frames_1024_to_max_octets;
509 u64 good_octets_sent;
510 u32 good_frames_sent;
511 u32 excessive_collision;
512 u32 multicast_frames_sent;
513 u32 broadcast_frames_sent;
514 u32 unrec_mac_control_received;
515 u32 fc_sent;
516 u32 good_fc_received;
517 u32 bad_fc_received;
518 u32 undersize_received;
519 u32 fragments_received;
520 u32 oversize_received;
521 u32 jabber_received;
522 u32 mac_receive_error;
523 u32 bad_crc_event;
524 u32 collision;
525 u32 late_collision;
526};
527
528struct mv643xx_private {
529 int port_num; /* User Ethernet port number */
530
531 u32 rx_sram_addr; /* Base address of rx sram area */
532 u32 rx_sram_size; /* Size of rx sram area */
533 u32 tx_sram_addr; /* Base address of tx sram area */
534 u32 tx_sram_size; /* Size of tx sram area */
535
536 int rx_resource_err; /* Rx ring resource error flag */
537
538 /* Tx/Rx rings managment indexes fields. For driver use */
539
540 /* Next available and first returning Rx resource */
541 int rx_curr_desc_q, rx_used_desc_q;
542
543 /* Next available and first returning Tx resource */
544 int tx_curr_desc_q, tx_used_desc_q;
545
546#ifdef MV643XX_TX_FAST_REFILL
547 u32 tx_clean_threshold;
548#endif
549
550 struct eth_rx_desc *p_rx_desc_area;
551 dma_addr_t rx_desc_dma;
552 int rx_desc_area_size;
553 struct sk_buff **rx_skb;
554
555 struct eth_tx_desc *p_tx_desc_area;
556 dma_addr_t tx_desc_dma;
557 int tx_desc_area_size;
558 struct sk_buff **tx_skb;
559
560 struct work_struct tx_timeout_task;
561
562 struct net_device *dev;
563 struct napi_struct napi;
564 struct net_device_stats stats;
565 struct mv643xx_mib_counters mib_counters;
566 spinlock_t lock;
567 /* Size of Tx Ring per queue */
568 int tx_ring_size;
569 /* Number of tx descriptors in use */
570 int tx_desc_count;
571 /* Size of Rx Ring per queue */
572 int rx_ring_size;
573 /* Number of rx descriptors in use */
574 int rx_desc_count;
575
576 /*
577 * Used in case RX Ring is empty, which can be caused when
578 * system does not have resources (skb's)
579 */
580 struct timer_list timeout;
581
582 u32 rx_int_coal;
583 u32 tx_int_coal;
584 struct mii_if_info mii;
585};
52 586
53/* Static function declarations */ 587/* Static function declarations */
588static void eth_port_init(struct mv643xx_private *mp);
589static void eth_port_reset(unsigned int eth_port_num);
590static void eth_port_start(struct net_device *dev);
591
592static void ethernet_phy_reset(unsigned int eth_port_num);
593
594static void eth_port_write_smi_reg(unsigned int eth_port_num,
595 unsigned int phy_reg, unsigned int value);
596
597static void eth_port_read_smi_reg(unsigned int eth_port_num,
598 unsigned int phy_reg, unsigned int *value);
599
600static void eth_clear_mib_counters(unsigned int eth_port_num);
601
602static ETH_FUNC_RET_STATUS eth_port_receive(struct mv643xx_private *mp,
603 struct pkt_info *p_pkt_info);
604static ETH_FUNC_RET_STATUS eth_rx_return_buff(struct mv643xx_private *mp,
605 struct pkt_info *p_pkt_info);
606
54static void eth_port_uc_addr_get(unsigned int port_num, unsigned char *p_addr); 607static void eth_port_uc_addr_get(unsigned int port_num, unsigned char *p_addr);
55static void eth_port_uc_addr_set(unsigned int port_num, unsigned char *p_addr); 608static void eth_port_uc_addr_set(unsigned int port_num, unsigned char *p_addr);
56static void eth_port_set_multicast_list(struct net_device *); 609static void eth_port_set_multicast_list(struct net_device *);
@@ -78,26 +631,19 @@ static const struct ethtool_ops mv643xx_ethtool_ops;
78static char mv643xx_driver_name[] = "mv643xx_eth"; 631static char mv643xx_driver_name[] = "mv643xx_eth";
79static char mv643xx_driver_version[] = "1.0"; 632static char mv643xx_driver_version[] = "1.0";
80 633
81static void __iomem *mv643xx_eth_shared_base; 634static void __iomem *mv643xx_eth_base;
82 635
83/* used to protect MV643XX_ETH_SMI_REG, which is shared across ports */ 636/* used to protect SMI_REG, which is shared across ports */
84static DEFINE_SPINLOCK(mv643xx_eth_phy_lock); 637static DEFINE_SPINLOCK(mv643xx_eth_phy_lock);
85 638
86static inline u32 mv_read(int offset) 639static inline u32 mv_read(int offset)
87{ 640{
88 void __iomem *reg_base; 641 return readl(mv643xx_eth_base + offset);
89
90 reg_base = mv643xx_eth_shared_base - MV643XX_ETH_SHARED_REGS;
91
92 return readl(reg_base + offset);
93} 642}
94 643
95static inline void mv_write(int offset, u32 data) 644static inline void mv_write(int offset, u32 data)
96{ 645{
97 void __iomem *reg_base; 646 writel(data, mv643xx_eth_base + offset);
98
99 reg_base = mv643xx_eth_shared_base - MV643XX_ETH_SHARED_REGS;
100 writel(data, reg_base + offset);
101} 647}
102 648
103/* 649/*
@@ -221,12 +767,12 @@ static void mv643xx_eth_set_rx_mode(struct net_device *dev)
221 struct mv643xx_private *mp = netdev_priv(dev); 767 struct mv643xx_private *mp = netdev_priv(dev);
222 u32 config_reg; 768 u32 config_reg;
223 769
224 config_reg = mv_read(MV643XX_ETH_PORT_CONFIG_REG(mp->port_num)); 770 config_reg = mv_read(PORT_CONFIG_REG(mp->port_num));
225 if (dev->flags & IFF_PROMISC) 771 if (dev->flags & IFF_PROMISC)
226 config_reg |= (u32) MV643XX_ETH_UNICAST_PROMISCUOUS_MODE; 772 config_reg |= (u32) UNICAST_PROMISCUOUS_MODE;
227 else 773 else
228 config_reg &= ~(u32) MV643XX_ETH_UNICAST_PROMISCUOUS_MODE; 774 config_reg &= ~(u32) UNICAST_PROMISCUOUS_MODE;
229 mv_write(MV643XX_ETH_PORT_CONFIG_REG(mp->port_num), config_reg); 775 mv_write(PORT_CONFIG_REG(mp->port_num), config_reg);
230 776
231 eth_port_set_multicast_list(dev); 777 eth_port_set_multicast_list(dev);
232} 778}
@@ -462,41 +1008,37 @@ static void mv643xx_eth_update_pscr(struct net_device *dev,
462 u32 o_pscr, n_pscr; 1008 u32 o_pscr, n_pscr;
463 unsigned int queues; 1009 unsigned int queues;
464 1010
465 o_pscr = mv_read(MV643XX_ETH_PORT_SERIAL_CONTROL_REG(port_num)); 1011 o_pscr = mv_read(PORT_SERIAL_CONTROL_REG(port_num));
466 n_pscr = o_pscr; 1012 n_pscr = o_pscr;
467 1013
468 /* clear speed, duplex and rx buffer size fields */ 1014 /* clear speed, duplex and rx buffer size fields */
469 n_pscr &= ~(MV643XX_ETH_SET_MII_SPEED_TO_100 | 1015 n_pscr &= ~(SET_MII_SPEED_TO_100 |
470 MV643XX_ETH_SET_GMII_SPEED_TO_1000 | 1016 SET_GMII_SPEED_TO_1000 |
471 MV643XX_ETH_SET_FULL_DUPLEX_MODE | 1017 SET_FULL_DUPLEX_MODE |
472 MV643XX_ETH_MAX_RX_PACKET_MASK); 1018 MAX_RX_PACKET_MASK);
473 1019
474 if (ecmd->duplex == DUPLEX_FULL) 1020 if (ecmd->duplex == DUPLEX_FULL)
475 n_pscr |= MV643XX_ETH_SET_FULL_DUPLEX_MODE; 1021 n_pscr |= SET_FULL_DUPLEX_MODE;
476 1022
477 if (ecmd->speed == SPEED_1000) 1023 if (ecmd->speed == SPEED_1000)
478 n_pscr |= MV643XX_ETH_SET_GMII_SPEED_TO_1000 | 1024 n_pscr |= SET_GMII_SPEED_TO_1000 |
479 MV643XX_ETH_MAX_RX_PACKET_9700BYTE; 1025 MAX_RX_PACKET_9700BYTE;
480 else { 1026 else {
481 if (ecmd->speed == SPEED_100) 1027 if (ecmd->speed == SPEED_100)
482 n_pscr |= MV643XX_ETH_SET_MII_SPEED_TO_100; 1028 n_pscr |= SET_MII_SPEED_TO_100;
483 n_pscr |= MV643XX_ETH_MAX_RX_PACKET_1522BYTE; 1029 n_pscr |= MAX_RX_PACKET_1522BYTE;
484 } 1030 }
485 1031
486 if (n_pscr != o_pscr) { 1032 if (n_pscr != o_pscr) {
487 if ((o_pscr & MV643XX_ETH_SERIAL_PORT_ENABLE) == 0) 1033 if ((o_pscr & SERIAL_PORT_ENABLE) == 0)
488 mv_write(MV643XX_ETH_PORT_SERIAL_CONTROL_REG(port_num), 1034 mv_write(PORT_SERIAL_CONTROL_REG(port_num), n_pscr);
489 n_pscr);
490 else { 1035 else {
491 queues = mv643xx_eth_port_disable_tx(port_num); 1036 queues = mv643xx_eth_port_disable_tx(port_num);
492 1037
493 o_pscr &= ~MV643XX_ETH_SERIAL_PORT_ENABLE; 1038 o_pscr &= ~SERIAL_PORT_ENABLE;
494 mv_write(MV643XX_ETH_PORT_SERIAL_CONTROL_REG(port_num), 1039 mv_write(PORT_SERIAL_CONTROL_REG(port_num), o_pscr);
495 o_pscr); 1040 mv_write(PORT_SERIAL_CONTROL_REG(port_num), n_pscr);
496 mv_write(MV643XX_ETH_PORT_SERIAL_CONTROL_REG(port_num), 1041 mv_write(PORT_SERIAL_CONTROL_REG(port_num), n_pscr);
497 n_pscr);
498 mv_write(MV643XX_ETH_PORT_SERIAL_CONTROL_REG(port_num),
499 n_pscr);
500 if (queues) 1042 if (queues)
501 mv643xx_eth_port_enable_tx(port_num, queues); 1043 mv643xx_eth_port_enable_tx(port_num, queues);
502 } 1044 }
@@ -522,13 +1064,13 @@ static irqreturn_t mv643xx_eth_int_handler(int irq, void *dev_id)
522 unsigned int port_num = mp->port_num; 1064 unsigned int port_num = mp->port_num;
523 1065
524 /* Read interrupt cause registers */ 1066 /* Read interrupt cause registers */
525 eth_int_cause = mv_read(MV643XX_ETH_INTERRUPT_CAUSE_REG(port_num)) & 1067 eth_int_cause = mv_read(INTERRUPT_CAUSE_REG(port_num)) &
526 ETH_INT_UNMASK_ALL; 1068 ETH_INT_UNMASK_ALL;
527 if (eth_int_cause & ETH_INT_CAUSE_EXT) { 1069 if (eth_int_cause & ETH_INT_CAUSE_EXT) {
528 eth_int_cause_ext = mv_read( 1070 eth_int_cause_ext = mv_read(
529 MV643XX_ETH_INTERRUPT_CAUSE_EXTEND_REG(port_num)) & 1071 INTERRUPT_CAUSE_EXTEND_REG(port_num)) &
530 ETH_INT_UNMASK_ALL_EXT; 1072 ETH_INT_UNMASK_ALL_EXT;
531 mv_write(MV643XX_ETH_INTERRUPT_CAUSE_EXTEND_REG(port_num), 1073 mv_write(INTERRUPT_CAUSE_EXTEND_REG(port_num),
532 ~eth_int_cause_ext); 1074 ~eth_int_cause_ext);
533 } 1075 }
534 1076
@@ -556,10 +1098,10 @@ static irqreturn_t mv643xx_eth_int_handler(int irq, void *dev_id)
556#ifdef MV643XX_NAPI 1098#ifdef MV643XX_NAPI
557 if (eth_int_cause & ETH_INT_CAUSE_RX) { 1099 if (eth_int_cause & ETH_INT_CAUSE_RX) {
558 /* schedule the NAPI poll routine to maintain port */ 1100 /* schedule the NAPI poll routine to maintain port */
559 mv_write(MV643XX_ETH_INTERRUPT_MASK_REG(port_num), 1101 mv_write(INTERRUPT_MASK_REG(port_num), ETH_INT_MASK_ALL);
560 ETH_INT_MASK_ALL); 1102
561 /* wait for previous write to complete */ 1103 /* wait for previous write to complete */
562 mv_read(MV643XX_ETH_INTERRUPT_MASK_REG(port_num)); 1104 mv_read(INTERRUPT_MASK_REG(port_num));
563 1105
564 netif_rx_schedule(dev, &mp->napi); 1106 netif_rx_schedule(dev, &mp->napi);
565 } 1107 }
@@ -611,9 +1153,9 @@ static unsigned int eth_port_set_rx_coal(unsigned int eth_port_num,
611 unsigned int coal = ((t_clk / 1000000) * delay) / 64; 1153 unsigned int coal = ((t_clk / 1000000) * delay) / 64;
612 1154
613 /* Set RX Coalescing mechanism */ 1155 /* Set RX Coalescing mechanism */
614 mv_write(MV643XX_ETH_SDMA_CONFIG_REG(eth_port_num), 1156 mv_write(SDMA_CONFIG_REG(eth_port_num),
615 ((coal & 0x3fff) << 8) | 1157 ((coal & 0x3fff) << 8) |
616 (mv_read(MV643XX_ETH_SDMA_CONFIG_REG(eth_port_num)) 1158 (mv_read(SDMA_CONFIG_REG(eth_port_num))
617 & 0xffc000ff)); 1159 & 0xffc000ff));
618 1160
619 return coal; 1161 return coal;
@@ -649,8 +1191,7 @@ static unsigned int eth_port_set_tx_coal(unsigned int eth_port_num,
649 unsigned int coal; 1191 unsigned int coal;
650 coal = ((t_clk / 1000000) * delay) / 64; 1192 coal = ((t_clk / 1000000) * delay) / 64;
651 /* Set TX Coalescing mechanism */ 1193 /* Set TX Coalescing mechanism */
652 mv_write(MV643XX_ETH_TX_FIFO_URGENT_THRESHOLD_REG(eth_port_num), 1194 mv_write(TX_FIFO_URGENT_THRESHOLD_REG(eth_port_num), coal << 4);
653 coal << 4);
654 return coal; 1195 return coal;
655} 1196}
656 1197
@@ -786,10 +1327,10 @@ static int mv643xx_eth_open(struct net_device *dev)
786 int err; 1327 int err;
787 1328
788 /* Clear any pending ethernet port interrupts */ 1329 /* Clear any pending ethernet port interrupts */
789 mv_write(MV643XX_ETH_INTERRUPT_CAUSE_REG(port_num), 0); 1330 mv_write(INTERRUPT_CAUSE_REG(port_num), 0);
790 mv_write(MV643XX_ETH_INTERRUPT_CAUSE_EXTEND_REG(port_num), 0); 1331 mv_write(INTERRUPT_CAUSE_EXTEND_REG(port_num), 0);
791 /* wait for previous write to complete */ 1332 /* wait for previous write to complete */
792 mv_read (MV643XX_ETH_INTERRUPT_CAUSE_EXTEND_REG(port_num)); 1333 mv_read (INTERRUPT_CAUSE_EXTEND_REG(port_num));
793 1334
794 err = request_irq(dev->irq, mv643xx_eth_int_handler, 1335 err = request_irq(dev->irq, mv643xx_eth_int_handler,
795 IRQF_SHARED | IRQF_SAMPLE_RANDOM, dev->name, dev); 1336 IRQF_SHARED | IRQF_SAMPLE_RANDOM, dev->name, dev);
@@ -896,11 +1437,10 @@ static int mv643xx_eth_open(struct net_device *dev)
896 eth_port_set_tx_coal(port_num, 133000000, MV643XX_TX_COAL); 1437 eth_port_set_tx_coal(port_num, 133000000, MV643XX_TX_COAL);
897 1438
898 /* Unmask phy and link status changes interrupts */ 1439 /* Unmask phy and link status changes interrupts */
899 mv_write(MV643XX_ETH_INTERRUPT_EXTEND_MASK_REG(port_num), 1440 mv_write(INTERRUPT_EXTEND_MASK_REG(port_num), ETH_INT_UNMASK_ALL_EXT);
900 ETH_INT_UNMASK_ALL_EXT);
901 1441
902 /* Unmask RX buffer and TX end interrupt */ 1442 /* Unmask RX buffer and TX end interrupt */
903 mv_write(MV643XX_ETH_INTERRUPT_MASK_REG(port_num), ETH_INT_UNMASK_ALL); 1443 mv_write(INTERRUPT_MASK_REG(port_num), ETH_INT_UNMASK_ALL);
904 1444
905 return 0; 1445 return 0;
906 1446
@@ -980,9 +1520,9 @@ static int mv643xx_eth_stop(struct net_device *dev)
980 unsigned int port_num = mp->port_num; 1520 unsigned int port_num = mp->port_num;
981 1521
982 /* Mask all interrupts on ethernet port */ 1522 /* Mask all interrupts on ethernet port */
983 mv_write(MV643XX_ETH_INTERRUPT_MASK_REG(port_num), ETH_INT_MASK_ALL); 1523 mv_write(INTERRUPT_MASK_REG(port_num), ETH_INT_MASK_ALL);
984 /* wait for previous write to complete */ 1524 /* wait for previous write to complete */
985 mv_read(MV643XX_ETH_INTERRUPT_MASK_REG(port_num)); 1525 mv_read(INTERRUPT_MASK_REG(port_num));
986 1526
987#ifdef MV643XX_NAPI 1527#ifdef MV643XX_NAPI
988 napi_disable(&mp->napi); 1528 napi_disable(&mp->napi);
@@ -1021,16 +1561,15 @@ static int mv643xx_poll(struct napi_struct *napi, int budget)
1021#endif 1561#endif
1022 1562
1023 work_done = 0; 1563 work_done = 0;
1024 if ((mv_read(MV643XX_ETH_RX_CURRENT_QUEUE_DESC_PTR_0(port_num))) 1564 if ((mv_read(RX_CURRENT_QUEUE_DESC_PTR_0(port_num)))
1025 != (u32) mp->rx_used_desc_q) 1565 != (u32) mp->rx_used_desc_q)
1026 work_done = mv643xx_eth_receive_queue(dev, budget); 1566 work_done = mv643xx_eth_receive_queue(dev, budget);
1027 1567
1028 if (work_done < budget) { 1568 if (work_done < budget) {
1029 netif_rx_complete(dev, napi); 1569 netif_rx_complete(dev, napi);
1030 mv_write(MV643XX_ETH_INTERRUPT_CAUSE_REG(port_num), 0); 1570 mv_write(INTERRUPT_CAUSE_REG(port_num), 0);
1031 mv_write(MV643XX_ETH_INTERRUPT_CAUSE_EXTEND_REG(port_num), 0); 1571 mv_write(INTERRUPT_CAUSE_EXTEND_REG(port_num), 0);
1032 mv_write(MV643XX_ETH_INTERRUPT_MASK_REG(port_num), 1572 mv_write(INTERRUPT_MASK_REG(port_num), ETH_INT_UNMASK_ALL);
1033 ETH_INT_UNMASK_ALL);
1034 } 1573 }
1035 1574
1036 return work_done; 1575 return work_done;
@@ -1233,13 +1772,13 @@ static void mv643xx_netpoll(struct net_device *netdev)
1233 struct mv643xx_private *mp = netdev_priv(netdev); 1772 struct mv643xx_private *mp = netdev_priv(netdev);
1234 int port_num = mp->port_num; 1773 int port_num = mp->port_num;
1235 1774
1236 mv_write(MV643XX_ETH_INTERRUPT_MASK_REG(port_num), ETH_INT_MASK_ALL); 1775 mv_write(INTERRUPT_MASK_REG(port_num), ETH_INT_MASK_ALL);
1237 /* wait for previous write to complete */ 1776 /* wait for previous write to complete */
1238 mv_read(MV643XX_ETH_INTERRUPT_MASK_REG(port_num)); 1777 mv_read(INTERRUPT_MASK_REG(port_num));
1239 1778
1240 mv643xx_eth_int_handler(netdev->irq, netdev); 1779 mv643xx_eth_int_handler(netdev->irq, netdev);
1241 1780
1242 mv_write(MV643XX_ETH_INTERRUPT_MASK_REG(port_num), ETH_INT_UNMASK_ALL); 1781 mv_write(INTERRUPT_MASK_REG(port_num), ETH_INT_UNMASK_ALL);
1243} 1782}
1244#endif 1783#endif
1245 1784
@@ -1357,8 +1896,8 @@ static int mv643xx_eth_probe(struct platform_device *pdev)
1357 1896
1358 /* set default config values */ 1897 /* set default config values */
1359 eth_port_uc_addr_get(port_num, dev->dev_addr); 1898 eth_port_uc_addr_get(port_num, dev->dev_addr);
1360 mp->rx_ring_size = MV643XX_ETH_PORT_DEFAULT_RECEIVE_QUEUE_SIZE; 1899 mp->rx_ring_size = PORT_DEFAULT_RECEIVE_QUEUE_SIZE;
1361 mp->tx_ring_size = MV643XX_ETH_PORT_DEFAULT_TRANSMIT_QUEUE_SIZE; 1900 mp->tx_ring_size = PORT_DEFAULT_TRANSMIT_QUEUE_SIZE;
1362 1901
1363 if (is_valid_ether_addr(pd->mac_addr)) 1902 if (is_valid_ether_addr(pd->mac_addr))
1364 memcpy(dev->dev_addr, pd->mac_addr, 6); 1903 memcpy(dev->dev_addr, pd->mac_addr, 6);
@@ -1470,9 +2009,8 @@ static int mv643xx_eth_shared_probe(struct platform_device *pdev)
1470 if (res == NULL) 2009 if (res == NULL)
1471 return -ENODEV; 2010 return -ENODEV;
1472 2011
1473 mv643xx_eth_shared_base = ioremap(res->start, 2012 mv643xx_eth_base = ioremap(res->start, res->end - res->start + 1);
1474 MV643XX_ETH_SHARED_REGS_SIZE); 2013 if (mv643xx_eth_base == NULL)
1475 if (mv643xx_eth_shared_base == NULL)
1476 return -ENOMEM; 2014 return -ENOMEM;
1477 2015
1478 return 0; 2016 return 0;
@@ -1481,8 +2019,8 @@ static int mv643xx_eth_shared_probe(struct platform_device *pdev)
1481 2019
1482static int mv643xx_eth_shared_remove(struct platform_device *pdev) 2020static int mv643xx_eth_shared_remove(struct platform_device *pdev)
1483{ 2021{
1484 iounmap(mv643xx_eth_shared_base); 2022 iounmap(mv643xx_eth_base);
1485 mv643xx_eth_shared_base = NULL; 2023 mv643xx_eth_base = NULL;
1486 2024
1487 return 0; 2025 return 0;
1488} 2026}
@@ -1494,8 +2032,8 @@ static void mv643xx_eth_shutdown(struct platform_device *pdev)
1494 unsigned int port_num = mp->port_num; 2032 unsigned int port_num = mp->port_num;
1495 2033
1496 /* Mask all interrupts on ethernet port */ 2034 /* Mask all interrupts on ethernet port */
1497 mv_write(MV643XX_ETH_INTERRUPT_MASK_REG(port_num), 0); 2035 mv_write(INTERRUPT_MASK_REG(port_num), 0);
1498 mv_read (MV643XX_ETH_INTERRUPT_MASK_REG(port_num)); 2036 mv_read (INTERRUPT_MASK_REG(port_num));
1499 2037
1500 eth_port_reset(port_num); 2038 eth_port_reset(port_num);
1501} 2039}
@@ -1762,49 +2300,49 @@ static void eth_port_start(struct net_device *dev)
1762 2300
1763 /* Assignment of Tx CTRP of given queue */ 2301 /* Assignment of Tx CTRP of given queue */
1764 tx_curr_desc = mp->tx_curr_desc_q; 2302 tx_curr_desc = mp->tx_curr_desc_q;
1765 mv_write(MV643XX_ETH_TX_CURRENT_QUEUE_DESC_PTR_0(port_num), 2303 mv_write(TX_CURRENT_QUEUE_DESC_PTR_0(port_num),
1766 (u32)((struct eth_tx_desc *)mp->tx_desc_dma + tx_curr_desc)); 2304 (u32)((struct eth_tx_desc *)mp->tx_desc_dma + tx_curr_desc));
1767 2305
1768 /* Assignment of Rx CRDP of given queue */ 2306 /* Assignment of Rx CRDP of given queue */
1769 rx_curr_desc = mp->rx_curr_desc_q; 2307 rx_curr_desc = mp->rx_curr_desc_q;
1770 mv_write(MV643XX_ETH_RX_CURRENT_QUEUE_DESC_PTR_0(port_num), 2308 mv_write(RX_CURRENT_QUEUE_DESC_PTR_0(port_num),
1771 (u32)((struct eth_rx_desc *)mp->rx_desc_dma + rx_curr_desc)); 2309 (u32)((struct eth_rx_desc *)mp->rx_desc_dma + rx_curr_desc));
1772 2310
1773 /* Add the assigned Ethernet address to the port's address table */ 2311 /* Add the assigned Ethernet address to the port's address table */
1774 eth_port_uc_addr_set(port_num, dev->dev_addr); 2312 eth_port_uc_addr_set(port_num, dev->dev_addr);
1775 2313
1776 /* Assign port configuration and command. */ 2314 /* Assign port configuration and command. */
1777 mv_write(MV643XX_ETH_PORT_CONFIG_REG(port_num), 2315 mv_write(PORT_CONFIG_REG(port_num),
1778 MV643XX_ETH_PORT_CONFIG_DEFAULT_VALUE); 2316 PORT_CONFIG_DEFAULT_VALUE);
1779 2317
1780 mv_write(MV643XX_ETH_PORT_CONFIG_EXTEND_REG(port_num), 2318 mv_write(PORT_CONFIG_EXTEND_REG(port_num),
1781 MV643XX_ETH_PORT_CONFIG_EXTEND_DEFAULT_VALUE); 2319 PORT_CONFIG_EXTEND_DEFAULT_VALUE);
1782 2320
1783 pscr = mv_read(MV643XX_ETH_PORT_SERIAL_CONTROL_REG(port_num)); 2321 pscr = mv_read(PORT_SERIAL_CONTROL_REG(port_num));
1784 2322
1785 pscr &= ~(MV643XX_ETH_SERIAL_PORT_ENABLE | MV643XX_ETH_FORCE_LINK_PASS); 2323 pscr &= ~(SERIAL_PORT_ENABLE | FORCE_LINK_PASS);
1786 mv_write(MV643XX_ETH_PORT_SERIAL_CONTROL_REG(port_num), pscr); 2324 mv_write(PORT_SERIAL_CONTROL_REG(port_num), pscr);
1787 2325
1788 pscr |= MV643XX_ETH_DISABLE_AUTO_NEG_FOR_FLOW_CTRL | 2326 pscr |= DISABLE_AUTO_NEG_FOR_FLOW_CTRL |
1789 MV643XX_ETH_DISABLE_AUTO_NEG_SPEED_GMII | 2327 DISABLE_AUTO_NEG_SPEED_GMII |
1790 MV643XX_ETH_DISABLE_AUTO_NEG_FOR_DUPLX | 2328 DISABLE_AUTO_NEG_FOR_DUPLX |
1791 MV643XX_ETH_DO_NOT_FORCE_LINK_FAIL | 2329 DO_NOT_FORCE_LINK_FAIL |
1792 MV643XX_ETH_SERIAL_PORT_CONTROL_RESERVED; 2330 SERIAL_PORT_CONTROL_RESERVED;
1793 2331
1794 mv_write(MV643XX_ETH_PORT_SERIAL_CONTROL_REG(port_num), pscr); 2332 mv_write(PORT_SERIAL_CONTROL_REG(port_num), pscr);
1795 2333
1796 pscr |= MV643XX_ETH_SERIAL_PORT_ENABLE; 2334 pscr |= SERIAL_PORT_ENABLE;
1797 mv_write(MV643XX_ETH_PORT_SERIAL_CONTROL_REG(port_num), pscr); 2335 mv_write(PORT_SERIAL_CONTROL_REG(port_num), pscr);
1798 2336
1799 /* Assign port SDMA configuration */ 2337 /* Assign port SDMA configuration */
1800 mv_write(MV643XX_ETH_SDMA_CONFIG_REG(port_num), 2338 mv_write(SDMA_CONFIG_REG(port_num),
1801 MV643XX_ETH_PORT_SDMA_CONFIG_DEFAULT_VALUE); 2339 PORT_SDMA_CONFIG_DEFAULT_VALUE);
1802 2340
1803 /* Enable port Rx. */ 2341 /* Enable port Rx. */
1804 mv643xx_eth_port_enable_rx(port_num, ETH_RX_QUEUES_ENABLED); 2342 mv643xx_eth_port_enable_rx(port_num, ETH_RX_QUEUES_ENABLED);
1805 2343
1806 /* Disable port bandwidth limits by clearing MTU register */ 2344 /* Disable port bandwidth limits by clearing MTU register */
1807 mv_write(MV643XX_ETH_MAXIMUM_TRANSMIT_UNIT(port_num), 0); 2345 mv_write(MAXIMUM_TRANSMIT_UNIT(port_num), 0);
1808 2346
1809 /* save phy settings across reset */ 2347 /* save phy settings across reset */
1810 mv643xx_get_settings(dev, &ethtool_cmd); 2348 mv643xx_get_settings(dev, &ethtool_cmd);
@@ -1825,11 +2363,11 @@ static void eth_port_uc_addr_set(unsigned int port_num, unsigned char *p_addr)
1825 mac_h = (p_addr[0] << 24) | (p_addr[1] << 16) | (p_addr[2] << 8) | 2363 mac_h = (p_addr[0] << 24) | (p_addr[1] << 16) | (p_addr[2] << 8) |
1826 (p_addr[3] << 0); 2364 (p_addr[3] << 0);
1827 2365
1828 mv_write(MV643XX_ETH_MAC_ADDR_LOW(port_num), mac_l); 2366 mv_write(MAC_ADDR_LOW(port_num), mac_l);
1829 mv_write(MV643XX_ETH_MAC_ADDR_HIGH(port_num), mac_h); 2367 mv_write(MAC_ADDR_HIGH(port_num), mac_h);
1830 2368
1831 /* Accept frames with this address */ 2369 /* Accept frames with this address */
1832 table = MV643XX_ETH_DA_FILTER_UNICAST_TABLE_BASE(port_num); 2370 table = DA_FILTER_UNICAST_TABLE_BASE(port_num);
1833 eth_port_set_filter_table_entry(table, p_addr[5] & 0x0f); 2371 eth_port_set_filter_table_entry(table, p_addr[5] & 0x0f);
1834} 2372}
1835 2373
@@ -1841,8 +2379,8 @@ static void eth_port_uc_addr_get(unsigned int port_num, unsigned char *p_addr)
1841 unsigned int mac_h; 2379 unsigned int mac_h;
1842 unsigned int mac_l; 2380 unsigned int mac_l;
1843 2381
1844 mac_h = mv_read(MV643XX_ETH_MAC_ADDR_HIGH(port_num)); 2382 mac_h = mv_read(MAC_ADDR_HIGH(port_num));
1845 mac_l = mv_read(MV643XX_ETH_MAC_ADDR_LOW(port_num)); 2383 mac_l = mv_read(MAC_ADDR_LOW(port_num));
1846 2384
1847 p_addr[0] = (mac_h >> 24) & 0xff; 2385 p_addr[0] = (mac_h >> 24) & 0xff;
1848 p_addr[1] = (mac_h >> 16) & 0xff; 2386 p_addr[1] = (mac_h >> 16) & 0xff;
@@ -1902,7 +2440,7 @@ static void eth_port_mc_addr(unsigned int eth_port_num, unsigned char *p_addr)
1902 2440
1903 if ((p_addr[0] == 0x01) && (p_addr[1] == 0x00) && 2441 if ((p_addr[0] == 0x01) && (p_addr[1] == 0x00) &&
1904 (p_addr[2] == 0x5E) && (p_addr[3] == 0x00) && (p_addr[4] == 0x00)) { 2442 (p_addr[2] == 0x5E) && (p_addr[3] == 0x00) && (p_addr[4] == 0x00)) {
1905 table = MV643XX_ETH_DA_FILTER_SPECIAL_MULTICAST_TABLE_BASE 2443 table = DA_FILTER_SPECIAL_MULTICAST_TABLE_BASE
1906 (eth_port_num); 2444 (eth_port_num);
1907 eth_port_set_filter_table_entry(table, p_addr[5]); 2445 eth_port_set_filter_table_entry(table, p_addr[5]);
1908 return; 2446 return;
@@ -1976,7 +2514,7 @@ static void eth_port_mc_addr(unsigned int eth_port_num, unsigned char *p_addr)
1976 for (i = 0; i < 8; i++) 2514 for (i = 0; i < 8; i++)
1977 crc_result = crc_result | (crc[i] << i); 2515 crc_result = crc_result | (crc[i] << i);
1978 2516
1979 table = MV643XX_ETH_DA_FILTER_OTHER_MULTICAST_TABLE_BASE(eth_port_num); 2517 table = DA_FILTER_OTHER_MULTICAST_TABLE_BASE(eth_port_num);
1980 eth_port_set_filter_table_entry(table, crc_result); 2518 eth_port_set_filter_table_entry(table, crc_result);
1981} 2519}
1982 2520
@@ -2006,7 +2544,7 @@ static void eth_port_set_multicast_list(struct net_device *dev)
2006 * 3-1 Queue ETH_Q0=0 2544 * 3-1 Queue ETH_Q0=0
2007 * 7-4 Reserved = 0; 2545 * 7-4 Reserved = 0;
2008 */ 2546 */
2009 mv_write(MV643XX_ETH_DA_FILTER_SPECIAL_MULTICAST_TABLE_BASE(eth_port_num) + table_index, 0x01010101); 2547 mv_write(DA_FILTER_SPECIAL_MULTICAST_TABLE_BASE(eth_port_num) + table_index, 0x01010101);
2010 2548
2011 /* Set all entries in DA filter other multicast 2549 /* Set all entries in DA filter other multicast
2012 * table (Ex_dFOMT) 2550 * table (Ex_dFOMT)
@@ -2016,7 +2554,7 @@ static void eth_port_set_multicast_list(struct net_device *dev)
2016 * 3-1 Queue ETH_Q0=0 2554 * 3-1 Queue ETH_Q0=0
2017 * 7-4 Reserved = 0; 2555 * 7-4 Reserved = 0;
2018 */ 2556 */
2019 mv_write(MV643XX_ETH_DA_FILTER_OTHER_MULTICAST_TABLE_BASE(eth_port_num) + table_index, 0x01010101); 2557 mv_write(DA_FILTER_OTHER_MULTICAST_TABLE_BASE(eth_port_num) + table_index, 0x01010101);
2020 } 2558 }
2021 return; 2559 return;
2022 } 2560 }
@@ -2026,11 +2564,11 @@ static void eth_port_set_multicast_list(struct net_device *dev)
2026 */ 2564 */
2027 for (table_index = 0; table_index <= 0xFC; table_index += 4) { 2565 for (table_index = 0; table_index <= 0xFC; table_index += 4) {
2028 /* Clear DA filter special multicast table (Ex_dFSMT) */ 2566 /* Clear DA filter special multicast table (Ex_dFSMT) */
2029 mv_write(MV643XX_ETH_DA_FILTER_SPECIAL_MULTICAST_TABLE_BASE 2567 mv_write(DA_FILTER_SPECIAL_MULTICAST_TABLE_BASE
2030 (eth_port_num) + table_index, 0); 2568 (eth_port_num) + table_index, 0);
2031 2569
2032 /* Clear DA filter other multicast table (Ex_dFOMT) */ 2570 /* Clear DA filter other multicast table (Ex_dFOMT) */
2033 mv_write(MV643XX_ETH_DA_FILTER_OTHER_MULTICAST_TABLE_BASE 2571 mv_write(DA_FILTER_OTHER_MULTICAST_TABLE_BASE
2034 (eth_port_num) + table_index, 0); 2572 (eth_port_num) + table_index, 0);
2035 } 2573 }
2036 2574
@@ -2064,15 +2602,15 @@ static void eth_port_init_mac_tables(unsigned int eth_port_num)
2064 2602
2065 /* Clear DA filter unicast table (Ex_dFUT) */ 2603 /* Clear DA filter unicast table (Ex_dFUT) */
2066 for (table_index = 0; table_index <= 0xC; table_index += 4) 2604 for (table_index = 0; table_index <= 0xC; table_index += 4)
2067 mv_write(MV643XX_ETH_DA_FILTER_UNICAST_TABLE_BASE 2605 mv_write(DA_FILTER_UNICAST_TABLE_BASE
2068 (eth_port_num) + table_index, 0); 2606 (eth_port_num) + table_index, 0);
2069 2607
2070 for (table_index = 0; table_index <= 0xFC; table_index += 4) { 2608 for (table_index = 0; table_index <= 0xFC; table_index += 4) {
2071 /* Clear DA filter special multicast table (Ex_dFSMT) */ 2609 /* Clear DA filter special multicast table (Ex_dFSMT) */
2072 mv_write(MV643XX_ETH_DA_FILTER_SPECIAL_MULTICAST_TABLE_BASE 2610 mv_write(DA_FILTER_SPECIAL_MULTICAST_TABLE_BASE
2073 (eth_port_num) + table_index, 0); 2611 (eth_port_num) + table_index, 0);
2074 /* Clear DA filter other multicast table (Ex_dFOMT) */ 2612 /* Clear DA filter other multicast table (Ex_dFOMT) */
2075 mv_write(MV643XX_ETH_DA_FILTER_OTHER_MULTICAST_TABLE_BASE 2613 mv_write(DA_FILTER_OTHER_MULTICAST_TABLE_BASE
2076 (eth_port_num) + table_index, 0); 2614 (eth_port_num) + table_index, 0);
2077 } 2615 }
2078} 2616}
@@ -2101,12 +2639,12 @@ static void eth_clear_mib_counters(unsigned int eth_port_num)
2101 /* Perform dummy reads from MIB counters */ 2639 /* Perform dummy reads from MIB counters */
2102 for (i = ETH_MIB_GOOD_OCTETS_RECEIVED_LOW; i < ETH_MIB_LATE_COLLISION; 2640 for (i = ETH_MIB_GOOD_OCTETS_RECEIVED_LOW; i < ETH_MIB_LATE_COLLISION;
2103 i += 4) 2641 i += 4)
2104 mv_read(MV643XX_ETH_MIB_COUNTERS_BASE(eth_port_num) + i); 2642 mv_read(MIB_COUNTERS_BASE(eth_port_num) + i);
2105} 2643}
2106 2644
2107static inline u32 read_mib(struct mv643xx_private *mp, int offset) 2645static inline u32 read_mib(struct mv643xx_private *mp, int offset)
2108{ 2646{
2109 return mv_read(MV643XX_ETH_MIB_COUNTERS_BASE(mp->port_num) + offset); 2647 return mv_read(MIB_COUNTERS_BASE(mp->port_num) + offset);
2110} 2648}
2111 2649
2112static void eth_update_mib_counters(struct mv643xx_private *mp) 2650static void eth_update_mib_counters(struct mv643xx_private *mp)
@@ -2191,7 +2729,7 @@ static int ethernet_phy_get(unsigned int eth_port_num)
2191{ 2729{
2192 unsigned int reg_data; 2730 unsigned int reg_data;
2193 2731
2194 reg_data = mv_read(MV643XX_ETH_PHY_ADDR_REG); 2732 reg_data = mv_read(PHY_ADDR_REG);
2195 2733
2196 return ((reg_data >> (5 * eth_port_num)) & 0x1f); 2734 return ((reg_data >> (5 * eth_port_num)) & 0x1f);
2197} 2735}
@@ -2218,10 +2756,10 @@ static void ethernet_phy_set(unsigned int eth_port_num, int phy_addr)
2218 u32 reg_data; 2756 u32 reg_data;
2219 int addr_shift = 5 * eth_port_num; 2757 int addr_shift = 5 * eth_port_num;
2220 2758
2221 reg_data = mv_read(MV643XX_ETH_PHY_ADDR_REG); 2759 reg_data = mv_read(PHY_ADDR_REG);
2222 reg_data &= ~(0x1f << addr_shift); 2760 reg_data &= ~(0x1f << addr_shift);
2223 reg_data |= (phy_addr & 0x1f) << addr_shift; 2761 reg_data |= (phy_addr & 0x1f) << addr_shift;
2224 mv_write(MV643XX_ETH_PHY_ADDR_REG, reg_data); 2762 mv_write(PHY_ADDR_REG, reg_data);
2225} 2763}
2226 2764
2227/* 2765/*
@@ -2259,13 +2797,13 @@ static void ethernet_phy_reset(unsigned int eth_port_num)
2259static void mv643xx_eth_port_enable_tx(unsigned int port_num, 2797static void mv643xx_eth_port_enable_tx(unsigned int port_num,
2260 unsigned int queues) 2798 unsigned int queues)
2261{ 2799{
2262 mv_write(MV643XX_ETH_TRANSMIT_QUEUE_COMMAND_REG(port_num), queues); 2800 mv_write(TRANSMIT_QUEUE_COMMAND_REG(port_num), queues);
2263} 2801}
2264 2802
2265static void mv643xx_eth_port_enable_rx(unsigned int port_num, 2803static void mv643xx_eth_port_enable_rx(unsigned int port_num,
2266 unsigned int queues) 2804 unsigned int queues)
2267{ 2805{
2268 mv_write(MV643XX_ETH_RECEIVE_QUEUE_COMMAND_REG(port_num), queues); 2806 mv_write(RECEIVE_QUEUE_COMMAND_REG(port_num), queues);
2269} 2807}
2270 2808
2271static unsigned int mv643xx_eth_port_disable_tx(unsigned int port_num) 2809static unsigned int mv643xx_eth_port_disable_tx(unsigned int port_num)
@@ -2273,21 +2811,18 @@ static unsigned int mv643xx_eth_port_disable_tx(unsigned int port_num)
2273 u32 queues; 2811 u32 queues;
2274 2812
2275 /* Stop Tx port activity. Check port Tx activity. */ 2813 /* Stop Tx port activity. Check port Tx activity. */
2276 queues = mv_read(MV643XX_ETH_TRANSMIT_QUEUE_COMMAND_REG(port_num)) 2814 queues = mv_read(TRANSMIT_QUEUE_COMMAND_REG(port_num)) & 0xFF;
2277 & 0xFF;
2278 if (queues) { 2815 if (queues) {
2279 /* Issue stop command for active queues only */ 2816 /* Issue stop command for active queues only */
2280 mv_write(MV643XX_ETH_TRANSMIT_QUEUE_COMMAND_REG(port_num), 2817 mv_write(TRANSMIT_QUEUE_COMMAND_REG(port_num), (queues << 8));
2281 (queues << 8));
2282 2818
2283 /* Wait for all Tx activity to terminate. */ 2819 /* Wait for all Tx activity to terminate. */
2284 /* Check port cause register that all Tx queues are stopped */ 2820 /* Check port cause register that all Tx queues are stopped */
2285 while (mv_read(MV643XX_ETH_TRANSMIT_QUEUE_COMMAND_REG(port_num)) 2821 while (mv_read(TRANSMIT_QUEUE_COMMAND_REG(port_num)) & 0xFF)
2286 & 0xFF)
2287 udelay(PHY_WAIT_MICRO_SECONDS); 2822 udelay(PHY_WAIT_MICRO_SECONDS);
2288 2823
2289 /* Wait for Tx FIFO to empty */ 2824 /* Wait for Tx FIFO to empty */
2290 while (mv_read(MV643XX_ETH_PORT_STATUS_REG(port_num)) & 2825 while (mv_read(PORT_STATUS_REG(port_num)) &
2291 ETH_PORT_TX_FIFO_EMPTY) 2826 ETH_PORT_TX_FIFO_EMPTY)
2292 udelay(PHY_WAIT_MICRO_SECONDS); 2827 udelay(PHY_WAIT_MICRO_SECONDS);
2293 } 2828 }
@@ -2300,17 +2835,14 @@ static unsigned int mv643xx_eth_port_disable_rx(unsigned int port_num)
2300 u32 queues; 2835 u32 queues;
2301 2836
2302 /* Stop Rx port activity. Check port Rx activity. */ 2837 /* Stop Rx port activity. Check port Rx activity. */
2303 queues = mv_read(MV643XX_ETH_RECEIVE_QUEUE_COMMAND_REG(port_num)) 2838 queues = mv_read(RECEIVE_QUEUE_COMMAND_REG(port_num)) & 0xFF;
2304 & 0xFF;
2305 if (queues) { 2839 if (queues) {
2306 /* Issue stop command for active queues only */ 2840 /* Issue stop command for active queues only */
2307 mv_write(MV643XX_ETH_RECEIVE_QUEUE_COMMAND_REG(port_num), 2841 mv_write(RECEIVE_QUEUE_COMMAND_REG(port_num), (queues << 8));
2308 (queues << 8));
2309 2842
2310 /* Wait for all Rx activity to terminate. */ 2843 /* Wait for all Rx activity to terminate. */
2311 /* Check port cause register that all Rx queues are stopped */ 2844 /* Check port cause register that all Rx queues are stopped */
2312 while (mv_read(MV643XX_ETH_RECEIVE_QUEUE_COMMAND_REG(port_num)) 2845 while (mv_read(RECEIVE_QUEUE_COMMAND_REG(port_num)) & 0xFF)
2313 & 0xFF)
2314 udelay(PHY_WAIT_MICRO_SECONDS); 2846 udelay(PHY_WAIT_MICRO_SECONDS);
2315 } 2847 }
2316 2848
@@ -2346,11 +2878,11 @@ static void eth_port_reset(unsigned int port_num)
2346 eth_clear_mib_counters(port_num); 2878 eth_clear_mib_counters(port_num);
2347 2879
2348 /* Reset the Enable bit in the Configuration Register */ 2880 /* Reset the Enable bit in the Configuration Register */
2349 reg_data = mv_read(MV643XX_ETH_PORT_SERIAL_CONTROL_REG(port_num)); 2881 reg_data = mv_read(PORT_SERIAL_CONTROL_REG(port_num));
2350 reg_data &= ~(MV643XX_ETH_SERIAL_PORT_ENABLE | 2882 reg_data &= ~(SERIAL_PORT_ENABLE |
2351 MV643XX_ETH_DO_NOT_FORCE_LINK_FAIL | 2883 DO_NOT_FORCE_LINK_FAIL |
2352 MV643XX_ETH_FORCE_LINK_PASS); 2884 FORCE_LINK_PASS);
2353 mv_write(MV643XX_ETH_PORT_SERIAL_CONTROL_REG(port_num), reg_data); 2885 mv_write(PORT_SERIAL_CONTROL_REG(port_num), reg_data);
2354} 2886}
2355 2887
2356 2888
@@ -2385,7 +2917,7 @@ static void eth_port_read_smi_reg(unsigned int port_num,
2385 spin_lock_irqsave(&mv643xx_eth_phy_lock, flags); 2917 spin_lock_irqsave(&mv643xx_eth_phy_lock, flags);
2386 2918
2387 /* wait for the SMI register to become available */ 2919 /* wait for the SMI register to become available */
2388 for (i = 0; mv_read(MV643XX_ETH_SMI_REG) & ETH_SMI_BUSY; i++) { 2920 for (i = 0; mv_read(SMI_REG) & ETH_SMI_BUSY; i++) {
2389 if (i == PHY_WAIT_ITERATIONS) { 2921 if (i == PHY_WAIT_ITERATIONS) {
2390 printk("mv643xx PHY busy timeout, port %d\n", port_num); 2922 printk("mv643xx PHY busy timeout, port %d\n", port_num);
2391 goto out; 2923 goto out;
@@ -2393,11 +2925,11 @@ static void eth_port_read_smi_reg(unsigned int port_num,
2393 udelay(PHY_WAIT_MICRO_SECONDS); 2925 udelay(PHY_WAIT_MICRO_SECONDS);
2394 } 2926 }
2395 2927
2396 mv_write(MV643XX_ETH_SMI_REG, 2928 mv_write(SMI_REG,
2397 (phy_addr << 16) | (phy_reg << 21) | ETH_SMI_OPCODE_READ); 2929 (phy_addr << 16) | (phy_reg << 21) | ETH_SMI_OPCODE_READ);
2398 2930
2399 /* now wait for the data to be valid */ 2931 /* now wait for the data to be valid */
2400 for (i = 0; !(mv_read(MV643XX_ETH_SMI_REG) & ETH_SMI_READ_VALID); i++) { 2932 for (i = 0; !(mv_read(SMI_REG) & ETH_SMI_READ_VALID); i++) {
2401 if (i == PHY_WAIT_ITERATIONS) { 2933 if (i == PHY_WAIT_ITERATIONS) {
2402 printk("mv643xx PHY read timeout, port %d\n", port_num); 2934 printk("mv643xx PHY read timeout, port %d\n", port_num);
2403 goto out; 2935 goto out;
@@ -2405,7 +2937,7 @@ static void eth_port_read_smi_reg(unsigned int port_num,
2405 udelay(PHY_WAIT_MICRO_SECONDS); 2937 udelay(PHY_WAIT_MICRO_SECONDS);
2406 } 2938 }
2407 2939
2408 *value = mv_read(MV643XX_ETH_SMI_REG) & 0xffff; 2940 *value = mv_read(SMI_REG) & 0xffff;
2409out: 2941out:
2410 spin_unlock_irqrestore(&mv643xx_eth_phy_lock, flags); 2942 spin_unlock_irqrestore(&mv643xx_eth_phy_lock, flags);
2411} 2943}
@@ -2443,7 +2975,7 @@ static void eth_port_write_smi_reg(unsigned int eth_port_num,
2443 spin_lock_irqsave(&mv643xx_eth_phy_lock, flags); 2975 spin_lock_irqsave(&mv643xx_eth_phy_lock, flags);
2444 2976
2445 /* wait for the SMI register to become available */ 2977 /* wait for the SMI register to become available */
2446 for (i = 0; mv_read(MV643XX_ETH_SMI_REG) & ETH_SMI_BUSY; i++) { 2978 for (i = 0; mv_read(SMI_REG) & ETH_SMI_BUSY; i++) {
2447 if (i == PHY_WAIT_ITERATIONS) { 2979 if (i == PHY_WAIT_ITERATIONS) {
2448 printk("mv643xx PHY busy timeout, port %d\n", 2980 printk("mv643xx PHY busy timeout, port %d\n",
2449 eth_port_num); 2981 eth_port_num);
@@ -2452,7 +2984,7 @@ static void eth_port_write_smi_reg(unsigned int eth_port_num,
2452 udelay(PHY_WAIT_MICRO_SECONDS); 2984 udelay(PHY_WAIT_MICRO_SECONDS);
2453 } 2985 }
2454 2986
2455 mv_write(MV643XX_ETH_SMI_REG, (phy_addr << 16) | (phy_reg << 21) | 2987 mv_write(SMI_REG, (phy_addr << 16) | (phy_reg << 21) |
2456 ETH_SMI_OPCODE_WRITE | (value & 0xffff)); 2988 ETH_SMI_OPCODE_WRITE | (value & 0xffff));
2457out: 2989out:
2458 spin_unlock_irqrestore(&mv643xx_eth_phy_lock, flags); 2990 spin_unlock_irqrestore(&mv643xx_eth_phy_lock, flags);
diff --git a/drivers/net/mv643xx_eth.h b/drivers/net/mv643xx_eth.h
deleted file mode 100644
index be669eb23788..000000000000
--- a/drivers/net/mv643xx_eth.h
+++ /dev/null
@@ -1,370 +0,0 @@
1#ifndef __MV643XX_ETH_H__
2#define __MV643XX_ETH_H__
3
4#include <linux/module.h>
5#include <linux/kernel.h>
6#include <linux/spinlock.h>
7#include <linux/workqueue.h>
8#include <linux/mii.h>
9
10#include <linux/mv643xx.h>
11
12#include <asm/dma-mapping.h>
13
14/* Checksum offload for Tx works for most packets, but
15 * fails if previous packet sent did not use hw csum
16 */
17#define MV643XX_CHECKSUM_OFFLOAD_TX
18#define MV643XX_NAPI
19#define MV643XX_TX_FAST_REFILL
20#undef MV643XX_COAL
21
22/*
23 * Number of RX / TX descriptors on RX / TX rings.
24 * Note that allocating RX descriptors is done by allocating the RX
25 * ring AND a preallocated RX buffers (skb's) for each descriptor.
26 * The TX descriptors only allocates the TX descriptors ring,
27 * with no pre allocated TX buffers (skb's are allocated by higher layers.
28 */
29
30/* Default TX ring size is 1000 descriptors */
31#define MV643XX_DEFAULT_TX_QUEUE_SIZE 1000
32
33/* Default RX ring size is 400 descriptors */
34#define MV643XX_DEFAULT_RX_QUEUE_SIZE 400
35
36#define MV643XX_TX_COAL 100
37#ifdef MV643XX_COAL
38#define MV643XX_RX_COAL 100
39#endif
40
41#ifdef MV643XX_CHECKSUM_OFFLOAD_TX
42#define MAX_DESCS_PER_SKB (MAX_SKB_FRAGS + 1)
43#else
44#define MAX_DESCS_PER_SKB 1
45#endif
46
47#define ETH_VLAN_HLEN 4
48#define ETH_FCS_LEN 4
49#define ETH_HW_IP_ALIGN 2 /* hw aligns IP header */
50#define ETH_WRAPPER_LEN (ETH_HW_IP_ALIGN + ETH_HLEN + \
51 ETH_VLAN_HLEN + ETH_FCS_LEN)
52#define ETH_RX_SKB_SIZE (dev->mtu + ETH_WRAPPER_LEN + dma_get_cache_alignment())
53
54#define ETH_RX_QUEUES_ENABLED (1 << 0) /* use only Q0 for receive */
55#define ETH_TX_QUEUES_ENABLED (1 << 0) /* use only Q0 for transmit */
56
57#define ETH_INT_CAUSE_RX_DONE (ETH_RX_QUEUES_ENABLED << 2)
58#define ETH_INT_CAUSE_RX_ERROR (ETH_RX_QUEUES_ENABLED << 9)
59#define ETH_INT_CAUSE_RX (ETH_INT_CAUSE_RX_DONE | ETH_INT_CAUSE_RX_ERROR)
60#define ETH_INT_CAUSE_EXT 0x00000002
61#define ETH_INT_UNMASK_ALL (ETH_INT_CAUSE_RX | ETH_INT_CAUSE_EXT)
62
63#define ETH_INT_CAUSE_TX_DONE (ETH_TX_QUEUES_ENABLED << 0)
64#define ETH_INT_CAUSE_TX_ERROR (ETH_TX_QUEUES_ENABLED << 8)
65#define ETH_INT_CAUSE_TX (ETH_INT_CAUSE_TX_DONE | ETH_INT_CAUSE_TX_ERROR)
66#define ETH_INT_CAUSE_PHY 0x00010000
67#define ETH_INT_CAUSE_STATE 0x00100000
68#define ETH_INT_UNMASK_ALL_EXT (ETH_INT_CAUSE_TX | ETH_INT_CAUSE_PHY | \
69 ETH_INT_CAUSE_STATE)
70
71#define ETH_INT_MASK_ALL 0x00000000
72#define ETH_INT_MASK_ALL_EXT 0x00000000
73
74#define PHY_WAIT_ITERATIONS 1000 /* 1000 iterations * 10uS = 10mS max */
75#define PHY_WAIT_MICRO_SECONDS 10
76
77/* Buffer offset from buffer pointer */
78#define RX_BUF_OFFSET 0x2
79
80/* Gigabit Ethernet Unit Global Registers */
81
82/* MIB Counters register definitions */
83#define ETH_MIB_GOOD_OCTETS_RECEIVED_LOW 0x0
84#define ETH_MIB_GOOD_OCTETS_RECEIVED_HIGH 0x4
85#define ETH_MIB_BAD_OCTETS_RECEIVED 0x8
86#define ETH_MIB_INTERNAL_MAC_TRANSMIT_ERR 0xc
87#define ETH_MIB_GOOD_FRAMES_RECEIVED 0x10
88#define ETH_MIB_BAD_FRAMES_RECEIVED 0x14
89#define ETH_MIB_BROADCAST_FRAMES_RECEIVED 0x18
90#define ETH_MIB_MULTICAST_FRAMES_RECEIVED 0x1c
91#define ETH_MIB_FRAMES_64_OCTETS 0x20
92#define ETH_MIB_FRAMES_65_TO_127_OCTETS 0x24
93#define ETH_MIB_FRAMES_128_TO_255_OCTETS 0x28
94#define ETH_MIB_FRAMES_256_TO_511_OCTETS 0x2c
95#define ETH_MIB_FRAMES_512_TO_1023_OCTETS 0x30
96#define ETH_MIB_FRAMES_1024_TO_MAX_OCTETS 0x34
97#define ETH_MIB_GOOD_OCTETS_SENT_LOW 0x38
98#define ETH_MIB_GOOD_OCTETS_SENT_HIGH 0x3c
99#define ETH_MIB_GOOD_FRAMES_SENT 0x40
100#define ETH_MIB_EXCESSIVE_COLLISION 0x44
101#define ETH_MIB_MULTICAST_FRAMES_SENT 0x48
102#define ETH_MIB_BROADCAST_FRAMES_SENT 0x4c
103#define ETH_MIB_UNREC_MAC_CONTROL_RECEIVED 0x50
104#define ETH_MIB_FC_SENT 0x54
105#define ETH_MIB_GOOD_FC_RECEIVED 0x58
106#define ETH_MIB_BAD_FC_RECEIVED 0x5c
107#define ETH_MIB_UNDERSIZE_RECEIVED 0x60
108#define ETH_MIB_FRAGMENTS_RECEIVED 0x64
109#define ETH_MIB_OVERSIZE_RECEIVED 0x68
110#define ETH_MIB_JABBER_RECEIVED 0x6c
111#define ETH_MIB_MAC_RECEIVE_ERROR 0x70
112#define ETH_MIB_BAD_CRC_EVENT 0x74
113#define ETH_MIB_COLLISION 0x78
114#define ETH_MIB_LATE_COLLISION 0x7c
115
116/* Port serial status reg (PSR) */
117#define ETH_INTERFACE_PCM 0x00000001
118#define ETH_LINK_IS_UP 0x00000002
119#define ETH_PORT_AT_FULL_DUPLEX 0x00000004
120#define ETH_RX_FLOW_CTRL_ENABLED 0x00000008
121#define ETH_GMII_SPEED_1000 0x00000010
122#define ETH_MII_SPEED_100 0x00000020
123#define ETH_TX_IN_PROGRESS 0x00000080
124#define ETH_BYPASS_ACTIVE 0x00000100
125#define ETH_PORT_AT_PARTITION_STATE 0x00000200
126#define ETH_PORT_TX_FIFO_EMPTY 0x00000400
127
128/* SMI reg */
129#define ETH_SMI_BUSY 0x10000000 /* 0 - Write, 1 - Read */
130#define ETH_SMI_READ_VALID 0x08000000 /* 0 - Write, 1 - Read */
131#define ETH_SMI_OPCODE_WRITE 0 /* Completion of Read */
132#define ETH_SMI_OPCODE_READ 0x04000000 /* Operation is in progress */
133
134/* Interrupt Cause Register Bit Definitions */
135
136/* SDMA command status fields macros */
137
138/* Tx & Rx descriptors status */
139#define ETH_ERROR_SUMMARY 0x00000001
140
141/* Tx & Rx descriptors command */
142#define ETH_BUFFER_OWNED_BY_DMA 0x80000000
143
144/* Tx descriptors status */
145#define ETH_LC_ERROR 0
146#define ETH_UR_ERROR 0x00000002
147#define ETH_RL_ERROR 0x00000004
148#define ETH_LLC_SNAP_FORMAT 0x00000200
149
150/* Rx descriptors status */
151#define ETH_OVERRUN_ERROR 0x00000002
152#define ETH_MAX_FRAME_LENGTH_ERROR 0x00000004
153#define ETH_RESOURCE_ERROR 0x00000006
154#define ETH_VLAN_TAGGED 0x00080000
155#define ETH_BPDU_FRAME 0x00100000
156#define ETH_UDP_FRAME_OVER_IP_V_4 0x00200000
157#define ETH_OTHER_FRAME_TYPE 0x00400000
158#define ETH_LAYER_2_IS_ETH_V_2 0x00800000
159#define ETH_FRAME_TYPE_IP_V_4 0x01000000
160#define ETH_FRAME_HEADER_OK 0x02000000
161#define ETH_RX_LAST_DESC 0x04000000
162#define ETH_RX_FIRST_DESC 0x08000000
163#define ETH_UNKNOWN_DESTINATION_ADDR 0x10000000
164#define ETH_RX_ENABLE_INTERRUPT 0x20000000
165#define ETH_LAYER_4_CHECKSUM_OK 0x40000000
166
167/* Rx descriptors byte count */
168#define ETH_FRAME_FRAGMENTED 0x00000004
169
170/* Tx descriptors command */
171#define ETH_LAYER_4_CHECKSUM_FIRST_DESC 0x00000400
172#define ETH_FRAME_SET_TO_VLAN 0x00008000
173#define ETH_UDP_FRAME 0x00010000
174#define ETH_GEN_TCP_UDP_CHECKSUM 0x00020000
175#define ETH_GEN_IP_V_4_CHECKSUM 0x00040000
176#define ETH_ZERO_PADDING 0x00080000
177#define ETH_TX_LAST_DESC 0x00100000
178#define ETH_TX_FIRST_DESC 0x00200000
179#define ETH_GEN_CRC 0x00400000
180#define ETH_TX_ENABLE_INTERRUPT 0x00800000
181#define ETH_AUTO_MODE 0x40000000
182
183#define ETH_TX_IHL_SHIFT 11
184
/* typedefs */

/*
 * Return status of the driver's internal rx/tx ring helpers
 * (e.g. eth_port_receive() and eth_rx_return_buff() declared below).
 */
typedef enum _eth_func_ret_status {
	ETH_OK,			/* Returned as expected.		*/
	ETH_ERROR,		/* Fundamental error.			*/
	ETH_RETRY,		/* Could not process request. Try later.*/
	ETH_END_OF_JOB,		/* Ring has nothing to process.		*/
	ETH_QUEUE_FULL,		/* Ring resource error.			*/
	ETH_QUEUE_LAST_RESOURCE	/* Ring resources about to exhaust.	*/
} ETH_FUNC_RET_STATUS;
195
/*
 * Bus targets the ethernet unit can address.
 * NOTE(review): not referenced by any declaration visible in this
 * header; presumably consumed by the address-window/decode setup code
 * in the .c file -- confirm before relying on these values.
 */
typedef enum _eth_target {
	ETH_TARGET_DRAM,	/* system DRAM			*/
	ETH_TARGET_DEVICE,	/* device bus			*/
	ETH_TARGET_CBS,		/* TODO confirm: chip built-in SRAM? */
	ETH_TARGET_PCI0,	/* PCI bus 0			*/
	ETH_TARGET_PCI1		/* PCI bus 1			*/
} ETH_TARGET;
203
/*
 * Hardware DMA descriptor layouts, selected on host byte order.
 *
 * The hardware defines the descriptor as 32-bit little-endian words;
 * the two variants below arrange the C fields so that each member
 * lands on the same byte offsets either way.  Note that between the
 * two variants the pairs of u16 fields swap places inside their
 * 32-bit word, and the u32 words of each pair swap order -- do NOT
 * reorder or resize any field.
 */
#if defined(__BIG_ENDIAN)
struct eth_rx_desc {
	u16 byte_cnt;		/* Descriptor buffer byte count		*/
	u16 buf_size;		/* Buffer size				*/
	u32 cmd_sts;		/* Descriptor command status		*/
	u32 next_desc_ptr;	/* Next descriptor pointer		*/
	u32 buf_ptr;		/* Descriptor buffer pointer		*/
};

struct eth_tx_desc {
	u16 byte_cnt;		/* buffer byte count			*/
	u16 l4i_chk;		/* CPU provided TCP checksum		*/
	u32 cmd_sts;		/* Command/status field			*/
	u32 next_desc_ptr;	/* Pointer to next descriptor		*/
	u32 buf_ptr;		/* pointer to buffer for this descriptor*/
};

#elif defined(__LITTLE_ENDIAN)
struct eth_rx_desc {
	u32 cmd_sts;		/* Descriptor command status		*/
	u16 buf_size;		/* Buffer size				*/
	u16 byte_cnt;		/* Descriptor buffer byte count		*/
	u32 buf_ptr;		/* Descriptor buffer pointer		*/
	u32 next_desc_ptr;	/* Next descriptor pointer		*/
};

struct eth_tx_desc {
	u32 cmd_sts;		/* Command/status field			*/
	u16 l4i_chk;		/* CPU provided TCP checksum		*/
	u16 byte_cnt;		/* buffer byte count			*/
	u32 buf_ptr;		/* pointer to buffer for this descriptor*/
	u32 next_desc_ptr;	/* Pointer to next descriptor		*/
};
#else
#error One of __BIG_ENDIAN or __LITTLE_ENDIAN must be defined
#endif
243
/*
 * Unified packet descriptor for both Rx and Tx operations, so callers
 * are not required to be familiar with either the Tx or the Rx
 * hardware descriptor layout.
 */
struct pkt_info {
	unsigned short byte_cnt;	/* Descriptor buffer byte count	*/
	unsigned short l4i_chk;		/* Tx CPU provided TCP Checksum	*/
	unsigned int cmd_sts;		/* Descriptor command status	*/
	dma_addr_t buf_ptr;		/* Descriptor buffer pointer	*/
	struct sk_buff *return_info;	/* skb handed back to the caller
					 * that owns/frees the buffer	*/
};
253
/* Ethernet port specific information */

/*
 * Software image of the port's MIB statistics counters.
 * NOTE(review): field names presumably mirror the hardware MIB
 * registers one-to-one; only the two octet counters are 64-bit, all
 * frame/event counters are 32-bit.
 */
struct mv643xx_mib_counters {
	u64 good_octets_received;
	u32 bad_octets_received;
	u32 internal_mac_transmit_err;
	u32 good_frames_received;
	u32 bad_frames_received;
	u32 broadcast_frames_received;
	u32 multicast_frames_received;
	u32 frames_64_octets;
	u32 frames_65_to_127_octets;
	u32 frames_128_to_255_octets;
	u32 frames_256_to_511_octets;
	u32 frames_512_to_1023_octets;
	u32 frames_1024_to_max_octets;
	u64 good_octets_sent;
	u32 good_frames_sent;
	u32 excessive_collision;
	u32 multicast_frames_sent;
	u32 broadcast_frames_sent;
	u32 unrec_mac_control_received;
	u32 fc_sent;			/* flow-control frames sent	*/
	u32 good_fc_received;		/* flow-control frames received	*/
	u32 bad_fc_received;
	u32 undersize_received;
	u32 fragments_received;
	u32 oversize_received;
	u32 jabber_received;
	u32 mac_receive_error;
	u32 bad_crc_event;
	u32 collision;
	u32 late_collision;
};
288
/*
 * Per-port driver private state (the net_device priv area).
 * NOTE(review): the spinlock 'lock' presumably guards the ring index
 * and count fields shared between the fast path and control paths --
 * confirm the exact locking rules in mv643xx_eth.c.
 */
struct mv643xx_private {
	int port_num;			/* User Ethernet port number	*/

	u32 rx_sram_addr;		/* Base address of rx sram area	*/
	u32 rx_sram_size;		/* Size of rx sram area		*/
	u32 tx_sram_addr;		/* Base address of tx sram area	*/
	u32 tx_sram_size;		/* Size of tx sram area		*/

	int rx_resource_err;		/* Rx ring resource error flag	*/

	/* Tx/Rx rings management indexes fields. For driver use */

	/* Next available and first returning Rx resource */
	int rx_curr_desc_q, rx_used_desc_q;

	/* Next available and first returning Tx resource */
	int tx_curr_desc_q, tx_used_desc_q;

#ifdef MV643XX_TX_FAST_REFILL
	u32 tx_clean_threshold;
#endif

	/* Rx descriptor ring: CPU virtual base, DMA base, byte size,
	 * and one skb pointer per descriptor. */
	struct eth_rx_desc *p_rx_desc_area;
	dma_addr_t rx_desc_dma;
	int rx_desc_area_size;
	struct sk_buff **rx_skb;

	/* Tx descriptor ring: same layout as the rx ring above. */
	struct eth_tx_desc *p_tx_desc_area;
	dma_addr_t tx_desc_dma;
	int tx_desc_area_size;
	struct sk_buff **tx_skb;

	/* Deferred work for handling a tx watchdog timeout */
	struct work_struct tx_timeout_task;

	struct net_device *dev;
	struct napi_struct napi;
	struct net_device_stats stats;
	struct mv643xx_mib_counters mib_counters;
	spinlock_t lock;
	/* Size of Tx Ring per queue */
	int tx_ring_size;
	/* Number of tx descriptors in use */
	int tx_desc_count;
	/* Size of Rx Ring per queue */
	int rx_ring_size;
	/* Number of rx descriptors in use */
	int rx_desc_count;

	/*
	 * Used in case RX Ring is empty, which can be caused when
	 * system does not have resources (skb's)
	 */
	struct timer_list timeout;

	u32 rx_int_coal;		/* rx interrupt coalescing setting */
	u32 tx_int_coal;		/* tx interrupt coalescing setting */
	struct mii_if_info mii;		/* generic MII library state	   */
};
347
/* Port operation control routines */

/* One-time hardware init of the port described by mp. */
static void eth_port_init(struct mv643xx_private *mp);
/* Reset the given hardware port. */
static void eth_port_reset(unsigned int eth_port_num);
/* Bring the port up and start rx/tx for dev. */
static void eth_port_start(struct net_device *dev);

/* PHY and MIB routines */

/* Reset the PHY attached to the given port. */
static void ethernet_phy_reset(unsigned int eth_port_num);

/* Write 'value' to PHY register 'phy_reg' via the port's SMI interface. */
static void eth_port_write_smi_reg(unsigned int eth_port_num,
					unsigned int phy_reg, unsigned int value);

/* Read PHY register 'phy_reg' via SMI; result returned through *value. */
static void eth_port_read_smi_reg(unsigned int eth_port_num,
					unsigned int phy_reg, unsigned int *value);

/* Zero the port's MIB counters (presumably by reading the
 * clear-on-read hardware registers -- see the .c file). */
static void eth_clear_mib_counters(unsigned int eth_port_num);

/* Port data flow control routines */

/* Pull the next received packet off the rx ring into *p_pkt_info. */
static ETH_FUNC_RET_STATUS eth_port_receive(struct mv643xx_private *mp,
							struct pkt_info *p_pkt_info);
/* Return a refilled rx buffer described by *p_pkt_info to the rx ring. */
static ETH_FUNC_RET_STATUS eth_rx_return_buff(struct mv643xx_private *mp,
							struct pkt_info *p_pkt_info);
369
370#endif /* __MV643XX_ETH_H__ */