aboutsummaryrefslogtreecommitdiffstats
path: root/drivers/net/ethernet/agere
diff options
context:
space:
mode:
authorMark Einon <mark.einon@gmail.com>2014-09-30 17:29:46 -0400
committerDavid S. Miller <davem@davemloft.net>2014-10-03 15:22:19 -0400
commit38df6492eb511d2a6823303cb1a194c4fe423154 (patch)
tree178f95aa9898127f5dc16a9635db59dfe558a5e6 /drivers/net/ethernet/agere
parent739e4a758e0e2930f4bcdddd244254bae8dd7499 (diff)
et131x: Add PCIe gigabit ethernet driver et131x to drivers/net
This adds the ethernet driver for Agere et131x devices to drivers/net/ethernet. The driver being added has been in the staging tree for some time, and will be removed from there in a separate patch. This one merely disables the staging version to prevent two instances being built. Signed-off-by: Mark Einon <mark.einon@gmail.com> Signed-off-by: David S. Miller <davem@davemloft.net>
Diffstat (limited to 'drivers/net/ethernet/agere')
-rw-r--r--drivers/net/ethernet/agere/Kconfig31
-rw-r--r--drivers/net/ethernet/agere/Makefile5
-rw-r--r--drivers/net/ethernet/agere/et131x.c4121
-rw-r--r--drivers/net/ethernet/agere/et131x.h1433
4 files changed, 5590 insertions, 0 deletions
diff --git a/drivers/net/ethernet/agere/Kconfig b/drivers/net/ethernet/agere/Kconfig
new file mode 100644
index 000000000000..63e805de619e
--- /dev/null
+++ b/drivers/net/ethernet/agere/Kconfig
@@ -0,0 +1,31 @@
1#
2# Agere device configuration
3#
4
5config NET_VENDOR_AGERE
6 bool "Agere devices"
7 default y
8 depends on PCI
9 ---help---
10 If you have a network (Ethernet) card belonging to this class, say Y
11 and read the Ethernet-HOWTO, available from
12 <http://www.tldp.org/docs.html#howto>.
13
14 Note that the answer to this question doesn't directly affect the
15 kernel: saying N will just cause the configurator to skip all
16 the questions about Agere devices. If you say Y, you will be asked
17 for your specific card in the following questions.
18
19if NET_VENDOR_AGERE
20
21config ET131X
22 tristate "Agere ET-1310 Gigabit Ethernet support"
23 depends on PCI
24 select PHYLIB
25 ---help---
26 This driver supports Agere ET-1310 ethernet adapters.
27
28 To compile this driver as a module, choose M here. The module
29 will be called et131x.
30
31endif # NET_VENDOR_AGERE
diff --git a/drivers/net/ethernet/agere/Makefile b/drivers/net/ethernet/agere/Makefile
new file mode 100644
index 000000000000..027ff9453fe1
--- /dev/null
+++ b/drivers/net/ethernet/agere/Makefile
@@ -0,0 +1,5 @@
1#
2# Makefile for the Agere ET-131x ethernet driver
3#
4
5obj-$(CONFIG_ET131X) += et131x.o
diff --git a/drivers/net/ethernet/agere/et131x.c b/drivers/net/ethernet/agere/et131x.c
new file mode 100644
index 000000000000..384dc163851b
--- /dev/null
+++ b/drivers/net/ethernet/agere/et131x.c
@@ -0,0 +1,4121 @@
1/* Agere Systems Inc.
2 * 10/100/1000 Base-T Ethernet Driver for the ET1301 and ET131x series MACs
3 *
4 * Copyright © 2005 Agere Systems Inc.
5 * All rights reserved.
6 * http://www.agere.com
7 *
8 * Copyright (c) 2011 Mark Einon <mark.einon@gmail.com>
9 *
10 *------------------------------------------------------------------------------
11 *
12 * SOFTWARE LICENSE
13 *
14 * This software is provided subject to the following terms and conditions,
15 * which you should read carefully before using the software. Using this
16 * software indicates your acceptance of these terms and conditions. If you do
17 * not agree with these terms and conditions, do not use the software.
18 *
19 * Copyright © 2005 Agere Systems Inc.
20 * All rights reserved.
21 *
22 * Redistribution and use in source or binary forms, with or without
23 * modifications, are permitted provided that the following conditions are met:
24 *
25 * . Redistributions of source code must retain the above copyright notice, this
26 * list of conditions and the following Disclaimer as comments in the code as
27 * well as in the documentation and/or other materials provided with the
28 * distribution.
29 *
30 * . Redistributions in binary form must reproduce the above copyright notice,
31 * this list of conditions and the following Disclaimer in the documentation
32 * and/or other materials provided with the distribution.
33 *
34 * . Neither the name of Agere Systems Inc. nor the names of the contributors
35 * may be used to endorse or promote products derived from this software
36 * without specific prior written permission.
37 *
38 * Disclaimer
39 *
40 * THIS SOFTWARE IS PROVIDED "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES,
41 * INCLUDING, BUT NOT LIMITED TO, INFRINGEMENT AND THE IMPLIED WARRANTIES OF
42 * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. ANY
43 * USE, MODIFICATION OR DISTRIBUTION OF THIS SOFTWARE IS SOLELY AT THE USERS OWN
44 * RISK. IN NO EVENT SHALL AGERE SYSTEMS INC. OR CONTRIBUTORS BE LIABLE FOR ANY
45 * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
46 * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
47 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
48 * ON ANY THEORY OF LIABILITY, INCLUDING, BUT NOT LIMITED TO, CONTRACT, STRICT
49 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
50 * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH
51 * DAMAGE.
52 */
53
54#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
55
56#include <linux/pci.h>
57#include <linux/module.h>
58#include <linux/types.h>
59#include <linux/kernel.h>
60
61#include <linux/sched.h>
62#include <linux/ptrace.h>
63#include <linux/slab.h>
64#include <linux/ctype.h>
65#include <linux/string.h>
66#include <linux/timer.h>
67#include <linux/interrupt.h>
68#include <linux/in.h>
69#include <linux/delay.h>
70#include <linux/bitops.h>
71#include <linux/io.h>
72
73#include <linux/netdevice.h>
74#include <linux/etherdevice.h>
75#include <linux/skbuff.h>
76#include <linux/if_arp.h>
77#include <linux/ioport.h>
78#include <linux/crc32.h>
79#include <linux/random.h>
80#include <linux/phy.h>
81
82#include "et131x.h"
83
84MODULE_AUTHOR("Victor Soriano <vjsoriano@agere.com>");
85MODULE_AUTHOR("Mark Einon <mark.einon@gmail.com>");
86MODULE_LICENSE("Dual BSD/GPL");
87MODULE_DESCRIPTION("10/100/1000 Base-T Ethernet Driver for the ET1310 by Agere Systems");
88
89/* EEPROM defines */
90#define MAX_NUM_REGISTER_POLLS 1000
91#define MAX_NUM_WRITE_RETRIES 2
92
93/* MAC defines */
94#define COUNTER_WRAP_16_BIT 0x10000
95#define COUNTER_WRAP_12_BIT 0x1000
96
97/* PCI defines */
98#define INTERNAL_MEM_SIZE 0x400 /* 1024 of internal memory */
99#define INTERNAL_MEM_RX_OFFSET 0x1FF /* 50% Tx, 50% Rx */
100
101/* ISR defines */
102/* For interrupts, normal running is:
103 * rxdma_xfr_done, phy_interrupt, mac_stat_interrupt,
104 * watchdog_interrupt & txdma_xfer_done
105 *
106 * In both cases, when flow control is enabled for either Tx or bi-direction,
 * we additionally enable rx_fbr0_low and rx_fbr1_low, so we know when the
108 * buffer rings are running low.
109 */
110#define INT_MASK_DISABLE 0xffffffff
111
112/* NOTE: Masking out MAC_STAT Interrupt for now...
113 * #define INT_MASK_ENABLE 0xfff6bf17
114 * #define INT_MASK_ENABLE_NO_FLOW 0xfff6bfd7
115 */
116#define INT_MASK_ENABLE 0xfffebf17
117#define INT_MASK_ENABLE_NO_FLOW 0xfffebfd7
118
119/* General defines */
120/* Packet and header sizes */
121#define NIC_MIN_PACKET_SIZE 60
122
123/* Multicast list size */
124#define NIC_MAX_MCAST_LIST 128
125
126/* Supported Filters */
127#define ET131X_PACKET_TYPE_DIRECTED 0x0001
128#define ET131X_PACKET_TYPE_MULTICAST 0x0002
129#define ET131X_PACKET_TYPE_BROADCAST 0x0004
130#define ET131X_PACKET_TYPE_PROMISCUOUS 0x0008
131#define ET131X_PACKET_TYPE_ALL_MULTICAST 0x0010
132
133/* Tx Timeout */
134#define ET131X_TX_TIMEOUT (1 * HZ)
135#define NIC_SEND_HANG_THRESHOLD 0
136
137/* MP_ADAPTER flags */
138#define FMP_ADAPTER_INTERRUPT_IN_USE 0x00000008
139
140/* MP_SHARED flags */
141#define FMP_ADAPTER_LOWER_POWER 0x00200000
142
143#define FMP_ADAPTER_NON_RECOVER_ERROR 0x00800000
144#define FMP_ADAPTER_HARDWARE_ERROR 0x04000000
145
146#define FMP_ADAPTER_FAIL_SEND_MASK 0x3ff00000
147
148/* Some offsets in PCI config space that are actually used. */
149#define ET1310_PCI_MAC_ADDRESS 0xA4
150#define ET1310_PCI_EEPROM_STATUS 0xB2
151#define ET1310_PCI_ACK_NACK 0xC0
152#define ET1310_PCI_REPLAY 0xC2
153#define ET1310_PCI_L0L1LATENCY 0xCF
154
155/* PCI Product IDs */
156#define ET131X_PCI_DEVICE_ID_GIG 0xED00 /* ET1310 1000 Base-T 8 */
157#define ET131X_PCI_DEVICE_ID_FAST 0xED01 /* ET1310 100 Base-T */
158
159/* Define order of magnitude converter */
160#define NANO_IN_A_MICRO 1000
161
162#define PARM_RX_NUM_BUFS_DEF 4
163#define PARM_RX_TIME_INT_DEF 10
164#define PARM_RX_MEM_END_DEF 0x2bc
165#define PARM_TX_TIME_INT_DEF 40
166#define PARM_TX_NUM_BUFS_DEF 4
167#define PARM_DMA_CACHE_DEF 0
168
169/* RX defines */
170#define FBR_CHUNKS 32
171#define MAX_DESC_PER_RING_RX 1024
172
173/* number of RFDs - default and min */
174#define RFD_LOW_WATER_MARK 40
175#define NIC_DEFAULT_NUM_RFD 1024
176#define NUM_FBRS 2
177
178#define MAX_PACKETS_HANDLED 256
179
180#define ALCATEL_MULTICAST_PKT 0x01000000
181#define ALCATEL_BROADCAST_PKT 0x02000000
182
/* Free Buffer Descriptor: one hardware descriptor in a free buffer ring.
 * Carries the 64-bit DMA address of a receive buffer handed to the device.
 */
struct fbr_desc {
	u32 addr_lo;	/* low 32 bits of the buffer's DMA address */
	u32 addr_hi;	/* high 32 bits of the buffer's DMA address */
	u32 word2;	/* Bits 10-31 reserved, 0-9 descriptor */
};
189
190/* Packet Status Ring Descriptors
191 *
192 * Word 0:
193 *
194 * top 16 bits are from the Alcatel Status Word as enumerated in
195 * PE-MCXMAC Data Sheet IPD DS54 0210-1 (also IPD-DS80 0205-2)
196 *
197 * 0: hp hash pass
198 * 1: ipa IP checksum assist
199 * 2: ipp IP checksum pass
200 * 3: tcpa TCP checksum assist
201 * 4: tcpp TCP checksum pass
202 * 5: wol WOL Event
203 * 6: rxmac_error RXMAC Error Indicator
204 * 7: drop Drop packet
205 * 8: ft Frame Truncated
206 * 9: jp Jumbo Packet
207 * 10: vp VLAN Packet
208 * 11-15: unused
209 * 16: asw_prev_pkt_dropped e.g. IFG too small on previous
210 * 17: asw_RX_DV_event short receive event detected
211 * 18: asw_false_carrier_event bad carrier since last good packet
212 * 19: asw_code_err one or more nibbles signalled as errors
213 * 20: asw_CRC_err CRC error
214 * 21: asw_len_chk_err frame length field incorrect
215 * 22: asw_too_long frame length > 1518 bytes
216 * 23: asw_OK valid CRC + no code error
217 * 24: asw_multicast has a multicast address
218 * 25: asw_broadcast has a broadcast address
219 * 26: asw_dribble_nibble spurious bits after EOP
220 * 27: asw_control_frame is a control frame
221 * 28: asw_pause_frame is a pause frame
222 * 29: asw_unsupported_op unsupported OP code
223 * 30: asw_VLAN_tag VLAN tag detected
224 * 31: asw_long_evt Rx long event
225 *
226 * Word 1:
227 * 0-15: length length in bytes
228 * 16-25: bi Buffer Index
229 * 26-27: ri Ring Index
230 * 28-31: reserved
231 */
/* Packet Status Ring descriptor, written back by the device for each
 * received frame.  The bit layout of both words is documented in the
 * comment block above.
 */
struct pkt_stat_desc {
	u32 word0;	/* Alcatel status word: checksum/error/type flags */
	u32 word1;	/* 0-15 length, 16-25 buffer index, 26-27 ring index */
};
236
237/* Typedefs for the RX DMA status word */
238
239/* rx status word 0 holds part of the status bits of the Rx DMA engine
240 * that get copied out to memory by the ET-1310. Word 0 is a 32 bit word
241 * which contains the Free Buffer ring 0 and 1 available offset.
242 *
243 * bit 0-9 FBR1 offset
244 * bit 10 Wrap flag for FBR1
245 * bit 16-25 FBR0 offset
246 * bit 26 Wrap flag for FBR0
247 */
248
249/* RXSTAT_WORD1_t structure holds part of the status bits of the Rx DMA engine
250 * that get copied out to memory by the ET-1310. Word 3 is a 32 bit word
251 * which contains the Packet Status Ring available offset.
252 *
253 * bit 0-15 reserved
254 * bit 16-27 PSRoffset
255 * bit 28 PSRwrap
256 * bit 29-31 unused
257 */
258
/* struct rx_status_block is a structure representing the status of the Rx
 * DMA engine it sits in free memory, and is pointed to by 0x101c / 0x1020
 */
struct rx_status_block {
	u32 word0;	/* FBR0/FBR1 available offsets plus wrap flags */
	u32 word1;	/* Packet Status Ring offset (16-27) and wrap (28) */
};
266
/* Structure for look-up table holding free buffer ring pointers, addresses
 * and state.
 */
struct fbr_lookup {
	void *virt[MAX_DESC_PER_RING_RX];	/* CPU address of each buffer */
	u32 bus_high[MAX_DESC_PER_RING_RX];	/* high 32 bits of buffer DMA addr */
	u32 bus_low[MAX_DESC_PER_RING_RX];	/* low 32 bits of buffer DMA addr */
	void *ring_virtaddr;		/* descriptor ring, CPU view */
	dma_addr_t ring_physaddr;	/* descriptor ring, device view */
	/* Buffers are allocated in chunks of FBR_CHUNKS descriptors each;
	 * one virt/phys pair per chunk.
	 */
	void *mem_virtaddrs[MAX_DESC_PER_RING_RX / FBR_CHUNKS];
	dma_addr_t mem_physaddrs[MAX_DESC_PER_RING_RX / FBR_CHUNKS];
	u32 local_full;		/* driver-side ring index (DMA10/12-style, with wrap bit) */
	u32 num_entries;	/* number of descriptors in this ring */
	dma_addr_t buffsize;	/* size in bytes of each buffer in this ring */
};
282
/* struct rx_ring is the structure representing the adaptor's local
 * reference(s) to the rings
 */
struct rx_ring {
	struct fbr_lookup *fbr[NUM_FBRS];	/* the two free buffer rings */
	void *ps_ring_virtaddr;		/* packet status ring, CPU view */
	dma_addr_t ps_ring_physaddr;	/* packet status ring, device view */
	u32 local_psr_full;	/* driver-side PSR index (with wrap bit) */
	u32 psr_entries;	/* number of PSR descriptors */

	struct rx_status_block *rx_status_block;	/* DMA write-back area */
	dma_addr_t rx_status_bus;			/* its device address */

	struct list_head recv_list;	/* RFDs available for incoming frames */
	u32 num_ready_recv;		/* count of entries on recv_list */

	u32 num_rfd;	/* total RFDs allocated */

	/* set when the ISR leaves work for the softirq path — TODO confirm */
	bool unfinished_receives;
};
303
304/* TX defines */
305/* word 2 of the control bits in the Tx Descriptor ring for the ET-1310
306 *
307 * 0-15: length of packet
308 * 16-27: VLAN tag
309 * 28: VLAN CFI
310 * 29-31: VLAN priority
311 *
312 * word 3 of the control bits in the Tx Descriptor ring for the ET-1310
313 *
314 * 0: last packet in the sequence
315 * 1: first packet in the sequence
316 * 2: interrupt the processor when this pkt sent
317 * 3: Control word - no packet data
318 * 4: Issue half-duplex backpressure : XON/XOFF
319 * 5: send pause frame
320 * 6: Tx frame has error
321 * 7: append CRC
322 * 8: MAC override
323 * 9: pad packet
324 * 10: Packet is a Huge packet
325 * 11: append VLAN tag
326 * 12: IP checksum assist
327 * 13: TCP checksum assist
328 * 14: UDP checksum assist
329 */
330#define TXDESC_FLAG_LASTPKT 0x0001
331#define TXDESC_FLAG_FIRSTPKT 0x0002
332#define TXDESC_FLAG_INTPROC 0x0004
333
/* struct tx_desc represents each descriptor on the ring */
struct tx_desc {
	u32 addr_hi;	/* high 32 bits of the fragment's DMA address */
	u32 addr_lo;	/* low 32 bits of the fragment's DMA address */
	u32 len_vlan;	/* control words how to xmit the */
	u32 flags;	/* data (detailed above) */
};
341
342/* The status of the Tx DMA engine it sits in free memory, and is pointed to
343 * by 0x101c / 0x1020. This is a DMA10 type
344 */
345
/* TCB (Transmit Control Block: Host Side)
 * One per in-flight transmit packet; ties an skb to its span of
 * descriptors on the hardware ring.
 */
struct tcb {
	struct tcb *next;	/* Next entry in ring */
	u32 count;		/* Used to spot stuck/lost packets */
	u32 stale;		/* Used to spot stuck/lost packets */
	struct sk_buff *skb;	/* Network skb we are tied to */
	u32 index;		/* Ring indexes */
	u32 index_start;
};
355
/* Structure representing our local reference(s) to the ring */
struct tx_ring {
	/* TCB (Transmit Control Block) memory and lists */
	struct tcb *tcb_ring;

	/* List of TCBs that are ready to be used */
	struct tcb *tcb_qhead;
	struct tcb *tcb_qtail;

	/* list of TCBs that are currently being sent. */
	struct tcb *send_head;
	struct tcb *send_tail;
	int used;	/* presumably count of TCBs in flight — confirm in tx path */

	/* The actual descriptor ring */
	struct tx_desc *tx_desc_ring;
	dma_addr_t tx_desc_ring_pa;	/* its device (DMA) address */

	/* send_idx indicates where we last wrote to in the descriptor ring. */
	u32 send_idx;

	/* The location of the write-back status block */
	u32 *tx_status;
	dma_addr_t tx_status_pa;

	/* Packets since the last IRQ: used for interrupt coalescing */
	int since_irq;
};
384
385/* Do not change these values: if changed, then change also in respective
386 * TXdma and Rxdma engines
387 */
388#define NUM_DESC_PER_RING_TX 512 /* TX Do not change these values */
389#define NUM_TCB 64
390
391/* These values are all superseded by registry entries to facilitate tuning.
392 * Once the desired performance has been achieved, the optimal registry values
393 * should be re-populated to these #defines:
394 */
395#define TX_ERROR_PERIOD 1000
396
397#define LO_MARK_PERCENT_FOR_PSR 15
398#define LO_MARK_PERCENT_FOR_RX 15
399
/* RFD (Receive Frame Descriptor): driver-side record of one received
 * frame, kept on the rx_ring recv_list.
 */
struct rfd {
	struct list_head list_node;	/* linkage on rx_ring.recv_list */
	struct sk_buff *skb;		/* buffer holding the frame data */
	u32 len;	/* total size of receive frame */
	u16 bufferindex;	/* free-buffer-ring entry the frame used */
	u8 ringindex;		/* which FBR (0 or 1) supplied the buffer */
};
408
409/* Flow Control */
410#define FLOW_BOTH 0
411#define FLOW_TXONLY 1
412#define FLOW_RXONLY 2
413#define FLOW_NONE 3
414
/* Struct to define some device statistics
 * Software-maintained counters, updated from the MAC-stat registers and
 * the rx/tx paths; reported via the netdev stats callbacks.
 */
struct ce_stats {
	u32 multicast_pkts_rcvd;
	u32 rcvd_pkts_dropped;

	/* transmit-side error counters */
	u32 tx_underflows;
	u32 tx_collisions;
	u32 tx_excessive_collisions;
	u32 tx_first_collisions;
	u32 tx_late_collisions;
	u32 tx_max_pkt_errs;
	u32 tx_deferred;

	/* receive-side error counters */
	u32 rx_overflows;
	u32 rx_length_errs;
	u32 rx_align_errs;
	u32 rx_crc_errs;
	u32 rx_code_violations;
	u32 rx_other_errs;

	u32 interrupt_status;	/* last latched interrupt status word */
};
437
/* The private adapter structure — netdev_priv() data for one ET131x NIC. */
struct et131x_adapter {
	struct net_device *netdev;	/* our network interface */
	struct pci_dev *pdev;		/* underlying PCI function */
	struct mii_bus *mii_bus;	/* MDIO bus for the PHY */
	struct phy_device *phydev;	/* attached PHY */
	struct napi_struct napi;	/* NAPI context for rx polling */

	/* Flags that indicate current state of the adapter */
	u32 flags;

	/* local link state, to determine if a state change has occurred */
	int link;

	/* Configuration */
	u8 rom_addr[ETH_ALEN];	/* MAC address as read from EEPROM/ROM */
	u8 addr[ETH_ALEN];	/* MAC address currently in use */
	bool has_eeprom;
	u8 eeprom_data[2];	/* LED behavior bytes (EEPROM 0x70/0x71) */

	spinlock_t tcb_send_qlock;	/* protects the tx_ring send tcb list */
	spinlock_t tcb_ready_qlock;	/* protects the tx_ring ready tcb list */
	spinlock_t rcv_lock;		/* protects the rx_ring receive list */

	/* Packet Filter and look ahead size */
	u32 packet_filter;

	/* multicast list */
	u32 multicast_addr_count;
	u8 multicast_list[NIC_MAX_MCAST_LIST][ETH_ALEN];

	/* Pointer to the device's PCI register space */
	struct address_map __iomem *regs;

	/* Registry parameters */
	u8 wanted_flow;		/* Flow we want for 802.3x flow control */
	u32 registry_jumbo_packet;	/* Max supported ethernet packet size */

	/* Derived from the registry: */
	u8 flow;		/* flow control validated by the far-end */

	/* Minimize init-time */
	struct timer_list error_timer;

	/* variable putting the phy into coma mode when boot up with no cable
	 * plugged in after 5 seconds
	 */
	u8 boot_coma;

	/* Tx Memory Variables */
	struct tx_ring tx_ring;

	/* Rx Memory Variables */
	struct rx_ring rx_ring;

	struct ce_stats stats;
};
495
496static int eeprom_wait_ready(struct pci_dev *pdev, u32 *status)
497{
498 u32 reg;
499 int i;
500
501 /* 1. Check LBCIF Status Register for bits 6 & 3:2 all equal to 0 and
502 * bits 7,1:0 both equal to 1, at least once after reset.
503 * Subsequent operations need only to check that bits 1:0 are equal
504 * to 1 prior to starting a single byte read/write
505 */
506 for (i = 0; i < MAX_NUM_REGISTER_POLLS; i++) {
507 if (pci_read_config_dword(pdev, LBCIF_DWORD1_GROUP, &reg))
508 return -EIO;
509
510 /* I2C idle and Phy Queue Avail both true */
511 if ((reg & 0x3000) == 0x3000) {
512 if (status)
513 *status = reg;
514 return reg & 0xFF;
515 }
516 }
517 return -ETIMEDOUT;
518}
519
/* eeprom_write - write a single byte to the EEPROM via the LBCIF I2C bridge.
 * @adapter: adapter whose EEPROM is written
 * @addr: EEPROM byte address
 * @data: byte value to store
 *
 * Returns 0 on success, a negative errno on failure.
 */
static int eeprom_write(struct et131x_adapter *adapter, u32 addr, u8 data)
{
	struct pci_dev *pdev = adapter->pdev;
	int index = 0;
	int retries;
	int err = 0;
	int writeok = 0;
	u32 status;
	u32 val = 0;

	/* For an EEPROM, an I2C single byte write is defined as a START
	 * condition followed by the device address, EEPROM address, one byte
	 * of data and a STOP condition. The STOP condition will trigger the
	 * EEPROM's internally timed write cycle to the nonvolatile memory.
	 * All inputs are disabled during this write cycle and the EEPROM will
	 * not respond to any access until the internal write is complete.
	 */
	err = eeprom_wait_ready(pdev, NULL);
	if (err < 0)
		return err;

	/* 2. Write to the LBCIF Control Register: bit 7=1, bit 6=1, bit 3=0,
	 * and bits 1:0 both =0. Bit 5 should be set according to the
	 * type of EEPROM being accessed (1=two byte addressing, 0=one
	 * byte addressing).
	 */
	if (pci_write_config_byte(pdev, LBCIF_CONTROL_REGISTER,
				  LBCIF_CONTROL_LBCIF_ENABLE |
				  LBCIF_CONTROL_I2C_WRITE))
		return -EIO;

	/* Prepare EEPROM address for Step 3 */
	for (retries = 0; retries < MAX_NUM_WRITE_RETRIES; retries++) {
		if (pci_write_config_dword(pdev, LBCIF_ADDRESS_REGISTER, addr))
			break;
		/* Write the data to the LBCIF Data Register (the I2C write
		 * will begin).
		 */
		if (pci_write_config_byte(pdev, LBCIF_DATA_REGISTER, data))
			break;
		/* Monitor bit 1:0 of the LBCIF Status Register. When bits
		 * 1:0 are both equal to 1, the I2C write has completed and the
		 * internal write cycle of the EEPROM is about to start.
		 * (bits 1:0 = 01 is a legal state while waiting from both
		 * equal to 1, but bits 1:0 = 10 is invalid and implies that
		 * something is broken).
		 */
		err = eeprom_wait_ready(pdev, &status);
		/* NOTE(review): a wait timeout here is reported as success
		 * (0) rather than the error code — looks suspicious but is
		 * preserved as-is; confirm intent before changing.
		 */
		if (err < 0)
			return 0;

		/* Check bit 3 of the LBCIF Status Register. If equal to 1,
		 * an error has occurred. Don't break here if we are revision
		 * 1, this is so we do a blind write for load bug.
		 */
		if ((status & LBCIF_STATUS_GENERAL_ERROR) &&
		    adapter->pdev->revision == 0)
			break;

		/* Check bit 2 of the LBCIF Status Register. If equal to 1 an
		 * ACK error has occurred on the address phase of the write.
		 * This could be due to an actual hardware failure or the
		 * EEPROM may still be in its internal write cycle from a
		 * previous write. This write operation was ignored and must be
		 * repeated later.
		 */
		if (status & LBCIF_STATUS_ACK_ERROR) {
			/* This could be due to an actual hardware failure
			 * or the EEPROM may still be in its internal write
			 * cycle from a previous write. This write operation
			 * was ignored and must be repeated later.
			 */
			udelay(10);
			continue;
		}

		writeok = 1;
		break;
	}

	udelay(10);

	while (1) {
		if (pci_write_config_byte(pdev, LBCIF_CONTROL_REGISTER,
					  LBCIF_CONTROL_LBCIF_ENABLE))
			writeok = 0;

		/* Do read until internal ACK_ERROR goes away meaning write
		 * completed
		 */
		do {
			pci_write_config_dword(pdev,
					       LBCIF_ADDRESS_REGISTER,
					       addr);
			do {
				pci_read_config_dword(pdev,
						      LBCIF_DATA_REGISTER,
						      &val);
			} while ((val & 0x00010000) == 0);
		} while (val & 0x00040000);

		/* Give up after 10000 polls of the completion status */
		if ((val & 0xFF00) != 0xC000 || index == 10000)
			break;
		index++;
	}
	return writeok ? 0 : -EIO;
}
627
/* eeprom_read - read a single byte from the EEPROM via the LBCIF I2C bridge.
 * @adapter: adapter whose EEPROM is read
 * @addr: EEPROM byte address
 * @pdata: receives the byte read
 *
 * Returns 0 on success, a negative errno on failure.
 */
static int eeprom_read(struct et131x_adapter *adapter, u32 addr, u8 *pdata)
{
	struct pci_dev *pdev = adapter->pdev;
	int err;
	u32 status;

	/* A single byte read is similar to the single byte write, with the
	 * exception of the data flow:
	 */
	err = eeprom_wait_ready(pdev, NULL);
	if (err < 0)
		return err;
	/* Write to the LBCIF Control Register: bit 7=1, bit 6=0, bit 3=0,
	 * and bits 1:0 both =0. Bit 5 should be set according to the type
	 * of EEPROM being accessed (1=two byte addressing, 0=one byte
	 * addressing).
	 */
	if (pci_write_config_byte(pdev, LBCIF_CONTROL_REGISTER,
				  LBCIF_CONTROL_LBCIF_ENABLE))
		return -EIO;
	/* Write the address to the LBCIF Address Register (I2C read will
	 * begin).
	 */
	if (pci_write_config_dword(pdev, LBCIF_ADDRESS_REGISTER, addr))
		return -EIO;
	/* Monitor bit 0 of the LBCIF Status Register. When = 1, I2C read
	 * is complete. (if bit 1 =1 and bit 0 stays = 0, a hardware failure
	 * has occurred).
	 */
	err = eeprom_wait_ready(pdev, &status);
	if (err < 0)
		return err;
	/* Regardless of error status, read data byte from LBCIF Data
	 * Register.  (eeprom_wait_ready() returned the low byte of the
	 * LBCIF dword group, which carries the data just read.)
	 */
	*pdata = err;

	return (status & LBCIF_STATUS_ACK_ERROR) ? -EIO : 0;
}
667
668static int et131x_init_eeprom(struct et131x_adapter *adapter)
669{
670 struct pci_dev *pdev = adapter->pdev;
671 u8 eestatus;
672
673 pci_read_config_byte(pdev, ET1310_PCI_EEPROM_STATUS, &eestatus);
674
675 /* THIS IS A WORKAROUND:
676 * I need to call this function twice to get my card in a
677 * LG M1 Express Dual running. I tried also a msleep before this
678 * function, because I thought there could be some time conditions
679 * but it didn't work. Call the whole function twice also work.
680 */
681 if (pci_read_config_byte(pdev, ET1310_PCI_EEPROM_STATUS, &eestatus)) {
682 dev_err(&pdev->dev,
683 "Could not read PCI config space for EEPROM Status\n");
684 return -EIO;
685 }
686
687 /* Determine if the error(s) we care about are present. If they are
688 * present we need to fail.
689 */
690 if (eestatus & 0x4C) {
691 int write_failed = 0;
692
693 if (pdev->revision == 0x01) {
694 int i;
695 static const u8 eedata[4] = { 0xFE, 0x13, 0x10, 0xFF };
696
697 /* Re-write the first 4 bytes if we have an eeprom
698 * present and the revision id is 1, this fixes the
699 * corruption seen with 1310 B Silicon
700 */
701 for (i = 0; i < 3; i++)
702 if (eeprom_write(adapter, i, eedata[i]) < 0)
703 write_failed = 1;
704 }
705 if (pdev->revision != 0x01 || write_failed) {
706 dev_err(&pdev->dev,
707 "Fatal EEPROM Status Error - 0x%04x\n",
708 eestatus);
709
710 /* This error could mean that there was an error
711 * reading the eeprom or that the eeprom doesn't exist.
712 * We will treat each case the same and not try to
713 * gather additional information that normally would
714 * come from the eeprom, like MAC Address
715 */
716 adapter->has_eeprom = 0;
717 return -EIO;
718 }
719 }
720 adapter->has_eeprom = 1;
721
722 /* Read the EEPROM for information regarding LED behavior. Refer to
723 * et131x_xcvr_init() for its use.
724 */
725 eeprom_read(adapter, 0x70, &adapter->eeprom_data[0]);
726 eeprom_read(adapter, 0x71, &adapter->eeprom_data[1]);
727
728 if (adapter->eeprom_data[0] != 0xcd)
729 /* Disable all optional features */
730 adapter->eeprom_data[1] = 0x00;
731
732 return 0;
733}
734
735static void et131x_rx_dma_enable(struct et131x_adapter *adapter)
736{
737 /* Setup the receive dma configuration register for normal operation */
738 u32 csr = ET_RXDMA_CSR_FBR1_ENABLE;
739 struct rx_ring *rx_ring = &adapter->rx_ring;
740
741 if (rx_ring->fbr[1]->buffsize == 4096)
742 csr |= ET_RXDMA_CSR_FBR1_SIZE_LO;
743 else if (rx_ring->fbr[1]->buffsize == 8192)
744 csr |= ET_RXDMA_CSR_FBR1_SIZE_HI;
745 else if (rx_ring->fbr[1]->buffsize == 16384)
746 csr |= ET_RXDMA_CSR_FBR1_SIZE_LO | ET_RXDMA_CSR_FBR1_SIZE_HI;
747
748 csr |= ET_RXDMA_CSR_FBR0_ENABLE;
749 if (rx_ring->fbr[0]->buffsize == 256)
750 csr |= ET_RXDMA_CSR_FBR0_SIZE_LO;
751 else if (rx_ring->fbr[0]->buffsize == 512)
752 csr |= ET_RXDMA_CSR_FBR0_SIZE_HI;
753 else if (rx_ring->fbr[0]->buffsize == 1024)
754 csr |= ET_RXDMA_CSR_FBR0_SIZE_LO | ET_RXDMA_CSR_FBR0_SIZE_HI;
755 writel(csr, &adapter->regs->rxdma.csr);
756
757 csr = readl(&adapter->regs->rxdma.csr);
758 if (csr & ET_RXDMA_CSR_HALT_STATUS) {
759 udelay(5);
760 csr = readl(&adapter->regs->rxdma.csr);
761 if (csr & ET_RXDMA_CSR_HALT_STATUS) {
762 dev_err(&adapter->pdev->dev,
763 "RX Dma failed to exit halt state. CSR 0x%08x\n",
764 csr);
765 }
766 }
767}
768
769static void et131x_rx_dma_disable(struct et131x_adapter *adapter)
770{
771 u32 csr;
772 /* Setup the receive dma configuration register */
773 writel(ET_RXDMA_CSR_HALT | ET_RXDMA_CSR_FBR1_ENABLE,
774 &adapter->regs->rxdma.csr);
775 csr = readl(&adapter->regs->rxdma.csr);
776 if (!(csr & ET_RXDMA_CSR_HALT_STATUS)) {
777 udelay(5);
778 csr = readl(&adapter->regs->rxdma.csr);
779 if (!(csr & ET_RXDMA_CSR_HALT_STATUS))
780 dev_err(&adapter->pdev->dev,
781 "RX Dma failed to enter halt state. CSR 0x%08x\n",
782 csr);
783 }
784}
785
786static void et131x_tx_dma_enable(struct et131x_adapter *adapter)
787{
788 /* Setup the transmit dma configuration register for normal
789 * operation
790 */
791 writel(ET_TXDMA_SNGL_EPKT | (PARM_DMA_CACHE_DEF << ET_TXDMA_CACHE_SHIFT),
792 &adapter->regs->txdma.csr);
793}
794
795static inline void add_10bit(u32 *v, int n)
796{
797 *v = INDEX10(*v + n) | (*v & ET_DMA10_WRAP);
798}
799
800static inline void add_12bit(u32 *v, int n)
801{
802 *v = INDEX12(*v + n) | (*v & ET_DMA12_WRAP);
803}
804
/* et1310_config_mac_regs1 - first-stage MAC configuration.
 * @adapter: adapter being configured
 *
 * Resets the MAC, then programs the inter-packet gap, half-duplex
 * parameters, station address (used for pause frames) and maximum
 * frame length, and finally releases the reset.
 */
static void et1310_config_mac_regs1(struct et131x_adapter *adapter)
{
	struct mac_regs __iomem *macregs = &adapter->regs->mac;
	u32 station1;
	u32 station2;
	u32 ipg;

	/* First we need to reset everything. Write to MAC configuration
	 * register 1 to perform reset.
	 */
	writel(ET_MAC_CFG1_SOFT_RESET | ET_MAC_CFG1_SIM_RESET |
	       ET_MAC_CFG1_RESET_RXMC | ET_MAC_CFG1_RESET_TXMC |
	       ET_MAC_CFG1_RESET_RXFUNC | ET_MAC_CFG1_RESET_TXFUNC,
	       &macregs->cfg1);

	/* Next lets configure the MAC Inter-packet gap register */
	ipg = 0x38005860;	/* IPG1 0x38 IPG2 0x58 B2B 0x60 */
	ipg |= 0x50 << 8;	/* ifg enforce 0x50 */
	writel(ipg, &macregs->ipg);

	/* Next lets configure the MAC Half Duplex register */
	/* BEB trunc 0xA, Ex Defer, Rexmit 0xF Coll 0x37 */
	writel(0x00A1F037, &macregs->hfdp);

	/* Next lets configure the MAC Interface Control register */
	writel(0, &macregs->if_ctrl);

	/* Reset the MII management interface clock */
	writel(ET_MAC_MIIMGMT_CLK_RST, &macregs->mii_mgmt_cfg);

	/* Next lets configure the MAC Station Address register. These
	 * values are read from the EEPROM during initialization and stored
	 * in the adapter structure. We write what is stored in the adapter
	 * structure to the MAC Station Address registers high and low. This
	 * station address is used for generating and checking pause control
	 * packets.
	 */
	station2 = (adapter->addr[1] << ET_MAC_STATION_ADDR2_OC2_SHIFT) |
		   (adapter->addr[0] << ET_MAC_STATION_ADDR2_OC1_SHIFT);
	station1 = (adapter->addr[5] << ET_MAC_STATION_ADDR1_OC6_SHIFT) |
		   (adapter->addr[4] << ET_MAC_STATION_ADDR1_OC5_SHIFT) |
		   (adapter->addr[3] << ET_MAC_STATION_ADDR1_OC4_SHIFT) |
		   adapter->addr[2];
	writel(station1, &macregs->station_addr_1);
	writel(station2, &macregs->station_addr_2);

	/* Max ethernet packet in bytes that will be passed by the mac without
	 * being truncated. Allow the MAC to pass 4 more than our max packet
	 * size. This is 4 for the Ethernet CRC.
	 *
	 * Packets larger than (registry_jumbo_packet) that do not contain a
	 * VLAN ID will be dropped by the Rx function.
	 */
	writel(adapter->registry_jumbo_packet + 4, &macregs->max_fm_len);

	/* clear out MAC config reset */
	writel(0, &macregs->cfg1);
}
862
/* et1310_config_mac_regs2 - second-stage MAC configuration.
 * @adapter: adapter being configured
 *
 * Programs the MAC for the speed/duplex negotiated by the PHY, enables
 * rx/tx and the requested 802.3x flow-control direction, waits for the
 * MAC's synced-enable bits, then enables the TX MAC.  When coming out
 * of lower power the DMA engines are re-enabled as well.
 */
static void et1310_config_mac_regs2(struct et131x_adapter *adapter)
{
	int32_t delay = 0;
	struct mac_regs __iomem *mac = &adapter->regs->mac;
	struct phy_device *phydev = adapter->phydev;
	u32 cfg1;
	u32 cfg2;
	u32 ifctrl;
	u32 ctl;

	ctl = readl(&adapter->regs->txmac.ctl);
	cfg1 = readl(&mac->cfg1);
	cfg2 = readl(&mac->cfg2);
	ifctrl = readl(&mac->if_ctrl);

	/* Set up the if mode bits */
	cfg2 &= ~ET_MAC_CFG2_IFMODE_MASK;
	if (phydev->speed == SPEED_1000) {
		cfg2 |= ET_MAC_CFG2_IFMODE_1000;
		ifctrl &= ~ET_MAC_IFCTRL_PHYMODE;
	} else {
		/* 10/100 share the same interface mode */
		cfg2 |= ET_MAC_CFG2_IFMODE_100;
		ifctrl |= ET_MAC_IFCTRL_PHYMODE;
	}

	cfg1 |= ET_MAC_CFG1_RX_ENABLE | ET_MAC_CFG1_TX_ENABLE |
		ET_MAC_CFG1_TX_FLOW;

	/* Honour received pause frames only for RXONLY/BOTH flow modes */
	cfg1 &= ~(ET_MAC_CFG1_LOOPBACK | ET_MAC_CFG1_RX_FLOW);
	if (adapter->flow == FLOW_RXONLY || adapter->flow == FLOW_BOTH)
		cfg1 |= ET_MAC_CFG1_RX_FLOW;
	writel(cfg1, &mac->cfg1);

	/* Now we need to initialize the MAC Configuration 2 register */
	/* preamble 7, check length, huge frame off, pad crc, crc enable
	 * full duplex off
	 */
	cfg2 |= 0x7 << ET_MAC_CFG2_PREAMBLE_SHIFT;
	cfg2 |= ET_MAC_CFG2_IFMODE_LEN_CHECK;
	cfg2 |= ET_MAC_CFG2_IFMODE_PAD_CRC;
	cfg2 |= ET_MAC_CFG2_IFMODE_CRC_ENABLE;
	cfg2 &= ~ET_MAC_CFG2_IFMODE_HUGE_FRAME;
	cfg2 &= ~ET_MAC_CFG2_IFMODE_FULL_DPLX;

	if (phydev->duplex == DUPLEX_FULL)
		cfg2 |= ET_MAC_CFG2_IFMODE_FULL_DPLX;

	ifctrl &= ~ET_MAC_IFCTRL_GHDMODE;
	if (phydev->duplex == DUPLEX_HALF)
		ifctrl |= ET_MAC_IFCTRL_GHDMODE;

	writel(ifctrl, &mac->if_ctrl);
	writel(cfg2, &mac->cfg2);

	/* Wait (up to 100 * 10us) for the syncd rx/tx enable bits */
	do {
		udelay(10);
		delay++;
		cfg1 = readl(&mac->cfg1);
	} while ((cfg1 & ET_MAC_CFG1_WAIT) != ET_MAC_CFG1_WAIT && delay < 100);

	if (delay == 100) {
		dev_warn(&adapter->pdev->dev,
			 "Syncd bits did not respond correctly cfg1 word 0x%08x\n",
			 cfg1);
	}

	ctl |= ET_TX_CTRL_TXMAC_ENABLE | ET_TX_CTRL_FC_DISABLE;
	writel(ctl, &adapter->regs->txmac.ctl);

	/* Re-enable the DMA engines when leaving lower power state */
	if (adapter->flags & FMP_ADAPTER_LOWER_POWER) {
		et131x_rx_dma_enable(adapter);
		et131x_tx_dma_enable(adapter);
	}
}
937
938static int et1310_in_phy_coma(struct et131x_adapter *adapter)
939{
940 u32 pmcsr = readl(&adapter->regs->global.pm_csr);
941
942 return ET_PM_PHY_SW_COMA & pmcsr ? 1 : 0;
943}
944
945static void et1310_setup_device_for_multicast(struct et131x_adapter *adapter)
946{
947 struct rxmac_regs __iomem *rxmac = &adapter->regs->rxmac;
948 u32 hash1 = 0;
949 u32 hash2 = 0;
950 u32 hash3 = 0;
951 u32 hash4 = 0;
952 u32 pm_csr;
953
954 /* If ET131X_PACKET_TYPE_MULTICAST is specified, then we provision
955 * the multi-cast LIST. If it is NOT specified, (and "ALL" is not
956 * specified) then we should pass NO multi-cast addresses to the
957 * driver.
958 */
959 if (adapter->packet_filter & ET131X_PACKET_TYPE_MULTICAST) {
960 int i;
961
962 /* Loop through our multicast array and set up the device */
963 for (i = 0; i < adapter->multicast_addr_count; i++) {
964 u32 result;
965
966 result = ether_crc(6, adapter->multicast_list[i]);
967
968 result = (result & 0x3F800000) >> 23;
969
970 if (result < 32) {
971 hash1 |= (1 << result);
972 } else if ((31 < result) && (result < 64)) {
973 result -= 32;
974 hash2 |= (1 << result);
975 } else if ((63 < result) && (result < 96)) {
976 result -= 64;
977 hash3 |= (1 << result);
978 } else {
979 result -= 96;
980 hash4 |= (1 << result);
981 }
982 }
983 }
984
985 /* Write out the new hash to the device */
986 pm_csr = readl(&adapter->regs->global.pm_csr);
987 if (!et1310_in_phy_coma(adapter)) {
988 writel(hash1, &rxmac->multi_hash1);
989 writel(hash2, &rxmac->multi_hash2);
990 writel(hash3, &rxmac->multi_hash3);
991 writel(hash4, &rxmac->multi_hash4);
992 }
993}
994
/* et1310_setup_device_for_unicast - program the RXMAC unicast packet
 * filters with the adapter's station address.
 */
static void et1310_setup_device_for_unicast(struct et131x_adapter *adapter)
{
	struct rxmac_regs __iomem *rxmac = &adapter->regs->rxmac;
	u32 uni_pf1;
	u32 uni_pf2;
	u32 uni_pf3;
	u32 pm_csr;

	/* Set up unicast packet filter reg 3 to be the first two octets of
	 * the MAC address for both address
	 *
	 * Set up unicast packet filter reg 2 to be the octets 2 - 5 of the
	 * MAC address for second address
	 *
	 * Set up unicast packet filter reg 1 to be the octets 2 - 5 of the
	 * MAC address for first address
	 */
	uni_pf3 = (adapter->addr[0] << ET_RX_UNI_PF_ADDR2_1_SHIFT) |
		  (adapter->addr[1] << ET_RX_UNI_PF_ADDR2_2_SHIFT) |
		  (adapter->addr[0] << ET_RX_UNI_PF_ADDR1_1_SHIFT) |
		  adapter->addr[1];

	uni_pf2 = (adapter->addr[2] << ET_RX_UNI_PF_ADDR2_3_SHIFT) |
		  (adapter->addr[3] << ET_RX_UNI_PF_ADDR2_4_SHIFT) |
		  (adapter->addr[4] << ET_RX_UNI_PF_ADDR2_5_SHIFT) |
		  adapter->addr[5];

	uni_pf1 = (adapter->addr[2] << ET_RX_UNI_PF_ADDR1_3_SHIFT) |
		  (adapter->addr[3] << ET_RX_UNI_PF_ADDR1_4_SHIFT) |
		  (adapter->addr[4] << ET_RX_UNI_PF_ADDR1_5_SHIFT) |
		  adapter->addr[5];

	/* Only touch the filter registers while the device is awake; in
	 * PHY coma the JAGCore clock domains are gated off.
	 */
	pm_csr = readl(&adapter->regs->global.pm_csr);
	if (!et1310_in_phy_coma(adapter)) {
		writel(uni_pf1, &rxmac->uni_pf_addr1);
		writel(uni_pf2, &rxmac->uni_pf_addr2);
		writel(uni_pf3, &rxmac->uni_pf_addr3);
	}
}
1034
/* et1310_config_rxmac_regs - bring up the RX MAC.
 *
 * Disables the RXMAC, clears Wake-on-LAN state, programs the WOL source
 * address and the unicast/multicast packet filters, sets up cut-through
 * mode for large jumbo frames, then re-enables the RXMAC with WOL off.
 */
static void et1310_config_rxmac_regs(struct et131x_adapter *adapter)
{
	struct rxmac_regs __iomem *rxmac = &adapter->regs->rxmac;
	struct phy_device *phydev = adapter->phydev;
	u32 sa_lo;
	u32 sa_hi = 0;
	u32 pf_ctrl = 0;
	u32 __iomem *wolw;

	/* Disable the MAC while it is being configured (also disable WOL) */
	writel(0x8, &rxmac->ctrl);

	/* Initialize WOL to disabled. */
	writel(0, &rxmac->crc0);
	writel(0, &rxmac->crc12);
	writel(0, &rxmac->crc34);

	/* We need to set the WOL mask0 - mask4 next. We initialize it to
	 * its default Values of 0x00000000 because there are not WOL masks
	 * as of this time.
	 */
	for (wolw = &rxmac->mask0_word0; wolw <= &rxmac->mask4_word3; wolw++)
		writel(0, wolw);

	/* Lets setup the WOL Source Address */
	sa_lo = (adapter->addr[2] << ET_RX_WOL_LO_SA3_SHIFT) |
		(adapter->addr[3] << ET_RX_WOL_LO_SA4_SHIFT) |
		(adapter->addr[4] << ET_RX_WOL_LO_SA5_SHIFT) |
		adapter->addr[5];
	writel(sa_lo, &rxmac->sa_lo);

	sa_hi = (u32)(adapter->addr[0] << ET_RX_WOL_HI_SA1_SHIFT) |
		adapter->addr[1];
	writel(sa_hi, &rxmac->sa_hi);

	/* Disable all Packet Filtering */
	writel(0, &rxmac->pf_ctrl);

	/* Let's initialize the Unicast Packet filtering address */
	if (adapter->packet_filter & ET131X_PACKET_TYPE_DIRECTED) {
		et1310_setup_device_for_unicast(adapter);
		pf_ctrl |= ET_RX_PFCTRL_UNICST_FILTER_ENABLE;
	} else {
		writel(0, &rxmac->uni_pf_addr1);
		writel(0, &rxmac->uni_pf_addr2);
		writel(0, &rxmac->uni_pf_addr3);
	}

	/* Let's initialize the Multicast hash */
	if (!(adapter->packet_filter & ET131X_PACKET_TYPE_ALL_MULTICAST)) {
		pf_ctrl |= ET_RX_PFCTRL_MLTCST_FILTER_ENABLE;
		et1310_setup_device_for_multicast(adapter);
	}

	/* Runt packet filtering. Didn't work in version A silicon. */
	pf_ctrl |= (NIC_MIN_PACKET_SIZE + 4) << ET_RX_PFCTRL_MIN_PKT_SZ_SHIFT;
	pf_ctrl |= ET_RX_PFCTRL_FRAG_FILTER_ENABLE;

	if (adapter->registry_jumbo_packet > 8192)
		/* In order to transmit jumbo packets greater than 8k, the
		 * FIFO between RxMAC and RxDMA needs to be reduced in size
		 * to (16k - Jumbo packet size). In order to implement this,
		 * we must use "cut through" mode in the RxMAC, which chops
		 * packets down into segments which are (max_size * 16). In
		 * this case we selected 256 bytes, since this is the size of
		 * the PCI-Express TLP's that the 1310 uses.
		 *
		 * seg_en on, fc_en off, size 0x10
		 */
		writel(0x41, &rxmac->mcif_ctrl_max_seg);
	else
		writel(0, &rxmac->mcif_ctrl_max_seg);

	writel(0, &rxmac->mcif_water_mark);
	writel(0, &rxmac->mif_ctrl);
	writel(0, &rxmac->space_avail);

	/* Initialize the mif_ctrl register
	 * bit 3: Receive code error. One or more nibbles were signaled as
	 *	  errors during the reception of the packet. Clear this
	 *	  bit in Gigabit, set it in 100Mbit. This was derived
	 *	  experimentally at UNH.
	 * bit 4: Receive CRC error. The packet's CRC did not match the
	 *	  internally generated CRC.
	 * bit 5: Receive length check error. Indicates that frame length
	 *	  field value in the packet does not match the actual data
	 *	  byte length and is not a type field.
	 * bit 16: Receive frame truncated.
	 * bit 17: Drop packet enable
	 */
	if (phydev && phydev->speed == SPEED_100)
		writel(0x30038, &rxmac->mif_ctrl);
	else
		writel(0x30030, &rxmac->mif_ctrl);

	/* Finally we initialize RxMac to be enabled & WOL disabled. Packet
	 * filter is always enabled since it is where the runt packets are
	 * supposed to be dropped. For version A silicon, runt packet
	 * dropping doesn't work, so it is disabled in the pf_ctrl register,
	 * but we still leave the packet filter on.
	 */
	writel(pf_ctrl, &rxmac->pf_ctrl);
	writel(ET_RX_CTRL_RXMAC_ENABLE | ET_RX_CTRL_WOL_DISABLE, &rxmac->ctrl);
}
1139
1140static void et1310_config_txmac_regs(struct et131x_adapter *adapter)
1141{
1142 struct txmac_regs __iomem *txmac = &adapter->regs->txmac;
1143
1144 /* We need to update the Control Frame Parameters
1145 * cfpt - control frame pause timer set to 64 (0x40)
1146 * cfep - control frame extended pause timer set to 0x0
1147 */
1148 if (adapter->flow == FLOW_NONE)
1149 writel(0, &txmac->cf_param);
1150 else
1151 writel(0x40, &txmac->cf_param);
1152}
1153
1154static void et1310_config_macstat_regs(struct et131x_adapter *adapter)
1155{
1156 struct macstat_regs __iomem *macstat = &adapter->regs->macstat;
1157 u32 __iomem *reg;
1158
1159 /* initialize all the macstat registers to zero on the device */
1160 for (reg = &macstat->txrx_0_64_byte_frames;
1161 reg <= &macstat->carry_reg2; reg++)
1162 writel(0, reg);
1163
1164 /* Unmask any counters that we want to track the overflow of.
1165 * Initially this will be all counters. It may become clear later
1166 * that we do not need to track all counters.
1167 */
1168 writel(0xFFFFBE32, &macstat->carry_reg1_mask);
1169 writel(0xFFFE7E8B, &macstat->carry_reg2_mask);
1170}
1171
/* et131x_phy_mii_read - read PHY register @reg on the PHY at MII address
 * @addr, storing the result in @value.
 *
 * Saves the MII management address/command registers, issues a read
 * cycle, polls for completion (up to 50 x 50us = ~2.5ms), then restores
 * the saved registers.  Returns 0 on success or -EIO on timeout.
 */
static int et131x_phy_mii_read(struct et131x_adapter *adapter, u8 addr,
			       u8 reg, u16 *value)
{
	struct mac_regs __iomem *mac = &adapter->regs->mac;
	int status = 0;
	u32 delay = 0;
	u32 mii_addr;
	u32 mii_cmd;
	u32 mii_indicator;

	/* Save a local copy of the registers we are dealing with so we can
	 * set them back
	 */
	mii_addr = readl(&mac->mii_mgmt_addr);
	mii_cmd = readl(&mac->mii_mgmt_cmd);

	/* Stop the current operation */
	writel(0, &mac->mii_mgmt_cmd);

	/* Set up the register we need to read from on the correct PHY */
	writel(ET_MAC_MII_ADDR(addr, reg), &mac->mii_mgmt_addr);

	/* Writing 1 to the command register starts the read cycle */
	writel(0x1, &mac->mii_mgmt_cmd);

	do {
		udelay(50);
		delay++;
		mii_indicator = readl(&mac->mii_mgmt_indicator);
	} while ((mii_indicator & ET_MAC_MGMT_WAIT) && delay < 50);

	/* If we hit the max delay, we could not read the register */
	if (delay == 50) {
		dev_warn(&adapter->pdev->dev,
			 "reg 0x%08x could not be read\n", reg);
		dev_warn(&adapter->pdev->dev, "status is 0x%08x\n",
			 mii_indicator);

		status = -EIO;
		goto out;
	}

	/* If we hit here we were able to read the register and we need to
	 * return the value to the caller
	 */
	*value = readl(&mac->mii_mgmt_stat) & ET_MAC_MIIMGMT_STAT_PHYCRTL_MASK;

out:
	/* Stop the read operation */
	writel(0, &mac->mii_mgmt_cmd);

	/* set the registers we touched back to the state at which we entered
	 * this function
	 */
	writel(mii_addr, &mac->mii_mgmt_addr);
	writel(mii_cmd, &mac->mii_mgmt_cmd);

	return status;
}
1230
1231static int et131x_mii_read(struct et131x_adapter *adapter, u8 reg, u16 *value)
1232{
1233 struct phy_device *phydev = adapter->phydev;
1234
1235 if (!phydev)
1236 return -EIO;
1237
1238 return et131x_phy_mii_read(adapter, phydev->addr, reg, value);
1239}
1240
1241static int et131x_mii_write(struct et131x_adapter *adapter, u8 addr, u8 reg,
1242 u16 value)
1243{
1244 struct mac_regs __iomem *mac = &adapter->regs->mac;
1245 int status = 0;
1246 u32 delay = 0;
1247 u32 mii_addr;
1248 u32 mii_cmd;
1249 u32 mii_indicator;
1250
1251 /* Save a local copy of the registers we are dealing with so we can
1252 * set them back
1253 */
1254 mii_addr = readl(&mac->mii_mgmt_addr);
1255 mii_cmd = readl(&mac->mii_mgmt_cmd);
1256
1257 /* Stop the current operation */
1258 writel(0, &mac->mii_mgmt_cmd);
1259
1260 /* Set up the register we need to write to on the correct PHY */
1261 writel(ET_MAC_MII_ADDR(addr, reg), &mac->mii_mgmt_addr);
1262
1263 /* Add the value to write to the registers to the mac */
1264 writel(value, &mac->mii_mgmt_ctrl);
1265
1266 do {
1267 udelay(50);
1268 delay++;
1269 mii_indicator = readl(&mac->mii_mgmt_indicator);
1270 } while ((mii_indicator & ET_MAC_MGMT_BUSY) && delay < 100);
1271
1272 /* If we hit the max delay, we could not write the register */
1273 if (delay == 100) {
1274 u16 tmp;
1275
1276 dev_warn(&adapter->pdev->dev,
1277 "reg 0x%08x could not be written", reg);
1278 dev_warn(&adapter->pdev->dev, "status is 0x%08x\n",
1279 mii_indicator);
1280 dev_warn(&adapter->pdev->dev, "command is 0x%08x\n",
1281 readl(&mac->mii_mgmt_cmd));
1282
1283 et131x_mii_read(adapter, reg, &tmp);
1284
1285 status = -EIO;
1286 }
1287 /* Stop the write operation */
1288 writel(0, &mac->mii_mgmt_cmd);
1289
1290 /* set the registers we touched back to the state at which we entered
1291 * this function
1292 */
1293 writel(mii_addr, &mac->mii_mgmt_addr);
1294 writel(mii_cmd, &mac->mii_mgmt_cmd);
1295
1296 return status;
1297}
1298
1299static void et1310_phy_read_mii_bit(struct et131x_adapter *adapter,
1300 u16 regnum,
1301 u16 bitnum,
1302 u8 *value)
1303{
1304 u16 reg;
1305 u16 mask = 1 << bitnum;
1306
1307 et131x_mii_read(adapter, regnum, &reg);
1308
1309 *value = (reg & mask) >> bitnum;
1310}
1311
1312static void et1310_config_flow_control(struct et131x_adapter *adapter)
1313{
1314 struct phy_device *phydev = adapter->phydev;
1315
1316 if (phydev->duplex == DUPLEX_HALF) {
1317 adapter->flow = FLOW_NONE;
1318 } else {
1319 char remote_pause, remote_async_pause;
1320
1321 et1310_phy_read_mii_bit(adapter, 5, 10, &remote_pause);
1322 et1310_phy_read_mii_bit(adapter, 5, 11, &remote_async_pause);
1323
1324 if (remote_pause && remote_async_pause) {
1325 adapter->flow = adapter->wanted_flow;
1326 } else if (remote_pause && !remote_async_pause) {
1327 if (adapter->wanted_flow == FLOW_BOTH)
1328 adapter->flow = FLOW_BOTH;
1329 else
1330 adapter->flow = FLOW_NONE;
1331 } else if (!remote_pause && !remote_async_pause) {
1332 adapter->flow = FLOW_NONE;
1333 } else {
1334 if (adapter->wanted_flow == FLOW_BOTH)
1335 adapter->flow = FLOW_RXONLY;
1336 else
1337 adapter->flow = FLOW_NONE;
1338 }
1339 }
1340}
1341
/* et1310_update_macstat_host_counters - Update local copy of the statistics.
 *
 * The hardware counters are clear-on-read, so each value is accumulated
 * into the adapter's soft counters.
 *
 * NOTE(review): several stat fields are fed from registers with
 * different names (e.g. rx_crc_errs from rx_code_errs, rx_code_violations
 * from rx_fcs_errs, tx_underflows from tx_undersize_frames).  This
 * pairing is preserved as-is — verify against the ET1310 MAC-STAT
 * register map before "correcting" it.
 */
static void et1310_update_macstat_host_counters(struct et131x_adapter *adapter)
{
	struct ce_stats *stats = &adapter->stats;
	struct macstat_regs __iomem *macstat =
		&adapter->regs->macstat;

	stats->tx_collisions           += readl(&macstat->tx_total_collisions);
	stats->tx_first_collisions     += readl(&macstat->tx_single_collisions);
	stats->tx_deferred             += readl(&macstat->tx_deferred);
	stats->tx_excessive_collisions +=
				readl(&macstat->tx_multiple_collisions);
	stats->tx_late_collisions      += readl(&macstat->tx_late_collisions);
	stats->tx_underflows           += readl(&macstat->tx_undersize_frames);
	stats->tx_max_pkt_errs         += readl(&macstat->tx_oversize_frames);

	stats->rx_align_errs           += readl(&macstat->rx_align_errs);
	stats->rx_crc_errs             += readl(&macstat->rx_code_errs);
	stats->rcvd_pkts_dropped       += readl(&macstat->rx_drops);
	stats->rx_overflows            += readl(&macstat->rx_oversize_packets);
	stats->rx_code_violations      += readl(&macstat->rx_fcs_errs);
	stats->rx_length_errs          += readl(&macstat->rx_frame_len_errs);
	stats->rx_other_errs           += readl(&macstat->rx_fragment_packets);
}
1366
/* et1310_handle_macstat_interrupt
 *
 * One of the MACSTAT counters has wrapped. Update the local copy of
 * the statistics held in the adapter structure, checking the "wrap"
 * bit for each counter.
 */
static void et1310_handle_macstat_interrupt(struct et131x_adapter *adapter)
{
	u32 carry_reg1;
	u32 carry_reg2;

	/* Read the interrupt bits from the register(s). These are Clear On
	 * Write.
	 */
	carry_reg1 = readl(&adapter->regs->macstat.carry_reg1);
	carry_reg2 = readl(&adapter->regs->macstat.carry_reg2);

	/* Writing the value back acknowledges (clears) the carry bits */
	writel(carry_reg1, &adapter->regs->macstat.carry_reg1);
	writel(carry_reg2, &adapter->regs->macstat.carry_reg2);

	/* We need to do update the host copy of all the MAC_STAT counters.
	 * For each counter, check it's overflow bit. If the overflow bit is
	 * set, then increment the host version of the count by one complete
	 * revolution of the counter. This routine is called when the counter
	 * block indicates that one of the counters has wrapped.
	 *
	 * NOTE(review): the bit-to-counter mapping below mirrors the
	 * pairing used in et1310_update_macstat_host_counters(); confirm
	 * both against the ET1310 carry register layout.
	 */
	if (carry_reg1 & (1 << 14))
		adapter->stats.rx_code_violations	+= COUNTER_WRAP_16_BIT;
	if (carry_reg1 & (1 << 8))
		adapter->stats.rx_align_errs		+= COUNTER_WRAP_12_BIT;
	if (carry_reg1 & (1 << 7))
		adapter->stats.rx_length_errs		+= COUNTER_WRAP_16_BIT;
	if (carry_reg1 & (1 << 2))
		adapter->stats.rx_other_errs		+= COUNTER_WRAP_16_BIT;
	if (carry_reg1 & (1 << 6))
		adapter->stats.rx_crc_errs		+= COUNTER_WRAP_16_BIT;
	if (carry_reg1 & (1 << 3))
		adapter->stats.rx_overflows		+= COUNTER_WRAP_16_BIT;
	if (carry_reg1 & (1 << 0))
		adapter->stats.rcvd_pkts_dropped	+= COUNTER_WRAP_16_BIT;
	if (carry_reg2 & (1 << 16))
		adapter->stats.tx_max_pkt_errs		+= COUNTER_WRAP_12_BIT;
	if (carry_reg2 & (1 << 15))
		adapter->stats.tx_underflows		+= COUNTER_WRAP_12_BIT;
	if (carry_reg2 & (1 << 6))
		adapter->stats.tx_first_collisions	+= COUNTER_WRAP_12_BIT;
	if (carry_reg2 & (1 << 8))
		adapter->stats.tx_deferred		+= COUNTER_WRAP_12_BIT;
	if (carry_reg2 & (1 << 5))
		adapter->stats.tx_excessive_collisions	+= COUNTER_WRAP_12_BIT;
	if (carry_reg2 & (1 << 4))
		adapter->stats.tx_late_collisions	+= COUNTER_WRAP_12_BIT;
	if (carry_reg2 & (1 << 2))
		adapter->stats.tx_collisions		+= COUNTER_WRAP_12_BIT;
}
1422
1423static int et131x_mdio_read(struct mii_bus *bus, int phy_addr, int reg)
1424{
1425 struct net_device *netdev = bus->priv;
1426 struct et131x_adapter *adapter = netdev_priv(netdev);
1427 u16 value;
1428 int ret;
1429
1430 ret = et131x_phy_mii_read(adapter, phy_addr, reg, &value);
1431
1432 if (ret < 0)
1433 return ret;
1434
1435 return value;
1436}
1437
1438static int et131x_mdio_write(struct mii_bus *bus, int phy_addr,
1439 int reg, u16 value)
1440{
1441 struct net_device *netdev = bus->priv;
1442 struct et131x_adapter *adapter = netdev_priv(netdev);
1443
1444 return et131x_mii_write(adapter, phy_addr, reg, value);
1445}
1446
1447/* et1310_phy_power_switch - PHY power control
1448 * @adapter: device to control
1449 * @down: true for off/false for back on
1450 *
1451 * one hundred, ten, one thousand megs
1452 * How would you like to have your LAN accessed
1453 * Can't you see that this code processed
1454 * Phy power, phy power..
1455 */
1456static void et1310_phy_power_switch(struct et131x_adapter *adapter, bool down)
1457{
1458 u16 data;
1459 struct phy_device *phydev = adapter->phydev;
1460
1461 et131x_mii_read(adapter, MII_BMCR, &data);
1462 data &= ~BMCR_PDOWN;
1463 if (down)
1464 data |= BMCR_PDOWN;
1465 et131x_mii_write(adapter, phydev->addr, MII_BMCR, data);
1466}
1467
/* et131x_xcvr_init - Init the phy if we are setting it into force mode */
static void et131x_xcvr_init(struct et131x_adapter *adapter)
{
	u16 lcr2;
	struct phy_device *phydev = adapter->phydev;

	/* Set the LED behavior such that LED 1 indicates speed (off =
	 * 10Mbits, blink = 100Mbits, on = 1000Mbits) and LED 2 indicates
	 * link and activity (on for link, blink off for activity).
	 *
	 * NOTE: Some customizations have been added here for specific
	 * vendors; The LED behavior is now determined by vendor data in the
	 * EEPROM. However, the above description is the default.
	 */
	if ((adapter->eeprom_data[1] & 0x4) == 0) {	/* bit 2: use default LEDs */
		et131x_mii_read(adapter, PHY_LED_2, &lcr2);

		/* Keep the 100TX/1000T LED bits, force link LED on-when-up */
		lcr2 &= (ET_LED2_LED_100TX | ET_LED2_LED_1000T);
		lcr2 |= (LED_VAL_LINKON_ACTIVE << LED_LINK_SHIFT);

		/* bit 3 of the EEPROM vendor data selects the TX/RX LED mode */
		if ((adapter->eeprom_data[1] & 0x8) == 0)
			lcr2 |= (LED_VAL_1000BT_100BTX << LED_TXRX_SHIFT);
		else
			lcr2 |= (LED_VAL_LINKON << LED_TXRX_SHIFT);

		et131x_mii_write(adapter, phydev->addr, PHY_LED_2, lcr2);
	}
}
1496
/* et131x_configure_global_regs - configure JAGCore global regs
 *
 * Splits the shared internal packet RAM between the TX and RX queues
 * based on the configured jumbo packet size, and disables loopback,
 * MSI configuration and the watchdog timer.
 */
static void et131x_configure_global_regs(struct et131x_adapter *adapter)
{
	struct global_regs __iomem *regs = &adapter->regs->global;

	/* RX queue starts at the bottom, TX queue ends at the top */
	writel(0, &regs->rxq_start_addr);
	writel(INTERNAL_MEM_SIZE - 1, &regs->txq_end_addr);

	if (adapter->registry_jumbo_packet < 2048) {
		/* Tx / RxDMA and Tx/Rx MAC interfaces have a 1k word
		 * block of RAM that the driver can split between Tx
		 * and Rx as it desires. Our default is to split it
		 * 50/50:
		 */
		writel(PARM_RX_MEM_END_DEF, &regs->rxq_end_addr);
		writel(PARM_RX_MEM_END_DEF + 1, &regs->txq_start_addr);
	} else if (adapter->registry_jumbo_packet < 8192) {
		/* For jumbo packets > 2k but < 8k, split 50-50. */
		writel(INTERNAL_MEM_RX_OFFSET, &regs->rxq_end_addr);
		writel(INTERNAL_MEM_RX_OFFSET + 1, &regs->txq_start_addr);
	} else {
		/* 9216 is the only packet size greater than 8k that
		 * is available. The Tx buffer has to be big enough
		 * for one whole packet on the Tx side. We'll make
		 * the Tx 9408, and give the rest to Rx
		 */
		writel(0x01b3, &regs->rxq_end_addr);
		writel(0x01b4, &regs->txq_start_addr);
	}

	/* Initialize the loopback register. Disable all loopbacks. */
	writel(0, &regs->loopback);

	writel(0, &regs->msi_config);

	/* By default, disable the watchdog timer. It will be enabled when
	 * a packet is queued.
	 */
	writel(0, &regs->watchdog_timer);
}
1537
/* et131x_config_rx_dma_regs - Start of Rx_DMA init sequence
 *
 * Halts the RX DMA engine, programs the status writeback and packet
 * status ring addresses, initializes both free buffer rings (descriptor
 * contents plus the hardware base/size/offset registers) under
 * rcv_lock, and sets the interrupt coalescing parameters.
 */
static void et131x_config_rx_dma_regs(struct et131x_adapter *adapter)
{
	struct rxdma_regs __iomem *rx_dma = &adapter->regs->rxdma;
	struct rx_ring *rx_local = &adapter->rx_ring;
	struct fbr_desc *fbr_entry;
	u32 entry;
	u32 psr_num_des;
	unsigned long flags;
	u8 id;

	et131x_rx_dma_disable(adapter);

	/* Load the completion writeback physical address */
	writel(upper_32_bits(rx_local->rx_status_bus), &rx_dma->dma_wb_base_hi);
	writel(lower_32_bits(rx_local->rx_status_bus), &rx_dma->dma_wb_base_lo);

	memset(rx_local->rx_status_block, 0, sizeof(struct rx_status_block));

	/* Set the address and parameters of the packet status ring */
	writel(upper_32_bits(rx_local->ps_ring_physaddr), &rx_dma->psr_base_hi);
	writel(lower_32_bits(rx_local->ps_ring_physaddr), &rx_dma->psr_base_lo);
	writel(rx_local->psr_entries - 1, &rx_dma->psr_num_des);
	writel(0, &rx_dma->psr_full_offset);

	/* Low-water mark: interrupt when the PSR is nearly full */
	psr_num_des = readl(&rx_dma->psr_num_des) & ET_RXDMA_PSR_NUM_DES_MASK;
	writel((psr_num_des * LO_MARK_PERCENT_FOR_PSR) / 100,
	       &rx_dma->psr_min_des);

	spin_lock_irqsave(&adapter->rcv_lock, flags);

	/* These local variables track the PSR in the adapter structure */
	rx_local->local_psr_full = 0;

	/* Program both free buffer rings; FBR0 and FBR1 have parallel
	 * register sets, selected below by @id.
	 */
	for (id = 0; id < NUM_FBRS; id++) {
		u32 __iomem *num_des;
		u32 __iomem *full_offset;
		u32 __iomem *min_des;
		u32 __iomem *base_hi;
		u32 __iomem *base_lo;
		struct fbr_lookup *fbr = rx_local->fbr[id];

		if (id == 0) {
			num_des = &rx_dma->fbr0_num_des;
			full_offset = &rx_dma->fbr0_full_offset;
			min_des = &rx_dma->fbr0_min_des;
			base_hi = &rx_dma->fbr0_base_hi;
			base_lo = &rx_dma->fbr0_base_lo;
		} else {
			num_des = &rx_dma->fbr1_num_des;
			full_offset = &rx_dma->fbr1_full_offset;
			min_des = &rx_dma->fbr1_min_des;
			base_hi = &rx_dma->fbr1_base_hi;
			base_lo = &rx_dma->fbr1_base_lo;
		}

		/* Now's the best time to initialize FBR contents */
		fbr_entry = fbr->ring_virtaddr;
		for (entry = 0; entry < fbr->num_entries; entry++) {
			fbr_entry->addr_hi = fbr->bus_high[entry];
			fbr_entry->addr_lo = fbr->bus_low[entry];
			fbr_entry->word2 = entry;
			fbr_entry++;
		}

		/* Set the address and parameters of Free buffer ring 1 and 0 */
		writel(upper_32_bits(fbr->ring_physaddr), base_hi);
		writel(lower_32_bits(fbr->ring_physaddr), base_lo);
		writel(fbr->num_entries - 1, num_des);
		writel(ET_DMA10_WRAP, full_offset);

		/* This variable tracks the free buffer ring 1 full position,
		 * so it has to match the above.
		 */
		fbr->local_full = ET_DMA10_WRAP;
		writel(((fbr->num_entries * LO_MARK_PERCENT_FOR_RX) / 100) - 1,
		       min_des);
	}

	/* Program the number of packets we will receive before generating an
	 * interrupt.
	 * For version B silicon, this value gets updated once autoneg is
	 * complete.
	 */
	writel(PARM_RX_NUM_BUFS_DEF, &rx_dma->num_pkt_done);

	/* The "time_done" is not working correctly to coalesce interrupts
	 * after a given time period, but rather is giving us an interrupt
	 * regardless of whether we have received packets.
	 * This value gets updated once autoneg is complete.
	 */
	writel(PARM_RX_TIME_INT_DEF, &rx_dma->max_pkt_time);

	spin_unlock_irqrestore(&adapter->rcv_lock, flags);
}
1633
/* et131x_config_tx_dma_regs - Set up the tx dma section of the JAGCore.
 *
 * Configure the transmit engine with the ring buffers we have created
 * and prepare it for use.
 */
static void et131x_config_tx_dma_regs(struct et131x_adapter *adapter)
{
	struct txdma_regs __iomem *txdma = &adapter->regs->txdma;
	struct tx_ring *tx_ring = &adapter->tx_ring;

	/* Load the hardware with the start of the transmit descriptor ring. */
	writel(upper_32_bits(tx_ring->tx_desc_ring_pa), &txdma->pr_base_hi);
	writel(lower_32_bits(tx_ring->tx_desc_ring_pa), &txdma->pr_base_lo);

	/* Initialise the transmit DMA engine */
	writel(NUM_DESC_PER_RING_TX - 1, &txdma->pr_num_des);

	/* Load the completion writeback physical address */
	writel(upper_32_bits(tx_ring->tx_status_pa), &txdma->dma_wb_base_hi);
	writel(lower_32_bits(tx_ring->tx_status_pa), &txdma->dma_wb_base_lo);

	/* Clear the host-side status word the device writes back into */
	*tx_ring->tx_status = 0;

	/* Reset both the hardware and software send positions to zero */
	writel(0, &txdma->service_request);
	tx_ring->send_idx = 0;
}
1660
1661/* et131x_adapter_setup - Set the adapter up as per cassini+ documentation */
1662static void et131x_adapter_setup(struct et131x_adapter *adapter)
1663{
1664 et131x_configure_global_regs(adapter);
1665 et1310_config_mac_regs1(adapter);
1666
1667 /* Configure the MMC registers */
1668 /* All we need to do is initialize the Memory Control Register */
1669 writel(ET_MMC_ENABLE, &adapter->regs->mmc.mmc_ctrl);
1670
1671 et1310_config_rxmac_regs(adapter);
1672 et1310_config_txmac_regs(adapter);
1673
1674 et131x_config_rx_dma_regs(adapter);
1675 et131x_config_tx_dma_regs(adapter);
1676
1677 et1310_config_macstat_regs(adapter);
1678
1679 et1310_phy_power_switch(adapter, 0);
1680 et131x_xcvr_init(adapter);
1681}
1682
/* et131x_soft_reset - Issue soft reset to the hardware, complete for ET1310 */
static void et131x_soft_reset(struct et131x_adapter *adapter)
{
	u32 reg;

	/* Disable MAC Core */
	reg = ET_MAC_CFG1_SOFT_RESET | ET_MAC_CFG1_SIM_RESET |
	      ET_MAC_CFG1_RESET_RXMC | ET_MAC_CFG1_RESET_TXMC |
	      ET_MAC_CFG1_RESET_RXFUNC | ET_MAC_CFG1_RESET_TXFUNC;
	writel(reg, &adapter->regs->mac.cfg1);

	/* Global reset of the whole JAGCore */
	reg = ET_RESET_ALL;
	writel(reg, &adapter->regs->global.sw_reset);

	/* Hold the MAC sub-blocks in reset, then release everything */
	reg = ET_MAC_CFG1_RESET_RXMC | ET_MAC_CFG1_RESET_TXMC |
	      ET_MAC_CFG1_RESET_RXFUNC | ET_MAC_CFG1_RESET_TXFUNC;
	writel(reg, &adapter->regs->mac.cfg1);
	writel(0, &adapter->regs->mac.cfg1);
}
1702
1703static void et131x_enable_interrupts(struct et131x_adapter *adapter)
1704{
1705 u32 mask;
1706
1707 if (adapter->flow == FLOW_TXONLY || adapter->flow == FLOW_BOTH)
1708 mask = INT_MASK_ENABLE;
1709 else
1710 mask = INT_MASK_ENABLE_NO_FLOW;
1711
1712 writel(mask, &adapter->regs->global.int_mask);
1713}
1714
/* et131x_disable_interrupts - mask off every device interrupt source. */
static void et131x_disable_interrupts(struct et131x_adapter *adapter)
{
	writel(INT_MASK_DISABLE, &adapter->regs->global.int_mask);
}
1719
/* et131x_tx_dma_disable - halt the transmit DMA engine. */
static void et131x_tx_dma_disable(struct et131x_adapter *adapter)
{
	/* Setup the transmit dma configuration register */
	writel(ET_TXDMA_CSR_HALT | ET_TXDMA_SNGL_EPKT,
	       &adapter->regs->txdma.csr);
}
1726
/* et131x_enable_txrx - bring the data path up: start both DMA engines,
 * unmask interrupts (when in use), and let the stack queue packets.
 */
static void et131x_enable_txrx(struct net_device *netdev)
{
	struct et131x_adapter *adapter = netdev_priv(netdev);

	et131x_rx_dma_enable(adapter);
	et131x_tx_dma_enable(adapter);

	/* Only unmask interrupts when the adapter is actually using them */
	if (adapter->flags & FMP_ADAPTER_INTERRUPT_IN_USE)
		et131x_enable_interrupts(adapter);

	netif_start_queue(netdev);
}
1739
/* et131x_disable_txrx - take the data path down: stop the stack queuing
 * packets first, then halt both DMA engines and mask interrupts.
 */
static void et131x_disable_txrx(struct net_device *netdev)
{
	struct et131x_adapter *adapter = netdev_priv(netdev);

	netif_stop_queue(netdev);

	et131x_rx_dma_disable(adapter);
	et131x_tx_dma_disable(adapter);

	et131x_disable_interrupts(adapter);
}
1751
1752static void et131x_init_send(struct et131x_adapter *adapter)
1753{
1754 int i;
1755 struct tx_ring *tx_ring = &adapter->tx_ring;
1756 struct tcb *tcb = tx_ring->tcb_ring;
1757
1758 tx_ring->tcb_qhead = tcb;
1759
1760 memset(tcb, 0, sizeof(struct tcb) * NUM_TCB);
1761
1762 for (i = 0; i < NUM_TCB; i++) {
1763 tcb->next = tcb + 1;
1764 tcb++;
1765 }
1766
1767 tcb--;
1768 tx_ring->tcb_qtail = tcb;
1769 tcb->next = NULL;
1770 /* Curr send queue should now be empty */
1771 tx_ring->send_head = NULL;
1772 tx_ring->send_tail = NULL;
1773}
1774
/* et1310_enable_phy_coma
 *
 * driver receive an phy status change interrupt while in D0 and check that
 * phy_status is down.
 *
 * -- gate off JAGCore;
 * -- set gigE PHY in Coma mode
 * -- wake on phy_interrupt; Perform software reset JAGCore,
 *    re-initialize jagcore and gigE PHY
 */
static void et1310_enable_phy_coma(struct et131x_adapter *adapter)
{
	u32 pmcsr = readl(&adapter->regs->global.pm_csr);

	/* Stop sending packets. */
	adapter->flags |= FMP_ADAPTER_LOWER_POWER;

	/* Wait for outstanding Receive packets */
	et131x_disable_txrx(adapter->netdev);

	/* Gate off JAGCore 3 clock domains */
	pmcsr &= ~ET_PMCSR_INIT;
	writel(pmcsr, &adapter->regs->global.pm_csr);

	/* Program gigE PHY in to Coma mode */
	pmcsr |= ET_PM_PHY_SW_COMA;
	writel(pmcsr, &adapter->regs->global.pm_csr);
}
1803
/* et1310_disable_phy_coma - take the PHY out of coma mode and fully
 * re-initialise the device (soft reset + adapter setup) before
 * re-enabling the data path.
 */
static void et1310_disable_phy_coma(struct et131x_adapter *adapter)
{
	u32 pmcsr;

	pmcsr = readl(&adapter->regs->global.pm_csr);

	/* Disable phy_sw_coma register and re-enable JAGCore clocks */
	pmcsr |= ET_PMCSR_INIT;
	pmcsr &= ~ET_PM_PHY_SW_COMA;
	writel(pmcsr, &adapter->regs->global.pm_csr);

	/* Restore the GbE PHY speed and duplex modes;
	 * Reset JAGCore; re-configure and initialize JAGCore and gigE PHY
	 */

	/* Re-initialize the send structures */
	et131x_init_send(adapter);

	/* Bring the device back to the state it was during init prior to
	 * autonegotiation being complete. This way, when we get the auto-neg
	 * complete interrupt, we can complete init by calling ConfigMacREGS2.
	 */
	et131x_soft_reset(adapter);

	et131x_adapter_setup(adapter);

	/* Allow Tx to restart */
	adapter->flags &= ~FMP_ADAPTER_LOWER_POWER;

	et131x_enable_txrx(adapter->netdev);
}
1835
1836static inline u32 bump_free_buff_ring(u32 *free_buff_ring, u32 limit)
1837{
1838 u32 tmp_free_buff_ring = *free_buff_ring;
1839
1840 tmp_free_buff_ring++;
1841 /* This works for all cases where limit < 1024. The 1023 case
1842 * works because 1023++ is 1024 which means the if condition is not
1843 * taken but the carry of the bit into the wrap bit toggles the wrap
1844 * value correctly
1845 */
1846 if ((tmp_free_buff_ring & ET_DMA10_MASK) > limit) {
1847 tmp_free_buff_ring &= ~ET_DMA10_MASK;
1848 tmp_free_buff_ring ^= ET_DMA10_WRAP;
1849 }
1850 /* For the 1023 case */
1851 tmp_free_buff_ring &= (ET_DMA10_MASK | ET_DMA10_WRAP);
1852 *free_buff_ring = tmp_free_buff_ring;
1853 return tmp_free_buff_ring;
1854}
1855
/* et131x_rx_dma_memory_alloc
 *
 * Allocates Free buffer ring 1 for sure, free buffer ring 0 if required,
 * and the Packet Status Ring.
 *
 * Allocates, in order: the two FBR lookup tables, the two FBR descriptor
 * rings, the per-ring packet buffer chunks, the Packet Status Ring, and the
 * RX status writeback block.  Ring/buffer sizing depends on the configured
 * jumbo packet size (registry_jumbo_packet).
 *
 * Returns 0 on success, -ENOMEM on any allocation failure.
 *
 * NOTE(review): on failure, allocations made so far are NOT released here;
 * presumably the caller unwinds via et131x_rx_dma_memory_free() (which
 * NULL-checks each pointer) — confirm against the caller.
 */
static int et131x_rx_dma_memory_alloc(struct et131x_adapter *adapter)
{
	u8 id;
	u32 i, j;
	u32 bufsize;
	u32 psr_size;
	u32 fbr_chunksize;
	struct rx_ring *rx_ring = &adapter->rx_ring;
	struct fbr_lookup *fbr;

	/* Alloc memory for the lookup table */
	rx_ring->fbr[0] = kzalloc(sizeof(*fbr), GFP_KERNEL);
	if (rx_ring->fbr[0] == NULL)
		return -ENOMEM;
	rx_ring->fbr[1] = kzalloc(sizeof(*fbr), GFP_KERNEL);
	if (rx_ring->fbr[1] == NULL)
		return -ENOMEM;

	/* The first thing we will do is configure the sizes of the buffer
	 * rings. These will change based on jumbo packet support. Larger
	 * jumbo packets increases the size of each entry in FBR0, and the
	 * number of entries in FBR0, while at the same time decreasing the
	 * number of entries in FBR1.
	 *
	 * FBR1 holds "large" frames, FBR0 holds "small" frames. If FBR1
	 * entries are huge in order to accommodate a "jumbo" frame, then it
	 * will have less entries. Conversely, FBR1 will now be relied upon
	 * to carry more "normal" frames, thus it's entry size also increases
	 * and the number of entries goes up too (since it now carries
	 * "small" + "regular" packets.
	 *
	 * In this scheme, we try to maintain 512 entries between the two
	 * rings. Also, FBR1 remains a constant size - when it's size doubles
	 * the number of entries halves. FBR0 increases in size, however.
	 */
	if (adapter->registry_jumbo_packet < 2048) {
		rx_ring->fbr[0]->buffsize = 256;
		rx_ring->fbr[0]->num_entries = 512;
		rx_ring->fbr[1]->buffsize = 2048;
		rx_ring->fbr[1]->num_entries = 512;
	} else if (adapter->registry_jumbo_packet < 4096) {
		rx_ring->fbr[0]->buffsize = 512;
		rx_ring->fbr[0]->num_entries = 1024;
		rx_ring->fbr[1]->buffsize = 4096;
		rx_ring->fbr[1]->num_entries = 512;
	} else {
		rx_ring->fbr[0]->buffsize = 1024;
		rx_ring->fbr[0]->num_entries = 768;
		rx_ring->fbr[1]->buffsize = 16384;
		rx_ring->fbr[1]->num_entries = 128;
	}

	/* One PSR entry per free buffer across both rings */
	rx_ring->psr_entries = rx_ring->fbr[0]->num_entries +
			       rx_ring->fbr[1]->num_entries;

	for (id = 0; id < NUM_FBRS; id++) {
		fbr = rx_ring->fbr[id];
		/* Allocate an area of memory for Free Buffer Ring */
		bufsize = sizeof(struct fbr_desc) * fbr->num_entries;
		fbr->ring_virtaddr = dma_alloc_coherent(&adapter->pdev->dev,
							bufsize,
							&fbr->ring_physaddr,
							GFP_KERNEL);
		if (!fbr->ring_virtaddr) {
			dev_err(&adapter->pdev->dev,
				"Cannot alloc memory for Free Buffer Ring %d\n",
				id);
			return -ENOMEM;
		}
	}

	/* Allocate the packet buffers in FBR_CHUNKS-sized coherent chunks
	 * and record per-entry virtual/bus addresses for fast lookup.
	 */
	for (id = 0; id < NUM_FBRS; id++) {
		fbr = rx_ring->fbr[id];
		fbr_chunksize = (FBR_CHUNKS * fbr->buffsize);

		for (i = 0; i < fbr->num_entries / FBR_CHUNKS; i++) {
			dma_addr_t fbr_physaddr;

			fbr->mem_virtaddrs[i] = dma_alloc_coherent(
					&adapter->pdev->dev, fbr_chunksize,
					&fbr->mem_physaddrs[i],
					GFP_KERNEL);

			if (!fbr->mem_virtaddrs[i]) {
				dev_err(&adapter->pdev->dev,
					"Could not alloc memory\n");
				return -ENOMEM;
			}

			/* See NOTE in "Save Physical Address" comment above */
			fbr_physaddr = fbr->mem_physaddrs[i];

			for (j = 0; j < FBR_CHUNKS; j++) {
				u32 k = (i * FBR_CHUNKS) + j;

				/* Save the Virtual address of this index for
				 * quick access later
				 */
				fbr->virt[k] = (u8 *)fbr->mem_virtaddrs[i] +
					       (j * fbr->buffsize);

				/* now store the physical address in the
				 * descriptor so the device can access it
				 */
				fbr->bus_high[k] = upper_32_bits(fbr_physaddr);
				fbr->bus_low[k] = lower_32_bits(fbr_physaddr);
				fbr_physaddr += fbr->buffsize;
			}
		}
	}

	/* Allocate an area of memory for FIFO of Packet Status ring entries */
	psr_size = sizeof(struct pkt_stat_desc) * rx_ring->psr_entries;

	rx_ring->ps_ring_virtaddr = dma_alloc_coherent(&adapter->pdev->dev,
						       psr_size,
						       &rx_ring->ps_ring_physaddr,
						       GFP_KERNEL);

	if (!rx_ring->ps_ring_virtaddr) {
		dev_err(&adapter->pdev->dev,
			"Cannot alloc memory for Packet Status Ring\n");
		return -ENOMEM;
	}

	/* Allocate an area of memory for writeback of status information */
	rx_ring->rx_status_block = dma_alloc_coherent(&adapter->pdev->dev,
						      sizeof(struct rx_status_block),
						      &rx_ring->rx_status_bus,
						      GFP_KERNEL);
	if (!rx_ring->rx_status_block) {
		dev_err(&adapter->pdev->dev,
			"Cannot alloc memory for Status Block\n");
		return -ENOMEM;
	}
	rx_ring->num_rfd = NIC_DEFAULT_NUM_RFD;

	/* The RFDs are going to be put on lists later on, so initialize the
	 * lists now.
	 */
	INIT_LIST_HEAD(&rx_ring->recv_list);
	return 0;
}
2004
2005static void et131x_rx_dma_memory_free(struct et131x_adapter *adapter)
2006{
2007 u8 id;
2008 u32 ii;
2009 u32 bufsize;
2010 u32 psr_size;
2011 struct rfd *rfd;
2012 struct rx_ring *rx_ring = &adapter->rx_ring;
2013 struct fbr_lookup *fbr;
2014
2015 /* Free RFDs and associated packet descriptors */
2016 WARN_ON(rx_ring->num_ready_recv != rx_ring->num_rfd);
2017
2018 while (!list_empty(&rx_ring->recv_list)) {
2019 rfd = list_entry(rx_ring->recv_list.next,
2020 struct rfd, list_node);
2021
2022 list_del(&rfd->list_node);
2023 rfd->skb = NULL;
2024 kfree(rfd);
2025 }
2026
2027 /* Free Free Buffer Rings */
2028 for (id = 0; id < NUM_FBRS; id++) {
2029 fbr = rx_ring->fbr[id];
2030
2031 if (!fbr || !fbr->ring_virtaddr)
2032 continue;
2033
2034 /* First the packet memory */
2035 for (ii = 0; ii < fbr->num_entries / FBR_CHUNKS; ii++) {
2036 if (fbr->mem_virtaddrs[ii]) {
2037 bufsize = fbr->buffsize * FBR_CHUNKS;
2038
2039 dma_free_coherent(&adapter->pdev->dev,
2040 bufsize,
2041 fbr->mem_virtaddrs[ii],
2042 fbr->mem_physaddrs[ii]);
2043
2044 fbr->mem_virtaddrs[ii] = NULL;
2045 }
2046 }
2047
2048 bufsize = sizeof(struct fbr_desc) * fbr->num_entries;
2049
2050 dma_free_coherent(&adapter->pdev->dev,
2051 bufsize,
2052 fbr->ring_virtaddr,
2053 fbr->ring_physaddr);
2054
2055 fbr->ring_virtaddr = NULL;
2056 }
2057
2058 /* Free Packet Status Ring */
2059 if (rx_ring->ps_ring_virtaddr) {
2060 psr_size = sizeof(struct pkt_stat_desc) * rx_ring->psr_entries;
2061
2062 dma_free_coherent(&adapter->pdev->dev, psr_size,
2063 rx_ring->ps_ring_virtaddr,
2064 rx_ring->ps_ring_physaddr);
2065
2066 rx_ring->ps_ring_virtaddr = NULL;
2067 }
2068
2069 /* Free area of memory for the writeback of status information */
2070 if (rx_ring->rx_status_block) {
2071 dma_free_coherent(&adapter->pdev->dev,
2072 sizeof(struct rx_status_block),
2073 rx_ring->rx_status_block,
2074 rx_ring->rx_status_bus);
2075 rx_ring->rx_status_block = NULL;
2076 }
2077
2078 /* Free the FBR Lookup Table */
2079 kfree(rx_ring->fbr[0]);
2080 kfree(rx_ring->fbr[1]);
2081
2082 /* Reset Counters */
2083 rx_ring->num_ready_recv = 0;
2084}
2085
2086/* et131x_init_recv - Initialize receive data structures */
2087static int et131x_init_recv(struct et131x_adapter *adapter)
2088{
2089 struct rfd *rfd;
2090 u32 rfdct;
2091 struct rx_ring *rx_ring = &adapter->rx_ring;
2092
2093 /* Setup each RFD */
2094 for (rfdct = 0; rfdct < rx_ring->num_rfd; rfdct++) {
2095 rfd = kzalloc(sizeof(*rfd), GFP_ATOMIC | GFP_DMA);
2096 if (!rfd)
2097 return -ENOMEM;
2098
2099 rfd->skb = NULL;
2100
2101 /* Add this RFD to the recv_list */
2102 list_add_tail(&rfd->list_node, &rx_ring->recv_list);
2103
2104 /* Increment the available RFD's */
2105 rx_ring->num_ready_recv++;
2106 }
2107
2108 return 0;
2109}
2110
2111/* et131x_set_rx_dma_timer - Set the heartbeat timer according to line rate */
2112static void et131x_set_rx_dma_timer(struct et131x_adapter *adapter)
2113{
2114 struct phy_device *phydev = adapter->phydev;
2115
2116 /* For version B silicon, we do not use the RxDMA timer for 10 and 100
2117 * Mbits/s line rates. We do not enable and RxDMA interrupt coalescing.
2118 */
2119 if ((phydev->speed == SPEED_100) || (phydev->speed == SPEED_10)) {
2120 writel(0, &adapter->regs->rxdma.max_pkt_time);
2121 writel(1, &adapter->regs->rxdma.num_pkt_done);
2122 }
2123}
2124
/* nic_return_rfd - Recycle a RFD and put it back onto the receive list.
 *
 * Writes the buffer's bus address and index back into the oldest entry of
 * the free buffer ring it came from, advances the ring's local-full index
 * (telling the hardware the buffer is available again), then returns the
 * RFD to the tail of recv_list under rcv_lock.
 */
static void nic_return_rfd(struct et131x_adapter *adapter, struct rfd *rfd)
{
	struct rx_ring *rx_local = &adapter->rx_ring;
	struct rxdma_regs __iomem *rx_dma = &adapter->regs->rxdma;
	u16 buff_index = rfd->bufferindex;
	u8 ring_index = rfd->ringindex;
	unsigned long flags;
	struct fbr_lookup *fbr = rx_local->fbr[ring_index];

	/* We don't use any of the OOB data besides status. Otherwise, we
	 * need to clean up OOB data
	 */
	if (buff_index < fbr->num_entries) {
		u32 free_buff_ring;
		u32 __iomem *offset;
		struct fbr_desc *next;

		/* Pick the full-offset register of the ring being refilled */
		if (ring_index == 0)
			offset = &rx_dma->fbr0_full_offset;
		else
			offset = &rx_dma->fbr1_full_offset;

		next = (struct fbr_desc *)(fbr->ring_virtaddr) +
		       INDEX10(fbr->local_full);

		/* Handle the Free Buffer Ring advancement here. Write
		 * the PA / Buffer Index for the returned buffer into
		 * the oldest (next to be freed)FBR entry
		 */
		next->addr_hi = fbr->bus_high[buff_index];
		next->addr_lo = fbr->bus_low[buff_index];
		next->word2 = buff_index;

		/* Advance local_full (with 10-bit wrap) and tell hardware */
		free_buff_ring = bump_free_buff_ring(&fbr->local_full,
						     fbr->num_entries - 1);
		writel(free_buff_ring, offset);
	} else {
		dev_err(&adapter->pdev->dev,
			"%s illegal Buffer Index returned\n", __func__);
	}

	/* The processing on this RFD is done, so put it back on the tail of
	 * our list
	 */
	spin_lock_irqsave(&adapter->rcv_lock, flags);
	list_add_tail(&rfd->list_node, &rx_local->recv_list);
	rx_local->num_ready_recv++;
	spin_unlock_irqrestore(&adapter->rcv_lock, flags);

	WARN_ON(rx_local->num_ready_recv > rx_local->num_rfd);
}
2177
2178/* nic_rx_pkts - Checks the hardware for available packets
2179 *
2180 * Checks the hardware for available packets, using completion ring
2181 * If packets are available, it gets an RFD from the recv_list, attaches
2182 * the packet to it, puts the RFD in the RecvPendList, and also returns
2183 * the pointer to the RFD.
2184 */
2185static struct rfd *nic_rx_pkts(struct et131x_adapter *adapter)
2186{
2187 struct rx_ring *rx_local = &adapter->rx_ring;
2188 struct rx_status_block *status;
2189 struct pkt_stat_desc *psr;
2190 struct rfd *rfd;
2191 unsigned long flags;
2192 struct list_head *element;
2193 u8 ring_index;
2194 u16 buff_index;
2195 u32 len;
2196 u32 word0;
2197 u32 word1;
2198 struct sk_buff *skb;
2199 struct fbr_lookup *fbr;
2200
2201 /* RX Status block is written by the DMA engine prior to every
2202 * interrupt. It contains the next to be used entry in the Packet
2203 * Status Ring, and also the two Free Buffer rings.
2204 */
2205 status = rx_local->rx_status_block;
2206 word1 = status->word1 >> 16;
2207
2208 /* Check the PSR and wrap bits do not match */
2209 if ((word1 & 0x1FFF) == (rx_local->local_psr_full & 0x1FFF))
2210 return NULL; /* Looks like this ring is not updated yet */
2211
2212 /* The packet status ring indicates that data is available. */
2213 psr = (struct pkt_stat_desc *)(rx_local->ps_ring_virtaddr) +
2214 (rx_local->local_psr_full & 0xFFF);
2215
2216 /* Grab any information that is required once the PSR is advanced,
2217 * since we can no longer rely on the memory being accurate
2218 */
2219 len = psr->word1 & 0xFFFF;
2220 ring_index = (psr->word1 >> 26) & 0x03;
2221 fbr = rx_local->fbr[ring_index];
2222 buff_index = (psr->word1 >> 16) & 0x3FF;
2223 word0 = psr->word0;
2224
2225 /* Indicate that we have used this PSR entry. */
2226 /* FIXME wrap 12 */
2227 add_12bit(&rx_local->local_psr_full, 1);
2228 if ((rx_local->local_psr_full & 0xFFF) > rx_local->psr_entries - 1) {
2229 /* Clear psr full and toggle the wrap bit */
2230 rx_local->local_psr_full &= ~0xFFF;
2231 rx_local->local_psr_full ^= 0x1000;
2232 }
2233
2234 writel(rx_local->local_psr_full, &adapter->regs->rxdma.psr_full_offset);
2235
2236 if (ring_index > 1 || buff_index > fbr->num_entries - 1) {
2237 /* Illegal buffer or ring index cannot be used by S/W*/
2238 dev_err(&adapter->pdev->dev,
2239 "NICRxPkts PSR Entry %d indicates length of %d and/or bad bi(%d)\n",
2240 rx_local->local_psr_full & 0xFFF, len, buff_index);
2241 return NULL;
2242 }
2243
2244 /* Get and fill the RFD. */
2245 spin_lock_irqsave(&adapter->rcv_lock, flags);
2246
2247 element = rx_local->recv_list.next;
2248 rfd = list_entry(element, struct rfd, list_node);
2249
2250 if (!rfd) {
2251 spin_unlock_irqrestore(&adapter->rcv_lock, flags);
2252 return NULL;
2253 }
2254
2255 list_del(&rfd->list_node);
2256 rx_local->num_ready_recv--;
2257
2258 spin_unlock_irqrestore(&adapter->rcv_lock, flags);
2259
2260 rfd->bufferindex = buff_index;
2261 rfd->ringindex = ring_index;
2262
2263 /* In V1 silicon, there is a bug which screws up filtering of runt
2264 * packets. Therefore runt packet filtering is disabled in the MAC and
2265 * the packets are dropped here. They are also counted here.
2266 */
2267 if (len < (NIC_MIN_PACKET_SIZE + 4)) {
2268 adapter->stats.rx_other_errs++;
2269 rfd->len = 0;
2270 goto out;
2271 }
2272
2273 if ((word0 & ALCATEL_MULTICAST_PKT) && !(word0 & ALCATEL_BROADCAST_PKT))
2274 adapter->stats.multicast_pkts_rcvd++;
2275
2276 rfd->len = len;
2277
2278 skb = dev_alloc_skb(rfd->len + 2);
2279 if (!skb)
2280 return NULL;
2281
2282 adapter->netdev->stats.rx_bytes += rfd->len;
2283
2284 memcpy(skb_put(skb, rfd->len), fbr->virt[buff_index], rfd->len);
2285
2286 skb->protocol = eth_type_trans(skb, adapter->netdev);
2287 skb->ip_summed = CHECKSUM_NONE;
2288 netif_receive_skb(skb);
2289
2290out:
2291 nic_return_rfd(adapter, rfd);
2292 return rfd;
2293}
2294
/* et131x_handle_recv_pkts - drain received packets from the hardware.
 *
 * Processes up to min(budget, MAX_PACKETS_HANDLED) packets via
 * nic_rx_pkts().  Packets that arrive with no filter set, no carrier, or
 * zero length are recycled without counting against the budget.  If the
 * budget is exhausted or the RFD pool ran dry, the watchdog timer is armed
 * so the remaining work is picked up later.
 *
 * Returns the number of packets delivered to the stack.
 */
static int et131x_handle_recv_pkts(struct et131x_adapter *adapter, int budget)
{
	struct rfd *rfd = NULL;
	int count = 0;
	int limit = budget;
	bool done = true;
	struct rx_ring *rx_ring = &adapter->rx_ring;

	if (budget > MAX_PACKETS_HANDLED)
		limit = MAX_PACKETS_HANDLED;

	/* Process up to available RFD's */
	while (count < limit) {
		if (list_empty(&rx_ring->recv_list)) {
			WARN_ON(rx_ring->num_ready_recv != 0);
			done = false;
			break;
		}

		rfd = nic_rx_pkts(adapter);

		if (rfd == NULL)
			break;

		/* Do not receive any packets until a filter has been set.
		 * Do not receive any packets until we have link.
		 * If length is zero, return the RFD in order to advance the
		 * Free buffer ring.
		 */
		if (!adapter->packet_filter ||
		    !netif_carrier_ok(adapter->netdev) ||
		    rfd->len == 0)
			continue;

		adapter->netdev->stats.rx_packets++;

		if (rx_ring->num_ready_recv < RFD_LOW_WATER_MARK)
			dev_warn(&adapter->pdev->dev, "RFD's are running out\n");

		count++;
	}

	if (count == limit || !done) {
		/* Budget exhausted or RFDs ran out: re-arm the watchdog so
		 * the remaining packets are handled on a later pass.
		 */
		rx_ring->unfinished_receives = true;
		writel(PARM_TX_TIME_INT_DEF * NANO_IN_A_MICRO,
		       &adapter->regs->global.watchdog_timer);
	} else {
		/* Watchdog timer will disable itself if appropriate. */
		rx_ring->unfinished_receives = false;
	}

	return count;
}
2348
2349/* et131x_tx_dma_memory_alloc
2350 *
2351 * Allocates memory that will be visible both to the device and to the CPU.
2352 * The OS will pass us packets, pointers to which we will insert in the Tx
2353 * Descriptor queue. The device will read this queue to find the packets in
2354 * memory. The device will update the "status" in memory each time it xmits a
2355 * packet.
2356 */
2357static int et131x_tx_dma_memory_alloc(struct et131x_adapter *adapter)
2358{
2359 int desc_size = 0;
2360 struct tx_ring *tx_ring = &adapter->tx_ring;
2361
2362 /* Allocate memory for the TCB's (Transmit Control Block) */
2363 tx_ring->tcb_ring = kcalloc(NUM_TCB, sizeof(struct tcb),
2364 GFP_ATOMIC | GFP_DMA);
2365 if (!tx_ring->tcb_ring)
2366 return -ENOMEM;
2367
2368 desc_size = (sizeof(struct tx_desc) * NUM_DESC_PER_RING_TX);
2369 tx_ring->tx_desc_ring = dma_alloc_coherent(&adapter->pdev->dev,
2370 desc_size,
2371 &tx_ring->tx_desc_ring_pa,
2372 GFP_KERNEL);
2373 if (!tx_ring->tx_desc_ring) {
2374 dev_err(&adapter->pdev->dev,
2375 "Cannot alloc memory for Tx Ring\n");
2376 return -ENOMEM;
2377 }
2378
2379 tx_ring->tx_status = dma_alloc_coherent(&adapter->pdev->dev,
2380 sizeof(u32),
2381 &tx_ring->tx_status_pa,
2382 GFP_KERNEL);
2383 if (!tx_ring->tx_status_pa) {
2384 dev_err(&adapter->pdev->dev,
2385 "Cannot alloc memory for Tx status block\n");
2386 return -ENOMEM;
2387 }
2388 return 0;
2389}
2390
2391static void et131x_tx_dma_memory_free(struct et131x_adapter *adapter)
2392{
2393 int desc_size = 0;
2394 struct tx_ring *tx_ring = &adapter->tx_ring;
2395
2396 if (tx_ring->tx_desc_ring) {
2397 /* Free memory relating to Tx rings here */
2398 desc_size = (sizeof(struct tx_desc) * NUM_DESC_PER_RING_TX);
2399 dma_free_coherent(&adapter->pdev->dev,
2400 desc_size,
2401 tx_ring->tx_desc_ring,
2402 tx_ring->tx_desc_ring_pa);
2403 tx_ring->tx_desc_ring = NULL;
2404 }
2405
2406 /* Free memory for the Tx status block */
2407 if (tx_ring->tx_status) {
2408 dma_free_coherent(&adapter->pdev->dev,
2409 sizeof(u32),
2410 tx_ring->tx_status,
2411 tx_ring->tx_status_pa);
2412
2413 tx_ring->tx_status = NULL;
2414 }
2415 /* Free the memory for the tcb structures */
2416 kfree(tx_ring->tcb_ring);
2417}
2418
/* nic_send_packet - NIC specific send handler for version B silicon.
 *
 * Builds TX descriptors for the skb attached to @tcb (one per fragment,
 * with the linear head split across two descriptors when it exceeds a
 * standard MTU), copies them into the hardware descriptor ring handling
 * the 10-bit index wrap, queues the TCB on the in-flight send list, and
 * writes the new ring index to the device to kick transmission.
 *
 * At gigabit speed, completion interrupts are coalesced: only every
 * PARM_TX_NUM_BUFS_DEF-th packet requests an interrupt, and the watchdog
 * timer is armed as a backstop.  Returns 0 (cannot currently fail).
 */
static int nic_send_packet(struct et131x_adapter *adapter, struct tcb *tcb)
{
	u32 i;
	struct tx_desc desc[24];
	u32 frag = 0;
	u32 thiscopy, remainder;
	struct sk_buff *skb = tcb->skb;
	u32 nr_frags = skb_shinfo(skb)->nr_frags + 1;
	struct skb_frag_struct *frags = &skb_shinfo(skb)->frags[0];
	struct phy_device *phydev = adapter->phydev;
	dma_addr_t dma_addr;
	struct tx_ring *tx_ring = &adapter->tx_ring;

	/* Part of the optimizations of this send routine restrict us to
	 * sending 24 fragments at a pass. In practice we should never see
	 * more than 5 fragments.
	 */

	/* nr_frags should be no more than 18. */
	BUILD_BUG_ON(MAX_SKB_FRAGS + 1 > 23);

	memset(desc, 0, sizeof(struct tx_desc) * (nr_frags + 1));

	for (i = 0; i < nr_frags; i++) {
		/* If there is something in this element, lets get a
		 * descriptor from the ring and get the necessary data
		 */
		if (i == 0) {
			/* If the fragments are smaller than a standard MTU,
			 * then map them to a single descriptor in the Tx
			 * Desc ring. However, if they're larger, as is
			 * possible with support for jumbo packets, then
			 * split them each across 2 descriptors.
			 *
			 * This will work until we determine why the hardware
			 * doesn't seem to like large fragments.
			 */
			if (skb_headlen(skb) <= 1514) {
				/* Low 16bits are length, high is vlan and
				 * unused currently so zero
				 */
				desc[frag].len_vlan = skb_headlen(skb);
				dma_addr = dma_map_single(&adapter->pdev->dev,
							  skb->data,
							  skb_headlen(skb),
							  DMA_TO_DEVICE);
				desc[frag].addr_lo = lower_32_bits(dma_addr);
				desc[frag].addr_hi = upper_32_bits(dma_addr);
				frag++;
			} else {
				/* Split the linear head across two
				 * descriptors of half length each
				 */
				desc[frag].len_vlan = skb_headlen(skb) / 2;
				dma_addr = dma_map_single(&adapter->pdev->dev,
							  skb->data,
							  skb_headlen(skb) / 2,
							  DMA_TO_DEVICE);
				desc[frag].addr_lo = lower_32_bits(dma_addr);
				desc[frag].addr_hi = upper_32_bits(dma_addr);
				frag++;

				desc[frag].len_vlan = skb_headlen(skb) / 2;
				dma_addr = dma_map_single(&adapter->pdev->dev,
							  skb->data +
							  skb_headlen(skb) / 2,
							  skb_headlen(skb) / 2,
							  DMA_TO_DEVICE);
				desc[frag].addr_lo = lower_32_bits(dma_addr);
				desc[frag].addr_hi = upper_32_bits(dma_addr);
				frag++;
			}
		} else {
			/* Paged fragment i maps to descriptor entry frag */
			desc[frag].len_vlan = frags[i - 1].size;
			dma_addr = skb_frag_dma_map(&adapter->pdev->dev,
						    &frags[i - 1],
						    0,
						    frags[i - 1].size,
						    DMA_TO_DEVICE);
			desc[frag].addr_lo = lower_32_bits(dma_addr);
			desc[frag].addr_hi = upper_32_bits(dma_addr);
			frag++;
		}
	}

	/* Gigabit: coalesce completion interrupts; otherwise interrupt on
	 * every packet
	 */
	if (phydev && phydev->speed == SPEED_1000) {
		if (++tx_ring->since_irq == PARM_TX_NUM_BUFS_DEF) {
			/* Last element & Interrupt flag */
			desc[frag - 1].flags =
				TXDESC_FLAG_INTPROC | TXDESC_FLAG_LASTPKT;
			tx_ring->since_irq = 0;
		} else { /* Last element */
			desc[frag - 1].flags = TXDESC_FLAG_LASTPKT;
		}
	} else {
		desc[frag - 1].flags =
			TXDESC_FLAG_INTPROC | TXDESC_FLAG_LASTPKT;
	}

	desc[0].flags |= TXDESC_FLAG_FIRSTPKT;

	tcb->index_start = tx_ring->send_idx;
	tcb->stale = 0;

	/* Copy descriptors into the ring; may need two memcpys if the
	 * block wraps past the end of the ring
	 */
	thiscopy = NUM_DESC_PER_RING_TX - INDEX10(tx_ring->send_idx);

	if (thiscopy >= frag) {
		remainder = 0;
		thiscopy = frag;
	} else {
		remainder = frag - thiscopy;
	}

	memcpy(tx_ring->tx_desc_ring + INDEX10(tx_ring->send_idx),
	       desc,
	       sizeof(struct tx_desc) * thiscopy);

	add_10bit(&tx_ring->send_idx, thiscopy);

	if (INDEX10(tx_ring->send_idx) == 0 ||
	    INDEX10(tx_ring->send_idx) == NUM_DESC_PER_RING_TX) {
		tx_ring->send_idx &= ~ET_DMA10_MASK;
		tx_ring->send_idx ^= ET_DMA10_WRAP;
	}

	if (remainder) {
		memcpy(tx_ring->tx_desc_ring,
		       desc + thiscopy,
		       sizeof(struct tx_desc) * remainder);

		add_10bit(&tx_ring->send_idx, remainder);
	}

	/* Record the index of this packet's last descriptor (with wrap bit) */
	if (INDEX10(tx_ring->send_idx) == 0) {
		if (tx_ring->send_idx)
			tcb->index = NUM_DESC_PER_RING_TX - 1;
		else
			tcb->index = ET_DMA10_WRAP|(NUM_DESC_PER_RING_TX - 1);
	} else {
		tcb->index = tx_ring->send_idx - 1;
	}

	/* Queue the TCB on the in-flight send list */
	spin_lock(&adapter->tcb_send_qlock);

	if (tx_ring->send_tail)
		tx_ring->send_tail->next = tcb;
	else
		tx_ring->send_head = tcb;

	tx_ring->send_tail = tcb;

	WARN_ON(tcb->next != NULL);

	tx_ring->used++;

	spin_unlock(&adapter->tcb_send_qlock);

	/* Write the new write pointer back to the device. */
	writel(tx_ring->send_idx, &adapter->regs->txdma.service_request);

	/* For Gig only, we use Tx Interrupt coalescing. Enable the software
	 * timer to wake us up if this packet isn't followed by N more.
	 */
	if (phydev && phydev->speed == SPEED_1000) {
		writel(PARM_TX_TIME_INT_DEF * NANO_IN_A_MICRO,
		       &adapter->regs->global.watchdog_timer);
	}
	return 0;
}
2586
/* send_packet - take a TCB from the ready queue and hand the skb to the NIC.
 *
 * Pops a free TCB from the ready queue (under tcb_ready_qlock), attaches
 * the skb, and dispatches via nic_send_packet().  On dispatch failure the
 * TCB is returned to the tail of the ready queue and the error is
 * propagated.
 *
 * Returns 0 on success, -EIO for runt frames, -ENOMEM when no TCB is
 * free, or the nic_send_packet() error code.
 */
static int send_packet(struct sk_buff *skb, struct et131x_adapter *adapter)
{
	int status;
	struct tcb *tcb;
	unsigned long flags;
	struct tx_ring *tx_ring = &adapter->tx_ring;

	/* All packets must have at least a MAC address and a protocol type */
	if (skb->len < ETH_HLEN)
		return -EIO;

	/* Pop the head of the ready (free) TCB queue */
	spin_lock_irqsave(&adapter->tcb_ready_qlock, flags);

	tcb = tx_ring->tcb_qhead;

	if (tcb == NULL) {
		spin_unlock_irqrestore(&adapter->tcb_ready_qlock, flags);
		return -ENOMEM;
	}

	tx_ring->tcb_qhead = tcb->next;

	if (tx_ring->tcb_qhead == NULL)
		tx_ring->tcb_qtail = NULL;

	spin_unlock_irqrestore(&adapter->tcb_ready_qlock, flags);

	tcb->skb = skb;
	tcb->next = NULL;

	status = nic_send_packet(adapter, tcb);

	if (status != 0) {
		/* Dispatch failed: give the TCB back to the ready queue */
		spin_lock_irqsave(&adapter->tcb_ready_qlock, flags);

		if (tx_ring->tcb_qtail)
			tx_ring->tcb_qtail->next = tcb;
		else
			/* Apparently ready Q is empty. */
			tx_ring->tcb_qhead = tcb;

		tx_ring->tcb_qtail = tcb;
		spin_unlock_irqrestore(&adapter->tcb_ready_qlock, flags);
		return status;
	}
	WARN_ON(tx_ring->used > NUM_TCB);
	return 0;
}
2635
/* free_send_packet - Recycle a struct tcb.
 *
 * Walks the TX descriptors this TCB occupied (index_start..index, with
 * 10-bit wrap), unmaps each DMA buffer, frees the skb, zeroes the TCB,
 * and pushes it back onto the ready queue under tcb_ready_qlock.
 * Also accounts tx_bytes/tx_packets in the netdev stats.
 */
static inline void free_send_packet(struct et131x_adapter *adapter,
				    struct tcb *tcb)
{
	unsigned long flags;
	struct tx_desc *desc = NULL;
	struct net_device_stats *stats = &adapter->netdev->stats;
	struct tx_ring *tx_ring = &adapter->tx_ring;
	u64 dma_addr;

	if (tcb->skb) {
		stats->tx_bytes += tcb->skb->len;

		/* Iterate through the TX descriptors on the ring
		 * corresponding to this packet and umap the fragments
		 * they point to
		 */
		do {
			desc = tx_ring->tx_desc_ring +
			       INDEX10(tcb->index_start);

			/* Rebuild the 64-bit bus address from the split
			 * hi/lo descriptor fields
			 */
			dma_addr = desc->addr_lo;
			dma_addr |= (u64)desc->addr_hi << 32;

			dma_unmap_single(&adapter->pdev->dev,
					 dma_addr,
					 desc->len_vlan, DMA_TO_DEVICE);

			add_10bit(&tcb->index_start, 1);
			if (INDEX10(tcb->index_start) >=
			    NUM_DESC_PER_RING_TX) {
				tcb->index_start &= ~ET_DMA10_MASK;
				tcb->index_start ^= ET_DMA10_WRAP;
			}
		} while (desc != tx_ring->tx_desc_ring + INDEX10(tcb->index));

		dev_kfree_skb_any(tcb->skb);
	}

	memset(tcb, 0, sizeof(struct tcb));

	/* Add the TCB to the Ready Q */
	spin_lock_irqsave(&adapter->tcb_ready_qlock, flags);

	stats->tx_packets++;

	if (tx_ring->tcb_qtail)
		tx_ring->tcb_qtail->next = tcb;
	else /* Apparently ready Q is empty. */
		tx_ring->tcb_qhead = tcb;

	tx_ring->tcb_qtail = tcb;

	spin_unlock_irqrestore(&adapter->tcb_ready_qlock, flags);
	/* NOTE(review): this check only fires if tx_ring->used is a signed
	 * type — confirm its declaration
	 */
	WARN_ON(tx_ring->used < 0);
}
2692
/* et131x_free_busy_send_packets - Free and complete the stopped active sends.
 *
 * Drains the in-flight send list, recycling each TCB via
 * free_send_packet().  The send-queue lock is dropped around each
 * free_send_packet() call (it takes tcb_ready_qlock and may sleep-free
 * skbs), then reacquired to fetch the next head.  Caps the loop at
 * NUM_TCB entries as a safety net and resets the used counter.
 */
static void et131x_free_busy_send_packets(struct et131x_adapter *adapter)
{
	struct tcb *tcb;
	unsigned long flags;
	u32 freed = 0;
	struct tx_ring *tx_ring = &adapter->tx_ring;

	/* Any packets being sent? Check the first TCB on the send list */
	spin_lock_irqsave(&adapter->tcb_send_qlock, flags);

	tcb = tx_ring->send_head;

	while (tcb != NULL && freed < NUM_TCB) {
		struct tcb *next = tcb->next;

		tx_ring->send_head = next;

		if (next == NULL)
			tx_ring->send_tail = NULL;

		tx_ring->used--;

		/* Drop the lock while recycling; free_send_packet() takes
		 * tcb_ready_qlock
		 */
		spin_unlock_irqrestore(&adapter->tcb_send_qlock, flags);

		freed++;
		free_send_packet(adapter, tcb);

		spin_lock_irqsave(&adapter->tcb_send_qlock, flags);

		tcb = tx_ring->send_head;
	}

	/* Hitting the cap means the list held more TCBs than exist */
	WARN_ON(freed == NUM_TCB);

	spin_unlock_irqrestore(&adapter->tcb_send_qlock, flags);

	tx_ring->used = 0;
}
2732
/* et131x_handle_send_pkts
 *
 * Re-claim the send resources, complete sends and get more to send from
 * the send wait queue.
 *
 * Reads the device's completion index and frees every in-flight TCB whose
 * last descriptor the hardware has serviced.  Two passes are needed
 * because of the 10-bit wrap bit: first TCBs whose wrap bit differs from
 * the serviced index (ring has wrapped past them), then TCBs on the same
 * wrap with a lower index.  Wakes the TX queue at the low-water mark.
 */
static void et131x_handle_send_pkts(struct et131x_adapter *adapter)
{
	unsigned long flags;
	u32 serviced;
	struct tcb *tcb;
	u32 index;
	struct tx_ring *tx_ring = &adapter->tx_ring;

	serviced = readl(&adapter->regs->txdma.new_service_complete);
	index = INDEX10(serviced);

	/* Has the ring wrapped? Process any descriptors that do not have
	 * the same "wrap" indicator as the current completion indicator
	 */
	spin_lock_irqsave(&adapter->tcb_send_qlock, flags);

	tcb = tx_ring->send_head;

	while (tcb &&
	       ((serviced ^ tcb->index) & ET_DMA10_WRAP) &&
	       index < INDEX10(tcb->index)) {
		tx_ring->used--;
		tx_ring->send_head = tcb->next;
		if (tcb->next == NULL)
			tx_ring->send_tail = NULL;

		/* free_send_packet() takes tcb_ready_qlock, so drop the
		 * send-queue lock around it
		 */
		spin_unlock_irqrestore(&adapter->tcb_send_qlock, flags);
		free_send_packet(adapter, tcb);
		spin_lock_irqsave(&adapter->tcb_send_qlock, flags);

		/* Goto the next packet */
		tcb = tx_ring->send_head;
	}
	/* Same wrap generation: complete TCBs up to the serviced index */
	while (tcb &&
	       !((serviced ^ tcb->index) & ET_DMA10_WRAP) &&
	       index > (tcb->index & ET_DMA10_MASK)) {
		tx_ring->used--;
		tx_ring->send_head = tcb->next;
		if (tcb->next == NULL)
			tx_ring->send_tail = NULL;

		spin_unlock_irqrestore(&adapter->tcb_send_qlock, flags);
		free_send_packet(adapter, tcb);
		spin_lock_irqsave(&adapter->tcb_send_qlock, flags);

		/* Goto the next packet */
		tcb = tx_ring->send_head;
	}

	/* Wake up the queue when we hit a low-water mark */
	if (tx_ring->used <= NUM_TCB / 3)
		netif_wake_queue(adapter->netdev);

	spin_unlock_irqrestore(&adapter->tcb_send_qlock, flags);
}
2793
2794static int et131x_get_settings(struct net_device *netdev,
2795 struct ethtool_cmd *cmd)
2796{
2797 struct et131x_adapter *adapter = netdev_priv(netdev);
2798
2799 return phy_ethtool_gset(adapter->phydev, cmd);
2800}
2801
2802static int et131x_set_settings(struct net_device *netdev,
2803 struct ethtool_cmd *cmd)
2804{
2805 struct et131x_adapter *adapter = netdev_priv(netdev);
2806
2807 return phy_ethtool_sset(adapter->phydev, cmd);
2808}
2809
/* et131x_get_regs_len - ethtool regs-dump size: 256 32-bit words.
 * The #define stays inside the function but has file scope from this
 * point on for the preprocessor.
 */
static int et131x_get_regs_len(struct net_device *netdev)
{
#define ET131X_REGS_LEN 256
	return ET131X_REGS_LEN * sizeof(u32);
}
2815
2816static void et131x_get_regs(struct net_device *netdev,
2817 struct ethtool_regs *regs, void *regs_data)
2818{
2819 struct et131x_adapter *adapter = netdev_priv(netdev);
2820 struct address_map __iomem *aregs = adapter->regs;
2821 u32 *regs_buff = regs_data;
2822 u32 num = 0;
2823 u16 tmp;
2824
2825 memset(regs_data, 0, et131x_get_regs_len(netdev));
2826
2827 regs->version = (1 << 24) | (adapter->pdev->revision << 16) |
2828 adapter->pdev->device;
2829
2830 /* PHY regs */
2831 et131x_mii_read(adapter, MII_BMCR, &tmp);
2832 regs_buff[num++] = tmp;
2833 et131x_mii_read(adapter, MII_BMSR, &tmp);
2834 regs_buff[num++] = tmp;
2835 et131x_mii_read(adapter, MII_PHYSID1, &tmp);
2836 regs_buff[num++] = tmp;
2837 et131x_mii_read(adapter, MII_PHYSID2, &tmp);
2838 regs_buff[num++] = tmp;
2839 et131x_mii_read(adapter, MII_ADVERTISE, &tmp);
2840 regs_buff[num++] = tmp;
2841 et131x_mii_read(adapter, MII_LPA, &tmp);
2842 regs_buff[num++] = tmp;
2843 et131x_mii_read(adapter, MII_EXPANSION, &tmp);
2844 regs_buff[num++] = tmp;
2845 /* Autoneg next page transmit reg */
2846 et131x_mii_read(adapter, 0x07, &tmp);
2847 regs_buff[num++] = tmp;
2848 /* Link partner next page reg */
2849 et131x_mii_read(adapter, 0x08, &tmp);
2850 regs_buff[num++] = tmp;
2851 et131x_mii_read(adapter, MII_CTRL1000, &tmp);
2852 regs_buff[num++] = tmp;
2853 et131x_mii_read(adapter, MII_STAT1000, &tmp);
2854 regs_buff[num++] = tmp;
2855 et131x_mii_read(adapter, 0x0b, &tmp);
2856 regs_buff[num++] = tmp;
2857 et131x_mii_read(adapter, 0x0c, &tmp);
2858 regs_buff[num++] = tmp;
2859 et131x_mii_read(adapter, MII_MMD_CTRL, &tmp);
2860 regs_buff[num++] = tmp;
2861 et131x_mii_read(adapter, MII_MMD_DATA, &tmp);
2862 regs_buff[num++] = tmp;
2863 et131x_mii_read(adapter, MII_ESTATUS, &tmp);
2864 regs_buff[num++] = tmp;
2865
2866 et131x_mii_read(adapter, PHY_INDEX_REG, &tmp);
2867 regs_buff[num++] = tmp;
2868 et131x_mii_read(adapter, PHY_DATA_REG, &tmp);
2869 regs_buff[num++] = tmp;
2870 et131x_mii_read(adapter, PHY_MPHY_CONTROL_REG, &tmp);
2871 regs_buff[num++] = tmp;
2872 et131x_mii_read(adapter, PHY_LOOPBACK_CONTROL, &tmp);
2873 regs_buff[num++] = tmp;
2874 et131x_mii_read(adapter, PHY_LOOPBACK_CONTROL + 1, &tmp);
2875 regs_buff[num++] = tmp;
2876
2877 et131x_mii_read(adapter, PHY_REGISTER_MGMT_CONTROL, &tmp);
2878 regs_buff[num++] = tmp;
2879 et131x_mii_read(adapter, PHY_CONFIG, &tmp);
2880 regs_buff[num++] = tmp;
2881 et131x_mii_read(adapter, PHY_PHY_CONTROL, &tmp);
2882 regs_buff[num++] = tmp;
2883 et131x_mii_read(adapter, PHY_INTERRUPT_MASK, &tmp);
2884 regs_buff[num++] = tmp;
2885 et131x_mii_read(adapter, PHY_INTERRUPT_STATUS, &tmp);
2886 regs_buff[num++] = tmp;
2887 et131x_mii_read(adapter, PHY_PHY_STATUS, &tmp);
2888 regs_buff[num++] = tmp;
2889 et131x_mii_read(adapter, PHY_LED_1, &tmp);
2890 regs_buff[num++] = tmp;
2891 et131x_mii_read(adapter, PHY_LED_2, &tmp);
2892 regs_buff[num++] = tmp;
2893
2894 /* Global regs */
2895 regs_buff[num++] = readl(&aregs->global.txq_start_addr);
2896 regs_buff[num++] = readl(&aregs->global.txq_end_addr);
2897 regs_buff[num++] = readl(&aregs->global.rxq_start_addr);
2898 regs_buff[num++] = readl(&aregs->global.rxq_end_addr);
2899 regs_buff[num++] = readl(&aregs->global.pm_csr);
2900 regs_buff[num++] = adapter->stats.interrupt_status;
2901 regs_buff[num++] = readl(&aregs->global.int_mask);
2902 regs_buff[num++] = readl(&aregs->global.int_alias_clr_en);
2903 regs_buff[num++] = readl(&aregs->global.int_status_alias);
2904 regs_buff[num++] = readl(&aregs->global.sw_reset);
2905 regs_buff[num++] = readl(&aregs->global.slv_timer);
2906 regs_buff[num++] = readl(&aregs->global.msi_config);
2907 regs_buff[num++] = readl(&aregs->global.loopback);
2908 regs_buff[num++] = readl(&aregs->global.watchdog_timer);
2909
2910 /* TXDMA regs */
2911 regs_buff[num++] = readl(&aregs->txdma.csr);
2912 regs_buff[num++] = readl(&aregs->txdma.pr_base_hi);
2913 regs_buff[num++] = readl(&aregs->txdma.pr_base_lo);
2914 regs_buff[num++] = readl(&aregs->txdma.pr_num_des);
2915 regs_buff[num++] = readl(&aregs->txdma.txq_wr_addr);
2916 regs_buff[num++] = readl(&aregs->txdma.txq_wr_addr_ext);
2917 regs_buff[num++] = readl(&aregs->txdma.txq_rd_addr);
2918 regs_buff[num++] = readl(&aregs->txdma.dma_wb_base_hi);
2919 regs_buff[num++] = readl(&aregs->txdma.dma_wb_base_lo);
2920 regs_buff[num++] = readl(&aregs->txdma.service_request);
2921 regs_buff[num++] = readl(&aregs->txdma.service_complete);
2922 regs_buff[num++] = readl(&aregs->txdma.cache_rd_index);
2923 regs_buff[num++] = readl(&aregs->txdma.cache_wr_index);
2924 regs_buff[num++] = readl(&aregs->txdma.tx_dma_error);
2925 regs_buff[num++] = readl(&aregs->txdma.desc_abort_cnt);
2926 regs_buff[num++] = readl(&aregs->txdma.payload_abort_cnt);
2927 regs_buff[num++] = readl(&aregs->txdma.writeback_abort_cnt);
2928 regs_buff[num++] = readl(&aregs->txdma.desc_timeout_cnt);
2929 regs_buff[num++] = readl(&aregs->txdma.payload_timeout_cnt);
2930 regs_buff[num++] = readl(&aregs->txdma.writeback_timeout_cnt);
2931 regs_buff[num++] = readl(&aregs->txdma.desc_error_cnt);
2932 regs_buff[num++] = readl(&aregs->txdma.payload_error_cnt);
2933 regs_buff[num++] = readl(&aregs->txdma.writeback_error_cnt);
2934 regs_buff[num++] = readl(&aregs->txdma.dropped_tlp_cnt);
2935 regs_buff[num++] = readl(&aregs->txdma.new_service_complete);
2936 regs_buff[num++] = readl(&aregs->txdma.ethernet_packet_cnt);
2937
2938 /* RXDMA regs */
2939 regs_buff[num++] = readl(&aregs->rxdma.csr);
2940 regs_buff[num++] = readl(&aregs->rxdma.dma_wb_base_hi);
2941 regs_buff[num++] = readl(&aregs->rxdma.dma_wb_base_lo);
2942 regs_buff[num++] = readl(&aregs->rxdma.num_pkt_done);
2943 regs_buff[num++] = readl(&aregs->rxdma.max_pkt_time);
2944 regs_buff[num++] = readl(&aregs->rxdma.rxq_rd_addr);
2945 regs_buff[num++] = readl(&aregs->rxdma.rxq_rd_addr_ext);
2946 regs_buff[num++] = readl(&aregs->rxdma.rxq_wr_addr);
2947 regs_buff[num++] = readl(&aregs->rxdma.psr_base_hi);
2948 regs_buff[num++] = readl(&aregs->rxdma.psr_base_lo);
2949 regs_buff[num++] = readl(&aregs->rxdma.psr_num_des);
2950 regs_buff[num++] = readl(&aregs->rxdma.psr_avail_offset);
2951 regs_buff[num++] = readl(&aregs->rxdma.psr_full_offset);
2952 regs_buff[num++] = readl(&aregs->rxdma.psr_access_index);
2953 regs_buff[num++] = readl(&aregs->rxdma.psr_min_des);
2954 regs_buff[num++] = readl(&aregs->rxdma.fbr0_base_lo);
2955 regs_buff[num++] = readl(&aregs->rxdma.fbr0_base_hi);
2956 regs_buff[num++] = readl(&aregs->rxdma.fbr0_num_des);
2957 regs_buff[num++] = readl(&aregs->rxdma.fbr0_avail_offset);
2958 regs_buff[num++] = readl(&aregs->rxdma.fbr0_full_offset);
2959 regs_buff[num++] = readl(&aregs->rxdma.fbr0_rd_index);
2960 regs_buff[num++] = readl(&aregs->rxdma.fbr0_min_des);
2961 regs_buff[num++] = readl(&aregs->rxdma.fbr1_base_lo);
2962 regs_buff[num++] = readl(&aregs->rxdma.fbr1_base_hi);
2963 regs_buff[num++] = readl(&aregs->rxdma.fbr1_num_des);
2964 regs_buff[num++] = readl(&aregs->rxdma.fbr1_avail_offset);
2965 regs_buff[num++] = readl(&aregs->rxdma.fbr1_full_offset);
2966 regs_buff[num++] = readl(&aregs->rxdma.fbr1_rd_index);
2967 regs_buff[num++] = readl(&aregs->rxdma.fbr1_min_des);
2968}
2969
2970static void et131x_get_drvinfo(struct net_device *netdev,
2971 struct ethtool_drvinfo *info)
2972{
2973 struct et131x_adapter *adapter = netdev_priv(netdev);
2974
2975 strlcpy(info->driver, DRIVER_NAME, sizeof(info->driver));
2976 strlcpy(info->version, DRIVER_VERSION, sizeof(info->version));
2977 strlcpy(info->bus_info, pci_name(adapter->pdev),
2978 sizeof(info->bus_info));
2979}
2980
2981static struct ethtool_ops et131x_ethtool_ops = {
2982 .get_settings = et131x_get_settings,
2983 .set_settings = et131x_set_settings,
2984 .get_drvinfo = et131x_get_drvinfo,
2985 .get_regs_len = et131x_get_regs_len,
2986 .get_regs = et131x_get_regs,
2987 .get_link = ethtool_op_get_link,
2988};
2989
2990/* et131x_hwaddr_init - set up the MAC Address */
2991static void et131x_hwaddr_init(struct et131x_adapter *adapter)
2992{
2993 /* If have our default mac from init and no mac address from
2994 * EEPROM then we need to generate the last octet and set it on the
2995 * device
2996 */
2997 if (is_zero_ether_addr(adapter->rom_addr)) {
2998 /* We need to randomly generate the last octet so we
2999 * decrease our chances of setting the mac address to
3000 * same as another one of our cards in the system
3001 */
3002 get_random_bytes(&adapter->addr[5], 1);
3003 /* We have the default value in the register we are
3004 * working with so we need to copy the current
3005 * address into the permanent address
3006 */
3007 ether_addr_copy(adapter->rom_addr, adapter->addr);
3008 } else {
3009 /* We do not have an override address, so set the
3010 * current address to the permanent address and add
3011 * it to the device
3012 */
3013 ether_addr_copy(adapter->addr, adapter->rom_addr);
3014 }
3015}
3016
/* et131x_pci_init - one-time PCI/PCIe configuration of the ET1310.
 *
 * Initialises the EEPROM, verifies the device is PCIe, programs the
 * Ack/Nak and replay timers for small max-payload sizes, sets the L0s/L1
 * latency byte, raises the max read request size to 2048 bytes, and
 * finally obtains the MAC address: from PCI config space when an EEPROM
 * is present, otherwise a generated one via et131x_hwaddr_init().
 *
 * Returns 0 on success, the et131x_init_eeprom() error as-is, or -EIO for
 * any PCI config-space access failure.
 */
static int et131x_pci_init(struct et131x_adapter *adapter,
			   struct pci_dev *pdev)
{
	u16 max_payload;
	int i, rc;

	rc = et131x_init_eeprom(adapter);
	if (rc < 0)
		goto out;

	if (!pci_is_pcie(pdev)) {
		dev_err(&pdev->dev, "Missing PCIe capabilities\n");
		goto err_out;
	}

	/* Program the Ack/Nak latency and replay timers */
	max_payload = pdev->pcie_mpss;

	if (max_payload < 2) {
		/* Timer values indexed by the encoded max payload size
		 * (0 = 128 bytes, 1 = 256 bytes); larger encodings keep
		 * the device defaults.
		 */
		static const u16 acknak[2] = { 0x76, 0xD0 };
		static const u16 replay[2] = { 0x1E0, 0x2ED };

		if (pci_write_config_word(pdev, ET1310_PCI_ACK_NACK,
					  acknak[max_payload])) {
			dev_err(&pdev->dev,
				"Could not write PCI config space for ACK/NAK\n");
			goto err_out;
		}
		if (pci_write_config_word(pdev, ET1310_PCI_REPLAY,
					  replay[max_payload])) {
			dev_err(&pdev->dev,
				"Could not write PCI config space for Replay Timer\n");
			goto err_out;
		}
	}

	/* l0s and l1 latency timers. We are using default values.
	 * Representing 001 for L0s and 010 for L1
	 */
	if (pci_write_config_byte(pdev, ET1310_PCI_L0L1LATENCY, 0x11)) {
		dev_err(&pdev->dev,
			"Could not write PCI config space for Latency Timers\n");
		goto err_out;
	}

	/* Change the max read size to 2k */
	if (pcie_set_readrq(pdev, 2048)) {
		dev_err(&pdev->dev,
			"Couldn't change PCI config space for Max read size\n");
		goto err_out;
	}

	/* Get MAC address from config space if an eeprom exists, otherwise
	 * the MAC address there will not be valid
	 */
	if (!adapter->has_eeprom) {
		et131x_hwaddr_init(adapter);
		return 0;
	}

	for (i = 0; i < ETH_ALEN; i++) {
		if (pci_read_config_byte(pdev, ET1310_PCI_MAC_ADDRESS + i,
					 adapter->rom_addr + i)) {
			dev_err(&pdev->dev, "Could not read PCI config space for MAC address\n");
			goto err_out;
		}
	}
	/* The config-space address is both the permanent and current MAC. */
	ether_addr_copy(adapter->addr, adapter->rom_addr);
out:
	return rc;
err_out:
	rc = -EIO;
	goto out;
}
3091
3092/* et131x_error_timer_handler
3093 * @data: timer-specific variable; here a pointer to our adapter structure
3094 *
3095 * The routine called when the error timer expires, to track the number of
3096 * recurring errors.
3097 */
3098static void et131x_error_timer_handler(unsigned long data)
3099{
3100 struct et131x_adapter *adapter = (struct et131x_adapter *)data;
3101 struct phy_device *phydev = adapter->phydev;
3102
3103 if (et1310_in_phy_coma(adapter)) {
3104 /* Bring the device immediately out of coma, to
3105 * prevent it from sleeping indefinitely, this
3106 * mechanism could be improved!
3107 */
3108 et1310_disable_phy_coma(adapter);
3109 adapter->boot_coma = 20;
3110 } else {
3111 et1310_update_macstat_host_counters(adapter);
3112 }
3113
3114 if (!phydev->link && adapter->boot_coma < 11)
3115 adapter->boot_coma++;
3116
3117 if (adapter->boot_coma == 10) {
3118 if (!phydev->link) {
3119 if (!et1310_in_phy_coma(adapter)) {
3120 /* NOTE - This was originally a 'sync with
3121 * interrupt'. How to do that under Linux?
3122 */
3123 et131x_enable_interrupts(adapter);
3124 et1310_enable_phy_coma(adapter);
3125 }
3126 }
3127 }
3128
3129 /* This is a periodic timer, so reschedule */
3130 mod_timer(&adapter->error_timer, jiffies + TX_ERROR_PERIOD * HZ / 1000);
3131}
3132
/* et131x_adapter_memory_free - release all TX then RX DMA ring memory */
static void et131x_adapter_memory_free(struct et131x_adapter *adapter)
{
	et131x_tx_dma_memory_free(adapter);
	et131x_rx_dma_memory_free(adapter);
}
3138
3139static int et131x_adapter_memory_alloc(struct et131x_adapter *adapter)
3140{
3141 int status;
3142
3143 status = et131x_tx_dma_memory_alloc(adapter);
3144 if (status) {
3145 dev_err(&adapter->pdev->dev,
3146 "et131x_tx_dma_memory_alloc FAILED\n");
3147 et131x_tx_dma_memory_free(adapter);
3148 return status;
3149 }
3150
3151 status = et131x_rx_dma_memory_alloc(adapter);
3152 if (status) {
3153 dev_err(&adapter->pdev->dev,
3154 "et131x_rx_dma_memory_alloc FAILED\n");
3155 et131x_adapter_memory_free(adapter);
3156 return status;
3157 }
3158
3159 status = et131x_init_recv(adapter);
3160 if (status) {
3161 dev_err(&adapter->pdev->dev, "et131x_init_recv FAILED\n");
3162 et131x_adapter_memory_free(adapter);
3163 }
3164 return status;
3165}
3166
/* et131x_adjust_link - phylib callback invoked on PHY link-state changes.
 *
 * Registered via phy_connect() in et131x_mii_probe().  On a link-up
 * transition it applies a 10Mb PHY workaround, reconfigures flow control,
 * adjusts the PHY TX FIFO depth for gigabit jumbo frames, and finishes MAC
 * configuration.  On link loss it flushes in-flight transmits and resets
 * the device back to its pre-autonegotiation state.
 */
static void et131x_adjust_link(struct net_device *netdev)
{
	struct et131x_adapter *adapter = netdev_priv(netdev);
	struct phy_device *phydev = adapter->phydev;

	if (!phydev)
		return;
	/* Only act on actual link transitions. */
	if (phydev->link == adapter->link)
		return;

	/* Check to see if we are in coma mode and if
	 * so, disable it because we will not be able
	 * to read PHY values until we are out.
	 */
	if (et1310_in_phy_coma(adapter))
		et1310_disable_phy_coma(adapter);

	adapter->link = phydev->link;
	phy_print_status(phydev);

	if (phydev->link) {
		adapter->boot_coma = 20;
		if (phydev->speed == SPEED_10) {
			/* NOTE(review): vendor 10Mb workaround; the magic
			 * register values are undocumented here.
			 */
			u16 register18;

			et131x_mii_read(adapter, PHY_MPHY_CONTROL_REG,
					&register18);
			et131x_mii_write(adapter, phydev->addr,
					 PHY_MPHY_CONTROL_REG,
					 register18 | 0x4);
			et131x_mii_write(adapter, phydev->addr, PHY_INDEX_REG,
					 register18 | 0x8402);
			et131x_mii_write(adapter, phydev->addr, PHY_DATA_REG,
					 register18 | 511);
			et131x_mii_write(adapter, phydev->addr,
					 PHY_MPHY_CONTROL_REG, register18);
		}

		et1310_config_flow_control(adapter);

		/* Deepen the PHY TX FIFO for gigabit jumbo frames. */
		if (phydev->speed == SPEED_1000 &&
		    adapter->registry_jumbo_packet > 2048) {
			u16 reg;

			et131x_mii_read(adapter, PHY_CONFIG, &reg);
			reg &= ~ET_PHY_CONFIG_TX_FIFO_DEPTH;
			reg |= ET_PHY_CONFIG_FIFO_DEPTH_32;
			et131x_mii_write(adapter, phydev->addr, PHY_CONFIG,
					 reg);
		}

		et131x_set_rx_dma_timer(adapter);
		et1310_config_mac_regs2(adapter);
	} else {
		adapter->boot_coma = 0;

		if (phydev->speed == SPEED_10) {
			/* Same 10Mb workaround as the link-up path. */
			u16 register18;

			et131x_mii_read(adapter, PHY_MPHY_CONTROL_REG,
					&register18);
			et131x_mii_write(adapter, phydev->addr,
					 PHY_MPHY_CONTROL_REG,
					 register18 | 0x4);
			et131x_mii_write(adapter, phydev->addr,
					 PHY_INDEX_REG, register18 | 0x8402);
			et131x_mii_write(adapter, phydev->addr,
					 PHY_DATA_REG, register18 | 511);
			et131x_mii_write(adapter, phydev->addr,
					 PHY_MPHY_CONTROL_REG, register18);
		}

		et131x_free_busy_send_packets(adapter);
		et131x_init_send(adapter);

		/* Bring the device back to the state it was during
		 * init prior to autonegotiation being complete. This
		 * way, when we get the auto-neg complete interrupt,
		 * we can complete init by calling config_mac_regs2.
		 */
		et131x_soft_reset(adapter);

		et131x_adapter_setup(adapter);

		et131x_disable_txrx(netdev);
		et131x_enable_txrx(netdev);
	}
}
3255
3256static int et131x_mii_probe(struct net_device *netdev)
3257{
3258 struct et131x_adapter *adapter = netdev_priv(netdev);
3259 struct phy_device *phydev = NULL;
3260
3261 phydev = phy_find_first(adapter->mii_bus);
3262 if (!phydev) {
3263 dev_err(&adapter->pdev->dev, "no PHY found\n");
3264 return -ENODEV;
3265 }
3266
3267 phydev = phy_connect(netdev, dev_name(&phydev->dev),
3268 &et131x_adjust_link, PHY_INTERFACE_MODE_MII);
3269
3270 if (IS_ERR(phydev)) {
3271 dev_err(&adapter->pdev->dev, "Could not attach to PHY\n");
3272 return PTR_ERR(phydev);
3273 }
3274
3275 phydev->supported &= (SUPPORTED_10baseT_Half |
3276 SUPPORTED_10baseT_Full |
3277 SUPPORTED_100baseT_Half |
3278 SUPPORTED_100baseT_Full |
3279 SUPPORTED_Autoneg |
3280 SUPPORTED_MII |
3281 SUPPORTED_TP);
3282
3283 if (adapter->pdev->device != ET131X_PCI_DEVICE_ID_FAST)
3284 phydev->supported |= SUPPORTED_1000baseT_Half |
3285 SUPPORTED_1000baseT_Full;
3286
3287 phydev->advertising = phydev->supported;
3288 phydev->autoneg = AUTONEG_ENABLE;
3289 adapter->phydev = phydev;
3290
3291 dev_info(&adapter->pdev->dev,
3292 "attached PHY driver [%s] (mii_bus:phy_addr=%s)\n",
3293 phydev->drv->name, dev_name(&phydev->dev));
3294
3295 return 0;
3296}
3297
3298static struct et131x_adapter *et131x_adapter_init(struct net_device *netdev,
3299 struct pci_dev *pdev)
3300{
3301 static const u8 default_mac[] = { 0x00, 0x05, 0x3d, 0x00, 0x02, 0x00 };
3302
3303 struct et131x_adapter *adapter;
3304
3305 adapter = netdev_priv(netdev);
3306 adapter->pdev = pci_dev_get(pdev);
3307 adapter->netdev = netdev;
3308
3309 spin_lock_init(&adapter->tcb_send_qlock);
3310 spin_lock_init(&adapter->tcb_ready_qlock);
3311 spin_lock_init(&adapter->rcv_lock);
3312
3313 adapter->registry_jumbo_packet = 1514; /* 1514-9216 */
3314
3315 ether_addr_copy(adapter->addr, default_mac);
3316
3317 return adapter;
3318}
3319
/* et131x_pci_remove - device teardown on driver unbind.
 *
 * Teardown order matters: unregister the netdev first so no new traffic
 * or callbacks arrive, then release NAPI, PHY and MDIO bus resources,
 * free the DMA memory and register mapping, drop the PCI device reference
 * taken in et131x_adapter_init(), and finally free the netdev and the PCI
 * resources themselves.
 */
static void et131x_pci_remove(struct pci_dev *pdev)
{
	struct net_device *netdev = pci_get_drvdata(pdev);
	struct et131x_adapter *adapter = netdev_priv(netdev);

	unregister_netdev(netdev);
	netif_napi_del(&adapter->napi);
	phy_disconnect(adapter->phydev);
	mdiobus_unregister(adapter->mii_bus);
	/* NOTE(review): the irq table is presumably allocated during MDIO
	 * bus setup elsewhere in this file - freed here before the bus.
	 */
	kfree(adapter->mii_bus->irq);
	mdiobus_free(adapter->mii_bus);

	et131x_adapter_memory_free(adapter);
	iounmap(adapter->regs);
	pci_dev_put(pdev);	/* matches pci_dev_get() at adapter init */

	free_netdev(netdev);
	pci_release_regions(pdev);
	pci_disable_device(pdev);
}
3340
/* et131x_up - bring the data path up: enable the TX/RX paths first, then
 * start the PHY so link changes arrive via et131x_adjust_link().
 */
static void et131x_up(struct net_device *netdev)
{
	struct et131x_adapter *adapter = netdev_priv(netdev);

	et131x_enable_txrx(netdev);
	phy_start(adapter->phydev);
}
3348
/* et131x_down - quiesce the data path: stop the PHY first, then disable
 * the TX/RX paths (reverse order of et131x_up()).
 */
static void et131x_down(struct net_device *netdev)
{
	struct et131x_adapter *adapter = netdev_priv(netdev);

	/* Save the timestamp for the TX watchdog, prevent a timeout */
	netdev->trans_start = jiffies;

	phy_stop(adapter->phydev);
	et131x_disable_txrx(netdev);
}
3359
3360#ifdef CONFIG_PM_SLEEP
/* et131x_suspend - PM callback: quiesce the interface before sleep.
 *
 * Only acts when the interface is up: detaches it from the stack, brings
 * the data path down and saves PCI state for restoration on resume.
 */
static int et131x_suspend(struct device *dev)
{
	struct pci_dev *pdev = to_pci_dev(dev);
	struct net_device *netdev = pci_get_drvdata(pdev);

	if (!netif_running(netdev))
		return 0;

	netif_device_detach(netdev);
	et131x_down(netdev);
	pci_save_state(pdev);

	return 0;
}
3374
/* et131x_resume - PM callback: restore the interface after sleep.
 *
 * Mirror of et131x_suspend(): only acts when the interface was running,
 * restoring PCI state, bringing the data path back up and re-attaching
 * the netdev to the stack.
 */
static int et131x_resume(struct device *dev)
{
	struct pci_dev *pdev = to_pci_dev(dev);
	struct net_device *netdev = pci_get_drvdata(pdev);

	if (!netif_running(netdev))
		return 0;

	pci_restore_state(pdev);
	et131x_up(netdev);
	netif_device_attach(netdev);

	return 0;
}
3388#endif
3389
3390static SIMPLE_DEV_PM_OPS(et131x_pm_ops, et131x_suspend, et131x_resume);
3391
/* et131x_isr - hardware interrupt service routine.
 * @irq: interrupt number (unused beyond the standard ISR signature)
 * @dev_id: the net_device passed to request_irq()
 *
 * Masks device interrupts, reads and filters the interrupt status,
 * defers TX/RX completion work to NAPI, and handles the remaining
 * (mostly error-reporting) interrupt sources inline.  Interrupts are
 * re-enabled on exit unless NAPI was scheduled, in which case
 * et131x_poll() re-enables them when polling completes.
 */
static irqreturn_t et131x_isr(int irq, void *dev_id)
{
	bool handled = true;
	bool enable_interrupts = true;
	struct net_device *netdev = dev_id;
	struct et131x_adapter *adapter = netdev_priv(netdev);
	struct address_map __iomem *iomem = adapter->regs;
	struct rx_ring *rx_ring = &adapter->rx_ring;
	struct tx_ring *tx_ring = &adapter->tx_ring;
	u32 status;

	/* Device detached (e.g. suspended): not our interrupt. */
	if (!netif_device_present(netdev)) {
		handled = false;
		enable_interrupts = false;
		goto out;
	}

	et131x_disable_interrupts(adapter);

	status = readl(&adapter->regs->global.int_status);

	/* Ignore sources masked out for the current flow-control mode. */
	if (adapter->flow == FLOW_TXONLY || adapter->flow == FLOW_BOTH)
		status &= ~INT_MASK_ENABLE;
	else
		status &= ~INT_MASK_ENABLE_NO_FLOW;

	/* Make sure this is our interrupt */
	if (!status) {
		handled = false;
		et131x_enable_interrupts(adapter);
		goto out;
	}

	/* This is our interrupt, so process accordingly */
	if (status & ET_INTR_WATCHDOG) {
		struct tcb *tcb = tx_ring->send_head;

		/* A TCB seen twice by the watchdog is considered stale:
		 * force TX completion processing.
		 */
		if (tcb)
			if (++tcb->stale > 1)
				status |= ET_INTR_TXDMA_ISR;

		if (rx_ring->unfinished_receives)
			status |= ET_INTR_RXDMA_XFR_DONE;
		else if (tcb == NULL)
			writel(0, &adapter->regs->global.watchdog_timer);

		status &= ~ET_INTR_WATCHDOG;
	}

	/* TX/RX completion work is done in NAPI context; leave interrupts
	 * masked until et131x_poll() finishes.
	 */
	if (status & (ET_INTR_RXDMA_XFR_DONE | ET_INTR_TXDMA_ISR)) {
		enable_interrupts = false;
		napi_schedule(&adapter->napi);
	}

	status &= ~(ET_INTR_TXDMA_ISR | ET_INTR_RXDMA_XFR_DONE);

	if (!status)
		goto out;

	if (status & ET_INTR_TXDMA_ERR) {
		/* Following read also clears the register (COR) */
		u32 txdma_err = readl(&iomem->txdma.tx_dma_error);

		dev_warn(&adapter->pdev->dev,
			 "TXDMA_ERR interrupt, error = %d\n",
			 txdma_err);
	}

	if (status & (ET_INTR_RXDMA_FB_R0_LOW | ET_INTR_RXDMA_FB_R1_LOW)) {
		/* This indicates the number of unused buffers in RXDMA free
		 * buffer ring 0 is <= the limit you programmed. Free buffer
		 * resources need to be returned. Free buffers are consumed as
		 * packets are passed from the network to the host. The host
		 * becomes aware of the packets from the contents of the packet
		 * status ring. This ring is queried when the packet done
		 * interrupt occurs. Packets are then passed to the OS. When
		 * the OS is done with the packets the resources can be
		 * returned to the ET1310 for re-use. This interrupt is one
		 * method of returning resources.
		 */

		/* If the user has flow control on, then we will
		 * send a pause packet, otherwise just exit
		 */
		if (adapter->flow == FLOW_TXONLY || adapter->flow == FLOW_BOTH) {
			u32 pm_csr;

			/* Tell the device to send a pause packet via the back
			 * pressure register (bp req and bp xon/xoff)
			 * NOTE(review): pm_csr is read but its value only
			 * gates nothing here - possibly only the register
			 * read side effect is wanted; confirm before removal.
			 */
			pm_csr = readl(&iomem->global.pm_csr);
			if (!et1310_in_phy_coma(adapter))
				writel(3, &iomem->txmac.bp_ctrl);
		}
	}

	/* Handle Packet Status Ring Low Interrupt */
	if (status & ET_INTR_RXDMA_STAT_LOW) {
		/* Same idea as with the two Free Buffer Rings. Packets going
		 * from the network to the host each consume a free buffer
		 * resource and a packet status resource. These resources are
		 * passed to the OS. When the OS is done with the resources,
		 * they need to be returned to the ET1310. This is one method
		 * of returning the resources.
		 */
	}

	if (status & ET_INTR_RXDMA_ERR) {
		/* The rxdma_error interrupt is sent when a time-out on a
		 * request issued by the JAGCore has occurred or a completion is
		 * returned with an un-successful status. In both cases the
		 * request is considered complete. The JAGCore will
		 * automatically re-try the request in question. Normally
		 * information on events like these are sent to the host using
		 * the "Advanced Error Reporting" capability. This interrupt is
		 * another way of getting similar information. The only thing
		 * required is to clear the interrupt by reading the ISR in the
		 * global resources. The JAGCore will do a re-try on the
		 * request. Normally you should never see this interrupt. If
		 * you start to see this interrupt occurring frequently then
		 * something bad has occurred. A reset might be the thing to do.
		 */
		/* TRAP();*/

		dev_warn(&adapter->pdev->dev, "RxDMA_ERR interrupt, error %x\n",
			 readl(&iomem->txmac.tx_test));
	}

	/* Handle the Wake on LAN Event */
	if (status & ET_INTR_WOL) {
		/* This is a secondary interrupt for wake on LAN. The driver
		 * should never see this, if it does, something serious is
		 * wrong.
		 */
		dev_err(&adapter->pdev->dev, "WAKE_ON_LAN interrupt\n");
	}

	if (status & ET_INTR_TXMAC) {
		u32 err = readl(&iomem->txmac.err);

		/* When any of the errors occur and TXMAC generates an
		 * interrupt to report these errors, it usually means that
		 * TXMAC has detected an error in the data stream retrieved
		 * from the on-chip Tx Q. All of these errors are catastrophic
		 * and TXMAC won't be able to recover data when these errors
		 * occur. In a nutshell, the whole Tx path will have to be reset
		 * and re-configured afterwards.
		 */
		dev_warn(&adapter->pdev->dev, "TXMAC interrupt, error 0x%08x\n",
			 err);

		/* If we are debugging, we want to see this error, otherwise we
		 * just want the device to be reset and continue
		 */
	}

	if (status & ET_INTR_RXMAC) {
		/* These interrupts are catastrophic to the device, what we need
		 * to do is disable the interrupts and set the flag to cause us
		 * to reset so we can solve this issue.
		 */
		dev_warn(&adapter->pdev->dev,
			 "RXMAC interrupt, error 0x%08x. Requesting reset\n",
			 readl(&iomem->rxmac.err_reg));

		dev_warn(&adapter->pdev->dev,
			 "Enable 0x%08x, Diag 0x%08x\n",
			 readl(&iomem->rxmac.ctrl),
			 readl(&iomem->rxmac.rxq_diag));

		/* If we are debugging, we want to see this error, otherwise we
		 * just want the device to be reset and continue
		 */
	}

	if (status & ET_INTR_MAC_STAT) {
		/* This means at least one of the un-masked counters in the
		 * MAC_STAT block has rolled over. Use this to maintain the top,
		 * software managed bits of the counter(s).
		 */
		et1310_handle_macstat_interrupt(adapter);
	}

	if (status & ET_INTR_SLV_TIMEOUT) {
		/* This means a timeout has occurred on a read or write request
		 * to one of the JAGCore registers. The Global Resources block
		 * has terminated the request and on a read request, returned a
		 * "fake" value. The most likely reasons are: Bad Address or the
		 * addressed module is in a power-down state and can't respond.
		 */
	}

out:
	if (enable_interrupts)
		et131x_enable_interrupts(adapter);

	return IRQ_RETVAL(handled);
}
3590
3591static int et131x_poll(struct napi_struct *napi, int budget)
3592{
3593 struct et131x_adapter *adapter =
3594 container_of(napi, struct et131x_adapter, napi);
3595 int work_done = et131x_handle_recv_pkts(adapter, budget);
3596
3597 et131x_handle_send_pkts(adapter);
3598
3599 if (work_done < budget) {
3600 napi_complete(&adapter->napi);
3601 et131x_enable_interrupts(adapter);
3602 }
3603
3604 return work_done;
3605}
3606
3607/* et131x_stats - Return the current device statistics */
3608static struct net_device_stats *et131x_stats(struct net_device *netdev)
3609{
3610 struct et131x_adapter *adapter = netdev_priv(netdev);
3611 struct net_device_stats *stats = &adapter->netdev->stats;
3612 struct ce_stats *devstat = &adapter->stats;
3613
3614 stats->rx_errors = devstat->rx_length_errs +
3615 devstat->rx_align_errs +
3616 devstat->rx_crc_errs +
3617 devstat->rx_code_violations +
3618 devstat->rx_other_errs;
3619 stats->tx_errors = devstat->tx_max_pkt_errs;
3620 stats->multicast = devstat->multicast_pkts_rcvd;
3621 stats->collisions = devstat->tx_collisions;
3622
3623 stats->rx_length_errors = devstat->rx_length_errs;
3624 stats->rx_over_errors = devstat->rx_overflows;
3625 stats->rx_crc_errors = devstat->rx_crc_errs;
3626 stats->rx_dropped = devstat->rcvd_pkts_dropped;
3627
3628 /* NOTE: Not used, can't find analogous statistics */
3629 /* stats->rx_frame_errors = devstat->; */
3630 /* stats->rx_fifo_errors = devstat->; */
3631 /* stats->rx_missed_errors = devstat->; */
3632
3633 /* stats->tx_aborted_errors = devstat->; */
3634 /* stats->tx_carrier_errors = devstat->; */
3635 /* stats->tx_fifo_errors = devstat->; */
3636 /* stats->tx_heartbeat_errors = devstat->; */
3637 /* stats->tx_window_errors = devstat->; */
3638 return stats;
3639}
3640
3641static int et131x_open(struct net_device *netdev)
3642{
3643 struct et131x_adapter *adapter = netdev_priv(netdev);
3644 struct pci_dev *pdev = adapter->pdev;
3645 unsigned int irq = pdev->irq;
3646 int result;
3647
3648 /* Start the timer to track NIC errors */
3649 init_timer(&adapter->error_timer);
3650 adapter->error_timer.expires = jiffies + TX_ERROR_PERIOD * HZ / 1000;
3651 adapter->error_timer.function = et131x_error_timer_handler;
3652 adapter->error_timer.data = (unsigned long)adapter;
3653 add_timer(&adapter->error_timer);
3654
3655 result = request_irq(irq, et131x_isr,
3656 IRQF_SHARED, netdev->name, netdev);
3657 if (result) {
3658 dev_err(&pdev->dev, "could not register IRQ %d\n", irq);
3659 return result;
3660 }
3661
3662 adapter->flags |= FMP_ADAPTER_INTERRUPT_IN_USE;
3663
3664 napi_enable(&adapter->napi);
3665
3666 et131x_up(netdev);
3667
3668 return result;
3669}
3670
/* et131x_close - called when the interface is brought down.
 *
 * Reverse of et131x_open(): quiesce the data path, disable NAPI, release
 * the IRQ and finally stop the periodic error timer.  Returns the
 * del_timer_sync() result (nonzero if the timer was still pending).
 */
static int et131x_close(struct net_device *netdev)
{
	struct et131x_adapter *adapter = netdev_priv(netdev);

	et131x_down(netdev);
	napi_disable(&adapter->napi);

	adapter->flags &= ~FMP_ADAPTER_INTERRUPT_IN_USE;
	free_irq(adapter->pdev->irq, netdev);

	/* Stop the error timer */
	return del_timer_sync(&adapter->error_timer);
}
3684
3685static int et131x_ioctl(struct net_device *netdev, struct ifreq *reqbuf,
3686 int cmd)
3687{
3688 struct et131x_adapter *adapter = netdev_priv(netdev);
3689
3690 if (!adapter->phydev)
3691 return -EINVAL;
3692
3693 return phy_mii_ioctl(adapter->phydev, reqbuf, cmd);
3694}
3695
3696/* et131x_set_packet_filter - Configures the Rx Packet filtering */
3697static int et131x_set_packet_filter(struct et131x_adapter *adapter)
3698{
3699 int filter = adapter->packet_filter;
3700 u32 ctrl;
3701 u32 pf_ctrl;
3702
3703 ctrl = readl(&adapter->regs->rxmac.ctrl);
3704 pf_ctrl = readl(&adapter->regs->rxmac.pf_ctrl);
3705
3706 /* Default to disabled packet filtering */
3707 ctrl |= 0x04;
3708
3709 /* Set us to be in promiscuous mode so we receive everything, this
3710 * is also true when we get a packet filter of 0
3711 */
3712 if ((filter & ET131X_PACKET_TYPE_PROMISCUOUS) || filter == 0)
3713 pf_ctrl &= ~7; /* Clear filter bits */
3714 else {
3715 /* Set us up with Multicast packet filtering. Three cases are
3716 * possible - (1) we have a multi-cast list, (2) we receive ALL
3717 * multicast entries or (3) we receive none.
3718 */
3719 if (filter & ET131X_PACKET_TYPE_ALL_MULTICAST)
3720 pf_ctrl &= ~2; /* Multicast filter bit */
3721 else {
3722 et1310_setup_device_for_multicast(adapter);
3723 pf_ctrl |= 2;
3724 ctrl &= ~0x04;
3725 }
3726
3727 /* Set us up with Unicast packet filtering */
3728 if (filter & ET131X_PACKET_TYPE_DIRECTED) {
3729 et1310_setup_device_for_unicast(adapter);
3730 pf_ctrl |= 4;
3731 ctrl &= ~0x04;
3732 }
3733
3734 /* Set us up with Broadcast packet filtering */
3735 if (filter & ET131X_PACKET_TYPE_BROADCAST) {
3736 pf_ctrl |= 1; /* Broadcast filter bit */
3737 ctrl &= ~0x04;
3738 } else {
3739 pf_ctrl &= ~1;
3740 }
3741
3742 /* Setup the receive mac configuration registers - Packet
3743 * Filter control + the enable / disable for packet filter
3744 * in the control reg.
3745 */
3746 writel(pf_ctrl, &adapter->regs->rxmac.pf_ctrl);
3747 writel(ctrl, &adapter->regs->rxmac.ctrl);
3748 }
3749 return 0;
3750}
3751
3752static void et131x_multicast(struct net_device *netdev)
3753{
3754 struct et131x_adapter *adapter = netdev_priv(netdev);
3755 int packet_filter;
3756 struct netdev_hw_addr *ha;
3757 int i;
3758
3759 /* Before we modify the platform-independent filter flags, store them
3760 * locally. This allows us to determine if anything's changed and if
3761 * we even need to bother the hardware
3762 */
3763 packet_filter = adapter->packet_filter;
3764
3765 /* Clear the 'multicast' flag locally; because we only have a single
3766 * flag to check multicast, and multiple multicast addresses can be
3767 * set, this is the easiest way to determine if more than one
3768 * multicast address is being set.
3769 */
3770 packet_filter &= ~ET131X_PACKET_TYPE_MULTICAST;
3771
3772 /* Check the net_device flags and set the device independent flags
3773 * accordingly
3774 */
3775 if (netdev->flags & IFF_PROMISC)
3776 adapter->packet_filter |= ET131X_PACKET_TYPE_PROMISCUOUS;
3777 else
3778 adapter->packet_filter &= ~ET131X_PACKET_TYPE_PROMISCUOUS;
3779
3780 if ((netdev->flags & IFF_ALLMULTI) ||
3781 (netdev_mc_count(netdev) > NIC_MAX_MCAST_LIST))
3782 adapter->packet_filter |= ET131X_PACKET_TYPE_ALL_MULTICAST;
3783
3784 if (netdev_mc_count(netdev) < 1) {
3785 adapter->packet_filter &= ~ET131X_PACKET_TYPE_ALL_MULTICAST;
3786 adapter->packet_filter &= ~ET131X_PACKET_TYPE_MULTICAST;
3787 } else {
3788 adapter->packet_filter |= ET131X_PACKET_TYPE_MULTICAST;
3789 }
3790
3791 /* Set values in the private adapter struct */
3792 i = 0;
3793 netdev_for_each_mc_addr(ha, netdev) {
3794 if (i == NIC_MAX_MCAST_LIST)
3795 break;
3796 ether_addr_copy(adapter->multicast_list[i++], ha->addr);
3797 }
3798 adapter->multicast_addr_count = i;
3799
3800 /* Are the new flags different from the previous ones? If not, then no
3801 * action is required
3802 *
3803 * NOTE - This block will always update the multicast_list with the
3804 * hardware, even if the addresses aren't the same.
3805 */
3806 if (packet_filter != adapter->packet_filter)
3807 et131x_set_packet_filter(adapter);
3808}
3809
3810static netdev_tx_t et131x_tx(struct sk_buff *skb, struct net_device *netdev)
3811{
3812 struct et131x_adapter *adapter = netdev_priv(netdev);
3813 struct tx_ring *tx_ring = &adapter->tx_ring;
3814
3815 /* stop the queue if it's getting full */
3816 if (tx_ring->used >= NUM_TCB - 1 && !netif_queue_stopped(netdev))
3817 netif_stop_queue(netdev);
3818
3819 /* Save the timestamp for the TX timeout watchdog */
3820 netdev->trans_start = jiffies;
3821
3822 /* TCB is not available */
3823 if (tx_ring->used >= NUM_TCB)
3824 goto drop_err;
3825
3826 if ((adapter->flags & FMP_ADAPTER_FAIL_SEND_MASK) ||
3827 !netif_carrier_ok(netdev))
3828 goto drop_err;
3829
3830 if (send_packet(skb, adapter))
3831 goto drop_err;
3832
3833 return NETDEV_TX_OK;
3834
3835drop_err:
3836 dev_kfree_skb_any(skb);
3837 adapter->netdev->stats.tx_dropped++;
3838 return NETDEV_TX_OK;
3839}
3840
3841/* et131x_tx_timeout - Timeout handler
3842 *
3843 * The handler called when a Tx request times out. The timeout period is
3844 * specified by the 'tx_timeo" element in the net_device structure (see
3845 * et131x_alloc_device() to see how this value is set).
3846 */
3847static void et131x_tx_timeout(struct net_device *netdev)
3848{
3849 struct et131x_adapter *adapter = netdev_priv(netdev);
3850 struct tx_ring *tx_ring = &adapter->tx_ring;
3851 struct tcb *tcb;
3852 unsigned long flags;
3853
3854 /* If the device is closed, ignore the timeout */
3855 if (~(adapter->flags & FMP_ADAPTER_INTERRUPT_IN_USE))
3856 return;
3857
3858 /* Any nonrecoverable hardware error?
3859 * Checks adapter->flags for any failure in phy reading
3860 */
3861 if (adapter->flags & FMP_ADAPTER_NON_RECOVER_ERROR)
3862 return;
3863
3864 /* Hardware failure? */
3865 if (adapter->flags & FMP_ADAPTER_HARDWARE_ERROR) {
3866 dev_err(&adapter->pdev->dev, "hardware error - reset\n");
3867 return;
3868 }
3869
3870 /* Is send stuck? */
3871 spin_lock_irqsave(&adapter->tcb_send_qlock, flags);
3872 tcb = tx_ring->send_head;
3873 spin_unlock_irqrestore(&adapter->tcb_send_qlock, flags);
3874
3875 if (tcb) {
3876 tcb->count++;
3877
3878 if (tcb->count > NIC_SEND_HANG_THRESHOLD) {
3879 dev_warn(&adapter->pdev->dev,
3880 "Send stuck - reset. tcb->WrIndex %x\n",
3881 tcb->index);
3882
3883 adapter->netdev->stats.tx_errors++;
3884
3885 /* perform reset of tx/rx */
3886 et131x_disable_txrx(netdev);
3887 et131x_enable_txrx(netdev);
3888 }
3889 }
3890}
3891
/* et131x_change_mtu - .ndo_change_mtu handler.
 *
 * The hardware DMA rings are sized for the frame size, so changing the
 * MTU requires a full teardown/rebuild: stop traffic, free the DMA
 * memory, soft-reset the chip, re-allocate, and reprogram.  The order of
 * these steps is significant.
 *
 * Returns 0 on success, -EINVAL for an out-of-range MTU, or the
 * allocation error.  NOTE(review): on allocation failure the device is
 * left with tx/rx disabled and no DMA memory - confirm callers can cope
 * with that state.
 */
static int et131x_change_mtu(struct net_device *netdev, int new_mtu)
{
	int result = 0;
	struct et131x_adapter *adapter = netdev_priv(netdev);

	/* Accepted range is 64..9216 bytes (jumbo frames supported) */
	if (new_mtu < 64 || new_mtu > 9216)
		return -EINVAL;

	et131x_disable_txrx(netdev);

	netdev->mtu = new_mtu;

	et131x_adapter_memory_free(adapter);

	/* Set the config parameter for Jumbo Packet support.
	 * The +14 presumably accounts for the Ethernet header (ETH_HLEN) -
	 * TODO confirm.
	 */
	adapter->registry_jumbo_packet = new_mtu + 14;
	et131x_soft_reset(adapter);

	result = et131x_adapter_memory_alloc(adapter);
	if (result != 0) {
		dev_warn(&adapter->pdev->dev,
			 "Change MTU failed; couldn't re-alloc DMA memory\n");
		return result;
	}

	et131x_init_send(adapter);
	et131x_hwaddr_init(adapter);
	ether_addr_copy(netdev->dev_addr, adapter->addr);

	/* Init the device with the new settings */
	et131x_adapter_setup(adapter);
	et131x_enable_txrx(netdev);

	return result;
}
3927
/* net_device operations exported to the network core.  MAC-address and
 * address-validation handling use the generic ethernet helpers; all
 * other callbacks are implemented above.
 */
static const struct net_device_ops et131x_netdev_ops = {
	.ndo_open		= et131x_open,
	.ndo_stop		= et131x_close,
	.ndo_start_xmit		= et131x_tx,
	.ndo_set_rx_mode	= et131x_multicast,
	.ndo_tx_timeout		= et131x_tx_timeout,
	.ndo_change_mtu		= et131x_change_mtu,
	.ndo_set_mac_address	= eth_mac_addr,
	.ndo_validate_addr	= eth_validate_addr,
	.ndo_get_stats		= et131x_stats,
	.ndo_do_ioctl		= et131x_ioctl,
};
3940
/* et131x_pci_setup - PCI probe callback.
 *
 * Brings the device up far enough to register a net_device: enables the
 * PCI device, maps BAR 0, resets the chip, allocates DMA rings, sets up
 * the MDIO bus and attaches the PHY, then registers with the network
 * core.  Interrupts are deliberately left disabled here; the handler is
 * requested in open().
 *
 * The error ladder at the bottom unwinds in exact reverse order of
 * acquisition - keep any new resource paired with a matching label.
 *
 * Returns 0 on success or a negative errno.
 */
static int et131x_pci_setup(struct pci_dev *pdev,
			    const struct pci_device_id *ent)
{
	struct net_device *netdev;
	struct et131x_adapter *adapter;
	int rc;
	int ii;

	rc = pci_enable_device(pdev);
	if (rc < 0) {
		dev_err(&pdev->dev, "pci_enable_device() failed\n");
		goto out;
	}

	/* Perform some basic PCI checks */
	if (!(pci_resource_flags(pdev, 0) & IORESOURCE_MEM)) {
		dev_err(&pdev->dev, "Can't find PCI device's base address\n");
		rc = -ENODEV;
		goto err_disable;
	}

	rc = pci_request_regions(pdev, DRIVER_NAME);
	if (rc < 0) {
		dev_err(&pdev->dev, "Can't get PCI resources\n");
		goto err_disable;
	}

	pci_set_master(pdev);

	/* Check the DMA addressing support of this device: prefer 64-bit,
	 * fall back to 32-bit.
	 */
	if (dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64)) &&
	    dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32))) {
		dev_err(&pdev->dev, "No usable DMA addressing method\n");
		rc = -EIO;
		goto err_release_res;
	}

	netdev = alloc_etherdev(sizeof(struct et131x_adapter));
	if (!netdev) {
		dev_err(&pdev->dev, "Couldn't alloc netdev struct\n");
		rc = -ENOMEM;
		goto err_release_res;
	}

	netdev->watchdog_timeo = ET131X_TX_TIMEOUT;
	netdev->netdev_ops     = &et131x_netdev_ops;

	SET_NETDEV_DEV(netdev, &pdev->dev);
	netdev->ethtool_ops = &et131x_ethtool_ops;

	/* NOTE(review): the return value of et131x_adapter_init() is not
	 * checked here - confirm it cannot fail, or add a check.
	 */
	adapter = et131x_adapter_init(netdev, pdev);

	rc = et131x_pci_init(adapter, pdev);
	if (rc < 0)
		goto err_free_dev;

	/* Map the bus-relative registers to system virtual memory */
	adapter->regs = pci_ioremap_bar(pdev, 0);
	if (!adapter->regs) {
		dev_err(&pdev->dev, "Cannot map device registers\n");
		rc = -ENOMEM;
		goto err_free_dev;
	}

	/* If Phy COMA mode was enabled when we went down, disable it here. */
	writel(ET_PMCSR_INIT,  &adapter->regs->global.pm_csr);

	et131x_soft_reset(adapter);
	et131x_disable_interrupts(adapter);

	rc = et131x_adapter_memory_alloc(adapter);
	if (rc < 0) {
		dev_err(&pdev->dev, "Could not alloc adapter memory (DMA)\n");
		goto err_iounmap;
	}

	et131x_init_send(adapter);

	netif_napi_add(netdev, &adapter->napi, et131x_poll, 64);

	ether_addr_copy(netdev->dev_addr, adapter->addr);

	/* Default error for the allocation failures below */
	rc = -ENOMEM;

	adapter->mii_bus = mdiobus_alloc();
	if (!adapter->mii_bus) {
		dev_err(&pdev->dev, "Alloc of mii_bus struct failed\n");
		goto err_mem_free;
	}

	adapter->mii_bus->name = "et131x_eth_mii";
	snprintf(adapter->mii_bus->id, MII_BUS_ID_SIZE, "%x",
		 (adapter->pdev->bus->number << 8) | adapter->pdev->devfn);
	adapter->mii_bus->priv = netdev;
	adapter->mii_bus->read = et131x_mdio_read;
	adapter->mii_bus->write = et131x_mdio_write;
	adapter->mii_bus->irq = kmalloc_array(PHY_MAX_ADDR, sizeof(int),
					      GFP_KERNEL);
	if (!adapter->mii_bus->irq)
		goto err_mdio_free;

	/* No PHY interrupt line - poll every address */
	for (ii = 0; ii < PHY_MAX_ADDR; ii++)
		adapter->mii_bus->irq[ii] = PHY_POLL;

	rc = mdiobus_register(adapter->mii_bus);
	if (rc < 0) {
		dev_err(&pdev->dev, "failed to register MII bus\n");
		goto err_mdio_free_irq;
	}

	rc = et131x_mii_probe(netdev);
	if (rc < 0) {
		dev_err(&pdev->dev, "failed to probe MII bus\n");
		goto err_mdio_unregister;
	}

	et131x_adapter_setup(adapter);

	/* Init variable for counting how long we do not have link status */
	adapter->boot_coma = 0;
	et1310_disable_phy_coma(adapter);

	/* We can enable interrupts now
	 *
	 *  NOTE - Because registration of interrupt handler is done in the
	 *         device's open(), defer enabling device interrupts to that
	 *         point
	 */

	rc = register_netdev(netdev);
	if (rc < 0) {
		dev_err(&pdev->dev, "register_netdev() failed\n");
		goto err_phy_disconnect;
	}

	/* Register the net_device struct with the PCI subsystem. Save a copy
	 * of the PCI config space for this device now that the device has
	 * been initialized, just in case it needs to be quickly restored.
	 */
	pci_set_drvdata(pdev, netdev);
out:
	return rc;

err_phy_disconnect:
	phy_disconnect(adapter->phydev);
err_mdio_unregister:
	mdiobus_unregister(adapter->mii_bus);
err_mdio_free_irq:
	kfree(adapter->mii_bus->irq);
err_mdio_free:
	mdiobus_free(adapter->mii_bus);
err_mem_free:
	et131x_adapter_memory_free(adapter);
err_iounmap:
	iounmap(adapter->regs);
err_free_dev:
	/* Presumably balances a pci_dev_get() taken inside
	 * et131x_adapter_init() - verify the reference counting.
	 */
	pci_dev_put(pdev);
	free_netdev(netdev);
err_release_res:
	pci_release_regions(pdev);
err_disable:
	pci_disable_device(pdev);
	goto out;
}
4105
/* PCI IDs claimed by this driver: the gigabit and the fast-ethernet
 * variants of the Agere ET-1310.
 */
static const struct pci_device_id et131x_pci_table[] = {
	{ PCI_VDEVICE(ATT, ET131X_PCI_DEVICE_ID_GIG), 0UL},
	{ PCI_VDEVICE(ATT, ET131X_PCI_DEVICE_ID_FAST), 0UL},
	{ 0,}
};
MODULE_DEVICE_TABLE(pci, et131x_pci_table);
4112
/* PCI driver glue: probe/remove plus power-management callbacks.
 * module_pci_driver() generates the module init/exit boilerplate.
 */
static struct pci_driver et131x_driver = {
	.name		= DRIVER_NAME,
	.id_table	= et131x_pci_table,
	.probe		= et131x_pci_setup,
	.remove		= et131x_pci_remove,
	.driver.pm	= &et131x_pm_ops,
};

module_pci_driver(et131x_driver);
diff --git a/drivers/net/ethernet/agere/et131x.h b/drivers/net/ethernet/agere/et131x.h
new file mode 100644
index 000000000000..be9a11c02526
--- /dev/null
+++ b/drivers/net/ethernet/agere/et131x.h
@@ -0,0 +1,1433 @@
1/* Copyright © 2005 Agere Systems Inc.
2 * All rights reserved.
3 * http://www.agere.com
4 *
5 * SOFTWARE LICENSE
6 *
7 * This software is provided subject to the following terms and conditions,
8 * which you should read carefully before using the software. Using this
9 * software indicates your acceptance of these terms and conditions. If you do
10 * not agree with these terms and conditions, do not use the software.
11 *
12 * Copyright © 2005 Agere Systems Inc.
13 * All rights reserved.
14 *
15 * Redistribution and use in source or binary forms, with or without
16 * modifications, are permitted provided that the following conditions are met:
17 *
18 * . Redistributions of source code must retain the above copyright notice, this
19 * list of conditions and the following Disclaimer as comments in the code as
20 * well as in the documentation and/or other materials provided with the
21 * distribution.
22 *
23 * . Redistributions in binary form must reproduce the above copyright notice,
24 * this list of conditions and the following Disclaimer in the documentation
25 * and/or other materials provided with the distribution.
26 *
27 * . Neither the name of Agere Systems Inc. nor the names of the contributors
28 * may be used to endorse or promote products derived from this software
29 * without specific prior written permission.
30 *
31 * Disclaimer
32 *
33 * THIS SOFTWARE IS PROVIDED "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES,
34 * INCLUDING, BUT NOT LIMITED TO, INFRINGEMENT AND THE IMPLIED WARRANTIES OF
35 * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. ANY
36 * USE, MODIFICATION OR DISTRIBUTION OF THIS SOFTWARE IS SOLELY AT THE USERS OWN
37 * RISK. IN NO EVENT SHALL AGERE SYSTEMS INC. OR CONTRIBUTORS BE LIABLE FOR ANY
38 * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
39 * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
40 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
41 * ON ANY THEORY OF LIABILITY, INCLUDING, BUT NOT LIMITED TO, CONTRACT, STRICT
42 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
43 * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH
44 * DAMAGE.
45 *
46 */
47
48#define DRIVER_NAME "et131x"
49#define DRIVER_VERSION "v2.0"
50
51/* EEPROM registers */
52
53/* LBCIF Register Groups (addressed via 32-bit offsets) */
54#define LBCIF_DWORD0_GROUP 0xAC
55#define LBCIF_DWORD1_GROUP 0xB0
56
57/* LBCIF Registers (addressed via 8-bit offsets) */
58#define LBCIF_ADDRESS_REGISTER 0xAC
59#define LBCIF_DATA_REGISTER 0xB0
60#define LBCIF_CONTROL_REGISTER 0xB1
61#define LBCIF_STATUS_REGISTER 0xB2
62
63/* LBCIF Control Register Bits */
64#define LBCIF_CONTROL_SEQUENTIAL_READ 0x01
65#define LBCIF_CONTROL_PAGE_WRITE 0x02
66#define LBCIF_CONTROL_EEPROM_RELOAD 0x08
67#define LBCIF_CONTROL_TWO_BYTE_ADDR 0x20
68#define LBCIF_CONTROL_I2C_WRITE 0x40
69#define LBCIF_CONTROL_LBCIF_ENABLE 0x80
70
71/* LBCIF Status Register Bits */
72#define LBCIF_STATUS_PHY_QUEUE_AVAIL 0x01
73#define LBCIF_STATUS_I2C_IDLE 0x02
74#define LBCIF_STATUS_ACK_ERROR 0x04
75#define LBCIF_STATUS_GENERAL_ERROR 0x08
76#define LBCIF_STATUS_CHECKSUM_ERROR 0x40
77#define LBCIF_STATUS_EEPROM_PRESENT 0x80
78
79/* START OF GLOBAL REGISTER ADDRESS MAP */
80/* 10bit registers
81 *
82 * Tx queue start address reg in global address map at address 0x0000
83 * tx queue end address reg in global address map at address 0x0004
84 * rx queue start address reg in global address map at address 0x0008
85 * rx queue end address reg in global address map at address 0x000C
86 */
87
88/* structure for power management control status reg in global address map
89 * located at address 0x0010
90 * jagcore_rx_rdy bit 9
91 * jagcore_tx_rdy bit 8
92 * phy_lped_en bit 7
93 * phy_sw_coma bit 6
94 * rxclk_gate bit 5
95 * txclk_gate bit 4
96 * sysclk_gate bit 3
97 * jagcore_rx_en bit 2
98 * jagcore_tx_en bit 1
99 * gigephy_en bit 0
100 */
101#define ET_PM_PHY_SW_COMA 0x40
102#define ET_PMCSR_INIT 0x38
103
104/* Interrupt status reg at address 0x0018
105 */
106#define ET_INTR_TXDMA_ISR 0x00000008
107#define ET_INTR_TXDMA_ERR 0x00000010
108#define ET_INTR_RXDMA_XFR_DONE 0x00000020
109#define ET_INTR_RXDMA_FB_R0_LOW 0x00000040
110#define ET_INTR_RXDMA_FB_R1_LOW 0x00000080
111#define ET_INTR_RXDMA_STAT_LOW 0x00000100
112#define ET_INTR_RXDMA_ERR 0x00000200
113#define ET_INTR_WATCHDOG 0x00004000
114#define ET_INTR_WOL 0x00008000
115#define ET_INTR_PHY 0x00010000
116#define ET_INTR_TXMAC 0x00020000
117#define ET_INTR_RXMAC 0x00040000
118#define ET_INTR_MAC_STAT 0x00080000
119#define ET_INTR_SLV_TIMEOUT 0x00100000
120
121/* Interrupt mask register at address 0x001C
122 * Interrupt alias clear mask reg at address 0x0020
123 * Interrupt status alias reg at address 0x0024
124 *
125 * Same masks as above
126 */
127
128/* Software reset reg at address 0x0028
129 * 0: txdma_sw_reset
130 * 1: rxdma_sw_reset
131 * 2: txmac_sw_reset
132 * 3: rxmac_sw_reset
133 * 4: mac_sw_reset
134 * 5: mac_stat_sw_reset
135 * 6: mmc_sw_reset
136 *31: selfclr_disable
137 */
138#define ET_RESET_ALL 0x007F
139
140/* SLV Timer reg at address 0x002C (low 24 bits)
141 */
142
143/* MSI Configuration reg at address 0x0030
144 */
145#define ET_MSI_VECTOR 0x0000001F
146#define ET_MSI_TC 0x00070000
147
148/* Loopback reg located at address 0x0034
149 */
150#define ET_LOOP_MAC 0x00000001
151#define ET_LOOP_DMA 0x00000002
152
/* GLOBAL Module of JAGCore Address Mapping
 * Located at address 0x0000
 *
 * Hardware register layout - field order and sizes mirror the device
 * register map exactly and must not be changed.
 */
struct global_regs {			/* Location: */
	u32 txq_start_addr;		/*  0x0000 */
	u32 txq_end_addr;		/*  0x0004 */
	u32 rxq_start_addr;		/*  0x0008 */
	u32 rxq_end_addr;		/*  0x000C */
	u32 pm_csr;			/*  0x0010 */
	u32 unused;			/*  0x0014 */
	u32 int_status;			/*  0x0018 */
	u32 int_mask;			/*  0x001C */
	u32 int_alias_clr_en;		/*  0x0020 */
	u32 int_status_alias;		/*  0x0024 */
	u32 sw_reset;			/*  0x0028 */
	u32 slv_timer;			/*  0x002C */
	u32 msi_config;			/*  0x0030 */
	u32 loopback;			/*  0x0034 */
	u32 watchdog_timer;		/*  0x0038 */
};
173
174/* START OF TXDMA REGISTER ADDRESS MAP */
175/* txdma control status reg at address 0x1000
176 */
177#define ET_TXDMA_CSR_HALT 0x00000001
178#define ET_TXDMA_DROP_TLP 0x00000002
179#define ET_TXDMA_CACHE_THRS 0x000000F0
180#define ET_TXDMA_CACHE_SHIFT 4
181#define ET_TXDMA_SNGL_EPKT 0x00000100
182#define ET_TXDMA_CLASS 0x00001E00
183
184/* structure for txdma packet ring base address hi reg in txdma address map
185 * located at address 0x1004
186 * Defined earlier (u32)
187 */
188
189/* structure for txdma packet ring base address low reg in txdma address map
190 * located at address 0x1008
191 * Defined earlier (u32)
192 */
193
194/* structure for txdma packet ring number of descriptor reg in txdma address
195 * map. Located at address 0x100C
196 *
197 * 31-10: unused
198 * 9-0: pr ndes
199 */
200#define ET_DMA12_MASK 0x0FFF /* 12 bit mask for DMA12W types */
201#define ET_DMA12_WRAP 0x1000
202#define ET_DMA10_MASK 0x03FF /* 10 bit mask for DMA10W types */
203#define ET_DMA10_WRAP 0x0400
204#define ET_DMA4_MASK 0x000F /* 4 bit mask for DMA4W types */
205#define ET_DMA4_WRAP 0x0010
206
207#define INDEX12(x) ((x) & ET_DMA12_MASK)
208#define INDEX10(x) ((x) & ET_DMA10_MASK)
209#define INDEX4(x) ((x) & ET_DMA4_MASK)
210
211/* 10bit DMA with wrap
212 * txdma tx queue write address reg in txdma address map at 0x1010
213 * txdma tx queue write address external reg in txdma address map at 0x1014
214 * txdma tx queue read address reg in txdma address map at 0x1018
215 *
216 * u32
217 * txdma status writeback address hi reg in txdma address map at0x101C
218 * txdma status writeback address lo reg in txdma address map at 0x1020
219 *
220 * 10bit DMA with wrap
221 * txdma service request reg in txdma address map at 0x1024
222 * structure for txdma service complete reg in txdma address map at 0x1028
223 *
224 * 4bit DMA with wrap
225 * txdma tx descriptor cache read index reg in txdma address map at 0x102C
226 * txdma tx descriptor cache write index reg in txdma address map at 0x1030
227 *
228 * txdma error reg in txdma address map at address 0x1034
229 * 0: PyldResend
230 * 1: PyldRewind
231 * 4: DescrResend
232 * 5: DescrRewind
233 * 8: WrbkResend
234 * 9: WrbkRewind
235 */
236
/* Tx DMA Module of JAGCore Address Mapping
 * Located at address 0x1000
 *
 * Hardware register layout - field order and sizes mirror the device
 * register map exactly and must not be changed.
 */
struct txdma_regs {			/* Location: */
	u32 csr;			/*  0x1000 */
	u32 pr_base_hi;			/*  0x1004 */
	u32 pr_base_lo;			/*  0x1008 */
	u32 pr_num_des;			/*  0x100C */
	u32 txq_wr_addr;		/*  0x1010 */
	u32 txq_wr_addr_ext;		/*  0x1014 */
	u32 txq_rd_addr;		/*  0x1018 */
	u32 dma_wb_base_hi;		/*  0x101C */
	u32 dma_wb_base_lo;		/*  0x1020 */
	u32 service_request;		/*  0x1024 */
	u32 service_complete;		/*  0x1028 */
	u32 cache_rd_index;		/*  0x102C */
	u32 cache_wr_index;		/*  0x1030 */
	u32 tx_dma_error;		/*  0x1034 */
	u32 desc_abort_cnt;		/*  0x1038 */
	u32 payload_abort_cnt;		/*  0x103c */
	u32 writeback_abort_cnt;	/*  0x1040 */
	u32 desc_timeout_cnt;		/*  0x1044 */
	u32 payload_timeout_cnt;	/*  0x1048 */
	u32 writeback_timeout_cnt;	/*  0x104c */
	u32 desc_error_cnt;		/*  0x1050 */
	u32 payload_error_cnt;		/*  0x1054 */
	u32 writeback_error_cnt;	/*  0x1058 */
	u32 dropped_tlp_cnt;		/*  0x105c */
	u32 new_service_complete;	/*  0x1060 */
	u32 ethernet_packet_cnt;	/*  0x1064 */
};
268
269/* END OF TXDMA REGISTER ADDRESS MAP */
270
271/* START OF RXDMA REGISTER ADDRESS MAP */
272/* structure for control status reg in rxdma address map
273 * Located at address 0x2000
274 *
275 * CSR
276 * 0: halt
277 * 1-3: tc
278 * 4: fbr_big_endian
279 * 5: psr_big_endian
280 * 6: pkt_big_endian
281 * 7: dma_big_endian
282 * 8-9: fbr0_size
283 * 10: fbr0_enable
284 * 11-12: fbr1_size
285 * 13: fbr1_enable
286 * 14: unused
287 * 15: pkt_drop_disable
288 * 16: pkt_done_flush
289 * 17: halt_status
290 * 18-31: unused
291 */
292#define ET_RXDMA_CSR_HALT 0x0001
293#define ET_RXDMA_CSR_FBR0_SIZE_LO 0x0100
294#define ET_RXDMA_CSR_FBR0_SIZE_HI 0x0200
295#define ET_RXDMA_CSR_FBR0_ENABLE 0x0400
296#define ET_RXDMA_CSR_FBR1_SIZE_LO 0x0800
297#define ET_RXDMA_CSR_FBR1_SIZE_HI 0x1000
298#define ET_RXDMA_CSR_FBR1_ENABLE 0x2000
299#define ET_RXDMA_CSR_HALT_STATUS 0x00020000
300
301/* structure for dma writeback lo reg in rxdma address map
302 * located at address 0x2004
303 * Defined earlier (u32)
304 */
305
306/* structure for dma writeback hi reg in rxdma address map
307 * located at address 0x2008
308 * Defined earlier (u32)
309 */
310
311/* structure for number of packets done reg in rxdma address map
312 * located at address 0x200C
313 *
314 * 31-8: unused
315 * 7-0: num done
316 */
317
318/* structure for max packet time reg in rxdma address map
319 * located at address 0x2010
320 *
321 * 31-18: unused
322 * 17-0: time done
323 */
324
325/* structure for rx queue read address reg in rxdma address map
326 * located at address 0x2014
327 * Defined earlier (u32)
328 */
329
330/* structure for rx queue read address external reg in rxdma address map
331 * located at address 0x2018
332 * Defined earlier (u32)
333 */
334
335/* structure for rx queue write address reg in rxdma address map
336 * located at address 0x201C
337 * Defined earlier (u32)
338 */
339
340/* structure for packet status ring base address lo reg in rxdma address map
341 * located at address 0x2020
342 * Defined earlier (u32)
343 */
344
345/* structure for packet status ring base address hi reg in rxdma address map
346 * located at address 0x2024
347 * Defined earlier (u32)
348 */
349
350/* structure for packet status ring number of descriptors reg in rxdma address
351 * map. Located at address 0x2028
352 *
353 * 31-12: unused
354 * 11-0: psr ndes
355 */
356#define ET_RXDMA_PSR_NUM_DES_MASK 0xFFF
357
358/* structure for packet status ring available offset reg in rxdma address map
359 * located at address 0x202C
360 *
361 * 31-13: unused
362 * 12: psr avail wrap
363 * 11-0: psr avail
364 */
365
366/* structure for packet status ring full offset reg in rxdma address map
367 * located at address 0x2030
368 *
369 * 31-13: unused
370 * 12: psr full wrap
371 * 11-0: psr full
372 */
373
374/* structure for packet status ring access index reg in rxdma address map
375 * located at address 0x2034
376 *
377 * 31-5: unused
378 * 4-0: psr_ai
379 */
380
381/* structure for packet status ring minimum descriptors reg in rxdma address
382 * map. Located at address 0x2038
383 *
384 * 31-12: unused
385 * 11-0: psr_min
386 */
387
388/* structure for free buffer ring base lo address reg in rxdma address map
389 * located at address 0x203C
390 * Defined earlier (u32)
391 */
392
393/* structure for free buffer ring base hi address reg in rxdma address map
394 * located at address 0x2040
395 * Defined earlier (u32)
396 */
397
398/* structure for free buffer ring number of descriptors reg in rxdma address
399 * map. Located at address 0x2044
400 *
401 * 31-10: unused
402 * 9-0: fbr ndesc
403 */
404
405/* structure for free buffer ring 0 available offset reg in rxdma address map
406 * located at address 0x2048
407 * Defined earlier (u32)
408 */
409
410/* structure for free buffer ring 0 full offset reg in rxdma address map
411 * located at address 0x204C
412 * Defined earlier (u32)
413 */
414
415/* structure for free buffer cache 0 full offset reg in rxdma address map
416 * located at address 0x2050
417 *
418 * 31-5: unused
419 * 4-0: fbc rdi
420 */
421
422/* structure for free buffer ring 0 minimum descriptor reg in rxdma address map
423 * located at address 0x2054
424 *
425 * 31-10: unused
426 * 9-0: fbr min
427 */
428
429/* structure for free buffer ring 1 base address lo reg in rxdma address map
430 * located at address 0x2058 - 0x205C
431 * Defined earlier (RXDMA_FBR_BASE_LO_t and RXDMA_FBR_BASE_HI_t)
432 */
433
434/* structure for free buffer ring 1 number of descriptors reg in rxdma address
435 * map. Located at address 0x2060
436 * Defined earlier (RXDMA_FBR_NUM_DES_t)
437 */
438
439/* structure for free buffer ring 1 available offset reg in rxdma address map
440 * located at address 0x2064
441 * Defined Earlier (RXDMA_FBR_AVAIL_OFFSET_t)
442 */
443
444/* structure for free buffer ring 1 full offset reg in rxdma address map
445 * located at address 0x2068
446 * Defined Earlier (RXDMA_FBR_FULL_OFFSET_t)
447 */
448
449/* structure for free buffer cache 1 read index reg in rxdma address map
450 * located at address 0x206C
451 * Defined Earlier (RXDMA_FBC_RD_INDEX_t)
452 */
453
454/* structure for free buffer ring 1 minimum descriptor reg in rxdma address map
455 * located at address 0x2070
456 * Defined Earlier (RXDMA_FBR_MIN_DES_t)
457 */
458
/* Rx DMA Module of JAGCore Address Mapping
 * Located at address 0x2000
 *
 * Hardware register layout - field order and sizes mirror the device
 * register map exactly and must not be changed.  "psr" is the packet
 * status ring, "fbr0"/"fbr1" are the two free buffer rings.
 */
struct rxdma_regs {			/* Location: */
	u32 csr;			/*  0x2000 */
	u32 dma_wb_base_lo;		/*  0x2004 */
	u32 dma_wb_base_hi;		/*  0x2008 */
	u32 num_pkt_done;		/*  0x200C */
	u32 max_pkt_time;		/*  0x2010 */
	u32 rxq_rd_addr;		/*  0x2014 */
	u32 rxq_rd_addr_ext;		/*  0x2018 */
	u32 rxq_wr_addr;		/*  0x201C */
	u32 psr_base_lo;		/*  0x2020 */
	u32 psr_base_hi;		/*  0x2024 */
	u32 psr_num_des;		/*  0x2028 */
	u32 psr_avail_offset;		/*  0x202C */
	u32 psr_full_offset;		/*  0x2030 */
	u32 psr_access_index;		/*  0x2034 */
	u32 psr_min_des;		/*  0x2038 */
	u32 fbr0_base_lo;		/*  0x203C */
	u32 fbr0_base_hi;		/*  0x2040 */
	u32 fbr0_num_des;		/*  0x2044 */
	u32 fbr0_avail_offset;		/*  0x2048 */
	u32 fbr0_full_offset;		/*  0x204C */
	u32 fbr0_rd_index;		/*  0x2050 */
	u32 fbr0_min_des;		/*  0x2054 */
	u32 fbr1_base_lo;		/*  0x2058 */
	u32 fbr1_base_hi;		/*  0x205C */
	u32 fbr1_num_des;		/*  0x2060 */
	u32 fbr1_avail_offset;		/*  0x2064 */
	u32 fbr1_full_offset;		/*  0x2068 */
	u32 fbr1_rd_index;		/*  0x206C */
	u32 fbr1_min_des;		/*  0x2070 */
};
493
494/* END OF RXDMA REGISTER ADDRESS MAP */
495
496/* START OF TXMAC REGISTER ADDRESS MAP */
497/* structure for control reg in txmac address map
498 * located at address 0x3000
499 *
500 * bits
501 * 31-8: unused
502 * 7: cklseg_disable
503 * 6: ckbcnt_disable
504 * 5: cksegnum
505 * 4: async_disable
506 * 3: fc_disable
507 * 2: mcif_disable
508 * 1: mif_disable
509 * 0: txmac_en
510 */
511#define ET_TX_CTRL_FC_DISABLE 0x0008
512#define ET_TX_CTRL_TXMAC_ENABLE 0x0001
513
514/* structure for shadow pointer reg in txmac address map
515 * located at address 0x3004
516 * 31-27: reserved
517 * 26-16: txq rd ptr
518 * 15-11: reserved
519 * 10-0: txq wr ptr
520 */
521
522/* structure for error count reg in txmac address map
523 * located at address 0x3008
524 *
525 * 31-12: unused
526 * 11-8: reserved
527 * 7-4: txq_underrun
528 * 3-0: fifo_underrun
529 */
530
531/* structure for max fill reg in txmac address map
532 * located at address 0x300C
533 * 31-12: unused
534 * 11-0: max fill
535 */
536
537/* structure for cf parameter reg in txmac address map
538 * located at address 0x3010
539 * 31-16: cfep
540 * 15-0: cfpt
541 */
542
543/* structure for tx test reg in txmac address map
544 * located at address 0x3014
545 * 31-17: unused
546 * 16: reserved
547 * 15: txtest_en
548 * 14-11: unused
549 * 10-0: txq test pointer
550 */
551
552/* structure for error reg in txmac address map
553 * located at address 0x3018
554 *
555 * 31-9: unused
556 * 8: fifo_underrun
557 * 7-6: unused
558 * 5: ctrl2_err
559 * 4: txq_underrun
560 * 3: bcnt_err
561 * 2: lseg_err
562 * 1: segnum_err
563 * 0: seg0_err
564 */
565
566/* structure for error interrupt reg in txmac address map
567 * located at address 0x301C
568 *
569 * 31-9: unused
570 * 8: fifo_underrun
571 * 7-6: unused
572 * 5: ctrl2_err
573 * 4: txq_underrun
574 * 3: bcnt_err
575 * 2: lseg_err
576 * 1: segnum_err
577 * 0: seg0_err
578 */
579
580/* structure for error interrupt reg in txmac address map
581 * located at address 0x3020
582 *
583 * 31-2: unused
584 * 1: bp_req
585 * 0: bp_xonxoff
586 */
587
/* Tx MAC Module of JAGCore Address Mapping
 * Located at address 0x3000
 *
 * Hardware register layout - field order and sizes mirror the device
 * register map exactly and must not be changed.
 */
struct txmac_regs {			/* Location: */
	u32 ctl;			/*  0x3000 */
	u32 shadow_ptr;			/*  0x3004 */
	u32 err_cnt;			/*  0x3008 */
	u32 max_fill;			/*  0x300C */
	u32 cf_param;			/*  0x3010 */
	u32 tx_test;			/*  0x3014 */
	u32 err;			/*  0x3018 */
	u32 err_int;			/*  0x301C */
	u32 bp_ctrl;			/*  0x3020 */
};
601
602/* END OF TXMAC REGISTER ADDRESS MAP */
603
604/* START OF RXMAC REGISTER ADDRESS MAP */
605
606/* structure for rxmac control reg in rxmac address map
607 * located at address 0x4000
608 *
609 * 31-7: reserved
610 * 6: rxmac_int_disable
611 * 5: async_disable
612 * 4: mif_disable
613 * 3: wol_disable
614 * 2: pkt_filter_disable
615 * 1: mcif_disable
616 * 0: rxmac_en
617 */
618#define ET_RX_CTRL_WOL_DISABLE 0x0008
619#define ET_RX_CTRL_RXMAC_ENABLE 0x0001
620
621/* structure for Wake On Lan Control and CRC 0 reg in rxmac address map
622 * located at address 0x4004
623 * 31-16: crc
624 * 15-12: reserved
625 * 11: ignore_pp
626 * 10: ignore_mp
627 * 9: clr_intr
628 * 8: ignore_link_chg
629 * 7: ignore_uni
630 * 6: ignore_multi
631 * 5: ignore_broad
632 * 4-0: valid_crc 4-0
633 */
634
635/* structure for CRC 1 and CRC 2 reg in rxmac address map
636 * located at address 0x4008
637 *
638 * 31-16: crc2
639 * 15-0: crc1
640 */
641
642/* structure for CRC 3 and CRC 4 reg in rxmac address map
643 * located at address 0x400C
644 *
645 * 31-16: crc4
646 * 15-0: crc3
647 */
648
649/* structure for Wake On Lan Source Address Lo reg in rxmac address map
650 * located at address 0x4010
651 *
652 * 31-24: sa3
653 * 23-16: sa4
654 * 15-8: sa5
655 * 7-0: sa6
656 */
657#define ET_RX_WOL_LO_SA3_SHIFT 24
658#define ET_RX_WOL_LO_SA4_SHIFT 16
659#define ET_RX_WOL_LO_SA5_SHIFT 8
660
661/* structure for Wake On Lan Source Address Hi reg in rxmac address map
662 * located at address 0x4014
663 *
664 * 31-16: reserved
665 * 15-8: sa1
666 * 7-0: sa2
667 */
668#define ET_RX_WOL_HI_SA1_SHIFT 8
669
670/* structure for Wake On Lan mask reg in rxmac address map
671 * located at address 0x4018 - 0x4064
672 * Defined earlier (u32)
673 */
674
675/* structure for Unicast Packet Filter Address 1 reg in rxmac address map
676 * located at address 0x4068
677 *
678 * 31-24: addr1_3
679 * 23-16: addr1_4
680 * 15-8: addr1_5
681 * 7-0: addr1_6
682 */
683#define ET_RX_UNI_PF_ADDR1_3_SHIFT 24
684#define ET_RX_UNI_PF_ADDR1_4_SHIFT 16
685#define ET_RX_UNI_PF_ADDR1_5_SHIFT 8
686
687/* structure for Unicast Packet Filter Address 2 reg in rxmac address map
688 * located at address 0x406C
689 *
690 * 31-24: addr2_3
691 * 23-16: addr2_4
692 * 15-8: addr2_5
693 * 7-0: addr2_6
694 */
695#define ET_RX_UNI_PF_ADDR2_3_SHIFT 24
696#define ET_RX_UNI_PF_ADDR2_4_SHIFT 16
697#define ET_RX_UNI_PF_ADDR2_5_SHIFT 8
698
699/* structure for Unicast Packet Filter Address 1 & 2 reg in rxmac address map
700 * located at address 0x4070
701 *
702 * 31-24: addr2_1
703 * 23-16: addr2_2
704 * 15-8: addr1_1
705 * 7-0: addr1_2
706 */
707#define ET_RX_UNI_PF_ADDR2_1_SHIFT 24
708#define ET_RX_UNI_PF_ADDR2_2_SHIFT 16
709#define ET_RX_UNI_PF_ADDR1_1_SHIFT 8
710
711/* structure for Multicast Hash reg in rxmac address map
712 * located at address 0x4074 - 0x4080
713 * Defined earlier (u32)
714 */
715
716/* structure for Packet Filter Control reg in rxmac address map
717 * located at address 0x4084
718 *
719 * 31-23: unused
720 * 22-16: min_pkt_size
721 * 15-4: unused
722 * 3: filter_frag_en
723 * 2: filter_uni_en
724 * 1: filter_multi_en
725 * 0: filter_broad_en
726 */
727#define ET_RX_PFCTRL_MIN_PKT_SZ_SHIFT 16
728#define ET_RX_PFCTRL_FRAG_FILTER_ENABLE 0x0008
729#define ET_RX_PFCTRL_UNICST_FILTER_ENABLE 0x0004
730#define ET_RX_PFCTRL_MLTCST_FILTER_ENABLE 0x0002
731#define ET_RX_PFCTRL_BRDCST_FILTER_ENABLE 0x0001
732
733/* structure for Memory Controller Interface Control Max Segment reg in rxmac
734 * address map. Located at address 0x4088
735 *
736 * 31-10: reserved
737 * 9-2: max_size
738 * 1: fc_en
739 * 0: seg_en
740 */
741#define ET_RX_MCIF_CTRL_MAX_SEG_SIZE_SHIFT 2
742#define ET_RX_MCIF_CTRL_MAX_SEG_FC_ENABLE 0x0002
743#define ET_RX_MCIF_CTRL_MAX_SEG_ENABLE 0x0001
744
745/* structure for Memory Controller Interface Water Mark reg in rxmac address
746 * map. Located at address 0x408C
747 *
748 * 31-26: unused
749 * 25-16: mark_hi
750 * 15-10: unused
751 * 9-0: mark_lo
752 */
753
754/* structure for Rx Queue Dialog reg in rxmac address map.
755 * located at address 0x4090
756 *
757 * 31-26: reserved
758 * 25-16: rd_ptr
759 * 15-10: reserved
760 * 9-0: wr_ptr
761 */
762
763/* structure for space available reg in rxmac address map.
764 * located at address 0x4094
765 *
766 * 31-17: reserved
767 * 16: space_avail_en
768 * 15-10: reserved
769 * 9-0: space_avail
770 */
771
772/* structure for management interface reg in rxmac address map.
773 * located at address 0x4098
774 *
775 * 31-18: reserved
776 * 17: drop_pkt_en
777 * 16-0: drop_pkt_mask
778 */
779
780/* structure for Error reg in rxmac address map.
781 * located at address 0x409C
782 *
783 * 31-4: unused
784 * 3: mif
785 * 2: async
786 * 1: pkt_filter
787 * 0: mcif
788 */
789
/* Rx MAC Module of JAGCore Address Mapping
 * Located at address 0x4000
 *
 * Hardware register layout - field order and sizes mirror the device
 * register map exactly and must not be changed.  mask0..mask4 are the
 * five Wake-on-LAN pattern masks (4 words each).
 */
struct rxmac_regs {			/* Location: */
	u32 ctrl;			/*  0x4000 */
	u32 crc0;			/*  0x4004 */
	u32 crc12;			/*  0x4008 */
	u32 crc34;			/*  0x400C */
	u32 sa_lo;			/*  0x4010 */
	u32 sa_hi;			/*  0x4014 */
	u32 mask0_word0;		/*  0x4018 */
	u32 mask0_word1;		/*  0x401C */
	u32 mask0_word2;		/*  0x4020 */
	u32 mask0_word3;		/*  0x4024 */
	u32 mask1_word0;		/*  0x4028 */
	u32 mask1_word1;		/*  0x402C */
	u32 mask1_word2;		/*  0x4030 */
	u32 mask1_word3;		/*  0x4034 */
	u32 mask2_word0;		/*  0x4038 */
	u32 mask2_word1;		/*  0x403C */
	u32 mask2_word2;		/*  0x4040 */
	u32 mask2_word3;		/*  0x4044 */
	u32 mask3_word0;		/*  0x4048 */
	u32 mask3_word1;		/*  0x404C */
	u32 mask3_word2;		/*  0x4050 */
	u32 mask3_word3;		/*  0x4054 */
	u32 mask4_word0;		/*  0x4058 */
	u32 mask4_word1;		/*  0x405C */
	u32 mask4_word2;		/*  0x4060 */
	u32 mask4_word3;		/*  0x4064 */
	u32 uni_pf_addr1;		/*  0x4068 */
	u32 uni_pf_addr2;		/*  0x406C */
	u32 uni_pf_addr3;		/*  0x4070 */
	u32 multi_hash1;		/*  0x4074 */
	u32 multi_hash2;		/*  0x4078 */
	u32 multi_hash3;		/*  0x407C */
	u32 multi_hash4;		/*  0x4080 */
	u32 pf_ctrl;			/*  0x4084 */
	u32 mcif_ctrl_max_seg;		/*  0x4088 */
	u32 mcif_water_mark;		/*  0x408C */
	u32 rxq_diag;			/*  0x4090 */
	u32 space_avail;		/*  0x4094 */

	u32 mif_ctrl;			/*  0x4098 */
	u32 err_reg;			/*  0x409C */
};
835
836/* END OF RXMAC REGISTER ADDRESS MAP */
837
838/* START OF MAC REGISTER ADDRESS MAP */
839/* structure for configuration #1 reg in mac address map.
840 * located at address 0x5000
841 *
842 * 31: soft reset
843 * 30: sim reset
844 * 29-20: reserved
845 * 19: reset rx mc
846 * 18: reset tx mc
847 * 17: reset rx func
848 * 16: reset tx fnc
849 * 15-9: reserved
850 * 8: loopback
851 * 7-6: reserved
852 * 5: rx flow
853 * 4: tx flow
854 * 3: syncd rx en
855 * 2: rx enable
856 * 1: syncd tx en
857 * 0: tx enable
858 */
859#define ET_MAC_CFG1_SOFT_RESET 0x80000000
860#define ET_MAC_CFG1_SIM_RESET 0x40000000
861#define ET_MAC_CFG1_RESET_RXMC 0x00080000
862#define ET_MAC_CFG1_RESET_TXMC 0x00040000
863#define ET_MAC_CFG1_RESET_RXFUNC 0x00020000
864#define ET_MAC_CFG1_RESET_TXFUNC 0x00010000
865#define ET_MAC_CFG1_LOOPBACK 0x00000100
866#define ET_MAC_CFG1_RX_FLOW 0x00000020
867#define ET_MAC_CFG1_TX_FLOW 0x00000010
868#define ET_MAC_CFG1_RX_ENABLE 0x00000004
869#define ET_MAC_CFG1_TX_ENABLE 0x00000001
870#define ET_MAC_CFG1_WAIT 0x0000000A /* RX & TX syncd */
871
872/* structure for configuration #2 reg in mac address map.
873 * located at address 0x5004
874 * 31-16: reserved
875 * 15-12: preamble
876 * 11-10: reserved
877 * 9-8: if mode
878 * 7-6: reserved
879 * 5: huge frame
880 * 4: length check
881 * 3: undefined
882 * 2: pad crc
883 * 1: crc enable
884 * 0: full duplex
885 */
886#define ET_MAC_CFG2_PREAMBLE_SHIFT 12
887#define ET_MAC_CFG2_IFMODE_MASK 0x0300
888#define ET_MAC_CFG2_IFMODE_1000 0x0200
889#define ET_MAC_CFG2_IFMODE_100 0x0100
890#define ET_MAC_CFG2_IFMODE_HUGE_FRAME 0x0020
891#define ET_MAC_CFG2_IFMODE_LEN_CHECK 0x0010
892#define ET_MAC_CFG2_IFMODE_PAD_CRC 0x0004
893#define ET_MAC_CFG2_IFMODE_CRC_ENABLE 0x0002
894#define ET_MAC_CFG2_IFMODE_FULL_DPLX 0x0001
895
896/* structure for Interpacket gap reg in mac address map.
897 * located at address 0x5008
898 *
899 * 31: reserved
900 * 30-24: non B2B ipg 1
901 * 23: undefined
902 * 22-16: non B2B ipg 2
903 * 15-8: Min ifg enforce
904 * 7-0: B2B ipg
905 *
906 * structure for half duplex reg in mac address map.
907 * located at address 0x500C
908 * 31-24: reserved
909 * 23-20: Alt BEB trunc
910 * 19: Alt BEB enable
911 * 18: BP no backoff
912 * 17: no backoff
913 * 16: excess defer
914 * 15-12: re-xmit max
915 * 11-10: reserved
916 * 9-0: collision window
917 */
918
919/* structure for Maximum Frame Length reg in mac address map.
920 * located at address 0x5010: bits 0-15 hold the length.
921 */
922
923/* structure for Reserve 1 reg in mac address map.
924 * located at address 0x5014 - 0x5018
925 * Defined earlier (u32)
926 */
927
928/* structure for Test reg in mac address map.
929 * located at address 0x501C
930 * test: bits 0-2, rest unused
931 */
932
933/* structure for MII Management Configuration reg in mac address map.
934 * located at address 0x5020
935 *
936 * 31: reset MII mgmt
937 * 30-6: unused
938 * 5: scan auto increment
939 * 4: preamble suppress
940 * 3: undefined
941 * 2-0: mgmt clock reset
942 */
943#define ET_MAC_MIIMGMT_CLK_RST 0x0007
944
945/* structure for MII Management Command reg in mac address map.
946 * located at address 0x5024
947 * bit 1: scan cycle
948 * bit 0: read cycle
949 */
950
951/* structure for MII Management Address reg in mac address map.
952 * located at address 0x5028
953 * 31-13: reserved
954 * 12-8: phy addr
955 * 7-5: reserved
956 * 4-0: register
957 */
958#define ET_MAC_MII_ADDR(phy, reg) ((phy) << 8 | (reg))
959
960/* structure for MII Management Control reg in mac address map.
961 * located at address 0x502C
962 * 31-16: reserved
963 * 15-0: phy control
964 */
965
966/* structure for MII Management Status reg in mac address map.
967 * located at address 0x5030
968 * 31-16: reserved
969 * 15-0: phy control
970 */
971#define ET_MAC_MIIMGMT_STAT_PHYCRTL_MASK 0xFFFF
972
973/* structure for MII Management Indicators reg in mac address map.
974 * located at address 0x5034
975 * 31-3: reserved
976 * 2: not valid
977 * 1: scanning
978 * 0: busy
979 */
980#define ET_MAC_MGMT_BUSY 0x00000001 /* busy */
981#define ET_MAC_MGMT_WAIT 0x00000005 /* busy | not valid */
982
983/* structure for Interface Control reg in mac address map.
984 * located at address 0x5038
985 *
986 * 31: reset if module
987 * 30-28: reserved
988 * 27: tbi mode
989 * 26: ghd mode
990 * 25: lhd mode
991 * 24: phy mode
992 * 23: reset per mii
993 * 22-17: reserved
994 * 16: speed
995 * 15: reset pe100x
996 * 14-11: reserved
997 * 10: force quiet
998 * 9: no cipher
999 * 8: disable link fail
1000 * 7: reset gpsi
1001 * 6-1: reserved
1002 * 0: enable jabber protection
1003 */
1004#define ET_MAC_IFCTRL_GHDMODE (1 << 26)
1005#define ET_MAC_IFCTRL_PHYMODE (1 << 24)
1006
1007/* structure for Interface Status reg in mac address map.
1008 * located at address 0x503C
1009 *
1010 * 31-10: reserved
1011 * 9: excess_defer
1012 * 8: clash
1013 * 7: phy_jabber
1014 * 6: phy_link_ok
1015 * 5: phy_full_duplex
1016 * 4: phy_speed
1017 * 3: pe100x_link_fail
1018 * 2: pe10t_loss_carrier
1019 * 1: pe10t_sqe_error
1020 * 0: pe10t_jabber
1021 */
1022
1023/* structure for Mac Station Address, Part 1 reg in mac address map.
1024 * located at address 0x5040
1025 *
1026 * 31-24: Octet6
1027 * 23-16: Octet5
1028 * 15-8: Octet4
1029 * 7-0: Octet3
1030 */
1031#define ET_MAC_STATION_ADDR1_OC6_SHIFT 24
1032#define ET_MAC_STATION_ADDR1_OC5_SHIFT 16
1033#define ET_MAC_STATION_ADDR1_OC4_SHIFT 8
1034
1035/* structure for Mac Station Address, Part 2 reg in mac address map.
1036 * located at address 0x5044
1037 *
1038 * 31-24: Octet2
1039 * 23-16: Octet1
1040 * 15-0: reserved
1041 */
1042#define ET_MAC_STATION_ADDR2_OC2_SHIFT 24
1043#define ET_MAC_STATION_ADDR2_OC1_SHIFT 16
1044
1045/* MAC Module of JAGCore Address Mapping
1046 */
1047struct mac_regs { /* Location: */
1048 u32 cfg1; /* 0x5000 */
1049 u32 cfg2; /* 0x5004 */
1050 u32 ipg; /* 0x5008 */
1051 u32 hfdp; /* 0x500C */
1052 u32 max_fm_len; /* 0x5010 */
1053 u32 rsv1; /* 0x5014 */
1054 u32 rsv2; /* 0x5018 */
1055 u32 mac_test; /* 0x501C */
1056 u32 mii_mgmt_cfg; /* 0x5020 */
1057 u32 mii_mgmt_cmd; /* 0x5024 */
1058 u32 mii_mgmt_addr; /* 0x5028 */
1059 u32 mii_mgmt_ctrl; /* 0x502C */
1060 u32 mii_mgmt_stat; /* 0x5030 */
1061 u32 mii_mgmt_indicator; /* 0x5034 */
1062 u32 if_ctrl; /* 0x5038 */
1063 u32 if_stat; /* 0x503C */
1064 u32 station_addr_1; /* 0x5040 */
1065 u32 station_addr_2; /* 0x5044 */
1066};
1067
1068/* END OF MAC REGISTER ADDRESS MAP */
1069
1070/* START OF MAC STAT REGISTER ADDRESS MAP */
1071/* structure for Carry Register One and it's Mask Register reg located in mac
1072 * stat address map address 0x6130 and 0x6138.
1073 *
1074 * 31: tr64
1075 * 30: tr127
1076 * 29: tr255
1077 * 28: tr511
1078 * 27: tr1k
1079 * 26: trmax
1080 * 25: trmgv
1081 * 24-17: unused
1082 * 16: rbyt
1083 * 15: rpkt
1084 * 14: rfcs
1085 * 13: rmca
1086 * 12: rbca
1087 * 11: rxcf
1088 * 10: rxpf
1089 * 9: rxuo
1090 * 8: raln
1091 * 7: rflr
1092 * 6: rcde
1093 * 5: rcse
1094 * 4: rund
1095 * 3: rovr
1096 * 2: rfrg
1097 * 1: rjbr
1098 * 0: rdrp
1099 */
1100
1101/* structure for Carry Register Two Mask Register reg in mac stat address map.
1102 * located at address 0x613C
1103 *
1104 * 31-20: unused
1105 * 19: tjbr
1106 * 18: tfcs
1107 * 17: txcf
1108 * 16: tovr
1109 * 15: tund
1110 * 14: trfg
1111 * 13: tbyt
1112 * 12: tpkt
1113 * 11: tmca
1114 * 10: tbca
1115 * 9: txpf
1116 * 8: tdfr
1117 * 7: tedf
1118 * 6: tscl
1119 * 5: tmcl
1120 * 4: tlcl
1121 * 3: txcl
1122 * 2: tncl
1123 * 1: tpfh
1124 * 0: tdrp
1125 */
1126
1127/* MAC STATS Module of JAGCore Address Mapping
1128 */
1129struct macstat_regs { /* Location: */
1130 u32 pad[32]; /* 0x6000 - 607C */
1131
1132 /* counters */
1133 u32 txrx_0_64_byte_frames; /* 0x6080 */
1134 u32 txrx_65_127_byte_frames; /* 0x6084 */
1135 u32 txrx_128_255_byte_frames; /* 0x6088 */
1136 u32 txrx_256_511_byte_frames; /* 0x608C */
1137 u32 txrx_512_1023_byte_frames; /* 0x6090 */
1138 u32 txrx_1024_1518_byte_frames; /* 0x6094 */
1139 u32 txrx_1519_1522_gvln_frames; /* 0x6098 */
1140 u32 rx_bytes; /* 0x609C */
1141 u32 rx_packets; /* 0x60A0 */
1142 u32 rx_fcs_errs; /* 0x60A4 */
1143 u32 rx_multicast_packets; /* 0x60A8 */
1144 u32 rx_broadcast_packets; /* 0x60AC */
1145 u32 rx_control_frames; /* 0x60B0 */
1146 u32 rx_pause_frames; /* 0x60B4 */
1147 u32 rx_unknown_opcodes; /* 0x60B8 */
1148 u32 rx_align_errs; /* 0x60BC */
1149 u32 rx_frame_len_errs; /* 0x60C0 */
1150 u32 rx_code_errs; /* 0x60C4 */
1151 u32 rx_carrier_sense_errs; /* 0x60C8 */
1152 u32 rx_undersize_packets; /* 0x60CC */
1153 u32 rx_oversize_packets; /* 0x60D0 */
1154 u32 rx_fragment_packets; /* 0x60D4 */
1155 u32 rx_jabbers; /* 0x60D8 */
1156 u32 rx_drops; /* 0x60DC */
1157 u32 tx_bytes; /* 0x60E0 */
1158 u32 tx_packets; /* 0x60E4 */
1159 u32 tx_multicast_packets; /* 0x60E8 */
1160 u32 tx_broadcast_packets; /* 0x60EC */
1161 u32 tx_pause_frames; /* 0x60F0 */
1162 u32 tx_deferred; /* 0x60F4 */
1163 u32 tx_excessive_deferred; /* 0x60F8 */
1164 u32 tx_single_collisions; /* 0x60FC */
1165 u32 tx_multiple_collisions; /* 0x6100 */
1166 u32 tx_late_collisions; /* 0x6104 */
1167 u32 tx_excessive_collisions; /* 0x6108 */
1168 u32 tx_total_collisions; /* 0x610C */
1169 u32 tx_pause_honored_frames; /* 0x6110 */
1170 u32 tx_drops; /* 0x6114 */
1171 u32 tx_jabbers; /* 0x6118 */
1172 u32 tx_fcs_errs; /* 0x611C */
1173 u32 tx_control_frames; /* 0x6120 */
1174 u32 tx_oversize_frames; /* 0x6124 */
1175 u32 tx_undersize_frames; /* 0x6128 */
1176 u32 tx_fragments; /* 0x612C */
1177 u32 carry_reg1; /* 0x6130 */
1178 u32 carry_reg2; /* 0x6134 */
1179 u32 carry_reg1_mask; /* 0x6138 */
1180 u32 carry_reg2_mask; /* 0x613C */
1181};
1182
1183/* END OF MAC STAT REGISTER ADDRESS MAP */
1184
1185/* START OF MMC REGISTER ADDRESS MAP */
1186/* Main Memory Controller Control reg in mmc address map.
1187 * located at address 0x7000
1188 */
1189#define ET_MMC_ENABLE 1
1190#define ET_MMC_ARB_DISABLE 2
1191#define ET_MMC_RXMAC_DISABLE 4
1192#define ET_MMC_TXMAC_DISABLE 8
1193#define ET_MMC_TXDMA_DISABLE 16
1194#define ET_MMC_RXDMA_DISABLE 32
1195#define ET_MMC_FORCE_CE 64
1196
1197/* Main Memory Controller Host Memory Access Address reg in mmc
1198 * address map. Located at address 0x7004. Top 16 bits hold the address bits
1199 */
1200#define ET_SRAM_REQ_ACCESS 1
1201#define ET_SRAM_WR_ACCESS 2
1202#define ET_SRAM_IS_CTRL 4
1203
1204/* structure for Main Memory Controller Host Memory Access Data reg in mmc
1205 * address map. Located at address 0x7008 - 0x7014
1206 * Defined earlier (u32)
1207 */
1208
1209/* Memory Control Module of JAGCore Address Mapping
1210 */
1211struct mmc_regs { /* Location: */
1212 u32 mmc_ctrl; /* 0x7000 */
1213 u32 sram_access; /* 0x7004 */
1214 u32 sram_word1; /* 0x7008 */
1215 u32 sram_word2; /* 0x700C */
1216 u32 sram_word3; /* 0x7010 */
1217 u32 sram_word4; /* 0x7014 */
1218};
1219
1220/* END OF MMC REGISTER ADDRESS MAP */
1221
1222/* JAGCore Address Mapping
1223 */
1224struct address_map {
1225 struct global_regs global;
1226 /* unused section of global address map */
1227 u8 unused_global[4096 - sizeof(struct global_regs)];
1228 struct txdma_regs txdma;
1229 /* unused section of txdma address map */
1230 u8 unused_txdma[4096 - sizeof(struct txdma_regs)];
1231 struct rxdma_regs rxdma;
1232 /* unused section of rxdma address map */
1233 u8 unused_rxdma[4096 - sizeof(struct rxdma_regs)];
1234 struct txmac_regs txmac;
1235 /* unused section of txmac address map */
1236 u8 unused_txmac[4096 - sizeof(struct txmac_regs)];
1237 struct rxmac_regs rxmac;
1238 /* unused section of rxmac address map */
1239 u8 unused_rxmac[4096 - sizeof(struct rxmac_regs)];
1240 struct mac_regs mac;
1241 /* unused section of mac address map */
1242 u8 unused_mac[4096 - sizeof(struct mac_regs)];
1243 struct macstat_regs macstat;
1244 /* unused section of mac stat address map */
1245 u8 unused_mac_stat[4096 - sizeof(struct macstat_regs)];
1246 struct mmc_regs mmc;
1247 /* unused section of mmc address map */
1248 u8 unused_mmc[4096 - sizeof(struct mmc_regs)];
1249 /* unused section of address map */
1250 u8 unused_[1015808];
1251 u8 unused_exp_rom[4096]; /* MGS-size TBD */
1252 u8 unused__[524288]; /* unused section of address map */
1253};
1254
1255/* Defines for generic MII registers 0x00 -> 0x0F can be found in
1256 * include/linux/mii.h
1257 */
1258/* some defines for modem registers that seem to be 'reserved' */
1259#define PHY_INDEX_REG 0x10
1260#define PHY_DATA_REG 0x11
1261#define PHY_MPHY_CONTROL_REG 0x12
1262
1263/* defines for specified registers */
1264#define PHY_LOOPBACK_CONTROL 0x13 /* TRU_VMI_LOOPBACK_CONTROL_1_REG 19 */
1265 /* TRU_VMI_LOOPBACK_CONTROL_2_REG 20 */
1266#define PHY_REGISTER_MGMT_CONTROL 0x15 /* TRU_VMI_MI_SEQ_CONTROL_REG 21 */
1267#define PHY_CONFIG 0x16 /* TRU_VMI_CONFIGURATION_REG 22 */
1268#define PHY_PHY_CONTROL 0x17 /* TRU_VMI_PHY_CONTROL_REG 23 */
1269#define PHY_INTERRUPT_MASK 0x18 /* TRU_VMI_INTERRUPT_MASK_REG 24 */
1270#define PHY_INTERRUPT_STATUS 0x19 /* TRU_VMI_INTERRUPT_STATUS_REG 25 */
1271#define PHY_PHY_STATUS 0x1A /* TRU_VMI_PHY_STATUS_REG 26 */
1272#define PHY_LED_1 0x1B /* TRU_VMI_LED_CONTROL_1_REG 27 */
1273#define PHY_LED_2 0x1C /* TRU_VMI_LED_CONTROL_2_REG 28 */
1274 /* TRU_VMI_LINK_CONTROL_REG 29 */
1275 /* TRU_VMI_TIMING_CONTROL_REG */
1276
1277/* MI Register 10: Gigabit basic mode status reg(Reg 0x0A) */
1278#define ET_1000BT_MSTR_SLV 0x4000
1279
1280/* MI Register 16 - 18: Reserved Reg(0x10-0x12) */
1281
1282/* MI Register 19: Loopback Control Reg(0x13)
1283 * 15: mii_en
1284 * 14: pcs_en
1285 * 13: pmd_en
1286 * 12: all_digital_en
1287 * 11: replica_en
1288 * 10: line_driver_en
1289 * 9-0: reserved
1290 */
1291
1292/* MI Register 20: Reserved Reg(0x14) */
1293
1294/* MI Register 21: Management Interface Control Reg(0x15)
1295 * 15-11: reserved
1296 * 10-4: mi_error_count
1297 * 3: reserved
1298 * 2: ignore_10g_fr
1299 * 1: reserved
1300 * 0: preamble_suppress_en
1301 */
1302
1303/* MI Register 22: PHY Configuration Reg(0x16)
1304 * 15: crs_tx_en
1305 * 14: reserved
1306 * 13-12: tx_fifo_depth
1307 * 11-10: speed_downshift
1308 * 9: pbi_detect
1309 * 8: tbi_rate
1310 * 7: alternate_np
1311 * 6: group_mdio_en
1312 * 5: tx_clock_en
1313 * 4: sys_clock_en
1314 * 3: reserved
1315 * 2-0: mac_if_mode
1316 */
1317#define ET_PHY_CONFIG_TX_FIFO_DEPTH 0x3000
1318
1319#define ET_PHY_CONFIG_FIFO_DEPTH_8 0x0000
1320#define ET_PHY_CONFIG_FIFO_DEPTH_16 0x1000
1321#define ET_PHY_CONFIG_FIFO_DEPTH_32 0x2000
1322#define ET_PHY_CONFIG_FIFO_DEPTH_64 0x3000
1323
1324/* MI Register 23: PHY CONTROL Reg(0x17)
1325 * 15: reserved
1326 * 14: tdr_en
1327 * 13: reserved
1328 * 12-11: downshift_attempts
1329 * 10-6: reserved
1330 * 5: jabber_10baseT
1331 * 4: sqe_10baseT
1332 * 3: tp_loopback_10baseT
1333 * 2: preamble_gen_en
1334 * 1: reserved
1335 * 0: force_int
1336 */
1337
1338/* MI Register 24: Interrupt Mask Reg(0x18)
1339 * 15-10: reserved
1340 * 9: mdio_sync_lost
1341 * 8: autoneg_status
1342 * 7: hi_bit_err
1343 * 6: np_rx
1344 * 5: err_counter_full
1345 * 4: fifo_over_underflow
1346 * 3: rx_status
1347 * 2: link_status
1348 * 1: automatic_speed
1349 * 0: int_en
1350 */
1351
1352/* MI Register 25: Interrupt Status Reg(0x19)
1353 * 15-10: reserved
1354 * 9: mdio_sync_lost
1355 * 8: autoneg_status
1356 * 7: hi_bit_err
1357 * 6: np_rx
1358 * 5: err_counter_full
1359 * 4: fifo_over_underflow
1360 * 3: rx_status
1361 * 2: link_status
1362 * 1: automatic_speed
1363 * 0: int_en
1364 */
1365
1366/* MI Register 26: PHY Status Reg(0x1A)
1367 * 15: reserved
1368 * 14-13: autoneg_fault
1369 * 12: autoneg_status
1370 * 11: mdi_x_status
1371 * 10: polarity_status
1372 * 9-8: speed_status
1373 * 7: duplex_status
1374 * 6: link_status
1375 * 5: tx_status
1376 * 4: rx_status
1377 * 3: collision_status
1378 * 2: autoneg_en
1379 * 1: pause_en
1380 * 0: asymmetric_dir
1381 */
1382#define ET_PHY_AUTONEG_STATUS 0x1000
1383#define ET_PHY_POLARITY_STATUS 0x0400
1384#define ET_PHY_SPEED_STATUS 0x0300
1385#define ET_PHY_DUPLEX_STATUS 0x0080
1386#define ET_PHY_LSTATUS 0x0040
1387#define ET_PHY_AUTONEG_ENABLE 0x0020
1388
1389/* MI Register 27: LED Control Reg 1(0x1B)
1390 * 15-14: reserved
1391 * 13-12: led_dup_indicate
1392 * 11-10: led_10baseT
1393 * 9-8: led_collision
1394 * 7-4: reserved
1395 * 3-2: pulse_dur
1396 * 1: pulse_stretch1
1397 * 0: pulse_stretch0
1398 */
1399
1400/* MI Register 28: LED Control Reg 2(0x1C)
1401 * 15-12: led_link
1402 * 11-8: led_tx_rx
1403 * 7-4: led_100BaseTX
1404 * 3-0: led_1000BaseT
1405 */
1406#define ET_LED2_LED_LINK 0xF000
1407#define ET_LED2_LED_TXRX 0x0F00
1408#define ET_LED2_LED_100TX 0x00F0
1409#define ET_LED2_LED_1000T 0x000F
1410
1411/* defines for LED control reg 2 values */
1412#define LED_VAL_1000BT 0x0
1413#define LED_VAL_100BTX 0x1
1414#define LED_VAL_10BT 0x2
1415#define LED_VAL_1000BT_100BTX 0x3 /* 1000BT on, 100BTX blink */
1416#define LED_VAL_LINKON 0x4
1417#define LED_VAL_TX 0x5
1418#define LED_VAL_RX 0x6
1419#define LED_VAL_TXRX 0x7 /* TX or RX */
1420#define LED_VAL_DUPLEXFULL 0x8
1421#define LED_VAL_COLLISION 0x9
1422#define LED_VAL_LINKON_ACTIVE 0xA /* Link on, activity blink */
1423#define LED_VAL_LINKON_RECV 0xB /* Link on, receive blink */
1424#define LED_VAL_DUPLEXFULL_COLLISION 0xC /* Duplex on, collision blink */
1425#define LED_VAL_BLINK 0xD
1426#define LED_VAL_ON 0xE
1427#define LED_VAL_OFF 0xF
1428
1429#define LED_LINK_SHIFT 12
1430#define LED_TXRX_SHIFT 8
1431#define LED_100TX_SHIFT 4
1432
/* MI Register 29 - 31: Reserved Reg(0x1D - 0x1F) */