diff options
Diffstat (limited to 'drivers/net/ethernet')
-rw-r--r-- | drivers/net/ethernet/Kconfig | 1 | ||||
-rw-r--r-- | drivers/net/ethernet/Makefile | 1 | ||||
-rw-r--r-- | drivers/net/ethernet/marvell/Kconfig | 110 | ||||
-rw-r--r-- | drivers/net/ethernet/marvell/Makefile | 8 | ||||
-rw-r--r-- | drivers/net/ethernet/marvell/mv643xx_eth.c | 3020 | ||||
-rw-r--r-- | drivers/net/ethernet/marvell/pxa168_eth.c | 1662 | ||||
-rw-r--r-- | drivers/net/ethernet/marvell/skge.c | 4133 | ||||
-rw-r--r-- | drivers/net/ethernet/marvell/skge.h | 2584 | ||||
-rw-r--r-- | drivers/net/ethernet/marvell/sky2.c | 5130 | ||||
-rw-r--r-- | drivers/net/ethernet/marvell/sky2.h | 2427 |
10 files changed, 19076 insertions, 0 deletions
diff --git a/drivers/net/ethernet/Kconfig b/drivers/net/ethernet/Kconfig index 0eaf95770ab7..1c447d96d7e5 100644 --- a/drivers/net/ethernet/Kconfig +++ b/drivers/net/ethernet/Kconfig | |||
@@ -28,6 +28,7 @@ source "drivers/net/ethernet/ibm/Kconfig" | |||
28 | source "drivers/net/ethernet/intel/Kconfig" | 28 | source "drivers/net/ethernet/intel/Kconfig" |
29 | source "drivers/net/ethernet/i825xx/Kconfig" | 29 | source "drivers/net/ethernet/i825xx/Kconfig" |
30 | source "drivers/net/ethernet/xscale/Kconfig" | 30 | source "drivers/net/ethernet/xscale/Kconfig" |
31 | source "drivers/net/ethernet/marvell/Kconfig" | ||
31 | source "drivers/net/ethernet/mellanox/Kconfig" | 32 | source "drivers/net/ethernet/mellanox/Kconfig" |
32 | source "drivers/net/ethernet/myricom/Kconfig" | 33 | source "drivers/net/ethernet/myricom/Kconfig" |
33 | source "drivers/net/ethernet/natsemi/Kconfig" | 34 | source "drivers/net/ethernet/natsemi/Kconfig" |
diff --git a/drivers/net/ethernet/Makefile b/drivers/net/ethernet/Makefile index b5ca872f2444..48c8656b96c2 100644 --- a/drivers/net/ethernet/Makefile +++ b/drivers/net/ethernet/Makefile | |||
@@ -20,6 +20,7 @@ obj-$(CONFIG_NET_VENDOR_IBM) += ibm/ | |||
20 | obj-$(CONFIG_NET_VENDOR_INTEL) += intel/ | 20 | obj-$(CONFIG_NET_VENDOR_INTEL) += intel/ |
21 | obj-$(CONFIG_NET_VENDOR_I825XX) += i825xx/ | 21 | obj-$(CONFIG_NET_VENDOR_I825XX) += i825xx/ |
22 | obj-$(CONFIG_NET_VENDOR_XSCALE) += xscale/ | 22 | obj-$(CONFIG_NET_VENDOR_XSCALE) += xscale/ |
23 | obj-$(CONFIG_NET_VENDOR_MARVELL) += marvell/ | ||
23 | obj-$(CONFIG_NET_VENDOR_MELLANOX) += mellanox/ | 24 | obj-$(CONFIG_NET_VENDOR_MELLANOX) += mellanox/ |
24 | obj-$(CONFIG_NET_VENDOR_MYRI) += myricom/ | 25 | obj-$(CONFIG_NET_VENDOR_MYRI) += myricom/ |
25 | obj-$(CONFIG_NET_VENDOR_NATSEMI) += natsemi/ | 26 | obj-$(CONFIG_NET_VENDOR_NATSEMI) += natsemi/ |
diff --git a/drivers/net/ethernet/marvell/Kconfig b/drivers/net/ethernet/marvell/Kconfig new file mode 100644 index 000000000000..e525408367b6 --- /dev/null +++ b/drivers/net/ethernet/marvell/Kconfig | |||
@@ -0,0 +1,110 @@ | |||
1 | # | ||
2 | # Marvell device configuration | ||
3 | # | ||
4 | |||
5 | config NET_VENDOR_MARVELL | ||
6 | bool "Marvell devices" | ||
7 | depends on PCI || CPU_PXA168 || MV64X60 || PPC32 || PLAT_ORION || INET | ||
8 | ---help--- | ||
9 | If you have a network (Ethernet) card belonging to this class, say Y | ||
10 | and read the Ethernet-HOWTO, available from | ||
11 | <http://www.tldp.org/docs.html#howto>. | ||
12 | |||
13 | Note that the answer to this question doesn't directly affect the | ||
14 | kernel: saying N will just cause the configurator to skip all | ||
15 | the questions about Marvell devices. If you say Y, you will be | ||
16 | asked for your specific card in the following questions. | ||
17 | |||
18 | if NET_VENDOR_MARVELL | ||
19 | |||
20 | config MV643XX_ETH | ||
21 | tristate "Marvell Discovery (643XX) and Orion ethernet support" | ||
22 | depends on (MV64X60 || PPC32 || PLAT_ORION) && INET | ||
23 | select INET_LRO | ||
24 | select PHYLIB | ||
25 | ---help--- | ||
26 | This driver supports the gigabit ethernet MACs in the | ||
27 | Marvell Discovery PPC/MIPS chipset family (MV643XX) and | ||
28 | in the Marvell Orion ARM SoC family. | ||
29 | |||
30 | Some boards that use the Discovery chipset are the Momenco | ||
31 | Ocelot C and Jaguar ATX and Pegasos II. | ||
32 | |||
33 | config PXA168_ETH | ||
34 | tristate "Marvell pxa168 ethernet support" | ||
35 | depends on CPU_PXA168 | ||
36 | select PHYLIB | ||
37 | ---help--- | ||
38 | This driver supports the pxa168 Ethernet ports. | ||
39 | |||
40 | To compile this driver as a module, choose M here. The module | ||
41 | will be called pxa168_eth. | ||
42 | |||
43 | config SKGE | ||
44 | tristate "Marvell Yukon Gigabit Ethernet support" | ||
45 | depends on PCI | ||
46 | select CRC32 | ||
47 | ---help--- | ||
48 | This driver support the Marvell Yukon or SysKonnect SK-98xx/SK-95xx | ||
49 | and related Gigabit Ethernet adapters. It is a new smaller driver | ||
50 | with better performance and more complete ethtool support. | ||
51 | |||
52 | It does not support the link failover and network management | ||
53 | features that "portable" vendor supplied sk98lin driver does. | ||
54 | |||
55 | This driver supports adapters based on the original Yukon chipset: | ||
56 | Marvell 88E8001, Belkin F5D5005, CNet GigaCard, DLink DGE-530T, | ||
57 | Linksys EG1032/EG1064, 3Com 3C940/3C940B, SysKonnect SK-9871/9872. | ||
58 | |||
59 | It does not support the newer Yukon2 chipset: a separate driver, | ||
60 | sky2, is provided for these adapters. | ||
61 | |||
62 | To compile this driver as a module, choose M here: the module | ||
63 | will be called skge. This is recommended. | ||
64 | |||
65 | config SKGE_DEBUG | ||
66 | bool "Debugging interface" | ||
67 | depends on SKGE && DEBUG_FS | ||
68 | ---help--- | ||
69 | This option adds the ability to dump driver state for debugging. | ||
70 | The file /sys/kernel/debug/skge/ethX displays the state of the internal | ||
71 | transmit and receive rings. | ||
72 | |||
73 | If unsure, say N. | ||
74 | |||
75 | config SKGE_GENESIS | ||
76 | bool "Support for older SysKonnect Genesis boards" | ||
77 | depends on SKGE | ||
78 | ---help--- | ||
79 | This enables support for the older and uncommon SysKonnect Genesis | ||
80 | chips, which support MII via an external transceiver, instead of | ||
81 | an internal one. Disabling this option will save some memory | ||
82 | by making code smaller. If unsure say Y. | ||
83 | |||
84 | config SKY2 | ||
85 | tristate "Marvell Yukon 2 support" | ||
86 | depends on PCI | ||
87 | select CRC32 | ||
88 | ---help--- | ||
89 | This driver supports Gigabit Ethernet adapters based on the | ||
90 | Marvell Yukon 2 chipset: | ||
91 | Marvell 88E8021/88E8022/88E8035/88E8036/88E8038/88E8050/88E8052/ | ||
92 | 88E8053/88E8055/88E8061/88E8062, SysKonnect SK-9E21D/SK-9S21 | ||
93 | |||
94 | There is companion driver for the older Marvell Yukon and | ||
95 | SysKonnect Genesis based adapters: skge. | ||
96 | |||
97 | To compile this driver as a module, choose M here: the module | ||
98 | will be called sky2. This is recommended. | ||
99 | |||
100 | config SKY2_DEBUG | ||
101 | bool "Debugging interface" | ||
102 | depends on SKY2 && DEBUG_FS | ||
103 | ---help--- | ||
104 | This option adds the ability to dump driver state for debugging. | ||
105 | The file /sys/kernel/debug/sky2/ethX displays the state of the internal | ||
106 | transmit and receive rings. | ||
107 | |||
108 | If unsure, say N. | ||
109 | |||
110 | endif # NET_VENDOR_MARVELL | ||
diff --git a/drivers/net/ethernet/marvell/Makefile b/drivers/net/ethernet/marvell/Makefile new file mode 100644 index 000000000000..57e3234a37ba --- /dev/null +++ b/drivers/net/ethernet/marvell/Makefile | |||
@@ -0,0 +1,8 @@ | |||
1 | # | ||
2 | # Makefile for the Marvell device drivers. | ||
3 | # | ||
4 | |||
5 | obj-$(CONFIG_MV643XX_ETH) += mv643xx_eth.o | ||
6 | obj-$(CONFIG_PXA168_ETH) += pxa168_eth.o | ||
7 | obj-$(CONFIG_SKGE) += skge.o | ||
8 | obj-$(CONFIG_SKY2) += sky2.o | ||
diff --git a/drivers/net/ethernet/marvell/mv643xx_eth.c b/drivers/net/ethernet/marvell/mv643xx_eth.c new file mode 100644 index 000000000000..259699983ca5 --- /dev/null +++ b/drivers/net/ethernet/marvell/mv643xx_eth.c | |||
@@ -0,0 +1,3020 @@ | |||
1 | /* | ||
2 | * Driver for Marvell Discovery (MV643XX) and Marvell Orion ethernet ports | ||
3 | * Copyright (C) 2002 Matthew Dharm <mdharm@momenco.com> | ||
4 | * | ||
5 | * Based on the 64360 driver from: | ||
6 | * Copyright (C) 2002 Rabeeh Khoury <rabeeh@galileo.co.il> | ||
7 | * Rabeeh Khoury <rabeeh@marvell.com> | ||
8 | * | ||
9 | * Copyright (C) 2003 PMC-Sierra, Inc., | ||
10 | * written by Manish Lachwani | ||
11 | * | ||
12 | * Copyright (C) 2003 Ralf Baechle <ralf@linux-mips.org> | ||
13 | * | ||
14 | * Copyright (C) 2004-2006 MontaVista Software, Inc. | ||
15 | * Dale Farnsworth <dale@farnsworth.org> | ||
16 | * | ||
17 | * Copyright (C) 2004 Steven J. Hill <sjhill1@rockwellcollins.com> | ||
18 | * <sjhill@realitydiluted.com> | ||
19 | * | ||
20 | * Copyright (C) 2007-2008 Marvell Semiconductor | ||
21 | * Lennert Buytenhek <buytenh@marvell.com> | ||
22 | * | ||
23 | * This program is free software; you can redistribute it and/or | ||
24 | * modify it under the terms of the GNU General Public License | ||
25 | * as published by the Free Software Foundation; either version 2 | ||
26 | * of the License, or (at your option) any later version. | ||
27 | * | ||
28 | * This program is distributed in the hope that it will be useful, | ||
29 | * but WITHOUT ANY WARRANTY; without even the implied warranty of | ||
30 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the | ||
31 | * GNU General Public License for more details. | ||
32 | * | ||
33 | * You should have received a copy of the GNU General Public License | ||
34 | * along with this program; if not, write to the Free Software | ||
35 | * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. | ||
36 | */ | ||
37 | |||
38 | #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt | ||
39 | |||
40 | #include <linux/init.h> | ||
41 | #include <linux/dma-mapping.h> | ||
42 | #include <linux/in.h> | ||
43 | #include <linux/ip.h> | ||
44 | #include <linux/tcp.h> | ||
45 | #include <linux/udp.h> | ||
46 | #include <linux/etherdevice.h> | ||
47 | #include <linux/delay.h> | ||
48 | #include <linux/ethtool.h> | ||
49 | #include <linux/platform_device.h> | ||
50 | #include <linux/module.h> | ||
51 | #include <linux/kernel.h> | ||
52 | #include <linux/spinlock.h> | ||
53 | #include <linux/workqueue.h> | ||
54 | #include <linux/phy.h> | ||
55 | #include <linux/mv643xx_eth.h> | ||
56 | #include <linux/io.h> | ||
57 | #include <linux/types.h> | ||
58 | #include <linux/inet_lro.h> | ||
59 | #include <linux/slab.h> | ||
60 | #include <asm/system.h> | ||
61 | |||
62 | static char mv643xx_eth_driver_name[] = "mv643xx_eth"; | ||
63 | static char mv643xx_eth_driver_version[] = "1.4"; | ||
64 | |||
65 | |||
66 | /* | ||
67 | * Registers shared between all ports. | ||
68 | */ | ||
69 | #define PHY_ADDR 0x0000 | ||
70 | #define SMI_REG 0x0004 | ||
71 | #define SMI_BUSY 0x10000000 | ||
72 | #define SMI_READ_VALID 0x08000000 | ||
73 | #define SMI_OPCODE_READ 0x04000000 | ||
74 | #define SMI_OPCODE_WRITE 0x00000000 | ||
75 | #define ERR_INT_CAUSE 0x0080 | ||
76 | #define ERR_INT_SMI_DONE 0x00000010 | ||
77 | #define ERR_INT_MASK 0x0084 | ||
78 | #define WINDOW_BASE(w) (0x0200 + ((w) << 3)) | ||
79 | #define WINDOW_SIZE(w) (0x0204 + ((w) << 3)) | ||
80 | #define WINDOW_REMAP_HIGH(w) (0x0280 + ((w) << 2)) | ||
81 | #define WINDOW_BAR_ENABLE 0x0290 | ||
82 | #define WINDOW_PROTECT(w) (0x0294 + ((w) << 4)) | ||
83 | |||
84 | /* | ||
85 | * Main per-port registers. These live at offset 0x0400 for | ||
86 | * port #0, 0x0800 for port #1, and 0x0c00 for port #2. | ||
87 | */ | ||
88 | #define PORT_CONFIG 0x0000 | ||
89 | #define UNICAST_PROMISCUOUS_MODE 0x00000001 | ||
90 | #define PORT_CONFIG_EXT 0x0004 | ||
91 | #define MAC_ADDR_LOW 0x0014 | ||
92 | #define MAC_ADDR_HIGH 0x0018 | ||
93 | #define SDMA_CONFIG 0x001c | ||
94 | #define TX_BURST_SIZE_16_64BIT 0x01000000 | ||
95 | #define TX_BURST_SIZE_4_64BIT 0x00800000 | ||
96 | #define BLM_TX_NO_SWAP 0x00000020 | ||
97 | #define BLM_RX_NO_SWAP 0x00000010 | ||
98 | #define RX_BURST_SIZE_16_64BIT 0x00000008 | ||
99 | #define RX_BURST_SIZE_4_64BIT 0x00000004 | ||
100 | #define PORT_SERIAL_CONTROL 0x003c | ||
101 | #define SET_MII_SPEED_TO_100 0x01000000 | ||
102 | #define SET_GMII_SPEED_TO_1000 0x00800000 | ||
103 | #define SET_FULL_DUPLEX_MODE 0x00200000 | ||
104 | #define MAX_RX_PACKET_9700BYTE 0x000a0000 | ||
105 | #define DISABLE_AUTO_NEG_SPEED_GMII 0x00002000 | ||
106 | #define DO_NOT_FORCE_LINK_FAIL 0x00000400 | ||
107 | #define SERIAL_PORT_CONTROL_RESERVED 0x00000200 | ||
108 | #define DISABLE_AUTO_NEG_FOR_FLOW_CTRL 0x00000008 | ||
109 | #define DISABLE_AUTO_NEG_FOR_DUPLEX 0x00000004 | ||
110 | #define FORCE_LINK_PASS 0x00000002 | ||
111 | #define SERIAL_PORT_ENABLE 0x00000001 | ||
112 | #define PORT_STATUS 0x0044 | ||
113 | #define TX_FIFO_EMPTY 0x00000400 | ||
114 | #define TX_IN_PROGRESS 0x00000080 | ||
115 | #define PORT_SPEED_MASK 0x00000030 | ||
116 | #define PORT_SPEED_1000 0x00000010 | ||
117 | #define PORT_SPEED_100 0x00000020 | ||
118 | #define PORT_SPEED_10 0x00000000 | ||
119 | #define FLOW_CONTROL_ENABLED 0x00000008 | ||
120 | #define FULL_DUPLEX 0x00000004 | ||
121 | #define LINK_UP 0x00000002 | ||
122 | #define TXQ_COMMAND 0x0048 | ||
123 | #define TXQ_FIX_PRIO_CONF 0x004c | ||
124 | #define TX_BW_RATE 0x0050 | ||
125 | #define TX_BW_MTU 0x0058 | ||
126 | #define TX_BW_BURST 0x005c | ||
127 | #define INT_CAUSE 0x0060 | ||
128 | #define INT_TX_END 0x07f80000 | ||
129 | #define INT_TX_END_0 0x00080000 | ||
130 | #define INT_RX 0x000003fc | ||
131 | #define INT_RX_0 0x00000004 | ||
132 | #define INT_EXT 0x00000002 | ||
133 | #define INT_CAUSE_EXT 0x0064 | ||
134 | #define INT_EXT_LINK_PHY 0x00110000 | ||
135 | #define INT_EXT_TX 0x000000ff | ||
136 | #define INT_MASK 0x0068 | ||
137 | #define INT_MASK_EXT 0x006c | ||
138 | #define TX_FIFO_URGENT_THRESHOLD 0x0074 | ||
139 | #define TXQ_FIX_PRIO_CONF_MOVED 0x00dc | ||
140 | #define TX_BW_RATE_MOVED 0x00e0 | ||
141 | #define TX_BW_MTU_MOVED 0x00e8 | ||
142 | #define TX_BW_BURST_MOVED 0x00ec | ||
143 | #define RXQ_CURRENT_DESC_PTR(q) (0x020c + ((q) << 4)) | ||
144 | #define RXQ_COMMAND 0x0280 | ||
145 | #define TXQ_CURRENT_DESC_PTR(q) (0x02c0 + ((q) << 2)) | ||
146 | #define TXQ_BW_TOKENS(q) (0x0300 + ((q) << 4)) | ||
147 | #define TXQ_BW_CONF(q) (0x0304 + ((q) << 4)) | ||
148 | #define TXQ_BW_WRR_CONF(q) (0x0308 + ((q) << 4)) | ||
149 | |||
150 | /* | ||
151 | * Misc per-port registers. | ||
152 | */ | ||
153 | #define MIB_COUNTERS(p) (0x1000 + ((p) << 7)) | ||
154 | #define SPECIAL_MCAST_TABLE(p) (0x1400 + ((p) << 10)) | ||
155 | #define OTHER_MCAST_TABLE(p) (0x1500 + ((p) << 10)) | ||
156 | #define UNICAST_TABLE(p) (0x1600 + ((p) << 10)) | ||
157 | |||
158 | |||
159 | /* | ||
160 | * SDMA configuration register default value. | ||
161 | */ | ||
162 | #if defined(__BIG_ENDIAN) | ||
163 | #define PORT_SDMA_CONFIG_DEFAULT_VALUE \ | ||
164 | (RX_BURST_SIZE_4_64BIT | \ | ||
165 | TX_BURST_SIZE_4_64BIT) | ||
166 | #elif defined(__LITTLE_ENDIAN) | ||
167 | #define PORT_SDMA_CONFIG_DEFAULT_VALUE \ | ||
168 | (RX_BURST_SIZE_4_64BIT | \ | ||
169 | BLM_RX_NO_SWAP | \ | ||
170 | BLM_TX_NO_SWAP | \ | ||
171 | TX_BURST_SIZE_4_64BIT) | ||
172 | #else | ||
173 | #error One of __BIG_ENDIAN or __LITTLE_ENDIAN must be defined | ||
174 | #endif | ||
175 | |||
176 | |||
177 | /* | ||
178 | * Misc definitions. | ||
179 | */ | ||
180 | #define DEFAULT_RX_QUEUE_SIZE 128 | ||
181 | #define DEFAULT_TX_QUEUE_SIZE 256 | ||
182 | #define SKB_DMA_REALIGN ((PAGE_SIZE - NET_SKB_PAD) % SMP_CACHE_BYTES) | ||
183 | |||
184 | |||
185 | /* | ||
186 | * RX/TX descriptors. | ||
187 | */ | ||
188 | #if defined(__BIG_ENDIAN) | ||
189 | struct rx_desc { | ||
190 | u16 byte_cnt; /* Descriptor buffer byte count */ | ||
191 | u16 buf_size; /* Buffer size */ | ||
192 | u32 cmd_sts; /* Descriptor command status */ | ||
193 | u32 next_desc_ptr; /* Next descriptor pointer */ | ||
194 | u32 buf_ptr; /* Descriptor buffer pointer */ | ||
195 | }; | ||
196 | |||
197 | struct tx_desc { | ||
198 | u16 byte_cnt; /* buffer byte count */ | ||
199 | u16 l4i_chk; /* CPU provided TCP checksum */ | ||
200 | u32 cmd_sts; /* Command/status field */ | ||
201 | u32 next_desc_ptr; /* Pointer to next descriptor */ | ||
202 | u32 buf_ptr; /* pointer to buffer for this descriptor*/ | ||
203 | }; | ||
204 | #elif defined(__LITTLE_ENDIAN) | ||
205 | struct rx_desc { | ||
206 | u32 cmd_sts; /* Descriptor command status */ | ||
207 | u16 buf_size; /* Buffer size */ | ||
208 | u16 byte_cnt; /* Descriptor buffer byte count */ | ||
209 | u32 buf_ptr; /* Descriptor buffer pointer */ | ||
210 | u32 next_desc_ptr; /* Next descriptor pointer */ | ||
211 | }; | ||
212 | |||
213 | struct tx_desc { | ||
214 | u32 cmd_sts; /* Command/status field */ | ||
215 | u16 l4i_chk; /* CPU provided TCP checksum */ | ||
216 | u16 byte_cnt; /* buffer byte count */ | ||
217 | u32 buf_ptr; /* pointer to buffer for this descriptor*/ | ||
218 | u32 next_desc_ptr; /* Pointer to next descriptor */ | ||
219 | }; | ||
220 | #else | ||
221 | #error One of __BIG_ENDIAN or __LITTLE_ENDIAN must be defined | ||
222 | #endif | ||
223 | |||
224 | /* RX & TX descriptor command */ | ||
225 | #define BUFFER_OWNED_BY_DMA 0x80000000 | ||
226 | |||
227 | /* RX & TX descriptor status */ | ||
228 | #define ERROR_SUMMARY 0x00000001 | ||
229 | |||
230 | /* RX descriptor status */ | ||
231 | #define LAYER_4_CHECKSUM_OK 0x40000000 | ||
232 | #define RX_ENABLE_INTERRUPT 0x20000000 | ||
233 | #define RX_FIRST_DESC 0x08000000 | ||
234 | #define RX_LAST_DESC 0x04000000 | ||
235 | #define RX_IP_HDR_OK 0x02000000 | ||
236 | #define RX_PKT_IS_IPV4 0x01000000 | ||
237 | #define RX_PKT_IS_ETHERNETV2 0x00800000 | ||
238 | #define RX_PKT_LAYER4_TYPE_MASK 0x00600000 | ||
239 | #define RX_PKT_LAYER4_TYPE_TCP_IPV4 0x00000000 | ||
240 | #define RX_PKT_IS_VLAN_TAGGED 0x00080000 | ||
241 | |||
242 | /* TX descriptor command */ | ||
243 | #define TX_ENABLE_INTERRUPT 0x00800000 | ||
244 | #define GEN_CRC 0x00400000 | ||
245 | #define TX_FIRST_DESC 0x00200000 | ||
246 | #define TX_LAST_DESC 0x00100000 | ||
247 | #define ZERO_PADDING 0x00080000 | ||
248 | #define GEN_IP_V4_CHECKSUM 0x00040000 | ||
249 | #define GEN_TCP_UDP_CHECKSUM 0x00020000 | ||
250 | #define UDP_FRAME 0x00010000 | ||
251 | #define MAC_HDR_EXTRA_4_BYTES 0x00008000 | ||
252 | #define MAC_HDR_EXTRA_8_BYTES 0x00000200 | ||
253 | |||
254 | #define TX_IHL_SHIFT 11 | ||
255 | |||
256 | |||
257 | /* global *******************************************************************/ | ||
258 | struct mv643xx_eth_shared_private { | ||
259 | /* | ||
260 | * Ethernet controller base address. | ||
261 | */ | ||
262 | void __iomem *base; | ||
263 | |||
264 | /* | ||
265 | * Points at the right SMI instance to use. | ||
266 | */ | ||
267 | struct mv643xx_eth_shared_private *smi; | ||
268 | |||
269 | /* | ||
270 | * Provides access to local SMI interface. | ||
271 | */ | ||
272 | struct mii_bus *smi_bus; | ||
273 | |||
274 | /* | ||
275 | * If we have access to the error interrupt pin (which is | ||
276 | * somewhat misnamed as it not only reflects internal errors | ||
277 | * but also reflects SMI completion), use that to wait for | ||
278 | * SMI access completion instead of polling the SMI busy bit. | ||
279 | */ | ||
280 | int err_interrupt; | ||
281 | wait_queue_head_t smi_busy_wait; | ||
282 | |||
283 | /* | ||
284 | * Per-port MBUS window access register value. | ||
285 | */ | ||
286 | u32 win_protect; | ||
287 | |||
288 | /* | ||
289 | * Hardware-specific parameters. | ||
290 | */ | ||
291 | unsigned int t_clk; | ||
292 | int extended_rx_coal_limit; | ||
293 | int tx_bw_control; | ||
294 | int tx_csum_limit; | ||
295 | }; | ||
296 | |||
297 | #define TX_BW_CONTROL_ABSENT 0 | ||
298 | #define TX_BW_CONTROL_OLD_LAYOUT 1 | ||
299 | #define TX_BW_CONTROL_NEW_LAYOUT 2 | ||
300 | |||
301 | static int mv643xx_eth_open(struct net_device *dev); | ||
302 | static int mv643xx_eth_stop(struct net_device *dev); | ||
303 | |||
304 | |||
305 | /* per-port *****************************************************************/ | ||
306 | struct mib_counters { | ||
307 | u64 good_octets_received; | ||
308 | u32 bad_octets_received; | ||
309 | u32 internal_mac_transmit_err; | ||
310 | u32 good_frames_received; | ||
311 | u32 bad_frames_received; | ||
312 | u32 broadcast_frames_received; | ||
313 | u32 multicast_frames_received; | ||
314 | u32 frames_64_octets; | ||
315 | u32 frames_65_to_127_octets; | ||
316 | u32 frames_128_to_255_octets; | ||
317 | u32 frames_256_to_511_octets; | ||
318 | u32 frames_512_to_1023_octets; | ||
319 | u32 frames_1024_to_max_octets; | ||
320 | u64 good_octets_sent; | ||
321 | u32 good_frames_sent; | ||
322 | u32 excessive_collision; | ||
323 | u32 multicast_frames_sent; | ||
324 | u32 broadcast_frames_sent; | ||
325 | u32 unrec_mac_control_received; | ||
326 | u32 fc_sent; | ||
327 | u32 good_fc_received; | ||
328 | u32 bad_fc_received; | ||
329 | u32 undersize_received; | ||
330 | u32 fragments_received; | ||
331 | u32 oversize_received; | ||
332 | u32 jabber_received; | ||
333 | u32 mac_receive_error; | ||
334 | u32 bad_crc_event; | ||
335 | u32 collision; | ||
336 | u32 late_collision; | ||
337 | }; | ||
338 | |||
339 | struct lro_counters { | ||
340 | u32 lro_aggregated; | ||
341 | u32 lro_flushed; | ||
342 | u32 lro_no_desc; | ||
343 | }; | ||
344 | |||
345 | struct rx_queue { | ||
346 | int index; | ||
347 | |||
348 | int rx_ring_size; | ||
349 | |||
350 | int rx_desc_count; | ||
351 | int rx_curr_desc; | ||
352 | int rx_used_desc; | ||
353 | |||
354 | struct rx_desc *rx_desc_area; | ||
355 | dma_addr_t rx_desc_dma; | ||
356 | int rx_desc_area_size; | ||
357 | struct sk_buff **rx_skb; | ||
358 | |||
359 | struct net_lro_mgr lro_mgr; | ||
360 | struct net_lro_desc lro_arr[8]; | ||
361 | }; | ||
362 | |||
363 | struct tx_queue { | ||
364 | int index; | ||
365 | |||
366 | int tx_ring_size; | ||
367 | |||
368 | int tx_desc_count; | ||
369 | int tx_curr_desc; | ||
370 | int tx_used_desc; | ||
371 | |||
372 | struct tx_desc *tx_desc_area; | ||
373 | dma_addr_t tx_desc_dma; | ||
374 | int tx_desc_area_size; | ||
375 | |||
376 | struct sk_buff_head tx_skb; | ||
377 | |||
378 | unsigned long tx_packets; | ||
379 | unsigned long tx_bytes; | ||
380 | unsigned long tx_dropped; | ||
381 | }; | ||
382 | |||
383 | struct mv643xx_eth_private { | ||
384 | struct mv643xx_eth_shared_private *shared; | ||
385 | void __iomem *base; | ||
386 | int port_num; | ||
387 | |||
388 | struct net_device *dev; | ||
389 | |||
390 | struct phy_device *phy; | ||
391 | |||
392 | struct timer_list mib_counters_timer; | ||
393 | spinlock_t mib_counters_lock; | ||
394 | struct mib_counters mib_counters; | ||
395 | |||
396 | struct lro_counters lro_counters; | ||
397 | |||
398 | struct work_struct tx_timeout_task; | ||
399 | |||
400 | struct napi_struct napi; | ||
401 | u32 int_mask; | ||
402 | u8 oom; | ||
403 | u8 work_link; | ||
404 | u8 work_tx; | ||
405 | u8 work_tx_end; | ||
406 | u8 work_rx; | ||
407 | u8 work_rx_refill; | ||
408 | |||
409 | int skb_size; | ||
410 | struct sk_buff_head rx_recycle; | ||
411 | |||
412 | /* | ||
413 | * RX state. | ||
414 | */ | ||
415 | int rx_ring_size; | ||
416 | unsigned long rx_desc_sram_addr; | ||
417 | int rx_desc_sram_size; | ||
418 | int rxq_count; | ||
419 | struct timer_list rx_oom; | ||
420 | struct rx_queue rxq[8]; | ||
421 | |||
422 | /* | ||
423 | * TX state. | ||
424 | */ | ||
425 | int tx_ring_size; | ||
426 | unsigned long tx_desc_sram_addr; | ||
427 | int tx_desc_sram_size; | ||
428 | int txq_count; | ||
429 | struct tx_queue txq[8]; | ||
430 | }; | ||
431 | |||
432 | |||
433 | /* port register accessors **************************************************/ | ||
434 | static inline u32 rdl(struct mv643xx_eth_private *mp, int offset) | ||
435 | { | ||
436 | return readl(mp->shared->base + offset); | ||
437 | } | ||
438 | |||
439 | static inline u32 rdlp(struct mv643xx_eth_private *mp, int offset) | ||
440 | { | ||
441 | return readl(mp->base + offset); | ||
442 | } | ||
443 | |||
444 | static inline void wrl(struct mv643xx_eth_private *mp, int offset, u32 data) | ||
445 | { | ||
446 | writel(data, mp->shared->base + offset); | ||
447 | } | ||
448 | |||
449 | static inline void wrlp(struct mv643xx_eth_private *mp, int offset, u32 data) | ||
450 | { | ||
451 | writel(data, mp->base + offset); | ||
452 | } | ||
453 | |||
454 | |||
455 | /* rxq/txq helper functions *************************************************/ | ||
456 | static struct mv643xx_eth_private *rxq_to_mp(struct rx_queue *rxq) | ||
457 | { | ||
458 | return container_of(rxq, struct mv643xx_eth_private, rxq[rxq->index]); | ||
459 | } | ||
460 | |||
461 | static struct mv643xx_eth_private *txq_to_mp(struct tx_queue *txq) | ||
462 | { | ||
463 | return container_of(txq, struct mv643xx_eth_private, txq[txq->index]); | ||
464 | } | ||
465 | |||
466 | static void rxq_enable(struct rx_queue *rxq) | ||
467 | { | ||
468 | struct mv643xx_eth_private *mp = rxq_to_mp(rxq); | ||
469 | wrlp(mp, RXQ_COMMAND, 1 << rxq->index); | ||
470 | } | ||
471 | |||
472 | static void rxq_disable(struct rx_queue *rxq) | ||
473 | { | ||
474 | struct mv643xx_eth_private *mp = rxq_to_mp(rxq); | ||
475 | u8 mask = 1 << rxq->index; | ||
476 | |||
477 | wrlp(mp, RXQ_COMMAND, mask << 8); | ||
478 | while (rdlp(mp, RXQ_COMMAND) & mask) | ||
479 | udelay(10); | ||
480 | } | ||
481 | |||
482 | static void txq_reset_hw_ptr(struct tx_queue *txq) | ||
483 | { | ||
484 | struct mv643xx_eth_private *mp = txq_to_mp(txq); | ||
485 | u32 addr; | ||
486 | |||
487 | addr = (u32)txq->tx_desc_dma; | ||
488 | addr += txq->tx_curr_desc * sizeof(struct tx_desc); | ||
489 | wrlp(mp, TXQ_CURRENT_DESC_PTR(txq->index), addr); | ||
490 | } | ||
491 | |||
492 | static void txq_enable(struct tx_queue *txq) | ||
493 | { | ||
494 | struct mv643xx_eth_private *mp = txq_to_mp(txq); | ||
495 | wrlp(mp, TXQ_COMMAND, 1 << txq->index); | ||
496 | } | ||
497 | |||
498 | static void txq_disable(struct tx_queue *txq) | ||
499 | { | ||
500 | struct mv643xx_eth_private *mp = txq_to_mp(txq); | ||
501 | u8 mask = 1 << txq->index; | ||
502 | |||
503 | wrlp(mp, TXQ_COMMAND, mask << 8); | ||
504 | while (rdlp(mp, TXQ_COMMAND) & mask) | ||
505 | udelay(10); | ||
506 | } | ||
507 | |||
508 | static void txq_maybe_wake(struct tx_queue *txq) | ||
509 | { | ||
510 | struct mv643xx_eth_private *mp = txq_to_mp(txq); | ||
511 | struct netdev_queue *nq = netdev_get_tx_queue(mp->dev, txq->index); | ||
512 | |||
513 | if (netif_tx_queue_stopped(nq)) { | ||
514 | __netif_tx_lock(nq, smp_processor_id()); | ||
515 | if (txq->tx_ring_size - txq->tx_desc_count >= MAX_SKB_FRAGS + 1) | ||
516 | netif_tx_wake_queue(nq); | ||
517 | __netif_tx_unlock(nq); | ||
518 | } | ||
519 | } | ||
520 | |||
521 | |||
522 | /* rx napi ******************************************************************/ | ||
523 | static int | ||
524 | mv643xx_get_skb_header(struct sk_buff *skb, void **iphdr, void **tcph, | ||
525 | u64 *hdr_flags, void *priv) | ||
526 | { | ||
527 | unsigned long cmd_sts = (unsigned long)priv; | ||
528 | |||
529 | /* | ||
530 | * Make sure that this packet is Ethernet II, is not VLAN | ||
531 | * tagged, is IPv4, has a valid IP header, and is TCP. | ||
532 | */ | ||
533 | if ((cmd_sts & (RX_IP_HDR_OK | RX_PKT_IS_IPV4 | | ||
534 | RX_PKT_IS_ETHERNETV2 | RX_PKT_LAYER4_TYPE_MASK | | ||
535 | RX_PKT_IS_VLAN_TAGGED)) != | ||
536 | (RX_IP_HDR_OK | RX_PKT_IS_IPV4 | | ||
537 | RX_PKT_IS_ETHERNETV2 | RX_PKT_LAYER4_TYPE_TCP_IPV4)) | ||
538 | return -1; | ||
539 | |||
540 | skb_reset_network_header(skb); | ||
541 | skb_set_transport_header(skb, ip_hdrlen(skb)); | ||
542 | *iphdr = ip_hdr(skb); | ||
543 | *tcph = tcp_hdr(skb); | ||
544 | *hdr_flags = LRO_IPV4 | LRO_TCP; | ||
545 | |||
546 | return 0; | ||
547 | } | ||
548 | |||
549 | static int rxq_process(struct rx_queue *rxq, int budget) | ||
550 | { | ||
551 | struct mv643xx_eth_private *mp = rxq_to_mp(rxq); | ||
552 | struct net_device_stats *stats = &mp->dev->stats; | ||
553 | int lro_flush_needed; | ||
554 | int rx; | ||
555 | |||
556 | lro_flush_needed = 0; | ||
557 | rx = 0; | ||
558 | while (rx < budget && rxq->rx_desc_count) { | ||
559 | struct rx_desc *rx_desc; | ||
560 | unsigned int cmd_sts; | ||
561 | struct sk_buff *skb; | ||
562 | u16 byte_cnt; | ||
563 | |||
564 | rx_desc = &rxq->rx_desc_area[rxq->rx_curr_desc]; | ||
565 | |||
566 | cmd_sts = rx_desc->cmd_sts; | ||
567 | if (cmd_sts & BUFFER_OWNED_BY_DMA) | ||
568 | break; | ||
569 | rmb(); | ||
570 | |||
571 | skb = rxq->rx_skb[rxq->rx_curr_desc]; | ||
572 | rxq->rx_skb[rxq->rx_curr_desc] = NULL; | ||
573 | |||
574 | rxq->rx_curr_desc++; | ||
575 | if (rxq->rx_curr_desc == rxq->rx_ring_size) | ||
576 | rxq->rx_curr_desc = 0; | ||
577 | |||
578 | dma_unmap_single(mp->dev->dev.parent, rx_desc->buf_ptr, | ||
579 | rx_desc->buf_size, DMA_FROM_DEVICE); | ||
580 | rxq->rx_desc_count--; | ||
581 | rx++; | ||
582 | |||
583 | mp->work_rx_refill |= 1 << rxq->index; | ||
584 | |||
585 | byte_cnt = rx_desc->byte_cnt; | ||
586 | |||
587 | /* | ||
588 | * Update statistics. | ||
589 | * | ||
590 | * Note that the descriptor byte count includes 2 dummy | ||
591 | * bytes automatically inserted by the hardware at the | ||
592 | * start of the packet (which we don't count), and a 4 | ||
593 | * byte CRC at the end of the packet (which we do count). | ||
594 | */ | ||
595 | stats->rx_packets++; | ||
596 | stats->rx_bytes += byte_cnt - 2; | ||
597 | |||
598 | /* | ||
599 | * In case we received a packet without first / last bits | ||
600 | * on, or the error summary bit is set, the packet needs | ||
601 | * to be dropped. | ||
602 | */ | ||
603 | if ((cmd_sts & (RX_FIRST_DESC | RX_LAST_DESC | ERROR_SUMMARY)) | ||
604 | != (RX_FIRST_DESC | RX_LAST_DESC)) | ||
605 | goto err; | ||
606 | |||
607 | /* | ||
608 | * The -4 is for the CRC in the trailer of the | ||
609 | * received packet | ||
610 | */ | ||
611 | skb_put(skb, byte_cnt - 2 - 4); | ||
612 | |||
613 | if (cmd_sts & LAYER_4_CHECKSUM_OK) | ||
614 | skb->ip_summed = CHECKSUM_UNNECESSARY; | ||
615 | skb->protocol = eth_type_trans(skb, mp->dev); | ||
616 | |||
617 | if (skb->dev->features & NETIF_F_LRO && | ||
618 | skb->ip_summed == CHECKSUM_UNNECESSARY) { | ||
619 | lro_receive_skb(&rxq->lro_mgr, skb, (void *)cmd_sts); | ||
620 | lro_flush_needed = 1; | ||
621 | } else | ||
622 | netif_receive_skb(skb); | ||
623 | |||
624 | continue; | ||
625 | |||
626 | err: | ||
627 | stats->rx_dropped++; | ||
628 | |||
629 | if ((cmd_sts & (RX_FIRST_DESC | RX_LAST_DESC)) != | ||
630 | (RX_FIRST_DESC | RX_LAST_DESC)) { | ||
631 | if (net_ratelimit()) | ||
632 | netdev_err(mp->dev, | ||
633 | "received packet spanning multiple descriptors\n"); | ||
634 | } | ||
635 | |||
636 | if (cmd_sts & ERROR_SUMMARY) | ||
637 | stats->rx_errors++; | ||
638 | |||
639 | dev_kfree_skb(skb); | ||
640 | } | ||
641 | |||
642 | if (lro_flush_needed) | ||
643 | lro_flush_all(&rxq->lro_mgr); | ||
644 | |||
645 | if (rx < budget) | ||
646 | mp->work_rx &= ~(1 << rxq->index); | ||
647 | |||
648 | return rx; | ||
649 | } | ||
650 | |||
/*
 * Refill @rxq with fresh receive buffers, up to @budget skbs or until
 * the ring is full.  Returns the number of buffers added; sets mp->oom
 * when an skb allocation fails.
 */
static int rxq_refill(struct rx_queue *rxq, int budget)
{
	struct mv643xx_eth_private *mp = rxq_to_mp(rxq);
	int refilled;

	refilled = 0;
	while (refilled < budget && rxq->rx_desc_count < rxq->rx_ring_size) {
		struct sk_buff *skb;
		int rx;
		struct rx_desc *rx_desc;
		int size;

		/* Prefer an skb from the recycle pool over a fresh alloc. */
		skb = __skb_dequeue(&mp->rx_recycle);
		if (skb == NULL)
			skb = dev_alloc_skb(mp->skb_size);

		if (skb == NULL) {
			mp->oom = 1;
			goto oom;
		}

		if (SKB_DMA_REALIGN)
			skb_reserve(skb, SKB_DMA_REALIGN);

		refilled++;
		rxq->rx_desc_count++;

		/* Advance the used-descriptor index, wrapping at ring end. */
		rx = rxq->rx_used_desc++;
		if (rxq->rx_used_desc == rxq->rx_ring_size)
			rxq->rx_used_desc = 0;

		rx_desc = rxq->rx_desc_area + rx;

		size = skb->end - skb->data;
		rx_desc->buf_ptr = dma_map_single(mp->dev->dev.parent,
						  skb->data, size,
						  DMA_FROM_DEVICE);
		rx_desc->buf_size = size;
		rxq->rx_skb[rx] = skb;
		/* Make buf_ptr/buf_size visible before passing ownership
		 * to the DMA engine, and order the ownership write itself. */
		wmb();
		rx_desc->cmd_sts = BUFFER_OWNED_BY_DMA | RX_ENABLE_INTERRUPT;
		wmb();

		/*
		 * The hardware automatically prepends 2 bytes of
		 * dummy data to each received packet, so that the
		 * IP header ends up 16-byte aligned.
		 */
		skb_reserve(skb, 2);
	}

	if (refilled < budget)
		mp->work_rx_refill &= ~(1 << rxq->index);

oom:
	return refilled;
}
708 | |||
709 | |||
710 | /* tx ***********************************************************************/ | ||
711 | static inline unsigned int has_tiny_unaligned_frags(struct sk_buff *skb) | ||
712 | { | ||
713 | int frag; | ||
714 | |||
715 | for (frag = 0; frag < skb_shinfo(skb)->nr_frags; frag++) { | ||
716 | skb_frag_t *fragp = &skb_shinfo(skb)->frags[frag]; | ||
717 | if (fragp->size <= 8 && fragp->page_offset & 7) | ||
718 | return 1; | ||
719 | } | ||
720 | |||
721 | return 0; | ||
722 | } | ||
723 | |||
/*
 * Fill TX descriptors for every page fragment of @skb.  Only the last
 * fragment's descriptor requests a completion interrupt; the head
 * descriptor is filled by the caller (txq_submit_skb()).
 */
static void txq_submit_frag_skb(struct tx_queue *txq, struct sk_buff *skb)
{
	struct mv643xx_eth_private *mp = txq_to_mp(txq);
	int nr_frags = skb_shinfo(skb)->nr_frags;
	int frag;

	for (frag = 0; frag < nr_frags; frag++) {
		skb_frag_t *this_frag;
		int tx_index;
		struct tx_desc *desc;

		this_frag = &skb_shinfo(skb)->frags[frag];
		/* Claim the next descriptor, wrapping at the ring end. */
		tx_index = txq->tx_curr_desc++;
		if (txq->tx_curr_desc == txq->tx_ring_size)
			txq->tx_curr_desc = 0;
		desc = &txq->tx_desc_area[tx_index];

		/*
		 * The last fragment will generate an interrupt
		 * which will free the skb on TX completion.
		 */
		if (frag == nr_frags - 1) {
			desc->cmd_sts = BUFFER_OWNED_BY_DMA |
					ZERO_PADDING | TX_LAST_DESC |
					TX_ENABLE_INTERRUPT;
		} else {
			desc->cmd_sts = BUFFER_OWNED_BY_DMA;
		}

		desc->l4i_chk = 0;
		desc->byte_cnt = this_frag->size;
		desc->buf_ptr = dma_map_page(mp->dev->dev.parent,
					     this_frag->page,
					     this_frag->page_offset,
					     this_frag->size, DMA_TO_DEVICE);
	}
}
761 | |||
/*
 * Reinterpret a 16-bit checksum as big-endian without any byte
 * swapping; the value is already in on-the-wire order.
 */
static inline __be16 sum16_as_be(__sum16 sum)
{
	return (__force __be16)sum;
}
766 | |||
/*
 * Queue one skb on @txq for transmission: set up hardware TCP/UDP/IP
 * checksum offload when requested, fill the head descriptor (fragment
 * descriptors via txq_submit_frag_skb()), then hand ownership to the
 * DMA engine and kick the queue.
 *
 * Returns 0 on success; returns 1 (with the skb freed) if software
 * checksum fallback failed.
 */
static int txq_submit_skb(struct tx_queue *txq, struct sk_buff *skb)
{
	struct mv643xx_eth_private *mp = txq_to_mp(txq);
	int nr_frags = skb_shinfo(skb)->nr_frags;
	int tx_index;
	struct tx_desc *desc;
	u32 cmd_sts;
	u16 l4i_chk;
	int length;

	cmd_sts = TX_FIRST_DESC | GEN_CRC | BUFFER_OWNED_BY_DMA;
	l4i_chk = 0;

	if (skb->ip_summed == CHECKSUM_PARTIAL) {
		int hdr_len;
		int tag_bytes;

		BUG_ON(skb->protocol != htons(ETH_P_IP) &&
		       skb->protocol != htons(ETH_P_8021Q));

		hdr_len = (void *)ip_hdr(skb) - (void *)skb->data;
		tag_bytes = hdr_len - ETH_HLEN;
		/*
		 * The hardware can only checksum frames up to the csum
		 * limit and with 0, 4, 8 or 12 bytes of VLAN tagging
		 * (tag_bytes & ~12 is non-zero for any other length);
		 * otherwise fall back to software checksumming.
		 */
		if (skb->len - hdr_len > mp->shared->tx_csum_limit ||
		    unlikely(tag_bytes & ~12)) {
			if (skb_checksum_help(skb) == 0)
				goto no_csum;
			kfree_skb(skb);
			return 1;
		}

		if (tag_bytes & 4)
			cmd_sts |= MAC_HDR_EXTRA_4_BYTES;
		if (tag_bytes & 8)
			cmd_sts |= MAC_HDR_EXTRA_8_BYTES;

		cmd_sts |= GEN_TCP_UDP_CHECKSUM |
			   GEN_IP_V4_CHECKSUM |
			   ip_hdr(skb)->ihl << TX_IHL_SHIFT;

		switch (ip_hdr(skb)->protocol) {
		case IPPROTO_UDP:
			cmd_sts |= UDP_FRAME;
			l4i_chk = ntohs(sum16_as_be(udp_hdr(skb)->check));
			break;
		case IPPROTO_TCP:
			l4i_chk = ntohs(sum16_as_be(tcp_hdr(skb)->check));
			break;
		default:
			BUG();
		}
	} else {
no_csum:
		/* Errata BTS #50, IHL must be 5 if no HW checksum */
		cmd_sts |= 5 << TX_IHL_SHIFT;
	}

	/* Claim the head descriptor, wrapping at the ring end. */
	tx_index = txq->tx_curr_desc++;
	if (txq->tx_curr_desc == txq->tx_ring_size)
		txq->tx_curr_desc = 0;
	desc = &txq->tx_desc_area[tx_index];

	if (nr_frags) {
		txq_submit_frag_skb(txq, skb);
		length = skb_headlen(skb);
	} else {
		/* Single-descriptor frame: it is also the last one. */
		cmd_sts |= ZERO_PADDING | TX_LAST_DESC | TX_ENABLE_INTERRUPT;
		length = skb->len;
	}

	desc->l4i_chk = l4i_chk;
	desc->byte_cnt = length;
	desc->buf_ptr = dma_map_single(mp->dev->dev.parent, skb->data,
				       length, DMA_TO_DEVICE);

	__skb_queue_tail(&txq->tx_skb, skb);

	skb_tx_timestamp(skb);

	/* ensure all other descriptors are written before first cmd_sts */
	wmb();
	desc->cmd_sts = cmd_sts;

	/* clear TX_END status */
	mp->work_tx_end &= ~(1 << txq->index);

	/* ensure all descriptors are written before poking hardware */
	wmb();
	txq_enable(txq);

	txq->tx_desc_count += nr_frags + 1;

	return 0;
}
860 | |||
/*
 * ndo_start_xmit handler.  Selects the TX queue from the skb mapping,
 * linearizes skbs the hardware cannot DMA, drops frames when the ring
 * is unexpectedly full, and stops the netdev queue when fewer than a
 * worst-case skb's worth of descriptors remain.
 */
static netdev_tx_t mv643xx_eth_xmit(struct sk_buff *skb, struct net_device *dev)
{
	struct mv643xx_eth_private *mp = netdev_priv(dev);
	int length, queue;
	struct tx_queue *txq;
	struct netdev_queue *nq;

	queue = skb_get_queue_mapping(skb);
	txq = mp->txq + queue;
	nq = netdev_get_tx_queue(dev, queue);

	/* Tiny unaligned fragments cannot be DMA'd; try to linearize. */
	if (has_tiny_unaligned_frags(skb) && __skb_linearize(skb)) {
		txq->tx_dropped++;
		netdev_printk(KERN_DEBUG, dev,
			      "failed to linearize skb with tiny unaligned fragment\n");
		return NETDEV_TX_BUSY;
	}

	if (txq->tx_ring_size - txq->tx_desc_count < MAX_SKB_FRAGS + 1) {
		if (net_ratelimit())
			netdev_err(dev, "tx queue full?!\n");
		kfree_skb(skb);
		return NETDEV_TX_OK;
	}

	/* Record length now; txq_submit_skb() may free the skb. */
	length = skb->len;

	if (!txq_submit_skb(txq, skb)) {
		int entries_left;

		txq->tx_bytes += length;
		txq->tx_packets++;

		entries_left = txq->tx_ring_size - txq->tx_desc_count;
		if (entries_left < MAX_SKB_FRAGS + 1)
			netif_tx_stop_queue(nq);
	}

	return NETDEV_TX_OK;
}
901 | |||
902 | |||
903 | /* tx napi ******************************************************************/ | ||
/*
 * Re-enable @txq if the hardware stopped fetching descriptors before
 * reaching the most recently queued one.  Called from the TX_END work
 * path; clears the queue's work_tx_end bit.
 */
static void txq_kick(struct tx_queue *txq)
{
	struct mv643xx_eth_private *mp = txq_to_mp(txq);
	struct netdev_queue *nq = netdev_get_tx_queue(mp->dev, txq->index);
	u32 hw_desc_ptr;
	u32 expected_ptr;

	__netif_tx_lock(nq, smp_processor_id());

	/* Nothing to do if the queue is still enabled. */
	if (rdlp(mp, TXQ_COMMAND) & (1 << txq->index))
		goto out;

	/* Kick only if the hardware stopped short of the newest
	 * descriptor we queued. */
	hw_desc_ptr = rdlp(mp, TXQ_CURRENT_DESC_PTR(txq->index));
	expected_ptr = (u32)txq->tx_desc_dma +
			txq->tx_curr_desc * sizeof(struct tx_desc);

	if (hw_desc_ptr != expected_ptr)
		txq_enable(txq);

out:
	__netif_tx_unlock(nq);

	mp->work_tx_end &= ~(1 << txq->index);
}
928 | |||
/*
 * Reclaim up to @budget completed TX descriptors from @txq, unmapping
 * their DMA buffers and freeing (or recycling) the skbs.  With @force
 * set, descriptors still owned by the DMA engine are taken back too,
 * for queue teardown.  Returns the number of descriptors reclaimed.
 */
static int txq_reclaim(struct tx_queue *txq, int budget, int force)
{
	struct mv643xx_eth_private *mp = txq_to_mp(txq);
	struct netdev_queue *nq = netdev_get_tx_queue(mp->dev, txq->index);
	int reclaimed;

	__netif_tx_lock(nq, smp_processor_id());

	reclaimed = 0;
	while (reclaimed < budget && txq->tx_desc_count > 0) {
		int tx_index;
		struct tx_desc *desc;
		u32 cmd_sts;
		struct sk_buff *skb;

		tx_index = txq->tx_used_desc;
		desc = &txq->tx_desc_area[tx_index];
		cmd_sts = desc->cmd_sts;

		if (cmd_sts & BUFFER_OWNED_BY_DMA) {
			if (!force)
				break;
			/* Forcibly take the descriptor back from hardware. */
			desc->cmd_sts = cmd_sts & ~BUFFER_OWNED_BY_DMA;
		}

		txq->tx_used_desc = tx_index + 1;
		if (txq->tx_used_desc == txq->tx_ring_size)
			txq->tx_used_desc = 0;

		reclaimed++;
		txq->tx_desc_count--;

		/* Only the last descriptor of a frame has an skb queued. */
		skb = NULL;
		if (cmd_sts & TX_LAST_DESC)
			skb = __skb_dequeue(&txq->tx_skb);

		if (cmd_sts & ERROR_SUMMARY) {
			netdev_info(mp->dev, "tx error\n");
			mp->dev->stats.tx_errors++;
		}

		/* Head descriptors were mapped with dma_map_single(),
		 * fragment descriptors with dma_map_page(). */
		if (cmd_sts & TX_FIRST_DESC) {
			dma_unmap_single(mp->dev->dev.parent, desc->buf_ptr,
					 desc->byte_cnt, DMA_TO_DEVICE);
		} else {
			dma_unmap_page(mp->dev->dev.parent, desc->buf_ptr,
				       desc->byte_cnt, DMA_TO_DEVICE);
		}

		if (skb != NULL) {
			/* Recycle into the RX pool when there is room and
			 * the skb is reusable; otherwise free it. */
			if (skb_queue_len(&mp->rx_recycle) <
			    mp->rx_ring_size &&
			    skb_recycle_check(skb, mp->skb_size))
				__skb_queue_head(&mp->rx_recycle, skb);
			else
				dev_kfree_skb(skb);
		}
	}

	__netif_tx_unlock(nq);

	if (reclaimed < budget)
		mp->work_tx &= ~(1 << txq->index);

	return reclaimed;
}
995 | |||
996 | |||
997 | /* tx rate control **********************************************************/ | ||
998 | /* | ||
999 | * Set total maximum TX rate (shared by all TX queues for this port) | ||
1000 | * to 'rate' bits per second, with a maximum burst of 'burst' bytes. | ||
1001 | */ | ||
static void tx_set_rate(struct mv643xx_eth_private *mp, int rate, int burst)
{
	int token_rate;
	int mtu;
	int bucket_size;

	/* Scale the bit rate to hardware token units and clamp to the
	 * 10-bit register field. */
	token_rate = ((rate / 1000) * 64) / (mp->shared->t_clk / 1000);
	if (token_rate > 1023)
		token_rate = 1023;

	/* MTU field is in 256-byte units, clamped to 6 bits. */
	mtu = (mp->dev->mtu + 255) >> 8;
	if (mtu > 63)
		mtu = 63;

	/* Burst bucket is in 256-byte units, clamped to 16 bits. */
	bucket_size = (burst + 255) >> 8;
	if (bucket_size > 65535)
		bucket_size = 65535;

	/* The register locations differ between chip generations. */
	switch (mp->shared->tx_bw_control) {
	case TX_BW_CONTROL_OLD_LAYOUT:
		wrlp(mp, TX_BW_RATE, token_rate);
		wrlp(mp, TX_BW_MTU, mtu);
		wrlp(mp, TX_BW_BURST, bucket_size);
		break;
	case TX_BW_CONTROL_NEW_LAYOUT:
		wrlp(mp, TX_BW_RATE_MOVED, token_rate);
		wrlp(mp, TX_BW_MTU_MOVED, mtu);
		wrlp(mp, TX_BW_BURST_MOVED, bucket_size);
		break;
	}
}
1033 | |||
/*
 * Per-queue variant of tx_set_rate(): program one TX queue's token
 * bucket rate ('rate' bits per second) and burst size ('burst' bytes).
 */
static void txq_set_rate(struct tx_queue *txq, int rate, int burst)
{
	struct mv643xx_eth_private *mp = txq_to_mp(txq);
	int token_rate;
	int bucket_size;

	/* Scale to hardware token units; clamp to the 10-bit field. */
	token_rate = ((rate / 1000) * 64) / (mp->shared->t_clk / 1000);
	if (token_rate > 1023)
		token_rate = 1023;

	/* Burst bucket is in 256-byte units, clamped to 16 bits. */
	bucket_size = (burst + 255) >> 8;
	if (bucket_size > 65535)
		bucket_size = 65535;

	wrlp(mp, TXQ_BW_TOKENS(txq->index), token_rate << 14);
	wrlp(mp, TXQ_BW_CONF(txq->index), (bucket_size << 10) | token_rate);
}
1051 | |||
1052 | static void txq_set_fixed_prio_mode(struct tx_queue *txq) | ||
1053 | { | ||
1054 | struct mv643xx_eth_private *mp = txq_to_mp(txq); | ||
1055 | int off; | ||
1056 | u32 val; | ||
1057 | |||
1058 | /* | ||
1059 | * Turn on fixed priority mode. | ||
1060 | */ | ||
1061 | off = 0; | ||
1062 | switch (mp->shared->tx_bw_control) { | ||
1063 | case TX_BW_CONTROL_OLD_LAYOUT: | ||
1064 | off = TXQ_FIX_PRIO_CONF; | ||
1065 | break; | ||
1066 | case TX_BW_CONTROL_NEW_LAYOUT: | ||
1067 | off = TXQ_FIX_PRIO_CONF_MOVED; | ||
1068 | break; | ||
1069 | } | ||
1070 | |||
1071 | if (off) { | ||
1072 | val = rdlp(mp, off); | ||
1073 | val |= 1 << txq->index; | ||
1074 | wrlp(mp, off, val); | ||
1075 | } | ||
1076 | } | ||
1077 | |||
1078 | |||
1079 | /* mii management interface *************************************************/ | ||
/*
 * Shared error interrupt handler.  Only the SMI-done cause is handled:
 * it is acknowledged and any waiter in smi_wait_ready() is woken.
 */
static irqreturn_t mv643xx_eth_err_irq(int irq, void *dev_id)
{
	struct mv643xx_eth_shared_private *msp = dev_id;

	if (readl(msp->base + ERR_INT_CAUSE) & ERR_INT_SMI_DONE) {
		/* Ack only the SMI-done bit, leaving other causes intact. */
		writel(~ERR_INT_SMI_DONE, msp->base + ERR_INT_CAUSE);
		wake_up(&msp->smi_busy_wait);
		return IRQ_HANDLED;
	}

	return IRQ_NONE;
}
1092 | |||
1093 | static int smi_is_done(struct mv643xx_eth_shared_private *msp) | ||
1094 | { | ||
1095 | return !(readl(msp->base + SMI_REG) & SMI_BUSY); | ||
1096 | } | ||
1097 | |||
/*
 * Wait for the SMI unit to become idle.  Without an error interrupt
 * line, poll for up to ~100 ms; otherwise sleep on the SMI-done wait
 * queue (woken from mv643xx_eth_err_irq()).  Returns 0 on success or
 * -ETIMEDOUT.
 */
static int smi_wait_ready(struct mv643xx_eth_shared_private *msp)
{
	if (msp->err_interrupt == NO_IRQ) {
		int i;

		/* Polling fallback: 10 x 10 ms. */
		for (i = 0; !smi_is_done(msp); i++) {
			if (i == 10)
				return -ETIMEDOUT;
			msleep(10);
		}

		return 0;
	}

	if (!smi_is_done(msp)) {
		wait_event_timeout(msp->smi_busy_wait, smi_is_done(msp),
				   msecs_to_jiffies(100));
		/* Re-check: the wait may have timed out. */
		if (!smi_is_done(msp))
			return -ETIMEDOUT;
	}

	return 0;
}
1121 | |||
/*
 * mii_bus read op: issue an SMI read for PHY @addr, register @reg and
 * return the 16-bit value, or a negative errno on timeout or when the
 * hardware reports the read as invalid.
 */
static int smi_bus_read(struct mii_bus *bus, int addr, int reg)
{
	struct mv643xx_eth_shared_private *msp = bus->priv;
	void __iomem *smi_reg = msp->base + SMI_REG;
	int ret;

	if (smi_wait_ready(msp)) {
		pr_warn("SMI bus busy timeout\n");
		return -ETIMEDOUT;
	}

	writel(SMI_OPCODE_READ | (reg << 21) | (addr << 16), smi_reg);

	/* Wait for the read transaction to complete. */
	if (smi_wait_ready(msp)) {
		pr_warn("SMI bus busy timeout\n");
		return -ETIMEDOUT;
	}

	ret = readl(smi_reg);
	if (!(ret & SMI_READ_VALID)) {
		pr_warn("SMI bus read not valid\n");
		return -ENODEV;
	}

	return ret & 0xffff;
}
1148 | |||
/*
 * mii_bus write op: write @val to PHY @addr, register @reg.  Returns
 * 0 on success or -ETIMEDOUT if the SMI unit stays busy.
 */
static int smi_bus_write(struct mii_bus *bus, int addr, int reg, u16 val)
{
	struct mv643xx_eth_shared_private *msp = bus->priv;
	void __iomem *smi_reg = msp->base + SMI_REG;

	if (smi_wait_ready(msp)) {
		pr_warn("SMI bus busy timeout\n");
		return -ETIMEDOUT;
	}

	writel(SMI_OPCODE_WRITE | (reg << 21) |
		(addr << 16) | (val & 0xffff), smi_reg);

	/* Wait for the write transaction to complete. */
	if (smi_wait_ready(msp)) {
		pr_warn("SMI bus busy timeout\n");
		return -ETIMEDOUT;
	}

	return 0;
}
1169 | |||
1170 | |||
1171 | /* statistics ***************************************************************/ | ||
1172 | static struct net_device_stats *mv643xx_eth_get_stats(struct net_device *dev) | ||
1173 | { | ||
1174 | struct mv643xx_eth_private *mp = netdev_priv(dev); | ||
1175 | struct net_device_stats *stats = &dev->stats; | ||
1176 | unsigned long tx_packets = 0; | ||
1177 | unsigned long tx_bytes = 0; | ||
1178 | unsigned long tx_dropped = 0; | ||
1179 | int i; | ||
1180 | |||
1181 | for (i = 0; i < mp->txq_count; i++) { | ||
1182 | struct tx_queue *txq = mp->txq + i; | ||
1183 | |||
1184 | tx_packets += txq->tx_packets; | ||
1185 | tx_bytes += txq->tx_bytes; | ||
1186 | tx_dropped += txq->tx_dropped; | ||
1187 | } | ||
1188 | |||
1189 | stats->tx_packets = tx_packets; | ||
1190 | stats->tx_bytes = tx_bytes; | ||
1191 | stats->tx_dropped = tx_dropped; | ||
1192 | |||
1193 | return stats; | ||
1194 | } | ||
1195 | |||
1196 | static void mv643xx_eth_grab_lro_stats(struct mv643xx_eth_private *mp) | ||
1197 | { | ||
1198 | u32 lro_aggregated = 0; | ||
1199 | u32 lro_flushed = 0; | ||
1200 | u32 lro_no_desc = 0; | ||
1201 | int i; | ||
1202 | |||
1203 | for (i = 0; i < mp->rxq_count; i++) { | ||
1204 | struct rx_queue *rxq = mp->rxq + i; | ||
1205 | |||
1206 | lro_aggregated += rxq->lro_mgr.stats.aggregated; | ||
1207 | lro_flushed += rxq->lro_mgr.stats.flushed; | ||
1208 | lro_no_desc += rxq->lro_mgr.stats.no_desc; | ||
1209 | } | ||
1210 | |||
1211 | mp->lro_counters.lro_aggregated = lro_aggregated; | ||
1212 | mp->lro_counters.lro_flushed = lro_flushed; | ||
1213 | mp->lro_counters.lro_no_desc = lro_no_desc; | ||
1214 | } | ||
1215 | |||
/* Read one 32-bit MIB counter register for this port at @offset. */
static inline u32 mib_read(struct mv643xx_eth_private *mp, int offset)
{
	return rdl(mp, MIB_COUNTERS(mp->port_num) + offset);
}
1220 | |||
/*
 * Read every MIB counter register (0x00..0x7c) and discard the values;
 * presumably the counters are read-to-clear so this zeroes them —
 * NOTE(review): confirm against the chip datasheet.
 */
static void mib_counters_clear(struct mv643xx_eth_private *mp)
{
	int i;

	for (i = 0; i < 0x80; i += 4)
		mib_read(mp, i);
}
1228 | |||
/*
 * Accumulate the hardware MIB counter registers into the software
 * copy in mp->mib_counters (serialized by mib_counters_lock) and
 * re-arm the 30-second refresh timer.
 */
static void mib_counters_update(struct mv643xx_eth_private *mp)
{
	struct mib_counters *p = &mp->mib_counters;

	spin_lock_bh(&mp->mib_counters_lock);
	p->good_octets_received += mib_read(mp, 0x00);
	p->bad_octets_received += mib_read(mp, 0x08);
	p->internal_mac_transmit_err += mib_read(mp, 0x0c);
	p->good_frames_received += mib_read(mp, 0x10);
	p->bad_frames_received += mib_read(mp, 0x14);
	p->broadcast_frames_received += mib_read(mp, 0x18);
	p->multicast_frames_received += mib_read(mp, 0x1c);
	p->frames_64_octets += mib_read(mp, 0x20);
	p->frames_65_to_127_octets += mib_read(mp, 0x24);
	p->frames_128_to_255_octets += mib_read(mp, 0x28);
	p->frames_256_to_511_octets += mib_read(mp, 0x2c);
	p->frames_512_to_1023_octets += mib_read(mp, 0x30);
	p->frames_1024_to_max_octets += mib_read(mp, 0x34);
	p->good_octets_sent += mib_read(mp, 0x38);
	p->good_frames_sent += mib_read(mp, 0x40);
	p->excessive_collision += mib_read(mp, 0x44);
	p->multicast_frames_sent += mib_read(mp, 0x48);
	p->broadcast_frames_sent += mib_read(mp, 0x4c);
	p->unrec_mac_control_received += mib_read(mp, 0x50);
	p->fc_sent += mib_read(mp, 0x54);
	p->good_fc_received += mib_read(mp, 0x58);
	p->bad_fc_received += mib_read(mp, 0x5c);
	p->undersize_received += mib_read(mp, 0x60);
	p->fragments_received += mib_read(mp, 0x64);
	p->oversize_received += mib_read(mp, 0x68);
	p->jabber_received += mib_read(mp, 0x6c);
	p->mac_receive_error += mib_read(mp, 0x70);
	p->bad_crc_event += mib_read(mp, 0x74);
	p->collision += mib_read(mp, 0x78);
	p->late_collision += mib_read(mp, 0x7c);
	spin_unlock_bh(&mp->mib_counters_lock);

	mod_timer(&mp->mib_counters_timer, jiffies + 30 * HZ);
}
1268 | |||
/* Timer callback: periodically refresh the MIB counter copy. */
static void mib_counters_timer_wrapper(unsigned long _mp)
{
	struct mv643xx_eth_private *mp = (void *)_mp;

	mib_counters_update(mp);
}
1275 | |||
1276 | |||
1277 | /* interrupt coalescing *****************************************************/ | ||
1278 | /* | ||
1279 | * Hardware coalescing parameters are set in units of 64 t_clk | ||
1280 | * cycles. I.e.: | ||
1281 | * | ||
1282 | * coal_delay_in_usec = 64000000 * register_value / t_clk_rate | ||
1283 | * | ||
1284 | * register_value = coal_delay_in_usec * t_clk_rate / 64000000 | ||
1285 | * | ||
1286 | * In the ->set*() methods, we round the computed register value | ||
1287 | * to the nearest integer. | ||
1288 | */ | ||
/* Return the current RX interrupt coalescing delay in microseconds. */
static unsigned int get_rx_coal(struct mv643xx_eth_private *mp)
{
	u32 val = rdlp(mp, SDMA_CONFIG);
	u64 temp;

	/* Extract the coalescing field; on chips with the extended
	 * limit it is split across bits 7..21 plus bit 25. */
	if (mp->shared->extended_rx_coal_limit)
		temp = ((val & 0x02000000) >> 10) | ((val & 0x003fff80) >> 7);
	else
		temp = (val & 0x003fff00) >> 8;

	/* usec = register_value * 64000000 / t_clk (see comment above). */
	temp *= 64000000;
	do_div(temp, mp->shared->t_clk);

	return (unsigned int)temp;
}
1304 | |||
/* Program the RX interrupt coalescing delay (@usec microseconds). */
static void set_rx_coal(struct mv643xx_eth_private *mp, unsigned int usec)
{
	u64 temp;
	u32 val;

	/* register_value = round(usec * t_clk / 64000000). */
	temp = (u64)usec * mp->shared->t_clk;
	temp += 31999999;
	do_div(temp, 64000000);

	val = rdlp(mp, SDMA_CONFIG);
	if (mp->shared->extended_rx_coal_limit) {
		/* Extended field: bit 15 goes to bit 25, bits 0..14 to
		 * bits 7..21. */
		if (temp > 0xffff)
			temp = 0xffff;
		val &= ~0x023fff80;
		val |= (temp & 0x8000) << 10;
		val |= (temp & 0x7fff) << 7;
	} else {
		if (temp > 0x3fff)
			temp = 0x3fff;
		val &= ~0x003fff00;
		val |= (temp & 0x3fff) << 8;
	}
	wrlp(mp, SDMA_CONFIG, val);
}
1329 | |||
/* Return the current TX interrupt coalescing delay in microseconds. */
static unsigned int get_tx_coal(struct mv643xx_eth_private *mp)
{
	u64 temp;

	temp = (rdlp(mp, TX_FIFO_URGENT_THRESHOLD) & 0x3fff0) >> 4;
	/* usec = register_value * 64000000 / t_clk (see comment above). */
	temp *= 64000000;
	do_div(temp, mp->shared->t_clk);

	return (unsigned int)temp;
}
1340 | |||
/* Program the TX interrupt coalescing delay (@usec microseconds). */
static void set_tx_coal(struct mv643xx_eth_private *mp, unsigned int usec)
{
	u64 temp;

	/* register_value = round(usec * t_clk / 64000000), clamped to
	 * the 14-bit field. */
	temp = (u64)usec * mp->shared->t_clk;
	temp += 31999999;
	do_div(temp, 64000000);

	if (temp > 0x3fff)
		temp = 0x3fff;

	wrlp(mp, TX_FIFO_URGENT_THRESHOLD, temp << 4);
}
1354 | |||
1355 | |||
1356 | /* ethtool ******************************************************************/ | ||
/*
 * Describes one ethtool statistic: its display name and where the
 * value lives — at an offset into struct net_device or into struct
 * mv643xx_eth_private (the unused offset is -1).
 */
struct mv643xx_eth_stats {
	char stat_string[ETH_GSTRING_LEN];	/* name shown by ethtool -S */
	int sizeof_stat;	/* width of the value in bytes (4 or 8) */
	int netdev_off;		/* offset into struct net_device, or -1 */
	int mp_off;		/* offset into the private struct, or -1 */
};
1363 | |||
/* Statistic stored in struct net_device_stats. */
#define SSTAT(m)						\
	{ #m, FIELD_SIZEOF(struct net_device_stats, m),		\
	  offsetof(struct net_device, stats.m), -1 }

/* Statistic stored in the software copy of the hardware MIB counters. */
#define MIBSTAT(m)						\
	{ #m, FIELD_SIZEOF(struct mib_counters, m),		\
	  -1, offsetof(struct mv643xx_eth_private, mib_counters.m) }

/* Statistic stored in the aggregated LRO counters. */
#define LROSTAT(m)						\
	{ #m, FIELD_SIZEOF(struct lro_counters, m),		\
	  -1, offsetof(struct mv643xx_eth_private, lro_counters.m) }

/* Table of all statistics reported via ethtool -S, in display order. */
static const struct mv643xx_eth_stats mv643xx_eth_stats[] = {
	SSTAT(rx_packets),
	SSTAT(tx_packets),
	SSTAT(rx_bytes),
	SSTAT(tx_bytes),
	SSTAT(rx_errors),
	SSTAT(tx_errors),
	SSTAT(rx_dropped),
	SSTAT(tx_dropped),
	MIBSTAT(good_octets_received),
	MIBSTAT(bad_octets_received),
	MIBSTAT(internal_mac_transmit_err),
	MIBSTAT(good_frames_received),
	MIBSTAT(bad_frames_received),
	MIBSTAT(broadcast_frames_received),
	MIBSTAT(multicast_frames_received),
	MIBSTAT(frames_64_octets),
	MIBSTAT(frames_65_to_127_octets),
	MIBSTAT(frames_128_to_255_octets),
	MIBSTAT(frames_256_to_511_octets),
	MIBSTAT(frames_512_to_1023_octets),
	MIBSTAT(frames_1024_to_max_octets),
	MIBSTAT(good_octets_sent),
	MIBSTAT(good_frames_sent),
	MIBSTAT(excessive_collision),
	MIBSTAT(multicast_frames_sent),
	MIBSTAT(broadcast_frames_sent),
	MIBSTAT(unrec_mac_control_received),
	MIBSTAT(fc_sent),
	MIBSTAT(good_fc_received),
	MIBSTAT(bad_fc_received),
	MIBSTAT(undersize_received),
	MIBSTAT(fragments_received),
	MIBSTAT(oversize_received),
	MIBSTAT(jabber_received),
	MIBSTAT(mac_receive_error),
	MIBSTAT(bad_crc_event),
	MIBSTAT(collision),
	MIBSTAT(late_collision),
	LROSTAT(lro_aggregated),
	LROSTAT(lro_flushed),
	LROSTAT(lro_no_desc),
};
1419 | |||
/*
 * ethtool get_settings for ports with an attached PHY: refresh the
 * PHY state and report it, masking out 1000baseT_Half which the MAC
 * cannot do.
 */
static int
mv643xx_eth_get_settings_phy(struct mv643xx_eth_private *mp,
			     struct ethtool_cmd *cmd)
{
	int err;

	err = phy_read_status(mp->phy);
	if (err == 0)
		err = phy_ethtool_gset(mp->phy, cmd);

	/*
	 * The MAC does not support 1000baseT_Half.
	 */
	cmd->supported &= ~SUPPORTED_1000baseT_Half;
	cmd->advertising &= ~ADVERTISED_1000baseT_Half;

	return err;
}
1438 | |||
1439 | static int | ||
1440 | mv643xx_eth_get_settings_phyless(struct mv643xx_eth_private *mp, | ||
1441 | struct ethtool_cmd *cmd) | ||
1442 | { | ||
1443 | u32 port_status; | ||
1444 | |||
1445 | port_status = rdlp(mp, PORT_STATUS); | ||
1446 | |||
1447 | cmd->supported = SUPPORTED_MII; | ||
1448 | cmd->advertising = ADVERTISED_MII; | ||
1449 | switch (port_status & PORT_SPEED_MASK) { | ||
1450 | case PORT_SPEED_10: | ||
1451 | ethtool_cmd_speed_set(cmd, SPEED_10); | ||
1452 | break; | ||
1453 | case PORT_SPEED_100: | ||
1454 | ethtool_cmd_speed_set(cmd, SPEED_100); | ||
1455 | break; | ||
1456 | case PORT_SPEED_1000: | ||
1457 | ethtool_cmd_speed_set(cmd, SPEED_1000); | ||
1458 | break; | ||
1459 | default: | ||
1460 | cmd->speed = -1; | ||
1461 | break; | ||
1462 | } | ||
1463 | cmd->duplex = (port_status & FULL_DUPLEX) ? DUPLEX_FULL : DUPLEX_HALF; | ||
1464 | cmd->port = PORT_MII; | ||
1465 | cmd->phy_address = 0; | ||
1466 | cmd->transceiver = XCVR_INTERNAL; | ||
1467 | cmd->autoneg = AUTONEG_DISABLE; | ||
1468 | cmd->maxtxpkt = 1; | ||
1469 | cmd->maxrxpkt = 1; | ||
1470 | |||
1471 | return 0; | ||
1472 | } | ||
1473 | |||
1474 | static int | ||
1475 | mv643xx_eth_get_settings(struct net_device *dev, struct ethtool_cmd *cmd) | ||
1476 | { | ||
1477 | struct mv643xx_eth_private *mp = netdev_priv(dev); | ||
1478 | |||
1479 | if (mp->phy != NULL) | ||
1480 | return mv643xx_eth_get_settings_phy(mp, cmd); | ||
1481 | else | ||
1482 | return mv643xx_eth_get_settings_phyless(mp, cmd); | ||
1483 | } | ||
1484 | |||
/*
 * ethtool set_settings: only supported through an attached PHY.
 * 1000baseT_Half is masked from the advertisement because the MAC
 * cannot do it.
 */
static int
mv643xx_eth_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
{
	struct mv643xx_eth_private *mp = netdev_priv(dev);

	if (mp->phy == NULL)
		return -EINVAL;

	/*
	 * The MAC does not support 1000baseT_Half.
	 */
	cmd->advertising &= ~ADVERTISED_1000baseT_Half;

	return phy_ethtool_sset(mp->phy, cmd);
}
1500 | |||
1501 | static void mv643xx_eth_get_drvinfo(struct net_device *dev, | ||
1502 | struct ethtool_drvinfo *drvinfo) | ||
1503 | { | ||
1504 | strncpy(drvinfo->driver, mv643xx_eth_driver_name, 32); | ||
1505 | strncpy(drvinfo->version, mv643xx_eth_driver_version, 32); | ||
1506 | strncpy(drvinfo->fw_version, "N/A", 32); | ||
1507 | strncpy(drvinfo->bus_info, "platform", 32); | ||
1508 | drvinfo->n_stats = ARRAY_SIZE(mv643xx_eth_stats); | ||
1509 | } | ||
1510 | |||
1511 | static int mv643xx_eth_nway_reset(struct net_device *dev) | ||
1512 | { | ||
1513 | struct mv643xx_eth_private *mp = netdev_priv(dev); | ||
1514 | |||
1515 | if (mp->phy == NULL) | ||
1516 | return -EINVAL; | ||
1517 | |||
1518 | return genphy_restart_aneg(mp->phy); | ||
1519 | } | ||
1520 | |||
/* ethtool get_coalesce: report the RX/TX coalescing delays in usecs. */
static int
mv643xx_eth_get_coalesce(struct net_device *dev, struct ethtool_coalesce *ec)
{
	struct mv643xx_eth_private *mp = netdev_priv(dev);

	ec->rx_coalesce_usecs = get_rx_coal(mp);
	ec->tx_coalesce_usecs = get_tx_coal(mp);

	return 0;
}
1531 | |||
/* ethtool set_coalesce: program the RX/TX coalescing delays in usecs. */
static int
mv643xx_eth_set_coalesce(struct net_device *dev, struct ethtool_coalesce *ec)
{
	struct mv643xx_eth_private *mp = netdev_priv(dev);

	set_rx_coal(mp, ec->rx_coalesce_usecs);
	set_tx_coal(mp, ec->tx_coalesce_usecs);

	return 0;
}
1542 | |||
1543 | static void | ||
1544 | mv643xx_eth_get_ringparam(struct net_device *dev, struct ethtool_ringparam *er) | ||
1545 | { | ||
1546 | struct mv643xx_eth_private *mp = netdev_priv(dev); | ||
1547 | |||
1548 | er->rx_max_pending = 4096; | ||
1549 | er->tx_max_pending = 4096; | ||
1550 | er->rx_mini_max_pending = 0; | ||
1551 | er->rx_jumbo_max_pending = 0; | ||
1552 | |||
1553 | er->rx_pending = mp->rx_ring_size; | ||
1554 | er->tx_pending = mp->tx_ring_size; | ||
1555 | er->rx_mini_pending = 0; | ||
1556 | er->rx_jumbo_pending = 0; | ||
1557 | } | ||
1558 | |||
/*
 * ethtool set_ringparam: change the RX/TX ring sizes (clamped to
 * 4096).  A running interface is restarted so the new sizes take
 * effect; if the re-open fails, the device is left down and -ENOMEM
 * is returned.
 */
static int
mv643xx_eth_set_ringparam(struct net_device *dev, struct ethtool_ringparam *er)
{
	struct mv643xx_eth_private *mp = netdev_priv(dev);

	/* Mini and jumbo rings are not supported. */
	if (er->rx_mini_pending || er->rx_jumbo_pending)
		return -EINVAL;

	mp->rx_ring_size = er->rx_pending < 4096 ? er->rx_pending : 4096;
	mp->tx_ring_size = er->tx_pending < 4096 ? er->tx_pending : 4096;

	if (netif_running(dev)) {
		mv643xx_eth_stop(dev);
		if (mv643xx_eth_open(dev)) {
			netdev_err(dev,
				   "fatal error on re-opening device after ring param change\n");
			return -ENOMEM;
		}
	}

	return 0;
}
1581 | |||
1582 | |||
/*
 * ndo_set_features: only RX checksum offload is toggled here, via the
 * port config register.
 */
static int
mv643xx_eth_set_features(struct net_device *dev, u32 features)
{
	struct mv643xx_eth_private *mp = netdev_priv(dev);
	u32 rx_csum = features & NETIF_F_RXCSUM;

	/* 0x02000000 presumably enables hardware RX checksumming in
	 * PORT_CONFIG — NOTE(review): confirm against the datasheet. */
	wrlp(mp, PORT_CONFIG, rx_csum ? 0x02000000 : 0x00000000);

	return 0;
}
1593 | |||
1594 | static void mv643xx_eth_get_strings(struct net_device *dev, | ||
1595 | uint32_t stringset, uint8_t *data) | ||
1596 | { | ||
1597 | int i; | ||
1598 | |||
1599 | if (stringset == ETH_SS_STATS) { | ||
1600 | for (i = 0; i < ARRAY_SIZE(mv643xx_eth_stats); i++) { | ||
1601 | memcpy(data + i * ETH_GSTRING_LEN, | ||
1602 | mv643xx_eth_stats[i].stat_string, | ||
1603 | ETH_GSTRING_LEN); | ||
1604 | } | ||
1605 | } | ||
1606 | } | ||
1607 | |||
/*
 * ethtool get_ethtool_stats: refresh the netdev, MIB and LRO counters,
 * then copy one value per mv643xx_eth_stats[] entry into @data, pulled
 * from whichever structure the entry's valid offset points into.
 */
static void mv643xx_eth_get_ethtool_stats(struct net_device *dev,
					  struct ethtool_stats *stats,
					  uint64_t *data)
{
	struct mv643xx_eth_private *mp = netdev_priv(dev);
	int i;

	mv643xx_eth_get_stats(dev);
	mib_counters_update(mp);
	mv643xx_eth_grab_lro_stats(mp);

	for (i = 0; i < ARRAY_SIZE(mv643xx_eth_stats); i++) {
		const struct mv643xx_eth_stats *stat;
		void *p;

		stat = mv643xx_eth_stats + i;

		/* netdev_off >= 0 selects net_device, else private struct. */
		if (stat->netdev_off >= 0)
			p = ((void *)mp->dev) + stat->netdev_off;
		else
			p = ((void *)mp) + stat->mp_off;

		data[i] = (stat->sizeof_stat == 8) ?
				*(uint64_t *)p : *(uint32_t *)p;
	}
}
1634 | |||
1635 | static int mv643xx_eth_get_sset_count(struct net_device *dev, int sset) | ||
1636 | { | ||
1637 | if (sset == ETH_SS_STATS) | ||
1638 | return ARRAY_SIZE(mv643xx_eth_stats); | ||
1639 | |||
1640 | return -EOPNOTSUPP; | ||
1641 | } | ||
1642 | |||
/* ethtool operations table for this driver, installed on each port's
 * net_device; dispatched by the ethtool core.
 */
static const struct ethtool_ops mv643xx_eth_ethtool_ops = {
	.get_settings		= mv643xx_eth_get_settings,
	.set_settings		= mv643xx_eth_set_settings,
	.get_drvinfo		= mv643xx_eth_get_drvinfo,
	.nway_reset		= mv643xx_eth_nway_reset,
	.get_link		= ethtool_op_get_link,
	.get_coalesce		= mv643xx_eth_get_coalesce,
	.set_coalesce		= mv643xx_eth_set_coalesce,
	.get_ringparam		= mv643xx_eth_get_ringparam,
	.set_ringparam		= mv643xx_eth_set_ringparam,
	.get_strings		= mv643xx_eth_get_strings,
	.get_ethtool_stats	= mv643xx_eth_get_ethtool_stats,
	.get_sset_count		= mv643xx_eth_get_sset_count,
};
1657 | |||
1658 | |||
1659 | /* address handling *********************************************************/ | ||
1660 | static void uc_addr_get(struct mv643xx_eth_private *mp, unsigned char *addr) | ||
1661 | { | ||
1662 | unsigned int mac_h = rdlp(mp, MAC_ADDR_HIGH); | ||
1663 | unsigned int mac_l = rdlp(mp, MAC_ADDR_LOW); | ||
1664 | |||
1665 | addr[0] = (mac_h >> 24) & 0xff; | ||
1666 | addr[1] = (mac_h >> 16) & 0xff; | ||
1667 | addr[2] = (mac_h >> 8) & 0xff; | ||
1668 | addr[3] = mac_h & 0xff; | ||
1669 | addr[4] = (mac_l >> 8) & 0xff; | ||
1670 | addr[5] = mac_l & 0xff; | ||
1671 | } | ||
1672 | |||
1673 | static void uc_addr_set(struct mv643xx_eth_private *mp, unsigned char *addr) | ||
1674 | { | ||
1675 | wrlp(mp, MAC_ADDR_HIGH, | ||
1676 | (addr[0] << 24) | (addr[1] << 16) | (addr[2] << 8) | addr[3]); | ||
1677 | wrlp(mp, MAC_ADDR_LOW, (addr[4] << 8) | addr[5]); | ||
1678 | } | ||
1679 | |||
1680 | static u32 uc_addr_filter_mask(struct net_device *dev) | ||
1681 | { | ||
1682 | struct netdev_hw_addr *ha; | ||
1683 | u32 nibbles; | ||
1684 | |||
1685 | if (dev->flags & IFF_PROMISC) | ||
1686 | return 0; | ||
1687 | |||
1688 | nibbles = 1 << (dev->dev_addr[5] & 0x0f); | ||
1689 | netdev_for_each_uc_addr(ha, dev) { | ||
1690 | if (memcmp(dev->dev_addr, ha->addr, 5)) | ||
1691 | return 0; | ||
1692 | if ((dev->dev_addr[5] ^ ha->addr[5]) & 0xf0) | ||
1693 | return 0; | ||
1694 | |||
1695 | nibbles |= 1 << (ha->addr[5] & 0x0f); | ||
1696 | } | ||
1697 | |||
1698 | return nibbles; | ||
1699 | } | ||
1700 | |||
1701 | static void mv643xx_eth_program_unicast_filter(struct net_device *dev) | ||
1702 | { | ||
1703 | struct mv643xx_eth_private *mp = netdev_priv(dev); | ||
1704 | u32 port_config; | ||
1705 | u32 nibbles; | ||
1706 | int i; | ||
1707 | |||
1708 | uc_addr_set(mp, dev->dev_addr); | ||
1709 | |||
1710 | port_config = rdlp(mp, PORT_CONFIG) & ~UNICAST_PROMISCUOUS_MODE; | ||
1711 | |||
1712 | nibbles = uc_addr_filter_mask(dev); | ||
1713 | if (!nibbles) { | ||
1714 | port_config |= UNICAST_PROMISCUOUS_MODE; | ||
1715 | nibbles = 0xffff; | ||
1716 | } | ||
1717 | |||
1718 | for (i = 0; i < 16; i += 4) { | ||
1719 | int off = UNICAST_TABLE(mp->port_num) + i; | ||
1720 | u32 v; | ||
1721 | |||
1722 | v = 0; | ||
1723 | if (nibbles & 1) | ||
1724 | v |= 0x00000001; | ||
1725 | if (nibbles & 2) | ||
1726 | v |= 0x00000100; | ||
1727 | if (nibbles & 4) | ||
1728 | v |= 0x00010000; | ||
1729 | if (nibbles & 8) | ||
1730 | v |= 0x01000000; | ||
1731 | nibbles >>= 4; | ||
1732 | |||
1733 | wrl(mp, off, v); | ||
1734 | } | ||
1735 | |||
1736 | wrlp(mp, PORT_CONFIG, port_config); | ||
1737 | } | ||
1738 | |||
/* Compute an 8-bit CRC (polynomial 0x107, i.e. x^8 + x^2 + x + 1) of
 * a 6-byte ethernet address; used to index the "other" multicast
 * filter table.
 */
static int addr_crc(unsigned char *addr)
{
	int rem = 0;
	int byte;

	for (byte = 0; byte < 6; byte++) {
		int bit;

		/* Fold in the next byte, then reduce from the top bit
		 * down.
		 */
		rem = (rem ^ addr[byte]) << 8;
		for (bit = 7; bit >= 0; bit--) {
			if (rem & (0x100 << bit))
				rem ^= 0x107 << bit;
		}
	}

	return rem;
}
1756 | |||
1757 | static void mv643xx_eth_program_multicast_filter(struct net_device *dev) | ||
1758 | { | ||
1759 | struct mv643xx_eth_private *mp = netdev_priv(dev); | ||
1760 | u32 *mc_spec; | ||
1761 | u32 *mc_other; | ||
1762 | struct netdev_hw_addr *ha; | ||
1763 | int i; | ||
1764 | |||
1765 | if (dev->flags & (IFF_PROMISC | IFF_ALLMULTI)) { | ||
1766 | int port_num; | ||
1767 | u32 accept; | ||
1768 | |||
1769 | oom: | ||
1770 | port_num = mp->port_num; | ||
1771 | accept = 0x01010101; | ||
1772 | for (i = 0; i < 0x100; i += 4) { | ||
1773 | wrl(mp, SPECIAL_MCAST_TABLE(port_num) + i, accept); | ||
1774 | wrl(mp, OTHER_MCAST_TABLE(port_num) + i, accept); | ||
1775 | } | ||
1776 | return; | ||
1777 | } | ||
1778 | |||
1779 | mc_spec = kmalloc(0x200, GFP_ATOMIC); | ||
1780 | if (mc_spec == NULL) | ||
1781 | goto oom; | ||
1782 | mc_other = mc_spec + (0x100 >> 2); | ||
1783 | |||
1784 | memset(mc_spec, 0, 0x100); | ||
1785 | memset(mc_other, 0, 0x100); | ||
1786 | |||
1787 | netdev_for_each_mc_addr(ha, dev) { | ||
1788 | u8 *a = ha->addr; | ||
1789 | u32 *table; | ||
1790 | int entry; | ||
1791 | |||
1792 | if (memcmp(a, "\x01\x00\x5e\x00\x00", 5) == 0) { | ||
1793 | table = mc_spec; | ||
1794 | entry = a[5]; | ||
1795 | } else { | ||
1796 | table = mc_other; | ||
1797 | entry = addr_crc(a); | ||
1798 | } | ||
1799 | |||
1800 | table[entry >> 2] |= 1 << (8 * (entry & 3)); | ||
1801 | } | ||
1802 | |||
1803 | for (i = 0; i < 0x100; i += 4) { | ||
1804 | wrl(mp, SPECIAL_MCAST_TABLE(mp->port_num) + i, mc_spec[i >> 2]); | ||
1805 | wrl(mp, OTHER_MCAST_TABLE(mp->port_num) + i, mc_other[i >> 2]); | ||
1806 | } | ||
1807 | |||
1808 | kfree(mc_spec); | ||
1809 | } | ||
1810 | |||
/* ndo_set_rx_mode: reprogram both the unicast and multicast hardware
 * filters to match the device's current flags and address lists.
 */
static void mv643xx_eth_set_rx_mode(struct net_device *dev)
{
	mv643xx_eth_program_unicast_filter(dev);
	mv643xx_eth_program_multicast_filter(dev);
}
1816 | |||
1817 | static int mv643xx_eth_set_mac_address(struct net_device *dev, void *addr) | ||
1818 | { | ||
1819 | struct sockaddr *sa = addr; | ||
1820 | |||
1821 | if (!is_valid_ether_addr(sa->sa_data)) | ||
1822 | return -EINVAL; | ||
1823 | |||
1824 | memcpy(dev->dev_addr, sa->sa_data, ETH_ALEN); | ||
1825 | |||
1826 | netif_addr_lock_bh(dev); | ||
1827 | mv643xx_eth_program_unicast_filter(dev); | ||
1828 | netif_addr_unlock_bh(dev); | ||
1829 | |||
1830 | return 0; | ||
1831 | } | ||
1832 | |||
1833 | |||
1834 | /* rx/tx queue initialisation ***********************************************/ | ||
/* Allocate and initialise RX queue 'index': the descriptor ring (in
 * dedicated SRAM for queue 0 when it fits, otherwise coherent DMA
 * memory), the parallel skb pointer array, a circular next-descriptor
 * chain, and the software LRO manager state.
 * Returns 0 on success or -ENOMEM.
 */
static int rxq_init(struct mv643xx_eth_private *mp, int index)
{
	struct rx_queue *rxq = mp->rxq + index;
	struct rx_desc *rx_desc;
	int size;
	int i;

	rxq->index = index;

	rxq->rx_ring_size = mp->rx_ring_size;

	rxq->rx_desc_count = 0;
	rxq->rx_curr_desc = 0;
	rxq->rx_used_desc = 0;

	size = rxq->rx_ring_size * sizeof(struct rx_desc);

	/* Queue 0 may use the dedicated descriptor SRAM if the whole
	 * ring fits there.
	 */
	if (index == 0 && size <= mp->rx_desc_sram_size) {
		rxq->rx_desc_area = ioremap(mp->rx_desc_sram_addr,
					    mp->rx_desc_sram_size);
		rxq->rx_desc_dma = mp->rx_desc_sram_addr;
	} else {
		rxq->rx_desc_area = dma_alloc_coherent(mp->dev->dev.parent,
						       size, &rxq->rx_desc_dma,
						       GFP_KERNEL);
	}

	if (rxq->rx_desc_area == NULL) {
		netdev_err(mp->dev,
			   "can't allocate rx ring (%d bytes)\n", size);
		goto out;
	}
	memset(rxq->rx_desc_area, 0, size);

	rxq->rx_desc_area_size = size;
	rxq->rx_skb = kmalloc(rxq->rx_ring_size * sizeof(*rxq->rx_skb),
			      GFP_KERNEL);
	if (rxq->rx_skb == NULL) {
		netdev_err(mp->dev, "can't allocate rx skb ring\n");
		goto out_free;
	}

	/* Chain each descriptor to the next one, wrapping the last
	 * back to the first.
	 */
	rx_desc = (struct rx_desc *)rxq->rx_desc_area;
	for (i = 0; i < rxq->rx_ring_size; i++) {
		int nexti;

		nexti = i + 1;
		if (nexti == rxq->rx_ring_size)
			nexti = 0;

		rx_desc[i].next_desc_ptr = rxq->rx_desc_dma +
					nexti * sizeof(struct rx_desc);
	}

	/* Software LRO manager configuration for this queue. */
	rxq->lro_mgr.dev = mp->dev;
	memset(&rxq->lro_mgr.stats, 0, sizeof(rxq->lro_mgr.stats));
	rxq->lro_mgr.features = LRO_F_NAPI;
	rxq->lro_mgr.ip_summed = CHECKSUM_UNNECESSARY;
	rxq->lro_mgr.ip_summed_aggr = CHECKSUM_UNNECESSARY;
	rxq->lro_mgr.max_desc = ARRAY_SIZE(rxq->lro_arr);
	rxq->lro_mgr.max_aggr = 32;
	rxq->lro_mgr.frag_align_pad = 0;
	rxq->lro_mgr.lro_arr = rxq->lro_arr;
	rxq->lro_mgr.get_skb_header = mv643xx_get_skb_header;

	memset(&rxq->lro_arr, 0, sizeof(rxq->lro_arr));

	return 0;


out_free:
	/* Release the descriptor area the same way it was allocated. */
	if (index == 0 && size <= mp->rx_desc_sram_size)
		iounmap(rxq->rx_desc_area);
	else
		dma_free_coherent(mp->dev->dev.parent, size,
				  rxq->rx_desc_area,
				  rxq->rx_desc_dma);

out:
	return -ENOMEM;
}
1916 | |||
/* Tear down RX queue 'rxq': stop it, free any skbs still attached to
 * descriptors, then release the descriptor ring (SRAM or DMA,
 * mirroring the allocation choice made in rxq_init()) and the skb
 * pointer array.
 */
static void rxq_deinit(struct rx_queue *rxq)
{
	struct mv643xx_eth_private *mp = rxq_to_mp(rxq);
	int i;

	rxq_disable(rxq);

	for (i = 0; i < rxq->rx_ring_size; i++) {
		if (rxq->rx_skb[i]) {
			dev_kfree_skb(rxq->rx_skb[i]);
			rxq->rx_desc_count--;
		}
	}

	/* Every outstanding descriptor should have had an skb. */
	if (rxq->rx_desc_count) {
		netdev_err(mp->dev, "error freeing rx ring -- %d skbs stuck\n",
			   rxq->rx_desc_count);
	}

	if (rxq->index == 0 &&
	    rxq->rx_desc_area_size <= mp->rx_desc_sram_size)
		iounmap(rxq->rx_desc_area);
	else
		dma_free_coherent(mp->dev->dev.parent, rxq->rx_desc_area_size,
				  rxq->rx_desc_area, rxq->rx_desc_dma);

	kfree(rxq->rx_skb);
}
1945 | |||
/* Allocate and initialise TX queue 'index': the descriptor ring (in
 * dedicated SRAM for queue 0 when it fits, otherwise coherent DMA
 * memory), a circular next-descriptor chain, and the pending-skb
 * queue.  Returns 0 on success or -ENOMEM.
 */
static int txq_init(struct mv643xx_eth_private *mp, int index)
{
	struct tx_queue *txq = mp->txq + index;
	struct tx_desc *tx_desc;
	int size;
	int i;

	txq->index = index;

	txq->tx_ring_size = mp->tx_ring_size;

	txq->tx_desc_count = 0;
	txq->tx_curr_desc = 0;
	txq->tx_used_desc = 0;

	size = txq->tx_ring_size * sizeof(struct tx_desc);

	/* Queue 0 may use the dedicated descriptor SRAM if the whole
	 * ring fits there.
	 */
	if (index == 0 && size <= mp->tx_desc_sram_size) {
		txq->tx_desc_area = ioremap(mp->tx_desc_sram_addr,
					    mp->tx_desc_sram_size);
		txq->tx_desc_dma = mp->tx_desc_sram_addr;
	} else {
		txq->tx_desc_area = dma_alloc_coherent(mp->dev->dev.parent,
						       size, &txq->tx_desc_dma,
						       GFP_KERNEL);
	}

	if (txq->tx_desc_area == NULL) {
		netdev_err(mp->dev,
			   "can't allocate tx ring (%d bytes)\n", size);
		return -ENOMEM;
	}
	memset(txq->tx_desc_area, 0, size);

	txq->tx_desc_area_size = size;

	/* Chain each descriptor to the next one, wrapping the last
	 * back to the first.
	 */
	tx_desc = (struct tx_desc *)txq->tx_desc_area;
	for (i = 0; i < txq->tx_ring_size; i++) {
		struct tx_desc *txd = tx_desc + i;
		int nexti;

		nexti = i + 1;
		if (nexti == txq->tx_ring_size)
			nexti = 0;

		txd->cmd_sts = 0;
		txd->next_desc_ptr = txq->tx_desc_dma +
					nexti * sizeof(struct tx_desc);
	}

	skb_queue_head_init(&txq->tx_skb);

	return 0;
}
2000 | |||
/* Tear down TX queue 'txq': stop it, reclaim every in-flight
 * descriptor (third txq_reclaim() argument presumably means "force" --
 * see txq_reclaim()), then release the descriptor ring, mirroring the
 * SRAM/DMA allocation choice made in txq_init().
 */
static void txq_deinit(struct tx_queue *txq)
{
	struct mv643xx_eth_private *mp = txq_to_mp(txq);

	txq_disable(txq);
	txq_reclaim(txq, txq->tx_ring_size, 1);

	/* After a full reclaim the ring must be empty. */
	BUG_ON(txq->tx_used_desc != txq->tx_curr_desc);

	if (txq->index == 0 &&
	    txq->tx_desc_area_size <= mp->tx_desc_sram_size)
		iounmap(txq->tx_desc_area);
	else
		dma_free_coherent(mp->dev->dev.parent, txq->tx_desc_area_size,
				  txq->tx_desc_area, txq->tx_desc_dma);
}
2017 | |||
2018 | |||
2019 | /* netdev ops and related ***************************************************/ | ||
/* Read and acknowledge the port's interrupt cause registers and fold
 * the pending events into the mp->work_* bitmasks consumed by the
 * NAPI poller.  Returns 1 if any event enabled in mp->int_mask was
 * pending, 0 otherwise.
 */
static int mv643xx_eth_collect_events(struct mv643xx_eth_private *mp)
{
	u32 int_cause;
	u32 int_cause_ext;

	int_cause = rdlp(mp, INT_CAUSE) & mp->int_mask;
	if (int_cause == 0)
		return 0;

	/* INT_EXT summarises the extended cause register; read that
	 * register out separately when the summary bit is set.
	 */
	int_cause_ext = 0;
	if (int_cause & INT_EXT) {
		int_cause &= ~INT_EXT;
		int_cause_ext = rdlp(mp, INT_CAUSE_EXT);
	}

	if (int_cause) {
		/* Acknowledge by writing back the inverted cause bits. */
		wrlp(mp, INT_CAUSE, ~int_cause);
		/* Record TX-end work only for queues that are no longer
		 * enabled in TXQ_COMMAND.  NOTE(review): the bit layout
		 * (shift by 19, low 8 queue bits) is inferred from the
		 * INT_TX_END/INT_RX masks -- confirm against the
		 * controller datasheet.
		 */
		mp->work_tx_end |= ((int_cause & INT_TX_END) >> 19) &
				~(rdlp(mp, TXQ_COMMAND) & 0xff);
		mp->work_rx |= (int_cause & INT_RX) >> 2;
	}

	int_cause_ext &= INT_EXT_LINK_PHY | INT_EXT_TX;
	if (int_cause_ext) {
		wrlp(mp, INT_CAUSE_EXT, ~int_cause_ext);
		if (int_cause_ext & INT_EXT_LINK_PHY)
			mp->work_link = 1;
		mp->work_tx |= int_cause_ext & INT_EXT_TX;
	}

	return 1;
}
2052 | |||
2053 | static irqreturn_t mv643xx_eth_irq(int irq, void *dev_id) | ||
2054 | { | ||
2055 | struct net_device *dev = (struct net_device *)dev_id; | ||
2056 | struct mv643xx_eth_private *mp = netdev_priv(dev); | ||
2057 | |||
2058 | if (unlikely(!mv643xx_eth_collect_events(mp))) | ||
2059 | return IRQ_NONE; | ||
2060 | |||
2061 | wrlp(mp, INT_MASK, 0); | ||
2062 | napi_schedule(&mp->napi); | ||
2063 | |||
2064 | return IRQ_HANDLED; | ||
2065 | } | ||
2066 | |||
2067 | static void handle_link_event(struct mv643xx_eth_private *mp) | ||
2068 | { | ||
2069 | struct net_device *dev = mp->dev; | ||
2070 | u32 port_status; | ||
2071 | int speed; | ||
2072 | int duplex; | ||
2073 | int fc; | ||
2074 | |||
2075 | port_status = rdlp(mp, PORT_STATUS); | ||
2076 | if (!(port_status & LINK_UP)) { | ||
2077 | if (netif_carrier_ok(dev)) { | ||
2078 | int i; | ||
2079 | |||
2080 | netdev_info(dev, "link down\n"); | ||
2081 | |||
2082 | netif_carrier_off(dev); | ||
2083 | |||
2084 | for (i = 0; i < mp->txq_count; i++) { | ||
2085 | struct tx_queue *txq = mp->txq + i; | ||
2086 | |||
2087 | txq_reclaim(txq, txq->tx_ring_size, 1); | ||
2088 | txq_reset_hw_ptr(txq); | ||
2089 | } | ||
2090 | } | ||
2091 | return; | ||
2092 | } | ||
2093 | |||
2094 | switch (port_status & PORT_SPEED_MASK) { | ||
2095 | case PORT_SPEED_10: | ||
2096 | speed = 10; | ||
2097 | break; | ||
2098 | case PORT_SPEED_100: | ||
2099 | speed = 100; | ||
2100 | break; | ||
2101 | case PORT_SPEED_1000: | ||
2102 | speed = 1000; | ||
2103 | break; | ||
2104 | default: | ||
2105 | speed = -1; | ||
2106 | break; | ||
2107 | } | ||
2108 | duplex = (port_status & FULL_DUPLEX) ? 1 : 0; | ||
2109 | fc = (port_status & FLOW_CONTROL_ENABLED) ? 1 : 0; | ||
2110 | |||
2111 | netdev_info(dev, "link up, %d Mb/s, %s duplex, flow control %sabled\n", | ||
2112 | speed, duplex ? "full" : "half", fc ? "en" : "dis"); | ||
2113 | |||
2114 | if (!netif_carrier_ok(dev)) | ||
2115 | netif_carrier_on(dev); | ||
2116 | } | ||
2117 | |||
/* NAPI poll handler: services link events, TX kick/reclaim, RX
 * processing and RX refill across all queues -- highest-numbered
 * pending queue first -- until 'budget' units of work are done or no
 * events remain.
 */
static int mv643xx_eth_poll(struct napi_struct *napi, int budget)
{
	struct mv643xx_eth_private *mp;
	int work_done;

	mp = container_of(napi, struct mv643xx_eth_private, napi);

	/* We were (re)scheduled, so leave oom mode and cancel the
	 * retry timer; refill is attempted again below.
	 */
	if (unlikely(mp->oom)) {
		mp->oom = 0;
		del_timer(&mp->rx_oom);
	}

	work_done = 0;
	while (work_done < budget) {
		u8 queue_mask;
		int queue;
		int work_tbd;

		/* Link changes take priority over queue work. */
		if (mp->work_link) {
			mp->work_link = 0;
			handle_link_event(mp);
			work_done++;
			continue;
		}

		/* While out of memory, do not attempt RX refill. */
		queue_mask = mp->work_tx | mp->work_tx_end | mp->work_rx;
		if (likely(!mp->oom))
			queue_mask |= mp->work_rx_refill;

		if (!queue_mask) {
			/* Nothing left in software state: re-poll the
			 * hardware once before completing.
			 */
			if (mv643xx_eth_collect_events(mp))
				continue;
			break;
		}

		/* Service the highest-numbered pending queue, at most
		 * 16 units of work per round.
		 */
		queue = fls(queue_mask) - 1;
		queue_mask = 1 << queue;

		work_tbd = budget - work_done;
		if (work_tbd > 16)
			work_tbd = 16;

		if (mp->work_tx_end & queue_mask) {
			txq_kick(mp->txq + queue);
		} else if (mp->work_tx & queue_mask) {
			work_done += txq_reclaim(mp->txq + queue, work_tbd, 0);
			txq_maybe_wake(mp->txq + queue);
		} else if (mp->work_rx & queue_mask) {
			work_done += rxq_process(mp->rxq + queue, work_tbd);
		} else if (!mp->oom && (mp->work_rx_refill & queue_mask)) {
			work_done += rxq_refill(mp->rxq + queue, work_tbd);
		} else {
			BUG();
		}
	}

	if (work_done < budget) {
		/* Budget not exhausted: arm the oom retry timer if
		 * needed, complete NAPI and unmask interrupts.
		 */
		if (mp->oom)
			mod_timer(&mp->rx_oom, jiffies + (HZ / 10));
		napi_complete(napi);
		wrlp(mp, INT_MASK, mp->int_mask);
	}

	return work_done;
}
2183 | |||
2184 | static inline void oom_timer_wrapper(unsigned long data) | ||
2185 | { | ||
2186 | struct mv643xx_eth_private *mp = (void *)data; | ||
2187 | |||
2188 | napi_schedule(&mp->napi); | ||
2189 | } | ||
2190 | |||
2191 | static void phy_reset(struct mv643xx_eth_private *mp) | ||
2192 | { | ||
2193 | int data; | ||
2194 | |||
2195 | data = phy_read(mp->phy, MII_BMCR); | ||
2196 | if (data < 0) | ||
2197 | return; | ||
2198 | |||
2199 | data |= BMCR_RESET; | ||
2200 | if (phy_write(mp->phy, MII_BMCR, data) < 0) | ||
2201 | return; | ||
2202 | |||
2203 | do { | ||
2204 | data = phy_read(mp->phy, MII_BMCR); | ||
2205 | } while (data >= 0 && data & BMCR_RESET); | ||
2206 | } | ||
2207 | |||
/* Bring the port fully up: reset and re-apply PHY settings, enable
 * the serial port, program TX rate limiting and queue priorities,
 * configure RX checksum offload and the unicast filter, and finally
 * enable the RX queues.
 */
static void port_start(struct mv643xx_eth_private *mp)
{
	u32 pscr;
	int i;

	/*
	 * Perform PHY reset, if there is a PHY.
	 */
	if (mp->phy != NULL) {
		struct ethtool_cmd cmd;

		/* Save the link settings across the reset and restore
		 * them afterwards.
		 */
		mv643xx_eth_get_settings(mp->dev, &cmd);
		phy_reset(mp);
		mv643xx_eth_set_settings(mp->dev, &cmd);
	}

	/*
	 * Configure basic link parameters.
	 */
	pscr = rdlp(mp, PORT_SERIAL_CONTROL);

	pscr |= SERIAL_PORT_ENABLE;
	wrlp(mp, PORT_SERIAL_CONTROL, pscr);

	pscr |= DO_NOT_FORCE_LINK_FAIL;
	/* Without a PHY there is no link detection: force link up. */
	if (mp->phy == NULL)
		pscr |= FORCE_LINK_PASS;
	wrlp(mp, PORT_SERIAL_CONTROL, pscr);

	/*
	 * Configure TX path and queues.
	 */
	tx_set_rate(mp, 1000000000, 16777216);
	for (i = 0; i < mp->txq_count; i++) {
		struct tx_queue *txq = mp->txq + i;

		txq_reset_hw_ptr(txq);
		txq_set_rate(txq, 1000000000, 16777216);
		txq_set_fixed_prio_mode(txq);
	}

	/*
	 * Receive all unmatched unicast, TCP, UDP, BPDU and broadcast
	 * frames to RX queue #0, and include the pseudo-header when
	 * calculating receive checksums.
	 */
	mv643xx_eth_set_features(mp->dev, mp->dev->features);

	/*
	 * Treat BPDUs as normal multicasts, and disable partition mode.
	 */
	wrlp(mp, PORT_CONFIG_EXT, 0x00000000);

	/*
	 * Add configured unicast addresses to address filter table.
	 */
	mv643xx_eth_program_unicast_filter(mp->dev);

	/*
	 * Enable the receive queues.
	 */
	for (i = 0; i < mp->rxq_count; i++) {
		struct rx_queue *rxq = mp->rxq + i;
		u32 addr;

		/* Point the hardware at the current RX descriptor. */
		addr = (u32)rxq->rx_desc_dma;
		addr += rxq->rx_curr_desc * sizeof(struct rx_desc);
		wrlp(mp, RXQ_CURRENT_DESC_PTR(i), addr);

		rxq_enable(rxq);
	}
}
2280 | |||
2281 | static void mv643xx_eth_recalc_skb_size(struct mv643xx_eth_private *mp) | ||
2282 | { | ||
2283 | int skb_size; | ||
2284 | |||
2285 | /* | ||
2286 | * Reserve 2+14 bytes for an ethernet header (the hardware | ||
2287 | * automatically prepends 2 bytes of dummy data to each | ||
2288 | * received packet), 16 bytes for up to four VLAN tags, and | ||
2289 | * 4 bytes for the trailing FCS -- 36 bytes total. | ||
2290 | */ | ||
2291 | skb_size = mp->dev->mtu + 36; | ||
2292 | |||
2293 | /* | ||
2294 | * Make sure that the skb size is a multiple of 8 bytes, as | ||
2295 | * the lower three bits of the receive descriptor's buffer | ||
2296 | * size field are ignored by the hardware. | ||
2297 | */ | ||
2298 | mp->skb_size = (skb_size + 7) & ~7; | ||
2299 | |||
2300 | /* | ||
2301 | * If NET_SKB_PAD is smaller than a cache line, | ||
2302 | * netdev_alloc_skb() will cause skb->data to be misaligned | ||
2303 | * to a cache line boundary. If this is the case, include | ||
2304 | * some extra space to allow re-aligning the data area. | ||
2305 | */ | ||
2306 | mp->skb_size += SKB_DMA_REALIGN; | ||
2307 | } | ||
2308 | |||
2309 | static int mv643xx_eth_open(struct net_device *dev) | ||
2310 | { | ||
2311 | struct mv643xx_eth_private *mp = netdev_priv(dev); | ||
2312 | int err; | ||
2313 | int i; | ||
2314 | |||
2315 | wrlp(mp, INT_CAUSE, 0); | ||
2316 | wrlp(mp, INT_CAUSE_EXT, 0); | ||
2317 | rdlp(mp, INT_CAUSE_EXT); | ||
2318 | |||
2319 | err = request_irq(dev->irq, mv643xx_eth_irq, | ||
2320 | IRQF_SHARED, dev->name, dev); | ||
2321 | if (err) { | ||
2322 | netdev_err(dev, "can't assign irq\n"); | ||
2323 | return -EAGAIN; | ||
2324 | } | ||
2325 | |||
2326 | mv643xx_eth_recalc_skb_size(mp); | ||
2327 | |||
2328 | napi_enable(&mp->napi); | ||
2329 | |||
2330 | skb_queue_head_init(&mp->rx_recycle); | ||
2331 | |||
2332 | mp->int_mask = INT_EXT; | ||
2333 | |||
2334 | for (i = 0; i < mp->rxq_count; i++) { | ||
2335 | err = rxq_init(mp, i); | ||
2336 | if (err) { | ||
2337 | while (--i >= 0) | ||
2338 | rxq_deinit(mp->rxq + i); | ||
2339 | goto out; | ||
2340 | } | ||
2341 | |||
2342 | rxq_refill(mp->rxq + i, INT_MAX); | ||
2343 | mp->int_mask |= INT_RX_0 << i; | ||
2344 | } | ||
2345 | |||
2346 | if (mp->oom) { | ||
2347 | mp->rx_oom.expires = jiffies + (HZ / 10); | ||
2348 | add_timer(&mp->rx_oom); | ||
2349 | } | ||
2350 | |||
2351 | for (i = 0; i < mp->txq_count; i++) { | ||
2352 | err = txq_init(mp, i); | ||
2353 | if (err) { | ||
2354 | while (--i >= 0) | ||
2355 | txq_deinit(mp->txq + i); | ||
2356 | goto out_free; | ||
2357 | } | ||
2358 | mp->int_mask |= INT_TX_END_0 << i; | ||
2359 | } | ||
2360 | |||
2361 | port_start(mp); | ||
2362 | |||
2363 | wrlp(mp, INT_MASK_EXT, INT_EXT_LINK_PHY | INT_EXT_TX); | ||
2364 | wrlp(mp, INT_MASK, mp->int_mask); | ||
2365 | |||
2366 | return 0; | ||
2367 | |||
2368 | |||
2369 | out_free: | ||
2370 | for (i = 0; i < mp->rxq_count; i++) | ||
2371 | rxq_deinit(mp->rxq + i); | ||
2372 | out: | ||
2373 | free_irq(dev->irq, dev); | ||
2374 | |||
2375 | return err; | ||
2376 | } | ||
2377 | |||
/* Quiesce the port: disable all RX/TX queues, wait for the TX FIFO to
 * drain, then clear the serial-control enable/force-link bits.
 */
static void port_reset(struct mv643xx_eth_private *mp)
{
	unsigned int data;
	int i;

	for (i = 0; i < mp->rxq_count; i++)
		rxq_disable(mp->rxq + i);
	for (i = 0; i < mp->txq_count; i++)
		txq_disable(mp->txq + i);

	/* Wait until transmission is idle and the TX FIFO is empty.
	 * NOTE(review): this wait is unbounded; a hardware fault that
	 * never empties the FIFO would spin here forever.
	 */
	while (1) {
		u32 ps = rdlp(mp, PORT_STATUS);

		if ((ps & (TX_IN_PROGRESS | TX_FIFO_EMPTY)) == TX_FIFO_EMPTY)
			break;
		udelay(10);
	}

	/* Reset the Enable bit in the Configuration Register */
	data = rdlp(mp, PORT_SERIAL_CONTROL);
	data &= ~(SERIAL_PORT_ENABLE |
			DO_NOT_FORCE_LINK_FAIL |
			FORCE_LINK_PASS);
	wrlp(mp, PORT_SERIAL_CONTROL, data);
}
2403 | |||
/* ndo_stop: mask interrupts, stop NAPI and the oom timer, reset the
 * port hardware, take a final statistics snapshot, and free all queue
 * resources and the IRQ.  Always returns 0.
 */
static int mv643xx_eth_stop(struct net_device *dev)
{
	struct mv643xx_eth_private *mp = netdev_priv(dev);
	int i;

	/* Mask everything (the read-back presumably flushes the
	 * posted writes before we proceed).
	 */
	wrlp(mp, INT_MASK_EXT, 0x00000000);
	wrlp(mp, INT_MASK, 0x00000000);
	rdlp(mp, INT_MASK);

	napi_disable(&mp->napi);

	del_timer_sync(&mp->rx_oom);

	netif_carrier_off(dev);

	free_irq(dev->irq, dev);

	port_reset(mp);
	/* Capture final counters before stopping the MIB timer. */
	mv643xx_eth_get_stats(dev);
	mib_counters_update(mp);
	del_timer_sync(&mp->mib_counters_timer);

	skb_queue_purge(&mp->rx_recycle);

	for (i = 0; i < mp->rxq_count; i++)
		rxq_deinit(mp->rxq + i);
	for (i = 0; i < mp->txq_count; i++)
		txq_deinit(mp->txq + i);

	return 0;
}
2435 | |||
2436 | static int mv643xx_eth_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd) | ||
2437 | { | ||
2438 | struct mv643xx_eth_private *mp = netdev_priv(dev); | ||
2439 | |||
2440 | if (mp->phy != NULL) | ||
2441 | return phy_mii_ioctl(mp->phy, ifr, cmd); | ||
2442 | |||
2443 | return -EOPNOTSUPP; | ||
2444 | } | ||
2445 | |||
2446 | static int mv643xx_eth_change_mtu(struct net_device *dev, int new_mtu) | ||
2447 | { | ||
2448 | struct mv643xx_eth_private *mp = netdev_priv(dev); | ||
2449 | |||
2450 | if (new_mtu < 64 || new_mtu > 9500) | ||
2451 | return -EINVAL; | ||
2452 | |||
2453 | dev->mtu = new_mtu; | ||
2454 | mv643xx_eth_recalc_skb_size(mp); | ||
2455 | tx_set_rate(mp, 1000000000, 16777216); | ||
2456 | |||
2457 | if (!netif_running(dev)) | ||
2458 | return 0; | ||
2459 | |||
2460 | /* | ||
2461 | * Stop and then re-open the interface. This will allocate RX | ||
2462 | * skbs of the new MTU. | ||
2463 | * There is a possible danger that the open will not succeed, | ||
2464 | * due to memory being full. | ||
2465 | */ | ||
2466 | mv643xx_eth_stop(dev); | ||
2467 | if (mv643xx_eth_open(dev)) { | ||
2468 | netdev_err(dev, | ||
2469 | "fatal error on re-opening device after MTU change\n"); | ||
2470 | } | ||
2471 | |||
2472 | return 0; | ||
2473 | } | ||
2474 | |||
2475 | static void tx_timeout_task(struct work_struct *ugly) | ||
2476 | { | ||
2477 | struct mv643xx_eth_private *mp; | ||
2478 | |||
2479 | mp = container_of(ugly, struct mv643xx_eth_private, tx_timeout_task); | ||
2480 | if (netif_running(mp->dev)) { | ||
2481 | netif_tx_stop_all_queues(mp->dev); | ||
2482 | port_reset(mp); | ||
2483 | port_start(mp); | ||
2484 | netif_tx_wake_all_queues(mp->dev); | ||
2485 | } | ||
2486 | } | ||
2487 | |||
2488 | static void mv643xx_eth_tx_timeout(struct net_device *dev) | ||
2489 | { | ||
2490 | struct mv643xx_eth_private *mp = netdev_priv(dev); | ||
2491 | |||
2492 | netdev_info(dev, "tx timeout\n"); | ||
2493 | |||
2494 | schedule_work(&mp->tx_timeout_task); | ||
2495 | } | ||
2496 | |||
#ifdef CONFIG_NET_POLL_CONTROLLER
/* Polling 'interrupt' for netconsole and friends: mask the port's
 * interrupts, invoke the IRQ handler by hand, then restore the mask.
 */
static void mv643xx_eth_netpoll(struct net_device *dev)
{
	struct mv643xx_eth_private *mp = netdev_priv(dev);

	wrlp(mp, INT_MASK, 0x00000000);
	/* read back, presumably to flush the posted write */
	rdlp(mp, INT_MASK);

	mv643xx_eth_irq(dev->irq, dev);

	wrlp(mp, INT_MASK, mp->int_mask);
}
#endif
2510 | |||
2511 | |||
2512 | /* platform glue ************************************************************/ | ||
/* Program the six MBUS address-decode windows from the DRAM target
 * info: clear every window first, then map one window per DRAM chip
 * select and record the protection mask in msp->win_protect.
 */
static void
mv643xx_eth_conf_mbus_windows(struct mv643xx_eth_shared_private *msp,
			      struct mbus_dram_target_info *dram)
{
	void __iomem *base = msp->base;
	u32 win_enable;
	u32 win_protect;
	int i;

	for (i = 0; i < 6; i++) {
		writel(0, base + WINDOW_BASE(i));
		writel(0, base + WINDOW_SIZE(i));
		if (i < 4)
			writel(0, base + WINDOW_REMAP_HIGH(i));
	}

	/* NOTE(review): the enable bits appear to be active-low -- a
	 * window is enabled below by clearing its bit; confirm against
	 * the controller datasheet.
	 */
	win_enable = 0x3f;
	win_protect = 0;

	for (i = 0; i < dram->num_cs; i++) {
		struct mbus_dram_window *cs = dram->cs + i;

		writel((cs->base & 0xffff0000) |
			(cs->mbus_attr << 8) |
			dram->mbus_dram_target_id, base + WINDOW_BASE(i));
		writel((cs->size - 1) & 0xffff0000, base + WINDOW_SIZE(i));

		win_enable &= ~(1 << i);
		/* two protection bits per window -- presumably full
		 * read/write access; verify against the datasheet
		 */
		win_protect |= 3 << (2 * i);
	}

	writel(win_enable, base + WINDOW_BAR_ENABLE);
	msp->win_protect = win_protect;
}
2547 | |||
/* Probe which register layout this silicon revision uses by writing
 * test patterns to port 0's registers (at offset 0x0400) and reading
 * them back; results are cached in msp for later use.
 */
static void infer_hw_params(struct mv643xx_eth_shared_private *msp)
{
	/*
	 * Check whether we have a 14-bit coal limit field in bits
	 * [21:8], or a 16-bit coal limit in bits [25,21:7] of the
	 * SDMA config register.
	 */
	writel(0x02000000, msp->base + 0x0400 + SDMA_CONFIG);
	if (readl(msp->base + 0x0400 + SDMA_CONFIG) & 0x02000000)
		msp->extended_rx_coal_limit = 1;
	else
		msp->extended_rx_coal_limit = 0;

	/*
	 * Check whether the MAC supports TX rate control, and if
	 * yes, whether its associated registers are in the old or
	 * the new place.
	 */
	writel(1, msp->base + 0x0400 + TX_BW_MTU_MOVED);
	if (readl(msp->base + 0x0400 + TX_BW_MTU_MOVED) & 1) {
		msp->tx_bw_control = TX_BW_CONTROL_NEW_LAYOUT;
	} else {
		writel(7, msp->base + 0x0400 + TX_BW_RATE);
		if (readl(msp->base + 0x0400 + TX_BW_RATE) & 7)
			msp->tx_bw_control = TX_BW_CONTROL_OLD_LAYOUT;
		else
			msp->tx_bw_control = TX_BW_CONTROL_ABSENT;
	}
}
2577 | |||
2578 | static int mv643xx_eth_shared_probe(struct platform_device *pdev) | ||
2579 | { | ||
2580 | static int mv643xx_eth_version_printed; | ||
2581 | struct mv643xx_eth_shared_platform_data *pd = pdev->dev.platform_data; | ||
2582 | struct mv643xx_eth_shared_private *msp; | ||
2583 | struct resource *res; | ||
2584 | int ret; | ||
2585 | |||
2586 | if (!mv643xx_eth_version_printed++) | ||
2587 | pr_notice("MV-643xx 10/100/1000 ethernet driver version %s\n", | ||
2588 | mv643xx_eth_driver_version); | ||
2589 | |||
2590 | ret = -EINVAL; | ||
2591 | res = platform_get_resource(pdev, IORESOURCE_MEM, 0); | ||
2592 | if (res == NULL) | ||
2593 | goto out; | ||
2594 | |||
2595 | ret = -ENOMEM; | ||
2596 | msp = kzalloc(sizeof(*msp), GFP_KERNEL); | ||
2597 | if (msp == NULL) | ||
2598 | goto out; | ||
2599 | |||
2600 | msp->base = ioremap(res->start, resource_size(res)); | ||
2601 | if (msp->base == NULL) | ||
2602 | goto out_free; | ||
2603 | |||
2604 | /* | ||
2605 | * Set up and register SMI bus. | ||
2606 | */ | ||
2607 | if (pd == NULL || pd->shared_smi == NULL) { | ||
2608 | msp->smi_bus = mdiobus_alloc(); | ||
2609 | if (msp->smi_bus == NULL) | ||
2610 | goto out_unmap; | ||
2611 | |||
2612 | msp->smi_bus->priv = msp; | ||
2613 | msp->smi_bus->name = "mv643xx_eth smi"; | ||
2614 | msp->smi_bus->read = smi_bus_read; | ||
2615 | msp->smi_bus->write = smi_bus_write, | ||
2616 | snprintf(msp->smi_bus->id, MII_BUS_ID_SIZE, "%d", pdev->id); | ||
2617 | msp->smi_bus->parent = &pdev->dev; | ||
2618 | msp->smi_bus->phy_mask = 0xffffffff; | ||
2619 | if (mdiobus_register(msp->smi_bus) < 0) | ||
2620 | goto out_free_mii_bus; | ||
2621 | msp->smi = msp; | ||
2622 | } else { | ||
2623 | msp->smi = platform_get_drvdata(pd->shared_smi); | ||
2624 | } | ||
2625 | |||
2626 | msp->err_interrupt = NO_IRQ; | ||
2627 | init_waitqueue_head(&msp->smi_busy_wait); | ||
2628 | |||
2629 | /* | ||
2630 | * Check whether the error interrupt is hooked up. | ||
2631 | */ | ||
2632 | res = platform_get_resource(pdev, IORESOURCE_IRQ, 0); | ||
2633 | if (res != NULL) { | ||
2634 | int err; | ||
2635 | |||
2636 | err = request_irq(res->start, mv643xx_eth_err_irq, | ||
2637 | IRQF_SHARED, "mv643xx_eth", msp); | ||
2638 | if (!err) { | ||
2639 | writel(ERR_INT_SMI_DONE, msp->base + ERR_INT_MASK); | ||
2640 | msp->err_interrupt = res->start; | ||
2641 | } | ||
2642 | } | ||
2643 | |||
2644 | /* | ||
2645 | * (Re-)program MBUS remapping windows if we are asked to. | ||
2646 | */ | ||
2647 | if (pd != NULL && pd->dram != NULL) | ||
2648 | mv643xx_eth_conf_mbus_windows(msp, pd->dram); | ||
2649 | |||
2650 | /* | ||
2651 | * Detect hardware parameters. | ||
2652 | */ | ||
2653 | msp->t_clk = (pd != NULL && pd->t_clk != 0) ? pd->t_clk : 133000000; | ||
2654 | msp->tx_csum_limit = (pd != NULL && pd->tx_csum_limit) ? | ||
2655 | pd->tx_csum_limit : 9 * 1024; | ||
2656 | infer_hw_params(msp); | ||
2657 | |||
2658 | platform_set_drvdata(pdev, msp); | ||
2659 | |||
2660 | return 0; | ||
2661 | |||
2662 | out_free_mii_bus: | ||
2663 | mdiobus_free(msp->smi_bus); | ||
2664 | out_unmap: | ||
2665 | iounmap(msp->base); | ||
2666 | out_free: | ||
2667 | kfree(msp); | ||
2668 | out: | ||
2669 | return ret; | ||
2670 | } | ||
2671 | |||
2672 | static int mv643xx_eth_shared_remove(struct platform_device *pdev) | ||
2673 | { | ||
2674 | struct mv643xx_eth_shared_private *msp = platform_get_drvdata(pdev); | ||
2675 | struct mv643xx_eth_shared_platform_data *pd = pdev->dev.platform_data; | ||
2676 | |||
2677 | if (pd == NULL || pd->shared_smi == NULL) { | ||
2678 | mdiobus_unregister(msp->smi_bus); | ||
2679 | mdiobus_free(msp->smi_bus); | ||
2680 | } | ||
2681 | if (msp->err_interrupt != NO_IRQ) | ||
2682 | free_irq(msp->err_interrupt, msp); | ||
2683 | iounmap(msp->base); | ||
2684 | kfree(msp); | ||
2685 | |||
2686 | return 0; | ||
2687 | } | ||
2688 | |||
/* Platform driver for the shared (per-chip) part of the controller. */
static struct platform_driver mv643xx_eth_shared_driver = {
	.probe		= mv643xx_eth_shared_probe,
	.remove		= mv643xx_eth_shared_remove,
	.driver = {
		.name	= MV643XX_ETH_SHARED_NAME,
		.owner	= THIS_MODULE,
	},
};
2697 | |||
2698 | static void phy_addr_set(struct mv643xx_eth_private *mp, int phy_addr) | ||
2699 | { | ||
2700 | int addr_shift = 5 * mp->port_num; | ||
2701 | u32 data; | ||
2702 | |||
2703 | data = rdl(mp, PHY_ADDR); | ||
2704 | data &= ~(0x1f << addr_shift); | ||
2705 | data |= (phy_addr & 0x1f) << addr_shift; | ||
2706 | wrl(mp, PHY_ADDR, data); | ||
2707 | } | ||
2708 | |||
2709 | static int phy_addr_get(struct mv643xx_eth_private *mp) | ||
2710 | { | ||
2711 | unsigned int data; | ||
2712 | |||
2713 | data = rdl(mp, PHY_ADDR); | ||
2714 | |||
2715 | return (data >> (5 * mp->port_num)) & 0x1f; | ||
2716 | } | ||
2717 | |||
2718 | static void set_params(struct mv643xx_eth_private *mp, | ||
2719 | struct mv643xx_eth_platform_data *pd) | ||
2720 | { | ||
2721 | struct net_device *dev = mp->dev; | ||
2722 | |||
2723 | if (is_valid_ether_addr(pd->mac_addr)) | ||
2724 | memcpy(dev->dev_addr, pd->mac_addr, 6); | ||
2725 | else | ||
2726 | uc_addr_get(mp, dev->dev_addr); | ||
2727 | |||
2728 | mp->rx_ring_size = DEFAULT_RX_QUEUE_SIZE; | ||
2729 | if (pd->rx_queue_size) | ||
2730 | mp->rx_ring_size = pd->rx_queue_size; | ||
2731 | mp->rx_desc_sram_addr = pd->rx_sram_addr; | ||
2732 | mp->rx_desc_sram_size = pd->rx_sram_size; | ||
2733 | |||
2734 | mp->rxq_count = pd->rx_queue_count ? : 1; | ||
2735 | |||
2736 | mp->tx_ring_size = DEFAULT_TX_QUEUE_SIZE; | ||
2737 | if (pd->tx_queue_size) | ||
2738 | mp->tx_ring_size = pd->tx_queue_size; | ||
2739 | mp->tx_desc_sram_addr = pd->tx_sram_addr; | ||
2740 | mp->tx_desc_sram_size = pd->tx_sram_size; | ||
2741 | |||
2742 | mp->txq_count = pd->tx_queue_count ? : 1; | ||
2743 | } | ||
2744 | |||
2745 | static struct phy_device *phy_scan(struct mv643xx_eth_private *mp, | ||
2746 | int phy_addr) | ||
2747 | { | ||
2748 | struct mii_bus *bus = mp->shared->smi->smi_bus; | ||
2749 | struct phy_device *phydev; | ||
2750 | int start; | ||
2751 | int num; | ||
2752 | int i; | ||
2753 | |||
2754 | if (phy_addr == MV643XX_ETH_PHY_ADDR_DEFAULT) { | ||
2755 | start = phy_addr_get(mp) & 0x1f; | ||
2756 | num = 32; | ||
2757 | } else { | ||
2758 | start = phy_addr & 0x1f; | ||
2759 | num = 1; | ||
2760 | } | ||
2761 | |||
2762 | phydev = NULL; | ||
2763 | for (i = 0; i < num; i++) { | ||
2764 | int addr = (start + i) & 0x1f; | ||
2765 | |||
2766 | if (bus->phy_map[addr] == NULL) | ||
2767 | mdiobus_scan(bus, addr); | ||
2768 | |||
2769 | if (phydev == NULL) { | ||
2770 | phydev = bus->phy_map[addr]; | ||
2771 | if (phydev != NULL) | ||
2772 | phy_addr_set(mp, addr); | ||
2773 | } | ||
2774 | } | ||
2775 | |||
2776 | return phydev; | ||
2777 | } | ||
2778 | |||
/*
 * Reset and attach the PHY found by phy_scan(), then configure it either
 * for autonegotiation (speed == 0) or for a fixed speed/duplex, and kick
 * off (re)negotiation.
 *
 * NOTE(review): the return value of phy_attach() is ignored here; if the
 * attach fails, phy->... below would operate on an unattached device -
 * worth confirming against the callers' guarantees.
 */
static void phy_init(struct mv643xx_eth_private *mp, int speed, int duplex)
{
	struct phy_device *phy = mp->phy;

	phy_reset(mp);

	phy_attach(mp->dev, dev_name(&phy->dev), 0, PHY_INTERFACE_MODE_GMII);

	if (speed == 0) {
		/* speed == 0 requests autonegotiation of everything. */
		phy->autoneg = AUTONEG_ENABLE;
		phy->speed = 0;
		phy->duplex = 0;
		phy->advertising = phy->supported | ADVERTISED_Autoneg;
	} else {
		/* Fixed configuration: no advertisement, forced speed/duplex. */
		phy->autoneg = AUTONEG_DISABLE;
		phy->advertising = 0;
		phy->speed = speed;
		phy->duplex = duplex;
	}
	phy_start_aneg(phy);
}
2800 | |||
/*
 * Initialize the port serial control register.  The port is first
 * disabled (if enabled) before PSCR is rewritten, as required before
 * changing serial parameters.  When there is no PHY (mp->phy == NULL)
 * autonegotiation is disabled and speed/duplex are forced from the
 * platform data.
 */
static void init_pscr(struct mv643xx_eth_private *mp, int speed, int duplex)
{
	u32 pscr;

	pscr = rdlp(mp, PORT_SERIAL_CONTROL);
	if (pscr & SERIAL_PORT_ENABLE) {
		/* Port must be disabled before reconfiguring PSCR. */
		pscr &= ~SERIAL_PORT_ENABLE;
		wrlp(mp, PORT_SERIAL_CONTROL, pscr);
	}

	pscr = MAX_RX_PACKET_9700BYTE | SERIAL_PORT_CONTROL_RESERVED;
	if (mp->phy == NULL) {
		/* PHY-less operation: force everything, no autoneg. */
		pscr |= DISABLE_AUTO_NEG_SPEED_GMII;
		if (speed == SPEED_1000)
			pscr |= SET_GMII_SPEED_TO_1000;
		else if (speed == SPEED_100)
			pscr |= SET_MII_SPEED_TO_100;

		pscr |= DISABLE_AUTO_NEG_FOR_FLOW_CTRL;

		pscr |= DISABLE_AUTO_NEG_FOR_DUPLEX;
		if (duplex == DUPLEX_FULL)
			pscr |= SET_FULL_DUPLEX_MODE;
	}

	wrlp(mp, PORT_SERIAL_CONTROL, pscr);
}
2828 | |||
/* net_device callbacks for a single mv643xx port. */
static const struct net_device_ops mv643xx_eth_netdev_ops = {
	.ndo_open		= mv643xx_eth_open,
	.ndo_stop		= mv643xx_eth_stop,
	.ndo_start_xmit		= mv643xx_eth_xmit,
	.ndo_set_rx_mode	= mv643xx_eth_set_rx_mode,
	.ndo_set_mac_address	= mv643xx_eth_set_mac_address,
	.ndo_validate_addr	= eth_validate_addr,
	.ndo_do_ioctl		= mv643xx_eth_ioctl,
	.ndo_change_mtu		= mv643xx_eth_change_mtu,
	.ndo_set_features	= mv643xx_eth_set_features,
	.ndo_tx_timeout		= mv643xx_eth_tx_timeout,
	.ndo_get_stats		= mv643xx_eth_get_stats,
#ifdef CONFIG_NET_POLL_CONTROLLER
	.ndo_poll_controller	= mv643xx_eth_netpoll,
#endif
};
2845 | |||
2846 | static int mv643xx_eth_probe(struct platform_device *pdev) | ||
2847 | { | ||
2848 | struct mv643xx_eth_platform_data *pd; | ||
2849 | struct mv643xx_eth_private *mp; | ||
2850 | struct net_device *dev; | ||
2851 | struct resource *res; | ||
2852 | int err; | ||
2853 | |||
2854 | pd = pdev->dev.platform_data; | ||
2855 | if (pd == NULL) { | ||
2856 | dev_err(&pdev->dev, "no mv643xx_eth_platform_data\n"); | ||
2857 | return -ENODEV; | ||
2858 | } | ||
2859 | |||
2860 | if (pd->shared == NULL) { | ||
2861 | dev_err(&pdev->dev, "no mv643xx_eth_platform_data->shared\n"); | ||
2862 | return -ENODEV; | ||
2863 | } | ||
2864 | |||
2865 | dev = alloc_etherdev_mq(sizeof(struct mv643xx_eth_private), 8); | ||
2866 | if (!dev) | ||
2867 | return -ENOMEM; | ||
2868 | |||
2869 | mp = netdev_priv(dev); | ||
2870 | platform_set_drvdata(pdev, mp); | ||
2871 | |||
2872 | mp->shared = platform_get_drvdata(pd->shared); | ||
2873 | mp->base = mp->shared->base + 0x0400 + (pd->port_number << 10); | ||
2874 | mp->port_num = pd->port_number; | ||
2875 | |||
2876 | mp->dev = dev; | ||
2877 | |||
2878 | set_params(mp, pd); | ||
2879 | netif_set_real_num_tx_queues(dev, mp->txq_count); | ||
2880 | netif_set_real_num_rx_queues(dev, mp->rxq_count); | ||
2881 | |||
2882 | if (pd->phy_addr != MV643XX_ETH_PHY_NONE) | ||
2883 | mp->phy = phy_scan(mp, pd->phy_addr); | ||
2884 | |||
2885 | if (mp->phy != NULL) | ||
2886 | phy_init(mp, pd->speed, pd->duplex); | ||
2887 | |||
2888 | SET_ETHTOOL_OPS(dev, &mv643xx_eth_ethtool_ops); | ||
2889 | |||
2890 | init_pscr(mp, pd->speed, pd->duplex); | ||
2891 | |||
2892 | |||
2893 | mib_counters_clear(mp); | ||
2894 | |||
2895 | init_timer(&mp->mib_counters_timer); | ||
2896 | mp->mib_counters_timer.data = (unsigned long)mp; | ||
2897 | mp->mib_counters_timer.function = mib_counters_timer_wrapper; | ||
2898 | mp->mib_counters_timer.expires = jiffies + 30 * HZ; | ||
2899 | add_timer(&mp->mib_counters_timer); | ||
2900 | |||
2901 | spin_lock_init(&mp->mib_counters_lock); | ||
2902 | |||
2903 | INIT_WORK(&mp->tx_timeout_task, tx_timeout_task); | ||
2904 | |||
2905 | netif_napi_add(dev, &mp->napi, mv643xx_eth_poll, 128); | ||
2906 | |||
2907 | init_timer(&mp->rx_oom); | ||
2908 | mp->rx_oom.data = (unsigned long)mp; | ||
2909 | mp->rx_oom.function = oom_timer_wrapper; | ||
2910 | |||
2911 | |||
2912 | res = platform_get_resource(pdev, IORESOURCE_IRQ, 0); | ||
2913 | BUG_ON(!res); | ||
2914 | dev->irq = res->start; | ||
2915 | |||
2916 | dev->netdev_ops = &mv643xx_eth_netdev_ops; | ||
2917 | |||
2918 | dev->watchdog_timeo = 2 * HZ; | ||
2919 | dev->base_addr = 0; | ||
2920 | |||
2921 | dev->hw_features = NETIF_F_SG | NETIF_F_IP_CSUM | | ||
2922 | NETIF_F_RXCSUM | NETIF_F_LRO; | ||
2923 | dev->features = NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_RXCSUM; | ||
2924 | dev->vlan_features = NETIF_F_SG | NETIF_F_IP_CSUM; | ||
2925 | |||
2926 | SET_NETDEV_DEV(dev, &pdev->dev); | ||
2927 | |||
2928 | if (mp->shared->win_protect) | ||
2929 | wrl(mp, WINDOW_PROTECT(mp->port_num), mp->shared->win_protect); | ||
2930 | |||
2931 | netif_carrier_off(dev); | ||
2932 | |||
2933 | wrlp(mp, SDMA_CONFIG, PORT_SDMA_CONFIG_DEFAULT_VALUE); | ||
2934 | |||
2935 | set_rx_coal(mp, 250); | ||
2936 | set_tx_coal(mp, 0); | ||
2937 | |||
2938 | err = register_netdev(dev); | ||
2939 | if (err) | ||
2940 | goto out; | ||
2941 | |||
2942 | netdev_notice(dev, "port %d with MAC address %pM\n", | ||
2943 | mp->port_num, dev->dev_addr); | ||
2944 | |||
2945 | if (mp->tx_desc_sram_size > 0) | ||
2946 | netdev_notice(dev, "configured with sram\n"); | ||
2947 | |||
2948 | return 0; | ||
2949 | |||
2950 | out: | ||
2951 | free_netdev(dev); | ||
2952 | |||
2953 | return err; | ||
2954 | } | ||
2955 | |||
/*
 * Remove one port device.  Order matters: the netdev must be
 * unregistered (which stops traffic and closes the device if it is up)
 * before the PHY is detached and memory is freed.
 *
 * NOTE(review): the mib_counters_timer armed in probe is presumably
 * stopped by the ->stop() path during unregister_netdev(); if the device
 * was never opened that may not hold - worth confirming.
 */
static int mv643xx_eth_remove(struct platform_device *pdev)
{
	struct mv643xx_eth_private *mp = platform_get_drvdata(pdev);

	unregister_netdev(mp->dev);
	if (mp->phy != NULL)
		phy_detach(mp->phy);
	cancel_work_sync(&mp->tx_timeout_task);
	free_netdev(mp->dev);

	platform_set_drvdata(pdev, NULL);

	return 0;
}
2970 | |||
/*
 * System shutdown hook: silence the port so it cannot DMA or interrupt
 * across a kexec/reboot.
 */
static void mv643xx_eth_shutdown(struct platform_device *pdev)
{
	struct mv643xx_eth_private *mp = platform_get_drvdata(pdev);

	/* Mask all interrupts on ethernet port */
	wrlp(mp, INT_MASK, 0);
	rdlp(mp, INT_MASK);	/* read back to flush the posted write */

	if (netif_running(mp->dev))
		port_reset(mp);
}
2982 | |||
/* Platform driver for the per-port devices. */
static struct platform_driver mv643xx_eth_driver = {
	.probe		= mv643xx_eth_probe,
	.remove		= mv643xx_eth_remove,
	.shutdown	= mv643xx_eth_shutdown,
	.driver = {
		.name	= MV643XX_ETH_NAME,
		.owner	= THIS_MODULE,
	},
};
2992 | |||
2993 | static int __init mv643xx_eth_init_module(void) | ||
2994 | { | ||
2995 | int rc; | ||
2996 | |||
2997 | rc = platform_driver_register(&mv643xx_eth_shared_driver); | ||
2998 | if (!rc) { | ||
2999 | rc = platform_driver_register(&mv643xx_eth_driver); | ||
3000 | if (rc) | ||
3001 | platform_driver_unregister(&mv643xx_eth_shared_driver); | ||
3002 | } | ||
3003 | |||
3004 | return rc; | ||
3005 | } | ||
3006 | module_init(mv643xx_eth_init_module); | ||
3007 | |||
/* Unregister in reverse order: ports first, then the shared device. */
static void __exit mv643xx_eth_cleanup_module(void)
{
	platform_driver_unregister(&mv643xx_eth_driver);
	platform_driver_unregister(&mv643xx_eth_shared_driver);
}
module_exit(mv643xx_eth_cleanup_module);
3014 | |||
/* Module metadata; the platform: aliases enable autoloading. */
MODULE_AUTHOR("Rabeeh Khoury, Assaf Hoffman, Matthew Dharm, "
	      "Manish Lachwani, Dale Farnsworth and Lennert Buytenhek");
MODULE_DESCRIPTION("Ethernet driver for Marvell MV643XX");
MODULE_LICENSE("GPL");
MODULE_ALIAS("platform:" MV643XX_ETH_SHARED_NAME);
MODULE_ALIAS("platform:" MV643XX_ETH_NAME);
diff --git a/drivers/net/ethernet/marvell/pxa168_eth.c b/drivers/net/ethernet/marvell/pxa168_eth.c new file mode 100644 index 000000000000..1a3033d8e7ed --- /dev/null +++ b/drivers/net/ethernet/marvell/pxa168_eth.c | |||
@@ -0,0 +1,1662 @@ | |||
1 | /* | ||
2 | * PXA168 ethernet driver. | ||
3 | * Most of the code is derived from mv643xx ethernet driver. | ||
4 | * | ||
5 | * Copyright (C) 2010 Marvell International Ltd. | ||
6 | * Sachin Sanap <ssanap@marvell.com> | ||
7 | * Zhangfei Gao <zgao6@marvell.com> | ||
8 | * Philip Rakity <prakity@marvell.com> | ||
9 | * Mark Brown <markb@marvell.com> | ||
10 | * | ||
11 | * This program is free software; you can redistribute it and/or | ||
12 | * modify it under the terms of the GNU General Public License | ||
13 | * as published by the Free Software Foundation; either version 2 | ||
14 | * of the License, or (at your option) any later version. | ||
15 | * | ||
16 | * This program is distributed in the hope that it will be useful, | ||
17 | * but WITHOUT ANY WARRANTY; without even the implied warranty of | ||
18 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the | ||
19 | * GNU General Public License for more details. | ||
20 | * | ||
21 | * You should have received a copy of the GNU General Public License | ||
22 | * along with this program; if not, write to the Free Software | ||
23 | * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. | ||
24 | */ | ||
25 | |||
26 | #include <linux/init.h> | ||
27 | #include <linux/dma-mapping.h> | ||
28 | #include <linux/in.h> | ||
29 | #include <linux/ip.h> | ||
30 | #include <linux/tcp.h> | ||
31 | #include <linux/udp.h> | ||
32 | #include <linux/etherdevice.h> | ||
33 | #include <linux/bitops.h> | ||
34 | #include <linux/delay.h> | ||
35 | #include <linux/ethtool.h> | ||
36 | #include <linux/platform_device.h> | ||
37 | #include <linux/module.h> | ||
38 | #include <linux/kernel.h> | ||
39 | #include <linux/workqueue.h> | ||
40 | #include <linux/clk.h> | ||
41 | #include <linux/phy.h> | ||
42 | #include <linux/io.h> | ||
43 | #include <linux/types.h> | ||
44 | #include <asm/pgtable.h> | ||
45 | #include <asm/system.h> | ||
46 | #include <asm/cacheflush.h> | ||
47 | #include <linux/pxa168_eth.h> | ||
48 | |||
#define DRIVER_NAME	"pxa168-eth"
#define DRIVER_VERSION	"0.3"

/*
 * Registers (offsets from the controller's MMIO base)
 */

#define PHY_ADDRESS		0x0000
#define SMI			0x0010
#define PORT_CONFIG		0x0400
#define PORT_CONFIG_EXT		0x0408
#define PORT_COMMAND		0x0410
#define PORT_STATUS		0x0418
#define HTPR			0x0428
#define SDMA_CONFIG		0x0440
#define SDMA_CMD		0x0448
#define INT_CAUSE		0x0450
#define INT_W_CLEAR		0x0454
#define INT_MASK		0x0458
#define ETH_F_RX_DESC_0		0x0480
#define ETH_C_RX_DESC_0		0x04A0
#define ETH_C_TX_DESC_1		0x04E4

/* smi register */
#define SMI_BUSY		(1 << 28)	/* 0 - Write, 1 - Read */
#define SMI_R_VALID		(1 << 27)	/* 0 - Write, 1 - Read */
#define SMI_OP_W		(0 << 26)	/* Write operation */
#define SMI_OP_R		(1 << 26)	/* Read operation */

/* Max polls of the SMI register before giving up. */
#define PHY_WAIT_ITERATIONS	10

#define PXA168_ETH_PHY_ADDR_DEFAULT	0
/* RX & TX descriptor command */
#define BUF_OWNED_BY_DMA	(1 << 31)

/* RX descriptor status */
#define RX_EN_INT		(1 << 23)
#define RX_FIRST_DESC		(1 << 17)
#define RX_LAST_DESC		(1 << 16)
#define RX_ERROR		(1 << 15)

/* TX descriptor command */
#define TX_EN_INT		(1 << 23)
#define TX_GEN_CRC		(1 << 22)
#define TX_ZERO_PADDING		(1 << 18)
#define TX_FIRST_DESC		(1 << 17)
#define TX_LAST_DESC		(1 << 16)
#define TX_ERROR		(1 << 15)

/* SDMA_CMD */
#define SDMA_CMD_AT		(1 << 31)
#define SDMA_CMD_TXDL		(1 << 24)
#define SDMA_CMD_TXDH		(1 << 23)
#define SDMA_CMD_AR		(1 << 15)
#define SDMA_CMD_ERD		(1 << 7)

/* Bit definitions of the Port Config Reg */
#define PCR_HS			(1 << 12)
#define PCR_EN			(1 << 7)
#define PCR_PM			(1 << 0)

/* Bit definitions of the Port Config Extend Reg */
#define PCXR_2BSM		(1 << 28)
#define PCXR_DSCP_EN		(1 << 21)
#define PCXR_MFL_1518		(0 << 14)
#define PCXR_MFL_1536		(1 << 14)
#define PCXR_MFL_2048		(2 << 14)
#define PCXR_MFL_64K		(3 << 14)
#define PCXR_FLP		(1 << 11)
#define PCXR_PRIO_TX_OFF	3
#define PCXR_TX_HIGH_PRI	(7 << PCXR_PRIO_TX_OFF)

/* Bit definitions of the SDMA Config Reg */
#define SDCR_BSZ_OFF		12
#define SDCR_BSZ8		(3 << SDCR_BSZ_OFF)
#define SDCR_BSZ4		(2 << SDCR_BSZ_OFF)
#define SDCR_BSZ2		(1 << SDCR_BSZ_OFF)
#define SDCR_BSZ1		(0 << SDCR_BSZ_OFF)
#define SDCR_BLMR		(1 << 6)
#define SDCR_BLMT		(1 << 7)
#define SDCR_RIFB		(1 << 9)
#define SDCR_RC_OFF		2
#define SDCR_RC_MAX_RETRANS	(0xf << SDCR_RC_OFF)

/*
 * Bit definitions of the Interrupt Cause Reg
 * and Interrupt MASK Reg is the same
 */
#define ICR_RXBUF		(1 << 0)
#define ICR_TXBUF_H		(1 << 2)
#define ICR_TXBUF_L		(1 << 3)
#define ICR_TXEND_H		(1 << 6)
#define ICR_TXEND_L		(1 << 7)
#define ICR_RXERR		(1 << 8)
#define ICR_TXERR_H		(1 << 10)
#define ICR_TXERR_L		(1 << 11)
#define ICR_TX_UDR		(1 << 13)
#define ICR_MII_CH		(1 << 28)

#define ALL_INTS (ICR_TXBUF_H  | ICR_TXBUF_L  | ICR_TX_UDR |\
				ICR_TXERR_H  | ICR_TXERR_L |\
				ICR_TXEND_H  | ICR_TXEND_L |\
				ICR_RXBUF | ICR_RXERR  | ICR_MII_CH)

#define ETH_HW_IP_ALIGN		2	/* hw aligns IP header */

/* Ring sizes (descriptors per ring). */
#define NUM_RX_DESCS	64
#define NUM_TX_DESCS	64

#define HASH_ADD		0
#define HASH_DELETE		1
#define HASH_ADDR_TABLE_SIZE	0x4000	/* 16K (1/2K address - PCR_HS == 1) */
#define HOP_NUMBER		12	/* max probe length in the hash table */

/* Bit definitions for Port status */
#define PORT_SPEED_100		(1 << 0)
#define FULL_DUPLEX		(1 << 1)
#define FLOW_CONTROL_ENABLED	(1 << 2)
#define LINK_UP			(1 << 3)

/* Bit definitions for work to be done */
#define WORK_LINK		(1 << 0)
#define WORK_TX_DONE		(1 << 1)

/*
 * Misc definitions.
 */
#define SKB_DMA_REALIGN		((PAGE_SIZE - NET_SKB_PAD) % SMP_CACHE_BYTES)
177 | |||
/* Hardware RX descriptor; layout is DMA-shared with the controller. */
struct rx_desc {
	u32 cmd_sts;		/* Descriptor command status */
	u16 byte_cnt;		/* Descriptor buffer byte count */
	u16 buf_size;		/* Buffer size */
	u32 buf_ptr;		/* Descriptor buffer pointer */
	u32 next_desc_ptr;	/* Next descriptor pointer */
};
185 | |||
/* Hardware TX descriptor; layout is DMA-shared with the controller. */
struct tx_desc {
	u32 cmd_sts;		/* Command/status field */
	u16 reserved;
	u16 byte_cnt;		/* buffer byte count */
	u32 buf_ptr;		/* pointer to buffer for this descriptor */
	u32 next_desc_ptr;	/* Pointer to next descriptor */
};
193 | |||
/* Per-device driver state. */
struct pxa168_eth_private {
	int port_num;		/* User Ethernet port number */

	int rx_resource_err;	/* Rx ring resource error flag */

	/* Next available and first returning Rx resource */
	int rx_curr_desc_q, rx_used_desc_q;

	/* Next available and first returning Tx resource */
	int tx_curr_desc_q, tx_used_desc_q;

	/* RX descriptor ring (coherent DMA) and its per-slot skbs. */
	struct rx_desc *p_rx_desc_area;
	dma_addr_t rx_desc_dma;
	int rx_desc_area_size;
	struct sk_buff **rx_skb;

	/* TX descriptor ring (coherent DMA) and its per-slot skbs. */
	struct tx_desc *p_tx_desc_area;
	dma_addr_t tx_desc_dma;
	int tx_desc_area_size;
	struct sk_buff **tx_skb;

	struct work_struct tx_timeout_task;

	struct net_device *dev;
	struct napi_struct napi;
	u8 work_todo;		/* WORK_LINK / WORK_TX_DONE flags */
	int skb_size;		/* rx buffer allocation size */

	struct net_device_stats stats;
	/* Size of Tx Ring per queue */
	int tx_ring_size;
	/* Number of tx descriptors in use */
	int tx_desc_count;
	/* Size of Rx Ring per queue */
	int rx_ring_size;
	/* Number of rx descriptors in use */
	int rx_desc_count;

	/*
	 * Used in case RX Ring is empty, which can occur when
	 * system does not have resources (skb's)
	 */
	struct timer_list timeout;
	struct mii_bus *smi_bus;
	struct phy_device *phy;

	/* clock */
	struct clk *clk;
	struct pxa168_eth_platform_data *pd;
	/*
	 * Ethernet controller base address.
	 */
	void __iomem *base;

	/* Pointer to the hardware address filter table */
	void *htpr;
	dma_addr_t htpr_dma;	/* DMA address of the filter table */
};
252 | |||
/* One 8-byte entry of the hardware address filter (hash) table. */
struct addr_table_entry {
	__le32 lo;
	__le32 hi;
};
257 | |||
/* Bit fields of a Hash Table Entry */
/* NOTE(review): SKIP and HASH_ENTRY_RECEIVE_DISCARD_BIT are used as shift
 * counts in add_del_hash_entry() (skip << SKIP, rd << ..._BIT), both landing
 * on bit 2 - confirm against the controller's hash-entry layout. */
enum hash_table_entry {
	HASH_ENTRY_VALID = 1,
	SKIP = 2,
	HASH_ENTRY_RECEIVE_DISCARD = 4,
	HASH_ENTRY_RECEIVE_DISCARD_BIT = 2
};
265 | |||
266 | static int pxa168_get_settings(struct net_device *dev, struct ethtool_cmd *cmd); | ||
267 | static int pxa168_set_settings(struct net_device *dev, struct ethtool_cmd *cmd); | ||
268 | static int pxa168_init_hw(struct pxa168_eth_private *pep); | ||
269 | static void eth_port_reset(struct net_device *dev); | ||
270 | static void eth_port_start(struct net_device *dev); | ||
271 | static int pxa168_eth_open(struct net_device *dev); | ||
272 | static int pxa168_eth_stop(struct net_device *dev); | ||
273 | static int ethernet_phy_setup(struct net_device *dev); | ||
274 | |||
/* Read a 32-bit controller register at the given byte offset. */
static inline u32 rdl(struct pxa168_eth_private *pep, int offset)
{
	return readl(pep->base + offset);
}
279 | |||
/* Write a 32-bit controller register at the given byte offset. */
static inline void wrl(struct pxa168_eth_private *pep, int offset, u32 data)
{
	writel(data, pep->base + offset);
}
284 | |||
/*
 * Abort any in-flight SDMA transmit/receive activity.  Issues the
 * abort-rx/abort-tx commands and polls for the hardware to acknowledge
 * (the AR/AT bits self-clear), retrying the whole sequence up to 40
 * times before complaining.
 */
static void abort_dma(struct pxa168_eth_private *pep)
{
	int delay;
	int max_retries = 40;

	do {
		wrl(pep, SDMA_CMD, SDMA_CMD_AR | SDMA_CMD_AT);
		udelay(100);

		/* Wait up to 10 * 10us for AR/AT to self-clear. */
		delay = 10;
		while ((rdl(pep, SDMA_CMD) & (SDMA_CMD_AR | SDMA_CMD_AT))
		       && delay-- > 0) {
			udelay(10);
		}
	} while (max_retries-- > 0 && delay <= 0);

	if (max_retries <= 0)
		printk(KERN_ERR "%s : DMA Stuck\n", __func__);
}
304 | |||
305 | static int ethernet_phy_get(struct pxa168_eth_private *pep) | ||
306 | { | ||
307 | unsigned int reg_data; | ||
308 | |||
309 | reg_data = rdl(pep, PHY_ADDRESS); | ||
310 | |||
311 | return (reg_data >> (5 * pep->port_num)) & 0x1f; | ||
312 | } | ||
313 | |||
314 | static void ethernet_phy_set_addr(struct pxa168_eth_private *pep, int phy_addr) | ||
315 | { | ||
316 | u32 reg_data; | ||
317 | int addr_shift = 5 * pep->port_num; | ||
318 | |||
319 | reg_data = rdl(pep, PHY_ADDRESS); | ||
320 | reg_data &= ~(0x1f << addr_shift); | ||
321 | reg_data |= (phy_addr & 0x1f) << addr_shift; | ||
322 | wrl(pep, PHY_ADDRESS, reg_data); | ||
323 | } | ||
324 | |||
/*
 * Soft-reset the attached PHY via BMCR and wait for the self-clearing
 * reset bit to drop.  Read/write errors abort silently.
 *
 * NOTE(review): the completion poll below is unbounded - a PHY that
 * never clears BMCR_RESET would spin here forever; a bounded retry
 * (cf. PHY_WAIT_ITERATIONS) may be worth considering.
 */
static void ethernet_phy_reset(struct pxa168_eth_private *pep)
{
	int data;

	data = phy_read(pep->phy, MII_BMCR);
	if (data < 0)
		return;

	data |= BMCR_RESET;
	if (phy_write(pep->phy, MII_BMCR, data) < 0)
		return;

	do {
		data = phy_read(pep->phy, MII_BMCR);
	} while (data >= 0 && data & BMCR_RESET);
}
341 | |||
/*
 * Refill the RX ring with freshly-allocated skbs until it is full or
 * allocation fails.  Each buffer is DMA-mapped and its descriptor handed
 * back to the hardware; the wmb() pair ensures the buffer pointer/size
 * are visible before ownership transfers via cmd_sts.  If no skb at all
 * could be queued, a retry timer is armed to try again in ~100 ms.
 */
static void rxq_refill(struct net_device *dev)
{
	struct pxa168_eth_private *pep = netdev_priv(dev);
	struct sk_buff *skb;
	struct rx_desc *p_used_rx_desc;
	int used_rx_desc;

	while (pep->rx_desc_count < pep->rx_ring_size) {
		int size;

		skb = dev_alloc_skb(pep->skb_size);
		if (!skb)
			break;
		/* Align skb->data for DMA if the default padding misaligns it. */
		if (SKB_DMA_REALIGN)
			skb_reserve(skb, SKB_DMA_REALIGN);
		pep->rx_desc_count++;
		/* Get 'used' Rx descriptor */
		used_rx_desc = pep->rx_used_desc_q;
		p_used_rx_desc = &pep->p_rx_desc_area[used_rx_desc];
		size = skb->end - skb->data;
		p_used_rx_desc->buf_ptr = dma_map_single(NULL,
							 skb->data,
							 size,
							 DMA_FROM_DEVICE);
		p_used_rx_desc->buf_size = size;
		pep->rx_skb[used_rx_desc] = skb;

		/* Return the descriptor to DMA ownership */
		wmb();
		p_used_rx_desc->cmd_sts = BUF_OWNED_BY_DMA | RX_EN_INT;
		wmb();

		/* Move the used descriptor pointer to the next descriptor */
		pep->rx_used_desc_q = (used_rx_desc + 1) % pep->rx_ring_size;

		/* Any Rx return cancels the Rx resource error status */
		pep->rx_resource_err = 0;

		/* Reserve 2 bytes so the IP header lands 4-byte aligned. */
		skb_reserve(skb, ETH_HW_IP_ALIGN);
	}

	/*
	 * If RX ring is empty of SKB, set a timer to try allocating
	 * again at a later time.
	 */
	if (pep->rx_desc_count == 0) {
		pep->timeout.expires = jiffies + (HZ / 10);
		add_timer(&pep->timeout);
	}
}
392 | |||
393 | static inline void rxq_refill_timer_wrapper(unsigned long data) | ||
394 | { | ||
395 | struct pxa168_eth_private *pep = (void *)data; | ||
396 | napi_schedule(&pep->napi); | ||
397 | } | ||
398 | |||
399 | static inline u8 flip_8_bits(u8 x) | ||
400 | { | ||
401 | return (((x) & 0x01) << 3) | (((x) & 0x02) << 1) | ||
402 | | (((x) & 0x04) >> 1) | (((x) & 0x08) >> 3) | ||
403 | | (((x) & 0x10) << 3) | (((x) & 0x20) << 1) | ||
404 | | (((x) & 0x40) >> 1) | (((x) & 0x80) >> 3); | ||
405 | } | ||
406 | |||
407 | static void nibble_swap_every_byte(unsigned char *mac_addr) | ||
408 | { | ||
409 | int i; | ||
410 | for (i = 0; i < ETH_ALEN; i++) { | ||
411 | mac_addr[i] = ((mac_addr[i] & 0x0f) << 4) | | ||
412 | ((mac_addr[i] & 0xf0) >> 4); | ||
413 | } | ||
414 | } | ||
415 | |||
416 | static void inverse_every_nibble(unsigned char *mac_addr) | ||
417 | { | ||
418 | int i; | ||
419 | for (i = 0; i < ETH_ALEN; i++) | ||
420 | mac_addr[i] = flip_8_bits(mac_addr[i]); | ||
421 | } | ||
422 | |||
423 | /* | ||
424 | * ---------------------------------------------------------------------------- | ||
425 | * This function will calculate the hash function of the address. | ||
426 | * Inputs | ||
427 | * mac_addr_orig - MAC address. | ||
428 | * Outputs | ||
429 | * return the calculated entry. | ||
430 | */ | ||
431 | static u32 hash_function(unsigned char *mac_addr_orig) | ||
432 | { | ||
433 | u32 hash_result; | ||
434 | u32 addr0; | ||
435 | u32 addr1; | ||
436 | u32 addr2; | ||
437 | u32 addr3; | ||
438 | unsigned char mac_addr[ETH_ALEN]; | ||
439 | |||
440 | /* Make a copy of MAC address since we are going to performe bit | ||
441 | * operations on it | ||
442 | */ | ||
443 | memcpy(mac_addr, mac_addr_orig, ETH_ALEN); | ||
444 | |||
445 | nibble_swap_every_byte(mac_addr); | ||
446 | inverse_every_nibble(mac_addr); | ||
447 | |||
448 | addr0 = (mac_addr[5] >> 2) & 0x3f; | ||
449 | addr1 = (mac_addr[5] & 0x03) | (((mac_addr[4] & 0x7f)) << 2); | ||
450 | addr2 = ((mac_addr[4] & 0x80) >> 7) | mac_addr[3] << 1; | ||
451 | addr3 = (mac_addr[2] & 0xff) | ((mac_addr[1] & 1) << 8); | ||
452 | |||
453 | hash_result = (addr0 << 9) | (addr1 ^ addr2 ^ addr3); | ||
454 | hash_result = hash_result & 0x07ff; | ||
455 | return hash_result; | ||
456 | } | ||
457 | |||
458 | /* | ||
459 | * ---------------------------------------------------------------------------- | ||
460 | * This function will add/del an entry to the address table. | ||
461 | * Inputs | ||
462 | * pep - ETHERNET . | ||
463 | * mac_addr - MAC address. | ||
464 | * skip - if 1, skip this address.Used in case of deleting an entry which is a | ||
465 | * part of chain in the hash table.We can't just delete the entry since | ||
466 | * that will break the chain.We need to defragment the tables time to | ||
467 | * time. | ||
468 | * rd - 0 Discard packet upon match. | ||
469 | * - 1 Receive packet upon match. | ||
470 | * Outputs | ||
471 | * address table entry is added/deleted. | ||
472 | * 0 if success. | ||
473 | * -ENOSPC if table full | ||
474 | */ | ||
475 | static int add_del_hash_entry(struct pxa168_eth_private *pep, | ||
476 | unsigned char *mac_addr, | ||
477 | u32 rd, u32 skip, int del) | ||
478 | { | ||
479 | struct addr_table_entry *entry, *start; | ||
480 | u32 new_high; | ||
481 | u32 new_low; | ||
482 | u32 i; | ||
483 | |||
484 | new_low = (((mac_addr[1] >> 4) & 0xf) << 15) | ||
485 | | (((mac_addr[1] >> 0) & 0xf) << 11) | ||
486 | | (((mac_addr[0] >> 4) & 0xf) << 7) | ||
487 | | (((mac_addr[0] >> 0) & 0xf) << 3) | ||
488 | | (((mac_addr[3] >> 4) & 0x1) << 31) | ||
489 | | (((mac_addr[3] >> 0) & 0xf) << 27) | ||
490 | | (((mac_addr[2] >> 4) & 0xf) << 23) | ||
491 | | (((mac_addr[2] >> 0) & 0xf) << 19) | ||
492 | | (skip << SKIP) | (rd << HASH_ENTRY_RECEIVE_DISCARD_BIT) | ||
493 | | HASH_ENTRY_VALID; | ||
494 | |||
495 | new_high = (((mac_addr[5] >> 4) & 0xf) << 15) | ||
496 | | (((mac_addr[5] >> 0) & 0xf) << 11) | ||
497 | | (((mac_addr[4] >> 4) & 0xf) << 7) | ||
498 | | (((mac_addr[4] >> 0) & 0xf) << 3) | ||
499 | | (((mac_addr[3] >> 5) & 0x7) << 0); | ||
500 | |||
501 | /* | ||
502 | * Pick the appropriate table, start scanning for free/reusable | ||
503 | * entries at the index obtained by hashing the specified MAC address | ||
504 | */ | ||
505 | start = pep->htpr; | ||
506 | entry = start + hash_function(mac_addr); | ||
507 | for (i = 0; i < HOP_NUMBER; i++) { | ||
508 | if (!(le32_to_cpu(entry->lo) & HASH_ENTRY_VALID)) { | ||
509 | break; | ||
510 | } else { | ||
511 | /* if same address put in same position */ | ||
512 | if (((le32_to_cpu(entry->lo) & 0xfffffff8) == | ||
513 | (new_low & 0xfffffff8)) && | ||
514 | (le32_to_cpu(entry->hi) == new_high)) { | ||
515 | break; | ||
516 | } | ||
517 | } | ||
518 | if (entry == start + 0x7ff) | ||
519 | entry = start; | ||
520 | else | ||
521 | entry++; | ||
522 | } | ||
523 | |||
524 | if (((le32_to_cpu(entry->lo) & 0xfffffff8) != (new_low & 0xfffffff8)) && | ||
525 | (le32_to_cpu(entry->hi) != new_high) && del) | ||
526 | return 0; | ||
527 | |||
528 | if (i == HOP_NUMBER) { | ||
529 | if (!del) { | ||
530 | printk(KERN_INFO "%s: table section is full, need to " | ||
531 | "move to 16kB implementation?\n", | ||
532 | __FILE__); | ||
533 | return -ENOSPC; | ||
534 | } else | ||
535 | return 0; | ||
536 | } | ||
537 | |||
538 | /* | ||
539 | * Update the selected entry | ||
540 | */ | ||
541 | if (del) { | ||
542 | entry->hi = 0; | ||
543 | entry->lo = 0; | ||
544 | } else { | ||
545 | entry->hi = cpu_to_le32(new_high); | ||
546 | entry->lo = cpu_to_le32(new_low); | ||
547 | } | ||
548 | |||
549 | return 0; | ||
550 | } | ||
551 | |||
552 | /* | ||
553 | * ---------------------------------------------------------------------------- | ||
554 | * Create an addressTable entry from MAC address info | ||
555 | * found in the specifed net_device struct | ||
556 | * | ||
557 | * Input : pointer to ethernet interface network device structure | ||
558 | * Output : N/A | ||
559 | */ | ||
560 | static void update_hash_table_mac_address(struct pxa168_eth_private *pep, | ||
561 | unsigned char *oaddr, | ||
562 | unsigned char *addr) | ||
563 | { | ||
564 | /* Delete old entry */ | ||
565 | if (oaddr) | ||
566 | add_del_hash_entry(pep, oaddr, 1, 0, HASH_DELETE); | ||
567 | /* Add new entry */ | ||
568 | add_del_hash_entry(pep, addr, 1, 0, HASH_ADD); | ||
569 | } | ||
570 | |||
/* Allocate (on first call) and reset the 1/2kB address hash table, then
 * point the hardware at it via HTPR. Returns 0 or -ENOMEM. */
static int init_hash_table(struct pxa168_eth_private *pep)
{
	/*
	 * Hardware expects CPU to build a hash table based on a predefined
	 * hash function and populate it based on hardware address. The
	 * location of the hash table is identified by 32-bit pointer stored
	 * in HTPR internal register. Two possible sizes exist for the hash
	 * table: 8kB (256kB of DRAM required (4 x 64 kB banks)) and 1/2kB
	 * (16kB of DRAM required (4 x 4 kB banks)). We currently only
	 * support 1/2kB.
	 */
	/* TODO: Add support for 8kB hash table and alternative hash
	 * function. Driver can dynamically switch to them if the 1/2kB hash
	 * table is full.
	 */
	if (pep->htpr == NULL) {
		/* First call: the table must be DMA-coherent since the MAC
		 * walks it directly. */
		pep->htpr = dma_alloc_coherent(pep->dev->dev.parent,
					       HASH_ADDR_TABLE_SIZE,
					       &pep->htpr_dma, GFP_KERNEL);
		if (pep->htpr == NULL)
			return -ENOMEM;
	}
	/* Start from an empty table and hand its bus address to the MAC. */
	memset(pep->htpr, 0, HASH_ADDR_TABLE_SIZE);
	wrl(pep, HTPR, pep->htpr_dma);
	return 0;
}
597 | |||
/* ndo_set_rx_mode: toggle promiscuous mode and rebuild the hash table
 * from the interface address plus the current multicast list. */
static void pxa168_eth_set_rx_mode(struct net_device *dev)
{
	struct pxa168_eth_private *pep = netdev_priv(dev);
	struct netdev_hw_addr *ha;
	u32 val;

	/* Promiscuous mode is a single bit in PORT_CONFIG. */
	val = rdl(pep, PORT_CONFIG);
	if (dev->flags & IFF_PROMISC)
		val |= PCR_PM;
	else
		val &= ~PCR_PM;
	wrl(pep, PORT_CONFIG, val);

	/*
	 * Remove the old list of MAC address and add dev->addr
	 * and multicast address.
	 */
	memset(pep->htpr, 0, HASH_ADDR_TABLE_SIZE);
	update_hash_table_mac_address(pep, NULL, dev->dev_addr);

	netdev_for_each_mc_addr(ha, dev)
		update_hash_table_mac_address(pep, NULL, ha->addr);
}
621 | |||
622 | static int pxa168_eth_set_mac_address(struct net_device *dev, void *addr) | ||
623 | { | ||
624 | struct sockaddr *sa = addr; | ||
625 | struct pxa168_eth_private *pep = netdev_priv(dev); | ||
626 | unsigned char oldMac[ETH_ALEN]; | ||
627 | |||
628 | if (!is_valid_ether_addr(sa->sa_data)) | ||
629 | return -EINVAL; | ||
630 | memcpy(oldMac, dev->dev_addr, ETH_ALEN); | ||
631 | memcpy(dev->dev_addr, sa->sa_data, ETH_ALEN); | ||
632 | netif_addr_lock_bh(dev); | ||
633 | update_hash_table_mac_address(pep, oldMac, dev->dev_addr); | ||
634 | netif_addr_unlock_bh(dev); | ||
635 | return 0; | ||
636 | } | ||
637 | |||
/* Bring the port up: optionally reset the PHY (preserving link settings),
 * program the descriptor pointer registers, unmask interrupts, enable the
 * port and kick the RX DMA engine. */
static void eth_port_start(struct net_device *dev)
{
	unsigned int val = 0;
	struct pxa168_eth_private *pep = netdev_priv(dev);
	int tx_curr_desc, rx_curr_desc;

	/* Perform PHY reset, if there is a PHY. */
	if (pep->phy != NULL) {
		struct ethtool_cmd cmd;

		/* Save the current link settings across the reset and
		 * re-apply them afterwards. */
		pxa168_get_settings(pep->dev, &cmd);
		ethernet_phy_reset(pep);
		pxa168_set_settings(pep->dev, &cmd);
	}

	/* Assignment of Tx CTRP of given queue */
	tx_curr_desc = pep->tx_curr_desc_q;
	wrl(pep, ETH_C_TX_DESC_1,
	    (u32) (pep->tx_desc_dma + tx_curr_desc * sizeof(struct tx_desc)));

	/* Assignment of Rx CRDP of given queue */
	rx_curr_desc = pep->rx_curr_desc_q;
	wrl(pep, ETH_C_RX_DESC_0,
	    (u32) (pep->rx_desc_dma + rx_curr_desc * sizeof(struct rx_desc)));

	/* First RX descriptor register gets the same start address. */
	wrl(pep, ETH_F_RX_DESC_0,
	    (u32) (pep->rx_desc_dma + rx_curr_desc * sizeof(struct rx_desc)));

	/* Clear all interrupts */
	wrl(pep, INT_CAUSE, 0);

	/* Enable all interrupts for receive, transmit and error. */
	wrl(pep, INT_MASK, ALL_INTS);

	/* Enable the port. */
	val = rdl(pep, PORT_CONFIG);
	val |= PCR_EN;
	wrl(pep, PORT_CONFIG, val);

	/* Start RX DMA engine */
	val = rdl(pep, SDMA_CMD);
	val |= SDMA_CMD_ERD;
	wrl(pep, SDMA_CMD, val);
}
681 | |||
/* Quiesce the port: mask and acknowledge interrupts, abort DMA and
 * disable the MAC. */
static void eth_port_reset(struct net_device *dev)
{
	struct pxa168_eth_private *pep = netdev_priv(dev);
	unsigned int val = 0;

	/* Stop all interrupts for receive, transmit and error. */
	wrl(pep, INT_MASK, 0);

	/* Clear all interrupts */
	wrl(pep, INT_CAUSE, 0);

	/* Stop RX DMA */
	val = rdl(pep, SDMA_CMD);
	val &= ~SDMA_CMD_ERD;	/* abort dma command */
	/* NOTE(review): the cleared value is never written back to SDMA_CMD
	 * here; presumably abort_dma() below performs the actual stop --
	 * confirm before relying on this read-modify sequence. */

	/* Abort any transmit and receive operations and put DMA
	 * in idle state.
	 */
	abort_dma(pep);

	/* Disable port */
	val = rdl(pep, PORT_CONFIG);
	val &= ~PCR_EN;
	wrl(pep, PORT_CONFIG, val);
}
707 | |||
/*
 * txq_reclaim - Free the tx desc data for completed descriptors
 * If force is non-zero, frees uncompleted descriptors as well
 *
 * Returns the number of descriptors released, or -1 when the very first
 * descriptor examined was still owned by the DMA engine.
 */
static int txq_reclaim(struct net_device *dev, int force)
{
	struct pxa168_eth_private *pep = netdev_priv(dev);
	struct tx_desc *desc;
	u32 cmd_sts;
	struct sk_buff *skb;
	int tx_index;
	dma_addr_t addr;
	int count;
	int released = 0;

	/* Serialize against the transmit path. */
	netif_tx_lock(dev);

	pep->work_todo &= ~WORK_TX_DONE;
	while (pep->tx_desc_count > 0) {
		tx_index = pep->tx_used_desc_q;
		desc = &pep->p_tx_desc_area[tx_index];
		cmd_sts = desc->cmd_sts;
		if (!force && (cmd_sts & BUF_OWNED_BY_DMA)) {
			/* Hardware still owns this descriptor: stop.
			 * Both branches exit the loop; -1 only flags
			 * "nothing was reclaimed at all". */
			if (released > 0) {
				goto txq_reclaim_end;
			} else {
				released = -1;
				goto txq_reclaim_end;
			}
		}
		pep->tx_used_desc_q = (tx_index + 1) % pep->tx_ring_size;
		pep->tx_desc_count--;
		addr = desc->buf_ptr;
		count = desc->byte_cnt;
		skb = pep->tx_skb[tx_index];
		if (skb)
			pep->tx_skb[tx_index] = NULL;

		if (cmd_sts & TX_ERROR) {
			if (net_ratelimit())
				printk(KERN_ERR "%s: Error in TX\n", dev->name);
			dev->stats.tx_errors++;
		}
		/* NOTE(review): dma_unmap_single() with a NULL device --
		 * mirrors the map in the xmit path, but verify this is
		 * acceptable for the targeted kernel's DMA API. */
		dma_unmap_single(NULL, addr, count, DMA_TO_DEVICE);
		if (skb)
			dev_kfree_skb_irq(skb);
		released++;
	}
txq_reclaim_end:
	netif_tx_unlock(dev);
	return released;
}
760 | |||
/* ndo_tx_timeout: log the stall and defer the actual recovery (a full
 * stop/open cycle) to process context via the tx_timeout_task work. */
static void pxa168_eth_tx_timeout(struct net_device *dev)
{
	struct pxa168_eth_private *pep = netdev_priv(dev);

	printk(KERN_INFO "%s: TX timeout desc_count %d\n",
	       dev->name, pep->tx_desc_count);

	schedule_work(&pep->tx_timeout_task);
}
770 | |||
/* Workqueue body for the TX watchdog: recover by restarting the
 * interface (tears down and rebuilds both rings). */
static void pxa168_eth_tx_timeout_task(struct work_struct *work)
{
	struct pxa168_eth_private *pep = container_of(work,
						      struct pxa168_eth_private,
						      tx_timeout_task);
	struct net_device *dev = pep->dev;
	pxa168_eth_stop(dev);
	pxa168_eth_open(dev);
}
780 | |||
/* Receive up to @budget frames from the RX ring (NAPI context).
 * Returns the number of descriptors consumed, including dropped frames. */
static int rxq_process(struct net_device *dev, int budget)
{
	struct pxa168_eth_private *pep = netdev_priv(dev);
	struct net_device_stats *stats = &dev->stats;
	unsigned int received_packets = 0;
	struct sk_buff *skb;

	while (budget-- > 0) {
		int rx_next_curr_desc, rx_curr_desc, rx_used_desc;
		struct rx_desc *rx_desc;
		unsigned int cmd_sts;

		/* Do not process Rx ring in case of Rx ring resource error */
		if (pep->rx_resource_err)
			break;
		rx_curr_desc = pep->rx_curr_desc_q;
		rx_used_desc = pep->rx_used_desc_q;
		rx_desc = &pep->p_rx_desc_area[rx_curr_desc];
		cmd_sts = rx_desc->cmd_sts;
		/* Order the ownership read before the other descriptor
		 * field reads below. */
		rmb();
		if (cmd_sts & (BUF_OWNED_BY_DMA))
			break;
		skb = pep->rx_skb[rx_curr_desc];
		pep->rx_skb[rx_curr_desc] = NULL;

		rx_next_curr_desc = (rx_curr_desc + 1) % pep->rx_ring_size;
		pep->rx_curr_desc_q = rx_next_curr_desc;

		/* Rx descriptors exhausted. */
		/* Set the Rx ring resource error flag */
		if (rx_next_curr_desc == rx_used_desc)
			pep->rx_resource_err = 1;
		pep->rx_desc_count--;
		dma_unmap_single(NULL, rx_desc->buf_ptr,
				 rx_desc->buf_size,
				 DMA_FROM_DEVICE);
		received_packets++;
		/*
		 * Update statistics.
		 * Note byte count includes 4 byte CRC count
		 */
		stats->rx_packets++;
		stats->rx_bytes += rx_desc->byte_cnt;
		/*
		 * In case received a packet without first / last bits on OR
		 * the error summary bit is on, the packet needs to be dropped.
		 */
		if (((cmd_sts & (RX_FIRST_DESC | RX_LAST_DESC)) !=
		     (RX_FIRST_DESC | RX_LAST_DESC))
		    || (cmd_sts & RX_ERROR)) {

			stats->rx_dropped++;
			if ((cmd_sts & (RX_FIRST_DESC | RX_LAST_DESC)) !=
			    (RX_FIRST_DESC | RX_LAST_DESC)) {
				if (net_ratelimit())
					printk(KERN_ERR
					       "%s: Rx pkt on multiple desc\n",
					       dev->name);
			}
			if (cmd_sts & RX_ERROR)
				stats->rx_errors++;
			dev_kfree_skb_irq(skb);
		} else {
			/*
			 * The -4 is for the CRC in the trailer of the
			 * received packet
			 */
			skb_put(skb, rx_desc->byte_cnt - 4);
			skb->protocol = eth_type_trans(skb, dev);
			netif_receive_skb(skb);
		}
	}
	/* Fill RX ring with skb's */
	rxq_refill(dev);
	return received_packets;
}
857 | |||
858 | static int pxa168_eth_collect_events(struct pxa168_eth_private *pep, | ||
859 | struct net_device *dev) | ||
860 | { | ||
861 | u32 icr; | ||
862 | int ret = 0; | ||
863 | |||
864 | icr = rdl(pep, INT_CAUSE); | ||
865 | if (icr == 0) | ||
866 | return IRQ_NONE; | ||
867 | |||
868 | wrl(pep, INT_CAUSE, ~icr); | ||
869 | if (icr & (ICR_TXBUF_H | ICR_TXBUF_L)) { | ||
870 | pep->work_todo |= WORK_TX_DONE; | ||
871 | ret = 1; | ||
872 | } | ||
873 | if (icr & ICR_RXBUF) | ||
874 | ret = 1; | ||
875 | if (icr & ICR_MII_CH) { | ||
876 | pep->work_todo |= WORK_LINK; | ||
877 | ret = 1; | ||
878 | } | ||
879 | return ret; | ||
880 | } | ||
881 | |||
/* Translate PORT_STATUS into netif carrier state, logging speed,
 * duplex and flow-control; on link loss, force-reclaim the TX ring. */
static void handle_link_event(struct pxa168_eth_private *pep)
{
	struct net_device *dev = pep->dev;
	u32 port_status;
	int speed;
	int duplex;
	int fc;

	port_status = rdl(pep, PORT_STATUS);
	if (!(port_status & LINK_UP)) {
		if (netif_carrier_ok(dev)) {
			printk(KERN_INFO "%s: link down\n", dev->name);
			netif_carrier_off(dev);
			/* No completions will arrive without link, so free
			 * every in-flight TX buffer now. */
			txq_reclaim(dev, 1);
		}
		return;
	}
	/* This MAC reports only 10 or 100 Mb/s. */
	if (port_status & PORT_SPEED_100)
		speed = 100;
	else
		speed = 10;

	duplex = (port_status & FULL_DUPLEX) ? 1 : 0;
	fc = (port_status & FLOW_CONTROL_ENABLED) ? 1 : 0;
	printk(KERN_INFO "%s: link up, %d Mb/s, %s duplex, "
	       "flow control %sabled\n", dev->name,
	       speed, duplex ? "full" : "half", fc ? "en" : "dis");
	if (!netif_carrier_ok(dev))
		netif_carrier_on(dev);
}
912 | |||
/* Top-half interrupt handler: collect/ack causes, mask the block and
 * hand the real work to NAPI. */
static irqreturn_t pxa168_eth_int_handler(int irq, void *dev_id)
{
	struct net_device *dev = (struct net_device *)dev_id;
	struct pxa168_eth_private *pep = netdev_priv(dev);

	if (unlikely(!pxa168_eth_collect_events(pep, dev)))
		return IRQ_NONE;
	/* Disable interrupts */
	wrl(pep, INT_MASK, 0);
	/* pxa168_rx_poll() re-enables them when it finishes early. */
	napi_schedule(&pep->napi);
	return IRQ_HANDLED;
}
925 | |||
926 | static void pxa168_eth_recalc_skb_size(struct pxa168_eth_private *pep) | ||
927 | { | ||
928 | int skb_size; | ||
929 | |||
930 | /* | ||
931 | * Reserve 2+14 bytes for an ethernet header (the hardware | ||
932 | * automatically prepends 2 bytes of dummy data to each | ||
933 | * received packet), 16 bytes for up to four VLAN tags, and | ||
934 | * 4 bytes for the trailing FCS -- 36 bytes total. | ||
935 | */ | ||
936 | skb_size = pep->dev->mtu + 36; | ||
937 | |||
938 | /* | ||
939 | * Make sure that the skb size is a multiple of 8 bytes, as | ||
940 | * the lower three bits of the receive descriptor's buffer | ||
941 | * size field are ignored by the hardware. | ||
942 | */ | ||
943 | pep->skb_size = (skb_size + 7) & ~7; | ||
944 | |||
945 | /* | ||
946 | * If NET_SKB_PAD is smaller than a cache line, | ||
947 | * netdev_alloc_skb() will cause skb->data to be misaligned | ||
948 | * to a cache line boundary. If this is the case, include | ||
949 | * some extra space to allow re-aligning the data area. | ||
950 | */ | ||
951 | pep->skb_size += SKB_DMA_REALIGN; | ||
952 | |||
953 | } | ||
954 | |||
/* Program PORT_CONFIG_EXT from the current skb size, picking the
 * smallest max-frame-length bucket that fits. Always returns 0. */
static int set_port_config_ext(struct pxa168_eth_private *pep)
{
	int skb_size;

	pxa168_eth_recalc_skb_size(pep);
	if (pep->skb_size <= 1518)
		skb_size = PCXR_MFL_1518;
	else if (pep->skb_size <= 1536)
		skb_size = PCXR_MFL_1536;
	else if (pep->skb_size <= 2048)
		skb_size = PCXR_MFL_2048;
	else
		skb_size = PCXR_MFL_64K;

	/* Extended Port Configuration */
	wrl(pep,
	    PORT_CONFIG_EXT, PCXR_2BSM |	/* Two byte prefix aligns IP hdr */
	    PCXR_DSCP_EN |		/* Enable DSCP in IP */
	    skb_size | PCXR_FLP |	/* do not force link pass */
	    PCXR_TX_HIGH_PRI);		/* Transmit - high priority queue */

	return 0;
}
978 | |||
/* One-time MAC initialization: quiesce the block, build the address
 * hash table and program the SDMA and port configuration registers.
 * Returns 0 or a negative errno from the hash-table allocation. */
static int pxa168_init_hw(struct pxa168_eth_private *pep)
{
	int err = 0;

	/* Disable interrupts */
	wrl(pep, INT_MASK, 0);
	wrl(pep, INT_CAUSE, 0);
	/* Write to ICR to clear interrupts. */
	wrl(pep, INT_W_CLEAR, 0);
	/* Abort any transmit and receive operations and put DMA
	 * in idle state.
	 */
	abort_dma(pep);
	/* Initialize address hash table */
	err = init_hash_table(pep);
	if (err)
		return err;
	/* SDMA configuration */
	wrl(pep, SDMA_CONFIG, SDCR_BSZ8 |	/* Burst size = 32 bytes */
	    SDCR_RIFB |				/* Rx interrupt on frame */
	    SDCR_BLMT |				/* Little endian transmit */
	    SDCR_BLMR |				/* Little endian receive */
	    SDCR_RC_MAX_RETRANS);		/* Max retransmit count */
	/* Port Configuration */
	wrl(pep, PORT_CONFIG, PCR_HS);		/* Hash size is 1/2kb */
	set_port_config_ext(pep);

	return err;
}
1008 | |||
1009 | static int rxq_init(struct net_device *dev) | ||
1010 | { | ||
1011 | struct pxa168_eth_private *pep = netdev_priv(dev); | ||
1012 | struct rx_desc *p_rx_desc; | ||
1013 | int size = 0, i = 0; | ||
1014 | int rx_desc_num = pep->rx_ring_size; | ||
1015 | |||
1016 | /* Allocate RX skb rings */ | ||
1017 | pep->rx_skb = kmalloc(sizeof(*pep->rx_skb) * pep->rx_ring_size, | ||
1018 | GFP_KERNEL); | ||
1019 | if (!pep->rx_skb) { | ||
1020 | printk(KERN_ERR "%s: Cannot alloc RX skb ring\n", dev->name); | ||
1021 | return -ENOMEM; | ||
1022 | } | ||
1023 | /* Allocate RX ring */ | ||
1024 | pep->rx_desc_count = 0; | ||
1025 | size = pep->rx_ring_size * sizeof(struct rx_desc); | ||
1026 | pep->rx_desc_area_size = size; | ||
1027 | pep->p_rx_desc_area = dma_alloc_coherent(pep->dev->dev.parent, size, | ||
1028 | &pep->rx_desc_dma, GFP_KERNEL); | ||
1029 | if (!pep->p_rx_desc_area) { | ||
1030 | printk(KERN_ERR "%s: Cannot alloc RX ring (size %d bytes)\n", | ||
1031 | dev->name, size); | ||
1032 | goto out; | ||
1033 | } | ||
1034 | memset((void *)pep->p_rx_desc_area, 0, size); | ||
1035 | /* initialize the next_desc_ptr links in the Rx descriptors ring */ | ||
1036 | p_rx_desc = (struct rx_desc *)pep->p_rx_desc_area; | ||
1037 | for (i = 0; i < rx_desc_num; i++) { | ||
1038 | p_rx_desc[i].next_desc_ptr = pep->rx_desc_dma + | ||
1039 | ((i + 1) % rx_desc_num) * sizeof(struct rx_desc); | ||
1040 | } | ||
1041 | /* Save Rx desc pointer to driver struct. */ | ||
1042 | pep->rx_curr_desc_q = 0; | ||
1043 | pep->rx_used_desc_q = 0; | ||
1044 | pep->rx_desc_area_size = rx_desc_num * sizeof(struct rx_desc); | ||
1045 | return 0; | ||
1046 | out: | ||
1047 | kfree(pep->rx_skb); | ||
1048 | return -ENOMEM; | ||
1049 | } | ||
1050 | |||
/* Free the RX skbs, the descriptor ring and the skb pointer array. */
static void rxq_deinit(struct net_device *dev)
{
	struct pxa168_eth_private *pep = netdev_priv(dev);
	int curr;

	/* Free preallocated skb's on RX rings */
	/* NOTE(review): this walk requires every rx_skb slot to be either
	 * a valid skb or NULL -- ensure the array is zero-initialized at
	 * allocation, since rxq_refill() may not have filled all slots. */
	for (curr = 0; pep->rx_desc_count && curr < pep->rx_ring_size; curr++) {
		if (pep->rx_skb[curr]) {
			dev_kfree_skb(pep->rx_skb[curr]);
			pep->rx_desc_count--;
		}
	}
	if (pep->rx_desc_count)
		printk(KERN_ERR
		       "Error in freeing Rx Ring. %d skb's still\n",
		       pep->rx_desc_count);
	/* Free RX ring */
	if (pep->p_rx_desc_area)
		dma_free_coherent(pep->dev->dev.parent, pep->rx_desc_area_size,
				  pep->p_rx_desc_area, pep->rx_desc_dma);
	kfree(pep->rx_skb);
}
1073 | |||
1074 | static int txq_init(struct net_device *dev) | ||
1075 | { | ||
1076 | struct pxa168_eth_private *pep = netdev_priv(dev); | ||
1077 | struct tx_desc *p_tx_desc; | ||
1078 | int size = 0, i = 0; | ||
1079 | int tx_desc_num = pep->tx_ring_size; | ||
1080 | |||
1081 | pep->tx_skb = kmalloc(sizeof(*pep->tx_skb) * pep->tx_ring_size, | ||
1082 | GFP_KERNEL); | ||
1083 | if (!pep->tx_skb) { | ||
1084 | printk(KERN_ERR "%s: Cannot alloc TX skb ring\n", dev->name); | ||
1085 | return -ENOMEM; | ||
1086 | } | ||
1087 | /* Allocate TX ring */ | ||
1088 | pep->tx_desc_count = 0; | ||
1089 | size = pep->tx_ring_size * sizeof(struct tx_desc); | ||
1090 | pep->tx_desc_area_size = size; | ||
1091 | pep->p_tx_desc_area = dma_alloc_coherent(pep->dev->dev.parent, size, | ||
1092 | &pep->tx_desc_dma, GFP_KERNEL); | ||
1093 | if (!pep->p_tx_desc_area) { | ||
1094 | printk(KERN_ERR "%s: Cannot allocate Tx Ring (size %d bytes)\n", | ||
1095 | dev->name, size); | ||
1096 | goto out; | ||
1097 | } | ||
1098 | memset((void *)pep->p_tx_desc_area, 0, pep->tx_desc_area_size); | ||
1099 | /* Initialize the next_desc_ptr links in the Tx descriptors ring */ | ||
1100 | p_tx_desc = (struct tx_desc *)pep->p_tx_desc_area; | ||
1101 | for (i = 0; i < tx_desc_num; i++) { | ||
1102 | p_tx_desc[i].next_desc_ptr = pep->tx_desc_dma + | ||
1103 | ((i + 1) % tx_desc_num) * sizeof(struct tx_desc); | ||
1104 | } | ||
1105 | pep->tx_curr_desc_q = 0; | ||
1106 | pep->tx_used_desc_q = 0; | ||
1107 | pep->tx_desc_area_size = tx_desc_num * sizeof(struct tx_desc); | ||
1108 | return 0; | ||
1109 | out: | ||
1110 | kfree(pep->tx_skb); | ||
1111 | return -ENOMEM; | ||
1112 | } | ||
1113 | |||
/* Force-reclaim outstanding TX buffers, then free the descriptor ring
 * and the skb pointer array. */
static void txq_deinit(struct net_device *dev)
{
	struct pxa168_eth_private *pep = netdev_priv(dev);

	/* Free outstanding skb's on TX ring */
	txq_reclaim(dev, 1);
	/* A forced reclaim must fully drain the ring. */
	BUG_ON(pep->tx_used_desc_q != pep->tx_curr_desc_q);
	/* Free TX ring */
	if (pep->p_tx_desc_area)
		dma_free_coherent(pep->dev->dev.parent, pep->tx_desc_area_size,
				  pep->p_tx_desc_area, pep->tx_desc_dma);
	kfree(pep->tx_skb);
}
1127 | |||
/* ndo_open: claim the IRQ, build both rings, prime the RX ring with
 * buffers and start the port and NAPI.
 * Returns 0, -EAGAIN on IRQ failure, or the ring-setup errno. */
static int pxa168_eth_open(struct net_device *dev)
{
	struct pxa168_eth_private *pep = netdev_priv(dev);
	int err;

	err = request_irq(dev->irq, pxa168_eth_int_handler,
			  IRQF_DISABLED, dev->name, dev);
	if (err) {
		dev_printk(KERN_ERR, &dev->dev, "can't assign irq\n");
		return -EAGAIN;
	}
	pep->rx_resource_err = 0;
	err = rxq_init(dev);
	if (err != 0)
		goto out_free_irq;
	err = txq_init(dev);
	if (err != 0)
		goto out_free_rx_skb;
	pep->rx_used_desc_q = 0;
	pep->rx_curr_desc_q = 0;

	/* Fill RX ring with skb's */
	rxq_refill(dev);
	/* NOTE(review): the RX queue indices are reset both before and
	 * after rxq_refill(); the second reset looks redundant -- confirm
	 * against rxq_refill()'s use of the indices. */
	pep->rx_used_desc_q = 0;
	pep->rx_curr_desc_q = 0;
	netif_carrier_off(dev);
	eth_port_start(dev);
	napi_enable(&pep->napi);
	return 0;
out_free_rx_skb:
	rxq_deinit(dev);
out_free_irq:
	free_irq(dev->irq, dev);
	return err;
}
1163 | |||
/* ndo_stop: reverse of pxa168_eth_open() -- quiesce the hardware first,
 * then tear down NAPI, the IRQ and both rings. Always returns 0. */
static int pxa168_eth_stop(struct net_device *dev)
{
	struct pxa168_eth_private *pep = netdev_priv(dev);
	eth_port_reset(dev);

	/* Disable interrupts */
	wrl(pep, INT_MASK, 0);
	wrl(pep, INT_CAUSE, 0);
	/* Write to ICR to clear interrupts. */
	wrl(pep, INT_W_CLEAR, 0);
	napi_disable(&pep->napi);
	del_timer_sync(&pep->timeout);
	netif_carrier_off(dev);
	free_irq(dev->irq, dev);
	rxq_deinit(dev);
	txq_deinit(dev);

	return 0;
}
1183 | |||
/* ndo_change_mtu: accept 68..9500 (68 is the minimum IPv4 MTU; 9500 is
 * presumably this MAC's jumbo limit -- confirm against the datasheet),
 * reprogram the max frame length and, if running, restart the interface
 * so RX skbs are reallocated for the new size. */
static int pxa168_eth_change_mtu(struct net_device *dev, int mtu)
{
	int retval;
	struct pxa168_eth_private *pep = netdev_priv(dev);

	if ((mtu > 9500) || (mtu < 68))
		return -EINVAL;

	dev->mtu = mtu;
	retval = set_port_config_ext(pep);

	if (!netif_running(dev))
		return 0;

	/*
	 * Stop and then re-open the interface. This will allocate RX
	 * skbs of the new MTU.
	 * There is a possible danger that the open will not succeed,
	 * due to memory being full.
	 */
	pxa168_eth_stop(dev);
	if (pxa168_eth_open(dev)) {
		dev_printk(KERN_ERR, &dev->dev,
			   "fatal error on re-opening device after "
			   "MTU change\n");
	}

	return 0;
}
1213 | |||
1214 | static int eth_alloc_tx_desc_index(struct pxa168_eth_private *pep) | ||
1215 | { | ||
1216 | int tx_desc_curr; | ||
1217 | |||
1218 | tx_desc_curr = pep->tx_curr_desc_q; | ||
1219 | pep->tx_curr_desc_q = (tx_desc_curr + 1) % pep->tx_ring_size; | ||
1220 | BUG_ON(pep->tx_curr_desc_q == pep->tx_used_desc_q); | ||
1221 | pep->tx_desc_count++; | ||
1222 | |||
1223 | return tx_desc_curr; | ||
1224 | } | ||
1225 | |||
/* NAPI poll: handle deferred link events, reclaim TX, process up to
 * @budget RX frames, and re-enable interrupts when finishing early. */
static int pxa168_rx_poll(struct napi_struct *napi, int budget)
{
	struct pxa168_eth_private *pep =
	    container_of(napi, struct pxa168_eth_private, napi);
	struct net_device *dev = pep->dev;
	int work_done = 0;

	if (unlikely(pep->work_todo & WORK_LINK)) {
		pep->work_todo &= ~(WORK_LINK);
		handle_link_event(pep);
	}
	/*
	 * We call txq_reclaim every time since in NAPI interrupts are
	 * disabled and due to this we miss the TX_DONE interrupt, which is
	 * not updated in interrupt status register.
	 */
	txq_reclaim(dev, 0);
	if (netif_queue_stopped(dev)
	    && pep->tx_ring_size - pep->tx_desc_count > 1) {
		/* Reclaim freed enough descriptors to resume transmit. */
		netif_wake_queue(dev);
	}
	work_done = rxq_process(dev, budget);
	if (work_done < budget) {
		/* All caught up: leave polling mode and unmask the block. */
		napi_complete(napi);
		wrl(pep, INT_MASK, ALL_INTS);
	}

	return work_done;
}
1255 | |||
/* ndo_start_xmit: single-descriptor transmit (no scatter/gather). Map
 * the frame, hand ownership to the SDMA engine and stop the queue when
 * the ring is nearly full. Always returns NETDEV_TX_OK. */
static int pxa168_eth_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
	struct pxa168_eth_private *pep = netdev_priv(dev);
	struct net_device_stats *stats = &dev->stats;
	struct tx_desc *desc;
	int tx_index;
	int length;

	tx_index = eth_alloc_tx_desc_index(pep);
	desc = &pep->p_tx_desc_area[tx_index];
	length = skb->len;
	pep->tx_skb[tx_index] = skb;
	desc->byte_cnt = length;
	/* NOTE(review): dma_map_single() with a NULL device -- matches the
	 * unmap in txq_reclaim(), but verify against the targeted kernel's
	 * DMA API expectations. */
	desc->buf_ptr = dma_map_single(NULL, skb->data, length, DMA_TO_DEVICE);

	skb_tx_timestamp(skb);

	/* Publish all descriptor fields before transferring ownership,
	 * then the ownership bit before kicking the DMA engine. */
	wmb();
	desc->cmd_sts = BUF_OWNED_BY_DMA | TX_GEN_CRC | TX_FIRST_DESC |
			TX_ZERO_PADDING | TX_LAST_DESC | TX_EN_INT;
	wmb();
	wrl(pep, SDMA_CMD, SDMA_CMD_TXDH | SDMA_CMD_ERD);

	stats->tx_bytes += length;
	stats->tx_packets++;
	dev->trans_start = jiffies;
	if (pep->tx_ring_size - pep->tx_desc_count <= 1) {
		/* We handled the current skb, but now we are out of space.*/
		netif_stop_queue(dev);
	}

	return NETDEV_TX_OK;
}
1289 | |||
1290 | static int smi_wait_ready(struct pxa168_eth_private *pep) | ||
1291 | { | ||
1292 | int i = 0; | ||
1293 | |||
1294 | /* wait for the SMI register to become available */ | ||
1295 | for (i = 0; rdl(pep, SMI) & SMI_BUSY; i++) { | ||
1296 | if (i == PHY_WAIT_ITERATIONS) | ||
1297 | return -ETIMEDOUT; | ||
1298 | msleep(10); | ||
1299 | } | ||
1300 | |||
1301 | return 0; | ||
1302 | } | ||
1303 | |||
/* mii_bus read op: issue an SMI read command and poll for the result.
 * Returns the 16-bit register value, or a negative errno on timeout. */
static int pxa168_smi_read(struct mii_bus *bus, int phy_addr, int regnum)
{
	struct pxa168_eth_private *pep = bus->priv;
	int i = 0;
	int val;

	if (smi_wait_ready(pep)) {
		printk(KERN_WARNING "pxa168_eth: SMI bus busy timeout\n");
		return -ETIMEDOUT;
	}
	wrl(pep, SMI, (phy_addr << 16) | (regnum << 21) | SMI_OP_R);
	/* now wait for the data to be valid */
	for (i = 0; !((val = rdl(pep, SMI)) & SMI_R_VALID); i++) {
		if (i == PHY_WAIT_ITERATIONS) {
			printk(KERN_WARNING
			       "pxa168_eth: SMI bus read not valid\n");
			return -ENODEV;
		}
		msleep(10);
	}

	return val & 0xffff;
}
1327 | |||
/* mii_bus write op: wait for the SMI unit, issue the write, then wait
 * for it to complete. Returns 0 or -ETIMEDOUT. */
static int pxa168_smi_write(struct mii_bus *bus, int phy_addr, int regnum,
			    u16 value)
{
	struct pxa168_eth_private *pep = bus->priv;

	if (smi_wait_ready(pep)) {
		printk(KERN_WARNING "pxa168_eth: SMI bus busy timeout\n");
		return -ETIMEDOUT;
	}

	wrl(pep, SMI, (phy_addr << 16) | (regnum << 21) |
	    SMI_OP_W | (value & 0xffff));

	/* Second wait confirms the write itself completed. */
	if (smi_wait_ready(pep)) {
		printk(KERN_ERR "pxa168_eth: SMI bus busy timeout\n");
		return -ETIMEDOUT;
	}

	return 0;
}
1348 | |||
1349 | static int pxa168_eth_do_ioctl(struct net_device *dev, struct ifreq *ifr, | ||
1350 | int cmd) | ||
1351 | { | ||
1352 | struct pxa168_eth_private *pep = netdev_priv(dev); | ||
1353 | if (pep->phy != NULL) | ||
1354 | return phy_mii_ioctl(pep->phy, ifr, cmd); | ||
1355 | |||
1356 | return -EOPNOTSUPP; | ||
1357 | } | ||
1358 | |||
/* Locate the PHY on the MDIO bus. With PXA168_ETH_PHY_ADDR_DEFAULT the
 * full 32-address range is scanned starting from the current hardware
 * address; otherwise only the platform-specified address is probed.
 * Returns the first phy_device found, or NULL. */
static struct phy_device *phy_scan(struct pxa168_eth_private *pep, int phy_addr)
{
	struct mii_bus *bus = pep->smi_bus;
	struct phy_device *phydev;
	int start;
	int num;
	int i;

	if (phy_addr == PXA168_ETH_PHY_ADDR_DEFAULT) {
		/* Scan entire range */
		start = ethernet_phy_get(pep);
		num = 32;
	} else {
		/* Use phy addr specific to platform */
		start = phy_addr & 0x1f;
		num = 1;
	}
	phydev = NULL;
	for (i = 0; i < num; i++) {
		int addr = (start + i) & 0x1f;
		/* Probe lazily; skip addresses already discovered. */
		if (bus->phy_map[addr] == NULL)
			mdiobus_scan(bus, addr);

		/* Keep the first device found and record its address. */
		if (phydev == NULL) {
			phydev = bus->phy_map[addr];
			if (phydev != NULL)
				ethernet_phy_set_addr(pep, addr);
		}
	}

	return phydev;
}
1391 | |||
1392 | static void phy_init(struct pxa168_eth_private *pep, int speed, int duplex) | ||
1393 | { | ||
1394 | struct phy_device *phy = pep->phy; | ||
1395 | ethernet_phy_reset(pep); | ||
1396 | |||
1397 | phy_attach(pep->dev, dev_name(&phy->dev), 0, PHY_INTERFACE_MODE_MII); | ||
1398 | |||
1399 | if (speed == 0) { | ||
1400 | phy->autoneg = AUTONEG_ENABLE; | ||
1401 | phy->speed = 0; | ||
1402 | phy->duplex = 0; | ||
1403 | phy->supported &= PHY_BASIC_FEATURES; | ||
1404 | phy->advertising = phy->supported | ADVERTISED_Autoneg; | ||
1405 | } else { | ||
1406 | phy->autoneg = AUTONEG_DISABLE; | ||
1407 | phy->advertising = 0; | ||
1408 | phy->speed = speed; | ||
1409 | phy->duplex = duplex; | ||
1410 | } | ||
1411 | phy_start_aneg(phy); | ||
1412 | } | ||
1413 | |||
1414 | static int ethernet_phy_setup(struct net_device *dev) | ||
1415 | { | ||
1416 | struct pxa168_eth_private *pep = netdev_priv(dev); | ||
1417 | |||
1418 | if (pep->pd->init) | ||
1419 | pep->pd->init(); | ||
1420 | pep->phy = phy_scan(pep, pep->pd->phy_addr & 0x1f); | ||
1421 | if (pep->phy != NULL) | ||
1422 | phy_init(pep, pep->pd->speed, pep->pd->duplex); | ||
1423 | update_hash_table_mac_address(pep, NULL, dev->dev_addr); | ||
1424 | |||
1425 | return 0; | ||
1426 | } | ||
1427 | |||
1428 | static int pxa168_get_settings(struct net_device *dev, struct ethtool_cmd *cmd) | ||
1429 | { | ||
1430 | struct pxa168_eth_private *pep = netdev_priv(dev); | ||
1431 | int err; | ||
1432 | |||
1433 | err = phy_read_status(pep->phy); | ||
1434 | if (err == 0) | ||
1435 | err = phy_ethtool_gset(pep->phy, cmd); | ||
1436 | |||
1437 | return err; | ||
1438 | } | ||
1439 | |||
1440 | static int pxa168_set_settings(struct net_device *dev, struct ethtool_cmd *cmd) | ||
1441 | { | ||
1442 | struct pxa168_eth_private *pep = netdev_priv(dev); | ||
1443 | |||
1444 | return phy_ethtool_sset(pep->phy, cmd); | ||
1445 | } | ||
1446 | |||
1447 | static void pxa168_get_drvinfo(struct net_device *dev, | ||
1448 | struct ethtool_drvinfo *info) | ||
1449 | { | ||
1450 | strncpy(info->driver, DRIVER_NAME, 32); | ||
1451 | strncpy(info->version, DRIVER_VERSION, 32); | ||
1452 | strncpy(info->fw_version, "N/A", 32); | ||
1453 | strncpy(info->bus_info, "N/A", 32); | ||
1454 | } | ||
1455 | |||
/* ethtool entry points; link settings are delegated to the PHY layer. */
static const struct ethtool_ops pxa168_ethtool_ops = {
	.get_settings = pxa168_get_settings,
	.set_settings = pxa168_set_settings,
	.get_drvinfo = pxa168_get_drvinfo,
	.get_link = ethtool_op_get_link,
};
1462 | |||
/* net_device callbacks for the PXA168 MAC. */
static const struct net_device_ops pxa168_eth_netdev_ops = {
	.ndo_open = pxa168_eth_open,
	.ndo_stop = pxa168_eth_stop,
	.ndo_start_xmit = pxa168_eth_start_xmit,
	.ndo_set_rx_mode = pxa168_eth_set_rx_mode,
	.ndo_set_mac_address = pxa168_eth_set_mac_address,
	.ndo_validate_addr = eth_validate_addr,
	.ndo_do_ioctl = pxa168_eth_do_ioctl,
	.ndo_change_mtu = pxa168_eth_change_mtu,
	.ndo_tx_timeout = pxa168_eth_tx_timeout,
};
1474 | |||
/* Platform probe: enable the MAC clock, map registers, set up rings,
 * the private MDIO (SMI) bus and the PHY, then register the netdev.
 * Error paths unwind in strict reverse order via the goto ladder.
 */
static int pxa168_eth_probe(struct platform_device *pdev)
{
	struct pxa168_eth_private *pep = NULL;
	struct net_device *dev = NULL;
	struct resource *res;
	struct clk *clk;
	int err;

	printk(KERN_NOTICE "PXA168 10/100 Ethernet Driver\n");

	/* The MAC is clocked from MFUCLK; without it nothing works. */
	clk = clk_get(&pdev->dev, "MFUCLK");
	if (IS_ERR(clk)) {
		printk(KERN_ERR "%s: Fast Ethernet failed to get clock\n",
		       DRIVER_NAME);
		return -ENODEV;
	}
	clk_enable(clk);

	dev = alloc_etherdev(sizeof(struct pxa168_eth_private));
	if (!dev) {
		err = -ENOMEM;
		goto err_clk;
	}

	platform_set_drvdata(pdev, dev);
	pep = netdev_priv(dev);
	pep->dev = dev;
	pep->clk = clk;
	/* Controller register window. */
	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	if (res == NULL) {
		err = -ENODEV;
		goto err_netdev;
	}
	pep->base = ioremap(res->start, resource_size(res));
	if (pep->base == NULL) {
		err = -ENOMEM;
		goto err_netdev;
	}
	res = platform_get_resource(pdev, IORESOURCE_IRQ, 0);
	BUG_ON(!res);	/* the IRQ resource is mandatory */
	dev->irq = res->start;
	dev->netdev_ops = &pxa168_eth_netdev_ops;
	dev->watchdog_timeo = 2 * HZ;
	dev->base_addr = 0;
	SET_ETHTOOL_OPS(dev, &pxa168_ethtool_ops);

	INIT_WORK(&pep->tx_timeout_task, pxa168_eth_tx_timeout_task);

	/* No way to read a MAC address from hardware here: generate one. */
	printk(KERN_INFO "%s:Using random mac address\n", DRIVER_NAME);
	random_ether_addr(dev->dev_addr);

	/* Ring sizes default to NUM_*_DESCS; platform data may override. */
	pep->pd = pdev->dev.platform_data;
	pep->rx_ring_size = NUM_RX_DESCS;
	if (pep->pd->rx_queue_size)
		pep->rx_ring_size = pep->pd->rx_queue_size;

	pep->tx_ring_size = NUM_TX_DESCS;
	if (pep->pd->tx_queue_size)
		pep->tx_ring_size = pep->pd->tx_queue_size;

	pep->port_num = pep->pd->port_number;
	/* Hardware supports only 3 ports */
	BUG_ON(pep->port_num > 2);
	netif_napi_add(dev, &pep->napi, pxa168_rx_poll, pep->rx_ring_size);

	/* Timer used to retry RX ring refill after allocation failure. */
	memset(&pep->timeout, 0, sizeof(struct timer_list));
	init_timer(&pep->timeout);
	pep->timeout.function = rxq_refill_timer_wrapper;
	pep->timeout.data = (unsigned long)pep;

	/* Private MDIO (SMI) bus carrying the attached PHY. */
	pep->smi_bus = mdiobus_alloc();
	if (pep->smi_bus == NULL) {
		err = -ENOMEM;
		goto err_base;
	}
	pep->smi_bus->priv = pep;
	pep->smi_bus->name = "pxa168_eth smi";
	pep->smi_bus->read = pxa168_smi_read;
	pep->smi_bus->write = pxa168_smi_write;
	snprintf(pep->smi_bus->id, MII_BUS_ID_SIZE, "%d", pdev->id);
	pep->smi_bus->parent = &pdev->dev;
	/* Mask all addresses: phy_scan() probes explicitly. */
	pep->smi_bus->phy_mask = 0xffffffff;
	err = mdiobus_register(pep->smi_bus);
	if (err)
		goto err_free_mdio;

	pxa168_init_hw(pep);
	err = ethernet_phy_setup(dev);
	if (err)
		goto err_mdiobus;
	SET_NETDEV_DEV(dev, &pdev->dev);
	err = register_netdev(dev);
	if (err)
		goto err_mdiobus;
	return 0;

err_mdiobus:
	mdiobus_unregister(pep->smi_bus);
err_free_mdio:
	mdiobus_free(pep->smi_bus);
err_base:
	iounmap(pep->base);
err_netdev:
	free_netdev(dev);
err_clk:
	clk_disable(clk);
	clk_put(clk);
	return err;
}
1584 | |||
1585 | static int pxa168_eth_remove(struct platform_device *pdev) | ||
1586 | { | ||
1587 | struct net_device *dev = platform_get_drvdata(pdev); | ||
1588 | struct pxa168_eth_private *pep = netdev_priv(dev); | ||
1589 | |||
1590 | if (pep->htpr) { | ||
1591 | dma_free_coherent(pep->dev->dev.parent, HASH_ADDR_TABLE_SIZE, | ||
1592 | pep->htpr, pep->htpr_dma); | ||
1593 | pep->htpr = NULL; | ||
1594 | } | ||
1595 | if (pep->clk) { | ||
1596 | clk_disable(pep->clk); | ||
1597 | clk_put(pep->clk); | ||
1598 | pep->clk = NULL; | ||
1599 | } | ||
1600 | if (pep->phy != NULL) | ||
1601 | phy_detach(pep->phy); | ||
1602 | |||
1603 | iounmap(pep->base); | ||
1604 | pep->base = NULL; | ||
1605 | mdiobus_unregister(pep->smi_bus); | ||
1606 | mdiobus_free(pep->smi_bus); | ||
1607 | unregister_netdev(dev); | ||
1608 | cancel_work_sync(&pep->tx_timeout_task); | ||
1609 | free_netdev(dev); | ||
1610 | platform_set_drvdata(pdev, NULL); | ||
1611 | return 0; | ||
1612 | } | ||
1613 | |||
/* Quiesce the MAC so DMA is stopped before the system goes down. */
static void pxa168_eth_shutdown(struct platform_device *pdev)
{
	struct net_device *netdev = platform_get_drvdata(pdev);

	eth_port_reset(netdev);
}
1619 | |||
#ifdef CONFIG_PM
/* Power management is not implemented; returning -ENOSYS makes the PM
 * core report the failure rather than silently suspending a live port.
 */
static int pxa168_eth_resume(struct platform_device *pdev)
{
	return -ENOSYS;
}

static int pxa168_eth_suspend(struct platform_device *pdev, pm_message_t state)
{
	return -ENOSYS;
}

#else
#define pxa168_eth_resume NULL
#define pxa168_eth_suspend NULL
#endif
1635 | |||
/* Platform driver glue; suspend/resume are unimplemented stubs. */
static struct platform_driver pxa168_eth_driver = {
	.probe = pxa168_eth_probe,
	.remove = pxa168_eth_remove,
	.shutdown = pxa168_eth_shutdown,
	.resume = pxa168_eth_resume,
	.suspend = pxa168_eth_suspend,
	.driver = {
		.name = DRIVER_NAME,
	},
};
1646 | |||
/* Module registration boilerplate. */
static int __init pxa168_init_module(void)
{
	return platform_driver_register(&pxa168_eth_driver);
}

static void __exit pxa168_cleanup_module(void)
{
	platform_driver_unregister(&pxa168_eth_driver);
}

module_init(pxa168_init_module);
module_exit(pxa168_cleanup_module);

MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("Ethernet driver for Marvell PXA168");
MODULE_ALIAS("platform:pxa168_eth");
diff --git a/drivers/net/ethernet/marvell/skge.c b/drivers/net/ethernet/marvell/skge.c new file mode 100644 index 000000000000..98ec614c5690 --- /dev/null +++ b/drivers/net/ethernet/marvell/skge.c | |||
@@ -0,0 +1,4133 @@ | |||
1 | /* | ||
2 | * New driver for Marvell Yukon chipset and SysKonnect Gigabit | ||
3 | * Ethernet adapters. Based on earlier sk98lin, e100 and | ||
4 | * FreeBSD if_sk drivers. | ||
5 | * | ||
6 | * This driver intentionally does not support all the features | ||
7 | * of the original driver such as link fail-over and link management because | ||
8 | * those should be done at higher levels. | ||
9 | * | ||
10 | * Copyright (C) 2004, 2005 Stephen Hemminger <shemminger@osdl.org> | ||
11 | * | ||
12 | * This program is free software; you can redistribute it and/or modify | ||
13 | * it under the terms of the GNU General Public License as published by | ||
14 | * the Free Software Foundation; either version 2 of the License. | ||
15 | * | ||
16 | * This program is distributed in the hope that it will be useful, | ||
17 | * but WITHOUT ANY WARRANTY; without even the implied warranty of | ||
18 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the | ||
19 | * GNU General Public License for more details. | ||
20 | * | ||
21 | * You should have received a copy of the GNU General Public License | ||
22 | * along with this program; if not, write to the Free Software | ||
23 | * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. | ||
24 | */ | ||
25 | |||
26 | #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt | ||
27 | |||
28 | #include <linux/in.h> | ||
29 | #include <linux/kernel.h> | ||
30 | #include <linux/module.h> | ||
31 | #include <linux/moduleparam.h> | ||
32 | #include <linux/netdevice.h> | ||
33 | #include <linux/etherdevice.h> | ||
34 | #include <linux/ethtool.h> | ||
35 | #include <linux/pci.h> | ||
36 | #include <linux/if_vlan.h> | ||
37 | #include <linux/ip.h> | ||
38 | #include <linux/delay.h> | ||
39 | #include <linux/crc32.h> | ||
40 | #include <linux/dma-mapping.h> | ||
41 | #include <linux/debugfs.h> | ||
42 | #include <linux/sched.h> | ||
43 | #include <linux/seq_file.h> | ||
44 | #include <linux/mii.h> | ||
45 | #include <linux/slab.h> | ||
46 | #include <linux/dmi.h> | ||
47 | #include <linux/prefetch.h> | ||
48 | #include <asm/irq.h> | ||
49 | |||
50 | #include "skge.h" | ||
51 | |||
52 | #define DRV_NAME "skge" | ||
53 | #define DRV_VERSION "1.14" | ||
54 | |||
55 | #define DEFAULT_TX_RING_SIZE 128 | ||
56 | #define DEFAULT_RX_RING_SIZE 512 | ||
57 | #define MAX_TX_RING_SIZE 1024 | ||
58 | #define TX_LOW_WATER (MAX_SKB_FRAGS + 1) | ||
59 | #define MAX_RX_RING_SIZE 4096 | ||
60 | #define RX_COPY_THRESHOLD 128 | ||
61 | #define RX_BUF_SIZE 1536 | ||
62 | #define PHY_RETRIES 1000 | ||
63 | #define ETH_JUMBO_MTU 9000 | ||
64 | #define TX_WATCHDOG (5 * HZ) | ||
65 | #define NAPI_WEIGHT 64 | ||
66 | #define BLINK_MS 250 | ||
67 | #define LINK_HZ HZ | ||
68 | |||
69 | #define SKGE_EEPROM_MAGIC 0x9933aabb | ||
70 | |||
71 | |||
72 | MODULE_DESCRIPTION("SysKonnect Gigabit Ethernet driver"); | ||
73 | MODULE_AUTHOR("Stephen Hemminger <shemminger@linux-foundation.org>"); | ||
74 | MODULE_LICENSE("GPL"); | ||
75 | MODULE_VERSION(DRV_VERSION); | ||
76 | |||
77 | static const u32 default_msg = (NETIF_MSG_DRV | NETIF_MSG_PROBE | | ||
78 | NETIF_MSG_LINK | NETIF_MSG_IFUP | | ||
79 | NETIF_MSG_IFDOWN); | ||
80 | |||
81 | static int debug = -1; /* defaults above */ | ||
82 | module_param(debug, int, 0); | ||
83 | MODULE_PARM_DESC(debug, "Debug level (0=none,...,16=all)"); | ||
84 | |||
/* PCI IDs claimed by this driver.  Most entries match vendor+device
 * only; the Linksys EG1032 entry additionally matches subsystem device
 * 0x0015 (the v2 board).  Genesis boards are only bound when
 * CONFIG_SKGE_GENESIS is enabled.
 */
static DEFINE_PCI_DEVICE_TABLE(skge_id_table) = {
	{ PCI_DEVICE(PCI_VENDOR_ID_3COM, 0x1700) },	  /* 3Com 3C940 */
	{ PCI_DEVICE(PCI_VENDOR_ID_3COM, 0x80EB) },	  /* 3Com 3C940B */
#ifdef CONFIG_SKGE_GENESIS
	{ PCI_DEVICE(PCI_VENDOR_ID_SYSKONNECT, 0x4300) }, /* SK-9xx */
#endif
	{ PCI_DEVICE(PCI_VENDOR_ID_SYSKONNECT, 0x4320) }, /* SK-98xx V2.0 */
	{ PCI_DEVICE(PCI_VENDOR_ID_DLINK, 0x4b01) },	  /* D-Link DGE-530T (rev.B) */
	{ PCI_DEVICE(PCI_VENDOR_ID_DLINK, 0x4c00) },	  /* D-Link DGE-530T */
	{ PCI_DEVICE(PCI_VENDOR_ID_DLINK, 0x4302) },	  /* D-Link DGE-530T Rev C1 */
	{ PCI_DEVICE(PCI_VENDOR_ID_MARVELL, 0x4320) },	  /* Marvell Yukon 88E8001/8003/8010 */
	{ PCI_DEVICE(PCI_VENDOR_ID_MARVELL, 0x5005) },	  /* Belkin */
	{ PCI_DEVICE(PCI_VENDOR_ID_CNET, 0x434E) },	  /* CNet PowerG-2000 */
	{ PCI_DEVICE(PCI_VENDOR_ID_LINKSYS, 0x1064) },	  /* Linksys EG1064 v2 */
	{ PCI_VENDOR_ID_LINKSYS, 0x1032, PCI_ANY_ID, 0x0015 }, /* Linksys EG1032 v2 */
	{ 0 }
};
MODULE_DEVICE_TABLE(pci, skge_id_table);
103 | |||
104 | static int skge_up(struct net_device *dev); | ||
105 | static int skge_down(struct net_device *dev); | ||
106 | static void skge_phy_reset(struct skge_port *skge); | ||
107 | static void skge_tx_clean(struct net_device *dev); | ||
108 | static int xm_phy_write(struct skge_hw *hw, int port, u16 reg, u16 val); | ||
109 | static int gm_phy_write(struct skge_hw *hw, int port, u16 reg, u16 val); | ||
110 | static void genesis_get_stats(struct skge_port *skge, u64 *data); | ||
111 | static void yukon_get_stats(struct skge_port *skge, u64 *data); | ||
112 | static void yukon_init(struct skge_hw *hw, int port); | ||
113 | static void genesis_mac_init(struct skge_hw *hw, int port); | ||
114 | static void genesis_link_up(struct skge_port *skge); | ||
115 | static void skge_set_multicast(struct net_device *dev); | ||
116 | |||
/* Avoid conditionals by using array: per-port queue addresses and
 * interrupt masks, indexed by port number (0 or 1).
 */
static const int txqaddr[] = { Q_XA1, Q_XA2 };
static const int rxqaddr[] = { Q_R1, Q_R2 };
static const u32 rxirqmask[] = { IS_R1_F, IS_R2_F };
static const u32 txirqmask[] = { IS_XA1_F, IS_XA2_F };
static const u32 napimask[] = { IS_R1_F|IS_XA1_F, IS_R2_F|IS_XA2_F };
static const u32 portmask[] = { IS_PORT_1, IS_PORT_2 };
124 | |||
/* True when this board is a SysKonnect Genesis (XMAC based) adapter.
 * Genesis support is compile-time optional; with CONFIG_SKGE_GENESIS
 * disabled this is constant false, so the compiler can discard all
 * Genesis code paths.
 */
static inline bool is_genesis(const struct skge_hw *hw)
{
#ifdef CONFIG_SKGE_GENESIS
	return hw->chip_id == CHIP_ID_GENESIS;
#else
	return false;
#endif
}
133 | |||
/* Size of the register dump produced by skge_get_regs() (ethtool -d). */
static int skge_get_regs_len(struct net_device *dev)
{
	return 0x4000;
}
138 | |||
/*
 * Returns copy of whole control register region
 * Note: skip RAM address register because accessing it will
 * cause bus hangs!
 */
static void skge_get_regs(struct net_device *dev, struct ethtool_regs *regs,
			  void *p)
{
	const struct skge_port *skge = netdev_priv(dev);
	const void __iomem *io = skge->hw->regs;

	regs->version = 1;
	/* Zero everything first so the skipped window reads as zeros. */
	memset(p, 0, regs->len);
	/* Copy up to (but not including) the RAM address register... */
	memcpy_fromio(p, io, B3_RAM_ADDR);

	/* ...then resume just past it. */
	memcpy_fromio(p + B3_RI_WTO_R1, io + B3_RI_WTO_R1,
		      regs->len - B3_RI_WTO_R1);
}
157 | |||
158 | /* Wake on Lan only supported on Yukon chips with rev 1 or above */ | ||
159 | static u32 wol_supported(const struct skge_hw *hw) | ||
160 | { | ||
161 | if (is_genesis(hw)) | ||
162 | return 0; | ||
163 | |||
164 | if (hw->chip_id == CHIP_ID_YUKON && hw->chip_rev == 0) | ||
165 | return 0; | ||
166 | |||
167 | return WAKE_MAGIC | WAKE_PHY; | ||
168 | } | ||
169 | |||
/* Program the Yukon MAC/PHY for Wake-on-LAN and leave the port in a
 * low-power state.  The register write sequence (reset clear, Vaux on,
 * PHY reset pulse, WOL unit setup) is order-sensitive; do not reorder.
 */
static void skge_wol_init(struct skge_port *skge)
{
	struct skge_hw *hw = skge->hw;
	int port = skge->port;
	u16 ctrl;

	skge_write16(hw, B0_CTST, CS_RST_CLR);
	skge_write16(hw, SK_REG(port, GMAC_LINK_CTRL), GMLC_RST_CLR);

	/* Turn on Vaux */
	skge_write8(hw, B0_POWER_CTRL,
		    PC_VAUX_ENA | PC_VCC_ENA | PC_VAUX_ON | PC_VCC_OFF);

	/* WA code for COMA mode -- clear PHY reset */
	if (hw->chip_id == CHIP_ID_YUKON_LITE &&
	    hw->chip_rev >= CHIP_REV_YU_LITE_A3) {
		u32 reg = skge_read32(hw, B2_GP_IO);
		reg |= GP_DIR_9;
		reg &= ~GP_IO_9;
		skge_write32(hw, B2_GP_IO, reg);
	}

	/* Pulse the PHY reset: assert then deassert with the same
	 * hardware-config strap bits.
	 */
	skge_write32(hw, SK_REG(port, GPHY_CTRL),
		     GPC_DIS_SLEEP |
		     GPC_HWCFG_M_3 | GPC_HWCFG_M_2 | GPC_HWCFG_M_1 | GPC_HWCFG_M_0 |
		     GPC_ANEG_1 | GPC_RST_SET);

	skge_write32(hw, SK_REG(port, GPHY_CTRL),
		     GPC_DIS_SLEEP |
		     GPC_HWCFG_M_3 | GPC_HWCFG_M_2 | GPC_HWCFG_M_1 | GPC_HWCFG_M_0 |
		     GPC_ANEG_1 | GPC_RST_CLR);

	skge_write32(hw, SK_REG(port, GMAC_CTRL), GMC_RST_CLR);

	/* Force to 10/100 skge_reset will re-enable on resume */
	gm_phy_write(hw, port, PHY_MARV_AUNE_ADV,
		     (PHY_AN_100FULL | PHY_AN_100HALF |
		      PHY_AN_10FULL | PHY_AN_10HALF | PHY_AN_CSMA));
	/* no 1000 HD/FD */
	gm_phy_write(hw, port, PHY_MARV_1000T_CTRL, 0);
	gm_phy_write(hw, port, PHY_MARV_CTRL,
		     PHY_CT_RESET | PHY_CT_SPS_LSB | PHY_CT_ANE |
		     PHY_CT_RE_CFG | PHY_CT_DUP_MD);


	/* Set GMAC to no flow control and auto update for speed/duplex */
	gma_write16(hw, port, GM_GP_CTRL,
		    GM_GPCR_FC_TX_DIS|GM_GPCR_TX_ENA|GM_GPCR_RX_ENA|
		    GM_GPCR_DUP_FULL|GM_GPCR_FC_RX_DIS|GM_GPCR_AU_FCT_DIS);

	/* Set WOL address */
	memcpy_toio(hw->regs + WOL_REGS(port, WOL_MAC_ADDR),
		    skge->netdev->dev_addr, ETH_ALEN);

	/* Turn on appropriate WOL control bits */
	skge_write16(hw, WOL_REGS(port, WOL_CTRL_STAT), WOL_CTL_CLEAR_RESULT);
	ctrl = 0;
	if (skge->wol & WAKE_PHY)
		ctrl |= WOL_CTL_ENA_PME_ON_LINK_CHG|WOL_CTL_ENA_LINK_CHG_UNIT;
	else
		ctrl |= WOL_CTL_DIS_PME_ON_LINK_CHG|WOL_CTL_DIS_LINK_CHG_UNIT;

	if (skge->wol & WAKE_MAGIC)
		ctrl |= WOL_CTL_ENA_PME_ON_MAGIC_PKT|WOL_CTL_ENA_MAGIC_PKT_UNIT;
	else
		ctrl |= WOL_CTL_DIS_PME_ON_MAGIC_PKT|WOL_CTL_DIS_MAGIC_PKT_UNIT;

	/* Pattern-match wakeup is never enabled by this driver. */
	ctrl |= WOL_CTL_DIS_PME_ON_PATTERN|WOL_CTL_DIS_PATTERN_UNIT;
	skge_write16(hw, WOL_REGS(port, WOL_CTRL_STAT), ctrl);

	/* block receiver */
	skge_write8(hw, SK_REG(port, RX_GMF_CTRL_T), GMF_RST_SET);
}
243 | |||
244 | static void skge_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol) | ||
245 | { | ||
246 | struct skge_port *skge = netdev_priv(dev); | ||
247 | |||
248 | wol->supported = wol_supported(skge->hw); | ||
249 | wol->wolopts = skge->wol; | ||
250 | } | ||
251 | |||
252 | static int skge_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol) | ||
253 | { | ||
254 | struct skge_port *skge = netdev_priv(dev); | ||
255 | struct skge_hw *hw = skge->hw; | ||
256 | |||
257 | if ((wol->wolopts & ~wol_supported(hw)) || | ||
258 | !device_can_wakeup(&hw->pdev->dev)) | ||
259 | return -EOPNOTSUPP; | ||
260 | |||
261 | skge->wol = wol->wolopts; | ||
262 | |||
263 | device_set_wakeup_enable(&hw->pdev->dev, skge->wol); | ||
264 | |||
265 | return 0; | ||
266 | } | ||
267 | |||
268 | /* Determine supported/advertised modes based on hardware. | ||
269 | * Note: ethtool ADVERTISED_xxx == SUPPORTED_xxx | ||
270 | */ | ||
271 | static u32 skge_supported_modes(const struct skge_hw *hw) | ||
272 | { | ||
273 | u32 supported; | ||
274 | |||
275 | if (hw->copper) { | ||
276 | supported = (SUPPORTED_10baseT_Half | | ||
277 | SUPPORTED_10baseT_Full | | ||
278 | SUPPORTED_100baseT_Half | | ||
279 | SUPPORTED_100baseT_Full | | ||
280 | SUPPORTED_1000baseT_Half | | ||
281 | SUPPORTED_1000baseT_Full | | ||
282 | SUPPORTED_Autoneg | | ||
283 | SUPPORTED_TP); | ||
284 | |||
285 | if (is_genesis(hw)) | ||
286 | supported &= ~(SUPPORTED_10baseT_Half | | ||
287 | SUPPORTED_10baseT_Full | | ||
288 | SUPPORTED_100baseT_Half | | ||
289 | SUPPORTED_100baseT_Full); | ||
290 | |||
291 | else if (hw->chip_id == CHIP_ID_YUKON) | ||
292 | supported &= ~SUPPORTED_1000baseT_Half; | ||
293 | } else | ||
294 | supported = (SUPPORTED_1000baseT_Full | | ||
295 | SUPPORTED_1000baseT_Half | | ||
296 | SUPPORTED_FIBRE | | ||
297 | SUPPORTED_Autoneg); | ||
298 | |||
299 | return supported; | ||
300 | } | ||
301 | |||
302 | static int skge_get_settings(struct net_device *dev, | ||
303 | struct ethtool_cmd *ecmd) | ||
304 | { | ||
305 | struct skge_port *skge = netdev_priv(dev); | ||
306 | struct skge_hw *hw = skge->hw; | ||
307 | |||
308 | ecmd->transceiver = XCVR_INTERNAL; | ||
309 | ecmd->supported = skge_supported_modes(hw); | ||
310 | |||
311 | if (hw->copper) { | ||
312 | ecmd->port = PORT_TP; | ||
313 | ecmd->phy_address = hw->phy_addr; | ||
314 | } else | ||
315 | ecmd->port = PORT_FIBRE; | ||
316 | |||
317 | ecmd->advertising = skge->advertising; | ||
318 | ecmd->autoneg = skge->autoneg; | ||
319 | ethtool_cmd_speed_set(ecmd, skge->speed); | ||
320 | ecmd->duplex = skge->duplex; | ||
321 | return 0; | ||
322 | } | ||
323 | |||
324 | static int skge_set_settings(struct net_device *dev, struct ethtool_cmd *ecmd) | ||
325 | { | ||
326 | struct skge_port *skge = netdev_priv(dev); | ||
327 | const struct skge_hw *hw = skge->hw; | ||
328 | u32 supported = skge_supported_modes(hw); | ||
329 | int err = 0; | ||
330 | |||
331 | if (ecmd->autoneg == AUTONEG_ENABLE) { | ||
332 | ecmd->advertising = supported; | ||
333 | skge->duplex = -1; | ||
334 | skge->speed = -1; | ||
335 | } else { | ||
336 | u32 setting; | ||
337 | u32 speed = ethtool_cmd_speed(ecmd); | ||
338 | |||
339 | switch (speed) { | ||
340 | case SPEED_1000: | ||
341 | if (ecmd->duplex == DUPLEX_FULL) | ||
342 | setting = SUPPORTED_1000baseT_Full; | ||
343 | else if (ecmd->duplex == DUPLEX_HALF) | ||
344 | setting = SUPPORTED_1000baseT_Half; | ||
345 | else | ||
346 | return -EINVAL; | ||
347 | break; | ||
348 | case SPEED_100: | ||
349 | if (ecmd->duplex == DUPLEX_FULL) | ||
350 | setting = SUPPORTED_100baseT_Full; | ||
351 | else if (ecmd->duplex == DUPLEX_HALF) | ||
352 | setting = SUPPORTED_100baseT_Half; | ||
353 | else | ||
354 | return -EINVAL; | ||
355 | break; | ||
356 | |||
357 | case SPEED_10: | ||
358 | if (ecmd->duplex == DUPLEX_FULL) | ||
359 | setting = SUPPORTED_10baseT_Full; | ||
360 | else if (ecmd->duplex == DUPLEX_HALF) | ||
361 | setting = SUPPORTED_10baseT_Half; | ||
362 | else | ||
363 | return -EINVAL; | ||
364 | break; | ||
365 | default: | ||
366 | return -EINVAL; | ||
367 | } | ||
368 | |||
369 | if ((setting & supported) == 0) | ||
370 | return -EINVAL; | ||
371 | |||
372 | skge->speed = speed; | ||
373 | skge->duplex = ecmd->duplex; | ||
374 | } | ||
375 | |||
376 | skge->autoneg = ecmd->autoneg; | ||
377 | skge->advertising = ecmd->advertising; | ||
378 | |||
379 | if (netif_running(dev)) { | ||
380 | skge_down(dev); | ||
381 | err = skge_up(dev); | ||
382 | if (err) { | ||
383 | dev_close(dev); | ||
384 | return err; | ||
385 | } | ||
386 | } | ||
387 | |||
388 | return 0; | ||
389 | } | ||
390 | |||
391 | static void skge_get_drvinfo(struct net_device *dev, | ||
392 | struct ethtool_drvinfo *info) | ||
393 | { | ||
394 | struct skge_port *skge = netdev_priv(dev); | ||
395 | |||
396 | strcpy(info->driver, DRV_NAME); | ||
397 | strcpy(info->version, DRV_VERSION); | ||
398 | strcpy(info->fw_version, "N/A"); | ||
399 | strcpy(info->bus_info, pci_name(skge->hw->pdev)); | ||
400 | } | ||
401 | |||
/* Mapping from ethtool statistic name to the per-MAC hardware counter
 * offsets (XMAC on Genesis, GMA on Yukon).  skge_get_stats() indexes the
 * resulting u64 array by position, so the order of entries is fixed.
 * NOTE(review): "rx_toolong" and "rx_too_long" both read
 * XM_RXF_LNG_ERR/GM_RXF_LNG_ERR — looks like a duplicated entry;
 * confirm intent before changing.
 */
static const struct skge_stat {
	char	   name[ETH_GSTRING_LEN];
	u16	   xmac_offset;
	u16	   gma_offset;
} skge_stats[] = {
	{ "tx_bytes",		XM_TXO_OK_HI,  GM_TXO_OK_HI },
	{ "rx_bytes",		XM_RXO_OK_HI,  GM_RXO_OK_HI },

	{ "tx_broadcast",	XM_TXF_BC_OK,  GM_TXF_BC_OK },
	{ "rx_broadcast",	XM_RXF_BC_OK,  GM_RXF_BC_OK },
	{ "tx_multicast",	XM_TXF_MC_OK,  GM_TXF_MC_OK },
	{ "rx_multicast",	XM_RXF_MC_OK,  GM_RXF_MC_OK },
	{ "tx_unicast",		XM_TXF_UC_OK,  GM_TXF_UC_OK },
	{ "rx_unicast",		XM_RXF_UC_OK,  GM_RXF_UC_OK },
	{ "tx_mac_pause",	XM_TXF_MPAUSE, GM_TXF_MPAUSE },
	{ "rx_mac_pause",	XM_RXF_MPAUSE, GM_RXF_MPAUSE },

	{ "collisions",		XM_TXF_SNG_COL, GM_TXF_SNG_COL },
	{ "multi_collisions",	XM_TXF_MUL_COL, GM_TXF_MUL_COL },
	{ "aborted",		XM_TXF_ABO_COL, GM_TXF_ABO_COL },
	{ "late_collision",	XM_TXF_LAT_COL, GM_TXF_LAT_COL },
	{ "fifo_underrun",	XM_TXE_FIFO_UR, GM_TXE_FIFO_UR },
	{ "fifo_overflow",	XM_RXE_FIFO_OV, GM_RXE_FIFO_OV },

	{ "rx_toolong",		XM_RXF_LNG_ERR, GM_RXF_LNG_ERR },
	{ "rx_jabber",		XM_RXF_JAB_PKT, GM_RXF_JAB_PKT },
	{ "rx_runt",		XM_RXE_RUNT,	GM_RXE_FRAG },
	{ "rx_too_long",	XM_RXF_LNG_ERR, GM_RXF_LNG_ERR },
	{ "rx_fcs_error",	XM_RXF_FCS_ERR, GM_RXF_FCS_ERR },
};
432 | |||
433 | static int skge_get_sset_count(struct net_device *dev, int sset) | ||
434 | { | ||
435 | switch (sset) { | ||
436 | case ETH_SS_STATS: | ||
437 | return ARRAY_SIZE(skge_stats); | ||
438 | default: | ||
439 | return -EOPNOTSUPP; | ||
440 | } | ||
441 | } | ||
442 | |||
443 | static void skge_get_ethtool_stats(struct net_device *dev, | ||
444 | struct ethtool_stats *stats, u64 *data) | ||
445 | { | ||
446 | struct skge_port *skge = netdev_priv(dev); | ||
447 | |||
448 | if (is_genesis(skge->hw)) | ||
449 | genesis_get_stats(skge, data); | ||
450 | else | ||
451 | yukon_get_stats(skge, data); | ||
452 | } | ||
453 | |||
/* Use hardware MIB variables for critical path statistics and
 * transmit feedback not reported at interrupt.
 * Other errors are accounted for in interrupt handler.
 */
static struct net_device_stats *skge_get_stats(struct net_device *dev)
{
	struct skge_port *skge = netdev_priv(dev);
	u64 data[ARRAY_SIZE(skge_stats)];

	if (is_genesis(skge->hw))
		genesis_get_stats(skge, data);
	else
		yukon_get_stats(skge, data);

	/* Indices follow skge_stats[] order: 0=tx_bytes 1=rx_bytes,
	 * 2/4/6 = tx bcast/mcast/ucast, 3/5/7 = rx bcast/mcast/ucast,
	 * 10 = collisions, 12 = aborted.
	 */
	dev->stats.tx_bytes = data[0];
	dev->stats.rx_bytes = data[1];
	dev->stats.tx_packets = data[2] + data[4] + data[6];
	dev->stats.rx_packets = data[3] + data[5] + data[7];
	dev->stats.multicast = data[3] + data[5];
	dev->stats.collisions = data[10];
	dev->stats.tx_aborted_errors = data[12];

	return &dev->stats;
}
478 | |||
479 | static void skge_get_strings(struct net_device *dev, u32 stringset, u8 *data) | ||
480 | { | ||
481 | int i; | ||
482 | |||
483 | switch (stringset) { | ||
484 | case ETH_SS_STATS: | ||
485 | for (i = 0; i < ARRAY_SIZE(skge_stats); i++) | ||
486 | memcpy(data + i * ETH_GSTRING_LEN, | ||
487 | skge_stats[i].name, ETH_GSTRING_LEN); | ||
488 | break; | ||
489 | } | ||
490 | } | ||
491 | |||
492 | static void skge_get_ring_param(struct net_device *dev, | ||
493 | struct ethtool_ringparam *p) | ||
494 | { | ||
495 | struct skge_port *skge = netdev_priv(dev); | ||
496 | |||
497 | p->rx_max_pending = MAX_RX_RING_SIZE; | ||
498 | p->tx_max_pending = MAX_TX_RING_SIZE; | ||
499 | p->rx_mini_max_pending = 0; | ||
500 | p->rx_jumbo_max_pending = 0; | ||
501 | |||
502 | p->rx_pending = skge->rx_ring.count; | ||
503 | p->tx_pending = skge->tx_ring.count; | ||
504 | p->rx_mini_pending = 0; | ||
505 | p->rx_jumbo_pending = 0; | ||
506 | } | ||
507 | |||
508 | static int skge_set_ring_param(struct net_device *dev, | ||
509 | struct ethtool_ringparam *p) | ||
510 | { | ||
511 | struct skge_port *skge = netdev_priv(dev); | ||
512 | int err = 0; | ||
513 | |||
514 | if (p->rx_pending == 0 || p->rx_pending > MAX_RX_RING_SIZE || | ||
515 | p->tx_pending < TX_LOW_WATER || p->tx_pending > MAX_TX_RING_SIZE) | ||
516 | return -EINVAL; | ||
517 | |||
518 | skge->rx_ring.count = p->rx_pending; | ||
519 | skge->tx_ring.count = p->tx_pending; | ||
520 | |||
521 | if (netif_running(dev)) { | ||
522 | skge_down(dev); | ||
523 | err = skge_up(dev); | ||
524 | if (err) | ||
525 | dev_close(dev); | ||
526 | } | ||
527 | |||
528 | return err; | ||
529 | } | ||
530 | |||
/* ethtool msglevel accessors: msg_enable gates the netif_*() debug
 * printouts (see 'default_msg' and the 'debug' module parameter).
 */
static u32 skge_get_msglevel(struct net_device *netdev)
{
	struct skge_port *skge = netdev_priv(netdev);
	return skge->msg_enable;
}

static void skge_set_msglevel(struct net_device *netdev, u32 value)
{
	struct skge_port *skge = netdev_priv(netdev);
	skge->msg_enable = value;
}
542 | |||
543 | static int skge_nway_reset(struct net_device *dev) | ||
544 | { | ||
545 | struct skge_port *skge = netdev_priv(dev); | ||
546 | |||
547 | if (skge->autoneg != AUTONEG_ENABLE || !netif_running(dev)) | ||
548 | return -EINVAL; | ||
549 | |||
550 | skge_phy_reset(skge); | ||
551 | return 0; | ||
552 | } | ||
553 | |||
554 | static void skge_get_pauseparam(struct net_device *dev, | ||
555 | struct ethtool_pauseparam *ecmd) | ||
556 | { | ||
557 | struct skge_port *skge = netdev_priv(dev); | ||
558 | |||
559 | ecmd->rx_pause = ((skge->flow_control == FLOW_MODE_SYMMETRIC) || | ||
560 | (skge->flow_control == FLOW_MODE_SYM_OR_REM)); | ||
561 | ecmd->tx_pause = (ecmd->rx_pause || | ||
562 | (skge->flow_control == FLOW_MODE_LOC_SEND)); | ||
563 | |||
564 | ecmd->autoneg = ecmd->rx_pause || ecmd->tx_pause; | ||
565 | } | ||
566 | |||
567 | static int skge_set_pauseparam(struct net_device *dev, | ||
568 | struct ethtool_pauseparam *ecmd) | ||
569 | { | ||
570 | struct skge_port *skge = netdev_priv(dev); | ||
571 | struct ethtool_pauseparam old; | ||
572 | int err = 0; | ||
573 | |||
574 | skge_get_pauseparam(dev, &old); | ||
575 | |||
576 | if (ecmd->autoneg != old.autoneg) | ||
577 | skge->flow_control = ecmd->autoneg ? FLOW_MODE_NONE : FLOW_MODE_SYMMETRIC; | ||
578 | else { | ||
579 | if (ecmd->rx_pause && ecmd->tx_pause) | ||
580 | skge->flow_control = FLOW_MODE_SYMMETRIC; | ||
581 | else if (ecmd->rx_pause && !ecmd->tx_pause) | ||
582 | skge->flow_control = FLOW_MODE_SYM_OR_REM; | ||
583 | else if (!ecmd->rx_pause && ecmd->tx_pause) | ||
584 | skge->flow_control = FLOW_MODE_LOC_SEND; | ||
585 | else | ||
586 | skge->flow_control = FLOW_MODE_NONE; | ||
587 | } | ||
588 | |||
589 | if (netif_running(dev)) { | ||
590 | skge_down(dev); | ||
591 | err = skge_up(dev); | ||
592 | if (err) { | ||
593 | dev_close(dev); | ||
594 | return err; | ||
595 | } | ||
596 | } | ||
597 | |||
598 | return 0; | ||
599 | } | ||
600 | |||
601 | /* Chip internal frequency for clock calculations */ | ||
602 | static inline u32 hwkhz(const struct skge_hw *hw) | ||
603 | { | ||
604 | return is_genesis(hw) ? 53125 : 78125; | ||
605 | } | ||
606 | |||
607 | /* Chip HZ to microseconds */ | ||
608 | static inline u32 skge_clk2usec(const struct skge_hw *hw, u32 ticks) | ||
609 | { | ||
610 | return (ticks * 1000) / hwkhz(hw); | ||
611 | } | ||
612 | |||
613 | /* Microseconds to chip HZ */ | ||
614 | static inline u32 skge_usecs2clk(const struct skge_hw *hw, u32 usec) | ||
615 | { | ||
616 | return hwkhz(hw) * usec / 1000; | ||
617 | } | ||
618 | |||
/* ethtool: report current interrupt moderation settings.
 * The moderation timer is shared by both ports of the board; a
 * direction counts as coalesced only when its irq bit for this
 * port is set in the moderation mask.
 */
static int skge_get_coalesce(struct net_device *dev,
			     struct ethtool_coalesce *ecmd)
{
	struct skge_port *skge = netdev_priv(dev);
	struct skge_hw *hw = skge->hw;
	int port = skge->port;

	ecmd->rx_coalesce_usecs = 0;
	ecmd->tx_coalesce_usecs = 0;

	/* timer running: report its delay for each enabled direction */
	if (skge_read32(hw, B2_IRQM_CTRL) & TIM_START) {
		u32 delay = skge_clk2usec(hw, skge_read32(hw, B2_IRQM_INI));
		u32 msk = skge_read32(hw, B2_IRQM_MSK);

		if (msk & rxirqmask[port])
			ecmd->rx_coalesce_usecs = delay;
		if (msk & txirqmask[port])
			ecmd->tx_coalesce_usecs = delay;
	}

	return 0;
}
641 | |||
642 | /* Note: interrupt timer is per board, but can turn on/off per port */ | ||
643 | static int skge_set_coalesce(struct net_device *dev, | ||
644 | struct ethtool_coalesce *ecmd) | ||
645 | { | ||
646 | struct skge_port *skge = netdev_priv(dev); | ||
647 | struct skge_hw *hw = skge->hw; | ||
648 | int port = skge->port; | ||
649 | u32 msk = skge_read32(hw, B2_IRQM_MSK); | ||
650 | u32 delay = 25; | ||
651 | |||
652 | if (ecmd->rx_coalesce_usecs == 0) | ||
653 | msk &= ~rxirqmask[port]; | ||
654 | else if (ecmd->rx_coalesce_usecs < 25 || | ||
655 | ecmd->rx_coalesce_usecs > 33333) | ||
656 | return -EINVAL; | ||
657 | else { | ||
658 | msk |= rxirqmask[port]; | ||
659 | delay = ecmd->rx_coalesce_usecs; | ||
660 | } | ||
661 | |||
662 | if (ecmd->tx_coalesce_usecs == 0) | ||
663 | msk &= ~txirqmask[port]; | ||
664 | else if (ecmd->tx_coalesce_usecs < 25 || | ||
665 | ecmd->tx_coalesce_usecs > 33333) | ||
666 | return -EINVAL; | ||
667 | else { | ||
668 | msk |= txirqmask[port]; | ||
669 | delay = min(delay, ecmd->rx_coalesce_usecs); | ||
670 | } | ||
671 | |||
672 | skge_write32(hw, B2_IRQM_MSK, msk); | ||
673 | if (msk == 0) | ||
674 | skge_write32(hw, B2_IRQM_CTRL, TIM_STOP); | ||
675 | else { | ||
676 | skge_write32(hw, B2_IRQM_INI, skge_usecs2clk(hw, delay)); | ||
677 | skge_write32(hw, B2_IRQM_CTRL, TIM_START); | ||
678 | } | ||
679 | return 0; | ||
680 | } | ||
681 | |||
enum led_mode { LED_MODE_OFF, LED_MODE_ON, LED_MODE_TST };

/* Drive the port LEDs to the requested mode.
 * Genesis (XMAC) boards control LEDs through per-port registers
 * (and the Broadcom PHY when present); Yukon boards program the
 * Marvell PHY LED control/override registers.  Takes phy_lock
 * because PHY registers are shared with the interrupt path.
 */
static void skge_led(struct skge_port *skge, enum led_mode mode)
{
	struct skge_hw *hw = skge->hw;
	int port = skge->port;

	spin_lock_bh(&hw->phy_lock);
	if (is_genesis(hw)) {
		switch (mode) {
		case LED_MODE_OFF:
			/* BCOM PHY drives the tx LED; otherwise use MAC regs */
			if (hw->phy_type == SK_PHY_BCOM)
				xm_phy_write(hw, port, PHY_BCOM_P_EXT_CTRL, PHY_B_PEC_LED_OFF);
			else {
				skge_write32(hw, SK_REG(port, TX_LED_VAL), 0);
				skge_write8(hw, SK_REG(port, TX_LED_CTRL), LED_T_OFF);
			}
			skge_write8(hw, SK_REG(port, LNK_LED_REG), LINKLED_OFF);
			skge_write32(hw, SK_REG(port, RX_LED_VAL), 0);
			skge_write8(hw, SK_REG(port, RX_LED_CTRL), LED_T_OFF);
			break;

		case LED_MODE_ON:
			skge_write8(hw, SK_REG(port, LNK_LED_REG), LINKLED_ON);
			skge_write8(hw, SK_REG(port, LNK_LED_REG), LINKLED_LINKSYNC_ON);

			skge_write8(hw, SK_REG(port, RX_LED_CTRL), LED_START);
			skge_write8(hw, SK_REG(port, TX_LED_CTRL), LED_START);

			break;

		case LED_MODE_TST:
			/* test mode: force counters so LEDs blink visibly */
			skge_write8(hw, SK_REG(port, RX_LED_TST), LED_T_ON);
			skge_write32(hw, SK_REG(port, RX_LED_VAL), 100);
			skge_write8(hw, SK_REG(port, RX_LED_CTRL), LED_START);

			if (hw->phy_type == SK_PHY_BCOM)
				xm_phy_write(hw, port, PHY_BCOM_P_EXT_CTRL, PHY_B_PEC_LED_ON);
			else {
				skge_write8(hw, SK_REG(port, TX_LED_TST), LED_T_ON);
				skge_write32(hw, SK_REG(port, TX_LED_VAL), 100);
				skge_write8(hw, SK_REG(port, TX_LED_CTRL), LED_START);
			}

		}
	} else {
		switch (mode) {
		case LED_MODE_OFF:
			/* clear control, then force every LED off via override */
			gm_phy_write(hw, port, PHY_MARV_LED_CTRL, 0);
			gm_phy_write(hw, port, PHY_MARV_LED_OVER,
				     PHY_M_LED_MO_DUP(MO_LED_OFF)  |
				     PHY_M_LED_MO_10(MO_LED_OFF)   |
				     PHY_M_LED_MO_100(MO_LED_OFF)  |
				     PHY_M_LED_MO_1000(MO_LED_OFF) |
				     PHY_M_LED_MO_RX(MO_LED_OFF));
			break;
		case LED_MODE_ON:
			gm_phy_write(hw, port, PHY_MARV_LED_CTRL,
				     PHY_M_LED_PULS_DUR(PULS_170MS) |
				     PHY_M_LED_BLINK_RT(BLINK_84MS) |
				     PHY_M_LEDC_TX_CTRL |
				     PHY_M_LEDC_DP_CTRL);

			gm_phy_write(hw, port, PHY_MARV_LED_OVER,
				     PHY_M_LED_MO_RX(MO_LED_OFF) |
				     (skge->speed == SPEED_100 ?
				      PHY_M_LED_MO_100(MO_LED_ON) : 0));
			break;
		case LED_MODE_TST:
			/* force every LED on via override */
			gm_phy_write(hw, port, PHY_MARV_LED_CTRL, 0);
			gm_phy_write(hw, port, PHY_MARV_LED_OVER,
				     PHY_M_LED_MO_DUP(MO_LED_ON)  |
				     PHY_M_LED_MO_10(MO_LED_ON)   |
				     PHY_M_LED_MO_100(MO_LED_ON)  |
				     PHY_M_LED_MO_1000(MO_LED_ON) |
				     PHY_M_LED_MO_RX(MO_LED_ON));
		}
	}
	spin_unlock_bh(&hw->phy_lock);
}
761 | |||
762 | /* blink LED's for finding board */ | ||
763 | static int skge_set_phys_id(struct net_device *dev, | ||
764 | enum ethtool_phys_id_state state) | ||
765 | { | ||
766 | struct skge_port *skge = netdev_priv(dev); | ||
767 | |||
768 | switch (state) { | ||
769 | case ETHTOOL_ID_ACTIVE: | ||
770 | return 2; /* cycle on/off twice per second */ | ||
771 | |||
772 | case ETHTOOL_ID_ON: | ||
773 | skge_led(skge, LED_MODE_TST); | ||
774 | break; | ||
775 | |||
776 | case ETHTOOL_ID_OFF: | ||
777 | skge_led(skge, LED_MODE_OFF); | ||
778 | break; | ||
779 | |||
780 | case ETHTOOL_ID_INACTIVE: | ||
781 | /* back to regular LED state */ | ||
782 | skge_led(skge, netif_running(dev) ? LED_MODE_ON : LED_MODE_OFF); | ||
783 | } | ||
784 | |||
785 | return 0; | ||
786 | } | ||
787 | |||
788 | static int skge_get_eeprom_len(struct net_device *dev) | ||
789 | { | ||
790 | struct skge_port *skge = netdev_priv(dev); | ||
791 | u32 reg2; | ||
792 | |||
793 | pci_read_config_dword(skge->hw->pdev, PCI_DEV_REG2, ®2); | ||
794 | return 1 << (((reg2 & PCI_VPD_ROM_SZ) >> 14) + 8); | ||
795 | } | ||
796 | |||
/* Read one 32-bit word from VPD EEPROM at @offset.
 * Writes the address, then polls until hardware sets PCI_VPD_ADDR_F
 * to signal the data register is valid.
 * NOTE(review): the poll has no timeout; a wedged device would hang
 * here -- confirm whether a bounded retry is warranted.
 */
static u32 skge_vpd_read(struct pci_dev *pdev, int cap, u16 offset)
{
	u32 val;

	pci_write_config_word(pdev, cap + PCI_VPD_ADDR, offset);

	/* flag set => read completed */
	do {
		pci_read_config_word(pdev, cap + PCI_VPD_ADDR, &offset);
	} while (!(offset & PCI_VPD_ADDR_F));

	pci_read_config_dword(pdev, cap + PCI_VPD_DATA, &val);
	return val;
}
810 | |||
/* Write one 32-bit word to VPD EEPROM at @offset.
 * Setting PCI_VPD_ADDR_F with the address starts the write; the
 * hardware clears the flag when the write has finished.
 * NOTE(review): unbounded poll, same caveat as skge_vpd_read().
 */
static void skge_vpd_write(struct pci_dev *pdev, int cap, u16 offset, u32 val)
{
	pci_write_config_dword(pdev, cap + PCI_VPD_DATA, val);
	pci_write_config_word(pdev, cap + PCI_VPD_ADDR,
			      offset | PCI_VPD_ADDR_F);

	/* flag clear => write completed */
	do {
		pci_read_config_word(pdev, cap + PCI_VPD_ADDR, &offset);
	} while (offset & PCI_VPD_ADDR_F);
}
821 | |||
822 | static int skge_get_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom, | ||
823 | u8 *data) | ||
824 | { | ||
825 | struct skge_port *skge = netdev_priv(dev); | ||
826 | struct pci_dev *pdev = skge->hw->pdev; | ||
827 | int cap = pci_find_capability(pdev, PCI_CAP_ID_VPD); | ||
828 | int length = eeprom->len; | ||
829 | u16 offset = eeprom->offset; | ||
830 | |||
831 | if (!cap) | ||
832 | return -EINVAL; | ||
833 | |||
834 | eeprom->magic = SKGE_EEPROM_MAGIC; | ||
835 | |||
836 | while (length > 0) { | ||
837 | u32 val = skge_vpd_read(pdev, cap, offset); | ||
838 | int n = min_t(int, length, sizeof(val)); | ||
839 | |||
840 | memcpy(data, &val, n); | ||
841 | length -= n; | ||
842 | data += n; | ||
843 | offset += n; | ||
844 | } | ||
845 | return 0; | ||
846 | } | ||
847 | |||
/* ethtool: write the given range to VPD EEPROM.  The caller must
 * supply the SKGE_EEPROM_MAGIC cookie to guard against accidental
 * writes.  Data is written one 32-bit word at a time.
 */
static int skge_set_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom,
			   u8 *data)
{
	struct skge_port *skge = netdev_priv(dev);
	struct pci_dev *pdev = skge->hw->pdev;
	int cap = pci_find_capability(pdev, PCI_CAP_ID_VPD);
	int length = eeprom->len;
	u16 offset = eeprom->offset;

	if (!cap)
		return -EINVAL;

	if (eeprom->magic != SKGE_EEPROM_MAGIC)
		return -EINVAL;

	while (length > 0) {
		u32 val;
		int n = min_t(int, length, sizeof(val));

		/* partial word: read-modify-write so the bytes beyond
		 * the caller's buffer are preserved */
		if (n < sizeof(val))
			val = skge_vpd_read(pdev, cap, offset);
		memcpy(&val, data, n);

		skge_vpd_write(pdev, cap, offset, val);

		length -= n;
		data += n;
		offset += n;
	}
	return 0;
}
879 | |||
/* ethtool entry points; get_link uses the generic carrier-based
 * helper, everything else is implemented in this file. */
static const struct ethtool_ops skge_ethtool_ops = {
	.get_settings	= skge_get_settings,
	.set_settings	= skge_set_settings,
	.get_drvinfo	= skge_get_drvinfo,
	.get_regs_len	= skge_get_regs_len,
	.get_regs	= skge_get_regs,
	.get_wol	= skge_get_wol,
	.set_wol	= skge_set_wol,
	.get_msglevel	= skge_get_msglevel,
	.set_msglevel	= skge_set_msglevel,
	.nway_reset	= skge_nway_reset,
	.get_link	= ethtool_op_get_link,
	.get_eeprom_len	= skge_get_eeprom_len,
	.get_eeprom	= skge_get_eeprom,
	.set_eeprom	= skge_set_eeprom,
	.get_ringparam	= skge_get_ring_param,
	.set_ringparam	= skge_set_ring_param,
	.get_pauseparam	= skge_get_pauseparam,
	.set_pauseparam	= skge_set_pauseparam,
	.get_coalesce	= skge_get_coalesce,
	.set_coalesce	= skge_set_coalesce,
	.get_strings	= skge_get_strings,
	.set_phys_id	= skge_set_phys_id,
	.get_sset_count = skge_get_sset_count,
	.get_ethtool_stats = skge_get_ethtool_stats,
};
906 | |||
907 | /* | ||
908 | * Allocate ring elements and chain them together | ||
909 | * One-to-one association of board descriptors with ring elements | ||
910 | */ | ||
911 | static int skge_ring_alloc(struct skge_ring *ring, void *vaddr, u32 base) | ||
912 | { | ||
913 | struct skge_tx_desc *d; | ||
914 | struct skge_element *e; | ||
915 | int i; | ||
916 | |||
917 | ring->start = kcalloc(ring->count, sizeof(*e), GFP_KERNEL); | ||
918 | if (!ring->start) | ||
919 | return -ENOMEM; | ||
920 | |||
921 | for (i = 0, e = ring->start, d = vaddr; i < ring->count; i++, e++, d++) { | ||
922 | e->desc = d; | ||
923 | if (i == ring->count - 1) { | ||
924 | e->next = ring->start; | ||
925 | d->next_offset = base; | ||
926 | } else { | ||
927 | e->next = e + 1; | ||
928 | d->next_offset = base + (i+1) * sizeof(*d); | ||
929 | } | ||
930 | } | ||
931 | ring->to_use = ring->to_clean = ring->start; | ||
932 | |||
933 | return 0; | ||
934 | } | ||
935 | |||
/* Allocate and setup a new buffer for receiving.
 * Maps the skb for DMA and hands the descriptor to hardware.
 * NOTE(review): the pci_map_single() result is not checked with
 * pci_dma_mapping_error(); fixing that would require changing the
 * return type and callers -- flagging only.
 */
static void skge_rx_setup(struct skge_port *skge, struct skge_element *e,
			  struct sk_buff *skb, unsigned int bufsize)
{
	struct skge_rx_desc *rd = e->desc;
	u64 map;

	map = pci_map_single(skge->hw->pdev, skb->data, bufsize,
			     PCI_DMA_FROMDEVICE);

	rd->dma_lo = map;
	rd->dma_hi = map >> 32;
	e->skb = skb;
	rd->csum1_start = ETH_HLEN;
	rd->csum2_start = ETH_HLEN;
	rd->csum1 = 0;
	rd->csum2 = 0;

	/* all descriptor fields must be visible before ownership is
	 * transferred to the chip via BMU_OWN below */
	wmb();

	rd->control = BMU_OWN | BMU_STF | BMU_IRQ_EOF | BMU_TCP_CHECK | bufsize;
	dma_unmap_addr_set(e, mapaddr, map);
	dma_unmap_len_set(e, maplen, bufsize);
}
960 | |||
/* Resume receiving using existing skb,
 * Note: DMA address is not changed by chip.
 * MTU not changed while receiver active.
 */
static inline void skge_rx_reuse(struct skge_element *e, unsigned int size)
{
	struct skge_rx_desc *rd = e->desc;

	rd->csum2 = 0;
	rd->csum2_start = ETH_HLEN;

	/* descriptor updates must be visible before BMU_OWN returns
	 * the descriptor to the hardware */
	wmb();

	rd->control = BMU_OWN | BMU_STF | BMU_IRQ_EOF | BMU_TCP_CHECK | size;
}
976 | |||
977 | |||
978 | /* Free all buffers in receive ring, assumes receiver stopped */ | ||
979 | static void skge_rx_clean(struct skge_port *skge) | ||
980 | { | ||
981 | struct skge_hw *hw = skge->hw; | ||
982 | struct skge_ring *ring = &skge->rx_ring; | ||
983 | struct skge_element *e; | ||
984 | |||
985 | e = ring->start; | ||
986 | do { | ||
987 | struct skge_rx_desc *rd = e->desc; | ||
988 | rd->control = 0; | ||
989 | if (e->skb) { | ||
990 | pci_unmap_single(hw->pdev, | ||
991 | dma_unmap_addr(e, mapaddr), | ||
992 | dma_unmap_len(e, maplen), | ||
993 | PCI_DMA_FROMDEVICE); | ||
994 | dev_kfree_skb(e->skb); | ||
995 | e->skb = NULL; | ||
996 | } | ||
997 | } while ((e = e->next) != ring->start); | ||
998 | } | ||
999 | |||
1000 | |||
1001 | /* Allocate buffers for receive ring | ||
1002 | * For receive: to_clean is next received frame. | ||
1003 | */ | ||
1004 | static int skge_rx_fill(struct net_device *dev) | ||
1005 | { | ||
1006 | struct skge_port *skge = netdev_priv(dev); | ||
1007 | struct skge_ring *ring = &skge->rx_ring; | ||
1008 | struct skge_element *e; | ||
1009 | |||
1010 | e = ring->start; | ||
1011 | do { | ||
1012 | struct sk_buff *skb; | ||
1013 | |||
1014 | skb = __netdev_alloc_skb(dev, skge->rx_buf_size + NET_IP_ALIGN, | ||
1015 | GFP_KERNEL); | ||
1016 | if (!skb) | ||
1017 | return -ENOMEM; | ||
1018 | |||
1019 | skb_reserve(skb, NET_IP_ALIGN); | ||
1020 | skge_rx_setup(skge, e, skb, skge->rx_buf_size); | ||
1021 | } while ((e = e->next) != ring->start); | ||
1022 | |||
1023 | ring->to_clean = ring->start; | ||
1024 | return 0; | ||
1025 | } | ||
1026 | |||
1027 | static const char *skge_pause(enum pause_status status) | ||
1028 | { | ||
1029 | switch (status) { | ||
1030 | case FLOW_STAT_NONE: | ||
1031 | return "none"; | ||
1032 | case FLOW_STAT_REM_SEND: | ||
1033 | return "rx only"; | ||
1034 | case FLOW_STAT_LOC_SEND: | ||
1035 | return "tx_only"; | ||
1036 | case FLOW_STAT_SYMMETRIC: /* Both station may send PAUSE */ | ||
1037 | return "both"; | ||
1038 | default: | ||
1039 | return "indeterminated"; | ||
1040 | } | ||
1041 | } | ||
1042 | |||
1043 | |||
1044 | static void skge_link_up(struct skge_port *skge) | ||
1045 | { | ||
1046 | skge_write8(skge->hw, SK_REG(skge->port, LNK_LED_REG), | ||
1047 | LED_BLK_OFF|LED_SYNC_OFF|LED_ON); | ||
1048 | |||
1049 | netif_carrier_on(skge->netdev); | ||
1050 | netif_wake_queue(skge->netdev); | ||
1051 | |||
1052 | netif_info(skge, link, skge->netdev, | ||
1053 | "Link is up at %d Mbps, %s duplex, flow control %s\n", | ||
1054 | skge->speed, | ||
1055 | skge->duplex == DUPLEX_FULL ? "full" : "half", | ||
1056 | skge_pause(skge->flow_status)); | ||
1057 | } | ||
1058 | |||
/* Handle link-down: turn off the link LED and stop the tx queue */
static void skge_link_down(struct skge_port *skge)
{
	skge_write8(skge->hw, SK_REG(skge->port, LNK_LED_REG), LED_OFF);
	netif_carrier_off(skge->netdev);
	netif_stop_queue(skge->netdev);

	netif_info(skge, link, skge->netdev, "Link is down\n");
}
1067 | |||
/* XMAC (Genesis) link went down: mask MAC interrupts and, if the
 * stack still thinks the carrier is up, report link-down.
 */
static void xm_link_down(struct skge_hw *hw, int port)
{
	struct net_device *dev = hw->dev[port];
	struct skge_port *skge = netdev_priv(dev);

	xm_write16(hw, port, XM_IMSK, XM_IMSK_DISABLE);

	if (netif_carrier_ok(dev))
		skge_link_down(skge);
}
1078 | |||
/* Low-level XMAC PHY register read.
 * Writing XM_PHY_ADDR plus the initial data-register read starts the
 * MDIO transfer; for external PHYs we then poll for XM_MMU_PHY_RDY
 * before reading the real value.  The internal XMAC PHY needs no
 * wait.  Returns 0 on success, -ETIMEDOUT if the PHY never became
 * ready.
 */
static int __xm_phy_read(struct skge_hw *hw, int port, u16 reg, u16 *val)
{
	int i;

	xm_write16(hw, port, XM_PHY_ADDR, reg | hw->phy_addr);
	*val = xm_read16(hw, port, XM_PHY_DATA);

	if (hw->phy_type == SK_PHY_XMAC)
		goto ready;

	for (i = 0; i < PHY_RETRIES; i++) {
		if (xm_read16(hw, port, XM_MMU_CMD) & XM_MMU_PHY_RDY)
			goto ready;
		udelay(1);
	}

	return -ETIMEDOUT;
 ready:
	*val = xm_read16(hw, port, XM_PHY_DATA);

	return 0;
}
1101 | |||
1102 | static u16 xm_phy_read(struct skge_hw *hw, int port, u16 reg) | ||
1103 | { | ||
1104 | u16 v = 0; | ||
1105 | if (__xm_phy_read(hw, port, reg, &v)) | ||
1106 | pr_warning("%s: phy read timed out\n", hw->dev[port]->name); | ||
1107 | return v; | ||
1108 | } | ||
1109 | |||
/* Low-level XMAC PHY register write.
 * Waits for the MDIO interface to go idle, writes the value, then
 * waits for the write to complete.  Returns 0 on success, -EIO if
 * the interface never went idle, -ETIMEDOUT if the write did not
 * complete.
 */
static int xm_phy_write(struct skge_hw *hw, int port, u16 reg, u16 val)
{
	int i;

	xm_write16(hw, port, XM_PHY_ADDR, reg | hw->phy_addr);
	for (i = 0; i < PHY_RETRIES; i++) {
		if (!(xm_read16(hw, port, XM_MMU_CMD) & XM_MMU_PHY_BUSY))
			goto ready;
		udelay(1);
	}
	return -EIO;

 ready:
	xm_write16(hw, port, XM_PHY_DATA, val);
	for (i = 0; i < PHY_RETRIES; i++) {
		if (!(xm_read16(hw, port, XM_MMU_CMD) & XM_MMU_PHY_BUSY))
			return 0;
		udelay(1);
	}
	return -ETIMEDOUT;
}
1131 | |||
/* One-time board-level init for Genesis (XMAC) hardware: blink
 * source counter, MAC arbiter and packet arbiter timeouts.
 */
static void genesis_init(struct skge_hw *hw)
{
	/* set blink source counter */
	skge_write32(hw, B2_BSC_INI, (SK_BLK_DUR * SK_FACT_53) / 100);
	skge_write8(hw, B2_BSC_CTRL, BSC_START);

	/* configure mac arbiter */
	skge_write16(hw, B3_MA_TO_CTRL, MA_RST_CLR);

	/* configure mac arbiter timeout values */
	skge_write8(hw, B3_MA_TOINI_RX1, SK_MAC_TO_53);
	skge_write8(hw, B3_MA_TOINI_RX2, SK_MAC_TO_53);
	skge_write8(hw, B3_MA_TOINI_TX1, SK_MAC_TO_53);
	skge_write8(hw, B3_MA_TOINI_TX2, SK_MAC_TO_53);

	/* zero the recovery counters for both ports */
	skge_write8(hw, B3_MA_RCINI_RX1, 0);
	skge_write8(hw, B3_MA_RCINI_RX2, 0);
	skge_write8(hw, B3_MA_RCINI_TX1, 0);
	skge_write8(hw, B3_MA_RCINI_TX2, 0);

	/* configure packet arbiter timeout */
	skge_write16(hw, B3_PA_CTRL, PA_RST_CLR);
	skge_write16(hw, B3_PA_TOINI_RX1, SK_PKT_TO_MAX);
	skge_write16(hw, B3_PA_TOINI_TX1, SK_PKT_TO_MAX);
	skge_write16(hw, B3_PA_TOINI_RX2, SK_PKT_TO_MAX);
	skge_write16(hw, B3_PA_TOINI_TX2, SK_PKT_TO_MAX);
}
1159 | |||
/* Quiesce one Genesis port: mask interrupts, reset XMAC statistics
 * and command registers, clear the hash filter and flush the FIFOs.
 */
static void genesis_reset(struct skge_hw *hw, int port)
{
	static const u8 zero[8]  = { 0 };
	u32 reg;

	skge_write8(hw, SK_REG(port, GMAC_IRQ_MSK), 0);

	/* reset the statistics module */
	xm_write32(hw, port, XM_GP_PORT, XM_GP_RES_STAT);
	xm_write16(hw, port, XM_IMSK, XM_IMSK_DISABLE);
	xm_write32(hw, port, XM_MODE, 0);		/* clear Mode Reg */
	xm_write16(hw, port, XM_TX_CMD, 0);	/* reset TX CMD Reg */
	xm_write16(hw, port, XM_RX_CMD, 0);	/* reset RX CMD Reg */

	/* disable Broadcom PHY IRQ */
	if (hw->phy_type == SK_PHY_BCOM)
		xm_write16(hw, port, PHY_BCOM_INT_MASK, 0xffff);

	/* clear the multicast hash filter */
	xm_outhash(hw, port, XM_HSM, zero);

	/* Flush TX and RX fifo */
	reg = xm_read32(hw, port, XM_MODE);
	xm_write32(hw, port, XM_MODE, reg | XM_MD_FTF);
	xm_write32(hw, port, XM_MODE, reg | XM_MD_FRF);
}
1185 | |||
/* Convert flow-control mode to MII autoneg advertisement bits
 * (copper PHYs) */
static const u16 phy_pause_map[] = {
	[FLOW_MODE_NONE] =	0,
	[FLOW_MODE_LOC_SEND] =	PHY_AN_PAUSE_ASYM,
	[FLOW_MODE_SYMMETRIC] = PHY_AN_PAUSE_CAP,
	[FLOW_MODE_SYM_OR_REM]  = PHY_AN_PAUSE_CAP | PHY_AN_PAUSE_ASYM,
};

/* special defines for FIBER (88E1011S only) */
static const u16 fiber_pause_map[] = {
	[FLOW_MODE_NONE] =	PHY_X_P_NO_PAUSE,
	[FLOW_MODE_LOC_SEND] =	PHY_X_P_ASYM_MD,
	[FLOW_MODE_SYMMETRIC] =	PHY_X_P_SYM_MD,
	[FLOW_MODE_SYM_OR_REM]  = PHY_X_P_BOTH_MD,
};
1201 | |||
1202 | |||
/* Check status of Broadcom phy link */
static void bcom_check_link(struct skge_hw *hw, int port)
{
	struct net_device *dev = hw->dev[port];
	struct skge_port *skge = netdev_priv(dev);
	u16 status;

	/* read twice because of latch */
	xm_phy_read(hw, port, PHY_BCOM_STAT);
	status = xm_phy_read(hw, port, PHY_BCOM_STAT);

	if ((status & PHY_ST_LSYNC) == 0) {
		xm_link_down(hw, port);
		return;
	}

	if (skge->autoneg == AUTONEG_ENABLE) {
		u16 lpa, aux;

		/* wait for autonegotiation to finish */
		if (!(status & PHY_ST_AN_OVER))
			return;

		lpa = xm_phy_read(hw, port, PHY_XMAC_AUNE_LP);
		if (lpa & PHY_B_AN_RF) {
			netdev_notice(dev, "remote fault\n");
			return;
		}

		aux = xm_phy_read(hw, port, PHY_BCOM_AUX_STAT);

		/* Check Duplex mismatch */
		switch (aux & PHY_B_AS_AN_RES_MSK) {
		case PHY_B_RES_1000FD:
			skge->duplex = DUPLEX_FULL;
			break;
		case PHY_B_RES_1000HD:
			skge->duplex = DUPLEX_HALF;
			break;
		default:
			netdev_notice(dev, "duplex mismatch\n");
			return;
		}

		/* We are using IEEE 802.3z/D5.0 Table 37-4 */
		switch (aux & PHY_B_AS_PAUSE_MSK) {
		case PHY_B_AS_PAUSE_MSK:
			skge->flow_status = FLOW_STAT_SYMMETRIC;
			break;
		case PHY_B_AS_PRR:
			skge->flow_status = FLOW_STAT_REM_SEND;
			break;
		case PHY_B_AS_PRT:
			skge->flow_status = FLOW_STAT_LOC_SEND;
			break;
		default:
			skge->flow_status = FLOW_STAT_NONE;
		}
		/* BCOM 5400 only supports gigabit (see bcom_phy_init) */
		skge->speed = SPEED_1000;
	}

	if (!netif_carrier_ok(dev))
		genesis_link_up(skge);
}
1266 | |||
/* Broadcom 5400 only supports giagabit! SysKonnect did not put an additional
 * Phy on for 100 or 10Mbit operation
 */
static void bcom_phy_init(struct skge_port *skge)
{
	struct skge_hw *hw = skge->hw;
	int port = skge->port;
	int i;
	u16 id1, r, ext, ctl;

	/* magic workaround patterns for Broadcom: vendor-supplied
	 * register/value pairs, selected below by PHY revision */
	static const struct {
		u16 reg;
		u16 val;
	} A1hack[] = {
		{ 0x18, 0x0c20 }, { 0x17, 0x0012 }, { 0x15, 0x1104 },
		{ 0x17, 0x0013 }, { 0x15, 0x0404 }, { 0x17, 0x8006 },
		{ 0x15, 0x0132 }, { 0x17, 0x8006 }, { 0x15, 0x0232 },
		{ 0x17, 0x800D }, { 0x15, 0x000F }, { 0x18, 0x0420 },
	}, C0hack[] = {
		{ 0x18, 0x0c20 }, { 0x17, 0x0012 }, { 0x15, 0x1204 },
		{ 0x17, 0x0013 }, { 0x15, 0x0A04 }, { 0x18, 0x0420 },
	};

	/* read Id from external PHY (all have the same address) */
	id1 = xm_phy_read(hw, port, PHY_XMAC_ID1);

	/* Optimize MDIO transfer by suppressing preamble. */
	r = xm_read16(hw, port, XM_MMU_CMD);
	r |=  XM_MMU_NO_PRE;
	xm_write16(hw, port, XM_MMU_CMD, r);

	switch (id1) {
	case PHY_BCOM_ID1_C0:
		/*
		 * Workaround BCOM Errata for the C0 type.
		 * Write magic patterns to reserved registers.
		 */
		for (i = 0; i < ARRAY_SIZE(C0hack); i++)
			xm_phy_write(hw, port,
				     C0hack[i].reg, C0hack[i].val);

		break;
	case PHY_BCOM_ID1_A1:
		/*
		 * Workaround BCOM Errata for the A1 type.
		 * Write magic patterns to reserved registers.
		 */
		for (i = 0; i < ARRAY_SIZE(A1hack); i++)
			xm_phy_write(hw, port,
				     A1hack[i].reg, A1hack[i].val);
		break;
	}

	/*
	 * Workaround BCOM Errata (#10523) for all BCom PHYs.
	 * Disable Power Management after reset.
	 */
	r = xm_phy_read(hw, port, PHY_BCOM_AUX_CTRL);
	r |= PHY_B_AC_DIS_PM;
	xm_phy_write(hw, port, PHY_BCOM_AUX_CTRL, r);

	/* Dummy read */
	xm_read16(hw, port, XM_ISRC);

	ext = PHY_B_PEC_EN_LTR; /* enable tx led */
	ctl = PHY_CT_SP1000;	/* always 1000mbit */

	if (skge->autoneg == AUTONEG_ENABLE) {
		/*
		 * Workaround BCOM Errata #1 for the C5 type.
		 * 1000Base-T Link Acquisition Failure in Slave Mode
		 * Set Repeater/DTE bit 10 of the 1000Base-T Control Register
		 */
		u16 adv = PHY_B_1000C_RD;
		if (skge->advertising & ADVERTISED_1000baseT_Half)
			adv |= PHY_B_1000C_AHD;
		if (skge->advertising & ADVERTISED_1000baseT_Full)
			adv |= PHY_B_1000C_AFD;
		xm_phy_write(hw, port, PHY_BCOM_1000T_CTRL, adv);

		ctl |= PHY_CT_ANE | PHY_CT_RE_CFG;
	} else {
		if (skge->duplex == DUPLEX_FULL)
			ctl |= PHY_CT_DUP_MD;
		/* Force to slave */
		xm_phy_write(hw, port, PHY_BCOM_1000T_CTRL, PHY_B_1000C_MSE);
	}

	/* Set autonegotiation pause parameters */
	xm_phy_write(hw, port, PHY_BCOM_AUNE_ADV,
		     phy_pause_map[skge->flow_control] | PHY_AN_CSMA);

	/* Handle Jumbo frames */
	if (hw->dev[port]->mtu > ETH_DATA_LEN) {
		xm_phy_write(hw, port, PHY_BCOM_AUX_CTRL,
			     PHY_B_AC_TX_TST | PHY_B_AC_LONG_PACK);

		ext |= PHY_B_PEC_HIGH_LA;

	}

	xm_phy_write(hw, port, PHY_BCOM_P_EXT_CTRL, ext);
	xm_phy_write(hw, port, PHY_BCOM_CTRL, ctl);

	/* Use link status change interrupt */
	xm_phy_write(hw, port, PHY_BCOM_INT_MASK, PHY_B_DEF_MSK);
}
1375 | |||
/* Configure the internal XMAC PHY (fiber): set up advertisement and
 * (re)start autonegotiation, or force duplex when autoneg is off.
 * Link state is then polled by xm_link_timer.
 */
static void xm_phy_init(struct skge_port *skge)
{
	struct skge_hw *hw = skge->hw;
	int port = skge->port;
	u16 ctrl = 0;

	if (skge->autoneg == AUTONEG_ENABLE) {
		if (skge->advertising & ADVERTISED_1000baseT_Half)
			ctrl |= PHY_X_AN_HD;
		if (skge->advertising & ADVERTISED_1000baseT_Full)
			ctrl |= PHY_X_AN_FD;

		ctrl |= fiber_pause_map[skge->flow_control];

		xm_phy_write(hw, port, PHY_XMAC_AUNE_ADV, ctrl);

		/* Restart Auto-negotiation */
		ctrl = PHY_CT_ANE | PHY_CT_RE_CFG;
	} else {
		/* Set DuplexMode in Config register */
		if (skge->duplex == DUPLEX_FULL)
			ctrl |= PHY_CT_DUP_MD;
		/*
		 * Do NOT enable Auto-negotiation here. This would hold
		 * the link down because no IDLEs are transmitted
		 */
	}

	xm_phy_write(hw, port, PHY_XMAC_CTRL, ctrl);

	/* Poll PHY for status changes */
	mod_timer(&skge->link_timer, jiffies + LINK_HZ);
}
1409 | |||
/* Check XMAC (fiber) PHY link status and resolve autoneg results.
 * Returns 1 when the link is up and resolved, 0 otherwise.
 */
static int xm_check_link(struct net_device *dev)
{
	struct skge_port *skge = netdev_priv(dev);
	struct skge_hw *hw = skge->hw;
	int port = skge->port;
	u16 status;

	/* read twice because of latch */
	xm_phy_read(hw, port, PHY_XMAC_STAT);
	status = xm_phy_read(hw, port, PHY_XMAC_STAT);

	if ((status & PHY_ST_LSYNC) == 0) {
		xm_link_down(hw, port);
		return 0;
	}

	if (skge->autoneg == AUTONEG_ENABLE) {
		u16 lpa, res;

		/* autonegotiation still in progress */
		if (!(status & PHY_ST_AN_OVER))
			return 0;

		lpa = xm_phy_read(hw, port, PHY_XMAC_AUNE_LP);
		if (lpa & PHY_B_AN_RF) {
			netdev_notice(dev, "remote fault\n");
			return 0;
		}

		res = xm_phy_read(hw, port, PHY_XMAC_RES_ABI);

		/* Check Duplex mismatch */
		switch (res & (PHY_X_RS_HD | PHY_X_RS_FD)) {
		case PHY_X_RS_FD:
			skge->duplex = DUPLEX_FULL;
			break;
		case PHY_X_RS_HD:
			skge->duplex = DUPLEX_HALF;
			break;
		default:
			netdev_notice(dev, "duplex mismatch\n");
			return 0;
		}

		/* We are using IEEE 802.3z/D5.0 Table 37-4 */
		if ((skge->flow_control == FLOW_MODE_SYMMETRIC ||
		     skge->flow_control == FLOW_MODE_SYM_OR_REM) &&
		    (lpa & PHY_X_P_SYM_MD))
			skge->flow_status = FLOW_STAT_SYMMETRIC;
		else if (skge->flow_control == FLOW_MODE_SYM_OR_REM &&
			 (lpa & PHY_X_RS_PAUSE) == PHY_X_P_ASYM_MD)
			/* Enable PAUSE receive, disable PAUSE transmit */
			skge->flow_status  = FLOW_STAT_REM_SEND;
		else if (skge->flow_control == FLOW_MODE_LOC_SEND &&
			 (lpa & PHY_X_RS_PAUSE) == PHY_X_P_BOTH_MD)
			/* Disable PAUSE receive, enable PAUSE transmit */
			skge->flow_status = FLOW_STAT_LOC_SEND;
		else
			skge->flow_status = FLOW_STAT_NONE;

		/* fiber PHY runs at gigabit only */
		skge->speed = SPEED_1000;
	}

	if (!netif_carrier_ok(dev))
		genesis_link_up(skge);
	return 1;
}
1476 | |||
/* Poll to check for link coming up.
 *
 * Since internal PHY is wired to a level triggered pin, can't
 * get an interrupt when carrier is detected, need to poll for
 * link coming up.
 */
static void xm_link_timer(unsigned long arg)
{
	struct skge_port *skge = (struct skge_port *) arg;
	struct net_device *dev = skge->netdev;
	struct skge_hw *hw = skge->hw;
	int port = skge->port;
	int i;
	unsigned long flags;

	if (!netif_running(dev))
		return;

	spin_lock_irqsave(&hw->phy_lock, flags);

	/*
	 * Verify that the link by checking GPIO register three times.
	 * This pin has the signal from the link_sync pin connected to it.
	 */
	for (i = 0; i < 3; i++) {
		if (xm_read16(hw, port, XM_GP_PORT) & XM_GP_INP_ASS)
			goto link_down;
	}

	/* Re-enable interrupt to detect link down */
	if (xm_check_link(dev)) {
		u16 msk = xm_read16(hw, port, XM_IMSK);
		msk &= ~XM_IS_INP_ASS;
		xm_write16(hw, port, XM_IMSK, msk);
		xm_read16(hw, port, XM_ISRC);
	} else {
		/* note: label sits inside the else so the failed-check
		 * path and the GPIO-asserted path share the re-arm */
link_down:
		mod_timer(&skge->link_timer,
			  round_jiffies(jiffies + LINK_HZ));
	}
	spin_unlock_irqrestore(&hw->phy_lock, flags);
}
1519 | |||
/* Bring up the XMAC (Genesis) MAC for one port: reset and unreset the
 * MAC, initialise the attached PHY, program the station address,
 * clear MIB counters, configure Rx/Tx command registers, the MAC
 * arbiter timeouts and the Rx/Tx MAC FIFOs.
 */
static void genesis_mac_init(struct skge_hw *hw, int port)
{
	struct net_device *dev = hw->dev[port];
	struct skge_port *skge = netdev_priv(dev);
	int jumbo = hw->dev[port]->mtu > ETH_DATA_LEN;
	int i;
	u32 r;
	static const u8 zero[6]  = { 0 };

	/* Assert MAC reset and poll until the reset bit reads back set. */
	for (i = 0; i < 10; i++) {
		skge_write16(hw, SK_REG(port, TX_MFF_CTRL1),
			     MFF_SET_MAC_RST);
		if (skge_read16(hw, SK_REG(port, TX_MFF_CTRL1)) & MFF_SET_MAC_RST)
			goto reset_ok;
		udelay(1);
	}

	netdev_warn(dev, "genesis reset failed\n");

 reset_ok:
	/* Unreset the XMAC. */
	skge_write16(hw, SK_REG(port, TX_MFF_CTRL1), MFF_CLR_MAC_RST);

	/*
	 * Perform additional initialization for external PHYs,
	 * namely for the 1000baseTX cards that use the XMAC's
	 * GMII mode.
	 */
	if (hw->phy_type != SK_PHY_XMAC) {
		/* Take external Phy out of reset (GPIO pin differs per port) */
		r = skge_read32(hw, B2_GP_IO);
		if (port == 0)
			r |= GP_DIR_0|GP_IO_0;
		else
			r |= GP_DIR_2|GP_IO_2;

		skge_write32(hw, B2_GP_IO, r);

		/* Enable GMII interface */
		xm_write16(hw, port, XM_HW_CFG, XM_HW_GMII_MD);
	}


	/* PHY-specific bring-up; BCOM also gets an immediate link check. */
	switch (hw->phy_type) {
	case SK_PHY_XMAC:
		xm_phy_init(skge);
		break;
	case SK_PHY_BCOM:
		bcom_phy_init(skge);
		bcom_check_link(hw, port);
	}

	/* Set Station Address */
	xm_outaddr(hw, port, XM_SA, dev->dev_addr);

	/* We don't use match addresses so clear them all */
	for (i = 1; i < 16; i++)
		xm_outaddr(hw, port, XM_EXM(i), zero);

	/* Clear MIB counters */
	xm_write16(hw, port, XM_STAT_CMD,
			XM_SC_CLR_RXC | XM_SC_CLR_TXC);
	/* Clear two times according to Errata #3 */
	xm_write16(hw, port, XM_STAT_CMD,
			XM_SC_CLR_RXC | XM_SC_CLR_TXC);

	/* configure Rx High Water Mark (XM_RX_HI_WM) */
	xm_write16(hw, port, XM_RX_HI_WM, 1450);

	/* We don't need the FCS appended to the packet. */
	r = XM_RX_LENERR_OK | XM_RX_STRIP_FCS;
	if (jumbo)
		r |= XM_RX_BIG_PK_OK;

	if (skge->duplex == DUPLEX_HALF) {
		/*
		 * If in manual half duplex mode the other side might be in
		 * full duplex mode, so ignore if a carrier extension is not seen
		 * on frames received
		 */
		r |= XM_RX_DIS_CEXT;
	}
	xm_write16(hw, port, XM_RX_CMD, r);

	/* We want short frames padded to 60 bytes. */
	xm_write16(hw, port, XM_TX_CMD, XM_TX_AUTO_PAD);

	/* Increase threshold for jumbo frames on dual port */
	if (hw->ports > 1 && jumbo)
		xm_write16(hw, port, XM_TX_THR, 1020);
	else
		xm_write16(hw, port, XM_TX_THR, 512);

	/*
	 * Enable the reception of all error frames. This is
	 * a necessary evil due to the design of the XMAC. The
	 * XMAC's receive FIFO is only 8K in size, however jumbo
	 * frames can be up to 9000 bytes in length. When bad
	 * frame filtering is enabled, the XMAC's RX FIFO operates
	 * in 'store and forward' mode. For this to work, the
	 * entire frame has to fit into the FIFO, but that means
	 * that jumbo frames larger than 8192 bytes will be
	 * truncated. Disabling all bad frame filtering causes
	 * the RX FIFO to operate in streaming mode, in which
	 * case the XMAC will start transferring frames out of the
	 * RX FIFO as soon as the FIFO threshold is reached.
	 */
	xm_write32(hw, port, XM_MODE, XM_DEF_MODE);


	/*
	 * Initialize the Receive Counter Event Mask (XM_RX_EV_MSK)
	 *	- Enable all bits excepting 'Octets Rx OK Low CntOv'
	 *	  and 'Octets Rx OK Hi Cnt Ov'.
	 */
	xm_write32(hw, port, XM_RX_EV_MSK, XMR_DEF_MSK);

	/*
	 * Initialize the Transmit Counter Event Mask (XM_TX_EV_MSK)
	 *	- Enable all bits excepting 'Octets Tx OK Low CntOv'
	 *	  and 'Octets Tx OK Hi Cnt Ov'.
	 */
	xm_write32(hw, port, XM_TX_EV_MSK, XMT_DEF_MSK);

	/* Configure MAC arbiter */
	skge_write16(hw, B3_MA_TO_CTRL, MA_RST_CLR);

	/* configure timeout values */
	skge_write8(hw, B3_MA_TOINI_RX1, 72);
	skge_write8(hw, B3_MA_TOINI_RX2, 72);
	skge_write8(hw, B3_MA_TOINI_TX1, 72);
	skge_write8(hw, B3_MA_TOINI_TX2, 72);

	skge_write8(hw, B3_MA_RCINI_RX1, 0);
	skge_write8(hw, B3_MA_RCINI_RX2, 0);
	skge_write8(hw, B3_MA_RCINI_TX1, 0);
	skge_write8(hw, B3_MA_RCINI_TX2, 0);

	/* Configure Rx MAC FIFO */
	skge_write8(hw, SK_REG(port, RX_MFF_CTRL2), MFF_RST_CLR);
	skge_write16(hw, SK_REG(port, RX_MFF_CTRL1), MFF_ENA_TIM_PAT);
	skge_write8(hw, SK_REG(port, RX_MFF_CTRL2), MFF_ENA_OP_MD);

	/* Configure Tx MAC FIFO */
	skge_write8(hw, SK_REG(port, TX_MFF_CTRL2), MFF_RST_CLR);
	skge_write16(hw, SK_REG(port, TX_MFF_CTRL1), MFF_TX_CTRL_DEF);
	skge_write8(hw, SK_REG(port, TX_MFF_CTRL2), MFF_ENA_OP_MD);

	if (jumbo) {
		/* Enable frame flushing if jumbo frames used */
		skge_write16(hw, SK_REG(port, RX_MFF_CTRL1), MFF_ENA_FLUSH);
	} else {
		/* enable timeout timers if normal frames */
		skge_write16(hw, B3_PA_CTRL,
			     (port == 0) ? PA_ENA_TO_TX1 : PA_ENA_TO_TX2);
	}
}
1677 | |||
/* Shut down the XMAC (Genesis) MAC for one port: stop Rx/Tx, reset
 * the chip path via genesis_reset(), put the MAC back in reset and,
 * for external PHYs, drive the PHY back into reset via GPIO.
 */
static void genesis_stop(struct skge_port *skge)
{
	struct skge_hw *hw = skge->hw;
	int port = skge->port;
	unsigned retries = 1000;
	u16 cmd;

	/* Disable Tx and Rx */
	cmd = xm_read16(hw, port, XM_MMU_CMD);
	cmd &= ~(XM_MMU_ENA_RX | XM_MMU_ENA_TX);
	xm_write16(hw, port, XM_MMU_CMD, cmd);

	genesis_reset(hw, port);

	/* Clear Tx packet arbiter timeout IRQ */
	skge_write16(hw, B3_PA_CTRL,
		     port == 0 ? PA_CLR_TO_TX1 : PA_CLR_TO_TX2);

	/* Reset the MAC: clear first, then poll until the set bit no
	 * longer reads back (bounded by 'retries' iterations). */
	skge_write16(hw, SK_REG(port, TX_MFF_CTRL1), MFF_CLR_MAC_RST);
	do {
		skge_write16(hw, SK_REG(port, TX_MFF_CTRL1), MFF_SET_MAC_RST);
		if (!(skge_read16(hw, SK_REG(port, TX_MFF_CTRL1)) & MFF_SET_MAC_RST))
			break;
	} while (--retries > 0);

	/* For external PHYs there must be special handling: drop the
	 * GPIO line to hold the PHY in reset (pin differs per port). */
	if (hw->phy_type != SK_PHY_XMAC) {
		u32 reg = skge_read32(hw, B2_GP_IO);
		if (port == 0) {
			reg |= GP_DIR_0;
			reg &= ~GP_IO_0;
		} else {
			reg |= GP_DIR_2;
			reg &= ~GP_IO_2;
		}
		skge_write32(hw, B2_GP_IO, reg);
		skge_read32(hw, B2_GP_IO);	/* read back to flush the write */
	}

	/* Make doubly sure Rx/Tx stay disabled */
	xm_write16(hw, port, XM_MMU_CMD,
			xm_read16(hw, port, XM_MMU_CMD)
			& ~(XM_MMU_ENA_RX | XM_MMU_ENA_TX));

	xm_read16(hw, port, XM_MMU_CMD);	/* flush */
}
1724 | |||
1725 | |||
/* Read the XMAC hardware statistics into 'data', in skge_stats order.
 * Triggers a counter snapshot, waits (up to ~1s) for it to complete,
 * then reads the snapshotted values.
 */
static void genesis_get_stats(struct skge_port *skge, u64 *data)
{
	struct skge_hw *hw = skge->hw;
	int port = skge->port;
	int i;
	unsigned long timeout = jiffies + HZ;

	/* Request a snapshot of Tx and Rx counters */
	xm_write16(hw, port,
		   XM_STAT_CMD, XM_SC_SNP_TXC | XM_SC_SNP_RXC);

	/* wait for update to complete (snapshot bits self-clear);
	 * bail out after the timeout rather than spin forever */
	while (xm_read16(hw, port, XM_STAT_CMD)
	       & (XM_SC_SNP_TXC | XM_SC_SNP_RXC)) {
		if (time_after(jiffies, timeout))
			break;
		udelay(10);
	}

	/* special case for the 64 bit octet counters (hi/lo halves) */
	data[0] = (u64) xm_read32(hw, port, XM_TXO_OK_HI) << 32
		| xm_read32(hw, port, XM_TXO_OK_LO);
	data[1] = (u64) xm_read32(hw, port, XM_RXO_OK_HI) << 32
		| xm_read32(hw, port, XM_RXO_OK_LO);

	/* remaining counters are plain 32-bit reads */
	for (i = 2; i < ARRAY_SIZE(skge_stats); i++)
		data[i] = xm_read32(hw, port, skge_stats[i].xmac_offset);
}
1753 | |||
/* Handle an XMAC MAC-level interrupt: link state change (internal PHY
 * only) and Tx FIFO underrun. Reading XM_ISRC acks the sources.
 */
static void genesis_mac_intr(struct skge_hw *hw, int port)
{
	struct net_device *dev = hw->dev[port];
	struct skge_port *skge = netdev_priv(dev);
	u16 status = xm_read16(hw, port, XM_ISRC);

	netif_printk(skge, intr, KERN_DEBUG, skge->netdev,
		     "mac interrupt status 0x%x\n", status);

	/* Internal PHY: input-pin asserted means link went down; kick the
	 * poll timer to watch for it coming back (see xm_link_timer). */
	if (hw->phy_type == SK_PHY_XMAC && (status & XM_IS_INP_ASS)) {
		xm_link_down(hw, port);
		mod_timer(&skge->link_timer, jiffies + 1);
	}

	if (status & XM_IS_TXF_UR) {
		/* Flush the Tx FIFO to recover from the underrun */
		xm_write32(hw, port, XM_MODE, XM_MD_FTF);
		++dev->stats.tx_fifo_errors;
	}
}
1773 | |||
/* Complete link bring-up on a Genesis (XMAC) port once the PHY reports
 * link: program pause-frame handling from the negotiated flow_status,
 * unmask Tx-underrun interrupts, set duplex, enable Rx/Tx and notify
 * the stack via skge_link_up().
 */
static void genesis_link_up(struct skge_port *skge)
{
	struct skge_hw *hw = skge->hw;
	int port = skge->port;
	u16 cmd, msk;
	u32 mode;

	cmd = xm_read16(hw, port, XM_MMU_CMD);

	/*
	 * enabling pause frame reception is required for 1000BT
	 * because the XMAC is not reset if the link is going down
	 */
	if (skge->flow_status == FLOW_STAT_NONE ||
	    skge->flow_status == FLOW_STAT_LOC_SEND)
		/* Disable Pause Frame Reception */
		cmd |= XM_MMU_IGN_PF;
	else
		/* Enable Pause Frame Reception */
		cmd &= ~XM_MMU_IGN_PF;

	xm_write16(hw, port, XM_MMU_CMD, cmd);

	mode = xm_read32(hw, port, XM_MODE);
	if (skge->flow_status == FLOW_STAT_SYMMETRIC ||
	    skge->flow_status == FLOW_STAT_LOC_SEND) {
		/*
		 * Configure Pause Frame Generation
		 * Use internal and external Pause Frame Generation.
		 * Sending pause frames is edge triggered.
		 * Send a Pause frame with the maximum pause time if
		 * internal or external FIFO full condition occurs.
		 * Send a zero pause time frame to re-start transmission.
		 */
		/* XM_PAUSE_DA = '010000C28001' (default) */
		/* XM_MAC_PTIME = 0xffff (maximum) */
		/* remember this value is defined in big endian (!) */
		xm_write16(hw, port, XM_MAC_PTIME, 0xffff);

		mode |= XM_PAUSE_MODE;
		skge_write16(hw, SK_REG(port, RX_MFF_CTRL1), MFF_ENA_PAUSE);
	} else {
		/*
		 * disable pause frame generation is required for 1000BT
		 * because the XMAC is not reset if the link is going down
		 */
		/* Disable Pause Mode in Mode Register */
		mode &= ~XM_PAUSE_MODE;

		skge_write16(hw, SK_REG(port, RX_MFF_CTRL1), MFF_DIS_PAUSE);
	}

	xm_write32(hw, port, XM_MODE, mode);

	/* Turn on detection of Tx underrun */
	msk = xm_read16(hw, port, XM_IMSK);
	msk &= ~XM_IS_TXF_UR;
	xm_write16(hw, port, XM_IMSK, msk);

	xm_read16(hw, port, XM_ISRC);	/* ack any pending sources */

	/* get MMU Command Reg. */
	cmd = xm_read16(hw, port, XM_MMU_CMD);
	if (hw->phy_type != SK_PHY_XMAC && skge->duplex == DUPLEX_FULL)
		cmd |= XM_MMU_GMII_FD;

	/*
	 * Workaround BCOM Errata (#10523) for all BCom Phys
	 * Enable Power Management after link up
	 */
	if (hw->phy_type == SK_PHY_BCOM) {
		xm_phy_write(hw, port, PHY_BCOM_AUX_CTRL,
			     xm_phy_read(hw, port, PHY_BCOM_AUX_CTRL)
			     & ~PHY_B_AC_DIS_PM);
		xm_phy_write(hw, port, PHY_BCOM_INT_MASK, PHY_B_DEF_MSK);
	}

	/* enable Rx/Tx */
	xm_write16(hw, port, XM_MMU_CMD,
			cmd | XM_MMU_ENA_RX | XM_MMU_ENA_TX);
	skge_link_up(skge);
}
1856 | |||
1857 | |||
/* Handle a Broadcom PHY interrupt: log pair-swap errors, apply the
 * "NO HCD" loopback errata workaround, and re-check link on
 * autoneg-complete or link-state-change events.
 */
static inline void bcom_phy_intr(struct skge_port *skge)
{
	struct skge_hw *hw = skge->hw;
	int port = skge->port;
	u16 isrc;

	/* Reading the interrupt status register also acks it. */
	isrc = xm_phy_read(hw, port, PHY_BCOM_INT_STAT);
	netif_printk(skge, intr, KERN_DEBUG, skge->netdev,
		     "phy interrupt status 0x%x\n", isrc);

	if (isrc & PHY_B_IS_PSE)
		pr_err("%s: uncorrectable pair swap error\n",
		       hw->dev[port]->name);

	/* Workaround BCom Errata:
	 *	enable and disable loopback mode if "NO HCD" occurs.
	 */
	if (isrc & PHY_B_IS_NO_HDCL) {
		u16 ctrl = xm_phy_read(hw, port, PHY_BCOM_CTRL);
		xm_phy_write(hw, port, PHY_BCOM_CTRL,
			     ctrl | PHY_CT_LOOP);
		xm_phy_write(hw, port, PHY_BCOM_CTRL,
			     ctrl & ~PHY_CT_LOOP);
	}

	if (isrc & (PHY_B_IS_AN_PR | PHY_B_IS_LST_CHANGE))
		bcom_check_link(hw, port);

}
1887 | |||
1888 | static int gm_phy_write(struct skge_hw *hw, int port, u16 reg, u16 val) | ||
1889 | { | ||
1890 | int i; | ||
1891 | |||
1892 | gma_write16(hw, port, GM_SMI_DATA, val); | ||
1893 | gma_write16(hw, port, GM_SMI_CTRL, | ||
1894 | GM_SMI_CT_PHY_AD(hw->phy_addr) | GM_SMI_CT_REG_AD(reg)); | ||
1895 | for (i = 0; i < PHY_RETRIES; i++) { | ||
1896 | udelay(1); | ||
1897 | |||
1898 | if (!(gma_read16(hw, port, GM_SMI_CTRL) & GM_SMI_CT_BUSY)) | ||
1899 | return 0; | ||
1900 | } | ||
1901 | |||
1902 | pr_warning("%s: phy write timeout\n", hw->dev[port]->name); | ||
1903 | return -EIO; | ||
1904 | } | ||
1905 | |||
1906 | static int __gm_phy_read(struct skge_hw *hw, int port, u16 reg, u16 *val) | ||
1907 | { | ||
1908 | int i; | ||
1909 | |||
1910 | gma_write16(hw, port, GM_SMI_CTRL, | ||
1911 | GM_SMI_CT_PHY_AD(hw->phy_addr) | ||
1912 | | GM_SMI_CT_REG_AD(reg) | GM_SMI_CT_OP_RD); | ||
1913 | |||
1914 | for (i = 0; i < PHY_RETRIES; i++) { | ||
1915 | udelay(1); | ||
1916 | if (gma_read16(hw, port, GM_SMI_CTRL) & GM_SMI_CT_RD_VAL) | ||
1917 | goto ready; | ||
1918 | } | ||
1919 | |||
1920 | return -ETIMEDOUT; | ||
1921 | ready: | ||
1922 | *val = gma_read16(hw, port, GM_SMI_DATA); | ||
1923 | return 0; | ||
1924 | } | ||
1925 | |||
1926 | static u16 gm_phy_read(struct skge_hw *hw, int port, u16 reg) | ||
1927 | { | ||
1928 | u16 v = 0; | ||
1929 | if (__gm_phy_read(hw, port, reg, &v)) | ||
1930 | pr_warning("%s: phy read timeout\n", hw->dev[port]->name); | ||
1931 | return v; | ||
1932 | } | ||
1933 | |||
/* Marvell PHY Initialization: program autonegotiation advertisement
 * (or forced speed/duplex), flow control capabilities and the PHY
 * interrupt mask according to the port's current settings.
 */
static void yukon_init(struct skge_hw *hw, int port)
{
	struct skge_port *skge = netdev_priv(hw->dev[port]);
	u16 ctrl, ct1000, adv;

	if (skge->autoneg == AUTONEG_ENABLE) {
		u16 ectrl = gm_phy_read(hw, port, PHY_MARV_EXT_CTRL);

		/* Reprogram downshift counters and MAC interface speed */
		ectrl &= ~(PHY_M_EC_M_DSC_MSK | PHY_M_EC_S_DSC_MSK |
			  PHY_M_EC_MAC_S_MSK);
		ectrl |= PHY_M_EC_MAC_S(MAC_TX_CLK_25_MHZ);

		ectrl |= PHY_M_EC_M_DSC(0) | PHY_M_EC_S_DSC(1);

		gm_phy_write(hw, port, PHY_MARV_EXT_CTRL, ectrl);
	}

	ctrl = gm_phy_read(hw, port, PHY_MARV_CTRL);
	if (skge->autoneg == AUTONEG_DISABLE)
		ctrl &= ~PHY_CT_ANE;

	/* Reset the PHY so the new extended-control settings take effect */
	ctrl |= PHY_CT_RESET;
	gm_phy_write(hw, port, PHY_MARV_CTRL, ctrl);

	ctrl = 0;
	ct1000 = 0;
	adv = PHY_AN_CSMA;

	if (skge->autoneg == AUTONEG_ENABLE) {
		if (hw->copper) {
			/* Translate ethtool advertising flags into the
			 * Marvell 1000T-control and AN-advertisement bits */
			if (skge->advertising & ADVERTISED_1000baseT_Full)
				ct1000 |= PHY_M_1000C_AFD;
			if (skge->advertising & ADVERTISED_1000baseT_Half)
				ct1000 |= PHY_M_1000C_AHD;
			if (skge->advertising & ADVERTISED_100baseT_Full)
				adv |= PHY_M_AN_100_FD;
			if (skge->advertising & ADVERTISED_100baseT_Half)
				adv |= PHY_M_AN_100_HD;
			if (skge->advertising & ADVERTISED_10baseT_Full)
				adv |= PHY_M_AN_10_FD;
			if (skge->advertising & ADVERTISED_10baseT_Half)
				adv |= PHY_M_AN_10_HD;

			/* Set Flow-control capabilities */
			adv |= phy_pause_map[skge->flow_control];
		} else {
			/* Fiber: only gigabit modes can be advertised */
			if (skge->advertising & ADVERTISED_1000baseT_Full)
				adv |= PHY_M_AN_1000X_AFD;
			if (skge->advertising & ADVERTISED_1000baseT_Half)
				adv |= PHY_M_AN_1000X_AHD;

			adv |= fiber_pause_map[skge->flow_control];
		}

		/* Restart Auto-negotiation */
		ctrl |= PHY_CT_ANE | PHY_CT_RE_CFG;
	} else {
		/* forced speed/duplex settings */
		ct1000 = PHY_M_1000C_MSE;

		if (skge->duplex == DUPLEX_FULL)
			ctrl |= PHY_CT_DUP_MD;

		switch (skge->speed) {
		case SPEED_1000:
			ctrl |= PHY_CT_SP1000;
			break;
		case SPEED_100:
			ctrl |= PHY_CT_SP100;
			break;
		}

		/* Reset required for forced-mode changes to apply */
		ctrl |= PHY_CT_RESET;
	}

	gm_phy_write(hw, port, PHY_MARV_1000T_CTRL, ct1000);

	gm_phy_write(hw, port, PHY_MARV_AUNE_ADV, adv);
	gm_phy_write(hw, port, PHY_MARV_CTRL, ctrl);

	/* Enable phy interrupt on autonegotiation complete (or link up) */
	if (skge->autoneg == AUTONEG_ENABLE)
		gm_phy_write(hw, port, PHY_MARV_INT_MASK, PHY_M_IS_AN_MSK);
	else
		gm_phy_write(hw, port, PHY_MARV_INT_MASK, PHY_M_IS_DEF_MSK);
}
2021 | |||
/* Quiesce the Marvell PHY/GMAC for one port: mask PHY interrupts,
 * clear the multicast hash and restore unicast/multicast filtering.
 */
static void yukon_reset(struct skge_hw *hw, int port)
{
	gm_phy_write(hw, port, PHY_MARV_INT_MASK, 0);/* disable PHY IRQs */
	gma_write16(hw, port, GM_MC_ADDR_H1, 0);	/* clear MC hash */
	gma_write16(hw, port, GM_MC_ADDR_H2, 0);
	gma_write16(hw, port, GM_MC_ADDR_H3, 0);
	gma_write16(hw, port, GM_MC_ADDR_H4, 0);

	gma_write16(hw, port, GM_RX_CTRL,
			 gma_read16(hw, port, GM_RX_CTRL)
			 | GM_RXCR_UCF_ENA | GM_RXCR_MCF_ENA);
}
2034 | |||
2035 | /* Apparently, early versions of Yukon-Lite had wrong chip_id? */ | ||
2036 | static int is_yukon_lite_a0(struct skge_hw *hw) | ||
2037 | { | ||
2038 | u32 reg; | ||
2039 | int ret; | ||
2040 | |||
2041 | if (hw->chip_id != CHIP_ID_YUKON) | ||
2042 | return 0; | ||
2043 | |||
2044 | reg = skge_read32(hw, B2_FAR); | ||
2045 | skge_write8(hw, B2_FAR + 3, 0xff); | ||
2046 | ret = (skge_read8(hw, B2_FAR + 3) != 0); | ||
2047 | skge_write32(hw, B2_FAR, reg); | ||
2048 | return ret; | ||
2049 | } | ||
2050 | |||
/* Bring up the Marvell GMAC for one port: reset PHY and MAC (with the
 * COMA-mode workaround on Yukon-Lite A3+), program speed/duplex/flow
 * control, initialise the PHY via yukon_init(), clear MIB counters and
 * configure the Rx/Tx parameters and MAC FIFOs.
 */
static void yukon_mac_init(struct skge_hw *hw, int port)
{
	struct skge_port *skge = netdev_priv(hw->dev[port]);
	int i;
	u32 reg;
	const u8 *addr = hw->dev[port]->dev_addr;

	/* WA code for COMA mode -- set PHY reset */
	if (hw->chip_id == CHIP_ID_YUKON_LITE &&
	    hw->chip_rev >= CHIP_REV_YU_LITE_A3) {
		reg = skge_read32(hw, B2_GP_IO);
		reg |= GP_DIR_9 | GP_IO_9;
		skge_write32(hw, B2_GP_IO, reg);
	}

	/* hard reset */
	skge_write32(hw, SK_REG(port, GPHY_CTRL), GPC_RST_SET);
	skge_write32(hw, SK_REG(port, GMAC_CTRL), GMC_RST_SET);

	/* WA code for COMA mode -- clear PHY reset */
	if (hw->chip_id == CHIP_ID_YUKON_LITE &&
	    hw->chip_rev >= CHIP_REV_YU_LITE_A3) {
		reg = skge_read32(hw, B2_GP_IO);
		reg |= GP_DIR_9;
		reg &= ~GP_IO_9;
		skge_write32(hw, B2_GP_IO, reg);
	}

	/* Set hardware config mode */
	reg = GPC_INT_POL_HI | GPC_DIS_FC | GPC_DIS_SLEEP |
		GPC_ENA_XC | GPC_ANEG_ADV_ALL_M | GPC_ENA_PAUSE;
	reg |= hw->copper ? GPC_HWCFG_GMII_COP : GPC_HWCFG_GMII_FIB;

	/* Clear GMC reset (config bits must be held across the
	 * set/clear sequence) */
	skge_write32(hw, SK_REG(port, GPHY_CTRL), reg | GPC_RST_SET);
	skge_write32(hw, SK_REG(port, GPHY_CTRL), reg | GPC_RST_CLR);
	skge_write32(hw, SK_REG(port, GMAC_CTRL), GMC_PAUSE_ON | GMC_RST_CLR);

	if (skge->autoneg == AUTONEG_DISABLE) {
		/* Disable all autoneg functions, then force the
		 * requested speed and duplex */
		reg = GM_GPCR_AU_ALL_DIS;
		gma_write16(hw, port, GM_GP_CTRL,
				 gma_read16(hw, port, GM_GP_CTRL) | reg);

		switch (skge->speed) {
		case SPEED_1000:
			reg &= ~GM_GPCR_SPEED_100;
			reg |= GM_GPCR_SPEED_1000;
			break;
		case SPEED_100:
			reg &= ~GM_GPCR_SPEED_1000;
			reg |= GM_GPCR_SPEED_100;
			break;
		case SPEED_10:
			reg &= ~(GM_GPCR_SPEED_1000 | GM_GPCR_SPEED_100);
			break;
		}

		if (skge->duplex == DUPLEX_FULL)
			reg |= GM_GPCR_DUP_FULL;
	} else
		reg = GM_GPCR_SPEED_1000 | GM_GPCR_SPEED_100 | GM_GPCR_DUP_FULL;

	switch (skge->flow_control) {
	case FLOW_MODE_NONE:
		skge_write32(hw, SK_REG(port, GMAC_CTRL), GMC_PAUSE_OFF);
		reg |= GM_GPCR_FC_TX_DIS | GM_GPCR_FC_RX_DIS | GM_GPCR_AU_FCT_DIS;
		break;
	case FLOW_MODE_LOC_SEND:
		/* disable Rx flow-control */
		reg |= GM_GPCR_FC_RX_DIS | GM_GPCR_AU_FCT_DIS;
		break;
	case FLOW_MODE_SYMMETRIC:
	case FLOW_MODE_SYM_OR_REM:
		/* enable Tx & Rx flow-control */
		break;
	}

	gma_write16(hw, port, GM_GP_CTRL, reg);
	skge_read16(hw, SK_REG(port, GMAC_IRQ_SRC));	/* ack/flush */

	yukon_init(hw, port);

	/* MIB clear: reading each counter with the clear bit set
	 * resets it; then restore the PHY address register */
	reg = gma_read16(hw, port, GM_PHY_ADDR);
	gma_write16(hw, port, GM_PHY_ADDR, reg | GM_PAR_MIB_CLR);

	for (i = 0; i < GM_MIB_CNT_SIZE; i++)
		gma_read16(hw, port, GM_MIB_CNT_BASE + 8*i);
	gma_write16(hw, port, GM_PHY_ADDR, reg);

	/* transmit control */
	gma_write16(hw, port, GM_TX_CTRL, TX_COL_THR(TX_COL_DEF));

	/* receive control reg: unicast + multicast + no FCS  */
	gma_write16(hw, port, GM_RX_CTRL,
			 GM_RXCR_UCF_ENA | GM_RXCR_CRC_DIS | GM_RXCR_MCF_ENA);

	/* transmit flow control */
	gma_write16(hw, port, GM_TX_FLOW_CTRL, 0xffff);

	/* transmit parameter */
	gma_write16(hw, port, GM_TX_PARAM,
			 TX_JAM_LEN_VAL(TX_JAM_LEN_DEF) |
			 TX_JAM_IPG_VAL(TX_JAM_IPG_DEF) |
			 TX_IPG_JAM_DATA(TX_IPG_JAM_DEF));

	/* configure the Serial Mode Register */
	reg = DATA_BLIND_VAL(DATA_BLIND_DEF)
		| GM_SMOD_VLAN_ENA
		| IPG_DATA_VAL(IPG_DATA_DEF);

	if (hw->dev[port]->mtu > ETH_DATA_LEN)
		reg |= GM_SMOD_JUMBO_ENA;

	gma_write16(hw, port, GM_SERIAL_MODE, reg);

	/* physical address: used for pause frames */
	gma_set_addr(hw, port, GM_SRC_ADDR_1L, addr);
	/* virtual address for data */
	gma_set_addr(hw, port, GM_SRC_ADDR_2L, addr);

	/* enable interrupt mask for counter overflows */
	gma_write16(hw, port, GM_TX_IRQ_MSK, 0);
	gma_write16(hw, port, GM_RX_IRQ_MSK, 0);
	gma_write16(hw, port, GM_TR_IRQ_MSK, 0);

	/* Initialize Mac Fifo */

	/* Configure Rx MAC FIFO */
	skge_write16(hw, SK_REG(port, RX_GMF_FL_MSK), RX_FF_FL_DEF_MSK);
	reg = GMF_OPER_ON | GMF_RX_F_FL_ON;

	/* disable Rx GMAC FIFO Flush for YUKON-Lite Rev. A0 only */
	if (is_yukon_lite_a0(hw))
		reg &= ~GMF_RX_F_FL_ON;

	skge_write8(hw, SK_REG(port, RX_GMF_CTRL_T), GMF_RST_CLR);
	skge_write16(hw, SK_REG(port, RX_GMF_CTRL_T), reg);
	/*
	 * because Pause Packet Truncation in GMAC is not working
	 * we have to increase the Flush Threshold to 64 bytes
	 * in order to flush pause packets in Rx FIFO on Yukon-1
	 */
	skge_write16(hw, SK_REG(port, RX_GMF_FL_THR), RX_GMF_FL_THR_DEF+1);

	/* Configure Tx MAC FIFO */
	skge_write8(hw, SK_REG(port, TX_GMF_CTRL_T), GMF_RST_CLR);
	skge_write16(hw, SK_REG(port, TX_GMF_CTRL_T), GMF_OPER_ON);
}
2200 | |||
/* Go into power down mode: disable polarity reversal, reset the PHY,
 * then switch on IEEE-compatible power-down mode.
 */
static void yukon_suspend(struct skge_hw *hw, int port)
{
	u16 ctrl;

	ctrl = gm_phy_read(hw, port, PHY_MARV_PHY_CTRL);
	ctrl |= PHY_M_PC_POL_R_DIS;
	gm_phy_write(hw, port, PHY_MARV_PHY_CTRL, ctrl);

	/* PHY reset so the polarity setting takes effect */
	ctrl = gm_phy_read(hw, port, PHY_MARV_CTRL);
	ctrl |= PHY_CT_RESET;
	gm_phy_write(hw, port, PHY_MARV_CTRL, ctrl);

	/* switch IEEE compatible power down mode on */
	ctrl = gm_phy_read(hw, port, PHY_MARV_CTRL);
	ctrl |= PHY_CT_PDOWN;
	gm_phy_write(hw, port, PHY_MARV_CTRL, ctrl);
}
2219 | |||
/* Shut down the Marvell GMAC/PHY for one port: mask GMAC interrupts,
 * quiesce via yukon_reset(), disable Rx/Tx, power down the PHY and
 * leave both PHY and MAC in reset.
 */
static void yukon_stop(struct skge_port *skge)
{
	struct skge_hw *hw = skge->hw;
	int port = skge->port;

	skge_write8(hw, SK_REG(port, GMAC_IRQ_MSK), 0);
	yukon_reset(hw, port);

	gma_write16(hw, port, GM_GP_CTRL,
			 gma_read16(hw, port, GM_GP_CTRL)
			 & ~(GM_GPCR_TX_ENA|GM_GPCR_RX_ENA));
	gma_read16(hw, port, GM_GP_CTRL);	/* read back to flush */

	yukon_suspend(hw, port);

	/* set GPHY Control reset */
	skge_write8(hw, SK_REG(port, GPHY_CTRL), GPC_RST_SET);
	skge_write8(hw, SK_REG(port, GMAC_CTRL), GMC_RST_SET);
}
2239 | |||
/* Read the GMAC hardware statistics into 'data', in skge_stats order.
 * The octet counters are 64-bit (hi/lo register halves); the rest are
 * plain 32-bit reads.
 */
static void yukon_get_stats(struct skge_port *skge, u64 *data)
{
	struct skge_hw *hw = skge->hw;
	int port = skge->port;
	int i;

	data[0] = (u64) gma_read32(hw, port, GM_TXO_OK_HI) << 32
		| gma_read32(hw, port, GM_TXO_OK_LO);
	data[1] = (u64) gma_read32(hw, port, GM_RXO_OK_HI) << 32
		| gma_read32(hw, port, GM_RXO_OK_LO);

	for (i = 2; i < ARRAY_SIZE(skge_stats); i++)
		data[i] = gma_read32(hw, port,
					  skge_stats[i].gma_offset);
}
2255 | |||
/* Handle a GMAC MAC-level interrupt: count and clear Rx FIFO overruns
 * and Tx FIFO underruns.
 */
static void yukon_mac_intr(struct skge_hw *hw, int port)
{
	struct net_device *dev = hw->dev[port];
	struct skge_port *skge = netdev_priv(dev);
	u8 status = skge_read8(hw, SK_REG(port, GMAC_IRQ_SRC));

	netif_printk(skge, intr, KERN_DEBUG, skge->netdev,
		     "mac interrupt status 0x%x\n", status);

	if (status & GM_IS_RX_FF_OR) {
		++dev->stats.rx_fifo_errors;
		/* clear the Rx FIFO overrun condition */
		skge_write8(hw, SK_REG(port, RX_GMF_CTRL_T), GMF_CLI_RX_FO);
	}

	if (status & GM_IS_TX_FF_UR) {
		++dev->stats.tx_fifo_errors;
		/* clear the Tx FIFO underrun condition */
		skge_write8(hw, SK_REG(port, TX_GMF_CTRL_T), GMF_CLI_TX_FU);
	}

}
2276 | |||
2277 | static u16 yukon_speed(const struct skge_hw *hw, u16 aux) | ||
2278 | { | ||
2279 | switch (aux & PHY_M_PS_SPEED_MSK) { | ||
2280 | case PHY_M_PS_SPEED_1000: | ||
2281 | return SPEED_1000; | ||
2282 | case PHY_M_PS_SPEED_100: | ||
2283 | return SPEED_100; | ||
2284 | default: | ||
2285 | return SPEED_10; | ||
2286 | } | ||
2287 | } | ||
2288 | |||
/* Complete link bring-up on a Yukon (GMAC) port: unmask MAC
 * interrupts, set duplex, enable Rx/Tx, restore the full PHY
 * interrupt mask and notify the stack via skge_link_up().
 */
static void yukon_link_up(struct skge_port *skge)
{
	struct skge_hw *hw = skge->hw;
	int port = skge->port;
	u16 reg;

	/* Enable Transmit FIFO Underrun (and other default MAC IRQs) */
	skge_write8(hw, SK_REG(port, GMAC_IRQ_MSK), GMAC_DEF_MSK);

	reg = gma_read16(hw, port, GM_GP_CTRL);
	if (skge->duplex == DUPLEX_FULL || skge->autoneg == AUTONEG_ENABLE)
		reg |= GM_GPCR_DUP_FULL;

	/* enable Rx/Tx */
	reg |= GM_GPCR_RX_ENA | GM_GPCR_TX_ENA;
	gma_write16(hw, port, GM_GP_CTRL, reg);

	gm_phy_write(hw, port, PHY_MARV_INT_MASK, PHY_M_IS_DEF_MSK);
	skge_link_up(skge);
}
2309 | |||
/* Handle link loss on a Yukon (GMAC) port: disable Rx/Tx, restore the
 * asymmetric-pause advertisement bit if we were in remote-send flow
 * control, notify the stack and re-initialise the PHY.
 */
static void yukon_link_down(struct skge_port *skge)
{
	struct skge_hw *hw = skge->hw;
	int port = skge->port;
	u16 ctrl;

	ctrl = gma_read16(hw, port, GM_GP_CTRL);
	ctrl &= ~(GM_GPCR_RX_ENA | GM_GPCR_TX_ENA);
	gma_write16(hw, port, GM_GP_CTRL, ctrl);

	if (skge->flow_status == FLOW_STAT_REM_SEND) {
		ctrl = gm_phy_read(hw, port, PHY_MARV_AUNE_ADV);
		ctrl |= PHY_M_AN_ASP;
		/* restore Asymmetric Pause bit */
		gm_phy_write(hw, port, PHY_MARV_AUNE_ADV, ctrl);
	}

	skge_link_down(skge);

	/* restart autonegotiation for the next link-up */
	yukon_init(hw, port);
}
2331 | |||
/* Yukon (Marvell PHY) interrupt service: on autonegotiation completion
 * validate the result and record speed/duplex/flow-control before
 * bringing the link up; otherwise track speed, duplex and link-status
 * change events individually.
 */
static void yukon_phy_intr(struct skge_port *skge)
{
	struct skge_hw *hw = skge->hw;
	int port = skge->port;
	const char *reason = NULL;
	u16 istatus, phystat;

	istatus = gm_phy_read(hw, port, PHY_MARV_INT_STAT);
	phystat = gm_phy_read(hw, port, PHY_MARV_PHY_STAT);

	netif_printk(skge, intr, KERN_DEBUG, skge->netdev,
		     "phy interrupt status 0x%x 0x%x\n", istatus, phystat);

	if (istatus & PHY_M_IS_AN_COMPL) {
		/* link partner signalled a fault during negotiation */
		if (gm_phy_read(hw, port, PHY_MARV_AUNE_LP)
		    & PHY_M_AN_RF) {
			reason = "remote fault";
			goto failed;
		}

		if (gm_phy_read(hw, port, PHY_MARV_1000T_STAT) & PHY_B_1000S_MSF) {
			reason = "master/slave fault";
			goto failed;
		}

		/* speed/duplex result must be resolved before using it */
		if (!(phystat & PHY_M_PS_SPDUP_RES)) {
			reason = "speed/duplex";
			goto failed;
		}

		skge->duplex = (phystat & PHY_M_PS_FULL_DUP)
			? DUPLEX_FULL : DUPLEX_HALF;
		skge->speed = yukon_speed(hw, phystat);

		/* We are using IEEE 802.3z/D5.0 Table 37-4 */
		switch (phystat & PHY_M_PS_PAUSE_MSK) {
		case PHY_M_PS_PAUSE_MSK:
			skge->flow_status = FLOW_STAT_SYMMETRIC;
			break;
		case PHY_M_PS_RX_P_EN:
			skge->flow_status = FLOW_STAT_REM_SEND;
			break;
		case PHY_M_PS_TX_P_EN:
			skge->flow_status = FLOW_STAT_LOC_SEND;
			break;
		default:
			skge->flow_status = FLOW_STAT_NONE;
		}

		/* pause is unusable without flow control or at half duplex
		 * below gigabit speed
		 */
		if (skge->flow_status == FLOW_STAT_NONE ||
		    (skge->speed < SPEED_1000 && skge->duplex == DUPLEX_HALF))
			skge_write8(hw, SK_REG(port, GMAC_CTRL), GMC_PAUSE_OFF);
		else
			skge_write8(hw, SK_REG(port, GMAC_CTRL), GMC_PAUSE_ON);
		yukon_link_up(skge);
		return;
	}

	if (istatus & PHY_M_IS_LSP_CHANGE)
		skge->speed = yukon_speed(hw, phystat);

	if (istatus & PHY_M_IS_DUP_CHANGE)
		skge->duplex = (phystat & PHY_M_PS_FULL_DUP) ? DUPLEX_FULL : DUPLEX_HALF;
	if (istatus & PHY_M_IS_LST_CHANGE) {
		if (phystat & PHY_M_PS_LINK_UP)
			yukon_link_up(skge);
		else
			yukon_link_down(skge);
	}
	return;
 failed:
	pr_err("%s: autonegotiation failed (%s)\n", skge->netdev->name, reason);

	/* XXX restart autonegotiation? */
}
2407 | |||
/* Reset and re-initialize the PHY/MAC (used after settings changes)
 * and reprogram the multicast filter; the link comes back through the
 * normal PHY interrupt path.
 */
static void skge_phy_reset(struct skge_port *skge)
{
	struct skge_hw *hw = skge->hw;
	int port = skge->port;
	struct net_device *dev = hw->dev[port];

	netif_stop_queue(skge->netdev);
	netif_carrier_off(skge->netdev);

	/* PHY register access is serialized by phy_lock */
	spin_lock_bh(&hw->phy_lock);
	if (is_genesis(hw)) {
		genesis_reset(hw, port);
		genesis_mac_init(hw, port);
	} else {
		yukon_reset(hw, port);
		yukon_init(hw, port);
	}
	spin_unlock_bh(&hw->phy_lock);

	skge_set_multicast(dev);
}
2429 | |||
/* Basic MII support */
/* SIOCGMIIPHY/SIOCGMIIREG/SIOCSMIIREG ioctl handler; other commands
 * return -EOPNOTSUPP.  PHY accesses are serialized by phy_lock.
 */
static int skge_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
{
	struct mii_ioctl_data *data = if_mii(ifr);
	struct skge_port *skge = netdev_priv(dev);
	struct skge_hw *hw = skge->hw;
	int err = -EOPNOTSUPP;

	if (!netif_running(dev))
		return -ENODEV;	/* Phy still in reset */

	switch (cmd) {
	case SIOCGMIIPHY:
		data->phy_id = hw->phy_addr;

		/* fallthru */
	case SIOCGMIIREG: {
		u16 val = 0;
		spin_lock_bh(&hw->phy_lock);

		if (is_genesis(hw))
			err = __xm_phy_read(hw, skge->port, data->reg_num & 0x1f, &val);
		else
			err = __gm_phy_read(hw, skge->port, data->reg_num & 0x1f, &val);
		spin_unlock_bh(&hw->phy_lock);
		data->val_out = val;
		break;
	}

	case SIOCSMIIREG:
		spin_lock_bh(&hw->phy_lock);
		if (is_genesis(hw))
			err = xm_phy_write(hw, skge->port, data->reg_num & 0x1f,
					   data->val_in);
		else
			err = gm_phy_write(hw, skge->port, data->reg_num & 0x1f,
					   data->val_in);
		spin_unlock_bh(&hw->phy_lock);
		break;
	}
	return err;
}
2472 | |||
/* Carve a window of the adapter's RAM buffer out for queue @q.
 * @start/@len are byte values; the RAM buffer registers are programmed
 * in 8-byte units.  Rx queues get upper/lower flow-control thresholds,
 * Tx queues are switched to store-and-forward.
 */
static void skge_ramset(struct skge_hw *hw, u16 q, u32 start, size_t len)
{
	u32 end;

	/* convert bytes to 8-byte hardware units */
	start /= 8;
	len /= 8;
	end = start + len - 1;

	skge_write8(hw, RB_ADDR(q, RB_CTRL), RB_RST_CLR);
	skge_write32(hw, RB_ADDR(q, RB_START), start);
	skge_write32(hw, RB_ADDR(q, RB_WP), start);
	skge_write32(hw, RB_ADDR(q, RB_RP), start);
	skge_write32(hw, RB_ADDR(q, RB_END), end);

	if (q == Q_R1 || q == Q_R2) {
		/* Set thresholds on receive queue's */
		skge_write32(hw, RB_ADDR(q, RB_RX_UTPP),
			     start + (2*len)/3);
		skge_write32(hw, RB_ADDR(q, RB_RX_LTPP),
			     start + (len/3));
	} else {
		/* Enable store & forward on Tx queue's because
		 * Tx FIFO is only 4K on Genesis and 1K on Yukon
		 */
		skge_write8(hw, RB_ADDR(q, RB_CTRL), RB_ENA_STFWD);
	}

	skge_write8(hw, RB_ADDR(q, RB_CTRL), RB_ENA_OP_MD);
}
2502 | |||
/* Setup Bus Memory Interface */
/* Program the BMU for queue @q: clear reset, set the FIFO watermark,
 * and point the descriptor address registers at element @e (the 64-bit
 * bus address is split across the high/low registers).
 */
static void skge_qset(struct skge_port *skge, u16 q,
		      const struct skge_element *e)
{
	struct skge_hw *hw = skge->hw;
	u32 watermark = 0x600;
	u64 base = skge->dma + (e->desc - skge->mem);

	/* optimization to reduce window on 32bit/33mhz */
	if ((skge_read16(hw, B0_CTST) & (CS_BUS_CLOCK | CS_BUS_SLOT_SZ)) == 0)
		watermark /= 2;

	skge_write32(hw, Q_ADDR(q, Q_CSR), CSR_CLR_RESET);
	skge_write32(hw, Q_ADDR(q, Q_F), watermark);
	skge_write32(hw, Q_ADDR(q, Q_DA_H), (u32)(base >> 32));
	skge_write32(hw, Q_ADDR(q, Q_DA_L), (u32)base);
}
2520 | |||
/* Open the port: allocate coherent descriptor memory and the rx/tx
 * rings, initialize the MAC, split the RAM buffer between queues,
 * program the BMUs and start the receiver.  Returns 0 or -errno.
 */
static int skge_up(struct net_device *dev)
{
	struct skge_port *skge = netdev_priv(dev);
	struct skge_hw *hw = skge->hw;
	int port = skge->port;
	u32 chunk, ram_addr;
	size_t rx_size, tx_size;
	int err;

	if (!is_valid_ether_addr(dev->dev_addr))
		return -EINVAL;

	netif_info(skge, ifup, skge->netdev, "enabling interface\n");

	/* rx buffers must hold a full frame at the current MTU */
	if (dev->mtu > RX_BUF_SIZE)
		skge->rx_buf_size = dev->mtu + ETH_HLEN;
	else
		skge->rx_buf_size = RX_BUF_SIZE;


	/* one contiguous coherent area holds both descriptor rings */
	rx_size = skge->rx_ring.count * sizeof(struct skge_rx_desc);
	tx_size = skge->tx_ring.count * sizeof(struct skge_tx_desc);
	skge->mem_size = tx_size + rx_size;
	skge->mem = pci_alloc_consistent(hw->pdev, skge->mem_size, &skge->dma);
	if (!skge->mem)
		return -ENOMEM;

	/* descriptors must be 8-byte aligned for the BMU */
	BUG_ON(skge->dma & 7);

	/* hardware cannot chase a ring across a 4GB boundary */
	if ((u64)skge->dma >> 32 != ((u64) skge->dma + skge->mem_size) >> 32) {
		dev_err(&hw->pdev->dev, "pci_alloc_consistent region crosses 4G boundary\n");
		err = -EINVAL;
		goto free_pci_mem;
	}

	memset(skge->mem, 0, skge->mem_size);

	err = skge_ring_alloc(&skge->rx_ring, skge->mem, skge->dma);
	if (err)
		goto free_pci_mem;

	err = skge_rx_fill(dev);
	if (err)
		goto free_rx_ring;

	err = skge_ring_alloc(&skge->tx_ring, skge->mem + rx_size,
			      skge->dma + rx_size);
	if (err)
		goto free_rx_ring;

	/* Initialize MAC */
	spin_lock_bh(&hw->phy_lock);
	if (is_genesis(hw))
		genesis_mac_init(hw, port);
	else
		yukon_mac_init(hw, port);
	spin_unlock_bh(&hw->phy_lock);

	/* Configure RAMbuffers - equally between ports and tx/rx */
	chunk = (hw->ram_size - hw->ram_offset) / (hw->ports * 2);
	ram_addr = hw->ram_offset + 2 * chunk * port;

	skge_ramset(hw, rxqaddr[port], ram_addr, chunk);
	skge_qset(skge, rxqaddr[port], skge->rx_ring.to_clean);

	BUG_ON(skge->tx_ring.to_use != skge->tx_ring.to_clean);
	skge_ramset(hw, txqaddr[port], ram_addr+chunk, chunk);
	skge_qset(skge, txqaddr[port], skge->tx_ring.to_use);

	/* Start receiver BMU */
	wmb();
	skge_write8(hw, Q_ADDR(rxqaddr[port], Q_CSR), CSR_START | CSR_IRQ_CL_F);
	skge_led(skge, LED_MODE_ON);

	/* unmask this port's interrupts under the shared IRQ lock */
	spin_lock_irq(&hw->hw_lock);
	hw->intr_mask |= portmask[port];
	skge_write32(hw, B0_IMSK, hw->intr_mask);
	spin_unlock_irq(&hw->hw_lock);

	napi_enable(&skge->napi);
	return 0;

 free_rx_ring:
	skge_rx_clean(skge);
	kfree(skge->rx_ring.start);
 free_pci_mem:
	pci_free_consistent(hw->pdev, skge->mem_size, skge->mem, skge->dma);
	skge->mem = NULL;

	return err;
}
2612 | |||
/* stop receiver */
static void skge_rx_stop(struct skge_hw *hw, int port)
{
	/* stop the Rx BMU, reset and disable the RAM buffer queue,
	 * then hold the BMU state machine in reset
	 */
	skge_write8(hw, Q_ADDR(rxqaddr[port], Q_CSR), CSR_STOP);
	skge_write32(hw, RB_ADDR(port ? Q_R2 : Q_R1, RB_CTRL),
		     RB_RST_SET|RB_DIS_OP_MD);
	skge_write32(hw, Q_ADDR(rxqaddr[port], Q_CSR), CSR_SET_RESET);
}
2621 | |||
/* Close the port: mask interrupts, stop NAPI, shut down MAC,
 * transmitter, Tx arbiter, FIFOs and receiver, then free the ring
 * skbs and the coherent descriptor memory.
 */
static int skge_down(struct net_device *dev)
{
	struct skge_port *skge = netdev_priv(dev);
	struct skge_hw *hw = skge->hw;
	int port = skge->port;

	/* never opened, or skge_up() failed */
	if (skge->mem == NULL)
		return 0;

	netif_info(skge, ifdown, skge->netdev, "disabling interface\n");

	netif_tx_disable(dev);

	/* XMAC PHYs are polled from a timer; stop it first */
	if (is_genesis(hw) && hw->phy_type == SK_PHY_XMAC)
		del_timer_sync(&skge->link_timer);

	napi_disable(&skge->napi);
	netif_carrier_off(dev);

	/* mask this port's interrupts under the shared IRQ lock */
	spin_lock_irq(&hw->hw_lock);
	hw->intr_mask &= ~portmask[port];
	skge_write32(hw, B0_IMSK, hw->intr_mask);
	spin_unlock_irq(&hw->hw_lock);

	skge_write8(skge->hw, SK_REG(skge->port, LNK_LED_REG), LED_OFF);
	if (is_genesis(hw))
		genesis_stop(skge);
	else
		yukon_stop(skge);

	/* Stop transmitter */
	skge_write8(hw, Q_ADDR(txqaddr[port], Q_CSR), CSR_STOP);
	skge_write32(hw, RB_ADDR(txqaddr[port], RB_CTRL),
		     RB_RST_SET|RB_DIS_OP_MD);


	/* Disable Force Sync bit and Enable Alloc bit */
	skge_write8(hw, SK_REG(port, TXA_CTRL),
		    TXA_DIS_FSYNC | TXA_DIS_ALLOC | TXA_STOP_RC);

	/* Stop Interval Timer and Limit Counter of Tx Arbiter */
	skge_write32(hw, SK_REG(port, TXA_ITI_INI), 0L);
	skge_write32(hw, SK_REG(port, TXA_LIM_INI), 0L);

	/* Reset PCI FIFO */
	skge_write32(hw, Q_ADDR(txqaddr[port], Q_CSR), CSR_SET_RESET);
	skge_write32(hw, RB_ADDR(txqaddr[port], RB_CTRL), RB_RST_SET);

	/* Reset the RAM Buffer async Tx queue */
	skge_write8(hw, RB_ADDR(port == 0 ? Q_XA1 : Q_XA2, RB_CTRL), RB_RST_SET);

	skge_rx_stop(hw, port);

	/* hold the MAC FIFOs in reset while the port is down */
	if (is_genesis(hw)) {
		skge_write8(hw, SK_REG(port, TX_MFF_CTRL2), MFF_RST_SET);
		skge_write8(hw, SK_REG(port, RX_MFF_CTRL2), MFF_RST_SET);
	} else {
		skge_write8(hw, SK_REG(port, RX_GMF_CTRL_T), GMF_RST_SET);
		skge_write8(hw, SK_REG(port, TX_GMF_CTRL_T), GMF_RST_SET);
	}

	skge_led(skge, LED_MODE_OFF);

	/* free any skbs still attached to the rings */
	netif_tx_lock_bh(dev);
	skge_tx_clean(dev);
	netif_tx_unlock_bh(dev);

	skge_rx_clean(skge);

	kfree(skge->rx_ring.start);
	kfree(skge->tx_ring.start);
	pci_free_consistent(hw->pdev, skge->mem_size, skge->mem, skge->dma);
	skge->mem = NULL;
	return 0;
}
2697 | |||
2698 | static inline int skge_avail(const struct skge_ring *ring) | ||
2699 | { | ||
2700 | smp_mb(); | ||
2701 | return ((ring->to_clean > ring->to_use) ? 0 : ring->count) | ||
2702 | + (ring->to_clean - ring->to_use) - 1; | ||
2703 | } | ||
2704 | |||
2705 | static netdev_tx_t skge_xmit_frame(struct sk_buff *skb, | ||
2706 | struct net_device *dev) | ||
2707 | { | ||
2708 | struct skge_port *skge = netdev_priv(dev); | ||
2709 | struct skge_hw *hw = skge->hw; | ||
2710 | struct skge_element *e; | ||
2711 | struct skge_tx_desc *td; | ||
2712 | int i; | ||
2713 | u32 control, len; | ||
2714 | u64 map; | ||
2715 | |||
2716 | if (skb_padto(skb, ETH_ZLEN)) | ||
2717 | return NETDEV_TX_OK; | ||
2718 | |||
2719 | if (unlikely(skge_avail(&skge->tx_ring) < skb_shinfo(skb)->nr_frags + 1)) | ||
2720 | return NETDEV_TX_BUSY; | ||
2721 | |||
2722 | e = skge->tx_ring.to_use; | ||
2723 | td = e->desc; | ||
2724 | BUG_ON(td->control & BMU_OWN); | ||
2725 | e->skb = skb; | ||
2726 | len = skb_headlen(skb); | ||
2727 | map = pci_map_single(hw->pdev, skb->data, len, PCI_DMA_TODEVICE); | ||
2728 | dma_unmap_addr_set(e, mapaddr, map); | ||
2729 | dma_unmap_len_set(e, maplen, len); | ||
2730 | |||
2731 | td->dma_lo = map; | ||
2732 | td->dma_hi = map >> 32; | ||
2733 | |||
2734 | if (skb->ip_summed == CHECKSUM_PARTIAL) { | ||
2735 | const int offset = skb_checksum_start_offset(skb); | ||
2736 | |||
2737 | /* This seems backwards, but it is what the sk98lin | ||
2738 | * does. Looks like hardware is wrong? | ||
2739 | */ | ||
2740 | if (ipip_hdr(skb)->protocol == IPPROTO_UDP && | ||
2741 | hw->chip_rev == 0 && hw->chip_id == CHIP_ID_YUKON) | ||
2742 | control = BMU_TCP_CHECK; | ||
2743 | else | ||
2744 | control = BMU_UDP_CHECK; | ||
2745 | |||
2746 | td->csum_offs = 0; | ||
2747 | td->csum_start = offset; | ||
2748 | td->csum_write = offset + skb->csum_offset; | ||
2749 | } else | ||
2750 | control = BMU_CHECK; | ||
2751 | |||
2752 | if (!skb_shinfo(skb)->nr_frags) /* single buffer i.e. no fragments */ | ||
2753 | control |= BMU_EOF | BMU_IRQ_EOF; | ||
2754 | else { | ||
2755 | struct skge_tx_desc *tf = td; | ||
2756 | |||
2757 | control |= BMU_STFWD; | ||
2758 | for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) { | ||
2759 | skb_frag_t *frag = &skb_shinfo(skb)->frags[i]; | ||
2760 | |||
2761 | map = pci_map_page(hw->pdev, frag->page, frag->page_offset, | ||
2762 | frag->size, PCI_DMA_TODEVICE); | ||
2763 | |||
2764 | e = e->next; | ||
2765 | e->skb = skb; | ||
2766 | tf = e->desc; | ||
2767 | BUG_ON(tf->control & BMU_OWN); | ||
2768 | |||
2769 | tf->dma_lo = map; | ||
2770 | tf->dma_hi = (u64) map >> 32; | ||
2771 | dma_unmap_addr_set(e, mapaddr, map); | ||
2772 | dma_unmap_len_set(e, maplen, frag->size); | ||
2773 | |||
2774 | tf->control = BMU_OWN | BMU_SW | control | frag->size; | ||
2775 | } | ||
2776 | tf->control |= BMU_EOF | BMU_IRQ_EOF; | ||
2777 | } | ||
2778 | /* Make sure all the descriptors written */ | ||
2779 | wmb(); | ||
2780 | td->control = BMU_OWN | BMU_SW | BMU_STF | control | len; | ||
2781 | wmb(); | ||
2782 | |||
2783 | skge_write8(hw, Q_ADDR(txqaddr[skge->port], Q_CSR), CSR_START); | ||
2784 | |||
2785 | netif_printk(skge, tx_queued, KERN_DEBUG, skge->netdev, | ||
2786 | "tx queued, slot %td, len %d\n", | ||
2787 | e - skge->tx_ring.start, skb->len); | ||
2788 | |||
2789 | skge->tx_ring.to_use = e->next; | ||
2790 | smp_wmb(); | ||
2791 | |||
2792 | if (skge_avail(&skge->tx_ring) <= TX_LOW_WATER) { | ||
2793 | netdev_dbg(dev, "transmit queue full\n"); | ||
2794 | netif_stop_queue(dev); | ||
2795 | } | ||
2796 | |||
2797 | return NETDEV_TX_OK; | ||
2798 | } | ||
2799 | |||
2800 | |||
/* Free resources associated with this ring element */
/* Unmaps the element's DMA buffer (single mapping for the skb header,
 * page mapping for a fragment) and, on the last descriptor of a frame
 * (BMU_EOF), frees the skb itself.
 */
static void skge_tx_free(struct skge_port *skge, struct skge_element *e,
			 u32 control)
{
	struct pci_dev *pdev = skge->hw->pdev;

	/* skb header vs. fragment */
	if (control & BMU_STF)
		pci_unmap_single(pdev, dma_unmap_addr(e, mapaddr),
				 dma_unmap_len(e, maplen),
				 PCI_DMA_TODEVICE);
	else
		pci_unmap_page(pdev, dma_unmap_addr(e, mapaddr),
			       dma_unmap_len(e, maplen),
			       PCI_DMA_TODEVICE);

	if (control & BMU_EOF) {
		netif_printk(skge, tx_done, KERN_DEBUG, skge->netdev,
			     "tx done slot %td\n", e - skge->tx_ring.start);

		dev_kfree_skb(e->skb);
	}
}
2824 | |||
/* Free all buffers in transmit ring */
/* Walks every outstanding element regardless of hardware ownership;
 * callers must have stopped the transmitter first.
 */
static void skge_tx_clean(struct net_device *dev)
{
	struct skge_port *skge = netdev_priv(dev);
	struct skge_element *e;

	for (e = skge->tx_ring.to_clean; e != skge->tx_ring.to_use; e = e->next) {
		struct skge_tx_desc *td = e->desc;
		skge_tx_free(skge, e, td->control);
		/* clear ownership so the slot is reusable */
		td->control = 0;
	}

	skge->tx_ring.to_clean = e;
}
2839 | |||
/* Transmit watchdog: stop the Tx BMU, discard everything queued on the
 * tx ring and let the stack start submitting again.
 */
static void skge_tx_timeout(struct net_device *dev)
{
	struct skge_port *skge = netdev_priv(dev);

	netif_printk(skge, timer, KERN_DEBUG, skge->netdev, "tx timeout\n");

	skge_write8(skge->hw, Q_ADDR(txqaddr[skge->port], Q_CSR), CSR_STOP);
	skge_tx_clean(dev);
	netif_wake_queue(dev);
}
2850 | |||
2851 | static int skge_change_mtu(struct net_device *dev, int new_mtu) | ||
2852 | { | ||
2853 | int err; | ||
2854 | |||
2855 | if (new_mtu < ETH_ZLEN || new_mtu > ETH_JUMBO_MTU) | ||
2856 | return -EINVAL; | ||
2857 | |||
2858 | if (!netif_running(dev)) { | ||
2859 | dev->mtu = new_mtu; | ||
2860 | return 0; | ||
2861 | } | ||
2862 | |||
2863 | skge_down(dev); | ||
2864 | |||
2865 | dev->mtu = new_mtu; | ||
2866 | |||
2867 | err = skge_up(dev); | ||
2868 | if (err) | ||
2869 | dev_close(dev); | ||
2870 | |||
2871 | return err; | ||
2872 | } | ||
2873 | |||
/* destination address of IEEE 802.3x pause (flow control) frames */
static const u8 pause_mc_addr[ETH_ALEN] = { 0x1, 0x80, 0xc2, 0x0, 0x0, 0x1 };
2875 | |||
2876 | static void genesis_add_filter(u8 filter[8], const u8 *addr) | ||
2877 | { | ||
2878 | u32 crc, bit; | ||
2879 | |||
2880 | crc = ether_crc_le(ETH_ALEN, addr); | ||
2881 | bit = ~crc & 0x3f; | ||
2882 | filter[bit/8] |= 1 << (bit%8); | ||
2883 | } | ||
2884 | |||
/* Program the XMAC receive mode and 64-bit multicast hash filter from
 * the device's flags and multicast list; the pause multicast address is
 * always accepted while flow control is receiving.
 */
static void genesis_set_multicast(struct net_device *dev)
{
	struct skge_port *skge = netdev_priv(dev);
	struct skge_hw *hw = skge->hw;
	int port = skge->port;
	struct netdev_hw_addr *ha;
	u32 mode;
	u8 filter[8];

	mode = xm_read32(hw, port, XM_MODE);
	mode |= XM_MD_ENA_HASH;
	if (dev->flags & IFF_PROMISC)
		mode |= XM_MD_ENA_PROM;
	else
		mode &= ~XM_MD_ENA_PROM;

	if (dev->flags & IFF_ALLMULTI)
		/* accept every multicast frame */
		memset(filter, 0xff, sizeof(filter));
	else {
		memset(filter, 0, sizeof(filter));

		/* keep receiving pause frames while flow control is active */
		if (skge->flow_status == FLOW_STAT_REM_SEND ||
		    skge->flow_status == FLOW_STAT_SYMMETRIC)
			genesis_add_filter(filter, pause_mc_addr);

		netdev_for_each_mc_addr(ha, dev)
			genesis_add_filter(filter, ha->addr);
	}

	xm_write32(hw, port, XM_MODE, mode);
	xm_outhash(hw, port, XM_HSM, filter);
}
2917 | |||
2918 | static void yukon_add_filter(u8 filter[8], const u8 *addr) | ||
2919 | { | ||
2920 | u32 bit = ether_crc(ETH_ALEN, addr) & 0x3f; | ||
2921 | filter[bit/8] |= 1 << (bit%8); | ||
2922 | } | ||
2923 | |||
/* Program the GMAC receive control and 64-bit multicast hash filter
 * from the device's flags and multicast list; the pause multicast
 * address stays accepted while receive flow control is active.
 */
static void yukon_set_multicast(struct net_device *dev)
{
	struct skge_port *skge = netdev_priv(dev);
	struct skge_hw *hw = skge->hw;
	int port = skge->port;
	struct netdev_hw_addr *ha;
	/* pause frames are received when the link partner may send them */
	int rx_pause = (skge->flow_status == FLOW_STAT_REM_SEND ||
			skge->flow_status == FLOW_STAT_SYMMETRIC);
	u16 reg;
	u8 filter[8];

	memset(filter, 0, sizeof(filter));

	reg = gma_read16(hw, port, GM_RX_CTRL);
	reg |= GM_RXCR_UCF_ENA;

	if (dev->flags & IFF_PROMISC) 		/* promiscuous */
		reg &= ~(GM_RXCR_UCF_ENA | GM_RXCR_MCF_ENA);
	else if (dev->flags & IFF_ALLMULTI)	/* all multicast */
		memset(filter, 0xff, sizeof(filter));
	else if (netdev_mc_empty(dev) && !rx_pause)/* no multicast */
		reg &= ~GM_RXCR_MCF_ENA;
	else {
		reg |= GM_RXCR_MCF_ENA;

		if (rx_pause)
			yukon_add_filter(filter, pause_mc_addr);

		netdev_for_each_mc_addr(ha, dev)
			yukon_add_filter(filter, ha->addr);
	}


	/* hash bits are written 16 at a time, low byte first */
	gma_write16(hw, port, GM_MC_ADDR_H1,
		    (u16)filter[0] | ((u16)filter[1] << 8));
	gma_write16(hw, port, GM_MC_ADDR_H2,
		    (u16)filter[2] | ((u16)filter[3] << 8));
	gma_write16(hw, port, GM_MC_ADDR_H3,
		    (u16)filter[4] | ((u16)filter[5] << 8));
	gma_write16(hw, port, GM_MC_ADDR_H4,
		    (u16)filter[6] | ((u16)filter[7] << 8));

	gma_write16(hw, port, GM_RX_CTRL, reg);
}
2968 | |||
2969 | static inline u16 phy_length(const struct skge_hw *hw, u32 status) | ||
2970 | { | ||
2971 | if (is_genesis(hw)) | ||
2972 | return status >> XMR_FS_LEN_SHIFT; | ||
2973 | else | ||
2974 | return status >> GMR_FS_LEN_SHIFT; | ||
2975 | } | ||
2976 | |||
2977 | static inline int bad_phy_status(const struct skge_hw *hw, u32 status) | ||
2978 | { | ||
2979 | if (is_genesis(hw)) | ||
2980 | return (status & (XMR_FS_ERR | XMR_FS_2L_VLAN)) != 0; | ||
2981 | else | ||
2982 | return (status & GMR_FS_ANY_ERR) || | ||
2983 | (status & GMR_FS_RX_OK) == 0; | ||
2984 | } | ||
2985 | |||
2986 | static void skge_set_multicast(struct net_device *dev) | ||
2987 | { | ||
2988 | struct skge_port *skge = netdev_priv(dev); | ||
2989 | |||
2990 | if (is_genesis(skge->hw)) | ||
2991 | genesis_set_multicast(dev); | ||
2992 | else | ||
2993 | yukon_set_multicast(dev); | ||
2994 | |||
2995 | } | ||
2996 | |||
2997 | |||
/* Get receive buffer from descriptor.
 * Handles copy of small buffers and reallocation failures
 * Returns the skb to hand up the stack, or NULL when the frame was
 * bad/dropped and the buffer was resubmitted to the hardware.
 */
static struct sk_buff *skge_rx_get(struct net_device *dev,
				   struct skge_element *e,
				   u32 control, u32 status, u16 csum)
{
	struct skge_port *skge = netdev_priv(dev);
	struct sk_buff *skb;
	u16 len = control & BMU_BBC;

	netif_printk(skge, rx_status, KERN_DEBUG, skge->netdev,
		     "rx slot %td status 0x%x len %d\n",
		     e - skge->rx_ring.start, status, len);

	if (len > skge->rx_buf_size)
		goto error;

	/* frame must start and end in this one buffer */
	if ((control & (BMU_EOF|BMU_STF)) != (BMU_STF|BMU_EOF))
		goto error;

	if (bad_phy_status(skge->hw, status))
		goto error;

	/* MAC length field must agree with the BMU byte count */
	if (phy_length(skge->hw, status) != len)
		goto error;

	if (len < RX_COPY_THRESHOLD) {
		/* small frame: copy into a fresh skb, recycle the buffer */
		skb = netdev_alloc_skb_ip_align(dev, len);
		if (!skb)
			goto resubmit;

		pci_dma_sync_single_for_cpu(skge->hw->pdev,
					    dma_unmap_addr(e, mapaddr),
					    len, PCI_DMA_FROMDEVICE);
		skb_copy_from_linear_data(e->skb, skb->data, len);
		pci_dma_sync_single_for_device(skge->hw->pdev,
					       dma_unmap_addr(e, mapaddr),
					       len, PCI_DMA_FROMDEVICE);
		skge_rx_reuse(e, skge->rx_buf_size);
	} else {
		/* large frame: pass the mapped buffer up, replace it */
		struct sk_buff *nskb;

		nskb = netdev_alloc_skb_ip_align(dev, skge->rx_buf_size);
		if (!nskb)
			goto resubmit;

		pci_unmap_single(skge->hw->pdev,
				 dma_unmap_addr(e, mapaddr),
				 dma_unmap_len(e, maplen),
				 PCI_DMA_FROMDEVICE);
		skb = e->skb;
		prefetch(skb->data);
		skge_rx_setup(skge, e, nskb, skge->rx_buf_size);
	}

	skb_put(skb, len);

	if (dev->features & NETIF_F_RXCSUM) {
		/* hardware supplies the raw checksum in csum2 */
		skb->csum = csum;
		skb->ip_summed = CHECKSUM_COMPLETE;
	}

	skb->protocol = eth_type_trans(skb, dev);

	return skb;
error:

	netif_printk(skge, rx_err, KERN_DEBUG, skge->netdev,
		     "rx err, slot %td control 0x%x status 0x%x\n",
		     e - skge->rx_ring.start, control, status);

	if (is_genesis(skge->hw)) {
		if (status & (XMR_FS_RUNT|XMR_FS_LNG_ERR))
			dev->stats.rx_length_errors++;
		if (status & XMR_FS_FRA_ERR)
			dev->stats.rx_frame_errors++;
		if (status & XMR_FS_FCS_ERR)
			dev->stats.rx_crc_errors++;
	} else {
		if (status & (GMR_FS_LONG_ERR|GMR_FS_UN_SIZE))
			dev->stats.rx_length_errors++;
		if (status & GMR_FS_FRAGMENT)
			dev->stats.rx_frame_errors++;
		if (status & GMR_FS_CRC_ERR)
			dev->stats.rx_crc_errors++;
	}

resubmit:
	/* hand the buffer back to the hardware */
	skge_rx_reuse(e, skge->rx_buf_size);
	return NULL;
}
3090 | |||
/* Free all buffers in Tx ring which are no longer owned by device */
static void skge_tx_done(struct net_device *dev)
{
	struct skge_port *skge = netdev_priv(dev);
	struct skge_ring *ring = &skge->tx_ring;
	struct skge_element *e;

	/* acknowledge the Tx end-of-frame interrupt */
	skge_write8(skge->hw, Q_ADDR(txqaddr[skge->port], Q_CSR), CSR_IRQ_CL_F);

	for (e = ring->to_clean; e != ring->to_use; e = e->next) {
		u32 control = ((const struct skge_tx_desc *) e->desc)->control;

		/* stop at the first descriptor still owned by hardware */
		if (control & BMU_OWN)
			break;

		skge_tx_free(skge, e, control);
	}
	skge->tx_ring.to_clean = e;

	/* Can run lockless until we need to synchronize to restart queue. */
	smp_mb();

	/* re-check under the tx lock to avoid racing with skge_xmit_frame */
	if (unlikely(netif_queue_stopped(dev) &&
		     skge_avail(&skge->tx_ring) > TX_LOW_WATER)) {
		netif_tx_lock(dev);
		if (unlikely(netif_queue_stopped(dev) &&
			     skge_avail(&skge->tx_ring) > TX_LOW_WATER)) {
			netif_wake_queue(dev);

		}
		netif_tx_unlock(dev);
	}
}
3124 | |||
/* NAPI poll: reap completed transmits, pass received frames up via
 * GRO, restart the receiver, and when the budget is not exhausted
 * complete NAPI and re-enable this port's interrupt sources.
 */
static int skge_poll(struct napi_struct *napi, int to_do)
{
	struct skge_port *skge = container_of(napi, struct skge_port, napi);
	struct net_device *dev = skge->netdev;
	struct skge_hw *hw = skge->hw;
	struct skge_ring *ring = &skge->rx_ring;
	struct skge_element *e;
	int work_done = 0;

	skge_tx_done(dev);

	/* acknowledge the Rx end-of-frame interrupt */
	skge_write8(hw, Q_ADDR(rxqaddr[skge->port], Q_CSR), CSR_IRQ_CL_F);

	for (e = ring->to_clean; prefetch(e->next), work_done < to_do; e = e->next) {
		struct skge_rx_desc *rd = e->desc;
		struct sk_buff *skb;
		u32 control;

		/* order the descriptor reads against earlier iterations */
		rmb();
		control = rd->control;
		if (control & BMU_OWN)
			break;

		skb = skge_rx_get(dev, e, control, rd->status, rd->csum2);
		if (likely(skb)) {
			napi_gro_receive(napi, skb);
			++work_done;
		}
	}
	ring->to_clean = e;

	/* restart receiver */
	wmb();
	skge_write8(hw, Q_ADDR(rxqaddr[skge->port], Q_CSR), CSR_START);

	if (work_done < to_do) {
		unsigned long flags;

		napi_gro_flush(napi);
		/* completing NAPI and touching the shared interrupt mask
		 * must be atomic with respect to the irq handler
		 */
		spin_lock_irqsave(&hw->hw_lock, flags);
		__napi_complete(napi);
		hw->intr_mask |= napimask[skge->port];
		skge_write32(hw, B0_IMSK, hw->intr_mask);
		skge_read32(hw, B0_IMSK);	/* flush the posted write */
		spin_unlock_irqrestore(&hw->hw_lock, flags);
	}

	return work_done;
}
3174 | |||
/* Parity errors seem to happen when Genesis is connected to a switch
 * with no other ports present. Heartbeat error??
 */
static void skge_mac_parity(struct skge_hw *hw, int port)
{
	struct net_device *dev = hw->dev[port];

	++dev->stats.tx_heartbeat_errors;

	/* clear the parity condition in the chip-specific FIFO control */
	if (is_genesis(hw))
		skge_write16(hw, SK_REG(port, TX_MFF_CTRL1),
			     MFF_CLR_PERR);
	else
		/* HW-Bug #8: cleared by GMF_CLI_TX_FC instead of GMF_CLI_TX_PE */
		skge_write8(hw, SK_REG(port, TX_GMF_CTRL_T),
			    (hw->chip_id == CHIP_ID_YUKON && hw->chip_rev == 0)
			    ? GMF_CLI_TX_FC : GMF_CLI_TX_PE);
}
3193 | |||
/* Route a MAC interrupt to the chip-specific service routine. */
static void skge_mac_intr(struct skge_hw *hw, int port)
{
	if (!is_genesis(hw))
		yukon_mac_intr(hw, port);
	else
		genesis_mac_intr(hw, port);
}
3201 | |||
/* Handle device specific framing and timeout interrupts */
/* Reads B0_HWE_ISRC and clears each reported hardware error: status
 * FIFO conditions, RAM parity, MAC parity, receive-queue parity and
 * PCI bus errors.  Persistent master/status errors are masked off.
 */
static void skge_error_irq(struct skge_hw *hw)
{
	struct pci_dev *pdev = hw->pdev;
	u32 hwstatus = skge_read32(hw, B0_HWE_ISRC);

	if (is_genesis(hw)) {
		/* clear xmac errors */
		if (hwstatus & (IS_NO_STAT_M1|IS_NO_TIST_M1))
			skge_write16(hw, RX_MFF_CTRL1, MFF_CLR_INSTAT);
		if (hwstatus & (IS_NO_STAT_M2|IS_NO_TIST_M2))
			skge_write16(hw, RX_MFF_CTRL2, MFF_CLR_INSTAT);
	} else {
		/* Timestamp (unused) overflow */
		if (hwstatus & IS_IRQ_TIST_OV)
			skge_write8(hw, GMAC_TI_ST_CTRL, GMT_ST_CLR_IRQ);
	}

	if (hwstatus & IS_RAM_RD_PAR) {
		dev_err(&pdev->dev, "Ram read data parity error\n");
		skge_write16(hw, B3_RI_CTRL, RI_CLR_RD_PERR);
	}

	if (hwstatus & IS_RAM_WR_PAR) {
		dev_err(&pdev->dev, "Ram write data parity error\n");
		skge_write16(hw, B3_RI_CTRL, RI_CLR_WR_PERR);
	}

	if (hwstatus & IS_M1_PAR_ERR)
		skge_mac_parity(hw, 0);

	if (hwstatus & IS_M2_PAR_ERR)
		skge_mac_parity(hw, 1);

	if (hwstatus & IS_R1_PAR_ERR) {
		dev_err(&pdev->dev, "%s: receive queue parity error\n",
			hw->dev[0]->name);
		skge_write32(hw, B0_R1_CSR, CSR_IRQ_CL_P);
	}

	if (hwstatus & IS_R2_PAR_ERR) {
		dev_err(&pdev->dev, "%s: receive queue parity error\n",
			hw->dev[1]->name);
		skge_write32(hw, B0_R2_CSR, CSR_IRQ_CL_P);
	}

	if (hwstatus & (IS_IRQ_MST_ERR|IS_IRQ_STAT)) {
		u16 pci_status, pci_cmd;

		pci_read_config_word(pdev, PCI_COMMAND, &pci_cmd);
		pci_read_config_word(pdev, PCI_STATUS, &pci_status);

		dev_err(&pdev->dev, "PCI error cmd=%#x status=%#x\n",
			pci_cmd, pci_status);

		/* Write the error bits back to clear them. */
		pci_status &= PCI_STATUS_ERROR_BITS;
		/* config-space writes need the test-mode unlock */
		skge_write8(hw, B2_TST_CTRL1, TST_CFG_WRITE_ON);
		pci_write_config_word(pdev, PCI_COMMAND,
				      pci_cmd | PCI_COMMAND_SERR | PCI_COMMAND_PARITY);
		pci_write_config_word(pdev, PCI_STATUS, pci_status);
		skge_write8(hw, B2_TST_CTRL1, TST_CFG_WRITE_OFF);

		/* if error still set then just ignore it */
		hwstatus = skge_read32(hw, B0_HWE_ISRC);
		if (hwstatus & IS_IRQ_STAT) {
			dev_warn(&hw->pdev->dev, "unable to clear error (so ignoring them)\n");
			hw->intr_mask &= ~IS_HW_ERR;
		}
	}
}
3273 | |||
/*
 * Interrupt from PHY are handled in tasklet (softirq)
 * because accessing phy registers requires spin wait which might
 * cause excess interrupt latency.
 */
static void skge_extirq(unsigned long arg)
{
	struct skge_hw *hw = (struct skge_hw *) arg;
	int port;

	for (port = 0; port < hw->ports; port++) {
		struct net_device *dev = hw->dev[port];

		if (netif_running(dev)) {
			struct skge_port *skge = netdev_priv(dev);

			/* phy_lock serializes PHY register access with other paths
			 * (e.g. skge_set_mac_address); runs in softirq context.
			 */
			spin_lock(&hw->phy_lock);
			if (!is_genesis(hw))
				yukon_phy_intr(skge);
			else if (hw->phy_type == SK_PHY_BCOM)
				bcom_phy_intr(skge);
			spin_unlock(&hw->phy_lock);
		}
	}

	/* Re-enable the PHY interrupt source that skge_intr() masked
	 * before scheduling this tasklet; the dummy read posts the write.
	 */
	spin_lock_irq(&hw->hw_lock);
	hw->intr_mask |= IS_EXT_REG;
	skge_write32(hw, B0_IMSK, hw->intr_mask);
	skge_read32(hw, B0_IMSK);
	spin_unlock_irq(&hw->hw_lock);
}
3305 | |||
/* Main (shared) interrupt handler.
 * Demultiplexes the board interrupt source register: PHY events go to the
 * tasklet, Tx-done/Rx events to NAPI, packet-arbiter timeouts and MAC/HW
 * errors are handled inline.  Returns IRQ_NONE when the chip shows no
 * pending source (IRQ may be shared, or device gone: status == ~0).
 */
static irqreturn_t skge_intr(int irq, void *dev_id)
{
	struct skge_hw *hw = dev_id;
	u32 status;
	int handled = 0;

	spin_lock(&hw->hw_lock);
	/* Reading this register masks IRQ */
	status = skge_read32(hw, B0_SP_ISRC);
	if (status == 0 || status == ~0)
		goto out;

	handled = 1;
	status &= hw->intr_mask;
	if (status & IS_EXT_REG) {
		/* mask PHY IRQ until the tasklet has serviced it */
		hw->intr_mask &= ~IS_EXT_REG;
		tasklet_schedule(&hw->phy_task);
	}

	if (status & (IS_XA1_F|IS_R1_F)) {
		struct skge_port *skge = netdev_priv(hw->dev[0]);
		/* NAPI poll re-enables these when the ring is drained */
		hw->intr_mask &= ~(IS_XA1_F|IS_R1_F);
		napi_schedule(&skge->napi);
	}

	if (status & IS_PA_TO_TX1)
		skge_write16(hw, B3_PA_CTRL, PA_CLR_TO_TX1);

	if (status & IS_PA_TO_RX1) {
		/* Rx packet-arbiter timeout == receiver overrun */
		++hw->dev[0]->stats.rx_over_errors;
		skge_write16(hw, B3_PA_CTRL, PA_CLR_TO_RX1);
	}


	if (status & IS_MAC1)
		skge_mac_intr(hw, 0);

	/* Same handling for the second port, if present */
	if (hw->dev[1]) {
		struct skge_port *skge = netdev_priv(hw->dev[1]);

		if (status & (IS_XA2_F|IS_R2_F)) {
			hw->intr_mask &= ~(IS_XA2_F|IS_R2_F);
			napi_schedule(&skge->napi);
		}

		if (status & IS_PA_TO_RX2) {
			++hw->dev[1]->stats.rx_over_errors;
			skge_write16(hw, B3_PA_CTRL, PA_CLR_TO_RX2);
		}

		if (status & IS_PA_TO_TX2)
			skge_write16(hw, B3_PA_CTRL, PA_CLR_TO_TX2);

		if (status & IS_MAC2)
			skge_mac_intr(hw, 1);
	}

	if (status & IS_HW_ERR)
		skge_error_irq(hw);

	/* Re-arm with the (possibly updated) mask; read back to post the write */
	skge_write32(hw, B0_IMSK, hw->intr_mask);
	skge_read32(hw, B0_IMSK);
out:
	spin_unlock(&hw->hw_lock);

	return IRQ_RETVAL(handled);
}
3373 | |||
3374 | #ifdef CONFIG_NET_POLL_CONTROLLER | ||
/* Polling entry point for netconsole/netpoll: run the interrupt handler
 * synchronously with the device IRQ disabled.
 */
static void skge_netpoll(struct net_device *dev)
{
	struct skge_port *skge = netdev_priv(dev);

	disable_irq(dev->irq);
	skge_intr(dev->irq, skge->hw);
	enable_irq(dev->irq);
}
3383 | #endif | ||
3384 | |||
/* ndo_set_mac_address: program a new station address.
 * If the interface is running, the receiver is briefly disabled while the
 * address registers are rewritten, then restored to its previous state.
 * Returns 0 or -EADDRNOTAVAIL for an invalid address.
 */
static int skge_set_mac_address(struct net_device *dev, void *p)
{
	struct skge_port *skge = netdev_priv(dev);
	struct skge_hw *hw = skge->hw;
	unsigned port = skge->port;
	const struct sockaddr *addr = p;
	u16 ctrl;

	if (!is_valid_ether_addr(addr->sa_data))
		return -EADDRNOTAVAIL;

	memcpy(dev->dev_addr, addr->sa_data, ETH_ALEN);

	if (!netif_running(dev)) {
		/* idle: just update the two board address registers */
		memcpy_toio(hw->regs + B2_MAC_1 + port*8, dev->dev_addr, ETH_ALEN);
		memcpy_toio(hw->regs + B2_MAC_2 + port*8, dev->dev_addr, ETH_ALEN);
	} else {
		/* disable Rx */
		spin_lock_bh(&hw->phy_lock);
		ctrl = gma_read16(hw, port, GM_GP_CTRL);
		gma_write16(hw, port, GM_GP_CTRL, ctrl & ~GM_GPCR_RX_ENA);

		memcpy_toio(hw->regs + B2_MAC_1 + port*8, dev->dev_addr, ETH_ALEN);
		memcpy_toio(hw->regs + B2_MAC_2 + port*8, dev->dev_addr, ETH_ALEN);

		/* also program the MAC-level source address registers */
		if (is_genesis(hw))
			xm_outaddr(hw, port, XM_SA, dev->dev_addr);
		else {
			gma_set_addr(hw, port, GM_SRC_ADDR_1L, dev->dev_addr);
			gma_set_addr(hw, port, GM_SRC_ADDR_2L, dev->dev_addr);
		}

		/* restore previous Rx enable state */
		gma_write16(hw, port, GM_GP_CTRL, ctrl);
		spin_unlock_bh(&hw->phy_lock);
	}

	return 0;
}
3423 | |||
/* Map of supported chip IDs to human-readable names (probe output only). */
static const struct {
	u8 id;
	const char *name;
} skge_chips[] = {
	{ CHIP_ID_GENESIS,	"Genesis" },
	{ CHIP_ID_YUKON,	 "Yukon" },
	{ CHIP_ID_YUKON_LITE, "Yukon-Lite"},
	{ CHIP_ID_YUKON_LP,	"Yukon-LP"},
};
3433 | |||
/* Return a printable name for the chip on this board.
 * NOTE(review): the unknown-chip fallback formats into a static buffer,
 * so it is not reentrant; acceptable since this is only called at probe.
 */
static const char *skge_board_name(const struct skge_hw *hw)
{
	int i;
	static char buf[16];

	for (i = 0; i < ARRAY_SIZE(skge_chips); i++)
		if (skge_chips[i].id == hw->chip_id)
			return skge_chips[i].name;

	snprintf(buf, sizeof buf, "chipid 0x%x", hw->chip_id);
	return buf;
}
3446 | |||
3447 | |||
/*
 * Setup the board data structure, but don't bring up
 * the port(s)
 *
 * Performs a software reset, identifies chip/PHY, sizes the on-board RAM,
 * initializes the RAM interface, Tx arbiters and interrupt moderation,
 * then resets each MAC.  Returns 0 or -EOPNOTSUPP for unsupported hardware.
 */
static int skge_reset(struct skge_hw *hw)
{
	u32 reg;
	u16 ctst, pci_status;
	u8 t8, mac_cfg, pmd_type;
	int i;

	/* remember control/status so CLK_RUN bits can be restored below */
	ctst = skge_read16(hw, B0_CTST);

	/* do a SW reset */
	skge_write8(hw, B0_CTST, CS_RST_SET);
	skge_write8(hw, B0_CTST, CS_RST_CLR);

	/* clear PCI errors, if any */
	skge_write8(hw, B2_TST_CTRL1, TST_CFG_WRITE_ON);
	skge_write8(hw, B2_TST_CTRL2, 0);

	pci_read_config_word(hw->pdev, PCI_STATUS, &pci_status);
	pci_write_config_word(hw->pdev, PCI_STATUS,
			      pci_status | PCI_STATUS_ERROR_BITS);
	skge_write8(hw, B2_TST_CTRL1, TST_CFG_WRITE_OFF);
	skge_write8(hw, B0_CTST, CS_MRST_CLR);

	/* restore CLK_RUN bits (for Yukon-Lite) */
	skge_write16(hw, B0_CTST,
		     ctst & (CS_CLK_RUN_HOT|CS_CLK_RUN_RST|CS_CLK_RUN_ENA));

	/* identify chip, PHY and medium ('T'/'1' => copper) */
	hw->chip_id = skge_read8(hw, B2_CHIP_ID);
	hw->phy_type = skge_read8(hw, B2_E_1) & 0xf;
	pmd_type = skge_read8(hw, B2_PMD_TYP);
	hw->copper = (pmd_type == 'T' || pmd_type == '1');

	switch (hw->chip_id) {
	case CHIP_ID_GENESIS:
#ifdef CONFIG_SKGE_GENESIS
		switch (hw->phy_type) {
		case SK_PHY_XMAC:
			hw->phy_addr = PHY_ADDR_XMAC;
			break;
		case SK_PHY_BCOM:
			hw->phy_addr = PHY_ADDR_BCOM;
			break;
		default:
			dev_err(&hw->pdev->dev, "unsupported phy type 0x%x\n",
				hw->phy_type);
			return -EOPNOTSUPP;
		}
		break;
#else
		dev_err(&hw->pdev->dev, "Genesis chip detected but not configured\n");
		return -EOPNOTSUPP;
#endif

	case CHIP_ID_YUKON:
	case CHIP_ID_YUKON_LITE:
	case CHIP_ID_YUKON_LP:
		/* Marvell PHYs other than fiber ('S') are copper */
		if (hw->phy_type < SK_PHY_MARV_COPPER && pmd_type != 'S')
			hw->copper = 1;

		hw->phy_addr = PHY_ADDR_MARV;
		break;

	default:
		dev_err(&hw->pdev->dev, "unsupported chip type 0x%x\n",
			hw->chip_id);
		return -EOPNOTSUPP;
	}

	mac_cfg = skge_read8(hw, B2_MAC_CFG);
	hw->ports = (mac_cfg & CFG_SNG_MAC) ? 1 : 2;
	hw->chip_rev = (mac_cfg & CFG_CHIP_R_MSK) >> 4;

	/* read the adapters RAM size */
	t8 = skge_read8(hw, B2_E_0);
	if (is_genesis(hw)) {
		if (t8 == 3) {
			/* special case: 4 x 64k x 36, offset = 0x80000 */
			hw->ram_size = 0x100000;
			hw->ram_offset = 0x80000;
		} else
			hw->ram_size = t8 * 512;
	} else if (t8 == 0)
		hw->ram_size = 0x20000;
	else
		hw->ram_size = t8 * 4096;

	hw->intr_mask = IS_HW_ERR;

	/* Use PHY IRQ for all but fiber based Genesis board */
	if (!(is_genesis(hw) && hw->phy_type == SK_PHY_XMAC))
		hw->intr_mask |= IS_EXT_REG;

	if (is_genesis(hw))
		genesis_init(hw);
	else {
		/* switch power to VCC (WA for VAUX problem) */
		skge_write8(hw, B0_POWER_CTRL,
			    PC_VAUX_ENA | PC_VCC_ENA | PC_VAUX_OFF | PC_VCC_ON);

		/* avoid boards with stuck Hardware error bits */
		if ((skge_read32(hw, B0_ISRC) & IS_HW_ERR) &&
		    (skge_read32(hw, B0_HWE_ISRC) & IS_IRQ_SENSOR)) {
			dev_warn(&hw->pdev->dev, "stuck hardware sensor bit\n");
			hw->intr_mask &= ~IS_HW_ERR;
		}

		/* Clear PHY COMA */
		skge_write8(hw, B2_TST_CTRL1, TST_CFG_WRITE_ON);
		pci_read_config_dword(hw->pdev, PCI_DEV_REG1, &reg);
		reg &= ~PCI_PHY_COMA;
		pci_write_config_dword(hw->pdev, PCI_DEV_REG1, reg);
		skge_write8(hw, B2_TST_CTRL1, TST_CFG_WRITE_OFF);


		/* pulse GMAC link reset for each port */
		for (i = 0; i < hw->ports; i++) {
			skge_write16(hw, SK_REG(i, GMAC_LINK_CTRL), GMLC_RST_SET);
			skge_write16(hw, SK_REG(i, GMAC_LINK_CTRL), GMLC_RST_CLR);
		}
	}

	/* turn off hardware timer (unused) */
	skge_write8(hw, B2_TI_CTRL, TIM_STOP);
	skge_write8(hw, B2_TI_CTRL, TIM_CLR_IRQ);
	skge_write8(hw, B0_LED, LED_STAT_ON);

	/* enable the Tx Arbiters */
	for (i = 0; i < hw->ports; i++)
		skge_write8(hw, SK_REG(i, TXA_CTRL), TXA_ENA_ARB);

	/* Initialize ram interface */
	skge_write16(hw, B3_RI_CTRL, RI_RST_CLR);

	/* same 53-clock timeout for every read/write queue on both ports */
	skge_write8(hw, B3_RI_WTO_R1, SK_RI_TO_53);
	skge_write8(hw, B3_RI_WTO_XA1, SK_RI_TO_53);
	skge_write8(hw, B3_RI_WTO_XS1, SK_RI_TO_53);
	skge_write8(hw, B3_RI_RTO_R1, SK_RI_TO_53);
	skge_write8(hw, B3_RI_RTO_XA1, SK_RI_TO_53);
	skge_write8(hw, B3_RI_RTO_XS1, SK_RI_TO_53);
	skge_write8(hw, B3_RI_WTO_R2, SK_RI_TO_53);
	skge_write8(hw, B3_RI_WTO_XA2, SK_RI_TO_53);
	skge_write8(hw, B3_RI_WTO_XS2, SK_RI_TO_53);
	skge_write8(hw, B3_RI_RTO_R2, SK_RI_TO_53);
	skge_write8(hw, B3_RI_RTO_XA2, SK_RI_TO_53);
	skge_write8(hw, B3_RI_RTO_XS2, SK_RI_TO_53);

	skge_write32(hw, B0_HWE_IMSK, IS_ERR_MSK);

	/* Set interrupt moderation for Transmit only
	 * Receive interrupts avoided by NAPI
	 */
	skge_write32(hw, B2_IRQM_MSK, IS_XA1_F|IS_XA2_F);
	skge_write32(hw, B2_IRQM_INI, skge_usecs2clk(hw, 100));
	skge_write32(hw, B2_IRQM_CTRL, TIM_START);

	skge_write32(hw, B0_IMSK, hw->intr_mask);

	for (i = 0; i < hw->ports; i++) {
		if (is_genesis(hw))
			genesis_reset(hw, i);
		else
			yukon_reset(hw, i);
	}

	return 0;
}
3617 | |||
3618 | |||
3619 | #ifdef CONFIG_SKGE_DEBUG | ||
3620 | |||
3621 | static struct dentry *skge_debug; | ||
3622 | |||
/* debugfs seq_file show: dump interrupt state and both descriptor rings.
 * Returns -ENETDOWN when the interface is not up.
 */
static int skge_debug_show(struct seq_file *seq, void *v)
{
	struct net_device *dev = seq->private;
	const struct skge_port *skge = netdev_priv(dev);
	const struct skge_hw *hw = skge->hw;
	const struct skge_element *e;

	if (!netif_running(dev))
		return -ENETDOWN;

	seq_printf(seq, "IRQ src=%x mask=%x\n", skge_read32(hw, B0_ISRC),
		   skge_read32(hw, B0_IMSK));

	/* Tx ring: only the in-flight region (to_clean .. to_use) */
	seq_printf(seq, "Tx Ring: (%d)\n", skge_avail(&skge->tx_ring));
	for (e = skge->tx_ring.to_clean; e != skge->tx_ring.to_use; e = e->next) {
		const struct skge_tx_desc *t = e->desc;
		seq_printf(seq, "%#x dma=%#x%08x %#x csum=%#x/%x/%x\n",
			   t->control, t->dma_hi, t->dma_lo, t->status,
			   t->csum_offs, t->csum_write, t->csum_start);
	}

	/* Rx ring: walk until the first descriptor still owned by hardware */
	seq_printf(seq, "\nRx Ring:\n");
	for (e = skge->rx_ring.to_clean; ; e = e->next) {
		const struct skge_rx_desc *r = e->desc;

		if (r->control & BMU_OWN)
			break;

		seq_printf(seq, "%#x dma=%#x%08x %#x %#x csum=%#x/%x\n",
			   r->control, r->dma_hi, r->dma_lo, r->status,
			   r->timestamp, r->csum1, r->csum1_start);
	}

	return 0;
}
3658 | |||
/* debugfs open: bind the net_device (stored in i_private) to the seq_file. */
static int skge_debug_open(struct inode *inode, struct file *file)
{
	return single_open(file, skge_debug_show, inode->i_private);
}
3663 | |||
/* File operations for the per-device debugfs entry (read-only seq_file). */
static const struct file_operations skge_debug_fops = {
	.owner		= THIS_MODULE,
	.open		= skge_debug_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
};
3671 | |||
/*
 * Use network device events to create/remove/rename
 * debugfs file entries
 */
static int skge_device_event(struct notifier_block *unused,
			     unsigned long event, void *ptr)
{
	struct net_device *dev = ptr;
	struct skge_port *skge;
	struct dentry *d;

	/* Ignore devices not driven by this driver (checked via ndo_open),
	 * and do nothing if the debugfs root was never created.
	 */
	if (dev->netdev_ops->ndo_open != &skge_up || !skge_debug)
		goto done;

	skge = netdev_priv(dev);
	switch (event) {
	case NETDEV_CHANGENAME:
		if (skge->debugfs) {
			d = debugfs_rename(skge_debug, skge->debugfs,
					   skge_debug, dev->name);
			if (d)
				skge->debugfs = d;
			else {
				/* rename failed: drop the stale entry */
				netdev_info(dev, "rename failed\n");
				debugfs_remove(skge->debugfs);
			}
		}
		break;

	case NETDEV_GOING_DOWN:
		if (skge->debugfs) {
			debugfs_remove(skge->debugfs);
			skge->debugfs = NULL;
		}
		break;

	case NETDEV_UP:
		d = debugfs_create_file(dev->name, S_IRUGO,
					skge_debug, dev,
					&skge_debug_fops);
		if (!d || IS_ERR(d))
			netdev_info(dev, "debugfs create failed\n");
		else
			skge->debugfs = d;
		break;
	}

done:
	return NOTIFY_DONE;
}
3722 | |||
/* Netdevice notifier that keeps debugfs entries in sync with device state. */
static struct notifier_block skge_notifier = {
	.notifier_call = skge_device_event,
};
3726 | |||
3727 | |||
/* Create the debugfs root directory and register the netdev notifier.
 * Failure is non-fatal: the driver simply runs without debugfs support.
 */
static __init void skge_debug_init(void)
{
	struct dentry *ent;

	ent = debugfs_create_dir("skge", NULL);
	if (!ent || IS_ERR(ent)) {
		pr_info("debugfs create directory failed\n");
		return;
	}

	skge_debug = ent;
	register_netdevice_notifier(&skge_notifier);
}
3741 | |||
3742 | static __exit void skge_debug_cleanup(void) | ||
3743 | { | ||
3744 | if (skge_debug) { | ||
3745 | unregister_netdevice_notifier(&skge_notifier); | ||
3746 | debugfs_remove(skge_debug); | ||
3747 | skge_debug = NULL; | ||
3748 | } | ||
3749 | } | ||
3750 | |||
3751 | #else | ||
3752 | #define skge_debug_init() | ||
3753 | #define skge_debug_cleanup() | ||
3754 | #endif | ||
3755 | |||
/* Netdevice operations table wiring the stack entry points to this driver. */
static const struct net_device_ops skge_netdev_ops = {
	.ndo_open		= skge_up,
	.ndo_stop		= skge_down,
	.ndo_start_xmit		= skge_xmit_frame,
	.ndo_do_ioctl		= skge_ioctl,
	.ndo_get_stats		= skge_get_stats,
	.ndo_tx_timeout		= skge_tx_timeout,
	.ndo_change_mtu		= skge_change_mtu,
	.ndo_validate_addr	= eth_validate_addr,
	.ndo_set_multicast_list	= skge_set_multicast,
	.ndo_set_mac_address	= skge_set_mac_address,
#ifdef CONFIG_NET_POLL_CONTROLLER
	.ndo_poll_controller	= skge_netpoll,
#endif
};
3771 | |||
3772 | |||
/* Initialize network device */
/* Allocate and initialize the net_device for one port, wiring in ops,
 * NAPI, default link settings, WoL and the MAC address from the board ROM.
 * The device is registered by the caller.  Returns NULL on allocation
 * failure.  @highmem: nonzero when 64-bit DMA is in use (sets HIGHDMA).
 */
static struct net_device *skge_devinit(struct skge_hw *hw, int port,
				       int highmem)
{
	struct skge_port *skge;
	struct net_device *dev = alloc_etherdev(sizeof(*skge));

	if (!dev) {
		dev_err(&hw->pdev->dev, "etherdev alloc failed\n");
		return NULL;
	}

	SET_NETDEV_DEV(dev, &hw->pdev->dev);
	dev->netdev_ops = &skge_netdev_ops;
	dev->ethtool_ops = &skge_ethtool_ops;
	dev->watchdog_timeo = TX_WATCHDOG;
	dev->irq = hw->pdev->irq;

	if (highmem)
		dev->features |= NETIF_F_HIGHDMA;

	skge = netdev_priv(dev);
	netif_napi_add(dev, &skge->napi, skge_poll, NAPI_WEIGHT);
	skge->netdev = dev;
	skge->hw = hw;
	skge->msg_enable = netif_msg_init(debug, default_msg);

	skge->tx_ring.count = DEFAULT_TX_RING_SIZE;
	skge->rx_ring.count = DEFAULT_RX_RING_SIZE;

	/* Auto speed and flow control */
	skge->autoneg = AUTONEG_ENABLE;
	skge->flow_control = FLOW_MODE_SYM_OR_REM;
	/* -1 means "not forced": resolved by autonegotiation */
	skge->duplex = -1;
	skge->speed = -1;
	skge->advertising = skge_supported_modes(hw);

	if (device_can_wakeup(&hw->pdev->dev)) {
		skge->wol = wol_supported(hw) & WAKE_MAGIC;
		device_set_wakeup_enable(&hw->pdev->dev, skge->wol);
	}

	hw->dev[port] = dev;

	skge->port = port;

	/* Only used for Genesis XMAC */
	if (is_genesis(hw))
		setup_timer(&skge->link_timer, xm_link_timer, (unsigned long) skge);
	else {
		/* Yukon family supports hardware checksum and scatter/gather */
		dev->hw_features = NETIF_F_IP_CSUM | NETIF_F_SG |
				   NETIF_F_RXCSUM;
		dev->features |= dev->hw_features;
	}

	/* read the mac address */
	memcpy_fromio(dev->dev_addr, hw->regs + B2_MAC_1 + port*8, ETH_ALEN);
	memcpy(dev->perm_addr, dev->dev_addr, dev->addr_len);

	return dev;
}
3834 | |||
/* Log the port's MAC address at probe time (gated by msg_enable). */
static void __devinit skge_show_addr(struct net_device *dev)
{
	const struct skge_port *skge = netdev_priv(dev);

	netif_info(skge, probe, skge->netdev, "addr %pM\n", dev->dev_addr);
}
3841 | |||
3842 | static int only_32bit_dma; | ||
3843 | |||
3844 | static int __devinit skge_probe(struct pci_dev *pdev, | ||
3845 | const struct pci_device_id *ent) | ||
3846 | { | ||
3847 | struct net_device *dev, *dev1; | ||
3848 | struct skge_hw *hw; | ||
3849 | int err, using_dac = 0; | ||
3850 | |||
3851 | err = pci_enable_device(pdev); | ||
3852 | if (err) { | ||
3853 | dev_err(&pdev->dev, "cannot enable PCI device\n"); | ||
3854 | goto err_out; | ||
3855 | } | ||
3856 | |||
3857 | err = pci_request_regions(pdev, DRV_NAME); | ||
3858 | if (err) { | ||
3859 | dev_err(&pdev->dev, "cannot obtain PCI resources\n"); | ||
3860 | goto err_out_disable_pdev; | ||
3861 | } | ||
3862 | |||
3863 | pci_set_master(pdev); | ||
3864 | |||
3865 | if (!only_32bit_dma && !pci_set_dma_mask(pdev, DMA_BIT_MASK(64))) { | ||
3866 | using_dac = 1; | ||
3867 | err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64)); | ||
3868 | } else if (!(err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32)))) { | ||
3869 | using_dac = 0; | ||
3870 | err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32)); | ||
3871 | } | ||
3872 | |||
3873 | if (err) { | ||
3874 | dev_err(&pdev->dev, "no usable DMA configuration\n"); | ||
3875 | goto err_out_free_regions; | ||
3876 | } | ||
3877 | |||
3878 | #ifdef __BIG_ENDIAN | ||
3879 | /* byte swap descriptors in hardware */ | ||
3880 | { | ||
3881 | u32 reg; | ||
3882 | |||
3883 | pci_read_config_dword(pdev, PCI_DEV_REG2, ®); | ||
3884 | reg |= PCI_REV_DESC; | ||
3885 | pci_write_config_dword(pdev, PCI_DEV_REG2, reg); | ||
3886 | } | ||
3887 | #endif | ||
3888 | |||
3889 | err = -ENOMEM; | ||
3890 | /* space for skge@pci:0000:04:00.0 */ | ||
3891 | hw = kzalloc(sizeof(*hw) + strlen(DRV_NAME "@pci:") | ||
3892 | + strlen(pci_name(pdev)) + 1, GFP_KERNEL); | ||
3893 | if (!hw) { | ||
3894 | dev_err(&pdev->dev, "cannot allocate hardware struct\n"); | ||
3895 | goto err_out_free_regions; | ||
3896 | } | ||
3897 | sprintf(hw->irq_name, DRV_NAME "@pci:%s", pci_name(pdev)); | ||
3898 | |||
3899 | hw->pdev = pdev; | ||
3900 | spin_lock_init(&hw->hw_lock); | ||
3901 | spin_lock_init(&hw->phy_lock); | ||
3902 | tasklet_init(&hw->phy_task, skge_extirq, (unsigned long) hw); | ||
3903 | |||
3904 | hw->regs = ioremap_nocache(pci_resource_start(pdev, 0), 0x4000); | ||
3905 | if (!hw->regs) { | ||
3906 | dev_err(&pdev->dev, "cannot map device registers\n"); | ||
3907 | goto err_out_free_hw; | ||
3908 | } | ||
3909 | |||
3910 | err = skge_reset(hw); | ||
3911 | if (err) | ||
3912 | goto err_out_iounmap; | ||
3913 | |||
3914 | pr_info("%s addr 0x%llx irq %d chip %s rev %d\n", | ||
3915 | DRV_VERSION, | ||
3916 | (unsigned long long)pci_resource_start(pdev, 0), pdev->irq, | ||
3917 | skge_board_name(hw), hw->chip_rev); | ||
3918 | |||
3919 | dev = skge_devinit(hw, 0, using_dac); | ||
3920 | if (!dev) | ||
3921 | goto err_out_led_off; | ||
3922 | |||
3923 | /* Some motherboards are broken and has zero in ROM. */ | ||
3924 | if (!is_valid_ether_addr(dev->dev_addr)) | ||
3925 | dev_warn(&pdev->dev, "bad (zero?) ethernet address in rom\n"); | ||
3926 | |||
3927 | err = register_netdev(dev); | ||
3928 | if (err) { | ||
3929 | dev_err(&pdev->dev, "cannot register net device\n"); | ||
3930 | goto err_out_free_netdev; | ||
3931 | } | ||
3932 | |||
3933 | err = request_irq(pdev->irq, skge_intr, IRQF_SHARED, hw->irq_name, hw); | ||
3934 | if (err) { | ||
3935 | dev_err(&pdev->dev, "%s: cannot assign irq %d\n", | ||
3936 | dev->name, pdev->irq); | ||
3937 | goto err_out_unregister; | ||
3938 | } | ||
3939 | skge_show_addr(dev); | ||
3940 | |||
3941 | if (hw->ports > 1) { | ||
3942 | dev1 = skge_devinit(hw, 1, using_dac); | ||
3943 | if (dev1 && register_netdev(dev1) == 0) | ||
3944 | skge_show_addr(dev1); | ||
3945 | else { | ||
3946 | /* Failure to register second port need not be fatal */ | ||
3947 | dev_warn(&pdev->dev, "register of second port failed\n"); | ||
3948 | hw->dev[1] = NULL; | ||
3949 | hw->ports = 1; | ||
3950 | if (dev1) | ||
3951 | free_netdev(dev1); | ||
3952 | } | ||
3953 | } | ||
3954 | pci_set_drvdata(pdev, hw); | ||
3955 | |||
3956 | return 0; | ||
3957 | |||
3958 | err_out_unregister: | ||
3959 | unregister_netdev(dev); | ||
3960 | err_out_free_netdev: | ||
3961 | free_netdev(dev); | ||
3962 | err_out_led_off: | ||
3963 | skge_write16(hw, B0_LED, LED_STAT_OFF); | ||
3964 | err_out_iounmap: | ||
3965 | iounmap(hw->regs); | ||
3966 | err_out_free_hw: | ||
3967 | kfree(hw); | ||
3968 | err_out_free_regions: | ||
3969 | pci_release_regions(pdev); | ||
3970 | err_out_disable_pdev: | ||
3971 | pci_disable_device(pdev); | ||
3972 | pci_set_drvdata(pdev, NULL); | ||
3973 | err_out: | ||
3974 | return err; | ||
3975 | } | ||
3976 | |||
/* PCI remove: unregister netdevs, quiesce interrupts, reset the chip and
 * release all resources acquired in skge_probe() (reverse order).
 */
static void __devexit skge_remove(struct pci_dev *pdev)
{
	struct skge_hw *hw  = pci_get_drvdata(pdev);
	struct net_device *dev0, *dev1;

	if (!hw)
		return;

	/* unregister second port first (mirror of probe order) */
	dev1 = hw->dev[1];
	if (dev1)
		unregister_netdev(dev1);
	dev0 = hw->dev[0];
	unregister_netdev(dev0);

	/* ensure the PHY tasklet cannot run during teardown */
	tasklet_disable(&hw->phy_task);

	/* mask all interrupt sources; read back posts the write */
	spin_lock_irq(&hw->hw_lock);
	hw->intr_mask = 0;
	skge_write32(hw, B0_IMSK, 0);
	skge_read32(hw, B0_IMSK);
	spin_unlock_irq(&hw->hw_lock);

	skge_write16(hw, B0_LED, LED_STAT_OFF);
	/* hold the chip in reset */
	skge_write8(hw, B0_CTST, CS_RST_SET);

	free_irq(pdev->irq, hw);
	pci_release_regions(pdev);
	pci_disable_device(pdev);
	if (dev1)
		free_netdev(dev1);
	free_netdev(dev0);

	iounmap(hw->regs);
	kfree(hw);
	pci_set_drvdata(pdev, NULL);
}
4013 | |||
4014 | #ifdef CONFIG_PM | ||
/* PM suspend: bring down running ports, arm Wake-on-LAN where enabled,
 * and mask all interrupts.  Always returns 0.
 */
static int skge_suspend(struct device *dev)
{
	struct pci_dev *pdev = to_pci_dev(dev);
	struct skge_hw *hw  = pci_get_drvdata(pdev);
	int i;

	if (!hw)
		return 0;

	for (i = 0; i < hw->ports; i++) {
		struct net_device *dev = hw->dev[i];
		struct skge_port *skge = netdev_priv(dev);

		if (netif_running(dev))
			skge_down(dev);

		/* program WoL after the port is quiesced */
		if (skge->wol)
			skge_wol_init(skge);
	}

	skge_write32(hw, B0_IMSK, 0);

	return 0;
}
4039 | |||
/* PM resume: re-run the hardware reset and bring previously-running ports
 * back up.  A port that fails to come up is closed and the error returned.
 */
static int skge_resume(struct device *dev)
{
	struct pci_dev *pdev = to_pci_dev(dev);
	struct skge_hw *hw  = pci_get_drvdata(pdev);
	int i, err;

	if (!hw)
		return 0;

	err = skge_reset(hw);
	if (err)
		goto out;

	for (i = 0; i < hw->ports; i++) {
		struct net_device *dev = hw->dev[i];

		if (netif_running(dev)) {
			err = skge_up(dev);

			if (err) {
				netdev_err(dev, "could not up: %d\n", err);
				dev_close(dev);
				goto out;
			}
		}
	}
out:
	return err;
}
4069 | |||
4070 | static SIMPLE_DEV_PM_OPS(skge_pm_ops, skge_suspend, skge_resume); | ||
4071 | #define SKGE_PM_OPS (&skge_pm_ops) | ||
4072 | |||
4073 | #else | ||
4074 | |||
4075 | #define SKGE_PM_OPS NULL | ||
4076 | #endif | ||
4077 | |||
/* PCI shutdown: arm Wake-on-LAN on enabled ports and put the device into
 * D3hot so it can wake the system if configured to.
 */
static void skge_shutdown(struct pci_dev *pdev)
{
	struct skge_hw *hw  = pci_get_drvdata(pdev);
	int i;

	if (!hw)
		return;

	for (i = 0; i < hw->ports; i++) {
		struct net_device *dev = hw->dev[i];
		struct skge_port *skge = netdev_priv(dev);

		if (skge->wol)
			skge_wol_init(skge);
	}

	pci_wake_from_d3(pdev, device_may_wakeup(&pdev->dev));
	pci_set_power_state(pdev, PCI_D3hot);
}
4097 | |||
/* PCI driver glue: probe/remove/shutdown plus suspend/resume PM ops. */
static struct pci_driver skge_driver = {
	.name =         DRV_NAME,
	.id_table =     skge_id_table,
	.probe =        skge_probe,
	.remove =       __devexit_p(skge_remove),
	.shutdown =	skge_shutdown,
	.driver.pm =	SKGE_PM_OPS,
};
4106 | |||
/* DMI list of boards whose chipset mishandles 64-bit DMA with this device;
 * matching boards are forced to a 32-bit DMA mask (see skge_init_module).
 */
static struct dmi_system_id skge_32bit_dma_boards[] = {
	{
		.ident = "Gigabyte nForce boards",
		.matches = {
			DMI_MATCH(DMI_BOARD_VENDOR, "Gigabyte Technology Co"),
			DMI_MATCH(DMI_BOARD_NAME, "nForce"),
		},
	},
	{}
};
4117 | |||
/* Module init: apply the 32-bit-DMA DMI quirk, set up debugfs support,
 * then register the PCI driver.
 */
static int __init skge_init_module(void)
{
	if (dmi_check_system(skge_32bit_dma_boards))
		only_32bit_dma = 1;
	skge_debug_init();
	return pci_register_driver(&skge_driver);
}
4125 | |||
/* Module exit: unregister the PCI driver, then tear down debugfs support. */
static void __exit skge_cleanup_module(void)
{
	pci_unregister_driver(&skge_driver);
	skge_debug_cleanup();
}
4131 | |||
4132 | module_init(skge_init_module); | ||
4133 | module_exit(skge_cleanup_module); | ||
diff --git a/drivers/net/ethernet/marvell/skge.h b/drivers/net/ethernet/marvell/skge.h new file mode 100644 index 000000000000..a2eb34115844 --- /dev/null +++ b/drivers/net/ethernet/marvell/skge.h | |||
@@ -0,0 +1,2584 @@ | |||
1 | /* | ||
2 | * Definitions for the new Marvell Yukon / SysKonnect driver. | ||
3 | */ | ||
4 | #ifndef _SKGE_H | ||
5 | #define _SKGE_H | ||
6 | #include <linux/interrupt.h> | ||
7 | |||
/* Device-specific PCI config space registers (offsets 0x40+).
 * The shift expressions are parenthesized so the macros expand safely
 * inside larger expressions (e.g. FOO * 2 must not rebind the shift). */
#define PCI_DEV_REG1	0x40
#define  PCI_PHY_COMA	0x8000000
#define  PCI_VIO	0x2000000

#define PCI_DEV_REG2	0x44
#define  PCI_VPD_ROM_SZ	(7L<<14)	/* VPD ROM size 0=256, 1=512, ... */
#define  PCI_REV_DESC	(1<<2)		/* Reverse Descriptor bytes */

#define PCI_STATUS_ERROR_BITS (PCI_STATUS_DETECTED_PARITY | \
			       PCI_STATUS_SIG_SYSTEM_ERROR | \
			       PCI_STATUS_REC_MASTER_ABORT | \
			       PCI_STATUS_REC_TARGET_ABORT | \
			       PCI_STATUS_PARITY)
22 | |||
/* Base control/status register map: byte offsets into the memory-mapped
 * BAR. "B0".."B3" is the SysKonnect bank numbering used throughout. */
enum csr_regs {
	/* Bank 0: global control/status, interrupt sources/masks,
	 * XMAC PHY access and per-queue BMU control/status */
	B0_RAP		= 0x0000,
	B0_CTST		= 0x0004,
	B0_LED		= 0x0006,
	B0_POWER_CTRL	= 0x0007,
	B0_ISRC		= 0x0008,
	B0_IMSK		= 0x000c,
	B0_HWE_ISRC	= 0x0010,
	B0_HWE_IMSK	= 0x0014,
	B0_SP_ISRC	= 0x0018,
	B0_XM1_IMSK	= 0x0020,
	B0_XM1_ISRC	= 0x0028,
	B0_XM1_PHY_ADDR = 0x0030,
	B0_XM1_PHY_DATA = 0x0034,
	B0_XM2_IMSK	= 0x0040,
	B0_XM2_ISRC	= 0x0048,
	B0_XM2_PHY_ADDR = 0x0050,
	B0_XM2_PHY_DATA = 0x0054,
	B0_R1_CSR	= 0x0060,
	B0_R2_CSR	= 0x0064,
	B0_XS1_CSR	= 0x0068,
	B0_XA1_CSR	= 0x006c,
	B0_XS2_CSR	= 0x0070,
	B0_XA2_CSR	= 0x0074,

	/* Bank 2: MAC addresses, connector/PMD type, chip identification,
	 * timers, IRQ moderation, test control, GPIO, I2C, blink source */
	B2_MAC_1	= 0x0100,
	B2_MAC_2	= 0x0108,
	B2_MAC_3	= 0x0110,
	B2_CONN_TYP	= 0x0118,
	B2_PMD_TYP	= 0x0119,
	B2_MAC_CFG	= 0x011a,
	B2_CHIP_ID	= 0x011b,
	B2_E_0		= 0x011c,
	B2_E_1		= 0x011d,
	B2_E_2		= 0x011e,
	B2_E_3		= 0x011f,
	B2_FAR		= 0x0120,
	B2_FDP		= 0x0124,
	B2_LD_CTRL	= 0x0128,
	B2_LD_TEST	= 0x0129,
	B2_TI_INI	= 0x0130,
	B2_TI_VAL	= 0x0134,
	B2_TI_CTRL	= 0x0138,
	B2_TI_TEST	= 0x0139,
	B2_IRQM_INI	= 0x0140,
	B2_IRQM_VAL	= 0x0144,
	B2_IRQM_CTRL	= 0x0148,
	B2_IRQM_TEST	= 0x0149,
	B2_IRQM_MSK	= 0x014c,
	B2_IRQM_HWE_MSK = 0x0150,
	B2_TST_CTRL1	= 0x0158,
	B2_TST_CTRL2	= 0x0159,
	B2_GP_IO	= 0x015c,
	B2_I2C_CTRL	= 0x0160,
	B2_I2C_DATA	= 0x0164,
	B2_I2C_IRQ	= 0x0168,
	B2_I2C_SW	= 0x016c,
	B2_BSC_INI	= 0x0170,
	B2_BSC_VAL	= 0x0174,
	B2_BSC_CTRL	= 0x0178,
	B2_BSC_STAT	= 0x0179,
	B2_BSC_TST	= 0x017a,

	/* Bank 3: RAM interface (RI), MAC arbiter (MA) and
	 * packet arbiter (PA) timeout/recovery registers */
	B3_RAM_ADDR	= 0x0180,
	B3_RAM_DATA_LO	= 0x0184,
	B3_RAM_DATA_HI	= 0x0188,
	B3_RI_WTO_R1	= 0x0190,
	B3_RI_WTO_XA1	= 0x0191,
	B3_RI_WTO_XS1	= 0x0192,
	B3_RI_RTO_R1	= 0x0193,
	B3_RI_RTO_XA1	= 0x0194,
	B3_RI_RTO_XS1	= 0x0195,
	B3_RI_WTO_R2	= 0x0196,
	B3_RI_WTO_XA2	= 0x0197,
	B3_RI_WTO_XS2	= 0x0198,
	B3_RI_RTO_R2	= 0x0199,
	B3_RI_RTO_XA2	= 0x019a,
	B3_RI_RTO_XS2	= 0x019b,
	B3_RI_TO_VAL	= 0x019c,
	B3_RI_CTRL	= 0x01a0,
	B3_RI_TEST	= 0x01a2,
	B3_MA_TOINI_RX1 = 0x01b0,
	B3_MA_TOINI_RX2 = 0x01b1,
	B3_MA_TOINI_TX1 = 0x01b2,
	B3_MA_TOINI_TX2 = 0x01b3,
	B3_MA_TOVAL_RX1 = 0x01b4,
	B3_MA_TOVAL_RX2 = 0x01b5,
	B3_MA_TOVAL_TX1 = 0x01b6,
	B3_MA_TOVAL_TX2 = 0x01b7,
	B3_MA_TO_CTRL	= 0x01b8,
	B3_MA_TO_TEST	= 0x01ba,
	B3_MA_RCINI_RX1 = 0x01c0,
	B3_MA_RCINI_RX2 = 0x01c1,
	B3_MA_RCINI_TX1 = 0x01c2,
	B3_MA_RCINI_TX2 = 0x01c3,
	B3_MA_RCVAL_RX1 = 0x01c4,
	B3_MA_RCVAL_RX2 = 0x01c5,
	B3_MA_RCVAL_TX1 = 0x01c6,
	B3_MA_RCVAL_TX2 = 0x01c7,
	B3_MA_RC_CTRL	= 0x01c8,
	B3_MA_RC_TEST	= 0x01ca,
	B3_PA_TOINI_RX1 = 0x01d0,
	B3_PA_TOINI_RX2 = 0x01d4,
	B3_PA_TOINI_TX1 = 0x01d8,
	B3_PA_TOINI_TX2 = 0x01dc,
	B3_PA_TOVAL_RX1 = 0x01e0,
	B3_PA_TOVAL_RX2 = 0x01e4,
	B3_PA_TOVAL_TX1 = 0x01e8,
	B3_PA_TOVAL_TX2 = 0x01ec,
	B3_PA_CTRL	= 0x01f0,
	B3_PA_TEST	= 0x01f2,
};
135 | |||
/* B0_CTST			16 bit	Control/Status register */
enum {
	CS_CLK_RUN_HOT	= 1<<13,/* CLK_RUN hot m. (YUKON-Lite only) */
	CS_CLK_RUN_RST	= 1<<12,/* CLK_RUN reset  (YUKON-Lite only) */
	CS_CLK_RUN_ENA	= 1<<11,/* CLK_RUN enable (YUKON-Lite only) */
	CS_VAUX_AVAIL	= 1<<10,/* VAUX available (YUKON only) */
	CS_BUS_CLOCK	= 1<<9,	/* Bus Clock 0/1 = 33/66 MHz */
	CS_BUS_SLOT_SZ	= 1<<8,	/* Slot Size 0/1 = 32/64 bit slot */
	CS_ST_SW_IRQ	= 1<<7,	/* Set IRQ SW Request */
	CS_CL_SW_IRQ	= 1<<6,	/* Clear IRQ SW Request */
	CS_STOP_DONE	= 1<<5,	/* Stop Master is finished */
	CS_STOP_MAST	= 1<<4,	/* Command Bit to stop the master */
	CS_MRST_CLR	= 1<<3,	/* Clear Master reset */
	CS_MRST_SET	= 1<<2,	/* Set Master reset */
	CS_RST_CLR	= 1<<1,	/* Clear Software reset */
	CS_RST_SET	= 1,	/* Set   Software reset */

	/* B0_LED			 8 Bit	LED register */
	/* Bit  7.. 2:	reserved */
	LED_STAT_ON	= 1<<1,	/* Status LED on */
	LED_STAT_OFF	= 1,	/* Status LED off */

	/* B0_POWER_CTRL	 8 Bit	Power Control reg (YUKON only) */
	PC_VAUX_ENA	= 1<<7,	/* Switch VAUX Enable */
	PC_VAUX_DIS	= 1<<6,	/* Switch VAUX Disable */
	PC_VCC_ENA	= 1<<5,	/* Switch VCC Enable */
	PC_VCC_DIS	= 1<<4,	/* Switch VCC Disable */
	PC_VAUX_ON	= 1<<3,	/* Switch VAUX On */
	PC_VAUX_OFF	= 1<<2,	/* Switch VAUX Off */
	PC_VCC_ON	= 1<<1,	/* Switch VCC On */
	PC_VCC_OFF	= 1<<0,	/* Switch VCC Off */
};

/* B2_IRQM_MSK	32 bit	IRQ Moderation Mask */
enum {
	IS_ALL_MSK	= 0xbffffffful,	/* All Interrupt bits */
	/* NOTE(review): 1<<31 shifts into the sign bit of int — relies on
	 * implementation-defined behavior (kernel convention; new code
	 * would write 1U<<31). */
	IS_HW_ERR	= 1<<31,	/* Interrupt HW Error */
					/* Bit 30:	reserved */
	IS_PA_TO_RX1	= 1<<29,	/* Packet Arb Timeout Rx1 */
	IS_PA_TO_RX2	= 1<<28,	/* Packet Arb Timeout Rx2 */
	IS_PA_TO_TX1	= 1<<27,	/* Packet Arb Timeout Tx1 */
	IS_PA_TO_TX2	= 1<<26,	/* Packet Arb Timeout Tx2 */
	IS_I2C_READY	= 1<<25,	/* IRQ on end of I2C Tx */
	IS_IRQ_SW	= 1<<24,	/* SW forced IRQ */
	IS_EXT_REG	= 1<<23,	/* IRQ from LM80 or PHY (GENESIS only) */
					/* IRQ from PHY (YUKON only) */
	IS_TIMINT	= 1<<22,	/* IRQ from Timer */
	IS_MAC1		= 1<<21,	/* IRQ from MAC 1 */
	IS_LNK_SYNC_M1	= 1<<20,	/* Link Sync Cnt wrap MAC 1 */
	IS_MAC2		= 1<<19,	/* IRQ from MAC 2 */
	IS_LNK_SYNC_M2	= 1<<18,	/* Link Sync Cnt wrap MAC 2 */
/* Receive Queue 1 */
	IS_R1_B		= 1<<17,	/* Q_R1 End of Buffer */
	IS_R1_F		= 1<<16,	/* Q_R1 End of Frame */
	IS_R1_C		= 1<<15,	/* Q_R1 Encoding Error */
/* Receive Queue 2 */
	IS_R2_B		= 1<<14,	/* Q_R2 End of Buffer */
	IS_R2_F		= 1<<13,	/* Q_R2 End of Frame */
	IS_R2_C		= 1<<12,	/* Q_R2 Encoding Error */
/* Synchronous Transmit Queue 1 */
	IS_XS1_B	= 1<<11,	/* Q_XS1 End of Buffer */
	IS_XS1_F	= 1<<10,	/* Q_XS1 End of Frame */
	IS_XS1_C	= 1<<9,		/* Q_XS1 Encoding Error */
/* Asynchronous Transmit Queue 1 */
	IS_XA1_B	= 1<<8,		/* Q_XA1 End of Buffer */
	IS_XA1_F	= 1<<7,		/* Q_XA1 End of Frame */
	IS_XA1_C	= 1<<6,		/* Q_XA1 Encoding Error */
/* Synchronous Transmit Queue 2 */
	IS_XS2_B	= 1<<5,		/* Q_XS2 End of Buffer */
	IS_XS2_F	= 1<<4,		/* Q_XS2 End of Frame */
	IS_XS2_C	= 1<<3,		/* Q_XS2 Encoding Error */
/* Asynchronous Transmit Queue 2 */
	IS_XA2_B	= 1<<2,		/* Q_XA2 End of Buffer */
	IS_XA2_F	= 1<<1,		/* Q_XA2 End of Frame */
	IS_XA2_C	= 1<<0,		/* Q_XA2 Encoding Error */

	/* Convenience aggregates used by the interrupt handler */
	IS_TO_PORT1	= IS_PA_TO_RX1 | IS_PA_TO_TX1,
	IS_TO_PORT2	= IS_PA_TO_RX2 | IS_PA_TO_TX2,

	IS_PORT_1	= IS_XA1_F| IS_R1_F | IS_TO_PORT1 | IS_MAC1,
	IS_PORT_2	= IS_XA2_F| IS_R2_F | IS_TO_PORT2 | IS_MAC2,
};
218 | |||
219 | |||
/* B2_IRQM_HWE_MSK	32 bit	IRQ Moderation HW Error Mask */
enum {
	IS_IRQ_TIST_OV	= 1<<13, /* Time Stamp Timer Overflow (YUKON only) */
	IS_IRQ_SENSOR	= 1<<12, /* IRQ from Sensor (YUKON only) */
	IS_IRQ_MST_ERR	= 1<<11, /* IRQ master error detected */
	IS_IRQ_STAT	= 1<<10, /* IRQ status exception */
	IS_NO_STAT_M1	= 1<<9,	/* No Rx Status from MAC 1 */
	IS_NO_STAT_M2	= 1<<8,	/* No Rx Status from MAC 2 */
	IS_NO_TIST_M1	= 1<<7,	/* No Time Stamp from MAC 1 */
	IS_NO_TIST_M2	= 1<<6,	/* No Time Stamp from MAC 2 */
	IS_RAM_RD_PAR	= 1<<5,	/* RAM Read  Parity Error */
	IS_RAM_WR_PAR	= 1<<4,	/* RAM Write Parity Error */
	IS_M1_PAR_ERR	= 1<<3,	/* MAC 1 Parity Error */
	IS_M2_PAR_ERR	= 1<<2,	/* MAC 2 Parity Error */
	IS_R1_PAR_ERR	= 1<<1,	/* Queue R1 Parity Error */
	IS_R2_PAR_ERR	= 1<<0,	/* Queue R2 Parity Error */

	/* Fatal hardware errors the driver reacts to */
	IS_ERR_MSK	= IS_IRQ_MST_ERR | IS_IRQ_STAT
			| IS_RAM_RD_PAR | IS_RAM_WR_PAR
			| IS_M1_PAR_ERR | IS_M2_PAR_ERR
			| IS_R1_PAR_ERR | IS_R2_PAR_ERR,
};

/* B2_TST_CTRL1	 8 bit	Test Control Register 1 */
enum {
	TST_FRC_DPERR_MR = 1<<7, /* force DATAPERR on MST RD */
	TST_FRC_DPERR_MW = 1<<6, /* force DATAPERR on MST WR */
	TST_FRC_DPERR_TR = 1<<5, /* force DATAPERR on TRG RD */
	TST_FRC_DPERR_TW = 1<<4, /* force DATAPERR on TRG WR */
	TST_FRC_APERR_M	 = 1<<3, /* force ADDRPERR on MST */
	TST_FRC_APERR_T	 = 1<<2, /* force ADDRPERR on TRG */
	TST_CFG_WRITE_ON = 1<<1, /* Enable  Config Reg WR */
	TST_CFG_WRITE_OFF= 1<<0, /* Disable Config Reg WR */
};

/* B2_MAC_CFG	 8 bit	MAC Configuration / Chip Revision */
enum {
	CFG_CHIP_R_MSK	= 0xf<<4,	/* Bit 7.. 4: Chip Revision */
					/* Bit 3.. 2:	reserved */
	CFG_DIS_M2_CLK	= 1<<1,		/* Disable Clock for 2nd MAC */
	CFG_SNG_MAC	= 1<<0,		/* MAC Config: 0=2 MACs / 1=1 MAC*/
};

/* B2_CHIP_ID	 8 bit	Chip Identification Number */
enum {
	CHIP_ID_GENESIS		= 0x0a, /* Chip ID for GENESIS */
	CHIP_ID_YUKON		= 0xb0, /* Chip ID for YUKON */
	CHIP_ID_YUKON_LITE	= 0xb1, /* Chip ID for YUKON-Lite (Rev. A1-A3) */
	CHIP_ID_YUKON_LP	= 0xb2, /* Chip ID for YUKON-LP */
	CHIP_ID_YUKON_XL	= 0xb3, /* Chip ID for YUKON-2 XL */
	CHIP_ID_YUKON_EC	= 0xb6, /* Chip ID for YUKON-2 EC */
	CHIP_ID_YUKON_FE	= 0xb7, /* Chip ID for YUKON-2 FE */

	CHIP_REV_YU_LITE_A1	= 3,	/* Chip Rev. for YUKON-Lite A1,A2 */
	CHIP_REV_YU_LITE_A3	= 7,	/* Chip Rev. for YUKON-Lite A3 */
};

/* B2_TI_CTRL	 8 bit	Timer control */
/* B2_IRQM_CTRL	 8 bit	IRQ Moderation Timer Control */
enum {
	TIM_START	= 1<<2,	/* Start Timer */
	TIM_STOP	= 1<<1,	/* Stop  Timer */
	TIM_CLR_IRQ	= 1<<0,	/* Clear Timer IRQ (!IRQM) */
};

/* B2_TI_TEST	 8 Bit	Timer Test */
/* B2_IRQM_TEST	 8 bit	IRQ Moderation Timer Test */
/* B28_DPT_TST	 8 bit	Descriptor Poll Timer Test Reg */
enum {
	TIM_T_ON	= 1<<2,	/* Test mode on */
	TIM_T_OFF	= 1<<1,	/* Test mode off */
	TIM_T_STEP	= 1<<0,	/* Test step */
};

/* B2_GP_IO	32 bit	General Purpose I/O Register */
enum {
	GP_DIR_9 = 1<<25, /* IO_9 direct, 0=In/1=Out */
	GP_DIR_8 = 1<<24, /* IO_8 direct, 0=In/1=Out */
	GP_DIR_7 = 1<<23, /* IO_7 direct, 0=In/1=Out */
	GP_DIR_6 = 1<<22, /* IO_6 direct, 0=In/1=Out */
	GP_DIR_5 = 1<<21, /* IO_5 direct, 0=In/1=Out */
	GP_DIR_4 = 1<<20, /* IO_4 direct, 0=In/1=Out */
	GP_DIR_3 = 1<<19, /* IO_3 direct, 0=In/1=Out */
	GP_DIR_2 = 1<<18, /* IO_2 direct, 0=In/1=Out */
	GP_DIR_1 = 1<<17, /* IO_1 direct, 0=In/1=Out */
	GP_DIR_0 = 1<<16, /* IO_0 direct, 0=In/1=Out */

	GP_IO_9	= 1<<9,	/* IO_9 pin */
	GP_IO_8	= 1<<8,	/* IO_8 pin */
	GP_IO_7	= 1<<7,	/* IO_7 pin */
	GP_IO_6	= 1<<6,	/* IO_6 pin */
	GP_IO_5	= 1<<5,	/* IO_5 pin */
	GP_IO_4	= 1<<4,	/* IO_4 pin */
	GP_IO_3	= 1<<3,	/* IO_3 pin */
	GP_IO_2	= 1<<2,	/* IO_2 pin */
	GP_IO_1	= 1<<1,	/* IO_1 pin */
	GP_IO_0	= 1<<0,	/* IO_0 pin */
};
318 | |||
/* Descriptor Bit Definition */
/*	TxCtrl		Transmit Buffer Control Field */
/*	RxCtrl		Receive  Buffer Control Field */
enum {
	BMU_OWN		= 1<<31,	/* OWN bit: 0=host/1=BMU */
	BMU_STF		= 1<<30,	/* Start of Frame */
	BMU_EOF		= 1<<29,	/* End of Frame */
	BMU_IRQ_EOB	= 1<<28,	/* Req "End of Buffer" IRQ */
	BMU_IRQ_EOF	= 1<<27,	/* Req "End of Frame" IRQ */
	/* TxCtrl specific bits */
	BMU_STFWD	= 1<<26,	/* (Tx)	Store & Forward Frame */
	BMU_NO_FCS	= 1<<25,	/* (Tx) Disable MAC FCS (CRC) generation */
	BMU_SW		= 1<<24,	/* (Tx)	1 bit res. for SW use */
	/* RxCtrl specific bits (same bit positions, Rx meaning) */
	BMU_DEV_0	= 1<<26,	/* (Rx)	Transfer data to Dev0 */
	BMU_STAT_VAL	= 1<<25,	/* (Rx)	Rx Status Valid */
	BMU_TIST_VAL	= 1<<24,	/* (Rx)	Rx TimeStamp Valid */
	/* Bit 23..16:	BMU Check Opcodes */
	BMU_CHECK	= 0x55<<16,	/* Default BMU check */
	BMU_TCP_CHECK	= 0x56<<16,	/* Descr with TCP ext */
	BMU_UDP_CHECK	= 0x57<<16,	/* Descr with UDP ext (YUKON only) */
	BMU_BBC		= 0xffffL,	/* Bit 15.. 0:	Buffer Byte Counter */
};

/* B2_BSC_CTRL	 8 bit	Blink Source Counter Control */
enum {
	BSC_START	= 1<<1,	/* Start Blink Source Counter */
	BSC_STOP	= 1<<0,	/* Stop  Blink Source Counter */
};

/* B2_BSC_STAT	 8 bit	Blink Source Counter Status */
enum {
	BSC_SRC		= 1<<0,	/* Blink Source, 0=Off / 1=On */
};

/* B2_BSC_TST	16 bit	Blink Source Counter Test Reg */
enum {
	BSC_T_ON	= 1<<2,	/* Test mode on */
	BSC_T_OFF	= 1<<1,	/* Test mode off */
	BSC_T_STEP	= 1<<0,	/* Test step */
};

/* B3_RAM_ADDR	32 bit	RAM Address, to read or write */
					/* Bit 31..19:	reserved */
#define RAM_ADR_RAN	0x0007ffffL	/* Bit 18.. 0:	RAM Address Range */
/* RAM Interface Registers */

/* B3_RI_CTRL	16 bit	RAM Iface Control Register */
enum {
	RI_CLR_RD_PERR	= 1<<9,	/* Clear IRQ RAM Read  Parity Err */
	RI_CLR_WR_PERR	= 1<<8,	/* Clear IRQ RAM Write Parity Err*/

	RI_RST_CLR	= 1<<1,	/* Clear RAM Interface Reset */
	RI_RST_SET	= 1<<0,	/* Set   RAM Interface Reset */
};

/* MAC Arbiter Registers */
/* B3_MA_TO_CTRL	16 bit	MAC Arbiter Timeout Ctrl Reg */
enum {
	MA_FOE_ON	= 1<<3,	/* XMAC Fast Output Enable ON */
	MA_FOE_OFF	= 1<<2,	/* XMAC Fast Output Enable OFF */
	MA_RST_CLR	= 1<<1,	/* Clear MAC Arbiter Reset */
	MA_RST_SET	= 1<<0,	/* Set   MAC Arbiter Reset */

};

/* Timeout values */
#define SK_MAC_TO_53	72	/* MAC arbiter timeout */
#define SK_PKT_TO_53	0x2000	/* Packet arbiter timeout */
#define SK_PKT_TO_MAX	0xffff	/* Maximum value */
#define SK_RI_TO_53	36	/* RAM interface timeout */

/* Packet Arbiter Registers */
/* B3_PA_CTRL	16 bit	Packet Arbiter Ctrl Register */
enum {
	PA_CLR_TO_TX2	= 1<<13,/* Clear IRQ Packet Timeout TX2 */
	PA_CLR_TO_TX1	= 1<<12,/* Clear IRQ Packet Timeout TX1 */
	PA_CLR_TO_RX2	= 1<<11,/* Clear IRQ Packet Timeout RX2 */
	PA_CLR_TO_RX1	= 1<<10,/* Clear IRQ Packet Timeout RX1 */
	PA_ENA_TO_TX2	= 1<<9,	/* Enable  Timeout Timer TX2 */
	PA_DIS_TO_TX2	= 1<<8,	/* Disable Timeout Timer TX2 */
	PA_ENA_TO_TX1	= 1<<7,	/* Enable  Timeout Timer TX1 */
	PA_DIS_TO_TX1	= 1<<6,	/* Disable Timeout Timer TX1 */
	PA_ENA_TO_RX2	= 1<<5,	/* Enable  Timeout Timer RX2 */
	PA_DIS_TO_RX2	= 1<<4,	/* Disable Timeout Timer RX2 */
	PA_ENA_TO_RX1	= 1<<3,	/* Enable  Timeout Timer RX1 */
	PA_DIS_TO_RX1	= 1<<2,	/* Disable Timeout Timer RX1 */
	PA_RST_CLR	= 1<<1,	/* Clear MAC Arbiter Reset */
	PA_RST_SET	= 1<<0,	/* Set   MAC Arbiter Reset */
};

#define PA_ENA_TO_ALL	(PA_ENA_TO_RX1 | PA_ENA_TO_RX2 |\
						PA_ENA_TO_TX1 | PA_ENA_TO_TX2)


/* Transmit Arbiter Registers MAC 1 and 2, use SK_REG() to access */
/* TXA_ITI_INI		32 bit	Tx Arb Interval Timer Init Val */
/* TXA_ITI_VAL		32 bit	Tx Arb Interval Timer Value */
/* TXA_LIM_INI		32 bit	Tx Arb Limit Counter Init Val */
/* TXA_LIM_VAL		32 bit	Tx Arb Limit Counter Value */

#define TXA_MAX_VAL	0x00ffffffUL	/* Bit 23.. 0:	Max TXA Timer/Cnt Val */
421 | |||
/* TXA_CTRL	 8 bit	Tx Arbiter Control Register */
enum {
	TXA_ENA_FSYNC	= 1<<7,	/* Enable  force of sync Tx queue */
	TXA_DIS_FSYNC	= 1<<6,	/* Disable force of sync Tx queue */
	TXA_ENA_ALLOC	= 1<<5,	/* Enable  alloc of free bandwidth */
	TXA_DIS_ALLOC	= 1<<4,	/* Disable alloc of free bandwidth */
	TXA_START_RC	= 1<<3,	/* Start sync Rate Control */
	TXA_STOP_RC	= 1<<2,	/* Stop  sync Rate Control */
	TXA_ENA_ARB	= 1<<1,	/* Enable  Tx Arbiter */
	TXA_DIS_ARB	= 1<<0,	/* Disable Tx Arbiter */
};

/*
 *	Bank 4 - 5
 */
/* Transmit Arbiter Registers MAC 1 and 2, use SK_REG() to access */
enum {
	TXA_ITI_INI	= 0x0200,/* 32 bit	Tx Arb Interval Timer Init Val*/
	TXA_ITI_VAL	= 0x0204,/* 32 bit	Tx Arb Interval Timer Value */
	TXA_LIM_INI	= 0x0208,/* 32 bit	Tx Arb Limit Counter Init Val */
	TXA_LIM_VAL	= 0x020c,/* 32 bit	Tx Arb Limit Counter Value */
	TXA_CTRL	= 0x0210,/*  8 bit	Tx Arbiter Control Register */
	TXA_TEST	= 0x0211,/*  8 bit	Tx Arbiter Test Register */
	TXA_STAT	= 0x0212,/*  8 bit	Tx Arbiter Status Register */
};


/* Per-bank base offsets for the queue and RAM buffer register files */
enum {
	B6_EXT_REG	= 0x0300,/* External registers (GENESIS only) */
	B7_CFG_SPC	= 0x0380,/* copy of the Configuration register */
	B8_RQ1_REGS	= 0x0400,/* Receive Queue 1 */
	B8_RQ2_REGS	= 0x0480,/* Receive Queue 2 */
	B8_TS1_REGS	= 0x0600,/* Transmit sync queue 1 */
	B8_TA1_REGS	= 0x0680,/* Transmit async queue 1 */
	B8_TS2_REGS	= 0x0700,/* Transmit sync queue 2 */
	B8_TA2_REGS	= 0x0780,/* Transmit sync queue 2 */
	B16_RAM_REGS	= 0x0800,/* RAM Buffer Registers */
};

/* Queue Register Offsets, use Q_ADDR() to access */
enum {
	B8_Q_REGS = 0x0400, /* base of Queue registers */
	Q_D	= 0x00,	/* 8*32	bit	Current Descriptor */
	Q_DA_L	= 0x20,	/* 32 bit	Current Descriptor Address Low dWord */
	Q_DA_H	= 0x24,	/* 32 bit	Current Descriptor Address High dWord */
	Q_AC_L	= 0x28,	/* 32 bit	Current Address Counter Low dWord */
	Q_AC_H	= 0x2c,	/* 32 bit	Current Address Counter High dWord */
	Q_BC	= 0x30,	/* 32 bit	Current Byte Counter */
	Q_CSR	= 0x34,	/* 32 bit	BMU Control/Status Register */
	Q_F	= 0x38,	/* 32 bit	Flag Register */
	Q_T1	= 0x3c,	/* 32 bit	Test Register 1 */
	Q_T1_TR	= 0x3c,	/*  8 bit	Test Register 1 Transfer SM */
	Q_T1_WR	= 0x3d,	/*  8 bit	Test Register 1 Write Descriptor SM */
	Q_T1_RD	= 0x3e,	/*  8 bit	Test Register 1 Read Descriptor SM */
	Q_T1_SV	= 0x3f,	/*  8 bit	Test Register 1 Supervisor SM */
	Q_T2	= 0x40,	/* 32 bit	Test Register 2	*/
	Q_T3	= 0x44,	/* 32 bit	Test Register 3	*/

};
#define Q_ADDR(reg, offs) (B8_Q_REGS + (reg) + (offs))

/* RAM Buffer Register Offsets */
enum {

	RB_START= 0x00,/* 32 bit	RAM Buffer Start Address */
	RB_END	= 0x04,/* 32 bit	RAM Buffer End Address */
	RB_WP	= 0x08,/* 32 bit	RAM Buffer Write Pointer */
	RB_RP	= 0x0c,/* 32 bit	RAM Buffer Read Pointer */
	RB_RX_UTPP= 0x10,/* 32 bit	Rx Upper Threshold, Pause Packet */
	RB_RX_LTPP= 0x14,/* 32 bit	Rx Lower Threshold, Pause Packet */
	RB_RX_UTHP= 0x18,/* 32 bit	Rx Upper Threshold, High Prio */
	RB_RX_LTHP= 0x1c,/* 32 bit	Rx Lower Threshold, High Prio */
	/* 0x10 - 0x1f:	reserved at Tx RAM Buffer Registers */
	RB_PC	= 0x20,/* 32 bit	RAM Buffer Packet Counter */
	RB_LEV	= 0x24,/* 32 bit	RAM Buffer Level Register */
	RB_CTRL	= 0x28,/* 32 bit	RAM Buffer Control Register */
	RB_TST1	= 0x29,/*  8 bit	RAM Buffer Test Register 1 */
	RB_TST2	= 0x2a,/*  8 bit	RAM Buffer Test Register 2 */
};

/* Receive and Transmit Queues */
enum {
	Q_R1	= 0x0000,	/* Receive Queue 1 */
	Q_R2	= 0x0080,	/* Receive Queue 2 */
	Q_XS1	= 0x0200,	/* Synchronous Transmit Queue 1 */
	Q_XA1	= 0x0280,	/* Asynchronous Transmit Queue 1 */
	Q_XS2	= 0x0300,	/* Synchronous Transmit Queue 2 */
	Q_XA2	= 0x0380,	/* Asynchronous Transmit Queue 2 */
};

/* Different MAC Types */
enum {
	SK_MAC_XMAC = 0,	/* Xaqti XMAC II */
	SK_MAC_GMAC = 1,	/* Marvell GMAC */
};

/* Different PHY Types */
enum {
	SK_PHY_XMAC	= 0,/* integrated in XMAC II */
	SK_PHY_BCOM	= 1,/* Broadcom BCM5400 */
	SK_PHY_LONE	= 2,/* Level One LXT1000 [not supported]*/
	SK_PHY_NAT	= 3,/* National DP83891 [not supported] */
	SK_PHY_MARV_COPPER= 4,/* Marvell 88E1011S */
	SK_PHY_MARV_FIBER = 5,/* Marvell 88E1011S working on fiber */
};

/* PHY addresses (bits 12..8 of PHY address reg) */
enum {
	PHY_ADDR_XMAC	= 0<<8,
	PHY_ADDR_BCOM	= 1<<8,

/* GPHY address (bits 15..11 of SMI control reg) */
	PHY_ADDR_MARV	= 0,
};
536 | |||
/* Address of a RAM buffer register: bank base + queue offset + register */
#define RB_ADDR(offs, queue) ((u16)B16_RAM_REGS + (u16)(queue) + (offs))

/* Receive MAC FIFO, Receive LED, and Link_Sync regs (GENESIS only) */
enum {
	RX_MFF_EA	= 0x0c00,/* 32 bit	Receive MAC FIFO End Address */
	RX_MFF_WP	= 0x0c04,/* 32 bit	Receive MAC FIFO Write Pointer */

	RX_MFF_RP	= 0x0c0c,/* 32 bit	Receive MAC FIFO Read Pointer */
	RX_MFF_PC	= 0x0c10,/* 32 bit	Receive MAC FIFO Packet Cnt */
	RX_MFF_LEV	= 0x0c14,/* 32 bit	Receive MAC FIFO Level */
	RX_MFF_CTRL1	= 0x0c18,/* 16 bit	Receive MAC FIFO Control Reg 1*/
	RX_MFF_STAT_TO	= 0x0c1a,/*  8 bit	Receive MAC Status Timeout */
	RX_MFF_TIST_TO	= 0x0c1b,/*  8 bit	Receive MAC Time Stamp Timeout */
	RX_MFF_CTRL2	= 0x0c1c,/*  8 bit	Receive MAC FIFO Control Reg 2*/
	RX_MFF_TST1	= 0x0c1d,/*  8 bit	Receive MAC FIFO Test Reg 1 */
	RX_MFF_TST2	= 0x0c1e,/*  8 bit	Receive MAC FIFO Test Reg 2 */

	RX_LED_INI	= 0x0c20,/* 32 bit	Receive LED Cnt Init Value */
	RX_LED_VAL	= 0x0c24,/* 32 bit	Receive LED Cnt Current Value */
	RX_LED_CTRL	= 0x0c28,/*  8 bit	Receive LED Cnt Control Reg */
	RX_LED_TST	= 0x0c29,/*  8 bit	Receive LED Cnt Test Register */

	LNK_SYNC_INI	= 0x0c30,/* 32 bit	Link Sync Cnt Init Value */
	LNK_SYNC_VAL	= 0x0c34,/* 32 bit	Link Sync Cnt Current Value */
	LNK_SYNC_CTRL	= 0x0c38,/*  8 bit	Link Sync Cnt Control Register */
	LNK_SYNC_TST	= 0x0c39,/*  8 bit	Link Sync Cnt Test Register */
	LNK_LED_REG	= 0x0c3c,/*  8 bit	Link LED Register */
};

/* Receive and Transmit MAC FIFO Registers (GENESIS only) */
/* RX_MFF_CTRL1	16 bit	Receive MAC FIFO Control Reg 1 */
enum {
	MFF_ENA_RDY_PAT	= 1<<13,	/* Enable  Ready Patch */
	MFF_DIS_RDY_PAT	= 1<<12,	/* Disable Ready Patch */
	MFF_ENA_TIM_PAT	= 1<<11,	/* Enable  Timing Patch */
	MFF_DIS_TIM_PAT	= 1<<10,	/* Disable Timing Patch */
	MFF_ENA_ALM_FUL	= 1<<9,		/* Enable  AlmostFull Sign */
	MFF_DIS_ALM_FUL	= 1<<8,		/* Disable AlmostFull Sign */
	MFF_ENA_PAUSE	= 1<<7,		/* Enable  Pause Signaling */
	MFF_DIS_PAUSE	= 1<<6,		/* Disable Pause Signaling */
	MFF_ENA_FLUSH	= 1<<5,		/* Enable  Frame Flushing */
	MFF_DIS_FLUSH	= 1<<4,		/* Disable Frame Flushing */
	MFF_ENA_TIST	= 1<<3,		/* Enable  Time Stamp Gener */
	MFF_DIS_TIST	= 1<<2,		/* Disable Time Stamp Gener */
	MFF_CLR_INTIST	= 1<<1,		/* Clear IRQ No Time Stamp */
	MFF_CLR_INSTAT	= 1<<0,		/* Clear IRQ No Status */
	MFF_RX_CTRL_DEF = MFF_ENA_TIM_PAT,
};

/* TX_MFF_CTRL1	16 bit	Transmit MAC FIFO Control Reg 1 */
enum {
	MFF_CLR_PERR	= 1<<15,	/* Clear Parity Error IRQ */

	MFF_ENA_PKT_REC	= 1<<13,	/* Enable  Packet Recovery */
	MFF_DIS_PKT_REC	= 1<<12,	/* Disable Packet Recovery */

	MFF_ENA_W4E	= 1<<7,		/* Enable  Wait for Empty */
	MFF_DIS_W4E	= 1<<6,		/* Disable Wait for Empty */

	MFF_ENA_LOOPB	= 1<<3,		/* Enable  Loopback */
	MFF_DIS_LOOPB	= 1<<2,		/* Disable Loopback */
	MFF_CLR_MAC_RST	= 1<<1,		/* Clear XMAC Reset */
	MFF_SET_MAC_RST	= 1<<0,		/* Set   XMAC Reset */

	MFF_TX_CTRL_DEF	= MFF_ENA_PKT_REC | (u16) MFF_ENA_TIM_PAT | MFF_ENA_FLUSH,
};


/* RX_MFF_TST2	 	 8 bit	Receive MAC FIFO Test Register 2 */
/* TX_MFF_TST2	 	 8 bit	Transmit MAC FIFO Test Register 2 */
enum {
	MFF_WSP_T_ON	= 1<<6,	/* Tx: Write Shadow Ptr TestOn */
	MFF_WSP_T_OFF	= 1<<5,	/* Tx: Write Shadow Ptr TstOff */
	MFF_WSP_INC	= 1<<4,	/* Tx: Write Shadow Ptr Increment */
	MFF_PC_DEC	= 1<<3,	/* Packet Counter Decrement */
	MFF_PC_T_ON	= 1<<2,	/* Packet Counter Test On */
	MFF_PC_T_OFF	= 1<<1,	/* Packet Counter Test Off */
	MFF_PC_INC	= 1<<0,	/* Packet Counter Increment */
};

/* RX_MFF_TST1	 	 8 bit	Receive MAC FIFO Test Register 1 */
/* TX_MFF_TST1	 	 8 bit	Transmit MAC FIFO Test Register 1 */
enum {
	MFF_WP_T_ON	= 1<<6,	/* Write Pointer Test On */
	MFF_WP_T_OFF	= 1<<5,	/* Write Pointer Test Off */
	MFF_WP_INC	= 1<<4,	/* Write Pointer Increm */

	MFF_RP_T_ON	= 1<<2,	/* Read Pointer Test On */
	MFF_RP_T_OFF	= 1<<1,	/* Read Pointer Test Off */
	MFF_RP_DEC	= 1<<0,	/* Read Pointer Decrement */
};

/* RX_MFF_CTRL2	 8 bit	Receive MAC FIFO Control Reg 2 */
/* TX_MFF_CTRL2	 8 bit	Transmit MAC FIFO Control Reg 2 */
enum {
	MFF_ENA_OP_MD	= 1<<3,	/* Enable  Operation Mode */
	MFF_DIS_OP_MD	= 1<<2,	/* Disable Operation Mode */
	MFF_RST_CLR	= 1<<1,	/* Clear MAC FIFO Reset */
	MFF_RST_SET	= 1<<0,	/* Set   MAC FIFO Reset */
};


/* Link LED Counter Registers (GENESIS only) */

/* RX_LED_CTRL		 8 bit	Receive LED Cnt Control Reg */
/* TX_LED_CTRL		 8 bit	Transmit LED Cnt Control Reg */
/* LNK_SYNC_CTRL	 8 bit	Link Sync Cnt Control Register */
enum {
	LED_START	= 1<<2,	/* Start Timer */
	LED_STOP	= 1<<1,	/* Stop Timer */
	LED_STATE	= 1<<0,	/* Rx/Tx: LED State, 1=LED on */
};

/* RX_LED_TST		 8 bit	Receive LED Cnt Test Register */
/* TX_LED_TST		 8 bit	Transmit LED Cnt Test Register */
/* LNK_SYNC_TST		 8 bit	Link Sync Cnt Test Register */
enum {
	LED_T_ON	= 1<<2,	/* LED Counter Test mode On */
	LED_T_OFF	= 1<<1,	/* LED Counter Test mode Off */
	LED_T_STEP	= 1<<0,	/* LED Counter Step */
};

/* LNK_LED_REG	 8 bit	Link LED Register */
enum {
	LED_BLK_ON	= 1<<5,	/* Link LED Blinking On */
	LED_BLK_OFF	= 1<<4,	/* Link LED Blinking Off */
	LED_SYNC_ON	= 1<<3,	/* Use Sync Wire to switch LED */
	LED_SYNC_OFF	= 1<<2,	/* Disable Sync Wire Input */
	LED_ON		= 1<<1,	/* switch LED on */
	LED_OFF		= 1<<0,	/* switch LED off */
};
668 | |||
/* Receive GMAC FIFO (YUKON) */
enum {
	RX_GMF_EA	= 0x0c40,/* 32 bit	Rx GMAC FIFO End Address */
	RX_GMF_AF_THR	= 0x0c44,/* 32 bit	Rx GMAC FIFO Almost Full Thresh. */
	RX_GMF_CTRL_T	= 0x0c48,/* 32 bit	Rx GMAC FIFO Control/Test */
	RX_GMF_FL_MSK	= 0x0c4c,/* 32 bit	Rx GMAC FIFO Flush Mask */
	RX_GMF_FL_THR	= 0x0c50,/* 32 bit	Rx GMAC FIFO Flush Threshold */
	RX_GMF_WP	= 0x0c60,/* 32 bit	Rx GMAC FIFO Write Pointer */
	RX_GMF_WLEV	= 0x0c68,/* 32 bit	Rx GMAC FIFO Write Level */
	RX_GMF_RP	= 0x0c70,/* 32 bit	Rx GMAC FIFO Read Pointer */
	RX_GMF_RLEV	= 0x0c78,/* 32 bit	Rx GMAC FIFO Read Level */
};


/* TXA_TEST	 8 bit	Tx Arbiter Test Register */
enum {
	TXA_INT_T_ON	= 1<<5,	/* Tx Arb Interval Timer Test On */
	TXA_INT_T_OFF	= 1<<4,	/* Tx Arb Interval Timer Test Off */
	TXA_INT_T_STEP	= 1<<3,	/* Tx Arb Interval Timer Step */
	TXA_LIM_T_ON	= 1<<2,	/* Tx Arb Limit Timer Test On */
	TXA_LIM_T_OFF	= 1<<1,	/* Tx Arb Limit Timer Test Off */
	TXA_LIM_T_STEP	= 1<<0,	/* Tx Arb Limit Timer Step */
};

/* TXA_STAT	 8 bit	Tx Arbiter Status Register */
enum {
	TXA_PRIO_XS	= 1<<0,	/* sync queue has prio to send */
};


/* Q_BC		32 bit	Current Byte Counter */

/* BMU Control Status Registers */
/* B0_R1_CSR	32 bit	BMU Ctrl/Stat Rx Queue 1 */
/* B0_R2_CSR	32 bit	BMU Ctrl/Stat Rx Queue 2 */
/* B0_XA1_CSR	32 bit	BMU Ctrl/Stat Sync Tx Queue 1 */
/* B0_XS1_CSR	32 bit	BMU Ctrl/Stat Async Tx Queue 1 */
/* B0_XA2_CSR	32 bit	BMU Ctrl/Stat Sync Tx Queue 2 */
/* B0_XS2_CSR	32 bit	BMU Ctrl/Stat Async Tx Queue 2 */
/* Q_CSR	32 bit	BMU Control/Status Register */

enum {
	CSR_SV_IDLE	= 1<<24,	/* BMU SM Idle */

	CSR_DESC_CLR	= 1<<21,	/* Clear Reset for Descr */
	CSR_DESC_SET	= 1<<20,	/* Set   Reset for Descr */
	CSR_FIFO_CLR	= 1<<19,	/* Clear Reset for FIFO */
	CSR_FIFO_SET	= 1<<18,	/* Set   Reset for FIFO */
	CSR_HPI_RUN	= 1<<17,	/* Release HPI SM */
	CSR_HPI_RST	= 1<<16,	/* Reset HPI SM to Idle */
	CSR_SV_RUN	= 1<<15,	/* Release Supervisor SM */
	CSR_SV_RST	= 1<<14,	/* Reset Supervisor SM */
	CSR_DREAD_RUN	= 1<<13,	/* Release Descr Read SM */
	CSR_DREAD_RST	= 1<<12,	/* Reset Descr Read SM */
	CSR_DWRITE_RUN	= 1<<11,	/* Release Descr Write SM */
	CSR_DWRITE_RST	= 1<<10,	/* Reset Descr Write SM */
	CSR_TRANS_RUN	= 1<<9,		/* Release Transfer SM */
	CSR_TRANS_RST	= 1<<8,		/* Reset Transfer SM */
	CSR_ENA_POL	= 1<<7,		/* Enable  Descr Polling */
	CSR_DIS_POL	= 1<<6,		/* Disable Descr Polling */
	CSR_STOP	= 1<<5,		/* Stop  Rx/Tx Queue */
	CSR_START	= 1<<4,		/* Start Rx/Tx Queue */
	CSR_IRQ_CL_P	= 1<<3,		/* (Rx) Clear Parity IRQ */
	CSR_IRQ_CL_B	= 1<<2,		/* Clear EOB IRQ */
	CSR_IRQ_CL_F	= 1<<1,		/* Clear EOF IRQ */
	CSR_IRQ_CL_C	= 1<<0,		/* Clear ERR IRQ */
};

/* Aggregate set/clear masks for resetting all BMU state machines */
#define CSR_SET_RESET	(CSR_DESC_SET | CSR_FIFO_SET | CSR_HPI_RST |\
			CSR_SV_RST | CSR_DREAD_RST | CSR_DWRITE_RST |\
			CSR_TRANS_RST)
#define CSR_CLR_RESET	(CSR_DESC_CLR | CSR_FIFO_CLR | CSR_HPI_RUN |\
			CSR_SV_RUN | CSR_DREAD_RUN | CSR_DWRITE_RUN |\
			CSR_TRANS_RUN)
743 | |||
/* Q_F	32 bit	Flag Register */
enum {
	F_ALM_FULL	= 1<<27, /* Rx FIFO: almost full */
	F_EMPTY		= 1<<27, /* Tx FIFO: empty flag — deliberately the same
				  * bit as F_ALM_FULL; its meaning depends on
				  * whether the queue is Rx or Tx */
	F_FIFO_EOF	= 1<<26, /* Tag (EOF Flag) bit in FIFO */
	F_WM_REACHED	= 1<<25, /* Watermark reached */

	F_FIFO_LEVEL	= 0x1fL<<16, /* Bit 23..16:	# of Qwords in FIFO */
				/* NOTE(review): 0x1f<<16 only covers bits
				 * 20..16, though the comment says 23..16 —
				 * confirm mask width against the HW spec */
	F_WATER_MARK	= 0x0007ffL, /* Bit 10.. 0:	Watermark */
};
754 | |||
/* RAM Buffer Register Offsets, use RB_ADDR(Queue, Offs) to access */
/* RB_START	32 bit	RAM Buffer Start Address */
/* RB_END	32 bit	RAM Buffer End Address */
/* RB_WP	32 bit	RAM Buffer Write Pointer */
/* RB_RP	32 bit	RAM Buffer Read Pointer */
/* RB_RX_UTPP	32 bit	Rx Upper Threshold, Pause Pack */
/* RB_RX_LTPP	32 bit	Rx Lower Threshold, Pause Pack */
/* RB_RX_UTHP	32 bit	Rx Upper Threshold, High Prio */
/* RB_RX_LTHP	32 bit	Rx Lower Threshold, High Prio */
/* RB_PC	32 bit	RAM Buffer Packet Counter */
/* RB_LEV	32 bit	RAM Buffer Level Register */

#define RB_MSK	0x0007ffff	/* Bit 18.. 0:	RAM Buffer Pointer Bits */
/* RB_TST2	8 bit	RAM Buffer Test Register 2 */
/* RB_TST1	8 bit	RAM Buffer Test Register 1 */

/* RB_CTRL	8 bit	RAM Buffer Control Register */
/* Paired enable/disable and clear/set bits for the RAM buffer control reg. */
enum {
	RB_ENA_STFWD	= 1<<5,	/* Enable  Store & Forward */
	RB_DIS_STFWD	= 1<<4,	/* Disable Store & Forward */
	RB_ENA_OP_MD	= 1<<3,	/* Enable  Operation Mode */
	RB_DIS_OP_MD	= 1<<2,	/* Disable Operation Mode */
	RB_RST_CLR	= 1<<1,	/* Clear RAM Buf STM Reset */
	RB_RST_SET	= 1<<0,	/* Set   RAM Buf STM Reset */
};
780 | |||
/* Transmit MAC FIFO and Transmit LED Registers (GENESIS only), */
/* Register offsets 0x0d00..0x0d29. */
enum {
	TX_MFF_EA	= 0x0d00,/* 32 bit	Transmit MAC FIFO End Address */
	TX_MFF_WP	= 0x0d04,/* 32 bit	Transmit MAC FIFO WR Pointer */
	TX_MFF_WSP	= 0x0d08,/* 32 bit	Transmit MAC FIFO WR Shadow Ptr */
	TX_MFF_RP	= 0x0d0c,/* 32 bit	Transmit MAC FIFO RD Pointer */
	TX_MFF_PC	= 0x0d10,/* 32 bit	Transmit MAC FIFO Packet Cnt */
	TX_MFF_LEV	= 0x0d14,/* 32 bit	Transmit MAC FIFO Level */
	TX_MFF_CTRL1	= 0x0d18,/* 16 bit	Transmit MAC FIFO Ctrl Reg 1 */
	TX_MFF_WAF	= 0x0d1a,/*  8 bit	Transmit MAC Wait after flush */

	TX_MFF_CTRL2	= 0x0d1c,/*  8 bit	Transmit MAC FIFO Ctrl Reg 2 */
	TX_MFF_TST1	= 0x0d1d,/*  8 bit	Transmit MAC FIFO Test Reg 1 */
	TX_MFF_TST2	= 0x0d1e,/*  8 bit	Transmit MAC FIFO Test Reg 2 */

	TX_LED_INI	= 0x0d20,/* 32 bit	Transmit LED Cnt Init Value */
	TX_LED_VAL	= 0x0d24,/* 32 bit	Transmit LED Cnt Current Val */
	TX_LED_CTRL	= 0x0d28,/*  8 bit	Transmit LED Cnt Control Reg */
	TX_LED_TST	= 0x0d29,/*  8 bit	Transmit LED Cnt Test Reg */
};
801 | |||
/* Counter and Timer constants, for a host clock of 62.5 MHz */
#define SK_XMIT_DUR	0x002faf08UL	/*  50 ms */
#define SK_BLK_DUR	0x01dcd650UL	/* 500 ms */

#define SK_DPOLL_DEF	0x00ee6b28UL	/* 250 ms at 62.5 MHz */

#define SK_DPOLL_MAX	0x00ffffffUL	/* 268 ms at 62.5 MHz */
					/* 215 ms at 78.12 MHz */

/* Clock scaling factors, given in percent of the 62.5 MHz base clock. */
#define SK_FACT_62	100		/* is given in percent */
#define SK_FACT_53	 85		/* on GENESIS:	53.12 MHz */
#define SK_FACT_78	125		/* on YUKON:	78.12 MHz */
814 | |||

/* Transmit GMAC FIFO (YUKON only) */
/* Also contains the descriptor-poll and time-stamp timer registers. */
enum {
	TX_GMF_EA	= 0x0d40,/* 32 bit	Tx GMAC FIFO End Address */
	TX_GMF_AE_THR	= 0x0d44,/* 32 bit	Tx GMAC FIFO Almost Empty Thresh.*/
	TX_GMF_CTRL_T	= 0x0d48,/* 32 bit	Tx GMAC FIFO Control/Test */

	TX_GMF_WP	= 0x0d60,/* 32 bit	Tx GMAC FIFO Write Pointer */
	TX_GMF_WSP	= 0x0d64,/* 32 bit	Tx GMAC FIFO Write Shadow Ptr. */
	TX_GMF_WLEV	= 0x0d68,/* 32 bit	Tx GMAC FIFO Write Level */

	TX_GMF_RP	= 0x0d70,/* 32 bit	Tx GMAC FIFO Read Pointer */
	TX_GMF_RSTP	= 0x0d74,/* 32 bit	Tx GMAC FIFO Restart Pointer */
	TX_GMF_RLEV	= 0x0d78,/* 32 bit	Tx GMAC FIFO Read Level */

	/* Descriptor Poll Timer Registers */
	B28_DPT_INI	= 0x0e00,/* 24 bit	Descriptor Poll Timer Init Val */
	B28_DPT_VAL	= 0x0e04,/* 24 bit	Descriptor Poll Timer Curr Val */
	B28_DPT_CTRL	= 0x0e08,/*  8 bit	Descriptor Poll Timer Ctrl Reg */

	B28_DPT_TST	= 0x0e0a,/*  8 bit	Descriptor Poll Timer Test Reg */

	/* Time Stamp Timer Registers (YUKON only) */
	GMAC_TI_ST_VAL	= 0x0e14,/* 32 bit	Time Stamp Timer Curr Val */
	GMAC_TI_ST_CTRL	= 0x0e18,/*  8 bit	Time Stamp Timer Ctrl Reg */
	GMAC_TI_ST_TST	= 0x0e1a,/*  8 bit	Time Stamp Timer Test Reg */
};
842 | |||
843 | |||
/* Link LED control commands; bits may be combined in a single write. */
enum {
	LINKLED_OFF		= 0x01,
	LINKLED_ON		= 0x02,
	LINKLED_LINKSYNC_OFF	= 0x04,
	LINKLED_LINKSYNC_ON	= 0x08,
	LINKLED_BLINK_OFF	= 0x10,
	LINKLED_BLINK_ON	= 0x20,
};
852 | |||
/* GMAC and GPHY Control Registers (YUKON only) */
enum {
	GMAC_CTRL	= 0x0f00,/* 32 bit	GMAC Control Reg */
	GPHY_CTRL	= 0x0f04,/* 32 bit	GPHY Control Reg */
	GMAC_IRQ_SRC	= 0x0f08,/*  8 bit	GMAC Interrupt Source Reg */
	GMAC_IRQ_MSK	= 0x0f0c,/*  8 bit	GMAC Interrupt Mask Reg */
	GMAC_LINK_CTRL	= 0x0f10,/* 16 bit	Link Control Reg */

/* Wake-up Frame Pattern Match Control Registers (YUKON only) */

	WOL_REG_OFFS	= 0x20,/* HW-Bug: Address is + 0x20 against spec. */

	WOL_CTRL_STAT	= 0x0f20,/* 16 bit	WOL Control/Status Reg */
	WOL_MATCH_CTL	= 0x0f22,/*  8 bit	WOL Match Control Reg */
	WOL_MATCH_RES	= 0x0f23,/*  8 bit	WOL Match Result Reg */
	WOL_MAC_ADDR	= 0x0f24,/* 32 bit	WOL MAC Address */
	WOL_PATT_RPTR	= 0x0f2c,/*  8 bit	WOL Pattern Read Pointer */

/* WOL Pattern Length Registers (YUKON only) */

	WOL_PATT_LEN_LO	= 0x0f30,/* 32 bit	WOL Pattern Length 3..0 */
	WOL_PATT_LEN_HI	= 0x0f34,/* 24 bit	WOL Pattern Length 6..4 */

/* WOL Pattern Counter Registers (YUKON only) */

	WOL_PATT_CNT_0	= 0x0f38,/* 32 bit	WOL Pattern Counter 3..0 */
	WOL_PATT_CNT_4	= 0x0f3c,/* 24 bit	WOL Pattern Counter 6..4 */
};
/* Map a WOL register offset to the given port: port 1's WOL register block
 * sits 0x80 bytes above port 0's.
 * Both arguments are parenthesized so expression arguments (e.g. a ternary
 * or bitwise-or) expand with the intended precedence.
 */
#define WOL_REGS(port, x)	((x) + (port) * 0x80)
882 | |||
/* Per-link WOL pattern RAM base addresses; link 2's RAM is 0x400 above link 1's. */
enum {
	WOL_PATT_RAM_1	= 0x1000,/* WOL Pattern RAM Link 1 */
	WOL_PATT_RAM_2	= 0x1400,/* WOL Pattern RAM Link 2 */
};
/* Select the pattern RAM base for a given port (0 or 1). */
#define WOL_PATT_RAM_BASE(port)	(WOL_PATT_RAM_1 + (port)*0x400)
888 | |||
/* Base addresses of the per-port MAC register blocks. */
enum {
	BASE_XMAC_1	= 0x2000,/* XMAC 1 registers */
	BASE_GMAC_1	= 0x2800,/* GMAC 1 registers */
	BASE_XMAC_2	= 0x3000,/* XMAC 2 registers */
	BASE_GMAC_2	= 0x3800,/* GMAC 2 registers */
};
895 | |||
/*
 * Receive Frame Status Encoding
 */
enum {
	XMR_FS_LEN	= 0x3fff<<18,	/* Bit 31..18:	Rx Frame Length */
	XMR_FS_LEN_SHIFT = 18,
	XMR_FS_2L_VLAN	= 1<<17, /* Bit 17:	tagged wh 2Lev VLAN ID*/
	XMR_FS_1_VLAN	= 1<<16, /* Bit 16:	tagged wh 1ev VLAN ID*/
	XMR_FS_BC	= 1<<15, /* Bit 15:	Broadcast Frame */
	XMR_FS_MC	= 1<<14, /* Bit 14:	Multicast Frame */
	XMR_FS_UC	= 1<<13, /* Bit 13:	Unicast Frame */
	/* Bit 12 unused */
	XMR_FS_BURST	= 1<<11, /* Bit 11:	Burst Mode */
	XMR_FS_CEX_ERR	= 1<<10, /* Bit 10:	Carrier Ext. Error */
	XMR_FS_802_3	= 1<<9,	 /* Bit  9:	802.3 Frame */
	XMR_FS_COL_ERR	= 1<<8,	 /* Bit  8:	Collision Error */
	XMR_FS_CAR_ERR	= 1<<7,	 /* Bit  7:	Carrier Event Error */
	XMR_FS_LEN_ERR	= 1<<6,	 /* Bit  6:	In-Range Length Error */
	XMR_FS_FRA_ERR	= 1<<5,	 /* Bit  5:	Framing Error */
	XMR_FS_RUNT	= 1<<4,	 /* Bit  4:	Runt Frame */
	XMR_FS_LNG_ERR	= 1<<3,	 /* Bit  3:	Giant (Jumbo) Frame */
	XMR_FS_FCS_ERR	= 1<<2,	 /* Bit  2:	Frame Check Sequ Err */
	XMR_FS_ERR	= 1<<1,	 /* Bit  1:	Frame Error */
	XMR_FS_MCTRL	= 1<<0,	 /* Bit  0:	MAC Control Packet */

	/*
	 * XMR_FS_ERR will be set if
	 *	XMR_FS_FCS_ERR, XMR_FS_LNG_ERR, XMR_FS_RUNT,
	 *	XMR_FS_FRA_ERR, XMR_FS_LEN_ERR, or XMR_FS_CEX_ERR
	 * is set. XMR_FS_LNG_ERR and XMR_FS_LEN_ERR will issue
	 * XMR_FS_ERR unless the corresponding bit in the Receive Command
	 * Register is set.
	 */
};
930 | |||
/*
 * XMAC-PHY Registers, indirect addressed over the XMAC
 */
/* XMAC PHY register addresses (standard MII layout for 0x00..0x08). */
enum {
	PHY_XMAC_CTRL		= 0x00,/* 16 bit r/w	PHY Control Register */
	PHY_XMAC_STAT		= 0x01,/* 16 bit r/w	PHY Status Register */
	PHY_XMAC_ID0		= 0x02,/* 16 bit r/o	PHY ID0 Register */
	PHY_XMAC_ID1		= 0x03,/* 16 bit r/o	PHY ID1 Register */
	PHY_XMAC_AUNE_ADV	= 0x04,/* 16 bit r/w	Auto-Neg. Advertisement */
	PHY_XMAC_AUNE_LP	= 0x05,/* 16 bit r/o	Link Partner Abi Reg */
	PHY_XMAC_AUNE_EXP	= 0x06,/* 16 bit r/o	Auto-Neg. Expansion Reg */
	PHY_XMAC_NEPG		= 0x07,/* 16 bit r/w	Next Page Register */
	PHY_XMAC_NEPG_LP	= 0x08,/* 16 bit r/o	Next Page Link Partner */

	PHY_XMAC_EXT_STAT	= 0x0f,/* 16 bit r/o	Ext Status Register */
	PHY_XMAC_RES_ABI	= 0x10,/* 16 bit r/o	PHY Resolved Ability */
};
/*
 * Broadcom-PHY Registers, indirect addressed over XMAC
 */
enum {
	PHY_BCOM_CTRL		= 0x00,/* 16 bit r/w	PHY Control Register */
	PHY_BCOM_STAT		= 0x01,/* 16 bit r/o	PHY Status Register */
	PHY_BCOM_ID0		= 0x02,/* 16 bit r/o	PHY ID0 Register */
	PHY_BCOM_ID1		= 0x03,/* 16 bit r/o	PHY ID1 Register */
	PHY_BCOM_AUNE_ADV	= 0x04,/* 16 bit r/w	Auto-Neg. Advertisement */
	PHY_BCOM_AUNE_LP	= 0x05,/* 16 bit r/o	Link Part Ability Reg */
	PHY_BCOM_AUNE_EXP	= 0x06,/* 16 bit r/o	Auto-Neg. Expansion Reg */
	PHY_BCOM_NEPG		= 0x07,/* 16 bit r/w	Next Page Register */
	PHY_BCOM_NEPG_LP	= 0x08,/* 16 bit r/o	Next Page Link Partner */
	/* Broadcom-specific registers */
	PHY_BCOM_1000T_CTRL	= 0x09,/* 16 bit r/w	1000Base-T Control Reg */
	PHY_BCOM_1000T_STAT	= 0x0a,/* 16 bit r/o	1000Base-T Status Reg */
	PHY_BCOM_EXT_STAT	= 0x0f,/* 16 bit r/o	Extended Status Reg */
	PHY_BCOM_P_EXT_CTRL	= 0x10,/* 16 bit r/w	PHY Extended Ctrl Reg */
	PHY_BCOM_P_EXT_STAT	= 0x11,/* 16 bit r/o	PHY Extended Stat Reg */
	PHY_BCOM_RE_CTR		= 0x12,/* 16 bit r/w	Receive Error Counter */
	PHY_BCOM_FC_CTR		= 0x13,/* 16 bit r/w	False Carrier Sense Cnt */
	PHY_BCOM_RNO_CTR	= 0x14,/* 16 bit r/w	Receiver NOT_OK Cnt */

	PHY_BCOM_AUX_CTRL	= 0x18,/* 16 bit r/w	Auxiliary Control Reg */
	PHY_BCOM_AUX_STAT	= 0x19,/* 16 bit r/o	Auxiliary Stat Summary */
	PHY_BCOM_INT_STAT	= 0x1a,/* 16 bit r/o	Interrupt Status Reg */
	PHY_BCOM_INT_MASK	= 0x1b,/* 16 bit r/w	Interrupt Mask Reg */
};
976 | |||
/*
 * Marvell-PHY Registers, indirect addressed over GMAC
 */
enum {
	PHY_MARV_CTRL		= 0x00,/* 16 bit r/w	PHY Control Register */
	PHY_MARV_STAT		= 0x01,/* 16 bit r/o	PHY Status Register */
	PHY_MARV_ID0		= 0x02,/* 16 bit r/o	PHY ID0 Register */
	PHY_MARV_ID1		= 0x03,/* 16 bit r/o	PHY ID1 Register */
	PHY_MARV_AUNE_ADV	= 0x04,/* 16 bit r/w	Auto-Neg. Advertisement */
	PHY_MARV_AUNE_LP	= 0x05,/* 16 bit r/o	Link Part Ability Reg */
	PHY_MARV_AUNE_EXP	= 0x06,/* 16 bit r/o	Auto-Neg. Expansion Reg */
	PHY_MARV_NEPG		= 0x07,/* 16 bit r/w	Next Page Register */
	PHY_MARV_NEPG_LP	= 0x08,/* 16 bit r/o	Next Page Link Partner */
	/* Marvell-specific registers */
	PHY_MARV_1000T_CTRL	= 0x09,/* 16 bit r/w	1000Base-T Control Reg */
	PHY_MARV_1000T_STAT	= 0x0a,/* 16 bit r/o	1000Base-T Status Reg */
	PHY_MARV_EXT_STAT	= 0x0f,/* 16 bit r/o	Extended Status Reg */
	PHY_MARV_PHY_CTRL	= 0x10,/* 16 bit r/w	PHY Specific Ctrl Reg */
	PHY_MARV_PHY_STAT	= 0x11,/* 16 bit r/o	PHY Specific Stat Reg */
	PHY_MARV_INT_MASK	= 0x12,/* 16 bit r/w	Interrupt Mask Reg */
	PHY_MARV_INT_STAT	= 0x13,/* 16 bit r/o	Interrupt Status Reg */
	PHY_MARV_EXT_CTRL	= 0x14,/* 16 bit r/w	Ext. PHY Specific Ctrl */
	PHY_MARV_RXE_CNT	= 0x15,/* 16 bit r/w	Receive Error Counter */
	PHY_MARV_EXT_ADR	= 0x16,/* 16 bit r/w	Ext. Ad. for Cable Diag. */
	PHY_MARV_PORT_IRQ	= 0x17,/* 16 bit r/o	Port 0 IRQ (88E1111 only) */
	PHY_MARV_LED_CTRL	= 0x18,/* 16 bit r/w	LED Control Reg */
	PHY_MARV_LED_OVER	= 0x19,/* 16 bit r/w	Manual LED Override Reg */
	PHY_MARV_EXT_CTRL_2	= 0x1a,/* 16 bit r/w	Ext. PHY Specific Ctrl 2 */
	PHY_MARV_EXT_P_STAT	= 0x1b,/* 16 bit r/w	Ext. PHY Spec. Stat Reg */
	PHY_MARV_CABLE_DIAG	= 0x1c,/* 16 bit r/o	Cable Diagnostic Reg */
	PHY_MARV_PAGE_ADDR	= 0x1d,/* 16 bit r/w	Extended Page Address Reg */
	PHY_MARV_PAGE_DATA	= 0x1e,/* 16 bit r/w	Extended Page Data Reg */

	/* for 10/100 Fast Ethernet PHY (88E3082 only) — note these reuse
	 * register addresses 0x16..0x1c from above with different meanings */
	PHY_MARV_FE_LED_PAR	= 0x16,/* 16 bit r/w	LED Parallel Select Reg. */
	PHY_MARV_FE_LED_SER	= 0x17,/* 16 bit r/w	LED Stream Select S. LED */
	PHY_MARV_FE_VCT_TX	= 0x1a,/* 16 bit r/w	VCT Reg. for TXP/N Pins */
	PHY_MARV_FE_VCT_RX	= 0x1b,/* 16 bit r/o	VCT Reg. for RXP/N Pins */
	PHY_MARV_FE_SPEC_2	= 0x1c,/* 16 bit r/w	Specific Control Reg. 2 */
};
1017 | |||
/* PHY Control register (MII register 0) bit definitions. */
enum {
	PHY_CT_RESET	= 1<<15, /* Bit 15: (sc) clear all PHY related regs */
	PHY_CT_LOOP	= 1<<14, /* Bit 14:	enable Loopback over PHY */
	PHY_CT_SPS_LSB	= 1<<13, /* Bit 13:	Speed select, lower bit */
	PHY_CT_ANE	= 1<<12, /* Bit 12:	Auto-Negotiation Enabled */
	PHY_CT_PDOWN	= 1<<11, /* Bit 11:	Power Down Mode */
	PHY_CT_ISOL	= 1<<10, /* Bit 10:	Isolate Mode */
	PHY_CT_RE_CFG	= 1<<9,	 /* Bit  9: (sc) Restart Auto-Negotiation */
	PHY_CT_DUP_MD	= 1<<8,	 /* Bit  8:	Duplex Mode */
	PHY_CT_COL_TST	= 1<<7,	 /* Bit  7:	Collision Test enabled */
	PHY_CT_SPS_MSB	= 1<<6,	 /* Bit  6:	Speed select, upper bit */
};

/* Speed selection encodings built from the two split speed-select bits. */
enum {
	PHY_CT_SP1000	= PHY_CT_SPS_MSB, /* enable speed of 1000 Mbps */
	PHY_CT_SP100	= PHY_CT_SPS_LSB, /* enable speed of  100 Mbps */
	PHY_CT_SP10	= 0,		  /* enable speed of   10 Mbps */
};
1036 | |||
/* PHY Status register (MII register 1) bit definitions. */
enum {
	PHY_ST_EXT_ST	= 1<<8,	/* Bit  8:	Extended Status Present */

	PHY_ST_PRE_SUP	= 1<<6,	/* Bit  6:	Preamble Suppression */
	PHY_ST_AN_OVER	= 1<<5,	/* Bit  5:	Auto-Negotiation Over */
	PHY_ST_REM_FLT	= 1<<4,	/* Bit  4:	Remote Fault Condition Occurred */
	PHY_ST_AN_CAP	= 1<<3,	/* Bit  3:	Auto-Negotiation Capability */
	PHY_ST_LSYNC	= 1<<2,	/* Bit  2:	Link Synchronized */
	PHY_ST_JAB_DET	= 1<<1,	/* Bit  1:	Jabber Detected */
	PHY_ST_EXT_REG	= 1<<0,	/* Bit  0:	Extended Register available */
};

/* PHY ID1 register field masks. */
enum {
	PHY_I1_OUI_MSK	= 0x3f<<10, /* Bit 15..10:	Organization Unique ID */
	PHY_I1_MOD_NUM	= 0x3f<<4,  /* Bit  9.. 4:	Model Number */
	PHY_I1_REV_MSK	= 0xf,	    /* Bit  3.. 0:	Revision Number */
};
1054 | |||
/* different Broadcom PHY Ids */
enum {
	PHY_BCOM_ID1_A1	= 0x6041,
	PHY_BCOM_ID1_B2	= 0x6043,
	PHY_BCOM_ID1_C0	= 0x6044,
	PHY_BCOM_ID1_C5	= 0x6047,
};

/* different Marvell PHY Ids */
enum {
	PHY_MARV_ID0_VAL= 0x0141, /* Marvell Unique Identifier */
	PHY_MARV_ID1_B0	= 0x0C23, /* Yukon	(PHY 88E1011) */
	PHY_MARV_ID1_B2	= 0x0C25, /* Yukon-Plus (PHY 88E1011) */
	PHY_MARV_ID1_C2	= 0x0CC2, /* Yukon-EC	(PHY 88E1111) */
	PHY_MARV_ID1_Y2	= 0x0C91, /* Yukon-2	(PHY 88E1112) */
};
1071 | |||
/* Advertisement register bits */
enum {
	PHY_AN_NXT_PG	= 1<<15, /* Bit 15:	Request Next Page */
	PHY_AN_ACK	= 1<<14, /* Bit 14:	(ro) Acknowledge Received */
	PHY_AN_RF	= 1<<13, /* Bit 13:	Remote Fault Bits */

	PHY_AN_PAUSE_ASYM = 1<<11,/* Bit 11:	Try for asymmetric */
	PHY_AN_PAUSE_CAP = 1<<10, /* Bit 10:	Try for pause */
	PHY_AN_100BASE4	= 1<<9,	 /* Bit  9:	Try for 100mbps 4k packets */
	PHY_AN_100FULL	= 1<<8,	 /* Bit  8:	Try for 100mbps full-duplex */
	PHY_AN_100HALF	= 1<<7,	 /* Bit  7:	Try for 100mbps half-duplex */
	PHY_AN_10FULL	= 1<<6,	 /* Bit  6:	Try for 10mbps full-duplex */
	PHY_AN_10HALF	= 1<<5,	 /* Bit  5:	Try for 10mbps half-duplex */
	PHY_AN_CSMA	= 1<<0,	 /* Bit  0:	Only selector supported */
	PHY_AN_SEL	= 0x1f,	 /* Bit  4..0:	Selector Field, 00001=Ethernet*/
	/* Convenience combinations of the advertisement bits above. */
	PHY_AN_FULL	= PHY_AN_100FULL | PHY_AN_10FULL | PHY_AN_CSMA,
	PHY_AN_ALL	= PHY_AN_10HALF | PHY_AN_10FULL |
			  PHY_AN_100HALF | PHY_AN_100FULL,
};
1091 | |||
/* Xmac Specific */
/* 1000Base-X auto-negotiation advertisement bits. */
enum {
	PHY_X_AN_NXT_PG	= 1<<15, /* Bit 15:	Request Next Page */
	PHY_X_AN_ACK	= 1<<14, /* Bit 14:	(ro) Acknowledge Received */
	PHY_X_AN_RFB	= 3<<12,/* Bit 13..12:	Remote Fault Bits */

	PHY_X_AN_PAUSE	= 3<<7,/* Bit  8.. 7:	Pause Bits */
	PHY_X_AN_HD	= 1<<6,	/* Bit  6:	Half Duplex */
	PHY_X_AN_FD	= 1<<5,	/* Bit  5:	Full Duplex */
};

/* Pause Bits (PHY_X_AN_PAUSE and PHY_X_RS_PAUSE) encoding */
enum {
	PHY_X_P_NO_PAUSE= 0<<7,/* Bit  8..7:	no Pause Mode */
	PHY_X_P_SYM_MD	= 1<<7,	/* Bit  8..7:	symmetric Pause Mode */
	PHY_X_P_ASYM_MD	= 2<<7,/* Bit  8..7:	asymmetric Pause Mode */
	PHY_X_P_BOTH_MD	= 3<<7,/* Bit  8..7:	both Pause Mode */
};
1110 | |||
1111 | |||
/***** PHY_XMAC_EXT_STAT	16 bit r/w	Extended Status Register *****/
enum {
	PHY_X_EX_FD	= 1<<15, /* Bit 15:	Device Supports Full Duplex */
	PHY_X_EX_HD	= 1<<14, /* Bit 14:	Device Supports Half Duplex */
};

/***** PHY_XMAC_RES_ABI		16 bit r/o	PHY Resolved Ability *****/
enum {
	PHY_X_RS_PAUSE	= 3<<7,	/* Bit  8..7:	selected Pause Mode */
	PHY_X_RS_HD	= 1<<6,	/* Bit  6:	Half Duplex Mode selected */
	PHY_X_RS_FD	= 1<<5,	/* Bit  5:	Full Duplex Mode selected */
	PHY_X_RS_ABLMIS	= 1<<4,	/* Bit  4:	duplex or pause cap mismatch */
	PHY_X_RS_PAUMIS	= 1<<3,	/* Bit  3:	pause capability mismatch */
};

/* Remote Fault Bits (PHY_X_AN_RFB) encoding */
enum {
	X_RFB_OK	= 0<<12,/* Bit 13..12	No errors, Link OK */
	X_RFB_LF	= 1<<12,/* Bit 13..12	Link Failure */
	X_RFB_OFF	= 2<<12,/* Bit 13..12	Offline */
	X_RFB_AN_ERR	= 3<<12,/* Bit 13..12	Auto-Negotiation Error */
};
1134 | |||
/* Broadcom-Specific */
/***** PHY_BCOM_1000T_CTRL	16 bit r/w	1000Base-T Control Reg *****/
enum {
	PHY_B_1000C_TEST = 7<<13,/* Bit 15..13:	Test Modes */
	PHY_B_1000C_MSE	= 1<<12, /* Bit 12:	Master/Slave Enable */
	PHY_B_1000C_MSC	= 1<<11, /* Bit 11:	M/S Configuration */
	PHY_B_1000C_RD	= 1<<10, /* Bit 10:	Repeater/DTE */
	PHY_B_1000C_AFD	= 1<<9,	 /* Bit  9:	Advertise Full Duplex */
	PHY_B_1000C_AHD	= 1<<8,	 /* Bit  8:	Advertise Half Duplex */
};

/***** PHY_BCOM_1000T_STAT	16 bit r/o	1000Base-T Status Reg *****/
/***** PHY_MARV_1000T_STAT	16 bit r/o	1000Base-T Status Reg *****/
/* Same layout on both Broadcom and Marvell PHYs, hence the shared enum. */
enum {
	PHY_B_1000S_MSF	= 1<<15, /* Bit 15:	Master/Slave Fault */
	PHY_B_1000S_MSR	= 1<<14, /* Bit 14:	Master/Slave Result */
	PHY_B_1000S_LRS	= 1<<13, /* Bit 13:	Local Receiver Status */
	PHY_B_1000S_RRS	= 1<<12, /* Bit 12:	Remote Receiver Status */
	PHY_B_1000S_LP_FD = 1<<11, /* Bit 11:	Link Partner can FD */
	PHY_B_1000S_LP_HD = 1<<10, /* Bit 10:	Link Partner can HD */
				   /* Bit  9..8:	reserved */
	PHY_B_1000S_IEC	= 0xff,	/* Bit  7..0:	Idle Error Count */
};

/***** PHY_BCOM_EXT_STAT	16 bit r/o	Extended Status Register *****/
enum {
	PHY_B_ES_X_FD_CAP = 1<<15, /* Bit 15:	1000Base-X FD capable */
	PHY_B_ES_X_HD_CAP = 1<<14, /* Bit 14:	1000Base-X HD capable */
	PHY_B_ES_T_FD_CAP = 1<<13, /* Bit 13:	1000Base-T FD capable */
	PHY_B_ES_T_HD_CAP = 1<<12, /* Bit 12:	1000Base-T HD capable */
};
1166 | |||
/***** PHY_BCOM_P_EXT_CTRL	16 bit r/w	PHY Extended Control Reg *****/
enum {
	PHY_B_PEC_MAC_PHY	= 1<<15, /* Bit 15:	10BIT/GMI-Interface */
	PHY_B_PEC_DIS_CROSS	= 1<<14, /* Bit 14:	Disable MDI Crossover */
	PHY_B_PEC_TX_DIS	= 1<<13, /* Bit 13:	Tx output Disabled */
	PHY_B_PEC_INT_DIS	= 1<<12, /* Bit 12:	Interrupts Disabled */
	PHY_B_PEC_F_INT		= 1<<11, /* Bit 11:	Force Interrupt */
	PHY_B_PEC_BY_45		= 1<<10, /* Bit 10:	Bypass 4B5B-Decoder */
	PHY_B_PEC_BY_SCR	= 1<<9,	 /* Bit  9:	Bypass Scrambler */
	PHY_B_PEC_BY_MLT3	= 1<<8,	 /* Bit  8:	Bypass MLT3 Encoder */
	PHY_B_PEC_BY_RXA	= 1<<7,	 /* Bit  7:	Bypass Rx Alignm. */
	PHY_B_PEC_RES_SCR	= 1<<6,	 /* Bit  6:	Reset Scrambler */
	PHY_B_PEC_EN_LTR	= 1<<5,	 /* Bit  5:	Ena LED Traffic Mode */
	PHY_B_PEC_LED_ON	= 1<<4,	 /* Bit  4:	Force LED's on */
	PHY_B_PEC_LED_OFF	= 1<<3,	 /* Bit  3:	Force LED's off */
	PHY_B_PEC_EX_IPG	= 1<<2,	 /* Bit  2:	Extend Tx IPG Mode */
	PHY_B_PEC_3_LED		= 1<<1,	 /* Bit  1:	Three Link LED mode */
	PHY_B_PEC_HIGH_LA	= 1<<0,	 /* Bit  0:	GMII FIFO Elasticy */
};

/***** PHY_BCOM_P_EXT_STAT	16 bit r/o	PHY Extended Status Reg *****/
enum {
	PHY_B_PES_CROSS_STAT	= 1<<13, /* Bit 13:	MDI Crossover Status */
	PHY_B_PES_INT_STAT	= 1<<12, /* Bit 12:	Interrupt Status */
	PHY_B_PES_RRS		= 1<<11, /* Bit 11:	Remote Receiver Stat. */
	PHY_B_PES_LRS		= 1<<10, /* Bit 10:	Local Receiver Stat. */
	PHY_B_PES_LOCKED	= 1<<9,	 /* Bit  9:	Locked */
	PHY_B_PES_LS		= 1<<8,	 /* Bit  8:	Link Status */
	PHY_B_PES_RF		= 1<<7,	 /* Bit  7:	Remote Fault */
	PHY_B_PES_CE_ER		= 1<<6,	 /* Bit  6:	Carrier Ext Error */
	PHY_B_PES_BAD_SSD	= 1<<5,	 /* Bit  5:	Bad SSD */
	PHY_B_PES_BAD_ESD	= 1<<4,	 /* Bit  4:	Bad ESD */
	PHY_B_PES_RX_ER		= 1<<3,	 /* Bit  3:	Receive Error */
	PHY_B_PES_TX_ER		= 1<<2,	 /* Bit  2:	Transmit Error */
	PHY_B_PES_LOCK_ER	= 1<<1,	 /* Bit  1:	Lock Error */
	PHY_B_PES_MLT3_ER	= 1<<0,	 /* Bit  0:	MLT3 code Error */
};
1204 | |||
/* PHY_BCOM_AUNE_ADV	16 bit r/w	Auto-Negotiation Advertisement *****/
/* PHY_BCOM_AUNE_LP	16 bit r/o	Link Partner Ability Reg *****/
enum {
	PHY_B_AN_RF	= 1<<13, /* Bit 13:	Remote Fault */

	PHY_B_AN_ASP	= 1<<11, /* Bit 11:	Asymmetric Pause */
	PHY_B_AN_PC	= 1<<10, /* Bit 10:	Pause Capable */
};


/***** PHY_BCOM_FC_CTR		16 bit r/w	False Carrier Counter *****/
/* This enum groups bit definitions for three Broadcom counter/aux registers;
 * see the per-register comments below for which constant belongs where.
 */
enum {
	PHY_B_FC_CTR	= 0xff,	/* Bit  7..0:	False Carrier Counter */

/***** PHY_BCOM_RNO_CTR		16 bit r/w	Receive NOT_OK Counter *****/
	PHY_B_RC_LOC_MSK	= 0xff00, /* Bit 15..8:	Local Rx NOT_OK cnt */
	PHY_B_RC_REM_MSK	= 0x00ff, /* Bit  7..0:	Remote Rx NOT_OK cnt */

/***** PHY_BCOM_AUX_CTRL	16 bit r/w	Auxiliary Control Reg *****/
	PHY_B_AC_L_SQE		= 1<<15, /* Bit 15:	Low Squelch */
	PHY_B_AC_LONG_PACK	= 1<<14, /* Bit 14:	Rx Long Packets */
	PHY_B_AC_ER_CTRL	= 3<<12,/* Bit 13..12:	Edgerate Control */
					 /* Bit 11:	reserved */
	PHY_B_AC_TX_TST		= 1<<10, /* Bit 10:	Tx test bit, always 1 */
					 /* Bit  9.. 8:	reserved */
	PHY_B_AC_DIS_PRF	= 1<<7,	 /* Bit  7:	dis part resp filter */
					 /* Bit  6:	reserved */
	PHY_B_AC_DIS_PM		= 1<<5,	 /* Bit  5:	dis power management */
					 /* Bit  4:	reserved */
	PHY_B_AC_DIAG		= 1<<3,	 /* Bit  3:	Diagnostic Mode */
};

/***** PHY_BCOM_AUX_STAT	16 bit r/o	Auxiliary Status Reg *****/
enum {
	PHY_B_AS_AN_C	= 1<<15, /* Bit 15:	AutoNeg complete */
	PHY_B_AS_AN_CA	= 1<<14, /* Bit 14:	AN Complete Ack */
	PHY_B_AS_ANACK_D = 1<<13, /* Bit 13:	AN Ack Detect */
	PHY_B_AS_ANAB_D	= 1<<12, /* Bit 12:	AN Ability Detect */
	PHY_B_AS_NPW	= 1<<11, /* Bit 11:	AN Next Page Wait */
	PHY_B_AS_AN_RES_MSK = 7<<8,/* Bit 10..8:	AN HDC */
	PHY_B_AS_PDF	= 1<<7,	 /* Bit  7:	Parallel Detect. Fault */
	PHY_B_AS_RF	= 1<<6,	 /* Bit  6:	Remote Fault */
	PHY_B_AS_ANP_R	= 1<<5,	 /* Bit  5:	AN Page Received */
	PHY_B_AS_LP_ANAB = 1<<4, /* Bit  4:	LP AN Ability */
	PHY_B_AS_LP_NPAB = 1<<3, /* Bit  3:	LP Next Page Ability */
	PHY_B_AS_LS	= 1<<2,	 /* Bit  2:	Link Status */
	PHY_B_AS_PRR	= 1<<1,	 /* Bit  1:	Pause Resolution-Rx */
	PHY_B_AS_PRT	= 1<<0,	 /* Bit  0:	Pause Resolution-Tx */
};
/* Mask covering both pause-resolution bits. */
#define PHY_B_AS_PAUSE_MSK	(PHY_B_AS_PRR | PHY_B_AS_PRT)
1255 | |||
/***** PHY_BCOM_INT_STAT	16 bit r/o	Interrupt Status Reg *****/
/***** PHY_BCOM_INT_MASK	16 bit r/w	Interrupt Mask Reg *****/
enum {
	PHY_B_IS_PSE		= 1<<14, /* Bit 14:	Pair Swap Error */
	PHY_B_IS_MDXI_SC	= 1<<13, /* Bit 13:	MDIX Status Change */
	PHY_B_IS_HCT		= 1<<12, /* Bit 12:	counter above 32k */
	PHY_B_IS_LCT		= 1<<11, /* Bit 11:	counter above 128 */
	PHY_B_IS_AN_PR		= 1<<10, /* Bit 10:	Page Received */
	PHY_B_IS_NO_HDCL	= 1<<9,	 /* Bit  9:	No HCD Link */
	PHY_B_IS_NO_HDC		= 1<<8,	 /* Bit  8:	No HCD */
	PHY_B_IS_NEG_USHDC	= 1<<7,	 /* Bit  7:	Negotiated Unsup. HCD */
	PHY_B_IS_SCR_S_ER	= 1<<6,	 /* Bit  6:	Scrambler Sync Error */
	PHY_B_IS_RRS_CHANGE	= 1<<5,	 /* Bit  5:	Remote Rx Stat Change */
	PHY_B_IS_LRS_CHANGE	= 1<<4,	 /* Bit  4:	Local Rx Stat Change */
	PHY_B_IS_DUP_CHANGE	= 1<<3,	 /* Bit  3:	Duplex Mode Change */
	PHY_B_IS_LSP_CHANGE	= 1<<2,	 /* Bit  2:	Link Speed Change */
	PHY_B_IS_LST_CHANGE	= 1<<1,	 /* Bit  1:	Link Status Changed */
	PHY_B_IS_CRC_ER		= 1<<0,	 /* Bit  0:	CRC Error */
};
/* Default interrupt mask: everything masked except the five events listed. */
#define PHY_B_DEF_MSK	\
	(~(PHY_B_IS_PSE | PHY_B_IS_AN_PR | PHY_B_IS_DUP_CHANGE | \
	   PHY_B_IS_LSP_CHANGE | PHY_B_IS_LST_CHANGE))

/* Pause Bits (PHY_B_AN_ASP and PHY_B_AN_PC) encoding */
enum {
	PHY_B_P_NO_PAUSE	= 0<<10,/* Bit 11..10:	no Pause Mode */
	PHY_B_P_SYM_MD		= 1<<10, /* Bit 11..10:	symmetric Pause Mode */
	PHY_B_P_ASYM_MD		= 2<<10,/* Bit 11..10:	asymmetric Pause Mode */
	PHY_B_P_BOTH_MD		= 3<<10,/* Bit 11..10:	both Pause Mode */
};
/*
 * Resolved Duplex mode and Capabilities (Aux Status Summary Reg)
 */
enum {
	PHY_B_RES_1000FD	= 7<<8,/* Bit 10..8:	1000Base-T Full Dup. */
	PHY_B_RES_1000HD	= 6<<8,/* Bit 10..8:	1000Base-T Half Dup. */
};
1293 | |||
/** Marvell-Specific */
/* Auto-negotiation advertisement bits (copper). */
enum {
	PHY_M_AN_NXT_PG	= 1<<15, /* Request Next Page */
	PHY_M_AN_ACK	= 1<<14, /* (ro)	Acknowledge Received */
	PHY_M_AN_RF	= 1<<13, /* Remote Fault */

	PHY_M_AN_ASP	= 1<<11, /* Asymmetric Pause */
	PHY_M_AN_PC	= 1<<10, /* MAC Pause implemented */
	PHY_M_AN_100_T4	= 1<<9,	 /* Not cap. 100Base-T4 (always 0) */
	PHY_M_AN_100_FD	= 1<<8,	 /* Advertise 100Base-TX Full Duplex */
	PHY_M_AN_100_HD	= 1<<7,	 /* Advertise 100Base-TX Half Duplex */
	PHY_M_AN_10_FD	= 1<<6,	 /* Advertise 10Base-TX Full Duplex */
	PHY_M_AN_10_HD	= 1<<5,	 /* Advertise 10Base-TX Half Duplex */
	PHY_M_AN_SEL_MSK =0x1f<<4, /* Bit  4.. 0: Selector Field Mask */
};

/* special defines for FIBER (88E1011S only) */
enum {
	PHY_M_AN_ASP_X	= 1<<8,	 /* Asymmetric Pause */
	PHY_M_AN_PC_X	= 1<<7,	 /* MAC Pause implemented */
	PHY_M_AN_1000X_AHD = 1<<6, /* Advertise 1000Base-X Half Duplex */
	PHY_M_AN_1000X_AFD = 1<<5, /* Advertise 1000Base-X Full Duplex */
};

/* Pause Bits (PHY_M_AN_ASP_X and PHY_M_AN_PC_X) encoding */
enum {
	PHY_M_P_NO_PAUSE_X	= 0<<7,/* Bit  8.. 7:	no Pause Mode */
	PHY_M_P_SYM_MD_X	= 1<<7,	/* Bit  8.. 7:	symmetric Pause Mode */
	PHY_M_P_ASYM_MD_X	= 2<<7,/* Bit  8.. 7:	asymmetric Pause Mode */
	PHY_M_P_BOTH_MD_X	= 3<<7,/* Bit  8.. 7:	both Pause Mode */
};
1325 | |||
/***** PHY_MARV_1000T_CTRL	16 bit r/w	1000Base-T Control Reg *****/
enum {
	PHY_M_1000C_TEST= 7<<13,/* Bit 15..13:	Test Modes */
	PHY_M_1000C_MSE	= 1<<12, /* Manual Master/Slave Enable */
	PHY_M_1000C_MSC	= 1<<11, /* M/S Configuration (1=Master) */
	PHY_M_1000C_MPD	= 1<<10, /* Multi-Port Device */
	PHY_M_1000C_AFD	= 1<<9,	 /* Advertise Full Duplex */
	PHY_M_1000C_AHD	= 1<<8,	 /* Advertise Half Duplex */
};

/***** PHY_MARV_PHY_CTRL	16 bit r/w	PHY Specific Ctrl Reg *****/
enum {
	PHY_M_PC_TX_FFD_MSK	= 3<<14,/* Bit 15..14: Tx FIFO Depth Mask */
	PHY_M_PC_RX_FFD_MSK	= 3<<12,/* Bit 13..12: Rx FIFO Depth Mask */
	PHY_M_PC_ASS_CRS_TX	= 1<<11, /* Assert CRS on Transmit */
	PHY_M_PC_FL_GOOD	= 1<<10, /* Force Link Good */
	PHY_M_PC_EN_DET_MSK	= 3<<8,/* Bit  9.. 8: Energy Detect Mask */
	PHY_M_PC_ENA_EXT_D	= 1<<7,	 /* Enable Ext. Distance (10BT) */
	PHY_M_PC_MDIX_MSK	= 3<<5,/* Bit  6.. 5: MDI/MDIX Config. Mask */
	PHY_M_PC_DIS_125CLK	= 1<<4,	 /* Disable 125 CLK */
	PHY_M_PC_MAC_POW_UP	= 1<<3,	 /* MAC Power up */
	PHY_M_PC_SQE_T_ENA	= 1<<2,	 /* SQE Test Enabled */
	PHY_M_PC_POL_R_DIS	= 1<<1,	 /* Polarity Reversal Disabled */
	PHY_M_PC_DIS_JABBER	= 1<<0,	 /* Disable Jabber */
};

/* Values for the PHY_M_PC_EN_DET_MSK field (bits 9..8). */
enum {
	PHY_M_PC_EN_DET		= 2<<8,	/* Energy Detect (Mode 1) */
	PHY_M_PC_EN_DET_PLUS	= 3<<8, /* Energy Detect Plus (Mode 2) */
};

/* Values for the PHY_M_PC_MDIX_MSK field (bits 6..5); value 2 is not defined. */
enum {
	PHY_M_PC_MAN_MDI	= 0, /* 00 = Manual MDI configuration */
	PHY_M_PC_MAN_MDIX	= 1, /* 01 = Manual MDIX configuration */
	PHY_M_PC_ENA_AUTO	= 3, /* 11 = Enable Automatic Crossover */
};
1362 | |||
1363 | /* for 10/100 Fast Ethernet PHY (88E3082 only) */ | ||
1364 | enum { | ||
1365 | PHY_M_PC_ENA_DTE_DT = 1<<15, /* Enable Data Terminal Equ. (DTE) Detect */ | ||
1366 | PHY_M_PC_ENA_ENE_DT = 1<<14, /* Enable Energy Detect (sense & pulse) */ | ||
1367 | PHY_M_PC_DIS_NLP_CK = 1<<13, /* Disable Normal Link Puls (NLP) Check */ | ||
1368 | PHY_M_PC_ENA_LIP_NP = 1<<12, /* Enable Link Partner Next Page Reg. */ | ||
1369 | PHY_M_PC_DIS_NLP_GN = 1<<11, /* Disable Normal Link Puls Generation */ | ||
1370 | |||
1371 | PHY_M_PC_DIS_SCRAMB = 1<<9, /* Disable Scrambler */ | ||
1372 | PHY_M_PC_DIS_FEFI = 1<<8, /* Disable Far End Fault Indic. (FEFI) */ | ||
1373 | |||
1374 | PHY_M_PC_SH_TP_SEL = 1<<6, /* Shielded Twisted Pair Select */ | ||
1375 | PHY_M_PC_RX_FD_MSK = 3<<2,/* Bit 3.. 2: Rx FIFO Depth Mask */ | ||
1376 | }; | ||
1377 | |||
/*****  PHY_MARV_PHY_STAT	16 bit r/o	PHY Specific Status Reg *****/
enum {
	PHY_M_PS_SPEED_MSK	= 3<<14, /* Bit 15..14: Speed Mask */
	PHY_M_PS_SPEED_1000	= 1<<15, /* 10 = 1000 Mbps */
	PHY_M_PS_SPEED_100	= 1<<14, /* 01 = 100 Mbps */
	PHY_M_PS_SPEED_10	= 0,	 /* 00 = 10 Mbps */
	PHY_M_PS_FULL_DUP	= 1<<13, /* Full Duplex */
	PHY_M_PS_PAGE_REC	= 1<<12, /* Page Received */
	PHY_M_PS_SPDUP_RES	= 1<<11, /* Speed & Duplex Resolved */
	PHY_M_PS_LINK_UP	= 1<<10, /* Link Up */
	PHY_M_PS_CABLE_MSK	= 7<<7,  /* Bit  9.. 7: Cable Length Mask */
	PHY_M_PS_MDI_X_STAT	= 1<<6,  /* MDI Crossover Stat (1=MDIX) */
	PHY_M_PS_DOWNS_STAT	= 1<<5,  /* Downshift Status (1=downsh.) */
	PHY_M_PS_ENDET_STAT	= 1<<4,  /* Energy Detect Status (1=act) */
	PHY_M_PS_TX_P_EN	= 1<<3,  /* Tx Pause Enabled */
	PHY_M_PS_RX_P_EN	= 1<<2,  /* Rx Pause Enabled */
	PHY_M_PS_POL_REV	= 1<<1,  /* Polarity Reversed */
	PHY_M_PS_JABBER		= 1<<0,  /* Jabber */
};

/* Both pause directions resolved/enabled */
#define PHY_M_PS_PAUSE_MSK	(PHY_M_PS_TX_P_EN | PHY_M_PS_RX_P_EN)

/* for 10/100 Fast Ethernet PHY (88E3082 only) */
enum {
	PHY_M_PS_DTE_DETECT	= 1<<15, /* Data Terminal Equipment (DTE) Detected */
	PHY_M_PS_RES_SPEED	= 1<<14, /* Resolved Speed (1=100 Mbps, 0=10 Mbps */
};

/* Marvell PHY interrupt source/mask bits (PHY_M_IS_*) */
enum {
	PHY_M_IS_AN_ERROR	= 1<<15, /* Auto-Negotiation Error */
	PHY_M_IS_LSP_CHANGE	= 1<<14, /* Link Speed Changed */
	PHY_M_IS_DUP_CHANGE	= 1<<13, /* Duplex Mode Changed */
	PHY_M_IS_AN_PR		= 1<<12, /* Page Received */
	PHY_M_IS_AN_COMPL	= 1<<11, /* Auto-Negotiation Completed */
	PHY_M_IS_LST_CHANGE	= 1<<10, /* Link Status Changed */
	PHY_M_IS_SYMB_ERROR	= 1<<9, /* Symbol Error */
	PHY_M_IS_FALSE_CARR	= 1<<8, /* False Carrier */
	PHY_M_IS_FIFO_ERROR	= 1<<7, /* FIFO Overflow/Underrun Error */
	PHY_M_IS_MDI_CHANGE	= 1<<6, /* MDI Crossover Changed */
	PHY_M_IS_DOWNSH_DET	= 1<<5, /* Downshift Detected */
	PHY_M_IS_END_CHANGE	= 1<<4, /* Energy Detect Changed */

	PHY_M_IS_DTE_CHANGE	= 1<<2, /* DTE Power Det. Status Changed */
	PHY_M_IS_POL_CHANGE	= 1<<1, /* Polarity Changed */
	PHY_M_IS_JABBER		= 1<<0, /* Jabber */

	/* default interrupt mask used by the driver */
	PHY_M_IS_DEF_MSK	= PHY_M_IS_AN_ERROR | PHY_M_IS_LSP_CHANGE |
				  PHY_M_IS_LST_CHANGE | PHY_M_IS_FIFO_ERROR,

	PHY_M_IS_AN_MSK		= PHY_M_IS_AN_ERROR | PHY_M_IS_AN_COMPL,
};
1429 | |||
/*****  PHY_MARV_EXT_CTRL	16 bit r/w	Ext. PHY Specific Ctrl *****/
enum {
	PHY_M_EC_ENA_BC_EXT = 1<<15, /* Enable Block Carr. Ext. (88E1111 only) */
	PHY_M_EC_ENA_LIN_LB = 1<<14, /* Enable Line Loopback (88E1111 only) */

	PHY_M_EC_DIS_LINK_P = 1<<12, /* Disable Link Pulses (88E1111 only) */
	PHY_M_EC_M_DSC_MSK  = 3<<10, /* Bit 11..10:	Master Downshift Counter */
					/* (88E1011 only) */
	PHY_M_EC_S_DSC_MSK  = 3<<8,  /* Bit  9.. 8:	Slave  Downshift Counter */
					/* (88E1011 only) */
	PHY_M_EC_M_DSC_MSK2 = 7<<9,  /* Bit 11.. 9:	Master Downshift Counter */
					/* (88E1111 only) */
	PHY_M_EC_DOWN_S_ENA = 1<<8,  /* Downshift Enable (88E1111 only) */
					/* !!! Errata in spec. (1 = disable) */
	PHY_M_EC_RX_TIM_CT  = 1<<7,  /* RGMII Rx Timing Control*/
	PHY_M_EC_MAC_S_MSK  = 7<<4,  /* Bit  6.. 4:	Def. MAC interface speed */
	PHY_M_EC_FIB_AN_ENA = 1<<3,  /* Fiber Auto-Neg. Enable (88E1011S only) */
	PHY_M_EC_DTE_D_ENA  = 1<<2,  /* DTE Detect Enable (88E1111 only) */
	PHY_M_EC_TX_TIM_CT  = 1<<1,  /* RGMII Tx Timing Control */
	PHY_M_EC_TRANS_DIS  = 1<<0,  /* Transmitter Disable (88E1111 only) */
};

/* Field builders for the downshift counters and MAC interface speed */
#define PHY_M_EC_M_DSC(x)	((u16)(x)<<10) /* 00=1x; 01=2x; 10=3x; 11=4x */
#define PHY_M_EC_S_DSC(x)	((u16)(x)<<8) /* 00=dis; 01=1x; 10=2x; 11=3x */
#define PHY_M_EC_MAC_S(x)	((u16)(x)<<4) /* 01X=0; 110=2.5; 111=25 (MHz) */

#define PHY_M_EC_M_DSC_2(x)	((u16)(x)<<9) /* 000=1x; 001=2x; 010=3x; 011=4x */
					      /* 100=5x; 101=6x; 110=7x; 111=8x */
/* Values for PHY_M_EC_MAC_S() — default MAC interface clock */
enum {
	MAC_TX_CLK_0_MHZ	= 2,
	MAC_TX_CLK_2_5_MHZ	= 6,
	MAC_TX_CLK_25_MHZ	= 7,
};

/*****  PHY_MARV_LED_CTRL	16 bit r/w	LED Control Reg *****/
enum {
	PHY_M_LEDC_DIS_LED	= 1<<15, /* Disable LED */
	PHY_M_LEDC_PULS_MSK	= 7<<12,/* Bit 14..12: Pulse Stretch Mask */
	PHY_M_LEDC_F_INT	= 1<<11, /* Force Interrupt */
	PHY_M_LEDC_BL_R_MSK	= 7<<8,/* Bit 10.. 8: Blink Rate Mask */
	PHY_M_LEDC_DP_C_LSB	= 1<<7, /* Duplex Control (LSB, 88E1111 only) */
	PHY_M_LEDC_TX_C_LSB	= 1<<6, /* Tx Control (LSB, 88E1111 only) */
	PHY_M_LEDC_LK_C_MSK	= 7<<3,/* Bit  5.. 3: Link Control Mask */
					/* (88E1111 only) */
};
#define PHY_M_LED_PULS_DUR(x)	(((u16)(x)<<12) & PHY_M_LEDC_PULS_MSK)
#define PHY_M_LED_BLINK_RT(x)	(((u16)(x)<<8) & PHY_M_LEDC_BL_R_MSK)

/* 88E1011 variant of the LED control bits */
enum {
	PHY_M_LEDC_LINK_MSK	= 3<<3, /* Bit  4.. 3: Link Control Mask */
					/* (88E1011 only) */
	PHY_M_LEDC_DP_CTRL	= 1<<2, /* Duplex Control */
	PHY_M_LEDC_DP_C_MSB	= 1<<2, /* Duplex Control (MSB, 88E1111 only) */
	PHY_M_LEDC_RX_CTRL	= 1<<1, /* Rx Activity / Link */
	PHY_M_LEDC_TX_CTRL	= 1<<0, /* Tx Activity / Link */
	PHY_M_LEDC_TX_C_MSB	= 1<<0, /* Tx Control (MSB, 88E1111 only) */
};
1486 | |||
/* Values for PHY_M_LED_PULS_DUR() — LED pulse stretch duration */
enum {
	PULS_NO_STR	= 0, /* no pulse stretching */
	PULS_21MS	= 1, /* 21 ms to 42 ms */
	PULS_42MS	= 2, /* 42 ms to 84 ms */
	PULS_84MS	= 3, /* 84 ms to 170 ms */
	PULS_170MS	= 4, /* 170 ms to 340 ms */
	PULS_340MS	= 5, /* 340 ms to 670 ms */
	PULS_670MS	= 6, /* 670 ms to 1.3 s */
	PULS_1300MS	= 7, /* 1.3 s to 2.7 s */
};

/* Values for PHY_M_LED_BLINK_RT() — LED blink rate */
enum {
	BLINK_42MS	= 0, /* 42 ms */
	BLINK_84MS	= 1, /* 84 ms */
	BLINK_170MS	= 2, /* 170 ms */
	BLINK_340MS	= 3, /* 340 ms */
	BLINK_670MS	= 4, /* 670 ms */
};

/*****  PHY_MARV_LED_OVER	16 bit r/w	Manual LED Override Reg *****/
/* Each 2-bit field takes a MO_LED_* value (see below). */
#define PHY_M_LED_MO_SGMII(x)	((x)<<14) /* Bit 15..14:  SGMII AN Timer */
					  /* Bit 13..12:  reserved */
#define PHY_M_LED_MO_DUP(x)	((x)<<10) /* Bit 11..10:  Duplex */
#define PHY_M_LED_MO_10(x)	((x)<<8)  /* Bit  9.. 8:  Link 10 */
#define PHY_M_LED_MO_100(x)	((x)<<6)  /* Bit  7.. 6:  Link 100 */
#define PHY_M_LED_MO_1000(x)	((x)<<4)  /* Bit  5.. 4:  Link 1000 */
#define PHY_M_LED_MO_RX(x)	((x)<<2)  /* Bit  3.. 2:  Rx */
#define PHY_M_LED_MO_TX(x)	((x)<<0)  /* Bit  1.. 0:  Tx */

/* Per-LED override modes for the PHY_M_LED_MO_*() fields */
enum {
	MO_LED_NORM	= 0,
	MO_LED_BLINK	= 1,
	MO_LED_OFF	= 2,
	MO_LED_ON	= 3,
};
1523 | |||
/*****  PHY_MARV_EXT_CTRL_2	16 bit r/w	Ext. PHY Specific Ctrl 2 *****/
enum {
	PHY_M_EC2_FI_IMPED	= 1<<6, /* Fiber Input  Impedance */
	PHY_M_EC2_FO_IMPED	= 1<<5, /* Fiber Output Impedance */
	PHY_M_EC2_FO_M_CLK	= 1<<4, /* Fiber Mode Clock Enable */
	PHY_M_EC2_FO_BOOST	= 1<<3, /* Fiber Output Boost */
	PHY_M_EC2_FO_AM_MSK	= 7,	/* Bit  2.. 0: Fiber Output Amplitude */
};

/*****  PHY_MARV_EXT_P_STAT 16 bit r/w	Ext. PHY Specific Status *****/
enum {
	PHY_M_FC_AUTO_SEL	= 1<<15, /* Fiber/Copper Auto Sel. Dis. */
	PHY_M_FC_AN_REG_ACC	= 1<<14, /* Fiber/Copper AN Reg. Access */
	PHY_M_FC_RESOLUTION	= 1<<13, /* Fiber/Copper Resolution */
	PHY_M_SER_IF_AN_BP	= 1<<12, /* Ser. IF AN Bypass Enable */
	PHY_M_SER_IF_BP_ST	= 1<<11, /* Ser. IF AN Bypass Status */
	PHY_M_IRQ_POLARITY	= 1<<10, /* IRQ polarity */
	PHY_M_DIS_AUT_MED	= 1<<9,  /* Disable Aut. Medium Reg. Selection */
					 /* (88E1111 only) */
					 /* Bit  9.. 4: reserved (88E1011 only) */
	PHY_M_UNDOC1		= 1<<7,  /* undocumented bit !! */
	PHY_M_DTE_POW_STAT	= 1<<4,  /* DTE Power Status (88E1111 only) */
	PHY_M_MODE_MASK		= 0xf,   /* Bit  3.. 0: copy of HWCFG MODE[3:0] */
};

/*****  PHY_MARV_CABLE_DIAG	16 bit r/o	Cable Diagnostic Reg *****/
enum {
	PHY_M_CABD_ENA_TEST	= 1<<15, /* Enable Test (Page 0) */
	PHY_M_CABD_DIS_WAIT	= 1<<15, /* Disable Waiting Period (Page 1) */
					 /* (88E1111 only) */
	PHY_M_CABD_STAT_MSK	= 3<<13, /* Bit 14..13: Status Mask */
	PHY_M_CABD_AMPL_MSK	= 0x1f<<8, /* Bit 12.. 8: Amplitude Mask */
					 /* (88E1111 only) */
	PHY_M_CABD_DIST_MSK	= 0xff,  /* Bit  7.. 0: Distance Mask */
};

/* values for Cable Diagnostic Status (11=fail; 00=OK; 10=open; 01=short) */
enum {
	CABD_STAT_NORMAL= 0,
	CABD_STAT_SHORT	= 1,
	CABD_STAT_OPEN	= 2,
	CABD_STAT_FAIL	= 3,
};
1567 | |||
/* for 10/100 Fast Ethernet PHY (88E3082 only) */
/*****  PHY_MARV_FE_LED_PAR	16 bit r/w	LED Parallel Select Reg. *****/
				/* Bit 15..12: reserved (used internally) */
enum {
	PHY_M_FELP_LED2_MSK = 0xf<<8,	/* Bit 11.. 8: LED2 Mask (LINK) */
	PHY_M_FELP_LED1_MSK = 0xf<<4,	/* Bit  7.. 4: LED1 Mask (ACT) */
	PHY_M_FELP_LED0_MSK = 0xf,	/* Bit  3.. 0: LED0 Mask (SPEED) */
};

/* Field builders; arguments are LED_PAR_CTRL_* values */
#define PHY_M_FELP_LED2_CTRL(x)	(((x)<<8) & PHY_M_FELP_LED2_MSK)
#define PHY_M_FELP_LED1_CTRL(x)	(((x)<<4) & PHY_M_FELP_LED1_MSK)
#define PHY_M_FELP_LED0_CTRL(x)	(((x)<<0) & PHY_M_FELP_LED0_MSK)

/* LED function selection values for PHY_M_FELP_LED*_CTRL() */
enum {
	LED_PAR_CTRL_COLX	= 0x00,
	LED_PAR_CTRL_ERROR	= 0x01,
	LED_PAR_CTRL_DUPLEX	= 0x02,
	LED_PAR_CTRL_DP_COL	= 0x03,
	LED_PAR_CTRL_SPEED	= 0x04,
	LED_PAR_CTRL_LINK	= 0x05,
	LED_PAR_CTRL_TX		= 0x06,
	LED_PAR_CTRL_RX		= 0x07,
	LED_PAR_CTRL_ACT	= 0x08,
	LED_PAR_CTRL_LNK_RX	= 0x09,
	LED_PAR_CTRL_LNK_AC	= 0x0a,
	LED_PAR_CTRL_ACT_BL	= 0x0b,
	LED_PAR_CTRL_TX_BL	= 0x0c,
	LED_PAR_CTRL_RX_BL	= 0x0d,
	LED_PAR_CTRL_COL_BL	= 0x0e,
	LED_PAR_CTRL_INACT	= 0x0f
};

/*****  PHY_MARV_FE_SPEC_2	16 bit r/w	Specific Control Reg. 2 *****/
enum {
	PHY_M_FESC_DIS_WAIT	= 1<<2, /* Disable TDR Waiting Period */
	PHY_M_FESC_ENA_MCLK	= 1<<1, /* Enable MAC Rx Clock in sleep mode */
	PHY_M_FESC_SEL_CL_A	= 1<<0, /* Select Class A driver (100B-TX) */
};


/*****  PHY_MARV_PHY_CTRL (page 3)	16 bit r/w	LED Control Reg. *****/
enum {
	PHY_M_LEDC_LOS_MSK	= 0xf<<12, /* Bit 15..12: LOS LED Ctrl. Mask */
	PHY_M_LEDC_INIT_MSK	= 0xf<<8,  /* Bit 11.. 8: INIT LED Ctrl. Mask */
	PHY_M_LEDC_STA1_MSK	= 0xf<<4,  /* Bit  7.. 4: STAT1 LED Ctrl. Mask */
	PHY_M_LEDC_STA0_MSK	= 0xf,	   /* Bit  3.. 0: STAT0 LED Ctrl. Mask */
};

#define PHY_M_LEDC_LOS_CTRL(x)	(((x)<<12) & PHY_M_LEDC_LOS_MSK)
#define PHY_M_LEDC_INIT_CTRL(x)	(((x)<<8) & PHY_M_LEDC_INIT_MSK)
#define PHY_M_LEDC_STA1_CTRL(x)	(((x)<<4) & PHY_M_LEDC_STA1_MSK)
#define PHY_M_LEDC_STA0_CTRL(x)	(((x)<<0) & PHY_M_LEDC_STA0_MSK)
1620 | |||
/* GMAC registers — offsets into the per-port GMAC register block */
/* Port Registers */
enum {
	GM_GP_STAT	= 0x0000, /* 16 bit r/o	General Purpose Status */
	GM_GP_CTRL	= 0x0004, /* 16 bit r/w	General Purpose Control */
	GM_TX_CTRL	= 0x0008, /* 16 bit r/w	Transmit Control Reg. */
	GM_RX_CTRL	= 0x000c, /* 16 bit r/w	Receive Control Reg. */
	GM_TX_FLOW_CTRL	= 0x0010, /* 16 bit r/w	Transmit Flow-Control */
	GM_TX_PARAM	= 0x0014, /* 16 bit r/w	Transmit Parameter Reg. */
	GM_SERIAL_MODE	= 0x0018, /* 16 bit r/w	Serial Mode Register */
/* Source Address Registers */
	GM_SRC_ADDR_1L	= 0x001c, /* 16 bit r/w	Source Address 1 (low) */
	GM_SRC_ADDR_1M	= 0x0020, /* 16 bit r/w	Source Address 1 (middle) */
	GM_SRC_ADDR_1H	= 0x0024, /* 16 bit r/w	Source Address 1 (high) */
	GM_SRC_ADDR_2L	= 0x0028, /* 16 bit r/w	Source Address 2 (low) */
	GM_SRC_ADDR_2M	= 0x002c, /* 16 bit r/w	Source Address 2 (middle) */
	GM_SRC_ADDR_2H	= 0x0030, /* 16 bit r/w	Source Address 2 (high) */

/* Multicast Address Hash Registers */
	GM_MC_ADDR_H1	= 0x0034, /* 16 bit r/w	Multicast Address Hash 1 */
	GM_MC_ADDR_H2	= 0x0038, /* 16 bit r/w	Multicast Address Hash 2 */
	GM_MC_ADDR_H3	= 0x003c, /* 16 bit r/w	Multicast Address Hash 3 */
	GM_MC_ADDR_H4	= 0x0040, /* 16 bit r/w	Multicast Address Hash 4 */

/* Interrupt Source Registers */
	GM_TX_IRQ_SRC	= 0x0044, /* 16 bit r/o	Tx Overflow IRQ Source */
	GM_RX_IRQ_SRC	= 0x0048, /* 16 bit r/o	Rx Overflow IRQ Source */
	GM_TR_IRQ_SRC	= 0x004c, /* 16 bit r/o	Tx/Rx Over. IRQ Source */

/* Interrupt Mask Registers */
	GM_TX_IRQ_MSK	= 0x0050, /* 16 bit r/w	Tx Overflow IRQ Mask */
	GM_RX_IRQ_MSK	= 0x0054, /* 16 bit r/w	Rx Overflow IRQ Mask */
	GM_TR_IRQ_MSK	= 0x0058, /* 16 bit r/w	Tx/Rx Over. IRQ Mask */

/* Serial Management Interface (SMI) Registers */
	GM_SMI_CTRL	= 0x0080, /* 16 bit r/w	SMI Control Register */
	GM_SMI_DATA	= 0x0084, /* 16 bit r/w	SMI Data Register */
	GM_PHY_ADDR	= 0x0088, /* 16 bit r/w	GPHY Address Register */
};
1660 | |||
/* MIB Counters */
#define GM_MIB_CNT_BASE	0x0100	/* Base Address of MIB Counters */
#define GM_MIB_CNT_SIZE	44	/* Number of MIB Counters */

/*
 * MIB Counters base address definitions (low word) -
 * use offset 4 for access to high word	(32 bit r/o)
 */
enum {
	GM_RXF_UC_OK  = GM_MIB_CNT_BASE + 0,	/* Unicast Frames Received OK */
	GM_RXF_BC_OK	= GM_MIB_CNT_BASE + 8,	/* Broadcast Frames Received OK */
	GM_RXF_MPAUSE	= GM_MIB_CNT_BASE + 16,	/* Pause MAC Ctrl Frames Received */
	GM_RXF_MC_OK	= GM_MIB_CNT_BASE + 24,	/* Multicast Frames Received OK */
	GM_RXF_FCS_ERR	= GM_MIB_CNT_BASE + 32,	/* Rx Frame Check Seq. Error */
			/* GM_MIB_CNT_BASE + 40:	reserved */
	GM_RXO_OK_LO	= GM_MIB_CNT_BASE + 48,	/* Octets Received OK Low */
	GM_RXO_OK_HI	= GM_MIB_CNT_BASE + 56,	/* Octets Received OK High */
	GM_RXO_ERR_LO	= GM_MIB_CNT_BASE + 64,	/* Octets Received Invalid Low */
	GM_RXO_ERR_HI	= GM_MIB_CNT_BASE + 72,	/* Octets Received Invalid High */
	GM_RXF_SHT	= GM_MIB_CNT_BASE + 80,	/* Frames <64 Byte Received OK */
	GM_RXE_FRAG	= GM_MIB_CNT_BASE + 88,	/* Frames <64 Byte Received with FCS Err */
	GM_RXF_64B	= GM_MIB_CNT_BASE + 96,	/* 64 Byte Rx Frame */
	GM_RXF_127B	= GM_MIB_CNT_BASE + 104,	/* 65-127 Byte Rx Frame */
	GM_RXF_255B	= GM_MIB_CNT_BASE + 112,	/* 128-255 Byte Rx Frame */
	GM_RXF_511B	= GM_MIB_CNT_BASE + 120,	/* 256-511 Byte Rx Frame */
	GM_RXF_1023B	= GM_MIB_CNT_BASE + 128,	/* 512-1023 Byte Rx Frame */
	GM_RXF_1518B	= GM_MIB_CNT_BASE + 136,	/* 1024-1518 Byte Rx Frame */
	GM_RXF_MAX_SZ	= GM_MIB_CNT_BASE + 144,	/* 1519-MaxSize Byte Rx Frame */
	GM_RXF_LNG_ERR	= GM_MIB_CNT_BASE + 152,	/* Rx Frame too Long Error */
	GM_RXF_JAB_PKT	= GM_MIB_CNT_BASE + 160,	/* Rx Jabber Packet Frame */
			/* GM_MIB_CNT_BASE + 168:	reserved */
	GM_RXE_FIFO_OV	= GM_MIB_CNT_BASE + 176,	/* Rx FIFO overflow Event */
			/* GM_MIB_CNT_BASE + 184:	reserved */
	GM_TXF_UC_OK	= GM_MIB_CNT_BASE + 192,	/* Unicast Frames Xmitted OK */
	GM_TXF_BC_OK	= GM_MIB_CNT_BASE + 200,	/* Broadcast Frames Xmitted OK */
	GM_TXF_MPAUSE	= GM_MIB_CNT_BASE + 208,	/* Pause MAC Ctrl Frames Xmitted */
	GM_TXF_MC_OK	= GM_MIB_CNT_BASE + 216,	/* Multicast Frames Xmitted OK */
	GM_TXO_OK_LO	= GM_MIB_CNT_BASE + 224,	/* Octets Transmitted OK Low */
	GM_TXO_OK_HI	= GM_MIB_CNT_BASE + 232,	/* Octets Transmitted OK High */
	GM_TXF_64B	= GM_MIB_CNT_BASE + 240,	/* 64 Byte Tx Frame */
	GM_TXF_127B	= GM_MIB_CNT_BASE + 248,	/* 65-127 Byte Tx Frame */
	GM_TXF_255B	= GM_MIB_CNT_BASE + 256,	/* 128-255 Byte Tx Frame */
	GM_TXF_511B	= GM_MIB_CNT_BASE + 264,	/* 256-511 Byte Tx Frame */
	GM_TXF_1023B	= GM_MIB_CNT_BASE + 272,	/* 512-1023 Byte Tx Frame */
	GM_TXF_1518B	= GM_MIB_CNT_BASE + 280,	/* 1024-1518 Byte Tx Frame */
	GM_TXF_MAX_SZ	= GM_MIB_CNT_BASE + 288,	/* 1519-MaxSize Byte Tx Frame */

	GM_TXF_COL	= GM_MIB_CNT_BASE + 304,	/* Tx Collision */
	GM_TXF_LAT_COL	= GM_MIB_CNT_BASE + 312,	/* Tx Late Collision */
	GM_TXF_ABO_COL	= GM_MIB_CNT_BASE + 320,	/* Tx aborted due to Exces. Col. */
	GM_TXF_MUL_COL	= GM_MIB_CNT_BASE + 328,	/* Tx Multiple Collision */
	GM_TXF_SNG_COL	= GM_MIB_CNT_BASE + 336,	/* Tx Single Collision */
	GM_TXE_FIFO_UR	= GM_MIB_CNT_BASE + 344,	/* Tx FIFO Underrun Event */
};
1715 | |||
/* GMAC Bit Definitions */
/*	GM_GP_STAT	16 bit r/o	General Purpose Status Register */
enum {
	GM_GPSR_SPEED		= 1<<15, /* Bit 15:	Port Speed (1 = 100 Mbps) */
	GM_GPSR_DUPLEX		= 1<<14, /* Bit 14:	Duplex Mode (1 = Full) */
	GM_GPSR_FC_TX_DIS	= 1<<13, /* Bit 13:	Tx Flow-Control Mode Disabled */
	GM_GPSR_LINK_UP		= 1<<12, /* Bit 12:	Link Up Status */
	GM_GPSR_PAUSE		= 1<<11, /* Bit 11:	Pause State */
	GM_GPSR_TX_ACTIVE	= 1<<10, /* Bit 10:	Tx in Progress */
	GM_GPSR_EXC_COL		= 1<<9,	/* Bit  9:	Excessive Collisions Occurred */
	GM_GPSR_LAT_COL		= 1<<8,	/* Bit  8:	Late Collisions Occurred */

	GM_GPSR_PHY_ST_CH	= 1<<5,	/* Bit  5:	PHY Status Change */
	GM_GPSR_GIG_SPEED	= 1<<4,	/* Bit  4:	Gigabit Speed (1 = 1000 Mbps) */
	GM_GPSR_PART_MODE	= 1<<3,	/* Bit  3:	Partition mode */
	GM_GPSR_FC_RX_DIS	= 1<<2,	/* Bit  2:	Rx Flow-Control Mode Disabled */
	GM_GPSR_PROM_EN		= 1<<1,	/* Bit  1:	Promiscuous Mode Enabled */
};

/*	GM_GP_CTRL	16 bit r/w	General Purpose Control Register */
enum {
	GM_GPCR_PROM_ENA	= 1<<14, /* Bit 14:	Enable Promiscuous Mode */
	GM_GPCR_FC_TX_DIS	= 1<<13, /* Bit 13:	Disable Tx Flow-Control Mode */
	GM_GPCR_TX_ENA		= 1<<12, /* Bit 12:	Enable Transmit */
	GM_GPCR_RX_ENA		= 1<<11, /* Bit 11:	Enable Receive */
	GM_GPCR_BURST_ENA	= 1<<10, /* Bit 10:	Enable Burst Mode */
	GM_GPCR_LOOP_ENA	= 1<<9,	/* Bit  9:	Enable MAC Loopback Mode */
	GM_GPCR_PART_ENA	= 1<<8,	/* Bit  8:	Enable Partition Mode */
	GM_GPCR_GIGS_ENA	= 1<<7,	/* Bit  7:	Gigabit Speed (1000 Mbps) */
	GM_GPCR_FL_PASS		= 1<<6,	/* Bit  6:	Force Link Pass */
	GM_GPCR_DUP_FULL	= 1<<5,	/* Bit  5:	Full Duplex Mode */
	GM_GPCR_FC_RX_DIS	= 1<<4,	/* Bit  4:	Disable Rx Flow-Control Mode */
	GM_GPCR_SPEED_100	= 1<<3,	/* Bit  3:	Port Speed 100 Mbps */
	GM_GPCR_AU_DUP_DIS	= 1<<2,	/* Bit  2:	Disable Auto-Update Duplex */
	GM_GPCR_AU_FCT_DIS	= 1<<1,	/* Bit  1:	Disable Auto-Update Flow-C. */
	GM_GPCR_AU_SPD_DIS	= 1<<0,	/* Bit  0:	Disable Auto-Update Speed */
};

/* 1000 Mbps = gigabit enable + 100 Mbps bit, per the control encoding above */
#define GM_GPCR_SPEED_1000	(GM_GPCR_GIGS_ENA | GM_GPCR_SPEED_100)
/* Disable all link-parameter auto-update sources at once */
#define GM_GPCR_AU_ALL_DIS	(GM_GPCR_AU_DUP_DIS | GM_GPCR_AU_FCT_DIS|GM_GPCR_AU_SPD_DIS)
1756 | |||
/*	GM_TX_CTRL	16 bit r/w	Transmit Control Register */
enum {
	GM_TXCR_FORCE_JAM	= 1<<15, /* Bit 15:	Force Jam / Flow-Control */
	GM_TXCR_CRC_DIS		= 1<<14, /* Bit 14:	Disable insertion of CRC */
	GM_TXCR_PAD_DIS		= 1<<13, /* Bit 13:	Disable padding of packets */
	GM_TXCR_COL_THR_MSK	= 7<<10, /* Bit 12..10:	Collision Threshold */
};

/* Build the collision threshold field; masks out-of-range values. */
#define TX_COL_THR(x)		(((x)<<10) & GM_TXCR_COL_THR_MSK)
#define TX_COL_DEF		0x04	/* late collision after 64 byte */

/*	GM_RX_CTRL	16 bit r/w	Receive Control Register */
enum {
	GM_RXCR_UCF_ENA	= 1<<15, /* Bit 15:	Enable Unicast filtering */
	GM_RXCR_MCF_ENA	= 1<<14, /* Bit 14:	Enable Multicast filtering */
	GM_RXCR_CRC_DIS	= 1<<13, /* Bit 13:	Remove 4-byte CRC */
	GM_RXCR_PASS_FC	= 1<<12, /* Bit 12:	Pass FC packets to FIFO */
};

/*	GM_TX_PARAM	16 bit r/w	Transmit Parameter Register */
enum {
	GM_TXPA_JAMLEN_MSK	= 0x03<<14,	/* Bit 15..14:	Jam Length */
	GM_TXPA_JAMIPG_MSK	= 0x1f<<9,	/* Bit 13..9:	Jam IPG */
	GM_TXPA_JAMDAT_MSK	= 0x1f<<4,	/* Bit 8..4:	IPG Jam to Data */

	/* default values for the three fields above */
	TX_JAM_LEN_DEF		= 0x03,
	TX_JAM_IPG_DEF		= 0x0b,
	TX_IPG_JAM_DEF		= 0x1c,
};

/* Field builders for GM_TX_PARAM; mask out-of-range values */
#define TX_JAM_LEN_VAL(x)	(((x)<<14) & GM_TXPA_JAMLEN_MSK)
#define TX_JAM_IPG_VAL(x)	(((x)<<9)  & GM_TXPA_JAMIPG_MSK)
#define TX_IPG_JAM_DATA(x)	(((x)<<4)  & GM_TXPA_JAMDAT_MSK)
1790 | |||
/*	GM_SERIAL_MODE	16 bit r/w	Serial Mode Register */
enum {
	GM_SMOD_DATABL_MSK	= 0x1f<<11, /* Bit 15..11:	Data Blinder (r/o) */
	GM_SMOD_LIMIT_4		= 1<<10, /* Bit 10:	4 consecutive Tx trials */
	GM_SMOD_VLAN_ENA	= 1<<9,	/* Bit  9:	Enable VLAN  (Max. Frame Len) */
	GM_SMOD_JUMBO_ENA	= 1<<8,	/* Bit  8:	Enable Jumbo (Max. Frame Len) */
	GM_SMOD_IPG_MSK		= 0x1f	/* Bit 4..0:	Inter-Packet Gap (IPG) */
};

/* Build the Data Blinder field value; masks out-of-range bits. */
#define DATA_BLIND_VAL(x)	(((x)<<11) & GM_SMOD_DATABL_MSK)
#define DATA_BLIND_DEF		0x04

/* Build the Inter-Packet Gap field value.
 * Fix: the macro argument is now parenthesized — the previous expansion
 * `(x & GM_SMOD_IPG_MSK)` mis-bound low-precedence argument expressions
 * (e.g. conditional operators), masking only part of the expression.
 */
#define IPG_DATA_VAL(x)		((x) & GM_SMOD_IPG_MSK)
#define IPG_DATA_DEF		0x1e
1806 | |||
/*	GM_SMI_CTRL	16 bit r/w	SMI Control Register */
enum {
	GM_SMI_CT_PHY_A_MSK	= 0x1f<<11, /* Bit 15..11:	PHY Device Address */
	GM_SMI_CT_REG_A_MSK	= 0x1f<<6, /* Bit 10.. 6:	PHY Register Address */
	GM_SMI_CT_OP_RD		= 1<<5,	/* Bit  5:	OpCode Read (0=Write)*/
	GM_SMI_CT_RD_VAL	= 1<<4,	/* Bit  4:	Read Valid (Read completed) */
	GM_SMI_CT_BUSY		= 1<<3,	/* Bit  3:	Busy (Operation in progress) */
};

/* Field builders for the SMI PHY/register address fields */
#define GM_SMI_CT_PHY_AD(x)	(((x)<<11) & GM_SMI_CT_PHY_A_MSK)
#define GM_SMI_CT_REG_AD(x)	(((x)<<6) & GM_SMI_CT_REG_A_MSK)

/*	GM_PHY_ADDR	16 bit r/w	GPHY Address Register */
enum {
	GM_PAR_MIB_CLR	= 1<<5, /* Bit  5:	Set MIB Clear Counter Mode */
	GM_PAR_MIB_TST	= 1<<4, /* Bit  4:	MIB Load Counter (Test Mode) */
};
1824 | |||
/* Receive Frame Status Encoding (status word in the Rx descriptor) */
enum {
	GMR_FS_LEN	= 0xffff<<16, /* Bit 31..16:	Rx Frame Length */
	/* NOTE(review): 0xffff<<16 does not fit in a signed int enum constant;
	 * this relies on a compiler extension (works under GCC) — verify if
	 * ever built with a strictly conforming compiler. */
	GMR_FS_LEN_SHIFT = 16,
	GMR_FS_VLAN	= 1<<13, /* Bit 13:	VLAN Packet */
	GMR_FS_JABBER	= 1<<12, /* Bit 12:	Jabber Packet */
	GMR_FS_UN_SIZE	= 1<<11, /* Bit 11:	Undersize Packet */
	GMR_FS_MC	= 1<<10, /* Bit 10:	Multicast Packet */
	GMR_FS_BC	= 1<<9,  /* Bit  9:	Broadcast Packet */
	GMR_FS_RX_OK	= 1<<8,  /* Bit  8:	Receive OK (Good Packet) */
	GMR_FS_GOOD_FC	= 1<<7,  /* Bit  7:	Good Flow-Control Packet */
	GMR_FS_BAD_FC	= 1<<6,  /* Bit  6:	Bad  Flow-Control Packet */
	GMR_FS_MII_ERR	= 1<<5,  /* Bit  5:	MII Error */
	GMR_FS_LONG_ERR	= 1<<4,  /* Bit  4:	Too Long Packet */
	GMR_FS_FRAGMENT	= 1<<3,  /* Bit  3:	Fragment */

	GMR_FS_CRC_ERR	= 1<<1,  /* Bit  1:	CRC Error */
	GMR_FS_RX_FF_OV	= 1<<0,  /* Bit  0:	Rx FIFO Overflow */

/*
 * GMR_FS_ANY_ERR (analogous to XMR_FS_ANY_ERR)
 */
	GMR_FS_ANY_ERR	= GMR_FS_CRC_ERR | GMR_FS_LONG_ERR |
			  GMR_FS_MII_ERR | GMR_FS_BAD_FC | GMR_FS_GOOD_FC |
			  GMR_FS_JABBER,
/* Rx GMAC FIFO Flush Mask (default) */
	RX_FF_FL_DEF_MSK = GMR_FS_CRC_ERR | GMR_FS_RX_FF_OV |GMR_FS_MII_ERR |
			   GMR_FS_BAD_FC | GMR_FS_UN_SIZE | GMR_FS_JABBER,
};
1854 | |||
/*	RX_GMF_CTRL_T	32 bit	Rx GMAC FIFO Control/Test */
enum {
	GMF_WP_TST_ON	= 1<<14,	/* Write Pointer Test On */
	GMF_WP_TST_OFF	= 1<<13,	/* Write Pointer Test Off */
	GMF_WP_STEP	= 1<<12,	/* Write Pointer Step/Increment */

	GMF_RP_TST_ON	= 1<<10,	/* Read Pointer Test On */
	GMF_RP_TST_OFF	= 1<<9,		/* Read Pointer Test Off */
	GMF_RP_STEP	= 1<<8,		/* Read Pointer Step/Increment */
	GMF_RX_F_FL_ON	= 1<<7,		/* Rx FIFO Flush Mode On */
	GMF_RX_F_FL_OFF	= 1<<6,		/* Rx FIFO Flush Mode Off */
	GMF_CLI_RX_FO	= 1<<5,		/* Clear IRQ Rx FIFO Overrun */
	GMF_CLI_RX_FC	= 1<<4,		/* Clear IRQ Rx Frame Complete */
	GMF_OPER_ON	= 1<<3,		/* Operational Mode On */
	GMF_OPER_OFF	= 1<<2,		/* Operational Mode Off */
	GMF_RST_CLR	= 1<<1,		/* Clear GMAC FIFO Reset */
	GMF_RST_SET	= 1<<0,		/* Set   GMAC FIFO Reset */

	RX_GMF_FL_THR_DEF = 0xa,	/* flush threshold (default) */
};


/*	TX_GMF_CTRL_T	32 bit	Tx GMAC FIFO Control/Test */
enum {
	GMF_WSP_TST_ON	= 1<<18, /* Write Shadow Pointer Test On */
	GMF_WSP_TST_OFF	= 1<<17, /* Write Shadow Pointer Test Off */
	GMF_WSP_STEP	= 1<<16, /* Write Shadow Pointer Step/Increment */

	GMF_CLI_TX_FU	= 1<<6,	/* Clear IRQ Tx FIFO Underrun */
	GMF_CLI_TX_FC	= 1<<5,	/* Clear IRQ Tx Frame Complete */
	GMF_CLI_TX_PE	= 1<<4,	/* Clear IRQ Tx Parity Error */
};

/*	GMAC_TI_ST_CTRL	 8 bit	Time Stamp Timer Ctrl Reg (YUKON only) */
enum {
	GMT_ST_START	= 1<<2, /* Start Time Stamp Timer */
	GMT_ST_STOP	= 1<<1, /* Stop  Time Stamp Timer */
	GMT_ST_CLR_IRQ	= 1<<0, /* Clear Time Stamp Timer IRQ */
};

/*	GMAC_CTRL	32 bit	GMAC Control Reg (YUKON only) */
enum {
	GMC_H_BURST_ON	= 1<<7,	/* Half Duplex Burst Mode On */
	GMC_H_BURST_OFF	= 1<<6,	/* Half Duplex Burst Mode Off */
	GMC_F_LOOPB_ON	= 1<<5,	/* FIFO Loopback On */
	GMC_F_LOOPB_OFF	= 1<<4,	/* FIFO Loopback Off */
	GMC_PAUSE_ON	= 1<<3,	/* Pause On */
	GMC_PAUSE_OFF	= 1<<2,	/* Pause Off */
	GMC_RST_CLR	= 1<<1,	/* Clear GMAC Reset */
	GMC_RST_SET	= 1<<0,	/* Set   GMAC Reset */
};
1906 | |||
/*	GPHY_CTRL	32 bit	GPHY Control Reg (YUKON only) */
enum {
	GPC_SEL_BDT	= 1<<28, /* Select Bi-Dir. Transfer for MDC/MDIO */
	GPC_INT_POL_HI	= 1<<27, /* IRQ Polarity is Active HIGH */
	GPC_75_OHM	= 1<<26, /* Use 75 Ohm Termination instead of 50 */
	GPC_DIS_FC	= 1<<25, /* Disable Automatic Fiber/Copper Detection */
	GPC_DIS_SLEEP	= 1<<24, /* Disable Energy Detect */
	GPC_HWCFG_M_3	= 1<<23, /* HWCFG_MODE[3] */
	GPC_HWCFG_M_2	= 1<<22, /* HWCFG_MODE[2] */
	GPC_HWCFG_M_1	= 1<<21, /* HWCFG_MODE[1] */
	GPC_HWCFG_M_0	= 1<<20, /* HWCFG_MODE[0] */
	GPC_ANEG_0	= 1<<19, /* ANEG[0] */
	GPC_ENA_XC	= 1<<18, /* Enable MDI crossover */
	GPC_DIS_125	= 1<<17, /* Disable 125 MHz clock */
	GPC_ANEG_3	= 1<<16, /* ANEG[3] */
	GPC_ANEG_2	= 1<<15, /* ANEG[2] */
	GPC_ANEG_1	= 1<<14, /* ANEG[1] */
	GPC_ENA_PAUSE	= 1<<13, /* Enable Pause (SYM_OR_REM) */
	GPC_PHYADDR_4	= 1<<12, /* Bit 4 of Phy Addr */
	GPC_PHYADDR_3	= 1<<11, /* Bit 3 of Phy Addr */
	GPC_PHYADDR_2	= 1<<10, /* Bit 2 of Phy Addr */
	GPC_PHYADDR_1	= 1<<9,  /* Bit 1 of Phy Addr */
	GPC_PHYADDR_0	= 1<<8,  /* Bit 0 of Phy Addr */
		/* Bits  7..2:	reserved */
	GPC_RST_CLR	= 1<<1,  /* Clear GPHY Reset */
	GPC_RST_SET	= 1<<0,  /* Set   GPHY Reset */
};

/* HWCFG_MODE strap combinations */
#define GPC_HWCFG_GMII_COP (GPC_HWCFG_M_3|GPC_HWCFG_M_2 | GPC_HWCFG_M_1 | GPC_HWCFG_M_0)
#define GPC_HWCFG_GMII_FIB (GPC_HWCFG_M_2 | GPC_HWCFG_M_1 | GPC_HWCFG_M_0)
#define GPC_ANEG_ADV_ALL_M  (GPC_ANEG_3 | GPC_ANEG_2 | GPC_ANEG_1 | GPC_ANEG_0)

/* forced speed and duplex mode (don't mix with other ANEG bits) */
#define GPC_FRC10MBIT_HALF	0
#define GPC_FRC10MBIT_FULL	GPC_ANEG_0
#define GPC_FRC100MBIT_HALF	GPC_ANEG_1
#define GPC_FRC100MBIT_FULL	(GPC_ANEG_0 | GPC_ANEG_1)

/* auto-negotiation with limited advertised speeds */
/* mix only with master/slave settings (for copper) */
#define GPC_ADV_1000_HALF	GPC_ANEG_2
#define GPC_ADV_1000_FULL	GPC_ANEG_3
#define GPC_ADV_ALL		(GPC_ANEG_2 | GPC_ANEG_3)

/* master/slave settings */
/* only for copper with 1000 Mbps */
#define GPC_FORCE_MASTER	0
#define GPC_FORCE_SLAVE		GPC_ANEG_0
#define GPC_PREF_MASTER		GPC_ANEG_1
#define GPC_PREF_SLAVE		(GPC_ANEG_1 | GPC_ANEG_0)
1957 | |||
/* GMAC_IRQ_SRC	 8 bit	GMAC Interrupt Source Reg (YUKON only) */
/* GMAC_IRQ_MSK	 8 bit	GMAC Interrupt Mask   Reg (YUKON only) */
/* NOTE: this single enum covers bit definitions for three distinct Yukon
 * registers: the GMAC interrupt source/mask (GM_IS_*), the GMAC link
 * control register (GMLC_*), and the WOL control/status register
 * (WOL_CTL_*) — see the per-group comments below.
 */
enum {
	GM_IS_TX_CO_OV	= 1<<5,	/* Transmit Counter Overflow IRQ */
	GM_IS_RX_CO_OV	= 1<<4,	/* Receive Counter Overflow IRQ */
	GM_IS_TX_FF_UR	= 1<<3,	/* Transmit FIFO Underrun */
	GM_IS_TX_COMPL	= 1<<2,	/* Frame Transmission Complete */
	GM_IS_RX_FF_OR	= 1<<1,	/* Receive FIFO Overrun */
	GM_IS_RX_COMPL	= 1<<0,	/* Frame Reception Complete */

/* default mask: only FIFO error interrupts are of interest */
#define GMAC_DEF_MSK	(GM_IS_RX_FF_OR | GM_IS_TX_FF_UR)

/* GMAC_LINK_CTRL	16 bit	GMAC Link Control Reg (YUKON only) */
	/* Bits 15.. 2:	reserved */
	GMLC_RST_CLR	= 1<<1,	/* Clear GMAC Link Reset */
	GMLC_RST_SET	= 1<<0,	/* Set GMAC Link Reset */


/* WOL_CTRL_STAT	16 bit	WOL Control/Status Reg */
	WOL_CTL_LINK_CHG_OCC		= 1<<15,
	WOL_CTL_MAGIC_PKT_OCC		= 1<<14,
	WOL_CTL_PATTERN_OCC		= 1<<13,
	WOL_CTL_CLEAR_RESULT		= 1<<12,
	WOL_CTL_ENA_PME_ON_LINK_CHG	= 1<<11,
	WOL_CTL_DIS_PME_ON_LINK_CHG	= 1<<10,
	WOL_CTL_ENA_PME_ON_MAGIC_PKT	= 1<<9,
	WOL_CTL_DIS_PME_ON_MAGIC_PKT	= 1<<8,
	WOL_CTL_ENA_PME_ON_PATTERN	= 1<<7,
	WOL_CTL_DIS_PME_ON_PATTERN	= 1<<6,
	WOL_CTL_ENA_LINK_CHG_UNIT	= 1<<5,
	WOL_CTL_DIS_LINK_CHG_UNIT	= 1<<4,
	WOL_CTL_ENA_MAGIC_PKT_UNIT	= 1<<3,
	WOL_CTL_DIS_MAGIC_PKT_UNIT	= 1<<2,
	WOL_CTL_ENA_PATTERN_UNIT	= 1<<1,
	WOL_CTL_DIS_PATTERN_UNIT	= 1<<0,
};

/* default: every wake-on-LAN unit and PME source disabled */
#define WOL_CTL_DEFAULT				\
	(WOL_CTL_DIS_PME_ON_LINK_CHG |		\
	WOL_CTL_DIS_PME_ON_PATTERN |		\
	WOL_CTL_DIS_PME_ON_MAGIC_PKT |		\
	WOL_CTL_DIS_LINK_CHG_UNIT |		\
	WOL_CTL_DIS_PATTERN_UNIT |		\
	WOL_CTL_DIS_MAGIC_PKT_UNIT)

/* WOL_MATCH_CTL	 8 bit	WOL Match Control Reg */
#define WOL_CTL_PATT_ENA(x)	(1 << (x))	/* enable WOL pattern slot x */
2006 | |||
/* XMAC II registers */
/* Offsets are register numbers inside the per-port XMAC window; see
 * SK_XMAC_REG() below for the mapping into PCI register space.
 */
enum {
	XM_MMU_CMD	= 0x0000, /* 16 bit r/w	MMU Command Register */
	XM_POFF		= 0x0008, /* 32 bit r/w	Packet Offset Register */
	XM_BURST	= 0x000c, /* 32 bit r/w	Burst Register for half duplex*/
	XM_1L_VLAN_TAG	= 0x0010, /* 16 bit r/w	One Level VLAN Tag ID */
	XM_2L_VLAN_TAG	= 0x0014, /* 16 bit r/w	Two Level VLAN Tag ID */
	XM_TX_CMD	= 0x0020, /* 16 bit r/w	Transmit Command Register */
	XM_TX_RT_LIM	= 0x0024, /* 16 bit r/w	Transmit Retry Limit Register */
	XM_TX_STIME	= 0x0028, /* 16 bit r/w	Transmit Slottime Register */
	XM_TX_IPG	= 0x002c, /* 16 bit r/w	Transmit Inter Packet Gap */
	XM_RX_CMD	= 0x0030, /* 16 bit r/w	Receive Command Register */
	XM_PHY_ADDR	= 0x0034, /* 16 bit r/w	PHY Address Register */
	XM_PHY_DATA	= 0x0038, /* 16 bit r/w	PHY Data Register */
	XM_GP_PORT	= 0x0040, /* 32 bit r/w	General Purpose Port Register */
	XM_IMSK		= 0x0044, /* 16 bit r/w	Interrupt Mask Register */
	XM_ISRC		= 0x0048, /* 16 bit r/o	Interrupt Status Register */
	XM_HW_CFG	= 0x004c, /* 16 bit r/w	Hardware Config Register */
	XM_TX_LO_WM	= 0x0060, /* 16 bit r/w	Tx FIFO Low Water Mark */
	XM_TX_HI_WM	= 0x0062, /* 16 bit r/w	Tx FIFO High Water Mark */
	XM_TX_THR	= 0x0064, /* 16 bit r/w	Tx Request Threshold */
	XM_HT_THR	= 0x0066, /* 16 bit r/w	Host Request Threshold */
	XM_PAUSE_DA	= 0x0068, /* NA reg r/w	Pause Destination Address */
	XM_CTL_PARA	= 0x0070, /* 32 bit r/w	Control Parameter Register */
	XM_MAC_OPCODE	= 0x0074, /* 16 bit r/w	Opcode for MAC control frames */
	XM_MAC_PTIME	= 0x0076, /* 16 bit r/w	Pause time for MAC ctrl frames*/
	XM_TX_STAT	= 0x0078, /* 32 bit r/o	Tx Status LIFO Register */

	XM_EXM_START	= 0x0080, /* r/w	Start Address of the EXM Regs */
#define XM_EXM(reg)	(XM_EXM_START + ((reg) << 3))
};
2038 | |||
/* XMAC II registers, second bank: address filtering, mode control and the
 * hardware statistics counters (XM_TXF_* / XM_TXE_* / XM_RXF_* / XM_RXE_*).
 */
enum {
	XM_SRC_CHK	= 0x0100, /* NA reg r/w	Source Check Address Register */
	XM_SA		= 0x0108, /* NA reg r/w	Station Address Register */
	XM_HSM		= 0x0110, /* 64 bit r/w	Hash Match Address Registers */
	XM_RX_LO_WM	= 0x0118, /* 16 bit r/w	Receive Low Water Mark */
	XM_RX_HI_WM	= 0x011a, /* 16 bit r/w	Receive High Water Mark */
	XM_RX_THR	= 0x011c, /* 32 bit r/w	Receive Request Threshold */
	XM_DEV_ID	= 0x0120, /* 32 bit r/o	Device ID Register */
	XM_MODE		= 0x0124, /* 32 bit r/w	Mode Register */
	XM_LSA		= 0x0128, /* NA reg r/o	Last Source Register */
	XM_TS_READ	= 0x0130, /* 32 bit r/o	Time Stamp Read Register */
	XM_TS_LOAD	= 0x0134, /* 32 bit r/o	Time Stamp Load Value */
	XM_STAT_CMD	= 0x0200, /* 16 bit r/w	Statistics Command Register */
	XM_RX_CNT_EV	= 0x0204, /* 32 bit r/o	Rx Counter Event Register */
	XM_TX_CNT_EV	= 0x0208, /* 32 bit r/o	Tx Counter Event Register */
	XM_RX_EV_MSK	= 0x020c, /* 32 bit r/w	Rx Counter Event Mask */
	XM_TX_EV_MSK	= 0x0210, /* 32 bit r/w	Tx Counter Event Mask */
	XM_TXF_OK	= 0x0280, /* 32 bit r/o	Frames Transmitted OK Conuter */
	XM_TXO_OK_HI	= 0x0284, /* 32 bit r/o	Octets Transmitted OK High Cnt*/
	XM_TXO_OK_LO	= 0x0288, /* 32 bit r/o	Octets Transmitted OK Low Cnt */
	XM_TXF_BC_OK	= 0x028c, /* 32 bit r/o	Broadcast Frames Xmitted OK */
	XM_TXF_MC_OK	= 0x0290, /* 32 bit r/o	Multicast Frames Xmitted OK */
	XM_TXF_UC_OK	= 0x0294, /* 32 bit r/o	Unicast Frames Xmitted OK */
	XM_TXF_LONG	= 0x0298, /* 32 bit r/o	Tx Long Frame Counter */
	XM_TXE_BURST	= 0x029c, /* 32 bit r/o	Tx Burst Event Counter */
	XM_TXF_MPAUSE	= 0x02a0, /* 32 bit r/o	Tx Pause MAC Ctrl Frame Cnt */
	XM_TXF_MCTRL	= 0x02a4, /* 32 bit r/o	Tx MAC Ctrl Frame Counter */
	XM_TXF_SNG_COL	= 0x02a8, /* 32 bit r/o	Tx Single Collision Counter */
	XM_TXF_MUL_COL	= 0x02ac, /* 32 bit r/o	Tx Multiple Collision Counter */
	XM_TXF_ABO_COL	= 0x02b0, /* 32 bit r/o	Tx aborted due to Exces. Col. */
	XM_TXF_LAT_COL	= 0x02b4, /* 32 bit r/o	Tx Late Collision Counter */
	XM_TXF_DEF	= 0x02b8, /* 32 bit r/o	Tx Deferred Frame Counter */
	XM_TXF_EX_DEF	= 0x02bc, /* 32 bit r/o	Tx Excessive Deferall Counter */
	XM_TXE_FIFO_UR	= 0x02c0, /* 32 bit r/o	Tx FIFO Underrun Event Cnt */
	XM_TXE_CS_ERR	= 0x02c4, /* 32 bit r/o	Tx Carrier Sense Error Cnt */
	XM_TXP_UTIL	= 0x02c8, /* 32 bit r/o	Tx Utilization in % */
	XM_TXF_64B	= 0x02d0, /* 32 bit r/o	64 Byte Tx Frame Counter */
	XM_TXF_127B	= 0x02d4, /* 32 bit r/o	65-127 Byte Tx Frame Counter */
	XM_TXF_255B	= 0x02d8, /* 32 bit r/o	128-255 Byte Tx Frame Counter */
	XM_TXF_511B	= 0x02dc, /* 32 bit r/o	256-511 Byte Tx Frame Counter */
	XM_TXF_1023B	= 0x02e0, /* 32 bit r/o	512-1023 Byte Tx Frame Counter*/
	XM_TXF_MAX_SZ	= 0x02e4, /* 32 bit r/o	1024-MaxSize Byte Tx Frame Cnt*/
	XM_RXF_OK	= 0x0300, /* 32 bit r/o	Frames Received OK */
	XM_RXO_OK_HI	= 0x0304, /* 32 bit r/o	Octets Received OK High Cnt */
	XM_RXO_OK_LO	= 0x0308, /* 32 bit r/o	Octets Received OK Low Counter*/
	XM_RXF_BC_OK	= 0x030c, /* 32 bit r/o	Broadcast Frames Received OK */
	XM_RXF_MC_OK	= 0x0310, /* 32 bit r/o	Multicast Frames Received OK */
	XM_RXF_UC_OK	= 0x0314, /* 32 bit r/o	Unicast Frames Received OK */
	XM_RXF_MPAUSE	= 0x0318, /* 32 bit r/o	Rx Pause MAC Ctrl Frame Cnt */
	XM_RXF_MCTRL	= 0x031c, /* 32 bit r/o	Rx MAC Ctrl Frame Counter */
	XM_RXF_INV_MP	= 0x0320, /* 32 bit r/o	Rx invalid Pause Frame Cnt */
	XM_RXF_INV_MOC	= 0x0324, /* 32 bit r/o	Rx Frames with inv. MAC Opcode*/
	XM_RXE_BURST	= 0x0328, /* 32 bit r/o	Rx Burst Event Counter */
	XM_RXE_FMISS	= 0x032c, /* 32 bit r/o	Rx Missed Frames Event Cnt */
	XM_RXF_FRA_ERR	= 0x0330, /* 32 bit r/o	Rx Framing Error Counter */
	XM_RXE_FIFO_OV	= 0x0334, /* 32 bit r/o	Rx FIFO overflow Event Cnt */
	XM_RXF_JAB_PKT	= 0x0338, /* 32 bit r/o	Rx Jabber Packet Frame Cnt */
	XM_RXE_CAR_ERR	= 0x033c, /* 32 bit r/o	Rx Carrier Event Error Cnt */
	XM_RXF_LEN_ERR	= 0x0340, /* 32 bit r/o	Rx in Range Length Error */
	XM_RXE_SYM_ERR	= 0x0344, /* 32 bit r/o	Rx Symbol Error Counter */
	XM_RXE_SHT_ERR	= 0x0348, /* 32 bit r/o	Rx Short Event Error Cnt */
	XM_RXE_RUNT	= 0x034c, /* 32 bit r/o	Rx Runt Event Counter */
	XM_RXF_LNG_ERR	= 0x0350, /* 32 bit r/o	Rx Frame too Long Error Cnt */
	XM_RXF_FCS_ERR	= 0x0354, /* 32 bit r/o	Rx Frame Check Seq. Error Cnt */
	XM_RXF_CEX_ERR	= 0x035c, /* 32 bit r/o	Rx Carrier Ext Error Frame Cnt*/
	XM_RXP_UTIL	= 0x0360, /* 32 bit r/o	Rx Utilization in % */
	XM_RXF_64B	= 0x0368, /* 32 bit r/o	64 Byte Rx Frame Counter */
	XM_RXF_127B	= 0x036c, /* 32 bit r/o	65-127 Byte Rx Frame Counter */
	XM_RXF_255B	= 0x0370, /* 32 bit r/o	128-255 Byte Rx Frame Counter */
	XM_RXF_511B	= 0x0374, /* 32 bit r/o	256-511 Byte Rx Frame Counter */
	XM_RXF_1023B	= 0x0378, /* 32 bit r/o	512-1023 Byte Rx Frame Counter*/
	XM_RXF_MAX_SZ	= 0x037c, /* 32 bit r/o	1024-MaxSize Byte Rx Frame Cnt*/
};
2112 | |||
/* XM_MMU_CMD	16 bit r/w	MMU Command Register */
/* Bit 8 is not defined here; the gap matches the hardware layout. */
enum {
	XM_MMU_PHY_RDY	= 1<<12, /* Bit 12:	PHY Read Ready */
	XM_MMU_PHY_BUSY	= 1<<11, /* Bit 11:	PHY Busy */
	XM_MMU_IGN_PF	= 1<<10, /* Bit 10:	Ignore Pause Frame */
	XM_MMU_MAC_LB	= 1<<9,	 /* Bit  9:	Enable MAC Loopback */
	XM_MMU_FRC_COL	= 1<<7,	 /* Bit  7:	Force Collision */
	XM_MMU_SIM_COL	= 1<<6,	 /* Bit  6:	Simulate Collision */
	XM_MMU_NO_PRE	= 1<<5,	 /* Bit  5:	No MDIO Preamble */
	XM_MMU_GMII_FD	= 1<<4,	 /* Bit  4:	GMII uses Full Duplex */
	XM_MMU_RAT_CTRL	= 1<<3,	 /* Bit  3:	Enable Rate Control */
	XM_MMU_GMII_LOOP= 1<<2,	 /* Bit  2:	PHY is in Loopback Mode */
	XM_MMU_ENA_RX	= 1<<1,	 /* Bit  1:	Enable Receiver */
	XM_MMU_ENA_TX	= 1<<0,	 /* Bit  0:	Enable Transmitter */
};
2128 | |||
2129 | |||
/* XM_TX_CMD	16 bit r/w	Transmit Command Register */
enum {
	XM_TX_BK2BK	= 1<<6,	/* Bit 6:	Ignor Carrier Sense (Tx Bk2Bk)*/
	XM_TX_ENC_BYP	= 1<<5,	/* Bit 5:	Set Encoder in Bypass Mode */
	XM_TX_SAM_LINE	= 1<<4,	/* Bit 4: (sc)	Start utilization calculation */
	XM_TX_NO_GIG_MD	= 1<<3,	/* Bit 3:	Disable Carrier Extension */
	XM_TX_NO_PRE	= 1<<2,	/* Bit 2:	Disable Preamble Generation */
	XM_TX_NO_CRC	= 1<<1,	/* Bit 1:	Disable CRC Generation */
	XM_TX_AUTO_PAD	= 1<<0,	/* Bit 0:	Enable Automatic Padding */
};

/* XM_TX_RT_LIM	16 bit r/w	Transmit Retry Limit Register */
#define XM_RT_LIM_MSK	0x1f	/* Bit 4..0:	Tx Retry Limit */


/* XM_TX_STIME	16 bit r/w	Transmit Slottime Register */
#define XM_STIME_MSK	0x7f	/* Bit 6..0:	Tx Slottime bits */


/* XM_TX_IPG	16 bit r/w	Transmit Inter Packet Gap */
#define XM_IPG_MSK	0xff	/* Bit 7..0:	IPG value bits */
2151 | |||
2152 | |||
/* XM_RX_CMD	16 bit r/w	Receive Command Register */
enum {
	XM_RX_LENERR_OK	= 1<<8,	/* Bit 8	don't set Rx Err bit for */
				/*		inrange error packets */
	XM_RX_BIG_PK_OK	= 1<<7,	/* Bit 7	don't set Rx Err bit for */
				/*		jumbo packets */
	XM_RX_IPG_CAP	= 1<<6,	/* Bit 6	repl. type field with IPG */
	XM_RX_TP_MD	= 1<<5,	/* Bit 5:	Enable transparent Mode */
	XM_RX_STRIP_FCS	= 1<<4,	/* Bit 4:	Enable FCS Stripping */
	XM_RX_SELF_RX	= 1<<3,	/* Bit 3: 	Enable Rx of own packets */
	XM_RX_SAM_LINE	= 1<<2,	/* Bit 2: (sc)	Start utilization calculation */
	XM_RX_STRIP_PAD	= 1<<1,	/* Bit 1:	Strip pad bytes of Rx frames */
	XM_RX_DIS_CEXT	= 1<<0,	/* Bit 0:	Disable carrier ext. check */
};


/* XM_GP_PORT	32 bit r/w	General Purpose Port Register */
/* (sc) bits are self-clearing, (ro) bits are read-only. */
enum {
	XM_GP_ANIP	= 1<<6,	/* Bit 6: (ro)	Auto-Neg. in progress */
	XM_GP_FRC_INT	= 1<<5,	/* Bit 5: (sc)	Force Interrupt */
	XM_GP_RES_MAC	= 1<<3,	/* Bit 3: (sc)	Reset MAC and FIFOs */
	XM_GP_RES_STAT	= 1<<2,	/* Bit 2: (sc)	Reset the statistics module */
	XM_GP_INP_ASS	= 1<<0,	/* Bit 0: (ro)	GP Input Pin asserted */
};
2177 | |||
2178 | |||
/* XM_IMSK	16 bit r/w	Interrupt Mask Register */
/* XM_ISRC	16 bit r/o	Interrupt Status Register */
enum {
	XM_IS_LNK_AE	= 1<<14, /* Bit 14:	Link Asynchronous Event */
	XM_IS_TX_ABORT	= 1<<13, /* Bit 13:	Transmit Abort, late Col. etc */
	XM_IS_FRC_INT	= 1<<12, /* Bit 12:	Force INT bit set in GP */
	XM_IS_INP_ASS	= 1<<11, /* Bit 11:	Input Asserted, GP bit 0 set */
	XM_IS_LIPA_RC	= 1<<10, /* Bit 10:	Link Partner requests config */
	XM_IS_RX_PAGE	= 1<<9,	 /* Bit  9:	Page Received */
	XM_IS_TX_PAGE	= 1<<8,	 /* Bit  8:	Next Page Loaded for Transmit */
	XM_IS_AND	= 1<<7,	 /* Bit  7:	Auto-Negotiation Done */
	XM_IS_TSC_OV	= 1<<6,	 /* Bit  6:	Time Stamp Counter Overflow */
	XM_IS_RXC_OV	= 1<<5,	 /* Bit  5:	Rx Counter Event Overflow */
	XM_IS_TXC_OV	= 1<<4,	 /* Bit  4:	Tx Counter Event Overflow */
	XM_IS_RXF_OV	= 1<<3,	 /* Bit  3:	Receive FIFO Overflow */
	XM_IS_TXF_UR	= 1<<2,	 /* Bit  2:	Transmit FIFO Underrun */
	XM_IS_TX_COMP	= 1<<1,	 /* Bit  1:	Frame Tx Complete */
	XM_IS_RX_COMP	= 1<<0,	 /* Bit  0:	Frame Rx Complete */

	/* mask value with every interrupt source masked off */
	XM_IMSK_DISABLE	= 0xffff,
};

/* XM_HW_CFG	16 bit r/w	Hardware Config Register */
enum {
	XM_HW_GEN_EOP	= 1<<3,	/* Bit 3:	generate End of Packet pulse */
	XM_HW_COM4SIG	= 1<<2,	/* Bit 2:	use Comma Detect for Sig. Det.*/
	XM_HW_GMII_MD	= 1<<0,	/* Bit 0:	GMII Interface selected */
};


/* XM_TX_LO_WM	16 bit r/w	Tx FIFO Low Water Mark */
/* XM_TX_HI_WM	16 bit r/w	Tx FIFO High Water Mark */
#define XM_TX_WM_MSK	0x01ff	/* Bit 9.. 0	Tx FIFO Watermark bits */

/* XM_TX_THR	16 bit r/w	Tx Request Threshold */
/* XM_HT_THR	16 bit r/w	Host Request Threshold */
/* XM_RX_THR	16 bit r/w	Rx Request Threshold */
#define XM_THR_MSK	0x03ff	/* Bit 10.. 0	Rx/Tx Request Threshold bits */
2217 | |||
2218 | |||
/* XM_TX_STAT	32 bit r/o	Tx Status LIFO Register */
/* Note bit 31 uses 1UL so the constant stays out of the int sign bit. */
enum {
	XM_ST_VALID	= (1UL<<31),	/* Bit 31:	Status Valid */
	XM_ST_BYTE_CNT	= (0x3fffL<<17),	/* Bit 30..17:	Tx frame Length */
	XM_ST_RETRY_CNT	= (0x1fL<<12),	/* Bit 16..12:	Retry Count */
	XM_ST_EX_COL	= 1<<11,	/* Bit 11:	Excessive Collisions */
	XM_ST_EX_DEF	= 1<<10,	/* Bit 10:	Excessive Deferral */
	XM_ST_BURST	= 1<<9,		/* Bit  9:	p. xmitted in burst md*/
	XM_ST_DEFER	= 1<<8,		/* Bit  8:	packet was defered */
	XM_ST_BC	= 1<<7,		/* Bit  7:	Broadcast packet */
	XM_ST_MC	= 1<<6,		/* Bit  6:	Multicast packet */
	XM_ST_UC	= 1<<5,		/* Bit  5:	Unicast packet */
	XM_ST_TX_UR	= 1<<4,		/* Bit  4:	FIFO Underrun occurred */
	XM_ST_CS_ERR	= 1<<3,		/* Bit  3:	Carrier Sense Error */
	XM_ST_LAT_COL	= 1<<2,		/* Bit  2:	Late Collision Error */
	XM_ST_MUL_COL	= 1<<1,		/* Bit  1:	Multiple Collisions */
	XM_ST_SGN_COL	= 1<<0,		/* Bit  0:	Single Collision */
};

/* XM_RX_LO_WM	16 bit r/w	Receive Low Water Mark */
/* XM_RX_HI_WM	16 bit r/w	Receive High Water Mark */
#define XM_RX_WM_MSK	0x03ff	/* Bit 11.. 0:	Rx FIFO Watermark bits */


/* XM_DEV_ID	32 bit r/o	Device ID Register */
#define XM_DEV_OUI	(0x00ffffffUL<<8)	/* Bit 31..8:	Device OUI */
#define XM_DEV_REV	(0x07L << 5)		/* Bit  7..5:	Chip Rev Num */
2246 | |||
2247 | |||
/* XM_MODE	32 bit r/w	Mode Register */
enum {
	XM_MD_ENA_REJ	= 1<<26, /* Bit 26:	Enable Frame Reject */
	XM_MD_SPOE_E	= 1<<25, /* Bit 25:	Send Pause on Edge */
				 /* 		extern generated */
	XM_MD_TX_REP	= 1<<24, /* Bit 24:	Transmit Repeater Mode */
	XM_MD_SPOFF_I	= 1<<23, /* Bit 23:	Send Pause on FIFO full */
				 /* 		intern generated */
	XM_MD_LE_STW	= 1<<22, /* Bit 22:	Rx Stat Word in Little Endian */
	XM_MD_TX_CONT	= 1<<21, /* Bit 21:	Send Continuous */
	XM_MD_TX_PAUSE	= 1<<20, /* Bit 20: (sc)	Send Pause Frame */
	XM_MD_ATS	= 1<<19, /* Bit 19:	Append Time Stamp */
	XM_MD_SPOL_I	= 1<<18, /* Bit 18:	Send Pause on Low */
				 /* 		intern generated */
	XM_MD_SPOH_I	= 1<<17, /* Bit 17:	Send Pause on High */
				 /* 		intern generated */
	XM_MD_CAP	= 1<<16, /* Bit 16:	Check Address Pair */
	XM_MD_ENA_HASH	= 1<<15, /* Bit 15:	Enable Hashing */
	XM_MD_CSA	= 1<<14, /* Bit 14:	Check Station Address */
	XM_MD_CAA	= 1<<13, /* Bit 13:	Check Address Array */
	XM_MD_RX_MCTRL	= 1<<12, /* Bit 12:	Rx MAC Control Frame */
	XM_MD_RX_RUNT	= 1<<11, /* Bit 11:	Rx Runt Frames */
	XM_MD_RX_IRLE	= 1<<10, /* Bit 10:	Rx in Range Len Err Frame */
	XM_MD_RX_LONG	= 1<<9,  /* Bit  9:	Rx Long Frame */
	XM_MD_RX_CRCE	= 1<<8,  /* Bit  8:	Rx CRC Error Frame */
	XM_MD_RX_ERR	= 1<<7,  /* Bit  7:	Rx Error Frame */
	XM_MD_DIS_UC	= 1<<6,  /* Bit  6:	Disable Rx Unicast */
	XM_MD_DIS_MC	= 1<<5,  /* Bit  5:	Disable Rx Multicast */
	XM_MD_DIS_BC	= 1<<4,  /* Bit  4:	Disable Rx Broadcast */
	XM_MD_ENA_PROM	= 1<<3,  /* Bit  3:	Enable Promiscuous */
	XM_MD_ENA_BE	= 1<<2,  /* Bit  2:	Enable Big Endian */
	XM_MD_FTF	= 1<<1,  /* Bit  1: (sc)	Flush Tx FIFO */
	XM_MD_FRF	= 1<<0,  /* Bit  0: (sc)	Flush Rx FIFO */
};

/* pause frame generation sources / default receive-error reporting mode */
#define XM_PAUSE_MODE	(XM_MD_SPOE_E | XM_MD_SPOL_I | XM_MD_SPOH_I)
#define XM_DEF_MODE	(XM_MD_RX_RUNT | XM_MD_RX_IRLE | XM_MD_RX_LONG |\
			 XM_MD_RX_CRCE | XM_MD_RX_ERR | XM_MD_CSA)

/* XM_STAT_CMD	16 bit r/w	Statistics Command Register */
enum {
	XM_SC_SNP_RXC	= 1<<5,	/* Bit 5: (sc)	Snap Rx Counters */
	XM_SC_SNP_TXC	= 1<<4,	/* Bit 4: (sc)	Snap Tx Counters */
	XM_SC_CP_RXC	= 1<<3,	/* Bit 3:	Copy Rx Counters Continuously */
	XM_SC_CP_TXC	= 1<<2,	/* Bit 2:	Copy Tx Counters Continuously */
	XM_SC_CLR_RXC	= 1<<1,	/* Bit 1: (sc)	Clear Rx Counters */
	XM_SC_CLR_TXC	= 1<<0,	/* Bit 0: (sc)	Clear Tx Counters */
};
2296 | |||
2297 | |||
2298 | /* XM_RX_CNT_EV 32 bit r/o Rx Counter Event Register */ | ||
2299 | /* XM_RX_EV_MSK 32 bit r/w Rx Counter Event Mask */ | ||
2300 | enum { | ||
2301 | XMR_MAX_SZ_OV = 1<<31, /* Bit 31: 1024-MaxSize Rx Cnt Ov*/ | ||
2302 | XMR_1023B_OV = 1<<30, /* Bit 30: 512-1023Byte Rx Cnt Ov*/ | ||
2303 | XMR_511B_OV = 1<<29, /* Bit 29: 256-511 Byte Rx Cnt Ov*/ | ||
2304 | XMR_255B_OV = 1<<28, /* Bit 28: 128-255 Byte Rx Cnt Ov*/ | ||
2305 | XMR_127B_OV = 1<<27, /* Bit 27: 65-127 Byte Rx Cnt Ov */ | ||
2306 | XMR_64B_OV = 1<<26, /* Bit 26: 64 Byte Rx Cnt Ov */ | ||
2307 | XMR_UTIL_OV = 1<<25, /* Bit 25: Rx Util Cnt Overflow */ | ||
2308 | XMR_UTIL_UR = 1<<24, /* Bit 24: Rx Util Cnt Underrun */ | ||
2309 | XMR_CEX_ERR_OV = 1<<23, /* Bit 23: CEXT Err Cnt Ov */ | ||
2310 | XMR_FCS_ERR_OV = 1<<21, /* Bit 21: Rx FCS Error Cnt Ov */ | ||
2311 | XMR_LNG_ERR_OV = 1<<20, /* Bit 20: Rx too Long Err Cnt Ov*/ | ||
2312 | XMR_RUNT_OV = 1<<19, /* Bit 19: Runt Event Cnt Ov */ | ||
2313 | XMR_SHT_ERR_OV = 1<<18, /* Bit 18: Rx Short Ev Err Cnt Ov*/ | ||
2314 | XMR_SYM_ERR_OV = 1<<17, /* Bit 17: Rx Sym Err Cnt Ov */ | ||
2315 | XMR_CAR_ERR_OV = 1<<15, /* Bit 15: Rx Carr Ev Err Cnt Ov */ | ||
2316 | XMR_JAB_PKT_OV = 1<<14, /* Bit 14: Rx Jabb Packet Cnt Ov */ | ||
2317 | XMR_FIFO_OV = 1<<13, /* Bit 13: Rx FIFO Ov Ev Cnt Ov */ | ||
2318 | XMR_FRA_ERR_OV = 1<<12, /* Bit 12: Rx Framing Err Cnt Ov */ | ||
2319 | XMR_FMISS_OV = 1<<11, /* Bit 11: Rx Missed Ev Cnt Ov */ | ||
2320 | XMR_BURST = 1<<10, /* Bit 10: Rx Burst Event Cnt Ov */ | ||
2321 | XMR_INV_MOC = 1<<9, /* Bit 9: Rx with inv. MAC OC Ov*/ | ||
2322 | XMR_INV_MP = 1<<8, /* Bit 8: Rx inv Pause Frame Ov */ | ||
2323 | XMR_MCTRL_OV = 1<<7, /* Bit 7: Rx MAC Ctrl-F Cnt Ov */ | ||
2324 | XMR_MPAUSE_OV = 1<<6, /* Bit 6: Rx Pause MAC Ctrl-F Ov*/ | ||
2325 | XMR_UC_OK_OV = 1<<5, /* Bit 5: Rx Unicast Frame CntOv*/ | ||
2326 | XMR_MC_OK_OV = 1<<4, /* Bit 4: Rx Multicast Cnt Ov */ | ||
2327 | XMR_BC_OK_OV = 1<<3, /* Bit 3: Rx Broadcast Cnt Ov */ | ||
2328 | XMR_OK_LO_OV = 1<<2, /* Bit 2: Octets Rx OK Low CntOv*/ | ||
2329 | XMR_OK_HI_OV = 1<<1, /* Bit 1: Octets Rx OK Hi Cnt Ov*/ | ||
2330 | XMR_OK_OV = 1<<0, /* Bit 0: Frames Received Ok Ov */ | ||
2331 | }; | ||
2332 | |||
2333 | #define XMR_DEF_MSK (XMR_OK_LO_OV | XMR_OK_HI_OV) | ||
2334 | |||
/* XM_TX_CNT_EV	32 bit r/o	Tx Counter Event Register */
/* XM_TX_EV_MSK	32 bit r/w	Tx Counter Event Mask */
/* One overflow-event bit per Tx statistics counter (XM_TXF_*/XM_TXE_*). */
enum {
	XMT_MAX_SZ_OV	= 1<<25, /* Bit 25:	1024-MaxSize Tx Cnt Ov*/
	XMT_1023B_OV	= 1<<24, /* Bit 24:	512-1023Byte Tx Cnt Ov*/
	XMT_511B_OV	= 1<<23, /* Bit 23:	256-511 Byte Tx Cnt Ov*/
	XMT_255B_OV	= 1<<22, /* Bit 22:	128-255 Byte Tx Cnt Ov*/
	XMT_127B_OV	= 1<<21, /* Bit 21:	65-127 Byte Tx Cnt Ov */
	XMT_64B_OV	= 1<<20, /* Bit 20:	64 Byte Tx Cnt Ov */
	XMT_UTIL_OV	= 1<<19, /* Bit 19:	Tx Util Cnt Overflow */
	XMT_UTIL_UR	= 1<<18, /* Bit 18:	Tx Util Cnt Underrun */
	XMT_CS_ERR_OV	= 1<<17, /* Bit 17:	Tx Carr Sen Err Cnt Ov*/
	XMT_FIFO_UR_OV	= 1<<16, /* Bit 16:	Tx FIFO Ur Ev Cnt Ov */
	XMT_EX_DEF_OV	= 1<<15, /* Bit 15:	Tx Ex Deferall Cnt Ov */
	XMT_DEF		= 1<<14, /* Bit 14:	Tx Deferred Cnt Ov */
	XMT_LAT_COL_OV	= 1<<13, /* Bit 13:	Tx Late Col Cnt Ov */
	XMT_ABO_COL_OV	= 1<<12, /* Bit 12:	Tx abo dueto Ex Col Ov*/
	XMT_MUL_COL_OV	= 1<<11, /* Bit 11:	Tx Mult Col Cnt Ov */
	XMT_SNG_COL	= 1<<10, /* Bit 10:	Tx Single Col Cnt Ov */
	XMT_MCTRL_OV	= 1<<9,  /* Bit  9:	Tx MAC Ctrl Counter Ov*/
	XMT_MPAUSE	= 1<<8,  /* Bit  8:	Tx Pause MAC Ctrl-F Ov*/
	XMT_BURST	= 1<<7,  /* Bit  7:	Tx Burst Event Cnt Ov */
	XMT_LONG	= 1<<6,  /* Bit  6:	Tx Long Frame Cnt Ov */
	XMT_UC_OK_OV	= 1<<5,  /* Bit  5:	Tx Unicast Cnt Ov */
	XMT_MC_OK_OV	= 1<<4,  /* Bit  4:	Tx Multicast Cnt Ov */
	XMT_BC_OK_OV	= 1<<3,  /* Bit  3:	Tx Broadcast Cnt Ov */
	XMT_OK_LO_OV	= 1<<2,  /* Bit  2:	Octets Tx OK Low CntOv*/
	XMT_OK_HI_OV	= 1<<1,  /* Bit  1:	Octets Tx OK Hi Cnt Ov*/
	XMT_OK_OV	= 1<<0,  /* Bit  0:	Frames Tx Ok Ov */
};

/* default: only the octet-counter overflows are of interest */
#define XMT_DEF_MSK		(XMT_OK_LO_OV | XMT_OK_HI_OV)
2367 | |||
/* Hardware receive descriptor, read/written by the chip via DMA.
 * Field order and widths are dictated by the hardware; do not reorder.
 */
struct skge_rx_desc {
	u32		control;	/* ownership, length and status flags */
	u32		next_offset;	/* ring offset of the next descriptor */
	u32		dma_lo;		/* buffer bus address, low 32 bits */
	u32		dma_hi;		/* buffer bus address, high 32 bits */
	u32		status;
	u32		timestamp;
	u16		csum2;		/* hardware checksum results ... */
	u16		csum1;
	u16		csum2_start;	/* ... and the offsets they cover */
	u16		csum1_start;
};
2380 | |||
/* Hardware transmit descriptor, read/written by the chip via DMA.
 * Field order and widths are dictated by the hardware; do not reorder.
 */
struct skge_tx_desc {
	u32		control;	/* ownership, length and command flags */
	u32		next_offset;	/* ring offset of the next descriptor */
	u32		dma_lo;		/* buffer bus address, low 32 bits */
	u32		dma_hi;		/* buffer bus address, high 32 bits */
	u32		status;
	u32		csum_offs;	/* where checksumming starts */
	u16		csum_write;	/* where the checksum is inserted */
	u16		csum_start;
	u32		rsvd;
};
2392 | |||
/* Software bookkeeping for one ring slot: descriptor pointer, the skb
 * attached to it, and the DMA unmap cookies for that skb's mapping.
 */
struct skge_element {
	struct skge_element	*next;		/* circular list of slots */
	void			*desc;		/* -> hardware descriptor */
	struct sk_buff  	*skb;
	DEFINE_DMA_UNMAP_ADDR(mapaddr);
	DEFINE_DMA_UNMAP_LEN(maplen);
};

/* One Rx or Tx ring: circular list of elements plus producer/consumer
 * cursors (to_use = next free slot, to_clean = oldest in-flight slot).
 */
struct skge_ring {
	struct skge_element *to_clean;
	struct skge_element *to_use;
	struct skge_element *start;
	unsigned long	    count;
};
2407 | |||
2408 | |||
2409 | struct skge_hw { | ||
2410 | void __iomem *regs; | ||
2411 | struct pci_dev *pdev; | ||
2412 | spinlock_t hw_lock; | ||
2413 | u32 intr_mask; | ||
2414 | struct net_device *dev[2]; | ||
2415 | |||
2416 | u8 chip_id; | ||
2417 | u8 chip_rev; | ||
2418 | u8 copper; | ||
2419 | u8 ports; | ||
2420 | u8 phy_type; | ||
2421 | |||
2422 | u32 ram_size; | ||
2423 | u32 ram_offset; | ||
2424 | u16 phy_addr; | ||
2425 | spinlock_t phy_lock; | ||
2426 | struct tasklet_struct phy_task; | ||
2427 | |||
2428 | char irq_name[0]; /* skge@pci:000:04:00.0 */ | ||
2429 | }; | ||
2430 | |||
/* Requested flow-control configuration (what the user asked for). */
enum pause_control {
	FLOW_MODE_NONE 		= 1, /* No Flow-Control */
	FLOW_MODE_LOC_SEND	= 2, /* Local station sends PAUSE */
	FLOW_MODE_SYMMETRIC	= 3, /* Both stations may send PAUSE */
	FLOW_MODE_SYM_OR_REM	= 4, /* Both stations may send PAUSE or
				      * just the remote station may send PAUSE
				      */
};

/* Negotiated flow-control result (what was actually resolved). */
enum pause_status {
	FLOW_STAT_INDETERMINATED=0,	/* indeterminated */
	FLOW_STAT_NONE,			/* No Flow Control */
	FLOW_STAT_REM_SEND,		/* Remote Station sends PAUSE */
	FLOW_STAT_LOC_SEND,		/* Local station sends PAUSE */
	FLOW_STAT_SYMMETRIC,		/* Both station may send PAUSE */
};
2447 | |||
2448 | |||
/* Per-port (per-netdev) driver state. */
struct skge_port {
	struct skge_hw	     *hw;	/* back pointer to shared adapter state */
	struct net_device    *netdev;
	struct napi_struct   napi;
	int		     port;	/* index into hw->dev[] */
	u32		     msg_enable;	/* netif message level bitmask */

	struct skge_ring     tx_ring;

	/* rx_ring placed on its own cache line to keep the Rx hot path
	 * from false-sharing with the Tx fields above */
	struct skge_ring     rx_ring ____cacheline_aligned_in_smp;
	unsigned int	     rx_buf_size;

	struct timer_list    link_timer;
	enum pause_control   flow_control;	/* requested flow control */
	enum pause_status    flow_status;	/* negotiated flow control */
	u8		     blink_on;		/* LED-blink (ethtool ident) state */
	u8		     wol;		/* wake-on-LAN flags - TODO confirm encoding */
	u8		     autoneg;	/* AUTONEG_ENABLE, AUTONEG_DISABLE */
	u8		     duplex;	/* DUPLEX_HALF, DUPLEX_FULL */
	u16		     speed;	/* SPEED_1000, SPEED_100, ... */
	u32		     advertising;

	void		     *mem;	/* PCI memory for rings */
	dma_addr_t	     dma;	/* bus address of mem */
	unsigned long	     mem_size;
#ifdef CONFIG_SKGE_DEBUG
	struct dentry	     *debugfs;	/* per-port debugfs entry */
#endif
};
2478 | |||
2479 | |||
/* Register accessor for memory mapped device */
/* Thin wrappers around read{b,w,l}/write{b,w,l} on the mapped BAR
 * (hw->regs); `reg` is a byte offset into the chip's register space.
 */
static inline u32 skge_read32(const struct skge_hw *hw, int reg)
{
	return readl(hw->regs + reg);
}

static inline u16 skge_read16(const struct skge_hw *hw, int reg)
{
	return readw(hw->regs + reg);
}

static inline u8 skge_read8(const struct skge_hw *hw, int reg)
{
	return readb(hw->regs + reg);
}

static inline void skge_write32(const struct skge_hw *hw, int reg, u32 val)
{
	writel(val, hw->regs + reg);
}

static inline void skge_write16(const struct skge_hw *hw, int reg, u16 val)
{
	writew(val, hw->regs + reg);
}

static inline void skge_write8(const struct skge_hw *hw, int reg, u8 val)
{
	writeb(val, hw->regs + reg);
}
2510 | |||
/* MAC Related Registers inside the device. */
/* Per-port register banks are 0x80 bytes apart (port << 7). XMAC register
 * numbers appear to be word indices — note the << 1 converting to a byte
 * offset; TODO confirm against the XMAC II datasheet.
 */
#define SK_REG(port,reg)	(((port)<<7)+(u16)(reg))
#define SK_XMAC_REG(port, reg) \
	((BASE_XMAC_1 + (port) * (BASE_XMAC_2 - BASE_XMAC_1)) | (reg) << 1)
2515 | |||
2516 | static inline u32 xm_read32(const struct skge_hw *hw, int port, int reg) | ||
2517 | { | ||
2518 | u32 v; | ||
2519 | v = skge_read16(hw, SK_XMAC_REG(port, reg)); | ||
2520 | v |= (u32)skge_read16(hw, SK_XMAC_REG(port, reg+2)) << 16; | ||
2521 | return v; | ||
2522 | } | ||
2523 | |||
/* Read a 16-bit XMAC register of the given port. */
static inline u16 xm_read16(const struct skge_hw *hw, int port, int reg)
{
	return skge_read16(hw, SK_XMAC_REG(port,reg));
}
2528 | |||
/* Write a 32-bit XMAC register as two 16-bit accesses, low half first. */
static inline void xm_write32(const struct skge_hw *hw, int port, int r, u32 v)
{
	skge_write16(hw, SK_XMAC_REG(port,r), v & 0xffff);
	skge_write16(hw, SK_XMAC_REG(port,r+2), v >> 16);
}
2534 | |||
/* Write a 16-bit XMAC register of the given port. */
static inline void xm_write16(const struct skge_hw *hw, int port, int r, u16 v)
{
	skge_write16(hw, SK_XMAC_REG(port,r), v);
}
2539 | |||
2540 | static inline void xm_outhash(const struct skge_hw *hw, int port, int reg, | ||
2541 | const u8 *hash) | ||
2542 | { | ||
2543 | xm_write16(hw, port, reg, (u16)hash[0] | ((u16)hash[1] << 8)); | ||
2544 | xm_write16(hw, port, reg+2, (u16)hash[2] | ((u16)hash[3] << 8)); | ||
2545 | xm_write16(hw, port, reg+4, (u16)hash[4] | ((u16)hash[5] << 8)); | ||
2546 | xm_write16(hw, port, reg+6, (u16)hash[6] | ((u16)hash[7] << 8)); | ||
2547 | } | ||
2548 | |||
2549 | static inline void xm_outaddr(const struct skge_hw *hw, int port, int reg, | ||
2550 | const u8 *addr) | ||
2551 | { | ||
2552 | xm_write16(hw, port, reg, (u16)addr[0] | ((u16)addr[1] << 8)); | ||
2553 | xm_write16(hw, port, reg+2, (u16)addr[2] | ((u16)addr[3] << 8)); | ||
2554 | xm_write16(hw, port, reg+4, (u16)addr[4] | ((u16)addr[5] << 8)); | ||
2555 | } | ||
2556 | |||
/* Map a per-port GMAC register number to its PCI byte offset. */
#define SK_GMAC_REG(port,reg) \
	(BASE_GMAC_1 + (port) * (BASE_GMAC_2-BASE_GMAC_1) + (reg))

/* Read a 16-bit GMAC register of the given port. */
static inline u16 gma_read16(const struct skge_hw *hw, int port, int reg)
{
	return skge_read16(hw, SK_GMAC_REG(port,reg));
}
2564 | |||
2565 | static inline u32 gma_read32(const struct skge_hw *hw, int port, int reg) | ||
2566 | { | ||
2567 | return (u32) skge_read16(hw, SK_GMAC_REG(port,reg)) | ||
2568 | | ((u32)skge_read16(hw, SK_GMAC_REG(port,reg+4)) << 16); | ||
2569 | } | ||
2570 | |||
/* Write a 16-bit GMAC register of the given port. */
static inline void gma_write16(const struct skge_hw *hw, int port, int r, u16 v)
{
	skge_write16(hw, SK_GMAC_REG(port,r), v);
}
2575 | |||
2576 | static inline void gma_set_addr(struct skge_hw *hw, int port, int reg, | ||
2577 | const u8 *addr) | ||
2578 | { | ||
2579 | gma_write16(hw, port, reg, (u16) addr[0] | ((u16) addr[1] << 8)); | ||
2580 | gma_write16(hw, port, reg+4,(u16) addr[2] | ((u16) addr[3] << 8)); | ||
2581 | gma_write16(hw, port, reg+8,(u16) addr[4] | ((u16) addr[5] << 8)); | ||
2582 | } | ||
2583 | |||
2584 | #endif | ||
diff --git a/drivers/net/ethernet/marvell/sky2.c b/drivers/net/ethernet/marvell/sky2.c new file mode 100644 index 000000000000..57339da76326 --- /dev/null +++ b/drivers/net/ethernet/marvell/sky2.c | |||
@@ -0,0 +1,5130 @@ | |||
1 | /* | ||
2 | * New driver for Marvell Yukon 2 chipset. | ||
3 | * Based on earlier sk98lin, and skge driver. | ||
4 | * | ||
5 | * This driver intentionally does not support all the features | ||
6 | * of the original driver such as link fail-over and link management because | ||
7 | * those should be done at higher levels. | ||
8 | * | ||
9 | * Copyright (C) 2005 Stephen Hemminger <shemminger@osdl.org> | ||
10 | * | ||
11 | * This program is free software; you can redistribute it and/or modify | ||
12 | * it under the terms of the GNU General Public License as published by | ||
13 | * the Free Software Foundation; either version 2 of the License. | ||
14 | * | ||
15 | * This program is distributed in the hope that it will be useful, | ||
16 | * but WITHOUT ANY WARRANTY; without even the implied warranty of | ||
17 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the | ||
18 | * GNU General Public License for more details. | ||
19 | * | ||
20 | * You should have received a copy of the GNU General Public License | ||
21 | * along with this program; if not, write to the Free Software | ||
22 | * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. | ||
23 | */ | ||
24 | |||
25 | #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt | ||
26 | |||
27 | #include <linux/crc32.h> | ||
28 | #include <linux/kernel.h> | ||
29 | #include <linux/module.h> | ||
30 | #include <linux/netdevice.h> | ||
31 | #include <linux/dma-mapping.h> | ||
32 | #include <linux/etherdevice.h> | ||
33 | #include <linux/ethtool.h> | ||
34 | #include <linux/pci.h> | ||
35 | #include <linux/interrupt.h> | ||
36 | #include <linux/ip.h> | ||
37 | #include <linux/slab.h> | ||
38 | #include <net/ip.h> | ||
39 | #include <linux/tcp.h> | ||
40 | #include <linux/in.h> | ||
41 | #include <linux/delay.h> | ||
42 | #include <linux/workqueue.h> | ||
43 | #include <linux/if_vlan.h> | ||
44 | #include <linux/prefetch.h> | ||
45 | #include <linux/debugfs.h> | ||
46 | #include <linux/mii.h> | ||
47 | |||
48 | #include <asm/irq.h> | ||
49 | |||
50 | #include "sky2.h" | ||
51 | |||
#define DRV_NAME		"sky2"
#define DRV_VERSION		"1.29"

/*
 * The Yukon II chipset takes 64 bit command blocks (called list elements)
 * that are organized into three (receive, transmit, status) different rings
 * similar to Tigon3.
 */

#define RX_LE_SIZE		1024
#define RX_LE_BYTES		(RX_LE_SIZE*sizeof(struct sky2_rx_le))
#define RX_MAX_PENDING		(RX_LE_SIZE/6 - 2)
#define RX_DEF_PENDING		RX_MAX_PENDING

/* This is the worst case number of transmit list elements for a single skb:
   VLAN:GSO + CKSUM + Data + skb_frags * DMA */
#define MAX_SKB_TX_LE	(2 + (sizeof(dma_addr_t)/sizeof(u32))*(MAX_SKB_FRAGS+1))
#define TX_MIN_PENDING		(MAX_SKB_TX_LE+1)
#define TX_MAX_PENDING		1024
#define TX_DEF_PENDING		127

#define TX_WATCHDOG		(5 * HZ)
#define NAPI_WEIGHT		64
#define PHY_RETRIES		1000

#define SKY2_EEPROM_MAGIC	0x9955aabb

/* Advance a ring index with wraparound; ring size s must be a power of 2 */
#define RING_NEXT(x, s)	(((x)+1) & ((s)-1))

/* netif message categories enabled when the debug parameter is -1 */
static const u32 default_msg =
    NETIF_MSG_DRV | NETIF_MSG_PROBE | NETIF_MSG_LINK
    | NETIF_MSG_TIMER | NETIF_MSG_TX_ERR | NETIF_MSG_RX_ERR
    | NETIF_MSG_IFUP | NETIF_MSG_IFDOWN;

static int debug = -1;		/* defaults above */
module_param(debug, int, 0);
MODULE_PARM_DESC(debug, "Debug level (0=none,...,16=all)");

/* receive frames shorter than this are copied into a fresh skb */
static int copybreak __read_mostly = 128;
module_param(copybreak, int, 0);
MODULE_PARM_DESC(copybreak, "Receive copy threshold");

static int disable_msi = 0;
module_param(disable_msi, int, 0);
MODULE_PARM_DESC(disable_msi, "Disable Message Signaled Interrupt (MSI)");
97 | |||
/* PCI IDs this driver binds to: SysKonnect SK-9Sxx/9Exx, D-Link DGE-5xx,
 * and Marvell Yukon-2 family chips (88E80xx / 88ECxxx). The comment after
 * each entry names the marketing part number.
 */
static DEFINE_PCI_DEVICE_TABLE(sky2_id_table) = {
	{ PCI_DEVICE(PCI_VENDOR_ID_SYSKONNECT, 0x9000) }, /* SK-9Sxx */
	{ PCI_DEVICE(PCI_VENDOR_ID_SYSKONNECT, 0x9E00) }, /* SK-9Exx */
	{ PCI_DEVICE(PCI_VENDOR_ID_SYSKONNECT, 0x9E01) }, /* SK-9E21M */
	{ PCI_DEVICE(PCI_VENDOR_ID_DLINK, 0x4b00) },	/* DGE-560T */
	{ PCI_DEVICE(PCI_VENDOR_ID_DLINK, 0x4001) }, 	/* DGE-550SX */
	{ PCI_DEVICE(PCI_VENDOR_ID_DLINK, 0x4B02) },	/* DGE-560SX */
	{ PCI_DEVICE(PCI_VENDOR_ID_DLINK, 0x4B03) },	/* DGE-550T */
	{ PCI_DEVICE(PCI_VENDOR_ID_MARVELL, 0x4340) }, /* 88E8021 */
	{ PCI_DEVICE(PCI_VENDOR_ID_MARVELL, 0x4341) }, /* 88E8022 */
	{ PCI_DEVICE(PCI_VENDOR_ID_MARVELL, 0x4342) }, /* 88E8061 */
	{ PCI_DEVICE(PCI_VENDOR_ID_MARVELL, 0x4343) }, /* 88E8062 */
	{ PCI_DEVICE(PCI_VENDOR_ID_MARVELL, 0x4344) }, /* 88E8021 */
	{ PCI_DEVICE(PCI_VENDOR_ID_MARVELL, 0x4345) }, /* 88E8022 */
	{ PCI_DEVICE(PCI_VENDOR_ID_MARVELL, 0x4346) }, /* 88E8061 */
	{ PCI_DEVICE(PCI_VENDOR_ID_MARVELL, 0x4347) }, /* 88E8062 */
	{ PCI_DEVICE(PCI_VENDOR_ID_MARVELL, 0x4350) }, /* 88E8035 */
	{ PCI_DEVICE(PCI_VENDOR_ID_MARVELL, 0x4351) }, /* 88E8036 */
	{ PCI_DEVICE(PCI_VENDOR_ID_MARVELL, 0x4352) }, /* 88E8038 */
	{ PCI_DEVICE(PCI_VENDOR_ID_MARVELL, 0x4353) }, /* 88E8039 */
	{ PCI_DEVICE(PCI_VENDOR_ID_MARVELL, 0x4354) }, /* 88E8040 */
	{ PCI_DEVICE(PCI_VENDOR_ID_MARVELL, 0x4355) }, /* 88E8040T */
	{ PCI_DEVICE(PCI_VENDOR_ID_MARVELL, 0x4356) }, /* 88EC033 */
	{ PCI_DEVICE(PCI_VENDOR_ID_MARVELL, 0x4357) }, /* 88E8042 */
	{ PCI_DEVICE(PCI_VENDOR_ID_MARVELL, 0x435A) }, /* 88E8048 */
	{ PCI_DEVICE(PCI_VENDOR_ID_MARVELL, 0x4360) }, /* 88E8052 */
	{ PCI_DEVICE(PCI_VENDOR_ID_MARVELL, 0x4361) }, /* 88E8050 */
	{ PCI_DEVICE(PCI_VENDOR_ID_MARVELL, 0x4362) }, /* 88E8053 */
	{ PCI_DEVICE(PCI_VENDOR_ID_MARVELL, 0x4363) }, /* 88E8055 */
	{ PCI_DEVICE(PCI_VENDOR_ID_MARVELL, 0x4364) }, /* 88E8056 */
	{ PCI_DEVICE(PCI_VENDOR_ID_MARVELL, 0x4365) }, /* 88E8070 */
	{ PCI_DEVICE(PCI_VENDOR_ID_MARVELL, 0x4366) }, /* 88EC036 */
	{ PCI_DEVICE(PCI_VENDOR_ID_MARVELL, 0x4367) }, /* 88EC032 */
	{ PCI_DEVICE(PCI_VENDOR_ID_MARVELL, 0x4368) }, /* 88EC034 */
	{ PCI_DEVICE(PCI_VENDOR_ID_MARVELL, 0x4369) }, /* 88EC042 */
	{ PCI_DEVICE(PCI_VENDOR_ID_MARVELL, 0x436A) }, /* 88E8058 */
	{ PCI_DEVICE(PCI_VENDOR_ID_MARVELL, 0x436B) }, /* 88E8071 */
	{ PCI_DEVICE(PCI_VENDOR_ID_MARVELL, 0x436C) }, /* 88E8072 */
	{ PCI_DEVICE(PCI_VENDOR_ID_MARVELL, 0x436D) }, /* 88E8055 */
	{ PCI_DEVICE(PCI_VENDOR_ID_MARVELL, 0x4370) }, /* 88E8075 */
	{ PCI_DEVICE(PCI_VENDOR_ID_MARVELL, 0x4380) }, /* 88E8057 */
	{ PCI_DEVICE(PCI_VENDOR_ID_MARVELL, 0x4381) }, /* 88E8059 */
	{ 0 }
};

MODULE_DEVICE_TABLE(pci, sky2_id_table);
144 | |||
/* Per-port queue addresses and interrupt masks, indexed by port number
 * so the hot path never needs a port conditional.
 */
static const unsigned txqaddr[] = { Q_XA1, Q_XA2 };
static const unsigned rxqaddr[] = { Q_R1, Q_R2 };
static const u32 portirq_msk[] = { Y2_IS_PORT_1, Y2_IS_PORT_2 };

/* forward declaration; defined later in this file */
static void sky2_set_multicast(struct net_device *dev);
151 | |||
152 | /* Access to PHY via serial interconnect */ | ||
153 | static int gm_phy_write(struct sky2_hw *hw, unsigned port, u16 reg, u16 val) | ||
154 | { | ||
155 | int i; | ||
156 | |||
157 | gma_write16(hw, port, GM_SMI_DATA, val); | ||
158 | gma_write16(hw, port, GM_SMI_CTRL, | ||
159 | GM_SMI_CT_PHY_AD(PHY_ADDR_MARV) | GM_SMI_CT_REG_AD(reg)); | ||
160 | |||
161 | for (i = 0; i < PHY_RETRIES; i++) { | ||
162 | u16 ctrl = gma_read16(hw, port, GM_SMI_CTRL); | ||
163 | if (ctrl == 0xffff) | ||
164 | goto io_error; | ||
165 | |||
166 | if (!(ctrl & GM_SMI_CT_BUSY)) | ||
167 | return 0; | ||
168 | |||
169 | udelay(10); | ||
170 | } | ||
171 | |||
172 | dev_warn(&hw->pdev->dev, "%s: phy write timeout\n", hw->dev[port]->name); | ||
173 | return -ETIMEDOUT; | ||
174 | |||
175 | io_error: | ||
176 | dev_err(&hw->pdev->dev, "%s: phy I/O error\n", hw->dev[port]->name); | ||
177 | return -EIO; | ||
178 | } | ||
179 | |||
180 | static int __gm_phy_read(struct sky2_hw *hw, unsigned port, u16 reg, u16 *val) | ||
181 | { | ||
182 | int i; | ||
183 | |||
184 | gma_write16(hw, port, GM_SMI_CTRL, GM_SMI_CT_PHY_AD(PHY_ADDR_MARV) | ||
185 | | GM_SMI_CT_REG_AD(reg) | GM_SMI_CT_OP_RD); | ||
186 | |||
187 | for (i = 0; i < PHY_RETRIES; i++) { | ||
188 | u16 ctrl = gma_read16(hw, port, GM_SMI_CTRL); | ||
189 | if (ctrl == 0xffff) | ||
190 | goto io_error; | ||
191 | |||
192 | if (ctrl & GM_SMI_CT_RD_VAL) { | ||
193 | *val = gma_read16(hw, port, GM_SMI_DATA); | ||
194 | return 0; | ||
195 | } | ||
196 | |||
197 | udelay(10); | ||
198 | } | ||
199 | |||
200 | dev_warn(&hw->pdev->dev, "%s: phy read timeout\n", hw->dev[port]->name); | ||
201 | return -ETIMEDOUT; | ||
202 | io_error: | ||
203 | dev_err(&hw->pdev->dev, "%s: phy I/O error\n", hw->dev[port]->name); | ||
204 | return -EIO; | ||
205 | } | ||
206 | |||
207 | static inline u16 gm_phy_read(struct sky2_hw *hw, unsigned port, u16 reg) | ||
208 | { | ||
209 | u16 v; | ||
210 | __gm_phy_read(hw, port, reg, &v); | ||
211 | return v; | ||
212 | } | ||
213 | |||
214 | |||
/* Bring the chip onto main (VCC) power: switch the supply from VAUX,
 * ungate the core clocks, and on chips with advanced power control clear
 * the ASPM/power-management registers that interfere with operation.
 * Ends by turning on the "driver loaded" status LED.
 */
static void sky2_power_on(struct sky2_hw *hw)
{
	/* switch power to VCC (WA for VAUX problem) */
	sky2_write8(hw, B0_POWER_CTRL,
		    PC_VAUX_ENA | PC_VCC_ENA | PC_VAUX_OFF | PC_VCC_ON);

	/* disable Core Clock Division, */
	sky2_write32(hw, B2_Y2_CLK_CTRL, Y2_CLK_DIV_DIS);

	if (hw->chip_id == CHIP_ID_YUKON_XL && hw->chip_rev > CHIP_REV_YU_XL_A1)
		/* enable bits are inverted */
		sky2_write8(hw, B2_Y2_CLK_GATE,
			    Y2_PCI_CLK_LNK1_DIS | Y2_COR_CLK_LNK1_DIS |
			    Y2_CLK_GAT_LNK1_DIS | Y2_PCI_CLK_LNK2_DIS |
			    Y2_COR_CLK_LNK2_DIS | Y2_CLK_GAT_LNK2_DIS);
	else
		sky2_write8(hw, B2_Y2_CLK_GATE, 0);

	if (hw->flags & SKY2_HW_ADV_POWER_CTL) {
		u32 reg;

		sky2_pci_write32(hw, PCI_DEV_REG3, 0);

		reg = sky2_pci_read32(hw, PCI_DEV_REG4);
		/* set all bits to 0 except bits 15..12 and 8 */
		reg &= P_ASPM_CONTROL_MSK;
		sky2_pci_write32(hw, PCI_DEV_REG4, reg);

		reg = sky2_pci_read32(hw, PCI_DEV_REG5);
		/* set all bits to 0 except bits 28 & 27 */
		reg &= P_CTL_TIM_VMAIN_AV_MSK;
		sky2_pci_write32(hw, PCI_DEV_REG5, reg);

		sky2_pci_write32(hw, PCI_CFG_REG_1, 0);

		sky2_write16(hw, B0_CTST, Y2_HW_WOL_ON);

		/* Enable workaround for dev 4.107 on Yukon-Ultra & Extreme */
		reg = sky2_read32(hw, B2_GP_IO);
		reg |= GLB_GPIO_STAT_RACE_DIS;
		sky2_write32(hw, B2_GP_IO, reg);

		/* dummy read-back; presumably flushes the posted write — confirm */
		sky2_read32(hw, B2_GP_IO);
	}

	/* Turn on "driver loaded" LED */
	sky2_write16(hw, B0_CTST, Y2_LED_STAT_ON);
}
263 | |||
/* Prepare for a low-power state: re-gate the clocks and, when auxiliary
 * power is available and the device can signal PME from D3cold, switch
 * the supply from VCC to VAUX. Also turns off the "driver loaded" LED.
 */
static void sky2_power_aux(struct sky2_hw *hw)
{
	if (hw->chip_id == CHIP_ID_YUKON_XL && hw->chip_rev > CHIP_REV_YU_XL_A1)
		sky2_write8(hw, B2_Y2_CLK_GATE, 0);
	else
		/* enable bits are inverted */
		sky2_write8(hw, B2_Y2_CLK_GATE,
			    Y2_PCI_CLK_LNK1_DIS | Y2_COR_CLK_LNK1_DIS |
			    Y2_CLK_GAT_LNK1_DIS | Y2_PCI_CLK_LNK2_DIS |
			    Y2_COR_CLK_LNK2_DIS | Y2_CLK_GAT_LNK2_DIS);

	/* switch power to VAUX if supported and PME from D3cold */
	if ( (sky2_read32(hw, B0_CTST) & Y2_VAUX_AVAIL) &&
	     pci_pme_capable(hw->pdev, PCI_D3cold))
		sky2_write8(hw, B0_POWER_CTRL,
			    (PC_VAUX_ENA | PC_VCC_ENA |
			     PC_VAUX_ON | PC_VCC_OFF));

	/* turn off "driver loaded LED" */
	sky2_write16(hw, B0_CTST, Y2_LED_STAT_OFF);
}
285 | |||
286 | static void sky2_gmac_reset(struct sky2_hw *hw, unsigned port) | ||
287 | { | ||
288 | u16 reg; | ||
289 | |||
290 | /* disable all GMAC IRQ's */ | ||
291 | sky2_write8(hw, SK_REG(port, GMAC_IRQ_MSK), 0); | ||
292 | |||
293 | gma_write16(hw, port, GM_MC_ADDR_H1, 0); /* clear MC hash */ | ||
294 | gma_write16(hw, port, GM_MC_ADDR_H2, 0); | ||
295 | gma_write16(hw, port, GM_MC_ADDR_H3, 0); | ||
296 | gma_write16(hw, port, GM_MC_ADDR_H4, 0); | ||
297 | |||
298 | reg = gma_read16(hw, port, GM_RX_CTRL); | ||
299 | reg |= GM_RXCR_UCF_ENA | GM_RXCR_MCF_ENA; | ||
300 | gma_write16(hw, port, GM_RX_CTRL, reg); | ||
301 | } | ||
302 | |||
/* enum flow_control -> PHY autoneg advertisement bits (copper links) */
static const u16 copper_fc_adv[] = {
	[FC_NONE]	= 0,
	[FC_TX]		= PHY_M_AN_ASP,
	[FC_RX]		= PHY_M_AN_PC,
	[FC_BOTH]	= PHY_M_AN_PC | PHY_M_AN_ASP,
};

/* enum flow_control -> advertisement bits when using 1000BaseX fiber */
static const u16 fiber_fc_adv[] = {
	[FC_NONE] = PHY_M_P_NO_PAUSE_X,
	[FC_TX]   = PHY_M_P_ASYM_MD_X,
	[FC_RX]	  = PHY_M_P_SYM_MD_X,
	[FC_BOTH] = PHY_M_P_BOTH_MD_X,
};

/* enum flow_control -> GMAC general purpose control disable bits */
static const u16 gm_fc_disable[] = {
	[FC_NONE] = GM_GPCR_FC_RX_DIS | GM_GPCR_FC_TX_DIS,
	[FC_TX]	  = GM_GPCR_FC_RX_DIS,
	[FC_RX]	  = GM_GPCR_FC_TX_DIS,
	[FC_BOTH] = 0,
};
326 | |||
327 | |||
/* Full PHY bring-up for @port: configures downshift, automatic crossover,
 * copper vs. fiber mode, advertised speed/duplex and flow control, LED
 * behavior, and a series of chip/revision-specific AFE workarounds, then
 * enables the PHY interrupt. Callers take sky2->phy_lock around this
 * (see sky2_phy_reinit()); the register sequence is order-sensitive.
 */
static void sky2_phy_init(struct sky2_hw *hw, unsigned port)
{
	struct sky2_port *sky2 = netdev_priv(hw->dev[port]);
	u16 ctrl, ct1000, adv, pg, ledctrl, ledover, reg;

	/* downshift setup for older PHYs lives in the extended control reg */
	if ( (sky2->flags & SKY2_FLAG_AUTO_SPEED) &&
	    !(hw->flags & SKY2_HW_NEWER_PHY)) {
		u16 ectrl = gm_phy_read(hw, port, PHY_MARV_EXT_CTRL);

		ectrl &= ~(PHY_M_EC_M_DSC_MSK | PHY_M_EC_S_DSC_MSK |
			   PHY_M_EC_MAC_S_MSK);
		ectrl |= PHY_M_EC_MAC_S(MAC_TX_CLK_25_MHZ);

		/* on PHY 88E1040 Rev.D0 (and newer) downshift control changed */
		if (hw->chip_id == CHIP_ID_YUKON_EC)
			/* set downshift counter to 3x and enable downshift */
			ectrl |= PHY_M_EC_DSC_2(2) | PHY_M_EC_DOWN_S_ENA;
		else
			/* set master & slave downshift counter to 1x */
			ectrl |= PHY_M_EC_M_DSC(0) | PHY_M_EC_S_DSC(1);

		gm_phy_write(hw, port, PHY_MARV_EXT_CTRL, ectrl);
	}

	ctrl = gm_phy_read(hw, port, PHY_MARV_PHY_CTRL);
	if (sky2_is_copper(hw)) {
		if (!(hw->flags & SKY2_HW_GIGABIT)) {
			/* enable automatic crossover (FE PHYs: field is
			 * shifted right one bit relative to gigabit PHYs) */
			ctrl |= PHY_M_PC_MDI_XMODE(PHY_M_PC_ENA_AUTO) >> 1;

			if (hw->chip_id == CHIP_ID_YUKON_FE_P &&
			    hw->chip_rev == CHIP_REV_YU_FE2_A0) {
				u16 spec;

				/* Enable Class A driver for FE+ A0 */
				spec = gm_phy_read(hw, port, PHY_MARV_FE_SPEC_2);
				spec |= PHY_M_FESC_SEL_CL_A;
				gm_phy_write(hw, port, PHY_MARV_FE_SPEC_2, spec);
			}
		} else {
			if (hw->chip_id >= CHIP_ID_YUKON_OPT) {
				u16 ctrl2 = gm_phy_read(hw, port, PHY_MARV_EXT_CTRL_2);

				/* enable PHY Reverse Auto-Negotiation */
				ctrl2 |= 1u << 13;

				/* Write PHY changes (SW-reset must follow) */
				gm_phy_write(hw, port, PHY_MARV_EXT_CTRL_2, ctrl2);
			}


			/* disable energy detect */
			ctrl &= ~PHY_M_PC_EN_DET_MSK;

			/* enable automatic crossover */
			ctrl |= PHY_M_PC_MDI_XMODE(PHY_M_PC_ENA_AUTO);

			/* downshift on PHY 88E1112 and 88E1149 is changed */
			if ( (sky2->flags & SKY2_FLAG_AUTO_SPEED) &&
			    (hw->flags & SKY2_HW_NEWER_PHY)) {
				/* set downshift counter to 3x and enable downshift */
				ctrl &= ~PHY_M_PC_DSC_MSK;
				ctrl |= PHY_M_PC_DSC(2) | PHY_M_PC_DOWN_S_ENA;
			}
		}
	} else {
		/* workaround for deviation #4.88 (CRC errors) */
		/* disable Automatic Crossover */

		ctrl &= ~PHY_M_PC_MDIX_MSK;
	}

	gm_phy_write(hw, port, PHY_MARV_PHY_CTRL, ctrl);

	/* special setup for PHY 88E1112 Fiber */
	if (hw->chip_id == CHIP_ID_YUKON_XL && (hw->flags & SKY2_HW_FIBRE_PHY)) {
		/* save current page selection so it can be restored below */
		pg = gm_phy_read(hw, port, PHY_MARV_EXT_ADR);

		/* Fiber: select 1000BASE-X only mode MAC Specific Ctrl Reg. */
		gm_phy_write(hw, port, PHY_MARV_EXT_ADR, 2);
		ctrl = gm_phy_read(hw, port, PHY_MARV_PHY_CTRL);
		ctrl &= ~PHY_M_MAC_MD_MSK;
		ctrl |= PHY_M_MAC_MODE_SEL(PHY_M_MAC_MD_1000BX);
		gm_phy_write(hw, port, PHY_MARV_PHY_CTRL, ctrl);

		if (hw->pmd_type == 'P') {
			/* select page 1 to access Fiber registers */
			gm_phy_write(hw, port, PHY_MARV_EXT_ADR, 1);

			/* for SFP-module set SIGDET polarity to low */
			ctrl = gm_phy_read(hw, port, PHY_MARV_PHY_CTRL);
			ctrl |= PHY_M_FIB_SIGD_POL;
			gm_phy_write(hw, port, PHY_MARV_PHY_CTRL, ctrl);
		}

		/* restore the saved page register */
		gm_phy_write(hw, port, PHY_MARV_EXT_ADR, pg);
	}

	/* build up control, 1000T control, advertisement and GMAC GP values */
	ctrl = PHY_CT_RESET;
	ct1000 = 0;
	adv = PHY_AN_CSMA;
	reg = 0;

	if (sky2->flags & SKY2_FLAG_AUTO_SPEED) {
		if (sky2_is_copper(hw)) {
			if (sky2->advertising & ADVERTISED_1000baseT_Full)
				ct1000 |= PHY_M_1000C_AFD;
			if (sky2->advertising & ADVERTISED_1000baseT_Half)
				ct1000 |= PHY_M_1000C_AHD;
			if (sky2->advertising & ADVERTISED_100baseT_Full)
				adv |= PHY_M_AN_100_FD;
			if (sky2->advertising & ADVERTISED_100baseT_Half)
				adv |= PHY_M_AN_100_HD;
			if (sky2->advertising & ADVERTISED_10baseT_Full)
				adv |= PHY_M_AN_10_FD;
			if (sky2->advertising & ADVERTISED_10baseT_Half)
				adv |= PHY_M_AN_10_HD;

		} else {	/* special defines for FIBER (88E1040S only) */
			if (sky2->advertising & ADVERTISED_1000baseT_Full)
				adv |= PHY_M_AN_1000X_AFD;
			if (sky2->advertising & ADVERTISED_1000baseT_Half)
				adv |= PHY_M_AN_1000X_AHD;
		}

		/* Restart Auto-negotiation */
		ctrl |= PHY_CT_ANE | PHY_CT_RE_CFG;
	} else {
		/* forced speed/duplex settings */
		ct1000 = PHY_M_1000C_MSE;

		/* Disable auto update for duplex flow control and duplex */
		reg |= GM_GPCR_AU_DUP_DIS | GM_GPCR_AU_SPD_DIS;

		switch (sky2->speed) {
		case SPEED_1000:
			ctrl |= PHY_CT_SP1000;
			reg |= GM_GPCR_SPEED_1000;
			break;
		case SPEED_100:
			ctrl |= PHY_CT_SP100;
			reg |= GM_GPCR_SPEED_100;
			break;
		}

		if (sky2->duplex == DUPLEX_FULL) {
			reg |= GM_GPCR_DUP_FULL;
			ctrl |= PHY_CT_DUP_MD;
		} else if (sky2->speed < SPEED_1000)
			/* half duplex below gigabit: no pause frames */
			sky2->flow_mode = FC_NONE;
	}

	if (sky2->flags & SKY2_FLAG_AUTO_PAUSE) {
		if (sky2_is_copper(hw))
			adv |= copper_fc_adv[sky2->flow_mode];
		else
			adv |= fiber_fc_adv[sky2->flow_mode];
	} else {
		reg |= GM_GPCR_AU_FCT_DIS;
		reg |= gm_fc_disable[sky2->flow_mode];

		/* Forward pause packets to GMAC? */
		if (sky2->flow_mode & FC_RX)
			sky2_write8(hw, SK_REG(port, GMAC_CTRL), GMC_PAUSE_ON);
		else
			sky2_write8(hw, SK_REG(port, GMAC_CTRL), GMC_PAUSE_OFF);
	}

	gma_write16(hw, port, GM_GP_CTRL, reg);

	if (hw->flags & SKY2_HW_GIGABIT)
		gm_phy_write(hw, port, PHY_MARV_1000T_CTRL, ct1000);

	gm_phy_write(hw, port, PHY_MARV_AUNE_ADV, adv);
	gm_phy_write(hw, port, PHY_MARV_CTRL, ctrl);

	/* Setup Phy LED's */
	ledctrl = PHY_M_LED_PULS_DUR(PULS_170MS);
	ledover = 0;

	switch (hw->chip_id) {
	case CHIP_ID_YUKON_FE:
		/* on 88E3082 these bits are at 11..9 (shifted left) */
		ledctrl |= PHY_M_LED_BLINK_RT(BLINK_84MS) << 1;

		ctrl = gm_phy_read(hw, port, PHY_MARV_FE_LED_PAR);

		/* delete ACT LED control bits */
		ctrl &= ~PHY_M_FELP_LED1_MSK;
		/* change ACT LED control to blink mode */
		ctrl |= PHY_M_FELP_LED1_CTRL(LED_PAR_CTRL_ACT_BL);
		gm_phy_write(hw, port, PHY_MARV_FE_LED_PAR, ctrl);
		break;

	case CHIP_ID_YUKON_FE_P:
		/* Enable Link Partner Next Page */
		ctrl = gm_phy_read(hw, port, PHY_MARV_PHY_CTRL);
		ctrl |= PHY_M_PC_ENA_LIP_NP;

		/* disable Energy Detect and enable scrambler */
		ctrl &= ~(PHY_M_PC_ENA_ENE_DT | PHY_M_PC_DIS_SCRAMB);
		gm_phy_write(hw, port, PHY_MARV_PHY_CTRL, ctrl);

		/* set LED2 -> ACT, LED1 -> LINK, LED0 -> SPEED */
		ctrl = PHY_M_FELP_LED2_CTRL(LED_PAR_CTRL_ACT_BL) |
			PHY_M_FELP_LED1_CTRL(LED_PAR_CTRL_LINK) |
			PHY_M_FELP_LED0_CTRL(LED_PAR_CTRL_SPEED);

		gm_phy_write(hw, port, PHY_MARV_FE_LED_PAR, ctrl);
		break;

	case CHIP_ID_YUKON_XL:
		pg = gm_phy_read(hw, port, PHY_MARV_EXT_ADR);

		/* select page 3 to access LED control register */
		gm_phy_write(hw, port, PHY_MARV_EXT_ADR, 3);

		/* set LED Function Control register */
		gm_phy_write(hw, port, PHY_MARV_PHY_CTRL,
			     (PHY_M_LEDC_LOS_CTRL(1) |	/* LINK/ACT */
			      PHY_M_LEDC_INIT_CTRL(7) |	/* 10 Mbps */
			      PHY_M_LEDC_STA1_CTRL(7) |	/* 100 Mbps */
			      PHY_M_LEDC_STA0_CTRL(7)));	/* 1000 Mbps */

		/* set Polarity Control register */
		gm_phy_write(hw, port, PHY_MARV_PHY_STAT,
			     (PHY_M_POLC_LS1_P_MIX(4) |
			      PHY_M_POLC_IS0_P_MIX(4) |
			      PHY_M_POLC_LOS_CTRL(2) |
			      PHY_M_POLC_INIT_CTRL(2) |
			      PHY_M_POLC_STA1_CTRL(2) |
			      PHY_M_POLC_STA0_CTRL(2)));

		/* restore page register */
		gm_phy_write(hw, port, PHY_MARV_EXT_ADR, pg);
		break;

	case CHIP_ID_YUKON_EC_U:
	case CHIP_ID_YUKON_EX:
	case CHIP_ID_YUKON_SUPR:
		pg = gm_phy_read(hw, port, PHY_MARV_EXT_ADR);

		/* select page 3 to access LED control register */
		gm_phy_write(hw, port, PHY_MARV_EXT_ADR, 3);

		/* set LED Function Control register */
		gm_phy_write(hw, port, PHY_MARV_PHY_CTRL,
			     (PHY_M_LEDC_LOS_CTRL(1) |	/* LINK/ACT */
			      PHY_M_LEDC_INIT_CTRL(8) |	/* 10 Mbps */
			      PHY_M_LEDC_STA1_CTRL(7) |	/* 100 Mbps */
			      PHY_M_LEDC_STA0_CTRL(7)));/* 1000 Mbps */

		/* set Blink Rate in LED Timer Control Register */
		gm_phy_write(hw, port, PHY_MARV_INT_MASK,
			     ledctrl | PHY_M_LED_BLINK_RT(BLINK_84MS));
		/* restore page register */
		gm_phy_write(hw, port, PHY_MARV_EXT_ADR, pg);
		break;

	default:
		/* set Tx LED (LED_TX) to blink mode on Rx OR Tx activity */
		ledctrl |= PHY_M_LED_BLINK_RT(BLINK_84MS) | PHY_M_LEDC_TX_CTRL;

		/* turn off the Rx LED (LED_RX) */
		ledover |= PHY_M_LED_MO_RX(MO_LED_OFF);
	}

	if (hw->chip_id == CHIP_ID_YUKON_EC_U || hw->chip_id == CHIP_ID_YUKON_UL_2) {
		/* apply fixes in PHY AFE */
		gm_phy_write(hw, port, PHY_MARV_EXT_ADR, 255);

		/* increase differential signal amplitude in 10BASE-T */
		gm_phy_write(hw, port, 0x18, 0xaa99);
		gm_phy_write(hw, port, 0x17, 0x2011);

		if (hw->chip_id == CHIP_ID_YUKON_EC_U) {
			/* fix for IEEE A/B Symmetry failure in 1000BASE-T */
			gm_phy_write(hw, port, 0x18, 0xa204);
			gm_phy_write(hw, port, 0x17, 0x2002);
		}

		/* set page register to 0 */
		gm_phy_write(hw, port, PHY_MARV_EXT_ADR, 0);
	} else if (hw->chip_id == CHIP_ID_YUKON_FE_P &&
		   hw->chip_rev == CHIP_REV_YU_FE2_A0) {
		/* apply workaround for integrated resistors calibration */
		gm_phy_write(hw, port, PHY_MARV_PAGE_ADDR, 17);
		gm_phy_write(hw, port, PHY_MARV_PAGE_DATA, 0x3f60);
	} else if (hw->chip_id == CHIP_ID_YUKON_OPT && hw->chip_rev == 0) {
		/* apply fixes in PHY AFE */
		gm_phy_write(hw, port, PHY_MARV_EXT_ADR, 0x00ff);

		/* apply RDAC termination workaround */
		gm_phy_write(hw, port, 24, 0x2800);
		gm_phy_write(hw, port, 23, 0x2001);

		/* set page register back to 0 */
		gm_phy_write(hw, port, PHY_MARV_EXT_ADR, 0);
	} else if (hw->chip_id != CHIP_ID_YUKON_EX &&
		   hw->chip_id < CHIP_ID_YUKON_SUPR) {
		/* no effect on Yukon-XL */
		gm_phy_write(hw, port, PHY_MARV_LED_CTRL, ledctrl);

		if (!(sky2->flags & SKY2_FLAG_AUTO_SPEED) ||
		    sky2->speed == SPEED_100) {
			/* turn on 100 Mbps LED (LED_LINK100) */
			ledover |= PHY_M_LED_MO_100(MO_LED_ON);
		}

		if (ledover)
			gm_phy_write(hw, port, PHY_MARV_LED_OVER, ledover);

	} else if (hw->chip_id == CHIP_ID_YUKON_PRM &&
		   (sky2_read8(hw, B2_MAC_CFG) & 0xf) == 0x7) {
		int i;
		/* This a phy register setup workaround copied from vendor driver. */
		static const struct {
			u16 reg, val;
		} eee_afe[] = {
			{ 0x156, 0x58ce },
			{ 0x153, 0x99eb },
			{ 0x141, 0x8064 },
		/*	{ 0x155, 0x130b },*/
			{ 0x000, 0x0000 },
			{ 0x151, 0x8433 },
			{ 0x14b, 0x8c44 },
			{ 0x14c, 0x0f90 },
			{ 0x14f, 0x39aa },
		/*	{ 0x154, 0x2f39 },*/
			{ 0x14d, 0xba33 },
			{ 0x144, 0x0048 },
			{ 0x152, 0x2010 },
		/*	{ 0x158, 0x1223 },*/
			{ 0x140, 0x4444 },
			{ 0x154, 0x2f3b },
			{ 0x158, 0xb203 },
			{ 0x157, 0x2029 },
		};

		/* Start Workaround for OptimaEEE Rev.Z0 */
		gm_phy_write(hw, port, PHY_MARV_EXT_ADR, 0x00fb);

		gm_phy_write(hw, port, 1, 0x4099);
		gm_phy_write(hw, port, 3, 0x1120);
		gm_phy_write(hw, port, 11, 0x113c);
		gm_phy_write(hw, port, 14, 0x8100);
		gm_phy_write(hw, port, 15, 0x112a);
		gm_phy_write(hw, port, 17, 0x1008);

		gm_phy_write(hw, port, PHY_MARV_EXT_ADR, 0x00fc);
		gm_phy_write(hw, port, 1, 0x20b0);

		gm_phy_write(hw, port, PHY_MARV_EXT_ADR, 0x00ff);

		for (i = 0; i < ARRAY_SIZE(eee_afe); i++) {
			/* apply AFE settings */
			gm_phy_write(hw, port, 17, eee_afe[i].val);
			gm_phy_write(hw, port, 16, eee_afe[i].reg | 1u<<13);
		}

		/* End Workaround for OptimaEEE */
		gm_phy_write(hw, port, PHY_MARV_EXT_ADR, 0);

		/* Enable 10Base-Te (EEE) */
		if (hw->chip_id >= CHIP_ID_YUKON_PRM) {
			reg = gm_phy_read(hw, port, PHY_MARV_EXT_CTRL);
			gm_phy_write(hw, port, PHY_MARV_EXT_CTRL,
				     reg | PHY_M_10B_TE_ENABLE);
		}
	}

	/* Enable phy interrupt on auto-negotiation complete (or link up) */
	if (sky2->flags & SKY2_FLAG_AUTO_SPEED)
		gm_phy_write(hw, port, PHY_MARV_INT_MASK, PHY_M_IS_AN_COMPL);
	else
		gm_phy_write(hw, port, PHY_MARV_INT_MASK, PHY_M_DEF_MSK);
}
705 | |||
/* PCI_DEV_REG1 bits to power down / put in coma the PHY of each port */
static const u32 phy_power[] = { PCI_Y2_PHY1_POWD, PCI_Y2_PHY2_POWD };
static const u32 coma_mode[] = { PCI_Y2_PHY1_COMA, PCI_Y2_PHY2_COMA };
708 | |||
/* Power up the PHY for @port: clear its power-down bit in PCI_DEV_REG1
 * (setting coma mode on Yukon-XL newer than rev A1), then release the
 * PHY from reset. PCI config-space writes are only effective between
 * TST_CFG_WRITE_ON and TST_CFG_WRITE_OFF.
 */
static void sky2_phy_power_up(struct sky2_hw *hw, unsigned port)
{
	u32 reg1;

	sky2_write8(hw, B2_TST_CTRL1, TST_CFG_WRITE_ON);
	reg1 = sky2_pci_read32(hw, PCI_DEV_REG1);
	reg1 &= ~phy_power[port];

	if (hw->chip_id == CHIP_ID_YUKON_XL && hw->chip_rev > CHIP_REV_YU_XL_A1)
		reg1 |= coma_mode[port];

	sky2_pci_write32(hw, PCI_DEV_REG1, reg1);
	sky2_write8(hw, B2_TST_CTRL1, TST_CFG_WRITE_OFF);
	/* dummy read-back; presumably flushes the posted write — confirm */
	sky2_pci_read32(hw, PCI_DEV_REG1);

	if (hw->chip_id == CHIP_ID_YUKON_FE)
		gm_phy_write(hw, port, PHY_MARV_CTRL, PHY_CT_ANE);
	else if (hw->flags & SKY2_HW_ADV_POWER_CTL)
		sky2_write8(hw, SK_REG(port, GPHY_CTRL), GPC_RST_CLR);
}
729 | |||
/* Power down the PHY for @port: release it from reset first so it can
 * accept the power-down configuration, allow GMII power down on newer
 * PHYs, force the MAC to a quiescent state, put the PHY into IEEE power
 * down mode where supported, and finally set its power-down bit in
 * PCI_DEV_REG1 (bracketed by TST_CFG_WRITE_ON/OFF).
 */
static void sky2_phy_power_down(struct sky2_hw *hw, unsigned port)
{
	u32 reg1;
	u16 ctrl;

	/* release GPHY Control reset */
	sky2_write8(hw, SK_REG(port, GPHY_CTRL), GPC_RST_CLR);

	/* release GMAC reset */
	sky2_write8(hw, SK_REG(port, GMAC_CTRL), GMC_RST_CLR);

	if (hw->flags & SKY2_HW_NEWER_PHY) {
		/* select page 2 to access MAC control register */
		gm_phy_write(hw, port, PHY_MARV_EXT_ADR, 2);

		ctrl = gm_phy_read(hw, port, PHY_MARV_PHY_CTRL);
		/* allow GMII Power Down */
		ctrl &= ~PHY_M_MAC_GMIF_PUP;
		gm_phy_write(hw, port, PHY_MARV_PHY_CTRL, ctrl);

		/* set page register back to 0 */
		gm_phy_write(hw, port, PHY_MARV_EXT_ADR, 0);
	}

	/* setup General Purpose Control Register */
	gma_write16(hw, port, GM_GP_CTRL,
		    GM_GPCR_FL_PASS | GM_GPCR_SPEED_100 |
		    GM_GPCR_AU_DUP_DIS | GM_GPCR_AU_FCT_DIS |
		    GM_GPCR_AU_SPD_DIS);

	if (hw->chip_id != CHIP_ID_YUKON_EC) {
		if (hw->chip_id == CHIP_ID_YUKON_EC_U) {
			/* select page 2 to access MAC control register */
			gm_phy_write(hw, port, PHY_MARV_EXT_ADR, 2);

			ctrl = gm_phy_read(hw, port, PHY_MARV_PHY_CTRL);
			/* enable Power Down */
			ctrl |= PHY_M_PC_POW_D_ENA;
			gm_phy_write(hw, port, PHY_MARV_PHY_CTRL, ctrl);

			/* set page register back to 0 */
			gm_phy_write(hw, port, PHY_MARV_EXT_ADR, 0);
		}

		/* set IEEE compatible Power Down Mode (dev. #4.99) */
		gm_phy_write(hw, port, PHY_MARV_CTRL, PHY_CT_PDOWN);
	}

	sky2_write8(hw, B2_TST_CTRL1, TST_CFG_WRITE_ON);
	reg1 = sky2_pci_read32(hw, PCI_DEV_REG1);
	reg1 |= phy_power[port];	/* set PHY to PowerDown/COMA Mode */
	sky2_pci_write32(hw, PCI_DEV_REG1, reg1);
	sky2_write8(hw, B2_TST_CTRL1, TST_CFG_WRITE_OFF);
}
784 | |||
785 | /* configure IPG according to used link speed */ | ||
786 | static void sky2_set_ipg(struct sky2_port *sky2) | ||
787 | { | ||
788 | u16 reg; | ||
789 | |||
790 | reg = gma_read16(sky2->hw, sky2->port, GM_SERIAL_MODE); | ||
791 | reg &= ~GM_SMOD_IPG_MSK; | ||
792 | if (sky2->speed > SPEED_100) | ||
793 | reg |= IPG_DATA_VAL(IPG_DATA_DEF_1000); | ||
794 | else | ||
795 | reg |= IPG_DATA_VAL(IPG_DATA_DEF_10_100); | ||
796 | gma_write16(sky2->hw, sky2->port, GM_SERIAL_MODE, reg); | ||
797 | } | ||
798 | |||
799 | /* Enable Rx/Tx */ | ||
800 | static void sky2_enable_rx_tx(struct sky2_port *sky2) | ||
801 | { | ||
802 | struct sky2_hw *hw = sky2->hw; | ||
803 | unsigned port = sky2->port; | ||
804 | u16 reg; | ||
805 | |||
806 | reg = gma_read16(hw, port, GM_GP_CTRL); | ||
807 | reg |= GM_GPCR_RX_ENA | GM_GPCR_TX_ENA; | ||
808 | gma_write16(hw, port, GM_GP_CTRL, reg); | ||
809 | } | ||
810 | |||
/* Force a renegotiation.
 *
 * Re-runs PHY initialization and then re-enables the MAC Rx/Tx
 * enables, all under the PHY spinlock so it cannot race with the
 * interrupt-side PHY handling.
 */
static void sky2_phy_reinit(struct sky2_port *sky2)
{
	spin_lock_bh(&sky2->phy_lock);
	sky2_phy_init(sky2->hw, sky2->port);
	sky2_enable_rx_tx(sky2);
	spin_unlock_bh(&sky2->phy_lock);
}
819 | |||
/* Put device in state to listen for Wake On Lan.
 *
 * Brings the MAC/PHY out of reset, temporarily forces the link down
 * to 10/100 with no flow control (gigabit advertisement is masked
 * off, then restored; sky2_reset re-enables it on resume), programs
 * the WOL MAC address, and arms the link-change/magic-packet wake
 * units according to sky2->wol.  Finally the receiver FIFO is held
 * in reset so no normal traffic is processed while armed.
 */
static void sky2_wol_init(struct sky2_port *sky2)
{
	struct sky2_hw *hw = sky2->hw;
	unsigned port = sky2->port;
	enum flow_control save_mode;
	u16 ctrl;

	/* Bring hardware out of reset */
	sky2_write16(hw, B0_CTST, CS_RST_CLR);
	sky2_write16(hw, SK_REG(port, GMAC_LINK_CTRL), GMLC_RST_CLR);

	sky2_write8(hw, SK_REG(port, GPHY_CTRL), GPC_RST_CLR);
	sky2_write8(hw, SK_REG(port, GMAC_CTRL), GMC_RST_CLR);

	/* Force to 10/100
	 * sky2_reset will re-enable on resume
	 */
	save_mode = sky2->flow_mode;
	ctrl = sky2->advertising;

	sky2->advertising &= ~(ADVERTISED_1000baseT_Half|ADVERTISED_1000baseT_Full);
	sky2->flow_mode = FC_NONE;

	/* PHY setup must see the reduced advertisement, hence the
	 * save/restore dance around sky2_phy_init() */
	spin_lock_bh(&sky2->phy_lock);
	sky2_phy_power_up(hw, port);
	sky2_phy_init(hw, port);
	spin_unlock_bh(&sky2->phy_lock);

	sky2->flow_mode = save_mode;
	sky2->advertising = ctrl;

	/* Set GMAC to no flow control and auto update for speed/duplex */
	gma_write16(hw, port, GM_GP_CTRL,
		    GM_GPCR_FC_TX_DIS|GM_GPCR_TX_ENA|GM_GPCR_RX_ENA|
		    GM_GPCR_DUP_FULL|GM_GPCR_FC_RX_DIS|GM_GPCR_AU_FCT_DIS);

	/* Set WOL address */
	memcpy_toio(hw->regs + WOL_REGS(port, WOL_MAC_ADDR),
		    sky2->netdev->dev_addr, ETH_ALEN);

	/* Turn on appropriate WOL control bits */
	sky2_write16(hw, WOL_REGS(port, WOL_CTRL_STAT), WOL_CTL_CLEAR_RESULT);
	ctrl = 0;
	if (sky2->wol & WAKE_PHY)
		ctrl |= WOL_CTL_ENA_PME_ON_LINK_CHG|WOL_CTL_ENA_LINK_CHG_UNIT;
	else
		ctrl |= WOL_CTL_DIS_PME_ON_LINK_CHG|WOL_CTL_DIS_LINK_CHG_UNIT;

	if (sky2->wol & WAKE_MAGIC)
		ctrl |= WOL_CTL_ENA_PME_ON_MAGIC_PKT|WOL_CTL_ENA_MAGIC_PKT_UNIT;
	else
		ctrl |= WOL_CTL_DIS_PME_ON_MAGIC_PKT|WOL_CTL_DIS_MAGIC_PKT_UNIT;

	/* pattern-match wake is never used by this driver */
	ctrl |= WOL_CTL_DIS_PME_ON_PATTERN|WOL_CTL_DIS_PATTERN_UNIT;
	sky2_write16(hw, WOL_REGS(port, WOL_CTRL_STAT), ctrl);

	/* Disable PiG firmware */
	sky2_write16(hw, B0_CTST, Y2_HW_WOL_OFF);

	/* block receiver */
	sky2_write8(hw, SK_REG(port, RX_GMF_CTRL_T), GMF_RST_SET);
}
883 | |||
884 | static void sky2_set_tx_stfwd(struct sky2_hw *hw, unsigned port) | ||
885 | { | ||
886 | struct net_device *dev = hw->dev[port]; | ||
887 | |||
888 | if ( (hw->chip_id == CHIP_ID_YUKON_EX && | ||
889 | hw->chip_rev != CHIP_REV_YU_EX_A0) || | ||
890 | hw->chip_id >= CHIP_ID_YUKON_FE_P) { | ||
891 | /* Yukon-Extreme B0 and further Extreme devices */ | ||
892 | sky2_write32(hw, SK_REG(port, TX_GMF_CTRL_T), TX_STFW_ENA); | ||
893 | } else if (dev->mtu > ETH_DATA_LEN) { | ||
894 | /* set Tx GMAC FIFO Almost Empty Threshold */ | ||
895 | sky2_write32(hw, SK_REG(port, TX_GMF_AE_THR), | ||
896 | (ECU_JUMBO_WM << 16) | ECU_AE_THR); | ||
897 | |||
898 | sky2_write32(hw, SK_REG(port, TX_GMF_CTRL_T), TX_STFW_DIS); | ||
899 | } else | ||
900 | sky2_write32(hw, SK_REG(port, TX_GMF_CTRL_T), TX_STFW_ENA); | ||
901 | } | ||
902 | |||
/* Bring up the GMAC for one port: pulse the PHY reset, program MAC
 * address / flow control / serial mode, clear MIB counters, and
 * configure the Rx/Tx MAC FIFOs with the various chip-specific
 * errata workarounds.  Register write order follows the vendor
 * sk98lin sequence and should not be rearranged.
 */
static void sky2_mac_init(struct sky2_hw *hw, unsigned port)
{
	struct sky2_port *sky2 = netdev_priv(hw->dev[port]);
	u16 reg;
	u32 rx_reg;
	int i;
	const u8 *addr = hw->dev[port]->dev_addr;

	/* pulse GPHY reset (set then clear) */
	sky2_write8(hw, SK_REG(port, GPHY_CTRL), GPC_RST_SET);
	sky2_write8(hw, SK_REG(port, GPHY_CTRL), GPC_RST_CLR);

	sky2_write8(hw, SK_REG(port, GMAC_CTRL), GMC_RST_CLR);

	if (hw->chip_id == CHIP_ID_YUKON_XL &&
	    hw->chip_rev == CHIP_REV_YU_XL_A0 &&
	    port == 1) {
		/* WA DEV_472 -- looks like crossed wires on port 2 */
		/* clear GMAC 1 Control reset */
		sky2_write8(hw, SK_REG(0, GMAC_CTRL), GMC_RST_CLR);
		/* keep re-pulsing reset until the PHY IDs read back sane */
		do {
			sky2_write8(hw, SK_REG(1, GMAC_CTRL), GMC_RST_SET);
			sky2_write8(hw, SK_REG(1, GMAC_CTRL), GMC_RST_CLR);
		} while (gm_phy_read(hw, 1, PHY_MARV_ID0) != PHY_MARV_ID0_VAL ||
			 gm_phy_read(hw, 1, PHY_MARV_ID1) != PHY_MARV_ID1_Y2 ||
			 gm_phy_read(hw, 1, PHY_MARV_INT_MASK) != 0);
	}

	/* read clears any pending GMAC interrupt source bits */
	sky2_read16(hw, SK_REG(port, GMAC_IRQ_SRC));

	/* Enable Transmit FIFO Underrun */
	sky2_write8(hw, SK_REG(port, GMAC_IRQ_MSK), GMAC_DEF_MSK);

	spin_lock_bh(&sky2->phy_lock);
	sky2_phy_power_up(hw, port);
	sky2_phy_init(hw, port);
	spin_unlock_bh(&sky2->phy_lock);

	/* MIB clear: set the clear bit, read every counter once, restore */
	reg = gma_read16(hw, port, GM_PHY_ADDR);
	gma_write16(hw, port, GM_PHY_ADDR, reg | GM_PAR_MIB_CLR);

	for (i = GM_MIB_CNT_BASE; i <= GM_MIB_CNT_END; i += 4)
		gma_read16(hw, port, i);
	gma_write16(hw, port, GM_PHY_ADDR, reg);

	/* transmit control */
	gma_write16(hw, port, GM_TX_CTRL, TX_COL_THR(TX_COL_DEF));

	/* receive control reg: unicast + multicast + no FCS */
	gma_write16(hw, port, GM_RX_CTRL,
		    GM_RXCR_UCF_ENA | GM_RXCR_CRC_DIS | GM_RXCR_MCF_ENA);

	/* transmit flow control */
	gma_write16(hw, port, GM_TX_FLOW_CTRL, 0xffff);

	/* transmit parameter */
	gma_write16(hw, port, GM_TX_PARAM,
		    TX_JAM_LEN_VAL(TX_JAM_LEN_DEF) |
		    TX_JAM_IPG_VAL(TX_JAM_IPG_DEF) |
		    TX_IPG_JAM_DATA(TX_IPG_JAM_DEF) |
		    TX_BACK_OFF_LIM(TX_BOF_LIM_DEF));

	/* serial mode register */
	reg = DATA_BLIND_VAL(DATA_BLIND_DEF) |
		GM_SMOD_VLAN_ENA | IPG_DATA_VAL(IPG_DATA_DEF_1000);

	if (hw->dev[port]->mtu > ETH_DATA_LEN)
		reg |= GM_SMOD_JUMBO_ENA;

	if (hw->chip_id == CHIP_ID_YUKON_EC_U &&
	    hw->chip_rev == CHIP_REV_YU_EC_U_B1)
		reg |= GM_NEW_FLOW_CTRL;

	gma_write16(hw, port, GM_SERIAL_MODE, reg);

	/* virtual address for data */
	gma_set_addr(hw, port, GM_SRC_ADDR_2L, addr);

	/* physical address: used for pause frames */
	gma_set_addr(hw, port, GM_SRC_ADDR_1L, addr);

	/* ignore counter overflows */
	gma_write16(hw, port, GM_TX_IRQ_MSK, 0);
	gma_write16(hw, port, GM_RX_IRQ_MSK, 0);
	gma_write16(hw, port, GM_TR_IRQ_MSK, 0);

	/* Configure Rx MAC FIFO */
	sky2_write8(hw, SK_REG(port, RX_GMF_CTRL_T), GMF_RST_CLR);
	rx_reg = GMF_OPER_ON | GMF_RX_F_FL_ON;
	if (hw->chip_id == CHIP_ID_YUKON_EX ||
	    hw->chip_id == CHIP_ID_YUKON_FE_P)
		rx_reg |= GMF_RX_OVER_ON;

	sky2_write32(hw, SK_REG(port, RX_GMF_CTRL_T), rx_reg);

	if (hw->chip_id == CHIP_ID_YUKON_XL) {
		/* Hardware errata - clear flush mask */
		sky2_write16(hw, SK_REG(port, RX_GMF_FL_MSK), 0);
	} else {
		/* Flush Rx MAC FIFO on any flow control or error */
		sky2_write16(hw, SK_REG(port, RX_GMF_FL_MSK), GMR_FS_ANY_ERR);
	}

	/* Set threshold to 0xa (64 bytes) + 1 to workaround pause bug */
	reg = RX_GMF_FL_THR_DEF + 1;
	/* Another magic mystery workaround from sk98lin */
	if (hw->chip_id == CHIP_ID_YUKON_FE_P &&
	    hw->chip_rev == CHIP_REV_YU_FE2_A0)
		reg = 0x178;
	sky2_write16(hw, SK_REG(port, RX_GMF_FL_THR), reg);

	/* Configure Tx MAC FIFO */
	sky2_write8(hw, SK_REG(port, TX_GMF_CTRL_T), GMF_RST_CLR);
	sky2_write16(hw, SK_REG(port, TX_GMF_CTRL_T), GMF_OPER_ON);

	/* On chips without ram buffer, pause is controlled by MAC level */
	if (!(hw->flags & SKY2_HW_RAM_BUFFER)) {
		/* Pause threshold is scaled by 8 in bytes */
		if (hw->chip_id == CHIP_ID_YUKON_FE_P &&
		    hw->chip_rev == CHIP_REV_YU_FE2_A0)
			reg = 1568 / 8;
		else
			reg = 1024 / 8;
		sky2_write16(hw, SK_REG(port, RX_GMF_UP_THR), reg);
		sky2_write16(hw, SK_REG(port, RX_GMF_LP_THR), 768 / 8);

		sky2_set_tx_stfwd(hw, port);
	}

	if (hw->chip_id == CHIP_ID_YUKON_FE_P &&
	    hw->chip_rev == CHIP_REV_YU_FE2_A0) {
		/* disable dynamic watermark */
		reg = sky2_read16(hw, SK_REG(port, TX_GMF_EA));
		reg &= ~TX_DYN_WM_ENA;
		sky2_write16(hw, SK_REG(port, TX_GMF_EA), reg);
	}
}
1040 | |||
/* Assign Ram Buffer allocation to queue.
 *
 * @q:     queue register block (Q_R1/Q_R2 for receive, Tx queues otherwise)
 * @start: start offset in the RAM buffer, in KB
 * @space: amount of RAM buffer to assign, in KB
 *
 * Programs the start/end/read/write pointers for the queue's RAM
 * window, sets flow-control thresholds for receive queues, and turns
 * on store-and-forward for transmit queues.
 */
static void sky2_ramset(struct sky2_hw *hw, u16 q, u32 start, u32 space)
{
	u32 end;

	/* convert from K bytes to qwords used for hw register */
	start *= 1024/8;
	space *= 1024/8;
	end = start + space - 1;

	sky2_write8(hw, RB_ADDR(q, RB_CTRL), RB_RST_CLR);
	sky2_write32(hw, RB_ADDR(q, RB_START), start);
	sky2_write32(hw, RB_ADDR(q, RB_END), end);
	sky2_write32(hw, RB_ADDR(q, RB_WP), start);
	sky2_write32(hw, RB_ADDR(q, RB_RP), start);

	if (q == Q_R1 || q == Q_R2) {
		u32 tp = space - space/4;

		/* On receive queue's set the thresholds
		 * give receiver priority when > 3/4 full
		 * send pause when down to 2K
		 */
		sky2_write32(hw, RB_ADDR(q, RB_RX_UTHP), tp);
		sky2_write32(hw, RB_ADDR(q, RB_RX_LTHP), space/2);

		tp = space - 2048/8;
		sky2_write32(hw, RB_ADDR(q, RB_RX_UTPP), tp);
		sky2_write32(hw, RB_ADDR(q, RB_RX_LTPP), space/4);
	} else {
		/* Enable store & forward on Tx queue's because
		 * Tx FIFO is only 1K on Yukon
		 */
		sky2_write8(hw, RB_ADDR(q, RB_CTRL), RB_ENA_STFWD);
	}

	sky2_write8(hw, RB_ADDR(q, RB_CTRL), RB_ENA_OP_MD);
	/* dummy read flushes the posted writes before the queue is used */
	sky2_read8(hw, RB_ADDR(q, RB_CTRL));
}
1080 | |||
/* Setup Bus Memory Interface: take the BMU for queue @q out of
 * reset, initialize it, switch the FIFO on, and program the default
 * watermark.  The write sequence matters - do not reorder.
 */
static void sky2_qset(struct sky2_hw *hw, u16 q)
{
	sky2_write32(hw, Q_ADDR(q, Q_CSR), BMU_CLR_RESET);
	sky2_write32(hw, Q_ADDR(q, Q_CSR), BMU_OPER_INIT);
	sky2_write32(hw, Q_ADDR(q, Q_CSR), BMU_FIFO_OP_ON);
	sky2_write32(hw, Q_ADDR(q, Q_WM), BMU_WM_DEFAULT);
}
1089 | |||
/* Setup prefetch unit registers. This is the interface between
 * hardware and driver list elements
 *
 * @qaddr: queue register block
 * @addr:  DMA address of the ring of list elements
 * @last:  index of the last element in the ring
 *
 * Pulses the unit's reset, programs the 64-bit ring base address and
 * last index, then turns the unit on.  The trailing read flushes the
 * posted writes.
 */
static void sky2_prefetch_init(struct sky2_hw *hw, u32 qaddr,
			       dma_addr_t addr, u32 last)
{
	sky2_write32(hw, Y2_QADDR(qaddr, PREF_UNIT_CTRL), PREF_UNIT_RST_SET);
	sky2_write32(hw, Y2_QADDR(qaddr, PREF_UNIT_CTRL), PREF_UNIT_RST_CLR);
	sky2_write32(hw, Y2_QADDR(qaddr, PREF_UNIT_ADDR_HI), upper_32_bits(addr));
	sky2_write32(hw, Y2_QADDR(qaddr, PREF_UNIT_ADDR_LO), lower_32_bits(addr));
	sky2_write16(hw, Y2_QADDR(qaddr, PREF_UNIT_LAST_IDX), last);
	sky2_write32(hw, Y2_QADDR(qaddr, PREF_UNIT_CTRL), PREF_UNIT_OP_ON);

	sky2_read32(hw, Y2_QADDR(qaddr, PREF_UNIT_CTRL));
}
1105 | |||
1106 | static inline struct sky2_tx_le *get_tx_le(struct sky2_port *sky2, u16 *slot) | ||
1107 | { | ||
1108 | struct sky2_tx_le *le = sky2->tx_le + *slot; | ||
1109 | |||
1110 | *slot = RING_NEXT(*slot, sky2->tx_ring_size); | ||
1111 | le->ctrl = 0; | ||
1112 | return le; | ||
1113 | } | ||
1114 | |||
/* Reset the software transmit ring state and seed the first list
 * element with an OP_ADDR64 entry (high address bits = 0) so the
 * hardware's upper-address latch starts from a known value.
 */
static void tx_init(struct sky2_port *sky2)
{
	struct sky2_tx_le *le;

	sky2->tx_prod = sky2->tx_cons = 0;
	sky2->tx_tcpsum = 0;
	sky2->tx_last_mss = 0;

	le = get_tx_le(sky2, &sky2->tx_prod);
	le->addr = 0;
	le->opcode = OP_ADDR64 | HW_OWNER;
	sky2->tx_last_upper = 0;
}
1128 | |||
/* Update chip's next pointer: publish @idx as the new put index of
 * queue @q's prefetch unit.
 */
static inline void sky2_put_idx(struct sky2_hw *hw, unsigned q, u16 idx)
{
	/* Make sure write' to descriptors are complete before we tell hardware */
	wmb();
	sky2_write16(hw, Y2_QADDR(q, PREF_UNIT_PUT_IDX), idx);

	/* Synchronize I/O on since next processor may write to tail */
	mmiowb();
}
1139 | |||
1140 | |||
1141 | static inline struct sky2_rx_le *sky2_next_rx(struct sky2_port *sky2) | ||
1142 | { | ||
1143 | struct sky2_rx_le *le = sky2->rx_le + sky2->rx_put; | ||
1144 | sky2->rx_put = RING_NEXT(sky2->rx_put, RX_LE_SIZE); | ||
1145 | le->ctrl = 0; | ||
1146 | return le; | ||
1147 | } | ||
1148 | |||
1149 | static unsigned sky2_get_rx_threshold(struct sky2_port *sky2) | ||
1150 | { | ||
1151 | unsigned size; | ||
1152 | |||
1153 | /* Space needed for frame data + headers rounded up */ | ||
1154 | size = roundup(sky2->netdev->mtu + ETH_HLEN + VLAN_HLEN, 8); | ||
1155 | |||
1156 | /* Stopping point for hardware truncation */ | ||
1157 | return (size - 8) / sizeof(u32); | ||
1158 | } | ||
1159 | |||
1160 | static unsigned sky2_get_rx_data_size(struct sky2_port *sky2) | ||
1161 | { | ||
1162 | struct rx_ring_info *re; | ||
1163 | unsigned size; | ||
1164 | |||
1165 | /* Space needed for frame data + headers rounded up */ | ||
1166 | size = roundup(sky2->netdev->mtu + ETH_HLEN + VLAN_HLEN, 8); | ||
1167 | |||
1168 | sky2->rx_nfrags = size >> PAGE_SHIFT; | ||
1169 | BUG_ON(sky2->rx_nfrags > ARRAY_SIZE(re->frag_addr)); | ||
1170 | |||
1171 | /* Compute residue after pages */ | ||
1172 | size -= sky2->rx_nfrags << PAGE_SHIFT; | ||
1173 | |||
1174 | /* Optimize to handle small packets and headers */ | ||
1175 | if (size < copybreak) | ||
1176 | size = copybreak; | ||
1177 | if (size < ETH_HLEN) | ||
1178 | size = ETH_HLEN; | ||
1179 | |||
1180 | return size; | ||
1181 | } | ||
1182 | |||
1183 | /* Build description to hardware for one receive segment */ | ||
1184 | static void sky2_rx_add(struct sky2_port *sky2, u8 op, | ||
1185 | dma_addr_t map, unsigned len) | ||
1186 | { | ||
1187 | struct sky2_rx_le *le; | ||
1188 | |||
1189 | if (sizeof(dma_addr_t) > sizeof(u32)) { | ||
1190 | le = sky2_next_rx(sky2); | ||
1191 | le->addr = cpu_to_le32(upper_32_bits(map)); | ||
1192 | le->opcode = OP_ADDR64 | HW_OWNER; | ||
1193 | } | ||
1194 | |||
1195 | le = sky2_next_rx(sky2); | ||
1196 | le->addr = cpu_to_le32(lower_32_bits(map)); | ||
1197 | le->length = cpu_to_le16(len); | ||
1198 | le->opcode = op | HW_OWNER; | ||
1199 | } | ||
1200 | |||
1201 | /* Build description to hardware for one possibly fragmented skb */ | ||
1202 | static void sky2_rx_submit(struct sky2_port *sky2, | ||
1203 | const struct rx_ring_info *re) | ||
1204 | { | ||
1205 | int i; | ||
1206 | |||
1207 | sky2_rx_add(sky2, OP_PACKET, re->data_addr, sky2->rx_data_size); | ||
1208 | |||
1209 | for (i = 0; i < skb_shinfo(re->skb)->nr_frags; i++) | ||
1210 | sky2_rx_add(sky2, OP_BUFFER, re->frag_addr[i], PAGE_SIZE); | ||
1211 | } | ||
1212 | |||
1213 | |||
/* DMA-map a receive skb: the linear part with pci_map_single() and
 * each page fragment with pci_map_page().
 *
 * Returns 0 on success or -EIO on a mapping failure, in which case
 * everything mapped so far is unwound (fragments in reverse order,
 * then the linear mapping) so the caller can just free the skb.
 */
static int sky2_rx_map_skb(struct pci_dev *pdev, struct rx_ring_info *re,
			   unsigned size)
{
	struct sk_buff *skb = re->skb;
	int i;

	re->data_addr = pci_map_single(pdev, skb->data, size, PCI_DMA_FROMDEVICE);
	if (pci_dma_mapping_error(pdev, re->data_addr))
		goto mapping_error;

	/* remember the mapped length for the matching unmap */
	dma_unmap_len_set(re, data_size, size);

	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
		skb_frag_t *frag = &skb_shinfo(skb)->frags[i];

		re->frag_addr[i] = pci_map_page(pdev, frag->page,
						frag->page_offset,
						frag->size,
						PCI_DMA_FROMDEVICE);

		if (pci_dma_mapping_error(pdev, re->frag_addr[i]))
			goto map_page_error;
	}
	return 0;

map_page_error:
	/* unwind only the fragments that were successfully mapped */
	while (--i >= 0) {
		pci_unmap_page(pdev, re->frag_addr[i],
			       skb_shinfo(skb)->frags[i].size,
			       PCI_DMA_FROMDEVICE);
	}

	pci_unmap_single(pdev, re->data_addr, dma_unmap_len(re, data_size),
			 PCI_DMA_FROMDEVICE);

mapping_error:
	if (net_ratelimit())
		dev_warn(&pdev->dev, "%s: rx mapping error\n",
			 skb->dev->name);
	return -EIO;
}
1255 | |||
1256 | static void sky2_rx_unmap_skb(struct pci_dev *pdev, struct rx_ring_info *re) | ||
1257 | { | ||
1258 | struct sk_buff *skb = re->skb; | ||
1259 | int i; | ||
1260 | |||
1261 | pci_unmap_single(pdev, re->data_addr, dma_unmap_len(re, data_size), | ||
1262 | PCI_DMA_FROMDEVICE); | ||
1263 | |||
1264 | for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) | ||
1265 | pci_unmap_page(pdev, re->frag_addr[i], | ||
1266 | skb_shinfo(skb)->frags[i].size, | ||
1267 | PCI_DMA_FROMDEVICE); | ||
1268 | } | ||
1269 | |||
/* Tell chip where to start receive checksum.
 * Actually has two checksums, but set both same to avoid possible byte
 * order problems.
 *
 * Queues an OP_TCPSTART list element with both start offsets set to
 * ETH_HLEN, then enables or disables hardware Rx checksumming in the
 * BMU depending on the netdev's NETIF_F_RXCSUM feature bit.
 */
static void rx_set_checksum(struct sky2_port *sky2)
{
	struct sky2_rx_le *le = sky2_next_rx(sky2);

	le->addr = cpu_to_le32((ETH_HLEN << 16) | ETH_HLEN);
	le->ctrl = 0;
	le->opcode = OP_TCPSTART | HW_OWNER;

	sky2_write32(sky2->hw,
		     Q_ADDR(rxqaddr[sky2->port], Q_CSR),
		     (sky2->netdev->features & NETIF_F_RXCSUM)
		     ? BMU_ENA_RX_CHKSUM : BMU_DIS_RX_CHKSUM);
}
1287 | |||
1288 | /* Enable/disable receive hash calculation (RSS) */ | ||
1289 | static void rx_set_rss(struct net_device *dev, u32 features) | ||
1290 | { | ||
1291 | struct sky2_port *sky2 = netdev_priv(dev); | ||
1292 | struct sky2_hw *hw = sky2->hw; | ||
1293 | int i, nkeys = 4; | ||
1294 | |||
1295 | /* Supports IPv6 and other modes */ | ||
1296 | if (hw->flags & SKY2_HW_NEW_LE) { | ||
1297 | nkeys = 10; | ||
1298 | sky2_write32(hw, SK_REG(sky2->port, RSS_CFG), HASH_ALL); | ||
1299 | } | ||
1300 | |||
1301 | /* Program RSS initial values */ | ||
1302 | if (features & NETIF_F_RXHASH) { | ||
1303 | u32 key[nkeys]; | ||
1304 | |||
1305 | get_random_bytes(key, nkeys * sizeof(u32)); | ||
1306 | for (i = 0; i < nkeys; i++) | ||
1307 | sky2_write32(hw, SK_REG(sky2->port, RSS_KEY + i * 4), | ||
1308 | key[i]); | ||
1309 | |||
1310 | /* Need to turn on (undocumented) flag to make hashing work */ | ||
1311 | sky2_write32(hw, SK_REG(sky2->port, RX_GMF_CTRL_T), | ||
1312 | RX_STFW_ENA); | ||
1313 | |||
1314 | sky2_write32(hw, Q_ADDR(rxqaddr[sky2->port], Q_CSR), | ||
1315 | BMU_ENA_RX_RSS_HASH); | ||
1316 | } else | ||
1317 | sky2_write32(hw, Q_ADDR(rxqaddr[sky2->port], Q_CSR), | ||
1318 | BMU_DIS_RX_RSS_HASH); | ||
1319 | } | ||
1320 | |||
/*
 * The RX Stop command will not work for Yukon-2 if the BMU does not
 * reach the end of packet and since we can't make sure that we have
 * incoming data, we must reset the BMU while it is not doing a DMA
 * transfer. Since it is possible that the RX path is still active,
 * the RX RAM buffer will be stopped first, so any possible incoming
 * data will not trigger a DMA. After the RAM buffer is stopped, the
 * BMU is polled until any DMA in progress is ended and only then it
 * will be reset.
 */
static void sky2_rx_stop(struct sky2_port *sky2)
{
	struct sky2_hw *hw = sky2->hw;
	unsigned rxq = rxqaddr[sky2->port];
	int i;

	/* disable the RAM Buffer receive queue */
	sky2_write8(hw, RB_ADDR(rxq, RB_CTRL), RB_DIS_OP_MD);

	/* poll (bounded at 0xffff iterations) until the RAM buffer's
	 * read-level equals its read-pointer level, i.e. it drained */
	for (i = 0; i < 0xffff; i++)
		if (sky2_read8(hw, RB_ADDR(rxq, Q_RSL))
		    == sky2_read8(hw, RB_ADDR(rxq, Q_RL)))
			goto stopped;

	netdev_warn(sky2->netdev, "receiver stop failed\n");
stopped:
	/* reset the BMU and its FIFO even if the drain timed out */
	sky2_write32(hw, Q_ADDR(rxq, Q_CSR), BMU_RST_SET | BMU_FIFO_RST);

	/* reset the Rx prefetch unit */
	sky2_write32(hw, Y2_QADDR(rxq, PREF_UNIT_CTRL), PREF_UNIT_RST_SET);
	mmiowb();
}
1353 | |||
1354 | /* Clean out receive buffer area, assumes receiver hardware stopped */ | ||
1355 | static void sky2_rx_clean(struct sky2_port *sky2) | ||
1356 | { | ||
1357 | unsigned i; | ||
1358 | |||
1359 | memset(sky2->rx_le, 0, RX_LE_BYTES); | ||
1360 | for (i = 0; i < sky2->rx_pending; i++) { | ||
1361 | struct rx_ring_info *re = sky2->rx_ring + i; | ||
1362 | |||
1363 | if (re->skb) { | ||
1364 | sky2_rx_unmap_skb(sky2->hw->pdev, re); | ||
1365 | kfree_skb(re->skb); | ||
1366 | re->skb = NULL; | ||
1367 | } | ||
1368 | } | ||
1369 | } | ||
1370 | |||
/* Basic MII support.
 *
 * Implements SIOCGMIIPHY (report PHY address), SIOCGMIIREG (read a
 * PHY register) and SIOCSMIIREG (write a PHY register).  PHY access
 * is serialized with the PHY spinlock.  Returns -ENODEV while the
 * interface is down (PHY still in reset) and -EOPNOTSUPP for any
 * other ioctl.
 */
static int sky2_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
{
	struct mii_ioctl_data *data = if_mii(ifr);
	struct sky2_port *sky2 = netdev_priv(dev);
	struct sky2_hw *hw = sky2->hw;
	int err = -EOPNOTSUPP;

	if (!netif_running(dev))
		return -ENODEV;	/* Phy still in reset */

	switch (cmd) {
	case SIOCGMIIPHY:
		data->phy_id = PHY_ADDR_MARV;

		/* fallthru */
	case SIOCGMIIREG: {
		u16 val = 0;

		spin_lock_bh(&sky2->phy_lock);
		err = __gm_phy_read(hw, sky2->port, data->reg_num & 0x1f, &val);
		spin_unlock_bh(&sky2->phy_lock);

		data->val_out = val;
		break;
	}

	case SIOCSMIIREG:
		spin_lock_bh(&sky2->phy_lock);
		err = gm_phy_write(hw, sky2->port, data->reg_num & 0x1f,
				   data->val_in);
		spin_unlock_bh(&sky2->phy_lock);
		break;
	}
	return err;
}
1407 | |||
/* Offloads that only make sense when hardware VLAN tagging is on */
#define SKY2_VLAN_OFFLOADS (NETIF_F_IP_CSUM | NETIF_F_SG | NETIF_F_TSO)

/* Apply the requested VLAN feature bits: program hardware VLAN tag
 * stripping (Rx) and insertion (Tx) in the GMAC FIFOs, and keep
 * dev->vlan_features consistent - checksum/SG/TSO over VLANs is only
 * offered when hardware Tx tagging is enabled.
 */
static void sky2_vlan_mode(struct net_device *dev, u32 features)
{
	struct sky2_port *sky2 = netdev_priv(dev);
	struct sky2_hw *hw = sky2->hw;
	u16 port = sky2->port;

	if (features & NETIF_F_HW_VLAN_RX)
		sky2_write32(hw, SK_REG(port, RX_GMF_CTRL_T),
			     RX_VLAN_STRIP_ON);
	else
		sky2_write32(hw, SK_REG(port, RX_GMF_CTRL_T),
			     RX_VLAN_STRIP_OFF);

	if (features & NETIF_F_HW_VLAN_TX) {
		sky2_write32(hw, SK_REG(port, TX_GMF_CTRL_T),
			     TX_VLAN_TAG_ON);

		dev->vlan_features |= SKY2_VLAN_OFFLOADS;
	} else {
		sky2_write32(hw, SK_REG(port, TX_GMF_CTRL_T),
			     TX_VLAN_TAG_OFF);

		/* Can't do transmit offload of vlan without hw vlan */
		dev->vlan_features &= ~SKY2_VLAN_OFFLOADS;
	}
}
1436 | |||
1437 | /* Amount of required worst case padding in rx buffer */ | ||
1438 | static inline unsigned sky2_rx_pad(const struct sky2_hw *hw) | ||
1439 | { | ||
1440 | return (hw->flags & SKY2_HW_RAM_BUFFER) ? 8 : 2; | ||
1441 | } | ||
1442 | |||
/*
 * Allocate an skb for receiving. If the MTU is large enough
 * make the skb non-linear with a fragment list of pages.
 *
 * Returns the skb, or NULL on allocation failure (any partially
 * attached pages are released with the skb).
 */
static struct sk_buff *sky2_rx_alloc(struct sky2_port *sky2, gfp_t gfp)
{
	struct sk_buff *skb;
	int i;

	skb = __netdev_alloc_skb(sky2->netdev,
				 sky2->rx_data_size + sky2_rx_pad(sky2->hw),
				 gfp);
	if (!skb)
		goto nomem;

	if (sky2->hw->flags & SKY2_HW_RAM_BUFFER) {
		unsigned char *start;
		/*
		 * Workaround for a FIFO bug that causes a hang if the
		 * receive buffer is not aligned.  The buffer returned
		 * from netdev_alloc_skb is aligned except if slab
		 * debugging is enabled.
		 * NOTE(review): the comment historically said "64 byte
		 * aligned" but the code aligns to 8 - confirm intent.
		 */
		start = PTR_ALIGN(skb->data, 8);
		skb_reserve(skb, start - skb->data);
	} else
		skb_reserve(skb, NET_IP_ALIGN);

	/* attach one full page per expected fragment */
	for (i = 0; i < sky2->rx_nfrags; i++) {
		struct page *page = alloc_page(gfp);

		if (!page)
			goto free_partial;
		skb_fill_page_desc(skb, i, page, 0, PAGE_SIZE);
	}

	return skb;
free_partial:
	kfree_skb(skb);
nomem:
	return NULL;
}
1485 | |||
/* Publish the current rx put index to the hardware prefetch unit
 * so it can fetch the newly submitted list elements.
 */
static inline void sky2_rx_update(struct sky2_port *sky2, unsigned rxq)
{
	sky2_put_idx(sky2->hw, rxq, sky2->rx_put);
}
1490 | |||
/* Allocate and DMA-map an skb for every entry in the software rx
 * ring.  Returns 0 on success or -ENOMEM; on failure, entries
 * already filled are left in place for sky2_rx_clean() to release.
 */
static int sky2_alloc_rx_skbs(struct sky2_port *sky2)
{
	struct sky2_hw *hw = sky2->hw;
	unsigned i;

	sky2->rx_data_size = sky2_get_rx_data_size(sky2);

	/* Fill Rx ring */
	for (i = 0; i < sky2->rx_pending; i++) {
		struct rx_ring_info *re = sky2->rx_ring + i;

		re->skb = sky2_rx_alloc(sky2, GFP_KERNEL);
		if (!re->skb)
			return -ENOMEM;

		if (sky2_rx_map_skb(hw->pdev, re, sky2->rx_data_size)) {
			dev_kfree_skb(re->skb);
			re->skb = NULL;
			return -ENOMEM;
		}
	}
	return 0;
}
1514 | |||
/*
 * Setup receiver buffer pool.
 * Normal case this ends up creating one list element for skb
 * in the receive ring. Worst case if using large MTU and each
 * allocation falls on a different 64 bit region, that results
 * in 6 list elements per ring entry.
 * One element is used for checksum enable/disable, and one
 * extra to avoid wrap.
 */
static void sky2_rx_start(struct sky2_port *sky2)
{
	struct sky2_hw *hw = sky2->hw;
	struct rx_ring_info *re;
	unsigned rxq = rxqaddr[sky2->port];
	unsigned i, thresh;

	sky2->rx_put = sky2->rx_next = 0;
	sky2_qset(hw, rxq);

	/* On PCI express lowering the watermark gives better performance */
	if (pci_is_pcie(hw->pdev))
		sky2_write32(hw, Q_ADDR(rxq, Q_WM), BMU_WM_PEX);

	/* These chips have no ram buffer?
	 * MAC Rx RAM Read is controlled by hardware */
	if (hw->chip_id == CHIP_ID_YUKON_EC_U &&
	    hw->chip_rev > CHIP_REV_YU_EC_U_A0)
		sky2_write32(hw, Q_ADDR(rxq, Q_TEST), F_M_RX_RAM_DIS);

	sky2_prefetch_init(hw, rxq, sky2->rx_le_map, RX_LE_SIZE - 1);

	/* older list-element format needs an explicit checksum-start
	 * element; newer format chips encode it differently */
	if (!(hw->flags & SKY2_HW_NEW_LE))
		rx_set_checksum(sky2);

	if (!(hw->flags & SKY2_HW_RSS_BROKEN))
		rx_set_rss(sky2->netdev, sky2->netdev->features);

	/* submit Rx ring */
	for (i = 0; i < sky2->rx_pending; i++) {
		re = sky2->rx_ring + i;
		sky2_rx_submit(sky2, re);
	}

	/*
	 * The receiver hangs if it receives frames larger than the
	 * packet buffer. As a workaround, truncate oversize frames, but
	 * the register is limited to 9 bits, so if you do frames > 2052
	 * you better get the MTU right!
	 */
	thresh = sky2_get_rx_threshold(sky2);
	if (thresh > 0x1ff)
		sky2_write32(hw, SK_REG(sky2->port, RX_GMF_CTRL_T), RX_TRUNC_OFF);
	else {
		sky2_write16(hw, SK_REG(sky2->port, RX_GMF_TR_THR), thresh);
		sky2_write32(hw, SK_REG(sky2->port, RX_GMF_CTRL_T), RX_TRUNC_ON);
	}

	/* Tell chip about available buffers */
	sky2_rx_update(sky2, rxq);

	if (hw->chip_id == CHIP_ID_YUKON_EX ||
	    hw->chip_id == CHIP_ID_YUKON_SUPR) {
		/*
		 * Disable flushing of non ASF packets;
		 * must be done after initializing the BMUs;
		 * drivers without ASF support should do this too, otherwise
		 * it may happen that they cannot run on ASF devices;
		 * remember that the MAC FIFO isn't reset during initialization.
		 */
		sky2_write32(hw, SK_REG(sky2->port, RX_GMF_CTRL_T), RX_MACSEC_FLUSH_OFF);
	}

	if (hw->chip_id >= CHIP_ID_YUKON_SUPR) {
		/* Enable RX Home Address & Routing Header checksum fix */
		sky2_write16(hw, SK_REG(sky2->port, RX_GMF_FL_CTRL),
			     RX_IPV6_SA_MOB_ENA | RX_IPV6_DA_MOB_ENA);

		/* Enable TX Home Address & Routing Header checksum fix */
		sky2_write32(hw, Q_ADDR(txqaddr[sky2->port], Q_TEST),
			     TBMU_TEST_HOME_ADD_FIX_EN | TBMU_TEST_ROUTING_ADD_FIX_EN);
	}
}
1597 | |||
/* Allocate all DMA and software ring state for a port: the tx
 * list-element ring (coherent DMA), the tx bookkeeping ring, the rx
 * list-element ring (coherent DMA, zeroed), the rx bookkeeping ring,
 * and finally the receive skbs themselves.
 *
 * Returns 0 or -ENOMEM.  On failure, partially allocated resources
 * are NOT freed here; the caller is expected to run
 * sky2_free_buffers(), which handles NULL/partial state.
 */
static int sky2_alloc_buffers(struct sky2_port *sky2)
{
	struct sky2_hw *hw = sky2->hw;

	/* must be power of 2 */
	sky2->tx_le = pci_alloc_consistent(hw->pdev,
					   sky2->tx_ring_size *
					   sizeof(struct sky2_tx_le),
					   &sky2->tx_le_map);
	if (!sky2->tx_le)
		goto nomem;

	sky2->tx_ring = kcalloc(sky2->tx_ring_size, sizeof(struct tx_ring_info),
				GFP_KERNEL);
	if (!sky2->tx_ring)
		goto nomem;

	sky2->rx_le = pci_alloc_consistent(hw->pdev, RX_LE_BYTES,
					   &sky2->rx_le_map);
	if (!sky2->rx_le)
		goto nomem;
	memset(sky2->rx_le, 0, RX_LE_BYTES);

	sky2->rx_ring = kcalloc(sky2->rx_pending, sizeof(struct rx_ring_info),
				GFP_KERNEL);
	if (!sky2->rx_ring)
		goto nomem;

	return sky2_alloc_rx_skbs(sky2);
nomem:
	return -ENOMEM;
}
1630 | |||
/* Release everything sky2_alloc_buffers() obtained.  Safe to call with a
 * partially-initialized port: each pointer is checked (or NULL-safe) and
 * reset so a second call is harmless.
 */
static void sky2_free_buffers(struct sky2_port *sky2)
{
	struct sky2_hw *hw = sky2->hw;

	/* Unmap and free any skbs still sitting in the rx ring. */
	sky2_rx_clean(sky2);

	if (sky2->rx_le) {
		pci_free_consistent(hw->pdev, RX_LE_BYTES,
				    sky2->rx_le, sky2->rx_le_map);
		sky2->rx_le = NULL;
	}
	if (sky2->tx_le) {
		pci_free_consistent(hw->pdev,
				    sky2->tx_ring_size * sizeof(struct sky2_tx_le),
				    sky2->tx_le, sky2->tx_le_map);
		sky2->tx_le = NULL;
	}
	/* kfree(NULL) is a no-op, so these need no guard. */
	kfree(sky2->tx_ring);
	kfree(sky2->rx_ring);

	sky2->tx_ring = NULL;
	sky2->rx_ring = NULL;
}
1654 | |||
/* Bring the hardware for one port up: MAC, RAM buffer partitioning,
 * transmit queue and prefetch unit, then start receive.  Assumes the
 * descriptor rings were already allocated by sky2_alloc_buffers().
 */
static void sky2_hw_up(struct sky2_port *sky2)
{
	struct sky2_hw *hw = sky2->hw;
	unsigned port = sky2->port;
	u32 ramsize;
	int cap;
	struct net_device *otherdev = hw->dev[sky2->port^1];

	tx_init(sky2);

	/*
	 * On dual port PCI-X card, there is a problem where status
	 * can be received out of order due to split transactions
	 */
	if (otherdev && netif_running(otherdev) &&
	    (cap = pci_find_capability(hw->pdev, PCI_CAP_ID_PCIX))) {
		u16 cmd;

		/* Clear max-split-transaction field to force single splits. */
		cmd = sky2_pci_read16(hw, cap + PCI_X_CMD);
		cmd &= ~PCI_X_CMD_MAX_SPLIT;
		sky2_pci_write16(hw, cap + PCI_X_CMD, cmd);
	}

	sky2_mac_init(hw, port);

	/* Register is number of 4K blocks on internal RAM buffer. */
	ramsize = sky2_read8(hw, B2_E_0) * 4;
	if (ramsize > 0) {
		u32 rxspace;

		netdev_dbg(sky2->netdev, "ram buffer %dK\n", ramsize);
		/* Split RAM between rx and tx; rx gets the larger share
		 * on bigger buffers (8K + 2/3 of the remainder). */
		if (ramsize < 16)
			rxspace = ramsize / 2;
		else
			rxspace = 8 + (2*(ramsize - 16))/3;

		sky2_ramset(hw, rxqaddr[port], 0, rxspace);
		sky2_ramset(hw, txqaddr[port], rxspace, ramsize - rxspace);

		/* Make sure SyncQ is disabled */
		sky2_write8(hw, RB_ADDR(port == 0 ? Q_XS1 : Q_XS2, RB_CTRL),
			    RB_RST_SET);
	}

	sky2_qset(hw, txqaddr[port]);

	/* This is copied from sk98lin 10.0.5.3; no one tells me about errata */
	if (hw->chip_id == CHIP_ID_YUKON_EX && hw->chip_rev == CHIP_REV_YU_EX_B0)
		sky2_write32(hw, Q_ADDR(txqaddr[port], Q_TEST), F_TX_CHK_AUTO_OFF);

	/* Set almost empty threshold */
	if (hw->chip_id == CHIP_ID_YUKON_EC_U &&
	    hw->chip_rev == CHIP_REV_YU_EC_U_A0)
		sky2_write16(hw, Q_ADDR(txqaddr[port], Q_AL), ECU_TXFF_LEV);

	/* Point the tx prefetch unit at the descriptor ring. */
	sky2_prefetch_init(hw, txqaddr[port], sky2->tx_le_map,
			   sky2->tx_ring_size - 1);

	sky2_vlan_mode(sky2->netdev, sky2->netdev->features);
	netdev_update_features(sky2->netdev);

	sky2_rx_start(sky2);
}
1718 | |||
/* Bring up network interface (ndo_open).
 * Allocates rings, programs the hardware, then unmasks the port's
 * interrupt sources.  Returns 0 or a negative errno; on failure all
 * buffers are freed again.
 */
static int sky2_up(struct net_device *dev)
{
	struct sky2_port *sky2 = netdev_priv(dev);
	struct sky2_hw *hw = sky2->hw;
	unsigned port = sky2->port;
	u32 imask;
	int err;

	netif_carrier_off(dev);

	err = sky2_alloc_buffers(sky2);
	if (err)
		goto err_out;

	sky2_hw_up(sky2);

	/* Enable interrupts from phy/mac for port */
	imask = sky2_read32(hw, B0_IMSK);
	imask |= portirq_msk[port];
	sky2_write32(hw, B0_IMSK, imask);
	/* Read back to flush the posted write before returning. */
	sky2_read32(hw, B0_IMSK);

	netif_info(sky2, ifup, dev, "enabling interface\n");

	return 0;

err_out:
	sky2_free_buffers(sky2);
	return err;
}
1750 | |||
1751 | /* Modular subtraction in ring */ | ||
1752 | static inline int tx_inuse(const struct sky2_port *sky2) | ||
1753 | { | ||
1754 | return (sky2->tx_prod - sky2->tx_cons) & (sky2->tx_ring_size - 1); | ||
1755 | } | ||
1756 | |||
1757 | /* Number of list elements available for next tx */ | ||
1758 | static inline int tx_avail(const struct sky2_port *sky2) | ||
1759 | { | ||
1760 | return sky2->tx_pending - tx_inuse(sky2); | ||
1761 | } | ||
1762 | |||
1763 | /* Estimate of number of transmit list elements required */ | ||
1764 | static unsigned tx_le_req(const struct sk_buff *skb) | ||
1765 | { | ||
1766 | unsigned count; | ||
1767 | |||
1768 | count = (skb_shinfo(skb)->nr_frags + 1) | ||
1769 | * (sizeof(dma_addr_t) / sizeof(u32)); | ||
1770 | |||
1771 | if (skb_is_gso(skb)) | ||
1772 | ++count; | ||
1773 | else if (sizeof(dma_addr_t) == sizeof(u32)) | ||
1774 | ++count; /* possible vlan */ | ||
1775 | |||
1776 | if (skb->ip_summed == CHECKSUM_PARTIAL) | ||
1777 | ++count; | ||
1778 | |||
1779 | return count; | ||
1780 | } | ||
1781 | |||
/* Undo the DMA mapping recorded in a tx ring element.  The flags say
 * whether it was mapped with pci_map_single (head data) or pci_map_page
 * (fragment); elements that carried no mapping (control LEs) have neither
 * flag and are left alone.  Flags are cleared so a repeat call is a no-op.
 */
static void sky2_tx_unmap(struct pci_dev *pdev, struct tx_ring_info *re)
{
	if (re->flags & TX_MAP_SINGLE)
		pci_unmap_single(pdev, dma_unmap_addr(re, mapaddr),
				 dma_unmap_len(re, maplen),
				 PCI_DMA_TODEVICE);
	else if (re->flags & TX_MAP_PAGE)
		pci_unmap_page(pdev, dma_unmap_addr(re, mapaddr),
			       dma_unmap_len(re, maplen),
			       PCI_DMA_TODEVICE);
	re->flags = 0;
}
1794 | |||
/*
 * Put one packet in ring for transmit.
 * A single packet can generate multiple list elements, and
 * the number of ring elements will probably be less than the number
 * of list elements used.
 *
 * Returns NETDEV_TX_BUSY only when the caller raced past the stop-queue
 * threshold; on a DMA mapping error the skb is dropped (NETDEV_TX_OK)
 * after unwinding any mappings already made.
 */
static netdev_tx_t sky2_xmit_frame(struct sk_buff *skb,
				   struct net_device *dev)
{
	struct sky2_port *sky2 = netdev_priv(dev);
	struct sky2_hw *hw = sky2->hw;
	struct sky2_tx_le *le = NULL;
	struct tx_ring_info *re;
	unsigned i, len;
	dma_addr_t mapping;
	u32 upper;
	u16 slot;
	u16 mss;
	u8 ctrl;

	if (unlikely(tx_avail(sky2) < tx_le_req(skb)))
		return NETDEV_TX_BUSY;

	len = skb_headlen(skb);
	mapping = pci_map_single(hw->pdev, skb->data, len, PCI_DMA_TODEVICE);

	if (pci_dma_mapping_error(hw->pdev, mapping))
		goto mapping_error;

	/* Build into a local slot index; tx_prod is only advanced at the
	 * end so a failure can unwind cleanly. */
	slot = sky2->tx_prod;
	netif_printk(sky2, tx_queued, KERN_DEBUG, dev,
		     "tx queued, slot %u, len %d\n", slot, skb->len);

	/* Send high bits if needed */
	upper = upper_32_bits(mapping);
	if (upper != sky2->tx_last_upper) {
		le = get_tx_le(sky2, &slot);
		le->addr = cpu_to_le32(upper);
		sky2->tx_last_upper = upper;
		le->opcode = OP_ADDR64 | HW_OWNER;
	}

	/* Check for TCP Segmentation Offload */
	mss = skb_shinfo(skb)->gso_size;
	if (mss != 0) {

		/* Older chips want total segment length (headers + mss),
		 * newer ones take the mss directly. */
		if (!(hw->flags & SKY2_HW_NEW_LE))
			mss += ETH_HLEN + ip_hdrlen(skb) + tcp_hdrlen(skb);

		/* The MSS element is sticky; only emit when it changes. */
		if (mss != sky2->tx_last_mss) {
			le = get_tx_le(sky2, &slot);
			le->addr = cpu_to_le32(mss);

			if (hw->flags & SKY2_HW_NEW_LE)
				le->opcode = OP_MSS | HW_OWNER;
			else
				le->opcode = OP_LRGLEN | HW_OWNER;
			sky2->tx_last_mss = mss;
		}
	}

	ctrl = 0;

	/* Add VLAN tag, can piggyback on LRGLEN or ADDR64 */
	if (vlan_tx_tag_present(skb)) {
		if (!le) {
			le = get_tx_le(sky2, &slot);
			le->addr = 0;
			le->opcode = OP_VLAN|HW_OWNER;
		} else
			le->opcode |= OP_VLAN;
		/* NOTE: big-endian on purpose — presumably the hardware
		 * inserts the tag on the wire as-is; confirm against the
		 * chip manual before "fixing". */
		le->length = cpu_to_be16(vlan_tx_tag_get(skb));
		ctrl |= INS_VLAN;
	}

	/* Handle TCP checksum offload */
	if (skb->ip_summed == CHECKSUM_PARTIAL) {
		/* On Yukon EX (some versions) encoding change. */
		if (hw->flags & SKY2_HW_AUTO_TX_SUM)
			ctrl |= CALSUM; /* auto checksum */
		else {
			const unsigned offset = skb_transport_offset(skb);
			u32 tcpsum;

			tcpsum = offset << 16;			/* sum start */
			tcpsum |= offset + skb->csum_offset;	/* sum write */

			ctrl |= CALSUM | WR_SUM | INIT_SUM | LOCK_SUM;
			if (ip_hdr(skb)->protocol == IPPROTO_UDP)
				ctrl |= UDPTCP;

			/* Checksum element is sticky like MSS. */
			if (tcpsum != sky2->tx_tcpsum) {
				sky2->tx_tcpsum = tcpsum;

				le = get_tx_le(sky2, &slot);
				le->addr = cpu_to_le32(tcpsum);
				le->length = 0;	/* initial checksum value */
				le->ctrl = 1;	/* one packet */
				le->opcode = OP_TCPLISW | HW_OWNER;
			}
		}
	}

	/* Record unmap info for the head buffer. */
	re = sky2->tx_ring + slot;
	re->flags = TX_MAP_SINGLE;
	dma_unmap_addr_set(re, mapaddr, mapping);
	dma_unmap_len_set(re, maplen, len);

	le = get_tx_le(sky2, &slot);
	le->addr = cpu_to_le32(lower_32_bits(mapping));
	le->length = cpu_to_le16(len);
	le->ctrl = ctrl;
	le->opcode = mss ? (OP_LARGESEND | HW_OWNER) : (OP_PACKET | HW_OWNER);


	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
		const skb_frag_t *frag = &skb_shinfo(skb)->frags[i];

		mapping = pci_map_page(hw->pdev, frag->page, frag->page_offset,
				       frag->size, PCI_DMA_TODEVICE);

		if (pci_dma_mapping_error(hw->pdev, mapping))
			goto mapping_unwind;

		/* Each fragment may cross a 4G boundary too. */
		upper = upper_32_bits(mapping);
		if (upper != sky2->tx_last_upper) {
			le = get_tx_le(sky2, &slot);
			le->addr = cpu_to_le32(upper);
			sky2->tx_last_upper = upper;
			le->opcode = OP_ADDR64 | HW_OWNER;
		}

		re = sky2->tx_ring + slot;
		re->flags = TX_MAP_PAGE;
		dma_unmap_addr_set(re, mapaddr, mapping);
		dma_unmap_len_set(re, maplen, frag->size);

		le = get_tx_le(sky2, &slot);
		le->addr = cpu_to_le32(lower_32_bits(mapping));
		le->length = cpu_to_le16(frag->size);
		le->ctrl = ctrl;
		le->opcode = OP_BUFFER | HW_OWNER;
	}

	/* skb pointer lives on the LAST element so completion frees it
	 * only when the whole chain is done. */
	re->skb = skb;
	le->ctrl |= EOP;

	sky2->tx_prod = slot;

	if (tx_avail(sky2) <= MAX_SKB_TX_LE)
		netif_stop_queue(dev);

	/* Kick hardware: publish new producer index. */
	sky2_put_idx(hw, txqaddr[sky2->port], sky2->tx_prod);

	return NETDEV_TX_OK;

mapping_unwind:
	/* Unmap everything mapped so far (tx_prod was never advanced). */
	for (i = sky2->tx_prod; i != slot; i = RING_NEXT(i, sky2->tx_ring_size)) {
		re = sky2->tx_ring + i;

		sky2_tx_unmap(hw->pdev, re);
	}

mapping_error:
	if (net_ratelimit())
		dev_warn(&hw->pdev->dev, "%s: tx mapping error\n", dev->name);
	dev_kfree_skb(skb);
	return NETDEV_TX_OK;
}
1964 | |||
/*
 * Free ring elements from starting at tx_cons until "done"
 *
 * NB:
 * 1. The hardware will tell us about partial completion of multi-part
 *    buffers so make sure not to free skb too early.
 * 2. This may run in parallel with start_xmit because it only
 *    looks at the tail of the queue of FIFO (tx_cons), not
 *    the head (tx_prod)
 */
static void sky2_tx_complete(struct sky2_port *sky2, u16 done)
{
	struct net_device *dev = sky2->netdev;
	unsigned idx;

	BUG_ON(done >= sky2->tx_ring_size);

	for (idx = sky2->tx_cons; idx != done;
	     idx = RING_NEXT(idx, sky2->tx_ring_size)) {
		struct tx_ring_info *re = sky2->tx_ring + idx;
		struct sk_buff *skb = re->skb;

		sky2_tx_unmap(sky2->hw->pdev, re);

		/* Only the last element of a packet carries the skb. */
		if (skb) {
			netif_printk(sky2, tx_done, KERN_DEBUG, dev,
				     "tx done %u\n", idx);

			u64_stats_update_begin(&sky2->tx_stats.syncp);
			++sky2->tx_stats.packets;
			sky2->tx_stats.bytes += skb->len;
			u64_stats_update_end(&sky2->tx_stats.syncp);

			re->skb = NULL;
			dev_kfree_skb_any(skb);

			sky2->tx_next = RING_NEXT(idx, sky2->tx_ring_size);
		}
	}

	sky2->tx_cons = idx;
	/* Make the new tx_cons visible before the queue is woken. */
	smp_mb();
}
2008 | |||
/* Put the transmit path of one port into reset: arbiter, BMU FIFO,
 * prefetch unit, RAM buffer and MAC FIFO.  Used on shutdown/restart.
 */
static void sky2_tx_reset(struct sky2_hw *hw, unsigned port)
{
	/* Disable Force Sync bit and Enable Alloc bit */
	sky2_write8(hw, SK_REG(port, TXA_CTRL),
		    TXA_DIS_FSYNC | TXA_DIS_ALLOC | TXA_STOP_RC);

	/* Stop Interval Timer and Limit Counter of Tx Arbiter */
	sky2_write32(hw, SK_REG(port, TXA_ITI_INI), 0L);
	sky2_write32(hw, SK_REG(port, TXA_LIM_INI), 0L);

	/* Reset the PCI FIFO of the async Tx queue */
	sky2_write32(hw, Q_ADDR(txqaddr[port], Q_CSR),
		     BMU_RST_SET | BMU_FIFO_RST);

	/* Reset the Tx prefetch units */
	sky2_write32(hw, Y2_QADDR(txqaddr[port], PREF_UNIT_CTRL),
		     PREF_UNIT_RST_SET);

	sky2_write32(hw, RB_ADDR(txqaddr[port], RB_CTRL), RB_RST_SET);
	sky2_write8(hw, SK_REG(port, TX_GMF_CTRL_T), GMF_RST_SET);
}
2030 | |||
/* Quiesce the hardware for one port: stop transmitter and receiver,
 * reset MAC/PHY, then reap any frames still queued in the hardware.
 * Caller must have already masked the port interrupt (see sky2_down()).
 */
static void sky2_hw_down(struct sky2_port *sky2)
{
	struct sky2_hw *hw = sky2->hw;
	unsigned port = sky2->port;
	u16 ctrl;

	/* Force flow control off */
	sky2_write8(hw, SK_REG(port, GMAC_CTRL), GMC_PAUSE_OFF);

	/* Stop transmitter */
	sky2_write32(hw, Q_ADDR(txqaddr[port], Q_CSR), BMU_STOP);
	/* Read back to flush the posted write. */
	sky2_read32(hw, Q_ADDR(txqaddr[port], Q_CSR));

	sky2_write32(hw, RB_ADDR(txqaddr[port], RB_CTRL),
		     RB_RST_SET | RB_DIS_OP_MD);

	/* Disable rx/tx in the MAC general purpose control. */
	ctrl = gma_read16(hw, port, GM_GP_CTRL);
	ctrl &= ~(GM_GPCR_TX_ENA | GM_GPCR_RX_ENA);
	gma_write16(hw, port, GM_GP_CTRL, ctrl);

	sky2_write8(hw, SK_REG(port, GPHY_CTRL), GPC_RST_SET);

	/* Workaround shared GMAC reset */
	if (!(hw->chip_id == CHIP_ID_YUKON_XL && hw->chip_rev == 0 &&
	      port == 0 && hw->dev[1] && netif_running(hw->dev[1])))
		sky2_write8(hw, SK_REG(port, GMAC_CTRL), GMC_RST_SET);

	sky2_write8(hw, SK_REG(port, RX_GMF_CTRL_T), GMF_RST_SET);

	/* Force any delayed status interrupt and NAPI */
	sky2_write32(hw, STAT_LEV_TIMER_CNT, 0);
	sky2_write32(hw, STAT_TX_TIMER_CNT, 0);
	sky2_write32(hw, STAT_ISR_TIMER_CNT, 0);
	sky2_read8(hw, STAT_ISR_TIMER_CTRL);

	sky2_rx_stop(sky2);

	spin_lock_bh(&sky2->phy_lock);
	sky2_phy_power_down(hw, port);
	spin_unlock_bh(&sky2->phy_lock);

	sky2_tx_reset(hw, port);

	/* Free any pending frames stuck in HW queue */
	sky2_tx_complete(sky2, sky2->tx_prod);
}
2077 | |||
/* Network shutdown (ndo_stop).
 * Masks the port interrupt, waits for in-flight IRQ/NAPI work, then
 * tears down the hardware and frees the rings.  Always returns 0.
 */
static int sky2_down(struct net_device *dev)
{
	struct sky2_port *sky2 = netdev_priv(dev);
	struct sky2_hw *hw = sky2->hw;

	/* Never really got started! */
	if (!sky2->tx_le)
		return 0;

	netif_info(sky2, ifdown, dev, "disabling interface\n");

	/* Disable port IRQ */
	sky2_write32(hw, B0_IMSK,
		     sky2_read32(hw, B0_IMSK) & ~portirq_msk[sky2->port]);
	/* Read back to flush the posted write. */
	sky2_read32(hw, B0_IMSK);

	/* Wait out any handler already running for this device. */
	synchronize_irq(hw->pdev->irq);
	napi_synchronize(&hw->napi);

	sky2_hw_down(sky2);

	sky2_free_buffers(sky2);

	return 0;
}
2104 | |||
2105 | static u16 sky2_phy_speed(const struct sky2_hw *hw, u16 aux) | ||
2106 | { | ||
2107 | if (hw->flags & SKY2_HW_FIBRE_PHY) | ||
2108 | return SPEED_1000; | ||
2109 | |||
2110 | if (!(hw->flags & SKY2_HW_GIGABIT)) { | ||
2111 | if (aux & PHY_M_PS_SPEED_100) | ||
2112 | return SPEED_100; | ||
2113 | else | ||
2114 | return SPEED_10; | ||
2115 | } | ||
2116 | |||
2117 | switch (aux & PHY_M_PS_SPEED_MSK) { | ||
2118 | case PHY_M_PS_SPEED_1000: | ||
2119 | return SPEED_1000; | ||
2120 | case PHY_M_PS_SPEED_100: | ||
2121 | return SPEED_100; | ||
2122 | default: | ||
2123 | return SPEED_10; | ||
2124 | } | ||
2125 | } | ||
2126 | |||
/* Handle link-up: program inter-packet gap, enable rx/tx, unmask PHY
 * interrupts, mark carrier on, start the watchdog and light the link LED.
 * Called from PHY interrupt context with phy_lock held (see sky2_phy_intr).
 */
static void sky2_link_up(struct sky2_port *sky2)
{
	struct sky2_hw *hw = sky2->hw;
	unsigned port = sky2->port;
	/* Indexed by enum flow_control for the log message below. */
	static const char *fc_name[] = {
		[FC_NONE]	= "none",
		[FC_TX]		= "tx",
		[FC_RX]		= "rx",
		[FC_BOTH]	= "both",
	};

	sky2_set_ipg(sky2);

	sky2_enable_rx_tx(sky2);

	gm_phy_write(hw, port, PHY_MARV_INT_MASK, PHY_M_DEF_MSK);

	netif_carrier_on(sky2->netdev);

	/* Re-arm the watchdog immediately now that the link is active. */
	mod_timer(&hw->watchdog_timer, jiffies + 1);

	/* Turn on link LED */
	sky2_write8(hw, SK_REG(port, LNK_LED_REG),
		    LINKLED_ON | LINKLED_BLINK_OFF | LINKLED_LINKSYNC_OFF);

	netif_info(sky2, link, sky2->netdev,
		   "Link is up at %d Mbps, %s duplex, flow control %s\n",
		   sky2->speed,
		   sky2->duplex == DUPLEX_FULL ? "full" : "half",
		   fc_name[sky2->flow_status]);
}
2158 | |||
/* Handle link-down: mask PHY interrupts, disable rx/tx in the MAC,
 * mark carrier off, extinguish the link LED and re-start PHY
 * initialization/autonegotiation.
 */
static void sky2_link_down(struct sky2_port *sky2)
{
	struct sky2_hw *hw = sky2->hw;
	unsigned port = sky2->port;
	u16 reg;

	gm_phy_write(hw, port, PHY_MARV_INT_MASK, 0);

	reg = gma_read16(hw, port, GM_GP_CTRL);
	reg &= ~(GM_GPCR_RX_ENA | GM_GPCR_TX_ENA);
	gma_write16(hw, port, GM_GP_CTRL, reg);

	netif_carrier_off(sky2->netdev);

	/* Turn off link LED */
	sky2_write8(hw, SK_REG(port, LNK_LED_REG), LINKLED_OFF);

	netif_info(sky2, link, sky2->netdev, "Link is down\n");

	/* Kick off a fresh autonegotiation cycle. */
	sky2_phy_init(hw, port);
}
2180 | |||
2181 | static enum flow_control sky2_flow(int rx, int tx) | ||
2182 | { | ||
2183 | if (rx) | ||
2184 | return tx ? FC_BOTH : FC_RX; | ||
2185 | else | ||
2186 | return tx ? FC_TX : FC_NONE; | ||
2187 | } | ||
2188 | |||
/* Resolve the result of autonegotiation from PHY registers: speed,
 * duplex and pause (flow control), then program the MAC pause mode.
 * Returns 0 on success, -1 on remote fault or speed/duplex mismatch.
 */
static int sky2_autoneg_done(struct sky2_port *sky2, u16 aux)
{
	struct sky2_hw *hw = sky2->hw;
	unsigned port = sky2->port;
	u16 advert, lpa;

	advert = gm_phy_read(hw, port, PHY_MARV_AUNE_ADV);
	lpa = gm_phy_read(hw, port, PHY_MARV_AUNE_LP);
	if (lpa & PHY_M_AN_RF) {
		netdev_err(sky2->netdev, "remote fault\n");
		return -1;
	}

	if (!(aux & PHY_M_PS_SPDUP_RES)) {
		netdev_err(sky2->netdev, "speed/duplex mismatch\n");
		return -1;
	}

	sky2->speed = sky2_phy_speed(hw, aux);
	sky2->duplex = (aux & PHY_M_PS_FULL_DUP) ? DUPLEX_FULL : DUPLEX_HALF;

	/* Since the pause result bits seem to be in different positions on
	 * different chips, look at registers.
	 */
	if (hw->flags & SKY2_HW_FIBRE_PHY) {
		/* Map fiber PHY pause bits onto the copper bit positions
		 * so the resolution logic below is shared. */
		advert &= ~(ADVERTISE_PAUSE_CAP|ADVERTISE_PAUSE_ASYM);
		lpa &= ~(LPA_PAUSE_CAP|LPA_PAUSE_ASYM);

		if (advert & ADVERTISE_1000XPAUSE)
			advert |= ADVERTISE_PAUSE_CAP;
		if (advert & ADVERTISE_1000XPSE_ASYM)
			advert |= ADVERTISE_PAUSE_ASYM;
		if (lpa & LPA_1000XPAUSE)
			lpa |= LPA_PAUSE_CAP;
		if (lpa & LPA_1000XPAUSE_ASYM)
			lpa |= LPA_PAUSE_ASYM;
	}

	/* Standard pause resolution from advertised vs. partner bits. */
	sky2->flow_status = FC_NONE;
	if (advert & ADVERTISE_PAUSE_CAP) {
		if (lpa & LPA_PAUSE_CAP)
			sky2->flow_status = FC_BOTH;
		else if (advert & ADVERTISE_PAUSE_ASYM)
			sky2->flow_status = FC_RX;
	} else if (advert & ADVERTISE_PAUSE_ASYM) {
		if ((lpa & LPA_PAUSE_CAP) && (lpa & LPA_PAUSE_ASYM))
			sky2->flow_status = FC_TX;
	}

	/* No pause at half duplex below gigabit, except on EC-U/EX chips. */
	if (sky2->duplex == DUPLEX_HALF && sky2->speed < SPEED_1000 &&
	    !(hw->chip_id == CHIP_ID_YUKON_EC_U || hw->chip_id == CHIP_ID_YUKON_EX))
		sky2->flow_status = FC_NONE;

	if (sky2->flow_status & FC_TX)
		sky2_write8(hw, SK_REG(port, GMAC_CTRL), GMC_PAUSE_ON);
	else
		sky2_write8(hw, SK_REG(port, GMAC_CTRL), GMC_PAUSE_OFF);

	return 0;
}
2250 | |||
/* Interrupt from PHY.
 * Reads and clears the PHY interrupt status, then dispatches on
 * autoneg-complete, speed/duplex change and link state change.
 * Serialized against other PHY access via phy_lock.
 */
static void sky2_phy_intr(struct sky2_hw *hw, unsigned port)
{
	struct net_device *dev = hw->dev[port];
	struct sky2_port *sky2 = netdev_priv(dev);
	u16 istatus, phystat;

	if (!netif_running(dev))
		return;

	spin_lock(&sky2->phy_lock);
	istatus = gm_phy_read(hw, port, PHY_MARV_INT_STAT);
	phystat = gm_phy_read(hw, port, PHY_MARV_PHY_STAT);

	netif_info(sky2, intr, sky2->netdev, "phy interrupt status 0x%x 0x%x\n",
		   istatus, phystat);

	if (istatus & PHY_M_IS_AN_COMPL) {
		/* Autoneg finished: bring link up only if resolution
		 * succeeded and carrier isn't already on. */
		if (sky2_autoneg_done(sky2, phystat) == 0 &&
		    !netif_carrier_ok(dev))
			sky2_link_up(sky2);
		goto out;
	}

	if (istatus & PHY_M_IS_LSP_CHANGE)
		sky2->speed = sky2_phy_speed(hw, phystat);

	if (istatus & PHY_M_IS_DUP_CHANGE)
		sky2->duplex =
		    (phystat & PHY_M_PS_FULL_DUP) ? DUPLEX_FULL : DUPLEX_HALF;

	if (istatus & PHY_M_IS_LST_CHANGE) {
		if (phystat & PHY_M_PS_LINK_UP)
			sky2_link_up(sky2);
		else
			sky2_link_down(sky2);
	}
out:
	spin_unlock(&sky2->phy_lock);
}
2291 | |||
/* Special quick link interrupt (Yukon-2 Optima only).
 * Masks further quick-link interrupts, resets the PHY link-detect bit
 * in PSM config (write-protected behind TST_CFG), and brings port 0 up.
 */
static void sky2_qlink_intr(struct sky2_hw *hw)
{
	struct sky2_port *sky2 = netdev_priv(hw->dev[0]);
	u32 imask;
	u16 phy;

	/* disable irq */
	imask = sky2_read32(hw, B0_IMSK);
	imask &= ~Y2_IS_PHY_QLNK;
	sky2_write32(hw, B0_IMSK, imask);

	/* reset PHY Link Detect */
	phy = sky2_pci_read16(hw, PSM_CONFIG_REG4);
	sky2_write8(hw, B2_TST_CTRL1, TST_CFG_WRITE_ON);
	sky2_pci_write16(hw, PSM_CONFIG_REG4, phy | 1);
	sky2_write8(hw, B2_TST_CTRL1, TST_CFG_WRITE_OFF);

	sky2_link_up(sky2);
}
2312 | |||
/* Transmit timeout is only called if we are running, carrier is up
 * and tx queue is full (stopped).
 * Logs ring state for diagnosis, then schedules a full restart from
 * process context (restart cannot be done safely under softirq).
 */
static void sky2_tx_timeout(struct net_device *dev)
{
	struct sky2_port *sky2 = netdev_priv(dev);
	struct sky2_hw *hw = sky2->hw;

	netif_err(sky2, timer, dev, "tx timeout\n");

	netdev_printk(KERN_DEBUG, dev, "transmit ring %u .. %u report=%u done=%u\n",
		      sky2->tx_cons, sky2->tx_prod,
		      sky2_read16(hw, sky2->port == 0 ? STAT_TXA1_RIDX : STAT_TXA2_RIDX),
		      sky2_read16(hw, Q_ADDR(txqaddr[sky2->port], Q_DONE)));

	/* can't restart safely under softirq */
	schedule_work(&hw->restart_work);
}
2331 | |||
/* Change the MTU (ndo_change_mtu).
 * If the interface is down this just stores the value; otherwise the
 * receive path is quiesced, reconfigured for the new frame size, and
 * restarted.  Returns 0 or -EINVAL for out-of-range/unsupported sizes;
 * on rx buffer re-allocation failure the device is closed.
 */
static int sky2_change_mtu(struct net_device *dev, int new_mtu)
{
	struct sky2_port *sky2 = netdev_priv(dev);
	struct sky2_hw *hw = sky2->hw;
	unsigned port = sky2->port;
	int err;
	u16 ctl, mode;
	u32 imask;

	/* MTU size outside the spec */
	if (new_mtu < ETH_ZLEN || new_mtu > ETH_JUMBO_MTU)
		return -EINVAL;

	/* MTU > 1500 on yukon FE and FE+ not allowed */
	if (new_mtu > ETH_DATA_LEN &&
	    (hw->chip_id == CHIP_ID_YUKON_FE ||
	     hw->chip_id == CHIP_ID_YUKON_FE_P))
		return -EINVAL;

	if (!netif_running(dev)) {
		dev->mtu = new_mtu;
		netdev_update_features(dev);
		return 0;
	}

	/* Mask all interrupts while reconfiguring; restored below. */
	imask = sky2_read32(hw, B0_IMSK);
	sky2_write32(hw, B0_IMSK, 0);

	dev->trans_start = jiffies;	/* prevent tx timeout */
	napi_disable(&hw->napi);
	netif_tx_disable(dev);

	synchronize_irq(hw->pdev->irq);

	if (!(hw->flags & SKY2_HW_RAM_BUFFER))
		sky2_set_tx_stfwd(hw, port);

	/* Stop the receiver and drain the rx ring. */
	ctl = gma_read16(hw, port, GM_GP_CTRL);
	gma_write16(hw, port, GM_GP_CTRL, ctl & ~GM_GPCR_RX_ENA);
	sky2_rx_stop(sky2);
	sky2_rx_clean(sky2);

	dev->mtu = new_mtu;
	netdev_update_features(dev);

	/* Rebuild serial mode: data blinder, VLAN, IPG per speed, jumbo. */
	mode = DATA_BLIND_VAL(DATA_BLIND_DEF) |	GM_SMOD_VLAN_ENA;
	if (sky2->speed > SPEED_100)
		mode |= IPG_DATA_VAL(IPG_DATA_DEF_1000);
	else
		mode |= IPG_DATA_VAL(IPG_DATA_DEF_10_100);

	if (dev->mtu > ETH_DATA_LEN)
		mode |= GM_SMOD_JUMBO_ENA;

	gma_write16(hw, port, GM_SERIAL_MODE, mode);

	sky2_write8(hw, RB_ADDR(rxqaddr[port], RB_CTRL), RB_ENA_OP_MD);

	err = sky2_alloc_rx_skbs(sky2);
	if (!err)
		sky2_rx_start(sky2);
	else
		sky2_rx_clean(sky2);
	sky2_write32(hw, B0_IMSK, imask);

	/* Reading the special LISR re-enables chip interrupt delivery. */
	sky2_read32(hw, B0_Y2_SP_LISR);
	napi_enable(&hw->napi);

	if (err)
		dev_close(dev);
	else {
		gma_write16(hw, port, GM_GP_CTRL, ctl);

		netif_wake_queue(dev);
	}

	return err;
}
2410 | |||
/* For small packets just reuse existing skb for next receive.
 * Copies the received data into a freshly allocated skb so the DMA
 * buffer in @re can stay mapped and be resubmitted.  Returns the new
 * skb, or NULL if allocation failed (caller counts it as a drop).
 */
static struct sk_buff *receive_copy(struct sky2_port *sky2,
				    const struct rx_ring_info *re,
				    unsigned length)
{
	struct sk_buff *skb;

	skb = netdev_alloc_skb_ip_align(sky2->netdev, length);
	if (likely(skb)) {
		/* Sync the DMA buffer for CPU access around the copy. */
		pci_dma_sync_single_for_cpu(sky2->hw->pdev, re->data_addr,
					    length, PCI_DMA_FROMDEVICE);
		skb_copy_from_linear_data(re->skb, skb->data, length);
		skb->ip_summed = re->skb->ip_summed;
		skb->csum = re->skb->csum;
		pci_dma_sync_single_for_device(sky2->hw->pdev, re->data_addr,
					       length, PCI_DMA_FROMDEVICE);
		/* Checksum state was transferred to the copy; clear it on
		 * the recycled skb so it isn't reused for the next frame. */
		re->skb->ip_summed = CHECKSUM_NONE;
		skb_put(skb, length);
	}
	return skb;
}
2432 | |||
/* Adjust length of skb with fragments to match received data.
 * The rx buffer was posted as a header area (@hdr_space) plus page
 * fragments; @length is the actual packet size.  Trims unused trailing
 * pages and fixes up skb length accounting.
 */
static void skb_put_frags(struct sk_buff *skb, unsigned int hdr_space,
			  unsigned int length)
{
	int i, num_frags;
	unsigned int size;

	/* put header into skb */
	size = min(length, hdr_space);
	skb->tail += size;
	skb->len += size;
	length -= size;

	num_frags = skb_shinfo(skb)->nr_frags;
	for (i = 0; i < num_frags; i++) {
		skb_frag_t *frag = &skb_shinfo(skb)->frags[i];

		if (length == 0) {
			/* don't need this page */
			__free_page(frag->page);
			--skb_shinfo(skb)->nr_frags;
		} else {
			size = min(length, (unsigned) PAGE_SIZE);

			frag->size = size;
			skb->data_len += size;
			skb->truesize += size;
			skb->len += size;
			length -= size;
		}
	}
}
2465 | |||
/* Normal packet - take skb from ring element and put in a new one.
 * Allocates and maps a replacement buffer first so the ring element is
 * never left empty; only on success is the received skb detached.
 * Returns the received skb, or NULL on alloc/map failure (frame dropped,
 * original buffer stays in the ring).
 */
static struct sk_buff *receive_new(struct sky2_port *sky2,
				   struct rx_ring_info *re,
				   unsigned int length)
{
	struct sk_buff *skb;
	struct rx_ring_info nre;
	unsigned hdr_space = sky2->rx_data_size;

	nre.skb = sky2_rx_alloc(sky2, GFP_ATOMIC);
	if (unlikely(!nre.skb))
		goto nobuf;

	if (sky2_rx_map_skb(sky2->hw->pdev, &nre, hdr_space))
		goto nomap;

	skb = re->skb;
	sky2_rx_unmap_skb(sky2->hw->pdev, re);
	prefetch(skb->data);
	/* Swap the replacement buffer into the ring element. */
	*re = nre;

	if (skb_shinfo(skb)->nr_frags)
		skb_put_frags(skb, hdr_space, length);
	else
		skb_put(skb, length);
	return skb;

nomap:
	dev_kfree_skb(nre.skb);
nobuf:
	return NULL;
}
2498 | |||
/*
 * Receive one packet.
 * For larger packets, get new buffer.
 *
 * @length is the byte count reported in the status LE; @status carries
 * the GMAC receive flags including the frame length from the PHY side.
 * Returns the skb to deliver, or NULL (frame dropped/errored); in all
 * cases the ring element is resubmitted to hardware.
 */
static struct sk_buff *sky2_receive(struct net_device *dev,
				    u16 length, u32 status)
{
	struct sky2_port *sky2 = netdev_priv(dev);
	struct rx_ring_info *re = sky2->rx_ring + sky2->rx_next;
	struct sk_buff *skb = NULL;
	u16 count = (status & GMR_FS_LEN) >> 16;

	if (status & GMR_FS_VLAN)
		count -= VLAN_HLEN;	/* Account for vlan tag */

	netif_printk(sky2, rx_status, KERN_DEBUG, dev,
		     "rx slot %u status 0x%x len %d\n",
		     sky2->rx_next, status, length);

	sky2->rx_next = (sky2->rx_next + 1) % sky2->rx_pending;
	prefetch(sky2->rx_ring + sky2->rx_next);

	/* This chip has hardware problems that generates bogus status.
	 * So do only marginal checking and expect higher level protocols
	 * to handle crap frames.
	 */
	if (sky2->hw->chip_id == CHIP_ID_YUKON_FE_P &&
	    sky2->hw->chip_rev == CHIP_REV_YU_FE2_A0 &&
	    length != count)
		goto okay;

	if (status & GMR_FS_ANY_ERR)
		goto error;

	if (!(status & GMR_FS_RX_OK))
		goto resubmit;

	/* if length reported by DMA does not match PHY, packet was truncated */
	if (length != count)
		goto error;

okay:
	/* Small frames are copied (buffer recycled); large ones swap in
	 * a fresh buffer. */
	if (length < copybreak)
		skb = receive_copy(sky2, re, length);
	else
		skb = receive_new(sky2, re, length);

	dev->stats.rx_dropped += (skb == NULL);

resubmit:
	sky2_rx_submit(sky2, re);

	return skb;

error:
	++dev->stats.rx_errors;

	if (net_ratelimit())
		netif_info(sky2, rx_err, dev,
			   "rx error, status 0x%x length %d\n", status, length);

	goto resubmit;
}
2562 | |||
2563 | /* Transmit complete */ | ||
2564 | static inline void sky2_tx_done(struct net_device *dev, u16 last) | ||
2565 | { | ||
2566 | struct sky2_port *sky2 = netdev_priv(dev); | ||
2567 | |||
2568 | if (netif_running(dev)) { | ||
2569 | sky2_tx_complete(sky2, last); | ||
2570 | |||
2571 | /* Wake unless it's detached, and called e.g. from sky2_down() */ | ||
2572 | if (tx_avail(sky2) > MAX_SKB_TX_LE + 4) | ||
2573 | netif_wake_queue(dev); | ||
2574 | } | ||
2575 | } | ||
2576 | |||
2577 | static inline void sky2_skb_rx(const struct sky2_port *sky2, | ||
2578 | u32 status, struct sk_buff *skb) | ||
2579 | { | ||
2580 | if (status & GMR_FS_VLAN) | ||
2581 | __vlan_hwaccel_put_tag(skb, be16_to_cpu(sky2->rx_tag)); | ||
2582 | |||
2583 | if (skb->ip_summed == CHECKSUM_NONE) | ||
2584 | netif_receive_skb(skb); | ||
2585 | else | ||
2586 | napi_gro_receive(&sky2->hw->napi, skb); | ||
2587 | } | ||
2588 | |||
/* Credit received packets/bytes to a port's 64-bit stats and kick the
 * receive ring so the hardware sees the resubmitted buffers.
 */
static inline void sky2_rx_done(struct sky2_hw *hw, unsigned port,
				unsigned packets, unsigned bytes)
{
	struct net_device *dev = hw->dev[port];
	struct sky2_port *sky2 = netdev_priv(dev);

	if (packets == 0)
		return;

	/* syncp guards the 64-bit counters against torn reads on 32-bit */
	u64_stats_update_begin(&sky2->rx_stats.syncp);
	sky2->rx_stats.packets += packets;
	sky2->rx_stats.bytes += bytes;
	u64_stats_update_end(&sky2->rx_stats.syncp);

	dev->last_rx = jiffies;
	sky2_rx_update(netdev_priv(dev), rxqaddr[port]);
}
2606 | |||
/* Handle a legacy-format (pre SKY2_HW_NEW_LE) receive checksum status
 * entry; on counter mismatch, permanently disable RX checksum offload.
 */
static void sky2_rx_checksum(struct sky2_port *sky2, u32 status)
{
	/* If this happens then driver assuming wrong format for chip type */
	BUG_ON(sky2->hw->flags & SKY2_HW_NEW_LE);

	/* Both checksum counters are programmed to start at
	 * the same offset, so unless there is a problem they
	 * should match. This failure is an early indication that
	 * hardware receive checksumming won't work.
	 */
	if (likely((u16)(status >> 16) == (u16)status)) {
		struct sk_buff *skb = sky2->rx_ring[sky2->rx_next].skb;
		skb->ip_summed = CHECKSUM_COMPLETE;
		skb->csum = le16_to_cpu(status);
	} else {
		dev_notice(&sky2->hw->pdev->dev,
			   "%s: receive checksum problem (status = %#x)\n",
			   sky2->netdev->name, status);

		/* Disable checksum offload
		 * It will be reenabled on next ndo_set_features, but if it's
		 * really broken, will get disabled again
		 */
		sky2->netdev->features &= ~NETIF_F_RXCSUM;
		sky2_write32(sky2->hw, Q_ADDR(rxqaddr[sky2->port], Q_CSR),
			     BMU_DIS_RX_CHKSUM);
	}
}
2635 | |||
2636 | static void sky2_rx_hash(struct sky2_port *sky2, u32 status) | ||
2637 | { | ||
2638 | struct sk_buff *skb; | ||
2639 | |||
2640 | skb = sky2->rx_ring[sky2->rx_next].skb; | ||
2641 | skb->rxhash = le32_to_cpu(status); | ||
2642 | } | ||
2643 | |||
/* Process status response ring */
static int sky2_status_intr(struct sky2_hw *hw, int to_do, u16 idx)
{
	int work_done = 0;
	/* Per-port receive totals accumulated over this pass */
	unsigned int total_bytes[2] = { 0 };
	unsigned int total_packets[2] = { 0 };

	/* Ensure DMA writes to the status ring are visible before reading */
	rmb();
	do {
		struct sky2_port *sky2;
		struct sky2_status_le *le = hw->st_le + hw->st_idx;
		unsigned port;
		struct net_device *dev;
		struct sk_buff *skb;
		u32 status;
		u16 length;
		u8 opcode = le->opcode;

		/* HW_OWNER set means the hardware has filled this entry */
		if (!(opcode & HW_OWNER))
			break;

		hw->st_idx = RING_NEXT(hw->st_idx, hw->st_size);

		port = le->css & CSS_LINK_BIT;
		dev = hw->dev[port];
		sky2 = netdev_priv(dev);
		length = le16_to_cpu(le->length);
		status = le32_to_cpu(le->status);

		/* Release ownership so the entry isn't processed twice */
		le->opcode = 0;
		switch (opcode & ~HW_OWNER) {
		case OP_RXSTAT:
			total_packets[port]++;
			total_bytes[port] += length;

			skb = sky2_receive(dev, length, status);
			if (!skb)
				break;

			/* This chip reports checksum status differently */
			if (hw->flags & SKY2_HW_NEW_LE) {
				if ((dev->features & NETIF_F_RXCSUM) &&
				    (le->css & (CSS_ISIPV4 | CSS_ISIPV6)) &&
				    (le->css & CSS_TCPUDPCSOK))
					skb->ip_summed = CHECKSUM_UNNECESSARY;
				else
					skb->ip_summed = CHECKSUM_NONE;
			}

			skb->protocol = eth_type_trans(skb, dev);

			sky2_skb_rx(sky2, status, skb);

			/* Stop after net poll weight */
			if (++work_done >= to_do)
				goto exit_loop;
			break;

		case OP_RXVLAN:
			/* VLAN tag arrives in a separate entry before RXSTAT */
			sky2->rx_tag = length;
			break;

		case OP_RXCHKSVLAN:
			sky2->rx_tag = length;
			/* fall through */
		case OP_RXCHKS:
			if (likely(dev->features & NETIF_F_RXCSUM))
				sky2_rx_checksum(sky2, status);
			break;

		case OP_RSS_HASH:
			sky2_rx_hash(sky2, status);
			break;

		case OP_TXINDEXLE:
			/* TX index reports status for both ports */
			sky2_tx_done(hw->dev[0], status & 0xfff);
			if (hw->dev[1])
				sky2_tx_done(hw->dev[1],
				     ((status >> 24) & 0xff)
					     | (u16)(length & 0xf) << 8);
			break;

		default:
			if (net_ratelimit())
				pr_warning("unknown status opcode 0x%x\n", opcode);
		}
	} while (hw->st_idx != idx);

	/* Fully processed status ring so clear irq */
	sky2_write32(hw, STAT_CTRL, SC_STAT_CLR_IRQ);

exit_loop:
	sky2_rx_done(hw, 0, total_packets[0], total_bytes[0]);
	sky2_rx_done(hw, 1, total_packets[1], total_bytes[1]);

	return work_done;
}
2742 | |||
/* Report and clear per-port RAM/MAC/BMU parity and TSO error
 * interrupts; each source has its own clear register.
 */
static void sky2_hw_error(struct sky2_hw *hw, unsigned port, u32 status)
{
	struct net_device *dev = hw->dev[port];

	if (net_ratelimit())
		netdev_info(dev, "hw error interrupt status 0x%x\n", status);

	if (status & Y2_IS_PAR_RD1) {
		if (net_ratelimit())
			netdev_err(dev, "ram data read parity error\n");
		/* Clear IRQ */
		sky2_write16(hw, RAM_BUFFER(port, B3_RI_CTRL), RI_CLR_RD_PERR);
	}

	if (status & Y2_IS_PAR_WR1) {
		if (net_ratelimit())
			netdev_err(dev, "ram data write parity error\n");

		sky2_write16(hw, RAM_BUFFER(port, B3_RI_CTRL), RI_CLR_WR_PERR);
	}

	if (status & Y2_IS_PAR_MAC1) {
		if (net_ratelimit())
			netdev_err(dev, "MAC parity error\n");
		sky2_write8(hw, SK_REG(port, TX_GMF_CTRL_T), GMF_CLI_TX_PE);
	}

	if (status & Y2_IS_PAR_RX1) {
		if (net_ratelimit())
			netdev_err(dev, "RX parity error\n");
		sky2_write32(hw, Q_ADDR(rxqaddr[port], Q_CSR), BMU_CLR_IRQ_PAR);
	}

	if (status & Y2_IS_TCP_TXA1) {
		if (net_ratelimit())
			netdev_err(dev, "TCP segmentation error\n");
		sky2_write32(hw, Q_ADDR(txqaddr[port], Q_CSR), BMU_CLR_IRQ_TCP);
	}
}
2782 | |||
/* Dispatch hardware error interrupts: timestamp overflow, PCI and
 * PCI-Express errors, then per-port errors (port 1's bits sit one
 * byte above port 0's, hence the shift at the end).
 */
static void sky2_hw_intr(struct sky2_hw *hw)
{
	struct pci_dev *pdev = hw->pdev;
	u32 status = sky2_read32(hw, B0_HWE_ISRC);
	u32 hwmsk = sky2_read32(hw, B0_HWE_IMSK);

	/* Only act on sources that are actually enabled */
	status &= hwmsk;

	if (status & Y2_IS_TIST_OV)
		sky2_write8(hw, GMAC_TI_ST_CTRL, GMT_ST_CLR_IRQ);

	if (status & (Y2_IS_MST_ERR | Y2_IS_IRQ_STAT)) {
		u16 pci_err;

		/* PCI config space is write-protected; open it to clear */
		sky2_write8(hw, B2_TST_CTRL1, TST_CFG_WRITE_ON);
		pci_err = sky2_pci_read16(hw, PCI_STATUS);
		if (net_ratelimit())
			dev_err(&pdev->dev, "PCI hardware error (0x%x)\n",
				pci_err);

		sky2_pci_write16(hw, PCI_STATUS,
				      pci_err | PCI_STATUS_ERROR_BITS);
		sky2_write8(hw, B2_TST_CTRL1, TST_CFG_WRITE_OFF);
	}

	if (status & Y2_IS_PCI_EXP) {
		/* PCI-Express uncorrectable Error occurred */
		u32 err;

		sky2_write8(hw, B2_TST_CTRL1, TST_CFG_WRITE_ON);
		err = sky2_read32(hw, Y2_CFG_AER + PCI_ERR_UNCOR_STATUS);
		sky2_write32(hw, Y2_CFG_AER + PCI_ERR_UNCOR_STATUS,
			     0xfffffffful);
		if (net_ratelimit())
			dev_err(&pdev->dev, "PCI Express error (0x%x)\n", err);

		/* Read back to flush the clear before re-protecting config */
		sky2_read32(hw, Y2_CFG_AER + PCI_ERR_UNCOR_STATUS);
		sky2_write8(hw, B2_TST_CTRL1, TST_CFG_WRITE_OFF);
	}

	if (status & Y2_HWE_L1_MASK)
		sky2_hw_error(hw, 0, status);
	status >>= 8;
	if (status & Y2_HWE_L1_MASK)
		sky2_hw_error(hw, 1, status);
}
2829 | |||
/* Handle GMAC interrupts: counter-overflow sources are cleared by
 * reading the IRQ source registers; FIFO over/underruns are counted
 * and cleared explicitly.
 */
static void sky2_mac_intr(struct sky2_hw *hw, unsigned port)
{
	struct net_device *dev = hw->dev[port];
	struct sky2_port *sky2 = netdev_priv(dev);
	u8 status = sky2_read8(hw, SK_REG(port, GMAC_IRQ_SRC));

	netif_info(sky2, intr, dev, "mac interrupt status 0x%x\n", status);

	/* read clears the RX counter-overflow interrupt */
	if (status & GM_IS_RX_CO_OV)
		gma_read16(hw, port, GM_RX_IRQ_SRC);

	/* read clears the TX counter-overflow interrupt */
	if (status & GM_IS_TX_CO_OV)
		gma_read16(hw, port, GM_TX_IRQ_SRC);

	if (status & GM_IS_RX_FF_OR) {
		++dev->stats.rx_fifo_errors;
		sky2_write8(hw, SK_REG(port, RX_GMF_CTRL_T), GMF_CLI_RX_FO);
	}

	if (status & GM_IS_TX_FF_UR) {
		++dev->stats.tx_fifo_errors;
		sky2_write8(hw, SK_REG(port, TX_GMF_CTRL_T), GMF_CLI_TX_FU);
	}
}
2854 | |||
/* This should never happen it is a bug. */
static void sky2_le_error(struct sky2_hw *hw, unsigned port, u16 q)
{
	struct net_device *dev = hw->dev[port];
	u16 idx = sky2_read16(hw, Y2_QADDR(q, PREF_UNIT_GET_IDX));

	/* Log the queue's get/put indices to help locate the bad element */
	dev_err(&hw->pdev->dev, "%s: descriptor error q=%#x get=%u put=%u\n",
		dev->name, (unsigned) q, (unsigned) idx,
		(unsigned) sky2_read16(hw, Y2_QADDR(q, PREF_UNIT_PUT_IDX)));

	sky2_write32(hw, Q_ADDR(q, Q_CSR), BMU_CLR_IRQ_CHK);
}
2867 | |||
/* Heuristic receiver-hang detector: compares MAC/FIFO read pointers and
 * fill levels against the values sampled on the previous watchdog tick.
 * Returns 1 when the receive path appears stuck, 0 otherwise.
 */
static int sky2_rx_hung(struct net_device *dev)
{
	struct sky2_port *sky2 = netdev_priv(dev);
	struct sky2_hw *hw = sky2->hw;
	unsigned port = sky2->port;
	unsigned rxq = rxqaddr[port];
	u32 mac_rp = sky2_read32(hw, SK_REG(port, RX_GMF_RP));
	u8 mac_lev = sky2_read8(hw, SK_REG(port, RX_GMF_RLEV));
	u8 fifo_rp = sky2_read8(hw, Q_ADDR(rxq, Q_RP));
	u8 fifo_lev = sky2_read8(hw, Q_ADDR(rxq, Q_RL));

	/* If idle and MAC or PCI is stuck */
	if (sky2->check.last == dev->last_rx &&
	    ((mac_rp == sky2->check.mac_rp &&
	      mac_lev != 0 && mac_lev >= sky2->check.mac_lev) ||
	     /* Check if the PCI RX hang */
	     (fifo_rp == sky2->check.fifo_rp &&
	      fifo_lev != 0 && fifo_lev >= sky2->check.fifo_lev))) {
		netdev_printk(KERN_DEBUG, dev,
			      "hung mac %d:%d fifo %d (%d:%d)\n",
			      mac_lev, mac_rp, fifo_lev,
			      fifo_rp, sky2_read8(hw, Q_ADDR(rxq, Q_WP)));
		return 1;
	} else {
		/* Not hung: remember current state for the next check */
		sky2->check.last = dev->last_rx;
		sky2->check.mac_rp = mac_rp;
		sky2->check.mac_lev = mac_lev;
		sky2->check.fifo_rp = fifo_rp;
		sky2->check.fifo_lev = fifo_lev;
		return 0;
	}
}
2900 | |||
/* One-second timer: recovers from lost interrupts and, on chips with a
 * RAM buffer, checks for receiver hangs; reschedules itself while any
 * port remains active.
 */
static void sky2_watchdog(unsigned long arg)
{
	struct sky2_hw *hw = (struct sky2_hw *) arg;

	/* Check for lost IRQ once a second */
	if (sky2_read32(hw, B0_ISRC)) {
		napi_schedule(&hw->napi);
	} else {
		int i, active = 0;

		for (i = 0; i < hw->ports; i++) {
			struct net_device *dev = hw->dev[i];
			if (!netif_running(dev))
				continue;
			++active;

			/* For chips with Rx FIFO, check if stuck */
			if ((hw->flags & SKY2_HW_RAM_BUFFER) &&
			    sky2_rx_hung(dev)) {
				netdev_info(dev, "receiver hang detected\n");
				/* restart work will reset the chip; stop timer */
				schedule_work(&hw->restart_work);
				return;
			}
		}

		/* No running ports: let the timer lapse */
		if (active == 0)
			return;
	}

	mod_timer(&hw->watchdog_timer, round_jiffies(jiffies + HZ));
}
2932 | |||
/* Hardware/software error handling */
static void sky2_err_intr(struct sky2_hw *hw, u32 status)
{
	if (net_ratelimit())
		dev_warn(&hw->pdev->dev, "error interrupt status=%#x\n", status);

	/* Fan each error source out to its dedicated handler */
	if (status & Y2_IS_HW_ERR)
		sky2_hw_intr(hw);

	if (status & Y2_IS_IRQ_MAC1)
		sky2_mac_intr(hw, 0);

	if (status & Y2_IS_IRQ_MAC2)
		sky2_mac_intr(hw, 1);

	if (status & Y2_IS_CHK_RX1)
		sky2_le_error(hw, 0, Q_R1);

	if (status & Y2_IS_CHK_RX2)
		sky2_le_error(hw, 1, Q_R2);

	if (status & Y2_IS_CHK_TXA1)
		sky2_le_error(hw, 0, Q_XA1);

	if (status & Y2_IS_CHK_TXA2)
		sky2_le_error(hw, 1, Q_XA2);
}
2960 | |||
/* NAPI poll routine: handle error/PHY/quick-link interrupts, then drain
 * the status ring up to the budget.  Interrupts are re-enabled only
 * when the ring was fully drained (budget not exhausted).
 */
static int sky2_poll(struct napi_struct *napi, int work_limit)
{
	struct sky2_hw *hw = container_of(napi, struct sky2_hw, napi);
	u32 status = sky2_read32(hw, B0_Y2_SP_EISR);
	int work_done = 0;
	u16 idx;

	if (unlikely(status & Y2_IS_ERROR))
		sky2_err_intr(hw, status);

	if (status & Y2_IS_IRQ_PHY1)
		sky2_phy_intr(hw, 0);

	if (status & Y2_IS_IRQ_PHY2)
		sky2_phy_intr(hw, 1);

	if (status & Y2_IS_PHY_QLNK)
		sky2_qlink_intr(hw);

	/* Keep going while hardware keeps appending status entries */
	while ((idx = sky2_read16(hw, STAT_PUT_IDX)) != hw->st_idx) {
		work_done += sky2_status_intr(hw, work_limit - work_done, idx);

		if (work_done >= work_limit)
			goto done;
	}

	napi_complete(napi);
	/* Reading LISR re-enables interrupts as a side effect */
	sky2_read32(hw, B0_Y2_SP_LISR);
done:

	return work_done;
}
2993 | |||
/* Hard interrupt handler: masks interrupts via the ISRC2 read side
 * effect and defers all work to NAPI.
 */
static irqreturn_t sky2_intr(int irq, void *dev_id)
{
	struct sky2_hw *hw = dev_id;
	u32 status;

	/* Reading this mask interrupts as side effect */
	status = sky2_read32(hw, B0_Y2_SP_ISRC2);
	/* 0 = not ours (shared line); ~0 = device gone */
	if (status == 0 || status == ~0)
		return IRQ_NONE;

	/* Warm the cache line NAPI will read first */
	prefetch(&hw->st_le[hw->st_idx]);

	napi_schedule(&hw->napi);

	return IRQ_HANDLED;
}
3010 | |||
#ifdef CONFIG_NET_POLL_CONTROLLER
/* Polling hook for netconsole etc.: just kick NAPI on the shared hw */
static void sky2_netpoll(struct net_device *dev)
{
	struct sky2_port *port = netdev_priv(dev);
	struct sky2_hw *hw = port->hw;

	napi_schedule(&hw->napi);
}
#endif
3019 | |||
3020 | /* Chip internal frequency for clock calculations */ | ||
3021 | static u32 sky2_mhz(const struct sky2_hw *hw) | ||
3022 | { | ||
3023 | switch (hw->chip_id) { | ||
3024 | case CHIP_ID_YUKON_EC: | ||
3025 | case CHIP_ID_YUKON_EC_U: | ||
3026 | case CHIP_ID_YUKON_EX: | ||
3027 | case CHIP_ID_YUKON_SUPR: | ||
3028 | case CHIP_ID_YUKON_UL_2: | ||
3029 | case CHIP_ID_YUKON_OPT: | ||
3030 | case CHIP_ID_YUKON_PRM: | ||
3031 | case CHIP_ID_YUKON_OP_2: | ||
3032 | return 125; | ||
3033 | |||
3034 | case CHIP_ID_YUKON_FE: | ||
3035 | return 100; | ||
3036 | |||
3037 | case CHIP_ID_YUKON_FE_P: | ||
3038 | return 50; | ||
3039 | |||
3040 | case CHIP_ID_YUKON_XL: | ||
3041 | return 156; | ||
3042 | |||
3043 | default: | ||
3044 | BUG(); | ||
3045 | } | ||
3046 | } | ||
3047 | |||
3048 | static inline u32 sky2_us2clk(const struct sky2_hw *hw, u32 us) | ||
3049 | { | ||
3050 | return sky2_mhz(hw) * us; | ||
3051 | } | ||
3052 | |||
3053 | static inline u32 sky2_clk2us(const struct sky2_hw *hw, u32 clk) | ||
3054 | { | ||
3055 | return clk / sky2_mhz(hw); | ||
3056 | } | ||
3057 | |||
3058 | |||
/* Identify the chip, reject unsupported revisions, and record
 * capability/workaround flags, PHY type and port count.
 * Returns 0 on success or -EOPNOTSUPP for unknown hardware.
 */
static int __devinit sky2_init(struct sky2_hw *hw)
{
	u8 t8;

	/* Enable all clocks and check for bad PCI access */
	sky2_pci_write32(hw, PCI_DEV_REG3, 0);

	sky2_write8(hw, B0_CTST, CS_RST_CLR);

	hw->chip_id = sky2_read8(hw, B2_CHIP_ID);
	hw->chip_rev = (sky2_read8(hw, B2_MAC_CFG) & CFG_CHIP_R_MSK) >> 4;

	/* Per-chip capability and errata flags */
	switch (hw->chip_id) {
	case CHIP_ID_YUKON_XL:
		hw->flags = SKY2_HW_GIGABIT | SKY2_HW_NEWER_PHY;
		if (hw->chip_rev < CHIP_REV_YU_XL_A2)
			hw->flags |= SKY2_HW_RSS_BROKEN;
		break;

	case CHIP_ID_YUKON_EC_U:
		hw->flags = SKY2_HW_GIGABIT
			| SKY2_HW_NEWER_PHY
			| SKY2_HW_ADV_POWER_CTL;
		break;

	case CHIP_ID_YUKON_EX:
		hw->flags = SKY2_HW_GIGABIT
			| SKY2_HW_NEWER_PHY
			| SKY2_HW_NEW_LE
			| SKY2_HW_ADV_POWER_CTL
			| SKY2_HW_RSS_CHKSUM;

		/* New transmit checksum */
		if (hw->chip_rev != CHIP_REV_YU_EX_B0)
			hw->flags |= SKY2_HW_AUTO_TX_SUM;
		break;

	case CHIP_ID_YUKON_EC:
		/* This rev is really old, and requires untested workarounds */
		if (hw->chip_rev == CHIP_REV_YU_EC_A1) {
			dev_err(&hw->pdev->dev, "unsupported revision Yukon-EC rev A1\n");
			return -EOPNOTSUPP;
		}
		hw->flags = SKY2_HW_GIGABIT | SKY2_HW_RSS_BROKEN;
		break;

	case CHIP_ID_YUKON_FE:
		hw->flags = SKY2_HW_RSS_BROKEN;
		break;

	case CHIP_ID_YUKON_FE_P:
		hw->flags = SKY2_HW_NEWER_PHY
			| SKY2_HW_NEW_LE
			| SKY2_HW_AUTO_TX_SUM
			| SKY2_HW_ADV_POWER_CTL;

		/* The workaround for status conflicts VLAN tag detection. */
		if (hw->chip_rev == CHIP_REV_YU_FE2_A0)
			hw->flags |= SKY2_HW_VLAN_BROKEN | SKY2_HW_RSS_CHKSUM;
		break;

	case CHIP_ID_YUKON_SUPR:
		hw->flags = SKY2_HW_GIGABIT
			| SKY2_HW_NEWER_PHY
			| SKY2_HW_NEW_LE
			| SKY2_HW_AUTO_TX_SUM
			| SKY2_HW_ADV_POWER_CTL;

		if (hw->chip_rev == CHIP_REV_YU_SU_A0)
			hw->flags |= SKY2_HW_RSS_CHKSUM;
		break;

	case CHIP_ID_YUKON_UL_2:
		hw->flags = SKY2_HW_GIGABIT
			| SKY2_HW_ADV_POWER_CTL;
		break;

	case CHIP_ID_YUKON_OPT:
	case CHIP_ID_YUKON_PRM:
	case CHIP_ID_YUKON_OP_2:
		hw->flags = SKY2_HW_GIGABIT
			| SKY2_HW_NEW_LE
			| SKY2_HW_ADV_POWER_CTL;
		break;

	default:
		dev_err(&hw->pdev->dev, "unsupported chip type 0x%x\n",
			hw->chip_id);
		return -EOPNOTSUPP;
	}

	/* PMD type codes 'L', 'S', 'P' identify fibre media */
	hw->pmd_type = sky2_read8(hw, B2_PMD_TYP);
	if (hw->pmd_type == 'L' || hw->pmd_type == 'S' || hw->pmd_type == 'P')
		hw->flags |= SKY2_HW_FIBRE_PHY;

	/* Count ports: second MAC must exist and not be clock-gated off */
	hw->ports = 1;
	t8 = sky2_read8(hw, B2_Y2_HW_RES);
	if ((t8 & CFG_DUAL_MAC_MSK) == CFG_DUAL_MAC_MSK) {
		if (!(sky2_read8(hw, B2_Y2_CLK_GATE) & Y2_STATUS_LNK2_INAC))
			++hw->ports;
	}

	if (sky2_read8(hw, B2_E_0))
		hw->flags |= SKY2_HW_RAM_BUFFER;

	return 0;
}
3166 | |||
/* Full chip reset and reinitialization of shared (non per-port) state:
 * ASF disable, software reset, PCI/PCIe error clearing, per-port MAC
 * link reset, RAM interface timeouts, and the status unit.
 * The register write ordering below follows the hardware's required
 * bring-up sequence and must not be rearranged.
 */
static void sky2_reset(struct sky2_hw *hw)
{
	struct pci_dev *pdev = hw->pdev;
	u16 status;
	int i;
	u32 hwe_mask = Y2_HWE_ALL_MASK;

	/* disable ASF */
	if (hw->chip_id == CHIP_ID_YUKON_EX
	    || hw->chip_id == CHIP_ID_YUKON_SUPR) {
		sky2_write32(hw, CPU_WDOG, 0);
		status = sky2_read16(hw, HCU_CCSR);
		status &= ~(HCU_CCSR_AHB_RST | HCU_CCSR_CPU_RST_MODE |
			    HCU_CCSR_UC_STATE_MSK);
		/*
		 * CPU clock divider shouldn't be used because
		 * - ASF firmware may malfunction
		 * - Yukon-Supreme: Parallel FLASH doesn't support divided clocks
		 */
		status &= ~HCU_CCSR_CPU_CLK_DIVIDE_MSK;
		sky2_write16(hw, HCU_CCSR, status);
		sky2_write32(hw, CPU_WDOG, 0);
	} else
		sky2_write8(hw, B28_Y2_ASF_STAT_CMD, Y2_ASF_RESET);
	sky2_write16(hw, B0_CTST, Y2_ASF_DISABLE);

	/* do a SW reset */
	sky2_write8(hw, B0_CTST, CS_RST_SET);
	sky2_write8(hw, B0_CTST, CS_RST_CLR);

	/* allow writes to PCI config */
	sky2_write8(hw, B2_TST_CTRL1, TST_CFG_WRITE_ON);

	/* clear PCI errors, if any */
	status = sky2_pci_read16(hw, PCI_STATUS);
	status |= PCI_STATUS_ERROR_BITS;
	sky2_pci_write16(hw, PCI_STATUS, status);

	sky2_write8(hw, B0_CTST, CS_MRST_CLR);

	if (pci_is_pcie(pdev)) {
		/* Clear any latched AER uncorrectable error status */
		sky2_write32(hw, Y2_CFG_AER + PCI_ERR_UNCOR_STATUS,
			     0xfffffffful);

		/* If error bit is stuck on ignore it */
		if (sky2_read32(hw, B0_HWE_ISRC) & Y2_IS_PCI_EXP)
			dev_info(&pdev->dev, "ignoring stuck error report bit\n");
		else
			hwe_mask |= Y2_IS_PCI_EXP;
	}

	sky2_power_on(hw);
	sky2_write8(hw, B2_TST_CTRL1, TST_CFG_WRITE_OFF);

	for (i = 0; i < hw->ports; i++) {
		sky2_write8(hw, SK_REG(i, GMAC_LINK_CTRL), GMLC_RST_SET);
		sky2_write8(hw, SK_REG(i, GMAC_LINK_CTRL), GMLC_RST_CLR);

		/* Bypass the (unused) MACSec engine on chips that have it */
		if (hw->chip_id == CHIP_ID_YUKON_EX ||
		    hw->chip_id == CHIP_ID_YUKON_SUPR)
			sky2_write16(hw, SK_REG(i, GMAC_CTRL),
				     GMC_BYP_MACSECRX_ON | GMC_BYP_MACSECTX_ON
				     | GMC_BYP_RETR_ON);

	}

	if (hw->chip_id == CHIP_ID_YUKON_SUPR && hw->chip_rev > CHIP_REV_YU_SU_B0) {
		/* enable MACSec clock gating */
		sky2_pci_write32(hw, PCI_DEV_REG3, P_CLK_MACSEC_DIS);
	}

	if (hw->chip_id == CHIP_ID_YUKON_OPT ||
	    hw->chip_id == CHIP_ID_YUKON_PRM ||
	    hw->chip_id == CHIP_ID_YUKON_OP_2) {
		u16 reg;
		u32 msk;

		if (hw->chip_id == CHIP_ID_YUKON_OPT && hw->chip_rev == 0) {
			/* disable PCI-E PHY power down (set PHY reg 0x80, bit 7 */
			sky2_write32(hw, Y2_PEX_PHY_DATA, (0x80UL << 16) | (1 << 7));

			/* set PHY Link Detect Timer to 1.1 second (11x 100ms) */
			reg = 10;

			/* re-enable PEX PM in PEX PHY debug reg. 8 (clear bit 12) */
			sky2_write32(hw, Y2_PEX_PHY_DATA, PEX_DB_ACCESS | (0x08UL << 16));
		} else {
			/* set PHY Link Detect Timer to 0.4 second (4x 100ms) */
			reg = 3;
		}

		reg <<= PSM_CONFIG_REG4_TIMER_PHY_LINK_DETECT_BASE;
		reg |= PSM_CONFIG_REG4_RST_PHY_LINK_DETECT;

		/* reset PHY Link Detect */
		sky2_write8(hw, B2_TST_CTRL1, TST_CFG_WRITE_ON);
		sky2_pci_write16(hw, PSM_CONFIG_REG4, reg);

		/* enable PHY Quick Link */
		msk = sky2_read32(hw, B0_IMSK);
		msk |= Y2_IS_PHY_QLNK;
		sky2_write32(hw, B0_IMSK, msk);

		/* check if PSMv2 was running before */
		reg = sky2_pci_read16(hw, PSM_CONFIG_REG3);
		if (reg & PCI_EXP_LNKCTL_ASPMC)
			/* restore the PCIe Link Control register */
			sky2_pci_write16(hw, pdev->pcie_cap + PCI_EXP_LNKCTL,
					 reg);

		sky2_write8(hw, B2_TST_CTRL1, TST_CFG_WRITE_OFF);

		/* re-enable PEX PM in PEX PHY debug reg. 8 (clear bit 12) */
		sky2_write32(hw, Y2_PEX_PHY_DATA, PEX_DB_ACCESS | (0x08UL << 16));
	}

	/* Clear I2C IRQ noise */
	sky2_write32(hw, B2_I2C_IRQ, 1);

	/* turn off hardware timer (unused) */
	sky2_write8(hw, B2_TI_CTRL, TIM_STOP);
	sky2_write8(hw, B2_TI_CTRL, TIM_CLR_IRQ);

	/* Turn off descriptor polling */
	sky2_write32(hw, B28_DPT_CTRL, DPT_STOP);

	/* Turn off receive timestamp */
	sky2_write8(hw, GMAC_TI_ST_CTRL, GMT_ST_STOP);
	sky2_write8(hw, GMAC_TI_ST_CTRL, GMT_ST_CLR_IRQ);

	/* enable the Tx Arbiters */
	for (i = 0; i < hw->ports; i++)
		sky2_write8(hw, SK_REG(i, TXA_CTRL), TXA_ENA_ARB);

	/* Initialize ram interface */
	for (i = 0; i < hw->ports; i++) {
		sky2_write8(hw, RAM_BUFFER(i, B3_RI_CTRL), RI_RST_CLR);

		/* Same timeout value for every RAM interface queue */
		sky2_write8(hw, RAM_BUFFER(i, B3_RI_WTO_R1), SK_RI_TO_53);
		sky2_write8(hw, RAM_BUFFER(i, B3_RI_WTO_XA1), SK_RI_TO_53);
		sky2_write8(hw, RAM_BUFFER(i, B3_RI_WTO_XS1), SK_RI_TO_53);
		sky2_write8(hw, RAM_BUFFER(i, B3_RI_RTO_R1), SK_RI_TO_53);
		sky2_write8(hw, RAM_BUFFER(i, B3_RI_RTO_XA1), SK_RI_TO_53);
		sky2_write8(hw, RAM_BUFFER(i, B3_RI_RTO_XS1), SK_RI_TO_53);
		sky2_write8(hw, RAM_BUFFER(i, B3_RI_WTO_R2), SK_RI_TO_53);
		sky2_write8(hw, RAM_BUFFER(i, B3_RI_WTO_XA2), SK_RI_TO_53);
		sky2_write8(hw, RAM_BUFFER(i, B3_RI_WTO_XS2), SK_RI_TO_53);
		sky2_write8(hw, RAM_BUFFER(i, B3_RI_RTO_R2), SK_RI_TO_53);
		sky2_write8(hw, RAM_BUFFER(i, B3_RI_RTO_XA2), SK_RI_TO_53);
		sky2_write8(hw, RAM_BUFFER(i, B3_RI_RTO_XS2), SK_RI_TO_53);
	}

	sky2_write32(hw, B0_HWE_IMSK, hwe_mask);

	for (i = 0; i < hw->ports; i++)
		sky2_gmac_reset(hw, i);

	/* Reset the status ring and hand its DMA address to the chip */
	memset(hw->st_le, 0, hw->st_size * sizeof(struct sky2_status_le));
	hw->st_idx = 0;

	sky2_write32(hw, STAT_CTRL, SC_STAT_RST_SET);
	sky2_write32(hw, STAT_CTRL, SC_STAT_RST_CLR);

	sky2_write32(hw, STAT_LIST_ADDR_LO, hw->st_dma);
	sky2_write32(hw, STAT_LIST_ADDR_HI, (u64) hw->st_dma >> 32);

	/* Set the list last index */
	sky2_write16(hw, STAT_LAST_IDX, hw->st_size - 1);

	sky2_write16(hw, STAT_TX_IDX_TH, 10);
	sky2_write8(hw, STAT_FIFO_WM, 16);

	/* set Status-FIFO ISR watermark */
	if (hw->chip_id == CHIP_ID_YUKON_XL && hw->chip_rev == 0)
		sky2_write8(hw, STAT_FIFO_ISR_WM, 4);
	else
		sky2_write8(hw, STAT_FIFO_ISR_WM, 16);

	sky2_write32(hw, STAT_TX_TIMER_INI, sky2_us2clk(hw, 1000));
	sky2_write32(hw, STAT_ISR_TIMER_INI, sky2_us2clk(hw, 20));
	sky2_write32(hw, STAT_LEV_TIMER_INI, sky2_us2clk(hw, 100));

	/* enable status unit */
	sky2_write32(hw, STAT_CTRL, SC_STAT_OP_ON);

	sky2_write8(hw, STAT_TX_TIMER_CTRL, TIM_START);
	sky2_write8(hw, STAT_LEV_TIMER_CTRL, TIM_START);
	sky2_write8(hw, STAT_ISR_TIMER_CTRL, TIM_START);
}
3356 | |||
/* Take device down (offline).
 * Equivalent to doing dev_stop() but this does not
 * inform upper layers of the transition.
 */
static void sky2_detach(struct net_device *dev)
{
	if (!netif_running(dev))
		return;

	/* Mark detached under the tx lock so the queue stops cleanly */
	netif_tx_lock(dev);
	netif_device_detach(dev);
	netif_tx_unlock(dev);

	sky2_down(dev);
}
3370 | |||
/* Bring device back after doing sky2_detach */
static int sky2_reattach(struct net_device *dev)
{
	int err;

	if (!netif_running(dev))
		return 0;

	err = sky2_up(dev);
	if (err) {
		netdev_info(dev, "could not restart %d\n", err);
		dev_close(dev);
		return err;
	}

	netif_device_attach(dev);
	sky2_set_multicast(dev);
	return 0;
}
3389 | |||
/* Quiesce the whole device: mask interrupts, stop NAPI, then take each
 * running port's hardware down (used around reset/restart).
 */
static void sky2_all_down(struct sky2_hw *hw)
{
	int i;

	/* Read flushes pending posted writes before masking interrupts */
	sky2_read32(hw, B0_IMSK);
	sky2_write32(hw, B0_IMSK, 0);
	synchronize_irq(hw->pdev->irq);
	napi_disable(&hw->napi);

	for (i = 0; i < hw->ports; i++) {
		struct net_device *dev = hw->dev[i];
		struct sky2_port *sky2 = netdev_priv(dev);

		if (!netif_running(dev))
			continue;

		netif_carrier_off(dev);
		netif_tx_disable(dev);
		sky2_hw_down(sky2);
	}
}
3411 | |||
/* Bring every running port back up after a reset, then re-enable the
 * combined interrupt mask and NAPI.
 */
static void sky2_all_up(struct sky2_hw *hw)
{
	u32 imask = Y2_IS_BASE;
	int i;

	for (i = 0; i < hw->ports; i++) {
		struct net_device *dev = hw->dev[i];
		struct sky2_port *sky2 = netdev_priv(dev);

		if (!netif_running(dev))
			continue;

		sky2_hw_up(sky2);
		sky2_set_multicast(dev);
		/* Accumulate the per-port interrupt bits of active ports */
		imask |= portirq_msk[i];
		netif_wake_queue(dev);
	}

	sky2_write32(hw, B0_IMSK, imask);
	/* Read back to post the mask write before enabling NAPI */
	sky2_read32(hw, B0_IMSK);

	sky2_read32(hw, B0_Y2_SP_LISR);
	napi_enable(&hw->napi);
}
3436 | |||
/* Work-queue handler that performs a full chip reset (scheduled from
 * error paths).  Runs under RTNL so it cannot race ifup/ifdown or
 * other ethtool operations.
 */
static void sky2_restart(struct work_struct *work)
{
	struct sky2_hw *hw = container_of(work, struct sky2_hw, restart_work);

	rtnl_lock();

	sky2_all_down(hw);
	sky2_reset(hw);
	sky2_all_up(hw);

	rtnl_unlock();
}
3449 | |||
3450 | static inline u8 sky2_wol_supported(const struct sky2_hw *hw) | ||
3451 | { | ||
3452 | return sky2_is_copper(hw) ? (WAKE_PHY | WAKE_MAGIC) : 0; | ||
3453 | } | ||
3454 | |||
/* ethtool: report the supported and currently armed Wake-on-LAN modes
 * for this port.
 */
static void sky2_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
{
	const struct sky2_port *sky2 = netdev_priv(dev);

	wol->supported = sky2_wol_supported(sky2->hw);
	wol->wolopts = sky2->wol;
}
3462 | |||
3463 | static int sky2_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol) | ||
3464 | { | ||
3465 | struct sky2_port *sky2 = netdev_priv(dev); | ||
3466 | struct sky2_hw *hw = sky2->hw; | ||
3467 | bool enable_wakeup = false; | ||
3468 | int i; | ||
3469 | |||
3470 | if ((wol->wolopts & ~sky2_wol_supported(sky2->hw)) || | ||
3471 | !device_can_wakeup(&hw->pdev->dev)) | ||
3472 | return -EOPNOTSUPP; | ||
3473 | |||
3474 | sky2->wol = wol->wolopts; | ||
3475 | |||
3476 | for (i = 0; i < hw->ports; i++) { | ||
3477 | struct net_device *dev = hw->dev[i]; | ||
3478 | struct sky2_port *sky2 = netdev_priv(dev); | ||
3479 | |||
3480 | if (sky2->wol) | ||
3481 | enable_wakeup = true; | ||
3482 | } | ||
3483 | device_set_wakeup_enable(&hw->pdev->dev, enable_wakeup); | ||
3484 | |||
3485 | return 0; | ||
3486 | } | ||
3487 | |||
3488 | static u32 sky2_supported_modes(const struct sky2_hw *hw) | ||
3489 | { | ||
3490 | if (sky2_is_copper(hw)) { | ||
3491 | u32 modes = SUPPORTED_10baseT_Half | ||
3492 | | SUPPORTED_10baseT_Full | ||
3493 | | SUPPORTED_100baseT_Half | ||
3494 | | SUPPORTED_100baseT_Full; | ||
3495 | |||
3496 | if (hw->flags & SKY2_HW_GIGABIT) | ||
3497 | modes |= SUPPORTED_1000baseT_Half | ||
3498 | | SUPPORTED_1000baseT_Full; | ||
3499 | return modes; | ||
3500 | } else | ||
3501 | return SUPPORTED_1000baseT_Half | ||
3502 | | SUPPORTED_1000baseT_Full; | ||
3503 | } | ||
3504 | |||
/* ethtool: report current link settings (speed, duplex, port type,
 * autonegotiation state) from the driver's cached state.
 */
static int sky2_get_settings(struct net_device *dev, struct ethtool_cmd *ecmd)
{
	struct sky2_port *sky2 = netdev_priv(dev);
	struct sky2_hw *hw = sky2->hw;

	ecmd->transceiver = XCVR_INTERNAL;
	ecmd->supported = sky2_supported_modes(hw);
	ecmd->phy_address = PHY_ADDR_MARV;
	if (sky2_is_copper(hw)) {
		ecmd->port = PORT_TP;
		ethtool_cmd_speed_set(ecmd, sky2->speed);
		ecmd->supported |= SUPPORTED_Autoneg | SUPPORTED_TP;
	} else {
		/* fibre links always run at gigabit speed */
		ethtool_cmd_speed_set(ecmd, SPEED_1000);
		ecmd->port = PORT_FIBRE;
		ecmd->supported |= SUPPORTED_Autoneg | SUPPORTED_FIBRE;
	}

	ecmd->advertising = sky2->advertising;
	ecmd->autoneg = (sky2->flags & SKY2_FLAG_AUTO_SPEED)
		? AUTONEG_ENABLE : AUTONEG_DISABLE;
	ecmd->duplex = sky2->duplex;
	return 0;
}
3529 | |||
3530 | static int sky2_set_settings(struct net_device *dev, struct ethtool_cmd *ecmd) | ||
3531 | { | ||
3532 | struct sky2_port *sky2 = netdev_priv(dev); | ||
3533 | const struct sky2_hw *hw = sky2->hw; | ||
3534 | u32 supported = sky2_supported_modes(hw); | ||
3535 | |||
3536 | if (ecmd->autoneg == AUTONEG_ENABLE) { | ||
3537 | if (ecmd->advertising & ~supported) | ||
3538 | return -EINVAL; | ||
3539 | |||
3540 | if (sky2_is_copper(hw)) | ||
3541 | sky2->advertising = ecmd->advertising | | ||
3542 | ADVERTISED_TP | | ||
3543 | ADVERTISED_Autoneg; | ||
3544 | else | ||
3545 | sky2->advertising = ecmd->advertising | | ||
3546 | ADVERTISED_FIBRE | | ||
3547 | ADVERTISED_Autoneg; | ||
3548 | |||
3549 | sky2->flags |= SKY2_FLAG_AUTO_SPEED; | ||
3550 | sky2->duplex = -1; | ||
3551 | sky2->speed = -1; | ||
3552 | } else { | ||
3553 | u32 setting; | ||
3554 | u32 speed = ethtool_cmd_speed(ecmd); | ||
3555 | |||
3556 | switch (speed) { | ||
3557 | case SPEED_1000: | ||
3558 | if (ecmd->duplex == DUPLEX_FULL) | ||
3559 | setting = SUPPORTED_1000baseT_Full; | ||
3560 | else if (ecmd->duplex == DUPLEX_HALF) | ||
3561 | setting = SUPPORTED_1000baseT_Half; | ||
3562 | else | ||
3563 | return -EINVAL; | ||
3564 | break; | ||
3565 | case SPEED_100: | ||
3566 | if (ecmd->duplex == DUPLEX_FULL) | ||
3567 | setting = SUPPORTED_100baseT_Full; | ||
3568 | else if (ecmd->duplex == DUPLEX_HALF) | ||
3569 | setting = SUPPORTED_100baseT_Half; | ||
3570 | else | ||
3571 | return -EINVAL; | ||
3572 | break; | ||
3573 | |||
3574 | case SPEED_10: | ||
3575 | if (ecmd->duplex == DUPLEX_FULL) | ||
3576 | setting = SUPPORTED_10baseT_Full; | ||
3577 | else if (ecmd->duplex == DUPLEX_HALF) | ||
3578 | setting = SUPPORTED_10baseT_Half; | ||
3579 | else | ||
3580 | return -EINVAL; | ||
3581 | break; | ||
3582 | default: | ||
3583 | return -EINVAL; | ||
3584 | } | ||
3585 | |||
3586 | if ((setting & supported) == 0) | ||
3587 | return -EINVAL; | ||
3588 | |||
3589 | sky2->speed = speed; | ||
3590 | sky2->duplex = ecmd->duplex; | ||
3591 | sky2->flags &= ~SKY2_FLAG_AUTO_SPEED; | ||
3592 | } | ||
3593 | |||
3594 | if (netif_running(dev)) { | ||
3595 | sky2_phy_reinit(sky2); | ||
3596 | sky2_set_multicast(dev); | ||
3597 | } | ||
3598 | |||
3599 | return 0; | ||
3600 | } | ||
3601 | |||
3602 | static void sky2_get_drvinfo(struct net_device *dev, | ||
3603 | struct ethtool_drvinfo *info) | ||
3604 | { | ||
3605 | struct sky2_port *sky2 = netdev_priv(dev); | ||
3606 | |||
3607 | strcpy(info->driver, DRV_NAME); | ||
3608 | strcpy(info->version, DRV_VERSION); | ||
3609 | strcpy(info->fw_version, "N/A"); | ||
3610 | strcpy(info->bus_info, pci_name(sky2->hw->pdev)); | ||
3611 | } | ||
3612 | |||
/* Table mapping ethtool statistic names to GMAC MIB register offsets.
 * Order matters: sky2_phy_stats() reads the first two entries as 64-bit
 * octet counters and the remainder as 32-bit counters, and
 * sky2_get_strings()/sky2_get_sset_count() expose this table directly.
 */
static const struct sky2_stat {
	char name[ETH_GSTRING_LEN];	/* name reported to ethtool */
	u16 offset;			/* GMAC MIB register offset */
} sky2_stats[] = {
	{ "tx_bytes",	   GM_TXO_OK_HI },
	{ "rx_bytes",	   GM_RXO_OK_HI },
	{ "tx_broadcast",  GM_TXF_BC_OK },
	{ "rx_broadcast",  GM_RXF_BC_OK },
	{ "tx_multicast",  GM_TXF_MC_OK },
	{ "rx_multicast",  GM_RXF_MC_OK },
	{ "tx_unicast",    GM_TXF_UC_OK },
	{ "rx_unicast",    GM_RXF_UC_OK },
	{ "tx_mac_pause",  GM_TXF_MPAUSE },
	{ "rx_mac_pause",  GM_RXF_MPAUSE },
	{ "collisions",    GM_TXF_COL },
	{ "late_collision",GM_TXF_LAT_COL },
	{ "aborted", 	   GM_TXF_ABO_COL },
	{ "single_collisions", GM_TXF_SNG_COL },
	{ "multi_collisions", GM_TXF_MUL_COL },

	{ "rx_short",      GM_RXF_SHT },
	{ "rx_runt", 	   GM_RXE_FRAG },
	{ "rx_64_byte_packets", GM_RXF_64B },
	{ "rx_65_to_127_byte_packets", GM_RXF_127B },
	{ "rx_128_to_255_byte_packets", GM_RXF_255B },
	{ "rx_256_to_511_byte_packets", GM_RXF_511B },
	{ "rx_512_to_1023_byte_packets", GM_RXF_1023B },
	{ "rx_1024_to_1518_byte_packets", GM_RXF_1518B },
	{ "rx_1518_to_max_byte_packets", GM_RXF_MAX_SZ },
	{ "rx_too_long",   GM_RXF_LNG_ERR },
	{ "rx_fifo_overflow", GM_RXE_FIFO_OV },
	{ "rx_jabber",     GM_RXF_JAB_PKT },
	{ "rx_fcs_error",   GM_RXF_FCS_ERR },

	{ "tx_64_byte_packets", GM_TXF_64B },
	{ "tx_65_to_127_byte_packets", GM_TXF_127B },
	{ "tx_128_to_255_byte_packets", GM_TXF_255B },
	{ "tx_256_to_511_byte_packets", GM_TXF_511B },
	{ "tx_512_to_1023_byte_packets", GM_TXF_1023B },
	{ "tx_1024_to_1518_byte_packets", GM_TXF_1518B },
	{ "tx_1519_to_max_byte_packets", GM_TXF_MAX_SZ },
	{ "tx_fifo_underrun", GM_TXE_FIFO_UR },
};
3656 | |||
/* ethtool: return this port's debug message-level bitmask. */
static u32 sky2_get_msglevel(struct net_device *netdev)
{
	struct sky2_port *sky2 = netdev_priv(netdev);
	return sky2->msg_enable;
}
3662 | |||
/* ethtool: restart autonegotiation by reinitializing the PHY.
 * Only valid while the interface is up and autoneg is enabled.
 */
static int sky2_nway_reset(struct net_device *dev)
{
	struct sky2_port *sky2 = netdev_priv(dev);

	if (!netif_running(dev) || !(sky2->flags & SKY2_FLAG_AUTO_SPEED))
		return -EINVAL;

	sky2_phy_reinit(sky2);
	/* PHY reinit clears the receive filter state; reload it */
	sky2_set_multicast(dev);

	return 0;
}
3675 | |||
/* Fill @data with @count hardware MIB counters in sky2_stats[] order.
 * The first two entries (tx/rx octets) are 64-bit counters; the rest
 * are read as 32-bit counters.
 */
static void sky2_phy_stats(struct sky2_port *sky2, u64 * data, unsigned count)
{
	struct sky2_hw *hw = sky2->hw;
	unsigned port = sky2->port;
	int i;

	data[0] = get_stats64(hw, port, GM_TXO_OK_LO);
	data[1] = get_stats64(hw, port, GM_RXO_OK_LO);

	for (i = 2; i < count; i++)
		data[i] = get_stats32(hw, port, sky2_stats[i].offset);
}
3688 | |||
/* ethtool: set this port's debug message-level bitmask. */
static void sky2_set_msglevel(struct net_device *netdev, u32 value)
{
	struct sky2_port *sky2 = netdev_priv(netdev);
	sky2->msg_enable = value;
}
3694 | |||
3695 | static int sky2_get_sset_count(struct net_device *dev, int sset) | ||
3696 | { | ||
3697 | switch (sset) { | ||
3698 | case ETH_SS_STATS: | ||
3699 | return ARRAY_SIZE(sky2_stats); | ||
3700 | default: | ||
3701 | return -EOPNOTSUPP; | ||
3702 | } | ||
3703 | } | ||
3704 | |||
/* ethtool: dump all hardware statistics (one u64 per sky2_stats entry). */
static void sky2_get_ethtool_stats(struct net_device *dev,
				   struct ethtool_stats *stats, u64 * data)
{
	struct sky2_port *sky2 = netdev_priv(dev);

	sky2_phy_stats(sky2, data, ARRAY_SIZE(sky2_stats));
}
3712 | |||
3713 | static void sky2_get_strings(struct net_device *dev, u32 stringset, u8 * data) | ||
3714 | { | ||
3715 | int i; | ||
3716 | |||
3717 | switch (stringset) { | ||
3718 | case ETH_SS_STATS: | ||
3719 | for (i = 0; i < ARRAY_SIZE(sky2_stats); i++) | ||
3720 | memcpy(data + i * ETH_GSTRING_LEN, | ||
3721 | sky2_stats[i].name, ETH_GSTRING_LEN); | ||
3722 | break; | ||
3723 | } | ||
3724 | } | ||
3725 | |||
/* net_device: change the station MAC address.
 * The address is written both to the chip's per-port address registers
 * and to the GMAC source-address registers.
 */
static int sky2_set_mac_address(struct net_device *dev, void *p)
{
	struct sky2_port *sky2 = netdev_priv(dev);
	struct sky2_hw *hw = sky2->hw;
	unsigned port = sky2->port;
	const struct sockaddr *addr = p;

	if (!is_valid_ether_addr(addr->sa_data))
		return -EADDRNOTAVAIL;

	memcpy(dev->dev_addr, addr->sa_data, ETH_ALEN);
	/* each port has an 8-byte slot in the B2_MAC_* register banks */
	memcpy_toio(hw->regs + B2_MAC_1 + port * 8,
		    dev->dev_addr, ETH_ALEN);
	memcpy_toio(hw->regs + B2_MAC_2 + port * 8,
		    dev->dev_addr, ETH_ALEN);

	/* virtual address for data */
	gma_set_addr(hw, port, GM_SRC_ADDR_2L, dev->dev_addr);

	/* physical address: used for pause frames */
	gma_set_addr(hw, port, GM_SRC_ADDR_1L, dev->dev_addr);

	return 0;
}
3750 | |||
3751 | static inline void sky2_add_filter(u8 filter[8], const u8 *addr) | ||
3752 | { | ||
3753 | u32 bit; | ||
3754 | |||
3755 | bit = ether_crc(ETH_ALEN, addr) & 63; | ||
3756 | filter[bit >> 3] |= 1 << (bit & 7); | ||
3757 | } | ||
3758 | |||
/* net_device: program the GMAC receive filter from dev->flags and the
 * multicast list.  When receive flow control is active, the IEEE pause
 * multicast address must always pass the hash filter so that pause
 * frames are received.
 */
static void sky2_set_multicast(struct net_device *dev)
{
	struct sky2_port *sky2 = netdev_priv(dev);
	struct sky2_hw *hw = sky2->hw;
	unsigned port = sky2->port;
	struct netdev_hw_addr *ha;
	u16 reg;
	u8 filter[8];		/* 64-bit multicast hash filter */
	int rx_pause;
	static const u8 pause_mc_addr[ETH_ALEN] = { 0x1, 0x80, 0xc2, 0x0, 0x0, 0x1 };

	rx_pause = (sky2->flow_status == FC_RX || sky2->flow_status == FC_BOTH);
	memset(filter, 0, sizeof(filter));

	reg = gma_read16(hw, port, GM_RX_CTRL);
	reg |= GM_RXCR_UCF_ENA;

	if (dev->flags & IFF_PROMISC)	/* promiscuous */
		reg &= ~(GM_RXCR_UCF_ENA | GM_RXCR_MCF_ENA);
	else if (dev->flags & IFF_ALLMULTI)
		memset(filter, 0xff, sizeof(filter));	/* accept all multicast */
	else if (netdev_mc_empty(dev) && !rx_pause)
		reg &= ~GM_RXCR_MCF_ENA;
	else {
		reg |= GM_RXCR_MCF_ENA;

		if (rx_pause)
			sky2_add_filter(filter, pause_mc_addr);

		netdev_for_each_mc_addr(ha, dev)
			sky2_add_filter(filter, ha->addr);
	}

	/* load the 64-bit hash, 16 bits per register */
	gma_write16(hw, port, GM_MC_ADDR_H1,
		    (u16) filter[0] | ((u16) filter[1] << 8));
	gma_write16(hw, port, GM_MC_ADDR_H2,
		    (u16) filter[2] | ((u16) filter[3] << 8));
	gma_write16(hw, port, GM_MC_ADDR_H3,
		    (u16) filter[4] | ((u16) filter[5] << 8));
	gma_write16(hw, port, GM_MC_ADDR_H4,
		    (u16) filter[6] | ((u16) filter[7] << 8));

	gma_write16(hw, port, GM_RX_CTRL, reg);
}
3803 | |||
/* net_device: fill 64-bit interface statistics.
 * Byte/packet totals come from the driver's per-port u64_stats counters
 * (read under the seqcount retry loop so a concurrent update on 32-bit
 * hosts is never torn); error counters are read live from the GMAC MIB.
 */
static struct rtnl_link_stats64 *sky2_get_stats(struct net_device *dev,
						struct rtnl_link_stats64 *stats)
{
	struct sky2_port *sky2 = netdev_priv(dev);
	struct sky2_hw *hw = sky2->hw;
	unsigned port = sky2->port;
	unsigned int start;
	u64 _bytes, _packets;

	do {
		start = u64_stats_fetch_begin_bh(&sky2->rx_stats.syncp);
		_bytes = sky2->rx_stats.bytes;
		_packets = sky2->rx_stats.packets;
	} while (u64_stats_fetch_retry_bh(&sky2->rx_stats.syncp, start));

	stats->rx_packets = _packets;
	stats->rx_bytes = _bytes;

	do {
		start = u64_stats_fetch_begin_bh(&sky2->tx_stats.syncp);
		_bytes = sky2->tx_stats.bytes;
		_packets = sky2->tx_stats.packets;
	} while (u64_stats_fetch_retry_bh(&sky2->tx_stats.syncp, start));

	stats->tx_packets = _packets;
	stats->tx_bytes = _bytes;

	stats->multicast = get_stats32(hw, port, GM_RXF_MC_OK)
		+ get_stats32(hw, port, GM_RXF_BC_OK);

	stats->collisions = get_stats32(hw, port, GM_TXF_COL);

	stats->rx_length_errors = get_stats32(hw, port, GM_RXF_LNG_ERR);
	stats->rx_crc_errors = get_stats32(hw, port, GM_RXF_FCS_ERR);
	stats->rx_frame_errors = get_stats32(hw, port, GM_RXF_SHT)
		+ get_stats32(hw, port, GM_RXE_FRAG);
	stats->rx_over_errors = get_stats32(hw, port, GM_RXE_FIFO_OV);

	/* these are maintained in software by the driver */
	stats->rx_dropped = dev->stats.rx_dropped;
	stats->rx_fifo_errors = dev->stats.rx_fifo_errors;
	stats->tx_fifo_errors = dev->stats.tx_fifo_errors;

	return stats;
}
3848 | |||
/* Can have one global because blinking is controlled by
 * ethtool and that is always under RTNL mutex
 */
static void sky2_led(struct sky2_port *sky2, enum led_mode mode)
{
	struct sky2_hw *hw = sky2->hw;
	unsigned port = sky2->port;

	spin_lock_bh(&sky2->phy_lock);
	if (hw->chip_id == CHIP_ID_YUKON_EC_U ||
	    hw->chip_id == CHIP_ID_YUKON_EX ||
	    hw->chip_id == CHIP_ID_YUKON_SUPR) {
		u16 pg;
		/* These chips program the LEDs via the PHY control register
		 * on extended page 3; save and restore the current page. */
		pg = gm_phy_read(hw, port, PHY_MARV_EXT_ADR);
		gm_phy_write(hw, port, PHY_MARV_EXT_ADR, 3);

		switch (mode) {
		case MO_LED_OFF:
			gm_phy_write(hw, port, PHY_MARV_PHY_CTRL,
				     PHY_M_LEDC_LOS_CTRL(8) |
				     PHY_M_LEDC_INIT_CTRL(8) |
				     PHY_M_LEDC_STA1_CTRL(8) |
				     PHY_M_LEDC_STA0_CTRL(8));
			break;
		case MO_LED_ON:
			gm_phy_write(hw, port, PHY_MARV_PHY_CTRL,
				     PHY_M_LEDC_LOS_CTRL(9) |
				     PHY_M_LEDC_INIT_CTRL(9) |
				     PHY_M_LEDC_STA1_CTRL(9) |
				     PHY_M_LEDC_STA0_CTRL(9));
			break;
		case MO_LED_BLINK:
			gm_phy_write(hw, port, PHY_MARV_PHY_CTRL,
				     PHY_M_LEDC_LOS_CTRL(0xa) |
				     PHY_M_LEDC_INIT_CTRL(0xa) |
				     PHY_M_LEDC_STA1_CTRL(0xa) |
				     PHY_M_LEDC_STA0_CTRL(0xa));
			break;
		case MO_LED_NORM:
			/* last case, no break needed */
			gm_phy_write(hw, port, PHY_MARV_PHY_CTRL,
				     PHY_M_LEDC_LOS_CTRL(1) |
				     PHY_M_LEDC_INIT_CTRL(8) |
				     PHY_M_LEDC_STA1_CTRL(7) |
				     PHY_M_LEDC_STA0_CTRL(7));
		}

		gm_phy_write(hw, port, PHY_MARV_EXT_ADR, pg);
	} else
		/* older chips: drive all LEDs through the override register */
		gm_phy_write(hw, port, PHY_MARV_LED_OVER,
			     PHY_M_LED_MO_DUP(mode) |
			     PHY_M_LED_MO_10(mode) |
			     PHY_M_LED_MO_100(mode) |
			     PHY_M_LED_MO_1000(mode) |
			     PHY_M_LED_MO_RX(mode) |
			     PHY_M_LED_MO_TX(mode));

	spin_unlock_bh(&sky2->phy_lock);
}
3907 | |||
/* blink LED's for finding board */
static int sky2_set_phys_id(struct net_device *dev,
			    enum ethtool_phys_id_state state)
{
	struct sky2_port *sky2 = netdev_priv(dev);

	switch (state) {
	case ETHTOOL_ID_ACTIVE:
		return 1;	/* cycle on/off once per second */
	case ETHTOOL_ID_INACTIVE:
		/* done blinking: restore normal LED operation */
		sky2_led(sky2, MO_LED_NORM);
		break;
	case ETHTOOL_ID_ON:
		sky2_led(sky2, MO_LED_ON);
		break;
	case ETHTOOL_ID_OFF:
		sky2_led(sky2, MO_LED_OFF);
		break;
	}

	return 0;
}
3930 | |||
3931 | static void sky2_get_pauseparam(struct net_device *dev, | ||
3932 | struct ethtool_pauseparam *ecmd) | ||
3933 | { | ||
3934 | struct sky2_port *sky2 = netdev_priv(dev); | ||
3935 | |||
3936 | switch (sky2->flow_mode) { | ||
3937 | case FC_NONE: | ||
3938 | ecmd->tx_pause = ecmd->rx_pause = 0; | ||
3939 | break; | ||
3940 | case FC_TX: | ||
3941 | ecmd->tx_pause = 1, ecmd->rx_pause = 0; | ||
3942 | break; | ||
3943 | case FC_RX: | ||
3944 | ecmd->tx_pause = 0, ecmd->rx_pause = 1; | ||
3945 | break; | ||
3946 | case FC_BOTH: | ||
3947 | ecmd->tx_pause = ecmd->rx_pause = 1; | ||
3948 | } | ||
3949 | |||
3950 | ecmd->autoneg = (sky2->flags & SKY2_FLAG_AUTO_PAUSE) | ||
3951 | ? AUTONEG_ENABLE : AUTONEG_DISABLE; | ||
3952 | } | ||
3953 | |||
3954 | static int sky2_set_pauseparam(struct net_device *dev, | ||
3955 | struct ethtool_pauseparam *ecmd) | ||
3956 | { | ||
3957 | struct sky2_port *sky2 = netdev_priv(dev); | ||
3958 | |||
3959 | if (ecmd->autoneg == AUTONEG_ENABLE) | ||
3960 | sky2->flags |= SKY2_FLAG_AUTO_PAUSE; | ||
3961 | else | ||
3962 | sky2->flags &= ~SKY2_FLAG_AUTO_PAUSE; | ||
3963 | |||
3964 | sky2->flow_mode = sky2_flow(ecmd->rx_pause, ecmd->tx_pause); | ||
3965 | |||
3966 | if (netif_running(dev)) | ||
3967 | sky2_phy_reinit(sky2); | ||
3968 | |||
3969 | return 0; | ||
3970 | } | ||
3971 | |||
/* ethtool: report interrupt-coalescing parameters by reading the three
 * status-unit timers back from the hardware.  A stopped timer reports
 * 0 usecs; timer counts are converted back to microseconds.
 */
static int sky2_get_coalesce(struct net_device *dev,
			     struct ethtool_coalesce *ecmd)
{
	struct sky2_port *sky2 = netdev_priv(dev);
	struct sky2_hw *hw = sky2->hw;

	if (sky2_read8(hw, STAT_TX_TIMER_CTRL) == TIM_STOP)
		ecmd->tx_coalesce_usecs = 0;
	else {
		u32 clks = sky2_read32(hw, STAT_TX_TIMER_INI);
		ecmd->tx_coalesce_usecs = sky2_clk2us(hw, clks);
	}
	ecmd->tx_max_coalesced_frames = sky2_read16(hw, STAT_TX_IDX_TH);

	if (sky2_read8(hw, STAT_LEV_TIMER_CTRL) == TIM_STOP)
		ecmd->rx_coalesce_usecs = 0;
	else {
		u32 clks = sky2_read32(hw, STAT_LEV_TIMER_INI);
		ecmd->rx_coalesce_usecs = sky2_clk2us(hw, clks);
	}
	ecmd->rx_max_coalesced_frames = sky2_read8(hw, STAT_FIFO_WM);

	if (sky2_read8(hw, STAT_ISR_TIMER_CTRL) == TIM_STOP)
		ecmd->rx_coalesce_usecs_irq = 0;
	else {
		u32 clks = sky2_read32(hw, STAT_ISR_TIMER_INI);
		ecmd->rx_coalesce_usecs_irq = sky2_clk2us(hw, clks);
	}

	ecmd->rx_max_coalesced_frames_irq = sky2_read8(hw, STAT_FIFO_ISR_WM);

	return 0;
}
4005 | |||
4006 | /* Note: this affect both ports */ | ||
4007 | static int sky2_set_coalesce(struct net_device *dev, | ||
4008 | struct ethtool_coalesce *ecmd) | ||
4009 | { | ||
4010 | struct sky2_port *sky2 = netdev_priv(dev); | ||
4011 | struct sky2_hw *hw = sky2->hw; | ||
4012 | const u32 tmax = sky2_clk2us(hw, 0x0ffffff); | ||
4013 | |||
4014 | if (ecmd->tx_coalesce_usecs > tmax || | ||
4015 | ecmd->rx_coalesce_usecs > tmax || | ||
4016 | ecmd->rx_coalesce_usecs_irq > tmax) | ||
4017 | return -EINVAL; | ||
4018 | |||
4019 | if (ecmd->tx_max_coalesced_frames >= sky2->tx_ring_size-1) | ||
4020 | return -EINVAL; | ||
4021 | if (ecmd->rx_max_coalesced_frames > RX_MAX_PENDING) | ||
4022 | return -EINVAL; | ||
4023 | if (ecmd->rx_max_coalesced_frames_irq > RX_MAX_PENDING) | ||
4024 | return -EINVAL; | ||
4025 | |||
4026 | if (ecmd->tx_coalesce_usecs == 0) | ||
4027 | sky2_write8(hw, STAT_TX_TIMER_CTRL, TIM_STOP); | ||
4028 | else { | ||
4029 | sky2_write32(hw, STAT_TX_TIMER_INI, | ||
4030 | sky2_us2clk(hw, ecmd->tx_coalesce_usecs)); | ||
4031 | sky2_write8(hw, STAT_TX_TIMER_CTRL, TIM_START); | ||
4032 | } | ||
4033 | sky2_write16(hw, STAT_TX_IDX_TH, ecmd->tx_max_coalesced_frames); | ||
4034 | |||
4035 | if (ecmd->rx_coalesce_usecs == 0) | ||
4036 | sky2_write8(hw, STAT_LEV_TIMER_CTRL, TIM_STOP); | ||
4037 | else { | ||
4038 | sky2_write32(hw, STAT_LEV_TIMER_INI, | ||
4039 | sky2_us2clk(hw, ecmd->rx_coalesce_usecs)); | ||
4040 | sky2_write8(hw, STAT_LEV_TIMER_CTRL, TIM_START); | ||
4041 | } | ||
4042 | sky2_write8(hw, STAT_FIFO_WM, ecmd->rx_max_coalesced_frames); | ||
4043 | |||
4044 | if (ecmd->rx_coalesce_usecs_irq == 0) | ||
4045 | sky2_write8(hw, STAT_ISR_TIMER_CTRL, TIM_STOP); | ||
4046 | else { | ||
4047 | sky2_write32(hw, STAT_ISR_TIMER_INI, | ||
4048 | sky2_us2clk(hw, ecmd->rx_coalesce_usecs_irq)); | ||
4049 | sky2_write8(hw, STAT_ISR_TIMER_CTRL, TIM_START); | ||
4050 | } | ||
4051 | sky2_write8(hw, STAT_FIFO_ISR_WM, ecmd->rx_max_coalesced_frames_irq); | ||
4052 | return 0; | ||
4053 | } | ||
4054 | |||
/* ethtool: report ring-size limits and the current configured sizes.
 * The hardware has no mini or jumbo rings.
 */
static void sky2_get_ringparam(struct net_device *dev,
			       struct ethtool_ringparam *ering)
{
	struct sky2_port *sky2 = netdev_priv(dev);

	ering->rx_max_pending = RX_MAX_PENDING;
	ering->rx_mini_max_pending = 0;
	ering->rx_jumbo_max_pending = 0;
	ering->tx_max_pending = TX_MAX_PENDING;

	ering->rx_pending = sky2->rx_pending;
	ering->rx_mini_pending = 0;
	ering->rx_jumbo_pending = 0;
	ering->tx_pending = sky2->tx_pending;
}
4070 | |||
/* ethtool: resize the rx/tx rings.  The device must be torn down and
 * brought back up (detach/reattach) for the new sizes to take effect.
 */
static int sky2_set_ringparam(struct net_device *dev,
			      struct ethtool_ringparam *ering)
{
	struct sky2_port *sky2 = netdev_priv(dev);

	if (ering->rx_pending > RX_MAX_PENDING ||
	    ering->rx_pending < 8 ||
	    ering->tx_pending < TX_MIN_PENDING ||
	    ering->tx_pending > TX_MAX_PENDING)
		return -EINVAL;

	sky2_detach(dev);

	sky2->rx_pending = ering->rx_pending;
	sky2->tx_pending = ering->tx_pending;
	/* the tx ring is indexed with a power-of-two mask */
	sky2->tx_ring_size = roundup_pow_of_two(sky2->tx_pending+1);

	return sky2_reattach(dev);
}
4090 | |||
/* ethtool: size of the register dump — always the full 16 KiB window. */
static int sky2_get_regs_len(struct net_device *dev)
{
	return 0x4000;
}
4095 | |||
/* Return nonzero if 128-byte register block @b may be safely read for
 * a register dump.
 *
 * This complicated switch statement is to make sure and
 * only access regions that are unreserved.
 * Some blocks are only valid on dual port cards.
 */
static int sky2_reg_access_ok(struct sky2_hw *hw, unsigned int b)
{
	switch (b) {
	/* second port */
	case 5:		/* Tx Arbiter 2 */
	case 9:		/* RX2 */
	case 14 ... 15:	/* TX2 */
	case 17: case 19: /* Ram Buffer 2 */
	case 22 ... 23: /* Tx Ram Buffer 2 */
	case 25:	/* Rx MAC Fifo 1 */
	case 27:	/* Tx MAC Fifo 2 */
	case 31:	/* GPHY 2 */
	case 40 ... 47: /* Pattern Ram 2 */
	case 52: case 54: /* TCP Segmentation 2 */
	case 112 ... 116: /* GMAC 2 */
		return hw->ports > 1;

	case 0:		/* Control */
	case 2:		/* Mac address */
	case 4:		/* Tx Arbiter 1 */
	case 7:		/* PCI express reg */
	case 8:		/* RX1 */
	case 12 ... 13: /* TX1 */
	case 16: case 18:/* Rx Ram Buffer 1 */
	case 20 ... 21: /* Tx Ram Buffer 1 */
	case 24:	/* Rx MAC Fifo 1 */
	case 26:	/* Tx MAC Fifo 1 */
	case 28 ... 29: /* Descriptor and status unit */
	case 30:	/* GPHY 1*/
	case 32 ... 39: /* Pattern Ram 1 */
	case 48: case 50: /* TCP Segmentation 1 */
	case 56 ... 60:	/* PCI space */
	case 80 ... 84:	/* GMAC 1 */
		return 1;

	default:
		/* reserved — reading may hang or fault the chip */
		return 0;
	}
}
4139 | |||
/*
 * Returns copy of control register region
 * Note: ethtool_get_regs always provides full size (16k) buffer
 *
 * The dump is produced in 128-byte blocks; inaccessible blocks are
 * zero-filled so the output layout always matches the register map.
 */
static void sky2_get_regs(struct net_device *dev, struct ethtool_regs *regs,
			  void *p)
{
	const struct sky2_port *sky2 = netdev_priv(dev);
	const void __iomem *io = sky2->hw->regs;
	unsigned int b;

	regs->version = 1;

	for (b = 0; b < 128; b++) {
		/* skip poisonous diagnostic ram region in block 3 */
		if (b == 3)
			/* copy only bytes 0x10..0x7f of the block */
			memcpy_fromio(p + 0x10, io + 0x10, 128 - 0x10);
		else if (sky2_reg_access_ok(sky2->hw, b))
			memcpy_fromio(p, io, 128);
		else
			memset(p, 0, 128);

		p += 128;
		io += 128;
	}
}
4166 | |||
/* ethtool: size of the VPD EEPROM in bytes.
 * The ROM size field of PCI_DEV_REG2 encodes it as 2^(field + 8),
 * i.e. 256 bytes up to 2 KiB.
 */
static int sky2_get_eeprom_len(struct net_device *dev)
{
	struct sky2_port *sky2 = netdev_priv(dev);
	struct sky2_hw *hw = sky2->hw;
	u16 reg2;

	reg2 = sky2_pci_read16(hw, PCI_DEV_REG2);
	return 1 << ( ((reg2 & PCI_VPD_ROM_SZ) >> 14) + 8);
}
4176 | |||
/* Poll the VPD address register until its flag bit leaves the @busy
 * state (0 after a read request, PCI_VPD_ADDR_F after a write).
 * Returns 0 on completion or -ETIMEDOUT after ~250 ms.
 */
static int sky2_vpd_wait(const struct sky2_hw *hw, int cap, u16 busy)
{
	unsigned long start = jiffies;

	while ( (sky2_pci_read16(hw, cap + PCI_VPD_ADDR) & PCI_VPD_ADDR_F) == busy) {
		/* Can take up to 10.6 ms for write */
		if (time_after(jiffies, start + HZ/4)) {
			dev_err(&hw->pdev->dev, "VPD cycle timed out\n");
			return -ETIMEDOUT;
		}
		mdelay(1);
	}

	return 0;
}
4192 | |||
4193 | static int sky2_vpd_read(struct sky2_hw *hw, int cap, void *data, | ||
4194 | u16 offset, size_t length) | ||
4195 | { | ||
4196 | int rc = 0; | ||
4197 | |||
4198 | while (length > 0) { | ||
4199 | u32 val; | ||
4200 | |||
4201 | sky2_pci_write16(hw, cap + PCI_VPD_ADDR, offset); | ||
4202 | rc = sky2_vpd_wait(hw, cap, 0); | ||
4203 | if (rc) | ||
4204 | break; | ||
4205 | |||
4206 | val = sky2_pci_read32(hw, cap + PCI_VPD_DATA); | ||
4207 | |||
4208 | memcpy(data, &val, min(sizeof(val), length)); | ||
4209 | offset += sizeof(u32); | ||
4210 | data += sizeof(u32); | ||
4211 | length -= sizeof(u32); | ||
4212 | } | ||
4213 | |||
4214 | return rc; | ||
4215 | } | ||
4216 | |||
4217 | static int sky2_vpd_write(struct sky2_hw *hw, int cap, const void *data, | ||
4218 | u16 offset, unsigned int length) | ||
4219 | { | ||
4220 | unsigned int i; | ||
4221 | int rc = 0; | ||
4222 | |||
4223 | for (i = 0; i < length; i += sizeof(u32)) { | ||
4224 | u32 val = *(u32 *)(data + i); | ||
4225 | |||
4226 | sky2_pci_write32(hw, cap + PCI_VPD_DATA, val); | ||
4227 | sky2_pci_write32(hw, cap + PCI_VPD_ADDR, offset | PCI_VPD_ADDR_F); | ||
4228 | |||
4229 | rc = sky2_vpd_wait(hw, cap, PCI_VPD_ADDR_F); | ||
4230 | if (rc) | ||
4231 | break; | ||
4232 | } | ||
4233 | return rc; | ||
4234 | } | ||
4235 | |||
/* ethtool: read the VPD EEPROM via the PCI VPD capability. */
static int sky2_get_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom,
			   u8 *data)
{
	struct sky2_port *sky2 = netdev_priv(dev);
	int cap = pci_find_capability(sky2->hw->pdev, PCI_CAP_ID_VPD);

	if (!cap)
		return -EINVAL;

	eeprom->magic = SKY2_EEPROM_MAGIC;

	return sky2_vpd_read(sky2->hw, cap, data, eeprom->offset, eeprom->len);
}
4249 | |||
/* ethtool: write the VPD EEPROM.  Requires the sky2 magic value and
 * 32-bit aligned offset/length (the VPD interface works in words).
 */
static int sky2_set_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom,
			   u8 *data)
{
	struct sky2_port *sky2 = netdev_priv(dev);
	int cap = pci_find_capability(sky2->hw->pdev, PCI_CAP_ID_VPD);

	if (!cap)
		return -EINVAL;

	if (eeprom->magic != SKY2_EEPROM_MAGIC)
		return -EINVAL;

	/* Partial writes not supported */
	if ((eeprom->offset & 3) || (eeprom->len & 3))
		return -EINVAL;

	return sky2_vpd_write(sky2->hw, cap, data, eeprom->offset, eeprom->len);
}
4268 | |||
/* net_device: constrain the requested feature set to what the hardware
 * can actually do in its current configuration.
 */
static u32 sky2_fix_features(struct net_device *dev, u32 features)
{
	const struct sky2_port *sky2 = netdev_priv(dev);
	const struct sky2_hw *hw = sky2->hw;

	/* In order to do Jumbo packets on these chips, need to turn off the
	 * transmit store/forward. Therefore checksum offload won't work.
	 */
	if (dev->mtu > ETH_DATA_LEN && hw->chip_id == CHIP_ID_YUKON_EC_U) {
		netdev_info(dev, "checksum offload not possible with jumbo frames\n");
		features &= ~(NETIF_F_TSO|NETIF_F_SG|NETIF_F_ALL_CSUM);
	}

	/* Some hardware requires receive checksum for RSS to work. */
	if ( (features & NETIF_F_RXHASH) &&
	     !(features & NETIF_F_RXCSUM) &&
	     (sky2->hw->flags & SKY2_HW_RSS_CHKSUM)) {
		netdev_info(dev, "receive hashing forces receive checksum\n");
		features |= NETIF_F_RXCSUM;
	}

	return features;
}
4292 | |||
/* net_device: apply feature changes to the hardware.  Only features
 * that actually changed are reprogrammed.
 */
static int sky2_set_features(struct net_device *dev, u32 features)
{
	struct sky2_port *sky2 = netdev_priv(dev);
	u32 changed = dev->features ^ features;

	if (changed & NETIF_F_RXCSUM) {
		u32 on = features & NETIF_F_RXCSUM;
		/* toggle receive checksum calculation in the BMU */
		sky2_write32(sky2->hw, Q_ADDR(rxqaddr[sky2->port], Q_CSR),
			     on ? BMU_ENA_RX_CHKSUM : BMU_DIS_RX_CHKSUM);
	}

	if (changed & NETIF_F_RXHASH)
		rx_set_rss(dev, features);

	if (changed & (NETIF_F_HW_VLAN_TX|NETIF_F_HW_VLAN_RX))
		sky2_vlan_mode(dev, features);

	return 0;
}
4312 | |||
/* ethtool entry points for sky2 ports */
static const struct ethtool_ops sky2_ethtool_ops = {
	.get_settings	= sky2_get_settings,
	.set_settings	= sky2_set_settings,
	.get_drvinfo	= sky2_get_drvinfo,
	.get_wol	= sky2_get_wol,
	.set_wol	= sky2_set_wol,
	.get_msglevel	= sky2_get_msglevel,
	.set_msglevel	= sky2_set_msglevel,
	.nway_reset	= sky2_nway_reset,
	.get_regs_len	= sky2_get_regs_len,
	.get_regs	= sky2_get_regs,
	.get_link	= ethtool_op_get_link,
	.get_eeprom_len	= sky2_get_eeprom_len,
	.get_eeprom	= sky2_get_eeprom,
	.set_eeprom	= sky2_set_eeprom,
	.get_strings	= sky2_get_strings,
	.get_coalesce	= sky2_get_coalesce,
	.set_coalesce	= sky2_set_coalesce,
	.get_ringparam	= sky2_get_ringparam,
	.set_ringparam	= sky2_set_ringparam,
	.get_pauseparam = sky2_get_pauseparam,
	.set_pauseparam = sky2_set_pauseparam,
	.set_phys_id	= sky2_set_phys_id,
	.get_sset_count = sky2_get_sset_count,
	.get_ethtool_stats = sky2_get_ethtool_stats,
};
4339 | |||
#ifdef CONFIG_SKY2_DEBUG

/* Root of the driver's debugfs directory ("sky2"); NULL if unavailable */
static struct dentry *sky2_debug;


/*
 * Read and parse the first part of Vital Product Data
 */
/* NOTE(review): VPD_SIZE is not referenced in the visible code; the actual
 * buffer size is derived from PCI_DEV_REG2 in sky2_show_vpd().
 */
#define VPD_SIZE 128
#define VPD_MAGIC 0x82		/* identifier-string tag starting a VPD image */

/* Known VPD keywords and their human-readable labels for the debugfs dump */
static const struct vpd_tag {
	char tag[2];		/* two-char keyword, NOT NUL-terminated */
	char *label;
} vpd_tags[] = {
	{ "PN",	"Part Number" },
	{ "EC", "Engineering Level" },
	{ "MN", "Manufacturer" },
	{ "SN", "Serial Number" },
	{ "YA", "Asset Tag" },
	{ "VL", "First Error Log Message" },
	{ "VF", "Second Error Log Message" },
	{ "VB", "Boot Agent ROM Configuration" },
	{ "VE", "EFI UNDI Configuration" },
};
4365 | |||
/* Dump the Vital Product Data (identifier string plus known keyword fields)
 * into the debugfs seq_file.  Best effort: any read/parse failure just
 * prints a message and returns.
 */
static void sky2_show_vpd(struct seq_file *seq, struct sky2_hw *hw)
{
	size_t vpd_size;
	loff_t offs;
	u8 len;
	unsigned char *buf;
	u16 reg2;

	/* VPD ROM size is encoded in bits 16..14 of PCI_DEV_REG2:
	 * actual size = 2^(field + 8) bytes (256..32768)
	 */
	reg2 = sky2_pci_read16(hw, PCI_DEV_REG2);
	vpd_size = 1 << ( ((reg2 & PCI_VPD_ROM_SZ) >> 14) + 8);

	seq_printf(seq, "%s Product Data\n", pci_name(hw->pdev));
	buf = kmalloc(vpd_size, GFP_KERNEL);
	if (!buf) {
		seq_puts(seq, "no memory!\n");
		return;
	}

	if (pci_read_vpd(hw->pdev, 0, vpd_size, buf) < 0) {
		seq_puts(seq, "VPD read failed\n");
		goto out;
	}

	/* Image must start with the identifier-string tag (0x82) */
	if (buf[0] != VPD_MAGIC) {
		seq_printf(seq, "VPD tag mismatch: %#x\n", buf[0]);
		goto out;
	}
	len = buf[1];
	if (len == 0 || len > vpd_size - 4) {
		seq_printf(seq, "Invalid id length: %d\n", len);
		goto out;
	}

	/* Identifier string begins at offset 3; walk keyword entries after it.
	 * Each entry is: 2-byte keyword, 1-byte length, then the data.
	 */
	seq_printf(seq, "%.*s\n", len, buf + 3);
	offs = len + 3;

	while (offs < vpd_size - 4) {
		int i;

		if (!memcmp("RW", buf + offs, 2))	/* end marker */
			break;
		len = buf[offs + 2];
		if (offs + len + 3 >= vpd_size)		/* entry would overrun buffer */
			break;

		/* Print only keywords we recognize; skip the rest silently */
		for (i = 0; i < ARRAY_SIZE(vpd_tags); i++) {
			if (!memcmp(vpd_tags[i].tag, buf + offs, 2)) {
				seq_printf(seq, " %s: %.*s\n",
					   vpd_tags[i].label, len, buf + offs + 3);
				break;
			}
		}
		offs += len + 3;
	}
out:
	kfree(buf);
}
4423 | |||
/* seq_file show handler for the per-netdev debugfs file: dumps VPD,
 * interrupt state, the status ring, a decoded tx ring, and rx ring indices.
 */
static int sky2_debug_show(struct seq_file *seq, void *v)
{
	struct net_device *dev = seq->private;
	const struct sky2_port *sky2 = netdev_priv(dev);
	struct sky2_hw *hw = sky2->hw;
	unsigned port = sky2->port;
	unsigned idx, last;
	int sop;			/* start-of-packet flag for tx decode */

	sky2_show_vpd(seq, hw);

	seq_printf(seq, "\nIRQ src=%x mask=%x control=%x\n",
		   sky2_read32(hw, B0_ISRC),
		   sky2_read32(hw, B0_IMSK),
		   sky2_read32(hw, B0_Y2_SP_ICR));

	/* Rings only exist while the interface is up */
	if (!netif_running(dev)) {
		seq_printf(seq, "network not running\n");
		return 0;
	}

	/* Quiesce NAPI so the rings are stable while we walk them */
	napi_disable(&hw->napi);
	last = sky2_read16(hw, STAT_PUT_IDX);

	seq_printf(seq, "Status ring %u\n", hw->st_size);
	if (hw->st_idx == last)
		seq_puts(seq, "Status ring (empty)\n");
	else {
		seq_puts(seq, "Status ring\n");
		/* Walk from the driver's consumer index to the hw put index */
		for (idx = hw->st_idx; idx != last && idx < hw->st_size;
		     idx = RING_NEXT(idx, hw->st_size)) {
			const struct sky2_status_le *le = hw->st_le + idx;
			seq_printf(seq, "[%d] %#x %d %#x\n",
				   idx, le->opcode, le->length, le->status);
		}
		seq_puts(seq, "\n");
	}

	seq_printf(seq, "Tx ring pending=%u...%u report=%d done=%d\n",
		   sky2->tx_cons, sky2->tx_prod,
		   sky2_read16(hw, port == 0 ? STAT_TXA1_RIDX : STAT_TXA2_RIDX),
		   sky2_read16(hw, Q_ADDR(txqaddr[port], Q_DONE)));

	/* Dump contents of tx ring; decode each list element by opcode and
	 * group elements of one packet onto a single output line (EOP ends it).
	 */
	sop = 1;
	for (idx = sky2->tx_next; idx != sky2->tx_prod && idx < sky2->tx_ring_size;
	     idx = RING_NEXT(idx, sky2->tx_ring_size)) {
		const struct sky2_tx_le *le = sky2->tx_le + idx;
		u32 a = le32_to_cpu(le->addr);

		if (sop)
			seq_printf(seq, "%u:", idx);
		sop = 0;

		switch (le->opcode & ~HW_OWNER) {
		case OP_ADDR64:
			seq_printf(seq, " %#x:", a);
			break;
		case OP_LRGLEN:
			seq_printf(seq, " mtu=%d", a);
			break;
		case OP_VLAN:
			seq_printf(seq, " vlan=%d", be16_to_cpu(le->length));
			break;
		case OP_TCPLISW:
			seq_printf(seq, " csum=%#x", a);
			break;
		case OP_LARGESEND:
			seq_printf(seq, " tso=%#x(%d)", a, le16_to_cpu(le->length));
			break;
		case OP_PACKET:
			seq_printf(seq, " %#x(%d)", a, le16_to_cpu(le->length));
			break;
		case OP_BUFFER:
			seq_printf(seq, " frag=%#x(%d)", a, le16_to_cpu(le->length));
			break;
		default:
			seq_printf(seq, " op=%#x,%#x(%d)", le->opcode,
				   a, le16_to_cpu(le->length));
		}

		if (le->ctrl & EOP) {
			seq_putc(seq, '\n');
			sop = 1;
		}
	}

	seq_printf(seq, "\nRx ring hw get=%d put=%d last=%d\n",
		   sky2_read16(hw, Y2_QADDR(rxqaddr[port], PREF_UNIT_GET_IDX)),
		   sky2_read16(hw, Y2_QADDR(rxqaddr[port], PREF_UNIT_PUT_IDX)),
		   sky2_read16(hw, Y2_QADDR(rxqaddr[port], PREF_UNIT_LAST_IDX)));

	/* NOTE(review): the read of B0_Y2_SP_LISR appears to re-arm the
	 * status interrupt before NAPI is re-enabled -- confirm against
	 * the interrupt handler earlier in this file.
	 */
	sky2_read32(hw, B0_Y2_SP_LISR);
	napi_enable(&hw->napi);
	return 0;
}
4520 | |||
4521 | static int sky2_debug_open(struct inode *inode, struct file *file) | ||
4522 | { | ||
4523 | return single_open(file, sky2_debug_show, inode->i_private); | ||
4524 | } | ||
4525 | |||
/* file_operations for the per-device debugfs entry (read-only seq_file) */
static const struct file_operations sky2_debug_fops = {
	.owner		= THIS_MODULE,
	.open		= sky2_debug_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
};
4533 | |||
4534 | /* | ||
4535 | * Use network device events to create/remove/rename | ||
4536 | * debugfs file entries | ||
4537 | */ | ||
4538 | static int sky2_device_event(struct notifier_block *unused, | ||
4539 | unsigned long event, void *ptr) | ||
4540 | { | ||
4541 | struct net_device *dev = ptr; | ||
4542 | struct sky2_port *sky2 = netdev_priv(dev); | ||
4543 | |||
4544 | if (dev->netdev_ops->ndo_open != sky2_up || !sky2_debug) | ||
4545 | return NOTIFY_DONE; | ||
4546 | |||
4547 | switch (event) { | ||
4548 | case NETDEV_CHANGENAME: | ||
4549 | if (sky2->debugfs) { | ||
4550 | sky2->debugfs = debugfs_rename(sky2_debug, sky2->debugfs, | ||
4551 | sky2_debug, dev->name); | ||
4552 | } | ||
4553 | break; | ||
4554 | |||
4555 | case NETDEV_GOING_DOWN: | ||
4556 | if (sky2->debugfs) { | ||
4557 | netdev_printk(KERN_DEBUG, dev, "remove debugfs\n"); | ||
4558 | debugfs_remove(sky2->debugfs); | ||
4559 | sky2->debugfs = NULL; | ||
4560 | } | ||
4561 | break; | ||
4562 | |||
4563 | case NETDEV_UP: | ||
4564 | sky2->debugfs = debugfs_create_file(dev->name, S_IRUGO, | ||
4565 | sky2_debug, dev, | ||
4566 | &sky2_debug_fops); | ||
4567 | if (IS_ERR(sky2->debugfs)) | ||
4568 | sky2->debugfs = NULL; | ||
4569 | } | ||
4570 | |||
4571 | return NOTIFY_DONE; | ||
4572 | } | ||
4573 | |||
/* Registered in sky2_debug_init() to track netdev lifecycle events */
static struct notifier_block sky2_notifier = {
	.notifier_call = sky2_device_event,
};
4577 | |||
4578 | |||
4579 | static __init void sky2_debug_init(void) | ||
4580 | { | ||
4581 | struct dentry *ent; | ||
4582 | |||
4583 | ent = debugfs_create_dir("sky2", NULL); | ||
4584 | if (!ent || IS_ERR(ent)) | ||
4585 | return; | ||
4586 | |||
4587 | sky2_debug = ent; | ||
4588 | register_netdevice_notifier(&sky2_notifier); | ||
4589 | } | ||
4590 | |||
4591 | static __exit void sky2_debug_cleanup(void) | ||
4592 | { | ||
4593 | if (sky2_debug) { | ||
4594 | unregister_netdevice_notifier(&sky2_notifier); | ||
4595 | debugfs_remove(sky2_debug); | ||
4596 | sky2_debug = NULL; | ||
4597 | } | ||
4598 | } | ||
4599 | |||
#else
/* Debugfs support compiled out: stubs expand to nothing */
#define sky2_debug_init()
#define sky2_debug_cleanup()
#endif
4604 | |||
4605 | /* Two copies of network device operations to handle special case of | ||
4606 | not allowing netpoll on second port */ | ||
/* Index 0 is used for the first port, index 1 for the second; the second
 * entry deliberately omits ndo_poll_controller (no netpoll on port 1).
 */
static const struct net_device_ops sky2_netdev_ops[2] = {
  {
	.ndo_open		= sky2_up,
	.ndo_stop		= sky2_down,
	.ndo_start_xmit		= sky2_xmit_frame,
	.ndo_do_ioctl		= sky2_ioctl,
	.ndo_validate_addr	= eth_validate_addr,
	.ndo_set_mac_address	= sky2_set_mac_address,
	.ndo_set_multicast_list	= sky2_set_multicast,
	.ndo_change_mtu		= sky2_change_mtu,
	.ndo_fix_features	= sky2_fix_features,
	.ndo_set_features	= sky2_set_features,
	.ndo_tx_timeout		= sky2_tx_timeout,
	.ndo_get_stats64	= sky2_get_stats,
#ifdef CONFIG_NET_POLL_CONTROLLER
	.ndo_poll_controller	= sky2_netpoll,
#endif
  },
  {
	.ndo_open		= sky2_up,
	.ndo_stop		= sky2_down,
	.ndo_start_xmit		= sky2_xmit_frame,
	.ndo_do_ioctl		= sky2_ioctl,
	.ndo_validate_addr	= eth_validate_addr,
	.ndo_set_mac_address	= sky2_set_mac_address,
	.ndo_set_multicast_list	= sky2_set_multicast,
	.ndo_change_mtu		= sky2_change_mtu,
	.ndo_fix_features	= sky2_fix_features,
	.ndo_set_features	= sky2_set_features,
	.ndo_tx_timeout		= sky2_tx_timeout,
	.ndo_get_stats64	= sky2_get_stats,
  },
};
4640 | |||
4641 | /* Initialize network device */ | ||
/* Allocate and initialize the net_device for one port.
 * @hw:      shared chip state
 * @port:    0 or 1
 * @highmem: nonzero if 64-bit DMA is in use (enables NETIF_F_HIGHDMA)
 * @wol:     initial wake-on-LAN setting
 * Returns the device (also stored in hw->dev[port]) or NULL on allocation
 * failure.  The device is not yet registered.
 */
static __devinit struct net_device *sky2_init_netdev(struct sky2_hw *hw,
						     unsigned port,
						     int highmem, int wol)
{
	struct sky2_port *sky2;
	struct net_device *dev = alloc_etherdev(sizeof(*sky2));

	if (!dev) {
		dev_err(&hw->pdev->dev, "etherdev alloc failed\n");
		return NULL;
	}

	SET_NETDEV_DEV(dev, &hw->pdev->dev);
	dev->irq = hw->pdev->irq;
	SET_ETHTOOL_OPS(dev, &sky2_ethtool_ops);
	dev->watchdog_timeo = TX_WATCHDOG;
	/* Port-specific ops table (second port has no netpoll) */
	dev->netdev_ops = &sky2_netdev_ops[port];

	sky2 = netdev_priv(dev);
	sky2->netdev = dev;
	sky2->hw = hw;
	sky2->msg_enable = netif_msg_init(debug, default_msg);

	/* Auto speed and flow control */
	sky2->flags = SKY2_FLAG_AUTO_SPEED | SKY2_FLAG_AUTO_PAUSE;
	/* NOTE(review): RX checksum is withheld on Yukon XL here --
	 * presumably a hardware limitation of that chip; confirm.
	 */
	if (hw->chip_id != CHIP_ID_YUKON_XL)
		dev->hw_features |= NETIF_F_RXCSUM;

	sky2->flow_mode = FC_BOTH;

	sky2->duplex = -1;		/* -1 == autonegotiate */
	sky2->speed = -1;
	sky2->advertising = sky2_supported_modes(hw);
	sky2->wol = wol;

	spin_lock_init(&sky2->phy_lock);

	sky2->tx_pending = TX_DEF_PENDING;
	sky2->tx_ring_size = roundup_pow_of_two(TX_DEF_PENDING+1);
	sky2->rx_pending = RX_DEF_PENDING;

	hw->dev[port] = dev;

	sky2->port = port;

	dev->hw_features |= NETIF_F_IP_CSUM | NETIF_F_SG | NETIF_F_TSO;

	if (highmem)
		dev->features |= NETIF_F_HIGHDMA;

	/* Enable receive hashing unless hardware is known broken */
	if (!(hw->flags & SKY2_HW_RSS_BROKEN))
		dev->hw_features |= NETIF_F_RXHASH;

	if (!(hw->flags & SKY2_HW_VLAN_BROKEN)) {
		dev->hw_features |= NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX;
		dev->vlan_features |= SKY2_VLAN_OFFLOADS;
	}

	/* Everything the hardware can do starts enabled */
	dev->features |= dev->hw_features;

	/* read the mac address */
	memcpy_fromio(dev->dev_addr, hw->regs + B2_MAC_1 + port * 8, ETH_ALEN);
	memcpy(dev->perm_addr, dev->dev_addr, dev->addr_len);

	return dev;
}
4709 | |||
4710 | static void __devinit sky2_show_addr(struct net_device *dev) | ||
4711 | { | ||
4712 | const struct sky2_port *sky2 = netdev_priv(dev); | ||
4713 | |||
4714 | netif_info(sky2, probe, dev, "addr %pM\n", dev->dev_addr); | ||
4715 | } | ||
4716 | |||
4717 | /* Handle software interrupt used during MSI test */ | ||
/* Handle software interrupt used during MSI test.
 * If the expected software IRQ bit is set, record that MSI works,
 * wake the waiter in sky2_test_msi(), and clear the source.
 */
static irqreturn_t __devinit sky2_test_intr(int irq, void *dev_id)
{
	struct sky2_hw *hw = dev_id;
	u32 status = sky2_read32(hw, B0_Y2_SP_ISRC2);

	if (status == 0)
		return IRQ_NONE;	/* not ours (line may be shared) */

	if (status & Y2_IS_IRQ_SW) {
		hw->flags |= SKY2_HW_USE_MSI;
		wake_up(&hw->msi_wait);
		sky2_write8(hw, B0_CTST, CS_CL_SW_IRQ);
	}
	/* NOTE(review): the literal 2 written to B0_Y2_SP_ICR matches the
	 * real interrupt handler's ack -- confirm against sky2_intr().
	 */
	sky2_write32(hw, B0_Y2_SP_ICR, 2);

	return IRQ_HANDLED;
}
4735 | |||
/* Test interrupt path by forcing a software IRQ */
/* Verify that MSI delivery actually works on this platform by raising a
 * software interrupt and waiting (up to 100ms) for sky2_test_intr() to
 * observe it.  Returns 0 if MSI works, -EOPNOTSUPP if the interrupt never
 * arrived (caller falls back to INTx), or the request_irq() error.
 * The test IRQ handler is always freed before returning.
 */
static int __devinit sky2_test_msi(struct sky2_hw *hw)
{
	struct pci_dev *pdev = hw->pdev;
	int err;

	init_waitqueue_head(&hw->msi_wait);

	/* Unmask only the software IRQ for the duration of the test */
	sky2_write32(hw, B0_IMSK, Y2_IS_IRQ_SW);

	err = request_irq(pdev->irq, sky2_test_intr, 0, DRV_NAME, hw);
	if (err) {
		dev_err(&pdev->dev, "cannot assign irq %d\n", pdev->irq);
		return err;
	}

	/* Trigger the software IRQ; the read flushes the posted write */
	sky2_write8(hw, B0_CTST, CS_ST_SW_IRQ);
	sky2_read8(hw, B0_CTST);

	wait_event_timeout(hw->msi_wait, (hw->flags & SKY2_HW_USE_MSI), HZ/10);

	if (!(hw->flags & SKY2_HW_USE_MSI)) {
		/* MSI test failed, go back to INTx mode */
		dev_info(&pdev->dev, "No interrupt generated using MSI, "
			 "switching to INTx mode.\n");

		err = -EOPNOTSUPP;
		sky2_write8(hw, B0_CTST, CS_CL_SW_IRQ);
	}

	/* Mask everything again and flush */
	sky2_write32(hw, B0_IMSK, 0);
	sky2_read32(hw, B0_IMSK);

	free_irq(pdev->irq, hw);

	return err;
}
4773 | |||
4774 | /* This driver supports yukon2 chipset only */ | ||
4775 | static const char *sky2_name(u8 chipid, char *buf, int sz) | ||
4776 | { | ||
4777 | const char *name[] = { | ||
4778 | "XL", /* 0xb3 */ | ||
4779 | "EC Ultra", /* 0xb4 */ | ||
4780 | "Extreme", /* 0xb5 */ | ||
4781 | "EC", /* 0xb6 */ | ||
4782 | "FE", /* 0xb7 */ | ||
4783 | "FE+", /* 0xb8 */ | ||
4784 | "Supreme", /* 0xb9 */ | ||
4785 | "UL 2", /* 0xba */ | ||
4786 | "Unknown", /* 0xbb */ | ||
4787 | "Optima", /* 0xbc */ | ||
4788 | "Optima Prime", /* 0xbd */ | ||
4789 | "Optima 2", /* 0xbe */ | ||
4790 | }; | ||
4791 | |||
4792 | if (chipid >= CHIP_ID_YUKON_XL && chipid <= CHIP_ID_YUKON_OP_2) | ||
4793 | strncpy(buf, name[chipid - CHIP_ID_YUKON_XL], sz); | ||
4794 | else | ||
4795 | snprintf(buf, sz, "(chip %#x)", chipid); | ||
4796 | return buf; | ||
4797 | } | ||
4798 | |||
4799 | static int __devinit sky2_probe(struct pci_dev *pdev, | ||
4800 | const struct pci_device_id *ent) | ||
4801 | { | ||
4802 | struct net_device *dev; | ||
4803 | struct sky2_hw *hw; | ||
4804 | int err, using_dac = 0, wol_default; | ||
4805 | u32 reg; | ||
4806 | char buf1[16]; | ||
4807 | |||
4808 | err = pci_enable_device(pdev); | ||
4809 | if (err) { | ||
4810 | dev_err(&pdev->dev, "cannot enable PCI device\n"); | ||
4811 | goto err_out; | ||
4812 | } | ||
4813 | |||
4814 | /* Get configuration information | ||
4815 | * Note: only regular PCI config access once to test for HW issues | ||
4816 | * other PCI access through shared memory for speed and to | ||
4817 | * avoid MMCONFIG problems. | ||
4818 | */ | ||
4819 | err = pci_read_config_dword(pdev, PCI_DEV_REG2, ®); | ||
4820 | if (err) { | ||
4821 | dev_err(&pdev->dev, "PCI read config failed\n"); | ||
4822 | goto err_out; | ||
4823 | } | ||
4824 | |||
4825 | if (~reg == 0) { | ||
4826 | dev_err(&pdev->dev, "PCI configuration read error\n"); | ||
4827 | goto err_out; | ||
4828 | } | ||
4829 | |||
4830 | err = pci_request_regions(pdev, DRV_NAME); | ||
4831 | if (err) { | ||
4832 | dev_err(&pdev->dev, "cannot obtain PCI resources\n"); | ||
4833 | goto err_out_disable; | ||
4834 | } | ||
4835 | |||
4836 | pci_set_master(pdev); | ||
4837 | |||
4838 | if (sizeof(dma_addr_t) > sizeof(u32) && | ||
4839 | !(err = pci_set_dma_mask(pdev, DMA_BIT_MASK(64)))) { | ||
4840 | using_dac = 1; | ||
4841 | err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64)); | ||
4842 | if (err < 0) { | ||
4843 | dev_err(&pdev->dev, "unable to obtain 64 bit DMA " | ||
4844 | "for consistent allocations\n"); | ||
4845 | goto err_out_free_regions; | ||
4846 | } | ||
4847 | } else { | ||
4848 | err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32)); | ||
4849 | if (err) { | ||
4850 | dev_err(&pdev->dev, "no usable DMA configuration\n"); | ||
4851 | goto err_out_free_regions; | ||
4852 | } | ||
4853 | } | ||
4854 | |||
4855 | |||
4856 | #ifdef __BIG_ENDIAN | ||
4857 | /* The sk98lin vendor driver uses hardware byte swapping but | ||
4858 | * this driver uses software swapping. | ||
4859 | */ | ||
4860 | reg &= ~PCI_REV_DESC; | ||
4861 | err = pci_write_config_dword(pdev, PCI_DEV_REG2, reg); | ||
4862 | if (err) { | ||
4863 | dev_err(&pdev->dev, "PCI write config failed\n"); | ||
4864 | goto err_out_free_regions; | ||
4865 | } | ||
4866 | #endif | ||
4867 | |||
4868 | wol_default = device_may_wakeup(&pdev->dev) ? WAKE_MAGIC : 0; | ||
4869 | |||
4870 | err = -ENOMEM; | ||
4871 | |||
4872 | hw = kzalloc(sizeof(*hw) + strlen(DRV_NAME "@pci:") | ||
4873 | + strlen(pci_name(pdev)) + 1, GFP_KERNEL); | ||
4874 | if (!hw) { | ||
4875 | dev_err(&pdev->dev, "cannot allocate hardware struct\n"); | ||
4876 | goto err_out_free_regions; | ||
4877 | } | ||
4878 | |||
4879 | hw->pdev = pdev; | ||
4880 | sprintf(hw->irq_name, DRV_NAME "@pci:%s", pci_name(pdev)); | ||
4881 | |||
4882 | hw->regs = ioremap_nocache(pci_resource_start(pdev, 0), 0x4000); | ||
4883 | if (!hw->regs) { | ||
4884 | dev_err(&pdev->dev, "cannot map device registers\n"); | ||
4885 | goto err_out_free_hw; | ||
4886 | } | ||
4887 | |||
4888 | err = sky2_init(hw); | ||
4889 | if (err) | ||
4890 | goto err_out_iounmap; | ||
4891 | |||
4892 | /* ring for status responses */ | ||
4893 | hw->st_size = hw->ports * roundup_pow_of_two(3*RX_MAX_PENDING + TX_MAX_PENDING); | ||
4894 | hw->st_le = pci_alloc_consistent(pdev, hw->st_size * sizeof(struct sky2_status_le), | ||
4895 | &hw->st_dma); | ||
4896 | if (!hw->st_le) | ||
4897 | goto err_out_reset; | ||
4898 | |||
4899 | dev_info(&pdev->dev, "Yukon-2 %s chip revision %d\n", | ||
4900 | sky2_name(hw->chip_id, buf1, sizeof(buf1)), hw->chip_rev); | ||
4901 | |||
4902 | sky2_reset(hw); | ||
4903 | |||
4904 | dev = sky2_init_netdev(hw, 0, using_dac, wol_default); | ||
4905 | if (!dev) { | ||
4906 | err = -ENOMEM; | ||
4907 | goto err_out_free_pci; | ||
4908 | } | ||
4909 | |||
4910 | if (!disable_msi && pci_enable_msi(pdev) == 0) { | ||
4911 | err = sky2_test_msi(hw); | ||
4912 | if (err == -EOPNOTSUPP) | ||
4913 | pci_disable_msi(pdev); | ||
4914 | else if (err) | ||
4915 | goto err_out_free_netdev; | ||
4916 | } | ||
4917 | |||
4918 | err = register_netdev(dev); | ||
4919 | if (err) { | ||
4920 | dev_err(&pdev->dev, "cannot register net device\n"); | ||
4921 | goto err_out_free_netdev; | ||
4922 | } | ||
4923 | |||
4924 | netif_carrier_off(dev); | ||
4925 | |||
4926 | netif_napi_add(dev, &hw->napi, sky2_poll, NAPI_WEIGHT); | ||
4927 | |||
4928 | err = request_irq(pdev->irq, sky2_intr, | ||
4929 | (hw->flags & SKY2_HW_USE_MSI) ? 0 : IRQF_SHARED, | ||
4930 | hw->irq_name, hw); | ||
4931 | if (err) { | ||
4932 | dev_err(&pdev->dev, "cannot assign irq %d\n", pdev->irq); | ||
4933 | goto err_out_unregister; | ||
4934 | } | ||
4935 | sky2_write32(hw, B0_IMSK, Y2_IS_BASE); | ||
4936 | napi_enable(&hw->napi); | ||
4937 | |||
4938 | sky2_show_addr(dev); | ||
4939 | |||
4940 | if (hw->ports > 1) { | ||
4941 | struct net_device *dev1; | ||
4942 | |||
4943 | err = -ENOMEM; | ||
4944 | dev1 = sky2_init_netdev(hw, 1, using_dac, wol_default); | ||
4945 | if (dev1 && (err = register_netdev(dev1)) == 0) | ||
4946 | sky2_show_addr(dev1); | ||
4947 | else { | ||
4948 | dev_warn(&pdev->dev, | ||
4949 | "register of second port failed (%d)\n", err); | ||
4950 | hw->dev[1] = NULL; | ||
4951 | hw->ports = 1; | ||
4952 | if (dev1) | ||
4953 | free_netdev(dev1); | ||
4954 | } | ||
4955 | } | ||
4956 | |||
4957 | setup_timer(&hw->watchdog_timer, sky2_watchdog, (unsigned long) hw); | ||
4958 | INIT_WORK(&hw->restart_work, sky2_restart); | ||
4959 | |||
4960 | pci_set_drvdata(pdev, hw); | ||
4961 | pdev->d3_delay = 150; | ||
4962 | |||
4963 | return 0; | ||
4964 | |||
4965 | err_out_unregister: | ||
4966 | if (hw->flags & SKY2_HW_USE_MSI) | ||
4967 | pci_disable_msi(pdev); | ||
4968 | unregister_netdev(dev); | ||
4969 | err_out_free_netdev: | ||
4970 | free_netdev(dev); | ||
4971 | err_out_free_pci: | ||
4972 | pci_free_consistent(pdev, hw->st_size * sizeof(struct sky2_status_le), | ||
4973 | hw->st_le, hw->st_dma); | ||
4974 | err_out_reset: | ||
4975 | sky2_write8(hw, B0_CTST, CS_RST_SET); | ||
4976 | err_out_iounmap: | ||
4977 | iounmap(hw->regs); | ||
4978 | err_out_free_hw: | ||
4979 | kfree(hw); | ||
4980 | err_out_free_regions: | ||
4981 | pci_release_regions(pdev); | ||
4982 | err_out_disable: | ||
4983 | pci_disable_device(pdev); | ||
4984 | err_out: | ||
4985 | pci_set_drvdata(pdev, NULL); | ||
4986 | return err; | ||
4987 | } | ||
4988 | |||
/* PCI remove: undo sky2_probe() in reverse order.  Devices are
 * unregistered before the hardware is reset and the IRQ freed;
 * net_devices are freed only after the IRQ can no longer reference them.
 */
static void __devexit sky2_remove(struct pci_dev *pdev)
{
	struct sky2_hw *hw = pci_get_drvdata(pdev);
	int i;

	if (!hw)
		return;

	/* Stop deferred work before tearing anything down */
	del_timer_sync(&hw->watchdog_timer);
	cancel_work_sync(&hw->restart_work);

	for (i = hw->ports-1; i >= 0; --i)
		unregister_netdev(hw->dev[i]);

	sky2_write32(hw, B0_IMSK, 0);	/* mask all interrupts */

	sky2_power_aux(hw);

	sky2_write8(hw, B0_CTST, CS_RST_SET);	/* put chip in reset */
	sky2_read8(hw, B0_CTST);		/* flush posted write */

	free_irq(pdev->irq, hw);
	if (hw->flags & SKY2_HW_USE_MSI)
		pci_disable_msi(pdev);
	pci_free_consistent(pdev, hw->st_size * sizeof(struct sky2_status_le),
			    hw->st_le, hw->st_dma);
	pci_release_regions(pdev);
	pci_disable_device(pdev);

	for (i = hw->ports-1; i >= 0; --i)
		free_netdev(hw->dev[i]);

	iounmap(hw->regs);
	kfree(hw);

	pci_set_drvdata(pdev, NULL);
}
5026 | |||
5027 | static int sky2_suspend(struct device *dev) | ||
5028 | { | ||
5029 | struct pci_dev *pdev = to_pci_dev(dev); | ||
5030 | struct sky2_hw *hw = pci_get_drvdata(pdev); | ||
5031 | int i; | ||
5032 | |||
5033 | if (!hw) | ||
5034 | return 0; | ||
5035 | |||
5036 | del_timer_sync(&hw->watchdog_timer); | ||
5037 | cancel_work_sync(&hw->restart_work); | ||
5038 | |||
5039 | rtnl_lock(); | ||
5040 | |||
5041 | sky2_all_down(hw); | ||
5042 | for (i = 0; i < hw->ports; i++) { | ||
5043 | struct net_device *dev = hw->dev[i]; | ||
5044 | struct sky2_port *sky2 = netdev_priv(dev); | ||
5045 | |||
5046 | if (sky2->wol) | ||
5047 | sky2_wol_init(sky2); | ||
5048 | } | ||
5049 | |||
5050 | sky2_power_aux(hw); | ||
5051 | rtnl_unlock(); | ||
5052 | |||
5053 | return 0; | ||
5054 | } | ||
5055 | |||
5056 | #ifdef CONFIG_PM_SLEEP | ||
/* Power-management resume: re-enable chip clocks, reset the hardware
 * and bring all ports back up.  Returns 0 or a negative errno; on
 * failure the PCI device is disabled.
 */
static int sky2_resume(struct device *dev)
{
	struct pci_dev *pdev = to_pci_dev(dev);
	struct sky2_hw *hw = pci_get_drvdata(pdev);
	int err;

	if (!hw)
		return 0;

	/* Re-enable all clocks */
	err = pci_write_config_dword(pdev, PCI_DEV_REG3, 0);
	if (err) {
		dev_err(&pdev->dev, "PCI write config failed\n");
		goto out;
	}

	rtnl_lock();
	sky2_reset(hw);
	sky2_all_up(hw);
	rtnl_unlock();

	return 0;
out:

	dev_err(&pdev->dev, "resume failed (%d)\n", err);
	pci_disable_device(pdev);
	return err;
}
5085 | |||
/* Hook suspend/resume into the driver's dev_pm_ops only when sleep
 * support is configured; otherwise the driver advertises no PM ops.
 */
static SIMPLE_DEV_PM_OPS(sky2_pm_ops, sky2_suspend, sky2_resume);
#define SKY2_PM_OPS (&sky2_pm_ops)

#else

#define SKY2_PM_OPS NULL
#endif
5093 | |||
/* Shutdown/reboot hook: quiesce via the suspend path, then configure
 * PCI wake and drop the device to D3hot.
 */
static void sky2_shutdown(struct pci_dev *pdev)
{
	sky2_suspend(&pdev->dev);
	pci_wake_from_d3(pdev, device_may_wakeup(&pdev->dev));
	pci_set_power_state(pdev, PCI_D3hot);
}
5100 | |||
/* PCI driver glue; sky2_id_table is defined earlier in this file */
static struct pci_driver sky2_driver = {
	.name = DRV_NAME,
	.id_table = sky2_id_table,
	.probe = sky2_probe,
	.remove = __devexit_p(sky2_remove),
	.shutdown = sky2_shutdown,
	.driver.pm = SKY2_PM_OPS,
};
5109 | |||
/* Module entry: set up optional debugfs support, then register the
 * PCI driver.  Returns pci_register_driver()'s result.
 */
static int __init sky2_init_module(void)
{
	pr_info("driver version " DRV_VERSION "\n");

	sky2_debug_init();
	return pci_register_driver(&sky2_driver);
}
5117 | |||
/* Module exit: unregister the PCI driver, then tear down debugfs */
static void __exit sky2_cleanup_module(void)
{
	pci_unregister_driver(&sky2_driver);
	sky2_debug_cleanup();
}
5123 | |||
5124 | module_init(sky2_init_module); | ||
5125 | module_exit(sky2_cleanup_module); | ||
5126 | |||
5127 | MODULE_DESCRIPTION("Marvell Yukon 2 Gigabit Ethernet driver"); | ||
5128 | MODULE_AUTHOR("Stephen Hemminger <shemminger@linux-foundation.org>"); | ||
5129 | MODULE_LICENSE("GPL"); | ||
5130 | MODULE_VERSION(DRV_VERSION); | ||
diff --git a/drivers/net/ethernet/marvell/sky2.h b/drivers/net/ethernet/marvell/sky2.h new file mode 100644 index 000000000000..0af31b8b5f10 --- /dev/null +++ b/drivers/net/ethernet/marvell/sky2.h | |||
@@ -0,0 +1,2427 @@ | |||
1 | /* | ||
2 | * Definitions for the new Marvell Yukon 2 driver. | ||
3 | */ | ||
4 | #ifndef _SKY2_H | ||
5 | #define _SKY2_H | ||
6 | |||
7 | #define ETH_JUMBO_MTU 9000 /* Maximum MTU supported */ | ||
8 | |||
9 | /* PCI config registers */ | ||
10 | enum { | ||
11 | PCI_DEV_REG1 = 0x40, | ||
12 | PCI_DEV_REG2 = 0x44, | ||
13 | PCI_DEV_STATUS = 0x7c, | ||
14 | PCI_DEV_REG3 = 0x80, | ||
15 | PCI_DEV_REG4 = 0x84, | ||
16 | PCI_DEV_REG5 = 0x88, | ||
17 | PCI_CFG_REG_0 = 0x90, | ||
18 | PCI_CFG_REG_1 = 0x94, | ||
19 | |||
20 | PSM_CONFIG_REG0 = 0x98, | ||
21 | PSM_CONFIG_REG1 = 0x9C, | ||
22 | PSM_CONFIG_REG2 = 0x160, | ||
23 | PSM_CONFIG_REG3 = 0x164, | ||
24 | PSM_CONFIG_REG4 = 0x168, | ||
25 | |||
26 | }; | ||
27 | |||
28 | /* Yukon-2 */ | ||
29 | enum pci_dev_reg_1 { | ||
30 | PCI_Y2_PIG_ENA = 1<<31, /* Enable Plug-in-Go (YUKON-2) */ | ||
31 | PCI_Y2_DLL_DIS = 1<<30, /* Disable PCI DLL (YUKON-2) */ | ||
32 | PCI_SW_PWR_ON_RST= 1<<30, /* SW Power on Reset (Yukon-EX) */ | ||
33 | PCI_Y2_PHY2_COMA = 1<<29, /* Set PHY 2 to Coma Mode (YUKON-2) */ | ||
34 | PCI_Y2_PHY1_COMA = 1<<28, /* Set PHY 1 to Coma Mode (YUKON-2) */ | ||
35 | PCI_Y2_PHY2_POWD = 1<<27, /* Set PHY 2 to Power Down (YUKON-2) */ | ||
36 | PCI_Y2_PHY1_POWD = 1<<26, /* Set PHY 1 to Power Down (YUKON-2) */ | ||
37 | PCI_Y2_PME_LEGACY= 1<<15, /* PCI Express legacy power management mode */ | ||
38 | |||
39 | PCI_PHY_LNK_TIM_MSK= 3L<<8,/* Bit 9.. 8: GPHY Link Trigger Timer */ | ||
40 | PCI_ENA_L1_EVENT = 1<<7, /* Enable PEX L1 Event */ | ||
41 | PCI_ENA_GPHY_LNK = 1<<6, /* Enable PEX L1 on GPHY Link down */ | ||
42 | PCI_FORCE_PEX_L1 = 1<<5, /* Force to PEX L1 */ | ||
43 | }; | ||
44 | |||
45 | enum pci_dev_reg_2 { | ||
46 | PCI_VPD_WR_THR = 0xffL<<24, /* Bit 31..24: VPD Write Threshold */ | ||
47 | PCI_DEV_SEL = 0x7fL<<17, /* Bit 23..17: EEPROM Device Select */ | ||
48 | PCI_VPD_ROM_SZ = 7L<<14, /* Bit 16..14: VPD ROM Size */ | ||
49 | |||
50 | PCI_PATCH_DIR = 0xfL<<8, /* Bit 11.. 8: Ext Patches dir 3..0 */ | ||
51 | PCI_EXT_PATCHS = 0xfL<<4, /* Bit 7.. 4: Extended Patches 3..0 */ | ||
52 | PCI_EN_DUMMY_RD = 1<<3, /* Enable Dummy Read */ | ||
53 | PCI_REV_DESC = 1<<2, /* Reverse Desc. Bytes */ | ||
54 | |||
55 | PCI_USEDATA64 = 1<<0, /* Use 64Bit Data bus ext */ | ||
56 | }; | ||
57 | |||
58 | /* PCI_OUR_REG_3 32 bit Our Register 3 (Yukon-ECU only) */ | ||
59 | enum pci_dev_reg_3 { | ||
60 | P_CLK_ASF_REGS_DIS = 1<<18,/* Disable Clock ASF (Yukon-Ext.) */ | ||
61 | P_CLK_COR_REGS_D0_DIS = 1<<17,/* Disable Clock Core Regs D0 */ | ||
62 | P_CLK_MACSEC_DIS = 1<<17,/* Disable Clock MACSec (Yukon-Ext.) */ | ||
63 | P_CLK_PCI_REGS_D0_DIS = 1<<16,/* Disable Clock PCI Regs D0 */ | ||
64 | P_CLK_COR_YTB_ARB_DIS = 1<<15,/* Disable Clock YTB Arbiter */ | ||
65 | P_CLK_MAC_LNK1_D3_DIS = 1<<14,/* Disable Clock MAC Link1 D3 */ | ||
66 | P_CLK_COR_LNK1_D0_DIS = 1<<13,/* Disable Clock Core Link1 D0 */ | ||
67 | P_CLK_MAC_LNK1_D0_DIS = 1<<12,/* Disable Clock MAC Link1 D0 */ | ||
68 | P_CLK_COR_LNK1_D3_DIS = 1<<11,/* Disable Clock Core Link1 D3 */ | ||
69 | P_CLK_PCI_MST_ARB_DIS = 1<<10,/* Disable Clock PCI Master Arb. */ | ||
70 | P_CLK_COR_REGS_D3_DIS = 1<<9, /* Disable Clock Core Regs D3 */ | ||
71 | P_CLK_PCI_REGS_D3_DIS = 1<<8, /* Disable Clock PCI Regs D3 */ | ||
72 | P_CLK_REF_LNK1_GM_DIS = 1<<7, /* Disable Clock Ref. Link1 GMAC */ | ||
73 | P_CLK_COR_LNK1_GM_DIS = 1<<6, /* Disable Clock Core Link1 GMAC */ | ||
74 | P_CLK_PCI_COMMON_DIS = 1<<5, /* Disable Clock PCI Common */ | ||
75 | P_CLK_COR_COMMON_DIS = 1<<4, /* Disable Clock Core Common */ | ||
76 | P_CLK_PCI_LNK1_BMU_DIS = 1<<3, /* Disable Clock PCI Link1 BMU */ | ||
77 | P_CLK_COR_LNK1_BMU_DIS = 1<<2, /* Disable Clock Core Link1 BMU */ | ||
78 | P_CLK_PCI_LNK1_BIU_DIS = 1<<1, /* Disable Clock PCI Link1 BIU */ | ||
79 | P_CLK_COR_LNK1_BIU_DIS = 1<<0, /* Disable Clock Core Link1 BIU */ | ||
80 | PCIE_OUR3_WOL_D3_COLD_SET = P_CLK_ASF_REGS_DIS | | ||
81 | P_CLK_COR_REGS_D0_DIS | | ||
82 | P_CLK_COR_LNK1_D0_DIS | | ||
83 | P_CLK_MAC_LNK1_D0_DIS | | ||
84 | P_CLK_PCI_MST_ARB_DIS | | ||
85 | P_CLK_COR_COMMON_DIS | | ||
86 | P_CLK_COR_LNK1_BMU_DIS, | ||
87 | }; | ||
88 | |||
89 | /* PCI_OUR_REG_4 32 bit Our Register 4 (Yukon-ECU only) */ | ||
90 | enum pci_dev_reg_4 { | ||
91 | /* (Link Training & Status State Machine) */ | ||
92 | P_PEX_LTSSM_STAT_MSK = 0x7fL<<25, /* Bit 31..25: PEX LTSSM Mask */ | ||
93 | #define P_PEX_LTSSM_STAT(x) ((x << 25) & P_PEX_LTSSM_STAT_MSK) | ||
94 | P_PEX_LTSSM_L1_STAT = 0x34, | ||
95 | P_PEX_LTSSM_DET_STAT = 0x01, | ||
96 | P_TIMER_VALUE_MSK = 0xffL<<16, /* Bit 23..16: Timer Value Mask */ | ||
97 | /* (Active State Power Management) */ | ||
98 | P_FORCE_ASPM_REQUEST = 1<<15, /* Force ASPM Request (A1 only) */ | ||
99 | P_ASPM_GPHY_LINK_DOWN = 1<<14, /* GPHY Link Down (A1 only) */ | ||
100 | P_ASPM_INT_FIFO_EMPTY = 1<<13, /* Internal FIFO Empty (A1 only) */ | ||
101 | P_ASPM_CLKRUN_REQUEST = 1<<12, /* CLKRUN Request (A1 only) */ | ||
102 | |||
103 | P_ASPM_FORCE_CLKREQ_ENA = 1<<4, /* Force CLKREQ Enable (A1b only) */ | ||
104 | P_ASPM_CLKREQ_PAD_CTL = 1<<3, /* CLKREQ PAD Control (A1 only) */ | ||
105 | P_ASPM_A1_MODE_SELECT = 1<<2, /* A1 Mode Select (A1 only) */ | ||
106 | P_CLK_GATE_PEX_UNIT_ENA = 1<<1, /* Enable Gate PEX Unit Clock */ | ||
107 | P_CLK_GATE_ROOT_COR_ENA = 1<<0, /* Enable Gate Root Core Clock */ | ||
108 | P_ASPM_CONTROL_MSK = P_FORCE_ASPM_REQUEST | P_ASPM_GPHY_LINK_DOWN | ||
109 | | P_ASPM_CLKRUN_REQUEST | P_ASPM_INT_FIFO_EMPTY, | ||
110 | }; | ||
111 | |||
112 | /* PCI_OUR_REG_5 32 bit Our Register 5 (Yukon-ECU only) */ | ||
113 | enum pci_dev_reg_5 { | ||
114 | /* Bit 31..27: for A3 & later */ | ||
115 | P_CTL_DIV_CORE_CLK_ENA = 1<<31, /* Divide Core Clock Enable */ | ||
116 | P_CTL_SRESET_VMAIN_AV = 1<<30, /* Soft Reset for Vmain_av De-Glitch */ | ||
117 | P_CTL_BYPASS_VMAIN_AV = 1<<29, /* Bypass En. for Vmain_av De-Glitch */ | ||
118 | P_CTL_TIM_VMAIN_AV_MSK = 3<<27, /* Bit 28..27: Timer Vmain_av Mask */ | ||
119 | /* Bit 26..16: Release Clock on Event */ | ||
120 | P_REL_PCIE_RST_DE_ASS = 1<<26, /* PCIe Reset De-Asserted */ | ||
121 | P_REL_GPHY_REC_PACKET = 1<<25, /* GPHY Received Packet */ | ||
122 | P_REL_INT_FIFO_N_EMPTY = 1<<24, /* Internal FIFO Not Empty */ | ||
123 | P_REL_MAIN_PWR_AVAIL = 1<<23, /* Main Power Available */ | ||
124 | P_REL_CLKRUN_REQ_REL = 1<<22, /* CLKRUN Request Release */ | ||
125 | P_REL_PCIE_RESET_ASS = 1<<21, /* PCIe Reset Asserted */ | ||
126 | P_REL_PME_ASSERTED = 1<<20, /* PME Asserted */ | ||
127 | P_REL_PCIE_EXIT_L1_ST = 1<<19, /* PCIe Exit L1 State */ | ||
128 | P_REL_LOADER_NOT_FIN = 1<<18, /* EPROM Loader Not Finished */ | ||
129 | P_REL_PCIE_RX_EX_IDLE = 1<<17, /* PCIe Rx Exit Electrical Idle State */ | ||
130 | P_REL_GPHY_LINK_UP = 1<<16, /* GPHY Link Up */ | ||
131 | |||
132 | /* Bit 10.. 0: Mask for Gate Clock */ | ||
133 | P_GAT_PCIE_RST_ASSERTED = 1<<10,/* PCIe Reset Asserted */ | ||
134 | P_GAT_GPHY_N_REC_PACKET = 1<<9, /* GPHY Not Received Packet */ | ||
135 | P_GAT_INT_FIFO_EMPTY = 1<<8, /* Internal FIFO Empty */ | ||
136 | P_GAT_MAIN_PWR_N_AVAIL = 1<<7, /* Main Power Not Available */ | ||
137 | P_GAT_CLKRUN_REQ_REL = 1<<6, /* CLKRUN Not Requested */ | ||
138 | P_GAT_PCIE_RESET_ASS = 1<<5, /* PCIe Reset Asserted */ | ||
139 | P_GAT_PME_DE_ASSERTED = 1<<4, /* PME De-Asserted */ | ||
140 | P_GAT_PCIE_ENTER_L1_ST = 1<<3, /* PCIe Enter L1 State */ | ||
141 | P_GAT_LOADER_FINISHED = 1<<2, /* EPROM Loader Finished */ | ||
142 | P_GAT_PCIE_RX_EL_IDLE = 1<<1, /* PCIe Rx Electrical Idle State */ | ||
143 | P_GAT_GPHY_LINK_DOWN = 1<<0, /* GPHY Link Down */ | ||
144 | |||
145 | PCIE_OUR5_EVENT_CLK_D3_SET = P_REL_GPHY_REC_PACKET | | ||
146 | P_REL_INT_FIFO_N_EMPTY | | ||
147 | P_REL_PCIE_EXIT_L1_ST | | ||
148 | P_REL_PCIE_RX_EX_IDLE | | ||
149 | P_GAT_GPHY_N_REC_PACKET | | ||
150 | P_GAT_INT_FIFO_EMPTY | | ||
151 | P_GAT_PCIE_ENTER_L1_ST | | ||
152 | P_GAT_PCIE_RX_EL_IDLE, | ||
153 | }; | ||
154 | |||
155 | /* PCI_CFG_REG_1 32 bit Config Register 1 (Yukon-Ext only) */ | ||
156 | enum pci_cfg_reg1 { | ||
157 | P_CF1_DIS_REL_EVT_RST = 1<<24, /* Dis. Rel. Event during PCIE reset */ | ||
158 | /* Bit 23..21: Release Clock on Event */ | ||
159 | P_CF1_REL_LDR_NOT_FIN = 1<<23, /* EEPROM Loader Not Finished */ | ||
160 | P_CF1_REL_VMAIN_AVLBL = 1<<22, /* Vmain available */ | ||
161 | P_CF1_REL_PCIE_RESET = 1<<21, /* PCI-E reset */ | ||
162 | /* Bit 20..18: Gate Clock on Event */ | ||
163 | P_CF1_GAT_LDR_NOT_FIN = 1<<20, /* EEPROM Loader Not Finished */ | ||
164 | P_CF1_GAT_PCIE_RX_IDLE = 1<<19, /* PCI-E Rx Electrical idle */ | ||
165 | P_CF1_GAT_PCIE_RESET = 1<<18, /* PCI-E Reset */ | ||
166 | P_CF1_PRST_PHY_CLKREQ = 1<<17, /* Enable PCI-E rst & PM2PHY gen. CLKREQ */ | ||
167 | P_CF1_PCIE_RST_CLKREQ = 1<<16, /* Enable PCI-E rst generate CLKREQ */ | ||
168 | |||
169 | P_CF1_ENA_CFG_LDR_DONE = 1<<8, /* Enable core level Config loader done */ | ||
170 | |||
171 | P_CF1_ENA_TXBMU_RD_IDLE = 1<<1, /* Enable TX BMU Read IDLE for ASPM */ | ||
172 | P_CF1_ENA_TXBMU_WR_IDLE = 1<<0, /* Enable TX BMU Write IDLE for ASPM */ | ||
173 | |||
174 | PCIE_CFG1_EVENT_CLK_D3_SET = P_CF1_DIS_REL_EVT_RST | | ||
175 | P_CF1_REL_LDR_NOT_FIN | | ||
176 | P_CF1_REL_VMAIN_AVLBL | | ||
177 | P_CF1_REL_PCIE_RESET | | ||
178 | P_CF1_GAT_LDR_NOT_FIN | | ||
179 | P_CF1_GAT_PCIE_RESET | | ||
180 | P_CF1_PRST_PHY_CLKREQ | | ||
181 | P_CF1_ENA_CFG_LDR_DONE | | ||
182 | P_CF1_ENA_TXBMU_RD_IDLE | | ||
183 | P_CF1_ENA_TXBMU_WR_IDLE, | ||
184 | }; | ||
185 | |||
186 | /* Yukon-Optima */ | ||
187 | enum { | ||
188 | PSM_CONFIG_REG1_AC_PRESENT_STATUS = 1<<31, /* AC Present Status */ | ||
189 | |||
190 | PSM_CONFIG_REG1_PTP_CLK_SEL = 1<<29, /* PTP Clock Select */ | ||
191 | PSM_CONFIG_REG1_PTP_MODE = 1<<28, /* PTP Mode */ | ||
192 | |||
193 | PSM_CONFIG_REG1_MUX_PHY_LINK = 1<<27, /* PHY Energy Detect Event */ | ||
194 | |||
195 | PSM_CONFIG_REG1_EN_PIN63_AC_PRESENT = 1<<26, /* Enable LED_DUPLEX for ac_present */ | ||
196 | PSM_CONFIG_REG1_EN_PCIE_TIMER = 1<<25, /* Enable PCIe Timer */ | ||
197 | PSM_CONFIG_REG1_EN_SPU_TIMER = 1<<24, /* Enable SPU Timer */ | ||
198 | PSM_CONFIG_REG1_POLARITY_AC_PRESENT = 1<<23, /* AC Present Polarity */ | ||
199 | |||
200 | PSM_CONFIG_REG1_EN_AC_PRESENT = 1<<21, /* Enable AC Present */ | ||
201 | |||
202 | PSM_CONFIG_REG1_EN_GPHY_INT_PSM = 1<<20, /* Enable GPHY INT for PSM */ | ||
203 | PSM_CONFIG_REG1_DIS_PSM_TIMER = 1<<19, /* Disable PSM Timer */ | ||
204 | }; | ||
205 | |||
206 | /* Yukon-Supreme */ | ||
207 | enum { | ||
208 | PSM_CONFIG_REG1_GPHY_ENERGY_STS = 1<<31, /* GPHY Energy Detect Status */ | ||
209 | |||
210 | PSM_CONFIG_REG1_UART_MODE_MSK = 3<<29, /* UART_Mode */ | ||
211 | PSM_CONFIG_REG1_CLK_RUN_ASF = 1<<28, /* Enable Clock Free Running for ASF Subsystem */ | ||
212 | PSM_CONFIG_REG1_UART_CLK_DISABLE= 1<<27, /* Disable UART clock */ | ||
213 | PSM_CONFIG_REG1_VAUX_ONE = 1<<26, /* Tie internal Vaux to 1'b1 */ | ||
214 | PSM_CONFIG_REG1_UART_FC_RI_VAL = 1<<25, /* Default value for UART_RI_n */ | ||
215 | PSM_CONFIG_REG1_UART_FC_DCD_VAL = 1<<24, /* Default value for UART_DCD_n */ | ||
216 | PSM_CONFIG_REG1_UART_FC_DSR_VAL = 1<<23, /* Default value for UART_DSR_n */ | ||
217 | PSM_CONFIG_REG1_UART_FC_CTS_VAL = 1<<22, /* Default value for UART_CTS_n */ | ||
218 | PSM_CONFIG_REG1_LATCH_VAUX = 1<<21, /* Enable Latch current Vaux_avlbl */ | ||
219 | PSM_CONFIG_REG1_FORCE_TESTMODE_INPUT= 1<<20, /* Force Testmode pin as input PAD */ | ||
220 | PSM_CONFIG_REG1_UART_RST = 1<<19, /* UART_RST */ | ||
221 | PSM_CONFIG_REG1_PSM_PCIE_L1_POL = 1<<18, /* PCIE L1 Event Polarity for PSM */ | ||
222 | PSM_CONFIG_REG1_TIMER_STAT = 1<<17, /* PSM Timer Status */ | ||
223 | PSM_CONFIG_REG1_GPHY_INT = 1<<16, /* GPHY INT Status */ | ||
224 | PSM_CONFIG_REG1_FORCE_TESTMODE_ZERO= 1<<15, /* Force internal Testmode as 1'b0 */ | ||
225 | PSM_CONFIG_REG1_EN_INT_ASPM_CLKREQ = 1<<14, /* ENABLE INT for CLKRUN on ASPM and CLKREQ */ | ||
226 | PSM_CONFIG_REG1_EN_SND_TASK_ASPM_CLKREQ = 1<<13, /* ENABLE Snd_task for CLKRUN on ASPM and CLKREQ */ | ||
227 | PSM_CONFIG_REG1_DIS_CLK_GATE_SND_TASK = 1<<12, /* Disable CLK_GATE control snd_task */ | ||
228 | PSM_CONFIG_REG1_DIS_FF_CHIAN_SND_INTA = 1<<11, /* Disable flip-flop chain for sndmsg_inta */ | ||
229 | |||
230 | PSM_CONFIG_REG1_DIS_LOADER = 1<<9, /* Disable Loader SM after PSM Goes back to IDLE */ | ||
231 | PSM_CONFIG_REG1_DO_PWDN = 1<<8, /* Do Power Down, Start PSM Scheme */ | ||
232 | PSM_CONFIG_REG1_DIS_PIG = 1<<7, /* Disable Plug-in-Go SM after PSM Goes back to IDLE */ | ||
233 | PSM_CONFIG_REG1_DIS_PERST = 1<<6, /* Disable Internal PCIe Reset after PSM Goes back to IDLE */ | ||
234 | PSM_CONFIG_REG1_EN_REG18_PD = 1<<5, /* Enable REG18 Power Down for PSM */ | ||
235 | PSM_CONFIG_REG1_EN_PSM_LOAD = 1<<4, /* Disable EEPROM Loader after PSM Goes back to IDLE */ | ||
236 | PSM_CONFIG_REG1_EN_PSM_HOT_RST = 1<<3, /* Enable PCIe Hot Reset for PSM */ | ||
237 | PSM_CONFIG_REG1_EN_PSM_PERST = 1<<2, /* Enable PCIe Reset Event for PSM */ | ||
238 | PSM_CONFIG_REG1_EN_PSM_PCIE_L1 = 1<<1, /* Enable PCIe L1 Event for PSM */ | ||
239 | PSM_CONFIG_REG1_EN_PSM = 1<<0, /* Enable PSM Scheme */ | ||
240 | }; | ||
241 | |||
242 | /* PSM_CONFIG_REG4 0x0168 PSM Config Register 4 */ | ||
243 | enum { | ||
244 | /* PHY Link Detect Timer */ | ||
245 | PSM_CONFIG_REG4_TIMER_PHY_LINK_DETECT_MSK = 0xf<<4, | ||
246 | PSM_CONFIG_REG4_TIMER_PHY_LINK_DETECT_BASE = 4, | ||
247 | |||
248 | PSM_CONFIG_REG4_DEBUG_TIMER = 1<<1, /* Debug Timer */ | ||
249 | PSM_CONFIG_REG4_RST_PHY_LINK_DETECT = 1<<0, /* Reset GPHY Link Detect */ | ||
250 | }; | ||
251 | |||
252 | |||
253 | #define PCI_STATUS_ERROR_BITS (PCI_STATUS_DETECTED_PARITY | \ | ||
254 | PCI_STATUS_SIG_SYSTEM_ERROR | \ | ||
255 | PCI_STATUS_REC_MASTER_ABORT | \ | ||
256 | PCI_STATUS_REC_TARGET_ABORT | \ | ||
257 | PCI_STATUS_PARITY) | ||
258 | |||
259 | enum csr_regs { | ||
260 | B0_RAP = 0x0000, | ||
261 | B0_CTST = 0x0004, | ||
262 | |||
263 | B0_POWER_CTRL = 0x0007, | ||
264 | B0_ISRC = 0x0008, | ||
265 | B0_IMSK = 0x000c, | ||
266 | B0_HWE_ISRC = 0x0010, | ||
267 | B0_HWE_IMSK = 0x0014, | ||
268 | |||
269 | /* Special ISR registers (Yukon-2 only) */ | ||
270 | B0_Y2_SP_ISRC2 = 0x001c, | ||
271 | B0_Y2_SP_ISRC3 = 0x0020, | ||
272 | B0_Y2_SP_EISR = 0x0024, | ||
273 | B0_Y2_SP_LISR = 0x0028, | ||
274 | B0_Y2_SP_ICR = 0x002c, | ||
275 | |||
276 | B2_MAC_1 = 0x0100, | ||
277 | B2_MAC_2 = 0x0108, | ||
278 | B2_MAC_3 = 0x0110, | ||
279 | B2_CONN_TYP = 0x0118, | ||
280 | B2_PMD_TYP = 0x0119, | ||
281 | B2_MAC_CFG = 0x011a, | ||
282 | B2_CHIP_ID = 0x011b, | ||
283 | B2_E_0 = 0x011c, | ||
284 | |||
285 | B2_Y2_CLK_GATE = 0x011d, | ||
286 | B2_Y2_HW_RES = 0x011e, | ||
287 | B2_E_3 = 0x011f, | ||
288 | B2_Y2_CLK_CTRL = 0x0120, | ||
289 | |||
290 | B2_TI_INI = 0x0130, | ||
291 | B2_TI_VAL = 0x0134, | ||
292 | B2_TI_CTRL = 0x0138, | ||
293 | B2_TI_TEST = 0x0139, | ||
294 | |||
295 | B2_TST_CTRL1 = 0x0158, | ||
296 | B2_TST_CTRL2 = 0x0159, | ||
297 | B2_GP_IO = 0x015c, | ||
298 | |||
299 | B2_I2C_CTRL = 0x0160, | ||
300 | B2_I2C_DATA = 0x0164, | ||
301 | B2_I2C_IRQ = 0x0168, | ||
302 | B2_I2C_SW = 0x016c, | ||
303 | |||
304 | Y2_PEX_PHY_DATA = 0x0170, | ||
305 | Y2_PEX_PHY_ADDR = 0x0172, | ||
306 | |||
307 | B3_RAM_ADDR = 0x0180, | ||
308 | B3_RAM_DATA_LO = 0x0184, | ||
309 | B3_RAM_DATA_HI = 0x0188, | ||
310 | |||
311 | /* RAM Interface Registers */ | ||
312 | /* Yukon-2: use RAM_BUFFER() to access the RAM buffer */ | ||
313 | /* | ||
314 | * The HW-Spec. calls these registers Timeout Value 0..11, but these names are | ||
315 | * not usable in SW. Please note these are NOT real timeouts, these are | ||
316 | * the number of qWords transferred continuously. | ||
317 | */ | ||
318 | #define RAM_BUFFER(port, reg) (reg | (port <<6)) | ||
319 | |||
320 | B3_RI_WTO_R1 = 0x0190, | ||
321 | B3_RI_WTO_XA1 = 0x0191, | ||
322 | B3_RI_WTO_XS1 = 0x0192, | ||
323 | B3_RI_RTO_R1 = 0x0193, | ||
324 | B3_RI_RTO_XA1 = 0x0194, | ||
325 | B3_RI_RTO_XS1 = 0x0195, | ||
326 | B3_RI_WTO_R2 = 0x0196, | ||
327 | B3_RI_WTO_XA2 = 0x0197, | ||
328 | B3_RI_WTO_XS2 = 0x0198, | ||
329 | B3_RI_RTO_R2 = 0x0199, | ||
330 | B3_RI_RTO_XA2 = 0x019a, | ||
331 | B3_RI_RTO_XS2 = 0x019b, | ||
332 | B3_RI_TO_VAL = 0x019c, | ||
333 | B3_RI_CTRL = 0x01a0, | ||
334 | B3_RI_TEST = 0x01a2, | ||
335 | B3_MA_TOINI_RX1 = 0x01b0, | ||
336 | B3_MA_TOINI_RX2 = 0x01b1, | ||
337 | B3_MA_TOINI_TX1 = 0x01b2, | ||
338 | B3_MA_TOINI_TX2 = 0x01b3, | ||
339 | B3_MA_TOVAL_RX1 = 0x01b4, | ||
340 | B3_MA_TOVAL_RX2 = 0x01b5, | ||
341 | B3_MA_TOVAL_TX1 = 0x01b6, | ||
342 | B3_MA_TOVAL_TX2 = 0x01b7, | ||
343 | B3_MA_TO_CTRL = 0x01b8, | ||
344 | B3_MA_TO_TEST = 0x01ba, | ||
345 | B3_MA_RCINI_RX1 = 0x01c0, | ||
346 | B3_MA_RCINI_RX2 = 0x01c1, | ||
347 | B3_MA_RCINI_TX1 = 0x01c2, | ||
348 | B3_MA_RCINI_TX2 = 0x01c3, | ||
349 | B3_MA_RCVAL_RX1 = 0x01c4, | ||
350 | B3_MA_RCVAL_RX2 = 0x01c5, | ||
351 | B3_MA_RCVAL_TX1 = 0x01c6, | ||
352 | B3_MA_RCVAL_TX2 = 0x01c7, | ||
353 | B3_MA_RC_CTRL = 0x01c8, | ||
354 | B3_MA_RC_TEST = 0x01ca, | ||
355 | B3_PA_TOINI_RX1 = 0x01d0, | ||
356 | B3_PA_TOINI_RX2 = 0x01d4, | ||
357 | B3_PA_TOINI_TX1 = 0x01d8, | ||
358 | B3_PA_TOINI_TX2 = 0x01dc, | ||
359 | B3_PA_TOVAL_RX1 = 0x01e0, | ||
360 | B3_PA_TOVAL_RX2 = 0x01e4, | ||
361 | B3_PA_TOVAL_TX1 = 0x01e8, | ||
362 | B3_PA_TOVAL_TX2 = 0x01ec, | ||
363 | B3_PA_CTRL = 0x01f0, | ||
364 | B3_PA_TEST = 0x01f2, | ||
365 | |||
366 | Y2_CFG_SPC = 0x1c00, /* PCI config space region */ | ||
367 | Y2_CFG_AER = 0x1d00, /* PCI Advanced Error Report region */ | ||
368 | }; | ||
369 | |||
370 | /* B0_CTST 24 bit Control/Status register */ | ||
371 | enum { | ||
372 | Y2_VMAIN_AVAIL = 1<<17,/* VMAIN available (YUKON-2 only) */ | ||
373 | Y2_VAUX_AVAIL = 1<<16,/* VAUX available (YUKON-2 only) */ | ||
374 | Y2_HW_WOL_ON = 1<<15,/* HW WOL On (Yukon-EC Ultra A1 only) */ | ||
375 | Y2_HW_WOL_OFF = 1<<14,/* HW WOL Off (Yukon-EC Ultra A1 only) */ | ||
376 | Y2_ASF_ENABLE = 1<<13,/* ASF Unit Enable (YUKON-2 only) */ | ||
377 | Y2_ASF_DISABLE = 1<<12,/* ASF Unit Disable (YUKON-2 only) */ | ||
378 | Y2_CLK_RUN_ENA = 1<<11,/* CLK_RUN Enable (YUKON-2 only) */ | ||
379 | Y2_CLK_RUN_DIS = 1<<10,/* CLK_RUN Disable (YUKON-2 only) */ | ||
380 | Y2_LED_STAT_ON = 1<<9, /* Status LED On (YUKON-2 only) */ | ||
381 | Y2_LED_STAT_OFF = 1<<8, /* Status LED Off (YUKON-2 only) */ | ||
382 | |||
383 | CS_ST_SW_IRQ = 1<<7, /* Set IRQ SW Request */ | ||
384 | CS_CL_SW_IRQ = 1<<6, /* Clear IRQ SW Request */ | ||
385 | CS_STOP_DONE = 1<<5, /* Stop Master is finished */ | ||
386 | CS_STOP_MAST = 1<<4, /* Command Bit to stop the master */ | ||
387 | CS_MRST_CLR = 1<<3, /* Clear Master reset */ | ||
388 | CS_MRST_SET = 1<<2, /* Set Master reset */ | ||
389 | CS_RST_CLR = 1<<1, /* Clear Software reset */ | ||
390 | CS_RST_SET = 1, /* Set Software reset */ | ||
391 | }; | ||
392 | |||
393 | /* B0_POWER_CTRL 8 Bit Power Control reg (YUKON only) */ | ||
394 | enum { | ||
395 | PC_VAUX_ENA = 1<<7, /* Switch VAUX Enable */ | ||
396 | PC_VAUX_DIS = 1<<6, /* Switch VAUX Disable */ | ||
397 | PC_VCC_ENA = 1<<5, /* Switch VCC Enable */ | ||
398 | PC_VCC_DIS = 1<<4, /* Switch VCC Disable */ | ||
399 | PC_VAUX_ON = 1<<3, /* Switch VAUX On */ | ||
400 | PC_VAUX_OFF = 1<<2, /* Switch VAUX Off */ | ||
401 | PC_VCC_ON = 1<<1, /* Switch VCC On */ | ||
402 | PC_VCC_OFF = 1<<0, /* Switch VCC Off */ | ||
403 | }; | ||
404 | |||
405 | /* B2_IRQM_MSK 32 bit IRQ Moderation Mask */ | ||
406 | |||
407 | /* B0_Y2_SP_ISRC2 32 bit Special Interrupt Source Reg 2 */ | ||
408 | /* B0_Y2_SP_ISRC3 32 bit Special Interrupt Source Reg 3 */ | ||
409 | /* B0_Y2_SP_EISR 32 bit Enter ISR Reg */ | ||
410 | /* B0_Y2_SP_LISR 32 bit Leave ISR Reg */ | ||
411 | enum { | ||
412 | Y2_IS_HW_ERR = 1<<31, /* Interrupt HW Error */ | ||
413 | Y2_IS_STAT_BMU = 1<<30, /* Status BMU Interrupt */ | ||
414 | Y2_IS_ASF = 1<<29, /* ASF subsystem Interrupt */ | ||
415 | Y2_IS_CPU_TO = 1<<28, /* CPU Timeout */ | ||
416 | Y2_IS_POLL_CHK = 1<<27, /* Check IRQ from polling unit */ | ||
417 | Y2_IS_TWSI_RDY = 1<<26, /* IRQ on end of TWSI Tx */ | ||
418 | Y2_IS_IRQ_SW = 1<<25, /* SW forced IRQ */ | ||
419 | Y2_IS_TIMINT = 1<<24, /* IRQ from Timer */ | ||
420 | |||
421 | Y2_IS_IRQ_PHY2 = 1<<12, /* Interrupt from PHY 2 */ | ||
422 | Y2_IS_IRQ_MAC2 = 1<<11, /* Interrupt from MAC 2 */ | ||
423 | Y2_IS_CHK_RX2 = 1<<10, /* Descriptor error Rx 2 */ | ||
424 | Y2_IS_CHK_TXS2 = 1<<9, /* Descriptor error TXS 2 */ | ||
425 | Y2_IS_CHK_TXA2 = 1<<8, /* Descriptor error TXA 2 */ | ||
426 | |||
427 | Y2_IS_PSM_ACK = 1<<7, /* PSM Acknowledge (Yukon-Optima only) */ | ||
428 | Y2_IS_PTP_TIST = 1<<6, /* PTP Time Stamp (Yukon-Optima only) */ | ||
429 | Y2_IS_PHY_QLNK = 1<<5, /* PHY Quick Link (Yukon-Optima only) */ | ||
430 | |||
431 | Y2_IS_IRQ_PHY1 = 1<<4, /* Interrupt from PHY 1 */ | ||
432 | Y2_IS_IRQ_MAC1 = 1<<3, /* Interrupt from MAC 1 */ | ||
433 | Y2_IS_CHK_RX1 = 1<<2, /* Descriptor error Rx 1 */ | ||
434 | Y2_IS_CHK_TXS1 = 1<<1, /* Descriptor error TXS 1 */ | ||
435 | Y2_IS_CHK_TXA1 = 1<<0, /* Descriptor error TXA 1 */ | ||
436 | |||
437 | Y2_IS_BASE = Y2_IS_HW_ERR | Y2_IS_STAT_BMU, | ||
438 | Y2_IS_PORT_1 = Y2_IS_IRQ_PHY1 | Y2_IS_IRQ_MAC1 | ||
439 | | Y2_IS_CHK_TXA1 | Y2_IS_CHK_RX1, | ||
440 | Y2_IS_PORT_2 = Y2_IS_IRQ_PHY2 | Y2_IS_IRQ_MAC2 | ||
441 | | Y2_IS_CHK_TXA2 | Y2_IS_CHK_RX2, | ||
442 | Y2_IS_ERROR = Y2_IS_HW_ERR | | ||
443 | Y2_IS_IRQ_MAC1 | Y2_IS_CHK_TXA1 | Y2_IS_CHK_RX1 | | ||
444 | Y2_IS_IRQ_MAC2 | Y2_IS_CHK_TXA2 | Y2_IS_CHK_RX2, | ||
445 | }; | ||
446 | |||
447 | /* B2_IRQM_HWE_MSK 32 bit IRQ Moderation HW Error Mask */ | ||
448 | enum { | ||
449 | IS_ERR_MSK = 0x00003fff,/* All Error bits */ | ||
450 | |||
451 | IS_IRQ_TIST_OV = 1<<13, /* Time Stamp Timer Overflow (YUKON only) */ | ||
452 | IS_IRQ_SENSOR = 1<<12, /* IRQ from Sensor (YUKON only) */ | ||
453 | IS_IRQ_MST_ERR = 1<<11, /* IRQ master error detected */ | ||
454 | IS_IRQ_STAT = 1<<10, /* IRQ status exception */ | ||
455 | IS_NO_STAT_M1 = 1<<9, /* No Rx Status from MAC 1 */ | ||
456 | IS_NO_STAT_M2 = 1<<8, /* No Rx Status from MAC 2 */ | ||
457 | IS_NO_TIST_M1 = 1<<7, /* No Time Stamp from MAC 1 */ | ||
458 | IS_NO_TIST_M2 = 1<<6, /* No Time Stamp from MAC 2 */ | ||
459 | IS_RAM_RD_PAR = 1<<5, /* RAM Read Parity Error */ | ||
460 | IS_RAM_WR_PAR = 1<<4, /* RAM Write Parity Error */ | ||
461 | IS_M1_PAR_ERR = 1<<3, /* MAC 1 Parity Error */ | ||
462 | IS_M2_PAR_ERR = 1<<2, /* MAC 2 Parity Error */ | ||
463 | IS_R1_PAR_ERR = 1<<1, /* Queue R1 Parity Error */ | ||
464 | IS_R2_PAR_ERR = 1<<0, /* Queue R2 Parity Error */ | ||
465 | }; | ||
466 | |||
467 | /* Hardware error interrupt mask for Yukon 2 */ | ||
468 | enum { | ||
469 | Y2_IS_TIST_OV = 1<<29,/* Time Stamp Timer overflow interrupt */ | ||
470 | Y2_IS_SENSOR = 1<<28, /* Sensor interrupt */ | ||
471 | Y2_IS_MST_ERR = 1<<27, /* Master error interrupt */ | ||
472 | Y2_IS_IRQ_STAT = 1<<26, /* Status exception interrupt */ | ||
473 | Y2_IS_PCI_EXP = 1<<25, /* PCI-Express interrupt */ | ||
474 | Y2_IS_PCI_NEXP = 1<<24, /* PCI-Express error similar to PCI error */ | ||
475 | /* Link 2 */ | ||
476 | Y2_IS_PAR_RD2 = 1<<13, /* Read RAM parity error interrupt */ | ||
477 | Y2_IS_PAR_WR2 = 1<<12, /* Write RAM parity error interrupt */ | ||
478 | Y2_IS_PAR_MAC2 = 1<<11, /* MAC hardware fault interrupt */ | ||
479 | Y2_IS_PAR_RX2 = 1<<10, /* Parity Error Rx Queue 2 */ | ||
480 | Y2_IS_TCP_TXS2 = 1<<9, /* TCP length mismatch sync Tx queue IRQ */ | ||
481 | Y2_IS_TCP_TXA2 = 1<<8, /* TCP length mismatch async Tx queue IRQ */ | ||
482 | /* Link 1 */ | ||
483 | Y2_IS_PAR_RD1 = 1<<5, /* Read RAM parity error interrupt */ | ||
484 | Y2_IS_PAR_WR1 = 1<<4, /* Write RAM parity error interrupt */ | ||
485 | Y2_IS_PAR_MAC1 = 1<<3, /* MAC hardware fault interrupt */ | ||
486 | Y2_IS_PAR_RX1 = 1<<2, /* Parity Error Rx Queue 1 */ | ||
487 | Y2_IS_TCP_TXS1 = 1<<1, /* TCP length mismatch sync Tx queue IRQ */ | ||
488 | Y2_IS_TCP_TXA1 = 1<<0, /* TCP length mismatch async Tx queue IRQ */ | ||
489 | |||
490 | Y2_HWE_L1_MASK = Y2_IS_PAR_RD1 | Y2_IS_PAR_WR1 | Y2_IS_PAR_MAC1 | | ||
491 | Y2_IS_PAR_RX1 | Y2_IS_TCP_TXS1| Y2_IS_TCP_TXA1, | ||
492 | Y2_HWE_L2_MASK = Y2_IS_PAR_RD2 | Y2_IS_PAR_WR2 | Y2_IS_PAR_MAC2 | | ||
493 | Y2_IS_PAR_RX2 | Y2_IS_TCP_TXS2| Y2_IS_TCP_TXA2, | ||
494 | |||
495 | Y2_HWE_ALL_MASK = Y2_IS_TIST_OV | Y2_IS_MST_ERR | Y2_IS_IRQ_STAT | | ||
496 | Y2_HWE_L1_MASK | Y2_HWE_L2_MASK, | ||
497 | }; | ||
498 | |||
499 | /* B28_DPT_CTRL 8 bit Descriptor Poll Timer Ctrl Reg */ | ||
500 | enum { | ||
501 | DPT_START = 1<<1, | ||
502 | DPT_STOP = 1<<0, | ||
503 | }; | ||
504 | |||
505 | /* B2_TST_CTRL1 8 bit Test Control Register 1 */ | ||
506 | enum { | ||
507 | TST_FRC_DPERR_MR = 1<<7, /* force DATAPERR on MST RD */ | ||
508 | TST_FRC_DPERR_MW = 1<<6, /* force DATAPERR on MST WR */ | ||
509 | TST_FRC_DPERR_TR = 1<<5, /* force DATAPERR on TRG RD */ | ||
510 | TST_FRC_DPERR_TW = 1<<4, /* force DATAPERR on TRG WR */ | ||
511 | TST_FRC_APERR_M = 1<<3, /* force ADDRPERR on MST */ | ||
512 | TST_FRC_APERR_T = 1<<2, /* force ADDRPERR on TRG */ | ||
513 | TST_CFG_WRITE_ON = 1<<1, /* Enable Config Reg WR */ | ||
514 | TST_CFG_WRITE_OFF= 1<<0, /* Disable Config Reg WR */ | ||
515 | }; | ||
516 | |||
517 | /* B2_GPIO */ | ||
518 | enum { | ||
519 | GLB_GPIO_CLK_DEB_ENA = 1<<31, /* Clock Debug Enable */ | ||
520 | GLB_GPIO_CLK_DBG_MSK = 0xf<<26, /* Clock Debug */ | ||
521 | |||
522 | GLB_GPIO_INT_RST_D3_DIS = 1<<15, /* Disable Internal Reset After D3 to D0 */ | ||
523 | GLB_GPIO_LED_PAD_SPEED_UP = 1<<14, /* LED PAD Speed Up */ | ||
524 | GLB_GPIO_STAT_RACE_DIS = 1<<13, /* Status Race Disable */ | ||
525 | GLB_GPIO_TEST_SEL_MSK = 3<<11, /* Testmode Select */ | ||
526 | GLB_GPIO_TEST_SEL_BASE = 1<<11, | ||
527 | GLB_GPIO_RAND_ENA = 1<<10, /* Random Enable */ | ||
528 | GLB_GPIO_RAND_BIT_1 = 1<<9, /* Random Bit 1 */ | ||
529 | }; | ||
530 | |||
531 | /* B2_MAC_CFG 8 bit MAC Configuration / Chip Revision */ | ||
532 | enum { | ||
533 | CFG_CHIP_R_MSK = 0xf<<4, /* Bit 7.. 4: Chip Revision */ | ||
534 | /* Bit 3.. 2: reserved */ | ||
535 | CFG_DIS_M2_CLK = 1<<1, /* Disable Clock for 2nd MAC */ | ||
536 | CFG_SNG_MAC = 1<<0, /* MAC Config: 0=2 MACs / 1=1 MAC*/ | ||
537 | }; | ||
538 | |||
539 | /* B2_CHIP_ID 8 bit Chip Identification Number */ | ||
540 | enum { | ||
541 | CHIP_ID_YUKON_XL = 0xb3, /* YUKON-2 XL */ | ||
542 | CHIP_ID_YUKON_EC_U = 0xb4, /* YUKON-2 EC Ultra */ | ||
543 | CHIP_ID_YUKON_EX = 0xb5, /* YUKON-2 Extreme */ | ||
544 | CHIP_ID_YUKON_EC = 0xb6, /* YUKON-2 EC */ | ||
545 | CHIP_ID_YUKON_FE = 0xb7, /* YUKON-2 FE */ | ||
546 | CHIP_ID_YUKON_FE_P = 0xb8, /* YUKON-2 FE+ */ | ||
547 | CHIP_ID_YUKON_SUPR = 0xb9, /* YUKON-2 Supreme */ | ||
548 | CHIP_ID_YUKON_UL_2 = 0xba, /* YUKON-2 Ultra 2 */ | ||
549 | CHIP_ID_YUKON_OPT = 0xbc, /* YUKON-2 Optima */ | ||
550 | CHIP_ID_YUKON_PRM = 0xbd, /* YUKON-2 Optima Prime */ | ||
551 | CHIP_ID_YUKON_OP_2 = 0xbe, /* YUKON-2 Optima 2 */ | ||
552 | }; | ||
553 | |||
554 | enum yukon_xl_rev { | ||
555 | CHIP_REV_YU_XL_A0 = 0, | ||
556 | CHIP_REV_YU_XL_A1 = 1, | ||
557 | CHIP_REV_YU_XL_A2 = 2, | ||
558 | CHIP_REV_YU_XL_A3 = 3, | ||
559 | }; | ||
560 | |||
561 | enum yukon_ec_rev { | ||
562 | CHIP_REV_YU_EC_A1 = 0, /* Chip Rev. for Yukon-EC A1/A0 */ | ||
563 | CHIP_REV_YU_EC_A2 = 1, /* Chip Rev. for Yukon-EC A2 */ | ||
564 | CHIP_REV_YU_EC_A3 = 2, /* Chip Rev. for Yukon-EC A3 */ | ||
565 | }; | ||
566 | enum yukon_ec_u_rev { | ||
567 | CHIP_REV_YU_EC_U_A0 = 1, | ||
568 | CHIP_REV_YU_EC_U_A1 = 2, | ||
569 | CHIP_REV_YU_EC_U_B0 = 3, | ||
570 | CHIP_REV_YU_EC_U_B1 = 5, | ||
571 | }; | ||
572 | enum yukon_fe_rev { | ||
573 | CHIP_REV_YU_FE_A1 = 1, | ||
574 | CHIP_REV_YU_FE_A2 = 2, | ||
575 | }; | ||
576 | enum yukon_fe_p_rev { | ||
577 | CHIP_REV_YU_FE2_A0 = 0, | ||
578 | }; | ||
579 | enum yukon_ex_rev { | ||
580 | CHIP_REV_YU_EX_A0 = 1, | ||
581 | CHIP_REV_YU_EX_B0 = 2, | ||
582 | }; | ||
583 | enum yukon_supr_rev { | ||
584 | CHIP_REV_YU_SU_A0 = 0, | ||
585 | CHIP_REV_YU_SU_B0 = 1, | ||
586 | CHIP_REV_YU_SU_B1 = 3, | ||
587 | }; | ||
588 | |||
589 | |||
590 | /* B2_Y2_CLK_GATE 8 bit Clock Gating (Yukon-2 only) */ | ||
591 | enum { | ||
592 | Y2_STATUS_LNK2_INAC = 1<<7, /* Status Link 2 inactive (0 = active) */ | ||
593 | Y2_CLK_GAT_LNK2_DIS = 1<<6, /* Disable clock gating Link 2 */ | ||
594 | Y2_COR_CLK_LNK2_DIS = 1<<5, /* Disable Core clock Link 2 */ | ||
595 | Y2_PCI_CLK_LNK2_DIS = 1<<4, /* Disable PCI clock Link 2 */ | ||
596 | Y2_STATUS_LNK1_INAC = 1<<3, /* Status Link 1 inactive (0 = active) */ | ||
597 | Y2_CLK_GAT_LNK1_DIS = 1<<2, /* Disable clock gating Link 1 */ | ||
598 | Y2_COR_CLK_LNK1_DIS = 1<<1, /* Disable Core clock Link 1 */ | ||
599 | Y2_PCI_CLK_LNK1_DIS = 1<<0, /* Disable PCI clock Link 1 */ | ||
600 | }; | ||
601 | |||
602 | /* B2_Y2_HW_RES 8 bit HW Resources (Yukon-2 only) */ | ||
603 | enum { | ||
604 | CFG_LED_MODE_MSK = 7<<2, /* Bit 4.. 2: LED Mode Mask */ | ||
605 | CFG_LINK_2_AVAIL = 1<<1, /* Link 2 available */ | ||
606 | CFG_LINK_1_AVAIL = 1<<0, /* Link 1 available */ | ||
607 | }; | ||
608 | #define CFG_LED_MODE(x) (((x) & CFG_LED_MODE_MSK) >> 2) | ||
609 | #define CFG_DUAL_MAC_MSK (CFG_LINK_2_AVAIL | CFG_LINK_1_AVAIL) | ||
610 | |||
611 | |||
612 | /* B2_Y2_CLK_CTRL 32 bit Clock Frequency Control Register (Yukon-2/EC) */ | ||
613 | enum { | ||
614 | Y2_CLK_DIV_VAL_MSK = 0xff<<16,/* Bit 23..16: Clock Divisor Value */ | ||
615 | #define Y2_CLK_DIV_VAL(x) (((x)<<16) & Y2_CLK_DIV_VAL_MSK) | ||
616 | Y2_CLK_DIV_VAL2_MSK = 7<<21, /* Bit 23..21: Clock Divisor Value */ | ||
617 | Y2_CLK_SELECT2_MSK = 0x1f<<16,/* Bit 20..16: Clock Select */ | ||
618 | #define Y2_CLK_DIV_VAL_2(x) (((x)<<21) & Y2_CLK_DIV_VAL2_MSK) | ||
619 | #define Y2_CLK_SEL_VAL_2(x) (((x)<<16) & Y2_CLK_SELECT2_MSK) | ||
620 | Y2_CLK_DIV_ENA = 1<<1, /* Enable Core Clock Division */ | ||
621 | Y2_CLK_DIV_DIS = 1<<0, /* Disable Core Clock Division */ | ||
622 | }; | ||
623 | |||
624 | /* B2_TI_CTRL 8 bit Timer control */ | ||
625 | /* B2_IRQM_CTRL 8 bit IRQ Moderation Timer Control */ | ||
626 | enum { | ||
627 | TIM_START = 1<<2, /* Start Timer */ | ||
628 | TIM_STOP = 1<<1, /* Stop Timer */ | ||
629 | TIM_CLR_IRQ = 1<<0, /* Clear Timer IRQ (!IRQM) */ | ||
630 | }; | ||
631 | |||
632 | /* B2_TI_TEST 8 Bit Timer Test */ | ||
633 | /* B2_IRQM_TEST 8 bit IRQ Moderation Timer Test */ | ||
634 | /* B28_DPT_TST 8 bit Descriptor Poll Timer Test Reg */ | ||
635 | enum { | ||
636 | TIM_T_ON = 1<<2, /* Test mode on */ | ||
637 | TIM_T_OFF = 1<<1, /* Test mode off */ | ||
638 | TIM_T_STEP = 1<<0, /* Test step */ | ||
639 | }; | ||
640 | |||
641 | /* Y2_PEX_PHY_ADDR/DATA PEX PHY address and data reg (Yukon-2 only) */ | ||
642 | enum { | ||
643 | PEX_RD_ACCESS = 1<<31, /* Access Mode Read = 1, Write = 0 */ | ||
644 | PEX_DB_ACCESS = 1<<30, /* Access to debug register */ | ||
645 | }; | ||
646 | |||
647 | /* B3_RAM_ADDR 32 bit RAM Address, to read or write */ | ||
648 | /* Bit 31..19: reserved */ | ||
649 | #define RAM_ADR_RAN 0x0007ffffL /* Bit 18.. 0: RAM Address Range */ | ||
650 | /* RAM Interface Registers */ | ||
651 | |||
652 | /* B3_RI_CTRL 16 bit RAM Interface Control Register */ | ||
653 | enum { | ||
654 | RI_CLR_RD_PERR = 1<<9, /* Clear IRQ RAM Read Parity Err */ | ||
655 | RI_CLR_WR_PERR = 1<<8, /* Clear IRQ RAM Write Parity Err*/ | ||
656 | |||
657 | RI_RST_CLR = 1<<1, /* Clear RAM Interface Reset */ | ||
658 | RI_RST_SET = 1<<0, /* Set RAM Interface Reset */ | ||
659 | }; | ||
660 | |||
661 | #define SK_RI_TO_53 36 /* RAM interface timeout */ | ||
662 | |||
663 | |||
664 | /* Port related registers FIFO, and Arbiter */ | ||
665 | #define SK_REG(port,reg) (((port)<<7)+(reg)) | ||
666 | |||
667 | /* Transmit Arbiter Registers MAC 1 and 2, use SK_REG() to access */ | ||
668 | /* TXA_ITI_INI 32 bit Tx Arb Interval Timer Init Val */ | ||
669 | /* TXA_ITI_VAL 32 bit Tx Arb Interval Timer Value */ | ||
670 | /* TXA_LIM_INI 32 bit Tx Arb Limit Counter Init Val */ | ||
671 | /* TXA_LIM_VAL 32 bit Tx Arb Limit Counter Value */ | ||
672 | |||
673 | #define TXA_MAX_VAL 0x00ffffffUL /* Bit 23.. 0: Max TXA Timer/Cnt Val */ | ||
674 | |||
675 | /* TXA_CTRL 8 bit Tx Arbiter Control Register */ | ||
676 | enum { | ||
677 | TXA_ENA_FSYNC = 1<<7, /* Enable force of sync Tx queue */ | ||
678 | TXA_DIS_FSYNC = 1<<6, /* Disable force of sync Tx queue */ | ||
679 | TXA_ENA_ALLOC = 1<<5, /* Enable alloc of free bandwidth */ | ||
680 | TXA_DIS_ALLOC = 1<<4, /* Disable alloc of free bandwidth */ | ||
681 | TXA_START_RC = 1<<3, /* Start sync Rate Control */ | ||
682 | TXA_STOP_RC = 1<<2, /* Stop sync Rate Control */ | ||
683 | TXA_ENA_ARB = 1<<1, /* Enable Tx Arbiter */ | ||
684 | TXA_DIS_ARB = 1<<0, /* Disable Tx Arbiter */ | ||
685 | }; | ||
686 | |||
687 | /* | ||
688 | * Bank 4 - 5 | ||
689 | */ | ||
690 | /* Transmit Arbiter Registers MAC 1 and 2, use SK_REG() to access */ | ||
691 | enum { | ||
692 | TXA_ITI_INI = 0x0200,/* 32 bit Tx Arb Interval Timer Init Val*/ | ||
693 | TXA_ITI_VAL = 0x0204,/* 32 bit Tx Arb Interval Timer Value */ | ||
694 | TXA_LIM_INI = 0x0208,/* 32 bit Tx Arb Limit Counter Init Val */ | ||
695 | TXA_LIM_VAL = 0x020c,/* 32 bit Tx Arb Limit Counter Value */ | ||
696 | TXA_CTRL = 0x0210,/* 8 bit Tx Arbiter Control Register */ | ||
697 | TXA_TEST = 0x0211,/* 8 bit Tx Arbiter Test Register */ | ||
698 | TXA_STAT = 0x0212,/* 8 bit Tx Arbiter Status Register */ | ||
699 | |||
700 | RSS_KEY = 0x0220, /* RSS Key setup */ | ||
701 | RSS_CFG = 0x0248, /* RSS Configuration */ | ||
702 | }; | ||
703 | |||
704 | enum { | ||
705 | HASH_TCP_IPV6_EX_CTRL = 1<<5, | ||
706 | HASH_IPV6_EX_CTRL = 1<<4, | ||
707 | HASH_TCP_IPV6_CTRL = 1<<3, | ||
708 | HASH_IPV6_CTRL = 1<<2, | ||
709 | HASH_TCP_IPV4_CTRL = 1<<1, | ||
710 | HASH_IPV4_CTRL = 1<<0, | ||
711 | |||
712 | HASH_ALL = 0x3f, | ||
713 | }; | ||
714 | |||
715 | enum { | ||
716 | B6_EXT_REG = 0x0300,/* External registers (GENESIS only) */ | ||
717 | B7_CFG_SPC = 0x0380,/* copy of the Configuration register */ | ||
718 | B8_RQ1_REGS = 0x0400,/* Receive Queue 1 */ | ||
719 | B8_RQ2_REGS = 0x0480,/* Receive Queue 2 */ | ||
720 | B8_TS1_REGS = 0x0600,/* Transmit sync queue 1 */ | ||
721 | B8_TA1_REGS = 0x0680,/* Transmit async queue 1 */ | ||
722 | B8_TS2_REGS = 0x0700,/* Transmit sync queue 2 */ | ||
723 | B8_TA2_REGS = 0x0780,/* Transmit async queue 2 */ | ||
724 | B16_RAM_REGS = 0x0800,/* RAM Buffer Registers */ | ||
725 | }; | ||
726 | |||
727 | /* Queue Register Offsets, use Q_ADDR() to access */ | ||
728 | enum { | ||
729 | B8_Q_REGS = 0x0400, /* base of Queue registers */ | ||
730 | Q_D = 0x00, /* 8*32 bit Current Descriptor */ | ||
731 | Q_VLAN = 0x20, /* 16 bit Current VLAN Tag */ | ||
732 | Q_DONE = 0x24, /* 16 bit Done Index */ | ||
733 | Q_AC_L = 0x28, /* 32 bit Current Address Counter Low dWord */ | ||
734 | Q_AC_H = 0x2c, /* 32 bit Current Address Counter High dWord */ | ||
735 | Q_BC = 0x30, /* 32 bit Current Byte Counter */ | ||
736 | Q_CSR = 0x34, /* 32 bit BMU Control/Status Register */ | ||
737 | Q_TEST = 0x38, /* 32 bit Test/Control Register */ | ||
738 | |||
739 | /* Yukon-2 */ | ||
740 | Q_WM = 0x40, /* 16 bit FIFO Watermark */ | ||
741 | Q_AL = 0x42, /* 8 bit FIFO Alignment */ | ||
742 | Q_RSP = 0x44, /* 16 bit FIFO Read Shadow Pointer */ | ||
743 | Q_RSL = 0x46, /* 8 bit FIFO Read Shadow Level */ | ||
744 | Q_RP = 0x48, /* 8 bit FIFO Read Pointer */ | ||
745 | Q_RL = 0x4a, /* 8 bit FIFO Read Level */ | ||
746 | Q_WP = 0x4c, /* 8 bit FIFO Write Pointer */ | ||
747 | Q_WSP = 0x4d, /* 8 bit FIFO Write Shadow Pointer */ | ||
748 | Q_WL = 0x4e, /* 8 bit FIFO Write Level */ | ||
749 | Q_WSL = 0x4f, /* 8 bit FIFO Write Shadow Level */ | ||
750 | }; | ||
751 | #define Q_ADDR(reg, offs) (B8_Q_REGS + (reg) + (offs)) | ||
752 | |||
753 | /* Q_TEST 32 bit Test Register */ | ||
754 | enum { | ||
755 | /* Transmit */ | ||
756 | F_TX_CHK_AUTO_OFF = 1<<31, /* Tx checksum auto calc off (Yukon EX) */ | ||
757 | F_TX_CHK_AUTO_ON = 1<<30, /* Tx checksum auto calc on (Yukon EX) */ | ||
758 | |||
759 | /* Receive */ | ||
760 | F_M_RX_RAM_DIS = 1<<24, /* MAC Rx RAM Read Port disable */ | ||
761 | |||
762 | /* Hardware testbits not used */ | ||
763 | }; | ||
764 | |||
765 | /* Queue Prefetch Unit Offsets, use Y2_QADDR() to address (Yukon-2 only)*/ | ||
766 | enum { | ||
767 | Y2_B8_PREF_REGS = 0x0450, | ||
768 | |||
769 | PREF_UNIT_CTRL = 0x00, /* 32 bit Control register */ | ||
770 | PREF_UNIT_LAST_IDX = 0x04, /* 16 bit Last Index */ | ||
771 | PREF_UNIT_ADDR_LO = 0x08, /* 32 bit List start addr, low part */ | ||
772 | PREF_UNIT_ADDR_HI = 0x0c, /* 32 bit List start addr, high part*/ | ||
773 | PREF_UNIT_GET_IDX = 0x10, /* 16 bit Get Index */ | ||
774 | PREF_UNIT_PUT_IDX = 0x14, /* 16 bit Put Index */ | ||
775 | PREF_UNIT_FIFO_WP = 0x20, /* 8 bit FIFO write pointer */ | ||
776 | PREF_UNIT_FIFO_RP = 0x24, /* 8 bit FIFO read pointer */ | ||
777 | PREF_UNIT_FIFO_WM = 0x28, /* 8 bit FIFO watermark */ | ||
778 | PREF_UNIT_FIFO_LEV = 0x2c, /* 8 bit FIFO level */ | ||
779 | |||
780 | PREF_UNIT_MASK_IDX = 0x0fff, | ||
781 | }; | ||
782 | #define Y2_QADDR(q,reg) (Y2_B8_PREF_REGS + (q) + (reg)) | ||
783 | |||
784 | /* RAM Buffer Register Offsets */ | ||
785 | enum { | ||
786 | |||
787 | RB_START = 0x00,/* 32 bit RAM Buffer Start Address */ | ||
788 | RB_END = 0x04,/* 32 bit RAM Buffer End Address */ | ||
789 | RB_WP = 0x08,/* 32 bit RAM Buffer Write Pointer */ | ||
790 | RB_RP = 0x0c,/* 32 bit RAM Buffer Read Pointer */ | ||
791 | RB_RX_UTPP = 0x10,/* 32 bit Rx Upper Threshold, Pause Packet */ | ||
792 | RB_RX_LTPP = 0x14,/* 32 bit Rx Lower Threshold, Pause Packet */ | ||
793 | RB_RX_UTHP = 0x18,/* 32 bit Rx Upper Threshold, High Prio */ | ||
794 | RB_RX_LTHP = 0x1c,/* 32 bit Rx Lower Threshold, High Prio */ | ||
795 | /* 0x10 - 0x1f: reserved at Tx RAM Buffer Registers */ | ||
796 | RB_PC = 0x20,/* 32 bit RAM Buffer Packet Counter */ | ||
797 | RB_LEV = 0x24,/* 32 bit RAM Buffer Level Register */ | ||
798 | RB_CTRL = 0x28,/* 32 bit RAM Buffer Control Register */ | ||
799 | RB_TST1 = 0x29,/* 8 bit RAM Buffer Test Register 1 */ | ||
800 | RB_TST2 = 0x2a,/* 8 bit RAM Buffer Test Register 2 */ | ||
801 | }; | ||
802 | |||
803 | /* Receive and Transmit Queues */ | ||
804 | enum { | ||
805 | Q_R1 = 0x0000, /* Receive Queue 1 */ | ||
806 | Q_R2 = 0x0080, /* Receive Queue 2 */ | ||
807 | Q_XS1 = 0x0200, /* Synchronous Transmit Queue 1 */ | ||
808 | Q_XA1 = 0x0280, /* Asynchronous Transmit Queue 1 */ | ||
809 | Q_XS2 = 0x0300, /* Synchronous Transmit Queue 2 */ | ||
810 | Q_XA2 = 0x0380, /* Asynchronous Transmit Queue 2 */ | ||
811 | }; | ||
812 | |||
813 | /* Different PHY Types */ | ||
814 | enum { | ||
815 | PHY_ADDR_MARV = 0, | ||
816 | }; | ||
817 | |||
818 | #define RB_ADDR(offs, queue) ((u16) B16_RAM_REGS + (queue) + (offs)) | ||
819 | |||
820 | |||
821 | enum { | ||
822 | LNK_SYNC_INI = 0x0c30,/* 32 bit Link Sync Cnt Init Value */ | ||
823 | LNK_SYNC_VAL = 0x0c34,/* 32 bit Link Sync Cnt Current Value */ | ||
824 | LNK_SYNC_CTRL = 0x0c38,/* 8 bit Link Sync Cnt Control Register */ | ||
825 | LNK_SYNC_TST = 0x0c39,/* 8 bit Link Sync Cnt Test Register */ | ||
826 | |||
827 | LNK_LED_REG = 0x0c3c,/* 8 bit Link LED Register */ | ||
828 | |||
829 | /* Receive GMAC FIFO (YUKON and Yukon-2) */ | ||
830 | |||
831 | RX_GMF_EA = 0x0c40,/* 32 bit Rx GMAC FIFO End Address */ | ||
832 | RX_GMF_AF_THR = 0x0c44,/* 32 bit Rx GMAC FIFO Almost Full Thresh. */ | ||
833 | RX_GMF_CTRL_T = 0x0c48,/* 32 bit Rx GMAC FIFO Control/Test */ | ||
834 | RX_GMF_FL_MSK = 0x0c4c,/* 32 bit Rx GMAC FIFO Flush Mask */ | ||
835 | RX_GMF_FL_THR = 0x0c50,/* 16 bit Rx GMAC FIFO Flush Threshold */ | ||
836 | RX_GMF_FL_CTRL = 0x0c52,/* 16 bit Rx GMAC FIFO Flush Control */ | ||
837 | RX_GMF_TR_THR = 0x0c54,/* 32 bit Rx Truncation Threshold (Yukon-2) */ | ||
838 | RX_GMF_UP_THR = 0x0c58,/* 16 bit Rx Upper Pause Thr (Yukon-EC_U) */ | ||
839 | RX_GMF_LP_THR = 0x0c5a,/* 16 bit Rx Lower Pause Thr (Yukon-EC_U) */ | ||
840 | RX_GMF_VLAN = 0x0c5c,/* 32 bit Rx VLAN Type Register (Yukon-2) */ | ||
841 | RX_GMF_WP = 0x0c60,/* 32 bit Rx GMAC FIFO Write Pointer */ | ||
842 | |||
843 | RX_GMF_WLEV = 0x0c68,/* 32 bit Rx GMAC FIFO Write Level */ | ||
844 | |||
845 | RX_GMF_RP = 0x0c70,/* 32 bit Rx GMAC FIFO Read Pointer */ | ||
846 | |||
847 | RX_GMF_RLEV = 0x0c78,/* 32 bit Rx GMAC FIFO Read Level */ | ||
848 | }; | ||
849 | |||
850 | |||
851 | /* Q_BC 32 bit Current Byte Counter */ | ||
852 | |||
853 | /* BMU Control Status Registers */ | ||
854 | /* B0_R1_CSR 32 bit BMU Ctrl/Stat Rx Queue 1 */ | ||
855 | /* B0_R2_CSR 32 bit BMU Ctrl/Stat Rx Queue 2 */ | ||
856 | /* B0_XA1_CSR 32 bit BMU Ctrl/Stat Sync Tx Queue 1 */ | ||
857 | /* B0_XS1_CSR 32 bit BMU Ctrl/Stat Async Tx Queue 1 */ | ||
858 | /* B0_XA2_CSR 32 bit BMU Ctrl/Stat Sync Tx Queue 2 */ | ||
859 | /* B0_XS2_CSR 32 bit BMU Ctrl/Stat Async Tx Queue 2 */ | ||
860 | /* Q_CSR 32 bit BMU Control/Status Register */ | ||
861 | |||
862 | /* Rx BMU Control / Status Registers (Yukon-2) */ | ||
863 | enum { | ||
864 | BMU_IDLE = 1<<31, /* BMU Idle State */ | ||
865 | BMU_RX_TCP_PKT = 1<<30, /* Rx TCP Packet (when RSS Hash enabled) */ | ||
866 | BMU_RX_IP_PKT = 1<<29, /* Rx IP Packet (when RSS Hash enabled) */ | ||
867 | |||
868 | BMU_ENA_RX_RSS_HASH = 1<<15, /* Enable Rx RSS Hash */ | ||
869 | BMU_DIS_RX_RSS_HASH = 1<<14, /* Disable Rx RSS Hash */ | ||
870 | BMU_ENA_RX_CHKSUM = 1<<13, /* Enable Rx TCP/IP Checksum Check */ | ||
871 | BMU_DIS_RX_CHKSUM = 1<<12, /* Disable Rx TCP/IP Checksum Check */ | ||
872 | BMU_CLR_IRQ_PAR = 1<<11, /* Clear IRQ on Parity errors (Rx) */ | ||
873 | BMU_CLR_IRQ_TCP = 1<<11, /* Clear IRQ on TCP segment. error (Tx) */ | ||
874 | BMU_CLR_IRQ_CHK = 1<<10, /* Clear IRQ Check */ | ||
875 | BMU_STOP = 1<<9, /* Stop Rx/Tx Queue */ | ||
876 | BMU_START = 1<<8, /* Start Rx/Tx Queue */ | ||
877 | BMU_FIFO_OP_ON = 1<<7, /* FIFO Operational On */ | ||
878 | BMU_FIFO_OP_OFF = 1<<6, /* FIFO Operational Off */ | ||
879 | BMU_FIFO_ENA = 1<<5, /* Enable FIFO */ | ||
880 | BMU_FIFO_RST = 1<<4, /* Reset FIFO */ | ||
881 | BMU_OP_ON = 1<<3, /* BMU Operational On */ | ||
882 | BMU_OP_OFF = 1<<2, /* BMU Operational Off */ | ||
883 | BMU_RST_CLR = 1<<1, /* Clear BMU Reset (Enable) */ | ||
884 | BMU_RST_SET = 1<<0, /* Set BMU Reset */ | ||
885 | |||
886 | BMU_CLR_RESET = BMU_FIFO_RST | BMU_OP_OFF | BMU_RST_CLR, | ||
887 | BMU_OPER_INIT = BMU_CLR_IRQ_PAR | BMU_CLR_IRQ_CHK | BMU_START | | ||
888 | BMU_FIFO_ENA | BMU_OP_ON, | ||
889 | |||
890 | BMU_WM_DEFAULT = 0x600, | ||
891 | BMU_WM_PEX = 0x80, | ||
892 | }; | ||
893 | |||
894 | /* Tx BMU Control / Status Registers (Yukon-2) */ | ||
895 | /* Bit 31: same as for Rx */ | ||
896 | enum { | ||
897 | BMU_TX_IPIDINCR_ON = 1<<13, /* Enable IP ID Increment */ | ||
898 | BMU_TX_IPIDINCR_OFF = 1<<12, /* Disable IP ID Increment */ | ||
899 | BMU_TX_CLR_IRQ_TCP = 1<<11, /* Clear IRQ on TCP segment length mismatch */ | ||
900 | }; | ||
901 | |||
902 | /* TBMU_TEST 0x06B8 Transmit BMU Test Register */ | ||
903 | enum { | ||
904 | TBMU_TEST_BMU_TX_CHK_AUTO_OFF = 1<<31, /* BMU Tx Checksum Auto Calculation Disable */ | ||
905 | TBMU_TEST_BMU_TX_CHK_AUTO_ON = 1<<30, /* BMU Tx Checksum Auto Calculation Enable */ | ||
906 | TBMU_TEST_HOME_ADD_PAD_FIX1_EN = 1<<29, /* Home Address Padding FIX1 Enable */ | ||
907 | TBMU_TEST_HOME_ADD_PAD_FIX1_DIS = 1<<28, /* Home Address Padding FIX1 Disable */ | ||
908 | TBMU_TEST_ROUTING_ADD_FIX_EN = 1<<27, /* Routing Address Fix Enable */ | ||
909 | TBMU_TEST_ROUTING_ADD_FIX_DIS = 1<<26, /* Routing Address Fix Disable */ | ||
910 | TBMU_TEST_HOME_ADD_FIX_EN = 1<<25, /* Home address checksum fix enable */ | ||
911 | TBMU_TEST_HOME_ADD_FIX_DIS = 1<<24, /* Home address checksum fix disable */ | ||
912 | |||
913 | TBMU_TEST_TEST_RSPTR_ON = 1<<22, /* Testmode Shadow Read Ptr On */ | ||
914 | TBMU_TEST_TEST_RSPTR_OFF = 1<<21, /* Testmode Shadow Read Ptr Off */ | ||
915 | TBMU_TEST_TESTSTEP_RSPTR = 1<<20, /* Teststep Shadow Read Ptr */ | ||
916 | |||
917 | TBMU_TEST_TEST_RPTR_ON = 1<<18, /* Testmode Read Ptr On */ | ||
918 | TBMU_TEST_TEST_RPTR_OFF = 1<<17, /* Testmode Read Ptr Off */ | ||
919 | TBMU_TEST_TESTSTEP_RPTR = 1<<16, /* Teststep Read Ptr */ | ||
920 | |||
921 | TBMU_TEST_TEST_WSPTR_ON = 1<<14, /* Testmode Shadow Write Ptr On */ | ||
922 | TBMU_TEST_TEST_WSPTR_OFF = 1<<13, /* Testmode Shadow Write Ptr Off */ | ||
923 | TBMU_TEST_TESTSTEP_WSPTR = 1<<12, /* Teststep Shadow Write Ptr */ | ||
924 | |||
925 | TBMU_TEST_TEST_WPTR_ON = 1<<10, /* Testmode Write Ptr On */ | ||
926 | TBMU_TEST_TEST_WPTR_OFF = 1<<9, /* Testmode Write Ptr Off */ | ||
927 | TBMU_TEST_TESTSTEP_WPTR = 1<<8, /* Teststep Write Ptr */ | ||
928 | |||
929 | TBMU_TEST_TEST_REQ_NB_ON = 1<<6, /* Testmode Req Nbytes/Addr On */ | ||
930 | TBMU_TEST_TEST_REQ_NB_OFF = 1<<5, /* Testmode Req Nbytes/Addr Off */ | ||
931 | TBMU_TEST_TESTSTEP_REQ_NB = 1<<4, /* Teststep Req Nbytes/Addr */ | ||
932 | |||
933 | TBMU_TEST_TEST_DONE_IDX_ON = 1<<2, /* Testmode Done Index On */ | ||
934 | TBMU_TEST_TEST_DONE_IDX_OFF = 1<<1, /* Testmode Done Index Off */ | ||
935 | TBMU_TEST_TESTSTEP_DONE_IDX = 1<<0, /* Teststep Done Index */ | ||
936 | }; | ||
937 | |||
938 | /* Queue Prefetch Unit Offsets, use Y2_QADDR() to address (Yukon-2 only)*/ | ||
939 | /* PREF_UNIT_CTRL 32 bit Prefetch Control register */ | ||
940 | enum { | ||
941 | PREF_UNIT_OP_ON = 1<<3, /* prefetch unit operational */ | ||
942 | PREF_UNIT_OP_OFF = 1<<2, /* prefetch unit not operational */ | ||
943 | PREF_UNIT_RST_CLR = 1<<1, /* Clear Prefetch Unit Reset */ | ||
944 | PREF_UNIT_RST_SET = 1<<0, /* Set Prefetch Unit Reset */ | ||
945 | }; | ||
946 | |||
947 | /* RAM Buffer Register Offsets, use RB_ADDR(Queue, Offs) to access */ | ||
948 | /* RB_START 32 bit RAM Buffer Start Address */ | ||
949 | /* RB_END 32 bit RAM Buffer End Address */ | ||
950 | /* RB_WP 32 bit RAM Buffer Write Pointer */ | ||
951 | /* RB_RP 32 bit RAM Buffer Read Pointer */ | ||
952 | /* RB_RX_UTPP 32 bit Rx Upper Threshold, Pause Pack */ | ||
953 | /* RB_RX_LTPP 32 bit Rx Lower Threshold, Pause Pack */ | ||
954 | /* RB_RX_UTHP 32 bit Rx Upper Threshold, High Prio */ | ||
955 | /* RB_RX_LTHP 32 bit Rx Lower Threshold, High Prio */ | ||
956 | /* RB_PC 32 bit RAM Buffer Packet Counter */ | ||
957 | /* RB_LEV 32 bit RAM Buffer Level Register */ | ||
958 | |||
959 | #define RB_MSK 0x0007ffff /* Bit 18.. 0: RAM Buffer Pointer Bits */ | ||
960 | /* RB_TST2 8 bit RAM Buffer Test Register 2 */ | ||
961 | /* RB_TST1 8 bit RAM Buffer Test Register 1 */ | ||
962 | |||
963 | /* RB_CTRL 8 bit RAM Buffer Control Register */ | ||
964 | enum { | ||
965 | RB_ENA_STFWD = 1<<5, /* Enable Store & Forward */ | ||
966 | RB_DIS_STFWD = 1<<4, /* Disable Store & Forward */ | ||
967 | RB_ENA_OP_MD = 1<<3, /* Enable Operation Mode */ | ||
968 | RB_DIS_OP_MD = 1<<2, /* Disable Operation Mode */ | ||
969 | RB_RST_CLR = 1<<1, /* Clear RAM Buf STM Reset */ | ||
970 | RB_RST_SET = 1<<0, /* Set RAM Buf STM Reset */ | ||
971 | }; | ||
972 | |||
973 | |||
974 | /* Transmit GMAC FIFO (YUKON only) */ | ||
975 | enum { | ||
976 | TX_GMF_EA = 0x0d40,/* 32 bit Tx GMAC FIFO End Address */ | ||
977 | TX_GMF_AE_THR = 0x0d44,/* 32 bit Tx GMAC FIFO Almost Empty Thresh.*/ | ||
978 | TX_GMF_CTRL_T = 0x0d48,/* 32 bit Tx GMAC FIFO Control/Test */ | ||
979 | |||
980 | TX_GMF_WP = 0x0d60,/* 32 bit Tx GMAC FIFO Write Pointer */ | ||
981 | TX_GMF_WSP = 0x0d64,/* 32 bit Tx GMAC FIFO Write Shadow Ptr. */ | ||
982 | TX_GMF_WLEV = 0x0d68,/* 32 bit Tx GMAC FIFO Write Level */ | ||
983 | |||
984 | TX_GMF_RP = 0x0d70,/* 32 bit Tx GMAC FIFO Read Pointer */ | ||
985 | TX_GMF_RSTP = 0x0d74,/* 32 bit Tx GMAC FIFO Restart Pointer */ | ||
986 | TX_GMF_RLEV = 0x0d78,/* 32 bit Tx GMAC FIFO Read Level */ | ||
987 | |||
988 | /* Threshold values for Yukon-EC Ultra and Extreme */ | ||
989 | ECU_AE_THR = 0x0070, /* Almost Empty Threshold */ | ||
990 | ECU_TXFF_LEV = 0x01a0, /* Tx BMU FIFO Level */ | ||
991 | ECU_JUMBO_WM = 0x0080, /* Jumbo Mode Watermark */ | ||
992 | }; | ||
993 | |||
994 | /* Descriptor Poll Timer Registers */ | ||
995 | enum { | ||
996 | B28_DPT_INI = 0x0e00,/* 24 bit Descriptor Poll Timer Init Val */ | ||
997 | B28_DPT_VAL = 0x0e04,/* 24 bit Descriptor Poll Timer Curr Val */ | ||
998 | B28_DPT_CTRL = 0x0e08,/* 8 bit Descriptor Poll Timer Ctrl Reg */ | ||
999 | |||
1000 | B28_DPT_TST = 0x0e0a,/* 8 bit Descriptor Poll Timer Test Reg */ | ||
1001 | }; | ||
1002 | |||
1003 | /* Time Stamp Timer Registers (YUKON only) */ | ||
1004 | enum { | ||
1005 | GMAC_TI_ST_VAL = 0x0e14,/* 32 bit Time Stamp Timer Curr Val */ | ||
1006 | GMAC_TI_ST_CTRL = 0x0e18,/* 8 bit Time Stamp Timer Ctrl Reg */ | ||
1007 | GMAC_TI_ST_TST = 0x0e1a,/* 8 bit Time Stamp Timer Test Reg */ | ||
1008 | }; | ||
1009 | |||
1010 | /* Polling Unit Registers (Yukon-2 only) */ | ||
1011 | enum { | ||
1012 | POLL_CTRL = 0x0e20, /* 32 bit Polling Unit Control Reg */ | ||
1013 | POLL_LAST_IDX = 0x0e24,/* 16 bit Polling Unit List Last Index */ | ||
1014 | |||
1015 | POLL_LIST_ADDR_LO= 0x0e28,/* 32 bit Poll. List Start Addr (low) */ | ||
1016 | POLL_LIST_ADDR_HI= 0x0e2c,/* 32 bit Poll. List Start Addr (high) */ | ||
1017 | }; | ||
1018 | |||
1019 | enum { | ||
1020 | SMB_CFG = 0x0e40, /* 32 bit SMBus Config Register */ | ||
1021 | SMB_CSR = 0x0e44, /* 32 bit SMBus Control/Status Register */ | ||
1022 | }; | ||
1023 | |||
1024 | enum { | ||
1025 | CPU_WDOG = 0x0e48, /* 32 bit Watchdog Register */ | ||
1026 | CPU_CNTR = 0x0e4C, /* 32 bit Counter Register */ | ||
1027 | CPU_TIM = 0x0e50,/* 32 bit Timer Compare Register */ | ||
1028 | CPU_AHB_ADDR = 0x0e54, /* 32 bit CPU AHB Debug Register */ | ||
1029 | CPU_AHB_WDATA = 0x0e58, /* 32 bit CPU AHB Debug Register */ | ||
1030 | CPU_AHB_RDATA = 0x0e5C, /* 32 bit CPU AHB Debug Register */ | ||
1031 | HCU_MAP_BASE = 0x0e60, /* 32 bit Reset Mapping Base */ | ||
1032 | CPU_AHB_CTRL = 0x0e64, /* 32 bit CPU AHB Debug Register */ | ||
1033 | HCU_CCSR = 0x0e68, /* 32 bit CPU Control and Status Register */ | ||
1034 | HCU_HCSR = 0x0e6C, /* 32 bit Host Control and Status Register */ | ||
1035 | }; | ||
1036 | |||
1037 | /* ASF Subsystem Registers (Yukon-2 only) */ | ||
1038 | enum { | ||
1039 | B28_Y2_SMB_CONFIG = 0x0e40,/* 32 bit ASF SMBus Config Register */ | ||
1040 | B28_Y2_SMB_CSD_REG = 0x0e44,/* 32 bit ASF SMB Control/Status/Data */ | ||
1041 | B28_Y2_ASF_IRQ_V_BASE=0x0e60,/* 32 bit ASF IRQ Vector Base */ | ||
1042 | |||
1043 | B28_Y2_ASF_STAT_CMD= 0x0e68,/* 32 bit ASF Status and Command Reg */ | ||
1044 | B28_Y2_ASF_HOST_COM= 0x0e6c,/* 32 bit ASF Host Communication Reg */ | ||
1045 | B28_Y2_DATA_REG_1 = 0x0e70,/* 32 bit ASF/Host Data Register 1 */ | ||
1046 | B28_Y2_DATA_REG_2 = 0x0e74,/* 32 bit ASF/Host Data Register 2 */ | ||
1047 | B28_Y2_DATA_REG_3 = 0x0e78,/* 32 bit ASF/Host Data Register 3 */ | ||
1048 | B28_Y2_DATA_REG_4 = 0x0e7c,/* 32 bit ASF/Host Data Register 4 */ | ||
1049 | }; | ||
1050 | |||
1051 | /* Status BMU Registers (Yukon-2 only)*/ | ||
1052 | enum { | ||
1053 | STAT_CTRL = 0x0e80,/* 32 bit Status BMU Control Reg */ | ||
1054 | STAT_LAST_IDX = 0x0e84,/* 16 bit Status BMU Last Index */ | ||
1055 | |||
1056 | STAT_LIST_ADDR_LO= 0x0e88,/* 32 bit Status List Start Addr (low) */ | ||
1057 | STAT_LIST_ADDR_HI= 0x0e8c,/* 32 bit Status List Start Addr (high) */ | ||
1058 | STAT_TXA1_RIDX = 0x0e90,/* 16 bit Status TxA1 Report Index Reg */ | ||
1059 | STAT_TXS1_RIDX = 0x0e92,/* 16 bit Status TxS1 Report Index Reg */ | ||
1060 | STAT_TXA2_RIDX = 0x0e94,/* 16 bit Status TxA2 Report Index Reg */ | ||
1061 | STAT_TXS2_RIDX = 0x0e96,/* 16 bit Status TxS2 Report Index Reg */ | ||
1062 | STAT_TX_IDX_TH = 0x0e98,/* 16 bit Status Tx Index Threshold Reg */ | ||
1063 | STAT_PUT_IDX = 0x0e9c,/* 16 bit Status Put Index Reg */ | ||
1064 | |||
1065 | /* FIFO Control/Status Registers (Yukon-2 only)*/ | ||
1066 | STAT_FIFO_WP = 0x0ea0,/* 8 bit Status FIFO Write Pointer Reg */ | ||
1067 | STAT_FIFO_RP = 0x0ea4,/* 8 bit Status FIFO Read Pointer Reg */ | ||
1068 | STAT_FIFO_RSP = 0x0ea6,/* 8 bit Status FIFO Read Shadow Ptr */ | ||
1069 | STAT_FIFO_LEVEL = 0x0ea8,/* 8 bit Status FIFO Level Reg */ | ||
1070 | STAT_FIFO_SHLVL = 0x0eaa,/* 8 bit Status FIFO Shadow Level Reg */ | ||
1071 | STAT_FIFO_WM = 0x0eac,/* 8 bit Status FIFO Watermark Reg */ | ||
1072 | STAT_FIFO_ISR_WM= 0x0ead,/* 8 bit Status FIFO ISR Watermark Reg */ | ||
1073 | |||
1074 | /* Level and ISR Timer Registers (Yukon-2 only)*/ | ||
1075 | STAT_LEV_TIMER_INI= 0x0eb0,/* 32 bit Level Timer Init. Value Reg */ | ||
1076 | STAT_LEV_TIMER_CNT= 0x0eb4,/* 32 bit Level Timer Counter Reg */ | ||
1077 | STAT_LEV_TIMER_CTRL= 0x0eb8,/* 8 bit Level Timer Control Reg */ | ||
1078 | STAT_LEV_TIMER_TEST= 0x0eb9,/* 8 bit Level Timer Test Reg */ | ||
1079 | STAT_TX_TIMER_INI = 0x0ec0,/* 32 bit Tx Timer Init. Value Reg */ | ||
1080 | STAT_TX_TIMER_CNT = 0x0ec4,/* 32 bit Tx Timer Counter Reg */ | ||
1081 | STAT_TX_TIMER_CTRL = 0x0ec8,/* 8 bit Tx Timer Control Reg */ | ||
1082 | STAT_TX_TIMER_TEST = 0x0ec9,/* 8 bit Tx Timer Test Reg */ | ||
1083 | STAT_ISR_TIMER_INI = 0x0ed0,/* 32 bit ISR Timer Init. Value Reg */ | ||
1084 | STAT_ISR_TIMER_CNT = 0x0ed4,/* 32 bit ISR Timer Counter Reg */ | ||
1085 | STAT_ISR_TIMER_CTRL= 0x0ed8,/* 8 bit ISR Timer Control Reg */ | ||
1086 | STAT_ISR_TIMER_TEST= 0x0ed9,/* 8 bit ISR Timer Test Reg */ | ||
1087 | }; | ||
1088 | |||
1089 | enum { | ||
1090 | LINKLED_OFF = 0x01, | ||
1091 | LINKLED_ON = 0x02, | ||
1092 | LINKLED_LINKSYNC_OFF = 0x04, | ||
1093 | LINKLED_LINKSYNC_ON = 0x08, | ||
1094 | LINKLED_BLINK_OFF = 0x10, | ||
1095 | LINKLED_BLINK_ON = 0x20, | ||
1096 | }; | ||
1097 | |||
1098 | /* GMAC and GPHY Control Registers (YUKON only) */ | ||
1099 | enum { | ||
1100 | GMAC_CTRL = 0x0f00,/* 32 bit GMAC Control Reg */ | ||
1101 | GPHY_CTRL = 0x0f04,/* 32 bit GPHY Control Reg */ | ||
1102 | GMAC_IRQ_SRC = 0x0f08,/* 8 bit GMAC Interrupt Source Reg */ | ||
1103 | GMAC_IRQ_MSK = 0x0f0c,/* 8 bit GMAC Interrupt Mask Reg */ | ||
1104 | GMAC_LINK_CTRL = 0x0f10,/* 16 bit Link Control Reg */ | ||
1105 | |||
1106 | /* Wake-up Frame Pattern Match Control Registers (YUKON only) */ | ||
1107 | WOL_CTRL_STAT = 0x0f20,/* 16 bit WOL Control/Status Reg */ | ||
1108 | WOL_MATCH_CTL = 0x0f22,/* 8 bit WOL Match Control Reg */ | ||
1109 | WOL_MATCH_RES = 0x0f23,/* 8 bit WOL Match Result Reg */ | ||
1110 | WOL_MAC_ADDR = 0x0f24,/* 32 bit WOL MAC Address */ | ||
1111 | WOL_PATT_RPTR = 0x0f2c,/* 8 bit WOL Pattern Read Pointer */ | ||
1112 | |||
1113 | /* WOL Pattern Length Registers (YUKON only) */ | ||
1114 | WOL_PATT_LEN_LO = 0x0f30,/* 32 bit WOL Pattern Length 3..0 */ | ||
1115 | WOL_PATT_LEN_HI = 0x0f34,/* 24 bit WOL Pattern Length 6..4 */ | ||
1116 | |||
1117 | /* WOL Pattern Counter Registers (YUKON only) */ | ||
1118 | WOL_PATT_CNT_0 = 0x0f38,/* 32 bit WOL Pattern Counter 3..0 */ | ||
1119 | WOL_PATT_CNT_4 = 0x0f3c,/* 24 bit WOL Pattern Counter 6..4 */ | ||
1120 | }; | ||
1121 | #define WOL_REGS(port, x) (x + (port)*0x80) | ||
1122 | |||
1123 | enum { | ||
1124 | WOL_PATT_RAM_1 = 0x1000,/* WOL Pattern RAM Link 1 */ | ||
1125 | WOL_PATT_RAM_2 = 0x1400,/* WOL Pattern RAM Link 2 */ | ||
1126 | }; | ||
1127 | #define WOL_PATT_RAM_BASE(port) (WOL_PATT_RAM_1 + (port)*0x400) | ||
1128 | |||
1129 | enum { | ||
1130 | BASE_GMAC_1 = 0x2800,/* GMAC 1 registers */ | ||
1131 | BASE_GMAC_2 = 0x3800,/* GMAC 2 registers */ | ||
1132 | }; | ||
1133 | |||
1134 | /* | ||
1135 | * Marvell-PHY Registers, indirect addressed over GMAC | ||
1136 | */ | ||
1137 | enum { | ||
1138 | PHY_MARV_CTRL = 0x00,/* 16 bit r/w PHY Control Register */ | ||
1139 | PHY_MARV_STAT = 0x01,/* 16 bit r/o PHY Status Register */ | ||
1140 | PHY_MARV_ID0 = 0x02,/* 16 bit r/o PHY ID0 Register */ | ||
1141 | PHY_MARV_ID1 = 0x03,/* 16 bit r/o PHY ID1 Register */ | ||
1142 | PHY_MARV_AUNE_ADV = 0x04,/* 16 bit r/w Auto-Neg. Advertisement */ | ||
1143 | PHY_MARV_AUNE_LP = 0x05,/* 16 bit r/o Link Part Ability Reg */ | ||
1144 | PHY_MARV_AUNE_EXP = 0x06,/* 16 bit r/o Auto-Neg. Expansion Reg */ | ||
1145 | PHY_MARV_NEPG = 0x07,/* 16 bit r/w Next Page Register */ | ||
1146 | PHY_MARV_NEPG_LP = 0x08,/* 16 bit r/o Next Page Link Partner */ | ||
1147 | /* Marvell-specific registers */ | ||
1148 | PHY_MARV_1000T_CTRL = 0x09,/* 16 bit r/w 1000Base-T Control Reg */ | ||
1149 | PHY_MARV_1000T_STAT = 0x0a,/* 16 bit r/o 1000Base-T Status Reg */ | ||
1150 | PHY_MARV_EXT_STAT = 0x0f,/* 16 bit r/o Extended Status Reg */ | ||
1151 | PHY_MARV_PHY_CTRL = 0x10,/* 16 bit r/w PHY Specific Ctrl Reg */ | ||
1152 | PHY_MARV_PHY_STAT = 0x11,/* 16 bit r/o PHY Specific Stat Reg */ | ||
1153 | PHY_MARV_INT_MASK = 0x12,/* 16 bit r/w Interrupt Mask Reg */ | ||
1154 | PHY_MARV_INT_STAT = 0x13,/* 16 bit r/o Interrupt Status Reg */ | ||
1155 | PHY_MARV_EXT_CTRL = 0x14,/* 16 bit r/w Ext. PHY Specific Ctrl */ | ||
1156 | PHY_MARV_RXE_CNT = 0x15,/* 16 bit r/w Receive Error Counter */ | ||
1157 | PHY_MARV_EXT_ADR = 0x16,/* 16 bit r/w Ext. Ad. for Cable Diag. */ | ||
1158 | PHY_MARV_PORT_IRQ = 0x17,/* 16 bit r/o Port 0 IRQ (88E1111 only) */ | ||
1159 | PHY_MARV_LED_CTRL = 0x18,/* 16 bit r/w LED Control Reg */ | ||
1160 | PHY_MARV_LED_OVER = 0x19,/* 16 bit r/w Manual LED Override Reg */ | ||
1161 | PHY_MARV_EXT_CTRL_2 = 0x1a,/* 16 bit r/w Ext. PHY Specific Ctrl 2 */ | ||
1162 | PHY_MARV_EXT_P_STAT = 0x1b,/* 16 bit r/w Ext. PHY Spec. Stat Reg */ | ||
1163 | PHY_MARV_CABLE_DIAG = 0x1c,/* 16 bit r/o Cable Diagnostic Reg */ | ||
1164 | PHY_MARV_PAGE_ADDR = 0x1d,/* 16 bit r/w Extended Page Address Reg */ | ||
1165 | PHY_MARV_PAGE_DATA = 0x1e,/* 16 bit r/w Extended Page Data Reg */ | ||
1166 | |||
1167 | /* for 10/100 Fast Ethernet PHY (88E3082 only) */ | ||
1168 | PHY_MARV_FE_LED_PAR = 0x16,/* 16 bit r/w LED Parallel Select Reg. */ | ||
1169 | PHY_MARV_FE_LED_SER = 0x17,/* 16 bit r/w LED Stream Select S. LED */ | ||
1170 | PHY_MARV_FE_VCT_TX = 0x1a,/* 16 bit r/w VCT Reg. for TXP/N Pins */ | ||
1171 | PHY_MARV_FE_VCT_RX = 0x1b,/* 16 bit r/o VCT Reg. for RXP/N Pins */ | ||
1172 | PHY_MARV_FE_SPEC_2 = 0x1c,/* 16 bit r/w Specific Control Reg. 2 */ | ||
1173 | }; | ||
1174 | |||
1175 | enum { | ||
1176 | PHY_CT_RESET = 1<<15, /* Bit 15: (sc) clear all PHY related regs */ | ||
1177 | PHY_CT_LOOP = 1<<14, /* Bit 14: enable Loopback over PHY */ | ||
1178 | PHY_CT_SPS_LSB = 1<<13, /* Bit 13: Speed select, lower bit */ | ||
1179 | PHY_CT_ANE = 1<<12, /* Bit 12: Auto-Negotiation Enabled */ | ||
1180 | PHY_CT_PDOWN = 1<<11, /* Bit 11: Power Down Mode */ | ||
1181 | PHY_CT_ISOL = 1<<10, /* Bit 10: Isolate Mode */ | ||
1182 | PHY_CT_RE_CFG = 1<<9, /* Bit 9: (sc) Restart Auto-Negotiation */ | ||
1183 | PHY_CT_DUP_MD = 1<<8, /* Bit 8: Duplex Mode */ | ||
1184 | PHY_CT_COL_TST = 1<<7, /* Bit 7: Collision Test enabled */ | ||
1185 | PHY_CT_SPS_MSB = 1<<6, /* Bit 6: Speed select, upper bit */ | ||
1186 | }; | ||
1187 | |||
1188 | enum { | ||
1189 | PHY_CT_SP1000 = PHY_CT_SPS_MSB, /* enable speed of 1000 Mbps */ | ||
1190 | PHY_CT_SP100 = PHY_CT_SPS_LSB, /* enable speed of 100 Mbps */ | ||
1191 | PHY_CT_SP10 = 0, /* enable speed of 10 Mbps */ | ||
1192 | }; | ||
1193 | |||
1194 | enum { | ||
1195 | PHY_ST_EXT_ST = 1<<8, /* Bit 8: Extended Status Present */ | ||
1196 | |||
1197 | PHY_ST_PRE_SUP = 1<<6, /* Bit 6: Preamble Suppression */ | ||
1198 | PHY_ST_AN_OVER = 1<<5, /* Bit 5: Auto-Negotiation Over */ | ||
1199 | PHY_ST_REM_FLT = 1<<4, /* Bit 4: Remote Fault Condition Occurred */ | ||
1200 | PHY_ST_AN_CAP = 1<<3, /* Bit 3: Auto-Negotiation Capability */ | ||
1201 | PHY_ST_LSYNC = 1<<2, /* Bit 2: Link Synchronized */ | ||
1202 | PHY_ST_JAB_DET = 1<<1, /* Bit 1: Jabber Detected */ | ||
1203 | PHY_ST_EXT_REG = 1<<0, /* Bit 0: Extended Register available */ | ||
1204 | }; | ||
1205 | |||
1206 | enum { | ||
1207 | PHY_I1_OUI_MSK = 0x3f<<10, /* Bit 15..10: Organization Unique ID */ | ||
1208 | PHY_I1_MOD_NUM = 0x3f<<4, /* Bit 9.. 4: Model Number */ | ||
1209 | PHY_I1_REV_MSK = 0xf, /* Bit 3.. 0: Revision Number */ | ||
1210 | }; | ||
1211 | |||
1212 | /* different Marvell PHY Ids */ | ||
1213 | enum { | ||
1214 | PHY_MARV_ID0_VAL= 0x0141, /* Marvell Unique Identifier */ | ||
1215 | |||
1216 | PHY_BCOM_ID1_A1 = 0x6041, | ||
1217 | PHY_BCOM_ID1_B2 = 0x6043, | ||
1218 | PHY_BCOM_ID1_C0 = 0x6044, | ||
1219 | PHY_BCOM_ID1_C5 = 0x6047, | ||
1220 | |||
1221 | PHY_MARV_ID1_B0 = 0x0C23, /* Yukon (PHY 88E1011) */ | ||
1222 | PHY_MARV_ID1_B2 = 0x0C25, /* Yukon-Plus (PHY 88E1011) */ | ||
1223 | PHY_MARV_ID1_C2 = 0x0CC2, /* Yukon-EC (PHY 88E1111) */ | ||
1224 | PHY_MARV_ID1_Y2 = 0x0C91, /* Yukon-2 (PHY 88E1112) */ | ||
1225 | PHY_MARV_ID1_FE = 0x0C83, /* Yukon-FE (PHY 88E3082 Rev.A1) */ | ||
1226 | PHY_MARV_ID1_ECU= 0x0CB0, /* Yukon-ECU (PHY 88E1149 Rev.B2?) */ | ||
1227 | }; | ||
1228 | |||
1229 | /* Advertisement register bits */ | ||
1230 | enum { | ||
1231 | PHY_AN_NXT_PG = 1<<15, /* Bit 15: Request Next Page */ | ||
1232 | PHY_AN_ACK = 1<<14, /* Bit 14: (ro) Acknowledge Received */ | ||
1233 | PHY_AN_RF = 1<<13, /* Bit 13: Remote Fault Bits */ | ||
1234 | |||
1235 | PHY_AN_PAUSE_ASYM = 1<<11,/* Bit 11: Try for asymmetric */ | ||
1236 | PHY_AN_PAUSE_CAP = 1<<10, /* Bit 10: Try for pause */ | ||
1237 | PHY_AN_100BASE4 = 1<<9, /* Bit 9: Try for 100mbps 4k packets */ | ||
1238 | PHY_AN_100FULL = 1<<8, /* Bit 8: Try for 100mbps full-duplex */ | ||
1239 | PHY_AN_100HALF = 1<<7, /* Bit 7: Try for 100mbps half-duplex */ | ||
1240 | PHY_AN_10FULL = 1<<6, /* Bit 6: Try for 10mbps full-duplex */ | ||
1241 | PHY_AN_10HALF = 1<<5, /* Bit 5: Try for 10mbps half-duplex */ | ||
1242 | PHY_AN_CSMA = 1<<0, /* Bit 0: Only selector supported */ | ||
1243 | PHY_AN_SEL = 0x1f, /* Bit 4..0: Selector Field, 00001=Ethernet*/ | ||
1244 | PHY_AN_FULL = PHY_AN_100FULL | PHY_AN_10FULL | PHY_AN_CSMA, | ||
1245 | PHY_AN_ALL = PHY_AN_10HALF | PHY_AN_10FULL | | ||
1246 | PHY_AN_100HALF | PHY_AN_100FULL, | ||
1247 | }; | ||
1248 | |||
1249 | /***** PHY_BCOM_1000T_STAT 16 bit r/o 1000Base-T Status Reg *****/ | ||
1250 | /***** PHY_MARV_1000T_STAT 16 bit r/o 1000Base-T Status Reg *****/ | ||
1251 | enum { | ||
1252 | PHY_B_1000S_MSF = 1<<15, /* Bit 15: Master/Slave Fault */ | ||
1253 | PHY_B_1000S_MSR = 1<<14, /* Bit 14: Master/Slave Result */ | ||
1254 | PHY_B_1000S_LRS = 1<<13, /* Bit 13: Local Receiver Status */ | ||
1255 | PHY_B_1000S_RRS = 1<<12, /* Bit 12: Remote Receiver Status */ | ||
1256 | PHY_B_1000S_LP_FD = 1<<11, /* Bit 11: Link Partner can FD */ | ||
1257 | PHY_B_1000S_LP_HD = 1<<10, /* Bit 10: Link Partner can HD */ | ||
1258 | /* Bit 9..8: reserved */ | ||
1259 | PHY_B_1000S_IEC = 0xff, /* Bit 7..0: Idle Error Count */ | ||
1260 | }; | ||
1261 | |||
1262 | /** Marvell-Specific */ | ||
1263 | enum { | ||
1264 | PHY_M_AN_NXT_PG = 1<<15, /* Request Next Page */ | ||
1265 | PHY_M_AN_ACK = 1<<14, /* (ro) Acknowledge Received */ | ||
1266 | PHY_M_AN_RF = 1<<13, /* Remote Fault */ | ||
1267 | |||
1268 | PHY_M_AN_ASP = 1<<11, /* Asymmetric Pause */ | ||
1269 | PHY_M_AN_PC = 1<<10, /* MAC Pause implemented */ | ||
1270 | PHY_M_AN_100_T4 = 1<<9, /* Not cap. 100Base-T4 (always 0) */ | ||
1271 | PHY_M_AN_100_FD = 1<<8, /* Advertise 100Base-TX Full Duplex */ | ||
1272 | PHY_M_AN_100_HD = 1<<7, /* Advertise 100Base-TX Half Duplex */ | ||
1273 | PHY_M_AN_10_FD = 1<<6, /* Advertise 10Base-TX Full Duplex */ | ||
1274 | PHY_M_AN_10_HD = 1<<5, /* Advertise 10Base-TX Half Duplex */ | ||
1275 | PHY_M_AN_SEL_MSK =0x1f<<4, /* Bit 4.. 0: Selector Field Mask */ | ||
1276 | }; | ||
1277 | |||
1278 | /* special defines for FIBER (88E1011S only) */ | ||
1279 | enum { | ||
1280 | PHY_M_AN_ASP_X = 1<<8, /* Asymmetric Pause */ | ||
1281 | PHY_M_AN_PC_X = 1<<7, /* MAC Pause implemented */ | ||
1282 | PHY_M_AN_1000X_AHD = 1<<6, /* Advertise 1000Base-X Half Duplex */ | ||
1283 | PHY_M_AN_1000X_AFD = 1<<5, /* Advertise 1000Base-X Full Duplex */ | ||
1284 | }; | ||
1285 | |||
1286 | /* Pause Bits (PHY_M_AN_ASP_X and PHY_M_AN_PC_X) encoding */ | ||
1287 | enum { | ||
1288 | PHY_M_P_NO_PAUSE_X = 0<<7,/* Bit 8.. 7: no Pause Mode */ | ||
1289 | PHY_M_P_SYM_MD_X = 1<<7, /* Bit 8.. 7: symmetric Pause Mode */ | ||
1290 | PHY_M_P_ASYM_MD_X = 2<<7,/* Bit 8.. 7: asymmetric Pause Mode */ | ||
1291 | PHY_M_P_BOTH_MD_X = 3<<7,/* Bit 8.. 7: both Pause Mode */ | ||
1292 | }; | ||
1293 | |||
1294 | /***** PHY_MARV_1000T_CTRL 16 bit r/w 1000Base-T Control Reg *****/ | ||
1295 | enum { | ||
1296 | PHY_M_1000C_TEST = 7<<13,/* Bit 15..13: Test Modes */ | ||
1297 | PHY_M_1000C_MSE = 1<<12, /* Manual Master/Slave Enable */ | ||
1298 | PHY_M_1000C_MSC = 1<<11, /* M/S Configuration (1=Master) */ | ||
1299 | PHY_M_1000C_MPD = 1<<10, /* Multi-Port Device */ | ||
1300 | PHY_M_1000C_AFD = 1<<9, /* Advertise Full Duplex */ | ||
1301 | PHY_M_1000C_AHD = 1<<8, /* Advertise Half Duplex */ | ||
1302 | }; | ||
1303 | |||
/***** PHY_MARV_PHY_CTRL	16 bit r/w	PHY Specific Ctrl Reg *****/
enum {
	PHY_M_PC_TX_FFD_MSK	= 3<<14, /* Bit 15..14: Tx FIFO Depth Mask */
	PHY_M_PC_RX_FFD_MSK	= 3<<12, /* Bit 13..12: Rx FIFO Depth Mask */
	PHY_M_PC_ASS_CRS_TX	= 1<<11, /* Assert CRS on Transmit */
	PHY_M_PC_FL_GOOD	= 1<<10, /* Force Link Good */
	PHY_M_PC_EN_DET_MSK	= 3<<8,  /* Bit  9.. 8: Energy Detect Mask */
	PHY_M_PC_ENA_EXT_D	= 1<<7,  /* Enable Ext. Distance (10BT) */
	PHY_M_PC_MDIX_MSK	= 3<<5,  /* Bit  6.. 5: MDI/MDIX Config. Mask */
	PHY_M_PC_DIS_125CLK	= 1<<4,  /* Disable 125 CLK */
	PHY_M_PC_MAC_POW_UP	= 1<<3,  /* MAC Power up */
	PHY_M_PC_SQE_T_ENA	= 1<<2,  /* SQE Test Enabled */
	PHY_M_PC_POL_R_DIS	= 1<<1,  /* Polarity Reversal Disabled */
	PHY_M_PC_DIS_JABBER	= 1<<0,  /* Disable Jabber */
};

/* Energy-detect modes for the PHY_M_PC_EN_DET_MSK field */
enum {
	PHY_M_PC_EN_DET		= 2<<8, /* Energy Detect (Mode 1) */
	PHY_M_PC_EN_DET_PLUS	= 3<<8, /* Energy Detect Plus (Mode 2) */
};

#define PHY_M_PC_MDI_XMODE(x)	(((u16)(x)<<5) & PHY_M_PC_MDIX_MSK)

/* MDI/MDIX configuration values for PHY_M_PC_MDI_XMODE() */
enum {
	PHY_M_PC_MAN_MDI	= 0, /* 00 = Manual MDI configuration */
	PHY_M_PC_MAN_MDIX	= 1, /* 01 = Manual MDIX configuration */
	PHY_M_PC_ENA_AUTO	= 3, /* 11 = Enable Automatic Crossover */
};

/* for Yukon-EC Ultra Gigabit Ethernet PHY (88E1149 only) */
enum {
	PHY_M_PC_COP_TX_DIS	= 1<<3, /* Copper Transmitter Disable */
	PHY_M_PC_POW_D_ENA	= 1<<2, /* Power Down Enable */
};

/* for 10/100 Fast Ethernet PHY (88E3082 only) */
enum {
	PHY_M_PC_ENA_DTE_DT	= 1<<15, /* Enable Data Terminal Equ. (DTE) Detect */
	PHY_M_PC_ENA_ENE_DT	= 1<<14, /* Enable Energy Detect (sense & pulse) */
	PHY_M_PC_DIS_NLP_CK	= 1<<13, /* Disable Normal Link Pulse (NLP) Check */
	PHY_M_PC_ENA_LIP_NP	= 1<<12, /* Enable Link Partner Next Page Reg. */
	PHY_M_PC_DIS_NLP_GN	= 1<<11, /* Disable Normal Link Pulse Generation */

	PHY_M_PC_DIS_SCRAMB	= 1<<9,  /* Disable Scrambler */
	PHY_M_PC_DIS_FEFI	= 1<<8,  /* Disable Far End Fault Indic. (FEFI) */

	PHY_M_PC_SH_TP_SEL	= 1<<6,  /* Shielded Twisted Pair Select */
	PHY_M_PC_RX_FD_MSK	= 3<<2,  /* Bit  3.. 2: Rx FIFO Depth Mask */
};
1353 | |||
/***** PHY_MARV_PHY_STAT	16 bit r/o	PHY Specific Status Reg *****/
enum {
	PHY_M_PS_SPEED_MSK	= 3<<14, /* Bit 15..14: Speed Mask */
	PHY_M_PS_SPEED_1000	= 1<<15, /*		10 = 1000 Mbps */
	PHY_M_PS_SPEED_100	= 1<<14, /*		01 =  100 Mbps */
	PHY_M_PS_SPEED_10	= 0,	 /*		00 =   10 Mbps */
	PHY_M_PS_FULL_DUP	= 1<<13, /* Full Duplex */
	PHY_M_PS_PAGE_REC	= 1<<12, /* Page Received */
	PHY_M_PS_SPDUP_RES	= 1<<11, /* Speed & Duplex Resolved */
	PHY_M_PS_LINK_UP	= 1<<10, /* Link Up */
	PHY_M_PS_CABLE_MSK	= 7<<7,  /* Bit  9.. 7: Cable Length Mask */
	PHY_M_PS_MDI_X_STAT	= 1<<6,  /* MDI Crossover Stat (1=MDIX) */
	PHY_M_PS_DOWNS_STAT	= 1<<5,  /* Downshift Status (1=downsh.) */
	PHY_M_PS_ENDET_STAT	= 1<<4,  /* Energy Detect Status (1=act) */
	PHY_M_PS_TX_P_EN	= 1<<3,  /* Tx Pause Enabled */
	PHY_M_PS_RX_P_EN	= 1<<2,  /* Rx Pause Enabled */
	PHY_M_PS_POL_REV	= 1<<1,  /* Polarity Reversed */
	PHY_M_PS_JABBER		= 1<<0,  /* Jabber */
};

#define PHY_M_PS_PAUSE_MSK	(PHY_M_PS_TX_P_EN | PHY_M_PS_RX_P_EN)

/* for 10/100 Fast Ethernet PHY (88E3082 only) */
enum {
	PHY_M_PS_DTE_DETECT	= 1<<15, /* Data Terminal Equipment (DTE) Detected */
	PHY_M_PS_RES_SPEED	= 1<<14, /* Resolved Speed (1=100 Mbps, 0=10 Mbps) */
};

/* PHY interrupt source/mask bits (PHY_M_IS_*) and common mask values */
enum {
	PHY_M_IS_AN_ERROR	= 1<<15, /* Auto-Negotiation Error */
	PHY_M_IS_LSP_CHANGE	= 1<<14, /* Link Speed Changed */
	PHY_M_IS_DUP_CHANGE	= 1<<13, /* Duplex Mode Changed */
	PHY_M_IS_AN_PR		= 1<<12, /* Page Received */
	PHY_M_IS_AN_COMPL	= 1<<11, /* Auto-Negotiation Completed */
	PHY_M_IS_LST_CHANGE	= 1<<10, /* Link Status Changed */
	PHY_M_IS_SYMB_ERROR	= 1<<9,  /* Symbol Error */
	PHY_M_IS_FALSE_CARR	= 1<<8,  /* False Carrier */
	PHY_M_IS_FIFO_ERROR	= 1<<7,  /* FIFO Overflow/Underrun Error */
	PHY_M_IS_MDI_CHANGE	= 1<<6,  /* MDI Crossover Changed */
	PHY_M_IS_DOWNSH_DET	= 1<<5,  /* Downshift Detected */
	PHY_M_IS_END_CHANGE	= 1<<4,  /* Energy Detect Changed */

	PHY_M_IS_DTE_CHANGE	= 1<<2,  /* DTE Power Det. Status Changed */
	PHY_M_IS_POL_CHANGE	= 1<<1,  /* Polarity Changed */
	PHY_M_IS_JABBER		= 1<<0,  /* Jabber */

	PHY_M_DEF_MSK		= PHY_M_IS_LSP_CHANGE | PHY_M_IS_LST_CHANGE
				  | PHY_M_IS_DUP_CHANGE,
	PHY_M_AN_MSK		= PHY_M_IS_AN_ERROR | PHY_M_IS_AN_COMPL,
};
1404 | |||
1405 | |||
/***** PHY_MARV_EXT_CTRL	16 bit r/w	Ext. PHY Specific Ctrl *****/
enum {
	PHY_M_EC_ENA_BC_EXT = 1<<15, /* Enable Block Carr. Ext. (88E1111 only) */
	PHY_M_EC_ENA_LIN_LB = 1<<14, /* Enable Line Loopback (88E1111 only) */

	PHY_M_EC_DIS_LINK_P = 1<<12, /* Disable Link Pulses (88E1111 only) */
	PHY_M_EC_M_DSC_MSK  = 3<<10, /* Bit 11..10: Master Downshift Counter */
				     /* (88E1011 only) */
	PHY_M_EC_S_DSC_MSK  = 3<<8,  /* Bit  9.. 8: Slave Downshift Counter */
				     /* (88E1011 only) */
	PHY_M_EC_M_DSC_MSK2 = 7<<9,  /* Bit 11.. 9: Master Downshift Counter */
				     /* (88E1111 only) */
	PHY_M_EC_DOWN_S_ENA = 1<<8,  /* Downshift Enable (88E1111 only) */
				     /* !!! Errata in spec. (1 = disable) */
	PHY_M_EC_RX_TIM_CT  = 1<<7,  /* RGMII Rx Timing Control */
	PHY_M_EC_MAC_S_MSK  = 7<<4,  /* Bit  6.. 4: Def. MAC interface speed */
	PHY_M_EC_FIB_AN_ENA = 1<<3,  /* Fiber Auto-Neg. Enable (88E1011S only) */
	PHY_M_EC_DTE_D_ENA  = 1<<2,  /* DTE Detect Enable (88E1111 only) */
	PHY_M_EC_TX_TIM_CT  = 1<<1,  /* RGMII Tx Timing Control */
	PHY_M_EC_TRANS_DIS  = 1<<0,  /* Transmitter Disable (88E1111 only) */

	PHY_M_10B_TE_ENABLE = 1<<7,  /* 10Base-Te Enable (88E8079 and above) */
};
/* shifted term fully parenthesized so operator precedence is explicit */
#define PHY_M_EC_M_DSC(x)	(((u16)(x)<<10) & PHY_M_EC_M_DSC_MSK)
					/* 00=1x; 01=2x; 10=3x; 11=4x */
#define PHY_M_EC_S_DSC(x)	(((u16)(x)<<8) & PHY_M_EC_S_DSC_MSK)
					/* 00=dis; 01=1x; 10=2x; 11=3x */
#define PHY_M_EC_DSC_2(x)	(((u16)(x)<<9) & PHY_M_EC_M_DSC_MSK2)
					/* 000=1x; 001=2x; 010=3x; 011=4x */
#define PHY_M_EC_MAC_S(x)	(((u16)(x)<<4) & PHY_M_EC_MAC_S_MSK)
					/* 01X=0; 110=2.5; 111=25 (MHz) */

/* for Yukon-2 Gigabit Ethernet PHY (88E1112 only) */
enum {
	PHY_M_PC_DIS_LINK_Pa	= 1<<15, /* Disable Link Pulses */
	PHY_M_PC_DSC_MSK	= 7<<12, /* Bit 14..12: Downshift Counter */
	PHY_M_PC_DOWN_S_ENA	= 1<<11, /* Downshift Enable */
};
/* !!! Errata in spec. (1 = disable) */

#define PHY_M_PC_DSC(x)	(((u16)(x)<<12) & PHY_M_PC_DSC_MSK)
					/* 100=5x; 101=6x; 110=7x; 111=8x */

/* values for the PHY_M_EC_MAC_S() default-MAC-interface-speed field */
enum {
	MAC_TX_CLK_0_MHZ	= 2,
	MAC_TX_CLK_2_5_MHZ	= 6,
	MAC_TX_CLK_25_MHZ	= 7,
};
1453 | |||
/***** PHY_MARV_LED_CTRL	16 bit r/w	LED Control Reg *****/
enum {
	PHY_M_LEDC_DIS_LED	= 1<<15, /* Disable LED */
	PHY_M_LEDC_PULS_MSK	= 7<<12, /* Bit 14..12: Pulse Stretch Mask */
	PHY_M_LEDC_F_INT	= 1<<11, /* Force Interrupt */
	PHY_M_LEDC_BL_R_MSK	= 7<<8,  /* Bit 10.. 8: Blink Rate Mask */
	PHY_M_LEDC_DP_C_LSB	= 1<<7,  /* Duplex Control (LSB, 88E1111 only) */
	PHY_M_LEDC_TX_C_LSB	= 1<<6,  /* Tx Control (LSB, 88E1111 only) */
	PHY_M_LEDC_LK_C_MSK	= 7<<3,  /* Bit  5.. 3: Link Control Mask */
					 /* (88E1111 only) */
};

enum {
	PHY_M_LEDC_LINK_MSK	= 3<<3,  /* Bit  4.. 3: Link Control Mask */
					 /* (88E1011 only) */
	PHY_M_LEDC_DP_CTRL	= 1<<2,  /* Duplex Control */
	PHY_M_LEDC_DP_C_MSB	= 1<<2,  /* Duplex Control (MSB, 88E1111 only) */
	PHY_M_LEDC_RX_CTRL	= 1<<1,  /* Rx Activity / Link */
	PHY_M_LEDC_TX_CTRL	= 1<<0,  /* Tx Activity / Link */
	PHY_M_LEDC_TX_C_MSB	= 1<<0,  /* Tx Control (MSB, 88E1111 only) */
};

#define PHY_M_LED_PULS_DUR(x)	(((u16)(x)<<12) & PHY_M_LEDC_PULS_MSK)

/***** PHY_MARV_PHY_STAT (page 3) 16 bit r/w	Polarity Control Reg. *****/
enum {
	PHY_M_POLC_LS1M_MSK	= 0xf<<12, /* Bit 15..12: LOS,STAT1 Mix % Mask */
	PHY_M_POLC_IS0M_MSK	= 0xf<<8,  /* Bit 11.. 8: INIT,STAT0 Mix % Mask */
	PHY_M_POLC_LOS_MSK	= 0x3<<6,  /* Bit  7.. 6: LOS  Pol. Ctrl. Mask */
	PHY_M_POLC_INIT_MSK	= 0x3<<4,  /* Bit  5.. 4: INIT Pol. Ctrl. Mask */
	PHY_M_POLC_STA1_MSK	= 0x3<<2,  /* Bit  3.. 2: STAT1 Pol. Ctrl. Mask */
	PHY_M_POLC_STA0_MSK	= 0x3,     /* Bit  1.. 0: STAT0 Pol. Ctrl. Mask */
};

#define PHY_M_POLC_LS1_P_MIX(x)	(((x)<<12) & PHY_M_POLC_LS1M_MSK)
#define PHY_M_POLC_IS0_P_MIX(x)	(((x)<<8) & PHY_M_POLC_IS0M_MSK)
#define PHY_M_POLC_LOS_CTRL(x)	(((x)<<6) & PHY_M_POLC_LOS_MSK)
#define PHY_M_POLC_INIT_CTRL(x)	(((x)<<4) & PHY_M_POLC_INIT_MSK)
#define PHY_M_POLC_STA1_CTRL(x)	(((x)<<2) & PHY_M_POLC_STA1_MSK)
#define PHY_M_POLC_STA0_CTRL(x)	(((x)<<0) & PHY_M_POLC_STA0_MSK)

/* pulse-stretch durations for the PHY_M_LED_PULS_DUR() field */
enum {
	PULS_NO_STR	= 0, /* no pulse stretching */
	PULS_21MS	= 1, /* 21 ms to 42 ms */
	PULS_42MS	= 2, /* 42 ms to 84 ms */
	PULS_84MS	= 3, /* 84 ms to 170 ms */
	PULS_170MS	= 4, /* 170 ms to 340 ms */
	PULS_340MS	= 5, /* 340 ms to 670 ms */
	PULS_670MS	= 6, /* 670 ms to 1.3 s */
	PULS_1300MS	= 7, /* 1.3 s to 2.7 s */
};

#define PHY_M_LED_BLINK_RT(x)	(((u16)(x)<<8) & PHY_M_LEDC_BL_R_MSK)

/* blink rates for the PHY_M_LED_BLINK_RT() field */
enum {
	BLINK_42MS	= 0, /* 42 ms */
	BLINK_84MS	= 1, /* 84 ms */
	BLINK_170MS	= 2, /* 170 ms */
	BLINK_340MS	= 3, /* 340 ms */
	BLINK_670MS	= 4, /* 670 ms */
};

/***** PHY_MARV_LED_OVER	16 bit r/w	Manual LED Override Reg *****/
#define PHY_M_LED_MO_SGMII(x)	((x)<<14) /* Bit 15..14: SGMII AN Timer */

#define PHY_M_LED_MO_DUP(x)	((x)<<10) /* Bit 11..10: Duplex */
#define PHY_M_LED_MO_10(x)	((x)<<8)  /* Bit  9.. 8: Link 10 */
#define PHY_M_LED_MO_100(x)	((x)<<6)  /* Bit  7.. 6: Link 100 */
#define PHY_M_LED_MO_1000(x)	((x)<<4)  /* Bit  5.. 4: Link 1000 */
#define PHY_M_LED_MO_RX(x)	((x)<<2)  /* Bit  3.. 2: Rx */
#define PHY_M_LED_MO_TX(x)	((x)<<0)  /* Bit  1.. 0: Tx */

/* per-function override modes used with the PHY_M_LED_MO_*() fields */
enum led_mode {
	MO_LED_NORM	= 0,
	MO_LED_BLINK	= 1,
	MO_LED_OFF	= 2,
	MO_LED_ON	= 3,
};
1532 | |||
/***** PHY_MARV_EXT_CTRL_2	16 bit r/w	Ext. PHY Specific Ctrl 2 *****/
enum {
	PHY_M_EC2_FI_IMPED	= 1<<6, /* Fiber Input Impedance */
	PHY_M_EC2_FO_IMPED	= 1<<5, /* Fiber Output Impedance */
	PHY_M_EC2_FO_M_CLK	= 1<<4, /* Fiber Mode Clock Enable */
	PHY_M_EC2_FO_BOOST	= 1<<3, /* Fiber Output Boost */
	PHY_M_EC2_FO_AM_MSK	= 7,    /* Bit  2.. 0: Fiber Output Amplitude */
};

/***** PHY_MARV_EXT_P_STAT	16 bit r/w	Ext. PHY Specific Status *****/
enum {
	PHY_M_FC_AUTO_SEL	= 1<<15, /* Fiber/Copper Auto Sel. Dis. */
	PHY_M_FC_AN_REG_ACC	= 1<<14, /* Fiber/Copper AN Reg. Access */
	PHY_M_FC_RESOLUTION	= 1<<13, /* Fiber/Copper Resolution */
	PHY_M_SER_IF_AN_BP	= 1<<12, /* Ser. IF AN Bypass Enable */
	PHY_M_SER_IF_BP_ST	= 1<<11, /* Ser. IF AN Bypass Status */
	PHY_M_IRQ_POLARITY	= 1<<10, /* IRQ polarity */
	PHY_M_DIS_AUT_MED	= 1<<9,  /* Disable Aut. Medium Reg. Selection */
					 /* (88E1111 only) */

	PHY_M_UNDOC1		= 1<<7,  /* undocumented bit !! */
	PHY_M_DTE_POW_STAT	= 1<<4,  /* DTE Power Status (88E1111 only) */
	PHY_M_MODE_MASK		= 0xf,   /* Bit  3.. 0: copy of HWCFG MODE[3:0] */
};

/* for 10/100 Fast Ethernet PHY (88E3082 only) */
/***** PHY_MARV_FE_LED_PAR	16 bit r/w	LED Parallel Select Reg. *****/
				/* Bit 15..12: reserved (used internally) */
enum {
	PHY_M_FELP_LED2_MSK = 0xf<<8, /* Bit 11.. 8: LED2 Mask (LINK) */
	PHY_M_FELP_LED1_MSK = 0xf<<4, /* Bit  7.. 4: LED1 Mask (ACT) */
	PHY_M_FELP_LED0_MSK = 0xf,    /* Bit  3.. 0: LED0 Mask (SPEED) */
};

#define PHY_M_FELP_LED2_CTRL(x)	(((u16)(x)<<8) & PHY_M_FELP_LED2_MSK)
#define PHY_M_FELP_LED1_CTRL(x)	(((u16)(x)<<4) & PHY_M_FELP_LED1_MSK)
#define PHY_M_FELP_LED0_CTRL(x)	(((u16)(x)<<0) & PHY_M_FELP_LED0_MSK)

/* LED function codes for the PHY_M_FELP_LED*_CTRL() fields */
enum {
	LED_PAR_CTRL_COLX	= 0x00,
	LED_PAR_CTRL_ERROR	= 0x01,
	LED_PAR_CTRL_DUPLEX	= 0x02,
	LED_PAR_CTRL_DP_COL	= 0x03,
	LED_PAR_CTRL_SPEED	= 0x04,
	LED_PAR_CTRL_LINK	= 0x05,
	LED_PAR_CTRL_TX		= 0x06,
	LED_PAR_CTRL_RX		= 0x07,
	LED_PAR_CTRL_ACT	= 0x08,
	LED_PAR_CTRL_LNK_RX	= 0x09,
	LED_PAR_CTRL_LNK_AC	= 0x0a,
	LED_PAR_CTRL_ACT_BL	= 0x0b,
	LED_PAR_CTRL_TX_BL	= 0x0c,
	LED_PAR_CTRL_RX_BL	= 0x0d,
	LED_PAR_CTRL_COL_BL	= 0x0e,
	LED_PAR_CTRL_INACT	= 0x0f
};

/***** PHY_MARV_FE_SPEC_2	16 bit r/w	Specific Control Reg. 2 *****/
enum {
	PHY_M_FESC_DIS_WAIT	= 1<<2, /* Disable TDR Waiting Period */
	PHY_M_FESC_ENA_MCLK	= 1<<1, /* Enable MAC Rx Clock in sleep mode */
	PHY_M_FESC_SEL_CL_A	= 1<<0, /* Select Class A driver (100B-TX) */
};
1596 | |||
/* for Yukon-2 Gigabit Ethernet PHY (88E1112 only) */
/***** PHY_MARV_PHY_CTRL (page 1)	16 bit r/w	Fiber Specific Ctrl *****/
enum {
	PHY_M_FIB_FORCE_LNK	= 1<<10, /* Force Link Good */
	PHY_M_FIB_SIGD_POL	= 1<<9,  /* SIGDET Polarity */
	PHY_M_FIB_TX_DIS	= 1<<3,  /* Transmitter Disable */
};

/* for Yukon-2 Gigabit Ethernet PHY (88E1112 only) */
/***** PHY_MARV_PHY_CTRL (page 2)	16 bit r/w	MAC Specific Ctrl *****/
enum {
	PHY_M_MAC_MD_MSK	= 7<<7, /* Bit  9.. 7: Mode Select Mask */
	PHY_M_MAC_GMIF_PUP	= 1<<3, /* GMII Power Up (88E1149 only) */
	PHY_M_MAC_MD_AUTO	= 3,    /* Auto Copper/1000Base-X */
	PHY_M_MAC_MD_COPPER	= 5,    /* Copper only */
	PHY_M_MAC_MD_1000BX	= 7,    /* 1000Base-X only */
};
#define PHY_M_MAC_MODE_SEL(x)	(((x)<<7) & PHY_M_MAC_MD_MSK)

/***** PHY_MARV_PHY_CTRL (page 3)	16 bit r/w	LED Control Reg. *****/
enum {
	PHY_M_LEDC_LOS_MSK	= 0xf<<12, /* Bit 15..12: LOS  LED Ctrl. Mask */
	PHY_M_LEDC_INIT_MSK	= 0xf<<8,  /* Bit 11.. 8: INIT LED Ctrl. Mask */
	PHY_M_LEDC_STA1_MSK	= 0xf<<4,  /* Bit  7.. 4: STAT1 LED Ctrl. Mask */
	PHY_M_LEDC_STA0_MSK	= 0xf,     /* Bit  3.. 0: STAT0 LED Ctrl. Mask */
};

#define PHY_M_LEDC_LOS_CTRL(x)	(((x)<<12) & PHY_M_LEDC_LOS_MSK)
#define PHY_M_LEDC_INIT_CTRL(x)	(((x)<<8) & PHY_M_LEDC_INIT_MSK)
#define PHY_M_LEDC_STA1_CTRL(x)	(((x)<<4) & PHY_M_LEDC_STA1_MSK)
#define PHY_M_LEDC_STA0_CTRL(x)	(((x)<<0) & PHY_M_LEDC_STA0_MSK)
1628 | |||
/* GMAC registers */
/* Port Registers (offsets within the per-port GMAC register block) */
enum {
	GM_GP_STAT	= 0x0000, /* 16 bit r/o	General Purpose Status */
	GM_GP_CTRL	= 0x0004, /* 16 bit r/w	General Purpose Control */
	GM_TX_CTRL	= 0x0008, /* 16 bit r/w	Transmit Control Reg. */
	GM_RX_CTRL	= 0x000c, /* 16 bit r/w	Receive Control Reg. */
	GM_TX_FLOW_CTRL	= 0x0010, /* 16 bit r/w	Transmit Flow-Control */
	GM_TX_PARAM	= 0x0014, /* 16 bit r/w	Transmit Parameter Reg. */
	GM_SERIAL_MODE	= 0x0018, /* 16 bit r/w	Serial Mode Register */
/* Source Address Registers */
	GM_SRC_ADDR_1L	= 0x001c, /* 16 bit r/w	Source Address 1 (low) */
	GM_SRC_ADDR_1M	= 0x0020, /* 16 bit r/w	Source Address 1 (middle) */
	GM_SRC_ADDR_1H	= 0x0024, /* 16 bit r/w	Source Address 1 (high) */
	GM_SRC_ADDR_2L	= 0x0028, /* 16 bit r/w	Source Address 2 (low) */
	GM_SRC_ADDR_2M	= 0x002c, /* 16 bit r/w	Source Address 2 (middle) */
	GM_SRC_ADDR_2H	= 0x0030, /* 16 bit r/w	Source Address 2 (high) */

/* Multicast Address Hash Registers */
	GM_MC_ADDR_H1	= 0x0034, /* 16 bit r/w	Multicast Address Hash 1 */
	GM_MC_ADDR_H2	= 0x0038, /* 16 bit r/w	Multicast Address Hash 2 */
	GM_MC_ADDR_H3	= 0x003c, /* 16 bit r/w	Multicast Address Hash 3 */
	GM_MC_ADDR_H4	= 0x0040, /* 16 bit r/w	Multicast Address Hash 4 */

/* Interrupt Source Registers */
	GM_TX_IRQ_SRC	= 0x0044, /* 16 bit r/o	Tx Overflow IRQ Source */
	GM_RX_IRQ_SRC	= 0x0048, /* 16 bit r/o	Rx Overflow IRQ Source */
	GM_TR_IRQ_SRC	= 0x004c, /* 16 bit r/o	Tx/Rx Over. IRQ Source */

/* Interrupt Mask Registers */
	GM_TX_IRQ_MSK	= 0x0050, /* 16 bit r/w	Tx Overflow IRQ Mask */
	GM_RX_IRQ_MSK	= 0x0054, /* 16 bit r/w	Rx Overflow IRQ Mask */
	GM_TR_IRQ_MSK	= 0x0058, /* 16 bit r/w	Tx/Rx Over. IRQ Mask */

/* Serial Management Interface (SMI) Registers */
	GM_SMI_CTRL	= 0x0080, /* 16 bit r/w	SMI Control Register */
	GM_SMI_DATA	= 0x0084, /* 16 bit r/w	SMI Data Register */
	GM_PHY_ADDR	= 0x0088, /* 16 bit r/w	GPHY Address Register */
/* MIB Counters */
	GM_MIB_CNT_BASE	= 0x0100, /* Base Address of MIB Counters */
	GM_MIB_CNT_END	= 0x025C, /* Last MIB counter */
};


/*
 * MIB Counters base address definitions (low word) -
 * use offset 4 for access to high word	(32 bit r/o)
 */
enum {
	GM_RXF_UC_OK	= GM_MIB_CNT_BASE + 0,	/* Unicast Frames Received OK */
	GM_RXF_BC_OK	= GM_MIB_CNT_BASE + 8,	/* Broadcast Frames Received OK */
	GM_RXF_MPAUSE	= GM_MIB_CNT_BASE + 16,	/* Pause MAC Ctrl Frames Received */
	GM_RXF_MC_OK	= GM_MIB_CNT_BASE + 24,	/* Multicast Frames Received OK */
	GM_RXF_FCS_ERR	= GM_MIB_CNT_BASE + 32,	/* Rx Frame Check Seq. Error */

	GM_RXO_OK_LO	= GM_MIB_CNT_BASE + 48,	/* Octets Received OK Low */
	GM_RXO_OK_HI	= GM_MIB_CNT_BASE + 56,	/* Octets Received OK High */
	GM_RXO_ERR_LO	= GM_MIB_CNT_BASE + 64,	/* Octets Received Invalid Low */
	GM_RXO_ERR_HI	= GM_MIB_CNT_BASE + 72,	/* Octets Received Invalid High */
	GM_RXF_SHT	= GM_MIB_CNT_BASE + 80,	/* Frames <64 Byte Received OK */
	GM_RXE_FRAG	= GM_MIB_CNT_BASE + 88,	/* Frames <64 Byte Received with FCS Err */
	GM_RXF_64B	= GM_MIB_CNT_BASE + 96,	/* 64 Byte Rx Frame */
	GM_RXF_127B	= GM_MIB_CNT_BASE + 104, /* 65-127 Byte Rx Frame */
	GM_RXF_255B	= GM_MIB_CNT_BASE + 112, /* 128-255 Byte Rx Frame */
	GM_RXF_511B	= GM_MIB_CNT_BASE + 120, /* 256-511 Byte Rx Frame */
	GM_RXF_1023B	= GM_MIB_CNT_BASE + 128, /* 512-1023 Byte Rx Frame */
	GM_RXF_1518B	= GM_MIB_CNT_BASE + 136, /* 1024-1518 Byte Rx Frame */
	GM_RXF_MAX_SZ	= GM_MIB_CNT_BASE + 144, /* 1519-MaxSize Byte Rx Frame */
	GM_RXF_LNG_ERR	= GM_MIB_CNT_BASE + 152, /* Rx Frame too Long Error */
	GM_RXF_JAB_PKT	= GM_MIB_CNT_BASE + 160, /* Rx Jabber Packet Frame */

	GM_RXE_FIFO_OV	= GM_MIB_CNT_BASE + 176, /* Rx FIFO overflow Event */
	GM_TXF_UC_OK	= GM_MIB_CNT_BASE + 192, /* Unicast Frames Xmitted OK */
	GM_TXF_BC_OK	= GM_MIB_CNT_BASE + 200, /* Broadcast Frames Xmitted OK */
	GM_TXF_MPAUSE	= GM_MIB_CNT_BASE + 208, /* Pause MAC Ctrl Frames Xmitted */
	GM_TXF_MC_OK	= GM_MIB_CNT_BASE + 216, /* Multicast Frames Xmitted OK */
	GM_TXO_OK_LO	= GM_MIB_CNT_BASE + 224, /* Octets Transmitted OK Low */
	GM_TXO_OK_HI	= GM_MIB_CNT_BASE + 232, /* Octets Transmitted OK High */
	GM_TXF_64B	= GM_MIB_CNT_BASE + 240, /* 64 Byte Tx Frame */
	GM_TXF_127B	= GM_MIB_CNT_BASE + 248, /* 65-127 Byte Tx Frame */
	GM_TXF_255B	= GM_MIB_CNT_BASE + 256, /* 128-255 Byte Tx Frame */
	GM_TXF_511B	= GM_MIB_CNT_BASE + 264, /* 256-511 Byte Tx Frame */
	GM_TXF_1023B	= GM_MIB_CNT_BASE + 272, /* 512-1023 Byte Tx Frame */
	GM_TXF_1518B	= GM_MIB_CNT_BASE + 280, /* 1024-1518 Byte Tx Frame */
	GM_TXF_MAX_SZ	= GM_MIB_CNT_BASE + 288, /* 1519-MaxSize Byte Tx Frame */

	GM_TXF_COL	= GM_MIB_CNT_BASE + 304, /* Tx Collision */
	GM_TXF_LAT_COL	= GM_MIB_CNT_BASE + 312, /* Tx Late Collision */
	GM_TXF_ABO_COL	= GM_MIB_CNT_BASE + 320, /* Tx aborted due to Exces. Col. */
	GM_TXF_MUL_COL	= GM_MIB_CNT_BASE + 328, /* Tx Multiple Collision */
	GM_TXF_SNG_COL	= GM_MIB_CNT_BASE + 336, /* Tx Single Collision */
	GM_TXE_FIFO_UR	= GM_MIB_CNT_BASE + 344, /* Tx FIFO Underrun Event */
};
1722 | |||
/* GMAC Bit Definitions */
/* GM_GP_STAT	16 bit r/o	General Purpose Status Register */
enum {
	GM_GPSR_SPEED		= 1<<15, /* Bit 15: Port Speed (1 = 100 Mbps) */
	GM_GPSR_DUPLEX		= 1<<14, /* Bit 14: Duplex Mode (1 = Full) */
	GM_GPSR_FC_TX_DIS	= 1<<13, /* Bit 13: Tx Flow-Control Mode Disabled */
	GM_GPSR_LINK_UP		= 1<<12, /* Bit 12: Link Up Status */
	GM_GPSR_PAUSE		= 1<<11, /* Bit 11: Pause State */
	GM_GPSR_TX_ACTIVE	= 1<<10, /* Bit 10: Tx in Progress */
	GM_GPSR_EXC_COL		= 1<<9,  /* Bit  9: Excessive Collisions Occurred */
	GM_GPSR_LAT_COL		= 1<<8,  /* Bit  8: Late Collisions Occurred */

	GM_GPSR_PHY_ST_CH	= 1<<5,  /* Bit  5: PHY Status Change */
	GM_GPSR_GIG_SPEED	= 1<<4,  /* Bit  4: Gigabit Speed (1 = 1000 Mbps) */
	GM_GPSR_PART_MODE	= 1<<3,  /* Bit  3: Partition mode */
	GM_GPSR_FC_RX_DIS	= 1<<2,  /* Bit  2: Rx Flow-Control Mode Disabled */
	GM_GPSR_PROM_EN		= 1<<1,  /* Bit  1: Promiscuous Mode Enabled */
};

/* GM_GP_CTRL	16 bit r/w	General Purpose Control Register */
enum {
	GM_GPCR_PROM_ENA	= 1<<14, /* Bit 14: Enable Promiscuous Mode */
	GM_GPCR_FC_TX_DIS	= 1<<13, /* Bit 13: Disable Tx Flow-Control Mode */
	GM_GPCR_TX_ENA		= 1<<12, /* Bit 12: Enable Transmit */
	GM_GPCR_RX_ENA		= 1<<11, /* Bit 11: Enable Receive */
	GM_GPCR_BURST_ENA	= 1<<10, /* Bit 10: Enable Burst Mode */
	GM_GPCR_LOOP_ENA	= 1<<9,  /* Bit  9: Enable MAC Loopback Mode */
	GM_GPCR_PART_ENA	= 1<<8,  /* Bit  8: Enable Partition Mode */
	GM_GPCR_GIGS_ENA	= 1<<7,  /* Bit  7: Gigabit Speed (1000 Mbps) */
	GM_GPCR_FL_PASS		= 1<<6,  /* Bit  6: Force Link Pass */
	GM_GPCR_DUP_FULL	= 1<<5,  /* Bit  5: Full Duplex Mode */
	GM_GPCR_FC_RX_DIS	= 1<<4,  /* Bit  4: Disable Rx Flow-Control Mode */
	GM_GPCR_SPEED_100	= 1<<3,  /* Bit  3: Port Speed 100 Mbps */
	GM_GPCR_AU_DUP_DIS	= 1<<2,  /* Bit  2: Disable Auto-Update Duplex */
	GM_GPCR_AU_FCT_DIS	= 1<<1,  /* Bit  1: Disable Auto-Update Flow-C. */
	GM_GPCR_AU_SPD_DIS	= 1<<0,  /* Bit  0: Disable Auto-Update Speed */
};

/* 1000 Mbps = gigabit speed bit plus the 100 Mbps speed bit */
#define GM_GPCR_SPEED_1000	(GM_GPCR_GIGS_ENA | GM_GPCR_SPEED_100)
1762 | |||
/* GM_TX_CTRL	16 bit r/w	Transmit Control Register */
enum {
	GM_TXCR_FORCE_JAM	= 1<<15, /* Bit 15: Force Jam / Flow-Control */
	GM_TXCR_CRC_DIS		= 1<<14, /* Bit 14: Disable insertion of CRC */
	GM_TXCR_PAD_DIS		= 1<<13, /* Bit 13: Disable padding of packets */
	GM_TXCR_COL_THR_MSK	= 7<<10, /* Bit 12..10: Collision Threshold */
};

#define TX_COL_THR(x)	(((x)<<10) & GM_TXCR_COL_THR_MSK)
#define TX_COL_DEF	0x04	/* default collision threshold */

/* GM_RX_CTRL	16 bit r/w	Receive Control Register */
enum {
	GM_RXCR_UCF_ENA	= 1<<15, /* Bit 15: Enable Unicast filtering */
	GM_RXCR_MCF_ENA	= 1<<14, /* Bit 14: Enable Multicast filtering */
	GM_RXCR_CRC_DIS	= 1<<13, /* Bit 13: Remove 4-byte CRC */
	GM_RXCR_PASS_FC	= 1<<12, /* Bit 12: Pass FC packets to FIFO */
};

/* GM_TX_PARAM	16 bit r/w	Transmit Parameter Register */
enum {
	GM_TXPA_JAMLEN_MSK	= 0x03<<14, /* Bit 15..14: Jam Length */
	GM_TXPA_JAMIPG_MSK	= 0x1f<<9,  /* Bit 13.. 9: Jam IPG */
	GM_TXPA_JAMDAT_MSK	= 0x1f<<4,  /* Bit  8.. 4: IPG Jam to Data */
	GM_TXPA_BO_LIM_MSK	= 0x0f,     /* Bit  3.. 0: Backoff Limit Mask */

	/* default field values */
	TX_JAM_LEN_DEF		= 0x03,
	TX_JAM_IPG_DEF		= 0x0b,
	TX_IPG_JAM_DEF		= 0x1c,
	TX_BOF_LIM_DEF		= 0x04,
};

#define TX_JAM_LEN_VAL(x)	(((x)<<14) & GM_TXPA_JAMLEN_MSK)
#define TX_JAM_IPG_VAL(x)	(((x)<<9)  & GM_TXPA_JAMIPG_MSK)
#define TX_IPG_JAM_DATA(x)	(((x)<<4)  & GM_TXPA_JAMDAT_MSK)
#define TX_BACK_OFF_LIM(x)	((x) & GM_TXPA_BO_LIM_MSK)
1799 | |||
1800 | |||
/* GM_SERIAL_MODE	16 bit r/w	Serial Mode Register */
enum {
	GM_SMOD_DATABL_MSK	= 0x1f<<11, /* Bit 15..11: Data Blinder (r/o) */
	GM_SMOD_LIMIT_4		= 1<<10, /* 4 consecutive Tx trials */
	GM_SMOD_VLAN_ENA	= 1<<9,  /* Enable VLAN  (Max. Frame Len) */
	GM_SMOD_JUMBO_ENA	= 1<<8,  /* Enable Jumbo (Max. Frame Len) */

	GM_NEW_FLOW_CTRL	= 1<<6,  /* Enable New Flow-Control */

	GM_SMOD_IPG_MSK		= 0x1f   /* Bit  4.. 0: Inter-Packet Gap (IPG) */
};

#define DATA_BLIND_VAL(x)	(((x)<<11) & GM_SMOD_DATABL_MSK)
/* argument fully parenthesized so expression arguments bind correctly */
#define IPG_DATA_VAL(x)		((x) & GM_SMOD_IPG_MSK)

#define DATA_BLIND_DEF		0x04
#define IPG_DATA_DEF_1000	0x1e
#define IPG_DATA_DEF_10_100	0x18

/* GM_SMI_CTRL	16 bit r/w	SMI Control Register */
enum {
	GM_SMI_CT_PHY_A_MSK	= 0x1f<<11, /* Bit 15..11: PHY Device Address */
	GM_SMI_CT_REG_A_MSK	= 0x1f<<6,  /* Bit 10.. 6: PHY Register Address */
	GM_SMI_CT_OP_RD		= 1<<5, /* Bit  5: OpCode Read (0=Write)*/
	GM_SMI_CT_RD_VAL	= 1<<4, /* Bit  4: Read Valid (Read completed) */
	GM_SMI_CT_BUSY		= 1<<3, /* Bit  3: Busy (Operation in progress) */
};

#define GM_SMI_CT_PHY_AD(x)	(((u16)(x)<<11) & GM_SMI_CT_PHY_A_MSK)
#define GM_SMI_CT_REG_AD(x)	(((u16)(x)<<6) & GM_SMI_CT_REG_A_MSK)

/* GM_PHY_ADDR	16 bit r/w	GPHY Address Register */
enum {
	GM_PAR_MIB_CLR	= 1<<5, /* Bit  5: Set MIB Clear Counter Mode */
	GM_PAR_MIB_TST	= 1<<4, /* Bit  4: MIB Load Counter (Test Mode) */
};
1837 | |||
/* Receive Frame Status Encoding */
enum {
	GMR_FS_LEN	= 0x7fff<<16, /* Bit 30..16: Rx Frame Length */
	GMR_FS_VLAN	= 1<<13, /* VLAN Packet */
	GMR_FS_JABBER	= 1<<12, /* Jabber Packet */
	GMR_FS_UN_SIZE	= 1<<11, /* Undersize Packet */
	GMR_FS_MC	= 1<<10, /* Multicast Packet */
	GMR_FS_BC	= 1<<9,  /* Broadcast Packet */
	GMR_FS_RX_OK	= 1<<8,  /* Receive OK (Good Packet) */
	GMR_FS_GOOD_FC	= 1<<7,  /* Good Flow-Control Packet */
	GMR_FS_BAD_FC	= 1<<6,  /* Bad Flow-Control Packet */
	GMR_FS_MII_ERR	= 1<<5,  /* MII Error */
	GMR_FS_LONG_ERR	= 1<<4,  /* Too Long Packet */
	GMR_FS_FRAGMENT	= 1<<3,  /* Fragment */

	GMR_FS_CRC_ERR	= 1<<1,  /* CRC Error */
	GMR_FS_RX_FF_OV	= 1<<0,  /* Rx FIFO Overflow */

	/* union of all error conditions above */
	GMR_FS_ANY_ERR	= GMR_FS_RX_FF_OV | GMR_FS_CRC_ERR |
			  GMR_FS_FRAGMENT | GMR_FS_LONG_ERR |
			  GMR_FS_MII_ERR | GMR_FS_BAD_FC |
			  GMR_FS_UN_SIZE | GMR_FS_JABBER,
};
1861 | |||
/* RX_GMF_CTRL_T 32 bit Rx GMAC FIFO Control/Test */
enum {
	/* NOTE(review): 1<<31 overflows a signed int in strict ISO C; the
	 * kernel relies on GCC's defined two's-complement behaviour here. */
	RX_GCLKMAC_ENA	= 1<<31,	/* RX MAC Clock Gating Enable */
	RX_GCLKMAC_OFF	= 1<<30,	/* RX MAC Clock Gating Off */

	RX_STFW_DIS	= 1<<29,	/* RX Store and Forward Disable */
	RX_STFW_ENA	= 1<<28,	/* RX Store and Forward Enable */

	RX_TRUNC_ON	= 1<<27,	/* enable packet truncation */
	RX_TRUNC_OFF	= 1<<26,	/* disable packet truncation */
	RX_VLAN_STRIP_ON = 1<<25,	/* enable VLAN stripping */
	RX_VLAN_STRIP_OFF = 1<<24,	/* disable VLAN stripping */

	RX_MACSEC_FLUSH_ON  = 1<<23,	/* enable MACsec flush */
	RX_MACSEC_FLUSH_OFF = 1<<22,	/* disable MACsec flush */
	RX_MACSEC_ASF_FLUSH_ON = 1<<21,	/* enable MACsec ASF flush */
	RX_MACSEC_ASF_FLUSH_OFF = 1<<20, /* disable MACsec ASF flush */

	GMF_RX_OVER_ON      = 1<<19,	/* enable flushing on receive overrun */
	GMF_RX_OVER_OFF     = 1<<18,	/* disable flushing on receive overrun */
	GMF_ASF_RX_OVER_ON  = 1<<17,	/* enable flushing of ASF when overrun */
	GMF_ASF_RX_OVER_OFF = 1<<16,	/* disable flushing of ASF when overrun */

	GMF_WP_TST_ON	= 1<<14,	/* Write Pointer Test On */
	GMF_WP_TST_OFF	= 1<<13,	/* Write Pointer Test Off */
	GMF_WP_STEP	= 1<<12,	/* Write Pointer Step/Increment */

	GMF_RP_TST_ON	= 1<<10,	/* Read Pointer Test On */
	GMF_RP_TST_OFF	= 1<<9,		/* Read Pointer Test Off */
	GMF_RP_STEP	= 1<<8,		/* Read Pointer Step/Increment */
	GMF_RX_F_FL_ON	= 1<<7,		/* Rx FIFO Flush Mode On */
	GMF_RX_F_FL_OFF	= 1<<6,		/* Rx FIFO Flush Mode Off */
	GMF_CLI_RX_FO	= 1<<5,		/* Clear IRQ Rx FIFO Overrun */
	GMF_CLI_RX_C	= 1<<4,		/* Clear IRQ Rx Frame Complete */

	GMF_OPER_ON	= 1<<3,		/* Operational Mode On */
	GMF_OPER_OFF	= 1<<2,		/* Operational Mode Off */
	GMF_RST_CLR	= 1<<1,		/* Clear GMAC FIFO Reset */
	GMF_RST_SET	= 1<<0,		/* Set GMAC FIFO Reset */

	RX_GMF_FL_THR_DEF = 0xa,	/* flush threshold (default) */

	GMF_RX_CTRL_DEF	= GMF_OPER_ON | GMF_RX_F_FL_ON,	/* default Rx FIFO setup */
};
1906 | |||
/* RX_GMF_FL_CTRL 16 bit Rx GMAC FIFO Flush Control (Yukon-Supreme) */
enum {
	RX_IPV6_SA_MOB_ENA	= 1<<9,	/* IPv6 SA Mobility Support Enable */
	RX_IPV6_SA_MOB_DIS	= 1<<8,	/* IPv6 SA Mobility Support Disable */
	RX_IPV6_DA_MOB_ENA	= 1<<7,	/* IPv6 DA Mobility Support Enable */
	RX_IPV6_DA_MOB_DIS	= 1<<6,	/* IPv6 DA Mobility Support Disable */
	RX_PTR_SYNCDLY_ENA	= 1<<5,	/* Pointers Delay Synch Enable */
	RX_PTR_SYNCDLY_DIS	= 1<<4,	/* Pointers Delay Synch Disable */
	RX_ASF_NEWFLAG_ENA	= 1<<3,	/* RX ASF Flag New Logic Enable */
	RX_ASF_NEWFLAG_DIS	= 1<<2,	/* RX ASF Flag New Logic Disable */
	RX_FLSH_MISSPKT_ENA	= 1<<1,	/* RX Flush Miss-Packet Enable */
	RX_FLSH_MISSPKT_DIS	= 1<<0,	/* RX Flush Miss-Packet Disable */
};
1920 | |||
/* TX_GMF_EA 32 bit Tx GMAC FIFO End Address */
enum {
	TX_DYN_WM_ENA	= 3,	/* enable dynamic watermark (Yukon-FE+ specific) */
};
1925 | |||
/* TX_GMF_CTRL_T 32 bit Tx GMAC FIFO Control/Test */
enum {
	/* NOTE(review): 1<<31 overflows a signed int in strict ISO C; the
	 * kernel relies on GCC's defined two's-complement behaviour here. */
	TX_STFW_DIS	= 1<<31,	/* Disable Store & Forward */
	TX_STFW_ENA	= 1<<30,	/* Enable Store & Forward */

	TX_VLAN_TAG_ON	= 1<<25,	/* enable VLAN tagging */
	TX_VLAN_TAG_OFF	= 1<<24,	/* disable VLAN tagging */

	TX_PCI_JUM_ENA	= 1<<23,	/* PCI Jumbo Mode enable */
	TX_PCI_JUM_DIS	= 1<<22,	/* PCI Jumbo Mode disable */

	GMF_WSP_TST_ON	= 1<<18,	/* Write Shadow Pointer Test On */
	GMF_WSP_TST_OFF	= 1<<17,	/* Write Shadow Pointer Test Off */
	GMF_WSP_STEP	= 1<<16,	/* Write Shadow Pointer Step/Increment */

	GMF_CLI_TX_FU	= 1<<6,		/* Clear IRQ Tx FIFO Underrun */
	GMF_CLI_TX_FC	= 1<<5,		/* Clear IRQ Tx Frame Complete */
	GMF_CLI_TX_PE	= 1<<4,		/* Clear IRQ Tx Parity Error */
};
1945 | |||
/* GMAC_TI_ST_CTRL 8 bit Time Stamp Timer Ctrl Reg (YUKON only) */
enum {
	GMT_ST_START	= 1<<2,	/* Start Time Stamp Timer */
	GMT_ST_STOP	= 1<<1,	/* Stop Time Stamp Timer */
	GMT_ST_CLR_IRQ	= 1<<0,	/* Clear Time Stamp Timer IRQ */
};
1952 | |||
/* B28_Y2_ASF_STAT_CMD 32 bit ASF Status and Command Reg */
enum {
	Y2_ASF_OS_PRES	= 1<<4,	/* ASF operation system present */
	Y2_ASF_RESET	= 1<<3,	/* ASF system in reset state */
	Y2_ASF_RUNNING	= 1<<2,	/* ASF system operational */
	Y2_ASF_CLR_HSTI = 1<<1,	/* Clear ASF IRQ */
	Y2_ASF_IRQ	= 1<<0,	/* Issue an IRQ to ASF system */

	Y2_ASF_UC_STATE = 3<<2,	/* ASF uC State (mask over RESET|RUNNING) */
	Y2_ASF_CLK_HALT	= 0,	/* ASF system clock stopped */
};
1964 | |||
/* B28_Y2_ASF_HOST_COM 32 bit ASF Host Communication Reg */
enum {
	Y2_ASF_CLR_ASFI = 1<<1,	/* Clear host IRQ */
	Y2_ASF_HOST_IRQ = 1<<0,	/* Issue an IRQ to HOST system */
};
/* HCU_CCSR	CPU Control and Status Register */
enum {
	HCU_CCSR_SMBALERT_MONITOR= 1<<27, /* SMBALERT pin monitor */
	HCU_CCSR_CPU_SLEEP	= 1<<26, /* CPU sleep status */
	/* Clock Stretching Timeout */
	HCU_CCSR_CS_TO		= 1<<25,
	HCU_CCSR_WDOG		= 1<<24, /* Watchdog Reset */

	HCU_CCSR_CLR_IRQ_HOST	= 1<<17, /* Clear IRQ_HOST */
	HCU_CCSR_SET_IRQ_HCU	= 1<<16, /* Set IRQ_HCU */

	HCU_CCSR_AHB_RST	= 1<<9, /* Reset AHB bridge */
	HCU_CCSR_CPU_RST_MODE	= 1<<8, /* CPU Reset Mode */

	HCU_CCSR_SET_SYNC_CPU	= 1<<5,	/* set CPU synchronization */
	HCU_CCSR_CPU_CLK_DIVIDE_MSK = 3<<3,/* CPU Clock Divide (2-bit field) */
	HCU_CCSR_CPU_CLK_DIVIDE_BASE= 1<<3,
	HCU_CCSR_OS_PRSNT	= 1<<2, /* ASF OS Present */
	/* Microcontroller State (2-bit field; values below) */
	HCU_CCSR_UC_STATE_MSK	= 3,
	HCU_CCSR_UC_STATE_BASE	= 1<<0,
	HCU_CCSR_ASF_RESET	= 0,	/* uC in reset */
	HCU_CCSR_ASF_HALTED	= 1<<1,	/* uC halted */
	HCU_CCSR_ASF_RUNNING	= 1<<0,	/* uC running */
};
1995 | |||
/* HCU_HCSR	Host Control and Status Register */
enum {
	HCU_HCSR_SET_IRQ_CPU	= 1<<16, /* Set IRQ_CPU */

	HCU_HCSR_CLR_IRQ_HCU	= 1<<1, /* Clear IRQ_HCU */
	HCU_HCSR_SET_IRQ_HOST	= 1<<0,	/* Set IRQ_HOST */
};
2003 | |||
/* STAT_CTRL 32 bit Status BMU control register (Yukon-2 only) */
enum {
	SC_STAT_CLR_IRQ	= 1<<4,	/* Status Burst IRQ clear */
	SC_STAT_OP_ON	= 1<<3,	/* Operational Mode On */
	SC_STAT_OP_OFF	= 1<<2,	/* Operational Mode Off */
	SC_STAT_RST_CLR	= 1<<1,	/* Clear Status Unit Reset (Enable) */
	SC_STAT_RST_SET	= 1<<0,	/* Set Status Unit Reset */
};
2012 | |||
/* GMAC_CTRL 32 bit GMAC Control Reg (YUKON only) */
enum {
	GMC_SET_RST	    = 1<<15,/* Set MACsec reset (pairs with GMC_SEC_RST_OFF) */
	GMC_SEC_RST_OFF     = 1<<14,/* MAC SEC RSt OFF */
	GMC_BYP_MACSECRX_ON = 1<<13,/* Bypass macsec RX */
	GMC_BYP_MACSECRX_OFF= 1<<12,/* Bypass macsec RX off */
	GMC_BYP_MACSECTX_ON = 1<<11,/* Bypass macsec TX */
	GMC_BYP_MACSECTX_OFF= 1<<10,/* Bypass macsec TX off*/
	GMC_BYP_RETR_ON	= 1<<9, /* Bypass retransmit FIFO On */
	GMC_BYP_RETR_OFF= 1<<8, /* Bypass retransmit FIFO Off */

	GMC_H_BURST_ON	= 1<<7,	/* Half Duplex Burst Mode On */
	GMC_H_BURST_OFF	= 1<<6,	/* Half Duplex Burst Mode Off */
	GMC_F_LOOPB_ON	= 1<<5,	/* FIFO Loopback On */
	GMC_F_LOOPB_OFF	= 1<<4,	/* FIFO Loopback Off */
	GMC_PAUSE_ON	= 1<<3,	/* Pause On */
	GMC_PAUSE_OFF	= 1<<2,	/* Pause Off */
	GMC_RST_CLR	= 1<<1,	/* Clear GMAC Reset */
	GMC_RST_SET	= 1<<0,	/* Set GMAC Reset */
};
2033 | |||
/* GPHY_CTRL 32 bit GPHY Control Reg (YUKON only) */
enum {
	GPC_TX_PAUSE	= 1<<30, /* Tx pause enabled (ro) */
	GPC_RX_PAUSE	= 1<<29, /* Rx pause enabled (ro) */
	GPC_SPEED	= 3<<27, /* PHY speed (ro, 2-bit field) */
	GPC_LINK	= 1<<26, /* Link up (ro) */
	GPC_DUPLEX	= 1<<25, /* Duplex (ro) */
	GPC_CLOCK	= 1<<24, /* 125Mhz clock stable (ro) */

	GPC_PDOWN	= 1<<23, /* Internal regulator 2.5V power down */
	GPC_TSTMODE	= 1<<22, /* Test mode */
	GPC_REG18	= 1<<21, /* Reg18 Power down */
	GPC_REG12SEL	= 3<<19, /* Reg12 power setting (2-bit field) */
	GPC_REG18SEL	= 3<<17, /* Reg18 power setting (2-bit field) */
	GPC_SPILOCK	= 1<<16, /* SPI lock (ASF) */

	GPC_LEDMUX	= 3<<14, /* LED Mux (2-bit field) */
	GPC_INTPOL	= 1<<13, /* Interrupt polarity */
	GPC_DETECT	= 1<<12, /* Energy detect */
	GPC_1000HD	= 1<<11, /* Enable 1000Mbit HD */
	GPC_SLAVE	= 1<<10, /* Slave mode */
	GPC_PAUSE	= 1<<9, /* Pause enable */
	GPC_LEDCTL	= 3<<6, /* GPHY Leds (2-bit field) */

	GPC_RST_CLR	= 1<<1,	/* Clear GPHY Reset */
	GPC_RST_SET	= 1<<0,	/* Set GPHY Reset */
};
2061 | |||
/* GMAC_IRQ_SRC	 8 bit GMAC Interrupt Source Reg (YUKON only) */
/* GMAC_IRQ_MSK	 8 bit GMAC Interrupt Mask Reg (YUKON only) */
enum {
	GM_IS_TX_CO_OV	= 1<<5,	/* Transmit Counter Overflow IRQ */
	GM_IS_RX_CO_OV	= 1<<4,	/* Receive Counter Overflow IRQ */
	GM_IS_TX_FF_UR	= 1<<3,	/* Transmit FIFO Underrun */
	GM_IS_TX_COMPL	= 1<<2,	/* Frame Transmission Complete */
	GM_IS_RX_FF_OR	= 1<<1,	/* Receive FIFO Overrun */
	GM_IS_RX_COMPL	= 1<<0,	/* Frame Reception Complete */

/* default interrupt mask: only Tx FIFO underrun is enabled */
#define GMAC_DEF_MSK		GM_IS_TX_FF_UR
};
2074 | |||
/* GMAC_LINK_CTRL	16 bit GMAC Link Control Reg (YUKON only) */
enum {						/* Bits 15.. 2:	reserved */
	GMLC_RST_CLR	= 1<<1,	/* Clear GMAC Link Reset */
	GMLC_RST_SET	= 1<<0,	/* Set GMAC Link Reset */
};
2080 | |||
2081 | |||
/* WOL_CTRL_STAT	16 bit WOL Control/Status Reg
 *
 * Bits 15..13 are read-only occurrence status, bit 12 clears them;
 * the rest are write-one enable/disable pairs for the three wakeup
 * sources (link change, magic packet, pattern match).
 */
enum {
	WOL_CTL_LINK_CHG_OCC		= 1<<15, /* link change occurred (ro) */
	WOL_CTL_MAGIC_PKT_OCC		= 1<<14, /* magic packet received (ro) */
	WOL_CTL_PATTERN_OCC		= 1<<13, /* pattern match occurred (ro) */
	WOL_CTL_CLEAR_RESULT		= 1<<12, /* clear occurrence bits */
	WOL_CTL_ENA_PME_ON_LINK_CHG	= 1<<11, /* PME on link change */
	WOL_CTL_DIS_PME_ON_LINK_CHG	= 1<<10,
	WOL_CTL_ENA_PME_ON_MAGIC_PKT	= 1<<9,	/* PME on magic packet */
	WOL_CTL_DIS_PME_ON_MAGIC_PKT	= 1<<8,
	WOL_CTL_ENA_PME_ON_PATTERN	= 1<<7,	/* PME on pattern match */
	WOL_CTL_DIS_PME_ON_PATTERN	= 1<<6,
	WOL_CTL_ENA_LINK_CHG_UNIT	= 1<<5,	/* link change detection unit */
	WOL_CTL_DIS_LINK_CHG_UNIT	= 1<<4,
	WOL_CTL_ENA_MAGIC_PKT_UNIT	= 1<<3,	/* magic packet detection unit */
	WOL_CTL_DIS_MAGIC_PKT_UNIT	= 1<<2,
	WOL_CTL_ENA_PATTERN_UNIT	= 1<<1,	/* pattern match unit */
	WOL_CTL_DIS_PATTERN_UNIT	= 1<<0,
};
2101 | |||
2102 | |||
/* Control flags (presumably the ctrl byte of Tx list elements — the
 * names mirror the checksum/VLAN/end-of-packet Tx path; confirm against
 * sky2.c usage) */
enum {
	UDPTCP	= 1<<0,		/* checksum over UDP/TCP payload */
	CALSUM	= 1<<1,		/* calculate checksum */
	WR_SUM	= 1<<2,		/* write checksum */
	INIT_SUM= 1<<3,		/* initialize checksum */
	LOCK_SUM= 1<<4,		/* lock checksum */
	INS_VLAN= 1<<5,		/* insert VLAN tag */
	EOP	= 1<<7,		/* end of packet */
};
2113 | |||
/* List element opcodes (opcode byte of Tx/Rx/status list elements).
 * HW_OWNER is or'ed in when the descriptor is handed to the hardware. */
enum {
	HW_OWNER	= 1<<7,
	OP_TCPWRITE	= 0x11,
	OP_TCPSTART	= 0x12,
	OP_TCPINIT	= 0x14,
	OP_TCPLCK	= 0x18,
	OP_TCPCHKSUM	= OP_TCPSTART,
	OP_TCPIS	= OP_TCPINIT | OP_TCPSTART,
	OP_TCPLW	= OP_TCPLCK | OP_TCPWRITE,
	OP_TCPLSW	= OP_TCPLCK | OP_TCPSTART | OP_TCPWRITE,
	OP_TCPLISW	= OP_TCPLCK | OP_TCPINIT | OP_TCPSTART | OP_TCPWRITE,

	OP_ADDR64	= 0x21,		/* upper 32 bits of DMA address */
	OP_VLAN		= 0x22,
	OP_ADDR64VLAN	= OP_ADDR64 | OP_VLAN,
	OP_LRGLEN	= 0x24,		/* large send length */
	OP_LRGLENVLAN	= OP_LRGLEN | OP_VLAN,
	OP_MSS		= 0x28,		/* TSO maximum segment size */
	OP_MSSVLAN	= OP_MSS | OP_VLAN,

	OP_BUFFER	= 0x40,		/* continuation buffer (fragment) */
	OP_PACKET	= 0x41,		/* start of packet */
	OP_LARGESEND	= 0x43,		/* TSO (legacy format) */
	OP_LSOV2	= 0x45,		/* TSO (new LSOv2 format) */

	/* YUKON-2 STATUS opcodes defines */
	OP_RXSTAT	= 0x60,		/* receive completion */
	OP_RXTIMESTAMP	= 0x61,
	OP_RXVLAN	= 0x62,
	OP_RXCHKS	= 0x64,		/* receive checksum result */
	OP_RXCHKSVLAN	= OP_RXCHKS | OP_RXVLAN,
	OP_RXTIMEVLAN	= OP_RXTIMESTAMP | OP_RXVLAN,
	OP_RSS_HASH	= 0x65,		/* RSS hash value */
	OP_TXINDEXLE	= 0x68,		/* transmit index report */
	OP_MACSEC	= 0x6c,
	OP_PUTIDX	= 0x70,
};
2151 | |||
/* Bits of the css (checksum status) byte in a receive status LE */
enum status_css {
	CSS_TCPUDPCSOK	= 1<<7,	/* TCP / UDP checksum is ok */
	CSS_ISUDP	= 1<<6, /* packet is a UDP packet */
	CSS_ISTCP	= 1<<5, /* packet is a TCP packet */
	CSS_ISIPFRAG	= 1<<4, /* packet is a TCP/UDP frag, CS calc not done */
	CSS_ISIPV6	= 1<<3, /* packet is a IPv6 packet */
	CSS_IPV4CSUMOK	= 1<<2, /* IP v4: checksum is ok
				 * (NOTE(review): original comment said "TCP
				 * header checksum"; verify vs. datasheet) */
	CSS_ISIPV4	= 1<<1, /* packet is a IPv4 packet */
	CSS_LINK_BIT	= 1<<0, /* port number (legacy) */
};
2162 | |||
/* Yukon 2 hardware interface */

/* Transmit list element (8 bytes, little-endian, shared with hardware;
 * __packed because the layout is fixed by the chip) */
struct sky2_tx_le {
	__le32	addr;		/* low 32 bits of buffer DMA address */
	__le16	length;		/* also vlan tag or checksum start */
	u8	ctrl;		/* control flags (UDPTCP, EOP, ...) */
	u8	opcode;		/* OP_* | HW_OWNER */
} __packed;
2170 | |||
/* Receive list element (8 bytes, little-endian, shared with hardware) */
struct sky2_rx_le {
	__le32	addr;		/* low 32 bits of buffer DMA address */
	__le16	length;		/* buffer length */
	u8	ctrl;
	u8	opcode;		/* OP_* | HW_OWNER */
} __packed;
2177 | |||
/* Status list element written back by hardware (8 bytes, little-endian) */
struct sky2_status_le {
	__le32	status;	/* also checksum */
	__le16	length;	/* also vlan tag */
	u8	css;	/* checksum status bits (enum status_css) */
	u8	opcode;	/* OP_RX* | HW_OWNER */
} __packed;
2184 | |||
/* Driver-side bookkeeping for one Tx list element: the skb to complete
 * and the DMA mapping to undo (single buffer vs. page fragment). */
struct tx_ring_info {
	struct sk_buff	*skb;
	unsigned long flags;		/* TX_MAP_* mapping kind */
#define TX_MAP_SINGLE 0x0001
#define TX_MAP_PAGE   0x0002
	DEFINE_DMA_UNMAP_ADDR(mapaddr);
	DEFINE_DMA_UNMAP_LEN(maplen);
};
2193 | |||
/* Driver-side bookkeeping for one Rx slot: the skb, the DMA mapping of
 * its linear data area, and the mappings of the page fragments used for
 * jumbo frames. */
struct rx_ring_info {
	struct sk_buff	*skb;
	dma_addr_t	data_addr;	/* mapping of skb linear data */
	DEFINE_DMA_UNMAP_LEN(data_size);
	dma_addr_t	frag_addr[ETH_JUMBO_MTU >> PAGE_SHIFT];
};
2200 | |||
/* Pause-frame flow control configuration; FC_BOTH == FC_TX | FC_RX */
enum flow_control {
	FC_NONE	= 0,
	FC_TX	= 1,
	FC_RX	= 2,
	FC_BOTH	= 3,
};
2207 | |||
/* 64-bit packet/byte counters protected for consistent reads on 32-bit
 * machines via u64_stats_sync */
struct sky2_stats {
	struct u64_stats_sync syncp;
	u64		packets;
	u64		bytes;
};
2213 | |||
/* Per-port state (a sky2_hw has up to two ports, one netdev each) */
struct sky2_port {
	struct sky2_hw	     *hw;	/* back pointer to adapter */
	struct net_device    *netdev;
	unsigned	     port;	/* port index: 0 or 1 */
	u32		     msg_enable;
	spinlock_t	     phy_lock;	/* serializes PHY register access */

	struct tx_ring_info  *tx_ring;	/* driver bookkeeping, one per LE */
	struct sky2_tx_le    *tx_le;	/* hardware Tx list elements */
	struct sky2_stats    tx_stats;

	u16		     tx_ring_size;
	u16		     tx_cons;		/* next le to check */
	u16		     tx_prod;		/* next le to use */
	u16		     tx_next;		/* debug only */

	u16		     tx_pending;
	u16		     tx_last_mss;	/* cached to skip redundant OP_MSS LEs */
	u32		     tx_last_upper;	/* cached upper DMA address bits */
	u32		     tx_tcpsum;

	/* Rx state on its own cache line to avoid false sharing with Tx */
	struct rx_ring_info  *rx_ring ____cacheline_aligned_in_smp;
	struct sky2_rx_le    *rx_le;	/* hardware Rx list elements */
	struct sky2_stats    rx_stats;

	u16		     rx_next;		/* next re to check */
	u16		     rx_put;		/* next le index to use */
	u16		     rx_pending;
	u16		     rx_data_size;
	u16		     rx_nfrags;
	u16		     rx_tag;

	/* hang-check snapshot compared by the watchdog */
	struct {
		unsigned long last;
		u32	mac_rp;
		u8	mac_lev;
		u8	fifo_rp;
		u8	fifo_lev;
	} check;

	dma_addr_t	     rx_le_map;	/* DMA address of rx_le ring */
	dma_addr_t	     tx_le_map;	/* DMA address of tx_le ring */

	u16		     advertising;	/* ADVERTISED_ bits */
	u16		     speed;	/* SPEED_1000, SPEED_100, ... */
	u8		     wol;	/* WAKE_ bits */
	u8		     duplex;	/* DUPLEX_HALF, DUPLEX_FULL */
	u16		     flags;
#define SKY2_FLAG_AUTO_SPEED		0x0002
#define SKY2_FLAG_AUTO_PAUSE		0x0004

	enum flow_control    flow_mode;		/* configured mode */
	enum flow_control    flow_status;	/* negotiated result */

#ifdef CONFIG_SKY2_DEBUG
	struct dentry	     *debugfs;
#endif
};
2272 | |||
2273 | struct sky2_hw { | ||
2274 | void __iomem *regs; | ||
2275 | struct pci_dev *pdev; | ||
2276 | struct napi_struct napi; | ||
2277 | struct net_device *dev[2]; | ||
2278 | unsigned long flags; | ||
2279 | #define SKY2_HW_USE_MSI 0x00000001 | ||
2280 | #define SKY2_HW_FIBRE_PHY 0x00000002 | ||
2281 | #define SKY2_HW_GIGABIT 0x00000004 | ||
2282 | #define SKY2_HW_NEWER_PHY 0x00000008 | ||
2283 | #define SKY2_HW_RAM_BUFFER 0x00000010 | ||
2284 | #define SKY2_HW_NEW_LE 0x00000020 /* new LSOv2 format */ | ||
2285 | #define SKY2_HW_AUTO_TX_SUM 0x00000040 /* new IP decode for Tx */ | ||
2286 | #define SKY2_HW_ADV_POWER_CTL 0x00000080 /* additional PHY power regs */ | ||
2287 | #define SKY2_HW_RSS_BROKEN 0x00000100 | ||
2288 | #define SKY2_HW_VLAN_BROKEN 0x00000200 | ||
2289 | #define SKY2_HW_RSS_CHKSUM 0x00000400 /* RSS requires chksum */ | ||
2290 | |||
2291 | u8 chip_id; | ||
2292 | u8 chip_rev; | ||
2293 | u8 pmd_type; | ||
2294 | u8 ports; | ||
2295 | |||
2296 | struct sky2_status_le *st_le; | ||
2297 | u32 st_size; | ||
2298 | u32 st_idx; | ||
2299 | dma_addr_t st_dma; | ||
2300 | |||
2301 | struct timer_list watchdog_timer; | ||
2302 | struct work_struct restart_work; | ||
2303 | wait_queue_head_t msi_wait; | ||
2304 | |||
2305 | char irq_name[0]; | ||
2306 | }; | ||
2307 | |||
2308 | static inline int sky2_is_copper(const struct sky2_hw *hw) | ||
2309 | { | ||
2310 | return !(hw->flags & SKY2_HW_FIBRE_PHY); | ||
2311 | } | ||
2312 | |||
2313 | /* Register accessor for memory mapped device */ | ||
2314 | static inline u32 sky2_read32(const struct sky2_hw *hw, unsigned reg) | ||
2315 | { | ||
2316 | return readl(hw->regs + reg); | ||
2317 | } | ||
2318 | |||
2319 | static inline u16 sky2_read16(const struct sky2_hw *hw, unsigned reg) | ||
2320 | { | ||
2321 | return readw(hw->regs + reg); | ||
2322 | } | ||
2323 | |||
2324 | static inline u8 sky2_read8(const struct sky2_hw *hw, unsigned reg) | ||
2325 | { | ||
2326 | return readb(hw->regs + reg); | ||
2327 | } | ||
2328 | |||
2329 | static inline void sky2_write32(const struct sky2_hw *hw, unsigned reg, u32 val) | ||
2330 | { | ||
2331 | writel(val, hw->regs + reg); | ||
2332 | } | ||
2333 | |||
2334 | static inline void sky2_write16(const struct sky2_hw *hw, unsigned reg, u16 val) | ||
2335 | { | ||
2336 | writew(val, hw->regs + reg); | ||
2337 | } | ||
2338 | |||
2339 | static inline void sky2_write8(const struct sky2_hw *hw, unsigned reg, u8 val) | ||
2340 | { | ||
2341 | writeb(val, hw->regs + reg); | ||
2342 | } | ||
2343 | |||
/* Yukon PHY related registers */
/* Address of per-port GMAC register 'reg': port 1 registers follow the
 * port 0 block, offset by the size of one GMAC register block. */
#define SK_GMAC_REG(port,reg) \
	(BASE_GMAC_1 + (port) * (BASE_GMAC_2-BASE_GMAC_1) + (reg))
/* NOTE(review): retry bound, presumably for PHY access polling loops in
 * sky2.c — confirm at call sites */
#define GM_PHY_RETRIES	100
2348 | |||
2349 | static inline u16 gma_read16(const struct sky2_hw *hw, unsigned port, unsigned reg) | ||
2350 | { | ||
2351 | return sky2_read16(hw, SK_GMAC_REG(port,reg)); | ||
2352 | } | ||
2353 | |||
2354 | static inline u32 gma_read32(struct sky2_hw *hw, unsigned port, unsigned reg) | ||
2355 | { | ||
2356 | unsigned base = SK_GMAC_REG(port, reg); | ||
2357 | return (u32) sky2_read16(hw, base) | ||
2358 | | (u32) sky2_read16(hw, base+4) << 16; | ||
2359 | } | ||
2360 | |||
2361 | static inline u64 gma_read64(struct sky2_hw *hw, unsigned port, unsigned reg) | ||
2362 | { | ||
2363 | unsigned base = SK_GMAC_REG(port, reg); | ||
2364 | |||
2365 | return (u64) sky2_read16(hw, base) | ||
2366 | | (u64) sky2_read16(hw, base+4) << 16 | ||
2367 | | (u64) sky2_read16(hw, base+8) << 32 | ||
2368 | | (u64) sky2_read16(hw, base+12) << 48; | ||
2369 | } | ||
2370 | |||
/* There is no way to atomically read a 32 bit counter from the GMAC
 * (it is read as two 16-bit halves), so read until two consecutive
 * reads return the same value — this guards against tearing while the
 * hardware updates the counter between the half-word reads. */
static inline u32 get_stats32(struct sky2_hw *hw, unsigned port, unsigned reg)
{
	u32 val;

	do {
		val = gma_read32(hw, port, reg);
	} while (gma_read32(hw, port, reg) != val);	/* retry on mismatch */

	return val;
}
2382 | |||
/* 64-bit variant of get_stats32: re-read the four half-words until two
 * consecutive full reads agree, since the hardware may update the
 * counter mid-read. */
static inline u64 get_stats64(struct sky2_hw *hw, unsigned port, unsigned reg)
{
	u64 val;

	do {
		val = gma_read64(hw, port, reg);
	} while (gma_read64(hw, port, reg) != val);	/* retry on mismatch */

	return val;
}
2393 | |||
2394 | static inline void gma_write16(const struct sky2_hw *hw, unsigned port, int r, u16 v) | ||
2395 | { | ||
2396 | sky2_write16(hw, SK_GMAC_REG(port,r), v); | ||
2397 | } | ||
2398 | |||
2399 | static inline void gma_set_addr(struct sky2_hw *hw, unsigned port, unsigned reg, | ||
2400 | const u8 *addr) | ||
2401 | { | ||
2402 | gma_write16(hw, port, reg, (u16) addr[0] | ((u16) addr[1] << 8)); | ||
2403 | gma_write16(hw, port, reg+4,(u16) addr[2] | ((u16) addr[3] << 8)); | ||
2404 | gma_write16(hw, port, reg+8,(u16) addr[4] | ((u16) addr[5] << 8)); | ||
2405 | } | ||
2406 | |||
2407 | /* PCI config space access */ | ||
2408 | static inline u32 sky2_pci_read32(const struct sky2_hw *hw, unsigned reg) | ||
2409 | { | ||
2410 | return sky2_read32(hw, Y2_CFG_SPC + reg); | ||
2411 | } | ||
2412 | |||
2413 | static inline u16 sky2_pci_read16(const struct sky2_hw *hw, unsigned reg) | ||
2414 | { | ||
2415 | return sky2_read16(hw, Y2_CFG_SPC + reg); | ||
2416 | } | ||
2417 | |||
2418 | static inline void sky2_pci_write32(struct sky2_hw *hw, unsigned reg, u32 val) | ||
2419 | { | ||
2420 | sky2_write32(hw, Y2_CFG_SPC + reg, val); | ||
2421 | } | ||
2422 | |||
2423 | static inline void sky2_pci_write16(struct sky2_hw *hw, unsigned reg, u16 val) | ||
2424 | { | ||
2425 | sky2_write16(hw, Y2_CFG_SPC + reg, val); | ||
2426 | } | ||
2427 | #endif | ||