author		Jeff Kirsher <jeffrey.t.kirsher@intel.com>	2011-05-13 02:04:46 -0400
committer	Jeff Kirsher <jeffrey.t.kirsher@intel.com>	2011-08-11 05:33:43 -0400
commit		e689cf4a042772f727450035b102579b0c01bdc7
tree		f2b17aa21b8358a8f7589fed46fa08688b439464 /drivers/net/ethernet/sun
parent		8efc91254fda97ee78e2e0b8e016120e664131de
cassini/niu/sun*: Move the Sun drivers
Move the Sun drivers into drivers/net/ethernet/sun/ and make the
necessary Kconfig and Makefile changes.

Oliver Hartkopp <socketcan@hartkopp.net> suggested removing the
sun* prefix on the driver names. I will leave that type of change
up to the driver maintainers.
CC: Sam Creasey <sammy@sammy.net>
CC: Adrian Sun <asun@darksunrising.com>
CC: Benjamin Herrenschmidt <benh@kernel.crashing.org>
Signed-off-by: Jeff Kirsher <jeffrey.t.kirsher@intel.com>
Diffstat (limited to 'drivers/net/ethernet/sun')
-rw-r--r--	drivers/net/ethernet/sun/Kconfig	86
-rw-r--r--	drivers/net/ethernet/sun/Makefile	11
-rw-r--r--	drivers/net/ethernet/sun/cassini.c	5305
-rw-r--r--	drivers/net/ethernet/sun/cassini.h	2914
-rw-r--r--	drivers/net/ethernet/sun/niu.c	10263
-rw-r--r--	drivers/net/ethernet/sun/niu.h	3306
-rw-r--r--	drivers/net/ethernet/sun/sunbmac.c	1306
-rw-r--r--	drivers/net/ethernet/sun/sunbmac.h	355
-rw-r--r--	drivers/net/ethernet/sun/sungem.c	3049
-rw-r--r--	drivers/net/ethernet/sun/sungem.h	1027
-rw-r--r--	drivers/net/ethernet/sun/sungem_phy.c	1200
-rw-r--r--	drivers/net/ethernet/sun/sungem_phy.h	132
-rw-r--r--	drivers/net/ethernet/sun/sunhme.c	3360
-rw-r--r--	drivers/net/ethernet/sun/sunhme.h	512
-rw-r--r--	drivers/net/ethernet/sun/sunqe.c	1007
-rw-r--r--	drivers/net/ethernet/sun/sunqe.h	350
-rw-r--r--	drivers/net/ethernet/sun/sunvnet.c	1284
-rw-r--r--	drivers/net/ethernet/sun/sunvnet.h	83
18 files changed, 35550 insertions, 0 deletions
diff --git a/drivers/net/ethernet/sun/Kconfig b/drivers/net/ethernet/sun/Kconfig
new file mode 100644
index 000000000000..87b17a7e6dfe
--- /dev/null
+++ b/drivers/net/ethernet/sun/Kconfig
@@ -0,0 +1,86 @@
#
# Sun network device configuration
#

config NET_VENDOR_SUN
	bool "Sun devices"
	depends on SUN3 || SBUS || PCI || SUN_LDOMS
	---help---
	  If you have a network (Ethernet) card belonging to this class, say
	  Y and read the Ethernet-HOWTO, available from
	  <http://www.tldp.org/docs.html#howto>.

	  Note that the answer to this question doesn't directly affect the
	  kernel: saying N will just cause the configurator to skip all
	  the questions about Sun network interfaces. If you say Y, you will be
	  asked for your specific card in the following questions.

if NET_VENDOR_SUN

config HAPPYMEAL
	tristate "Sun Happy Meal 10/100baseT support"
	depends on (SBUS || PCI)
	select CRC32
	---help---
	  This driver supports the "hme" interface present on most Ultra
	  systems and as an option on older Sbus systems. This driver supports
	  both PCI and Sbus devices. This driver also supports the "qfe" quad
	  100baseT device available in both PCI and Sbus configurations.

	  To compile this driver as a module, choose M here: the module
	  will be called sunhme.

config SUNBMAC
	tristate "Sun BigMAC 10/100baseT support (EXPERIMENTAL)"
	depends on SBUS && EXPERIMENTAL
	select CRC32
	---help---
	  This driver supports the "be" interface available as an Sbus option.
	  This is Sun's older 100baseT Ethernet device.

	  To compile this driver as a module, choose M here: the module
	  will be called sunbmac.

config SUNQE
	tristate "Sun QuadEthernet support"
	depends on SBUS
	select CRC32
	---help---
	  This driver supports the "qe" 10baseT Ethernet device, available as
	  an Sbus option. Note that this is not the same as Quad FastEthernet
	  "qfe" which is supported by the Happy Meal driver instead.

	  To compile this driver as a module, choose M here: the module
	  will be called sunqe.

config SUNGEM
	tristate "Sun GEM support"
	depends on PCI
	select CRC32
	---help---
	  Support for the Sun GEM chip, aka Sun GigabitEthernet/P 2.0. See also
	  <http://www.sun.com/products-n-solutions/hardware/docs/pdf/806-3985-10.pdf>.

config CASSINI
	tristate "Sun Cassini support"
	depends on PCI
	select CRC32
	---help---
	  Support for the Sun Cassini chip, aka Sun GigaSwift Ethernet. See also
	  <http://www.sun.com/products-n-solutions/hardware/docs/pdf/817-4341-10.pdf>

config SUNVNET
	tristate "Sun Virtual Network support"
	depends on SUN_LDOMS
	---help---
	  Support for virtual network devices under Sun Logical Domains.

config NIU
	tristate "Sun Neptune 10Gbit Ethernet support"
	depends on PCI
	select CRC32
	---help---
	  This enables support for cards based upon Sun's
	  Neptune chipset.

endif # NET_VENDOR_SUN
diff --git a/drivers/net/ethernet/sun/Makefile b/drivers/net/ethernet/sun/Makefile
new file mode 100644
index 000000000000..4f25217a75f8
--- /dev/null
+++ b/drivers/net/ethernet/sun/Makefile
@@ -0,0 +1,11 @@
#
# Makefile for the Sun network device drivers.
#

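# Each CONFIG_<name> symbol below expands to y or m from the Kconfig
# choices above, so a driver is linked into the kernel or built as the
# named module accordingly.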
obj-$(CONFIG_HAPPYMEAL) += sunhme.o
obj-$(CONFIG_SUNQE) += sunqe.o
obj-$(CONFIG_SUNBMAC) += sunbmac.o
obj-$(CONFIG_SUNGEM) += sungem.o sungem_phy.o
obj-$(CONFIG_CASSINI) += cassini.o
obj-$(CONFIG_SUNVNET) += sunvnet.o
obj-$(CONFIG_NIU) += niu.o
diff --git a/drivers/net/ethernet/sun/cassini.c b/drivers/net/ethernet/sun/cassini.c
new file mode 100644
index 000000000000..646c86bcc545
--- /dev/null
+++ b/drivers/net/ethernet/sun/cassini.c
@@ -0,0 +1,5305 @@
/* cassini.c: Sun Microsystems Cassini(+) ethernet driver.
 *
 * Copyright (C) 2004 Sun Microsystems Inc.
 * Copyright (C) 2003 Adrian Sun (asun@darksunrising.com)
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License as
 * published by the Free Software Foundation; either version 2 of the
 * License, or (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA
 * 02111-1307, USA.
 *
 * This driver uses the sungem driver (c) David Miller
 * (davem@redhat.com) as its basis.
 *
 * The cassini chip has a number of features that distinguish it from
 * the gem chip:
 *  4 transmit descriptor rings that are used for either QoS (VLAN) or
 *      load balancing (non-VLAN mode)
 *  batching of multiple packets
 *  multiple CPU dispatching
 *  page-based RX descriptor engine with separate completion rings
 *  Gigabit support (GMII and PCS interface)
 *  MIF link up/down detection works
 *
 * RX is handled by page sized buffers that are attached as fragments to
 * the skb. here's what's done:
 *  -- driver allocates pages at a time and keeps reference counts
 *     on them.
 *  -- the upper protocol layers assume that the header is in the skb
 *     itself. as a result, cassini will copy a small amount (64 bytes)
 *     to make them happy.
 *  -- driver appends the rest of the data pages as frags to skbuffs
 *     and increments the reference count
 *  -- on page reclamation, the driver swaps the page with a spare page.
 *     if that page is still in use, it frees its reference to that page,
 *     and allocates a new page for use. otherwise, it just recycles
 *     the page.
 *
 * NOTE: cassini can parse the header. however, it's not worth it
 *       as long as the network stack requires a header copy.
 *
 * TX has 4 queues. currently these queues are used in a round-robin
 * fashion for load balancing. They can also be used for QoS. for that
 * to work, however, QoS information needs to be exposed down to the driver
 * level so that subqueues get targeted to particular transmit rings.
 * alternatively, the queues can be configured via use of the all-purpose
 * ioctl.
 *
 * RX DATA: the rx completion ring has all the info, but the rx desc
 * ring has all of the data. RX can conceivably come in under multiple
 * interrupts, but the INT# assignment needs to be set up properly by
 * the BIOS and conveyed to the driver. PCI BIOSes don't know how to do
 * that. also, the two descriptor rings are designed to distinguish between
 * encrypted and non-encrypted packets, but we use them for buffering
 * instead.
 *
 * by default, the selective clear mask is set up to process rx packets.
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/compiler.h>
#include <linux/slab.h>
#include <linux/delay.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/vmalloc.h>
#include <linux/ioport.h>
#include <linux/pci.h>
#include <linux/mm.h>
#include <linux/highmem.h>
#include <linux/list.h>
#include <linux/dma-mapping.h>

#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/skbuff.h>
#include <linux/ethtool.h>
#include <linux/crc32.h>
#include <linux/random.h>
#include <linux/mii.h>
#include <linux/ip.h>
#include <linux/tcp.h>
#include <linux/mutex.h>
#include <linux/firmware.h>

#include <net/checksum.h>

#include <linux/atomic.h>
#include <asm/system.h>
#include <asm/io.h>
#include <asm/byteorder.h>
#include <asm/uaccess.h>

#define cas_page_map(x)      kmap_atomic((x), KM_SKB_DATA_SOFTIRQ)
#define cas_page_unmap(x)    kunmap_atomic((x), KM_SKB_DATA_SOFTIRQ)
#define CAS_NCPUS            num_online_cpus()

#define cas_skb_release(x)  netif_rx(x)

/* select which firmware to use */
#define USE_HP_WORKAROUND
#define HP_WORKAROUND_DEFAULT /* select which firmware to use as default */
#define CAS_HP_ALT_FIRMWARE   cas_prog_null /* alternate firmware */

#include "cassini.h"

#define USE_TX_COMPWB      /* use completion writeback registers */
#define USE_CSMA_CD_PROTO  /* standard CSMA/CD */
#define USE_RX_BLANK       /* hw interrupt mitigation */
#undef USE_ENTROPY_DEV     /* don't test for entropy device */

/* NOTE: these aren't useable unless PCI interrupts can be assigned.
 * also, we need to make cp->lock finer-grained.
 */
#undef  USE_PCI_INTB
#undef  USE_PCI_INTC
#undef  USE_PCI_INTD
#undef  USE_QOS

#undef  USE_VPD_DEBUG       /* debug vpd information if defined */

/* rx processing options */
#define USE_PAGE_ORDER      /* specify to allocate large rx pages */
#define RX_DONT_BATCH  0    /* if 1, don't batch flows */
#define RX_COPY_ALWAYS 0    /* if 0, use frags */
#define RX_COPY_MIN    64   /* copy a little to make upper layers happy */
#undef  RX_COUNT_BUFFERS    /* define to calculate RX buffer stats */

#define DRV_MODULE_NAME		"cassini"
#define DRV_MODULE_VERSION	"1.6"
#define DRV_MODULE_RELDATE	"21 May 2008"

#define CAS_DEF_MSG_ENABLE	  \
	(NETIF_MSG_DRV		| \
	 NETIF_MSG_PROBE	| \
	 NETIF_MSG_LINK		| \
	 NETIF_MSG_TIMER	| \
	 NETIF_MSG_IFDOWN	| \
	 NETIF_MSG_IFUP		| \
	 NETIF_MSG_RX_ERR	| \
	 NETIF_MSG_TX_ERR)

/* length of time before we decide the hardware is borked,
 * and dev->tx_timeout() should be called to fix the problem
 */
#define CAS_TX_TIMEOUT			(HZ)
#define CAS_LINK_TIMEOUT                (22*HZ/10)
#define CAS_LINK_FAST_TIMEOUT           (1)

/* timeout values for state changing. these specify the number
 * of 10us delays to be used before giving up.
 */
#define STOP_TRIES_PHY 1000
#define STOP_TRIES     5000

/* specify a minimum frame size to deal with some fifo issues
 * max mtu == 2 * page size - ethernet header - 64 - swivel =
 *            2 * page_size - 0x50
 */
#define CAS_MIN_FRAME			97
#define CAS_1000MB_MIN_FRAME            255
#define CAS_MIN_MTU			60
#define CAS_MAX_MTU			min(((cp->page_size << 1) - 0x50), 9000)

#if 1
/*
 * Eliminate these and use separate atomic counters for each, to
 * avoid a race condition.
 */
#else
#define CAS_RESET_MTU                   1
#define CAS_RESET_ALL                   2
#define CAS_RESET_SPARE                 3
#endif

static char version[] __devinitdata =
	DRV_MODULE_NAME ".c:v" DRV_MODULE_VERSION " (" DRV_MODULE_RELDATE ")\n";

static int cassini_debug = -1;	/* -1 == use CAS_DEF_MSG_ENABLE as value */
static int link_mode;

MODULE_AUTHOR("Adrian Sun (asun@darksunrising.com)");
MODULE_DESCRIPTION("Sun Cassini(+) ethernet driver");
MODULE_LICENSE("GPL");
MODULE_FIRMWARE("sun/cassini.bin");
module_param(cassini_debug, int, 0);
MODULE_PARM_DESC(cassini_debug, "Cassini bitmapped debugging message enable value");
module_param(link_mode, int, 0);
MODULE_PARM_DESC(link_mode, "default link mode");

/*
 * Work around for a PCS bug in which the link goes down due to the chip
 * being confused and never showing a link status of "up."
 */
#define DEFAULT_LINKDOWN_TIMEOUT 5
/*
 * Value in seconds, for user input.
 */
static int linkdown_timeout = DEFAULT_LINKDOWN_TIMEOUT;
module_param(linkdown_timeout, int, 0);
MODULE_PARM_DESC(linkdown_timeout,
		 "min reset interval in sec. for PCS linkdown issue; disabled if not positive");

/*
 * value in 'ticks' (units used by jiffies). Set when we init the
 * module because 'HZ' is actually a function call on some flavors of
 * Linux. This will default to DEFAULT_LINKDOWN_TIMEOUT * HZ.
 */
static int link_transition_timeout;



static u16 link_modes[] __devinitdata = {
	BMCR_ANENABLE,			 /* 0 : autoneg */
	0,				 /* 1 : 10bt half duplex */
	BMCR_SPEED100,			 /* 2 : 100bt half duplex */
	BMCR_FULLDPLX,			 /* 3 : 10bt full duplex */
	BMCR_SPEED100|BMCR_FULLDPLX,	 /* 4 : 100bt full duplex */
	CAS_BMCR_SPEED1000|BMCR_FULLDPLX /* 5 : 1000bt full duplex */
};
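/* Note: probe code later in this file validates the "link_mode" module
 * parameter against this table and uses the selected entry to seed the
 * initial BMCR-style link control word (cp->link_cntl).
 */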

static DEFINE_PCI_DEVICE_TABLE(cas_pci_tbl) = {
	{ PCI_VENDOR_ID_SUN, PCI_DEVICE_ID_SUN_CASSINI,
	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
	{ PCI_VENDOR_ID_NS, PCI_DEVICE_ID_NS_SATURN,
	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
	{ 0, }
};

MODULE_DEVICE_TABLE(pci, cas_pci_tbl);

static void cas_set_link_modes(struct cas *cp);

static inline void cas_lock_tx(struct cas *cp)
{
	int i;

	for (i = 0; i < N_TX_RINGS; i++)
		spin_lock(&cp->tx_lock[i]);
}

static inline void cas_lock_all(struct cas *cp)
{
	spin_lock_irq(&cp->lock);
	cas_lock_tx(cp);
}

/* WTZ: QA was finding deadlock problems with the previous
 * versions after long test runs with multiple cards per machine.
 * See if replacing cas_lock_all with safer versions helps. The
 * symptoms QA is reporting match those we'd expect if interrupts
 * aren't being properly restored, and we fixed a previous deadlock
 * with similar symptoms by using save/restore versions in other
 * places.
 */
#define cas_lock_all_save(cp, flags) \
do { \
	struct cas *xxxcp = (cp); \
	spin_lock_irqsave(&xxxcp->lock, flags); \
	cas_lock_tx(xxxcp); \
} while (0)

static inline void cas_unlock_tx(struct cas *cp)
{
	int i;

	for (i = N_TX_RINGS; i > 0; i--)
		spin_unlock(&cp->tx_lock[i - 1]);
}

static inline void cas_unlock_all(struct cas *cp)
{
	cas_unlock_tx(cp);
	spin_unlock_irq(&cp->lock);
}

#define cas_unlock_all_restore(cp, flags) \
do { \
	struct cas *xxxcp = (cp); \
	cas_unlock_tx(xxxcp); \
	spin_unlock_irqrestore(&xxxcp->lock, flags); \
} while (0)
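/* Illustrative usage of the save/restore pair above (not driver code):
 *
 *	unsigned long flags;
 *
 *	cas_lock_all_save(cp, flags);
 *	... touch state shared with the interrupt handler ...
 *	cas_unlock_all_restore(cp, flags);
 *
 * Note the TX ring locks nest inside cp->lock and are dropped in
 * reverse order by cas_unlock_tx().
 */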

static void cas_disable_irq(struct cas *cp, const int ring)
{
	/* Make sure we won't get any more interrupts */
	if (ring == 0) {
		writel(0xFFFFFFFF, cp->regs + REG_INTR_MASK);
		return;
	}

	/* disable completion interrupts and selectively mask */
	if (cp->cas_flags & CAS_FLAG_REG_PLUS) {
		switch (ring) {
#if defined (USE_PCI_INTB) || defined(USE_PCI_INTC) || defined(USE_PCI_INTD)
#ifdef USE_PCI_INTB
		case 1:
#endif
#ifdef USE_PCI_INTC
		case 2:
#endif
#ifdef USE_PCI_INTD
		case 3:
#endif
			writel(INTRN_MASK_CLEAR_ALL | INTRN_MASK_RX_EN,
			       cp->regs + REG_PLUS_INTRN_MASK(ring));
			break;
#endif
		default:
			writel(INTRN_MASK_CLEAR_ALL, cp->regs +
			       REG_PLUS_INTRN_MASK(ring));
			break;
		}
	}
}

static inline void cas_mask_intr(struct cas *cp)
{
	int i;

	for (i = 0; i < N_RX_COMP_RINGS; i++)
		cas_disable_irq(cp, i);
}

static void cas_enable_irq(struct cas *cp, const int ring)
{
	if (ring == 0) { /* all but TX_DONE */
		writel(INTR_TX_DONE, cp->regs + REG_INTR_MASK);
		return;
	}

	if (cp->cas_flags & CAS_FLAG_REG_PLUS) {
		switch (ring) {
#if defined (USE_PCI_INTB) || defined(USE_PCI_INTC) || defined(USE_PCI_INTD)
#ifdef USE_PCI_INTB
		case 1:
#endif
#ifdef USE_PCI_INTC
		case 2:
#endif
#ifdef USE_PCI_INTD
		case 3:
#endif
			writel(INTRN_MASK_RX_EN, cp->regs +
			       REG_PLUS_INTRN_MASK(ring));
			break;
#endif
		default:
			break;
		}
	}
}

static inline void cas_unmask_intr(struct cas *cp)
{
	int i;

	for (i = 0; i < N_RX_COMP_RINGS; i++)
		cas_enable_irq(cp, i);
}

static inline void cas_entropy_gather(struct cas *cp)
{
#ifdef USE_ENTROPY_DEV
	if ((cp->cas_flags & CAS_FLAG_ENTROPY_DEV) == 0)
		return;

	batch_entropy_store(readl(cp->regs + REG_ENTROPY_IV),
			    readl(cp->regs + REG_ENTROPY_IV),
			    sizeof(uint64_t)*8);
#endif
}

static inline void cas_entropy_reset(struct cas *cp)
{
#ifdef USE_ENTROPY_DEV
	if ((cp->cas_flags & CAS_FLAG_ENTROPY_DEV) == 0)
		return;

	writel(BIM_LOCAL_DEV_PAD | BIM_LOCAL_DEV_PROM | BIM_LOCAL_DEV_EXT,
	       cp->regs + REG_BIM_LOCAL_DEV_EN);
	writeb(ENTROPY_RESET_STC_MODE, cp->regs + REG_ENTROPY_RESET);
	writeb(0x55, cp->regs + REG_ENTROPY_RAND_REG);

	/* if we read back 0x0, we don't have an entropy device */
	if (readb(cp->regs + REG_ENTROPY_RAND_REG) == 0)
		cp->cas_flags &= ~CAS_FLAG_ENTROPY_DEV;
#endif
}

/* access to the phy. the following assumes that we've initialized the MIF to
 * be in frame rather than bit-bang mode
 */
static u16 cas_phy_read(struct cas *cp, int reg)
{
	u32 cmd;
	int limit = STOP_TRIES_PHY;

	cmd = MIF_FRAME_ST | MIF_FRAME_OP_READ;
	cmd |= CAS_BASE(MIF_FRAME_PHY_ADDR, cp->phy_addr);
	cmd |= CAS_BASE(MIF_FRAME_REG_ADDR, reg);
	cmd |= MIF_FRAME_TURN_AROUND_MSB;
	writel(cmd, cp->regs + REG_MIF_FRAME);

	/* poll for completion */
	while (limit-- > 0) {
		udelay(10);
		cmd = readl(cp->regs + REG_MIF_FRAME);
		if (cmd & MIF_FRAME_TURN_AROUND_LSB)
			return cmd & MIF_FRAME_DATA_MASK;
	}
	return 0xFFFF; /* -1 */
}
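/* CAS_BASE(FIELD, v), from cassini.h, shifts v into FIELD's bit position
 * and masks it to the field width; CAS_VAL() is the matching extractor.
 * That is how the PHY address and register number are packed into the
 * MIF frame word above.
 */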

static int cas_phy_write(struct cas *cp, int reg, u16 val)
{
	int limit = STOP_TRIES_PHY;
	u32 cmd;

	cmd = MIF_FRAME_ST | MIF_FRAME_OP_WRITE;
	cmd |= CAS_BASE(MIF_FRAME_PHY_ADDR, cp->phy_addr);
	cmd |= CAS_BASE(MIF_FRAME_REG_ADDR, reg);
	cmd |= MIF_FRAME_TURN_AROUND_MSB;
	cmd |= val & MIF_FRAME_DATA_MASK;
	writel(cmd, cp->regs + REG_MIF_FRAME);

	/* poll for completion */
	while (limit-- > 0) {
		udelay(10);
		cmd = readl(cp->regs + REG_MIF_FRAME);
		if (cmd & MIF_FRAME_TURN_AROUND_LSB)
			return 0;
	}
	return -1;
}

static void cas_phy_powerup(struct cas *cp)
{
	u16 ctl = cas_phy_read(cp, MII_BMCR);

	if ((ctl & BMCR_PDOWN) == 0)
		return;
	ctl &= ~BMCR_PDOWN;
	cas_phy_write(cp, MII_BMCR, ctl);
}

static void cas_phy_powerdown(struct cas *cp)
{
	u16 ctl = cas_phy_read(cp, MII_BMCR);

	if (ctl & BMCR_PDOWN)
		return;
	ctl |= BMCR_PDOWN;
	cas_phy_write(cp, MII_BMCR, ctl);
}

/* cp->lock held. note: the last put_page will free the buffer */
static int cas_page_free(struct cas *cp, cas_page_t *page)
{
	pci_unmap_page(cp->pdev, page->dma_addr, cp->page_size,
		       PCI_DMA_FROMDEVICE);
	__free_pages(page->buffer, cp->page_order);
	kfree(page);
	return 0;
}

#ifdef RX_COUNT_BUFFERS
#define RX_USED_ADD(x, y)       ((x)->used += (y))
#define RX_USED_SET(x, y)       ((x)->used  = (y))
#else
#define RX_USED_ADD(x, y)
#define RX_USED_SET(x, y)
#endif

/* local page allocation routines for the receive buffers. jumbo pages
 * require at least 8K contiguous and 8K aligned buffers.
 */
static cas_page_t *cas_page_alloc(struct cas *cp, const gfp_t flags)
{
	cas_page_t *page;

	page = kmalloc(sizeof(cas_page_t), flags);
	if (!page)
		return NULL;

	INIT_LIST_HEAD(&page->list);
	RX_USED_SET(page, 0);
	page->buffer = alloc_pages(flags, cp->page_order);
	if (!page->buffer)
		goto page_err;
	page->dma_addr = pci_map_page(cp->pdev, page->buffer, 0,
				      cp->page_size, PCI_DMA_FROMDEVICE);
	return page;

page_err:
	kfree(page);
	return NULL;
}
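/* Illustrative sketch (compiled out; not part of the driver, and the
 * function name is hypothetical): how the page-based RX scheme described
 * in the header comment turns one of these pages into an skb -- copy
 * RX_COPY_MIN bytes into the linear area so the stack can parse headers,
 * then attach the remainder of the page as a fragment. The real RX path
 * later in this file does the same with DMA syncing and error handling,
 * both omitted here.
 */
#if 0
static struct sk_buff *cas_rx_frag_sketch(struct cas *cp, cas_page_t *page,
					  int off, int len)
{
	struct sk_buff *skb = netdev_alloc_skb(cp->dev, RX_COPY_MIN + 2);
	char *addr;

	if (!skb)
		return NULL;
	skb_reserve(skb, 2);	/* align the IP header */
	addr = cas_page_map(page->buffer);
	memcpy(skb_put(skb, RX_COPY_MIN), addr + off, RX_COPY_MIN);
	cas_page_unmap(addr);

	/* hand the rest of the page to the skb as a frag and take a ref */
	get_page(page->buffer);
	skb_fill_page_desc(skb, 0, page->buffer, off + RX_COPY_MIN,
			   len - RX_COPY_MIN);
	skb->len      += len - RX_COPY_MIN;
	skb->data_len += len - RX_COPY_MIN;
	return skb;
}
#endif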

/* initialize spare pool of rx buffers, but allocate during the open */
static void cas_spare_init(struct cas *cp)
{
	spin_lock(&cp->rx_inuse_lock);
	INIT_LIST_HEAD(&cp->rx_inuse_list);
	spin_unlock(&cp->rx_inuse_lock);

	spin_lock(&cp->rx_spare_lock);
	INIT_LIST_HEAD(&cp->rx_spare_list);
	cp->rx_spares_needed = RX_SPARE_COUNT;
	spin_unlock(&cp->rx_spare_lock);
}

/* used on close. free all the spare buffers. */
static void cas_spare_free(struct cas *cp)
{
	struct list_head list, *elem, *tmp;

	/* free spare buffers */
	INIT_LIST_HEAD(&list);
	spin_lock(&cp->rx_spare_lock);
	list_splice_init(&cp->rx_spare_list, &list);
	spin_unlock(&cp->rx_spare_lock);
	list_for_each_safe(elem, tmp, &list) {
		cas_page_free(cp, list_entry(elem, cas_page_t, list));
	}

	INIT_LIST_HEAD(&list);
#if 1
	/*
	 * Looks like Adrian had protected this with a different
	 * lock than used everywhere else to manipulate this list.
	 */
	spin_lock(&cp->rx_inuse_lock);
	list_splice_init(&cp->rx_inuse_list, &list);
	spin_unlock(&cp->rx_inuse_lock);
#else
	spin_lock(&cp->rx_spare_lock);
	list_splice_init(&cp->rx_inuse_list, &list);
	spin_unlock(&cp->rx_spare_lock);
#endif
	list_for_each_safe(elem, tmp, &list) {
		cas_page_free(cp, list_entry(elem, cas_page_t, list));
	}
}

/* replenish spares if needed */
static void cas_spare_recover(struct cas *cp, const gfp_t flags)
{
	struct list_head list, *elem, *tmp;
	int needed, i;

	/* check inuse list. if we don't need any more free buffers,
	 * just free it
	 */

	/* make a local copy of the list */
	INIT_LIST_HEAD(&list);
	spin_lock(&cp->rx_inuse_lock);
	list_splice_init(&cp->rx_inuse_list, &list);
	spin_unlock(&cp->rx_inuse_lock);

	list_for_each_safe(elem, tmp, &list) {
		cas_page_t *page = list_entry(elem, cas_page_t, list);

		/*
		 * With the lockless pagecache, cassini buffering scheme gets
		 * slightly less accurate: we might find that a page has an
		 * elevated reference count here, due to a speculative ref,
		 * and skip it as in-use. Ideally we would be able to reclaim
		 * it. However this would be such a rare case, it doesn't
		 * matter too much as we should pick it up the next time round.
		 *
		 * Importantly, if we find that the page has a refcount of 1
		 * here (our refcount), then we know it is definitely not inuse
		 * so we can reuse it.
		 */
		if (page_count(page->buffer) > 1)
			continue;

		list_del(elem);
		spin_lock(&cp->rx_spare_lock);
		if (cp->rx_spares_needed > 0) {
			list_add(elem, &cp->rx_spare_list);
			cp->rx_spares_needed--;
			spin_unlock(&cp->rx_spare_lock);
		} else {
			spin_unlock(&cp->rx_spare_lock);
			cas_page_free(cp, page);
		}
	}

	/* put any inuse buffers back on the list */
	if (!list_empty(&list)) {
		spin_lock(&cp->rx_inuse_lock);
		list_splice(&list, &cp->rx_inuse_list);
		spin_unlock(&cp->rx_inuse_lock);
	}

	spin_lock(&cp->rx_spare_lock);
	needed = cp->rx_spares_needed;
	spin_unlock(&cp->rx_spare_lock);
	if (!needed)
		return;

	/* we still need spares, so try to allocate some */
	INIT_LIST_HEAD(&list);
	i = 0;
	while (i < needed) {
		cas_page_t *spare = cas_page_alloc(cp, flags);
		if (!spare)
			break;
		list_add(&spare->list, &list);
		i++;
	}

	spin_lock(&cp->rx_spare_lock);
	list_splice(&list, &cp->rx_spare_list);
	cp->rx_spares_needed -= i;
	spin_unlock(&cp->rx_spare_lock);
}

/* pull a page from the list. */
static cas_page_t *cas_page_dequeue(struct cas *cp)
{
	struct list_head *entry;
	int recover;

	spin_lock(&cp->rx_spare_lock);
	if (list_empty(&cp->rx_spare_list)) {
		/* try to do a quick recovery */
		spin_unlock(&cp->rx_spare_lock);
		cas_spare_recover(cp, GFP_ATOMIC);
		spin_lock(&cp->rx_spare_lock);
		if (list_empty(&cp->rx_spare_list)) {
			netif_err(cp, rx_err, cp->dev,
				  "no spare buffers available\n");
			spin_unlock(&cp->rx_spare_lock);
			return NULL;
		}
	}

	entry = cp->rx_spare_list.next;
	list_del(entry);
	recover = ++cp->rx_spares_needed;
	spin_unlock(&cp->rx_spare_lock);

	/* trigger the timer to do the recovery */
	if ((recover & (RX_SPARE_RECOVER_VAL - 1)) == 0) {
#if 1
		atomic_inc(&cp->reset_task_pending);
		atomic_inc(&cp->reset_task_pending_spare);
		schedule_work(&cp->reset_task);
#else
		atomic_set(&cp->reset_task_pending, CAS_RESET_SPARE);
		schedule_work(&cp->reset_task);
#endif
	}
	return list_entry(entry, cas_page_t, list);
}
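/* Spare-pool flow in brief: the RX path swaps a still-referenced page
 * for one pulled via cas_page_dequeue(); every RX_SPARE_RECOVER_VAL
 * dequeues, the reset task is scheduled (see above), which replenishes
 * the pool through cas_spare_recover().
 */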


static void cas_mif_poll(struct cas *cp, const int enable)
{
	u32 cfg;

	cfg  = readl(cp->regs + REG_MIF_CFG);
	cfg &= (MIF_CFG_MDIO_0 | MIF_CFG_MDIO_1);

	if (cp->phy_type & CAS_PHY_MII_MDIO1)
		cfg |= MIF_CFG_PHY_SELECT;

	/* poll and interrupt on link status change. */
	if (enable) {
		cfg |= MIF_CFG_POLL_EN;
		cfg |= CAS_BASE(MIF_CFG_POLL_REG, MII_BMSR);
		cfg |= CAS_BASE(MIF_CFG_POLL_PHY, cp->phy_addr);
	}
	writel((enable) ? ~(BMSR_LSTATUS | BMSR_ANEGCOMPLETE) : 0xFFFF,
	       cp->regs + REG_MIF_MASK);
	writel(cfg, cp->regs + REG_MIF_CFG);
}

/* Must be invoked under cp->lock */
static void cas_begin_auto_negotiation(struct cas *cp, struct ethtool_cmd *ep)
{
	u16 ctl;
#if 1
	int lcntl;
	int changed = 0;
	int oldstate = cp->lstate;
	int link_was_not_down = !(oldstate == link_down);
#endif
	/* Setup link parameters */
	if (!ep)
		goto start_aneg;
	lcntl = cp->link_cntl;
	if (ep->autoneg == AUTONEG_ENABLE)
		cp->link_cntl = BMCR_ANENABLE;
	else {
		u32 speed = ethtool_cmd_speed(ep);
		cp->link_cntl = 0;
		if (speed == SPEED_100)
			cp->link_cntl |= BMCR_SPEED100;
		else if (speed == SPEED_1000)
			cp->link_cntl |= CAS_BMCR_SPEED1000;
		if (ep->duplex == DUPLEX_FULL)
			cp->link_cntl |= BMCR_FULLDPLX;
	}
#if 1
	changed = (lcntl != cp->link_cntl);
#endif
start_aneg:
	if (cp->lstate == link_up) {
		netdev_info(cp->dev, "PCS link down\n");
	} else {
		if (changed) {
			netdev_info(cp->dev, "link configuration changed\n");
		}
	}
	cp->lstate = link_down;
	cp->link_transition = LINK_TRANSITION_LINK_DOWN;
	if (!cp->hw_running)
		return;
#if 1
	/*
	 * WTZ: If the old state was link_up, we turn off the carrier
	 * to replicate everything we do elsewhere on a link-down
	 * event when we were already in a link-up state.
	 */
	if (oldstate == link_up)
		netif_carrier_off(cp->dev);
	if (changed && link_was_not_down) {
		/*
		 * WTZ: This branch will simply schedule a full reset after
		 * we explicitly changed link modes in an ioctl. See if this
		 * fixes the link-problems we were having for forced mode.
		 */
		atomic_inc(&cp->reset_task_pending);
		atomic_inc(&cp->reset_task_pending_all);
		schedule_work(&cp->reset_task);
		cp->timer_ticks = 0;
		mod_timer(&cp->link_timer, jiffies + CAS_LINK_TIMEOUT);
		return;
	}
#endif
	if (cp->phy_type & CAS_PHY_SERDES) {
		u32 val = readl(cp->regs + REG_PCS_MII_CTRL);

		if (cp->link_cntl & BMCR_ANENABLE) {
			val |= (PCS_MII_RESTART_AUTONEG | PCS_MII_AUTONEG_EN);
			cp->lstate = link_aneg;
		} else {
			if (cp->link_cntl & BMCR_FULLDPLX)
				val |= PCS_MII_CTRL_DUPLEX;
			val &= ~PCS_MII_AUTONEG_EN;
			cp->lstate = link_force_ok;
		}
		cp->link_transition = LINK_TRANSITION_LINK_CONFIG;
		writel(val, cp->regs + REG_PCS_MII_CTRL);

	} else {
		cas_mif_poll(cp, 0);
		ctl = cas_phy_read(cp, MII_BMCR);
		ctl &= ~(BMCR_FULLDPLX | BMCR_SPEED100 |
			 CAS_BMCR_SPEED1000 | BMCR_ANENABLE);
		ctl |= cp->link_cntl;
		if (ctl & BMCR_ANENABLE) {
			ctl |= BMCR_ANRESTART;
			cp->lstate = link_aneg;
		} else {
			cp->lstate = link_force_ok;
		}
		cp->link_transition = LINK_TRANSITION_LINK_CONFIG;
		cas_phy_write(cp, MII_BMCR, ctl);
		cas_mif_poll(cp, 1);
	}

	cp->timer_ticks = 0;
	mod_timer(&cp->link_timer, jiffies + CAS_LINK_TIMEOUT);
}

/* Must be invoked under cp->lock. */
static int cas_reset_mii_phy(struct cas *cp)
{
	int limit = STOP_TRIES_PHY;
	u16 val;

	cas_phy_write(cp, MII_BMCR, BMCR_RESET);
	udelay(100);
	while (--limit) {
		val = cas_phy_read(cp, MII_BMCR);
		if ((val & BMCR_RESET) == 0)
			break;
		udelay(10);
	}
	return limit <= 0;
}

static int cas_saturn_firmware_init(struct cas *cp)
{
	const struct firmware *fw;
	const char fw_name[] = "sun/cassini.bin";
	int err;

	if (PHY_NS_DP83065 != cp->phy_id)
		return 0;

	err = request_firmware(&fw, fw_name, &cp->pdev->dev);
	if (err) {
		pr_err("Failed to load firmware \"%s\"\n",
		       fw_name);
		return err;
	}
	if (fw->size < 2) {
		pr_err("bogus length %zu in \"%s\"\n",
		       fw->size, fw_name);
		err = -EINVAL;
		goto out;
	}
	cp->fw_load_addr = fw->data[1] << 8 | fw->data[0];
	cp->fw_size = fw->size - 2;
	cp->fw_data = vmalloc(cp->fw_size);
	if (!cp->fw_data) {
		err = -ENOMEM;
		pr_err("\"%s\" Failed %d\n", fw_name, err);
		goto out;
	}
	memcpy(cp->fw_data, &fw->data[2], cp->fw_size);
out:
	release_firmware(fw);
	return err;
}
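/* Firmware file layout implied above: the first two bytes of
 * sun/cassini.bin hold the little-endian PHY load address, and the
 * remaining fw->size - 2 bytes are the instruction stream written to
 * the DP83065 below.
 */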

static void cas_saturn_firmware_load(struct cas *cp)
{
	int i;

	cas_phy_powerdown(cp);

	/* expanded memory access mode */
	cas_phy_write(cp, DP83065_MII_MEM, 0x0);

	/* pointer configuration for new firmware */
	cas_phy_write(cp, DP83065_MII_REGE, 0x8ff9);
	cas_phy_write(cp, DP83065_MII_REGD, 0xbd);
	cas_phy_write(cp, DP83065_MII_REGE, 0x8ffa);
	cas_phy_write(cp, DP83065_MII_REGD, 0x82);
	cas_phy_write(cp, DP83065_MII_REGE, 0x8ffb);
	cas_phy_write(cp, DP83065_MII_REGD, 0x0);
	cas_phy_write(cp, DP83065_MII_REGE, 0x8ffc);
	cas_phy_write(cp, DP83065_MII_REGD, 0x39);

	/* download new firmware */
	cas_phy_write(cp, DP83065_MII_MEM, 0x1);
	cas_phy_write(cp, DP83065_MII_REGE, cp->fw_load_addr);
	for (i = 0; i < cp->fw_size; i++)
		cas_phy_write(cp, DP83065_MII_REGD, cp->fw_data[i]);

	/* enable firmware */
	cas_phy_write(cp, DP83065_MII_REGE, 0x8ff8);
	cas_phy_write(cp, DP83065_MII_REGD, 0x1);
}


/* phy initialization */
static void cas_phy_init(struct cas *cp)
{
	u16 val;

	/* if we're in MII/GMII mode, set up phy */
	if (CAS_PHY_MII(cp->phy_type)) {
		writel(PCS_DATAPATH_MODE_MII,
		       cp->regs + REG_PCS_DATAPATH_MODE);

		cas_mif_poll(cp, 0);
		cas_reset_mii_phy(cp); /* take out of isolate mode */

		if (PHY_LUCENT_B0 == cp->phy_id) {
			/* workaround link up/down issue with lucent */
			cas_phy_write(cp, LUCENT_MII_REG, 0x8000);
			cas_phy_write(cp, MII_BMCR, 0x00f1);
			cas_phy_write(cp, LUCENT_MII_REG, 0x0);

		} else if (PHY_BROADCOM_B0 == (cp->phy_id & 0xFFFFFFFC)) {
			/* workarounds for broadcom phy */
			cas_phy_write(cp, BROADCOM_MII_REG8, 0x0C20);
			cas_phy_write(cp, BROADCOM_MII_REG7, 0x0012);
			cas_phy_write(cp, BROADCOM_MII_REG5, 0x1804);
			cas_phy_write(cp, BROADCOM_MII_REG7, 0x0013);
			cas_phy_write(cp, BROADCOM_MII_REG5, 0x1204);
			cas_phy_write(cp, BROADCOM_MII_REG7, 0x8006);
			cas_phy_write(cp, BROADCOM_MII_REG5, 0x0132);
			cas_phy_write(cp, BROADCOM_MII_REG7, 0x8006);
			cas_phy_write(cp, BROADCOM_MII_REG5, 0x0232);
			cas_phy_write(cp, BROADCOM_MII_REG7, 0x201F);
			cas_phy_write(cp, BROADCOM_MII_REG5, 0x0A20);

		} else if (PHY_BROADCOM_5411 == cp->phy_id) {
			val = cas_phy_read(cp, BROADCOM_MII_REG4);
			val = cas_phy_read(cp, BROADCOM_MII_REG4);
			if (val & 0x0080) {
				/* link workaround */
				cas_phy_write(cp, BROADCOM_MII_REG4,
					      val & ~0x0080);
			}

		} else if (cp->cas_flags & CAS_FLAG_SATURN) {
			writel((cp->phy_type & CAS_PHY_MII_MDIO0) ?
			       SATURN_PCFG_FSI : 0x0,
			       cp->regs + REG_SATURN_PCFG);

			/* load firmware to address 10Mbps auto-negotiation
			 * issue. NOTE: this will need to be changed if the
			 * default firmware gets fixed.
			 */
			if (PHY_NS_DP83065 == cp->phy_id) {
				cas_saturn_firmware_load(cp);
			}
			cas_phy_powerup(cp);
		}

		/* advertise capabilities */
		val = cas_phy_read(cp, MII_BMCR);
		val &= ~BMCR_ANENABLE;
		cas_phy_write(cp, MII_BMCR, val);
		udelay(10);

		cas_phy_write(cp, MII_ADVERTISE,
			      cas_phy_read(cp, MII_ADVERTISE) |
			      (ADVERTISE_10HALF | ADVERTISE_10FULL |
			       ADVERTISE_100HALF | ADVERTISE_100FULL |
			       CAS_ADVERTISE_PAUSE |
			       CAS_ADVERTISE_ASYM_PAUSE));

		if (cp->cas_flags & CAS_FLAG_1000MB_CAP) {
			/* make sure that we don't advertise half
			 * duplex to avoid a chip issue
			 */
			val  = cas_phy_read(cp, CAS_MII_1000_CTRL);
			val &= ~CAS_ADVERTISE_1000HALF;
			val |= CAS_ADVERTISE_1000FULL;
			cas_phy_write(cp, CAS_MII_1000_CTRL, val);
		}

	} else {
		/* reset pcs for serdes */
		u32 val;
		int limit;

		writel(PCS_DATAPATH_MODE_SERDES,
		       cp->regs + REG_PCS_DATAPATH_MODE);

		/* enable serdes pins on saturn */
		if (cp->cas_flags & CAS_FLAG_SATURN)
			writel(0, cp->regs + REG_SATURN_PCFG);

		/* Reset PCS unit. */
		val = readl(cp->regs + REG_PCS_MII_CTRL);
		val |= PCS_MII_RESET;
		writel(val, cp->regs + REG_PCS_MII_CTRL);

		limit = STOP_TRIES;
		while (--limit > 0) {
			udelay(10);
			if ((readl(cp->regs + REG_PCS_MII_CTRL) &
			     PCS_MII_RESET) == 0)
				break;
		}
		if (limit <= 0)
			netdev_warn(cp->dev, "PCS reset bit would not clear [%08x]\n",
				    readl(cp->regs + REG_PCS_STATE_MACHINE));

		/* Make sure PCS is disabled while changing advertisement
		 * configuration.
		 */
		writel(0x0, cp->regs + REG_PCS_CFG);

		/* Advertise all capabilities except half-duplex. */
		val  = readl(cp->regs + REG_PCS_MII_ADVERT);
		val &= ~PCS_MII_ADVERT_HD;
		val |= (PCS_MII_ADVERT_FD | PCS_MII_ADVERT_SYM_PAUSE |
			PCS_MII_ADVERT_ASYM_PAUSE);
		writel(val, cp->regs + REG_PCS_MII_ADVERT);

		/* enable PCS */
		writel(PCS_CFG_EN, cp->regs + REG_PCS_CFG);

		/* pcs workaround: enable sync detect */
		writel(PCS_SERDES_CTRL_SYNCD_EN,
		       cp->regs + REG_PCS_SERDES_CTRL);
	}
}


static int cas_pcs_link_check(struct cas *cp)
{
	u32 stat, state_machine;
	int retval = 0;

	/* The link status bit latches on zero, so you must
	 * read it twice in such a case to see a transition
	 * to the link being up.
	 */
	stat = readl(cp->regs + REG_PCS_MII_STATUS);
	if ((stat & PCS_MII_STATUS_LINK_STATUS) == 0)
		stat = readl(cp->regs + REG_PCS_MII_STATUS);

	/* The remote-fault indication is only valid
	 * when autoneg has completed.
	 */
	if ((stat & (PCS_MII_STATUS_AUTONEG_COMP |
		     PCS_MII_STATUS_REMOTE_FAULT)) ==
	    (PCS_MII_STATUS_AUTONEG_COMP | PCS_MII_STATUS_REMOTE_FAULT))
		netif_info(cp, link, cp->dev, "PCS RemoteFault\n");

	/* work around link detection issue by querying the PCS state
	 * machine directly.
	 */
	state_machine = readl(cp->regs + REG_PCS_STATE_MACHINE);
	if ((state_machine & PCS_SM_LINK_STATE_MASK) != SM_LINK_STATE_UP) {
		stat &= ~PCS_MII_STATUS_LINK_STATUS;
	} else if (state_machine & PCS_SM_WORD_SYNC_STATE_MASK) {
		stat |= PCS_MII_STATUS_LINK_STATUS;
	}

	if (stat & PCS_MII_STATUS_LINK_STATUS) {
		if (cp->lstate != link_up) {
			if (cp->opened) {
				cp->lstate = link_up;
				cp->link_transition = LINK_TRANSITION_LINK_UP;

				cas_set_link_modes(cp);
				netif_carrier_on(cp->dev);
			}
		}
	} else if (cp->lstate == link_up) {
		cp->lstate = link_down;
		if (link_transition_timeout != 0 &&
		    cp->link_transition != LINK_TRANSITION_REQUESTED_RESET &&
		    !cp->link_transition_jiffies_valid) {
			/*
			 * force a reset, as a workaround for the
			 * link-failure problem. May want to move this to a
			 * point a bit earlier in the sequence. If we had
			 * generated a reset a short time ago, we'll wait for
			 * the link timer to check the status until a
			 * timer expires (link_transition_jiffies_valid is
			 * true when the timer is running.) Instead of using
			 * a system timer, we just do a check whenever the
			 * link timer is running - this clears the flag after
			 * a suitable delay.
			 */
			retval = 1;
			cp->link_transition = LINK_TRANSITION_REQUESTED_RESET;
			cp->link_transition_jiffies = jiffies;
			cp->link_transition_jiffies_valid = 1;
		} else {
			cp->link_transition = LINK_TRANSITION_ON_FAILURE;
		}
		netif_carrier_off(cp->dev);
		if (cp->opened)
			netif_info(cp, link, cp->dev, "PCS link down\n");

		/* Cassini only: if you force a mode, there can be
		 * sync problems on link down. to fix that, the following
		 * things need to be checked:
		 * 1) read serialink state register
		 * 2) read pcs status register to verify link down.
		 * 3) if link down and serial link == 0x03, then you need
		 *    to global reset the chip.
		 */
		if ((cp->cas_flags & CAS_FLAG_REG_PLUS) == 0) {
			/* should check to see if we're in a forced mode */
			stat = readl(cp->regs + REG_PCS_SERDES_STATE);
			if (stat == 0x03)
				return 1;
		}
	} else if (cp->lstate == link_down) {
		if (link_transition_timeout != 0 &&
		    cp->link_transition != LINK_TRANSITION_REQUESTED_RESET &&
		    !cp->link_transition_jiffies_valid) {
			/* force a reset, as a workaround for the
			 * link-failure problem. May want to move
			 * this to a point a bit earlier in the
			 * sequence.
			 */
			retval = 1;
			cp->link_transition = LINK_TRANSITION_REQUESTED_RESET;
			cp->link_transition_jiffies = jiffies;
			cp->link_transition_jiffies_valid = 1;
		} else {
			cp->link_transition = LINK_TRANSITION_STILL_FAILED;
		}
	}

	return retval;
}

static int cas_pcs_interrupt(struct net_device *dev,
			     struct cas *cp, u32 status)
{
	u32 stat = readl(cp->regs + REG_PCS_INTR_STATUS);

	if ((stat & PCS_INTR_STATUS_LINK_CHANGE) == 0)
		return 0;
	return cas_pcs_link_check(cp);
}

static int cas_txmac_interrupt(struct net_device *dev,
			       struct cas *cp, u32 status)
{
	u32 txmac_stat = readl(cp->regs + REG_MAC_TX_STATUS);

	if (!txmac_stat)
		return 0;

	netif_printk(cp, intr, KERN_DEBUG, cp->dev,
		     "txmac interrupt, txmac_stat: 0x%x\n", txmac_stat);

	/* Defer timer expiration is quite normal,
	 * don't even log the event.
	 */
	if ((txmac_stat & MAC_TX_DEFER_TIMER) &&
	    !(txmac_stat & ~MAC_TX_DEFER_TIMER))
		return 0;

	spin_lock(&cp->stat_lock[0]);
	if (txmac_stat & MAC_TX_UNDERRUN) {
		netdev_err(dev, "TX MAC xmit underrun\n");
		cp->net_stats[0].tx_fifo_errors++;
	}

	if (txmac_stat & MAC_TX_MAX_PACKET_ERR) {
		netdev_err(dev, "TX MAC max packet size error\n");
		cp->net_stats[0].tx_errors++;
	}

	/* The rest are all cases of one of the 16-bit TX
	 * counters expiring.
	 */
	if (txmac_stat & MAC_TX_COLL_NORMAL)
		cp->net_stats[0].collisions += 0x10000;

	if (txmac_stat & MAC_TX_COLL_EXCESS) {
		cp->net_stats[0].tx_aborted_errors += 0x10000;
		cp->net_stats[0].collisions += 0x10000;
	}

	if (txmac_stat & MAC_TX_COLL_LATE) {
		cp->net_stats[0].tx_aborted_errors += 0x10000;
		cp->net_stats[0].collisions += 0x10000;
	}
	spin_unlock(&cp->stat_lock[0]);

	/* We do not keep track of MAC_TX_COLL_FIRST and
	 * MAC_TX_PEAK_ATTEMPTS events.
	 */
	return 0;
}

static void cas_load_firmware(struct cas *cp, cas_hp_inst_t *firmware)
{
	cas_hp_inst_t *inst;
	u32 val;
	int i;

	i = 0;
	while ((inst = firmware) && inst->note) {
		writel(i, cp->regs + REG_HP_INSTR_RAM_ADDR);

		val = CAS_BASE(HP_INSTR_RAM_HI_VAL, inst->val);
		val |= CAS_BASE(HP_INSTR_RAM_HI_MASK, inst->mask);
		writel(val, cp->regs + REG_HP_INSTR_RAM_DATA_HI);

		val = CAS_BASE(HP_INSTR_RAM_MID_OUTARG, inst->outarg >> 10);
		val |= CAS_BASE(HP_INSTR_RAM_MID_OUTOP, inst->outop);
		val |= CAS_BASE(HP_INSTR_RAM_MID_FNEXT, inst->fnext);
		val |= CAS_BASE(HP_INSTR_RAM_MID_FOFF, inst->foff);
		val |= CAS_BASE(HP_INSTR_RAM_MID_SNEXT, inst->snext);
		val |= CAS_BASE(HP_INSTR_RAM_MID_SOFF, inst->soff);
		val |= CAS_BASE(HP_INSTR_RAM_MID_OP, inst->op);
		writel(val, cp->regs + REG_HP_INSTR_RAM_DATA_MID);

		val = CAS_BASE(HP_INSTR_RAM_LOW_OUTMASK, inst->outmask);
		val |= CAS_BASE(HP_INSTR_RAM_LOW_OUTSHIFT, inst->outshift);
		val |= CAS_BASE(HP_INSTR_RAM_LOW_OUTEN, inst->outenab);
		val |= CAS_BASE(HP_INSTR_RAM_LOW_OUTARG, inst->outarg);
		writel(val, cp->regs + REG_HP_INSTR_RAM_DATA_LOW);
		++firmware;
		++i;
	}
}

static void cas_init_rx_dma(struct cas *cp)
{
	u64 desc_dma = cp->block_dvma;
	u32 val;
	int i, size;

	/* rx free descriptors */
	val = CAS_BASE(RX_CFG_SWIVEL, RX_SWIVEL_OFF_VAL);
	val |= CAS_BASE(RX_CFG_DESC_RING, RX_DESC_RINGN_INDEX(0));
	val |= CAS_BASE(RX_CFG_COMP_RING, RX_COMP_RINGN_INDEX(0));
	if ((N_RX_DESC_RINGS > 1) &&
	    (cp->cas_flags & CAS_FLAG_REG_PLUS))  /* do desc 2 */
		val |= CAS_BASE(RX_CFG_DESC_RING1, RX_DESC_RINGN_INDEX(1));
	writel(val, cp->regs + REG_RX_CFG);

	val = (unsigned long) cp->init_rxds[0] -
		(unsigned long) cp->init_block;
	writel((desc_dma + val) >> 32, cp->regs + REG_RX_DB_HI);
	writel((desc_dma + val) & 0xffffffff, cp->regs + REG_RX_DB_LOW);
	writel(RX_DESC_RINGN_SIZE(0) - 4, cp->regs + REG_RX_KICK);

	if (cp->cas_flags & CAS_FLAG_REG_PLUS) {
		/* rx desc 2 is for IPSEC packets. however,
		 * we don't use it for that purpose.
		 */
1232 | val = (unsigned long) cp->init_rxds[1] - | ||
1233 | (unsigned long) cp->init_block; | ||
1234 | writel((desc_dma + val) >> 32, cp->regs + REG_PLUS_RX_DB1_HI); | ||
1235 | writel((desc_dma + val) & 0xffffffff, cp->regs + | ||
1236 | REG_PLUS_RX_DB1_LOW); | ||
1237 | writel(RX_DESC_RINGN_SIZE(1) - 4, cp->regs + | ||
1238 | REG_PLUS_RX_KICK1); | ||
1239 | } | ||
1240 | |||
1241 | /* rx completion registers */ | ||
1242 | val = (unsigned long) cp->init_rxcs[0] - | ||
1243 | (unsigned long) cp->init_block; | ||
1244 | writel((desc_dma + val) >> 32, cp->regs + REG_RX_CB_HI); | ||
1245 | writel((desc_dma + val) & 0xffffffff, cp->regs + REG_RX_CB_LOW); | ||
1246 | |||
1247 | if (cp->cas_flags & CAS_FLAG_REG_PLUS) { | ||
1248 | /* rx comp 2-4 */ | ||
1249 | for (i = 1; i < MAX_RX_COMP_RINGS; i++) { | ||
1250 | val = (unsigned long) cp->init_rxcs[i] - | ||
1251 | (unsigned long) cp->init_block; | ||
1252 | writel((desc_dma + val) >> 32, cp->regs + | ||
1253 | REG_PLUS_RX_CBN_HI(i)); | ||
1254 | writel((desc_dma + val) & 0xffffffff, cp->regs + | ||
1255 | REG_PLUS_RX_CBN_LOW(i)); | ||
1256 | } | ||
1257 | } | ||
1258 | |||
1259 | /* read selective clear regs to prevent spurious interrupts | ||
1260 | * on reset because complete == kick. | ||
1261 | * selective clear set up to prevent interrupts on resets | ||
1262 | */ | ||
1263 | readl(cp->regs + REG_INTR_STATUS_ALIAS); | ||
1264 | writel(INTR_RX_DONE | INTR_RX_BUF_UNAVAIL, cp->regs + REG_ALIAS_CLEAR); | ||
1265 | if (cp->cas_flags & CAS_FLAG_REG_PLUS) { | ||
1266 | for (i = 1; i < N_RX_COMP_RINGS; i++) | ||
1267 | readl(cp->regs + REG_PLUS_INTRN_STATUS_ALIAS(i)); | ||
1268 | |||
1269 | /* 2 is different from 3 and 4 */ | ||
1270 | if (N_RX_COMP_RINGS > 1) | ||
1271 | writel(INTR_RX_DONE_ALT | INTR_RX_BUF_UNAVAIL_1, | ||
1272 | cp->regs + REG_PLUS_ALIASN_CLEAR(1)); | ||
1273 | |||
1274 | for (i = 2; i < N_RX_COMP_RINGS; i++) | ||
1275 | writel(INTR_RX_DONE_ALT, | ||
1276 | cp->regs + REG_PLUS_ALIASN_CLEAR(i)); | ||
1277 | } | ||
1278 | |||
1279 | /* set up pause thresholds */ | ||
1280 | val = CAS_BASE(RX_PAUSE_THRESH_OFF, | ||
1281 | cp->rx_pause_off / RX_PAUSE_THRESH_QUANTUM); | ||
1282 | val |= CAS_BASE(RX_PAUSE_THRESH_ON, | ||
1283 | cp->rx_pause_on / RX_PAUSE_THRESH_QUANTUM); | ||
1284 | writel(val, cp->regs + REG_RX_PAUSE_THRESH); | ||
1285 | |||
1286 | /* zero out dma reassembly buffers */ | ||
1287 | for (i = 0; i < 64; i++) { | ||
1288 | writel(i, cp->regs + REG_RX_TABLE_ADDR); | ||
1289 | writel(0x0, cp->regs + REG_RX_TABLE_DATA_LOW); | ||
1290 | writel(0x0, cp->regs + REG_RX_TABLE_DATA_MID); | ||
1291 | writel(0x0, cp->regs + REG_RX_TABLE_DATA_HI); | ||
1292 | } | ||
1293 | |||
1294 | /* make sure address register is 0 for normal operation */ | ||
1295 | writel(0x0, cp->regs + REG_RX_CTRL_FIFO_ADDR); | ||
1296 | writel(0x0, cp->regs + REG_RX_IPP_FIFO_ADDR); | ||
1297 | |||
1298 | /* interrupt mitigation */ | ||
1299 | #ifdef USE_RX_BLANK | ||
1300 | val = CAS_BASE(RX_BLANK_INTR_TIME, RX_BLANK_INTR_TIME_VAL); | ||
1301 | val |= CAS_BASE(RX_BLANK_INTR_PKT, RX_BLANK_INTR_PKT_VAL); | ||
1302 | writel(val, cp->regs + REG_RX_BLANK); | ||
1303 | #else | ||
1304 | writel(0x0, cp->regs + REG_RX_BLANK); | ||
1305 | #endif | ||
1306 | |||
1307 | /* interrupt generation as a function of low water marks for | ||
1308 | * free desc and completion entries. these are used to trigger | ||
1309 | * housekeeping for rx descs. we don't use the free interrupt | ||
1310 | * as it's not very useful | ||
1311 | */ | ||
1312 | /* val = CAS_BASE(RX_AE_THRESH_FREE, RX_AE_FREEN_VAL(0)); */ | ||
1313 | val = CAS_BASE(RX_AE_THRESH_COMP, RX_AE_COMP_VAL); | ||
1314 | writel(val, cp->regs + REG_RX_AE_THRESH); | ||
1315 | if (cp->cas_flags & CAS_FLAG_REG_PLUS) { | ||
1316 | val = CAS_BASE(RX_AE1_THRESH_FREE, RX_AE_FREEN_VAL(1)); | ||
1317 | writel(val, cp->regs + REG_PLUS_RX_AE1_THRESH); | ||
1318 | } | ||
1319 | |||
1320 | /* Random early detect registers. useful for congestion avoidance. | ||
1321 | * this should be tunable. | ||
1322 | */ | ||
1323 | writel(0x0, cp->regs + REG_RX_RED); | ||
1324 | |||
1325 | /* receive page sizes. default == 2K (0x800) */ | ||
1326 | val = 0; | ||
1327 | if (cp->page_size == 0x1000) | ||
1328 | val = 0x1; | ||
1329 | else if (cp->page_size == 0x2000) | ||
1330 | val = 0x2; | ||
1331 | else if (cp->page_size == 0x4000) | ||
1332 | val = 0x3; | ||
1333 | |||
1334 | /* round mtu + offset. constrain to page size. */ | ||
1335 | size = cp->dev->mtu + 64; | ||
1336 | if (size > cp->page_size) | ||
1337 | size = cp->page_size; | ||
1338 | |||
1339 | if (size <= 0x400) | ||
1340 | i = 0x0; | ||
1341 | else if (size <= 0x800) | ||
1342 | i = 0x1; | ||
1343 | else if (size <= 0x1000) | ||
1344 | i = 0x2; | ||
1345 | else | ||
1346 | i = 0x3; | ||
1347 | |||
1348 | cp->mtu_stride = 1 << (i + 10); | ||
1349 | val = CAS_BASE(RX_PAGE_SIZE, val); | ||
1350 | val |= CAS_BASE(RX_PAGE_SIZE_MTU_STRIDE, i); | ||
1351 | val |= CAS_BASE(RX_PAGE_SIZE_MTU_COUNT, cp->page_size >> (i + 10)); | ||
1352 | val |= CAS_BASE(RX_PAGE_SIZE_MTU_OFF, 0x1); | ||
1353 | writel(val, cp->regs + REG_RX_PAGE_SIZE); | ||
1354 | |||
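| /* e.g. with an 8K page (page_size == 0x2000) and a 1500-byte MTU: | ||
| * size = 1564 <= 0x800, so i = 0x1, mtu_stride = 1 << 11 = 2048, and | ||
| * MTU_COUNT = 0x2000 >> 11 = 4 -- each page is carved into four 2K | ||
| * strides for MTU-sized frames. | ||
| */ | ||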
1355 | /* enable the header parser if desired */ | ||
1356 | if (CAS_HP_FIRMWARE == cas_prog_null) | ||
1357 | return; | ||
1358 | |||
1359 | val = CAS_BASE(HP_CFG_NUM_CPU, CAS_NCPUS > 63 ? 0 : CAS_NCPUS); | ||
1360 | val |= HP_CFG_PARSE_EN | HP_CFG_SYN_INC_MASK; | ||
1361 | val |= CAS_BASE(HP_CFG_TCP_THRESH, HP_TCP_THRESH_VAL); | ||
1362 | writel(val, cp->regs + REG_HP_CFG); | ||
1363 | } | ||
1364 | |||
1365 | static inline void cas_rxc_init(struct cas_rx_comp *rxc) | ||
1366 | { | ||
1367 | memset(rxc, 0, sizeof(*rxc)); | ||
1368 | rxc->word4 = cpu_to_le64(RX_COMP4_ZERO); | ||
1369 | } | ||
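| /* word4's RX_COMP4_ZERO bit marks an entry as driver-owned; the chip | ||
| * clears it when it writes back a completion, which is what the rx | ||
| * processing loop polls for before trusting the other words. | ||
| */ | ||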
1370 | |||
1371 | /* NOTE: we use the ENC RX DESC ring for spares. the rx_page[0,1] | ||
1372 | * flipping is protected by the fact that the chip will not | ||
1373 | * hand back the same page index while it's being processed. | ||
1374 | */ | ||
1375 | static inline cas_page_t *cas_page_spare(struct cas *cp, const int index) | ||
1376 | { | ||
1377 | cas_page_t *page = cp->rx_pages[1][index]; | ||
1378 | cas_page_t *new; | ||
1379 | |||
1380 | if (page_count(page->buffer) == 1) | ||
1381 | return page; | ||
1382 | |||
1383 | new = cas_page_dequeue(cp); | ||
1384 | if (new) { | ||
1385 | spin_lock(&cp->rx_inuse_lock); | ||
1386 | list_add(&page->list, &cp->rx_inuse_list); | ||
1387 | spin_unlock(&cp->rx_inuse_lock); | ||
1388 | } | ||
1389 | return new; | ||
1390 | } | ||
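| /* page_count() == 1 means the driver holds the only reference, so the | ||
| * spare can be handed out as-is; otherwise the stack still owns a frag | ||
| * on it and the page is parked on rx_inuse_list until that reference | ||
| * drops. | ||
| */ | ||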
1391 | |||
1392 | /* this needs to be changed if we actually use the ENC RX DESC ring */ | ||
1393 | static cas_page_t *cas_page_swap(struct cas *cp, const int ring, | ||
1394 | const int index) | ||
1395 | { | ||
1396 | cas_page_t **page0 = cp->rx_pages[0]; | ||
1397 | cas_page_t **page1 = cp->rx_pages[1]; | ||
1398 | |||
1399 | /* swap if buffer is in use */ | ||
1400 | if (page_count(page0[index]->buffer) > 1) { | ||
1401 | cas_page_t *new = cas_page_spare(cp, index); | ||
1402 | if (new) { | ||
1403 | page1[index] = page0[index]; | ||
1404 | page0[index] = new; | ||
1405 | } | ||
1406 | } | ||
1407 | RX_USED_SET(page0[index], 0); | ||
1408 | return page0[index]; | ||
1409 | } | ||
1410 | |||
1411 | static void cas_clean_rxds(struct cas *cp) | ||
1412 | { | ||
1413 | /* only clean ring 0 as ring 1 is used for spare buffers */ | ||
1414 | struct cas_rx_desc *rxd = cp->init_rxds[0]; | ||
1415 | int i, size; | ||
1416 | |||
1417 | /* release all rx flows */ | ||
1418 | for (i = 0; i < N_RX_FLOWS; i++) { | ||
1419 | struct sk_buff *skb; | ||
1420 | while ((skb = __skb_dequeue(&cp->rx_flows[i]))) { | ||
1421 | cas_skb_release(skb); | ||
1422 | } | ||
1423 | } | ||
1424 | |||
1425 | /* initialize descriptors */ | ||
1426 | size = RX_DESC_RINGN_SIZE(0); | ||
1427 | for (i = 0; i < size; i++) { | ||
1428 | cas_page_t *page = cas_page_swap(cp, 0, i); | ||
1429 | rxd[i].buffer = cpu_to_le64(page->dma_addr); | ||
1430 | rxd[i].index = cpu_to_le64(CAS_BASE(RX_INDEX_NUM, i) | | ||
1431 | CAS_BASE(RX_INDEX_RING, 0)); | ||
1432 | } | ||
1433 | |||
1434 | cp->rx_old[0] = RX_DESC_RINGN_SIZE(0) - 4; | ||
1435 | cp->rx_last[0] = 0; | ||
1436 | cp->cas_flags &= ~CAS_FLAG_RXD_POST(0); | ||
1437 | } | ||
1438 | |||
1439 | static void cas_clean_rxcs(struct cas *cp) | ||
1440 | { | ||
1441 | int i, j; | ||
1442 | |||
1443 | /* take ownership of rx comp descriptors */ | ||
1444 | memset(cp->rx_cur, 0, sizeof(*cp->rx_cur)*N_RX_COMP_RINGS); | ||
1445 | memset(cp->rx_new, 0, sizeof(*cp->rx_new)*N_RX_COMP_RINGS); | ||
1446 | for (i = 0; i < N_RX_COMP_RINGS; i++) { | ||
1447 | struct cas_rx_comp *rxc = cp->init_rxcs[i]; | ||
1448 | for (j = 0; j < RX_COMP_RINGN_SIZE(i); j++) { | ||
1449 | cas_rxc_init(rxc + j); | ||
1450 | } | ||
1451 | } | ||
1452 | } | ||
1453 | |||
1454 | #if 0 | ||
1455 | /* When we get an RX FIFO overflow, the RX unit is probably hung, | ||
1456 | * so we reset it in stages: MAC RX, RX DMA, then the RX reset command. | ||
1457 | * | ||
1458 | * If any part of the reset goes wrong, we return 1 and that causes the | ||
1459 | * whole chip to be reset. | ||
1460 | */ | ||
1461 | static int cas_rxmac_reset(struct cas *cp) | ||
1462 | { | ||
1463 | struct net_device *dev = cp->dev; | ||
1464 | int limit; | ||
1465 | u32 val; | ||
1466 | |||
1467 | /* First, reset MAC RX. */ | ||
1468 | writel(cp->mac_rx_cfg & ~MAC_RX_CFG_EN, cp->regs + REG_MAC_RX_CFG); | ||
1469 | for (limit = 0; limit < STOP_TRIES; limit++) { | ||
1470 | if (!(readl(cp->regs + REG_MAC_RX_CFG) & MAC_RX_CFG_EN)) | ||
1471 | break; | ||
1472 | udelay(10); | ||
1473 | } | ||
1474 | if (limit == STOP_TRIES) { | ||
1475 | netdev_err(dev, "RX MAC will not disable, resetting whole chip\n"); | ||
1476 | return 1; | ||
1477 | } | ||
1478 | |||
1479 | /* Second, disable RX DMA. */ | ||
1480 | writel(0, cp->regs + REG_RX_CFG); | ||
1481 | for (limit = 0; limit < STOP_TRIES; limit++) { | ||
1482 | if (!(readl(cp->regs + REG_RX_CFG) & RX_CFG_DMA_EN)) | ||
1483 | break; | ||
1484 | udelay(10); | ||
1485 | } | ||
1486 | if (limit == STOP_TRIES) { | ||
1487 | netdev_err(dev, "RX DMA will not disable, resetting whole chip\n"); | ||
1488 | return 1; | ||
1489 | } | ||
1490 | |||
1491 | mdelay(5); | ||
1492 | |||
1493 | /* Execute RX reset command. */ | ||
1494 | writel(SW_RESET_RX, cp->regs + REG_SW_RESET); | ||
1495 | for (limit = 0; limit < STOP_TRIES; limit++) { | ||
1496 | if (!(readl(cp->regs + REG_SW_RESET) & SW_RESET_RX)) | ||
1497 | break; | ||
1498 | udelay(10); | ||
1499 | } | ||
1500 | if (limit == STOP_TRIES) { | ||
1501 | netdev_err(dev, "RX reset command will not execute, resetting whole chip\n"); | ||
1502 | return 1; | ||
1503 | } | ||
1504 | |||
1505 | /* reset driver rx state */ | ||
1506 | cas_clean_rxds(cp); | ||
1507 | cas_clean_rxcs(cp); | ||
1508 | |||
1509 | /* Now, reprogram the rest of RX unit. */ | ||
1510 | cas_init_rx_dma(cp); | ||
1511 | |||
1512 | /* re-enable */ | ||
1513 | val = readl(cp->regs + REG_RX_CFG); | ||
1514 | writel(val | RX_CFG_DMA_EN, cp->regs + REG_RX_CFG); | ||
1515 | writel(MAC_RX_FRAME_RECV, cp->regs + REG_MAC_RX_MASK); | ||
1516 | val = readl(cp->regs + REG_MAC_RX_CFG); | ||
1517 | writel(val | MAC_RX_CFG_EN, cp->regs + REG_MAC_RX_CFG); | ||
1518 | return 0; | ||
1519 | } | ||
1520 | #endif | ||
1521 | |||
1522 | static int cas_rxmac_interrupt(struct net_device *dev, struct cas *cp, | ||
1523 | u32 status) | ||
1524 | { | ||
1525 | u32 stat = readl(cp->regs + REG_MAC_RX_STATUS); | ||
1526 | |||
1527 | if (!stat) | ||
1528 | return 0; | ||
1529 | |||
1530 | netif_dbg(cp, intr, cp->dev, "rxmac interrupt, stat: 0x%x\n", stat); | ||
1531 | |||
1532 | /* these are all rollovers */ | ||
1533 | spin_lock(&cp->stat_lock[0]); | ||
1534 | if (stat & MAC_RX_ALIGN_ERR) | ||
1535 | cp->net_stats[0].rx_frame_errors += 0x10000; | ||
1536 | |||
1537 | if (stat & MAC_RX_CRC_ERR) | ||
1538 | cp->net_stats[0].rx_crc_errors += 0x10000; | ||
1539 | |||
1540 | if (stat & MAC_RX_LEN_ERR) | ||
1541 | cp->net_stats[0].rx_length_errors += 0x10000; | ||
1542 | |||
1543 | if (stat & MAC_RX_OVERFLOW) { | ||
1544 | cp->net_stats[0].rx_over_errors++; | ||
1545 | cp->net_stats[0].rx_fifo_errors++; | ||
1546 | } | ||
1547 | |||
1548 | /* We do not track MAC_RX_FRAME_COUNT and MAC_RX_VIOL_ERR | ||
1549 | * events. | ||
1550 | */ | ||
1551 | spin_unlock(&cp->stat_lock[0]); | ||
1552 | return 0; | ||
1553 | } | ||
1554 | |||
1555 | static int cas_mac_interrupt(struct net_device *dev, struct cas *cp, | ||
1556 | u32 status) | ||
1557 | { | ||
1558 | u32 stat = readl(cp->regs + REG_MAC_CTRL_STATUS); | ||
1559 | |||
1560 | if (!stat) | ||
1561 | return 0; | ||
1562 | |||
1563 | netif_printk(cp, intr, KERN_DEBUG, cp->dev, | ||
1564 | "mac interrupt, stat: 0x%x\n", stat); | ||
1565 | |||
1566 | /* This interrupt is just for pause frame and pause | ||
1567 | * tracking. It is useful for diagnostics and debug | ||
1568 | * but probably by default we will mask these events. | ||
1569 | */ | ||
1570 | if (stat & MAC_CTRL_PAUSE_STATE) | ||
1571 | cp->pause_entered++; | ||
1572 | |||
1573 | if (stat & MAC_CTRL_PAUSE_RECEIVED) | ||
1574 | cp->pause_last_time_recvd = (stat >> 16); | ||
1575 | |||
1576 | return 0; | ||
1577 | } | ||
1578 | |||
1579 | |||
1580 | /* Must be invoked under cp->lock. */ | ||
1581 | static inline int cas_mdio_link_not_up(struct cas *cp) | ||
1582 | { | ||
1583 | u16 val; | ||
1584 | |||
1585 | switch (cp->lstate) { | ||
1586 | case link_force_ret: | ||
1587 | netif_info(cp, link, cp->dev, "Autoneg failed again, keeping forced mode\n"); | ||
1588 | cas_phy_write(cp, MII_BMCR, cp->link_fcntl); | ||
1589 | cp->timer_ticks = 5; | ||
1590 | cp->lstate = link_force_ok; | ||
1591 | cp->link_transition = LINK_TRANSITION_LINK_CONFIG; | ||
1592 | break; | ||
1593 | |||
1594 | case link_aneg: | ||
1595 | val = cas_phy_read(cp, MII_BMCR); | ||
1596 | |||
1597 | /* Try forced modes. we try things in the following order: | ||
1598 | * 1000 full -> 100 full/half -> 10 half | ||
1599 | */ | ||
1600 | val &= ~(BMCR_ANRESTART | BMCR_ANENABLE); | ||
1601 | val |= BMCR_FULLDPLX; | ||
1602 | val |= (cp->cas_flags & CAS_FLAG_1000MB_CAP) ? | ||
1603 | CAS_BMCR_SPEED1000 : BMCR_SPEED100; | ||
1604 | cas_phy_write(cp, MII_BMCR, val); | ||
1605 | cp->timer_ticks = 5; | ||
1606 | cp->lstate = link_force_try; | ||
1607 | cp->link_transition = LINK_TRANSITION_LINK_CONFIG; | ||
1608 | break; | ||
1609 | |||
1610 | case link_force_try: | ||
1611 | /* Downgrade from 1000 to 100 to 10 Mbps if necessary. */ | ||
1612 | val = cas_phy_read(cp, MII_BMCR); | ||
1613 | cp->timer_ticks = 5; | ||
1614 | if (val & CAS_BMCR_SPEED1000) { /* gigabit */ | ||
1615 | val &= ~CAS_BMCR_SPEED1000; | ||
1616 | val |= (BMCR_SPEED100 | BMCR_FULLDPLX); | ||
1617 | cas_phy_write(cp, MII_BMCR, val); | ||
1618 | break; | ||
1619 | } | ||
1620 | |||
1621 | if (val & BMCR_SPEED100) { | ||
1622 | if (val & BMCR_FULLDPLX) { /* fd failed */ | ||
1623 | val &= ~BMCR_FULLDPLX; | ||
1624 | } else { /* 100Mbps failed */ | ||
1625 | val &= ~BMCR_SPEED100; | ||
1626 | } | ||
1627 | cas_phy_write(cp, MII_BMCR, val); | ||
1628 | break; | ||
1629 | } | ||
1630 | default: | ||
1631 | break; | ||
1632 | } | ||
1633 | return 0; | ||
1634 | } | ||
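| /* net effect: with autoneg failing, the forced-mode ladder steps | ||
| * 1000FD (if capable) -> 100FD -> 100HD -> 10HD, one rung per timer | ||
| * pass that still sees no link. | ||
| */ | ||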
1635 | |||
1636 | |||
1637 | /* must be invoked with cp->lock held */ | ||
1638 | static int cas_mii_link_check(struct cas *cp, const u16 bmsr) | ||
1639 | { | ||
1640 | int restart; | ||
1641 | |||
1642 | if (bmsr & BMSR_LSTATUS) { | ||
1643 | /* Ok, here we got a link. If we had it due to a forced | ||
1644 | * fallback, and we were configured for autoneg, we | ||
1645 | * retry a short autoneg pass. If you know your hub is | ||
1646 | * broken, use ethtool ;) | ||
1647 | */ | ||
1648 | if ((cp->lstate == link_force_try) && | ||
1649 | (cp->link_cntl & BMCR_ANENABLE)) { | ||
1650 | cp->lstate = link_force_ret; | ||
1651 | cp->link_transition = LINK_TRANSITION_LINK_CONFIG; | ||
1652 | cas_mif_poll(cp, 0); | ||
1653 | cp->link_fcntl = cas_phy_read(cp, MII_BMCR); | ||
1654 | cp->timer_ticks = 5; | ||
1655 | if (cp->opened) | ||
1656 | netif_info(cp, link, cp->dev, | ||
1657 | "Got link after fallback, retrying autoneg once...\n"); | ||
1658 | cas_phy_write(cp, MII_BMCR, | ||
1659 | cp->link_fcntl | BMCR_ANENABLE | | ||
1660 | BMCR_ANRESTART); | ||
1661 | cas_mif_poll(cp, 1); | ||
1662 | |||
1663 | } else if (cp->lstate != link_up) { | ||
1664 | cp->lstate = link_up; | ||
1665 | cp->link_transition = LINK_TRANSITION_LINK_UP; | ||
1666 | |||
1667 | if (cp->opened) { | ||
1668 | cas_set_link_modes(cp); | ||
1669 | netif_carrier_on(cp->dev); | ||
1670 | } | ||
1671 | } | ||
1672 | return 0; | ||
1673 | } | ||
1674 | |||
1675 | /* link not up. if the link was previously up, we restart the | ||
1676 | * whole process | ||
1677 | */ | ||
1678 | restart = 0; | ||
1679 | if (cp->lstate == link_up) { | ||
1680 | cp->lstate = link_down; | ||
1681 | cp->link_transition = LINK_TRANSITION_LINK_DOWN; | ||
1682 | |||
1683 | netif_carrier_off(cp->dev); | ||
1684 | if (cp->opened) | ||
1685 | netif_info(cp, link, cp->dev, "Link down\n"); | ||
1686 | restart = 1; | ||
1687 | |||
1688 | } else if (++cp->timer_ticks > 10) | ||
1689 | cas_mdio_link_not_up(cp); | ||
1690 | |||
1691 | return restart; | ||
1692 | } | ||
1693 | |||
1694 | static int cas_mif_interrupt(struct net_device *dev, struct cas *cp, | ||
1695 | u32 status) | ||
1696 | { | ||
1697 | u32 stat = readl(cp->regs + REG_MIF_STATUS); | ||
1698 | u16 bmsr; | ||
1699 | |||
1700 | /* check for a link change */ | ||
1701 | if (CAS_VAL(MIF_STATUS_POLL_STATUS, stat) == 0) | ||
1702 | return 0; | ||
1703 | |||
1704 | bmsr = CAS_VAL(MIF_STATUS_POLL_DATA, stat); | ||
1705 | return cas_mii_link_check(cp, bmsr); | ||
1706 | } | ||
1707 | |||
1708 | static int cas_pci_interrupt(struct net_device *dev, struct cas *cp, | ||
1709 | u32 status) | ||
1710 | { | ||
1711 | u32 stat = readl(cp->regs + REG_PCI_ERR_STATUS); | ||
1712 | |||
1713 | if (!stat) | ||
1714 | return 0; | ||
1715 | |||
1716 | netdev_err(dev, "PCI error [%04x:%04x]", | ||
1717 | stat, readl(cp->regs + REG_BIM_DIAG)); | ||
1718 | |||
1719 | /* cassini+ has this reserved */ | ||
1720 | if ((stat & PCI_ERR_BADACK) && | ||
1721 | ((cp->cas_flags & CAS_FLAG_REG_PLUS) == 0)) | ||
1722 | pr_cont(" <No ACK64# during ABS64 cycle>"); | ||
1723 | |||
1724 | if (stat & PCI_ERR_DTRTO) | ||
1725 | pr_cont(" <Delayed transaction timeout>"); | ||
1726 | if (stat & PCI_ERR_OTHER) | ||
1727 | pr_cont(" <other>"); | ||
1728 | if (stat & PCI_ERR_BIM_DMA_WRITE) | ||
1729 | pr_cont(" <BIM DMA 0 write req>"); | ||
1730 | if (stat & PCI_ERR_BIM_DMA_READ) | ||
1731 | pr_cont(" <BIM DMA 0 read req>"); | ||
1732 | pr_cont("\n"); | ||
1733 | |||
1734 | if (stat & PCI_ERR_OTHER) { | ||
1735 | u16 cfg; | ||
1736 | |||
1737 | /* Interrogate PCI config space for the | ||
1738 | * true cause. | ||
1739 | */ | ||
1740 | pci_read_config_word(cp->pdev, PCI_STATUS, &cfg); | ||
1741 | netdev_err(dev, "Read PCI cfg space status [%04x]\n", cfg); | ||
1742 | if (cfg & PCI_STATUS_PARITY) | ||
1743 | netdev_err(dev, "PCI parity error detected\n"); | ||
1744 | if (cfg & PCI_STATUS_SIG_TARGET_ABORT) | ||
1745 | netdev_err(dev, "PCI target abort\n"); | ||
1746 | if (cfg & PCI_STATUS_REC_TARGET_ABORT) | ||
1747 | netdev_err(dev, "PCI master acks target abort\n"); | ||
1748 | if (cfg & PCI_STATUS_REC_MASTER_ABORT) | ||
1749 | netdev_err(dev, "PCI master abort\n"); | ||
1750 | if (cfg & PCI_STATUS_SIG_SYSTEM_ERROR) | ||
1751 | netdev_err(dev, "PCI system error SERR#\n"); | ||
1752 | if (cfg & PCI_STATUS_DETECTED_PARITY) | ||
1753 | netdev_err(dev, "PCI parity error\n"); | ||
1754 | |||
1755 | /* Write the error bits back to clear them. */ | ||
1756 | cfg &= (PCI_STATUS_PARITY | | ||
1757 | PCI_STATUS_SIG_TARGET_ABORT | | ||
1758 | PCI_STATUS_REC_TARGET_ABORT | | ||
1759 | PCI_STATUS_REC_MASTER_ABORT | | ||
1760 | PCI_STATUS_SIG_SYSTEM_ERROR | | ||
1761 | PCI_STATUS_DETECTED_PARITY); | ||
1762 | pci_write_config_word(cp->pdev, PCI_STATUS, cfg); | ||
1763 | } | ||
1764 | |||
1765 | /* For all PCI errors, we should reset the chip. */ | ||
1766 | return 1; | ||
1767 | } | ||
1768 | |||
1769 | /* All non-normal interrupt conditions get serviced here. | ||
1770 | * Returns non-zero if we should just exit the interrupt | ||
1771 | * handler right now (ie. if we reset the card which invalidates | ||
1772 | * all of the other original irq status bits). | ||
1773 | */ | ||
1774 | static int cas_abnormal_irq(struct net_device *dev, struct cas *cp, | ||
1775 | u32 status) | ||
1776 | { | ||
1777 | if (status & INTR_RX_TAG_ERROR) { | ||
1778 | /* corrupt RX tag framing */ | ||
1779 | netif_printk(cp, rx_err, KERN_DEBUG, cp->dev, | ||
1780 | "corrupt rx tag framing\n"); | ||
1781 | spin_lock(&cp->stat_lock[0]); | ||
1782 | cp->net_stats[0].rx_errors++; | ||
1783 | spin_unlock(&cp->stat_lock[0]); | ||
1784 | goto do_reset; | ||
1785 | } | ||
1786 | |||
1787 | if (status & INTR_RX_LEN_MISMATCH) { | ||
1788 | /* length mismatch. */ | ||
1789 | netif_printk(cp, rx_err, KERN_DEBUG, cp->dev, | ||
1790 | "length mismatch for rx frame\n"); | ||
1791 | spin_lock(&cp->stat_lock[0]); | ||
1792 | cp->net_stats[0].rx_errors++; | ||
1793 | spin_unlock(&cp->stat_lock[0]); | ||
1794 | goto do_reset; | ||
1795 | } | ||
1796 | |||
1797 | if (status & INTR_PCS_STATUS) { | ||
1798 | if (cas_pcs_interrupt(dev, cp, status)) | ||
1799 | goto do_reset; | ||
1800 | } | ||
1801 | |||
1802 | if (status & INTR_TX_MAC_STATUS) { | ||
1803 | if (cas_txmac_interrupt(dev, cp, status)) | ||
1804 | goto do_reset; | ||
1805 | } | ||
1806 | |||
1807 | if (status & INTR_RX_MAC_STATUS) { | ||
1808 | if (cas_rxmac_interrupt(dev, cp, status)) | ||
1809 | goto do_reset; | ||
1810 | } | ||
1811 | |||
1812 | if (status & INTR_MAC_CTRL_STATUS) { | ||
1813 | if (cas_mac_interrupt(dev, cp, status)) | ||
1814 | goto do_reset; | ||
1815 | } | ||
1816 | |||
1817 | if (status & INTR_MIF_STATUS) { | ||
1818 | if (cas_mif_interrupt(dev, cp, status)) | ||
1819 | goto do_reset; | ||
1820 | } | ||
1821 | |||
1822 | if (status & INTR_PCI_ERROR_STATUS) { | ||
1823 | if (cas_pci_interrupt(dev, cp, status)) | ||
1824 | goto do_reset; | ||
1825 | } | ||
1826 | return 0; | ||
1827 | |||
1828 | do_reset: | ||
1829 | #if 1 | ||
1830 | atomic_inc(&cp->reset_task_pending); | ||
1831 | atomic_inc(&cp->reset_task_pending_all); | ||
1832 | netdev_err(dev, "reset called in cas_abnormal_irq [0x%x]\n", status); | ||
1833 | schedule_work(&cp->reset_task); | ||
1834 | #else | ||
1835 | atomic_set(&cp->reset_task_pending, CAS_RESET_ALL); | ||
1836 | netdev_err(dev, "reset called in cas_abnormal_irq\n"); | ||
1837 | schedule_work(&cp->reset_task); | ||
1838 | #endif | ||
1839 | return 1; | ||
1840 | } | ||
1841 | |||
1842 | /* NOTE: CAS_TABORT returns 1 or 2 so that it can be used when | ||
1843 | * determining whether to do a netif_stop/wakeup | ||
1844 | */ | ||
1845 | #define CAS_TABORT(x) (((x)->cas_flags & CAS_FLAG_TARGET_ABORT) ? 2 : 1) | ||
1846 | #define CAS_ROUND_PAGE(x) (((x) + PAGE_SIZE - 1) & PAGE_MASK) | ||
1847 | static inline int cas_calc_tabort(struct cas *cp, const unsigned long addr, | ||
1848 | const int len) | ||
1849 | { | ||
1850 | unsigned long off = addr + len; | ||
1851 | |||
1852 | if (CAS_TABORT(cp) == 1) | ||
1853 | return 0; | ||
1854 | if ((CAS_ROUND_PAGE(off) - off) > TX_TARGET_ABORT_LEN) | ||
1855 | return 0; | ||
1856 | return TX_TARGET_ABORT_LEN; | ||
1857 | } | ||
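| /* for chips flagged CAS_FLAG_TARGET_ABORT, a buffer ending within | ||
| * TX_TARGET_ABORT_LEN bytes of a page boundary is split: the caller | ||
| * DMAs the body in place and copies just the tail into a pre-mapped | ||
| * tiny buffer (see cas_xmit_tx_ringN). | ||
| */ | ||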
1858 | |||
1859 | static inline void cas_tx_ringN(struct cas *cp, int ring, int limit) | ||
1860 | { | ||
1861 | struct cas_tx_desc *txds; | ||
1862 | struct sk_buff **skbs; | ||
1863 | struct net_device *dev = cp->dev; | ||
1864 | int entry, count; | ||
1865 | |||
1866 | spin_lock(&cp->tx_lock[ring]); | ||
1867 | txds = cp->init_txds[ring]; | ||
1868 | skbs = cp->tx_skbs[ring]; | ||
1869 | entry = cp->tx_old[ring]; | ||
1870 | |||
1871 | count = TX_BUFF_COUNT(ring, entry, limit); | ||
1872 | while (entry != limit) { | ||
1873 | struct sk_buff *skb = skbs[entry]; | ||
1874 | dma_addr_t daddr; | ||
1875 | u32 dlen; | ||
1876 | int frag; | ||
1877 | |||
1878 | if (!skb) { | ||
1879 | /* this should never occur */ | ||
1880 | entry = TX_DESC_NEXT(ring, entry); | ||
1881 | continue; | ||
1882 | } | ||
1883 | |||
1884 | /* however, we might get only a partial skb release. */ | ||
1885 | count -= skb_shinfo(skb)->nr_frags + | ||
1886 | cp->tx_tiny_use[ring][entry].nbufs + 1; | ||
1887 | if (count < 0) | ||
1888 | break; | ||
1889 | |||
1890 | netif_printk(cp, tx_done, KERN_DEBUG, cp->dev, | ||
1891 | "tx[%d] done, slot %d\n", ring, entry); | ||
1892 | |||
1893 | skbs[entry] = NULL; | ||
1894 | cp->tx_tiny_use[ring][entry].nbufs = 0; | ||
1895 | |||
1896 | for (frag = 0; frag <= skb_shinfo(skb)->nr_frags; frag++) { | ||
1897 | struct cas_tx_desc *txd = txds + entry; | ||
1898 | |||
1899 | daddr = le64_to_cpu(txd->buffer); | ||
1900 | dlen = CAS_VAL(TX_DESC_BUFLEN, | ||
1901 | le64_to_cpu(txd->control)); | ||
1902 | pci_unmap_page(cp->pdev, daddr, dlen, | ||
1903 | PCI_DMA_TODEVICE); | ||
1904 | entry = TX_DESC_NEXT(ring, entry); | ||
1905 | |||
1906 | /* tiny buffer may follow */ | ||
1907 | if (cp->tx_tiny_use[ring][entry].used) { | ||
1908 | cp->tx_tiny_use[ring][entry].used = 0; | ||
1909 | entry = TX_DESC_NEXT(ring, entry); | ||
1910 | } | ||
1911 | } | ||
1912 | |||
1913 | spin_lock(&cp->stat_lock[ring]); | ||
1914 | cp->net_stats[ring].tx_packets++; | ||
1915 | cp->net_stats[ring].tx_bytes += skb->len; | ||
1916 | spin_unlock(&cp->stat_lock[ring]); | ||
1917 | dev_kfree_skb_irq(skb); | ||
1918 | } | ||
1919 | cp->tx_old[ring] = entry; | ||
1920 | |||
1921 | /* this is wrong for multiple tx rings. the net device needs | ||
1922 | * multiple queues for this to do the right thing. we wait | ||
1923 | * for 2*packets to be available when using tiny buffers | ||
1924 | */ | ||
1925 | if (netif_queue_stopped(dev) && | ||
1926 | (TX_BUFFS_AVAIL(cp, ring) > CAS_TABORT(cp)*(MAX_SKB_FRAGS + 1))) | ||
1927 | netif_wake_queue(dev); | ||
1928 | spin_unlock(&cp->tx_lock[ring]); | ||
1929 | } | ||
1930 | |||
1931 | static void cas_tx(struct net_device *dev, struct cas *cp, | ||
1932 | u32 status) | ||
1933 | { | ||
1934 | int limit, ring; | ||
1935 | #ifdef USE_TX_COMPWB | ||
1936 | u64 compwb = le64_to_cpu(cp->init_block->tx_compwb); | ||
| #else | ||
| u64 compwb = 0; /* no writeback block; keeps the debug print below valid */ | ||
1937 | #endif | ||
1938 | netif_printk(cp, intr, KERN_DEBUG, cp->dev, | ||
1939 | "tx interrupt, status: 0x%x, %llx\n", | ||
1940 | status, (unsigned long long)compwb); | ||
1941 | /* process all the rings */ | ||
1942 | for (ring = 0; ring < N_TX_RINGS; ring++) { | ||
1943 | #ifdef USE_TX_COMPWB | ||
1944 | /* use the completion writeback registers */ | ||
1945 | limit = (CAS_VAL(TX_COMPWB_MSB, compwb) << 8) | | ||
1946 | CAS_VAL(TX_COMPWB_LSB, compwb); | ||
1947 | compwb = TX_COMPWB_NEXT(compwb); | ||
1948 | #else | ||
1949 | limit = readl(cp->regs + REG_TX_COMPN(ring)); | ||
1950 | #endif | ||
1951 | if (cp->tx_old[ring] != limit) | ||
1952 | cas_tx_ringN(cp, ring, limit); | ||
1953 | } | ||
1954 | } | ||
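| /* the writeback word packs one completion index per tx ring as an | ||
| * MSB:LSB byte pair; TX_COMPWB_NEXT() steps compwb to the next ring's | ||
| * pair on each pass through the loop. | ||
| */ | ||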
1955 | |||
1956 | |||
1957 | static int cas_rx_process_pkt(struct cas *cp, struct cas_rx_comp *rxc, | ||
1958 | int entry, const u64 *words, | ||
1959 | struct sk_buff **skbref) | ||
1960 | { | ||
1961 | int dlen, hlen, len, i, alloclen; | ||
1962 | int off, swivel = RX_SWIVEL_OFF_VAL; | ||
1963 | struct cas_page *page; | ||
1964 | struct sk_buff *skb; | ||
1965 | void *addr, *crcaddr; | ||
1966 | __sum16 csum; | ||
1967 | char *p; | ||
1968 | |||
1969 | hlen = CAS_VAL(RX_COMP2_HDR_SIZE, words[1]); | ||
1970 | dlen = CAS_VAL(RX_COMP1_DATA_SIZE, words[0]); | ||
1971 | len = hlen + dlen; | ||
1972 | |||
1973 | if (RX_COPY_ALWAYS || (words[2] & RX_COMP3_SMALL_PKT)) | ||
1974 | alloclen = len; | ||
1975 | else | ||
1976 | alloclen = max(hlen, RX_COPY_MIN); | ||
1977 | |||
1978 | skb = dev_alloc_skb(alloclen + swivel + cp->crc_size); | ||
1979 | if (skb == NULL) | ||
1980 | return -1; | ||
1981 | |||
1982 | *skbref = skb; | ||
1983 | skb_reserve(skb, swivel); | ||
1984 | |||
1985 | p = skb->data; | ||
1986 | addr = crcaddr = NULL; | ||
1987 | if (hlen) { /* always copy header pages */ | ||
1988 | i = CAS_VAL(RX_COMP2_HDR_INDEX, words[1]); | ||
1989 | page = cp->rx_pages[CAS_VAL(RX_INDEX_RING, i)][CAS_VAL(RX_INDEX_NUM, i)]; | ||
1990 | off = CAS_VAL(RX_COMP2_HDR_OFF, words[1]) * 0x100 + | ||
1991 | swivel; | ||
1992 | |||
1993 | i = hlen; | ||
1994 | if (!dlen) /* attach FCS */ | ||
1995 | i += cp->crc_size; | ||
1996 | pci_dma_sync_single_for_cpu(cp->pdev, page->dma_addr + off, i, | ||
1997 | PCI_DMA_FROMDEVICE); | ||
1998 | addr = cas_page_map(page->buffer); | ||
1999 | memcpy(p, addr + off, i); | ||
2000 | pci_dma_sync_single_for_device(cp->pdev, page->dma_addr + off, i, | ||
2001 | PCI_DMA_FROMDEVICE); | ||
2002 | cas_page_unmap(addr); | ||
2003 | RX_USED_ADD(page, 0x100); | ||
2004 | p += hlen; | ||
2005 | swivel = 0; | ||
2006 | } | ||
2007 | |||
2008 | |||
2009 | if (alloclen < (hlen + dlen)) { | ||
2010 | skb_frag_t *frag = skb_shinfo(skb)->frags; | ||
2011 | |||
2012 | /* normal or jumbo packets. we use frags */ | ||
2013 | i = CAS_VAL(RX_COMP1_DATA_INDEX, words[0]); | ||
2014 | page = cp->rx_pages[CAS_VAL(RX_INDEX_RING, i)][CAS_VAL(RX_INDEX_NUM, i)]; | ||
2015 | off = CAS_VAL(RX_COMP1_DATA_OFF, words[0]) + swivel; | ||
2016 | |||
2017 | hlen = min(cp->page_size - off, dlen); | ||
2018 | if (hlen < 0) { | ||
2019 | netif_printk(cp, rx_err, KERN_DEBUG, cp->dev, | ||
2020 | "rx page overflow: %d\n", hlen); | ||
2021 | dev_kfree_skb_irq(skb); | ||
2022 | return -1; | ||
2023 | } | ||
2024 | i = hlen; | ||
2025 | if (i == dlen) /* attach FCS */ | ||
2026 | i += cp->crc_size; | ||
2027 | pci_dma_sync_single_for_cpu(cp->pdev, page->dma_addr + off, i, | ||
2028 | PCI_DMA_FROMDEVICE); | ||
2029 | |||
2030 | /* make sure we always copy a header */ | ||
2031 | swivel = 0; | ||
2032 | if (p == (char *) skb->data) { /* not split */ | ||
2033 | addr = cas_page_map(page->buffer); | ||
2034 | memcpy(p, addr + off, RX_COPY_MIN); | ||
2035 | pci_dma_sync_single_for_device(cp->pdev, page->dma_addr + off, i, | ||
2036 | PCI_DMA_FROMDEVICE); | ||
2037 | cas_page_unmap(addr); | ||
2038 | off += RX_COPY_MIN; | ||
2039 | swivel = RX_COPY_MIN; | ||
2040 | RX_USED_ADD(page, cp->mtu_stride); | ||
2041 | } else { | ||
2042 | RX_USED_ADD(page, hlen); | ||
2043 | } | ||
2044 | skb_put(skb, alloclen); | ||
2045 | |||
2046 | skb_shinfo(skb)->nr_frags++; | ||
2047 | skb->data_len += hlen - swivel; | ||
2048 | skb->truesize += hlen - swivel; | ||
2049 | skb->len += hlen - swivel; | ||
2050 | |||
2051 | get_page(page->buffer); | ||
2052 | frag->page = page->buffer; | ||
2053 | frag->page_offset = off; | ||
2054 | frag->size = hlen - swivel; | ||
2055 | |||
2056 | /* any more data? */ | ||
2057 | if ((words[0] & RX_COMP1_SPLIT_PKT) && ((dlen -= hlen) > 0)) { | ||
2058 | hlen = dlen; | ||
2059 | off = 0; | ||
2060 | |||
2061 | i = CAS_VAL(RX_COMP2_NEXT_INDEX, words[1]); | ||
2062 | page = cp->rx_pages[CAS_VAL(RX_INDEX_RING, i)][CAS_VAL(RX_INDEX_NUM, i)]; | ||
2063 | pci_dma_sync_single_for_cpu(cp->pdev, page->dma_addr, | ||
2064 | hlen + cp->crc_size, | ||
2065 | PCI_DMA_FROMDEVICE); | ||
2066 | pci_dma_sync_single_for_device(cp->pdev, page->dma_addr, | ||
2067 | hlen + cp->crc_size, | ||
2068 | PCI_DMA_FROMDEVICE); | ||
2069 | |||
2070 | skb_shinfo(skb)->nr_frags++; | ||
2071 | skb->data_len += hlen; | ||
2072 | skb->len += hlen; | ||
2073 | frag++; | ||
2074 | |||
2075 | get_page(page->buffer); | ||
2076 | frag->page = page->buffer; | ||
2077 | frag->page_offset = 0; | ||
2078 | frag->size = hlen; | ||
2079 | RX_USED_ADD(page, hlen + cp->crc_size); | ||
2080 | } | ||
2081 | |||
2082 | if (cp->crc_size) { | ||
2083 | addr = cas_page_map(page->buffer); | ||
2084 | crcaddr = addr + off + hlen; | ||
2085 | } | ||
2086 | |||
2087 | } else { | ||
2088 | /* copying packet */ | ||
2089 | if (!dlen) | ||
2090 | goto end_copy_pkt; | ||
2091 | |||
2092 | i = CAS_VAL(RX_COMP1_DATA_INDEX, words[0]); | ||
2093 | page = cp->rx_pages[CAS_VAL(RX_INDEX_RING, i)][CAS_VAL(RX_INDEX_NUM, i)]; | ||
2094 | off = CAS_VAL(RX_COMP1_DATA_OFF, words[0]) + swivel; | ||
2095 | hlen = min(cp->page_size - off, dlen); | ||
2096 | if (hlen < 0) { | ||
2097 | netif_printk(cp, rx_err, KERN_DEBUG, cp->dev, | ||
2098 | "rx page overflow: %d\n", hlen); | ||
2099 | dev_kfree_skb_irq(skb); | ||
2100 | return -1; | ||
2101 | } | ||
2102 | i = hlen; | ||
2103 | if (i == dlen) /* attach FCS */ | ||
2104 | i += cp->crc_size; | ||
2105 | pci_dma_sync_single_for_cpu(cp->pdev, page->dma_addr + off, i, | ||
2106 | PCI_DMA_FROMDEVICE); | ||
2107 | addr = cas_page_map(page->buffer); | ||
2108 | memcpy(p, addr + off, i); | ||
2109 | pci_dma_sync_single_for_device(cp->pdev, page->dma_addr + off, i, | ||
2110 | PCI_DMA_FROMDEVICE); | ||
2111 | cas_page_unmap(addr); | ||
2112 | if (p == (char *) skb->data) /* not split */ | ||
2113 | RX_USED_ADD(page, cp->mtu_stride); | ||
2114 | else | ||
2115 | RX_USED_ADD(page, i); | ||
2116 | |||
2117 | /* any more data? */ | ||
2118 | if ((words[0] & RX_COMP1_SPLIT_PKT) && ((dlen -= hlen) > 0)) { | ||
2119 | p += hlen; | ||
2120 | i = CAS_VAL(RX_COMP2_NEXT_INDEX, words[1]); | ||
2121 | page = cp->rx_pages[CAS_VAL(RX_INDEX_RING, i)][CAS_VAL(RX_INDEX_NUM, i)]; | ||
2122 | pci_dma_sync_single_for_cpu(cp->pdev, page->dma_addr, | ||
2123 | dlen + cp->crc_size, | ||
2124 | PCI_DMA_FROMDEVICE); | ||
2125 | addr = cas_page_map(page->buffer); | ||
2126 | memcpy(p, addr, dlen + cp->crc_size); | ||
2127 | pci_dma_sync_single_for_device(cp->pdev, page->dma_addr, | ||
2128 | dlen + cp->crc_size, | ||
2129 | PCI_DMA_FROMDEVICE); | ||
2130 | cas_page_unmap(addr); | ||
2131 | RX_USED_ADD(page, dlen + cp->crc_size); | ||
2132 | } | ||
2133 | end_copy_pkt: | ||
2134 | if (cp->crc_size) { | ||
2135 | addr = NULL; | ||
2136 | crcaddr = skb->data + alloclen; | ||
2137 | } | ||
2138 | skb_put(skb, alloclen); | ||
2139 | } | ||
2140 | |||
2141 | csum = (__force __sum16)htons(CAS_VAL(RX_COMP4_TCP_CSUM, words[3])); | ||
2142 | if (cp->crc_size) { | ||
2143 | /* checksum includes FCS. strip it out. */ | ||
2144 | csum = csum_fold(csum_partial(crcaddr, cp->crc_size, | ||
2145 | csum_unfold(csum))); | ||
2146 | if (addr) | ||
2147 | cas_page_unmap(addr); | ||
2148 | } | ||
2149 | skb->protocol = eth_type_trans(skb, cp->dev); | ||
2150 | if (skb->protocol == htons(ETH_P_IP)) { | ||
2151 | skb->csum = csum_unfold(~csum); | ||
2152 | skb->ip_summed = CHECKSUM_COMPLETE; | ||
2153 | } else | ||
2154 | skb_checksum_none_assert(skb); | ||
2155 | return len; | ||
2156 | } | ||
2157 | |||
2158 | |||
2159 | /* we can handle up to 64 rx flows at a time. we do the same thing | ||
2160 | * as nonreassm except that we batch up the buffers. | ||
2161 | * NOTE: we currently just treat each flow as a bunch of packets that | ||
2162 | * we pass up. a better way would be to coalesce the packets | ||
2163 | * into a jumbo packet. to do that, we need to do the following: | ||
2164 | * 1) the first packet will have a clean split between header and | ||
2165 | * data. save both. | ||
2166 | * 2) each time the next flow packet comes in, extend the | ||
2167 | * data length and merge the checksums. | ||
2168 | * 3) on flow release, fix up the header. | ||
2169 | * 4) make sure the higher layer doesn't care. | ||
2170 | * because packets get coalesced, we shouldn't run into fragment count | ||
2171 | * issues. | ||
2172 | */ | ||
2173 | static inline void cas_rx_flow_pkt(struct cas *cp, const u64 *words, | ||
2174 | struct sk_buff *skb) | ||
2175 | { | ||
2176 | int flowid = CAS_VAL(RX_COMP3_FLOWID, words[2]) & (N_RX_FLOWS - 1); | ||
2177 | struct sk_buff_head *flow = &cp->rx_flows[flowid]; | ||
2178 | |||
2179 | /* this is protected at a higher layer, so no need to | ||
2180 | * do any additional locking here. stick the buffer | ||
2181 | * at the end. | ||
2182 | */ | ||
2183 | __skb_queue_tail(flow, skb); | ||
2184 | if (words[0] & RX_COMP1_RELEASE_FLOW) { | ||
2185 | while ((skb = __skb_dequeue(flow))) { | ||
2186 | cas_skb_release(skb); | ||
2187 | } | ||
2188 | } | ||
2189 | } | ||
2190 | |||
2191 | /* put rx descriptor back on ring. if a buffer is in use by a higher | ||
2192 | * layer, this will need to put in a replacement. | ||
2193 | */ | ||
2194 | static void cas_post_page(struct cas *cp, const int ring, const int index) | ||
2195 | { | ||
2196 | cas_page_t *new; | ||
2197 | int entry; | ||
2198 | |||
2199 | entry = cp->rx_old[ring]; | ||
2200 | |||
2201 | new = cas_page_swap(cp, ring, index); | ||
2202 | cp->init_rxds[ring][entry].buffer = cpu_to_le64(new->dma_addr); | ||
2203 | cp->init_rxds[ring][entry].index = | ||
2204 | cpu_to_le64(CAS_BASE(RX_INDEX_NUM, index) | | ||
2205 | CAS_BASE(RX_INDEX_RING, ring)); | ||
2206 | |||
2207 | entry = RX_DESC_ENTRY(ring, entry + 1); | ||
2208 | cp->rx_old[ring] = entry; | ||
2209 | |||
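| /* descriptors are handed back to the chip in groups of four, so the | ||
| * kick register is only advanced on 4-entry boundaries (cf. the | ||
| * "entry & 0x3" accounting in cas_post_rxds_ringN below). | ||
| */ | ||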
2210 | if (entry % 4) | ||
2211 | return; | ||
2212 | |||
2213 | if (ring == 0) | ||
2214 | writel(entry, cp->regs + REG_RX_KICK); | ||
2215 | else if ((N_RX_DESC_RINGS > 1) && | ||
2216 | (cp->cas_flags & CAS_FLAG_REG_PLUS)) | ||
2217 | writel(entry, cp->regs + REG_PLUS_RX_KICK1); | ||
2218 | } | ||
2219 | |||
2220 | |||
2221 | /* only when things are bad */ | ||
2222 | static int cas_post_rxds_ringN(struct cas *cp, int ring, int num) | ||
2223 | { | ||
2224 | unsigned int entry, last, count, released; | ||
2225 | int cluster; | ||
2226 | cas_page_t **page = cp->rx_pages[ring]; | ||
2227 | |||
2228 | entry = cp->rx_old[ring]; | ||
2229 | |||
2230 | netif_printk(cp, intr, KERN_DEBUG, cp->dev, | ||
2231 | "rxd[%d] interrupt, done: %d\n", ring, entry); | ||
2232 | |||
2233 | cluster = -1; | ||
2234 | count = entry & 0x3; | ||
2235 | last = RX_DESC_ENTRY(ring, num ? entry + num - 4 : entry - 4); | ||
2236 | released = 0; | ||
2237 | while (entry != last) { | ||
2238 | /* make a new buffer if it's still in use */ | ||
2239 | if (page_count(page[entry]->buffer) > 1) { | ||
2240 | cas_page_t *new = cas_page_dequeue(cp); | ||
2241 | if (!new) { | ||
2242 | /* let the timer know that we need to | ||
2243 | * do this again | ||
2244 | */ | ||
2245 | cp->cas_flags |= CAS_FLAG_RXD_POST(ring); | ||
2246 | if (!timer_pending(&cp->link_timer)) | ||
2247 | mod_timer(&cp->link_timer, jiffies + | ||
2248 | CAS_LINK_FAST_TIMEOUT); | ||
2249 | cp->rx_old[ring] = entry; | ||
2250 | cp->rx_last[ring] = num ? num - released : 0; | ||
2251 | return -ENOMEM; | ||
2252 | } | ||
2253 | spin_lock(&cp->rx_inuse_lock); | ||
2254 | list_add(&page[entry]->list, &cp->rx_inuse_list); | ||
2255 | spin_unlock(&cp->rx_inuse_lock); | ||
2256 | cp->init_rxds[ring][entry].buffer = | ||
2257 | cpu_to_le64(new->dma_addr); | ||
2258 | page[entry] = new; | ||
2259 | |||
2260 | } | ||
2261 | |||
2262 | if (++count == 4) { | ||
2263 | cluster = entry; | ||
2264 | count = 0; | ||
2265 | } | ||
2266 | released++; | ||
2267 | entry = RX_DESC_ENTRY(ring, entry + 1); | ||
2268 | } | ||
2269 | cp->rx_old[ring] = entry; | ||
2270 | |||
2271 | if (cluster < 0) | ||
2272 | return 0; | ||
2273 | |||
2274 | if (ring == 0) | ||
2275 | writel(cluster, cp->regs + REG_RX_KICK); | ||
2276 | else if ((N_RX_DESC_RINGS > 1) && | ||
2277 | (cp->cas_flags & CAS_FLAG_REG_PLUS)) | ||
2278 | writel(cluster, cp->regs + REG_PLUS_RX_KICK1); | ||
2279 | return 0; | ||
2280 | } | ||
2281 | |||
2282 | |||
2283 | /* process a completion ring. packets are set up in three basic ways: | ||
2284 | * small packets: header + data copied into a single skb buffer. | ||
2285 | * large packets: header and data share a single page, attached as a frag. | ||
2286 | * split packets: header in a separate buffer from data. | ||
2287 | * data may be in multiple pages. data may be > 256 | ||
2288 | * bytes but in a single page. | ||
2289 | * | ||
2290 | * NOTE: RX page posting is done in this routine as well. while there's | ||
2291 | * the capability of using multiple RX completion rings, it isn't | ||
2292 | * really worthwhile due to the fact that the page posting will | ||
2293 | * force serialization on the single descriptor ring. | ||
2294 | */ | ||
2295 | static int cas_rx_ringN(struct cas *cp, int ring, int budget) | ||
2296 | { | ||
2297 | struct cas_rx_comp *rxcs = cp->init_rxcs[ring]; | ||
2298 | int entry, drops; | ||
2299 | int npackets = 0; | ||
2300 | |||
2301 | netif_printk(cp, intr, KERN_DEBUG, cp->dev, | ||
2302 | "rx[%d] interrupt, done: %d/%d\n", | ||
2303 | ring, | ||
2304 | readl(cp->regs + REG_RX_COMP_HEAD), cp->rx_new[ring]); | ||
2305 | |||
2306 | entry = cp->rx_new[ring]; | ||
2307 | drops = 0; | ||
2308 | while (1) { | ||
2309 | struct cas_rx_comp *rxc = rxcs + entry; | ||
2310 | struct sk_buff *uninitialized_var(skb); | ||
2311 | int type, len; | ||
2312 | u64 words[4]; | ||
2313 | int i, dring; | ||
2314 | |||
2315 | words[0] = le64_to_cpu(rxc->word1); | ||
2316 | words[1] = le64_to_cpu(rxc->word2); | ||
2317 | words[2] = le64_to_cpu(rxc->word3); | ||
2318 | words[3] = le64_to_cpu(rxc->word4); | ||
2319 | |||
2320 | /* don't touch if still owned by hw */ | ||
2321 | type = CAS_VAL(RX_COMP1_TYPE, words[0]); | ||
2322 | if (type == 0) | ||
2323 | break; | ||
2324 | |||
2325 | /* hw hasn't cleared the zero bit yet */ | ||
2326 | if (words[3] & RX_COMP4_ZERO) { | ||
2327 | break; | ||
2328 | } | ||
2329 | |||
2330 | /* get info on the packet */ | ||
2331 | if (words[3] & (RX_COMP4_LEN_MISMATCH | RX_COMP4_BAD)) { | ||
2332 | spin_lock(&cp->stat_lock[ring]); | ||
2333 | cp->net_stats[ring].rx_errors++; | ||
2334 | if (words[3] & RX_COMP4_LEN_MISMATCH) | ||
2335 | cp->net_stats[ring].rx_length_errors++; | ||
2336 | if (words[3] & RX_COMP4_BAD) | ||
2337 | cp->net_stats[ring].rx_crc_errors++; | ||
2338 | spin_unlock(&cp->stat_lock[ring]); | ||
2339 | |||
2340 | /* We'll just return it to Cassini. */ | ||
2341 | drop_it: | ||
2342 | spin_lock(&cp->stat_lock[ring]); | ||
2343 | ++cp->net_stats[ring].rx_dropped; | ||
2344 | spin_unlock(&cp->stat_lock[ring]); | ||
2345 | goto next; | ||
2346 | } | ||
2347 | |||
2348 | len = cas_rx_process_pkt(cp, rxc, entry, words, &skb); | ||
2349 | if (len < 0) { | ||
2350 | ++drops; | ||
2351 | goto drop_it; | ||
2352 | } | ||
2353 | |||
2354 | /* see if it's a flow re-assembly or not. the driver | ||
2355 | * itself handles releasing flow packets up the stack. | ||
2356 | */ | ||
2357 | if (RX_DONT_BATCH || (type == 0x2)) { | ||
2358 | /* non-reassm: these always get released */ | ||
2359 | cas_skb_release(skb); | ||
2360 | } else { | ||
2361 | cas_rx_flow_pkt(cp, words, skb); | ||
2362 | } | ||
2363 | |||
2364 | spin_lock(&cp->stat_lock[ring]); | ||
2365 | cp->net_stats[ring].rx_packets++; | ||
2366 | cp->net_stats[ring].rx_bytes += len; | ||
2367 | spin_unlock(&cp->stat_lock[ring]); | ||
2368 | |||
2369 | next: | ||
2370 | npackets++; | ||
2371 | |||
2372 | /* should it be released? */ | ||
2373 | if (words[0] & RX_COMP1_RELEASE_HDR) { | ||
2374 | i = CAS_VAL(RX_COMP2_HDR_INDEX, words[1]); | ||
2375 | dring = CAS_VAL(RX_INDEX_RING, i); | ||
2376 | i = CAS_VAL(RX_INDEX_NUM, i); | ||
2377 | cas_post_page(cp, dring, i); | ||
2378 | } | ||
2379 | |||
2380 | if (words[0] & RX_COMP1_RELEASE_DATA) { | ||
2381 | i = CAS_VAL(RX_COMP1_DATA_INDEX, words[0]); | ||
2382 | dring = CAS_VAL(RX_INDEX_RING, i); | ||
2383 | i = CAS_VAL(RX_INDEX_NUM, i); | ||
2384 | cas_post_page(cp, dring, i); | ||
2385 | } | ||
2386 | |||
2387 | if (words[0] & RX_COMP1_RELEASE_NEXT) { | ||
2388 | i = CAS_VAL(RX_COMP2_NEXT_INDEX, words[1]); | ||
2389 | dring = CAS_VAL(RX_INDEX_RING, i); | ||
2390 | i = CAS_VAL(RX_INDEX_NUM, i); | ||
2391 | cas_post_page(cp, dring, i); | ||
2392 | } | ||
2393 | |||
2394 | /* skip to the next entry */ | ||
2395 | entry = RX_COMP_ENTRY(ring, entry + 1 + | ||
2396 | CAS_VAL(RX_COMP1_SKIP, words[0])); | ||
2397 | #ifdef USE_NAPI | ||
2398 | if (budget && (npackets >= budget)) | ||
2399 | break; | ||
2400 | #endif | ||
2401 | } | ||
2402 | cp->rx_new[ring] = entry; | ||
2403 | |||
2404 | if (drops) | ||
2405 | netdev_info(cp->dev, "Memory squeeze, deferring packet\n"); | ||
2406 | return npackets; | ||
2407 | } | ||
2408 | |||
2409 | |||
2410 | /* put completion entries back on the ring */ | ||
2411 | static void cas_post_rxcs_ringN(struct net_device *dev, | ||
2412 | struct cas *cp, int ring) | ||
2413 | { | ||
2414 | struct cas_rx_comp *rxc = cp->init_rxcs[ring]; | ||
2415 | int last, entry; | ||
2416 | |||
2417 | last = cp->rx_cur[ring]; | ||
2418 | entry = cp->rx_new[ring]; | ||
2419 | netif_printk(cp, intr, KERN_DEBUG, dev, | ||
2420 | "rxc[%d] interrupt, done: %d/%d\n", | ||
2421 | ring, readl(cp->regs + REG_RX_COMP_HEAD), entry); | ||
2422 | |||
2423 | /* zero and re-mark descriptors */ | ||
2424 | while (last != entry) { | ||
2425 | cas_rxc_init(rxc + last); | ||
2426 | last = RX_COMP_ENTRY(ring, last + 1); | ||
2427 | } | ||
2428 | cp->rx_cur[ring] = last; | ||
2429 | |||
2430 | if (ring == 0) | ||
2431 | writel(last, cp->regs + REG_RX_COMP_TAIL); | ||
2432 | else if (cp->cas_flags & CAS_FLAG_REG_PLUS) | ||
2433 | writel(last, cp->regs + REG_PLUS_RX_COMPN_TAIL(ring)); | ||
2434 | } | ||
2435 | |||
2436 | |||
2437 | |||
2438 | /* cassini can use all four PCI interrupts for the completion ring. | ||
2439 | * rings 3 and 4 are identical | ||
2440 | */ | ||
2441 | #if defined(USE_PCI_INTC) || defined(USE_PCI_INTD) | ||
2442 | static inline void cas_handle_irqN(struct net_device *dev, | ||
2443 | struct cas *cp, const u32 status, | ||
2444 | const int ring) | ||
2445 | { | ||
2446 | if (status & (INTR_RX_COMP_FULL_ALT | INTR_RX_COMP_AF_ALT)) | ||
2447 | cas_post_rxcs_ringN(dev, cp, ring); | ||
2448 | } | ||
2449 | |||
2450 | static irqreturn_t cas_interruptN(int irq, void *dev_id) | ||
2451 | { | ||
2452 | struct net_device *dev = dev_id; | ||
2453 | struct cas *cp = netdev_priv(dev); | ||
2454 | unsigned long flags; | ||
2455 | int ring = (irq == cp->pci_irq_INTC) ? 2 : 3; | ||
2456 | u32 status = readl(cp->regs + REG_PLUS_INTRN_STATUS(ring)); | ||
2457 | | ||
2458 | /* check for shared irq */ | ||
2459 | if (status == 0) | ||
2460 | return IRQ_NONE; | ||
2461 | | ||
2463 | spin_lock_irqsave(&cp->lock, flags); | ||
2464 | if (status & INTR_RX_DONE_ALT) { /* handle rx separately */ | ||
2465 | #ifdef USE_NAPI | ||
2466 | cas_mask_intr(cp); | ||
2467 | napi_schedule(&cp->napi); | ||
2468 | #else | ||
2469 | cas_rx_ringN(cp, ring, 0); | ||
2470 | #endif | ||
2471 | status &= ~INTR_RX_DONE_ALT; | ||
2472 | } | ||
2473 | |||
2474 | if (status) | ||
2475 | cas_handle_irqN(dev, cp, status, ring); | ||
2476 | spin_unlock_irqrestore(&cp->lock, flags); | ||
2477 | return IRQ_HANDLED; | ||
2478 | } | ||
2479 | #endif | ||
2480 | |||
2481 | #ifdef USE_PCI_INTB | ||
2482 | /* everything but rx packets */ | ||
2483 | static inline void cas_handle_irq1(struct cas *cp, const u32 status) | ||
2484 | { | ||
2485 | if (status & INTR_RX_BUF_UNAVAIL_1) { | ||
2486 | /* Frame arrived, no free RX buffers available. | ||
2487 | * NOTE: we can get this on a link transition. */ | ||
2488 | cas_post_rxds_ringN(cp, 1, 0); | ||
2489 | spin_lock(&cp->stat_lock[1]); | ||
2490 | cp->net_stats[1].rx_dropped++; | ||
2491 | spin_unlock(&cp->stat_lock[1]); | ||
2492 | } | ||
2493 | |||
2494 | if (status & INTR_RX_BUF_AE_1) | ||
2495 | cas_post_rxds_ringN(cp, 1, RX_DESC_RINGN_SIZE(1) - | ||
2496 | RX_AE_FREEN_VAL(1)); | ||
2497 | |||
2498 | if (status & (INTR_RX_COMP_AF | INTR_RX_COMP_FULL)) | ||
2499 | cas_post_rxcs_ringN(cp, 1); | ||
2500 | } | ||
2501 | |||
2502 | /* ring 2 handles a few more events than 3 and 4 */ | ||
2503 | static irqreturn_t cas_interrupt1(int irq, void *dev_id) | ||
2504 | { | ||
2505 | struct net_device *dev = dev_id; | ||
2506 | struct cas *cp = netdev_priv(dev); | ||
2507 | unsigned long flags; | ||
2508 | u32 status = readl(cp->regs + REG_PLUS_INTRN_STATUS(1)); | ||
2509 | |||
2510 | /* check for shared interrupt */ | ||
2511 | if (status == 0) | ||
2512 | return IRQ_NONE; | ||
2513 | |||
2514 | spin_lock_irqsave(&cp->lock, flags); | ||
2515 | if (status & INTR_RX_DONE_ALT) { /* handle rx separately */ | ||
2516 | #ifdef USE_NAPI | ||
2517 | cas_mask_intr(cp); | ||
2518 | napi_schedule(&cp->napi); | ||
2519 | #else | ||
2520 | cas_rx_ringN(cp, 1, 0); | ||
2521 | #endif | ||
2522 | status &= ~INTR_RX_DONE_ALT; | ||
2523 | } | ||
2524 | if (status) | ||
2525 | cas_handle_irq1(cp, status); | ||
2526 | spin_unlock_irqrestore(&cp->lock, flags); | ||
2527 | return IRQ_HANDLED; | ||
2528 | } | ||
2529 | #endif | ||
2530 | |||
2531 | static inline void cas_handle_irq(struct net_device *dev, | ||
2532 | struct cas *cp, const u32 status) | ||
2533 | { | ||
2534 | /* housekeeping interrupts */ | ||
2535 | if (status & INTR_ERROR_MASK) | ||
2536 | cas_abnormal_irq(dev, cp, status); | ||
2537 | |||
2538 | if (status & INTR_RX_BUF_UNAVAIL) { | ||
2539 | /* Frame arrived, no free RX buffers available. | ||
2540 | * NOTE: we can get this on a link transition. | ||
2541 | */ | ||
2542 | cas_post_rxds_ringN(cp, 0, 0); | ||
2543 | spin_lock(&cp->stat_lock[0]); | ||
2544 | cp->net_stats[0].rx_dropped++; | ||
2545 | spin_unlock(&cp->stat_lock[0]); | ||
2546 | } else if (status & INTR_RX_BUF_AE) { | ||
2547 | cas_post_rxds_ringN(cp, 0, RX_DESC_RINGN_SIZE(0) - | ||
2548 | RX_AE_FREEN_VAL(0)); | ||
2549 | } | ||
2550 | |||
2551 | if (status & (INTR_RX_COMP_AF | INTR_RX_COMP_FULL)) | ||
2552 | cas_post_rxcs_ringN(dev, cp, 0); | ||
2553 | } | ||
2554 | |||
2555 | static irqreturn_t cas_interrupt(int irq, void *dev_id) | ||
2556 | { | ||
2557 | struct net_device *dev = dev_id; | ||
2558 | struct cas *cp = netdev_priv(dev); | ||
2559 | unsigned long flags; | ||
2560 | u32 status = readl(cp->regs + REG_INTR_STATUS); | ||
2561 | |||
2562 | if (status == 0) | ||
2563 | return IRQ_NONE; | ||
2564 | |||
2565 | spin_lock_irqsave(&cp->lock, flags); | ||
2566 | if (status & (INTR_TX_ALL | INTR_TX_INTME)) { | ||
2567 | cas_tx(dev, cp, status); | ||
2568 | status &= ~(INTR_TX_ALL | INTR_TX_INTME); | ||
2569 | } | ||
2570 | |||
2571 | if (status & INTR_RX_DONE) { | ||
2572 | #ifdef USE_NAPI | ||
2573 | cas_mask_intr(cp); | ||
2574 | napi_schedule(&cp->napi); | ||
2575 | #else | ||
2576 | cas_rx_ringN(cp, 0, 0); | ||
2577 | #endif | ||
2578 | status &= ~INTR_RX_DONE; | ||
2579 | } | ||
2580 | |||
2581 | if (status) | ||
2582 | cas_handle_irq(dev, cp, status); | ||
2583 | spin_unlock_irqrestore(&cp->lock, flags); | ||
2584 | return IRQ_HANDLED; | ||
2585 | } | ||
2586 | |||
2587 | |||
2588 | #ifdef USE_NAPI | ||
2589 | static int cas_poll(struct napi_struct *napi, int budget) | ||
2590 | { | ||
2591 | struct cas *cp = container_of(napi, struct cas, napi); | ||
2592 | struct net_device *dev = cp->dev; | ||
2593 | int i, enable_intr, credits; | ||
2594 | u32 status = readl(cp->regs + REG_INTR_STATUS); | ||
2595 | unsigned long flags; | ||
2596 | |||
2597 | spin_lock_irqsave(&cp->lock, flags); | ||
2598 | cas_tx(dev, cp, status); | ||
2599 | spin_unlock_irqrestore(&cp->lock, flags); | ||
2600 | |||
2601 | /* NAPI rx packets. we spread the credits across all of the | ||
2602 | * rxc rings | ||
2603 | * | ||
2604 | * to make sure we're fair with the work we loop through each | ||
2605 | * ring N_RX_COMP_RING times with a request of | ||
2606 | * budget / N_RX_COMP_RINGS | ||
2607 | */ | ||
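| /* e.g. budget 64 with 4 comp rings: each inner call may consume up | ||
| * to 16 credits, and the outer loop revisits the rings up to 4 times, | ||
| * so one busy ring can still soak up the whole budget. | ||
| */ | ||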
2608 | enable_intr = 1; | ||
2609 | credits = 0; | ||
2610 | for (i = 0; i < N_RX_COMP_RINGS; i++) { | ||
2611 | int j; | ||
2612 | for (j = 0; j < N_RX_COMP_RINGS; j++) { | ||
2613 | credits += cas_rx_ringN(cp, j, budget / N_RX_COMP_RINGS); | ||
2614 | if (credits >= budget) { | ||
2615 | enable_intr = 0; | ||
2616 | goto rx_comp; | ||
2617 | } | ||
2618 | } | ||
2619 | } | ||
2620 | |||
2621 | rx_comp: | ||
2622 | /* final rx completion */ | ||
2623 | spin_lock_irqsave(&cp->lock, flags); | ||
2624 | if (status) | ||
2625 | cas_handle_irq(dev, cp, status); | ||
2626 | |||
2627 | #ifdef USE_PCI_INTB | ||
2628 | if (N_RX_COMP_RINGS > 1) { | ||
2629 | status = readl(cp->regs + REG_PLUS_INTRN_STATUS(1)); | ||
2630 | if (status) | ||
2631 | cas_handle_irq1(cp, status); | ||
2632 | } | ||
2633 | #endif | ||
2634 | |||
2635 | #ifdef USE_PCI_INTC | ||
2636 | if (N_RX_COMP_RINGS > 2) { | ||
2637 | status = readl(cp->regs + REG_PLUS_INTRN_STATUS(2)); | ||
2638 | if (status) | ||
2639 | cas_handle_irqN(dev, cp, status, 2); | ||
2640 | } | ||
2641 | #endif | ||
2642 | |||
2643 | #ifdef USE_PCI_INTD | ||
2644 | if (N_RX_COMP_RINGS > 3) { | ||
2645 | status = readl(cp->regs + REG_PLUS_INTRN_STATUS(3)); | ||
2646 | if (status) | ||
2647 | cas_handle_irqN(dev, cp, status, 3); | ||
2648 | } | ||
2649 | #endif | ||
2650 | spin_unlock_irqrestore(&cp->lock, flags); | ||
2651 | if (enable_intr) { | ||
2652 | napi_complete(napi); | ||
2653 | cas_unmask_intr(cp); | ||
2654 | } | ||
2655 | return credits; | ||
2656 | } | ||
2657 | #endif | ||
2658 | |||
2659 | #ifdef CONFIG_NET_POLL_CONTROLLER | ||
2660 | static void cas_netpoll(struct net_device *dev) | ||
2661 | { | ||
2662 | struct cas *cp = netdev_priv(dev); | ||
2663 | |||
2664 | cas_disable_irq(cp, 0); | ||
2665 | cas_interrupt(cp->pdev->irq, dev); | ||
2666 | cas_enable_irq(cp, 0); | ||
2667 | |||
2668 | #ifdef USE_PCI_INTB | ||
2669 | if (N_RX_COMP_RINGS > 1) { | ||
2670 | /* cas_interrupt1(); */ | ||
2671 | } | ||
2672 | #endif | ||
2673 | #ifdef USE_PCI_INTC | ||
2674 | if (N_RX_COMP_RINGS > 2) { | ||
2675 | /* cas_interruptN(); */ | ||
2676 | } | ||
2677 | #endif | ||
2678 | #ifdef USE_PCI_INTD | ||
2679 | if (N_RX_COMP_RINGS > 3) { | ||
2680 | /* cas_interruptN(); */ | ||
2681 | } | ||
2682 | #endif | ||
2683 | } | ||
2684 | #endif | ||
2685 | |||
2686 | static void cas_tx_timeout(struct net_device *dev) | ||
2687 | { | ||
2688 | struct cas *cp = netdev_priv(dev); | ||
2689 | |||
2690 | netdev_err(dev, "transmit timed out, resetting\n"); | ||
2691 | if (!cp->hw_running) { | ||
2692 | netdev_err(dev, "hrm.. hw not running!\n"); | ||
2693 | return; | ||
2694 | } | ||
2695 | |||
2696 | netdev_err(dev, "MIF_STATE[%08x]\n", | ||
2697 | readl(cp->regs + REG_MIF_STATE_MACHINE)); | ||
2698 | |||
2699 | netdev_err(dev, "MAC_STATE[%08x]\n", | ||
2700 | readl(cp->regs + REG_MAC_STATE_MACHINE)); | ||
2701 | |||
2702 | netdev_err(dev, "TX_STATE[%08x:%08x:%08x] FIFO[%08x:%08x:%08x] SM1[%08x] SM2[%08x]\n", | ||
2703 | readl(cp->regs + REG_TX_CFG), | ||
2704 | readl(cp->regs + REG_MAC_TX_STATUS), | ||
2705 | readl(cp->regs + REG_MAC_TX_CFG), | ||
2706 | readl(cp->regs + REG_TX_FIFO_PKT_CNT), | ||
2707 | readl(cp->regs + REG_TX_FIFO_WRITE_PTR), | ||
2708 | readl(cp->regs + REG_TX_FIFO_READ_PTR), | ||
2709 | readl(cp->regs + REG_TX_SM_1), | ||
2710 | readl(cp->regs + REG_TX_SM_2)); | ||
2711 | |||
2712 | netdev_err(dev, "RX_STATE[%08x:%08x:%08x]\n", | ||
2713 | readl(cp->regs + REG_RX_CFG), | ||
2714 | readl(cp->regs + REG_MAC_RX_STATUS), | ||
2715 | readl(cp->regs + REG_MAC_RX_CFG)); | ||
2716 | |||
2717 | netdev_err(dev, "HP_STATE[%08x:%08x:%08x:%08x]\n", | ||
2718 | readl(cp->regs + REG_HP_STATE_MACHINE), | ||
2719 | readl(cp->regs + REG_HP_STATUS0), | ||
2720 | readl(cp->regs + REG_HP_STATUS1), | ||
2721 | readl(cp->regs + REG_HP_STATUS2)); | ||
2722 | |||
2723 | #if 1 | ||
2724 | atomic_inc(&cp->reset_task_pending); | ||
2725 | atomic_inc(&cp->reset_task_pending_all); | ||
2726 | schedule_work(&cp->reset_task); | ||
2727 | #else | ||
2728 | atomic_set(&cp->reset_task_pending, CAS_RESET_ALL); | ||
2729 | schedule_work(&cp->reset_task); | ||
2730 | #endif | ||
2731 | } | ||
2732 | |||
2733 | static inline int cas_intme(int ring, int entry) | ||
2734 | { | ||
2735 | /* Algorithm: IRQ every 1/2 of descriptors. */ | ||
2736 | if (!(entry & ((TX_DESC_RINGN_SIZE(ring) >> 1) - 1))) | ||
2737 | return 1; | ||
2738 | return 0; | ||
2739 | } | ||
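| /* e.g. a 512-entry tx ring gives a mask of 255, so INTME lands on | ||
| * entries 0 and 256: two tx-done interrupts per trip around the ring. | ||
| */ | ||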
2740 | |||
2741 | |||
2742 | static void cas_write_txd(struct cas *cp, int ring, int entry, | ||
2743 | dma_addr_t mapping, int len, u64 ctrl, int last) | ||
2744 | { | ||
2745 | struct cas_tx_desc *txd = cp->init_txds[ring] + entry; | ||
2746 | |||
2747 | ctrl |= CAS_BASE(TX_DESC_BUFLEN, len); | ||
2748 | if (cas_intme(ring, entry)) | ||
2749 | ctrl |= TX_DESC_INTME; | ||
2750 | if (last) | ||
2751 | ctrl |= TX_DESC_EOF; | ||
2752 | txd->control = cpu_to_le64(ctrl); | ||
2753 | txd->buffer = cpu_to_le64(mapping); | ||
2754 | } | ||
2755 | |||
2756 | static inline void *tx_tiny_buf(struct cas *cp, const int ring, | ||
2757 | const int entry) | ||
2758 | { | ||
2759 | return cp->tx_tiny_bufs[ring] + TX_TINY_BUF_LEN*entry; | ||
2760 | } | ||
2761 | |||
2762 | static inline dma_addr_t tx_tiny_map(struct cas *cp, const int ring, | ||
2763 | const int entry, const int tentry) | ||
2764 | { | ||
2765 | cp->tx_tiny_use[ring][tentry].nbufs++; | ||
2766 | cp->tx_tiny_use[ring][entry].used = 1; | ||
2767 | return cp->tx_tiny_dvma[ring] + TX_TINY_BUF_LEN*entry; | ||
2768 | } | ||
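| /* nbufs is charged to the skb's first descriptor (tentry) so the | ||
| * completion path can subtract the extra slots in one place, while | ||
| * the used flag on the tiny buffer's own entry lets cas_tx_ringN | ||
| * skip over it when walking the ring. | ||
| */ | ||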
2769 | |||
2770 | static inline int cas_xmit_tx_ringN(struct cas *cp, int ring, | ||
2771 | struct sk_buff *skb) | ||
2772 | { | ||
2773 | struct net_device *dev = cp->dev; | ||
2774 | int entry, nr_frags, frag, tabort, tentry; | ||
2775 | dma_addr_t mapping; | ||
2776 | unsigned long flags; | ||
2777 | u64 ctrl; | ||
2778 | u32 len; | ||
2779 | |||
2780 | spin_lock_irqsave(&cp->tx_lock[ring], flags); | ||
2781 | |||
2782 | /* This is a hard error, log it. */ | ||
2783 | if (TX_BUFFS_AVAIL(cp, ring) <= | ||
2784 | CAS_TABORT(cp)*(skb_shinfo(skb)->nr_frags + 1)) { | ||
2785 | netif_stop_queue(dev); | ||
2786 | spin_unlock_irqrestore(&cp->tx_lock[ring], flags); | ||
2787 | netdev_err(dev, "BUG! Tx Ring full when queue awake!\n"); | ||
2788 | return 1; | ||
2789 | } | ||
2790 | |||
2791 | ctrl = 0; | ||
2792 | if (skb->ip_summed == CHECKSUM_PARTIAL) { | ||
2793 | const u64 csum_start_off = skb_checksum_start_offset(skb); | ||
2794 | const u64 csum_stuff_off = csum_start_off + skb->csum_offset; | ||
2795 | |||
2796 | ctrl = TX_DESC_CSUM_EN | | ||
2797 | CAS_BASE(TX_DESC_CSUM_START, csum_start_off) | | ||
2798 | CAS_BASE(TX_DESC_CSUM_STUFF, csum_stuff_off); | ||
2799 | } | ||
2800 | |||
2801 | entry = cp->tx_new[ring]; | ||
2802 | cp->tx_skbs[ring][entry] = skb; | ||
2803 | |||
2804 | nr_frags = skb_shinfo(skb)->nr_frags; | ||
2805 | len = skb_headlen(skb); | ||
2806 | mapping = pci_map_page(cp->pdev, virt_to_page(skb->data), | ||
2807 | offset_in_page(skb->data), len, | ||
2808 | PCI_DMA_TODEVICE); | ||
2809 | |||
2810 | tentry = entry; | ||
2811 | tabort = cas_calc_tabort(cp, (unsigned long) skb->data, len); | ||
2812 | if (unlikely(tabort)) { | ||
2813 | /* NOTE: len is always > tabort */ | ||
2814 | cas_write_txd(cp, ring, entry, mapping, len - tabort, | ||
2815 | ctrl | TX_DESC_SOF, 0); | ||
2816 | entry = TX_DESC_NEXT(ring, entry); | ||
2817 | |||
2818 | skb_copy_from_linear_data_offset(skb, len - tabort, | ||
2819 | tx_tiny_buf(cp, ring, entry), tabort); | ||
2820 | mapping = tx_tiny_map(cp, ring, entry, tentry); | ||
2821 | cas_write_txd(cp, ring, entry, mapping, tabort, ctrl, | ||
2822 | (nr_frags == 0)); | ||
2823 | } else { | ||
2824 | cas_write_txd(cp, ring, entry, mapping, len, ctrl | | ||
2825 | TX_DESC_SOF, (nr_frags == 0)); | ||
2826 | } | ||
2827 | entry = TX_DESC_NEXT(ring, entry); | ||
2828 | |||
2829 | for (frag = 0; frag < nr_frags; frag++) { | ||
2830 | skb_frag_t *fragp = &skb_shinfo(skb)->frags[frag]; | ||
2831 | |||
2832 | len = fragp->size; | ||
2833 | mapping = pci_map_page(cp->pdev, fragp->page, | ||
2834 | fragp->page_offset, len, | ||
2835 | PCI_DMA_TODEVICE); | ||
2836 | |||
2837 | tabort = cas_calc_tabort(cp, fragp->page_offset, len); | ||
2838 | if (unlikely(tabort)) { | ||
2839 | void *addr; | ||
2840 | |||
2841 | /* NOTE: len is always > tabort */ | ||
2842 | cas_write_txd(cp, ring, entry, mapping, len - tabort, | ||
2843 | ctrl, 0); | ||
2844 | entry = TX_DESC_NEXT(ring, entry); | ||
2845 | |||
2846 | addr = cas_page_map(fragp->page); | ||
2847 | memcpy(tx_tiny_buf(cp, ring, entry), | ||
2848 | addr + fragp->page_offset + len - tabort, | ||
2849 | tabort); | ||
2850 | cas_page_unmap(addr); | ||
2851 | mapping = tx_tiny_map(cp, ring, entry, tentry); | ||
2852 | len = tabort; | ||
2853 | } | ||
2854 | |||
2855 | cas_write_txd(cp, ring, entry, mapping, len, ctrl, | ||
2856 | (frag + 1 == nr_frags)); | ||
2857 | entry = TX_DESC_NEXT(ring, entry); | ||
2858 | } | ||
2859 | |||
2860 | cp->tx_new[ring] = entry; | ||
2861 | if (TX_BUFFS_AVAIL(cp, ring) <= CAS_TABORT(cp)*(MAX_SKB_FRAGS + 1)) | ||
2862 | netif_stop_queue(dev); | ||
2863 | |||
2864 | netif_printk(cp, tx_queued, KERN_DEBUG, dev, | ||
2865 | "tx[%d] queued, slot %d, skblen %d, avail %d\n", | ||
2866 | ring, entry, skb->len, TX_BUFFS_AVAIL(cp, ring)); | ||
2867 | writel(entry, cp->regs + REG_TX_KICKN(ring)); | ||
2868 | spin_unlock_irqrestore(&cp->tx_lock[ring], flags); | ||
2869 | return 0; | ||
2870 | } | ||
2871 | |||
2872 | static netdev_tx_t cas_start_xmit(struct sk_buff *skb, struct net_device *dev) | ||
2873 | { | ||
2874 | struct cas *cp = netdev_priv(dev); | ||
2875 | |||
2876 | /* this is only used as a load-balancing hint, so it doesn't | ||
2877 | * need to be SMP safe | ||
2878 | */ | ||
2879 | static int ring; | ||
2880 | |||
2881 | if (skb_padto(skb, cp->min_frame_size)) | ||
2882 | return NETDEV_TX_OK; | ||
2883 | |||
2884 | /* XXX: we need some higher-level QoS hooks to steer packets to | ||
2885 | * individual queues. | ||
2886 | */ | ||
2887 | if (cas_xmit_tx_ringN(cp, ring++ & N_TX_RINGS_MASK, skb)) | ||
2888 | return NETDEV_TX_BUSY; | ||
2889 | return NETDEV_TX_OK; | ||
2890 | } | ||
2891 | |||
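The static ring counter in cas_start_xmit() is safe despite the racy increment because N_TX_RINGS is a power of two, so the mask always yields a valid ring index; a stale read from another CPU merely skews the balance. A standalone sketch, assuming four TX rings:

	#include <stdio.h>

	#define N_TX_RINGS	4		/* assumed; must be a power of two */
	#define N_TX_RINGS_MASK	(N_TX_RINGS - 1)

	int main(void)
	{
		static int ring;
		int i;

		for (i = 0; i < 6; i++)		/* prints rings 0,1,2,3,0,1 */
			printf("packet %d -> tx ring %d\n", i,
			       ring++ & N_TX_RINGS_MASK);
		return 0;
	}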
2892 | static void cas_init_tx_dma(struct cas *cp) | ||
2893 | { | ||
2894 | u64 desc_dma = cp->block_dvma; | ||
2895 | unsigned long off; | ||
2896 | u32 val; | ||
2897 | int i; | ||
2898 | |||
2899 | /* set up tx completion writeback registers. must be 8-byte aligned */ | ||
2900 | #ifdef USE_TX_COMPWB | ||
2901 | off = offsetof(struct cas_init_block, tx_compwb); | ||
2902 | writel((desc_dma + off) >> 32, cp->regs + REG_TX_COMPWB_DB_HI); | ||
2903 | writel((desc_dma + off) & 0xffffffff, cp->regs + REG_TX_COMPWB_DB_LOW); | ||
2904 | #endif | ||
2905 | |||
2906 | /* enable completion writebacks, enable paced mode, | ||
2907 | * disable read pipe, and disable pre-interrupt compwbs | ||
2908 | */ | ||
2909 | val = TX_CFG_COMPWB_Q1 | TX_CFG_COMPWB_Q2 | | ||
2910 | TX_CFG_COMPWB_Q3 | TX_CFG_COMPWB_Q4 | | ||
2911 | TX_CFG_DMA_RDPIPE_DIS | TX_CFG_PACED_MODE | | ||
2912 | TX_CFG_INTR_COMPWB_DIS; | ||
2913 | |||
2914 | /* write out tx ring info and tx desc bases */ | ||
2915 | for (i = 0; i < MAX_TX_RINGS; i++) { | ||
2916 | off = (unsigned long) cp->init_txds[i] - | ||
2917 | (unsigned long) cp->init_block; | ||
2918 | |||
2919 | val |= CAS_TX_RINGN_BASE(i); | ||
2920 | writel((desc_dma + off) >> 32, cp->regs + REG_TX_DBN_HI(i)); | ||
2921 | writel((desc_dma + off) & 0xffffffff, cp->regs + | ||
2922 | REG_TX_DBN_LOW(i)); | ||
2923 | /* don't zero out the kick register here as the system | ||
2924 | * will wedge | ||
2925 | */ | ||
2926 | } | ||
2927 | writel(val, cp->regs + REG_TX_CFG); | ||
2928 | |||
2929 | /* program max burst sizes. these numbers should be different | ||
2930 | * if doing QoS. | ||
2931 | */ | ||
2932 | #ifdef USE_QOS | ||
2933 | writel(0x800, cp->regs + REG_TX_MAXBURST_0); | ||
2934 | writel(0x1600, cp->regs + REG_TX_MAXBURST_1); | ||
2935 | writel(0x2400, cp->regs + REG_TX_MAXBURST_2); | ||
2936 | writel(0x4800, cp->regs + REG_TX_MAXBURST_3); | ||
2937 | #else | ||
2938 | writel(0x800, cp->regs + REG_TX_MAXBURST_0); | ||
2939 | writel(0x800, cp->regs + REG_TX_MAXBURST_1); | ||
2940 | writel(0x800, cp->regs + REG_TX_MAXBURST_2); | ||
2941 | writel(0x800, cp->regs + REG_TX_MAXBURST_3); | ||
2942 | #endif | ||
2943 | } | ||
2944 | |||
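The descriptor-base writes in cas_init_tx_dma() follow the usual pattern of splitting one 64-bit bus address across a HI/LOW register pair; a sketch with a fabricated address:

	#include <stdio.h>
	#include <stdint.h>

	int main(void)
	{
		uint64_t desc_dma = 0x0000000fedcba980ULL;	/* hypothetical */

		printf("REG_TX_DBN_HI(i)  <- %08x\n",
		       (unsigned)(desc_dma >> 32));
		printf("REG_TX_DBN_LOW(i) <- %08x\n",
		       (unsigned)(desc_dma & 0xffffffff));
		return 0;
	}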
2945 | /* Must be invoked under cp->lock. */ | ||
2946 | static inline void cas_init_dma(struct cas *cp) | ||
2947 | { | ||
2948 | cas_init_tx_dma(cp); | ||
2949 | cas_init_rx_dma(cp); | ||
2950 | } | ||
2951 | |||
2952 | static void cas_process_mc_list(struct cas *cp) | ||
2953 | { | ||
2954 | u16 hash_table[16]; | ||
2955 | u32 crc; | ||
2956 | struct netdev_hw_addr *ha; | ||
2957 | int i = 1; | ||
2958 | |||
2959 | memset(hash_table, 0, sizeof(hash_table)); | ||
2960 | netdev_for_each_mc_addr(ha, cp->dev) { | ||
2961 | if (i <= CAS_MC_EXACT_MATCH_SIZE) { | ||
2962 | /* use the alternate mac address registers for the | ||
2963 | * first 15 multicast addresses | ||
2964 | */ | ||
2965 | writel((ha->addr[4] << 8) | ha->addr[5], | ||
2966 | cp->regs + REG_MAC_ADDRN(i*3 + 0)); | ||
2967 | writel((ha->addr[2] << 8) | ha->addr[3], | ||
2968 | cp->regs + REG_MAC_ADDRN(i*3 + 1)); | ||
2969 | writel((ha->addr[0] << 8) | ha->addr[1], | ||
2970 | cp->regs + REG_MAC_ADDRN(i*3 + 2)); | ||
2971 | i++; | ||
2972 | } | ||
2973 | else { | ||
2974 | /* use hw hash table for the next series of | ||
2975 | * multicast addresses | ||
2976 | */ | ||
2977 | crc = ether_crc_le(ETH_ALEN, ha->addr); | ||
2978 | crc >>= 24; | ||
2979 | hash_table[crc >> 4] |= 1 << (15 - (crc & 0xf)); | ||
2980 | } | ||
2981 | } | ||
2982 | for (i = 0; i < 16; i++) | ||
2983 | writel(hash_table[i], cp->regs + REG_MAC_HASH_TABLEN(i)); | ||
2984 | } | ||
2985 | |||
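The hash path in cas_process_mc_list() folds each address into an 8-bit slice of a little-endian CRC: the high nibble picks one of the sixteen 16-bit hash registers and the low nibble picks a bit counted from the MSB. A worked example with a made-up CRC slice (the driver derives it from ether_crc_le(ETH_ALEN, ha->addr) >> 24):

	#include <stdio.h>

	int main(void)
	{
		unsigned int crc = 0xa7;		/* hypothetical top CRC byte */
		unsigned short hash_table[16] = { 0 };

		hash_table[crc >> 4] |= 1 << (15 - (crc & 0xf));
		printf("hash register %u gets mask 0x%04x\n",
		       crc >> 4, hash_table[crc >> 4]);	/* register 10, 0x0100 */
		return 0;
	}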
2986 | /* Must be invoked under cp->lock. */ | ||
2987 | static u32 cas_setup_multicast(struct cas *cp) | ||
2988 | { | ||
2989 | u32 rxcfg = 0; | ||
2990 | int i; | ||
2991 | |||
2992 | if (cp->dev->flags & IFF_PROMISC) { | ||
2993 | rxcfg |= MAC_RX_CFG_PROMISC_EN; | ||
2994 | |||
2995 | } else if (cp->dev->flags & IFF_ALLMULTI) { | ||
2996 | for (i = 0; i < 16; i++) | ||
2997 | writel(0xFFFF, cp->regs + REG_MAC_HASH_TABLEN(i)); | ||
2998 | rxcfg |= MAC_RX_CFG_HASH_FILTER_EN; | ||
2999 | |||
3000 | } else { | ||
3001 | cas_process_mc_list(cp); | ||
3002 | rxcfg |= MAC_RX_CFG_HASH_FILTER_EN; | ||
3003 | } | ||
3004 | |||
3005 | return rxcfg; | ||
3006 | } | ||
3007 | |||
3008 | /* must be invoked under cp->stat_lock[N_TX_RINGS] */ | ||
3009 | static void cas_clear_mac_err(struct cas *cp) | ||
3010 | { | ||
3011 | writel(0, cp->regs + REG_MAC_COLL_NORMAL); | ||
3012 | writel(0, cp->regs + REG_MAC_COLL_FIRST); | ||
3013 | writel(0, cp->regs + REG_MAC_COLL_EXCESS); | ||
3014 | writel(0, cp->regs + REG_MAC_COLL_LATE); | ||
3015 | writel(0, cp->regs + REG_MAC_TIMER_DEFER); | ||
3016 | writel(0, cp->regs + REG_MAC_ATTEMPTS_PEAK); | ||
3017 | writel(0, cp->regs + REG_MAC_RECV_FRAME); | ||
3018 | writel(0, cp->regs + REG_MAC_LEN_ERR); | ||
3019 | writel(0, cp->regs + REG_MAC_ALIGN_ERR); | ||
3020 | writel(0, cp->regs + REG_MAC_FCS_ERR); | ||
3021 | writel(0, cp->regs + REG_MAC_RX_CODE_ERR); | ||
3022 | } | ||
3023 | |||
3024 | |||
3025 | static void cas_mac_reset(struct cas *cp) | ||
3026 | { | ||
3027 | int i; | ||
3028 | |||
3029 | /* do both TX and RX reset */ | ||
3030 | writel(0x1, cp->regs + REG_MAC_TX_RESET); | ||
3031 | writel(0x1, cp->regs + REG_MAC_RX_RESET); | ||
3032 | |||
3033 | /* wait for TX */ | ||
3034 | i = STOP_TRIES; | ||
3035 | while (i-- > 0) { | ||
3036 | if (readl(cp->regs + REG_MAC_TX_RESET) == 0) | ||
3037 | break; | ||
3038 | udelay(10); | ||
3039 | } | ||
3040 | |||
3041 | /* wait for RX */ | ||
3042 | i = STOP_TRIES; | ||
3043 | while (i-- > 0) { | ||
3044 | if (readl(cp->regs + REG_MAC_RX_RESET) == 0) | ||
3045 | break; | ||
3046 | udelay(10); | ||
3047 | } | ||
3048 | |||
3049 | if (readl(cp->regs + REG_MAC_TX_RESET) | | ||
3050 | readl(cp->regs + REG_MAC_RX_RESET)) | ||
3051 | netdev_err(cp->dev, "mac tx[%d]/rx[%d] reset failed [%08x]\n", | ||
3052 | readl(cp->regs + REG_MAC_TX_RESET), | ||
3053 | readl(cp->regs + REG_MAC_RX_RESET), | ||
3054 | readl(cp->regs + REG_MAC_STATE_MACHINE)); | ||
3055 | } | ||
3056 | |||
3057 | |||
3058 | /* Must be invoked under cp->lock. */ | ||
3059 | static void cas_init_mac(struct cas *cp) | ||
3060 | { | ||
3061 | unsigned char *e = &cp->dev->dev_addr[0]; | ||
3062 | int i; | ||
3063 | cas_mac_reset(cp); | ||
3064 | |||
3065 | /* setup core arbitration weight register */ | ||
3066 | writel(CAWR_RR_DIS, cp->regs + REG_CAWR); | ||
3067 | |||
3068 | /* XXX Use pci_dma_burst_advice() */ | ||
3069 | #if !defined(CONFIG_SPARC64) && !defined(CONFIG_ALPHA) | ||
3070 | /* set the infinite burst register for chips that don't have | ||
3071 | * pci issues. | ||
3072 | */ | ||
3073 | if ((cp->cas_flags & CAS_FLAG_TARGET_ABORT) == 0) | ||
3074 | writel(INF_BURST_EN, cp->regs + REG_INF_BURST); | ||
3075 | #endif | ||
3076 | |||
3077 | writel(0x1BF0, cp->regs + REG_MAC_SEND_PAUSE); | ||
3078 | |||
3079 | writel(0x00, cp->regs + REG_MAC_IPG0); | ||
3080 | writel(0x08, cp->regs + REG_MAC_IPG1); | ||
3081 | writel(0x04, cp->regs + REG_MAC_IPG2); | ||
3082 | |||
3083 | /* change later for 802.3z */ | ||
3084 | writel(0x40, cp->regs + REG_MAC_SLOT_TIME); | ||
3085 | |||
3086 | /* min frame + FCS */ | ||
3087 | writel(ETH_ZLEN + 4, cp->regs + REG_MAC_FRAMESIZE_MIN); | ||
3088 | |||
3089 | /* Ethernet payload + header + FCS + optional VLAN tag. NOTE: we | ||
3090 | * specify the maximum frame size to prevent RX tag errors on | ||
3091 | * oversized frames. | ||
3092 | */ | ||
3093 | writel(CAS_BASE(MAC_FRAMESIZE_MAX_BURST, 0x2000) | | ||
3094 | CAS_BASE(MAC_FRAMESIZE_MAX_FRAME, | ||
3095 | (CAS_MAX_MTU + ETH_HLEN + 4 + 4)), | ||
3096 | cp->regs + REG_MAC_FRAMESIZE_MAX); | ||
3097 | |||
3098 | /* NOTE: crc_size is used as a surrogate for half-duplex. | ||
3099 | * workaround saturn half-duplex issue by increasing preamble | ||
3100 | * size to 65 bytes. | ||
3101 | */ | ||
3102 | if ((cp->cas_flags & CAS_FLAG_SATURN) && cp->crc_size) | ||
3103 | writel(0x41, cp->regs + REG_MAC_PA_SIZE); | ||
3104 | else | ||
3105 | writel(0x07, cp->regs + REG_MAC_PA_SIZE); | ||
3106 | writel(0x04, cp->regs + REG_MAC_JAM_SIZE); | ||
3107 | writel(0x10, cp->regs + REG_MAC_ATTEMPT_LIMIT); | ||
3108 | writel(0x8808, cp->regs + REG_MAC_CTRL_TYPE); | ||
3109 | |||
3110 | writel((e[5] | (e[4] << 8)) & 0x3ff, cp->regs + REG_MAC_RANDOM_SEED); | ||
3111 | |||
3112 | writel(0, cp->regs + REG_MAC_ADDR_FILTER0); | ||
3113 | writel(0, cp->regs + REG_MAC_ADDR_FILTER1); | ||
3114 | writel(0, cp->regs + REG_MAC_ADDR_FILTER2); | ||
3115 | writel(0, cp->regs + REG_MAC_ADDR_FILTER2_1_MASK); | ||
3116 | writel(0, cp->regs + REG_MAC_ADDR_FILTER0_MASK); | ||
3117 | |||
3118 | /* setup mac address in perfect filter array */ | ||
3119 | for (i = 0; i < 45; i++) | ||
3120 | writel(0x0, cp->regs + REG_MAC_ADDRN(i)); | ||
3121 | |||
3122 | writel((e[4] << 8) | e[5], cp->regs + REG_MAC_ADDRN(0)); | ||
3123 | writel((e[2] << 8) | e[3], cp->regs + REG_MAC_ADDRN(1)); | ||
3124 | writel((e[0] << 8) | e[1], cp->regs + REG_MAC_ADDRN(2)); | ||
3125 | |||
3126 | writel(0x0001, cp->regs + REG_MAC_ADDRN(42)); | ||
3127 | writel(0xc200, cp->regs + REG_MAC_ADDRN(43)); | ||
3128 | writel(0x0180, cp->regs + REG_MAC_ADDRN(44)); | ||
3129 | |||
3130 | cp->mac_rx_cfg = cas_setup_multicast(cp); | ||
3131 | |||
3132 | spin_lock(&cp->stat_lock[N_TX_RINGS]); | ||
3133 | cas_clear_mac_err(cp); | ||
3134 | spin_unlock(&cp->stat_lock[N_TX_RINGS]); | ||
3135 | |||
3136 | /* Setup MAC interrupts. We want to get all of the interesting | ||
3137 | * counter expiration events, but we do not want to hear about | ||
3138 | * normal rx/tx as the DMA engine tells us that. | ||
3139 | */ | ||
3140 | writel(MAC_TX_FRAME_XMIT, cp->regs + REG_MAC_TX_MASK); | ||
3141 | writel(MAC_RX_FRAME_RECV, cp->regs + REG_MAC_RX_MASK); | ||
3142 | |||
3143 | /* Don't enable even the PAUSE interrupts for now, we | ||
3144 | * make no use of those events other than to record them. | ||
3145 | */ | ||
3146 | writel(0xffffffff, cp->regs + REG_MAC_CTRL_MASK); | ||
3147 | } | ||
3148 | |||
3149 | /* Must be invoked under cp->lock. */ | ||
3150 | static void cas_init_pause_thresholds(struct cas *cp) | ||
3151 | { | ||
3152 | /* Calculate pause thresholds. Setting the OFF threshold to the | ||
3153 | * full RX fifo size effectively disables PAUSE generation | ||
3154 | */ | ||
3155 | if (cp->rx_fifo_size <= (2 * 1024)) { | ||
3156 | cp->rx_pause_off = cp->rx_pause_on = cp->rx_fifo_size; | ||
3157 | } else { | ||
3158 | int max_frame = (cp->dev->mtu + ETH_HLEN + 4 + 4 + 64) & ~63; | ||
3159 | if (max_frame * 3 > cp->rx_fifo_size) { | ||
3160 | cp->rx_pause_off = 7104; | ||
3161 | cp->rx_pause_on = 960; | ||
3162 | } else { | ||
3163 | int off = (cp->rx_fifo_size - (max_frame * 2)); | ||
3164 | int on = off - max_frame; | ||
3165 | cp->rx_pause_off = off; | ||
3166 | cp->rx_pause_on = on; | ||
3167 | } | ||
3168 | } | ||
3169 | } | ||
3170 | |||
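With concrete numbers the threshold math is easier to follow. A sketch assuming a 1500-byte MTU and a 16 KB RX FIFO (the real FIFO size is read from the chip): max_frame rounds up to a 64-byte multiple, the OFF threshold leaves room for two more frames in the FIFO, and the ON threshold sits one frame below that.

	#include <stdio.h>

	#define ETH_HLEN 14

	int main(void)
	{
		int rx_fifo_size = 16384;				/* assumed */
		int mtu = 1500;
		int max_frame = (mtu + ETH_HLEN + 4 + 4 + 64) & ~63;	/* 1536 */
		int off = rx_fifo_size - (max_frame * 2);		/* 13312 */
		int on = off - max_frame;				/* 11776 */

		printf("max_frame %d, pause off %d, pause on %d\n",
		       max_frame, off, on);
		return 0;
	}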
3171 | static int cas_vpd_match(const void __iomem *p, const char *str) | ||
3172 | { | ||
3173 | int len = strlen(str) + 1; | ||
3174 | int i; | ||
3175 | |||
3176 | for (i = 0; i < len; i++) { | ||
3177 | if (readb(p + i) != str[i]) | ||
3178 | return 0; | ||
3179 | } | ||
3180 | return 1; | ||
3181 | } | ||
3182 | |||
3183 | |||
3184 | /* get the mac address by reading the vpd information in the rom. | ||
3185 | * also get the phy type and determine if there's an entropy generator. | ||
3186 | * NOTE: this is a bit convoluted for the following reasons: | ||
3187 | * 1) vpd info has order-dependent mac addresses for multinic cards | ||
3188 | * 2) the only way to determine the nic order is to use the slot | ||
3189 | * number. | ||
3190 | * 3) fiber cards don't have bridges, so their slot numbers don't | ||
3191 | * mean anything. | ||
3192 | * 4) we don't actually know we have a fiber card until after | ||
3193 | * the mac addresses are parsed. | ||
3194 | */ | ||
3195 | static int cas_get_vpd_info(struct cas *cp, unsigned char *dev_addr, | ||
3196 | const int offset) | ||
3197 | { | ||
3198 | void __iomem *p = cp->regs + REG_EXPANSION_ROM_RUN_START; | ||
3199 | void __iomem *base, *kstart; | ||
3200 | int i, len; | ||
3201 | int found = 0; | ||
3202 | #define VPD_FOUND_MAC 0x01 | ||
3203 | #define VPD_FOUND_PHY 0x02 | ||
3204 | |||
3205 | int phy_type = CAS_PHY_MII_MDIO0; /* default phy type */ | ||
3206 | int mac_off = 0; | ||
3207 | |||
3208 | #if defined(CONFIG_SPARC) | ||
3209 | const unsigned char *addr; | ||
3210 | #endif | ||
3211 | |||
3212 | /* give us access to the PROM */ | ||
3213 | writel(BIM_LOCAL_DEV_PROM | BIM_LOCAL_DEV_PAD, | ||
3214 | cp->regs + REG_BIM_LOCAL_DEV_EN); | ||
3215 | |||
3216 | /* check for an expansion rom */ | ||
3217 | if (readb(p) != 0x55 || readb(p + 1) != 0xaa) | ||
3218 | goto use_random_mac_addr; | ||
3219 | |||
3220 | /* search for beginning of vpd */ | ||
3221 | base = NULL; | ||
3222 | for (i = 2; i < EXPANSION_ROM_SIZE; i++) { | ||
3223 | /* check for PCIR */ | ||
3224 | if ((readb(p + i + 0) == 0x50) && | ||
3225 | (readb(p + i + 1) == 0x43) && | ||
3226 | (readb(p + i + 2) == 0x49) && | ||
3227 | (readb(p + i + 3) == 0x52)) { | ||
3228 | base = p + (readb(p + i + 8) | | ||
3229 | (readb(p + i + 9) << 8)); | ||
3230 | break; | ||
3231 | } | ||
3232 | } | ||
3233 | |||
3234 | if (!base || (readb(base) != 0x82)) | ||
3235 | goto use_random_mac_addr; | ||
3236 | |||
3237 | i = (readb(base + 1) | (readb(base + 2) << 8)) + 3; | ||
3238 | while (i < EXPANSION_ROM_SIZE) { | ||
3239 | if (readb(base + i) != 0x90) /* no vpd found */ | ||
3240 | goto use_random_mac_addr; | ||
3241 | |||
3242 | /* found a vpd field */ | ||
3243 | len = readb(base + i + 1) | (readb(base + i + 2) << 8); | ||
3244 | |||
3245 | /* extract keywords */ | ||
3246 | kstart = base + i + 3; | ||
3247 | p = kstart; | ||
3248 | while ((p - kstart) < len) { | ||
3249 | int klen = readb(p + 2); | ||
3250 | int j; | ||
3251 | char type; | ||
3252 | |||
3253 | p += 3; | ||
3254 | |||
3255 | /* look for the following things: | ||
3256 | * -- correct length == 29 | ||
3257 | * 3 (type) + 2 (size) + | ||
3258 | * 18 (strlen("local-mac-address") + 1) + | ||
3259 | * 6 (mac addr) | ||
3260 | * -- VPD Instance 'I' | ||
3261 | * -- VPD Type Bytes 'B' | ||
3262 | * -- VPD data length == 6 | ||
3263 | * -- property string == local-mac-address | ||
3264 | * | ||
3265 | * -- correct length == 24 | ||
3266 | * 3 (type) + 2 (size) + | ||
3267 | * 12 (strlen("entropy-dev") + 1) + | ||
3268 | * 7 (strlen("vms110") + 1) | ||
3269 | * -- VPD Instance 'I' | ||
3270 | * -- VPD Type String 'S' | ||
3271 | * -- VPD data length == 7 | ||
3272 | * -- property string == entropy-dev | ||
3273 | * | ||
3274 | * -- correct length == 18 | ||
3275 | * 3 (type) + 2 (size) + | ||
3276 | * 9 (strlen("phy-type") + 1) + | ||
3277 | * 4 (strlen("pcs") + 1) | ||
3278 | * -- VPD Instance 'I' | ||
3279 | * -- VPD Type String 'S' | ||
3280 | * -- VPD data length == 4 | ||
3281 | * -- property string == phy-type | ||
3282 | * | ||
3283 | * -- correct length == 23 | ||
3284 | * 3 (type) + 2 (size) + | ||
3285 | * 14 (strlen("phy-interface") + 1) + | ||
3286 | * 4 (strlen("pcs") + 1) | ||
3287 | * -- VPD Instance 'I' | ||
3288 | * -- VPD Type String 'S' | ||
3289 | * -- VPD data length == 4 | ||
3290 | * -- property string == phy-interface | ||
3291 | */ | ||
3292 | if (readb(p) != 'I') | ||
3293 | goto next; | ||
3294 | |||
3295 | /* finally, check string and length */ | ||
3296 | type = readb(p + 3); | ||
3297 | if (type == 'B') { | ||
3298 | if ((klen == 29) && readb(p + 4) == 6 && | ||
3299 | cas_vpd_match(p + 5, | ||
3300 | "local-mac-address")) { | ||
3301 | if (mac_off++ > offset) | ||
3302 | goto next; | ||
3303 | |||
3304 | /* set mac address */ | ||
3305 | for (j = 0; j < 6; j++) | ||
3306 | dev_addr[j] = | ||
3307 | readb(p + 23 + j); | ||
3308 | goto found_mac; | ||
3309 | } | ||
3310 | } | ||
3311 | |||
3312 | if (type != 'S') | ||
3313 | goto next; | ||
3314 | |||
3315 | #ifdef USE_ENTROPY_DEV | ||
3316 | if ((klen == 24) && | ||
3317 | cas_vpd_match(p + 5, "entropy-dev") && | ||
3318 | cas_vpd_match(p + 17, "vms110")) { | ||
3319 | cp->cas_flags |= CAS_FLAG_ENTROPY_DEV; | ||
3320 | goto next; | ||
3321 | } | ||
3322 | #endif | ||
3323 | |||
3324 | if (found & VPD_FOUND_PHY) | ||
3325 | goto next; | ||
3326 | |||
3327 | if ((klen == 18) && readb(p + 4) == 4 && | ||
3328 | cas_vpd_match(p + 5, "phy-type")) { | ||
3329 | if (cas_vpd_match(p + 14, "pcs")) { | ||
3330 | phy_type = CAS_PHY_SERDES; | ||
3331 | goto found_phy; | ||
3332 | } | ||
3333 | } | ||
3334 | |||
3335 | if ((klen == 23) && readb(p + 4) == 4 && | ||
3336 | cas_vpd_match(p + 5, "phy-interface")) { | ||
3337 | if (cas_vpd_match(p + 19, "pcs")) { | ||
3338 | phy_type = CAS_PHY_SERDES; | ||
3339 | goto found_phy; | ||
3340 | } | ||
3341 | } | ||
3342 | found_mac: | ||
3343 | found |= VPD_FOUND_MAC; | ||
3344 | goto next; | ||
3345 | |||
3346 | found_phy: | ||
3347 | found |= VPD_FOUND_PHY; | ||
3348 | |||
3349 | next: | ||
3350 | p += klen; | ||
3351 | } | ||
3352 | i += len + 3; | ||
3353 | } | ||
3354 | |||
3355 | use_random_mac_addr: | ||
3356 | if (found & VPD_FOUND_MAC) | ||
3357 | goto done; | ||
3358 | |||
3359 | #if defined(CONFIG_SPARC) | ||
3360 | addr = of_get_property(cp->of_node, "local-mac-address", NULL); | ||
3361 | if (addr != NULL) { | ||
3362 | memcpy(dev_addr, addr, 6); | ||
3363 | goto done; | ||
3364 | } | ||
3365 | #endif | ||
3366 | |||
3367 | /* Sun MAC prefix then 3 random bytes. */ | ||
3368 | pr_info("MAC address not found in ROM VPD\n"); | ||
3369 | dev_addr[0] = 0x08; | ||
3370 | dev_addr[1] = 0x00; | ||
3371 | dev_addr[2] = 0x20; | ||
3372 | get_random_bytes(dev_addr + 3, 3); | ||
3373 | |||
3374 | done: | ||
3375 | writel(0, cp->regs + REG_BIM_LOCAL_DEV_EN); | ||
3376 | return phy_type; | ||
3377 | } | ||
3378 | |||
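A minimal model of the expansion-ROM walk in cas_get_vpd_info(): check the 55 AA signature, scan for the "PCIR" header, and read the 16-bit little-endian pointer at its offset 8. The ROM image below is fabricated for illustration; in the driver the bytes come through readb() from the mapped expansion ROM.

	#include <stdio.h>

	int main(void)
	{
		unsigned char rom[32] = {
			0x55, 0xaa, 0, 0, 0, 0, 0, 0,
			'P', 'C', 'I', 'R', 0, 0, 0, 0,
			0x40, 0x01,	/* pointer bytes at PCIR + 8/9 -> 0x0140 */
		};
		int i;

		if (rom[0] != 0x55 || rom[1] != 0xaa)
			return 1;	/* no expansion ROM */
		for (i = 2; i < (int)sizeof(rom) - 9; i++) {
			if (rom[i] == 'P' && rom[i + 1] == 'C' &&
			    rom[i + 2] == 'I' && rom[i + 3] == 'R') {
				int base = rom[i + 8] | (rom[i + 9] << 8);
				printf("VPD data at ROM offset 0x%04x\n", base);
				break;
			}
		}
		return 0;
	}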
3379 | /* check pci invariants */ | ||
3380 | static void cas_check_pci_invariants(struct cas *cp) | ||
3381 | { | ||
3382 | struct pci_dev *pdev = cp->pdev; | ||
3383 | |||
3384 | cp->cas_flags = 0; | ||
3385 | if ((pdev->vendor == PCI_VENDOR_ID_SUN) && | ||
3386 | (pdev->device == PCI_DEVICE_ID_SUN_CASSINI)) { | ||
3387 | if (pdev->revision >= CAS_ID_REVPLUS) | ||
3388 | cp->cas_flags |= CAS_FLAG_REG_PLUS; | ||
3389 | if (pdev->revision < CAS_ID_REVPLUS02u) | ||
3390 | cp->cas_flags |= CAS_FLAG_TARGET_ABORT; | ||
3391 | |||
3392 | /* Original Cassini supports HW CSUM, but it's not | ||
3393 | * enabled by default as it can trigger TX hangs. | ||
3394 | */ | ||
3395 | if (pdev->revision < CAS_ID_REV2) | ||
3396 | cp->cas_flags |= CAS_FLAG_NO_HW_CSUM; | ||
3397 | } else { | ||
3398 | /* Only sun has original cassini chips. */ | ||
3399 | cp->cas_flags |= CAS_FLAG_REG_PLUS; | ||
3400 | |||
3401 | /* We use a flag because the same phy might be externally | ||
3402 | * connected. | ||
3403 | */ | ||
3404 | if ((pdev->vendor == PCI_VENDOR_ID_NS) && | ||
3405 | (pdev->device == PCI_DEVICE_ID_NS_SATURN)) | ||
3406 | cp->cas_flags |= CAS_FLAG_SATURN; | ||
3407 | } | ||
3408 | } | ||
3409 | |||
3410 | |||
3411 | static int cas_check_invariants(struct cas *cp) | ||
3412 | { | ||
3413 | struct pci_dev *pdev = cp->pdev; | ||
3414 | u32 cfg; | ||
3415 | int i; | ||
3416 | |||
3417 | /* get page size for rx buffers. */ | ||
3418 | cp->page_order = 0; | ||
3419 | #ifdef USE_PAGE_ORDER | ||
3420 | if (PAGE_SHIFT < CAS_JUMBO_PAGE_SHIFT) { | ||
3421 | /* see if we can allocate larger pages */ | ||
3422 | struct page *page = alloc_pages(GFP_ATOMIC, | ||
3423 | CAS_JUMBO_PAGE_SHIFT - | ||
3424 | PAGE_SHIFT); | ||
3425 | if (page) { | ||
3426 | __free_pages(page, CAS_JUMBO_PAGE_SHIFT - PAGE_SHIFT); | ||
3427 | cp->page_order = CAS_JUMBO_PAGE_SHIFT - PAGE_SHIFT; | ||
3428 | } else { | ||
3429 | printk("MTU limited to %d bytes\n", CAS_MAX_MTU); | ||
3430 | } | ||
3431 | } | ||
3432 | #endif | ||
3433 | cp->page_size = (PAGE_SIZE << cp->page_order); | ||
3434 | |||
3435 | /* Fetch the FIFO configurations. */ | ||
3436 | cp->tx_fifo_size = readl(cp->regs + REG_TX_FIFO_SIZE) * 64; | ||
3437 | cp->rx_fifo_size = RX_FIFO_SIZE; | ||
3438 | |||
3439 | /* finish phy determination. MDIO1 takes precedence over MDIO0 if | ||
3440 | * they're both connected. | ||
3441 | */ | ||
3442 | cp->phy_type = cas_get_vpd_info(cp, cp->dev->dev_addr, | ||
3443 | PCI_SLOT(pdev->devfn)); | ||
3444 | if (cp->phy_type & CAS_PHY_SERDES) { | ||
3445 | cp->cas_flags |= CAS_FLAG_1000MB_CAP; | ||
3446 | return 0; /* no more checking needed */ | ||
3447 | } | ||
3448 | |||
3449 | /* MII */ | ||
3450 | cfg = readl(cp->regs + REG_MIF_CFG); | ||
3451 | if (cfg & MIF_CFG_MDIO_1) { | ||
3452 | cp->phy_type = CAS_PHY_MII_MDIO1; | ||
3453 | } else if (cfg & MIF_CFG_MDIO_0) { | ||
3454 | cp->phy_type = CAS_PHY_MII_MDIO0; | ||
3455 | } | ||
3456 | |||
3457 | cas_mif_poll(cp, 0); | ||
3458 | writel(PCS_DATAPATH_MODE_MII, cp->regs + REG_PCS_DATAPATH_MODE); | ||
3459 | |||
3460 | for (i = 0; i < 32; i++) { | ||
3461 | u32 phy_id; | ||
3462 | int j; | ||
3463 | |||
3464 | for (j = 0; j < 3; j++) { | ||
3465 | cp->phy_addr = i; | ||
3466 | phy_id = cas_phy_read(cp, MII_PHYSID1) << 16; | ||
3467 | phy_id |= cas_phy_read(cp, MII_PHYSID2); | ||
3468 | if (phy_id && (phy_id != 0xFFFFFFFF)) { | ||
3469 | cp->phy_id = phy_id; | ||
3470 | goto done; | ||
3471 | } | ||
3472 | } | ||
3473 | } | ||
3474 | pr_err("MII phy did not respond [%08x]\n", | ||
3475 | readl(cp->regs + REG_MIF_STATE_MACHINE)); | ||
3476 | return -1; | ||
3477 | |||
3478 | done: | ||
3479 | /* see if we can do gigabit */ | ||
3480 | cfg = cas_phy_read(cp, MII_BMSR); | ||
3481 | if ((cfg & CAS_BMSR_1000_EXTEND) && | ||
3482 | cas_phy_read(cp, CAS_MII_1000_EXTEND)) | ||
3483 | cp->cas_flags |= CAS_FLAG_1000MB_CAP; | ||
3484 | return 0; | ||
3485 | } | ||
3486 | |||
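The probe loop in cas_check_invariants() composes the 32-bit PHY id from the two MII id registers and treats all-zeros or all-ones as "nothing answered". A sketch with fabricated register values:

	#include <stdio.h>
	#include <stdint.h>

	int main(void)
	{
		uint32_t physid1 = 0x0141, physid2 = 0x0c23;	/* hypothetical */
		uint32_t phy_id = (physid1 << 16) | physid2;

		if (phy_id && phy_id != 0xFFFFFFFF)
			printf("phy responded, id %08x\n", (unsigned)phy_id);
		else
			printf("no phy at this address\n");
		return 0;
	}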
3487 | /* Must be invoked under cp->lock. */ | ||
3488 | static inline void cas_start_dma(struct cas *cp) | ||
3489 | { | ||
3490 | int i; | ||
3491 | u32 val; | ||
3492 | int txfailed = 0; | ||
3493 | |||
3494 | /* enable dma */ | ||
3495 | val = readl(cp->regs + REG_TX_CFG) | TX_CFG_DMA_EN; | ||
3496 | writel(val, cp->regs + REG_TX_CFG); | ||
3497 | val = readl(cp->regs + REG_RX_CFG) | RX_CFG_DMA_EN; | ||
3498 | writel(val, cp->regs + REG_RX_CFG); | ||
3499 | |||
3500 | /* enable the mac */ | ||
3501 | val = readl(cp->regs + REG_MAC_TX_CFG) | MAC_TX_CFG_EN; | ||
3502 | writel(val, cp->regs + REG_MAC_TX_CFG); | ||
3503 | val = readl(cp->regs + REG_MAC_RX_CFG) | MAC_RX_CFG_EN; | ||
3504 | writel(val, cp->regs + REG_MAC_RX_CFG); | ||
3505 | |||
3506 | i = STOP_TRIES; | ||
3507 | while (i-- > 0) { | ||
3508 | val = readl(cp->regs + REG_MAC_TX_CFG); | ||
3509 | if ((val & MAC_TX_CFG_EN)) | ||
3510 | break; | ||
3511 | udelay(10); | ||
3512 | } | ||
3513 | if (i < 0) txfailed = 1; | ||
3514 | i = STOP_TRIES; | ||
3515 | while (i-- > 0) { | ||
3516 | val = readl(cp->regs + REG_MAC_RX_CFG); | ||
3517 | if ((val & MAC_RX_CFG_EN)) { | ||
3518 | if (txfailed) { | ||
3519 | netdev_err(cp->dev, | ||
3520 | "enabling mac failed [tx:%08x:%08x]\n", | ||
3521 | readl(cp->regs + REG_MIF_STATE_MACHINE), | ||
3522 | readl(cp->regs + REG_MAC_STATE_MACHINE)); | ||
3523 | } | ||
3524 | goto enable_rx_done; | ||
3525 | } | ||
3526 | udelay(10); | ||
3527 | } | ||
3528 | netdev_err(cp->dev, "enabling mac failed [%s:%08x:%08x]\n", | ||
3529 | (txfailed ? "tx,rx" : "rx"), | ||
3530 | readl(cp->regs + REG_MIF_STATE_MACHINE), | ||
3531 | readl(cp->regs + REG_MAC_STATE_MACHINE)); | ||
3532 | |||
3533 | enable_rx_done: | ||
3534 | cas_unmask_intr(cp); /* enable interrupts */ | ||
3535 | writel(RX_DESC_RINGN_SIZE(0) - 4, cp->regs + REG_RX_KICK); | ||
3536 | writel(0, cp->regs + REG_RX_COMP_TAIL); | ||
3537 | |||
3538 | if (cp->cas_flags & CAS_FLAG_REG_PLUS) { | ||
3539 | if (N_RX_DESC_RINGS > 1) | ||
3540 | writel(RX_DESC_RINGN_SIZE(1) - 4, | ||
3541 | cp->regs + REG_PLUS_RX_KICK1); | ||
3542 | |||
3543 | for (i = 1; i < N_RX_COMP_RINGS; i++) | ||
3544 | writel(0, cp->regs + REG_PLUS_RX_COMPN_TAIL(i)); | ||
3545 | } | ||
3546 | } | ||
3547 | |||
3548 | /* Must be invoked under cp->lock. */ | ||
3549 | static void cas_read_pcs_link_mode(struct cas *cp, int *fd, int *spd, | ||
3550 | int *pause) | ||
3551 | { | ||
3552 | u32 val = readl(cp->regs + REG_PCS_MII_LPA); | ||
3553 | *fd = (val & PCS_MII_LPA_FD) ? 1 : 0; | ||
3554 | *pause = (val & PCS_MII_LPA_SYM_PAUSE) ? 0x01 : 0x00; | ||
3555 | if (val & PCS_MII_LPA_ASYM_PAUSE) | ||
3556 | *pause |= 0x10; | ||
3557 | *spd = 1000; | ||
3558 | } | ||
3559 | |||
3560 | /* Must be invoked under cp->lock. */ | ||
3561 | static void cas_read_mii_link_mode(struct cas *cp, int *fd, int *spd, | ||
3562 | int *pause) | ||
3563 | { | ||
3564 | u32 val; | ||
3565 | |||
3566 | *fd = 0; | ||
3567 | *spd = 10; | ||
3568 | *pause = 0; | ||
3569 | |||
3570 | /* use GMII registers */ | ||
3571 | val = cas_phy_read(cp, MII_LPA); | ||
3572 | if (val & CAS_LPA_PAUSE) | ||
3573 | *pause = 0x01; | ||
3574 | |||
3575 | if (val & CAS_LPA_ASYM_PAUSE) | ||
3576 | *pause |= 0x10; | ||
3577 | |||
3578 | if (val & LPA_DUPLEX) | ||
3579 | *fd = 1; | ||
3580 | if (val & LPA_100) | ||
3581 | *spd = 100; | ||
3582 | |||
3583 | if (cp->cas_flags & CAS_FLAG_1000MB_CAP) { | ||
3584 | val = cas_phy_read(cp, CAS_MII_1000_STATUS); | ||
3585 | if (val & (CAS_LPA_1000FULL | CAS_LPA_1000HALF)) | ||
3586 | *spd = 1000; | ||
3587 | if (val & CAS_LPA_1000FULL) | ||
3588 | *fd = 1; | ||
3589 | } | ||
3590 | } | ||
3591 | |||
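Both link-mode readers share an ad-hoc pause encoding: bit 0 means symmetric pause was negotiated, bit 4 asymmetric. cas_set_link_modes() later enables send-pause whenever either bit is set and receive-pause only in the symmetric case. A small decode table as a sketch:

	#include <stdio.h>

	static void show(int pause)
	{
		printf("pause 0x%02x: send-pause %s, recv-pause %s\n", pause,
		       pause ? "on" : "off",
		       (pause & 0x01) ? "on" : "off");
	}

	int main(void)
	{
		show(0x00);	/* no pause negotiated */
		show(0x01);	/* symmetric pause */
		show(0x10);	/* asymmetric pause (tx only) */
		return 0;
	}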
3592 | /* A link-up condition has occurred, initialize and enable the | ||
3593 | * rest of the chip. | ||
3594 | * | ||
3595 | * Must be invoked under cp->lock. | ||
3596 | */ | ||
3597 | static void cas_set_link_modes(struct cas *cp) | ||
3598 | { | ||
3599 | u32 val; | ||
3600 | int full_duplex, speed, pause; | ||
3601 | |||
3602 | full_duplex = 0; | ||
3603 | speed = 10; | ||
3604 | pause = 0; | ||
3605 | |||
3606 | if (CAS_PHY_MII(cp->phy_type)) { | ||
3607 | cas_mif_poll(cp, 0); | ||
3608 | val = cas_phy_read(cp, MII_BMCR); | ||
3609 | if (val & BMCR_ANENABLE) { | ||
3610 | cas_read_mii_link_mode(cp, &full_duplex, &speed, | ||
3611 | &pause); | ||
3612 | } else { | ||
3613 | if (val & BMCR_FULLDPLX) | ||
3614 | full_duplex = 1; | ||
3615 | |||
3616 | if (val & BMCR_SPEED100) | ||
3617 | speed = 100; | ||
3618 | else if (val & CAS_BMCR_SPEED1000) | ||
3619 | speed = (cp->cas_flags & CAS_FLAG_1000MB_CAP) ? | ||
3620 | 1000 : 100; | ||
3621 | } | ||
3622 | cas_mif_poll(cp, 1); | ||
3623 | |||
3624 | } else { | ||
3625 | val = readl(cp->regs + REG_PCS_MII_CTRL); | ||
3626 | cas_read_pcs_link_mode(cp, &full_duplex, &speed, &pause); | ||
3627 | if ((val & PCS_MII_AUTONEG_EN) == 0) { | ||
3628 | if (val & PCS_MII_CTRL_DUPLEX) | ||
3629 | full_duplex = 1; | ||
3630 | } | ||
3631 | } | ||
3632 | |||
3633 | netif_info(cp, link, cp->dev, "Link up at %d Mbps, %s-duplex\n", | ||
3634 | speed, full_duplex ? "full" : "half"); | ||
3635 | |||
3636 | val = MAC_XIF_TX_MII_OUTPUT_EN | MAC_XIF_LINK_LED; | ||
3637 | if (CAS_PHY_MII(cp->phy_type)) { | ||
3638 | val |= MAC_XIF_MII_BUFFER_OUTPUT_EN; | ||
3639 | if (!full_duplex) | ||
3640 | val |= MAC_XIF_DISABLE_ECHO; | ||
3641 | } | ||
3642 | if (full_duplex) | ||
3643 | val |= MAC_XIF_FDPLX_LED; | ||
3644 | if (speed == 1000) | ||
3645 | val |= MAC_XIF_GMII_MODE; | ||
3646 | writel(val, cp->regs + REG_MAC_XIF_CFG); | ||
3647 | |||
3648 | /* deal with carrier and collision detect. */ | ||
3649 | val = MAC_TX_CFG_IPG_EN; | ||
3650 | if (full_duplex) { | ||
3651 | val |= MAC_TX_CFG_IGNORE_CARRIER; | ||
3652 | val |= MAC_TX_CFG_IGNORE_COLL; | ||
3653 | } else { | ||
3654 | #ifndef USE_CSMA_CD_PROTO | ||
3655 | val |= MAC_TX_CFG_NEVER_GIVE_UP_EN; | ||
3656 | val |= MAC_TX_CFG_NEVER_GIVE_UP_LIM; | ||
3657 | #endif | ||
3658 | } | ||
3659 | /* val now set up for REG_MAC_TX_CFG */ | ||
3660 | |||
3661 | /* If gigabit and half-duplex, enable carrier extension | ||
3662 | * mode. increase slot time to 512 bytes as well. | ||
3663 | * else, disable it and make sure slot time is 64 bytes. | ||
3664 | * also activate checksum bug workaround | ||
3665 | */ | ||
3666 | if ((speed == 1000) && !full_duplex) { | ||
3667 | writel(val | MAC_TX_CFG_CARRIER_EXTEND, | ||
3668 | cp->regs + REG_MAC_TX_CFG); | ||
3669 | |||
3670 | val = readl(cp->regs + REG_MAC_RX_CFG); | ||
3671 | val &= ~MAC_RX_CFG_STRIP_FCS; /* checksum workaround */ | ||
3672 | writel(val | MAC_RX_CFG_CARRIER_EXTEND, | ||
3673 | cp->regs + REG_MAC_RX_CFG); | ||
3674 | |||
3675 | writel(0x200, cp->regs + REG_MAC_SLOT_TIME); | ||
3676 | |||
3677 | cp->crc_size = 4; | ||
3678 | /* minimum size gigabit frame at half duplex */ | ||
3679 | cp->min_frame_size = CAS_1000MB_MIN_FRAME; | ||
3680 | |||
3681 | } else { | ||
3682 | writel(val, cp->regs + REG_MAC_TX_CFG); | ||
3683 | |||
3684 | /* checksum bug workaround. don't strip FCS when in | ||
3685 | * half-duplex mode | ||
3686 | */ | ||
3687 | val = readl(cp->regs + REG_MAC_RX_CFG); | ||
3688 | if (full_duplex) { | ||
3689 | val |= MAC_RX_CFG_STRIP_FCS; | ||
3690 | cp->crc_size = 0; | ||
3691 | cp->min_frame_size = CAS_MIN_MTU; | ||
3692 | } else { | ||
3693 | val &= ~MAC_RX_CFG_STRIP_FCS; | ||
3694 | cp->crc_size = 4; | ||
3695 | cp->min_frame_size = CAS_MIN_FRAME; | ||
3696 | } | ||
3697 | writel(val & ~MAC_RX_CFG_CARRIER_EXTEND, | ||
3698 | cp->regs + REG_MAC_RX_CFG); | ||
3699 | writel(0x40, cp->regs + REG_MAC_SLOT_TIME); | ||
3700 | } | ||
3701 | |||
3702 | if (netif_msg_link(cp)) { | ||
3703 | if (pause & 0x01) { | ||
3704 | netdev_info(cp->dev, "Pause is enabled (rxfifo: %d off: %d on: %d)\n", | ||
3705 | cp->rx_fifo_size, | ||
3706 | cp->rx_pause_off, | ||
3707 | cp->rx_pause_on); | ||
3708 | } else if (pause & 0x10) { | ||
3709 | netdev_info(cp->dev, "TX pause enabled\n"); | ||
3710 | } else { | ||
3711 | netdev_info(cp->dev, "Pause is disabled\n"); | ||
3712 | } | ||
3713 | } | ||
3714 | |||
3715 | val = readl(cp->regs + REG_MAC_CTRL_CFG); | ||
3716 | val &= ~(MAC_CTRL_CFG_SEND_PAUSE_EN | MAC_CTRL_CFG_RECV_PAUSE_EN); | ||
3717 | if (pause) { /* symmetric or asymmetric pause */ | ||
3718 | val |= MAC_CTRL_CFG_SEND_PAUSE_EN; | ||
3719 | if (pause & 0x01) { /* symmetric pause */ | ||
3720 | val |= MAC_CTRL_CFG_RECV_PAUSE_EN; | ||
3721 | } | ||
3722 | } | ||
3723 | writel(val, cp->regs + REG_MAC_CTRL_CFG); | ||
3724 | cas_start_dma(cp); | ||
3725 | } | ||
3726 | |||
3727 | /* Must be invoked under cp->lock. */ | ||
3728 | static void cas_init_hw(struct cas *cp, int restart_link) | ||
3729 | { | ||
3730 | if (restart_link) | ||
3731 | cas_phy_init(cp); | ||
3732 | |||
3733 | cas_init_pause_thresholds(cp); | ||
3734 | cas_init_mac(cp); | ||
3735 | cas_init_dma(cp); | ||
3736 | |||
3737 | if (restart_link) { | ||
3738 | /* Default aneg parameters */ | ||
3739 | cp->timer_ticks = 0; | ||
3740 | cas_begin_auto_negotiation(cp, NULL); | ||
3741 | } else if (cp->lstate == link_up) { | ||
3742 | cas_set_link_modes(cp); | ||
3743 | netif_carrier_on(cp->dev); | ||
3744 | } | ||
3745 | } | ||
3746 | |||
3747 | /* Must be invoked under cp->lock. on earlier cassini boards, | ||
3748 | * SOFT_0 is tied to PCI reset. we use this to force a pci reset, | ||
3749 | * let it settle out, and then restore pci state. | ||
3750 | */ | ||
3751 | static void cas_hard_reset(struct cas *cp) | ||
3752 | { | ||
3753 | writel(BIM_LOCAL_DEV_SOFT_0, cp->regs + REG_BIM_LOCAL_DEV_EN); | ||
3754 | udelay(20); | ||
3755 | pci_restore_state(cp->pdev); | ||
3756 | } | ||
3757 | |||
3758 | |||
3759 | static void cas_global_reset(struct cas *cp, int blkflag) | ||
3760 | { | ||
3761 | int limit; | ||
3762 | |||
3763 | /* issue a global reset. don't use RSTOUT. */ | ||
3764 | if (blkflag && !CAS_PHY_MII(cp->phy_type)) { | ||
3765 | /* For PCS, when the blkflag is set, we should set the | ||
3766 | * SW_RESET_BLOCK_PCS_SLINK bit to prevent the results of | ||
3767 | * the last autonegotiation from being cleared. We'll | ||
3768 | * need some special handling if the chip is set into a | ||
3769 | * loopback mode. | ||
3770 | */ | ||
3771 | writel((SW_RESET_TX | SW_RESET_RX | SW_RESET_BLOCK_PCS_SLINK), | ||
3772 | cp->regs + REG_SW_RESET); | ||
3773 | } else { | ||
3774 | writel(SW_RESET_TX | SW_RESET_RX, cp->regs + REG_SW_RESET); | ||
3775 | } | ||
3776 | |||
3777 | /* need to wait at least 3ms before polling register */ | ||
3778 | mdelay(3); | ||
3779 | |||
3780 | limit = STOP_TRIES; | ||
3781 | while (limit-- > 0) { | ||
3782 | u32 val = readl(cp->regs + REG_SW_RESET); | ||
3783 | if ((val & (SW_RESET_TX | SW_RESET_RX)) == 0) | ||
3784 | goto done; | ||
3785 | udelay(10); | ||
3786 | } | ||
3787 | netdev_err(cp->dev, "sw reset failed\n"); | ||
3788 | |||
3789 | done: | ||
3790 | /* enable various BIM interrupts */ | ||
3791 | writel(BIM_CFG_DPAR_INTR_ENABLE | BIM_CFG_RMA_INTR_ENABLE | | ||
3792 | BIM_CFG_RTA_INTR_ENABLE, cp->regs + REG_BIM_CFG); | ||
3793 | |||
3794 | /* clear out pci error status mask for handled errors. | ||
3795 | * we don't deal with DMA counter overflows as they happen | ||
3796 | * all the time. | ||
3797 | */ | ||
3798 | writel(0xFFFFFFFFU & ~(PCI_ERR_BADACK | PCI_ERR_DTRTO | | ||
3799 | PCI_ERR_OTHER | PCI_ERR_BIM_DMA_WRITE | | ||
3800 | PCI_ERR_BIM_DMA_READ), cp->regs + | ||
3801 | REG_PCI_ERR_STATUS_MASK); | ||
3802 | |||
3803 | /* set up for MII by default to address mac rx reset timeout | ||
3804 | * issue | ||
3805 | */ | ||
3806 | writel(PCS_DATAPATH_MODE_MII, cp->regs + REG_PCS_DATAPATH_MODE); | ||
3807 | } | ||
3808 | |||
3809 | static void cas_reset(struct cas *cp, int blkflag) | ||
3810 | { | ||
3811 | u32 val; | ||
3812 | |||
3813 | cas_mask_intr(cp); | ||
3814 | cas_global_reset(cp, blkflag); | ||
3815 | cas_mac_reset(cp); | ||
3816 | cas_entropy_reset(cp); | ||
3817 | |||
3818 | /* disable dma engines. */ | ||
3819 | val = readl(cp->regs + REG_TX_CFG); | ||
3820 | val &= ~TX_CFG_DMA_EN; | ||
3821 | writel(val, cp->regs + REG_TX_CFG); | ||
3822 | |||
3823 | val = readl(cp->regs + REG_RX_CFG); | ||
3824 | val &= ~RX_CFG_DMA_EN; | ||
3825 | writel(val, cp->regs + REG_RX_CFG); | ||
3826 | |||
3827 | /* program header parser */ | ||
3828 | if ((cp->cas_flags & CAS_FLAG_TARGET_ABORT) || | ||
3829 | (CAS_HP_ALT_FIRMWARE == cas_prog_null)) { | ||
3830 | cas_load_firmware(cp, CAS_HP_FIRMWARE); | ||
3831 | } else { | ||
3832 | cas_load_firmware(cp, CAS_HP_ALT_FIRMWARE); | ||
3833 | } | ||
3834 | |||
3835 | /* clear out error registers */ | ||
3836 | spin_lock(&cp->stat_lock[N_TX_RINGS]); | ||
3837 | cas_clear_mac_err(cp); | ||
3838 | spin_unlock(&cp->stat_lock[N_TX_RINGS]); | ||
3839 | } | ||
3840 | |||
3841 | /* Shut down the chip, must be called with pm_mutex held. */ | ||
3842 | static void cas_shutdown(struct cas *cp) | ||
3843 | { | ||
3844 | unsigned long flags; | ||
3845 | |||
3846 | /* Make us not-running to avoid timers respawning */ | ||
3847 | cp->hw_running = 0; | ||
3848 | |||
3849 | del_timer_sync(&cp->link_timer); | ||
3850 | |||
3851 | /* Stop the reset task */ | ||
3852 | #if 0 | ||
3853 | while (atomic_read(&cp->reset_task_pending_mtu) || | ||
3854 | atomic_read(&cp->reset_task_pending_spare) || | ||
3855 | atomic_read(&cp->reset_task_pending_all)) | ||
3856 | schedule(); | ||
3857 | |||
3858 | #else | ||
3859 | while (atomic_read(&cp->reset_task_pending)) | ||
3860 | schedule(); | ||
3861 | #endif | ||
3862 | /* Actually stop the chip */ | ||
3863 | cas_lock_all_save(cp, flags); | ||
3864 | cas_reset(cp, 0); | ||
3865 | if (cp->cas_flags & CAS_FLAG_SATURN) | ||
3866 | cas_phy_powerdown(cp); | ||
3867 | cas_unlock_all_restore(cp, flags); | ||
3868 | } | ||
3869 | |||
3870 | static int cas_change_mtu(struct net_device *dev, int new_mtu) | ||
3871 | { | ||
3872 | struct cas *cp = netdev_priv(dev); | ||
3873 | |||
3874 | if (new_mtu < CAS_MIN_MTU || new_mtu > CAS_MAX_MTU) | ||
3875 | return -EINVAL; | ||
3876 | |||
3877 | dev->mtu = new_mtu; | ||
3878 | if (!netif_running(dev) || !netif_device_present(dev)) | ||
3879 | return 0; | ||
3880 | |||
3881 | /* let the reset task handle it */ | ||
3882 | #if 1 | ||
3883 | atomic_inc(&cp->reset_task_pending); | ||
3884 | if ((cp->phy_type & CAS_PHY_SERDES)) { | ||
3885 | atomic_inc(&cp->reset_task_pending_all); | ||
3886 | } else { | ||
3887 | atomic_inc(&cp->reset_task_pending_mtu); | ||
3888 | } | ||
3889 | schedule_work(&cp->reset_task); | ||
3890 | #else | ||
3891 | atomic_set(&cp->reset_task_pending, (cp->phy_type & CAS_PHY_SERDES) ? | ||
3892 | CAS_RESET_ALL : CAS_RESET_MTU); | ||
3893 | pr_err("reset called in cas_change_mtu\n"); | ||
3894 | schedule_work(&cp->reset_task); | ||
3895 | #endif | ||
3896 | |||
3897 | flush_work_sync(&cp->reset_task); | ||
3898 | return 0; | ||
3899 | } | ||
3900 | |||
3901 | static void cas_clean_txd(struct cas *cp, int ring) | ||
3902 | { | ||
3903 | struct cas_tx_desc *txd = cp->init_txds[ring]; | ||
3904 | struct sk_buff *skb, **skbs = cp->tx_skbs[ring]; | ||
3905 | u64 daddr, dlen; | ||
3906 | int i, size; | ||
3907 | |||
3908 | size = TX_DESC_RINGN_SIZE(ring); | ||
3909 | for (i = 0; i < size; i++) { | ||
3910 | int frag; | ||
3911 | |||
3912 | if (skbs[i] == NULL) | ||
3913 | continue; | ||
3914 | |||
3915 | skb = skbs[i]; | ||
3916 | skbs[i] = NULL; | ||
3917 | |||
3918 | for (frag = 0; frag <= skb_shinfo(skb)->nr_frags; frag++) { | ||
3919 | int ent = i & (size - 1); | ||
3920 | |||
3921 | /* first buffer is never a tiny buffer and so | ||
3922 | * needs to be unmapped. | ||
3923 | */ | ||
3924 | daddr = le64_to_cpu(txd[ent].buffer); | ||
3925 | dlen = CAS_VAL(TX_DESC_BUFLEN, | ||
3926 | le64_to_cpu(txd[ent].control)); | ||
3927 | pci_unmap_page(cp->pdev, daddr, dlen, | ||
3928 | PCI_DMA_TODEVICE); | ||
3929 | |||
3930 | if (frag != skb_shinfo(skb)->nr_frags) { | ||
3931 | i++; | ||
3932 | |||
3933 | /* next buffer might be a tiny buffer. | ||
3934 | * skip past it. | ||
3935 | */ | ||
3936 | ent = i & (size - 1); | ||
3937 | if (cp->tx_tiny_use[ring][ent].used) | ||
3938 | i++; | ||
3939 | } | ||
3940 | } | ||
3941 | dev_kfree_skb_any(skb); | ||
3942 | } | ||
3943 | |||
3944 | /* zero out tiny buf usage */ | ||
3945 | memset(cp->tx_tiny_use[ring], 0, size*sizeof(*cp->tx_tiny_use[ring])); | ||
3946 | } | ||
3947 | |||
3948 | /* freed on close */ | ||
3949 | static inline void cas_free_rx_desc(struct cas *cp, int ring) | ||
3950 | { | ||
3951 | cas_page_t **page = cp->rx_pages[ring]; | ||
3952 | int i, size; | ||
3953 | |||
3954 | size = RX_DESC_RINGN_SIZE(ring); | ||
3955 | for (i = 0; i < size; i++) { | ||
3956 | if (page[i]) { | ||
3957 | cas_page_free(cp, page[i]); | ||
3958 | page[i] = NULL; | ||
3959 | } | ||
3960 | } | ||
3961 | } | ||
3962 | |||
3963 | static void cas_free_rxds(struct cas *cp) | ||
3964 | { | ||
3965 | int i; | ||
3966 | |||
3967 | for (i = 0; i < N_RX_DESC_RINGS; i++) | ||
3968 | cas_free_rx_desc(cp, i); | ||
3969 | } | ||
3970 | |||
3971 | /* Must be invoked under cp->lock. */ | ||
3972 | static void cas_clean_rings(struct cas *cp) | ||
3973 | { | ||
3974 | int i; | ||
3975 | |||
3976 | /* need to clean all tx rings */ | ||
3977 | memset(cp->tx_old, 0, sizeof(*cp->tx_old)*N_TX_RINGS); | ||
3978 | memset(cp->tx_new, 0, sizeof(*cp->tx_new)*N_TX_RINGS); | ||
3979 | for (i = 0; i < N_TX_RINGS; i++) | ||
3980 | cas_clean_txd(cp, i); | ||
3981 | |||
3982 | /* zero out init block */ | ||
3983 | memset(cp->init_block, 0, sizeof(struct cas_init_block)); | ||
3984 | cas_clean_rxds(cp); | ||
3985 | cas_clean_rxcs(cp); | ||
3986 | } | ||
3987 | |||
3988 | /* allocated on open */ | ||
3989 | static inline int cas_alloc_rx_desc(struct cas *cp, int ring) | ||
3990 | { | ||
3991 | cas_page_t **page = cp->rx_pages[ring]; | ||
3992 | int size, i = 0; | ||
3993 | |||
3994 | size = RX_DESC_RINGN_SIZE(ring); | ||
3995 | for (i = 0; i < size; i++) { | ||
3996 | if ((page[i] = cas_page_alloc(cp, GFP_KERNEL)) == NULL) | ||
3997 | return -1; | ||
3998 | } | ||
3999 | return 0; | ||
4000 | } | ||
4001 | |||
4002 | static int cas_alloc_rxds(struct cas *cp) | ||
4003 | { | ||
4004 | int i; | ||
4005 | |||
4006 | for (i = 0; i < N_RX_DESC_RINGS; i++) { | ||
4007 | if (cas_alloc_rx_desc(cp, i) < 0) { | ||
4008 | cas_free_rxds(cp); | ||
4009 | return -1; | ||
4010 | } | ||
4011 | } | ||
4012 | return 0; | ||
4013 | } | ||
4014 | |||
4015 | static void cas_reset_task(struct work_struct *work) | ||
4016 | { | ||
4017 | struct cas *cp = container_of(work, struct cas, reset_task); | ||
4018 | #if 0 | ||
4019 | int pending = atomic_read(&cp->reset_task_pending); | ||
4020 | #else | ||
4021 | int pending_all = atomic_read(&cp->reset_task_pending_all); | ||
4022 | int pending_spare = atomic_read(&cp->reset_task_pending_spare); | ||
4023 | int pending_mtu = atomic_read(&cp->reset_task_pending_mtu); | ||
4024 | |||
4025 | if (pending_all == 0 && pending_spare == 0 && pending_mtu == 0) { | ||
4026 | /* We can have more tasks scheduled than actually | ||
4027 | * needed. | ||
4028 | */ | ||
4029 | atomic_dec(&cp->reset_task_pending); | ||
4030 | return; | ||
4031 | } | ||
4032 | #endif | ||
4033 | /* The link went down, we reset the ring, but keep | ||
4034 | * DMA stopped. Use this function for reset | ||
4035 | * on error as well. | ||
4036 | */ | ||
4037 | if (cp->hw_running) { | ||
4038 | unsigned long flags; | ||
4039 | |||
4040 | /* Make sure we don't get interrupts or tx packets */ | ||
4041 | netif_device_detach(cp->dev); | ||
4042 | cas_lock_all_save(cp, flags); | ||
4043 | |||
4044 | if (cp->opened) { | ||
4045 | /* We call cas_spare_recover from cas_open as well, | ||
4046 | * but the lists cas_spare_recover uses are not | ||
4047 | * initialized until cas_open runs. | ||
4048 | */ | ||
4049 | cas_spare_recover(cp, GFP_ATOMIC); | ||
4050 | } | ||
4051 | #if 1 | ||
4052 | /* test => only pending_spare set */ | ||
4053 | if (!pending_all && !pending_mtu) | ||
4054 | goto done; | ||
4055 | #else | ||
4056 | if (pending == CAS_RESET_SPARE) | ||
4057 | goto done; | ||
4058 | #endif | ||
4059 | /* When pending == CAS_RESET_ALL, the following | ||
4060 | * call to cas_init_hw will restart auto negotiation. | ||
4061 | * Passing !(pending == CAS_RESET_ALL) as the second | ||
4062 | * argument of cas_reset makes it 1 whenever auto | ||
4063 | * negotiation is not being restarted, which avoids | ||
4064 | * reinitializing the PHY in the normal PCS case. | ||
4065 | */ | ||
4066 | #if 1 | ||
4067 | cas_reset(cp, !(pending_all > 0)); | ||
4068 | if (cp->opened) | ||
4069 | cas_clean_rings(cp); | ||
4070 | cas_init_hw(cp, (pending_all > 0)); | ||
4071 | #else | ||
4072 | cas_reset(cp, !(pending == CAS_RESET_ALL)); | ||
4073 | if (cp->opened) | ||
4074 | cas_clean_rings(cp); | ||
4075 | cas_init_hw(cp, pending == CAS_RESET_ALL); | ||
4076 | #endif | ||
4077 | |||
4078 | done: | ||
4079 | cas_unlock_all_restore(cp, flags); | ||
4080 | netif_device_attach(cp->dev); | ||
4081 | } | ||
4082 | #if 1 | ||
4083 | atomic_sub(pending_all, &cp->reset_task_pending_all); | ||
4084 | atomic_sub(pending_spare, &cp->reset_task_pending_spare); | ||
4085 | atomic_sub(pending_mtu, &cp->reset_task_pending_mtu); | ||
4086 | atomic_dec(&cp->reset_task_pending); | ||
4087 | #else | ||
4088 | atomic_set(&cp->reset_task_pending, 0); | ||
4089 | #endif | ||
4090 | } | ||
4091 | |||
4092 | static void cas_link_timer(unsigned long data) | ||
4093 | { | ||
4094 | struct cas *cp = (struct cas *) data; | ||
4095 | int mask, pending = 0, reset = 0; | ||
4096 | unsigned long flags; | ||
4097 | |||
4098 | if (link_transition_timeout != 0 && | ||
4099 | cp->link_transition_jiffies_valid && | ||
4100 | ((jiffies - cp->link_transition_jiffies) > | ||
4101 | (link_transition_timeout))) { | ||
4102 | /* One-second counter so link-down workaround doesn't | ||
4103 | * cause resets to occur so fast as to fool the switch | ||
4104 | * into thinking the link is down. | ||
4105 | */ | ||
4106 | cp->link_transition_jiffies_valid = 0; | ||
4107 | } | ||
4108 | |||
4109 | if (!cp->hw_running) | ||
4110 | return; | ||
4111 | |||
4112 | spin_lock_irqsave(&cp->lock, flags); | ||
4113 | cas_lock_tx(cp); | ||
4114 | cas_entropy_gather(cp); | ||
4115 | |||
4116 | /* If the link task is still pending, we just | ||
4117 | * reschedule the link timer | ||
4118 | */ | ||
4119 | #if 1 | ||
4120 | if (atomic_read(&cp->reset_task_pending_all) || | ||
4121 | atomic_read(&cp->reset_task_pending_spare) || | ||
4122 | atomic_read(&cp->reset_task_pending_mtu)) | ||
4123 | goto done; | ||
4124 | #else | ||
4125 | if (atomic_read(&cp->reset_task_pending)) | ||
4126 | goto done; | ||
4127 | #endif | ||
4128 | |||
4129 | /* check for rx cleaning */ | ||
4130 | if ((mask = (cp->cas_flags & CAS_FLAG_RXD_POST_MASK))) { | ||
4131 | int i, rmask; | ||
4132 | |||
4133 | for (i = 0; i < MAX_RX_DESC_RINGS; i++) { | ||
4134 | rmask = CAS_FLAG_RXD_POST(i); | ||
4135 | if ((mask & rmask) == 0) | ||
4136 | continue; | ||
4137 | |||
4138 | /* post_rxds will do a mod_timer */ | ||
4139 | if (cas_post_rxds_ringN(cp, i, cp->rx_last[i]) < 0) { | ||
4140 | pending = 1; | ||
4141 | continue; | ||
4142 | } | ||
4143 | cp->cas_flags &= ~rmask; | ||
4144 | } | ||
4145 | } | ||
4146 | |||
4147 | if (CAS_PHY_MII(cp->phy_type)) { | ||
4148 | u16 bmsr; | ||
4149 | cas_mif_poll(cp, 0); | ||
4150 | bmsr = cas_phy_read(cp, MII_BMSR); | ||
4151 | /* WTZ: Solaris driver reads this twice, but that | ||
4152 | * may be due to the PCS case and the use of a | ||
4153 | * common implementation. Read it twice here to be | ||
4154 | * safe. | ||
4155 | */ | ||
4156 | bmsr = cas_phy_read(cp, MII_BMSR); | ||
4157 | cas_mif_poll(cp, 1); | ||
4158 | readl(cp->regs + REG_MIF_STATUS); /* avoid dups */ | ||
4159 | reset = cas_mii_link_check(cp, bmsr); | ||
4160 | } else { | ||
4161 | reset = cas_pcs_link_check(cp); | ||
4162 | } | ||
4163 | |||
4164 | if (reset) | ||
4165 | goto done; | ||
4166 | |||
4167 | /* check for tx state machine confusion */ | ||
4168 | if ((readl(cp->regs + REG_MAC_TX_STATUS) & MAC_TX_FRAME_XMIT) == 0) { | ||
4169 | u32 val = readl(cp->regs + REG_MAC_STATE_MACHINE); | ||
4170 | u32 wptr, rptr; | ||
4171 | int tlm = CAS_VAL(MAC_SM_TLM, val); | ||
4172 | |||
4173 | if (((tlm == 0x5) || (tlm == 0x3)) && | ||
4174 | (CAS_VAL(MAC_SM_ENCAP_SM, val) == 0)) { | ||
4175 | netif_printk(cp, tx_err, KERN_DEBUG, cp->dev, | ||
4176 | "tx err: MAC_STATE[%08x]\n", val); | ||
4177 | reset = 1; | ||
4178 | goto done; | ||
4179 | } | ||
4180 | |||
4181 | val = readl(cp->regs + REG_TX_FIFO_PKT_CNT); | ||
4182 | wptr = readl(cp->regs + REG_TX_FIFO_WRITE_PTR); | ||
4183 | rptr = readl(cp->regs + REG_TX_FIFO_READ_PTR); | ||
4184 | if ((val == 0) && (wptr != rptr)) { | ||
4185 | netif_printk(cp, tx_err, KERN_DEBUG, cp->dev, | ||
4186 | "tx err: TX_FIFO[%08x:%08x:%08x]\n", | ||
4187 | val, wptr, rptr); | ||
4188 | reset = 1; | ||
4189 | } | ||
4190 | |||
4191 | if (reset) | ||
4192 | cas_hard_reset(cp); | ||
4193 | } | ||
4194 | |||
4195 | done: | ||
4196 | if (reset) { | ||
4197 | #if 1 | ||
4198 | atomic_inc(&cp->reset_task_pending); | ||
4199 | atomic_inc(&cp->reset_task_pending_all); | ||
4200 | schedule_work(&cp->reset_task); | ||
4201 | #else | ||
4202 | atomic_set(&cp->reset_task_pending, CAS_RESET_ALL); | ||
4203 | pr_err("reset called in cas_link_timer\n"); | ||
4204 | schedule_work(&cp->reset_task); | ||
4205 | #endif | ||
4206 | } | ||
4207 | |||
4208 | if (!pending) | ||
4209 | mod_timer(&cp->link_timer, jiffies + CAS_LINK_TIMEOUT); | ||
4210 | cas_unlock_tx(cp); | ||
4211 | spin_unlock_irqrestore(&cp->lock, flags); | ||
4212 | } | ||
4213 | |||
4214 | /* tiny buffers are used to avoid target abort issues with | ||
4215 | * older cassinis. | ||
4216 | */ | ||
4217 | static void cas_tx_tiny_free(struct cas *cp) | ||
4218 | { | ||
4219 | struct pci_dev *pdev = cp->pdev; | ||
4220 | int i; | ||
4221 | |||
4222 | for (i = 0; i < N_TX_RINGS; i++) { | ||
4223 | if (!cp->tx_tiny_bufs[i]) | ||
4224 | continue; | ||
4225 | |||
4226 | pci_free_consistent(pdev, TX_TINY_BUF_BLOCK, | ||
4227 | cp->tx_tiny_bufs[i], | ||
4228 | cp->tx_tiny_dvma[i]); | ||
4229 | cp->tx_tiny_bufs[i] = NULL; | ||
4230 | } | ||
4231 | } | ||
4232 | |||
4233 | static int cas_tx_tiny_alloc(struct cas *cp) | ||
4234 | { | ||
4235 | struct pci_dev *pdev = cp->pdev; | ||
4236 | int i; | ||
4237 | |||
4238 | for (i = 0; i < N_TX_RINGS; i++) { | ||
4239 | cp->tx_tiny_bufs[i] = | ||
4240 | pci_alloc_consistent(pdev, TX_TINY_BUF_BLOCK, | ||
4241 | &cp->tx_tiny_dvma[i]); | ||
4242 | if (!cp->tx_tiny_bufs[i]) { | ||
4243 | cas_tx_tiny_free(cp); | ||
4244 | return -1; | ||
4245 | } | ||
4246 | } | ||
4247 | return 0; | ||
4248 | } | ||
4249 | |||
4250 | |||
4251 | static int cas_open(struct net_device *dev) | ||
4252 | { | ||
4253 | struct cas *cp = netdev_priv(dev); | ||
4254 | int hw_was_up, err; | ||
4255 | unsigned long flags; | ||
4256 | |||
4257 | mutex_lock(&cp->pm_mutex); | ||
4258 | |||
4259 | hw_was_up = cp->hw_running; | ||
4260 | |||
4261 | /* The power-management mutex protects the hw_running | ||
4262 | * etc. state so it is safe to do this bit without cp->lock | ||
4263 | */ | ||
4264 | if (!cp->hw_running) { | ||
4265 | /* Reset the chip */ | ||
4266 | cas_lock_all_save(cp, flags); | ||
4267 | /* We set the second arg to cas_reset to zero | ||
4268 | * because cas_init_hw below will have its second | ||
4269 | * argument set to non-zero, which will force | ||
4270 | * autonegotiation to start. | ||
4271 | */ | ||
4272 | cas_reset(cp, 0); | ||
4273 | cp->hw_running = 1; | ||
4274 | cas_unlock_all_restore(cp, flags); | ||
4275 | } | ||
4276 | |||
4277 | err = -ENOMEM; | ||
4278 | if (cas_tx_tiny_alloc(cp) < 0) | ||
4279 | goto err_unlock; | ||
4280 | |||
4281 | /* alloc rx descriptors */ | ||
4282 | if (cas_alloc_rxds(cp) < 0) | ||
4283 | goto err_tx_tiny; | ||
4284 | |||
4285 | /* allocate spares */ | ||
4286 | cas_spare_init(cp); | ||
4287 | cas_spare_recover(cp, GFP_KERNEL); | ||
4288 | |||
4289 | /* We can now request the interrupt as we know it's masked | ||
4290 | * on the controller. cassini+ has up to 4 interrupts | ||
4291 | * that can be used, but you need to do explicit pci interrupt | ||
4292 | * mapping to expose them | ||
4293 | */ | ||
4294 | if (request_irq(cp->pdev->irq, cas_interrupt, | ||
4295 | IRQF_SHARED, dev->name, (void *) dev)) { | ||
4296 | netdev_err(cp->dev, "failed to request irq !\n"); | ||
4297 | err = -EAGAIN; | ||
4298 | goto err_spare; | ||
4299 | } | ||
4300 | |||
4301 | #ifdef USE_NAPI | ||
4302 | napi_enable(&cp->napi); | ||
4303 | #endif | ||
4304 | /* init hw */ | ||
4305 | cas_lock_all_save(cp, flags); | ||
4306 | cas_clean_rings(cp); | ||
4307 | cas_init_hw(cp, !hw_was_up); | ||
4308 | cp->opened = 1; | ||
4309 | cas_unlock_all_restore(cp, flags); | ||
4310 | |||
4311 | netif_start_queue(dev); | ||
4312 | mutex_unlock(&cp->pm_mutex); | ||
4313 | return 0; | ||
4314 | |||
4315 | err_spare: | ||
4316 | cas_spare_free(cp); | ||
4317 | cas_free_rxds(cp); | ||
4318 | err_tx_tiny: | ||
4319 | cas_tx_tiny_free(cp); | ||
4320 | err_unlock: | ||
4321 | mutex_unlock(&cp->pm_mutex); | ||
4322 | return err; | ||
4323 | } | ||
4324 | |||
4325 | static int cas_close(struct net_device *dev) | ||
4326 | { | ||
4327 | unsigned long flags; | ||
4328 | struct cas *cp = netdev_priv(dev); | ||
4329 | |||
4330 | #ifdef USE_NAPI | ||
4331 | napi_disable(&cp->napi); | ||
4332 | #endif | ||
4333 | /* Make sure we don't get distracted by suspend/resume */ | ||
4334 | mutex_lock(&cp->pm_mutex); | ||
4335 | |||
4336 | netif_stop_queue(dev); | ||
4337 | |||
4338 | /* Stop traffic, mark us closed */ | ||
4339 | cas_lock_all_save(cp, flags); | ||
4340 | cp->opened = 0; | ||
4341 | cas_reset(cp, 0); | ||
4342 | cas_phy_init(cp); | ||
4343 | cas_begin_auto_negotiation(cp, NULL); | ||
4344 | cas_clean_rings(cp); | ||
4345 | cas_unlock_all_restore(cp, flags); | ||
4346 | |||
4347 | free_irq(cp->pdev->irq, (void *) dev); | ||
4348 | cas_spare_free(cp); | ||
4349 | cas_free_rxds(cp); | ||
4350 | cas_tx_tiny_free(cp); | ||
4351 | mutex_unlock(&cp->pm_mutex); | ||
4352 | return 0; | ||
4353 | } | ||
4354 | |||
4355 | static struct { | ||
4356 | const char name[ETH_GSTRING_LEN]; | ||
4357 | } ethtool_cassini_statnames[] = { | ||
4358 | {"collisions"}, | ||
4359 | {"rx_bytes"}, | ||
4360 | {"rx_crc_errors"}, | ||
4361 | {"rx_dropped"}, | ||
4362 | {"rx_errors"}, | ||
4363 | {"rx_fifo_errors"}, | ||
4364 | {"rx_frame_errors"}, | ||
4365 | {"rx_length_errors"}, | ||
4366 | {"rx_over_errors"}, | ||
4367 | {"rx_packets"}, | ||
4368 | {"tx_aborted_errors"}, | ||
4369 | {"tx_bytes"}, | ||
4370 | {"tx_dropped"}, | ||
4371 | {"tx_errors"}, | ||
4372 | {"tx_fifo_errors"}, | ||
4373 | {"tx_packets"} | ||
4374 | }; | ||
4375 | #define CAS_NUM_STAT_KEYS ARRAY_SIZE(ethtool_cassini_statnames) | ||
4376 | |||
4377 | static struct { | ||
4378 | const int offsets; /* neg. values for 2nd arg to cas_phy_read */ | ||
4379 | } ethtool_register_table[] = { | ||
4380 | {-MII_BMSR}, | ||
4381 | {-MII_BMCR}, | ||
4382 | {REG_CAWR}, | ||
4383 | {REG_INF_BURST}, | ||
4384 | {REG_BIM_CFG}, | ||
4385 | {REG_RX_CFG}, | ||
4386 | {REG_HP_CFG}, | ||
4387 | {REG_MAC_TX_CFG}, | ||
4388 | {REG_MAC_RX_CFG}, | ||
4389 | {REG_MAC_CTRL_CFG}, | ||
4390 | {REG_MAC_XIF_CFG}, | ||
4391 | {REG_MIF_CFG}, | ||
4392 | {REG_PCS_CFG}, | ||
4393 | {REG_SATURN_PCFG}, | ||
4394 | {REG_PCS_MII_STATUS}, | ||
4395 | {REG_PCS_STATE_MACHINE}, | ||
4396 | {REG_MAC_COLL_EXCESS}, | ||
4397 | {REG_MAC_COLL_LATE} | ||
4398 | }; | ||
4399 | #define CAS_REG_LEN ARRAY_SIZE(ethtool_register_table) | ||
4400 | #define CAS_MAX_REGS (sizeof (u32)*CAS_REG_LEN) | ||
4401 | |||
4402 | static void cas_read_regs(struct cas *cp, u8 *ptr, int len) | ||
4403 | { | ||
4404 | u8 *p; | ||
4405 | int i; | ||
4406 | unsigned long flags; | ||
4407 | |||
4408 | spin_lock_irqsave(&cp->lock, flags); | ||
4409 | for (i = 0, p = ptr; i < len; i++, p += sizeof(u32)) { | ||
4410 | u16 hval; | ||
4411 | u32 val; | ||
4412 | if (ethtool_register_table[i].offsets < 0) { | ||
4413 | hval = cas_phy_read(cp, | ||
4414 | -ethtool_register_table[i].offsets); | ||
4415 | val = hval; | ||
4416 | } else { | ||
4417 | val = readl(cp->regs + ethtool_register_table[i].offsets); | ||
4418 | } | ||
4419 | memcpy(p, (u8 *)&val, sizeof(u32)); | ||
4420 | } | ||
4421 | spin_unlock_irqrestore(&cp->lock, flags); | ||
4422 | } | ||
4423 | |||
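The sign convention in ethtool_register_table is worth spelling out: non-negative entries are MMIO offsets fed to readl(), while negative entries are MII register numbers stored negated so cas_read_regs() can recover them for cas_phy_read(). A sketch of that dispatch with stand-in accessors and made-up values:

	#include <stdio.h>

	#define MII_BMSR 0x01
	#define MII_LPA  0x05

	static unsigned int fake_phy_read(int reg)  { (void)reg; return 0x7809; }
	static unsigned int fake_mmio_read(int off) { (void)off; return 0xdeadbeef; }

	int main(void)
	{
		int offsets[] = { -MII_BMSR, -MII_LPA, 0x1000 /* hypothetical MMIO */ };
		int i;

		for (i = 0; i < 3; i++) {
			unsigned int val = offsets[i] < 0 ?
				fake_phy_read(-offsets[i]) :
				fake_mmio_read(offsets[i]);
			printf("slot %d -> %08x\n", i, val);
		}
		return 0;
	}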
4424 | static struct net_device_stats *cas_get_stats(struct net_device *dev) | ||
4425 | { | ||
4426 | struct cas *cp = netdev_priv(dev); | ||
4427 | struct net_device_stats *stats = cp->net_stats; | ||
4428 | unsigned long flags; | ||
4429 | int i; | ||
4430 | unsigned long tmp; | ||
4431 | |||
4432 | /* we collate all of the stats into net_stats[N_TX_RINGS] */ | ||
4433 | if (!cp->hw_running) | ||
4434 | return stats + N_TX_RINGS; | ||
4435 | |||
4436 | /* collect outstanding stats */ | ||
4437 | /* WTZ: the Cassini spec gives these as 16 bit counters but | ||
4438 | * stored in 32-bit words. Added a mask of 0xffff to be safe, | ||
4439 | * in case the chip somehow puts any garbage in the other bits. | ||
4440 | * Also, counter usage didn't seem to match what Adrian did | ||
4441 | * in the parts of the code that set these quantities. Made | ||
4442 | * that consistent. | ||
4443 | */ | ||
4444 | spin_lock_irqsave(&cp->stat_lock[N_TX_RINGS], flags); | ||
4445 | stats[N_TX_RINGS].rx_crc_errors += | ||
4446 | readl(cp->regs + REG_MAC_FCS_ERR) & 0xffff; | ||
4447 | stats[N_TX_RINGS].rx_frame_errors += | ||
4448 | readl(cp->regs + REG_MAC_ALIGN_ERR) & 0xffff; | ||
4449 | stats[N_TX_RINGS].rx_length_errors += | ||
4450 | readl(cp->regs + REG_MAC_LEN_ERR) & 0xffff; | ||
4451 | #if 1 | ||
4452 | tmp = (readl(cp->regs + REG_MAC_COLL_EXCESS) & 0xffff) + | ||
4453 | (readl(cp->regs + REG_MAC_COLL_LATE) & 0xffff); | ||
4454 | stats[N_TX_RINGS].tx_aborted_errors += tmp; | ||
4455 | stats[N_TX_RINGS].collisions += | ||
4456 | tmp + (readl(cp->regs + REG_MAC_COLL_NORMAL) & 0xffff); | ||
4457 | #else | ||
4458 | stats[N_TX_RINGS].tx_aborted_errors += | ||
4459 | readl(cp->regs + REG_MAC_COLL_EXCESS); | ||
4460 | stats[N_TX_RINGS].collisions += readl(cp->regs + REG_MAC_COLL_EXCESS) + | ||
4461 | readl(cp->regs + REG_MAC_COLL_LATE); | ||
4462 | #endif | ||
4463 | cas_clear_mac_err(cp); | ||
4464 | |||
4465 | /* saved bits that are unique to ring 0 */ | ||
4466 | spin_lock(&cp->stat_lock[0]); | ||
4467 | stats[N_TX_RINGS].collisions += stats[0].collisions; | ||
4468 | stats[N_TX_RINGS].rx_over_errors += stats[0].rx_over_errors; | ||
4469 | stats[N_TX_RINGS].rx_frame_errors += stats[0].rx_frame_errors; | ||
4470 | stats[N_TX_RINGS].rx_fifo_errors += stats[0].rx_fifo_errors; | ||
4471 | stats[N_TX_RINGS].tx_aborted_errors += stats[0].tx_aborted_errors; | ||
4472 | stats[N_TX_RINGS].tx_fifo_errors += stats[0].tx_fifo_errors; | ||
4473 | spin_unlock(&cp->stat_lock[0]); | ||
4474 | |||
4475 | for (i = 0; i < N_TX_RINGS; i++) { | ||
4476 | spin_lock(&cp->stat_lock[i]); | ||
4477 | stats[N_TX_RINGS].rx_length_errors += | ||
4478 | stats[i].rx_length_errors; | ||
4479 | stats[N_TX_RINGS].rx_crc_errors += stats[i].rx_crc_errors; | ||
4480 | stats[N_TX_RINGS].rx_packets += stats[i].rx_packets; | ||
4481 | stats[N_TX_RINGS].tx_packets += stats[i].tx_packets; | ||
4482 | stats[N_TX_RINGS].rx_bytes += stats[i].rx_bytes; | ||
4483 | stats[N_TX_RINGS].tx_bytes += stats[i].tx_bytes; | ||
4484 | stats[N_TX_RINGS].rx_errors += stats[i].rx_errors; | ||
4485 | stats[N_TX_RINGS].tx_errors += stats[i].tx_errors; | ||
4486 | stats[N_TX_RINGS].rx_dropped += stats[i].rx_dropped; | ||
4487 | stats[N_TX_RINGS].tx_dropped += stats[i].tx_dropped; | ||
4488 | memset(stats + i, 0, sizeof(struct net_device_stats)); | ||
4489 | spin_unlock(&cp->stat_lock[i]); | ||
4490 | } | ||
4491 | spin_unlock_irqrestore(&cp->stat_lock[N_TX_RINGS], flags); | ||
4492 | return stats + N_TX_RINGS; | ||
4493 | } | ||
4494 | |||
4495 | |||
4496 | static void cas_set_multicast(struct net_device *dev) | ||
4497 | { | ||
4498 | struct cas *cp = netdev_priv(dev); | ||
4499 | u32 rxcfg, rxcfg_new; | ||
4500 | unsigned long flags; | ||
4501 | int limit = STOP_TRIES; | ||
4502 | |||
4503 | if (!cp->hw_running) | ||
4504 | return; | ||
4505 | |||
4506 | spin_lock_irqsave(&cp->lock, flags); | ||
4507 | rxcfg = readl(cp->regs + REG_MAC_RX_CFG); | ||
4508 | |||
4509 | /* disable RX MAC and wait for completion */ | ||
4510 | writel(rxcfg & ~MAC_RX_CFG_EN, cp->regs + REG_MAC_RX_CFG); | ||
4511 | while (readl(cp->regs + REG_MAC_RX_CFG) & MAC_RX_CFG_EN) { | ||
4512 | if (!limit--) | ||
4513 | break; | ||
4514 | udelay(10); | ||
4515 | } | ||
4516 | |||
4517 | /* disable hash filter and wait for completion */ | ||
4518 | limit = STOP_TRIES; | ||
4519 | rxcfg &= ~(MAC_RX_CFG_PROMISC_EN | MAC_RX_CFG_HASH_FILTER_EN); | ||
4520 | writel(rxcfg & ~MAC_RX_CFG_EN, cp->regs + REG_MAC_RX_CFG); | ||
4521 | while (readl(cp->regs + REG_MAC_RX_CFG) & MAC_RX_CFG_HASH_FILTER_EN) { | ||
4522 | if (!limit--) | ||
4523 | break; | ||
4524 | udelay(10); | ||
4525 | } | ||
4526 | |||
4527 | /* program hash filters */ | ||
4528 | cp->mac_rx_cfg = rxcfg_new = cas_setup_multicast(cp); | ||
4529 | rxcfg |= rxcfg_new; | ||
4530 | writel(rxcfg, cp->regs + REG_MAC_RX_CFG); | ||
4531 | spin_unlock_irqrestore(&cp->lock, flags); | ||
4532 | } | ||
4533 | |||
4534 | static void cas_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info) | ||
4535 | { | ||
4536 | struct cas *cp = netdev_priv(dev); | ||
4537 | strncpy(info->driver, DRV_MODULE_NAME, ETHTOOL_BUSINFO_LEN); | ||
4538 | strncpy(info->version, DRV_MODULE_VERSION, ETHTOOL_BUSINFO_LEN); | ||
4539 | info->fw_version[0] = '\0'; | ||
4540 | strncpy(info->bus_info, pci_name(cp->pdev), ETHTOOL_BUSINFO_LEN); | ||
4541 | info->regdump_len = cp->casreg_len < CAS_MAX_REGS ? | ||
4542 | cp->casreg_len : CAS_MAX_REGS; | ||
4543 | info->n_stats = CAS_NUM_STAT_KEYS; | ||
4544 | } | ||
4545 | |||
4546 | static int cas_get_settings(struct net_device *dev, struct ethtool_cmd *cmd) | ||
4547 | { | ||
4548 | struct cas *cp = netdev_priv(dev); | ||
4549 | u16 bmcr; | ||
4550 | int full_duplex, speed, pause; | ||
4551 | unsigned long flags; | ||
4552 | enum link_state linkstate = link_up; | ||
4553 | |||
4554 | cmd->advertising = 0; | ||
4555 | cmd->supported = SUPPORTED_Autoneg; | ||
4556 | if (cp->cas_flags & CAS_FLAG_1000MB_CAP) { | ||
4557 | cmd->supported |= SUPPORTED_1000baseT_Full; | ||
4558 | cmd->advertising |= ADVERTISED_1000baseT_Full; | ||
4559 | } | ||
4560 | |||
4561 | /* Record PHY settings if HW is on. */ | ||
4562 | spin_lock_irqsave(&cp->lock, flags); | ||
4563 | bmcr = 0; | ||
4564 | linkstate = cp->lstate; | ||
4565 | if (CAS_PHY_MII(cp->phy_type)) { | ||
4566 | cmd->port = PORT_MII; | ||
4567 | cmd->transceiver = (cp->cas_flags & CAS_FLAG_SATURN) ? | ||
4568 | XCVR_INTERNAL : XCVR_EXTERNAL; | ||
4569 | cmd->phy_address = cp->phy_addr; | ||
4570 | cmd->advertising |= ADVERTISED_TP | ADVERTISED_MII | | ||
4571 | ADVERTISED_10baseT_Half | | ||
4572 | ADVERTISED_10baseT_Full | | ||
4573 | ADVERTISED_100baseT_Half | | ||
4574 | ADVERTISED_100baseT_Full; | ||
4575 | |||
4576 | cmd->supported |= | ||
4577 | (SUPPORTED_10baseT_Half | | ||
4578 | SUPPORTED_10baseT_Full | | ||
4579 | SUPPORTED_100baseT_Half | | ||
4580 | SUPPORTED_100baseT_Full | | ||
4581 | SUPPORTED_TP | SUPPORTED_MII); | ||
4582 | |||
4583 | if (cp->hw_running) { | ||
4584 | cas_mif_poll(cp, 0); | ||
4585 | bmcr = cas_phy_read(cp, MII_BMCR); | ||
4586 | cas_read_mii_link_mode(cp, &full_duplex, | ||
4587 | &speed, &pause); | ||
4588 | cas_mif_poll(cp, 1); | ||
4589 | } | ||
4590 | |||
4591 | } else { | ||
4592 | cmd->port = PORT_FIBRE; | ||
4593 | cmd->transceiver = XCVR_INTERNAL; | ||
4594 | cmd->phy_address = 0; | ||
4595 | cmd->supported |= SUPPORTED_FIBRE; | ||
4596 | cmd->advertising |= ADVERTISED_FIBRE; | ||
4597 | |||
4598 | if (cp->hw_running) { | ||
4599 | /* pcs uses the same bits as mii */ | ||
4600 | bmcr = readl(cp->regs + REG_PCS_MII_CTRL); | ||
4601 | cas_read_pcs_link_mode(cp, &full_duplex, | ||
4602 | &speed, &pause); | ||
4603 | } | ||
4604 | } | ||
4605 | spin_unlock_irqrestore(&cp->lock, flags); | ||
4606 | |||
4607 | if (bmcr & BMCR_ANENABLE) { | ||
4608 | cmd->advertising |= ADVERTISED_Autoneg; | ||
4609 | cmd->autoneg = AUTONEG_ENABLE; | ||
4610 | ethtool_cmd_speed_set(cmd, ((speed == 10) ? | ||
4611 | SPEED_10 : | ||
4612 | ((speed == 1000) ? | ||
4613 | SPEED_1000 : SPEED_100))); | ||
4614 | cmd->duplex = full_duplex ? DUPLEX_FULL : DUPLEX_HALF; | ||
4615 | } else { | ||
4616 | cmd->autoneg = AUTONEG_DISABLE; | ||
4617 | ethtool_cmd_speed_set(cmd, ((bmcr & CAS_BMCR_SPEED1000) ? | ||
4618 | SPEED_1000 : | ||
4619 | ((bmcr & BMCR_SPEED100) ? | ||
4620 | SPEED_100 : SPEED_10))); | ||
4621 | cmd->duplex = | ||
4622 | (bmcr & BMCR_FULLDPLX) ? | ||
4623 | DUPLEX_FULL : DUPLEX_HALF; | ||
4624 | } | ||
4625 | if (linkstate != link_up) { | ||
4626 | /* Force these to "unknown" if the link is not up and | ||
4627 | * autonegotiation is enabled. We can set the link | ||
4628 | * speed to 0, but not cmd->duplex, | ||
4629 | * because its legal values are 0 and 1. Ethtool will | ||
4630 | * print the value reported in parentheses after the | ||
4631 | * word "Unknown" for unrecognized values. | ||
4632 | * | ||
4633 | * If in forced mode, we report the speed and duplex | ||
4634 | * settings that we configured. | ||
4635 | */ | ||
4636 | if (cp->link_cntl & BMCR_ANENABLE) { | ||
4637 | ethtool_cmd_speed_set(cmd, 0); | ||
4638 | cmd->duplex = 0xff; | ||
4639 | } else { | ||
4640 | ethtool_cmd_speed_set(cmd, SPEED_10); | ||
4641 | if (cp->link_cntl & BMCR_SPEED100) { | ||
4642 | ethtool_cmd_speed_set(cmd, SPEED_100); | ||
4643 | } else if (cp->link_cntl & CAS_BMCR_SPEED1000) { | ||
4644 | ethtool_cmd_speed_set(cmd, SPEED_1000); | ||
4645 | } | ||
4646 | cmd->duplex = (cp->link_cntl & BMCR_FULLDPLX)? | ||
4647 | DUPLEX_FULL : DUPLEX_HALF; | ||
4648 | } | ||
4649 | } | ||
4650 | return 0; | ||
4651 | } | ||
4652 | |||
4653 | static int cas_set_settings(struct net_device *dev, struct ethtool_cmd *cmd) | ||
4654 | { | ||
4655 | struct cas *cp = netdev_priv(dev); | ||
4656 | unsigned long flags; | ||
4657 | u32 speed = ethtool_cmd_speed(cmd); | ||
4658 | |||
4659 | /* Verify the settings we care about. */ | ||
4660 | if (cmd->autoneg != AUTONEG_ENABLE && | ||
4661 | cmd->autoneg != AUTONEG_DISABLE) | ||
4662 | return -EINVAL; | ||
4663 | |||
4664 | if (cmd->autoneg == AUTONEG_DISABLE && | ||
4665 | ((speed != SPEED_1000 && | ||
4666 | speed != SPEED_100 && | ||
4667 | speed != SPEED_10) || | ||
4668 | (cmd->duplex != DUPLEX_HALF && | ||
4669 | cmd->duplex != DUPLEX_FULL))) | ||
4670 | return -EINVAL; | ||
4671 | |||
4672 | /* Apply settings and restart link process. */ | ||
4673 | spin_lock_irqsave(&cp->lock, flags); | ||
4674 | cas_begin_auto_negotiation(cp, cmd); | ||
4675 | spin_unlock_irqrestore(&cp->lock, flags); | ||
4676 | return 0; | ||
4677 | } | ||
4678 | |||
4679 | static int cas_nway_reset(struct net_device *dev) | ||
4680 | { | ||
4681 | struct cas *cp = netdev_priv(dev); | ||
4682 | unsigned long flags; | ||
4683 | |||
4684 | if ((cp->link_cntl & BMCR_ANENABLE) == 0) | ||
4685 | return -EINVAL; | ||
4686 | |||
4687 | /* Restart link process. */ | ||
4688 | spin_lock_irqsave(&cp->lock, flags); | ||
4689 | cas_begin_auto_negotiation(cp, NULL); | ||
4690 | spin_unlock_irqrestore(&cp->lock, flags); | ||
4691 | |||
4692 | return 0; | ||
4693 | } | ||
4694 | |||
4695 | static u32 cas_get_link(struct net_device *dev) | ||
4696 | { | ||
4697 | struct cas *cp = netdev_priv(dev); | ||
4698 | return cp->lstate == link_up; | ||
4699 | } | ||
4700 | |||
4701 | static u32 cas_get_msglevel(struct net_device *dev) | ||
4702 | { | ||
4703 | struct cas *cp = netdev_priv(dev); | ||
4704 | return cp->msg_enable; | ||
4705 | } | ||
4706 | |||
4707 | static void cas_set_msglevel(struct net_device *dev, u32 value) | ||
4708 | { | ||
4709 | struct cas *cp = netdev_priv(dev); | ||
4710 | cp->msg_enable = value; | ||
4711 | } | ||
4712 | |||
4713 | static int cas_get_regs_len(struct net_device *dev) | ||
4714 | { | ||
4715 | struct cas *cp = netdev_priv(dev); | ||
4716 | return cp->casreg_len < CAS_MAX_REGS ? cp->casreg_len : CAS_MAX_REGS; | ||
4717 | } | ||
4718 | |||
4719 | static void cas_get_regs(struct net_device *dev, struct ethtool_regs *regs, | ||
4720 | void *p) | ||
4721 | { | ||
4722 | struct cas *cp = netdev_priv(dev); | ||
4723 | regs->version = 0; | ||
4724 | /* cas_read_regs handles locks (cp->lock). */ | ||
4725 | cas_read_regs(cp, p, regs->len / sizeof(u32)); | ||
4726 | } | ||
4727 | |||
4728 | static int cas_get_sset_count(struct net_device *dev, int sset) | ||
4729 | { | ||
4730 | switch (sset) { | ||
4731 | case ETH_SS_STATS: | ||
4732 | return CAS_NUM_STAT_KEYS; | ||
4733 | default: | ||
4734 | return -EOPNOTSUPP; | ||
4735 | } | ||
4736 | } | ||
4737 | |||
4738 | static void cas_get_strings(struct net_device *dev, u32 stringset, u8 *data) | ||
4739 | { | ||
4740 | memcpy(data, ðtool_cassini_statnames, | ||
4741 | CAS_NUM_STAT_KEYS * ETH_GSTRING_LEN); | ||
4742 | } | ||
4743 | |||
4744 | static void cas_get_ethtool_stats(struct net_device *dev, | ||
4745 | struct ethtool_stats *estats, u64 *data) | ||
4746 | { | ||
4747 | struct cas *cp = netdev_priv(dev); | ||
4748 | struct net_device_stats *stats = cas_get_stats(cp->dev); | ||
4749 | int i = 0; | ||
4750 | data[i++] = stats->collisions; | ||
4751 | data[i++] = stats->rx_bytes; | ||
4752 | data[i++] = stats->rx_crc_errors; | ||
4753 | data[i++] = stats->rx_dropped; | ||
4754 | data[i++] = stats->rx_errors; | ||
4755 | data[i++] = stats->rx_fifo_errors; | ||
4756 | data[i++] = stats->rx_frame_errors; | ||
4757 | data[i++] = stats->rx_length_errors; | ||
4758 | data[i++] = stats->rx_over_errors; | ||
4759 | data[i++] = stats->rx_packets; | ||
4760 | data[i++] = stats->tx_aborted_errors; | ||
4761 | data[i++] = stats->tx_bytes; | ||
4762 | data[i++] = stats->tx_dropped; | ||
4763 | data[i++] = stats->tx_errors; | ||
4764 | data[i++] = stats->tx_fifo_errors; | ||
4765 | data[i++] = stats->tx_packets; | ||
4766 | BUG_ON(i != CAS_NUM_STAT_KEYS); | ||
4767 | } | ||
4768 | |||
4769 | static const struct ethtool_ops cas_ethtool_ops = { | ||
4770 | .get_drvinfo = cas_get_drvinfo, | ||
4771 | .get_settings = cas_get_settings, | ||
4772 | .set_settings = cas_set_settings, | ||
4773 | .nway_reset = cas_nway_reset, | ||
4774 | .get_link = cas_get_link, | ||
4775 | .get_msglevel = cas_get_msglevel, | ||
4776 | .set_msglevel = cas_set_msglevel, | ||
4777 | .get_regs_len = cas_get_regs_len, | ||
4778 | .get_regs = cas_get_regs, | ||
4779 | .get_sset_count = cas_get_sset_count, | ||
4780 | .get_strings = cas_get_strings, | ||
4781 | .get_ethtool_stats = cas_get_ethtool_stats, | ||
4782 | }; | ||
4783 | |||
4784 | static int cas_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd) | ||
4785 | { | ||
4786 | struct cas *cp = netdev_priv(dev); | ||
4787 | struct mii_ioctl_data *data = if_mii(ifr); | ||
4788 | unsigned long flags; | ||
4789 | int rc = -EOPNOTSUPP; | ||
4790 | |||
4791 | /* Hold the PM mutex while doing ioctl's or we may collide | ||
4792 | * with open/close and power management and oops. | ||
4793 | */ | ||
4794 | mutex_lock(&cp->pm_mutex); | ||
4795 | switch (cmd) { | ||
4796 | case SIOCGMIIPHY: /* Get address of MII PHY in use. */ | ||
4797 | data->phy_id = cp->phy_addr; | ||
4798 | /* Fallthrough... */ | ||
4799 | |||
4800 | case SIOCGMIIREG: /* Read MII PHY register. */ | ||
4801 | spin_lock_irqsave(&cp->lock, flags); | ||
4802 | cas_mif_poll(cp, 0); | ||
4803 | data->val_out = cas_phy_read(cp, data->reg_num & 0x1f); | ||
4804 | cas_mif_poll(cp, 1); | ||
4805 | spin_unlock_irqrestore(&cp->lock, flags); | ||
4806 | rc = 0; | ||
4807 | break; | ||
4808 | |||
4809 | case SIOCSMIIREG: /* Write MII PHY register. */ | ||
4810 | spin_lock_irqsave(&cp->lock, flags); | ||
4811 | cas_mif_poll(cp, 0); | ||
4812 | rc = cas_phy_write(cp, data->reg_num & 0x1f, data->val_in); | ||
4813 | cas_mif_poll(cp, 1); | ||
4814 | spin_unlock_irqrestore(&cp->lock, flags); | ||
4815 | break; | ||
4816 | default: | ||
4817 | break; | ||
4818 | } | ||
4819 | |||
4820 | mutex_unlock(&cp->pm_mutex); | ||
4821 | return rc; | ||
4822 | } | ||
4823 | |||
4824 | /* When this chip sits underneath an Intel 31154 bridge, it is the | ||
4825 | * only subordinate device and we can tweak the bridge settings to | ||
4826 | * reflect that fact. | ||
4827 | */ | ||
4828 | static void __devinit cas_program_bridge(struct pci_dev *cas_pdev) | ||
4829 | { | ||
4830 | struct pci_dev *pdev = cas_pdev->bus->self; | ||
4831 | u32 val; | ||
4832 | |||
4833 | if (!pdev) | ||
4834 | return; | ||
4835 | |||
4836 | if (pdev->vendor != 0x8086 || pdev->device != 0x537c) | ||
4837 | return; | ||
4838 | |||
4839 | /* Clear bit 10 (Bus Parking Control) in the Secondary | ||
4840 | * Arbiter Control/Status Register which lives at offset | ||
4841 | * 0x41. Using a 32-bit word read/modify/write at 0x40 | ||
4842 | * is much simpler so that's how we do this. | ||
4843 | */ | ||
4844 | pci_read_config_dword(pdev, 0x40, &val); | ||
4845 | val &= ~0x00040000; | ||
4846 | pci_write_config_dword(pdev, 0x40, val); | ||
4847 | |||
4848 | /* Max out the Multi-Transaction Timer settings since | ||
4849 | * Cassini is the only device present. | ||
4850 | * | ||
4851 | * The register is 16-bit and lives at 0x50. When the | ||
4852 | * settings are enabled, it extends the GRANT# signal | ||
4853 | * for a requestor after a transaction is complete. This | ||
4854 | * allows the next request to run without first needing | ||
4855 | * to negotiate the GRANT# signal back. | ||
4856 | * | ||
4857 | * Bits 12:10 define the grant duration: | ||
4858 | * | ||
4859 | * 1 -- 16 clocks | ||
4860 | * 2 -- 32 clocks | ||
4861 | * 3 -- 64 clocks | ||
4862 | * 4 -- 128 clocks | ||
4863 | * 5 -- 256 clocks | ||
4864 | * | ||
4865 | * All other values are illegal. | ||
4866 | * | ||
4867 | * Bits 09:00 define which REQ/GNT signal pairs get the | ||
4868 | * GRANT# signal treatment. We set them all. | ||
4869 | */ | ||
4870 | pci_write_config_word(pdev, 0x50, (5 << 10) | 0x3ff); | ||
4871 | |||
4872 | /* The Read Prefetch Policy register is 16-bit and sits at | ||
4873 | * offset 0x52. It enables a "smart" pre-fetch policy. We | ||
4874 | * enable it and max out all of the settings since only one | ||
4875 | * device is sitting underneath and thus bandwidth sharing is | ||
4876 | * not an issue. | ||
4877 | * | ||
4878 | * The register has several 3 bit fields, which indicates a | ||
4879 | * multiplier applied to the base amount of prefetching the | ||
4880 | * chip would do. These fields are at: | ||
4881 | * | ||
4882 | * 15:13 --- ReRead Primary Bus | ||
4883 | * 12:10 --- FirstRead Primary Bus | ||
4884 | * 09:07 --- ReRead Secondary Bus | ||
4885 | * 06:04 --- FirstRead Secondary Bus | ||
4886 | * | ||
4887 | * Bits 03:00 control which REQ/GNT pairs the prefetch settings | ||
4888 | * get enabled on. Bit 3 is a grouped enabler which controls | ||
4889 | * all of the REQ/GNT pairs from [8:3]. Bits 2 to 0 control | ||
4890 | * the individual REQ/GNT pairs [2:0]. | ||
4891 | */ | ||
4892 | pci_write_config_word(pdev, 0x52, | ||
4893 | (0x7 << 13) | | ||
4894 | (0x7 << 10) | | ||
4895 | (0x7 << 7) | | ||
4896 | (0x7 << 4) | | ||
4897 | (0xf << 0)); | ||
4898 | |||
4899 | /* Force cacheline size to 0x8 */ | ||
4900 | pci_write_config_byte(pdev, PCI_CACHE_LINE_SIZE, 0x08); | ||
4901 | |||
4902 | /* Force latency timer to maximum setting so Cassini can | ||
4903 | * sit on the bus as long as it likes. | ||
4904 | */ | ||
4905 | pci_write_config_byte(pdev, PCI_LATENCY_TIMER, 0xff); | ||
4906 | } | ||
4907 | |||
4908 | static const struct net_device_ops cas_netdev_ops = { | ||
4909 | .ndo_open = cas_open, | ||
4910 | .ndo_stop = cas_close, | ||
4911 | .ndo_start_xmit = cas_start_xmit, | ||
4912 | .ndo_get_stats = cas_get_stats, | ||
4913 | .ndo_set_multicast_list = cas_set_multicast, | ||
4914 | .ndo_do_ioctl = cas_ioctl, | ||
4915 | .ndo_tx_timeout = cas_tx_timeout, | ||
4916 | .ndo_change_mtu = cas_change_mtu, | ||
4917 | .ndo_set_mac_address = eth_mac_addr, | ||
4918 | .ndo_validate_addr = eth_validate_addr, | ||
4919 | #ifdef CONFIG_NET_POLL_CONTROLLER | ||
4920 | .ndo_poll_controller = cas_netpoll, | ||
4921 | #endif | ||
4922 | }; | ||
4923 | |||
4924 | static int __devinit cas_init_one(struct pci_dev *pdev, | ||
4925 | const struct pci_device_id *ent) | ||
4926 | { | ||
4927 | static int cas_version_printed = 0; | ||
4928 | unsigned long casreg_len; | ||
4929 | struct net_device *dev; | ||
4930 | struct cas *cp; | ||
4931 | int i, err, pci_using_dac; | ||
4932 | u16 pci_cmd; | ||
4933 | u8 orig_cacheline_size = 0, cas_cacheline_size = 0; | ||
4934 | |||
4935 | if (cas_version_printed++ == 0) | ||
4936 | pr_info("%s", version); | ||
4937 | |||
4938 | err = pci_enable_device(pdev); | ||
4939 | if (err) { | ||
4940 | dev_err(&pdev->dev, "Cannot enable PCI device, aborting\n"); | ||
4941 | return err; | ||
4942 | } | ||
4943 | |||
4944 | if (!(pci_resource_flags(pdev, 0) & IORESOURCE_MEM)) { | ||
4945 | dev_err(&pdev->dev, "Cannot find proper PCI device " | ||
4946 | "base address, aborting\n"); | ||
4947 | err = -ENODEV; | ||
4948 | goto err_out_disable_pdev; | ||
4949 | } | ||
4950 | |||
4951 | dev = alloc_etherdev(sizeof(*cp)); | ||
4952 | if (!dev) { | ||
4953 | dev_err(&pdev->dev, "Etherdev alloc failed, aborting\n"); | ||
4954 | err = -ENOMEM; | ||
4955 | goto err_out_disable_pdev; | ||
4956 | } | ||
4957 | SET_NETDEV_DEV(dev, &pdev->dev); | ||
4958 | |||
4959 | err = pci_request_regions(pdev, dev->name); | ||
4960 | if (err) { | ||
4961 | dev_err(&pdev->dev, "Cannot obtain PCI resources, aborting\n"); | ||
4962 | goto err_out_free_netdev; | ||
4963 | } | ||
4964 | pci_set_master(pdev); | ||
4965 | |||
4966 | /* we must always turn on parity response or else parity | ||
4967 | * doesn't get generated properly. disable SERR/PERR as well. | ||
4968 | * in addition, we want to turn MWI on. | ||
4969 | */ | ||
4970 | pci_read_config_word(pdev, PCI_COMMAND, &pci_cmd); | ||
4971 | pci_cmd &= ~PCI_COMMAND_SERR; | ||
4972 | pci_cmd |= PCI_COMMAND_PARITY; | ||
4973 | pci_write_config_word(pdev, PCI_COMMAND, pci_cmd); | ||
4974 | if (pci_try_set_mwi(pdev)) | ||
4975 | pr_warning("Could not enable MWI for %s\n", pci_name(pdev)); | ||
4976 | |||
4977 | cas_program_bridge(pdev); | ||
4978 | |||
4979 | /* | ||
4980 | * On some architectures, the default cache line size set | ||
4981 | * by pci_try_set_mwi reduces performance. We have to increase | ||
4982 | * it for this case. To start, we'll print some configuration | ||
4983 | * data. | ||
4984 | */ | ||
4985 | #if 1 | ||
4986 | pci_read_config_byte(pdev, PCI_CACHE_LINE_SIZE, | ||
4987 | &orig_cacheline_size); | ||
4988 | if (orig_cacheline_size < CAS_PREF_CACHELINE_SIZE) { | ||
4989 | cas_cacheline_size = | ||
4990 | (CAS_PREF_CACHELINE_SIZE < SMP_CACHE_BYTES) ? | ||
4991 | CAS_PREF_CACHELINE_SIZE : SMP_CACHE_BYTES; | ||
4992 | if (pci_write_config_byte(pdev, | ||
4993 | PCI_CACHE_LINE_SIZE, | ||
4994 | cas_cacheline_size)) { | ||
4995 | dev_err(&pdev->dev, "Could not set PCI cache " | ||
4996 | "line size\n"); | ||
4997 | goto err_write_cacheline; | ||
4998 | } | ||
4999 | } | ||
5000 | #endif | ||
5001 | |||
5002 | |||
5003 | /* Configure DMA attributes. */ | ||
5004 | if (!pci_set_dma_mask(pdev, DMA_BIT_MASK(64))) { | ||
5005 | pci_using_dac = 1; | ||
5006 | err = pci_set_consistent_dma_mask(pdev, | ||
5007 | DMA_BIT_MASK(64)); | ||
5008 | if (err < 0) { | ||
5009 | dev_err(&pdev->dev, "Unable to obtain 64-bit DMA " | ||
5010 | "for consistent allocations\n"); | ||
5011 | goto err_out_free_res; | ||
5012 | } | ||
5013 | |||
5014 | } else { | ||
5015 | err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32)); | ||
5016 | if (err) { | ||
5017 | dev_err(&pdev->dev, "No usable DMA configuration, " | ||
5018 | "aborting\n"); | ||
5019 | goto err_out_free_res; | ||
5020 | } | ||
5021 | pci_using_dac = 0; | ||
5022 | } | ||
5023 | |||
5024 | casreg_len = pci_resource_len(pdev, 0); | ||
5025 | |||
5026 | cp = netdev_priv(dev); | ||
5027 | cp->pdev = pdev; | ||
5028 | #if 1 | ||
5029 | /* A value of 0 indicates we never explicitly set it */ | ||
5030 | cp->orig_cacheline_size = cas_cacheline_size ? orig_cacheline_size : 0; | ||
5031 | #endif | ||
5032 | cp->dev = dev; | ||
5033 | cp->msg_enable = (cassini_debug < 0) ? CAS_DEF_MSG_ENABLE : | ||
5034 | cassini_debug; | ||
5035 | |||
5036 | #if defined(CONFIG_SPARC) | ||
5037 | cp->of_node = pci_device_to_OF_node(pdev); | ||
5038 | #endif | ||
5039 | |||
5040 | cp->link_transition = LINK_TRANSITION_UNKNOWN; | ||
5041 | cp->link_transition_jiffies_valid = 0; | ||
5042 | |||
5043 | spin_lock_init(&cp->lock); | ||
5044 | spin_lock_init(&cp->rx_inuse_lock); | ||
5045 | spin_lock_init(&cp->rx_spare_lock); | ||
5046 | for (i = 0; i < N_TX_RINGS; i++) { | ||
5047 | spin_lock_init(&cp->stat_lock[i]); | ||
5048 | spin_lock_init(&cp->tx_lock[i]); | ||
5049 | } | ||
5050 | spin_lock_init(&cp->stat_lock[N_TX_RINGS]); | ||
5051 | mutex_init(&cp->pm_mutex); | ||
5052 | |||
5053 | init_timer(&cp->link_timer); | ||
5054 | cp->link_timer.function = cas_link_timer; | ||
5055 | cp->link_timer.data = (unsigned long) cp; | ||
5056 | |||
5057 | #if 1 | ||
5058 | /* Just in case the implementation of atomic operations | ||
5059 | * changes so that an explicit initialization is necessary. | ||
5060 | */ | ||
5061 | atomic_set(&cp->reset_task_pending, 0); | ||
5062 | atomic_set(&cp->reset_task_pending_all, 0); | ||
5063 | atomic_set(&cp->reset_task_pending_spare, 0); | ||
5064 | atomic_set(&cp->reset_task_pending_mtu, 0); | ||
5065 | #endif | ||
5066 | INIT_WORK(&cp->reset_task, cas_reset_task); | ||
5067 | |||
5068 | /* Default link parameters */ | ||
5069 | if (link_mode >= 0 && link_mode < 6) | ||
5070 | cp->link_cntl = link_modes[link_mode]; | ||
5071 | else | ||
5072 | cp->link_cntl = BMCR_ANENABLE; | ||
5073 | cp->lstate = link_down; | ||
5074 | cp->link_transition = LINK_TRANSITION_LINK_DOWN; | ||
5075 | netif_carrier_off(cp->dev); | ||
5076 | cp->timer_ticks = 0; | ||
5077 | |||
5078 | /* give us access to cassini registers */ | ||
5079 | cp->regs = pci_iomap(pdev, 0, casreg_len); | ||
5080 | if (!cp->regs) { | ||
5081 | dev_err(&pdev->dev, "Cannot map device registers, aborting\n"); | ||
5082 | goto err_out_free_res; | ||
5083 | } | ||
5084 | cp->casreg_len = casreg_len; | ||
5085 | |||
5086 | pci_save_state(pdev); | ||
5087 | cas_check_pci_invariants(cp); | ||
5088 | cas_hard_reset(cp); | ||
5089 | cas_reset(cp, 0); | ||
5090 | if (cas_check_invariants(cp)) | ||
5091 | goto err_out_iounmap; | ||
5092 | if (cp->cas_flags & CAS_FLAG_SATURN) | ||
5093 | if (cas_saturn_firmware_init(cp)) | ||
5094 | goto err_out_iounmap; | ||
5095 | |||
5096 | cp->init_block = (struct cas_init_block *) | ||
5097 | pci_alloc_consistent(pdev, sizeof(struct cas_init_block), | ||
5098 | &cp->block_dvma); | ||
5099 | if (!cp->init_block) { | ||
5100 | dev_err(&pdev->dev, "Cannot allocate init block, aborting\n"); | ||
5101 | goto err_out_iounmap; | ||
5102 | } | ||
5103 | |||
5104 | for (i = 0; i < N_TX_RINGS; i++) | ||
5105 | cp->init_txds[i] = cp->init_block->txds[i]; | ||
5106 | |||
5107 | for (i = 0; i < N_RX_DESC_RINGS; i++) | ||
5108 | cp->init_rxds[i] = cp->init_block->rxds[i]; | ||
5109 | |||
5110 | for (i = 0; i < N_RX_COMP_RINGS; i++) | ||
5111 | cp->init_rxcs[i] = cp->init_block->rxcs[i]; | ||
5112 | |||
5113 | for (i = 0; i < N_RX_FLOWS; i++) | ||
5114 | skb_queue_head_init(&cp->rx_flows[i]); | ||
5115 | |||
5116 | dev->netdev_ops = &cas_netdev_ops; | ||
5117 | dev->ethtool_ops = &cas_ethtool_ops; | ||
5118 | dev->watchdog_timeo = CAS_TX_TIMEOUT; | ||
5119 | |||
5120 | #ifdef USE_NAPI | ||
5121 | netif_napi_add(dev, &cp->napi, cas_poll, 64); | ||
5122 | #endif | ||
5123 | dev->irq = pdev->irq; | ||
5124 | dev->dma = 0; | ||
5125 | |||
5126 | /* Cassini features. */ | ||
5127 | if ((cp->cas_flags & CAS_FLAG_NO_HW_CSUM) == 0) | ||
5128 | dev->features |= NETIF_F_HW_CSUM | NETIF_F_SG; | ||
5129 | |||
5130 | if (pci_using_dac) | ||
5131 | dev->features |= NETIF_F_HIGHDMA; | ||
5132 | |||
5133 | if (register_netdev(dev)) { | ||
5134 | dev_err(&pdev->dev, "Cannot register net device, aborting\n"); | ||
5135 | goto err_out_free_consistent; | ||
5136 | } | ||
5137 | |||
5138 | i = readl(cp->regs + REG_BIM_CFG); | ||
5139 | netdev_info(dev, "Sun Cassini%s (%sbit/%sMHz PCI/%s) Ethernet[%d] %pM\n", | ||
5140 | (cp->cas_flags & CAS_FLAG_REG_PLUS) ? "+" : "", | ||
5141 | (i & BIM_CFG_32BIT) ? "32" : "64", | ||
5142 | (i & BIM_CFG_66MHZ) ? "66" : "33", | ||
5143 | (cp->phy_type == CAS_PHY_SERDES) ? "Fi" : "Cu", pdev->irq, | ||
5144 | dev->dev_addr); | ||
5145 | |||
5146 | pci_set_drvdata(pdev, dev); | ||
5147 | cp->hw_running = 1; | ||
5148 | cas_entropy_reset(cp); | ||
5149 | cas_phy_init(cp); | ||
5150 | cas_begin_auto_negotiation(cp, NULL); | ||
5151 | return 0; | ||
5152 | |||
5153 | err_out_free_consistent: | ||
5154 | pci_free_consistent(pdev, sizeof(struct cas_init_block), | ||
5155 | cp->init_block, cp->block_dvma); | ||
5156 | |||
5157 | err_out_iounmap: | ||
5158 | mutex_lock(&cp->pm_mutex); | ||
5159 | if (cp->hw_running) | ||
5160 | cas_shutdown(cp); | ||
5161 | mutex_unlock(&cp->pm_mutex); | ||
5162 | |||
5163 | pci_iounmap(pdev, cp->regs); | ||
5164 | |||
5165 | |||
5166 | err_out_free_res: | ||
5167 | pci_release_regions(pdev); | ||
5168 | |||
5169 | err_write_cacheline: | ||
5170 | /* Try to restore it in case the error occurred after we | ||
5171 | * set it. | ||
5172 | */ | ||
5173 | pci_write_config_byte(pdev, PCI_CACHE_LINE_SIZE, orig_cacheline_size); | ||
5174 | |||
5175 | err_out_free_netdev: | ||
5176 | free_netdev(dev); | ||
5177 | |||
5178 | err_out_disable_pdev: | ||
5179 | pci_disable_device(pdev); | ||
5180 | pci_set_drvdata(pdev, NULL); | ||
5181 | return -ENODEV; | ||
5182 | } | ||
5183 | |||
5184 | static void __devexit cas_remove_one(struct pci_dev *pdev) | ||
5185 | { | ||
5186 | struct net_device *dev = pci_get_drvdata(pdev); | ||
5187 | struct cas *cp; | ||
5188 | if (!dev) | ||
5189 | return; | ||
5190 | |||
5191 | cp = netdev_priv(dev); | ||
5192 | unregister_netdev(dev); | ||
5193 | |||
5194 | if (cp->fw_data) | ||
5195 | vfree(cp->fw_data); | ||
5196 | |||
5197 | mutex_lock(&cp->pm_mutex); | ||
5198 | cancel_work_sync(&cp->reset_task); | ||
5199 | if (cp->hw_running) | ||
5200 | cas_shutdown(cp); | ||
5201 | mutex_unlock(&cp->pm_mutex); | ||
5202 | |||
5203 | #if 1 | ||
5204 | if (cp->orig_cacheline_size) { | ||
5205 | /* Restore the cache line size if we had modified | ||
5206 | * it. | ||
5207 | */ | ||
5208 | pci_write_config_byte(pdev, PCI_CACHE_LINE_SIZE, | ||
5209 | cp->orig_cacheline_size); | ||
5210 | } | ||
5211 | #endif | ||
5212 | pci_free_consistent(pdev, sizeof(struct cas_init_block), | ||
5213 | cp->init_block, cp->block_dvma); | ||
5214 | pci_iounmap(pdev, cp->regs); | ||
5215 | free_netdev(dev); | ||
5216 | pci_release_regions(pdev); | ||
5217 | pci_disable_device(pdev); | ||
5218 | pci_set_drvdata(pdev, NULL); | ||
5219 | } | ||
5220 | |||
5221 | #ifdef CONFIG_PM | ||
5222 | static int cas_suspend(struct pci_dev *pdev, pm_message_t state) | ||
5223 | { | ||
5224 | struct net_device *dev = pci_get_drvdata(pdev); | ||
5225 | struct cas *cp = netdev_priv(dev); | ||
5226 | unsigned long flags; | ||
5227 | |||
5228 | mutex_lock(&cp->pm_mutex); | ||
5229 | |||
5230 | /* If the driver is opened, we stop the DMA */ | ||
5231 | if (cp->opened) { | ||
5232 | netif_device_detach(dev); | ||
5233 | |||
5234 | cas_lock_all_save(cp, flags); | ||
5235 | |||
5236 | /* We can set the second arg of cas_reset to 0 | ||
5237 | * because on resume, we'll call cas_init_hw with | ||
5238 | * its second arg set so that autonegotiation is | ||
5239 | * restarted. | ||
5240 | */ | ||
5241 | cas_reset(cp, 0); | ||
5242 | cas_clean_rings(cp); | ||
5243 | cas_unlock_all_restore(cp, flags); | ||
5244 | } | ||
5245 | |||
5246 | if (cp->hw_running) | ||
5247 | cas_shutdown(cp); | ||
5248 | mutex_unlock(&cp->pm_mutex); | ||
5249 | |||
5250 | return 0; | ||
5251 | } | ||
5252 | |||
5253 | static int cas_resume(struct pci_dev *pdev) | ||
5254 | { | ||
5255 | struct net_device *dev = pci_get_drvdata(pdev); | ||
5256 | struct cas *cp = netdev_priv(dev); | ||
5257 | |||
5258 | netdev_info(dev, "resuming\n"); | ||
5259 | |||
5260 | mutex_lock(&cp->pm_mutex); | ||
5261 | cas_hard_reset(cp); | ||
5262 | if (cp->opened) { | ||
5263 | unsigned long flags; | ||
5264 | cas_lock_all_save(cp, flags); | ||
5265 | cas_reset(cp, 0); | ||
5266 | cp->hw_running = 1; | ||
5267 | cas_clean_rings(cp); | ||
5268 | cas_init_hw(cp, 1); | ||
5269 | cas_unlock_all_restore(cp, flags); | ||
5270 | |||
5271 | netif_device_attach(dev); | ||
5272 | } | ||
5273 | mutex_unlock(&cp->pm_mutex); | ||
5274 | return 0; | ||
5275 | } | ||
5276 | #endif /* CONFIG_PM */ | ||
5277 | |||
5278 | static struct pci_driver cas_driver = { | ||
5279 | .name = DRV_MODULE_NAME, | ||
5280 | .id_table = cas_pci_tbl, | ||
5281 | .probe = cas_init_one, | ||
5282 | .remove = __devexit_p(cas_remove_one), | ||
5283 | #ifdef CONFIG_PM | ||
5284 | .suspend = cas_suspend, | ||
5285 | .resume = cas_resume | ||
5286 | #endif | ||
5287 | }; | ||
5288 | |||
5289 | static int __init cas_init(void) | ||
5290 | { | ||
5291 | if (linkdown_timeout > 0) | ||
5292 | link_transition_timeout = linkdown_timeout * HZ; | ||
5293 | else | ||
5294 | link_transition_timeout = 0; | ||
5295 | |||
5296 | return pci_register_driver(&cas_driver); | ||
5297 | } | ||
5298 | |||
5299 | static void __exit cas_cleanup(void) | ||
5300 | { | ||
5301 | pci_unregister_driver(&cas_driver); | ||
5302 | } | ||
5303 | |||
5304 | module_init(cas_init); | ||
5305 | module_exit(cas_cleanup); | ||
diff --git a/drivers/net/ethernet/sun/cassini.h b/drivers/net/ethernet/sun/cassini.h new file mode 100644 index 000000000000..b361424d5f57 --- /dev/null +++ b/drivers/net/ethernet/sun/cassini.h | |||
@@ -0,0 +1,2914 @@ | |||
1 | /* $Id: cassini.h,v 1.16 2004/08/17 21:15:16 zaumen Exp $ | ||
2 | * cassini.h: Definitions for Sun Microsystems Cassini(+) ethernet driver. | ||
3 | * | ||
4 | * Copyright (C) 2004 Sun Microsystems Inc. | ||
5 | * Copyright (c) 2003 Adrian Sun (asun@darksunrising.com) | ||
6 | * | ||
7 | * This program is free software; you can redistribute it and/or | ||
8 | * modify it under the terms of the GNU General Public License as | ||
9 | * published by the Free Software Foundation; either version 2 of the | ||
10 | * License, or (at your option) any later version. | ||
11 | * | ||
12 | * This program is distributed in the hope that it will be useful, | ||
13 | * but WITHOUT ANY WARRANTY; without even the implied warranty of | ||
14 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the | ||
15 | * GNU General Public License for more details. | ||
16 | * | ||
17 | * You should have received a copy of the GNU General Public License | ||
18 | * along with this program; if not, write to the Free Software | ||
19 | * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA | ||
20 | * 02111-1307, USA. | ||
21 | * | ||
22 | * vendor id: 0x108E (Sun Microsystems, Inc.) | ||
23 | * device id: 0xabba (Cassini) | ||
24 | * revision ids: 0x01 = Cassini | ||
25 | * 0x02 = Cassini rev 2 | ||
26 | * 0x10 = Cassini+ | ||
27 | * 0x11 = Cassini+ 0.2u | ||
28 | * | ||
29 | * vendor id: 0x100b (National Semiconductor) | ||
30 | * device id: 0x0035 (DP83065/Saturn) | ||
31 | * revision ids: 0x30 = Saturn B2 | ||
32 | * | ||
33 | * rings are all offset from 0. | ||
34 | * | ||
35 | * there are two clock domains: | ||
36 | * PCI: 33/66MHz clock | ||
37 | * chip: 125MHz clock | ||
38 | */ | ||
39 | |||
40 | #ifndef _CASSINI_H | ||
41 | #define _CASSINI_H | ||
42 | |||
43 | /* cassini register map: 2M memory mapped in 32-bit memory space accessible as | ||
44 | * 32-bit words. there is no i/o port access. REG_ addresses are | ||
45 | * shared between cassini and cassini+. REG_PLUS_ addresses only | ||
46 | * appear in cassini+. REG_MINUS_ addresses only appear in cassini. | ||
47 | */ | ||
48 | #define CAS_ID_REV2 0x02 | ||
49 | #define CAS_ID_REVPLUS 0x10 | ||
50 | #define CAS_ID_REVPLUS02u 0x11 | ||
51 | #define CAS_ID_REVSATURNB2 0x30 | ||
52 | |||
53 | /** global resources **/ | ||
54 | |||
55 | /* this register sets the weights for the weighted round robin arbiter. e.g., | ||
56 | * if rx weight == 1 and tx weight == 0, rx == 2x tx transfer credit | ||
57 | * for its next turn to access the pci bus. | ||
58 | * map: 0x0 = x1, 0x1 = x2, 0x2 = x4, 0x3 = x8 | ||
59 | * DEFAULT: 0x0, SIZE: 5 bits | ||
60 | */ | ||
61 | #define REG_CAWR 0x0004 /* core arbitration weight */ | ||
62 | #define CAWR_RX_DMA_WEIGHT_SHIFT 0 | ||
63 | #define CAWR_RX_DMA_WEIGHT_MASK 0x03 /* [0:1] */ | ||
64 | #define CAWR_TX_DMA_WEIGHT_SHIFT 2 | ||
65 | #define CAWR_TX_DMA_WEIGHT_MASK 0x0C /* [3:2] */ | ||
66 | #define CAWR_RR_DIS 0x10 /* [4] */ | ||
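A quick illustration of the encoding described above, matching the "rx weight == 1, tx weight == 0" example in the comment (a sketch only; the function name is hypothetical, `regs` is the ioremap'd register base, and kernel readl/writel are assumed):

static void cawr_weight_rx_2x(void __iomem *regs)
{
	/* RX gets twice the TX transfer credit per arbitration round
	 * (0x1 = x2, 0x0 = x1); round-robin is left enabled.
	 */
	u32 cawr = ((0x1 << CAWR_RX_DMA_WEIGHT_SHIFT) & CAWR_RX_DMA_WEIGHT_MASK) |
		   ((0x0 << CAWR_TX_DMA_WEIGHT_SHIFT) & CAWR_TX_DMA_WEIGHT_MASK);

	writel(cawr, regs + REG_CAWR);
}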
67 | |||
68 | /* if enabled, BIM can send bursts across PCI bus > cacheline size. burst | ||
69 | * sizes determined by length of packet or descriptor transfer and the | ||
70 | * max length allowed by the target. | ||
71 | * DEFAULT: 0x0, SIZE: 1 bit | ||
72 | */ | ||
73 | #define REG_INF_BURST 0x0008 /* infinite burst enable reg */ | ||
74 | #define INF_BURST_EN 0x1 /* enable */ | ||
75 | |||
76 | /* top level interrupts [0-9] are auto-cleared to 0 when the status | ||
77 | * register is read. second level interrupts [13 - 18] are cleared at | ||
78 | * the source. tx completion register 3 is replicated in [19 - 31] | ||
79 | * DEFAULT: 0x00000000, SIZE: 29 bits | ||
80 | */ | ||
81 | #define REG_INTR_STATUS 0x000C /* interrupt status register */ | ||
82 | #define INTR_TX_INTME 0x00000001 /* frame w/ INT ME desc bit set | ||
83 | xferred from host queue to | ||
84 | TX FIFO */ | ||
85 | #define INTR_TX_ALL 0x00000002 /* all xmit frames xferred into | ||
86 | TX FIFO. i.e., | ||
87 | TX Kick == TX complete. if | ||
88 | PACED_MODE set, then TX FIFO | ||
89 | also empty */ | ||
90 | #define INTR_TX_DONE 0x00000004 /* any frame xferred into tx | ||
91 | FIFO */ | ||
92 | #define INTR_TX_TAG_ERROR 0x00000008 /* TX FIFO tag framing | ||
93 | corrupted. FATAL ERROR */ | ||
94 | #define INTR_RX_DONE 0x00000010 /* at least 1 frame xferred | ||
95 | from RX FIFO to host mem. | ||
96 | RX completion reg updated. | ||
97 | may be delayed by recv | ||
98 | intr blanking. */ | ||
99 | #define INTR_RX_BUF_UNAVAIL 0x00000020 /* no more receive buffers. | ||
100 | RX Kick == RX complete */ | ||
101 | #define INTR_RX_TAG_ERROR 0x00000040 /* RX FIFO tag framing | ||
102 | corrupted. FATAL ERROR */ | ||
103 | #define INTR_RX_COMP_FULL 0x00000080 /* no more room in completion | ||
104 | ring to post descriptors. | ||
105 | RX complete head incr to | ||
106 | almost reach RX complete | ||
107 | tail */ | ||
108 | #define INTR_RX_BUF_AE 0x00000100 /* less than the | ||
109 | programmable threshold # | ||
110 | of free descr avail for | ||
111 | hw use */ | ||
112 | #define INTR_RX_COMP_AF 0x00000200 /* less than the | ||
113 | programmable threshold # | ||
114 | of descr spaces for hw | ||
115 | use in completion descr | ||
116 | ring */ | ||
117 | #define INTR_RX_LEN_MISMATCH 0x00000400 /* len field from MAC != | ||
118 | len of non-reassembly pkt | ||
119 | from fifo during DMA or | ||
120 | header parser provides TCP | ||
121 | header and payload size > | ||
122 | MAC packet size. | ||
123 | FATAL ERROR */ | ||
124 | #define INTR_SUMMARY 0x00001000 /* summary interrupt bit. this | ||
125 | bit will be set if an interrupt | ||
126 | is generated on the pci bus. useful | ||
127 | when driver is polling for | ||
128 | interrupts */ | ||
129 | #define INTR_PCS_STATUS 0x00002000 /* PCS interrupt status register */ | ||
130 | #define INTR_TX_MAC_STATUS 0x00004000 /* TX MAC status register has at | ||
131 | least 1 unmasked interrupt set */ | ||
132 | #define INTR_RX_MAC_STATUS 0x00008000 /* RX MAC status register has at | ||
133 | least 1 unmasked interrupt set */ | ||
134 | #define INTR_MAC_CTRL_STATUS 0x00010000 /* MAC control status register has | ||
135 | at least 1 unmasked interrupt | ||
136 | set */ | ||
137 | #define INTR_MIF_STATUS 0x00020000 /* MIF status register has at least | ||
138 | 1 unmasked interrupt set */ | ||
139 | #define INTR_PCI_ERROR_STATUS 0x00040000 /* PCI error status register in the | ||
140 | BIF has at least 1 unmasked | ||
141 | interrupt set */ | ||
142 | #define INTR_TX_COMP_3_MASK 0xFFF80000 /* mask for TX completion | ||
143 | 3 reg data */ | ||
144 | #define INTR_TX_COMP_3_SHIFT 19 | ||
145 | #define INTR_ERROR_MASK (INTR_MIF_STATUS | INTR_PCI_ERROR_STATUS | \ | ||
146 | INTR_PCS_STATUS | INTR_RX_LEN_MISMATCH | \ | ||
147 | INTR_TX_MAC_STATUS | INTR_RX_MAC_STATUS | \ | ||
148 | INTR_TX_TAG_ERROR | INTR_RX_TAG_ERROR | \ | ||
149 | INTR_MAC_CTRL_STATUS) | ||
150 | |||
151 | /* determines which status events will cause an interrupt. layout same | ||
152 | * as REG_INTR_STATUS. | ||
153 | * DEFAULT: 0xFFFFFFFF, SIZE: 16 bits | ||
154 | */ | ||
155 | #define REG_INTR_MASK 0x0010 /* Interrupt mask */ | ||
156 | |||
157 | /* top level interrupt bits that are cleared during read of REG_INTR_STATUS_ALIAS. | ||
158 | * useful when driver is polling for interrupts. layout same as REG_INTR_MASK. | ||
159 | * DEFAULT: 0x00000000, SIZE: 12 bits | ||
160 | */ | ||
161 | #define REG_ALIAS_CLEAR 0x0014 /* alias clear mask | ||
162 | (used w/ status alias) */ | ||
163 | /* same as REG_INTR_STATUS except that only bits cleared are those selected by | ||
164 | * REG_ALIAS_CLEAR | ||
165 | * DEFAULT: 0x00000000, SIZE: 29 bits | ||
166 | */ | ||
167 | #define REG_INTR_STATUS_ALIAS 0x001C /* interrupt status alias | ||
168 | (selective clear) */ | ||
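Taken together, REG_ALIAS_CLEAR and REG_INTR_STATUS_ALIAS give a polled-mode driver selective clearing, as the two comments above describe. A hedged sketch of that sequence (function name hypothetical, `regs` assumed ioremap'd):

static u32 poll_rx_tx_done(void __iomem *regs)
{
	/* only the TX/RX done bits may be cleared by the alias read */
	writel(INTR_TX_DONE | INTR_RX_DONE, regs + REG_ALIAS_CLEAR);

	/* other latched causes (e.g. MIF, PCI error) stay pending */
	return readl(regs + REG_INTR_STATUS_ALIAS);
}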
169 | |||
170 | /* DEFAULT: 0x0, SIZE: 3 bits */ | ||
171 | #define REG_PCI_ERR_STATUS 0x1000 /* PCI error status */ | ||
172 | #define PCI_ERR_BADACK 0x01 /* reserved in Cassini+. | ||
173 | set if no ACK64# during ABS64 cycle | ||
174 | in Cassini. */ | ||
175 | #define PCI_ERR_DTRTO 0x02 /* delayed xaction timeout. set if | ||
176 | no read retry after 2^15 clocks */ | ||
177 | #define PCI_ERR_OTHER 0x04 /* other PCI errors */ | ||
178 | #define PCI_ERR_BIM_DMA_WRITE 0x08 /* BIM received 0 count DMA write req. | ||
179 | unused in Cassini. */ | ||
180 | #define PCI_ERR_BIM_DMA_READ 0x10 /* BIM received 0 count DMA read req. | ||
181 | unused in Cassini. */ | ||
182 | #define PCI_ERR_BIM_DMA_TIMEOUT 0x20 /* BIM received 255 retries during | ||
183 | DMA. unused in cassini. */ | ||
184 | |||
185 | /* mask for PCI status events that will set PCI_ERR_STATUS. if cleared, event | ||
186 | * causes an interrupt to be generated. | ||
187 | * DEFAULT: 0x7, SIZE: 3 bits | ||
188 | */ | ||
189 | #define REG_PCI_ERR_STATUS_MASK 0x1004 /* PCI Error status mask */ | ||
190 | |||
191 | /* used to configure PCI related parameters that are not in PCI config space. | ||
192 | * DEFAULT: 0bxx000, SIZE: 5 bits | ||
193 | */ | ||
194 | #define REG_BIM_CFG 0x1008 /* BIM Configuration */ | ||
195 | #define BIM_CFG_RESERVED0 0x001 /* reserved */ | ||
196 | #define BIM_CFG_RESERVED1 0x002 /* reserved */ | ||
197 | #define BIM_CFG_64BIT_DISABLE 0x004 /* disable 64-bit mode */ | ||
198 | #define BIM_CFG_66MHZ 0x008 /* (ro) 1 = 66MHz, 0 = < 66MHz */ | ||
199 | #define BIM_CFG_32BIT 0x010 /* (ro) 1 = 32-bit slot, 0 = 64-bit */ | ||
200 | #define BIM_CFG_DPAR_INTR_ENABLE 0x020 /* detected parity err enable */ | ||
201 | #define BIM_CFG_RMA_INTR_ENABLE 0x040 /* master abort intr enable */ | ||
202 | #define BIM_CFG_RTA_INTR_ENABLE 0x080 /* target abort intr enable */ | ||
203 | #define BIM_CFG_RESERVED2 0x100 /* reserved */ | ||
204 | #define BIM_CFG_BIM_DISABLE 0x200 /* stop BIM DMA. use before global | ||
205 | reset. reserved in Cassini. */ | ||
206 | #define BIM_CFG_BIM_STATUS 0x400 /* (ro) 1 = BIM DMA suspended. | ||
207 | reserved in Cassini. */ | ||
208 | #define BIM_CFG_PERROR_BLOCK 0x800 /* block PERR# to pci bus. def: 0. | ||
209 | reserved in Cassini. */ | ||
210 | |||
211 | /* DEFAULT: 0x00000000, SIZE: 32 bits */ | ||
212 | #define REG_BIM_DIAG 0x100C /* BIM Diagnostic */ | ||
213 | #define BIM_DIAG_MSTR_SM_MASK 0x3FFFFF00 /* PCI master controller state | ||
214 | machine bits [21:0] */ | ||
215 | #define BIM_DIAG_BRST_SM_MASK 0x7F /* PCI burst controller state | ||
216 | machine bits [6:0] */ | ||
217 | |||
218 | /* writing to SW_RESET_TX and SW_RESET_RX will issue a global | ||
219 | * reset. poll until TX and RX read back as 0's for completion. | ||
220 | */ | ||
221 | #define REG_SW_RESET 0x1010 /* Software reset */ | ||
222 | #define SW_RESET_TX 0x00000001 /* reset TX DMA engine. poll until | ||
223 | cleared to 0. */ | ||
224 | #define SW_RESET_RX 0x00000002 /* reset RX DMA engine. poll until | ||
225 | cleared to 0. */ | ||
226 | #define SW_RESET_RSTOUT 0x00000004 /* force RSTOUT# pin active (low). | ||
227 | resets PHY and anything else | ||
228 | connected to RSTOUT#. RSTOUT# | ||
229 | is also activated by local PCI | ||
230 | reset when hot-swap is being | ||
231 | done. */ | ||
232 | #define SW_RESET_BLOCK_PCS_SLINK 0x00000008 /* if a global reset is done with | ||
233 | this bit set, PCS and SLINK | ||
234 | modules won't be reset. | ||
235 | i.e., link won't drop. */ | ||
236 | #define SW_RESET_BREQ_SM_MASK 0x00007F00 /* breq state machine [6:0] */ | ||
237 | #define SW_RESET_PCIARB_SM_MASK 0x00070000 /* pci arbitration state bits: | ||
238 | 0b000: ARB_IDLE1 | ||
239 | 0b001: ARB_IDLE2 | ||
240 | 0b010: ARB_WB_ACK | ||
241 | 0b011: ARB_WB_WAT | ||
242 | 0b100: ARB_RB_ACK | ||
243 | 0b101: ARB_RB_WAT | ||
244 | 0b110: ARB_RB_END | ||
245 | 0b111: ARB_WB_END */ | ||
246 | #define SW_RESET_RDPCI_SM_MASK 0x00300000 /* read pci state bits: | ||
247 | 0b00: RD_PCI_WAT | ||
248 | 0b01: RD_PCI_RDY | ||
249 | 0b11: RD_PCI_ACK */ | ||
250 | #define SW_RESET_RDARB_SM_MASK 0x00C00000 /* read arbitration state bits: | ||
251 | 0b00: AD_IDL_RX | ||
252 | 0b01: AD_ACK_RX | ||
253 | 0b10: AD_ACK_TX | ||
254 | 0b11: AD_IDL_TX */ | ||
255 | #define SW_RESET_WRPCI_SM_MASK 0x06000000 /* write pci state bits | ||
256 | 0b00: WR_PCI_WAT | ||
257 | 0b01: WR_PCI_RDY | ||
258 | 0b11: WR_PCI_ACK */ | ||
259 | #define SW_RESET_WRARB_SM_MASK 0x38000000 /* write arbitration state bits: | ||
260 | 0b000: ARB_IDLE1 | ||
261 | 0b001: ARB_IDLE2 | ||
262 | 0b010: ARB_TX_ACK | ||
263 | 0b011: ARB_TX_WAT | ||
264 | 0b100: ARB_RX_ACK | ||
265 | 0b110: ARB_RX_WAT */ | ||
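The global reset sequence spelled out in the comment at the top of this block, in sketch form (the function name and the poll budget are assumptions; the driver's own reset path adds more logic around this):

static int sw_reset_tx_rx(void __iomem *regs)
{
	int limit = 1000;	/* poll budget is an assumption */

	writel(SW_RESET_TX | SW_RESET_RX, regs + REG_SW_RESET);
	while (limit-- > 0) {
		/* both bits read back 0 once the reset completes */
		if (!(readl(regs + REG_SW_RESET) &
		      (SW_RESET_TX | SW_RESET_RX)))
			return 0;
		udelay(10);
	}
	return -1;	/* timed out */
}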
266 | |||
267 | /* Cassini only. 64-bit register used to check PCI datapath. when read, | ||
268 | * value written has both lower and upper 32-bit halves rotated to the right | ||
269 | * one bit position. e.g., FFFFFFFF FFFFFFFF -> 7FFFFFFF 7FFFFFFF | ||
270 | */ | ||
271 | #define REG_MINUS_BIM_DATAPATH_TEST 0x1018 /* Cassini: BIM datapath test | ||
272 | Cassini+: reserved */ | ||
273 | |||
274 | /* output enables are provided for each device's chip select and for the rest | ||
275 | * of the outputs from cassini to its local bus devices. two sw programmable | ||
276 | * bits are connected to general purpose control/status bits. | ||
277 | * DEFAULT: 0x7 | ||
278 | */ | ||
279 | #define REG_BIM_LOCAL_DEV_EN 0x1020 /* BIM local device | ||
280 | output EN. default: 0x7 */ | ||
281 | #define BIM_LOCAL_DEV_PAD 0x01 /* address bus, RW signal, and | ||
282 | OE signal output enable on the | ||
283 | local bus interface. these | ||
284 | are shared between both local | ||
285 | bus devices. tristate when 0. */ | ||
286 | #define BIM_LOCAL_DEV_PROM 0x02 /* PROM chip select */ | ||
287 | #define BIM_LOCAL_DEV_EXT 0x04 /* secondary local bus device chip | ||
288 | select output enable */ | ||
289 | #define BIM_LOCAL_DEV_SOFT_0 0x08 /* sw programmable ctrl bit 0 */ | ||
290 | #define BIM_LOCAL_DEV_SOFT_1 0x10 /* sw programmable ctrl bit 1 */ | ||
291 | #define BIM_LOCAL_DEV_HW_RESET 0x20 /* internal hw reset. Cassini+ only. */ | ||
292 | |||
293 | /* access 24 entry BIM read and write buffers. put address in REG_BIM_BUFFER_ADDR | ||
294 | * and read/write from/to it via REG_BIM_BUFFER_DATA_LOW and _DATA_HI. | ||
295 | * _DATA_HI should be the last access of the sequence. | ||
296 | * DEFAULT: undefined | ||
297 | */ | ||
298 | #define REG_BIM_BUFFER_ADDR 0x1024 /* BIM buffer address. for | ||
299 | diagnostic purposes. */ | ||
300 | #define BIM_BUFFER_ADDR_MASK 0x3F /* index (0 - 23) of buffer */ | ||
301 | #define BIM_BUFFER_WR_SELECT 0x40 /* write buffer access = 1 | ||
302 | read buffer access = 0 */ | ||
303 | /* DEFAULT: undefined */ | ||
304 | #define REG_BIM_BUFFER_DATA_LOW 0x1028 /* BIM buffer data low */ | ||
305 | #define REG_BIM_BUFFER_DATA_HI 0x102C /* BIM buffer data high */ | ||
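The access sequence the comment above prescribes, as a sketch (hypothetical helper; `regs` assumed ioremap'd):

static u64 bim_read_buffer_entry(void __iomem *regs, u32 index)
{
	u64 data;

	/* select a read-buffer entry (WR_SELECT clear = read buffer) */
	writel(index & BIM_BUFFER_ADDR_MASK, regs + REG_BIM_BUFFER_ADDR);
	data = readl(regs + REG_BIM_BUFFER_DATA_LOW);
	/* _DATA_HI must be the last access of the sequence */
	data |= (u64)readl(regs + REG_BIM_BUFFER_DATA_HI) << 32;
	return data;
}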
306 | |||
307 | /* set BIM_RAM_BIST_START to start built-in self test for BIM read buffer. | ||
308 | * bit auto-clears when done with status read from _SUMMARY and _PASS bits. | ||
309 | */ | ||
310 | #define REG_BIM_RAM_BIST 0x102C /* BIM RAM (read buffer) BIST | ||
311 | control/status */ | ||
312 | #define BIM_RAM_BIST_RD_START 0x01 /* start BIST for BIM read buffer */ | ||
313 | #define BIM_RAM_BIST_WR_START 0x02 /* start BIST for BIM write buffer. | ||
314 | Cassini only. reserved in | ||
315 | Cassini+. */ | ||
316 | #define BIM_RAM_BIST_RD_PASS 0x04 /* summary BIST pass status for read | ||
317 | buffer. */ | ||
318 | #define BIM_RAM_BIST_WR_PASS 0x08 /* summary BIST pass status for write | ||
319 | buffer. Cassini only. reserved | ||
320 | in Cassini+. */ | ||
321 | #define BIM_RAM_BIST_RD_LOW_PASS 0x10 /* read low bank passes BIST */ | ||
322 | #define BIM_RAM_BIST_RD_HI_PASS 0x20 /* read high bank passes BIST */ | ||
323 | #define BIM_RAM_BIST_WR_LOW_PASS 0x40 /* write low bank passes BIST. | ||
324 | Cassini only. reserved in | ||
325 | Cassini+. */ | ||
326 | #define BIM_RAM_BIST_WR_HI_PASS 0x80 /* write high bank passes BIST. | ||
327 | Cassini only. reserved in | ||
328 | Cassini+. */ | ||
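The kick-then-poll flow implied by the BIST comment above, sketched (function name and poll budget are assumptions):

static int bim_read_bist_passed(void __iomem *regs)
{
	int limit = 100;	/* poll budget is an assumption */

	writel(BIM_RAM_BIST_RD_START, regs + REG_BIM_RAM_BIST);
	/* the start bit self-clears when the test completes */
	while (readl(regs + REG_BIM_RAM_BIST) & BIM_RAM_BIST_RD_START) {
		if (limit-- <= 0)
			return 0;
		udelay(10);
	}
	return !!(readl(regs + REG_BIM_RAM_BIST) & BIM_RAM_BIST_RD_PASS);
}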
329 | |||
330 | /* ASUN: i'm not sure what this does as it's not in the spec. | ||
331 | * DEFAULT: 0xFC | ||
332 | */ | ||
333 | #define REG_BIM_DIAG_MUX 0x1030 /* BIM diagnostic probe mux | ||
334 | select register */ | ||
335 | |||
336 | /* enable probe monitoring mode and select data appearing on the P_A* bus. bit | ||
337 | * values for _SEL_HI_MASK and _SEL_LOW_MASK: | ||
338 | * 0x0: internal probe[7:0] (pci arb state, wtc empty w, wtc full w, wtc empty w, | ||
339 | * wtc empty r, post pci) | ||
340 | * 0x1: internal probe[15:8] (pci wbuf comp, pci wpkt comp, pci rbuf comp, | ||
341 | * pci rpkt comp, txdma wr req, txdma wr ack, | ||
342 | * txdma wr rdy, txdma wr xfr done) | ||
343 | * 0x2: internal probe[23:16] (txdma rd req, txdma rd ack, txdma rd rdy, rxdma rd, | ||
344 | * rd arb state, rd pci state) | ||
345 | * 0x3: internal probe[31:24] (rxdma req, rxdma ack, rxdma rdy, wrarb state, | ||
346 | * wrpci state) | ||
347 | * 0x4: pci io probe[7:0] 0x5: pci io probe[15:8] | ||
348 | * 0x6: pci io probe[23:16] 0x7: pci io probe[31:24] | ||
349 | * 0x8: pci io probe[39:32] 0x9: pci io probe[47:40] | ||
350 | * 0xa: pci io probe[55:48] 0xb: pci io probe[63:56] | ||
351 | * the following are not available in Cassini: | ||
352 | * 0xc: rx probe[7:0] 0xd: tx probe[7:0] | ||
353 | * 0xe: hp probe[7:0] 0xf: mac probe[7:0] | ||
354 | */ | ||
355 | #define REG_PLUS_PROBE_MUX_SELECT 0x1034 /* Cassini+: PROBE MUX SELECT */ | ||
356 | #define PROBE_MUX_EN 0x80000000 /* allow probe signals to be | ||
357 | driven on local bus P_A[15:0] | ||
358 | for debugging */ | ||
359 | #define PROBE_MUX_SUB_MUX_MASK 0x0000FF00 /* select sub module probe signals: | ||
360 | 0x03 = mac[1:0] | ||
361 | 0x0C = rx[1:0] | ||
362 | 0x30 = tx[1:0] | ||
363 | 0xC0 = hp[1:0] */ | ||
364 | #define PROBE_MUX_SEL_HI_MASK 0x000000F0 /* select which module to appear | ||
365 | on P_A[15:8]. see above for | ||
366 | values. */ | ||
367 | #define PROBE_MUX_SEL_LOW_MASK 0x0000000F /* select which module to appear | ||
368 | on P_A[7:0]. see above for | ||
369 | values. */ | ||
370 | |||
371 | /* values mean the same thing as REG_INTR_MASK except that it's for INTB. | ||
372 | DEFAULT: 0x1F */ | ||
373 | #define REG_PLUS_INTR_MASK_1 0x1038 /* Cassini+: interrupt mask | ||
374 | register 2 for INTB */ | ||
375 | #define REG_PLUS_INTRN_MASK(x) (REG_PLUS_INTR_MASK_1 + ((x) - 1)*16) | ||
376 | /* bits correspond to both _MASK and _STATUS registers. _ALT corresponds to | ||
377 | * all of the alternate (2-4) INTR registers while _1 corresponds to only | ||
378 | * _MASK_1 and _STATUS_1 registers. | ||
379 | * DEFAULT: 0x7 for MASK registers, 0x0 for ALIAS_CLEAR registers | ||
380 | */ | ||
381 | #define INTR_RX_DONE_ALT 0x01 | ||
382 | #define INTR_RX_COMP_FULL_ALT 0x02 | ||
383 | #define INTR_RX_COMP_AF_ALT 0x04 | ||
384 | #define INTR_RX_BUF_UNAVAIL_1 0x08 | ||
385 | #define INTR_RX_BUF_AE_1 0x10 /* almost empty */ | ||
386 | #define INTRN_MASK_RX_EN 0x80 | ||
387 | #define INTRN_MASK_CLEAR_ALL (INTR_RX_DONE_ALT | \ | ||
388 | INTR_RX_COMP_FULL_ALT | \ | ||
389 | INTR_RX_COMP_AF_ALT | \ | ||
390 | INTR_RX_BUF_UNAVAIL_1 | \ | ||
391 | INTR_RX_BUF_AE_1) | ||
392 | #define REG_PLUS_INTR_STATUS_1 0x103C /* Cassini+: interrupt status | ||
393 | register 2 for INTB. default: 0x1F */ | ||
394 | #define REG_PLUS_INTRN_STATUS(x) (REG_PLUS_INTR_STATUS_1 + ((x) - 1)*16) | ||
395 | #define INTR_STATUS_ALT_INTX_EN 0x80 /* generate INTX when one of the | ||
396 | flags are set. enables desc ring. */ | ||
397 | |||
398 | #define REG_PLUS_ALIAS_CLEAR_1 0x1040 /* Cassini+: alias clear mask | ||
399 | register 2 for INTB */ | ||
400 | #define REG_PLUS_ALIASN_CLEAR(x) (REG_PLUS_ALIAS_CLEAR_1 + ((x) - 1)*16) | ||
401 | |||
402 | #define REG_PLUS_INTR_STATUS_ALIAS_1 0x1044 /* Cassini+: interrupt status | ||
403 | register alias 2 for INTB */ | ||
404 | #define REG_PLUS_INTRN_STATUS_ALIAS(x) (REG_PLUS_INTR_STATUS_ALIAS_1 + ((x) - 1)*16) | ||
405 | |||
406 | #define REG_SATURN_PCFG 0x106c /* pin configuration register for | ||
407 | integrated macphy */ | ||
408 | |||
409 | #define SATURN_PCFG_TLA 0x00000001 /* 1 = phy actled */ | ||
410 | #define SATURN_PCFG_FLA 0x00000002 /* 1 = phy link10led */ | ||
411 | #define SATURN_PCFG_CLA 0x00000004 /* 1 = phy link100led */ | ||
412 | #define SATURN_PCFG_LLA 0x00000008 /* 1 = phy link1000led */ | ||
413 | #define SATURN_PCFG_RLA 0x00000010 /* 1 = phy duplexled */ | ||
414 | #define SATURN_PCFG_PDS 0x00000020 /* phy debug mode. | ||
415 | 0 = normal */ | ||
416 | #define SATURN_PCFG_MTP 0x00000080 /* test point select */ | ||
417 | #define SATURN_PCFG_GMO 0x00000100 /* GMII observe. 1 = | ||
418 | GMII on SERDES pins for | ||
419 | monitoring. */ | ||
420 | #define SATURN_PCFG_FSI 0x00000200 /* 1 = freeze serdes/gmii. all | ||
421 | pins configed as outputs. | ||
422 | for power saving when using | ||
423 | internal phy. */ | ||
424 | #define SATURN_PCFG_LAD 0x00000800 /* 0 = mac core led ctrl | ||
425 | polarity from strapping | ||
426 | value. | ||
427 | 1 = mac core led ctrl | ||
428 | polarity active low. */ | ||
429 | |||
430 | |||
431 | /** transmit dma registers **/ | ||
432 | #define MAX_TX_RINGS_SHIFT 2 | ||
433 | #define MAX_TX_RINGS (1 << MAX_TX_RINGS_SHIFT) | ||
434 | #define MAX_TX_RINGS_MASK (MAX_TX_RINGS - 1) | ||
435 | |||
436 | /* TX configuration. | ||
437 | * descr ring size = 32 * (1 << n) entries, n < 9. e.g., 0x8 = 8k. default: 0x8 | ||
438 | * DEFAULT: 0x3F000001 | ||
439 | */ | ||
440 | #define REG_TX_CFG 0x2004 /* TX config */ | ||
441 | #define TX_CFG_DMA_EN 0x00000001 /* enable TX DMA. if cleared, DMA | ||
442 | will stop after xfer of current | ||
443 | buffer has been completed. */ | ||
444 | #define TX_CFG_FIFO_PIO_SEL 0x00000002 /* TX DMA FIFO can be | ||
445 | accessed w/ FIFO addr | ||
446 | and data registers. | ||
447 | TX DMA should be | ||
448 | disabled. */ | ||
449 | #define TX_CFG_DESC_RING0_MASK 0x0000003C /* # desc entries in | ||
450 | ring 1. */ | ||
451 | #define TX_CFG_DESC_RING0_SHIFT 2 | ||
452 | #define TX_CFG_DESC_RINGN_MASK(a) (TX_CFG_DESC_RING0_MASK << (a)*4) | ||
453 | #define TX_CFG_DESC_RINGN_SHIFT(a) (TX_CFG_DESC_RING0_SHIFT + (a)*4) | ||
454 | #define TX_CFG_PACED_MODE 0x00100000 /* TX_ALL only set after | ||
455 | TX FIFO becomes empty. | ||
456 | if 0, TX_ALL set | ||
457 | if descr queue empty. */ | ||
458 | #define TX_CFG_DMA_RDPIPE_DIS 0x01000000 /* always set to 1 */ | ||
459 | #define TX_CFG_COMPWB_Q1 0x02000000 /* completion writeback happens at | ||
460 | the end of every packet kicked | ||
461 | through Q1. */ | ||
462 | #define TX_CFG_COMPWB_Q2 0x04000000 /* completion writeback happens at | ||
463 | the end of every packet kicked | ||
464 | through Q2. */ | ||
465 | #define TX_CFG_COMPWB_Q3 0x08000000 /* completion writeback happens at | ||
466 | the end of every packet kicked | ||
467 | through Q3 */ | ||
468 | #define TX_CFG_COMPWB_Q4 0x10000000 /* completion writeback happens at | ||
469 | the end of every packet kicked | ||
470 | through Q4 */ | ||
471 | #define TX_CFG_INTR_COMPWB_DIS 0x20000000 /* disable pre-interrupt completion | ||
472 | writeback */ | ||
473 | #define TX_CFG_CTX_SEL_MASK 0xC0000000 /* selects tx test port | ||
474 | connection | ||
475 | 0b00: tx mac req, | ||
476 | tx mac retry req, | ||
477 | tx ack and tx tag. | ||
478 | 0b01: txdma rd req, | ||
479 | txdma rd ack, | ||
480 | txdma rd rdy, | ||
481 | txdma rd type0 | ||
482 | 0b11: txdma wr req, | ||
483 | txdma wr ack, | ||
484 | txdma wr rdy, | ||
485 | txdma wr xfr done. */ | ||
486 | #define TX_CFG_CTX_SEL_SHIFT 30 | ||
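The ring-size encoding from the comment at the top of this block, sketched for the 8K default (n = 8 gives 32 * (1 << 8) = 8192 entries; function name hypothetical):

static void tx_cfg_set_ring0_8k(void __iomem *regs)
{
	u32 txcfg = readl(regs + REG_TX_CFG);

	txcfg &= ~TX_CFG_DESC_RING0_MASK;
	txcfg |= (0x8 << TX_CFG_DESC_RING0_SHIFT) & TX_CFG_DESC_RING0_MASK;
	writel(txcfg, regs + REG_TX_CFG);
}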
487 | |||
488 | /* 11-bit counters that point to next location in FIFO to be loaded/retrieved. | ||
489 | * used for diagnostics only. | ||
490 | */ | ||
491 | #define REG_TX_FIFO_WRITE_PTR 0x2014 /* TX FIFO write pointer */ | ||
492 | #define REG_TX_FIFO_SHADOW_WRITE_PTR 0x2018 /* TX FIFO shadow write | ||
493 | pointer. temp hold reg. | ||
494 | diagnostics only. */ | ||
495 | #define REG_TX_FIFO_READ_PTR 0x201C /* TX FIFO read pointer */ | ||
496 | #define REG_TX_FIFO_SHADOW_READ_PTR 0x2020 /* TX FIFO shadow read | ||
497 | pointer */ | ||
498 | |||
499 | /* (ro) 11-bit up/down counter w/ # of frames currently in TX FIFO */ | ||
500 | #define REG_TX_FIFO_PKT_CNT 0x2024 /* TX FIFO packet counter */ | ||
501 | |||
502 | /* current state of all state machines in TX */ | ||
503 | #define REG_TX_SM_1 0x2028 /* TX state machine reg #1 */ | ||
504 | #define TX_SM_1_CHAIN_MASK 0x000003FF /* chaining state machine */ | ||
505 | #define TX_SM_1_CSUM_MASK 0x00000C00 /* checksum state machine */ | ||
506 | #define TX_SM_1_FIFO_LOAD_MASK 0x0003F000 /* FIFO load state machine. | ||
507 | = 0x01 when TX disabled. */ | ||
508 | #define TX_SM_1_FIFO_UNLOAD_MASK 0x003C0000 /* FIFO unload state machine */ | ||
509 | #define TX_SM_1_CACHE_MASK 0x03C00000 /* desc. prefetch cache controller | ||
510 | state machine */ | ||
511 | #define TX_SM_1_CBQ_ARB_MASK 0xF8000000 /* CBQ arbiter state machine */ | ||
512 | |||
513 | #define REG_TX_SM_2 0x202C /* TX state machine reg #2 */ | ||
514 | #define TX_SM_2_COMP_WB_MASK 0x07 /* completion writeback sm */ | ||
515 | #define TX_SM_2_SUB_LOAD_MASK 0x38 /* sub load state machine */ | ||
516 | #define TX_SM_2_KICK_MASK 0xC0 /* kick state machine */ | ||
517 | |||
518 | /* 64-bit pointer to the transmit data buffer. only the 50 LSB are incremented | ||
519 | * while the upper 23 bits are taken from the TX descriptor | ||
520 | */ | ||
521 | #define REG_TX_DATA_PTR_LOW 0x2030 /* TX data pointer low */ | ||
522 | #define REG_TX_DATA_PTR_HI 0x2034 /* TX data pointer high */ | ||
523 | |||
524 | /* 13 bit registers written by driver w/ descriptor value that follows | ||
525 | * last valid xmit descriptor. kick # and complete # values are used by | ||
526 | * the xmit dma engine to control tx descr fetching. if > 1 valid | ||
527 | * tx descr is available within the cache line being read, cassini will | ||
528 | * internally cache up to 4 of them. 0 on reset. _KICK = rw, _COMP = ro. | ||
529 | */ | ||
530 | #define REG_TX_KICK0 0x2038 /* TX kick reg #1 */ | ||
531 | #define REG_TX_KICKN(x) (REG_TX_KICK0 + (x)*4) | ||
532 | #define REG_TX_COMP0 0x2048 /* TX completion reg #1 */ | ||
533 | #define REG_TX_COMPN(x) (REG_TX_COMP0 + (x)*4) | ||
534 | |||
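As a reading aid, a minimal sketch (not driver code) of how the 13-bit KICK/COMP pair sizes the in-flight region of a TX ring; ring_size (a power of two) and the readl()/writel() accessors from <linux/io.h> are assumptions of the sketch:

static inline int tx_ring_in_flight(void __iomem *regs, int ring,
				    unsigned int ring_size)
{
	u32 kick = readl(regs + REG_TX_KICKN(ring));	/* sw write index */
	u32 comp = readl(regs + REG_TX_COMPN(ring));	/* hw completion index */

	/* descriptors handed to hw but not yet completed, modulo ring size */
	return (kick - comp) & (ring_size - 1);
}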
535 | /* values of TX_COMPLETE_1-4 are written. each completion register | ||
536 | * is 2 bytes in size and contiguous. 8B allocation w/ 8B alignment. | ||
537 | * NOTE: completion reg values are only written back prior to TX_INTME and | ||
538 | * TX_ALL interrupts. at all other times, the most up-to-date index values | ||
539 | * should be obtained from the REG_TX_COMPLETE_# registers. | ||
540 | * here's the layout: | ||
541 | * offset from base addr completion # byte | ||
542 | * 0 TX_COMPLETE_1_MSB | ||
543 | * 1 TX_COMPLETE_1_LSB | ||
544 | * 2 TX_COMPLETE_2_MSB | ||
545 | * 3 TX_COMPLETE_2_LSB | ||
546 | * 4 TX_COMPLETE_3_MSB | ||
547 | * 5 TX_COMPLETE_3_LSB | ||
548 | * 6 TX_COMPLETE_4_MSB | ||
549 | * 7 TX_COMPLETE_4_LSB | ||
550 | */ | ||
551 | #define TX_COMPWB_SIZE 8 | ||
552 | #define REG_TX_COMPWB_DB_LOW 0x2058 /* TX completion write back | ||
553 | base low */ | ||
554 | #define REG_TX_COMPWB_DB_HI 0x205C /* TX completion write back | ||
555 | base high */ | ||
556 | #define TX_COMPWB_MSB_MASK 0x00000000000000FFULL | ||
557 | #define TX_COMPWB_MSB_SHIFT 0 | ||
558 | #define TX_COMPWB_LSB_MASK 0x000000000000FF00ULL | ||
559 | #define TX_COMPWB_LSB_SHIFT 8 | ||
560 | #define TX_COMPWB_NEXT(x) ((x) >> 16) | ||
561 | |||
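A hedged sketch of consuming the writeback block above: assuming the 8 bytes were read into a u64 with byte 0 in the low-order byte, each ring's 13-bit index is recovered by byte-swapping one 16-bit entry, with TX_COMPWB_NEXT() stepping between entries (the helper name is illustrative):

static inline u32 tx_compwb_index(u64 compwb, int ring)
{
	while (ring--)
		compwb = TX_COMPWB_NEXT(compwb);	/* advance to ring's entry */
	return ((compwb & TX_COMPWB_MSB_MASK) << 8) |
	       ((compwb & TX_COMPWB_LSB_MASK) >> 8);
}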
562 | /* 53 MSB used as base address. 11 LSB assumed to be 0. TX desc pointer must | ||
563 | * be 2KB-aligned. */ | ||
564 | #define REG_TX_DB0_LOW 0x2060 /* TX descriptor base low #1 */ | ||
565 | #define REG_TX_DB0_HI 0x2064 /* TX descriptor base hi #1 */ | ||
566 | #define REG_TX_DBN_LOW(x) (REG_TX_DB0_LOW + (x)*8) | ||
567 | #define REG_TX_DBN_HI(x) (REG_TX_DB0_HI + (x)*8) | ||
568 | |||
569 | /* 16-bit registers hold weights for the weighted round-robin of the | ||
570 | * four CBQ TX descr rings. weights correspond to # bytes xferred from | ||
571 | * host to TXFIFO in a round of WRR arbitration. can be set | ||
572 | * dynamically with new weights set upon completion of the current | ||
573 | * packet transfer from host memory to TXFIFO. a dummy write to any of | ||
574 | * these registers causes a queue1 pre-emption with all historical bw | ||
575 | * deficit data reset to 0 (useful when congestion requires a | ||
576 | * pre-emption/re-allocation of network bandwidth) | ||
577 | */ | ||
578 | #define REG_TX_MAXBURST_0 0x2080 /* TX MaxBurst #1 */ | ||
579 | #define REG_TX_MAXBURST_1 0x2084 /* TX MaxBurst #2 */ | ||
580 | #define REG_TX_MAXBURST_2 0x2088 /* TX MaxBurst #3 */ | ||
581 | #define REG_TX_MAXBURST_3 0x208C /* TX MaxBurst #4 */ | ||
582 | |||
583 | /* diagnostics access to any TX FIFO location. every access is 65 | ||
584 | * bits. _DATA_LOW = 32 LSB, _DATA_HI_T1/T0 = 32 MSB. _TAG = tag bit. | ||
585 | * writing _DATA_HI_T0 sets tag bit low, writing _DATA_HI_T1 sets tag | ||
586 | * bit high. TX_FIFO_PIO_SEL must be set for TX FIFO PIO access. if | ||
587 | * TX FIFO data integrity is desired, TX DMA should be | ||
588 | * disabled. _DATA_HI_Tx should be the last access of the sequence. | ||
589 | */ | ||
590 | #define REG_TX_FIFO_ADDR 0x2104 /* TX FIFO address */ | ||
591 | #define REG_TX_FIFO_TAG 0x2108 /* TX FIFO tag */ | ||
592 | #define REG_TX_FIFO_DATA_LOW 0x210C /* TX FIFO data low */ | ||
593 | #define REG_TX_FIFO_DATA_HI_T1 0x2110 /* TX FIFO data high t1 */ | ||
594 | #define REG_TX_FIFO_DATA_HI_T0 0x2114 /* TX FIFO data high t0 */ | ||
595 | #define REG_TX_FIFO_SIZE 0x2118 /* (ro) TX FIFO size = 0x090 = 9KB */ | ||
596 | |||
597 | /* 9-bit register controls BIST of TX FIFO. bit set indicates that the BIST | ||
598 | * passed for the specified memory | ||
599 | */ | ||
600 | #define REG_TX_RAMBIST 0x211C /* TX RAMBIST control/status */ | ||
601 | #define TX_RAMBIST_STATE 0x01C0 /* progress state of RAMBIST | ||
602 | controller state machine */ | ||
603 | #define TX_RAMBIST_RAM33A_PASS 0x0020 /* RAM33A passed */ | ||
604 | #define TX_RAMBIST_RAM32A_PASS 0x0010 /* RAM32A passed */ | ||
605 | #define TX_RAMBIST_RAM33B_PASS 0x0008 /* RAM33B passed */ | ||
606 | #define TX_RAMBIST_RAM32B_PASS 0x0004 /* RAM32B passed */ | ||
607 | #define TX_RAMBIST_SUMMARY 0x0002 /* all RAM passed */ | ||
608 | #define TX_RAMBIST_START 0x0001 /* write 1 to start BIST. self | ||
609 | clears on completion. */ | ||
610 | |||
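A sketch of the BIST handshake the comment describes (the start bit self-clears, then the summary bit is checked); the 100-iteration timeout and udelay() step from <linux/delay.h> are illustrative choices, not chip requirements:

static bool tx_fifo_bist_ok(void __iomem *regs)
{
	int i;

	writel(TX_RAMBIST_START, regs + REG_TX_RAMBIST);	/* kick BIST */
	for (i = 0; i < 100; i++) {
		u32 val = readl(regs + REG_TX_RAMBIST);
		if (!(val & TX_RAMBIST_START))			/* completed */
			return val & TX_RAMBIST_SUMMARY;
		udelay(10);
	}
	return false;	/* BIST never completed */
}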
611 | /** receive dma registers **/ | ||
612 | #define MAX_RX_DESC_RINGS 2 | ||
613 | #define MAX_RX_COMP_RINGS 4 | ||
614 | |||
615 | /* receive DMA channel configuration. | ||
616 | * free ring size = (1 << n)*32 -> [32 - 8k] | ||
617 | * completion ring size = (1 << n)*128 -> [128 - 32k], n < 9 | ||
618 | * DEFAULT: 0x80910 | ||
619 | */ | ||
620 | #define REG_RX_CFG 0x4000 /* RX config */ | ||
621 | #define RX_CFG_DMA_EN 0x00000001 /* enable RX DMA. 0 stops | ||
622 | channel as soon as current | ||
623 | frame xfer has completed. | ||
624 | driver should disable MAC | ||
625 | for 200ms before disabling | ||
626 | RX */ | ||
627 | #define RX_CFG_DESC_RING_MASK 0x0000001E /* # desc entries in RX | ||
628 | free desc ring. | ||
629 | def: 0x8 = 8k */ | ||
630 | #define RX_CFG_DESC_RING_SHIFT 1 | ||
631 | #define RX_CFG_COMP_RING_MASK 0x000001E0 /* # desc entries in RX complete | ||
632 | ring. def: 0x8 = 32k */ | ||
633 | #define RX_CFG_COMP_RING_SHIFT 5 | ||
634 | #define RX_CFG_BATCH_DIS 0x00000200 /* disable receive desc | ||
635 | batching. def: 0x0 = | ||
636 | enabled */ | ||
637 | #define RX_CFG_SWIVEL_MASK 0x00001C00 /* byte offset of the 1st | ||
638 | data byte of the packet | ||
639 | w/in 8 byte boundaries. | ||
640 | this swivels the data | ||
641 | DMA'ed to header | ||
642 | buffers, jumbo buffers | ||
643 | when header split is not | ||
644 | requested and MTU sized | ||
645 | buffers. def: 0x2 */ | ||
646 | #define RX_CFG_SWIVEL_SHIFT 10 | ||
647 | |||
648 | /* cassini+ only */ | ||
649 | #define RX_CFG_DESC_RING1_MASK 0x000F0000 /* # of desc entries in | ||
650 | RX free desc ring 2. | ||
651 | def: 0x8 = 8k */ | ||
652 | #define RX_CFG_DESC_RING1_SHIFT 16 | ||
653 | |||
654 | |||
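A sketch of encoding the "(1 << n)*32" / "(1 << n)*128" ring sizes into REG_RX_CFG; it assumes desc_entries and comp_entries are powers of two within the stated ranges, and uses the kernel's ilog2() helper from <linux/log2.h>:

static void rx_cfg_set_ring_sizes(void __iomem *regs,
				  unsigned int desc_entries,
				  unsigned int comp_entries)
{
	u32 val = readl(regs + REG_RX_CFG);

	val &= ~(RX_CFG_DESC_RING_MASK | RX_CFG_COMP_RING_MASK);
	val |= (ilog2(desc_entries / 32) << RX_CFG_DESC_RING_SHIFT) &
	       RX_CFG_DESC_RING_MASK;
	val |= (ilog2(comp_entries / 128) << RX_CFG_COMP_RING_SHIFT) &
	       RX_CFG_COMP_RING_MASK;
	writel(val, regs + REG_RX_CFG);
}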
655 | /* the page size register allows cassini chips to do the following with | ||
656 | * received data: | ||
657 | * [--------------------------------------------------------------] page | ||
658 | * [off][buf1][pad][off][buf2][pad][off][buf3][pad][off][buf4][pad] | ||
659 | * |--------------| = PAGE_SIZE_BUFFER_STRIDE | ||
660 | * page = PAGE_SIZE | ||
661 | * offset = PAGE_SIZE_MTU_OFF | ||
662 | * for the above example, MTU_BUFFER_COUNT = 4. | ||
663 | * NOTE: as is apparent, you need to ensure that the following holds: | ||
664 | * MTU_BUFFER_COUNT <= PAGE_SIZE/PAGE_SIZE_BUFFER_STRIDE | ||
665 | * DEFAULT: 0x48002002 (8k pages) | ||
666 | */ | ||
667 | #define REG_RX_PAGE_SIZE 0x4004 /* RX page size */ | ||
668 | #define RX_PAGE_SIZE_MASK 0x00000003 /* size of pages pointed to | ||
669 | by receive descriptors. | ||
670 | if jumbo buffers are | ||
671 | supported the page size | ||
672 | should not be < 8k. | ||
673 | 0b00 = 2k, 0b01 = 4k | ||
674 | 0b10 = 8k, 0b11 = 16k | ||
675 | DEFAULT: 8k */ | ||
676 | #define RX_PAGE_SIZE_SHIFT 0 | ||
677 | #define RX_PAGE_SIZE_MTU_COUNT_MASK 0x00007800 /* # of MTU buffers the hw | ||
678 | packs into a page. | ||
679 | DEFAULT: 4 */ | ||
680 | #define RX_PAGE_SIZE_MTU_COUNT_SHIFT 11 | ||
681 | #define RX_PAGE_SIZE_MTU_STRIDE_MASK 0x18000000 /* # of bytes that separate | ||
682 | each MTU buffer + | ||
683 | offset from each | ||
684 | other. | ||
685 | 0b00 = 1k, 0b01 = 2k | ||
686 | 0b10 = 4k, 0b11 = 8k | ||
687 | DEFAULT: 0x1 */ | ||
688 | #define RX_PAGE_SIZE_MTU_STRIDE_SHIFT 27 | ||
689 | #define RX_PAGE_SIZE_MTU_OFF_MASK 0xC0000000 /* offset in each page that | ||
690 | hw writes the MTU buffer | ||
691 | into. | ||
692 | 0b00 = 0, | ||
693 | 0b01 = 64 bytes | ||
694 | 0b10 = 96, 0b11 = 128 | ||
695 | DEFAULT: 0x1 */ | ||
696 | #define RX_PAGE_SIZE_MTU_OFF_SHIFT 30 | ||
697 | |||
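As a check on the field encodings, the documented default 0x48002002 can be rebuilt from the definitions above (8k pages = 0b10, 4 MTU buffers per page, 2k stride = 0b01, 64-byte offset = 0b01); a sketch:

static inline u32 rx_page_size_default(void)
{
	return (2 << RX_PAGE_SIZE_SHIFT) |		/* 8k pages */
	       (4 << RX_PAGE_SIZE_MTU_COUNT_SHIFT) |	/* 4 buffers/page */
	       (1 << RX_PAGE_SIZE_MTU_STRIDE_SHIFT) |	/* 2k stride */
	       (1 << RX_PAGE_SIZE_MTU_OFF_SHIFT);	/* 64-byte offset */
}	/* == 0x48002002 */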
698 | /* 11-bit counter points to next location in RX FIFO to be loaded/read. | ||
699 | * shadow write pointers enable retries in case of early receive aborts. | ||
700 | * DEFAULT: 0x0. generated on 64-bit boundaries. | ||
701 | */ | ||
702 | #define REG_RX_FIFO_WRITE_PTR 0x4008 /* RX FIFO write pointer */ | ||
703 | #define REG_RX_FIFO_READ_PTR 0x400C /* RX FIFO read pointer */ | ||
704 | #define REG_RX_IPP_FIFO_SHADOW_WRITE_PTR 0x4010 /* RX IPP FIFO shadow write | ||
705 | pointer */ | ||
706 | #define REG_RX_IPP_FIFO_SHADOW_READ_PTR 0x4014 /* RX IPP FIFO shadow read | ||
707 | pointer */ | ||
708 | #define REG_RX_IPP_FIFO_READ_PTR 0x400C /* RX IPP FIFO read | ||
709 | pointer. (8-bit counter) */ | ||
710 | |||
711 | /* current state of RX DMA state engines + other info | ||
712 | * DEFAULT: 0x0 | ||
713 | */ | ||
714 | #define REG_RX_DEBUG 0x401C /* RX debug */ | ||
715 | #define RX_DEBUG_LOAD_STATE_MASK 0x0000000F /* load state machine w/ MAC: | ||
716 | 0x0 = idle, 0x1 = load_bop | ||
717 | 0x2 = load 1, 0x3 = load 2 | ||
718 | 0x4 = load 3, 0x5 = load 4 | ||
719 | 0x6 = last detect | ||
720 | 0x7 = wait req | ||
721 | 0x8 = wait req status 1st | ||
722 | 0x9 = load st | ||
723 | 0xa = bubble mac | ||
724 | 0xb = error */ | ||
725 | #define RX_DEBUG_LM_STATE_MASK 0x00000070 /* load state machine w/ HP and | ||
726 | RX FIFO: | ||
727 | 0x0 = idle, 0x1 = hp xfr | ||
728 | 0x2 = wait hp ready | ||
729 | 0x3 = wait flow code | ||
730 | 0x4 = fifo xfer | ||
731 | 0x5 = make status | ||
732 | 0x6 = csum ready | ||
733 | 0x7 = error */ | ||
734 | #define RX_DEBUG_FC_STATE_MASK 0x00000180 /* flow control state machine | ||
735 | w/ MAC: | ||
736 | 0x0 = idle | ||
737 | 0x1 = wait xoff ack | ||
738 | 0x2 = wait xon | ||
739 | 0x3 = wait xon ack */ | ||
740 | #define RX_DEBUG_DATA_STATE_MASK 0x00001E00 /* unload data state machine | ||
741 | states: | ||
742 | 0x0 = idle data | ||
743 | 0x1 = header begin | ||
744 | 0x2 = xfer header | ||
745 | 0x3 = xfer header ld | ||
746 | 0x4 = mtu begin | ||
747 | 0x5 = xfer mtu | ||
748 | 0x6 = xfer mtu ld | ||
749 | 0x7 = jumbo begin | ||
750 | 0x8 = xfer jumbo | ||
751 | 0x9 = xfer jumbo ld | ||
752 | 0xa = reas begin | ||
753 | 0xb = xfer reas | ||
754 | 0xc = flush tag | ||
755 | 0xd = xfer reas ld | ||
756 | 0xe = error | ||
757 | 0xf = bubble idle */ | ||
758 | #define RX_DEBUG_DESC_STATE_MASK 0x0001E000 /* unload desc state machine | ||
759 | states: | ||
760 | 0x0 = idle desc | ||
761 | 0x1 = wait ack | ||
762 | 0x9 = wait ack 2 | ||
763 | 0x2 = fetch desc 1 | ||
764 | 0xa = fetch desc 2 | ||
765 | 0x3 = load ptrs | ||
766 | 0x4 = wait dma | ||
767 | 0x5 = wait ack batch | ||
768 | 0x6 = post batch | ||
769 | 0x7 = xfr done */ | ||
770 | #define RX_DEBUG_INTR_READ_PTR_MASK 0x30000000 /* interrupt read ptr of the | ||
771 | interrupt queue */ | ||
772 | #define RX_DEBUG_INTR_WRITE_PTR_MASK 0xC0000000 /* interrupt write pointer | ||
773 | of the interrupt queue */ | ||
774 | |||
775 | /* flow control frames are emitted using two PAUSE thresholds: | ||
776 | * XOFF PAUSE uses pause time value pre-programmed in the Send PAUSE MAC reg | ||
777 | * XON PAUSE uses a pause time of 0. granularity of threshold is 64bytes. | ||
778 | * PAUSE thresholds defined in terms of FIFO occupancy and may be translated | ||
779 | * into FIFO vacancy using RX_FIFO_SIZE. setting ON will trigger XON frames | ||
780 | * when FIFO reaches 0. OFF threshold should not be > size of RX FIFO. max | ||
781 | * value is 0x6F. | ||
782 | * DEFAULT: 0x00078 | ||
783 | */ | ||
784 | #define REG_RX_PAUSE_THRESH 0x4020 /* RX pause thresholds */ | ||
785 | #define RX_PAUSE_THRESH_QUANTUM 64 | ||
786 | #define RX_PAUSE_THRESH_OFF_MASK 0x000001FF /* XOFF PAUSE emitted when | ||
787 | RX FIFO occupancy > | ||
788 | value*64B */ | ||
789 | #define RX_PAUSE_THRESH_OFF_SHIFT 0 | ||
790 | #define RX_PAUSE_THRESH_ON_MASK 0x001FF000 /* XON PAUSE emitted after | ||
791 | emitting XOFF PAUSE when RX | ||
792 | FIFO occupancy falls below | ||
793 | this value*64B. must be | ||
794 | < XOFF threshold. if = | ||
795 | RX_FIFO_SIZE, XON frames are | ||
796 | never emitted. */ | ||
797 | #define RX_PAUSE_THRESH_ON_SHIFT 12 | ||
798 | |||
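A sketch converting byte thresholds into the 64-byte quanta above; with the default 0x00078, XOFF fires at 0x78 * 64 = 7680 bytes of occupancy and the XON threshold is 0. The helper name and byte-count parameters are illustrative:

static void rx_set_pause_thresh(void __iomem *regs,
				unsigned int off_bytes, unsigned int on_bytes)
{
	u32 val;

	val  = ((off_bytes / RX_PAUSE_THRESH_QUANTUM) <<
		RX_PAUSE_THRESH_OFF_SHIFT) & RX_PAUSE_THRESH_OFF_MASK;
	val |= ((on_bytes / RX_PAUSE_THRESH_QUANTUM) <<
		RX_PAUSE_THRESH_ON_SHIFT) & RX_PAUSE_THRESH_ON_MASK;
	writel(val, regs + REG_RX_PAUSE_THRESH);
}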
799 | /* 13-bit register used to control RX desc fetching and intr generation. if 4+ | ||
800 | * valid RX descriptors are available, Cassini will read 4 at a time. | ||
801 | * writing N means that all desc up to *but* excluding N are available. N must | ||
802 | * be a multiple of 4 (N % 4 = 0). first desc should be cache-line aligned. | ||
803 | * DEFAULT: 0 on reset | ||
804 | */ | ||
805 | #define REG_RX_KICK 0x4024 /* RX kick reg */ | ||
806 | |||
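A sketch honoring the N % 4 == 0 rule when kicking the free ring; next_free (one past the last posted descriptor) is an illustrative name:

static inline void rx_kick(void __iomem *regs, u32 next_free)
{
	writel(next_free & ~3U, regs + REG_RX_KICK);	/* round down to x4 */
}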
807 | /* 8KB aligned 64-bit pointer to the base of the RX free/completion rings. | ||
808 | * lower 13 bits of the low register are hard-wired to 0. | ||
809 | */ | ||
810 | #define REG_RX_DB_LOW 0x4028 /* RX descriptor ring | ||
811 | base low */ | ||
812 | #define REG_RX_DB_HI 0x402C /* RX descriptor ring | ||
813 | base hi */ | ||
814 | #define REG_RX_CB_LOW 0x4030 /* RX completion ring | ||
815 | base low */ | ||
816 | #define REG_RX_CB_HI 0x4034 /* RX completion ring | ||
817 | base hi */ | ||
818 | /* 13-bit register indicates the desc used by cassini for receive frames. used | ||
819 | * for diagnostic purposes. | ||
820 | * DEFAULT: 0 on reset | ||
821 | */ | ||
822 | #define REG_RX_COMP 0x4038 /* (ro) RX completion */ | ||
823 | |||
824 | /* HEAD and TAIL are used to control RX desc posting and interrupt | ||
825 | * generation. hw moves the head register to pass ownership to sw. sw | ||
826 | * moves the tail register to pass ownership back to hw. to give all | ||
827 | * entries to hw, set TAIL = HEAD. if HEAD and TAIL indicate that no | ||
828 | * more entries are available, DMA will pause and an interrupt will be | ||
829 | * generated to indicate no more entries are available. sw can use | ||
830 | * this interrupt to reduce the # of times it must update the | ||
831 | * completion tail register. | ||
832 | * DEFAULT: 0 on reset | ||
833 | */ | ||
834 | #define REG_RX_COMP_HEAD 0x403C /* RX completion head */ | ||
835 | #define REG_RX_COMP_TAIL 0x4040 /* RX completion tail */ | ||
836 | |||
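A sketch of that ownership handshake; process_entry() is a placeholder, and the 13-bit wrap mirrors the index width used elsewhere in this block (both are assumptions of the sketch):

static void rx_reclaim_completions(void __iomem *regs)
{
	u32 head = readl(regs + REG_RX_COMP_HEAD);
	u32 tail = readl(regs + REG_RX_COMP_TAIL);

	while (tail != head) {
		process_entry(tail);		/* consume one completion */
		tail = (tail + 1) & 0x1FFF;	/* 13-bit index wrap */
	}
	writel(tail, regs + REG_RX_COMP_TAIL);	/* TAIL = HEAD: all to hw */
}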
837 | /* values used for receive interrupt blanking. loaded each time the ISR is read | ||
838 | * DEFAULT: 0x00000000 | ||
839 | */ | ||
840 | #define REG_RX_BLANK 0x4044 /* RX blanking register | ||
841 | for ISR read */ | ||
842 | #define RX_BLANK_INTR_PKT_MASK 0x000001FF /* RX_DONE intr asserted if | ||
843 | this many sets of completion | ||
844 | writebacks (up to 2 packets) | ||
845 | occur since the last time | ||
846 | the ISR was read. 0 = no | ||
847 | packet blanking */ | ||
848 | #define RX_BLANK_INTR_PKT_SHIFT 0 | ||
849 | #define RX_BLANK_INTR_TIME_MASK 0x3FFFF000 /* RX_DONE interrupt asserted | ||
850 | if that many clocks were | ||
851 | counted since last time the | ||
852 | ISR was read. | ||
853 | each count is 512 core | ||
854 | clocks (125MHz). 0 = no | ||
855 | time blanking */ | ||
856 | #define RX_BLANK_INTR_TIME_SHIFT 12 | ||
857 | |||
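Since each TIME count is 512 cycles of the 125 MHz core clock (4.096 us), a sketch packing a packet count and a microsecond interval into the register:

static inline u32 rx_blank_value(unsigned int pkts, unsigned int usec)
{
	u32 ticks = (usec * 1000) / 4096;	/* 512/125MHz = 4096 ns per tick */

	return ((pkts << RX_BLANK_INTR_PKT_SHIFT) & RX_BLANK_INTR_PKT_MASK) |
	       ((ticks << RX_BLANK_INTR_TIME_SHIFT) & RX_BLANK_INTR_TIME_MASK);
}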
858 | /* values used for interrupt generation based on threshold values of how | ||
859 | * many free desc and completion entries are available for hw use. | ||
860 | * DEFAULT: 0x00000000 | ||
861 | */ | ||
862 | #define REG_RX_AE_THRESH 0x4048 /* RX almost empty | ||
863 | thresholds */ | ||
864 | #define RX_AE_THRESH_FREE_MASK 0x00001FFF /* RX_BUF_AE will be | ||
865 | generated if # desc | ||
866 | avail for hw use <= | ||
867 | # */ | ||
868 | #define RX_AE_THRESH_FREE_SHIFT 0 | ||
869 | #define RX_AE_THRESH_COMP_MASK 0x0FFFE000 /* RX_COMP_AE will be | ||
870 | generated if # of | ||
871 | completion entries | ||
872 | avail for hw use <= | ||
873 | # */ | ||
874 | #define RX_AE_THRESH_COMP_SHIFT 13 | ||
875 | |||
876 | /* probabilities for random early drop (RED) thresholds on a FIFO threshold | ||
877 | * basis. probability should increase when the FIFO level increases. control | ||
878 | * packets are never dropped and not counted in stats. probability programmed | ||
879 | * on a 12.5% granularity. e.g., 0x1 = 1/8 packets dropped. | ||
880 | * DEFAULT: 0x00000000 | ||
881 | */ | ||
882 | #define REG_RX_RED 0x404C /* RX random early detect enable */ | ||
883 | #define RX_RED_4K_6K_FIFO_MASK 0x000000FF /* 4KB < FIFO thresh < 6KB */ | ||
884 | #define RX_RED_6K_8K_FIFO_MASK 0x0000FF00 /* 6KB < FIFO thresh < 8KB */ | ||
885 | #define RX_RED_8K_10K_FIFO_MASK 0x00FF0000 /* 8KB < FIFO thresh < 10KB */ | ||
886 | #define RX_RED_10K_12K_FIFO_MASK 0xFF000000 /* 10KB < FIFO thresh < 12KB */ | ||
887 | |||
888 | /* FIFO fullness levels for RX FIFO, RX control FIFO, and RX IPP FIFO. | ||
889 | * RX control FIFO = # of packets in RX FIFO. | ||
890 | * DEFAULT: 0x0 | ||
891 | */ | ||
892 | #define REG_RX_FIFO_FULLNESS 0x4050 /* (ro) RX FIFO fullness */ | ||
893 | #define RX_FIFO_FULLNESS_RX_FIFO_MASK 0x3FF80000 /* level w/ 8B granularity */ | ||
894 | #define RX_FIFO_FULLNESS_IPP_FIFO_MASK 0x0007FF00 /* level w/ 8B granularity */ | ||
895 | #define RX_FIFO_FULLNESS_RX_PKT_MASK 0x000000FF /* # packets in RX FIFO */ | ||
896 | #define REG_RX_IPP_PACKET_COUNT 0x4054 /* RX IPP packet counter */ | ||
897 | #define REG_RX_WORK_DMA_PTR_LOW 0x4058 /* RX working DMA ptr low */ | ||
898 | #define REG_RX_WORK_DMA_PTR_HI 0x405C /* RX working DMA ptr | ||
899 | high */ | ||
900 | |||
901 | /* BIST testing of RX FIFO, RX control FIFO, and RX IPP FIFO. only RX BIST | ||
902 | * START/COMPLETE is writeable. START will clear when the BIST has completed | ||
903 | * checking all 17 RAMS. | ||
904 | * DEFAULT: 0bxxxx xxxx xxxx xxxx x000 0000 0000 00x0 | ||
905 | */ | ||
906 | #define REG_RX_BIST 0x4060 /* (ro) RX BIST */ | ||
907 | #define RX_BIST_32A_PASS 0x80000000 /* RX FIFO 32A passed */ | ||
908 | #define RX_BIST_33A_PASS 0x40000000 /* RX FIFO 33A passed */ | ||
909 | #define RX_BIST_32B_PASS 0x20000000 /* RX FIFO 32B passed */ | ||
910 | #define RX_BIST_33B_PASS 0x10000000 /* RX FIFO 33B passed */ | ||
911 | #define RX_BIST_32C_PASS 0x08000000 /* RX FIFO 32C passed */ | ||
912 | #define RX_BIST_33C_PASS 0x04000000 /* RX FIFO 33C passed */ | ||
913 | #define RX_BIST_IPP_32A_PASS 0x02000000 /* RX IPP FIFO 32A passed */ | ||
914 | #define RX_BIST_IPP_33A_PASS 0x01000000 /* RX IPP FIFO 33A passed */ | ||
915 | #define RX_BIST_IPP_32B_PASS 0x00800000 /* RX IPP FIFO 32B passed */ | ||
916 | #define RX_BIST_IPP_33B_PASS 0x00400000 /* RX IPP FIFO 33B passed */ | ||
917 | #define RX_BIST_IPP_32C_PASS 0x00200000 /* RX IPP FIFO 32C passed */ | ||
918 | #define RX_BIST_IPP_33C_PASS 0x00100000 /* RX IPP FIFO 33C passed */ | ||
919 | #define RX_BIST_CTRL_32_PASS 0x00800000 /* RX CTRL FIFO 32 passed */ | ||
920 | #define RX_BIST_CTRL_33_PASS 0x00400000 /* RX CTRL FIFO 33 passed */ | ||
921 | #define RX_BIST_REAS_26A_PASS 0x00200000 /* RX Reas 26A passed */ | ||
922 | #define RX_BIST_REAS_26B_PASS 0x00100000 /* RX Reas 26B passed */ | ||
923 | #define RX_BIST_REAS_27_PASS 0x00080000 /* RX Reas 27 passed */ | ||
924 | #define RX_BIST_STATE_MASK 0x00078000 /* BIST state machine */ | ||
925 | #define RX_BIST_SUMMARY 0x00000002 /* when BIST complete, | ||
926 | summary pass bit | ||
927 | contains AND of BIST | ||
928 | results of all 17 | ||
929 | RAMS */ | ||
930 | #define RX_BIST_START 0x00000001 /* write 1 to start | ||
931 | BIST. self clears | ||
932 | on completion. */ | ||
933 | |||
934 | /* next location in RX CTRL FIFO that will be loaded w/ data from RX IPP/read | ||
935 | * from to retrieve packet control info. | ||
936 | * DEFAULT: 0 | ||
937 | */ | ||
938 | #define REG_RX_CTRL_FIFO_WRITE_PTR 0x4064 /* (ro) RX control FIFO | ||
939 | write ptr */ | ||
940 | #define REG_RX_CTRL_FIFO_READ_PTR 0x4068 /* (ro) RX control FIFO read | ||
941 | ptr */ | ||
942 | |||
943 | /* receive interrupt blanking. loaded each time interrupt alias register is | ||
944 | * read. | ||
945 | * DEFAULT: 0x0 | ||
946 | */ | ||
947 | #define REG_RX_BLANK_ALIAS_READ 0x406C /* RX blanking register for | ||
948 | alias read */ | ||
949 | #define RX_BAR_INTR_PACKET_MASK 0x000001FF /* assert RX_DONE if # | ||
950 | completion writebacks | ||
951 | > # since last ISR | ||
952 | read. 0 = no | ||
953 | blanking. up to 2 | ||
954 | packets per | ||
955 | completion wb. */ | ||
956 | #define RX_BAR_INTR_TIME_MASK 0x3FFFF000 /* assert RX_DONE if # | ||
957 | clocks > # since last | ||
958 | ISR read. each count | ||
959 | is 512 core clocks | ||
960 | (125MHz). 0 = no | ||
961 | blanking. */ | ||
962 | |||
963 | /* diagnostic access to RX FIFO. 32 LSB accessed via DATA_LOW. 32 MSB accessed | ||
964 | * via DATA_HI_T0 or DATA_HI_T1. TAG reads the tag bit. writing HI_T0 | ||
965 | * will unset the tag bit while writing HI_T1 will set the tag bit. to reset | ||
966 | * to normal operation after diagnostics, write to address location 0x0. | ||
967 | * RX_DMA_EN bit must be set to 0x0 for RX FIFO PIO access. DATA_HI should | ||
968 | * be the last write access of a write sequence. | ||
969 | * DEFAULT: undefined | ||
970 | */ | ||
971 | #define REG_RX_FIFO_ADDR 0x4080 /* RX FIFO address */ | ||
972 | #define REG_RX_FIFO_TAG 0x4084 /* RX FIFO tag */ | ||
973 | #define REG_RX_FIFO_DATA_LOW 0x4088 /* RX FIFO data low */ | ||
974 | #define REG_RX_FIFO_DATA_HI_T0 0x408C /* RX FIFO data high T0 */ | ||
975 | #define REG_RX_FIFO_DATA_HI_T1 0x4090 /* RX FIFO data high T1 */ | ||
976 | |||
977 | /* diagnostic access to RX CTRL FIFO. 8-bit FIFO_ADDR holds address of | ||
978 | * 81 bit control entry and 6 bit flow id. LOW and MID are both 32-bit | ||
979 | * accesses. HI is 7-bits with 6-bit flow id and 1 bit control | ||
980 | * word. RX_DMA_EN must be 0 for RX CTRL FIFO PIO access. DATA_HI | ||
981 | * should be last write access of the write sequence. | ||
982 | * DEFAULT: undefined | ||
983 | */ | ||
984 | #define REG_RX_CTRL_FIFO_ADDR 0x4094 /* RX Control FIFO and | ||
985 | Batching FIFO addr */ | ||
986 | #define REG_RX_CTRL_FIFO_DATA_LOW 0x4098 /* RX Control FIFO data | ||
987 | low */ | ||
988 | #define REG_RX_CTRL_FIFO_DATA_MID 0x409C /* RX Control FIFO data | ||
989 | mid */ | ||
990 | #define REG_RX_CTRL_FIFO_DATA_HI 0x4100 /* RX Control FIFO data | ||
991 | hi and flow id */ | ||
992 | #define RX_CTRL_FIFO_DATA_HI_CTRL 0x0001 /* upper bit of ctrl word */ | ||
993 | #define RX_CTRL_FIFO_DATA_HI_FLOW_MASK 0x007E /* flow id */ | ||
994 | |||
995 | /* diagnostic access to RX IPP FIFO. same semantics as RX_FIFO. | ||
996 | * DEFAULT: undefined | ||
997 | */ | ||
998 | #define REG_RX_IPP_FIFO_ADDR 0x4104 /* RX IPP FIFO address */ | ||
999 | #define REG_RX_IPP_FIFO_TAG 0x4108 /* RX IPP FIFO tag */ | ||
1000 | #define REG_RX_IPP_FIFO_DATA_LOW 0x410C /* RX IPP FIFO data low */ | ||
1001 | #define REG_RX_IPP_FIFO_DATA_HI_T0 0x4110 /* RX IPP FIFO data high | ||
1002 | T0 */ | ||
1003 | #define REG_RX_IPP_FIFO_DATA_HI_T1 0x4114 /* RX IPP FIFO data high | ||
1004 | T1 */ | ||
1005 | |||
1006 | /* 64-bit pointer to receive data buffer in host memory used for headers and | ||
1007 | * small packets. MSB in high register. loaded by DMA state machine and | ||
1008 | * increments as DMA writes receive data. only 50 LSB are incremented. top | ||
1009 | * 13 bits taken from RX descriptor. | ||
1010 | * DEFAULT: undefined | ||
1011 | */ | ||
1012 | #define REG_RX_HEADER_PAGE_PTR_LOW 0x4118 /* (ro) RX header page ptr | ||
1013 | low */ | ||
1014 | #define REG_RX_HEADER_PAGE_PTR_HI 0x411C /* (ro) RX header page ptr | ||
1015 | high */ | ||
1016 | #define REG_RX_MTU_PAGE_PTR_LOW 0x4120 /* (ro) RX MTU page pointer | ||
1017 | low */ | ||
1018 | #define REG_RX_MTU_PAGE_PTR_HI 0x4124 /* (ro) RX MTU page pointer | ||
1019 | high */ | ||
1020 | |||
1021 | /* PIO diagnostic access to RX reassembly DMA Table RAM. 6-bit register holds | ||
1022 | * one of 64 79-bit locations in the RX Reassembly DMA table and the addr of | ||
1023 | * one of the 64 byte locations in the Batching table. LOW holds 32 LSB. | ||
1024 | * MID holds the next 32 LSB. HIGH holds the 15 MSB. RX_DMA_EN must be set | ||
1025 | * to 0 for PIO access. DATA_HIGH should be last write of write sequence. | ||
1026 | * layout: | ||
1027 | * reassm ptr [78:15] | reassm index [14:1] | reassm entry valid [0] | ||
1028 | * DEFAULT: undefined | ||
1029 | */ | ||
1030 | #define REG_RX_TABLE_ADDR 0x4128 /* RX reassembly DMA table | ||
1031 | address */ | ||
1032 | #define RX_TABLE_ADDR_MASK 0x0000003F /* address mask */ | ||
1033 | |||
1034 | #define REG_RX_TABLE_DATA_LOW 0x412C /* RX reassembly DMA table | ||
1035 | data low */ | ||
1036 | #define REG_RX_TABLE_DATA_MID 0x4130 /* RX reassembly DMA table | ||
1037 | data mid */ | ||
1038 | #define REG_RX_TABLE_DATA_HI 0x4134 /* RX reassembly DMA table | ||
1039 | data high */ | ||
1040 | |||
1041 | /* cassini+ only */ | ||
1042 | /* 8KB aligned 64-bit pointer to base of RX rings. lower 13 bits hardwired to | ||
1043 | * 0. same semantics as primary desc/complete rings. | ||
1044 | */ | ||
1045 | #define REG_PLUS_RX_DB1_LOW 0x4200 /* RX descriptor ring | ||
1046 | 2 base low */ | ||
1047 | #define REG_PLUS_RX_DB1_HI 0x4204 /* RX descriptor ring | ||
1048 | 2 base high */ | ||
1049 | #define REG_PLUS_RX_CB1_LOW 0x4208 /* RX completion ring | ||
1050 | 2 base low. 4 total */ | ||
1051 | #define REG_PLUS_RX_CB1_HI 0x420C /* RX completion ring | ||
1052 | 2 base high. 4 total */ | ||
1053 | #define REG_PLUS_RX_CBN_LOW(x) (REG_PLUS_RX_CB1_LOW + 8*((x) - 1)) | ||
1054 | #define REG_PLUS_RX_CBN_HI(x) (REG_PLUS_RX_CB1_HI + 8*((x) - 1)) | ||
1055 | #define REG_PLUS_RX_KICK1 0x4220 /* RX Kick 2 register */ | ||
1056 | #define REG_PLUS_RX_COMP1 0x4224 /* (ro) RX completion 2 | ||
1057 | reg */ | ||
1058 | #define REG_PLUS_RX_COMP1_HEAD 0x4228 /* (ro) RX completion 2 | ||
1059 | head reg. 4 total. */ | ||
1060 | #define REG_PLUS_RX_COMP1_TAIL 0x422C /* RX completion 2 | ||
1061 | tail reg. 4 total. */ | ||
1062 | #define REG_PLUS_RX_COMPN_HEAD(x) (REG_PLUS_RX_COMP1_HEAD + 8*((x) - 1)) | ||
1063 | #define REG_PLUS_RX_COMPN_TAIL(x) (REG_PLUS_RX_COMP1_TAIL + 8*((x) - 1)) | ||
1064 | #define REG_PLUS_RX_AE1_THRESH 0x4240 /* RX almost empty 2 | ||
1065 | thresholds */ | ||
1066 | #define RX_AE1_THRESH_FREE_MASK RX_AE_THRESH_FREE_MASK | ||
1067 | #define RX_AE1_THRESH_FREE_SHIFT RX_AE_THRESH_FREE_SHIFT | ||
1068 | |||
1069 | /** header parser registers **/ | ||
1070 | |||
1071 | /* RX parser configuration register. | ||
1072 | * DEFAULT: 0x1651004 | ||
1073 | */ | ||
1074 | #define REG_HP_CFG 0x4140 /* header parser | ||
1075 | configuration reg */ | ||
1076 | #define HP_CFG_PARSE_EN 0x00000001 /* enab header parsing */ | ||
1077 | #define HP_CFG_NUM_CPU_MASK 0x000000FC /* # processors | ||
1078 | 0 = 64. 0x3f = 63 */ | ||
1079 | #define HP_CFG_NUM_CPU_SHIFT 2 | ||
1080 | #define HP_CFG_SYN_INC_MASK 0x00000100 /* SYN bit won't increment | ||
1081 | TCP seq # by one when | ||
1082 | stored in FDBM */ | ||
1083 | #define HP_CFG_TCP_THRESH_MASK 0x000FFE00 /* # bytes of TCP data | ||
1084 | needed to be considered | ||
1085 | for reassembly */ | ||
1086 | #define HP_CFG_TCP_THRESH_SHIFT 9 | ||
1087 | |||
1088 | /* access to RX Instruction RAM. 5-bit register/counter holds addr | ||
1089 | * of 39 bit entry to be read/written. 32 LSB in _DATA_LOW. 7 MSB in _DATA_HI. | ||
1090 | * RX_DMA_EN must be 0 for RX instr PIO access. DATA_HI should be last access | ||
1091 | * of sequence. | ||
1092 | * DEFAULT: undefined | ||
1093 | */ | ||
1094 | #define REG_HP_INSTR_RAM_ADDR 0x4144 /* HP instruction RAM | ||
1095 | address */ | ||
1096 | #define HP_INSTR_RAM_ADDR_MASK 0x01F /* 5-bit mask */ | ||
1097 | #define REG_HP_INSTR_RAM_DATA_LOW 0x4148 /* HP instruction RAM | ||
1098 | data low */ | ||
1099 | #define HP_INSTR_RAM_LOW_OUTMASK_MASK 0x0000FFFF | ||
1100 | #define HP_INSTR_RAM_LOW_OUTMASK_SHIFT 0 | ||
1101 | #define HP_INSTR_RAM_LOW_OUTSHIFT_MASK 0x000F0000 | ||
1102 | #define HP_INSTR_RAM_LOW_OUTSHIFT_SHIFT 16 | ||
1103 | #define HP_INSTR_RAM_LOW_OUTEN_MASK 0x00300000 | ||
1104 | #define HP_INSTR_RAM_LOW_OUTEN_SHIFT 20 | ||
1105 | #define HP_INSTR_RAM_LOW_OUTARG_MASK 0xFFC00000 | ||
1106 | #define HP_INSTR_RAM_LOW_OUTARG_SHIFT 22 | ||
1107 | #define REG_HP_INSTR_RAM_DATA_MID 0x414C /* HP instruction RAM | ||
1108 | data mid */ | ||
1109 | #define HP_INSTR_RAM_MID_OUTARG_MASK 0x00000003 | ||
1110 | #define HP_INSTR_RAM_MID_OUTARG_SHIFT 0 | ||
1111 | #define HP_INSTR_RAM_MID_OUTOP_MASK 0x0000003C | ||
1112 | #define HP_INSTR_RAM_MID_OUTOP_SHIFT 2 | ||
1113 | #define HP_INSTR_RAM_MID_FNEXT_MASK 0x000007C0 | ||
1114 | #define HP_INSTR_RAM_MID_FNEXT_SHIFT 6 | ||
1115 | #define HP_INSTR_RAM_MID_FOFF_MASK 0x0003F800 | ||
1116 | #define HP_INSTR_RAM_MID_FOFF_SHIFT 11 | ||
1117 | #define HP_INSTR_RAM_MID_SNEXT_MASK 0x007C0000 | ||
1118 | #define HP_INSTR_RAM_MID_SNEXT_SHIFT 18 | ||
1119 | #define HP_INSTR_RAM_MID_SOFF_MASK 0x3F800000 | ||
1120 | #define HP_INSTR_RAM_MID_SOFF_SHIFT 23 | ||
1121 | #define HP_INSTR_RAM_MID_OP_MASK 0xC0000000 | ||
1122 | #define HP_INSTR_RAM_MID_OP_SHIFT 30 | ||
1123 | #define REG_HP_INSTR_RAM_DATA_HI 0x4150 /* HP instruction RAM | ||
1124 | data high */ | ||
1125 | #define HP_INSTR_RAM_HI_VAL_MASK 0x0000FFFF | ||
1126 | #define HP_INSTR_RAM_HI_VAL_SHIFT 0 | ||
1127 | #define HP_INSTR_RAM_HI_MASK_MASK 0xFFFF0000 | ||
1128 | #define HP_INSTR_RAM_HI_MASK_SHIFT 16 | ||
1129 | |||
1130 | /* PIO access into RX Header parser data RAM and flow database. | ||
1131 | * 11-bit register. Data fills the LSB portion of bus if less than 32 bits. | ||
1132 | * DATA_RAM: write RAM_FDB_DATA with index to access DATA_RAM. | ||
1133 | * RAM bytes = 4*(x - 1) + [3:0]. e.g., 0 -> [3:0], 31 -> [123:120] | ||
1134 | * FLOWDB: write DATA_RAM_FDB register and then read/write FDB1-12 to access | ||
1135 | * flow database. | ||
1136 | * RX_DMA_EN must be 0 for RX parser RAM PIO access. RX Parser RAM data reg | ||
1137 | * should be the last write access of the write sequence. | ||
1138 | * DEFAULT: undefined | ||
1139 | */ | ||
1140 | #define REG_HP_DATA_RAM_FDB_ADDR 0x4154 /* HP data and FDB | ||
1141 | RAM address */ | ||
1142 | #define HP_DATA_RAM_FDB_DATA_MASK 0x001F /* select 1 of 86 byte | ||
1143 | locations in header | ||
1144 | parser data ram to | ||
1145 | read/write */ | ||
1146 | #define HP_DATA_RAM_FDB_FDB_MASK 0x3F00 /* 1 of 64 353-bit locations | ||
1147 | in the flow database */ | ||
1148 | #define REG_HP_DATA_RAM_DATA 0x4158 /* HP data RAM data */ | ||
1149 | |||
1150 | /* HP flow database registers: 1 - 12, 0x415C - 0x4188, 4 8-bit bytes | ||
1151 | * FLOW_DB(1) = IP_SA[127:96], FLOW_DB(2) = IP_SA[95:64] | ||
1152 | * FLOW_DB(3) = IP_SA[63:32], FLOW_DB(4) = IP_SA[31:0] | ||
1153 | * FLOW_DB(5) = IP_DA[127:96], FLOW_DB(6) = IP_DA[95:64] | ||
1154 | * FLOW_DB(7) = IP_DA[63:32], FLOW_DB(8) = IP_DA[31:0] | ||
1155 | * FLOW_DB(9) = {TCP_SP[15:0],TCP_DP[15:0]} | ||
1156 | * FLOW_DB(10) = bit 0 holds the flow valid flag | ||
1157 | * FLOW_DB(11) = TCP_SEQ[63:32], FLOW_DB(12) = TCP_SEQ[31:0] | ||
1158 | */ | ||
1159 | #define REG_HP_FLOW_DB0 0x415C /* HP flow database 1 reg */ | ||
1160 | #define REG_HP_FLOW_DBN(x) (REG_HP_FLOW_DB0 + (x)*4) | ||
1161 | |||
1162 | /* diagnostics for RX Header Parser block. | ||
1163 | * ASUN: the header parser state machine register is used for diagnostics | ||
1164 | * purposes. however, the spec doesn't have any details on it. | ||
1165 | */ | ||
1166 | #define REG_HP_STATE_MACHINE 0x418C /* (ro) HP state machine */ | ||
1167 | #define REG_HP_STATUS0 0x4190 /* (ro) HP status 1 */ | ||
1168 | #define HP_STATUS0_SAP_MASK 0xFFFF0000 /* SAP */ | ||
1169 | #define HP_STATUS0_L3_OFF_MASK 0x0000FE00 /* L3 offset */ | ||
1170 | #define HP_STATUS0_LB_CPUNUM_MASK 0x000001F8 /* load balancing CPU | ||
1171 | number */ | ||
1172 | #define HP_STATUS0_HRP_OPCODE_MASK 0x00000007 /* HRP opcode */ | ||
1173 | |||
1174 | #define REG_HP_STATUS1 0x4194 /* (ro) HP status 2 */ | ||
1175 | #define HP_STATUS1_ACCUR2_MASK 0xE0000000 /* accu R2[6:4] */ | ||
1176 | #define HP_STATUS1_FLOWID_MASK 0x1F800000 /* flow id */ | ||
1177 | #define HP_STATUS1_TCP_OFF_MASK 0x007F0000 /* tcp payload offset */ | ||
1178 | #define HP_STATUS1_TCP_SIZE_MASK 0x0000FFFF /* tcp payload size */ | ||
1179 | |||
1180 | #define REG_HP_STATUS2 0x4198 /* (ro) HP status 3 */ | ||
1181 | #define HP_STATUS2_ACCUR2_MASK 0xF0000000 /* accu R2[3:0] */ | ||
1182 | #define HP_STATUS2_CSUM_OFF_MASK 0x07F00000 /* checksum start | ||
1183 | offset */ | ||
1184 | #define HP_STATUS2_ACCUR1_MASK 0x000FE000 /* accu R1 */ | ||
1185 | #define HP_STATUS2_FORCE_DROP 0x00001000 /* force drop */ | ||
1186 | #define HP_STATUS2_BWO_REASSM 0x00000800 /* batching w/o | ||
1187 | reassembly */ | ||
1188 | #define HP_STATUS2_JH_SPLIT_EN 0x00000400 /* jumbo header split | ||
1189 | enable */ | ||
1190 | #define HP_STATUS2_FORCE_TCP_NOCHECK 0x00000200 /* force tcp no payload | ||
1191 | check */ | ||
1192 | #define HP_STATUS2_DATA_MASK_ZERO 0x00000100 /* mask of data length | ||
1193 | equal to zero */ | ||
1194 | #define HP_STATUS2_FORCE_TCP_CHECK 0x00000080 /* force tcp payload | ||
1195 | chk */ | ||
1196 | #define HP_STATUS2_MASK_TCP_THRESH 0x00000040 /* mask of payload | ||
1197 | threshold */ | ||
1198 | #define HP_STATUS2_NO_ASSIST 0x00000020 /* no assist */ | ||
1199 | #define HP_STATUS2_CTRL_PACKET_FLAG 0x00000010 /* control packet flag */ | ||
1200 | #define HP_STATUS2_TCP_FLAG_CHECK 0x00000008 /* tcp flag check */ | ||
1201 | #define HP_STATUS2_SYN_FLAG 0x00000004 /* syn flag */ | ||
1202 | #define HP_STATUS2_TCP_CHECK 0x00000002 /* tcp payload chk */ | ||
1203 | #define HP_STATUS2_TCP_NOCHECK 0x00000001 /* tcp no payload chk */ | ||
1204 | |||
1205 | /* BIST for header parser(HP) and flow database memories (FDBM). set _START | ||
1206 | * to start BIST. controller clears _START on completion. _START can also | ||
1207 | * be cleared to force termination of BIST. a bit set indicates that the | ||
1208 | * corresponding memory passed its BIST. | ||
1209 | */ | ||
1210 | #define REG_HP_RAM_BIST 0x419C /* HP RAM BIST reg */ | ||
1211 | #define HP_RAM_BIST_HP_DATA_PASS 0x80000000 /* HP data ram */ | ||
1212 | #define HP_RAM_BIST_HP_INSTR0_PASS 0x40000000 /* HP instr ram 0 */ | ||
1213 | #define HP_RAM_BIST_HP_INSTR1_PASS 0x20000000 /* HP instr ram 1 */ | ||
1214 | #define HP_RAM_BIST_HP_INSTR2_PASS 0x10000000 /* HP instr ram 2 */ | ||
1215 | #define HP_RAM_BIST_FDBM_AGE0_PASS 0x08000000 /* FDBM aging RAM0 */ | ||
1216 | #define HP_RAM_BIST_FDBM_AGE1_PASS 0x04000000 /* FDBM aging RAM1 */ | ||
1217 | #define HP_RAM_BIST_FDBM_FLOWID00_PASS 0x02000000 /* FDBM flowid RAM0 | ||
1218 | bank 0 */ | ||
1219 | #define HP_RAM_BIST_FDBM_FLOWID10_PASS 0x01000000 /* FDBM flowid RAM1 | ||
1220 | bank 0 */ | ||
1221 | #define HP_RAM_BIST_FDBM_FLOWID20_PASS 0x00800000 /* FDBM flowid RAM2 | ||
1222 | bank 0 */ | ||
1223 | #define HP_RAM_BIST_FDBM_FLOWID30_PASS 0x00400000 /* FDBM flowid RAM3 | ||
1224 | bank 0 */ | ||
1225 | #define HP_RAM_BIST_FDBM_FLOWID01_PASS 0x00200000 /* FDBM flowid RAM0 | ||
1226 | bank 1 */ | ||
1227 | #define HP_RAM_BIST_FDBM_FLOWID11_PASS 0x00100000 /* FDBM flowid RAM1 | ||
1228 | bank 1 */ | ||
1229 | #define HP_RAM_BIST_FDBM_FLOWID21_PASS 0x00080000 /* FDBM flowid RAM2 | ||
1230 | bank 1 */ | ||
1231 | #define HP_RAM_BIST_FDBM_FLOWID31_PASS 0x00040000 /* FDBM flowid RAM3 | ||
1232 | bank 1 */ | ||
1233 | #define HP_RAM_BIST_FDBM_TCPSEQ_PASS 0x00020000 /* FDBM tcp sequence | ||
1234 | RAM */ | ||
1235 | #define HP_RAM_BIST_SUMMARY 0x00000002 /* all BIST tests */ | ||
1236 | #define HP_RAM_BIST_START 0x00000001 /* start/stop BIST */ | ||
1237 | |||
1238 | |||
1239 | /** MAC registers. **/ | ||
1240 | /* reset bits are set using a PIO write and self-cleared after the command | ||
1241 | * execution has completed. | ||
1242 | */ | ||
1243 | #define REG_MAC_TX_RESET 0x6000 /* TX MAC software reset | ||
1244 | command (default: 0x0) */ | ||
1245 | #define REG_MAC_RX_RESET 0x6004 /* RX MAC software reset | ||
1246 | command (default: 0x0) */ | ||
1247 | /* execute a pause flow control frame transmission | ||
1248 | DEFAULT: 0x0XXXX */ | ||
1249 | #define REG_MAC_SEND_PAUSE 0x6008 /* send pause command reg */ | ||
1250 | #define MAC_SEND_PAUSE_TIME_MASK 0x0000FFFF /* value of pause time | ||
1251 | to be sent on network | ||
1252 | in units of slot | ||
1253 | times */ | ||
1254 | #define MAC_SEND_PAUSE_SEND 0x00010000 /* send pause flow ctrl | ||
1255 | frame on network */ | ||
1256 | |||
1257 | /* bit set indicates that an event occurred. auto-cleared when the status | ||
1258 | * register is read. each event has a corresponding mask bit. events will | ||
1259 | * trigger an interrupt if the corresponding mask bit is 0. | ||
1260 | * status register default: 0x00000000 | ||
1261 | * mask register default = 0xFFFFFFFF on reset | ||
1262 | */ | ||
1263 | #define REG_MAC_TX_STATUS 0x6010 /* TX MAC status reg */ | ||
1264 | #define MAC_TX_FRAME_XMIT 0x0001 /* successful frame | ||
1265 | transmission */ | ||
1266 | #define MAC_TX_UNDERRUN 0x0002 /* terminated frame | ||
1267 | transmission due to | ||
1268 | data starvation in the | ||
1269 | xmit data path */ | ||
1270 | #define MAC_TX_MAX_PACKET_ERR 0x0004 /* frame exceeds max allowed | ||
1271 | length passed to TX MAC | ||
1272 | by the DMA engine */ | ||
1273 | #define MAC_TX_COLL_NORMAL 0x0008 /* rollover of the normal | ||
1274 | collision counter */ | ||
1275 | #define MAC_TX_COLL_EXCESS 0x0010 /* rollover of the excessive | ||
1276 | collision counter */ | ||
1277 | #define MAC_TX_COLL_LATE 0x0020 /* rollover of the late | ||
1278 | collision counter */ | ||
1279 | #define MAC_TX_COLL_FIRST 0x0040 /* rollover of the first | ||
1280 | collision counter */ | ||
1281 | #define MAC_TX_DEFER_TIMER 0x0080 /* rollover of the defer | ||
1282 | timer */ | ||
1283 | #define MAC_TX_PEAK_ATTEMPTS 0x0100 /* rollover of the peak | ||
1284 | attempts counter */ | ||
1285 | |||
1286 | #define REG_MAC_RX_STATUS 0x6014 /* RX MAC status reg */ | ||
1287 | #define MAC_RX_FRAME_RECV 0x0001 /* successful receipt of | ||
1288 | a frame */ | ||
1289 | #define MAC_RX_OVERFLOW 0x0002 /* dropped frame due to | ||
1290 | RX FIFO overflow */ | ||
1291 | #define MAC_RX_FRAME_COUNT 0x0004 /* rollover of receive frame | ||
1292 | counter */ | ||
1293 | #define MAC_RX_ALIGN_ERR 0x0008 /* rollover of alignment | ||
1294 | error counter */ | ||
1295 | #define MAC_RX_CRC_ERR 0x0010 /* rollover of crc error | ||
1296 | counter */ | ||
1297 | #define MAC_RX_LEN_ERR 0x0020 /* rollover of length | ||
1298 | error counter */ | ||
1299 | #define MAC_RX_VIOL_ERR 0x0040 /* rollover of code | ||
1300 | violation error */ | ||
1301 | |||
1302 | /* DEFAULT: 0xXXXX0000 on reset */ | ||
1303 | #define REG_MAC_CTRL_STATUS 0x6018 /* MAC control status reg */ | ||
1304 | #define MAC_CTRL_PAUSE_RECEIVED 0x00000001 /* successful | ||
1305 | reception of a | ||
1306 | pause control | ||
1307 | frame */ | ||
1308 | #define MAC_CTRL_PAUSE_STATE 0x00000002 /* MAC has made a | ||
1309 | transition from | ||
1310 | "not paused" to | ||
1311 | "paused" */ | ||
1312 | #define MAC_CTRL_NOPAUSE_STATE 0x00000004 /* MAC has made a | ||
1313 | transition from | ||
1314 | "paused" to "not | ||
1315 | paused" */ | ||
1316 | #define MAC_CTRL_PAUSE_TIME_MASK 0xFFFF0000 /* value of pause time | ||
1317 | operand that was | ||
1318 | received in the last | ||
1319 | pause flow control | ||
1320 | frame */ | ||
1321 | |||
1322 | /* layout identical to TX MAC[8:0] */ | ||
1323 | #define REG_MAC_TX_MASK 0x6020 /* TX MAC mask reg */ | ||
1324 | /* layout identical to RX MAC[6:0] */ | ||
1325 | #define REG_MAC_RX_MASK 0x6024 /* RX MAC mask reg */ | ||
1326 | /* layout identical to CTRL MAC[2:0] */ | ||
1327 | #define REG_MAC_CTRL_MASK 0x6028 /* MAC control mask reg */ | ||
1328 | |||
1329 | /* to ensure proper operation, CFG_EN must be cleared to 0 and a delay | ||
1330 | * imposed before writes to other bits in the TX_MAC_CFG register or any of | ||
1331 | * the MAC parameters are performed. delay dependent upon time required to | ||
1332 | * transmit a maximum size frame (= MAC_FRAMESIZE_MAX*8/Mbps). e.g., | ||
1333 | * the delay for a 1518-byte frame on a 100Mbps network is 125us. | ||
1334 | * alternatively, just poll TX_CFG_EN until it reads back as 0. | ||
1335 | * NOTE: on half-duplex 1Gbps, TX_CFG_CARRIER_EXTEND and | ||
1336 | * RX_CFG_CARRIER_EXTEND should be set and the SLOT_TIME register should | ||
1337 | * be 0x200 (slot time of 512 bytes) | ||
1338 | */ | ||
1339 | #define REG_MAC_TX_CFG 0x6030 /* TX MAC config reg */ | ||
1340 | #define MAC_TX_CFG_EN 0x0001 /* enable TX MAC. 0 will | ||
1341 | force TXMAC state | ||
1342 | machine to remain in | ||
1343 | idle state or to | ||
1344 | transition to idle state | ||
1345 | on completion of an | ||
1346 | ongoing packet. */ | ||
1347 | #define MAC_TX_CFG_IGNORE_CARRIER 0x0002 /* disable CSMA/CD deferral | ||
1348 | process. set to 1 when | ||
1349 | full duplex and 0 when | ||
1350 | half duplex */ | ||
1351 | #define MAC_TX_CFG_IGNORE_COLL 0x0004 /* disable CSMA/CD backoff | ||
1352 | algorithm. set to 1 when | ||
1353 | full duplex and 0 when | ||
1354 | half duplex */ | ||
1355 | #define MAC_TX_CFG_IPG_EN 0x0008 /* enable extension of the | ||
1356 | Rx-to-TX IPG. after | ||
1357 | receiving a frame, TX | ||
1358 | MAC will reset its | ||
1359 | deferral process to | ||
1360 | carrier sense for the | ||
1361 | amount of time = IPG0 + | ||
1362 | IPG1 and commit to | ||
1363 | transmission for time | ||
1364 | specified in IPG2. when | ||
1365 | 0 or when xmitting frames | ||
1366 | back-to-back (Tx-to-Tx | ||
1367 | IPG), TX MAC ignores | ||
1368 | IPG0 and will only use | ||
1369 | IPG1 for deferral time. | ||
1370 | IPG2 still used. */ | ||
1371 | #define MAC_TX_CFG_NEVER_GIVE_UP_EN 0x0010 /* TX MAC will not easily | ||
1372 | give up on frame | ||
1373 | xmission. if backoff | ||
1374 | algorithm reaches the | ||
1375 | ATTEMPT_LIMIT, it will | ||
1376 | clear attempts counter | ||
1377 | and continue trying to | ||
1378 | send the frame as | ||
1379 | specified by | ||
1380 | GIVE_UP_LIM. when 0, | ||
1381 | TX MAC will execute | ||
1382 | standard CSMA/CD prot. */ | ||
1383 | #define MAC_TX_CFG_NEVER_GIVE_UP_LIM 0x0020 /* when set, TX MAC will | ||
1384 | continue to try to xmit | ||
1385 | until successful. when | ||
1386 | 0, TX MAC will continue | ||
1387 | to try xmitting until | ||
1388 | successful or backoff | ||
1389 | algorithm reaches | ||
1390 | ATTEMPT_LIMIT*16 */ | ||
1391 | #define MAC_TX_CFG_NO_BACKOFF 0x0040 /* modify CSMA/CD to disable | ||
1392 | backoff algorithm. TX | ||
1393 | MAC will not back off | ||
1394 | after a xmission attempt | ||
1395 | that resulted in a | ||
1396 | collision. */ | ||
1397 | #define MAC_TX_CFG_SLOW_DOWN 0x0080 /* modify CSMA/CD so that | ||
1398 | deferral process is reset | ||
1399 | in response to carrier | ||
1400 | sense during the entire | ||
1401 | duration of IPG. TX MAC | ||
1402 | will only commit to frame | ||
1403 | xmission after frame | ||
1404 | xmission has actually | ||
1405 | begun. */ | ||
1406 | #define MAC_TX_CFG_NO_FCS 0x0100 /* TX MAC will not generate | ||
1407 | CRC for all xmitted | ||
1408 | packets. when clear, CRC | ||
1409 | generation is dependent | ||
1410 | upon NO_CRC bit in the | ||
1411 | xmit control word from | ||
1412 | TX DMA */ | ||
1413 | #define MAC_TX_CFG_CARRIER_EXTEND 0x0200 /* enables xmit part of the | ||
1414 | carrier extension | ||
1415 | feature. this allows for | ||
1416 | longer collision domains | ||
1417 | by extending the carrier | ||
1418 | and collision window | ||
1419 | from the end of FCS until | ||
1420 | the end of the slot time | ||
1421 | if necessary. Required | ||
1422 | for half-duplex at 1Gbps, | ||
1423 | clear otherwise. */ | ||
1424 | |||
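A sketch of the disable sequence called for in the comment above REG_MAC_TX_CFG: clear the enable bit, then poll it back to 0 rather than open-coding the worst-case frame-time delay. The iteration count and udelay() step are illustrative:

static int mac_tx_disable(void __iomem *regs)
{
	int i;

	writel(readl(regs + REG_MAC_TX_CFG) & ~MAC_TX_CFG_EN,
	       regs + REG_MAC_TX_CFG);
	for (i = 0; i < 32; i++) {
		if (!(readl(regs + REG_MAC_TX_CFG) & MAC_TX_CFG_EN))
			return 0;
		udelay(125);	/* ~ one max-size frame at 100Mbps */
	}
	return -ETIMEDOUT;
}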
1425 | /* when CRC is not stripped, reassembly packets will not contain the CRC. | ||
1426 | * these will be stripped by HRP because it reassembles layer 4 data, and the | ||
1427 | * CRC is layer 2. however, non-reassembly packets will still contain the CRC | ||
1428 | * when passed to the host. to ensure proper operation, need to wait 3.2ms | ||
1429 | * after clearing RX_CFG_EN before writing to any other RX MAC registers | ||
1430 | * or other MAC parameters. alternatively, poll RX_CFG_EN until it clears | ||
1431 | * to 0. similarly, HASH_FILTER_EN and ADDR_FILTER_EN have the same | ||
1432 | * restrictions as CFG_EN. | ||
1433 | */ | ||
1434 | #define REG_MAC_RX_CFG 0x6034 /* RX MAC config reg */ | ||
1435 | #define MAC_RX_CFG_EN 0x0001 /* enable RX MAC */ | ||
1436 | #define MAC_RX_CFG_STRIP_PAD 0x0002 /* always program to 0. | ||
1437 | feature not supported */ | ||
1438 | #define MAC_RX_CFG_STRIP_FCS 0x0004 /* RX MAC will strip the | ||
1439 | last 4 bytes of a | ||
1440 | received frame. */ | ||
1441 | #define MAC_RX_CFG_PROMISC_EN 0x0008 /* promiscuous mode */ | ||
1442 | #define MAC_RX_CFG_PROMISC_GROUP_EN 0x0010 /* accept all valid | ||
1443 | multicast frames (group | ||
1444 | bit in DA field set) */ | ||
1445 | #define MAC_RX_CFG_HASH_FILTER_EN 0x0020 /* use hash table to filter | ||
1446 | multicast addresses */ | ||
1447 | #define MAC_RX_CFG_ADDR_FILTER_EN 0x0040 /* cause RX MAC to use | ||
1448 | address filtering regs | ||
1449 | to filter both unicast | ||
1450 | and multicast | ||
1451 | addresses */ | ||
1452 | #define MAC_RX_CFG_DISABLE_DISCARD 0x0080 /* pass errored frames to | ||
1453 | RX DMA by setting BAD | ||
1454 | bit but not Abort bit | ||
1455 | in the status. CRC, | ||
1456 | framing, and length errs | ||
1457 | will not increment | ||
1458 | error counters. frames | ||
1459 | which don't match dest | ||
1460 | addr will be passed up | ||
1461 | w/ BAD bit set. */ | ||
1462 | #define MAC_RX_CFG_CARRIER_EXTEND 0x0100 /* enable reception of | ||
1463 | packet bursts generated | ||
1464 | by carrier extension | ||
1465 | with packet bursting | ||
1466 | senders. only applies | ||
1467 | to half-duplex 1Gbps */ | ||
1468 | |||
1469 | /* DEFAULT: 0x0 */ | ||
1470 | #define REG_MAC_CTRL_CFG 0x6038 /* MAC control config reg */ | ||
1471 | #define MAC_CTRL_CFG_SEND_PAUSE_EN 0x0001 /* respond to requests for | ||
1472 | sending pause flow ctrl | ||
1473 | frames */ | ||
1474 | #define MAC_CTRL_CFG_RECV_PAUSE_EN 0x0002 /* respond to received | ||
1475 | pause flow ctrl frames */ | ||
1476 | #define MAC_CTRL_CFG_PASS_CTRL 0x0004 /* pass valid MAC ctrl | ||
1477 | packets to RX DMA */ | ||
1478 | |||
1479 | /* to ensure proper operation, a global initialization sequence should be | ||
1480 | * performed when a loopback config is entered or exited. if programmed after | ||
1481 | * a hw or global sw reset, RX/TX MAC software reset and initialization | ||
1482 | * should be done to ensure stable clocking. | ||
1483 | * DEFAULT: 0x0 | ||
1484 | */ | ||
1485 | #define REG_MAC_XIF_CFG 0x603C /* XIF config reg */ | ||
1486 | #define MAC_XIF_TX_MII_OUTPUT_EN 0x0001 /* enable output drivers | ||
1487 | on MII xmit bus */ | ||
1488 | #define MAC_XIF_MII_INT_LOOPBACK 0x0002 /* loopback GMII xmit data | ||
1489 | path to GMII recv data | ||
1490 | path. phy mode register | ||
1491 | clock selection must be | ||
1492 | set to GMII mode and | ||
1493 | GMII_MODE should be set | ||
1494 | to 1. in loopback mode, | ||
1495 | REFCLK will drive the | ||
1496 | entire mac core. 0 for | ||
1497 | normal operation. */ | ||
1498 | #define MAC_XIF_DISABLE_ECHO 0x0004 /* disables receive data | ||
1499 | path during packet | ||
1500 | xmission. clear to 0 | ||
1501 | in any full duplex mode, | ||
1502 | in any loopback mode, | ||
1503 | or in half-duplex SERDES | ||
1504 | or SLINK modes. set when | ||
1505 | in half-duplex mode | ||
1506 | using an external phy. */ | ||
1507 | #define MAC_XIF_GMII_MODE 0x0008 /* MAC operates with GMII | ||
1508 | clocks and datapath */ | ||
1509 | #define MAC_XIF_MII_BUFFER_OUTPUT_EN 0x0010 /* MII_BUF_EN pin. enable | ||
1510 | external tristate buffer | ||
1511 | on the MII receive | ||
1512 | bus. */ | ||
1513 | #define MAC_XIF_LINK_LED 0x0020 /* LINKLED# active (low) */ | ||
1514 | #define MAC_XIF_FDPLX_LED 0x0040 /* FDPLXLED# active (low) */ | ||
1515 | |||
1516 | #define REG_MAC_IPG0 0x6040 /* inter-packet gap0 reg. | ||
1517 | recommended: 0x00 */ | ||
1518 | #define REG_MAC_IPG1 0x6044 /* inter-packet gap1 reg | ||
1519 | recommended: 0x08 */ | ||
1520 | #define REG_MAC_IPG2 0x6048 /* inter-packet gap2 reg | ||
1521 | recommended: 0x04 */ | ||
1522 | #define REG_MAC_SLOT_TIME 0x604C /* slot time reg | ||
1523 | recommended: 0x40 */ | ||
1524 | #define REG_MAC_FRAMESIZE_MIN 0x6050 /* min frame size reg | ||
1525 | recommended: 0x40 */ | ||
1526 | |||
1527 | /* FRAMESIZE_MAX holds both the max frame size as well as the max burst size. | ||
1528 | * recommended value: 0x2000.05EE (burst = 0x2000, frame = 0x05EE = 1518) | ||
1529 | */ | ||
1530 | #define REG_MAC_FRAMESIZE_MAX 0x6054 /* max frame size reg */ | ||
1531 | #define MAC_FRAMESIZE_MAX_BURST_MASK 0x3FFF0000 /* max burst size */ | ||
1532 | #define MAC_FRAMESIZE_MAX_BURST_SHIFT 16 | ||
1533 | #define MAC_FRAMESIZE_MAX_FRAME_MASK 0x00007FFF /* max frame size */ | ||
1534 | #define MAC_FRAMESIZE_MAX_FRAME_SHIFT 0 | ||
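So the recommended value packs burst = 0x2000 and max frame = 0x05EE (1518 bytes); a sketch (the helper name is illustrative):

static inline u32 mac_framesize_max(u32 burst, u32 frame)
{
	return ((burst << MAC_FRAMESIZE_MAX_BURST_SHIFT) &
		MAC_FRAMESIZE_MAX_BURST_MASK) |
	       (frame & MAC_FRAMESIZE_MAX_FRAME_MASK);	/* frame shift is 0 */
}	/* mac_framesize_max(0x2000, 0x05EE) == 0x200005EE */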
1535 | #define REG_MAC_PA_SIZE 0x6058 /* PA size reg. number of | ||
1536 | preamble bytes that the | ||
1537 | TX MAC will xmit at the | ||
1538 | beginning of each frame | ||
1539 | value should be 2 or | ||
1540 | greater. recommended | ||
1541 | value: 0x07 */ | ||
1542 | #define REG_MAC_JAM_SIZE 0x605C /* jam size reg. duration | ||
1543 | of jam in units of media | ||
1544 | byte time. recommended | ||
1545 | value: 0x04 */ | ||
1546 | #define REG_MAC_ATTEMPT_LIMIT 0x6060 /* attempt limit reg. # | ||
1547 | of attempts TX MAC will | ||
1548 | make to xmit a frame | ||
1549 | before it resets its | ||
1550 | attempts counter. after | ||
1551 | the limit has been | ||
1552 | reached, TX MAC may or | ||
1553 | may not drop the frame | ||
1554 | dependent upon value | ||
1555 | in TX_MAC_CFG. | ||
1556 | recommended | ||
1557 | value: 0x10 */ | ||
1558 | #define REG_MAC_CTRL_TYPE 0x6064 /* MAC control type reg. | ||
1559 | type field of a MAC | ||
1560 | ctrl frame. recommended | ||
1561 | value: 0x8808 */ | ||
1562 | |||
1563 | /* mac address registers: 0 - 44, 0x6080 - 0x6130, 4 8-bit bytes. | ||
1564 | * register contains comparison | ||
1565 | * 0 16 MSB of primary MAC addr [47:32] of DA field | ||
1566 | * 1 16 middle bits "" [31:16] of DA field | ||
1567 | * 2 16 LSB "" [15:0] of DA field | ||
1568 | * 3*x 16 MSB of alt MAC addr 1-15 [47:32] of DA field | ||
1569 | * 4*x 16 middle bits "" [31:16] | ||
1570 | * 5*x 16 LSB "" [15:0] | ||
1571 | * 42 16 MSB of MAC CTRL addr [47:32] of DA. | ||
1572 | * 43 16 middle bits "" [31:16] | ||
1573 | * 44 16 LSB "" [15:0] | ||
1574 | * MAC CTRL addr must be the reserved multicast addr for MAC CTRL frames. | ||
1575 | * if there is a match, MAC will set the bit for alternative address | ||
1576 | * filter pass [15] | ||
1577 | |||
1578 | * here is the map of registers given MAC address notation: a:b:c:d:e:f | ||
1579 | * ab cd ef | ||
1580 | * primary addr reg 2 reg 1 reg 0 | ||
1581 | * alt addr 1 reg 5 reg 4 reg 3 | ||
1582 | * alt addr x reg 5*x reg 4*x reg 3*x | ||
1583 | * ctrl addr reg 44 reg 43 reg 42 | ||
1584 | */ | ||
1585 | #define REG_MAC_ADDR0 0x6080 /* MAC address 0 reg */ | ||
1586 | #define REG_MAC_ADDRN(x) (REG_MAC_ADDR0 + (x)*4) | ||
1587 | #define REG_MAC_ADDR_FILTER0 0x614C /* address filter 0 reg | ||
1588 | [47:32] */ | ||
1589 | #define REG_MAC_ADDR_FILTER1 0x6150 /* address filter 1 reg | ||
1590 | [31:16] */ | ||
1591 | #define REG_MAC_ADDR_FILTER2 0x6154 /* address filter 2 reg | ||
1592 | [15:0] */ | ||
1593 | #define REG_MAC_ADDR_FILTER2_1_MASK 0x6158 /* address filter 2 and 1 | ||
1594 | mask reg. 8-bit reg | ||
1595 | contains nibble mask for | ||
1596 | reg 2 and 1. */ | ||
1597 | #define REG_MAC_ADDR_FILTER0_MASK 0x615C /* address filter 0 mask | ||
1598 | reg */ | ||
1599 | |||
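Following the a:b:c:d:e:f map above, a sketch loading the primary address (assuming mac[0] = a ... mac[5] = f, i.e. network order; the helper name is illustrative):

static void mac_set_primary_addr(void __iomem *regs, const u8 *mac)
{
	writel((mac[4] << 8) | mac[5], regs + REG_MAC_ADDRN(0));	/* ef */
	writel((mac[2] << 8) | mac[3], regs + REG_MAC_ADDRN(1));	/* cd */
	writel((mac[0] << 8) | mac[1], regs + REG_MAC_ADDRN(2));	/* ab */
}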
1600 | /* hash table registers: 0 - 15, 0x6160 - 0x619C, 4 8-bit bytes | ||
1601 | * 16-bit registers contain bits of the hash table. | ||
1602 | * reg x -> [16*(15 - x) + 15 : 16*(15 - x)]. | ||
1603 | * e.g., 15 -> [15:0], 0 -> [255:240] | ||
1604 | */ | ||
1605 | #define REG_MAC_HASH_TABLE0 0x6160 /* hash table 0 reg */ | ||
1606 | #define REG_MAC_HASH_TABLEN(x) (REG_MAC_HASH_TABLE0 + (x)*4) | ||
1607 | |||
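Inverting that mapping, bit n (0..255) of the hash lands in register 15 - (n >> 4) at bit n & 0xF; a sketch against a shadow array whose entries would then be written out via REG_MAC_HASH_TABLEN(x):

static void mac_hash_set_bit(u16 hash[16], unsigned int n)
{
	/* reg x holds bits [16*(15-x)+15 : 16*(15-x)] */
	hash[15 - (n >> 4)] |= 1 << (n & 0xF);
}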
1608 | /* statistics registers. these registers generate an interrupt on | ||
1609 | * overflow. recommended initialization: 0x0000. most are 16-bits except | ||
1610 | * for PEAK_ATTEMPTS register which is 8 bits. | ||
1611 | */ | ||
1612 | #define REG_MAC_COLL_NORMAL 0x61A0 /* normal collision | ||
1613 | counter. */ | ||
1614 | #define REG_MAC_COLL_FIRST 0x61A4 /* first attempt | ||
1615 | successful collision | ||
1616 | counter */ | ||
1617 | #define REG_MAC_COLL_EXCESS 0x61A8 /* excessive collision | ||
1618 | counter */ | ||
1619 | #define REG_MAC_COLL_LATE 0x61AC /* late collision counter */ | ||
1620 | #define REG_MAC_TIMER_DEFER 0x61B0 /* defer timer. time base | ||
1621 | is the media byte | ||
1622 | clock/256 */ | ||
1623 | #define REG_MAC_ATTEMPTS_PEAK 0x61B4 /* peak attempts reg */ | ||
1624 | #define REG_MAC_RECV_FRAME 0x61B8 /* receive frame counter */ | ||
1625 | #define REG_MAC_LEN_ERR 0x61BC /* length error counter */ | ||
1626 | #define REG_MAC_ALIGN_ERR 0x61C0 /* alignment error counter */ | ||
1627 | #define REG_MAC_FCS_ERR 0x61C4 /* FCS error counter */ | ||
1628 | #define REG_MAC_RX_CODE_ERR 0x61C8 /* RX code violation | ||
1629 | error counter */ | ||
1630 | |||
1631 | /* misc registers */ | ||
1632 | #define REG_MAC_RANDOM_SEED 0x61CC /* random number seed reg. | ||
1633 | 10-bit register used as a | ||
1634 | seed for the random number | ||
1635 | generator for the CSMA/CD | ||
1636 | backoff algorithm. only | ||
1637 | programmed after power-on | ||
1638 | reset and should be a | ||
1639 | random value which has a | ||
1640 | high likelihood of being | ||
1641 | unique for each MAC | ||
1642 | attached to a network | ||
1643 | segment (e.g., 10 LSB of | ||
1644 | MAC address) */ | ||
1645 | |||
1646 | /* ASUN: there's a PAUSE_TIMER (ro) described, but it's not in the address | ||
1647 | * map | ||
1648 | */ | ||
1649 | |||
1650 | /* 27-bit register has the current state for key state machines in the MAC */ | ||
1651 | #define REG_MAC_STATE_MACHINE 0x61D0 /* (ro) state machine reg */ | ||
1652 | #define MAC_SM_RLM_MASK 0x07800000 | ||
1653 | #define MAC_SM_RLM_SHIFT 23 | ||
1654 | #define MAC_SM_RX_FC_MASK 0x00700000 | ||
1655 | #define MAC_SM_RX_FC_SHIFT 20 | ||
1656 | #define MAC_SM_TLM_MASK 0x000F0000 | ||
1657 | #define MAC_SM_TLM_SHIFT 16 | ||
1658 | #define MAC_SM_ENCAP_SM_MASK 0x0000F000 | ||
1659 | #define MAC_SM_ENCAP_SM_SHIFT 12 | ||
1660 | #define MAC_SM_TX_REQ_MASK 0x00000C00 | ||
1661 | #define MAC_SM_TX_REQ_SHIFT 10 | ||
1662 | #define MAC_SM_TX_FC_MASK 0x000003C0 | ||
1663 | #define MAC_SM_TX_FC_SHIFT 6 | ||
1664 | #define MAC_SM_FIFO_WRITE_SEL_MASK 0x00000038 | ||
1665 | #define MAC_SM_FIFO_WRITE_SEL_SHIFT 3 | ||
1666 | #define MAC_SM_TX_FIFO_EMPTY_MASK 0x00000007 | ||
1667 | #define MAC_SM_TX_FIFO_EMPTY_SHIFT 0 | ||
1668 | |||
1669 | /** MIF registers. the MIF can be programmed in either bit-bang or | ||
1670 | * frame mode. | ||
1671 | **/ | ||
1672 | #define REG_MIF_BIT_BANG_CLOCK 0x6200 /* MIF bit-bang clock. | ||
1673 | 1 -> 0 will generate a | ||
1674 | rising edge. 0 -> 1 will | ||
1675 | generate a falling edge. */ | ||
1676 | #define REG_MIF_BIT_BANG_DATA 0x6204 /* MIF bit-bang data. 1-bit | ||
1677 | register generates data */ | ||
1678 | #define REG_MIF_BIT_BANG_OUTPUT_EN 0x6208 /* MIF bit-bang output | ||
1679 | enable. enable when | ||
1680 | xmitting data from MIF to | ||
1681 | transceiver. */ | ||
1682 | |||
1683 | /* 32-bit register serves as an instruction register when the MIF is | ||
1684 | * programmed in frame mode. load this register w/ a valid instruction | ||
1685 | * (as per IEEE 802.3u MII spec). poll this register to check for instruction | ||
1686 | * execution completion. during a read operation, this register will also | ||
1687 | * contain the 16-bit data returned by the transceiver. unless specified | ||
1688 | * otherwise, fields are considered "don't care" when polling for | ||
1689 | * completion. | ||
1690 | */ | ||
1691 | #define REG_MIF_FRAME 0x620C /* MIF frame/output reg */ | ||
1692 | #define MIF_FRAME_START_MASK 0xC0000000 /* start of frame. | ||
1693 | load w/ 01 when | ||
1694 | issuing an instr */ | ||
1695 | #define MIF_FRAME_ST 0x40000000 /* STart of frame */ | ||
1696 | #define MIF_FRAME_OPCODE_MASK 0x30000000 /* opcode. 01 for a | ||
1697 | write. 10 for a | ||
1698 | read */ | ||
1699 | #define MIF_FRAME_OP_READ 0x20000000 /* read OPcode */ | ||
1700 | #define MIF_FRAME_OP_WRITE 0x10000000 /* write OPcode */ | ||
1701 | #define MIF_FRAME_PHY_ADDR_MASK 0x0F800000 /* phy address. when | ||
1702 | issuing an instr, | ||
1703 | this field should be | ||
1704 | loaded w/ the XCVR | ||
1705 | addr */ | ||
1706 | #define MIF_FRAME_PHY_ADDR_SHIFT 23 | ||
1707 | #define MIF_FRAME_REG_ADDR_MASK 0x007C0000 /* register address. | ||
1708 | when issuing an instr, | ||
1709 | addr of register | ||
1710 | to be read/written */ | ||
1711 | #define MIF_FRAME_REG_ADDR_SHIFT 18 | ||
1712 | #define MIF_FRAME_TURN_AROUND_MSB 0x00020000 /* turn around, MSB. | ||
1713 | when issuing an instr, | ||
1714 | set this bit to 1 */ | ||
1715 | #define MIF_FRAME_TURN_AROUND_LSB 0x00010000 /* turn around, LSB. | ||
1716 | when issuing an instr, | ||
1717 | set this bit to 0. | ||
1718 | when polling for | ||
1719 | completion, 1 means | ||
1720 | that instr execution | ||
1721 | has been completed */ | ||
1722 | #define MIF_FRAME_DATA_MASK 0x0000FFFF /* instruction payload | ||
1723 | load with 16-bit data | ||
1724 | to be written in | ||
1725 | transceiver reg for a | ||
1726 | write. doesn't matter | ||
1727 | in a read. when | ||
1728 | polling for | ||
1729 | completion, field is | ||
1730 | "don't care" for write | ||
1731 | and 16-bit data | ||
1732 | returned by the | ||
1733 | transceiver for a | ||
1734 | read (if valid bit | ||
1735 | is set) */ | ||
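
In frame mode the flow described above boils down to: compose a frame word, write it to REG_MIF_FRAME, then poll until the turn-around LSB reads back 1. A minimal sketch using the masks just defined (the regs base pointer, the readl/writel accessors, and the retry count are illustrative assumptions, not the driver's actual helper):

    static int mif_frame_read(void __iomem *regs, int phy_addr, int reg_addr,
                              u16 *data)
    {
            /* ST=01, OP=10 (read), PHY/REG addresses, TA MSB=1, TA LSB=0 */
            u32 cmd = MIF_FRAME_ST | MIF_FRAME_OP_READ |
                      (phy_addr << MIF_FRAME_PHY_ADDR_SHIFT) |
                      (reg_addr << MIF_FRAME_REG_ADDR_SHIFT) |
                      MIF_FRAME_TURN_AROUND_MSB;
            int limit = 100;

            writel(cmd, regs + REG_MIF_FRAME);
            while (--limit > 0) {
                    u32 val = readl(regs + REG_MIF_FRAME);

                    /* TA LSB reads back 1 once execution has completed */
                    if (val & MIF_FRAME_TURN_AROUND_LSB) {
                            *data = val & MIF_FRAME_DATA_MASK;
                            return 0;
                    }
                    udelay(10);
            }
            return -ETIMEDOUT;
    }
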
1736 | #define REG_MIF_CFG 0x6210 /* MIF config reg */ | ||
1737 | #define MIF_CFG_PHY_SELECT 0x0001 /* 1 -> select MDIO_1 | ||
1738 | 0 -> select MDIO_0 */ | ||
1739 | #define MIF_CFG_POLL_EN 0x0002 /* enable polling | ||
1740 | mechanism. if set, | ||
1741 | BB_MODE should be 0 */ | ||
1742 | #define MIF_CFG_BB_MODE 0x0004 /* 1 -> bit-bang mode | ||
1743 | 0 -> frame mode */ | ||
1744 | #define MIF_CFG_POLL_REG_MASK 0x00F8 /* register address to be | ||
1745 | used by polling mode. | ||
1746 | only meaningful if POLL_EN | ||
1747 | is set to 1 */ | ||
1748 | #define MIF_CFG_POLL_REG_SHIFT 3 | ||
1749 | #define MIF_CFG_MDIO_0 0x0100 /* (ro) dual purpose. | ||
1750 | when MDIO_0 is idle, | ||
1751 | 1 -> transceiver is | ||
1752 | connected to MDIO_0. | ||
1753 | when MIF is communicating | ||
1754 | w/ MDIO_0 in bit-bang | ||
1755 | mode, this bit indicates | ||
1756 | the incoming bit stream | ||
1757 | during a read op */ | ||
1758 | #define MIF_CFG_MDIO_1 0x0200 /* (ro) dual purpose. | ||
1759 | when MDIO_1 is idle, | ||
1760 | 1 -> transceiver is | ||
1761 | connected to MDIO_1. | ||
1762 | when MIF is communicating | ||
1763 | w/ MDIO_1 in bit-bang | ||
1764 | mode, this bit indicates | ||
1765 | the incoming bit stream | ||
1766 | during a read op */ | ||
1767 | #define MIF_CFG_POLL_PHY_MASK 0x7C00 /* transceiver address to | ||
1768 | be polled */ | ||
1769 | #define MIF_CFG_POLL_PHY_SHIFT 10 | ||
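
Arming the polling machinery then means clearing BB_MODE, loading the PHY and register fields, and setting POLL_EN. A sketch under the same assumptions as before (MII_BMSR from linux/mii.h is chosen purely as an example of a register worth watching):

    u32 cfg = readl(regs + REG_MIF_CFG);

    cfg &= ~MIF_CFG_BB_MODE;    /* POLL_EN requires frame mode */
    cfg &= ~(MIF_CFG_POLL_REG_MASK | MIF_CFG_POLL_PHY_MASK);
    cfg |= MII_BMSR << MIF_CFG_POLL_REG_SHIFT;    /* register to watch */
    cfg |= phy_addr << MIF_CFG_POLL_PHY_SHIFT;    /* transceiver to poll */
    cfg |= MIF_CFG_POLL_EN;
    writel(cfg, regs + REG_MIF_CFG);
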
1770 | |||
1771 | /* 16-bit register used to determine which bits in the POLL_STATUS portion of | ||
1772 | * the MIF_STATUS register will cause an interrupt. if a mask bit is 0, | ||
1773 | * corresponding bit of the POLL_STATUS will generate a MIF interrupt when | ||
1774 | * set. DEFAULT: 0xFFFF | ||
1775 | */ | ||
1776 | #define REG_MIF_MASK 0x6214 /* MIF mask reg */ | ||
1777 | |||
1778 | /* 32-bit register used when in poll mode. auto-cleared after being read */ | ||
1779 | #define REG_MIF_STATUS 0x6218 /* MIF status reg */ | ||
1780 | #define MIF_STATUS_POLL_DATA_MASK 0xFFFF0000 /* poll data contains | ||
1781 | the "latest image" | ||
1782 | update of the XCVR | ||
1783 | reg being read */ | ||
1784 | #define MIF_STATUS_POLL_DATA_SHIFT 16 | ||
1785 | #define MIF_STATUS_POLL_STATUS_MASK 0x0000FFFF /* poll status indicates | ||
1786 | which bits in the | ||
1787 | POLL_DATA field have | ||
1788 | changed since the | ||
1789 | MIF_STATUS reg was | ||
1790 | last read */ | ||
1791 | #define MIF_STATUS_POLL_STATUS_SHIFT 0 | ||
1792 | |||
1793 | /* 7-bit register has current state for all state machines in the MIF */ | ||
1794 | #define REG_MIF_STATE_MACHINE 0x621C /* MIF state machine reg */ | ||
1795 | #define MIF_SM_CONTROL_MASK 0x07 /* control state machine | ||
1796 | state */ | ||
1797 | #define MIF_SM_EXECUTION_MASK 0x60 /* execution state machine | ||
1798 | state */ | ||
1799 | |||
1800 | /** PCS/Serialink. the following registers are equivalent to the standard | ||
1801 | * MII management registers except that they're directly mapped in | ||
1802 | * Cassini's register space. | ||
1803 | **/ | ||
1804 | |||
1805 | /* the auto-negotiation enable bit should be programmed the same at | ||
1806 | * the link partner as in the local device to enable auto-negotiation to | ||
1807 | * complete. when that bit is reprogrammed, auto-neg/manual config is | ||
1808 | * restarted automatically. | ||
1809 | * DEFAULT: 0x1040 | ||
1810 | */ | ||
1811 | #define REG_PCS_MII_CTRL 0x9000 /* PCS MII control reg */ | ||
1812 | #define PCS_MII_CTRL_1000_SEL 0x0040 /* reads 1. ignored on | ||
1813 | writes */ | ||
1814 | #define PCS_MII_CTRL_COLLISION_TEST 0x0080 /* COL signal at the PCS | ||
1815 | to MAC interface is | ||
1816 | activated regardless | ||
1817 | of activity */ | ||
1818 | #define PCS_MII_CTRL_DUPLEX 0x0100 /* forced 0x0. PCS | ||
1819 | behaviour same for | ||
1820 | half and full dplx */ | ||
1821 | #define PCS_MII_RESTART_AUTONEG 0x0200 /* self clearing. | ||
1822 | restart auto- | ||
1823 | negotiation */ | ||
1824 | #define PCS_MII_ISOLATE 0x0400 /* read as 0. ignored | ||
1825 | on writes */ | ||
1826 | #define PCS_MII_POWER_DOWN 0x0800 /* read as 0. ignored | ||
1827 | on writes */ | ||
1828 | #define PCS_MII_AUTONEG_EN 0x1000 /* default 1. PCS goes | ||
1829 | through automatic | ||
1830 | link config before it | ||
1831 | can be used. when 0, | ||
1832 | link can be used | ||
1833 | w/out any link config | ||
1834 | phase */ | ||
1835 | #define PCS_MII_10_100_SEL 0x2000 /* read as 0. ignored on | ||
1836 | writes */ | ||
1837 | #define PCS_MII_RESET 0x8000 /* reset PCS. self-clears | ||
1838 | when done */ | ||
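
Per the note above, an explicit restart is just a matter of setting the self-clearing bit with auto-negotiation enabled. A sketch (regs and accessors assumed, as before):

    u32 ctl = readl(regs + REG_PCS_MII_CTRL);

    ctl |= PCS_MII_AUTONEG_EN;
    ctl |= PCS_MII_RESTART_AUTONEG;    /* self-clearing */
    writel(ctl, regs + REG_PCS_MII_CTRL);
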
1839 | |||
1840 | /* DEFAULT: 0x0108 */ | ||
1841 | #define REG_PCS_MII_STATUS 0x9004 /* PCS MII status reg */ | ||
1842 | #define PCS_MII_STATUS_EXTEND_CAP 0x0001 /* reads 0 */ | ||
1843 | #define PCS_MII_STATUS_JABBER_DETECT 0x0002 /* reads 0 */ | ||
1844 | #define PCS_MII_STATUS_LINK_STATUS 0x0004 /* 1 -> link up. | ||
1845 | 0 -> link down. 0 is | ||
1846 | latched so that 0 is | ||
1847 | kept until read. read | ||
1848 | 2x to determine if the | ||
1849 | link has gone up again */ | ||
1850 | #define PCS_MII_STATUS_AUTONEG_ABLE 0x0008 /* reads 1 (able to perform | ||
1851 | auto-neg) */ | ||
1852 | #define PCS_MII_STATUS_REMOTE_FAULT 0x0010 /* 1 -> remote fault detected | ||
1853 | from received link code | ||
1854 | word. only valid after | ||
1855 | auto-neg completed */ | ||
1856 | #define PCS_MII_STATUS_AUTONEG_COMP 0x0020 /* 1 -> auto-negotiation | ||
1857 | completed | ||
1858 | 0 -> auto-negotiation not | ||
1859 | completed */ | ||
1860 | #define PCS_MII_STATUS_EXTEND_STATUS 0x0100 /* reads as 1. used as an | ||
1861 | indication that this is | ||
1862 | a 1000 Base-X PHY. writes | ||
1863 | to it are ignored */ | ||
1864 | |||
1865 | /* used during auto-negotiation. | ||
1866 | * DEFAULT: 0x00E0 | ||
1867 | */ | ||
1868 | #define REG_PCS_MII_ADVERT 0x9008 /* PCS MII advertisement | ||
1869 | reg */ | ||
1870 | #define PCS_MII_ADVERT_FD 0x0020 /* advertise full duplex | ||
1871 | 1000 Base-X */ | ||
1872 | #define PCS_MII_ADVERT_HD 0x0040 /* advertise half-duplex | ||
1873 | 1000 Base-X */ | ||
1874 | #define PCS_MII_ADVERT_SYM_PAUSE 0x0080 /* advertise PAUSE | ||
1875 | symmetric capability */ | ||
1876 | #define PCS_MII_ADVERT_ASYM_PAUSE 0x0100 /* advertises PAUSE | ||
1877 | asymmetric capability */ | ||
1878 | #define PCS_MII_ADVERT_RF_MASK 0x3000 /* remote fault. write bit13 | ||
1879 | to optionally indicate to | ||
1880 | link partner that chip is | ||
1881 | going off-line. bit12 will | ||
1882 | get set when signal | ||
1883 | detect == FAIL and will | ||
1884 | remain set until | ||
1885 | successful negotiation */ | ||
1886 | #define PCS_MII_ADVERT_ACK 0x4000 /* (ro) */ | ||
1887 | #define PCS_MII_ADVERT_NEXT_PAGE 0x8000 /* (ro) forced 0x0 */ | ||
1888 | |||
1889 | /* contents updated as a result of autonegotiation. layout and definitions | ||
1890 | * identical to PCS_MII_ADVERT | ||
1891 | */ | ||
1892 | #define REG_PCS_MII_LPA 0x900C /* PCS MII link partner | ||
1893 | ability reg */ | ||
1894 | #define PCS_MII_LPA_FD PCS_MII_ADVERT_FD | ||
1895 | #define PCS_MII_LPA_HD PCS_MII_ADVERT_HD | ||
1896 | #define PCS_MII_LPA_SYM_PAUSE PCS_MII_ADVERT_SYM_PAUSE | ||
1897 | #define PCS_MII_LPA_ASYM_PAUSE PCS_MII_ADVERT_ASYM_PAUSE | ||
1898 | #define PCS_MII_LPA_RF_MASK PCS_MII_ADVERT_RF_MASK | ||
1899 | #define PCS_MII_LPA_ACK PCS_MII_ADVERT_ACK | ||
1900 | #define PCS_MII_LPA_NEXT_PAGE PCS_MII_ADVERT_NEXT_PAGE | ||
1901 | |||
1902 | /* DEFAULT: 0x0 */ | ||
1903 | #define REG_PCS_CFG 0x9010 /* PCS config reg */ | ||
1904 | #define PCS_CFG_EN 0x01 /* enable PCS. must be | ||
1905 | 0 when modifying | ||
1906 | PCS_MII_ADVERT */ | ||
1907 | #define PCS_CFG_SD_OVERRIDE 0x02 /* sets signal detect to | ||
1908 | OK. bit is | ||
1909 | non-resettable */ | ||
1910 | #define PCS_CFG_SD_ACTIVE_LOW 0x04 /* changes interpretation | ||
1911 | of optical signal to make | ||
1912 | signal detect okay when | ||
1913 | signal is low */ | ||
1914 | #define PCS_CFG_JITTER_STUDY_MASK 0x18 /* used to make jitter | ||
1915 | measurements. a single | ||
1916 | code group is xmitted | ||
1917 | regularly. | ||
1918 | 0x0 = normal operation | ||
1919 | 0x1 = high freq test | ||
1920 | pattern, D21.5 | ||
1921 | 0x2 = low freq test | ||
1922 | pattern, K28.7 | ||
1923 | 0x3 = reserved */ | ||
1924 | #define PCS_CFG_10MS_TIMER_OVERRIDE 0x20 /* shortens 10-20ms auto- | ||
1925 | negotiation timer to | ||
1926 | a few cycles for test | ||
1927 | purposes */ | ||
1928 | |||
1929 | /* used for diagnostic purposes. bits 20-22 autoclear on read */ | ||
1930 | #define REG_PCS_STATE_MACHINE 0x9014 /* (ro) PCS state machine | ||
1931 | and diagnostic reg */ | ||
1932 | #define PCS_SM_TX_STATE_MASK 0x0000000F /* 0 and 1 indicate | ||
1933 | xmission of idle. | ||
1934 | otherwise, xmission of | ||
1935 | a packet */ | ||
1936 | #define PCS_SM_RX_STATE_MASK 0x000000F0 /* 0 indicates reception | ||
1937 | of idle. otherwise, | ||
1938 | reception of packet */ | ||
1939 | #define PCS_SM_WORD_SYNC_STATE_MASK 0x00000700 /* 0 indicates loss of | ||
1940 | sync */ | ||
1941 | #define PCS_SM_SEQ_DETECT_STATE_MASK 0x00001800 /* cycling through 0-3 | ||
1942 | indicates reception of | ||
1943 | Config codes. cycling | ||
1944 | through 0-1 indicates | ||
1945 | reception of idles */ | ||
1946 | #define PCS_SM_LINK_STATE_MASK 0x0001E000 | ||
1947 | #define SM_LINK_STATE_UP 0x00016000 /* link state is up */ | ||
1948 | |||
1949 | #define PCS_SM_LOSS_LINK_C 0x00100000 /* loss of link due to | ||
1950 | recept of Config | ||
1951 | codes */ | ||
1952 | #define PCS_SM_LOSS_LINK_SYNC 0x00200000 /* loss of link due to | ||
1953 | loss of sync */ | ||
1954 | #define PCS_SM_LOSS_SIGNAL_DETECT 0x00400000 /* signal detect goes | ||
1955 | from OK to FAIL. bit29 | ||
1956 | will also be set if | ||
1957 | this is set */ | ||
1958 | #define PCS_SM_NO_LINK_BREAKLINK 0x01000000 /* link not up due to | ||
1959 | receipt of breaklink | ||
1960 | C codes from partner. | ||
1961 | C codes w/ 0 content | ||
1962 | received triggering | ||
1963 | start/restart of | ||
1964 | autonegotiation. | ||
1965 | should be sent for | ||
1966 | no longer than 20ms */ | ||
1967 | #define PCS_SM_NO_LINK_SERDES 0x02000000 /* serdes being | ||
1968 | initialized. see serdes | ||
1969 | state reg */ | ||
1970 | #define PCS_SM_NO_LINK_C 0x04000000 /* C codes not stable or | ||
1971 | not received */ | ||
1972 | #define PCS_SM_NO_LINK_SYNC 0x08000000 /* word sync not | ||
1973 | achieved */ | ||
1974 | #define PCS_SM_NO_LINK_WAIT_C 0x10000000 /* waiting for C codes | ||
1975 | w/ ack bit set */ | ||
1976 | #define PCS_SM_NO_LINK_NO_IDLE 0x20000000 /* link partner continues | ||
1977 | to send C codes | ||
1978 | instead of idle | ||
1979 | symbols or pkt data */ | ||
1980 | |||
1981 | /* this register indicates interrupt changes in specific PCS MII status bits. | ||
1982 | * PCS_INT may be masked at the ISR level. only a single bit is implemented | ||
1983 | * for link status change. | ||
1984 | */ | ||
1985 | #define REG_PCS_INTR_STATUS 0x9018 /* PCS interrupt status */ | ||
1986 | #define PCS_INTR_STATUS_LINK_CHANGE 0x04 /* link status has changed | ||
1987 | since last read */ | ||
1988 | |||
1989 | /* control which network interface is used. no more than one bit should | ||
1990 | * be set. | ||
1991 | * DEFAULT: none | ||
1992 | */ | ||
1993 | #define REG_PCS_DATAPATH_MODE 0x9050 /* datapath mode reg */ | ||
1994 | #define PCS_DATAPATH_MODE_MII 0x00 /* PCS is not used and | ||
1995 | MII/GMII is selected. | ||
1996 | selection between MII and | ||
1997 | GMII is controlled by | ||
1998 | XIF_CFG */ | ||
1999 | #define PCS_DATAPATH_MODE_SERDES 0x02 /* PCS is used via the | ||
2000 | 10-bit interface */ | ||
2001 | |||
2002 | /* input to serdes chip or serialink block */ | ||
2003 | #define REG_PCS_SERDES_CTRL 0x9054 /* serdes control reg */ | ||
2004 | #define PCS_SERDES_CTRL_LOOPBACK 0x01 /* enable loopback on | ||
2005 | serdes interface */ | ||
2006 | #define PCS_SERDES_CTRL_SYNCD_EN 0x02 /* enable sync carrier | ||
2007 | detection. should be | ||
2008 | 0x0 for normal | ||
2009 | operation */ | ||
2010 | #define PCS_SERDES_CTRL_LOCKREF 0x04 /* frequency-lock RBC[0:1] | ||
2011 | to REFCLK when set. | ||
2012 | when clear, receiver | ||
2013 | clock locks to incoming | ||
2014 | serial data */ | ||
2015 | |||
2016 | /* multiplex test outputs into the PROM address (PA_3 through PA_0) pins. | ||
2017 | * should be 0x0 for normal operations. | ||
2018 | * 0b000 normal operation, PROM address[3:0] selected | ||
2019 | * 0b001 rxdma req, rxdma ack, rxdma ready, rxdma read | ||
2020 | * 0b010 rxmac req, rx ack, rx tag, rx clk shared | ||
2021 | * 0b011 txmac req, tx ack, tx tag, tx retry req | ||
2022 | * 0b100 tx tp3, tx tp2, tx tp1, tx tp0 | ||
2023 | * 0b101 R period RX, R period TX, R period HP, R period BIM | ||
2024 | * DEFAULT: 0x0 | ||
2025 | */ | ||
2026 | #define REG_PCS_SHARED_OUTPUT_SEL 0x9058 /* shared output select */ | ||
2027 | #define PCS_SOS_PROM_ADDR_MASK 0x0007 | ||
2028 | |||
2029 | /* used for diagnostics. this register indicates progress of the SERDES | ||
2030 | * boot up. | ||
2031 | * 0b00 undergoing reset | ||
2032 | * 0b01 waiting 500us while lockrefn is asserted | ||
2033 | * 0b10 waiting for comma detect | ||
2034 | * 0b11 receive data is synchronized | ||
2035 | * DEFAULT: 0x0 | ||
2036 | */ | ||
2037 | #define REG_PCS_SERDES_STATE 0x905C /* (ro) serdes state */ | ||
2038 | #define PCS_SERDES_STATE_MASK 0x03 | ||
2039 | |||
2040 | /* used for diagnostics. indicates number of packets transmitted or received. | ||
2041 | * counters rollover w/out generating an interrupt. | ||
2042 | * DEFAULT: 0x0 | ||
2043 | */ | ||
2044 | #define REG_PCS_PACKET_COUNT 0x9060 /* (ro) PCS packet counter */ | ||
2045 | #define PCS_PACKET_COUNT_TX 0x000007FF /* pkts xmitted by PCS */ | ||
2046 | #define PCS_PACKET_COUNT_RX 0x07FF0000 /* pkts recvd by PCS | ||
2047 | whether they | ||
2048 | encountered an error | ||
2049 | or not */ | ||
2050 | |||
2051 | /** LocalBus Devices. the following provides run-time access to the | ||
2052 | * Cassini's PROM | ||
2053 | ***/ | ||
2054 | #define REG_EXPANSION_ROM_RUN_START 0x100000 /* expansion rom run time | ||
2055 | access */ | ||
2056 | #define REG_EXPANSION_ROM_RUN_END 0x17FFFF | ||
2057 | |||
2058 | #define REG_SECOND_LOCALBUS_START 0x180000 /* secondary local bus | ||
2059 | device */ | ||
2060 | #define REG_SECOND_LOCALBUS_END 0x1FFFFF | ||
2061 | |||
2062 | /* entropy device */ | ||
2063 | #define REG_ENTROPY_START REG_SECOND_LOCALBUS_START | ||
2064 | #define REG_ENTROPY_DATA (REG_ENTROPY_START + 0x00) | ||
2065 | #define REG_ENTROPY_STATUS (REG_ENTROPY_START + 0x04) | ||
2066 | #define ENTROPY_STATUS_DRDY 0x01 | ||
2067 | #define ENTROPY_STATUS_BUSY 0x02 | ||
2068 | #define ENTROPY_STATUS_CIPHER 0x04 | ||
2069 | #define ENTROPY_STATUS_BYPASS_MASK 0x18 | ||
2070 | #define REG_ENTROPY_MODE (REG_ENTROPY_START + 0x05) | ||
2071 | #define ENTROPY_MODE_KEY_MASK 0x07 | ||
2072 | #define ENTROPY_MODE_ENCRYPT 0x40 | ||
2073 | #define REG_ENTROPY_RAND_REG (REG_ENTROPY_START + 0x06) | ||
2074 | #define REG_ENTROPY_RESET (REG_ENTROPY_START + 0x07) | ||
2075 | #define ENTROPY_RESET_DES_IO 0x01 | ||
2076 | #define ENTROPY_RESET_STC_MODE 0x02 | ||
2077 | #define ENTROPY_RESET_KEY_CACHE 0x04 | ||
2078 | #define ENTROPY_RESET_IV 0x08 | ||
2079 | #define REG_ENTROPY_IV (REG_ENTROPY_START + 0x08) | ||
2080 | #define REG_ENTROPY_KEY0 (REG_ENTROPY_START + 0x10) | ||
2081 | #define REG_ENTROPY_KEYN(x) (REG_ENTROPY_KEY0 + 4*(x)) | ||
2082 | |||
2083 | /* phys of interest w/ their special mii registers */ | ||
2084 | #define PHY_LUCENT_B0 0x00437421 | ||
2085 | #define LUCENT_MII_REG 0x1F | ||
2086 | |||
2087 | #define PHY_NS_DP83065 0x20005c78 | ||
2088 | #define DP83065_MII_MEM 0x16 | ||
2089 | #define DP83065_MII_REGD 0x1D | ||
2090 | #define DP83065_MII_REGE 0x1E | ||
2091 | |||
2092 | #define PHY_BROADCOM_5411 0x00206071 | ||
2093 | #define PHY_BROADCOM_B0 0x00206050 | ||
2094 | #define BROADCOM_MII_REG4 0x14 | ||
2095 | #define BROADCOM_MII_REG5 0x15 | ||
2096 | #define BROADCOM_MII_REG7 0x17 | ||
2097 | #define BROADCOM_MII_REG8 0x18 | ||
2098 | |||
2099 | #define CAS_MII_ANNPTR 0x07 | ||
2100 | #define CAS_MII_ANNPRR 0x08 | ||
2101 | #define CAS_MII_1000_CTRL 0x09 | ||
2102 | #define CAS_MII_1000_STATUS 0x0A | ||
2103 | #define CAS_MII_1000_EXTEND 0x0F | ||
2104 | |||
2105 | #define CAS_BMSR_1000_EXTEND 0x0100 /* supports 1000Base-T extended status */ | ||
2106 | /* | ||
2107 | * if autoneg is disabled, here's the table: | ||
2108 | * BMCR_SPEED100 = 100Mbps | ||
2109 | * BMCR_SPEED1000 = 1000Mbps | ||
2110 | * ~(BMCR_SPEED100 | BMCR_SPEED1000) = 10Mbps | ||
2111 | */ | ||
2112 | #define CAS_BMCR_SPEED1000 0x0040 /* Select 1000Mbps */ | ||
2113 | |||
2114 | #define CAS_ADVERTISE_1000HALF 0x0100 | ||
2115 | #define CAS_ADVERTISE_1000FULL 0x0200 | ||
2116 | #define CAS_ADVERTISE_PAUSE 0x0400 | ||
2117 | #define CAS_ADVERTISE_ASYM_PAUSE 0x0800 | ||
2118 | |||
2119 | /* regular lpa register */ | ||
2120 | #define CAS_LPA_PAUSE CAS_ADVERTISE_PAUSE | ||
2121 | #define CAS_LPA_ASYM_PAUSE CAS_ADVERTISE_ASYM_PAUSE | ||
2122 | |||
2123 | /* 1000_STATUS register */ | ||
2124 | #define CAS_LPA_1000HALF 0x0400 | ||
2125 | #define CAS_LPA_1000FULL 0x0800 | ||
2126 | |||
2127 | #define CAS_EXTEND_1000XFULL 0x8000 | ||
2128 | #define CAS_EXTEND_1000XHALF 0x4000 | ||
2129 | #define CAS_EXTEND_1000TFULL 0x2000 | ||
2130 | #define CAS_EXTEND_1000THALF 0x1000 | ||
2131 | |||
2132 | /* cassini header parser firmware */ | ||
2133 | typedef struct cas_hp_inst { | ||
2134 | const char *note; | ||
2135 | |||
2136 | u16 mask, val; | ||
2137 | |||
2138 | u8 op; | ||
2139 | u8 soff, snext; /* if match succeeds, new offset and match */ | ||
2140 | u8 foff, fnext; /* if match fails, new offset and match */ | ||
2141 | /* output info */ | ||
2142 | u8 outop; /* output opcode */ | ||
2143 | |||
2144 | u16 outarg; /* output argument */ | ||
2145 | u8 outenab; /* output enable: 0 = not, 1 = if match | ||
2146 | 2 = if !match, 3 = always */ | ||
2147 | u8 outshift; /* barrel shift right, 4 bits */ | ||
2148 | u16 outmask; | ||
2149 | } cas_hp_inst_t; | ||
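
Decoding one entry against this layout makes the encoding concrete. Taking the "VLAN?" line from the preamble below (an editorial reading of the field order, not text from any spec):

    /* { "VLAN?", 0xffff, 0x8100, OP_EQ, 1, S1_CFI, 0, S1_8023,
     *   IM_CTL, 0x00a, 3, 0x0, 0xffff }
     *
     * compare: (pkt_word & 0xffff) == 0x8100 (the 802.1Q ethertype)
     * match:   offset += 1, continue at step S1_CFI
     * fail:    offset += 0, continue at step S1_8023
     * output:  opcode IM_CTL, arg 0x00a, outenab 3 (always),
     *          no barrel shift, output mask 0xffff
     */
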
2150 | |||
2151 | /* comparison */ | ||
2152 | #define OP_EQ 0 /* packet == value */ | ||
2153 | #define OP_LT 1 /* packet < value */ | ||
2154 | #define OP_GT 2 /* packet > value */ | ||
2155 | #define OP_NP 3 /* new packet */ | ||
2156 | |||
2157 | /* output opcodes */ | ||
2158 | #define CL_REG 0 | ||
2159 | #define LD_FID 1 | ||
2160 | #define LD_SEQ 2 | ||
2161 | #define LD_CTL 3 | ||
2162 | #define LD_SAP 4 | ||
2163 | #define LD_R1 5 | ||
2164 | #define LD_L3 6 | ||
2165 | #define LD_SUM 7 | ||
2166 | #define LD_HDR 8 | ||
2167 | #define IM_FID 9 | ||
2168 | #define IM_SEQ 10 | ||
2169 | #define IM_SAP 11 | ||
2170 | #define IM_R1 12 | ||
2171 | #define IM_CTL 13 | ||
2172 | #define LD_LEN 14 | ||
2173 | #define ST_FLG 15 | ||
2174 | |||
2175 | /* match step #s for IP4TCP4 */ | ||
2176 | #define S1_PCKT 0 | ||
2177 | #define S1_VLAN 1 | ||
2178 | #define S1_CFI 2 | ||
2179 | #define S1_8023 3 | ||
2180 | #define S1_LLC 4 | ||
2181 | #define S1_LLCc 5 | ||
2182 | #define S1_IPV4 6 | ||
2183 | #define S1_IPV4c 7 | ||
2184 | #define S1_IPV4F 8 | ||
2185 | #define S1_TCP44 9 | ||
2186 | #define S1_IPV6 10 | ||
2187 | #define S1_IPV6L 11 | ||
2188 | #define S1_IPV6c 12 | ||
2189 | #define S1_TCP64 13 | ||
2190 | #define S1_TCPSQ 14 | ||
2191 | #define S1_TCPFG 15 | ||
2192 | #define S1_TCPHL 16 | ||
2193 | #define S1_TCPHc 17 | ||
2194 | #define S1_CLNP 18 | ||
2195 | #define S1_CLNP2 19 | ||
2196 | #define S1_DROP 20 | ||
2197 | #define S2_HTTP 21 | ||
2198 | #define S1_ESP4 22 | ||
2199 | #define S1_AH4 23 | ||
2200 | #define S1_ESP6 24 | ||
2201 | #define S1_AH6 25 | ||
2202 | |||
2203 | #define CAS_PROG_IP46TCP4_PREAMBLE \ | ||
2204 | { "packet arrival?", 0xffff, 0x0000, OP_NP, 6, S1_VLAN, 0, S1_PCKT, \ | ||
2205 | CL_REG, 0x3ff, 1, 0x0, 0x0000}, \ | ||
2206 | { "VLAN?", 0xffff, 0x8100, OP_EQ, 1, S1_CFI, 0, S1_8023, \ | ||
2207 | IM_CTL, 0x00a, 3, 0x0, 0xffff}, \ | ||
2208 | { "CFI?", 0x1000, 0x1000, OP_EQ, 0, S1_DROP, 1, S1_8023, \ | ||
2209 | CL_REG, 0x000, 0, 0x0, 0x0000}, \ | ||
2210 | { "8023?", 0xffff, 0x0600, OP_LT, 1, S1_LLC, 0, S1_IPV4, \ | ||
2211 | CL_REG, 0x000, 0, 0x0, 0x0000}, \ | ||
2212 | { "LLC?", 0xffff, 0xaaaa, OP_EQ, 1, S1_LLCc, 0, S1_CLNP, \ | ||
2213 | CL_REG, 0x000, 0, 0x0, 0x0000}, \ | ||
2214 | { "LLCc?", 0xff00, 0x0300, OP_EQ, 2, S1_IPV4, 0, S1_CLNP, \ | ||
2215 | CL_REG, 0x000, 0, 0x0, 0x0000}, \ | ||
2216 | { "IPV4?", 0xffff, 0x0800, OP_EQ, 1, S1_IPV4c, 0, S1_IPV6, \ | ||
2217 | LD_SAP, 0x100, 3, 0x0, 0xffff}, \ | ||
2218 | { "IPV4 cont?", 0xff00, 0x4500, OP_EQ, 3, S1_IPV4F, 0, S1_CLNP, \ | ||
2219 | LD_SUM, 0x00a, 1, 0x0, 0x0000}, \ | ||
2220 | { "IPV4 frag?", 0x3fff, 0x0000, OP_EQ, 1, S1_TCP44, 0, S1_CLNP, \ | ||
2221 | LD_LEN, 0x03e, 1, 0x0, 0xffff}, \ | ||
2222 | { "TCP44?", 0x00ff, 0x0006, OP_EQ, 7, S1_TCPSQ, 0, S1_CLNP, \ | ||
2223 | LD_FID, 0x182, 1, 0x0, 0xffff}, /* FID IP4&TCP src+dst */ \ | ||
2224 | { "IPV6?", 0xffff, 0x86dd, OP_EQ, 1, S1_IPV6L, 0, S1_CLNP, \ | ||
2225 | LD_SUM, 0x015, 1, 0x0, 0x0000}, \ | ||
2226 | { "IPV6 len", 0xf000, 0x6000, OP_EQ, 0, S1_IPV6c, 0, S1_CLNP, \ | ||
2227 | IM_R1, 0x128, 1, 0x0, 0xffff}, \ | ||
2228 | { "IPV6 cont?", 0x0000, 0x0000, OP_EQ, 3, S1_TCP64, 0, S1_CLNP, \ | ||
2229 | LD_FID, 0x484, 1, 0x0, 0xffff}, /* FID IP6&TCP src+dst */ \ | ||
2230 | { "TCP64?", 0xff00, 0x0600, OP_EQ, 18, S1_TCPSQ, 0, S1_CLNP, \ | ||
2231 | LD_LEN, 0x03f, 1, 0x0, 0xffff} | ||
2232 | |||
2233 | #ifdef USE_HP_IP46TCP4 | ||
2234 | static cas_hp_inst_t cas_prog_ip46tcp4tab[] = { | ||
2235 | CAS_PROG_IP46TCP4_PREAMBLE, | ||
2236 | { "TCP seq", /* DADDR should point to dest port */ | ||
2237 | 0x0000, 0x0000, OP_EQ, 0, S1_TCPFG, 4, S1_TCPFG, LD_SEQ, | ||
2238 | 0x081, 3, 0x0, 0xffff}, /* Load TCP seq # */ | ||
2239 | { "TCP control flags", 0x0000, 0x0000, OP_EQ, 0, S1_TCPHL, 0, | ||
2240 | S1_TCPHL, ST_FLG, 0x045, 3, 0x0, 0x002f}, /* Load TCP flags */ | ||
2241 | { "TCP length", 0x0000, 0x0000, OP_EQ, 0, S1_TCPHc, 0, | ||
2242 | S1_TCPHc, LD_R1, 0x205, 3, 0xB, 0xf000}, | ||
2243 | { "TCP length cont", 0x0000, 0x0000, OP_EQ, 0, S1_PCKT, 0, | ||
2244 | S1_PCKT, LD_HDR, 0x0ff, 3, 0x0, 0xffff}, | ||
2245 | { "Cleanup", 0x0000, 0x0000, OP_EQ, 0, S1_CLNP2, 0, S1_CLNP2, | ||
2246 | IM_CTL, 0x001, 3, 0x0, 0x0001}, | ||
2247 | { "Cleanup 2", 0x0000, 0x0000, OP_EQ, 0, S1_PCKT, 0, S1_PCKT, | ||
2248 | IM_CTL, 0x000, 0, 0x0, 0x0000}, | ||
2249 | { "Drop packet", 0x0000, 0x0000, OP_EQ, 0, S1_PCKT, 0, S1_PCKT, | ||
2250 | IM_CTL, 0x080, 3, 0x0, 0xffff}, | ||
2251 | { NULL }, | ||
2252 | }; | ||
2253 | #ifdef HP_IP46TCP4_DEFAULT | ||
2254 | #define CAS_HP_FIRMWARE cas_prog_ip46tcp4tab | ||
2255 | #endif | ||
2256 | #endif | ||
2257 | |||
2258 | /* | ||
2259 | * Alternate table load which excludes HTTP server traffic from reassembly. | ||
2260 | * It is substantially similar to the basic table, with one extra state | ||
2261 | * and a few extra compares. */ | ||
2262 | #ifdef USE_HP_IP46TCP4NOHTTP | ||
2263 | static cas_hp_inst_t cas_prog_ip46tcp4nohttptab[] = { | ||
2264 | CAS_PROG_IP46TCP4_PREAMBLE, | ||
2265 | { "TCP seq", /* DADDR should point to dest port */ | ||
2266 | 0xFFFF, 0x0080, OP_EQ, 0, S2_HTTP, 0, S1_TCPFG, LD_SEQ, | ||
2267 | 0x081, 3, 0x0, 0xffff} , /* Load TCP seq # */ | ||
2268 | { "TCP control flags", 0xFFFF, 0x8080, OP_EQ, 0, S2_HTTP, 0, | ||
2269 | S1_TCPHL, ST_FLG, 0x145, 2, 0x0, 0x002f, }, /* Load TCP flags */ | ||
2270 | { "TCP length", 0x0000, 0x0000, OP_EQ, 0, S1_TCPHc, 0, S1_TCPHc, | ||
2271 | LD_R1, 0x205, 3, 0xB, 0xf000}, | ||
2272 | { "TCP length cont", 0x0000, 0x0000, OP_EQ, 0, S1_PCKT, 0, S1_PCKT, | ||
2273 | LD_HDR, 0x0ff, 3, 0x0, 0xffff}, | ||
2274 | { "Cleanup", 0x0000, 0x0000, OP_EQ, 0, S1_CLNP2, 0, S1_CLNP2, | ||
2275 | IM_CTL, 0x001, 3, 0x0, 0x0001}, | ||
2276 | { "Cleanup 2", 0x0000, 0x0000, OP_EQ, 0, S1_PCKT, 0, S1_PCKT, | ||
2277 | CL_REG, 0x002, 3, 0x0, 0x0000}, | ||
2278 | { "Drop packet", 0x0000, 0x0000, OP_EQ, 0, S1_PCKT, 0, S1_PCKT, | ||
2279 | IM_CTL, 0x080, 3, 0x0, 0xffff}, | ||
2280 | { "No HTTP", 0x0000, 0x0000, OP_EQ, 0, S1_PCKT, 0, S1_PCKT, | ||
2281 | IM_CTL, 0x044, 3, 0x0, 0xffff}, | ||
2282 | { NULL }, | ||
2283 | }; | ||
2284 | #ifdef HP_IP46TCP4NOHTTP_DEFAULT | ||
2285 | #define CAS_HP_FIRMWARE cas_prog_ip46tcp4nohttptab | ||
2286 | #endif | ||
2287 | #endif | ||
2288 | |||
2289 | /* match step #s for IP4FRAG */ | ||
2290 | #define S3_IPV6c 11 | ||
2291 | #define S3_TCP64 12 | ||
2292 | #define S3_TCPSQ 13 | ||
2293 | #define S3_TCPFG 14 | ||
2294 | #define S3_TCPHL 15 | ||
2295 | #define S3_TCPHc 16 | ||
2296 | #define S3_FRAG 17 | ||
2297 | #define S3_FOFF 18 | ||
2298 | #define S3_CLNP 19 | ||
2299 | |||
2300 | #ifdef USE_HP_IP4FRAG | ||
2301 | static cas_hp_inst_t cas_prog_ip4fragtab[] = { | ||
2302 | { "packet arrival?", 0xffff, 0x0000, OP_NP, 6, S1_VLAN, 0, S1_PCKT, | ||
2303 | CL_REG, 0x3ff, 1, 0x0, 0x0000}, | ||
2304 | { "VLAN?", 0xffff, 0x8100, OP_EQ, 1, S1_CFI, 0, S1_8023, | ||
2305 | IM_CTL, 0x00a, 3, 0x0, 0xffff}, | ||
2306 | { "CFI?", 0x1000, 0x1000, OP_EQ, 0, S3_CLNP, 1, S1_8023, | ||
2307 | CL_REG, 0x000, 0, 0x0, 0x0000}, | ||
2308 | { "8023?", 0xffff, 0x0600, OP_LT, 1, S1_LLC, 0, S1_IPV4, | ||
2309 | CL_REG, 0x000, 0, 0x0, 0x0000}, | ||
2310 | { "LLC?", 0xffff, 0xaaaa, OP_EQ, 1, S1_LLCc, 0, S3_CLNP, | ||
2311 | CL_REG, 0x000, 0, 0x0, 0x0000}, | ||
2312 | { "LLCc?",0xff00, 0x0300, OP_EQ, 2, S1_IPV4, 0, S3_CLNP, | ||
2313 | CL_REG, 0x000, 0, 0x0, 0x0000}, | ||
2314 | { "IPV4?", 0xffff, 0x0800, OP_EQ, 1, S1_IPV4c, 0, S1_IPV6, | ||
2315 | LD_SAP, 0x100, 3, 0x0, 0xffff}, | ||
2316 | { "IPV4 cont?", 0xff00, 0x4500, OP_EQ, 3, S1_IPV4F, 0, S3_CLNP, | ||
2317 | LD_SUM, 0x00a, 1, 0x0, 0x0000}, | ||
2318 | { "IPV4 frag?", 0x3fff, 0x0000, OP_EQ, 1, S1_TCP44, 0, S3_FRAG, | ||
2319 | LD_LEN, 0x03e, 3, 0x0, 0xffff}, | ||
2320 | { "TCP44?", 0x00ff, 0x0006, OP_EQ, 7, S3_TCPSQ, 0, S3_CLNP, | ||
2321 | LD_FID, 0x182, 3, 0x0, 0xffff}, /* FID IP4&TCP src+dst */ | ||
2322 | { "IPV6?", 0xffff, 0x86dd, OP_EQ, 1, S3_IPV6c, 0, S3_CLNP, | ||
2323 | LD_SUM, 0x015, 1, 0x0, 0x0000}, | ||
2324 | { "IPV6 cont?", 0xf000, 0x6000, OP_EQ, 3, S3_TCP64, 0, S3_CLNP, | ||
2325 | LD_FID, 0x484, 1, 0x0, 0xffff}, /* FID IP6&TCP src+dst */ | ||
2326 | { "TCP64?", 0xff00, 0x0600, OP_EQ, 18, S3_TCPSQ, 0, S3_CLNP, | ||
2327 | LD_LEN, 0x03f, 1, 0x0, 0xffff}, | ||
2328 | { "TCP seq", /* DADDR should point to dest port */ | ||
2329 | 0x0000, 0x0000, OP_EQ, 0, S3_TCPFG, 4, S3_TCPFG, LD_SEQ, | ||
2330 | 0x081, 3, 0x0, 0xffff}, /* Load TCP seq # */ | ||
2331 | { "TCP control flags", 0x0000, 0x0000, OP_EQ, 0, S3_TCPHL, 0, | ||
2332 | S3_TCPHL, ST_FLG, 0x045, 3, 0x0, 0x002f}, /* Load TCP flags */ | ||
2333 | { "TCP length", 0x0000, 0x0000, OP_EQ, 0, S3_TCPHc, 0, S3_TCPHc, | ||
2334 | LD_R1, 0x205, 3, 0xB, 0xf000}, | ||
2335 | { "TCP length cont", 0x0000, 0x0000, OP_EQ, 0, S1_PCKT, 0, S1_PCKT, | ||
2336 | LD_HDR, 0x0ff, 3, 0x0, 0xffff}, | ||
2337 | { "IP4 Fragment", 0x0000, 0x0000, OP_EQ, 0, S3_FOFF, 0, S3_FOFF, | ||
2338 | LD_FID, 0x103, 3, 0x0, 0xffff}, /* FID IP4 src+dst */ | ||
2339 | { "IP4 frag offset", 0x0000, 0x0000, OP_EQ, 0, S3_FOFF, 0, S3_FOFF, | ||
2340 | LD_SEQ, 0x040, 1, 0xD, 0xfff8}, | ||
2341 | { "Cleanup", 0x0000, 0x0000, OP_EQ, 0, S1_PCKT, 0, S1_PCKT, | ||
2342 | IM_CTL, 0x001, 3, 0x0, 0x0001}, | ||
2343 | { NULL }, | ||
2344 | }; | ||
2345 | #ifdef HP_IP4FRAG_DEFAULT | ||
2346 | #define CAS_HP_FIRMWARE cas_prog_ip4fragtab | ||
2347 | #endif | ||
2348 | #endif | ||
2349 | |||
2350 | /* | ||
2351 | * Alternate table which does batching without reassembly | ||
2352 | */ | ||
2353 | #ifdef USE_HP_IP46TCP4BATCH | ||
2354 | static cas_hp_inst_t cas_prog_ip46tcp4batchtab[] = { | ||
2355 | CAS_PROG_IP46TCP4_PREAMBLE, | ||
2356 | { "TCP seq", /* DADDR should point to dest port */ | ||
2357 | 0x0000, 0x0000, OP_EQ, 0, S1_TCPFG, 0, S1_TCPFG, LD_SEQ, | ||
2358 | 0x081, 3, 0x0, 0xffff}, /* Load TCP seq # */ | ||
2359 | { "TCP control flags", 0x0000, 0x0000, OP_EQ, 0, S1_TCPHL, 0, | ||
2360 | S1_TCPHL, ST_FLG, 0x000, 3, 0x0, 0x0000}, /* Load TCP flags */ | ||
2361 | { "TCP length", 0x0000, 0x0000, OP_EQ, 0, S1_TCPHc, 0, | ||
2362 | S1_TCPHc, LD_R1, 0x205, 3, 0xB, 0xf000}, | ||
2363 | { "TCP length cont", 0x0000, 0x0000, OP_EQ, 0, S1_PCKT, 0, | ||
2364 | S1_PCKT, IM_CTL, 0x040, 3, 0x0, 0xffff}, /* set batch bit */ | ||
2365 | { "Cleanup", 0x0000, 0x0000, OP_EQ, 0, S1_PCKT, 0, S1_PCKT, | ||
2366 | IM_CTL, 0x001, 3, 0x0, 0x0001}, | ||
2367 | { "Drop packet", 0x0000, 0x0000, OP_EQ, 0, S1_PCKT, 0, | ||
2368 | S1_PCKT, IM_CTL, 0x080, 3, 0x0, 0xffff}, | ||
2369 | { NULL }, | ||
2370 | }; | ||
2371 | #ifdef HP_IP46TCP4BATCH_DEFAULT | ||
2372 | #define CAS_HP_FIRMWARE cas_prog_ip46tcp4batchtab | ||
2373 | #endif | ||
2374 | #endif | ||
2375 | |||
2376 | /* Workaround for Cassini rev2 descriptor corruption problem. | ||
2377 | * Does batching without reassembly, and sets the SAP to a known | ||
2378 | * data pattern for all packets. | ||
2379 | */ | ||
2380 | #ifdef USE_HP_WORKAROUND | ||
2381 | static cas_hp_inst_t cas_prog_workaroundtab[] = { | ||
2382 | { "packet arrival?", 0xffff, 0x0000, OP_NP, 6, S1_VLAN, 0, | ||
2383 | S1_PCKT, CL_REG, 0x3ff, 1, 0x0, 0x0000} , | ||
2384 | { "VLAN?", 0xffff, 0x8100, OP_EQ, 1, S1_CFI, 0, S1_8023, | ||
2385 | IM_CTL, 0x04a, 3, 0x0, 0xffff}, | ||
2386 | { "CFI?", 0x1000, 0x1000, OP_EQ, 0, S1_CLNP, 1, S1_8023, | ||
2387 | CL_REG, 0x000, 0, 0x0, 0x0000}, | ||
2388 | { "8023?", 0xffff, 0x0600, OP_LT, 1, S1_LLC, 0, S1_IPV4, | ||
2389 | CL_REG, 0x000, 0, 0x0, 0x0000}, | ||
2390 | { "LLC?", 0xffff, 0xaaaa, OP_EQ, 1, S1_LLCc, 0, S1_CLNP, | ||
2391 | CL_REG, 0x000, 0, 0x0, 0x0000}, | ||
2392 | { "LLCc?", 0xff00, 0x0300, OP_EQ, 2, S1_IPV4, 0, S1_CLNP, | ||
2393 | CL_REG, 0x000, 0, 0x0, 0x0000}, | ||
2394 | { "IPV4?", 0xffff, 0x0800, OP_EQ, 1, S1_IPV4c, 0, S1_IPV6, | ||
2395 | IM_SAP, 0x6AE, 3, 0x0, 0xffff}, | ||
2396 | { "IPV4 cont?", 0xff00, 0x4500, OP_EQ, 3, S1_IPV4F, 0, S1_CLNP, | ||
2397 | LD_SUM, 0x00a, 1, 0x0, 0x0000}, | ||
2398 | { "IPV4 frag?", 0x3fff, 0x0000, OP_EQ, 1, S1_TCP44, 0, S1_CLNP, | ||
2399 | LD_LEN, 0x03e, 1, 0x0, 0xffff}, | ||
2400 | { "TCP44?", 0x00ff, 0x0006, OP_EQ, 7, S1_TCPSQ, 0, S1_CLNP, | ||
2401 | LD_FID, 0x182, 3, 0x0, 0xffff}, /* FID IP4&TCP src+dst */ | ||
2402 | { "IPV6?", 0xffff, 0x86dd, OP_EQ, 1, S1_IPV6L, 0, S1_CLNP, | ||
2403 | LD_SUM, 0x015, 1, 0x0, 0x0000}, | ||
2404 | { "IPV6 len", 0xf000, 0x6000, OP_EQ, 0, S1_IPV6c, 0, S1_CLNP, | ||
2405 | IM_R1, 0x128, 1, 0x0, 0xffff}, | ||
2406 | { "IPV6 cont?", 0x0000, 0x0000, OP_EQ, 3, S1_TCP64, 0, S1_CLNP, | ||
2407 | LD_FID, 0x484, 1, 0x0, 0xffff}, /* FID IP6&TCP src+dst */ | ||
2408 | { "TCP64?", 0xff00, 0x0600, OP_EQ, 18, S1_TCPSQ, 0, S1_CLNP, | ||
2409 | LD_LEN, 0x03f, 1, 0x0, 0xffff}, | ||
2410 | { "TCP seq", /* DADDR should point to dest port */ | ||
2411 | 0x0000, 0x0000, OP_EQ, 0, S1_TCPFG, 4, S1_TCPFG, LD_SEQ, | ||
2412 | 0x081, 3, 0x0, 0xffff}, /* Load TCP seq # */ | ||
2413 | { "TCP control flags", 0x0000, 0x0000, OP_EQ, 0, S1_TCPHL, 0, | ||
2414 | S1_TCPHL, ST_FLG, 0x045, 3, 0x0, 0x002f}, /* Load TCP flags */ | ||
2415 | { "TCP length", 0x0000, 0x0000, OP_EQ, 0, S1_TCPHc, 0, S1_TCPHc, | ||
2416 | LD_R1, 0x205, 3, 0xB, 0xf000}, | ||
2417 | { "TCP length cont", 0x0000, 0x0000, OP_EQ, 0, S1_PCKT, 0, | ||
2418 | S1_PCKT, LD_HDR, 0x0ff, 3, 0x0, 0xffff}, | ||
2419 | { "Cleanup", 0x0000, 0x0000, OP_EQ, 0, S1_CLNP2, 0, S1_CLNP2, | ||
2420 | IM_SAP, 0x6AE, 3, 0x0, 0xffff} , | ||
2421 | { "Cleanup 2", 0x0000, 0x0000, OP_EQ, 0, S1_PCKT, 0, S1_PCKT, | ||
2422 | IM_CTL, 0x001, 3, 0x0, 0x0001}, | ||
2423 | { NULL }, | ||
2424 | }; | ||
2425 | #ifdef HP_WORKAROUND_DEFAULT | ||
2426 | #define CAS_HP_FIRMWARE cas_prog_workaroundtab | ||
2427 | #endif | ||
2428 | #endif | ||
2429 | |||
2430 | #ifdef USE_HP_ENCRYPT | ||
2431 | static cas_hp_inst_t cas_prog_encryptiontab[] = { | ||
2432 | { "packet arrival?", 0xffff, 0x0000, OP_NP, 6, S1_VLAN, 0, | ||
2433 | S1_PCKT, CL_REG, 0x3ff, 1, 0x0, 0x0000}, | ||
2434 | { "VLAN?", 0xffff, 0x8100, OP_EQ, 1, S1_CFI, 0, S1_8023, | ||
2435 | IM_CTL, 0x00a, 3, 0x0, 0xffff}, | ||
2436 | #if 0 | ||
2437 | //"CFI?", /* 02 FIND CFI and If FIND go to S1_DROP */ | ||
2438 | //0x1000, 0x1000, OP_EQ, 0, S1_DROP, 1, S1_8023, | ||
2439 | //CL_REG, 0x000, 0, 0x0, 0x0000, | ||
2440 | #endif | ||
2441 | { "CFI?", /* FIND CFI and If FIND go to CleanUP1 (ignore and send to host) */ | ||
2442 | 0x1000, 0x1000, OP_EQ, 0, S1_CLNP, 1, S1_8023, | ||
2443 | CL_REG, 0x000, 0, 0x0, 0x0000}, | ||
2444 | { "8023?", 0xffff, 0x0600, OP_LT, 1, S1_LLC, 0, S1_IPV4, | ||
2445 | CL_REG, 0x000, 0, 0x0, 0x0000}, | ||
2446 | { "LLC?", 0xffff, 0xaaaa, OP_EQ, 1, S1_LLCc, 0, S1_CLNP, | ||
2447 | CL_REG, 0x000, 0, 0x0, 0x0000}, | ||
2448 | { "LLCc?", 0xff00, 0x0300, OP_EQ, 2, S1_IPV4, 0, S1_CLNP, | ||
2449 | CL_REG, 0x000, 0, 0x0, 0x0000}, | ||
2450 | { "IPV4?", 0xffff, 0x0800, OP_EQ, 1, S1_IPV4c, 0, S1_IPV6, | ||
2451 | LD_SAP, 0x100, 3, 0x0, 0xffff}, | ||
2452 | { "IPV4 cont?", 0xff00, 0x4500, OP_EQ, 3, S1_IPV4F, 0, S1_CLNP, | ||
2453 | LD_SUM, 0x00a, 1, 0x0, 0x0000}, | ||
2454 | { "IPV4 frag?", 0x3fff, 0x0000, OP_EQ, 1, S1_TCP44, 0, S1_CLNP, | ||
2455 | LD_LEN, 0x03e, 1, 0x0, 0xffff}, | ||
2456 | { "TCP44?", 0x00ff, 0x0006, OP_EQ, 7, S1_TCPSQ, 0, S1_ESP4, | ||
2457 | LD_FID, 0x182, 1, 0x0, 0xffff}, /* FID IP4&TCP src+dst */ | ||
2458 | { "IPV6?", 0xffff, 0x86dd, OP_EQ, 1, S1_IPV6L, 0, S1_CLNP, | ||
2459 | LD_SUM, 0x015, 1, 0x0, 0x0000}, | ||
2460 | { "IPV6 len", 0xf000, 0x6000, OP_EQ, 0, S1_IPV6c, 0, S1_CLNP, | ||
2461 | IM_R1, 0x128, 1, 0x0, 0xffff}, | ||
2462 | { "IPV6 cont?", 0x0000, 0x0000, OP_EQ, 3, S1_TCP64, 0, S1_CLNP, | ||
2463 | LD_FID, 0x484, 1, 0x0, 0xffff}, /* FID IP6&TCP src+dst */ | ||
2464 | { "TCP64?", | ||
2465 | #if 0 | ||
2466 | //@@@0xff00, 0x0600, OP_EQ, 18, S1_TCPSQ, 0, S1_ESP6, LD_LEN, 0x03f, 1, 0x0, 0xffff, | ||
2467 | #endif | ||
2468 | 0xff00, 0x0600, OP_EQ, 12, S1_TCPSQ, 0, S1_ESP6, LD_LEN, | ||
2469 | 0x03f, 1, 0x0, 0xffff}, | ||
2470 | { "TCP seq", /* 14:DADDR should point to dest port */ | ||
2471 | 0xFFFF, 0x0080, OP_EQ, 0, S2_HTTP, 0, S1_TCPFG, LD_SEQ, | ||
2472 | 0x081, 3, 0x0, 0xffff}, /* Load TCP seq # */ | ||
2473 | { "TCP control flags", 0xFFFF, 0x8080, OP_EQ, 0, S2_HTTP, 0, | ||
2474 | S1_TCPHL, ST_FLG, 0x145, 2, 0x0, 0x002f}, /* Load TCP flags */ | ||
2475 | { "TCP length", 0x0000, 0x0000, OP_EQ, 0, S1_TCPHc, 0, S1_TCPHc, | ||
2476 | LD_R1, 0x205, 3, 0xB, 0xf000} , | ||
2477 | { "TCP length cont", 0x0000, 0x0000, OP_EQ, 0, S1_PCKT, 0, | ||
2478 | S1_PCKT, LD_HDR, 0x0ff, 3, 0x0, 0xffff}, | ||
2479 | { "Cleanup", 0x0000, 0x0000, OP_EQ, 0, S1_CLNP2, 0, S1_CLNP2, | ||
2480 | IM_CTL, 0x001, 3, 0x0, 0x0001}, | ||
2481 | { "Cleanup 2", 0x0000, 0x0000, OP_EQ, 0, S1_PCKT, 0, S1_PCKT, | ||
2482 | CL_REG, 0x002, 3, 0x0, 0x0000}, | ||
2483 | { "Drop packet", 0x0000, 0x0000, OP_EQ, 0, S1_PCKT, 0, S1_PCKT, | ||
2484 | IM_CTL, 0x080, 3, 0x0, 0xffff}, | ||
2485 | { "No HTTP", 0x0000, 0x0000, OP_EQ, 0, S1_PCKT, 0, S1_PCKT, | ||
2486 | IM_CTL, 0x044, 3, 0x0, 0xffff}, | ||
2487 | { "IPV4 ESP encrypted?", /* S1_ESP4 */ | ||
2488 | 0x00ff, 0x0032, OP_EQ, 0, S1_CLNP2, 0, S1_AH4, IM_CTL, | ||
2489 | 0x021, 1, 0x0, 0xffff}, | ||
2490 | { "IPV4 AH encrypted?", /* S1_AH4 */ | ||
2491 | 0x00ff, 0x0033, OP_EQ, 0, S1_CLNP2, 0, S1_CLNP, IM_CTL, | ||
2492 | 0x021, 1, 0x0, 0xffff}, | ||
2493 | { "IPV6 ESP encrypted?", /* S1_ESP6 */ | ||
2494 | #if 0 | ||
2495 | //@@@0x00ff, 0x0032, OP_EQ, 0, S1_CLNP2, 0, S1_AH6, IM_CTL, 0x021, 1, 0x0, 0xffff, | ||
2496 | #endif | ||
2497 | 0xff00, 0x3200, OP_EQ, 0, S1_CLNP2, 0, S1_AH6, IM_CTL, | ||
2498 | 0x021, 1, 0x0, 0xffff}, | ||
2499 | { "IPV6 AH encrypted?", /* S1_AH6 */ | ||
2500 | #if 0 | ||
2501 | //@@@0x00ff, 0x0033, OP_EQ, 0, S1_CLNP2, 0, S1_CLNP, IM_CTL, 0x021, 1, 0x0, 0xffff, | ||
2502 | #endif | ||
2503 | 0xff00, 0x3300, OP_EQ, 0, S1_CLNP2, 0, S1_CLNP, IM_CTL, | ||
2504 | 0x021, 1, 0x0, 0xffff}, | ||
2505 | { NULL }, | ||
2506 | }; | ||
2507 | #ifdef HP_ENCRYPT_DEFAULT | ||
2508 | #define CAS_HP_FIRMWARE cas_prog_encryptiontab | ||
2509 | #endif | ||
2510 | #endif | ||
2511 | |||
2512 | static cas_hp_inst_t cas_prog_null[] = { {NULL} }; | ||
2513 | #ifdef HP_NULL_DEFAULT | ||
2514 | #define CAS_HP_FIRMWARE cas_prog_null | ||
2515 | #endif | ||
2516 | |||
2517 | /* phy types */ | ||
2518 | #define CAS_PHY_UNKNOWN 0x00 | ||
2519 | #define CAS_PHY_SERDES 0x01 | ||
2520 | #define CAS_PHY_MII_MDIO0 0x02 | ||
2521 | #define CAS_PHY_MII_MDIO1 0x04 | ||
2522 | #define CAS_PHY_MII(x) ((x) & (CAS_PHY_MII_MDIO0 | CAS_PHY_MII_MDIO1)) | ||
2523 | |||
2524 | /* _RING_INDEX is the index for the ring sizes to be used. _RING_SIZE | ||
2525 | * is the actual size. the default index for the various rings is | ||
2526 | * 8. NOTE: there are a bunch of alignment constraints for the rings. to | ||
2527 | * deal with that, i just allocate rings to create the desired | ||
2528 | * alignment. here are the constraints: | ||
2529 | * RX DESC and COMP rings must be 8KB aligned | ||
2530 | * TX DESC must be 2KB aligned. | ||
2531 | * if you change the numbers, be cognizant of how the alignment will change | ||
2532 | * in INIT_BLOCK as well. | ||
2533 | */ | ||
2534 | |||
2535 | #define DESC_RING_I_TO_S(x) (32*(1 << (x))) | ||
2536 | #define COMP_RING_I_TO_S(x) (128*(1 << (x))) | ||
2537 | #define TX_DESC_RING_INDEX 4 /* 512 = 8k */ | ||
2538 | #define RX_DESC_RING_INDEX 4 /* 512 = 8k */ | ||
2539 | #define RX_COMP_RING_INDEX 4 /* 2048 = 64k: should be 4x rx ring size */ | ||
2540 | |||
2541 | #if (TX_DESC_RING_INDEX > 8) || (TX_DESC_RING_INDEX < 0) | ||
2542 | #error TX_DESC_RING_INDEX must be between 0 and 8 | ||
2543 | #endif | ||
2544 | |||
2545 | #if (RX_DESC_RING_INDEX > 8) || (RX_DESC_RING_INDEX < 0) | ||
2546 | #error RX_DESC_RING_INDEX must be between 0 and 8 | ||
2547 | #endif | ||
2548 | |||
2549 | #if (RX_COMP_RING_INDEX > 8) || (RX_COMP_RING_INDEX < 0) | ||
2550 | #error RX_COMP_RING_INDEX must be between 0 and 8 | ||
2551 | #endif | ||
2552 | |||
2553 | #define N_TX_RINGS MAX_TX_RINGS /* for QoS */ | ||
2554 | #define N_TX_RINGS_MASK MAX_TX_RINGS_MASK | ||
2555 | #define N_RX_DESC_RINGS MAX_RX_DESC_RINGS /* 1 for ipsec */ | ||
2556 | #define N_RX_COMP_RINGS 0x1 /* for mult. PCI interrupts */ | ||
2557 | |||
2558 | /* number of flows that can go through re-assembly */ | ||
2559 | #define N_RX_FLOWS 64 | ||
2560 | |||
2561 | #define TX_DESC_RING_SIZE DESC_RING_I_TO_S(TX_DESC_RING_INDEX) | ||
2562 | #define RX_DESC_RING_SIZE DESC_RING_I_TO_S(RX_DESC_RING_INDEX) | ||
2563 | #define RX_COMP_RING_SIZE COMP_RING_I_TO_S(RX_COMP_RING_INDEX) | ||
2564 | #define TX_DESC_RINGN_INDEX(x) TX_DESC_RING_INDEX | ||
2565 | #define RX_DESC_RINGN_INDEX(x) RX_DESC_RING_INDEX | ||
2566 | #define RX_COMP_RINGN_INDEX(x) RX_COMP_RING_INDEX | ||
2567 | #define TX_DESC_RINGN_SIZE(x) TX_DESC_RING_SIZE | ||
2568 | #define RX_DESC_RINGN_SIZE(x) RX_DESC_RING_SIZE | ||
2569 | #define RX_COMP_RINGN_SIZE(x) RX_COMP_RING_SIZE | ||
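
For the default index of 4 the arithmetic works out as follows (a worked example derived from the macros above, using the descriptor sizes defined later in this file):

    /* TX/RX DESC: DESC_RING_I_TO_S(4) = 32 << 4  = 512 entries;
     *             512 * 16 bytes (two __le64 words)   = 8KB
     * RX COMP:    COMP_RING_I_TO_S(4) = 128 << 4 = 2048 entries;
     *             2048 * 32 bytes (four __le64 words) = 64KB
     * both of which satisfy the 8KB alignment constraints noted earlier.
     */
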
2570 | |||
2571 | /* convert values */ | ||
2572 | #define CAS_BASE(x, y) (((y) << (x ## _SHIFT)) & (x ## _MASK)) | ||
2573 | #define CAS_VAL(x, y) (((y) & (x ## _MASK)) >> (x ## _SHIFT)) | ||
2574 | #define CAS_TX_RINGN_BASE(y) ((TX_DESC_RINGN_INDEX(y) << \ | ||
2575 | TX_CFG_DESC_RINGN_SHIFT(y)) & \ | ||
2576 | TX_CFG_DESC_RINGN_MASK(y)) | ||
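
Usage of the conversion helpers is symmetric: CAS_BASE packs a value into a field and CAS_VAL extracts it, given any FOO_MASK/FOO_SHIFT pair from this file. Illustrative only:

    u64 w1   = CAS_BASE(RX_COMP1_DATA_SIZE, len);  /* (len << SHIFT) & MASK */
    u32 size = CAS_VAL(RX_COMP1_DATA_SIZE, w1);    /* (w1 & MASK) >> SHIFT  */
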
2577 | |||
2578 | /* min is 2k, but we can't do jumbo frames unless it's at least 8k */ | ||
2579 | #define CAS_MIN_PAGE_SHIFT 11 /* 2048 */ | ||
2580 | #define CAS_JUMBO_PAGE_SHIFT 13 /* 8192 */ | ||
2581 | #define CAS_MAX_PAGE_SHIFT 14 /* 16384 */ | ||
2582 | |||
2583 | #define TX_DESC_BUFLEN_MASK 0x0000000000003FFFULL /* buffer length in | ||
2584 | bytes. 0 - 9256 */ | ||
2585 | #define TX_DESC_BUFLEN_SHIFT 0 | ||
2586 | #define TX_DESC_CSUM_START_MASK 0x00000000001F8000ULL /* checksum start. # | ||
2587 | of bytes to be | ||
2588 | skipped before | ||
2589 | csum calc begins. | ||
2590 | value must be | ||
2591 | even */ | ||
2592 | #define TX_DESC_CSUM_START_SHIFT 15 | ||
2593 | #define TX_DESC_CSUM_STUFF_MASK 0x000000001FE00000ULL /* checksum stuff. | ||
2594 | byte offset w/in | ||
2595 | the pkt for the | ||
2596 | 1st csum byte. | ||
2597 | must be > 8 */ | ||
2598 | #define TX_DESC_CSUM_STUFF_SHIFT 21 | ||
2599 | #define TX_DESC_CSUM_EN 0x0000000020000000ULL /* enable checksum */ | ||
2600 | #define TX_DESC_EOF 0x0000000040000000ULL /* end of frame */ | ||
2601 | #define TX_DESC_SOF 0x0000000080000000ULL /* start of frame */ | ||
2602 | #define TX_DESC_INTME 0x0000000100000000ULL /* interrupt me */ | ||
2603 | #define TX_DESC_NO_CRC 0x0000000200000000ULL /* debugging only. | ||
2604 | CRC will not be | ||
2605 | inserted into | ||
2606 | outgoing frame. */ | ||
2607 | struct cas_tx_desc { | ||
2608 | __le64 control; | ||
2609 | __le64 buffer; | ||
2610 | }; | ||
2611 | |||
2612 | /* descriptor ring for free buffers contains page-sized buffers. the index | ||
2613 | * value is not used by the hw in any way. it's just stored and returned in | ||
2614 | * the completion ring. | ||
2615 | */ | ||
2616 | struct cas_rx_desc { | ||
2617 | __le64 index; | ||
2618 | __le64 buffer; | ||
2619 | }; | ||
2620 | |||
2621 | /* received packets are put on the completion ring. */ | ||
2622 | /* word 1 */ | ||
2623 | #define RX_COMP1_DATA_SIZE_MASK 0x0000000007FFE000ULL | ||
2624 | #define RX_COMP1_DATA_SIZE_SHIFT 13 | ||
2625 | #define RX_COMP1_DATA_OFF_MASK 0x000001FFF8000000ULL | ||
2626 | #define RX_COMP1_DATA_OFF_SHIFT 27 | ||
2627 | #define RX_COMP1_DATA_INDEX_MASK 0x007FFE0000000000ULL | ||
2628 | #define RX_COMP1_DATA_INDEX_SHIFT 41 | ||
2629 | #define RX_COMP1_SKIP_MASK 0x0180000000000000ULL | ||
2630 | #define RX_COMP1_SKIP_SHIFT 55 | ||
2631 | #define RX_COMP1_RELEASE_NEXT 0x0200000000000000ULL | ||
2632 | #define RX_COMP1_SPLIT_PKT 0x0400000000000000ULL | ||
2633 | #define RX_COMP1_RELEASE_FLOW 0x0800000000000000ULL | ||
2634 | #define RX_COMP1_RELEASE_DATA 0x1000000000000000ULL | ||
2635 | #define RX_COMP1_RELEASE_HDR 0x2000000000000000ULL | ||
2636 | #define RX_COMP1_TYPE_MASK 0xC000000000000000ULL | ||
2637 | #define RX_COMP1_TYPE_SHIFT 62 | ||
2638 | |||
2639 | /* word 2 */ | ||
2640 | #define RX_COMP2_NEXT_INDEX_MASK 0x00000007FFE00000ULL | ||
2641 | #define RX_COMP2_NEXT_INDEX_SHIFT 21 | ||
2642 | #define RX_COMP2_HDR_SIZE_MASK 0x00000FF800000000ULL | ||
2643 | #define RX_COMP2_HDR_SIZE_SHIFT 35 | ||
2644 | #define RX_COMP2_HDR_OFF_MASK 0x0003F00000000000ULL | ||
2645 | #define RX_COMP2_HDR_OFF_SHIFT 44 | ||
2646 | #define RX_COMP2_HDR_INDEX_MASK 0xFFFC000000000000ULL | ||
2647 | #define RX_COMP2_HDR_INDEX_SHIFT 50 | ||
2648 | |||
2649 | /* word 3 */ | ||
2650 | #define RX_COMP3_SMALL_PKT 0x0000000000000001ULL | ||
2651 | #define RX_COMP3_JUMBO_PKT 0x0000000000000002ULL | ||
2652 | #define RX_COMP3_JUMBO_HDR_SPLIT_EN 0x0000000000000004ULL | ||
2653 | #define RX_COMP3_CSUM_START_MASK 0x000000000007F000ULL | ||
2654 | #define RX_COMP3_CSUM_START_SHIFT 12 | ||
2655 | #define RX_COMP3_FLOWID_MASK 0x0000000001F80000ULL | ||
2656 | #define RX_COMP3_FLOWID_SHIFT 19 | ||
2657 | #define RX_COMP3_OPCODE_MASK 0x000000000E000000ULL | ||
2658 | #define RX_COMP3_OPCODE_SHIFT 25 | ||
2659 | #define RX_COMP3_FORCE_FLAG 0x0000000010000000ULL | ||
2660 | #define RX_COMP3_NO_ASSIST 0x0000000020000000ULL | ||
2661 | #define RX_COMP3_LOAD_BAL_MASK 0x000001F800000000ULL | ||
2662 | #define RX_COMP3_LOAD_BAL_SHIFT 35 | ||
2663 | #define RX_PLUS_COMP3_ENC_PKT 0x0000020000000000ULL /* cas+ */ | ||
2664 | #define RX_COMP3_L3_HEAD_OFF_MASK 0x0000FE0000000000ULL /* cas */ | ||
2665 | #define RX_COMP3_L3_HEAD_OFF_SHIFT 41 | ||
2666 | #define RX_PLUS_COMP_L3_HEAD_OFF_MASK 0x0000FC0000000000ULL /* cas+ */ | ||
2667 | #define RX_PLUS_COMP_L3_HEAD_OFF_SHIFT 42 | ||
2668 | #define RX_COMP3_SAP_MASK 0xFFFF000000000000ULL | ||
2669 | #define RX_COMP3_SAP_SHIFT 48 | ||
2670 | |||
2671 | /* word 4 */ | ||
2672 | #define RX_COMP4_TCP_CSUM_MASK 0x000000000000FFFFULL | ||
2673 | #define RX_COMP4_TCP_CSUM_SHIFT 0 | ||
2674 | #define RX_COMP4_PKT_LEN_MASK 0x000000003FFF0000ULL | ||
2675 | #define RX_COMP4_PKT_LEN_SHIFT 16 | ||
2676 | #define RX_COMP4_PERFECT_MATCH_MASK 0x00000003C0000000ULL | ||
2677 | #define RX_COMP4_PERFECT_MATCH_SHIFT 30 | ||
2678 | #define RX_COMP4_ZERO 0x0000080000000000ULL | ||
2679 | #define RX_COMP4_HASH_VAL_MASK 0x0FFFF00000000000ULL | ||
2680 | #define RX_COMP4_HASH_VAL_SHIFT 44 | ||
2681 | #define RX_COMP4_HASH_PASS 0x1000000000000000ULL | ||
2682 | #define RX_COMP4_BAD 0x4000000000000000ULL | ||
2683 | #define RX_COMP4_LEN_MISMATCH 0x8000000000000000ULL | ||
2684 | |||
2685 | /* we encode the following: ring/index/release. only 14 bits | ||
2686 | * are usable. | ||
2687 | * NOTE: the encoding is dependent upon RX_DESC_RING_SIZE and | ||
2688 | * MAX_RX_DESC_RINGS. */ | ||
2689 | #define RX_INDEX_NUM_MASK 0x0000000000000FFFULL | ||
2690 | #define RX_INDEX_NUM_SHIFT 0 | ||
2691 | #define RX_INDEX_RING_MASK 0x0000000000001000ULL | ||
2692 | #define RX_INDEX_RING_SHIFT 12 | ||
2693 | #define RX_INDEX_RELEASE 0x0000000000002000ULL | ||
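
As an example of the encoding, the cookie for descriptor slot 5 on ring 1, flagged for release, would be built like this (a sketch, not driver code):

    __le64 cookie = cpu_to_le64((5ULL << RX_INDEX_NUM_SHIFT) |
                                (1ULL << RX_INDEX_RING_SHIFT) |
                                RX_INDEX_RELEASE);
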
2694 | |||
2695 | struct cas_rx_comp { | ||
2696 | __le64 word1; | ||
2697 | __le64 word2; | ||
2698 | __le64 word3; | ||
2699 | __le64 word4; | ||
2700 | }; | ||
2701 | |||
2702 | enum link_state { | ||
2703 | link_down = 0, /* No link, will retry */ | ||
2704 | link_aneg, /* Autoneg in progress */ | ||
2705 | link_force_try, /* Try Forced link speed */ | ||
2706 | link_force_ret, /* Forced mode worked, retrying autoneg */ | ||
2707 | link_force_ok, /* Stay in forced mode */ | ||
2708 | link_up /* Link is up */ | ||
2709 | }; | ||
2710 | |||
2711 | typedef struct cas_page { | ||
2712 | struct list_head list; | ||
2713 | struct page *buffer; | ||
2714 | dma_addr_t dma_addr; | ||
2715 | int used; | ||
2716 | } cas_page_t; | ||
2717 | |||
2718 | |||
2719 | /* some alignment constraints: | ||
2720 | * TX DESC, RX DESC, and RX COMP must each be 8K aligned. | ||
2721 | * TX COMPWB must be 8-byte aligned. | ||
2722 | * to accomplish this, here's what we do: | ||
2723 | * | ||
2724 | * INIT_BLOCK_RX_COMP = 64k (already aligned) | ||
2725 | * INIT_BLOCK_RX_DESC = 8k | ||
2726 | * INIT_BLOCK_TX = 8k | ||
2727 | * INIT_BLOCK_RX1_DESC = 8k | ||
2728 | * TX COMPWB | ||
2729 | */ | ||
2730 | #define INIT_BLOCK_TX (TX_DESC_RING_SIZE) | ||
2731 | #define INIT_BLOCK_RX_DESC (RX_DESC_RING_SIZE) | ||
2732 | #define INIT_BLOCK_RX_COMP (RX_COMP_RING_SIZE) | ||
2733 | |||
2734 | struct cas_init_block { | ||
2735 | struct cas_rx_comp rxcs[N_RX_COMP_RINGS][INIT_BLOCK_RX_COMP]; | ||
2736 | struct cas_rx_desc rxds[N_RX_DESC_RINGS][INIT_BLOCK_RX_DESC]; | ||
2737 | struct cas_tx_desc txds[N_TX_RINGS][INIT_BLOCK_TX]; | ||
2738 | __le64 tx_compwb; | ||
2739 | }; | ||
2740 | |||
2741 | /* tiny buffers to deal with target abort issue. we allocate a bit | ||
2742 | * over so that we don't have target abort issues with these buffers | ||
2743 | * as well. | ||
2744 | */ | ||
2745 | #define TX_TINY_BUF_LEN 0x100 | ||
2746 | #define TX_TINY_BUF_BLOCK ((INIT_BLOCK_TX + 1)*TX_TINY_BUF_LEN) | ||
2747 | |||
2748 | struct cas_tiny_count { | ||
2749 | int nbufs; | ||
2750 | int used; | ||
2751 | }; | ||
2752 | |||
2753 | struct cas { | ||
2754 | spinlock_t lock; /* for most bits */ | ||
2755 | spinlock_t tx_lock[N_TX_RINGS]; /* tx bits */ | ||
2756 | spinlock_t stat_lock[N_TX_RINGS + 1]; /* for stat gathering */ | ||
2757 | spinlock_t rx_inuse_lock; /* rx inuse list */ | ||
2758 | spinlock_t rx_spare_lock; /* rx spare list */ | ||
2759 | |||
2760 | void __iomem *regs; | ||
2761 | int tx_new[N_TX_RINGS], tx_old[N_TX_RINGS]; | ||
2762 | int rx_old[N_RX_DESC_RINGS]; | ||
2763 | int rx_cur[N_RX_COMP_RINGS], rx_new[N_RX_COMP_RINGS]; | ||
2764 | int rx_last[N_RX_DESC_RINGS]; | ||
2765 | |||
2766 | struct napi_struct napi; | ||
2767 | |||
2768 | /* Set when chip is actually in operational state | ||
2769 | * (ie. not power managed) */ | ||
2770 | int hw_running; | ||
2771 | int opened; | ||
2772 | struct mutex pm_mutex; /* open/close/suspend/resume */ | ||
2773 | |||
2774 | struct cas_init_block *init_block; | ||
2775 | struct cas_tx_desc *init_txds[MAX_TX_RINGS]; | ||
2776 | struct cas_rx_desc *init_rxds[MAX_RX_DESC_RINGS]; | ||
2777 | struct cas_rx_comp *init_rxcs[MAX_RX_COMP_RINGS]; | ||
2778 | |||
2779 | /* we use sk_buffs for tx and pages for rx. the rx skbuffs | ||
2780 | * are there for flow re-assembly. */ | ||
2781 | struct sk_buff *tx_skbs[N_TX_RINGS][TX_DESC_RING_SIZE]; | ||
2782 | struct sk_buff_head rx_flows[N_RX_FLOWS]; | ||
2783 | cas_page_t *rx_pages[N_RX_DESC_RINGS][RX_DESC_RING_SIZE]; | ||
2784 | struct list_head rx_spare_list, rx_inuse_list; | ||
2785 | int rx_spares_needed; | ||
2786 | |||
2787 | /* for small packets when copying would be quicker than | ||
2788 | mapping */ | ||
2789 | struct cas_tiny_count tx_tiny_use[N_TX_RINGS][TX_DESC_RING_SIZE]; | ||
2790 | u8 *tx_tiny_bufs[N_TX_RINGS]; | ||
2791 | |||
2792 | u32 msg_enable; | ||
2793 | |||
2794 | /* N_TX_RINGS must be >= N_RX_DESC_RINGS */ | ||
2795 | struct net_device_stats net_stats[N_TX_RINGS + 1]; | ||
2796 | |||
2797 | u32 pci_cfg[64 >> 2]; | ||
2798 | u8 pci_revision; | ||
2799 | |||
2800 | int phy_type; | ||
2801 | int phy_addr; | ||
2802 | u32 phy_id; | ||
2803 | #define CAS_FLAG_1000MB_CAP 0x00000001 | ||
2804 | #define CAS_FLAG_REG_PLUS 0x00000002 | ||
2805 | #define CAS_FLAG_TARGET_ABORT 0x00000004 | ||
2806 | #define CAS_FLAG_SATURN 0x00000008 | ||
2807 | #define CAS_FLAG_RXD_POST_MASK 0x000000F0 | ||
2808 | #define CAS_FLAG_RXD_POST_SHIFT 4 | ||
2809 | #define CAS_FLAG_RXD_POST(x) ((1 << (CAS_FLAG_RXD_POST_SHIFT + (x))) & \ | ||
2810 | CAS_FLAG_RXD_POST_MASK) | ||
2811 | #define CAS_FLAG_ENTROPY_DEV 0x00000100 | ||
2812 | #define CAS_FLAG_NO_HW_CSUM 0x00000200 | ||
2813 | u32 cas_flags; | ||
2814 | int packet_min; /* minimum packet size */ | ||
2815 | int tx_fifo_size; | ||
2816 | int rx_fifo_size; | ||
2817 | int rx_pause_off; | ||
2818 | int rx_pause_on; | ||
2819 | int crc_size; /* 4 if half-duplex */ | ||
2820 | |||
2821 | int pci_irq_INTC; | ||
2822 | int min_frame_size; /* for tx fifo workaround */ | ||
2823 | |||
2824 | /* page size allocation */ | ||
2825 | int page_size; | ||
2826 | int page_order; | ||
2827 | int mtu_stride; | ||
2828 | |||
2829 | u32 mac_rx_cfg; | ||
2830 | |||
2831 | /* Autoneg & PHY control */ | ||
2832 | int link_cntl; | ||
2833 | int link_fcntl; | ||
2834 | enum link_state lstate; | ||
2835 | struct timer_list link_timer; | ||
2836 | int timer_ticks; | ||
2837 | struct work_struct reset_task; | ||
2838 | #if 0 | ||
2839 | atomic_t reset_task_pending; | ||
2840 | #else | ||
2841 | atomic_t reset_task_pending; | ||
2842 | atomic_t reset_task_pending_mtu; | ||
2843 | atomic_t reset_task_pending_spare; | ||
2844 | atomic_t reset_task_pending_all; | ||
2845 | #endif | ||
2846 | |||
2847 | /* Link-down problem workaround */ | ||
2848 | #define LINK_TRANSITION_UNKNOWN 0 | ||
2849 | #define LINK_TRANSITION_ON_FAILURE 1 | ||
2850 | #define LINK_TRANSITION_STILL_FAILED 2 | ||
2851 | #define LINK_TRANSITION_LINK_UP 3 | ||
2852 | #define LINK_TRANSITION_LINK_CONFIG 4 | ||
2853 | #define LINK_TRANSITION_LINK_DOWN 5 | ||
2854 | #define LINK_TRANSITION_REQUESTED_RESET 6 | ||
2855 | int link_transition; | ||
2856 | int link_transition_jiffies_valid; | ||
2857 | unsigned long link_transition_jiffies; | ||
2858 | |||
2859 | /* Tuning */ | ||
2860 | u8 orig_cacheline_size; /* value when loaded */ | ||
2861 | #define CAS_PREF_CACHELINE_SIZE 0x20 /* Minimum desired */ | ||
2862 | |||
2863 | /* Diagnostic counters and state. */ | ||
2864 | int casreg_len; /* reg-space size for dumping */ | ||
2865 | u64 pause_entered; | ||
2866 | u16 pause_last_time_recvd; | ||
2867 | |||
2868 | dma_addr_t block_dvma, tx_tiny_dvma[N_TX_RINGS]; | ||
2869 | struct pci_dev *pdev; | ||
2870 | struct net_device *dev; | ||
2871 | #if defined(CONFIG_OF) | ||
2872 | struct device_node *of_node; | ||
2873 | #endif | ||
2874 | |||
2875 | /* Firmware Info */ | ||
2876 | u16 fw_load_addr; | ||
2877 | u32 fw_size; | ||
2878 | u8 *fw_data; | ||
2879 | }; | ||
2880 | |||
2881 | #define TX_DESC_NEXT(r, x) (((x) + 1) & (TX_DESC_RINGN_SIZE(r) - 1)) | ||
2882 | #define RX_DESC_ENTRY(r, x) ((x) & (RX_DESC_RINGN_SIZE(r) - 1)) | ||
2883 | #define RX_COMP_ENTRY(r, x) ((x) & (RX_COMP_RINGN_SIZE(r) - 1)) | ||
2884 | |||
2885 | #define TX_BUFF_COUNT(r, x, y) ((x) <= (y) ? ((y) - (x)) : \ | ||
2886 | (TX_DESC_RINGN_SIZE(r) - (x) + (y))) | ||
2887 | |||
2888 | #define TX_BUFFS_AVAIL(cp, i) ((cp)->tx_old[(i)] <= (cp)->tx_new[(i)] ? \ | ||
2889 | (cp)->tx_old[(i)] + (TX_DESC_RINGN_SIZE(i) - 1) - (cp)->tx_new[(i)] : \ | ||
2890 | (cp)->tx_old[(i)] - (cp)->tx_new[(i)] - 1) | ||
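
A quick check of TX_BUFFS_AVAIL shows the usual one-slot reserve: with a 512-entry ring and tx_old == tx_new (empty), it yields 0 + 511 - 0 = 511, so one descriptor always stays unused, which is what lets a full ring be told apart from an empty one.
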
2891 | |||
2892 | #define CAS_ALIGN(addr, align) \ | ||
2893 | (((unsigned long) (addr) + ((align) - 1UL)) & ~((align) - 1)) | ||
2894 | |||
2895 | #define RX_FIFO_SIZE 16384 | ||
2896 | #define EXPANSION_ROM_SIZE 65536 | ||
2897 | |||
2898 | #define CAS_MC_EXACT_MATCH_SIZE 15 | ||
2899 | #define CAS_MC_HASH_SIZE 256 | ||
2900 | #define CAS_MC_HASH_MAX (CAS_MC_EXACT_MATCH_SIZE + \ | ||
2901 | CAS_MC_HASH_SIZE) | ||
2902 | |||
2903 | #define TX_TARGET_ABORT_LEN 0x20 | ||
2904 | #define RX_SWIVEL_OFF_VAL 0x2 | ||
2905 | #define RX_AE_FREEN_VAL(x) (RX_DESC_RINGN_SIZE(x) >> 1) | ||
2906 | #define RX_AE_COMP_VAL (RX_COMP_RING_SIZE >> 1) | ||
2907 | #define RX_BLANK_INTR_PKT_VAL 0x05 | ||
2908 | #define RX_BLANK_INTR_TIME_VAL 0x0F | ||
2909 | #define HP_TCP_THRESH_VAL 1530 /* reduce to enable reassembly */ | ||
2910 | |||
2911 | #define RX_SPARE_COUNT (RX_DESC_RING_SIZE >> 1) | ||
2912 | #define RX_SPARE_RECOVER_VAL (RX_SPARE_COUNT >> 2) | ||
2913 | |||
2914 | #endif /* _CASSINI_H */ | ||
diff --git a/drivers/net/ethernet/sun/niu.c b/drivers/net/ethernet/sun/niu.c new file mode 100644 index 000000000000..ed47585a6862 --- /dev/null +++ b/drivers/net/ethernet/sun/niu.c | |||
@@ -0,0 +1,10263 @@ | |||
1 | /* niu.c: Neptune ethernet driver. | ||
2 | * | ||
3 | * Copyright (C) 2007, 2008 David S. Miller (davem@davemloft.net) | ||
4 | */ | ||
5 | |||
6 | #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt | ||
7 | |||
8 | #include <linux/module.h> | ||
9 | #include <linux/init.h> | ||
10 | #include <linux/interrupt.h> | ||
11 | #include <linux/pci.h> | ||
12 | #include <linux/dma-mapping.h> | ||
13 | #include <linux/netdevice.h> | ||
14 | #include <linux/ethtool.h> | ||
15 | #include <linux/etherdevice.h> | ||
16 | #include <linux/platform_device.h> | ||
17 | #include <linux/delay.h> | ||
18 | #include <linux/bitops.h> | ||
19 | #include <linux/mii.h> | ||
20 | #include <linux/if_ether.h> | ||
21 | #include <linux/if_vlan.h> | ||
22 | #include <linux/ip.h> | ||
23 | #include <linux/in.h> | ||
24 | #include <linux/ipv6.h> | ||
25 | #include <linux/log2.h> | ||
26 | #include <linux/jiffies.h> | ||
27 | #include <linux/crc32.h> | ||
28 | #include <linux/list.h> | ||
29 | #include <linux/slab.h> | ||
30 | |||
31 | #include <linux/io.h> | ||
32 | #include <linux/of_device.h> | ||
33 | |||
34 | #include "niu.h" | ||
35 | |||
36 | #define DRV_MODULE_NAME "niu" | ||
37 | #define DRV_MODULE_VERSION "1.1" | ||
38 | #define DRV_MODULE_RELDATE "Apr 22, 2010" | ||
39 | |||
40 | static char version[] __devinitdata = | ||
41 | DRV_MODULE_NAME ".c:v" DRV_MODULE_VERSION " (" DRV_MODULE_RELDATE ")\n"; | ||
42 | |||
43 | MODULE_AUTHOR("David S. Miller (davem@davemloft.net)"); | ||
44 | MODULE_DESCRIPTION("NIU ethernet driver"); | ||
45 | MODULE_LICENSE("GPL"); | ||
46 | MODULE_VERSION(DRV_MODULE_VERSION); | ||
47 | |||
48 | #ifndef readq | ||
49 | static u64 readq(void __iomem *reg) | ||
50 | { | ||
51 | return ((u64) readl(reg)) | (((u64) readl(reg + 4UL)) << 32); | ||
52 | } | ||
53 | |||
54 | static void writeq(u64 val, void __iomem *reg) | ||
55 | { | ||
56 | writel(val & 0xffffffff, reg); | ||
57 | writel(val >> 32, reg + 0x4UL); | ||
58 | } | ||
59 | #endif | ||
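readq/writeq are only provided by the architecture when a native 64-bit MMIO access exists; the fallback above synthesizes one from two 32-bit accesses, low word at the register offset and high word four bytes above. The pair is not atomic, so a value that changes between the two halves can be read torn. A self-contained sketch of the same split against ordinary memory (no real MMIO involved):

    #include <stdint.h>
    #include <stdio.h>

    /* Compose/split a 64-bit value as two 32-bit halves, low word
     * first, the same way the fallback drives the device registers. */
    static uint64_t read64_split(const volatile uint32_t *reg)
    {
            return (uint64_t)reg[0] | ((uint64_t)reg[1] << 32);
    }

    static void write64_split(uint64_t val, volatile uint32_t *reg)
    {
            reg[0] = (uint32_t)(val & 0xffffffff);
            reg[1] = (uint32_t)(val >> 32);
    }

    int main(void)
    {
            uint32_t mmio[2] = { 0, 0 };    /* stand-in for a device register */

            write64_split(0x1122334455667788ULL, mmio);
            printf("0x%016llx\n", (unsigned long long)read64_split(mmio));
            return 0;
    }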
60 | |||
61 | static DEFINE_PCI_DEVICE_TABLE(niu_pci_tbl) = { | ||
62 | {PCI_DEVICE(PCI_VENDOR_ID_SUN, 0xabcd)}, | ||
63 | {} | ||
64 | }; | ||
65 | |||
66 | MODULE_DEVICE_TABLE(pci, niu_pci_tbl); | ||
67 | |||
68 | #define NIU_TX_TIMEOUT (5 * HZ) | ||
69 | |||
70 | #define nr64(reg) readq(np->regs + (reg)) | ||
71 | #define nw64(reg, val) writeq((val), np->regs + (reg)) | ||
72 | |||
73 | #define nr64_mac(reg) readq(np->mac_regs + (reg)) | ||
74 | #define nw64_mac(reg, val) writeq((val), np->mac_regs + (reg)) | ||
75 | |||
76 | #define nr64_ipp(reg) readq(np->regs + np->ipp_off + (reg)) | ||
77 | #define nw64_ipp(reg, val) writeq((val), np->regs + np->ipp_off + (reg)) | ||
78 | |||
79 | #define nr64_pcs(reg) readq(np->regs + np->pcs_off + (reg)) | ||
80 | #define nw64_pcs(reg, val) writeq((val), np->regs + np->pcs_off + (reg)) | ||
81 | |||
82 | #define nr64_xpcs(reg) readq(np->regs + np->xpcs_off + (reg)) | ||
83 | #define nw64_xpcs(reg, val) writeq((val), np->regs + np->xpcs_off + (reg)) | ||
84 | |||
85 | #define NIU_MSG_DEFAULT (NETIF_MSG_DRV | NETIF_MSG_PROBE | NETIF_MSG_LINK) | ||
86 | |||
87 | static int niu_debug; | ||
88 | static int debug = -1; | ||
89 | module_param(debug, int, 0); | ||
90 | MODULE_PARM_DESC(debug, "NIU debug level"); | ||
91 | |||
92 | #define niu_lock_parent(np, flags) \ | ||
93 | spin_lock_irqsave(&np->parent->lock, flags) | ||
94 | #define niu_unlock_parent(np, flags) \ | ||
95 | spin_unlock_irqrestore(&np->parent->lock, flags) | ||
96 | |||
97 | static int serdes_init_10g_serdes(struct niu *np); | ||
98 | |||
99 | static int __niu_wait_bits_clear_mac(struct niu *np, unsigned long reg, | ||
100 | u64 bits, int limit, int delay) | ||
101 | { | ||
102 | while (--limit >= 0) { | ||
103 | u64 val = nr64_mac(reg); | ||
104 | |||
105 | if (!(val & bits)) | ||
106 | break; | ||
107 | udelay(delay); | ||
108 | } | ||
109 | if (limit < 0) | ||
110 | return -ENODEV; | ||
111 | return 0; | ||
112 | } | ||
113 | |||
114 | static int __niu_set_and_wait_clear_mac(struct niu *np, unsigned long reg, | ||
115 | u64 bits, int limit, int delay, | ||
116 | const char *reg_name) | ||
117 | { | ||
118 | int err; | ||
119 | |||
120 | nw64_mac(reg, bits); | ||
121 | err = __niu_wait_bits_clear_mac(np, reg, bits, limit, delay); | ||
122 | if (err) | ||
123 | netdev_err(np->dev, "bits (%llx) of register %s would not clear, val[%llx]\n", | ||
124 | (unsigned long long)bits, reg_name, | ||
125 | (unsigned long long)nr64_mac(reg)); | ||
126 | return err; | ||
127 | } | ||
128 | |||
129 | #define niu_set_and_wait_clear_mac(NP, REG, BITS, LIMIT, DELAY, REG_NAME) \ | ||
130 | ({ BUILD_BUG_ON(LIMIT <= 0 || DELAY < 0); \ | ||
131 | __niu_set_and_wait_clear_mac(NP, REG, BITS, LIMIT, DELAY, REG_NAME); \ | ||
132 | }) | ||
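The ({ ... }) wrapper is a GCC statement expression: it lets the macro run a compile-time sanity check on its LIMIT and DELAY arguments via BUILD_BUG_ON and still yield the helper's return value as the expression result. A minimal userspace sketch of the pattern, with a simplified stand-in for the kernel's BUILD_BUG_ON (the real definition differs) and a fake register in place of nr64_mac():

    #include <stdio.h>

    /* Simplified stand-in for the kernel's BUILD_BUG_ON: a negative
     * array size makes compilation fail when cond is true. */
    #define BUILD_BUG_ON(cond) ((void)sizeof(char[1 - 2 * !!(cond)]))

    static unsigned int fake_reg = 0x4;     /* pretend hardware register */

    static unsigned int read_reg(void)
    {
            fake_reg >>= 1;                 /* "hardware" clears the bit */
            return fake_reg;
    }

    static int wait_bits_clear(unsigned int bits, int limit)
    {
            while (--limit >= 0)
                    if (!(read_reg() & bits))
                            return 0;       /* bits cleared in time */
            return -1;                      /* timed out */
    }

    /* Statement expression: validate the limit at compile time, then
     * yield the helper's return value as the macro's value. */
    #define WAIT_BITS_CLEAR(bits, limit)            \
            ({ BUILD_BUG_ON((limit) <= 0);          \
               wait_bits_clear(bits, limit); })

    int main(void)
    {
            printf("result: %d\n", WAIT_BITS_CLEAR(0x4, 10));
            /* WAIT_BITS_CLEAR(0x4, 0) would not compile. */
            return 0;
    }

The same bounded-poll shape recurs in the _ipp and plain-register variants below; only the accessor differs.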
133 | |||
134 | static int __niu_wait_bits_clear_ipp(struct niu *np, unsigned long reg, | ||
135 | u64 bits, int limit, int delay) | ||
136 | { | ||
137 | while (--limit >= 0) { | ||
138 | u64 val = nr64_ipp(reg); | ||
139 | |||
140 | if (!(val & bits)) | ||
141 | break; | ||
142 | udelay(delay); | ||
143 | } | ||
144 | if (limit < 0) | ||
145 | return -ENODEV; | ||
146 | return 0; | ||
147 | } | ||
148 | |||
149 | static int __niu_set_and_wait_clear_ipp(struct niu *np, unsigned long reg, | ||
150 | u64 bits, int limit, int delay, | ||
151 | const char *reg_name) | ||
152 | { | ||
153 | int err; | ||
154 | u64 val; | ||
155 | |||
156 | val = nr64_ipp(reg); | ||
157 | val |= bits; | ||
158 | nw64_ipp(reg, val); | ||
159 | |||
160 | err = __niu_wait_bits_clear_ipp(np, reg, bits, limit, delay); | ||
161 | if (err) | ||
162 | netdev_err(np->dev, "bits (%llx) of register %s would not clear, val[%llx]\n", | ||
163 | (unsigned long long)bits, reg_name, | ||
164 | (unsigned long long)nr64_ipp(reg)); | ||
165 | return err; | ||
166 | } | ||
167 | |||
168 | #define niu_set_and_wait_clear_ipp(NP, REG, BITS, LIMIT, DELAY, REG_NAME) \ | ||
169 | ({ BUILD_BUG_ON(LIMIT <= 0 || DELAY < 0); \ | ||
170 | __niu_set_and_wait_clear_ipp(NP, REG, BITS, LIMIT, DELAY, REG_NAME); \ | ||
171 | }) | ||
172 | |||
173 | static int __niu_wait_bits_clear(struct niu *np, unsigned long reg, | ||
174 | u64 bits, int limit, int delay) | ||
175 | { | ||
176 | while (--limit >= 0) { | ||
177 | u64 val = nr64(reg); | ||
178 | |||
179 | if (!(val & bits)) | ||
180 | break; | ||
181 | udelay(delay); | ||
182 | } | ||
183 | if (limit < 0) | ||
184 | return -ENODEV; | ||
185 | return 0; | ||
186 | } | ||
187 | |||
188 | #define niu_wait_bits_clear(NP, REG, BITS, LIMIT, DELAY) \ | ||
189 | ({ BUILD_BUG_ON(LIMIT <= 0 || DELAY < 0); \ | ||
190 | __niu_wait_bits_clear(NP, REG, BITS, LIMIT, DELAY); \ | ||
191 | }) | ||
192 | |||
193 | static int __niu_set_and_wait_clear(struct niu *np, unsigned long reg, | ||
194 | u64 bits, int limit, int delay, | ||
195 | const char *reg_name) | ||
196 | { | ||
197 | int err; | ||
198 | |||
199 | nw64(reg, bits); | ||
200 | err = __niu_wait_bits_clear(np, reg, bits, limit, delay); | ||
201 | if (err) | ||
202 | netdev_err(np->dev, "bits (%llx) of register %s would not clear, val[%llx]\n", | ||
203 | (unsigned long long)bits, reg_name, | ||
204 | (unsigned long long)nr64(reg)); | ||
205 | return err; | ||
206 | } | ||
207 | |||
208 | #define niu_set_and_wait_clear(NP, REG, BITS, LIMIT, DELAY, REG_NAME) \ | ||
209 | ({ BUILD_BUG_ON(LIMIT <= 0 || DELAY < 0); \ | ||
210 | __niu_set_and_wait_clear(NP, REG, BITS, LIMIT, DELAY, REG_NAME); \ | ||
211 | }) | ||
212 | |||
213 | static void niu_ldg_rearm(struct niu *np, struct niu_ldg *lp, int on) | ||
214 | { | ||
215 | u64 val = (u64) lp->timer; | ||
216 | |||
217 | if (on) | ||
218 | val |= LDG_IMGMT_ARM; | ||
219 | |||
220 | nw64(LDG_IMGMT(lp->ldg_num), val); | ||
221 | } | ||
222 | |||
223 | static int niu_ldn_irq_enable(struct niu *np, int ldn, int on) | ||
224 | { | ||
225 | unsigned long mask_reg, bits; | ||
226 | u64 val; | ||
227 | |||
228 | if (ldn < 0 || ldn > LDN_MAX) | ||
229 | return -EINVAL; | ||
230 | |||
231 | if (ldn < 64) { | ||
232 | mask_reg = LD_IM0(ldn); | ||
233 | bits = LD_IM0_MASK; | ||
234 | } else { | ||
235 | mask_reg = LD_IM1(ldn - 64); | ||
236 | bits = LD_IM1_MASK; | ||
237 | } | ||
238 | |||
239 | val = nr64(mask_reg); | ||
240 | if (on) | ||
241 | val &= ~bits; | ||
242 | else | ||
243 | val |= bits; | ||
244 | nw64(mask_reg, val); | ||
245 | |||
246 | return 0; | ||
247 | } | ||
248 | |||
249 | static int niu_enable_ldn_in_ldg(struct niu *np, struct niu_ldg *lp, int on) | ||
250 | { | ||
251 | struct niu_parent *parent = np->parent; | ||
252 | int i; | ||
253 | |||
254 | for (i = 0; i <= LDN_MAX; i++) { | ||
255 | int err; | ||
256 | |||
257 | if (parent->ldg_map[i] != lp->ldg_num) | ||
258 | continue; | ||
259 | |||
260 | err = niu_ldn_irq_enable(np, i, on); | ||
261 | if (err) | ||
262 | return err; | ||
263 | } | ||
264 | return 0; | ||
265 | } | ||
266 | |||
267 | static int niu_enable_interrupts(struct niu *np, int on) | ||
268 | { | ||
269 | int i; | ||
270 | |||
271 | for (i = 0; i < np->num_ldg; i++) { | ||
272 | struct niu_ldg *lp = &np->ldg[i]; | ||
273 | int err; | ||
274 | |||
275 | err = niu_enable_ldn_in_ldg(np, lp, on); | ||
276 | if (err) | ||
277 | return err; | ||
278 | } | ||
279 | for (i = 0; i < np->num_ldg; i++) | ||
280 | niu_ldg_rearm(np, &np->ldg[i], on); | ||
281 | |||
282 | return 0; | ||
283 | } | ||
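Interrupt enable is two-level: every logical device number (LDN) that ldg_map[] assigns to a group is unmasked individually, and then the group (LDG) itself is re-armed with its timer value. A toy sketch of the map scan, with an illustrative LDN_MAX and printf in place of the register writes:

    #include <stdio.h>

    #define LDN_MAX 68      /* illustrative bound; the real one is in niu.h */

    /* ldg_map[ldn] records which logical device group each device
     * interrupt belongs to; enabling a group unmasks every member. */
    static int ldg_map[LDN_MAX + 1];

    static void enable_group(int ldg, int on)
    {
            int ldn;

            for (ldn = 0; ldn <= LDN_MAX; ldn++) {
                    if (ldg_map[ldn] != ldg)
                            continue;
                    printf("%s LDN %d\n", on ? "unmask" : "mask", ldn);
            }
            printf("rearm LDG %d\n", ldg);  /* second pass in the driver */
    }

    int main(void)
    {
            ldg_map[3] = ldg_map[7] = ldg_map[12] = 1;
            enable_group(1, 1);
            return 0;
    }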
284 | |||
285 | static u32 phy_encode(u32 type, int port) | ||
286 | { | ||
287 | return type << (port * 2); | ||
288 | } | ||
289 | |||
290 | static u32 phy_decode(u32 val, int port) | ||
291 | { | ||
292 | return (val >> (port * 2)) & PORT_TYPE_MASK; | ||
293 | } | ||
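phy_encode()/phy_decode() pack one small PHY-type field per port into a single u32, two bits per port. A self-contained round-trip demo (the 0x3 mask mirrors PORT_TYPE_MASK on the assumption that it is a 2-bit field, which the *2 shift implies; the type values are illustrative):

    #include <stdint.h>
    #include <stdio.h>

    #define PORT_TYPE_MASK 0x3  /* assumed 2-bit field, as the *2 shift implies */

    static uint32_t phy_encode(uint32_t type, int port)
    {
            return type << (port * 2);
    }

    static uint32_t phy_decode(uint32_t val, int port)
    {
            return (val >> (port * 2)) & PORT_TYPE_MASK;
    }

    int main(void)
    {
            uint32_t map = phy_encode(1, 0) | phy_encode(2, 1) |
                           phy_encode(3, 2) | phy_encode(1, 3);
            int port;

            for (port = 0; port < 4; port++)
                    printf("port %d -> type %u\n", port, phy_decode(map, port));
            return 0;
    }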
294 | |||
295 | static int mdio_wait(struct niu *np) | ||
296 | { | ||
297 | int limit = 1000; | ||
298 | u64 val; | ||
299 | |||
300 | while (--limit > 0) { | ||
301 | val = nr64(MIF_FRAME_OUTPUT); | ||
302 | if ((val >> MIF_FRAME_OUTPUT_TA_SHIFT) & 0x1) | ||
303 | return val & MIF_FRAME_OUTPUT_DATA; | ||
304 | |||
305 | udelay(10); | ||
306 | } | ||
307 | |||
308 | return -ENODEV; | ||
309 | } | ||
310 | |||
311 | static int mdio_read(struct niu *np, int port, int dev, int reg) | ||
312 | { | ||
313 | int err; | ||
314 | |||
315 | nw64(MIF_FRAME_OUTPUT, MDIO_ADDR_OP(port, dev, reg)); | ||
316 | err = mdio_wait(np); | ||
317 | if (err < 0) | ||
318 | return err; | ||
319 | |||
320 | nw64(MIF_FRAME_OUTPUT, MDIO_READ_OP(port, dev)); | ||
321 | return mdio_wait(np); | ||
322 | } | ||
323 | |||
324 | static int mdio_write(struct niu *np, int port, int dev, int reg, int data) | ||
325 | { | ||
326 | int err; | ||
327 | |||
328 | nw64(MIF_FRAME_OUTPUT, MDIO_ADDR_OP(port, dev, reg)); | ||
329 | err = mdio_wait(np); | ||
330 | if (err < 0) | ||
331 | return err; | ||
332 | |||
333 | nw64(MIF_FRAME_OUTPUT, MDIO_WRITE_OP(port, dev, data)); | ||
334 | err = mdio_wait(np); | ||
335 | if (err < 0) | ||
336 | return err; | ||
337 | |||
338 | return 0; | ||
339 | } | ||
340 | |||
341 | static int mii_read(struct niu *np, int port, int reg) | ||
342 | { | ||
343 | nw64(MIF_FRAME_OUTPUT, MII_READ_OP(port, reg)); | ||
344 | return mdio_wait(np); | ||
345 | } | ||
346 | |||
347 | static int mii_write(struct niu *np, int port, int reg, int data) | ||
348 | { | ||
349 | int err; | ||
350 | |||
351 | nw64(MIF_FRAME_OUTPUT, MII_WRITE_OP(port, reg, data)); | ||
352 | err = mdio_wait(np); | ||
353 | if (err < 0) | ||
354 | return err; | ||
355 | |||
356 | return 0; | ||
357 | } | ||
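The mdio_* helpers implement clause-45 MDIO, which is two-phase: one frame latches the 16-bit register address in the target device, and a second frame moves the data. The mii_* helpers are single-frame clause-22, where the 5-bit register number rides in the frame itself. A toy model of the clause-45 latch-then-access sequence, with an in-memory register file standing in for the MIF frame machinery (the real MDIO_ADDR_OP/MDIO_READ_OP/MDIO_WRITE_OP encodings live in niu.h):

    #include <stdint.h>
    #include <stdio.h>

    static uint16_t regs[32];               /* toy device register file */
    static int latched_reg = -1;

    static int c45_addr(int reg)            /* address frame */
    {
            latched_reg = reg;
            return 0;
    }

    static int c45_read(void)               /* read frame */
    {
            return latched_reg < 0 ? -1 : regs[latched_reg];
    }

    static int c45_write(int val)           /* write frame */
    {
            if (latched_reg < 0)
                    return -1;
            regs[latched_reg] = (uint16_t)val;
            return 0;
    }

    int main(void)
    {
            c45_addr(7);                    /* phase 1: latch the address */
            c45_write(0xbeef);              /* phase 2: move the data     */
            c45_addr(7);
            printf("reg 7 = 0x%04x\n", c45_read());
            return 0;
    }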
358 | |||
359 | static int esr2_set_tx_cfg(struct niu *np, unsigned long channel, u32 val) | ||
360 | { | ||
361 | int err; | ||
362 | |||
363 | err = mdio_write(np, np->port, NIU_ESR2_DEV_ADDR, | ||
364 | ESR2_TI_PLL_TX_CFG_L(channel), | ||
365 | val & 0xffff); | ||
366 | if (!err) | ||
367 | err = mdio_write(np, np->port, NIU_ESR2_DEV_ADDR, | ||
368 | ESR2_TI_PLL_TX_CFG_H(channel), | ||
369 | val >> 16); | ||
370 | return err; | ||
371 | } | ||
372 | |||
373 | static int esr2_set_rx_cfg(struct niu *np, unsigned long channel, u32 val) | ||
374 | { | ||
375 | int err; | ||
376 | |||
377 | err = mdio_write(np, np->port, NIU_ESR2_DEV_ADDR, | ||
378 | ESR2_TI_PLL_RX_CFG_L(channel), | ||
379 | val & 0xffff); | ||
380 | if (!err) | ||
381 | err = mdio_write(np, np->port, NIU_ESR2_DEV_ADDR, | ||
382 | ESR2_TI_PLL_RX_CFG_H(channel), | ||
383 | val >> 16); | ||
384 | return err; | ||
385 | } | ||
386 | |||
387 | /* Mode is always 10G fiber. */ | ||
388 | static int serdes_init_niu_10g_fiber(struct niu *np) | ||
389 | { | ||
390 | struct niu_link_config *lp = &np->link_config; | ||
391 | u32 tx_cfg, rx_cfg; | ||
392 | unsigned long i; | ||
393 | |||
394 | tx_cfg = (PLL_TX_CFG_ENTX | PLL_TX_CFG_SWING_1375MV); | ||
395 | rx_cfg = (PLL_RX_CFG_ENRX | PLL_RX_CFG_TERM_0P8VDDT | | ||
396 | PLL_RX_CFG_ALIGN_ENA | PLL_RX_CFG_LOS_LTHRESH | | ||
397 | PLL_RX_CFG_EQ_LP_ADAPTIVE); | ||
398 | |||
399 | if (lp->loopback_mode == LOOPBACK_PHY) { | ||
400 | u16 test_cfg = PLL_TEST_CFG_LOOPBACK_CML_DIS; | ||
401 | |||
402 | mdio_write(np, np->port, NIU_ESR2_DEV_ADDR, | ||
403 | ESR2_TI_PLL_TEST_CFG_L, test_cfg); | ||
404 | |||
405 | tx_cfg |= PLL_TX_CFG_ENTEST; | ||
406 | rx_cfg |= PLL_RX_CFG_ENTEST; | ||
407 | } | ||
408 | |||
409 | /* Initialize all 4 lanes of the SERDES. */ | ||
410 | for (i = 0; i < 4; i++) { | ||
411 | int err = esr2_set_tx_cfg(np, i, tx_cfg); | ||
412 | if (err) | ||
413 | return err; | ||
414 | } | ||
415 | |||
416 | for (i = 0; i < 4; i++) { | ||
417 | int err = esr2_set_rx_cfg(np, i, rx_cfg); | ||
418 | if (err) | ||
419 | return err; | ||
420 | } | ||
421 | |||
422 | return 0; | ||
423 | } | ||
424 | |||
425 | static int serdes_init_niu_1g_serdes(struct niu *np) | ||
426 | { | ||
427 | struct niu_link_config *lp = &np->link_config; | ||
428 | u16 pll_cfg, pll_sts; | ||
429 | int max_retry = 100; | ||
430 | u64 uninitialized_var(sig), mask, val; | ||
431 | u32 tx_cfg, rx_cfg; | ||
432 | unsigned long i; | ||
433 | int err; | ||
434 | |||
435 | tx_cfg = (PLL_TX_CFG_ENTX | PLL_TX_CFG_SWING_1375MV | | ||
436 | PLL_TX_CFG_RATE_HALF); | ||
437 | rx_cfg = (PLL_RX_CFG_ENRX | PLL_RX_CFG_TERM_0P8VDDT | | ||
438 | PLL_RX_CFG_ALIGN_ENA | PLL_RX_CFG_LOS_LTHRESH | | ||
439 | PLL_RX_CFG_RATE_HALF); | ||
440 | |||
441 | if (np->port == 0) | ||
442 | rx_cfg |= PLL_RX_CFG_EQ_LP_ADAPTIVE; | ||
443 | |||
444 | if (lp->loopback_mode == LOOPBACK_PHY) { | ||
445 | u16 test_cfg = PLL_TEST_CFG_LOOPBACK_CML_DIS; | ||
446 | |||
447 | mdio_write(np, np->port, NIU_ESR2_DEV_ADDR, | ||
448 | ESR2_TI_PLL_TEST_CFG_L, test_cfg); | ||
449 | |||
450 | tx_cfg |= PLL_TX_CFG_ENTEST; | ||
451 | rx_cfg |= PLL_RX_CFG_ENTEST; | ||
452 | } | ||
453 | |||
454 | /* Initialize PLL for 1G */ | ||
455 | pll_cfg = (PLL_CFG_ENPLL | PLL_CFG_MPY_8X); | ||
456 | |||
457 | err = mdio_write(np, np->port, NIU_ESR2_DEV_ADDR, | ||
458 | ESR2_TI_PLL_CFG_L, pll_cfg); | ||
459 | if (err) { | ||
460 | netdev_err(np->dev, "NIU Port %d %s() mdio write to ESR2_TI_PLL_CFG_L failed\n", | ||
461 | np->port, __func__); | ||
462 | return err; | ||
463 | } | ||
464 | |||
465 | pll_sts = PLL_CFG_ENPLL; | ||
466 | |||
467 | err = mdio_write(np, np->port, NIU_ESR2_DEV_ADDR, | ||
468 | ESR2_TI_PLL_STS_L, pll_sts); | ||
469 | if (err) { | ||
470 | netdev_err(np->dev, "NIU Port %d %s() mdio write to ESR2_TI_PLL_STS_L failed\n", | ||
471 | np->port, __func__); | ||
472 | return err; | ||
473 | } | ||
474 | |||
475 | udelay(200); | ||
476 | |||
477 | /* Initialize all 4 lanes of the SERDES. */ | ||
478 | for (i = 0; i < 4; i++) { | ||
479 | err = esr2_set_tx_cfg(np, i, tx_cfg); | ||
480 | if (err) | ||
481 | return err; | ||
482 | } | ||
483 | |||
484 | for (i = 0; i < 4; i++) { | ||
485 | err = esr2_set_rx_cfg(np, i, rx_cfg); | ||
486 | if (err) | ||
487 | return err; | ||
488 | } | ||
489 | |||
490 | switch (np->port) { | ||
491 | case 0: | ||
492 | val = (ESR_INT_SRDY0_P0 | ESR_INT_DET0_P0); | ||
493 | mask = val; | ||
494 | break; | ||
495 | |||
496 | case 1: | ||
497 | val = (ESR_INT_SRDY0_P1 | ESR_INT_DET0_P1); | ||
498 | mask = val; | ||
499 | break; | ||
500 | |||
501 | default: | ||
502 | return -EINVAL; | ||
503 | } | ||
504 | |||
505 | while (max_retry--) { | ||
506 | sig = nr64(ESR_INT_SIGNALS); | ||
507 | if ((sig & mask) == val) | ||
508 | break; | ||
509 | |||
510 | mdelay(500); | ||
511 | } | ||
512 | |||
513 | if ((sig & mask) != val) { | ||
514 | netdev_err(np->dev, "Port %u signal bits [%08x] are not [%08x]\n", | ||
515 | np->port, (int)(sig & mask), (int)val); | ||
516 | return -ENODEV; | ||
517 | } | ||
518 | |||
519 | return 0; | ||
520 | } | ||
521 | |||
522 | static int serdes_init_niu_10g_serdes(struct niu *np) | ||
523 | { | ||
524 | struct niu_link_config *lp = &np->link_config; | ||
525 | u32 tx_cfg, rx_cfg, pll_cfg, pll_sts; | ||
526 | int max_retry = 100; | ||
527 | u64 uninitialized_var(sig), mask, val; | ||
528 | unsigned long i; | ||
529 | int err; | ||
530 | |||
531 | tx_cfg = (PLL_TX_CFG_ENTX | PLL_TX_CFG_SWING_1375MV); | ||
532 | rx_cfg = (PLL_RX_CFG_ENRX | PLL_RX_CFG_TERM_0P8VDDT | | ||
533 | PLL_RX_CFG_ALIGN_ENA | PLL_RX_CFG_LOS_LTHRESH | | ||
534 | PLL_RX_CFG_EQ_LP_ADAPTIVE); | ||
535 | |||
536 | if (lp->loopback_mode == LOOPBACK_PHY) { | ||
537 | u16 test_cfg = PLL_TEST_CFG_LOOPBACK_CML_DIS; | ||
538 | |||
539 | mdio_write(np, np->port, NIU_ESR2_DEV_ADDR, | ||
540 | ESR2_TI_PLL_TEST_CFG_L, test_cfg); | ||
541 | |||
542 | tx_cfg |= PLL_TX_CFG_ENTEST; | ||
543 | rx_cfg |= PLL_RX_CFG_ENTEST; | ||
544 | } | ||
545 | |||
546 | /* Initialize PLL for 10G */ | ||
547 | pll_cfg = (PLL_CFG_ENPLL | PLL_CFG_MPY_10X); | ||
548 | |||
549 | err = mdio_write(np, np->port, NIU_ESR2_DEV_ADDR, | ||
550 | ESR2_TI_PLL_CFG_L, pll_cfg & 0xffff); | ||
551 | if (err) { | ||
552 | netdev_err(np->dev, "NIU Port %d %s() mdio write to ESR2_TI_PLL_CFG_L failed\n", | ||
553 | np->port, __func__); | ||
554 | return err; | ||
555 | } | ||
556 | |||
557 | pll_sts = PLL_CFG_ENPLL; | ||
558 | |||
559 | err = mdio_write(np, np->port, NIU_ESR2_DEV_ADDR, | ||
560 | ESR2_TI_PLL_STS_L, pll_sts & 0xffff); | ||
561 | if (err) { | ||
562 | netdev_err(np->dev, "NIU Port %d %s() mdio write to ESR2_TI_PLL_STS_L failed\n", | ||
563 | np->port, __func__); | ||
564 | return err; | ||
565 | } | ||
566 | |||
567 | udelay(200); | ||
568 | |||
569 | /* Initialize all 4 lanes of the SERDES. */ | ||
570 | for (i = 0; i < 4; i++) { | ||
571 | err = esr2_set_tx_cfg(np, i, tx_cfg); | ||
572 | if (err) | ||
573 | return err; | ||
574 | } | ||
575 | |||
576 | for (i = 0; i < 4; i++) { | ||
577 | err = esr2_set_rx_cfg(np, i, rx_cfg); | ||
578 | if (err) | ||
579 | return err; | ||
580 | } | ||
581 | |||
582 | /* check if serdes is ready */ | ||
583 | |||
584 | switch (np->port) { | ||
585 | case 0: | ||
586 | mask = ESR_INT_SIGNALS_P0_BITS; | ||
587 | val = (ESR_INT_SRDY0_P0 | | ||
588 | ESR_INT_DET0_P0 | | ||
589 | ESR_INT_XSRDY_P0 | | ||
590 | ESR_INT_XDP_P0_CH3 | | ||
591 | ESR_INT_XDP_P0_CH2 | | ||
592 | ESR_INT_XDP_P0_CH1 | | ||
593 | ESR_INT_XDP_P0_CH0); | ||
594 | break; | ||
595 | |||
596 | case 1: | ||
597 | mask = ESR_INT_SIGNALS_P1_BITS; | ||
598 | val = (ESR_INT_SRDY0_P1 | | ||
599 | ESR_INT_DET0_P1 | | ||
600 | ESR_INT_XSRDY_P1 | | ||
601 | ESR_INT_XDP_P1_CH3 | | ||
602 | ESR_INT_XDP_P1_CH2 | | ||
603 | ESR_INT_XDP_P1_CH1 | | ||
604 | ESR_INT_XDP_P1_CH0); | ||
605 | break; | ||
606 | |||
607 | default: | ||
608 | return -EINVAL; | ||
609 | } | ||
610 | |||
611 | while (max_retry--) { | ||
612 | sig = nr64(ESR_INT_SIGNALS); | ||
613 | if ((sig & mask) == val) | ||
614 | break; | ||
615 | |||
616 | mdelay(500); | ||
617 | } | ||
618 | |||
619 | if ((sig & mask) != val) { | ||
620 | pr_info("NIU Port %u signal bits [%08x] are not [%08x] for 10G...trying 1G\n", | ||
621 | np->port, (int)(sig & mask), (int)val); | ||
622 | |||
623 | /* 10G failed, try initializing at 1G */ | ||
624 | err = serdes_init_niu_1g_serdes(np); | ||
625 | if (!err) { | ||
626 | np->flags &= ~NIU_FLAGS_10G; | ||
627 | np->mac_xcvr = MAC_XCVR_PCS; | ||
628 | } else { | ||
629 | netdev_err(np->dev, "Port %u 10G/1G SERDES Link Failed\n", | ||
630 | np->port); | ||
631 | return -ENODEV; | ||
632 | } | ||
633 | } | ||
634 | return 0; | ||
635 | } | ||
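If the 10G signal-ready bits never show up, the function falls back to a 1G bring-up and downgrades the recorded link mode (clearing NIU_FLAGS_10G and switching mac_xcvr). The control flow in isolation, as a hedged sketch with stub init functions:

    #include <stdio.h>

    struct dev_state { int is_10g; };

    /* Stubs standing in for the two SERDES bring-up paths; here 10G is
     * made to fail so the fallback runs. */
    static int init_10g(struct dev_state *st) { (void)st; return -1; }
    static int init_1g(struct dev_state *st)  { (void)st; return 0; }

    static int serdes_init_with_fallback(struct dev_state *st)
    {
            if (init_10g(st) == 0)
                    return 0;
            if (init_1g(st) == 0) {
                    st->is_10g = 0;         /* downgrade the recorded rate */
                    return 0;
            }
            return -1;                      /* both rates failed */
    }

    int main(void)
    {
            struct dev_state st = { .is_10g = 1 };

            printf("init %s, 10G flag = %d\n",
                   serdes_init_with_fallback(&st) ? "failed" : "ok",
                   st.is_10g);
            return 0;
    }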
636 | |||
637 | static int esr_read_rxtx_ctrl(struct niu *np, unsigned long chan, u32 *val) | ||
638 | { | ||
639 | int err; | ||
640 | |||
641 | err = mdio_read(np, np->port, NIU_ESR_DEV_ADDR, ESR_RXTX_CTRL_L(chan)); | ||
642 | if (err >= 0) { | ||
643 | *val = (err & 0xffff); | ||
644 | err = mdio_read(np, np->port, NIU_ESR_DEV_ADDR, | ||
645 | ESR_RXTX_CTRL_H(chan)); | ||
646 | if (err >= 0) | ||
647 | *val |= ((err & 0xffff) << 16); | ||
648 | err = 0; | ||
649 | } | ||
650 | return err; | ||
651 | } | ||
652 | |||
653 | static int esr_read_glue0(struct niu *np, unsigned long chan, u32 *val) | ||
654 | { | ||
655 | int err; | ||
656 | |||
657 | err = mdio_read(np, np->port, NIU_ESR_DEV_ADDR, | ||
658 | ESR_GLUE_CTRL0_L(chan)); | ||
659 | if (err >= 0) { | ||
660 | *val = (err & 0xffff); | ||
661 | err = mdio_read(np, np->port, NIU_ESR_DEV_ADDR, | ||
662 | ESR_GLUE_CTRL0_H(chan)); | ||
663 | if (err >= 0) { | ||
664 | *val |= ((err & 0xffff) << 16); | ||
665 | err = 0; | ||
666 | } | ||
667 | } | ||
668 | return err; | ||
669 | } | ||
670 | |||
671 | static int esr_read_reset(struct niu *np, u32 *val) | ||
672 | { | ||
673 | int err; | ||
674 | |||
675 | err = mdio_read(np, np->port, NIU_ESR_DEV_ADDR, | ||
676 | ESR_RXTX_RESET_CTRL_L); | ||
677 | if (err >= 0) { | ||
678 | *val = (err & 0xffff); | ||
679 | err = mdio_read(np, np->port, NIU_ESR_DEV_ADDR, | ||
680 | ESR_RXTX_RESET_CTRL_H); | ||
681 | if (err >= 0) { | ||
682 | *val |= ((err & 0xffff) << 16); | ||
683 | err = 0; | ||
684 | } | ||
685 | } | ||
686 | return err; | ||
687 | } | ||
688 | |||
689 | static int esr_write_rxtx_ctrl(struct niu *np, unsigned long chan, u32 val) | ||
690 | { | ||
691 | int err; | ||
692 | |||
693 | err = mdio_write(np, np->port, NIU_ESR_DEV_ADDR, | ||
694 | ESR_RXTX_CTRL_L(chan), val & 0xffff); | ||
695 | if (!err) | ||
696 | err = mdio_write(np, np->port, NIU_ESR_DEV_ADDR, | ||
697 | ESR_RXTX_CTRL_H(chan), (val >> 16)); | ||
698 | return err; | ||
699 | } | ||
700 | |||
701 | static int esr_write_glue0(struct niu *np, unsigned long chan, u32 val) | ||
702 | { | ||
703 | int err; | ||
704 | |||
705 | err = mdio_write(np, np->port, NIU_ESR_DEV_ADDR, | ||
706 | ESR_GLUE_CTRL0_L(chan), val & 0xffff); | ||
707 | if (!err) | ||
708 | err = mdio_write(np, np->port, NIU_ESR_DEV_ADDR, | ||
709 | ESR_GLUE_CTRL0_H(chan), (val >> 16)); | ||
710 | return err; | ||
711 | } | ||
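All of the esr_read_*/esr_write_* helpers deal with the same constraint: MDIO data registers are 16 bits wide, so every 32-bit SERDES setting spans an _L/_H register pair, and the read path has to fold the negative-means-error return convention into the composition. The same logic against a toy backing store:

    #include <stdint.h>
    #include <stdio.h>

    static uint16_t reg_lo, reg_hi;         /* toy _L/_H register pair */

    static int rd_lo(void) { return reg_lo; }  /* >= 0: data, < 0: error */
    static int rd_hi(void) { return reg_hi; }

    static int read32(uint32_t *val)
    {
            int err = rd_lo();

            if (err < 0)
                    return err;
            *val = err & 0xffff;
            err = rd_hi();
            if (err < 0)
                    return err;
            *val |= (uint32_t)(err & 0xffff) << 16;
            return 0;
    }

    static void write32(uint32_t val)
    {
            reg_lo = val & 0xffff;
            reg_hi = val >> 16;
    }

    int main(void)
    {
            uint32_t v;

            write32(0xdeadbeef);
            if (!read32(&v))
                    printf("0x%08x\n", v);  /* 0xdeadbeef round-trips */
            return 0;
    }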
712 | |||
713 | static int esr_reset(struct niu *np) | ||
714 | { | ||
715 | u32 uninitialized_var(reset); | ||
716 | int err; | ||
717 | |||
718 | err = mdio_write(np, np->port, NIU_ESR_DEV_ADDR, | ||
719 | ESR_RXTX_RESET_CTRL_L, 0x0000); | ||
720 | if (err) | ||
721 | return err; | ||
722 | err = mdio_write(np, np->port, NIU_ESR_DEV_ADDR, | ||
723 | ESR_RXTX_RESET_CTRL_H, 0xffff); | ||
724 | if (err) | ||
725 | return err; | ||
726 | udelay(200); | ||
727 | |||
728 | err = mdio_write(np, np->port, NIU_ESR_DEV_ADDR, | ||
729 | ESR_RXTX_RESET_CTRL_L, 0xffff); | ||
730 | if (err) | ||
731 | return err; | ||
732 | udelay(200); | ||
733 | |||
734 | err = mdio_write(np, np->port, NIU_ESR_DEV_ADDR, | ||
735 | ESR_RXTX_RESET_CTRL_H, 0x0000); | ||
736 | if (err) | ||
737 | return err; | ||
738 | udelay(200); | ||
739 | |||
740 | err = esr_read_reset(np, &reset); | ||
741 | if (err) | ||
742 | return err; | ||
743 | if (reset != 0) { | ||
744 | netdev_err(np->dev, "Port %u ESR_RESET did not clear [%08x]\n", | ||
745 | np->port, reset); | ||
746 | return -ENODEV; | ||
747 | } | ||
748 | |||
749 | return 0; | ||
750 | } | ||
751 | |||
752 | static int serdes_init_10g(struct niu *np) | ||
753 | { | ||
754 | struct niu_link_config *lp = &np->link_config; | ||
755 | unsigned long ctrl_reg, test_cfg_reg, i; | ||
756 | u64 ctrl_val, test_cfg_val, sig, mask, val; | ||
757 | int err; | ||
758 | |||
759 | switch (np->port) { | ||
760 | case 0: | ||
761 | ctrl_reg = ENET_SERDES_0_CTRL_CFG; | ||
762 | test_cfg_reg = ENET_SERDES_0_TEST_CFG; | ||
763 | break; | ||
764 | case 1: | ||
765 | ctrl_reg = ENET_SERDES_1_CTRL_CFG; | ||
766 | test_cfg_reg = ENET_SERDES_1_TEST_CFG; | ||
767 | break; | ||
768 | |||
769 | default: | ||
770 | return -EINVAL; | ||
771 | } | ||
772 | ctrl_val = (ENET_SERDES_CTRL_SDET_0 | | ||
773 | ENET_SERDES_CTRL_SDET_1 | | ||
774 | ENET_SERDES_CTRL_SDET_2 | | ||
775 | ENET_SERDES_CTRL_SDET_3 | | ||
776 | (0x5 << ENET_SERDES_CTRL_EMPH_0_SHIFT) | | ||
777 | (0x5 << ENET_SERDES_CTRL_EMPH_1_SHIFT) | | ||
778 | (0x5 << ENET_SERDES_CTRL_EMPH_2_SHIFT) | | ||
779 | (0x5 << ENET_SERDES_CTRL_EMPH_3_SHIFT) | | ||
780 | (0x1 << ENET_SERDES_CTRL_LADJ_0_SHIFT) | | ||
781 | (0x1 << ENET_SERDES_CTRL_LADJ_1_SHIFT) | | ||
782 | (0x1 << ENET_SERDES_CTRL_LADJ_2_SHIFT) | | ||
783 | (0x1 << ENET_SERDES_CTRL_LADJ_3_SHIFT)); | ||
784 | test_cfg_val = 0; | ||
785 | |||
786 | if (lp->loopback_mode == LOOPBACK_PHY) { | ||
787 | test_cfg_val |= ((ENET_TEST_MD_PAD_LOOPBACK << | ||
788 | ENET_SERDES_TEST_MD_0_SHIFT) | | ||
789 | (ENET_TEST_MD_PAD_LOOPBACK << | ||
790 | ENET_SERDES_TEST_MD_1_SHIFT) | | ||
791 | (ENET_TEST_MD_PAD_LOOPBACK << | ||
792 | ENET_SERDES_TEST_MD_2_SHIFT) | | ||
793 | (ENET_TEST_MD_PAD_LOOPBACK << | ||
794 | ENET_SERDES_TEST_MD_3_SHIFT)); | ||
795 | } | ||
796 | |||
797 | nw64(ctrl_reg, ctrl_val); | ||
798 | nw64(test_cfg_reg, test_cfg_val); | ||
799 | |||
800 | /* Initialize all 4 lanes of the SERDES. */ | ||
801 | for (i = 0; i < 4; i++) { | ||
802 | u32 rxtx_ctrl, glue0; | ||
803 | |||
804 | err = esr_read_rxtx_ctrl(np, i, &rxtx_ctrl); | ||
805 | if (err) | ||
806 | return err; | ||
807 | err = esr_read_glue0(np, i, &glue0); | ||
808 | if (err) | ||
809 | return err; | ||
810 | |||
811 | rxtx_ctrl &= ~(ESR_RXTX_CTRL_VMUXLO); | ||
812 | rxtx_ctrl |= (ESR_RXTX_CTRL_ENSTRETCH | | ||
813 | (2 << ESR_RXTX_CTRL_VMUXLO_SHIFT)); | ||
814 | |||
815 | glue0 &= ~(ESR_GLUE_CTRL0_SRATE | | ||
816 | ESR_GLUE_CTRL0_THCNT | | ||
817 | ESR_GLUE_CTRL0_BLTIME); | ||
818 | glue0 |= (ESR_GLUE_CTRL0_RXLOSENAB | | ||
819 | (0xf << ESR_GLUE_CTRL0_SRATE_SHIFT) | | ||
820 | (0xff << ESR_GLUE_CTRL0_THCNT_SHIFT) | | ||
821 | (BLTIME_300_CYCLES << | ||
822 | ESR_GLUE_CTRL0_BLTIME_SHIFT)); | ||
823 | |||
824 | err = esr_write_rxtx_ctrl(np, i, rxtx_ctrl); | ||
825 | if (err) | ||
826 | return err; | ||
827 | err = esr_write_glue0(np, i, glue0); | ||
828 | if (err) | ||
829 | return err; | ||
830 | } | ||
831 | |||
832 | err = esr_reset(np); | ||
833 | if (err) | ||
834 | return err; | ||
835 | |||
836 | sig = nr64(ESR_INT_SIGNALS); | ||
837 | switch (np->port) { | ||
838 | case 0: | ||
839 | mask = ESR_INT_SIGNALS_P0_BITS; | ||
840 | val = (ESR_INT_SRDY0_P0 | | ||
841 | ESR_INT_DET0_P0 | | ||
842 | ESR_INT_XSRDY_P0 | | ||
843 | ESR_INT_XDP_P0_CH3 | | ||
844 | ESR_INT_XDP_P0_CH2 | | ||
845 | ESR_INT_XDP_P0_CH1 | | ||
846 | ESR_INT_XDP_P0_CH0); | ||
847 | break; | ||
848 | |||
849 | case 1: | ||
850 | mask = ESR_INT_SIGNALS_P1_BITS; | ||
851 | val = (ESR_INT_SRDY0_P1 | | ||
852 | ESR_INT_DET0_P1 | | ||
853 | ESR_INT_XSRDY_P1 | | ||
854 | ESR_INT_XDP_P1_CH3 | | ||
855 | ESR_INT_XDP_P1_CH2 | | ||
856 | ESR_INT_XDP_P1_CH1 | | ||
857 | ESR_INT_XDP_P1_CH0); | ||
858 | break; | ||
859 | |||
860 | default: | ||
861 | return -EINVAL; | ||
862 | } | ||
863 | |||
864 | if ((sig & mask) != val) { | ||
865 | if (np->flags & NIU_FLAGS_HOTPLUG_PHY) { | ||
866 | np->flags &= ~NIU_FLAGS_HOTPLUG_PHY_PRESENT; | ||
867 | return 0; | ||
868 | } | ||
869 | netdev_err(np->dev, "Port %u signal bits [%08x] are not [%08x]\n", | ||
870 | np->port, (int)(sig & mask), (int)val); | ||
871 | return -ENODEV; | ||
872 | } | ||
873 | if (np->flags & NIU_FLAGS_HOTPLUG_PHY) | ||
874 | np->flags |= NIU_FLAGS_HOTPLUG_PHY_PRESENT; | ||
875 | return 0; | ||
876 | } | ||
877 | |||
878 | static int serdes_init_1g(struct niu *np) | ||
879 | { | ||
880 | u64 val; | ||
881 | |||
882 | val = nr64(ENET_SERDES_1_PLL_CFG); | ||
883 | val &= ~ENET_SERDES_PLL_FBDIV2; | ||
884 | switch (np->port) { | ||
885 | case 0: | ||
886 | val |= ENET_SERDES_PLL_HRATE0; | ||
887 | break; | ||
888 | case 1: | ||
889 | val |= ENET_SERDES_PLL_HRATE1; | ||
890 | break; | ||
891 | case 2: | ||
892 | val |= ENET_SERDES_PLL_HRATE2; | ||
893 | break; | ||
894 | case 3: | ||
895 | val |= ENET_SERDES_PLL_HRATE3; | ||
896 | break; | ||
897 | default: | ||
898 | return -EINVAL; | ||
899 | } | ||
900 | nw64(ENET_SERDES_1_PLL_CFG, val); | ||
901 | |||
902 | return 0; | ||
903 | } | ||
904 | |||
905 | static int serdes_init_1g_serdes(struct niu *np) | ||
906 | { | ||
907 | struct niu_link_config *lp = &np->link_config; | ||
908 | unsigned long ctrl_reg, test_cfg_reg, pll_cfg, i; | ||
909 | u64 ctrl_val, test_cfg_val, sig, mask, val; | ||
910 | int err; | ||
911 | u64 reset_val, val_rd; | ||
912 | |||
913 | val = ENET_SERDES_PLL_HRATE0 | ENET_SERDES_PLL_HRATE1 | | ||
914 | ENET_SERDES_PLL_HRATE2 | ENET_SERDES_PLL_HRATE3 | | ||
915 | ENET_SERDES_PLL_FBDIV0; | ||
916 | switch (np->port) { | ||
917 | case 0: | ||
918 | reset_val = ENET_SERDES_RESET_0; | ||
919 | ctrl_reg = ENET_SERDES_0_CTRL_CFG; | ||
920 | test_cfg_reg = ENET_SERDES_0_TEST_CFG; | ||
921 | pll_cfg = ENET_SERDES_0_PLL_CFG; | ||
922 | break; | ||
923 | case 1: | ||
924 | reset_val = ENET_SERDES_RESET_1; | ||
925 | ctrl_reg = ENET_SERDES_1_CTRL_CFG; | ||
926 | test_cfg_reg = ENET_SERDES_1_TEST_CFG; | ||
927 | pll_cfg = ENET_SERDES_1_PLL_CFG; | ||
928 | break; | ||
929 | |||
930 | default: | ||
931 | return -EINVAL; | ||
932 | } | ||
933 | ctrl_val = (ENET_SERDES_CTRL_SDET_0 | | ||
934 | ENET_SERDES_CTRL_SDET_1 | | ||
935 | ENET_SERDES_CTRL_SDET_2 | | ||
936 | ENET_SERDES_CTRL_SDET_3 | | ||
937 | (0x5 << ENET_SERDES_CTRL_EMPH_0_SHIFT) | | ||
938 | (0x5 << ENET_SERDES_CTRL_EMPH_1_SHIFT) | | ||
939 | (0x5 << ENET_SERDES_CTRL_EMPH_2_SHIFT) | | ||
940 | (0x5 << ENET_SERDES_CTRL_EMPH_3_SHIFT) | | ||
941 | (0x1 << ENET_SERDES_CTRL_LADJ_0_SHIFT) | | ||
942 | (0x1 << ENET_SERDES_CTRL_LADJ_1_SHIFT) | | ||
943 | (0x1 << ENET_SERDES_CTRL_LADJ_2_SHIFT) | | ||
944 | (0x1 << ENET_SERDES_CTRL_LADJ_3_SHIFT)); | ||
945 | test_cfg_val = 0; | ||
946 | |||
947 | if (lp->loopback_mode == LOOPBACK_PHY) { | ||
948 | test_cfg_val |= ((ENET_TEST_MD_PAD_LOOPBACK << | ||
949 | ENET_SERDES_TEST_MD_0_SHIFT) | | ||
950 | (ENET_TEST_MD_PAD_LOOPBACK << | ||
951 | ENET_SERDES_TEST_MD_1_SHIFT) | | ||
952 | (ENET_TEST_MD_PAD_LOOPBACK << | ||
953 | ENET_SERDES_TEST_MD_2_SHIFT) | | ||
954 | (ENET_TEST_MD_PAD_LOOPBACK << | ||
955 | ENET_SERDES_TEST_MD_3_SHIFT)); | ||
956 | } | ||
957 | |||
958 | nw64(ENET_SERDES_RESET, reset_val); | ||
959 | mdelay(20); | ||
960 | val_rd = nr64(ENET_SERDES_RESET); | ||
961 | val_rd &= ~reset_val; | ||
962 | nw64(pll_cfg, val); | ||
963 | nw64(ctrl_reg, ctrl_val); | ||
964 | nw64(test_cfg_reg, test_cfg_val); | ||
965 | nw64(ENET_SERDES_RESET, val_rd); | ||
966 | mdelay(2000); | ||
967 | |||
968 | /* Initialize all 4 lanes of the SERDES. */ | ||
969 | for (i = 0; i < 4; i++) { | ||
970 | u32 rxtx_ctrl, glue0; | ||
971 | |||
972 | err = esr_read_rxtx_ctrl(np, i, &rxtx_ctrl); | ||
973 | if (err) | ||
974 | return err; | ||
975 | err = esr_read_glue0(np, i, &glue0); | ||
976 | if (err) | ||
977 | return err; | ||
978 | |||
979 | rxtx_ctrl &= ~(ESR_RXTX_CTRL_VMUXLO); | ||
980 | rxtx_ctrl |= (ESR_RXTX_CTRL_ENSTRETCH | | ||
981 | (2 << ESR_RXTX_CTRL_VMUXLO_SHIFT)); | ||
982 | |||
983 | glue0 &= ~(ESR_GLUE_CTRL0_SRATE | | ||
984 | ESR_GLUE_CTRL0_THCNT | | ||
985 | ESR_GLUE_CTRL0_BLTIME); | ||
986 | glue0 |= (ESR_GLUE_CTRL0_RXLOSENAB | | ||
987 | (0xf << ESR_GLUE_CTRL0_SRATE_SHIFT) | | ||
988 | (0xff << ESR_GLUE_CTRL0_THCNT_SHIFT) | | ||
989 | (BLTIME_300_CYCLES << | ||
990 | ESR_GLUE_CTRL0_BLTIME_SHIFT)); | ||
991 | |||
992 | err = esr_write_rxtx_ctrl(np, i, rxtx_ctrl); | ||
993 | if (err) | ||
994 | return err; | ||
995 | err = esr_write_glue0(np, i, glue0); | ||
996 | if (err) | ||
997 | return err; | ||
998 | } | ||
999 | |||
1000 | |||
1001 | sig = nr64(ESR_INT_SIGNALS); | ||
1002 | switch (np->port) { | ||
1003 | case 0: | ||
1004 | val = (ESR_INT_SRDY0_P0 | ESR_INT_DET0_P0); | ||
1005 | mask = val; | ||
1006 | break; | ||
1007 | |||
1008 | case 1: | ||
1009 | val = (ESR_INT_SRDY0_P1 | ESR_INT_DET0_P1); | ||
1010 | mask = val; | ||
1011 | break; | ||
1012 | |||
1013 | default: | ||
1014 | return -EINVAL; | ||
1015 | } | ||
1016 | |||
1017 | if ((sig & mask) != val) { | ||
1018 | netdev_err(np->dev, "Port %u signal bits [%08x] are not [%08x]\n", | ||
1019 | np->port, (int)(sig & mask), (int)val); | ||
1020 | return -ENODEV; | ||
1021 | } | ||
1022 | |||
1023 | return 0; | ||
1024 | } | ||
1025 | |||
1026 | static int link_status_1g_serdes(struct niu *np, int *link_up_p) | ||
1027 | { | ||
1028 | struct niu_link_config *lp = &np->link_config; | ||
1029 | int link_up; | ||
1030 | u64 val; | ||
1031 | u16 current_speed; | ||
1032 | unsigned long flags; | ||
1033 | u8 current_duplex; | ||
1034 | |||
1035 | link_up = 0; | ||
1036 | current_speed = SPEED_INVALID; | ||
1037 | current_duplex = DUPLEX_INVALID; | ||
1038 | |||
1039 | spin_lock_irqsave(&np->lock, flags); | ||
1040 | |||
1041 | val = nr64_pcs(PCS_MII_STAT); | ||
1042 | |||
1043 | if (val & PCS_MII_STAT_LINK_STATUS) { | ||
1044 | link_up = 1; | ||
1045 | current_speed = SPEED_1000; | ||
1046 | current_duplex = DUPLEX_FULL; | ||
1047 | } | ||
1048 | |||
1049 | lp->active_speed = current_speed; | ||
1050 | lp->active_duplex = current_duplex; | ||
1051 | spin_unlock_irqrestore(&np->lock, flags); | ||
1052 | |||
1053 | *link_up_p = link_up; | ||
1054 | return 0; | ||
1055 | } | ||
1056 | |||
1057 | static int link_status_10g_serdes(struct niu *np, int *link_up_p) | ||
1058 | { | ||
1059 | unsigned long flags; | ||
1060 | struct niu_link_config *lp = &np->link_config; | ||
1061 | int link_up = 0; | ||
1062 | int link_ok = 1; | ||
1063 | u64 val, val2; | ||
1064 | u16 current_speed; | ||
1065 | u8 current_duplex; | ||
1066 | |||
1067 | if (!(np->flags & NIU_FLAGS_10G)) | ||
1068 | return link_status_1g_serdes(np, link_up_p); | ||
1069 | |||
1070 | current_speed = SPEED_INVALID; | ||
1071 | current_duplex = DUPLEX_INVALID; | ||
1072 | spin_lock_irqsave(&np->lock, flags); | ||
1073 | |||
1074 | val = nr64_xpcs(XPCS_STATUS(0)); | ||
1075 | val2 = nr64_mac(XMAC_INTER2); | ||
1076 | if (val2 & 0x01000000) | ||
1077 | link_ok = 0; | ||
1078 | |||
1079 | if ((val & 0x1000ULL) && link_ok) { | ||
1080 | link_up = 1; | ||
1081 | current_speed = SPEED_10000; | ||
1082 | current_duplex = DUPLEX_FULL; | ||
1083 | } | ||
1084 | lp->active_speed = current_speed; | ||
1085 | lp->active_duplex = current_duplex; | ||
1086 | spin_unlock_irqrestore(&np->lock, flags); | ||
1087 | *link_up_p = link_up; | ||
1088 | return 0; | ||
1089 | } | ||
1090 | |||
1091 | static int link_status_mii(struct niu *np, int *link_up_p) | ||
1092 | { | ||
1093 | struct niu_link_config *lp = &np->link_config; | ||
1094 | int err; | ||
1095 | int bmsr, advert, ctrl1000, stat1000, lpa, bmcr, estatus; | ||
1096 | int supported, advertising, active_speed, active_duplex; | ||
1097 | |||
1098 | err = mii_read(np, np->phy_addr, MII_BMCR); | ||
1099 | if (unlikely(err < 0)) | ||
1100 | return err; | ||
1101 | bmcr = err; | ||
1102 | |||
1103 | err = mii_read(np, np->phy_addr, MII_BMSR); | ||
1104 | if (unlikely(err < 0)) | ||
1105 | return err; | ||
1106 | bmsr = err; | ||
1107 | |||
1108 | err = mii_read(np, np->phy_addr, MII_ADVERTISE); | ||
1109 | if (unlikely(err < 0)) | ||
1110 | return err; | ||
1111 | advert = err; | ||
1112 | |||
1113 | err = mii_read(np, np->phy_addr, MII_LPA); | ||
1114 | if (unlikely(err < 0)) | ||
1115 | return err; | ||
1116 | lpa = err; | ||
1117 | |||
1118 | if (likely(bmsr & BMSR_ESTATEN)) { | ||
1119 | err = mii_read(np, np->phy_addr, MII_ESTATUS); | ||
1120 | if (unlikely(err < 0)) | ||
1121 | return err; | ||
1122 | estatus = err; | ||
1123 | |||
1124 | err = mii_read(np, np->phy_addr, MII_CTRL1000); | ||
1125 | if (unlikely(err < 0)) | ||
1126 | return err; | ||
1127 | ctrl1000 = err; | ||
1128 | |||
1129 | err = mii_read(np, np->phy_addr, MII_STAT1000); | ||
1130 | if (unlikely(err < 0)) | ||
1131 | return err; | ||
1132 | stat1000 = err; | ||
1133 | } else | ||
1134 | estatus = ctrl1000 = stat1000 = 0; | ||
1135 | |||
1136 | supported = 0; | ||
1137 | if (bmsr & BMSR_ANEGCAPABLE) | ||
1138 | supported |= SUPPORTED_Autoneg; | ||
1139 | if (bmsr & BMSR_10HALF) | ||
1140 | supported |= SUPPORTED_10baseT_Half; | ||
1141 | if (bmsr & BMSR_10FULL) | ||
1142 | supported |= SUPPORTED_10baseT_Full; | ||
1143 | if (bmsr & BMSR_100HALF) | ||
1144 | supported |= SUPPORTED_100baseT_Half; | ||
1145 | if (bmsr & BMSR_100FULL) | ||
1146 | supported |= SUPPORTED_100baseT_Full; | ||
1147 | if (estatus & ESTATUS_1000_THALF) | ||
1148 | supported |= SUPPORTED_1000baseT_Half; | ||
1149 | if (estatus & ESTATUS_1000_TFULL) | ||
1150 | supported |= SUPPORTED_1000baseT_Full; | ||
1151 | lp->supported = supported; | ||
1152 | |||
1153 | advertising = 0; | ||
1154 | if (advert & ADVERTISE_10HALF) | ||
1155 | advertising |= ADVERTISED_10baseT_Half; | ||
1156 | if (advert & ADVERTISE_10FULL) | ||
1157 | advertising |= ADVERTISED_10baseT_Full; | ||
1158 | if (advert & ADVERTISE_100HALF) | ||
1159 | advertising |= ADVERTISED_100baseT_Half; | ||
1160 | if (advert & ADVERTISE_100FULL) | ||
1161 | advertising |= ADVERTISED_100baseT_Full; | ||
1162 | if (ctrl1000 & ADVERTISE_1000HALF) | ||
1163 | advertising |= ADVERTISED_1000baseT_Half; | ||
1164 | if (ctrl1000 & ADVERTISE_1000FULL) | ||
1165 | advertising |= ADVERTISED_1000baseT_Full; | ||
1166 | |||
1167 | if (bmcr & BMCR_ANENABLE) { | ||
1168 | int neg, neg1000; | ||
1169 | |||
1170 | lp->active_autoneg = 1; | ||
1171 | advertising |= ADVERTISED_Autoneg; | ||
1172 | |||
1173 | neg = advert & lpa; | ||
1174 | neg1000 = (ctrl1000 << 2) & stat1000; | ||
1175 | |||
1176 | if (neg1000 & (LPA_1000FULL | LPA_1000HALF)) | ||
1177 | active_speed = SPEED_1000; | ||
1178 | else if (neg & LPA_100) | ||
1179 | active_speed = SPEED_100; | ||
1180 | else if (neg & (LPA_10HALF | LPA_10FULL)) | ||
1181 | active_speed = SPEED_10; | ||
1182 | else | ||
1183 | active_speed = SPEED_INVALID; | ||
1184 | |||
1185 | if ((neg1000 & LPA_1000FULL) || (neg & LPA_DUPLEX)) | ||
1186 | active_duplex = DUPLEX_FULL; | ||
1187 | else if (active_speed != SPEED_INVALID) | ||
1188 | active_duplex = DUPLEX_HALF; | ||
1189 | else | ||
1190 | active_duplex = DUPLEX_INVALID; | ||
1191 | } else { | ||
1192 | lp->active_autoneg = 0; | ||
1193 | |||
1194 | if ((bmcr & BMCR_SPEED1000) && !(bmcr & BMCR_SPEED100)) | ||
1195 | active_speed = SPEED_1000; | ||
1196 | else if (bmcr & BMCR_SPEED100) | ||
1197 | active_speed = SPEED_100; | ||
1198 | else | ||
1199 | active_speed = SPEED_10; | ||
1200 | |||
1201 | if (bmcr & BMCR_FULLDPLX) | ||
1202 | active_duplex = DUPLEX_FULL; | ||
1203 | else | ||
1204 | active_duplex = DUPLEX_HALF; | ||
1205 | } | ||
1206 | |||
1207 | lp->active_advertising = advertising; | ||
1208 | lp->active_speed = active_speed; | ||
1209 | lp->active_duplex = active_duplex; | ||
1210 | *link_up_p = !!(bmsr & BMSR_LSTATUS); | ||
1211 | |||
1212 | return 0; | ||
1213 | } | ||
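The autoneg branch resolves the link result by intersecting what we advertised with what the partner advertised: neg = advert & lpa for 10/100, and neg1000 = (ctrl1000 << 2) & stat1000 for gigabit, where the shift works because the 1000BASE-T ability bits in MII_CTRL1000 sit exactly two positions below the corresponding status bits in MII_STAT1000 (0x0200 << 2 == 0x0800). A runnable worked example using the bit values from linux/mii.h:

    #include <stdio.h>

    /* Bit values as defined in linux/mii.h. */
    #define ADVERTISE_10HALF   0x0020
    #define ADVERTISE_10FULL   0x0040
    #define ADVERTISE_100HALF  0x0080
    #define ADVERTISE_100FULL  0x0100
    #define ADVERTISE_1000FULL 0x0200       /* MII_CTRL1000 */
    #define LPA_1000HALF       0x0400       /* MII_STAT1000 */
    #define LPA_1000FULL       0x0800       /* MII_STAT1000 */
    #define LPA_DUPLEX         (ADVERTISE_10FULL | ADVERTISE_100FULL)
    #define LPA_100            (ADVERTISE_100HALF | ADVERTISE_100FULL | \
                                0x0200 /* LPA_100BASE4 */)

    int main(void)
    {
            /* Both ends advertise 10/100; we also advertise 1000FULL and
             * the partner reports both gigabit abilities (illustrative). */
            int advert   = ADVERTISE_10HALF | ADVERTISE_10FULL |
                           ADVERTISE_100HALF | ADVERTISE_100FULL;
            int lpa      = advert;          /* MII_LPA mirrors the layout */
            int ctrl1000 = ADVERTISE_1000FULL;
            int stat1000 = LPA_1000FULL | LPA_1000HALF;

            int neg     = advert & lpa;                 /* common 10/100 */
            int neg1000 = (ctrl1000 << 2) & stat1000;

            if (neg1000 & (LPA_1000FULL | LPA_1000HALF))
                    printf("1000 Mb/s, %s duplex\n",
                           (neg1000 & LPA_1000FULL) ? "full" : "half");
            else if (neg & LPA_100)
                    printf("100 Mb/s, %s duplex\n",
                           (neg & LPA_DUPLEX) ? "full" : "half");
            else
                    printf("10 Mb/s, %s duplex\n",
                           (neg & LPA_DUPLEX) ? "full" : "half");
            return 0;
    }

With these inputs the gigabit intersection wins and the program reports 1000 Mb/s full duplex, matching the priority order in link_status_mii().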
1214 | |||
1215 | static int link_status_1g_rgmii(struct niu *np, int *link_up_p) | ||
1216 | { | ||
1217 | struct niu_link_config *lp = &np->link_config; | ||
1218 | u16 current_speed, bmsr; | ||
1219 | unsigned long flags; | ||
1220 | u8 current_duplex; | ||
1221 | int err, link_up; | ||
1222 | |||
1223 | link_up = 0; | ||
1224 | current_speed = SPEED_INVALID; | ||
1225 | current_duplex = DUPLEX_INVALID; | ||
1226 | |||
1227 | spin_lock_irqsave(&np->lock, flags); | ||
1228 | |||
1229 | err = -EINVAL; | ||
1230 | |||
1231 | err = mii_read(np, np->phy_addr, MII_BMSR); | ||
1232 | if (err < 0) | ||
1233 | goto out; | ||
1234 | |||
1235 | bmsr = err; | ||
1236 | if (bmsr & BMSR_LSTATUS) { | ||
1237 | u16 adv, lpa; | ||
1238 | |||
1239 | err = mii_read(np, np->phy_addr, MII_ADVERTISE); | ||
1240 | if (err < 0) | ||
1241 | goto out; | ||
1242 | adv = err; | ||
1243 | |||
1244 | err = mii_read(np, np->phy_addr, MII_LPA); | ||
1245 | if (err < 0) | ||
1246 | goto out; | ||
1247 | lpa = err; | ||
1248 | |||
1249 | err = mii_read(np, np->phy_addr, MII_ESTATUS); | ||
1250 | if (err < 0) | ||
1251 | goto out; | ||
1252 | link_up = 1; | ||
1253 | current_speed = SPEED_1000; | ||
1254 | current_duplex = DUPLEX_FULL; | ||
1255 | |||
1256 | } | ||
1257 | lp->active_speed = current_speed; | ||
1258 | lp->active_duplex = current_duplex; | ||
1259 | err = 0; | ||
1260 | |||
1261 | out: | ||
1262 | spin_unlock_irqrestore(&np->lock, flags); | ||
1263 | |||
1264 | *link_up_p = link_up; | ||
1265 | return err; | ||
1266 | } | ||
1267 | |||
1268 | static int link_status_1g(struct niu *np, int *link_up_p) | ||
1269 | { | ||
1270 | struct niu_link_config *lp = &np->link_config; | ||
1271 | unsigned long flags; | ||
1272 | int err; | ||
1273 | |||
1274 | spin_lock_irqsave(&np->lock, flags); | ||
1275 | |||
1276 | err = link_status_mii(np, link_up_p); | ||
1277 | lp->supported |= SUPPORTED_TP; | ||
1278 | lp->active_advertising |= ADVERTISED_TP; | ||
1279 | |||
1280 | spin_unlock_irqrestore(&np->lock, flags); | ||
1281 | return err; | ||
1282 | } | ||
1283 | |||
1284 | static int bcm8704_reset(struct niu *np) | ||
1285 | { | ||
1286 | int err, limit; | ||
1287 | |||
1288 | err = mdio_read(np, np->phy_addr, | ||
1289 | BCM8704_PHYXS_DEV_ADDR, MII_BMCR); | ||
1290 | if (err < 0 || err == 0xffff) | ||
1291 | return err; | ||
1292 | err |= BMCR_RESET; | ||
1293 | err = mdio_write(np, np->phy_addr, BCM8704_PHYXS_DEV_ADDR, | ||
1294 | MII_BMCR, err); | ||
1295 | if (err) | ||
1296 | return err; | ||
1297 | |||
1298 | limit = 1000; | ||
1299 | while (--limit >= 0) { | ||
1300 | err = mdio_read(np, np->phy_addr, | ||
1301 | BCM8704_PHYXS_DEV_ADDR, MII_BMCR); | ||
1302 | if (err < 0) | ||
1303 | return err; | ||
1304 | if (!(err & BMCR_RESET)) | ||
1305 | break; | ||
1306 | } | ||
1307 | if (limit < 0) { | ||
1308 | netdev_err(np->dev, "Port %u PHY will not reset (bmcr=%04x)\n", | ||
1309 | np->port, (err & 0xffff)); | ||
1310 | return -ENODEV; | ||
1311 | } | ||
1312 | return 0; | ||
1313 | } | ||
1314 | |||
1315 | /* When written, certain PHY registers need to be read back twice | ||
1316 | * in order for the bits to settle properly. | ||
1317 | */ | ||
1318 | static int bcm8704_user_dev3_readback(struct niu *np, int reg) | ||
1319 | { | ||
1320 | int err = mdio_read(np, np->phy_addr, BCM8704_USER_DEV3_ADDR, reg); | ||
1321 | if (err < 0) | ||
1322 | return err; | ||
1323 | err = mdio_read(np, np->phy_addr, BCM8704_USER_DEV3_ADDR, reg); | ||
1324 | if (err < 0) | ||
1325 | return err; | ||
1326 | return 0; | ||
1327 | } | ||
1328 | |||
1329 | static int bcm8706_init_user_dev3(struct niu *np) | ||
1330 | { | ||
1331 | int err; | ||
1332 | |||
1333 | |||
1334 | err = mdio_read(np, np->phy_addr, BCM8704_USER_DEV3_ADDR, | ||
1335 | BCM8704_USER_OPT_DIGITAL_CTRL); | ||
1336 | if (err < 0) | ||
1337 | return err; | ||
1338 | err &= ~USER_ODIG_CTRL_GPIOS; | ||
1339 | err |= (0x3 << USER_ODIG_CTRL_GPIOS_SHIFT); | ||
1340 | err |= USER_ODIG_CTRL_RESV2; | ||
1341 | err = mdio_write(np, np->phy_addr, BCM8704_USER_DEV3_ADDR, | ||
1342 | BCM8704_USER_OPT_DIGITAL_CTRL, err); | ||
1343 | if (err) | ||
1344 | return err; | ||
1345 | |||
1346 | mdelay(1000); | ||
1347 | |||
1348 | return 0; | ||
1349 | } | ||
1350 | |||
1351 | static int bcm8704_init_user_dev3(struct niu *np) | ||
1352 | { | ||
1353 | int err; | ||
1354 | |||
1355 | err = mdio_write(np, np->phy_addr, | ||
1356 | BCM8704_USER_DEV3_ADDR, BCM8704_USER_CONTROL, | ||
1357 | (USER_CONTROL_OPTXRST_LVL | | ||
1358 | USER_CONTROL_OPBIASFLT_LVL | | ||
1359 | USER_CONTROL_OBTMPFLT_LVL | | ||
1360 | USER_CONTROL_OPPRFLT_LVL | | ||
1361 | USER_CONTROL_OPTXFLT_LVL | | ||
1362 | USER_CONTROL_OPRXLOS_LVL | | ||
1363 | USER_CONTROL_OPRXFLT_LVL | | ||
1364 | USER_CONTROL_OPTXON_LVL | | ||
1365 | (0x3f << USER_CONTROL_RES1_SHIFT))); | ||
1366 | if (err) | ||
1367 | return err; | ||
1368 | |||
1369 | err = mdio_write(np, np->phy_addr, | ||
1370 | BCM8704_USER_DEV3_ADDR, BCM8704_USER_PMD_TX_CONTROL, | ||
1371 | (USER_PMD_TX_CTL_XFP_CLKEN | | ||
1372 | (1 << USER_PMD_TX_CTL_TX_DAC_TXD_SH) | | ||
1373 | (2 << USER_PMD_TX_CTL_TX_DAC_TXCK_SH) | | ||
1374 | USER_PMD_TX_CTL_TSCK_LPWREN)); | ||
1375 | if (err) | ||
1376 | return err; | ||
1377 | |||
1378 | err = bcm8704_user_dev3_readback(np, BCM8704_USER_CONTROL); | ||
1379 | if (err) | ||
1380 | return err; | ||
1381 | err = bcm8704_user_dev3_readback(np, BCM8704_USER_PMD_TX_CONTROL); | ||
1382 | if (err) | ||
1383 | return err; | ||
1384 | |||
1385 | err = mdio_read(np, np->phy_addr, BCM8704_USER_DEV3_ADDR, | ||
1386 | BCM8704_USER_OPT_DIGITAL_CTRL); | ||
1387 | if (err < 0) | ||
1388 | return err; | ||
1389 | err &= ~USER_ODIG_CTRL_GPIOS; | ||
1390 | err |= (0x3 << USER_ODIG_CTRL_GPIOS_SHIFT); | ||
1391 | err = mdio_write(np, np->phy_addr, BCM8704_USER_DEV3_ADDR, | ||
1392 | BCM8704_USER_OPT_DIGITAL_CTRL, err); | ||
1393 | if (err) | ||
1394 | return err; | ||
1395 | |||
1396 | mdelay(1000); | ||
1397 | |||
1398 | return 0; | ||
1399 | } | ||
1400 | |||
1401 | static int mrvl88x2011_act_led(struct niu *np, int val) | ||
1402 | { | ||
1403 | int err; | ||
1404 | |||
1405 | err = mdio_read(np, np->phy_addr, MRVL88X2011_USER_DEV2_ADDR, | ||
1406 | MRVL88X2011_LED_8_TO_11_CTL); | ||
1407 | if (err < 0) | ||
1408 | return err; | ||
1409 | |||
1410 | err &= ~MRVL88X2011_LED(MRVL88X2011_LED_ACT, MRVL88X2011_LED_CTL_MASK); | ||
1411 | err |= MRVL88X2011_LED(MRVL88X2011_LED_ACT, val); | ||
1412 | |||
1413 | return mdio_write(np, np->phy_addr, MRVL88X2011_USER_DEV2_ADDR, | ||
1414 | MRVL88X2011_LED_8_TO_11_CTL, err); | ||
1415 | } | ||
1416 | |||
1417 | static int mrvl88x2011_led_blink_rate(struct niu *np, int rate) | ||
1418 | { | ||
1419 | int err; | ||
1420 | |||
1421 | err = mdio_read(np, np->phy_addr, MRVL88X2011_USER_DEV2_ADDR, | ||
1422 | MRVL88X2011_LED_BLINK_CTL); | ||
1423 | if (err >= 0) { | ||
1424 | err &= ~MRVL88X2011_LED_BLKRATE_MASK; | ||
1425 | err |= (rate << 4); | ||
1426 | |||
1427 | err = mdio_write(np, np->phy_addr, MRVL88X2011_USER_DEV2_ADDR, | ||
1428 | MRVL88X2011_LED_BLINK_CTL, err); | ||
1429 | } | ||
1430 | |||
1431 | return err; | ||
1432 | } | ||
1433 | |||
1434 | static int xcvr_init_10g_mrvl88x2011(struct niu *np) | ||
1435 | { | ||
1436 | int err; | ||
1437 | |||
1438 | /* Set LED functions */ | ||
1439 | err = mrvl88x2011_led_blink_rate(np, MRVL88X2011_LED_BLKRATE_134MS); | ||
1440 | if (err) | ||
1441 | return err; | ||
1442 | |||
1443 | /* led activity */ | ||
1444 | err = mrvl88x2011_act_led(np, MRVL88X2011_LED_CTL_OFF); | ||
1445 | if (err) | ||
1446 | return err; | ||
1447 | |||
1448 | err = mdio_read(np, np->phy_addr, MRVL88X2011_USER_DEV3_ADDR, | ||
1449 | MRVL88X2011_GENERAL_CTL); | ||
1450 | if (err < 0) | ||
1451 | return err; | ||
1452 | |||
1453 | err |= MRVL88X2011_ENA_XFPREFCLK; | ||
1454 | |||
1455 | err = mdio_write(np, np->phy_addr, MRVL88X2011_USER_DEV3_ADDR, | ||
1456 | MRVL88X2011_GENERAL_CTL, err); | ||
1457 | if (err < 0) | ||
1458 | return err; | ||
1459 | |||
1460 | err = mdio_read(np, np->phy_addr, MRVL88X2011_USER_DEV1_ADDR, | ||
1461 | MRVL88X2011_PMA_PMD_CTL_1); | ||
1462 | if (err < 0) | ||
1463 | return err; | ||
1464 | |||
1465 | if (np->link_config.loopback_mode == LOOPBACK_MAC) | ||
1466 | err |= MRVL88X2011_LOOPBACK; | ||
1467 | else | ||
1468 | err &= ~MRVL88X2011_LOOPBACK; | ||
1469 | |||
1470 | err = mdio_write(np, np->phy_addr, MRVL88X2011_USER_DEV1_ADDR, | ||
1471 | MRVL88X2011_PMA_PMD_CTL_1, err); | ||
1472 | if (err < 0) | ||
1473 | return err; | ||
1474 | |||
1475 | /* Enable PMD */ | ||
1476 | return mdio_write(np, np->phy_addr, MRVL88X2011_USER_DEV1_ADDR, | ||
1477 | MRVL88X2011_10G_PMD_TX_DIS, MRVL88X2011_ENA_PMDTX); | ||
1478 | } | ||
1479 | |||
1480 | |||
1481 | static int xcvr_diag_bcm870x(struct niu *np) | ||
1482 | { | ||
1483 | u16 analog_stat0, tx_alarm_status; | ||
1484 | int err = 0; | ||
1485 | |||
1486 | #if 1 | ||
1487 | err = mdio_read(np, np->phy_addr, BCM8704_PMA_PMD_DEV_ADDR, | ||
1488 | MII_STAT1000); | ||
1489 | if (err < 0) | ||
1490 | return err; | ||
1491 | pr_info("Port %u PMA_PMD(MII_STAT1000) [%04x]\n", np->port, err); | ||
1492 | |||
1493 | err = mdio_read(np, np->phy_addr, BCM8704_USER_DEV3_ADDR, 0x20); | ||
1494 | if (err < 0) | ||
1495 | return err; | ||
1496 | pr_info("Port %u USER_DEV3(0x20) [%04x]\n", np->port, err); | ||
1497 | |||
1498 | err = mdio_read(np, np->phy_addr, BCM8704_PHYXS_DEV_ADDR, | ||
1499 | MII_NWAYTEST); | ||
1500 | if (err < 0) | ||
1501 | return err; | ||
1502 | pr_info("Port %u PHYXS(MII_NWAYTEST) [%04x]\n", np->port, err); | ||
1503 | #endif | ||
1504 | |||
1505 | /* XXX dig this out; it might not be so useful XXX */ | ||
1506 | err = mdio_read(np, np->phy_addr, BCM8704_USER_DEV3_ADDR, | ||
1507 | BCM8704_USER_ANALOG_STATUS0); | ||
1508 | if (err < 0) | ||
1509 | return err; | ||
1510 | err = mdio_read(np, np->phy_addr, BCM8704_USER_DEV3_ADDR, | ||
1511 | BCM8704_USER_ANALOG_STATUS0); | ||
1512 | if (err < 0) | ||
1513 | return err; | ||
1514 | analog_stat0 = err; | ||
1515 | |||
1516 | err = mdio_read(np, np->phy_addr, BCM8704_USER_DEV3_ADDR, | ||
1517 | BCM8704_USER_TX_ALARM_STATUS); | ||
1518 | if (err < 0) | ||
1519 | return err; | ||
1520 | err = mdio_read(np, np->phy_addr, BCM8704_USER_DEV3_ADDR, | ||
1521 | BCM8704_USER_TX_ALARM_STATUS); | ||
1522 | if (err < 0) | ||
1523 | return err; | ||
1524 | tx_alarm_status = err; | ||
1525 | |||
1526 | if (analog_stat0 != 0x03fc) { | ||
1527 | if ((analog_stat0 == 0x43bc) && (tx_alarm_status != 0)) { | ||
1528 | pr_info("Port %u cable not connected or bad cable\n", | ||
1529 | np->port); | ||
1530 | } else if (analog_stat0 == 0x639c) { | ||
1531 | pr_info("Port %u optical module is bad or missing\n", | ||
1532 | np->port); | ||
1533 | } | ||
1534 | } | ||
1535 | |||
1536 | return 0; | ||
1537 | } | ||
1538 | |||
1539 | static int xcvr_10g_set_lb_bcm870x(struct niu *np) | ||
1540 | { | ||
1541 | struct niu_link_config *lp = &np->link_config; | ||
1542 | int err; | ||
1543 | |||
1544 | err = mdio_read(np, np->phy_addr, BCM8704_PCS_DEV_ADDR, | ||
1545 | MII_BMCR); | ||
1546 | if (err < 0) | ||
1547 | return err; | ||
1548 | |||
1549 | err &= ~BMCR_LOOPBACK; | ||
1550 | |||
1551 | if (lp->loopback_mode == LOOPBACK_MAC) | ||
1552 | err |= BMCR_LOOPBACK; | ||
1553 | |||
1554 | err = mdio_write(np, np->phy_addr, BCM8704_PCS_DEV_ADDR, | ||
1555 | MII_BMCR, err); | ||
1556 | if (err) | ||
1557 | return err; | ||
1558 | |||
1559 | return 0; | ||
1560 | } | ||
1561 | |||
1562 | static int xcvr_init_10g_bcm8706(struct niu *np) | ||
1563 | { | ||
1564 | int err = 0; | ||
1565 | u64 val; | ||
1566 | |||
1567 | if ((np->flags & NIU_FLAGS_HOTPLUG_PHY) && | ||
1568 | (np->flags & NIU_FLAGS_HOTPLUG_PHY_PRESENT) == 0) | ||
1569 | return err; | ||
1570 | |||
1571 | val = nr64_mac(XMAC_CONFIG); | ||
1572 | val &= ~XMAC_CONFIG_LED_POLARITY; | ||
1573 | val |= XMAC_CONFIG_FORCE_LED_ON; | ||
1574 | nw64_mac(XMAC_CONFIG, val); | ||
1575 | |||
1576 | val = nr64(MIF_CONFIG); | ||
1577 | val |= MIF_CONFIG_INDIRECT_MODE; | ||
1578 | nw64(MIF_CONFIG, val); | ||
1579 | |||
1580 | err = bcm8704_reset(np); | ||
1581 | if (err) | ||
1582 | return err; | ||
1583 | |||
1584 | err = xcvr_10g_set_lb_bcm870x(np); | ||
1585 | if (err) | ||
1586 | return err; | ||
1587 | |||
1588 | err = bcm8706_init_user_dev3(np); | ||
1589 | if (err) | ||
1590 | return err; | ||
1591 | |||
1592 | err = xcvr_diag_bcm870x(np); | ||
1593 | if (err) | ||
1594 | return err; | ||
1595 | |||
1596 | return 0; | ||
1597 | } | ||
1598 | |||
1599 | static int xcvr_init_10g_bcm8704(struct niu *np) | ||
1600 | { | ||
1601 | int err; | ||
1602 | |||
1603 | err = bcm8704_reset(np); | ||
1604 | if (err) | ||
1605 | return err; | ||
1606 | |||
1607 | err = bcm8704_init_user_dev3(np); | ||
1608 | if (err) | ||
1609 | return err; | ||
1610 | |||
1611 | err = xcvr_10g_set_lb_bcm870x(np); | ||
1612 | if (err) | ||
1613 | return err; | ||
1614 | |||
1615 | err = xcvr_diag_bcm870x(np); | ||
1616 | if (err) | ||
1617 | return err; | ||
1618 | |||
1619 | return 0; | ||
1620 | } | ||
1621 | |||
1622 | static int xcvr_init_10g(struct niu *np) | ||
1623 | { | ||
1624 | int phy_id, err; | ||
1625 | u64 val; | ||
1626 | |||
1627 | val = nr64_mac(XMAC_CONFIG); | ||
1628 | val &= ~XMAC_CONFIG_LED_POLARITY; | ||
1629 | val |= XMAC_CONFIG_FORCE_LED_ON; | ||
1630 | nw64_mac(XMAC_CONFIG, val); | ||
1631 | |||
1632 | /* XXX shared resource, lock parent XXX */ | ||
1633 | val = nr64(MIF_CONFIG); | ||
1634 | val |= MIF_CONFIG_INDIRECT_MODE; | ||
1635 | nw64(MIF_CONFIG, val); | ||
1636 | |||
1637 | phy_id = phy_decode(np->parent->port_phy, np->port); | ||
1638 | phy_id = np->parent->phy_probe_info.phy_id[phy_id][np->port]; | ||
1639 | |||
1640 | /* handle different phy types */ | ||
1641 | switch (phy_id & NIU_PHY_ID_MASK) { | ||
1642 | case NIU_PHY_ID_MRVL88X2011: | ||
1643 | err = xcvr_init_10g_mrvl88x2011(np); | ||
1644 | break; | ||
1645 | |||
1646 | default: /* bcom 8704 */ | ||
1647 | err = xcvr_init_10g_bcm8704(np); | ||
1648 | break; | ||
1649 | } | ||
1650 | |||
1651 | return err; | ||
1652 | } | ||
1653 | |||
1654 | static int mii_reset(struct niu *np) | ||
1655 | { | ||
1656 | int limit, err; | ||
1657 | |||
1658 | err = mii_write(np, np->phy_addr, MII_BMCR, BMCR_RESET); | ||
1659 | if (err) | ||
1660 | return err; | ||
1661 | |||
1662 | limit = 1000; | ||
1663 | while (--limit >= 0) { | ||
1664 | udelay(500); | ||
1665 | err = mii_read(np, np->phy_addr, MII_BMCR); | ||
1666 | if (err < 0) | ||
1667 | return err; | ||
1668 | if (!(err & BMCR_RESET)) | ||
1669 | break; | ||
1670 | } | ||
1671 | if (limit < 0) { | ||
1672 | netdev_err(np->dev, "Port %u MII would not reset, bmcr[%04x]\n", | ||
1673 | np->port, err); | ||
1674 | return -ENODEV; | ||
1675 | } | ||
1676 | |||
1677 | return 0; | ||
1678 | } | ||
1679 | |||
1680 | static int xcvr_init_1g_rgmii(struct niu *np) | ||
1681 | { | ||
1682 | int err; | ||
1683 | u64 val; | ||
1684 | u16 bmcr, bmsr, estat; | ||
1685 | |||
1686 | val = nr64(MIF_CONFIG); | ||
1687 | val &= ~MIF_CONFIG_INDIRECT_MODE; | ||
1688 | nw64(MIF_CONFIG, val); | ||
1689 | |||
1690 | err = mii_reset(np); | ||
1691 | if (err) | ||
1692 | return err; | ||
1693 | |||
1694 | err = mii_read(np, np->phy_addr, MII_BMSR); | ||
1695 | if (err < 0) | ||
1696 | return err; | ||
1697 | bmsr = err; | ||
1698 | |||
1699 | estat = 0; | ||
1700 | if (bmsr & BMSR_ESTATEN) { | ||
1701 | err = mii_read(np, np->phy_addr, MII_ESTATUS); | ||
1702 | if (err < 0) | ||
1703 | return err; | ||
1704 | estat = err; | ||
1705 | } | ||
1706 | |||
1707 | bmcr = 0; | ||
1708 | err = mii_write(np, np->phy_addr, MII_BMCR, bmcr); | ||
1709 | if (err) | ||
1710 | return err; | ||
1711 | |||
1712 | if (bmsr & BMSR_ESTATEN) { | ||
1713 | u16 ctrl1000 = 0; | ||
1714 | |||
1715 | if (estat & ESTATUS_1000_TFULL) | ||
1716 | ctrl1000 |= ADVERTISE_1000FULL; | ||
1717 | err = mii_write(np, np->phy_addr, MII_CTRL1000, ctrl1000); | ||
1718 | if (err) | ||
1719 | return err; | ||
1720 | } | ||
1721 | |||
1722 | bmcr = (BMCR_SPEED1000 | BMCR_FULLDPLX); | ||
1723 | |||
1724 | err = mii_write(np, np->phy_addr, MII_BMCR, bmcr); | ||
1725 | if (err) | ||
1726 | return err; | ||
1727 | |||
1728 | err = mii_read(np, np->phy_addr, MII_BMCR); | ||
1729 | if (err < 0) | ||
1730 | return err; | ||
1731 | bmcr = mii_read(np, np->phy_addr, MII_BMCR); | ||
1732 | |||
1733 | err = mii_read(np, np->phy_addr, MII_BMSR); | ||
1734 | if (err < 0) | ||
1735 | return err; | ||
1736 | |||
1737 | return 0; | ||
1738 | } | ||
1739 | |||
1740 | static int mii_init_common(struct niu *np) | ||
1741 | { | ||
1742 | struct niu_link_config *lp = &np->link_config; | ||
1743 | u16 bmcr, bmsr, adv, estat; | ||
1744 | int err; | ||
1745 | |||
1746 | err = mii_reset(np); | ||
1747 | if (err) | ||
1748 | return err; | ||
1749 | |||
1750 | err = mii_read(np, np->phy_addr, MII_BMSR); | ||
1751 | if (err < 0) | ||
1752 | return err; | ||
1753 | bmsr = err; | ||
1754 | |||
1755 | estat = 0; | ||
1756 | if (bmsr & BMSR_ESTATEN) { | ||
1757 | err = mii_read(np, np->phy_addr, MII_ESTATUS); | ||
1758 | if (err < 0) | ||
1759 | return err; | ||
1760 | estat = err; | ||
1761 | } | ||
1762 | |||
1763 | bmcr = 0; | ||
1764 | err = mii_write(np, np->phy_addr, MII_BMCR, bmcr); | ||
1765 | if (err) | ||
1766 | return err; | ||
1767 | |||
1768 | if (lp->loopback_mode == LOOPBACK_MAC) { | ||
1769 | bmcr |= BMCR_LOOPBACK; | ||
1770 | if (lp->active_speed == SPEED_1000) | ||
1771 | bmcr |= BMCR_SPEED1000; | ||
1772 | if (lp->active_duplex == DUPLEX_FULL) | ||
1773 | bmcr |= BMCR_FULLDPLX; | ||
1774 | } | ||
1775 | |||
1776 | if (lp->loopback_mode == LOOPBACK_PHY) { | ||
1777 | u16 aux; | ||
1778 | |||
1779 | aux = (BCM5464R_AUX_CTL_EXT_LB | | ||
1780 | BCM5464R_AUX_CTL_WRITE_1); | ||
1781 | err = mii_write(np, np->phy_addr, BCM5464R_AUX_CTL, aux); | ||
1782 | if (err) | ||
1783 | return err; | ||
1784 | } | ||
1785 | |||
1786 | if (lp->autoneg) { | ||
1787 | u16 ctrl1000; | ||
1788 | |||
1789 | adv = ADVERTISE_CSMA | ADVERTISE_PAUSE_CAP; | ||
1790 | if ((bmsr & BMSR_10HALF) && | ||
1791 | (lp->advertising & ADVERTISED_10baseT_Half)) | ||
1792 | adv |= ADVERTISE_10HALF; | ||
1793 | if ((bmsr & BMSR_10FULL) && | ||
1794 | (lp->advertising & ADVERTISED_10baseT_Full)) | ||
1795 | adv |= ADVERTISE_10FULL; | ||
1796 | if ((bmsr & BMSR_100HALF) && | ||
1797 | (lp->advertising & ADVERTISED_100baseT_Half)) | ||
1798 | adv |= ADVERTISE_100HALF; | ||
1799 | if ((bmsr & BMSR_100FULL) && | ||
1800 | (lp->advertising & ADVERTISED_100baseT_Full)) | ||
1801 | adv |= ADVERTISE_100FULL; | ||
1802 | err = mii_write(np, np->phy_addr, MII_ADVERTISE, adv); | ||
1803 | if (err) | ||
1804 | return err; | ||
1805 | |||
1806 | if (likely(bmsr & BMSR_ESTATEN)) { | ||
1807 | ctrl1000 = 0; | ||
1808 | if ((estat & ESTATUS_1000_THALF) && | ||
1809 | (lp->advertising & ADVERTISED_1000baseT_Half)) | ||
1810 | ctrl1000 |= ADVERTISE_1000HALF; | ||
1811 | if ((estat & ESTATUS_1000_TFULL) && | ||
1812 | (lp->advertising & ADVERTISED_1000baseT_Full)) | ||
1813 | ctrl1000 |= ADVERTISE_1000FULL; | ||
1814 | err = mii_write(np, np->phy_addr, | ||
1815 | MII_CTRL1000, ctrl1000); | ||
1816 | if (err) | ||
1817 | return err; | ||
1818 | } | ||
1819 | |||
1820 | bmcr |= (BMCR_ANENABLE | BMCR_ANRESTART); | ||
1821 | } else { | ||
1822 | /* !lp->autoneg */ | ||
1823 | int fulldpx; | ||
1824 | |||
1825 | if (lp->duplex == DUPLEX_FULL) { | ||
1826 | bmcr |= BMCR_FULLDPLX; | ||
1827 | fulldpx = 1; | ||
1828 | } else if (lp->duplex == DUPLEX_HALF) | ||
1829 | fulldpx = 0; | ||
1830 | else | ||
1831 | return -EINVAL; | ||
1832 | |||
1833 | if (lp->speed == SPEED_1000) { | ||
1834 | /* Reject 1000 Mb/s full if the PHY's extended status lacks | ||
1835 | 1000BASE-T full, or half if it lacks 1000BASE-T half. */ | ||
1836 | if ((fulldpx && !(estat & ESTATUS_1000_TFULL)) || | ||
1837 | (!fulldpx && !(estat & ESTATUS_1000_THALF))) | ||
1838 | return -EINVAL; | ||
1839 | bmcr |= BMCR_SPEED1000; | ||
1840 | } else if (lp->speed == SPEED_100) { | ||
1841 | if ((fulldpx && !(bmsr & BMSR_100FULL)) || | ||
1842 | (!fulldpx && !(bmsr & BMSR_100HALF))) | ||
1843 | return -EINVAL; | ||
1844 | bmcr |= BMCR_SPEED100; | ||
1845 | } else if (lp->speed == SPEED_10) { | ||
1846 | if ((fulldpx && !(bmsr & BMSR_10FULL)) || | ||
1847 | (!fulldpx && !(bmsr & BMSR_10HALF))) | ||
1848 | return -EINVAL; | ||
1849 | } else | ||
1850 | return -EINVAL; | ||
1851 | } | ||
1852 | |||
1853 | err = mii_write(np, np->phy_addr, MII_BMCR, bmcr); | ||
1854 | if (err) | ||
1855 | return err; | ||
1856 | |||
1857 | #if 0 | ||
1858 | err = mii_read(np, np->phy_addr, MII_BMCR); | ||
1859 | if (err < 0) | ||
1860 | return err; | ||
1861 | bmcr = err; | ||
1862 | |||
1863 | err = mii_read(np, np->phy_addr, MII_BMSR); | ||
1864 | if (err < 0) | ||
1865 | return err; | ||
1866 | bmsr = err; | ||
1867 | |||
1868 | pr_info("Port %u after MII init bmcr[%04x] bmsr[%04x]\n", | ||
1869 | np->port, bmcr, bmsr); | ||
1870 | #endif | ||
1871 | |||
1872 | return 0; | ||
1873 | } | ||
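A worked example of the autoneg branch above, assuming loopback is disabled and a 10/100 PHY that reports all four BMSR ability bits while link_config advertises everything (bit values per linux/mii.h):

/* adv  = ADVERTISE_CSMA (0x0001) | ADVERTISE_PAUSE_CAP (0x0400) |
 *        ADVERTISE_10HALF (0x0020) | ADVERTISE_10FULL (0x0040) |
 *        ADVERTISE_100HALF (0x0080) | ADVERTISE_100FULL (0x0100) = 0x05e1
 * bmcr = BMCR_ANENABLE (0x1000) | BMCR_ANRESTART (0x0200)       = 0x1200
 */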
1874 | |||
1875 | static int xcvr_init_1g(struct niu *np) | ||
1876 | { | ||
1877 | u64 val; | ||
1878 | |||
1879 | /* XXX shared resource, lock parent XXX */ | ||
1880 | val = nr64(MIF_CONFIG); | ||
1881 | val &= ~MIF_CONFIG_INDIRECT_MODE; | ||
1882 | nw64(MIF_CONFIG, val); | ||
1883 | |||
1884 | return mii_init_common(np); | ||
1885 | } | ||
1886 | |||
1887 | static int niu_xcvr_init(struct niu *np) | ||
1888 | { | ||
1889 | const struct niu_phy_ops *ops = np->phy_ops; | ||
1890 | int err; | ||
1891 | |||
1892 | err = 0; | ||
1893 | if (ops->xcvr_init) | ||
1894 | err = ops->xcvr_init(np); | ||
1895 | |||
1896 | return err; | ||
1897 | } | ||
1898 | |||
1899 | static int niu_serdes_init(struct niu *np) | ||
1900 | { | ||
1901 | const struct niu_phy_ops *ops = np->phy_ops; | ||
1902 | int err; | ||
1903 | |||
1904 | err = 0; | ||
1905 | if (ops->serdes_init) | ||
1906 | err = ops->serdes_init(np); | ||
1907 | |||
1908 | return err; | ||
1909 | } | ||
1910 | |||
1911 | static void niu_init_xif(struct niu *); | ||
1912 | static void niu_handle_led(struct niu *, int status); | ||
1913 | |||
1914 | static int niu_link_status_common(struct niu *np, int link_up) | ||
1915 | { | ||
1916 | struct niu_link_config *lp = &np->link_config; | ||
1917 | struct net_device *dev = np->dev; | ||
1918 | unsigned long flags; | ||
1919 | |||
1920 | if (!netif_carrier_ok(dev) && link_up) { | ||
1921 | netif_info(np, link, dev, "Link is up at %s, %s duplex\n", | ||
1922 | lp->active_speed == SPEED_10000 ? "10Gb/sec" : | ||
1923 | lp->active_speed == SPEED_1000 ? "1Gb/sec" : | ||
1924 | lp->active_speed == SPEED_100 ? "100Mbit/sec" : | ||
1925 | "10Mbit/sec", | ||
1926 | lp->active_duplex == DUPLEX_FULL ? "full" : "half"); | ||
1927 | |||
1928 | spin_lock_irqsave(&np->lock, flags); | ||
1929 | niu_init_xif(np); | ||
1930 | niu_handle_led(np, 1); | ||
1931 | spin_unlock_irqrestore(&np->lock, flags); | ||
1932 | |||
1933 | netif_carrier_on(dev); | ||
1934 | } else if (netif_carrier_ok(dev) && !link_up) { | ||
1935 | netif_warn(np, link, dev, "Link is down\n"); | ||
1936 | spin_lock_irqsave(&np->lock, flags); | ||
1937 | niu_handle_led(np, 0); | ||
1938 | spin_unlock_irqrestore(&np->lock, flags); | ||
1939 | netif_carrier_off(dev); | ||
1940 | } | ||
1941 | |||
1942 | return 0; | ||
1943 | } | ||
1944 | |||
1945 | static int link_status_10g_mrvl(struct niu *np, int *link_up_p) | ||
1946 | { | ||
1947 | int err, link_up, pma_status, pcs_status; | ||
1948 | |||
1949 | link_up = 0; | ||
1950 | |||
1951 | err = mdio_read(np, np->phy_addr, MRVL88X2011_USER_DEV1_ADDR, | ||
1952 | MRVL88X2011_10G_PMD_STATUS_2); | ||
1953 | if (err < 0) | ||
1954 | goto out; | ||
1955 | |||
1956 | /* Check PMA/PMD Register: 1.0001.2 == 1 */ | ||
1957 | err = mdio_read(np, np->phy_addr, MRVL88X2011_USER_DEV1_ADDR, | ||
1958 | MRVL88X2011_PMA_PMD_STATUS_1); | ||
1959 | if (err < 0) | ||
1960 | goto out; | ||
1961 | |||
1962 | pma_status = ((err & MRVL88X2011_LNK_STATUS_OK) ? 1 : 0); | ||
1963 | |||
1964 | /* Check PMC Register : 3.0001.2 == 1: read twice */ | ||
1965 | err = mdio_read(np, np->phy_addr, MRVL88X2011_USER_DEV3_ADDR, | ||
1966 | MRVL88X2011_PMA_PMD_STATUS_1); | ||
1967 | if (err < 0) | ||
1968 | goto out; | ||
1969 | |||
1970 | err = mdio_read(np, np->phy_addr, MRVL88X2011_USER_DEV3_ADDR, | ||
1971 | MRVL88X2011_PMA_PMD_STATUS_1); | ||
1972 | if (err < 0) | ||
1973 | goto out; | ||
1974 | |||
1975 | pcs_status = ((err & MRVL88X2011_LNK_STATUS_OK) ? 1 : 0); | ||
1976 | |||
1977 | /* Check XGXS Register : 4.0018.[0-3,12] */ | ||
1978 | err = mdio_read(np, np->phy_addr, MRVL88X2011_USER_DEV4_ADDR, | ||
1979 | MRVL88X2011_10G_XGXS_LANE_STAT); | ||
1980 | if (err < 0) | ||
1981 | goto out; | ||
1982 | |||
1983 | if (err == (PHYXS_XGXS_LANE_STAT_ALINGED | PHYXS_XGXS_LANE_STAT_LANE3 | | ||
1984 | PHYXS_XGXS_LANE_STAT_LANE2 | PHYXS_XGXS_LANE_STAT_LANE1 | | ||
1985 | PHYXS_XGXS_LANE_STAT_LANE0 | PHYXS_XGXS_LANE_STAT_MAGIC | | ||
1986 | 0x800)) | ||
1987 | link_up = (pma_status && pcs_status) ? 1 : 0; | ||
1988 | |||
1989 | np->link_config.active_speed = SPEED_10000; | ||
1990 | np->link_config.active_duplex = DUPLEX_FULL; | ||
1991 | err = 0; | ||
1992 | out: | ||
1993 | mrvl88x2011_act_led(np, (link_up ? | ||
1994 | MRVL88X2011_LED_CTL_PCS_ACT : | ||
1995 | MRVL88X2011_LED_CTL_OFF)); | ||
1996 | |||
1997 | *link_up_p = link_up; | ||
1998 | return err; | ||
1999 | } | ||
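The register comments in this function use clause-45 MDIO shorthand, device.register.bit:

/* 1.0001.2 = MMD 1 (PMA/PMD), register 0x0001 (status 1), bit 2 (link).
 * 3.0001.2 = the same bit in MMD 3 (PCS). The bit is latched low, which
 * is why the PCS copy is read twice and only the second value is used.
 */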
2000 | |||
2001 | static int link_status_10g_bcm8706(struct niu *np, int *link_up_p) | ||
2002 | { | ||
2003 | int err, link_up; | ||
2004 | link_up = 0; | ||
2005 | |||
2006 | err = mdio_read(np, np->phy_addr, BCM8704_PMA_PMD_DEV_ADDR, | ||
2007 | BCM8704_PMD_RCV_SIGDET); | ||
2008 | if (err < 0 || err == 0xffff) | ||
2009 | goto out; | ||
2010 | if (!(err & PMD_RCV_SIGDET_GLOBAL)) { | ||
2011 | err = 0; | ||
2012 | goto out; | ||
2013 | } | ||
2014 | |||
2015 | err = mdio_read(np, np->phy_addr, BCM8704_PCS_DEV_ADDR, | ||
2016 | BCM8704_PCS_10G_R_STATUS); | ||
2017 | if (err < 0) | ||
2018 | goto out; | ||
2019 | |||
2020 | if (!(err & PCS_10G_R_STATUS_BLK_LOCK)) { | ||
2021 | err = 0; | ||
2022 | goto out; | ||
2023 | } | ||
2024 | |||
2025 | err = mdio_read(np, np->phy_addr, BCM8704_PHYXS_DEV_ADDR, | ||
2026 | BCM8704_PHYXS_XGXS_LANE_STAT); | ||
2027 | if (err < 0) | ||
2028 | goto out; | ||
2029 | if (err != (PHYXS_XGXS_LANE_STAT_ALINGED | | ||
2030 | PHYXS_XGXS_LANE_STAT_MAGIC | | ||
2031 | PHYXS_XGXS_LANE_STAT_PATTEST | | ||
2032 | PHYXS_XGXS_LANE_STAT_LANE3 | | ||
2033 | PHYXS_XGXS_LANE_STAT_LANE2 | | ||
2034 | PHYXS_XGXS_LANE_STAT_LANE1 | | ||
2035 | PHYXS_XGXS_LANE_STAT_LANE0)) { | ||
2036 | err = 0; | ||
2037 | np->link_config.active_speed = SPEED_INVALID; | ||
2038 | np->link_config.active_duplex = DUPLEX_INVALID; | ||
2039 | goto out; | ||
2040 | } | ||
2041 | |||
2042 | link_up = 1; | ||
2043 | np->link_config.active_speed = SPEED_10000; | ||
2044 | np->link_config.active_duplex = DUPLEX_FULL; | ||
2045 | err = 0; | ||
2046 | |||
2047 | out: | ||
2048 | *link_up_p = link_up; | ||
2049 | return err; | ||
2050 | } | ||
2051 | |||
2052 | static int link_status_10g_bcom(struct niu *np, int *link_up_p) | ||
2053 | { | ||
2054 | int err, link_up; | ||
2055 | |||
2056 | link_up = 0; | ||
2057 | |||
2058 | err = mdio_read(np, np->phy_addr, BCM8704_PMA_PMD_DEV_ADDR, | ||
2059 | BCM8704_PMD_RCV_SIGDET); | ||
2060 | if (err < 0) | ||
2061 | goto out; | ||
2062 | if (!(err & PMD_RCV_SIGDET_GLOBAL)) { | ||
2063 | err = 0; | ||
2064 | goto out; | ||
2065 | } | ||
2066 | |||
2067 | err = mdio_read(np, np->phy_addr, BCM8704_PCS_DEV_ADDR, | ||
2068 | BCM8704_PCS_10G_R_STATUS); | ||
2069 | if (err < 0) | ||
2070 | goto out; | ||
2071 | if (!(err & PCS_10G_R_STATUS_BLK_LOCK)) { | ||
2072 | err = 0; | ||
2073 | goto out; | ||
2074 | } | ||
2075 | |||
2076 | err = mdio_read(np, np->phy_addr, BCM8704_PHYXS_DEV_ADDR, | ||
2077 | BCM8704_PHYXS_XGXS_LANE_STAT); | ||
2078 | if (err < 0) | ||
2079 | goto out; | ||
2080 | |||
2081 | if (err != (PHYXS_XGXS_LANE_STAT_ALINGED | | ||
2082 | PHYXS_XGXS_LANE_STAT_MAGIC | | ||
2083 | PHYXS_XGXS_LANE_STAT_LANE3 | | ||
2084 | PHYXS_XGXS_LANE_STAT_LANE2 | | ||
2085 | PHYXS_XGXS_LANE_STAT_LANE1 | | ||
2086 | PHYXS_XGXS_LANE_STAT_LANE0)) { | ||
2087 | err = 0; | ||
2088 | goto out; | ||
2089 | } | ||
2090 | |||
2091 | link_up = 1; | ||
2092 | np->link_config.active_speed = SPEED_10000; | ||
2093 | np->link_config.active_duplex = DUPLEX_FULL; | ||
2094 | err = 0; | ||
2095 | |||
2096 | out: | ||
2097 | *link_up_p = link_up; | ||
2098 | return err; | ||
2099 | } | ||
2100 | |||
2101 | static int link_status_10g(struct niu *np, int *link_up_p) | ||
2102 | { | ||
2103 | unsigned long flags; | ||
2104 | int err = -EINVAL; | ||
2105 | |||
2106 | spin_lock_irqsave(&np->lock, flags); | ||
2107 | |||
2108 | if (np->link_config.loopback_mode == LOOPBACK_DISABLED) { | ||
2109 | int phy_id; | ||
2110 | |||
2111 | phy_id = phy_decode(np->parent->port_phy, np->port); | ||
2112 | phy_id = np->parent->phy_probe_info.phy_id[phy_id][np->port]; | ||
2113 | |||
2114 | /* handle different phy types */ | ||
2115 | switch (phy_id & NIU_PHY_ID_MASK) { | ||
2116 | case NIU_PHY_ID_MRVL88X2011: | ||
2117 | err = link_status_10g_mrvl(np, link_up_p); | ||
2118 | break; | ||
2119 | |||
2120 | default: /* bcom 8704 */ | ||
2121 | err = link_status_10g_bcom(np, link_up_p); | ||
2122 | break; | ||
2123 | } | ||
2124 | } | ||
2125 | |||
2126 | spin_unlock_irqrestore(&np->lock, flags); | ||
2127 | |||
2128 | return err; | ||
2129 | } | ||
2130 | |||
2131 | static int niu_10g_phy_present(struct niu *np) | ||
2132 | { | ||
2133 | u64 sig, mask, val; | ||
2134 | |||
2135 | sig = nr64(ESR_INT_SIGNALS); | ||
2136 | switch (np->port) { | ||
2137 | case 0: | ||
2138 | mask = ESR_INT_SIGNALS_P0_BITS; | ||
2139 | val = (ESR_INT_SRDY0_P0 | | ||
2140 | ESR_INT_DET0_P0 | | ||
2141 | ESR_INT_XSRDY_P0 | | ||
2142 | ESR_INT_XDP_P0_CH3 | | ||
2143 | ESR_INT_XDP_P0_CH2 | | ||
2144 | ESR_INT_XDP_P0_CH1 | | ||
2145 | ESR_INT_XDP_P0_CH0); | ||
2146 | break; | ||
2147 | |||
2148 | case 1: | ||
2149 | mask = ESR_INT_SIGNALS_P1_BITS; | ||
2150 | val = (ESR_INT_SRDY0_P1 | | ||
2151 | ESR_INT_DET0_P1 | | ||
2152 | ESR_INT_XSRDY_P1 | | ||
2153 | ESR_INT_XDP_P1_CH3 | | ||
2154 | ESR_INT_XDP_P1_CH2 | | ||
2155 | ESR_INT_XDP_P1_CH1 | | ||
2156 | ESR_INT_XDP_P1_CH0); | ||
2157 | break; | ||
2158 | |||
2159 | default: | ||
2160 | return 0; | ||
2161 | } | ||
2162 | |||
2163 | if ((sig & mask) != val) | ||
2164 | return 0; | ||
2165 | return 1; | ||
2166 | } | ||
2167 | |||
2168 | static int link_status_10g_hotplug(struct niu *np, int *link_up_p) | ||
2169 | { | ||
2170 | unsigned long flags; | ||
2171 | int err = 0; | ||
2172 | int phy_present; | ||
2173 | int phy_present_prev; | ||
2174 | |||
2175 | spin_lock_irqsave(&np->lock, flags); | ||
2176 | |||
2177 | if (np->link_config.loopback_mode == LOOPBACK_DISABLED) { | ||
2178 | phy_present_prev = (np->flags & NIU_FLAGS_HOTPLUG_PHY_PRESENT) ? | ||
2179 | 1 : 0; | ||
2180 | phy_present = niu_10g_phy_present(np); | ||
2181 | if (phy_present != phy_present_prev) { | ||
2182 | /* state change */ | ||
2183 | if (phy_present) { | ||
2184 | /* A NEM was just plugged in */ | ||
2185 | np->flags |= NIU_FLAGS_HOTPLUG_PHY_PRESENT; | ||
2186 | if (np->phy_ops->xcvr_init) | ||
2187 | err = np->phy_ops->xcvr_init(np); | ||
2188 | if (err) { | ||
2189 | err = mdio_read(np, np->phy_addr, | ||
2190 | BCM8704_PHYXS_DEV_ADDR, MII_BMCR); | ||
2191 | if (err == 0xffff) { | ||
2192 | /* No mdio, back-to-back XAUI */ | ||
2193 | goto out; | ||
2194 | } | ||
2195 | /* debounce */ | ||
2196 | np->flags &= ~NIU_FLAGS_HOTPLUG_PHY_PRESENT; | ||
2197 | } | ||
2198 | } else { | ||
2199 | np->flags &= ~NIU_FLAGS_HOTPLUG_PHY_PRESENT; | ||
2200 | *link_up_p = 0; | ||
2201 | netif_warn(np, link, np->dev, | ||
2202 | "Hotplug PHY Removed\n"); | ||
2203 | } | ||
2204 | } | ||
2205 | out: | ||
2206 | if (np->flags & NIU_FLAGS_HOTPLUG_PHY_PRESENT) { | ||
2207 | err = link_status_10g_bcm8706(np, link_up_p); | ||
2208 | if (err == 0xffff) { | ||
2209 | /* No mdio, back-to-back XAUI: it is C10NEM */ | ||
2210 | *link_up_p = 1; | ||
2211 | np->link_config.active_speed = SPEED_10000; | ||
2212 | np->link_config.active_duplex = DUPLEX_FULL; | ||
2213 | } | ||
2214 | } | ||
2215 | } | ||
2216 | |||
2217 | spin_unlock_irqrestore(&np->lock, flags); | ||
2218 | |||
2219 | return 0; | ||
2220 | } | ||
2221 | |||
2222 | static int niu_link_status(struct niu *np, int *link_up_p) | ||
2223 | { | ||
2224 | const struct niu_phy_ops *ops = np->phy_ops; | ||
2225 | int err; | ||
2226 | |||
2227 | err = 0; | ||
2228 | if (ops->link_status) | ||
2229 | err = ops->link_status(np, link_up_p); | ||
2230 | |||
2231 | return err; | ||
2232 | } | ||
2233 | |||
2234 | static void niu_timer(unsigned long __opaque) | ||
2235 | { | ||
2236 | struct niu *np = (struct niu *) __opaque; | ||
2237 | unsigned long off; | ||
2238 | int err, link_up; | ||
2239 | |||
2240 | err = niu_link_status(np, &link_up); | ||
2241 | if (!err) | ||
2242 | niu_link_status_common(np, link_up); | ||
2243 | |||
2244 | if (netif_carrier_ok(np->dev)) | ||
2245 | off = 5 * HZ; | ||
2246 | else | ||
2247 | off = 1 * HZ; | ||
2248 | np->timer.expires = jiffies + off; | ||
2249 | |||
2250 | add_timer(&np->timer); | ||
2251 | } | ||
2252 | |||
2253 | static const struct niu_phy_ops phy_ops_10g_serdes = { | ||
2254 | .serdes_init = serdes_init_10g_serdes, | ||
2255 | .link_status = link_status_10g_serdes, | ||
2256 | }; | ||
2257 | |||
2258 | static const struct niu_phy_ops phy_ops_10g_serdes_niu = { | ||
2259 | .serdes_init = serdes_init_niu_10g_serdes, | ||
2260 | .link_status = link_status_10g_serdes, | ||
2261 | }; | ||
2262 | |||
2263 | static const struct niu_phy_ops phy_ops_1g_serdes_niu = { | ||
2264 | .serdes_init = serdes_init_niu_1g_serdes, | ||
2265 | .link_status = link_status_1g_serdes, | ||
2266 | }; | ||
2267 | |||
2268 | static const struct niu_phy_ops phy_ops_1g_rgmii = { | ||
2269 | .xcvr_init = xcvr_init_1g_rgmii, | ||
2270 | .link_status = link_status_1g_rgmii, | ||
2271 | }; | ||
2272 | |||
2273 | static const struct niu_phy_ops phy_ops_10g_fiber_niu = { | ||
2274 | .serdes_init = serdes_init_niu_10g_fiber, | ||
2275 | .xcvr_init = xcvr_init_10g, | ||
2276 | .link_status = link_status_10g, | ||
2277 | }; | ||
2278 | |||
2279 | static const struct niu_phy_ops phy_ops_10g_fiber = { | ||
2280 | .serdes_init = serdes_init_10g, | ||
2281 | .xcvr_init = xcvr_init_10g, | ||
2282 | .link_status = link_status_10g, | ||
2283 | }; | ||
2284 | |||
2285 | static const struct niu_phy_ops phy_ops_10g_fiber_hotplug = { | ||
2286 | .serdes_init = serdes_init_10g, | ||
2287 | .xcvr_init = xcvr_init_10g_bcm8706, | ||
2288 | .link_status = link_status_10g_hotplug, | ||
2289 | }; | ||
2290 | |||
2291 | static const struct niu_phy_ops phy_ops_niu_10g_hotplug = { | ||
2292 | .serdes_init = serdes_init_niu_10g_fiber, | ||
2293 | .xcvr_init = xcvr_init_10g_bcm8706, | ||
2294 | .link_status = link_status_10g_hotplug, | ||
2295 | }; | ||
2296 | |||
2297 | static const struct niu_phy_ops phy_ops_10g_copper = { | ||
2298 | .serdes_init = serdes_init_10g, | ||
2299 | .link_status = link_status_10g, /* XXX */ | ||
2300 | }; | ||
2301 | |||
2302 | static const struct niu_phy_ops phy_ops_1g_fiber = { | ||
2303 | .serdes_init = serdes_init_1g, | ||
2304 | .xcvr_init = xcvr_init_1g, | ||
2305 | .link_status = link_status_1g, | ||
2306 | }; | ||
2307 | |||
2308 | static const struct niu_phy_ops phy_ops_1g_copper = { | ||
2309 | .xcvr_init = xcvr_init_1g, | ||
2310 | .link_status = link_status_1g, | ||
2311 | }; | ||
2312 | |||
2313 | struct niu_phy_template { | ||
2314 | const struct niu_phy_ops *ops; | ||
2315 | u32 phy_addr_base; | ||
2316 | }; | ||
2317 | |||
2318 | static const struct niu_phy_template phy_template_niu_10g_fiber = { | ||
2319 | .ops = &phy_ops_10g_fiber_niu, | ||
2320 | .phy_addr_base = 16, | ||
2321 | }; | ||
2322 | |||
2323 | static const struct niu_phy_template phy_template_niu_10g_serdes = { | ||
2324 | .ops = &phy_ops_10g_serdes_niu, | ||
2325 | .phy_addr_base = 0, | ||
2326 | }; | ||
2327 | |||
2328 | static const struct niu_phy_template phy_template_niu_1g_serdes = { | ||
2329 | .ops = &phy_ops_1g_serdes_niu, | ||
2330 | .phy_addr_base = 0, | ||
2331 | }; | ||
2332 | |||
2333 | static const struct niu_phy_template phy_template_10g_fiber = { | ||
2334 | .ops = &phy_ops_10g_fiber, | ||
2335 | .phy_addr_base = 8, | ||
2336 | }; | ||
2337 | |||
2338 | static const struct niu_phy_template phy_template_10g_fiber_hotplug = { | ||
2339 | .ops = &phy_ops_10g_fiber_hotplug, | ||
2340 | .phy_addr_base = 8, | ||
2341 | }; | ||
2342 | |||
2343 | static const struct niu_phy_template phy_template_niu_10g_hotplug = { | ||
2344 | .ops = &phy_ops_niu_10g_hotplug, | ||
2345 | .phy_addr_base = 8, | ||
2346 | }; | ||
2347 | |||
2348 | static const struct niu_phy_template phy_template_10g_copper = { | ||
2349 | .ops = &phy_ops_10g_copper, | ||
2350 | .phy_addr_base = 10, | ||
2351 | }; | ||
2352 | |||
2353 | static const struct niu_phy_template phy_template_1g_fiber = { | ||
2354 | .ops = &phy_ops_1g_fiber, | ||
2355 | .phy_addr_base = 0, | ||
2356 | }; | ||
2357 | |||
2358 | static const struct niu_phy_template phy_template_1g_copper = { | ||
2359 | .ops = &phy_ops_1g_copper, | ||
2360 | .phy_addr_base = 0, | ||
2361 | }; | ||
2362 | |||
2363 | static const struct niu_phy_template phy_template_1g_rgmii = { | ||
2364 | .ops = &phy_ops_1g_rgmii, | ||
2365 | .phy_addr_base = 0, | ||
2366 | }; | ||
2367 | |||
2368 | static const struct niu_phy_template phy_template_10g_serdes = { | ||
2369 | .ops = &phy_ops_10g_serdes, | ||
2370 | .phy_addr_base = 0, | ||
2371 | }; | ||
2372 | |||
2373 | static int niu_atca_port_num[4] = { | ||
2374 | 0, 0, 11, 10 | ||
2375 | }; | ||
2376 | |||
2377 | static int serdes_init_10g_serdes(struct niu *np) | ||
2378 | { | ||
2379 | struct niu_link_config *lp = &np->link_config; | ||
2380 | unsigned long ctrl_reg, test_cfg_reg, pll_cfg, i; | ||
2381 | u64 ctrl_val, test_cfg_val, sig, mask, val; | ||
2382 | |||
2383 | switch (np->port) { | ||
2384 | case 0: | ||
2385 | ctrl_reg = ENET_SERDES_0_CTRL_CFG; | ||
2386 | test_cfg_reg = ENET_SERDES_0_TEST_CFG; | ||
2387 | pll_cfg = ENET_SERDES_0_PLL_CFG; | ||
2388 | break; | ||
2389 | case 1: | ||
2390 | ctrl_reg = ENET_SERDES_1_CTRL_CFG; | ||
2391 | test_cfg_reg = ENET_SERDES_1_TEST_CFG; | ||
2392 | pll_cfg = ENET_SERDES_1_PLL_CFG; | ||
2393 | break; | ||
2394 | |||
2395 | default: | ||
2396 | return -EINVAL; | ||
2397 | } | ||
2398 | ctrl_val = (ENET_SERDES_CTRL_SDET_0 | | ||
2399 | ENET_SERDES_CTRL_SDET_1 | | ||
2400 | ENET_SERDES_CTRL_SDET_2 | | ||
2401 | ENET_SERDES_CTRL_SDET_3 | | ||
2402 | (0x5 << ENET_SERDES_CTRL_EMPH_0_SHIFT) | | ||
2403 | (0x5 << ENET_SERDES_CTRL_EMPH_1_SHIFT) | | ||
2404 | (0x5 << ENET_SERDES_CTRL_EMPH_2_SHIFT) | | ||
2405 | (0x5 << ENET_SERDES_CTRL_EMPH_3_SHIFT) | | ||
2406 | (0x1 << ENET_SERDES_CTRL_LADJ_0_SHIFT) | | ||
2407 | (0x1 << ENET_SERDES_CTRL_LADJ_1_SHIFT) | | ||
2408 | (0x1 << ENET_SERDES_CTRL_LADJ_2_SHIFT) | | ||
2409 | (0x1 << ENET_SERDES_CTRL_LADJ_3_SHIFT)); | ||
2410 | test_cfg_val = 0; | ||
2411 | |||
2412 | if (lp->loopback_mode == LOOPBACK_PHY) { | ||
2413 | test_cfg_val |= ((ENET_TEST_MD_PAD_LOOPBACK << | ||
2414 | ENET_SERDES_TEST_MD_0_SHIFT) | | ||
2415 | (ENET_TEST_MD_PAD_LOOPBACK << | ||
2416 | ENET_SERDES_TEST_MD_1_SHIFT) | | ||
2417 | (ENET_TEST_MD_PAD_LOOPBACK << | ||
2418 | ENET_SERDES_TEST_MD_2_SHIFT) | | ||
2419 | (ENET_TEST_MD_PAD_LOOPBACK << | ||
2420 | ENET_SERDES_TEST_MD_3_SHIFT)); | ||
2421 | } | ||
2422 | |||
2423 | esr_reset(np); | ||
2424 | nw64(pll_cfg, ENET_SERDES_PLL_FBDIV2); | ||
2425 | nw64(ctrl_reg, ctrl_val); | ||
2426 | nw64(test_cfg_reg, test_cfg_val); | ||
2427 | |||
2428 | /* Initialize all 4 lanes of the SERDES. */ | ||
2429 | for (i = 0; i < 4; i++) { | ||
2430 | u32 rxtx_ctrl, glue0; | ||
2431 | int err; | ||
2432 | |||
2433 | err = esr_read_rxtx_ctrl(np, i, &rxtx_ctrl); | ||
2434 | if (err) | ||
2435 | return err; | ||
2436 | err = esr_read_glue0(np, i, &glue0); | ||
2437 | if (err) | ||
2438 | return err; | ||
2439 | |||
2440 | rxtx_ctrl &= ~(ESR_RXTX_CTRL_VMUXLO); | ||
2441 | rxtx_ctrl |= (ESR_RXTX_CTRL_ENSTRETCH | | ||
2442 | (2 << ESR_RXTX_CTRL_VMUXLO_SHIFT)); | ||
2443 | |||
2444 | glue0 &= ~(ESR_GLUE_CTRL0_SRATE | | ||
2445 | ESR_GLUE_CTRL0_THCNT | | ||
2446 | ESR_GLUE_CTRL0_BLTIME); | ||
2447 | glue0 |= (ESR_GLUE_CTRL0_RXLOSENAB | | ||
2448 | (0xf << ESR_GLUE_CTRL0_SRATE_SHIFT) | | ||
2449 | (0xff << ESR_GLUE_CTRL0_THCNT_SHIFT) | | ||
2450 | (BLTIME_300_CYCLES << | ||
2451 | ESR_GLUE_CTRL0_BLTIME_SHIFT)); | ||
2452 | |||
2453 | err = esr_write_rxtx_ctrl(np, i, rxtx_ctrl); | ||
2454 | if (err) | ||
2455 | return err; | ||
2456 | err = esr_write_glue0(np, i, glue0); | ||
2457 | if (err) | ||
2458 | return err; | ||
2459 | } | ||
2460 | |||
2462 | sig = nr64(ESR_INT_SIGNALS); | ||
2463 | switch (np->port) { | ||
2464 | case 0: | ||
2465 | mask = ESR_INT_SIGNALS_P0_BITS; | ||
2466 | val = (ESR_INT_SRDY0_P0 | | ||
2467 | ESR_INT_DET0_P0 | | ||
2468 | ESR_INT_XSRDY_P0 | | ||
2469 | ESR_INT_XDP_P0_CH3 | | ||
2470 | ESR_INT_XDP_P0_CH2 | | ||
2471 | ESR_INT_XDP_P0_CH1 | | ||
2472 | ESR_INT_XDP_P0_CH0); | ||
2473 | break; | ||
2474 | |||
2475 | case 1: | ||
2476 | mask = ESR_INT_SIGNALS_P1_BITS; | ||
2477 | val = (ESR_INT_SRDY0_P1 | | ||
2478 | ESR_INT_DET0_P1 | | ||
2479 | ESR_INT_XSRDY_P1 | | ||
2480 | ESR_INT_XDP_P1_CH3 | | ||
2481 | ESR_INT_XDP_P1_CH2 | | ||
2482 | ESR_INT_XDP_P1_CH1 | | ||
2483 | ESR_INT_XDP_P1_CH0); | ||
2484 | break; | ||
2485 | |||
2486 | default: | ||
2487 | return -EINVAL; | ||
2488 | } | ||
2489 | |||
2490 | if ((sig & mask) != val) { | ||
2491 | int err; | ||
2492 | err = serdes_init_1g_serdes(np); | ||
2493 | if (!err) { | ||
2494 | np->flags &= ~NIU_FLAGS_10G; | ||
2495 | np->mac_xcvr = MAC_XCVR_PCS; | ||
2496 | } else { | ||
2497 | netdev_err(np->dev, "Port %u 10G/1G SERDES Link Failed\n", | ||
2498 | np->port); | ||
2499 | return -ENODEV; | ||
2500 | } | ||
2501 | } | ||
2502 | |||
2503 | return 0; | ||
2504 | } | ||
2505 | |||
2506 | static int niu_determine_phy_disposition(struct niu *np) | ||
2507 | { | ||
2508 | struct niu_parent *parent = np->parent; | ||
2509 | u8 plat_type = parent->plat_type; | ||
2510 | const struct niu_phy_template *tp; | ||
2511 | u32 phy_addr_off = 0; | ||
2512 | |||
2513 | if (plat_type == PLAT_TYPE_NIU) { | ||
2514 | switch (np->flags & | ||
2515 | (NIU_FLAGS_10G | | ||
2516 | NIU_FLAGS_FIBER | | ||
2517 | NIU_FLAGS_XCVR_SERDES)) { | ||
2518 | case NIU_FLAGS_10G | NIU_FLAGS_XCVR_SERDES: | ||
2519 | /* 10G Serdes */ | ||
2520 | tp = &phy_template_niu_10g_serdes; | ||
2521 | break; | ||
2522 | case NIU_FLAGS_XCVR_SERDES: | ||
2523 | /* 1G Serdes */ | ||
2524 | tp = &phy_template_niu_1g_serdes; | ||
2525 | break; | ||
2526 | case NIU_FLAGS_10G | NIU_FLAGS_FIBER: | ||
2527 | /* 10G Fiber */ | ||
2528 | default: | ||
2529 | if (np->flags & NIU_FLAGS_HOTPLUG_PHY) { | ||
2530 | tp = &phy_template_niu_10g_hotplug; | ||
2531 | if (np->port == 0) | ||
2532 | phy_addr_off = 8; | ||
2533 | if (np->port == 1) | ||
2534 | phy_addr_off = 12; | ||
2535 | } else { | ||
2536 | tp = &phy_template_niu_10g_fiber; | ||
2537 | phy_addr_off += np->port; | ||
2538 | } | ||
2539 | break; | ||
2540 | } | ||
2541 | } else { | ||
2542 | switch (np->flags & | ||
2543 | (NIU_FLAGS_10G | | ||
2544 | NIU_FLAGS_FIBER | | ||
2545 | NIU_FLAGS_XCVR_SERDES)) { | ||
2546 | case 0: | ||
2547 | /* 1G copper */ | ||
2548 | tp = &phy_template_1g_copper; | ||
2549 | if (plat_type == PLAT_TYPE_VF_P0) | ||
2550 | phy_addr_off = 10; | ||
2551 | else if (plat_type == PLAT_TYPE_VF_P1) | ||
2552 | phy_addr_off = 26; | ||
2553 | |||
2554 | phy_addr_off += (np->port ^ 0x3); | ||
2555 | break; | ||
2556 | |||
2557 | case NIU_FLAGS_10G: | ||
2558 | /* 10G copper */ | ||
2559 | tp = &phy_template_10g_copper; | ||
2560 | break; | ||
2561 | |||
2562 | case NIU_FLAGS_FIBER: | ||
2563 | /* 1G fiber */ | ||
2564 | tp = &phy_template_1g_fiber; | ||
2565 | break; | ||
2566 | |||
2567 | case NIU_FLAGS_10G | NIU_FLAGS_FIBER: | ||
2568 | /* 10G fiber */ | ||
2569 | tp = &phy_template_10g_fiber; | ||
2570 | if (plat_type == PLAT_TYPE_VF_P0 || | ||
2571 | plat_type == PLAT_TYPE_VF_P1) | ||
2572 | phy_addr_off = 8; | ||
2573 | phy_addr_off += np->port; | ||
2574 | if (np->flags & NIU_FLAGS_HOTPLUG_PHY) { | ||
2575 | tp = &phy_template_10g_fiber_hotplug; | ||
2576 | if (np->port == 0) | ||
2577 | phy_addr_off = 8; | ||
2578 | if (np->port == 1) | ||
2579 | phy_addr_off = 12; | ||
2580 | } | ||
2581 | break; | ||
2582 | |||
2583 | case NIU_FLAGS_10G | NIU_FLAGS_XCVR_SERDES: | ||
2584 | case NIU_FLAGS_XCVR_SERDES | NIU_FLAGS_FIBER: | ||
2585 | case NIU_FLAGS_XCVR_SERDES: | ||
2586 | switch(np->port) { | ||
2587 | case 0: | ||
2588 | case 1: | ||
2589 | tp = &phy_template_10g_serdes; | ||
2590 | break; | ||
2591 | case 2: | ||
2592 | case 3: | ||
2593 | tp = &phy_template_1g_rgmii; | ||
2594 | break; | ||
2595 | default: | ||
2596 | return -EINVAL; | ||
2598 | } | ||
2599 | phy_addr_off = niu_atca_port_num[np->port]; | ||
2600 | break; | ||
2601 | |||
2602 | default: | ||
2603 | return -EINVAL; | ||
2604 | } | ||
2605 | } | ||
2606 | |||
2607 | np->phy_ops = tp->ops; | ||
2608 | np->phy_addr = tp->phy_addr_base + phy_addr_off; | ||
2609 | |||
2610 | return 0; | ||
2611 | } | ||
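Two worked examples of the address arithmetic above, assuming the hotplug flag is clear (the port numbers are just illustrative):

/* 1G copper on PLAT_TYPE_VF_P0, port 0: off = 10 + (0 ^ 0x3) = 13,
 *   phy_addr = phy_template_1g_copper.phy_addr_base (0) + 13 = 13.
 * 10G fiber on PLAT_TYPE_VF_P1, port 1: off = 8 + 1 = 9,
 *   phy_addr = phy_template_10g_fiber.phy_addr_base (8) + 9 = 17.
 */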
2612 | |||
2613 | static int niu_init_link(struct niu *np) | ||
2614 | { | ||
2615 | struct niu_parent *parent = np->parent; | ||
2616 | int err, ignore; | ||
2617 | |||
2618 | if (parent->plat_type == PLAT_TYPE_NIU) { | ||
2619 | err = niu_xcvr_init(np); | ||
2620 | if (err) | ||
2621 | return err; | ||
2622 | msleep(200); | ||
2623 | } | ||
2624 | err = niu_serdes_init(np); | ||
2625 | if (err && !(np->flags & NIU_FLAGS_HOTPLUG_PHY)) | ||
2626 | return err; | ||
2627 | msleep(200); | ||
2628 | err = niu_xcvr_init(np); | ||
2629 | if (!err || (np->flags & NIU_FLAGS_HOTPLUG_PHY)) | ||
2630 | niu_link_status(np, &ignore); | ||
2631 | return 0; | ||
2632 | } | ||
2633 | |||
2634 | static void niu_set_primary_mac(struct niu *np, unsigned char *addr) | ||
2635 | { | ||
2636 | u16 reg0 = addr[4] << 8 | addr[5]; | ||
2637 | u16 reg1 = addr[2] << 8 | addr[3]; | ||
2638 | u16 reg2 = addr[0] << 8 | addr[1]; | ||
2639 | |||
2640 | if (np->flags & NIU_FLAGS_XMAC) { | ||
2641 | nw64_mac(XMAC_ADDR0, reg0); | ||
2642 | nw64_mac(XMAC_ADDR1, reg1); | ||
2643 | nw64_mac(XMAC_ADDR2, reg2); | ||
2644 | } else { | ||
2645 | nw64_mac(BMAC_ADDR0, reg0); | ||
2646 | nw64_mac(BMAC_ADDR1, reg1); | ||
2647 | nw64_mac(BMAC_ADDR2, reg2); | ||
2648 | } | ||
2649 | } | ||
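The MAC address is split big-endian into three 16-bit halves; for example (address made up for illustration):

/* addr = 00:14:4f:6c:a2:10  =>  reg2 = 0x0014 (addr[0..1]),
 *                               reg1 = 0x4f6c (addr[2..3]),
 *                               reg0 = 0xa210 (addr[4..5])
 */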
2650 | |||
2651 | static int niu_num_alt_addr(struct niu *np) | ||
2652 | { | ||
2653 | if (np->flags & NIU_FLAGS_XMAC) | ||
2654 | return XMAC_NUM_ALT_ADDR; | ||
2655 | else | ||
2656 | return BMAC_NUM_ALT_ADDR; | ||
2657 | } | ||
2658 | |||
2659 | static int niu_set_alt_mac(struct niu *np, int index, unsigned char *addr) | ||
2660 | { | ||
2661 | u16 reg0 = addr[4] << 8 | addr[5]; | ||
2662 | u16 reg1 = addr[2] << 8 | addr[3]; | ||
2663 | u16 reg2 = addr[0] << 8 | addr[1]; | ||
2664 | |||
2665 | if (index >= niu_num_alt_addr(np)) | ||
2666 | return -EINVAL; | ||
2667 | |||
2668 | if (np->flags & NIU_FLAGS_XMAC) { | ||
2669 | nw64_mac(XMAC_ALT_ADDR0(index), reg0); | ||
2670 | nw64_mac(XMAC_ALT_ADDR1(index), reg1); | ||
2671 | nw64_mac(XMAC_ALT_ADDR2(index), reg2); | ||
2672 | } else { | ||
2673 | nw64_mac(BMAC_ALT_ADDR0(index), reg0); | ||
2674 | nw64_mac(BMAC_ALT_ADDR1(index), reg1); | ||
2675 | nw64_mac(BMAC_ALT_ADDR2(index), reg2); | ||
2676 | } | ||
2677 | |||
2678 | return 0; | ||
2679 | } | ||
2680 | |||
2681 | static int niu_enable_alt_mac(struct niu *np, int index, int on) | ||
2682 | { | ||
2683 | unsigned long reg; | ||
2684 | u64 val, mask; | ||
2685 | |||
2686 | if (index >= niu_num_alt_addr(np)) | ||
2687 | return -EINVAL; | ||
2688 | |||
2689 | if (np->flags & NIU_FLAGS_XMAC) { | ||
2690 | reg = XMAC_ADDR_CMPEN; | ||
2691 | mask = 1 << index; | ||
2692 | } else { | ||
2693 | reg = BMAC_ADDR_CMPEN; | ||
2694 | mask = 1 << (index + 1); | ||
2695 | } | ||
2696 | |||
2697 | val = nr64_mac(reg); | ||
2698 | if (on) | ||
2699 | val |= mask; | ||
2700 | else | ||
2701 | val &= ~mask; | ||
2702 | nw64_mac(reg, val); | ||
2703 | |||
2704 | return 0; | ||
2705 | } | ||
2706 | |||
2707 | static void __set_rdc_table_num_hw(struct niu *np, unsigned long reg, | ||
2708 | int num, int mac_pref) | ||
2709 | { | ||
2710 | u64 val = nr64_mac(reg); | ||
2711 | val &= ~(HOST_INFO_MACRDCTBLN | HOST_INFO_MPR); | ||
2712 | val |= num; | ||
2713 | if (mac_pref) | ||
2714 | val |= HOST_INFO_MPR; | ||
2715 | nw64_mac(reg, val); | ||
2716 | } | ||
2717 | |||
2718 | static int __set_rdc_table_num(struct niu *np, | ||
2719 | int xmac_index, int bmac_index, | ||
2720 | int rdc_table_num, int mac_pref) | ||
2721 | { | ||
2722 | unsigned long reg; | ||
2723 | |||
2724 | if (rdc_table_num & ~HOST_INFO_MACRDCTBLN) | ||
2725 | return -EINVAL; | ||
2726 | if (np->flags & NIU_FLAGS_XMAC) | ||
2727 | reg = XMAC_HOST_INFO(xmac_index); | ||
2728 | else | ||
2729 | reg = BMAC_HOST_INFO(bmac_index); | ||
2730 | __set_rdc_table_num_hw(np, reg, rdc_table_num, mac_pref); | ||
2731 | return 0; | ||
2732 | } | ||
2733 | |||
2734 | static int niu_set_primary_mac_rdc_table(struct niu *np, int table_num, | ||
2735 | int mac_pref) | ||
2736 | { | ||
2737 | return __set_rdc_table_num(np, 17, 0, table_num, mac_pref); | ||
2738 | } | ||
2739 | |||
2740 | static int niu_set_multicast_mac_rdc_table(struct niu *np, int table_num, | ||
2741 | int mac_pref) | ||
2742 | { | ||
2743 | return __set_rdc_table_num(np, 16, 8, table_num, mac_pref); | ||
2744 | } | ||
2745 | |||
2746 | static int niu_set_alt_mac_rdc_table(struct niu *np, int idx, | ||
2747 | int table_num, int mac_pref) | ||
2748 | { | ||
2749 | if (idx >= niu_num_alt_addr(np)) | ||
2750 | return -EINVAL; | ||
2751 | return __set_rdc_table_num(np, idx, idx + 1, table_num, mac_pref); | ||
2752 | } | ||
2753 | |||
2754 | static u64 vlan_entry_set_parity(u64 reg_val) | ||
2755 | { | ||
2756 | u64 port01_mask; | ||
2757 | u64 port23_mask; | ||
2758 | |||
2759 | port01_mask = 0x00ff; | ||
2760 | port23_mask = 0xff00; | ||
2761 | |||
2762 | if (hweight64(reg_val & port01_mask) & 1) | ||
2763 | reg_val |= ENET_VLAN_TBL_PARITY0; | ||
2764 | else | ||
2765 | reg_val &= ~ENET_VLAN_TBL_PARITY0; | ||
2766 | |||
2767 | if (hweight64(reg_val & port23_mask) & 1) | ||
2768 | reg_val |= ENET_VLAN_TBL_PARITY1; | ||
2769 | else | ||
2770 | reg_val &= ~ENET_VLAN_TBL_PARITY1; | ||
2771 | |||
2772 | return reg_val; | ||
2773 | } | ||
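Each parity bit covers one byte of the entry (ports 0/1 in bits 7:0, ports 2/3 in bits 15:8) and is set whenever that byte's population count is odd, so byte plus parity bit always has even weight; this assumes the PARITY bits themselves sit outside the two byte masks, as the masks imply. A worked example:

/* reg_val & 0x00ff = 0x13   (three bits set, odd)  => set PARITY0
 * reg_val & 0xff00 = 0x0300 (two bits set, even)   => clear PARITY1
 */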
2774 | |||
2775 | static void vlan_tbl_write(struct niu *np, unsigned long index, | ||
2776 | int port, int vpr, int rdc_table) | ||
2777 | { | ||
2778 | u64 reg_val = nr64(ENET_VLAN_TBL(index)); | ||
2779 | |||
2780 | reg_val &= ~((ENET_VLAN_TBL_VPR | | ||
2781 | ENET_VLAN_TBL_VLANRDCTBLN) << | ||
2782 | ENET_VLAN_TBL_SHIFT(port)); | ||
2783 | if (vpr) | ||
2784 | reg_val |= (ENET_VLAN_TBL_VPR << | ||
2785 | ENET_VLAN_TBL_SHIFT(port)); | ||
2786 | reg_val |= (rdc_table << ENET_VLAN_TBL_SHIFT(port)); | ||
2787 | |||
2788 | reg_val = vlan_entry_set_parity(reg_val); | ||
2789 | |||
2790 | nw64(ENET_VLAN_TBL(index), reg_val); | ||
2791 | } | ||
2792 | |||
2793 | static void vlan_tbl_clear(struct niu *np) | ||
2794 | { | ||
2795 | int i; | ||
2796 | |||
2797 | for (i = 0; i < ENET_VLAN_TBL_NUM_ENTRIES; i++) | ||
2798 | nw64(ENET_VLAN_TBL(i), 0); | ||
2799 | } | ||
2800 | |||
2801 | static int tcam_wait_bit(struct niu *np, u64 bit) | ||
2802 | { | ||
2803 | int limit = 1000; | ||
2804 | |||
2805 | while (--limit > 0) { | ||
2806 | if (nr64(TCAM_CTL) & bit) | ||
2807 | break; | ||
2808 | udelay(1); | ||
2809 | } | ||
2810 | if (limit <= 0) | ||
2811 | return -ENODEV; | ||
2812 | |||
2813 | return 0; | ||
2814 | } | ||
2815 | |||
2816 | static int tcam_flush(struct niu *np, int index) | ||
2817 | { | ||
2818 | nw64(TCAM_KEY_0, 0x00); | ||
2819 | nw64(TCAM_KEY_MASK_0, 0xff); | ||
2820 | nw64(TCAM_CTL, (TCAM_CTL_RWC_TCAM_WRITE | index)); | ||
2821 | |||
2822 | return tcam_wait_bit(np, TCAM_CTL_STAT); | ||
2823 | } | ||
2824 | |||
2825 | #if 0 | ||
2826 | static int tcam_read(struct niu *np, int index, | ||
2827 | u64 *key, u64 *mask) | ||
2828 | { | ||
2829 | int err; | ||
2830 | |||
2831 | nw64(TCAM_CTL, (TCAM_CTL_RWC_TCAM_READ | index)); | ||
2832 | err = tcam_wait_bit(np, TCAM_CTL_STAT); | ||
2833 | if (!err) { | ||
2834 | key[0] = nr64(TCAM_KEY_0); | ||
2835 | key[1] = nr64(TCAM_KEY_1); | ||
2836 | key[2] = nr64(TCAM_KEY_2); | ||
2837 | key[3] = nr64(TCAM_KEY_3); | ||
2838 | mask[0] = nr64(TCAM_KEY_MASK_0); | ||
2839 | mask[1] = nr64(TCAM_KEY_MASK_1); | ||
2840 | mask[2] = nr64(TCAM_KEY_MASK_2); | ||
2841 | mask[3] = nr64(TCAM_KEY_MASK_3); | ||
2842 | } | ||
2843 | return err; | ||
2844 | } | ||
2845 | #endif | ||
2846 | |||
2847 | static int tcam_write(struct niu *np, int index, | ||
2848 | u64 *key, u64 *mask) | ||
2849 | { | ||
2850 | nw64(TCAM_KEY_0, key[0]); | ||
2851 | nw64(TCAM_KEY_1, key[1]); | ||
2852 | nw64(TCAM_KEY_2, key[2]); | ||
2853 | nw64(TCAM_KEY_3, key[3]); | ||
2854 | nw64(TCAM_KEY_MASK_0, mask[0]); | ||
2855 | nw64(TCAM_KEY_MASK_1, mask[1]); | ||
2856 | nw64(TCAM_KEY_MASK_2, mask[2]); | ||
2857 | nw64(TCAM_KEY_MASK_3, mask[3]); | ||
2858 | nw64(TCAM_CTL, (TCAM_CTL_RWC_TCAM_WRITE | index)); | ||
2859 | |||
2860 | return tcam_wait_bit(np, TCAM_CTL_STAT); | ||
2861 | } | ||
2862 | |||
2863 | #if 0 | ||
2864 | static int tcam_assoc_read(struct niu *np, int index, u64 *data) | ||
2865 | { | ||
2866 | int err; | ||
2867 | |||
2868 | nw64(TCAM_CTL, (TCAM_CTL_RWC_RAM_READ | index)); | ||
2869 | err = tcam_wait_bit(np, TCAM_CTL_STAT); | ||
2870 | if (!err) | ||
2871 | *data = nr64(TCAM_KEY_1); | ||
2872 | |||
2873 | return err; | ||
2874 | } | ||
2875 | #endif | ||
2876 | |||
2877 | static int tcam_assoc_write(struct niu *np, int index, u64 assoc_data) | ||
2878 | { | ||
2879 | nw64(TCAM_KEY_1, assoc_data); | ||
2880 | nw64(TCAM_CTL, (TCAM_CTL_RWC_RAM_WRITE | index)); | ||
2881 | |||
2882 | return tcam_wait_bit(np, TCAM_CTL_STAT); | ||
2883 | } | ||
2884 | |||
2885 | static void tcam_enable(struct niu *np, int on) | ||
2886 | { | ||
2887 | u64 val = nr64(FFLP_CFG_1); | ||
2888 | |||
2889 | if (on) | ||
2890 | val &= ~FFLP_CFG_1_TCAM_DIS; | ||
2891 | else | ||
2892 | val |= FFLP_CFG_1_TCAM_DIS; | ||
2893 | nw64(FFLP_CFG_1, val); | ||
2894 | } | ||
2895 | |||
2896 | static void tcam_set_lat_and_ratio(struct niu *np, u64 latency, u64 ratio) | ||
2897 | { | ||
2898 | u64 val = nr64(FFLP_CFG_1); | ||
2899 | |||
2900 | val &= ~(FFLP_CFG_1_FFLPINITDONE | | ||
2901 | FFLP_CFG_1_CAMLAT | | ||
2902 | FFLP_CFG_1_CAMRATIO); | ||
2903 | val |= (latency << FFLP_CFG_1_CAMLAT_SHIFT); | ||
2904 | val |= (ratio << FFLP_CFG_1_CAMRATIO_SHIFT); | ||
2905 | nw64(FFLP_CFG_1, val); | ||
2906 | |||
2907 | val = nr64(FFLP_CFG_1); | ||
2908 | val |= FFLP_CFG_1_FFLPINITDONE; | ||
2909 | nw64(FFLP_CFG_1, val); | ||
2910 | } | ||
2911 | |||
2912 | static int tcam_user_eth_class_enable(struct niu *np, unsigned long class, | ||
2913 | int on) | ||
2914 | { | ||
2915 | unsigned long reg; | ||
2916 | u64 val; | ||
2917 | |||
2918 | if (class < CLASS_CODE_ETHERTYPE1 || | ||
2919 | class > CLASS_CODE_ETHERTYPE2) | ||
2920 | return -EINVAL; | ||
2921 | |||
2922 | reg = L2_CLS(class - CLASS_CODE_ETHERTYPE1); | ||
2923 | val = nr64(reg); | ||
2924 | if (on) | ||
2925 | val |= L2_CLS_VLD; | ||
2926 | else | ||
2927 | val &= ~L2_CLS_VLD; | ||
2928 | nw64(reg, val); | ||
2929 | |||
2930 | return 0; | ||
2931 | } | ||
2932 | |||
2933 | #if 0 | ||
2934 | static int tcam_user_eth_class_set(struct niu *np, unsigned long class, | ||
2935 | u64 ether_type) | ||
2936 | { | ||
2937 | unsigned long reg; | ||
2938 | u64 val; | ||
2939 | |||
2940 | if (class < CLASS_CODE_ETHERTYPE1 || | ||
2941 | class > CLASS_CODE_ETHERTYPE2 || | ||
2942 | (ether_type & ~(u64)0xffff) != 0) | ||
2943 | return -EINVAL; | ||
2944 | |||
2945 | reg = L2_CLS(class - CLASS_CODE_ETHERTYPE1); | ||
2946 | val = nr64(reg); | ||
2947 | val &= ~L2_CLS_ETYPE; | ||
2948 | val |= (ether_type << L2_CLS_ETYPE_SHIFT); | ||
2949 | nw64(reg, val); | ||
2950 | |||
2951 | return 0; | ||
2952 | } | ||
2953 | #endif | ||
2954 | |||
2955 | static int tcam_user_ip_class_enable(struct niu *np, unsigned long class, | ||
2956 | int on) | ||
2957 | { | ||
2958 | unsigned long reg; | ||
2959 | u64 val; | ||
2960 | |||
2961 | if (class < CLASS_CODE_USER_PROG1 || | ||
2962 | class > CLASS_CODE_USER_PROG4) | ||
2963 | return -EINVAL; | ||
2964 | |||
2965 | reg = L3_CLS(class - CLASS_CODE_USER_PROG1); | ||
2966 | val = nr64(reg); | ||
2967 | if (on) | ||
2968 | val |= L3_CLS_VALID; | ||
2969 | else | ||
2970 | val &= ~L3_CLS_VALID; | ||
2971 | nw64(reg, val); | ||
2972 | |||
2973 | return 0; | ||
2974 | } | ||
2975 | |||
2976 | static int tcam_user_ip_class_set(struct niu *np, unsigned long class, | ||
2977 | int ipv6, u64 protocol_id, | ||
2978 | u64 tos_mask, u64 tos_val) | ||
2979 | { | ||
2980 | unsigned long reg; | ||
2981 | u64 val; | ||
2982 | |||
2983 | if (class < CLASS_CODE_USER_PROG1 || | ||
2984 | class > CLASS_CODE_USER_PROG4 || | ||
2985 | (protocol_id & ~(u64)0xff) != 0 || | ||
2986 | (tos_mask & ~(u64)0xff) != 0 || | ||
2987 | (tos_val & ~(u64)0xff) != 0) | ||
2988 | return -EINVAL; | ||
2989 | |||
2990 | reg = L3_CLS(class - CLASS_CODE_USER_PROG1); | ||
2991 | val = nr64(reg); | ||
2992 | val &= ~(L3_CLS_IPVER | L3_CLS_PID | | ||
2993 | L3_CLS_TOSMASK | L3_CLS_TOS); | ||
2994 | if (ipv6) | ||
2995 | val |= L3_CLS_IPVER; | ||
2996 | val |= (protocol_id << L3_CLS_PID_SHIFT); | ||
2997 | val |= (tos_mask << L3_CLS_TOSMASK_SHIFT); | ||
2998 | val |= (tos_val << L3_CLS_TOS_SHIFT); | ||
2999 | nw64(reg, val); | ||
3000 | |||
3001 | return 0; | ||
3002 | } | ||
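A hypothetical call sequence (illustration only, not code from this driver) that programs user class 1 to match IPv4 protocol 132 (SCTP) with TOS fully masked out, then enables it:

/*   err = tcam_user_ip_class_set(np, CLASS_CODE_USER_PROG1, 0, 132, 0, 0);
 *   if (!err)
 *           err = tcam_user_ip_class_enable(np, CLASS_CODE_USER_PROG1, 1);
 */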
3003 | |||
3004 | static int tcam_early_init(struct niu *np) | ||
3005 | { | ||
3006 | unsigned long i; | ||
3007 | int err; | ||
3008 | |||
3009 | tcam_enable(np, 0); | ||
3010 | tcam_set_lat_and_ratio(np, | ||
3011 | DEFAULT_TCAM_LATENCY, | ||
3012 | DEFAULT_TCAM_ACCESS_RATIO); | ||
3013 | for (i = CLASS_CODE_ETHERTYPE1; i <= CLASS_CODE_ETHERTYPE2; i++) { | ||
3014 | err = tcam_user_eth_class_enable(np, i, 0); | ||
3015 | if (err) | ||
3016 | return err; | ||
3017 | } | ||
3018 | for (i = CLASS_CODE_USER_PROG1; i <= CLASS_CODE_USER_PROG4; i++) { | ||
3019 | err = tcam_user_ip_class_enable(np, i, 0); | ||
3020 | if (err) | ||
3021 | return err; | ||
3022 | } | ||
3023 | |||
3024 | return 0; | ||
3025 | } | ||
3026 | |||
3027 | static int tcam_flush_all(struct niu *np) | ||
3028 | { | ||
3029 | unsigned long i; | ||
3030 | |||
3031 | for (i = 0; i < np->parent->tcam_num_entries; i++) { | ||
3032 | int err = tcam_flush(np, i); | ||
3033 | if (err) | ||
3034 | return err; | ||
3035 | } | ||
3036 | return 0; | ||
3037 | } | ||
3038 | |||
3039 | static u64 hash_addr_regval(unsigned long index, unsigned long num_entries) | ||
3040 | { | ||
3041 | return (u64)index | (num_entries == 1 ? HASH_TBL_ADDR_AUTOINC : 0); | ||
3042 | } | ||
3043 | |||
3044 | #if 0 | ||
3045 | static int hash_read(struct niu *np, unsigned long partition, | ||
3046 | unsigned long index, unsigned long num_entries, | ||
3047 | u64 *data) | ||
3048 | { | ||
3049 | u64 val = hash_addr_regval(index, num_entries); | ||
3050 | unsigned long i; | ||
3051 | |||
3052 | if (partition >= FCRAM_NUM_PARTITIONS || | ||
3053 | index + num_entries > FCRAM_SIZE) | ||
3054 | return -EINVAL; | ||
3055 | |||
3056 | nw64(HASH_TBL_ADDR(partition), val); | ||
3057 | for (i = 0; i < num_entries; i++) | ||
3058 | data[i] = nr64(HASH_TBL_DATA(partition)); | ||
3059 | |||
3060 | return 0; | ||
3061 | } | ||
3062 | #endif | ||
3063 | |||
3064 | static int hash_write(struct niu *np, unsigned long partition, | ||
3065 | unsigned long index, unsigned long num_entries, | ||
3066 | u64 *data) | ||
3067 | { | ||
3068 | u64 val = hash_addr_regval(index, num_entries); | ||
3069 | unsigned long i; | ||
3070 | |||
3071 | if (partition >= FCRAM_NUM_PARTITIONS || | ||
3072 | index + (num_entries * 8) > FCRAM_SIZE) | ||
3073 | return -EINVAL; | ||
3074 | |||
3075 | nw64(HASH_TBL_ADDR(partition), val); | ||
3076 | for (i = 0; i < num_entries; i++) | ||
3077 | nw64(HASH_TBL_DATA(partition), data[i]); | ||
3078 | |||
3079 | return 0; | ||
3080 | } | ||
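One asymmetry worth flagging between the two access helpers:

/* Note: this live write path scales num_entries by 8 (one 64-bit word
 * per entry) in its bounds check, while the disabled hash_read() above
 * does not. The caller below (fflp_hash_clear) steps its index in
 * bytes, which matches the write-side convention.
 */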
3081 | |||
3082 | static void fflp_reset(struct niu *np) | ||
3083 | { | ||
3084 | u64 val; | ||
3085 | |||
3086 | nw64(FFLP_CFG_1, FFLP_CFG_1_PIO_FIO_RST); | ||
3087 | udelay(10); | ||
3088 | nw64(FFLP_CFG_1, 0); | ||
3089 | |||
3090 | val = FFLP_CFG_1_FCRAMOUTDR_NORMAL | FFLP_CFG_1_FFLPINITDONE; | ||
3091 | nw64(FFLP_CFG_1, val); | ||
3092 | } | ||
3093 | |||
3094 | static void fflp_set_timings(struct niu *np) | ||
3095 | { | ||
3096 | u64 val = nr64(FFLP_CFG_1); | ||
3097 | |||
3098 | val &= ~FFLP_CFG_1_FFLPINITDONE; | ||
3099 | val |= (DEFAULT_FCRAMRATIO << FFLP_CFG_1_FCRAMRATIO_SHIFT); | ||
3100 | nw64(FFLP_CFG_1, val); | ||
3101 | |||
3102 | val = nr64(FFLP_CFG_1); | ||
3103 | val |= FFLP_CFG_1_FFLPINITDONE; | ||
3104 | nw64(FFLP_CFG_1, val); | ||
3105 | |||
3106 | val = nr64(FCRAM_REF_TMR); | ||
3107 | val &= ~(FCRAM_REF_TMR_MAX | FCRAM_REF_TMR_MIN); | ||
3108 | val |= (DEFAULT_FCRAM_REFRESH_MAX << FCRAM_REF_TMR_MAX_SHIFT); | ||
3109 | val |= (DEFAULT_FCRAM_REFRESH_MIN << FCRAM_REF_TMR_MIN_SHIFT); | ||
3110 | nw64(FCRAM_REF_TMR, val); | ||
3111 | } | ||
3112 | |||
3113 | static int fflp_set_partition(struct niu *np, u64 partition, | ||
3114 | u64 mask, u64 base, int enable) | ||
3115 | { | ||
3116 | unsigned long reg; | ||
3117 | u64 val; | ||
3118 | |||
3119 | if (partition >= FCRAM_NUM_PARTITIONS || | ||
3120 | (mask & ~(u64)0x1f) != 0 || | ||
3121 | (base & ~(u64)0x1f) != 0) | ||
3122 | return -EINVAL; | ||
3123 | |||
3124 | reg = FLW_PRT_SEL(partition); | ||
3125 | |||
3126 | val = nr64(reg); | ||
3127 | val &= ~(FLW_PRT_SEL_EXT | FLW_PRT_SEL_MASK | FLW_PRT_SEL_BASE); | ||
3128 | val |= (mask << FLW_PRT_SEL_MASK_SHIFT); | ||
3129 | val |= (base << FLW_PRT_SEL_BASE_SHIFT); | ||
3130 | if (enable) | ||
3131 | val |= FLW_PRT_SEL_EXT; | ||
3132 | nw64(reg, val); | ||
3133 | |||
3134 | return 0; | ||
3135 | } | ||
3136 | |||
3137 | static int fflp_disable_all_partitions(struct niu *np) | ||
3138 | { | ||
3139 | unsigned long i; | ||
3140 | |||
3141 | for (i = 0; i < FCRAM_NUM_PARTITIONS; i++) { | ||
3142 | int err = fflp_set_partition(np, i, 0, 0, 0); | ||
3143 | if (err) | ||
3144 | return err; | ||
3145 | } | ||
3146 | return 0; | ||
3147 | } | ||
3148 | |||
3149 | static void fflp_llcsnap_enable(struct niu *np, int on) | ||
3150 | { | ||
3151 | u64 val = nr64(FFLP_CFG_1); | ||
3152 | |||
3153 | if (on) | ||
3154 | val |= FFLP_CFG_1_LLCSNAP; | ||
3155 | else | ||
3156 | val &= ~FFLP_CFG_1_LLCSNAP; | ||
3157 | nw64(FFLP_CFG_1, val); | ||
3158 | } | ||
3159 | |||
3160 | static void fflp_errors_enable(struct niu *np, int on) | ||
3161 | { | ||
3162 | u64 val = nr64(FFLP_CFG_1); | ||
3163 | |||
3164 | if (on) | ||
3165 | val &= ~FFLP_CFG_1_ERRORDIS; | ||
3166 | else | ||
3167 | val |= FFLP_CFG_1_ERRORDIS; | ||
3168 | nw64(FFLP_CFG_1, val); | ||
3169 | } | ||
3170 | |||
3171 | static int fflp_hash_clear(struct niu *np) | ||
3172 | { | ||
3173 | struct fcram_hash_ipv4 ent; | ||
3174 | unsigned long i; | ||
3175 | |||
3176 | /* IPV4 hash entry with valid bit clear, rest is don't care. */ | ||
3177 | memset(&ent, 0, sizeof(ent)); | ||
3178 | ent.header = HASH_HEADER_EXT; | ||
3179 | |||
3180 | for (i = 0; i < FCRAM_SIZE; i += sizeof(ent)) { | ||
3181 | int err = hash_write(np, 0, i, 1, (u64 *) &ent); | ||
3182 | if (err) | ||
3183 | return err; | ||
3184 | } | ||
3185 | return 0; | ||
3186 | } | ||
3187 | |||
3188 | static int fflp_early_init(struct niu *np) | ||
3189 | { | ||
3190 | struct niu_parent *parent; | ||
3191 | unsigned long flags; | ||
3192 | int err; | ||
3193 | |||
3194 | niu_lock_parent(np, flags); | ||
3195 | |||
3196 | parent = np->parent; | ||
3197 | err = 0; | ||
3198 | if (!(parent->flags & PARENT_FLGS_CLS_HWINIT)) { | ||
3199 | if (np->parent->plat_type != PLAT_TYPE_NIU) { | ||
3200 | fflp_reset(np); | ||
3201 | fflp_set_timings(np); | ||
3202 | err = fflp_disable_all_partitions(np); | ||
3203 | if (err) { | ||
3204 | netif_printk(np, probe, KERN_DEBUG, np->dev, | ||
3205 | "fflp_disable_all_partitions failed, err=%d\n", | ||
3206 | err); | ||
3207 | goto out; | ||
3208 | } | ||
3209 | } | ||
3210 | |||
3211 | err = tcam_early_init(np); | ||
3212 | if (err) { | ||
3213 | netif_printk(np, probe, KERN_DEBUG, np->dev, | ||
3214 | "tcam_early_init failed, err=%d\n", err); | ||
3215 | goto out; | ||
3216 | } | ||
3217 | fflp_llcsnap_enable(np, 1); | ||
3218 | fflp_errors_enable(np, 0); | ||
3219 | nw64(H1POLY, 0); | ||
3220 | nw64(H2POLY, 0); | ||
3221 | |||
3222 | err = tcam_flush_all(np); | ||
3223 | if (err) { | ||
3224 | netif_printk(np, probe, KERN_DEBUG, np->dev, | ||
3225 | "tcam_flush_all failed, err=%d\n", err); | ||
3226 | goto out; | ||
3227 | } | ||
3228 | if (np->parent->plat_type != PLAT_TYPE_NIU) { | ||
3229 | err = fflp_hash_clear(np); | ||
3230 | if (err) { | ||
3231 | netif_printk(np, probe, KERN_DEBUG, np->dev, | ||
3232 | "fflp_hash_clear failed, err=%d\n", | ||
3233 | err); | ||
3234 | goto out; | ||
3235 | } | ||
3236 | } | ||
3237 | |||
3238 | vlan_tbl_clear(np); | ||
3239 | |||
3240 | parent->flags |= PARENT_FLGS_CLS_HWINIT; | ||
3241 | } | ||
3242 | out: | ||
3243 | niu_unlock_parent(np, flags); | ||
3244 | return err; | ||
3245 | } | ||
3246 | |||
3247 | static int niu_set_flow_key(struct niu *np, unsigned long class_code, u64 key) | ||
3248 | { | ||
3249 | if (class_code < CLASS_CODE_USER_PROG1 || | ||
3250 | class_code > CLASS_CODE_SCTP_IPV6) | ||
3251 | return -EINVAL; | ||
3252 | |||
3253 | nw64(FLOW_KEY(class_code - CLASS_CODE_USER_PROG1), key); | ||
3254 | return 0; | ||
3255 | } | ||
3256 | |||
3257 | static int niu_set_tcam_key(struct niu *np, unsigned long class_code, u64 key) | ||
3258 | { | ||
3259 | if (class_code < CLASS_CODE_USER_PROG1 || | ||
3260 | class_code > CLASS_CODE_SCTP_IPV6) | ||
3261 | return -EINVAL; | ||
3262 | |||
3263 | nw64(TCAM_KEY(class_code - CLASS_CODE_USER_PROG1), key); | ||
3264 | return 0; | ||
3265 | } | ||
3266 | |||
3267 | /* Entries for the ports are interleaved in the TCAM */ | ||
3268 | static u16 tcam_get_index(struct niu *np, u16 idx) | ||
3269 | { | ||
3270 | /* One entry reserved for IP fragment rule */ | ||
3271 | if (idx >= (np->clas.tcam_sz - 1)) | ||
3272 | idx = 0; | ||
3273 | return np->clas.tcam_top + ((idx+1) * np->parent->num_ports); | ||
3274 | } | ||
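A worked example of the interleaving (numbers illustrative): logical indices at or beyond tcam_sz - 1 wrap to 0 because one slot is reserved for the IP fragment rule.

/* e.g. num_ports = 4: idx 0 -> tcam_top + 4, idx 1 -> tcam_top + 8,
 * idx 2 -> tcam_top + 12; the other ports own the slots in between.
 */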
3275 | |||
3276 | static u16 tcam_get_size(struct niu *np) | ||
3277 | { | ||
3278 | /* One entry reserved for IP fragment rule */ | ||
3279 | return np->clas.tcam_sz - 1; | ||
3280 | } | ||
3281 | |||
3282 | static u16 tcam_get_valid_entry_cnt(struct niu *np) | ||
3283 | { | ||
3284 | /* One entry reserved for IP fragment rule */ | ||
3285 | return np->clas.tcam_valid_entries - 1; | ||
3286 | } | ||
3287 | |||
3288 | static void niu_rx_skb_append(struct sk_buff *skb, struct page *page, | ||
3289 | u32 offset, u32 size) | ||
3290 | { | ||
3291 | int i = skb_shinfo(skb)->nr_frags; | ||
3292 | skb_frag_t *frag = &skb_shinfo(skb)->frags[i]; | ||
3293 | |||
3294 | frag->page = page; | ||
3295 | frag->page_offset = offset; | ||
3296 | frag->size = size; | ||
3297 | |||
3298 | skb->len += size; | ||
3299 | skb->data_len += size; | ||
3300 | skb->truesize += size; | ||
3301 | |||
3302 | skb_shinfo(skb)->nr_frags = i + 1; | ||
3303 | } | ||
3304 | |||
3305 | static unsigned int niu_hash_rxaddr(struct rx_ring_info *rp, u64 a) | ||
3306 | { | ||
3307 | a >>= PAGE_SHIFT; | ||
3308 | a ^= (a >> ilog2(MAX_RBR_RING_SIZE)); | ||
3309 | |||
3310 | return a & (MAX_RBR_RING_SIZE - 1); | ||
3311 | } | ||
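The hash drops the in-page offset, folds the high bits down with an XOR, and masks to the table size; sketched symbolically:

/* For a DMA address A: h = A >> PAGE_SHIFT;
 *                       h ^= h >> ilog2(MAX_RBR_RING_SIZE);
 *                       h &= MAX_RBR_RING_SIZE - 1;
 * so pages whose low page-number bits collide still spread out
 * whenever their higher bits differ.
 */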
3312 | |||
3313 | static struct page *niu_find_rxpage(struct rx_ring_info *rp, u64 addr, | ||
3314 | struct page ***link) | ||
3315 | { | ||
3316 | unsigned int h = niu_hash_rxaddr(rp, addr); | ||
3317 | struct page *p, **pp; | ||
3318 | |||
3319 | addr &= PAGE_MASK; | ||
3320 | pp = &rp->rxhash[h]; | ||
3321 | for (; (p = *pp) != NULL; pp = (struct page **) &p->mapping) { | ||
3322 | if (p->index == addr) { | ||
3323 | *link = pp; | ||
3324 | goto found; | ||
3325 | } | ||
3326 | } | ||
3327 | BUG(); | ||
3328 | |||
3329 | found: | ||
3330 | return p; | ||
3331 | } | ||
3332 | |||
3333 | static void niu_hash_page(struct rx_ring_info *rp, struct page *page, u64 base) | ||
3334 | { | ||
3335 | unsigned int h = niu_hash_rxaddr(rp, base); | ||
3336 | |||
3337 | page->index = base; | ||
3338 | page->mapping = (struct address_space *) rp->rxhash[h]; | ||
3339 | rp->rxhash[h] = page; | ||
3340 | } | ||
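Since these pages are never on the page cache while they sit in the RBR, the driver reuses two struct page fields as a small chained hash table:

/* page->index   = DMA base address (the key niu_find_rxpage() compares)
 * page->mapping = next page in this hash bucket's chain
 * Both fields are cleared again before the page is freed (see the
 * unmap paths in the RX routines below).
 */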
3341 | |||
3342 | static int niu_rbr_add_page(struct niu *np, struct rx_ring_info *rp, | ||
3343 | gfp_t mask, int start_index) | ||
3344 | { | ||
3345 | struct page *page; | ||
3346 | u64 addr; | ||
3347 | int i; | ||
3348 | |||
3349 | page = alloc_page(mask); | ||
3350 | if (!page) | ||
3351 | return -ENOMEM; | ||
3352 | |||
3353 | addr = np->ops->map_page(np->device, page, 0, | ||
3354 | PAGE_SIZE, DMA_FROM_DEVICE); | ||
3355 | |||
3356 | niu_hash_page(rp, page, addr); | ||
3357 | if (rp->rbr_blocks_per_page > 1) | ||
3358 | atomic_add(rp->rbr_blocks_per_page - 1, | ||
3359 | &compound_head(page)->_count); | ||
3360 | |||
3361 | for (i = 0; i < rp->rbr_blocks_per_page; i++) { | ||
3362 | __le32 *rbr = &rp->rbr[start_index + i]; | ||
3363 | |||
3364 | *rbr = cpu_to_le32(addr >> RBR_DESCR_ADDR_SHIFT); | ||
3365 | addr += rp->rbr_block_size; | ||
3366 | } | ||
3367 | |||
3368 | return 0; | ||
3369 | } | ||
3370 | |||
3371 | static void niu_rbr_refill(struct niu *np, struct rx_ring_info *rp, gfp_t mask) | ||
3372 | { | ||
3373 | int index = rp->rbr_index; | ||
3374 | |||
3375 | rp->rbr_pending++; | ||
3376 | if ((rp->rbr_pending % rp->rbr_blocks_per_page) == 0) { | ||
3377 | int err = niu_rbr_add_page(np, rp, mask, index); | ||
3378 | |||
3379 | if (unlikely(err)) { | ||
3380 | rp->rbr_pending--; | ||
3381 | return; | ||
3382 | } | ||
3383 | |||
3384 | rp->rbr_index += rp->rbr_blocks_per_page; | ||
3385 | BUG_ON(rp->rbr_index > rp->rbr_table_size); | ||
3386 | if (rp->rbr_index == rp->rbr_table_size) | ||
3387 | rp->rbr_index = 0; | ||
3388 | |||
3389 | if (rp->rbr_pending >= rp->rbr_kick_thresh) { | ||
3390 | nw64(RBR_KICK(rp->rx_channel), rp->rbr_pending); | ||
3391 | rp->rbr_pending = 0; | ||
3392 | } | ||
3393 | } | ||
3394 | } | ||
3395 | |||
3396 | static int niu_rx_pkt_ignore(struct niu *np, struct rx_ring_info *rp) | ||
3397 | { | ||
3398 | unsigned int index = rp->rcr_index; | ||
3399 | int num_rcr = 0; | ||
3400 | |||
3401 | rp->rx_dropped++; | ||
3402 | while (1) { | ||
3403 | struct page *page, **link; | ||
3404 | u64 addr, val; | ||
3405 | u32 rcr_size; | ||
3406 | |||
3407 | num_rcr++; | ||
3408 | |||
3409 | val = le64_to_cpup(&rp->rcr[index]); | ||
3410 | addr = (val & RCR_ENTRY_PKT_BUF_ADDR) << | ||
3411 | RCR_ENTRY_PKT_BUF_ADDR_SHIFT; | ||
3412 | page = niu_find_rxpage(rp, addr, &link); | ||
3413 | |||
3414 | rcr_size = rp->rbr_sizes[(val & RCR_ENTRY_PKTBUFSZ) >> | ||
3415 | RCR_ENTRY_PKTBUFSZ_SHIFT]; | ||
3416 | if ((page->index + PAGE_SIZE) - rcr_size == addr) { | ||
3417 | *link = (struct page *) page->mapping; | ||
3418 | np->ops->unmap_page(np->device, page->index, | ||
3419 | PAGE_SIZE, DMA_FROM_DEVICE); | ||
3420 | page->index = 0; | ||
3421 | page->mapping = NULL; | ||
3422 | __free_page(page); | ||
3423 | rp->rbr_refill_pending++; | ||
3424 | } | ||
3425 | |||
3426 | index = NEXT_RCR(rp, index); | ||
3427 | if (!(val & RCR_ENTRY_MULTI)) | ||
3428 | break; | ||
3430 | } | ||
3431 | rp->rcr_index = index; | ||
3432 | |||
3433 | return num_rcr; | ||
3434 | } | ||
3435 | |||
3436 | static int niu_process_rx_pkt(struct napi_struct *napi, struct niu *np, | ||
3437 | struct rx_ring_info *rp) | ||
3438 | { | ||
3439 | unsigned int index = rp->rcr_index; | ||
3440 | struct rx_pkt_hdr1 *rh; | ||
3441 | struct sk_buff *skb; | ||
3442 | int len, num_rcr; | ||
3443 | |||
3444 | skb = netdev_alloc_skb(np->dev, RX_SKB_ALLOC_SIZE); | ||
3445 | if (unlikely(!skb)) | ||
3446 | return niu_rx_pkt_ignore(np, rp); | ||
3447 | |||
3448 | num_rcr = 0; | ||
3449 | while (1) { | ||
3450 | struct page *page, **link; | ||
3451 | u32 rcr_size, append_size; | ||
3452 | u64 addr, val, off; | ||
3453 | |||
3454 | num_rcr++; | ||
3455 | |||
3456 | val = le64_to_cpup(&rp->rcr[index]); | ||
3457 | |||
3458 | len = (val & RCR_ENTRY_L2_LEN) >> | ||
3459 | RCR_ENTRY_L2_LEN_SHIFT; | ||
3460 | len -= ETH_FCS_LEN; | ||
3461 | |||
3462 | addr = (val & RCR_ENTRY_PKT_BUF_ADDR) << | ||
3463 | RCR_ENTRY_PKT_BUF_ADDR_SHIFT; | ||
3464 | page = niu_find_rxpage(rp, addr, &link); | ||
3465 | |||
3466 | rcr_size = rp->rbr_sizes[(val & RCR_ENTRY_PKTBUFSZ) >> | ||
3467 | RCR_ENTRY_PKTBUFSZ_SHIFT]; | ||
3468 | |||
3469 | off = addr & ~PAGE_MASK; | ||
3470 | append_size = rcr_size; | ||
3471 | if (num_rcr == 1) { | ||
3472 | int ptype; | ||
3473 | |||
3474 | ptype = (val >> RCR_ENTRY_PKT_TYPE_SHIFT); | ||
3475 | if ((ptype == RCR_PKT_TYPE_TCP || | ||
3476 | ptype == RCR_PKT_TYPE_UDP) && | ||
3477 | !(val & (RCR_ENTRY_NOPORT | | ||
3478 | RCR_ENTRY_ERROR))) | ||
3479 | skb->ip_summed = CHECKSUM_UNNECESSARY; | ||
3480 | else | ||
3481 | skb_checksum_none_assert(skb); | ||
3482 | } else if (!(val & RCR_ENTRY_MULTI)) | ||
3483 | append_size = len - skb->len; | ||
3484 | |||
3485 | niu_rx_skb_append(skb, page, off, append_size); | ||
3486 | if ((page->index + rp->rbr_block_size) - rcr_size == addr) { | ||
3487 | *link = (struct page *) page->mapping; | ||
3488 | np->ops->unmap_page(np->device, page->index, | ||
3489 | PAGE_SIZE, DMA_FROM_DEVICE); | ||
3490 | page->index = 0; | ||
3491 | page->mapping = NULL; | ||
3492 | rp->rbr_refill_pending++; | ||
3493 | } else | ||
3494 | get_page(page); | ||
3495 | |||
3496 | index = NEXT_RCR(rp, index); | ||
3497 | if (!(val & RCR_ENTRY_MULTI)) | ||
3498 | break; | ||
3500 | } | ||
3501 | rp->rcr_index = index; | ||
3502 | |||
3503 | len += sizeof(*rh); | ||
3504 | len = min_t(int, len, sizeof(*rh) + VLAN_ETH_HLEN); | ||
3505 | __pskb_pull_tail(skb, len); | ||
3506 | |||
3507 | rh = (struct rx_pkt_hdr1 *) skb->data; | ||
3508 | if (np->dev->features & NETIF_F_RXHASH) | ||
3509 | skb->rxhash = ((u32)rh->hashval2_0 << 24 | | ||
3510 | (u32)rh->hashval2_1 << 16 | | ||
3511 | (u32)rh->hashval1_1 << 8 | | ||
3512 | (u32)rh->hashval1_2 << 0); | ||
3513 | skb_pull(skb, sizeof(*rh)); | ||
3514 | |||
3515 | rp->rx_packets++; | ||
3516 | rp->rx_bytes += skb->len; | ||
3517 | |||
3518 | skb->protocol = eth_type_trans(skb, np->dev); | ||
3519 | skb_record_rx_queue(skb, rp->rx_channel); | ||
3520 | napi_gro_receive(napi, skb); | ||
3521 | |||
3522 | return num_rcr; | ||
3523 | } | ||
3524 | |||
3525 | static int niu_rbr_fill(struct niu *np, struct rx_ring_info *rp, gfp_t mask) | ||
3526 | { | ||
3527 | int blocks_per_page = rp->rbr_blocks_per_page; | ||
3528 | int err, index = rp->rbr_index; | ||
3529 | |||
3530 | err = 0; | ||
3531 | while (index < (rp->rbr_table_size - blocks_per_page)) { | ||
3532 | err = niu_rbr_add_page(np, rp, mask, index); | ||
3533 | if (err) | ||
3534 | break; | ||
3535 | |||
3536 | index += blocks_per_page; | ||
3537 | } | ||
3538 | |||
3539 | rp->rbr_index = index; | ||
3540 | return err; | ||
3541 | } | ||
3542 | |||
3543 | static void niu_rbr_free(struct niu *np, struct rx_ring_info *rp) | ||
3544 | { | ||
3545 | int i; | ||
3546 | |||
3547 | for (i = 0; i < MAX_RBR_RING_SIZE; i++) { | ||
3548 | struct page *page; | ||
3549 | |||
3550 | page = rp->rxhash[i]; | ||
3551 | while (page) { | ||
3552 | struct page *next = (struct page *) page->mapping; | ||
3553 | u64 base = page->index; | ||
3554 | |||
3555 | np->ops->unmap_page(np->device, base, PAGE_SIZE, | ||
3556 | DMA_FROM_DEVICE); | ||
3557 | page->index = 0; | ||
3558 | page->mapping = NULL; | ||
3559 | |||
3560 | __free_page(page); | ||
3561 | |||
3562 | page = next; | ||
3563 | } | ||
3564 | } | ||
3565 | |||
3566 | for (i = 0; i < rp->rbr_table_size; i++) | ||
3567 | rp->rbr[i] = cpu_to_le32(0); | ||
3568 | rp->rbr_index = 0; | ||
3569 | } | ||
3570 | |||
3571 | static int release_tx_packet(struct niu *np, struct tx_ring_info *rp, int idx) | ||
3572 | { | ||
3573 | struct tx_buff_info *tb = &rp->tx_buffs[idx]; | ||
3574 | struct sk_buff *skb = tb->skb; | ||
3575 | struct tx_pkt_hdr *tp; | ||
3576 | u64 tx_flags; | ||
3577 | int i, len; | ||
3578 | |||
3579 | tp = (struct tx_pkt_hdr *) skb->data; | ||
3580 | tx_flags = le64_to_cpup(&tp->flags); | ||
3581 | |||
3582 | rp->tx_packets++; | ||
3583 | rp->tx_bytes += (((tx_flags & TXHDR_LEN) >> TXHDR_LEN_SHIFT) - | ||
3584 | ((tx_flags & TXHDR_PAD) / 2)); | ||
3585 | |||
3586 | len = skb_headlen(skb); | ||
3587 | np->ops->unmap_single(np->device, tb->mapping, | ||
3588 | len, DMA_TO_DEVICE); | ||
3589 | |||
3590 | if (le64_to_cpu(rp->descr[idx]) & TX_DESC_MARK) | ||
3591 | rp->mark_pending--; | ||
3592 | |||
3593 | tb->skb = NULL; | ||
3594 | do { | ||
3595 | idx = NEXT_TX(rp, idx); | ||
3596 | len -= MAX_TX_DESC_LEN; | ||
3597 | } while (len > 0); | ||
3598 | |||
3599 | for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) { | ||
3600 | tb = &rp->tx_buffs[idx]; | ||
3601 | BUG_ON(tb->skb != NULL); | ||
3602 | np->ops->unmap_page(np->device, tb->mapping, | ||
3603 | skb_shinfo(skb)->frags[i].size, | ||
3604 | DMA_TO_DEVICE); | ||
3605 | idx = NEXT_TX(rp, idx); | ||
3606 | } | ||
3607 | |||
3608 | dev_kfree_skb(skb); | ||
3609 | |||
3610 | return idx; | ||
3611 | } | ||
3612 | |||
3613 | #define NIU_TX_WAKEUP_THRESH(rp) ((rp)->pending / 4) | ||
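| /* E.g. with a 256-descriptor ring this wakes a stopped TX queue once | ||
|  * more than 64 descriptors (a quarter of the ring) are free again. | ||
|  */ | ||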
3614 | |||
3615 | static void niu_tx_work(struct niu *np, struct tx_ring_info *rp) | ||
3616 | { | ||
3617 | struct netdev_queue *txq; | ||
3618 | u16 pkt_cnt, tmp; | ||
3619 | int cons, index; | ||
3620 | u64 cs; | ||
3621 | |||
3622 | index = (rp - np->tx_rings); | ||
3623 | txq = netdev_get_tx_queue(np->dev, index); | ||
3624 | |||
3625 | cs = rp->tx_cs; | ||
3626 | if (unlikely(!(cs & (TX_CS_MK | TX_CS_MMK)))) | ||
3627 | goto out; | ||
3628 | |||
3629 | tmp = pkt_cnt = (cs & TX_CS_PKT_CNT) >> TX_CS_PKT_CNT_SHIFT; | ||
3630 | pkt_cnt = (pkt_cnt - rp->last_pkt_cnt) & | ||
3631 | (TX_CS_PKT_CNT >> TX_CS_PKT_CNT_SHIFT); | ||
3632 | |||
3633 | rp->last_pkt_cnt = tmp; | ||
3634 | |||
3635 | cons = rp->cons; | ||
3636 | |||
3637 | netif_printk(np, tx_done, KERN_DEBUG, np->dev, | ||
3638 | "%s() pkt_cnt[%u] cons[%d]\n", __func__, pkt_cnt, cons); | ||
3639 | |||
3640 | while (pkt_cnt--) | ||
3641 | cons = release_tx_packet(np, rp, cons); | ||
3642 | |||
3643 | rp->cons = cons; | ||
3644 | smp_mb(); | ||
3645 | |||
3646 | out: | ||
3647 | if (unlikely(netif_tx_queue_stopped(txq) && | ||
3648 | (niu_tx_avail(rp) > NIU_TX_WAKEUP_THRESH(rp)))) { | ||
3649 | __netif_tx_lock(txq, smp_processor_id()); | ||
3650 | if (netif_tx_queue_stopped(txq) && | ||
3651 | (niu_tx_avail(rp) > NIU_TX_WAKEUP_THRESH(rp))) | ||
3652 | netif_tx_wake_queue(txq); | ||
3653 | __netif_tx_unlock(txq); | ||
3654 | } | ||
3655 | } | ||
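| ||| 
| /* Worked example for the wrap-safe delta above (the field width here is | ||
|  * illustrative): the hardware packet counter is a narrow wrapping | ||
|  * field, so the delta is taken modulo its mask.  For a 12-bit field | ||
|  * with last_pkt_cnt == 0xFF0 and a new reading of 0x010, | ||
|  * (0x010 - 0xFF0) & 0xFFF == 0x020, i.e. 32 packets completed across | ||
|  * the wrap. | ||
|  */ | ||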
3656 | |||
3657 | static inline void niu_sync_rx_discard_stats(struct niu *np, | ||
3658 | struct rx_ring_info *rp, | ||
3659 | const int limit) | ||
3660 | { | ||
3661 | /* This elaborate scheme is needed for reading the RX discard | ||
3662 | * counters, as they are only 16-bit and can overflow quickly, | ||
3663 | * and because the overflow indication bit is not usable as | ||
3664 | * the counter value does not wrap, but remains at max value | ||
3665 | * 0xFFFF. | ||
3666 | * | ||
3667 | * In theory and in practice counters can be lost in between | ||
3668 | * reading nr64() and clearing the counter nw64(). For this | ||
3669 | * reason, the number of counter clearings nw64() is | ||
3670 | * limited/reduced through the limit parameter. | ||
3671 | */ | ||
3672 | int rx_channel = rp->rx_channel; | ||
3673 | u32 misc, wred; | ||
3674 | |||
3675 | /* RXMISC (Receive Miscellaneous Discard Count), covers the | ||
3676 | * following discard events: IPP (Input Port Process), | ||
3677 | * FFLP/TCAM, full RCR (Receive Completion Ring), and an empty RBR | ||
3678 | * (Receive Block Ring) prefetch buffer. | ||
3679 | */ | ||
3680 | misc = nr64(RXMISC(rx_channel)); | ||
3681 | if (unlikely((misc & RXMISC_COUNT) > limit)) { | ||
3682 | nw64(RXMISC(rx_channel), 0); | ||
3683 | rp->rx_errors += misc & RXMISC_COUNT; | ||
3684 | |||
3685 | if (unlikely(misc & RXMISC_OFLOW)) | ||
3686 | dev_err(np->device, "rx-%d: Counter overflow RXMISC discard\n", | ||
3687 | rx_channel); | ||
3688 | |||
3689 | netif_printk(np, rx_err, KERN_DEBUG, np->dev, | ||
3690 | "rx-%d: MISC drop=%u over=%u\n", | ||
3691 | rx_channel, misc, misc-limit); | ||
3692 | } | ||
3693 | |||
3694 | /* WRED (Weighted Random Early Discard) by hardware */ | ||
3695 | wred = nr64(RED_DIS_CNT(rx_channel)); | ||
3696 | if (unlikely((wred & RED_DIS_CNT_COUNT) > limit)) { | ||
3697 | nw64(RED_DIS_CNT(rx_channel), 0); | ||
3698 | rp->rx_dropped += wred & RED_DIS_CNT_COUNT; | ||
3699 | |||
3700 | if (unlikely(wred & RED_DIS_CNT_OFLOW)) | ||
3701 | dev_err(np->device, "rx-%d: Counter overflow WRED discard\n", rx_channel); | ||
3702 | |||
3703 | netif_printk(np, rx_err, KERN_DEBUG, np->dev, | ||
3704 | "rx-%d: WRED drop=%u over=%u\n", | ||
3705 | rx_channel, wred, wred-limit); | ||
3706 | } | ||
3707 | } | ||
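| ||| 
| /* A minimal sketch (not part of the driver) of the rate-limited | ||
|  * read-and-clear pattern used above, for a hypothetical 16-bit | ||
|  * saturating discard counter register: | ||
|  */ | ||
| static u32 niu_read_discard_counter(struct niu *np, unsigned long reg, | ||
| 				    u32 mask, u32 limit, u64 *accum) | ||
| { | ||
| 	u32 cnt = nr64(reg) & mask; | ||
| ||| 
| 	if (cnt > limit) { | ||
| 		/* counts arriving between nr64() and nw64() are lost, | ||
| 		 * which is why the clear is rate-limited by 'limit' | ||
| 		 */ | ||
| 		nw64(reg, 0); | ||
| 		*accum += cnt; | ||
| 	} | ||
| 	return cnt; | ||
| } | ||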
3708 | |||
3709 | static int niu_rx_work(struct napi_struct *napi, struct niu *np, | ||
3710 | struct rx_ring_info *rp, int budget) | ||
3711 | { | ||
3712 | int qlen, rcr_done = 0, work_done = 0; | ||
3713 | struct rxdma_mailbox *mbox = rp->mbox; | ||
3714 | u64 stat; | ||
3715 | |||
3716 | #if 1 /* read channel status via PIO; the #else arm would read the same state from the RXDMA mailbox */ | ||
3717 | stat = nr64(RX_DMA_CTL_STAT(rp->rx_channel)); | ||
3718 | qlen = nr64(RCRSTAT_A(rp->rx_channel)) & RCRSTAT_A_QLEN; | ||
3719 | #else | ||
3720 | stat = le64_to_cpup(&mbox->rx_dma_ctl_stat); | ||
3721 | qlen = (le64_to_cpup(&mbox->rcrstat_a) & RCRSTAT_A_QLEN); | ||
3722 | #endif | ||
3723 | mbox->rx_dma_ctl_stat = 0; | ||
3724 | mbox->rcrstat_a = 0; | ||
3725 | |||
3726 | netif_printk(np, rx_status, KERN_DEBUG, np->dev, | ||
3727 | "%s(chan[%d]), stat[%llx] qlen=%d\n", | ||
3728 | __func__, rp->rx_channel, (unsigned long long)stat, qlen); | ||
3729 | |||
3731 | qlen = min(qlen, budget); | ||
3732 | while (work_done < qlen) { | ||
3733 | rcr_done += niu_process_rx_pkt(napi, np, rp); | ||
3734 | work_done++; | ||
3735 | } | ||
3736 | |||
3737 | if (rp->rbr_refill_pending >= rp->rbr_kick_thresh) { | ||
3738 | unsigned int i; | ||
3739 | |||
3740 | for (i = 0; i < rp->rbr_refill_pending; i++) | ||
3741 | niu_rbr_refill(np, rp, GFP_ATOMIC); | ||
3742 | rp->rbr_refill_pending = 0; | ||
3743 | } | ||
3744 | |||
3745 | stat = (RX_DMA_CTL_STAT_MEX | | ||
3746 | ((u64)work_done << RX_DMA_CTL_STAT_PKTREAD_SHIFT) | | ||
3747 | ((u64)rcr_done << RX_DMA_CTL_STAT_PTRREAD_SHIFT)); | ||
3748 | |||
3749 | nw64(RX_DMA_CTL_STAT(rp->rx_channel), stat); | ||
3750 | |||
3751 | /* Only sync discard stats when qlen indicates potential for drops */ | ||
3752 | if (qlen > 10) | ||
3753 | niu_sync_rx_discard_stats(np, rp, 0x7FFF); | ||
3754 | |||
3755 | return work_done; | ||
3756 | } | ||
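| ||| 
| /* Example of the status write-back above: if 5 packets consuming 7 RCR | ||
|  * entries were processed, PKTREAD = 5 and PTRREAD = 7 tell the chip how | ||
|  * far software has advanced, while the MEX bit keeps mailbox updates | ||
|  * armed. | ||
|  */ | ||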
3757 | |||
3758 | static int niu_poll_core(struct niu *np, struct niu_ldg *lp, int budget) | ||
3759 | { | ||
3760 | u64 v0 = lp->v0; | ||
3761 | u32 tx_vec = (v0 >> 32); | ||
3762 | u32 rx_vec = (v0 & 0xffffffff); | ||
3763 | int i, work_done = 0; | ||
3764 | |||
3765 | netif_printk(np, intr, KERN_DEBUG, np->dev, | ||
3766 | "%s() v0[%016llx]\n", __func__, (unsigned long long)v0); | ||
3767 | |||
3768 | for (i = 0; i < np->num_tx_rings; i++) { | ||
3769 | struct tx_ring_info *rp = &np->tx_rings[i]; | ||
3770 | if (tx_vec & (1 << rp->tx_channel)) | ||
3771 | niu_tx_work(np, rp); | ||
3772 | nw64(LD_IM0(LDN_TXDMA(rp->tx_channel)), 0); | ||
3773 | } | ||
3774 | |||
3775 | for (i = 0; i < np->num_rx_rings; i++) { | ||
3776 | struct rx_ring_info *rp = &np->rx_rings[i]; | ||
3777 | |||
3778 | if (rx_vec & (1 << rp->rx_channel)) { | ||
3779 | int this_work_done; | ||
3780 | |||
3781 | this_work_done = niu_rx_work(&lp->napi, np, rp, | ||
3782 | budget); | ||
3783 | |||
3784 | budget -= this_work_done; | ||
3785 | work_done += this_work_done; | ||
3786 | } | ||
3787 | nw64(LD_IM0(LDN_RXDMA(rp->rx_channel)), 0); | ||
3788 | } | ||
3789 | |||
3790 | return work_done; | ||
3791 | } | ||
3792 | |||
3793 | static int niu_poll(struct napi_struct *napi, int budget) | ||
3794 | { | ||
3795 | struct niu_ldg *lp = container_of(napi, struct niu_ldg, napi); | ||
3796 | struct niu *np = lp->np; | ||
3797 | int work_done; | ||
3798 | |||
3799 | work_done = niu_poll_core(np, lp, budget); | ||
3800 | |||
3801 | if (work_done < budget) { | ||
3802 | napi_complete(napi); | ||
3803 | niu_ldg_rearm(np, lp, 1); | ||
3804 | } | ||
3805 | return work_done; | ||
3806 | } | ||
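| ||| 
| /* Standard NAPI contract: e.g. with budget 64, a poll that finds only | ||
|  * 10 packets returns 10, completes NAPI and re-arms the LDG interrupt; | ||
|  * a poll that exhausts the budget stays scheduled for another pass. | ||
|  */ | ||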
3807 | |||
3808 | static void niu_log_rxchan_errors(struct niu *np, struct rx_ring_info *rp, | ||
3809 | u64 stat) | ||
3810 | { | ||
3811 | netdev_err(np->dev, "RX channel %u errors ( ", rp->rx_channel); | ||
3812 | |||
3813 | if (stat & RX_DMA_CTL_STAT_RBR_TMOUT) | ||
3814 | pr_cont("RBR_TMOUT "); | ||
3815 | if (stat & RX_DMA_CTL_STAT_RSP_CNT_ERR) | ||
3816 | pr_cont("RSP_CNT "); | ||
3817 | if (stat & RX_DMA_CTL_STAT_BYTE_EN_BUS) | ||
3818 | pr_cont("BYTE_EN_BUS "); | ||
3819 | if (stat & RX_DMA_CTL_STAT_RSP_DAT_ERR) | ||
3820 | pr_cont("RSP_DAT "); | ||
3821 | if (stat & RX_DMA_CTL_STAT_RCR_ACK_ERR) | ||
3822 | pr_cont("RCR_ACK "); | ||
3823 | if (stat & RX_DMA_CTL_STAT_RCR_SHA_PAR) | ||
3824 | pr_cont("RCR_SHA_PAR "); | ||
3825 | if (stat & RX_DMA_CTL_STAT_RBR_PRE_PAR) | ||
3826 | pr_cont("RBR_PRE_PAR "); | ||
3827 | if (stat & RX_DMA_CTL_STAT_CONFIG_ERR) | ||
3828 | pr_cont("CONFIG "); | ||
3829 | if (stat & RX_DMA_CTL_STAT_RCRINCON) | ||
3830 | pr_cont("RCRINCON "); | ||
3831 | if (stat & RX_DMA_CTL_STAT_RCRFULL) | ||
3832 | pr_cont("RCRFULL "); | ||
3833 | if (stat & RX_DMA_CTL_STAT_RBRFULL) | ||
3834 | pr_cont("RBRFULL "); | ||
3835 | if (stat & RX_DMA_CTL_STAT_RBRLOGPAGE) | ||
3836 | pr_cont("RBRLOGPAGE "); | ||
3837 | if (stat & RX_DMA_CTL_STAT_CFIGLOGPAGE) | ||
3838 | pr_cont("CFIGLOGPAGE "); | ||
3839 | if (stat & RX_DMA_CTL_STAT_DC_FIFO_ERR) | ||
3840 | pr_cont("DC_FIDO "); | ||
3841 | |||
3842 | pr_cont(")\n"); | ||
3843 | } | ||
3844 | |||
3845 | static int niu_rx_error(struct niu *np, struct rx_ring_info *rp) | ||
3846 | { | ||
3847 | u64 stat = nr64(RX_DMA_CTL_STAT(rp->rx_channel)); | ||
3848 | int err = 0; | ||
3849 | |||
3851 | if (stat & (RX_DMA_CTL_STAT_CHAN_FATAL | | ||
3852 | RX_DMA_CTL_STAT_PORT_FATAL)) | ||
3853 | err = -EINVAL; | ||
3854 | |||
3855 | if (err) { | ||
3856 | netdev_err(np->dev, "RX channel %u error, stat[%llx]\n", | ||
3857 | rp->rx_channel, | ||
3858 | (unsigned long long) stat); | ||
3859 | |||
3860 | niu_log_rxchan_errors(np, rp, stat); | ||
3861 | } | ||
3862 | |||
3863 | nw64(RX_DMA_CTL_STAT(rp->rx_channel), | ||
3864 | stat & RX_DMA_CTL_WRITE_CLEAR_ERRS); | ||
3865 | |||
3866 | return err; | ||
3867 | } | ||
3868 | |||
3869 | static void niu_log_txchan_errors(struct niu *np, struct tx_ring_info *rp, | ||
3870 | u64 cs) | ||
3871 | { | ||
3872 | netdev_err(np->dev, "TX channel %u errors ( ", rp->tx_channel); | ||
3873 | |||
3874 | if (cs & TX_CS_MBOX_ERR) | ||
3875 | pr_cont("MBOX "); | ||
3876 | if (cs & TX_CS_PKT_SIZE_ERR) | ||
3877 | pr_cont("PKT_SIZE "); | ||
3878 | if (cs & TX_CS_TX_RING_OFLOW) | ||
3879 | pr_cont("TX_RING_OFLOW "); | ||
3880 | if (cs & TX_CS_PREF_BUF_PAR_ERR) | ||
3881 | pr_cont("PREF_BUF_PAR "); | ||
3882 | if (cs & TX_CS_NACK_PREF) | ||
3883 | pr_cont("NACK_PREF "); | ||
3884 | if (cs & TX_CS_NACK_PKT_RD) | ||
3885 | pr_cont("NACK_PKT_RD "); | ||
3886 | if (cs & TX_CS_CONF_PART_ERR) | ||
3887 | pr_cont("CONF_PART "); | ||
3888 | if (cs & TX_CS_PKT_PRT_ERR) | ||
3889 | pr_cont("PKT_PTR "); | ||
3890 | |||
3891 | pr_cont(")\n"); | ||
3892 | } | ||
3893 | |||
3894 | static int niu_tx_error(struct niu *np, struct tx_ring_info *rp) | ||
3895 | { | ||
3896 | u64 cs, logh, logl; | ||
3897 | |||
3898 | cs = nr64(TX_CS(rp->tx_channel)); | ||
3899 | logh = nr64(TX_RNG_ERR_LOGH(rp->tx_channel)); | ||
3900 | logl = nr64(TX_RNG_ERR_LOGL(rp->tx_channel)); | ||
3901 | |||
3902 | netdev_err(np->dev, "TX channel %u error, cs[%llx] logh[%llx] logl[%llx]\n", | ||
3903 | rp->tx_channel, | ||
3904 | (unsigned long long)cs, | ||
3905 | (unsigned long long)logh, | ||
3906 | (unsigned long long)logl); | ||
3907 | |||
3908 | niu_log_txchan_errors(np, rp, cs); | ||
3909 | |||
3910 | return -ENODEV; | ||
3911 | } | ||
3912 | |||
3913 | static int niu_mif_interrupt(struct niu *np) | ||
3914 | { | ||
3915 | u64 mif_status = nr64(MIF_STATUS); | ||
3916 | int phy_mdint = 0; | ||
3917 | |||
3918 | if (np->flags & NIU_FLAGS_XMAC) { | ||
3919 | u64 xrxmac_stat = nr64_mac(XRXMAC_STATUS); | ||
3920 | |||
3921 | if (xrxmac_stat & XRXMAC_STATUS_PHY_MDINT) | ||
3922 | phy_mdint = 1; | ||
3923 | } | ||
3924 | |||
3925 | netdev_err(np->dev, "MIF interrupt, stat[%llx] phy_mdint(%d)\n", | ||
3926 | (unsigned long long)mif_status, phy_mdint); | ||
3927 | |||
3928 | return -ENODEV; | ||
3929 | } | ||
3930 | |||
3931 | static void niu_xmac_interrupt(struct niu *np) | ||
3932 | { | ||
3933 | struct niu_xmac_stats *mp = &np->mac_stats.xmac; | ||
3934 | u64 val; | ||
3935 | |||
3936 | val = nr64_mac(XTXMAC_STATUS); | ||
3937 | if (val & XTXMAC_STATUS_FRAME_CNT_EXP) | ||
3938 | mp->tx_frames += TXMAC_FRM_CNT_COUNT; | ||
3939 | if (val & XTXMAC_STATUS_BYTE_CNT_EXP) | ||
3940 | mp->tx_bytes += TXMAC_BYTE_CNT_COUNT; | ||
3941 | if (val & XTXMAC_STATUS_TXFIFO_XFR_ERR) | ||
3942 | mp->tx_fifo_errors++; | ||
3943 | if (val & XTXMAC_STATUS_TXMAC_OFLOW) | ||
3944 | mp->tx_overflow_errors++; | ||
3945 | if (val & XTXMAC_STATUS_MAX_PSIZE_ERR) | ||
3946 | mp->tx_max_pkt_size_errors++; | ||
3947 | if (val & XTXMAC_STATUS_TXMAC_UFLOW) | ||
3948 | mp->tx_underflow_errors++; | ||
3949 | |||
3950 | val = nr64_mac(XRXMAC_STATUS); | ||
3951 | if (val & XRXMAC_STATUS_LCL_FLT_STATUS) | ||
3952 | mp->rx_local_faults++; | ||
3953 | if (val & XRXMAC_STATUS_RFLT_DET) | ||
3954 | mp->rx_remote_faults++; | ||
3955 | if (val & XRXMAC_STATUS_LFLT_CNT_EXP) | ||
3956 | mp->rx_link_faults += LINK_FAULT_CNT_COUNT; | ||
3957 | if (val & XRXMAC_STATUS_ALIGNERR_CNT_EXP) | ||
3958 | mp->rx_align_errors += RXMAC_ALIGN_ERR_CNT_COUNT; | ||
3959 | if (val & XRXMAC_STATUS_RXFRAG_CNT_EXP) | ||
3960 | mp->rx_frags += RXMAC_FRAG_CNT_COUNT; | ||
3961 | if (val & XRXMAC_STATUS_RXMULTF_CNT_EXP) | ||
3962 | mp->rx_mcasts += RXMAC_MC_FRM_CNT_COUNT; | ||
3963 | if (val & XRXMAC_STATUS_RXBCAST_CNT_EXP) | ||
3964 | mp->rx_bcasts += RXMAC_BC_FRM_CNT_COUNT; | ||
3965 | if (val & XRXMAC_STATUS_RXBCAST_CNT_EXP) | ||
3966 | mp->rx_bcasts += RXMAC_BC_FRM_CNT_COUNT; | ||
3967 | if (val & XRXMAC_STATUS_RXHIST1_CNT_EXP) | ||
3968 | mp->rx_hist_cnt1 += RXMAC_HIST_CNT1_COUNT; | ||
3969 | if (val & XRXMAC_STATUS_RXHIST2_CNT_EXP) | ||
3970 | mp->rx_hist_cnt2 += RXMAC_HIST_CNT2_COUNT; | ||
3971 | if (val & XRXMAC_STATUS_RXHIST3_CNT_EXP) | ||
3972 | mp->rx_hist_cnt3 += RXMAC_HIST_CNT3_COUNT; | ||
3973 | if (val & XRXMAC_STATUS_RXHIST4_CNT_EXP) | ||
3974 | mp->rx_hist_cnt4 += RXMAC_HIST_CNT4_COUNT; | ||
3975 | if (val & XRXMAC_STATUS_RXHIST5_CNT_EXP) | ||
3976 | mp->rx_hist_cnt5 += RXMAC_HIST_CNT5_COUNT; | ||
3977 | if (val & XRXMAC_STATUS_RXHIST6_CNT_EXP) | ||
3978 | mp->rx_hist_cnt6 += RXMAC_HIST_CNT6_COUNT; | ||
3979 | if (val & XRXMAC_STATUS_RXHIST7_CNT_EXP) | ||
3980 | mp->rx_hist_cnt7 += RXMAC_HIST_CNT7_COUNT; | ||
3981 | if (val & XRXMAC_STATUS_RXOCTET_CNT_EXP) | ||
3982 | mp->rx_octets += RXMAC_BT_CNT_COUNT; | ||
3983 | if (val & XRXMAC_STATUS_CVIOLERR_CNT_EXP) | ||
3984 | mp->rx_code_violations += RXMAC_CD_VIO_CNT_COUNT; | ||
3985 | if (val & XRXMAC_STATUS_LENERR_CNT_EXP) | ||
3986 | mp->rx_len_errors += RXMAC_MPSZER_CNT_COUNT; | ||
3987 | if (val & XRXMAC_STATUS_CRCERR_CNT_EXP) | ||
3988 | mp->rx_crc_errors += RXMAC_CRC_ER_CNT_COUNT; | ||
3989 | if (val & XRXMAC_STATUS_RXUFLOW) | ||
3990 | mp->rx_underflows++; | ||
3991 | if (val & XRXMAC_STATUS_RXOFLOW) | ||
3992 | mp->rx_overflows++; | ||
3993 | |||
3994 | val = nr64_mac(XMAC_FC_STAT); | ||
3995 | if (val & XMAC_FC_STAT_TX_MAC_NPAUSE) | ||
3996 | mp->pause_off_state++; | ||
3997 | if (val & XMAC_FC_STAT_TX_MAC_PAUSE) | ||
3998 | mp->pause_on_state++; | ||
3999 | if (val & XMAC_FC_STAT_RX_MAC_RPAUSE) | ||
4000 | mp->pause_received++; | ||
4001 | } | ||
4002 | |||
4003 | static void niu_bmac_interrupt(struct niu *np) | ||
4004 | { | ||
4005 | struct niu_bmac_stats *mp = &np->mac_stats.bmac; | ||
4006 | u64 val; | ||
4007 | |||
4008 | val = nr64_mac(BTXMAC_STATUS); | ||
4009 | if (val & BTXMAC_STATUS_UNDERRUN) | ||
4010 | mp->tx_underflow_errors++; | ||
4011 | if (val & BTXMAC_STATUS_MAX_PKT_ERR) | ||
4012 | mp->tx_max_pkt_size_errors++; | ||
4013 | if (val & BTXMAC_STATUS_BYTE_CNT_EXP) | ||
4014 | mp->tx_bytes += BTXMAC_BYTE_CNT_COUNT; | ||
4015 | if (val & BTXMAC_STATUS_FRAME_CNT_EXP) | ||
4016 | mp->tx_frames += BTXMAC_FRM_CNT_COUNT; | ||
4017 | |||
4018 | val = nr64_mac(BRXMAC_STATUS); | ||
4019 | if (val & BRXMAC_STATUS_OVERFLOW) | ||
4020 | mp->rx_overflows++; | ||
4021 | if (val & BRXMAC_STATUS_FRAME_CNT_EXP) | ||
4022 | mp->rx_frames += BRXMAC_FRAME_CNT_COUNT; | ||
4023 | if (val & BRXMAC_STATUS_ALIGN_ERR_EXP) | ||
4024 | mp->rx_align_errors += BRXMAC_ALIGN_ERR_CNT_COUNT; | ||
4025 | if (val & BRXMAC_STATUS_CRC_ERR_EXP) | ||
4026 | mp->rx_crc_errors += BRXMAC_ALIGN_ERR_CNT_COUNT; | ||
4027 | if (val & BRXMAC_STATUS_LEN_ERR_EXP) | ||
4028 | mp->rx_len_errors += BRXMAC_CODE_VIOL_ERR_CNT_COUNT; | ||
4029 | |||
4030 | val = nr64_mac(BMAC_CTRL_STATUS); | ||
4031 | if (val & BMAC_CTRL_STATUS_NOPAUSE) | ||
4032 | mp->pause_off_state++; | ||
4033 | if (val & BMAC_CTRL_STATUS_PAUSE) | ||
4034 | mp->pause_on_state++; | ||
4035 | if (val & BMAC_CTRL_STATUS_PAUSE_RECV) | ||
4036 | mp->pause_received++; | ||
4037 | } | ||
4038 | |||
4039 | static int niu_mac_interrupt(struct niu *np) | ||
4040 | { | ||
4041 | if (np->flags & NIU_FLAGS_XMAC) | ||
4042 | niu_xmac_interrupt(np); | ||
4043 | else | ||
4044 | niu_bmac_interrupt(np); | ||
4045 | |||
4046 | return 0; | ||
4047 | } | ||
4048 | |||
4049 | static void niu_log_device_error(struct niu *np, u64 stat) | ||
4050 | { | ||
4051 | netdev_err(np->dev, "Core device errors ( "); | ||
4052 | |||
4053 | if (stat & SYS_ERR_MASK_META2) | ||
4054 | pr_cont("META2 "); | ||
4055 | if (stat & SYS_ERR_MASK_META1) | ||
4056 | pr_cont("META1 "); | ||
4057 | if (stat & SYS_ERR_MASK_PEU) | ||
4058 | pr_cont("PEU "); | ||
4059 | if (stat & SYS_ERR_MASK_TXC) | ||
4060 | pr_cont("TXC "); | ||
4061 | if (stat & SYS_ERR_MASK_RDMC) | ||
4062 | pr_cont("RDMC "); | ||
4063 | if (stat & SYS_ERR_MASK_TDMC) | ||
4064 | pr_cont("TDMC "); | ||
4065 | if (stat & SYS_ERR_MASK_ZCP) | ||
4066 | pr_cont("ZCP "); | ||
4067 | if (stat & SYS_ERR_MASK_FFLP) | ||
4068 | pr_cont("FFLP "); | ||
4069 | if (stat & SYS_ERR_MASK_IPP) | ||
4070 | pr_cont("IPP "); | ||
4071 | if (stat & SYS_ERR_MASK_MAC) | ||
4072 | pr_cont("MAC "); | ||
4073 | if (stat & SYS_ERR_MASK_SMX) | ||
4074 | pr_cont("SMX "); | ||
4075 | |||
4076 | pr_cont(")\n"); | ||
4077 | } | ||
4078 | |||
4079 | static int niu_device_error(struct niu *np) | ||
4080 | { | ||
4081 | u64 stat = nr64(SYS_ERR_STAT); | ||
4082 | |||
4083 | netdev_err(np->dev, "Core device error, stat[%llx]\n", | ||
4084 | (unsigned long long)stat); | ||
4085 | |||
4086 | niu_log_device_error(np, stat); | ||
4087 | |||
4088 | return -ENODEV; | ||
4089 | } | ||
4090 | |||
4091 | static int niu_slowpath_interrupt(struct niu *np, struct niu_ldg *lp, | ||
4092 | u64 v0, u64 v1, u64 v2) | ||
4093 | { | ||
4095 | int i, err = 0; | ||
4096 | |||
4097 | lp->v0 = v0; | ||
4098 | lp->v1 = v1; | ||
4099 | lp->v2 = v2; | ||
4100 | |||
4101 | if (v1 & 0x00000000ffffffffULL) { | ||
4102 | u32 rx_vec = (v1 & 0xffffffff); | ||
4103 | |||
4104 | for (i = 0; i < np->num_rx_rings; i++) { | ||
4105 | struct rx_ring_info *rp = &np->rx_rings[i]; | ||
4106 | |||
4107 | if (rx_vec & (1 << rp->rx_channel)) { | ||
4108 | int r = niu_rx_error(np, rp); | ||
4109 | if (r) { | ||
4110 | err = r; | ||
4111 | } else { | ||
4112 | if (!v0) | ||
4113 | nw64(RX_DMA_CTL_STAT(rp->rx_channel), | ||
4114 | RX_DMA_CTL_STAT_MEX); | ||
4115 | } | ||
4116 | } | ||
4117 | } | ||
4118 | } | ||
4119 | if (v1 & 0x7fffffff00000000ULL) { | ||
4120 | u32 tx_vec = (v1 >> 32) & 0x7fffffff; | ||
4121 | |||
4122 | for (i = 0; i < np->num_tx_rings; i++) { | ||
4123 | struct tx_ring_info *rp = &np->tx_rings[i]; | ||
4124 | |||
4125 | if (tx_vec & (1 << rp->tx_channel)) { | ||
4126 | int r = niu_tx_error(np, rp); | ||
4127 | if (r) | ||
4128 | err = r; | ||
4129 | } | ||
4130 | } | ||
4131 | } | ||
4132 | if ((v0 | v1) & 0x8000000000000000ULL) { | ||
4133 | int r = niu_mif_interrupt(np); | ||
4134 | if (r) | ||
4135 | err = r; | ||
4136 | } | ||
4137 | if (v2) { | ||
4138 | if (v2 & 0x01ef) { | ||
4139 | int r = niu_mac_interrupt(np); | ||
4140 | if (r) | ||
4141 | err = r; | ||
4142 | } | ||
4143 | if (v2 & 0x0210) { | ||
4144 | int r = niu_device_error(np); | ||
4145 | if (r) | ||
4146 | err = r; | ||
4147 | } | ||
4148 | } | ||
4149 | |||
4150 | if (err) | ||
4151 | niu_enable_interrupts(np, 0); | ||
4152 | |||
4153 | return err; | ||
4154 | } | ||
4155 | |||
4156 | static void niu_rxchan_intr(struct niu *np, struct rx_ring_info *rp, | ||
4157 | int ldn) | ||
4158 | { | ||
4159 | struct rxdma_mailbox *mbox = rp->mbox; | ||
4160 | u64 stat_write, stat = le64_to_cpup(&mbox->rx_dma_ctl_stat); | ||
4161 | |||
4162 | stat_write = (RX_DMA_CTL_STAT_RCRTHRES | | ||
4163 | RX_DMA_CTL_STAT_RCRTO); | ||
4164 | nw64(RX_DMA_CTL_STAT(rp->rx_channel), stat_write); | ||
4165 | |||
4166 | netif_printk(np, intr, KERN_DEBUG, np->dev, | ||
4167 | "%s() stat[%llx]\n", __func__, (unsigned long long)stat); | ||
4168 | } | ||
4169 | |||
4170 | static void niu_txchan_intr(struct niu *np, struct tx_ring_info *rp, | ||
4171 | int ldn) | ||
4172 | { | ||
4173 | rp->tx_cs = nr64(TX_CS(rp->tx_channel)); | ||
4174 | |||
4175 | netif_printk(np, intr, KERN_DEBUG, np->dev, | ||
4176 | "%s() cs[%llx]\n", __func__, (unsigned long long)rp->tx_cs); | ||
4177 | } | ||
4178 | |||
4179 | static void __niu_fastpath_interrupt(struct niu *np, int ldg, u64 v0) | ||
4180 | { | ||
4181 | struct niu_parent *parent = np->parent; | ||
4182 | u32 rx_vec, tx_vec; | ||
4183 | int i; | ||
4184 | |||
4185 | tx_vec = (v0 >> 32); | ||
4186 | rx_vec = (v0 & 0xffffffff); | ||
4187 | |||
4188 | for (i = 0; i < np->num_rx_rings; i++) { | ||
4189 | struct rx_ring_info *rp = &np->rx_rings[i]; | ||
4190 | int ldn = LDN_RXDMA(rp->rx_channel); | ||
4191 | |||
4192 | if (parent->ldg_map[ldn] != ldg) | ||
4193 | continue; | ||
4194 | |||
4195 | nw64(LD_IM0(ldn), LD_IM0_MASK); | ||
4196 | if (rx_vec & (1 << rp->rx_channel)) | ||
4197 | niu_rxchan_intr(np, rp, ldn); | ||
4198 | } | ||
4199 | |||
4200 | for (i = 0; i < np->num_tx_rings; i++) { | ||
4201 | struct tx_ring_info *rp = &np->tx_rings[i]; | ||
4202 | int ldn = LDN_TXDMA(rp->tx_channel); | ||
4203 | |||
4204 | if (parent->ldg_map[ldn] != ldg) | ||
4205 | continue; | ||
4206 | |||
4207 | nw64(LD_IM0(ldn), LD_IM0_MASK); | ||
4208 | if (tx_vec & (1 << rp->tx_channel)) | ||
4209 | niu_txchan_intr(np, rp, ldn); | ||
4210 | } | ||
4211 | } | ||
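| ||| 
| /* The v0 vector is split as computed above: TX channels in the upper | ||
|  * 32 bits, RX channels in the lower 32.  E.g. v0 == 0x0000000400000009 | ||
|  * flags TX channel 2 plus RX channels 0 and 3 as pending. | ||
|  */ | ||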
4212 | |||
4213 | static void niu_schedule_napi(struct niu *np, struct niu_ldg *lp, | ||
4214 | u64 v0, u64 v1, u64 v2) | ||
4215 | { | ||
4216 | if (likely(napi_schedule_prep(&lp->napi))) { | ||
4217 | lp->v0 = v0; | ||
4218 | lp->v1 = v1; | ||
4219 | lp->v2 = v2; | ||
4220 | __niu_fastpath_interrupt(np, lp->ldg_num, v0); | ||
4221 | __napi_schedule(&lp->napi); | ||
4222 | } | ||
4223 | } | ||
4224 | |||
4225 | static irqreturn_t niu_interrupt(int irq, void *dev_id) | ||
4226 | { | ||
4227 | struct niu_ldg *lp = dev_id; | ||
4228 | struct niu *np = lp->np; | ||
4229 | int ldg = lp->ldg_num; | ||
4230 | unsigned long flags; | ||
4231 | u64 v0, v1, v2; | ||
4232 | |||
4233 | if (netif_msg_intr(np)) | ||
4234 | printk(KERN_DEBUG KBUILD_MODNAME ": " "%s() ldg[%p](%d)", | ||
4235 | __func__, lp, ldg); | ||
4236 | |||
4237 | spin_lock_irqsave(&np->lock, flags); | ||
4238 | |||
4239 | v0 = nr64(LDSV0(ldg)); | ||
4240 | v1 = nr64(LDSV1(ldg)); | ||
4241 | v2 = nr64(LDSV2(ldg)); | ||
4242 | |||
4243 | if (netif_msg_intr(np)) | ||
4244 | pr_cont(" v0[%llx] v1[%llx] v2[%llx]\n", | ||
4245 | (unsigned long long) v0, | ||
4246 | (unsigned long long) v1, | ||
4247 | (unsigned long long) v2); | ||
4248 | |||
4249 | if (unlikely(!v0 && !v1 && !v2)) { | ||
4250 | spin_unlock_irqrestore(&np->lock, flags); | ||
4251 | return IRQ_NONE; | ||
4252 | } | ||
4253 | |||
4254 | if (unlikely((v0 & ((u64)1 << LDN_MIF)) || v1 || v2)) { | ||
4255 | int err = niu_slowpath_interrupt(np, lp, v0, v1, v2); | ||
4256 | if (err) | ||
4257 | goto out; | ||
4258 | } | ||
4259 | if (likely(v0 & ~((u64)1 << LDN_MIF))) | ||
4260 | niu_schedule_napi(np, lp, v0, v1, v2); | ||
4261 | else | ||
4262 | niu_ldg_rearm(np, lp, 1); | ||
4263 | out: | ||
4264 | spin_unlock_irqrestore(&np->lock, flags); | ||
4265 | |||
4266 | return IRQ_HANDLED; | ||
4267 | } | ||
4268 | |||
4269 | static void niu_free_rx_ring_info(struct niu *np, struct rx_ring_info *rp) | ||
4270 | { | ||
4271 | if (rp->mbox) { | ||
4272 | np->ops->free_coherent(np->device, | ||
4273 | sizeof(struct rxdma_mailbox), | ||
4274 | rp->mbox, rp->mbox_dma); | ||
4275 | rp->mbox = NULL; | ||
4276 | } | ||
4277 | if (rp->rcr) { | ||
4278 | np->ops->free_coherent(np->device, | ||
4279 | MAX_RCR_RING_SIZE * sizeof(__le64), | ||
4280 | rp->rcr, rp->rcr_dma); | ||
4281 | rp->rcr = NULL; | ||
4282 | rp->rcr_table_size = 0; | ||
4283 | rp->rcr_index = 0; | ||
4284 | } | ||
4285 | if (rp->rbr) { | ||
4286 | niu_rbr_free(np, rp); | ||
4287 | |||
4288 | np->ops->free_coherent(np->device, | ||
4289 | MAX_RBR_RING_SIZE * sizeof(__le32), | ||
4290 | rp->rbr, rp->rbr_dma); | ||
4291 | rp->rbr = NULL; | ||
4292 | rp->rbr_table_size = 0; | ||
4293 | rp->rbr_index = 0; | ||
4294 | } | ||
4295 | kfree(rp->rxhash); | ||
4296 | rp->rxhash = NULL; | ||
4297 | } | ||
4298 | |||
4299 | static void niu_free_tx_ring_info(struct niu *np, struct tx_ring_info *rp) | ||
4300 | { | ||
4301 | if (rp->mbox) { | ||
4302 | np->ops->free_coherent(np->device, | ||
4303 | sizeof(struct txdma_mailbox), | ||
4304 | rp->mbox, rp->mbox_dma); | ||
4305 | rp->mbox = NULL; | ||
4306 | } | ||
4307 | if (rp->descr) { | ||
4308 | int i; | ||
4309 | |||
4310 | for (i = 0; i < MAX_TX_RING_SIZE; i++) { | ||
4311 | if (rp->tx_buffs[i].skb) | ||
4312 | (void) release_tx_packet(np, rp, i); | ||
4313 | } | ||
4314 | |||
4315 | np->ops->free_coherent(np->device, | ||
4316 | MAX_TX_RING_SIZE * sizeof(__le64), | ||
4317 | rp->descr, rp->descr_dma); | ||
4318 | rp->descr = NULL; | ||
4319 | rp->pending = 0; | ||
4320 | rp->prod = 0; | ||
4321 | rp->cons = 0; | ||
4322 | rp->wrap_bit = 0; | ||
4323 | } | ||
4324 | } | ||
4325 | |||
4326 | static void niu_free_channels(struct niu *np) | ||
4327 | { | ||
4328 | int i; | ||
4329 | |||
4330 | if (np->rx_rings) { | ||
4331 | for (i = 0; i < np->num_rx_rings; i++) { | ||
4332 | struct rx_ring_info *rp = &np->rx_rings[i]; | ||
4333 | |||
4334 | niu_free_rx_ring_info(np, rp); | ||
4335 | } | ||
4336 | kfree(np->rx_rings); | ||
4337 | np->rx_rings = NULL; | ||
4338 | np->num_rx_rings = 0; | ||
4339 | } | ||
4340 | |||
4341 | if (np->tx_rings) { | ||
4342 | for (i = 0; i < np->num_tx_rings; i++) { | ||
4343 | struct tx_ring_info *rp = &np->tx_rings[i]; | ||
4344 | |||
4345 | niu_free_tx_ring_info(np, rp); | ||
4346 | } | ||
4347 | kfree(np->tx_rings); | ||
4348 | np->tx_rings = NULL; | ||
4349 | np->num_tx_rings = 0; | ||
4350 | } | ||
4351 | } | ||
4352 | |||
4353 | static int niu_alloc_rx_ring_info(struct niu *np, | ||
4354 | struct rx_ring_info *rp) | ||
4355 | { | ||
4356 | BUILD_BUG_ON(sizeof(struct rxdma_mailbox) != 64); | ||
4357 | |||
4358 | rp->rxhash = kzalloc(MAX_RBR_RING_SIZE * sizeof(struct page *), | ||
4359 | GFP_KERNEL); | ||
4360 | if (!rp->rxhash) | ||
4361 | return -ENOMEM; | ||
4362 | |||
4363 | rp->mbox = np->ops->alloc_coherent(np->device, | ||
4364 | sizeof(struct rxdma_mailbox), | ||
4365 | &rp->mbox_dma, GFP_KERNEL); | ||
4366 | if (!rp->mbox) | ||
4367 | return -ENOMEM; | ||
4368 | if ((unsigned long)rp->mbox & (64UL - 1)) { | ||
4369 | netdev_err(np->dev, "Coherent alloc gives misaligned RXDMA mailbox %p\n", | ||
4370 | rp->mbox); | ||
4371 | return -EINVAL; | ||
4372 | } | ||
4373 | |||
4374 | rp->rcr = np->ops->alloc_coherent(np->device, | ||
4375 | MAX_RCR_RING_SIZE * sizeof(__le64), | ||
4376 | &rp->rcr_dma, GFP_KERNEL); | ||
4377 | if (!rp->rcr) | ||
4378 | return -ENOMEM; | ||
4379 | if ((unsigned long)rp->rcr & (64UL - 1)) { | ||
4380 | netdev_err(np->dev, "Coherent alloc gives misaligned RXDMA RCR table %p\n", | ||
4381 | rp->rcr); | ||
4382 | return -EINVAL; | ||
4383 | } | ||
4384 | rp->rcr_table_size = MAX_RCR_RING_SIZE; | ||
4385 | rp->rcr_index = 0; | ||
4386 | |||
4387 | rp->rbr = np->ops->alloc_coherent(np->device, | ||
4388 | MAX_RBR_RING_SIZE * sizeof(__le32), | ||
4389 | &rp->rbr_dma, GFP_KERNEL); | ||
4390 | if (!rp->rbr) | ||
4391 | return -ENOMEM; | ||
4392 | if ((unsigned long)rp->rbr & (64UL - 1)) { | ||
4393 | netdev_err(np->dev, "Coherent alloc gives misaligned RXDMA RBR table %p\n", | ||
4394 | rp->rbr); | ||
4395 | return -EINVAL; | ||
4396 | } | ||
4397 | rp->rbr_table_size = MAX_RBR_RING_SIZE; | ||
4398 | rp->rbr_index = 0; | ||
4399 | rp->rbr_pending = 0; | ||
4400 | |||
4401 | return 0; | ||
4402 | } | ||
4403 | |||
4404 | static void niu_set_max_burst(struct niu *np, struct tx_ring_info *rp) | ||
4405 | { | ||
4406 | int mtu = np->dev->mtu; | ||
4407 | |||
4408 | /* These values are recommended by the HW designers for fair | ||
4409 | * utilization of DRR amongst the rings. | ||
4410 | */ | ||
4411 | rp->max_burst = mtu + 32; | ||
4412 | if (rp->max_burst > 4096) | ||
4413 | rp->max_burst = 4096; | ||
4414 | } | ||
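| ||| 
| /* E.g. a standard 1500-byte MTU yields max_burst = 1532, while a | ||
|  * 9000-byte jumbo MTU is capped at the 4096-byte ceiling. | ||
|  */ | ||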
4415 | |||
4416 | static int niu_alloc_tx_ring_info(struct niu *np, | ||
4417 | struct tx_ring_info *rp) | ||
4418 | { | ||
4419 | BUILD_BUG_ON(sizeof(struct txdma_mailbox) != 64); | ||
4420 | |||
4421 | rp->mbox = np->ops->alloc_coherent(np->device, | ||
4422 | sizeof(struct txdma_mailbox), | ||
4423 | &rp->mbox_dma, GFP_KERNEL); | ||
4424 | if (!rp->mbox) | ||
4425 | return -ENOMEM; | ||
4426 | if ((unsigned long)rp->mbox & (64UL - 1)) { | ||
4427 | netdev_err(np->dev, "Coherent alloc gives misaligned TXDMA mailbox %p\n", | ||
4428 | rp->mbox); | ||
4429 | return -EINVAL; | ||
4430 | } | ||
4431 | |||
4432 | rp->descr = np->ops->alloc_coherent(np->device, | ||
4433 | MAX_TX_RING_SIZE * sizeof(__le64), | ||
4434 | &rp->descr_dma, GFP_KERNEL); | ||
4435 | if (!rp->descr) | ||
4436 | return -ENOMEM; | ||
4437 | if ((unsigned long)rp->descr & (64UL - 1)) { | ||
4438 | netdev_err(np->dev, "Coherent alloc gives misaligned TXDMA descr table %p\n", | ||
4439 | rp->descr); | ||
4440 | return -EINVAL; | ||
4441 | } | ||
4442 | |||
4443 | rp->pending = MAX_TX_RING_SIZE; | ||
4444 | rp->prod = 0; | ||
4445 | rp->cons = 0; | ||
4446 | rp->wrap_bit = 0; | ||
4447 | |||
4448 | /* XXX make these configurable... XXX */ | ||
4449 | rp->mark_freq = rp->pending / 4; | ||
4450 | |||
4451 | niu_set_max_burst(np, rp); | ||
4452 | |||
4453 | return 0; | ||
4454 | } | ||
4455 | |||
4456 | static void niu_size_rbr(struct niu *np, struct rx_ring_info *rp) | ||
4457 | { | ||
4458 | u16 bss; | ||
4459 | |||
4460 | bss = min(PAGE_SHIFT, 15); | ||
4461 | |||
4462 | rp->rbr_block_size = 1 << bss; | ||
4463 | rp->rbr_blocks_per_page = 1 << (PAGE_SHIFT-bss); | ||
4464 | |||
4465 | rp->rbr_sizes[0] = 256; | ||
4466 | rp->rbr_sizes[1] = 1024; | ||
4467 | if (np->dev->mtu > ETH_DATA_LEN) { | ||
4468 | switch (PAGE_SIZE) { | ||
4469 | case 4 * 1024: | ||
4470 | rp->rbr_sizes[2] = 4096; | ||
4471 | break; | ||
4472 | |||
4473 | default: | ||
4474 | rp->rbr_sizes[2] = 8192; | ||
4475 | break; | ||
4476 | } | ||
4477 | } else { | ||
4478 | rp->rbr_sizes[2] = 2048; | ||
4479 | } | ||
4480 | rp->rbr_sizes[3] = rp->rbr_block_size; | ||
4481 | } | ||
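| ||| 
| /* E.g. on a system with 4 KB pages and a standard MTU this yields a | ||
|  * 4096-byte block, one block per page, and buffer sizes of 256, 1024, | ||
|  * 2048 and 4096 bytes for the four RBR size slots. | ||
|  */ | ||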
4482 | |||
4483 | static int niu_alloc_channels(struct niu *np) | ||
4484 | { | ||
4485 | struct niu_parent *parent = np->parent; | ||
4486 | int first_rx_channel, first_tx_channel; | ||
4487 | int num_rx_rings, num_tx_rings; | ||
4488 | struct rx_ring_info *rx_rings; | ||
4489 | struct tx_ring_info *tx_rings; | ||
4490 | int i, port, err; | ||
4491 | |||
4492 | port = np->port; | ||
4493 | first_rx_channel = first_tx_channel = 0; | ||
4494 | for (i = 0; i < port; i++) { | ||
4495 | first_rx_channel += parent->rxchan_per_port[i]; | ||
4496 | first_tx_channel += parent->txchan_per_port[i]; | ||
4497 | } | ||
4498 | |||
4499 | num_rx_rings = parent->rxchan_per_port[port]; | ||
4500 | num_tx_rings = parent->txchan_per_port[port]; | ||
4501 | |||
4502 | rx_rings = kcalloc(num_rx_rings, sizeof(struct rx_ring_info), | ||
4503 | GFP_KERNEL); | ||
4504 | err = -ENOMEM; | ||
4505 | if (!rx_rings) | ||
4506 | goto out_err; | ||
4507 | |||
4508 | np->num_rx_rings = num_rx_rings; | ||
4509 | smp_wmb(); | ||
4510 | np->rx_rings = rx_rings; | ||
4511 | |||
4512 | netif_set_real_num_rx_queues(np->dev, num_rx_rings); | ||
4513 | |||
4514 | for (i = 0; i < np->num_rx_rings; i++) { | ||
4515 | struct rx_ring_info *rp = &np->rx_rings[i]; | ||
4516 | |||
4517 | rp->np = np; | ||
4518 | rp->rx_channel = first_rx_channel + i; | ||
4519 | |||
4520 | err = niu_alloc_rx_ring_info(np, rp); | ||
4521 | if (err) | ||
4522 | goto out_err; | ||
4523 | |||
4524 | niu_size_rbr(np, rp); | ||
4525 | |||
4526 | /* XXX better defaults, configurable, etc... XXX */ | ||
4527 | rp->nonsyn_window = 64; | ||
4528 | rp->nonsyn_threshold = rp->rcr_table_size - 64; | ||
4529 | rp->syn_window = 64; | ||
4530 | rp->syn_threshold = rp->rcr_table_size - 64; | ||
4531 | rp->rcr_pkt_threshold = 16; | ||
4532 | rp->rcr_timeout = 8; | ||
4533 | rp->rbr_kick_thresh = RBR_REFILL_MIN; | ||
4534 | if (rp->rbr_kick_thresh < rp->rbr_blocks_per_page) | ||
4535 | rp->rbr_kick_thresh = rp->rbr_blocks_per_page; | ||
4536 | |||
4537 | err = niu_rbr_fill(np, rp, GFP_KERNEL); | ||
4538 | if (err) | ||
4539 | goto out_err; /* free the partially allocated rings like the other failure paths */ | ||
4540 | } | ||
4541 | |||
4542 | tx_rings = kcalloc(num_tx_rings, sizeof(struct tx_ring_info), | ||
4543 | GFP_KERNEL); | ||
4544 | err = -ENOMEM; | ||
4545 | if (!tx_rings) | ||
4546 | goto out_err; | ||
4547 | |||
4548 | np->num_tx_rings = num_tx_rings; | ||
4549 | smp_wmb(); | ||
4550 | np->tx_rings = tx_rings; | ||
4551 | |||
4552 | netif_set_real_num_tx_queues(np->dev, num_tx_rings); | ||
4553 | |||
4554 | for (i = 0; i < np->num_tx_rings; i++) { | ||
4555 | struct tx_ring_info *rp = &np->tx_rings[i]; | ||
4556 | |||
4557 | rp->np = np; | ||
4558 | rp->tx_channel = first_tx_channel + i; | ||
4559 | |||
4560 | err = niu_alloc_tx_ring_info(np, rp); | ||
4561 | if (err) | ||
4562 | goto out_err; | ||
4563 | } | ||
4564 | |||
4565 | return 0; | ||
4566 | |||
4567 | out_err: | ||
4568 | niu_free_channels(np); | ||
4569 | return err; | ||
4570 | } | ||
4571 | |||
4572 | static int niu_tx_cs_sng_poll(struct niu *np, int channel) | ||
4573 | { | ||
4574 | int limit = 1000; | ||
4575 | |||
4576 | while (--limit > 0) { | ||
4577 | u64 val = nr64(TX_CS(channel)); | ||
4578 | if (val & TX_CS_SNG_STATE) | ||
4579 | return 0; | ||
4580 | } | ||
4581 | return -ENODEV; | ||
4582 | } | ||
4583 | |||
4584 | static int niu_tx_channel_stop(struct niu *np, int channel) | ||
4585 | { | ||
4586 | u64 val = nr64(TX_CS(channel)); | ||
4587 | |||
4588 | val |= TX_CS_STOP_N_GO; | ||
4589 | nw64(TX_CS(channel), val); | ||
4590 | |||
4591 | return niu_tx_cs_sng_poll(np, channel); | ||
4592 | } | ||
4593 | |||
4594 | static int niu_tx_cs_reset_poll(struct niu *np, int channel) | ||
4595 | { | ||
4596 | int limit = 1000; | ||
4597 | |||
4598 | while (--limit > 0) { | ||
4599 | u64 val = nr64(TX_CS(channel)); | ||
4600 | if (!(val & TX_CS_RST)) | ||
4601 | return 0; | ||
4602 | } | ||
4603 | return -ENODEV; | ||
4604 | } | ||
4605 | |||
4606 | static int niu_tx_channel_reset(struct niu *np, int channel) | ||
4607 | { | ||
4608 | u64 val = nr64(TX_CS(channel)); | ||
4609 | int err; | ||
4610 | |||
4611 | val |= TX_CS_RST; | ||
4612 | nw64(TX_CS(channel), val); | ||
4613 | |||
4614 | err = niu_tx_cs_reset_poll(np, channel); | ||
4615 | if (!err) | ||
4616 | nw64(TX_RING_KICK(channel), 0); | ||
4617 | |||
4618 | return err; | ||
4619 | } | ||
4620 | |||
4621 | static int niu_tx_channel_lpage_init(struct niu *np, int channel) | ||
4622 | { | ||
4623 | u64 val; | ||
4624 | |||
4625 | nw64(TX_LOG_MASK1(channel), 0); | ||
4626 | nw64(TX_LOG_VAL1(channel), 0); | ||
4627 | nw64(TX_LOG_MASK2(channel), 0); | ||
4628 | nw64(TX_LOG_VAL2(channel), 0); | ||
4629 | nw64(TX_LOG_PAGE_RELO1(channel), 0); | ||
4630 | nw64(TX_LOG_PAGE_RELO2(channel), 0); | ||
4631 | nw64(TX_LOG_PAGE_HDL(channel), 0); | ||
4632 | |||
4633 | val = (u64)np->port << TX_LOG_PAGE_VLD_FUNC_SHIFT; | ||
4634 | val |= (TX_LOG_PAGE_VLD_PAGE0 | TX_LOG_PAGE_VLD_PAGE1); | ||
4635 | nw64(TX_LOG_PAGE_VLD(channel), val); | ||
4636 | |||
4637 | /* XXX TXDMA 32bit mode? XXX */ | ||
4638 | |||
4639 | return 0; | ||
4640 | } | ||
4641 | |||
4642 | static void niu_txc_enable_port(struct niu *np, int on) | ||
4643 | { | ||
4644 | unsigned long flags; | ||
4645 | u64 val, mask; | ||
4646 | |||
4647 | niu_lock_parent(np, flags); | ||
4648 | val = nr64(TXC_CONTROL); | ||
4649 | mask = (u64)1 << np->port; | ||
4650 | if (on) { | ||
4651 | val |= TXC_CONTROL_ENABLE | mask; | ||
4652 | } else { | ||
4653 | val &= ~mask; | ||
4654 | if ((val & ~TXC_CONTROL_ENABLE) == 0) | ||
4655 | val &= ~TXC_CONTROL_ENABLE; | ||
4656 | } | ||
4657 | nw64(TXC_CONTROL, val); | ||
4658 | niu_unlock_parent(np, flags); | ||
4659 | } | ||
4660 | |||
4661 | static void niu_txc_set_imask(struct niu *np, u64 imask) | ||
4662 | { | ||
4663 | unsigned long flags; | ||
4664 | u64 val; | ||
4665 | |||
4666 | niu_lock_parent(np, flags); | ||
4667 | val = nr64(TXC_INT_MASK); | ||
4668 | val &= ~TXC_INT_MASK_VAL(np->port); | ||
4669 | val |= (imask << TXC_INT_MASK_VAL_SHIFT(np->port)); | ||
| nw64(TXC_INT_MASK, val); /* commit the updated per-port interrupt mask */ | ||
4670 | niu_unlock_parent(np, flags); | ||
4671 | } | ||
4672 | |||
4673 | static void niu_txc_port_dma_enable(struct niu *np, int on) | ||
4674 | { | ||
4675 | u64 val = 0; | ||
4676 | |||
4677 | if (on) { | ||
4678 | int i; | ||
4679 | |||
4680 | for (i = 0; i < np->num_tx_rings; i++) | ||
4681 | val |= (1 << np->tx_rings[i].tx_channel); | ||
4682 | } | ||
4683 | nw64(TXC_PORT_DMA(np->port), val); | ||
4684 | } | ||
4685 | |||
4686 | static int niu_init_one_tx_channel(struct niu *np, struct tx_ring_info *rp) | ||
4687 | { | ||
4688 | int err, channel = rp->tx_channel; | ||
4689 | u64 val, ring_len; | ||
4690 | |||
4691 | err = niu_tx_channel_stop(np, channel); | ||
4692 | if (err) | ||
4693 | return err; | ||
4694 | |||
4695 | err = niu_tx_channel_reset(np, channel); | ||
4696 | if (err) | ||
4697 | return err; | ||
4698 | |||
4699 | err = niu_tx_channel_lpage_init(np, channel); | ||
4700 | if (err) | ||
4701 | return err; | ||
4702 | |||
4703 | nw64(TXC_DMA_MAX(channel), rp->max_burst); | ||
4704 | nw64(TX_ENT_MSK(channel), 0); | ||
4705 | |||
4706 | if (rp->descr_dma & ~(TX_RNG_CFIG_STADDR_BASE | | ||
4707 | TX_RNG_CFIG_STADDR)) { | ||
4708 | netdev_err(np->dev, "TX ring channel %d DMA addr (%llx) is not aligned\n", | ||
4709 | channel, (unsigned long long)rp->descr_dma); | ||
4710 | return -EINVAL; | ||
4711 | } | ||
4712 | |||
4713 | /* The length field in TX_RNG_CFIG is measured in 64-byte | ||
4714 | * blocks. rp->pending is the number of TX descriptors in | ||
4715 | * our ring, 8 bytes each, thus we divide by 8 bytes more | ||
4716 | * to get the proper value the chip wants. | ||
4717 | */ | ||
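| /* E.g. a 256-entry ring occupies 256 * 8 = 2048 bytes, which is | ||
|  * 2048 / 64 = 32 blocks, i.e. exactly 256 / 8. | ||
|  */ | ||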
4718 | ring_len = (rp->pending / 8); | ||
4719 | |||
4720 | val = ((ring_len << TX_RNG_CFIG_LEN_SHIFT) | | ||
4721 | rp->descr_dma); | ||
4722 | nw64(TX_RNG_CFIG(channel), val); | ||
4723 | |||
4724 | if (((rp->mbox_dma >> 32) & ~TXDMA_MBH_MBADDR) || | ||
4725 | ((u32)rp->mbox_dma & ~TXDMA_MBL_MBADDR)) { | ||
4726 | netdev_err(np->dev, "TX ring channel %d MBOX addr (%llx) has invalid bits\n", | ||
4727 | channel, (unsigned long long)rp->mbox_dma); | ||
4728 | return -EINVAL; | ||
4729 | } | ||
4730 | nw64(TXDMA_MBH(channel), rp->mbox_dma >> 32); | ||
4731 | nw64(TXDMA_MBL(channel), rp->mbox_dma & TXDMA_MBL_MBADDR); | ||
4732 | |||
4733 | nw64(TX_CS(channel), 0); | ||
4734 | |||
4735 | rp->last_pkt_cnt = 0; | ||
4736 | |||
4737 | return 0; | ||
4738 | } | ||
4739 | |||
4740 | static void niu_init_rdc_groups(struct niu *np) | ||
4741 | { | ||
4742 | struct niu_rdc_tables *tp = &np->parent->rdc_group_cfg[np->port]; | ||
4743 | int i, first_table_num = tp->first_table_num; | ||
4744 | |||
4745 | for (i = 0; i < tp->num_tables; i++) { | ||
4746 | struct rdc_table *tbl = &tp->tables[i]; | ||
4747 | int this_table = first_table_num + i; | ||
4748 | int slot; | ||
4749 | |||
4750 | for (slot = 0; slot < NIU_RDC_TABLE_SLOTS; slot++) | ||
4751 | nw64(RDC_TBL(this_table, slot), | ||
4752 | tbl->rxdma_channel[slot]); | ||
4753 | } | ||
4754 | |||
4755 | nw64(DEF_RDC(np->port), np->parent->rdc_default[np->port]); | ||
4756 | } | ||
4757 | |||
4758 | static void niu_init_drr_weight(struct niu *np) | ||
4759 | { | ||
4760 | int type = phy_decode(np->parent->port_phy, np->port); | ||
4761 | u64 val; | ||
4762 | |||
4763 | switch (type) { | ||
4764 | case PORT_TYPE_10G: | ||
4765 | val = PT_DRR_WEIGHT_DEFAULT_10G; | ||
4766 | break; | ||
4767 | |||
4768 | case PORT_TYPE_1G: | ||
4769 | default: | ||
4770 | val = PT_DRR_WEIGHT_DEFAULT_1G; | ||
4771 | break; | ||
4772 | } | ||
4773 | nw64(PT_DRR_WT(np->port), val); | ||
4774 | } | ||
4775 | |||
4776 | static int niu_init_hostinfo(struct niu *np) | ||
4777 | { | ||
4778 | struct niu_parent *parent = np->parent; | ||
4779 | struct niu_rdc_tables *tp = &parent->rdc_group_cfg[np->port]; | ||
4780 | int i, err, num_alt = niu_num_alt_addr(np); | ||
4781 | int first_rdc_table = tp->first_table_num; | ||
4782 | |||
4783 | err = niu_set_primary_mac_rdc_table(np, first_rdc_table, 1); | ||
4784 | if (err) | ||
4785 | return err; | ||
4786 | |||
4787 | err = niu_set_multicast_mac_rdc_table(np, first_rdc_table, 1); | ||
4788 | if (err) | ||
4789 | return err; | ||
4790 | |||
4791 | for (i = 0; i < num_alt; i++) { | ||
4792 | err = niu_set_alt_mac_rdc_table(np, i, first_rdc_table, 1); | ||
4793 | if (err) | ||
4794 | return err; | ||
4795 | } | ||
4796 | |||
4797 | return 0; | ||
4798 | } | ||
4799 | |||
4800 | static int niu_rx_channel_reset(struct niu *np, int channel) | ||
4801 | { | ||
4802 | return niu_set_and_wait_clear(np, RXDMA_CFIG1(channel), | ||
4803 | RXDMA_CFIG1_RST, 1000, 10, | ||
4804 | "RXDMA_CFIG1"); | ||
4805 | } | ||
4806 | |||
4807 | static int niu_rx_channel_lpage_init(struct niu *np, int channel) | ||
4808 | { | ||
4809 | u64 val; | ||
4810 | |||
4811 | nw64(RX_LOG_MASK1(channel), 0); | ||
4812 | nw64(RX_LOG_VAL1(channel), 0); | ||
4813 | nw64(RX_LOG_MASK2(channel), 0); | ||
4814 | nw64(RX_LOG_VAL2(channel), 0); | ||
4815 | nw64(RX_LOG_PAGE_RELO1(channel), 0); | ||
4816 | nw64(RX_LOG_PAGE_RELO2(channel), 0); | ||
4817 | nw64(RX_LOG_PAGE_HDL(channel), 0); | ||
4818 | |||
4819 | val = (u64)np->port << RX_LOG_PAGE_VLD_FUNC_SHIFT; | ||
4820 | val |= (RX_LOG_PAGE_VLD_PAGE0 | RX_LOG_PAGE_VLD_PAGE1); | ||
4821 | nw64(RX_LOG_PAGE_VLD(channel), val); | ||
4822 | |||
4823 | return 0; | ||
4824 | } | ||
4825 | |||
4826 | static void niu_rx_channel_wred_init(struct niu *np, struct rx_ring_info *rp) | ||
4827 | { | ||
4828 | u64 val; | ||
4829 | |||
4830 | val = (((u64)rp->nonsyn_window << RDC_RED_PARA_WIN_SHIFT) | | ||
4831 | ((u64)rp->nonsyn_threshold << RDC_RED_PARA_THRE_SHIFT) | | ||
4832 | ((u64)rp->syn_window << RDC_RED_PARA_WIN_SYN_SHIFT) | | ||
4833 | ((u64)rp->syn_threshold << RDC_RED_PARA_THRE_SYN_SHIFT)); | ||
4834 | nw64(RDC_RED_PARA(rp->rx_channel), val); | ||
4835 | } | ||
4836 | |||
4837 | static int niu_compute_rbr_cfig_b(struct rx_ring_info *rp, u64 *ret) | ||
4838 | { | ||
4839 | u64 val = 0; | ||
4840 | |||
4841 | *ret = 0; | ||
4842 | switch (rp->rbr_block_size) { | ||
4843 | case 4 * 1024: | ||
4844 | val |= (RBR_BLKSIZE_4K << RBR_CFIG_B_BLKSIZE_SHIFT); | ||
4845 | break; | ||
4846 | case 8 * 1024: | ||
4847 | val |= (RBR_BLKSIZE_8K << RBR_CFIG_B_BLKSIZE_SHIFT); | ||
4848 | break; | ||
4849 | case 16 * 1024: | ||
4850 | val |= (RBR_BLKSIZE_16K << RBR_CFIG_B_BLKSIZE_SHIFT); | ||
4851 | break; | ||
4852 | case 32 * 1024: | ||
4853 | val |= (RBR_BLKSIZE_32K << RBR_CFIG_B_BLKSIZE_SHIFT); | ||
4854 | break; | ||
4855 | default: | ||
4856 | return -EINVAL; | ||
4857 | } | ||
4858 | val |= RBR_CFIG_B_VLD2; | ||
4859 | switch (rp->rbr_sizes[2]) { | ||
4860 | case 2 * 1024: | ||
4861 | val |= (RBR_BUFSZ2_2K << RBR_CFIG_B_BUFSZ2_SHIFT); | ||
4862 | break; | ||
4863 | case 4 * 1024: | ||
4864 | val |= (RBR_BUFSZ2_4K << RBR_CFIG_B_BUFSZ2_SHIFT); | ||
4865 | break; | ||
4866 | case 8 * 1024: | ||
4867 | val |= (RBR_BUFSZ2_8K << RBR_CFIG_B_BUFSZ2_SHIFT); | ||
4868 | break; | ||
4869 | case 16 * 1024: | ||
4870 | val |= (RBR_BUFSZ2_16K << RBR_CFIG_B_BUFSZ2_SHIFT); | ||
4871 | break; | ||
4872 | |||
4873 | default: | ||
4874 | return -EINVAL; | ||
4875 | } | ||
4876 | val |= RBR_CFIG_B_VLD1; | ||
4877 | switch (rp->rbr_sizes[1]) { | ||
4878 | case 1 * 1024: | ||
4879 | val |= (RBR_BUFSZ1_1K << RBR_CFIG_B_BUFSZ1_SHIFT); | ||
4880 | break; | ||
4881 | case 2 * 1024: | ||
4882 | val |= (RBR_BUFSZ1_2K << RBR_CFIG_B_BUFSZ1_SHIFT); | ||
4883 | break; | ||
4884 | case 4 * 1024: | ||
4885 | val |= (RBR_BUFSZ1_4K << RBR_CFIG_B_BUFSZ1_SHIFT); | ||
4886 | break; | ||
4887 | case 8 * 1024: | ||
4888 | val |= (RBR_BUFSZ1_8K << RBR_CFIG_B_BUFSZ1_SHIFT); | ||
4889 | break; | ||
4890 | |||
4891 | default: | ||
4892 | return -EINVAL; | ||
4893 | } | ||
4894 | val |= RBR_CFIG_B_VLD0; | ||
4895 | switch (rp->rbr_sizes[0]) { | ||
4896 | case 256: | ||
4897 | val |= (RBR_BUFSZ0_256 << RBR_CFIG_B_BUFSZ0_SHIFT); | ||
4898 | break; | ||
4899 | case 512: | ||
4900 | val |= (RBR_BUFSZ0_512 << RBR_CFIG_B_BUFSZ0_SHIFT); | ||
4901 | break; | ||
4902 | case 1 * 1024: | ||
4903 | val |= (RBR_BUFSZ0_1K << RBR_CFIG_B_BUFSZ0_SHIFT); | ||
4904 | break; | ||
4905 | case 2 * 1024: | ||
4906 | val |= (RBR_BUFSZ0_2K << RBR_CFIG_B_BUFSZ0_SHIFT); | ||
4907 | break; | ||
4908 | |||
4909 | default: | ||
4910 | return -EINVAL; | ||
4911 | } | ||
4912 | |||
4913 | *ret = val; | ||
4914 | return 0; | ||
4915 | } | ||
4916 | |||
4917 | static int niu_enable_rx_channel(struct niu *np, int channel, int on) | ||
4918 | { | ||
4919 | u64 val = nr64(RXDMA_CFIG1(channel)); | ||
4920 | int limit; | ||
4921 | |||
4922 | if (on) | ||
4923 | val |= RXDMA_CFIG1_EN; | ||
4924 | else | ||
4925 | val &= ~RXDMA_CFIG1_EN; | ||
4926 | nw64(RXDMA_CFIG1(channel), val); | ||
4927 | |||
4928 | limit = 1000; | ||
4929 | while (--limit > 0) { | ||
4930 | if (nr64(RXDMA_CFIG1(channel)) & RXDMA_CFIG1_QST) | ||
4931 | break; | ||
4932 | udelay(10); | ||
4933 | } | ||
4934 | if (limit <= 0) | ||
4935 | return -ENODEV; | ||
4936 | return 0; | ||
4937 | } | ||
4938 | |||
4939 | static int niu_init_one_rx_channel(struct niu *np, struct rx_ring_info *rp) | ||
4940 | { | ||
4941 | int err, channel = rp->rx_channel; | ||
4942 | u64 val; | ||
4943 | |||
4944 | err = niu_rx_channel_reset(np, channel); | ||
4945 | if (err) | ||
4946 | return err; | ||
4947 | |||
4948 | err = niu_rx_channel_lpage_init(np, channel); | ||
4949 | if (err) | ||
4950 | return err; | ||
4951 | |||
4952 | niu_rx_channel_wred_init(np, rp); | ||
4953 | |||
4954 | nw64(RX_DMA_ENT_MSK(channel), RX_DMA_ENT_MSK_RBR_EMPTY); | ||
4955 | nw64(RX_DMA_CTL_STAT(channel), | ||
4956 | (RX_DMA_CTL_STAT_MEX | | ||
4957 | RX_DMA_CTL_STAT_RCRTHRES | | ||
4958 | RX_DMA_CTL_STAT_RCRTO | | ||
4959 | RX_DMA_CTL_STAT_RBR_EMPTY)); | ||
4960 | nw64(RXDMA_CFIG1(channel), rp->mbox_dma >> 32); | ||
4961 | nw64(RXDMA_CFIG2(channel), | ||
4962 | ((rp->mbox_dma & RXDMA_CFIG2_MBADDR_L) | | ||
4963 | RXDMA_CFIG2_FULL_HDR)); | ||
4964 | nw64(RBR_CFIG_A(channel), | ||
4965 | ((u64)rp->rbr_table_size << RBR_CFIG_A_LEN_SHIFT) | | ||
4966 | (rp->rbr_dma & (RBR_CFIG_A_STADDR_BASE | RBR_CFIG_A_STADDR))); | ||
4967 | err = niu_compute_rbr_cfig_b(rp, &val); | ||
4968 | if (err) | ||
4969 | return err; | ||
4970 | nw64(RBR_CFIG_B(channel), val); | ||
4971 | nw64(RCRCFIG_A(channel), | ||
4972 | ((u64)rp->rcr_table_size << RCRCFIG_A_LEN_SHIFT) | | ||
4973 | (rp->rcr_dma & (RCRCFIG_A_STADDR_BASE | RCRCFIG_A_STADDR))); | ||
4974 | nw64(RCRCFIG_B(channel), | ||
4975 | ((u64)rp->rcr_pkt_threshold << RCRCFIG_B_PTHRES_SHIFT) | | ||
4976 | RCRCFIG_B_ENTOUT | | ||
4977 | ((u64)rp->rcr_timeout << RCRCFIG_B_TIMEOUT_SHIFT)); | ||
4978 | |||
4979 | err = niu_enable_rx_channel(np, channel, 1); | ||
4980 | if (err) | ||
4981 | return err; | ||
4982 | |||
4983 | nw64(RBR_KICK(channel), rp->rbr_index); | ||
4984 | |||
4985 | val = nr64(RX_DMA_CTL_STAT(channel)); | ||
4986 | val |= RX_DMA_CTL_STAT_RBR_EMPTY; | ||
4987 | nw64(RX_DMA_CTL_STAT(channel), val); | ||
4988 | |||
4989 | return 0; | ||
4990 | } | ||
4991 | |||
4992 | static int niu_init_rx_channels(struct niu *np) | ||
4993 | { | ||
4994 | unsigned long flags; | ||
4995 | u64 seed = jiffies_64; | ||
4996 | int err, i; | ||
4997 | |||
4998 | niu_lock_parent(np, flags); | ||
4999 | nw64(RX_DMA_CK_DIV, np->parent->rxdma_clock_divider); | ||
5000 | nw64(RED_RAN_INIT, RED_RAN_INIT_OPMODE | (seed & RED_RAN_INIT_VAL)); | ||
5001 | niu_unlock_parent(np, flags); | ||
5002 | |||
5003 | /* XXX RXDMA 32bit mode? XXX */ | ||
5004 | |||
5005 | niu_init_rdc_groups(np); | ||
5006 | niu_init_drr_weight(np); | ||
5007 | |||
5008 | err = niu_init_hostinfo(np); | ||
5009 | if (err) | ||
5010 | return err; | ||
5011 | |||
5012 | for (i = 0; i < np->num_rx_rings; i++) { | ||
5013 | struct rx_ring_info *rp = &np->rx_rings[i]; | ||
5014 | |||
5015 | err = niu_init_one_rx_channel(np, rp); | ||
5016 | if (err) | ||
5017 | return err; | ||
5018 | } | ||
5019 | |||
5020 | return 0; | ||
5021 | } | ||
5022 | |||
5023 | static int niu_set_ip_frag_rule(struct niu *np) | ||
5024 | { | ||
5025 | struct niu_parent *parent = np->parent; | ||
5026 | struct niu_classifier *cp = &np->clas; | ||
5027 | struct niu_tcam_entry *tp; | ||
5028 | int index, err; | ||
5029 | |||
5030 | index = cp->tcam_top; | ||
5031 | tp = &parent->tcam[index]; | ||
5032 | |||
5033 | /* Note that the noport bit is the same in both ipv4 and | ||
5034 | * ipv6 format TCAM entries. | ||
5035 | */ | ||
5036 | memset(tp, 0, sizeof(*tp)); | ||
5037 | tp->key[1] = TCAM_V4KEY1_NOPORT; | ||
5038 | tp->key_mask[1] = TCAM_V4KEY1_NOPORT; | ||
5039 | tp->assoc_data = (TCAM_ASSOCDATA_TRES_USE_OFFSET | | ||
5040 | ((u64)0 << TCAM_ASSOCDATA_OFFSET_SHIFT)); | ||
5041 | err = tcam_write(np, index, tp->key, tp->key_mask); | ||
5042 | if (err) | ||
5043 | return err; | ||
5044 | err = tcam_assoc_write(np, index, tp->assoc_data); | ||
5045 | if (err) | ||
5046 | return err; | ||
5047 | tp->valid = 1; | ||
5048 | cp->tcam_valid_entries++; | ||
5049 | |||
5050 | return 0; | ||
5051 | } | ||
5052 | |||
5053 | static int niu_init_classifier_hw(struct niu *np) | ||
5054 | { | ||
5055 | struct niu_parent *parent = np->parent; | ||
5056 | struct niu_classifier *cp = &np->clas; | ||
5057 | int i, err; | ||
5058 | |||
5059 | nw64(H1POLY, cp->h1_init); | ||
5060 | nw64(H2POLY, cp->h2_init); | ||
5061 | |||
5062 | err = niu_init_hostinfo(np); | ||
5063 | if (err) | ||
5064 | return err; | ||
5065 | |||
5066 | for (i = 0; i < ENET_VLAN_TBL_NUM_ENTRIES; i++) { | ||
5067 | struct niu_vlan_rdc *vp = &cp->vlan_mappings[i]; | ||
5068 | |||
5069 | vlan_tbl_write(np, i, np->port, | ||
5070 | vp->vlan_pref, vp->rdc_num); | ||
5071 | } | ||
5072 | |||
5073 | for (i = 0; i < cp->num_alt_mac_mappings; i++) { | ||
5074 | struct niu_altmac_rdc *ap = &cp->alt_mac_mappings[i]; | ||
5075 | |||
5076 | err = niu_set_alt_mac_rdc_table(np, ap->alt_mac_num, | ||
5077 | ap->rdc_num, ap->mac_pref); | ||
5078 | if (err) | ||
5079 | return err; | ||
5080 | } | ||
5081 | |||
5082 | for (i = CLASS_CODE_USER_PROG1; i <= CLASS_CODE_SCTP_IPV6; i++) { | ||
5083 | int index = i - CLASS_CODE_USER_PROG1; | ||
5084 | |||
5085 | err = niu_set_tcam_key(np, i, parent->tcam_key[index]); | ||
5086 | if (err) | ||
5087 | return err; | ||
5088 | err = niu_set_flow_key(np, i, parent->flow_key[index]); | ||
5089 | if (err) | ||
5090 | return err; | ||
5091 | } | ||
5092 | |||
5093 | err = niu_set_ip_frag_rule(np); | ||
5094 | if (err) | ||
5095 | return err; | ||
5096 | |||
5097 | tcam_enable(np, 1); | ||
5098 | |||
5099 | return 0; | ||
5100 | } | ||
5101 | |||
5102 | static int niu_zcp_write(struct niu *np, int index, u64 *data) | ||
5103 | { | ||
5104 | nw64(ZCP_RAM_DATA0, data[0]); | ||
5105 | nw64(ZCP_RAM_DATA1, data[1]); | ||
5106 | nw64(ZCP_RAM_DATA2, data[2]); | ||
5107 | nw64(ZCP_RAM_DATA3, data[3]); | ||
5108 | nw64(ZCP_RAM_DATA4, data[4]); | ||
5109 | nw64(ZCP_RAM_BE, ZCP_RAM_BE_VAL); | ||
5110 | nw64(ZCP_RAM_ACC, | ||
5111 | (ZCP_RAM_ACC_WRITE | | ||
5112 | (0 << ZCP_RAM_ACC_ZFCID_SHIFT) | | ||
5113 | (ZCP_RAM_SEL_CFIFO(np->port) << ZCP_RAM_ACC_RAM_SEL_SHIFT))); | ||
5114 | |||
5115 | return niu_wait_bits_clear(np, ZCP_RAM_ACC, ZCP_RAM_ACC_BUSY, | ||
5116 | 1000, 100); | ||
5117 | } | ||
5118 | |||
5119 | static int niu_zcp_read(struct niu *np, int index, u64 *data) | ||
5120 | { | ||
5121 | int err; | ||
5122 | |||
5123 | err = niu_wait_bits_clear(np, ZCP_RAM_ACC, ZCP_RAM_ACC_BUSY, | ||
5124 | 1000, 100); | ||
5125 | if (err) { | ||
5126 | netdev_err(np->dev, "ZCP read busy won't clear, ZCP_RAM_ACC[%llx]\n", | ||
5127 | (unsigned long long)nr64(ZCP_RAM_ACC)); | ||
5128 | return err; | ||
5129 | } | ||
5130 | |||
5131 | nw64(ZCP_RAM_ACC, | ||
5132 | (ZCP_RAM_ACC_READ | | ||
5133 | (0 << ZCP_RAM_ACC_ZFCID_SHIFT) | | ||
5134 | (ZCP_RAM_SEL_CFIFO(np->port) << ZCP_RAM_ACC_RAM_SEL_SHIFT))); | ||
5135 | |||
5136 | err = niu_wait_bits_clear(np, ZCP_RAM_ACC, ZCP_RAM_ACC_BUSY, | ||
5137 | 1000, 100); | ||
5138 | if (err) { | ||
5139 | netdev_err(np->dev, "ZCP read busy2 won't clear, ZCP_RAM_ACC[%llx]\n", | ||
5140 | (unsigned long long)nr64(ZCP_RAM_ACC)); | ||
5141 | return err; | ||
5142 | } | ||
5143 | |||
5144 | data[0] = nr64(ZCP_RAM_DATA0); | ||
5145 | data[1] = nr64(ZCP_RAM_DATA1); | ||
5146 | data[2] = nr64(ZCP_RAM_DATA2); | ||
5147 | data[3] = nr64(ZCP_RAM_DATA3); | ||
5148 | data[4] = nr64(ZCP_RAM_DATA4); | ||
5149 | |||
5150 | return 0; | ||
5151 | } | ||
5152 | |||
5153 | static void niu_zcp_cfifo_reset(struct niu *np) | ||
5154 | { | ||
5155 | u64 val = nr64(RESET_CFIFO); | ||
5156 | |||
5157 | val |= RESET_CFIFO_RST(np->port); | ||
5158 | nw64(RESET_CFIFO, val); | ||
5159 | udelay(10); | ||
5160 | |||
5161 | val &= ~RESET_CFIFO_RST(np->port); | ||
5162 | nw64(RESET_CFIFO, val); | ||
5163 | } | ||
5164 | |||
5165 | static int niu_init_zcp(struct niu *np) | ||
5166 | { | ||
5167 | u64 data[5], rbuf[5]; | ||
5168 | int i, max, err; | ||
5169 | |||
5170 | if (np->parent->plat_type != PLAT_TYPE_NIU) { | ||
5171 | if (np->port == 0 || np->port == 1) | ||
5172 | max = ATLAS_P0_P1_CFIFO_ENTRIES; | ||
5173 | else | ||
5174 | max = ATLAS_P2_P3_CFIFO_ENTRIES; | ||
5175 | } else { | ||
5176 | max = NIU_CFIFO_ENTRIES; | ||
| } | ||
5177 | |||
5178 | data[0] = 0; | ||
5179 | data[1] = 0; | ||
5180 | data[2] = 0; | ||
5181 | data[3] = 0; | ||
5182 | data[4] = 0; | ||
5183 | |||
5184 | for (i = 0; i < max; i++) { | ||
5185 | err = niu_zcp_write(np, i, data); | ||
5186 | if (err) | ||
5187 | return err; | ||
5188 | err = niu_zcp_read(np, i, rbuf); | ||
5189 | if (err) | ||
5190 | return err; | ||
5191 | } | ||
5192 | |||
5193 | niu_zcp_cfifo_reset(np); | ||
5194 | nw64(CFIFO_ECC(np->port), 0); | ||
5195 | nw64(ZCP_INT_STAT, ZCP_INT_STAT_ALL); | ||
5196 | (void) nr64(ZCP_INT_STAT); | ||
5197 | nw64(ZCP_INT_MASK, ZCP_INT_MASK_ALL); | ||
5198 | |||
5199 | return 0; | ||
5200 | } | ||
5201 | |||
5202 | static void niu_ipp_write(struct niu *np, int index, u64 *data) | ||
5203 | { | ||
5204 | u64 val = nr64_ipp(IPP_CFIG); | ||
5205 | |||
5206 | nw64_ipp(IPP_CFIG, val | IPP_CFIG_DFIFO_PIO_W); | ||
5207 | nw64_ipp(IPP_DFIFO_WR_PTR, index); | ||
5208 | nw64_ipp(IPP_DFIFO_WR0, data[0]); | ||
5209 | nw64_ipp(IPP_DFIFO_WR1, data[1]); | ||
5210 | nw64_ipp(IPP_DFIFO_WR2, data[2]); | ||
5211 | nw64_ipp(IPP_DFIFO_WR3, data[3]); | ||
5212 | nw64_ipp(IPP_DFIFO_WR4, data[4]); | ||
5213 | nw64_ipp(IPP_CFIG, val & ~IPP_CFIG_DFIFO_PIO_W); | ||
5214 | } | ||
5215 | |||
5216 | static void niu_ipp_read(struct niu *np, int index, u64 *data) | ||
5217 | { | ||
5218 | nw64_ipp(IPP_DFIFO_RD_PTR, index); | ||
5219 | data[0] = nr64_ipp(IPP_DFIFO_RD0); | ||
5220 | data[1] = nr64_ipp(IPP_DFIFO_RD1); | ||
5221 | data[2] = nr64_ipp(IPP_DFIFO_RD2); | ||
5222 | data[3] = nr64_ipp(IPP_DFIFO_RD3); | ||
5223 | data[4] = nr64_ipp(IPP_DFIFO_RD4); | ||
5224 | } | ||
5225 | |||
5226 | static int niu_ipp_reset(struct niu *np) | ||
5227 | { | ||
5228 | return niu_set_and_wait_clear_ipp(np, IPP_CFIG, IPP_CFIG_SOFT_RST, | ||
5229 | 1000, 100, "IPP_CFIG"); | ||
5230 | } | ||
5231 | |||
5232 | static int niu_init_ipp(struct niu *np) | ||
5233 | { | ||
5234 | u64 data[5], rbuf[5], val; | ||
5235 | int i, max, err; | ||
5236 | |||
5237 | if (np->parent->plat_type != PLAT_TYPE_NIU) { | ||
5238 | if (np->port == 0 || np->port == 1) | ||
5239 | max = ATLAS_P0_P1_DFIFO_ENTRIES; | ||
5240 | else | ||
5241 | max = ATLAS_P2_P3_DFIFO_ENTRIES; | ||
5242 | } else | ||
5243 | max = NIU_DFIFO_ENTRIES; | ||
5244 | |||
5245 | data[0] = 0; | ||
5246 | data[1] = 0; | ||
5247 | data[2] = 0; | ||
5248 | data[3] = 0; | ||
5249 | data[4] = 0; | ||
5250 | |||
5251 | for (i = 0; i < max; i++) { | ||
5252 | niu_ipp_write(np, i, data); | ||
5253 | niu_ipp_read(np, i, rbuf); | ||
5254 | } | ||
5255 | |||
5256 | (void) nr64_ipp(IPP_INT_STAT); | ||
5257 | (void) nr64_ipp(IPP_INT_STAT); | ||
5258 | |||
5259 | err = niu_ipp_reset(np); | ||
5260 | if (err) | ||
5261 | return err; | ||
5262 | |||
5263 | (void) nr64_ipp(IPP_PKT_DIS); | ||
5264 | (void) nr64_ipp(IPP_BAD_CS_CNT); | ||
5265 | (void) nr64_ipp(IPP_ECC); | ||
5266 | |||
5267 | (void) nr64_ipp(IPP_INT_STAT); | ||
5268 | |||
5269 | nw64_ipp(IPP_MSK, ~IPP_MSK_ALL); | ||
5270 | |||
5271 | val = nr64_ipp(IPP_CFIG); | ||
5272 | val &= ~IPP_CFIG_IP_MAX_PKT; | ||
5273 | val |= (IPP_CFIG_IPP_ENABLE | | ||
5274 | IPP_CFIG_DFIFO_ECC_EN | | ||
5275 | IPP_CFIG_DROP_BAD_CRC | | ||
5276 | IPP_CFIG_CKSUM_EN | | ||
5277 | (0x1ffff << IPP_CFIG_IP_MAX_PKT_SHIFT)); | ||
5278 | nw64_ipp(IPP_CFIG, val); | ||
5279 | |||
5280 | return 0; | ||
5281 | } | ||
5282 | |||
5283 | static void niu_handle_led(struct niu *np, int status) | ||
5284 | { | ||
5285 | u64 val; | ||
5286 | val = nr64_mac(XMAC_CONFIG); | ||
5287 | |||
5288 | if ((np->flags & NIU_FLAGS_10G) != 0 && | ||
5289 | (np->flags & NIU_FLAGS_FIBER) != 0) { | ||
5290 | if (status) { | ||
5291 | val |= XMAC_CONFIG_LED_POLARITY; | ||
5292 | val &= ~XMAC_CONFIG_FORCE_LED_ON; | ||
5293 | } else { | ||
5294 | val |= XMAC_CONFIG_FORCE_LED_ON; | ||
5295 | val &= ~XMAC_CONFIG_LED_POLARITY; | ||
5296 | } | ||
5297 | } | ||
5298 | |||
5299 | nw64_mac(XMAC_CONFIG, val); | ||
5300 | } | ||
5301 | |||
5302 | static void niu_init_xif_xmac(struct niu *np) | ||
5303 | { | ||
5304 | struct niu_link_config *lp = &np->link_config; | ||
5305 | u64 val; | ||
5306 | |||
5307 | if (np->flags & NIU_FLAGS_XCVR_SERDES) { | ||
5308 | val = nr64(MIF_CONFIG); | ||
5309 | val |= MIF_CONFIG_ATCA_GE; | ||
5310 | nw64(MIF_CONFIG, val); | ||
5311 | } | ||
5312 | |||
5313 | val = nr64_mac(XMAC_CONFIG); | ||
5314 | val &= ~XMAC_CONFIG_SEL_POR_CLK_SRC; | ||
5315 | |||
5316 | val |= XMAC_CONFIG_TX_OUTPUT_EN; | ||
5317 | |||
5318 | if (lp->loopback_mode == LOOPBACK_MAC) { | ||
5319 | val &= ~XMAC_CONFIG_SEL_POR_CLK_SRC; | ||
5320 | val |= XMAC_CONFIG_LOOPBACK; | ||
5321 | } else { | ||
5322 | val &= ~XMAC_CONFIG_LOOPBACK; | ||
5323 | } | ||
5324 | |||
5325 | if (np->flags & NIU_FLAGS_10G) { | ||
5326 | val &= ~XMAC_CONFIG_LFS_DISABLE; | ||
5327 | } else { | ||
5328 | val |= XMAC_CONFIG_LFS_DISABLE; | ||
5329 | if (!(np->flags & NIU_FLAGS_FIBER) && | ||
5330 | !(np->flags & NIU_FLAGS_XCVR_SERDES)) | ||
5331 | val |= XMAC_CONFIG_1G_PCS_BYPASS; | ||
5332 | else | ||
5333 | val &= ~XMAC_CONFIG_1G_PCS_BYPASS; | ||
5334 | } | ||
5335 | |||
5336 | val &= ~XMAC_CONFIG_10G_XPCS_BYPASS; | ||
5337 | |||
5338 | if (lp->active_speed == SPEED_100) | ||
5339 | val |= XMAC_CONFIG_SEL_CLK_25MHZ; | ||
5340 | else | ||
5341 | val &= ~XMAC_CONFIG_SEL_CLK_25MHZ; | ||
5342 | |||
5343 | nw64_mac(XMAC_CONFIG, val); | ||
5344 | |||
5345 | val = nr64_mac(XMAC_CONFIG); | ||
5346 | val &= ~XMAC_CONFIG_MODE_MASK; | ||
5347 | if (np->flags & NIU_FLAGS_10G) { | ||
5348 | val |= XMAC_CONFIG_MODE_XGMII; | ||
5349 | } else { | ||
5350 | if (lp->active_speed == SPEED_1000) | ||
5351 | val |= XMAC_CONFIG_MODE_GMII; | ||
5352 | else | ||
5353 | val |= XMAC_CONFIG_MODE_MII; | ||
5354 | } | ||
5355 | |||
5356 | nw64_mac(XMAC_CONFIG, val); | ||
5357 | } | ||
5358 | |||
5359 | static void niu_init_xif_bmac(struct niu *np) | ||
5360 | { | ||
5361 | struct niu_link_config *lp = &np->link_config; | ||
5362 | u64 val; | ||
5363 | |||
5364 | val = BMAC_XIF_CONFIG_TX_OUTPUT_EN; | ||
5365 | |||
5366 | if (lp->loopback_mode == LOOPBACK_MAC) | ||
5367 | val |= BMAC_XIF_CONFIG_MII_LOOPBACK; | ||
5368 | else | ||
5369 | val &= ~BMAC_XIF_CONFIG_MII_LOOPBACK; | ||
5370 | |||
5371 | if (lp->active_speed == SPEED_1000) | ||
5372 | val |= BMAC_XIF_CONFIG_GMII_MODE; | ||
5373 | else | ||
5374 | val &= ~BMAC_XIF_CONFIG_GMII_MODE; | ||
5375 | |||
5376 | val &= ~(BMAC_XIF_CONFIG_LINK_LED | | ||
5377 | BMAC_XIF_CONFIG_LED_POLARITY); | ||
5378 | |||
5379 | if (!(np->flags & NIU_FLAGS_10G) && | ||
5380 | !(np->flags & NIU_FLAGS_FIBER) && | ||
5381 | lp->active_speed == SPEED_100) | ||
5382 | val |= BMAC_XIF_CONFIG_25MHZ_CLOCK; | ||
5383 | else | ||
5384 | val &= ~BMAC_XIF_CONFIG_25MHZ_CLOCK; | ||
5385 | |||
5386 | nw64_mac(BMAC_XIF_CONFIG, val); | ||
5387 | } | ||
5388 | |||
5389 | static void niu_init_xif(struct niu *np) | ||
5390 | { | ||
5391 | if (np->flags & NIU_FLAGS_XMAC) | ||
5392 | niu_init_xif_xmac(np); | ||
5393 | else | ||
5394 | niu_init_xif_bmac(np); | ||
5395 | } | ||
5396 | |||
5397 | static void niu_pcs_mii_reset(struct niu *np) | ||
5398 | { | ||
5399 | int limit = 1000; | ||
5400 | u64 val = nr64_pcs(PCS_MII_CTL); | ||
5401 | val |= PCS_MII_CTL_RST; | ||
5402 | nw64_pcs(PCS_MII_CTL, val); | ||
5403 | while ((--limit >= 0) && (val & PCS_MII_CTL_RST)) { | ||
5404 | udelay(100); | ||
5405 | val = nr64_pcs(PCS_MII_CTL); | ||
5406 | } | ||
5407 | } | ||
5408 | |||
5409 | static void niu_xpcs_reset(struct niu *np) | ||
5410 | { | ||
5411 | int limit = 1000; | ||
5412 | u64 val = nr64_xpcs(XPCS_CONTROL1); | ||
5413 | val |= XPCS_CONTROL1_RESET; | ||
5414 | nw64_xpcs(XPCS_CONTROL1, val); | ||
5415 | while ((--limit >= 0) && (val & XPCS_CONTROL1_RESET)) { | ||
5416 | udelay(100); | ||
5417 | val = nr64_xpcs(XPCS_CONTROL1); | ||
5418 | } | ||
5419 | } | ||
5420 | |||
5421 | static int niu_init_pcs(struct niu *np) | ||
5422 | { | ||
5423 | struct niu_link_config *lp = &np->link_config; | ||
5424 | u64 val; | ||
5425 | |||
5426 | switch (np->flags & (NIU_FLAGS_10G | | ||
5427 | NIU_FLAGS_FIBER | | ||
5428 | NIU_FLAGS_XCVR_SERDES)) { | ||
5429 | case NIU_FLAGS_FIBER: | ||
5430 | /* 1G fiber */ | ||
5431 | nw64_pcs(PCS_CONF, PCS_CONF_MASK | PCS_CONF_ENABLE); | ||
5432 | nw64_pcs(PCS_DPATH_MODE, 0); | ||
5433 | niu_pcs_mii_reset(np); | ||
5434 | break; | ||
5435 | |||
5436 | case NIU_FLAGS_10G: | ||
5437 | case NIU_FLAGS_10G | NIU_FLAGS_FIBER: | ||
5438 | case NIU_FLAGS_10G | NIU_FLAGS_XCVR_SERDES: | ||
5439 | /* 10G SERDES */ | ||
5440 | if (!(np->flags & NIU_FLAGS_XMAC)) | ||
5441 | return -EINVAL; | ||
5442 | |||
5443 | /* 10G copper or fiber */ | ||
5444 | val = nr64_mac(XMAC_CONFIG); | ||
5445 | val &= ~XMAC_CONFIG_10G_XPCS_BYPASS; | ||
5446 | nw64_mac(XMAC_CONFIG, val); | ||
5447 | |||
5448 | niu_xpcs_reset(np); | ||
5449 | |||
5450 | val = nr64_xpcs(XPCS_CONTROL1); | ||
5451 | if (lp->loopback_mode == LOOPBACK_PHY) | ||
5452 | val |= XPCS_CONTROL1_LOOPBACK; | ||
5453 | else | ||
5454 | val &= ~XPCS_CONTROL1_LOOPBACK; | ||
5455 | nw64_xpcs(XPCS_CONTROL1, val); | ||
5456 | |||
5457 | nw64_xpcs(XPCS_DESKEW_ERR_CNT, 0); | ||
5458 | (void) nr64_xpcs(XPCS_SYMERR_CNT01); | ||
5459 | (void) nr64_xpcs(XPCS_SYMERR_CNT23); | ||
5460 | break; | ||
5461 | |||
5462 | |||
5463 | case NIU_FLAGS_XCVR_SERDES: | ||
5464 | /* 1G SERDES */ | ||
5465 | niu_pcs_mii_reset(np); | ||
5466 | nw64_pcs(PCS_CONF, PCS_CONF_MASK | PCS_CONF_ENABLE); | ||
5467 | nw64_pcs(PCS_DPATH_MODE, 0); | ||
5468 | break; | ||
5469 | |||
5470 | case 0: | ||
5471 | /* 1G copper */ | ||
5472 | case NIU_FLAGS_XCVR_SERDES | NIU_FLAGS_FIBER: | ||
5473 | /* 1G RGMII FIBER */ | ||
5474 | nw64_pcs(PCS_DPATH_MODE, PCS_DPATH_MODE_MII); | ||
5475 | niu_pcs_mii_reset(np); | ||
5476 | break; | ||
5477 | |||
5478 | default: | ||
5479 | return -EINVAL; | ||
5480 | } | ||
5481 | |||
5482 | return 0; | ||
5483 | } | ||
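
niu_init_pcs() keys off the three-bit combination of the 10G, FIBER, and XCVR_SERDES flags to pick between the MII PCS, the 1G SERDES path, and the 10G XPCS. A compact restatement of that decode, with flag bit values assumed purely for illustration (the real NIU_FLAGS_* constants are defined in niu.h):

    #include <stdio.h>

    /* Assumed flag bits, for illustration only. */
    #define F_10G    0x1
    #define F_FIBER  0x2
    #define F_SERDES 0x4

    static const char *pcs_mode(unsigned flags)
    {
            switch (flags & (F_10G | F_FIBER | F_SERDES)) {
            case F_FIBER:            return "1G fiber PCS";
            case F_10G:
            case F_10G | F_FIBER:
            case F_10G | F_SERDES:   return "10G XPCS";
            case F_SERDES:           return "1G SERDES";
            case 0:
            case F_SERDES | F_FIBER: return "MII datapath (1G copper / RGMII fiber)";
            default:                 return "invalid";
            }
    }

    int main(void)
    {
            printf("%s\n", pcs_mode(F_10G | F_FIBER));  /* 10G XPCS */
            return 0;
    }
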
5484 | |||
5485 | static int niu_reset_tx_xmac(struct niu *np) | ||
5486 | { | ||
5487 | return niu_set_and_wait_clear_mac(np, XTXMAC_SW_RST, | ||
5488 | (XTXMAC_SW_RST_REG_RS | | ||
5489 | XTXMAC_SW_RST_SOFT_RST), | ||
5490 | 1000, 100, "XTXMAC_SW_RST"); | ||
5491 | } | ||
5492 | |||
5493 | static int niu_reset_tx_bmac(struct niu *np) | ||
5494 | { | ||
5495 | int limit; | ||
5496 | |||
5497 | nw64_mac(BTXMAC_SW_RST, BTXMAC_SW_RST_RESET); | ||
5498 | limit = 1000; | ||
5499 | while (--limit >= 0) { | ||
5500 | if (!(nr64_mac(BTXMAC_SW_RST) & BTXMAC_SW_RST_RESET)) | ||
5501 | break; | ||
5502 | udelay(100); | ||
5503 | } | ||
5504 | if (limit < 0) { | ||
5505 | dev_err(np->device, "Port %u TX BMAC would not reset, BTXMAC_SW_RST[%llx]\n", | ||
5506 | np->port, | ||
5507 | (unsigned long long) nr64_mac(BTXMAC_SW_RST)); | ||
5508 | return -ENODEV; | ||
5509 | } | ||
5510 | |||
5511 | return 0; | ||
5512 | } | ||
5513 | |||
5514 | static int niu_reset_tx_mac(struct niu *np) | ||
5515 | { | ||
5516 | if (np->flags & NIU_FLAGS_XMAC) | ||
5517 | return niu_reset_tx_xmac(np); | ||
5518 | else | ||
5519 | return niu_reset_tx_bmac(np); | ||
5520 | } | ||
5521 | |||
5522 | static void niu_init_tx_xmac(struct niu *np, u64 min, u64 max) | ||
5523 | { | ||
5524 | u64 val; | ||
5525 | |||
5526 | val = nr64_mac(XMAC_MIN); | ||
5527 | val &= ~(XMAC_MIN_TX_MIN_PKT_SIZE | | ||
5528 | XMAC_MIN_RX_MIN_PKT_SIZE); | ||
5529 | val |= (min << XMAC_MIN_RX_MIN_PKT_SIZE_SHFT); | ||
5530 | val |= (min << XMAC_MIN_TX_MIN_PKT_SIZE_SHFT); | ||
5531 | nw64_mac(XMAC_MIN, val); | ||
5532 | |||
5533 | nw64_mac(XMAC_MAX, max); | ||
5534 | |||
5535 | nw64_mac(XTXMAC_STAT_MSK, ~(u64)0); | ||
5536 | |||
5537 | val = nr64_mac(XMAC_IPG); | ||
5538 | if (np->flags & NIU_FLAGS_10G) { | ||
5539 | val &= ~XMAC_IPG_IPG_XGMII; | ||
5540 | val |= (IPG_12_15_XGMII << XMAC_IPG_IPG_XGMII_SHIFT); | ||
5541 | } else { | ||
5542 | val &= ~XMAC_IPG_IPG_MII_GMII; | ||
5543 | val |= (IPG_12_MII_GMII << XMAC_IPG_IPG_MII_GMII_SHIFT); | ||
5544 | } | ||
5545 | nw64_mac(XMAC_IPG, val); | ||
5546 | |||
5547 | val = nr64_mac(XMAC_CONFIG); | ||
5548 | val &= ~(XMAC_CONFIG_ALWAYS_NO_CRC | | ||
5549 | XMAC_CONFIG_STRETCH_MODE | | ||
5550 | XMAC_CONFIG_VAR_MIN_IPG_EN | | ||
5551 | XMAC_CONFIG_TX_ENABLE); | ||
5552 | nw64_mac(XMAC_CONFIG, val); | ||
5553 | |||
5554 | nw64_mac(TXMAC_FRM_CNT, 0); | ||
5555 | nw64_mac(TXMAC_BYTE_CNT, 0); | ||
5556 | } | ||
5557 | |||
5558 | static void niu_init_tx_bmac(struct niu *np, u64 min, u64 max) | ||
5559 | { | ||
5560 | u64 val; | ||
5561 | |||
5562 | nw64_mac(BMAC_MIN_FRAME, min); | ||
5563 | nw64_mac(BMAC_MAX_FRAME, max); | ||
5564 | |||
5565 | nw64_mac(BTXMAC_STATUS_MASK, ~(u64)0); | ||
5566 | nw64_mac(BMAC_CTRL_TYPE, 0x8808); | ||
5567 | nw64_mac(BMAC_PREAMBLE_SIZE, 7); | ||
5568 | |||
5569 | val = nr64_mac(BTXMAC_CONFIG); | ||
5570 | val &= ~(BTXMAC_CONFIG_FCS_DISABLE | | ||
5571 | BTXMAC_CONFIG_ENABLE); | ||
5572 | nw64_mac(BTXMAC_CONFIG, val); | ||
5573 | } | ||
5574 | |||
5575 | static void niu_init_tx_mac(struct niu *np) | ||
5576 | { | ||
5577 | u64 min, max; | ||
5578 | |||
5579 | min = 64; | ||
5580 | if (np->dev->mtu > ETH_DATA_LEN) | ||
5581 | max = 9216; | ||
5582 | else | ||
5583 | max = 1522; | ||
5584 | |||
5585 | /* The XMAC_MIN register only accepts values for TX min which | ||
5586 | * have the low 3 bits cleared. | ||
5587 | */ | ||
5588 | BUG_ON(min & 0x7); | ||
5589 | |||
5590 | if (np->flags & NIU_FLAGS_XMAC) | ||
5591 | niu_init_tx_xmac(np, min, max); | ||
5592 | else | ||
5593 | niu_init_tx_bmac(np, min, max); | ||
5594 | } | ||
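
niu_init_tx_mac() derives the frame-size bounds from the MTU: the minimum is fixed at 64 (and must have its low three bits clear, which the BUG_ON checks), while the maximum flips between the standard 1522-byte limit and the 9216-byte jumbo limit. A standalone restatement of that selection (ETH_DATA_LEN is 1500):

    #include <assert.h>
    #include <stdio.h>

    #define ETH_DATA_LEN 1500

    static void tx_frame_bounds(int mtu, unsigned long *min, unsigned long *max)
    {
            *min = 64;
            *max = (mtu > ETH_DATA_LEN) ? 9216 : 1522;
            assert((*min & 0x7) == 0);  /* XMAC_MIN needs the low 3 bits clear */
    }

    int main(void)
    {
            unsigned long min, max;

            tx_frame_bounds(9000, &min, &max);      /* jumbo MTU */
            printf("min=%lu max=%lu\n", min, max);  /* min=64 max=9216 */
            return 0;
    }
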
5595 | |||
5596 | static int niu_reset_rx_xmac(struct niu *np) | ||
5597 | { | ||
5598 | int limit; | ||
5599 | |||
5600 | nw64_mac(XRXMAC_SW_RST, | ||
5601 | XRXMAC_SW_RST_REG_RS | XRXMAC_SW_RST_SOFT_RST); | ||
5602 | limit = 1000; | ||
5603 | while (--limit >= 0) { | ||
5604 | if (!(nr64_mac(XRXMAC_SW_RST) & (XRXMAC_SW_RST_REG_RS | | ||
5605 | XRXMAC_SW_RST_SOFT_RST))) | ||
5606 | break; | ||
5607 | udelay(100); | ||
5608 | } | ||
5609 | if (limit < 0) { | ||
5610 | dev_err(np->device, "Port %u RX XMAC would not reset, XRXMAC_SW_RST[%llx]\n", | ||
5611 | np->port, | ||
5612 | (unsigned long long) nr64_mac(XRXMAC_SW_RST)); | ||
5613 | return -ENODEV; | ||
5614 | } | ||
5615 | |||
5616 | return 0; | ||
5617 | } | ||
5618 | |||
5619 | static int niu_reset_rx_bmac(struct niu *np) | ||
5620 | { | ||
5621 | int limit; | ||
5622 | |||
5623 | nw64_mac(BRXMAC_SW_RST, BRXMAC_SW_RST_RESET); | ||
5624 | limit = 1000; | ||
5625 | while (--limit >= 0) { | ||
5626 | if (!(nr64_mac(BRXMAC_SW_RST) & BRXMAC_SW_RST_RESET)) | ||
5627 | break; | ||
5628 | udelay(100); | ||
5629 | } | ||
5630 | if (limit < 0) { | ||
5631 | dev_err(np->device, "Port %u RX BMAC would not reset, BRXMAC_SW_RST[%llx]\n", | ||
5632 | np->port, | ||
5633 | (unsigned long long) nr64_mac(BRXMAC_SW_RST)); | ||
5634 | return -ENODEV; | ||
5635 | } | ||
5636 | |||
5637 | return 0; | ||
5638 | } | ||
5639 | |||
5640 | static int niu_reset_rx_mac(struct niu *np) | ||
5641 | { | ||
5642 | if (np->flags & NIU_FLAGS_XMAC) | ||
5643 | return niu_reset_rx_xmac(np); | ||
5644 | else | ||
5645 | return niu_reset_rx_bmac(np); | ||
5646 | } | ||
5647 | |||
5648 | static void niu_init_rx_xmac(struct niu *np) | ||
5649 | { | ||
5650 | struct niu_parent *parent = np->parent; | ||
5651 | struct niu_rdc_tables *tp = &parent->rdc_group_cfg[np->port]; | ||
5652 | int first_rdc_table = tp->first_table_num; | ||
5653 | unsigned long i; | ||
5654 | u64 val; | ||
5655 | |||
5656 | nw64_mac(XMAC_ADD_FILT0, 0); | ||
5657 | nw64_mac(XMAC_ADD_FILT1, 0); | ||
5658 | nw64_mac(XMAC_ADD_FILT2, 0); | ||
5659 | nw64_mac(XMAC_ADD_FILT12_MASK, 0); | ||
5660 | nw64_mac(XMAC_ADD_FILT00_MASK, 0); | ||
5661 | for (i = 0; i < MAC_NUM_HASH; i++) | ||
5662 | nw64_mac(XMAC_HASH_TBL(i), 0); | ||
5663 | nw64_mac(XRXMAC_STAT_MSK, ~(u64)0); | ||
5664 | niu_set_primary_mac_rdc_table(np, first_rdc_table, 1); | ||
5665 | niu_set_multicast_mac_rdc_table(np, first_rdc_table, 1); | ||
5666 | |||
5667 | val = nr64_mac(XMAC_CONFIG); | ||
5668 | val &= ~(XMAC_CONFIG_RX_MAC_ENABLE | | ||
5669 | XMAC_CONFIG_PROMISCUOUS | | ||
5670 | XMAC_CONFIG_PROMISC_GROUP | | ||
5671 | XMAC_CONFIG_ERR_CHK_DIS | | ||
5672 | XMAC_CONFIG_RX_CRC_CHK_DIS | | ||
5673 | XMAC_CONFIG_RESERVED_MULTICAST | | ||
5674 | XMAC_CONFIG_RX_CODEV_CHK_DIS | | ||
5675 | XMAC_CONFIG_ADDR_FILTER_EN | | ||
5676 | XMAC_CONFIG_RCV_PAUSE_ENABLE | | ||
5677 | XMAC_CONFIG_STRIP_CRC | | ||
5678 | XMAC_CONFIG_PASS_FLOW_CTRL | | ||
5679 | XMAC_CONFIG_MAC2IPP_PKT_CNT_EN); | ||
5680 | val |= (XMAC_CONFIG_HASH_FILTER_EN); | ||
5681 | nw64_mac(XMAC_CONFIG, val); | ||
5682 | |||
5683 | nw64_mac(RXMAC_BT_CNT, 0); | ||
5684 | nw64_mac(RXMAC_BC_FRM_CNT, 0); | ||
5685 | nw64_mac(RXMAC_MC_FRM_CNT, 0); | ||
5686 | nw64_mac(RXMAC_FRAG_CNT, 0); | ||
5687 | nw64_mac(RXMAC_HIST_CNT1, 0); | ||
5688 | nw64_mac(RXMAC_HIST_CNT2, 0); | ||
5689 | nw64_mac(RXMAC_HIST_CNT3, 0); | ||
5690 | nw64_mac(RXMAC_HIST_CNT4, 0); | ||
5691 | nw64_mac(RXMAC_HIST_CNT5, 0); | ||
5692 | nw64_mac(RXMAC_HIST_CNT6, 0); | ||
5693 | nw64_mac(RXMAC_HIST_CNT7, 0); | ||
5694 | nw64_mac(RXMAC_MPSZER_CNT, 0); | ||
5695 | nw64_mac(RXMAC_CRC_ER_CNT, 0); | ||
5696 | nw64_mac(RXMAC_CD_VIO_CNT, 0); | ||
5697 | nw64_mac(LINK_FAULT_CNT, 0); | ||
5698 | } | ||
5699 | |||
5700 | static void niu_init_rx_bmac(struct niu *np) | ||
5701 | { | ||
5702 | struct niu_parent *parent = np->parent; | ||
5703 | struct niu_rdc_tables *tp = &parent->rdc_group_cfg[np->port]; | ||
5704 | int first_rdc_table = tp->first_table_num; | ||
5705 | unsigned long i; | ||
5706 | u64 val; | ||
5707 | |||
5708 | nw64_mac(BMAC_ADD_FILT0, 0); | ||
5709 | nw64_mac(BMAC_ADD_FILT1, 0); | ||
5710 | nw64_mac(BMAC_ADD_FILT2, 0); | ||
5711 | nw64_mac(BMAC_ADD_FILT12_MASK, 0); | ||
5712 | nw64_mac(BMAC_ADD_FILT00_MASK, 0); | ||
5713 | for (i = 0; i < MAC_NUM_HASH; i++) | ||
5714 | nw64_mac(BMAC_HASH_TBL(i), 0); | ||
5715 | niu_set_primary_mac_rdc_table(np, first_rdc_table, 1); | ||
5716 | niu_set_multicast_mac_rdc_table(np, first_rdc_table, 1); | ||
5717 | nw64_mac(BRXMAC_STATUS_MASK, ~(u64)0); | ||
5718 | |||
5719 | val = nr64_mac(BRXMAC_CONFIG); | ||
5720 | val &= ~(BRXMAC_CONFIG_ENABLE | | ||
5721 | BRXMAC_CONFIG_STRIP_PAD | | ||
5722 | BRXMAC_CONFIG_STRIP_FCS | | ||
5723 | BRXMAC_CONFIG_PROMISC | | ||
5724 | BRXMAC_CONFIG_PROMISC_GRP | | ||
5725 | BRXMAC_CONFIG_ADDR_FILT_EN | | ||
5726 | BRXMAC_CONFIG_DISCARD_DIS); | ||
5727 | val |= (BRXMAC_CONFIG_HASH_FILT_EN); | ||
5728 | nw64_mac(BRXMAC_CONFIG, val); | ||
5729 | |||
5730 | val = nr64_mac(BMAC_ADDR_CMPEN); | ||
5731 | val |= BMAC_ADDR_CMPEN_EN0; | ||
5732 | nw64_mac(BMAC_ADDR_CMPEN, val); | ||
5733 | } | ||
5734 | |||
5735 | static void niu_init_rx_mac(struct niu *np) | ||
5736 | { | ||
5737 | niu_set_primary_mac(np, np->dev->dev_addr); | ||
5738 | |||
5739 | if (np->flags & NIU_FLAGS_XMAC) | ||
5740 | niu_init_rx_xmac(np); | ||
5741 | else | ||
5742 | niu_init_rx_bmac(np); | ||
5743 | } | ||
5744 | |||
5745 | static void niu_enable_tx_xmac(struct niu *np, int on) | ||
5746 | { | ||
5747 | u64 val = nr64_mac(XMAC_CONFIG); | ||
5748 | |||
5749 | if (on) | ||
5750 | val |= XMAC_CONFIG_TX_ENABLE; | ||
5751 | else | ||
5752 | val &= ~XMAC_CONFIG_TX_ENABLE; | ||
5753 | nw64_mac(XMAC_CONFIG, val); | ||
5754 | } | ||
5755 | |||
5756 | static void niu_enable_tx_bmac(struct niu *np, int on) | ||
5757 | { | ||
5758 | u64 val = nr64_mac(BTXMAC_CONFIG); | ||
5759 | |||
5760 | if (on) | ||
5761 | val |= BTXMAC_CONFIG_ENABLE; | ||
5762 | else | ||
5763 | val &= ~BTXMAC_CONFIG_ENABLE; | ||
5764 | nw64_mac(BTXMAC_CONFIG, val); | ||
5765 | } | ||
5766 | |||
5767 | static void niu_enable_tx_mac(struct niu *np, int on) | ||
5768 | { | ||
5769 | if (np->flags & NIU_FLAGS_XMAC) | ||
5770 | niu_enable_tx_xmac(np, on); | ||
5771 | else | ||
5772 | niu_enable_tx_bmac(np, on); | ||
5773 | } | ||
5774 | |||
5775 | static void niu_enable_rx_xmac(struct niu *np, int on) | ||
5776 | { | ||
5777 | u64 val = nr64_mac(XMAC_CONFIG); | ||
5778 | |||
5779 | val &= ~(XMAC_CONFIG_HASH_FILTER_EN | | ||
5780 | XMAC_CONFIG_PROMISCUOUS); | ||
5781 | |||
5782 | if (np->flags & NIU_FLAGS_MCAST) | ||
5783 | val |= XMAC_CONFIG_HASH_FILTER_EN; | ||
5784 | if (np->flags & NIU_FLAGS_PROMISC) | ||
5785 | val |= XMAC_CONFIG_PROMISCUOUS; | ||
5786 | |||
5787 | if (on) | ||
5788 | val |= XMAC_CONFIG_RX_MAC_ENABLE; | ||
5789 | else | ||
5790 | val &= ~XMAC_CONFIG_RX_MAC_ENABLE; | ||
5791 | nw64_mac(XMAC_CONFIG, val); | ||
5792 | } | ||
5793 | |||
5794 | static void niu_enable_rx_bmac(struct niu *np, int on) | ||
5795 | { | ||
5796 | u64 val = nr64_mac(BRXMAC_CONFIG); | ||
5797 | |||
5798 | val &= ~(BRXMAC_CONFIG_HASH_FILT_EN | | ||
5799 | BRXMAC_CONFIG_PROMISC); | ||
5800 | |||
5801 | if (np->flags & NIU_FLAGS_MCAST) | ||
5802 | val |= BRXMAC_CONFIG_HASH_FILT_EN; | ||
5803 | if (np->flags & NIU_FLAGS_PROMISC) | ||
5804 | val |= BRXMAC_CONFIG_PROMISC; | ||
5805 | |||
5806 | if (on) | ||
5807 | val |= BRXMAC_CONFIG_ENABLE; | ||
5808 | else | ||
5809 | val &= ~BRXMAC_CONFIG_ENABLE; | ||
5810 | nw64_mac(BRXMAC_CONFIG, val); | ||
5811 | } | ||
5812 | |||
5813 | static void niu_enable_rx_mac(struct niu *np, int on) | ||
5814 | { | ||
5815 | if (np->flags & NIU_FLAGS_XMAC) | ||
5816 | niu_enable_rx_xmac(np, on); | ||
5817 | else | ||
5818 | niu_enable_rx_bmac(np, on); | ||
5819 | } | ||
5820 | |||
5821 | static int niu_init_mac(struct niu *np) | ||
5822 | { | ||
5823 | int err; | ||
5824 | |||
5825 | niu_init_xif(np); | ||
5826 | err = niu_init_pcs(np); | ||
5827 | if (err) | ||
5828 | return err; | ||
5829 | |||
5830 | err = niu_reset_tx_mac(np); | ||
5831 | if (err) | ||
5832 | return err; | ||
5833 | niu_init_tx_mac(np); | ||
5834 | err = niu_reset_rx_mac(np); | ||
5835 | if (err) | ||
5836 | return err; | ||
5837 | niu_init_rx_mac(np); | ||
5838 | |||
5839 | /* This looks hokey, but the RX MAC reset we just did will | ||
5840 | * undo some of the state we set up in niu_init_tx_mac() so we | ||
5841 | * have to call it again. In particular, the RX MAC reset will | ||
5842 | * set the XMAC_MAX register back to its default value. | ||
5843 | */ | ||
5844 | niu_init_tx_mac(np); | ||
5845 | niu_enable_tx_mac(np, 1); | ||
5846 | |||
5847 | niu_enable_rx_mac(np, 1); | ||
5848 | |||
5849 | return 0; | ||
5850 | } | ||
5851 | |||
5852 | static void niu_stop_one_tx_channel(struct niu *np, struct tx_ring_info *rp) | ||
5853 | { | ||
5854 | (void) niu_tx_channel_stop(np, rp->tx_channel); | ||
5855 | } | ||
5856 | |||
5857 | static void niu_stop_tx_channels(struct niu *np) | ||
5858 | { | ||
5859 | int i; | ||
5860 | |||
5861 | for (i = 0; i < np->num_tx_rings; i++) { | ||
5862 | struct tx_ring_info *rp = &np->tx_rings[i]; | ||
5863 | |||
5864 | niu_stop_one_tx_channel(np, rp); | ||
5865 | } | ||
5866 | } | ||
5867 | |||
5868 | static void niu_reset_one_tx_channel(struct niu *np, struct tx_ring_info *rp) | ||
5869 | { | ||
5870 | (void) niu_tx_channel_reset(np, rp->tx_channel); | ||
5871 | } | ||
5872 | |||
5873 | static void niu_reset_tx_channels(struct niu *np) | ||
5874 | { | ||
5875 | int i; | ||
5876 | |||
5877 | for (i = 0; i < np->num_tx_rings; i++) { | ||
5878 | struct tx_ring_info *rp = &np->tx_rings[i]; | ||
5879 | |||
5880 | niu_reset_one_tx_channel(np, rp); | ||
5881 | } | ||
5882 | } | ||
5883 | |||
5884 | static void niu_stop_one_rx_channel(struct niu *np, struct rx_ring_info *rp) | ||
5885 | { | ||
5886 | (void) niu_enable_rx_channel(np, rp->rx_channel, 0); | ||
5887 | } | ||
5888 | |||
5889 | static void niu_stop_rx_channels(struct niu *np) | ||
5890 | { | ||
5891 | int i; | ||
5892 | |||
5893 | for (i = 0; i < np->num_rx_rings; i++) { | ||
5894 | struct rx_ring_info *rp = &np->rx_rings[i]; | ||
5895 | |||
5896 | niu_stop_one_rx_channel(np, rp); | ||
5897 | } | ||
5898 | } | ||
5899 | |||
5900 | static void niu_reset_one_rx_channel(struct niu *np, struct rx_ring_info *rp) | ||
5901 | { | ||
5902 | int channel = rp->rx_channel; | ||
5903 | |||
5904 | (void) niu_rx_channel_reset(np, channel); | ||
5905 | nw64(RX_DMA_ENT_MSK(channel), RX_DMA_ENT_MSK_ALL); | ||
5906 | nw64(RX_DMA_CTL_STAT(channel), 0); | ||
5907 | (void) niu_enable_rx_channel(np, channel, 0); | ||
5908 | } | ||
5909 | |||
5910 | static void niu_reset_rx_channels(struct niu *np) | ||
5911 | { | ||
5912 | int i; | ||
5913 | |||
5914 | for (i = 0; i < np->num_rx_rings; i++) { | ||
5915 | struct rx_ring_info *rp = &np->rx_rings[i]; | ||
5916 | |||
5917 | niu_reset_one_rx_channel(np, rp); | ||
5918 | } | ||
5919 | } | ||
5920 | |||
5921 | static void niu_disable_ipp(struct niu *np) | ||
5922 | { | ||
5923 | u64 rd, wr, val; | ||
5924 | int limit; | ||
5925 | |||
5926 | rd = nr64_ipp(IPP_DFIFO_RD_PTR); | ||
5927 | wr = nr64_ipp(IPP_DFIFO_WR_PTR); | ||
5928 | limit = 100; | ||
5929 | while (--limit >= 0 && (rd != wr)) { | ||
5930 | rd = nr64_ipp(IPP_DFIFO_RD_PTR); | ||
5931 | wr = nr64_ipp(IPP_DFIFO_WR_PTR); | ||
5932 | } | ||
5933 | if (limit < 0 && | ||
5934 | (rd != 0 && wr != 1)) { | ||
5935 | netdev_err(np->dev, "IPP would not quiesce, rd_ptr[%llx] wr_ptr[%llx]\n", | ||
5936 | (unsigned long long)nr64_ipp(IPP_DFIFO_RD_PTR), | ||
5937 | (unsigned long long)nr64_ipp(IPP_DFIFO_WR_PTR)); | ||
5938 | } | ||
5939 | |||
5940 | val = nr64_ipp(IPP_CFIG); | ||
5941 | val &= ~(IPP_CFIG_IPP_ENABLE | | ||
5942 | IPP_CFIG_DFIFO_ECC_EN | | ||
5943 | IPP_CFIG_DROP_BAD_CRC | | ||
5944 | IPP_CFIG_CKSUM_EN); | ||
5945 | nw64_ipp(IPP_CFIG, val); | ||
5946 | |||
5947 | (void) niu_ipp_reset(np); | ||
5948 | } | ||
5949 | |||
5950 | static int niu_init_hw(struct niu *np) | ||
5951 | { | ||
5952 | int i, err; | ||
5953 | |||
5954 | netif_printk(np, ifup, KERN_DEBUG, np->dev, "Initialize TXC\n"); | ||
5955 | niu_txc_enable_port(np, 1); | ||
5956 | niu_txc_port_dma_enable(np, 1); | ||
5957 | niu_txc_set_imask(np, 0); | ||
5958 | |||
5959 | netif_printk(np, ifup, KERN_DEBUG, np->dev, "Initialize TX channels\n"); | ||
5960 | for (i = 0; i < np->num_tx_rings; i++) { | ||
5961 | struct tx_ring_info *rp = &np->tx_rings[i]; | ||
5962 | |||
5963 | err = niu_init_one_tx_channel(np, rp); | ||
5964 | if (err) | ||
5965 | return err; | ||
5966 | } | ||
5967 | |||
5968 | netif_printk(np, ifup, KERN_DEBUG, np->dev, "Initialize RX channels\n"); | ||
5969 | err = niu_init_rx_channels(np); | ||
5970 | if (err) | ||
5971 | goto out_uninit_tx_channels; | ||
5972 | |||
5973 | netif_printk(np, ifup, KERN_DEBUG, np->dev, "Initialize classifier\n"); | ||
5974 | err = niu_init_classifier_hw(np); | ||
5975 | if (err) | ||
5976 | goto out_uninit_rx_channels; | ||
5977 | |||
5978 | netif_printk(np, ifup, KERN_DEBUG, np->dev, "Initialize ZCP\n"); | ||
5979 | err = niu_init_zcp(np); | ||
5980 | if (err) | ||
5981 | goto out_uninit_rx_channels; | ||
5982 | |||
5983 | netif_printk(np, ifup, KERN_DEBUG, np->dev, "Initialize IPP\n"); | ||
5984 | err = niu_init_ipp(np); | ||
5985 | if (err) | ||
5986 | goto out_uninit_rx_channels; | ||
5987 | |||
5988 | netif_printk(np, ifup, KERN_DEBUG, np->dev, "Initialize MAC\n"); | ||
5989 | err = niu_init_mac(np); | ||
5990 | if (err) | ||
5991 | goto out_uninit_ipp; | ||
5992 | |||
5993 | return 0; | ||
5994 | |||
5995 | out_uninit_ipp: | ||
5996 | netif_printk(np, ifup, KERN_DEBUG, np->dev, "Uninit IPP\n"); | ||
5997 | niu_disable_ipp(np); | ||
5998 | |||
5999 | out_uninit_rx_channels: | ||
6000 | netif_printk(np, ifup, KERN_DEBUG, np->dev, "Uninit RX channels\n"); | ||
6001 | niu_stop_rx_channels(np); | ||
6002 | niu_reset_rx_channels(np); | ||
6003 | |||
6004 | out_uninit_tx_channels: | ||
6005 | netif_printk(np, ifup, KERN_DEBUG, np->dev, "Uninit TX channels\n"); | ||
6006 | niu_stop_tx_channels(np); | ||
6007 | niu_reset_tx_channels(np); | ||
6008 | |||
6009 | return err; | ||
6010 | } | ||
6011 | |||
6012 | static void niu_stop_hw(struct niu *np) | ||
6013 | { | ||
6014 | netif_printk(np, ifdown, KERN_DEBUG, np->dev, "Disable interrupts\n"); | ||
6015 | niu_enable_interrupts(np, 0); | ||
6016 | |||
6017 | netif_printk(np, ifdown, KERN_DEBUG, np->dev, "Disable RX MAC\n"); | ||
6018 | niu_enable_rx_mac(np, 0); | ||
6019 | |||
6020 | netif_printk(np, ifdown, KERN_DEBUG, np->dev, "Disable IPP\n"); | ||
6021 | niu_disable_ipp(np); | ||
6022 | |||
6023 | netif_printk(np, ifdown, KERN_DEBUG, np->dev, "Stop TX channels\n"); | ||
6024 | niu_stop_tx_channels(np); | ||
6025 | |||
6026 | netif_printk(np, ifdown, KERN_DEBUG, np->dev, "Stop RX channels\n"); | ||
6027 | niu_stop_rx_channels(np); | ||
6028 | |||
6029 | netif_printk(np, ifdown, KERN_DEBUG, np->dev, "Reset TX channels\n"); | ||
6030 | niu_reset_tx_channels(np); | ||
6031 | |||
6032 | netif_printk(np, ifdown, KERN_DEBUG, np->dev, "Reset RX channels\n"); | ||
6033 | niu_reset_rx_channels(np); | ||
6034 | } | ||
6035 | |||
6036 | static void niu_set_irq_name(struct niu *np) | ||
6037 | { | ||
6038 | int port = np->port; | ||
6039 | int i, j = 1; | ||
6040 | |||
6041 | sprintf(np->irq_name[0], "%s:MAC", np->dev->name); | ||
6042 | |||
6043 | if (port == 0) { | ||
6044 | sprintf(np->irq_name[1], "%s:MIF", np->dev->name); | ||
6045 | sprintf(np->irq_name[2], "%s:SYSERR", np->dev->name); | ||
6046 | j = 3; | ||
6047 | } | ||
6048 | |||
6049 | for (i = 0; i < np->num_ldg - j; i++) { | ||
6050 | if (i < np->num_rx_rings) | ||
6051 | sprintf(np->irq_name[i+j], "%s-rx-%d", | ||
6052 | np->dev->name, i); | ||
6053 | else if (i < np->num_tx_rings + np->num_rx_rings) | ||
6054 | sprintf(np->irq_name[i+j], "%s-tx-%d", np->dev->name, | ||
6055 | i - np->num_rx_rings); | ||
6056 | } | ||
6057 | } | ||
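
A quick illustration of the name layout niu_set_irq_name() produces: slot 0 is always the MAC vector, port 0 adds MIF and SYSERR vectors, and the remaining logical device groups are labelled after the RX rings first, then the TX rings. The sketch below assumes a port-0 device named "eth0" with 7 LDGs, 2 RX rings, and 2 TX rings:

    #include <stdio.h>

    int main(void)
    {
            const char *dev = "eth0";   /* hypothetical interface name */
            int num_ldg = 7, num_rx = 2, num_tx = 2, port = 0;
            char name[16][32];
            int i, j = 1;

            sprintf(name[0], "%s:MAC", dev);
            if (port == 0) {
                    sprintf(name[1], "%s:MIF", dev);
                    sprintf(name[2], "%s:SYSERR", dev);
                    j = 3;
            }
            for (i = 0; i < num_ldg - j; i++) {
                    if (i < num_rx)
                            sprintf(name[i + j], "%s-rx-%d", dev, i);
                    else if (i < num_rx + num_tx)
                            sprintf(name[i + j], "%s-tx-%d", dev, i - num_rx);
            }
            for (i = 0; i < num_ldg; i++)
                    printf("ldg %d -> %s\n", i, name[i]);
            /* eth0:MAC, eth0:MIF, eth0:SYSERR, eth0-rx-0, eth0-rx-1,
             * eth0-tx-0, eth0-tx-1 */
            return 0;
    }
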
6058 | |||
6059 | static int niu_request_irq(struct niu *np) | ||
6060 | { | ||
6061 | int i, j, err; | ||
6062 | |||
6063 | niu_set_irq_name(np); | ||
6064 | |||
6065 | err = 0; | ||
6066 | for (i = 0; i < np->num_ldg; i++) { | ||
6067 | struct niu_ldg *lp = &np->ldg[i]; | ||
6068 | |||
6069 | err = request_irq(lp->irq, niu_interrupt, IRQF_SHARED, | ||
6070 | np->irq_name[i], lp); | ||
6071 | if (err) | ||
6072 | goto out_free_irqs; | ||
6073 | |||
6074 | } | ||
6075 | |||
6076 | return 0; | ||
6077 | |||
6078 | out_free_irqs: | ||
6079 | for (j = 0; j < i; j++) { | ||
6080 | struct niu_ldg *lp = &np->ldg[j]; | ||
6081 | |||
6082 | free_irq(lp->irq, lp); | ||
6083 | } | ||
6084 | return err; | ||
6085 | } | ||
6086 | |||
6087 | static void niu_free_irq(struct niu *np) | ||
6088 | { | ||
6089 | int i; | ||
6090 | |||
6091 | for (i = 0; i < np->num_ldg; i++) { | ||
6092 | struct niu_ldg *lp = &np->ldg[i]; | ||
6093 | |||
6094 | free_irq(lp->irq, lp); | ||
6095 | } | ||
6096 | } | ||
6097 | |||
6098 | static void niu_enable_napi(struct niu *np) | ||
6099 | { | ||
6100 | int i; | ||
6101 | |||
6102 | for (i = 0; i < np->num_ldg; i++) | ||
6103 | napi_enable(&np->ldg[i].napi); | ||
6104 | } | ||
6105 | |||
6106 | static void niu_disable_napi(struct niu *np) | ||
6107 | { | ||
6108 | int i; | ||
6109 | |||
6110 | for (i = 0; i < np->num_ldg; i++) | ||
6111 | napi_disable(&np->ldg[i].napi); | ||
6112 | } | ||
6113 | |||
6114 | static int niu_open(struct net_device *dev) | ||
6115 | { | ||
6116 | struct niu *np = netdev_priv(dev); | ||
6117 | int err; | ||
6118 | |||
6119 | netif_carrier_off(dev); | ||
6120 | |||
6121 | err = niu_alloc_channels(np); | ||
6122 | if (err) | ||
6123 | goto out_err; | ||
6124 | |||
6125 | err = niu_enable_interrupts(np, 0); | ||
6126 | if (err) | ||
6127 | goto out_free_channels; | ||
6128 | |||
6129 | err = niu_request_irq(np); | ||
6130 | if (err) | ||
6131 | goto out_free_channels; | ||
6132 | |||
6133 | niu_enable_napi(np); | ||
6134 | |||
6135 | spin_lock_irq(&np->lock); | ||
6136 | |||
6137 | err = niu_init_hw(np); | ||
6138 | if (!err) { | ||
6139 | init_timer(&np->timer); | ||
6140 | np->timer.expires = jiffies + HZ; | ||
6141 | np->timer.data = (unsigned long) np; | ||
6142 | np->timer.function = niu_timer; | ||
6143 | |||
6144 | err = niu_enable_interrupts(np, 1); | ||
6145 | if (err) | ||
6146 | niu_stop_hw(np); | ||
6147 | } | ||
6148 | |||
6149 | spin_unlock_irq(&np->lock); | ||
6150 | |||
6151 | if (err) { | ||
6152 | niu_disable_napi(np); | ||
6153 | goto out_free_irq; | ||
6154 | } | ||
6155 | |||
6156 | netif_tx_start_all_queues(dev); | ||
6157 | |||
6158 | if (np->link_config.loopback_mode != LOOPBACK_DISABLED) | ||
6159 | netif_carrier_on(dev); | ||
6160 | |||
6161 | add_timer(&np->timer); | ||
6162 | |||
6163 | return 0; | ||
6164 | |||
6165 | out_free_irq: | ||
6166 | niu_free_irq(np); | ||
6167 | |||
6168 | out_free_channels: | ||
6169 | niu_free_channels(np); | ||
6170 | |||
6171 | out_err: | ||
6172 | return err; | ||
6173 | } | ||
6174 | |||
6175 | static void niu_full_shutdown(struct niu *np, struct net_device *dev) | ||
6176 | { | ||
6177 | cancel_work_sync(&np->reset_task); | ||
6178 | |||
6179 | niu_disable_napi(np); | ||
6180 | netif_tx_stop_all_queues(dev); | ||
6181 | |||
6182 | del_timer_sync(&np->timer); | ||
6183 | |||
6184 | spin_lock_irq(&np->lock); | ||
6185 | |||
6186 | niu_stop_hw(np); | ||
6187 | |||
6188 | spin_unlock_irq(&np->lock); | ||
6189 | } | ||
6190 | |||
6191 | static int niu_close(struct net_device *dev) | ||
6192 | { | ||
6193 | struct niu *np = netdev_priv(dev); | ||
6194 | |||
6195 | niu_full_shutdown(np, dev); | ||
6196 | |||
6197 | niu_free_irq(np); | ||
6198 | |||
6199 | niu_free_channels(np); | ||
6200 | |||
6201 | niu_handle_led(np, 0); | ||
6202 | |||
6203 | return 0; | ||
6204 | } | ||
6205 | |||
6206 | static void niu_sync_xmac_stats(struct niu *np) | ||
6207 | { | ||
6208 | struct niu_xmac_stats *mp = &np->mac_stats.xmac; | ||
6209 | |||
6210 | mp->tx_frames += nr64_mac(TXMAC_FRM_CNT); | ||
6211 | mp->tx_bytes += nr64_mac(TXMAC_BYTE_CNT); | ||
6212 | |||
6213 | mp->rx_link_faults += nr64_mac(LINK_FAULT_CNT); | ||
6214 | mp->rx_align_errors += nr64_mac(RXMAC_ALIGN_ERR_CNT); | ||
6215 | mp->rx_frags += nr64_mac(RXMAC_FRAG_CNT); | ||
6216 | mp->rx_mcasts += nr64_mac(RXMAC_MC_FRM_CNT); | ||
6217 | mp->rx_bcasts += nr64_mac(RXMAC_BC_FRM_CNT); | ||
6218 | mp->rx_hist_cnt1 += nr64_mac(RXMAC_HIST_CNT1); | ||
6219 | mp->rx_hist_cnt2 += nr64_mac(RXMAC_HIST_CNT2); | ||
6220 | mp->rx_hist_cnt3 += nr64_mac(RXMAC_HIST_CNT3); | ||
6221 | mp->rx_hist_cnt4 += nr64_mac(RXMAC_HIST_CNT4); | ||
6222 | mp->rx_hist_cnt5 += nr64_mac(RXMAC_HIST_CNT5); | ||
6223 | mp->rx_hist_cnt6 += nr64_mac(RXMAC_HIST_CNT6); | ||
6224 | mp->rx_hist_cnt7 += nr64_mac(RXMAC_HIST_CNT7); | ||
6225 | mp->rx_octets += nr64_mac(RXMAC_BT_CNT); | ||
6226 | mp->rx_code_violations += nr64_mac(RXMAC_CD_VIO_CNT); | ||
6227 | mp->rx_len_errors += nr64_mac(RXMAC_MPSZER_CNT); | ||
6228 | mp->rx_crc_errors += nr64_mac(RXMAC_CRC_ER_CNT); | ||
6229 | } | ||
6230 | |||
6231 | static void niu_sync_bmac_stats(struct niu *np) | ||
6232 | { | ||
6233 | struct niu_bmac_stats *mp = &np->mac_stats.bmac; | ||
6234 | |||
6235 | mp->tx_bytes += nr64_mac(BTXMAC_BYTE_CNT); | ||
6236 | mp->tx_frames += nr64_mac(BTXMAC_FRM_CNT); | ||
6237 | |||
6238 | mp->rx_frames += nr64_mac(BRXMAC_FRAME_CNT); | ||
6239 | mp->rx_align_errors += nr64_mac(BRXMAC_ALIGN_ERR_CNT); | ||
6240 | mp->rx_crc_errors += nr64_mac(BRXMAC_CRC_ERR_CNT); /* was BRXMAC_ALIGN_ERR_CNT, an apparent copy-paste slip; the CRC counter register name is assumed per niu.h */ | ||
6241 | mp->rx_len_errors += nr64_mac(BRXMAC_CODE_VIOL_ERR_CNT); | ||
6242 | } | ||
6243 | |||
6244 | static void niu_sync_mac_stats(struct niu *np) | ||
6245 | { | ||
6246 | if (np->flags & NIU_FLAGS_XMAC) | ||
6247 | niu_sync_xmac_stats(np); | ||
6248 | else | ||
6249 | niu_sync_bmac_stats(np); | ||
6250 | } | ||
6251 | |||
6252 | static void niu_get_rx_stats(struct niu *np, | ||
6253 | struct rtnl_link_stats64 *stats) | ||
6254 | { | ||
6255 | u64 pkts, dropped, errors, bytes; | ||
6256 | struct rx_ring_info *rx_rings; | ||
6257 | int i; | ||
6258 | |||
6259 | pkts = dropped = errors = bytes = 0; | ||
6260 | |||
6261 | rx_rings = ACCESS_ONCE(np->rx_rings); | ||
6262 | if (!rx_rings) | ||
6263 | goto no_rings; | ||
6264 | |||
6265 | for (i = 0; i < np->num_rx_rings; i++) { | ||
6266 | struct rx_ring_info *rp = &rx_rings[i]; | ||
6267 | |||
6268 | niu_sync_rx_discard_stats(np, rp, 0); | ||
6269 | |||
6270 | pkts += rp->rx_packets; | ||
6271 | bytes += rp->rx_bytes; | ||
6272 | dropped += rp->rx_dropped; | ||
6273 | errors += rp->rx_errors; | ||
6274 | } | ||
6275 | |||
6276 | no_rings: | ||
6277 | stats->rx_packets = pkts; | ||
6278 | stats->rx_bytes = bytes; | ||
6279 | stats->rx_dropped = dropped; | ||
6280 | stats->rx_errors = errors; | ||
6281 | } | ||
6282 | |||
6283 | static void niu_get_tx_stats(struct niu *np, | ||
6284 | struct rtnl_link_stats64 *stats) | ||
6285 | { | ||
6286 | u64 pkts, errors, bytes; | ||
6287 | struct tx_ring_info *tx_rings; | ||
6288 | int i; | ||
6289 | |||
6290 | pkts = errors = bytes = 0; | ||
6291 | |||
6292 | tx_rings = ACCESS_ONCE(np->tx_rings); | ||
6293 | if (!tx_rings) | ||
6294 | goto no_rings; | ||
6295 | |||
6296 | for (i = 0; i < np->num_tx_rings; i++) { | ||
6297 | struct tx_ring_info *rp = &tx_rings[i]; | ||
6298 | |||
6299 | pkts += rp->tx_packets; | ||
6300 | bytes += rp->tx_bytes; | ||
6301 | errors += rp->tx_errors; | ||
6302 | } | ||
6303 | |||
6304 | no_rings: | ||
6305 | stats->tx_packets = pkts; | ||
6306 | stats->tx_bytes = bytes; | ||
6307 | stats->tx_errors = errors; | ||
6308 | } | ||
6309 | |||
6310 | static struct rtnl_link_stats64 *niu_get_stats(struct net_device *dev, | ||
6311 | struct rtnl_link_stats64 *stats) | ||
6312 | { | ||
6313 | struct niu *np = netdev_priv(dev); | ||
6314 | |||
6315 | if (netif_running(dev)) { | ||
6316 | niu_get_rx_stats(np, stats); | ||
6317 | niu_get_tx_stats(np, stats); | ||
6318 | } | ||
6319 | |||
6320 | return stats; | ||
6321 | } | ||
6322 | |||
6323 | static void niu_load_hash_xmac(struct niu *np, u16 *hash) | ||
6324 | { | ||
6325 | int i; | ||
6326 | |||
6327 | for (i = 0; i < 16; i++) | ||
6328 | nw64_mac(XMAC_HASH_TBL(i), hash[i]); | ||
6329 | } | ||
6330 | |||
6331 | static void niu_load_hash_bmac(struct niu *np, u16 *hash) | ||
6332 | { | ||
6333 | int i; | ||
6334 | |||
6335 | for (i = 0; i < 16; i++) | ||
6336 | nw64_mac(BMAC_HASH_TBL(i), hash[i]); | ||
6337 | } | ||
6338 | |||
6339 | static void niu_load_hash(struct niu *np, u16 *hash) | ||
6340 | { | ||
6341 | if (np->flags & NIU_FLAGS_XMAC) | ||
6342 | niu_load_hash_xmac(np, hash); | ||
6343 | else | ||
6344 | niu_load_hash_bmac(np, hash); | ||
6345 | } | ||
6346 | |||
6347 | static void niu_set_rx_mode(struct net_device *dev) | ||
6348 | { | ||
6349 | struct niu *np = netdev_priv(dev); | ||
6350 | int i, alt_cnt, err; | ||
6351 | struct netdev_hw_addr *ha; | ||
6352 | unsigned long flags; | ||
6353 | u16 hash[16] = { 0, }; | ||
6354 | |||
6355 | spin_lock_irqsave(&np->lock, flags); | ||
6356 | niu_enable_rx_mac(np, 0); | ||
6357 | |||
6358 | np->flags &= ~(NIU_FLAGS_MCAST | NIU_FLAGS_PROMISC); | ||
6359 | if (dev->flags & IFF_PROMISC) | ||
6360 | np->flags |= NIU_FLAGS_PROMISC; | ||
6361 | if ((dev->flags & IFF_ALLMULTI) || (!netdev_mc_empty(dev))) | ||
6362 | np->flags |= NIU_FLAGS_MCAST; | ||
6363 | |||
6364 | alt_cnt = netdev_uc_count(dev); | ||
6365 | if (alt_cnt > niu_num_alt_addr(np)) { | ||
6366 | alt_cnt = 0; | ||
6367 | np->flags |= NIU_FLAGS_PROMISC; | ||
6368 | } | ||
6369 | |||
6370 | if (alt_cnt) { | ||
6371 | int index = 0; | ||
6372 | |||
6373 | netdev_for_each_uc_addr(ha, dev) { | ||
6374 | err = niu_set_alt_mac(np, index, ha->addr); | ||
6375 | if (err) | ||
6376 | netdev_warn(dev, "Error %d adding alt mac %d\n", | ||
6377 | err, index); | ||
6378 | err = niu_enable_alt_mac(np, index, 1); | ||
6379 | if (err) | ||
6380 | netdev_warn(dev, "Error %d enabling alt mac %d\n", | ||
6381 | err, index); | ||
6382 | |||
6383 | index++; | ||
6384 | } | ||
6385 | } else { | ||
6386 | int alt_start; | ||
6387 | if (np->flags & NIU_FLAGS_XMAC) | ||
6388 | alt_start = 0; | ||
6389 | else | ||
6390 | alt_start = 1; | ||
6391 | for (i = alt_start; i < niu_num_alt_addr(np); i++) { | ||
6392 | err = niu_enable_alt_mac(np, i, 0); | ||
6393 | if (err) | ||
6394 | netdev_warn(dev, "Error %d disabling alt mac %d\n", | ||
6395 | err, i); | ||
6396 | } | ||
6397 | } | ||
6398 | if (dev->flags & IFF_ALLMULTI) { | ||
6399 | for (i = 0; i < 16; i++) | ||
6400 | hash[i] = 0xffff; | ||
6401 | } else if (!netdev_mc_empty(dev)) { | ||
6402 | netdev_for_each_mc_addr(ha, dev) { | ||
6403 | u32 crc = ether_crc_le(ETH_ALEN, ha->addr); | ||
6404 | |||
6405 | crc >>= 24; | ||
6406 | hash[crc >> 4] |= (1 << (15 - (crc & 0xf))); | ||
6407 | } | ||
6408 | } | ||
6409 | |||
6410 | if (np->flags & NIU_FLAGS_MCAST) | ||
6411 | niu_load_hash(np, hash); | ||
6412 | |||
6413 | niu_enable_rx_mac(np, 1); | ||
6414 | spin_unlock_irqrestore(&np->lock, flags); | ||
6415 | } | ||
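
The multicast filter loaded above is a 256-bit table spread over sixteen 16-bit registers. Each address is folded down to the top eight bits of its little-endian CRC-32: the high nibble picks the register and the low nibble picks the bit, counted from the most significant end. A standalone sketch of the bit placement, using a fixed CRC value in place of ether_crc_le():

    #include <stdint.h>
    #include <stdio.h>

    /* Given a 32-bit CRC of the MAC address, set the matching filter bit
     * exactly as niu_set_rx_mode() does. */
    static void set_hash_bit(uint16_t hash[16], uint32_t crc)
    {
            crc >>= 24;                                /* keep the top 8 bits */
            hash[crc >> 4] |= 1 << (15 - (crc & 0xf)); /* reg = high nibble */
    }

    int main(void)
    {
            uint16_t hash[16] = { 0 };
            uint32_t crc = 0xA7000000;  /* assumed CRC value for illustration */

            set_hash_bit(hash, crc);
            /* 0xA7 -> register 10, bit 15-7 = 8 -> 0x0100 */
            printf("hash[10] = %#06x\n", hash[10]);
            return 0;
    }
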
6416 | |||
6417 | static int niu_set_mac_addr(struct net_device *dev, void *p) | ||
6418 | { | ||
6419 | struct niu *np = netdev_priv(dev); | ||
6420 | struct sockaddr *addr = p; | ||
6421 | unsigned long flags; | ||
6422 | |||
6423 | if (!is_valid_ether_addr(addr->sa_data)) | ||
6424 | return -EINVAL; | ||
6425 | |||
6426 | memcpy(dev->dev_addr, addr->sa_data, ETH_ALEN); | ||
6427 | |||
6428 | if (!netif_running(dev)) | ||
6429 | return 0; | ||
6430 | |||
6431 | spin_lock_irqsave(&np->lock, flags); | ||
6432 | niu_enable_rx_mac(np, 0); | ||
6433 | niu_set_primary_mac(np, dev->dev_addr); | ||
6434 | niu_enable_rx_mac(np, 1); | ||
6435 | spin_unlock_irqrestore(&np->lock, flags); | ||
6436 | |||
6437 | return 0; | ||
6438 | } | ||
6439 | |||
6440 | static int niu_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd) | ||
6441 | { | ||
6442 | return -EOPNOTSUPP; | ||
6443 | } | ||
6444 | |||
6445 | static void niu_netif_stop(struct niu *np) | ||
6446 | { | ||
6447 | np->dev->trans_start = jiffies; /* prevent tx timeout */ | ||
6448 | |||
6449 | niu_disable_napi(np); | ||
6450 | |||
6451 | netif_tx_disable(np->dev); | ||
6452 | } | ||
6453 | |||
6454 | static void niu_netif_start(struct niu *np) | ||
6455 | { | ||
6456 | /* NOTE: unconditionally waking all TX queues is only appropriate | ||
6457 | * so long as all callers are assured to have free tx slots | ||
6458 | * (such as after niu_init_hw). | ||
6459 | */ | ||
6460 | netif_tx_wake_all_queues(np->dev); | ||
6461 | |||
6462 | niu_enable_napi(np); | ||
6463 | |||
6464 | niu_enable_interrupts(np, 1); | ||
6465 | } | ||
6466 | |||
6467 | static void niu_reset_buffers(struct niu *np) | ||
6468 | { | ||
6469 | int i, j, k, err; | ||
6470 | |||
6471 | if (np->rx_rings) { | ||
6472 | for (i = 0; i < np->num_rx_rings; i++) { | ||
6473 | struct rx_ring_info *rp = &np->rx_rings[i]; | ||
6474 | |||
6475 | for (j = 0, k = 0; j < MAX_RBR_RING_SIZE; j++) { | ||
6476 | struct page *page; | ||
6477 | |||
6478 | page = rp->rxhash[j]; | ||
6479 | while (page) { | ||
6480 | struct page *next = | ||
6481 | (struct page *) page->mapping; | ||
6482 | u64 base = page->index; | ||
6483 | base = base >> RBR_DESCR_ADDR_SHIFT; | ||
6484 | rp->rbr[k++] = cpu_to_le32(base); | ||
6485 | page = next; | ||
6486 | } | ||
6487 | } | ||
6488 | for (; k < MAX_RBR_RING_SIZE; k++) { | ||
6489 | err = niu_rbr_add_page(np, rp, GFP_ATOMIC, k); | ||
6490 | if (unlikely(err)) | ||
6491 | break; | ||
6492 | } | ||
6493 | |||
6494 | rp->rbr_index = rp->rbr_table_size - 1; | ||
6495 | rp->rcr_index = 0; | ||
6496 | rp->rbr_pending = 0; | ||
6497 | rp->rbr_refill_pending = 0; | ||
6498 | } | ||
6499 | } | ||
6500 | if (np->tx_rings) { | ||
6501 | for (i = 0; i < np->num_tx_rings; i++) { | ||
6502 | struct tx_ring_info *rp = &np->tx_rings[i]; | ||
6503 | |||
6504 | for (j = 0; j < MAX_TX_RING_SIZE; j++) { | ||
6505 | if (rp->tx_buffs[j].skb) | ||
6506 | (void) release_tx_packet(np, rp, j); | ||
6507 | } | ||
6508 | |||
6509 | rp->pending = MAX_TX_RING_SIZE; | ||
6510 | rp->prod = 0; | ||
6511 | rp->cons = 0; | ||
6512 | rp->wrap_bit = 0; | ||
6513 | } | ||
6514 | } | ||
6515 | } | ||
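
niu_reset_buffers() recovers the RX pages that were already posted by walking a singly linked list the driver threads through each page's ->mapping field (with the DMA address kept in ->index), rewriting their RBR descriptors before topping the ring back up. A freestanding sketch of the same walk over a toy node type; the shift value is a stand-in, not the real RBR_DESCR_ADDR_SHIFT:

    #include <stdint.h>
    #include <stdio.h>

    /* Toy stand-in for struct page: 'next' plays the role of page->mapping
     * and 'dma' the role of page->index in the driver. */
    struct toy_page {
            struct toy_page *next;
            uint64_t dma;
    };

    #define SK_ADDR_SHIFT 12  /* assumed descriptor address shift */

    static int rebuild_rbr(struct toy_page *buckets[], int nbuckets,
                           uint32_t *rbr)
    {
            int j, k = 0;

            for (j = 0; j < nbuckets; j++)
                    for (struct toy_page *p = buckets[j]; p; p = p->next)
                            rbr[k++] = (uint32_t)(p->dma >> SK_ADDR_SHIFT);
            return k;  /* entries recovered; the caller refills the rest */
    }

    int main(void)
    {
            struct toy_page b = { NULL, 0x2000 }, a = { &b, 0x1000 };
            struct toy_page *buckets[1] = { &a };
            uint32_t rbr[4];
            int n = rebuild_rbr(buckets, 1, rbr);

            printf("%d entries, first=%#x second=%#x\n", n, rbr[0], rbr[1]);
            return 0;
    }
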
6516 | |||
6517 | static void niu_reset_task(struct work_struct *work) | ||
6518 | { | ||
6519 | struct niu *np = container_of(work, struct niu, reset_task); | ||
6520 | unsigned long flags; | ||
6521 | int err; | ||
6522 | |||
6523 | spin_lock_irqsave(&np->lock, flags); | ||
6524 | if (!netif_running(np->dev)) { | ||
6525 | spin_unlock_irqrestore(&np->lock, flags); | ||
6526 | return; | ||
6527 | } | ||
6528 | |||
6529 | spin_unlock_irqrestore(&np->lock, flags); | ||
6530 | |||
6531 | del_timer_sync(&np->timer); | ||
6532 | |||
6533 | niu_netif_stop(np); | ||
6534 | |||
6535 | spin_lock_irqsave(&np->lock, flags); | ||
6536 | |||
6537 | niu_stop_hw(np); | ||
6538 | |||
6539 | spin_unlock_irqrestore(&np->lock, flags); | ||
6540 | |||
6541 | niu_reset_buffers(np); | ||
6542 | |||
6543 | spin_lock_irqsave(&np->lock, flags); | ||
6544 | |||
6545 | err = niu_init_hw(np); | ||
6546 | if (!err) { | ||
6547 | np->timer.expires = jiffies + HZ; | ||
6548 | add_timer(&np->timer); | ||
6549 | niu_netif_start(np); | ||
6550 | } | ||
6551 | |||
6552 | spin_unlock_irqrestore(&np->lock, flags); | ||
6553 | } | ||
6554 | |||
6555 | static void niu_tx_timeout(struct net_device *dev) | ||
6556 | { | ||
6557 | struct niu *np = netdev_priv(dev); | ||
6558 | |||
6559 | dev_err(np->device, "%s: Transmit timed out, resetting\n", | ||
6560 | dev->name); | ||
6561 | |||
6562 | schedule_work(&np->reset_task); | ||
6563 | } | ||
6564 | |||
6565 | static void niu_set_txd(struct tx_ring_info *rp, int index, | ||
6566 | u64 mapping, u64 len, u64 mark, | ||
6567 | u64 n_frags) | ||
6568 | { | ||
6569 | __le64 *desc = &rp->descr[index]; | ||
6570 | |||
6571 | *desc = cpu_to_le64(mark | | ||
6572 | (n_frags << TX_DESC_NUM_PTR_SHIFT) | | ||
6573 | (len << TX_DESC_TR_LEN_SHIFT) | | ||
6574 | (mapping & TX_DESC_SAD)); | ||
6575 | } | ||
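
Each TX descriptor built by niu_set_txd() is a single little-endian 64-bit word: the SOP/mark flags, the number of descriptors in the packet, the transfer length, and the masked DMA address all share it. Here is a standalone sketch of the packing with placeholder shifts and mask; the authoritative TX_DESC_* values are defined in niu.h:

    #include <stdint.h>
    #include <stdio.h>

    /* Placeholder layout, for illustration only. */
    #define SK_NUM_PTR_SHIFT  58
    #define SK_TR_LEN_SHIFT   44
    #define SK_SAD_MASK       ((1ULL << 44) - 1)

    static uint64_t pack_txd(uint64_t mark, uint64_t n_frags,
                             uint64_t len, uint64_t mapping)
    {
            return mark |
                   (n_frags << SK_NUM_PTR_SHIFT) |
                   (len << SK_TR_LEN_SHIFT) |
                   (mapping & SK_SAD_MASK);
    }

    int main(void)
    {
            /* first descriptor of a 2-descriptor, 1500-byte packet */
            printf("txd=%#llx\n",
                   (unsigned long long)pack_txd(1ULL << 63 /* SOP stand-in */,
                                                2, 1500, 0xdeadb000));
            return 0;
    }
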
6576 | |||
6577 | static u64 niu_compute_tx_flags(struct sk_buff *skb, struct ethhdr *ehdr, | ||
6578 | u64 pad_bytes, u64 len) | ||
6579 | { | ||
6580 | u16 eth_proto, eth_proto_inner; | ||
6581 | u64 csum_bits, l3off, ihl, ret; | ||
6582 | u8 ip_proto; | ||
6583 | int ipv6; | ||
6584 | |||
6585 | eth_proto = be16_to_cpu(ehdr->h_proto); | ||
6586 | eth_proto_inner = eth_proto; | ||
6587 | if (eth_proto == ETH_P_8021Q) { | ||
6588 | struct vlan_ethhdr *vp = (struct vlan_ethhdr *) ehdr; | ||
6589 | __be16 val = vp->h_vlan_encapsulated_proto; | ||
6590 | |||
6591 | eth_proto_inner = be16_to_cpu(val); | ||
6592 | } | ||
6593 | |||
6594 | ipv6 = ihl = 0; | ||
6595 | switch (skb->protocol) { | ||
6596 | case cpu_to_be16(ETH_P_IP): | ||
6597 | ip_proto = ip_hdr(skb)->protocol; | ||
6598 | ihl = ip_hdr(skb)->ihl; | ||
6599 | break; | ||
6600 | case cpu_to_be16(ETH_P_IPV6): | ||
6601 | ip_proto = ipv6_hdr(skb)->nexthdr; | ||
6602 | ihl = (40 >> 2); | ||
6603 | ipv6 = 1; | ||
6604 | break; | ||
6605 | default: | ||
6606 | ip_proto = ihl = 0; | ||
6607 | break; | ||
6608 | } | ||
6609 | |||
6610 | csum_bits = TXHDR_CSUM_NONE; | ||
6611 | if (skb->ip_summed == CHECKSUM_PARTIAL) { | ||
6612 | u64 start, stuff; | ||
6613 | |||
6614 | csum_bits = (ip_proto == IPPROTO_TCP ? | ||
6615 | TXHDR_CSUM_TCP : | ||
6616 | (ip_proto == IPPROTO_UDP ? | ||
6617 | TXHDR_CSUM_UDP : TXHDR_CSUM_SCTP)); | ||
6618 | |||
6619 | start = skb_checksum_start_offset(skb) - | ||
6620 | (pad_bytes + sizeof(struct tx_pkt_hdr)); | ||
6621 | stuff = start + skb->csum_offset; | ||
6622 | |||
6623 | csum_bits |= (start / 2) << TXHDR_L4START_SHIFT; | ||
6624 | csum_bits |= (stuff / 2) << TXHDR_L4STUFF_SHIFT; | ||
6625 | } | ||
6626 | |||
6627 | l3off = skb_network_offset(skb) - | ||
6628 | (pad_bytes + sizeof(struct tx_pkt_hdr)); | ||
6629 | |||
6630 | ret = (((pad_bytes / 2) << TXHDR_PAD_SHIFT) | | ||
6631 | (len << TXHDR_LEN_SHIFT) | | ||
6632 | ((l3off / 2) << TXHDR_L3START_SHIFT) | | ||
6633 | (ihl << TXHDR_IHL_SHIFT) | | ||
6634 | ((eth_proto_inner < 1536) ? TXHDR_LLC : 0) | | ||
6635 | ((eth_proto == ETH_P_8021Q) ? TXHDR_VLAN : 0) | | ||
6636 | (ipv6 ? TXHDR_IP_VER : 0) | | ||
6637 | csum_bits); | ||
6638 | |||
6639 | return ret; | ||
6640 | } | ||
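
For CHECKSUM_PARTIAL, the hardware wants the L4 start and checksum-stuff positions as 16-bit-word offsets measured from the start of the frame as the NIC sees it, i.e. after the pad bytes and the tx_pkt_hdr the driver prepends; that is why both are divided by two after subtracting that prefix. A standalone restatement of the arithmetic, with sizeof(struct tx_pkt_hdr) assumed to be 16 here:

    #include <stdio.h>

    #define SK_TX_PKT_HDR_LEN 16  /* assumed sizeof(struct tx_pkt_hdr) */

    int main(void)
    {
            unsigned pad_bytes = 0;              /* alignment padding (none) */
            unsigned hdr = pad_bytes + SK_TX_PKT_HDR_LEN;
            /* checksum start as seen in the skb after the driver header was
             * pushed: hdr + 14-byte Ethernet + 20-byte IPv4 */
            unsigned csum_start = hdr + 14 + 20;
            unsigned csum_offset = 16;           /* TCP checksum field offset */

            unsigned start = csum_start - hdr;   /* relative to the wire frame */
            unsigned stuff = start + csum_offset;

            /* the hardware fields are in 2-byte units */
            printf("l4start=%u words, l4stuff=%u words\n", start / 2, stuff / 2);
            return 0;
    }
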
6641 | |||
6642 | static netdev_tx_t niu_start_xmit(struct sk_buff *skb, | ||
6643 | struct net_device *dev) | ||
6644 | { | ||
6645 | struct niu *np = netdev_priv(dev); | ||
6646 | unsigned long align, headroom; | ||
6647 | struct netdev_queue *txq; | ||
6648 | struct tx_ring_info *rp; | ||
6649 | struct tx_pkt_hdr *tp; | ||
6650 | unsigned int len, nfg; | ||
6651 | struct ethhdr *ehdr; | ||
6652 | int prod, i, tlen; | ||
6653 | u64 mapping, mrk; | ||
6654 | |||
6655 | i = skb_get_queue_mapping(skb); | ||
6656 | rp = &np->tx_rings[i]; | ||
6657 | txq = netdev_get_tx_queue(dev, i); | ||
6658 | |||
6659 | if (niu_tx_avail(rp) <= (skb_shinfo(skb)->nr_frags + 1)) { | ||
6660 | netif_tx_stop_queue(txq); | ||
6661 | dev_err(np->device, "%s: BUG! Tx ring full when queue awake!\n", dev->name); | ||
6662 | rp->tx_errors++; | ||
6663 | return NETDEV_TX_BUSY; | ||
6664 | } | ||
6665 | |||
6666 | if (skb->len < ETH_ZLEN) { | ||
6667 | unsigned int pad_bytes = ETH_ZLEN - skb->len; | ||
6668 | |||
6669 | if (skb_pad(skb, pad_bytes)) | ||
6670 | goto out; | ||
6671 | skb_put(skb, pad_bytes); | ||
6672 | } | ||
6673 | |||
6674 | len = sizeof(struct tx_pkt_hdr) + 15; | ||
6675 | if (skb_headroom(skb) < len) { | ||
6676 | struct sk_buff *skb_new; | ||
6677 | |||
6678 | skb_new = skb_realloc_headroom(skb, len); | ||
6679 | if (!skb_new) { | ||
6680 | rp->tx_errors++; | ||
6681 | goto out_drop; | ||
6682 | } | ||
6683 | kfree_skb(skb); | ||
6684 | skb = skb_new; | ||
6685 | } else | ||
6686 | skb_orphan(skb); | ||
6687 | |||
6688 | align = ((unsigned long) skb->data & (16 - 1)); | ||
6689 | headroom = align + sizeof(struct tx_pkt_hdr); | ||
6690 | |||
6691 | ehdr = (struct ethhdr *) skb->data; | ||
6692 | tp = (struct tx_pkt_hdr *) skb_push(skb, headroom); | ||
6693 | |||
6694 | len = skb->len - sizeof(struct tx_pkt_hdr); | ||
6695 | tp->flags = cpu_to_le64(niu_compute_tx_flags(skb, ehdr, align, len)); | ||
6696 | tp->resv = 0; | ||
6697 | |||
6698 | len = skb_headlen(skb); | ||
6699 | mapping = np->ops->map_single(np->device, skb->data, | ||
6700 | len, DMA_TO_DEVICE); | ||
6701 | |||
6702 | prod = rp->prod; | ||
6703 | |||
6704 | rp->tx_buffs[prod].skb = skb; | ||
6705 | rp->tx_buffs[prod].mapping = mapping; | ||
6706 | |||
6707 | mrk = TX_DESC_SOP; | ||
6708 | if (++rp->mark_counter == rp->mark_freq) { | ||
6709 | rp->mark_counter = 0; | ||
6710 | mrk |= TX_DESC_MARK; | ||
6711 | rp->mark_pending++; | ||
6712 | } | ||
6713 | |||
6714 | tlen = len; | ||
6715 | nfg = skb_shinfo(skb)->nr_frags; | ||
6716 | while (tlen > 0) { | ||
6717 | tlen -= MAX_TX_DESC_LEN; | ||
6718 | nfg++; | ||
6719 | } | ||
6720 | |||
6721 | while (len > 0) { | ||
6722 | unsigned int this_len = len; | ||
6723 | |||
6724 | if (this_len > MAX_TX_DESC_LEN) | ||
6725 | this_len = MAX_TX_DESC_LEN; | ||
6726 | |||
6727 | niu_set_txd(rp, prod, mapping, this_len, mrk, nfg); | ||
6728 | mrk = nfg = 0; | ||
6729 | |||
6730 | prod = NEXT_TX(rp, prod); | ||
6731 | mapping += this_len; | ||
6732 | len -= this_len; | ||
6733 | } | ||
6734 | |||
6735 | for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) { | ||
6736 | skb_frag_t *frag = &skb_shinfo(skb)->frags[i]; | ||
6737 | |||
6738 | len = frag->size; | ||
6739 | mapping = np->ops->map_page(np->device, frag->page, | ||
6740 | frag->page_offset, len, | ||
6741 | DMA_TO_DEVICE); | ||
6742 | |||
6743 | rp->tx_buffs[prod].skb = NULL; | ||
6744 | rp->tx_buffs[prod].mapping = mapping; | ||
6745 | |||
6746 | niu_set_txd(rp, prod, mapping, len, 0, 0); | ||
6747 | |||
6748 | prod = NEXT_TX(rp, prod); | ||
6749 | } | ||
6750 | |||
6751 | if (prod < rp->prod) | ||
6752 | rp->wrap_bit ^= TX_RING_KICK_WRAP; | ||
6753 | rp->prod = prod; | ||
6754 | |||
6755 | nw64(TX_RING_KICK(rp->tx_channel), rp->wrap_bit | (prod << 3)); | ||
6756 | |||
6757 | if (unlikely(niu_tx_avail(rp) <= (MAX_SKB_FRAGS + 1))) { | ||
6758 | netif_tx_stop_queue(txq); | ||
6759 | if (niu_tx_avail(rp) > NIU_TX_WAKEUP_THRESH(rp)) | ||
6760 | netif_tx_wake_queue(txq); | ||
6761 | } | ||
6762 | |||
6763 | out: | ||
6764 | return NETDEV_TX_OK; | ||
6765 | |||
6766 | out_drop: | ||
6767 | rp->tx_errors++; | ||
6768 | kfree_skb(skb); | ||
6769 | goto out; | ||
6770 | } | ||
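
The nfg computed in niu_start_xmit() is the total descriptor count stored in the first descriptor: one per page fragment plus however many MAX_TX_DESC_LEN-sized pieces the linear head needs, which the tlen loop counts as a ceiling division. A standalone restatement, with MAX_TX_DESC_LEN assumed to be 4096 purely for the example:

    #include <stdio.h>

    #define SK_MAX_TX_DESC_LEN 4096  /* assumed per-descriptor limit */

    static unsigned tx_descr_count(int headlen, unsigned nr_frags)
    {
            unsigned nfg = nr_frags;
            int tlen = headlen;

            while (tlen > 0) {          /* ceil(headlen / SK_MAX_TX_DESC_LEN) */
                    tlen -= SK_MAX_TX_DESC_LEN;
                    nfg++;
            }
            return nfg;
    }

    int main(void)
    {
            /* 9000-byte linear area + 2 page frags -> 3 + 2 descriptors */
            printf("descriptors = %u\n", tx_descr_count(9000, 2));
            return 0;
    }
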
6771 | |||
6772 | static int niu_change_mtu(struct net_device *dev, int new_mtu) | ||
6773 | { | ||
6774 | struct niu *np = netdev_priv(dev); | ||
6775 | int err, orig_jumbo, new_jumbo; | ||
6776 | |||
6777 | if (new_mtu < 68 || new_mtu > NIU_MAX_MTU) /* 68 = minimum valid IPv4 MTU */ | ||
6778 | return -EINVAL; | ||
6779 | |||
6780 | orig_jumbo = (dev->mtu > ETH_DATA_LEN); | ||
6781 | new_jumbo = (new_mtu > ETH_DATA_LEN); | ||
6782 | |||
6783 | dev->mtu = new_mtu; | ||
6784 | |||
6785 | if (!netif_running(dev) || | ||
6786 | (orig_jumbo == new_jumbo)) | ||
6787 | return 0; | ||
6788 | |||
6789 | niu_full_shutdown(np, dev); | ||
6790 | |||
6791 | niu_free_channels(np); | ||
6792 | |||
6793 | niu_enable_napi(np); | ||
6794 | |||
6795 | err = niu_alloc_channels(np); | ||
6796 | if (err) | ||
6797 | return err; | ||
6798 | |||
6799 | spin_lock_irq(&np->lock); | ||
6800 | |||
6801 | err = niu_init_hw(np); | ||
6802 | if (!err) { | ||
6803 | init_timer(&np->timer); | ||
6804 | np->timer.expires = jiffies + HZ; | ||
6805 | np->timer.data = (unsigned long) np; | ||
6806 | np->timer.function = niu_timer; | ||
6807 | |||
6808 | err = niu_enable_interrupts(np, 1); | ||
6809 | if (err) | ||
6810 | niu_stop_hw(np); | ||
6811 | } | ||
6812 | |||
6813 | spin_unlock_irq(&np->lock); | ||
6814 | |||
6815 | if (!err) { | ||
6816 | netif_tx_start_all_queues(dev); | ||
6817 | if (np->link_config.loopback_mode != LOOPBACK_DISABLED) | ||
6818 | netif_carrier_on(dev); | ||
6819 | |||
6820 | add_timer(&np->timer); | ||
6821 | } | ||
6822 | |||
6823 | return err; | ||
6824 | } | ||
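
niu_change_mtu() only tears the device down and rebuilds the rings when the new MTU crosses the ETH_DATA_LEN boundary, since that is what changes the buffer geometry; moving between two standard sizes (or two jumbo sizes) just updates dev->mtu. A tiny restatement of the decision:

    #include <stdbool.h>
    #include <stdio.h>

    #define ETH_DATA_LEN 1500

    static bool needs_ring_rebuild(int old_mtu, int new_mtu)
    {
            bool orig_jumbo = old_mtu > ETH_DATA_LEN;
            bool new_jumbo = new_mtu > ETH_DATA_LEN;

            return orig_jumbo != new_jumbo;
    }

    int main(void)
    {
            printf("%d\n", needs_ring_rebuild(1500, 9000)); /* 1: rebuild */
            printf("%d\n", needs_ring_rebuild(4000, 9000)); /* 0: both jumbo */
            return 0;
    }
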
6825 | |||
6826 | static void niu_get_drvinfo(struct net_device *dev, | ||
6827 | struct ethtool_drvinfo *info) | ||
6828 | { | ||
6829 | struct niu *np = netdev_priv(dev); | ||
6830 | struct niu_vpd *vpd = &np->vpd; | ||
6831 | |||
6832 | strcpy(info->driver, DRV_MODULE_NAME); | ||
6833 | strcpy(info->version, DRV_MODULE_VERSION); | ||
6834 | sprintf(info->fw_version, "%d.%d", | ||
6835 | vpd->fcode_major, vpd->fcode_minor); | ||
6836 | if (np->parent->plat_type != PLAT_TYPE_NIU) | ||
6837 | strcpy(info->bus_info, pci_name(np->pdev)); | ||
6838 | } | ||
6839 | |||
6840 | static int niu_get_settings(struct net_device *dev, struct ethtool_cmd *cmd) | ||
6841 | { | ||
6842 | struct niu *np = netdev_priv(dev); | ||
6843 | struct niu_link_config *lp; | ||
6844 | |||
6845 | lp = &np->link_config; | ||
6846 | |||
6847 | memset(cmd, 0, sizeof(*cmd)); | ||
6848 | cmd->phy_address = np->phy_addr; | ||
6849 | cmd->supported = lp->supported; | ||
6850 | cmd->advertising = lp->active_advertising; | ||
6851 | cmd->autoneg = lp->active_autoneg; | ||
6852 | ethtool_cmd_speed_set(cmd, lp->active_speed); | ||
6853 | cmd->duplex = lp->active_duplex; | ||
6854 | cmd->port = (np->flags & NIU_FLAGS_FIBER) ? PORT_FIBRE : PORT_TP; | ||
6855 | cmd->transceiver = (np->flags & NIU_FLAGS_XCVR_SERDES) ? | ||
6856 | XCVR_EXTERNAL : XCVR_INTERNAL; | ||
6857 | |||
6858 | return 0; | ||
6859 | } | ||
6860 | |||
6861 | static int niu_set_settings(struct net_device *dev, struct ethtool_cmd *cmd) | ||
6862 | { | ||
6863 | struct niu *np = netdev_priv(dev); | ||
6864 | struct niu_link_config *lp = &np->link_config; | ||
6865 | |||
6866 | lp->advertising = cmd->advertising; | ||
6867 | lp->speed = ethtool_cmd_speed(cmd); | ||
6868 | lp->duplex = cmd->duplex; | ||
6869 | lp->autoneg = cmd->autoneg; | ||
6870 | return niu_init_link(np); | ||
6871 | } | ||
6872 | |||
6873 | static u32 niu_get_msglevel(struct net_device *dev) | ||
6874 | { | ||
6875 | struct niu *np = netdev_priv(dev); | ||
6876 | return np->msg_enable; | ||
6877 | } | ||
6878 | |||
6879 | static void niu_set_msglevel(struct net_device *dev, u32 value) | ||
6880 | { | ||
6881 | struct niu *np = netdev_priv(dev); | ||
6882 | np->msg_enable = value; | ||
6883 | } | ||
6884 | |||
6885 | static int niu_nway_reset(struct net_device *dev) | ||
6886 | { | ||
6887 | struct niu *np = netdev_priv(dev); | ||
6888 | |||
6889 | if (np->link_config.autoneg) | ||
6890 | return niu_init_link(np); | ||
6891 | |||
6892 | return 0; | ||
6893 | } | ||
6894 | |||
6895 | static int niu_get_eeprom_len(struct net_device *dev) | ||
6896 | { | ||
6897 | struct niu *np = netdev_priv(dev); | ||
6898 | |||
6899 | return np->eeprom_len; | ||
6900 | } | ||
6901 | |||
6902 | static int niu_get_eeprom(struct net_device *dev, | ||
6903 | struct ethtool_eeprom *eeprom, u8 *data) | ||
6904 | { | ||
6905 | struct niu *np = netdev_priv(dev); | ||
6906 | u32 offset, len, val; | ||
6907 | |||
6908 | offset = eeprom->offset; | ||
6909 | len = eeprom->len; | ||
6910 | |||
6911 | if (offset + len < offset) | ||
6912 | return -EINVAL; | ||
6913 | if (offset >= np->eeprom_len) | ||
6914 | return -EINVAL; | ||
6915 | if (offset + len > np->eeprom_len) | ||
6916 | len = eeprom->len = np->eeprom_len - offset; | ||
6917 | |||
6918 | if (offset & 3) { | ||
6919 | u32 b_offset, b_count; | ||
6920 | |||
6921 | b_offset = offset & 3; | ||
6922 | b_count = 4 - b_offset; | ||
6923 | if (b_count > len) | ||
6924 | b_count = len; | ||
6925 | |||
6926 | val = nr64(ESPC_NCR((offset - b_offset) / 4)); | ||
6927 | memcpy(data, ((char *)&val) + b_offset, b_count); | ||
6928 | data += b_count; | ||
6929 | len -= b_count; | ||
6930 | offset += b_count; | ||
6931 | } | ||
6932 | while (len >= 4) { | ||
6933 | val = nr64(ESPC_NCR(offset / 4)); | ||
6934 | memcpy(data, &val, 4); | ||
6935 | data += 4; | ||
6936 | len -= 4; | ||
6937 | offset += 4; | ||
6938 | } | ||
6939 | if (len) { | ||
6940 | val = nr64(ESPC_NCR(offset / 4)); | ||
6941 | memcpy(data, &val, len); | ||
6942 | } | ||
6943 | return 0; | ||
6944 | } | ||
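
niu_get_eeprom() can only read the EEPROM one 32-bit word at a time, so an unaligned request is served in three phases: the tail of the word containing the start offset, then whole words, then the head of a final word. A standalone sketch of the head-alignment arithmetic:

    #include <stdio.h>

    int main(void)
    {
            unsigned offset = 6, len = 9;  /* example unaligned request */

            if (offset & 3) {
                    unsigned b_offset = offset & 3;   /* 2 bytes into word 1 */
                    unsigned b_count = 4 - b_offset;  /* 2 bytes left in it */

                    if (b_count > len)
                            b_count = len;
                    printf("head: word %u, skip %u, copy %u\n",
                           (offset - b_offset) / 4, b_offset, b_count);
                    offset += b_count;
                    len -= b_count;
            }
            printf("middle: %u whole words from word %u\n", len / 4, offset / 4);
            printf("tail: %u bytes\n", len & 3);
            return 0;
    }
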
6945 | |||
6946 | static void niu_ethflow_to_l3proto(int flow_type, u8 *pid) | ||
6947 | { | ||
6948 | switch (flow_type) { | ||
6949 | case TCP_V4_FLOW: | ||
6950 | case TCP_V6_FLOW: | ||
6951 | *pid = IPPROTO_TCP; | ||
6952 | break; | ||
6953 | case UDP_V4_FLOW: | ||
6954 | case UDP_V6_FLOW: | ||
6955 | *pid = IPPROTO_UDP; | ||
6956 | break; | ||
6957 | case SCTP_V4_FLOW: | ||
6958 | case SCTP_V6_FLOW: | ||
6959 | *pid = IPPROTO_SCTP; | ||
6960 | break; | ||
6961 | case AH_V4_FLOW: | ||
6962 | case AH_V6_FLOW: | ||
6963 | *pid = IPPROTO_AH; | ||
6964 | break; | ||
6965 | case ESP_V4_FLOW: | ||
6966 | case ESP_V6_FLOW: | ||
6967 | *pid = IPPROTO_ESP; | ||
6968 | break; | ||
6969 | default: | ||
6970 | *pid = 0; | ||
6971 | break; | ||
6972 | } | ||
6973 | } | ||
6974 | |||
6975 | static int niu_class_to_ethflow(u64 class, int *flow_type) | ||
6976 | { | ||
6977 | switch (class) { | ||
6978 | case CLASS_CODE_TCP_IPV4: | ||
6979 | *flow_type = TCP_V4_FLOW; | ||
6980 | break; | ||
6981 | case CLASS_CODE_UDP_IPV4: | ||
6982 | *flow_type = UDP_V4_FLOW; | ||
6983 | break; | ||
6984 | case CLASS_CODE_AH_ESP_IPV4: | ||
6985 | *flow_type = AH_V4_FLOW; | ||
6986 | break; | ||
6987 | case CLASS_CODE_SCTP_IPV4: | ||
6988 | *flow_type = SCTP_V4_FLOW; | ||
6989 | break; | ||
6990 | case CLASS_CODE_TCP_IPV6: | ||
6991 | *flow_type = TCP_V6_FLOW; | ||
6992 | break; | ||
6993 | case CLASS_CODE_UDP_IPV6: | ||
6994 | *flow_type = UDP_V6_FLOW; | ||
6995 | break; | ||
6996 | case CLASS_CODE_AH_ESP_IPV6: | ||
6997 | *flow_type = AH_V6_FLOW; | ||
6998 | break; | ||
6999 | case CLASS_CODE_SCTP_IPV6: | ||
7000 | *flow_type = SCTP_V6_FLOW; | ||
7001 | break; | ||
7002 | case CLASS_CODE_USER_PROG1: | ||
7003 | case CLASS_CODE_USER_PROG2: | ||
7004 | case CLASS_CODE_USER_PROG3: | ||
7005 | case CLASS_CODE_USER_PROG4: | ||
7006 | *flow_type = IP_USER_FLOW; | ||
7007 | break; | ||
7008 | default: | ||
7009 | return 0; | ||
7010 | } | ||
7011 | |||
7012 | return 1; | ||
7013 | } | ||
7014 | |||
7015 | static int niu_ethflow_to_class(int flow_type, u64 *class) | ||
7016 | { | ||
7017 | switch (flow_type) { | ||
7018 | case TCP_V4_FLOW: | ||
7019 | *class = CLASS_CODE_TCP_IPV4; | ||
7020 | break; | ||
7021 | case UDP_V4_FLOW: | ||
7022 | *class = CLASS_CODE_UDP_IPV4; | ||
7023 | break; | ||
7024 | case AH_ESP_V4_FLOW: | ||
7025 | case AH_V4_FLOW: | ||
7026 | case ESP_V4_FLOW: | ||
7027 | *class = CLASS_CODE_AH_ESP_IPV4; | ||
7028 | break; | ||
7029 | case SCTP_V4_FLOW: | ||
7030 | *class = CLASS_CODE_SCTP_IPV4; | ||
7031 | break; | ||
7032 | case TCP_V6_FLOW: | ||
7033 | *class = CLASS_CODE_TCP_IPV6; | ||
7034 | break; | ||
7035 | case UDP_V6_FLOW: | ||
7036 | *class = CLASS_CODE_UDP_IPV6; | ||
7037 | break; | ||
7038 | case AH_ESP_V6_FLOW: | ||
7039 | case AH_V6_FLOW: | ||
7040 | case ESP_V6_FLOW: | ||
7041 | *class = CLASS_CODE_AH_ESP_IPV6; | ||
7042 | break; | ||
7043 | case SCTP_V6_FLOW: | ||
7044 | *class = CLASS_CODE_SCTP_IPV6; | ||
7045 | break; | ||
7046 | default: | ||
7047 | return 0; | ||
7048 | } | ||
7049 | |||
7050 | return 1; | ||
7051 | } | ||
7052 | |||
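/* Descriptive note (added): translate the NIU FLOW_KEY register bits
 * into the generic ethtool RXH_* hash-field flags, and (below) back
 * again.
 */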
7053 | static u64 niu_flowkey_to_ethflow(u64 flow_key) | ||
7054 | { | ||
7055 | u64 ethflow = 0; | ||
7056 | |||
7057 | if (flow_key & FLOW_KEY_L2DA) | ||
7058 | ethflow |= RXH_L2DA; | ||
7059 | if (flow_key & FLOW_KEY_VLAN) | ||
7060 | ethflow |= RXH_VLAN; | ||
7061 | if (flow_key & FLOW_KEY_IPSA) | ||
7062 | ethflow |= RXH_IP_SRC; | ||
7063 | if (flow_key & FLOW_KEY_IPDA) | ||
7064 | ethflow |= RXH_IP_DST; | ||
7065 | if (flow_key & FLOW_KEY_PROTO) | ||
7066 | ethflow |= RXH_L3_PROTO; | ||
7067 | if (flow_key & (FLOW_KEY_L4_BYTE12 << FLOW_KEY_L4_0_SHIFT)) | ||
7068 | ethflow |= RXH_L4_B_0_1; | ||
7069 | if (flow_key & (FLOW_KEY_L4_BYTE12 << FLOW_KEY_L4_1_SHIFT)) | ||
7070 | ethflow |= RXH_L4_B_2_3; | ||
7071 | |||
7072 | return ethflow; | ||
7074 | } | ||
7075 | |||
7076 | static int niu_ethflow_to_flowkey(u64 ethflow, u64 *flow_key) | ||
7077 | { | ||
7078 | u64 key = 0; | ||
7079 | |||
7080 | if (ethflow & RXH_L2DA) | ||
7081 | key |= FLOW_KEY_L2DA; | ||
7082 | if (ethflow & RXH_VLAN) | ||
7083 | key |= FLOW_KEY_VLAN; | ||
7084 | if (ethflow & RXH_IP_SRC) | ||
7085 | key |= FLOW_KEY_IPSA; | ||
7086 | if (ethflow & RXH_IP_DST) | ||
7087 | key |= FLOW_KEY_IPDA; | ||
7088 | if (ethflow & RXH_L3_PROTO) | ||
7089 | key |= FLOW_KEY_PROTO; | ||
7090 | if (ethflow & RXH_L4_B_0_1) | ||
7091 | key |= (FLOW_KEY_L4_BYTE12 << FLOW_KEY_L4_0_SHIFT); | ||
7092 | if (ethflow & RXH_L4_B_2_3) | ||
7093 | key |= (FLOW_KEY_L4_BYTE12 << FLOW_KEY_L4_1_SHIFT); | ||
7094 | |||
7095 | *flow_key = key; | ||
7096 | |||
7097 | return 1; | ||
7099 | } | ||
7100 | |||
7101 | static int niu_get_hash_opts(struct niu *np, struct ethtool_rxnfc *nfc) | ||
7102 | { | ||
7103 | u64 class; | ||
7104 | |||
7105 | nfc->data = 0; | ||
7106 | |||
7107 | if (!niu_ethflow_to_class(nfc->flow_type, &class)) | ||
7108 | return -EINVAL; | ||
7109 | |||
7110 | if (np->parent->tcam_key[class - CLASS_CODE_USER_PROG1] & | ||
7111 | TCAM_KEY_DISC) | ||
7112 | nfc->data = RXH_DISCARD; | ||
7113 | else | ||
7114 | nfc->data = niu_flowkey_to_ethflow(np->parent->flow_key[class - | ||
7115 | CLASS_CODE_USER_PROG1]); | ||
7116 | return 0; | ||
7117 | } | ||
7118 | |||
7119 | static void niu_get_ip4fs_from_tcam_key(struct niu_tcam_entry *tp, | ||
7120 | struct ethtool_rx_flow_spec *fsp) | ||
7121 | { | ||
7122 | u32 tmp; | ||
7123 | u16 prt; | ||
7124 | |||
7125 | tmp = (tp->key[3] & TCAM_V4KEY3_SADDR) >> TCAM_V4KEY3_SADDR_SHIFT; | ||
7126 | fsp->h_u.tcp_ip4_spec.ip4src = cpu_to_be32(tmp); | ||
7127 | |||
7128 | tmp = (tp->key[3] & TCAM_V4KEY3_DADDR) >> TCAM_V4KEY3_DADDR_SHIFT; | ||
7129 | fsp->h_u.tcp_ip4_spec.ip4dst = cpu_to_be32(tmp); | ||
7130 | |||
7131 | tmp = (tp->key_mask[3] & TCAM_V4KEY3_SADDR) >> TCAM_V4KEY3_SADDR_SHIFT; | ||
7132 | fsp->m_u.tcp_ip4_spec.ip4src = cpu_to_be32(tmp); | ||
7133 | |||
7134 | tmp = (tp->key_mask[3] & TCAM_V4KEY3_DADDR) >> TCAM_V4KEY3_DADDR_SHIFT; | ||
7135 | fsp->m_u.tcp_ip4_spec.ip4dst = cpu_to_be32(tmp); | ||
7136 | |||
7137 | fsp->h_u.tcp_ip4_spec.tos = (tp->key[2] & TCAM_V4KEY2_TOS) >> | ||
7138 | TCAM_V4KEY2_TOS_SHIFT; | ||
7139 | fsp->m_u.tcp_ip4_spec.tos = (tp->key_mask[2] & TCAM_V4KEY2_TOS) >> | ||
7140 | TCAM_V4KEY2_TOS_SHIFT; | ||
7141 | |||
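/* Descriptive note (added): the PORT_SPI field of key[2] carries
 * either the 16-bit source and destination ports (TCP/UDP/SCTP) or a
 * 32-bit SPI (AH/ESP).
 */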
7142 | switch (fsp->flow_type) { | ||
7143 | case TCP_V4_FLOW: | ||
7144 | case UDP_V4_FLOW: | ||
7145 | case SCTP_V4_FLOW: | ||
7146 | prt = ((tp->key[2] & TCAM_V4KEY2_PORT_SPI) >> | ||
7147 | TCAM_V4KEY2_PORT_SPI_SHIFT) >> 16; | ||
7148 | fsp->h_u.tcp_ip4_spec.psrc = cpu_to_be16(prt); | ||
7149 | |||
7150 | prt = ((tp->key[2] & TCAM_V4KEY2_PORT_SPI) >> | ||
7151 | TCAM_V4KEY2_PORT_SPI_SHIFT) & 0xffff; | ||
7152 | fsp->h_u.tcp_ip4_spec.pdst = cpu_to_be16(prt); | ||
7153 | |||
7154 | prt = ((tp->key_mask[2] & TCAM_V4KEY2_PORT_SPI) >> | ||
7155 | TCAM_V4KEY2_PORT_SPI_SHIFT) >> 16; | ||
7156 | fsp->m_u.tcp_ip4_spec.psrc = cpu_to_be16(prt); | ||
7157 | |||
7158 | prt = ((tp->key_mask[2] & TCAM_V4KEY2_PORT_SPI) >> | ||
7159 | TCAM_V4KEY2_PORT_SPI_SHIFT) & 0xffff; | ||
7160 | fsp->m_u.tcp_ip4_spec.pdst = cpu_to_be16(prt); | ||
7161 | break; | ||
7162 | case AH_V4_FLOW: | ||
7163 | case ESP_V4_FLOW: | ||
7164 | tmp = (tp->key[2] & TCAM_V4KEY2_PORT_SPI) >> | ||
7165 | TCAM_V4KEY2_PORT_SPI_SHIFT; | ||
7166 | fsp->h_u.ah_ip4_spec.spi = cpu_to_be32(tmp); | ||
7167 | |||
7168 | tmp = (tp->key_mask[2] & TCAM_V4KEY2_PORT_SPI) >> | ||
7169 | TCAM_V4KEY2_PORT_SPI_SHIFT; | ||
7170 | fsp->m_u.ah_ip4_spec.spi = cpu_to_be32(tmp); | ||
7171 | break; | ||
7172 | case IP_USER_FLOW: | ||
7173 | tmp = (tp->key[2] & TCAM_V4KEY2_PORT_SPI) >> | ||
7174 | TCAM_V4KEY2_PORT_SPI_SHIFT; | ||
7175 | fsp->h_u.usr_ip4_spec.l4_4_bytes = cpu_to_be32(tmp); | ||
7176 | |||
7177 | tmp = (tp->key_mask[2] & TCAM_V4KEY2_PORT_SPI) >> | ||
7178 | TCAM_V4KEY2_PORT_SPI_SHIFT; | ||
7179 | fsp->m_u.usr_ip4_spec.l4_4_bytes = cpu_to_be32(tmp); | ||
7180 | |||
7181 | fsp->h_u.usr_ip4_spec.proto = | ||
7182 | (tp->key[2] & TCAM_V4KEY2_PROTO) >> | ||
7183 | TCAM_V4KEY2_PROTO_SHIFT; | ||
7184 | fsp->m_u.usr_ip4_spec.proto = | ||
7185 | (tp->key_mask[2] & TCAM_V4KEY2_PROTO) >> | ||
7186 | TCAM_V4KEY2_PROTO_SHIFT; | ||
7187 | |||
7188 | fsp->h_u.usr_ip4_spec.ip_ver = ETH_RX_NFC_IP4; | ||
7189 | break; | ||
7190 | default: | ||
7191 | break; | ||
7192 | } | ||
7193 | } | ||
7194 | |||
7195 | static int niu_get_ethtool_tcam_entry(struct niu *np, | ||
7196 | struct ethtool_rxnfc *nfc) | ||
7197 | { | ||
7198 | struct niu_parent *parent = np->parent; | ||
7199 | struct niu_tcam_entry *tp; | ||
7200 | struct ethtool_rx_flow_spec *fsp = &nfc->fs; | ||
7201 | u16 idx; | ||
7202 | u64 class; | ||
7203 | int ret = 0; | ||
7204 | |||
7205 | idx = tcam_get_index(np, (u16)nfc->fs.location); | ||
7206 | |||
7207 | tp = &parent->tcam[idx]; | ||
7208 | if (!tp->valid) { | ||
7209 | netdev_info(np->dev, "niu%d: entry [%d] invalid for idx[%d]\n", | ||
7210 | parent->index, (u16)nfc->fs.location, idx); | ||
7211 | return -EINVAL; | ||
7212 | } | ||
7213 | |||
7214 | /* fill the flow spec entry */ | ||
7215 | class = (tp->key[0] & TCAM_V4KEY0_CLASS_CODE) >> | ||
7216 | TCAM_V4KEY0_CLASS_CODE_SHIFT; | ||
7219 | if (!niu_class_to_ethflow(class, &fsp->flow_type)) { | ||
7220 | netdev_info(np->dev, "niu%d: niu_class_to_ethflow failed\n", | ||
7221 | parent->index); | ||
7222 | ret = -EINVAL; | ||
7223 | goto out; | ||
7224 | } | ||
7225 | |||
7226 | if (fsp->flow_type == AH_V4_FLOW || fsp->flow_type == AH_V6_FLOW) { | ||
7227 | u32 proto = (tp->key[2] & TCAM_V4KEY2_PROTO) >> | ||
7228 | TCAM_V4KEY2_PROTO_SHIFT; | ||
7229 | if (proto == IPPROTO_ESP) { | ||
7230 | if (fsp->flow_type == AH_V4_FLOW) | ||
7231 | fsp->flow_type = ESP_V4_FLOW; | ||
7232 | else | ||
7233 | fsp->flow_type = ESP_V6_FLOW; | ||
7234 | } | ||
7235 | } | ||
7236 | |||
7237 | switch (fsp->flow_type) { | ||
7238 | case TCP_V4_FLOW: | ||
7239 | case UDP_V4_FLOW: | ||
7240 | case SCTP_V4_FLOW: | ||
7241 | case AH_V4_FLOW: | ||
7242 | case ESP_V4_FLOW: | ||
7243 | niu_get_ip4fs_from_tcam_key(tp, fsp); | ||
7244 | break; | ||
7245 | case TCP_V6_FLOW: | ||
7246 | case UDP_V6_FLOW: | ||
7247 | case SCTP_V6_FLOW: | ||
7248 | case AH_V6_FLOW: | ||
7249 | case ESP_V6_FLOW: | ||
7250 | /* Not yet implemented */ | ||
7251 | ret = -EINVAL; | ||
7252 | break; | ||
7253 | case IP_USER_FLOW: | ||
7254 | niu_get_ip4fs_from_tcam_key(tp, fsp); | ||
7255 | break; | ||
7256 | default: | ||
7257 | ret = -EINVAL; | ||
7258 | break; | ||
7259 | } | ||
7260 | |||
7261 | if (ret < 0) | ||
7262 | goto out; | ||
7263 | |||
7264 | if (tp->assoc_data & TCAM_ASSOCDATA_DISC) | ||
7265 | fsp->ring_cookie = RX_CLS_FLOW_DISC; | ||
7266 | else | ||
7267 | fsp->ring_cookie = (tp->assoc_data & TCAM_ASSOCDATA_OFFSET) >> | ||
7268 | TCAM_ASSOCDATA_OFFSET_SHIFT; | ||
7269 | |||
7270 | /* put the tcam size here */ | ||
7271 | nfc->data = tcam_get_size(np); | ||
7272 | out: | ||
7273 | return ret; | ||
7274 | } | ||
7275 | |||
7276 | static int niu_get_ethtool_tcam_all(struct niu *np, | ||
7277 | struct ethtool_rxnfc *nfc, | ||
7278 | u32 *rule_locs) | ||
7279 | { | ||
7280 | struct niu_parent *parent = np->parent; | ||
7281 | struct niu_tcam_entry *tp; | ||
7282 | int i, idx, cnt; | ||
7283 | unsigned long flags; | ||
7284 | int ret = 0; | ||
7285 | |||
7286 | /* put the tcam size here */ | ||
7287 | nfc->data = tcam_get_size(np); | ||
7288 | |||
7289 | niu_lock_parent(np, flags); | ||
7290 | for (cnt = 0, i = 0; i < nfc->data; i++) { | ||
7291 | idx = tcam_get_index(np, i); | ||
7292 | tp = &parent->tcam[idx]; | ||
7293 | if (!tp->valid) | ||
7294 | continue; | ||
7295 | if (cnt == nfc->rule_cnt) { | ||
7296 | ret = -EMSGSIZE; | ||
7297 | break; | ||
7298 | } | ||
7299 | rule_locs[cnt] = i; | ||
7300 | cnt++; | ||
7301 | } | ||
7302 | niu_unlock_parent(np, flags); | ||
7303 | |||
7304 | return ret; | ||
7305 | } | ||
7306 | |||
7307 | static int niu_get_nfc(struct net_device *dev, struct ethtool_rxnfc *cmd, | ||
7308 | void *rule_locs) | ||
7309 | { | ||
7310 | struct niu *np = netdev_priv(dev); | ||
7311 | int ret = 0; | ||
7312 | |||
7313 | switch (cmd->cmd) { | ||
7314 | case ETHTOOL_GRXFH: | ||
7315 | ret = niu_get_hash_opts(np, cmd); | ||
7316 | break; | ||
7317 | case ETHTOOL_GRXRINGS: | ||
7318 | cmd->data = np->num_rx_rings; | ||
7319 | break; | ||
7320 | case ETHTOOL_GRXCLSRLCNT: | ||
7321 | cmd->rule_cnt = tcam_get_valid_entry_cnt(np); | ||
7322 | break; | ||
7323 | case ETHTOOL_GRXCLSRULE: | ||
7324 | ret = niu_get_ethtool_tcam_entry(np, cmd); | ||
7325 | break; | ||
7326 | case ETHTOOL_GRXCLSRLALL: | ||
7327 | ret = niu_get_ethtool_tcam_all(np, cmd, (u32 *)rule_locs); | ||
7328 | break; | ||
7329 | default: | ||
7330 | ret = -EINVAL; | ||
7331 | break; | ||
7332 | } | ||
7333 | |||
7334 | return ret; | ||
7335 | } | ||
7336 | |||
7337 | static int niu_set_hash_opts(struct niu *np, struct ethtool_rxnfc *nfc) | ||
7338 | { | ||
7339 | u64 class; | ||
7340 | u64 flow_key = 0; | ||
7341 | unsigned long flags; | ||
7342 | |||
7343 | if (!niu_ethflow_to_class(nfc->flow_type, &class)) | ||
7344 | return -EINVAL; | ||
7345 | |||
7346 | if (class < CLASS_CODE_USER_PROG1 || | ||
7347 | class > CLASS_CODE_SCTP_IPV6) | ||
7348 | return -EINVAL; | ||
7349 | |||
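/* Descriptive note (added): RXH_DISCARD maps to the TCAM key's
 * discard bit rather than to a flow-key hash field, so it is handled
 * separately here.
 */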
7350 | if (nfc->data & RXH_DISCARD) { | ||
7351 | niu_lock_parent(np, flags); | ||
7352 | flow_key = np->parent->tcam_key[class - | ||
7353 | CLASS_CODE_USER_PROG1]; | ||
7354 | flow_key |= TCAM_KEY_DISC; | ||
7355 | nw64(TCAM_KEY(class - CLASS_CODE_USER_PROG1), flow_key); | ||
7356 | np->parent->tcam_key[class - CLASS_CODE_USER_PROG1] = flow_key; | ||
7357 | niu_unlock_parent(np, flags); | ||
7358 | return 0; | ||
7359 | } else { | ||
7360 | /* Discard was set before, but is not set now */ | ||
7361 | if (np->parent->tcam_key[class - CLASS_CODE_USER_PROG1] & | ||
7362 | TCAM_KEY_DISC) { | ||
7363 | niu_lock_parent(np, flags); | ||
7364 | flow_key = np->parent->tcam_key[class - | ||
7365 | CLASS_CODE_USER_PROG1]; | ||
7366 | flow_key &= ~TCAM_KEY_DISC; | ||
7367 | nw64(TCAM_KEY(class - CLASS_CODE_USER_PROG1), | ||
7368 | flow_key); | ||
7369 | np->parent->tcam_key[class - CLASS_CODE_USER_PROG1] = | ||
7370 | flow_key; | ||
7371 | niu_unlock_parent(np, flags); | ||
7372 | } | ||
7373 | } | ||
7374 | |||
7375 | if (!niu_ethflow_to_flowkey(nfc->data, &flow_key)) | ||
7376 | return -EINVAL; | ||
7377 | |||
7378 | niu_lock_parent(np, flags); | ||
7379 | nw64(FLOW_KEY(class - CLASS_CODE_USER_PROG1), flow_key); | ||
7380 | np->parent->flow_key[class - CLASS_CODE_USER_PROG1] = flow_key; | ||
7381 | niu_unlock_parent(np, flags); | ||
7382 | |||
7383 | return 0; | ||
7384 | } | ||
7385 | |||
7386 | static void niu_get_tcamkey_from_ip4fs(struct ethtool_rx_flow_spec *fsp, | ||
7387 | struct niu_tcam_entry *tp, | ||
7388 | int l2_rdc_tab, u64 class) | ||
7389 | { | ||
7390 | u8 pid = 0; | ||
7391 | u32 sip, dip, sipm, dipm, spi, spim; | ||
7392 | u16 sport, dport, spm, dpm; | ||
7393 | |||
7394 | sip = be32_to_cpu(fsp->h_u.tcp_ip4_spec.ip4src); | ||
7395 | sipm = be32_to_cpu(fsp->m_u.tcp_ip4_spec.ip4src); | ||
7396 | dip = be32_to_cpu(fsp->h_u.tcp_ip4_spec.ip4dst); | ||
7397 | dipm = be32_to_cpu(fsp->m_u.tcp_ip4_spec.ip4dst); | ||
7398 | |||
7399 | tp->key[0] = class << TCAM_V4KEY0_CLASS_CODE_SHIFT; | ||
7400 | tp->key_mask[0] = TCAM_V4KEY0_CLASS_CODE; | ||
7401 | tp->key[1] = (u64)l2_rdc_tab << TCAM_V4KEY1_L2RDCNUM_SHIFT; | ||
7402 | tp->key_mask[1] = TCAM_V4KEY1_L2RDCNUM; | ||
7403 | |||
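/* Descriptive note (added): key[3] packs the source address above the destination address. */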
7404 | tp->key[3] = (u64)sip << TCAM_V4KEY3_SADDR_SHIFT; | ||
7405 | tp->key[3] |= dip; | ||
7406 | |||
7407 | tp->key_mask[3] = (u64)sipm << TCAM_V4KEY3_SADDR_SHIFT; | ||
7408 | tp->key_mask[3] |= dipm; | ||
7409 | |||
7410 | tp->key[2] |= ((u64)fsp->h_u.tcp_ip4_spec.tos << | ||
7411 | TCAM_V4KEY2_TOS_SHIFT); | ||
7412 | tp->key_mask[2] |= ((u64)fsp->m_u.tcp_ip4_spec.tos << | ||
7413 | TCAM_V4KEY2_TOS_SHIFT); | ||
7414 | switch (fsp->flow_type) { | ||
7415 | case TCP_V4_FLOW: | ||
7416 | case UDP_V4_FLOW: | ||
7417 | case SCTP_V4_FLOW: | ||
7418 | sport = be16_to_cpu(fsp->h_u.tcp_ip4_spec.psrc); | ||
7419 | spm = be16_to_cpu(fsp->m_u.tcp_ip4_spec.psrc); | ||
7420 | dport = be16_to_cpu(fsp->h_u.tcp_ip4_spec.pdst); | ||
7421 | dpm = be16_to_cpu(fsp->m_u.tcp_ip4_spec.pdst); | ||
7422 | |||
7423 | tp->key[2] |= (((u64)sport << 16) | dport); | ||
7424 | tp->key_mask[2] |= (((u64)spm << 16) | dpm); | ||
7425 | niu_ethflow_to_l3proto(fsp->flow_type, &pid); | ||
7426 | break; | ||
7427 | case AH_V4_FLOW: | ||
7428 | case ESP_V4_FLOW: | ||
7429 | spi = be32_to_cpu(fsp->h_u.ah_ip4_spec.spi); | ||
7430 | spim = be32_to_cpu(fsp->m_u.ah_ip4_spec.spi); | ||
7431 | |||
7432 | tp->key[2] |= spi; | ||
7433 | tp->key_mask[2] |= spim; | ||
7434 | niu_ethflow_to_l3proto(fsp->flow_type, &pid); | ||
7435 | break; | ||
7436 | case IP_USER_FLOW: | ||
7437 | spi = be32_to_cpu(fsp->h_u.usr_ip4_spec.l4_4_bytes); | ||
7438 | spim = be32_to_cpu(fsp->m_u.usr_ip4_spec.l4_4_bytes); | ||
7439 | |||
7440 | tp->key[2] |= spi; | ||
7441 | tp->key_mask[2] |= spim; | ||
7442 | pid = fsp->h_u.usr_ip4_spec.proto; | ||
7443 | break; | ||
7444 | default: | ||
7445 | break; | ||
7446 | } | ||
7447 | |||
7448 | tp->key[2] |= ((u64)pid << TCAM_V4KEY2_PROTO_SHIFT); | ||
7449 | if (pid) | ||
7450 | tp->key_mask[2] |= TCAM_V4KEY2_PROTO; | ||
7452 | } | ||
7453 | |||
7454 | static int niu_add_ethtool_tcam_entry(struct niu *np, | ||
7455 | struct ethtool_rxnfc *nfc) | ||
7456 | { | ||
7457 | struct niu_parent *parent = np->parent; | ||
7458 | struct niu_tcam_entry *tp; | ||
7459 | struct ethtool_rx_flow_spec *fsp = &nfc->fs; | ||
7460 | struct niu_rdc_tables *rdc_table = &parent->rdc_group_cfg[np->port]; | ||
7461 | int l2_rdc_table = rdc_table->first_table_num; | ||
7462 | u16 idx; | ||
7463 | u64 class; | ||
7464 | unsigned long flags; | ||
7465 | int err, ret; | ||
7466 | |||
7467 | ret = 0; | ||
7468 | |||
7469 | idx = nfc->fs.location; | ||
7470 | if (idx >= tcam_get_size(np)) | ||
7471 | return -EINVAL; | ||
7472 | |||
7473 | if (fsp->flow_type == IP_USER_FLOW) { | ||
7474 | int i; | ||
7475 | int add_usr_cls = 0; | ||
7476 | struct ethtool_usrip4_spec *uspec = &fsp->h_u.usr_ip4_spec; | ||
7477 | struct ethtool_usrip4_spec *umask = &fsp->m_u.usr_ip4_spec; | ||
7478 | |||
7479 | if (uspec->ip_ver != ETH_RX_NFC_IP4) | ||
7480 | return -EINVAL; | ||
7481 | |||
7482 | niu_lock_parent(np, flags); | ||
7483 | |||
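/* Descriptive note (added): reuse an already-programmed user class
 * matching this protocol, or claim the first free
 * CLASS_CODE_USER_PROG slot.
 */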
7484 | for (i = 0; i < NIU_L3_PROG_CLS; i++) { | ||
7485 | if (parent->l3_cls[i]) { | ||
7486 | if (uspec->proto == parent->l3_cls_pid[i]) { | ||
7487 | class = parent->l3_cls[i]; | ||
7488 | parent->l3_cls_refcnt[i]++; | ||
7489 | add_usr_cls = 1; | ||
7490 | break; | ||
7491 | } | ||
7492 | } else { | ||
7493 | /* Program new user IP class */ | ||
7494 | switch (i) { | ||
7495 | case 0: | ||
7496 | class = CLASS_CODE_USER_PROG1; | ||
7497 | break; | ||
7498 | case 1: | ||
7499 | class = CLASS_CODE_USER_PROG2; | ||
7500 | break; | ||
7501 | case 2: | ||
7502 | class = CLASS_CODE_USER_PROG3; | ||
7503 | break; | ||
7504 | case 3: | ||
7505 | class = CLASS_CODE_USER_PROG4; | ||
7506 | break; | ||
7507 | default: | ||
7508 | break; | ||
7509 | } | ||
7510 | ret = tcam_user_ip_class_set(np, class, 0, | ||
7511 | uspec->proto, | ||
7512 | uspec->tos, | ||
7513 | umask->tos); | ||
7514 | if (ret) | ||
7515 | goto out; | ||
7516 | |||
7517 | ret = tcam_user_ip_class_enable(np, class, 1); | ||
7518 | if (ret) | ||
7519 | goto out; | ||
7520 | parent->l3_cls[i] = class; | ||
7521 | parent->l3_cls_pid[i] = uspec->proto; | ||
7522 | parent->l3_cls_refcnt[i]++; | ||
7523 | add_usr_cls = 1; | ||
7524 | break; | ||
7525 | } | ||
7526 | } | ||
7527 | if (!add_usr_cls) { | ||
7528 | netdev_info(np->dev, "niu%d: %s(): Could not find/insert class for pid %d\n", | ||
7529 | parent->index, __func__, uspec->proto); | ||
7530 | ret = -EINVAL; | ||
7531 | goto out; | ||
7532 | } | ||
7533 | niu_unlock_parent(np, flags); | ||
7534 | } else { | ||
7535 | if (!niu_ethflow_to_class(fsp->flow_type, &class)) | ||
7536 | return -EINVAL; | ||
7538 | } | ||
7539 | |||
7540 | niu_lock_parent(np, flags); | ||
7541 | |||
7542 | idx = tcam_get_index(np, idx); | ||
7543 | tp = &parent->tcam[idx]; | ||
7544 | |||
7545 | memset(tp, 0, sizeof(*tp)); | ||
7546 | |||
7547 | /* fill in the tcam key and mask */ | ||
7548 | switch (fsp->flow_type) { | ||
7549 | case TCP_V4_FLOW: | ||
7550 | case UDP_V4_FLOW: | ||
7551 | case SCTP_V4_FLOW: | ||
7552 | case AH_V4_FLOW: | ||
7553 | case ESP_V4_FLOW: | ||
7554 | niu_get_tcamkey_from_ip4fs(fsp, tp, l2_rdc_table, class); | ||
7555 | break; | ||
7556 | case TCP_V6_FLOW: | ||
7557 | case UDP_V6_FLOW: | ||
7558 | case SCTP_V6_FLOW: | ||
7559 | case AH_V6_FLOW: | ||
7560 | case ESP_V6_FLOW: | ||
7561 | /* Not yet implemented */ | ||
7562 | netdev_info(np->dev, "niu%d: In %s(): flow %d for IPv6 not implemented\n", | ||
7563 | parent->index, __func__, fsp->flow_type); | ||
7564 | ret = -EINVAL; | ||
7565 | goto out; | ||
7566 | case IP_USER_FLOW: | ||
7567 | niu_get_tcamkey_from_ip4fs(fsp, tp, l2_rdc_table, class); | ||
7568 | break; | ||
7569 | default: | ||
7570 | netdev_info(np->dev, "niu%d: In %s(): Unknown flow type %d\n", | ||
7571 | parent->index, __func__, fsp->flow_type); | ||
7572 | ret = -EINVAL; | ||
7573 | goto out; | ||
7574 | } | ||
7575 | |||
7576 | /* fill in the assoc data */ | ||
7577 | if (fsp->ring_cookie == RX_CLS_FLOW_DISC) { | ||
7578 | tp->assoc_data = TCAM_ASSOCDATA_DISC; | ||
7579 | } else { | ||
7580 | if (fsp->ring_cookie >= np->num_rx_rings) { | ||
7581 | netdev_info(np->dev, "niu%d: In %s(): Invalid RX ring %lld\n", | ||
7582 | parent->index, __func__, | ||
7583 | (long long)fsp->ring_cookie); | ||
7584 | ret = -EINVAL; | ||
7585 | goto out; | ||
7586 | } | ||
7587 | tp->assoc_data = (TCAM_ASSOCDATA_TRES_USE_OFFSET | | ||
7588 | (fsp->ring_cookie << | ||
7589 | TCAM_ASSOCDATA_OFFSET_SHIFT)); | ||
7590 | } | ||
7591 | |||
7592 | err = tcam_write(np, idx, tp->key, tp->key_mask); | ||
7593 | if (err) { | ||
7594 | ret = -EINVAL; | ||
7595 | goto out; | ||
7596 | } | ||
7597 | err = tcam_assoc_write(np, idx, tp->assoc_data); | ||
7598 | if (err) { | ||
7599 | ret = -EINVAL; | ||
7600 | goto out; | ||
7601 | } | ||
7602 | |||
7603 | /* validate the entry */ | ||
7604 | tp->valid = 1; | ||
7605 | np->clas.tcam_valid_entries++; | ||
7606 | out: | ||
7607 | niu_unlock_parent(np, flags); | ||
7608 | |||
7609 | return ret; | ||
7610 | } | ||
7611 | |||
7612 | static int niu_del_ethtool_tcam_entry(struct niu *np, u32 loc) | ||
7613 | { | ||
7614 | struct niu_parent *parent = np->parent; | ||
7615 | struct niu_tcam_entry *tp; | ||
7616 | u16 idx; | ||
7617 | unsigned long flags; | ||
7618 | u64 class; | ||
7619 | int ret = 0; | ||
7620 | |||
7621 | if (loc >= tcam_get_size(np)) | ||
7622 | return -EINVAL; | ||
7623 | |||
7624 | niu_lock_parent(np, flags); | ||
7625 | |||
7626 | idx = tcam_get_index(np, loc); | ||
7627 | tp = &parent->tcam[idx]; | ||
7628 | |||
7629 | /* If the entry is of a user-defined class, update the class refcount */ | ||
7630 | class = (tp->key[0] & TCAM_V4KEY0_CLASS_CODE) >> | ||
7631 | TCAM_V4KEY0_CLASS_CODE_SHIFT; | ||
7632 | |||
7633 | if (class >= CLASS_CODE_USER_PROG1 && class <= CLASS_CODE_USER_PROG4) { | ||
7634 | int i; | ||
7635 | for (i = 0; i < NIU_L3_PROG_CLS; i++) { | ||
7636 | if (parent->l3_cls[i] == class) { | ||
7637 | parent->l3_cls_refcnt[i]--; | ||
7638 | if (!parent->l3_cls_refcnt[i]) { | ||
7639 | /* disable class */ | ||
7640 | ret = tcam_user_ip_class_enable(np, | ||
7641 | class, | ||
7642 | 0); | ||
7643 | if (ret) | ||
7644 | goto out; | ||
7645 | parent->l3_cls[i] = 0; | ||
7646 | parent->l3_cls_pid[i] = 0; | ||
7647 | } | ||
7648 | break; | ||
7649 | } | ||
7650 | } | ||
7651 | if (i == NIU_L3_PROG_CLS) { | ||
7652 | netdev_info(np->dev, "niu%d: In %s(): Usr class 0x%llx not found\n", | ||
7653 | parent->index, __func__, | ||
7654 | (unsigned long long)class); | ||
7655 | ret = -EINVAL; | ||
7656 | goto out; | ||
7657 | } | ||
7658 | } | ||
7659 | |||
7660 | ret = tcam_flush(np, idx); | ||
7661 | if (ret) | ||
7662 | goto out; | ||
7663 | |||
7664 | /* invalidate the entry */ | ||
7665 | tp->valid = 0; | ||
7666 | np->clas.tcam_valid_entries--; | ||
7667 | out: | ||
7668 | niu_unlock_parent(np, flags); | ||
7669 | |||
7670 | return ret; | ||
7671 | } | ||
7672 | |||
7673 | static int niu_set_nfc(struct net_device *dev, struct ethtool_rxnfc *cmd) | ||
7674 | { | ||
7675 | struct niu *np = netdev_priv(dev); | ||
7676 | int ret = 0; | ||
7677 | |||
7678 | switch (cmd->cmd) { | ||
7679 | case ETHTOOL_SRXFH: | ||
7680 | ret = niu_set_hash_opts(np, cmd); | ||
7681 | break; | ||
7682 | case ETHTOOL_SRXCLSRLINS: | ||
7683 | ret = niu_add_ethtool_tcam_entry(np, cmd); | ||
7684 | break; | ||
7685 | case ETHTOOL_SRXCLSRLDEL: | ||
7686 | ret = niu_del_ethtool_tcam_entry(np, cmd->fs.location); | ||
7687 | break; | ||
7688 | default: | ||
7689 | ret = -EINVAL; | ||
7690 | break; | ||
7691 | } | ||
7692 | |||
7693 | return ret; | ||
7694 | } | ||
7695 | |||
7696 | static const struct { | ||
7697 | const char string[ETH_GSTRING_LEN]; | ||
7698 | } niu_xmac_stat_keys[] = { | ||
7699 | { "tx_frames" }, | ||
7700 | { "tx_bytes" }, | ||
7701 | { "tx_fifo_errors" }, | ||
7702 | { "tx_overflow_errors" }, | ||
7703 | { "tx_max_pkt_size_errors" }, | ||
7704 | { "tx_underflow_errors" }, | ||
7705 | { "rx_local_faults" }, | ||
7706 | { "rx_remote_faults" }, | ||
7707 | { "rx_link_faults" }, | ||
7708 | { "rx_align_errors" }, | ||
7709 | { "rx_frags" }, | ||
7710 | { "rx_mcasts" }, | ||
7711 | { "rx_bcasts" }, | ||
7712 | { "rx_hist_cnt1" }, | ||
7713 | { "rx_hist_cnt2" }, | ||
7714 | { "rx_hist_cnt3" }, | ||
7715 | { "rx_hist_cnt4" }, | ||
7716 | { "rx_hist_cnt5" }, | ||
7717 | { "rx_hist_cnt6" }, | ||
7718 | { "rx_hist_cnt7" }, | ||
7719 | { "rx_octets" }, | ||
7720 | { "rx_code_violations" }, | ||
7721 | { "rx_len_errors" }, | ||
7722 | { "rx_crc_errors" }, | ||
7723 | { "rx_underflows" }, | ||
7724 | { "rx_overflows" }, | ||
7725 | { "pause_off_state" }, | ||
7726 | { "pause_on_state" }, | ||
7727 | { "pause_received" }, | ||
7728 | }; | ||
7729 | |||
7730 | #define NUM_XMAC_STAT_KEYS ARRAY_SIZE(niu_xmac_stat_keys) | ||
7731 | |||
7732 | static const struct { | ||
7733 | const char string[ETH_GSTRING_LEN]; | ||
7734 | } niu_bmac_stat_keys[] = { | ||
7735 | { "tx_underflow_errors" }, | ||
7736 | { "tx_max_pkt_size_errors" }, | ||
7737 | { "tx_bytes" }, | ||
7738 | { "tx_frames" }, | ||
7739 | { "rx_overflows" }, | ||
7740 | { "rx_frames" }, | ||
7741 | { "rx_align_errors" }, | ||
7742 | { "rx_crc_errors" }, | ||
7743 | { "rx_len_errors" }, | ||
7744 | { "pause_off_state" }, | ||
7745 | { "pause_on_state" }, | ||
7746 | { "pause_received" }, | ||
7747 | }; | ||
7748 | |||
7749 | #define NUM_BMAC_STAT_KEYS ARRAY_SIZE(niu_bmac_stat_keys) | ||
7750 | |||
7751 | static const struct { | ||
7752 | const char string[ETH_GSTRING_LEN]; | ||
7753 | } niu_rxchan_stat_keys[] = { | ||
7754 | { "rx_channel" }, | ||
7755 | { "rx_packets" }, | ||
7756 | { "rx_bytes" }, | ||
7757 | { "rx_dropped" }, | ||
7758 | { "rx_errors" }, | ||
7759 | }; | ||
7760 | |||
7761 | #define NUM_RXCHAN_STAT_KEYS ARRAY_SIZE(niu_rxchan_stat_keys) | ||
7762 | |||
7763 | static const struct { | ||
7764 | const char string[ETH_GSTRING_LEN]; | ||
7765 | } niu_txchan_stat_keys[] = { | ||
7766 | { "tx_channel" }, | ||
7767 | { "tx_packets" }, | ||
7768 | { "tx_bytes" }, | ||
7769 | { "tx_errors" }, | ||
7770 | }; | ||
7771 | |||
7772 | #define NUM_TXCHAN_STAT_KEYS ARRAY_SIZE(niu_txchan_stat_keys) | ||
7773 | |||
7774 | static void niu_get_strings(struct net_device *dev, u32 stringset, u8 *data) | ||
7775 | { | ||
7776 | struct niu *np = netdev_priv(dev); | ||
7777 | int i; | ||
7778 | |||
7779 | if (stringset != ETH_SS_STATS) | ||
7780 | return; | ||
7781 | |||
7782 | if (np->flags & NIU_FLAGS_XMAC) { | ||
7783 | memcpy(data, niu_xmac_stat_keys, | ||
7784 | sizeof(niu_xmac_stat_keys)); | ||
7785 | data += sizeof(niu_xmac_stat_keys); | ||
7786 | } else { | ||
7787 | memcpy(data, niu_bmac_stat_keys, | ||
7788 | sizeof(niu_bmac_stat_keys)); | ||
7789 | data += sizeof(niu_bmac_stat_keys); | ||
7790 | } | ||
7791 | for (i = 0; i < np->num_rx_rings; i++) { | ||
7792 | memcpy(data, niu_rxchan_stat_keys, | ||
7793 | sizeof(niu_rxchan_stat_keys)); | ||
7794 | data += sizeof(niu_rxchan_stat_keys); | ||
7795 | } | ||
7796 | for (i = 0; i < np->num_tx_rings; i++) { | ||
7797 | memcpy(data, niu_txchan_stat_keys, | ||
7798 | sizeof(niu_txchan_stat_keys)); | ||
7799 | data += sizeof(niu_txchan_stat_keys); | ||
7800 | } | ||
7801 | } | ||
7802 | |||
7803 | static int niu_get_sset_count(struct net_device *dev, int stringset) | ||
7804 | { | ||
7805 | struct niu *np = netdev_priv(dev); | ||
7806 | |||
7807 | if (stringset != ETH_SS_STATS) | ||
7808 | return -EINVAL; | ||
7809 | |||
7810 | return (np->flags & NIU_FLAGS_XMAC ? | ||
7811 | NUM_XMAC_STAT_KEYS : | ||
7812 | NUM_BMAC_STAT_KEYS) + | ||
7813 | (np->num_rx_rings * NUM_RXCHAN_STAT_KEYS) + | ||
7814 | (np->num_tx_rings * NUM_TXCHAN_STAT_KEYS); | ||
7815 | } | ||
7816 | |||
7817 | static void niu_get_ethtool_stats(struct net_device *dev, | ||
7818 | struct ethtool_stats *stats, u64 *data) | ||
7819 | { | ||
7820 | struct niu *np = netdev_priv(dev); | ||
7821 | int i; | ||
7822 | |||
7823 | niu_sync_mac_stats(np); | ||
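/* Descriptive note (added): the layout here must match
 * niu_get_strings(): MAC stats first, then per-RX-channel and
 * per-TX-channel counters.
 */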
7824 | if (np->flags & NIU_FLAGS_XMAC) { | ||
7825 | memcpy(data, &np->mac_stats.xmac, | ||
7826 | sizeof(struct niu_xmac_stats)); | ||
7827 | data += (sizeof(struct niu_xmac_stats) / sizeof(u64)); | ||
7828 | } else { | ||
7829 | memcpy(data, &np->mac_stats.bmac, | ||
7830 | sizeof(struct niu_bmac_stats)); | ||
7831 | data += (sizeof(struct niu_bmac_stats) / sizeof(u64)); | ||
7832 | } | ||
7833 | for (i = 0; i < np->num_rx_rings; i++) { | ||
7834 | struct rx_ring_info *rp = &np->rx_rings[i]; | ||
7835 | |||
7836 | niu_sync_rx_discard_stats(np, rp, 0); | ||
7837 | |||
7838 | data[0] = rp->rx_channel; | ||
7839 | data[1] = rp->rx_packets; | ||
7840 | data[2] = rp->rx_bytes; | ||
7841 | data[3] = rp->rx_dropped; | ||
7842 | data[4] = rp->rx_errors; | ||
7843 | data += 5; | ||
7844 | } | ||
7845 | for (i = 0; i < np->num_tx_rings; i++) { | ||
7846 | struct tx_ring_info *rp = &np->tx_rings[i]; | ||
7847 | |||
7848 | data[0] = rp->tx_channel; | ||
7849 | data[1] = rp->tx_packets; | ||
7850 | data[2] = rp->tx_bytes; | ||
7851 | data[3] = rp->tx_errors; | ||
7852 | data += 4; | ||
7853 | } | ||
7854 | } | ||
7855 | |||
7856 | static u64 niu_led_state_save(struct niu *np) | ||
7857 | { | ||
7858 | if (np->flags & NIU_FLAGS_XMAC) | ||
7859 | return nr64_mac(XMAC_CONFIG); | ||
7860 | else | ||
7861 | return nr64_mac(BMAC_XIF_CONFIG); | ||
7862 | } | ||
7863 | |||
7864 | static void niu_led_state_restore(struct niu *np, u64 val) | ||
7865 | { | ||
7866 | if (np->flags & NIU_FLAGS_XMAC) | ||
7867 | nw64_mac(XMAC_CONFIG, val); | ||
7868 | else | ||
7869 | nw64_mac(BMAC_XIF_CONFIG, val); | ||
7870 | } | ||
7871 | |||
7872 | static void niu_force_led(struct niu *np, int on) | ||
7873 | { | ||
7874 | u64 val, reg, bit; | ||
7875 | |||
7876 | if (np->flags & NIU_FLAGS_XMAC) { | ||
7877 | reg = XMAC_CONFIG; | ||
7878 | bit = XMAC_CONFIG_FORCE_LED_ON; | ||
7879 | } else { | ||
7880 | reg = BMAC_XIF_CONFIG; | ||
7881 | bit = BMAC_XIF_CONFIG_LINK_LED; | ||
7882 | } | ||
7883 | |||
7884 | val = nr64_mac(reg); | ||
7885 | if (on) | ||
7886 | val |= bit; | ||
7887 | else | ||
7888 | val &= ~bit; | ||
7889 | nw64_mac(reg, val); | ||
7890 | } | ||
7891 | |||
7892 | static int niu_set_phys_id(struct net_device *dev, | ||
7893 | enum ethtool_phys_id_state state) | ||
7895 | { | ||
7896 | struct niu *np = netdev_priv(dev); | ||
7897 | |||
7898 | if (!netif_running(dev)) | ||
7899 | return -EAGAIN; | ||
7900 | |||
7901 | switch (state) { | ||
7902 | case ETHTOOL_ID_ACTIVE: | ||
7903 | np->orig_led_state = niu_led_state_save(np); | ||
7904 | return 1; /* cycle on/off once per second */ | ||
7905 | |||
7906 | case ETHTOOL_ID_ON: | ||
7907 | niu_force_led(np, 1); | ||
7908 | break; | ||
7909 | |||
7910 | case ETHTOOL_ID_OFF: | ||
7911 | niu_force_led(np, 0); | ||
7912 | break; | ||
7913 | |||
7914 | case ETHTOOL_ID_INACTIVE: | ||
7915 | niu_led_state_restore(np, np->orig_led_state); | ||
7916 | } | ||
7917 | |||
7918 | return 0; | ||
7919 | } | ||
7920 | |||
7921 | static const struct ethtool_ops niu_ethtool_ops = { | ||
7922 | .get_drvinfo = niu_get_drvinfo, | ||
7923 | .get_link = ethtool_op_get_link, | ||
7924 | .get_msglevel = niu_get_msglevel, | ||
7925 | .set_msglevel = niu_set_msglevel, | ||
7926 | .nway_reset = niu_nway_reset, | ||
7927 | .get_eeprom_len = niu_get_eeprom_len, | ||
7928 | .get_eeprom = niu_get_eeprom, | ||
7929 | .get_settings = niu_get_settings, | ||
7930 | .set_settings = niu_set_settings, | ||
7931 | .get_strings = niu_get_strings, | ||
7932 | .get_sset_count = niu_get_sset_count, | ||
7933 | .get_ethtool_stats = niu_get_ethtool_stats, | ||
7934 | .set_phys_id = niu_set_phys_id, | ||
7935 | .get_rxnfc = niu_get_nfc, | ||
7936 | .set_rxnfc = niu_set_nfc, | ||
7937 | }; | ||
7938 | |||
7939 | static int niu_ldg_assign_ldn(struct niu *np, struct niu_parent *parent, | ||
7940 | int ldg, int ldn) | ||
7941 | { | ||
7942 | if (ldg < NIU_LDG_MIN || ldg > NIU_LDG_MAX) | ||
7943 | return -EINVAL; | ||
7944 | if (ldn < 0 || ldn > LDN_MAX) | ||
7945 | return -EINVAL; | ||
7946 | |||
7947 | parent->ldg_map[ldn] = ldg; | ||
7948 | |||
7949 | if (np->parent->plat_type == PLAT_TYPE_NIU) { | ||
7950 | /* On N2 NIU, the ldn-->ldg assignments are set up and fixed by | ||
7951 | * the firmware, and we're not supposed to change them. | ||
7952 | * Validate the mapping, because if it's wrong we probably | ||
7953 | * won't get any interrupts and that's painful to debug. | ||
7954 | */ | ||
7955 | if (nr64(LDG_NUM(ldn)) != ldg) { | ||
7956 | dev_err(np->device, "Port %u, mismatched LDG assignment for ldn %d, should be %d, is %llu\n", | ||
7957 | np->port, ldn, ldg, | ||
7958 | (unsigned long long) nr64(LDG_NUM(ldn))); | ||
7959 | return -EINVAL; | ||
7960 | } | ||
7961 | } else | ||
7962 | nw64(LDG_NUM(ldn), ldg); | ||
7963 | |||
7964 | return 0; | ||
7965 | } | ||
7966 | |||
7967 | static int niu_set_ldg_timer_res(struct niu *np, int res) | ||
7968 | { | ||
7969 | if (res < 0 || res > LDG_TIMER_RES_VAL) | ||
7970 | return -EINVAL; | ||
7971 | |||
7973 | nw64(LDG_TIMER_RES, res); | ||
7974 | |||
7975 | return 0; | ||
7976 | } | ||
7977 | |||
7978 | static int niu_set_ldg_sid(struct niu *np, int ldg, int func, int vector) | ||
7979 | { | ||
7980 | if ((ldg < NIU_LDG_MIN || ldg > NIU_LDG_MAX) || | ||
7981 | (func < 0 || func > 3) || | ||
7982 | (vector < 0 || vector > 0x1f)) | ||
7983 | return -EINVAL; | ||
7984 | |||
7985 | nw64(SID(ldg), (func << SID_FUNC_SHIFT) | vector); | ||
7986 | |||
7987 | return 0; | ||
7988 | } | ||
7989 | |||
7990 | static int __devinit niu_pci_eeprom_read(struct niu *np, u32 addr) | ||
7991 | { | ||
7992 | u64 frame, frame_base = (ESPC_PIO_STAT_READ_START | | ||
7993 | (addr << ESPC_PIO_STAT_ADDR_SHIFT)); | ||
7994 | int limit; | ||
7995 | |||
7996 | if (addr > (ESPC_PIO_STAT_ADDR >> ESPC_PIO_STAT_ADDR_SHIFT)) | ||
7997 | return -EINVAL; | ||
7998 | |||
7999 | frame = frame_base; | ||
8000 | nw64(ESPC_PIO_STAT, frame); | ||
8001 | limit = 64; | ||
8002 | do { | ||
8003 | udelay(5); | ||
8004 | frame = nr64(ESPC_PIO_STAT); | ||
8005 | if (frame & ESPC_PIO_STAT_READ_END) | ||
8006 | break; | ||
8007 | } while (limit--); | ||
8008 | if (!(frame & ESPC_PIO_STAT_READ_END)) { | ||
8009 | dev_err(np->device, "EEPROM read timeout frame[%llx]\n", | ||
8010 | (unsigned long long) frame); | ||
8011 | return -ENODEV; | ||
8012 | } | ||
8013 | |||
8014 | frame = frame_base; | ||
8015 | nw64(ESPC_PIO_STAT, frame); | ||
8016 | limit = 64; | ||
8017 | do { | ||
8018 | udelay(5); | ||
8019 | frame = nr64(ESPC_PIO_STAT); | ||
8020 | if (frame & ESPC_PIO_STAT_READ_END) | ||
8021 | break; | ||
8022 | } while (limit--); | ||
8023 | if (!(frame & ESPC_PIO_STAT_READ_END)) { | ||
8024 | dev_err(np->device, "EEPROM read timeout frame[%llx]\n", | ||
8025 | (unsigned long long) frame); | ||
8026 | return -ENODEV; | ||
8027 | } | ||
8028 | |||
8029 | frame = nr64(ESPC_PIO_STAT); | ||
8030 | return (frame & ESPC_PIO_STAT_DATA) >> ESPC_PIO_STAT_DATA_SHIFT; | ||
8031 | } | ||
8032 | |||
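/* Descriptive note (added): read a big-endian 16-bit value from the EEPROM. */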
8033 | static int __devinit niu_pci_eeprom_read16(struct niu *np, u32 off) | ||
8034 | { | ||
8035 | int err = niu_pci_eeprom_read(np, off); | ||
8036 | u16 val; | ||
8037 | |||
8038 | if (err < 0) | ||
8039 | return err; | ||
8040 | val = (err << 8); | ||
8041 | err = niu_pci_eeprom_read(np, off + 1); | ||
8042 | if (err < 0) | ||
8043 | return err; | ||
8044 | val |= (err & 0xff); | ||
8045 | |||
8046 | return val; | ||
8047 | } | ||
8048 | |||
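/* Descriptive note (added): read a byte-swapped (little-endian) 16-bit value from the EEPROM. */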
8049 | static int __devinit niu_pci_eeprom_read16_swp(struct niu *np, u32 off) | ||
8050 | { | ||
8051 | int err = niu_pci_eeprom_read(np, off); | ||
8052 | u16 val; | ||
8053 | |||
8054 | if (err < 0) | ||
8055 | return err; | ||
8056 | |||
8057 | val = (err & 0xff); | ||
8058 | err = niu_pci_eeprom_read(np, off + 1); | ||
8059 | if (err < 0) | ||
8060 | return err; | ||
8061 | |||
8062 | val |= (err & 0xff) << 8; | ||
8063 | |||
8064 | return val; | ||
8065 | } | ||
8066 | |||
8067 | static int __devinit niu_pci_vpd_get_propname(struct niu *np, | ||
8068 | u32 off, | ||
8069 | char *namebuf, | ||
8070 | int namebuf_len) | ||
8071 | { | ||
8072 | int i; | ||
8073 | |||
8074 | for (i = 0; i < namebuf_len; i++) { | ||
8075 | int err = niu_pci_eeprom_read(np, off + i); | ||
8076 | if (err < 0) | ||
8077 | return err; | ||
8078 | *namebuf++ = err; | ||
8079 | if (!err) | ||
8080 | break; | ||
8081 | } | ||
8082 | if (i >= namebuf_len) | ||
8083 | return -EINVAL; | ||
8084 | |||
8085 | return i + 1; | ||
8086 | } | ||
8087 | |||
8088 | static void __devinit niu_vpd_parse_version(struct niu *np) | ||
8089 | { | ||
8090 | struct niu_vpd *vpd = &np->vpd; | ||
8091 | int len = strlen(vpd->version) + 1; | ||
8092 | const char *s = vpd->version; | ||
8093 | int i; | ||
8094 | |||
8095 | for (i = 0; i < len - 5; i++) { | ||
8096 | if (!strncmp(s + i, "FCode ", 6)) | ||
8097 | break; | ||
8098 | } | ||
8099 | if (i >= len - 5) | ||
8100 | return; | ||
8101 | |||
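/* Descriptive note (added): this points at the space following
 * "FCode"; sscanf's %d skips the leading whitespace.
 */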
8102 | s += i + 5; | ||
8103 | sscanf(s, "%d.%d", &vpd->fcode_major, &vpd->fcode_minor); | ||
8104 | |||
8105 | netif_printk(np, probe, KERN_DEBUG, np->dev, | ||
8106 | "VPD_SCAN: FCODE major(%d) minor(%d)\n", | ||
8107 | vpd->fcode_major, vpd->fcode_minor); | ||
8108 | if (vpd->fcode_major > NIU_VPD_MIN_MAJOR || | ||
8109 | (vpd->fcode_major == NIU_VPD_MIN_MAJOR && | ||
8110 | vpd->fcode_minor >= NIU_VPD_MIN_MINOR)) | ||
8111 | np->flags |= NIU_FLAGS_VPD_VALID; | ||
8112 | } | ||
8113 | |||
8114 | /* ESPC_PIO_EN_ENABLE must be set */ | ||
8115 | static int __devinit niu_pci_vpd_scan_props(struct niu *np, | ||
8116 | u32 start, u32 end) | ||
8117 | { | ||
8118 | unsigned int found_mask = 0; | ||
8119 | #define FOUND_MASK_MODEL 0x00000001 | ||
8120 | #define FOUND_MASK_BMODEL 0x00000002 | ||
8121 | #define FOUND_MASK_VERS 0x00000004 | ||
8122 | #define FOUND_MASK_MAC 0x00000008 | ||
8123 | #define FOUND_MASK_NMAC 0x00000010 | ||
8124 | #define FOUND_MASK_PHY 0x00000020 | ||
8125 | #define FOUND_MASK_ALL 0x0000003f | ||
8126 | |||
8127 | netif_printk(np, probe, KERN_DEBUG, np->dev, | ||
8128 | "VPD_SCAN: start[%x] end[%x]\n", start, end); | ||
8129 | while (start < end) { | ||
8130 | int len, err, prop_len; | ||
8131 | char namebuf[64]; | ||
8132 | u8 *prop_buf; | ||
8133 | int max_len; | ||
8134 | |||
8135 | if (found_mask == FOUND_MASK_ALL) { | ||
8136 | niu_vpd_parse_version(np); | ||
8137 | return 1; | ||
8138 | } | ||
8139 | |||
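/* Descriptive note (added): record layout as parsed here: a length
 * byte at start + 2, the property value length at start + 7, and the
 * NUL-terminated property name beginning at start + 8.
 */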
8140 | err = niu_pci_eeprom_read(np, start + 2); | ||
8141 | if (err < 0) | ||
8142 | return err; | ||
8143 | len = err; | ||
8144 | start += 3; | ||
8145 | |||
8146 | prop_len = niu_pci_eeprom_read(np, start + 4); | ||
8147 | err = niu_pci_vpd_get_propname(np, start + 5, namebuf, 64); | ||
8148 | if (err < 0) | ||
8149 | return err; | ||
8150 | |||
8151 | prop_buf = NULL; | ||
8152 | max_len = 0; | ||
8153 | if (!strcmp(namebuf, "model")) { | ||
8154 | prop_buf = np->vpd.model; | ||
8155 | max_len = NIU_VPD_MODEL_MAX; | ||
8156 | found_mask |= FOUND_MASK_MODEL; | ||
8157 | } else if (!strcmp(namebuf, "board-model")) { | ||
8158 | prop_buf = np->vpd.board_model; | ||
8159 | max_len = NIU_VPD_BD_MODEL_MAX; | ||
8160 | found_mask |= FOUND_MASK_BMODEL; | ||
8161 | } else if (!strcmp(namebuf, "version")) { | ||
8162 | prop_buf = np->vpd.version; | ||
8163 | max_len = NIU_VPD_VERSION_MAX; | ||
8164 | found_mask |= FOUND_MASK_VERS; | ||
8165 | } else if (!strcmp(namebuf, "local-mac-address")) { | ||
8166 | prop_buf = np->vpd.local_mac; | ||
8167 | max_len = ETH_ALEN; | ||
8168 | found_mask |= FOUND_MASK_MAC; | ||
8169 | } else if (!strcmp(namebuf, "num-mac-addresses")) { | ||
8170 | prop_buf = &np->vpd.mac_num; | ||
8171 | max_len = 1; | ||
8172 | found_mask |= FOUND_MASK_NMAC; | ||
8173 | } else if (!strcmp(namebuf, "phy-type")) { | ||
8174 | prop_buf = np->vpd.phy_type; | ||
8175 | max_len = NIU_VPD_PHY_TYPE_MAX; | ||
8176 | found_mask |= FOUND_MASK_PHY; | ||
8177 | } | ||
8178 | |||
8179 | if (max_len && prop_len > max_len) { | ||
8180 | dev_err(np->device, "Property '%s' is too long (%d bytes)\n", namebuf, prop_len); | ||
8181 | return -EINVAL; | ||
8182 | } | ||
8183 | |||
8184 | if (prop_buf) { | ||
8185 | u32 off = start + 5 + err; | ||
8186 | int i; | ||
8187 | |||
8188 | netif_printk(np, probe, KERN_DEBUG, np->dev, | ||
8189 | "VPD_SCAN: Reading in property [%s] len[%d]\n", | ||
8190 | namebuf, prop_len); | ||
8191 | for (i = 0; i < prop_len; i++) | ||
8192 | *prop_buf++ = niu_pci_eeprom_read(np, off + i); | ||
8193 | } | ||
8194 | |||
8195 | start += len; | ||
8196 | } | ||
8197 | |||
8198 | return 0; | ||
8199 | } | ||
8200 | |||
8201 | /* ESPC_PIO_EN_ENABLE must be set */ | ||
8202 | static void __devinit niu_pci_vpd_fetch(struct niu *np, u32 start) | ||
8203 | { | ||
8204 | u32 offset; | ||
8205 | int err; | ||
8206 | |||
8207 | err = niu_pci_eeprom_read16_swp(np, start + 1); | ||
8208 | if (err < 0) | ||
8209 | return; | ||
8210 | |||
8211 | offset = err + 3; | ||
8212 | |||
8213 | while (start + offset < ESPC_EEPROM_SIZE) { | ||
8214 | u32 here = start + offset; | ||
8215 | u32 end; | ||
8216 | |||
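/* Descriptive note (added): 0x90 is the PCI VPD-R (read-only data)
 * large resource tag; anything else ends the VPD area.
 */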
8217 | err = niu_pci_eeprom_read(np, here); | ||
8218 | if (err != 0x90) | ||
8219 | return; | ||
8220 | |||
8221 | err = niu_pci_eeprom_read16_swp(np, here + 1); | ||
8222 | if (err < 0) | ||
8223 | return; | ||
8224 | |||
8225 | here = start + offset + 3; | ||
8226 | end = start + offset + err; | ||
8227 | |||
8228 | offset += err; | ||
8229 | |||
8230 | err = niu_pci_vpd_scan_props(np, here, end); | ||
8231 | if (err < 0 || err == 1) | ||
8232 | return; | ||
8233 | } | ||
8234 | } | ||
8235 | |||
8236 | /* ESPC_PIO_EN_ENABLE must be set */ | ||
8237 | static u32 __devinit niu_pci_vpd_offset(struct niu *np) | ||
8238 | { | ||
8239 | u32 start = 0, end = ESPC_EEPROM_SIZE, ret; | ||
8240 | int err; | ||
8241 | |||
8242 | while (start < end) { | ||
8243 | ret = start; | ||
8244 | |||
8245 | /* ROM header signature? */ | ||
8246 | err = niu_pci_eeprom_read16(np, start + 0); | ||
8247 | if (err != 0x55aa) | ||
8248 | return 0; | ||
8249 | |||
8250 | /* Apply offset to PCI data structure. */ | ||
8251 | err = niu_pci_eeprom_read16(np, start + 23); | ||
8252 | if (err < 0) | ||
8253 | return 0; | ||
8254 | start += err; | ||
8255 | |||
8256 | /* Check for "PCIR" signature. */ | ||
8257 | err = niu_pci_eeprom_read16(np, start + 0); | ||
8258 | if (err != 0x5043) | ||
8259 | return 0; | ||
8260 | err = niu_pci_eeprom_read16(np, start + 2); | ||
8261 | if (err != 0x4952) | ||
8262 | return 0; | ||
8263 | |||
8264 | /* Check for OBP image type. */ | ||
8265 | err = niu_pci_eeprom_read(np, start + 20); | ||
8266 | if (err < 0) | ||
8267 | return 0; | ||
8268 | if (err != 0x01) { | ||
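/* Descriptive note (added): not an OBP image, so skip to the next
 * image using the size (in 512-byte units) stored at offset 2.
 */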
8269 | err = niu_pci_eeprom_read(np, ret + 2); | ||
8270 | if (err < 0) | ||
8271 | return 0; | ||
8272 | |||
8273 | start = ret + (err * 512); | ||
8274 | continue; | ||
8275 | } | ||
8276 | |||
8277 | err = niu_pci_eeprom_read16_swp(np, start + 8); | ||
8278 | if (err < 0) | ||
8279 | return 0; /* the function returns u32; 0 signals failure as elsewhere */ | ||
8280 | ret += err; | ||
8281 | |||
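/* Descriptive note (added): the VPD data should begin with the 0x82
 * "identifier string" large resource tag.
 */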
8282 | err = niu_pci_eeprom_read(np, ret + 0); | ||
8283 | if (err != 0x82) | ||
8284 | return 0; | ||
8285 | |||
8286 | return ret; | ||
8287 | } | ||
8288 | |||
8289 | return 0; | ||
8290 | } | ||
8291 | |||
8292 | static int __devinit niu_phy_type_prop_decode(struct niu *np, | ||
8293 | const char *phy_prop) | ||
8294 | { | ||
8295 | if (!strcmp(phy_prop, "mif")) { | ||
8296 | /* 1G copper, MII */ | ||
8297 | np->flags &= ~(NIU_FLAGS_FIBER | | ||
8298 | NIU_FLAGS_10G); | ||
8299 | np->mac_xcvr = MAC_XCVR_MII; | ||
8300 | } else if (!strcmp(phy_prop, "xgf")) { | ||
8301 | /* 10G fiber, XPCS */ | ||
8302 | np->flags |= (NIU_FLAGS_10G | | ||
8303 | NIU_FLAGS_FIBER); | ||
8304 | np->mac_xcvr = MAC_XCVR_XPCS; | ||
8305 | } else if (!strcmp(phy_prop, "pcs")) { | ||
8306 | /* 1G fiber, PCS */ | ||
8307 | np->flags &= ~NIU_FLAGS_10G; | ||
8308 | np->flags |= NIU_FLAGS_FIBER; | ||
8309 | np->mac_xcvr = MAC_XCVR_PCS; | ||
8310 | } else if (!strcmp(phy_prop, "xgc")) { | ||
8311 | /* 10G copper, XPCS */ | ||
8312 | np->flags |= NIU_FLAGS_10G; | ||
8313 | np->flags &= ~NIU_FLAGS_FIBER; | ||
8314 | np->mac_xcvr = MAC_XCVR_XPCS; | ||
8315 | } else if (!strcmp(phy_prop, "xgsd") || !strcmp(phy_prop, "gsd")) { | ||
8316 | /* 10G Serdes or 1G Serdes, default to 10G */ | ||
8317 | np->flags |= NIU_FLAGS_10G; | ||
8318 | np->flags &= ~NIU_FLAGS_FIBER; | ||
8319 | np->flags |= NIU_FLAGS_XCVR_SERDES; | ||
8320 | np->mac_xcvr = MAC_XCVR_XPCS; | ||
8321 | } else { | ||
8322 | return -EINVAL; | ||
8323 | } | ||
8324 | return 0; | ||
8325 | } | ||
8326 | |||
8327 | static int niu_pci_vpd_get_nports(struct niu *np) | ||
8328 | { | ||
8329 | int ports = 0; | ||
8330 | |||
8331 | if ((!strcmp(np->vpd.model, NIU_QGC_LP_MDL_STR)) || | ||
8332 | (!strcmp(np->vpd.model, NIU_QGC_PEM_MDL_STR)) || | ||
8333 | (!strcmp(np->vpd.model, NIU_MARAMBA_MDL_STR)) || | ||
8334 | (!strcmp(np->vpd.model, NIU_KIMI_MDL_STR)) || | ||
8335 | (!strcmp(np->vpd.model, NIU_ALONSO_MDL_STR))) { | ||
8336 | ports = 4; | ||
8337 | } else if ((!strcmp(np->vpd.model, NIU_2XGF_LP_MDL_STR)) || | ||
8338 | (!strcmp(np->vpd.model, NIU_2XGF_PEM_MDL_STR)) || | ||
8339 | (!strcmp(np->vpd.model, NIU_FOXXY_MDL_STR)) || | ||
8340 | (!strcmp(np->vpd.model, NIU_2XGF_MRVL_MDL_STR))) { | ||
8341 | ports = 2; | ||
8342 | } | ||
8343 | |||
8344 | return ports; | ||
8345 | } | ||
8346 | |||
8347 | static void __devinit niu_pci_vpd_validate(struct niu *np) | ||
8348 | { | ||
8349 | struct net_device *dev = np->dev; | ||
8350 | struct niu_vpd *vpd = &np->vpd; | ||
8351 | u8 val8; | ||
8352 | |||
8353 | if (!is_valid_ether_addr(&vpd->local_mac[0])) { | ||
8354 | dev_err(np->device, "VPD MAC invalid, falling back to SPROM\n"); | ||
8355 | |||
8356 | np->flags &= ~NIU_FLAGS_VPD_VALID; | ||
8357 | return; | ||
8358 | } | ||
8359 | |||
8360 | if (!strcmp(np->vpd.model, NIU_ALONSO_MDL_STR) || | ||
8361 | !strcmp(np->vpd.model, NIU_KIMI_MDL_STR)) { | ||
8362 | np->flags |= NIU_FLAGS_10G; | ||
8363 | np->flags &= ~NIU_FLAGS_FIBER; | ||
8364 | np->flags |= NIU_FLAGS_XCVR_SERDES; | ||
8365 | np->mac_xcvr = MAC_XCVR_PCS; | ||
8366 | if (np->port > 1) { | ||
8367 | np->flags |= NIU_FLAGS_FIBER; | ||
8368 | np->flags &= ~NIU_FLAGS_10G; | ||
8369 | } | ||
8370 | if (np->flags & NIU_FLAGS_10G) | ||
8371 | np->mac_xcvr = MAC_XCVR_XPCS; | ||
8372 | } else if (!strcmp(np->vpd.model, NIU_FOXXY_MDL_STR)) { | ||
8373 | np->flags |= (NIU_FLAGS_10G | NIU_FLAGS_FIBER | | ||
8374 | NIU_FLAGS_HOTPLUG_PHY); | ||
8375 | } else if (niu_phy_type_prop_decode(np, np->vpd.phy_type)) { | ||
8376 | dev_err(np->device, "Illegal phy string [%s]\n", | ||
8377 | np->vpd.phy_type); | ||
8378 | dev_err(np->device, "Falling back to SPROM\n"); | ||
8379 | np->flags &= ~NIU_FLAGS_VPD_VALID; | ||
8380 | return; | ||
8381 | } | ||
8382 | |||
8383 | memcpy(dev->perm_addr, vpd->local_mac, ETH_ALEN); | ||
8384 | |||
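/* Descriptive note (added): derive this port's address by adding the
 * port number to the base MAC, propagating any carry into byte 4.
 */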
8385 | val8 = dev->perm_addr[5]; | ||
8386 | dev->perm_addr[5] += np->port; | ||
8387 | if (dev->perm_addr[5] < val8) | ||
8388 | dev->perm_addr[4]++; | ||
8389 | |||
8390 | memcpy(dev->dev_addr, dev->perm_addr, dev->addr_len); | ||
8391 | } | ||
8392 | |||
8393 | static int __devinit niu_pci_probe_sprom(struct niu *np) | ||
8394 | { | ||
8395 | struct net_device *dev = np->dev; | ||
8396 | int len, i; | ||
8397 | u64 val, sum; | ||
8398 | u8 val8; | ||
8399 | |||
8400 | val = (nr64(ESPC_VER_IMGSZ) & ESPC_VER_IMGSZ_IMGSZ); | ||
8401 | val >>= ESPC_VER_IMGSZ_IMGSZ_SHIFT; | ||
8402 | len = val / 4; | ||
8403 | |||
8404 | np->eeprom_len = len; | ||
8405 | |||
8406 | netif_printk(np, probe, KERN_DEBUG, np->dev, | ||
8407 | "SPROM: Image size %llu\n", (unsigned long long)val); | ||
8408 | |||
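/* Descriptive note (added): sum every byte of the image; the result
 * modulo 256 must be 0xab for the SPROM to be considered valid.
 */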
8409 | sum = 0; | ||
8410 | for (i = 0; i < len; i++) { | ||
8411 | val = nr64(ESPC_NCR(i)); | ||
8412 | sum += (val >> 0) & 0xff; | ||
8413 | sum += (val >> 8) & 0xff; | ||
8414 | sum += (val >> 16) & 0xff; | ||
8415 | sum += (val >> 24) & 0xff; | ||
8416 | } | ||
8417 | netif_printk(np, probe, KERN_DEBUG, np->dev, | ||
8418 | "SPROM: Checksum %x\n", (int)(sum & 0xff)); | ||
8419 | if ((sum & 0xff) != 0xab) { | ||
8420 | dev_err(np->device, "Bad SPROM checksum (%x, should be 0xab)\n", (int)(sum & 0xff)); | ||
8421 | return -EINVAL; | ||
8422 | } | ||
8423 | |||
8424 | val = nr64(ESPC_PHY_TYPE); | ||
8425 | switch (np->port) { | ||
8426 | case 0: | ||
8427 | val8 = (val & ESPC_PHY_TYPE_PORT0) >> | ||
8428 | ESPC_PHY_TYPE_PORT0_SHIFT; | ||
8429 | break; | ||
8430 | case 1: | ||
8431 | val8 = (val & ESPC_PHY_TYPE_PORT1) >> | ||
8432 | ESPC_PHY_TYPE_PORT1_SHIFT; | ||
8433 | break; | ||
8434 | case 2: | ||
8435 | val8 = (val & ESPC_PHY_TYPE_PORT2) >> | ||
8436 | ESPC_PHY_TYPE_PORT2_SHIFT; | ||
8437 | break; | ||
8438 | case 3: | ||
8439 | val8 = (val & ESPC_PHY_TYPE_PORT3) >> | ||
8440 | ESPC_PHY_TYPE_PORT3_SHIFT; | ||
8441 | break; | ||
8442 | default: | ||
8443 | dev_err(np->device, "Bogus port number %u\n", | ||
8444 | np->port); | ||
8445 | return -EINVAL; | ||
8446 | } | ||
8447 | netif_printk(np, probe, KERN_DEBUG, np->dev, | ||
8448 | "SPROM: PHY type %x\n", val8); | ||
8449 | |||
8450 | switch (val8) { | ||
8451 | case ESPC_PHY_TYPE_1G_COPPER: | ||
8452 | /* 1G copper, MII */ | ||
8453 | np->flags &= ~(NIU_FLAGS_FIBER | | ||
8454 | NIU_FLAGS_10G); | ||
8455 | np->mac_xcvr = MAC_XCVR_MII; | ||
8456 | break; | ||
8457 | |||
8458 | case ESPC_PHY_TYPE_1G_FIBER: | ||
8459 | /* 1G fiber, PCS */ | ||
8460 | np->flags &= ~NIU_FLAGS_10G; | ||
8461 | np->flags |= NIU_FLAGS_FIBER; | ||
8462 | np->mac_xcvr = MAC_XCVR_PCS; | ||
8463 | break; | ||
8464 | |||
8465 | case ESPC_PHY_TYPE_10G_COPPER: | ||
8466 | /* 10G copper, XPCS */ | ||
8467 | np->flags |= NIU_FLAGS_10G; | ||
8468 | np->flags &= ~NIU_FLAGS_FIBER; | ||
8469 | np->mac_xcvr = MAC_XCVR_XPCS; | ||
8470 | break; | ||
8471 | |||
8472 | case ESPC_PHY_TYPE_10G_FIBER: | ||
8473 | /* 10G fiber, XPCS */ | ||
8474 | np->flags |= (NIU_FLAGS_10G | | ||
8475 | NIU_FLAGS_FIBER); | ||
8476 | np->mac_xcvr = MAC_XCVR_XPCS; | ||
8477 | break; | ||
8478 | |||
8479 | default: | ||
8480 | dev_err(np->device, "Bogus SPROM phy type %u\n", val8); | ||
8481 | return -EINVAL; | ||
8482 | } | ||
8483 | |||
8484 | val = nr64(ESPC_MAC_ADDR0); | ||
8485 | netif_printk(np, probe, KERN_DEBUG, np->dev, | ||
8486 | "SPROM: MAC_ADDR0[%08llx]\n", (unsigned long long)val); | ||
8487 | dev->perm_addr[0] = (val >> 0) & 0xff; | ||
8488 | dev->perm_addr[1] = (val >> 8) & 0xff; | ||
8489 | dev->perm_addr[2] = (val >> 16) & 0xff; | ||
8490 | dev->perm_addr[3] = (val >> 24) & 0xff; | ||
8491 | |||
8492 | val = nr64(ESPC_MAC_ADDR1); | ||
8493 | netif_printk(np, probe, KERN_DEBUG, np->dev, | ||
8494 | "SPROM: MAC_ADDR1[%08llx]\n", (unsigned long long)val); | ||
8495 | dev->perm_addr[4] = (val >> 0) & 0xff; | ||
8496 | dev->perm_addr[5] = (val >> 8) & 0xff; | ||
8497 | |||
8498 | if (!is_valid_ether_addr(&dev->perm_addr[0])) { | ||
8499 | dev_err(np->device, "SPROM MAC address invalid [ %pM ]\n", | ||
8500 | dev->perm_addr); | ||
8501 | return -EINVAL; | ||
8502 | } | ||
8503 | |||
8504 | val8 = dev->perm_addr[5]; | ||
8505 | dev->perm_addr[5] += np->port; | ||
8506 | if (dev->perm_addr[5] < val8) | ||
8507 | dev->perm_addr[4]++; | ||
8508 | |||
8509 | memcpy(dev->dev_addr, dev->perm_addr, dev->addr_len); | ||
8510 | |||
8511 | val = nr64(ESPC_MOD_STR_LEN); | ||
8512 | netif_printk(np, probe, KERN_DEBUG, np->dev, | ||
8513 | "SPROM: MOD_STR_LEN[%llu]\n", (unsigned long long)val); | ||
8514 | if (val >= 8 * 4) | ||
8515 | return -EINVAL; | ||
8516 | |||
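/* Descriptive note (added): each 32-bit word holds four characters of
 * the model string, most significant byte first.
 */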
8517 | for (i = 0; i < val; i += 4) { | ||
8518 | u64 tmp = nr64(ESPC_NCR(5 + (i / 4))); | ||
8519 | |||
8520 | np->vpd.model[i + 3] = (tmp >> 0) & 0xff; | ||
8521 | np->vpd.model[i + 2] = (tmp >> 8) & 0xff; | ||
8522 | np->vpd.model[i + 1] = (tmp >> 16) & 0xff; | ||
8523 | np->vpd.model[i + 0] = (tmp >> 24) & 0xff; | ||
8524 | } | ||
8525 | np->vpd.model[val] = '\0'; | ||
8526 | |||
8527 | val = nr64(ESPC_BD_MOD_STR_LEN); | ||
8528 | netif_printk(np, probe, KERN_DEBUG, np->dev, | ||
8529 | "SPROM: BD_MOD_STR_LEN[%llu]\n", (unsigned long long)val); | ||
8530 | if (val >= 4 * 4) | ||
8531 | return -EINVAL; | ||
8532 | |||
8533 | for (i = 0; i < val; i += 4) { | ||
8534 | u64 tmp = nr64(ESPC_NCR(14 + (i / 4))); | ||
8535 | |||
8536 | np->vpd.board_model[i + 3] = (tmp >> 0) & 0xff; | ||
8537 | np->vpd.board_model[i + 2] = (tmp >> 8) & 0xff; | ||
8538 | np->vpd.board_model[i + 1] = (tmp >> 16) & 0xff; | ||
8539 | np->vpd.board_model[i + 0] = (tmp >> 24) & 0xff; | ||
8540 | } | ||
8541 | np->vpd.board_model[val] = '\0'; | ||
8542 | |||
8543 | np->vpd.mac_num = | ||
8544 | nr64(ESPC_NUM_PORTS_MACS) & ESPC_NUM_PORTS_MACS_VAL; | ||
8545 | netif_printk(np, probe, KERN_DEBUG, np->dev, | ||
8546 | "SPROM: NUM_PORTS_MACS[%d]\n", np->vpd.mac_num); | ||
8547 | |||
8548 | return 0; | ||
8549 | } | ||
8550 | |||
8551 | static int __devinit niu_get_and_validate_port(struct niu *np) | ||
8552 | { | ||
8553 | struct niu_parent *parent = np->parent; | ||
8554 | |||
8555 | if (np->port <= 1) | ||
8556 | np->flags |= NIU_FLAGS_XMAC; | ||
8557 | |||
8558 | if (!parent->num_ports) { | ||
8559 | if (parent->plat_type == PLAT_TYPE_NIU) { | ||
8560 | parent->num_ports = 2; | ||
8561 | } else { | ||
8562 | parent->num_ports = niu_pci_vpd_get_nports(np); | ||
8563 | if (!parent->num_ports) { | ||
8564 | /* Fall back to SPROM as last resort. | ||
8565 | * This will fail on most cards. | ||
8566 | */ | ||
8567 | parent->num_ports = nr64(ESPC_NUM_PORTS_MACS) & | ||
8568 | ESPC_NUM_PORTS_MACS_VAL; | ||
8569 | |||
8570 | /* All of the current probing methods fail on | ||
8571 | * Maramba on-board parts. | ||
8572 | */ | ||
8573 | if (!parent->num_ports) | ||
8574 | parent->num_ports = 4; | ||
8575 | } | ||
8576 | } | ||
8577 | } | ||
8578 | |||
8579 | if (np->port >= parent->num_ports) | ||
8580 | return -ENODEV; | ||
8581 | |||
8582 | return 0; | ||
8583 | } | ||
8584 | |||
8585 | static int __devinit phy_record(struct niu_parent *parent, | ||
8586 | struct phy_probe_info *p, | ||
8587 | int dev_id_1, int dev_id_2, u8 phy_port, | ||
8588 | int type) | ||
8589 | { | ||
8590 | u32 id = (dev_id_1 << 16) | dev_id_2; | ||
8591 | u8 idx; | ||
8592 | |||
8593 | if (dev_id_1 < 0 || dev_id_2 < 0) | ||
8594 | return 0; | ||
8595 | if (type == PHY_TYPE_PMA_PMD || type == PHY_TYPE_PCS) { | ||
8596 | if (((id & NIU_PHY_ID_MASK) != NIU_PHY_ID_BCM8704) && | ||
8597 | ((id & NIU_PHY_ID_MASK) != NIU_PHY_ID_MRVL88X2011) && | ||
8598 | ((id & NIU_PHY_ID_MASK) != NIU_PHY_ID_BCM8706)) | ||
8599 | return 0; | ||
8600 | } else { | ||
8601 | if ((id & NIU_PHY_ID_MASK) != NIU_PHY_ID_BCM5464R) | ||
8602 | return 0; | ||
8603 | } | ||
8604 | |||
8605 | pr_info("niu%d: Found PHY %08x type %s at phy_port %u\n", | ||
8606 | parent->index, id, | ||
8607 | type == PHY_TYPE_PMA_PMD ? "PMA/PMD" : | ||
8608 | type == PHY_TYPE_PCS ? "PCS" : "MII", | ||
8609 | phy_port); | ||
8610 | |||
8611 | if (p->cur[type] >= NIU_MAX_PORTS) { | ||
8612 | pr_err("Too many PHY ports\n"); | ||
8613 | return -EINVAL; | ||
8614 | } | ||
8615 | idx = p->cur[type]; | ||
8616 | p->phy_id[type][idx] = id; | ||
8617 | p->phy_port[type][idx] = phy_port; | ||
8618 | p->cur[type] = idx + 1; | ||
8619 | return 0; | ||
8620 | } | ||
8621 | |||
8622 | static int __devinit port_has_10g(struct phy_probe_info *p, int port) | ||
8623 | { | ||
8624 | int i; | ||
8625 | |||
8626 | for (i = 0; i < p->cur[PHY_TYPE_PMA_PMD]; i++) { | ||
8627 | if (p->phy_port[PHY_TYPE_PMA_PMD][i] == port) | ||
8628 | return 1; | ||
8629 | } | ||
8630 | for (i = 0; i < p->cur[PHY_TYPE_PCS]; i++) { | ||
8631 | if (p->phy_port[PHY_TYPE_PCS][i] == port) | ||
8632 | return 1; | ||
8633 | } | ||
8634 | |||
8635 | return 0; | ||
8636 | } | ||
8637 | |||
8638 | static int __devinit count_10g_ports(struct phy_probe_info *p, int *lowest) | ||
8639 | { | ||
8640 | int port, cnt; | ||
8641 | |||
8642 | cnt = 0; | ||
8643 | *lowest = 32; | ||
8644 | for (port = 8; port < 32; port++) { | ||
8645 | if (port_has_10g(p, port)) { | ||
8646 | if (!cnt) | ||
8647 | *lowest = port; | ||
8648 | cnt++; | ||
8649 | } | ||
8650 | } | ||
8651 | |||
8652 | return cnt; | ||
8653 | } | ||
8654 | |||
8655 | static int __devinit count_1g_ports(struct phy_probe_info *p, int *lowest) | ||
8656 | { | ||
8657 | *lowest = 32; | ||
8658 | if (p->cur[PHY_TYPE_MII]) | ||
8659 | *lowest = p->phy_port[PHY_TYPE_MII][0]; | ||
8660 | |||
8661 | return p->cur[PHY_TYPE_MII]; | ||
8662 | } | ||
8663 | |||
8664 | static void __devinit niu_n2_divide_channels(struct niu_parent *parent) | ||
8665 | { | ||
8666 | int num_ports = parent->num_ports; | ||
8667 | int i; | ||
8668 | |||
8669 | for (i = 0; i < num_ports; i++) { | ||
8670 | parent->rxchan_per_port[i] = (16 / num_ports); | ||
8671 | parent->txchan_per_port[i] = (16 / num_ports); | ||
8672 | |||
8673 | pr_info("niu%d: Port %u [%u RX chans] [%u TX chans]\n", | ||
8674 | parent->index, i, | ||
8675 | parent->rxchan_per_port[i], | ||
8676 | parent->txchan_per_port[i]); | ||
8677 | } | ||
8678 | } | ||
8679 | |||
8680 | static void __devinit niu_divide_channels(struct niu_parent *parent, | ||
8681 | int num_10g, int num_1g) | ||
8682 | { | ||
8683 | int num_ports = parent->num_ports; | ||
8684 | int rx_chans_per_10g, rx_chans_per_1g; | ||
8685 | int tx_chans_per_10g, tx_chans_per_1g; | ||
8686 | int i, tot_rx, tot_tx; | ||
8687 | |||
8688 | if (!num_10g || !num_1g) { | ||
8689 | rx_chans_per_10g = rx_chans_per_1g = | ||
8690 | (NIU_NUM_RXCHAN / num_ports); | ||
8691 | tx_chans_per_10g = tx_chans_per_1g = | ||
8692 | (NIU_NUM_TXCHAN / num_ports); | ||
8693 | } else { | ||
8694 | rx_chans_per_1g = NIU_NUM_RXCHAN / 8; | ||
8695 | rx_chans_per_10g = (NIU_NUM_RXCHAN - | ||
8696 | (rx_chans_per_1g * num_1g)) / | ||
8697 | num_10g; | ||
8698 | |||
8699 | tx_chans_per_1g = NIU_NUM_TXCHAN / 6; | ||
8700 | tx_chans_per_10g = (NIU_NUM_TXCHAN - | ||
8701 | (tx_chans_per_1g * num_1g)) / | ||
8702 | num_10g; | ||
8703 | } | ||
8704 | |||
8705 | tot_rx = tot_tx = 0; | ||
8706 | for (i = 0; i < num_ports; i++) { | ||
8707 | int type = phy_decode(parent->port_phy, i); | ||
8708 | |||
8709 | if (type == PORT_TYPE_10G) { | ||
8710 | parent->rxchan_per_port[i] = rx_chans_per_10g; | ||
8711 | parent->txchan_per_port[i] = tx_chans_per_10g; | ||
8712 | } else { | ||
8713 | parent->rxchan_per_port[i] = rx_chans_per_1g; | ||
8714 | parent->txchan_per_port[i] = tx_chans_per_1g; | ||
8715 | } | ||
8716 | pr_info("niu%d: Port %u [%u RX chans] [%u TX chans]\n", | ||
8717 | parent->index, i, | ||
8718 | parent->rxchan_per_port[i], | ||
8719 | parent->txchan_per_port[i]); | ||
8720 | tot_rx += parent->rxchan_per_port[i]; | ||
8721 | tot_tx += parent->txchan_per_port[i]; | ||
8722 | } | ||
8723 | |||
8724 | if (tot_rx > NIU_NUM_RXCHAN) { | ||
8725 | pr_err("niu%d: Too many RX channels (%d), resetting to one per port\n", | ||
8726 | parent->index, tot_rx); | ||
8727 | for (i = 0; i < num_ports; i++) | ||
8728 | parent->rxchan_per_port[i] = 1; | ||
8729 | } | ||
8730 | if (tot_tx > NIU_NUM_TXCHAN) { | ||
8731 | pr_err("niu%d: Too many TX channels (%d), resetting to one per port\n", | ||
8732 | parent->index, tot_tx); | ||
8733 | for (i = 0; i < num_ports; i++) | ||
8734 | parent->txchan_per_port[i] = 1; | ||
8735 | } | ||
8736 | if (tot_rx < NIU_NUM_RXCHAN || tot_tx < NIU_NUM_TXCHAN) { | ||
8737 | pr_warning("niu%d: Driver bug, wasted channels, RX[%d] TX[%d]\n", | ||
8738 | parent->index, tot_rx, tot_tx); | ||
8739 | } | ||
8740 | } | ||
8741 | |||
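Worked numbers for niu_divide_channels(), assuming the channel totals defined in niu.h are NIU_NUM_RXCHAN = 16 and NIU_NUM_TXCHAN = 24:

	/* Mixed board: num_ports = 4, num_10g = 2, num_1g = 2.
	 *   rx_chans_per_1g  = 16 / 8         = 2
	 *   rx_chans_per_10g = (16 - 2*2) / 2 = 6  ->  2*6 + 2*2 = 16 RX
	 *   tx_chans_per_1g  = 24 / 6         = 4
	 *   tx_chans_per_10g = (24 - 4*2) / 2 = 8  ->  2*8 + 2*4 = 24 TX
	 * An all-1G or all-10G board takes the first branch and splits
	 * evenly instead, e.g. 16 / 4 = 4 RX channels per port.
	 */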
8742 | static void __devinit niu_divide_rdc_groups(struct niu_parent *parent, | ||
8743 | int num_10g, int num_1g) | ||
8744 | { | ||
8745 | int i, num_ports = parent->num_ports; | ||
8746 | int rdc_group, rdc_groups_per_port; | ||
8747 | int rdc_channel_base; | ||
8748 | |||
8749 | rdc_group = 0; | ||
8750 | rdc_groups_per_port = NIU_NUM_RDC_TABLES / num_ports; | ||
8751 | |||
8752 | rdc_channel_base = 0; | ||
8753 | |||
8754 | for (i = 0; i < num_ports; i++) { | ||
8755 | struct niu_rdc_tables *tp = &parent->rdc_group_cfg[i]; | ||
8756 | int grp, num_channels = parent->rxchan_per_port[i]; | ||
8757 | int this_channel_offset; | ||
8758 | |||
8759 | tp->first_table_num = rdc_group; | ||
8760 | tp->num_tables = rdc_groups_per_port; | ||
8761 | this_channel_offset = 0; | ||
8762 | for (grp = 0; grp < tp->num_tables; grp++) { | ||
8763 | struct rdc_table *rt = &tp->tables[grp]; | ||
8764 | int slot; | ||
8765 | |||
8766 | pr_info("niu%d: Port %d RDC tbl(%d) [ ", | ||
8767 | parent->index, i, tp->first_table_num + grp); | ||
8768 | for (slot = 0; slot < NIU_RDC_TABLE_SLOTS; slot++) { | ||
8769 | rt->rxdma_channel[slot] = | ||
8770 | rdc_channel_base + this_channel_offset; | ||
8771 | |||
8772 | pr_cont("%d ", rt->rxdma_channel[slot]); | ||
8773 | |||
8774 | if (++this_channel_offset == num_channels) | ||
8775 | this_channel_offset = 0; | ||
8776 | } | ||
8777 | pr_cont("]\n"); | ||
8778 | } | ||
8779 | |||
8780 | parent->rdc_default[i] = rdc_channel_base; | ||
8781 | |||
8782 | rdc_channel_base += num_channels; | ||
8783 | rdc_group += rdc_groups_per_port; | ||
8784 | } | ||
8785 | } | ||
8786 | |||
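The slot fill in niu_divide_rdc_groups() is a plain round-robin over the port's own RX channels, starting at that port's rdc_channel_base. For example, a port that owns channels 4..7 (rdc_channel_base = 4, num_channels = 4) fills every one of its tables as:

	/* slot:    0 1 2 3 4 5 6 7 ...
	 * channel: 4 5 6 7 4 5 6 7 ...
	 * so a flow hashed to any RDC slot always lands on one of the
	 * port's own RX DMA channels.
	 */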
8787 | static int __devinit fill_phy_probe_info(struct niu *np, | ||
8788 | struct niu_parent *parent, | ||
8789 | struct phy_probe_info *info) | ||
8790 | { | ||
8791 | unsigned long flags; | ||
8792 | int port, err; | ||
8793 | |||
8794 | memset(info, 0, sizeof(*info)); | ||
8795 | |||
8796 | /* Ports 0 to 7 are reserved for the on-board SerDes; probe the rest. */ | ||
8797 | niu_lock_parent(np, flags); | ||
8798 | err = 0; | ||
8799 | for (port = 8; port < 32; port++) { | ||
8800 | int dev_id_1, dev_id_2; | ||
8801 | |||
8802 | dev_id_1 = mdio_read(np, port, | ||
8803 | NIU_PMA_PMD_DEV_ADDR, MII_PHYSID1); | ||
8804 | dev_id_2 = mdio_read(np, port, | ||
8805 | NIU_PMA_PMD_DEV_ADDR, MII_PHYSID2); | ||
8806 | err = phy_record(parent, info, dev_id_1, dev_id_2, port, | ||
8807 | PHY_TYPE_PMA_PMD); | ||
8808 | if (err) | ||
8809 | break; | ||
8810 | dev_id_1 = mdio_read(np, port, | ||
8811 | NIU_PCS_DEV_ADDR, MII_PHYSID1); | ||
8812 | dev_id_2 = mdio_read(np, port, | ||
8813 | NIU_PCS_DEV_ADDR, MII_PHYSID2); | ||
8814 | err = phy_record(parent, info, dev_id_1, dev_id_2, port, | ||
8815 | PHY_TYPE_PCS); | ||
8816 | if (err) | ||
8817 | break; | ||
8818 | dev_id_1 = mii_read(np, port, MII_PHYSID1); | ||
8819 | dev_id_2 = mii_read(np, port, MII_PHYSID2); | ||
8820 | err = phy_record(parent, info, dev_id_1, dev_id_2, port, | ||
8821 | PHY_TYPE_MII); | ||
8822 | if (err) | ||
8823 | break; | ||
8824 | } | ||
8825 | niu_unlock_parent(np, flags); | ||
8826 | |||
8827 | return err; | ||
8828 | } | ||
8829 | |||
8830 | static int __devinit walk_phys(struct niu *np, struct niu_parent *parent) | ||
8831 | { | ||
8832 | struct phy_probe_info *info = &parent->phy_probe_info; | ||
8833 | int lowest_10g, lowest_1g; | ||
8834 | int num_10g, num_1g; | ||
8835 | u32 val; | ||
8836 | int err; | ||
8837 | |||
8838 | num_10g = num_1g = 0; | ||
8839 | |||
8840 | if (!strcmp(np->vpd.model, NIU_ALONSO_MDL_STR) || | ||
8841 | !strcmp(np->vpd.model, NIU_KIMI_MDL_STR)) { | ||
8842 | num_10g = 0; | ||
8843 | num_1g = 2; | ||
8844 | parent->plat_type = PLAT_TYPE_ATCA_CP3220; | ||
8845 | parent->num_ports = 4; | ||
8846 | val = (phy_encode(PORT_TYPE_1G, 0) | | ||
8847 | phy_encode(PORT_TYPE_1G, 1) | | ||
8848 | phy_encode(PORT_TYPE_1G, 2) | | ||
8849 | phy_encode(PORT_TYPE_1G, 3)); | ||
8850 | } else if (!strcmp(np->vpd.model, NIU_FOXXY_MDL_STR)) { | ||
8851 | num_10g = 2; | ||
8852 | num_1g = 0; | ||
8853 | parent->num_ports = 2; | ||
8854 | val = (phy_encode(PORT_TYPE_10G, 0) | | ||
8855 | phy_encode(PORT_TYPE_10G, 1)); | ||
8856 | } else if ((np->flags & NIU_FLAGS_XCVR_SERDES) && | ||
8857 | (parent->plat_type == PLAT_TYPE_NIU)) { | ||
8858 | /* this is the Monza case */ | ||
8859 | if (np->flags & NIU_FLAGS_10G) { | ||
8860 | val = (phy_encode(PORT_TYPE_10G, 0) | | ||
8861 | phy_encode(PORT_TYPE_10G, 1)); | ||
8862 | } else { | ||
8863 | val = (phy_encode(PORT_TYPE_1G, 0) | | ||
8864 | phy_encode(PORT_TYPE_1G, 1)); | ||
8865 | } | ||
8866 | } else { | ||
8867 | err = fill_phy_probe_info(np, parent, info); | ||
8868 | if (err) | ||
8869 | return err; | ||
8870 | |||
8871 | num_10g = count_10g_ports(info, &lowest_10g); | ||
8872 | num_1g = count_1g_ports(info, &lowest_1g); | ||
8873 | |||
8874 | switch ((num_10g << 4) | num_1g) { | ||
8875 | case 0x24: | ||
8876 | if (lowest_1g == 10) | ||
8877 | parent->plat_type = PLAT_TYPE_VF_P0; | ||
8878 | else if (lowest_1g == 26) | ||
8879 | parent->plat_type = PLAT_TYPE_VF_P1; | ||
8880 | else | ||
8881 | goto unknown_vg_1g_port; | ||
8882 | |||
8883 | /* fallthru */ | ||
8884 | case 0x22: | ||
8885 | val = (phy_encode(PORT_TYPE_10G, 0) | | ||
8886 | phy_encode(PORT_TYPE_10G, 1) | | ||
8887 | phy_encode(PORT_TYPE_1G, 2) | | ||
8888 | phy_encode(PORT_TYPE_1G, 3)); | ||
8889 | break; | ||
8890 | |||
8891 | case 0x20: | ||
8892 | val = (phy_encode(PORT_TYPE_10G, 0) | | ||
8893 | phy_encode(PORT_TYPE_10G, 1)); | ||
8894 | break; | ||
8895 | |||
8896 | case 0x10: | ||
8897 | val = phy_encode(PORT_TYPE_10G, np->port); | ||
8898 | break; | ||
8899 | |||
8900 | case 0x14: | ||
8901 | if (lowest_1g == 10) | ||
8902 | parent->plat_type = PLAT_TYPE_VF_P0; | ||
8903 | else if (lowest_1g == 26) | ||
8904 | parent->plat_type = PLAT_TYPE_VF_P1; | ||
8905 | else | ||
8906 | goto unknown_vg_1g_port; | ||
8907 | |||
8908 | /* fallthru */ | ||
8909 | case 0x13: | ||
8910 | if ((lowest_10g & 0x7) == 0) | ||
8911 | val = (phy_encode(PORT_TYPE_10G, 0) | | ||
8912 | phy_encode(PORT_TYPE_1G, 1) | | ||
8913 | phy_encode(PORT_TYPE_1G, 2) | | ||
8914 | phy_encode(PORT_TYPE_1G, 3)); | ||
8915 | else | ||
8916 | val = (phy_encode(PORT_TYPE_1G, 0) | | ||
8917 | phy_encode(PORT_TYPE_10G, 1) | | ||
8918 | phy_encode(PORT_TYPE_1G, 2) | | ||
8919 | phy_encode(PORT_TYPE_1G, 3)); | ||
8920 | break; | ||
8921 | |||
8922 | case 0x04: | ||
8923 | if (lowest_1g == 10) | ||
8924 | parent->plat_type = PLAT_TYPE_VF_P0; | ||
8925 | else if (lowest_1g == 26) | ||
8926 | parent->plat_type = PLAT_TYPE_VF_P1; | ||
8927 | else | ||
8928 | goto unknown_vg_1g_port; | ||
8929 | |||
8930 | val = (phy_encode(PORT_TYPE_1G, 0) | | ||
8931 | phy_encode(PORT_TYPE_1G, 1) | | ||
8932 | phy_encode(PORT_TYPE_1G, 2) | | ||
8933 | phy_encode(PORT_TYPE_1G, 3)); | ||
8934 | break; | ||
8935 | |||
8936 | default: | ||
8937 | pr_err("Unsupported port config 10G[%d] 1G[%d]\n", | ||
8938 | num_10g, num_1g); | ||
8939 | return -EINVAL; | ||
8940 | } | ||
8941 | } | ||
8942 | |||
8943 | parent->port_phy = val; | ||
8944 | |||
8945 | if (parent->plat_type == PLAT_TYPE_NIU) | ||
8946 | niu_n2_divide_channels(parent); | ||
8947 | else | ||
8948 | niu_divide_channels(parent, num_10g, num_1g); | ||
8949 | |||
8950 | niu_divide_rdc_groups(parent, num_10g, num_1g); | ||
8951 | |||
8952 | return 0; | ||
8953 | |||
8954 | unknown_vg_1g_port: | ||
8955 | pr_err("Cannot identify platform type, 1gport=%d\n", lowest_1g); | ||
8956 | return -EINVAL; | ||
8957 | } | ||
8958 | |||
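The switch key in walk_phys() packs both port counts into a single byte, the 10G count in the high nibble and the 1G count in the low nibble, so each supported mix is one case label:

	/* (num_10g << 4) | num_1g:
	 *   0x24 -> 2 x 10G + 4 x 1G    0x14 -> 1 x 10G + 4 x 1G
	 *   0x22 -> 2 x 10G + 2 x 1G    0x13 -> 1 x 10G + 3 x 1G
	 *   0x20 -> 2 x 10G             0x04 -> 4 x 1G
	 *   0x10 -> 1 x 10G
	 */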
8959 | static int __devinit niu_probe_ports(struct niu *np) | ||
8960 | { | ||
8961 | struct niu_parent *parent = np->parent; | ||
8962 | int err, i; | ||
8963 | |||
8964 | if (parent->port_phy == PORT_PHY_UNKNOWN) { | ||
8965 | err = walk_phys(np, parent); | ||
8966 | if (err) | ||
8967 | return err; | ||
8968 | |||
8969 | niu_set_ldg_timer_res(np, 2); | ||
8970 | for (i = 0; i <= LDN_MAX; i++) | ||
8971 | niu_ldn_irq_enable(np, i, 0); | ||
8972 | } | ||
8973 | |||
8974 | if (parent->port_phy == PORT_PHY_INVALID) | ||
8975 | return -EINVAL; | ||
8976 | |||
8977 | return 0; | ||
8978 | } | ||
8979 | |||
8980 | static int __devinit niu_classifier_swstate_init(struct niu *np) | ||
8981 | { | ||
8982 | struct niu_classifier *cp = &np->clas; | ||
8983 | |||
8984 | cp->tcam_top = (u16) np->port; | ||
8985 | cp->tcam_sz = np->parent->tcam_num_entries / np->parent->num_ports; | ||
8986 | cp->h1_init = 0xffffffff; | ||
8987 | cp->h2_init = 0xffff; | ||
8988 | |||
8989 | return fflp_early_init(np); | ||
8990 | } | ||
8991 | |||
8992 | static void __devinit niu_link_config_init(struct niu *np) | ||
8993 | { | ||
8994 | struct niu_link_config *lp = &np->link_config; | ||
8995 | |||
8996 | lp->advertising = (ADVERTISED_10baseT_Half | | ||
8997 | ADVERTISED_10baseT_Full | | ||
8998 | ADVERTISED_100baseT_Half | | ||
8999 | ADVERTISED_100baseT_Full | | ||
9000 | ADVERTISED_1000baseT_Half | | ||
9001 | ADVERTISED_1000baseT_Full | | ||
9002 | ADVERTISED_10000baseT_Full | | ||
9003 | ADVERTISED_Autoneg); | ||
9004 | lp->speed = lp->active_speed = SPEED_INVALID; | ||
9005 | lp->duplex = DUPLEX_FULL; | ||
9006 | lp->active_duplex = DUPLEX_INVALID; | ||
9007 | lp->autoneg = 1; | ||
9008 | #if 0 | ||
9009 | lp->loopback_mode = LOOPBACK_MAC; | ||
9010 | lp->active_speed = SPEED_10000; | ||
9011 | lp->active_duplex = DUPLEX_FULL; | ||
9012 | #else | ||
9013 | lp->loopback_mode = LOOPBACK_DISABLED; | ||
9014 | #endif | ||
9015 | } | ||
9016 | |||
9017 | static int __devinit niu_init_mac_ipp_pcs_base(struct niu *np) | ||
9018 | { | ||
9019 | switch (np->port) { | ||
9020 | case 0: | ||
9021 | np->mac_regs = np->regs + XMAC_PORT0_OFF; | ||
9022 | np->ipp_off = 0x00000; | ||
9023 | np->pcs_off = 0x04000; | ||
9024 | np->xpcs_off = 0x02000; | ||
9025 | break; | ||
9026 | |||
9027 | case 1: | ||
9028 | np->mac_regs = np->regs + XMAC_PORT1_OFF; | ||
9029 | np->ipp_off = 0x08000; | ||
9030 | np->pcs_off = 0x0a000; | ||
9031 | np->xpcs_off = 0x08000; | ||
9032 | break; | ||
9033 | |||
9034 | case 2: | ||
9035 | np->mac_regs = np->regs + BMAC_PORT2_OFF; | ||
9036 | np->ipp_off = 0x04000; | ||
9037 | np->pcs_off = 0x0e000; | ||
9038 | np->xpcs_off = ~0UL; | ||
9039 | break; | ||
9040 | |||
9041 | case 3: | ||
9042 | np->mac_regs = np->regs + BMAC_PORT3_OFF; | ||
9043 | np->ipp_off = 0x0c000; | ||
9044 | np->pcs_off = 0x12000; | ||
9045 | np->xpcs_off = ~0UL; | ||
9046 | break; | ||
9047 | |||
9048 | default: | ||
9049 | dev_err(np->device, "Port %u is invalid, cannot compute MAC block offset\n", np->port); | ||
9050 | return -EINVAL; | ||
9051 | } | ||
9052 | |||
9053 | return 0; | ||
9054 | } | ||
9055 | |||
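In short: ports 0 and 1 own the two XMAC register blocks, ports 2 and 3 the two BMAC blocks, and the BMAC ports set xpcs_off to ~0UL because they have no XPCS unit to point at.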
9056 | static void __devinit niu_try_msix(struct niu *np, u8 *ldg_num_map) | ||
9057 | { | ||
9058 | struct msix_entry msi_vec[NIU_NUM_LDG]; | ||
9059 | struct niu_parent *parent = np->parent; | ||
9060 | struct pci_dev *pdev = np->pdev; | ||
9061 | int i, num_irqs, err; | ||
9062 | u8 first_ldg; | ||
9063 | |||
9064 | first_ldg = (NIU_NUM_LDG / parent->num_ports) * np->port; | ||
9065 | for (i = 0; i < (NIU_NUM_LDG / parent->num_ports); i++) | ||
9066 | ldg_num_map[i] = first_ldg + i; | ||
9067 | |||
9068 | num_irqs = (parent->rxchan_per_port[np->port] + | ||
9069 | parent->txchan_per_port[np->port] + | ||
9070 | (np->port == 0 ? 3 : 1)); | ||
9071 | BUG_ON(num_irqs > (NIU_NUM_LDG / parent->num_ports)); | ||
9072 | |||
9073 | retry: | ||
9074 | for (i = 0; i < num_irqs; i++) { | ||
9075 | msi_vec[i].vector = 0; | ||
9076 | msi_vec[i].entry = i; | ||
9077 | } | ||
9078 | |||
9079 | err = pci_enable_msix(pdev, msi_vec, num_irqs); | ||
9080 | if (err < 0) { | ||
9081 | np->flags &= ~NIU_FLAGS_MSIX; | ||
9082 | return; | ||
9083 | } | ||
9084 | if (err > 0) { | ||
9085 | num_irqs = err; | ||
9086 | goto retry; | ||
9087 | } | ||
9088 | |||
9089 | np->flags |= NIU_FLAGS_MSIX; | ||
9090 | for (i = 0; i < num_irqs; i++) | ||
9091 | np->ldg[i].irq = msi_vec[i].vector; | ||
9092 | np->num_ldg = num_irqs; | ||
9093 | } | ||
9094 | |||
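The retry loop above leans on the old pci_enable_msix() contract:

	/* err < 0  -> MSI-X unusable; clear NIU_FLAGS_MSIX and fall
	 *             back to the single legacy interrupt
	 * err > 0  -> only 'err' vectors are available; shrink
	 *             num_irqs to that and try again
	 * err == 0 -> all requested vectors were granted
	 */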
9095 | static int __devinit niu_n2_irq_init(struct niu *np, u8 *ldg_num_map) | ||
9096 | { | ||
9097 | #ifdef CONFIG_SPARC64 | ||
9098 | struct platform_device *op = np->op; | ||
9099 | const u32 *int_prop; | ||
9100 | int i; | ||
9101 | |||
9102 | int_prop = of_get_property(op->dev.of_node, "interrupts", NULL); | ||
9103 | if (!int_prop) | ||
9104 | return -ENODEV; | ||
9105 | |||
9106 | for (i = 0; i < op->archdata.num_irqs; i++) { | ||
9107 | ldg_num_map[i] = int_prop[i]; | ||
9108 | np->ldg[i].irq = op->archdata.irqs[i]; | ||
9109 | } | ||
9110 | |||
9111 | np->num_ldg = op->archdata.num_irqs; | ||
9112 | |||
9113 | return 0; | ||
9114 | #else | ||
9115 | return -EINVAL; | ||
9116 | #endif | ||
9117 | } | ||
9118 | |||
9119 | static int __devinit niu_ldg_init(struct niu *np) | ||
9120 | { | ||
9121 | struct niu_parent *parent = np->parent; | ||
9122 | u8 ldg_num_map[NIU_NUM_LDG]; | ||
9123 | int first_chan, num_chan; | ||
9124 | int i, err, ldg_rotor; | ||
9125 | u8 port; | ||
9126 | |||
9127 | np->num_ldg = 1; | ||
9128 | np->ldg[0].irq = np->dev->irq; | ||
9129 | if (parent->plat_type == PLAT_TYPE_NIU) { | ||
9130 | err = niu_n2_irq_init(np, ldg_num_map); | ||
9131 | if (err) | ||
9132 | return err; | ||
9133 | } else | ||
9134 | niu_try_msix(np, ldg_num_map); | ||
9135 | |||
9136 | port = np->port; | ||
9137 | for (i = 0; i < np->num_ldg; i++) { | ||
9138 | struct niu_ldg *lp = &np->ldg[i]; | ||
9139 | |||
9140 | netif_napi_add(np->dev, &lp->napi, niu_poll, 64); | ||
9141 | |||
9142 | lp->np = np; | ||
9143 | lp->ldg_num = ldg_num_map[i]; | ||
9144 | lp->timer = 2; /* XXX */ | ||
9145 | |||
9146 | /* On the N2 NIU the firmware has already set up the SID mappings, | ||
9147 | * so each LDG is routed to the proper interrupt in the NCU | ||
9148 | * interrupt table. | ||
9149 | */ | ||
9150 | if (np->parent->plat_type != PLAT_TYPE_NIU) { | ||
9151 | err = niu_set_ldg_sid(np, lp->ldg_num, port, i); | ||
9152 | if (err) | ||
9153 | return err; | ||
9154 | } | ||
9155 | } | ||
9156 | |||
9157 | /* We adopt the LDG assignment ordering used by the N2 NIU | ||
9158 | * 'interrupt' properties because that simplifies a lot of | ||
9159 | * things. This ordering is: | ||
9160 | * | ||
9161 | * MAC | ||
9162 | * MIF (if port zero) | ||
9163 | * SYSERR (if port zero) | ||
9164 | * RX channels | ||
9165 | * TX channels | ||
9166 | */ | ||
9167 | |||
9168 | ldg_rotor = 0; | ||
9169 | |||
9170 | err = niu_ldg_assign_ldn(np, parent, ldg_num_map[ldg_rotor], | ||
9171 | LDN_MAC(port)); | ||
9172 | if (err) | ||
9173 | return err; | ||
9174 | |||
9175 | ldg_rotor++; | ||
9176 | if (ldg_rotor == np->num_ldg) | ||
9177 | ldg_rotor = 0; | ||
9178 | |||
9179 | if (port == 0) { | ||
9180 | err = niu_ldg_assign_ldn(np, parent, | ||
9181 | ldg_num_map[ldg_rotor], | ||
9182 | LDN_MIF); | ||
9183 | if (err) | ||
9184 | return err; | ||
9185 | |||
9186 | ldg_rotor++; | ||
9187 | if (ldg_rotor == np->num_ldg) | ||
9188 | ldg_rotor = 0; | ||
9189 | |||
9190 | err = niu_ldg_assign_ldn(np, parent, | ||
9191 | ldg_num_map[ldg_rotor], | ||
9192 | LDN_DEVICE_ERROR); | ||
9193 | if (err) | ||
9194 | return err; | ||
9195 | |||
9196 | ldg_rotor++; | ||
9197 | if (ldg_rotor == np->num_ldg) | ||
9198 | ldg_rotor = 0; | ||
9199 | |||
9200 | } | ||
9201 | |||
9202 | first_chan = 0; | ||
9203 | for (i = 0; i < port; i++) | ||
9204 | first_chan += parent->rxchan_per_port[i]; | ||
9205 | num_chan = parent->rxchan_per_port[port]; | ||
9206 | |||
9207 | for (i = first_chan; i < (first_chan + num_chan); i++) { | ||
9208 | err = niu_ldg_assign_ldn(np, parent, | ||
9209 | ldg_num_map[ldg_rotor], | ||
9210 | LDN_RXDMA(i)); | ||
9211 | if (err) | ||
9212 | return err; | ||
9213 | ldg_rotor++; | ||
9214 | if (ldg_rotor == np->num_ldg) | ||
9215 | ldg_rotor = 0; | ||
9216 | } | ||
9217 | |||
9218 | first_chan = 0; | ||
9219 | for (i = 0; i < port; i++) | ||
9220 | first_chan += parent->txchan_per_port[i]; | ||
9221 | num_chan = parent->txchan_per_port[port]; | ||
9222 | for (i = first_chan; i < (first_chan + num_chan); i++) { | ||
9223 | err = niu_ldg_assign_ldn(np, parent, | ||
9224 | ldg_num_map[ldg_rotor], | ||
9225 | LDN_TXDMA(i)); | ||
9226 | if (err) | ||
9227 | return err; | ||
9228 | ldg_rotor++; | ||
9229 | if (ldg_rotor == np->num_ldg) | ||
9230 | ldg_rotor = 0; | ||
9231 | } | ||
9232 | |||
9233 | return 0; | ||
9234 | } | ||
9235 | |||
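The increment-and-wrap rotor repeats four times in niu_ldg_init(); a hypothetical helper (not in the driver) shows the whole trick, which is just spreading MAC, MIF, SYSERR and the per-channel LDNs evenly across however many LDGs this port was granted:

	/* Hypothetical helper, for illustration only. */
	static int ldg_rotor_next(struct niu *np, int rotor)
	{
		return (rotor + 1 == np->num_ldg) ? 0 : rotor + 1;
	}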
9236 | static void __devexit niu_ldg_free(struct niu *np) | ||
9237 | { | ||
9238 | if (np->flags & NIU_FLAGS_MSIX) | ||
9239 | pci_disable_msix(np->pdev); | ||
9240 | } | ||
9241 | |||
9242 | static int __devinit niu_get_of_props(struct niu *np) | ||
9243 | { | ||
9244 | #ifdef CONFIG_SPARC64 | ||
9245 | struct net_device *dev = np->dev; | ||
9246 | struct device_node *dp; | ||
9247 | const char *phy_type; | ||
9248 | const u8 *mac_addr; | ||
9249 | const char *model; | ||
9250 | int prop_len; | ||
9251 | |||
9252 | if (np->parent->plat_type == PLAT_TYPE_NIU) | ||
9253 | dp = np->op->dev.of_node; | ||
9254 | else | ||
9255 | dp = pci_device_to_OF_node(np->pdev); | ||
9256 | |||
9257 | phy_type = of_get_property(dp, "phy-type", &prop_len); | ||
9258 | if (!phy_type) { | ||
9259 | netdev_err(dev, "%s: OF node lacks phy-type property\n", | ||
9260 | dp->full_name); | ||
9261 | return -EINVAL; | ||
9262 | } | ||
9263 | |||
9264 | if (!strcmp(phy_type, "none")) | ||
9265 | return -ENODEV; | ||
9266 | |||
9267 | strcpy(np->vpd.phy_type, phy_type); | ||
9268 | |||
9269 | if (niu_phy_type_prop_decode(np, np->vpd.phy_type)) { | ||
9270 | netdev_err(dev, "%s: Illegal phy string [%s]\n", | ||
9271 | dp->full_name, np->vpd.phy_type); | ||
9272 | return -EINVAL; | ||
9273 | } | ||
9274 | |||
9275 | mac_addr = of_get_property(dp, "local-mac-address", &prop_len); | ||
9276 | if (!mac_addr) { | ||
9277 | netdev_err(dev, "%s: OF node lacks local-mac-address property\n", | ||
9278 | dp->full_name); | ||
9279 | return -EINVAL; | ||
9280 | } | ||
9281 | if (prop_len != dev->addr_len) { | ||
9282 | netdev_err(dev, "%s: OF MAC address prop len (%d) is wrong\n", | ||
9283 | dp->full_name, prop_len); | ||
9284 | } | ||
9285 | memcpy(dev->perm_addr, mac_addr, dev->addr_len); | ||
9286 | if (!is_valid_ether_addr(&dev->perm_addr[0])) { | ||
9287 | netdev_err(dev, "%s: OF MAC address is invalid\n", | ||
9288 | dp->full_name); | ||
9289 | netdev_err(dev, "%s: [ %pM ]\n", dp->full_name, dev->perm_addr); | ||
9290 | return -EINVAL; | ||
9291 | } | ||
9292 | |||
9293 | memcpy(dev->dev_addr, dev->perm_addr, dev->addr_len); | ||
9294 | |||
9295 | model = of_get_property(dp, "model", &prop_len); | ||
9296 | |||
9297 | if (model) | ||
9298 | strcpy(np->vpd.model, model); | ||
9299 | |||
9300 | if (of_find_property(dp, "hot-swappable-phy", &prop_len)) { | ||
9301 | np->flags |= (NIU_FLAGS_10G | NIU_FLAGS_FIBER | | ||
9302 | NIU_FLAGS_HOTPLUG_PHY); | ||
9303 | } | ||
9304 | |||
9305 | return 0; | ||
9306 | #else | ||
9307 | return -EINVAL; | ||
9308 | #endif | ||
9309 | } | ||
9310 | |||
9311 | static int __devinit niu_get_invariants(struct niu *np) | ||
9312 | { | ||
9313 | int err, have_props; | ||
9314 | u32 offset; | ||
9315 | |||
9316 | err = niu_get_of_props(np); | ||
9317 | if (err == -ENODEV) | ||
9318 | return err; | ||
9319 | |||
9320 | have_props = !err; | ||
9321 | |||
9322 | err = niu_init_mac_ipp_pcs_base(np); | ||
9323 | if (err) | ||
9324 | return err; | ||
9325 | |||
9326 | if (have_props) { | ||
9327 | err = niu_get_and_validate_port(np); | ||
9328 | if (err) | ||
9329 | return err; | ||
9330 | |||
9331 | } else { | ||
9332 | if (np->parent->plat_type == PLAT_TYPE_NIU) | ||
9333 | return -EINVAL; | ||
9334 | |||
9335 | nw64(ESPC_PIO_EN, ESPC_PIO_EN_ENABLE); | ||
9336 | offset = niu_pci_vpd_offset(np); | ||
9337 | netif_printk(np, probe, KERN_DEBUG, np->dev, | ||
9338 | "%s() VPD offset [%08x]\n", __func__, offset); | ||
9339 | if (offset) | ||
9340 | niu_pci_vpd_fetch(np, offset); | ||
9341 | nw64(ESPC_PIO_EN, 0); | ||
9342 | |||
9343 | if (np->flags & NIU_FLAGS_VPD_VALID) { | ||
9344 | niu_pci_vpd_validate(np); | ||
9345 | err = niu_get_and_validate_port(np); | ||
9346 | if (err) | ||
9347 | return err; | ||
9348 | } | ||
9349 | |||
9350 | if (!(np->flags & NIU_FLAGS_VPD_VALID)) { | ||
9351 | err = niu_get_and_validate_port(np); | ||
9352 | if (err) | ||
9353 | return err; | ||
9354 | err = niu_pci_probe_sprom(np); | ||
9355 | if (err) | ||
9356 | return err; | ||
9357 | } | ||
9358 | } | ||
9359 | |||
9360 | err = niu_probe_ports(np); | ||
9361 | if (err) | ||
9362 | return err; | ||
9363 | |||
9364 | niu_ldg_init(np); | ||
9365 | |||
9366 | niu_classifier_swstate_init(np); | ||
9367 | niu_link_config_init(np); | ||
9368 | |||
9369 | err = niu_determine_phy_disposition(np); | ||
9370 | if (!err) | ||
9371 | err = niu_init_link(np); | ||
9372 | |||
9373 | return err; | ||
9374 | } | ||
9375 | |||
9376 | static LIST_HEAD(niu_parent_list); | ||
9377 | static DEFINE_MUTEX(niu_parent_lock); | ||
9378 | static int niu_parent_index; | ||
9379 | |||
9380 | static ssize_t show_port_phy(struct device *dev, | ||
9381 | struct device_attribute *attr, char *buf) | ||
9382 | { | ||
9383 | struct platform_device *plat_dev = to_platform_device(dev); | ||
9384 | struct niu_parent *p = plat_dev->dev.platform_data; | ||
9385 | u32 port_phy = p->port_phy; | ||
9386 | char *orig_buf = buf; | ||
9387 | int i; | ||
9388 | |||
9389 | if (port_phy == PORT_PHY_UNKNOWN || | ||
9390 | port_phy == PORT_PHY_INVALID) | ||
9391 | return 0; | ||
9392 | |||
9393 | for (i = 0; i < p->num_ports; i++) { | ||
9394 | const char *type_str; | ||
9395 | int type; | ||
9396 | |||
9397 | type = phy_decode(port_phy, i); | ||
9398 | if (type == PORT_TYPE_10G) | ||
9399 | type_str = "10G"; | ||
9400 | else | ||
9401 | type_str = "1G"; | ||
9402 | buf += sprintf(buf, | ||
9403 | (i == 0) ? "%s" : " %s", | ||
9404 | type_str); | ||
9405 | } | ||
9406 | buf += sprintf(buf, "\n"); | ||
9407 | return buf - orig_buf; | ||
9408 | } | ||
9409 | |||
9410 | static ssize_t show_plat_type(struct device *dev, | ||
9411 | struct device_attribute *attr, char *buf) | ||
9412 | { | ||
9413 | struct platform_device *plat_dev = to_platform_device(dev); | ||
9414 | struct niu_parent *p = plat_dev->dev.platform_data; | ||
9415 | const char *type_str; | ||
9416 | |||
9417 | switch (p->plat_type) { | ||
9418 | case PLAT_TYPE_ATLAS: | ||
9419 | type_str = "atlas"; | ||
9420 | break; | ||
9421 | case PLAT_TYPE_NIU: | ||
9422 | type_str = "niu"; | ||
9423 | break; | ||
9424 | case PLAT_TYPE_VF_P0: | ||
9425 | type_str = "vf_p0"; | ||
9426 | break; | ||
9427 | case PLAT_TYPE_VF_P1: | ||
9428 | type_str = "vf_p1"; | ||
9429 | break; | ||
9430 | default: | ||
9431 | type_str = "unknown"; | ||
9432 | break; | ||
9433 | } | ||
9434 | |||
9435 | return sprintf(buf, "%s\n", type_str); | ||
9436 | } | ||
9437 | |||
9438 | static ssize_t __show_chan_per_port(struct device *dev, | ||
9439 | struct device_attribute *attr, char *buf, | ||
9440 | int rx) | ||
9441 | { | ||
9442 | struct platform_device *plat_dev = to_platform_device(dev); | ||
9443 | struct niu_parent *p = plat_dev->dev.platform_data; | ||
9444 | char *orig_buf = buf; | ||
9445 | u8 *arr; | ||
9446 | int i; | ||
9447 | |||
9448 | arr = (rx ? p->rxchan_per_port : p->txchan_per_port); | ||
9449 | |||
9450 | for (i = 0; i < p->num_ports; i++) { | ||
9451 | buf += sprintf(buf, | ||
9452 | (i == 0) ? "%d" : " %d", | ||
9453 | arr[i]); | ||
9454 | } | ||
9455 | buf += sprintf(buf, "\n"); | ||
9456 | |||
9457 | return buf - orig_buf; | ||
9458 | } | ||
9459 | |||
9460 | static ssize_t show_rxchan_per_port(struct device *dev, | ||
9461 | struct device_attribute *attr, char *buf) | ||
9462 | { | ||
9463 | return __show_chan_per_port(dev, attr, buf, 1); | ||
9464 | } | ||
9465 | |||
9466 | static ssize_t show_txchan_per_port(struct device *dev, | ||
9467 | struct device_attribute *attr, char *buf) | ||
9468 | { | ||
9469 | return __show_chan_per_port(dev, attr, buf, 0); | ||
9470 | } | ||
9471 | |||
9472 | static ssize_t show_num_ports(struct device *dev, | ||
9473 | struct device_attribute *attr, char *buf) | ||
9474 | { | ||
9475 | struct platform_device *plat_dev = to_platform_device(dev); | ||
9476 | struct niu_parent *p = plat_dev->dev.platform_data; | ||
9477 | |||
9478 | return sprintf(buf, "%d\n", p->num_ports); | ||
9479 | } | ||
9480 | |||
9481 | static struct device_attribute niu_parent_attributes[] = { | ||
9482 | __ATTR(port_phy, S_IRUGO, show_port_phy, NULL), | ||
9483 | __ATTR(plat_type, S_IRUGO, show_plat_type, NULL), | ||
9484 | __ATTR(rxchan_per_port, S_IRUGO, show_rxchan_per_port, NULL), | ||
9485 | __ATTR(txchan_per_port, S_IRUGO, show_txchan_per_port, NULL), | ||
9486 | __ATTR(num_ports, S_IRUGO, show_num_ports, NULL), | ||
9487 | {} | ||
9488 | }; | ||
9489 | |||
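Once niu_new_parent() below registers these attributes, they appear under the niu-board platform device in sysfs. On a hypothetical board with two 10G and two 1G ports, reading port_phy would print "10G 10G 1G 1G" and rxchan_per_port something like "6 6 2 2", matching the channel split computed earlier.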
9490 | static struct niu_parent * __devinit niu_new_parent(struct niu *np, | ||
9491 | union niu_parent_id *id, | ||
9492 | u8 ptype) | ||
9493 | { | ||
9494 | struct platform_device *plat_dev; | ||
9495 | struct niu_parent *p; | ||
9496 | int i; | ||
9497 | |||
9498 | plat_dev = platform_device_register_simple("niu-board", niu_parent_index, | ||
9499 | NULL, 0); | ||
9500 | if (IS_ERR(plat_dev)) | ||
9501 | return NULL; | ||
9502 | |||
9503 | for (i = 0; attr_name(niu_parent_attributes[i]); i++) { | ||
9504 | int err = device_create_file(&plat_dev->dev, | ||
9505 | &niu_parent_attributes[i]); | ||
9506 | if (err) | ||
9507 | goto fail_unregister; | ||
9508 | } | ||
9509 | |||
9510 | p = kzalloc(sizeof(*p), GFP_KERNEL); | ||
9511 | if (!p) | ||
9512 | goto fail_unregister; | ||
9513 | |||
9514 | p->index = niu_parent_index++; | ||
9515 | |||
9516 | plat_dev->dev.platform_data = p; | ||
9517 | p->plat_dev = plat_dev; | ||
9518 | |||
9519 | memcpy(&p->id, id, sizeof(*id)); | ||
9520 | p->plat_type = ptype; | ||
9521 | INIT_LIST_HEAD(&p->list); | ||
9522 | atomic_set(&p->refcnt, 0); | ||
9523 | list_add(&p->list, &niu_parent_list); | ||
9524 | spin_lock_init(&p->lock); | ||
9525 | |||
9526 | p->rxdma_clock_divider = 7500; | ||
9527 | |||
9528 | p->tcam_num_entries = NIU_PCI_TCAM_ENTRIES; | ||
9529 | if (p->plat_type == PLAT_TYPE_NIU) | ||
9530 | p->tcam_num_entries = NIU_NONPCI_TCAM_ENTRIES; | ||
9531 | |||
9532 | for (i = CLASS_CODE_USER_PROG1; i <= CLASS_CODE_SCTP_IPV6; i++) { | ||
9533 | int index = i - CLASS_CODE_USER_PROG1; | ||
9534 | |||
9535 | p->tcam_key[index] = TCAM_KEY_TSEL; | ||
9536 | p->flow_key[index] = (FLOW_KEY_IPSA | | ||
9537 | FLOW_KEY_IPDA | | ||
9538 | FLOW_KEY_PROTO | | ||
9539 | (FLOW_KEY_L4_BYTE12 << | ||
9540 | FLOW_KEY_L4_0_SHIFT) | | ||
9541 | (FLOW_KEY_L4_BYTE12 << | ||
9542 | FLOW_KEY_L4_1_SHIFT)); | ||
9543 | } | ||
9544 | |||
9545 | for (i = 0; i < LDN_MAX + 1; i++) | ||
9546 | p->ldg_map[i] = LDG_INVALID; | ||
9547 | |||
9548 | return p; | ||
9549 | |||
9550 | fail_unregister: | ||
9551 | platform_device_unregister(plat_dev); | ||
9552 | return NULL; | ||
9553 | } | ||
9554 | |||
9555 | static struct niu_parent * __devinit niu_get_parent(struct niu *np, | ||
9556 | union niu_parent_id *id, | ||
9557 | u8 ptype) | ||
9558 | { | ||
9559 | struct niu_parent *p, *tmp; | ||
9560 | int port = np->port; | ||
9561 | |||
9562 | mutex_lock(&niu_parent_lock); | ||
9563 | p = NULL; | ||
9564 | list_for_each_entry(tmp, &niu_parent_list, list) { | ||
9565 | if (!memcmp(id, &tmp->id, sizeof(*id))) { | ||
9566 | p = tmp; | ||
9567 | break; | ||
9568 | } | ||
9569 | } | ||
9570 | if (!p) | ||
9571 | p = niu_new_parent(np, id, ptype); | ||
9572 | |||
9573 | if (p) { | ||
9574 | char port_name[8]; | ||
9575 | int err; | ||
9576 | |||
9577 | sprintf(port_name, "port%d", port); | ||
9578 | err = sysfs_create_link(&p->plat_dev->dev.kobj, | ||
9579 | &np->device->kobj, | ||
9580 | port_name); | ||
9581 | if (!err) { | ||
9582 | p->ports[port] = np; | ||
9583 | atomic_inc(&p->refcnt); | ||
9584 | } | ||
9585 | } | ||
9586 | mutex_unlock(&niu_parent_lock); | ||
9587 | |||
9588 | return p; | ||
9589 | } | ||
9590 | |||
9591 | static void niu_put_parent(struct niu *np) | ||
9592 | { | ||
9593 | struct niu_parent *p = np->parent; | ||
9594 | u8 port = np->port; | ||
9595 | char port_name[8]; | ||
9596 | |||
9597 | BUG_ON(!p || p->ports[port] != np); | ||
9598 | |||
9599 | netif_printk(np, probe, KERN_DEBUG, np->dev, | ||
9600 | "%s() port[%u]\n", __func__, port); | ||
9601 | |||
9602 | sprintf(port_name, "port%d", port); | ||
9603 | |||
9604 | mutex_lock(&niu_parent_lock); | ||
9605 | |||
9606 | sysfs_remove_link(&p->plat_dev->dev.kobj, port_name); | ||
9607 | |||
9608 | p->ports[port] = NULL; | ||
9609 | np->parent = NULL; | ||
9610 | |||
9611 | if (atomic_dec_and_test(&p->refcnt)) { | ||
9612 | list_del(&p->list); | ||
9613 | platform_device_unregister(p->plat_dev); | ||
9614 | } | ||
9615 | |||
9616 | mutex_unlock(&niu_parent_lock); | ||
9617 | } | ||
9618 | |||
9619 | static void *niu_pci_alloc_coherent(struct device *dev, size_t size, | ||
9620 | u64 *handle, gfp_t flag) | ||
9621 | { | ||
9622 | dma_addr_t dh; | ||
9623 | void *ret; | ||
9624 | |||
9625 | ret = dma_alloc_coherent(dev, size, &dh, flag); | ||
9626 | if (ret) | ||
9627 | *handle = dh; | ||
9628 | return ret; | ||
9629 | } | ||
9630 | |||
9631 | static void niu_pci_free_coherent(struct device *dev, size_t size, | ||
9632 | void *cpu_addr, u64 handle) | ||
9633 | { | ||
9634 | dma_free_coherent(dev, size, cpu_addr, handle); | ||
9635 | } | ||
9636 | |||
9637 | static u64 niu_pci_map_page(struct device *dev, struct page *page, | ||
9638 | unsigned long offset, size_t size, | ||
9639 | enum dma_data_direction direction) | ||
9640 | { | ||
9641 | return dma_map_page(dev, page, offset, size, direction); | ||
9642 | } | ||
9643 | |||
9644 | static void niu_pci_unmap_page(struct device *dev, u64 dma_address, | ||
9645 | size_t size, enum dma_data_direction direction) | ||
9646 | { | ||
9647 | dma_unmap_page(dev, dma_address, size, direction); | ||
9648 | } | ||
9649 | |||
9650 | static u64 niu_pci_map_single(struct device *dev, void *cpu_addr, | ||
9651 | size_t size, | ||
9652 | enum dma_data_direction direction) | ||
9653 | { | ||
9654 | return dma_map_single(dev, cpu_addr, size, direction); | ||
9655 | } | ||
9656 | |||
9657 | static void niu_pci_unmap_single(struct device *dev, u64 dma_address, | ||
9658 | size_t size, | ||
9659 | enum dma_data_direction direction) | ||
9660 | { | ||
9661 | dma_unmap_single(dev, dma_address, size, direction); | ||
9662 | } | ||
9663 | |||
9664 | static const struct niu_ops niu_pci_ops = { | ||
9665 | .alloc_coherent = niu_pci_alloc_coherent, | ||
9666 | .free_coherent = niu_pci_free_coherent, | ||
9667 | .map_page = niu_pci_map_page, | ||
9668 | .unmap_page = niu_pci_unmap_page, | ||
9669 | .map_single = niu_pci_map_single, | ||
9670 | .unmap_single = niu_pci_unmap_single, | ||
9671 | }; | ||
9672 | |||
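This ops table is why the RX/TX paths never call the DMA API directly: the PCI back end above maps through the usual coherent/streaming DMA calls, while the SPARC N2 back end further down (niu_phys_ops) simply hands back physical addresses, the on-chip NIU apparently needing no IOMMU mapping. A minimal sketch of a call through the table, using only names from this file:

	u64 handle;
	void *buf = np->ops->alloc_coherent(np->device, 4096, &handle,
					    GFP_KERNEL);
	if (buf)
		np->ops->free_coherent(np->device, 4096, buf, handle);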
9673 | static void __devinit niu_driver_version(void) | ||
9674 | { | ||
9675 | static int niu_version_printed; | ||
9676 | |||
9677 | if (niu_version_printed++ == 0) | ||
9678 | pr_info("%s", version); | ||
9679 | } | ||
9680 | |||
9681 | static struct net_device * __devinit niu_alloc_and_init( | ||
9682 | struct device *gen_dev, struct pci_dev *pdev, | ||
9683 | struct platform_device *op, const struct niu_ops *ops, | ||
9684 | u8 port) | ||
9685 | { | ||
9686 | struct net_device *dev; | ||
9687 | struct niu *np; | ||
9688 | |||
9689 | dev = alloc_etherdev_mq(sizeof(struct niu), NIU_NUM_TXCHAN); | ||
9690 | if (!dev) { | ||
9691 | dev_err(gen_dev, "Etherdev alloc failed, aborting\n"); | ||
9692 | return NULL; | ||
9693 | } | ||
9694 | |||
9695 | SET_NETDEV_DEV(dev, gen_dev); | ||
9696 | |||
9697 | np = netdev_priv(dev); | ||
9698 | np->dev = dev; | ||
9699 | np->pdev = pdev; | ||
9700 | np->op = op; | ||
9701 | np->device = gen_dev; | ||
9702 | np->ops = ops; | ||
9703 | |||
9704 | np->msg_enable = niu_debug; | ||
9705 | |||
9706 | spin_lock_init(&np->lock); | ||
9707 | INIT_WORK(&np->reset_task, niu_reset_task); | ||
9708 | |||
9709 | np->port = port; | ||
9710 | |||
9711 | return dev; | ||
9712 | } | ||
9713 | |||
9714 | static const struct net_device_ops niu_netdev_ops = { | ||
9715 | .ndo_open = niu_open, | ||
9716 | .ndo_stop = niu_close, | ||
9717 | .ndo_start_xmit = niu_start_xmit, | ||
9718 | .ndo_get_stats64 = niu_get_stats, | ||
9719 | .ndo_set_multicast_list = niu_set_rx_mode, | ||
9720 | .ndo_validate_addr = eth_validate_addr, | ||
9721 | .ndo_set_mac_address = niu_set_mac_addr, | ||
9722 | .ndo_do_ioctl = niu_ioctl, | ||
9723 | .ndo_tx_timeout = niu_tx_timeout, | ||
9724 | .ndo_change_mtu = niu_change_mtu, | ||
9725 | }; | ||
9726 | |||
9727 | static void __devinit niu_assign_netdev_ops(struct net_device *dev) | ||
9728 | { | ||
9729 | dev->netdev_ops = &niu_netdev_ops; | ||
9730 | dev->ethtool_ops = &niu_ethtool_ops; | ||
9731 | dev->watchdog_timeo = NIU_TX_TIMEOUT; | ||
9732 | } | ||
9733 | |||
9734 | static void __devinit niu_device_announce(struct niu *np) | ||
9735 | { | ||
9736 | struct net_device *dev = np->dev; | ||
9737 | |||
9738 | pr_info("%s: NIU Ethernet %pM\n", dev->name, dev->dev_addr); | ||
9739 | |||
9740 | if (np->parent->plat_type == PLAT_TYPE_ATCA_CP3220) { | ||
9741 | pr_info("%s: Port type[%s] mode[%s:%s] XCVR[%s] phy[%s]\n", | ||
9742 | dev->name, | ||
9743 | (np->flags & NIU_FLAGS_XMAC ? "XMAC" : "BMAC"), | ||
9744 | (np->flags & NIU_FLAGS_10G ? "10G" : "1G"), | ||
9745 | (np->flags & NIU_FLAGS_FIBER ? "RGMII FIBER" : "SERDES"), | ||
9746 | (np->mac_xcvr == MAC_XCVR_MII ? "MII" : | ||
9747 | (np->mac_xcvr == MAC_XCVR_PCS ? "PCS" : "XPCS")), | ||
9748 | np->vpd.phy_type); | ||
9749 | } else { | ||
9750 | pr_info("%s: Port type[%s] mode[%s:%s] XCVR[%s] phy[%s]\n", | ||
9751 | dev->name, | ||
9752 | (np->flags & NIU_FLAGS_XMAC ? "XMAC" : "BMAC"), | ||
9753 | (np->flags & NIU_FLAGS_10G ? "10G" : "1G"), | ||
9754 | (np->flags & NIU_FLAGS_FIBER ? "FIBER" : | ||
9755 | (np->flags & NIU_FLAGS_XCVR_SERDES ? "SERDES" : | ||
9756 | "COPPER")), | ||
9757 | (np->mac_xcvr == MAC_XCVR_MII ? "MII" : | ||
9758 | (np->mac_xcvr == MAC_XCVR_PCS ? "PCS" : "XPCS")), | ||
9759 | np->vpd.phy_type); | ||
9760 | } | ||
9761 | } | ||
9762 | |||
9763 | static void __devinit niu_set_basic_features(struct net_device *dev) | ||
9764 | { | ||
9765 | dev->hw_features = NETIF_F_SG | NETIF_F_HW_CSUM | NETIF_F_RXHASH; | ||
9766 | dev->features |= dev->hw_features | NETIF_F_RXCSUM; | ||
9767 | } | ||
9768 | |||
9769 | static int __devinit niu_pci_init_one(struct pci_dev *pdev, | ||
9770 | const struct pci_device_id *ent) | ||
9771 | { | ||
9772 | union niu_parent_id parent_id; | ||
9773 | struct net_device *dev; | ||
9774 | struct niu *np; | ||
9775 | int err, pos; | ||
9776 | u64 dma_mask; | ||
9777 | u16 val16; | ||
9778 | |||
9779 | niu_driver_version(); | ||
9780 | |||
9781 | err = pci_enable_device(pdev); | ||
9782 | if (err) { | ||
9783 | dev_err(&pdev->dev, "Cannot enable PCI device, aborting\n"); | ||
9784 | return err; | ||
9785 | } | ||
9786 | |||
9787 | if (!(pci_resource_flags(pdev, 0) & IORESOURCE_MEM) || | ||
9788 | !(pci_resource_flags(pdev, 2) & IORESOURCE_MEM)) { | ||
9789 | dev_err(&pdev->dev, "Cannot find proper PCI device base addresses, aborting\n"); | ||
9790 | err = -ENODEV; | ||
9791 | goto err_out_disable_pdev; | ||
9792 | } | ||
9793 | |||
9794 | err = pci_request_regions(pdev, DRV_MODULE_NAME); | ||
9795 | if (err) { | ||
9796 | dev_err(&pdev->dev, "Cannot obtain PCI resources, aborting\n"); | ||
9797 | goto err_out_disable_pdev; | ||
9798 | } | ||
9799 | |||
9800 | pos = pci_pcie_cap(pdev); | ||
9801 | if (pos <= 0) { | ||
9802 | dev_err(&pdev->dev, "Cannot find PCI Express capability, aborting\n"); | ||
 | err = -ENODEV; | ||
9803 | goto err_out_free_res; | ||
9804 | } | ||
9805 | |||
9806 | dev = niu_alloc_and_init(&pdev->dev, pdev, NULL, | ||
9807 | &niu_pci_ops, PCI_FUNC(pdev->devfn)); | ||
9808 | if (!dev) { | ||
9809 | err = -ENOMEM; | ||
9810 | goto err_out_free_res; | ||
9811 | } | ||
9812 | np = netdev_priv(dev); | ||
9813 | |||
9814 | memset(&parent_id, 0, sizeof(parent_id)); | ||
9815 | parent_id.pci.domain = pci_domain_nr(pdev->bus); | ||
9816 | parent_id.pci.bus = pdev->bus->number; | ||
9817 | parent_id.pci.device = PCI_SLOT(pdev->devfn); | ||
9818 | |||
9819 | np->parent = niu_get_parent(np, &parent_id, | ||
9820 | PLAT_TYPE_ATLAS); | ||
9821 | if (!np->parent) { | ||
9822 | err = -ENOMEM; | ||
9823 | goto err_out_free_dev; | ||
9824 | } | ||
9825 | |||
9826 | pci_read_config_word(pdev, pos + PCI_EXP_DEVCTL, &val16); | ||
9827 | val16 &= ~PCI_EXP_DEVCTL_NOSNOOP_EN; | ||
9828 | val16 |= (PCI_EXP_DEVCTL_CERE | | ||
9829 | PCI_EXP_DEVCTL_NFERE | | ||
9830 | PCI_EXP_DEVCTL_FERE | | ||
9831 | PCI_EXP_DEVCTL_URRE | | ||
9832 | PCI_EXP_DEVCTL_RELAX_EN); | ||
9833 | pci_write_config_word(pdev, pos + PCI_EXP_DEVCTL, val16); | ||
9834 | |||
9835 | dma_mask = DMA_BIT_MASK(44); | ||
9836 | err = pci_set_dma_mask(pdev, dma_mask); | ||
9837 | if (!err) { | ||
9838 | dev->features |= NETIF_F_HIGHDMA; | ||
9839 | err = pci_set_consistent_dma_mask(pdev, dma_mask); | ||
9840 | if (err) { | ||
9841 | dev_err(&pdev->dev, "Unable to obtain 44 bit DMA for consistent allocations, aborting\n"); | ||
9842 | goto err_out_release_parent; | ||
9843 | } | ||
9844 | } | ||
9845 | if (err || dma_mask == DMA_BIT_MASK(32)) { | ||
9846 | err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32)); | ||
9847 | if (err) { | ||
9848 | dev_err(&pdev->dev, "No usable DMA configuration, aborting\n"); | ||
9849 | goto err_out_release_parent; | ||
9850 | } | ||
9851 | } | ||
9852 | |||
9853 | niu_set_basic_features(dev); | ||
9854 | |||
9855 | np->regs = pci_ioremap_bar(pdev, 0); | ||
9856 | if (!np->regs) { | ||
9857 | dev_err(&pdev->dev, "Cannot map device registers, aborting\n"); | ||
9858 | err = -ENOMEM; | ||
9859 | goto err_out_release_parent; | ||
9860 | } | ||
9861 | |||
9862 | pci_set_master(pdev); | ||
9863 | pci_save_state(pdev); | ||
9864 | |||
9865 | dev->irq = pdev->irq; | ||
9866 | |||
9867 | niu_assign_netdev_ops(dev); | ||
9868 | |||
9869 | err = niu_get_invariants(np); | ||
9870 | if (err) { | ||
9871 | if (err != -ENODEV) | ||
9872 | dev_err(&pdev->dev, "Problem fetching invariants of chip, aborting\n"); | ||
9873 | goto err_out_iounmap; | ||
9874 | } | ||
9875 | |||
9876 | err = register_netdev(dev); | ||
9877 | if (err) { | ||
9878 | dev_err(&pdev->dev, "Cannot register net device, aborting\n"); | ||
9879 | goto err_out_iounmap; | ||
9880 | } | ||
9881 | |||
9882 | pci_set_drvdata(pdev, dev); | ||
9883 | |||
9884 | niu_device_announce(np); | ||
9885 | |||
9886 | return 0; | ||
9887 | |||
9888 | err_out_iounmap: | ||
9889 | if (np->regs) { | ||
9890 | iounmap(np->regs); | ||
9891 | np->regs = NULL; | ||
9892 | } | ||
9893 | |||
9894 | err_out_release_parent: | ||
9895 | niu_put_parent(np); | ||
9896 | |||
9897 | err_out_free_dev: | ||
9898 | free_netdev(dev); | ||
9899 | |||
9900 | err_out_free_res: | ||
9901 | pci_release_regions(pdev); | ||
9902 | |||
9903 | err_out_disable_pdev: | ||
9904 | pci_disable_device(pdev); | ||
9905 | pci_set_drvdata(pdev, NULL); | ||
9906 | |||
9907 | return err; | ||
9908 | } | ||
9909 | |||
9910 | static void __devexit niu_pci_remove_one(struct pci_dev *pdev) | ||
9911 | { | ||
9912 | struct net_device *dev = pci_get_drvdata(pdev); | ||
9913 | |||
9914 | if (dev) { | ||
9915 | struct niu *np = netdev_priv(dev); | ||
9916 | |||
9917 | unregister_netdev(dev); | ||
9918 | if (np->regs) { | ||
9919 | iounmap(np->regs); | ||
9920 | np->regs = NULL; | ||
9921 | } | ||
9922 | |||
9923 | niu_ldg_free(np); | ||
9924 | |||
9925 | niu_put_parent(np); | ||
9926 | |||
9927 | free_netdev(dev); | ||
9928 | pci_release_regions(pdev); | ||
9929 | pci_disable_device(pdev); | ||
9930 | pci_set_drvdata(pdev, NULL); | ||
9931 | } | ||
9932 | } | ||
9933 | |||
9934 | static int niu_suspend(struct pci_dev *pdev, pm_message_t state) | ||
9935 | { | ||
9936 | struct net_device *dev = pci_get_drvdata(pdev); | ||
9937 | struct niu *np = netdev_priv(dev); | ||
9938 | unsigned long flags; | ||
9939 | |||
9940 | if (!netif_running(dev)) | ||
9941 | return 0; | ||
9942 | |||
9943 | flush_work_sync(&np->reset_task); | ||
9944 | niu_netif_stop(np); | ||
9945 | |||
9946 | del_timer_sync(&np->timer); | ||
9947 | |||
9948 | spin_lock_irqsave(&np->lock, flags); | ||
9949 | niu_enable_interrupts(np, 0); | ||
9950 | spin_unlock_irqrestore(&np->lock, flags); | ||
9951 | |||
9952 | netif_device_detach(dev); | ||
9953 | |||
9954 | spin_lock_irqsave(&np->lock, flags); | ||
9955 | niu_stop_hw(np); | ||
9956 | spin_unlock_irqrestore(&np->lock, flags); | ||
9957 | |||
9958 | pci_save_state(pdev); | ||
9959 | |||
9960 | return 0; | ||
9961 | } | ||
9962 | |||
9963 | static int niu_resume(struct pci_dev *pdev) | ||
9964 | { | ||
9965 | struct net_device *dev = pci_get_drvdata(pdev); | ||
9966 | struct niu *np = netdev_priv(dev); | ||
9967 | unsigned long flags; | ||
9968 | int err; | ||
9969 | |||
9970 | if (!netif_running(dev)) | ||
9971 | return 0; | ||
9972 | |||
9973 | pci_restore_state(pdev); | ||
9974 | |||
9975 | netif_device_attach(dev); | ||
9976 | |||
9977 | spin_lock_irqsave(&np->lock, flags); | ||
9978 | |||
9979 | err = niu_init_hw(np); | ||
9980 | if (!err) { | ||
9981 | np->timer.expires = jiffies + HZ; | ||
9982 | add_timer(&np->timer); | ||
9983 | niu_netif_start(np); | ||
9984 | } | ||
9985 | |||
9986 | spin_unlock_irqrestore(&np->lock, flags); | ||
9987 | |||
9988 | return err; | ||
9989 | } | ||
9990 | |||
9991 | static struct pci_driver niu_pci_driver = { | ||
9992 | .name = DRV_MODULE_NAME, | ||
9993 | .id_table = niu_pci_tbl, | ||
9994 | .probe = niu_pci_init_one, | ||
9995 | .remove = __devexit_p(niu_pci_remove_one), | ||
9996 | .suspend = niu_suspend, | ||
9997 | .resume = niu_resume, | ||
9998 | }; | ||
9999 | |||
10000 | #ifdef CONFIG_SPARC64 | ||
10001 | static void *niu_phys_alloc_coherent(struct device *dev, size_t size, | ||
10002 | u64 *dma_addr, gfp_t flag) | ||
10003 | { | ||
10004 | unsigned long order = get_order(size); | ||
10005 | unsigned long page = __get_free_pages(flag, order); | ||
10006 | |||
10007 | if (page == 0UL) | ||
10008 | return NULL; | ||
10009 | memset((char *)page, 0, PAGE_SIZE << order); | ||
10010 | *dma_addr = __pa(page); | ||
10011 | |||
10012 | return (void *) page; | ||
10013 | } | ||
10014 | |||
10015 | static void niu_phys_free_coherent(struct device *dev, size_t size, | ||
10016 | void *cpu_addr, u64 handle) | ||
10017 | { | ||
10018 | unsigned long order = get_order(size); | ||
10019 | |||
10020 | free_pages((unsigned long) cpu_addr, order); | ||
10021 | } | ||
10022 | |||
10023 | static u64 niu_phys_map_page(struct device *dev, struct page *page, | ||
10024 | unsigned long offset, size_t size, | ||
10025 | enum dma_data_direction direction) | ||
10026 | { | ||
10027 | return page_to_phys(page) + offset; | ||
10028 | } | ||
10029 | |||
10030 | static void niu_phys_unmap_page(struct device *dev, u64 dma_address, | ||
10031 | size_t size, enum dma_data_direction direction) | ||
10032 | { | ||
10033 | /* Nothing to do. */ | ||
10034 | } | ||
10035 | |||
10036 | static u64 niu_phys_map_single(struct device *dev, void *cpu_addr, | ||
10037 | size_t size, | ||
10038 | enum dma_data_direction direction) | ||
10039 | { | ||
10040 | return __pa(cpu_addr); | ||
10041 | } | ||
10042 | |||
10043 | static void niu_phys_unmap_single(struct device *dev, u64 dma_address, | ||
10044 | size_t size, | ||
10045 | enum dma_data_direction direction) | ||
10046 | { | ||
10047 | /* Nothing to do. */ | ||
10048 | } | ||
10049 | |||
10050 | static const struct niu_ops niu_phys_ops = { | ||
10051 | .alloc_coherent = niu_phys_alloc_coherent, | ||
10052 | .free_coherent = niu_phys_free_coherent, | ||
10053 | .map_page = niu_phys_map_page, | ||
10054 | .unmap_page = niu_phys_unmap_page, | ||
10055 | .map_single = niu_phys_map_single, | ||
10056 | .unmap_single = niu_phys_unmap_single, | ||
10057 | }; | ||
10058 | |||
10059 | static int __devinit niu_of_probe(struct platform_device *op) | ||
10060 | { | ||
10061 | union niu_parent_id parent_id; | ||
10062 | struct net_device *dev; | ||
10063 | struct niu *np; | ||
10064 | const u32 *reg; | ||
10065 | int err; | ||
10066 | |||
10067 | niu_driver_version(); | ||
10068 | |||
10069 | reg = of_get_property(op->dev.of_node, "reg", NULL); | ||
10070 | if (!reg) { | ||
10071 | dev_err(&op->dev, "%s: No 'reg' property, aborting\n", | ||
10072 | op->dev.of_node->full_name); | ||
10073 | return -ENODEV; | ||
10074 | } | ||
10075 | |||
10076 | dev = niu_alloc_and_init(&op->dev, NULL, op, | ||
10077 | &niu_phys_ops, reg[0] & 0x1); | ||
10078 | if (!dev) { | ||
10079 | err = -ENOMEM; | ||
10080 | goto err_out; | ||
10081 | } | ||
10082 | np = netdev_priv(dev); | ||
10083 | |||
10084 | memset(&parent_id, 0, sizeof(parent_id)); | ||
10085 | parent_id.of = of_get_parent(op->dev.of_node); | ||
10086 | |||
10087 | np->parent = niu_get_parent(np, &parent_id, | ||
10088 | PLAT_TYPE_NIU); | ||
10089 | if (!np->parent) { | ||
10090 | err = -ENOMEM; | ||
10091 | goto err_out_free_dev; | ||
10092 | } | ||
10093 | |||
10094 | niu_set_basic_features(dev); | ||
10095 | |||
10096 | np->regs = of_ioremap(&op->resource[1], 0, | ||
10097 | resource_size(&op->resource[1]), | ||
10098 | "niu regs"); | ||
10099 | if (!np->regs) { | ||
10100 | dev_err(&op->dev, "Cannot map device registers, aborting\n"); | ||
10101 | err = -ENOMEM; | ||
10102 | goto err_out_release_parent; | ||
10103 | } | ||
10104 | |||
10105 | np->vir_regs_1 = of_ioremap(&op->resource[2], 0, | ||
10106 | resource_size(&op->resource[2]), | ||
10107 | "niu vregs-1"); | ||
10108 | if (!np->vir_regs_1) { | ||
10109 | dev_err(&op->dev, "Cannot map device vir registers 1, aborting\n"); | ||
10110 | err = -ENOMEM; | ||
10111 | goto err_out_iounmap; | ||
10112 | } | ||
10113 | |||
10114 | np->vir_regs_2 = of_ioremap(&op->resource[3], 0, | ||
10115 | resource_size(&op->resource[3]), | ||
10116 | "niu vregs-2"); | ||
10117 | if (!np->vir_regs_2) { | ||
10118 | dev_err(&op->dev, "Cannot map device vir registers 2, aborting\n"); | ||
10119 | err = -ENOMEM; | ||
10120 | goto err_out_iounmap; | ||
10121 | } | ||
10122 | |||
10123 | niu_assign_netdev_ops(dev); | ||
10124 | |||
10125 | err = niu_get_invariants(np); | ||
10126 | if (err) { | ||
10127 | if (err != -ENODEV) | ||
10128 | dev_err(&op->dev, "Problem fetching invariants of chip, aborting\n"); | ||
10129 | goto err_out_iounmap; | ||
10130 | } | ||
10131 | |||
10132 | err = register_netdev(dev); | ||
10133 | if (err) { | ||
10134 | dev_err(&op->dev, "Cannot register net device, aborting\n"); | ||
10135 | goto err_out_iounmap; | ||
10136 | } | ||
10137 | |||
10138 | dev_set_drvdata(&op->dev, dev); | ||
10139 | |||
10140 | niu_device_announce(np); | ||
10141 | |||
10142 | return 0; | ||
10143 | |||
10144 | err_out_iounmap: | ||
10145 | if (np->vir_regs_1) { | ||
10146 | of_iounmap(&op->resource[2], np->vir_regs_1, | ||
10147 | resource_size(&op->resource[2])); | ||
10148 | np->vir_regs_1 = NULL; | ||
10149 | } | ||
10150 | |||
10151 | if (np->vir_regs_2) { | ||
10152 | of_iounmap(&op->resource[3], np->vir_regs_2, | ||
10153 | resource_size(&op->resource[3])); | ||
10154 | np->vir_regs_2 = NULL; | ||
10155 | } | ||
10156 | |||
10157 | if (np->regs) { | ||
10158 | of_iounmap(&op->resource[1], np->regs, | ||
10159 | resource_size(&op->resource[1])); | ||
10160 | np->regs = NULL; | ||
10161 | } | ||
10162 | |||
10163 | err_out_release_parent: | ||
10164 | niu_put_parent(np); | ||
10165 | |||
10166 | err_out_free_dev: | ||
10167 | free_netdev(dev); | ||
10168 | |||
10169 | err_out: | ||
10170 | return err; | ||
10171 | } | ||
10172 | |||
10173 | static int __devexit niu_of_remove(struct platform_device *op) | ||
10174 | { | ||
10175 | struct net_device *dev = dev_get_drvdata(&op->dev); | ||
10176 | |||
10177 | if (dev) { | ||
10178 | struct niu *np = netdev_priv(dev); | ||
10179 | |||
10180 | unregister_netdev(dev); | ||
10181 | |||
10182 | if (np->vir_regs_1) { | ||
10183 | of_iounmap(&op->resource[2], np->vir_regs_1, | ||
10184 | resource_size(&op->resource[2])); | ||
10185 | np->vir_regs_1 = NULL; | ||
10186 | } | ||
10187 | |||
10188 | if (np->vir_regs_2) { | ||
10189 | of_iounmap(&op->resource[3], np->vir_regs_2, | ||
10190 | resource_size(&op->resource[3])); | ||
10191 | np->vir_regs_2 = NULL; | ||
10192 | } | ||
10193 | |||
10194 | if (np->regs) { | ||
10195 | of_iounmap(&op->resource[1], np->regs, | ||
10196 | resource_size(&op->resource[1])); | ||
10197 | np->regs = NULL; | ||
10198 | } | ||
10199 | |||
10200 | niu_ldg_free(np); | ||
10201 | |||
10202 | niu_put_parent(np); | ||
10203 | |||
10204 | free_netdev(dev); | ||
10205 | dev_set_drvdata(&op->dev, NULL); | ||
10206 | } | ||
10207 | return 0; | ||
10208 | } | ||
10209 | |||
10210 | static const struct of_device_id niu_match[] = { | ||
10211 | { | ||
10212 | .name = "network", | ||
10213 | .compatible = "SUNW,niusl", | ||
10214 | }, | ||
10215 | {}, | ||
10216 | }; | ||
10217 | MODULE_DEVICE_TABLE(of, niu_match); | ||
10218 | |||
10219 | static struct platform_driver niu_of_driver = { | ||
10220 | .driver = { | ||
10221 | .name = "niu", | ||
10222 | .owner = THIS_MODULE, | ||
10223 | .of_match_table = niu_match, | ||
10224 | }, | ||
10225 | .probe = niu_of_probe, | ||
10226 | .remove = __devexit_p(niu_of_remove), | ||
10227 | }; | ||
10228 | |||
10229 | #endif /* CONFIG_SPARC64 */ | ||
10230 | |||
10231 | static int __init niu_init(void) | ||
10232 | { | ||
10233 | int err = 0; | ||
10234 | |||
10235 | BUILD_BUG_ON(PAGE_SIZE < 4 * 1024); | ||
10236 | |||
10237 | niu_debug = netif_msg_init(debug, NIU_MSG_DEFAULT); | ||
10238 | |||
10239 | #ifdef CONFIG_SPARC64 | ||
10240 | err = platform_driver_register(&niu_of_driver); | ||
10241 | #endif | ||
10242 | |||
10243 | if (!err) { | ||
10244 | err = pci_register_driver(&niu_pci_driver); | ||
10245 | #ifdef CONFIG_SPARC64 | ||
10246 | if (err) | ||
10247 | platform_driver_unregister(&niu_of_driver); | ||
10248 | #endif | ||
10249 | } | ||
10250 | |||
10251 | return err; | ||
10252 | } | ||
10253 | |||
10254 | static void __exit niu_exit(void) | ||
10255 | { | ||
10256 | pci_unregister_driver(&niu_pci_driver); | ||
10257 | #ifdef CONFIG_SPARC64 | ||
10258 | platform_driver_unregister(&niu_of_driver); | ||
10259 | #endif | ||
10260 | } | ||
10261 | |||
10262 | module_init(niu_init); | ||
10263 | module_exit(niu_exit); | ||
diff --git a/drivers/net/ethernet/sun/niu.h b/drivers/net/ethernet/sun/niu.h new file mode 100644 index 000000000000..51e177e1860d --- /dev/null +++ b/drivers/net/ethernet/sun/niu.h | |||
@@ -0,0 +1,3306 @@ | |||
1 | /* niu.h: Definitions for Neptune ethernet driver. | ||
2 | * | ||
3 | * Copyright (C) 2007 David S. Miller (davem@davemloft.net) | ||
4 | */ | ||
5 | |||
6 | #ifndef _NIU_H | ||
7 | #define _NIU_H | ||
8 | |||
9 | #define PIO 0x000000UL | ||
10 | #define FZC_PIO 0x080000UL | ||
11 | #define FZC_MAC 0x180000UL | ||
12 | #define FZC_IPP 0x280000UL | ||
13 | #define FFLP 0x300000UL | ||
14 | #define FZC_FFLP 0x380000UL | ||
15 | #define PIO_VADDR 0x400000UL | ||
16 | #define ZCP 0x500000UL | ||
17 | #define FZC_ZCP 0x580000UL | ||
18 | #define DMC 0x600000UL | ||
19 | #define FZC_DMC 0x680000UL | ||
20 | #define TXC 0x700000UL | ||
21 | #define FZC_TXC 0x780000UL | ||
22 | #define PIO_LDSV 0x800000UL | ||
23 | #define PIO_LDGIM 0x900000UL | ||
24 | #define PIO_IMASK0 0xa00000UL | ||
25 | #define PIO_IMASK1 0xb00000UL | ||
26 | #define FZC_PROM 0xc80000UL | ||
27 | #define FZC_PIM 0xd80000UL | ||
28 | |||
29 | #define LDSV0(LDG) (PIO_LDSV + 0x00000UL + (LDG) * 0x2000UL) | ||
30 | #define LDSV1(LDG) (PIO_LDSV + 0x00008UL + (LDG) * 0x2000UL) | ||
31 | #define LDSV2(LDG) (PIO_LDSV + 0x00010UL + (LDG) * 0x2000UL) | ||
32 | |||
33 | #define LDG_IMGMT(LDG) (PIO_LDSV + 0x00018UL + (LDG) * 0x2000UL) | ||
34 | #define LDG_IMGMT_ARM 0x0000000080000000ULL | ||
35 | #define LDG_IMGMT_TIMER 0x000000000000003fULL | ||
36 | |||
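Each logical device group owns an 8 KB page of status/management registers, hence the (LDG) * 0x2000UL stride: LDSV0(3), for instance, expands to 0x800000UL + 3 * 0x2000UL = 0x806000UL.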
37 | #define LD_IM0(IDX) (PIO_IMASK0 + 0x00000UL + (IDX) * 0x2000UL) | ||
38 | #define LD_IM0_MASK 0x0000000000000003ULL | ||
39 | |||
40 | #define LD_IM1(IDX) (PIO_IMASK1 + 0x00000UL + (IDX) * 0x2000UL) | ||
41 | #define LD_IM1_MASK 0x0000000000000003ULL | ||
42 | |||
43 | #define LDG_TIMER_RES (FZC_PIO + 0x00008UL) | ||
44 | #define LDG_TIMER_RES_VAL 0x00000000000fffffULL | ||
45 | |||
46 | #define DIRTY_TID_CTL (FZC_PIO + 0x00010UL) | ||
47 | #define DIRTY_TID_CTL_NPTHRED 0x00000000003f0000ULL | ||
48 | #define DIRTY_TID_CTL_RDTHRED 0x00000000000003f0ULL | ||
49 | #define DIRTY_TID_CTL_DTIDCLR 0x0000000000000002ULL | ||
50 | #define DIRTY_TID_CTL_DTIDENAB 0x0000000000000001ULL | ||
51 | |||
52 | #define DIRTY_TID_STAT (FZC_PIO + 0x00018UL) | ||
53 | #define DIRTY_TID_STAT_NPWSTAT 0x0000000000003f00ULL | ||
54 | #define DIRTY_TID_STAT_RDSTAT 0x000000000000003fULL | ||
55 | |||
56 | #define RST_CTL (FZC_PIO + 0x00038UL) | ||
57 | #define RST_CTL_MAC_RST3 0x0000000000400000ULL | ||
58 | #define RST_CTL_MAC_RST2 0x0000000000200000ULL | ||
59 | #define RST_CTL_MAC_RST1 0x0000000000100000ULL | ||
60 | #define RST_CTL_MAC_RST0 0x0000000000080000ULL | ||
61 | #define RST_CTL_ACK_TO_EN 0x0000000000000800ULL | ||
62 | #define RST_CTL_ACK_TO_VAL 0x00000000000007feULL | ||
63 | |||
64 | #define SMX_CFIG_DAT (FZC_PIO + 0x00040UL) | ||
65 | #define SMX_CFIG_DAT_RAS_DET 0x0000000080000000ULL | ||
66 | #define SMX_CFIG_DAT_RAS_INJ 0x0000000040000000ULL | ||
67 | #define SMX_CFIG_DAT_XACT_TO 0x000000000fffffffULL | ||
68 | |||
69 | #define SMX_INT_STAT (FZC_PIO + 0x00048UL) | ||
70 | #define SMX_INT_STAT_STAT 0x00000000ffffffffULL | ||
71 | |||
72 | #define SMX_CTL (FZC_PIO + 0x00050UL) | ||
73 | #define SMX_CTL_CTL 0x00000000ffffffffULL | ||
74 | |||
75 | #define SMX_DBG_VEC (FZC_PIO + 0x00058UL) | ||
76 | #define SMX_DBG_VEC_VEC 0x00000000ffffffffULL | ||
77 | |||
78 | #define PIO_DBG_SEL (FZC_PIO + 0x00060UL) | ||
79 | #define PIO_DBG_SEL_SEL 0x000000000000003fULL | ||
80 | |||
81 | #define PIO_TRAIN_VEC (FZC_PIO + 0x00068UL) | ||
82 | #define PIO_TRAIN_VEC_VEC 0x00000000ffffffffULL | ||
83 | |||
84 | #define PIO_ARB_CTL (FZC_PIO + 0x00070UL) | ||
85 | #define PIO_ARB_CTL_CTL 0x00000000ffffffffULL | ||
86 | |||
87 | #define PIO_ARB_DBG_VEC (FZC_PIO + 0x00078UL) | ||
88 | #define PIO_ARB_DBG_VEC_VEC 0x00000000ffffffffULL | ||
89 | |||
90 | #define SYS_ERR_MASK (FZC_PIO + 0x00090UL) | ||
91 | #define SYS_ERR_MASK_META2 0x0000000000000400ULL | ||
92 | #define SYS_ERR_MASK_META1 0x0000000000000200ULL | ||
93 | #define SYS_ERR_MASK_PEU 0x0000000000000100ULL | ||
94 | #define SYS_ERR_MASK_TXC 0x0000000000000080ULL | ||
95 | #define SYS_ERR_MASK_RDMC 0x0000000000000040ULL | ||
96 | #define SYS_ERR_MASK_TDMC 0x0000000000000020ULL | ||
97 | #define SYS_ERR_MASK_ZCP 0x0000000000000010ULL | ||
98 | #define SYS_ERR_MASK_FFLP 0x0000000000000008ULL | ||
99 | #define SYS_ERR_MASK_IPP 0x0000000000000004ULL | ||
100 | #define SYS_ERR_MASK_MAC 0x0000000000000002ULL | ||
101 | #define SYS_ERR_MASK_SMX 0x0000000000000001ULL | ||
102 | |||
103 | #define SYS_ERR_STAT (FZC_PIO + 0x00098UL) | ||
104 | #define SYS_ERR_STAT_META2 0x0000000000000400ULL | ||
105 | #define SYS_ERR_STAT_META1 0x0000000000000200ULL | ||
106 | #define SYS_ERR_STAT_PEU 0x0000000000000100ULL | ||
107 | #define SYS_ERR_STAT_TXC 0x0000000000000080ULL | ||
108 | #define SYS_ERR_STAT_RDMC 0x0000000000000040ULL | ||
109 | #define SYS_ERR_STAT_TDMC 0x0000000000000020ULL | ||
110 | #define SYS_ERR_STAT_ZCP 0x0000000000000010ULL | ||
111 | #define SYS_ERR_STAT_FFLP 0x0000000000000008ULL | ||
112 | #define SYS_ERR_STAT_IPP 0x0000000000000004ULL | ||
113 | #define SYS_ERR_STAT_MAC 0x0000000000000002ULL | ||
114 | #define SYS_ERR_STAT_SMX 0x0000000000000001ULL | ||
115 | |||
116 | #define SID(LDG) (FZC_PIO + 0x10200UL + (LDG) * 8UL) | ||
117 | #define SID_FUNC 0x0000000000000060ULL | ||
118 | #define SID_FUNC_SHIFT 5 | ||
119 | #define SID_VECTOR 0x000000000000001fULL | ||
120 | #define SID_VECTOR_SHIFT 0 | ||
121 | |||
122 | #define LDG_NUM(LDN) (FZC_PIO + 0x20000UL + (LDN) * 8UL) | ||
123 | |||
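SID and LDG_NUM together define the interrupt routing: LDG_NUM(ldn) points each logical device at a group, and SID(ldg) gives that group its PCI function and vector. A hedged sketch of the two writes, again assuming nw64() and an illustrative helper name:

	/* Bind logical device `ldn` to group `ldg`, then assign the group's
	 * system interrupt data: function number in bits 6:5, vector in 4:0. */
	static void niu_ldg_bind_sketch(struct niu *np, int ldn, int ldg,
					int func, int vector)
	{
		nw64(LDG_NUM(ldn), ldg);
		nw64(SID(ldg), (func << SID_FUNC_SHIFT) |
			       (vector << SID_VECTOR_SHIFT));
	}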
124 | #define XMAC_PORT0_OFF (FZC_MAC + 0x000000) | ||
125 | #define XMAC_PORT1_OFF (FZC_MAC + 0x006000) | ||
126 | #define BMAC_PORT2_OFF (FZC_MAC + 0x00c000) | ||
127 | #define BMAC_PORT3_OFF (FZC_MAC + 0x010000) | ||
128 | |||
129 | /* XMAC registers, offset from np->mac_regs */ | ||
130 | |||
131 | #define XTXMAC_SW_RST 0x00000UL | ||
132 | #define XTXMAC_SW_RST_REG_RS 0x0000000000000002ULL | ||
133 | #define XTXMAC_SW_RST_SOFT_RST 0x0000000000000001ULL | ||
134 | |||
135 | #define XRXMAC_SW_RST 0x00008UL | ||
136 | #define XRXMAC_SW_RST_REG_RS 0x0000000000000002ULL | ||
137 | #define XRXMAC_SW_RST_SOFT_RST 0x0000000000000001ULL | ||
138 | |||
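Both software-reset registers are self-clearing: software sets REG_RS and SOFT_RST, then polls until hardware drops them. A sketch of the TX-side reset, assuming nr64_mac()/nw64_mac() accessors that offset into np->mac_regs and udelay() from <linux/delay.h>:

	/* Soft-reset the XMAC transmitter and wait for the bits to self-clear. */
	static int niu_reset_tx_xmac_sketch(struct niu *np)
	{
		int limit = 100;

		nw64_mac(XTXMAC_SW_RST,
			 XTXMAC_SW_RST_REG_RS | XTXMAC_SW_RST_SOFT_RST);
		while (--limit > 0) {
			if (!(nr64_mac(XTXMAC_SW_RST) &
			      (XTXMAC_SW_RST_REG_RS | XTXMAC_SW_RST_SOFT_RST)))
				return 0;
			udelay(100);
		}
		return -ENODEV;
	}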
139 | #define XTXMAC_STATUS 0x00020UL | ||
140 | #define XTXMAC_STATUS_FRAME_CNT_EXP 0x0000000000000800ULL | ||
141 | #define XTXMAC_STATUS_BYTE_CNT_EXP 0x0000000000000400ULL | ||
142 | #define XTXMAC_STATUS_TXFIFO_XFR_ERR 0x0000000000000010ULL | ||
143 | #define XTXMAC_STATUS_TXMAC_OFLOW 0x0000000000000008ULL | ||
144 | #define XTXMAC_STATUS_MAX_PSIZE_ERR 0x0000000000000004ULL | ||
145 | #define XTXMAC_STATUS_TXMAC_UFLOW 0x0000000000000002ULL | ||
146 | #define XTXMAC_STATUS_FRAME_XMITED 0x0000000000000001ULL | ||
147 | |||
148 | #define XRXMAC_STATUS 0x00028UL | ||
149 | #define XRXMAC_STATUS_RXHIST7_CNT_EXP 0x0000000000100000ULL | ||
150 | #define XRXMAC_STATUS_LCL_FLT_STATUS 0x0000000000080000ULL | ||
151 | #define XRXMAC_STATUS_RFLT_DET 0x0000000000040000ULL | ||
152 | #define XRXMAC_STATUS_LFLT_CNT_EXP 0x0000000000020000ULL | ||
153 | #define XRXMAC_STATUS_PHY_MDINT 0x0000000000010000ULL | ||
154 | #define XRXMAC_STATUS_ALIGNERR_CNT_EXP 0x0000000000010000ULL | ||
155 | #define XRXMAC_STATUS_RXFRAG_CNT_EXP 0x0000000000008000ULL | ||
156 | #define XRXMAC_STATUS_RXMULTF_CNT_EXP 0x0000000000004000ULL | ||
157 | #define XRXMAC_STATUS_RXBCAST_CNT_EXP 0x0000000000002000ULL | ||
158 | #define XRXMAC_STATUS_RXHIST6_CNT_EXP 0x0000000000001000ULL | ||
159 | #define XRXMAC_STATUS_RXHIST5_CNT_EXP 0x0000000000000800ULL | ||
160 | #define XRXMAC_STATUS_RXHIST4_CNT_EXP 0x0000000000000400ULL | ||
161 | #define XRXMAC_STATUS_RXHIST3_CNT_EXP 0x0000000000000200ULL | ||
162 | #define XRXMAC_STATUS_RXHIST2_CNT_EXP 0x0000000000000100ULL | ||
163 | #define XRXMAC_STATUS_RXHIST1_CNT_EXP 0x0000000000000080ULL | ||
164 | #define XRXMAC_STATUS_RXOCTET_CNT_EXP 0x0000000000000040ULL | ||
165 | #define XRXMAC_STATUS_CVIOLERR_CNT_EXP 0x0000000000000020ULL | ||
166 | #define XRXMAC_STATUS_LENERR_CNT_EXP 0x0000000000000010ULL | ||
167 | #define XRXMAC_STATUS_CRCERR_CNT_EXP 0x0000000000000008ULL | ||
168 | #define XRXMAC_STATUS_RXUFLOW 0x0000000000000004ULL | ||
169 | #define XRXMAC_STATUS_RXOFLOW 0x0000000000000002ULL | ||
170 | #define XRXMAC_STATUS_FRAME_RCVD 0x0000000000000001ULL | ||
171 | |||
172 | #define XMAC_FC_STAT 0x00030UL | ||
173 | #define XMAC_FC_STAT_RX_RCV_PAUSE_TIME 0x00000000ffff0000ULL | ||
174 | #define XMAC_FC_STAT_TX_MAC_NPAUSE 0x0000000000000004ULL | ||
175 | #define XMAC_FC_STAT_TX_MAC_PAUSE 0x0000000000000002ULL | ||
176 | #define XMAC_FC_STAT_RX_MAC_RPAUSE 0x0000000000000001ULL | ||
177 | |||
178 | #define XTXMAC_STAT_MSK 0x00040UL | ||
179 | #define XTXMAC_STAT_MSK_FRAME_CNT_EXP 0x0000000000000800ULL | ||
180 | #define XTXMAC_STAT_MSK_BYTE_CNT_EXP 0x0000000000000400ULL | ||
181 | #define XTXMAC_STAT_MSK_TXFIFO_XFR_ERR 0x0000000000000010ULL | ||
182 | #define XTXMAC_STAT_MSK_TXMAC_OFLOW 0x0000000000000008ULL | ||
183 | #define XTXMAC_STAT_MSK_MAX_PSIZE_ERR 0x0000000000000004ULL | ||
184 | #define XTXMAC_STAT_MSK_TXMAC_UFLOW 0x0000000000000002ULL | ||
185 | #define XTXMAC_STAT_MSK_FRAME_XMITED 0x0000000000000001ULL | ||
186 | |||
187 | #define XRXMAC_STAT_MSK 0x00048UL | ||
188 | #define XRXMAC_STAT_MSK_LCL_FLT_STAT_MSK 0x0000000000080000ULL | ||
189 | #define XRXMAC_STAT_MSK_RFLT_DET 0x0000000000040000ULL | ||
190 | #define XRXMAC_STAT_MSK_LFLT_CNT_EXP 0x0000000000020000ULL | ||
191 | #define XRXMAC_STAT_MSK_PHY_MDINT 0x0000000000010000ULL | ||
192 | #define XRXMAC_STAT_MSK_RXFRAG_CNT_EXP 0x0000000000008000ULL | ||
193 | #define XRXMAC_STAT_MSK_RXMULTF_CNT_EXP 0x0000000000004000ULL | ||
194 | #define XRXMAC_STAT_MSK_RXBCAST_CNT_EXP 0x0000000000002000ULL | ||
195 | #define XRXMAC_STAT_MSK_RXHIST6_CNT_EXP 0x0000000000001000ULL | ||
196 | #define XRXMAC_STAT_MSK_RXHIST5_CNT_EXP 0x0000000000000800ULL | ||
197 | #define XRXMAC_STAT_MSK_RXHIST4_CNT_EXP 0x0000000000000400ULL | ||
198 | #define XRXMAC_STAT_MSK_RXHIST3_CNT_EXP 0x0000000000000200ULL | ||
199 | #define XRXMAC_STAT_MSK_RXHIST2_CNT_EXP 0x0000000000000100ULL | ||
200 | #define XRXMAC_STAT_MSK_RXHIST1_CNT_EXP 0x0000000000000080ULL | ||
201 | #define XRXMAC_STAT_MSK_RXOCTET_CNT_EXP 0x0000000000000040ULL | ||
202 | #define XRXMAC_STAT_MSK_CVIOLERR_CNT_EXP 0x0000000000000020ULL | ||
203 | #define XRXMAC_STAT_MSK_LENERR_CNT_EXP 0x0000000000000010ULL | ||
204 | #define XRXMAC_STAT_MSK_CRCERR_CNT_EXP 0x0000000000000008ULL | ||
205 | #define XRXMAC_STAT_MSK_RXUFLOW_CNT_EXP 0x0000000000000004ULL | ||
206 | #define XRXMAC_STAT_MSK_RXOFLOW_CNT_EXP 0x0000000000000002ULL | ||
207 | #define XRXMAC_STAT_MSK_FRAME_RCVD 0x0000000000000001ULL | ||
208 | |||
209 | #define XMAC_FC_MSK 0x00050UL | ||
210 | #define XMAC_FC_MSK_TX_MAC_NPAUSE 0x0000000000000004ULL | ||
211 | #define XMAC_FC_MSK_TX_MAC_PAUSE 0x0000000000000002ULL | ||
212 | #define XMAC_FC_MSK_RX_MAC_RPAUSE 0x0000000000000001ULL | ||
213 | |||
214 | #define XMAC_CONFIG 0x00060UL | ||
215 | #define XMAC_CONFIG_SEL_CLK_25MHZ 0x0000000080000000ULL | ||
216 | #define XMAC_CONFIG_1G_PCS_BYPASS 0x0000000040000000ULL | ||
217 | #define XMAC_CONFIG_10G_XPCS_BYPASS 0x0000000020000000ULL | ||
218 | #define XMAC_CONFIG_MODE_MASK 0x0000000018000000ULL | ||
219 | #define XMAC_CONFIG_MODE_XGMII 0x0000000000000000ULL | ||
220 | #define XMAC_CONFIG_MODE_GMII 0x0000000008000000ULL | ||
221 | #define XMAC_CONFIG_MODE_MII 0x0000000010000000ULL | ||
222 | #define XMAC_CONFIG_LFS_DISABLE 0x0000000004000000ULL | ||
223 | #define XMAC_CONFIG_LOOPBACK 0x0000000002000000ULL | ||
224 | #define XMAC_CONFIG_TX_OUTPUT_EN 0x0000000001000000ULL | ||
225 | #define XMAC_CONFIG_SEL_POR_CLK_SRC 0x0000000000800000ULL | ||
226 | #define XMAC_CONFIG_LED_POLARITY 0x0000000000400000ULL | ||
227 | #define XMAC_CONFIG_FORCE_LED_ON 0x0000000000200000ULL | ||
228 | #define XMAC_CONFIG_PASS_FLOW_CTRL 0x0000000000100000ULL | ||
229 | #define XMAC_CONFIG_RCV_PAUSE_ENABLE 0x0000000000080000ULL | ||
230 | #define XMAC_CONFIG_MAC2IPP_PKT_CNT_EN 0x0000000000040000ULL | ||
231 | #define XMAC_CONFIG_STRIP_CRC 0x0000000000020000ULL | ||
232 | #define XMAC_CONFIG_ADDR_FILTER_EN 0x0000000000010000ULL | ||
233 | #define XMAC_CONFIG_HASH_FILTER_EN 0x0000000000008000ULL | ||
234 | #define XMAC_CONFIG_RX_CODEV_CHK_DIS 0x0000000000004000ULL | ||
235 | #define XMAC_CONFIG_RESERVED_MULTICAST 0x0000000000002000ULL | ||
236 | #define XMAC_CONFIG_RX_CRC_CHK_DIS 0x0000000000001000ULL | ||
237 | #define XMAC_CONFIG_ERR_CHK_DIS 0x0000000000000800ULL | ||
238 | #define XMAC_CONFIG_PROMISC_GROUP 0x0000000000000400ULL | ||
239 | #define XMAC_CONFIG_PROMISCUOUS 0x0000000000000200ULL | ||
240 | #define XMAC_CONFIG_RX_MAC_ENABLE 0x0000000000000100ULL | ||
241 | #define XMAC_CONFIG_WARNING_MSG_EN 0x0000000000000080ULL | ||
242 | #define XMAC_CONFIG_ALWAYS_NO_CRC 0x0000000000000008ULL | ||
243 | #define XMAC_CONFIG_VAR_MIN_IPG_EN 0x0000000000000004ULL | ||
244 | #define XMAC_CONFIG_STRETCH_MODE 0x0000000000000002ULL | ||
245 | #define XMAC_CONFIG_TX_ENABLE 0x0000000000000001ULL | ||
246 | |||
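XMAC_CONFIG mixes mode selection, filtering, and enable bits in one register, so updates are read-modify-write. As an example, promiscuous mode means setting PROMISCUOUS and dropping the address/hash filters; a sketch under the same nr64_mac()/nw64_mac() assumption:

	/* Toggle promiscuous reception on an XMAC port. */
	static void xmac_set_promisc_sketch(struct niu *np, bool on)
	{
		u64 val = nr64_mac(XMAC_CONFIG);

		if (on) {
			val |= XMAC_CONFIG_PROMISCUOUS;
			val &= ~(XMAC_CONFIG_ADDR_FILTER_EN |
				 XMAC_CONFIG_HASH_FILTER_EN);
		} else {
			val &= ~XMAC_CONFIG_PROMISCUOUS;
		}
		nw64_mac(XMAC_CONFIG, val);
	}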
247 | #define XMAC_IPG 0x00080UL | ||
248 | #define XMAC_IPG_STRETCH_CONST 0x0000000000e00000ULL | ||
249 | #define XMAC_IPG_STRETCH_CONST_SHIFT 21 | ||
250 | #define XMAC_IPG_STRETCH_RATIO 0x00000000001f0000ULL | ||
251 | #define XMAC_IPG_STRETCH_RATIO_SHIFT 16 | ||
252 | #define XMAC_IPG_IPG_MII_GMII 0x000000000000ff00ULL | ||
253 | #define XMAC_IPG_IPG_MII_GMII_SHIFT 8 | ||
254 | #define XMAC_IPG_IPG_XGMII 0x0000000000000007ULL | ||
255 | #define XMAC_IPG_IPG_XGMII_SHIFT 0 | ||
256 | |||
257 | #define IPG_12_15_XGMII 3 | ||
258 | #define IPG_16_19_XGMII 4 | ||
259 | #define IPG_20_23_XGMII 5 | ||
260 | #define IPG_12_MII_GMII 10 | ||
261 | #define IPG_13_MII_GMII 11 | ||
262 | #define IPG_14_MII_GMII 12 | ||
263 | #define IPG_15_MII_GMII 13 | ||
264 | #define IPG_16_MII_GMII 14 | ||
265 | |||
266 | #define XMAC_MIN 0x00088UL | ||
267 | #define XMAC_MIN_RX_MIN_PKT_SIZE 0x000000003ff00000ULL | ||
268 | #define XMAC_MIN_RX_MIN_PKT_SIZE_SHFT 20 | ||
269 | #define XMAC_MIN_SLOT_TIME 0x000000000003fc00ULL | ||
270 | #define XMAC_MIN_SLOT_TIME_SHFT 10 | ||
271 | #define XMAC_MIN_TX_MIN_PKT_SIZE 0x00000000000003ffULL | ||
272 | #define XMAC_MIN_TX_MIN_PKT_SIZE_SHFT 0 | ||
273 | |||
274 | #define XMAC_MAX 0x00090UL | ||
275 | #define XMAC_MAX_FRAME_SIZE 0x0000000000003fffULL | ||
276 | #define XMAC_MAX_FRAME_SIZE_SHFT 0 | ||
277 | |||
278 | #define XMAC_ADDR0 0x000a0UL | ||
279 | #define XMAC_ADDR0_ADDR0 0x000000000000ffffULL | ||
280 | |||
281 | #define XMAC_ADDR1 0x000a8UL | ||
282 | #define XMAC_ADDR1_ADDR1 0x000000000000ffffULL | ||
283 | |||
284 | #define XMAC_ADDR2 0x000b0UL | ||
285 | #define XMAC_ADDR2_ADDR2 0x000000000000ffffULL | ||
286 | |||
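The station address is split across three 16-bit registers (the alternate addresses below follow the same split). A sketch of loading a 48-bit MAC address, assuming, as niu.c does for the primary MAC, that ADDR2 carries the two most significant bytes:

	/* Program a 48-bit Ethernet address; addr[0] is the first byte on
	 * the wire (most significant). */
	static void xmac_set_addr_sketch(struct niu *np, const u8 *addr)
	{
		nw64_mac(XMAC_ADDR0, (addr[4] << 8) | addr[5]);
		nw64_mac(XMAC_ADDR1, (addr[2] << 8) | addr[3]);
		nw64_mac(XMAC_ADDR2, (addr[0] << 8) | addr[1]);
	}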
287 | #define XMAC_ADDR_CMPEN 0x00208UL | ||
288 | #define XMAC_ADDR_CMPEN_EN15 0x0000000000008000ULL | ||
289 | #define XMAC_ADDR_CMPEN_EN14 0x0000000000004000ULL | ||
290 | #define XMAC_ADDR_CMPEN_EN13 0x0000000000002000ULL | ||
291 | #define XMAC_ADDR_CMPEN_EN12 0x0000000000001000ULL | ||
292 | #define XMAC_ADDR_CMPEN_EN11 0x0000000000000800ULL | ||
293 | #define XMAC_ADDR_CMPEN_EN10 0x0000000000000400ULL | ||
294 | #define XMAC_ADDR_CMPEN_EN9 0x0000000000000200ULL | ||
295 | #define XMAC_ADDR_CMPEN_EN8 0x0000000000000100ULL | ||
296 | #define XMAC_ADDR_CMPEN_EN7 0x0000000000000080ULL | ||
297 | #define XMAC_ADDR_CMPEN_EN6 0x0000000000000040ULL | ||
298 | #define XMAC_ADDR_CMPEN_EN5 0x0000000000000020ULL | ||
299 | #define XMAC_ADDR_CMPEN_EN4 0x0000000000000010ULL | ||
300 | #define XMAC_ADDR_CMPEN_EN3 0x0000000000000008ULL | ||
301 | #define XMAC_ADDR_CMPEN_EN2 0x0000000000000004ULL | ||
302 | #define XMAC_ADDR_CMPEN_EN1 0x0000000000000002ULL | ||
303 | #define XMAC_ADDR_CMPEN_EN0 0x0000000000000001ULL | ||
304 | |||
305 | #define XMAC_NUM_ALT_ADDR 16 | ||
306 | |||
307 | #define XMAC_ALT_ADDR0(NUM) (0x00218UL + (NUM)*0x18UL) | ||
308 | #define XMAC_ALT_ADDR0_ADDR0 0x000000000000ffffULL | ||
309 | |||
310 | #define XMAC_ALT_ADDR1(NUM) (0x00220UL + (NUM)*0x18UL) | ||
311 | #define XMAC_ALT_ADDR1_ADDR1 0x000000000000ffffULL | ||
312 | |||
313 | #define XMAC_ALT_ADDR2(NUM) (0x00228UL + (NUM)*0x18UL) | ||
314 | #define XMAC_ALT_ADDR2_ADDR2 0x000000000000ffffULL | ||
315 | |||
316 | #define XMAC_ADD_FILT0 0x00818UL | ||
317 | #define XMAC_ADD_FILT0_FILT0 0x000000000000ffffULL | ||
318 | |||
319 | #define XMAC_ADD_FILT1 0x00820UL | ||
320 | #define XMAC_ADD_FILT1_FILT1 0x000000000000ffffULL | ||
321 | |||
322 | #define XMAC_ADD_FILT2 0x00828UL | ||
323 | #define XMAC_ADD_FILT2_FILT2 0x000000000000ffffULL | ||
324 | |||
325 | #define XMAC_ADD_FILT12_MASK 0x00830UL | ||
326 | #define XMAC_ADD_FILT12_MASK_VAL 0x00000000000000ffULL | ||
327 | |||
328 | #define XMAC_ADD_FILT00_MASK 0x00838UL | ||
329 | #define XMAC_ADD_FILT00_MASK_VAL 0x000000000000ffffULL | ||
330 | |||
331 | #define XMAC_HASH_TBL(NUM) (0x00840UL + (NUM) * 0x8UL) | ||
332 | #define XMAC_HASH_TBL_VAL 0x000000000000ffffULL | ||
333 | |||
334 | #define XMAC_NUM_HOST_INFO 20 | ||
335 | |||
336 | #define XMAC_HOST_INFO(NUM) (0x00900UL + (NUM) * 0x8UL) | ||
337 | |||
338 | #define XMAC_PA_DATA0 0x00b80UL | ||
339 | #define XMAC_PA_DATA0_VAL 0x00000000ffffffffULL | ||
340 | |||
341 | #define XMAC_PA_DATA1 0x00b88UL | ||
342 | #define XMAC_PA_DATA1_VAL 0x00000000ffffffffULL | ||
343 | |||
344 | #define XMAC_DEBUG_SEL 0x00b90UL | ||
345 | #define XMAC_DEBUG_SEL_XMAC 0x0000000000000078ULL | ||
346 | #define XMAC_DEBUG_SEL_MAC 0x0000000000000007ULL | ||
347 | |||
348 | #define XMAC_TRAIN_VEC 0x00b98UL | ||
349 | #define XMAC_TRAIN_VEC_VAL 0x00000000ffffffffULL | ||
350 | |||
351 | #define RXMAC_BT_CNT 0x00100UL | ||
352 | #define RXMAC_BT_CNT_COUNT 0x00000000ffffffffULL | ||
353 | |||
354 | #define RXMAC_BC_FRM_CNT 0x00108UL | ||
355 | #define RXMAC_BC_FRM_CNT_COUNT 0x00000000001fffffULL | ||
356 | |||
357 | #define RXMAC_MC_FRM_CNT 0x00110UL | ||
358 | #define RXMAC_MC_FRM_CNT_COUNT 0x00000000001fffffULL | ||
359 | |||
360 | #define RXMAC_FRAG_CNT 0x00118UL | ||
361 | #define RXMAC_FRAG_CNT_COUNT 0x00000000001fffffULL | ||
362 | |||
363 | #define RXMAC_HIST_CNT1 0x00120UL | ||
364 | #define RXMAC_HIST_CNT1_COUNT 0x00000000001fffffULL | ||
365 | |||
366 | #define RXMAC_HIST_CNT2 0x00128UL | ||
367 | #define RXMAC_HIST_CNT2_COUNT 0x00000000001fffffULL | ||
368 | |||
369 | #define RXMAC_HIST_CNT3 0x00130UL | ||
370 | #define RXMAC_HIST_CNT3_COUNT 0x00000000000fffffULL | ||
371 | |||
372 | #define RXMAC_HIST_CNT4 0x00138UL | ||
373 | #define RXMAC_HIST_CNT4_COUNT 0x000000000007ffffULL | ||
374 | |||
375 | #define RXMAC_HIST_CNT5 0x00140UL | ||
376 | #define RXMAC_HIST_CNT5_COUNT 0x000000000003ffffULL | ||
377 | |||
378 | #define RXMAC_HIST_CNT6 0x00148UL | ||
379 | #define RXMAC_HIST_CNT6_COUNT 0x000000000000ffffULL | ||
380 | |||
381 | #define RXMAC_MPSZER_CNT 0x00150UL | ||
382 | #define RXMAC_MPSZER_CNT_COUNT 0x00000000000000ffULL | ||
383 | |||
384 | #define RXMAC_CRC_ER_CNT 0x00158UL | ||
385 | #define RXMAC_CRC_ER_CNT_COUNT 0x00000000000000ffULL | ||
386 | |||
387 | #define RXMAC_CD_VIO_CNT 0x00160UL | ||
388 | #define RXMAC_CD_VIO_CNT_COUNT 0x00000000000000ffULL | ||
389 | |||
390 | #define RXMAC_ALIGN_ERR_CNT 0x00168UL | ||
391 | #define RXMAC_ALIGN_ERR_CNT_COUNT 0x00000000000000ffULL | ||
392 | |||
393 | #define TXMAC_FRM_CNT 0x00170UL | ||
394 | #define TXMAC_FRM_CNT_COUNT 0x00000000ffffffffULL | ||
395 | |||
396 | #define TXMAC_BYTE_CNT 0x00178UL | ||
397 | #define TXMAC_BYTE_CNT_COUNT 0x00000000ffffffffULL | ||
398 | |||
399 | #define LINK_FAULT_CNT 0x00180UL | ||
400 | #define LINK_FAULT_CNT_COUNT 0x00000000000000ffULL | ||
401 | |||
402 | #define RXMAC_HIST_CNT7 0x00188UL | ||
403 | #define RXMAC_HIST_CNT7_COUNT 0x0000000007ffffffULL | ||
404 | |||
405 | #define XMAC_SM_REG 0x001a8UL | ||
406 | #define XMAC_SM_REG_STATE 0x00000000ffffffffULL | ||
407 | |||
408 | #define XMAC_INTER1 0x001b0UL | ||
409 | #define XMAC_INTERN1_SIGNALS1 0x00000000ffffffffULL | ||
410 | |||
411 | #define XMAC_INTER2 0x001b8UL | ||
412 | #define XMAC_INTERN2_SIGNALS2 0x00000000ffffffffULL | ||
413 | |||
414 | /* BMAC registers, offset from np->mac_regs */ | ||
415 | |||
416 | #define BTXMAC_SW_RST 0x00000UL | ||
417 | #define BTXMAC_SW_RST_RESET 0x0000000000000001ULL | ||
418 | |||
419 | #define BRXMAC_SW_RST 0x00008UL | ||
420 | #define BRXMAC_SW_RST_RESET 0x0000000000000001ULL | ||
421 | |||
422 | #define BMAC_SEND_PAUSE 0x00010UL | ||
423 | #define BMAC_SEND_PAUSE_SEND 0x0000000000010000ULL | ||
424 | #define BMAC_SEND_PAUSE_TIME 0x000000000000ffffULL | ||
425 | |||
426 | #define BTXMAC_STATUS 0x00020UL | ||
427 | #define BTXMAC_STATUS_XMIT 0x0000000000000001ULL | ||
428 | #define BTXMAC_STATUS_UNDERRUN 0x0000000000000002ULL | ||
429 | #define BTXMAC_STATUS_MAX_PKT_ERR 0x0000000000000004ULL | ||
430 | #define BTXMAC_STATUS_BYTE_CNT_EXP 0x0000000000000400ULL | ||
431 | #define BTXMAC_STATUS_FRAME_CNT_EXP 0x0000000000000800ULL | ||
432 | |||
433 | #define BRXMAC_STATUS 0x00028UL | ||
434 | #define BRXMAC_STATUS_RX_PKT 0x0000000000000001ULL | ||
435 | #define BRXMAC_STATUS_OVERFLOW 0x0000000000000002ULL | ||
436 | #define BRXMAC_STATUS_FRAME_CNT_EXP 0x0000000000000004ULL | ||
437 | #define BRXMAC_STATUS_ALIGN_ERR_EXP 0x0000000000000008ULL | ||
438 | #define BRXMAC_STATUS_CRC_ERR_EXP 0x0000000000000010ULL | ||
439 | #define BRXMAC_STATUS_LEN_ERR_EXP 0x0000000000000020ULL | ||
440 | |||
441 | #define BMAC_CTRL_STATUS 0x00030UL | ||
442 | #define BMAC_CTRL_STATUS_PAUSE_RECV 0x0000000000000001ULL | ||
443 | #define BMAC_CTRL_STATUS_PAUSE 0x0000000000000002ULL | ||
444 | #define BMAC_CTRL_STATUS_NOPAUSE 0x0000000000000004ULL | ||
445 | #define BMAC_CTRL_STATUS_TIME 0x00000000ffff0000ULL | ||
446 | #define BMAC_CTRL_STATUS_TIME_SHIFT 16 | ||
447 | |||
448 | #define BTXMAC_STATUS_MASK 0x00040UL | ||
449 | #define BRXMAC_STATUS_MASK 0x00048UL | ||
450 | #define BMAC_CTRL_STATUS_MASK 0x00050UL | ||
451 | |||
452 | #define BTXMAC_CONFIG 0x00060UL | ||
453 | #define BTXMAC_CONFIG_ENABLE 0x0000000000000001ULL | ||
454 | #define BTXMAC_CONFIG_FCS_DISABLE 0x0000000000000002ULL | ||
455 | |||
456 | #define BRXMAC_CONFIG 0x00068UL | ||
457 | #define BRXMAC_CONFIG_DISCARD_DIS 0x0000000000000080ULL | ||
458 | #define BRXMAC_CONFIG_ADDR_FILT_EN 0x0000000000000040ULL | ||
459 | #define BRXMAC_CONFIG_HASH_FILT_EN 0x0000000000000020ULL | ||
460 | #define BRXMAC_CONFIG_PROMISC_GRP 0x0000000000000010ULL | ||
461 | #define BRXMAC_CONFIG_PROMISC 0x0000000000000008ULL | ||
462 | #define BRXMAC_CONFIG_STRIP_FCS 0x0000000000000004ULL | ||
463 | #define BRXMAC_CONFIG_STRIP_PAD 0x0000000000000002ULL | ||
464 | #define BRXMAC_CONFIG_ENABLE 0x0000000000000001ULL | ||
465 | |||
466 | #define BMAC_CTRL_CONFIG 0x00070UL | ||
467 | #define BMAC_CTRL_CONFIG_TX_PAUSE_EN 0x0000000000000001ULL | ||
468 | #define BMAC_CTRL_CONFIG_RX_PAUSE_EN 0x0000000000000002ULL | ||
469 | #define BMAC_CTRL_CONFIG_PASS_CTRL 0x0000000000000004ULL | ||
470 | |||
471 | #define BMAC_XIF_CONFIG 0x00078UL | ||
472 | #define BMAC_XIF_CONFIG_TX_OUTPUT_EN 0x0000000000000001ULL | ||
473 | #define BMAC_XIF_CONFIG_MII_LOOPBACK 0x0000000000000002ULL | ||
474 | #define BMAC_XIF_CONFIG_GMII_MODE 0x0000000000000008ULL | ||
475 | #define BMAC_XIF_CONFIG_LINK_LED 0x0000000000000020ULL | ||
476 | #define BMAC_XIF_CONFIG_LED_POLARITY 0x0000000000000040ULL | ||
477 | #define BMAC_XIF_CONFIG_25MHZ_CLOCK 0x0000000000000080ULL | ||
478 | |||
479 | #define BMAC_MIN_FRAME 0x000a0UL | ||
480 | #define BMAC_MIN_FRAME_VAL 0x00000000000003ffULL | ||
481 | |||
482 | #define BMAC_MAX_FRAME 0x000a8UL | ||
483 | #define BMAC_MAX_FRAME_MAX_BURST 0x000000003fff0000ULL | ||
484 | #define BMAC_MAX_FRAME_MAX_BURST_SHIFT 16 | ||
485 | #define BMAC_MAX_FRAME_MAX_FRAME 0x0000000000003fffULL | ||
486 | #define BMAC_MAX_FRAME_MAX_FRAME_SHIFT 0 | ||
487 | |||
488 | #define BMAC_PREAMBLE_SIZE 0x000b0UL | ||
489 | #define BMAC_PREAMBLE_SIZE_VAL 0x00000000000003ffULL | ||
490 | |||
491 | #define BMAC_CTRL_TYPE 0x000c8UL | ||
492 | |||
493 | #define BMAC_ADDR0 0x00100UL | ||
494 | #define BMAC_ADDR0_ADDR0 0x000000000000ffffULL | ||
495 | |||
496 | #define BMAC_ADDR1 0x00108UL | ||
497 | #define BMAC_ADDR1_ADDR1 0x000000000000ffffULL | ||
498 | |||
499 | #define BMAC_ADDR2 0x00110UL | ||
500 | #define BMAC_ADDR2_ADDR2 0x000000000000ffffULL | ||
501 | |||
502 | #define BMAC_NUM_ALT_ADDR 6 | ||
503 | |||
504 | #define BMAC_ALT_ADDR0(NUM) (0x00118UL + (NUM)*0x18UL) | ||
505 | #define BMAC_ALT_ADDR0_ADDR0 0x000000000000ffffULL | ||
506 | |||
507 | #define BMAC_ALT_ADDR1(NUM) (0x00120UL + (NUM)*0x18UL) | ||
508 | #define BMAC_ALT_ADDR1_ADDR1 0x000000000000ffffULL | ||
509 | |||
510 | #define BMAC_ALT_ADDR2(NUM) (0x00128UL + (NUM)*0x18UL) | ||
511 | #define BMAC_ALT_ADDR2_ADDR2 0x000000000000ffffULL | ||
512 | |||
513 | #define BMAC_FC_ADDR0 0x00268UL | ||
514 | #define BMAC_FC_ADDR0_ADDR0 0x000000000000ffffULL | ||
515 | |||
516 | #define BMAC_FC_ADDR1 0x00270UL | ||
517 | #define BMAC_FC_ADDR1_ADDR1 0x000000000000ffffULL | ||
518 | |||
519 | #define BMAC_FC_ADDR2 0x00278UL | ||
520 | #define BMAC_FC_ADDR2_ADDR2 0x000000000000ffffULL | ||
521 | |||
522 | #define BMAC_ADD_FILT0 0x00298UL | ||
523 | #define BMAC_ADD_FILT0_FILT0 0x000000000000ffffULL | ||
524 | |||
525 | #define BMAC_ADD_FILT1 0x002a0UL | ||
526 | #define BMAC_ADD_FILT1_FILT1 0x000000000000ffffULL | ||
527 | |||
528 | #define BMAC_ADD_FILT2 0x002a8UL | ||
529 | #define BMAC_ADD_FILT2_FILT2 0x000000000000ffffULL | ||
530 | |||
531 | #define BMAC_ADD_FILT12_MASK 0x002b0UL | ||
532 | #define BMAC_ADD_FILT12_MASK_VAL 0x00000000000000ffULL | ||
533 | |||
534 | #define BMAC_ADD_FILT00_MASK 0x002b8UL | ||
535 | #define BMAC_ADD_FILT00_MASK_VAL 0x000000000000ffffULL | ||
536 | |||
537 | #define BMAC_HASH_TBL(NUM) (0x002c0UL + (NUM) * 0x8UL) | ||
538 | #define BMAC_HASH_TBL_VAL 0x000000000000ffffULL | ||
539 | |||
540 | #define BRXMAC_FRAME_CNT 0x00370 | ||
541 | #define BRXMAC_FRAME_CNT_COUNT 0x000000000000ffffULL | ||
542 | |||
543 | #define BRXMAC_MAX_LEN_ERR_CNT 0x00378 | ||
544 | |||
545 | #define BRXMAC_ALIGN_ERR_CNT 0x00380 | ||
546 | #define BRXMAC_ALIGN_ERR_CNT_COUNT 0x000000000000ffffULL | ||
547 | |||
548 | #define BRXMAC_CRC_ERR_CNT 0x00388 | ||
549 | #define BRXMAC_CRC_ERR_CNT_COUNT 0x000000000000ffffULL | ||
550 | |||
551 | #define BRXMAC_CODE_VIOL_ERR_CNT 0x00390 | ||
552 | #define BRXMAC_CODE_VIOL_ERR_CNT_COUNT 0x000000000000ffffULL | ||
553 | |||
554 | #define BMAC_STATE_MACHINE 0x003a0 | ||
555 | |||
556 | #define BMAC_ADDR_CMPEN 0x003f8UL | ||
557 | #define BMAC_ADDR_CMPEN_EN15 0x0000000000008000ULL | ||
558 | #define BMAC_ADDR_CMPEN_EN14 0x0000000000004000ULL | ||
559 | #define BMAC_ADDR_CMPEN_EN13 0x0000000000002000ULL | ||
560 | #define BMAC_ADDR_CMPEN_EN12 0x0000000000001000ULL | ||
561 | #define BMAC_ADDR_CMPEN_EN11 0x0000000000000800ULL | ||
562 | #define BMAC_ADDR_CMPEN_EN10 0x0000000000000400ULL | ||
563 | #define BMAC_ADDR_CMPEN_EN9 0x0000000000000200ULL | ||
564 | #define BMAC_ADDR_CMPEN_EN8 0x0000000000000100ULL | ||
565 | #define BMAC_ADDR_CMPEN_EN7 0x0000000000000080ULL | ||
566 | #define BMAC_ADDR_CMPEN_EN6 0x0000000000000040ULL | ||
567 | #define BMAC_ADDR_CMPEN_EN5 0x0000000000000020ULL | ||
568 | #define BMAC_ADDR_CMPEN_EN4 0x0000000000000010ULL | ||
569 | #define BMAC_ADDR_CMPEN_EN3 0x0000000000000008ULL | ||
570 | #define BMAC_ADDR_CMPEN_EN2 0x0000000000000004ULL | ||
571 | #define BMAC_ADDR_CMPEN_EN1 0x0000000000000002ULL | ||
572 | #define BMAC_ADDR_CMPEN_EN0 0x0000000000000001ULL | ||
573 | |||
574 | #define BMAC_NUM_HOST_INFO 9 | ||
575 | |||
576 | #define BMAC_HOST_INFO(NUM) (0x00400UL + (NUM) * 0x8UL) | ||
577 | |||
578 | #define BTXMAC_BYTE_CNT 0x00448UL | ||
579 | #define BTXMAC_BYTE_CNT_COUNT 0x00000000ffffffffULL | ||
580 | |||
581 | #define BTXMAC_FRM_CNT 0x00450UL | ||
582 | #define BTXMAC_FRM_CNT_COUNT 0x00000000ffffffffULL | ||
583 | |||
584 | #define BRXMAC_BYTE_CNT 0x00458UL | ||
585 | #define BRXMAC_BYTE_CNT_COUNT 0x00000000ffffffffULL | ||
586 | |||
587 | #define HOST_INFO_MPR 0x0000000000000100ULL | ||
588 | #define HOST_INFO_MACRDCTBLN 0x0000000000000007ULL | ||
589 | |||
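Each host-info slot maps a matched address or hash hit to one of eight RDC tables, and HOST_INFO_MPR selects the MAC-preference behavior. A minimal sketch of programming one XMAC slot, with nw64_mac() assumed as before:

	/* Point host-info slot `slot` at RDC table `rdc_table`, optionally
	 * preferring the MAC-based table selection. */
	static void xmac_set_host_info_sketch(struct niu *np, int slot,
					      int rdc_table, bool mac_pref)
	{
		u64 val = rdc_table & HOST_INFO_MACRDCTBLN;

		if (mac_pref)
			val |= HOST_INFO_MPR;
		nw64_mac(XMAC_HOST_INFO(slot), val);
	}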
590 | /* XPCS registers, offset from np->regs + np->xpcs_off */ | ||
591 | |||
592 | #define XPCS_CONTROL1 (FZC_MAC + 0x00000UL) | ||
593 | #define XPCS_CONTROL1_RESET 0x0000000000008000ULL | ||
594 | #define XPCS_CONTROL1_LOOPBACK 0x0000000000004000ULL | ||
595 | #define XPCS_CONTROL1_SPEED_SELECT3 0x0000000000002000ULL | ||
596 | #define XPCS_CONTROL1_CSR_LOW_PWR 0x0000000000000800ULL | ||
597 | #define XPCS_CONTROL1_CSR_SPEED1 0x0000000000000040ULL | ||
598 | #define XPCS_CONTROL1_CSR_SPEED0 0x000000000000003cULL | ||
599 | |||
600 | #define XPCS_STATUS1 (FZC_MAC + 0x00008UL) | ||
601 | #define XPCS_STATUS1_CSR_FAULT 0x0000000000000080ULL | ||
602 | #define XPCS_STATUS1_CSR_RXLNK_STAT 0x0000000000000004ULL | ||
603 | #define XPCS_STATUS1_CSR_LPWR_ABLE 0x0000000000000002ULL | ||
604 | |||
605 | #define XPCS_DEVICE_IDENTIFIER (FZC_MAC + 0x00010UL) | ||
606 | #define XPCS_DEVICE_IDENTIFIER_VAL 0x00000000ffffffffULL | ||
607 | |||
608 | #define XPCS_SPEED_ABILITY (FZC_MAC + 0x00018UL) | ||
609 | #define XPCS_SPEED_ABILITY_10GIG 0x0000000000000001ULL | ||
610 | |||
611 | #define XPCS_DEV_IN_PKG (FZC_MAC + 0x00020UL) | ||
612 | #define XPCS_DEV_IN_PKG_CSR_VEND2 0x0000000080000000ULL | ||
613 | #define XPCS_DEV_IN_PKG_CSR_VEND1 0x0000000040000000ULL | ||
614 | #define XPCS_DEV_IN_PKG_DTE_XS 0x0000000000000020ULL | ||
615 | #define XPCS_DEV_IN_PKG_PHY_XS 0x0000000000000010ULL | ||
616 | #define XPCS_DEV_IN_PKG_PCS 0x0000000000000008ULL | ||
617 | #define XPCS_DEV_IN_PKG_WIS 0x0000000000000004ULL | ||
618 | #define XPCS_DEV_IN_PKG_PMD_PMA 0x0000000000000002ULL | ||
619 | #define XPCS_DEV_IN_PKG_CLS22 0x0000000000000001ULL | ||
620 | |||
621 | #define XPCS_CONTROL2 (FZC_MAC + 0x00028UL) | ||
622 | #define XPCS_CONTROL2_CSR_PSC_SEL 0x0000000000000003ULL | ||
623 | |||
624 | #define XPCS_STATUS2 (FZC_MAC + 0x00030UL) | ||
625 | #define XPCS_STATUS2_CSR_DEV_PRES 0x000000000000c000ULL | ||
626 | #define XPCS_STATUS2_CSR_TX_FAULT 0x0000000000000800ULL | ||
627 | #define XPCS_STATUS2_CSR_RCV_FAULT 0x0000000000000400ULL | ||
628 | #define XPCS_STATUS2_TEN_GBASE_W 0x0000000000000004ULL | ||
629 | #define XPCS_STATUS2_TEN_GBASE_X 0x0000000000000002ULL | ||
630 | #define XPCS_STATUS2_TEN_GBASE_R 0x0000000000000001ULL | ||
631 | |||
632 | #define XPCS_PKG_ID (FZC_MAC + 0x00038UL) | ||
633 | #define XPCS_PKG_ID_VAL 0x00000000ffffffffULL | ||
634 | |||
635 | #define XPCS_STATUS(IDX) (FZC_MAC + 0x00040UL) | ||
636 | #define XPCS_STATUS_CSR_LANE_ALIGN 0x0000000000001000ULL | ||
637 | #define XPCS_STATUS_CSR_PATTEST_CAP 0x0000000000000800ULL | ||
638 | #define XPCS_STATUS_CSR_LANE3_SYNC 0x0000000000000008ULL | ||
639 | #define XPCS_STATUS_CSR_LANE2_SYNC 0x0000000000000004ULL | ||
640 | #define XPCS_STATUS_CSR_LANE1_SYNC 0x0000000000000002ULL | ||
641 | #define XPCS_STATUS_CSR_LANE0_SYNC 0x0000000000000001ULL | ||
642 | |||
643 | #define XPCS_TEST_CONTROL (FZC_MAC + 0x00048UL) | ||
644 | #define XPCS_TEST_CONTROL_TXTST_EN 0x0000000000000004ULL | ||
645 | #define XPCS_TEST_CONTROL_TPAT_SEL 0x0000000000000003ULL | ||
646 | |||
647 | #define XPCS_CFG_VENDOR1 (FZC_MAC + 0x00050UL) | ||
648 | #define XPCS_CFG_VENDOR1_DBG_IOTST 0x0000000000000080ULL | ||
649 | #define XPCS_CFG_VENDOR1_DBG_SEL 0x0000000000000078ULL | ||
650 | #define XPCS_CFG_VENDOR1_BYPASS_DET 0x0000000000000004ULL | ||
651 | #define XPCS_CFG_VENDOR1_TXBUF_EN 0x0000000000000002ULL | ||
652 | #define XPCS_CFG_VENDOR1_XPCS_EN 0x0000000000000001ULL | ||
653 | |||
654 | #define XPCS_DIAG_VENDOR2 (FZC_MAC + 0x00058UL) | ||
655 | #define XPCS_DIAG_VENDOR2_SSM_LANE3 0x0000000001e00000ULL | ||
656 | #define XPCS_DIAG_VENDOR2_SSM_LANE2 0x00000000001e0000ULL | ||
657 | #define XPCS_DIAG_VENDOR2_SSM_LANE1 0x000000000001e000ULL | ||
658 | #define XPCS_DIAG_VENDOR2_SSM_LANE0 0x0000000000001e00ULL | ||
659 | #define XPCS_DIAG_VENDOR2_EBUF_SM 0x00000000000001feULL | ||
660 | #define XPCS_DIAG_VENDOR2_RCV_SM 0x0000000000000001ULL | ||
661 | |||
662 | #define XPCS_MASK1 (FZC_MAC + 0x00060UL) | ||
663 | #define XPCS_MASK1_FAULT_MASK 0x0000000000000080ULL | ||
664 | #define XPCS_MASK1_RXALIGN_STAT_MSK 0x0000000000000004ULL | ||
665 | |||
666 | #define XPCS_PKT_COUNT (FZC_MAC + 0x00068UL) | ||
667 | #define XPCS_PKT_COUNT_TX 0x00000000ffff0000ULL | ||
668 | #define XPCS_PKT_COUNT_RX 0x000000000000ffffULL | ||
669 | |||
670 | #define XPCS_TX_SM (FZC_MAC + 0x00070UL) | ||
671 | #define XPCS_TX_SM_VAL 0x000000000000000fULL | ||
672 | |||
673 | #define XPCS_DESKEW_ERR_CNT (FZC_MAC + 0x00078UL) | ||
674 | #define XPCS_DESKEW_ERR_CNT_VAL 0x00000000000000ffULL | ||
675 | |||
676 | #define XPCS_SYMERR_CNT01 (FZC_MAC + 0x00080UL) | ||
677 | #define XPCS_SYMERR_CNT01_LANE1 0x00000000ffff0000ULL | ||
678 | #define XPCS_SYMERR_CNT01_LANE0 0x000000000000ffffULL | ||
679 | |||
680 | #define XPCS_SYMERR_CNT23 (FZC_MAC + 0x00088UL) | ||
681 | #define XPCS_SYMERR_CNT23_LANE3 0x00000000ffff0000ULL | ||
682 | #define XPCS_SYMERR_CNT23_LANE2 0x000000000000ffffULL | ||
683 | |||
684 | #define XPCS_TRAINING_VECTOR (FZC_MAC + 0x00090UL) | ||
685 | #define XPCS_TRAINING_VECTOR_VAL 0x00000000ffffffffULL | ||
686 | |||
687 | /* PCS registers, offset from np->regs + np->pcs_off */ | ||
688 | |||
689 | #define PCS_MII_CTL (FZC_MAC + 0x00000UL) | ||
690 | #define PCS_MII_CTL_RST 0x0000000000008000ULL | ||
691 | #define PCS_MII_CTL_10_100_SPEED 0x0000000000002000ULL | ||
692 | #define PCS_MII_AUTONEG_EN 0x0000000000001000ULL | ||
693 | #define PCS_MII_PWR_DOWN 0x0000000000000800ULL | ||
694 | #define PCS_MII_ISOLATE 0x0000000000000400ULL | ||
695 | #define PCS_MII_AUTONEG_RESTART 0x0000000000000200ULL | ||
696 | #define PCS_MII_DUPLEX 0x0000000000000100ULL | ||
697 | #define PCS_MII_COLL_TEST 0x0000000000000080ULL | ||
698 | #define PCS_MII_1000MB_SPEED 0x0000000000000040ULL | ||
699 | |||
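PCS_MII_CTL mirrors the standard MII control register layout for the on-chip 1000BASE-X PCS, so enabling and restarting autonegotiation is the familiar read-modify-write. A sketch assuming nr64_pcs()/nw64_pcs() accessors that apply np->pcs_off as the comment above describes:

	/* Enable and kick 1000BASE-X autonegotiation on the internal PCS. */
	static void pcs_restart_aneg_sketch(struct niu *np)
	{
		u64 val = nr64_pcs(PCS_MII_CTL);

		val |= PCS_MII_AUTONEG_EN | PCS_MII_AUTONEG_RESTART;
		nw64_pcs(PCS_MII_CTL, val);
	}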
700 | #define PCS_MII_STAT (FZC_MAC + 0x00008UL) | ||
701 | #define PCS_MII_STAT_EXT_STATUS 0x0000000000000100ULL | ||
702 | #define PCS_MII_STAT_AUTONEG_DONE 0x0000000000000020ULL | ||
703 | #define PCS_MII_STAT_REMOTE_FAULT 0x0000000000000010ULL | ||
704 | #define PCS_MII_STAT_AUTONEG_ABLE 0x0000000000000008ULL | ||
705 | #define PCS_MII_STAT_LINK_STATUS 0x0000000000000004ULL | ||
706 | #define PCS_MII_STAT_JABBER_DET 0x0000000000000002ULL | ||
707 | #define PCS_MII_STAT_EXT_CAP 0x0000000000000001ULL | ||
708 | |||
709 | #define PCS_MII_ADV (FZC_MAC + 0x00010UL) | ||
710 | #define PCS_MII_ADV_NEXT_PAGE 0x0000000000008000ULL | ||
711 | #define PCS_MII_ADV_ACK 0x0000000000004000ULL | ||
712 | #define PCS_MII_ADV_REMOTE_FAULT 0x0000000000003000ULL | ||
713 | #define PCS_MII_ADV_ASM_DIR 0x0000000000000100ULL | ||
714 | #define PCS_MII_ADV_PAUSE 0x0000000000000080ULL | ||
715 | #define PCS_MII_ADV_HALF_DUPLEX 0x0000000000000040ULL | ||
716 | #define PCS_MII_ADV_FULL_DUPLEX 0x0000000000000020ULL | ||
717 | |||
718 | #define PCS_MII_PARTNER (FZC_MAC + 0x00018UL) | ||
719 | #define PCS_MII_PARTNER_NEXT_PAGE 0x0000000000008000ULL | ||
720 | #define PCS_MII_PARTNER_ACK 0x0000000000004000ULL | ||
721 | #define PCS_MII_PARTNER_REMOTE_FAULT 0x0000000000002000ULL | ||
722 | #define PCS_MII_PARTNER_PAUSE 0x0000000000000180ULL | ||
723 | #define PCS_MII_PARTNER_HALF_DUPLEX 0x0000000000000040ULL | ||
724 | #define PCS_MII_PARTNER_FULL_DUPLEX 0x0000000000000020ULL | ||
725 | |||
726 | #define PCS_CONF (FZC_MAC + 0x00020UL) | ||
727 | #define PCS_CONF_MASK 0x0000000000000040ULL | ||
728 | #define PCS_CONF_10MS_TMR_OVERRIDE 0x0000000000000020ULL | ||
729 | #define PCS_CONF_JITTER_STUDY 0x0000000000000018ULL | ||
730 | #define PCS_CONF_SIGDET_ACTIVE_LOW 0x0000000000000004ULL | ||
731 | #define PCS_CONF_SIGDET_OVERRIDE 0x0000000000000002ULL | ||
732 | #define PCS_CONF_ENABLE 0x0000000000000001ULL | ||
733 | |||
734 | #define PCS_STATE (FZC_MAC + 0x00028UL) | ||
735 | #define PCS_STATE_D_PARTNER_FAIL 0x0000000020000000ULL | ||
736 | #define PCS_STATE_D_WAIT_C_CODES_ACK 0x0000000010000000ULL | ||
737 | #define PCS_STATE_D_SYNC_LOSS 0x0000000008000000ULL | ||
738 | #define PCS_STATE_D_NO_GOOD_C_CODES 0x0000000004000000ULL | ||
739 | #define PCS_STATE_D_SERDES 0x0000000002000000ULL | ||
740 | #define PCS_STATE_D_BREAKLINK_C_CODES 0x0000000001000000ULL | ||
741 | #define PCS_STATE_L_SIGDET 0x0000000000400000ULL | ||
742 | #define PCS_STATE_L_SYNC_LOSS 0x0000000000200000ULL | ||
743 | #define PCS_STATE_L_C_CODES 0x0000000000100000ULL | ||
744 | #define PCS_STATE_LINK_CFG_STATE 0x000000000001e000ULL | ||
745 | #define PCS_STATE_SEQ_DET_STATE 0x0000000000001800ULL | ||
746 | #define PCS_STATE_WORD_SYNC_STATE 0x0000000000000700ULL | ||
747 | #define PCS_STATE_NO_IDLE 0x000000000000000fULL | ||
748 | |||
749 | #define PCS_INTERRUPT (FZC_MAC + 0x00030UL) | ||
750 | #define PCS_INTERRUPT_LSTATUS 0x0000000000000004ULL | ||
751 | |||
752 | #define PCS_DPATH_MODE (FZC_MAC + 0x000a0UL) | ||
753 | #define PCS_DPATH_MODE_PCS 0x0000000000000000ULL | ||
754 | #define PCS_DPATH_MODE_MII 0x0000000000000002ULL | ||
755 | #define PCS_DPATH_MODE_LINKUP_F_ENAB 0x0000000000000001ULL | ||
756 | |||
757 | #define PCS_PKT_CNT (FZC_MAC + 0x000c0UL) | ||
758 | #define PCS_PKT_CNT_RX 0x0000000007ff0000ULL | ||
759 | #define PCS_PKT_CNT_TX 0x00000000000007ffULL | ||
760 | |||
761 | #define MIF_BB_MDC (FZC_MAC + 0x16000UL) | ||
762 | #define MIF_BB_MDC_CLK 0x0000000000000001ULL | ||
763 | |||
764 | #define MIF_BB_MDO (FZC_MAC + 0x16008UL) | ||
765 | #define MIF_BB_MDO_DAT 0x0000000000000001ULL | ||
766 | |||
767 | #define MIF_BB_MDO_EN (FZC_MAC + 0x16010UL) | ||
768 | #define MIF_BB_MDO_EN_VAL 0x0000000000000001ULL | ||
769 | |||
770 | #define MIF_FRAME_OUTPUT (FZC_MAC + 0x16018UL) | ||
771 | #define MIF_FRAME_OUTPUT_ST 0x00000000c0000000ULL | ||
772 | #define MIF_FRAME_OUTPUT_ST_SHIFT 30 | ||
773 | #define MIF_FRAME_OUTPUT_OP_ADDR 0x0000000000000000ULL | ||
774 | #define MIF_FRAME_OUTPUT_OP_WRITE 0x0000000010000000ULL | ||
775 | #define MIF_FRAME_OUTPUT_OP_READ_INC 0x0000000020000000ULL | ||
776 | #define MIF_FRAME_OUTPUT_OP_READ 0x0000000030000000ULL | ||
777 | #define MIF_FRAME_OUTPUT_OP_SHIFT 28 | ||
778 | #define MIF_FRAME_OUTPUT_PORT 0x000000000f800000ULL | ||
779 | #define MIF_FRAME_OUTPUT_PORT_SHIFT 23 | ||
780 | #define MIF_FRAME_OUTPUT_REG 0x00000000007c0000ULL | ||
781 | #define MIF_FRAME_OUTPUT_REG_SHIFT 18 | ||
782 | #define MIF_FRAME_OUTPUT_TA 0x0000000000030000ULL | ||
783 | #define MIF_FRAME_OUTPUT_TA_SHIFT 16 | ||
784 | #define MIF_FRAME_OUTPUT_DATA 0x000000000000ffffULL | ||
785 | #define MIF_FRAME_OUTPUT_DATA_SHIFT 0 | ||
786 | |||
787 | #define MDIO_ADDR_OP(port, dev, reg) \ | ||
788 | ((0 << MIF_FRAME_OUTPUT_ST_SHIFT) | \ | ||
789 | MIF_FRAME_OUTPUT_OP_ADDR | \ | ||
790 | (port << MIF_FRAME_OUTPUT_PORT_SHIFT) | \ | ||
791 | (dev << MIF_FRAME_OUTPUT_REG_SHIFT) | \ | ||
792 | (0x2 << MIF_FRAME_OUTPUT_TA_SHIFT) | \ | ||
793 | (reg << MIF_FRAME_OUTPUT_DATA_SHIFT)) | ||
794 | |||
795 | #define MDIO_READ_OP(port, dev) \ | ||
796 | ((0 << MIF_FRAME_OUTPUT_ST_SHIFT) | \ | ||
797 | MIF_FRAME_OUTPUT_OP_READ | \ | ||
798 | (port << MIF_FRAME_OUTPUT_PORT_SHIFT) | \ | ||
799 | (dev << MIF_FRAME_OUTPUT_REG_SHIFT) | \ | ||
800 | (0x2 << MIF_FRAME_OUTPUT_TA_SHIFT)) | ||
801 | |||
802 | #define MDIO_WRITE_OP(port, dev, data) \ | ||
803 | ((0 << MIF_FRAME_OUTPUT_ST_SHIFT) | \ | ||
804 | MIF_FRAME_OUTPUT_OP_WRITE | \ | ||
805 | (port << MIF_FRAME_OUTPUT_PORT_SHIFT) | \ | ||
806 | (dev << MIF_FRAME_OUTPUT_REG_SHIFT) | \ | ||
807 | (0x2 << MIF_FRAME_OUTPUT_TA_SHIFT) | \ | ||
808 | (data << MIF_FRAME_OUTPUT_DATA_SHIFT)) | ||
809 | |||
810 | #define MII_READ_OP(port, reg) \ | ||
811 | ((1 << MIF_FRAME_OUTPUT_ST_SHIFT) | \ | ||
812 | (2 << MIF_FRAME_OUTPUT_OP_SHIFT) | \ | ||
813 | (port << MIF_FRAME_OUTPUT_PORT_SHIFT) | \ | ||
814 | (reg << MIF_FRAME_OUTPUT_REG_SHIFT) | \ | ||
815 | (0x2 << MIF_FRAME_OUTPUT_TA_SHIFT)) | ||
816 | |||
817 | #define MII_WRITE_OP(port, reg, data) \ | ||
818 | ((1 << MIF_FRAME_OUTPUT_ST_SHIFT) | \ | ||
819 | (1 << MIF_FRAME_OUTPUT_OP_SHIFT) | \ | ||
820 | (port << MIF_FRAME_OUTPUT_PORT_SHIFT) | \ | ||
821 | (reg << MIF_FRAME_OUTPUT_REG_SHIFT) | \ | ||
822 | (0x2 << MIF_FRAME_OUTPUT_TA_SHIFT) | \ | ||
823 | (data << MIF_FRAME_OUTPUT_DATA_SHIFT)) | ||
824 | |||
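These frame macros encode complete MIF transactions: ST=0 selects Clause 45 MDIO (an address cycle followed by a read or write), while ST=1 selects Clause 22 MII. A Clause 45 read is therefore two writes to MIF_FRAME_OUTPUT with a completion poll in between. The sketch below assumes nr64()/nw64() and udelay(); both helper names are illustrative:

	/* Poll until the MIF finishes the current frame; hardware flips the
	 * low turnaround bit to 1 on completion.  Returns the 16-bit data
	 * field, or a negative errno on timeout. */
	static int mif_wait_sketch(struct niu *np)
	{
		int limit = 1000;

		while (--limit > 0) {
			u64 val = nr64(MIF_FRAME_OUTPUT);

			if ((val >> MIF_FRAME_OUTPUT_TA_SHIFT) & 0x1)
				return val & MIF_FRAME_OUTPUT_DATA;
			udelay(10);
		}
		return -ENODEV;
	}

	/* Clause 45 read: latch the register address, then issue the read. */
	static int mdio_read_sketch(struct niu *np, int port, int dev, int reg)
	{
		int err;

		nw64(MIF_FRAME_OUTPUT, MDIO_ADDR_OP(port, dev, reg));
		err = mif_wait_sketch(np);
		if (err < 0)
			return err;

		nw64(MIF_FRAME_OUTPUT, MDIO_READ_OP(port, dev));
		return mif_wait_sketch(np);
	}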
825 | #define MIF_CONFIG (FZC_MAC + 0x16020UL) | ||
826 | #define MIF_CONFIG_ATCA_GE 0x0000000000010000ULL | ||
827 | #define MIF_CONFIG_INDIRECT_MODE 0x0000000000008000ULL | ||
828 | #define MIF_CONFIG_POLL_PRT_PHYADDR 0x0000000000003c00ULL | ||
829 | #define MIF_CONFIG_POLL_DEV_REG_ADDR 0x00000000000003e0ULL | ||
830 | #define MIF_CONFIG_BB_MODE 0x0000000000000010ULL | ||
831 | #define MIF_CONFIG_POLL_EN 0x0000000000000008ULL | ||
832 | #define MIF_CONFIG_BB_SER_SEL 0x0000000000000006ULL | ||
833 | #define MIF_CONFIG_MANUAL_MODE 0x0000000000000001ULL | ||
834 | |||
835 | #define MIF_POLL_STATUS (FZC_MAC + 0x16028UL) | ||
836 | #define MIF_POLL_STATUS_DATA 0x00000000ffff0000ULL | ||
837 | #define MIF_POLL_STATUS_STAT 0x000000000000ffffULL | ||
838 | |||
839 | #define MIF_POLL_MASK (FZC_MAC + 0x16030UL) | ||
840 | #define MIF_POLL_MASK_VAL 0x000000000000ffffULL | ||
841 | |||
842 | #define MIF_SM (FZC_MAC + 0x16038UL) | ||
843 | #define MIF_SM_PORT_ADDR 0x00000000001f0000ULL | ||
844 | #define MIF_SM_MDI_1 0x0000000000004000ULL | ||
845 | #define MIF_SM_MDI_0 0x0000000000002400ULL | ||
846 | #define MIF_SM_MDCLK 0x0000000000001000ULL | ||
847 | #define MIF_SM_MDO_EN 0x0000000000000800ULL | ||
848 | #define MIF_SM_MDO 0x0000000000000400ULL | ||
849 | #define MIF_SM_MDI 0x0000000000000200ULL | ||
850 | #define MIF_SM_CTL 0x00000000000001c0ULL | ||
851 | #define MIF_SM_EX 0x000000000000003fULL | ||
852 | |||
853 | #define MIF_STATUS (FZC_MAC + 0x16040UL) | ||
854 | #define MIF_STATUS_MDINT1 0x0000000000000020ULL | ||
855 | #define MIF_STATUS_MDINT0 0x0000000000000010ULL | ||
856 | |||
857 | #define MIF_MASK (FZC_MAC + 0x16048UL) | ||
858 | #define MIF_MASK_MDINT1 0x0000000000000020ULL | ||
859 | #define MIF_MASK_MDINT0 0x0000000000000010ULL | ||
860 | #define MIF_MASK_PEU_ERR 0x0000000000000008ULL | ||
861 | #define MIF_MASK_YC 0x0000000000000004ULL | ||
862 | #define MIF_MASK_XGE_ERR0 0x0000000000000002ULL | ||
863 | #define MIF_MASK_MIF_INIT_DONE 0x0000000000000001ULL | ||
864 | |||
865 | #define ENET_SERDES_RESET (FZC_MAC + 0x14000UL) | ||
866 | #define ENET_SERDES_RESET_1 0x0000000000000002ULL | ||
867 | #define ENET_SERDES_RESET_0 0x0000000000000001ULL | ||
868 | |||
869 | #define ENET_SERDES_CFG (FZC_MAC + 0x14008UL) | ||
870 | #define ENET_SERDES_BE_LOOPBACK 0x0000000000000002ULL | ||
871 | #define ENET_SERDES_CFG_FORCE_RDY 0x0000000000000001ULL | ||
872 | |||
873 | #define ENET_SERDES_0_PLL_CFG (FZC_MAC + 0x14010UL) | ||
874 | #define ENET_SERDES_PLL_FBDIV0 0x0000000000000001ULL | ||
875 | #define ENET_SERDES_PLL_FBDIV1 0x0000000000000002ULL | ||
876 | #define ENET_SERDES_PLL_FBDIV2 0x0000000000000004ULL | ||
877 | #define ENET_SERDES_PLL_HRATE0 0x0000000000000008ULL | ||
878 | #define ENET_SERDES_PLL_HRATE1 0x0000000000000010ULL | ||
879 | #define ENET_SERDES_PLL_HRATE2 0x0000000000000020ULL | ||
880 | #define ENET_SERDES_PLL_HRATE3 0x0000000000000040ULL | ||
881 | |||
882 | #define ENET_SERDES_0_CTRL_CFG (FZC_MAC + 0x14018UL) | ||
883 | #define ENET_SERDES_CTRL_SDET_0 0x0000000000000001ULL | ||
884 | #define ENET_SERDES_CTRL_SDET_1 0x0000000000000002ULL | ||
885 | #define ENET_SERDES_CTRL_SDET_2 0x0000000000000004ULL | ||
886 | #define ENET_SERDES_CTRL_SDET_3 0x0000000000000008ULL | ||
887 | #define ENET_SERDES_CTRL_EMPH_0 0x0000000000000070ULL | ||
888 | #define ENET_SERDES_CTRL_EMPH_0_SHIFT 4 | ||
889 | #define ENET_SERDES_CTRL_EMPH_1 0x0000000000000380ULL | ||
890 | #define ENET_SERDES_CTRL_EMPH_1_SHIFT 7 | ||
891 | #define ENET_SERDES_CTRL_EMPH_2 0x0000000000001c00ULL | ||
892 | #define ENET_SERDES_CTRL_EMPH_2_SHIFT 10 | ||
893 | #define ENET_SERDES_CTRL_EMPH_3 0x000000000000e000ULL | ||
894 | #define ENET_SERDES_CTRL_EMPH_3_SHIFT 13 | ||
895 | #define ENET_SERDES_CTRL_LADJ_0 0x0000000000070000ULL | ||
896 | #define ENET_SERDES_CTRL_LADJ_0_SHIFT 16 | ||
897 | #define ENET_SERDES_CTRL_LADJ_1 0x0000000000380000ULL | ||
898 | #define ENET_SERDES_CTRL_LADJ_1_SHIFT 19 | ||
899 | #define ENET_SERDES_CTRL_LADJ_2 0x0000000001c00000ULL | ||
900 | #define ENET_SERDES_CTRL_LADJ_2_SHIFT 22 | ||
901 | #define ENET_SERDES_CTRL_LADJ_3 0x000000000e000000ULL | ||
902 | #define ENET_SERDES_CTRL_LADJ_3_SHIFT 25 | ||
903 | #define ENET_SERDES_CTRL_RXITERM_0 0x0000000010000000ULL | ||
904 | #define ENET_SERDES_CTRL_RXITERM_1 0x0000000020000000ULL | ||
905 | #define ENET_SERDES_CTRL_RXITERM_2 0x0000000040000000ULL | ||
906 | #define ENET_SERDES_CTRL_RXITERM_3 0x0000000080000000ULL | ||
907 | |||
908 | #define ENET_SERDES_0_TEST_CFG (FZC_MAC + 0x14020UL) | ||
909 | #define ENET_SERDES_TEST_MD_0 0x0000000000000003ULL | ||
910 | #define ENET_SERDES_TEST_MD_0_SHIFT 0 | ||
911 | #define ENET_SERDES_TEST_MD_1 0x000000000000000cULL | ||
912 | #define ENET_SERDES_TEST_MD_1_SHIFT 2 | ||
913 | #define ENET_SERDES_TEST_MD_2 0x0000000000000030ULL | ||
914 | #define ENET_SERDES_TEST_MD_2_SHIFT 4 | ||
915 | #define ENET_SERDES_TEST_MD_3 0x00000000000000c0ULL | ||
916 | #define ENET_SERDES_TEST_MD_3_SHIFT 6 | ||
917 | |||
918 | #define ENET_TEST_MD_NO_LOOPBACK 0x0 | ||
919 | #define ENET_TEST_MD_EWRAP 0x1 | ||
920 | #define ENET_TEST_MD_PAD_LOOPBACK 0x2 | ||
921 | #define ENET_TEST_MD_REV_LOOPBACK 0x3 | ||
922 | |||
923 | #define ENET_SERDES_1_PLL_CFG (FZC_MAC + 0x14028UL) | ||
924 | #define ENET_SERDES_1_CTRL_CFG (FZC_MAC + 0x14030UL) | ||
925 | #define ENET_SERDES_1_TEST_CFG (FZC_MAC + 0x14038UL) | ||
926 | |||
927 | #define ENET_RGMII_CFG_REG (FZC_MAC + 0x14040UL) | ||
928 | |||
929 | #define ESR_INT_SIGNALS (FZC_MAC + 0x14800UL) | ||
930 | #define ESR_INT_SIGNALS_ALL 0x00000000ffffffffULL | ||
931 | #define ESR_INT_SIGNALS_P0_BITS 0x0000000033e0000fULL | ||
932 | #define ESR_INT_SIGNALS_P1_BITS 0x000000000c1f00f0ULL | ||
933 | #define ESR_INT_SRDY0_P0 0x0000000020000000ULL | ||
934 | #define ESR_INT_DET0_P0 0x0000000010000000ULL | ||
935 | #define ESR_INT_SRDY0_P1 0x0000000008000000ULL | ||
936 | #define ESR_INT_DET0_P1 0x0000000004000000ULL | ||
937 | #define ESR_INT_XSRDY_P0 0x0000000002000000ULL | ||
938 | #define ESR_INT_XDP_P0_CH3 0x0000000001000000ULL | ||
939 | #define ESR_INT_XDP_P0_CH2 0x0000000000800000ULL | ||
940 | #define ESR_INT_XDP_P0_CH1 0x0000000000400000ULL | ||
941 | #define ESR_INT_XDP_P0_CH0 0x0000000000200000ULL | ||
942 | #define ESR_INT_XSRDY_P1 0x0000000000100000ULL | ||
943 | #define ESR_INT_XDP_P1_CH3 0x0000000000080000ULL | ||
944 | #define ESR_INT_XDP_P1_CH2 0x0000000000040000ULL | ||
945 | #define ESR_INT_XDP_P1_CH1 0x0000000000020000ULL | ||
946 | #define ESR_INT_XDP_P1_CH0 0x0000000000010000ULL | ||
947 | #define ESR_INT_SLOSS_P1_CH3 0x0000000000000080ULL | ||
948 | #define ESR_INT_SLOSS_P1_CH2 0x0000000000000040ULL | ||
949 | #define ESR_INT_SLOSS_P1_CH1 0x0000000000000020ULL | ||
950 | #define ESR_INT_SLOSS_P1_CH0 0x0000000000000010ULL | ||
951 | #define ESR_INT_SLOSS_P0_CH3 0x0000000000000008ULL | ||
952 | #define ESR_INT_SLOSS_P0_CH2 0x0000000000000004ULL | ||
953 | #define ESR_INT_SLOSS_P0_CH1 0x0000000000000002ULL | ||
954 | #define ESR_INT_SLOSS_P0_CH0 0x0000000000000001ULL | ||
955 | |||
956 | #define ESR_DEBUG_SEL (FZC_MAC + 0x14808UL) | ||
957 | #define ESR_DEBUG_SEL_VAL 0x000000000000003fULL | ||
958 | |||
959 | /* SerDes registers behind MIF */ | ||
960 | #define NIU_ESR_DEV_ADDR 0x1e | ||
961 | #define ESR_BASE 0x0000 | ||
962 | |||
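The SerDes (ESR) block is reached over MDIO as Clause 45 device 0x1e rather than by direct PIO, and each 32-bit value is split across an _L/_H pair of 16-bit registers. Reusing the hypothetical mdio_read_sketch() above:

	/* Assemble one 32-bit ESR value from its _L/_H register pair. */
	static int esr_read_sketch(struct niu *np, int l_reg, int h_reg, u32 *val)
	{
		int err;

		err = mdio_read_sketch(np, np->port, NIU_ESR_DEV_ADDR, l_reg);
		if (err < 0)
			return err;
		*val = err & 0xffff;

		err = mdio_read_sketch(np, np->port, NIU_ESR_DEV_ADDR, h_reg);
		if (err < 0)
			return err;
		*val |= (u32)err << 16;

		return 0;
	}

A channel's RXTX control word, for instance, would be read by passing the ESR_RXTX_CTRL_L/_H pair defined just below.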
963 | #define ESR_RXTX_COMM_CTRL_L (ESR_BASE + 0x0000) | ||
964 | #define ESR_RXTX_COMM_CTRL_H (ESR_BASE + 0x0001) | ||
965 | |||
966 | #define ESR_RXTX_RESET_CTRL_L (ESR_BASE + 0x0002) | ||
967 | #define ESR_RXTX_RESET_CTRL_H (ESR_BASE + 0x0003) | ||
968 | |||
969 | #define ESR_RX_POWER_CTRL_L (ESR_BASE + 0x0004) | ||
970 | #define ESR_RX_POWER_CTRL_H (ESR_BASE + 0x0005) | ||
971 | |||
972 | #define ESR_TX_POWER_CTRL_L (ESR_BASE + 0x0006) | ||
973 | #define ESR_TX_POWER_CTRL_H (ESR_BASE + 0x0007) | ||
974 | |||
975 | #define ESR_MISC_POWER_CTRL_L (ESR_BASE + 0x0008) | ||
976 | #define ESR_MISC_POWER_CTRL_H (ESR_BASE + 0x0009) | ||
977 | |||
978 | #define ESR_RXTX_CTRL_L(CHAN) (ESR_BASE + 0x0080 + (CHAN) * 0x10) | ||
979 | #define ESR_RXTX_CTRL_H(CHAN) (ESR_BASE + 0x0081 + (CHAN) * 0x10) | ||
980 | #define ESR_RXTX_CTRL_BIASCNTL 0x80000000 | ||
981 | #define ESR_RXTX_CTRL_RESV1 0x7c000000 | ||
982 | #define ESR_RXTX_CTRL_TDENFIFO 0x02000000 | ||
983 | #define ESR_RXTX_CTRL_TDWS20 0x01000000 | ||
984 | #define ESR_RXTX_CTRL_VMUXLO 0x00c00000 | ||
985 | #define ESR_RXTX_CTRL_VMUXLO_SHIFT 22 | ||
986 | #define ESR_RXTX_CTRL_VPULSELO 0x00300000 | ||
987 | #define ESR_RXTX_CTRL_VPULSELO_SHIFT 20 | ||
988 | #define ESR_RXTX_CTRL_RESV2 0x000f0000 | ||
989 | #define ESR_RXTX_CTRL_RESV3 0x0000c000 | ||
990 | #define ESR_RXTX_CTRL_RXPRESWIN 0x00003000 | ||
991 | #define ESR_RXTX_CTRL_RXPRESWIN_SHIFT 12 | ||
992 | #define ESR_RXTX_CTRL_RESV4 0x00000800 | ||
993 | #define ESR_RXTX_CTRL_RISEFALL 0x00000700 | ||
994 | #define ESR_RXTX_CTRL_RISEFALL_SHIFT 8 | ||
995 | #define ESR_RXTX_CTRL_RESV5 0x000000fe | ||
996 | #define ESR_RXTX_CTRL_ENSTRETCH 0x00000001 | ||
997 | |||
998 | #define ESR_RXTX_TUNING_L(CHAN) (ESR_BASE + 0x0082 + (CHAN) * 0x10) | ||
999 | #define ESR_RXTX_TUNING_H(CHAN) (ESR_BASE + 0x0083 + (CHAN) * 0x10) | ||
1000 | |||
1001 | #define ESR_RX_SYNCCHAR_L(CHAN) (ESR_BASE + 0x0084 + (CHAN) * 0x10) | ||
1002 | #define ESR_RX_SYNCCHAR_H(CHAN) (ESR_BASE + 0x0085 + (CHAN) * 0x10) | ||
1003 | |||
1004 | #define ESR_RXTX_TEST_L(CHAN) (ESR_BASE + 0x0086 + (CHAN) * 0x10) | ||
1005 | #define ESR_RXTX_TEST_H(CHAN) (ESR_BASE + 0x0087 + (CHAN) * 0x10) | ||
1006 | |||
1007 | #define ESR_GLUE_CTRL0_L(CHAN) (ESR_BASE + 0x0088 + (CHAN) * 0x10) | ||
1008 | #define ESR_GLUE_CTRL0_H(CHAN) (ESR_BASE + 0x0089 + (CHAN) * 0x10) | ||
1009 | #define ESR_GLUE_CTRL0_RESV1 0xf8000000 | ||
1010 | #define ESR_GLUE_CTRL0_BLTIME 0x07000000 | ||
1011 | #define ESR_GLUE_CTRL0_BLTIME_SHIFT 24 | ||
1012 | #define ESR_GLUE_CTRL0_RESV2 0x00ff0000 | ||
1013 | #define ESR_GLUE_CTRL0_RXLOS_TEST 0x00008000 | ||
1014 | #define ESR_GLUE_CTRL0_RESV3 0x00004000 | ||
1015 | #define ESR_GLUE_CTRL0_RXLOSENAB 0x00002000 | ||
1016 | #define ESR_GLUE_CTRL0_FASTRESYNC 0x00001000 | ||
1017 | #define ESR_GLUE_CTRL0_SRATE 0x00000f00 | ||
1018 | #define ESR_GLUE_CTRL0_SRATE_SHIFT 8 | ||
1019 | #define ESR_GLUE_CTRL0_THCNT 0x000000ff | ||
1020 | #define ESR_GLUE_CTRL0_THCNT_SHIFT 0 | ||
1021 | |||
1022 | #define BLTIME_64_CYCLES 0 | ||
1023 | #define BLTIME_128_CYCLES 1 | ||
1024 | #define BLTIME_256_CYCLES 2 | ||
1025 | #define BLTIME_300_CYCLES 3 | ||
1026 | #define BLTIME_384_CYCLES 4 | ||
1027 | #define BLTIME_512_CYCLES 5 | ||
1028 | #define BLTIME_1024_CYCLES 6 | ||
1029 | #define BLTIME_2048_CYCLES 7 | ||
1030 | |||
1031 | #define ESR_GLUE_CTRL1_L(CHAN) (ESR_BASE + 0x008a + (CHAN) * 0x10) | ||
1032 | #define ESR_GLUE_CTRL1_H(CHAN) (ESR_BASE + 0x008b + (CHAN) * 0x10) | ||
1033 | #define ESR_RXTX_TUNING1_L(CHAN) (ESR_BASE + 0x00c2 + (CHAN) * 0x10) | ||
1034 | #define ESR_RXTX_TUNING1_H(CHAN) (ESR_BASE + 0x00c3 + (CHAN) * 0x10) | ||
1035 | #define ESR_RXTX_TUNING2_L(CHAN) (ESR_BASE + 0x0102 + (CHAN) * 0x10) | ||
1036 | #define ESR_RXTX_TUNING2_H(CHAN) (ESR_BASE + 0x0103 + (CHAN) * 0x10) | ||
1037 | #define ESR_RXTX_TUNING3_L(CHAN) (ESR_BASE + 0x0142 + (CHAN) * 0x10) | ||
1038 | #define ESR_RXTX_TUNING3_H(CHAN) (ESR_BASE + 0x0143 + (CHAN) * 0x10) | ||
1039 | |||
1040 | #define NIU_ESR2_DEV_ADDR 0x1e | ||
1041 | #define ESR2_BASE 0x8000 | ||
1042 | |||
1043 | #define ESR2_TI_PLL_CFG_L (ESR2_BASE + 0x000) | ||
1044 | #define ESR2_TI_PLL_CFG_H (ESR2_BASE + 0x001) | ||
1045 | #define PLL_CFG_STD 0x00000c00 | ||
1046 | #define PLL_CFG_STD_SHIFT 10 | ||
1047 | #define PLL_CFG_LD 0x00000300 | ||
1048 | #define PLL_CFG_LD_SHIFT 8 | ||
1049 | #define PLL_CFG_MPY 0x0000001e | ||
1050 | #define PLL_CFG_MPY_SHIFT 1 | ||
1051 | #define PLL_CFG_MPY_4X 0x0 | ||
1052 | #define PLL_CFG_MPY_5X 0x00000002 | ||
1053 | #define PLL_CFG_MPY_6X 0x00000004 | ||
1054 | #define PLL_CFG_MPY_8X 0x00000008 | ||
1055 | #define PLL_CFG_MPY_10X 0x0000000a | ||
1056 | #define PLL_CFG_MPY_12X 0x0000000c | ||
1057 | #define PLL_CFG_MPY_12P5X 0x0000000e | ||
1058 | #define PLL_CFG_ENPLL 0x00000001 | ||
1059 | |||
1060 | #define ESR2_TI_PLL_STS_L (ESR2_BASE + 0x002) | ||
1061 | #define ESR2_TI_PLL_STS_H (ESR2_BASE + 0x003) | ||
1062 | #define PLL_STS_LOCK 0x00000001 | ||
1063 | |||
1064 | #define ESR2_TI_PLL_TEST_CFG_L (ESR2_BASE + 0x004) | ||
1065 | #define ESR2_TI_PLL_TEST_CFG_H (ESR2_BASE + 0x005) | ||
1066 | #define PLL_TEST_INVPATT 0x00004000 | ||
1067 | #define PLL_TEST_RATE 0x00003000 | ||
1068 | #define PLL_TEST_RATE_SHIFT 12 | ||
1069 | #define PLL_TEST_CFG_ENBSAC 0x00000400 | ||
1070 | #define PLL_TEST_CFG_ENBSRX 0x00000200 | ||
1071 | #define PLL_TEST_CFG_ENBSTX 0x00000100 | ||
1072 | #define PLL_TEST_CFG_LOOPBACK_PAD 0x00000040 | ||
1073 | #define PLL_TEST_CFG_LOOPBACK_CML_DIS 0x00000080 | ||
1074 | #define PLL_TEST_CFG_LOOPBACK_CML_EN 0x000000c0 | ||
1075 | #define PLL_TEST_CFG_CLKBYP 0x00000030 | ||
1076 | #define PLL_TEST_CFG_CLKBYP_SHIFT 4 | ||
1077 | #define PLL_TEST_CFG_EN_RXPATT 0x00000008 | ||
1078 | #define PLL_TEST_CFG_EN_TXPATT 0x00000004 | ||
1079 | #define PLL_TEST_CFG_TPATT 0x00000003 | ||
1080 | #define PLL_TEST_CFG_TPATT_SHIFT 0 | ||
1081 | |||
1082 | #define ESR2_TI_PLL_TX_CFG_L(CHAN) (ESR2_BASE + 0x100 + (CHAN) * 4) | ||
1083 | #define ESR2_TI_PLL_TX_CFG_H(CHAN) (ESR2_BASE + 0x101 + (CHAN) * 4) | ||
1084 | #define PLL_TX_CFG_RDTCT 0x00600000 | ||
1085 | #define PLL_TX_CFG_RDTCT_SHIFT 21 | ||
1086 | #define PLL_TX_CFG_ENIDL 0x00100000 | ||
1087 | #define PLL_TX_CFG_BSTX 0x00020000 | ||
1088 | #define PLL_TX_CFG_ENFTP 0x00010000 | ||
1089 | #define PLL_TX_CFG_DE 0x0000f000 | ||
1090 | #define PLL_TX_CFG_DE_SHIFT 12 | ||
1091 | #define PLL_TX_CFG_SWING_125MV 0x00000000 | ||
1092 | #define PLL_TX_CFG_SWING_250MV 0x00000200 | ||
1093 | #define PLL_TX_CFG_SWING_500MV 0x00000400 | ||
1094 | #define PLL_TX_CFG_SWING_625MV 0x00000600 | ||
1095 | #define PLL_TX_CFG_SWING_750MV 0x00000800 | ||
1096 | #define PLL_TX_CFG_SWING_1000MV 0x00000a00 | ||
1097 | #define PLL_TX_CFG_SWING_1250MV 0x00000c00 | ||
1098 | #define PLL_TX_CFG_SWING_1375MV 0x00000e00 | ||
1099 | #define PLL_TX_CFG_CM 0x00000100 | ||
1100 | #define PLL_TX_CFG_INVPAIR 0x00000080 | ||
1101 | #define PLL_TX_CFG_RATE 0x00000060 | ||
1102 | #define PLL_TX_CFG_RATE_SHIFT 5 | ||
1103 | #define PLL_TX_CFG_RATE_FULL 0x0 | ||
1104 | #define PLL_TX_CFG_RATE_HALF 0x20 | ||
1105 | #define PLL_TX_CFG_RATE_QUAD 0x40 | ||
1106 | #define PLL_TX_CFG_BUSWIDTH 0x0000001c | ||
1107 | #define PLL_TX_CFG_BUSWIDTH_SHIFT 2 | ||
1108 | #define PLL_TX_CFG_ENTEST 0x00000002 | ||
1109 | #define PLL_TX_CFG_ENTX 0x00000001 | ||
1110 | |||
1111 | #define ESR2_TI_PLL_TX_STS_L(CHAN) (ESR2_BASE + 0x102 + (CHAN) * 4) | ||
1112 | #define ESR2_TI_PLL_TX_STS_H(CHAN) (ESR2_BASE + 0x103 + (CHAN) * 4) | ||
1113 | #define PLL_TX_STS_RDTCTIP 0x00000002 | ||
1114 | #define PLL_TX_STS_TESTFAIL 0x00000001 | ||
1115 | |||
1116 | #define ESR2_TI_PLL_RX_CFG_L(CHAN) (ESR2_BASE + 0x120 + (CHAN) * 4) | ||
1117 | #define ESR2_TI_PLL_RX_CFG_H(CHAN) (ESR2_BASE + 0x121 + (CHAN) * 4) | ||
1118 | #define PLL_RX_CFG_BSINRXN 0x02000000 | ||
1119 | #define PLL_RX_CFG_BSINRXP 0x01000000 | ||
1120 | #define PLL_RX_CFG_EQ_MAX_LF 0x00000000 | ||
1121 | #define PLL_RX_CFG_EQ_LP_ADAPTIVE 0x00080000 | ||
1122 | #define PLL_RX_CFG_EQ_LP_1084MHZ 0x00400000 | ||
1123 | #define PLL_RX_CFG_EQ_LP_805MHZ 0x00480000 | ||
1124 | #define PLL_RX_CFG_EQ_LP_573MHZ 0x00500000 | ||
1125 | #define PLL_RX_CFG_EQ_LP_402MHZ 0x00580000 | ||
1126 | #define PLL_RX_CFG_EQ_LP_304MHZ 0x00600000 | ||
1127 | #define PLL_RX_CFG_EQ_LP_216MHZ 0x00680000 | ||
1128 | #define PLL_RX_CFG_EQ_LP_156MHZ 0x00700000 | ||
1129 | #define PLL_RX_CFG_EQ_LP_135MHZ 0x00780000 | ||
1130 | #define PLL_RX_CFG_EQ_SHIFT 19 | ||
1131 | #define PLL_RX_CFG_CDR 0x00070000 | ||
1132 | #define PLL_RX_CFG_CDR_SHIFT 16 | ||
1133 | #define PLL_RX_CFG_LOS_DIS 0x00000000 | ||
1134 | #define PLL_RX_CFG_LOS_HTHRESH 0x00004000 | ||
1135 | #define PLL_RX_CFG_LOS_LTHRESH 0x00008000 | ||
1136 | #define PLL_RX_CFG_ALIGN_DIS 0x00000000 | ||
1137 | #define PLL_RX_CFG_ALIGN_ENA 0x00001000 | ||
1138 | #define PLL_RX_CFG_ALIGN_JOG 0x00002000 | ||
1139 | #define PLL_RX_CFG_TERM_VDDT 0x00000000 | ||
1140 | #define PLL_RX_CFG_TERM_0P8VDDT 0x00000100 | ||
1141 | #define PLL_RX_CFG_TERM_FLOAT 0x00000300 | ||
1142 | #define PLL_RX_CFG_INVPAIR 0x00000080 | ||
1143 | #define PLL_RX_CFG_RATE 0x00000060 | ||
1144 | #define PLL_RX_CFG_RATE_SHIFT 5 | ||
1145 | #define PLL_RX_CFG_RATE_FULL 0x0 | ||
1146 | #define PLL_RX_CFG_RATE_HALF 0x20 | ||
1147 | #define PLL_RX_CFG_RATE_QUAD 0x40 | ||
1148 | #define PLL_RX_CFG_BUSWIDTH 0x0000001c | ||
1149 | #define PLL_RX_CFG_BUSWIDTH_SHIFT 2 | ||
1150 | #define PLL_RX_CFG_ENTEST 0x00000002 | ||
1151 | #define PLL_RX_CFG_ENRX 0x00000001 | ||
1152 | |||
1153 | #define ESR2_TI_PLL_RX_STS_L(CHAN) (ESR2_BASE + 0x122 + (CHAN) * 4) | ||
1154 | #define ESR2_TI_PLL_RX_STS_H(CHAN) (ESR2_BASE + 0x123 + (CHAN) * 4) | ||
1155 | #define PLL_RX_STS_CRCIDTCT 0x00000200 | ||
1156 | #define PLL_RX_STS_CWDTCT 0x00000100 | ||
1157 | #define PLL_RX_STS_BSRXN 0x00000020 | ||
1158 | #define PLL_RX_STS_BSRXP 0x00000010 | ||
1159 | #define PLL_RX_STS_LOSDTCT 0x00000008 | ||
1160 | #define PLL_RX_STS_ODDCG 0x00000004 | ||
1161 | #define PLL_RX_STS_SYNC 0x00000002 | ||
1162 | #define PLL_RX_STS_TESTFAIL 0x00000001 | ||
1163 | |||
1164 | #define ENET_VLAN_TBL(IDX) (FZC_FFLP + 0x00000UL + (IDX) * 8UL) | ||
1165 | #define ENET_VLAN_TBL_PARITY1 0x0000000000020000ULL | ||
1166 | #define ENET_VLAN_TBL_PARITY0 0x0000000000010000ULL | ||
1167 | #define ENET_VLAN_TBL_VPR 0x0000000000000008ULL | ||
1168 | #define ENET_VLAN_TBL_VLANRDCTBLN 0x0000000000000007ULL | ||
1169 | #define ENET_VLAN_TBL_SHIFT(PORT) ((PORT) * 4) | ||
1170 | |||
1171 | #define ENET_VLAN_TBL_NUM_ENTRIES 4096 | ||
1172 | |||
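Each of the 4096 VLAN-table entries packs a 4-bit field per port (VPR plus a 3-bit RDC table number), positioned by ENET_VLAN_TBL_SHIFT(port). Updating one port's nibble is a read-modify-write; a sketch assuming nr64()/nw64() (the real driver also regenerates the parity bits, which is omitted here):

	/* Set port `port`'s VLAN preference and RDC table for VLAN `index`. */
	static void vlan_tbl_write_sketch(struct niu *np, unsigned long index,
					  int port, int vpr, int rdc_table)
	{
		u64 val = nr64(ENET_VLAN_TBL(index));

		val &= ~((ENET_VLAN_TBL_VPR | ENET_VLAN_TBL_VLANRDCTBLN) <<
			 ENET_VLAN_TBL_SHIFT(port));
		if (vpr)
			val |= ENET_VLAN_TBL_VPR << ENET_VLAN_TBL_SHIFT(port);
		val |= (u64)rdc_table << ENET_VLAN_TBL_SHIFT(port);

		nw64(ENET_VLAN_TBL(index), val);
	}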
1173 | #define FFLP_VLAN_PAR_ERR (FZC_FFLP + 0x0800UL) | ||
1174 | #define FFLP_VLAN_PAR_ERR_ERR 0x0000000080000000ULL | ||
1175 | #define FFLP_VLAN_PAR_ERR_M_ERR 0x0000000040000000ULL | ||
1176 | #define FFLP_VLAN_PAR_ERR_ADDR 0x000000003ffc0000ULL | ||
1177 | #define FFLP_VLAN_PAR_ERR_DATA 0x000000000003ffffULL | ||
1178 | |||
1179 | #define L2_CLS(IDX) (FZC_FFLP + 0x20000UL + (IDX) * 8UL) | ||
1180 | #define L2_CLS_VLD 0x0000000000010000ULL | ||
1181 | #define L2_CLS_ETYPE 0x000000000000ffffULL | ||
1182 | #define L2_CLS_ETYPE_SHIFT 0 | ||
1183 | |||
1184 | #define L3_CLS(IDX) (FZC_FFLP + 0x20010UL + (IDX) * 8UL) | ||
1185 | #define L3_CLS_VALID 0x0000000002000000ULL | ||
1186 | #define L3_CLS_IPVER 0x0000000001000000ULL | ||
1187 | #define L3_CLS_PID 0x0000000000ff0000ULL | ||
1188 | #define L3_CLS_PID_SHIFT 16 | ||
1189 | #define L3_CLS_TOSMASK 0x000000000000ff00ULL | ||
1190 | #define L3_CLS_TOSMASK_SHIFT 8 | ||
1191 | #define L3_CLS_TOS 0x00000000000000ffULL | ||
1192 | #define L3_CLS_TOS_SHIFT 0 | ||
1193 | |||
1194 | #define TCAM_KEY(IDX) (FZC_FFLP + 0x20030UL + (IDX) * 8UL) | ||
1195 | #define TCAM_KEY_DISC 0x0000000000000008ULL | ||
1196 | #define TCAM_KEY_TSEL 0x0000000000000004ULL | ||
1197 | #define TCAM_KEY_IPADDR 0x0000000000000001ULL | ||
1198 | |||
1199 | #define TCAM_KEY_0 (FZC_FFLP + 0x20090UL) | ||
1200 | #define TCAM_KEY_0_KEY 0x00000000000000ffULL /* bits 192-199 */ | ||
1201 | |||
1202 | #define TCAM_KEY_1 (FZC_FFLP + 0x20098UL) | ||
1203 | #define TCAM_KEY_1_KEY 0xffffffffffffffffULL /* bits 128-191 */ | ||
1204 | |||
1205 | #define TCAM_KEY_2 (FZC_FFLP + 0x200a0UL) | ||
1206 | #define TCAM_KEY_2_KEY 0xffffffffffffffffULL /* bits 64-127 */ | ||
1207 | |||
1208 | #define TCAM_KEY_3 (FZC_FFLP + 0x200a8UL) | ||
1209 | #define TCAM_KEY_3_KEY 0xffffffffffffffffULL /* bits 0-63 */ | ||
1210 | |||
1211 | #define TCAM_KEY_MASK_0 (FZC_FFLP + 0x200b0UL) | ||
1212 | #define TCAM_KEY_MASK_0_KEY_SEL 0x00000000000000ffULL /* bits 192-199 */ | ||
1213 | |||
1214 | #define TCAM_KEY_MASK_1 (FZC_FFLP + 0x200b8UL) | ||
1215 | #define TCAM_KEY_MASK_1_KEY_SEL 0xffffffffffffffffULL /* bits 128-191 */ | ||
1216 | |||
1217 | #define TCAM_KEY_MASK_2 (FZC_FFLP + 0x200c0UL) | ||
1218 | #define TCAM_KEY_MASK_2_KEY_SEL 0xffffffffffffffffULL /* bits 64-127 */ | ||
1219 | |||
1220 | #define TCAM_KEY_MASK_3 (FZC_FFLP + 0x200c8UL) | ||
1221 | #define TCAM_KEY_MASK_3_KEY_SEL 0xffffffffffffffffULL /* bits 0-63 */ | ||
1222 | |||
1223 | #define TCAM_CTL (FZC_FFLP + 0x200d0UL) | ||
1224 | #define TCAM_CTL_RWC 0x00000000001c0000ULL | ||
1225 | #define TCAM_CTL_RWC_TCAM_WRITE 0x0000000000000000ULL | ||
1226 | #define TCAM_CTL_RWC_TCAM_READ 0x0000000000040000ULL | ||
1227 | #define TCAM_CTL_RWC_TCAM_COMPARE 0x0000000000080000ULL | ||
1228 | #define TCAM_CTL_RWC_RAM_WRITE 0x0000000000100000ULL | ||
1229 | #define TCAM_CTL_RWC_RAM_READ 0x0000000000140000ULL | ||
1230 | #define TCAM_CTL_STAT 0x0000000000020000ULL | ||
1231 | #define TCAM_CTL_MATCH 0x0000000000010000ULL | ||
1232 | #define TCAM_CTL_LOC 0x00000000000003ffULL | ||
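/* Illustrative sketch (not part of the original header): the TCAM is
 * driven through TCAM_CTL as a small command interface.  A write loads
 * the four key and four mask words defined above, issues RWC_TCAM_WRITE
 * with the entry index in LOC, then polls STAT for completion.
 * nw64()/nr64() are assumed 64-bit register accessors and udelay() the
 * usual kernel delay helper.
 */
static inline int example_tcam_entry_write(struct niu *np, int index,
					   u64 *key, u64 *mask)
{
	int limit = 1000;

	nw64(TCAM_KEY_0, key[0]);
	nw64(TCAM_KEY_1, key[1]);
	nw64(TCAM_KEY_2, key[2]);
	nw64(TCAM_KEY_3, key[3]);
	nw64(TCAM_KEY_MASK_0, mask[0]);
	nw64(TCAM_KEY_MASK_1, mask[1]);
	nw64(TCAM_KEY_MASK_2, mask[2]);
	nw64(TCAM_KEY_MASK_3, mask[3]);

	nw64(TCAM_CTL, TCAM_CTL_RWC_TCAM_WRITE | index);
	while (--limit > 0) {
		if (nr64(TCAM_CTL) & TCAM_CTL_STAT)
			return 0;	/* command completed */
		udelay(1);
	}
	return -ENODEV;		/* hardware never signalled completion */
}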
1233 | |||
1234 | #define TCAM_ERR (FZC_FFLP + 0x200d8UL) | ||
1235 | #define TCAM_ERR_ERR 0x0000000080000000ULL | ||
1236 | #define TCAM_ERR_P_ECC 0x0000000040000000ULL | ||
1237 | #define TCAM_ERR_MULT 0x0000000020000000ULL | ||
1238 | #define TCAM_ERR_ADDR 0x0000000000ff0000ULL | ||
1239 | #define TCAM_ERR_SYNDROME 0x000000000000ffffULL | ||
1240 | |||
1241 | #define HASH_LOOKUP_ERR_LOG1 (FZC_FFLP + 0x200e0UL) | ||
1242 | #define HASH_LOOKUP_ERR_LOG1_ERR 0x0000000000000008ULL | ||
1243 | #define HASH_LOOKUP_ERR_LOG1_MULT_LK 0x0000000000000004ULL | ||
1244 | #define HASH_LOOKUP_ERR_LOG1_CU 0x0000000000000002ULL | ||
1245 | #define HASH_LOOKUP_ERR_LOG1_MULT_BIT 0x0000000000000001ULL | ||
1246 | |||
1247 | #define HASH_LOOKUP_ERR_LOG2 (FZC_FFLP + 0x200e8UL) | ||
1248 | #define HASH_LOOKUP_ERR_LOG2_H1 0x000000007ffff800ULL | ||
1249 | #define HASH_LOOKUP_ERR_LOG2_SUBAREA 0x0000000000000700ULL | ||
1250 | #define HASH_LOOKUP_ERR_LOG2_SYNDROME 0x00000000000000ffULL | ||
1251 | |||
1252 | #define FFLP_CFG_1 (FZC_FFLP + 0x20100UL) | ||
1253 | #define FFLP_CFG_1_TCAM_DIS 0x0000000004000000ULL | ||
1254 | #define FFLP_CFG_1_PIO_DBG_SEL 0x0000000003800000ULL | ||
1255 | #define FFLP_CFG_1_PIO_FIO_RST 0x0000000000400000ULL | ||
1256 | #define FFLP_CFG_1_PIO_FIO_LAT 0x0000000000300000ULL | ||
1257 | #define FFLP_CFG_1_CAMLAT 0x00000000000f0000ULL | ||
1258 | #define FFLP_CFG_1_CAMLAT_SHIFT 16 | ||
1259 | #define FFLP_CFG_1_CAMRATIO 0x000000000000f000ULL | ||
1260 | #define FFLP_CFG_1_CAMRATIO_SHIFT 12 | ||
1261 | #define FFLP_CFG_1_FCRAMRATIO 0x0000000000000f00ULL | ||
1262 | #define FFLP_CFG_1_FCRAMRATIO_SHIFT 8 | ||
1263 | #define FFLP_CFG_1_FCRAMOUTDR_MASK 0x00000000000000f0ULL | ||
1264 | #define FFLP_CFG_1_FCRAMOUTDR_NORMAL 0x0000000000000000ULL | ||
1265 | #define FFLP_CFG_1_FCRAMOUTDR_STRONG 0x0000000000000050ULL | ||
1266 | #define FFLP_CFG_1_FCRAMOUTDR_WEAK 0x00000000000000a0ULL | ||
1267 | #define FFLP_CFG_1_FCRAMQS 0x0000000000000008ULL | ||
1268 | #define FFLP_CFG_1_ERRORDIS 0x0000000000000004ULL | ||
1269 | #define FFLP_CFG_1_FFLPINITDONE 0x0000000000000002ULL | ||
1270 | #define FFLP_CFG_1_LLCSNAP 0x0000000000000001ULL | ||
1271 | |||
1272 | #define DEFAULT_FCRAMRATIO 10 | ||
1273 | |||
1274 | #define DEFAULT_TCAM_LATENCY 4 | ||
1275 | #define DEFAULT_TCAM_ACCESS_RATIO 10 | ||
1276 | |||
1277 | #define TCP_CFLAG_MSK (FZC_FFLP + 0x20108UL) | ||
1278 | #define TCP_CFLAG_MSK_MASK 0x0000000000000fffULL | ||
1279 | |||
1280 | #define FCRAM_REF_TMR (FZC_FFLP + 0x20110UL) | ||
1281 | #define FCRAM_REF_TMR_MAX 0x00000000ffff0000ULL | ||
1282 | #define FCRAM_REF_TMR_MAX_SHIFT 16 | ||
1283 | #define FCRAM_REF_TMR_MIN 0x000000000000ffffULL | ||
1284 | #define FCRAM_REF_TMR_MIN_SHIFT 0 | ||
1285 | |||
1286 | #define DEFAULT_FCRAM_REFRESH_MAX 512 | ||
1287 | #define DEFAULT_FCRAM_REFRESH_MIN 512 | ||
1288 | |||
1289 | #define FCRAM_FIO_ADDR (FZC_FFLP + 0x20118UL) | ||
1290 | #define FCRAM_FIO_ADDR_ADDR 0x00000000000000ffULL | ||
1291 | |||
1292 | #define FCRAM_FIO_DAT (FZC_FFLP + 0x20120UL) | ||
1293 | #define FCRAM_FIO_DAT_DATA 0x000000000000ffffULL | ||
1294 | |||
1295 | #define FCRAM_ERR_TST0 (FZC_FFLP + 0x20128UL) | ||
1296 | #define FCRAM_ERR_TST0_SYND 0x00000000000000ffULL | ||
1297 | |||
1298 | #define FCRAM_ERR_TST1 (FZC_FFLP + 0x20130UL) | ||
1299 | #define FCRAM_ERR_TST1_DAT 0x00000000ffffffffULL | ||
1300 | |||
1301 | #define FCRAM_ERR_TST2 (FZC_FFLP + 0x20138UL) | ||
1302 | #define FCRAM_ERR_TST2_DAT 0x00000000ffffffffULL | ||
1303 | |||
1304 | #define FFLP_ERR_MASK (FZC_FFLP + 0x20140UL) | ||
1305 | #define FFLP_ERR_MASK_HSH_TBL_DAT 0x00000000000007f8ULL | ||
1306 | #define FFLP_ERR_MASK_HSH_TBL_LKUP 0x0000000000000004ULL | ||
1307 | #define FFLP_ERR_MASK_TCAM 0x0000000000000002ULL | ||
1308 | #define FFLP_ERR_MASK_VLAN 0x0000000000000001ULL | ||
1309 | |||
1310 | #define FFLP_DBG_TRAIN_VCT (FZC_FFLP + 0x20148UL) | ||
1311 | #define FFLP_DBG_TRAIN_VCT_VECTOR 0x00000000ffffffffULL | ||
1312 | |||
1313 | #define FCRAM_PHY_RD_LAT (FZC_FFLP + 0x20150UL) | ||
1314 | #define FCRAM_PHY_RD_LAT_LAT 0x00000000000000ffULL | ||
1315 | |||
1316 | /* Ethernet TCAM format */ | ||
1317 | #define TCAM_ETHKEY0_RESV1 0xffffffffffffff00ULL | ||
1318 | #define TCAM_ETHKEY0_CLASS_CODE 0x00000000000000f8ULL | ||
1319 | #define TCAM_ETHKEY0_CLASS_CODE_SHIFT 3 | ||
1320 | #define TCAM_ETHKEY0_RESV2 0x0000000000000007ULL | ||
1321 | #define TCAM_ETHKEY1_FRAME_BYTE0_7(NUM) (0xffULL << ((7 - (NUM)) * 8)) | ||
1322 | #define TCAM_ETHKEY2_FRAME_BYTE8 0xff00000000000000ULL | ||
1323 | #define TCAM_ETHKEY2_FRAME_BYTE8_SHIFT 56 | ||
1324 | #define TCAM_ETHKEY2_FRAME_BYTE9 0x00ff000000000000ULL | ||
1325 | #define TCAM_ETHKEY2_FRAME_BYTE9_SHIFT 48 | ||
1326 | #define TCAM_ETHKEY2_FRAME_BYTE10 0x0000ff0000000000ULL | ||
1327 | #define TCAM_ETHKEY2_FRAME_BYTE10_SHIFT 40 | ||
1328 | #define TCAM_ETHKEY2_FRAME_RESV 0x000000ffffffffffULL | ||
1329 | #define TCAM_ETHKEY3_FRAME_RESV 0xffffffffffffffffULL | ||
1330 | |||
1331 | /* IPV4 TCAM format */ | ||
1332 | #define TCAM_V4KEY0_RESV1 0xffffffffffffff00ULL | ||
1333 | #define TCAM_V4KEY0_CLASS_CODE 0x00000000000000f8ULL | ||
1334 | #define TCAM_V4KEY0_CLASS_CODE_SHIFT 3 | ||
1335 | #define TCAM_V4KEY0_RESV2 0x0000000000000007ULL | ||
1336 | #define TCAM_V4KEY1_L2RDCNUM 0xf800000000000000ULL | ||
1337 | #define TCAM_V4KEY1_L2RDCNUM_SHIFT 59 | ||
1338 | #define TCAM_V4KEY1_NOPORT 0x0400000000000000ULL | ||
1339 | #define TCAM_V4KEY1_RESV 0x03ffffffffffffffULL | ||
1340 | #define TCAM_V4KEY2_RESV 0xffff000000000000ULL | ||
1341 | #define TCAM_V4KEY2_TOS 0x0000ff0000000000ULL | ||
1342 | #define TCAM_V4KEY2_TOS_SHIFT 40 | ||
1343 | #define TCAM_V4KEY2_PROTO 0x000000ff00000000ULL | ||
1344 | #define TCAM_V4KEY2_PROTO_SHIFT 32 | ||
1345 | #define TCAM_V4KEY2_PORT_SPI 0x00000000ffffffffULL | ||
1346 | #define TCAM_V4KEY2_PORT_SPI_SHIFT 0 | ||
1347 | #define TCAM_V4KEY3_SADDR 0xffffffff00000000ULL | ||
1348 | #define TCAM_V4KEY3_SADDR_SHIFT 32 | ||
1349 | #define TCAM_V4KEY3_DADDR 0x00000000ffffffffULL | ||
1350 | #define TCAM_V4KEY3_DADDR_SHIFT 0 | ||
1351 | |||
1352 | /* IPV6 TCAM format */ | ||
1353 | #define TCAM_V6KEY0_RESV1 0xffffffffffffff00ULL | ||
1354 | #define TCAM_V6KEY0_CLASS_CODE 0x00000000000000f8ULL | ||
1355 | #define TCAM_V6KEY0_CLASS_CODE_SHIFT 3 | ||
1356 | #define TCAM_V6KEY0_RESV2 0x0000000000000007ULL | ||
1357 | #define TCAM_V6KEY1_L2RDCNUM 0xf800000000000000ULL | ||
1358 | #define TCAM_V6KEY1_L2RDCNUM_SHIFT 59 | ||
1359 | #define TCAM_V6KEY1_NOPORT 0x0400000000000000ULL | ||
1360 | #define TCAM_V6KEY1_RESV 0x03ff000000000000ULL | ||
1361 | #define TCAM_V6KEY1_TOS 0x0000ff0000000000ULL | ||
1362 | #define TCAM_V6KEY1_TOS_SHIFT 40 | ||
1363 | #define TCAM_V6KEY1_NEXT_HDR 0x000000ff00000000ULL | ||
1364 | #define TCAM_V6KEY1_NEXT_HDR_SHIFT 32 | ||
1365 | #define TCAM_V6KEY1_PORT_SPI 0x00000000ffffffffULL | ||
1366 | #define TCAM_V6KEY1_PORT_SPI_SHIFT 0 | ||
1367 | #define TCAM_V6KEY2_ADDR_HIGH 0xffffffffffffffffULL | ||
1368 | #define TCAM_V6KEY3_ADDR_LOW 0xffffffffffffffffULL | ||
1369 | |||
1370 | #define TCAM_ASSOCDATA_SYNDROME 0x000003fffc000000ULL | ||
1371 | #define TCAM_ASSOCDATA_SYNDROME_SHIFT 26 | ||
1372 | #define TCAM_ASSOCDATA_ZFID 0x0000000003ffc000ULL | ||
1373 | #define TCAM_ASSOCDATA_ZFID_SHIFT 14 | ||
1374 | #define TCAM_ASSOCDATA_V4_ECC_OK 0x0000000000002000ULL | ||
1375 | #define TCAM_ASSOCDATA_DISC 0x0000000000001000ULL | ||
1376 | #define TCAM_ASSOCDATA_TRES_MASK 0x0000000000000c00ULL | ||
1377 | #define TCAM_ASSOCDATA_TRES_USE_L2RDC 0x0000000000000000ULL | ||
1378 | #define TCAM_ASSOCDATA_TRES_USE_OFFSET 0x0000000000000400ULL | ||
1379 | #define TCAM_ASSOCDATA_TRES_OVR_RDC 0x0000000000000800ULL | ||
1380 | #define TCAM_ASSOCDATA_TRES_OVR_RDC_OFF 0x0000000000000c00ULL | ||
1381 | #define TCAM_ASSOCDATA_RDCTBL 0x0000000000000380ULL | ||
1382 | #define TCAM_ASSOCDATA_RDCTBL_SHIFT 7 | ||
1383 | #define TCAM_ASSOCDATA_OFFSET 0x000000000000007cULL | ||
1384 | #define TCAM_ASSOCDATA_OFFSET_SHIFT 2 | ||
1385 | #define TCAM_ASSOCDATA_ZFVLD 0x0000000000000002ULL | ||
1386 | #define TCAM_ASSOCDATA_AGE 0x0000000000000001ULL | ||
1387 | |||
1388 | #define FLOW_KEY(IDX) (FZC_FFLP + 0x40000UL + (IDX) * 8UL) | ||
1389 | #define FLOW_KEY_PORT 0x0000000000000200ULL | ||
1390 | #define FLOW_KEY_L2DA 0x0000000000000100ULL | ||
1391 | #define FLOW_KEY_VLAN 0x0000000000000080ULL | ||
1392 | #define FLOW_KEY_IPSA 0x0000000000000040ULL | ||
1393 | #define FLOW_KEY_IPDA 0x0000000000000020ULL | ||
1394 | #define FLOW_KEY_PROTO 0x0000000000000010ULL | ||
1395 | #define FLOW_KEY_L4_0 0x000000000000000cULL | ||
1396 | #define FLOW_KEY_L4_0_SHIFT 2 | ||
1397 | #define FLOW_KEY_L4_1 0x0000000000000003ULL | ||
1398 | #define FLOW_KEY_L4_1_SHIFT 0 | ||
1399 | |||
1400 | #define FLOW_KEY_L4_NONE 0x0 | ||
1401 | #define FLOW_KEY_L4_RESV 0x1 | ||
1402 | #define FLOW_KEY_L4_BYTE12 0x2 | ||
1403 | #define FLOW_KEY_L4_BYTE56 0x3 | ||
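/* Illustrative sketch: FLOW_KEY selects which packet fields feed the
 * flow hash for a given class.  A conventional 5-tuple key ORs the IP
 * addresses, the protocol and both L4 byte-pair selectors together;
 * nw64() is assumed to be the driver's 64-bit register write helper.
 */
static inline void example_set_flow_key(struct niu *np, unsigned long class_idx)
{
	u64 key = FLOW_KEY_IPSA | FLOW_KEY_IPDA | FLOW_KEY_PROTO |
		  (FLOW_KEY_L4_BYTE12 << FLOW_KEY_L4_0_SHIFT) |
		  (FLOW_KEY_L4_BYTE12 << FLOW_KEY_L4_1_SHIFT);

	nw64(FLOW_KEY(class_idx), key);
}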
1404 | |||
1405 | #define H1POLY (FZC_FFLP + 0x40060UL) | ||
1406 | #define H1POLY_INITVAL 0x00000000ffffffffULL | ||
1407 | |||
1408 | #define H2POLY (FZC_FFLP + 0x40068UL) | ||
1409 | #define H2POLY_INITVAL 0x000000000000ffffULL | ||
1410 | |||
1411 | #define FLW_PRT_SEL(IDX) (FZC_FFLP + 0x40070UL + (IDX) * 8UL) | ||
1412 | #define FLW_PRT_SEL_EXT 0x0000000000010000ULL | ||
1413 | #define FLW_PRT_SEL_MASK 0x0000000000001f00ULL | ||
1414 | #define FLW_PRT_SEL_MASK_SHIFT 8 | ||
1415 | #define FLW_PRT_SEL_BASE 0x000000000000001fULL | ||
1416 | #define FLW_PRT_SEL_BASE_SHIFT 0 | ||
1417 | |||
1418 | #define HASH_TBL_ADDR(IDX) (FFLP + 0x00000UL + (IDX) * 8192UL) | ||
1419 | #define HASH_TBL_ADDR_AUTOINC 0x0000000000800000ULL | ||
1420 | #define HASH_TBL_ADDR_ADDR 0x00000000007fffffULL | ||
1421 | |||
1422 | #define HASH_TBL_DATA(IDX) (FFLP + 0x00008UL + (IDX) * 8192UL) | ||
1423 | #define HASH_TBL_DATA_DATA 0xffffffffffffffffULL | ||
1424 | |||
1425 | /* FCRAM hash table entries are up to 8 64-bit words in size. | ||
1426 | * The layout of each entry is determined by the settings in the | ||
1427 | * first word, which is the header. | ||
1428 | * | ||
1429 | * The indexing is controllable per partition (there is one partition | ||
1430 | * per RDC group, thus a total of eight) using the BASE and MASK fields | ||
1431 | * of FLW_PRT_SEL above. | ||
1432 | */ | ||
1433 | #define FCRAM_SIZE 0x800000 | ||
1434 | #define FCRAM_NUM_PARTITIONS 8 | ||
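/* Illustrative sketch: per the comment above, each of the eight
 * partitions is placed in FCRAM by programming BASE (the high-order
 * address bits) and MASK (which of those bits override the computed
 * hash) in FLW_PRT_SEL, with EXT enabling the partition.  nw64() is an
 * assumed register write helper.
 */
static inline void example_set_flow_partition(struct niu *np, u64 partition,
					      u64 mask_bits, u64 base,
					      int enable)
{
	u64 val = ((mask_bits << FLW_PRT_SEL_MASK_SHIFT) & FLW_PRT_SEL_MASK) |
		  ((base << FLW_PRT_SEL_BASE_SHIFT) & FLW_PRT_SEL_BASE);

	if (enable)
		val |= FLW_PRT_SEL_EXT;
	nw64(FLW_PRT_SEL(partition), val);
}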
1435 | |||
1436 | /* Generic HASH entry header, used for all non-optimized formats. */ | ||
1437 | #define HASH_HEADER_FMT 0x8000000000000000ULL | ||
1438 | #define HASH_HEADER_EXT 0x4000000000000000ULL | ||
1439 | #define HASH_HEADER_VALID 0x2000000000000000ULL | ||
1440 | #define HASH_HEADER_RESVD 0x1000000000000000ULL | ||
1441 | #define HASH_HEADER_L2_DADDR 0x0ffffffffffff000ULL | ||
1442 | #define HASH_HEADER_L2_DADDR_SHIFT 12 | ||
1443 | #define HASH_HEADER_VLAN 0x0000000000000fffULL | ||
1444 | #define HASH_HEADER_VLAN_SHIFT 0 | ||
1445 | |||
1446 | /* Optimized format, just a header with a special layout defined below. | ||
1447 | * Set FMT and EXT both to zero to indicate this layout is being used. | ||
1448 | */ | ||
1449 | #define HASH_OPT_HEADER_FMT 0x8000000000000000ULL | ||
1450 | #define HASH_OPT_HEADER_EXT 0x4000000000000000ULL | ||
1451 | #define HASH_OPT_HEADER_VALID 0x2000000000000000ULL | ||
1452 | #define HASH_OPT_HEADER_RDCOFF 0x1f00000000000000ULL | ||
1453 | #define HASH_OPT_HEADER_RDCOFF_SHIFT 56 | ||
1454 | #define HASH_OPT_HEADER_HASH2 0x00ffff0000000000ULL | ||
1455 | #define HASH_OPT_HEADER_HASH2_SHIFT 40 | ||
1456 | #define HASH_OPT_HEADER_RESVD 0x000000ff00000000ULL | ||
1457 | #define HASH_OPT_HEADER_USERINFO 0x00000000ffffffffULL | ||
1458 | #define HASH_OPT_HEADER_USERINFO_SHIFT 0 | ||
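/* Illustrative sketch: composing an optimized-format header word.  FMT
 * and EXT stay zero to select this layout; only VALID, the RDC offset,
 * the secondary hash and the user info are populated.
 */
static inline u64 example_opt_hash_header(u64 rdc_offset, u64 hash2,
					  u64 userinfo)
{
	return HASH_OPT_HEADER_VALID |
	       ((rdc_offset << HASH_OPT_HEADER_RDCOFF_SHIFT) &
		HASH_OPT_HEADER_RDCOFF) |
	       ((hash2 << HASH_OPT_HEADER_HASH2_SHIFT) &
		HASH_OPT_HEADER_HASH2) |
	       (userinfo & HASH_OPT_HEADER_USERINFO);
}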
1459 | |||
1460 | /* Port and protocol word used for ipv4 and ipv6 layouts. */ | ||
1461 | #define HASH_PORT_DPORT 0xffff000000000000ULL | ||
1462 | #define HASH_PORT_DPORT_SHIFT 48 | ||
1463 | #define HASH_PORT_SPORT 0x0000ffff00000000ULL | ||
1464 | #define HASH_PORT_SPORT_SHIFT 32 | ||
1465 | #define HASH_PORT_PROTO 0x00000000ff000000ULL | ||
1466 | #define HASH_PORT_PROTO_SHIFT 24 | ||
1467 | #define HASH_PORT_PORT_OFF 0x0000000000c00000ULL | ||
1468 | #define HASH_PORT_PORT_OFF_SHIFT 22 | ||
1469 | #define HASH_PORT_PORT_RESV 0x00000000003fffffULL | ||
1470 | |||
1471 | /* Action word used for ipv4 and ipv6 layouts. */ | ||
1472 | #define HASH_ACTION_RESV1 0xe000000000000000ULL | ||
1473 | #define HASH_ACTION_RDCOFF 0x1f00000000000000ULL | ||
1474 | #define HASH_ACTION_RDCOFF_SHIFT 56 | ||
1475 | #define HASH_ACTION_ZFVALID 0x0080000000000000ULL | ||
1476 | #define HASH_ACTION_RESV2 0x0070000000000000ULL | ||
1477 | #define HASH_ACTION_ZFID 0x000fff0000000000ULL | ||
1478 | #define HASH_ACTION_ZFID_SHIFT 40 | ||
1479 | #define HASH_ACTION_RESV3 0x000000ff00000000ULL | ||
1480 | #define HASH_ACTION_USERINFO 0x00000000ffffffffULL | ||
1481 | #define HASH_ACTION_USERINFO_SHIFT 0 | ||
1482 | |||
1483 | /* IPV4 address word. Addresses are in network byte order. */ | ||
1484 | #define HASH_IP4ADDR_SADDR 0xffffffff00000000ULL | ||
1485 | #define HASH_IP4ADDR_SADDR_SHIFT 32 | ||
1486 | #define HASH_IP4ADDR_DADDR 0x00000000ffffffffULL | ||
1487 | #define HASH_IP4ADDR_DADDR_SHIFT 0 | ||
1488 | |||
1489 | /* IPV6 address layout is 4 words: the first two are saddr, the | ||
1490 | * next two are daddr. Addresses are in network byte order. | ||
1491 | */ | ||
1492 | |||
1493 | struct fcram_hash_opt { | ||
1494 | u64 header; | ||
1495 | }; | ||
1496 | |||
1497 | /* EXT=1, FMT=0 */ | ||
1498 | struct fcram_hash_ipv4 { | ||
1499 | u64 header; | ||
1500 | u64 addrs; | ||
1501 | u64 ports; | ||
1502 | u64 action; | ||
1503 | }; | ||
1504 | |||
1505 | /* EXT=1, FMT=1 */ | ||
1506 | struct fcram_hash_ipv6 { | ||
1507 | u64 header; | ||
1508 | u64 addrs[4]; | ||
1509 | u64 ports; | ||
1510 | u64 action; | ||
1511 | }; | ||
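/* Illustrative sketch (field packing only, values are placeholders):
 * filling an extended IPv4 entry (EXT=1, FMT=0).  saddr/daddr and the
 * ports are assumed to already be in network byte order, per the
 * comments above; the action word would be built from the
 * HASH_ACTION_* fields.
 */
static inline void example_fill_hash_ipv4(struct fcram_hash_ipv4 *ent,
					  u64 saddr, u64 daddr,
					  u64 sport, u64 dport, u64 proto)
{
	ent->header = HASH_HEADER_EXT | HASH_HEADER_VALID;
	ent->addrs  = (saddr << HASH_IP4ADDR_SADDR_SHIFT) |
		      (daddr & HASH_IP4ADDR_DADDR);
	ent->ports  = (dport << HASH_PORT_DPORT_SHIFT) |
		      (sport << HASH_PORT_SPORT_SHIFT) |
		      (proto << HASH_PORT_PROTO_SHIFT);
	ent->action = 0;	/* e.g. RDC offset via HASH_ACTION_RDCOFF */
}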
1512 | |||
1513 | #define HASH_TBL_DATA_LOG(IDX) (FFLP + 0x00010UL + (IDX) * 8192UL) | ||
1514 | #define HASH_TBL_DATA_LOG_ERR 0x0000000080000000ULL | ||
1515 | #define HASH_TBL_DATA_LOG_ADDR 0x000000007fffff00ULL | ||
1516 | #define HASH_TBL_DATA_LOG_SYNDROME 0x00000000000000ffULL | ||
1517 | |||
1518 | #define RX_DMA_CK_DIV (FZC_DMC + 0x00000UL) | ||
1519 | #define RX_DMA_CK_DIV_CNT 0x000000000000ffffULL | ||
1520 | |||
1521 | #define DEF_RDC(IDX) (FZC_DMC + 0x00008UL + (IDX) * 0x8UL) | ||
1522 | #define DEF_RDC_VAL 0x000000000000001fULL | ||
1523 | |||
1524 | #define PT_DRR_WT(IDX) (FZC_DMC + 0x00028UL + (IDX) * 0x8UL) | ||
1525 | #define PT_DRR_WT_VAL 0x000000000000ffffULL | ||
1526 | |||
1527 | #define PT_DRR_WEIGHT_DEFAULT_10G 0x0400 | ||
1528 | #define PT_DRR_WEIGHT_DEFAULT_1G 0x0066 | ||
1529 | |||
1530 | #define PT_USE(IDX) (FZC_DMC + 0x00048UL + (IDX) * 0x8UL) | ||
1531 | #define PT_USE_CNT 0x00000000000fffffULL | ||
1532 | |||
1533 | #define RED_RAN_INIT (FZC_DMC + 0x00068UL) | ||
1534 | #define RED_RAN_INIT_OPMODE 0x0000000000010000ULL | ||
1535 | #define RED_RAN_INIT_VAL 0x000000000000ffffULL | ||
1536 | |||
1537 | #define RX_ADDR_MD (FZC_DMC + 0x00070UL) | ||
1538 | #define RX_ADDR_MD_DBG_PT_MUX_SEL 0x000000000000000cULL | ||
1539 | #define RX_ADDR_MD_RAM_ACC 0x0000000000000002ULL | ||
1540 | #define RX_ADDR_MD_MODE32 0x0000000000000001ULL | ||
1541 | |||
1542 | #define RDMC_PRE_PAR_ERR (FZC_DMC + 0x00078UL) | ||
1543 | #define RDMC_PRE_PAR_ERR_ERR 0x0000000000008000ULL | ||
1544 | #define RDMC_PRE_PAR_ERR_MERR 0x0000000000004000ULL | ||
1545 | #define RDMC_PRE_PAR_ERR_ADDR 0x00000000000000ffULL | ||
1546 | |||
1547 | #define RDMC_SHA_PAR_ERR (FZC_DMC + 0x00080UL) | ||
1548 | #define RDMC_SHA_PAR_ERR_ERR 0x0000000000008000ULL | ||
1549 | #define RDMC_SHA_PAR_ERR_MERR 0x0000000000004000ULL | ||
1550 | #define RDMC_SHA_PAR_ERR_ADDR 0x00000000000000ffULL | ||
1551 | |||
1552 | #define RDMC_MEM_ADDR (FZC_DMC + 0x00088UL) | ||
1553 | #define RDMC_MEM_ADDR_PRE_SHAD 0x0000000000000100ULL | ||
1554 | #define RDMC_MEM_ADDR_ADDR 0x00000000000000ffULL | ||
1555 | |||
1556 | #define RDMC_MEM_DAT0 (FZC_DMC + 0x00090UL) | ||
1557 | #define RDMC_MEM_DAT0_DATA 0x00000000ffffffffULL /* bits 31:0 */ | ||
1558 | |||
1559 | #define RDMC_MEM_DAT1 (FZC_DMC + 0x00098UL) | ||
1560 | #define RDMC_MEM_DAT1_DATA 0x00000000ffffffffULL /* bits 63:32 */ | ||
1561 | |||
1562 | #define RDMC_MEM_DAT2 (FZC_DMC + 0x000a0UL) | ||
1563 | #define RDMC_MEM_DAT2_DATA 0x00000000ffffffffULL /* bits 95:64 */ | ||
1564 | |||
1565 | #define RDMC_MEM_DAT3 (FZC_DMC + 0x000a8UL) | ||
1566 | #define RDMC_MEM_DAT3_DATA 0x00000000ffffffffULL /* bits 127:96 */ | ||
1567 | |||
1568 | #define RDMC_MEM_DAT4 (FZC_DMC + 0x000b0UL) | ||
1569 | #define RDMC_MEM_DAT4_DATA 0x00000000000fffffULL /* bits 147:128 */ | ||
1570 | |||
1571 | #define RX_CTL_DAT_FIFO_STAT (FZC_DMC + 0x000b8UL) | ||
1572 | #define RX_CTL_DAT_FIFO_STAT_ID_MISMATCH 0x0000000000000100ULL | ||
1573 | #define RX_CTL_DAT_FIFO_STAT_ZCP_EOP_ERR 0x00000000000000f0ULL | ||
1574 | #define RX_CTL_DAT_FIFO_STAT_IPP_EOP_ERR 0x000000000000000fULL | ||
1575 | |||
1576 | #define RX_CTL_DAT_FIFO_MASK (FZC_DMC + 0x000c0UL) | ||
1577 | #define RX_CTL_DAT_FIFO_MASK_ID_MISMATCH 0x0000000000000100ULL | ||
1578 | #define RX_CTL_DAT_FIFO_MASK_ZCP_EOP_ERR 0x00000000000000f0ULL | ||
1579 | #define RX_CTL_DAT_FIFO_MASK_IPP_EOP_ERR 0x000000000000000fULL | ||
1580 | |||
1581 | #define RDMC_TRAINING_VECTOR (FZC_DMC + 0x000c8UL) | ||
1582 | #define RDMC_TRAINING_VECTOR_TRAINING_VECTOR 0x00000000ffffffffULL | ||
1583 | |||
1584 | #define RX_CTL_DAT_FIFO_STAT_DBG (FZC_DMC + 0x000d0UL) | ||
1585 | #define RX_CTL_DAT_FIFO_STAT_DBG_ID_MISMATCH 0x0000000000000100ULL | ||
1586 | #define RX_CTL_DAT_FIFO_STAT_DBG_ZCP_EOP_ERR 0x00000000000000f0ULL | ||
1587 | #define RX_CTL_DAT_FIFO_STAT_DBG_IPP_EOP_ERR 0x000000000000000fULL | ||
1588 | |||
1589 | #define RDC_TBL(TBL,SLOT) (FZC_ZCP + 0x10000UL + \ | ||
1590 | (TBL) * (8UL * 16UL) + \ | ||
1591 | (SLOT) * 8UL) | ||
1592 | #define RDC_TBL_RDC 0x000000000000000fULL | ||
1593 | |||
1594 | #define RX_LOG_PAGE_VLD(IDX) (FZC_DMC + 0x20000UL + (IDX) * 0x40UL) | ||
1595 | #define RX_LOG_PAGE_VLD_FUNC 0x000000000000000cULL | ||
1596 | #define RX_LOG_PAGE_VLD_FUNC_SHIFT 2 | ||
1597 | #define RX_LOG_PAGE_VLD_PAGE1 0x0000000000000002ULL | ||
1598 | #define RX_LOG_PAGE_VLD_PAGE0 0x0000000000000001ULL | ||
1599 | |||
1600 | #define RX_LOG_MASK1(IDX) (FZC_DMC + 0x20008UL + (IDX) * 0x40UL) | ||
1601 | #define RX_LOG_MASK1_MASK 0x00000000ffffffffULL | ||
1602 | |||
1603 | #define RX_LOG_VAL1(IDX) (FZC_DMC + 0x20010UL + (IDX) * 0x40UL) | ||
1604 | #define RX_LOG_VAL1_VALUE 0x00000000ffffffffULL | ||
1605 | |||
1606 | #define RX_LOG_MASK2(IDX) (FZC_DMC + 0x20018UL + (IDX) * 0x40UL) | ||
1607 | #define RX_LOG_MASK2_MASK 0x00000000ffffffffULL | ||
1608 | |||
1609 | #define RX_LOG_VAL2(IDX) (FZC_DMC + 0x20020UL + (IDX) * 0x40UL) | ||
1610 | #define RX_LOG_VAL2_VALUE 0x00000000ffffffffULL | ||
1611 | |||
1612 | #define RX_LOG_PAGE_RELO1(IDX) (FZC_DMC + 0x20028UL + (IDX) * 0x40UL) | ||
1613 | #define RX_LOG_PAGE_RELO1_RELO 0x00000000ffffffffULL | ||
1614 | |||
1615 | #define RX_LOG_PAGE_RELO2(IDX) (FZC_DMC + 0x20030UL + (IDX) * 0x40UL) | ||
1616 | #define RX_LOG_PAGE_RELO2_RELO 0x00000000ffffffffULL | ||
1617 | |||
1618 | #define RX_LOG_PAGE_HDL(IDX) (FZC_DMC + 0x20038UL + (IDX) * 0x40UL) | ||
1619 | #define RX_LOG_PAGE_HDL_HANDLE 0x00000000000fffffULL | ||
1620 | |||
1621 | #define TX_LOG_PAGE_VLD(IDX) (FZC_DMC + 0x40000UL + (IDX) * 0x200UL) | ||
1622 | #define TX_LOG_PAGE_VLD_FUNC 0x000000000000000cULL | ||
1623 | #define TX_LOG_PAGE_VLD_FUNC_SHIFT 2 | ||
1624 | #define TX_LOG_PAGE_VLD_PAGE1 0x0000000000000002ULL | ||
1625 | #define TX_LOG_PAGE_VLD_PAGE0 0x0000000000000001ULL | ||
1626 | |||
1627 | #define TX_LOG_MASK1(IDX) (FZC_DMC + 0x40008UL + (IDX) * 0x200UL) | ||
1628 | #define TX_LOG_MASK1_MASK 0x00000000ffffffffULL | ||
1629 | |||
1630 | #define TX_LOG_VAL1(IDX) (FZC_DMC + 0x40010UL + (IDX) * 0x200UL) | ||
1631 | #define TX_LOG_VAL1_VALUE 0x00000000ffffffffULL | ||
1632 | |||
1633 | #define TX_LOG_MASK2(IDX) (FZC_DMC + 0x40018UL + (IDX) * 0x200UL) | ||
1634 | #define TX_LOG_MASK2_MASK 0x00000000ffffffffULL | ||
1635 | |||
1636 | #define TX_LOG_VAL2(IDX) (FZC_DMC + 0x40020UL + (IDX) * 0x200UL) | ||
1637 | #define TX_LOG_VAL2_VALUE 0x00000000ffffffffULL | ||
1638 | |||
1639 | #define TX_LOG_PAGE_RELO1(IDX) (FZC_DMC + 0x40028UL + (IDX) * 0x200UL) | ||
1640 | #define TX_LOG_PAGE_RELO1_RELO 0x00000000ffffffffULL | ||
1641 | |||
1642 | #define TX_LOG_PAGE_RELO2(IDX) (FZC_DMC + 0x40030UL + (IDX) * 0x200UL) | ||
1643 | #define TX_LOG_PAGE_RELO2_RELO 0x00000000ffffffffULL | ||
1644 | |||
1645 | #define TX_LOG_PAGE_HDL(IDX) (FZC_DMC + 0x40038UL + (IDX) * 0x200UL) | ||
1646 | #define TX_LOG_PAGE_HDL_HANDLE 0x00000000000fffffULL | ||
1647 | |||
1648 | #define TX_ADDR_MD (FZC_DMC + 0x45000UL) | ||
1649 | #define TX_ADDR_MD_MODE32 0x0000000000000001ULL | ||
1650 | |||
1651 | #define RDC_RED_PARA(IDX) (FZC_DMC + 0x30000UL + (IDX) * 0x40UL) | ||
1652 | #define RDC_RED_PARA_THRE_SYN 0x00000000fff00000ULL | ||
1653 | #define RDC_RED_PARA_THRE_SYN_SHIFT 20 | ||
1654 | #define RDC_RED_PARA_WIN_SYN 0x00000000000f0000ULL | ||
1655 | #define RDC_RED_PARA_WIN_SYN_SHIFT 16 | ||
1656 | #define RDC_RED_PARA_THRE 0x000000000000fff0ULL | ||
1657 | #define RDC_RED_PARA_THRE_SHIFT 4 | ||
1658 | #define RDC_RED_PARA_WIN 0x000000000000000fULL | ||
1659 | #define RDC_RED_PARA_WIN_SHIFT 0 | ||
1660 | |||
1661 | #define RED_DIS_CNT(IDX) (FZC_DMC + 0x30008UL + (IDX) * 0x40UL) | ||
1662 | #define RED_DIS_CNT_OFLOW 0x0000000000010000ULL | ||
1663 | #define RED_DIS_CNT_COUNT 0x000000000000ffffULL | ||
1664 | |||
1665 | #define IPP_CFIG (FZC_IPP + 0x00000UL) | ||
1666 | #define IPP_CFIG_SOFT_RST 0x0000000080000000ULL | ||
1667 | #define IPP_CFIG_IP_MAX_PKT 0x0000000001ffff00ULL | ||
1668 | #define IPP_CFIG_IP_MAX_PKT_SHIFT 8 | ||
1669 | #define IPP_CFIG_FFLP_CS_PIO_W 0x0000000000000080ULL | ||
1670 | #define IPP_CFIG_PFIFO_PIO_W 0x0000000000000040ULL | ||
1671 | #define IPP_CFIG_DFIFO_PIO_W 0x0000000000000020ULL | ||
1672 | #define IPP_CFIG_CKSUM_EN 0x0000000000000010ULL | ||
1673 | #define IPP_CFIG_DROP_BAD_CRC 0x0000000000000008ULL | ||
1674 | #define IPP_CFIG_DFIFO_ECC_EN 0x0000000000000004ULL | ||
1675 | #define IPP_CFIG_DEBUG_BUS_OUT_EN 0x0000000000000002ULL | ||
1676 | #define IPP_CFIG_IPP_ENABLE 0x0000000000000001ULL | ||
1677 | |||
1678 | #define IPP_PKT_DIS (FZC_IPP + 0x00020UL) | ||
1679 | #define IPP_PKT_DIS_COUNT 0x0000000000003fffULL | ||
1680 | |||
1681 | #define IPP_BAD_CS_CNT (FZC_IPP + 0x00028UL) | ||
1682 | #define IPP_BAD_CS_CNT_COUNT 0x0000000000003fffULL | ||
1683 | |||
1684 | #define IPP_ECC (FZC_IPP + 0x00030UL) | ||
1685 | #define IPP_ECC_COUNT 0x00000000000000ffULL | ||
1686 | |||
1687 | #define IPP_INT_STAT (FZC_IPP + 0x00040UL) | ||
1688 | #define IPP_INT_STAT_SOP_MISS 0x0000000080000000ULL | ||
1689 | #define IPP_INT_STAT_EOP_MISS 0x0000000040000000ULL | ||
1690 | #define IPP_INT_STAT_DFIFO_UE 0x0000000030000000ULL | ||
1691 | #define IPP_INT_STAT_DFIFO_CE 0x000000000c000000ULL | ||
1692 | #define IPP_INT_STAT_DFIFO_ECC 0x0000000003000000ULL | ||
1693 | #define IPP_INT_STAT_DFIFO_ECC_IDX 0x00000000007ff000ULL | ||
1694 | #define IPP_INT_STAT_PFIFO_PERR 0x0000000000000800ULL | ||
1695 | #define IPP_INT_STAT_ECC_ERR_MAX 0x0000000000000400ULL | ||
1696 | #define IPP_INT_STAT_PFIFO_ERR_IDX 0x00000000000003f0ULL | ||
1697 | #define IPP_INT_STAT_PFIFO_OVER 0x0000000000000008ULL | ||
1698 | #define IPP_INT_STAT_PFIFO_UND 0x0000000000000004ULL | ||
1699 | #define IPP_INT_STAT_BAD_CS_MX 0x0000000000000002ULL | ||
1700 | #define IPP_INT_STAT_PKT_DIS_MX 0x0000000000000001ULL | ||
1701 | #define IPP_INT_STAT_ALL 0x00000000ff7fffffULL | ||
1702 | |||
1703 | #define IPP_MSK (FZC_IPP + 0x00048UL) | ||
1704 | #define IPP_MSK_ECC_ERR_MX 0x0000000000000080ULL | ||
1705 | #define IPP_MSK_DFIFO_EOP_SOP 0x0000000000000040ULL | ||
1706 | #define IPP_MSK_DFIFO_UC 0x0000000000000020ULL | ||
1707 | #define IPP_MSK_PFIFO_PAR 0x0000000000000010ULL | ||
1708 | #define IPP_MSK_PFIFO_OVER 0x0000000000000008ULL | ||
1709 | #define IPP_MSK_PFIFO_UND 0x0000000000000004ULL | ||
1710 | #define IPP_MSK_BAD_CS 0x0000000000000002ULL | ||
1711 | #define IPP_MSK_PKT_DIS_CNT 0x0000000000000001ULL | ||
1712 | #define IPP_MSK_ALL 0x00000000000000ffULL | ||
1713 | |||
1714 | #define IPP_PFIFO_RD0 (FZC_IPP + 0x00060UL) | ||
1715 | #define IPP_PFIFO_RD0_DATA 0x00000000ffffffffULL /* bits 31:0 */ | ||
1716 | |||
1717 | #define IPP_PFIFO_RD1 (FZC_IPP + 0x00068UL) | ||
1718 | #define IPP_PFIFO_RD1_DATA 0x00000000ffffffffULL /* bits 63:32 */ | ||
1719 | |||
1720 | #define IPP_PFIFO_RD2 (FZC_IPP + 0x00070UL) | ||
1721 | #define IPP_PFIFO_RD2_DATA 0x00000000ffffffffULL /* bits 95:64 */ | ||
1722 | |||
1723 | #define IPP_PFIFO_RD3 (FZC_IPP + 0x00078UL) | ||
1724 | #define IPP_PFIFO_RD3_DATA 0x00000000ffffffffULL /* bits 127:96 */ | ||
1725 | |||
1726 | #define IPP_PFIFO_RD4 (FZC_IPP + 0x00080UL) | ||
1727 | #define IPP_PFIFO_RD4_DATA 0x00000000ffffffffULL /* bits 145:128 */ | ||
1728 | |||
1729 | #define IPP_PFIFO_WR0 (FZC_IPP + 0x00088UL) | ||
1730 | #define IPP_PFIFO_WR0_DATA 0x00000000ffffffffULL /* bits 31:0 */ | ||
1731 | |||
1732 | #define IPP_PFIFO_WR1 (FZC_IPP + 0x00090UL) | ||
1733 | #define IPP_PFIFO_WR1_DATA 0x00000000ffffffffULL /* bits 63:32 */ | ||
1734 | |||
1735 | #define IPP_PFIFO_WR2 (FZC_IPP + 0x00098UL) | ||
1736 | #define IPP_PFIFO_WR2_DATA 0x00000000ffffffffULL /* bits 95:64 */ | ||
1737 | |||
1738 | #define IPP_PFIFO_WR3 (FZC_IPP + 0x000a0UL) | ||
1739 | #define IPP_PFIFO_WR3_DATA 0x00000000ffffffffULL /* bits 127:96 */ | ||
1740 | |||
1741 | #define IPP_PFIFO_WR4 (FZC_IPP + 0x000a8UL) | ||
1742 | #define IPP_PFIFO_WR4_DATA 0x00000000ffffffffULL /* bits 145:128 */ | ||
1743 | |||
1744 | #define IPP_PFIFO_RD_PTR (FZC_IPP + 0x000b0UL) | ||
1745 | #define IPP_PFIFO_RD_PTR_PTR 0x000000000000003fULL | ||
1746 | |||
1747 | #define IPP_PFIFO_WR_PTR (FZC_IPP + 0x000b8UL) | ||
1748 | #define IPP_PFIFO_WR_PTR_PTR 0x000000000000007fULL | ||
1749 | |||
1750 | #define IPP_DFIFO_RD0 (FZC_IPP + 0x000c0UL) | ||
1751 | #define IPP_DFIFO_RD0_DATA 0x00000000ffffffffULL /* bits 31:0 */ | ||
1752 | |||
1753 | #define IPP_DFIFO_RD1 (FZC_IPP + 0x000c8UL) | ||
1754 | #define IPP_DFIFO_RD1_DATA 0x00000000ffffffffULL /* bits 63:32 */ | ||
1755 | |||
1756 | #define IPP_DFIFO_RD2 (FZC_IPP + 0x000d0UL) | ||
1757 | #define IPP_DFIFO_RD2_DATA 0x00000000ffffffffULL /* bits 95:64 */ | ||
1758 | |||
1759 | #define IPP_DFIFO_RD3 (FZC_IPP + 0x000d8UL) | ||
1760 | #define IPP_DFIFO_RD3_DATA 0x00000000ffffffffULL /* bits 127:96 */ | ||
1761 | |||
1762 | #define IPP_DFIFO_RD4 (FZC_IPP + 0x000e0UL) | ||
1763 | #define IPP_DFIFO_RD4_DATA 0x00000000ffffffffULL /* bits 145:128 */ | ||
1764 | |||
1765 | #define IPP_DFIFO_WR0 (FZC_IPP + 0x000e8UL) | ||
1766 | #define IPP_DFIFO_WR0_DATA 0x00000000ffffffffULL /* bits 31:0 */ | ||
1767 | |||
1768 | #define IPP_DFIFO_WR1 (FZC_IPP + 0x000f0UL) | ||
1769 | #define IPP_DFIFO_WR1_DATA 0x00000000ffffffffULL /* bits 63:32 */ | ||
1770 | |||
1771 | #define IPP_DFIFO_WR2 (FZC_IPP + 0x000f8UL) | ||
1772 | #define IPP_DFIFO_WR2_DATA 0x00000000ffffffffULL /* bits 95:64 */ | ||
1773 | |||
1774 | #define IPP_DFIFO_WR3 (FZC_IPP + 0x00100UL) | ||
1775 | #define IPP_DFIFO_WR3_DATA 0x00000000ffffffffULL /* bits 127:96 */ | ||
1776 | |||
1777 | #define IPP_DFIFO_WR4 (FZC_IPP + 0x00108UL) | ||
1778 | #define IPP_DFIFO_WR4_DATA 0x00000000ffffffffULL /* bits 145:128 */ | ||
1779 | |||
1780 | #define IPP_DFIFO_RD_PTR (FZC_IPP + 0x00110UL) | ||
1781 | #define IPP_DFIFO_RD_PTR_PTR 0x0000000000000fffULL | ||
1782 | |||
1783 | #define IPP_DFIFO_WR_PTR (FZC_IPP + 0x00118UL) | ||
1784 | #define IPP_DFIFO_WR_PTR_PTR 0x0000000000000fffULL | ||
1785 | |||
1786 | #define IPP_SM (FZC_IPP + 0x00120UL) | ||
1787 | #define IPP_SM_SM 0x00000000ffffffffULL | ||
1788 | |||
1789 | #define IPP_CS_STAT (FZC_IPP + 0x00128UL) | ||
1790 | #define IPP_CS_STAT_BCYC_CNT 0x00000000ff000000ULL | ||
1791 | #define IPP_CS_STAT_IP_LEN 0x0000000000fff000ULL | ||
1792 | #define IPP_CS_STAT_CS_FAIL 0x0000000000000800ULL | ||
1793 | #define IPP_CS_STAT_TERM 0x0000000000000400ULL | ||
1794 | #define IPP_CS_STAT_BAD_NUM 0x0000000000000200ULL | ||
1795 | #define IPP_CS_STAT_CS_STATE 0x00000000000001ffULL | ||
1796 | |||
1797 | #define IPP_FFLP_CS_INFO (FZC_IPP + 0x00130UL) | ||
1798 | #define IPP_FFLP_CS_INFO_PKT_ID 0x0000000000003c00ULL | ||
1799 | #define IPP_FFLP_CS_INFO_L4_PROTO 0x0000000000000300ULL | ||
1800 | #define IPP_FFLP_CS_INFO_V4_HD_LEN 0x00000000000000f0ULL | ||
1801 | #define IPP_FFLP_CS_INFO_L3_VER 0x000000000000000cULL | ||
1802 | #define IPP_FFLP_CS_INFO_L2_OP 0x0000000000000003ULL | ||
1803 | |||
1804 | #define IPP_DBG_SEL (FZC_IPP + 0x00138UL) | ||
1805 | #define IPP_DBG_SEL_SEL 0x000000000000000fULL | ||
1806 | |||
1807 | #define IPP_DFIFO_ECC_SYND (FZC_IPP + 0x00140UL) | ||
1808 | #define IPP_DFIFO_ECC_SYND_SYND 0x000000000000ffffULL | ||
1809 | |||
1810 | #define IPP_DFIFO_EOP_RD_PTR (FZC_IPP + 0x00148UL) | ||
1811 | #define IPP_DFIFO_EOP_RD_PTR_PTR 0x0000000000000fffULL | ||
1812 | |||
1813 | #define IPP_ECC_CTL (FZC_IPP + 0x00150UL) | ||
1814 | #define IPP_ECC_CTL_DIS_DBL 0x0000000080000000ULL | ||
1815 | #define IPP_ECC_CTL_COR_DBL 0x0000000000020000ULL | ||
1816 | #define IPP_ECC_CTL_COR_SNG 0x0000000000010000ULL | ||
1817 | #define IPP_ECC_CTL_COR_ALL 0x0000000000000400ULL | ||
1818 | #define IPP_ECC_CTL_COR_1 0x0000000000000100ULL | ||
1819 | #define IPP_ECC_CTL_COR_LST 0x0000000000000004ULL | ||
1820 | #define IPP_ECC_CTL_COR_SND 0x0000000000000002ULL | ||
1821 | #define IPP_ECC_CTL_COR_FSR 0x0000000000000001ULL | ||
1822 | |||
1823 | #define NIU_DFIFO_ENTRIES 1024 | ||
1824 | #define ATLAS_P0_P1_DFIFO_ENTRIES 2048 | ||
1825 | #define ATLAS_P2_P3_DFIFO_ENTRIES 1024 | ||
1826 | |||
1827 | #define ZCP_CFIG (FZC_ZCP + 0x00000UL) | ||
1828 | #define ZCP_CFIG_ZCP_32BIT_MODE 0x0000000001000000ULL | ||
1829 | #define ZCP_CFIG_ZCP_DEBUG_SEL 0x0000000000ff0000ULL | ||
1830 | #define ZCP_CFIG_DMA_TH 0x000000000000ffe0ULL | ||
1831 | #define ZCP_CFIG_ECC_CHK_DIS 0x0000000000000010ULL | ||
1832 | #define ZCP_CFIG_PAR_CHK_DIS 0x0000000000000008ULL | ||
1833 | #define ZCP_CFIG_DIS_BUFF_RSP_IF 0x0000000000000004ULL | ||
1834 | #define ZCP_CFIG_DIS_BUFF_REQ_IF 0x0000000000000002ULL | ||
1835 | #define ZCP_CFIG_ZC_ENABLE 0x0000000000000001ULL | ||
1836 | |||
1837 | #define ZCP_INT_STAT (FZC_ZCP + 0x00008UL) | ||
1838 | #define ZCP_INT_STAT_RRFIFO_UNDERRUN 0x0000000000008000ULL | ||
1839 | #define ZCP_INT_STAT_RRFIFO_OVERRUN 0x0000000000004000ULL | ||
1840 | #define ZCP_INT_STAT_RSPFIFO_UNCOR_ERR 0x0000000000001000ULL | ||
1841 | #define ZCP_INT_STAT_BUFFER_OVERFLOW 0x0000000000000800ULL | ||
1842 | #define ZCP_INT_STAT_STAT_TBL_PERR 0x0000000000000400ULL | ||
1843 | #define ZCP_INT_STAT_DYN_TBL_PERR 0x0000000000000200ULL | ||
1844 | #define ZCP_INT_STAT_BUF_TBL_PERR 0x0000000000000100ULL | ||
1845 | #define ZCP_INT_STAT_TT_PROGRAM_ERR 0x0000000000000080ULL | ||
1846 | #define ZCP_INT_STAT_RSP_TT_INDEX_ERR 0x0000000000000040ULL | ||
1847 | #define ZCP_INT_STAT_SLV_TT_INDEX_ERR 0x0000000000000020ULL | ||
1848 | #define ZCP_INT_STAT_ZCP_TT_INDEX_ERR 0x0000000000000010ULL | ||
1849 | #define ZCP_INT_STAT_CFIFO_ECC3 0x0000000000000008ULL | ||
1850 | #define ZCP_INT_STAT_CFIFO_ECC2 0x0000000000000004ULL | ||
1851 | #define ZCP_INT_STAT_CFIFO_ECC1 0x0000000000000002ULL | ||
1852 | #define ZCP_INT_STAT_CFIFO_ECC0 0x0000000000000001ULL | ||
1853 | #define ZCP_INT_STAT_ALL 0x000000000000ffffULL | ||
1854 | |||
1855 | #define ZCP_INT_MASK (FZC_ZCP + 0x00010UL) | ||
1856 | #define ZCP_INT_MASK_RRFIFO_UNDERRUN 0x0000000000008000ULL | ||
1857 | #define ZCP_INT_MASK_RRFIFO_OVERRUN 0x0000000000004000ULL | ||
1858 | #define ZCP_INT_MASK_LOJ 0x0000000000002000ULL | ||
1859 | #define ZCP_INT_MASK_RSPFIFO_UNCOR_ERR 0x0000000000001000ULL | ||
1860 | #define ZCP_INT_MASK_BUFFER_OVERFLOW 0x0000000000000800ULL | ||
1861 | #define ZCP_INT_MASK_STAT_TBL_PERR 0x0000000000000400ULL | ||
1862 | #define ZCP_INT_MASK_DYN_TBL_PERR 0x0000000000000200ULL | ||
1863 | #define ZCP_INT_MASK_BUF_TBL_PERR 0x0000000000000100ULL | ||
1864 | #define ZCP_INT_MASK_TT_PROGRAM_ERR 0x0000000000000080ULL | ||
1865 | #define ZCP_INT_MASK_RSP_TT_INDEX_ERR 0x0000000000000040ULL | ||
1866 | #define ZCP_INT_MASK_SLV_TT_INDEX_ERR 0x0000000000000020ULL | ||
1867 | #define ZCP_INT_MASK_ZCP_TT_INDEX_ERR 0x0000000000000010ULL | ||
1868 | #define ZCP_INT_MASK_CFIFO_ECC3 0x0000000000000008ULL | ||
1869 | #define ZCP_INT_MASK_CFIFO_ECC2 0x0000000000000004ULL | ||
1870 | #define ZCP_INT_MASK_CFIFO_ECC1 0x0000000000000002ULL | ||
1871 | #define ZCP_INT_MASK_CFIFO_ECC0 0x0000000000000001ULL | ||
1872 | #define ZCP_INT_MASK_ALL 0x000000000000ffffULL | ||
1873 | |||
1874 | #define BAM4BUF (FZC_ZCP + 0x00018UL) | ||
1875 | #define BAM4BUF_LOJ 0x0000000080000000ULL | ||
1876 | #define BAM4BUF_EN_CK 0x0000000040000000ULL | ||
1877 | #define BAM4BUF_IDX_END0 0x000000003ff00000ULL | ||
1878 | #define BAM4BUF_IDX_ST0 0x00000000000ffc00ULL | ||
1879 | #define BAM4BUF_OFFSET0 0x00000000000003ffULL | ||
1880 | |||
1881 | #define BAM8BUF (FZC_ZCP + 0x00020UL) | ||
1882 | #define BAM8BUF_LOJ 0x0000000080000000ULL | ||
1883 | #define BAM8BUF_EN_CK 0x0000000040000000ULL | ||
1884 | #define BAM8BUF_IDX_END1 0x000000003ff00000ULL | ||
1885 | #define BAM8BUF_IDX_ST1 0x00000000000ffc00ULL | ||
1886 | #define BAM8BUF_OFFSET1 0x00000000000003ffULL | ||
1887 | |||
1888 | #define BAM16BUF (FZC_ZCP + 0x00028UL) | ||
1889 | #define BAM16BUF_LOJ 0x0000000080000000ULL | ||
1890 | #define BAM16BUF_EN_CK 0x0000000040000000ULL | ||
1891 | #define BAM16BUF_IDX_END2 0x000000003ff00000ULL | ||
1892 | #define BAM16BUF_IDX_ST2 0x00000000000ffc00ULL | ||
1893 | #define BAM16BUF_OFFSET2 0x00000000000003ffULL | ||
1894 | |||
1895 | #define BAM32BUF (FZC_ZCP + 0x00030UL) | ||
1896 | #define BAM32BUF_LOJ 0x0000000080000000ULL | ||
1897 | #define BAM32BUF_EN_CK 0x0000000040000000ULL | ||
1898 | #define BAM32BUF_IDX_END3 0x000000003ff00000ULL | ||
1899 | #define BAM32BUF_IDX_ST3 0x00000000000ffc00ULL | ||
1900 | #define BAM32BUF_OFFSET3 0x00000000000003ffULL | ||
1901 | |||
1902 | #define DST4BUF (FZC_ZCP + 0x00038UL) | ||
1903 | #define DST4BUF_DS_OFFSET0 0x00000000000003ffULL | ||
1904 | |||
1905 | #define DST8BUF (FZC_ZCP + 0x00040UL) | ||
1906 | #define DST8BUF_DS_OFFSET1 0x00000000000003ffULL | ||
1907 | |||
1908 | #define DST16BUF (FZC_ZCP + 0x00048UL) | ||
1909 | #define DST16BUF_DS_OFFSET2 0x00000000000003ffULL | ||
1910 | |||
1911 | #define DST32BUF (FZC_ZCP + 0x00050UL) | ||
1912 | #define DST32BUF_DS_OFFSET3 0x00000000000003ffULL | ||
1913 | |||
1914 | #define ZCP_RAM_DATA0 (FZC_ZCP + 0x00058UL) | ||
1915 | #define ZCP_RAM_DATA0_DAT0 0x00000000ffffffffULL | ||
1916 | |||
1917 | #define ZCP_RAM_DATA1 (FZC_ZCP + 0x00060UL) | ||
1918 | #define ZCP_RAM_DATA1_DAT1 0x00000000ffffffffULL | ||
1919 | |||
1920 | #define ZCP_RAM_DATA2 (FZC_ZCP + 0x00068UL) | ||
1921 | #define ZCP_RAM_DATA2_DAT2 0x00000000ffffffffULL | ||
1922 | |||
1923 | #define ZCP_RAM_DATA3 (FZC_ZCP + 0x00070UL) | ||
1924 | #define ZCP_RAM_DATA3_DAT3 0x00000000ffffffffULL | ||
1925 | |||
1926 | #define ZCP_RAM_DATA4 (FZC_ZCP + 0x00078UL) | ||
1927 | #define ZCP_RAM_DATA4_DAT4 0x00000000000000ffULL | ||
1928 | |||
1929 | #define ZCP_RAM_BE (FZC_ZCP + 0x00080UL) | ||
1930 | #define ZCP_RAM_BE_VAL 0x000000000001ffffULL | ||
1931 | |||
1932 | #define ZCP_RAM_ACC (FZC_ZCP + 0x00088UL) | ||
1933 | #define ZCP_RAM_ACC_BUSY 0x0000000080000000ULL | ||
1934 | #define ZCP_RAM_ACC_READ 0x0000000040000000ULL | ||
1935 | #define ZCP_RAM_ACC_WRITE 0x0000000000000000ULL | ||
1936 | #define ZCP_RAM_ACC_LOJ 0x0000000020000000ULL | ||
1937 | #define ZCP_RAM_ACC_ZFCID 0x000000001ffe0000ULL | ||
1938 | #define ZCP_RAM_ACC_ZFCID_SHIFT 17 | ||
1939 | #define ZCP_RAM_ACC_RAM_SEL 0x000000000001f000ULL | ||
1940 | #define ZCP_RAM_ACC_RAM_SEL_SHIFT 12 | ||
1941 | #define ZCP_RAM_ACC_CFIFOADDR 0x0000000000000fffULL | ||
1942 | #define ZCP_RAM_ACC_CFIFOADDR_SHIFT 0 | ||
1943 | |||
1944 | #define ZCP_RAM_SEL_BAM(INDEX) (0x00 + (INDEX)) | ||
1945 | #define ZCP_RAM_SEL_TT_STATIC 0x08 | ||
1946 | #define ZCP_RAM_SEL_TT_DYNAMIC 0x09 | ||
1947 | #define ZCP_RAM_SEL_CFIFO(PORT) (0x10 + (PORT)) | ||
1948 | |||
1949 | #define NIU_CFIFO_ENTRIES 1024 | ||
1950 | #define ATLAS_P0_P1_CFIFO_ENTRIES 2048 | ||
1951 | #define ATLAS_P2_P3_CFIFO_ENTRIES 1024 | ||
1952 | |||
1953 | #define CHK_BIT_DATA (FZC_ZCP + 0x00090UL) | ||
1954 | #define CHK_BIT_DATA_DATA 0x000000000000ffffULL | ||
1955 | |||
1956 | #define RESET_CFIFO (FZC_ZCP + 0x00098UL) | ||
1957 | #define RESET_CFIFO_RST(PORT) (0x1 << (PORT)) | ||
1958 | |||
1959 | #define CFIFO_ECC(PORT) (FZC_ZCP + 0x000a0UL + (PORT) * 8UL) | ||
1960 | #define CFIFO_ECC_DIS_DBLBIT_ERR 0x0000000080000000ULL | ||
1961 | #define CFIFO_ECC_DBLBIT_ERR 0x0000000000020000ULL | ||
1962 | #define CFIFO_ECC_SINGLEBIT_ERR 0x0000000000010000ULL | ||
1963 | #define CFIFO_ECC_ALL_PKT 0x0000000000000400ULL | ||
1964 | #define CFIFO_ECC_LAST_LINE 0x0000000000000004ULL | ||
1965 | #define CFIFO_ECC_2ND_LINE 0x0000000000000002ULL | ||
1966 | #define CFIFO_ECC_1ST_LINE 0x0000000000000001ULL | ||
1967 | |||
1968 | #define ZCP_TRAINING_VECTOR (FZC_ZCP + 0x000c0UL) | ||
1969 | #define ZCP_TRAINING_VECTOR_VECTOR 0x00000000ffffffffULL | ||
1970 | |||
1971 | #define ZCP_STATE_MACHINE (FZC_ZCP + 0x000c8UL) | ||
1972 | #define ZCP_STATE_MACHINE_SM 0x00000000ffffffffULL | ||
1973 | |||
1974 | /* Same bits as ZCP_INT_STAT */ | ||
1975 | #define ZCP_INT_STAT_TEST (FZC_ZCP + 0x00108UL) | ||
1976 | |||
1977 | #define RXDMA_CFIG1(IDX) (DMC + 0x00000UL + (IDX) * 0x200UL) | ||
1978 | #define RXDMA_CFIG1_EN 0x0000000080000000ULL | ||
1979 | #define RXDMA_CFIG1_RST 0x0000000040000000ULL | ||
1980 | #define RXDMA_CFIG1_QST 0x0000000020000000ULL | ||
1981 | #define RXDMA_CFIG1_MBADDR_H 0x0000000000000fffULL /* mboxaddr 43:32 */ | ||
1982 | |||
1983 | #define RXDMA_CFIG2(IDX) (DMC + 0x00008UL + (IDX) * 0x200UL) | ||
1984 | #define RXDMA_CFIG2_MBADDR_L 0x00000000ffffffc0ULL /* mboxaddr 31:6 */ | ||
1985 | #define RXDMA_CFIG2_OFFSET 0x0000000000000006ULL | ||
1986 | #define RXDMA_CFIG2_OFFSET_SHIFT 1 | ||
1987 | #define RXDMA_CFIG2_FULL_HDR 0x0000000000000001ULL | ||
1988 | |||
1989 | #define RBR_CFIG_A(IDX) (DMC + 0x00010UL + (IDX) * 0x200UL) | ||
1990 | #define RBR_CFIG_A_LEN 0xffff000000000000ULL | ||
1991 | #define RBR_CFIG_A_LEN_SHIFT 48 | ||
1992 | #define RBR_CFIG_A_STADDR_BASE 0x00000ffffffc0000ULL | ||
1993 | #define RBR_CFIG_A_STADDR 0x000000000003ffc0ULL | ||
1994 | |||
1995 | #define RBR_CFIG_B(IDX) (DMC + 0x00018UL + (IDX) * 0x200UL) | ||
1996 | #define RBR_CFIG_B_BLKSIZE 0x0000000003000000ULL | ||
1997 | #define RBR_CFIG_B_BLKSIZE_SHIFT 24 | ||
1998 | #define RBR_CFIG_B_VLD2 0x0000000000800000ULL | ||
1999 | #define RBR_CFIG_B_BUFSZ2 0x0000000000030000ULL | ||
2000 | #define RBR_CFIG_B_BUFSZ2_SHIFT 16 | ||
2001 | #define RBR_CFIG_B_VLD1 0x0000000000008000ULL | ||
2002 | #define RBR_CFIG_B_BUFSZ1 0x0000000000000300ULL | ||
2003 | #define RBR_CFIG_B_BUFSZ1_SHIFT 8 | ||
2004 | #define RBR_CFIG_B_VLD0 0x0000000000000080ULL | ||
2005 | #define RBR_CFIG_B_BUFSZ0 0x0000000000000003ULL | ||
2006 | #define RBR_CFIG_B_BUFSZ0_SHIFT 0 | ||
2007 | |||
2008 | #define RBR_BLKSIZE_4K 0x0 | ||
2009 | #define RBR_BLKSIZE_8K 0x1 | ||
2010 | #define RBR_BLKSIZE_16K 0x2 | ||
2011 | #define RBR_BLKSIZE_32K 0x3 | ||
2012 | #define RBR_BUFSZ2_2K 0x0 | ||
2013 | #define RBR_BUFSZ2_4K 0x1 | ||
2014 | #define RBR_BUFSZ2_8K 0x2 | ||
2015 | #define RBR_BUFSZ2_16K 0x3 | ||
2016 | #define RBR_BUFSZ1_1K 0x0 | ||
2017 | #define RBR_BUFSZ1_2K 0x1 | ||
2018 | #define RBR_BUFSZ1_4K 0x2 | ||
2019 | #define RBR_BUFSZ1_8K 0x3 | ||
2020 | #define RBR_BUFSZ0_256 0x0 | ||
2021 | #define RBR_BUFSZ0_512 0x1 | ||
2022 | #define RBR_BUFSZ0_1K 0x2 | ||
2023 | #define RBR_BUFSZ0_2K 0x3 | ||
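/* Illustrative sketch: RBR_CFIG_B packs the block-size encoding plus
 * up to three buffer sizes carved out of each block, each gated by its
 * VLDn bit.  This example picks 8K blocks split into 2K, 1K and
 * 256-byte buffers, all three enabled.
 */
static inline u64 example_rbr_cfig_b(void)
{
	return ((u64)RBR_BLKSIZE_8K << RBR_CFIG_B_BLKSIZE_SHIFT) |
	       RBR_CFIG_B_VLD2 |
	       ((u64)RBR_BUFSZ2_2K << RBR_CFIG_B_BUFSZ2_SHIFT) |
	       RBR_CFIG_B_VLD1 |
	       ((u64)RBR_BUFSZ1_1K << RBR_CFIG_B_BUFSZ1_SHIFT) |
	       RBR_CFIG_B_VLD0 |
	       ((u64)RBR_BUFSZ0_256 << RBR_CFIG_B_BUFSZ0_SHIFT);
}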
2024 | |||
2025 | #define RBR_KICK(IDX) (DMC + 0x00020UL + (IDX) * 0x200UL) | ||
2026 | #define RBR_KICK_BKADD 0x000000000000ffffULL | ||
2027 | |||
2028 | #define RBR_STAT(IDX) (DMC + 0x00028UL + (IDX) * 0x200UL) | ||
2029 | #define RBR_STAT_QLEN 0x000000000000ffffULL | ||
2030 | |||
2031 | #define RBR_HDH(IDX) (DMC + 0x00030UL + (IDX) * 0x200UL) | ||
2032 | #define RBR_HDH_HEAD_H 0x0000000000000fffULL | ||
2033 | |||
2034 | #define RBR_HDL(IDX) (DMC + 0x00038UL + (IDX) * 0x200UL) | ||
2035 | #define RBR_HDL_HEAD_L 0x00000000fffffffcULL | ||
2036 | |||
2037 | #define RCRCFIG_A(IDX) (DMC + 0x00040UL + (IDX) * 0x200UL) | ||
2038 | #define RCRCFIG_A_LEN 0xffff000000000000ULL | ||
2039 | #define RCRCFIG_A_LEN_SHIFT 48 | ||
2040 | #define RCRCFIG_A_STADDR_BASE 0x00000ffffff80000ULL | ||
2041 | #define RCRCFIG_A_STADDR 0x000000000007ffc0ULL | ||
2042 | |||
2043 | #define RCRCFIG_B(IDX) (DMC + 0x00048UL + (IDX) * 0x200UL) | ||
2044 | #define RCRCFIG_B_PTHRES 0x00000000ffff0000ULL | ||
2045 | #define RCRCFIG_B_PTHRES_SHIFT 16 | ||
2046 | #define RCRCFIG_B_ENTOUT 0x0000000000008000ULL | ||
2047 | #define RCRCFIG_B_TIMEOUT 0x000000000000003fULL | ||
2048 | #define RCRCFIG_B_TIMEOUT_SHIFT 0 | ||
2049 | |||
2050 | #define RCRSTAT_A(IDX) (DMC + 0x00050UL + (IDX) * 0x200UL) | ||
2051 | #define RCRSTAT_A_QLEN 0x000000000000ffffULL | ||
2052 | |||
2053 | #define RCRSTAT_B(IDX) (DMC + 0x00058UL + (IDX) * 0x200UL) | ||
2054 | #define RCRSTAT_B_TIPTR_H 0x0000000000000fffULL | ||
2055 | |||
2056 | #define RCRSTAT_C(IDX) (DMC + 0x00060UL + (IDX) * 0x200UL) | ||
2057 | #define RCRSTAT_C_TIPTR_L 0x00000000fffffff8ULL | ||
2058 | |||
2059 | #define RX_DMA_CTL_STAT(IDX) (DMC + 0x00070UL + (IDX) * 0x200UL) | ||
2060 | #define RX_DMA_CTL_STAT_RBR_TMOUT 0x0020000000000000ULL | ||
2061 | #define RX_DMA_CTL_STAT_RSP_CNT_ERR 0x0010000000000000ULL | ||
2062 | #define RX_DMA_CTL_STAT_BYTE_EN_BUS 0x0008000000000000ULL | ||
2063 | #define RX_DMA_CTL_STAT_RSP_DAT_ERR 0x0004000000000000ULL | ||
2064 | #define RX_DMA_CTL_STAT_RCR_ACK_ERR 0x0002000000000000ULL | ||
2065 | #define RX_DMA_CTL_STAT_DC_FIFO_ERR 0x0001000000000000ULL | ||
2066 | #define RX_DMA_CTL_STAT_MEX 0x0000800000000000ULL | ||
2067 | #define RX_DMA_CTL_STAT_RCRTHRES 0x0000400000000000ULL | ||
2068 | #define RX_DMA_CTL_STAT_RCRTO 0x0000200000000000ULL | ||
2069 | #define RX_DMA_CTL_STAT_RCR_SHA_PAR 0x0000100000000000ULL | ||
2070 | #define RX_DMA_CTL_STAT_RBR_PRE_PAR 0x0000080000000000ULL | ||
2071 | #define RX_DMA_CTL_STAT_PORT_DROP_PKT 0x0000040000000000ULL | ||
2072 | #define RX_DMA_CTL_STAT_WRED_DROP 0x0000020000000000ULL | ||
2073 | #define RX_DMA_CTL_STAT_RBR_PRE_EMTY 0x0000010000000000ULL | ||
2074 | #define RX_DMA_CTL_STAT_RCRSHADOW_FULL 0x0000008000000000ULL | ||
2075 | #define RX_DMA_CTL_STAT_CONFIG_ERR 0x0000004000000000ULL | ||
2076 | #define RX_DMA_CTL_STAT_RCRINCON 0x0000002000000000ULL | ||
2077 | #define RX_DMA_CTL_STAT_RCRFULL 0x0000001000000000ULL | ||
2078 | #define RX_DMA_CTL_STAT_RBR_EMPTY 0x0000000800000000ULL | ||
2079 | #define RX_DMA_CTL_STAT_RBRFULL 0x0000000400000000ULL | ||
2080 | #define RX_DMA_CTL_STAT_RBRLOGPAGE 0x0000000200000000ULL | ||
2081 | #define RX_DMA_CTL_STAT_CFIGLOGPAGE 0x0000000100000000ULL | ||
2082 | #define RX_DMA_CTL_STAT_PTRREAD 0x00000000ffff0000ULL | ||
2083 | #define RX_DMA_CTL_STAT_PTRREAD_SHIFT 16 | ||
2084 | #define RX_DMA_CTL_STAT_PKTREAD 0x000000000000ffffULL | ||
2085 | #define RX_DMA_CTL_STAT_PKTREAD_SHIFT 0 | ||
2086 | |||
2087 | #define RX_DMA_CTL_STAT_CHAN_FATAL (RX_DMA_CTL_STAT_RBR_TMOUT | \ | ||
2088 | RX_DMA_CTL_STAT_RSP_CNT_ERR | \ | ||
2089 | RX_DMA_CTL_STAT_BYTE_EN_BUS | \ | ||
2090 | RX_DMA_CTL_STAT_RSP_DAT_ERR | \ | ||
2091 | RX_DMA_CTL_STAT_RCR_ACK_ERR | \ | ||
2092 | RX_DMA_CTL_STAT_RCR_SHA_PAR | \ | ||
2093 | RX_DMA_CTL_STAT_RBR_PRE_PAR | \ | ||
2094 | RX_DMA_CTL_STAT_CONFIG_ERR | \ | ||
2095 | RX_DMA_CTL_STAT_RCRINCON | \ | ||
2096 | RX_DMA_CTL_STAT_RCRFULL | \ | ||
2097 | RX_DMA_CTL_STAT_RBRFULL | \ | ||
2098 | RX_DMA_CTL_STAT_RBRLOGPAGE | \ | ||
2099 | RX_DMA_CTL_STAT_CFIGLOGPAGE) | ||
2100 | |||
2101 | #define RX_DMA_CTL_STAT_PORT_FATAL (RX_DMA_CTL_STAT_DC_FIFO_ERR) | ||
2102 | |||
2103 | #define RX_DMA_CTL_WRITE_CLEAR_ERRS (RX_DMA_CTL_STAT_RBR_EMPTY | \ | ||
2104 | RX_DMA_CTL_STAT_RCRSHADOW_FULL | \ | ||
2105 | RX_DMA_CTL_STAT_RBR_PRE_EMTY | \ | ||
2106 | RX_DMA_CTL_STAT_WRED_DROP | \ | ||
2107 | RX_DMA_CTL_STAT_PORT_DROP_PKT | \ | ||
2108 | RX_DMA_CTL_STAT_RCRTO | \ | ||
2109 | RX_DMA_CTL_STAT_RCRTHRES | \ | ||
2110 | RX_DMA_CTL_STAT_DC_FIFO_ERR) | ||
2111 | |||
2112 | #define RCR_FLSH(IDX) (DMC + 0x00078UL + (IDX) * 0x200UL) | ||
2113 | #define RCR_FLSH_FLSH 0x0000000000000001ULL | ||
2114 | |||
2115 | #define RXMISC(IDX) (DMC + 0x00090UL + (IDX) * 0x200UL) | ||
2116 | #define RXMISC_OFLOW 0x0000000000010000ULL | ||
2117 | #define RXMISC_COUNT 0x000000000000ffffULL | ||
2118 | |||
2119 | #define RX_DMA_CTL_STAT_DBG(IDX) (DMC + 0x00098UL + (IDX) * 0x200UL) | ||
2120 | #define RX_DMA_CTL_STAT_DBG_RBR_TMOUT 0x0020000000000000ULL | ||
2121 | #define RX_DMA_CTL_STAT_DBG_RSP_CNT_ERR 0x0010000000000000ULL | ||
2122 | #define RX_DMA_CTL_STAT_DBG_BYTE_EN_BUS 0x0008000000000000ULL | ||
2123 | #define RX_DMA_CTL_STAT_DBG_RSP_DAT_ERR 0x0004000000000000ULL | ||
2124 | #define RX_DMA_CTL_STAT_DBG_RCR_ACK_ERR 0x0002000000000000ULL | ||
2125 | #define RX_DMA_CTL_STAT_DBG_DC_FIFO_ERR 0x0001000000000000ULL | ||
2126 | #define RX_DMA_CTL_STAT_DBG_MEX 0x0000800000000000ULL | ||
2127 | #define RX_DMA_CTL_STAT_DBG_RCRTHRES 0x0000400000000000ULL | ||
2128 | #define RX_DMA_CTL_STAT_DBG_RCRTO 0x0000200000000000ULL | ||
2129 | #define RX_DMA_CTL_STAT_DBG_RCR_SHA_PAR 0x0000100000000000ULL | ||
2130 | #define RX_DMA_CTL_STAT_DBG_RBR_PRE_PAR 0x0000080000000000ULL | ||
2131 | #define RX_DMA_CTL_STAT_DBG_PORT_DROP_PKT 0x0000040000000000ULL | ||
2132 | #define RX_DMA_CTL_STAT_DBG_WRED_DROP 0x0000020000000000ULL | ||
2133 | #define RX_DMA_CTL_STAT_DBG_RBR_PRE_EMTY 0x0000010000000000ULL | ||
2134 | #define RX_DMA_CTL_STAT_DBG_RCRSHADOW_FULL 0x0000008000000000ULL | ||
2135 | #define RX_DMA_CTL_STAT_DBG_CONFIG_ERR 0x0000004000000000ULL | ||
2136 | #define RX_DMA_CTL_STAT_DBG_RCRINCON 0x0000002000000000ULL | ||
2137 | #define RX_DMA_CTL_STAT_DBG_RCRFULL 0x0000001000000000ULL | ||
2138 | #define RX_DMA_CTL_STAT_DBG_RBR_EMPTY 0x0000000800000000ULL | ||
2139 | #define RX_DMA_CTL_STAT_DBG_RBRFULL 0x0000000400000000ULL | ||
2140 | #define RX_DMA_CTL_STAT_DBG_RBRLOGPAGE 0x0000000200000000ULL | ||
2141 | #define RX_DMA_CTL_STAT_DBG_CFIGLOGPAGE 0x0000000100000000ULL | ||
2142 | #define RX_DMA_CTL_STAT_DBG_PTRREAD 0x00000000ffff0000ULL | ||
2143 | #define RX_DMA_CTL_STAT_DBG_PKTREAD 0x000000000000ffffULL | ||
2144 | |||
2145 | #define RX_DMA_ENT_MSK(IDX) (DMC + 0x00068UL + (IDX) * 0x200UL) | ||
2146 | #define RX_DMA_ENT_MSK_RBR_TMOUT 0x0000000000200000ULL | ||
2147 | #define RX_DMA_ENT_MSK_RSP_CNT_ERR 0x0000000000100000ULL | ||
2148 | #define RX_DMA_ENT_MSK_BYTE_EN_BUS 0x0000000000080000ULL | ||
2149 | #define RX_DMA_ENT_MSK_RSP_DAT_ERR 0x0000000000040000ULL | ||
2150 | #define RX_DMA_ENT_MSK_RCR_ACK_ERR 0x0000000000020000ULL | ||
2151 | #define RX_DMA_ENT_MSK_DC_FIFO_ERR 0x0000000000010000ULL | ||
2152 | #define RX_DMA_ENT_MSK_RCRTHRES 0x0000000000004000ULL | ||
2153 | #define RX_DMA_ENT_MSK_RCRTO 0x0000000000002000ULL | ||
2154 | #define RX_DMA_ENT_MSK_RCR_SHA_PAR 0x0000000000001000ULL | ||
2155 | #define RX_DMA_ENT_MSK_RBR_PRE_PAR 0x0000000000000800ULL | ||
2156 | #define RX_DMA_ENT_MSK_PORT_DROP_PKT 0x0000000000000400ULL | ||
2157 | #define RX_DMA_ENT_MSK_WRED_DROP 0x0000000000000200ULL | ||
2158 | #define RX_DMA_ENT_MSK_RBR_PRE_EMTY 0x0000000000000100ULL | ||
2159 | #define RX_DMA_ENT_MSK_RCR_SHADOW_FULL 0x0000000000000080ULL | ||
2160 | #define RX_DMA_ENT_MSK_CONFIG_ERR 0x0000000000000040ULL | ||
2161 | #define RX_DMA_ENT_MSK_RCRINCON 0x0000000000000020ULL | ||
2162 | #define RX_DMA_ENT_MSK_RCRFULL 0x0000000000000010ULL | ||
2163 | #define RX_DMA_ENT_MSK_RBR_EMPTY 0x0000000000000008ULL | ||
2164 | #define RX_DMA_ENT_MSK_RBRFULL 0x0000000000000004ULL | ||
2165 | #define RX_DMA_ENT_MSK_RBRLOGPAGE 0x0000000000000002ULL | ||
2166 | #define RX_DMA_ENT_MSK_CFIGLOGPAGE 0x0000000000000001ULL | ||
2167 | #define RX_DMA_ENT_MSK_ALL 0x00000000003f7fffULL | ||
2168 | |||
2169 | #define TX_RNG_CFIG(IDX) (DMC + 0x40000UL + (IDX) * 0x200UL) | ||
2170 | #define TX_RNG_CFIG_LEN 0x1fff000000000000ULL | ||
2171 | #define TX_RNG_CFIG_LEN_SHIFT 48 | ||
2172 | #define TX_RNG_CFIG_STADDR_BASE 0x00000ffffff80000ULL | ||
2173 | #define TX_RNG_CFIG_STADDR 0x000000000007ffc0ULL | ||
2174 | |||
2175 | #define TX_RING_HDL(IDX) (DMC + 0x40010UL + (IDX) * 0x200UL) | ||
2176 | #define TX_RING_HDL_WRAP 0x0000000000080000ULL | ||
2177 | #define TX_RING_HDL_HEAD 0x000000000007fff8ULL | ||
2178 | #define TX_RING_HDL_HEAD_SHIFT 3 | ||
2179 | |||
2180 | #define TX_RING_KICK(IDX) (DMC + 0x40018UL + (IDX) * 0x200UL) | ||
2181 | #define TX_RING_KICK_WRAP 0x0000000000080000ULL | ||
2182 | #define TX_RING_KICK_TAIL 0x000000000007fff8ULL | ||
2183 | |||
2184 | #define TX_ENT_MSK(IDX) (DMC + 0x40020UL + (IDX) * 0x200UL) | ||
2185 | #define TX_ENT_MSK_MK 0x0000000000008000ULL | ||
2186 | #define TX_ENT_MSK_MBOX_ERR 0x0000000000000080ULL | ||
2187 | #define TX_ENT_MSK_PKT_SIZE_ERR 0x0000000000000040ULL | ||
2188 | #define TX_ENT_MSK_TX_RING_OFLOW 0x0000000000000020ULL | ||
2189 | #define TX_ENT_MSK_PREF_BUF_ECC_ERR 0x0000000000000010ULL | ||
2190 | #define TX_ENT_MSK_NACK_PREF 0x0000000000000008ULL | ||
2191 | #define TX_ENT_MSK_NACK_PKT_RD 0x0000000000000004ULL | ||
2192 | #define TX_ENT_MSK_CONF_PART_ERR 0x0000000000000002ULL | ||
2193 | #define TX_ENT_MSK_PKT_PRT_ERR 0x0000000000000001ULL | ||
2194 | |||
2195 | #define TX_CS(IDX) (DMC + 0x40028UL + (IDX)*0x200UL) | ||
2196 | #define TX_CS_PKT_CNT 0x0fff000000000000ULL | ||
2197 | #define TX_CS_PKT_CNT_SHIFT 48 | ||
2198 | #define TX_CS_LASTMARK 0x00000fff00000000ULL | ||
2199 | #define TX_CS_LASTMARK_SHIFT 32 | ||
2200 | #define TX_CS_RST 0x0000000080000000ULL | ||
2201 | #define TX_CS_RST_STATE 0x0000000040000000ULL | ||
2202 | #define TX_CS_MB 0x0000000020000000ULL | ||
2203 | #define TX_CS_STOP_N_GO 0x0000000010000000ULL | ||
2204 | #define TX_CS_SNG_STATE 0x0000000008000000ULL | ||
2205 | #define TX_CS_MK 0x0000000000008000ULL | ||
2206 | #define TX_CS_MMK 0x0000000000004000ULL | ||
2207 | #define TX_CS_MBOX_ERR 0x0000000000000080ULL | ||
2208 | #define TX_CS_PKT_SIZE_ERR 0x0000000000000040ULL | ||
2209 | #define TX_CS_TX_RING_OFLOW 0x0000000000000020ULL | ||
2210 | #define TX_CS_PREF_BUF_PAR_ERR 0x0000000000000010ULL | ||
2211 | #define TX_CS_NACK_PREF 0x0000000000000008ULL | ||
2212 | #define TX_CS_NACK_PKT_RD 0x0000000000000004ULL | ||
2213 | #define TX_CS_CONF_PART_ERR 0x0000000000000002ULL | ||
2214 | #define TX_CS_PKT_PRT_ERR 0x0000000000000001ULL | ||
2215 | |||
2216 | #define TXDMA_MBH(IDX) (DMC + 0x40030UL + (IDX) * 0x200UL) | ||
2217 | #define TXDMA_MBH_MBADDR 0x0000000000000fffULL | ||
2218 | |||
2219 | #define TXDMA_MBL(IDX) (DMC + 0x40038UL + (IDX) * 0x200UL) | ||
2220 | #define TXDMA_MBL_MBADDR 0x00000000ffffffc0ULL | ||
2221 | |||
2222 | #define TX_DMA_PRE_ST(IDX) (DMC + 0x40040UL + (IDX) * 0x200UL) | ||
2223 | #define TX_DMA_PRE_ST_SHADOW_HD 0x000000000007ffffULL | ||
2224 | |||
2225 | #define TX_RNG_ERR_LOGH(IDX) (DMC + 0x40048UL + (IDX) * 0x200UL) | ||
2226 | #define TX_RNG_ERR_LOGH_ERR 0x0000000080000000ULL | ||
2227 | #define TX_RNG_ERR_LOGH_MERR 0x0000000040000000ULL | ||
2228 | #define TX_RNG_ERR_LOGH_ERRCODE 0x0000000038000000ULL | ||
2229 | #define TX_RNG_ERR_LOGH_ERRADDR 0x0000000000000fffULL | ||
2230 | |||
2231 | #define TX_RNG_ERR_LOGL(IDX) (DMC + 0x40050UL + (IDX) * 0x200UL) | ||
2232 | #define TX_RNG_ERR_LOGL_ERRADDR 0x00000000ffffffffULL | ||
2233 | |||
2234 | #define TDMC_INTR_DBG(IDX) (DMC + 0x40060UL + (IDX) * 0x200UL) | ||
2235 | #define TDMC_INTR_DBG_MK 0x0000000000008000ULL | ||
2236 | #define TDMC_INTR_DBG_MBOX_ERR 0x0000000000000080ULL | ||
2237 | #define TDMC_INTR_DBG_PKT_SIZE_ERR 0x0000000000000040ULL | ||
2238 | #define TDMC_INTR_DBG_TX_RING_OFLOW 0x0000000000000020ULL | ||
2239 | #define TDMC_INTR_DBG_PREF_BUF_PAR_ERR 0x0000000000000010ULL | ||
2240 | #define TDMC_INTR_DBG_NACK_PREF 0x0000000000000008ULL | ||
2241 | #define TDMC_INTR_DBG_NACK_PKT_RD 0x0000000000000004ULL | ||
2242 | #define TDMC_INTR_DBG_CONF_PART_ERR 0x0000000000000002ULL | ||
2243 | #define TDMC_INTR_DBG_PKT_PART_ERR 0x0000000000000001ULL | ||
2244 | |||
2245 | #define TX_CS_DBG(IDX) (DMC + 0x40068UL + (IDX) * 0x200UL) | ||
2246 | #define TX_CS_DBG_PKT_CNT 0x0fff000000000000ULL | ||
2247 | |||
2248 | #define TDMC_INJ_PAR_ERR(IDX) (DMC + 0x45040UL + (IDX) * 0x200UL) | ||
2249 | #define TDMC_INJ_PAR_ERR_VAL 0x000000000000ffffULL | ||
2250 | |||
2251 | #define TDMC_DBG_SEL(IDX) (DMC + 0x45080UL + (IDX) * 0x200UL) | ||
2252 | #define TDMC_DBG_SEL_DBG_SEL 0x000000000000003fULL | ||
2253 | |||
2254 | #define TDMC_TRAINING_VECTOR(IDX) (DMC + 0x45088UL + (IDX) * 0x200UL) | ||
2255 | #define TDMC_TRAINING_VECTOR_VEC 0x00000000ffffffffULL | ||
2256 | |||
2257 | #define TXC_DMA_MAX(CHAN) (FZC_TXC + 0x00000UL + (CHAN)*0x1000UL) | ||
2258 | #define TXC_DMA_MAX_LEN(CHAN) (FZC_TXC + 0x00008UL + (CHAN)*0x1000UL) | ||
2259 | |||
2260 | #define TXC_CONTROL (FZC_TXC + 0x20000UL) | ||
2261 | #define TXC_CONTROL_ENABLE 0x0000000000000010ULL | ||
2262 | #define TXC_CONTROL_PORT_ENABLE(X) (1 << (X)) | ||
2263 | |||
2264 | #define TXC_TRAINING_VEC (FZC_TXC + 0x20008UL) | ||
2265 | #define TXC_TRAINING_VEC_MASK 0x00000000ffffffffULL | ||
2266 | |||
2267 | #define TXC_DEBUG (FZC_TXC + 0x20010UL) | ||
2268 | #define TXC_DEBUG_SELECT 0x000000000000003fULL | ||
2269 | |||
2270 | #define TXC_MAX_REORDER (FZC_TXC + 0x20018UL) | ||
2271 | #define TXC_MAX_REORDER_PORT3 0x000000000f000000ULL | ||
2272 | #define TXC_MAX_REORDER_PORT2 0x00000000000f0000ULL | ||
2273 | #define TXC_MAX_REORDER_PORT1 0x0000000000000f00ULL | ||
2274 | #define TXC_MAX_REORDER_PORT0 0x000000000000000fULL | ||
2275 | |||
2276 | #define TXC_PORT_CTL(PORT) (FZC_TXC + 0x20020UL + (PORT)*0x100UL) | ||
2277 | #define TXC_PORT_CTL_CLR_ALL_STAT 0x0000000000000001ULL | ||
2278 | |||
2279 | #define TXC_PKT_STUFFED(PORT) (FZC_TXC + 0x20030UL + (PORT)*0x100UL) | ||
2280 | #define TXC_PKT_STUFFED_PP_REORDER 0x00000000ffff0000ULL | ||
2281 | #define TXC_PKT_STUFFED_PP_PACKETASSY 0x000000000000ffffULL | ||
2282 | |||
2283 | #define TXC_PKT_XMIT(PORT) (FZC_TXC + 0x20038UL + (PORT)*0x100UL) | ||
2284 | #define TXC_PKT_XMIT_BYTES 0x00000000ffff0000ULL | ||
2285 | #define TXC_PKT_XMIT_PKTS 0x000000000000ffffULL | ||
2286 | |||
2287 | #define TXC_ROECC_CTL(PORT) (FZC_TXC + 0x20040UL + (PORT)*0x100UL) | ||
2288 | #define TXC_ROECC_CTL_DISABLE_UE 0x0000000080000000ULL | ||
2289 | #define TXC_ROECC_CTL_DBL_BIT_ERR 0x0000000000020000ULL | ||
2290 | #define TXC_ROECC_CTL_SNGL_BIT_ERR 0x0000000000010000ULL | ||
2291 | #define TXC_ROECC_CTL_ALL_PKTS 0x0000000000000400ULL | ||
2292 | #define TXC_ROECC_CTL_ALT_PKTS 0x0000000000000200ULL | ||
2293 | #define TXC_ROECC_CTL_ONE_PKT_ONLY 0x0000000000000100ULL | ||
2294 | #define TXC_ROECC_CTL_LST_PKT_LINE 0x0000000000000004ULL | ||
2295 | #define TXC_ROECC_CTL_2ND_PKT_LINE 0x0000000000000002ULL | ||
2296 | #define TXC_ROECC_CTL_1ST_PKT_LINE 0x0000000000000001ULL | ||
2297 | |||
2298 | #define TXC_ROECC_ST(PORT) (FZC_TXC + 0x20048UL + (PORT)*0x100UL) | ||
2299 | #define TXC_ROECC_CLR_ST 0x0000000080000000ULL | ||
2300 | #define TXC_ROECC_CE 0x0000000000020000ULL | ||
2301 | #define TXC_ROECC_UE 0x0000000000010000ULL | ||
2302 | #define TXC_ROECC_ST_ECC_ADDR 0x00000000000003ffULL | ||
2303 | |||
2304 | #define TXC_RO_DATA0(PORT) (FZC_TXC + 0x20050UL + (PORT)*0x100UL) | ||
2305 | #define TXC_RO_DATA0_DATA0 0x00000000ffffffffULL /* bits 31:0 */ | ||
2306 | |||
2307 | #define TXC_RO_DATA1(PORT) (FZC_TXC + 0x20058UL + (PORT)*0x100UL) | ||
2308 | #define TXC_RO_DATA1_DATA1 0x00000000ffffffffULL /* bits 63:32 */ | ||
2309 | |||
2310 | #define TXC_RO_DATA2(PORT) (FZC_TXC + 0x20060UL + (PORT)*0x100UL) | ||
2311 | #define TXC_RO_DATA2_DATA2 0x00000000ffffffffULL /* bits 95:64 */ | ||
2312 | |||
2313 | #define TXC_RO_DATA3(PORT) (FZC_TXC + 0x20068UL + (PORT)*0x100UL) | ||
2314 | #define TXC_RO_DATA3_DATA3 0x00000000ffffffffULL /* bits 127:96 */ | ||
2315 | |||
2316 | #define TXC_RO_DATA4(PORT) (FZC_TXC + 0x20070UL + (PORT)*0x100UL) | ||
2317 | #define TXC_RO_DATA4_DATA4 0x0000000000ffffffULL /* bits 151:128 */ | ||
2318 | |||
2319 | #define TXC_SFECC_CTL(PORT) (FZC_TXC + 0x20078UL + (PORT)*0x100UL) | ||
2320 | #define TXC_SFECC_CTL_DISABLE_UE 0x0000000080000000ULL | ||
2321 | #define TXC_SFECC_CTL_DBL_BIT_ERR 0x0000000000020000ULL | ||
2322 | #define TXC_SFECC_CTL_SNGL_BIT_ERR 0x0000000000010000ULL | ||
2323 | #define TXC_SFECC_CTL_ALL_PKTS 0x0000000000000400ULL | ||
2324 | #define TXC_SFECC_CTL_ALT_PKTS 0x0000000000000200ULL | ||
2325 | #define TXC_SFECC_CTL_ONE_PKT_ONLY 0x0000000000000100ULL | ||
2326 | #define TXC_SFECC_CTL_LST_PKT_LINE 0x0000000000000004ULL | ||
2327 | #define TXC_SFECC_CTL_2ND_PKT_LINE 0x0000000000000002ULL | ||
2328 | #define TXC_SFECC_CTL_1ST_PKT_LINE 0x0000000000000001ULL | ||
2329 | |||
2330 | #define TXC_SFECC_ST(PORT) (FZC_TXC + 0x20080UL + (PORT)*0x100UL) | ||
2331 | #define TXC_SFECC_ST_CLR_ST 0x0000000080000000ULL | ||
2332 | #define TXC_SFECC_ST_CE 0x0000000000020000ULL | ||
2333 | #define TXC_SFECC_ST_UE 0x0000000000010000ULL | ||
2334 | #define TXC_SFECC_ST_ECC_ADDR 0x00000000000003ffULL | ||
2335 | |||
2336 | #define TXC_SF_DATA0(PORT) (FZC_TXC + 0x20088UL + (PORT)*0x100UL) | ||
2337 | #define TXC_SF_DATA0_DATA0 0x00000000ffffffffULL /* bits 31:0 */ | ||
2338 | |||
2339 | #define TXC_SF_DATA1(PORT) (FZC_TXC + 0x20090UL + (PORT)*0x100UL) | ||
2340 | #define TXC_SF_DATA1_DATA1 0x00000000ffffffffULL /* bits 63:32 */ | ||
2341 | |||
2342 | #define TXC_SF_DATA2(PORT) (FZC_TXC + 0x20098UL + (PORT)*0x100UL) | ||
2343 | #define TXC_SF_DATA2_DATA2 0x00000000ffffffffULL /* bits 95:64 */ | ||
2344 | |||
2345 | #define TXC_SF_DATA3(PORT) (FZC_TXC + 0x200a0UL + (PORT)*0x100UL) | ||
2346 | #define TXC_SF_DATA3_DATA3 0x00000000ffffffffULL /* bits 127:96 */ | ||
2347 | |||
2348 | #define TXC_SF_DATA4(PORT) (FZC_TXC + 0x200a8UL + (PORT)*0x100UL) | ||
2349 | #define TXC_SF_DATA4_DATA4 0x0000000000ffffffULL /* bits 151:128 */ | ||
2350 | |||
2351 | #define TXC_RO_TIDS(PORT) (FZC_TXC + 0x200b0UL + (PORT)*0x100UL) | ||
2352 | #define TXC_RO_TIDS_IN_USE 0x00000000ffffffffULL | ||
2353 | |||
2354 | #define TXC_RO_STATE0(PORT) (FZC_TXC + 0x200b8UL + (PORT)*0x100UL) | ||
2355 | #define TXC_RO_STATE0_DUPLICATE_TID 0x00000000ffffffffULL | ||
2356 | |||
2357 | #define TXC_RO_STATE1(PORT) (FZC_TXC + 0x200c0UL + (PORT)*0x100UL) | ||
2358 | #define TXC_RO_STATE1_UNUSED_TID 0x00000000ffffffffULL | ||
2359 | |||
2360 | #define TXC_RO_STATE2(PORT) (FZC_TXC + 0x200c8UL + (PORT)*0x100UL) | ||
2361 | #define TXC_RO_STATE2_TRANS_TIMEOUT 0x00000000ffffffffULL | ||
2362 | |||
2363 | #define TXC_RO_STATE3(PORT) (FZC_TXC + 0x200d0UL + (PORT)*0x100UL) | ||
2364 | #define TXC_RO_STATE3_ENAB_SPC_WMARK 0x0000000080000000ULL | ||
2365 | #define TXC_RO_STATE3_RO_SPC_WMARK 0x000000007fe00000ULL | ||
2366 | #define TXC_RO_STATE3_ROFIFO_SPC_AVAIL 0x00000000001ff800ULL | ||
2367 | #define TXC_RO_STATE3_ENAB_RO_WMARK 0x0000000000000100ULL | ||
2368 | #define TXC_RO_STATE3_HIGH_RO_USED 0x00000000000000f0ULL | ||
2369 | #define TXC_RO_STATE3_NUM_RO_USED 0x000000000000000fULL | ||
2370 | |||
2371 | #define TXC_RO_CTL(PORT) (FZC_TXC + 0x200d8UL + (PORT)*0x100UL) | ||
2372 | #define TXC_RO_CTL_CLR_FAIL_STATE 0x0000000080000000ULL | ||
2373 | #define TXC_RO_CTL_RO_ADDR 0x000000000f000000ULL | ||
2374 | #define TXC_RO_CTL_ADDR_FAILED 0x0000000000400000ULL | ||
2375 | #define TXC_RO_CTL_DMA_FAILED 0x0000000000200000ULL | ||
2376 | #define TXC_RO_CTL_LEN_FAILED 0x0000000000100000ULL | ||
2377 | #define TXC_RO_CTL_CAPT_ADDR_FAILED 0x0000000000040000ULL | ||
2378 | #define TXC_RO_CTL_CAPT_DMA_FAILED 0x0000000000020000ULL | ||
2379 | #define TXC_RO_CTL_CAPT_LEN_FAILED 0x0000000000010000ULL | ||
2380 | #define TXC_RO_CTL_RO_STATE_RD_DONE 0x0000000000000080ULL | ||
2381 | #define TXC_RO_CTL_RO_STATE_WR_DONE 0x0000000000000040ULL | ||
2382 | #define TXC_RO_CTL_RO_STATE_RD 0x0000000000000020ULL | ||
2383 | #define TXC_RO_CTL_RO_STATE_WR 0x0000000000000010ULL | ||
2384 | #define TXC_RO_CTL_RO_STATE_ADDR 0x000000000000000fULL | ||
2385 | |||
2386 | #define TXC_RO_ST_DATA0(PORT) (FZC_TXC + 0x200e0UL + (PORT)*0x100UL) | ||
2387 | #define TXC_RO_ST_DATA0_DATA0 0x00000000ffffffffULL | ||
2388 | |||
2389 | #define TXC_RO_ST_DATA1(PORT) (FZC_TXC + 0x200e8UL + (PORT)*0x100UL) | ||
2390 | #define TXC_RO_ST_DATA1_DATA1 0x00000000ffffffffULL | ||
2391 | |||
2392 | #define TXC_RO_ST_DATA2(PORT) (FZC_TXC + 0x200f0UL + (PORT)*0x100UL) | ||
2393 | #define TXC_RO_ST_DATA2_DATA2 0x00000000ffffffffULL | ||
2394 | |||
2395 | #define TXC_RO_ST_DATA3(PORT) (FZC_TXC + 0x200f8UL + (PORT)*0x100UL) | ||
2396 | #define TXC_RO_ST_DATA3_DATA3 0x00000000ffffffffULL | ||
2397 | |||
2398 | #define TXC_PORT_PACKET_REQ(PORT) (FZC_TXC + 0x20100UL + (PORT)*0x100UL) | ||
2399 | #define TXC_PORT_PACKET_REQ_GATHER_REQ 0x00000000f0000000ULL | ||
2400 | #define TXC_PORT_PACKET_REQ_PKT_REQ 0x000000000fff0000ULL | ||
2401 | #define TXC_PORT_PACKET_REQ_PERR_ABRT 0x000000000000ffffULL | ||
2402 | |||
2403 | /* bits are same as TXC_INT_STAT */ | ||
2404 | #define TXC_INT_STAT_DBG (FZC_TXC + 0x20420UL) | ||
2405 | |||
2406 | #define TXC_INT_STAT (FZC_TXC + 0x20428UL) | ||
2407 | #define TXC_INT_STAT_VAL_SHIFT(PORT) ((PORT) * 8) | ||
2408 | #define TXC_INT_STAT_VAL(PORT) (0x3f << TXC_INT_STAT_VAL_SHIFT(PORT)) | ||
2409 | #define TXC_INT_STAT_SF_CE(PORT) (0x01 << TXC_INT_STAT_VAL_SHIFT(PORT)) | ||
2410 | #define TXC_INT_STAT_SF_UE(PORT) (0x02 << TXC_INT_STAT_VAL_SHIFT(PORT)) | ||
2411 | #define TXC_INT_STAT_RO_CE(PORT) (0x04 << TXC_INT_STAT_VAL_SHIFT(PORT)) | ||
2412 | #define TXC_INT_STAT_RO_UE(PORT) (0x08 << TXC_INT_STAT_VAL_SHIFT(PORT)) | ||
2413 | #define TXC_INT_STAT_REORDER_ERR(PORT) (0x10 << TXC_INT_STAT_VAL_SHIFT(PORT)) | ||
2414 | #define TXC_INT_STAT_PKTASM_DEAD(PORT) (0x20 << TXC_INT_STAT_VAL_SHIFT(PORT)) | ||
2415 | |||
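Each port owns an 8-bit lane in TXC_INT_STAT, so the per-port macros above shift a 6-bit pattern into place. A hedged sketch of testing one port's bits (fragment only; nr64() is the register-read helper defined in niu.c, and np/port are assumed in scope):

	/* Sketch: inspect one port's slice of the TXC interrupt status. */
	u64 stat = nr64(TXC_INT_STAT);
	if (stat & TXC_INT_STAT_RO_UE(port))
		;	/* uncorrectable ECC error in the reorder FIFO */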
2416 | #define TXC_INT_MASK (FZC_TXC + 0x20430UL) | ||
2417 | #define TXC_INT_MASK_VAL_SHIFT(PORT) ((PORT) * 8) | ||
2418 | #define TXC_INT_MASK_VAL(PORT) (0x3f << TXC_INT_MASK_VAL_SHIFT(PORT)) | ||
2419 | |||
2420 | #define TXC_INT_MASK_SF_CE 0x01 | ||
2421 | #define TXC_INT_MASK_SF_UE 0x02 | ||
2422 | #define TXC_INT_MASK_RO_CE 0x04 | ||
2423 | #define TXC_INT_MASK_RO_UE 0x08 | ||
2424 | #define TXC_INT_MASK_REORDER_ERR 0x10 | ||
2425 | #define TXC_INT_MASK_PKTASM_DEAD 0x20 | ||
2426 | #define TXC_INT_MASK_ALL 0x3f | ||
2427 | |||
2428 | #define TXC_PORT_DMA(IDX) (FZC_TXC + 0x20028UL + (IDX)*0x100UL) | ||
2429 | |||
2430 | #define ESPC_PIO_EN (FZC_PROM + 0x40000UL) | ||
2431 | #define ESPC_PIO_EN_ENABLE 0x0000000000000001ULL | ||
2432 | |||
2433 | #define ESPC_PIO_STAT (FZC_PROM + 0x40008UL) | ||
2434 | #define ESPC_PIO_STAT_READ_START 0x0000000080000000ULL | ||
2435 | #define ESPC_PIO_STAT_READ_END 0x0000000040000000ULL | ||
2436 | #define ESPC_PIO_STAT_WRITE_INIT 0x0000000020000000ULL | ||
2437 | #define ESPC_PIO_STAT_WRITE_END 0x0000000010000000ULL | ||
2438 | #define ESPC_PIO_STAT_ADDR 0x0000000003ffff00ULL | ||
2439 | #define ESPC_PIO_STAT_ADDR_SHIFT 8 | ||
2440 | #define ESPC_PIO_STAT_DATA 0x00000000000000ffULL | ||
2441 | #define ESPC_PIO_STAT_DATA_SHIFT 0 | ||
2442 | |||
2443 | #define ESPC_NCR(IDX) (FZC_PROM + 0x40020UL + (IDX)*0x8UL) | ||
2444 | #define ESPC_NCR_VAL 0x00000000ffffffffULL | ||
2445 | |||
2446 | #define ESPC_MAC_ADDR0 ESPC_NCR(0) | ||
2447 | #define ESPC_MAC_ADDR1 ESPC_NCR(1) | ||
2448 | #define ESPC_NUM_PORTS_MACS ESPC_NCR(2) | ||
2449 | #define ESPC_NUM_PORTS_MACS_VAL 0x00000000000000ffULL | ||
2450 | #define ESPC_MOD_STR_LEN ESPC_NCR(4) | ||
2451 | #define ESPC_MOD_STR_1 ESPC_NCR(5) | ||
2452 | #define ESPC_MOD_STR_2 ESPC_NCR(6) | ||
2453 | #define ESPC_MOD_STR_3 ESPC_NCR(7) | ||
2454 | #define ESPC_MOD_STR_4 ESPC_NCR(8) | ||
2455 | #define ESPC_MOD_STR_5 ESPC_NCR(9) | ||
2456 | #define ESPC_MOD_STR_6 ESPC_NCR(10) | ||
2457 | #define ESPC_MOD_STR_7 ESPC_NCR(11) | ||
2458 | #define ESPC_MOD_STR_8 ESPC_NCR(12) | ||
2459 | #define ESPC_BD_MOD_STR_LEN ESPC_NCR(13) | ||
2460 | #define ESPC_BD_MOD_STR_1 ESPC_NCR(14) | ||
2461 | #define ESPC_BD_MOD_STR_2 ESPC_NCR(15) | ||
2462 | #define ESPC_BD_MOD_STR_3 ESPC_NCR(16) | ||
2463 | #define ESPC_BD_MOD_STR_4 ESPC_NCR(17) | ||
2464 | |||
2465 | #define ESPC_PHY_TYPE ESPC_NCR(18) | ||
2466 | #define ESPC_PHY_TYPE_PORT0 0x00000000ff000000ULL | ||
2467 | #define ESPC_PHY_TYPE_PORT0_SHIFT 24 | ||
2468 | #define ESPC_PHY_TYPE_PORT1 0x0000000000ff0000ULL | ||
2469 | #define ESPC_PHY_TYPE_PORT1_SHIFT 16 | ||
2470 | #define ESPC_PHY_TYPE_PORT2 0x000000000000ff00ULL | ||
2471 | #define ESPC_PHY_TYPE_PORT2_SHIFT 8 | ||
2472 | #define ESPC_PHY_TYPE_PORT3 0x00000000000000ffULL | ||
2473 | #define ESPC_PHY_TYPE_PORT3_SHIFT 0 | ||
2474 | |||
2475 | #define ESPC_PHY_TYPE_1G_COPPER 3 | ||
2476 | #define ESPC_PHY_TYPE_1G_FIBER 2 | ||
2477 | #define ESPC_PHY_TYPE_10G_COPPER 1 | ||
2478 | #define ESPC_PHY_TYPE_10G_FIBER 0 | ||
2479 | |||
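As an aside, a minimal sketch (hypothetical helper, not part of this patch) of decoding one port's PHY type from an ESPC_PHY_TYPE value using the masks above:

	/* Sketch only: port 0 sits in bits 31:24, port 3 in bits 7:0. */
	static inline u8 espc_phy_type_of_port(u64 val, int port)
	{
		int shift = ESPC_PHY_TYPE_PORT0_SHIFT - (port * 8);

		return (val >> shift) & 0xff; /* e.g. ESPC_PHY_TYPE_10G_FIBER */
	}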
2480 | #define ESPC_MAX_FM_SZ ESPC_NCR(19) | ||
2481 | |||
2482 | #define ESPC_INTR_NUM ESPC_NCR(20) | ||
2483 | #define ESPC_INTR_NUM_PORT0 0x00000000ff000000ULL | ||
2484 | #define ESPC_INTR_NUM_PORT1 0x0000000000ff0000ULL | ||
2485 | #define ESPC_INTR_NUM_PORT2 0x000000000000ff00ULL | ||
2486 | #define ESPC_INTR_NUM_PORT3 0x00000000000000ffULL | ||
2487 | |||
2488 | #define ESPC_VER_IMGSZ ESPC_NCR(21) | ||
2489 | #define ESPC_VER_IMGSZ_IMGSZ 0x00000000ffff0000ULL | ||
2490 | #define ESPC_VER_IMGSZ_IMGSZ_SHIFT 16 | ||
2491 | #define ESPC_VER_IMGSZ_VER 0x000000000000ffffULL | ||
2492 | #define ESPC_VER_IMGSZ_VER_SHIFT 0 | ||
2493 | |||
2494 | #define ESPC_CHKSUM ESPC_NCR(22) | ||
2495 | #define ESPC_CHKSUM_SUM 0x00000000000000ffULL | ||
2496 | |||
2497 | #define ESPC_EEPROM_SIZE 0x100000 | ||
2498 | |||
2499 | #define CLASS_CODE_UNRECOG 0x00 | ||
2500 | #define CLASS_CODE_DUMMY1 0x01 | ||
2501 | #define CLASS_CODE_ETHERTYPE1 0x02 | ||
2502 | #define CLASS_CODE_ETHERTYPE2 0x03 | ||
2503 | #define CLASS_CODE_USER_PROG1 0x04 | ||
2504 | #define CLASS_CODE_USER_PROG2 0x05 | ||
2505 | #define CLASS_CODE_USER_PROG3 0x06 | ||
2506 | #define CLASS_CODE_USER_PROG4 0x07 | ||
2507 | #define CLASS_CODE_TCP_IPV4 0x08 | ||
2508 | #define CLASS_CODE_UDP_IPV4 0x09 | ||
2509 | #define CLASS_CODE_AH_ESP_IPV4 0x0a | ||
2510 | #define CLASS_CODE_SCTP_IPV4 0x0b | ||
2511 | #define CLASS_CODE_TCP_IPV6 0x0c | ||
2512 | #define CLASS_CODE_UDP_IPV6 0x0d | ||
2513 | #define CLASS_CODE_AH_ESP_IPV6 0x0e | ||
2514 | #define CLASS_CODE_SCTP_IPV6 0x0f | ||
2515 | #define CLASS_CODE_ARP 0x10 | ||
2516 | #define CLASS_CODE_RARP 0x11 | ||
2517 | #define CLASS_CODE_DUMMY2 0x12 | ||
2518 | #define CLASS_CODE_DUMMY3 0x13 | ||
2519 | #define CLASS_CODE_DUMMY4 0x14 | ||
2520 | #define CLASS_CODE_DUMMY5 0x15 | ||
2521 | #define CLASS_CODE_DUMMY6 0x16 | ||
2522 | #define CLASS_CODE_DUMMY7 0x17 | ||
2523 | #define CLASS_CODE_DUMMY8 0x18 | ||
2524 | #define CLASS_CODE_DUMMY9 0x19 | ||
2525 | #define CLASS_CODE_DUMMY10 0x1a | ||
2526 | #define CLASS_CODE_DUMMY11 0x1b | ||
2527 | #define CLASS_CODE_DUMMY12 0x1c | ||
2528 | #define CLASS_CODE_DUMMY13 0x1d | ||
2529 | #define CLASS_CODE_DUMMY14 0x1e | ||
2530 | #define CLASS_CODE_DUMMY15 0x1f | ||
2531 | |||
2532 | /* Logical devices and device groups */ | ||
2533 | #define LDN_RXDMA(CHAN) (0 + (CHAN)) | ||
2534 | #define LDN_RESV1(OFF) (16 + (OFF)) | ||
2535 | #define LDN_TXDMA(CHAN) (32 + (CHAN)) | ||
2536 | #define LDN_RESV2(OFF) (56 + (OFF)) | ||
2537 | #define LDN_MIF 63 | ||
2538 | #define LDN_MAC(PORT) (64 + (PORT)) | ||
2539 | #define LDN_DEVICE_ERROR 68 | ||
2540 | #define LDN_MAX LDN_DEVICE_ERROR | ||
2541 | |||
2542 | #define NIU_LDG_MIN 0 | ||
2543 | #define NIU_LDG_MAX 63 | ||
2544 | #define NIU_NUM_LDG 64 | ||
2545 | #define LDG_INVALID 0xff | ||
2546 | |||
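A sketch of how this numbering is typically consumed (hypothetical fragment; parent and chan assumed in scope): every logical device number (LDN) is routed to one of the 64 logical device groups via the parent's ldg_map declared further down in this header.

	/* Sketch: find the group servicing an RX DMA channel's interrupts. */
	u8 ldg = parent->ldg_map[LDN_RXDMA(chan)];
	if (ldg == LDG_INVALID)
		return -EINVAL;	/* channel not mapped to a group yet */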
2547 | /* PHY stuff */ | ||
2548 | #define NIU_PMA_PMD_DEV_ADDR 1 | ||
2549 | #define NIU_PCS_DEV_ADDR 3 | ||
2550 | |||
2551 | #define NIU_PHY_ID_MASK 0xfffff0f0 | ||
2552 | #define NIU_PHY_ID_BCM8704 0x00206030 | ||
2553 | #define NIU_PHY_ID_BCM8706 0x00206035 | ||
2554 | #define NIU_PHY_ID_BCM5464R 0x002060b0 | ||
2555 | #define NIU_PHY_ID_MRVL88X2011 0x01410020 | ||
2556 | |||
2557 | /* MRVL88X2011 register addresses */ | ||
2558 | #define MRVL88X2011_USER_DEV1_ADDR 1 | ||
2559 | #define MRVL88X2011_USER_DEV2_ADDR 2 | ||
2560 | #define MRVL88X2011_USER_DEV3_ADDR 3 | ||
2561 | #define MRVL88X2011_USER_DEV4_ADDR 4 | ||
2562 | #define MRVL88X2011_PMA_PMD_CTL_1 0x0000 | ||
2563 | #define MRVL88X2011_PMA_PMD_STATUS_1 0x0001 | ||
2564 | #define MRVL88X2011_10G_PMD_STATUS_2 0x0008 | ||
2565 | #define MRVL88X2011_10G_PMD_TX_DIS 0x0009 | ||
2566 | #define MRVL88X2011_10G_XGXS_LANE_STAT 0x0018 | ||
2567 | #define MRVL88X2011_GENERAL_CTL 0x8300 | ||
2568 | #define MRVL88X2011_LED_BLINK_CTL 0x8303 | ||
2569 | #define MRVL88X2011_LED_8_TO_11_CTL 0x8306 | ||
2570 | |||
2571 | /* MRVL88X2011 register control */ | ||
2572 | #define MRVL88X2011_ENA_XFPREFCLK 0x0001 | ||
2573 | #define MRVL88X2011_ENA_PMDTX 0x0000 | ||
2574 | #define MRVL88X2011_LOOPBACK 0x1 | ||
2575 | #define MRVL88X2011_LED_ACT 0x1 | ||
2576 | #define MRVL88X2011_LNK_STATUS_OK 0x4 | ||
2577 | #define MRVL88X2011_LED_BLKRATE_MASK 0x70 | ||
2578 | #define MRVL88X2011_LED_BLKRATE_034MS 0x0 | ||
2579 | #define MRVL88X2011_LED_BLKRATE_067MS 0x1 | ||
2580 | #define MRVL88X2011_LED_BLKRATE_134MS 0x2 | ||
2581 | #define MRVL88X2011_LED_BLKRATE_269MS 0x3 | ||
2582 | #define MRVL88X2011_LED_BLKRATE_538MS 0x4 | ||
2583 | #define MRVL88X2011_LED_CTL_OFF 0x0 | ||
2584 | #define MRVL88X2011_LED_CTL_PCS_ACT 0x5 | ||
2585 | #define MRVL88X2011_LED_CTL_MASK 0x7 | ||
2586 | #define MRVL88X2011_LED(n,v) ((v)<<((n)*4)) | ||
2587 | #define MRVL88X2011_LED_STAT(n,v) ((v)>>((n)*4)) | ||
2588 | |||
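For illustration (a sketch, not code from this patch), the LED helpers pack a 3-bit function code per LED into a control word:

	/* Sketch: point LED 2 at the PCS-activity function. */
	u16 ctl = 0;	/* hypothetical current MRVL88X2011_LED_8_TO_11_CTL value */
	ctl &= ~MRVL88X2011_LED(2, MRVL88X2011_LED_CTL_MASK);
	ctl |= MRVL88X2011_LED(2, MRVL88X2011_LED_CTL_PCS_ACT);
	/* Read back with MRVL88X2011_LED_STAT(2, ctl) & MRVL88X2011_LED_CTL_MASK. */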
2589 | #define BCM8704_PMA_PMD_DEV_ADDR 1 | ||
2590 | #define BCM8704_PCS_DEV_ADDR 2 | ||
2591 | #define BCM8704_USER_DEV3_ADDR 3 | ||
2592 | #define BCM8704_PHYXS_DEV_ADDR 4 | ||
2593 | #define BCM8704_USER_DEV4_ADDR 4 | ||
2594 | |||
2595 | #define BCM8704_PMD_RCV_SIGDET 0x000a | ||
2596 | #define PMD_RCV_SIGDET_LANE3 0x0010 | ||
2597 | #define PMD_RCV_SIGDET_LANE2 0x0008 | ||
2598 | #define PMD_RCV_SIGDET_LANE1 0x0004 | ||
2599 | #define PMD_RCV_SIGDET_LANE0 0x0002 | ||
2600 | #define PMD_RCV_SIGDET_GLOBAL 0x0001 | ||
2601 | |||
2602 | #define BCM8704_PCS_10G_R_STATUS 0x0020 | ||
2603 | #define PCS_10G_R_STATUS_LINKSTAT 0x1000 | ||
2604 | #define PCS_10G_R_STATUS_PRBS31_ABLE 0x0004 | ||
2605 | #define PCS_10G_R_STATUS_HI_BER 0x0002 | ||
2606 | #define PCS_10G_R_STATUS_BLK_LOCK 0x0001 | ||
2607 | |||
2608 | #define BCM8704_USER_CONTROL 0xc800 | ||
2609 | #define USER_CONTROL_OPTXENB_LVL 0x8000 | ||
2610 | #define USER_CONTROL_OPTXRST_LVL 0x4000 | ||
2611 | #define USER_CONTROL_OPBIASFLT_LVL 0x2000 | ||
2612 | #define USER_CONTROL_OBTMPFLT_LVL 0x1000 | ||
2613 | #define USER_CONTROL_OPPRFLT_LVL 0x0800 | ||
2614 | #define USER_CONTROL_OPTXFLT_LVL 0x0400 | ||
2615 | #define USER_CONTROL_OPRXLOS_LVL 0x0200 | ||
2616 | #define USER_CONTROL_OPRXFLT_LVL 0x0100 | ||
2617 | #define USER_CONTROL_OPTXON_LVL 0x0080 | ||
2618 | #define USER_CONTROL_RES1 0x007f | ||
2619 | #define USER_CONTROL_RES1_SHIFT 0 | ||
2620 | |||
2621 | #define BCM8704_USER_ANALOG_CLK 0xc801 | ||
2622 | #define BCM8704_USER_PMD_RX_CONTROL 0xc802 | ||
2623 | |||
2624 | #define BCM8704_USER_PMD_TX_CONTROL 0xc803 | ||
2625 | #define USER_PMD_TX_CTL_RES1 0xfe00 | ||
2626 | #define USER_PMD_TX_CTL_XFP_CLKEN 0x0100 | ||
2627 | #define USER_PMD_TX_CTL_TX_DAC_TXD 0x00c0 | ||
2628 | #define USER_PMD_TX_CTL_TX_DAC_TXD_SH 6 | ||
2629 | #define USER_PMD_TX_CTL_TX_DAC_TXCK 0x0030 | ||
2630 | #define USER_PMD_TX_CTL_TX_DAC_TXCK_SH 4 | ||
2631 | #define USER_PMD_TX_CTL_TSD_LPWREN 0x0008 | ||
2632 | #define USER_PMD_TX_CTL_TSCK_LPWREN 0x0004 | ||
2633 | #define USER_PMD_TX_CTL_CMU_LPWREN 0x0002 | ||
2634 | #define USER_PMD_TX_CTL_SFIFORST 0x0001 | ||
2635 | |||
2636 | #define BCM8704_USER_ANALOG_STATUS0 0xc804 | ||
2637 | #define BCM8704_USER_OPT_DIGITAL_CTRL 0xc808 | ||
2638 | #define BCM8704_USER_TX_ALARM_STATUS 0x9004 | ||
2639 | |||
2640 | #define USER_ODIG_CTRL_FMODE 0x8000 | ||
2641 | #define USER_ODIG_CTRL_TX_PDOWN 0x4000 | ||
2642 | #define USER_ODIG_CTRL_RX_PDOWN 0x2000 | ||
2643 | #define USER_ODIG_CTRL_EFILT_EN 0x1000 | ||
2644 | #define USER_ODIG_CTRL_OPT_RST 0x0800 | ||
2645 | #define USER_ODIG_CTRL_PCS_TIB 0x0400 | ||
2646 | #define USER_ODIG_CTRL_PCS_RI 0x0200 | ||
2647 | #define USER_ODIG_CTRL_RESV1 0x0180 | ||
2648 | #define USER_ODIG_CTRL_GPIOS 0x0060 | ||
2649 | #define USER_ODIG_CTRL_GPIOS_SHIFT 5 | ||
2650 | #define USER_ODIG_CTRL_RESV2 0x0010 | ||
2651 | #define USER_ODIG_CTRL_LB_ERR_DIS 0x0008 | ||
2652 | #define USER_ODIG_CTRL_RESV3 0x0006 | ||
2653 | #define USER_ODIG_CTRL_TXONOFF_PD_DIS 0x0001 | ||
2654 | |||
2655 | #define BCM8704_PHYXS_XGXS_LANE_STAT 0x0018 | ||
2656 | #define PHYXS_XGXS_LANE_STAT_ALINGED 0x1000 | ||
2657 | #define PHYXS_XGXS_LANE_STAT_PATTEST 0x0800 | ||
2658 | #define PHYXS_XGXS_LANE_STAT_MAGIC 0x0400 | ||
2659 | #define PHYXS_XGXS_LANE_STAT_LANE3 0x0008 | ||
2660 | #define PHYXS_XGXS_LANE_STAT_LANE2 0x0004 | ||
2661 | #define PHYXS_XGXS_LANE_STAT_LANE1 0x0002 | ||
2662 | #define PHYXS_XGXS_LANE_STAT_LANE0 0x0001 | ||
2663 | |||
2664 | #define BCM5464R_AUX_CTL 24 | ||
2665 | #define BCM5464R_AUX_CTL_EXT_LB 0x8000 | ||
2666 | #define BCM5464R_AUX_CTL_EXT_PLEN 0x4000 | ||
2667 | #define BCM5464R_AUX_CTL_ER1000 0x3000 | ||
2668 | #define BCM5464R_AUX_CTL_ER1000_SHIFT 12 | ||
2669 | #define BCM5464R_AUX_CTL_RESV1 0x0800 | ||
2670 | #define BCM5464R_AUX_CTL_WRITE_1 0x0400 | ||
2671 | #define BCM5464R_AUX_CTL_RESV2 0x0300 | ||
2672 | #define BCM5464R_AUX_CTL_PRESP_DIS 0x0080 | ||
2673 | #define BCM5464R_AUX_CTL_RESV3 0x0040 | ||
2674 | #define BCM5464R_AUX_CTL_ER100 0x0030 | ||
2675 | #define BCM5464R_AUX_CTL_ER100_SHIFT 4 | ||
2676 | #define BCM5464R_AUX_CTL_DIAG_MODE 0x0008 | ||
2677 | #define BCM5464R_AUX_CTL_SR_SEL 0x0007 | ||
2678 | #define BCM5464R_AUX_CTL_SR_SEL_SHIFT 0 | ||
2679 | |||
2680 | #define BCM5464R_CTRL1000_AS_MASTER 0x0800 | ||
2681 | #define BCM5464R_CTRL1000_ENABLE_AS_MASTER 0x1000 | ||
2682 | |||
2683 | #define RCR_ENTRY_MULTI 0x8000000000000000ULL | ||
2684 | #define RCR_ENTRY_PKT_TYPE 0x6000000000000000ULL | ||
2685 | #define RCR_ENTRY_PKT_TYPE_SHIFT 61 | ||
2686 | #define RCR_ENTRY_ZERO_COPY 0x1000000000000000ULL | ||
2687 | #define RCR_ENTRY_NOPORT 0x0800000000000000ULL | ||
2688 | #define RCR_ENTRY_PROMISC 0x0400000000000000ULL | ||
2689 | #define RCR_ENTRY_ERROR 0x0380000000000000ULL | ||
2690 | #define RCR_ENTRY_DCF_ERR 0x0040000000000000ULL | ||
2691 | #define RCR_ENTRY_L2_LEN 0x003fff0000000000ULL | ||
2692 | #define RCR_ENTRY_L2_LEN_SHIFT 40 | ||
2693 | #define RCR_ENTRY_PKTBUFSZ 0x000000c000000000ULL | ||
2694 | #define RCR_ENTRY_PKTBUFSZ_SHIFT 38 | ||
2695 | #define RCR_ENTRY_PKT_BUF_ADDR 0x0000003fffffffffULL /* bits 43:6 */ | ||
2696 | #define RCR_ENTRY_PKT_BUF_ADDR_SHIFT 6 | ||
2697 | |||
2698 | #define RCR_PKT_TYPE_OTHER 0x0 | ||
2699 | #define RCR_PKT_TYPE_TCP 0x1 | ||
2700 | #define RCR_PKT_TYPE_UDP 0x2 | ||
2701 | #define RCR_PKT_TYPE_SCTP 0x3 | ||
2702 | |||
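A minimal sketch (hypothetical helpers, mirroring the mask/shift arithmetic an RX completion path must do) of decoding fields from one 64-bit RCR entry:

	/* Sketch: extract the L2 length from an RCR ring entry. */
	static inline u32 rcr_l2_len(u64 ent)
	{
		return (ent & RCR_ENTRY_L2_LEN) >> RCR_ENTRY_L2_LEN_SHIFT;
	}

	/* Sketch: the stored value is bits 43:6 of the buffer address,
	 * so shift left to rebuild the full address.
	 */
	static inline u64 rcr_buf_addr(u64 ent)
	{
		return (ent & RCR_ENTRY_PKT_BUF_ADDR) << RCR_ENTRY_PKT_BUF_ADDR_SHIFT;
	}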
2703 | #define NIU_RXPULL_MAX ETH_HLEN | ||
2704 | |||
2705 | struct rx_pkt_hdr0 { | ||
2706 | #if defined(__LITTLE_ENDIAN_BITFIELD) | ||
2707 | u8 inputport:2, | ||
2708 | maccheck:1, | ||
2709 | class:5; | ||
2710 | u8 vlan:1, | ||
2711 | llcsnap:1, | ||
2712 | noport:1, | ||
2713 | badip:1, | ||
2714 | tcamhit:1, | ||
2715 | tres:2, | ||
2716 | tzfvld:1; | ||
2717 | #elif defined(__BIG_ENDIAN_BITFIELD) | ||
2718 | u8 class:5, | ||
2719 | maccheck:1, | ||
2720 | inputport:2; | ||
2721 | u8 tzfvld:1, | ||
2722 | tres:2, | ||
2723 | tcamhit:1, | ||
2724 | badip:1, | ||
2725 | noport:1, | ||
2726 | llcsnap:1, | ||
2727 | vlan:1; | ||
2728 | #endif | ||
2729 | }; | ||
2730 | |||
2731 | struct rx_pkt_hdr1 { | ||
2732 | u8 hwrsvd1; | ||
2733 | u8 tcammatch; | ||
2734 | #if defined(__LITTLE_ENDIAN_BITFIELD) | ||
2735 | u8 hwrsvd2:2, | ||
2736 | hashit:1, | ||
2737 | exact:1, | ||
2738 | hzfvld:1, | ||
2739 | hashsidx:3; | ||
2740 | #elif defined(__BIG_ENDIAN_BITFIELD) | ||
2741 | u8 hashsidx:3, | ||
2742 | hzfvld:1, | ||
2743 | exact:1, | ||
2744 | hashit:1, | ||
2745 | hwrsvd2:2; | ||
2746 | #endif | ||
2747 | u8 zcrsvd; | ||
2748 | |||
2749 | /* Bits 11:8 of zero copy flow ID. */ | ||
2750 | #if defined(__LITTLE_ENDIAN_BITFIELD) | ||
2751 | u8 hwrsvd3:4, zflowid0:4; | ||
2752 | #elif defined(__BIG_ENDIAN_BITFIELD) | ||
2753 | u8 zflowid0:4, hwrsvd3:4; | ||
2754 | #endif | ||
2755 | |||
2756 | /* Bits 7:0 of zero copy flow ID. */ | ||
2757 | u8 zflowid1; | ||
2758 | |||
2759 | /* Bits 15:8 of hash value, H2. */ | ||
2760 | u8 hashval2_0; | ||
2761 | |||
2762 | /* Bits 7:0 of hash value, H2. */ | ||
2763 | u8 hashval2_1; | ||
2764 | |||
2765 | /* Bits 19:16 of hash value, H1. */ | ||
2766 | #if defined(__LITTLE_ENDIAN_BITFIELD) | ||
2767 | u8 hwrsvd4:4, hashval1_0:4; | ||
2768 | #elif defined(__BIG_ENDIAN_BITFIELD) | ||
2769 | u8 hashval1_0:4, hwrsvd4:4; | ||
2770 | #endif | ||
2771 | |||
2772 | /* Bits 15:8 of hash value, H1. */ | ||
2773 | u8 hashval1_1; | ||
2774 | |||
2775 | /* Bits 7:0 of hash value, H1. */ | ||
2776 | u8 hashval1_2; | ||
2777 | |||
2778 | u8 hwrsvd5; | ||
2779 | u8 hwrsvd6; | ||
2780 | |||
2781 | u8 usrdata_0; /* Bits 39:32 of user data. */ | ||
2782 | u8 usrdata_1; /* Bits 31:24 of user data. */ | ||
2783 | u8 usrdata_2; /* Bits 23:16 of user data. */ | ||
2784 | u8 usrdata_3; /* Bits 15:8 of user data. */ | ||
2785 | u8 usrdata_4; /* Bits 7:0 of user data. */ | ||
2786 | }; | ||
2787 | |||
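Because the zero-copy flow ID and hash values arrive split across bytes, reassembly is a matter of shifts. A sketch (hypothetical helpers, not in this patch):

	/* Sketch: rebuild the 12-bit zero-copy flow ID and 20-bit H1 hash. */
	static inline u32 rxhdr1_zflowid(const struct rx_pkt_hdr1 *hp)
	{
		return (hp->zflowid0 << 8) | hp->zflowid1;
	}

	static inline u32 rxhdr1_hashval1(const struct rx_pkt_hdr1 *hp)
	{
		return (hp->hashval1_0 << 16) |
		       (hp->hashval1_1 << 8) |
			hp->hashval1_2;
	}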
2788 | struct tx_dma_mbox { | ||
2789 | u64 tx_dma_pre_st; | ||
2790 | u64 tx_cs; | ||
2791 | u64 tx_ring_kick; | ||
2792 | u64 tx_ring_hdl; | ||
2793 | u64 resv1; | ||
2794 | u32 tx_rng_err_logl; | ||
2795 | u32 tx_rng_err_logh; | ||
2796 | u64 resv2; | ||
2797 | u64 resv3; | ||
2798 | }; | ||
2799 | |||
2800 | struct tx_pkt_hdr { | ||
2801 | __le64 flags; | ||
2802 | #define TXHDR_PAD 0x0000000000000007ULL | ||
2803 | #define TXHDR_PAD_SHIFT 0 | ||
2804 | #define TXHDR_LEN 0x000000003fff0000ULL | ||
2805 | #define TXHDR_LEN_SHIFT 16 | ||
2806 | #define TXHDR_L4STUFF 0x0000003f00000000ULL | ||
2807 | #define TXHDR_L4STUFF_SHIFT 32 | ||
2808 | #define TXHDR_L4START 0x00003f0000000000ULL | ||
2809 | #define TXHDR_L4START_SHIFT 40 | ||
2810 | #define TXHDR_L3START 0x000f000000000000ULL | ||
2811 | #define TXHDR_L3START_SHIFT 48 | ||
2812 | #define TXHDR_IHL 0x00f0000000000000ULL | ||
2813 | #define TXHDR_IHL_SHIFT 52 | ||
2814 | #define TXHDR_VLAN 0x0100000000000000ULL | ||
2815 | #define TXHDR_LLC 0x0200000000000000ULL | ||
2816 | #define TXHDR_IP_VER 0x2000000000000000ULL | ||
2817 | #define TXHDR_CSUM_NONE 0x0000000000000000ULL | ||
2818 | #define TXHDR_CSUM_TCP 0x4000000000000000ULL | ||
2819 | #define TXHDR_CSUM_UDP 0x8000000000000000ULL | ||
2820 | #define TXHDR_CSUM_SCTP 0xc000000000000000ULL | ||
2821 | __le64 resv; | ||
2822 | }; | ||
2823 | |||
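A hedged sketch of composing the header's flags word (helper and parameter names hypothetical; the transmit path builds it the same mask-and-shift way):

	/* Sketch: TCP frame with hardware checksum requested; pad is the
	 * leading pad byte count, len the payload length, stuff/start the
	 * L4 checksum stuff and start offsets in 2-byte units.
	 */
	static inline void sketch_fill_txhdr(struct tx_pkt_hdr *txhdr, u64 pad,
					     u64 len, u64 stuff, u64 start)
	{
		txhdr->flags = cpu_to_le64((pad << TXHDR_PAD_SHIFT) |
					   (len << TXHDR_LEN_SHIFT) |
					   (stuff << TXHDR_L4STUFF_SHIFT) |
					   (start << TXHDR_L4START_SHIFT) |
					   TXHDR_CSUM_TCP);
	}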
2824 | #define TX_DESC_SOP 0x8000000000000000ULL | ||
2825 | #define TX_DESC_MARK 0x4000000000000000ULL | ||
2826 | #define TX_DESC_NUM_PTR 0x3c00000000000000ULL | ||
2827 | #define TX_DESC_NUM_PTR_SHIFT 58 | ||
2828 | #define TX_DESC_TR_LEN 0x01fff00000000000ULL | ||
2829 | #define TX_DESC_TR_LEN_SHIFT 44 | ||
2830 | #define TX_DESC_SAD 0x00000fffffffffffULL | ||
2831 | #define TX_DESC_SAD_SHIFT 0 | ||
2832 | |||
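And the matching sketch for one descriptor word (hypothetical helper; the DMA mapping must fit the 44-bit SAD field):

	/* Sketch: first descriptor of a packet covering tr_len bytes. */
	static inline __le64 sketch_tx_desc(u64 mapping, u64 tr_len,
					    unsigned int nptr)
	{
		return cpu_to_le64(TX_DESC_SOP |
				   (((u64)nptr << TX_DESC_NUM_PTR_SHIFT) &
				    TX_DESC_NUM_PTR) |
				   ((tr_len << TX_DESC_TR_LEN_SHIFT) &
				    TX_DESC_TR_LEN) |
				   (mapping & TX_DESC_SAD));
	}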
2833 | struct tx_buff_info { | ||
2834 | struct sk_buff *skb; | ||
2835 | u64 mapping; | ||
2836 | }; | ||
2837 | |||
2838 | struct txdma_mailbox { | ||
2839 | __le64 tx_dma_pre_st; | ||
2840 | __le64 tx_cs; | ||
2841 | __le64 tx_ring_kick; | ||
2842 | __le64 tx_ring_hdl; | ||
2843 | __le64 resv1; | ||
2844 | __le32 tx_rng_err_logl; | ||
2845 | __le32 tx_rng_err_logh; | ||
2846 | __le64 resv2[2]; | ||
2847 | } __attribute__((aligned(64))); | ||
2848 | |||
2849 | #define MAX_TX_RING_SIZE 256 | ||
2850 | #define MAX_TX_DESC_LEN 4076 | ||
2851 | |||
2852 | struct tx_ring_info { | ||
2853 | struct tx_buff_info tx_buffs[MAX_TX_RING_SIZE]; | ||
2854 | struct niu *np; | ||
2855 | u64 tx_cs; | ||
2856 | int pending; | ||
2857 | int prod; | ||
2858 | int cons; | ||
2859 | int wrap_bit; | ||
2860 | u16 last_pkt_cnt; | ||
2861 | u16 tx_channel; | ||
2862 | u16 mark_counter; | ||
2863 | u16 mark_freq; | ||
2864 | u16 mark_pending; | ||
2865 | u16 __pad; | ||
2866 | struct txdma_mailbox *mbox; | ||
2867 | __le64 *descr; | ||
2868 | |||
2869 | u64 tx_packets; | ||
2870 | u64 tx_bytes; | ||
2871 | u64 tx_errors; | ||
2872 | |||
2873 | u64 mbox_dma; | ||
2874 | u64 descr_dma; | ||
2875 | int max_burst; | ||
2876 | }; | ||
2877 | |||
2878 | #define NEXT_TX(tp, index) \ | ||
2879 | (((index) + 1) < (tp)->pending ? ((index) + 1) : 0) | ||
2880 | |||
2881 | static inline u32 niu_tx_avail(struct tx_ring_info *tp) | ||
2882 | { | ||
2883 | return (tp->pending - | ||
2884 | ((tp->prod - tp->cons) & (MAX_TX_RING_SIZE - 1))); | ||
2885 | } | ||
2886 | |||
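The subtraction works across wrap because pending is kept to a power of two no larger than MAX_TX_RING_SIZE, so (prod - cons) & (MAX_TX_RING_SIZE - 1) is the number of in-flight entries. A condensed, hypothetical sketch of the usual producer-side pattern:

	/* Sketch: claim one descriptor if the ring has room. */
	if (niu_tx_avail(tp) <= 1)
		return NETDEV_TX_BUSY;	/* leave slack; stop the queue */
	tp->tx_buffs[tp->prod].skb = skb;
	tp->prod = NEXT_TX(tp, tp->prod);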
2887 | struct rxdma_mailbox { | ||
2888 | __le64 rx_dma_ctl_stat; | ||
2889 | __le64 rbr_stat; | ||
2890 | __le32 rbr_hdl; | ||
2891 | __le32 rbr_hdh; | ||
2892 | __le64 resv1; | ||
2893 | __le32 rcrstat_c; | ||
2894 | __le32 rcrstat_b; | ||
2895 | __le64 rcrstat_a; | ||
2896 | __le64 resv2[2]; | ||
2897 | } __attribute__((aligned(64))); | ||
2898 | |||
2899 | #define MAX_RBR_RING_SIZE 128 | ||
2900 | #define MAX_RCR_RING_SIZE (MAX_RBR_RING_SIZE * 2) | ||
2901 | |||
2902 | #define RBR_REFILL_MIN 16 | ||
2903 | |||
2904 | #define RX_SKB_ALLOC_SIZE (128 + NET_IP_ALIGN) | ||
2905 | |||
2906 | struct rx_ring_info { | ||
2907 | struct niu *np; | ||
2908 | int rx_channel; | ||
2909 | u16 rbr_block_size; | ||
2910 | u16 rbr_blocks_per_page; | ||
2911 | u16 rbr_sizes[4]; | ||
2912 | unsigned int rcr_index; | ||
2913 | unsigned int rcr_table_size; | ||
2914 | unsigned int rbr_index; | ||
2915 | unsigned int rbr_pending; | ||
2916 | unsigned int rbr_refill_pending; | ||
2917 | unsigned int rbr_kick_thresh; | ||
2918 | unsigned int rbr_table_size; | ||
2919 | struct page **rxhash; | ||
2920 | struct rxdma_mailbox *mbox; | ||
2921 | __le64 *rcr; | ||
2922 | __le32 *rbr; | ||
2923 | #define RBR_DESCR_ADDR_SHIFT 12 | ||
2924 | |||
2925 | u64 rx_packets; | ||
2926 | u64 rx_bytes; | ||
2927 | u64 rx_dropped; | ||
2928 | u64 rx_errors; | ||
2929 | |||
2930 | u64 mbox_dma; | ||
2931 | u64 rcr_dma; | ||
2932 | u64 rbr_dma; | ||
2933 | |||
2934 | /* WRED */ | ||
2935 | int nonsyn_window; | ||
2936 | int nonsyn_threshold; | ||
2937 | int syn_window; | ||
2938 | int syn_threshold; | ||
2939 | |||
2940 | /* interrupt mitigation */ | ||
2941 | int rcr_pkt_threshold; | ||
2942 | int rcr_timeout; | ||
2943 | }; | ||
2944 | |||
2945 | #define NEXT_RCR(rp, index) \ | ||
2946 | (((index) + 1) < (rp)->rcr_table_size ? ((index) + 1) : 0) | ||
2947 | #define NEXT_RBR(rp, index) \ | ||
2948 | (((index) + 1) < (rp)->rbr_table_size ? ((index) + 1) : 0) | ||
2949 | |||
2950 | #define NIU_MAX_PORTS 4 | ||
2951 | #define NIU_NUM_RXCHAN 16 | ||
2952 | #define NIU_NUM_TXCHAN 24 | ||
2953 | #define MAC_NUM_HASH 16 | ||
2954 | |||
2955 | #define NIU_MAX_MTU 9216 | ||
2956 | |||
2957 | /* VPD strings */ | ||
2958 | #define NIU_QGC_LP_BM_STR "501-7606" | ||
2959 | #define NIU_2XGF_LP_BM_STR "501-7283" | ||
2960 | #define NIU_QGC_PEM_BM_STR "501-7765" | ||
2961 | #define NIU_2XGF_PEM_BM_STR "501-7626" | ||
2962 | #define NIU_ALONSO_BM_STR "373-0202" | ||
2963 | #define NIU_FOXXY_BM_STR "501-7961" | ||
2964 | #define NIU_2XGF_MRVL_BM_STR "SK-6E82" | ||
2965 | #define NIU_QGC_LP_MDL_STR "SUNW,pcie-qgc" | ||
2966 | #define NIU_2XGF_LP_MDL_STR "SUNW,pcie-2xgf" | ||
2967 | #define NIU_QGC_PEM_MDL_STR "SUNW,pcie-qgc-pem" | ||
2968 | #define NIU_2XGF_PEM_MDL_STR "SUNW,pcie-2xgf-pem" | ||
2969 | #define NIU_ALONSO_MDL_STR "SUNW,CP3220" | ||
2970 | #define NIU_KIMI_MDL_STR "SUNW,CP3260" | ||
2971 | #define NIU_MARAMBA_MDL_STR "SUNW,pcie-neptune" | ||
2972 | #define NIU_FOXXY_MDL_STR "SUNW,pcie-rfem" | ||
2973 | #define NIU_2XGF_MRVL_MDL_STR "SysKonnect,pcie-2xgf" | ||
2974 | |||
2975 | #define NIU_VPD_MIN_MAJOR 3 | ||
2976 | #define NIU_VPD_MIN_MINOR 4 | ||
2977 | |||
2978 | #define NIU_VPD_MODEL_MAX 32 | ||
2979 | #define NIU_VPD_BD_MODEL_MAX 16 | ||
2980 | #define NIU_VPD_VERSION_MAX 64 | ||
2981 | #define NIU_VPD_PHY_TYPE_MAX 8 | ||
2982 | |||
2983 | struct niu_vpd { | ||
2984 | char model[NIU_VPD_MODEL_MAX]; | ||
2985 | char board_model[NIU_VPD_BD_MODEL_MAX]; | ||
2986 | char version[NIU_VPD_VERSION_MAX]; | ||
2987 | char phy_type[NIU_VPD_PHY_TYPE_MAX]; | ||
2988 | u8 mac_num; | ||
2989 | u8 __pad; | ||
2990 | u8 local_mac[6]; | ||
2991 | int fcode_major; | ||
2992 | int fcode_minor; | ||
2993 | }; | ||
2994 | |||
2995 | struct niu_altmac_rdc { | ||
2996 | u8 alt_mac_num; | ||
2997 | u8 rdc_num; | ||
2998 | u8 mac_pref; | ||
2999 | }; | ||
3000 | |||
3001 | struct niu_vlan_rdc { | ||
3002 | u8 rdc_num; | ||
3003 | u8 vlan_pref; | ||
3004 | }; | ||
3005 | |||
3006 | struct niu_classifier { | ||
3007 | struct niu_altmac_rdc alt_mac_mappings[16]; | ||
3008 | struct niu_vlan_rdc vlan_mappings[ENET_VLAN_TBL_NUM_ENTRIES]; | ||
3009 | |||
3010 | u16 tcam_top; | ||
3011 | u16 tcam_sz; | ||
3012 | u16 tcam_valid_entries; | ||
3013 | u16 num_alt_mac_mappings; | ||
3014 | |||
3015 | u32 h1_init; | ||
3016 | u16 h2_init; | ||
3017 | }; | ||
3018 | |||
3019 | #define NIU_NUM_RDC_TABLES 8 | ||
3020 | #define NIU_RDC_TABLE_SLOTS 16 | ||
3021 | |||
3022 | struct rdc_table { | ||
3023 | u8 rxdma_channel[NIU_RDC_TABLE_SLOTS]; | ||
3024 | }; | ||
3025 | |||
3026 | struct niu_rdc_tables { | ||
3027 | struct rdc_table tables[NIU_NUM_RDC_TABLES]; | ||
3028 | int first_table_num; | ||
3029 | int num_tables; | ||
3030 | }; | ||
3031 | |||
3032 | #define PHY_TYPE_PMA_PMD 0 | ||
3033 | #define PHY_TYPE_PCS 1 | ||
3034 | #define PHY_TYPE_MII 2 | ||
3035 | #define PHY_TYPE_MAX 3 | ||
3036 | |||
3037 | struct phy_probe_info { | ||
3038 | u32 phy_id[PHY_TYPE_MAX][NIU_MAX_PORTS]; | ||
3039 | u8 phy_port[PHY_TYPE_MAX][NIU_MAX_PORTS]; | ||
3040 | u8 cur[PHY_TYPE_MAX]; | ||
3041 | |||
3042 | struct device_attribute phy_port_attrs[PHY_TYPE_MAX * NIU_MAX_PORTS]; | ||
3043 | struct device_attribute phy_type_attrs[PHY_TYPE_MAX * NIU_MAX_PORTS]; | ||
3044 | struct device_attribute phy_id_attrs[PHY_TYPE_MAX * NIU_MAX_PORTS]; | ||
3045 | }; | ||
3046 | |||
3047 | struct niu_tcam_entry { | ||
3048 | u8 valid; | ||
3049 | u64 key[4]; | ||
3050 | u64 key_mask[4]; | ||
3051 | u64 assoc_data; | ||
3052 | }; | ||
3053 | |||
3054 | struct device_node; | ||
3055 | union niu_parent_id { | ||
3056 | struct { | ||
3057 | int domain; | ||
3058 | int bus; | ||
3059 | int device; | ||
3060 | } pci; | ||
3061 | struct device_node *of; | ||
3062 | }; | ||
3063 | |||
3064 | struct niu; | ||
3065 | struct niu_parent { | ||
3066 | struct platform_device *plat_dev; | ||
3067 | int index; | ||
3068 | |||
3069 | union niu_parent_id id; | ||
3070 | |||
3071 | struct niu *ports[NIU_MAX_PORTS]; | ||
3072 | |||
3073 | atomic_t refcnt; | ||
3074 | struct list_head list; | ||
3075 | |||
3076 | spinlock_t lock; | ||
3077 | |||
3078 | u32 flags; | ||
3079 | #define PARENT_FLGS_CLS_HWINIT 0x00000001 | ||
3080 | |||
3081 | u32 port_phy; | ||
3082 | #define PORT_PHY_UNKNOWN 0x00000000 | ||
3083 | #define PORT_PHY_INVALID 0xffffffff | ||
3084 | #define PORT_TYPE_10G 0x01 | ||
3085 | #define PORT_TYPE_1G 0x02 | ||
3086 | #define PORT_TYPE_MASK 0x03 | ||
3087 | |||
3088 | u8 rxchan_per_port[NIU_MAX_PORTS]; | ||
3089 | u8 txchan_per_port[NIU_MAX_PORTS]; | ||
3090 | |||
3091 | struct niu_rdc_tables rdc_group_cfg[NIU_MAX_PORTS]; | ||
3092 | u8 rdc_default[NIU_MAX_PORTS]; | ||
3093 | |||
3094 | u8 ldg_map[LDN_MAX + 1]; | ||
3095 | |||
3096 | u8 plat_type; | ||
3097 | #define PLAT_TYPE_INVALID 0x00 | ||
3098 | #define PLAT_TYPE_ATLAS 0x01 | ||
3099 | #define PLAT_TYPE_NIU 0x02 | ||
3100 | #define PLAT_TYPE_VF_P0 0x03 | ||
3101 | #define PLAT_TYPE_VF_P1 0x04 | ||
3102 | #define PLAT_TYPE_ATCA_CP3220 0x08 | ||
3103 | |||
3104 | u8 num_ports; | ||
3105 | |||
3106 | u16 tcam_num_entries; | ||
3107 | #define NIU_PCI_TCAM_ENTRIES 256 | ||
3108 | #define NIU_NONPCI_TCAM_ENTRIES 128 | ||
3109 | #define NIU_TCAM_ENTRIES_MAX 256 | ||
3110 | |||
3111 | int rxdma_clock_divider; | ||
3112 | |||
3113 | struct phy_probe_info phy_probe_info; | ||
3114 | |||
3115 | struct niu_tcam_entry tcam[NIU_TCAM_ENTRIES_MAX]; | ||
3116 | |||
3117 | #define NIU_L2_PROG_CLS 2 | ||
3118 | #define NIU_L3_PROG_CLS 4 | ||
3119 | u64 l2_cls[NIU_L2_PROG_CLS]; | ||
3120 | u64 l3_cls[NIU_L3_PROG_CLS]; | ||
3121 | u64 tcam_key[12]; | ||
3122 | u64 flow_key[12]; | ||
3123 | u16 l3_cls_refcnt[NIU_L3_PROG_CLS]; | ||
3124 | u8 l3_cls_pid[NIU_L3_PROG_CLS]; | ||
3125 | }; | ||
3126 | |||
3127 | struct niu_ops { | ||
3128 | void *(*alloc_coherent)(struct device *dev, size_t size, | ||
3129 | u64 *handle, gfp_t flag); | ||
3130 | void (*free_coherent)(struct device *dev, size_t size, | ||
3131 | void *cpu_addr, u64 handle); | ||
3132 | u64 (*map_page)(struct device *dev, struct page *page, | ||
3133 | unsigned long offset, size_t size, | ||
3134 | enum dma_data_direction direction); | ||
3135 | void (*unmap_page)(struct device *dev, u64 dma_address, | ||
3136 | size_t size, enum dma_data_direction direction); | ||
3137 | u64 (*map_single)(struct device *dev, void *cpu_addr, | ||
3138 | size_t size, | ||
3139 | enum dma_data_direction direction); | ||
3140 | void (*unmap_single)(struct device *dev, u64 dma_address, | ||
3141 | size_t size, enum dma_data_direction direction); | ||
3142 | }; | ||
3143 | |||
3144 | struct niu_link_config { | ||
3145 | u32 supported; | ||
3146 | |||
3147 | /* Describes what we're trying to get. */ | ||
3148 | u32 advertising; | ||
3149 | u16 speed; | ||
3150 | u8 duplex; | ||
3151 | u8 autoneg; | ||
3152 | |||
3153 | /* Describes what we actually have. */ | ||
3154 | u32 active_advertising; | ||
3155 | u16 active_speed; | ||
3156 | u8 active_duplex; | ||
3157 | u8 active_autoneg; | ||
3158 | #define SPEED_INVALID 0xffff | ||
3159 | #define DUPLEX_INVALID 0xff | ||
3160 | #define AUTONEG_INVALID 0xff | ||
3161 | |||
3162 | u8 loopback_mode; | ||
3163 | #define LOOPBACK_DISABLED 0x00 | ||
3164 | #define LOOPBACK_PHY 0x01 | ||
3165 | #define LOOPBACK_MAC 0x02 | ||
3166 | }; | ||
3167 | |||
3168 | struct niu_ldg { | ||
3169 | struct napi_struct napi; | ||
3170 | struct niu *np; | ||
3171 | u8 ldg_num; | ||
3172 | u8 timer; | ||
3173 | u64 v0, v1, v2; | ||
3174 | unsigned int irq; | ||
3175 | }; | ||
3176 | |||
3177 | struct niu_xmac_stats { | ||
3178 | u64 tx_frames; | ||
3179 | u64 tx_bytes; | ||
3180 | u64 tx_fifo_errors; | ||
3181 | u64 tx_overflow_errors; | ||
3182 | u64 tx_max_pkt_size_errors; | ||
3183 | u64 tx_underflow_errors; | ||
3184 | |||
3185 | u64 rx_local_faults; | ||
3186 | u64 rx_remote_faults; | ||
3187 | u64 rx_link_faults; | ||
3188 | u64 rx_align_errors; | ||
3189 | u64 rx_frags; | ||
3190 | u64 rx_mcasts; | ||
3191 | u64 rx_bcasts; | ||
3192 | u64 rx_hist_cnt1; | ||
3193 | u64 rx_hist_cnt2; | ||
3194 | u64 rx_hist_cnt3; | ||
3195 | u64 rx_hist_cnt4; | ||
3196 | u64 rx_hist_cnt5; | ||
3197 | u64 rx_hist_cnt6; | ||
3198 | u64 rx_hist_cnt7; | ||
3199 | u64 rx_octets; | ||
3200 | u64 rx_code_violations; | ||
3201 | u64 rx_len_errors; | ||
3202 | u64 rx_crc_errors; | ||
3203 | u64 rx_underflows; | ||
3204 | u64 rx_overflows; | ||
3205 | |||
3206 | u64 pause_off_state; | ||
3207 | u64 pause_on_state; | ||
3208 | u64 pause_received; | ||
3209 | }; | ||
3210 | |||
3211 | struct niu_bmac_stats { | ||
3212 | u64 tx_underflow_errors; | ||
3213 | u64 tx_max_pkt_size_errors; | ||
3214 | u64 tx_bytes; | ||
3215 | u64 tx_frames; | ||
3216 | |||
3217 | u64 rx_overflows; | ||
3218 | u64 rx_frames; | ||
3219 | u64 rx_align_errors; | ||
3220 | u64 rx_crc_errors; | ||
3221 | u64 rx_len_errors; | ||
3222 | |||
3223 | u64 pause_off_state; | ||
3224 | u64 pause_on_state; | ||
3225 | u64 pause_received; | ||
3226 | }; | ||
3227 | |||
3228 | union niu_mac_stats { | ||
3229 | struct niu_xmac_stats xmac; | ||
3230 | struct niu_bmac_stats bmac; | ||
3231 | }; | ||
3232 | |||
3233 | struct niu_phy_ops { | ||
3234 | int (*serdes_init)(struct niu *np); | ||
3235 | int (*xcvr_init)(struct niu *np); | ||
3236 | int (*link_status)(struct niu *np, int *); | ||
3237 | }; | ||
3238 | |||
3239 | struct platform_device; | ||
3240 | struct niu { | ||
3241 | void __iomem *regs; | ||
3242 | struct net_device *dev; | ||
3243 | struct pci_dev *pdev; | ||
3244 | struct device *device; | ||
3245 | struct niu_parent *parent; | ||
3246 | |||
3247 | u32 flags; | ||
3248 | #define NIU_FLAGS_HOTPLUG_PHY_PRESENT 0x02000000 /* Removable PHY detected */ | ||
3249 | #define NIU_FLAGS_HOTPLUG_PHY 0x01000000 /* Removable PHY */ | ||
3250 | #define NIU_FLAGS_VPD_VALID 0x00800000 /* VPD has valid version */ | ||
3251 | #define NIU_FLAGS_MSIX 0x00400000 /* MSI-X in use */ | ||
3252 | #define NIU_FLAGS_MCAST 0x00200000 /* multicast filter enabled */ | ||
3253 | #define NIU_FLAGS_PROMISC 0x00100000 /* PROMISC enabled */ | ||
3254 | #define NIU_FLAGS_XCVR_SERDES 0x00080000 /* 0=PHY 1=SERDES */ | ||
3255 | #define NIU_FLAGS_10G 0x00040000 /* 0=1G 1=10G */ | ||
3256 | #define NIU_FLAGS_FIBER 0x00020000 /* 0=COPPER 1=FIBER */ | ||
3257 | #define NIU_FLAGS_XMAC 0x00010000 /* 0=BMAC 1=XMAC */ | ||
3258 | |||
3259 | u32 msg_enable; | ||
3260 | char irq_name[NIU_NUM_RXCHAN+NIU_NUM_TXCHAN+3][IFNAMSIZ + 6]; | ||
3261 | |||
3262 | /* Protects hw programming and ring state. */ | ||
3263 | spinlock_t lock; | ||
3264 | |||
3265 | const struct niu_ops *ops; | ||
3266 | union niu_mac_stats mac_stats; | ||
3267 | |||
3268 | struct rx_ring_info *rx_rings; | ||
3269 | struct tx_ring_info *tx_rings; | ||
3270 | int num_rx_rings; | ||
3271 | int num_tx_rings; | ||
3272 | |||
3273 | struct niu_ldg ldg[NIU_NUM_LDG]; | ||
3274 | int num_ldg; | ||
3275 | |||
3276 | void __iomem *mac_regs; | ||
3277 | unsigned long ipp_off; | ||
3278 | unsigned long pcs_off; | ||
3279 | unsigned long xpcs_off; | ||
3280 | |||
3281 | struct timer_list timer; | ||
3282 | u64 orig_led_state; | ||
3283 | const struct niu_phy_ops *phy_ops; | ||
3284 | int phy_addr; | ||
3285 | |||
3286 | struct niu_link_config link_config; | ||
3287 | |||
3288 | struct work_struct reset_task; | ||
3289 | |||
3290 | u8 port; | ||
3291 | u8 mac_xcvr; | ||
3292 | #define MAC_XCVR_MII 1 | ||
3293 | #define MAC_XCVR_PCS 2 | ||
3294 | #define MAC_XCVR_XPCS 3 | ||
3295 | |||
3296 | struct niu_classifier clas; | ||
3297 | |||
3298 | struct niu_vpd vpd; | ||
3299 | u32 eeprom_len; | ||
3300 | |||
3301 | struct platform_device *op; | ||
3302 | void __iomem *vir_regs_1; | ||
3303 | void __iomem *vir_regs_2; | ||
3304 | }; | ||
3305 | |||
3306 | #endif /* _NIU_H */ | ||
diff --git a/drivers/net/ethernet/sun/sunbmac.c b/drivers/net/ethernet/sun/sunbmac.c new file mode 100644 index 000000000000..297a4242106b --- /dev/null +++ b/drivers/net/ethernet/sun/sunbmac.c | |||
@@ -0,0 +1,1306 @@ | |||
1 | /* sunbmac.c: Driver for Sparc BigMAC 100baseT ethernet adapters. | ||
2 | * | ||
3 | * Copyright (C) 1997, 1998, 1999, 2003, 2008 David S. Miller (davem@davemloft.net) | ||
4 | */ | ||
5 | |||
6 | #include <linux/module.h> | ||
7 | |||
8 | #include <linux/kernel.h> | ||
9 | #include <linux/types.h> | ||
10 | #include <linux/fcntl.h> | ||
11 | #include <linux/interrupt.h> | ||
12 | #include <linux/ioport.h> | ||
13 | #include <linux/in.h> | ||
14 | #include <linux/string.h> | ||
15 | #include <linux/delay.h> | ||
16 | #include <linux/init.h> | ||
17 | #include <linux/crc32.h> | ||
18 | #include <linux/errno.h> | ||
19 | #include <linux/ethtool.h> | ||
20 | #include <linux/netdevice.h> | ||
21 | #include <linux/etherdevice.h> | ||
22 | #include <linux/skbuff.h> | ||
23 | #include <linux/bitops.h> | ||
24 | #include <linux/dma-mapping.h> | ||
25 | #include <linux/of.h> | ||
26 | #include <linux/of_device.h> | ||
27 | #include <linux/gfp.h> | ||
28 | |||
29 | #include <asm/auxio.h> | ||
30 | #include <asm/byteorder.h> | ||
31 | #include <asm/dma.h> | ||
32 | #include <asm/idprom.h> | ||
33 | #include <asm/io.h> | ||
34 | #include <asm/openprom.h> | ||
35 | #include <asm/oplib.h> | ||
36 | #include <asm/pgtable.h> | ||
37 | #include <asm/system.h> | ||
38 | |||
39 | #include "sunbmac.h" | ||
40 | |||
41 | #define DRV_NAME "sunbmac" | ||
42 | #define DRV_VERSION "2.1" | ||
43 | #define DRV_RELDATE "August 26, 2008" | ||
44 | #define DRV_AUTHOR "David S. Miller (davem@davemloft.net)" | ||
45 | |||
46 | static char version[] = | ||
47 | DRV_NAME ".c:v" DRV_VERSION " " DRV_RELDATE " " DRV_AUTHOR "\n"; | ||
48 | |||
49 | MODULE_VERSION(DRV_VERSION); | ||
50 | MODULE_AUTHOR(DRV_AUTHOR); | ||
51 | MODULE_DESCRIPTION("Sun BigMAC 100baseT ethernet driver"); | ||
52 | MODULE_LICENSE("GPL"); | ||
53 | |||
54 | #undef DEBUG_PROBE | ||
55 | #undef DEBUG_TX | ||
56 | #undef DEBUG_IRQ | ||
57 | |||
58 | #ifdef DEBUG_PROBE | ||
59 | #define DP(x) printk x | ||
60 | #else | ||
61 | #define DP(x) | ||
62 | #endif | ||
63 | |||
64 | #ifdef DEBUG_TX | ||
65 | #define DTX(x) printk x | ||
66 | #else | ||
67 | #define DTX(x) | ||
68 | #endif | ||
69 | |||
70 | #ifdef DEBUG_IRQ | ||
71 | #define DIRQ(x) printk x | ||
72 | #else | ||
73 | #define DIRQ(x) | ||
74 | #endif | ||
75 | |||
76 | #define DEFAULT_JAMSIZE 4 /* Toe jam */ | ||
77 | |||
78 | #define QEC_RESET_TRIES 200 | ||
79 | |||
80 | static int qec_global_reset(void __iomem *gregs) | ||
81 | { | ||
82 | int tries = QEC_RESET_TRIES; | ||
83 | |||
84 | sbus_writel(GLOB_CTRL_RESET, gregs + GLOB_CTRL); | ||
85 | while (--tries) { | ||
86 | if (sbus_readl(gregs + GLOB_CTRL) & GLOB_CTRL_RESET) { | ||
87 | udelay(20); | ||
88 | continue; | ||
89 | } | ||
90 | break; | ||
91 | } | ||
92 | if (tries) | ||
93 | return 0; | ||
94 | printk(KERN_ERR "BigMAC: Cannot reset the QEC.\n"); | ||
95 | return -1; | ||
96 | } | ||
97 | |||
98 | static void qec_init(struct bigmac *bp) | ||
99 | { | ||
100 | struct platform_device *qec_op = bp->qec_op; | ||
101 | void __iomem *gregs = bp->gregs; | ||
102 | u8 bsizes = bp->bigmac_bursts; | ||
103 | u32 regval; | ||
104 | |||
105 | /* 64byte bursts do not work at the moment, do | ||
106 | * not even try to enable them. -DaveM | ||
107 | */ | ||
108 | if (bsizes & DMA_BURST32) | ||
109 | regval = GLOB_CTRL_B32; | ||
110 | else | ||
111 | regval = GLOB_CTRL_B16; | ||
112 | sbus_writel(regval | GLOB_CTRL_BMODE, gregs + GLOB_CTRL); | ||
113 | sbus_writel(GLOB_PSIZE_2048, gregs + GLOB_PSIZE); | ||
114 | |||
115 | /* All of memsize is given to bigmac. */ | ||
116 | sbus_writel(resource_size(&qec_op->resource[1]), | ||
117 | gregs + GLOB_MSIZE); | ||
118 | |||
119 | /* Half to the transmitter, half to the receiver. */ | ||
120 | sbus_writel(resource_size(&qec_op->resource[1]) >> 1, | ||
121 | gregs + GLOB_TSIZE); | ||
122 | sbus_writel(resource_size(&qec_op->resource[1]) >> 1, | ||
123 | gregs + GLOB_RSIZE); | ||
124 | } | ||
125 | |||
126 | #define TX_RESET_TRIES 32 | ||
127 | #define RX_RESET_TRIES 32 | ||
128 | |||
129 | static void bigmac_tx_reset(void __iomem *bregs) | ||
130 | { | ||
131 | int tries = TX_RESET_TRIES; | ||
132 | |||
133 | sbus_writel(0, bregs + BMAC_TXCFG); | ||
134 | |||
135 | /* The fifo threshold bit is read-only and does | ||
136 | * not clear. -DaveM | ||
137 | */ | ||
138 | while ((sbus_readl(bregs + BMAC_TXCFG) & ~(BIGMAC_TXCFG_FIFO)) != 0 && | ||
139 | --tries != 0) | ||
140 | udelay(20); | ||
141 | |||
142 | if (!tries) { | ||
143 | printk(KERN_ERR "BIGMAC: Transmitter will not reset.\n"); | ||
144 | printk(KERN_ERR "BIGMAC: tx_cfg is %08x\n", | ||
145 | sbus_readl(bregs + BMAC_TXCFG)); | ||
146 | } | ||
147 | } | ||
148 | |||
149 | static void bigmac_rx_reset(void __iomem *bregs) | ||
150 | { | ||
151 | int tries = RX_RESET_TRIES; | ||
152 | |||
153 | sbus_writel(0, bregs + BMAC_RXCFG); | ||
154 | while (sbus_readl(bregs + BMAC_RXCFG) && --tries) | ||
155 | udelay(20); | ||
156 | |||
157 | if (!tries) { | ||
158 | printk(KERN_ERR "BIGMAC: Receiver will not reset.\n"); | ||
159 | printk(KERN_ERR "BIGMAC: rx_cfg is %08x\n", | ||
160 | sbus_readl(bregs + BMAC_RXCFG)); | ||
161 | } | ||
162 | } | ||
163 | |||
164 | /* Reset the transmitter and receiver. */ | ||
165 | static void bigmac_stop(struct bigmac *bp) | ||
166 | { | ||
167 | bigmac_tx_reset(bp->bregs); | ||
168 | bigmac_rx_reset(bp->bregs); | ||
169 | } | ||
170 | |||
171 | static void bigmac_get_counters(struct bigmac *bp, void __iomem *bregs) | ||
172 | { | ||
173 | struct net_device_stats *stats = &bp->enet_stats; | ||
174 | |||
175 | stats->rx_crc_errors += sbus_readl(bregs + BMAC_RCRCECTR); | ||
176 | sbus_writel(0, bregs + BMAC_RCRCECTR); | ||
177 | |||
178 | stats->rx_frame_errors += sbus_readl(bregs + BMAC_UNALECTR); | ||
179 | sbus_writel(0, bregs + BMAC_UNALECTR); | ||
180 | |||
181 | stats->rx_length_errors += sbus_readl(bregs + BMAC_GLECTR); | ||
182 | sbus_writel(0, bregs + BMAC_GLECTR); | ||
183 | |||
184 | stats->tx_aborted_errors += sbus_readl(bregs + BMAC_EXCTR); | ||
185 | |||
186 | stats->collisions += | ||
187 | (sbus_readl(bregs + BMAC_EXCTR) + | ||
188 | sbus_readl(bregs + BMAC_LTCTR)); | ||
189 | sbus_writel(0, bregs + BMAC_EXCTR); | ||
190 | sbus_writel(0, bregs + BMAC_LTCTR); | ||
191 | } | ||
192 | |||
193 | static void bigmac_clean_rings(struct bigmac *bp) | ||
194 | { | ||
195 | int i; | ||
196 | |||
197 | for (i = 0; i < RX_RING_SIZE; i++) { | ||
198 | if (bp->rx_skbs[i] != NULL) { | ||
199 | dev_kfree_skb_any(bp->rx_skbs[i]); | ||
200 | bp->rx_skbs[i] = NULL; | ||
201 | } | ||
202 | } | ||
203 | |||
204 | for (i = 0; i < TX_RING_SIZE; i++) { | ||
205 | if (bp->tx_skbs[i] != NULL) { | ||
206 | dev_kfree_skb_any(bp->tx_skbs[i]); | ||
207 | bp->tx_skbs[i] = NULL; | ||
208 | } | ||
209 | } | ||
210 | } | ||
211 | |||
212 | static void bigmac_init_rings(struct bigmac *bp, int from_irq) | ||
213 | { | ||
214 | struct bmac_init_block *bb = bp->bmac_block; | ||
215 | struct net_device *dev = bp->dev; | ||
216 | int i; | ||
217 | gfp_t gfp_flags = GFP_KERNEL; | ||
218 | |||
219 | if (from_irq || in_interrupt()) | ||
220 | gfp_flags = GFP_ATOMIC; | ||
221 | |||
222 | bp->rx_new = bp->rx_old = bp->tx_new = bp->tx_old = 0; | ||
223 | |||
224 | /* Free any skippy bufs left around in the rings. */ | ||
225 | bigmac_clean_rings(bp); | ||
226 | |||
227 | /* Now get new skbufs for the receive ring. */ | ||
228 | for (i = 0; i < RX_RING_SIZE; i++) { | ||
229 | struct sk_buff *skb; | ||
230 | |||
231 | skb = big_mac_alloc_skb(RX_BUF_ALLOC_SIZE, gfp_flags); | ||
232 | if (!skb) | ||
233 | continue; | ||
234 | |||
235 | bp->rx_skbs[i] = skb; | ||
236 | skb->dev = dev; | ||
237 | |||
238 | /* Because we reserve afterwards. */ | ||
239 | skb_put(skb, ETH_FRAME_LEN); | ||
240 | skb_reserve(skb, 34); | ||
241 | |||
242 | bb->be_rxd[i].rx_addr = | ||
243 | dma_map_single(&bp->bigmac_op->dev, | ||
244 | skb->data, | ||
245 | RX_BUF_ALLOC_SIZE - 34, | ||
246 | DMA_FROM_DEVICE); | ||
247 | bb->be_rxd[i].rx_flags = | ||
248 | (RXD_OWN | ((RX_BUF_ALLOC_SIZE - 34) & RXD_LENGTH)); | ||
249 | } | ||
250 | |||
251 | for (i = 0; i < TX_RING_SIZE; i++) | ||
252 | bb->be_txd[i].tx_flags = bb->be_txd[i].tx_addr = 0; | ||
253 | } | ||
254 | |||
255 | #define MGMT_CLKON (MGMT_PAL_INT_MDIO|MGMT_PAL_EXT_MDIO|MGMT_PAL_OENAB|MGMT_PAL_DCLOCK) | ||
256 | #define MGMT_CLKOFF (MGMT_PAL_INT_MDIO|MGMT_PAL_EXT_MDIO|MGMT_PAL_OENAB) | ||
257 | |||
258 | static void idle_transceiver(void __iomem *tregs) | ||
259 | { | ||
260 | int i = 20; | ||
261 | |||
262 | while (i--) { | ||
263 | sbus_writel(MGMT_CLKOFF, tregs + TCVR_MPAL); | ||
264 | sbus_readl(tregs + TCVR_MPAL); | ||
265 | sbus_writel(MGMT_CLKON, tregs + TCVR_MPAL); | ||
266 | sbus_readl(tregs + TCVR_MPAL); | ||
267 | } | ||
268 | } | ||
269 | |||
270 | static void write_tcvr_bit(struct bigmac *bp, void __iomem *tregs, int bit) | ||
271 | { | ||
272 | if (bp->tcvr_type == internal) { | ||
273 | bit = (bit & 1) << 3; | ||
274 | sbus_writel(bit | (MGMT_PAL_OENAB | MGMT_PAL_EXT_MDIO), | ||
275 | tregs + TCVR_MPAL); | ||
276 | sbus_readl(tregs + TCVR_MPAL); | ||
277 | sbus_writel(bit | MGMT_PAL_OENAB | MGMT_PAL_EXT_MDIO | MGMT_PAL_DCLOCK, | ||
278 | tregs + TCVR_MPAL); | ||
279 | sbus_readl(tregs + TCVR_MPAL); | ||
280 | } else if (bp->tcvr_type == external) { | ||
281 | bit = (bit & 1) << 2; | ||
282 | sbus_writel(bit | MGMT_PAL_INT_MDIO | MGMT_PAL_OENAB, | ||
283 | tregs + TCVR_MPAL); | ||
284 | sbus_readl(tregs + TCVR_MPAL); | ||
285 | sbus_writel(bit | MGMT_PAL_INT_MDIO | MGMT_PAL_OENAB | MGMT_PAL_DCLOCK, | ||
286 | tregs + TCVR_MPAL); | ||
287 | sbus_readl(tregs + TCVR_MPAL); | ||
288 | } else { | ||
289 | printk(KERN_ERR "write_tcvr_bit: No transceiver type known!\n"); | ||
290 | } | ||
291 | } | ||
292 | |||
293 | static int read_tcvr_bit(struct bigmac *bp, void __iomem *tregs) | ||
294 | { | ||
295 | int retval = 0; | ||
296 | |||
297 | if (bp->tcvr_type == internal) { | ||
298 | sbus_writel(MGMT_PAL_EXT_MDIO, tregs + TCVR_MPAL); | ||
299 | sbus_readl(tregs + TCVR_MPAL); | ||
300 | sbus_writel(MGMT_PAL_EXT_MDIO | MGMT_PAL_DCLOCK, | ||
301 | tregs + TCVR_MPAL); | ||
302 | sbus_readl(tregs + TCVR_MPAL); | ||
303 | retval = (sbus_readl(tregs + TCVR_MPAL) & MGMT_PAL_INT_MDIO) >> 3; | ||
304 | } else if (bp->tcvr_type == external) { | ||
305 | sbus_writel(MGMT_PAL_INT_MDIO, tregs + TCVR_MPAL); | ||
306 | sbus_readl(tregs + TCVR_MPAL); | ||
307 | sbus_writel(MGMT_PAL_INT_MDIO | MGMT_PAL_DCLOCK, tregs + TCVR_MPAL); | ||
308 | sbus_readl(tregs + TCVR_MPAL); | ||
309 | retval = (sbus_readl(tregs + TCVR_MPAL) & MGMT_PAL_EXT_MDIO) >> 2; | ||
310 | } else { | ||
311 | printk(KERN_ERR "read_tcvr_bit: No transceiver type known!\n"); | ||
312 | } | ||
313 | return retval; | ||
314 | } | ||
315 | |||
316 | static int read_tcvr_bit2(struct bigmac *bp, void __iomem *tregs) | ||
317 | { | ||
318 | int retval = 0; | ||
319 | |||
320 | if (bp->tcvr_type == internal) { | ||
321 | sbus_writel(MGMT_PAL_EXT_MDIO, tregs + TCVR_MPAL); | ||
322 | sbus_readl(tregs + TCVR_MPAL); | ||
323 | retval = (sbus_readl(tregs + TCVR_MPAL) & MGMT_PAL_INT_MDIO) >> 3; | ||
324 | sbus_writel(MGMT_PAL_EXT_MDIO | MGMT_PAL_DCLOCK, tregs + TCVR_MPAL); | ||
325 | sbus_readl(tregs + TCVR_MPAL); | ||
326 | } else if (bp->tcvr_type == external) { | ||
327 | sbus_writel(MGMT_PAL_INT_MDIO, tregs + TCVR_MPAL); | ||
328 | sbus_readl(tregs + TCVR_MPAL); | ||
329 | retval = (sbus_readl(tregs + TCVR_MPAL) & MGMT_PAL_EXT_MDIO) >> 2; | ||
330 | sbus_writel(MGMT_PAL_INT_MDIO | MGMT_PAL_DCLOCK, tregs + TCVR_MPAL); | ||
331 | sbus_readl(tregs + TCVR_MPAL); | ||
332 | } else { | ||
333 | printk(KERN_ERR "read_tcvr_bit2: No transceiver type known!\n"); | ||
334 | } | ||
335 | return retval; | ||
336 | } | ||
337 | |||
338 | static void put_tcvr_byte(struct bigmac *bp, | ||
339 | void __iomem *tregs, | ||
340 | unsigned int byte) | ||
341 | { | ||
342 | int shift = 4; | ||
343 | |||
344 | do { | ||
345 | write_tcvr_bit(bp, tregs, ((byte >> shift) & 1)); | ||
346 | shift -= 1; | ||
347 | } while (shift >= 0); | ||
348 | } | ||
349 | |||
350 | static void bigmac_tcvr_write(struct bigmac *bp, void __iomem *tregs, | ||
351 | int reg, unsigned short val) | ||
352 | { | ||
353 | int shift; | ||
354 | |||
355 | reg &= 0xff; | ||
356 | val &= 0xffff; | ||
357 | switch(bp->tcvr_type) { | ||
358 | case internal: | ||
359 | case external: | ||
360 | break; | ||
361 | |||
362 | default: | ||
363 | printk(KERN_ERR "bigmac_tcvr_read: Whoops, no known transceiver type.\n"); | ||
364 | return; | ||
365 | } | ||
366 | |||
367 | idle_transceiver(tregs); | ||
368 | write_tcvr_bit(bp, tregs, 0); | ||
369 | write_tcvr_bit(bp, tregs, 1); | ||
370 | write_tcvr_bit(bp, tregs, 0); | ||
371 | write_tcvr_bit(bp, tregs, 1); | ||
372 | |||
373 | put_tcvr_byte(bp, tregs, | ||
374 | ((bp->tcvr_type == internal) ? | ||
375 | BIGMAC_PHY_INTERNAL : BIGMAC_PHY_EXTERNAL)); | ||
376 | |||
377 | put_tcvr_byte(bp, tregs, reg); | ||
378 | |||
379 | write_tcvr_bit(bp, tregs, 1); | ||
380 | write_tcvr_bit(bp, tregs, 0); | ||
381 | |||
382 | shift = 15; | ||
383 | do { | ||
384 | write_tcvr_bit(bp, tregs, (val >> shift) & 1); | ||
385 | shift -= 1; | ||
386 | } while (shift >= 0); | ||
387 | } | ||
388 | |||
389 | static unsigned short bigmac_tcvr_read(struct bigmac *bp, | ||
390 | void __iomem *tregs, | ||
391 | int reg) | ||
392 | { | ||
393 | unsigned short retval = 0; | ||
394 | |||
395 | reg &= 0xff; | ||
396 | switch(bp->tcvr_type) { | ||
397 | case internal: | ||
398 | case external: | ||
399 | break; | ||
400 | |||
401 | default: | ||
402 | printk(KERN_ERR "bigmac_tcvr_read: Whoops, no known transceiver type.\n"); | ||
403 | return 0xffff; | ||
404 | } | ||
405 | |||
406 | idle_transceiver(tregs); | ||
407 | write_tcvr_bit(bp, tregs, 0); | ||
408 | write_tcvr_bit(bp, tregs, 1); | ||
409 | write_tcvr_bit(bp, tregs, 1); | ||
410 | write_tcvr_bit(bp, tregs, 0); | ||
411 | |||
412 | put_tcvr_byte(bp, tregs, | ||
413 | ((bp->tcvr_type == internal) ? | ||
414 | BIGMAC_PHY_INTERNAL : BIGMAC_PHY_EXTERNAL)); | ||
415 | |||
416 | put_tcvr_byte(bp, tregs, reg); | ||
417 | |||
418 | if (bp->tcvr_type == external) { | ||
419 | int shift = 15; | ||
420 | |||
421 | (void) read_tcvr_bit2(bp, tregs); | ||
422 | (void) read_tcvr_bit2(bp, tregs); | ||
423 | |||
424 | do { | ||
425 | int tmp; | ||
426 | |||
427 | tmp = read_tcvr_bit2(bp, tregs); | ||
428 | retval |= ((tmp & 1) << shift); | ||
429 | shift -= 1; | ||
430 | } while (shift >= 0); | ||
431 | |||
432 | (void) read_tcvr_bit2(bp, tregs); | ||
433 | (void) read_tcvr_bit2(bp, tregs); | ||
434 | (void) read_tcvr_bit2(bp, tregs); | ||
435 | } else { | ||
436 | int shift = 15; | ||
437 | |||
438 | (void) read_tcvr_bit(bp, tregs); | ||
439 | (void) read_tcvr_bit(bp, tregs); | ||
440 | |||
441 | do { | ||
442 | int tmp; | ||
443 | |||
444 | tmp = read_tcvr_bit(bp, tregs); | ||
445 | retval |= ((tmp & 1) << shift); | ||
446 | shift -= 1; | ||
447 | } while (shift >= 0); | ||
448 | |||
449 | (void) read_tcvr_bit(bp, tregs); | ||
450 | (void) read_tcvr_bit(bp, tregs); | ||
451 | (void) read_tcvr_bit(bp, tregs); | ||
452 | } | ||
453 | return retval; | ||
454 | } | ||
455 | |||
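For orientation, a sketch of the wire protocol the helpers above bit-bang (assumed clause-22 MDIO framing; idle_transceiver() supplies the preamble, and put_tcvr_byte() shifts out the two 5-bit address fields MSB first despite its name):

	/*
	 * Sketch of the frame as driven above, MSB first:
	 *   write: <start 01> <op 01> <phyad:5> <regad:5> <ta 10> <data:16>
	 *   read:  <start 01> <op 10> <phyad:5> <regad:5> <ta>    <data:16 in>
	 */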
456 | static void bigmac_tcvr_init(struct bigmac *bp) | ||
457 | { | ||
458 | void __iomem *tregs = bp->tregs; | ||
459 | u32 mpal; | ||
460 | |||
461 | idle_transceiver(tregs); | ||
462 | sbus_writel(MGMT_PAL_INT_MDIO | MGMT_PAL_EXT_MDIO | MGMT_PAL_DCLOCK, | ||
463 | tregs + TCVR_MPAL); | ||
464 | sbus_readl(tregs + TCVR_MPAL); | ||
465 | |||
466 | /* Only the bit for the present transceiver (internal or | ||
467 | * external) will stick, set them both and see what stays. | ||
468 | */ | ||
469 | sbus_writel(MGMT_PAL_INT_MDIO | MGMT_PAL_EXT_MDIO, tregs + TCVR_MPAL); | ||
470 | sbus_readl(tregs + TCVR_MPAL); | ||
471 | udelay(20); | ||
472 | |||
473 | mpal = sbus_readl(tregs + TCVR_MPAL); | ||
474 | if (mpal & MGMT_PAL_EXT_MDIO) { | ||
475 | bp->tcvr_type = external; | ||
476 | sbus_writel(~(TCVR_PAL_EXTLBACK | TCVR_PAL_MSENSE | TCVR_PAL_LTENABLE), | ||
477 | tregs + TCVR_TPAL); | ||
478 | sbus_readl(tregs + TCVR_TPAL); | ||
479 | } else if (mpal & MGMT_PAL_INT_MDIO) { | ||
480 | bp->tcvr_type = internal; | ||
481 | sbus_writel(~(TCVR_PAL_SERIAL | TCVR_PAL_EXTLBACK | | ||
482 | TCVR_PAL_MSENSE | TCVR_PAL_LTENABLE), | ||
483 | tregs + TCVR_TPAL); | ||
484 | sbus_readl(tregs + TCVR_TPAL); | ||
485 | } else { | ||
486 | printk(KERN_ERR "BIGMAC: AIEEE, neither internal nor " | ||
487 | "external MDIO available!\n"); | ||
488 | printk(KERN_ERR "BIGMAC: mgmt_pal[%08x] tcvr_pal[%08x]\n", | ||
489 | sbus_readl(tregs + TCVR_MPAL), | ||
490 | sbus_readl(tregs + TCVR_TPAL)); | ||
491 | } | ||
492 | } | ||
493 | |||
494 | static int bigmac_init_hw(struct bigmac *, int); | ||
495 | |||
496 | static int try_next_permutation(struct bigmac *bp, void __iomem *tregs) | ||
497 | { | ||
498 | if (bp->sw_bmcr & BMCR_SPEED100) { | ||
499 | int timeout; | ||
500 | |||
501 | /* Reset the PHY. */ | ||
502 | bp->sw_bmcr = (BMCR_ISOLATE | BMCR_PDOWN | BMCR_LOOPBACK); | ||
503 | bigmac_tcvr_write(bp, tregs, BIGMAC_BMCR, bp->sw_bmcr); | ||
504 | bp->sw_bmcr = (BMCR_RESET); | ||
505 | bigmac_tcvr_write(bp, tregs, BIGMAC_BMCR, bp->sw_bmcr); | ||
506 | |||
507 | timeout = 64; | ||
508 | while (--timeout) { | ||
509 | bp->sw_bmcr = bigmac_tcvr_read(bp, tregs, BIGMAC_BMCR); | ||
510 | if ((bp->sw_bmcr & BMCR_RESET) == 0) | ||
511 | break; | ||
512 | udelay(20); | ||
513 | } | ||
514 | if (timeout == 0) | ||
515 | printk(KERN_ERR "%s: PHY reset failed.\n", bp->dev->name); | ||
516 | |||
517 | bp->sw_bmcr = bigmac_tcvr_read(bp, tregs, BIGMAC_BMCR); | ||
518 | |||
519 | /* Now we try 10baseT. */ | ||
520 | bp->sw_bmcr &= ~(BMCR_SPEED100); | ||
521 | bigmac_tcvr_write(bp, tregs, BIGMAC_BMCR, bp->sw_bmcr); | ||
522 | return 0; | ||
523 | } | ||
524 | |||
525 | /* We've tried them all. */ | ||
526 | return -1; | ||
527 | } | ||
528 | |||
529 | static void bigmac_timer(unsigned long data) | ||
530 | { | ||
531 | struct bigmac *bp = (struct bigmac *) data; | ||
532 | void __iomem *tregs = bp->tregs; | ||
533 | int restart_timer = 0; | ||
534 | |||
535 | bp->timer_ticks++; | ||
536 | if (bp->timer_state == ltrywait) { | ||
537 | bp->sw_bmsr = bigmac_tcvr_read(bp, tregs, BIGMAC_BMSR); | ||
538 | bp->sw_bmcr = bigmac_tcvr_read(bp, tregs, BIGMAC_BMCR); | ||
539 | if (bp->sw_bmsr & BMSR_LSTATUS) { | ||
540 | printk(KERN_INFO "%s: Link is now up at %s.\n", | ||
541 | bp->dev->name, | ||
542 | (bp->sw_bmcr & BMCR_SPEED100) ? | ||
543 | "100baseT" : "10baseT"); | ||
544 | bp->timer_state = asleep; | ||
545 | restart_timer = 0; | ||
546 | } else { | ||
547 | if (bp->timer_ticks >= 4) { | ||
548 | int ret; | ||
549 | |||
550 | ret = try_next_permutation(bp, tregs); | ||
551 | if (ret == -1) { | ||
552 | printk(KERN_ERR "%s: Link down, cable problem?\n", | ||
553 | bp->dev->name); | ||
554 | ret = bigmac_init_hw(bp, 0); | ||
555 | if (ret) { | ||
556 | printk(KERN_ERR "%s: Error, cannot re-init the " | ||
557 | "BigMAC.\n", bp->dev->name); | ||
558 | } | ||
559 | return; | ||
560 | } | ||
561 | bp->timer_ticks = 0; | ||
562 | restart_timer = 1; | ||
563 | } else { | ||
564 | restart_timer = 1; | ||
565 | } | ||
566 | } | ||
567 | } else { | ||
568 | /* Can't happen... */ | ||
569 | printk(KERN_ERR "%s: Aieee, link timer is asleep but we got one anyways!\n", | ||
570 | bp->dev->name); | ||
571 | restart_timer = 0; | ||
572 | bp->timer_ticks = 0; | ||
573 | bp->timer_state = asleep; /* timer inactive */ | ||
574 | } | ||
575 | |||
576 | if (restart_timer != 0) { | ||
577 | bp->bigmac_timer.expires = jiffies + ((12 * HZ)/10); /* 1.2 sec. */ | ||
578 | add_timer(&bp->bigmac_timer); | ||
579 | } | ||
580 | } | ||
581 | |||
582 | /* Well, really we just force the chip into 100baseT then | ||
583 | * 10baseT, each time checking for a link status. | ||
584 | */ | ||
585 | static void bigmac_begin_auto_negotiation(struct bigmac *bp) | ||
586 | { | ||
587 | void __iomem *tregs = bp->tregs; | ||
588 | int timeout; | ||
589 | |||
590 | /* Grab new software copies of PHY registers. */ | ||
591 | bp->sw_bmsr = bigmac_tcvr_read(bp, tregs, BIGMAC_BMSR); | ||
592 | bp->sw_bmcr = bigmac_tcvr_read(bp, tregs, BIGMAC_BMCR); | ||
593 | |||
594 | /* Reset the PHY. */ | ||
595 | bp->sw_bmcr = (BMCR_ISOLATE | BMCR_PDOWN | BMCR_LOOPBACK); | ||
596 | bigmac_tcvr_write(bp, tregs, BIGMAC_BMCR, bp->sw_bmcr); | ||
597 | bp->sw_bmcr = (BMCR_RESET); | ||
598 | bigmac_tcvr_write(bp, tregs, BIGMAC_BMCR, bp->sw_bmcr); | ||
599 | |||
600 | timeout = 64; | ||
601 | while (--timeout) { | ||
602 | bp->sw_bmcr = bigmac_tcvr_read(bp, tregs, BIGMAC_BMCR); | ||
603 | if ((bp->sw_bmcr & BMCR_RESET) == 0) | ||
604 | break; | ||
605 | udelay(20); | ||
606 | } | ||
607 | if (timeout == 0) | ||
608 | printk(KERN_ERR "%s: PHY reset failed.\n", bp->dev->name); | ||
609 | |||
610 | bp->sw_bmcr = bigmac_tcvr_read(bp, tregs, BIGMAC_BMCR); | ||
611 | |||
612 | /* First we try 100baseT. */ | ||
613 | bp->sw_bmcr |= BMCR_SPEED100; | ||
614 | bigmac_tcvr_write(bp, tregs, BIGMAC_BMCR, bp->sw_bmcr); | ||
615 | |||
616 | bp->timer_state = ltrywait; | ||
617 | bp->timer_ticks = 0; | ||
618 | bp->bigmac_timer.expires = jiffies + (12 * HZ) / 10; | ||
619 | bp->bigmac_timer.data = (unsigned long) bp; | ||
620 | bp->bigmac_timer.function = bigmac_timer; | ||
621 | add_timer(&bp->bigmac_timer); | ||
622 | } | ||
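
The reset-and-poll sequence above is duplicated verbatim in try_next_permutation(). A sketch of how it could be factored into one shared helper; the helper name is hypothetical, everything else reuses the driver's own accessors:

static void bigmac_phy_reset(struct bigmac *bp, void __iomem *tregs)
{
	int timeout = 64;

	/* Isolate and power down, then request a reset. */
	bp->sw_bmcr = (BMCR_ISOLATE | BMCR_PDOWN | BMCR_LOOPBACK);
	bigmac_tcvr_write(bp, tregs, BIGMAC_BMCR, bp->sw_bmcr);
	bp->sw_bmcr = BMCR_RESET;
	bigmac_tcvr_write(bp, tregs, BIGMAC_BMCR, bp->sw_bmcr);

	/* Poll until the self-clearing reset bit drops, bounded. */
	while (--timeout) {
		bp->sw_bmcr = bigmac_tcvr_read(bp, tregs, BIGMAC_BMCR);
		if ((bp->sw_bmcr & BMCR_RESET) == 0)
			break;
		udelay(20);
	}
	if (timeout == 0)
		printk(KERN_ERR "%s: PHY reset failed.\n", bp->dev->name);
}
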
623 | |||
624 | static int bigmac_init_hw(struct bigmac *bp, int from_irq) | ||
625 | { | ||
626 | void __iomem *gregs = bp->gregs; | ||
627 | void __iomem *cregs = bp->creg; | ||
628 | void __iomem *bregs = bp->bregs; | ||
629 | unsigned char *e = &bp->dev->dev_addr[0]; | ||
630 | |||
631 | /* Latch current counters into statistics. */ | ||
632 | bigmac_get_counters(bp, bregs); | ||
633 | |||
634 | /* Reset QEC. */ | ||
635 | qec_global_reset(gregs); | ||
636 | |||
637 | /* Init QEC. */ | ||
638 | qec_init(bp); | ||
639 | |||
640 | /* Alloc and reset the tx/rx descriptor chains. */ | ||
641 | bigmac_init_rings(bp, from_irq); | ||
642 | |||
643 | /* Initialize the PHY. */ | ||
644 | bigmac_tcvr_init(bp); | ||
645 | |||
646 | /* Stop transmitter and receiver. */ | ||
647 | bigmac_stop(bp); | ||
648 | |||
649 | /* Set hardware ethernet address. */ | ||
650 | sbus_writel(((e[4] << 8) | e[5]), bregs + BMAC_MACADDR2); | ||
651 | sbus_writel(((e[2] << 8) | e[3]), bregs + BMAC_MACADDR1); | ||
652 | sbus_writel(((e[0] << 8) | e[1]), bregs + BMAC_MACADDR0); | ||
653 | |||
654 | /* Clear the hash table until mc upload occurs. */ | ||
655 | sbus_writel(0, bregs + BMAC_HTABLE3); | ||
656 | sbus_writel(0, bregs + BMAC_HTABLE2); | ||
657 | sbus_writel(0, bregs + BMAC_HTABLE1); | ||
658 | sbus_writel(0, bregs + BMAC_HTABLE0); | ||
659 | |||
660 | /* Enable Big Mac hash table filter. */ | ||
661 | sbus_writel(BIGMAC_RXCFG_HENABLE | BIGMAC_RXCFG_FIFO, | ||
662 | bregs + BMAC_RXCFG); | ||
663 | udelay(20); | ||
664 | |||
665 | /* Ok, configure the Big Mac transmitter. */ | ||
666 | sbus_writel(BIGMAC_TXCFG_FIFO, bregs + BMAC_TXCFG); | ||
667 | |||
668 | /* The HME docs recommend using the 10 LSBs of our MAC here. */ | ||
669 | sbus_writel(((e[5] | e[4] << 8) & 0x3ff), | ||
670 | bregs + BMAC_RSEED); | ||
671 | |||
672 | /* Enable the output drivers no matter what. */ | ||
673 | sbus_writel(BIGMAC_XCFG_ODENABLE | BIGMAC_XCFG_RESV, | ||
674 | bregs + BMAC_XIFCFG); | ||
675 | |||
676 | /* Tell the QEC where the ring descriptors are. */ | ||
677 | sbus_writel(bp->bblock_dvma + bib_offset(be_rxd, 0), | ||
678 | cregs + CREG_RXDS); | ||
679 | sbus_writel(bp->bblock_dvma + bib_offset(be_txd, 0), | ||
680 | cregs + CREG_TXDS); | ||
681 | |||
682 | /* Setup the FIFO pointers into QEC local memory. */ | ||
683 | sbus_writel(0, cregs + CREG_RXRBUFPTR); | ||
684 | sbus_writel(0, cregs + CREG_RXWBUFPTR); | ||
685 | sbus_writel(sbus_readl(gregs + GLOB_RSIZE), | ||
686 | cregs + CREG_TXRBUFPTR); | ||
687 | sbus_writel(sbus_readl(gregs + GLOB_RSIZE), | ||
688 | cregs + CREG_TXWBUFPTR); | ||
689 | |||
690 | /* Tell bigmac what interrupts we don't want to hear about. */ | ||
691 | sbus_writel(BIGMAC_IMASK_GOTFRAME | BIGMAC_IMASK_SENTFRAME, | ||
692 | bregs + BMAC_IMASK); | ||
693 | |||
694 | /* Enable the various other IRQs. */ | ||
695 | sbus_writel(0, cregs + CREG_RIMASK); | ||
696 | sbus_writel(0, cregs + CREG_TIMASK); | ||
697 | sbus_writel(0, cregs + CREG_QMASK); | ||
698 | sbus_writel(0, cregs + CREG_BMASK); | ||
699 | |||
700 | /* Set jam size to a reasonable default. */ | ||
701 | sbus_writel(DEFAULT_JAMSIZE, bregs + BMAC_JSIZE); | ||
702 | |||
703 | /* Clear collision counter. */ | ||
704 | sbus_writel(0, cregs + CREG_CCNT); | ||
705 | |||
706 | /* Enable transmitter and receiver. */ | ||
707 | sbus_writel(sbus_readl(bregs + BMAC_TXCFG) | BIGMAC_TXCFG_ENABLE, | ||
708 | bregs + BMAC_TXCFG); | ||
709 | sbus_writel(sbus_readl(bregs + BMAC_RXCFG) | BIGMAC_RXCFG_ENABLE, | ||
710 | bregs + BMAC_RXCFG); | ||
711 | |||
712 | /* Ok, start detecting link speed/duplex. */ | ||
713 | bigmac_begin_auto_negotiation(bp); | ||
714 | |||
715 | /* Success. */ | ||
716 | return 0; | ||
717 | } | ||
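
For reference, the register values the address and seed writes above would produce for a sample Sun-OUI MAC, 08:00:20:12:34:56 (the address itself is illustrative); a standalone check:

#include <stdio.h>

int main(void)
{
	unsigned char e[6] = { 0x08, 0x00, 0x20, 0x12, 0x34, 0x56 };

	/* Registers are loaded high half first, as in bigmac_init_hw(). */
	printf("BMAC_MACADDR0 = 0x%04x\n", (e[0] << 8) | e[1]); /* 0x0800 */
	printf("BMAC_MACADDR1 = 0x%04x\n", (e[2] << 8) | e[3]); /* 0x2012 */
	printf("BMAC_MACADDR2 = 0x%04x\n", (e[4] << 8) | e[5]); /* 0x3456 */
	/* Random-number seed: low 10 bits of the MAC, per the HME docs. */
	printf("BMAC_RSEED    = 0x%04x\n", (e[5] | e[4] << 8) & 0x3ff); /* 0x0056 */
	return 0;
}
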
718 | |||
719 | /* Error interrupts get sent here. */ | ||
720 | static void bigmac_is_medium_rare(struct bigmac *bp, u32 qec_status, u32 bmac_status) | ||
721 | { | ||
722 | printk(KERN_ERR "bigmac_is_medium_rare: "); | ||
723 | if (qec_status & (GLOB_STAT_ER | GLOB_STAT_BM)) { | ||
724 | if (qec_status & GLOB_STAT_ER) | ||
725 | printk("QEC_ERROR, "); | ||
726 | if (qec_status & GLOB_STAT_BM) | ||
727 | printk("QEC_BMAC_ERROR, "); | ||
728 | } | ||
729 | if (bmac_status & CREG_STAT_ERRORS) { | ||
730 | if (bmac_status & CREG_STAT_BERROR) | ||
731 | printk("BMAC_ERROR, "); | ||
732 | if (bmac_status & CREG_STAT_TXDERROR) | ||
733 | printk("TXD_ERROR, "); | ||
734 | if (bmac_status & CREG_STAT_TXLERR) | ||
735 | printk("TX_LATE_ERROR, "); | ||
736 | if (bmac_status & CREG_STAT_TXPERR) | ||
737 | printk("TX_PARITY_ERROR, "); | ||
738 | if (bmac_status & CREG_STAT_TXSERR) | ||
739 | printk("TX_SBUS_ERROR, "); | ||
740 | |||
741 | if (bmac_status & CREG_STAT_RXDROP) | ||
742 | printk("RX_DROP_ERROR, "); | ||
743 | |||
744 | if (bmac_status & CREG_STAT_RXSMALL) | ||
745 | printk("RX_SMALL_ERROR, "); | ||
746 | if (bmac_status & CREG_STAT_RXLERR) | ||
747 | printk("RX_LATE_ERROR, "); | ||
748 | if (bmac_status & CREG_STAT_RXPERR) | ||
749 | printk("RX_PARITY_ERROR, "); | ||
750 | if (bmac_status & CREG_STAT_RXSERR) | ||
751 | printk("RX_SBUS_ERROR, "); | ||
752 | } | ||
753 | |||
754 | printk(" RESET\n"); | ||
755 | bigmac_init_hw(bp, 1); | ||
756 | } | ||
757 | |||
758 | /* BigMAC transmit complete service routines. */ | ||
759 | static void bigmac_tx(struct bigmac *bp) | ||
760 | { | ||
761 | struct be_txd *txbase = &bp->bmac_block->be_txd[0]; | ||
762 | struct net_device *dev = bp->dev; | ||
763 | int elem; | ||
764 | |||
765 | spin_lock(&bp->lock); | ||
766 | |||
767 | elem = bp->tx_old; | ||
768 | DTX(("bigmac_tx: tx_old[%d] ", elem)); | ||
769 | while (elem != bp->tx_new) { | ||
770 | struct sk_buff *skb; | ||
771 | struct be_txd *this = &txbase[elem]; | ||
772 | |||
773 | DTX(("this(%p) [flags(%08x)addr(%08x)]", | ||
774 | this, this->tx_flags, this->tx_addr)); | ||
775 | |||
776 | if (this->tx_flags & TXD_OWN) | ||
777 | break; | ||
778 | skb = bp->tx_skbs[elem]; | ||
779 | bp->enet_stats.tx_packets++; | ||
780 | bp->enet_stats.tx_bytes += skb->len; | ||
781 | dma_unmap_single(&bp->bigmac_op->dev, | ||
782 | this->tx_addr, skb->len, | ||
783 | DMA_TO_DEVICE); | ||
784 | |||
785 | DTX(("skb(%p) ", skb)); | ||
786 | bp->tx_skbs[elem] = NULL; | ||
787 | dev_kfree_skb_irq(skb); | ||
788 | |||
789 | elem = NEXT_TX(elem); | ||
790 | } | ||
791 | DTX((" DONE, tx_old=%d\n", elem)); | ||
792 | bp->tx_old = elem; | ||
793 | |||
794 | if (netif_queue_stopped(dev) && | ||
795 | TX_BUFFS_AVAIL(bp) > 0) | ||
796 | netif_wake_queue(bp->dev); | ||
797 | |||
798 | spin_unlock(&bp->lock); | ||
799 | } | ||
800 | |||
801 | /* BigMAC receive complete service routines. */ | ||
802 | static void bigmac_rx(struct bigmac *bp) | ||
803 | { | ||
804 | struct be_rxd *rxbase = &bp->bmac_block->be_rxd[0]; | ||
805 | struct be_rxd *this; | ||
806 | int elem = bp->rx_new, drops = 0; | ||
807 | u32 flags; | ||
808 | |||
809 | this = &rxbase[elem]; | ||
810 | while (!((flags = this->rx_flags) & RXD_OWN)) { | ||
811 | struct sk_buff *skb; | ||
812 | int len = (flags & RXD_LENGTH); /* FCS not included */ | ||
813 | |||
814 | /* Check for errors. */ | ||
815 | if (len < ETH_ZLEN) { | ||
816 | bp->enet_stats.rx_errors++; | ||
817 | bp->enet_stats.rx_length_errors++; | ||
818 | |||
819 | drop_it: | ||
820 | /* Return it to the BigMAC. */ | ||
821 | bp->enet_stats.rx_dropped++; | ||
822 | this->rx_flags = | ||
823 | (RXD_OWN | ((RX_BUF_ALLOC_SIZE - 34) & RXD_LENGTH)); | ||
824 | goto next; | ||
825 | } | ||
826 | skb = bp->rx_skbs[elem]; | ||
827 | if (len > RX_COPY_THRESHOLD) { | ||
828 | struct sk_buff *new_skb; | ||
829 | |||
830 | /* Now refill the entry, if we can. */ | ||
831 | new_skb = big_mac_alloc_skb(RX_BUF_ALLOC_SIZE, GFP_ATOMIC); | ||
832 | if (new_skb == NULL) { | ||
833 | drops++; | ||
834 | goto drop_it; | ||
835 | } | ||
836 | dma_unmap_single(&bp->bigmac_op->dev, | ||
837 | this->rx_addr, | ||
838 | RX_BUF_ALLOC_SIZE - 34, | ||
839 | DMA_FROM_DEVICE); | ||
840 | bp->rx_skbs[elem] = new_skb; | ||
841 | new_skb->dev = bp->dev; | ||
842 | skb_put(new_skb, ETH_FRAME_LEN); | ||
843 | skb_reserve(new_skb, 34); | ||
844 | this->rx_addr = | ||
845 | dma_map_single(&bp->bigmac_op->dev, | ||
846 | new_skb->data, | ||
847 | RX_BUF_ALLOC_SIZE - 34, | ||
848 | DMA_FROM_DEVICE); | ||
849 | this->rx_flags = | ||
850 | (RXD_OWN | ((RX_BUF_ALLOC_SIZE - 34) & RXD_LENGTH)); | ||
851 | |||
852 | /* Trim the original skb for the netif. */ | ||
853 | skb_trim(skb, len); | ||
854 | } else { | ||
855 | struct sk_buff *copy_skb = dev_alloc_skb(len + 2); | ||
856 | |||
857 | if (copy_skb == NULL) { | ||
858 | drops++; | ||
859 | goto drop_it; | ||
860 | } | ||
861 | skb_reserve(copy_skb, 2); | ||
862 | skb_put(copy_skb, len); | ||
863 | dma_sync_single_for_cpu(&bp->bigmac_op->dev, | ||
864 | this->rx_addr, len, | ||
865 | DMA_FROM_DEVICE); | ||
866 | skb_copy_to_linear_data(copy_skb, (unsigned char *)skb->data, len); | ||
867 | dma_sync_single_for_device(&bp->bigmac_op->dev, | ||
868 | this->rx_addr, len, | ||
869 | DMA_FROM_DEVICE); | ||
870 | |||
871 | /* Reuse original ring buffer. */ | ||
872 | this->rx_flags = | ||
873 | (RXD_OWN | ((RX_BUF_ALLOC_SIZE - 34) & RXD_LENGTH)); | ||
874 | |||
875 | skb = copy_skb; | ||
876 | } | ||
877 | |||
878 | /* No checksums done by the BigMAC ;-( */ | ||
879 | skb->protocol = eth_type_trans(skb, bp->dev); | ||
880 | netif_rx(skb); | ||
881 | bp->enet_stats.rx_packets++; | ||
882 | bp->enet_stats.rx_bytes += len; | ||
883 | next: | ||
884 | elem = NEXT_RX(elem); | ||
885 | this = &rxbase[elem]; | ||
886 | } | ||
887 | bp->rx_new = elem; | ||
888 | if (drops) | ||
889 | printk(KERN_NOTICE "%s: Memory squeeze, deferring packet.\n", bp->dev->name); | ||
890 | } | ||
891 | |||
892 | static irqreturn_t bigmac_interrupt(int irq, void *dev_id) | ||
893 | { | ||
894 | struct bigmac *bp = (struct bigmac *) dev_id; | ||
895 | u32 qec_status, bmac_status; | ||
896 | |||
897 | DIRQ(("bigmac_interrupt: ")); | ||
898 | |||
899 | /* Latch status registers now. */ | ||
900 | bmac_status = sbus_readl(bp->creg + CREG_STAT); | ||
901 | qec_status = sbus_readl(bp->gregs + GLOB_STAT); | ||
902 | |||
903 | DIRQ(("qec_status=%08x bmac_status=%08x\n", qec_status, bmac_status)); | ||
904 | if ((qec_status & (GLOB_STAT_ER | GLOB_STAT_BM)) || | ||
905 | (bmac_status & CREG_STAT_ERRORS)) | ||
906 | bigmac_is_medium_rare(bp, qec_status, bmac_status); | ||
907 | |||
908 | if (bmac_status & CREG_STAT_TXIRQ) | ||
909 | bigmac_tx(bp); | ||
910 | |||
911 | if (bmac_status & CREG_STAT_RXIRQ) | ||
912 | bigmac_rx(bp); | ||
913 | |||
914 | return IRQ_HANDLED; | ||
915 | } | ||
916 | |||
917 | static int bigmac_open(struct net_device *dev) | ||
918 | { | ||
919 | struct bigmac *bp = netdev_priv(dev); | ||
920 | int ret; | ||
921 | |||
922 | ret = request_irq(dev->irq, bigmac_interrupt, IRQF_SHARED, dev->name, bp); | ||
923 | if (ret) { | ||
924 | printk(KERN_ERR "BIGMAC: Can't order irq %d to go.\n", dev->irq); | ||
925 | return ret; | ||
926 | } | ||
927 | init_timer(&bp->bigmac_timer); | ||
928 | ret = bigmac_init_hw(bp, 0); | ||
929 | if (ret) | ||
930 | free_irq(dev->irq, bp); | ||
931 | return ret; | ||
932 | } | ||
933 | |||
934 | static int bigmac_close(struct net_device *dev) | ||
935 | { | ||
936 | struct bigmac *bp = netdev_priv(dev); | ||
937 | |||
938 | del_timer(&bp->bigmac_timer); | ||
939 | bp->timer_state = asleep; | ||
940 | bp->timer_ticks = 0; | ||
941 | |||
942 | bigmac_stop(bp); | ||
943 | bigmac_clean_rings(bp); | ||
944 | free_irq(dev->irq, bp); | ||
945 | return 0; | ||
946 | } | ||
947 | |||
948 | static void bigmac_tx_timeout(struct net_device *dev) | ||
949 | { | ||
950 | struct bigmac *bp = netdev_priv(dev); | ||
951 | |||
952 | bigmac_init_hw(bp, 0); | ||
953 | netif_wake_queue(dev); | ||
954 | } | ||
955 | |||
956 | /* Put a packet on the wire. */ | ||
957 | static int bigmac_start_xmit(struct sk_buff *skb, struct net_device *dev) | ||
958 | { | ||
959 | struct bigmac *bp = netdev_priv(dev); | ||
960 | int len, entry; | ||
961 | u32 mapping; | ||
962 | |||
963 | len = skb->len; | ||
964 | mapping = dma_map_single(&bp->bigmac_op->dev, skb->data, | ||
965 | len, DMA_TO_DEVICE); | ||
966 | |||
967 | /* Avoid a race... */ | ||
968 | spin_lock_irq(&bp->lock); | ||
969 | entry = bp->tx_new; | ||
970 | DTX(("bigmac_start_xmit: len(%d) entry(%d)\n", len, entry)); | ||
971 | bp->bmac_block->be_txd[entry].tx_flags = TXD_UPDATE; | ||
972 | bp->tx_skbs[entry] = skb; | ||
973 | bp->bmac_block->be_txd[entry].tx_addr = mapping; | ||
974 | bp->bmac_block->be_txd[entry].tx_flags = | ||
975 | (TXD_OWN | TXD_SOP | TXD_EOP | (len & TXD_LENGTH)); | ||
976 | bp->tx_new = NEXT_TX(entry); | ||
977 | if (TX_BUFFS_AVAIL(bp) <= 0) | ||
978 | netif_stop_queue(dev); | ||
979 | spin_unlock_irq(&bp->lock); | ||
980 | |||
981 | /* Get it going. */ | ||
982 | sbus_writel(CREG_CTRL_TWAKEUP, bp->creg + CREG_CTRL); | ||
983 | |||
984 | |||
985 | return NETDEV_TX_OK; | ||
986 | } | ||
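
Note the ordering in the descriptor setup above: the slot is first marked TXD_UPDATE, the skb pointer and DMA address are recorded, and only then is the final flags word written with TXD_OWN set. The chip scans only for OWN, so it can never observe a half-filled descriptor. Condensed, with txd as a hypothetical shorthand for &bp->bmac_block->be_txd[entry]:

txd->tx_flags = TXD_UPDATE;                  /* 1: mark slot in flux      */
bp->tx_skbs[entry] = skb;                    /* 2: record skb for reclaim */
txd->tx_addr = mapping;                      /* 3: DMA address            */
txd->tx_flags = TXD_OWN | TXD_SOP | TXD_EOP |
                (len & TXD_LENGTH);          /* 4: publish to hardware    */
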
987 | |||
988 | static struct net_device_stats *bigmac_get_stats(struct net_device *dev) | ||
989 | { | ||
990 | struct bigmac *bp = netdev_priv(dev); | ||
991 | |||
992 | bigmac_get_counters(bp, bp->bregs); | ||
993 | return &bp->enet_stats; | ||
994 | } | ||
995 | |||
996 | static void bigmac_set_multicast(struct net_device *dev) | ||
997 | { | ||
998 | struct bigmac *bp = netdev_priv(dev); | ||
999 | void __iomem *bregs = bp->bregs; | ||
1000 | struct netdev_hw_addr *ha; | ||
1001 | int i; | ||
1002 | u32 tmp, crc; | ||
1003 | |||
1004 | /* Disable the receiver. The bit self-clears when | ||
1005 | * the operation is complete. | ||
1006 | */ | ||
1007 | tmp = sbus_readl(bregs + BMAC_RXCFG); | ||
1008 | tmp &= ~(BIGMAC_RXCFG_ENABLE); | ||
1009 | sbus_writel(tmp, bregs + BMAC_RXCFG); | ||
1010 | while ((sbus_readl(bregs + BMAC_RXCFG) & BIGMAC_RXCFG_ENABLE) != 0) | ||
1011 | udelay(20); | ||
1012 | |||
1013 | if ((dev->flags & IFF_ALLMULTI) || (netdev_mc_count(dev) > 64)) { | ||
1014 | sbus_writel(0xffff, bregs + BMAC_HTABLE0); | ||
1015 | sbus_writel(0xffff, bregs + BMAC_HTABLE1); | ||
1016 | sbus_writel(0xffff, bregs + BMAC_HTABLE2); | ||
1017 | sbus_writel(0xffff, bregs + BMAC_HTABLE3); | ||
1018 | } else if (dev->flags & IFF_PROMISC) { | ||
1019 | tmp = sbus_readl(bregs + BMAC_RXCFG); | ||
1020 | tmp |= BIGMAC_RXCFG_PMISC; | ||
1021 | sbus_writel(tmp, bregs + BMAC_RXCFG); | ||
1022 | } else { | ||
1023 | u16 hash_table[4]; | ||
1024 | |||
1025 | for (i = 0; i < 4; i++) | ||
1026 | hash_table[i] = 0; | ||
1027 | |||
1028 | netdev_for_each_mc_addr(ha, dev) { | ||
1029 | crc = ether_crc_le(6, ha->addr); | ||
1030 | crc >>= 26; | ||
1031 | hash_table[crc >> 4] |= 1 << (crc & 0xf); | ||
1032 | } | ||
1033 | sbus_writel(hash_table[0], bregs + BMAC_HTABLE0); | ||
1034 | sbus_writel(hash_table[1], bregs + BMAC_HTABLE1); | ||
1035 | sbus_writel(hash_table[2], bregs + BMAC_HTABLE2); | ||
1036 | sbus_writel(hash_table[3], bregs + BMAC_HTABLE3); | ||
1037 | } | ||
1038 | |||
1039 | /* Re-enable the receiver. */ | ||
1040 | tmp = sbus_readl(bregs + BMAC_RXCFG); | ||
1041 | tmp |= BIGMAC_RXCFG_ENABLE; | ||
1042 | sbus_writel(tmp, bregs + BMAC_RXCFG); | ||
1043 | } | ||
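
The hash filter above maps each multicast address to one of 64 bits: the top six bits of a little-endian CRC-32 select a bucket, whose upper two bits pick one of the four 16-bit HTABLE registers and whose lower four bits pick the bit within it. A standalone sketch of the bucket computation (the CRC routine mirrors the kernel's bit-serial ether_crc_le()):

#include <stdio.h>

typedef unsigned int u32;
typedef unsigned char u8;

/* Bit-serial little-endian CRC-32, as in the kernel's ether_crc_le(). */
static u32 ether_crc_le(int length, const u8 *data)
{
	u32 crc = 0xffffffff;

	while (length--) {
		u8 byte = *data++;
		int bit;

		for (bit = 0; bit < 8; bit++, byte >>= 1) {
			if ((crc ^ byte) & 1)
				crc = (crc >> 1) ^ 0xedb88320;
			else
				crc >>= 1;
		}
	}
	return crc;
}

int main(void)
{
	/* 01:00:5e:00:00:01, the all-hosts IPv4 multicast group. */
	u8 addr[6] = { 0x01, 0x00, 0x5e, 0x00, 0x00, 0x01 };
	u32 crc = ether_crc_le(6, addr) >> 26;	/* top 6 bits: 0..63 */

	printf("bucket %u -> BMAC_HTABLE%u, bit %u\n",
	       crc, crc >> 4, crc & 0xf);
	return 0;
}
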
1044 | |||
1045 | /* Ethtool support... */ | ||
1046 | static void bigmac_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info) | ||
1047 | { | ||
1048 | strcpy(info->driver, "sunbmac"); | ||
1049 | strcpy(info->version, "2.0"); | ||
1050 | } | ||
1051 | |||
1052 | static u32 bigmac_get_link(struct net_device *dev) | ||
1053 | { | ||
1054 | struct bigmac *bp = netdev_priv(dev); | ||
1055 | |||
1056 | spin_lock_irq(&bp->lock); | ||
1057 | bp->sw_bmsr = bigmac_tcvr_read(bp, bp->tregs, BIGMAC_BMSR); | ||
1058 | spin_unlock_irq(&bp->lock); | ||
1059 | |||
1060 | return (bp->sw_bmsr & BMSR_LSTATUS); | ||
1061 | } | ||
1062 | |||
1063 | static const struct ethtool_ops bigmac_ethtool_ops = { | ||
1064 | .get_drvinfo = bigmac_get_drvinfo, | ||
1065 | .get_link = bigmac_get_link, | ||
1066 | }; | ||
1067 | |||
1068 | static const struct net_device_ops bigmac_ops = { | ||
1069 | .ndo_open = bigmac_open, | ||
1070 | .ndo_stop = bigmac_close, | ||
1071 | .ndo_start_xmit = bigmac_start_xmit, | ||
1072 | .ndo_get_stats = bigmac_get_stats, | ||
1073 | .ndo_set_multicast_list = bigmac_set_multicast, | ||
1074 | .ndo_tx_timeout = bigmac_tx_timeout, | ||
1075 | .ndo_change_mtu = eth_change_mtu, | ||
1076 | .ndo_set_mac_address = eth_mac_addr, | ||
1077 | .ndo_validate_addr = eth_validate_addr, | ||
1078 | }; | ||
1079 | |||
1080 | static int __devinit bigmac_ether_init(struct platform_device *op, | ||
1081 | struct platform_device *qec_op) | ||
1082 | { | ||
1083 | static int version_printed; | ||
1084 | struct net_device *dev; | ||
1085 | u8 bsizes, bsizes_more; | ||
1086 | struct bigmac *bp; | ||
1087 | int i; | ||
1088 | |||
1089 | /* Get a new device struct for this interface. */ | ||
1090 | dev = alloc_etherdev(sizeof(struct bigmac)); | ||
1091 | if (!dev) | ||
1092 | return -ENOMEM; | ||
1093 | |||
1094 | if (version_printed++ == 0) | ||
1095 | printk(KERN_INFO "%s", version); | ||
1096 | |||
1097 | for (i = 0; i < 6; i++) | ||
1098 | dev->dev_addr[i] = idprom->id_ethaddr[i]; | ||
1099 | |||
1100 | /* Set up the softc, with backpointers to QEC and BigMAC SBUS device structs. */ | ||
1101 | bp = netdev_priv(dev); | ||
1102 | bp->qec_op = qec_op; | ||
1103 | bp->bigmac_op = op; | ||
1104 | |||
1105 | SET_NETDEV_DEV(dev, &op->dev); | ||
1106 | |||
1107 | spin_lock_init(&bp->lock); | ||
1108 | |||
1109 | /* Map in QEC global control registers. */ | ||
1110 | bp->gregs = of_ioremap(&qec_op->resource[0], 0, | ||
1111 | GLOB_REG_SIZE, "BigMAC QEC GLobal Regs"); | ||
1112 | if (!bp->gregs) { | ||
1113 | printk(KERN_ERR "BIGMAC: Cannot map QEC global registers.\n"); | ||
1114 | goto fail_and_cleanup; | ||
1115 | } | ||
1116 | |||
1117 | /* Make sure QEC is in BigMAC mode. */ | ||
1118 | if ((sbus_readl(bp->gregs + GLOB_CTRL) & 0xf0000000) != GLOB_CTRL_BMODE) { | ||
1119 | printk(KERN_ERR "BigMAC: AIEEE, QEC is not in BigMAC mode!\n"); | ||
1120 | goto fail_and_cleanup; | ||
1121 | } | ||
1122 | |||
1123 | /* Reset the QEC. */ | ||
1124 | if (qec_global_reset(bp->gregs)) | ||
1125 | goto fail_and_cleanup; | ||
1126 | |||
1127 | /* Get supported SBUS burst sizes. */ | ||
1128 | bsizes = of_getintprop_default(qec_op->dev.of_node, "burst-sizes", 0xff); | ||
1129 | bsizes_more = of_getintprop_default(qec_op->dev.of_node, "burst-sizes", 0xff); | ||
1130 | |||
1131 | bsizes &= 0xff; | ||
1132 | if (bsizes_more != 0xff) | ||
1133 | bsizes &= bsizes_more; | ||
1134 | if (bsizes == 0xff || (bsizes & DMA_BURST16) == 0 || | ||
1135 | (bsizes & DMA_BURST32) == 0) | ||
1136 | bsizes = (DMA_BURST32 - 1); | ||
1137 | bp->bigmac_bursts = bsizes; | ||
1138 | |||
1139 | /* Perform QEC initialization. */ | ||
1140 | qec_init(bp); | ||
1141 | |||
1142 | /* Map in the BigMAC channel registers. */ | ||
1143 | bp->creg = of_ioremap(&op->resource[0], 0, | ||
1144 | CREG_REG_SIZE, "BigMAC QEC Channel Regs"); | ||
1145 | if (!bp->creg) { | ||
1146 | printk(KERN_ERR "BIGMAC: Cannot map QEC channel registers.\n"); | ||
1147 | goto fail_and_cleanup; | ||
1148 | } | ||
1149 | |||
1150 | /* Map in the BigMAC control registers. */ | ||
1151 | bp->bregs = of_ioremap(&op->resource[1], 0, | ||
1152 | BMAC_REG_SIZE, "BigMAC Primary Regs"); | ||
1153 | if (!bp->bregs) { | ||
1154 | printk(KERN_ERR "BIGMAC: Cannot map BigMAC primary registers.\n"); | ||
1155 | goto fail_and_cleanup; | ||
1156 | } | ||
1157 | |||
1158 | /* Map in the BigMAC transceiver registers; this is how you poke at | ||
1159 | * the BigMAC's PHY. | ||
1160 | */ | ||
1161 | bp->tregs = of_ioremap(&op->resource[2], 0, | ||
1162 | TCVR_REG_SIZE, "BigMAC Transceiver Regs"); | ||
1163 | if (!bp->tregs) { | ||
1164 | printk(KERN_ERR "BIGMAC: Cannot map BigMAC transceiver registers.\n"); | ||
1165 | goto fail_and_cleanup; | ||
1166 | } | ||
1167 | |||
1168 | /* Stop the BigMAC. */ | ||
1169 | bigmac_stop(bp); | ||
1170 | |||
1171 | /* Allocate transmit/receive descriptor DVMA block. */ | ||
1172 | bp->bmac_block = dma_alloc_coherent(&bp->bigmac_op->dev, | ||
1173 | PAGE_SIZE, | ||
1174 | &bp->bblock_dvma, GFP_ATOMIC); | ||
1175 | if (bp->bmac_block == NULL || bp->bblock_dvma == 0) { | ||
1176 | printk(KERN_ERR "BIGMAC: Cannot allocate consistent DMA.\n"); | ||
1177 | goto fail_and_cleanup; | ||
1178 | } | ||
1179 | |||
1180 | /* Get the board revision of this BigMAC. */ | ||
1181 | bp->board_rev = of_getintprop_default(bp->bigmac_op->dev.of_node, | ||
1182 | "board-version", 1); | ||
1183 | |||
1184 | /* Init auto-negotiation timer state. */ | ||
1185 | init_timer(&bp->bigmac_timer); | ||
1186 | bp->timer_state = asleep; | ||
1187 | bp->timer_ticks = 0; | ||
1188 | |||
1189 | /* Backlink to generic net device struct. */ | ||
1190 | bp->dev = dev; | ||
1191 | |||
1192 | /* Set links to our BigMAC open and close routines. */ | ||
1193 | dev->ethtool_ops = &bigmac_ethtool_ops; | ||
1194 | dev->netdev_ops = &bigmac_ops; | ||
1195 | dev->watchdog_timeo = 5*HZ; | ||
1196 | |||
1197 | /* Finish net device registration. */ | ||
1198 | dev->irq = bp->bigmac_op->archdata.irqs[0]; | ||
1199 | dev->dma = 0; | ||
1200 | |||
1201 | if (register_netdev(dev)) { | ||
1202 | printk(KERN_ERR "BIGMAC: Cannot register device.\n"); | ||
1203 | goto fail_and_cleanup; | ||
1204 | } | ||
1205 | |||
1206 | dev_set_drvdata(&bp->bigmac_op->dev, bp); | ||
1207 | |||
1208 | printk(KERN_INFO "%s: BigMAC 100baseT Ethernet %pM\n", | ||
1209 | dev->name, dev->dev_addr); | ||
1210 | |||
1211 | return 0; | ||
1212 | |||
1213 | fail_and_cleanup: | ||
1214 | /* Something went wrong, undo whatever we did so far. */ | ||
1215 | /* Free register mappings if any. */ | ||
1216 | if (bp->gregs) | ||
1217 | of_iounmap(&qec_op->resource[0], bp->gregs, GLOB_REG_SIZE); | ||
1218 | if (bp->creg) | ||
1219 | of_iounmap(&op->resource[0], bp->creg, CREG_REG_SIZE); | ||
1220 | if (bp->bregs) | ||
1221 | of_iounmap(&op->resource[1], bp->bregs, BMAC_REG_SIZE); | ||
1222 | if (bp->tregs) | ||
1223 | of_iounmap(&op->resource[2], bp->tregs, TCVR_REG_SIZE); | ||
1224 | |||
1225 | if (bp->bmac_block) | ||
1226 | dma_free_coherent(&bp->bigmac_op->dev, | ||
1227 | PAGE_SIZE, | ||
1228 | bp->bmac_block, | ||
1229 | bp->bblock_dvma); | ||
1230 | |||
1231 | /* This also frees the co-located private data */ | ||
1232 | free_netdev(dev); | ||
1233 | return -ENODEV; | ||
1234 | } | ||
1235 | |||
1236 | /* QEC can be the parent of either a QuadEthernet or a BigMAC. We want | ||
1237 | * the latter. | ||
1238 | */ | ||
1239 | static int __devinit bigmac_sbus_probe(struct platform_device *op) | ||
1240 | { | ||
1241 | struct device *parent = op->dev.parent; | ||
1242 | struct platform_device *qec_op; | ||
1243 | |||
1244 | qec_op = to_platform_device(parent); | ||
1245 | |||
1246 | return bigmac_ether_init(op, qec_op); | ||
1247 | } | ||
1248 | |||
1249 | static int __devexit bigmac_sbus_remove(struct platform_device *op) | ||
1250 | { | ||
1251 | struct bigmac *bp = dev_get_drvdata(&op->dev); | ||
1252 | struct device *parent = op->dev.parent; | ||
1253 | struct net_device *net_dev = bp->dev; | ||
1254 | struct platform_device *qec_op; | ||
1255 | |||
1256 | qec_op = to_platform_device(parent); | ||
1257 | |||
1258 | unregister_netdev(net_dev); | ||
1259 | |||
1260 | of_iounmap(&qec_op->resource[0], bp->gregs, GLOB_REG_SIZE); | ||
1261 | of_iounmap(&op->resource[0], bp->creg, CREG_REG_SIZE); | ||
1262 | of_iounmap(&op->resource[1], bp->bregs, BMAC_REG_SIZE); | ||
1263 | of_iounmap(&op->resource[2], bp->tregs, TCVR_REG_SIZE); | ||
1264 | dma_free_coherent(&op->dev, | ||
1265 | PAGE_SIZE, | ||
1266 | bp->bmac_block, | ||
1267 | bp->bblock_dvma); | ||
1268 | |||
1269 | free_netdev(net_dev); | ||
1270 | |||
1271 | dev_set_drvdata(&op->dev, NULL); | ||
1272 | |||
1273 | return 0; | ||
1274 | } | ||
1275 | |||
1276 | static const struct of_device_id bigmac_sbus_match[] = { | ||
1277 | { | ||
1278 | .name = "be", | ||
1279 | }, | ||
1280 | {}, | ||
1281 | }; | ||
1282 | |||
1283 | MODULE_DEVICE_TABLE(of, bigmac_sbus_match); | ||
1284 | |||
1285 | static struct platform_driver bigmac_sbus_driver = { | ||
1286 | .driver = { | ||
1287 | .name = "sunbmac", | ||
1288 | .owner = THIS_MODULE, | ||
1289 | .of_match_table = bigmac_sbus_match, | ||
1290 | }, | ||
1291 | .probe = bigmac_sbus_probe, | ||
1292 | .remove = __devexit_p(bigmac_sbus_remove), | ||
1293 | }; | ||
1294 | |||
1295 | static int __init bigmac_init(void) | ||
1296 | { | ||
1297 | return platform_driver_register(&bigmac_sbus_driver); | ||
1298 | } | ||
1299 | |||
1300 | static void __exit bigmac_exit(void) | ||
1301 | { | ||
1302 | platform_driver_unregister(&bigmac_sbus_driver); | ||
1303 | } | ||
1304 | |||
1305 | module_init(bigmac_init); | ||
1306 | module_exit(bigmac_exit); | ||
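
The init/exit pair above is the standard registration boilerplate of this era; on kernels that later grew the module_platform_driver() helper, the whole tail of the file reduces to a single line:

module_platform_driver(bigmac_sbus_driver);
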
diff --git a/drivers/net/ethernet/sun/sunbmac.h b/drivers/net/ethernet/sun/sunbmac.h new file mode 100644 index 000000000000..4943e975a731 --- /dev/null +++ b/drivers/net/ethernet/sun/sunbmac.h | |||
@@ -0,0 +1,355 @@ | |||
1 | /* $Id: sunbmac.h,v 1.7 2000/07/11 22:35:22 davem Exp $ | ||
2 | * sunbmac.h: Defines for the Sun "Big MAC" 100baseT ethernet cards. | ||
3 | * | ||
4 | * Copyright (C) 1997 David S. Miller (davem@caip.rutgers.edu) | ||
5 | */ | ||
6 | |||
7 | #ifndef _SUNBMAC_H | ||
8 | #define _SUNBMAC_H | ||
9 | |||
10 | /* QEC global registers. */ | ||
11 | #define GLOB_CTRL 0x00UL /* Control */ | ||
12 | #define GLOB_STAT 0x04UL /* Status */ | ||
13 | #define GLOB_PSIZE 0x08UL /* Packet Size */ | ||
14 | #define GLOB_MSIZE 0x0cUL /* Local-mem size (64K) */ | ||
15 | #define GLOB_RSIZE 0x10UL /* Receive partition size */ | ||
16 | #define GLOB_TSIZE 0x14UL /* Transmit partition size */ | ||
17 | #define GLOB_REG_SIZE 0x18UL | ||
18 | |||
19 | #define GLOB_CTRL_MMODE 0x40000000 /* MACE qec mode */ | ||
20 | #define GLOB_CTRL_BMODE 0x10000000 /* BigMAC qec mode */ | ||
21 | #define GLOB_CTRL_EPAR 0x00000020 /* Enable parity */ | ||
22 | #define GLOB_CTRL_ACNTRL 0x00000018 /* SBUS arbitration control */ | ||
23 | #define GLOB_CTRL_B64 0x00000004 /* 64 byte dvma bursts */ | ||
24 | #define GLOB_CTRL_B32 0x00000002 /* 32 byte dvma bursts */ | ||
25 | #define GLOB_CTRL_B16 0x00000000 /* 16 byte dvma bursts */ | ||
26 | #define GLOB_CTRL_RESET 0x00000001 /* Reset the QEC */ | ||
27 | |||
28 | #define GLOB_STAT_TX 0x00000008 /* BigMAC Transmit IRQ */ | ||
29 | #define GLOB_STAT_RX 0x00000004 /* BigMAC Receive IRQ */ | ||
30 | #define GLOB_STAT_BM 0x00000002 /* BigMAC Global IRQ */ | ||
31 | #define GLOB_STAT_ER 0x00000001 /* BigMAC Error IRQ */ | ||
32 | |||
33 | #define GLOB_PSIZE_2048 0x00 /* 2k packet size */ | ||
34 | #define GLOB_PSIZE_4096 0x01 /* 4k packet size */ | ||
35 | #define GLOB_PSIZE_6144 0x10 /* 6k packet size */ | ||
36 | #define GLOB_PSIZE_8192 0x11 /* 8k packet size */ | ||
37 | |||
38 | /* QEC BigMAC channel registers. */ | ||
39 | #define CREG_CTRL 0x00UL /* Control */ | ||
40 | #define CREG_STAT 0x04UL /* Status */ | ||
41 | #define CREG_RXDS 0x08UL /* RX descriptor ring ptr */ | ||
42 | #define CREG_TXDS 0x0cUL /* TX descriptor ring ptr */ | ||
43 | #define CREG_RIMASK 0x10UL /* RX Interrupt Mask */ | ||
44 | #define CREG_TIMASK 0x14UL /* TX Interrupt Mask */ | ||
45 | #define CREG_QMASK 0x18UL /* QEC Error Interrupt Mask */ | ||
46 | #define CREG_BMASK 0x1cUL /* BigMAC Error Interrupt Mask*/ | ||
47 | #define CREG_RXWBUFPTR 0x20UL /* Local memory rx write ptr */ | ||
48 | #define CREG_RXRBUFPTR 0x24UL /* Local memory rx read ptr */ | ||
49 | #define CREG_TXWBUFPTR 0x28UL /* Local memory tx write ptr */ | ||
50 | #define CREG_TXRBUFPTR 0x2cUL /* Local memory tx read ptr */ | ||
51 | #define CREG_CCNT 0x30UL /* Collision Counter */ | ||
52 | #define CREG_REG_SIZE 0x34UL | ||
53 | |||
54 | #define CREG_CTRL_TWAKEUP 0x00000001 /* Transmitter Wakeup, 'go'. */ | ||
55 | |||
56 | #define CREG_STAT_BERROR 0x80000000 /* BigMAC error */ | ||
57 | #define CREG_STAT_TXIRQ 0x00200000 /* Transmit Interrupt */ | ||
58 | #define CREG_STAT_TXDERROR 0x00080000 /* TX Descriptor is bogus */ | ||
59 | #define CREG_STAT_TXLERR 0x00040000 /* Late Transmit Error */ | ||
60 | #define CREG_STAT_TXPERR 0x00020000 /* Transmit Parity Error */ | ||
61 | #define CREG_STAT_TXSERR 0x00010000 /* Transmit SBUS error ack */ | ||
62 | #define CREG_STAT_RXIRQ 0x00000020 /* Receive Interrupt */ | ||
63 | #define CREG_STAT_RXDROP 0x00000010 /* Dropped a RX'd packet */ | ||
64 | #define CREG_STAT_RXSMALL 0x00000008 /* Receive buffer too small */ | ||
65 | #define CREG_STAT_RXLERR 0x00000004 /* Receive Late Error */ | ||
66 | #define CREG_STAT_RXPERR 0x00000002 /* Receive Parity Error */ | ||
67 | #define CREG_STAT_RXSERR 0x00000001 /* Receive SBUS Error ACK */ | ||
68 | |||
69 | #define CREG_STAT_ERRORS (CREG_STAT_BERROR|CREG_STAT_TXDERROR|CREG_STAT_TXLERR| \ | ||
70 | CREG_STAT_TXPERR|CREG_STAT_TXSERR|CREG_STAT_RXDROP| \ | ||
71 | CREG_STAT_RXSMALL|CREG_STAT_RXLERR|CREG_STAT_RXPERR| \ | ||
72 | CREG_STAT_RXSERR) | ||
73 | |||
74 | #define CREG_QMASK_TXDERROR 0x00080000 /* TXD error */ | ||
75 | #define CREG_QMASK_TXLERR 0x00040000 /* TX late error */ | ||
76 | #define CREG_QMASK_TXPERR 0x00020000 /* TX parity error */ | ||
77 | #define CREG_QMASK_TXSERR 0x00010000 /* TX sbus error ack */ | ||
78 | #define CREG_QMASK_RXDROP 0x00000010 /* RX drop */ | ||
79 | #define CREG_QMASK_RXBERROR 0x00000008 /* RX buffer error */ | ||
80 | #define CREG_QMASK_RXLEERR 0x00000004 /* RX late error */ | ||
81 | #define CREG_QMASK_RXPERR 0x00000002 /* RX parity error */ | ||
82 | #define CREG_QMASK_RXSERR 0x00000001 /* RX sbus error ack */ | ||
83 | |||
84 | /* BIGMAC core registers */ | ||
85 | #define BMAC_XIFCFG 0x000UL /* XIF config register */ | ||
86 | /* 0x004-->0x0fc, reserved */ | ||
87 | #define BMAC_STATUS 0x100UL /* Status register, clear on read */ | ||
88 | #define BMAC_IMASK 0x104UL /* Interrupt mask register */ | ||
89 | /* 0x108-->0x204, reserved */ | ||
90 | #define BMAC_TXSWRESET 0x208UL /* Transmitter software reset */ | ||
91 | #define BMAC_TXCFG 0x20cUL /* Transmitter config register */ | ||
92 | #define BMAC_IGAP1 0x210UL /* Inter-packet gap 1 */ | ||
93 | #define BMAC_IGAP2 0x214UL /* Inter-packet gap 2 */ | ||
94 | #define BMAC_ALIMIT 0x218UL /* Transmit attempt limit */ | ||
95 | #define BMAC_STIME 0x21cUL /* Transmit slot time */ | ||
96 | #define BMAC_PLEN 0x220UL /* Size of transmit preamble */ | ||
97 | #define BMAC_PPAT 0x224UL /* Pattern for transmit preamble */ | ||
98 | #define BMAC_TXDELIM 0x228UL /* Transmit delimiter */ | ||
99 | #define BMAC_JSIZE 0x22cUL /* Transmit jam size */ | ||
100 | #define BMAC_TXPMAX 0x230UL /* Transmit max pkt size */ | ||
101 | #define BMAC_TXPMIN 0x234UL /* Transmit min pkt size */ | ||
102 | #define BMAC_PATTEMPT 0x238UL /* Count of transmit peak attempts */ | ||
103 | #define BMAC_DTCTR 0x23cUL /* Transmit defer timer */ | ||
104 | #define BMAC_NCCTR 0x240UL /* Transmit normal-collision counter */ | ||
105 | #define BMAC_FCCTR 0x244UL /* Transmit first-collision counter */ | ||
106 | #define BMAC_EXCTR 0x248UL /* Transmit excess-collision counter */ | ||
107 | #define BMAC_LTCTR 0x24cUL /* Transmit late-collision counter */ | ||
108 | #define BMAC_RSEED 0x250UL /* Transmit random number seed */ | ||
109 | #define BMAC_TXSMACHINE 0x254UL /* Transmit state machine */ | ||
110 | /* 0x258-->0x304, reserved */ | ||
111 | #define BMAC_RXSWRESET 0x308UL /* Receiver software reset */ | ||
112 | #define BMAC_RXCFG 0x30cUL /* Receiver config register */ | ||
113 | #define BMAC_RXPMAX 0x310UL /* Receive max pkt size */ | ||
114 | #define BMAC_RXPMIN 0x314UL /* Receive min pkt size */ | ||
115 | #define BMAC_MACADDR2 0x318UL /* Ether address register 2 */ | ||
116 | #define BMAC_MACADDR1 0x31cUL /* Ether address register 1 */ | ||
117 | #define BMAC_MACADDR0 0x320UL /* Ether address register 0 */ | ||
118 | #define BMAC_FRCTR 0x324UL /* Receive frame receive counter */ | ||
119 | #define BMAC_GLECTR 0x328UL /* Receive giant-length error counter */ | ||
120 | #define BMAC_UNALECTR 0x32cUL /* Receive unaligned error counter */ | ||
121 | #define BMAC_RCRCECTR 0x330UL /* Receive CRC error counter */ | ||
122 | #define BMAC_RXSMACHINE 0x334UL /* Receiver state machine */ | ||
123 | #define BMAC_RXCVALID 0x338UL /* Receiver code violation */ | ||
124 | /* 0x33c, reserved */ | ||
125 | #define BMAC_HTABLE3 0x340UL /* Hash table 3 */ | ||
126 | #define BMAC_HTABLE2 0x344UL /* Hash table 2 */ | ||
127 | #define BMAC_HTABLE1 0x348UL /* Hash table 1 */ | ||
128 | #define BMAC_HTABLE0 0x34cUL /* Hash table 0 */ | ||
129 | #define BMAC_AFILTER2 0x350UL /* Address filter 2 */ | ||
130 | #define BMAC_AFILTER1 0x354UL /* Address filter 1 */ | ||
131 | #define BMAC_AFILTER0 0x358UL /* Address filter 0 */ | ||
132 | #define BMAC_AFMASK 0x35cUL /* Address filter mask */ | ||
133 | #define BMAC_REG_SIZE 0x360UL | ||
134 | |||
135 | /* BigMac XIF config register. */ | ||
136 | #define BIGMAC_XCFG_ODENABLE 0x00000001 /* Output driver enable */ | ||
137 | #define BIGMAC_XCFG_RESV 0x00000002 /* Reserved, write always as 1 */ | ||
138 | #define BIGMAC_XCFG_MLBACK 0x00000004 /* Loopback-mode MII enable */ | ||
139 | #define BIGMAC_XCFG_SMODE 0x00000008 /* Enable serial mode */ | ||
140 | |||
141 | /* BigMAC status register. */ | ||
142 | #define BIGMAC_STAT_GOTFRAME 0x00000001 /* Received a frame */ | ||
143 | #define BIGMAC_STAT_RCNTEXP 0x00000002 /* Receive frame counter expired */ | ||
144 | #define BIGMAC_STAT_ACNTEXP 0x00000004 /* Align-error counter expired */ | ||
145 | #define BIGMAC_STAT_CCNTEXP 0x00000008 /* CRC-error counter expired */ | ||
146 | #define BIGMAC_STAT_LCNTEXP 0x00000010 /* Length-error counter expired */ | ||
147 | #define BIGMAC_STAT_RFIFOVF 0x00000020 /* Receive FIFO overflow */ | ||
148 | #define BIGMAC_STAT_CVCNTEXP 0x00000040 /* Code-violation counter expired */ | ||
149 | #define BIGMAC_STAT_SENTFRAME 0x00000100 /* Transmitted a frame */ | ||
150 | #define BIGMAC_STAT_TFIFO_UND 0x00000200 /* Transmit FIFO underrun */ | ||
151 | #define BIGMAC_STAT_MAXPKTERR 0x00000400 /* Max-packet size error */ | ||
152 | #define BIGMAC_STAT_NCNTEXP 0x00000800 /* Normal-collision counter expired */ | ||
153 | #define BIGMAC_STAT_ECNTEXP 0x00001000 /* Excess-collision counter expired */ | ||
154 | #define BIGMAC_STAT_LCCNTEXP 0x00002000 /* Late-collision counter expired */ | ||
155 | #define BIGMAC_STAT_FCNTEXP 0x00004000 /* First-collision counter expired */ | ||
156 | #define BIGMAC_STAT_DTIMEXP 0x00008000 /* Defer-timer expired */ | ||
157 | |||
158 | /* BigMAC interrupt mask register. */ | ||
159 | #define BIGMAC_IMASK_GOTFRAME 0x00000001 /* Received a frame */ | ||
160 | #define BIGMAC_IMASK_RCNTEXP 0x00000002 /* Receive frame counter expired */ | ||
161 | #define BIGMAC_IMASK_ACNTEXP 0x00000004 /* Align-error counter expired */ | ||
162 | #define BIGMAC_IMASK_CCNTEXP 0x00000008 /* CRC-error counter expired */ | ||
163 | #define BIGMAC_IMASK_LCNTEXP 0x00000010 /* Length-error counter expired */ | ||
164 | #define BIGMAC_IMASK_RFIFOVF 0x00000020 /* Receive FIFO overflow */ | ||
165 | #define BIGMAC_IMASK_CVCNTEXP 0x00000040 /* Code-violation counter expired */ | ||
166 | #define BIGMAC_IMASK_SENTFRAME 0x00000100 /* Transmitted a frame */ | ||
167 | #define BIGMAC_IMASK_TFIFO_UND 0x00000200 /* Transmit FIFO underrun */ | ||
168 | #define BIGMAC_IMASK_MAXPKTERR 0x00000400 /* Max-packet size error */ | ||
169 | #define BIGMAC_IMASK_NCNTEXP 0x00000800 /* Normal-collision counter expired */ | ||
170 | #define BIGMAC_IMASK_ECNTEXP 0x00001000 /* Excess-collision counter expired */ | ||
171 | #define BIGMAC_IMASK_LCCNTEXP 0x00002000 /* Late-collision counter expired */ | ||
172 | #define BIGMAC_IMASK_FCNTEXP 0x00004000 /* First-collision counter expired */ | ||
173 | #define BIGMAC_IMASK_DTIMEXP 0x00008000 /* Defer-timer expired */ | ||
174 | |||
175 | /* BigMac transmit config register. */ | ||
176 | #define BIGMAC_TXCFG_ENABLE 0x00000001 /* Enable the transmitter */ | ||
177 | #define BIGMAC_TXCFG_FIFO 0x00000010 /* Default tx fthresh... */ | ||
178 | #define BIGMAC_TXCFG_SMODE 0x00000020 /* Enable slow transmit mode */ | ||
179 | #define BIGMAC_TXCFG_CIGN 0x00000040 /* Ignore transmit collisions */ | ||
180 | #define BIGMAC_TXCFG_FCSOFF 0x00000080 /* Do not emit FCS */ | ||
181 | #define BIGMAC_TXCFG_DBACKOFF 0x00000100 /* Disable backoff */ | ||
182 | #define BIGMAC_TXCFG_FULLDPLX 0x00000200 /* Enable full-duplex */ | ||
183 | |||
184 | /* BigMac receive config register. */ | ||
185 | #define BIGMAC_RXCFG_ENABLE 0x00000001 /* Enable the receiver */ | ||
186 | #define BIGMAC_RXCFG_FIFO 0x0000000e /* Default rx fthresh... */ | ||
187 | #define BIGMAC_RXCFG_PSTRIP 0x00000020 /* Pad byte strip enable */ | ||
188 | #define BIGMAC_RXCFG_PMISC 0x00000040 /* Enable promiscuous mode */ | ||
189 | #define BIGMAC_RXCFG_DERR 0x00000080 /* Disable error checking */ | ||
190 | #define BIGMAC_RXCFG_DCRCS 0x00000100 /* Disable CRC stripping */ | ||
191 | #define BIGMAC_RXCFG_ME 0x00000200 /* Receive packets addressed to me */ | ||
192 | #define BIGMAC_RXCFG_PGRP 0x00000400 /* Enable promisc group mode */ | ||
193 | #define BIGMAC_RXCFG_HENABLE 0x00000800 /* Enable the hash filter */ | ||
194 | #define BIGMAC_RXCFG_AENABLE 0x00001000 /* Enable the address filter */ | ||
195 | |||
196 | /* The BigMAC PHY transceiver. Not nearly as sophisticated as the happy meal | ||
197 | * one. But it does have the "bit banger", oh baby. | ||
198 | */ | ||
199 | #define TCVR_TPAL 0x00UL | ||
200 | #define TCVR_MPAL 0x04UL | ||
201 | #define TCVR_REG_SIZE 0x08UL | ||
202 | |||
203 | /* Frame commands. */ | ||
204 | #define FRAME_WRITE 0x50020000 | ||
205 | #define FRAME_READ 0x60020000 | ||
206 | |||
207 | /* Transceiver registers. */ | ||
208 | #define TCVR_PAL_SERIAL 0x00000001 /* Enable serial mode */ | ||
209 | #define TCVR_PAL_EXTLBACK 0x00000002 /* Enable external loopback */ | ||
210 | #define TCVR_PAL_MSENSE 0x00000004 /* Media sense */ | ||
211 | #define TCVR_PAL_LTENABLE 0x00000008 /* Link test enable */ | ||
212 | #define TCVR_PAL_LTSTATUS 0x00000010 /* Link test status (P1 only) */ | ||
213 | |||
214 | /* Management PAL. */ | ||
215 | #define MGMT_PAL_DCLOCK 0x00000001 /* Data clock */ | ||
216 | #define MGMT_PAL_OENAB 0x00000002 /* Output enabler */ | ||
217 | #define MGMT_PAL_MDIO 0x00000004 /* MDIO Data/attached */ | ||
218 | #define MGMT_PAL_TIMEO 0x00000008 /* Transmit enable timeout error */ | ||
219 | #define MGMT_PAL_EXT_MDIO MGMT_PAL_MDIO | ||
220 | #define MGMT_PAL_INT_MDIO MGMT_PAL_TIMEO | ||
221 | |||
222 | /* Here are some PHY addresses. */ | ||
223 | #define BIGMAC_PHY_EXTERNAL 0 /* External transceiver */ | ||
224 | #define BIGMAC_PHY_INTERNAL 1 /* Internal transceiver */ | ||
225 | |||
226 | /* PHY registers */ | ||
227 | #define BIGMAC_BMCR 0x00 /* Basic mode control register */ | ||
228 | #define BIGMAC_BMSR 0x01 /* Basic mode status register */ | ||
229 | |||
230 | /* BMCR bits */ | ||
231 | #define BMCR_ISOLATE 0x0400 /* Disconnect DP83840 from MII */ | ||
232 | #define BMCR_PDOWN 0x0800 /* Powerdown the DP83840 */ | ||
233 | #define BMCR_ANENABLE 0x1000 /* Enable auto negotiation */ | ||
234 | #define BMCR_SPEED100 0x2000 /* Select 100Mbps */ | ||
235 | #define BMCR_LOOPBACK 0x4000 /* TXD loopback bits */ | ||
236 | #define BMCR_RESET 0x8000 /* Reset the DP83840 */ | ||
237 | |||
238 | /* BMSR bits */ | ||
239 | #define BMSR_ERCAP 0x0001 /* Ext-reg capability */ | ||
240 | #define BMSR_JCD 0x0002 /* Jabber detected */ | ||
241 | #define BMSR_LSTATUS 0x0004 /* Link status */ | ||
242 | |||
243 | /* Ring descriptors and such, same as Quad Ethernet. */ | ||
244 | struct be_rxd { | ||
245 | u32 rx_flags; | ||
246 | u32 rx_addr; | ||
247 | }; | ||
248 | |||
249 | #define RXD_OWN 0x80000000 /* Ownership. */ | ||
250 | #define RXD_UPDATE 0x10000000 /* Being Updated? */ | ||
251 | #define RXD_LENGTH 0x000007ff /* Packet Length. */ | ||
252 | |||
253 | struct be_txd { | ||
254 | u32 tx_flags; | ||
255 | u32 tx_addr; | ||
256 | }; | ||
257 | |||
258 | #define TXD_OWN 0x80000000 /* Ownership. */ | ||
259 | #define TXD_SOP 0x40000000 /* Start Of Packet */ | ||
260 | #define TXD_EOP 0x20000000 /* End Of Packet */ | ||
261 | #define TXD_UPDATE 0x10000000 /* Being Updated? */ | ||
262 | #define TXD_LENGTH 0x000007ff /* Packet Length. */ | ||
263 | |||
264 | #define TX_RING_MAXSIZE 256 | ||
265 | #define RX_RING_MAXSIZE 256 | ||
266 | |||
267 | #define TX_RING_SIZE 256 | ||
268 | #define RX_RING_SIZE 256 | ||
269 | |||
270 | #define NEXT_RX(num) (((num) + 1) & (RX_RING_SIZE - 1)) | ||
271 | #define NEXT_TX(num) (((num) + 1) & (TX_RING_SIZE - 1)) | ||
272 | #define PREV_RX(num) (((num) - 1) & (RX_RING_SIZE - 1)) | ||
273 | #define PREV_TX(num) (((num) - 1) & (TX_RING_SIZE - 1)) | ||
274 | |||
275 | #define TX_BUFFS_AVAIL(bp) \ | ||
276 | (((bp)->tx_old <= (bp)->tx_new) ? \ | ||
277 | (bp)->tx_old + (TX_RING_SIZE - 1) - (bp)->tx_new : \ | ||
278 | (bp)->tx_old - (bp)->tx_new - 1) | ||
279 | |||
280 | |||
281 | #define RX_COPY_THRESHOLD 256 | ||
282 | #define RX_BUF_ALLOC_SIZE (ETH_FRAME_LEN + (64 * 3)) | ||
283 | |||
284 | struct bmac_init_block { | ||
285 | struct be_rxd be_rxd[RX_RING_MAXSIZE]; | ||
286 | struct be_txd be_txd[TX_RING_MAXSIZE]; | ||
287 | }; | ||
288 | |||
289 | #define bib_offset(mem, elem) \ | ||
290 | ((__u32)((unsigned long)(&(((struct bmac_init_block *)0)->mem[elem])))) | ||
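
bib_offset() is the classic null-pointer offsetof idiom; with <stddef.h> it could equivalently be written as below, assuming the compiler accepts an array-element member designator (gcc does):

#include <stddef.h>

#define bib_offset(mem, elem) \
	((__u32) offsetof(struct bmac_init_block, mem[elem]))
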
291 | |||
292 | /* Now software state stuff. */ | ||
293 | enum bigmac_transceiver { | ||
294 | external = 0, | ||
295 | internal = 1, | ||
296 | none = 2, | ||
297 | }; | ||
298 | |||
299 | /* Timer state engine. */ | ||
300 | enum bigmac_timer_state { | ||
301 | ltrywait = 1, /* Forcing try of all modes, from fastest to slowest. */ | ||
302 | asleep = 2, /* Timer inactive. */ | ||
303 | }; | ||
304 | |||
305 | struct bigmac { | ||
306 | void __iomem *gregs; /* QEC Global Registers */ | ||
307 | void __iomem *creg; /* QEC BigMAC Channel Registers */ | ||
308 | void __iomem *bregs; /* BigMAC Registers */ | ||
309 | void __iomem *tregs; /* BigMAC Transceiver */ | ||
310 | struct bmac_init_block *bmac_block; /* RX and TX descriptors */ | ||
311 | __u32 bblock_dvma; /* RX and TX descriptors */ | ||
312 | |||
313 | spinlock_t lock; | ||
314 | |||
315 | struct sk_buff *rx_skbs[RX_RING_SIZE]; | ||
316 | struct sk_buff *tx_skbs[TX_RING_SIZE]; | ||
317 | |||
318 | int rx_new, tx_new, rx_old, tx_old; | ||
319 | |||
320 | int board_rev; /* BigMAC board revision. */ | ||
321 | |||
322 | enum bigmac_transceiver tcvr_type; | ||
323 | unsigned int bigmac_bursts; | ||
324 | unsigned int paddr; | ||
325 | unsigned short sw_bmsr; /* SW copy of PHY BMSR */ | ||
326 | unsigned short sw_bmcr; /* SW copy of PHY BMCR */ | ||
327 | struct timer_list bigmac_timer; | ||
328 | enum bigmac_timer_state timer_state; | ||
329 | unsigned int timer_ticks; | ||
330 | |||
331 | struct net_device_stats enet_stats; | ||
332 | struct platform_device *qec_op; | ||
333 | struct platform_device *bigmac_op; | ||
334 | struct net_device *dev; | ||
335 | }; | ||
336 | |||
337 | /* We use this to acquire receive skbs that we can DMA directly into. */ | ||
338 | #define ALIGNED_RX_SKB_ADDR(addr) \ | ||
339 | ((((unsigned long)(addr) + (64 - 1)) & ~(64 - 1)) - (unsigned long)(addr)) | ||
340 | |||
341 | static inline struct sk_buff *big_mac_alloc_skb(unsigned int length, gfp_t gfp_flags) | ||
342 | { | ||
343 | struct sk_buff *skb; | ||
344 | |||
345 | skb = alloc_skb(length + 64, gfp_flags); | ||
346 | if (skb) { | ||
347 | int offset = ALIGNED_RX_SKB_ADDR(skb->data); | ||
348 | |||
349 | if (offset) | ||
350 | skb_reserve(skb, offset); | ||
351 | } | ||
352 | return skb; | ||
353 | } | ||
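
ALIGNED_RX_SKB_ADDR() computes how much skb_reserve() must skip to push skb->data up to the next 64-byte boundary, which is why big_mac_alloc_skb() over-allocates by 64 bytes. A quick check of the arithmetic:

#include <stdio.h>

#define ALIGNED_RX_SKB_ADDR(addr) \
	((((unsigned long)(addr) + (64 - 1)) & ~(64 - 1)) - (unsigned long)(addr))

int main(void)
{
	/* Bytes to skip to reach the next 64-byte boundary from a few
	 * sample buffer addresses: */
	printf("%lu\n", ALIGNED_RX_SKB_ADDR(0x1000)); /* 0, already aligned */
	printf("%lu\n", ALIGNED_RX_SKB_ADDR(0x1004)); /* 60 */
	printf("%lu\n", ALIGNED_RX_SKB_ADDR(0x103f)); /* 1  */
	return 0;
}
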
354 | |||
355 | #endif /* !(_SUNBMAC_H) */ | ||
diff --git a/drivers/net/ethernet/sun/sungem.c b/drivers/net/ethernet/sun/sungem.c new file mode 100644 index 000000000000..ade35dde5b51 --- /dev/null +++ b/drivers/net/ethernet/sun/sungem.c | |||
@@ -0,0 +1,3049 @@ | |||
1 | /* $Id: sungem.c,v 1.44.2.22 2002/03/13 01:18:12 davem Exp $ | ||
2 | * sungem.c: Sun GEM ethernet driver. | ||
3 | * | ||
4 | * Copyright (C) 2000, 2001, 2002, 2003 David S. Miller (davem@redhat.com) | ||
5 | * | ||
6 | * Support for Apple GMAC and assorted PHYs, WOL, Power Management | ||
7 | * (C) 2001,2002,2003 Benjamin Herrenschmidt (benh@kernel.crashing.org) | ||
8 | * (C) 2004,2005 Benjamin Herrenschmidt, IBM Corp. | ||
9 | * | ||
10 | * NAPI and NETPOLL support | ||
11 | * (C) 2004 by Eric Lemoine (eric.lemoine@gmail.com) | ||
12 | * | ||
13 | */ | ||
14 | |||
15 | #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt | ||
16 | |||
17 | #include <linux/module.h> | ||
18 | #include <linux/kernel.h> | ||
19 | #include <linux/types.h> | ||
20 | #include <linux/fcntl.h> | ||
21 | #include <linux/interrupt.h> | ||
22 | #include <linux/ioport.h> | ||
23 | #include <linux/in.h> | ||
24 | #include <linux/sched.h> | ||
25 | #include <linux/string.h> | ||
26 | #include <linux/delay.h> | ||
27 | #include <linux/init.h> | ||
28 | #include <linux/errno.h> | ||
29 | #include <linux/pci.h> | ||
30 | #include <linux/dma-mapping.h> | ||
31 | #include <linux/netdevice.h> | ||
32 | #include <linux/etherdevice.h> | ||
33 | #include <linux/skbuff.h> | ||
34 | #include <linux/mii.h> | ||
35 | #include <linux/ethtool.h> | ||
36 | #include <linux/crc32.h> | ||
37 | #include <linux/random.h> | ||
38 | #include <linux/workqueue.h> | ||
39 | #include <linux/if_vlan.h> | ||
40 | #include <linux/bitops.h> | ||
41 | #include <linux/mm.h> | ||
42 | #include <linux/gfp.h> | ||
43 | |||
44 | #include <asm/system.h> | ||
45 | #include <asm/io.h> | ||
46 | #include <asm/byteorder.h> | ||
47 | #include <asm/uaccess.h> | ||
48 | #include <asm/irq.h> | ||
49 | |||
50 | #ifdef CONFIG_SPARC | ||
51 | #include <asm/idprom.h> | ||
52 | #include <asm/prom.h> | ||
53 | #endif | ||
54 | |||
55 | #ifdef CONFIG_PPC_PMAC | ||
56 | #include <asm/pci-bridge.h> | ||
57 | #include <asm/prom.h> | ||
58 | #include <asm/machdep.h> | ||
59 | #include <asm/pmac_feature.h> | ||
60 | #endif | ||
61 | |||
62 | #include "sungem_phy.h" | ||
63 | #include "sungem.h" | ||
64 | |||
65 | /* Stripping FCS is causing problems; disabled for now */ | ||
66 | #undef STRIP_FCS | ||
67 | |||
68 | #define DEFAULT_MSG (NETIF_MSG_DRV | \ | ||
69 | NETIF_MSG_PROBE | \ | ||
70 | NETIF_MSG_LINK) | ||
71 | |||
72 | #define ADVERTISE_MASK (SUPPORTED_10baseT_Half | SUPPORTED_10baseT_Full | \ | ||
73 | SUPPORTED_100baseT_Half | SUPPORTED_100baseT_Full | \ | ||
74 | SUPPORTED_1000baseT_Half | SUPPORTED_1000baseT_Full | \ | ||
75 | SUPPORTED_Pause | SUPPORTED_Autoneg) | ||
76 | |||
77 | #define DRV_NAME "sungem" | ||
78 | #define DRV_VERSION "1.0" | ||
79 | #define DRV_AUTHOR "David S. Miller <davem@redhat.com>" | ||
80 | |||
81 | static char version[] __devinitdata = | ||
82 | DRV_NAME ".c:v" DRV_VERSION " " DRV_AUTHOR "\n"; | ||
83 | |||
84 | MODULE_AUTHOR(DRV_AUTHOR); | ||
85 | MODULE_DESCRIPTION("Sun GEM Gbit ethernet driver"); | ||
86 | MODULE_LICENSE("GPL"); | ||
87 | |||
88 | #define GEM_MODULE_NAME "gem" | ||
89 | |||
90 | static DEFINE_PCI_DEVICE_TABLE(gem_pci_tbl) = { | ||
91 | { PCI_VENDOR_ID_SUN, PCI_DEVICE_ID_SUN_GEM, | ||
92 | PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL }, | ||
93 | |||
94 | /* These models only differ from the original GEM in | ||
95 | * that their tx/rx fifos are of a different size and | ||
96 | * they only support 10/100 speeds. -DaveM | ||
97 | * | ||
98 | * Apple's GMAC does support gigabit on machines with | ||
99 | * the BCM54xx PHYs. -BenH | ||
100 | */ | ||
101 | { PCI_VENDOR_ID_SUN, PCI_DEVICE_ID_SUN_RIO_GEM, | ||
102 | PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL }, | ||
103 | { PCI_VENDOR_ID_APPLE, PCI_DEVICE_ID_APPLE_UNI_N_GMAC, | ||
104 | PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL }, | ||
105 | { PCI_VENDOR_ID_APPLE, PCI_DEVICE_ID_APPLE_UNI_N_GMACP, | ||
106 | PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL }, | ||
107 | { PCI_VENDOR_ID_APPLE, PCI_DEVICE_ID_APPLE_UNI_N_GMAC2, | ||
108 | PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL }, | ||
109 | { PCI_VENDOR_ID_APPLE, PCI_DEVICE_ID_APPLE_K2_GMAC, | ||
110 | PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL }, | ||
111 | { PCI_VENDOR_ID_APPLE, PCI_DEVICE_ID_APPLE_SH_SUNGEM, | ||
112 | PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL }, | ||
113 | { PCI_VENDOR_ID_APPLE, PCI_DEVICE_ID_APPLE_IPID2_GMAC, | ||
114 | PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL }, | ||
115 | {0, } | ||
116 | }; | ||
117 | |||
118 | MODULE_DEVICE_TABLE(pci, gem_pci_tbl); | ||
119 | |||
120 | static u16 __phy_read(struct gem *gp, int phy_addr, int reg) | ||
121 | { | ||
122 | u32 cmd; | ||
123 | int limit = 10000; | ||
124 | |||
125 | cmd = (1 << 30); | ||
126 | cmd |= (2 << 28); | ||
127 | cmd |= (phy_addr << 23) & MIF_FRAME_PHYAD; | ||
128 | cmd |= (reg << 18) & MIF_FRAME_REGAD; | ||
129 | cmd |= (MIF_FRAME_TAMSB); | ||
130 | writel(cmd, gp->regs + MIF_FRAME); | ||
131 | |||
132 | while (--limit) { | ||
133 | cmd = readl(gp->regs + MIF_FRAME); | ||
134 | if (cmd & MIF_FRAME_TALSB) | ||
135 | break; | ||
136 | |||
137 | udelay(10); | ||
138 | } | ||
139 | |||
140 | if (!limit) | ||
141 | cmd = 0xffff; | ||
142 | |||
143 | return cmd & MIF_FRAME_DATA; | ||
144 | } | ||
145 | |||
146 | static inline int _phy_read(struct net_device *dev, int mii_id, int reg) | ||
147 | { | ||
148 | struct gem *gp = netdev_priv(dev); | ||
149 | return __phy_read(gp, mii_id, reg); | ||
150 | } | ||
151 | |||
152 | static inline u16 phy_read(struct gem *gp, int reg) | ||
153 | { | ||
154 | return __phy_read(gp, gp->mii_phy_addr, reg); | ||
155 | } | ||
156 | |||
157 | static void __phy_write(struct gem *gp, int phy_addr, int reg, u16 val) | ||
158 | { | ||
159 | u32 cmd; | ||
160 | int limit = 10000; | ||
161 | |||
162 | cmd = (1 << 30); | ||
163 | cmd |= (1 << 28); | ||
164 | cmd |= (phy_addr << 23) & MIF_FRAME_PHYAD; | ||
165 | cmd |= (reg << 18) & MIF_FRAME_REGAD; | ||
166 | cmd |= (MIF_FRAME_TAMSB); | ||
167 | cmd |= (val & MIF_FRAME_DATA); | ||
168 | writel(cmd, gp->regs + MIF_FRAME); | ||
169 | |||
170 | while (limit--) { | ||
171 | cmd = readl(gp->regs + MIF_FRAME); | ||
172 | if (cmd & MIF_FRAME_TALSB) | ||
173 | break; | ||
174 | |||
175 | udelay(10); | ||
176 | } | ||
177 | } | ||
178 | |||
179 | static inline void _phy_write(struct net_device *dev, int mii_id, int reg, int val) | ||
180 | { | ||
181 | struct gem *gp = netdev_priv(dev); | ||
182 | __phy_write(gp, mii_id, reg, val & 0xffff); | ||
183 | } | ||
184 | |||
185 | static inline void phy_write(struct gem *gp, int reg, u16 val) | ||
186 | { | ||
187 | __phy_write(gp, gp->mii_phy_addr, reg, val); | ||
188 | } | ||
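
The command word built by __phy_read()/__phy_write() above is a standard IEEE 802.3 clause-22 MDIO frame. A sketch of the layout as one illustrative helper (not part of the driver; the MIF_FRAME_* masks come from sungem.h):

/*   31..30  start-of-frame (01)
 *   29..28  opcode (10 = read, 01 = write)
 *   27..23  PHY address        (MIF_FRAME_PHYAD)
 *   22..18  register address   (MIF_FRAME_REGAD)
 *   17..16  turnaround         (MIF_FRAME_TAMSB/TALSB)
 *   15..0   data               (MIF_FRAME_DATA)
 */
static u32 mif_frame(int is_read, int phy_addr, int reg, u16 val)
{
	u32 cmd = (1 << 30) | ((is_read ? 2 : 1) << 28);

	cmd |= (phy_addr << 23) & MIF_FRAME_PHYAD;
	cmd |= (reg << 18) & MIF_FRAME_REGAD;
	cmd |= MIF_FRAME_TAMSB;
	if (!is_read)
		cmd |= val & MIF_FRAME_DATA;
	return cmd;
}
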
189 | |||
190 | static inline void gem_enable_ints(struct gem *gp) | ||
191 | { | ||
192 | /* Enable all interrupts but TXDONE */ | ||
193 | writel(GREG_STAT_TXDONE, gp->regs + GREG_IMASK); | ||
194 | } | ||
195 | |||
196 | static inline void gem_disable_ints(struct gem *gp) | ||
197 | { | ||
198 | /* Disable all interrupts, including TXDONE */ | ||
199 | writel(GREG_STAT_NAPI | GREG_STAT_TXDONE, gp->regs + GREG_IMASK); | ||
200 | (void)readl(gp->regs + GREG_IMASK); /* write posting */ | ||
201 | } | ||
202 | |||
203 | static void gem_get_cell(struct gem *gp) | ||
204 | { | ||
205 | BUG_ON(gp->cell_enabled < 0); | ||
206 | gp->cell_enabled++; | ||
207 | #ifdef CONFIG_PPC_PMAC | ||
208 | if (gp->cell_enabled == 1) { | ||
209 | mb(); | ||
210 | pmac_call_feature(PMAC_FTR_GMAC_ENABLE, gp->of_node, 0, 1); | ||
211 | udelay(10); | ||
212 | } | ||
213 | #endif /* CONFIG_PPC_PMAC */ | ||
214 | } | ||
215 | |||
216 | /* Turn off the chip's clock */ | ||
217 | static void gem_put_cell(struct gem *gp) | ||
218 | { | ||
219 | BUG_ON(gp->cell_enabled <= 0); | ||
220 | gp->cell_enabled--; | ||
221 | #ifdef CONFIG_PPC_PMAC | ||
222 | if (gp->cell_enabled == 0) { | ||
223 | mb(); | ||
224 | pmac_call_feature(PMAC_FTR_GMAC_ENABLE, gp->of_node, 0, 0); | ||
225 | udelay(10); | ||
226 | } | ||
227 | #endif /* CONFIG_PPC_PMAC */ | ||
228 | } | ||
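
gem_get_cell()/gem_put_cell() are a plain refcount around the PMAC clock "cell": only the 0-to-1 and 1-to-0 transitions actually touch the hardware. The same pattern in a runnable standalone form, with printfs standing in for the pmac_call_feature() calls:

#include <stdio.h>

static int refcount;

static void hw_enable(void)  { puts("clock on");  }
static void hw_disable(void) { puts("clock off"); }

static void cell_get(void)
{
	if (refcount++ == 0)	/* only the 0 -> 1 transition touches hw */
		hw_enable();
}

static void cell_put(void)
{
	if (--refcount == 0)	/* only the 1 -> 0 transition touches hw */
		hw_disable();
}

int main(void)
{
	cell_get();	/* prints "clock on"  */
	cell_get();	/* no-op              */
	cell_put();	/* no-op              */
	cell_put();	/* prints "clock off" */
	return 0;
}
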
229 | |||
230 | static inline void gem_netif_stop(struct gem *gp) | ||
231 | { | ||
232 | gp->dev->trans_start = jiffies; /* prevent tx timeout */ | ||
233 | napi_disable(&gp->napi); | ||
234 | netif_tx_disable(gp->dev); | ||
235 | } | ||
236 | |||
237 | static inline void gem_netif_start(struct gem *gp) | ||
238 | { | ||
239 | /* NOTE: unconditional netif_wake_queue is only | ||
240 | * appropriate so long as all callers are assured to | ||
241 | * have free tx slots. | ||
242 | */ | ||
243 | netif_wake_queue(gp->dev); | ||
244 | napi_enable(&gp->napi); | ||
245 | } | ||
246 | |||
247 | static void gem_schedule_reset(struct gem *gp) | ||
248 | { | ||
249 | gp->reset_task_pending = 1; | ||
250 | schedule_work(&gp->reset_task); | ||
251 | } | ||
252 | |||
253 | static void gem_handle_mif_event(struct gem *gp, u32 reg_val, u32 changed_bits) | ||
254 | { | ||
255 | if (netif_msg_intr(gp)) | ||
256 | printk(KERN_DEBUG "%s: mif interrupt\n", gp->dev->name); | ||
257 | } | ||
258 | |||
259 | static int gem_pcs_interrupt(struct net_device *dev, struct gem *gp, u32 gem_status) | ||
260 | { | ||
261 | u32 pcs_istat = readl(gp->regs + PCS_ISTAT); | ||
262 | u32 pcs_miistat; | ||
263 | |||
264 | if (netif_msg_intr(gp)) | ||
265 | printk(KERN_DEBUG "%s: pcs interrupt, pcs_istat: 0x%x\n", | ||
266 | gp->dev->name, pcs_istat); | ||
267 | |||
268 | if (!(pcs_istat & PCS_ISTAT_LSC)) { | ||
269 | netdev_err(dev, "PCS irq but no link status change???\n"); | ||
270 | return 0; | ||
271 | } | ||
272 | |||
273 | /* The link status bit latches on zero, so you must | ||
274 | * read it twice in such a case to see a transition | ||
275 | * to the link being up. | ||
276 | */ | ||
277 | pcs_miistat = readl(gp->regs + PCS_MIISTAT); | ||
278 | if (!(pcs_miistat & PCS_MIISTAT_LS)) | ||
279 | pcs_miistat |= | ||
280 | (readl(gp->regs + PCS_MIISTAT) & | ||
281 | PCS_MIISTAT_LS); | ||
282 | |||
283 | if (pcs_miistat & PCS_MIISTAT_ANC) { | ||
284 | /* The remote-fault indication is only valid | ||
285 | * when autoneg has completed. | ||
286 | */ | ||
287 | if (pcs_miistat & PCS_MIISTAT_RF) | ||
288 | netdev_info(dev, "PCS AutoNEG complete, RemoteFault\n"); | ||
289 | else | ||
290 | netdev_info(dev, "PCS AutoNEG complete\n"); | ||
291 | } | ||
292 | |||
293 | if (pcs_miistat & PCS_MIISTAT_LS) { | ||
294 | netdev_info(dev, "PCS link is now up\n"); | ||
295 | netif_carrier_on(gp->dev); | ||
296 | } else { | ||
297 | netdev_info(dev, "PCS link is now down\n"); | ||
298 | netif_carrier_off(gp->dev); | ||
299 | /* If this happens and the link timer is not running, | ||
300 | * reset so we re-negotiate. | ||
301 | */ | ||
302 | if (!timer_pending(&gp->link_timer)) | ||
303 | return 1; | ||
304 | } | ||
305 | |||
306 | return 0; | ||
307 | } | ||
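The double read of PCS_MIISTAT above is the standard idiom for a bit that latches low: the first read returns (and clears) the latched value, so only a second read shows the live link state. Condensed into a stand-alone model (the accessor and bit value are hypothetical):

        #include <stdint.h>

        #define LINK_STATUS_BIT 0x4             /* assumed value, for illustration */

        extern uint32_t read_pcs_miistat(void); /* hypothetical register accessor */

        static int pcs_link_is_up(void)
        {
                uint32_t stat = read_pcs_miistat();

                if (!(stat & LINK_STATUS_BIT))  /* may be a stale latched zero */
                        stat = read_pcs_miistat();
                return (stat & LINK_STATUS_BIT) != 0;
        }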
308 | |||
309 | static int gem_txmac_interrupt(struct net_device *dev, struct gem *gp, u32 gem_status) | ||
310 | { | ||
311 | u32 txmac_stat = readl(gp->regs + MAC_TXSTAT); | ||
312 | |||
313 | if (netif_msg_intr(gp)) | ||
314 | printk(KERN_DEBUG "%s: txmac interrupt, txmac_stat: 0x%x\n", | ||
315 | gp->dev->name, txmac_stat); | ||
316 | |||
317 | /* Defer-timer expiration is quite normal, | ||
318 | * don't even log the event. | ||
319 | */ | ||
320 | if ((txmac_stat & MAC_TXSTAT_DTE) && | ||
321 | !(txmac_stat & ~MAC_TXSTAT_DTE)) | ||
322 | return 0; | ||
323 | |||
324 | if (txmac_stat & MAC_TXSTAT_URUN) { | ||
325 | netdev_err(dev, "TX MAC xmit underrun\n"); | ||
326 | dev->stats.tx_fifo_errors++; | ||
327 | } | ||
328 | |||
329 | if (txmac_stat & MAC_TXSTAT_MPE) { | ||
330 | netdev_err(dev, "TX MAC max packet size error\n"); | ||
331 | dev->stats.tx_errors++; | ||
332 | } | ||
333 | |||
334 | /* The rest are all cases of one of the 16-bit TX | ||
335 | * counters expiring. | ||
336 | */ | ||
337 | if (txmac_stat & MAC_TXSTAT_NCE) | ||
338 | dev->stats.collisions += 0x10000; | ||
339 | |||
340 | if (txmac_stat & MAC_TXSTAT_ECE) { | ||
341 | dev->stats.tx_aborted_errors += 0x10000; | ||
342 | dev->stats.collisions += 0x10000; | ||
343 | } | ||
344 | |||
345 | if (txmac_stat & MAC_TXSTAT_LCE) { | ||
346 | dev->stats.tx_aborted_errors += 0x10000; | ||
347 | dev->stats.collisions += 0x10000; | ||
348 | } | ||
349 | |||
350 | /* We do not keep track of MAC_TXSTAT_FCE and | ||
351 | * MAC_TXSTAT_PCE events. | ||
352 | */ | ||
353 | return 0; | ||
354 | } | ||
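Each 0x10000 increment above credits one full wrap of a 16-bit hardware event counter: the chip raises these interrupts when a counter overflows, and 0x10000 = 65536 is exactly the number of events a 16-bit counter accumulates per wrap.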
355 | |||
356 | /* When we get a RX fifo overflow, the RX unit in GEM is probably hung | ||
357 | * so we do the following. | ||
358 | * | ||
359 | * If any part of the reset goes wrong, we return 1 and that causes the | ||
360 | * whole chip to be reset. | ||
361 | */ | ||
362 | static int gem_rxmac_reset(struct gem *gp) | ||
363 | { | ||
364 | struct net_device *dev = gp->dev; | ||
365 | int limit, i; | ||
366 | u64 desc_dma; | ||
367 | u32 val; | ||
368 | |||
369 | /* First, reset & disable MAC RX. */ | ||
370 | writel(MAC_RXRST_CMD, gp->regs + MAC_RXRST); | ||
371 | for (limit = 0; limit < 5000; limit++) { | ||
372 | if (!(readl(gp->regs + MAC_RXRST) & MAC_RXRST_CMD)) | ||
373 | break; | ||
374 | udelay(10); | ||
375 | } | ||
376 | if (limit == 5000) { | ||
377 | netdev_err(dev, "RX MAC will not reset, resetting whole chip\n"); | ||
378 | return 1; | ||
379 | } | ||
380 | |||
381 | writel(gp->mac_rx_cfg & ~MAC_RXCFG_ENAB, | ||
382 | gp->regs + MAC_RXCFG); | ||
383 | for (limit = 0; limit < 5000; limit++) { | ||
384 | if (!(readl(gp->regs + MAC_RXCFG) & MAC_RXCFG_ENAB)) | ||
385 | break; | ||
386 | udelay(10); | ||
387 | } | ||
388 | if (limit == 5000) { | ||
389 | netdev_err(dev, "RX MAC will not disable, resetting whole chip\n"); | ||
390 | return 1; | ||
391 | } | ||
392 | |||
393 | /* Second, disable RX DMA. */ | ||
394 | writel(0, gp->regs + RXDMA_CFG); | ||
395 | for (limit = 0; limit < 5000; limit++) { | ||
396 | if (!(readl(gp->regs + RXDMA_CFG) & RXDMA_CFG_ENABLE)) | ||
397 | break; | ||
398 | udelay(10); | ||
399 | } | ||
400 | if (limit == 5000) { | ||
401 | netdev_err(dev, "RX DMA will not disable, resetting whole chip\n"); | ||
402 | return 1; | ||
403 | } | ||
404 | |||
405 | udelay(5000); | ||
406 | |||
407 | /* Execute RX reset command. */ | ||
408 | writel(gp->swrst_base | GREG_SWRST_RXRST, | ||
409 | gp->regs + GREG_SWRST); | ||
410 | for (limit = 0; limit < 5000; limit++) { | ||
411 | if (!(readl(gp->regs + GREG_SWRST) & GREG_SWRST_RXRST)) | ||
412 | break; | ||
413 | udelay(10); | ||
414 | } | ||
415 | if (limit == 5000) { | ||
416 | netdev_err(dev, "RX reset command will not execute, resetting whole chip\n"); | ||
417 | return 1; | ||
418 | } | ||
419 | |||
420 | /* Refresh the RX ring. */ | ||
421 | for (i = 0; i < RX_RING_SIZE; i++) { | ||
422 | struct gem_rxd *rxd = &gp->init_block->rxd[i]; | ||
423 | |||
424 | if (gp->rx_skbs[i] == NULL) { | ||
425 | netdev_err(dev, "Parts of RX ring empty, resetting whole chip\n"); | ||
426 | return 1; | ||
427 | } | ||
428 | |||
429 | rxd->status_word = cpu_to_le64(RXDCTRL_FRESH(gp)); | ||
430 | } | ||
431 | gp->rx_new = gp->rx_old = 0; | ||
432 | |||
433 | /* Now we must reprogram the rest of RX unit. */ | ||
434 | desc_dma = (u64) gp->gblock_dvma; | ||
435 | desc_dma += (INIT_BLOCK_TX_RING_SIZE * sizeof(struct gem_txd)); | ||
436 | writel(desc_dma >> 32, gp->regs + RXDMA_DBHI); | ||
437 | writel(desc_dma & 0xffffffff, gp->regs + RXDMA_DBLOW); | ||
438 | writel(RX_RING_SIZE - 4, gp->regs + RXDMA_KICK); | ||
439 | val = (RXDMA_CFG_BASE | (RX_OFFSET << 10) | | ||
440 | ((14 / 2) << 13) | RXDMA_CFG_FTHRESH_128); | ||
441 | writel(val, gp->regs + RXDMA_CFG); | ||
442 | if (readl(gp->regs + GREG_BIFCFG) & GREG_BIFCFG_M66EN) | ||
443 | writel(((5 & RXDMA_BLANK_IPKTS) | | ||
444 | ((8 << 12) & RXDMA_BLANK_ITIME)), | ||
445 | gp->regs + RXDMA_BLANK); | ||
446 | else | ||
447 | writel(((5 & RXDMA_BLANK_IPKTS) | | ||
448 | ((4 << 12) & RXDMA_BLANK_ITIME)), | ||
449 | gp->regs + RXDMA_BLANK); | ||
450 | val = (((gp->rx_pause_off / 64) << 0) & RXDMA_PTHRESH_OFF); | ||
451 | val |= (((gp->rx_pause_on / 64) << 12) & RXDMA_PTHRESH_ON); | ||
452 | writel(val, gp->regs + RXDMA_PTHRESH); | ||
453 | val = readl(gp->regs + RXDMA_CFG); | ||
454 | writel(val | RXDMA_CFG_ENABLE, gp->regs + RXDMA_CFG); | ||
455 | writel(MAC_RXSTAT_RCV, gp->regs + MAC_RXMASK); | ||
456 | val = readl(gp->regs + MAC_RXCFG); | ||
457 | writel(val | MAC_RXCFG_ENAB, gp->regs + MAC_RXCFG); | ||
458 | |||
459 | return 0; | ||
460 | } | ||
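The four timeout loops above all follow the same shape: issue a command, poll until a bit clears, and escalate to a whole-chip reset if it never does. A hypothetical helper capturing that shape (the driver open-codes each loop instead):

        /* Sketch only: poll until a bit clears, with bounded retries.
         * Returns 0 on success, 1 if the caller should reset the whole chip.
         */
        static int gem_poll_bit_clear(void __iomem *reg, u32 bit, int tries)
        {
                while (tries--) {
                        if (!(readl(reg) & bit))
                                return 0;
                        udelay(10);
                }
                return 1;
        }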
461 | |||
462 | static int gem_rxmac_interrupt(struct net_device *dev, struct gem *gp, u32 gem_status) | ||
463 | { | ||
464 | u32 rxmac_stat = readl(gp->regs + MAC_RXSTAT); | ||
465 | int ret = 0; | ||
466 | |||
467 | if (netif_msg_intr(gp)) | ||
468 | printk(KERN_DEBUG "%s: rxmac interrupt, rxmac_stat: 0x%x\n", | ||
469 | gp->dev->name, rxmac_stat); | ||
470 | |||
471 | if (rxmac_stat & MAC_RXSTAT_OFLW) { | ||
472 | u32 smac = readl(gp->regs + MAC_SMACHINE); | ||
473 | |||
474 | netdev_err(dev, "RX MAC fifo overflow smac[%08x]\n", smac); | ||
475 | dev->stats.rx_over_errors++; | ||
476 | dev->stats.rx_fifo_errors++; | ||
477 | |||
478 | ret = gem_rxmac_reset(gp); | ||
479 | } | ||
480 | |||
481 | if (rxmac_stat & MAC_RXSTAT_ACE) | ||
482 | dev->stats.rx_frame_errors += 0x10000; | ||
483 | |||
484 | if (rxmac_stat & MAC_RXSTAT_CCE) | ||
485 | dev->stats.rx_crc_errors += 0x10000; | ||
486 | |||
487 | if (rxmac_stat & MAC_RXSTAT_LCE) | ||
488 | dev->stats.rx_length_errors += 0x10000; | ||
489 | |||
490 | /* We do not track MAC_RXSTAT_FCE and MAC_RXSTAT_VCE | ||
491 | * events. | ||
492 | */ | ||
493 | return ret; | ||
494 | } | ||
495 | |||
496 | static int gem_mac_interrupt(struct net_device *dev, struct gem *gp, u32 gem_status) | ||
497 | { | ||
498 | u32 mac_cstat = readl(gp->regs + MAC_CSTAT); | ||
499 | |||
500 | if (netif_msg_intr(gp)) | ||
501 | printk(KERN_DEBUG "%s: mac interrupt, mac_cstat: 0x%x\n", | ||
502 | gp->dev->name, mac_cstat); | ||
503 | |||
504 | /* This interrupt is just for pause frame and pause | ||
505 | * tracking. It is useful for diagnostics and debug | ||
506 | * but probably by default we will mask these events. | ||
507 | */ | ||
508 | if (mac_cstat & MAC_CSTAT_PS) | ||
509 | gp->pause_entered++; | ||
510 | |||
511 | if (mac_cstat & MAC_CSTAT_PRCV) | ||
512 | gp->pause_last_time_recvd = (mac_cstat >> 16); | ||
513 | |||
514 | return 0; | ||
515 | } | ||
516 | |||
517 | static int gem_mif_interrupt(struct net_device *dev, struct gem *gp, u32 gem_status) | ||
518 | { | ||
519 | u32 mif_status = readl(gp->regs + MIF_STATUS); | ||
520 | u32 reg_val, changed_bits; | ||
521 | |||
522 | reg_val = (mif_status & MIF_STATUS_DATA) >> 16; | ||
523 | changed_bits = (mif_status & MIF_STATUS_STAT); | ||
524 | |||
525 | gem_handle_mif_event(gp, reg_val, changed_bits); | ||
526 | |||
527 | return 0; | ||
528 | } | ||
529 | |||
530 | static int gem_pci_interrupt(struct net_device *dev, struct gem *gp, u32 gem_status) | ||
531 | { | ||
532 | u32 pci_estat = readl(gp->regs + GREG_PCIESTAT); | ||
533 | |||
534 | if (gp->pdev->vendor == PCI_VENDOR_ID_SUN && | ||
535 | gp->pdev->device == PCI_DEVICE_ID_SUN_GEM) { | ||
536 | netdev_err(dev, "PCI error [%04x]", pci_estat); | ||
537 | |||
538 | if (pci_estat & GREG_PCIESTAT_BADACK) | ||
539 | pr_cont(" <No ACK64# during ABS64 cycle>"); | ||
540 | if (pci_estat & GREG_PCIESTAT_DTRTO) | ||
541 | pr_cont(" <Delayed transaction timeout>"); | ||
542 | if (pci_estat & GREG_PCIESTAT_OTHER) | ||
543 | pr_cont(" <other>"); | ||
544 | pr_cont("\n"); | ||
545 | } else { | ||
546 | pci_estat |= GREG_PCIESTAT_OTHER; | ||
547 | netdev_err(dev, "PCI error\n"); | ||
548 | } | ||
549 | |||
550 | if (pci_estat & GREG_PCIESTAT_OTHER) { | ||
551 | u16 pci_cfg_stat; | ||
552 | |||
553 | /* Interrogate PCI config space for the | ||
554 | * true cause. | ||
555 | */ | ||
556 | pci_read_config_word(gp->pdev, PCI_STATUS, | ||
557 | &pci_cfg_stat); | ||
558 | netdev_err(dev, "Read PCI cfg space status [%04x]\n", | ||
559 | pci_cfg_stat); | ||
560 | if (pci_cfg_stat & PCI_STATUS_PARITY) | ||
561 | netdev_err(dev, "PCI parity error detected\n"); | ||
562 | if (pci_cfg_stat & PCI_STATUS_SIG_TARGET_ABORT) | ||
563 | netdev_err(dev, "PCI target abort\n"); | ||
564 | if (pci_cfg_stat & PCI_STATUS_REC_TARGET_ABORT) | ||
565 | netdev_err(dev, "PCI master acks target abort\n"); | ||
566 | if (pci_cfg_stat & PCI_STATUS_REC_MASTER_ABORT) | ||
567 | netdev_err(dev, "PCI master abort\n"); | ||
568 | if (pci_cfg_stat & PCI_STATUS_SIG_SYSTEM_ERROR) | ||
569 | netdev_err(dev, "PCI system error SERR#\n"); | ||
570 | if (pci_cfg_stat & PCI_STATUS_DETECTED_PARITY) | ||
571 | netdev_err(dev, "PCI parity error\n"); | ||
572 | |||
573 | /* Write the error bits back to clear them. */ | ||
574 | pci_cfg_stat &= (PCI_STATUS_PARITY | | ||
575 | PCI_STATUS_SIG_TARGET_ABORT | | ||
576 | PCI_STATUS_REC_TARGET_ABORT | | ||
577 | PCI_STATUS_REC_MASTER_ABORT | | ||
578 | PCI_STATUS_SIG_SYSTEM_ERROR | | ||
579 | PCI_STATUS_DETECTED_PARITY); | ||
580 | pci_write_config_word(gp->pdev, | ||
581 | PCI_STATUS, pci_cfg_stat); | ||
582 | } | ||
583 | |||
584 | /* For all PCI errors, we should reset the chip. */ | ||
585 | return 1; | ||
586 | } | ||
587 | |||
588 | /* All non-normal interrupt conditions get serviced here. | ||
589 | * Returns non-zero if we should just exit the interrupt | ||
590 | * handler right now (ie. if we reset the card which invalidates | ||
591 | * all of the other original irq status bits). | ||
592 | */ | ||
593 | static int gem_abnormal_irq(struct net_device *dev, struct gem *gp, u32 gem_status) | ||
594 | { | ||
595 | if (gem_status & GREG_STAT_RXNOBUF) { | ||
596 | /* Frame arrived, no free RX buffers available. */ | ||
597 | if (netif_msg_rx_err(gp)) | ||
598 | printk(KERN_DEBUG "%s: no buffer for rx frame\n", | ||
599 | gp->dev->name); | ||
600 | dev->stats.rx_dropped++; | ||
601 | } | ||
602 | |||
603 | if (gem_status & GREG_STAT_RXTAGERR) { | ||
604 | /* corrupt RX tag framing */ | ||
605 | if (netif_msg_rx_err(gp)) | ||
606 | printk(KERN_DEBUG "%s: corrupt rx tag framing\n", | ||
607 | gp->dev->name); | ||
608 | dev->stats.rx_errors++; | ||
609 | |||
610 | return 1; | ||
611 | } | ||
612 | |||
613 | if (gem_status & GREG_STAT_PCS) { | ||
614 | if (gem_pcs_interrupt(dev, gp, gem_status)) | ||
615 | return 1; | ||
616 | } | ||
617 | |||
618 | if (gem_status & GREG_STAT_TXMAC) { | ||
619 | if (gem_txmac_interrupt(dev, gp, gem_status)) | ||
620 | return 1; | ||
621 | } | ||
622 | |||
623 | if (gem_status & GREG_STAT_RXMAC) { | ||
624 | if (gem_rxmac_interrupt(dev, gp, gem_status)) | ||
625 | return 1; | ||
626 | } | ||
627 | |||
628 | if (gem_status & GREG_STAT_MAC) { | ||
629 | if (gem_mac_interrupt(dev, gp, gem_status)) | ||
630 | return 1; | ||
631 | } | ||
632 | |||
633 | if (gem_status & GREG_STAT_MIF) { | ||
634 | if (gem_mif_interrupt(dev, gp, gem_status)) | ||
635 | return 1; | ||
636 | } | ||
637 | |||
638 | if (gem_status & GREG_STAT_PCIERR) { | ||
639 | if (gem_pci_interrupt(dev, gp, gem_status)) | ||
640 | return 1; | ||
641 | } | ||
642 | |||
643 | return 0; | ||
644 | } | ||
645 | |||
646 | static __inline__ void gem_tx(struct net_device *dev, struct gem *gp, u32 gem_status) | ||
647 | { | ||
648 | int entry, limit; | ||
649 | |||
650 | entry = gp->tx_old; | ||
651 | limit = ((gem_status & GREG_STAT_TXNR) >> GREG_STAT_TXNR_SHIFT); | ||
652 | while (entry != limit) { | ||
653 | struct sk_buff *skb; | ||
654 | struct gem_txd *txd; | ||
655 | dma_addr_t dma_addr; | ||
656 | u32 dma_len; | ||
657 | int frag; | ||
658 | |||
659 | if (netif_msg_tx_done(gp)) | ||
660 | printk(KERN_DEBUG "%s: tx done, slot %d\n", | ||
661 | gp->dev->name, entry); | ||
662 | skb = gp->tx_skbs[entry]; | ||
663 | if (skb_shinfo(skb)->nr_frags) { | ||
664 | int last = entry + skb_shinfo(skb)->nr_frags; | ||
665 | int walk = entry; | ||
666 | int incomplete = 0; | ||
667 | |||
668 | last &= (TX_RING_SIZE - 1); | ||
669 | for (;;) { | ||
670 | walk = NEXT_TX(walk); | ||
671 | if (walk == limit) | ||
672 | incomplete = 1; | ||
673 | if (walk == last) | ||
674 | break; | ||
675 | } | ||
676 | if (incomplete) | ||
677 | break; | ||
678 | } | ||
679 | gp->tx_skbs[entry] = NULL; | ||
680 | dev->stats.tx_bytes += skb->len; | ||
681 | |||
682 | for (frag = 0; frag <= skb_shinfo(skb)->nr_frags; frag++) { | ||
683 | txd = &gp->init_block->txd[entry]; | ||
684 | |||
685 | dma_addr = le64_to_cpu(txd->buffer); | ||
686 | dma_len = le64_to_cpu(txd->control_word) & TXDCTRL_BUFSZ; | ||
687 | |||
688 | pci_unmap_page(gp->pdev, dma_addr, dma_len, PCI_DMA_TODEVICE); | ||
689 | entry = NEXT_TX(entry); | ||
690 | } | ||
691 | |||
692 | dev->stats.tx_packets++; | ||
693 | dev_kfree_skb(skb); | ||
694 | } | ||
695 | gp->tx_old = entry; | ||
696 | |||
697 | /* Need to make the tx_old update visible to gem_start_xmit() | ||
698 | * before checking for netif_queue_stopped(). Without the | ||
699 | * memory barrier, there is a small possibility that gem_start_xmit() | ||
700 | * will miss it and cause the queue to be stopped forever. | ||
701 | */ | ||
702 | smp_mb(); | ||
703 | |||
704 | if (unlikely(netif_queue_stopped(dev) && | ||
705 | TX_BUFFS_AVAIL(gp) > (MAX_SKB_FRAGS + 1))) { | ||
706 | struct netdev_queue *txq = netdev_get_tx_queue(dev, 0); | ||
707 | |||
708 | __netif_tx_lock(txq, smp_processor_id()); | ||
709 | if (netif_queue_stopped(dev) && | ||
710 | TX_BUFFS_AVAIL(gp) > (MAX_SKB_FRAGS + 1)) | ||
711 | netif_wake_queue(dev); | ||
712 | __netif_tx_unlock(txq); | ||
713 | } | ||
714 | } | ||
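The smp_mb() above pairs with the one placed after netif_stop_queue() in gem_start_xmit() below: whichever CPU runs its barrier second is guaranteed to observe the other side's update, so a wake-up can never be lost between a full-ring stop and a completion. Schematically:

        /*
         *   gem_start_xmit()                    gem_tx()
         *   ----------------                    --------
         *   netif_stop_queue()                  gp->tx_old = entry
         *   smp_mb()                            smp_mb()
         *   recheck TX_BUFFS_AVAIL()            recheck netif_queue_stopped()
         *     -> wake if space freed              -> wake if stopped with space
         */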
715 | |||
716 | static __inline__ void gem_post_rxds(struct gem *gp, int limit) | ||
717 | { | ||
718 | int cluster_start, curr, count, kick; | ||
719 | |||
720 | cluster_start = curr = (gp->rx_new & ~(4 - 1)); | ||
721 | count = 0; | ||
722 | kick = -1; | ||
723 | wmb(); | ||
724 | while (curr != limit) { | ||
725 | curr = NEXT_RX(curr); | ||
726 | if (++count == 4) { | ||
727 | struct gem_rxd *rxd = | ||
728 | &gp->init_block->rxd[cluster_start]; | ||
729 | for (;;) { | ||
730 | rxd->status_word = cpu_to_le64(RXDCTRL_FRESH(gp)); | ||
731 | rxd++; | ||
732 | cluster_start = NEXT_RX(cluster_start); | ||
733 | if (cluster_start == curr) | ||
734 | break; | ||
735 | } | ||
736 | kick = curr; | ||
737 | count = 0; | ||
738 | } | ||
739 | } | ||
740 | if (kick >= 0) { | ||
741 | mb(); | ||
742 | writel(kick, gp->regs + RXDMA_KICK); | ||
743 | } | ||
744 | } | ||
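As a worked example: with gp->rx_new = 6 and limit = 14, the walk starts at the cluster boundary 4, refreshes slots 4-7 and then 8-11, writes 12 to RXDMA_KICK, and leaves slots 12-13 for a later pass. Grouping descriptors four at a time batches both the status-word writes and the doorbell write.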
745 | |||
746 | #define ALIGNED_RX_SKB_ADDR(addr) \ | ||
747 | ((((unsigned long)(addr) + (64UL - 1UL)) & ~(64UL - 1UL)) - (unsigned long)(addr)) | ||
748 | static __inline__ struct sk_buff *gem_alloc_skb(struct net_device *dev, int size, | ||
749 | gfp_t gfp_flags) | ||
750 | { | ||
751 | struct sk_buff *skb = alloc_skb(size + 64, gfp_flags); | ||
752 | |||
753 | if (likely(skb)) { | ||
754 | unsigned long offset = ALIGNED_RX_SKB_ADDR(skb->data); | ||
755 | skb_reserve(skb, offset); | ||
756 | skb->dev = dev; | ||
757 | } | ||
758 | return skb; | ||
759 | } | ||
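ALIGNED_RX_SKB_ADDR() computes how many bytes to skb_reserve() so the data pointer lands on a 64-byte boundary. A stand-alone check of the arithmetic:

        #include <assert.h>

        #define ALIGNED_RX_SKB_ADDR(addr) \
                ((((unsigned long)(addr) + (64UL - 1UL)) & ~(64UL - 1UL)) - (unsigned long)(addr))

        int main(void)
        {
                assert(ALIGNED_RX_SKB_ADDR(0x1234) == 0xc); /* rounds up to 0x1240 */
                assert(ALIGNED_RX_SKB_ADDR(0x1240) == 0);   /* already 64-byte aligned */
                return 0;
        }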
760 | |||
761 | static int gem_rx(struct gem *gp, int work_to_do) | ||
762 | { | ||
763 | struct net_device *dev = gp->dev; | ||
764 | int entry, drops, work_done = 0; | ||
765 | u32 done; | ||
766 | __sum16 csum; | ||
767 | |||
768 | if (netif_msg_rx_status(gp)) | ||
769 | printk(KERN_DEBUG "%s: rx interrupt, done: %d, rx_new: %d\n", | ||
770 | gp->dev->name, readl(gp->regs + RXDMA_DONE), gp->rx_new); | ||
771 | |||
772 | entry = gp->rx_new; | ||
773 | drops = 0; | ||
774 | done = readl(gp->regs + RXDMA_DONE); | ||
775 | for (;;) { | ||
776 | struct gem_rxd *rxd = &gp->init_block->rxd[entry]; | ||
777 | struct sk_buff *skb; | ||
778 | u64 status = le64_to_cpu(rxd->status_word); | ||
779 | dma_addr_t dma_addr; | ||
780 | int len; | ||
781 | |||
782 | if ((status & RXDCTRL_OWN) != 0) | ||
783 | break; | ||
784 | |||
785 | if (work_done >= RX_RING_SIZE || work_done >= work_to_do) | ||
786 | break; | ||
787 | |||
788 | /* When writing back RX descriptor, GEM writes status | ||
789 | * then buffer address, possibly in separate transactions. | ||
790 | * If we don't wait for the chip to write both, we could | ||
791 | * post a new buffer to this descriptor then have GEM spam | ||
792 | * post a new buffer to this descriptor and then have GEM | ||
793 | * scribble over the buffer address. We sync on the RX completion | ||
794 | */ | ||
795 | if (entry == done) { | ||
796 | done = readl(gp->regs + RXDMA_DONE); | ||
797 | if (entry == done) | ||
798 | break; | ||
799 | } | ||
800 | |||
801 | /* We can now account for the work we're about to do */ | ||
802 | work_done++; | ||
803 | |||
804 | skb = gp->rx_skbs[entry]; | ||
805 | |||
806 | len = (status & RXDCTRL_BUFSZ) >> 16; | ||
807 | if ((len < ETH_ZLEN) || (status & RXDCTRL_BAD)) { | ||
808 | dev->stats.rx_errors++; | ||
809 | if (len < ETH_ZLEN) | ||
810 | dev->stats.rx_length_errors++; | ||
811 | if (status & RXDCTRL_BAD) | ||
812 | dev->stats.rx_crc_errors++; | ||
813 | |||
814 | /* We'll just return it to GEM. */ | ||
815 | drop_it: | ||
816 | dev->stats.rx_dropped++; | ||
817 | goto next; | ||
818 | } | ||
819 | |||
820 | dma_addr = le64_to_cpu(rxd->buffer); | ||
821 | if (len > RX_COPY_THRESHOLD) { | ||
822 | struct sk_buff *new_skb; | ||
823 | |||
824 | new_skb = gem_alloc_skb(dev, RX_BUF_ALLOC_SIZE(gp), GFP_ATOMIC); | ||
825 | if (new_skb == NULL) { | ||
826 | drops++; | ||
827 | goto drop_it; | ||
828 | } | ||
829 | pci_unmap_page(gp->pdev, dma_addr, | ||
830 | RX_BUF_ALLOC_SIZE(gp), | ||
831 | PCI_DMA_FROMDEVICE); | ||
832 | gp->rx_skbs[entry] = new_skb; | ||
833 | skb_put(new_skb, (gp->rx_buf_sz + RX_OFFSET)); | ||
834 | rxd->buffer = cpu_to_le64(pci_map_page(gp->pdev, | ||
835 | virt_to_page(new_skb->data), | ||
836 | offset_in_page(new_skb->data), | ||
837 | RX_BUF_ALLOC_SIZE(gp), | ||
838 | PCI_DMA_FROMDEVICE)); | ||
839 | skb_reserve(new_skb, RX_OFFSET); | ||
840 | |||
841 | /* Trim the original skb for the netif. */ | ||
842 | skb_trim(skb, len); | ||
843 | } else { | ||
844 | struct sk_buff *copy_skb = netdev_alloc_skb(dev, len + 2); | ||
845 | |||
846 | if (copy_skb == NULL) { | ||
847 | drops++; | ||
848 | goto drop_it; | ||
849 | } | ||
850 | |||
851 | skb_reserve(copy_skb, 2); | ||
852 | skb_put(copy_skb, len); | ||
853 | pci_dma_sync_single_for_cpu(gp->pdev, dma_addr, len, PCI_DMA_FROMDEVICE); | ||
854 | skb_copy_from_linear_data(skb, copy_skb->data, len); | ||
855 | pci_dma_sync_single_for_device(gp->pdev, dma_addr, len, PCI_DMA_FROMDEVICE); | ||
856 | |||
857 | /* We'll reuse the original ring buffer. */ | ||
858 | skb = copy_skb; | ||
859 | } | ||
860 | |||
861 | csum = (__force __sum16)htons((status & RXDCTRL_TCPCSUM) ^ 0xffff); | ||
862 | skb->csum = csum_unfold(csum); | ||
863 | skb->ip_summed = CHECKSUM_COMPLETE; | ||
864 | skb->protocol = eth_type_trans(skb, gp->dev); | ||
865 | |||
866 | napi_gro_receive(&gp->napi, skb); | ||
867 | |||
868 | dev->stats.rx_packets++; | ||
869 | dev->stats.rx_bytes += len; | ||
870 | |||
871 | next: | ||
872 | entry = NEXT_RX(entry); | ||
873 | } | ||
874 | |||
875 | gem_post_rxds(gp, entry); | ||
876 | |||
877 | gp->rx_new = entry; | ||
878 | |||
879 | if (drops) | ||
880 | netdev_info(gp->dev, "Memory squeeze, dropping packets\n"); | ||
881 | |||
882 | return work_done; | ||
883 | } | ||
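The RX_COPY_THRESHOLD split above trades one allocation against one copy: large packets keep the DMA buffer and a fresh replacement is posted to the ring (a "buffer flip"), while small packets are copied into a right-sized skb so the large ring buffer can be reused immediately.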
884 | |||
885 | static int gem_poll(struct napi_struct *napi, int budget) | ||
886 | { | ||
887 | struct gem *gp = container_of(napi, struct gem, napi); | ||
888 | struct net_device *dev = gp->dev; | ||
889 | int work_done; | ||
890 | |||
891 | work_done = 0; | ||
892 | do { | ||
893 | /* Handle anomalies */ | ||
894 | if (unlikely(gp->status & GREG_STAT_ABNORMAL)) { | ||
895 | struct netdev_queue *txq = netdev_get_tx_queue(dev, 0); | ||
896 | int reset; | ||
897 | |||
898 | /* We run the abnormal interrupt handling code with | ||
899 | * the Tx lock. It only resets the Rx portion of the | ||
900 | * chip, but we need to guard it against DMA being | ||
901 | * restarted by the link poll timer | ||
902 | */ | ||
903 | __netif_tx_lock(txq, smp_processor_id()); | ||
904 | reset = gem_abnormal_irq(dev, gp, gp->status); | ||
905 | __netif_tx_unlock(txq); | ||
906 | if (reset) { | ||
907 | gem_schedule_reset(gp); | ||
908 | napi_complete(napi); | ||
909 | return work_done; | ||
910 | } | ||
911 | } | ||
912 | |||
913 | /* Run TX completion thread */ | ||
914 | gem_tx(dev, gp, gp->status); | ||
915 | |||
916 | /* Run RX thread. We don't use any locking here, | ||
917 | * code willing to do bad things - like cleaning the | ||
918 | * rx ring - must call napi_disable(), which | ||
919 | * schedule_timeout()'s if polling is already disabled. | ||
920 | */ | ||
921 | work_done += gem_rx(gp, budget - work_done); | ||
922 | |||
923 | if (work_done >= budget) | ||
924 | return work_done; | ||
925 | |||
926 | gp->status = readl(gp->regs + GREG_STAT); | ||
927 | } while (gp->status & GREG_STAT_NAPI); | ||
928 | |||
929 | napi_complete(napi); | ||
930 | gem_enable_ints(gp); | ||
931 | |||
932 | return work_done; | ||
933 | } | ||
934 | |||
935 | static irqreturn_t gem_interrupt(int irq, void *dev_id) | ||
936 | { | ||
937 | struct net_device *dev = dev_id; | ||
938 | struct gem *gp = netdev_priv(dev); | ||
939 | |||
940 | if (napi_schedule_prep(&gp->napi)) { | ||
941 | u32 gem_status = readl(gp->regs + GREG_STAT); | ||
942 | |||
943 | if (unlikely(gem_status == 0)) { | ||
944 | napi_enable(&gp->napi); | ||
945 | return IRQ_NONE; | ||
946 | } | ||
947 | if (netif_msg_intr(gp)) | ||
948 | printk(KERN_DEBUG "%s: gem_interrupt() gem_status: 0x%x\n", | ||
949 | gp->dev->name, gem_status); | ||
950 | |||
951 | gp->status = gem_status; | ||
952 | gem_disable_ints(gp); | ||
953 | __napi_schedule(&gp->napi); | ||
954 | } | ||
955 | |||
956 | /* If polling was disabled at the time we received that | ||
957 | * interrupt, we may return IRQ_HANDLED here while we | ||
958 | * should return IRQ_NONE. No big deal... | ||
959 | */ | ||
960 | return IRQ_HANDLED; | ||
961 | } | ||
962 | |||
963 | #ifdef CONFIG_NET_POLL_CONTROLLER | ||
964 | static void gem_poll_controller(struct net_device *dev) | ||
965 | { | ||
966 | struct gem *gp = netdev_priv(dev); | ||
967 | |||
968 | disable_irq(gp->pdev->irq); | ||
969 | gem_interrupt(gp->pdev->irq, dev); | ||
970 | enable_irq(gp->pdev->irq); | ||
971 | } | ||
972 | #endif | ||
973 | |||
974 | static void gem_tx_timeout(struct net_device *dev) | ||
975 | { | ||
976 | struct gem *gp = netdev_priv(dev); | ||
977 | |||
978 | netdev_err(dev, "transmit timed out, resetting\n"); | ||
979 | |||
980 | netdev_err(dev, "TX_STATE[%08x:%08x:%08x]\n", | ||
981 | readl(gp->regs + TXDMA_CFG), | ||
982 | readl(gp->regs + MAC_TXSTAT), | ||
983 | readl(gp->regs + MAC_TXCFG)); | ||
984 | netdev_err(dev, "RX_STATE[%08x:%08x:%08x]\n", | ||
985 | readl(gp->regs + RXDMA_CFG), | ||
986 | readl(gp->regs + MAC_RXSTAT), | ||
987 | readl(gp->regs + MAC_RXCFG)); | ||
988 | |||
989 | gem_schedule_reset(gp); | ||
990 | } | ||
991 | |||
992 | static __inline__ int gem_intme(int entry) | ||
993 | { | ||
994 | /* Algorithm: IRQ every 1/2 of descriptors. */ | ||
995 | if (!(entry & ((TX_RING_SIZE>>1)-1))) | ||
996 | return 1; | ||
997 | |||
998 | return 0; | ||
999 | } | ||
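Assuming, for illustration, TX_RING_SIZE is 128 (its real value comes from sungem.h), the test reduces to (entry & 63) == 0, so TXDCTRL_INTME is requested only at slots 0 and 64, i.e. two completion interrupts per trip around the ring.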
1000 | |||
1001 | static netdev_tx_t gem_start_xmit(struct sk_buff *skb, | ||
1002 | struct net_device *dev) | ||
1003 | { | ||
1004 | struct gem *gp = netdev_priv(dev); | ||
1005 | int entry; | ||
1006 | u64 ctrl; | ||
1007 | |||
1008 | ctrl = 0; | ||
1009 | if (skb->ip_summed == CHECKSUM_PARTIAL) { | ||
1010 | const u64 csum_start_off = skb_checksum_start_offset(skb); | ||
1011 | const u64 csum_stuff_off = csum_start_off + skb->csum_offset; | ||
1012 | |||
1013 | ctrl = (TXDCTRL_CENAB | | ||
1014 | (csum_start_off << 15) | | ||
1015 | (csum_stuff_off << 21)); | ||
1016 | } | ||
1017 | |||
1018 | if (unlikely(TX_BUFFS_AVAIL(gp) <= (skb_shinfo(skb)->nr_frags + 1))) { | ||
1019 | /* This is a hard error, log it. */ | ||
1020 | if (!netif_queue_stopped(dev)) { | ||
1021 | netif_stop_queue(dev); | ||
1022 | netdev_err(dev, "BUG! Tx Ring full when queue awake!\n"); | ||
1023 | } | ||
1024 | return NETDEV_TX_BUSY; | ||
1025 | } | ||
1026 | |||
1027 | entry = gp->tx_new; | ||
1028 | gp->tx_skbs[entry] = skb; | ||
1029 | |||
1030 | if (skb_shinfo(skb)->nr_frags == 0) { | ||
1031 | struct gem_txd *txd = &gp->init_block->txd[entry]; | ||
1032 | dma_addr_t mapping; | ||
1033 | u32 len; | ||
1034 | |||
1035 | len = skb->len; | ||
1036 | mapping = pci_map_page(gp->pdev, | ||
1037 | virt_to_page(skb->data), | ||
1038 | offset_in_page(skb->data), | ||
1039 | len, PCI_DMA_TODEVICE); | ||
1040 | ctrl |= TXDCTRL_SOF | TXDCTRL_EOF | len; | ||
1041 | if (gem_intme(entry)) | ||
1042 | ctrl |= TXDCTRL_INTME; | ||
1043 | txd->buffer = cpu_to_le64(mapping); | ||
1044 | wmb(); | ||
1045 | txd->control_word = cpu_to_le64(ctrl); | ||
1046 | entry = NEXT_TX(entry); | ||
1047 | } else { | ||
1048 | struct gem_txd *txd; | ||
1049 | u32 first_len; | ||
1050 | u64 intme; | ||
1051 | dma_addr_t first_mapping; | ||
1052 | int frag, first_entry = entry; | ||
1053 | |||
1054 | intme = 0; | ||
1055 | if (gem_intme(entry)) | ||
1056 | intme |= TXDCTRL_INTME; | ||
1057 | |||
1058 | /* We must give this initial chunk to the device last. | ||
1059 | * Otherwise we could race with the device. | ||
1060 | */ | ||
1061 | first_len = skb_headlen(skb); | ||
1062 | first_mapping = pci_map_page(gp->pdev, virt_to_page(skb->data), | ||
1063 | offset_in_page(skb->data), | ||
1064 | first_len, PCI_DMA_TODEVICE); | ||
1065 | entry = NEXT_TX(entry); | ||
1066 | |||
1067 | for (frag = 0; frag < skb_shinfo(skb)->nr_frags; frag++) { | ||
1068 | skb_frag_t *this_frag = &skb_shinfo(skb)->frags[frag]; | ||
1069 | u32 len; | ||
1070 | dma_addr_t mapping; | ||
1071 | u64 this_ctrl; | ||
1072 | |||
1073 | len = this_frag->size; | ||
1074 | mapping = pci_map_page(gp->pdev, | ||
1075 | this_frag->page, | ||
1076 | this_frag->page_offset, | ||
1077 | len, PCI_DMA_TODEVICE); | ||
1078 | this_ctrl = ctrl; | ||
1079 | if (frag == skb_shinfo(skb)->nr_frags - 1) | ||
1080 | this_ctrl |= TXDCTRL_EOF; | ||
1081 | |||
1082 | txd = &gp->init_block->txd[entry]; | ||
1083 | txd->buffer = cpu_to_le64(mapping); | ||
1084 | wmb(); | ||
1085 | txd->control_word = cpu_to_le64(this_ctrl | len); | ||
1086 | |||
1087 | if (gem_intme(entry)) | ||
1088 | intme |= TXDCTRL_INTME; | ||
1089 | |||
1090 | entry = NEXT_TX(entry); | ||
1091 | } | ||
1092 | txd = &gp->init_block->txd[first_entry]; | ||
1093 | txd->buffer = cpu_to_le64(first_mapping); | ||
1094 | wmb(); | ||
1095 | txd->control_word = | ||
1096 | cpu_to_le64(ctrl | TXDCTRL_SOF | intme | first_len); | ||
1097 | } | ||
1098 | |||
1099 | gp->tx_new = entry; | ||
1100 | if (unlikely(TX_BUFFS_AVAIL(gp) <= (MAX_SKB_FRAGS + 1))) { | ||
1101 | netif_stop_queue(dev); | ||
1102 | |||
1103 | /* netif_stop_queue() must be done before checking | ||
1104 | * the tx index in TX_BUFFS_AVAIL() below, because | ||
1105 | * in gem_tx(), we update tx_old before checking for | ||
1106 | * netif_queue_stopped(). | ||
1107 | */ | ||
1108 | smp_mb(); | ||
1109 | if (TX_BUFFS_AVAIL(gp) > (MAX_SKB_FRAGS + 1)) | ||
1110 | netif_wake_queue(dev); | ||
1111 | } | ||
1112 | if (netif_msg_tx_queued(gp)) | ||
1113 | printk(KERN_DEBUG "%s: tx queued, slot %d, skblen %d\n", | ||
1114 | dev->name, entry, skb->len); | ||
1115 | mb(); | ||
1116 | writel(gp->tx_new, gp->regs + TXDMA_KICK); | ||
1117 | |||
1118 | return NETDEV_TX_OK; | ||
1119 | } | ||
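Note the publication order in the function above: each descriptor's buffer address is written before its control word (the wmb() between them), and for multi-fragment packets the SOF descriptor is filled in last, so the chip can never observe a valid start-of-frame whose later fragments are not yet in place.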
1120 | |||
1121 | static void gem_pcs_reset(struct gem *gp) | ||
1122 | { | ||
1123 | int limit; | ||
1124 | u32 val; | ||
1125 | |||
1126 | /* Reset PCS unit. */ | ||
1127 | val = readl(gp->regs + PCS_MIICTRL); | ||
1128 | val |= PCS_MIICTRL_RST; | ||
1129 | writel(val, gp->regs + PCS_MIICTRL); | ||
1130 | |||
1131 | limit = 32; | ||
1132 | while (readl(gp->regs + PCS_MIICTRL) & PCS_MIICTRL_RST) { | ||
1133 | udelay(100); | ||
1134 | if (limit-- <= 0) | ||
1135 | break; | ||
1136 | } | ||
1137 | if (limit < 0) | ||
1138 | netdev_warn(gp->dev, "PCS reset bit would not clear\n"); | ||
1139 | } | ||
1140 | |||
1141 | static void gem_pcs_reinit_adv(struct gem *gp) | ||
1142 | { | ||
1143 | u32 val; | ||
1144 | |||
1145 | /* Make sure PCS is disabled while changing advertisement | ||
1146 | * configuration. | ||
1147 | */ | ||
1148 | val = readl(gp->regs + PCS_CFG); | ||
1149 | val &= ~(PCS_CFG_ENABLE | PCS_CFG_TO); | ||
1150 | writel(val, gp->regs + PCS_CFG); | ||
1151 | |||
1152 | /* Advertise all capabilities except asymmetric | ||
1153 | * pause. | ||
1154 | */ | ||
1155 | val = readl(gp->regs + PCS_MIIADV); | ||
1156 | val |= (PCS_MIIADV_FD | PCS_MIIADV_HD | | ||
1157 | PCS_MIIADV_SP | PCS_MIIADV_AP); | ||
1158 | writel(val, gp->regs + PCS_MIIADV); | ||
1159 | |||
1160 | /* Enable and restart auto-negotiation, disable wrapback/loopback, | ||
1161 | * and re-enable PCS. | ||
1162 | */ | ||
1163 | val = readl(gp->regs + PCS_MIICTRL); | ||
1164 | val |= (PCS_MIICTRL_RAN | PCS_MIICTRL_ANE); | ||
1165 | val &= ~PCS_MIICTRL_WB; | ||
1166 | writel(val, gp->regs + PCS_MIICTRL); | ||
1167 | |||
1168 | val = readl(gp->regs + PCS_CFG); | ||
1169 | val |= PCS_CFG_ENABLE; | ||
1170 | writel(val, gp->regs + PCS_CFG); | ||
1171 | |||
1172 | /* Make sure serialink loopback is off. The meaning | ||
1173 | * of this bit is logically inverted based upon whether | ||
1174 | * you are in Serialink or SERDES mode. | ||
1175 | */ | ||
1176 | val = readl(gp->regs + PCS_SCTRL); | ||
1177 | if (gp->phy_type == phy_serialink) | ||
1178 | val &= ~PCS_SCTRL_LOOP; | ||
1179 | else | ||
1180 | val |= PCS_SCTRL_LOOP; | ||
1181 | writel(val, gp->regs + PCS_SCTRL); | ||
1182 | } | ||
1183 | |||
1184 | #define STOP_TRIES 32 | ||
1185 | |||
1186 | static void gem_reset(struct gem *gp) | ||
1187 | { | ||
1188 | int limit; | ||
1189 | u32 val; | ||
1190 | |||
1191 | /* Make sure we won't get any more interrupts */ | ||
1192 | writel(0xffffffff, gp->regs + GREG_IMASK); | ||
1193 | |||
1194 | /* Reset the chip */ | ||
1195 | writel(gp->swrst_base | GREG_SWRST_TXRST | GREG_SWRST_RXRST, | ||
1196 | gp->regs + GREG_SWRST); | ||
1197 | |||
1198 | limit = STOP_TRIES; | ||
1199 | |||
1200 | do { | ||
1201 | udelay(20); | ||
1202 | val = readl(gp->regs + GREG_SWRST); | ||
1203 | if (limit-- <= 0) | ||
1204 | break; | ||
1205 | } while (val & (GREG_SWRST_TXRST | GREG_SWRST_RXRST)); | ||
1206 | |||
1207 | if (limit < 0) | ||
1208 | netdev_err(gp->dev, "SW reset did not complete\n"); | ||
1209 | |||
1210 | if (gp->phy_type == phy_serialink || gp->phy_type == phy_serdes) | ||
1211 | gem_pcs_reinit_adv(gp); | ||
1212 | } | ||
1213 | |||
1214 | static void gem_start_dma(struct gem *gp) | ||
1215 | { | ||
1216 | u32 val; | ||
1217 | |||
1218 | /* We are ready to rock, turn everything on. */ | ||
1219 | val = readl(gp->regs + TXDMA_CFG); | ||
1220 | writel(val | TXDMA_CFG_ENABLE, gp->regs + TXDMA_CFG); | ||
1221 | val = readl(gp->regs + RXDMA_CFG); | ||
1222 | writel(val | RXDMA_CFG_ENABLE, gp->regs + RXDMA_CFG); | ||
1223 | val = readl(gp->regs + MAC_TXCFG); | ||
1224 | writel(val | MAC_TXCFG_ENAB, gp->regs + MAC_TXCFG); | ||
1225 | val = readl(gp->regs + MAC_RXCFG); | ||
1226 | writel(val | MAC_RXCFG_ENAB, gp->regs + MAC_RXCFG); | ||
1227 | |||
1228 | (void) readl(gp->regs + MAC_RXCFG); | ||
1229 | udelay(100); | ||
1230 | |||
1231 | gem_enable_ints(gp); | ||
1232 | |||
1233 | writel(RX_RING_SIZE - 4, gp->regs + RXDMA_KICK); | ||
1234 | } | ||
1235 | |||
1236 | /* DMA won't actually be stopped for another ~4ms though ... | ||
1237 | */ | ||
1238 | static void gem_stop_dma(struct gem *gp) | ||
1239 | { | ||
1240 | u32 val; | ||
1241 | |||
1242 | /* We are done rocking, turn everything off. */ | ||
1243 | val = readl(gp->regs + TXDMA_CFG); | ||
1244 | writel(val & ~TXDMA_CFG_ENABLE, gp->regs + TXDMA_CFG); | ||
1245 | val = readl(gp->regs + RXDMA_CFG); | ||
1246 | writel(val & ~RXDMA_CFG_ENABLE, gp->regs + RXDMA_CFG); | ||
1247 | val = readl(gp->regs + MAC_TXCFG); | ||
1248 | writel(val & ~MAC_TXCFG_ENAB, gp->regs + MAC_TXCFG); | ||
1249 | val = readl(gp->regs + MAC_RXCFG); | ||
1250 | writel(val & ~MAC_RXCFG_ENAB, gp->regs + MAC_RXCFG); | ||
1251 | |||
1252 | (void) readl(gp->regs + MAC_RXCFG); | ||
1253 | |||
1254 | /* Need to wait a bit ... done by the caller */ | ||
1255 | } | ||
1256 | |||
1257 | |||
1258 | // XXX double-check what this function should do when called on a PCS PHY | ||
1259 | static void gem_begin_auto_negotiation(struct gem *gp, struct ethtool_cmd *ep) | ||
1260 | { | ||
1261 | u32 advertise, features; | ||
1262 | int autoneg; | ||
1263 | int speed; | ||
1264 | int duplex; | ||
1265 | |||
1266 | if (gp->phy_type != phy_mii_mdio0 && | ||
1267 | gp->phy_type != phy_mii_mdio1) | ||
1268 | goto non_mii; | ||
1269 | |||
1270 | /* Setup advertise */ | ||
1271 | if (found_mii_phy(gp)) | ||
1272 | features = gp->phy_mii.def->features; | ||
1273 | else | ||
1274 | features = 0; | ||
1275 | |||
1276 | advertise = features & ADVERTISE_MASK; | ||
1277 | if (gp->phy_mii.advertising != 0) | ||
1278 | advertise &= gp->phy_mii.advertising; | ||
1279 | |||
1280 | autoneg = gp->want_autoneg; | ||
1281 | speed = gp->phy_mii.speed; | ||
1282 | duplex = gp->phy_mii.duplex; | ||
1283 | |||
1284 | /* Setup link parameters */ | ||
1285 | if (!ep) | ||
1286 | goto start_aneg; | ||
1287 | if (ep->autoneg == AUTONEG_ENABLE) { | ||
1288 | advertise = ep->advertising; | ||
1289 | autoneg = 1; | ||
1290 | } else { | ||
1291 | autoneg = 0; | ||
1292 | speed = ethtool_cmd_speed(ep); | ||
1293 | duplex = ep->duplex; | ||
1294 | } | ||
1295 | |||
1296 | start_aneg: | ||
1297 | /* Sanitize settings based on PHY capabilities */ | ||
1298 | if ((features & SUPPORTED_Autoneg) == 0) | ||
1299 | autoneg = 0; | ||
1300 | if (speed == SPEED_1000 && | ||
1301 | !(features & (SUPPORTED_1000baseT_Half | SUPPORTED_1000baseT_Full))) | ||
1302 | speed = SPEED_100; | ||
1303 | if (speed == SPEED_100 && | ||
1304 | !(features & (SUPPORTED_100baseT_Half | SUPPORTED_100baseT_Full))) | ||
1305 | speed = SPEED_10; | ||
1306 | if (duplex == DUPLEX_FULL && | ||
1307 | !(features & (SUPPORTED_1000baseT_Full | | ||
1308 | SUPPORTED_100baseT_Full | | ||
1309 | SUPPORTED_10baseT_Full))) | ||
1310 | duplex = DUPLEX_HALF; | ||
1311 | if (speed == 0) | ||
1312 | speed = SPEED_10; | ||
1313 | |||
1314 | /* If we are asleep, we don't try to actually set up the PHY, we | ||
1315 | * just store the settings | ||
1316 | */ | ||
1317 | if (!netif_device_present(gp->dev)) { | ||
1318 | gp->phy_mii.autoneg = gp->want_autoneg = autoneg; | ||
1319 | gp->phy_mii.speed = speed; | ||
1320 | gp->phy_mii.duplex = duplex; | ||
1321 | return; | ||
1322 | } | ||
1323 | |||
1324 | /* Configure PHY & start aneg */ | ||
1325 | gp->want_autoneg = autoneg; | ||
1326 | if (autoneg) { | ||
1327 | if (found_mii_phy(gp)) | ||
1328 | gp->phy_mii.def->ops->setup_aneg(&gp->phy_mii, advertise); | ||
1329 | gp->lstate = link_aneg; | ||
1330 | } else { | ||
1331 | if (found_mii_phy(gp)) | ||
1332 | gp->phy_mii.def->ops->setup_forced(&gp->phy_mii, speed, duplex); | ||
1333 | gp->lstate = link_force_ok; | ||
1334 | } | ||
1335 | |||
1336 | non_mii: | ||
1337 | gp->timer_ticks = 0; | ||
1338 | mod_timer(&gp->link_timer, jiffies + ((12 * HZ) / 10)); | ||
1339 | } | ||
1340 | |||
1341 | /* A link-up condition has occurred, initialize and enable the | ||
1342 | * rest of the chip. | ||
1343 | */ | ||
1344 | static int gem_set_link_modes(struct gem *gp) | ||
1345 | { | ||
1346 | struct netdev_queue *txq = netdev_get_tx_queue(gp->dev, 0); | ||
1347 | int full_duplex, speed, pause; | ||
1348 | u32 val; | ||
1349 | |||
1350 | full_duplex = 0; | ||
1351 | speed = SPEED_10; | ||
1352 | pause = 0; | ||
1353 | |||
1354 | if (found_mii_phy(gp)) { | ||
1355 | if (gp->phy_mii.def->ops->read_link(&gp->phy_mii)) | ||
1356 | return 1; | ||
1357 | full_duplex = (gp->phy_mii.duplex == DUPLEX_FULL); | ||
1358 | speed = gp->phy_mii.speed; | ||
1359 | pause = gp->phy_mii.pause; | ||
1360 | } else if (gp->phy_type == phy_serialink || | ||
1361 | gp->phy_type == phy_serdes) { | ||
1362 | u32 pcs_lpa = readl(gp->regs + PCS_MIILP); | ||
1363 | |||
1364 | if ((pcs_lpa & PCS_MIIADV_FD) || gp->phy_type == phy_serdes) | ||
1365 | full_duplex = 1; | ||
1366 | speed = SPEED_1000; | ||
1367 | } | ||
1368 | |||
1369 | netif_info(gp, link, gp->dev, "Link is up at %d Mbps, %s-duplex\n", | ||
1370 | speed, (full_duplex ? "full" : "half")); | ||
1371 | |||
1372 | |||
1373 | /* We take the tx queue lock to avoid collisions between | ||
1374 | * this code, the tx path and the NAPI-driven error path | ||
1375 | */ | ||
1376 | __netif_tx_lock(txq, smp_processor_id()); | ||
1377 | |||
1378 | val = (MAC_TXCFG_EIPG0 | MAC_TXCFG_NGU); | ||
1379 | if (full_duplex) { | ||
1380 | val |= (MAC_TXCFG_ICS | MAC_TXCFG_ICOLL); | ||
1381 | } else { | ||
1382 | /* MAC_TXCFG_NBO must be zero. */ | ||
1383 | } | ||
1384 | writel(val, gp->regs + MAC_TXCFG); | ||
1385 | |||
1386 | val = (MAC_XIFCFG_OE | MAC_XIFCFG_LLED); | ||
1387 | if (!full_duplex && | ||
1388 | (gp->phy_type == phy_mii_mdio0 || | ||
1389 | gp->phy_type == phy_mii_mdio1)) { | ||
1390 | val |= MAC_XIFCFG_DISE; | ||
1391 | } else if (full_duplex) { | ||
1392 | val |= MAC_XIFCFG_FLED; | ||
1393 | } | ||
1394 | |||
1395 | if (speed == SPEED_1000) | ||
1396 | val |= (MAC_XIFCFG_GMII); | ||
1397 | |||
1398 | writel(val, gp->regs + MAC_XIFCFG); | ||
1399 | |||
1400 | /* If gigabit and half-duplex, enable carrier extension | ||
1401 | * mode. Else, disable it. | ||
1402 | */ | ||
1403 | if (speed == SPEED_1000 && !full_duplex) { | ||
1404 | val = readl(gp->regs + MAC_TXCFG); | ||
1405 | writel(val | MAC_TXCFG_TCE, gp->regs + MAC_TXCFG); | ||
1406 | |||
1407 | val = readl(gp->regs + MAC_RXCFG); | ||
1408 | writel(val | MAC_RXCFG_RCE, gp->regs + MAC_RXCFG); | ||
1409 | } else { | ||
1410 | val = readl(gp->regs + MAC_TXCFG); | ||
1411 | writel(val & ~MAC_TXCFG_TCE, gp->regs + MAC_TXCFG); | ||
1412 | |||
1413 | val = readl(gp->regs + MAC_RXCFG); | ||
1414 | writel(val & ~MAC_RXCFG_RCE, gp->regs + MAC_RXCFG); | ||
1415 | } | ||
1416 | |||
1417 | if (gp->phy_type == phy_serialink || | ||
1418 | gp->phy_type == phy_serdes) { | ||
1419 | u32 pcs_lpa = readl(gp->regs + PCS_MIILP); | ||
1420 | |||
1421 | if (pcs_lpa & (PCS_MIIADV_SP | PCS_MIIADV_AP)) | ||
1422 | pause = 1; | ||
1423 | } | ||
1424 | |||
1425 | if (!full_duplex) | ||
1426 | writel(512, gp->regs + MAC_STIME); | ||
1427 | else | ||
1428 | writel(64, gp->regs + MAC_STIME); | ||
1429 | val = readl(gp->regs + MAC_MCCFG); | ||
1430 | if (pause) | ||
1431 | val |= (MAC_MCCFG_SPE | MAC_MCCFG_RPE); | ||
1432 | else | ||
1433 | val &= ~(MAC_MCCFG_SPE | MAC_MCCFG_RPE); | ||
1434 | writel(val, gp->regs + MAC_MCCFG); | ||
1435 | |||
1436 | gem_start_dma(gp); | ||
1437 | |||
1438 | __netif_tx_unlock(txq); | ||
1439 | |||
1440 | if (netif_msg_link(gp)) { | ||
1441 | if (pause) { | ||
1442 | netdev_info(gp->dev, | ||
1443 | "Pause is enabled (rxfifo: %d off: %d on: %d)\n", | ||
1444 | gp->rx_fifo_sz, | ||
1445 | gp->rx_pause_off, | ||
1446 | gp->rx_pause_on); | ||
1447 | } else { | ||
1448 | netdev_info(gp->dev, "Pause is disabled\n"); | ||
1449 | } | ||
1450 | } | ||
1451 | |||
1452 | return 0; | ||
1453 | } | ||
1454 | |||
1455 | static int gem_mdio_link_not_up(struct gem *gp) | ||
1456 | { | ||
1457 | switch (gp->lstate) { | ||
1458 | case link_force_ret: | ||
1459 | netif_info(gp, link, gp->dev, | ||
1460 | "Autoneg failed again, keeping forced mode\n"); | ||
1461 | gp->phy_mii.def->ops->setup_forced(&gp->phy_mii, | ||
1462 | gp->last_forced_speed, DUPLEX_HALF); | ||
1463 | gp->timer_ticks = 5; | ||
1464 | gp->lstate = link_force_ok; | ||
1465 | return 0; | ||
1466 | case link_aneg: | ||
1467 | /* We try forced modes after a failed aneg only on PHYs that don't | ||
1468 | * have the "magic_aneg" bit set, which means they internally do the | ||
1469 | * whole forced-mode fallback themselves. On those, we just restart aneg | ||
1470 | */ | ||
1471 | if (gp->phy_mii.def->magic_aneg) | ||
1472 | return 1; | ||
1473 | netif_info(gp, link, gp->dev, "switching to forced 100bt\n"); | ||
1474 | /* Try forced modes. */ | ||
1475 | gp->phy_mii.def->ops->setup_forced(&gp->phy_mii, SPEED_100, | ||
1476 | DUPLEX_HALF); | ||
1477 | gp->timer_ticks = 5; | ||
1478 | gp->lstate = link_force_try; | ||
1479 | return 0; | ||
1480 | case link_force_try: | ||
1481 | /* Downgrade from 100 to 10 Mbps if necessary. | ||
1482 | * If already at 10Mbps, warn user about the | ||
1483 | * situation every 10 ticks. | ||
1484 | */ | ||
1485 | if (gp->phy_mii.speed == SPEED_100) { | ||
1486 | gp->phy_mii.def->ops->setup_forced(&gp->phy_mii, SPEED_10, | ||
1487 | DUPLEX_HALF); | ||
1488 | gp->timer_ticks = 5; | ||
1489 | netif_info(gp, link, gp->dev, | ||
1490 | "switching to forced 10bt\n"); | ||
1491 | return 0; | ||
1492 | } else | ||
1493 | return 1; | ||
1494 | default: | ||
1495 | return 0; | ||
1496 | } | ||
1497 | } | ||
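In short, the fallback ladder is autoneg -> forced 100baseT half -> forced 10baseT half, waiting five link-timer ticks on each rung; PHYs flagged magic_aneg skip the ladder entirely and simply restart autoneg.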
1498 | |||
1499 | static void gem_link_timer(unsigned long data) | ||
1500 | { | ||
1501 | struct gem *gp = (struct gem *) data; | ||
1502 | struct net_device *dev = gp->dev; | ||
1503 | int restart_aneg = 0; | ||
1504 | |||
1505 | /* There's no point doing anything if we're going to be reset */ | ||
1506 | if (gp->reset_task_pending) | ||
1507 | return; | ||
1508 | |||
1509 | if (gp->phy_type == phy_serialink || | ||
1510 | gp->phy_type == phy_serdes) { | ||
1511 | u32 val = readl(gp->regs + PCS_MIISTAT); | ||
1512 | |||
1513 | if (!(val & PCS_MIISTAT_LS)) | ||
1514 | val = readl(gp->regs + PCS_MIISTAT); | ||
1515 | |||
1516 | if ((val & PCS_MIISTAT_LS) != 0) { | ||
1517 | if (gp->lstate == link_up) | ||
1518 | goto restart; | ||
1519 | |||
1520 | gp->lstate = link_up; | ||
1521 | netif_carrier_on(dev); | ||
1522 | (void)gem_set_link_modes(gp); | ||
1523 | } | ||
1524 | goto restart; | ||
1525 | } | ||
1526 | if (found_mii_phy(gp) && gp->phy_mii.def->ops->poll_link(&gp->phy_mii)) { | ||
1527 | /* Ok, here we got a link. If we had it due to a forced | ||
1528 | * fallback, and we were configured for autoneg, we do | ||
1529 | * retry a short autoneg pass. If you know your hub is | ||
1530 | * broken, use ethtool ;) | ||
1531 | */ | ||
1532 | if (gp->lstate == link_force_try && gp->want_autoneg) { | ||
1533 | gp->lstate = link_force_ret; | ||
1534 | gp->last_forced_speed = gp->phy_mii.speed; | ||
1535 | gp->timer_ticks = 5; | ||
1536 | if (netif_msg_link(gp)) | ||
1537 | netdev_info(dev, | ||
1538 | "Got link after fallback, retrying autoneg once...\n"); | ||
1539 | gp->phy_mii.def->ops->setup_aneg(&gp->phy_mii, gp->phy_mii.advertising); | ||
1540 | } else if (gp->lstate != link_up) { | ||
1541 | gp->lstate = link_up; | ||
1542 | netif_carrier_on(dev); | ||
1543 | if (gem_set_link_modes(gp)) | ||
1544 | restart_aneg = 1; | ||
1545 | } | ||
1546 | } else { | ||
1547 | /* If the link was previously up, we restart the | ||
1548 | * whole process | ||
1549 | */ | ||
1550 | if (gp->lstate == link_up) { | ||
1551 | gp->lstate = link_down; | ||
1552 | netif_info(gp, link, dev, "Link down\n"); | ||
1553 | netif_carrier_off(dev); | ||
1554 | gem_schedule_reset(gp); | ||
1555 | /* The reset task will restart the timer */ | ||
1556 | return; | ||
1557 | } else if (++gp->timer_ticks > 10) { | ||
1558 | if (found_mii_phy(gp)) | ||
1559 | restart_aneg = gem_mdio_link_not_up(gp); | ||
1560 | else | ||
1561 | restart_aneg = 1; | ||
1562 | } | ||
1563 | } | ||
1564 | if (restart_aneg) { | ||
1565 | gem_begin_auto_negotiation(gp, NULL); | ||
1566 | return; | ||
1567 | } | ||
1568 | restart: | ||
1569 | mod_timer(&gp->link_timer, jiffies + ((12 * HZ) / 10)); | ||
1570 | } | ||
1571 | |||
1572 | static void gem_clean_rings(struct gem *gp) | ||
1573 | { | ||
1574 | struct gem_init_block *gb = gp->init_block; | ||
1575 | struct sk_buff *skb; | ||
1576 | int i; | ||
1577 | dma_addr_t dma_addr; | ||
1578 | |||
1579 | for (i = 0; i < RX_RING_SIZE; i++) { | ||
1580 | struct gem_rxd *rxd; | ||
1581 | |||
1582 | rxd = &gb->rxd[i]; | ||
1583 | if (gp->rx_skbs[i] != NULL) { | ||
1584 | skb = gp->rx_skbs[i]; | ||
1585 | dma_addr = le64_to_cpu(rxd->buffer); | ||
1586 | pci_unmap_page(gp->pdev, dma_addr, | ||
1587 | RX_BUF_ALLOC_SIZE(gp), | ||
1588 | PCI_DMA_FROMDEVICE); | ||
1589 | dev_kfree_skb_any(skb); | ||
1590 | gp->rx_skbs[i] = NULL; | ||
1591 | } | ||
1592 | rxd->status_word = 0; | ||
1593 | wmb(); | ||
1594 | rxd->buffer = 0; | ||
1595 | } | ||
1596 | |||
1597 | for (i = 0; i < TX_RING_SIZE; i++) { | ||
1598 | if (gp->tx_skbs[i] != NULL) { | ||
1599 | struct gem_txd *txd; | ||
1600 | int frag; | ||
1601 | |||
1602 | skb = gp->tx_skbs[i]; | ||
1603 | gp->tx_skbs[i] = NULL; | ||
1604 | |||
1605 | for (frag = 0; frag <= skb_shinfo(skb)->nr_frags; frag++) { | ||
1606 | int ent = i & (TX_RING_SIZE - 1); | ||
1607 | |||
1608 | txd = &gb->txd[ent]; | ||
1609 | dma_addr = le64_to_cpu(txd->buffer); | ||
1610 | pci_unmap_page(gp->pdev, dma_addr, | ||
1611 | le64_to_cpu(txd->control_word) & | ||
1612 | TXDCTRL_BUFSZ, PCI_DMA_TODEVICE); | ||
1613 | |||
1614 | if (frag != skb_shinfo(skb)->nr_frags) | ||
1615 | i++; | ||
1616 | } | ||
1617 | dev_kfree_skb_any(skb); | ||
1618 | } | ||
1619 | } | ||
1620 | } | ||
1621 | |||
1622 | static void gem_init_rings(struct gem *gp) | ||
1623 | { | ||
1624 | struct gem_init_block *gb = gp->init_block; | ||
1625 | struct net_device *dev = gp->dev; | ||
1626 | int i; | ||
1627 | dma_addr_t dma_addr; | ||
1628 | |||
1629 | gp->rx_new = gp->rx_old = gp->tx_new = gp->tx_old = 0; | ||
1630 | |||
1631 | gem_clean_rings(gp); | ||
1632 | |||
1633 | gp->rx_buf_sz = max(dev->mtu + ETH_HLEN + VLAN_HLEN, | ||
1634 | (unsigned)VLAN_ETH_FRAME_LEN); | ||
1635 | |||
1636 | for (i = 0; i < RX_RING_SIZE; i++) { | ||
1637 | struct sk_buff *skb; | ||
1638 | struct gem_rxd *rxd = &gb->rxd[i]; | ||
1639 | |||
1640 | skb = gem_alloc_skb(dev, RX_BUF_ALLOC_SIZE(gp), GFP_KERNEL); | ||
1641 | if (!skb) { | ||
1642 | rxd->buffer = 0; | ||
1643 | rxd->status_word = 0; | ||
1644 | continue; | ||
1645 | } | ||
1646 | |||
1647 | gp->rx_skbs[i] = skb; | ||
1648 | skb_put(skb, (gp->rx_buf_sz + RX_OFFSET)); | ||
1649 | dma_addr = pci_map_page(gp->pdev, | ||
1650 | virt_to_page(skb->data), | ||
1651 | offset_in_page(skb->data), | ||
1652 | RX_BUF_ALLOC_SIZE(gp), | ||
1653 | PCI_DMA_FROMDEVICE); | ||
1654 | rxd->buffer = cpu_to_le64(dma_addr); | ||
1655 | wmb(); | ||
1656 | rxd->status_word = cpu_to_le64(RXDCTRL_FRESH(gp)); | ||
1657 | skb_reserve(skb, RX_OFFSET); | ||
1658 | } | ||
1659 | |||
1660 | for (i = 0; i < TX_RING_SIZE; i++) { | ||
1661 | struct gem_txd *txd = &gb->txd[i]; | ||
1662 | |||
1663 | txd->control_word = 0; | ||
1664 | wmb(); | ||
1665 | txd->buffer = 0; | ||
1666 | } | ||
1667 | wmb(); | ||
1668 | } | ||
1669 | |||
1670 | /* Init PHY interface and start link poll state machine */ | ||
1671 | static void gem_init_phy(struct gem *gp) | ||
1672 | { | ||
1673 | u32 mifcfg; | ||
1674 | |||
1675 | /* Revert MIF CFG setting done on stop_phy */ | ||
1676 | mifcfg = readl(gp->regs + MIF_CFG); | ||
1677 | mifcfg &= ~MIF_CFG_BBMODE; | ||
1678 | writel(mifcfg, gp->regs + MIF_CFG); | ||
1679 | |||
1680 | if (gp->pdev->vendor == PCI_VENDOR_ID_APPLE) { | ||
1681 | int i; | ||
1682 | |||
1683 | /* These delays suck, but the HW seems to love them, so I'll | ||
1684 | * seriously consider breaking some locks here to be able | ||
1685 | * to schedule instead | ||
1686 | */ | ||
1687 | for (i = 0; i < 3; i++) { | ||
1688 | #ifdef CONFIG_PPC_PMAC | ||
1689 | pmac_call_feature(PMAC_FTR_GMAC_PHY_RESET, gp->of_node, 0, 0); | ||
1690 | msleep(20); | ||
1691 | #endif | ||
1692 | /* Some PHYs used by apple have problem getting back to us, | ||
1693 | * we do an additional reset here | ||
1694 | */ | ||
1695 | phy_write(gp, MII_BMCR, BMCR_RESET); | ||
1696 | msleep(20); | ||
1697 | if (phy_read(gp, MII_BMCR) != 0xffff) | ||
1698 | break; | ||
1699 | if (i == 2) | ||
1700 | netdev_warn(gp->dev, "GMAC PHY not responding!\n"); | ||
1701 | } | ||
1702 | } | ||
1703 | |||
1704 | if (gp->pdev->vendor == PCI_VENDOR_ID_SUN && | ||
1705 | gp->pdev->device == PCI_DEVICE_ID_SUN_GEM) { | ||
1706 | u32 val; | ||
1707 | |||
1708 | /* Init datapath mode register. */ | ||
1709 | if (gp->phy_type == phy_mii_mdio0 || | ||
1710 | gp->phy_type == phy_mii_mdio1) { | ||
1711 | val = PCS_DMODE_MGM; | ||
1712 | } else if (gp->phy_type == phy_serialink) { | ||
1713 | val = PCS_DMODE_SM | PCS_DMODE_GMOE; | ||
1714 | } else { | ||
1715 | val = PCS_DMODE_ESM; | ||
1716 | } | ||
1717 | |||
1718 | writel(val, gp->regs + PCS_DMODE); | ||
1719 | } | ||
1720 | |||
1721 | if (gp->phy_type == phy_mii_mdio0 || | ||
1722 | gp->phy_type == phy_mii_mdio1) { | ||
1723 | /* Reset and detect MII PHY */ | ||
1724 | mii_phy_probe(&gp->phy_mii, gp->mii_phy_addr); | ||
1725 | |||
1726 | /* Init PHY */ | ||
1727 | if (gp->phy_mii.def && gp->phy_mii.def->ops->init) | ||
1728 | gp->phy_mii.def->ops->init(&gp->phy_mii); | ||
1729 | } else { | ||
1730 | gem_pcs_reset(gp); | ||
1731 | gem_pcs_reinit_adv(gp); | ||
1732 | } | ||
1733 | |||
1734 | /* Default aneg parameters */ | ||
1735 | gp->timer_ticks = 0; | ||
1736 | gp->lstate = link_down; | ||
1737 | netif_carrier_off(gp->dev); | ||
1738 | |||
1739 | /* Print things out */ | ||
1740 | if (gp->phy_type == phy_mii_mdio0 || | ||
1741 | gp->phy_type == phy_mii_mdio1) | ||
1742 | netdev_info(gp->dev, "Found %s PHY\n", | ||
1743 | gp->phy_mii.def ? gp->phy_mii.def->name : "no"); | ||
1744 | |||
1745 | gem_begin_auto_negotiation(gp, NULL); | ||
1746 | } | ||
1747 | |||
1748 | static void gem_init_dma(struct gem *gp) | ||
1749 | { | ||
1750 | u64 desc_dma = (u64) gp->gblock_dvma; | ||
1751 | u32 val; | ||
1752 | |||
1753 | val = (TXDMA_CFG_BASE | (0x7ff << 10) | TXDMA_CFG_PMODE); | ||
1754 | writel(val, gp->regs + TXDMA_CFG); | ||
1755 | |||
1756 | writel(desc_dma >> 32, gp->regs + TXDMA_DBHI); | ||
1757 | writel(desc_dma & 0xffffffff, gp->regs + TXDMA_DBLOW); | ||
1758 | desc_dma += (INIT_BLOCK_TX_RING_SIZE * sizeof(struct gem_txd)); | ||
1759 | |||
1760 | writel(0, gp->regs + TXDMA_KICK); | ||
1761 | |||
1762 | val = (RXDMA_CFG_BASE | (RX_OFFSET << 10) | | ||
1763 | ((14 / 2) << 13) | RXDMA_CFG_FTHRESH_128); | ||
1764 | writel(val, gp->regs + RXDMA_CFG); | ||
1765 | |||
1766 | writel(desc_dma >> 32, gp->regs + RXDMA_DBHI); | ||
1767 | writel(desc_dma & 0xffffffff, gp->regs + RXDMA_DBLOW); | ||
1768 | |||
1769 | writel(RX_RING_SIZE - 4, gp->regs + RXDMA_KICK); | ||
1770 | |||
1771 | val = (((gp->rx_pause_off / 64) << 0) & RXDMA_PTHRESH_OFF); | ||
1772 | val |= (((gp->rx_pause_on / 64) << 12) & RXDMA_PTHRESH_ON); | ||
1773 | writel(val, gp->regs + RXDMA_PTHRESH); | ||
1774 | |||
1775 | if (readl(gp->regs + GREG_BIFCFG) & GREG_BIFCFG_M66EN) | ||
1776 | writel(((5 & RXDMA_BLANK_IPKTS) | | ||
1777 | ((8 << 12) & RXDMA_BLANK_ITIME)), | ||
1778 | gp->regs + RXDMA_BLANK); | ||
1779 | else | ||
1780 | writel(((5 & RXDMA_BLANK_IPKTS) | | ||
1781 | ((4 << 12) & RXDMA_BLANK_ITIME)), | ||
1782 | gp->regs + RXDMA_BLANK); | ||
1783 | } | ||
1784 | |||
1785 | static u32 gem_setup_multicast(struct gem *gp) | ||
1786 | { | ||
1787 | u32 rxcfg = 0; | ||
1788 | int i; | ||
1789 | |||
1790 | if ((gp->dev->flags & IFF_ALLMULTI) || | ||
1791 | (netdev_mc_count(gp->dev) > 256)) { | ||
1792 | for (i=0; i<16; i++) | ||
1793 | writel(0xffff, gp->regs + MAC_HASH0 + (i << 2)); | ||
1794 | rxcfg |= MAC_RXCFG_HFE; | ||
1795 | } else if (gp->dev->flags & IFF_PROMISC) { | ||
1796 | rxcfg |= MAC_RXCFG_PROM; | ||
1797 | } else { | ||
1798 | u16 hash_table[16]; | ||
1799 | u32 crc; | ||
1800 | struct netdev_hw_addr *ha; | ||
1801 | int i; | ||
1802 | |||
1803 | memset(hash_table, 0, sizeof(hash_table)); | ||
1804 | netdev_for_each_mc_addr(ha, gp->dev) { | ||
1805 | crc = ether_crc_le(6, ha->addr); | ||
1806 | crc >>= 24; | ||
1807 | hash_table[crc >> 4] |= 1 << (15 - (crc & 0xf)); | ||
1808 | } | ||
1809 | for (i=0; i<16; i++) | ||
1810 | writel(hash_table[i], gp->regs + MAC_HASH0 + (i << 2)); | ||
1811 | rxcfg |= MAC_RXCFG_HFE; | ||
1812 | } | ||
1813 | |||
1814 | return rxcfg; | ||
1815 | } | ||
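The hash branch above selects one of 256 filter bits using the top 8 bits of the little-endian CRC-32 of the MAC address, spread across sixteen 16-bit registers. A stand-alone sketch of the bit placement (the CRC itself, ether_crc_le() in the kernel, is omitted):

        #include <stdint.h>

        /* Mirrors the hash placement above: crc is the little-endian CRC-32
         * of the 6-byte MAC address; only its top 8 bits are used.
         */
        static void hash_set(uint16_t hash_table[16], uint32_t crc)
        {
                crc >>= 24;                                /* top 8 bits: 0..255 */
                hash_table[crc >> 4] |= 1u << (15 - (crc & 0xf));
        }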
1816 | |||
1817 | static void gem_init_mac(struct gem *gp) | ||
1818 | { | ||
1819 | unsigned char *e = &gp->dev->dev_addr[0]; | ||
1820 | |||
1821 | writel(0x1bf0, gp->regs + MAC_SNDPAUSE); | ||
1822 | |||
1823 | writel(0x00, gp->regs + MAC_IPG0); | ||
1824 | writel(0x08, gp->regs + MAC_IPG1); | ||
1825 | writel(0x04, gp->regs + MAC_IPG2); | ||
1826 | writel(0x40, gp->regs + MAC_STIME); | ||
1827 | writel(0x40, gp->regs + MAC_MINFSZ); | ||
1828 | |||
1829 | /* Ethernet payload + header + FCS + optional VLAN tag. */ | ||
1830 | writel(0x20000000 | (gp->rx_buf_sz + 4), gp->regs + MAC_MAXFSZ); | ||
1831 | |||
1832 | writel(0x07, gp->regs + MAC_PASIZE); | ||
1833 | writel(0x04, gp->regs + MAC_JAMSIZE); | ||
1834 | writel(0x10, gp->regs + MAC_ATTLIM); | ||
1835 | writel(0x8808, gp->regs + MAC_MCTYPE); | ||
1836 | |||
1837 | writel((e[5] | (e[4] << 8)) & 0x3ff, gp->regs + MAC_RANDSEED); | ||
1838 | |||
1839 | writel((e[4] << 8) | e[5], gp->regs + MAC_ADDR0); | ||
1840 | writel((e[2] << 8) | e[3], gp->regs + MAC_ADDR1); | ||
1841 | writel((e[0] << 8) | e[1], gp->regs + MAC_ADDR2); | ||
1842 | |||
1843 | writel(0, gp->regs + MAC_ADDR3); | ||
1844 | writel(0, gp->regs + MAC_ADDR4); | ||
1845 | writel(0, gp->regs + MAC_ADDR5); | ||
1846 | |||
1847 | writel(0x0001, gp->regs + MAC_ADDR6); | ||
1848 | writel(0xc200, gp->regs + MAC_ADDR7); | ||
1849 | writel(0x0180, gp->regs + MAC_ADDR8); | ||
1850 | |||
1851 | writel(0, gp->regs + MAC_AFILT0); | ||
1852 | writel(0, gp->regs + MAC_AFILT1); | ||
1853 | writel(0, gp->regs + MAC_AFILT2); | ||
1854 | writel(0, gp->regs + MAC_AF21MSK); | ||
1855 | writel(0, gp->regs + MAC_AF0MSK); | ||
1856 | |||
1857 | gp->mac_rx_cfg = gem_setup_multicast(gp); | ||
1858 | #ifdef STRIP_FCS | ||
1859 | gp->mac_rx_cfg |= MAC_RXCFG_SFCS; | ||
1860 | #endif | ||
1861 | writel(0, gp->regs + MAC_NCOLL); | ||
1862 | writel(0, gp->regs + MAC_FASUCC); | ||
1863 | writel(0, gp->regs + MAC_ECOLL); | ||
1864 | writel(0, gp->regs + MAC_LCOLL); | ||
1865 | writel(0, gp->regs + MAC_DTIMER); | ||
1866 | writel(0, gp->regs + MAC_PATMPS); | ||
1867 | writel(0, gp->regs + MAC_RFCTR); | ||
1868 | writel(0, gp->regs + MAC_LERR); | ||
1869 | writel(0, gp->regs + MAC_AERR); | ||
1870 | writel(0, gp->regs + MAC_FCSERR); | ||
1871 | writel(0, gp->regs + MAC_RXCVERR); | ||
1872 | |||
1873 | /* Clear RX/TX/MAC/XIF config, we will set these up and enable | ||
1874 | * them once a link is established. | ||
1875 | */ | ||
1876 | writel(0, gp->regs + MAC_TXCFG); | ||
1877 | writel(gp->mac_rx_cfg, gp->regs + MAC_RXCFG); | ||
1878 | writel(0, gp->regs + MAC_MCCFG); | ||
1879 | writel(0, gp->regs + MAC_XIFCFG); | ||
1880 | |||
1881 | /* Setup MAC interrupts. We want to get all of the interesting | ||
1882 | * counter expiration events, but we do not want to hear about | ||
1883 | * normal rx/tx as the DMA engine tells us that. | ||
1884 | */ | ||
1885 | writel(MAC_TXSTAT_XMIT, gp->regs + MAC_TXMASK); | ||
1886 | writel(MAC_RXSTAT_RCV, gp->regs + MAC_RXMASK); | ||
1887 | |||
1888 | /* Don't enable even the PAUSE interrupts for now, we | ||
1889 | * make no use of those events other than to record them. | ||
1890 | */ | ||
1891 | writel(0xffffffff, gp->regs + MAC_MCMASK); | ||
1892 | |||
1893 | /* Don't enable GEM's WOL in normal operations | ||
1894 | */ | ||
1895 | if (gp->has_wol) | ||
1896 | writel(0, gp->regs + WOL_WAKECSR); | ||
1897 | } | ||
1898 | |||
1899 | static void gem_init_pause_thresholds(struct gem *gp) | ||
1900 | { | ||
1901 | u32 cfg; | ||
1902 | |||
1903 | /* Calculate pause thresholds. Setting the OFF threshold to the | ||
1904 | * full RX fifo size effectively disables PAUSE generation which | ||
1905 | * is what we do for 10/100 only GEMs which have FIFOs too small | ||
1906 | * to make real gains from PAUSE. | ||
1907 | */ | ||
1908 | if (gp->rx_fifo_sz <= (2 * 1024)) { | ||
1909 | gp->rx_pause_off = gp->rx_pause_on = gp->rx_fifo_sz; | ||
1910 | } else { | ||
1911 | int max_frame = (gp->rx_buf_sz + 4 + 64) & ~63; | ||
1912 | int off = (gp->rx_fifo_sz - (max_frame * 2)); | ||
1913 | int on = off - max_frame; | ||
1914 | |||
1915 | gp->rx_pause_off = off; | ||
1916 | gp->rx_pause_on = on; | ||
1917 | } | ||
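
As a worked example (assuming rx_buf_sz = 1536 and the stock GEM's 20 KB RX FIFO): max_frame = (1536 + 4 + 64) & ~63 = 1600, so off = 20480 - 2 * 1600 = 17280 and on = 17280 - 1600 = 15680. XOFF is thus emitted once more than about 17 KB sits in the FIFO and XON once it drains below roughly 15 KB; both values are later programmed into RXDMA_PTHRESH in that register's 64-byte units.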
1918 | |||
1919 | |||
1920 | /* Configure the chip "burst" DMA mode & enable some | ||
1921 | * HW bug fixes on Apple version | ||
1922 | */ | ||
1923 | cfg = 0; | ||
1924 | if (gp->pdev->vendor == PCI_VENDOR_ID_APPLE) | ||
1925 | cfg |= GREG_CFG_RONPAULBIT | GREG_CFG_ENBUG2FIX; | ||
1926 | #if !defined(CONFIG_SPARC64) && !defined(CONFIG_ALPHA) | ||
1927 | cfg |= GREG_CFG_IBURST; | ||
1928 | #endif | ||
1929 | cfg |= ((31 << 1) & GREG_CFG_TXDMALIM); | ||
1930 | cfg |= ((31 << 6) & GREG_CFG_RXDMALIM); | ||
1931 | writel(cfg, gp->regs + GREG_CFG); | ||
1932 | |||
1933 | /* If Infinite Burst didn't stick, then use different | ||
1934 | * thresholds (and Apple bug fixes don't exist) | ||
1935 | */ | ||
1936 | if (!(readl(gp->regs + GREG_CFG) & GREG_CFG_IBURST)) { | ||
1937 | cfg = ((2 << 1) & GREG_CFG_TXDMALIM); | ||
1938 | cfg |= ((8 << 6) & GREG_CFG_RXDMALIM); | ||
1939 | writel(cfg, gp->regs + GREG_CFG); | ||
1940 | } | ||
1941 | } | ||
1942 | |||
1943 | static int gem_check_invariants(struct gem *gp) | ||
1944 | { | ||
1945 | struct pci_dev *pdev = gp->pdev; | ||
1946 | u32 mif_cfg; | ||
1947 | |||
1948 | /* On Apple's sungem, we can't rely on registers as the chip | ||
1949 | * has been powered down by the firmware. The PHY is looked | ||
1950 | * up later on. | ||
1951 | */ | ||
1952 | if (pdev->vendor == PCI_VENDOR_ID_APPLE) { | ||
1953 | gp->phy_type = phy_mii_mdio0; | ||
1954 | gp->tx_fifo_sz = readl(gp->regs + TXDMA_FSZ) * 64; | ||
1955 | gp->rx_fifo_sz = readl(gp->regs + RXDMA_FSZ) * 64; | ||
1956 | gp->swrst_base = 0; | ||
1957 | |||
1958 | mif_cfg = readl(gp->regs + MIF_CFG); | ||
1959 | mif_cfg &= ~(MIF_CFG_PSELECT|MIF_CFG_POLL|MIF_CFG_BBMODE|MIF_CFG_MDI1); | ||
1960 | mif_cfg |= MIF_CFG_MDI0; | ||
1961 | writel(mif_cfg, gp->regs + MIF_CFG); | ||
1962 | writel(PCS_DMODE_MGM, gp->regs + PCS_DMODE); | ||
1963 | writel(MAC_XIFCFG_OE, gp->regs + MAC_XIFCFG); | ||
1964 | |||
1965 | /* We hard-code the PHY address so we can properly bring it out of | ||
1966 | * reset later on; we can't really probe it at this point, though | ||
1967 | * that isn't an issue. | ||
1968 | */ | ||
1969 | if (gp->pdev->device == PCI_DEVICE_ID_APPLE_K2_GMAC) | ||
1970 | gp->mii_phy_addr = 1; | ||
1971 | else | ||
1972 | gp->mii_phy_addr = 0; | ||
1973 | |||
1974 | return 0; | ||
1975 | } | ||
1976 | |||
1977 | mif_cfg = readl(gp->regs + MIF_CFG); | ||
1978 | |||
1979 | if (pdev->vendor == PCI_VENDOR_ID_SUN && | ||
1980 | pdev->device == PCI_DEVICE_ID_SUN_RIO_GEM) { | ||
1981 | /* One of the MII PHYs _must_ be present | ||
1982 | * as this chip has no gigabit PHY. | ||
1983 | */ | ||
1984 | if ((mif_cfg & (MIF_CFG_MDI0 | MIF_CFG_MDI1)) == 0) { | ||
1985 | pr_err("RIO GEM lacks MII phy, mif_cfg[%08x]\n", | ||
1986 | mif_cfg); | ||
1987 | return -1; | ||
1988 | } | ||
1989 | } | ||
1990 | |||
1991 | /* Determine initial PHY interface type guess. MDIO1 is the | ||
1992 | * external PHY and thus takes precedence over MDIO0. | ||
1993 | */ | ||
1994 | |||
1995 | if (mif_cfg & MIF_CFG_MDI1) { | ||
1996 | gp->phy_type = phy_mii_mdio1; | ||
1997 | mif_cfg |= MIF_CFG_PSELECT; | ||
1998 | writel(mif_cfg, gp->regs + MIF_CFG); | ||
1999 | } else if (mif_cfg & MIF_CFG_MDI0) { | ||
2000 | gp->phy_type = phy_mii_mdio0; | ||
2001 | mif_cfg &= ~MIF_CFG_PSELECT; | ||
2002 | writel(mif_cfg, gp->regs + MIF_CFG); | ||
2003 | } else { | ||
2004 | #ifdef CONFIG_SPARC | ||
2005 | const char *p; | ||
2006 | |||
2007 | p = of_get_property(gp->of_node, "shared-pins", NULL); | ||
2008 | if (p && !strcmp(p, "serdes")) | ||
2009 | gp->phy_type = phy_serdes; | ||
2010 | else | ||
2011 | #endif | ||
2012 | gp->phy_type = phy_serialink; | ||
2013 | } | ||
2014 | if (gp->phy_type == phy_mii_mdio1 || | ||
2015 | gp->phy_type == phy_mii_mdio0) { | ||
2016 | int i; | ||
2017 | |||
2018 | for (i = 0; i < 32; i++) { | ||
2019 | gp->mii_phy_addr = i; | ||
2020 | if (phy_read(gp, MII_BMCR) != 0xffff) | ||
2021 | break; | ||
2022 | } | ||
2023 | if (i == 32) { | ||
2024 | if (pdev->device != PCI_DEVICE_ID_SUN_GEM) { | ||
2025 | pr_err("RIO MII phy will not respond\n"); | ||
2026 | return -1; | ||
2027 | } | ||
2028 | gp->phy_type = phy_serdes; | ||
2029 | } | ||
2030 | } | ||
2031 | |||
2032 | /* Fetch the FIFO configurations now too. */ | ||
2033 | gp->tx_fifo_sz = readl(gp->regs + TXDMA_FSZ) * 64; | ||
2034 | gp->rx_fifo_sz = readl(gp->regs + RXDMA_FSZ) * 64; | ||
2035 | |||
2036 | if (pdev->vendor == PCI_VENDOR_ID_SUN) { | ||
2037 | if (pdev->device == PCI_DEVICE_ID_SUN_GEM) { | ||
2038 | if (gp->tx_fifo_sz != (9 * 1024) || | ||
2039 | gp->rx_fifo_sz != (20 * 1024)) { | ||
2040 | pr_err("GEM has bogus fifo sizes tx(%d) rx(%d)\n", | ||
2041 | gp->tx_fifo_sz, gp->rx_fifo_sz); | ||
2042 | return -1; | ||
2043 | } | ||
2044 | gp->swrst_base = 0; | ||
2045 | } else { | ||
2046 | if (gp->tx_fifo_sz != (2 * 1024) || | ||
2047 | gp->rx_fifo_sz != (2 * 1024)) { | ||
2048 | pr_err("RIO GEM has bogus fifo sizes tx(%d) rx(%d)\n", | ||
2049 | gp->tx_fifo_sz, gp->rx_fifo_sz); | ||
2050 | return -1; | ||
2051 | } | ||
2052 | gp->swrst_base = (64 / 4) << GREG_SWRST_CACHE_SHIFT; | ||
2053 | } | ||
2054 | } | ||
2055 | |||
2056 | return 0; | ||
2057 | } | ||
2058 | |||
2059 | static void gem_reinit_chip(struct gem *gp) | ||
2060 | { | ||
2061 | /* Reset the chip */ | ||
2062 | gem_reset(gp); | ||
2063 | |||
2064 | /* Make sure ints are disabled */ | ||
2065 | gem_disable_ints(gp); | ||
2066 | |||
2067 | /* Allocate & setup ring buffers */ | ||
2068 | gem_init_rings(gp); | ||
2069 | |||
2070 | /* Configure pause thresholds */ | ||
2071 | gem_init_pause_thresholds(gp); | ||
2072 | |||
2073 | /* Init DMA & MAC engines */ | ||
2074 | gem_init_dma(gp); | ||
2075 | gem_init_mac(gp); | ||
2076 | } | ||
2077 | |||
2078 | |||
2079 | static void gem_stop_phy(struct gem *gp, int wol) | ||
2080 | { | ||
2081 | u32 mifcfg; | ||
2082 | |||
2083 | /* Let the chip settle down a bit; that seems to help | ||
2084 | * with sleep mode on some models | ||
2085 | */ | ||
2086 | msleep(10); | ||
2087 | |||
2088 | /* Make sure we aren't polling PHY status change. We | ||
2089 | * don't currently use that feature though | ||
2090 | */ | ||
2091 | mifcfg = readl(gp->regs + MIF_CFG); | ||
2092 | mifcfg &= ~MIF_CFG_POLL; | ||
2093 | writel(mifcfg, gp->regs + MIF_CFG); | ||
2094 | |||
2095 | if (wol && gp->has_wol) { | ||
2096 | unsigned char *e = &gp->dev->dev_addr[0]; | ||
2097 | u32 csr; | ||
2098 | |||
2099 | /* Setup wake-on-lan for MAGIC packet */ | ||
2100 | writel(MAC_RXCFG_HFE | MAC_RXCFG_SFCS | MAC_RXCFG_ENAB, | ||
2101 | gp->regs + MAC_RXCFG); | ||
2102 | writel((e[4] << 8) | e[5], gp->regs + WOL_MATCH0); | ||
2103 | writel((e[2] << 8) | e[3], gp->regs + WOL_MATCH1); | ||
2104 | writel((e[0] << 8) | e[1], gp->regs + WOL_MATCH2); | ||
2105 | |||
2106 | writel(WOL_MCOUNT_N | WOL_MCOUNT_M, gp->regs + WOL_MCOUNT); | ||
2107 | csr = WOL_WAKECSR_ENABLE; | ||
2108 | if ((readl(gp->regs + MAC_XIFCFG) & MAC_XIFCFG_GMII) == 0) | ||
2109 | csr |= WOL_WAKECSR_MII; | ||
2110 | writel(csr, gp->regs + WOL_WAKECSR); | ||
2111 | } else { | ||
2112 | writel(0, gp->regs + MAC_RXCFG); | ||
2113 | (void)readl(gp->regs + MAC_RXCFG); | ||
2114 | /* Machine sleep will die in strange ways if we | ||
2115 | * don't wait a bit here; it looks like the chip takes | ||
2116 | * some time to really shut down | ||
2117 | */ | ||
2118 | msleep(10); | ||
2119 | } | ||
2120 | |||
2121 | writel(0, gp->regs + MAC_TXCFG); | ||
2122 | writel(0, gp->regs + MAC_XIFCFG); | ||
2123 | writel(0, gp->regs + TXDMA_CFG); | ||
2124 | writel(0, gp->regs + RXDMA_CFG); | ||
2125 | |||
2126 | if (!wol) { | ||
2127 | gem_reset(gp); | ||
2128 | writel(MAC_TXRST_CMD, gp->regs + MAC_TXRST); | ||
2129 | writel(MAC_RXRST_CMD, gp->regs + MAC_RXRST); | ||
2130 | |||
2131 | if (found_mii_phy(gp) && gp->phy_mii.def->ops->suspend) | ||
2132 | gp->phy_mii.def->ops->suspend(&gp->phy_mii); | ||
2133 | |||
2134 | /* According to Apple, we must set the MDIO pins to this benign | ||
2135 | * state or we may 1) eat more current, 2) damage some PHYs | ||
2136 | */ | ||
2137 | writel(mifcfg | MIF_CFG_BBMODE, gp->regs + MIF_CFG); | ||
2138 | writel(0, gp->regs + MIF_BBCLK); | ||
2139 | writel(0, gp->regs + MIF_BBDATA); | ||
2140 | writel(0, gp->regs + MIF_BBOENAB); | ||
2141 | writel(MAC_XIFCFG_GMII | MAC_XIFCFG_LBCK, gp->regs + MAC_XIFCFG); | ||
2142 | (void) readl(gp->regs + MAC_XIFCFG); | ||
2143 | } | ||
2144 | } | ||
2145 | |||
2146 | static int gem_do_start(struct net_device *dev) | ||
2147 | { | ||
2148 | struct gem *gp = netdev_priv(dev); | ||
2149 | int rc; | ||
2150 | |||
2151 | /* Enable the cell */ | ||
2152 | gem_get_cell(gp); | ||
2153 | |||
2154 | /* Make sure PCI access and bus master are enabled */ | ||
2155 | rc = pci_enable_device(gp->pdev); | ||
2156 | if (rc) { | ||
2157 | netdev_err(dev, "Failed to enable chip on PCI bus !\n"); | ||
2158 | |||
2159 | /* Put the cell back and forget it for now; it will be considered | ||
2160 | * still asleep, and a new sleep cycle may bring it back | ||
2161 | */ | ||
2162 | gem_put_cell(gp); | ||
2163 | return -ENXIO; | ||
2164 | } | ||
2165 | pci_set_master(gp->pdev); | ||
2166 | |||
2167 | /* Init & setup chip hardware */ | ||
2168 | gem_reinit_chip(gp); | ||
2169 | |||
2170 | /* An interrupt might come in handy */ | ||
2171 | rc = request_irq(gp->pdev->irq, gem_interrupt, | ||
2172 | IRQF_SHARED, dev->name, (void *)dev); | ||
2173 | if (rc) { | ||
2174 | netdev_err(dev, "failed to request irq !\n"); | ||
2175 | |||
2176 | gem_reset(gp); | ||
2177 | gem_clean_rings(gp); | ||
2178 | gem_put_cell(gp); | ||
2179 | return rc; | ||
2180 | } | ||
2181 | |||
2182 | /* Mark us as attached again if we come from resume(), this has | ||
2183 | * no effect if we weren't detached and needs to be done now. | ||
2184 | */ | ||
2185 | netif_device_attach(dev); | ||
2186 | |||
2187 | /* Restart NAPI & queues */ | ||
2188 | gem_netif_start(gp); | ||
2189 | |||
2190 | /* Detect & init PHY, start autoneg etc... this will | ||
2191 | * eventually result in starting DMA operations when | ||
2192 | * the link is up | ||
2193 | */ | ||
2194 | gem_init_phy(gp); | ||
2195 | |||
2196 | return 0; | ||
2197 | } | ||
2198 | |||
2199 | static void gem_do_stop(struct net_device *dev, int wol) | ||
2200 | { | ||
2201 | struct gem *gp = netdev_priv(dev); | ||
2202 | |||
2203 | /* Stop NAPI and stop tx queue */ | ||
2204 | gem_netif_stop(gp); | ||
2205 | |||
2206 | /* Make sure ints are disabled. We don't care about | ||
2207 | * synchronizing as NAPI is disabled, thus a stray | ||
2208 | * interrupt will do nothing bad (our irq handler | ||
2209 | * just schedules NAPI) | ||
2210 | */ | ||
2211 | gem_disable_ints(gp); | ||
2212 | |||
2213 | /* Stop the link timer */ | ||
2214 | del_timer_sync(&gp->link_timer); | ||
2215 | |||
2216 | /* We cannot cancel the reset task while holding the | ||
2217 | * rtnl lock, we'd get an A->B / B->A deadlock situation | ||
2218 | * if we did. This is not an issue however as the reset | ||
2219 | * task is synchronized vs. us (rtnl_lock) and will do | ||
2220 | * nothing if the device is down or suspended. We do | ||
2221 | * still clear reset_task_pending to avoid a spurious | ||
2222 | * reset later on in case we do resume before it gets | ||
2223 | * scheduled. | ||
2224 | */ | ||
2225 | gp->reset_task_pending = 0; | ||
2226 | |||
2227 | /* If we are going to sleep with WOL */ | ||
2228 | gem_stop_dma(gp); | ||
2229 | msleep(10); | ||
2230 | if (!wol) | ||
2231 | gem_reset(gp); | ||
2232 | msleep(10); | ||
2233 | |||
2234 | /* Get rid of rings */ | ||
2235 | gem_clean_rings(gp); | ||
2236 | |||
2237 | /* No irq needed anymore */ | ||
2238 | free_irq(gp->pdev->irq, (void *) dev); | ||
2239 | |||
2240 | /* Shut the PHY down eventually and setup WOL */ | ||
2241 | gem_stop_phy(gp, wol); | ||
2242 | |||
2243 | /* Make sure bus master is disabled */ | ||
2244 | pci_disable_device(gp->pdev); | ||
2245 | |||
2246 | /* Cell not needed either if no WOL */ | ||
2247 | if (!wol) | ||
2248 | gem_put_cell(gp); | ||
2249 | } | ||
2250 | |||
2251 | static void gem_reset_task(struct work_struct *work) | ||
2252 | { | ||
2253 | struct gem *gp = container_of(work, struct gem, reset_task); | ||
2254 | |||
2255 | /* Lock out the network stack (essentially shield ourselves | ||
2256 | * against a racing open, close, control call, or suspend) | ||
2257 | */ | ||
2258 | rtnl_lock(); | ||
2259 | |||
2260 | /* Skip the reset task if suspended or closed, or if it's | ||
2261 | * been cancelled by gem_do_stop (see comment there) | ||
2262 | */ | ||
2263 | if (!netif_device_present(gp->dev) || | ||
2264 | !netif_running(gp->dev) || | ||
2265 | !gp->reset_task_pending) { | ||
2266 | rtnl_unlock(); | ||
2267 | return; | ||
2268 | } | ||
2269 | |||
2270 | /* Stop the link timer */ | ||
2271 | del_timer_sync(&gp->link_timer); | ||
2272 | |||
2273 | /* Stop NAPI and tx */ | ||
2274 | gem_netif_stop(gp); | ||
2275 | |||
2276 | /* Reset the chip & rings */ | ||
2277 | gem_reinit_chip(gp); | ||
2278 | if (gp->lstate == link_up) | ||
2279 | gem_set_link_modes(gp); | ||
2280 | |||
2281 | /* Restart NAPI and Tx */ | ||
2282 | gem_netif_start(gp); | ||
2283 | |||
2284 | /* We are back ! */ | ||
2285 | gp->reset_task_pending = 0; | ||
2286 | |||
2287 | /* If the link is not up, restart autoneg, else restart the | ||
2288 | * polling timer | ||
2289 | */ | ||
2290 | if (gp->lstate != link_up) | ||
2291 | gem_begin_auto_negotiation(gp, NULL); | ||
2292 | else | ||
2293 | mod_timer(&gp->link_timer, jiffies + ((12 * HZ) / 10)); | ||
2294 | |||
2295 | rtnl_unlock(); | ||
2296 | } | ||
2297 | |||
2298 | static int gem_open(struct net_device *dev) | ||
2299 | { | ||
2300 | /* We allow open while suspended; we just do nothing, | ||
2301 | * the chip will be initialized in resume() | ||
2302 | */ | ||
2303 | if (netif_device_present(dev)) | ||
2304 | return gem_do_start(dev); | ||
2305 | return 0; | ||
2306 | } | ||
2307 | |||
2308 | static int gem_close(struct net_device *dev) | ||
2309 | { | ||
2310 | if (netif_device_present(dev)) | ||
2311 | gem_do_stop(dev, 0); | ||
2312 | |||
2313 | return 0; | ||
2314 | } | ||
2315 | |||
2316 | #ifdef CONFIG_PM | ||
2317 | static int gem_suspend(struct pci_dev *pdev, pm_message_t state) | ||
2318 | { | ||
2319 | struct net_device *dev = pci_get_drvdata(pdev); | ||
2320 | struct gem *gp = netdev_priv(dev); | ||
2321 | |||
2322 | /* Lock the network stack first to avoid racing with open/close, | ||
2323 | * reset task and setting calls | ||
2324 | */ | ||
2325 | rtnl_lock(); | ||
2326 | |||
2327 | /* Not running, mark ourselves non-present, no need for | ||
2328 | * a lock here | ||
2329 | */ | ||
2330 | if (!netif_running(dev)) { | ||
2331 | netif_device_detach(dev); | ||
2332 | rtnl_unlock(); | ||
2333 | return 0; | ||
2334 | } | ||
2335 | netdev_info(dev, "suspending, WakeOnLan %s\n", | ||
2336 | (gp->wake_on_lan && netif_running(dev)) ? | ||
2337 | "enabled" : "disabled"); | ||
2338 | |||
2339 | /* Tell the network stack we're gone. gem_do_stop() below will | ||
2340 | * synchronize with TX, stop NAPI etc... | ||
2341 | */ | ||
2342 | netif_device_detach(dev); | ||
2343 | |||
2344 | /* Switch off chip, remember WOL setting */ | ||
2345 | gp->asleep_wol = gp->wake_on_lan; | ||
2346 | gem_do_stop(dev, gp->asleep_wol); | ||
2347 | |||
2348 | /* Unlock the network stack */ | ||
2349 | rtnl_unlock(); | ||
2350 | |||
2351 | return 0; | ||
2352 | } | ||
2353 | |||
2354 | static int gem_resume(struct pci_dev *pdev) | ||
2355 | { | ||
2356 | struct net_device *dev = pci_get_drvdata(pdev); | ||
2357 | struct gem *gp = netdev_priv(dev); | ||
2358 | |||
2359 | /* See locking comment in gem_suspend */ | ||
2360 | rtnl_lock(); | ||
2361 | |||
2362 | /* Not running, mark ourselves present, no need for | ||
2363 | * a lock here | ||
2364 | */ | ||
2365 | if (!netif_running(dev)) { | ||
2366 | netif_device_attach(dev); | ||
2367 | rtnl_unlock(); | ||
2368 | return 0; | ||
2369 | } | ||
2370 | |||
2371 | /* Restart chip. If that fails there isn't much we can do, we | ||
2372 | * leave things stopped. | ||
2373 | */ | ||
2374 | gem_do_start(dev); | ||
2375 | |||
2376 | /* If we had WOL enabled, the cell clock was never turned off during | ||
2377 | * sleep, so we end up being unbalanced. Fix that here | ||
2378 | */ | ||
2379 | if (gp->asleep_wol) | ||
2380 | gem_put_cell(gp); | ||
2381 | |||
2382 | /* Unlock the network stack */ | ||
2383 | rtnl_unlock(); | ||
2384 | |||
2385 | return 0; | ||
2386 | } | ||
2387 | #endif /* CONFIG_PM */ | ||
2388 | |||
2389 | static struct net_device_stats *gem_get_stats(struct net_device *dev) | ||
2390 | { | ||
2391 | struct gem *gp = netdev_priv(dev); | ||
2392 | |||
2393 | /* I have seen this being called while the PM was in progress, | ||
2394 | * so we shield against this. Let's also not poke at registers | ||
2395 | * while the reset task is going on. | ||
2396 | * | ||
2397 | * TODO: Move stats collection elsewhere (link timer ?) and | ||
2398 | * make this a nop to avoid all those synchro issues | ||
2399 | */ | ||
2400 | if (!netif_device_present(dev) || !netif_running(dev)) | ||
2401 | goto bail; | ||
2402 | |||
2403 | /* Better safe than sorry... */ | ||
2404 | if (WARN_ON(!gp->cell_enabled)) | ||
2405 | goto bail; | ||
2406 | |||
2407 | dev->stats.rx_crc_errors += readl(gp->regs + MAC_FCSERR); | ||
2408 | writel(0, gp->regs + MAC_FCSERR); | ||
2409 | |||
2410 | dev->stats.rx_frame_errors += readl(gp->regs + MAC_AERR); | ||
2411 | writel(0, gp->regs + MAC_AERR); | ||
2412 | |||
2413 | dev->stats.rx_length_errors += readl(gp->regs + MAC_LERR); | ||
2414 | writel(0, gp->regs + MAC_LERR); | ||
2415 | |||
2416 | dev->stats.tx_aborted_errors += readl(gp->regs + MAC_ECOLL); | ||
2417 | dev->stats.collisions += | ||
2418 | (readl(gp->regs + MAC_ECOLL) + readl(gp->regs + MAC_LCOLL)); | ||
2419 | writel(0, gp->regs + MAC_ECOLL); | ||
2420 | writel(0, gp->regs + MAC_LCOLL); | ||
2421 | bail: | ||
2422 | return &dev->stats; | ||
2423 | } | ||
2424 | |||
2425 | static int gem_set_mac_address(struct net_device *dev, void *addr) | ||
2426 | { | ||
2427 | struct sockaddr *macaddr = (struct sockaddr *) addr; | ||
2428 | struct gem *gp = netdev_priv(dev); | ||
2429 | unsigned char *e = &dev->dev_addr[0]; | ||
2430 | |||
2431 | if (!is_valid_ether_addr(macaddr->sa_data)) | ||
2432 | return -EADDRNOTAVAIL; | ||
2433 | |||
2434 | memcpy(dev->dev_addr, macaddr->sa_data, dev->addr_len); | ||
2435 | |||
2436 | /* We'll just catch it later when the device is up'd or resumed */ | ||
2437 | if (!netif_running(dev) || !netif_device_present(dev)) | ||
2438 | return 0; | ||
2439 | |||
2440 | /* Better safe than sorry... */ | ||
2441 | if (WARN_ON(!gp->cell_enabled)) | ||
2442 | return 0; | ||
2443 | |||
2444 | writel((e[4] << 8) | e[5], gp->regs + MAC_ADDR0); | ||
2445 | writel((e[2] << 8) | e[3], gp->regs + MAC_ADDR1); | ||
2446 | writel((e[0] << 8) | e[1], gp->regs + MAC_ADDR2); | ||
2447 | |||
2448 | return 0; | ||
2449 | } | ||
2450 | |||
2451 | static void gem_set_multicast(struct net_device *dev) | ||
2452 | { | ||
2453 | struct gem *gp = netdev_priv(dev); | ||
2454 | u32 rxcfg, rxcfg_new; | ||
2455 | int limit = 10000; | ||
2456 | |||
2457 | if (!netif_running(dev) || !netif_device_present(dev)) | ||
2458 | return; | ||
2459 | |||
2460 | /* Better safe than sorry... */ | ||
2461 | if (gp->reset_task_pending || WARN_ON(!gp->cell_enabled)) | ||
2462 | return; | ||
2463 | |||
2464 | rxcfg = readl(gp->regs + MAC_RXCFG); | ||
2465 | rxcfg_new = gem_setup_multicast(gp); | ||
2466 | #ifdef STRIP_FCS | ||
2467 | rxcfg_new |= MAC_RXCFG_SFCS; | ||
2468 | #endif | ||
2469 | gp->mac_rx_cfg = rxcfg_new; | ||
2470 | |||
2471 | writel(rxcfg & ~MAC_RXCFG_ENAB, gp->regs + MAC_RXCFG); | ||
2472 | while (readl(gp->regs + MAC_RXCFG) & MAC_RXCFG_ENAB) { | ||
2473 | if (!limit--) | ||
2474 | break; | ||
2475 | udelay(10); | ||
2476 | } | ||
2477 | |||
2478 | rxcfg &= ~(MAC_RXCFG_PROM | MAC_RXCFG_HFE); | ||
2479 | rxcfg |= rxcfg_new; | ||
2480 | |||
2481 | writel(rxcfg, gp->regs + MAC_RXCFG); | ||
2482 | } | ||
2483 | |||
2484 | /* Jumbo-grams don't seem to work :-( */ | ||
2485 | #define GEM_MIN_MTU 68 | ||
2486 | #if 1 | ||
2487 | #define GEM_MAX_MTU 1500 | ||
2488 | #else | ||
2489 | #define GEM_MAX_MTU 9000 | ||
2490 | #endif | ||
2491 | |||
2492 | static int gem_change_mtu(struct net_device *dev, int new_mtu) | ||
2493 | { | ||
2494 | struct gem *gp = netdev_priv(dev); | ||
2495 | |||
2496 | if (new_mtu < GEM_MIN_MTU || new_mtu > GEM_MAX_MTU) | ||
2497 | return -EINVAL; | ||
2498 | |||
2499 | dev->mtu = new_mtu; | ||
2500 | |||
2501 | /* We'll just catch it later when the device is up'd or resumed */ | ||
2502 | if (!netif_running(dev) || !netif_device_present(dev)) | ||
2503 | return 0; | ||
2504 | |||
2505 | /* Better safe than sorry... */ | ||
2506 | if (WARN_ON(!gp->cell_enabled)) | ||
2507 | return 0; | ||
2508 | |||
2509 | gem_netif_stop(gp); | ||
2510 | gem_reinit_chip(gp); | ||
2511 | if (gp->lstate == link_up) | ||
2512 | gem_set_link_modes(gp); | ||
2513 | gem_netif_start(gp); | ||
2514 | |||
2515 | return 0; | ||
2516 | } | ||
2517 | |||
2518 | static void gem_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info) | ||
2519 | { | ||
2520 | struct gem *gp = netdev_priv(dev); | ||
2521 | |||
2522 | strcpy(info->driver, DRV_NAME); | ||
2523 | strcpy(info->version, DRV_VERSION); | ||
2524 | strcpy(info->bus_info, pci_name(gp->pdev)); | ||
2525 | } | ||
2526 | |||
2527 | static int gem_get_settings(struct net_device *dev, struct ethtool_cmd *cmd) | ||
2528 | { | ||
2529 | struct gem *gp = netdev_priv(dev); | ||
2530 | |||
2531 | if (gp->phy_type == phy_mii_mdio0 || | ||
2532 | gp->phy_type == phy_mii_mdio1) { | ||
2533 | if (gp->phy_mii.def) | ||
2534 | cmd->supported = gp->phy_mii.def->features; | ||
2535 | else | ||
2536 | cmd->supported = (SUPPORTED_10baseT_Half | | ||
2537 | SUPPORTED_10baseT_Full); | ||
2538 | |||
2539 | /* XXX hardcoded stuff for now */ | ||
2540 | cmd->port = PORT_MII; | ||
2541 | cmd->transceiver = XCVR_EXTERNAL; | ||
2542 | cmd->phy_address = 0; /* XXX fixed PHYAD */ | ||
2543 | |||
2544 | /* Return current PHY settings */ | ||
2545 | cmd->autoneg = gp->want_autoneg; | ||
2546 | ethtool_cmd_speed_set(cmd, gp->phy_mii.speed); | ||
2547 | cmd->duplex = gp->phy_mii.duplex; | ||
2548 | cmd->advertising = gp->phy_mii.advertising; | ||
2549 | |||
2550 | /* If we started with a forced mode, we don't have a default | ||
2551 | * advertise set, we need to return something sensible so | ||
2552 | * userland can re-enable autoneg properly. | ||
2553 | */ | ||
2554 | if (cmd->advertising == 0) | ||
2555 | cmd->advertising = cmd->supported; | ||
2556 | } else { // XXX PCS ? | ||
2557 | cmd->supported = | ||
2558 | (SUPPORTED_10baseT_Half | SUPPORTED_10baseT_Full | | ||
2559 | SUPPORTED_100baseT_Half | SUPPORTED_100baseT_Full | | ||
2560 | SUPPORTED_Autoneg); | ||
2561 | cmd->advertising = cmd->supported; | ||
2562 | ethtool_cmd_speed_set(cmd, 0); | ||
2563 | cmd->duplex = cmd->port = cmd->phy_address = | ||
2564 | cmd->transceiver = cmd->autoneg = 0; | ||
2565 | |||
2566 | /* serdes usually means a Fibre connector, with most settings fixed */ | ||
2567 | if (gp->phy_type == phy_serdes) { | ||
2568 | cmd->port = PORT_FIBRE; | ||
2569 | cmd->supported = (SUPPORTED_1000baseT_Half | | ||
2570 | SUPPORTED_1000baseT_Full | | ||
2571 | SUPPORTED_FIBRE | SUPPORTED_Autoneg | | ||
2572 | SUPPORTED_Pause | SUPPORTED_Asym_Pause); | ||
2573 | cmd->advertising = cmd->supported; | ||
2574 | cmd->transceiver = XCVR_INTERNAL; | ||
2575 | if (gp->lstate == link_up) | ||
2576 | ethtool_cmd_speed_set(cmd, SPEED_1000); | ||
2577 | cmd->duplex = DUPLEX_FULL; | ||
2578 | cmd->autoneg = 1; | ||
2579 | } | ||
2580 | } | ||
2581 | cmd->maxtxpkt = cmd->maxrxpkt = 0; | ||
2582 | |||
2583 | return 0; | ||
2584 | } | ||
2585 | |||
2586 | static int gem_set_settings(struct net_device *dev, struct ethtool_cmd *cmd) | ||
2587 | { | ||
2588 | struct gem *gp = netdev_priv(dev); | ||
2589 | u32 speed = ethtool_cmd_speed(cmd); | ||
2590 | |||
2591 | /* Verify the settings we care about. */ | ||
2592 | if (cmd->autoneg != AUTONEG_ENABLE && | ||
2593 | cmd->autoneg != AUTONEG_DISABLE) | ||
2594 | return -EINVAL; | ||
2595 | |||
2596 | if (cmd->autoneg == AUTONEG_ENABLE && | ||
2597 | cmd->advertising == 0) | ||
2598 | return -EINVAL; | ||
2599 | |||
2600 | if (cmd->autoneg == AUTONEG_DISABLE && | ||
2601 | ((speed != SPEED_1000 && | ||
2602 | speed != SPEED_100 && | ||
2603 | speed != SPEED_10) || | ||
2604 | (cmd->duplex != DUPLEX_HALF && | ||
2605 | cmd->duplex != DUPLEX_FULL))) | ||
2606 | return -EINVAL; | ||
2607 | |||
2608 | /* Apply settings and restart link process. */ | ||
2609 | if (netif_device_present(gp->dev)) { | ||
2610 | del_timer_sync(&gp->link_timer); | ||
2611 | gem_begin_auto_negotiation(gp, cmd); | ||
2612 | } | ||
2613 | |||
2614 | return 0; | ||
2615 | } | ||
2616 | |||
2617 | static int gem_nway_reset(struct net_device *dev) | ||
2618 | { | ||
2619 | struct gem *gp = netdev_priv(dev); | ||
2620 | |||
2621 | if (!gp->want_autoneg) | ||
2622 | return -EINVAL; | ||
2623 | |||
2624 | /* Restart link process */ | ||
2625 | if (netif_device_present(gp->dev)) { | ||
2626 | del_timer_sync(&gp->link_timer); | ||
2627 | gem_begin_auto_negotiation(gp, NULL); | ||
2628 | } | ||
2629 | |||
2630 | return 0; | ||
2631 | } | ||
2632 | |||
2633 | static u32 gem_get_msglevel(struct net_device *dev) | ||
2634 | { | ||
2635 | struct gem *gp = netdev_priv(dev); | ||
2636 | return gp->msg_enable; | ||
2637 | } | ||
2638 | |||
2639 | static void gem_set_msglevel(struct net_device *dev, u32 value) | ||
2640 | { | ||
2641 | struct gem *gp = netdev_priv(dev); | ||
2642 | gp->msg_enable = value; | ||
2643 | } | ||
2644 | |||
2645 | |||
2646 | /* Add more when I understand how to program the chip */ | ||
2647 | /* like WAKE_UCAST | WAKE_MCAST | WAKE_BCAST */ | ||
2648 | |||
2649 | #define WOL_SUPPORTED_MASK (WAKE_MAGIC) | ||
2650 | |||
2651 | static void gem_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol) | ||
2652 | { | ||
2653 | struct gem *gp = netdev_priv(dev); | ||
2654 | |||
2655 | /* Add more when I understand how to program the chip */ | ||
2656 | if (gp->has_wol) { | ||
2657 | wol->supported = WOL_SUPPORTED_MASK; | ||
2658 | wol->wolopts = gp->wake_on_lan; | ||
2659 | } else { | ||
2660 | wol->supported = 0; | ||
2661 | wol->wolopts = 0; | ||
2662 | } | ||
2663 | } | ||
2664 | |||
2665 | static int gem_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol) | ||
2666 | { | ||
2667 | struct gem *gp = netdev_priv(dev); | ||
2668 | |||
2669 | if (!gp->has_wol) | ||
2670 | return -EOPNOTSUPP; | ||
2671 | gp->wake_on_lan = wol->wolopts & WOL_SUPPORTED_MASK; | ||
2672 | return 0; | ||
2673 | } | ||
2674 | |||
2675 | static const struct ethtool_ops gem_ethtool_ops = { | ||
2676 | .get_drvinfo = gem_get_drvinfo, | ||
2677 | .get_link = ethtool_op_get_link, | ||
2678 | .get_settings = gem_get_settings, | ||
2679 | .set_settings = gem_set_settings, | ||
2680 | .nway_reset = gem_nway_reset, | ||
2681 | .get_msglevel = gem_get_msglevel, | ||
2682 | .set_msglevel = gem_set_msglevel, | ||
2683 | .get_wol = gem_get_wol, | ||
2684 | .set_wol = gem_set_wol, | ||
2685 | }; | ||
2686 | |||
2687 | static int gem_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd) | ||
2688 | { | ||
2689 | struct gem *gp = netdev_priv(dev); | ||
2690 | struct mii_ioctl_data *data = if_mii(ifr); | ||
2691 | int rc = -EOPNOTSUPP; | ||
2692 | |||
2693 | /* For SIOCGMIIREG and SIOCSMIIREG the core checks for us that | ||
2694 | * netif_device_present() is true and holds rtnl_lock for us | ||
2695 | * so we have nothing to worry about | ||
2696 | */ | ||
2697 | |||
2698 | switch (cmd) { | ||
2699 | case SIOCGMIIPHY: /* Get address of MII PHY in use. */ | ||
2700 | data->phy_id = gp->mii_phy_addr; | ||
2701 | /* Fallthrough... */ | ||
2702 | |||
2703 | case SIOCGMIIREG: /* Read MII PHY register. */ | ||
2704 | data->val_out = __phy_read(gp, data->phy_id & 0x1f, | ||
2705 | data->reg_num & 0x1f); | ||
2706 | rc = 0; | ||
2707 | break; | ||
2708 | |||
2709 | case SIOCSMIIREG: /* Write MII PHY register. */ | ||
2710 | __phy_write(gp, data->phy_id & 0x1f, data->reg_num & 0x1f, | ||
2711 | data->val_in); | ||
2712 | rc = 0; | ||
2713 | break; | ||
2714 | } | ||
2715 | return rc; | ||
2716 | } | ||
2717 | |||
2718 | #if (!defined(CONFIG_SPARC) && !defined(CONFIG_PPC_PMAC)) | ||
2719 | /* Fetch MAC address from vital product data of PCI ROM. */ | ||
2720 | static int find_eth_addr_in_vpd(void __iomem *rom_base, int len, unsigned char *dev_addr) | ||
2721 | { | ||
2722 | int this_offset; | ||
2723 | |||
2724 | for (this_offset = 0x20; this_offset < len; this_offset++) { | ||
2725 | void __iomem *p = rom_base + this_offset; | ||
2726 | int i; | ||
2727 | |||
2728 | if (readb(p + 0) != 0x90 || | ||
2729 | readb(p + 1) != 0x00 || | ||
2730 | readb(p + 2) != 0x09 || | ||
2731 | readb(p + 3) != 0x4e || | ||
2732 | readb(p + 4) != 0x41 || | ||
2733 | readb(p + 5) != 0x06) | ||
2734 | continue; | ||
2735 | |||
2736 | this_offset += 6; | ||
2737 | p += 6; | ||
2738 | |||
2739 | for (i = 0; i < 6; i++) | ||
2740 | dev_addr[i] = readb(p + i); | ||
2741 | return 1; | ||
2742 | } | ||
2743 | return 0; | ||
2744 | } | ||
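
For context, the six matched bytes appear to be a PCI VPD item for the "NA" (network address) keyword: 0x90 opens a VPD-R resource, 0x4e 0x41 is ASCII "NA", and the trailing 0x06 is the payload length, i.e. the six MAC address bytes copied out just below. The loop pattern-matches this fixed blob rather than walking the VPD structure properly.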
2745 | |||
2746 | static void get_gem_mac_nonobp(struct pci_dev *pdev, unsigned char *dev_addr) | ||
2747 | { | ||
2748 | size_t size; | ||
2749 | void __iomem *p = pci_map_rom(pdev, &size); | ||
2750 | |||
2751 | if (p) { | ||
2752 | int found; | ||
2753 | |||
2754 | found = readb(p) == 0x55 && | ||
2755 | readb(p + 1) == 0xaa && | ||
2756 | find_eth_addr_in_vpd(p, (64 * 1024), dev_addr); | ||
2757 | pci_unmap_rom(pdev, p); | ||
2758 | if (found) | ||
2759 | return; | ||
2760 | } | ||
2761 | |||
2762 | /* Sun MAC prefix then 3 random bytes. */ | ||
2763 | dev_addr[0] = 0x08; | ||
2764 | dev_addr[1] = 0x00; | ||
2765 | dev_addr[2] = 0x20; | ||
2766 | get_random_bytes(dev_addr + 3, 3); | ||
2767 | } | ||
2768 | #endif /* not Sparc and not PPC */ | ||
2769 | |||
2770 | static int __devinit gem_get_device_address(struct gem *gp) | ||
2771 | { | ||
2772 | #if defined(CONFIG_SPARC) || defined(CONFIG_PPC_PMAC) | ||
2773 | struct net_device *dev = gp->dev; | ||
2774 | const unsigned char *addr; | ||
2775 | |||
2776 | addr = of_get_property(gp->of_node, "local-mac-address", NULL); | ||
2777 | if (addr == NULL) { | ||
2778 | #ifdef CONFIG_SPARC | ||
2779 | addr = idprom->id_ethaddr; | ||
2780 | #else | ||
2781 | printk("\n"); | ||
2782 | pr_err("%s: can't get mac-address\n", dev->name); | ||
2783 | return -1; | ||
2784 | #endif | ||
2785 | } | ||
2786 | memcpy(dev->dev_addr, addr, 6); | ||
2787 | #else | ||
2788 | get_gem_mac_nonobp(gp->pdev, gp->dev->dev_addr); | ||
2789 | #endif | ||
2790 | return 0; | ||
2791 | } | ||
2792 | |||
2793 | static void gem_remove_one(struct pci_dev *pdev) | ||
2794 | { | ||
2795 | struct net_device *dev = pci_get_drvdata(pdev); | ||
2796 | |||
2797 | if (dev) { | ||
2798 | struct gem *gp = netdev_priv(dev); | ||
2799 | |||
2800 | unregister_netdev(dev); | ||
2801 | |||
2802 | /* Ensure reset task is truly gone */ | ||
2803 | cancel_work_sync(&gp->reset_task); | ||
2804 | |||
2805 | /* Free resources */ | ||
2806 | pci_free_consistent(pdev, | ||
2807 | sizeof(struct gem_init_block), | ||
2808 | gp->init_block, | ||
2809 | gp->gblock_dvma); | ||
2810 | iounmap(gp->regs); | ||
2811 | pci_release_regions(pdev); | ||
2812 | free_netdev(dev); | ||
2813 | |||
2814 | pci_set_drvdata(pdev, NULL); | ||
2815 | } | ||
2816 | } | ||
2817 | |||
2818 | static const struct net_device_ops gem_netdev_ops = { | ||
2819 | .ndo_open = gem_open, | ||
2820 | .ndo_stop = gem_close, | ||
2821 | .ndo_start_xmit = gem_start_xmit, | ||
2822 | .ndo_get_stats = gem_get_stats, | ||
2823 | .ndo_set_multicast_list = gem_set_multicast, | ||
2824 | .ndo_do_ioctl = gem_ioctl, | ||
2825 | .ndo_tx_timeout = gem_tx_timeout, | ||
2826 | .ndo_change_mtu = gem_change_mtu, | ||
2827 | .ndo_validate_addr = eth_validate_addr, | ||
2828 | .ndo_set_mac_address = gem_set_mac_address, | ||
2829 | #ifdef CONFIG_NET_POLL_CONTROLLER | ||
2830 | .ndo_poll_controller = gem_poll_controller, | ||
2831 | #endif | ||
2832 | }; | ||
2833 | |||
2834 | static int __devinit gem_init_one(struct pci_dev *pdev, | ||
2835 | const struct pci_device_id *ent) | ||
2836 | { | ||
2837 | unsigned long gemreg_base, gemreg_len; | ||
2838 | struct net_device *dev; | ||
2839 | struct gem *gp; | ||
2840 | int err, pci_using_dac; | ||
2841 | |||
2842 | printk_once(KERN_INFO "%s", version); | ||
2843 | |||
2844 | /* Apple gmac note: during probe, the chip is powered up by | ||
2845 | * the arch code to allow the code below to work (and to let | ||
2846 | * the chip be probed via the config space). It won't stay powered | ||
2847 | * up until the interface is brought up, however, so we can't rely | ||
2848 | * on register configuration done at this point. | ||
2849 | */ | ||
2850 | err = pci_enable_device(pdev); | ||
2851 | if (err) { | ||
2852 | pr_err("Cannot enable MMIO operation, aborting\n"); | ||
2853 | return err; | ||
2854 | } | ||
2855 | pci_set_master(pdev); | ||
2856 | |||
2857 | /* Configure DMA attributes. */ | ||
2858 | |||
2859 | /* All of the GEM documentation states that 64-bit DMA addressing | ||
2860 | * is fully supported and should work just fine. However the | ||
2861 | * front end for RIO based GEMs is different and only supports | ||
2862 | * 32-bit addressing. | ||
2863 | * | ||
2864 | * For now we assume the various PPC GEMs are 32-bit only as well. | ||
2865 | */ | ||
2866 | if (pdev->vendor == PCI_VENDOR_ID_SUN && | ||
2867 | pdev->device == PCI_DEVICE_ID_SUN_GEM && | ||
2868 | !pci_set_dma_mask(pdev, DMA_BIT_MASK(64))) { | ||
2869 | pci_using_dac = 1; | ||
2870 | } else { | ||
2871 | err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32)); | ||
2872 | if (err) { | ||
2873 | pr_err("No usable DMA configuration, aborting\n"); | ||
2874 | goto err_disable_device; | ||
2875 | } | ||
2876 | pci_using_dac = 0; | ||
2877 | } | ||
2878 | |||
2879 | gemreg_base = pci_resource_start(pdev, 0); | ||
2880 | gemreg_len = pci_resource_len(pdev, 0); | ||
2881 | |||
2882 | if ((pci_resource_flags(pdev, 0) & IORESOURCE_IO) != 0) { | ||
2883 | pr_err("Cannot find proper PCI device base address, aborting\n"); | ||
2884 | err = -ENODEV; | ||
2885 | goto err_disable_device; | ||
2886 | } | ||
2887 | |||
2888 | dev = alloc_etherdev(sizeof(*gp)); | ||
2889 | if (!dev) { | ||
2890 | pr_err("Etherdev alloc failed, aborting\n"); | ||
2891 | err = -ENOMEM; | ||
2892 | goto err_disable_device; | ||
2893 | } | ||
2894 | SET_NETDEV_DEV(dev, &pdev->dev); | ||
2895 | |||
2896 | gp = netdev_priv(dev); | ||
2897 | |||
2898 | err = pci_request_regions(pdev, DRV_NAME); | ||
2899 | if (err) { | ||
2900 | pr_err("Cannot obtain PCI resources, aborting\n"); | ||
2901 | goto err_out_free_netdev; | ||
2902 | } | ||
2903 | |||
2904 | gp->pdev = pdev; | ||
2905 | dev->base_addr = (long) pdev; | ||
2906 | gp->dev = dev; | ||
2907 | |||
2908 | gp->msg_enable = DEFAULT_MSG; | ||
2909 | |||
2910 | init_timer(&gp->link_timer); | ||
2911 | gp->link_timer.function = gem_link_timer; | ||
2912 | gp->link_timer.data = (unsigned long) gp; | ||
2913 | |||
2914 | INIT_WORK(&gp->reset_task, gem_reset_task); | ||
2915 | |||
2916 | gp->lstate = link_down; | ||
2917 | gp->timer_ticks = 0; | ||
2918 | netif_carrier_off(dev); | ||
2919 | |||
2920 | gp->regs = ioremap(gemreg_base, gemreg_len); | ||
2921 | if (!gp->regs) { | ||
2922 | pr_err("Cannot map device registers, aborting\n"); | ||
2923 | err = -EIO; | ||
2924 | goto err_out_free_res; | ||
2925 | } | ||
2926 | |||
2927 | /* On Apple, we want a reference to the Open Firmware device-tree | ||
2928 | * node. We use it for clock control. | ||
2929 | */ | ||
2930 | #if defined(CONFIG_PPC_PMAC) || defined(CONFIG_SPARC) | ||
2931 | gp->of_node = pci_device_to_OF_node(pdev); | ||
2932 | #endif | ||
2933 | |||
2934 | /* Only Apple version supports WOL afaik */ | ||
2935 | if (pdev->vendor == PCI_VENDOR_ID_APPLE) | ||
2936 | gp->has_wol = 1; | ||
2937 | |||
2938 | /* Make sure cell is enabled */ | ||
2939 | gem_get_cell(gp); | ||
2940 | |||
2941 | /* Make sure everything is stopped and in init state */ | ||
2942 | gem_reset(gp); | ||
2943 | |||
2944 | /* Fill up the mii_phy structure (even if we won't use it) */ | ||
2945 | gp->phy_mii.dev = dev; | ||
2946 | gp->phy_mii.mdio_read = _phy_read; | ||
2947 | gp->phy_mii.mdio_write = _phy_write; | ||
2948 | #ifdef CONFIG_PPC_PMAC | ||
2949 | gp->phy_mii.platform_data = gp->of_node; | ||
2950 | #endif | ||
2951 | /* By default, we start with autoneg */ | ||
2952 | gp->want_autoneg = 1; | ||
2953 | |||
2954 | /* Check fifo sizes, PHY type, etc... */ | ||
2955 | if (gem_check_invariants(gp)) { | ||
2956 | err = -ENODEV; | ||
2957 | goto err_out_iounmap; | ||
2958 | } | ||
2959 | |||
2960 | /* It is guaranteed that the returned buffer will be at least | ||
2961 | * PAGE_SIZE aligned. | ||
2962 | */ | ||
2963 | gp->init_block = (struct gem_init_block *) | ||
2964 | pci_alloc_consistent(pdev, sizeof(struct gem_init_block), | ||
2965 | &gp->gblock_dvma); | ||
2966 | if (!gp->init_block) { | ||
2967 | pr_err("Cannot allocate init block, aborting\n"); | ||
2968 | err = -ENOMEM; | ||
2969 | goto err_out_iounmap; | ||
2970 | } | ||
2971 | |||
2972 | if (gem_get_device_address(gp)) | ||
2973 | goto err_out_free_consistent; | ||
2974 | |||
2975 | dev->netdev_ops = &gem_netdev_ops; | ||
2976 | netif_napi_add(dev, &gp->napi, gem_poll, 64); | ||
2977 | dev->ethtool_ops = &gem_ethtool_ops; | ||
2978 | dev->watchdog_timeo = 5 * HZ; | ||
2979 | dev->irq = pdev->irq; | ||
2980 | dev->dma = 0; | ||
2981 | |||
2982 | /* Set that now, in case PM kicks in now */ | ||
2983 | pci_set_drvdata(pdev, dev); | ||
2984 | |||
2985 | /* We can do scatter/gather and HW checksum */ | ||
2986 | dev->hw_features = NETIF_F_SG | NETIF_F_HW_CSUM; | ||
2987 | dev->features |= dev->hw_features | NETIF_F_RXCSUM; | ||
2988 | if (pci_using_dac) | ||
2989 | dev->features |= NETIF_F_HIGHDMA; | ||
2990 | |||
2991 | /* Register with kernel */ | ||
2992 | if (register_netdev(dev)) { | ||
2993 | pr_err("Cannot register net device, aborting\n"); | ||
2994 | err = -ENOMEM; | ||
2995 | goto err_out_free_consistent; | ||
2996 | } | ||
2997 | |||
2998 | /* Undo the get_cell with appropriate locking (we could use | ||
2999 | * ndo_init/uninit but that would be even more clumsy imho) | ||
3000 | */ | ||
3001 | rtnl_lock(); | ||
3002 | gem_put_cell(gp); | ||
3003 | rtnl_unlock(); | ||
3004 | |||
3005 | netdev_info(dev, "Sun GEM (PCI) 10/100/1000BaseT Ethernet %pM\n", | ||
3006 | dev->dev_addr); | ||
3007 | return 0; | ||
3008 | |||
3009 | err_out_free_consistent: | ||
3010 | gem_remove_one(pdev); | ||
3011 | err_out_iounmap: | ||
3012 | gem_put_cell(gp); | ||
3013 | iounmap(gp->regs); | ||
3014 | |||
3015 | err_out_free_res: | ||
3016 | pci_release_regions(pdev); | ||
3017 | |||
3018 | err_out_free_netdev: | ||
3019 | free_netdev(dev); | ||
3020 | err_disable_device: | ||
3021 | pci_disable_device(pdev); | ||
3022 | return err; | ||
3023 | |||
3024 | } | ||
3025 | |||
3026 | |||
3027 | static struct pci_driver gem_driver = { | ||
3028 | .name = GEM_MODULE_NAME, | ||
3029 | .id_table = gem_pci_tbl, | ||
3030 | .probe = gem_init_one, | ||
3031 | .remove = gem_remove_one, | ||
3032 | #ifdef CONFIG_PM | ||
3033 | .suspend = gem_suspend, | ||
3034 | .resume = gem_resume, | ||
3035 | #endif /* CONFIG_PM */ | ||
3036 | }; | ||
3037 | |||
3038 | static int __init gem_init(void) | ||
3039 | { | ||
3040 | return pci_register_driver(&gem_driver); | ||
3041 | } | ||
3042 | |||
3043 | static void __exit gem_cleanup(void) | ||
3044 | { | ||
3045 | pci_unregister_driver(&gem_driver); | ||
3046 | } | ||
3047 | |||
3048 | module_init(gem_init); | ||
3049 | module_exit(gem_cleanup); | ||
diff --git a/drivers/net/ethernet/sun/sungem.h b/drivers/net/ethernet/sun/sungem.h new file mode 100644 index 000000000000..835ce1b3cb9f --- /dev/null +++ b/drivers/net/ethernet/sun/sungem.h | |||
@@ -0,0 +1,1027 @@ | |||
1 | /* $Id: sungem.h,v 1.10.2.4 2002/03/11 08:54:48 davem Exp $ | ||
2 | * sungem.h: Definitions for Sun GEM ethernet driver. | ||
3 | * | ||
4 | * Copyright (C) 2000 David S. Miller (davem@redhat.com) | ||
5 | */ | ||
6 | |||
7 | #ifndef _SUNGEM_H | ||
8 | #define _SUNGEM_H | ||
9 | |||
10 | /* Global Registers */ | ||
11 | #define GREG_SEBSTATE 0x0000UL /* SEB State Register */ | ||
12 | #define GREG_CFG 0x0004UL /* Configuration Register */ | ||
13 | #define GREG_STAT 0x000CUL /* Status Register */ | ||
14 | #define GREG_IMASK 0x0010UL /* Interrupt Mask Register */ | ||
15 | #define GREG_IACK 0x0014UL /* Interrupt ACK Register */ | ||
16 | #define GREG_STAT2 0x001CUL /* Alias of GREG_STAT */ | ||
17 | #define GREG_PCIESTAT 0x1000UL /* PCI Error Status Register */ | ||
18 | #define GREG_PCIEMASK 0x1004UL /* PCI Error Mask Register */ | ||
19 | #define GREG_BIFCFG 0x1008UL /* BIF Configuration Register */ | ||
20 | #define GREG_BIFDIAG 0x100CUL /* BIF Diagnostics Register */ | ||
21 | #define GREG_SWRST 0x1010UL /* Software Reset Register */ | ||
22 | |||
23 | /* Global SEB State Register */ | ||
24 | #define GREG_SEBSTATE_ARB 0x00000003 /* State of Arbiter */ | ||
25 | #define GREG_SEBSTATE_RXWON 0x00000004 /* RX won internal arbitration */ | ||
26 | |||
27 | /* Global Configuration Register */ | ||
28 | #define GREG_CFG_IBURST 0x00000001 /* Infinite Burst */ | ||
29 | #define GREG_CFG_TXDMALIM 0x0000003e /* TX DMA grant limit */ | ||
30 | #define GREG_CFG_RXDMALIM 0x000007c0 /* RX DMA grant limit */ | ||
31 | #define GREG_CFG_RONPAULBIT 0x00000800 /* Use mem read multiple for PCI read | ||
32 | * after infinite burst (Apple) */ | ||
33 | #define GREG_CFG_ENBUG2FIX 0x00001000 /* Fix Rx hang after overflow */ | ||
34 | |||
35 | /* Global Interrupt Status Register. | ||
36 | * | ||
37 | * Reading this register automatically clears bits 0 through 6. | ||
38 | * This auto-clearing does not occur when the alias at GREG_STAT2 | ||
39 | * is read instead. The rest of the interrupt bits only clear when | ||
40 | * the secondary interrupt status register corresponding to that | ||
41 | * bit is read (ie. if GREG_STAT_PCS is set, it will be cleared by | ||
42 | * reading PCS_ISTAT). | ||
43 | */ | ||
44 | #define GREG_STAT_TXINTME 0x00000001 /* TX INTME frame transferred */ | ||
45 | #define GREG_STAT_TXALL 0x00000002 /* All TX frames transferred */ | ||
46 | #define GREG_STAT_TXDONE 0x00000004 /* One TX frame transferred */ | ||
47 | #define GREG_STAT_RXDONE 0x00000010 /* One RX frame arrived */ | ||
48 | #define GREG_STAT_RXNOBUF 0x00000020 /* No free RX buffers available */ | ||
49 | #define GREG_STAT_RXTAGERR 0x00000040 /* RX tag framing is corrupt */ | ||
50 | #define GREG_STAT_PCS 0x00002000 /* PCS signalled interrupt */ | ||
51 | #define GREG_STAT_TXMAC 0x00004000 /* TX MAC signalled interrupt */ | ||
52 | #define GREG_STAT_RXMAC 0x00008000 /* RX MAC signalled interrupt */ | ||
53 | #define GREG_STAT_MAC 0x00010000 /* MAC Control signalled irq */ | ||
54 | #define GREG_STAT_MIF 0x00020000 /* MIF signalled interrupt */ | ||
55 | #define GREG_STAT_PCIERR 0x00040000 /* PCI Error interrupt */ | ||
56 | #define GREG_STAT_TXNR 0xfff80000 /* == TXDMA_TXDONE reg val */ | ||
57 | #define GREG_STAT_TXNR_SHIFT 19 | ||
58 | |||
59 | #define GREG_STAT_ABNORMAL (GREG_STAT_RXNOBUF | GREG_STAT_RXTAGERR | \ | ||
60 | GREG_STAT_PCS | GREG_STAT_TXMAC | GREG_STAT_RXMAC | \ | ||
61 | GREG_STAT_MAC | GREG_STAT_MIF | GREG_STAT_PCIERR) | ||
62 | |||
63 | #define GREG_STAT_NAPI (GREG_STAT_TXALL | GREG_STAT_TXINTME | \ | ||
64 | GREG_STAT_RXDONE | GREG_STAT_ABNORMAL) | ||
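
Because a read of GREG_STAT clears bits 0 through 6, an interrupt handler should latch the register once and work from the copy, using the GREG_STAT2 alias when a non-destructive peek is needed. A minimal sketch (not the driver's actual handler):

/* Sketch: latch the auto-clearing status once, then act on the copy. */
static irqreturn_t gem_isr_sketch(struct gem *gp)
{
	u32 stat = readl(gp->regs + GREG_STAT);	/* clears bits 0-6 */

	if (!(stat & GREG_STAT_NAPI))
		return IRQ_NONE;
	napi_schedule(&gp->napi);	/* handle from the latched value */
	return IRQ_HANDLED;
}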
65 | |||
66 | /* The layout of GREG_IMASK and GREG_IACK is identical to GREG_STAT. | ||
67 | * Bits set in GREG_IMASK will prevent that interrupt type from being | ||
68 | * signalled to the cpu. GREG_IACK can be used to clear specific top-level | ||
69 | * interrupt conditions in GREG_STAT, ie. it only works for bits 0 through 6. | ||
70 | * Setting the bit will clear that interrupt, clear bits will have no effect | ||
71 | * on GREG_STAT. | ||
72 | */ | ||
73 | |||
74 | /* Global PCI Error Status Register */ | ||
75 | #define GREG_PCIESTAT_BADACK 0x00000001 /* No ACK64# during ABS64 cycle */ | ||
76 | #define GREG_PCIESTAT_DTRTO 0x00000002 /* Delayed transaction timeout */ | ||
77 | #define GREG_PCIESTAT_OTHER 0x00000004 /* Other PCI error, check cfg space */ | ||
78 | |||
79 | /* The layout of the GREG_PCIEMASK is identical to that of GREG_PCIESTAT. | ||
80 | * Bits set in GREG_PCIEMASK will prevent that interrupt type from being | ||
81 | * signalled to the cpu. | ||
82 | */ | ||
83 | |||
84 | /* Global BIF Configuration Register */ | ||
85 | #define GREG_BIFCFG_SLOWCLK 0x00000001 /* Set if PCI runs < 25MHz */ | ||
86 | #define GREG_BIFCFG_B64DIS 0x00000002 /* Disable 64bit wide data cycle*/ | ||
87 | #define GREG_BIFCFG_M66EN 0x00000004 /* Set if on 66MHz PCI segment */ | ||
88 | |||
89 | /* Global BIF Diagnostics Register */ | ||
90 | #define GREG_BIFDIAG_BURSTSM 0x007f0000 /* PCI Burst state machine */ | ||
91 | #define GREG_BIFDIAG_BIFSM 0xff000000 /* BIF state machine */ | ||
92 | |||
93 | /* Global Software Reset Register. | ||
94 | * | ||
95 | * This register is used to perform a global reset of the RX and TX portions | ||
96 | * of the GEM asic. Setting the RX or TX reset bit will start the reset. | ||
97 | * The driver _MUST_ poll these bits until they clear. One may not attempt | ||
98 | * to program any other part of GEM until the bits clear. | ||
99 | */ | ||
100 | #define GREG_SWRST_TXRST 0x00000001 /* TX Software Reset */ | ||
101 | #define GREG_SWRST_RXRST 0x00000002 /* RX Software Reset */ | ||
102 | #define GREG_SWRST_RSTOUT 0x00000004 /* Force RST# pin active */ | ||
103 | #define GREG_SWRST_CACHESIZE 0x00ff0000 /* RIO only: cache line size */ | ||
104 | #define GREG_SWRST_CACHE_SHIFT 16 | ||
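
The polling requirement above is what the driver's gem_reset() implements; a minimal sketch of the mandatory loop (the bound is illustrative, and the real code also ORs in gp->swrst_base to preserve the RIO cache line size field):

/* Sketch: issue a global software reset and poll until it completes. */
static int gem_swrst_sketch(void __iomem *regs)
{
	int limit = 10000;

	writel(GREG_SWRST_TXRST | GREG_SWRST_RXRST, regs + GREG_SWRST);
	do {
		u32 val = readl(regs + GREG_SWRST);
		if (!(val & (GREG_SWRST_TXRST | GREG_SWRST_RXRST)))
			return 0;	/* reset complete */
		udelay(10);
	} while (limit--);
	return -ETIMEDOUT;	/* must not program the chip in this state */
}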
105 | |||
106 | /* TX DMA Registers */ | ||
107 | #define TXDMA_KICK 0x2000UL /* TX Kick Register */ | ||
108 | #define TXDMA_CFG 0x2004UL /* TX Configuration Register */ | ||
109 | #define TXDMA_DBLOW 0x2008UL /* TX Desc. Base Low */ | ||
110 | #define TXDMA_DBHI 0x200CUL /* TX Desc. Base High */ | ||
111 | #define TXDMA_FWPTR 0x2014UL /* TX FIFO Write Pointer */ | ||
112 | #define TXDMA_FSWPTR 0x2018UL /* TX FIFO Shadow Write Pointer */ | ||
113 | #define TXDMA_FRPTR 0x201CUL /* TX FIFO Read Pointer */ | ||
114 | #define TXDMA_FSRPTR 0x2020UL /* TX FIFO Shadow Read Pointer */ | ||
115 | #define TXDMA_PCNT 0x2024UL /* TX FIFO Packet Counter */ | ||
116 | #define TXDMA_SMACHINE 0x2028UL /* TX State Machine Register */ | ||
117 | #define TXDMA_DPLOW 0x2030UL /* TX Data Pointer Low */ | ||
118 | #define TXDMA_DPHI 0x2034UL /* TX Data Pointer High */ | ||
119 | #define TXDMA_TXDONE 0x2100UL /* TX Completion Register */ | ||
120 | #define TXDMA_FADDR 0x2104UL /* TX FIFO Address */ | ||
121 | #define TXDMA_FTAG 0x2108UL /* TX FIFO Tag */ | ||
122 | #define TXDMA_DLOW 0x210CUL /* TX FIFO Data Low */ | ||
123 | #define TXDMA_DHIT1 0x2110UL /* TX FIFO Data HighT1 */ | ||
124 | #define TXDMA_DHIT0 0x2114UL /* TX FIFO Data HighT0 */ | ||
125 | #define TXDMA_FSZ 0x2118UL /* TX FIFO Size */ | ||
126 | |||
127 | /* TX Kick Register. | ||
128 | * | ||
129 | * This 13-bit register is programmed by the driver to hold the descriptor | ||
130 | * entry index which follows the last valid transmit descriptor. | ||
131 | */ | ||
132 | |||
133 | /* TX Completion Register. | ||
134 | * | ||
135 | * This 13-bit register is updated by GEM to hold the descriptor entry index | ||
136 | * which follows the last descriptor already processed by GEM. Note that | ||
137 | * this value is mirrored in GREG_STAT which eliminates the need to even | ||
138 | * access this register in the driver during interrupt processing. | ||
139 | */ | ||
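
Together, the kick and completion indices describe ring occupancy with ordinary power-of-two modular arithmetic; a sketch (names illustrative):

/* Sketch: number of TX descriptors posted but not yet processed. */
static u32 gem_tx_pending(u32 kick, u32 done, u32 ring_sz)
{
	return (kick - done) & (ring_sz - 1);
}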
140 | |||
141 | /* TX Configuration Register. | ||
142 | * | ||
143 | * Note that TXDMA_CFG_FTHRESH, the TX FIFO Threshold, is an obsolete feature | ||
144 | * that was meant to be used with jumbo packets. It should be set to the | ||
145 | * maximum value of 0x4ff, else one risks getting TX MAC Underrun errors. | ||
146 | */ | ||
147 | #define TXDMA_CFG_ENABLE 0x00000001 /* Enable TX DMA channel */ | ||
148 | #define TXDMA_CFG_RINGSZ 0x0000001e /* TX descriptor ring size */ | ||
149 | #define TXDMA_CFG_RINGSZ_32 0x00000000 /* 32 TX descriptors */ | ||
150 | #define TXDMA_CFG_RINGSZ_64 0x00000002 /* 64 TX descriptors */ | ||
151 | #define TXDMA_CFG_RINGSZ_128 0x00000004 /* 128 TX descriptors */ | ||
152 | #define TXDMA_CFG_RINGSZ_256 0x00000006 /* 256 TX descriptors */ | ||
153 | #define TXDMA_CFG_RINGSZ_512 0x00000008 /* 512 TX descriptors */ | ||
154 | #define TXDMA_CFG_RINGSZ_1K 0x0000000a /* 1024 TX descriptors */ | ||
155 | #define TXDMA_CFG_RINGSZ_2K 0x0000000c /* 2048 TX descriptors */ | ||
156 | #define TXDMA_CFG_RINGSZ_4K 0x0000000e /* 4096 TX descriptors */ | ||
157 | #define TXDMA_CFG_RINGSZ_8K 0x00000010 /* 8192 TX descriptors */ | ||
158 | #define TXDMA_CFG_PIOSEL 0x00000020 /* Enable TX FIFO PIO from cpu */ | ||
159 | #define TXDMA_CFG_FTHRESH 0x001ffc00 /* TX FIFO Threshold, obsolete */ | ||
160 | #define TXDMA_CFG_PMODE 0x00200000 /* TXALL irq means TX FIFO empty*/ | ||
161 | |||
162 | /* TX Descriptor Base Low/High. | ||
163 | * | ||
164 | * These two registers store the 53 most significant bits of the base address | ||
165 | * of the TX descriptor table. The 11 least significant bits are always | ||
166 | * zero. As a result, the TX descriptor table must be 2K aligned. | ||
167 | */ | ||
168 | |||
169 | /* The rest of the TXDMA_* registers are for diagnostics and debug, I will document | ||
170 | * them later. -DaveM | ||
171 | */ | ||
172 | |||
173 | /* WakeOnLan Registers */ | ||
174 | #define WOL_MATCH0 0x3000UL | ||
175 | #define WOL_MATCH1 0x3004UL | ||
176 | #define WOL_MATCH2 0x3008UL | ||
177 | #define WOL_MCOUNT 0x300CUL | ||
178 | #define WOL_WAKECSR 0x3010UL | ||
179 | |||
180 | /* WOL Match count register | ||
181 | */ | ||
182 | #define WOL_MCOUNT_N 0x00000010 | ||
183 | #define WOL_MCOUNT_M 0x00000000 /* 0 << 8 */ | ||
184 | |||
185 | #define WOL_WAKECSR_ENABLE 0x00000001 | ||
186 | #define WOL_WAKECSR_MII 0x00000002 | ||
187 | #define WOL_WAKECSR_SEEN 0x00000004 | ||
188 | #define WOL_WAKECSR_FILT_UCAST 0x00000008 | ||
189 | #define WOL_WAKECSR_FILT_MCAST 0x00000010 | ||
190 | #define WOL_WAKECSR_FILT_BCAST 0x00000020 | ||
191 | #define WOL_WAKECSR_FILT_SEEN 0x00000040 | ||
192 | |||
193 | |||
194 | /* Receive DMA Registers */ | ||
195 | #define RXDMA_CFG 0x4000UL /* RX Configuration Register */ | ||
196 | #define RXDMA_DBLOW 0x4004UL /* RX Descriptor Base Low */ | ||
197 | #define RXDMA_DBHI 0x4008UL /* RX Descriptor Base High */ | ||
198 | #define RXDMA_FWPTR 0x400CUL /* RX FIFO Write Pointer */ | ||
199 | #define RXDMA_FSWPTR 0x4010UL /* RX FIFO Shadow Write Pointer */ | ||
200 | #define RXDMA_FRPTR 0x4014UL /* RX FIFO Read Pointer */ | ||
201 | #define RXDMA_PCNT 0x4018UL /* RX FIFO Packet Counter */ | ||
202 | #define RXDMA_SMACHINE 0x401CUL /* RX State Machine Register */ | ||
203 | #define RXDMA_PTHRESH 0x4020UL /* Pause Thresholds */ | ||
204 | #define RXDMA_DPLOW 0x4024UL /* RX Data Pointer Low */ | ||
205 | #define RXDMA_DPHI 0x4028UL /* RX Data Pointer High */ | ||
206 | #define RXDMA_KICK 0x4100UL /* RX Kick Register */ | ||
207 | #define RXDMA_DONE 0x4104UL /* RX Completion Register */ | ||
208 | #define RXDMA_BLANK 0x4108UL /* RX Blanking Register */ | ||
209 | #define RXDMA_FADDR 0x410CUL /* RX FIFO Address */ | ||
210 | #define RXDMA_FTAG 0x4110UL /* RX FIFO Tag */ | ||
211 | #define RXDMA_DLOW 0x4114UL /* RX FIFO Data Low */ | ||
212 | #define RXDMA_DHIT1 0x4118UL /* RX FIFO Data HighT1 */ | ||
213 | #define RXDMA_DHIT0 0x411CUL /* RX FIFO Data HighT0 */ | ||
214 | #define RXDMA_FSZ 0x4120UL /* RX FIFO Size */ | ||
215 | |||
216 | /* RX Configuration Register. */ | ||
217 | #define RXDMA_CFG_ENABLE 0x00000001 /* Enable RX DMA channel */ | ||
218 | #define RXDMA_CFG_RINGSZ 0x0000001e /* RX descriptor ring size */ | ||
219 | #define RXDMA_CFG_RINGSZ_32 0x00000000 /* - 32 entries */ | ||
220 | #define RXDMA_CFG_RINGSZ_64 0x00000002 /* - 64 entries */ | ||
221 | #define RXDMA_CFG_RINGSZ_128 0x00000004 /* - 128 entries */ | ||
222 | #define RXDMA_CFG_RINGSZ_256 0x00000006 /* - 256 entries */ | ||
223 | #define RXDMA_CFG_RINGSZ_512 0x00000008 /* - 512 entries */ | ||
224 | #define RXDMA_CFG_RINGSZ_1K 0x0000000a /* - 1024 entries */ | ||
225 | #define RXDMA_CFG_RINGSZ_2K 0x0000000c /* - 2048 entries */ | ||
226 | #define RXDMA_CFG_RINGSZ_4K 0x0000000e /* - 4096 entries */ | ||
227 | #define RXDMA_CFG_RINGSZ_8K 0x00000010 /* - 8192 entries */ | ||
228 | #define RXDMA_CFG_RINGSZ_BDISAB 0x00000020 /* Disable RX desc batching */ | ||
229 | #define RXDMA_CFG_FBOFF 0x00001c00 /* Offset of first data byte */ | ||
230 | #define RXDMA_CFG_CSUMOFF 0x000fe000 /* Skip bytes before csum calc */ | ||
231 | #define RXDMA_CFG_FTHRESH 0x07000000 /* RX FIFO dma start threshold */ | ||
232 | #define RXDMA_CFG_FTHRESH_64 0x00000000 /* - 64 bytes */ | ||
233 | #define RXDMA_CFG_FTHRESH_128 0x01000000 /* - 128 bytes */ | ||
234 | #define RXDMA_CFG_FTHRESH_256 0x02000000 /* - 256 bytes */ | ||
235 | #define RXDMA_CFG_FTHRESH_512 0x03000000 /* - 512 bytes */ | ||
236 | #define RXDMA_CFG_FTHRESH_1K 0x04000000 /* - 1024 bytes */ | ||
237 | #define RXDMA_CFG_FTHRESH_2K 0x05000000 /* - 2048 bytes */ | ||
238 | |||
239 | /* RX Descriptor Base Low/High. | ||
240 | * | ||
241 | * These two registers store the 53 most significant bits of the base address | ||
242 | * of the RX descriptor table. The 11 least significant bits are always | ||
243 | * zero. As a result, the RX descriptor table must be 2K aligned. | ||
244 | */ | ||
245 | |||
246 | /* RX PAUSE Thresholds. | ||
247 | * | ||
248 | * These values determine when XOFF and XON PAUSE frames are emitted by | ||
249 | * GEM. The thresholds measure RX FIFO occupancy in units of 64 bytes. | ||
250 | */ | ||
251 | #define RXDMA_PTHRESH_OFF 0x000001ff /* XOFF emitted w/FIFO > this */ | ||
252 | #define RXDMA_PTHRESH_ON 0x001ff000 /* XON emitted w/FIFO < this */ | ||
253 | |||
254 | /* RX Kick Register. | ||
255 | * | ||
256 | * This 13-bit register is written by the host CPU and holds the last | ||
257 | * valid RX descriptor number plus one. That is, if 'N' is written to | ||
258 | * this register, it means that all RX descriptors up to but excluding | ||
259 | * 'N' are valid. | ||
260 | * | ||
261 | * The hardware requires that RX descriptors are posted in increments | ||
262 | * of 4. This means 'N' must be a multiple of four. For the best | ||
263 | * performance, the first new descriptor being posted should be (PCI) | ||
264 | * cache line aligned. | ||
265 | */ | ||
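
/* Editorial sketch: advancing the RX Kick register.  gem_rx_kick() is a
 * hypothetical helper; writel() is the usual kernel MMIO accessor over
 * the chip's register window.  Rounding the first free index down to a
 * multiple of four honours the posting rule described above.
 */
static inline void gem_rx_kick(void __iomem *regs, int first_free)
{
	writel(first_free & ~3, regs + RXDMA_KICK);
}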
266 | |||
267 | /* RX Completion Register. | ||
268 | * | ||
269 | * This 13-bit register is updated by GEM to indicate which RX descriptors | ||
270 | * have already been used for receive frames. All descriptors up to but | ||
271 | * excluding the value in this register are ready to be processed. GEM | ||
272 | * updates this register value after the RX FIFO empties completely into | ||
273 | * the RX descriptor's buffer, but before the RX_DONE bit is set in the | ||
274 | * interrupt status register. | ||
275 | */ | ||
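
/* Editorial sketch: counting completed RX entries.  RXDMA_DONE holds the
 * first descriptor GEM has *not* yet consumed, so everything from the
 * driver's cursor up to (but excluding) that value is ready.  'rx_old'
 * and 'ring_sz' (a power of two) are illustrative names, as is
 * gem_rx_pending() itself.
 */
static inline int gem_rx_pending(void __iomem *regs, int rx_old, int ring_sz)
{
	int done = readl(regs + RXDMA_DONE);

	return (done - rx_old) & (ring_sz - 1);
}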
276 | |||
277 | /* RX Blanking Register. */ | ||
278 | #define RXDMA_BLANK_IPKTS 0x000001ff /* RX_DONE asserted after this | ||
279 | * many packets received since | ||
280 | * previous RX_DONE. | ||
281 | */ | ||
282 | #define RXDMA_BLANK_ITIME 0x000ff000 /* RX_DONE asserted after this | ||
283 | * many intervals of 2048 PCI | ||
284 | * clocks have elapsed since | ||
285 | * the previous RX_DONE. | ||
286 | */ | ||
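
/* Editorial sketch: packing the two interrupt-blanking fields.  The
 * shifts of 0 and 12 follow from the masks above; gem_rx_blank() and the
 * parameter values a caller would pass are illustrative, not recommended
 * coalescing settings.
 */
static inline u32 gem_rx_blank(u32 pkts, u32 intervals)
{
	return (pkts & RXDMA_BLANK_IPKTS) |
	       ((intervals << 12) & RXDMA_BLANK_ITIME);
}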
287 | |||
288 | /* RX FIFO Size. | ||
289 | * | ||
290 | * This 11-bit read-only register indicates how large, in units of 64 bytes, | ||
291 | * the RX FIFO is. The driver uses this to properly configure the RX PAUSE | ||
292 | * thresholds. | ||
293 | */ | ||
294 | |||
295 | /* The rest of the RXDMA_* registers are for diagnostics and debug; I will document | ||
296 | * them later. -DaveM | ||
297 | */ | ||
298 | |||
299 | /* MAC Registers */ | ||
300 | #define MAC_TXRST 0x6000UL /* TX MAC Software Reset Command*/ | ||
301 | #define MAC_RXRST 0x6004UL /* RX MAC Software Reset Command*/ | ||
302 | #define MAC_SNDPAUSE 0x6008UL /* Send Pause Command Register */ | ||
303 | #define MAC_TXSTAT 0x6010UL /* TX MAC Status Register */ | ||
304 | #define MAC_RXSTAT 0x6014UL /* RX MAC Status Register */ | ||
305 | #define MAC_CSTAT 0x6018UL /* MAC Control Status Register */ | ||
306 | #define MAC_TXMASK 0x6020UL /* TX MAC Mask Register */ | ||
307 | #define MAC_RXMASK 0x6024UL /* RX MAC Mask Register */ | ||
308 | #define MAC_MCMASK 0x6028UL /* MAC Control Mask Register */ | ||
309 | #define MAC_TXCFG 0x6030UL /* TX MAC Configuration Register*/ | ||
310 | #define MAC_RXCFG 0x6034UL /* RX MAC Configuration Register*/ | ||
311 | #define MAC_MCCFG 0x6038UL /* MAC Control Config Register */ | ||
312 | #define MAC_XIFCFG 0x603CUL /* XIF Configuration Register */ | ||
313 | #define MAC_IPG0 0x6040UL /* InterPacketGap0 Register */ | ||
314 | #define MAC_IPG1 0x6044UL /* InterPacketGap1 Register */ | ||
315 | #define MAC_IPG2 0x6048UL /* InterPacketGap2 Register */ | ||
316 | #define MAC_STIME 0x604CUL /* SlotTime Register */ | ||
317 | #define MAC_MINFSZ 0x6050UL /* MinFrameSize Register */ | ||
318 | #define MAC_MAXFSZ 0x6054UL /* MaxFrameSize Register */ | ||
319 | #define MAC_PASIZE 0x6058UL /* PA Size Register */ | ||
320 | #define MAC_JAMSIZE 0x605CUL /* JamSize Register */ | ||
321 | #define MAC_ATTLIM 0x6060UL /* Attempt Limit Register */ | ||
322 | #define MAC_MCTYPE 0x6064UL /* MAC Control Type Register */ | ||
323 | #define MAC_ADDR0 0x6080UL /* MAC Address 0 Register */ | ||
324 | #define MAC_ADDR1 0x6084UL /* MAC Address 1 Register */ | ||
325 | #define MAC_ADDR2 0x6088UL /* MAC Address 2 Register */ | ||
326 | #define MAC_ADDR3 0x608CUL /* MAC Address 3 Register */ | ||
327 | #define MAC_ADDR4 0x6090UL /* MAC Address 4 Register */ | ||
328 | #define MAC_ADDR5 0x6094UL /* MAC Address 5 Register */ | ||
329 | #define MAC_ADDR6 0x6098UL /* MAC Address 6 Register */ | ||
330 | #define MAC_ADDR7 0x609CUL /* MAC Address 7 Register */ | ||
331 | #define MAC_ADDR8 0x60A0UL /* MAC Address 8 Register */ | ||
332 | #define MAC_AFILT0 0x60A4UL /* Address Filter 0 Register */ | ||
333 | #define MAC_AFILT1 0x60A8UL /* Address Filter 1 Register */ | ||
334 | #define MAC_AFILT2 0x60ACUL /* Address Filter 2 Register */ | ||
335 | #define MAC_AF21MSK 0x60B0UL /* Address Filter 2&1 Mask Reg */ | ||
336 | #define MAC_AF0MSK 0x60B4UL /* Address Filter 0 Mask Reg */ | ||
337 | #define MAC_HASH0 0x60C0UL /* Hash Table 0 Register */ | ||
338 | #define MAC_HASH1 0x60C4UL /* Hash Table 1 Register */ | ||
339 | #define MAC_HASH2 0x60C8UL /* Hash Table 2 Register */ | ||
340 | #define MAC_HASH3 0x60CCUL /* Hash Table 3 Register */ | ||
341 | #define MAC_HASH4 0x60D0UL /* Hash Table 4 Register */ | ||
342 | #define MAC_HASH5 0x60D4UL /* Hash Table 5 Register */ | ||
343 | #define MAC_HASH6 0x60D8UL /* Hash Table 6 Register */ | ||
344 | #define MAC_HASH7 0x60DCUL /* Hash Table 7 Register */ | ||
345 | #define MAC_HASH8 0x60E0UL /* Hash Table 8 Register */ | ||
346 | #define MAC_HASH9 0x60E4UL /* Hash Table 9 Register */ | ||
347 | #define MAC_HASH10 0x60E8UL /* Hash Table 10 Register */ | ||
348 | #define MAC_HASH11 0x60ECUL /* Hash Table 11 Register */ | ||
349 | #define MAC_HASH12 0x60F0UL /* Hash Table 12 Register */ | ||
350 | #define MAC_HASH13 0x60F4UL /* Hash Table 13 Register */ | ||
351 | #define MAC_HASH14 0x60F8UL /* Hash Table 14 Register */ | ||
352 | #define MAC_HASH15 0x60FCUL /* Hash Table 15 Register */ | ||
353 | #define MAC_NCOLL 0x6100UL /* Normal Collision Counter */ | ||
354 | #define MAC_FASUCC 0x6104UL /* First Attempt Succ. Coll Ctr. */ | ||
355 | #define MAC_ECOLL 0x6108UL /* Excessive Collision Counter */ | ||
356 | #define MAC_LCOLL 0x610CUL /* Late Collision Counter */ | ||
357 | #define MAC_DTIMER 0x6110UL /* Defer Timer */ | ||
358 | #define MAC_PATMPS 0x6114UL /* Peak Attempts Register */ | ||
359 | #define MAC_RFCTR 0x6118UL /* Receive Frame Counter */ | ||
360 | #define MAC_LERR 0x611CUL /* Length Error Counter */ | ||
361 | #define MAC_AERR 0x6120UL /* Alignment Error Counter */ | ||
362 | #define MAC_FCSERR 0x6124UL /* FCS Error Counter */ | ||
363 | #define MAC_RXCVERR 0x6128UL /* RX code Violation Error Ctr */ | ||
364 | #define MAC_RANDSEED 0x6130UL /* Random Number Seed Register */ | ||
365 | #define MAC_SMACHINE 0x6134UL /* State Machine Register */ | ||
366 | |||
367 | /* TX MAC Software Reset Command. */ | ||
368 | #define MAC_TXRST_CMD 0x00000001 /* Start sw reset, self-clears */ | ||
369 | |||
370 | /* RX MAC Software Reset Command. */ | ||
371 | #define MAC_RXRST_CMD 0x00000001 /* Start sw reset, self-clears */ | ||
372 | |||
373 | /* Send Pause Command. */ | ||
374 | #define MAC_SNDPAUSE_TS 0x0000ffff /* The pause_time operand used in | ||
375 | * Send_Pause and flow-control | ||
376 | * handshakes. | ||
377 | */ | ||
378 | #define MAC_SNDPAUSE_SP 0x00010000 /* Setting this bit instructs the MAC | ||
379 | * to send a Pause Flow Control | ||
380 | * frame onto the network. | ||
381 | */ | ||
382 | |||
383 | /* TX MAC Status Register. */ | ||
384 | #define MAC_TXSTAT_XMIT 0x00000001 /* Frame Transmitted */ | ||
385 | #define MAC_TXSTAT_URUN 0x00000002 /* TX Underrun */ | ||
386 | #define MAC_TXSTAT_MPE 0x00000004 /* Max Packet Size Error */ | ||
387 | #define MAC_TXSTAT_NCE 0x00000008 /* Normal Collision Cntr Expire */ | ||
388 | #define MAC_TXSTAT_ECE 0x00000010 /* Excess Collision Cntr Expire */ | ||
389 | #define MAC_TXSTAT_LCE 0x00000020 /* Late Collision Cntr Expire */ | ||
390 | #define MAC_TXSTAT_FCE 0x00000040 /* First Collision Cntr Expire */ | ||
391 | #define MAC_TXSTAT_DTE 0x00000080 /* Defer Timer Expire */ | ||
392 | #define MAC_TXSTAT_PCE 0x00000100 /* Peak Attempts Cntr Expire */ | ||
393 | |||
394 | /* RX MAC Status Register. */ | ||
395 | #define MAC_RXSTAT_RCV 0x00000001 /* Frame Received */ | ||
396 | #define MAC_RXSTAT_OFLW 0x00000002 /* Receive Overflow */ | ||
397 | #define MAC_RXSTAT_FCE 0x00000004 /* Frame Cntr Expire */ | ||
398 | #define MAC_RXSTAT_ACE 0x00000008 /* Align Error Cntr Expire */ | ||
399 | #define MAC_RXSTAT_CCE 0x00000010 /* CRC Error Cntr Expire */ | ||
400 | #define MAC_RXSTAT_LCE 0x00000020 /* Length Error Cntr Expire */ | ||
401 | #define MAC_RXSTAT_VCE 0x00000040 /* Code Violation Cntr Expire */ | ||
402 | |||
403 | /* MAC Control Status Register. */ | ||
404 | #define MAC_CSTAT_PRCV 0x00000001 /* Pause Received */ | ||
405 | #define MAC_CSTAT_PS 0x00000002 /* Paused State */ | ||
406 | #define MAC_CSTAT_NPS 0x00000004 /* Not Paused State */ | ||
407 | #define MAC_CSTAT_PTR 0xffff0000 /* Pause Time Received */ | ||
408 | |||
409 | /* The layout of the MAC_{TX,RX,C}MASK registers is identical to that | ||
410 | * of MAC_{TX,RX,C}STAT. Bits set in MAC_{TX,RX,C}MASK will prevent | ||
411 | * that interrupt type from being signalled to the front end of GEM. For | ||
412 | * the interrupt to actually get sent to the cpu, it is necessary to | ||
413 | * properly set the appropriate GREG_IMASK_{TX,RX,}MAC bits as well. | ||
414 | */ | ||
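
/* Editorial sketch: the two-level masking described above.  A set bit in
 * MAC_TXMASK blocks that event, so writing ~MAC_TXSTAT_URUN lets only
 * TX-underrun reports through this level; the corresponding GREG_IMASK
 * TX-MAC bit (defined earlier in this header) must be unmasked
 * separately for the CPU to see the interrupt.  gem_unmask_tx_urun() is
 * a hypothetical helper.
 */
static inline void gem_unmask_tx_urun(void __iomem *regs)
{
	writel(~MAC_TXSTAT_URUN, regs + MAC_TXMASK);
}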
415 | |||
416 | /* TX MAC Configuration Register. | ||
417 | * | ||
418 | * NOTE: The TX MAC Enable bit must be cleared and polled until | ||
419 | * zero before any other bits in this register are changed. | ||
420 | * | ||
421 | * Also, enabling the Carrier Extension feature of GEM is | ||
422 | * a 3-step process: 1) Set TX Carrier Extension 2) Set | ||
423 | * RX Carrier Extension 3) Set Slot Time to 0x200. This | ||
424 | * mode must be enabled when in half-duplex at 1Gbps, else | ||
425 | * it must be disabled. | ||
426 | */ | ||
427 | #define MAC_TXCFG_ENAB 0x00000001 /* TX MAC Enable */ | ||
428 | #define MAC_TXCFG_ICS 0x00000002 /* Ignore Carrier Sense */ | ||
429 | #define MAC_TXCFG_ICOLL 0x00000004 /* Ignore Collisions */ | ||
430 | #define MAC_TXCFG_EIPG0 0x00000008 /* Enable IPG0 */ | ||
431 | #define MAC_TXCFG_NGU 0x00000010 /* Never Give Up */ | ||
432 | #define MAC_TXCFG_NGUL 0x00000020 /* Never Give Up Limit */ | ||
433 | #define MAC_TXCFG_NBO 0x00000040 /* No Backoff */ | ||
434 | #define MAC_TXCFG_SD 0x00000080 /* Slow Down */ | ||
435 | #define MAC_TXCFG_NFCS 0x00000100 /* No FCS */ | ||
436 | #define MAC_TXCFG_TCE 0x00000200 /* TX Carrier Extension */ | ||
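
/* Editorial sketch of the clear-and-poll rule in the note above: disable
 * the TX MAC and wait for the enable bit to read back as zero before
 * touching any other TXCFG bit.  The bound of 10000 polls and the 10us
 * delay are arbitrary illustration; readl()/writel()/udelay() are the
 * usual kernel helpers, and gem_disable_txmac() is a hypothetical name.
 */
static inline int gem_disable_txmac(void __iomem *regs)
{
	int limit = 10000;

	writel(readl(regs + MAC_TXCFG) & ~MAC_TXCFG_ENAB, regs + MAC_TXCFG);
	while ((readl(regs + MAC_TXCFG) & MAC_TXCFG_ENAB) && --limit > 0)
		udelay(10);
	return limit > 0 ? 0 : -1;	/* -1: enable bit never cleared */
}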
437 | |||
438 | /* RX MAC Configuration Register. | ||
439 | * | ||
440 | * NOTE: The RX MAC Enable bit must be cleared and polled until | ||
441 | * zero before any other bits in this register are changed. | ||
442 | * | ||
443 | * Similar rules apply to the Hash Filter Enable bit when | ||
444 | * programming the hash table registers, and the Address Filter | ||
445 | * Enable bit when programming the address filter registers. | ||
446 | */ | ||
447 | #define MAC_RXCFG_ENAB 0x00000001 /* RX MAC Enable */ | ||
448 | #define MAC_RXCFG_SPAD 0x00000002 /* Strip Pad */ | ||
449 | #define MAC_RXCFG_SFCS 0x00000004 /* Strip FCS */ | ||
450 | #define MAC_RXCFG_PROM 0x00000008 /* Promiscuous Mode */ | ||
451 | #define MAC_RXCFG_PGRP 0x00000010 /* Promiscuous Group */ | ||
452 | #define MAC_RXCFG_HFE 0x00000020 /* Hash Filter Enable */ | ||
453 | #define MAC_RXCFG_AFE 0x00000040 /* Address Filter Enable */ | ||
454 | #define MAC_RXCFG_DDE 0x00000080 /* Disable Discard on Error */ | ||
455 | #define MAC_RXCFG_RCE 0x00000100 /* RX Carrier Extension */ | ||
456 | |||
457 | /* MAC Control Config Register. */ | ||
458 | #define MAC_MCCFG_SPE 0x00000001 /* Send Pause Enable */ | ||
459 | #define MAC_MCCFG_RPE 0x00000002 /* Receive Pause Enable */ | ||
460 | #define MAC_MCCFG_PMC 0x00000004 /* Pass MAC Control */ | ||
461 | |||
462 | /* XIF Configuration Register. | ||
463 | * | ||
464 | * NOTE: When leaving or entering loopback mode, a global hardware | ||
465 | * init of GEM should be performed. | ||
466 | */ | ||
467 | #define MAC_XIFCFG_OE 0x00000001 /* MII TX Output Driver Enable */ | ||
468 | #define MAC_XIFCFG_LBCK 0x00000002 /* Loopback TX to RX */ | ||
469 | #define MAC_XIFCFG_DISE 0x00000004 /* Disable RX path during TX */ | ||
470 | #define MAC_XIFCFG_GMII 0x00000008 /* Use GMII clocks + datapath */ | ||
471 | #define MAC_XIFCFG_MBOE 0x00000010 /* Controls MII_BUF_EN pin */ | ||
472 | #define MAC_XIFCFG_LLED 0x00000020 /* Force LINKLED# active (low) */ | ||
473 | #define MAC_XIFCFG_FLED 0x00000040 /* Force FDPLXLED# active (low) */ | ||
474 | |||
475 | /* InterPacketGap0 Register. This 8-bit value is used as an extension | ||
476 | * to the InterPacketGap1 Register. Specifically it contributes to the | ||
477 | * timing of the RX-to-TX IPG. This value is ignored and presumed to | ||
478 | * be zero for TX-to-TX IPG calculations and/or when the Enable IPG0 bit | ||
479 | * is cleared in the TX MAC Configuration Register. | ||
480 | * | ||
481 | * The value in this register is in terms of media byte time. | ||
482 | * | ||
483 | * Recommended value: 0x00 | ||
484 | */ | ||
485 | |||
486 | /* InterPacketGap1 Register. This 8-bit value defines the first 2/3 | ||
487 | * portion of the Inter Packet Gap. | ||
488 | * | ||
489 | * The value in this register is in terms of media byte time. | ||
490 | * | ||
491 | * Recommended value: 0x08 | ||
492 | */ | ||
493 | |||
494 | /* InterPacketGap2 Register. This 8-bit value defines the second 1/3 | ||
495 | * portion of the Inter Packet Gap. | ||
496 | * | ||
497 | * The value in this register is in terms of media byte time. | ||
498 | * | ||
499 | * Recommended value: 0x04 | ||
500 | */ | ||
501 | |||
502 | /* Slot Time Register. This 10-bit value specifies the slot time | ||
503 | * parameter in units of media byte time. It determines the physical | ||
504 | * span of the network. | ||
505 | * | ||
506 | * Recommended value: 0x40 | ||
507 | */ | ||
508 | |||
509 | /* Minimum Frame Size Register. This 10-bit register specifies the | ||
510 | * smallest sized frame the TXMAC will send onto the medium, and the | ||
511 | * RXMAC will receive from the medium. | ||
512 | * | ||
513 | * Recommended value: 0x40 | ||
514 | */ | ||
515 | |||
516 | /* Maximum Frame and Burst Size Register. | ||
517 | * | ||
518 | * This register specifies two things. First it specifies the maximum | ||
519 | * sized frame the TXMAC will send and the RXMAC will recognize as | ||
520 | * valid. Second, it specifies the maximum run length of a burst of | ||
521 | * packets sent in half-duplex gigabit modes. | ||
522 | * | ||
523 | * Recommended value: 0x200005ee | ||
524 | */ | ||
525 | #define MAC_MAXFSZ_MFS 0x00007fff /* Max Frame Size */ | ||
526 | #define MAC_MAXFSZ_MBS 0x7fff0000 /* Max Burst Size */ | ||
527 | |||
528 | /* PA Size Register. This 10-bit register specifies the number of preamble | ||
529 | * bytes which will be transmitted at the beginning of each frame. A | ||
530 | * value of two or greater should be programmed here. | ||
531 | * | ||
532 | * Recommended value: 0x07 | ||
533 | */ | ||
534 | |||
535 | /* Jam Size Register. This 4-bit register specifies the duration of | ||
536 | * the jam in units of media byte time. | ||
537 | * | ||
538 | * Recommended value: 0x04 | ||
539 | */ | ||
540 | |||
541 | /* Attempts Limit Register. This 8-bit register specifies the number | ||
542 | * of attempts that the TXMAC will make to transmit a frame, before it | ||
543 | * resets its Attempts Counter. After reaching the Attempts Limit the | ||
544 | * TXMAC may or may not drop the frame, as determined by the NGU | ||
545 | * (Never Give Up) and NGUL (Never Give Up Limit) bits in the TXMAC | ||
546 | * Configuration Register. | ||
547 | * | ||
548 | * Recommended value: 0x10 | ||
549 | */ | ||
550 | |||
551 | /* MAC Control Type Register. This 16-bit register specifies the | ||
552 | * "type" field of a MAC Control frame. The TXMAC uses this field to | ||
553 | * encapsulate the MAC Control frame for transmission, and the RXMAC | ||
554 | * uses it for decoding valid MAC Control frames received from the | ||
555 | * network. | ||
556 | * | ||
557 | * Recommended value: 0x8808 | ||
558 | */ | ||
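
/* Editorial sketch: loading the recommended values from the notes above
 * at MAC initialisation time.  gem_init_mac_timing() is a hypothetical
 * helper; the constants are exactly the "Recommended value" entries
 * documented for each register.
 */
static inline void gem_init_mac_timing(void __iomem *regs)
{
	writel(0x00, regs + MAC_IPG0);
	writel(0x08, regs + MAC_IPG1);
	writel(0x04, regs + MAC_IPG2);
	writel(0x40, regs + MAC_STIME);
	writel(0x40, regs + MAC_MINFSZ);
	writel(0x200005ee, regs + MAC_MAXFSZ);
	writel(0x07, regs + MAC_PASIZE);
	writel(0x04, regs + MAC_JAMSIZE);
	writel(0x10, regs + MAC_ATTLIM);
	writel(0x8808, regs + MAC_MCTYPE);
}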
559 | |||
560 | /* MAC Address Registers. Each of these registers specifies the | ||
561 | * ethernet MAC address of the interface, 16 bits at a time. Register | ||
562 | * 0 specifies bits [47:32], register 1 bits [31:16], and register | ||
563 | * 2 bits [15:0]. | ||
564 | * | ||
565 | * Registers 3 through and including 5 specify an alternate | ||
566 | * MAC address for the interface. | ||
567 | * | ||
568 | * Registers 6 through and including 8 specify the MAC Control | ||
569 | * Address, which must be the reserved multicast address for MAC | ||
570 | * Control frames. | ||
571 | * | ||
572 | * Example: To program primary station address a:b:c:d:e:f into | ||
573 | * the chip. | ||
574 | * MAC_Address_2 = (a << 8) | b | ||
575 | * MAC_Address_1 = (c << 8) | d | ||
576 | * MAC_Address_0 = (e << 8) | f | ||
577 | */ | ||
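
/* Editorial sketch: the worked example above in C, for a 6-byte address
 * array where addr[0] is the first octet on the wire.  This is the same
 * ordering the sungem driver uses when it programs the primary station
 * address; gem_set_station_addr() is a hypothetical helper.
 */
static inline void gem_set_station_addr(void __iomem *regs, const u8 *addr)
{
	writel((addr[4] << 8) | addr[5], regs + MAC_ADDR0);
	writel((addr[2] << 8) | addr[3], regs + MAC_ADDR1);
	writel((addr[0] << 8) | addr[1], regs + MAC_ADDR2);
}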
578 | |||
579 | /* Address Filter Registers. Registers 0 through 2 specify bit | ||
580 | * fields [47:32] through [15:0], respectively, of the address | ||
581 | * filter. The Address Filter 2&1 Mask Register denotes the 8-bit | ||
582 | * nibble mask for Address Filter Registers 2 and 1. The Address | ||
583 | * Filter 0 Mask Register denotes the 16-bit mask for the Address | ||
584 | * Filter Register 0. | ||
585 | */ | ||
586 | |||
587 | /* Hash Table Registers. Registers 0 through 15 specify bit fields | ||
588 | * [255:240] through [15:0], respectively, of the hash table. | ||
589 | */ | ||
590 | |||
591 | /* Statistics Registers. All of these registers are 16 bits wide and | ||
592 | * track occurrences of a specific event. GEM can be configured | ||
593 | * to interrupt the host cpu when any of these counters overflow. | ||
594 | * They should all be explicitly initialized to zero when the interface | ||
595 | * is brought up. | ||
596 | */ | ||
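
/* Editorial sketch: the explicit zeroing the note above asks for at
 * interface bring-up.  gem_clear_mac_stats() is a hypothetical helper
 * covering the event counters defined earlier in this header.
 */
static inline void gem_clear_mac_stats(void __iomem *regs)
{
	writel(0, regs + MAC_NCOLL);
	writel(0, regs + MAC_FASUCC);
	writel(0, regs + MAC_ECOLL);
	writel(0, regs + MAC_LCOLL);
	writel(0, regs + MAC_DTIMER);
	writel(0, regs + MAC_PATMPS);
	writel(0, regs + MAC_RFCTR);
	writel(0, regs + MAC_LERR);
	writel(0, regs + MAC_AERR);
	writel(0, regs + MAC_FCSERR);
	writel(0, regs + MAC_RXCVERR);
}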
597 | |||
598 | /* Random Number Seed Register. This 10-bit value is used as the | ||
599 | * RNG seed inside GEM for the CSMA/CD backoff algorithm. It is | ||
600 | * recommended to program this register to the 10 LSB of the | ||
601 | * interface's MAC address. | ||
602 | */ | ||
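
/* Editorial sketch: seeding the backoff RNG from the 10 LSB of the
 * station address, per the recommendation above (addr[5] is the last
 * octet of the MAC).  gem_seed_rng() is a hypothetical helper.
 */
static inline void gem_seed_rng(void __iomem *regs, const u8 *addr)
{
	writel(((addr[4] << 8) | addr[5]) & 0x3ff, regs + MAC_RANDSEED);
}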
603 | |||
604 | /* Pause Timer, read-only. This 16-bit timer is used to time the pause | ||
605 | * interval as indicated by a received pause flow control frame. | ||
606 | * A non-zero value in this timer indicates that the MAC is currently in | ||
607 | * the paused state. | ||
608 | */ | ||
609 | |||
610 | /* MIF Registers */ | ||
611 | #define MIF_BBCLK 0x6200UL /* MIF Bit-Bang Clock */ | ||
612 | #define MIF_BBDATA 0x6204UL /* MIF Bit-Bang Data */ | ||
613 | #define MIF_BBOENAB 0x6208UL /* MIF Bit-Bang Output Enable */ | ||
614 | #define MIF_FRAME 0x620CUL /* MIF Frame/Output Register */ | ||
615 | #define MIF_CFG 0x6210UL /* MIF Configuration Register */ | ||
616 | #define MIF_MASK 0x6214UL /* MIF Mask Register */ | ||
617 | #define MIF_STATUS 0x6218UL /* MIF Status Register */ | ||
618 | #define MIF_SMACHINE 0x621CUL /* MIF State Machine Register */ | ||
619 | |||
620 | /* MIF Bit-Bang Clock. This 1-bit register is used to generate the | ||
621 | * MDC clock waveform on the MII Management Interface when the MIF is | ||
622 | * programmed in the "Bit-Bang" mode. Writing a '1' after a '0' into | ||
623 | * this register will create a rising edge on the MDC, while writing | ||
624 | * a '0' after a '1' will create a falling edge. For every bit that | ||
625 | * is transferred on the management interface, both edges have to be | ||
626 | * generated. | ||
627 | */ | ||
628 | |||
629 | /* MIF Bit-Bang Data. This 1-bit register is used to generate the | ||
630 | * outgoing data (MDO) on the MII Management Interface when the MIF | ||
631 | * is programmed in the "Bit-Bang" mode. The data will be steered to the | ||
632 | * appropriate MDIO based on the state of the PHY_Select bit in the MIF | ||
633 | * Configuration Register. | ||
634 | */ | ||
635 | |||
636 | /* MIF Bit-Bang Output Enable. This 1-bit register is used to enable | ||
637 | * ('1') or disable ('0') the bi-directional driver on the MII when the | ||
638 | * MIF is programmed in the "Bit-Bang" mode. The MDIO should be enabled | ||
639 | * when data bits are transferred from the MIF to the transceiver, and it | ||
640 | * should be disabled when the interface is idle or when data bits are | ||
641 | * transferred from the transceiver to the MIF (data portion of a read | ||
642 | * instruction). Only one MDIO will be enabled at a given time, depending | ||
643 | * on the state of the PHY_Select bit in the MIF Configuration Register. | ||
644 | */ | ||
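
/* Editorial sketch: clocking one bit out of the MIF in bit-bang mode,
 * per the three register descriptions above.  The output driver must
 * already be enabled via MIF_BBOENAB, and a full MDC cycle (both edges)
 * is generated per bit.  gem_bb_send_bit() is a hypothetical helper.
 */
static inline void gem_bb_send_bit(void __iomem *regs, int bit)
{
	writel(bit ? 1 : 0, regs + MIF_BBDATA);	/* set up MDO first */
	writel(0, regs + MIF_BBCLK);		/* falling edge on MDC */
	writel(1, regs + MIF_BBCLK);		/* rising edge latches the bit */
}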
645 | |||
646 | /* MIF Configuration Register. This 15-bit register controls the operation | ||
647 | * of the MIF. | ||
648 | */ | ||
649 | #define MIF_CFG_PSELECT 0x00000001 /* Xcvr slct: 0=mdio0 1=mdio1 */ | ||
650 | #define MIF_CFG_POLL 0x00000002 /* Enable polling mechanism */ | ||
651 | #define MIF_CFG_BBMODE 0x00000004 /* 1=bit-bang 0=frame mode */ | ||
652 | #define MIF_CFG_PRADDR 0x000000f8 /* Xcvr poll register address */ | ||
653 | #define MIF_CFG_MDI0 0x00000100 /* MDIO_0 present or read-bit */ | ||
654 | #define MIF_CFG_MDI1 0x00000200 /* MDIO_1 present or read-bit */ | ||
655 | #define MIF_CFG_PPADDR 0x00007c00 /* Xcvr poll PHY address */ | ||
656 | |||
657 | /* MIF Frame/Output Register. This 32-bit register allows the host to | ||
658 | * communicate with a transceiver in frame mode (as opposed to bit-bang | ||
659 | * mode). Writes by the host specify an instruction. After being issued, | ||
660 | * the host must poll this register for completion. Also, after | ||
661 | * completion this register holds the data returned by the transceiver | ||
662 | * if applicable. | ||
663 | */ | ||
664 | #define MIF_FRAME_ST 0xc0000000 /* STart of frame */ | ||
665 | #define MIF_FRAME_OP 0x30000000 /* OPcode */ | ||
666 | #define MIF_FRAME_PHYAD 0x0f800000 /* PHY ADdress */ | ||
667 | #define MIF_FRAME_REGAD 0x007c0000 /* REGister ADdress */ | ||
668 | #define MIF_FRAME_TAMSB 0x00020000 /* Turn Around MSB */ | ||
669 | #define MIF_FRAME_TALSB 0x00010000 /* Turn Around LSB */ | ||
670 | #define MIF_FRAME_DATA 0x0000ffff /* Instruction Payload */ | ||
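
/* Editorial sketch: a frame-mode PHY register read following the
 * issue-then-poll scheme described above.  0x60020000 encodes ST=01,
 * OP=10 (read) plus the turnaround MSB, per the field masks; completion
 * is signalled by the hardware setting TALSB, after which the low 16
 * bits hold the data.  The poll bound is arbitrary and gem_mif_read()
 * is a hypothetical helper.
 */
static inline int gem_mif_read(void __iomem *regs, int phyad, int reg)
{
	u32 cmd = 0x60020000 |
		  ((phyad << 23) & MIF_FRAME_PHYAD) |
		  ((reg << 18) & MIF_FRAME_REGAD);
	int limit = 10000;

	writel(cmd, regs + MIF_FRAME);
	while (--limit > 0) {
		cmd = readl(regs + MIF_FRAME);
		if (cmd & MIF_FRAME_TALSB)
			return cmd & MIF_FRAME_DATA;
	}
	return -1;	/* transceiver never completed the frame */
}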
671 | |||
672 | /* MIF Status Register. This register reports status when the MIF is | ||
673 | * operating in the poll mode. The poll status field is auto-clearing | ||
674 | * on read. | ||
675 | */ | ||
676 | #define MIF_STATUS_DATA 0xffff0000 /* Live image of XCVR reg */ | ||
677 | #define MIF_STATUS_STAT 0x0000ffff /* Which bits have changed */ | ||
678 | |||
679 | /* MIF Mask Register. This 16-bit register is used when in poll mode | ||
680 | * to say which bits of the polled register will cause an interrupt | ||
681 | * when changed. | ||
682 | */ | ||
683 | |||
684 | /* PCS/Serialink Registers */ | ||
685 | #define PCS_MIICTRL 0x9000UL /* PCS MII Control Register */ | ||
686 | #define PCS_MIISTAT 0x9004UL /* PCS MII Status Register */ | ||
687 | #define PCS_MIIADV 0x9008UL /* PCS MII Advertisement Reg */ | ||
688 | #define PCS_MIILP 0x900CUL /* PCS MII Link Partner Ability */ | ||
689 | #define PCS_CFG 0x9010UL /* PCS Configuration Register */ | ||
690 | #define PCS_SMACHINE 0x9014UL /* PCS State Machine Register */ | ||
691 | #define PCS_ISTAT 0x9018UL /* PCS Interrupt Status Reg */ | ||
692 | #define PCS_DMODE 0x9050UL /* Datapath Mode Register */ | ||
693 | #define PCS_SCTRL 0x9054UL /* Serialink Control Register */ | ||
694 | #define PCS_SOS 0x9058UL /* Shared Output Select Reg */ | ||
695 | #define PCS_SSTATE 0x905CUL /* Serialink State Register */ | ||
696 | |||
697 | /* PCS MII Control Register. */ | ||
698 | #define PCS_MIICTRL_SPD 0x00000040 /* Read as one, writes ignored */ | ||
699 | #define PCS_MIICTRL_CT 0x00000080 /* Force COL signal active */ | ||
700 | #define PCS_MIICTRL_DM 0x00000100 /* Duplex mode, forced low */ | ||
701 | #define PCS_MIICTRL_RAN 0x00000200 /* Restart auto-neg, self clear */ | ||
702 | #define PCS_MIICTRL_ISO 0x00000400 /* Read as zero, writes ignored */ | ||
703 | #define PCS_MIICTRL_PD 0x00000800 /* Read as zero, writes ignored */ | ||
704 | #define PCS_MIICTRL_ANE 0x00001000 /* Auto-neg enable */ | ||
705 | #define PCS_MIICTRL_SS 0x00002000 /* Read as zero, writes ignored */ | ||
706 | #define PCS_MIICTRL_WB 0x00004000 /* Wrapback, loopback at 10-bit | ||
707 | * input side of Serialink | ||
708 | */ | ||
709 | #define PCS_MIICTRL_RST 0x00008000 /* Resets PCS, self clearing */ | ||
710 | |||
711 | /* PCS MII Status Register. */ | ||
712 | #define PCS_MIISTAT_EC 0x00000001 /* Ext Capability: Read as zero */ | ||
713 | #define PCS_MIISTAT_JD 0x00000002 /* Jabber Detect: Read as zero */ | ||
714 | #define PCS_MIISTAT_LS 0x00000004 /* Link Status: 1=up 0=down */ | ||
715 | #define PCS_MIISTAT_ANA 0x00000008 /* Auto-neg Ability, always 1 */ | ||
716 | #define PCS_MIISTAT_RF 0x00000010 /* Remote Fault */ | ||
717 | #define PCS_MIISTAT_ANC 0x00000020 /* Auto-neg complete */ | ||
718 | #define PCS_MIISTAT_ES 0x00000100 /* Extended Status, always 1 */ | ||
719 | |||
720 | /* PCS MII Advertisement Register. */ | ||
721 | #define PCS_MIIADV_FD 0x00000020 /* Advertise Full Duplex */ | ||
722 | #define PCS_MIIADV_HD 0x00000040 /* Advertise Half Duplex */ | ||
723 | #define PCS_MIIADV_SP 0x00000080 /* Advertise Symmetric Pause */ | ||
724 | #define PCS_MIIADV_AP 0x00000100 /* Advertise Asymmetric Pause */ | ||
725 | #define PCS_MIIADV_RF 0x00003000 /* Remote Fault */ | ||
726 | #define PCS_MIIADV_ACK 0x00004000 /* Read-only */ | ||
727 | #define PCS_MIIADV_NP 0x00008000 /* Next-page, forced low */ | ||
728 | |||
729 | /* PCS MII Link Partner Ability Register. This register is equivalent | ||
730 | * to the Link Partner Ability Register of the standard MII register set. | ||
731 | * Its layout corresponds to the PCS MII Advertisement Register. | ||
732 | */ | ||
733 | |||
734 | /* PCS Configuration Register. */ | ||
735 | #define PCS_CFG_ENABLE 0x00000001 /* Must be zero while changing | ||
736 | * PCS MII advertisement reg. | ||
737 | */ | ||
738 | #define PCS_CFG_SDO 0x00000002 /* Signal detect override */ | ||
739 | #define PCS_CFG_SDL 0x00000004 /* Signal detect active low */ | ||
740 | #define PCS_CFG_JS 0x00000018 /* Jitter-study: | ||
741 | * 0 = normal operation | ||
742 | * 1 = high-frequency test pattern | ||
743 | * 2 = low-frequency test pattern | ||
744 | * 3 = reserved | ||
745 | */ | ||
746 | #define PCS_CFG_TO 0x00000020 /* 10ms auto-neg timer override */ | ||
747 | |||
748 | /* PCS Interrupt Status Register. This register is self-clearing | ||
749 | * when read. | ||
750 | */ | ||
751 | #define PCS_ISTAT_LSC 0x00000004 /* Link Status Change */ | ||
752 | |||
753 | /* Datapath Mode Register. */ | ||
754 | #define PCS_DMODE_SM 0x00000001 /* 1 = use internal Serialink */ | ||
755 | #define PCS_DMODE_ESM 0x00000002 /* External SERDES mode */ | ||
756 | #define PCS_DMODE_MGM 0x00000004 /* MII/GMII mode */ | ||
757 | #define PCS_DMODE_GMOE 0x00000008 /* GMII Output Enable */ | ||
758 | |||
759 | /* Serialink Control Register. | ||
760 | * | ||
761 | * NOTE: When in SERDES mode, the loopback bit has inverse logic. | ||
762 | */ | ||
763 | #define PCS_SCTRL_LOOP 0x00000001 /* Loopback enable */ | ||
764 | #define PCS_SCTRL_ESCD 0x00000002 /* Enable sync char detection */ | ||
765 | #define PCS_SCTRL_LOCK 0x00000004 /* Lock to reference clock */ | ||
766 | #define PCS_SCTRL_EMP 0x00000018 /* Output driver emphasis */ | ||
767 | #define PCS_SCTRL_STEST 0x000001c0 /* Self test patterns */ | ||
768 | #define PCS_SCTRL_PDWN 0x00000200 /* Software power-down */ | ||
769 | #define PCS_SCTRL_RXZ 0x00000c00 /* PLL input to Serialink */ | ||
770 | #define PCS_SCTRL_RXP 0x00003000 /* PLL input to Serialink */ | ||
771 | #define PCS_SCTRL_TXZ 0x0000c000 /* PLL input to Serialink */ | ||
772 | #define PCS_SCTRL_TXP 0x00030000 /* PLL input to Serialink */ | ||
773 | |||
774 | /* Shared Output Select Register. For test and debug, allows multiplexing | ||
775 | * test outputs into the PROM address pins. Set to zero for normal | ||
776 | * operation. | ||
777 | */ | ||
778 | #define PCS_SOS_PADDR 0x00000003 /* PROM Address */ | ||
779 | |||
780 | /* PROM Image Space */ | ||
781 | #define PROM_START 0x100000UL /* Expansion ROM run time access*/ | ||
782 | #define PROM_SIZE 0x0fffffUL /* Size of ROM */ | ||
783 | #define PROM_END 0x200000UL /* End of ROM */ | ||
784 | |||
785 | /* MII definitions missing from mii.h */ | ||
786 | |||
787 | #define BMCR_SPD2 0x0040 /* Gigabit enable? (bcm5411) */ | ||
788 | #define LPA_PAUSE 0x0400 | ||
789 | |||
790 | /* More PHY registers (specific to Broadcom models) */ | ||
791 | |||
792 | /* MII BCM5201 MULTIPHY interrupt register */ | ||
793 | #define MII_BCM5201_INTERRUPT 0x1A | ||
794 | #define MII_BCM5201_INTERRUPT_INTENABLE 0x4000 | ||
795 | |||
796 | #define MII_BCM5201_AUXMODE2 0x1B | ||
797 | #define MII_BCM5201_AUXMODE2_LOWPOWER 0x0008 | ||
798 | |||
799 | #define MII_BCM5201_MULTIPHY 0x1E | ||
800 | |||
801 | /* MII BCM5201 MULTIPHY register bits */ | ||
802 | #define MII_BCM5201_MULTIPHY_SERIALMODE 0x0002 | ||
803 | #define MII_BCM5201_MULTIPHY_SUPERISOLATE 0x0008 | ||
804 | |||
805 | /* MII BCM5400 1000-BASET Control register */ | ||
806 | #define MII_BCM5400_GB_CONTROL 0x09 | ||
807 | #define MII_BCM5400_GB_CONTROL_FULLDUPLEXCAP 0x0200 | ||
808 | |||
809 | /* MII BCM5400 AUXCONTROL register */ | ||
810 | #define MII_BCM5400_AUXCONTROL 0x18 | ||
811 | #define MII_BCM5400_AUXCONTROL_PWR10BASET 0x0004 | ||
812 | |||
813 | /* MII BCM5400 AUXSTATUS register */ | ||
814 | #define MII_BCM5400_AUXSTATUS 0x19 | ||
815 | #define MII_BCM5400_AUXSTATUS_LINKMODE_MASK 0x0700 | ||
816 | #define MII_BCM5400_AUXSTATUS_LINKMODE_SHIFT 8 | ||
817 | |||
818 | /* When it can, GEM internally caches 4 aligned TX descriptors | ||
819 | * at a time, so that it can use full cacheline DMA reads. | ||
820 | * | ||
821 | * Note that unlike HME, there is no ownership bit in the descriptor | ||
822 | * control word. The same functionality is obtained via the TX-Kick | ||
823 | * and TX-Complete registers. As a result, GEM need not write back | ||
824 | * updated values to the TX descriptor ring, it only performs reads. | ||
825 | * | ||
826 | * Since TX descriptors are never modified by GEM, the driver can | ||
827 | * use the buffer DMA address as a place to keep track of allocated | ||
828 | * DMA mappings for a transmitted packet. | ||
829 | */ | ||
830 | struct gem_txd { | ||
831 | __le64 control_word; | ||
832 | __le64 buffer; | ||
833 | }; | ||
834 | |||
835 | #define TXDCTRL_BUFSZ 0x0000000000007fffULL /* Buffer Size */ | ||
836 | #define TXDCTRL_CSTART 0x00000000001f8000ULL /* CSUM Start Offset */ | ||
837 | #define TXDCTRL_COFF 0x000000001fe00000ULL /* CSUM Stuff Offset */ | ||
838 | #define TXDCTRL_CENAB 0x0000000020000000ULL /* CSUM Enable */ | ||
839 | #define TXDCTRL_EOF 0x0000000040000000ULL /* End of Frame */ | ||
840 | #define TXDCTRL_SOF 0x0000000080000000ULL /* Start of Frame */ | ||
841 | #define TXDCTRL_INTME 0x0000000100000000ULL /* "Interrupt Me" */ | ||
842 | #define TXDCTRL_NOCRC 0x0000000200000000ULL /* No CRC Present */ | ||
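
/* Editorial sketch: filling a single-fragment TX descriptor.  SOF and
 * EOF are both set because the whole frame lives in one buffer, and the
 * control word layout follows the TXDCTRL_* masks above.  gem_fill_txd()
 * is a hypothetical helper; 'mapping' would come from a DMA mapping call
 * such as pci_map_single().
 */
static inline void gem_fill_txd(struct gem_txd *txd, dma_addr_t mapping,
				u32 len)
{
	txd->buffer = cpu_to_le64(mapping);
	txd->control_word = cpu_to_le64(TXDCTRL_SOF | TXDCTRL_EOF |
					(len & TXDCTRL_BUFSZ));
}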
843 | |||
844 | /* GEM requires that RX descriptors are provided four at a time, | ||
845 | * aligned. Also, the RX ring may not wrap around. This means that | ||
846 | * there will be at least 4 unused descriptor entries in the middle | ||
847 | * of the RX ring at all times. | ||
848 | * | ||
849 | * Similar to HME, GEM assumes that it can write garbage bytes before | ||
850 | * the beginning of the buffer and right after the end in order to DMA | ||
851 | * whole cachelines. | ||
852 | * | ||
853 | * Unlike for TX, GEM does update the status word in the RX descriptors | ||
854 | * when packets arrive. Therefore an ownership bit does exist in the | ||
855 | * RX descriptors. It is advisory; GEM clears it but does not check | ||
856 | * it in any way. So when buffers are posted to the RX ring (via the | ||
857 | * RX Kick register) by the driver it must make sure the buffers are | ||
858 | * truly ready and that the ownership bits are set properly. | ||
859 | * | ||
860 | * Even though GEM modifies the RX descriptors, it guarantees that the | ||
861 | * buffer DMA address field will stay the same when it performs these | ||
862 | * updates. Therefore it can be used to keep track of DMA mappings | ||
863 | * by the host driver just as in the TX descriptor case above. | ||
864 | */ | ||
865 | struct gem_rxd { | ||
866 | __le64 status_word; | ||
867 | __le64 buffer; | ||
868 | }; | ||
869 | |||
870 | #define RXDCTRL_TCPCSUM 0x000000000000ffffULL /* TCP Pseudo-CSUM */ | ||
871 | #define RXDCTRL_BUFSZ 0x000000007fff0000ULL /* Buffer Size */ | ||
872 | #define RXDCTRL_OWN 0x0000000080000000ULL /* GEM owns this entry */ | ||
873 | #define RXDCTRL_HASHVAL 0x0ffff00000000000ULL /* Hash Value */ | ||
874 | #define RXDCTRL_HPASS 0x1000000000000000ULL /* Passed Hash Filter */ | ||
875 | #define RXDCTRL_ALTMAC 0x2000000000000000ULL /* Matched ALT MAC */ | ||
876 | #define RXDCTRL_BAD 0x4000000000000000ULL /* Frame has bad CRC */ | ||
877 | |||
878 | #define RXDCTRL_FRESH(gp) \ | ||
879 | ((((RX_BUF_ALLOC_SIZE(gp) - RX_OFFSET) << 16) & RXDCTRL_BUFSZ) | \ | ||
880 | RXDCTRL_OWN) | ||
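
/* Editorial sketch: returning a buffer to GEM, building the same status
 * word RXDCTRL_FRESH() produces above (size field plus OWN bit) but with
 * the usable buffer length passed in directly.  gem_return_rxd() is a
 * hypothetical helper; per the note above, the entry only becomes
 * visible to the chip once the RX Kick register is advanced past it.
 */
static inline void gem_return_rxd(struct gem_rxd *rxd, dma_addr_t mapping,
				  u64 buf_sz)
{
	rxd->buffer = cpu_to_le64(mapping);
	rxd->status_word = cpu_to_le64(((buf_sz << 16) & RXDCTRL_BUFSZ) |
				       RXDCTRL_OWN);
}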
881 | |||
882 | #define TX_RING_SIZE 128 | ||
883 | #define RX_RING_SIZE 128 | ||
884 | |||
885 | #if TX_RING_SIZE == 32 | ||
886 | #define TXDMA_CFG_BASE TXDMA_CFG_RINGSZ_32 | ||
887 | #elif TX_RING_SIZE == 64 | ||
888 | #define TXDMA_CFG_BASE TXDMA_CFG_RINGSZ_64 | ||
889 | #elif TX_RING_SIZE == 128 | ||
890 | #define TXDMA_CFG_BASE TXDMA_CFG_RINGSZ_128 | ||
891 | #elif TX_RING_SIZE == 256 | ||
892 | #define TXDMA_CFG_BASE TXDMA_CFG_RINGSZ_256 | ||
893 | #elif TX_RING_SIZE == 512 | ||
894 | #define TXDMA_CFG_BASE TXDMA_CFG_RINGSZ_512 | ||
895 | #elif TX_RING_SIZE == 1024 | ||
896 | #define TXDMA_CFG_BASE TXDMA_CFG_RINGSZ_1K | ||
897 | #elif TX_RING_SIZE == 2048 | ||
898 | #define TXDMA_CFG_BASE TXDMA_CFG_RINGSZ_2K | ||
899 | #elif TX_RING_SIZE == 4096 | ||
900 | #define TXDMA_CFG_BASE TXDMA_CFG_RINGSZ_4K | ||
901 | #elif TX_RING_SIZE == 8192 | ||
902 | #define TXDMA_CFG_BASE TXDMA_CFG_RINGSZ_8K | ||
903 | #else | ||
904 | #error TX_RING_SIZE value is illegal... | ||
905 | #endif | ||
906 | |||
907 | #if RX_RING_SIZE == 32 | ||
908 | #define RXDMA_CFG_BASE RXDMA_CFG_RINGSZ_32 | ||
909 | #elif RX_RING_SIZE == 64 | ||
910 | #define RXDMA_CFG_BASE RXDMA_CFG_RINGSZ_64 | ||
911 | #elif RX_RING_SIZE == 128 | ||
912 | #define RXDMA_CFG_BASE RXDMA_CFG_RINGSZ_128 | ||
913 | #elif RX_RING_SIZE == 256 | ||
914 | #define RXDMA_CFG_BASE RXDMA_CFG_RINGSZ_256 | ||
915 | #elif RX_RING_SIZE == 512 | ||
916 | #define RXDMA_CFG_BASE RXDMA_CFG_RINGSZ_512 | ||
917 | #elif RX_RING_SIZE == 1024 | ||
918 | #define RXDMA_CFG_BASE RXDMA_CFG_RINGSZ_1K | ||
919 | #elif RX_RING_SIZE == 2048 | ||
920 | #define RXDMA_CFG_BASE RXDMA_CFG_RINGSZ_2K | ||
921 | #elif RX_RING_SIZE == 4096 | ||
922 | #define RXDMA_CFG_BASE RXDMA_CFG_RINGSZ_4K | ||
923 | #elif RX_RING_SIZE == 8192 | ||
924 | #define RXDMA_CFG_BASE RXDMA_CFG_RINGSZ_8K | ||
925 | #else | ||
926 | #error RX_RING_SIZE is illegal... | ||
927 | #endif | ||
928 | |||
929 | #define NEXT_TX(N) (((N) + 1) & (TX_RING_SIZE - 1)) | ||
930 | #define NEXT_RX(N) (((N) + 1) & (RX_RING_SIZE - 1)) | ||
931 | |||
932 | #define TX_BUFFS_AVAIL(GP) \ | ||
933 | (((GP)->tx_old <= (GP)->tx_new) ? \ | ||
934 | (GP)->tx_old + (TX_RING_SIZE - 1) - (GP)->tx_new : \ | ||
935 | (GP)->tx_old - (GP)->tx_new - 1) | ||
936 | |||
937 | #define RX_OFFSET 2 | ||
938 | #define RX_BUF_ALLOC_SIZE(gp) ((gp)->rx_buf_sz + 28 + RX_OFFSET + 64) | ||
939 | |||
940 | #define RX_COPY_THRESHOLD 256 | ||
941 | |||
942 | #if TX_RING_SIZE < 128 | ||
943 | #define INIT_BLOCK_TX_RING_SIZE 128 | ||
944 | #else | ||
945 | #define INIT_BLOCK_TX_RING_SIZE TX_RING_SIZE | ||
946 | #endif | ||
947 | |||
948 | #if RX_RING_SIZE < 128 | ||
949 | #define INIT_BLOCK_RX_RING_SIZE 128 | ||
950 | #else | ||
951 | #define INIT_BLOCK_RX_RING_SIZE RX_RING_SIZE | ||
952 | #endif | ||
953 | |||
954 | struct gem_init_block { | ||
955 | struct gem_txd txd[INIT_BLOCK_TX_RING_SIZE]; | ||
956 | struct gem_rxd rxd[INIT_BLOCK_RX_RING_SIZE]; | ||
957 | }; | ||
958 | |||
959 | enum gem_phy_type { | ||
960 | phy_mii_mdio0, | ||
961 | phy_mii_mdio1, | ||
962 | phy_serialink, | ||
963 | phy_serdes, | ||
964 | }; | ||
965 | |||
966 | enum link_state { | ||
967 | link_down = 0, /* No link, will retry */ | ||
968 | link_aneg, /* Autoneg in progress */ | ||
969 | link_force_try, /* Try Forced link speed */ | ||
970 | link_force_ret, /* Forced mode worked, retrying autoneg */ | ||
971 | link_force_ok, /* Stay in forced mode */ | ||
972 | link_up /* Link is up */ | ||
973 | }; | ||
974 | |||
975 | struct gem { | ||
976 | void __iomem *regs; | ||
977 | int rx_new, rx_old; | ||
978 | int tx_new, tx_old; | ||
979 | |||
980 | unsigned int has_wol : 1; /* chip supports wake-on-lan */ | ||
981 | unsigned int asleep_wol : 1; /* was asleep with WOL enabled */ | ||
982 | |||
983 | int cell_enabled; | ||
984 | u32 msg_enable; | ||
985 | u32 status; | ||
986 | |||
987 | struct napi_struct napi; | ||
988 | |||
989 | int tx_fifo_sz; | ||
990 | int rx_fifo_sz; | ||
991 | int rx_pause_off; | ||
992 | int rx_pause_on; | ||
993 | int rx_buf_sz; | ||
994 | u64 pause_entered; | ||
995 | u16 pause_last_time_recvd; | ||
996 | u32 mac_rx_cfg; | ||
997 | u32 swrst_base; | ||
998 | |||
999 | int want_autoneg; | ||
1000 | int last_forced_speed; | ||
1001 | enum link_state lstate; | ||
1002 | struct timer_list link_timer; | ||
1003 | int timer_ticks; | ||
1004 | int wake_on_lan; | ||
1005 | struct work_struct reset_task; | ||
1006 | volatile int reset_task_pending; | ||
1007 | |||
1008 | enum gem_phy_type phy_type; | ||
1009 | struct mii_phy phy_mii; | ||
1010 | int mii_phy_addr; | ||
1011 | |||
1012 | struct gem_init_block *init_block; | ||
1013 | struct sk_buff *rx_skbs[RX_RING_SIZE]; | ||
1014 | struct sk_buff *tx_skbs[TX_RING_SIZE]; | ||
1015 | dma_addr_t gblock_dvma; | ||
1016 | |||
1017 | struct pci_dev *pdev; | ||
1018 | struct net_device *dev; | ||
1019 | #if defined(CONFIG_PPC_PMAC) || defined(CONFIG_SPARC) | ||
1020 | struct device_node *of_node; | ||
1021 | #endif | ||
1022 | }; | ||
1023 | |||
1024 | #define found_mii_phy(gp) ((gp->phy_type == phy_mii_mdio0 || gp->phy_type == phy_mii_mdio1) && \ | ||
1025 | gp->phy_mii.def && gp->phy_mii.def->ops) | ||
1026 | |||
1027 | #endif /* _SUNGEM_H */ | ||
diff --git a/drivers/net/ethernet/sun/sungem_phy.c b/drivers/net/ethernet/sun/sungem_phy.c new file mode 100644 index 000000000000..d16880d7099b --- /dev/null +++ b/drivers/net/ethernet/sun/sungem_phy.c | |||
@@ -0,0 +1,1200 @@ | |||
1 | /* | ||
2 | * PHY drivers for the sungem ethernet driver. | ||
3 | * | ||
4 | * This file could be shared with other drivers. | ||
5 | * | ||
6 | * (c) 2002-2007, Benjamin Herrenscmidt (benh@kernel.crashing.org) | ||
7 | * | ||
8 | * TODO: | ||
9 | * - Add support for PHYs that provide an IRQ line | ||
10 | * - Eventually move the entire polling state machine in | ||
11 | * there (out of the eth driver), so that it can easily be | ||
12 | * skipped on PHYs that implement it in hardware. | ||
13 | * - On LXT971 & BCM5201, Apple uses some chip-specific regs | ||
14 | * to read the link status. Figure out why and if it makes | ||
15 | * sense to do the same (magic aneg ?) | ||
16 | * - Apple has some additional power management code for some | ||
17 | * Broadcom PHYs that they "hide" from the OpenSource version | ||
18 | * of Darwin; still need to reverse-engineer that | ||
19 | */ | ||
20 | |||
21 | |||
22 | #include <linux/module.h> | ||
23 | |||
24 | #include <linux/kernel.h> | ||
25 | #include <linux/types.h> | ||
26 | #include <linux/netdevice.h> | ||
27 | #include <linux/etherdevice.h> | ||
28 | #include <linux/mii.h> | ||
29 | #include <linux/ethtool.h> | ||
30 | #include <linux/delay.h> | ||
31 | |||
32 | #ifdef CONFIG_PPC_PMAC | ||
33 | #include <asm/prom.h> | ||
34 | #endif | ||
35 | |||
36 | #include "sungem_phy.h" | ||
37 | |||
38 | /* Link modes of the BCM5400 PHY */ | ||
39 | static const int phy_BCM5400_link_table[8][3] = { | ||
40 | { 0, 0, 0 }, /* No link */ | ||
41 | { 0, 0, 0 }, /* 10BT Half Duplex */ | ||
42 | { 1, 0, 0 }, /* 10BT Full Duplex */ | ||
43 | { 0, 1, 0 }, /* 100BT Half Duplex */ | ||
44 | { 0, 1, 0 }, /* 100BT Half Duplex */ | ||
45 | { 1, 1, 0 }, /* 100BT Full Duplex */ | ||
46 | { 1, 0, 1 }, /* 1000BT */ | ||
47 | { 1, 0, 1 }, /* 1000BT */ | ||
48 | }; | ||
49 | |||
50 | static inline int __phy_read(struct mii_phy* phy, int id, int reg) | ||
51 | { | ||
52 | return phy->mdio_read(phy->dev, id, reg); | ||
53 | } | ||
54 | |||
55 | static inline void __phy_write(struct mii_phy* phy, int id, int reg, int val) | ||
56 | { | ||
57 | phy->mdio_write(phy->dev, id, reg, val); | ||
58 | } | ||
59 | |||
60 | static inline int phy_read(struct mii_phy* phy, int reg) | ||
61 | { | ||
62 | return phy->mdio_read(phy->dev, phy->mii_id, reg); | ||
63 | } | ||
64 | |||
65 | static inline void phy_write(struct mii_phy* phy, int reg, int val) | ||
66 | { | ||
67 | phy->mdio_write(phy->dev, phy->mii_id, reg, val); | ||
68 | } | ||
69 | |||
70 | static int reset_one_mii_phy(struct mii_phy* phy, int phy_id) | ||
71 | { | ||
72 | u16 val; | ||
73 | int limit = 10000; | ||
74 | |||
75 | val = __phy_read(phy, phy_id, MII_BMCR); | ||
76 | val &= ~(BMCR_ISOLATE | BMCR_PDOWN); | ||
77 | val |= BMCR_RESET; | ||
78 | __phy_write(phy, phy_id, MII_BMCR, val); | ||
79 | |||
80 | udelay(100); | ||
81 | |||
82 | while (--limit) { | ||
83 | val = __phy_read(phy, phy_id, MII_BMCR); | ||
84 | if ((val & BMCR_RESET) == 0) | ||
85 | break; | ||
86 | udelay(10); | ||
87 | } | ||
88 | if ((val & BMCR_ISOLATE) && limit > 0) | ||
89 | __phy_write(phy, phy_id, MII_BMCR, val & ~BMCR_ISOLATE); | ||
90 | |||
91 | return limit <= 0; | ||
92 | } | ||
93 | |||
94 | static int bcm5201_init(struct mii_phy* phy) | ||
95 | { | ||
96 | u16 data; | ||
97 | |||
98 | data = phy_read(phy, MII_BCM5201_MULTIPHY); | ||
99 | data &= ~MII_BCM5201_MULTIPHY_SUPERISOLATE; | ||
100 | phy_write(phy, MII_BCM5201_MULTIPHY, data); | ||
101 | |||
102 | phy_write(phy, MII_BCM5201_INTERRUPT, 0); | ||
103 | |||
104 | return 0; | ||
105 | } | ||
106 | |||
107 | static int bcm5201_suspend(struct mii_phy* phy) | ||
108 | { | ||
109 | phy_write(phy, MII_BCM5201_INTERRUPT, 0); | ||
110 | phy_write(phy, MII_BCM5201_MULTIPHY, MII_BCM5201_MULTIPHY_SUPERISOLATE); | ||
111 | |||
112 | return 0; | ||
113 | } | ||
114 | |||
115 | static int bcm5221_init(struct mii_phy* phy) | ||
116 | { | ||
117 | u16 data; | ||
118 | |||
119 | data = phy_read(phy, MII_BCM5221_TEST); | ||
120 | phy_write(phy, MII_BCM5221_TEST, | ||
121 | data | MII_BCM5221_TEST_ENABLE_SHADOWS); | ||
122 | |||
123 | data = phy_read(phy, MII_BCM5221_SHDOW_AUX_STAT2); | ||
124 | phy_write(phy, MII_BCM5221_SHDOW_AUX_STAT2, | ||
125 | data | MII_BCM5221_SHDOW_AUX_STAT2_APD); | ||
126 | |||
127 | data = phy_read(phy, MII_BCM5221_SHDOW_AUX_MODE4); | ||
128 | phy_write(phy, MII_BCM5221_SHDOW_AUX_MODE4, | ||
129 | data | MII_BCM5221_SHDOW_AUX_MODE4_CLKLOPWR); | ||
130 | |||
131 | data = phy_read(phy, MII_BCM5221_TEST); | ||
132 | phy_write(phy, MII_BCM5221_TEST, | ||
133 | data & ~MII_BCM5221_TEST_ENABLE_SHADOWS); | ||
134 | |||
135 | return 0; | ||
136 | } | ||
137 | |||
138 | static int bcm5221_suspend(struct mii_phy* phy) | ||
139 | { | ||
140 | u16 data; | ||
141 | |||
142 | data = phy_read(phy, MII_BCM5221_TEST); | ||
143 | phy_write(phy, MII_BCM5221_TEST, | ||
144 | data | MII_BCM5221_TEST_ENABLE_SHADOWS); | ||
145 | |||
146 | data = phy_read(phy, MII_BCM5221_SHDOW_AUX_MODE4); | ||
147 | phy_write(phy, MII_BCM5221_SHDOW_AUX_MODE4, | ||
148 | data | MII_BCM5221_SHDOW_AUX_MODE4_IDDQMODE); | ||
149 | |||
150 | return 0; | ||
151 | } | ||
152 | |||
153 | static int bcm5241_init(struct mii_phy* phy) | ||
154 | { | ||
155 | u16 data; | ||
156 | |||
157 | data = phy_read(phy, MII_BCM5221_TEST); | ||
158 | phy_write(phy, MII_BCM5221_TEST, | ||
159 | data | MII_BCM5221_TEST_ENABLE_SHADOWS); | ||
160 | |||
161 | data = phy_read(phy, MII_BCM5221_SHDOW_AUX_STAT2); | ||
162 | phy_write(phy, MII_BCM5221_SHDOW_AUX_STAT2, | ||
163 | data | MII_BCM5221_SHDOW_AUX_STAT2_APD); | ||
164 | |||
165 | data = phy_read(phy, MII_BCM5221_SHDOW_AUX_MODE4); | ||
166 | phy_write(phy, MII_BCM5221_SHDOW_AUX_MODE4, | ||
167 | data & ~MII_BCM5241_SHDOW_AUX_MODE4_STANDBYPWR); | ||
168 | |||
169 | data = phy_read(phy, MII_BCM5221_TEST); | ||
170 | phy_write(phy, MII_BCM5221_TEST, | ||
171 | data & ~MII_BCM5221_TEST_ENABLE_SHADOWS); | ||
172 | |||
173 | return 0; | ||
174 | } | ||
175 | |||
176 | static int bcm5241_suspend(struct mii_phy* phy) | ||
177 | { | ||
178 | u16 data; | ||
179 | |||
180 | data = phy_read(phy, MII_BCM5221_TEST); | ||
181 | phy_write(phy, MII_BCM5221_TEST, | ||
182 | data | MII_BCM5221_TEST_ENABLE_SHADOWS); | ||
183 | |||
184 | data = phy_read(phy, MII_BCM5221_SHDOW_AUX_MODE4); | ||
185 | phy_write(phy, MII_BCM5221_SHDOW_AUX_MODE4, | ||
186 | data | MII_BCM5241_SHDOW_AUX_MODE4_STANDBYPWR); | ||
187 | |||
188 | return 0; | ||
189 | } | ||
190 | |||
191 | static int bcm5400_init(struct mii_phy* phy) | ||
192 | { | ||
193 | u16 data; | ||
194 | |||
195 | /* Configure for gigabit full duplex */ | ||
196 | data = phy_read(phy, MII_BCM5400_AUXCONTROL); | ||
197 | data |= MII_BCM5400_AUXCONTROL_PWR10BASET; | ||
198 | phy_write(phy, MII_BCM5400_AUXCONTROL, data); | ||
199 | |||
200 | data = phy_read(phy, MII_BCM5400_GB_CONTROL); | ||
201 | data |= MII_BCM5400_GB_CONTROL_FULLDUPLEXCAP; | ||
202 | phy_write(phy, MII_BCM5400_GB_CONTROL, data); | ||
203 | |||
204 | udelay(100); | ||
205 | |||
206 | /* Reset and configure cascaded 10/100 PHY */ | ||
207 | (void)reset_one_mii_phy(phy, 0x1f); | ||
208 | |||
209 | data = __phy_read(phy, 0x1f, MII_BCM5201_MULTIPHY); | ||
210 | data |= MII_BCM5201_MULTIPHY_SERIALMODE; | ||
211 | __phy_write(phy, 0x1f, MII_BCM5201_MULTIPHY, data); | ||
212 | |||
213 | data = phy_read(phy, MII_BCM5400_AUXCONTROL); | ||
214 | data &= ~MII_BCM5400_AUXCONTROL_PWR10BASET; | ||
215 | phy_write(phy, MII_BCM5400_AUXCONTROL, data); | ||
216 | |||
217 | return 0; | ||
218 | } | ||
219 | |||
220 | static int bcm5400_suspend(struct mii_phy* phy) | ||
221 | { | ||
222 | #if 0 /* Commented out in Darwin... does someone have those Darwin docs ? */ | ||
223 | phy_write(phy, MII_BMCR, BMCR_PDOWN); | ||
224 | #endif | ||
225 | return 0; | ||
226 | } | ||
227 | |||
228 | static int bcm5401_init(struct mii_phy* phy) | ||
229 | { | ||
230 | u16 data; | ||
231 | int rev; | ||
232 | |||
233 | rev = phy_read(phy, MII_PHYSID2) & 0x000f; | ||
234 | if (rev == 0 || rev == 3) { | ||
235 | /* Some revisions of 5401 appear to need this | ||
236 | * initialisation sequence to disable, according | ||
237 | * to OF, "tap power management" | ||
238 | * | ||
239 | * WARNING ! OF and Darwin don't agree on the | ||
240 | * register addresses. OF seems to interpret the | ||
241 | * register numbers below as decimal | ||
242 | * | ||
243 | * Note: This should (and does) match tg3_init_5401phy_dsp | ||
244 | * in the tg3.c driver. -DaveM | ||
245 | */ | ||
246 | phy_write(phy, 0x18, 0x0c20); | ||
247 | phy_write(phy, 0x17, 0x0012); | ||
248 | phy_write(phy, 0x15, 0x1804); | ||
249 | phy_write(phy, 0x17, 0x0013); | ||
250 | phy_write(phy, 0x15, 0x1204); | ||
251 | phy_write(phy, 0x17, 0x8006); | ||
252 | phy_write(phy, 0x15, 0x0132); | ||
253 | phy_write(phy, 0x17, 0x8006); | ||
254 | phy_write(phy, 0x15, 0x0232); | ||
255 | phy_write(phy, 0x17, 0x201f); | ||
256 | phy_write(phy, 0x15, 0x0a20); | ||
257 | } | ||
258 | |||
259 | /* Configure for gigabit full duplex */ | ||
260 | data = phy_read(phy, MII_BCM5400_GB_CONTROL); | ||
261 | data |= MII_BCM5400_GB_CONTROL_FULLDUPLEXCAP; | ||
262 | phy_write(phy, MII_BCM5400_GB_CONTROL, data); | ||
263 | |||
264 | udelay(10); | ||
265 | |||
266 | /* Reset and configure cascaded 10/100 PHY */ | ||
267 | (void)reset_one_mii_phy(phy, 0x1f); | ||
268 | |||
269 | data = __phy_read(phy, 0x1f, MII_BCM5201_MULTIPHY); | ||
270 | data |= MII_BCM5201_MULTIPHY_SERIALMODE; | ||
271 | __phy_write(phy, 0x1f, MII_BCM5201_MULTIPHY, data); | ||
272 | |||
273 | return 0; | ||
274 | } | ||
275 | |||
276 | static int bcm5401_suspend(struct mii_phy* phy) | ||
277 | { | ||
278 | #if 0 /* Commented out in Darwin... does someone have those Darwin docs ? */ | ||
279 | phy_write(phy, MII_BMCR, BMCR_PDOWN); | ||
280 | #endif | ||
281 | return 0; | ||
282 | } | ||
283 | |||
284 | static int bcm5411_init(struct mii_phy* phy) | ||
285 | { | ||
286 | u16 data; | ||
287 | |||
288 | /* Here's some more Apple black magic to set up | ||
289 | * some voltage stuff. | ||
290 | */ | ||
291 | phy_write(phy, 0x1c, 0x8c23); | ||
292 | phy_write(phy, 0x1c, 0x8ca3); | ||
293 | phy_write(phy, 0x1c, 0x8c23); | ||
294 | |||
295 | /* Here, Apple seems to want to reset it, so do | ||
296 | * it as well | ||
297 | */ | ||
298 | phy_write(phy, MII_BMCR, BMCR_RESET); | ||
299 | phy_write(phy, MII_BMCR, 0x1340); | ||
300 | |||
301 | data = phy_read(phy, MII_BCM5400_GB_CONTROL); | ||
302 | data |= MII_BCM5400_GB_CONTROL_FULLDUPLEXCAP; | ||
303 | phy_write(phy, MII_BCM5400_GB_CONTROL, data); | ||
304 | |||
305 | udelay(10); | ||
306 | |||
307 | /* Reset and configure cascaded 10/100 PHY */ | ||
308 | (void)reset_one_mii_phy(phy, 0x1f); | ||
309 | |||
310 | return 0; | ||
311 | } | ||
312 | |||
313 | static int genmii_setup_aneg(struct mii_phy *phy, u32 advertise) | ||
314 | { | ||
315 | u16 ctl, adv; | ||
316 | |||
317 | phy->autoneg = 1; | ||
318 | phy->speed = SPEED_10; | ||
319 | phy->duplex = DUPLEX_HALF; | ||
320 | phy->pause = 0; | ||
321 | phy->advertising = advertise; | ||
322 | |||
323 | /* Setup standard advertise */ | ||
324 | adv = phy_read(phy, MII_ADVERTISE); | ||
325 | adv &= ~(ADVERTISE_ALL | ADVERTISE_100BASE4); | ||
326 | if (advertise & ADVERTISED_10baseT_Half) | ||
327 | adv |= ADVERTISE_10HALF; | ||
328 | if (advertise & ADVERTISED_10baseT_Full) | ||
329 | adv |= ADVERTISE_10FULL; | ||
330 | if (advertise & ADVERTISED_100baseT_Half) | ||
331 | adv |= ADVERTISE_100HALF; | ||
332 | if (advertise & ADVERTISED_100baseT_Full) | ||
333 | adv |= ADVERTISE_100FULL; | ||
334 | phy_write(phy, MII_ADVERTISE, adv); | ||
335 | |||
336 | /* Start/Restart aneg */ | ||
337 | ctl = phy_read(phy, MII_BMCR); | ||
338 | ctl |= (BMCR_ANENABLE | BMCR_ANRESTART); | ||
339 | phy_write(phy, MII_BMCR, ctl); | ||
340 | |||
341 | return 0; | ||
342 | } | ||
343 | |||
344 | static int genmii_setup_forced(struct mii_phy *phy, int speed, int fd) | ||
345 | { | ||
346 | u16 ctl; | ||
347 | |||
348 | phy->autoneg = 0; | ||
349 | phy->speed = speed; | ||
350 | phy->duplex = fd; | ||
351 | phy->pause = 0; | ||
352 | |||
353 | ctl = phy_read(phy, MII_BMCR); | ||
354 | ctl &= ~(BMCR_FULLDPLX|BMCR_SPEED100|BMCR_ANENABLE); | ||
355 | |||
356 | /* First reset the PHY */ | ||
357 | phy_write(phy, MII_BMCR, ctl | BMCR_RESET); | ||
358 | |||
359 | /* Select speed & duplex */ | ||
360 | switch(speed) { | ||
361 | case SPEED_10: | ||
362 | break; | ||
363 | case SPEED_100: | ||
364 | ctl |= BMCR_SPEED100; | ||
365 | break; | ||
366 | case SPEED_1000: | ||
367 | default: | ||
368 | return -EINVAL; | ||
369 | } | ||
370 | if (fd == DUPLEX_FULL) | ||
371 | ctl |= BMCR_FULLDPLX; | ||
372 | phy_write(phy, MII_BMCR, ctl); | ||
373 | |||
374 | return 0; | ||
375 | } | ||
376 | |||
377 | static int genmii_poll_link(struct mii_phy *phy) | ||
378 | { | ||
379 | u16 status; | ||
380 | |||
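/* Editorial note: the link-status bit in BMSR is latched low, so the
 * first read below clears any stale link-down event and the second
 * read reflects the current link state.
 */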
381 | (void)phy_read(phy, MII_BMSR); | ||
382 | status = phy_read(phy, MII_BMSR); | ||
383 | if ((status & BMSR_LSTATUS) == 0) | ||
384 | return 0; | ||
385 | if (phy->autoneg && !(status & BMSR_ANEGCOMPLETE)) | ||
386 | return 0; | ||
387 | return 1; | ||
388 | } | ||
389 | |||
390 | static int genmii_read_link(struct mii_phy *phy) | ||
391 | { | ||
392 | u16 lpa; | ||
393 | |||
394 | if (phy->autoneg) { | ||
395 | lpa = phy_read(phy, MII_LPA); | ||
396 | |||
397 | if (lpa & (LPA_10FULL | LPA_100FULL)) | ||
398 | phy->duplex = DUPLEX_FULL; | ||
399 | else | ||
400 | phy->duplex = DUPLEX_HALF; | ||
401 | if (lpa & (LPA_100FULL | LPA_100HALF)) | ||
402 | phy->speed = SPEED_100; | ||
403 | else | ||
404 | phy->speed = SPEED_10; | ||
405 | phy->pause = 0; | ||
406 | } | ||
407 | /* On non-aneg, we assume what we put in BMCR is the speed, | ||
408 | * though magic-aneg shouldn't prevent this case from occurring | ||
409 | */ | ||
410 | |||
411 | return 0; | ||
412 | } | ||
413 | |||
414 | static int generic_suspend(struct mii_phy* phy) | ||
415 | { | ||
416 | phy_write(phy, MII_BMCR, BMCR_PDOWN); | ||
417 | |||
418 | return 0; | ||
419 | } | ||
420 | |||
421 | static int bcm5421_init(struct mii_phy* phy) | ||
422 | { | ||
423 | u16 data; | ||
424 | unsigned int id; | ||
425 | |||
426 | id = (phy_read(phy, MII_PHYSID1) << 16 | phy_read(phy, MII_PHYSID2)); | ||
427 | |||
428 | /* Revision 0 of 5421 needs some fixups */ | ||
429 | if (id == 0x002060e0) { | ||
430 | /* This is borrowed from MacOS | ||
431 | */ | ||
432 | phy_write(phy, 0x18, 0x1007); | ||
433 | data = phy_read(phy, 0x18); | ||
434 | phy_write(phy, 0x18, data | 0x0400); | ||
435 | phy_write(phy, 0x18, 0x0007); | ||
436 | data = phy_read(phy, 0x18); | ||
437 | phy_write(phy, 0x18, data | 0x0800); | ||
438 | phy_write(phy, 0x17, 0x000a); | ||
439 | data = phy_read(phy, 0x15); | ||
440 | phy_write(phy, 0x15, data | 0x0200); | ||
441 | } | ||
442 | |||
443 | /* Pick up some init code from OF for K2 version */ | ||
444 | if ((id & 0xfffffff0) == 0x002062e0) { | ||
445 | phy_write(phy, 4, 0x01e1); | ||
446 | phy_write(phy, 9, 0x0300); | ||
447 | } | ||
448 | |||
449 | /* Check if we can enable automatic low power */ | ||
450 | #ifdef CONFIG_PPC_PMAC | ||
451 | if (phy->platform_data) { | ||
452 | struct device_node *np = of_get_parent(phy->platform_data); | ||
453 | int can_low_power = 1; | ||
454 | if (np == NULL || of_get_property(np, "no-autolowpower", NULL)) | ||
455 | can_low_power = 0; | ||
456 | if (can_low_power) { | ||
457 | /* Enable automatic low-power */ | ||
458 | phy_write(phy, 0x1c, 0x9002); | ||
459 | phy_write(phy, 0x1c, 0xa821); | ||
460 | phy_write(phy, 0x1c, 0x941d); | ||
461 | } | ||
462 | } | ||
463 | #endif /* CONFIG_PPC_PMAC */ | ||
464 | |||
465 | return 0; | ||
466 | } | ||
467 | |||
468 | static int bcm54xx_setup_aneg(struct mii_phy *phy, u32 advertise) | ||
469 | { | ||
470 | u16 ctl, adv; | ||
471 | |||
472 | phy->autoneg = 1; | ||
473 | phy->speed = SPEED_10; | ||
474 | phy->duplex = DUPLEX_HALF; | ||
475 | phy->pause = 0; | ||
476 | phy->advertising = advertise; | ||
477 | |||
478 | /* Setup standard advertise */ | ||
479 | adv = phy_read(phy, MII_ADVERTISE); | ||
480 | adv &= ~(ADVERTISE_ALL | ADVERTISE_100BASE4); | ||
481 | if (advertise & ADVERTISED_10baseT_Half) | ||
482 | adv |= ADVERTISE_10HALF; | ||
483 | if (advertise & ADVERTISED_10baseT_Full) | ||
484 | adv |= ADVERTISE_10FULL; | ||
485 | if (advertise & ADVERTISED_100baseT_Half) | ||
486 | adv |= ADVERTISE_100HALF; | ||
487 | if (advertise & ADVERTISED_100baseT_Full) | ||
488 | adv |= ADVERTISE_100FULL; | ||
489 | if (advertise & ADVERTISED_Pause) | ||
490 | adv |= ADVERTISE_PAUSE_CAP; | ||
491 | if (advertise & ADVERTISED_Asym_Pause) | ||
492 | adv |= ADVERTISE_PAUSE_ASYM; | ||
493 | phy_write(phy, MII_ADVERTISE, adv); | ||
494 | |||
495 | /* Setup 1000BT advertise */ | ||
496 | adv = phy_read(phy, MII_1000BASETCONTROL); | ||
497 | adv &= ~(MII_1000BASETCONTROL_FULLDUPLEXCAP|MII_1000BASETCONTROL_HALFDUPLEXCAP); | ||
498 | if (advertise & SUPPORTED_1000baseT_Half) | ||
499 | adv |= MII_1000BASETCONTROL_HALFDUPLEXCAP; | ||
500 | if (advertise & SUPPORTED_1000baseT_Full) | ||
501 | adv |= MII_1000BASETCONTROL_FULLDUPLEXCAP; | ||
502 | phy_write(phy, MII_1000BASETCONTROL, adv); | ||
503 | |||
504 | /* Start/Restart aneg */ | ||
505 | ctl = phy_read(phy, MII_BMCR); | ||
506 | ctl |= (BMCR_ANENABLE | BMCR_ANRESTART); | ||
507 | phy_write(phy, MII_BMCR, ctl); | ||
508 | |||
509 | return 0; | ||
510 | } | ||
511 | |||
512 | static int bcm54xx_setup_forced(struct mii_phy *phy, int speed, int fd) | ||
513 | { | ||
514 | u16 ctl; | ||
515 | |||
516 | phy->autoneg = 0; | ||
517 | phy->speed = speed; | ||
518 | phy->duplex = fd; | ||
519 | phy->pause = 0; | ||
520 | |||
521 | ctl = phy_read(phy, MII_BMCR); | ||
522 | ctl &= ~(BMCR_FULLDPLX|BMCR_SPEED100|BMCR_SPD2|BMCR_ANENABLE); | ||
523 | |||
524 | /* First reset the PHY */ | ||
525 | phy_write(phy, MII_BMCR, ctl | BMCR_RESET); | ||
526 | |||
527 | /* Select speed & duplex */ | ||
528 | switch(speed) { | ||
529 | case SPEED_10: | ||
530 | break; | ||
531 | case SPEED_100: | ||
532 | ctl |= BMCR_SPEED100; | ||
533 | break; | ||
534 | case SPEED_1000: | ||
535 | ctl |= BMCR_SPD2; | ||
536 | } | ||
537 | if (fd == DUPLEX_FULL) | ||
538 | ctl |= BMCR_FULLDPLX; | ||
539 | |||
540 | // XXX Should we set the sungem to GII now on 1000BT ? | ||
541 | |||
542 | phy_write(phy, MII_BMCR, ctl); | ||
543 | |||
544 | return 0; | ||
545 | } | ||
546 | |||
547 | static int bcm54xx_read_link(struct mii_phy *phy) | ||
548 | { | ||
549 | int link_mode; | ||
550 | u16 val; | ||
551 | |||
552 | if (phy->autoneg) { | ||
553 | val = phy_read(phy, MII_BCM5400_AUXSTATUS); | ||
554 | link_mode = ((val & MII_BCM5400_AUXSTATUS_LINKMODE_MASK) >> | ||
555 | MII_BCM5400_AUXSTATUS_LINKMODE_SHIFT); | ||
556 | phy->duplex = phy_BCM5400_link_table[link_mode][0] ? | ||
557 | DUPLEX_FULL : DUPLEX_HALF; | ||
558 | phy->speed = phy_BCM5400_link_table[link_mode][2] ? | ||
559 | SPEED_1000 : | ||
560 | (phy_BCM5400_link_table[link_mode][1] ? | ||
561 | SPEED_100 : SPEED_10); | ||
562 | val = phy_read(phy, MII_LPA); | ||
563 | phy->pause = (phy->duplex == DUPLEX_FULL) && | ||
564 | ((val & LPA_PAUSE) != 0); | ||
565 | } | ||
566 | /* On non-aneg, we assume what we put in BMCR is the speed, | ||
567 | * though magic-aneg shouldn't prevent this case from occurring | ||
568 | */ | ||
569 | |||
570 | return 0; | ||
571 | } | ||
572 | |||
573 | static int marvell88e1111_init(struct mii_phy* phy) | ||
574 | { | ||
575 | u16 rev; | ||
576 | |||
577 | /* magic init sequence for rev 0 */ | ||
578 | rev = phy_read(phy, MII_PHYSID2) & 0x000f; | ||
579 | if (rev == 0) { | ||
580 | phy_write(phy, 0x1d, 0x000a); | ||
581 | phy_write(phy, 0x1e, 0x0821); | ||
582 | |||
583 | phy_write(phy, 0x1d, 0x0006); | ||
584 | phy_write(phy, 0x1e, 0x8600); | ||
585 | |||
586 | phy_write(phy, 0x1d, 0x000b); | ||
587 | phy_write(phy, 0x1e, 0x0100); | ||
588 | |||
589 | phy_write(phy, 0x1d, 0x0004); | ||
590 | phy_write(phy, 0x1e, 0x4850); | ||
591 | } | ||
592 | return 0; | ||
593 | } | ||
594 | |||
595 | #define BCM5421_MODE_MASK (1 << 5) | ||
596 | |||
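/* The MII_NCONFIG (register 0x1c) writes below appear to select one of
 * the BCM54xx vendor shadow registers, with the following read returning
 * that shadow's contents; the raw selector values (0x1000, 0x2000) are
 * kept as-is because the shadow map isn't documented here.
 */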
597 | static int bcm5421_poll_link(struct mii_phy* phy) | ||
598 | { | ||
599 | u32 phy_reg; | ||
600 | int mode; | ||
601 | |||
602 | /* find out in what mode we are */ | ||
603 | phy_write(phy, MII_NCONFIG, 0x1000); | ||
604 | phy_reg = phy_read(phy, MII_NCONFIG); | ||
605 | |||
606 | mode = (phy_reg & BCM5421_MODE_MASK) >> 5; | ||
607 | |||
608 | if (mode == BCM54XX_COPPER) | ||
609 | return genmii_poll_link(phy); | ||
610 | |||
611 | /* try to find out whether we have a link */ | ||
612 | phy_write(phy, MII_NCONFIG, 0x2000); | ||
613 | phy_reg = phy_read(phy, MII_NCONFIG); | ||
614 | |||
615 | if (phy_reg & 0x0020) | ||
616 | return 0; | ||
617 | else | ||
618 | return 1; | ||
619 | } | ||
620 | |||
621 | static int bcm5421_read_link(struct mii_phy* phy) | ||
622 | { | ||
623 | u32 phy_reg; | ||
624 | int mode; | ||
625 | |||
626 | /* find out in what mode we are */ | ||
627 | phy_write(phy, MII_NCONFIG, 0x1000); | ||
628 | phy_reg = phy_read(phy, MII_NCONFIG); | ||
629 | |||
630 | mode = (phy_reg & BCM5421_MODE_MASK) >> 5; | ||
631 | |||
632 | if (mode == BCM54XX_COPPER) | ||
633 | return bcm54xx_read_link(phy); | ||
634 | |||
635 | phy->speed = SPEED_1000; | ||
636 | |||
637 | /* find out whether we are running half or full duplex */ | ||
638 | phy_write(phy, MII_NCONFIG, 0x2000); | ||
639 | phy_reg = phy_read(phy, MII_NCONFIG); | ||
640 | |||
641 | if ((phy_reg & 0x0080) >> 7) | ||
642 | phy->duplex = DUPLEX_HALF; | ||
643 | else | ||
644 | phy->duplex = DUPLEX_FULL; | ||
645 | |||
646 | return 0; | ||
647 | } | ||
648 | |||
649 | static int bcm5421_enable_fiber(struct mii_phy* phy, int autoneg) | ||
650 | { | ||
651 | /* enable fiber mode */ | ||
652 | phy_write(phy, MII_NCONFIG, 0x9020); | ||
653 | /* LEDs active in both modes, autosense prio = fiber */ | ||
654 | phy_write(phy, MII_NCONFIG, 0x945f); | ||
655 | |||
656 | if (!autoneg) { | ||
657 | /* switch off fibre autoneg */ | ||
658 | phy_write(phy, MII_NCONFIG, 0xfc01); | ||
659 | phy_write(phy, 0x0b, 0x0004); | ||
660 | } | ||
661 | |||
662 | phy->autoneg = autoneg; | ||
663 | |||
664 | return 0; | ||
665 | } | ||
666 | |||
667 | #define BCM5461_FIBER_LINK (1 << 2) | ||
668 | #define BCM5461_MODE_MASK (3 << 1) | ||
669 | |||
670 | static int bcm5461_poll_link(struct mii_phy* phy) | ||
671 | { | ||
672 | u32 phy_reg; | ||
673 | int mode; | ||
674 | |||
675 | /* find out in what mode we are */ | ||
676 | phy_write(phy, MII_NCONFIG, 0x7c00); | ||
677 | phy_reg = phy_read(phy, MII_NCONFIG); | ||
678 | |||
679 | mode = (phy_reg & BCM5461_MODE_MASK) >> 1; | ||
680 | |||
681 | if (mode == BCM54XX_COPPER) | ||
682 | return genmii_poll_link(phy); | ||
683 | |||
684 | /* find out whether we have a link */ | ||
685 | phy_write(phy, MII_NCONFIG, 0x7000); | ||
686 | phy_reg = phy_read(phy, MII_NCONFIG); | ||
687 | |||
688 | if (phy_reg & BCM5461_FIBER_LINK) | ||
689 | return 1; | ||
690 | else | ||
691 | return 0; | ||
692 | } | ||
693 | |||
694 | #define BCM5461_FIBER_DUPLEX (1 << 3) | ||
695 | |||
696 | static int bcm5461_read_link(struct mii_phy* phy) | ||
697 | { | ||
698 | u32 phy_reg; | ||
699 | int mode; | ||
700 | |||
701 | /* find out in what mode we are */ | ||
702 | phy_write(phy, MII_NCONFIG, 0x7c00); | ||
703 | phy_reg = phy_read(phy, MII_NCONFIG); | ||
704 | |||
705 | mode = (phy_reg & BCM5461_MODE_MASK) >> 1; | ||
706 | |||
707 | if (mode == BCM54XX_COPPER) { | ||
708 | return bcm54xx_read_link(phy); | ||
709 | } | ||
710 | |||
711 | phy->speed = SPEED_1000; | ||
712 | |||
713 | /* find out whether we are running half or full duplex */ | ||
714 | phy_write(phy, MII_NCONFIG, 0x7000); | ||
715 | phy_reg = phy_read(phy, MII_NCONFIG); | ||
716 | |||
717 | if (phy_reg & BCM5461_FIBER_DUPLEX) | ||
718 | phy->duplex = DUPLEX_FULL; | ||
719 | else | ||
720 | phy->duplex = DUPLEX_HALF; | ||
721 | |||
722 | return 0; | ||
723 | } | ||
724 | |||
725 | static int bcm5461_enable_fiber(struct mii_phy* phy, int autoneg) | ||
726 | { | ||
727 | /* select fiber mode, enable 1000 base-X registers */ | ||
728 | phy_write(phy, MII_NCONFIG, 0xfc0b); | ||
729 | |||
730 | if (autoneg) { | ||
731 | /* enable fiber with autonegotiation */ | ||
732 | phy_write(phy, MII_ADVERTISE, 0x01e0); | ||
733 | phy_write(phy, MII_BMCR, 0x1140); | ||
734 | } else { | ||
735 | /* enable fiber without autonegotiation */ | ||
736 | phy_write(phy, MII_BMCR, 0x0140); | ||
737 | } | ||
738 | |||
739 | phy->autoneg = autoneg; | ||
740 | |||
741 | return 0; | ||
742 | } | ||
743 | |||
744 | static int marvell_setup_aneg(struct mii_phy *phy, u32 advertise) | ||
745 | { | ||
746 | u16 ctl, adv; | ||
747 | |||
748 | phy->autoneg = 1; | ||
749 | phy->speed = SPEED_10; | ||
750 | phy->duplex = DUPLEX_HALF; | ||
751 | phy->pause = 0; | ||
752 | phy->advertising = advertise; | ||
753 | |||
754 | /* Setup standard advertise */ | ||
755 | adv = phy_read(phy, MII_ADVERTISE); | ||
756 | adv &= ~(ADVERTISE_ALL | ADVERTISE_100BASE4); | ||
757 | if (advertise & ADVERTISED_10baseT_Half) | ||
758 | adv |= ADVERTISE_10HALF; | ||
759 | if (advertise & ADVERTISED_10baseT_Full) | ||
760 | adv |= ADVERTISE_10FULL; | ||
761 | if (advertise & ADVERTISED_100baseT_Half) | ||
762 | adv |= ADVERTISE_100HALF; | ||
763 | if (advertise & ADVERTISED_100baseT_Full) | ||
764 | adv |= ADVERTISE_100FULL; | ||
765 | if (advertise & ADVERTISED_Pause) | ||
766 | adv |= ADVERTISE_PAUSE_CAP; | ||
767 | if (advertise & ADVERTISED_Asym_Pause) | ||
768 | adv |= ADVERTISE_PAUSE_ASYM; | ||
769 | phy_write(phy, MII_ADVERTISE, adv); | ||
770 | |||
771 | /* Setup 1000BT advertise & enable crossover detect | ||
772 | * XXX How do we advertise 1000BT? The Darwin source is | ||
773 | * confusing here: they read from the specific control | ||
774 | * register and write to the control register... Does | ||
775 | * anyone have specs for these beasts? | ||
776 | */ | ||
777 | adv = phy_read(phy, MII_M1011_PHY_SPEC_CONTROL); | ||
778 | adv |= MII_M1011_PHY_SPEC_CONTROL_AUTO_MDIX; | ||
779 | adv &= ~(MII_1000BASETCONTROL_FULLDUPLEXCAP | | ||
780 | MII_1000BASETCONTROL_HALFDUPLEXCAP); | ||
781 | if (advertise & SUPPORTED_1000baseT_Half) | ||
782 | adv |= MII_1000BASETCONTROL_HALFDUPLEXCAP; | ||
783 | if (advertise & SUPPORTED_1000baseT_Full) | ||
784 | adv |= MII_1000BASETCONTROL_FULLDUPLEXCAP; | ||
785 | phy_write(phy, MII_1000BASETCONTROL, adv); | ||
786 | |||
787 | /* Start/Restart aneg */ | ||
788 | ctl = phy_read(phy, MII_BMCR); | ||
789 | ctl |= (BMCR_ANENABLE | BMCR_ANRESTART); | ||
790 | phy_write(phy, MII_BMCR, ctl); | ||
791 | |||
792 | return 0; | ||
793 | } | ||
794 | |||
795 | static int marvell_setup_forced(struct mii_phy *phy, int speed, int fd) | ||
796 | { | ||
797 | u16 ctl, ctl2; | ||
798 | |||
799 | phy->autoneg = 0; | ||
800 | phy->speed = speed; | ||
801 | phy->duplex = fd; | ||
802 | phy->pause = 0; | ||
803 | |||
804 | ctl = phy_read(phy, MII_BMCR); | ||
805 | ctl &= ~(BMCR_FULLDPLX|BMCR_SPEED100|BMCR_SPD2|BMCR_ANENABLE); | ||
806 | ctl |= BMCR_RESET; | ||
807 | |||
808 | /* Select speed & duplex */ | ||
809 | switch(speed) { | ||
810 | case SPEED_10: | ||
811 | break; | ||
812 | case SPEED_100: | ||
813 | ctl |= BMCR_SPEED100; | ||
814 | break; | ||
815 | /* I'm not sure about the one below, again, Darwin source is | ||
816 | * quite confusing and I lack chip specs | ||
817 | */ | ||
818 | case SPEED_1000: | ||
819 | ctl |= BMCR_SPD2; | ||
820 | } | ||
821 | if (fd == DUPLEX_FULL) | ||
822 | ctl |= BMCR_FULLDPLX; | ||
823 | |||
824 | /* Disable crossover. Again, the way Apple does it is strange, | ||
825 | * though I don't assume they are wrong ;) | ||
826 | */ | ||
827 | ctl2 = phy_read(phy, MII_M1011_PHY_SPEC_CONTROL); | ||
828 | ctl2 &= ~(MII_M1011_PHY_SPEC_CONTROL_MANUAL_MDIX | | ||
829 | MII_M1011_PHY_SPEC_CONTROL_AUTO_MDIX | | ||
830 | MII_1000BASETCONTROL_FULLDUPLEXCAP | | ||
831 | MII_1000BASETCONTROL_HALFDUPLEXCAP); | ||
832 | if (speed == SPEED_1000) | ||
833 | ctl2 |= (fd == DUPLEX_FULL) ? | ||
834 | MII_1000BASETCONTROL_FULLDUPLEXCAP : | ||
835 | MII_1000BASETCONTROL_HALFDUPLEXCAP; | ||
836 | phy_write(phy, MII_1000BASETCONTROL, ctl2); | ||
837 | |||
838 | // XXX Should we set the sungem to GII now on 1000BT ? | ||
839 | |||
840 | phy_write(phy, MII_BMCR, ctl); | ||
841 | |||
842 | return 0; | ||
843 | } | ||
844 | |||
845 | static int marvell_read_link(struct mii_phy *phy) | ||
846 | { | ||
847 | u16 status, pmask; | ||
848 | |||
849 | if (phy->autoneg) { | ||
850 | status = phy_read(phy, MII_M1011_PHY_SPEC_STATUS); | ||
851 | if ((status & MII_M1011_PHY_SPEC_STATUS_RESOLVED) == 0) | ||
852 | return -EAGAIN; | ||
853 | if (status & MII_M1011_PHY_SPEC_STATUS_1000) | ||
854 | phy->speed = SPEED_1000; | ||
855 | else if (status & MII_M1011_PHY_SPEC_STATUS_100) | ||
856 | phy->speed = SPEED_100; | ||
857 | else | ||
858 | phy->speed = SPEED_10; | ||
859 | if (status & MII_M1011_PHY_SPEC_STATUS_FULLDUPLEX) | ||
860 | phy->duplex = DUPLEX_FULL; | ||
861 | else | ||
862 | phy->duplex = DUPLEX_HALF; | ||
863 | pmask = MII_M1011_PHY_SPEC_STATUS_TX_PAUSE | | ||
864 | MII_M1011_PHY_SPEC_STATUS_RX_PAUSE; | ||
865 | phy->pause = (status & pmask) == pmask; | ||
866 | } | ||
867 | /* On non-aneg, we assume what we put in BMCR is the speed, | ||
868 | * though magic-aneg shouldn't prevent this case from occurring | ||
869 | */ | ||
870 | |||
871 | return 0; | ||
872 | } | ||
873 | |||
874 | #define MII_BASIC_FEATURES \ | ||
875 | (SUPPORTED_10baseT_Half | SUPPORTED_10baseT_Full | \ | ||
876 | SUPPORTED_100baseT_Half | SUPPORTED_100baseT_Full | \ | ||
877 | SUPPORTED_Autoneg | SUPPORTED_TP | SUPPORTED_MII | \ | ||
878 | SUPPORTED_Pause) | ||
879 | |||
880 | /* On gigabit capable PHYs, we advertise Pause support but not asym pause | ||
881 | * support for now, as I'm not sure it's supported and Darwin | ||
882 | * doesn't do it either. --BenH. | ||
883 | */ | ||
884 | #define MII_GBIT_FEATURES \ | ||
885 | (MII_BASIC_FEATURES | \ | ||
886 | SUPPORTED_1000baseT_Half | SUPPORTED_1000baseT_Full) | ||
887 | |||
888 | /* Broadcom BCM 5201 */ | ||
889 | static struct mii_phy_ops bcm5201_phy_ops = { | ||
890 | .init = bcm5201_init, | ||
891 | .suspend = bcm5201_suspend, | ||
892 | .setup_aneg = genmii_setup_aneg, | ||
893 | .setup_forced = genmii_setup_forced, | ||
894 | .poll_link = genmii_poll_link, | ||
895 | .read_link = genmii_read_link, | ||
896 | }; | ||
897 | |||
898 | static struct mii_phy_def bcm5201_phy_def = { | ||
899 | .phy_id = 0x00406210, | ||
900 | .phy_id_mask = 0xfffffff0, | ||
901 | .name = "BCM5201", | ||
902 | .features = MII_BASIC_FEATURES, | ||
903 | .magic_aneg = 1, | ||
904 | .ops = &bcm5201_phy_ops | ||
905 | }; | ||
906 | |||
907 | /* Broadcom BCM 5221 */ | ||
908 | static struct mii_phy_ops bcm5221_phy_ops = { | ||
909 | .suspend = bcm5221_suspend, | ||
910 | .init = bcm5221_init, | ||
911 | .setup_aneg = genmii_setup_aneg, | ||
912 | .setup_forced = genmii_setup_forced, | ||
913 | .poll_link = genmii_poll_link, | ||
914 | .read_link = genmii_read_link, | ||
915 | }; | ||
916 | |||
917 | static struct mii_phy_def bcm5221_phy_def = { | ||
918 | .phy_id = 0x004061e0, | ||
919 | .phy_id_mask = 0xfffffff0, | ||
920 | .name = "BCM5221", | ||
921 | .features = MII_BASIC_FEATURES, | ||
922 | .magic_aneg = 1, | ||
923 | .ops = &bcm5221_phy_ops | ||
924 | }; | ||
925 | |||
926 | /* Broadcom BCM 5241 */ | ||
927 | static struct mii_phy_ops bcm5241_phy_ops = { | ||
928 | .suspend = bcm5241_suspend, | ||
929 | .init = bcm5241_init, | ||
930 | .setup_aneg = genmii_setup_aneg, | ||
931 | .setup_forced = genmii_setup_forced, | ||
932 | .poll_link = genmii_poll_link, | ||
933 | .read_link = genmii_read_link, | ||
934 | }; | ||
935 | static struct mii_phy_def bcm5241_phy_def = { | ||
936 | .phy_id = 0x0143bc30, | ||
937 | .phy_id_mask = 0xfffffff0, | ||
938 | .name = "BCM5241", | ||
939 | .features = MII_BASIC_FEATURES, | ||
940 | .magic_aneg = 1, | ||
941 | .ops = &bcm5241_phy_ops | ||
942 | }; | ||
943 | |||
944 | /* Broadcom BCM 5400 */ | ||
945 | static struct mii_phy_ops bcm5400_phy_ops = { | ||
946 | .init = bcm5400_init, | ||
947 | .suspend = bcm5400_suspend, | ||
948 | .setup_aneg = bcm54xx_setup_aneg, | ||
949 | .setup_forced = bcm54xx_setup_forced, | ||
950 | .poll_link = genmii_poll_link, | ||
951 | .read_link = bcm54xx_read_link, | ||
952 | }; | ||
953 | |||
954 | static struct mii_phy_def bcm5400_phy_def = { | ||
955 | .phy_id = 0x00206040, | ||
956 | .phy_id_mask = 0xfffffff0, | ||
957 | .name = "BCM5400", | ||
958 | .features = MII_GBIT_FEATURES, | ||
959 | .magic_aneg = 1, | ||
960 | .ops = &bcm5400_phy_ops | ||
961 | }; | ||
962 | |||
963 | /* Broadcom BCM 5401 */ | ||
964 | static struct mii_phy_ops bcm5401_phy_ops = { | ||
965 | .init = bcm5401_init, | ||
966 | .suspend = bcm5401_suspend, | ||
967 | .setup_aneg = bcm54xx_setup_aneg, | ||
968 | .setup_forced = bcm54xx_setup_forced, | ||
969 | .poll_link = genmii_poll_link, | ||
970 | .read_link = bcm54xx_read_link, | ||
971 | }; | ||
972 | |||
973 | static struct mii_phy_def bcm5401_phy_def = { | ||
974 | .phy_id = 0x00206050, | ||
975 | .phy_id_mask = 0xfffffff0, | ||
976 | .name = "BCM5401", | ||
977 | .features = MII_GBIT_FEATURES, | ||
978 | .magic_aneg = 1, | ||
979 | .ops = &bcm5401_phy_ops | ||
980 | }; | ||
981 | |||
982 | /* Broadcom BCM 5411 */ | ||
983 | static struct mii_phy_ops bcm5411_phy_ops = { | ||
984 | .init = bcm5411_init, | ||
985 | .suspend = generic_suspend, | ||
986 | .setup_aneg = bcm54xx_setup_aneg, | ||
987 | .setup_forced = bcm54xx_setup_forced, | ||
988 | .poll_link = genmii_poll_link, | ||
989 | .read_link = bcm54xx_read_link, | ||
990 | }; | ||
991 | |||
992 | static struct mii_phy_def bcm5411_phy_def = { | ||
993 | .phy_id = 0x00206070, | ||
994 | .phy_id_mask = 0xfffffff0, | ||
995 | .name = "BCM5411", | ||
996 | .features = MII_GBIT_FEATURES, | ||
997 | .magic_aneg = 1, | ||
998 | .ops = &bcm5411_phy_ops | ||
999 | }; | ||
1000 | |||
1001 | /* Broadcom BCM 5421 */ | ||
1002 | static struct mii_phy_ops bcm5421_phy_ops = { | ||
1003 | .init = bcm5421_init, | ||
1004 | .suspend = generic_suspend, | ||
1005 | .setup_aneg = bcm54xx_setup_aneg, | ||
1006 | .setup_forced = bcm54xx_setup_forced, | ||
1007 | .poll_link = bcm5421_poll_link, | ||
1008 | .read_link = bcm5421_read_link, | ||
1009 | .enable_fiber = bcm5421_enable_fiber, | ||
1010 | }; | ||
1011 | |||
1012 | static struct mii_phy_def bcm5421_phy_def = { | ||
1013 | .phy_id = 0x002060e0, | ||
1014 | .phy_id_mask = 0xfffffff0, | ||
1015 | .name = "BCM5421", | ||
1016 | .features = MII_GBIT_FEATURES, | ||
1017 | .magic_aneg = 1, | ||
1018 | .ops = &bcm5421_phy_ops | ||
1019 | }; | ||
1020 | |||
1021 | /* Broadcom BCM 5421 built-in K2 */ | ||
1022 | static struct mii_phy_ops bcm5421k2_phy_ops = { | ||
1023 | .init = bcm5421_init, | ||
1024 | .suspend = generic_suspend, | ||
1025 | .setup_aneg = bcm54xx_setup_aneg, | ||
1026 | .setup_forced = bcm54xx_setup_forced, | ||
1027 | .poll_link = genmii_poll_link, | ||
1028 | .read_link = bcm54xx_read_link, | ||
1029 | }; | ||
1030 | |||
1031 | static struct mii_phy_def bcm5421k2_phy_def = { | ||
1032 | .phy_id = 0x002062e0, | ||
1033 | .phy_id_mask = 0xfffffff0, | ||
1034 | .name = "BCM5421-K2", | ||
1035 | .features = MII_GBIT_FEATURES, | ||
1036 | .magic_aneg = 1, | ||
1037 | .ops = &bcm5421k2_phy_ops | ||
1038 | }; | ||
1039 | |||
1040 | static struct mii_phy_ops bcm5461_phy_ops = { | ||
1041 | .init = bcm5421_init, | ||
1042 | .suspend = generic_suspend, | ||
1043 | .setup_aneg = bcm54xx_setup_aneg, | ||
1044 | .setup_forced = bcm54xx_setup_forced, | ||
1045 | .poll_link = bcm5461_poll_link, | ||
1046 | .read_link = bcm5461_read_link, | ||
1047 | .enable_fiber = bcm5461_enable_fiber, | ||
1048 | }; | ||
1049 | |||
1050 | static struct mii_phy_def bcm5461_phy_def = { | ||
1051 | .phy_id = 0x002060c0, | ||
1052 | .phy_id_mask = 0xfffffff0, | ||
1053 | .name = "BCM5461", | ||
1054 | .features = MII_GBIT_FEATURES, | ||
1055 | .magic_aneg = 1, | ||
1056 | .ops = &bcm5461_phy_ops | ||
1057 | }; | ||
1058 | |||
1059 | /* Broadcom BCM 5462 built-in Vesta */ | ||
1060 | static struct mii_phy_ops bcm5462V_phy_ops = { | ||
1061 | .init = bcm5421_init, | ||
1062 | .suspend = generic_suspend, | ||
1063 | .setup_aneg = bcm54xx_setup_aneg, | ||
1064 | .setup_forced = bcm54xx_setup_forced, | ||
1065 | .poll_link = genmii_poll_link, | ||
1066 | .read_link = bcm54xx_read_link, | ||
1067 | }; | ||
1068 | |||
1069 | static struct mii_phy_def bcm5462V_phy_def = { | ||
1070 | .phy_id = 0x002060d0, | ||
1071 | .phy_id_mask = 0xfffffff0, | ||
1072 | .name = "BCM5462-Vesta", | ||
1073 | .features = MII_GBIT_FEATURES, | ||
1074 | .magic_aneg = 1, | ||
1075 | .ops = &bcm5462V_phy_ops | ||
1076 | }; | ||
1077 | |||
1078 | /* Marvell 88E1101 and 88E1111 */ | ||
1079 | static struct mii_phy_ops marvell88e1101_phy_ops = { | ||
1080 | .suspend = generic_suspend, | ||
1081 | .setup_aneg = marvell_setup_aneg, | ||
1082 | .setup_forced = marvell_setup_forced, | ||
1083 | .poll_link = genmii_poll_link, | ||
1084 | .read_link = marvell_read_link | ||
1085 | }; | ||
1086 | |||
1087 | static struct mii_phy_ops marvell88e1111_phy_ops = { | ||
1088 | .init = marvell88e1111_init, | ||
1089 | .suspend = generic_suspend, | ||
1090 | .setup_aneg = marvell_setup_aneg, | ||
1091 | .setup_forced = marvell_setup_forced, | ||
1092 | .poll_link = genmii_poll_link, | ||
1093 | .read_link = marvell_read_link | ||
1094 | }; | ||
1095 | |||
1096 | /* two revs in Darwin for the 88e1101 ... I could use a datasheet | ||
1097 | * to get the proper names... | ||
1098 | */ | ||
1099 | static struct mii_phy_def marvell88e1101v1_phy_def = { | ||
1100 | .phy_id = 0x01410c20, | ||
1101 | .phy_id_mask = 0xfffffff0, | ||
1102 | .name = "Marvell 88E1101v1", | ||
1103 | .features = MII_GBIT_FEATURES, | ||
1104 | .magic_aneg = 1, | ||
1105 | .ops = &marvell88e1101_phy_ops | ||
1106 | }; | ||
1107 | static struct mii_phy_def marvell88e1101v2_phy_def = { | ||
1108 | .phy_id = 0x01410c60, | ||
1109 | .phy_id_mask = 0xfffffff0, | ||
1110 | .name = "Marvell 88E1101v2", | ||
1111 | .features = MII_GBIT_FEATURES, | ||
1112 | .magic_aneg = 1, | ||
1113 | .ops = &marvell88e1101_phy_ops | ||
1114 | }; | ||
1115 | static struct mii_phy_def marvell88e1111_phy_def = { | ||
1116 | .phy_id = 0x01410cc0, | ||
1117 | .phy_id_mask = 0xfffffff0, | ||
1118 | .name = "Marvell 88E1111", | ||
1119 | .features = MII_GBIT_FEATURES, | ||
1120 | .magic_aneg = 1, | ||
1121 | .ops = &marvell88e1111_phy_ops | ||
1122 | }; | ||
1123 | |||
1124 | /* Generic implementation for most 10/100 PHYs */ | ||
1125 | static struct mii_phy_ops generic_phy_ops = { | ||
1126 | .setup_aneg = genmii_setup_aneg, | ||
1127 | .setup_forced = genmii_setup_forced, | ||
1128 | .poll_link = genmii_poll_link, | ||
1129 | .read_link = genmii_read_link | ||
1130 | }; | ||
1131 | |||
1132 | static struct mii_phy_def genmii_phy_def = { | ||
1133 | .phy_id = 0x00000000, | ||
1134 | .phy_id_mask = 0x00000000, | ||
1135 | .name = "Generic MII", | ||
1136 | .features = MII_BASIC_FEATURES, | ||
1137 | .magic_aneg = 0, | ||
1138 | .ops = &generic_phy_ops | ||
1139 | }; | ||
1140 | |||
1141 | static struct mii_phy_def* mii_phy_table[] = { | ||
1142 | &bcm5201_phy_def, | ||
1143 | &bcm5221_phy_def, | ||
1144 | &bcm5241_phy_def, | ||
1145 | &bcm5400_phy_def, | ||
1146 | &bcm5401_phy_def, | ||
1147 | &bcm5411_phy_def, | ||
1148 | &bcm5421_phy_def, | ||
1149 | &bcm5421k2_phy_def, | ||
1150 | &bcm5461_phy_def, | ||
1151 | &bcm5462V_phy_def, | ||
1152 | &marvell88e1101v1_phy_def, | ||
1153 | &marvell88e1101v2_phy_def, | ||
1154 | &marvell88e1111_phy_def, | ||
1155 | &genmii_phy_def, | ||
1156 | NULL | ||
1157 | }; | ||
1158 | |||
1159 | int mii_phy_probe(struct mii_phy *phy, int mii_id) | ||
1160 | { | ||
1161 | int rc; | ||
1162 | u32 id; | ||
1163 | struct mii_phy_def* def; | ||
1164 | int i; | ||
1165 | |||
1166 | /* We do not reset the mii_phy structure as the driver | ||
1167 | * may re-probe the PHY regularly | ||
1168 | */ | ||
1169 | phy->mii_id = mii_id; | ||
1170 | |||
1171 | /* Take PHY out of isolate mode and reset it. */ | ||
1172 | rc = reset_one_mii_phy(phy, mii_id); | ||
1173 | if (rc) | ||
1174 | goto fail; | ||
1175 | |||
1176 | /* Read ID and find matching entry */ | ||
1177 | id = (phy_read(phy, MII_PHYSID1) << 16 | phy_read(phy, MII_PHYSID2)); | ||
1178 | printk(KERN_DEBUG KBUILD_MODNAME ": " "PHY ID: %x, addr: %x\n", | ||
1179 | id, mii_id); | ||
1180 | for (i=0; (def = mii_phy_table[i]) != NULL; i++) | ||
1181 | if ((id & def->phy_id_mask) == def->phy_id) | ||
1182 | break; | ||
1183 | /* Should never be NULL (we have a generic entry), but... */ | ||
1184 | if (def == NULL) | ||
1185 | goto fail; | ||
1186 | |||
1187 | phy->def = def; | ||
1188 | |||
1189 | return 0; | ||
1190 | fail: | ||
1191 | phy->speed = 0; | ||
1192 | phy->duplex = 0; | ||
1193 | phy->pause = 0; | ||
1194 | phy->advertising = 0; | ||
1195 | return -ENODEV; | ||
1196 | } | ||
1197 | |||
1198 | EXPORT_SYMBOL(mii_phy_probe); | ||
1199 | MODULE_LICENSE("GPL"); | ||
1200 | |||
diff --git a/drivers/net/ethernet/sun/sungem_phy.h b/drivers/net/ethernet/sun/sungem_phy.h new file mode 100644 index 000000000000..af02f9479cbb --- /dev/null +++ b/drivers/net/ethernet/sun/sungem_phy.h | |||
@@ -0,0 +1,132 @@ | |||
1 | #ifndef __SUNGEM_PHY_H__ | ||
2 | #define __SUNGEM_PHY_H__ | ||
3 | |||
4 | struct mii_phy; | ||
5 | |||
6 | /* Operations supported by any kind of PHY */ | ||
7 | struct mii_phy_ops | ||
8 | { | ||
9 | int (*init)(struct mii_phy *phy); | ||
10 | int (*suspend)(struct mii_phy *phy); | ||
11 | int (*setup_aneg)(struct mii_phy *phy, u32 advertise); | ||
12 | int (*setup_forced)(struct mii_phy *phy, int speed, int fd); | ||
13 | int (*poll_link)(struct mii_phy *phy); | ||
14 | int (*read_link)(struct mii_phy *phy); | ||
15 | int (*enable_fiber)(struct mii_phy *phy, int autoneg); | ||
16 | }; | ||
17 | |||
18 | /* Structure used to statically define an mii/gii based PHY */ | ||
19 | struct mii_phy_def | ||
20 | { | ||
21 | u32 phy_id; /* Concatenated ID1 << 16 | ID2 */ | ||
22 | u32 phy_id_mask; /* Significant bits */ | ||
23 | u32 features; /* Ethtool SUPPORTED_* defines */ | ||
24 | int magic_aneg; /* Autoneg does all speed test for us */ | ||
25 | const char* name; | ||
26 | const struct mii_phy_ops* ops; | ||
27 | }; | ||
28 | |||
29 | enum { | ||
30 | BCM54XX_COPPER, | ||
31 | BCM54XX_FIBER, | ||
32 | BCM54XX_GBIC, | ||
33 | BCM54XX_SGMII, | ||
34 | BCM54XX_UNKNOWN, | ||
35 | }; | ||
36 | |||
37 | /* An instance of a PHY, partially borrowed from mii_if_info */ | ||
38 | struct mii_phy | ||
39 | { | ||
40 | struct mii_phy_def* def; | ||
41 | u32 advertising; | ||
42 | int mii_id; | ||
43 | |||
44 | /* 1: autoneg enabled, 0: disabled */ | ||
45 | int autoneg; | ||
46 | |||
47 | /* forced speed & duplex (no autoneg) | ||
48 | * partner speed & duplex & pause (autoneg) | ||
49 | */ | ||
50 | int speed; | ||
51 | int duplex; | ||
52 | int pause; | ||
53 | |||
54 | /* Provided by host chip */ | ||
55 | struct net_device *dev; | ||
56 | int (*mdio_read) (struct net_device *dev, int mii_id, int reg); | ||
57 | void (*mdio_write) (struct net_device *dev, int mii_id, int reg, int val); | ||
58 | void *platform_data; | ||
59 | }; | ||
60 | |||
61 | /* Pass in a struct mii_phy with dev, mdio_read and mdio_write | ||
62 | * filled, the remaining fields will be filled on return | ||
63 | */ | ||
64 | extern int mii_phy_probe(struct mii_phy *phy, int mii_id); | ||
65 | |||
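/* A minimal usage sketch (illustrative only; my_netdev, my_mdio_read and
 * my_mdio_write are hypothetical names for the host driver's accessors):
 *
 *	static struct mii_phy phy;
 *
 *	phy.dev        = my_netdev;
 *	phy.mdio_read  = my_mdio_read;
 *	phy.mdio_write = my_mdio_write;
 *	if (mii_phy_probe(&phy, mii_id) == 0)
 *		// e.g. advertise everything the matched PHY supports
 *		phy.def->ops->setup_aneg(&phy, phy.def->features);
 */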
66 | |||
67 | /* MII definitions missing from mii.h */ | ||
68 | |||
69 | #define BMCR_SPD2 0x0040 /* Gigabit enable (bcm54xx) */ | ||
70 | #define LPA_PAUSE 0x0400 | ||
71 | |||
72 | /* More PHY registers (model specific) */ | ||
73 | |||
74 | /* MII BCM5201 MULTIPHY interrupt register */ | ||
75 | #define MII_BCM5201_INTERRUPT 0x1A | ||
76 | #define MII_BCM5201_INTERRUPT_INTENABLE 0x4000 | ||
77 | |||
78 | #define MII_BCM5201_AUXMODE2 0x1B | ||
79 | #define MII_BCM5201_AUXMODE2_LOWPOWER 0x0008 | ||
80 | |||
81 | #define MII_BCM5201_MULTIPHY 0x1E | ||
82 | |||
83 | /* MII BCM5201 MULTIPHY register bits */ | ||
84 | #define MII_BCM5201_MULTIPHY_SERIALMODE 0x0002 | ||
85 | #define MII_BCM5201_MULTIPHY_SUPERISOLATE 0x0008 | ||
86 | |||
87 | /* MII BCM5221 Additional registers */ | ||
88 | #define MII_BCM5221_TEST 0x1f | ||
89 | #define MII_BCM5221_TEST_ENABLE_SHADOWS 0x0080 | ||
90 | #define MII_BCM5221_SHDOW_AUX_STAT2 0x1b | ||
91 | #define MII_BCM5221_SHDOW_AUX_STAT2_APD 0x0020 | ||
92 | #define MII_BCM5221_SHDOW_AUX_MODE4 0x1a | ||
93 | #define MII_BCM5221_SHDOW_AUX_MODE4_IDDQMODE 0x0001 | ||
94 | #define MII_BCM5221_SHDOW_AUX_MODE4_CLKLOPWR 0x0004 | ||
95 | |||
96 | /* MII BCM5241 Additional registers */ | ||
97 | #define MII_BCM5241_SHDOW_AUX_MODE4_STANDBYPWR 0x0008 | ||
98 | |||
99 | /* MII BCM5400 1000-BASET Control register */ | ||
100 | #define MII_BCM5400_GB_CONTROL 0x09 | ||
101 | #define MII_BCM5400_GB_CONTROL_FULLDUPLEXCAP 0x0200 | ||
102 | |||
103 | /* MII BCM5400 AUXCONTROL register */ | ||
104 | #define MII_BCM5400_AUXCONTROL 0x18 | ||
105 | #define MII_BCM5400_AUXCONTROL_PWR10BASET 0x0004 | ||
106 | |||
107 | /* MII BCM5400 AUXSTATUS register */ | ||
108 | #define MII_BCM5400_AUXSTATUS 0x19 | ||
109 | #define MII_BCM5400_AUXSTATUS_LINKMODE_MASK 0x0700 | ||
110 | #define MII_BCM5400_AUXSTATUS_LINKMODE_SHIFT 8 | ||
111 | |||
112 | /* 1000BT control (Marvell & BCM54xx at least) */ | ||
113 | #define MII_1000BASETCONTROL 0x09 | ||
114 | #define MII_1000BASETCONTROL_FULLDUPLEXCAP 0x0200 | ||
115 | #define MII_1000BASETCONTROL_HALFDUPLEXCAP 0x0100 | ||
116 | |||
117 | /* Marvell 88E1011 PHY control */ | ||
118 | #define MII_M1011_PHY_SPEC_CONTROL 0x10 | ||
119 | #define MII_M1011_PHY_SPEC_CONTROL_MANUAL_MDIX 0x20 | ||
120 | #define MII_M1011_PHY_SPEC_CONTROL_AUTO_MDIX 0x40 | ||
121 | |||
122 | /* Marvell 88E1011 PHY status */ | ||
123 | #define MII_M1011_PHY_SPEC_STATUS 0x11 | ||
124 | #define MII_M1011_PHY_SPEC_STATUS_1000 0x8000 | ||
125 | #define MII_M1011_PHY_SPEC_STATUS_100 0x4000 | ||
126 | #define MII_M1011_PHY_SPEC_STATUS_SPD_MASK 0xc000 | ||
127 | #define MII_M1011_PHY_SPEC_STATUS_FULLDUPLEX 0x2000 | ||
128 | #define MII_M1011_PHY_SPEC_STATUS_RESOLVED 0x0800 | ||
129 | #define MII_M1011_PHY_SPEC_STATUS_TX_PAUSE 0x0008 | ||
130 | #define MII_M1011_PHY_SPEC_STATUS_RX_PAUSE 0x0004 | ||
131 | |||
132 | #endif /* __SUNGEM_PHY_H__ */ | ||
diff --git a/drivers/net/ethernet/sun/sunhme.c b/drivers/net/ethernet/sun/sunhme.c new file mode 100644 index 000000000000..856e05b9fba3 --- /dev/null +++ b/drivers/net/ethernet/sun/sunhme.c | |||
@@ -0,0 +1,3360 @@ | |||
1 | /* sunhme.c: Sparc HME/BigMac 10/100baseT half/full duplex auto switching, | ||
2 | * auto carrier detecting ethernet driver. Also known as the | ||
3 | * "Happy Meal Ethernet" found on SunSwift SBUS cards. | ||
4 | * | ||
5 | * Copyright (C) 1996, 1998, 1999, 2002, 2003, | ||
6 | * 2006, 2008 David S. Miller (davem@davemloft.net) | ||
7 | * | ||
8 | * Changes : | ||
9 | * 2000/11/11 Willy Tarreau <willy AT meta-x.org> | ||
10 | * - port to non-sparc architectures. Tested only on x86 and | ||
11 | * only currently works with QFE PCI cards. | ||
12 | * - ability to specify the MAC address at module load time by passing this | ||
13 | * argument: macaddr=0x00,0x10,0x20,0x30,0x40,0x50 | ||
14 | */ | ||
15 | |||
16 | #include <linux/module.h> | ||
17 | #include <linux/kernel.h> | ||
18 | #include <linux/types.h> | ||
19 | #include <linux/fcntl.h> | ||
20 | #include <linux/interrupt.h> | ||
21 | #include <linux/ioport.h> | ||
22 | #include <linux/in.h> | ||
23 | #include <linux/slab.h> | ||
24 | #include <linux/string.h> | ||
25 | #include <linux/delay.h> | ||
26 | #include <linux/init.h> | ||
27 | #include <linux/ethtool.h> | ||
28 | #include <linux/mii.h> | ||
29 | #include <linux/crc32.h> | ||
30 | #include <linux/random.h> | ||
31 | #include <linux/errno.h> | ||
32 | #include <linux/netdevice.h> | ||
33 | #include <linux/etherdevice.h> | ||
34 | #include <linux/skbuff.h> | ||
35 | #include <linux/mm.h> | ||
36 | #include <linux/bitops.h> | ||
37 | #include <linux/dma-mapping.h> | ||
38 | |||
39 | #include <asm/system.h> | ||
40 | #include <asm/io.h> | ||
41 | #include <asm/dma.h> | ||
42 | #include <asm/byteorder.h> | ||
43 | |||
44 | #ifdef CONFIG_SPARC | ||
45 | #include <linux/of.h> | ||
46 | #include <linux/of_device.h> | ||
47 | #include <asm/idprom.h> | ||
48 | #include <asm/openprom.h> | ||
49 | #include <asm/oplib.h> | ||
50 | #include <asm/prom.h> | ||
51 | #include <asm/auxio.h> | ||
52 | #endif | ||
53 | #include <asm/uaccess.h> | ||
54 | |||
55 | #include <asm/pgtable.h> | ||
56 | #include <asm/irq.h> | ||
57 | |||
58 | #ifdef CONFIG_PCI | ||
59 | #include <linux/pci.h> | ||
60 | #endif | ||
61 | |||
62 | #include "sunhme.h" | ||
63 | |||
64 | #define DRV_NAME "sunhme" | ||
65 | #define DRV_VERSION "3.10" | ||
66 | #define DRV_RELDATE "August 26, 2008" | ||
67 | #define DRV_AUTHOR "David S. Miller (davem@davemloft.net)" | ||
68 | |||
69 | static char version[] = | ||
70 | DRV_NAME ".c:v" DRV_VERSION " " DRV_RELDATE " " DRV_AUTHOR "\n"; | ||
71 | |||
72 | MODULE_VERSION(DRV_VERSION); | ||
73 | MODULE_AUTHOR(DRV_AUTHOR); | ||
74 | MODULE_DESCRIPTION("Sun Happy Meal Ethernet (HME) 10/100baseT ethernet driver"); | ||
75 | MODULE_LICENSE("GPL"); | ||
76 | |||
77 | static int macaddr[6]; | ||
78 | |||
79 | /* accept MAC address of the form macaddr=0x08,0x00,0x20,0x30,0x40,0x50 */ | ||
80 | module_param_array(macaddr, int, NULL, 0); | ||
81 | MODULE_PARM_DESC(macaddr, "Happy Meal MAC address to set"); | ||
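/* For example, to force the address at module load time (the values here
 * are illustrative):
 *
 *	modprobe sunhme macaddr=0x08,0x00,0x20,0x30,0x40,0x50
 */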
82 | |||
83 | #ifdef CONFIG_SBUS | ||
84 | static struct quattro *qfe_sbus_list; | ||
85 | #endif | ||
86 | |||
87 | #ifdef CONFIG_PCI | ||
88 | static struct quattro *qfe_pci_list; | ||
89 | #endif | ||
90 | |||
91 | #undef HMEDEBUG | ||
92 | #undef SXDEBUG | ||
93 | #undef RXDEBUG | ||
94 | #undef TXDEBUG | ||
95 | #undef TXLOGGING | ||
96 | |||
97 | #ifdef TXLOGGING | ||
98 | struct hme_tx_logent { | ||
99 | unsigned int tstamp; | ||
100 | int tx_new, tx_old; | ||
101 | unsigned int action; | ||
102 | #define TXLOG_ACTION_IRQ 0x01 | ||
103 | #define TXLOG_ACTION_TXMIT 0x02 | ||
104 | #define TXLOG_ACTION_TBUSY 0x04 | ||
105 | #define TXLOG_ACTION_NBUFS 0x08 | ||
106 | unsigned int status; | ||
107 | }; | ||
108 | #define TX_LOG_LEN 128 | ||
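/* TX_LOG_LEN must stay a power of two: the ring index below wraps with
 * "& (TX_LOG_LEN - 1)" rather than a modulo.
 */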
109 | static struct hme_tx_logent tx_log[TX_LOG_LEN]; | ||
110 | static int txlog_cur_entry; | ||
111 | static __inline__ void tx_add_log(struct happy_meal *hp, unsigned int a, unsigned int s) | ||
112 | { | ||
113 | struct hme_tx_logent *tlp; | ||
114 | unsigned long flags; | ||
115 | |||
116 | local_irq_save(flags); | ||
117 | tlp = &tx_log[txlog_cur_entry]; | ||
118 | tlp->tstamp = (unsigned int)jiffies; | ||
119 | tlp->tx_new = hp->tx_new; | ||
120 | tlp->tx_old = hp->tx_old; | ||
121 | tlp->action = a; | ||
122 | tlp->status = s; | ||
123 | txlog_cur_entry = (txlog_cur_entry + 1) & (TX_LOG_LEN - 1); | ||
124 | local_irq_restore(flags); | ||
125 | } | ||
126 | static __inline__ void tx_dump_log(void) | ||
127 | { | ||
128 | int i, this; | ||
129 | |||
130 | this = txlog_cur_entry; | ||
131 | for (i = 0; i < TX_LOG_LEN; i++) { | ||
132 | printk("TXLOG[%d]: j[%08x] tx[N(%d)O(%d)] action[%08x] stat[%08x]\n", i, | ||
133 | tx_log[this].tstamp, | ||
134 | tx_log[this].tx_new, tx_log[this].tx_old, | ||
135 | tx_log[this].action, tx_log[this].status); | ||
136 | this = (this + 1) & (TX_LOG_LEN - 1); | ||
137 | } | ||
138 | } | ||
139 | static __inline__ void tx_dump_ring(struct happy_meal *hp) | ||
140 | { | ||
141 | struct hmeal_init_block *hb = hp->happy_block; | ||
142 | struct happy_meal_txd *tp = &hb->happy_meal_txd[0]; | ||
143 | int i; | ||
144 | |||
145 | for (i = 0; i < TX_RING_SIZE; i+=4) { | ||
146 | printk("TXD[%d..%d]: [%08x:%08x] [%08x:%08x] [%08x:%08x] [%08x:%08x]\n", | ||
147 | i, i + 4, | ||
148 | le32_to_cpu(tp[i].tx_flags), le32_to_cpu(tp[i].tx_addr), | ||
149 | le32_to_cpu(tp[i + 1].tx_flags), le32_to_cpu(tp[i + 1].tx_addr), | ||
150 | le32_to_cpu(tp[i + 2].tx_flags), le32_to_cpu(tp[i + 2].tx_addr), | ||
151 | le32_to_cpu(tp[i + 3].tx_flags), le32_to_cpu(tp[i + 3].tx_addr)); | ||
152 | } | ||
153 | } | ||
154 | #else | ||
155 | #define tx_add_log(hp, a, s) do { } while(0) | ||
156 | #define tx_dump_log() do { } while(0) | ||
157 | #define tx_dump_ring(hp) do { } while(0) | ||
158 | #endif | ||
159 | |||
160 | #ifdef HMEDEBUG | ||
161 | #define HMD(x) printk x | ||
162 | #else | ||
163 | #define HMD(x) | ||
164 | #endif | ||
165 | |||
166 | /* #define AUTO_SWITCH_DEBUG */ | ||
167 | |||
168 | #ifdef AUTO_SWITCH_DEBUG | ||
169 | #define ASD(x) printk x | ||
170 | #else | ||
171 | #define ASD(x) | ||
172 | #endif | ||
173 | |||
174 | #define DEFAULT_IPG0 16 /* For lance-mode only */ | ||
175 | #define DEFAULT_IPG1 8 /* For all modes */ | ||
176 | #define DEFAULT_IPG2 4 /* For all modes */ | ||
177 | #define DEFAULT_JAMSIZE 4 /* Toe jam */ | ||
178 | |||
179 | /* NOTE: In the descriptor writes one _must_ write the address | ||
180 | * member _first_. The card must not be allowed to see | ||
181 | * the updated descriptor flags until the address is | ||
182 | * correct. I've added a write memory barrier between | ||
183 | * the two stores so that I can sleep well at night... -DaveM | ||
184 | */ | ||
185 | |||
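/* As a sketch of the pattern (names as in the helpers below):
 *
 *	rxd->rx_addr  = addr;	store the DMA address first
 *	wmb();			order it before the flags store
 *	rxd->rx_flags = flags;	only now may the card claim the buffer
 *
 * Without the barrier a weakly-ordered CPU could make the flags store
 * visible before the address store, handing the card a stale address.
 */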
186 | #if defined(CONFIG_SBUS) && defined(CONFIG_PCI) | ||
187 | static void sbus_hme_write32(void __iomem *reg, u32 val) | ||
188 | { | ||
189 | sbus_writel(val, reg); | ||
190 | } | ||
191 | |||
192 | static u32 sbus_hme_read32(void __iomem *reg) | ||
193 | { | ||
194 | return sbus_readl(reg); | ||
195 | } | ||
196 | |||
197 | static void sbus_hme_write_rxd(struct happy_meal_rxd *rxd, u32 flags, u32 addr) | ||
198 | { | ||
199 | rxd->rx_addr = (__force hme32)addr; | ||
200 | wmb(); | ||
201 | rxd->rx_flags = (__force hme32)flags; | ||
202 | } | ||
203 | |||
204 | static void sbus_hme_write_txd(struct happy_meal_txd *txd, u32 flags, u32 addr) | ||
205 | { | ||
206 | txd->tx_addr = (__force hme32)addr; | ||
207 | wmb(); | ||
208 | txd->tx_flags = (__force hme32)flags; | ||
209 | } | ||
210 | |||
211 | static u32 sbus_hme_read_desc32(hme32 *p) | ||
212 | { | ||
213 | return (__force u32)*p; | ||
214 | } | ||
215 | |||
216 | static void pci_hme_write32(void __iomem *reg, u32 val) | ||
217 | { | ||
218 | writel(val, reg); | ||
219 | } | ||
220 | |||
221 | static u32 pci_hme_read32(void __iomem *reg) | ||
222 | { | ||
223 | return readl(reg); | ||
224 | } | ||
225 | |||
226 | static void pci_hme_write_rxd(struct happy_meal_rxd *rxd, u32 flags, u32 addr) | ||
227 | { | ||
228 | rxd->rx_addr = (__force hme32)cpu_to_le32(addr); | ||
229 | wmb(); | ||
230 | rxd->rx_flags = (__force hme32)cpu_to_le32(flags); | ||
231 | } | ||
232 | |||
233 | static void pci_hme_write_txd(struct happy_meal_txd *txd, u32 flags, u32 addr) | ||
234 | { | ||
235 | txd->tx_addr = (__force hme32)cpu_to_le32(addr); | ||
236 | wmb(); | ||
237 | txd->tx_flags = (__force hme32)cpu_to_le32(flags); | ||
238 | } | ||
239 | |||
240 | static u32 pci_hme_read_desc32(hme32 *p) | ||
241 | { | ||
242 | return le32_to_cpup((__le32 *)p); | ||
243 | } | ||
244 | |||
245 | #define hme_write32(__hp, __reg, __val) \ | ||
246 | ((__hp)->write32((__reg), (__val))) | ||
247 | #define hme_read32(__hp, __reg) \ | ||
248 | ((__hp)->read32(__reg)) | ||
249 | #define hme_write_rxd(__hp, __rxd, __flags, __addr) \ | ||
250 | ((__hp)->write_rxd((__rxd), (__flags), (__addr))) | ||
251 | #define hme_write_txd(__hp, __txd, __flags, __addr) \ | ||
252 | ((__hp)->write_txd((__txd), (__flags), (__addr))) | ||
253 | #define hme_read_desc32(__hp, __p) \ | ||
254 | ((__hp)->read_desc32(__p)) | ||
255 | #define hme_dma_map(__hp, __ptr, __size, __dir) \ | ||
256 | ((__hp)->dma_map((__hp)->dma_dev, (__ptr), (__size), (__dir))) | ||
257 | #define hme_dma_unmap(__hp, __addr, __size, __dir) \ | ||
258 | ((__hp)->dma_unmap((__hp)->dma_dev, (__addr), (__size), (__dir))) | ||
259 | #define hme_dma_sync_for_cpu(__hp, __addr, __size, __dir) \ | ||
260 | ((__hp)->dma_sync_for_cpu((__hp)->dma_dev, (__addr), (__size), (__dir))) | ||
261 | #define hme_dma_sync_for_device(__hp, __addr, __size, __dir) \ | ||
262 | ((__hp)->dma_sync_for_device((__hp)->dma_dev, (__addr), (__size), (__dir))) | ||
263 | #else | ||
264 | #ifdef CONFIG_SBUS | ||
265 | /* SBUS only compilation */ | ||
266 | #define hme_write32(__hp, __reg, __val) \ | ||
267 | sbus_writel((__val), (__reg)) | ||
268 | #define hme_read32(__hp, __reg) \ | ||
269 | sbus_readl(__reg) | ||
270 | #define hme_write_rxd(__hp, __rxd, __flags, __addr) \ | ||
271 | do { (__rxd)->rx_addr = (__force hme32)(u32)(__addr); \ | ||
272 | wmb(); \ | ||
273 | (__rxd)->rx_flags = (__force hme32)(u32)(__flags); \ | ||
274 | } while(0) | ||
275 | #define hme_write_txd(__hp, __txd, __flags, __addr) \ | ||
276 | do { (__txd)->tx_addr = (__force hme32)(u32)(__addr); \ | ||
277 | wmb(); \ | ||
278 | (__txd)->tx_flags = (__force hme32)(u32)(__flags); \ | ||
279 | } while(0) | ||
280 | #define hme_read_desc32(__hp, __p) ((__force u32)(hme32)*(__p)) | ||
281 | #define hme_dma_map(__hp, __ptr, __size, __dir) \ | ||
282 | dma_map_single((__hp)->dma_dev, (__ptr), (__size), (__dir)) | ||
283 | #define hme_dma_unmap(__hp, __addr, __size, __dir) \ | ||
284 | dma_unmap_single((__hp)->dma_dev, (__addr), (__size), (__dir)) | ||
285 | #define hme_dma_sync_for_cpu(__hp, __addr, __size, __dir) \ | ||
286 | dma_sync_single_for_cpu((__hp)->dma_dev, (__addr), (__size), (__dir)) | ||
287 | #define hme_dma_sync_for_device(__hp, __addr, __size, __dir) \ | ||
288 | dma_sync_single_for_device((__hp)->dma_dev, (__addr), (__size), (__dir)) | ||
289 | #else | ||
290 | /* PCI only compilation */ | ||
291 | #define hme_write32(__hp, __reg, __val) \ | ||
292 | writel((__val), (__reg)) | ||
293 | #define hme_read32(__hp, __reg) \ | ||
294 | readl(__reg) | ||
295 | #define hme_write_rxd(__hp, __rxd, __flags, __addr) \ | ||
296 | do { (__rxd)->rx_addr = (__force hme32)cpu_to_le32(__addr); \ | ||
297 | wmb(); \ | ||
298 | (__rxd)->rx_flags = (__force hme32)cpu_to_le32(__flags); \ | ||
299 | } while(0) | ||
300 | #define hme_write_txd(__hp, __txd, __flags, __addr) \ | ||
301 | do { (__txd)->tx_addr = (__force hme32)cpu_to_le32(__addr); \ | ||
302 | wmb(); \ | ||
303 | (__txd)->tx_flags = (__force hme32)cpu_to_le32(__flags); \ | ||
304 | } while(0) | ||
305 | static inline u32 hme_read_desc32(struct happy_meal *hp, hme32 *p) | ||
306 | { | ||
307 | return le32_to_cpup((__le32 *)p); | ||
308 | } | ||
309 | #define hme_dma_map(__hp, __ptr, __size, __dir) \ | ||
310 | pci_map_single((__hp)->dma_dev, (__ptr), (__size), (__dir)) | ||
311 | #define hme_dma_unmap(__hp, __addr, __size, __dir) \ | ||
312 | pci_unmap_single((__hp)->dma_dev, (__addr), (__size), (__dir)) | ||
313 | #define hme_dma_sync_for_cpu(__hp, __addr, __size, __dir) \ | ||
314 | pci_dma_sync_single_for_cpu((__hp)->dma_dev, (__addr), (__size), (__dir)) | ||
315 | #define hme_dma_sync_for_device(__hp, __addr, __size, __dir) \ | ||
316 | pci_dma_sync_single_for_device((__hp)->dma_dev, (__addr), (__size), (__dir)) | ||
317 | #endif | ||
318 | #endif | ||
319 | |||
320 | |||
321 | /* Oh yes, the MIF BitBang is mighty fun to program. BitBucket is more like it. */ | ||
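/* What the bit-bang routines below clock out is a standard IEEE 802.3
 * clause-22 MDIO frame, one bit per BB_PUT_BIT: a preamble of 32 ones,
 * a start pattern (01), a two-bit opcode (10 = read, 01 = write), the
 * 5-bit PHY address, the 5-bit register address, a turnaround, and then
 * 16 data bits.
 */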
322 | static void BB_PUT_BIT(struct happy_meal *hp, void __iomem *tregs, int bit) | ||
323 | { | ||
324 | hme_write32(hp, tregs + TCVR_BBDATA, bit); | ||
325 | hme_write32(hp, tregs + TCVR_BBCLOCK, 0); | ||
326 | hme_write32(hp, tregs + TCVR_BBCLOCK, 1); | ||
327 | } | ||
328 | |||
329 | #if 0 | ||
330 | static u32 BB_GET_BIT(struct happy_meal *hp, void __iomem *tregs, int internal) | ||
331 | { | ||
332 | u32 ret; | ||
333 | |||
334 | hme_write32(hp, tregs + TCVR_BBCLOCK, 0); | ||
335 | hme_write32(hp, tregs + TCVR_BBCLOCK, 1); | ||
336 | ret = hme_read32(hp, tregs + TCVR_CFG); | ||
337 | if (internal) | ||
338 | ret &= TCV_CFG_MDIO0; | ||
339 | else | ||
340 | ret &= TCV_CFG_MDIO1; | ||
341 | |||
342 | return ret; | ||
343 | } | ||
344 | #endif | ||
345 | |||
346 | static u32 BB_GET_BIT2(struct happy_meal *hp, void __iomem *tregs, int internal) | ||
347 | { | ||
348 | u32 retval; | ||
349 | |||
350 | hme_write32(hp, tregs + TCVR_BBCLOCK, 0); | ||
351 | udelay(1); | ||
352 | retval = hme_read32(hp, tregs + TCVR_CFG); | ||
353 | if (internal) | ||
354 | retval &= TCV_CFG_MDIO0; | ||
355 | else | ||
356 | retval &= TCV_CFG_MDIO1; | ||
357 | hme_write32(hp, tregs + TCVR_BBCLOCK, 1); | ||
358 | |||
359 | return retval; | ||
360 | } | ||
361 | |||
362 | #define TCVR_FAILURE 0x80000000 /* Impossible MIF read value */ | ||
363 | |||
364 | static int happy_meal_bb_read(struct happy_meal *hp, | ||
365 | void __iomem *tregs, int reg) | ||
366 | { | ||
367 | u32 tmp; | ||
368 | int retval = 0; | ||
369 | int i; | ||
370 | |||
371 | ASD(("happy_meal_bb_read: reg=%d ", reg)); | ||
372 | |||
373 | /* Enable the MIF BitBang outputs. */ | ||
374 | hme_write32(hp, tregs + TCVR_BBOENAB, 1); | ||
375 | |||
376 | /* Force BitBang into the idle state. */ | ||
377 | for (i = 0; i < 32; i++) | ||
378 | BB_PUT_BIT(hp, tregs, 1); | ||
379 | |||
380 | /* Give it the read sequence. */ | ||
381 | BB_PUT_BIT(hp, tregs, 0); | ||
382 | BB_PUT_BIT(hp, tregs, 1); | ||
383 | BB_PUT_BIT(hp, tregs, 1); | ||
384 | BB_PUT_BIT(hp, tregs, 0); | ||
385 | |||
386 | /* Give it the PHY address. */ | ||
387 | tmp = hp->paddr & 0xff; | ||
388 | for (i = 4; i >= 0; i--) | ||
389 | BB_PUT_BIT(hp, tregs, ((tmp >> i) & 1)); | ||
390 | |||
391 | /* Tell it what register we want to read. */ | ||
392 | tmp = (reg & 0xff); | ||
393 | for (i = 4; i >= 0; i--) | ||
394 | BB_PUT_BIT(hp, tregs, ((tmp >> i) & 1)); | ||
395 | |||
396 | /* Close down the MIF BitBang outputs. */ | ||
397 | hme_write32(hp, tregs + TCVR_BBOENAB, 0); | ||
398 | |||
399 | /* Now read in the value. */ | ||
400 | (void) BB_GET_BIT2(hp, tregs, (hp->tcvr_type == internal)); | ||
401 | for (i = 15; i >= 0; i--) | ||
402 | retval |= BB_GET_BIT2(hp, tregs, (hp->tcvr_type == internal)); | ||
403 | (void) BB_GET_BIT2(hp, tregs, (hp->tcvr_type == internal)); | ||
404 | (void) BB_GET_BIT2(hp, tregs, (hp->tcvr_type == internal)); | ||
405 | (void) BB_GET_BIT2(hp, tregs, (hp->tcvr_type == internal)); | ||
406 | ASD(("value=%x\n", retval)); | ||
407 | return retval; | ||
408 | } | ||
409 | |||
410 | static void happy_meal_bb_write(struct happy_meal *hp, | ||
411 | void __iomem *tregs, int reg, | ||
412 | unsigned short value) | ||
413 | { | ||
414 | u32 tmp; | ||
415 | int i; | ||
416 | |||
417 | ASD(("happy_meal_bb_write: reg=%d value=%x\n", reg, value)); | ||
418 | |||
419 | /* Enable the MIF BitBang outputs. */ | ||
420 | hme_write32(hp, tregs + TCVR_BBOENAB, 1); | ||
421 | |||
422 | /* Force BitBang into the idle state. */ | ||
423 | for (i = 0; i < 32; i++) | ||
424 | BB_PUT_BIT(hp, tregs, 1); | ||
425 | |||
426 | /* Give it write sequence. */ | ||
427 | BB_PUT_BIT(hp, tregs, 0); | ||
428 | BB_PUT_BIT(hp, tregs, 1); | ||
429 | BB_PUT_BIT(hp, tregs, 0); | ||
430 | BB_PUT_BIT(hp, tregs, 1); | ||
431 | |||
432 | /* Give it the PHY address. */ | ||
433 | tmp = (hp->paddr & 0xff); | ||
434 | for (i = 4; i >= 0; i--) | ||
435 | BB_PUT_BIT(hp, tregs, ((tmp >> i) & 1)); | ||
436 | |||
437 | /* Tell it what register we will be writing. */ | ||
438 | tmp = (reg & 0xff); | ||
439 | for (i = 4; i >= 0; i--) | ||
440 | BB_PUT_BIT(hp, tregs, ((tmp >> i) & 1)); | ||
441 | |||
442 | /* Tell it to become ready for the bits. */ | ||
443 | BB_PUT_BIT(hp, tregs, 1); | ||
444 | BB_PUT_BIT(hp, tregs, 0); | ||
445 | |||
446 | for (i = 15; i >= 0; i--) | ||
447 | BB_PUT_BIT(hp, tregs, ((value >> i) & 1)); | ||
448 | |||
449 | /* Close down the MIF BitBang outputs. */ | ||
450 | hme_write32(hp, tregs + TCVR_BBOENAB, 0); | ||
451 | } | ||
452 | |||
453 | #define TCVR_READ_TRIES 16 | ||
454 | |||
455 | static int happy_meal_tcvr_read(struct happy_meal *hp, | ||
456 | void __iomem *tregs, int reg) | ||
457 | { | ||
458 | int tries = TCVR_READ_TRIES; | ||
459 | int retval; | ||
460 | |||
461 | ASD(("happy_meal_tcvr_read: reg=0x%02x ", reg)); | ||
462 | if (hp->tcvr_type == none) { | ||
463 | ASD(("no transceiver, value=TCVR_FAILURE\n")); | ||
464 | return TCVR_FAILURE; | ||
465 | } | ||
466 | |||
467 | if (!(hp->happy_flags & HFLAG_FENABLE)) { | ||
468 | ASD(("doing bit bang\n")); | ||
469 | return happy_meal_bb_read(hp, tregs, reg); | ||
470 | } | ||
471 | |||
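	/* MIF frame layout as used here: PHY address in bits 27:23,
	 * register number in bits 22:18, bit 16 polled as the completion
	 * flag, data in the low 16 bits.
	 */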
472 | hme_write32(hp, tregs + TCVR_FRAME, | ||
473 | (FRAME_READ | (hp->paddr << 23) | ((reg & 0xff) << 18))); | ||
474 | while (!(hme_read32(hp, tregs + TCVR_FRAME) & 0x10000) && --tries) | ||
475 | udelay(20); | ||
476 | if (!tries) { | ||
477 | printk(KERN_ERR "happy meal: Aieee, transceiver MIF read bolixed\n"); | ||
478 | return TCVR_FAILURE; | ||
479 | } | ||
480 | retval = hme_read32(hp, tregs + TCVR_FRAME) & 0xffff; | ||
481 | ASD(("value=%04x\n", retval)); | ||
482 | return retval; | ||
483 | } | ||
484 | |||
485 | #define TCVR_WRITE_TRIES 16 | ||
486 | |||
487 | static void happy_meal_tcvr_write(struct happy_meal *hp, | ||
488 | void __iomem *tregs, int reg, | ||
489 | unsigned short value) | ||
490 | { | ||
491 | int tries = TCVR_WRITE_TRIES; | ||
492 | |||
493 | ASD(("happy_meal_tcvr_write: reg=0x%02x value=%04x\n", reg, value)); | ||
494 | |||
495 | /* Welcome to Sun Microsystems, can I take your order please? */ | ||
496 | if (!(hp->happy_flags & HFLAG_FENABLE)) { | ||
497 | happy_meal_bb_write(hp, tregs, reg, value); | ||
498 | return; | ||
499 | } | ||
500 | |||
501 | /* Would you like fries with that? */ | ||
502 | hme_write32(hp, tregs + TCVR_FRAME, | ||
503 | (FRAME_WRITE | (hp->paddr << 23) | | ||
504 | ((reg & 0xff) << 18) | (value & 0xffff))); | ||
505 | while (!(hme_read32(hp, tregs + TCVR_FRAME) & 0x10000) && --tries) | ||
506 | udelay(20); | ||
507 | |||
508 | /* Anything else? */ | ||
509 | if (!tries) | ||
510 | printk(KERN_ERR "happy meal: Aieee, transceiver MIF write bolixed\n"); | ||
511 | |||
512 | /* Fifty-two cents is your change, have a nice day. */ | ||
513 | } | ||
514 | |||
515 | /* Auto negotiation. The scheme is very simple. We have a timer routine | ||
516 | * that keeps watching the auto negotiation process as it progresses. | ||
517 | * The DP83840 is first told to start doing its thing, we set up the timer | ||
518 | * and place the timer state machine in its initial state. | ||
519 | * | ||
520 | * Here the timer peeks at the DP83840 status registers at each tick to see | ||
521 | * if the auto negotiation has completed; we assume here that the DP83840 PHY | ||
522 | * will time out at some point and just tell us what (didn't) happen. For | ||
523 | * complete coverage we only allow so many of the ticks at this level to run; | ||
524 | * when this has expired we print a warning message and try another strategy. | ||
525 | * This "other" strategy is to force the interface into various speed/duplex | ||
526 | * configurations and we stop when we see a link-up condition before the | ||
527 | * maximum number of "peek" ticks have occurred. | ||
528 | * | ||
529 | * Once a valid link status has been detected we configure the BigMAC and | ||
530 | * the rest of the Happy Meal to speak the most efficient protocol we could | ||
531 | * get a clean link for. The priority for link configurations, highest first | ||
532 | * is: | ||
533 | * 100 Base-T Full Duplex | ||
534 | * 100 Base-T Half Duplex | ||
535 | * 10 Base-T Full Duplex | ||
536 | * 10 Base-T Half Duplex | ||
537 | * | ||
538 | * We start a new timer now, after a successful auto negotiation status has | ||
539 | * been detected. This timer just waits for the link-up bit to get set in | ||
540 | * the BMCR of the DP83840. When this occurs we print a kernel log message | ||
541 | * describing the link type in use and the fact that it is up. | ||
542 | * | ||
543 | * If a fatal error of some sort is signalled and detected in the interrupt | ||
544 | * service routine, and the chip is reset, or the link is ifconfig'd down | ||
545 | * and then back up, this entire process repeats itself all over again. | ||
546 | */ | ||
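/* A rough sketch of the state flow happy_meal_timer() implements below:
 *
 *	arbwait  -- waiting for autonegotiation; on success -> lupwait,
 *	            on timeout or a bad partner -> force a mode, ltrywait
 *	lupwait  -- aneg done, waiting for BMSR_LSTATUS; link up -> asleep
 *	ltrywait -- stepping through forced speed/duplex permutations;
 *	            link up -> asleep, all exhausted -> re-init the chip
 *	asleep   -- link is up and the timer is idle
 */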
547 | static int try_next_permutation(struct happy_meal *hp, void __iomem *tregs) | ||
548 | { | ||
549 | hp->sw_bmcr = happy_meal_tcvr_read(hp, tregs, MII_BMCR); | ||
550 | |||
551 | /* Downgrade from full to half duplex. Only possible | ||
552 | * via ethtool. | ||
553 | */ | ||
554 | if (hp->sw_bmcr & BMCR_FULLDPLX) { | ||
555 | hp->sw_bmcr &= ~(BMCR_FULLDPLX); | ||
556 | happy_meal_tcvr_write(hp, tregs, MII_BMCR, hp->sw_bmcr); | ||
557 | return 0; | ||
558 | } | ||
559 | |||
560 | /* Downgrade from 100 to 10. */ | ||
561 | if (hp->sw_bmcr & BMCR_SPEED100) { | ||
562 | hp->sw_bmcr &= ~(BMCR_SPEED100); | ||
563 | happy_meal_tcvr_write(hp, tregs, MII_BMCR, hp->sw_bmcr); | ||
564 | return 0; | ||
565 | } | ||
566 | |||
567 | /* We've tried everything. */ | ||
568 | return -1; | ||
569 | } | ||
570 | |||
571 | static void display_link_mode(struct happy_meal *hp, void __iomem *tregs) | ||
572 | { | ||
573 | printk(KERN_INFO "%s: Link is up using ", hp->dev->name); | ||
574 | if (hp->tcvr_type == external) | ||
575 | printk("external "); | ||
576 | else | ||
577 | printk("internal "); | ||
578 | printk("transceiver at "); | ||
579 | hp->sw_lpa = happy_meal_tcvr_read(hp, tregs, MII_LPA); | ||
580 | if (hp->sw_lpa & (LPA_100HALF | LPA_100FULL)) { | ||
581 | if (hp->sw_lpa & LPA_100FULL) | ||
582 | printk("100Mb/s, Full Duplex.\n"); | ||
583 | else | ||
584 | printk("100Mb/s, Half Duplex.\n"); | ||
585 | } else { | ||
586 | if (hp->sw_lpa & LPA_10FULL) | ||
587 | printk("10Mb/s, Full Duplex.\n"); | ||
588 | else | ||
589 | printk("10Mb/s, Half Duplex.\n"); | ||
590 | } | ||
591 | } | ||
592 | |||
593 | static void display_forced_link_mode(struct happy_meal *hp, void __iomem *tregs) | ||
594 | { | ||
595 | printk(KERN_INFO "%s: Link has been forced up using ", hp->dev->name); | ||
596 | if (hp->tcvr_type == external) | ||
597 | printk("external "); | ||
598 | else | ||
599 | printk("internal "); | ||
600 | printk("transceiver at "); | ||
601 | hp->sw_bmcr = happy_meal_tcvr_read(hp, tregs, MII_BMCR); | ||
602 | if (hp->sw_bmcr & BMCR_SPEED100) | ||
603 | printk("100Mb/s, "); | ||
604 | else | ||
605 | printk("10Mb/s, "); | ||
606 | if (hp->sw_bmcr & BMCR_FULLDPLX) | ||
607 | printk("Full Duplex.\n"); | ||
608 | else | ||
609 | printk("Half Duplex.\n"); | ||
610 | } | ||
611 | |||
612 | static int set_happy_link_modes(struct happy_meal *hp, void __iomem *tregs) | ||
613 | { | ||
614 | int full; | ||
615 | |||
616 | /* All we care about is making sure the bigmac tx_cfg has a | ||
617 | * proper duplex setting. | ||
618 | */ | ||
619 | if (hp->timer_state == arbwait) { | ||
620 | hp->sw_lpa = happy_meal_tcvr_read(hp, tregs, MII_LPA); | ||
621 | if (!(hp->sw_lpa & (LPA_10HALF | LPA_10FULL | LPA_100HALF | LPA_100FULL))) | ||
622 | goto no_response; | ||
623 | if (hp->sw_lpa & LPA_100FULL) | ||
624 | full = 1; | ||
625 | else if (hp->sw_lpa & LPA_100HALF) | ||
626 | full = 0; | ||
627 | else if (hp->sw_lpa & LPA_10FULL) | ||
628 | full = 1; | ||
629 | else | ||
630 | full = 0; | ||
631 | } else { | ||
632 | /* Forcing a link mode. */ | ||
633 | hp->sw_bmcr = happy_meal_tcvr_read(hp, tregs, MII_BMCR); | ||
634 | if (hp->sw_bmcr & BMCR_FULLDPLX) | ||
635 | full = 1; | ||
636 | else | ||
637 | full = 0; | ||
638 | } | ||
639 | |||
640 | /* Before changing other bits in the tx_cfg register, and in | ||
641 | * general any of other the TX config registers too, you | ||
642 | * must: | ||
643 | * 1) Clear Enable | ||
644 | * 2) Poll with reads until that bit reads back as zero | ||
645 | * 3) Make TX configuration changes | ||
646 | * 4) Set Enable once more | ||
647 | */ | ||
648 | hme_write32(hp, hp->bigmacregs + BMAC_TXCFG, | ||
649 | hme_read32(hp, hp->bigmacregs + BMAC_TXCFG) & | ||
650 | ~(BIGMAC_TXCFG_ENABLE)); | ||
651 | while (hme_read32(hp, hp->bigmacregs + BMAC_TXCFG) & BIGMAC_TXCFG_ENABLE) | ||
652 | barrier(); | ||
653 | if (full) { | ||
654 | hp->happy_flags |= HFLAG_FULL; | ||
655 | hme_write32(hp, hp->bigmacregs + BMAC_TXCFG, | ||
656 | hme_read32(hp, hp->bigmacregs + BMAC_TXCFG) | | ||
657 | BIGMAC_TXCFG_FULLDPLX); | ||
658 | } else { | ||
659 | hp->happy_flags &= ~(HFLAG_FULL); | ||
660 | hme_write32(hp, hp->bigmacregs + BMAC_TXCFG, | ||
661 | hme_read32(hp, hp->bigmacregs + BMAC_TXCFG) & | ||
662 | ~(BIGMAC_TXCFG_FULLDPLX)); | ||
663 | } | ||
664 | hme_write32(hp, hp->bigmacregs + BMAC_TXCFG, | ||
665 | hme_read32(hp, hp->bigmacregs + BMAC_TXCFG) | | ||
666 | BIGMAC_TXCFG_ENABLE); | ||
667 | return 0; | ||
668 | no_response: | ||
669 | return 1; | ||
670 | } | ||
671 | |||
672 | static int happy_meal_init(struct happy_meal *hp); | ||
673 | |||
674 | static int is_lucent_phy(struct happy_meal *hp) | ||
675 | { | ||
676 | void __iomem *tregs = hp->tcvregs; | ||
677 | unsigned short mr2, mr3; | ||
678 | int ret = 0; | ||
679 | |||
680 | mr2 = happy_meal_tcvr_read(hp, tregs, 2); | ||
681 | mr3 = happy_meal_tcvr_read(hp, tregs, 3); | ||
682 | if ((mr2 & 0xffff) == 0x0180 && | ||
683 | ((mr3 & 0xffff) >> 10) == 0x1d) | ||
684 | ret = 1; | ||
685 | |||
686 | return ret; | ||
687 | } | ||
688 | |||
689 | static void happy_meal_timer(unsigned long data) | ||
690 | { | ||
691 | struct happy_meal *hp = (struct happy_meal *) data; | ||
692 | void __iomem *tregs = hp->tcvregs; | ||
693 | int restart_timer = 0; | ||
694 | |||
695 | spin_lock_irq(&hp->happy_lock); | ||
696 | |||
697 | hp->timer_ticks++; | ||
698 | switch(hp->timer_state) { | ||
699 | case arbwait: | ||
700 | /* Only allow for 10 ticks; that's already much too | ||
701 | * long to wait for arbitration to complete. | ||
702 | */ | ||
703 | if (hp->timer_ticks >= 10) { | ||
704 | /* Enter force mode. */ | ||
705 | do_force_mode: | ||
706 | hp->sw_bmcr = happy_meal_tcvr_read(hp, tregs, MII_BMCR); | ||
707 | printk(KERN_NOTICE "%s: Auto-Negotiation unsuccessful, trying force link mode\n", | ||
708 | hp->dev->name); | ||
709 | hp->sw_bmcr = BMCR_SPEED100; | ||
710 | happy_meal_tcvr_write(hp, tregs, MII_BMCR, hp->sw_bmcr); | ||
711 | |||
712 | if (!is_lucent_phy(hp)) { | ||
713 | /* OK, seems we need to disable the transceiver for the first | ||
714 | * tick to make sure we get an accurate link state at the | ||
715 | * second tick. | ||
716 | */ | ||
717 | hp->sw_csconfig = happy_meal_tcvr_read(hp, tregs, DP83840_CSCONFIG); | ||
718 | hp->sw_csconfig &= ~(CSCONFIG_TCVDISAB); | ||
719 | happy_meal_tcvr_write(hp, tregs, DP83840_CSCONFIG, hp->sw_csconfig); | ||
720 | } | ||
721 | hp->timer_state = ltrywait; | ||
722 | hp->timer_ticks = 0; | ||
723 | restart_timer = 1; | ||
724 | } else { | ||
725 | /* Anything interesting happen? */ | ||
726 | hp->sw_bmsr = happy_meal_tcvr_read(hp, tregs, MII_BMSR); | ||
727 | if (hp->sw_bmsr & BMSR_ANEGCOMPLETE) { | ||
728 | int ret; | ||
729 | |||
730 | /* Just what we've been waiting for... */ | ||
731 | ret = set_happy_link_modes(hp, tregs); | ||
732 | if (ret) { | ||
733 | /* Ooops, something bad happened, go to force | ||
734 | * mode. | ||
735 | * | ||
736 | * XXX Broken hubs which don't support 802.3u | ||
737 | * XXX auto-negotiation make this happen as well. | ||
738 | */ | ||
739 | goto do_force_mode; | ||
740 | } | ||
741 | |||
742 | /* Success, at least so far, advance our state engine. */ | ||
743 | hp->timer_state = lupwait; | ||
744 | restart_timer = 1; | ||
745 | } else { | ||
746 | restart_timer = 1; | ||
747 | } | ||
748 | } | ||
749 | break; | ||
750 | |||
751 | case lupwait: | ||
752 | /* Auto negotiation was successful and we are awaiting a | ||
753 | * link up status. I have decided to let this timer run | ||
754 | * forever until some sort of error is signalled, reporting | ||
756 | * a message to the user at 12 second intervals. | ||
756 | */ | ||
757 | hp->sw_bmsr = happy_meal_tcvr_read(hp, tregs, MII_BMSR); | ||
758 | if (hp->sw_bmsr & BMSR_LSTATUS) { | ||
759 | /* Wheee, it's up, display the link mode in use and put | ||
760 | * the timer to sleep. | ||
761 | */ | ||
762 | display_link_mode(hp, tregs); | ||
763 | hp->timer_state = asleep; | ||
764 | restart_timer = 0; | ||
765 | } else { | ||
766 | if (hp->timer_ticks >= 10) { | ||
767 | printk(KERN_NOTICE "%s: Auto negotiation successful, link still " | ||
768 | "not completely up.\n", hp->dev->name); | ||
769 | hp->timer_ticks = 0; | ||
770 | restart_timer = 1; | ||
771 | } else { | ||
772 | restart_timer = 1; | ||
773 | } | ||
774 | } | ||
775 | break; | ||
776 | |||
777 | case ltrywait: | ||
778 | /* Making the timeout here too long can make it take | ||
779 | * annoyingly long to attempt all of the link mode | ||
780 | * permutations, but then again this is essentially | ||
781 | * error recovery code for the most part. | ||
782 | */ | ||
783 | hp->sw_bmsr = happy_meal_tcvr_read(hp, tregs, MII_BMSR); | ||
784 | hp->sw_csconfig = happy_meal_tcvr_read(hp, tregs, DP83840_CSCONFIG); | ||
785 | if (hp->timer_ticks == 1) { | ||
786 | if (!is_lucent_phy(hp)) { | ||
787 | /* Disable the transceiver for this tick; we'll re-enable it | ||
788 | * next tick, then check link state on the following tick. | ||
789 | */ | ||
790 | hp->sw_csconfig |= CSCONFIG_TCVDISAB; | ||
791 | happy_meal_tcvr_write(hp, tregs, | ||
792 | DP83840_CSCONFIG, hp->sw_csconfig); | ||
793 | } | ||
794 | restart_timer = 1; | ||
795 | break; | ||
796 | } | ||
797 | if (hp->timer_ticks == 2) { | ||
798 | if (!is_lucent_phy(hp)) { | ||
799 | hp->sw_csconfig &= ~(CSCONFIG_TCVDISAB); | ||
800 | happy_meal_tcvr_write(hp, tregs, | ||
801 | DP83840_CSCONFIG, hp->sw_csconfig); | ||
802 | } | ||
803 | restart_timer = 1; | ||
804 | break; | ||
805 | } | ||
806 | if (hp->sw_bmsr & BMSR_LSTATUS) { | ||
807 | /* Force mode selection success. */ | ||
808 | display_forced_link_mode(hp, tregs); | ||
809 | set_happy_link_modes(hp, tregs); /* XXX error? then what? */ | ||
810 | hp->timer_state = asleep; | ||
811 | restart_timer = 0; | ||
812 | } else { | ||
813 | if (hp->timer_ticks >= 4) { /* 6 seconds or so... */ | ||
814 | int ret; | ||
815 | |||
816 | ret = try_next_permutation(hp, tregs); | ||
817 | if (ret == -1) { | ||
818 | /* Aieee, tried them all, reset the | ||
819 | * chip and try all over again. | ||
820 | */ | ||
821 | |||
822 | /* Let the user know... */ | ||
823 | printk(KERN_NOTICE "%s: Link down, cable problem?\n", | ||
824 | hp->dev->name); | ||
825 | |||
826 | ret = happy_meal_init(hp); | ||
827 | if (ret) { | ||
828 | /* ho hum... */ | ||
829 | printk(KERN_ERR "%s: Error, cannot re-init the " | ||
830 | "Happy Meal.\n", hp->dev->name); | ||
831 | } | ||
832 | goto out; | ||
833 | } | ||
834 | if (!is_lucent_phy(hp)) { | ||
835 | hp->sw_csconfig = happy_meal_tcvr_read(hp, tregs, | ||
836 | DP83840_CSCONFIG); | ||
837 | hp->sw_csconfig |= CSCONFIG_TCVDISAB; | ||
838 | happy_meal_tcvr_write(hp, tregs, | ||
839 | DP83840_CSCONFIG, hp->sw_csconfig); | ||
840 | } | ||
841 | hp->timer_ticks = 0; | ||
842 | restart_timer = 1; | ||
843 | } else { | ||
844 | restart_timer = 1; | ||
845 | } | ||
846 | } | ||
847 | break; | ||
848 | |||
849 | case asleep: | ||
850 | default: | ||
851 | /* Can't happen... */ | ||
852 | printk(KERN_ERR "%s: Aieee, link timer is asleep but we got one anyway!\n", | ||
853 | hp->dev->name); | ||
854 | restart_timer = 0; | ||
855 | hp->timer_ticks = 0; | ||
856 | hp->timer_state = asleep; /* foo on you */ | ||
857 | break; | ||
858 | } | ||
859 | |||
860 | if (restart_timer) { | ||
861 | hp->happy_timer.expires = jiffies + ((12 * HZ)/10); /* 1.2 sec. */ | ||
862 | add_timer(&hp->happy_timer); | ||
863 | } | ||
864 | |||
865 | out: | ||
866 | spin_unlock_irq(&hp->happy_lock); | ||
867 | } | ||
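
/*
 * Editor's summary (not part of the driver): happy_meal_timer() above is a
 * four-state link state machine, driven every 1.2 seconds.  The state names
 * come from the switch() above; the transitions are read off the code:
 *
 *	arbwait  - waiting for autoneg arbitration; ANEGCOMPLETE -> lupwait,
 *		   10 ticks without it -> force a mode and go to ltrywait
 *	lupwait  - autoneg done, waiting for BMSR_LSTATUS; link up -> asleep,
 *		   otherwise nag the user every 10 ticks and keep waiting
 *	ltrywait - forced-mode probing; link up -> asleep, after 4 ticks try
 *		   the next speed/duplex permutation, and once all have been
 *		   tried re-run happy_meal_init() from scratch
 *	asleep   - timer idle; re-armed by happy_meal_begin_auto_negotiation()
 */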
868 | |||
869 | #define TX_RESET_TRIES 32 | ||
870 | #define RX_RESET_TRIES 32 | ||
871 | |||
872 | /* hp->happy_lock must be held */ | ||
873 | static void happy_meal_tx_reset(struct happy_meal *hp, void __iomem *bregs) | ||
874 | { | ||
875 | int tries = TX_RESET_TRIES; | ||
876 | |||
877 | HMD(("happy_meal_tx_reset: reset, ")); | ||
878 | |||
879 | /* Would you like to try our SMCC Delux? */ | ||
880 | hme_write32(hp, bregs + BMAC_TXSWRESET, 0); | ||
881 | while ((hme_read32(hp, bregs + BMAC_TXSWRESET) & 1) && --tries) | ||
882 | udelay(20); | ||
883 | |||
884 | /* Lettuce, tomato, buggy hardware (no extra charge)? */ | ||
885 | if (!tries) | ||
886 | printk(KERN_ERR "happy meal: Transceiver BigMac ATTACK!"); | ||
887 | |||
888 | /* Take care. */ | ||
889 | HMD(("done\n")); | ||
890 | } | ||
891 | |||
892 | /* hp->happy_lock must be held */ | ||
893 | static void happy_meal_rx_reset(struct happy_meal *hp, void __iomem *bregs) | ||
894 | { | ||
895 | int tries = RX_RESET_TRIES; | ||
896 | |||
897 | HMD(("happy_meal_rx_reset: reset, ")); | ||
898 | |||
899 | /* We have a special on GNU/Viking hardware bugs today. */ | ||
900 | hme_write32(hp, bregs + BMAC_RXSWRESET, 0); | ||
901 | while ((hme_read32(hp, bregs + BMAC_RXSWRESET) & 1) && --tries) | ||
902 | udelay(20); | ||
903 | |||
904 | /* Will that be all? */ | ||
905 | if (!tries) | ||
906 | printk(KERN_ERR "happy meal: Receiver BigMac ATTACK!"); | ||
907 | |||
908 | /* Don't forget your vik_1137125_wa. Have a nice day. */ | ||
909 | HMD(("done\n")); | ||
910 | } | ||
911 | |||
912 | #define STOP_TRIES 16 | ||
913 | |||
914 | /* hp->happy_lock must be held */ | ||
915 | static void happy_meal_stop(struct happy_meal *hp, void __iomem *gregs) | ||
916 | { | ||
917 | int tries = STOP_TRIES; | ||
918 | |||
919 | HMD(("happy_meal_stop: reset, ")); | ||
920 | |||
921 | /* We're consolidating our STB products, it's your lucky day. */ | ||
922 | hme_write32(hp, gregs + GREG_SWRESET, GREG_RESET_ALL); | ||
923 | while (hme_read32(hp, gregs + GREG_SWRESET) && --tries) | ||
924 | udelay(20); | ||
925 | |||
926 | /* Come back next week when we are "Sun Microelectronics". */ | ||
927 | if (!tries) | ||
928 | printk(KERN_ERR "happy meal: Fry guys."); | ||
929 | |||
930 | /* Remember: "Different name, same old buggy as shit hardware." */ | ||
931 | HMD(("done\n")); | ||
932 | } | ||
933 | |||
934 | /* hp->happy_lock must be held */ | ||
935 | static void happy_meal_get_counters(struct happy_meal *hp, void __iomem *bregs) | ||
936 | { | ||
937 | struct net_device_stats *stats = &hp->net_stats; | ||
938 | |||
939 | stats->rx_crc_errors += hme_read32(hp, bregs + BMAC_RCRCECTR); | ||
940 | hme_write32(hp, bregs + BMAC_RCRCECTR, 0); | ||
941 | |||
942 | stats->rx_frame_errors += hme_read32(hp, bregs + BMAC_UNALECTR); | ||
943 | hme_write32(hp, bregs + BMAC_UNALECTR, 0); | ||
944 | |||
945 | stats->rx_length_errors += hme_read32(hp, bregs + BMAC_GLECTR); | ||
946 | hme_write32(hp, bregs + BMAC_GLECTR, 0); | ||
947 | |||
948 | stats->tx_aborted_errors += hme_read32(hp, bregs + BMAC_EXCTR); | ||
949 | |||
950 | stats->collisions += | ||
951 | (hme_read32(hp, bregs + BMAC_EXCTR) + | ||
952 | hme_read32(hp, bregs + BMAC_LTCTR)); | ||
953 | hme_write32(hp, bregs + BMAC_EXCTR, 0); | ||
954 | hme_write32(hp, bregs + BMAC_LTCTR, 0); | ||
955 | } | ||
956 | |||
957 | /* hp->happy_lock must be held */ | ||
958 | static void happy_meal_poll_stop(struct happy_meal *hp, void __iomem *tregs) | ||
959 | { | ||
960 | ASD(("happy_meal_poll_stop: ")); | ||
961 | |||
962 | /* If polling is disabled or we are not currently polling, nothing to do. */ | ||
963 | if ((hp->happy_flags & (HFLAG_POLLENABLE | HFLAG_POLL)) != | ||
964 | (HFLAG_POLLENABLE | HFLAG_POLL)) { | ||
965 | HMD(("not polling, return\n")); | ||
966 | return; | ||
967 | } | ||
968 | |||
969 | /* Shut up the MIF. */ | ||
970 | ASD(("were polling, mif ints off, ")); | ||
971 | hme_write32(hp, tregs + TCVR_IMASK, 0xffff); | ||
972 | |||
973 | /* Turn off polling. */ | ||
974 | ASD(("polling off, ")); | ||
975 | hme_write32(hp, tregs + TCVR_CFG, | ||
976 | hme_read32(hp, tregs + TCVR_CFG) & ~(TCV_CFG_PENABLE)); | ||
977 | |||
978 | /* We are no longer polling. */ | ||
979 | hp->happy_flags &= ~(HFLAG_POLL); | ||
980 | |||
981 | /* Let the bits settle. */ | ||
982 | udelay(200); | ||
983 | ASD(("done\n")); | ||
984 | } | ||
985 | |||
986 | /* Only Sun can take such nice parts and fuck up the programming interface | ||
987 | * like this. Good job guys... | ||
988 | */ | ||
989 | #define TCVR_RESET_TRIES 16 /* It should reset quickly */ | ||
990 | #define TCVR_UNISOLATE_TRIES 32 /* Dis-isolation can take longer. */ | ||
991 | |||
992 | /* hp->happy_lock must be held */ | ||
993 | static int happy_meal_tcvr_reset(struct happy_meal *hp, void __iomem *tregs) | ||
994 | { | ||
995 | u32 tconfig; | ||
996 | int result, tries = TCVR_RESET_TRIES; | ||
997 | |||
998 | tconfig = hme_read32(hp, tregs + TCVR_CFG); | ||
999 | ASD(("happy_meal_tcvr_reset: tcfg<%08x> ", tconfig)); | ||
1000 | if (hp->tcvr_type == external) { | ||
1001 | ASD(("external<")); | ||
1002 | hme_write32(hp, tregs + TCVR_CFG, tconfig & ~(TCV_CFG_PSELECT)); | ||
1003 | hp->tcvr_type = internal; | ||
1004 | hp->paddr = TCV_PADDR_ITX; | ||
1005 | ASD(("ISOLATE,")); | ||
1006 | happy_meal_tcvr_write(hp, tregs, MII_BMCR, | ||
1007 | (BMCR_LOOPBACK|BMCR_PDOWN|BMCR_ISOLATE)); | ||
1008 | result = happy_meal_tcvr_read(hp, tregs, MII_BMCR); | ||
1009 | if (result == TCVR_FAILURE) { | ||
1010 | ASD(("phyread_fail>\n")); | ||
1011 | return -1; | ||
1012 | } | ||
1013 | ASD(("phyread_ok,PSELECT>")); | ||
1014 | hme_write32(hp, tregs + TCVR_CFG, tconfig | TCV_CFG_PSELECT); | ||
1015 | hp->tcvr_type = external; | ||
1016 | hp->paddr = TCV_PADDR_ETX; | ||
1017 | } else { | ||
1018 | if (tconfig & TCV_CFG_MDIO1) { | ||
1019 | ASD(("internal<PSELECT,")); | ||
1020 | hme_write32(hp, tregs + TCVR_CFG, (tconfig | TCV_CFG_PSELECT)); | ||
1021 | ASD(("ISOLATE,")); | ||
1022 | happy_meal_tcvr_write(hp, tregs, MII_BMCR, | ||
1023 | (BMCR_LOOPBACK|BMCR_PDOWN|BMCR_ISOLATE)); | ||
1024 | result = happy_meal_tcvr_read(hp, tregs, MII_BMCR); | ||
1025 | if (result == TCVR_FAILURE) { | ||
1026 | ASD(("phyread_fail>\n")); | ||
1027 | return -1; | ||
1028 | } | ||
1029 | ASD(("phyread_ok,~PSELECT>")); | ||
1030 | hme_write32(hp, tregs + TCVR_CFG, (tconfig & ~(TCV_CFG_PSELECT))); | ||
1031 | hp->tcvr_type = internal; | ||
1032 | hp->paddr = TCV_PADDR_ITX; | ||
1033 | } | ||
1034 | } | ||
1035 | |||
1036 | ASD(("BMCR_RESET ")); | ||
1037 | happy_meal_tcvr_write(hp, tregs, MII_BMCR, BMCR_RESET); | ||
1038 | |||
1039 | while (--tries) { | ||
1040 | result = happy_meal_tcvr_read(hp, tregs, MII_BMCR); | ||
1041 | if (result == TCVR_FAILURE) | ||
1042 | return -1; | ||
1043 | hp->sw_bmcr = result; | ||
1044 | if (!(result & BMCR_RESET)) | ||
1045 | break; | ||
1046 | udelay(20); | ||
1047 | } | ||
1048 | if (!tries) { | ||
1049 | ASD(("BMCR RESET FAILED!\n")); | ||
1050 | return -1; | ||
1051 | } | ||
1052 | ASD(("RESET_OK\n")); | ||
1053 | |||
1054 | /* Get fresh copies of the PHY registers. */ | ||
1055 | hp->sw_bmsr = happy_meal_tcvr_read(hp, tregs, MII_BMSR); | ||
1056 | hp->sw_physid1 = happy_meal_tcvr_read(hp, tregs, MII_PHYSID1); | ||
1057 | hp->sw_physid2 = happy_meal_tcvr_read(hp, tregs, MII_PHYSID2); | ||
1058 | hp->sw_advertise = happy_meal_tcvr_read(hp, tregs, MII_ADVERTISE); | ||
1059 | |||
1060 | ASD(("UNISOLATE")); | ||
1061 | hp->sw_bmcr &= ~(BMCR_ISOLATE); | ||
1062 | happy_meal_tcvr_write(hp, tregs, MII_BMCR, hp->sw_bmcr); | ||
1063 | |||
1064 | tries = TCVR_UNISOLATE_TRIES; | ||
1065 | while (--tries) { | ||
1066 | result = happy_meal_tcvr_read(hp, tregs, MII_BMCR); | ||
1067 | if (result == TCVR_FAILURE) | ||
1068 | return -1; | ||
1069 | if (!(result & BMCR_ISOLATE)) | ||
1070 | break; | ||
1071 | udelay(20); | ||
1072 | } | ||
1073 | if (!tries) { | ||
1074 | ASD((" FAILED!\n")); | ||
1075 | return -1; | ||
1076 | } | ||
1077 | ASD((" SUCCESS and CSCONFIG_DFBYPASS\n")); | ||
1078 | if (!is_lucent_phy(hp)) { | ||
1079 | result = happy_meal_tcvr_read(hp, tregs, | ||
1080 | DP83840_CSCONFIG); | ||
1081 | happy_meal_tcvr_write(hp, tregs, | ||
1082 | DP83840_CSCONFIG, (result | CSCONFIG_DFBYPASS)); | ||
1083 | } | ||
1084 | return 0; | ||
1085 | } | ||
1086 | |||
1087 | /* Figure out whether we have an internal or external transceiver. | ||
1088 | * | ||
1089 | * hp->happy_lock must be held | ||
1090 | */ | ||
1091 | static void happy_meal_transceiver_check(struct happy_meal *hp, void __iomem *tregs) | ||
1092 | { | ||
1093 | unsigned long tconfig = hme_read32(hp, tregs + TCVR_CFG); | ||
1094 | |||
1095 | ASD(("happy_meal_transceiver_check: tcfg=%08lx ", tconfig)); | ||
1096 | if (hp->happy_flags & HFLAG_POLL) { | ||
1097 | /* If we are polling, we must stop to get the transceiver type. */ | ||
1098 | ASD(("<polling> ")); | ||
1099 | if (hp->tcvr_type == internal) { | ||
1100 | if (tconfig & TCV_CFG_MDIO1) { | ||
1101 | ASD(("<internal> <poll stop> ")); | ||
1102 | happy_meal_poll_stop(hp, tregs); | ||
1103 | hp->paddr = TCV_PADDR_ETX; | ||
1104 | hp->tcvr_type = external; | ||
1105 | ASD(("<external>\n")); | ||
1106 | tconfig &= ~(TCV_CFG_PENABLE); | ||
1107 | tconfig |= TCV_CFG_PSELECT; | ||
1108 | hme_write32(hp, tregs + TCVR_CFG, tconfig); | ||
1109 | } | ||
1110 | } else { | ||
1111 | if (hp->tcvr_type == external) { | ||
1112 | ASD(("<external> ")); | ||
1113 | if (!(hme_read32(hp, tregs + TCVR_STATUS) >> 16)) { | ||
1114 | ASD(("<poll stop> ")); | ||
1115 | happy_meal_poll_stop(hp, tregs); | ||
1116 | hp->paddr = TCV_PADDR_ITX; | ||
1117 | hp->tcvr_type = internal; | ||
1118 | ASD(("<internal>\n")); | ||
1119 | hme_write32(hp, tregs + TCVR_CFG, | ||
1120 | hme_read32(hp, tregs + TCVR_CFG) & | ||
1121 | ~(TCV_CFG_PSELECT)); | ||
1122 | } | ||
1123 | ASD(("\n")); | ||
1124 | } else { | ||
1125 | ASD(("<none>\n")); | ||
1126 | } | ||
1127 | } | ||
1128 | } else { | ||
1129 | u32 reread = hme_read32(hp, tregs + TCVR_CFG); | ||
1130 | |||
1131 | /* Else we can just work off of the MDIO bits. */ | ||
1132 | ASD(("<not polling> ")); | ||
1133 | if (reread & TCV_CFG_MDIO1) { | ||
1134 | hme_write32(hp, tregs + TCVR_CFG, tconfig | TCV_CFG_PSELECT); | ||
1135 | hp->paddr = TCV_PADDR_ETX; | ||
1136 | hp->tcvr_type = external; | ||
1137 | ASD(("<external>\n")); | ||
1138 | } else { | ||
1139 | if (reread & TCV_CFG_MDIO0) { | ||
1140 | hme_write32(hp, tregs + TCVR_CFG, | ||
1141 | tconfig & ~(TCV_CFG_PSELECT)); | ||
1142 | hp->paddr = TCV_PADDR_ITX; | ||
1143 | hp->tcvr_type = internal; | ||
1144 | ASD(("<internal>\n")); | ||
1145 | } else { | ||
1146 | printk(KERN_ERR "happy meal: Transceiver and a coke please."); | ||
1147 | hp->tcvr_type = none; /* Grrr... */ | ||
1148 | ASD(("<none>\n")); | ||
1149 | } | ||
1150 | } | ||
1151 | } | ||
1152 | } | ||
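
/*
 * Editor's note (not driver code): in the non-polling branch above the
 * transceiver is identified purely from the MDIO sense bits in TCVR_CFG:
 *
 *	MDIO1 set   -> external PHY  (set PSELECT,   paddr = TCV_PADDR_ETX)
 *	MDIO0 set   -> internal PHY  (clear PSELECT, paddr = TCV_PADDR_ITX)
 *	neither set -> no transceiver found (tcvr_type = none)
 */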
1153 | |||
1154 | /* The receive ring buffers are a bit tricky to get right. Here goes... | ||
1155 | * | ||
1156 | * The buffers we dma into must be 64 byte aligned. So we use a special | ||
1157 | * alloc_skb() routine for the happy meal to allocate 64 bytes more than | ||
1158 | * we really need. | ||
1159 | * | ||
1160 | * We use skb_reserve() to align the data block we get in the skb. We | ||
1161 | * also program the erxregs->cfg register to use an offset of 2. This | ||
1162 | * empirical constant plus the ethernet header size will always leave | ||
1163 | * us with a nicely aligned ip header once we pass things up to the | ||
1164 | * protocol layers. | ||
1165 | * | ||
1166 | * The numbers work out to: | ||
1167 | * | ||
1168 | * Max ethernet frame size 1518 | ||
1169 | * Ethernet header size 14 | ||
1170 | * Happy Meal base offset 2 | ||
1171 | * | ||
1172 | * Say a skb data area is at 0xf001b010, and its size alloced is | ||
1173 | * (ETH_FRAME_LEN + 64 + 2) = (1514 + 64 + 2) = 1580 bytes. | ||
1174 | * | ||
1175 | * First our alloc_skb() routine aligns the data base to a 64 byte | ||
1176 | * boundary. We now have 0xf001b040 as our skb data address. We | ||
1177 | * plug this into the receive descriptor address. | ||
1178 | * | ||
1179 | * Next, we skb_reserve() 2 bytes to account for the Happy Meal offset. | ||
1180 | * So now the data we will end up looking at starts at 0xf001b042. When | ||
1181 | * the packet arrives, we will check out the size received and subtract | ||
1182 | * this from the skb->length. Then we just pass the packet up to the | ||
1183 | * protocols as is, and allocate a new skb to replace this slot we have | ||
1184 | * just received from. | ||
1185 | * | ||
1186 | * The ethernet layer will strip the ether header from the front of the | ||
1187 | * skb we just sent to it, this leaves us with the ip header sitting | ||
1188 | * nicely aligned at 0xf001b050. Also, for tcp and udp packets the | ||
1189 | * Happy Meal has even checksummed the tcp/udp data for us. The 16 | ||
1190 | * bit checksum is obtained from the low bits of the receive descriptor | ||
1191 | * flags, thus: | ||
1192 | * | ||
1193 | * skb->csum = rxd->rx_flags & 0xffff; | ||
1194 | * skb->ip_summed = CHECKSUM_COMPLETE; | ||
1195 | * | ||
1196 | * before sending off the skb to the protocols, and we are good as gold. | ||
1197 | */ | ||
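
/*
 * Editor's sketch (not driver code): the alignment arithmetic from the
 * comment above, worked through for the example address.  Only the 64-byte
 * alignment, the offset of 2 and the 14-byte ethernet header come from the
 * text; the rest is illustration.
 */
#include <stdio.h>

int main(void)
{
	unsigned long base    = 0xf001b010UL;		/* raw skb data area   */
	unsigned long aligned = (base + 63) & ~63UL;	/* -> 0xf001b040       */
	unsigned long data    = aligned + 2;		/* skb_reserve(skb, 2) */
	unsigned long ip      = data + 14;		/* past ether header   */

	printf("aligned=%#lx data=%#lx ip=%#lx (ip %% 4 == %lu)\n",
	       aligned, data, ip, ip % 4);		/* ip=0xf001b050, 0    */
	return 0;
}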
1198 | static void happy_meal_clean_rings(struct happy_meal *hp) | ||
1199 | { | ||
1200 | int i; | ||
1201 | |||
1202 | for (i = 0; i < RX_RING_SIZE; i++) { | ||
1203 | if (hp->rx_skbs[i] != NULL) { | ||
1204 | struct sk_buff *skb = hp->rx_skbs[i]; | ||
1205 | struct happy_meal_rxd *rxd; | ||
1206 | u32 dma_addr; | ||
1207 | |||
1208 | rxd = &hp->happy_block->happy_meal_rxd[i]; | ||
1209 | dma_addr = hme_read_desc32(hp, &rxd->rx_addr); | ||
1210 | dma_unmap_single(hp->dma_dev, dma_addr, | ||
1211 | RX_BUF_ALLOC_SIZE, DMA_FROM_DEVICE); | ||
1212 | dev_kfree_skb_any(skb); | ||
1213 | hp->rx_skbs[i] = NULL; | ||
1214 | } | ||
1215 | } | ||
1216 | |||
1217 | for (i = 0; i < TX_RING_SIZE; i++) { | ||
1218 | if (hp->tx_skbs[i] != NULL) { | ||
1219 | struct sk_buff *skb = hp->tx_skbs[i]; | ||
1220 | struct happy_meal_txd *txd; | ||
1221 | u32 dma_addr; | ||
1222 | int frag; | ||
1223 | |||
1224 | hp->tx_skbs[i] = NULL; | ||
1225 | |||
1226 | for (frag = 0; frag <= skb_shinfo(skb)->nr_frags; frag++) { | ||
1227 | txd = &hp->happy_block->happy_meal_txd[i]; | ||
1228 | dma_addr = hme_read_desc32(hp, &txd->tx_addr); | ||
1229 | if (!frag) | ||
1230 | dma_unmap_single(hp->dma_dev, dma_addr, | ||
1231 | (hme_read_desc32(hp, &txd->tx_flags) | ||
1232 | & TXFLAG_SIZE), | ||
1233 | DMA_TO_DEVICE); | ||
1234 | else | ||
1235 | dma_unmap_page(hp->dma_dev, dma_addr, | ||
1236 | (hme_read_desc32(hp, &txd->tx_flags) | ||
1237 | & TXFLAG_SIZE), | ||
1238 | DMA_TO_DEVICE); | ||
1239 | |||
1240 | if (frag != skb_shinfo(skb)->nr_frags) | ||
1241 | i++; | ||
1242 | } | ||
1243 | |||
1244 | dev_kfree_skb_any(skb); | ||
1245 | } | ||
1246 | } | ||
1247 | } | ||
1248 | |||
1249 | /* hp->happy_lock must be held */ | ||
1250 | static void happy_meal_init_rings(struct happy_meal *hp) | ||
1251 | { | ||
1252 | struct hmeal_init_block *hb = hp->happy_block; | ||
1253 | struct net_device *dev = hp->dev; | ||
1254 | int i; | ||
1255 | |||
1256 | HMD(("happy_meal_init_rings: counters to zero, ")); | ||
1257 | hp->rx_new = hp->rx_old = hp->tx_new = hp->tx_old = 0; | ||
1258 | |||
1259 | /* Free any skippy bufs left around in the rings. */ | ||
1260 | HMD(("clean, ")); | ||
1261 | happy_meal_clean_rings(hp); | ||
1262 | |||
1263 | /* Now get new skippy bufs for the receive ring. */ | ||
1264 | HMD(("init rxring, ")); | ||
1265 | for (i = 0; i < RX_RING_SIZE; i++) { | ||
1266 | struct sk_buff *skb; | ||
1267 | |||
1268 | skb = happy_meal_alloc_skb(RX_BUF_ALLOC_SIZE, GFP_ATOMIC); | ||
1269 | if (!skb) { | ||
1270 | hme_write_rxd(hp, &hb->happy_meal_rxd[i], 0, 0); | ||
1271 | continue; | ||
1272 | } | ||
1273 | hp->rx_skbs[i] = skb; | ||
1274 | skb->dev = dev; | ||
1275 | |||
1276 | /* Because we reserve afterwards. */ | ||
1277 | skb_put(skb, (ETH_FRAME_LEN + RX_OFFSET + 4)); | ||
1278 | hme_write_rxd(hp, &hb->happy_meal_rxd[i], | ||
1279 | (RXFLAG_OWN | ((RX_BUF_ALLOC_SIZE - RX_OFFSET) << 16)), | ||
1280 | dma_map_single(hp->dma_dev, skb->data, RX_BUF_ALLOC_SIZE, | ||
1281 | DMA_FROM_DEVICE)); | ||
1282 | skb_reserve(skb, RX_OFFSET); | ||
1283 | } | ||
1284 | |||
1285 | HMD(("init txring, ")); | ||
1286 | for (i = 0; i < TX_RING_SIZE; i++) | ||
1287 | hme_write_txd(hp, &hb->happy_meal_txd[i], 0, 0); | ||
1288 | |||
1289 | HMD(("done\n")); | ||
1290 | } | ||
1291 | |||
1292 | /* hp->happy_lock must be held */ | ||
1293 | static void happy_meal_begin_auto_negotiation(struct happy_meal *hp, | ||
1294 | void __iomem *tregs, | ||
1295 | struct ethtool_cmd *ep) | ||
1296 | { | ||
1297 | int timeout; | ||
1298 | |||
1299 | /* Read all of the registers we are interested in now. */ | ||
1300 | hp->sw_bmsr = happy_meal_tcvr_read(hp, tregs, MII_BMSR); | ||
1301 | hp->sw_bmcr = happy_meal_tcvr_read(hp, tregs, MII_BMCR); | ||
1302 | hp->sw_physid1 = happy_meal_tcvr_read(hp, tregs, MII_PHYSID1); | ||
1303 | hp->sw_physid2 = happy_meal_tcvr_read(hp, tregs, MII_PHYSID2); | ||
1304 | |||
1305 | /* XXX Check BMSR_ANEGCAPABLE, should not be necessary though. */ | ||
1306 | |||
1307 | hp->sw_advertise = happy_meal_tcvr_read(hp, tregs, MII_ADVERTISE); | ||
1308 | if (ep == NULL || ep->autoneg == AUTONEG_ENABLE) { | ||
1309 | /* Advertise everything we can support. */ | ||
1310 | if (hp->sw_bmsr & BMSR_10HALF) | ||
1311 | hp->sw_advertise |= (ADVERTISE_10HALF); | ||
1312 | else | ||
1313 | hp->sw_advertise &= ~(ADVERTISE_10HALF); | ||
1314 | |||
1315 | if (hp->sw_bmsr & BMSR_10FULL) | ||
1316 | hp->sw_advertise |= (ADVERTISE_10FULL); | ||
1317 | else | ||
1318 | hp->sw_advertise &= ~(ADVERTISE_10FULL); | ||
1319 | if (hp->sw_bmsr & BMSR_100HALF) | ||
1320 | hp->sw_advertise |= (ADVERTISE_100HALF); | ||
1321 | else | ||
1322 | hp->sw_advertise &= ~(ADVERTISE_100HALF); | ||
1323 | if (hp->sw_bmsr & BMSR_100FULL) | ||
1324 | hp->sw_advertise |= (ADVERTISE_100FULL); | ||
1325 | else | ||
1326 | hp->sw_advertise &= ~(ADVERTISE_100FULL); | ||
1327 | happy_meal_tcvr_write(hp, tregs, MII_ADVERTISE, hp->sw_advertise); | ||
1328 | |||
1329 | /* XXX Currently no Happy Meal cards I know of support 100BaseT4, | ||
1330 | * XXX and this is because the DP83840 does not support it, changes | ||
1331 | * XXX would need to be made to the tx/rx logic in the driver as well | ||
1332 | * XXX so I completely skip checking for it in the BMSR for now. | ||
1333 | */ | ||
1334 | |||
1335 | #ifdef AUTO_SWITCH_DEBUG | ||
1336 | ASD(("%s: Advertising [ ", hp->dev->name)); | ||
1337 | if (hp->sw_advertise & ADVERTISE_10HALF) | ||
1338 | ASD(("10H ")); | ||
1339 | if (hp->sw_advertise & ADVERTISE_10FULL) | ||
1340 | ASD(("10F ")); | ||
1341 | if (hp->sw_advertise & ADVERTISE_100HALF) | ||
1342 | ASD(("100H ")); | ||
1343 | if (hp->sw_advertise & ADVERTISE_100FULL) | ||
1344 | ASD(("100F ")); | ||
1345 | #endif | ||
1346 | |||
1347 | /* Enable Auto-Negotiation, this is usually on already... */ | ||
1348 | hp->sw_bmcr |= BMCR_ANENABLE; | ||
1349 | happy_meal_tcvr_write(hp, tregs, MII_BMCR, hp->sw_bmcr); | ||
1350 | |||
1351 | /* Restart it to make sure it is going. */ | ||
1352 | hp->sw_bmcr |= BMCR_ANRESTART; | ||
1353 | happy_meal_tcvr_write(hp, tregs, MII_BMCR, hp->sw_bmcr); | ||
1354 | |||
1355 | /* BMCR_ANRESTART self clears when the process has begun. */ | ||
1356 | |||
1357 | timeout = 64; /* More than enough. */ | ||
1358 | while (--timeout) { | ||
1359 | hp->sw_bmcr = happy_meal_tcvr_read(hp, tregs, MII_BMCR); | ||
1360 | if (!(hp->sw_bmcr & BMCR_ANRESTART)) | ||
1361 | break; /* got it. */ | ||
1362 | udelay(10); | ||
1363 | } | ||
1364 | if (!timeout) { | ||
1365 | printk(KERN_ERR "%s: Happy Meal would not start auto negotiation, " | ||
1366 | "BMCR=0x%04x\n", hp->dev->name, hp->sw_bmcr); | ||
1367 | printk(KERN_NOTICE "%s: Performing force link detection.\n", | ||
1368 | hp->dev->name); | ||
1369 | goto force_link; | ||
1370 | } else { | ||
1371 | hp->timer_state = arbwait; | ||
1372 | } | ||
1373 | } else { | ||
1374 | force_link: | ||
1375 | /* Force the link up, trying first a particular mode. | ||
1376 | * Either we are here at the request of ethtool or | ||
1377 | * because the Happy Meal would not start to autoneg. | ||
1378 | */ | ||
1379 | |||
1380 | /* Disable auto-negotiation in BMCR, enable the duplex and | ||
1381 | * speed setting, init the timer state machine, and fire it off. | ||
1382 | */ | ||
1383 | if (ep == NULL || ep->autoneg == AUTONEG_ENABLE) { | ||
1384 | hp->sw_bmcr = BMCR_SPEED100; | ||
1385 | } else { | ||
1386 | if (ethtool_cmd_speed(ep) == SPEED_100) | ||
1387 | hp->sw_bmcr = BMCR_SPEED100; | ||
1388 | else | ||
1389 | hp->sw_bmcr = 0; | ||
1390 | if (ep->duplex == DUPLEX_FULL) | ||
1391 | hp->sw_bmcr |= BMCR_FULLDPLX; | ||
1392 | } | ||
1393 | happy_meal_tcvr_write(hp, tregs, MII_BMCR, hp->sw_bmcr); | ||
1394 | |||
1395 | if (!is_lucent_phy(hp)) { | ||
1396 | /* OK, it seems we need to disable the transceiver for the first | ||
1397 | * tick to make sure we get an accurate link state at the | ||
1398 | * second tick. | ||
1399 | */ | ||
1400 | hp->sw_csconfig = happy_meal_tcvr_read(hp, tregs, | ||
1401 | DP83840_CSCONFIG); | ||
1402 | hp->sw_csconfig &= ~(CSCONFIG_TCVDISAB); | ||
1403 | happy_meal_tcvr_write(hp, tregs, DP83840_CSCONFIG, | ||
1404 | hp->sw_csconfig); | ||
1405 | } | ||
1406 | hp->timer_state = ltrywait; | ||
1407 | } | ||
1408 | |||
1409 | hp->timer_ticks = 0; | ||
1410 | hp->happy_timer.expires = jiffies + (12 * HZ)/10; /* 1.2 sec. */ | ||
1411 | hp->happy_timer.data = (unsigned long) hp; | ||
1412 | hp->happy_timer.function = happy_meal_timer; | ||
1413 | add_timer(&hp->happy_timer); | ||
1414 | } | ||
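
/*
 * Editor's sketch (not driver code): the BMSR -> ADVERTISE mirroring done
 * above (and repeated in happy_meal_set_initial_advertisement below), as one
 * table-driven helper.  Bit names are the standard <linux/mii.h> ones the
 * driver already uses; this is a refactoring illustration only.
 */
static u16 mirror_bmsr_to_advertise(u16 bmsr, u16 adv)
{
	static const struct { u16 bmsr_bit, adv_bit; } map[] = {
		{ BMSR_10HALF,  ADVERTISE_10HALF  },
		{ BMSR_10FULL,  ADVERTISE_10FULL  },
		{ BMSR_100HALF, ADVERTISE_100HALF },
		{ BMSR_100FULL, ADVERTISE_100FULL },
	};
	int i;

	for (i = 0; i < 4; i++) {
		if (bmsr & map[i].bmsr_bit)
			adv |= map[i].adv_bit;
		else
			adv &= ~map[i].adv_bit;
	}
	return adv;
}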
1415 | |||
1416 | /* hp->happy_lock must be held */ | ||
1417 | static int happy_meal_init(struct happy_meal *hp) | ||
1418 | { | ||
1419 | void __iomem *gregs = hp->gregs; | ||
1420 | void __iomem *etxregs = hp->etxregs; | ||
1421 | void __iomem *erxregs = hp->erxregs; | ||
1422 | void __iomem *bregs = hp->bigmacregs; | ||
1423 | void __iomem *tregs = hp->tcvregs; | ||
1424 | u32 regtmp, rxcfg; | ||
1425 | unsigned char *e = &hp->dev->dev_addr[0]; | ||
1426 | |||
1427 | /* If auto-negotiation timer is running, kill it. */ | ||
1428 | del_timer(&hp->happy_timer); | ||
1429 | |||
1430 | HMD(("happy_meal_init: happy_flags[%08x] ", | ||
1431 | hp->happy_flags)); | ||
1432 | if (!(hp->happy_flags & HFLAG_INIT)) { | ||
1433 | HMD(("set HFLAG_INIT, ")); | ||
1434 | hp->happy_flags |= HFLAG_INIT; | ||
1435 | happy_meal_get_counters(hp, bregs); | ||
1436 | } | ||
1437 | |||
1438 | /* Stop polling. */ | ||
1439 | HMD(("to happy_meal_poll_stop\n")); | ||
1440 | happy_meal_poll_stop(hp, tregs); | ||
1441 | |||
1442 | /* Stop transmitter and receiver. */ | ||
1443 | HMD(("happy_meal_init: to happy_meal_stop\n")); | ||
1444 | happy_meal_stop(hp, gregs); | ||
1445 | |||
1446 | /* Alloc and reset the tx/rx descriptor chains. */ | ||
1447 | HMD(("happy_meal_init: to happy_meal_init_rings\n")); | ||
1448 | happy_meal_init_rings(hp); | ||
1449 | |||
1450 | /* Shut up the MIF. */ | ||
1451 | HMD(("happy_meal_init: Disable all MIF irqs (old[%08x]), ", | ||
1452 | hme_read32(hp, tregs + TCVR_IMASK))); | ||
1453 | hme_write32(hp, tregs + TCVR_IMASK, 0xffff); | ||
1454 | |||
1455 | /* See if we can enable the MIF frame on this card to speak to the DP83840. */ | ||
1456 | if (hp->happy_flags & HFLAG_FENABLE) { | ||
1457 | HMD(("use frame old[%08x], ", | ||
1458 | hme_read32(hp, tregs + TCVR_CFG))); | ||
1459 | hme_write32(hp, tregs + TCVR_CFG, | ||
1460 | hme_read32(hp, tregs + TCVR_CFG) & ~(TCV_CFG_BENABLE)); | ||
1461 | } else { | ||
1462 | HMD(("use bitbang old[%08x], ", | ||
1463 | hme_read32(hp, tregs + TCVR_CFG))); | ||
1464 | hme_write32(hp, tregs + TCVR_CFG, | ||
1465 | hme_read32(hp, tregs + TCVR_CFG) | TCV_CFG_BENABLE); | ||
1466 | } | ||
1467 | |||
1468 | /* Check the state of the transceiver. */ | ||
1469 | HMD(("to happy_meal_transceiver_check\n")); | ||
1470 | happy_meal_transceiver_check(hp, tregs); | ||
1471 | |||
1472 | /* Put the Big Mac into a sane state. */ | ||
1473 | HMD(("happy_meal_init: ")); | ||
1474 | switch(hp->tcvr_type) { | ||
1475 | case none: | ||
1476 | /* Cannot operate if we don't know the transceiver type! */ | ||
1477 | HMD(("AAIEEE no transceiver type, EAGAIN")); | ||
1478 | return -EAGAIN; | ||
1479 | |||
1480 | case internal: | ||
1481 | /* Using the MII buffers. */ | ||
1482 | HMD(("internal, using MII, ")); | ||
1483 | hme_write32(hp, bregs + BMAC_XIFCFG, 0); | ||
1484 | break; | ||
1485 | |||
1486 | case external: | ||
1487 | /* Not using the MII, disable it. */ | ||
1488 | HMD(("external, disable MII, ")); | ||
1489 | hme_write32(hp, bregs + BMAC_XIFCFG, BIGMAC_XCFG_MIIDISAB); | ||
1490 | break; | ||
1491 | } | ||
1492 | |||
1493 | if (happy_meal_tcvr_reset(hp, tregs)) | ||
1494 | return -EAGAIN; | ||
1495 | |||
1496 | /* Reset the Happy Meal Big Mac transceiver and the receiver. */ | ||
1497 | HMD(("tx/rx reset, ")); | ||
1498 | happy_meal_tx_reset(hp, bregs); | ||
1499 | happy_meal_rx_reset(hp, bregs); | ||
1500 | |||
1501 | /* Set jam size and inter-packet gaps to reasonable defaults. */ | ||
1502 | HMD(("jsize/ipg1/ipg2, ")); | ||
1503 | hme_write32(hp, bregs + BMAC_JSIZE, DEFAULT_JAMSIZE); | ||
1504 | hme_write32(hp, bregs + BMAC_IGAP1, DEFAULT_IPG1); | ||
1505 | hme_write32(hp, bregs + BMAC_IGAP2, DEFAULT_IPG2); | ||
1506 | |||
1507 | /* Load up the MAC address and random seed. */ | ||
1508 | HMD(("rseed/macaddr, ")); | ||
1509 | |||
1510 | /* The docs recommend using the 10 LSBs of our MAC here. */ | ||
1511 | hme_write32(hp, bregs + BMAC_RSEED, ((e[5] | e[4]<<8)&0x3ff)); | ||
1512 | |||
1513 | hme_write32(hp, bregs + BMAC_MACADDR2, ((e[4] << 8) | e[5])); | ||
1514 | hme_write32(hp, bregs + BMAC_MACADDR1, ((e[2] << 8) | e[3])); | ||
1515 | hme_write32(hp, bregs + BMAC_MACADDR0, ((e[0] << 8) | e[1])); | ||
1516 | |||
1517 | HMD(("htable, ")); | ||
1518 | if ((hp->dev->flags & IFF_ALLMULTI) || | ||
1519 | (netdev_mc_count(hp->dev) > 64)) { | ||
1520 | hme_write32(hp, bregs + BMAC_HTABLE0, 0xffff); | ||
1521 | hme_write32(hp, bregs + BMAC_HTABLE1, 0xffff); | ||
1522 | hme_write32(hp, bregs + BMAC_HTABLE2, 0xffff); | ||
1523 | hme_write32(hp, bregs + BMAC_HTABLE3, 0xffff); | ||
1524 | } else if ((hp->dev->flags & IFF_PROMISC) == 0) { | ||
1525 | u16 hash_table[4]; | ||
1526 | struct netdev_hw_addr *ha; | ||
1527 | u32 crc; | ||
1528 | |||
1529 | memset(hash_table, 0, sizeof(hash_table)); | ||
1530 | netdev_for_each_mc_addr(ha, hp->dev) { | ||
1531 | crc = ether_crc_le(6, ha->addr); | ||
1532 | crc >>= 26; | ||
1533 | hash_table[crc >> 4] |= 1 << (crc & 0xf); | ||
1534 | } | ||
1535 | hme_write32(hp, bregs + BMAC_HTABLE0, hash_table[0]); | ||
1536 | hme_write32(hp, bregs + BMAC_HTABLE1, hash_table[1]); | ||
1537 | hme_write32(hp, bregs + BMAC_HTABLE2, hash_table[2]); | ||
1538 | hme_write32(hp, bregs + BMAC_HTABLE3, hash_table[3]); | ||
1539 | } else { | ||
1540 | hme_write32(hp, bregs + BMAC_HTABLE3, 0); | ||
1541 | hme_write32(hp, bregs + BMAC_HTABLE2, 0); | ||
1542 | hme_write32(hp, bregs + BMAC_HTABLE1, 0); | ||
1543 | hme_write32(hp, bregs + BMAC_HTABLE0, 0); | ||
1544 | } | ||
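
	/*
	 * Editor's note (not driver code): worked example of the mapping
	 * above.  ether_crc_le() is the little-endian (reflected) CRC-32 of
	 * the 6 address bytes; its top six bits (crc >> 26) index one of 64
	 * filter bits.  E.g. if crc >> 26 == 0x2b (binary 10 1011):
	 *
	 *	crc >> 4  == 2    -> the bit lives in BMAC_HTABLE2
	 *	crc & 0xf == 0xb  -> hash_table[2] |= 1 << 11
	 *
	 * so a frame to that address passes the filter iff bit 11 of
	 * HTABLE2 is set.
	 */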
1545 | |||
1546 | /* Set the RX and TX ring ptrs. */ | ||
1547 | HMD(("ring ptrs rxr[%08x] txr[%08x]\n", | ||
1548 | ((__u32)hp->hblock_dvma + hblock_offset(happy_meal_rxd, 0)), | ||
1549 | ((__u32)hp->hblock_dvma + hblock_offset(happy_meal_txd, 0)))); | ||
1550 | hme_write32(hp, erxregs + ERX_RING, | ||
1551 | ((__u32)hp->hblock_dvma + hblock_offset(happy_meal_rxd, 0))); | ||
1552 | hme_write32(hp, etxregs + ETX_RING, | ||
1553 | ((__u32)hp->hblock_dvma + hblock_offset(happy_meal_txd, 0))); | ||
1554 | |||
1555 | /* Parity issues in the ERX unit of some HME revisions can cause some | ||
1556 | * registers to not be written unless their parity is even. Detect such | ||
1557 | * lost writes and simply rewrite with a low bit set (which will be ignored | ||
1558 | * since the rxring needs to be 2K aligned). | ||
1559 | */ | ||
1560 | if (hme_read32(hp, erxregs + ERX_RING) != | ||
1561 | ((__u32)hp->hblock_dvma + hblock_offset(happy_meal_rxd, 0))) | ||
1562 | hme_write32(hp, erxregs + ERX_RING, | ||
1563 | ((__u32)hp->hblock_dvma + hblock_offset(happy_meal_rxd, 0)) | ||
1564 | | 0x4); | ||
1565 | |||
1566 | /* Set the supported burst sizes. */ | ||
1567 | HMD(("happy_meal_init: old[%08x] bursts<", | ||
1568 | hme_read32(hp, gregs + GREG_CFG))); | ||
1569 | |||
1570 | #ifndef CONFIG_SPARC | ||
1571 | /* It is always PCI and can handle 64byte bursts. */ | ||
1572 | hme_write32(hp, gregs + GREG_CFG, GREG_CFG_BURST64); | ||
1573 | #else | ||
1574 | if ((hp->happy_bursts & DMA_BURST64) && | ||
1575 | ((hp->happy_flags & HFLAG_PCI) != 0 | ||
1576 | #ifdef CONFIG_SBUS | ||
1577 | || sbus_can_burst64() | ||
1578 | #endif | ||
1579 | || 0)) { | ||
1580 | u32 gcfg = GREG_CFG_BURST64; | ||
1581 | |||
1582 | /* I have no idea if I should set the extended | ||
1583 | * transfer mode bit for Cheerio, so for now I | ||
1584 | * do not. -DaveM | ||
1585 | */ | ||
1586 | #ifdef CONFIG_SBUS | ||
1587 | if ((hp->happy_flags & HFLAG_PCI) == 0) { | ||
1588 | struct platform_device *op = hp->happy_dev; | ||
1589 | if (sbus_can_dma_64bit()) { | ||
1590 | sbus_set_sbus64(&op->dev, | ||
1591 | hp->happy_bursts); | ||
1592 | gcfg |= GREG_CFG_64BIT; | ||
1593 | } | ||
1594 | } | ||
1595 | #endif | ||
1596 | |||
1597 | HMD(("64>")); | ||
1598 | hme_write32(hp, gregs + GREG_CFG, gcfg); | ||
1599 | } else if (hp->happy_bursts & DMA_BURST32) { | ||
1600 | HMD(("32>")); | ||
1601 | hme_write32(hp, gregs + GREG_CFG, GREG_CFG_BURST32); | ||
1602 | } else if (hp->happy_bursts & DMA_BURST16) { | ||
1603 | HMD(("16>")); | ||
1604 | hme_write32(hp, gregs + GREG_CFG, GREG_CFG_BURST16); | ||
1605 | } else { | ||
1606 | HMD(("XXX>")); | ||
1607 | hme_write32(hp, gregs + GREG_CFG, 0); | ||
1608 | } | ||
1609 | #endif /* CONFIG_SPARC */ | ||
1610 | |||
1611 | /* Turn off interrupts we do not want to hear. */ | ||
1612 | HMD((", enable global interrupts, ")); | ||
1613 | hme_write32(hp, gregs + GREG_IMASK, | ||
1614 | (GREG_IMASK_GOTFRAME | GREG_IMASK_RCNTEXP | | ||
1615 | GREG_IMASK_SENTFRAME | GREG_IMASK_TXPERR)); | ||
1616 | |||
1617 | /* Set the transmit ring buffer size. */ | ||
1618 | HMD(("tx rsize=%d oreg[%08x], ", (int)TX_RING_SIZE, | ||
1619 | hme_read32(hp, etxregs + ETX_RSIZE))); | ||
1620 | hme_write32(hp, etxregs + ETX_RSIZE, (TX_RING_SIZE >> ETX_RSIZE_SHIFT) - 1); | ||
1621 | |||
1622 | /* Enable transmitter DVMA. */ | ||
1623 | HMD(("tx dma enable old[%08x], ", | ||
1624 | hme_read32(hp, etxregs + ETX_CFG))); | ||
1625 | hme_write32(hp, etxregs + ETX_CFG, | ||
1626 | hme_read32(hp, etxregs + ETX_CFG) | ETX_CFG_DMAENABLE); | ||
1627 | |||
1628 | /* This chip really rots: sometimes when you write to the | ||
1629 | * receiver's control registers, not all the bits get there | ||
1630 | * properly. I cannot think of a sane way to provide complete | ||
1631 | * coverage for this hardware bug yet. | ||
1632 | */ | ||
1633 | HMD(("erx regs bug old[%08x]\n", | ||
1634 | hme_read32(hp, erxregs + ERX_CFG))); | ||
1635 | hme_write32(hp, erxregs + ERX_CFG, ERX_CFG_DEFAULT(RX_OFFSET)); | ||
1636 | regtmp = hme_read32(hp, erxregs + ERX_CFG); | ||
1637 | hme_write32(hp, erxregs + ERX_CFG, ERX_CFG_DEFAULT(RX_OFFSET)); | ||
1638 | if (hme_read32(hp, erxregs + ERX_CFG) != ERX_CFG_DEFAULT(RX_OFFSET)) { | ||
1639 | printk(KERN_ERR "happy meal: Eieee, rx config register gets greasy fries.\n"); | ||
1640 | printk(KERN_ERR "happy meal: Trying to set %08x, reread gives %08x\n", | ||
1641 | ERX_CFG_DEFAULT(RX_OFFSET), regtmp); | ||
1642 | /* XXX Should return failure here... */ | ||
1643 | } | ||
1644 | |||
1645 | /* Enable Big Mac hash table filter. */ | ||
1646 | HMD(("happy_meal_init: enable hash rx_cfg_old[%08x], ", | ||
1647 | hme_read32(hp, bregs + BMAC_RXCFG))); | ||
1648 | rxcfg = BIGMAC_RXCFG_HENABLE | BIGMAC_RXCFG_REJME; | ||
1649 | if (hp->dev->flags & IFF_PROMISC) | ||
1650 | rxcfg |= BIGMAC_RXCFG_PMISC; | ||
1651 | hme_write32(hp, bregs + BMAC_RXCFG, rxcfg); | ||
1652 | |||
1653 | /* Let the bits settle in the chip. */ | ||
1654 | udelay(10); | ||
1655 | |||
1656 | /* Ok, configure the Big Mac transmitter. */ | ||
1657 | HMD(("BIGMAC init, ")); | ||
1658 | regtmp = 0; | ||
1659 | if (hp->happy_flags & HFLAG_FULL) | ||
1660 | regtmp |= BIGMAC_TXCFG_FULLDPLX; | ||
1661 | |||
1662 | /* Don't turn on the "don't give up" bit for now. It could cause hme | ||
1663 | * to deadlock with the PHY if a Jabber occurs. | ||
1664 | */ | ||
1665 | hme_write32(hp, bregs + BMAC_TXCFG, regtmp /*| BIGMAC_TXCFG_DGIVEUP*/); | ||
1666 | |||
1667 | /* Give up after 16 TX attempts. */ | ||
1668 | hme_write32(hp, bregs + BMAC_ALIMIT, 16); | ||
1669 | |||
1670 | /* Enable the output drivers no matter what. */ | ||
1671 | regtmp = BIGMAC_XCFG_ODENABLE; | ||
1672 | |||
1673 | /* If card can do lance mode, enable it. */ | ||
1674 | if (hp->happy_flags & HFLAG_LANCE) | ||
1675 | regtmp |= (DEFAULT_IPG0 << 5) | BIGMAC_XCFG_LANCE; | ||
1676 | |||
1677 | /* Disable the MII buffers if using external transceiver. */ | ||
1678 | if (hp->tcvr_type == external) | ||
1679 | regtmp |= BIGMAC_XCFG_MIIDISAB; | ||
1680 | |||
1681 | HMD(("XIF config old[%08x], ", | ||
1682 | hme_read32(hp, bregs + BMAC_XIFCFG))); | ||
1683 | hme_write32(hp, bregs + BMAC_XIFCFG, regtmp); | ||
1684 | |||
1685 | /* Start things up. */ | ||
1686 | HMD(("tx old[%08x] and rx [%08x] ON!\n", | ||
1687 | hme_read32(hp, bregs + BMAC_TXCFG), | ||
1688 | hme_read32(hp, bregs + BMAC_RXCFG))); | ||
1689 | |||
1690 | /* Set larger TX/RX size to allow for 802.1q */ | ||
1691 | hme_write32(hp, bregs + BMAC_TXMAX, ETH_FRAME_LEN + 8); | ||
1692 | hme_write32(hp, bregs + BMAC_RXMAX, ETH_FRAME_LEN + 8); | ||
1693 | |||
1694 | hme_write32(hp, bregs + BMAC_TXCFG, | ||
1695 | hme_read32(hp, bregs + BMAC_TXCFG) | BIGMAC_TXCFG_ENABLE); | ||
1696 | hme_write32(hp, bregs + BMAC_RXCFG, | ||
1697 | hme_read32(hp, bregs + BMAC_RXCFG) | BIGMAC_RXCFG_ENABLE); | ||
1698 | |||
1699 | /* Get the autonegotiation started, and the watch timer ticking. */ | ||
1700 | happy_meal_begin_auto_negotiation(hp, tregs, NULL); | ||
1701 | |||
1702 | /* Success. */ | ||
1703 | return 0; | ||
1704 | } | ||
1705 | |||
1706 | /* hp->happy_lock must be held */ | ||
1707 | static void happy_meal_set_initial_advertisement(struct happy_meal *hp) | ||
1708 | { | ||
1709 | void __iomem *tregs = hp->tcvregs; | ||
1710 | void __iomem *bregs = hp->bigmacregs; | ||
1711 | void __iomem *gregs = hp->gregs; | ||
1712 | |||
1713 | happy_meal_stop(hp, gregs); | ||
1714 | hme_write32(hp, tregs + TCVR_IMASK, 0xffff); | ||
1715 | if (hp->happy_flags & HFLAG_FENABLE) | ||
1716 | hme_write32(hp, tregs + TCVR_CFG, | ||
1717 | hme_read32(hp, tregs + TCVR_CFG) & ~(TCV_CFG_BENABLE)); | ||
1718 | else | ||
1719 | hme_write32(hp, tregs + TCVR_CFG, | ||
1720 | hme_read32(hp, tregs + TCVR_CFG) | TCV_CFG_BENABLE); | ||
1721 | happy_meal_transceiver_check(hp, tregs); | ||
1722 | switch(hp->tcvr_type) { | ||
1723 | case none: | ||
1724 | return; | ||
1725 | case internal: | ||
1726 | hme_write32(hp, bregs + BMAC_XIFCFG, 0); | ||
1727 | break; | ||
1728 | case external: | ||
1729 | hme_write32(hp, bregs + BMAC_XIFCFG, BIGMAC_XCFG_MIIDISAB); | ||
1730 | break; | ||
1731 | } | ||
1732 | if (happy_meal_tcvr_reset(hp, tregs)) | ||
1733 | return; | ||
1734 | |||
1735 | /* Latch PHY registers as of now. */ | ||
1736 | hp->sw_bmsr = happy_meal_tcvr_read(hp, tregs, MII_BMSR); | ||
1737 | hp->sw_advertise = happy_meal_tcvr_read(hp, tregs, MII_ADVERTISE); | ||
1738 | |||
1739 | /* Advertise everything we can support. */ | ||
1740 | if (hp->sw_bmsr & BMSR_10HALF) | ||
1741 | hp->sw_advertise |= (ADVERTISE_10HALF); | ||
1742 | else | ||
1743 | hp->sw_advertise &= ~(ADVERTISE_10HALF); | ||
1744 | |||
1745 | if (hp->sw_bmsr & BMSR_10FULL) | ||
1746 | hp->sw_advertise |= (ADVERTISE_10FULL); | ||
1747 | else | ||
1748 | hp->sw_advertise &= ~(ADVERTISE_10FULL); | ||
1749 | if (hp->sw_bmsr & BMSR_100HALF) | ||
1750 | hp->sw_advertise |= (ADVERTISE_100HALF); | ||
1751 | else | ||
1752 | hp->sw_advertise &= ~(ADVERTISE_100HALF); | ||
1753 | if (hp->sw_bmsr & BMSR_100FULL) | ||
1754 | hp->sw_advertise |= (ADVERTISE_100FULL); | ||
1755 | else | ||
1756 | hp->sw_advertise &= ~(ADVERTISE_100FULL); | ||
1757 | |||
1758 | /* Update the PHY advertisement register. */ | ||
1759 | happy_meal_tcvr_write(hp, tregs, MII_ADVERTISE, hp->sw_advertise); | ||
1760 | } | ||
1761 | |||
1762 | /* Once status is latched (by happy_meal_interrupt) it is cleared by | ||
1763 | * the hardware, so we cannot re-read it and get a correct value. | ||
1764 | * | ||
1765 | * hp->happy_lock must be held | ||
1766 | */ | ||
1767 | static int happy_meal_is_not_so_happy(struct happy_meal *hp, u32 status) | ||
1768 | { | ||
1769 | int reset = 0; | ||
1770 | |||
1771 | /* Only print messages for non-counter related interrupts. */ | ||
1772 | if (status & (GREG_STAT_STSTERR | GREG_STAT_TFIFO_UND | | ||
1773 | GREG_STAT_MAXPKTERR | GREG_STAT_RXERR | | ||
1774 | GREG_STAT_RXPERR | GREG_STAT_RXTERR | GREG_STAT_EOPERR | | ||
1775 | GREG_STAT_MIFIRQ | GREG_STAT_TXEACK | GREG_STAT_TXLERR | | ||
1776 | GREG_STAT_TXPERR | GREG_STAT_TXTERR | GREG_STAT_SLVERR | | ||
1777 | GREG_STAT_SLVPERR)) | ||
1778 | printk(KERN_ERR "%s: Error interrupt for happy meal, status = %08x\n", | ||
1779 | hp->dev->name, status); | ||
1780 | |||
1781 | if (status & GREG_STAT_RFIFOVF) { | ||
1782 | /* Receive FIFO overflow is harmless and the hardware will take | ||
1783 | care of it, just some packets are lost. Who cares. */ | ||
1784 | printk(KERN_DEBUG "%s: Happy Meal receive FIFO overflow.\n", hp->dev->name); | ||
1785 | } | ||
1786 | |||
1787 | if (status & GREG_STAT_STSTERR) { | ||
1788 | /* BigMAC SQE link test failed. */ | ||
1789 | printk(KERN_ERR "%s: Happy Meal BigMAC SQE test failed.\n", hp->dev->name); | ||
1790 | reset = 1; | ||
1791 | } | ||
1792 | |||
1793 | if (status & GREG_STAT_TFIFO_UND) { | ||
1794 | /* Transmit FIFO underrun, again DMA error likely. */ | ||
1795 | printk(KERN_ERR "%s: Happy Meal transmitter FIFO underrun, DMA error.\n", | ||
1796 | hp->dev->name); | ||
1797 | reset = 1; | ||
1798 | } | ||
1799 | |||
1800 | if (status & GREG_STAT_MAXPKTERR) { | ||
1801 | /* Driver error, tried to transmit something larger | ||
1802 | * than ethernet max mtu. | ||
1803 | */ | ||
1804 | printk(KERN_ERR "%s: Happy Meal MAX Packet size error.\n", hp->dev->name); | ||
1805 | reset = 1; | ||
1806 | } | ||
1807 | |||
1808 | if (status & GREG_STAT_NORXD) { | ||
1809 | /* This is harmless, it just means the system is | ||
1810 | * quite loaded and the incoming packet rate was | ||
1811 | * faster than the interrupt handler could keep up | ||
1812 | * with. | ||
1813 | */ | ||
1814 | printk(KERN_INFO "%s: Happy Meal out of receive " | ||
1815 | "descriptors, packet dropped.\n", | ||
1816 | hp->dev->name); | ||
1817 | } | ||
1818 | |||
1819 | if (status & (GREG_STAT_RXERR|GREG_STAT_RXPERR|GREG_STAT_RXTERR)) { | ||
1820 | /* All sorts of DMA receive errors. */ | ||
1821 | printk(KERN_ERR "%s: Happy Meal rx DMA errors [ ", hp->dev->name); | ||
1822 | if (status & GREG_STAT_RXERR) | ||
1823 | printk("GenericError "); | ||
1824 | if (status & GREG_STAT_RXPERR) | ||
1825 | printk("ParityError "); | ||
1826 | if (status & GREG_STAT_RXTERR) | ||
1827 | printk("RxTagBotch "); | ||
1828 | printk("]\n"); | ||
1829 | reset = 1; | ||
1830 | } | ||
1831 | |||
1832 | if (status & GREG_STAT_EOPERR) { | ||
1833 | /* Driver bug, didn't set EOP bit in tx descriptor given | ||
1834 | * to the happy meal. | ||
1835 | */ | ||
1836 | printk(KERN_ERR "%s: EOP not set in happy meal transmit descriptor!\n", | ||
1837 | hp->dev->name); | ||
1838 | reset = 1; | ||
1839 | } | ||
1840 | |||
1841 | if (status & GREG_STAT_MIFIRQ) { | ||
1842 | /* MIF signalled an interrupt, were we polling it? */ | ||
1843 | printk(KERN_ERR "%s: Happy Meal MIF interrupt.\n", hp->dev->name); | ||
1844 | } | ||
1845 | |||
1846 | if (status & | ||
1847 | (GREG_STAT_TXEACK|GREG_STAT_TXLERR|GREG_STAT_TXPERR|GREG_STAT_TXTERR)) { | ||
1848 | /* All sorts of transmit DMA errors. */ | ||
1849 | printk(KERN_ERR "%s: Happy Meal tx DMA errors [ ", hp->dev->name); | ||
1850 | if (status & GREG_STAT_TXEACK) | ||
1851 | printk("GenericError "); | ||
1852 | if (status & GREG_STAT_TXLERR) | ||
1853 | printk("LateError "); | ||
1854 | if (status & GREG_STAT_TXPERR) | ||
1855 | printk("ParityErro "); | ||
1856 | if (status & GREG_STAT_TXTERR) | ||
1857 | printk("TagBotch "); | ||
1858 | printk("]\n"); | ||
1859 | reset = 1; | ||
1860 | } | ||
1861 | |||
1862 | if (status & (GREG_STAT_SLVERR|GREG_STAT_SLVPERR)) { | ||
1863 | /* Bus or parity error when cpu accessed happy meal registers | ||
1864 | * or its internal FIFOs. Should never see this. | ||
1865 | */ | ||
1866 | printk(KERN_ERR "%s: Happy Meal register access SBUS slave (%s) error.\n", | ||
1867 | hp->dev->name, | ||
1868 | (status & GREG_STAT_SLVPERR) ? "parity" : "generic"); | ||
1869 | reset = 1; | ||
1870 | } | ||
1871 | |||
1872 | if (reset) { | ||
1873 | printk(KERN_NOTICE "%s: Resetting...\n", hp->dev->name); | ||
1874 | happy_meal_init(hp); | ||
1875 | return 1; | ||
1876 | } | ||
1877 | return 0; | ||
1878 | } | ||
1879 | |||
1880 | /* hp->happy_lock must be held */ | ||
1881 | static void happy_meal_mif_interrupt(struct happy_meal *hp) | ||
1882 | { | ||
1883 | void __iomem *tregs = hp->tcvregs; | ||
1884 | |||
1885 | printk(KERN_INFO "%s: Link status change.\n", hp->dev->name); | ||
1886 | hp->sw_bmcr = happy_meal_tcvr_read(hp, tregs, MII_BMCR); | ||
1887 | hp->sw_lpa = happy_meal_tcvr_read(hp, tregs, MII_LPA); | ||
1888 | |||
1889 | /* Use the fastest transmission protocol possible. */ | ||
1890 | if (hp->sw_lpa & LPA_100FULL) { | ||
1891 | printk(KERN_INFO "%s: Switching to 100Mbps at full duplex.\n", hp->dev->name); | ||
1892 | hp->sw_bmcr |= (BMCR_FULLDPLX | BMCR_SPEED100); | ||
1893 | } else if (hp->sw_lpa & LPA_100HALF) { | ||
1894 | printk(KERN_INFO "%s: Switching to 100Mbps at half duplex.\n", hp->dev->name); | ||
1895 | hp->sw_bmcr |= BMCR_SPEED100; | ||
1896 | } else if (hp->sw_lpa & LPA_10FULL) { | ||
1897 | printk(KERN_INFO "%s: Switching to 10Mbps at full duplex.\n", hp->dev->name); | ||
1898 | hp->sw_bmcr |= BMCR_FULLDPLX; | ||
1899 | } else { | ||
1900 | printk(KERN_INFO "%s: Using 10Mbps at half duplex.\n", hp->dev->name); | ||
1901 | } | ||
1902 | happy_meal_tcvr_write(hp, tregs, MII_BMCR, hp->sw_bmcr); | ||
1903 | |||
1904 | /* Finally stop polling and shut up the MIF. */ | ||
1905 | happy_meal_poll_stop(hp, tregs); | ||
1906 | } | ||
1907 | |||
1908 | #ifdef TXDEBUG | ||
1909 | #define TXD(x) printk x | ||
1910 | #else | ||
1911 | #define TXD(x) | ||
1912 | #endif | ||
1913 | |||
1914 | /* hp->happy_lock must be held */ | ||
1915 | static void happy_meal_tx(struct happy_meal *hp) | ||
1916 | { | ||
1917 | struct happy_meal_txd *txbase = &hp->happy_block->happy_meal_txd[0]; | ||
1918 | struct happy_meal_txd *this; | ||
1919 | struct net_device *dev = hp->dev; | ||
1920 | int elem; | ||
1921 | |||
1922 | elem = hp->tx_old; | ||
1923 | TXD(("TX<")); | ||
1924 | while (elem != hp->tx_new) { | ||
1925 | struct sk_buff *skb; | ||
1926 | u32 flags, dma_addr, dma_len; | ||
1927 | int frag; | ||
1928 | |||
1929 | TXD(("[%d]", elem)); | ||
1930 | this = &txbase[elem]; | ||
1931 | flags = hme_read_desc32(hp, &this->tx_flags); | ||
1932 | if (flags & TXFLAG_OWN) | ||
1933 | break; | ||
1934 | skb = hp->tx_skbs[elem]; | ||
1935 | if (skb_shinfo(skb)->nr_frags) { | ||
1936 | int last; | ||
1937 | |||
1938 | last = elem + skb_shinfo(skb)->nr_frags; | ||
1939 | last &= (TX_RING_SIZE - 1); | ||
1940 | flags = hme_read_desc32(hp, &txbase[last].tx_flags); | ||
1941 | if (flags & TXFLAG_OWN) | ||
1942 | break; | ||
1943 | } | ||
1944 | hp->tx_skbs[elem] = NULL; | ||
1945 | hp->net_stats.tx_bytes += skb->len; | ||
1946 | |||
1947 | for (frag = 0; frag <= skb_shinfo(skb)->nr_frags; frag++) { | ||
1948 | dma_addr = hme_read_desc32(hp, &this->tx_addr); | ||
1949 | dma_len = hme_read_desc32(hp, &this->tx_flags); | ||
1950 | |||
1951 | dma_len &= TXFLAG_SIZE; | ||
1952 | if (!frag) | ||
1953 | dma_unmap_single(hp->dma_dev, dma_addr, dma_len, DMA_TO_DEVICE); | ||
1954 | else | ||
1955 | dma_unmap_page(hp->dma_dev, dma_addr, dma_len, DMA_TO_DEVICE); | ||
1956 | |||
1957 | elem = NEXT_TX(elem); | ||
1958 | this = &txbase[elem]; | ||
1959 | } | ||
1960 | |||
1961 | dev_kfree_skb_irq(skb); | ||
1962 | hp->net_stats.tx_packets++; | ||
1963 | } | ||
1964 | hp->tx_old = elem; | ||
1965 | TXD((">")); | ||
1966 | |||
1967 | if (netif_queue_stopped(dev) && | ||
1968 | TX_BUFFS_AVAIL(hp) > (MAX_SKB_FRAGS + 1)) | ||
1969 | netif_wake_queue(dev); | ||
1970 | } | ||
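
/*
 * Editor's sketch (not driver code): the free-slot arithmetic behind the
 * TX_BUFFS_AVAIL() test above, written out as a function.  It is the
 * conventional keep-one-slot-empty ring computation, so tx_old == tx_new
 * means "ring idle" rather than "ring full"; the authoritative macro lives
 * in sunhme.h.
 */
static int tx_buffs_avail_sketch(int tx_old, int tx_new)
{
	if (tx_old <= tx_new)
		return tx_old + (TX_RING_SIZE - 1) - tx_new;
	return tx_old - tx_new - 1;
}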
1971 | |||
1972 | #ifdef RXDEBUG | ||
1973 | #define RXD(x) printk x | ||
1974 | #else | ||
1975 | #define RXD(x) | ||
1976 | #endif | ||
1977 | |||
1978 | /* Originally I used to handle the allocation failure by just giving back | ||
1979 | * that one ring buffer to the happy meal. Problem is that usually when that | ||
1980 | * condition is triggered, the happy meal expects you to do something reasonable | ||
1981 | * with all of the packets it has DMA'd in. So now I just drop the entire | ||
1982 | * ring when we cannot get a new skb and give them all back to the happy meal, | ||
1983 | * maybe things will be "happier" now. | ||
1984 | * | ||
1985 | * hp->happy_lock must be held | ||
1986 | */ | ||
1987 | static void happy_meal_rx(struct happy_meal *hp, struct net_device *dev) | ||
1988 | { | ||
1989 | struct happy_meal_rxd *rxbase = &hp->happy_block->happy_meal_rxd[0]; | ||
1990 | struct happy_meal_rxd *this; | ||
1991 | int elem = hp->rx_new, drops = 0; | ||
1992 | u32 flags; | ||
1993 | |||
1994 | RXD(("RX<")); | ||
1995 | this = &rxbase[elem]; | ||
1996 | while (!((flags = hme_read_desc32(hp, &this->rx_flags)) & RXFLAG_OWN)) { | ||
1997 | struct sk_buff *skb; | ||
1998 | int len = flags >> 16; | ||
1999 | u16 csum = flags & RXFLAG_CSUM; | ||
2000 | u32 dma_addr = hme_read_desc32(hp, &this->rx_addr); | ||
2001 | |||
2002 | RXD(("[%d ", elem)); | ||
2003 | |||
2004 | /* Check for errors. */ | ||
2005 | if ((len < ETH_ZLEN) || (flags & RXFLAG_OVERFLOW)) { | ||
2006 | RXD(("ERR(%08x)]", flags)); | ||
2007 | hp->net_stats.rx_errors++; | ||
2008 | if (len < ETH_ZLEN) | ||
2009 | hp->net_stats.rx_length_errors++; | ||
2010 | if (len & (RXFLAG_OVERFLOW >> 16)) { | ||
2011 | hp->net_stats.rx_over_errors++; | ||
2012 | hp->net_stats.rx_fifo_errors++; | ||
2013 | } | ||
2014 | |||
2015 | /* Return it to the Happy meal. */ | ||
2016 | drop_it: | ||
2017 | hp->net_stats.rx_dropped++; | ||
2018 | hme_write_rxd(hp, this, | ||
2019 | (RXFLAG_OWN|((RX_BUF_ALLOC_SIZE-RX_OFFSET)<<16)), | ||
2020 | dma_addr); | ||
2021 | goto next; | ||
2022 | } | ||
2023 | skb = hp->rx_skbs[elem]; | ||
2024 | if (len > RX_COPY_THRESHOLD) { | ||
2025 | struct sk_buff *new_skb; | ||
2026 | |||
2027 | /* Now refill the entry, if we can. */ | ||
2028 | new_skb = happy_meal_alloc_skb(RX_BUF_ALLOC_SIZE, GFP_ATOMIC); | ||
2029 | if (new_skb == NULL) { | ||
2030 | drops++; | ||
2031 | goto drop_it; | ||
2032 | } | ||
2033 | dma_unmap_single(hp->dma_dev, dma_addr, RX_BUF_ALLOC_SIZE, DMA_FROM_DEVICE); | ||
2034 | hp->rx_skbs[elem] = new_skb; | ||
2035 | new_skb->dev = dev; | ||
2036 | skb_put(new_skb, (ETH_FRAME_LEN + RX_OFFSET + 4)); | ||
2037 | hme_write_rxd(hp, this, | ||
2038 | (RXFLAG_OWN|((RX_BUF_ALLOC_SIZE-RX_OFFSET)<<16)), | ||
2039 | dma_map_single(hp->dma_dev, new_skb->data, RX_BUF_ALLOC_SIZE, | ||
2040 | DMA_FROM_DEVICE)); | ||
2041 | skb_reserve(new_skb, RX_OFFSET); | ||
2042 | |||
2043 | /* Trim the original skb for the netif. */ | ||
2044 | skb_trim(skb, len); | ||
2045 | } else { | ||
2046 | struct sk_buff *copy_skb = dev_alloc_skb(len + 2); | ||
2047 | |||
2048 | if (copy_skb == NULL) { | ||
2049 | drops++; | ||
2050 | goto drop_it; | ||
2051 | } | ||
2052 | |||
2053 | skb_reserve(copy_skb, 2); | ||
2054 | skb_put(copy_skb, len); | ||
2055 | dma_sync_single_for_cpu(hp->dma_dev, dma_addr, len, DMA_FROM_DEVICE); | ||
2056 | skb_copy_from_linear_data(skb, copy_skb->data, len); | ||
2057 | dma_sync_single_for_device(hp->dma_dev, dma_addr, len, DMA_FROM_DEVICE); | ||
2058 | /* Reuse original ring buffer. */ | ||
2059 | hme_write_rxd(hp, this, | ||
2060 | (RXFLAG_OWN|((RX_BUF_ALLOC_SIZE-RX_OFFSET)<<16)), | ||
2061 | dma_addr); | ||
2062 | |||
2063 | skb = copy_skb; | ||
2064 | } | ||
2065 | |||
2066 | /* This card is _fucking_ hot... */ | ||
2067 | skb->csum = csum_unfold(~(__force __sum16)htons(csum)); | ||
2068 | skb->ip_summed = CHECKSUM_COMPLETE; | ||
2069 | |||
2070 | RXD(("len=%d csum=%4x]", len, csum)); | ||
2071 | skb->protocol = eth_type_trans(skb, dev); | ||
2072 | netif_rx(skb); | ||
2073 | |||
2074 | hp->net_stats.rx_packets++; | ||
2075 | hp->net_stats.rx_bytes += len; | ||
2076 | next: | ||
2077 | elem = NEXT_RX(elem); | ||
2078 | this = &rxbase[elem]; | ||
2079 | } | ||
2080 | hp->rx_new = elem; | ||
2081 | if (drops) | ||
2082 | printk(KERN_INFO "%s: Memory squeeze, deferring packet.\n", hp->dev->name); | ||
2083 | RXD((">")); | ||
2084 | } | ||
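
/*
 * Editor's note (not driver code): the two receive paths above are the
 * classic "copybreak" scheme:
 *
 *	len >  RX_COPY_THRESHOLD  - hand the big DMA buffer up the stack and
 *				    map a freshly allocated one into the ring
 *	len <= RX_COPY_THRESHOLD  - memcpy into a small skb and recycle the
 *				    DMA buffer in place (sync, copy, re-own)
 *
 * Copying small frames avoids spending a full 64-byte-aligned ring buffer
 * and a dma_unmap/dma_map round trip on a tiny packet.
 */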
2085 | |||
2086 | static irqreturn_t happy_meal_interrupt(int irq, void *dev_id) | ||
2087 | { | ||
2088 | struct net_device *dev = dev_id; | ||
2089 | struct happy_meal *hp = netdev_priv(dev); | ||
2090 | u32 happy_status = hme_read32(hp, hp->gregs + GREG_STAT); | ||
2091 | |||
2092 | HMD(("happy_meal_interrupt: status=%08x ", happy_status)); | ||
2093 | |||
2094 | spin_lock(&hp->happy_lock); | ||
2095 | |||
2096 | if (happy_status & GREG_STAT_ERRORS) { | ||
2097 | HMD(("ERRORS ")); | ||
2098 | if (happy_meal_is_not_so_happy(hp, /* un- */ happy_status)) | ||
2099 | goto out; | ||
2100 | } | ||
2101 | |||
2102 | if (happy_status & GREG_STAT_MIFIRQ) { | ||
2103 | HMD(("MIFIRQ ")); | ||
2104 | happy_meal_mif_interrupt(hp); | ||
2105 | } | ||
2106 | |||
2107 | if (happy_status & GREG_STAT_TXALL) { | ||
2108 | HMD(("TXALL ")); | ||
2109 | happy_meal_tx(hp); | ||
2110 | } | ||
2111 | |||
2112 | if (happy_status & GREG_STAT_RXTOHOST) { | ||
2113 | HMD(("RXTOHOST ")); | ||
2114 | happy_meal_rx(hp, dev); | ||
2115 | } | ||
2116 | |||
2117 | HMD(("done\n")); | ||
2118 | out: | ||
2119 | spin_unlock(&hp->happy_lock); | ||
2120 | |||
2121 | return IRQ_HANDLED; | ||
2122 | } | ||
2123 | |||
2124 | #ifdef CONFIG_SBUS | ||
2125 | static irqreturn_t quattro_sbus_interrupt(int irq, void *cookie) | ||
2126 | { | ||
2127 | struct quattro *qp = (struct quattro *) cookie; | ||
2128 | int i; | ||
2129 | |||
2130 | for (i = 0; i < 4; i++) { | ||
2131 | struct net_device *dev = qp->happy_meals[i]; | ||
2132 | struct happy_meal *hp = netdev_priv(dev); | ||
2133 | u32 happy_status = hme_read32(hp, hp->gregs + GREG_STAT); | ||
2134 | |||
2135 | HMD(("quattro_interrupt: status=%08x ", happy_status)); | ||
2136 | |||
2137 | if (!(happy_status & (GREG_STAT_ERRORS | | ||
2138 | GREG_STAT_MIFIRQ | | ||
2139 | GREG_STAT_TXALL | | ||
2140 | GREG_STAT_RXTOHOST))) | ||
2141 | continue; | ||
2142 | |||
2143 | spin_lock(&hp->happy_lock); | ||
2144 | |||
2145 | if (happy_status & GREG_STAT_ERRORS) { | ||
2146 | HMD(("ERRORS ")); | ||
2147 | if (happy_meal_is_not_so_happy(hp, happy_status)) | ||
2148 | goto next; | ||
2149 | } | ||
2150 | |||
2151 | if (happy_status & GREG_STAT_MIFIRQ) { | ||
2152 | HMD(("MIFIRQ ")); | ||
2153 | happy_meal_mif_interrupt(hp); | ||
2154 | } | ||
2155 | |||
2156 | if (happy_status & GREG_STAT_TXALL) { | ||
2157 | HMD(("TXALL ")); | ||
2158 | happy_meal_tx(hp); | ||
2159 | } | ||
2160 | |||
2161 | if (happy_status & GREG_STAT_RXTOHOST) { | ||
2162 | HMD(("RXTOHOST ")); | ||
2163 | happy_meal_rx(hp, dev); | ||
2164 | } | ||
2165 | |||
2166 | next: | ||
2167 | spin_unlock(&hp->happy_lock); | ||
2168 | } | ||
2169 | HMD(("done\n")); | ||
2170 | |||
2171 | return IRQ_HANDLED; | ||
2172 | } | ||
2173 | #endif | ||
2174 | |||
2175 | static int happy_meal_open(struct net_device *dev) | ||
2176 | { | ||
2177 | struct happy_meal *hp = netdev_priv(dev); | ||
2178 | int res; | ||
2179 | |||
2180 | HMD(("happy_meal_open: ")); | ||
2181 | |||
2182 | /* On SBUS Quattro QFE cards, all hme interrupts are concentrated | ||
2183 | * into a single source, for which we register a handler at probe time. | ||
2184 | */ | ||
2185 | if ((hp->happy_flags & (HFLAG_QUATTRO|HFLAG_PCI)) != HFLAG_QUATTRO) { | ||
2186 | if (request_irq(dev->irq, happy_meal_interrupt, | ||
2187 | IRQF_SHARED, dev->name, (void *)dev)) { | ||
2188 | HMD(("EAGAIN\n")); | ||
2189 | printk(KERN_ERR "happy_meal(SBUS): Can't order irq %d to go.\n", | ||
2190 | dev->irq); | ||
2191 | |||
2192 | return -EAGAIN; | ||
2193 | } | ||
2194 | } | ||
2195 | |||
2196 | HMD(("to happy_meal_init\n")); | ||
2197 | |||
2198 | spin_lock_irq(&hp->happy_lock); | ||
2199 | res = happy_meal_init(hp); | ||
2200 | spin_unlock_irq(&hp->happy_lock); | ||
2201 | |||
2202 | if (res && ((hp->happy_flags & (HFLAG_QUATTRO|HFLAG_PCI)) != HFLAG_QUATTRO)) | ||
2203 | free_irq(dev->irq, dev); | ||
2204 | return res; | ||
2205 | } | ||
2206 | |||
2207 | static int happy_meal_close(struct net_device *dev) | ||
2208 | { | ||
2209 | struct happy_meal *hp = netdev_priv(dev); | ||
2210 | |||
2211 | spin_lock_irq(&hp->happy_lock); | ||
2212 | happy_meal_stop(hp, hp->gregs); | ||
2213 | happy_meal_clean_rings(hp); | ||
2214 | |||
2215 | /* If auto-negotiation timer is running, kill it. */ | ||
2216 | del_timer(&hp->happy_timer); | ||
2217 | |||
2218 | spin_unlock_irq(&hp->happy_lock); | ||
2219 | |||
2220 | /* On Quattro QFE cards, all hme interrupts are concentrated | ||
2221 | * into a single source, whose handler we register at probe | ||
2222 | * time and never unregister. | ||
2223 | */ | ||
2224 | if ((hp->happy_flags & (HFLAG_QUATTRO|HFLAG_PCI)) != HFLAG_QUATTRO) | ||
2225 | free_irq(dev->irq, dev); | ||
2226 | |||
2227 | return 0; | ||
2228 | } | ||
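A note on the flag test used in open/close above: only the SBUS Quattro case (HFLAG_QUATTRO set, HFLAG_PCI clear) skips per-device IRQ management, since those four ports share the one interrupt registered by quattro_sbus_register_irqs(). The predicate could be named, as a sketch (happy_meal_owns_irq() is hypothetical, not part of the driver):

/* Hypothetical helper, equivalent to the tests in open/close above.
 * Returns true when this port requests and frees its own interrupt;
 * only an SBUS Quattro port (QUATTRO set, PCI clear) returns false,
 * because its IRQ is shared and registered once per card.
 */
static inline int happy_meal_owns_irq(struct happy_meal *hp)
{
	return (hp->happy_flags & (HFLAG_QUATTRO | HFLAG_PCI)) != HFLAG_QUATTRO;
}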
2229 | |||
2230 | #ifdef SXDEBUG | ||
2231 | #define SXD(x) printk x | ||
2232 | #else | ||
2233 | #define SXD(x) | ||
2234 | #endif | ||
2235 | |||
2236 | static void happy_meal_tx_timeout(struct net_device *dev) | ||
2237 | { | ||
2238 | struct happy_meal *hp = netdev_priv(dev); | ||
2239 | |||
2240 | printk (KERN_ERR "%s: transmit timed out, resetting\n", dev->name); | ||
2241 | tx_dump_log(); | ||
2242 | printk (KERN_ERR "%s: Happy Status %08x TX[%08x:%08x]\n", dev->name, | ||
2243 | hme_read32(hp, hp->gregs + GREG_STAT), | ||
2244 | hme_read32(hp, hp->etxregs + ETX_CFG), | ||
2245 | hme_read32(hp, hp->bigmacregs + BMAC_TXCFG)); | ||
2246 | |||
2247 | spin_lock_irq(&hp->happy_lock); | ||
2248 | happy_meal_init(hp); | ||
2249 | spin_unlock_irq(&hp->happy_lock); | ||
2250 | |||
2251 | netif_wake_queue(dev); | ||
2252 | } | ||
2253 | |||
2254 | static netdev_tx_t happy_meal_start_xmit(struct sk_buff *skb, | ||
2255 | struct net_device *dev) | ||
2256 | { | ||
2257 | struct happy_meal *hp = netdev_priv(dev); | ||
2258 | int entry; | ||
2259 | u32 tx_flags; | ||
2260 | |||
2261 | tx_flags = TXFLAG_OWN; | ||
2262 | if (skb->ip_summed == CHECKSUM_PARTIAL) { | ||
2263 | const u32 csum_start_off = skb_checksum_start_offset(skb); | ||
2264 | const u32 csum_stuff_off = csum_start_off + skb->csum_offset; | ||
2265 | |||
2266 | tx_flags = (TXFLAG_OWN | TXFLAG_CSENABLE | | ||
2267 | ((csum_start_off << 14) & TXFLAG_CSBUFBEGIN) | | ||
2268 | ((csum_stuff_off << 20) & TXFLAG_CSLOCATION)); | ||
2269 | } | ||
2270 | |||
2271 | spin_lock_irq(&hp->happy_lock); | ||
2272 | |||
2273 | if (TX_BUFFS_AVAIL(hp) <= (skb_shinfo(skb)->nr_frags + 1)) { | ||
2274 | netif_stop_queue(dev); | ||
2275 | spin_unlock_irq(&hp->happy_lock); | ||
2276 | printk(KERN_ERR "%s: BUG! Tx Ring full when queue awake!\n", | ||
2277 | dev->name); | ||
2278 | return NETDEV_TX_BUSY; | ||
2279 | } | ||
2280 | |||
2281 | entry = hp->tx_new; | ||
2282 | SXD(("SX<l[%d]e[%d]>", skb->len, entry)); | ||
2283 | hp->tx_skbs[entry] = skb; | ||
2284 | |||
2285 | if (skb_shinfo(skb)->nr_frags == 0) { | ||
2286 | u32 mapping, len; | ||
2287 | |||
2288 | len = skb->len; | ||
2289 | mapping = dma_map_single(hp->dma_dev, skb->data, len, DMA_TO_DEVICE); | ||
2290 | tx_flags |= (TXFLAG_SOP | TXFLAG_EOP); | ||
2291 | hme_write_txd(hp, &hp->happy_block->happy_meal_txd[entry], | ||
2292 | (tx_flags | (len & TXFLAG_SIZE)), | ||
2293 | mapping); | ||
2294 | entry = NEXT_TX(entry); | ||
2295 | } else { | ||
2296 | u32 first_len, first_mapping; | ||
2297 | int frag, first_entry = entry; | ||
2298 | |||
2299 | /* We must give this initial chunk to the device last. | ||
2300 | * Otherwise we could race with the device. | ||
2301 | */ | ||
2302 | first_len = skb_headlen(skb); | ||
2303 | first_mapping = dma_map_single(hp->dma_dev, skb->data, first_len, | ||
2304 | DMA_TO_DEVICE); | ||
2305 | entry = NEXT_TX(entry); | ||
2306 | |||
2307 | for (frag = 0; frag < skb_shinfo(skb)->nr_frags; frag++) { | ||
2308 | skb_frag_t *this_frag = &skb_shinfo(skb)->frags[frag]; | ||
2309 | u32 len, mapping, this_txflags; | ||
2310 | |||
2311 | len = this_frag->size; | ||
2312 | mapping = dma_map_page(hp->dma_dev, this_frag->page, | ||
2313 | this_frag->page_offset, len, | ||
2314 | DMA_TO_DEVICE); | ||
2315 | this_txflags = tx_flags; | ||
2316 | if (frag == skb_shinfo(skb)->nr_frags - 1) | ||
2317 | this_txflags |= TXFLAG_EOP; | ||
2318 | hme_write_txd(hp, &hp->happy_block->happy_meal_txd[entry], | ||
2319 | (this_txflags | (len & TXFLAG_SIZE)), | ||
2320 | mapping); | ||
2321 | entry = NEXT_TX(entry); | ||
2322 | } | ||
2323 | hme_write_txd(hp, &hp->happy_block->happy_meal_txd[first_entry], | ||
2324 | (tx_flags | TXFLAG_SOP | (first_len & TXFLAG_SIZE)), | ||
2325 | first_mapping); | ||
2326 | } | ||
2327 | |||
2328 | hp->tx_new = entry; | ||
2329 | |||
2330 | if (TX_BUFFS_AVAIL(hp) <= (MAX_SKB_FRAGS + 1)) | ||
2331 | netif_stop_queue(dev); | ||
2332 | |||
2333 | /* Get it going. */ | ||
2334 | hme_write32(hp, hp->etxregs + ETX_PENDING, ETX_TP_DMAWAKEUP); | ||
2335 | |||
2336 | spin_unlock_irq(&hp->happy_lock); | ||
2337 | |||
2338 | tx_add_log(hp, TXLOG_ACTION_TXMIT, 0); | ||
2339 | return NETDEV_TX_OK; | ||
2340 | } | ||
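The checksum-offload packing above is dense; here is the same computation as a standalone sketch (hme_csum_tx_flags() is illustrative only). The start offset lands in TXFLAG_CSBUFBEGIN (bits 14-19) and the insertion offset in TXFLAG_CSLOCATION (bits 20-27), so the masks silently truncate offsets too large for the fields:

/* Illustrative helper, not in the driver: pack the hw-checksum fields
 * the way happy_meal_start_xmit() does for CHECKSUM_PARTIAL skbs.
 */
static u32 hme_csum_tx_flags(const struct sk_buff *skb)
{
	const u32 start = skb_checksum_start_offset(skb);
	const u32 stuff = start + skb->csum_offset;

	return TXFLAG_OWN | TXFLAG_CSENABLE |
	       ((start << 14) & TXFLAG_CSBUFBEGIN) |	/* bits 14-19 */
	       ((stuff << 20) & TXFLAG_CSLOCATION);	/* bits 20-27 */
}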
2341 | |||
2342 | static struct net_device_stats *happy_meal_get_stats(struct net_device *dev) | ||
2343 | { | ||
2344 | struct happy_meal *hp = netdev_priv(dev); | ||
2345 | |||
2346 | spin_lock_irq(&hp->happy_lock); | ||
2347 | happy_meal_get_counters(hp, hp->bigmacregs); | ||
2348 | spin_unlock_irq(&hp->happy_lock); | ||
2349 | |||
2350 | return &hp->net_stats; | ||
2351 | } | ||
2352 | |||
2353 | static void happy_meal_set_multicast(struct net_device *dev) | ||
2354 | { | ||
2355 | struct happy_meal *hp = netdev_priv(dev); | ||
2356 | void __iomem *bregs = hp->bigmacregs; | ||
2357 | struct netdev_hw_addr *ha; | ||
2358 | u32 crc; | ||
2359 | |||
2360 | spin_lock_irq(&hp->happy_lock); | ||
2361 | |||
2362 | if ((dev->flags & IFF_ALLMULTI) || (netdev_mc_count(dev) > 64)) { | ||
2363 | hme_write32(hp, bregs + BMAC_HTABLE0, 0xffff); | ||
2364 | hme_write32(hp, bregs + BMAC_HTABLE1, 0xffff); | ||
2365 | hme_write32(hp, bregs + BMAC_HTABLE2, 0xffff); | ||
2366 | hme_write32(hp, bregs + BMAC_HTABLE3, 0xffff); | ||
2367 | } else if (dev->flags & IFF_PROMISC) { | ||
2368 | hme_write32(hp, bregs + BMAC_RXCFG, | ||
2369 | hme_read32(hp, bregs + BMAC_RXCFG) | BIGMAC_RXCFG_PMISC); | ||
2370 | } else { | ||
2371 | u16 hash_table[4]; | ||
2372 | |||
2373 | memset(hash_table, 0, sizeof(hash_table)); | ||
2374 | netdev_for_each_mc_addr(ha, dev) { | ||
2375 | crc = ether_crc_le(6, ha->addr); | ||
2376 | crc >>= 26; | ||
2377 | hash_table[crc >> 4] |= 1 << (crc & 0xf); | ||
2378 | } | ||
2379 | hme_write32(hp, bregs + BMAC_HTABLE0, hash_table[0]); | ||
2380 | hme_write32(hp, bregs + BMAC_HTABLE1, hash_table[1]); | ||
2381 | hme_write32(hp, bregs + BMAC_HTABLE2, hash_table[2]); | ||
2382 | hme_write32(hp, bregs + BMAC_HTABLE3, hash_table[3]); | ||
2383 | } | ||
2384 | |||
2385 | spin_unlock_irq(&hp->happy_lock); | ||
2386 | } | ||
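The hash filter above is 64 bits wide, held in four 16-bit registers. Each multicast address sets exactly one bit, chosen by the top six bits of the little-endian CRC: bits 5:4 select the register, bits 3:0 the bit within it. As a sketch (hme_hash_position() is a hypothetical helper):

/* Hypothetical helper: where a multicast address lands in the 64-bit
 * hash filter.  ether_crc_le() is the same CRC the driver uses above.
 */
static void hme_hash_position(const u8 *addr, int *reg, int *bit)
{
	u32 crc = ether_crc_le(6, addr) >> 26;	/* top six CRC bits */

	*reg = crc >> 4;	/* 0..3, i.e. BMAC_HTABLE0..3 */
	*bit = crc & 0xf;	/* bit within that 16-bit register */
}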
2387 | |||
2388 | /* Ethtool support... */ | ||
2389 | static int hme_get_settings(struct net_device *dev, struct ethtool_cmd *cmd) | ||
2390 | { | ||
2391 | struct happy_meal *hp = netdev_priv(dev); | ||
2392 | u32 speed; | ||
2393 | |||
2394 | cmd->supported = | ||
2395 | (SUPPORTED_10baseT_Half | SUPPORTED_10baseT_Full | | ||
2396 | SUPPORTED_100baseT_Half | SUPPORTED_100baseT_Full | | ||
2397 | SUPPORTED_Autoneg | SUPPORTED_TP | SUPPORTED_MII); | ||
2398 | |||
2399 | /* XXX hardcoded stuff for now */ | ||
2400 | cmd->port = PORT_TP; /* XXX no MII support */ | ||
2401 | cmd->transceiver = XCVR_INTERNAL; /* XXX no external xcvr support */ | ||
2402 | cmd->phy_address = 0; /* XXX fixed PHYAD */ | ||
2403 | |||
2404 | /* Record PHY settings. */ | ||
2405 | spin_lock_irq(&hp->happy_lock); | ||
2406 | hp->sw_bmcr = happy_meal_tcvr_read(hp, hp->tcvregs, MII_BMCR); | ||
2407 | hp->sw_lpa = happy_meal_tcvr_read(hp, hp->tcvregs, MII_LPA); | ||
2408 | spin_unlock_irq(&hp->happy_lock); | ||
2409 | |||
2410 | if (hp->sw_bmcr & BMCR_ANENABLE) { | ||
2411 | cmd->autoneg = AUTONEG_ENABLE; | ||
2412 | speed = ((hp->sw_lpa & (LPA_100HALF | LPA_100FULL)) ? | ||
2413 | SPEED_100 : SPEED_10); | ||
2414 | if (speed == SPEED_100) | ||
2415 | cmd->duplex = | ||
2416 | (hp->sw_lpa & (LPA_100FULL)) ? | ||
2417 | DUPLEX_FULL : DUPLEX_HALF; | ||
2418 | else | ||
2419 | cmd->duplex = | ||
2420 | (hp->sw_lpa & (LPA_10FULL)) ? | ||
2421 | DUPLEX_FULL : DUPLEX_HALF; | ||
2422 | } else { | ||
2423 | cmd->autoneg = AUTONEG_DISABLE; | ||
2424 | speed = (hp->sw_bmcr & BMCR_SPEED100) ? SPEED_100 : SPEED_10; | ||
2425 | cmd->duplex = | ||
2426 | (hp->sw_bmcr & BMCR_FULLDPLX) ? | ||
2427 | DUPLEX_FULL : DUPLEX_HALF; | ||
2428 | } | ||
2429 | ethtool_cmd_speed_set(cmd, speed); | ||
2430 | return 0; | ||
2431 | } | ||
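With autonegotiation enabled, the decode above derives speed and duplex from the link-partner ability word: any advertised 100BASE-TX mode means 100Mbit, and duplex follows the full-duplex bit for the negotiated speed. Compacted into an illustrative helper:

/* Illustrative decode of MII_LPA, mirroring hme_get_settings() above.
 * Assumes autonegotiation completed; an all-zero LPA reads as 10/half.
 */
static void hme_decode_lpa(u16 lpa, u32 *speed, u8 *duplex)
{
	if (lpa & (LPA_100HALF | LPA_100FULL)) {
		*speed  = SPEED_100;
		*duplex = (lpa & LPA_100FULL) ? DUPLEX_FULL : DUPLEX_HALF;
	} else {
		*speed  = SPEED_10;
		*duplex = (lpa & LPA_10FULL) ? DUPLEX_FULL : DUPLEX_HALF;
	}
}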
2432 | |||
2433 | static int hme_set_settings(struct net_device *dev, struct ethtool_cmd *cmd) | ||
2434 | { | ||
2435 | struct happy_meal *hp = netdev_priv(dev); | ||
2436 | |||
2437 | /* Verify the settings we care about. */ | ||
2438 | if (cmd->autoneg != AUTONEG_ENABLE && | ||
2439 | cmd->autoneg != AUTONEG_DISABLE) | ||
2440 | return -EINVAL; | ||
2441 | if (cmd->autoneg == AUTONEG_DISABLE && | ||
2442 | ((ethtool_cmd_speed(cmd) != SPEED_100 && | ||
2443 | ethtool_cmd_speed(cmd) != SPEED_10) || | ||
2444 | (cmd->duplex != DUPLEX_HALF && | ||
2445 | cmd->duplex != DUPLEX_FULL))) | ||
2446 | return -EINVAL; | ||
2447 | |||
2448 | /* Ok, do it to it. */ | ||
2449 | spin_lock_irq(&hp->happy_lock); | ||
2450 | del_timer(&hp->happy_timer); | ||
2451 | happy_meal_begin_auto_negotiation(hp, hp->tcvregs, cmd); | ||
2452 | spin_unlock_irq(&hp->happy_lock); | ||
2453 | |||
2454 | return 0; | ||
2455 | } | ||
2456 | |||
2457 | static void hme_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info) | ||
2458 | { | ||
2459 | struct happy_meal *hp = netdev_priv(dev); | ||
2460 | |||
2461 | strcpy(info->driver, "sunhme"); | ||
2462 | strcpy(info->version, "2.02"); | ||
2463 | if (hp->happy_flags & HFLAG_PCI) { | ||
2464 | struct pci_dev *pdev = hp->happy_dev; | ||
2465 | strcpy(info->bus_info, pci_name(pdev)); | ||
2466 | } | ||
2467 | #ifdef CONFIG_SBUS | ||
2468 | else { | ||
2469 | const struct linux_prom_registers *regs; | ||
2470 | struct platform_device *op = hp->happy_dev; | ||
2471 | regs = of_get_property(op->dev.of_node, "regs", NULL); | ||
2472 | if (regs) | ||
2473 | sprintf(info->bus_info, "SBUS:%d", | ||
2474 | regs->which_io); | ||
2475 | } | ||
2476 | #endif | ||
2477 | } | ||
2478 | |||
2479 | static u32 hme_get_link(struct net_device *dev) | ||
2480 | { | ||
2481 | struct happy_meal *hp = netdev_priv(dev); | ||
2482 | |||
2483 | spin_lock_irq(&hp->happy_lock); | ||
2484 | hp->sw_bmsr = happy_meal_tcvr_read(hp, hp->tcvregs, MII_BMSR); | ||
2485 | spin_unlock_irq(&hp->happy_lock); | ||
2486 | |||
2487 | return hp->sw_bmsr & BMSR_LSTATUS; | ||
2488 | } | ||
2489 | |||
2490 | static const struct ethtool_ops hme_ethtool_ops = { | ||
2491 | .get_settings = hme_get_settings, | ||
2492 | .set_settings = hme_set_settings, | ||
2493 | .get_drvinfo = hme_get_drvinfo, | ||
2494 | .get_link = hme_get_link, | ||
2495 | }; | ||
2496 | |||
2497 | static int hme_version_printed; | ||
2498 | |||
2499 | #ifdef CONFIG_SBUS | ||
2500 | /* Given a happy meal sbus device, find its quattro parent. | ||
2501 | * If none exists, allocate and return a new one. | ||
2502 | * | ||
2503 | * Return NULL on failure. | ||
2504 | */ | ||
2505 | static struct quattro * __devinit quattro_sbus_find(struct platform_device *child) | ||
2506 | { | ||
2507 | struct device *parent = child->dev.parent; | ||
2508 | struct platform_device *op; | ||
2509 | struct quattro *qp; | ||
2510 | |||
2511 | op = to_platform_device(parent); | ||
2512 | qp = dev_get_drvdata(&op->dev); | ||
2513 | if (qp) | ||
2514 | return qp; | ||
2515 | |||
2516 | qp = kmalloc(sizeof(struct quattro), GFP_KERNEL); | ||
2517 | if (qp != NULL) { | ||
2518 | int i; | ||
2519 | |||
2520 | for (i = 0; i < 4; i++) | ||
2521 | qp->happy_meals[i] = NULL; | ||
2522 | |||
2523 | qp->quattro_dev = child; | ||
2524 | qp->next = qfe_sbus_list; | ||
2525 | qfe_sbus_list = qp; | ||
2526 | |||
2527 | dev_set_drvdata(&op->dev, qp); | ||
2528 | } | ||
2529 | return qp; | ||
2530 | } | ||
2531 | |||
2532 | /* After all quattro cards have been probed, we call these functions | ||
2533 | * to register the IRQ handlers for the cards that have been | ||
2534 | * successfully probed and to skip the cards that failed to initialize. | ||
2535 | */ | ||
2536 | static int __init quattro_sbus_register_irqs(void) | ||
2537 | { | ||
2538 | struct quattro *qp; | ||
2539 | |||
2540 | for (qp = qfe_sbus_list; qp != NULL; qp = qp->next) { | ||
2541 | struct platform_device *op = qp->quattro_dev; | ||
2542 | int err, qfe_slot, skip = 0; | ||
2543 | |||
2544 | for (qfe_slot = 0; qfe_slot < 4; qfe_slot++) { | ||
2545 | if (!qp->happy_meals[qfe_slot]) | ||
2546 | skip = 1; | ||
2547 | } | ||
2548 | if (skip) | ||
2549 | continue; | ||
2550 | |||
2551 | err = request_irq(op->archdata.irqs[0], | ||
2552 | quattro_sbus_interrupt, | ||
2553 | IRQF_SHARED, "Quattro", | ||
2554 | qp); | ||
2555 | if (err != 0) { | ||
2556 | printk(KERN_ERR "Quattro HME: IRQ registration " | ||
2557 | "error %d.\n", err); | ||
2558 | return err; | ||
2559 | } | ||
2560 | } | ||
2561 | |||
2562 | return 0; | ||
2563 | } | ||
2564 | |||
2565 | static void quattro_sbus_free_irqs(void) | ||
2566 | { | ||
2567 | struct quattro *qp; | ||
2568 | |||
2569 | for (qp = qfe_sbus_list; qp != NULL; qp = qp->next) { | ||
2570 | struct platform_device *op = qp->quattro_dev; | ||
2571 | int qfe_slot, skip = 0; | ||
2572 | |||
2573 | for (qfe_slot = 0; qfe_slot < 4; qfe_slot++) { | ||
2574 | if (!qp->happy_meals[qfe_slot]) | ||
2575 | skip = 1; | ||
2576 | } | ||
2577 | if (skip) | ||
2578 | continue; | ||
2579 | |||
2580 | free_irq(op->archdata.irqs[0], qp); | ||
2581 | } | ||
2582 | } | ||
2583 | #endif /* CONFIG_SBUS */ | ||
2584 | |||
2585 | #ifdef CONFIG_PCI | ||
2586 | static struct quattro * __devinit quattro_pci_find(struct pci_dev *pdev) | ||
2587 | { | ||
2588 | struct pci_dev *bdev = pdev->bus->self; | ||
2589 | struct quattro *qp; | ||
2590 | |||
2591 | if (!bdev) return NULL; | ||
2592 | for (qp = qfe_pci_list; qp != NULL; qp = qp->next) { | ||
2593 | struct pci_dev *qpdev = qp->quattro_dev; | ||
2594 | |||
2595 | if (qpdev == bdev) | ||
2596 | return qp; | ||
2597 | } | ||
2598 | qp = kmalloc(sizeof(struct quattro), GFP_KERNEL); | ||
2599 | if (qp != NULL) { | ||
2600 | int i; | ||
2601 | |||
2602 | for (i = 0; i < 4; i++) | ||
2603 | qp->happy_meals[i] = NULL; | ||
2604 | |||
2605 | qp->quattro_dev = bdev; | ||
2606 | qp->next = qfe_pci_list; | ||
2607 | qfe_pci_list = qp; | ||
2608 | |||
2609 | /* No range tricks necessary on PCI. */ | ||
2610 | qp->nranges = 0; | ||
2611 | } | ||
2612 | return qp; | ||
2613 | } | ||
2614 | #endif /* CONFIG_PCI */ | ||
2615 | |||
2616 | static const struct net_device_ops hme_netdev_ops = { | ||
2617 | .ndo_open = happy_meal_open, | ||
2618 | .ndo_stop = happy_meal_close, | ||
2619 | .ndo_start_xmit = happy_meal_start_xmit, | ||
2620 | .ndo_tx_timeout = happy_meal_tx_timeout, | ||
2621 | .ndo_get_stats = happy_meal_get_stats, | ||
2622 | .ndo_set_multicast_list = happy_meal_set_multicast, | ||
2623 | .ndo_change_mtu = eth_change_mtu, | ||
2624 | .ndo_set_mac_address = eth_mac_addr, | ||
2625 | .ndo_validate_addr = eth_validate_addr, | ||
2626 | }; | ||
2627 | |||
2628 | #ifdef CONFIG_SBUS | ||
2629 | static int __devinit happy_meal_sbus_probe_one(struct platform_device *op, int is_qfe) | ||
2630 | { | ||
2631 | struct device_node *dp = op->dev.of_node, *sbus_dp; | ||
2632 | struct quattro *qp = NULL; | ||
2633 | struct happy_meal *hp; | ||
2634 | struct net_device *dev; | ||
2635 | int i, qfe_slot = -1; | ||
2636 | int err = -ENODEV; | ||
2637 | |||
2638 | sbus_dp = op->dev.parent->of_node; | ||
2639 | |||
2640 | /* We can match PCI devices too, do not accept those here. */ | ||
2641 | if (strcmp(sbus_dp->name, "sbus")) | ||
2642 | return err; | ||
2643 | |||
2644 | if (is_qfe) { | ||
2645 | qp = quattro_sbus_find(op); | ||
2646 | if (qp == NULL) | ||
2647 | goto err_out; | ||
2648 | for (qfe_slot = 0; qfe_slot < 4; qfe_slot++) | ||
2649 | if (qp->happy_meals[qfe_slot] == NULL) | ||
2650 | break; | ||
2651 | if (qfe_slot == 4) | ||
2652 | goto err_out; | ||
2653 | } | ||
2654 | |||
2655 | err = -ENOMEM; | ||
2656 | dev = alloc_etherdev(sizeof(struct happy_meal)); | ||
2657 | if (!dev) | ||
2658 | goto err_out; | ||
2659 | SET_NETDEV_DEV(dev, &op->dev); | ||
2660 | |||
2661 | if (hme_version_printed++ == 0) | ||
2662 | printk(KERN_INFO "%s", version); | ||
2663 | |||
2664 | /* If the user did not explicitly specify a MAC address, use | ||
2665 | * the Quattro local-mac-address property... | ||
2666 | */ | ||
2667 | for (i = 0; i < 6; i++) { | ||
2668 | if (macaddr[i] != 0) | ||
2669 | break; | ||
2670 | } | ||
2671 | if (i < 6) { /* a mac address was given */ | ||
2672 | for (i = 0; i < 6; i++) | ||
2673 | dev->dev_addr[i] = macaddr[i]; | ||
2674 | macaddr[5]++; | ||
2675 | } else { | ||
2676 | const unsigned char *addr; | ||
2677 | int len; | ||
2678 | |||
2679 | addr = of_get_property(dp, "local-mac-address", &len); | ||
2680 | |||
2681 | if (qfe_slot != -1 && addr && len == 6) | ||
2682 | memcpy(dev->dev_addr, addr, 6); | ||
2683 | else | ||
2684 | memcpy(dev->dev_addr, idprom->id_ethaddr, 6); | ||
2685 | } | ||
2686 | |||
2687 | hp = netdev_priv(dev); | ||
2688 | |||
2689 | hp->happy_dev = op; | ||
2690 | hp->dma_dev = &op->dev; | ||
2691 | |||
2692 | spin_lock_init(&hp->happy_lock); | ||
2693 | |||
2694 | err = -ENODEV; | ||
2695 | if (qp != NULL) { | ||
2696 | hp->qfe_parent = qp; | ||
2697 | hp->qfe_ent = qfe_slot; | ||
2698 | qp->happy_meals[qfe_slot] = dev; | ||
2699 | } | ||
2700 | |||
2701 | hp->gregs = of_ioremap(&op->resource[0], 0, | ||
2702 | GREG_REG_SIZE, "HME Global Regs"); | ||
2703 | if (!hp->gregs) { | ||
2704 | printk(KERN_ERR "happymeal: Cannot map global registers.\n"); | ||
2705 | goto err_out_free_netdev; | ||
2706 | } | ||
2707 | |||
2708 | hp->etxregs = of_ioremap(&op->resource[1], 0, | ||
2709 | ETX_REG_SIZE, "HME TX Regs"); | ||
2710 | if (!hp->etxregs) { | ||
2711 | printk(KERN_ERR "happymeal: Cannot map MAC TX registers.\n"); | ||
2712 | goto err_out_iounmap; | ||
2713 | } | ||
2714 | |||
2715 | hp->erxregs = of_ioremap(&op->resource[2], 0, | ||
2716 | ERX_REG_SIZE, "HME RX Regs"); | ||
2717 | if (!hp->erxregs) { | ||
2718 | printk(KERN_ERR "happymeal: Cannot map MAC RX registers.\n"); | ||
2719 | goto err_out_iounmap; | ||
2720 | } | ||
2721 | |||
2722 | hp->bigmacregs = of_ioremap(&op->resource[3], 0, | ||
2723 | BMAC_REG_SIZE, "HME BIGMAC Regs"); | ||
2724 | if (!hp->bigmacregs) { | ||
2725 | printk(KERN_ERR "happymeal: Cannot map BIGMAC registers.\n"); | ||
2726 | goto err_out_iounmap; | ||
2727 | } | ||
2728 | |||
2729 | hp->tcvregs = of_ioremap(&op->resource[4], 0, | ||
2730 | TCVR_REG_SIZE, "HME Transceiver Regs"); | ||
2731 | if (!hp->tcvregs) { | ||
2732 | printk(KERN_ERR "happymeal: Cannot map TCVR registers.\n"); | ||
2733 | goto err_out_iounmap; | ||
2734 | } | ||
2735 | |||
2736 | hp->hm_revision = of_getintprop_default(dp, "hm-rev", 0xff); | ||
2737 | if (hp->hm_revision == 0xff) | ||
2738 | hp->hm_revision = 0xa0; | ||
2739 | |||
2740 | /* Now enable the feature flags we can. */ | ||
2741 | if (hp->hm_revision == 0x20 || hp->hm_revision == 0x21) | ||
2742 | hp->happy_flags = HFLAG_20_21; | ||
2743 | else if (hp->hm_revision != 0xa0) | ||
2744 | hp->happy_flags = HFLAG_NOT_A0; | ||
2745 | |||
2746 | if (qp != NULL) | ||
2747 | hp->happy_flags |= HFLAG_QUATTRO; | ||
2748 | |||
2749 | /* Get the supported DVMA burst sizes from our Happy SBUS. */ | ||
2750 | hp->happy_bursts = of_getintprop_default(sbus_dp, | ||
2751 | "burst-sizes", 0x00); | ||
2752 | |||
2753 | hp->happy_block = dma_alloc_coherent(hp->dma_dev, | ||
2754 | PAGE_SIZE, | ||
2755 | &hp->hblock_dvma, | ||
2756 | GFP_ATOMIC); | ||
2757 | err = -ENOMEM; | ||
2758 | if (!hp->happy_block) { | ||
2759 | printk(KERN_ERR "happymeal: Cannot allocate descriptors.\n"); | ||
2760 | goto err_out_iounmap; | ||
2761 | } | ||
2762 | |||
2763 | /* Force check of the link first time we are brought up. */ | ||
2764 | hp->linkcheck = 0; | ||
2765 | |||
2766 | /* Force timer state to 'asleep' with count of zero. */ | ||
2767 | hp->timer_state = asleep; | ||
2768 | hp->timer_ticks = 0; | ||
2769 | |||
2770 | init_timer(&hp->happy_timer); | ||
2771 | |||
2772 | hp->dev = dev; | ||
2773 | dev->netdev_ops = &hme_netdev_ops; | ||
2774 | dev->watchdog_timeo = 5*HZ; | ||
2775 | dev->ethtool_ops = &hme_ethtool_ops; | ||
2776 | |||
2777 | /* Happy Meal can do it all... */ | ||
2778 | dev->hw_features = NETIF_F_SG | NETIF_F_HW_CSUM; | ||
2779 | dev->features |= dev->hw_features | NETIF_F_RXCSUM; | ||
2780 | |||
2781 | dev->irq = op->archdata.irqs[0]; | ||
2782 | |||
2783 | #if defined(CONFIG_SBUS) && defined(CONFIG_PCI) | ||
2784 | /* Hook up SBUS register/descriptor accessors. */ | ||
2785 | hp->read_desc32 = sbus_hme_read_desc32; | ||
2786 | hp->write_txd = sbus_hme_write_txd; | ||
2787 | hp->write_rxd = sbus_hme_write_rxd; | ||
2788 | hp->read32 = sbus_hme_read32; | ||
2789 | hp->write32 = sbus_hme_write32; | ||
2790 | #endif | ||
2791 | |||
2792 | /* Grrr, Happy Meal comes up by default not advertising | ||
2793 | * full duplex 100baseT capabilities, fix this. | ||
2794 | */ | ||
2795 | spin_lock_irq(&hp->happy_lock); | ||
2796 | happy_meal_set_initial_advertisement(hp); | ||
2797 | spin_unlock_irq(&hp->happy_lock); | ||
2798 | |||
2799 | err = register_netdev(hp->dev); | ||
2800 | if (err) { | ||
2801 | printk(KERN_ERR "happymeal: Cannot register net device, " | ||
2802 | "aborting.\n"); | ||
2803 | goto err_out_free_coherent; | ||
2804 | } | ||
2805 | |||
2806 | dev_set_drvdata(&op->dev, hp); | ||
2807 | |||
2808 | if (qfe_slot != -1) | ||
2809 | printk(KERN_INFO "%s: Quattro HME slot %d (SBUS) 10/100baseT Ethernet ", | ||
2810 | dev->name, qfe_slot); | ||
2811 | else | ||
2812 | printk(KERN_INFO "%s: HAPPY MEAL (SBUS) 10/100baseT Ethernet ", | ||
2813 | dev->name); | ||
2814 | |||
2815 | printk("%pM\n", dev->dev_addr); | ||
2816 | |||
2817 | return 0; | ||
2818 | |||
2819 | err_out_free_coherent: | ||
2820 | dma_free_coherent(hp->dma_dev, | ||
2821 | PAGE_SIZE, | ||
2822 | hp->happy_block, | ||
2823 | hp->hblock_dvma); | ||
2824 | |||
2825 | err_out_iounmap: | ||
2826 | if (hp->gregs) | ||
2827 | of_iounmap(&op->resource[0], hp->gregs, GREG_REG_SIZE); | ||
2828 | if (hp->etxregs) | ||
2829 | of_iounmap(&op->resource[1], hp->etxregs, ETX_REG_SIZE); | ||
2830 | if (hp->erxregs) | ||
2831 | of_iounmap(&op->resource[2], hp->erxregs, ERX_REG_SIZE); | ||
2832 | if (hp->bigmacregs) | ||
2833 | of_iounmap(&op->resource[3], hp->bigmacregs, BMAC_REG_SIZE); | ||
2834 | if (hp->tcvregs) | ||
2835 | of_iounmap(&op->resource[4], hp->tcvregs, TCVR_REG_SIZE); | ||
2836 | |||
2837 | if (qp) | ||
2838 | qp->happy_meals[qfe_slot] = NULL; | ||
2839 | |||
2840 | err_out_free_netdev: | ||
2841 | free_netdev(dev); | ||
2842 | |||
2843 | err_out: | ||
2844 | return err; | ||
2845 | } | ||
2846 | #endif | ||
2847 | |||
2848 | #ifdef CONFIG_PCI | ||
2849 | #ifndef CONFIG_SPARC | ||
2850 | static int is_quattro_p(struct pci_dev *pdev) | ||
2851 | { | ||
2852 | struct pci_dev *busdev = pdev->bus->self; | ||
2853 | struct list_head *tmp; | ||
2854 | int n_hmes; | ||
2855 | |||
2856 | if (busdev == NULL || | ||
2857 | busdev->vendor != PCI_VENDOR_ID_DEC || | ||
2858 | busdev->device != PCI_DEVICE_ID_DEC_21153) | ||
2859 | return 0; | ||
2860 | |||
2861 | n_hmes = 0; | ||
2862 | tmp = pdev->bus->devices.next; | ||
2863 | while (tmp != &pdev->bus->devices) { | ||
2864 | struct pci_dev *this_pdev = pci_dev_b(tmp); | ||
2865 | |||
2866 | if (this_pdev->vendor == PCI_VENDOR_ID_SUN && | ||
2867 | this_pdev->device == PCI_DEVICE_ID_SUN_HAPPYMEAL) | ||
2868 | n_hmes++; | ||
2869 | |||
2870 | tmp = tmp->next; | ||
2871 | } | ||
2872 | |||
2873 | if (n_hmes != 4) | ||
2874 | return 0; | ||
2875 | |||
2876 | return 1; | ||
2877 | } | ||
2878 | |||
2879 | /* Fetch MAC address from vital product data of PCI ROM. */ | ||
2880 | static int find_eth_addr_in_vpd(void __iomem *rom_base, int len, int index, unsigned char *dev_addr) | ||
2881 | { | ||
2882 | int this_offset; | ||
2883 | |||
2884 | for (this_offset = 0x20; this_offset < len; this_offset++) { | ||
2885 | void __iomem *p = rom_base + this_offset; | ||
2886 | |||
2887 | if (readb(p + 0) != 0x90 || | ||
2888 | readb(p + 1) != 0x00 || | ||
2889 | readb(p + 2) != 0x09 || | ||
2890 | readb(p + 3) != 0x4e || | ||
2891 | readb(p + 4) != 0x41 || | ||
2892 | readb(p + 5) != 0x06) | ||
2893 | continue; | ||
2894 | |||
2895 | this_offset += 6; | ||
2896 | p += 6; | ||
2897 | |||
2898 | if (index == 0) { | ||
2899 | int i; | ||
2900 | |||
2901 | for (i = 0; i < 6; i++) | ||
2902 | dev_addr[i] = readb(p + i); | ||
2903 | return 1; | ||
2904 | } | ||
2905 | index--; | ||
2906 | } | ||
2907 | return 0; | ||
2908 | } | ||
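The six magic bytes matched above are a PCI VPD pattern: a read-only resource tag (0x90) with its two length bytes, then the keyword characters 'N','A' (network address) and an item length of 6, after which the six MAC octets follow. Named out for readability (these constants are illustrative; the driver compares the bytes raw):

/* Illustrative names for the bytes matched above.  Layout: resource
 * tag, two length bytes, the two-character keyword, a one-byte item
 * length, then the data itself.
 */
#define VPD_TAG_VPD_R	0x90	/* read-only VPD large resource */
#define VPD_KEY_NA	"NA"	/* 0x4e 0x41: network address keyword */
#define VPD_NA_LEN	0x06	/* six MAC octets follow */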
2909 | |||
2910 | static void get_hme_mac_nonsparc(struct pci_dev *pdev, unsigned char *dev_addr) | ||
2911 | { | ||
2912 | size_t size; | ||
2913 | void __iomem *p = pci_map_rom(pdev, &size); | ||
2914 | |||
2915 | if (p) { | ||
2916 | int index = 0; | ||
2917 | int found; | ||
2918 | |||
2919 | if (is_quattro_p(pdev)) | ||
2920 | index = PCI_SLOT(pdev->devfn); | ||
2921 | |||
2922 | found = readb(p) == 0x55 && | ||
2923 | readb(p + 1) == 0xaa && | ||
2924 | find_eth_addr_in_vpd(p, (64 * 1024), index, dev_addr); | ||
2925 | pci_unmap_rom(pdev, p); | ||
2926 | if (found) | ||
2927 | return; | ||
2928 | } | ||
2929 | |||
2930 | /* Sun MAC prefix then 3 random bytes. */ | ||
2931 | dev_addr[0] = 0x08; | ||
2932 | dev_addr[1] = 0x00; | ||
2933 | dev_addr[2] = 0x20; | ||
2934 | get_random_bytes(&dev_addr[3], 3); | ||
2935 | } | ||
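The fallback keeps Sun's 08:00:20 IEEE OUI and randomizes only the NIC-specific half, so the generated address still looks like a Sun one even though it is not globally unique. For instance:

u8 addr[ETH_ALEN] = { 0x08, 0x00, 0x20, 0, 0, 0 };	/* Sun OUI */
get_random_bytes(&addr[3], 3);	/* e.g. yields 08:00:20:4f:a1:3c */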
2936 | #endif /* !(CONFIG_SPARC) */ | ||
2937 | |||
2938 | static int __devinit happy_meal_pci_probe(struct pci_dev *pdev, | ||
2939 | const struct pci_device_id *ent) | ||
2940 | { | ||
2941 | struct quattro *qp = NULL; | ||
2942 | #ifdef CONFIG_SPARC | ||
2943 | struct device_node *dp; | ||
2944 | #endif | ||
2945 | struct happy_meal *hp; | ||
2946 | struct net_device *dev; | ||
2947 | void __iomem *hpreg_base; | ||
2948 | unsigned long hpreg_res; | ||
2949 | int i, qfe_slot = -1; | ||
2950 | char prom_name[64]; | ||
2951 | int err; | ||
2952 | |||
2953 | /* Now make sure pci_dev cookie is there. */ | ||
2954 | #ifdef CONFIG_SPARC | ||
2955 | dp = pci_device_to_OF_node(pdev); | ||
2956 | strcpy(prom_name, dp->name); | ||
2957 | #else | ||
2958 | if (is_quattro_p(pdev)) | ||
2959 | strcpy(prom_name, "SUNW,qfe"); | ||
2960 | else | ||
2961 | strcpy(prom_name, "SUNW,hme"); | ||
2962 | #endif | ||
2963 | |||
2964 | err = -ENODEV; | ||
2965 | |||
2966 | if (pci_enable_device(pdev)) | ||
2967 | goto err_out; | ||
2968 | pci_set_master(pdev); | ||
2969 | |||
2970 | if (!strcmp(prom_name, "SUNW,qfe") || !strcmp(prom_name, "qfe")) { | ||
2971 | qp = quattro_pci_find(pdev); | ||
2972 | if (qp == NULL) | ||
2973 | goto err_out; | ||
2974 | for (qfe_slot = 0; qfe_slot < 4; qfe_slot++) | ||
2975 | if (qp->happy_meals[qfe_slot] == NULL) | ||
2976 | break; | ||
2977 | if (qfe_slot == 4) | ||
2978 | goto err_out; | ||
2979 | } | ||
2980 | |||
2981 | dev = alloc_etherdev(sizeof(struct happy_meal)); | ||
2982 | err = -ENOMEM; | ||
2983 | if (!dev) | ||
2984 | goto err_out; | ||
2985 | SET_NETDEV_DEV(dev, &pdev->dev); | ||
2986 | |||
2987 | if (hme_version_printed++ == 0) | ||
2988 | printk(KERN_INFO "%s", version); | ||
2989 | |||
2990 | dev->base_addr = (long) pdev; | ||
2991 | |||
2992 | hp = netdev_priv(dev); | ||
2993 | |||
2994 | hp->happy_dev = pdev; | ||
2995 | hp->dma_dev = &pdev->dev; | ||
2996 | |||
2997 | spin_lock_init(&hp->happy_lock); | ||
2998 | |||
2999 | if (qp != NULL) { | ||
3000 | hp->qfe_parent = qp; | ||
3001 | hp->qfe_ent = qfe_slot; | ||
3002 | qp->happy_meals[qfe_slot] = dev; | ||
3003 | } | ||
3004 | |||
3005 | hpreg_res = pci_resource_start(pdev, 0); | ||
3006 | err = -ENODEV; | ||
3007 | if ((pci_resource_flags(pdev, 0) & IORESOURCE_IO) != 0) { | ||
3008 | printk(KERN_ERR "happymeal(PCI): Cannot find proper PCI device base address.\n"); | ||
3009 | goto err_out_clear_quattro; | ||
3010 | } | ||
3011 | if (pci_request_regions(pdev, DRV_NAME)) { | ||
3012 | printk(KERN_ERR "happymeal(PCI): Cannot obtain PCI resources, " | ||
3013 | "aborting.\n"); | ||
3014 | goto err_out_clear_quattro; | ||
3015 | } | ||
3016 | |||
3017 | if ((hpreg_base = ioremap(hpreg_res, 0x8000)) == NULL) { | ||
3018 | printk(KERN_ERR "happymeal(PCI): Unable to remap card memory.\n"); | ||
3019 | goto err_out_free_res; | ||
3020 | } | ||
3021 | |||
3022 | for (i = 0; i < 6; i++) { | ||
3023 | if (macaddr[i] != 0) | ||
3024 | break; | ||
3025 | } | ||
3026 | if (i < 6) { /* a mac address was given */ | ||
3027 | for (i = 0; i < 6; i++) | ||
3028 | dev->dev_addr[i] = macaddr[i]; | ||
3029 | macaddr[5]++; | ||
3030 | } else { | ||
3031 | #ifdef CONFIG_SPARC | ||
3032 | const unsigned char *addr; | ||
3033 | int len; | ||
3034 | |||
3035 | if (qfe_slot != -1 && | ||
3036 | (addr = of_get_property(dp, "local-mac-address", &len)) | ||
3037 | != NULL && | ||
3038 | len == 6) { | ||
3039 | memcpy(dev->dev_addr, addr, 6); | ||
3040 | } else { | ||
3041 | memcpy(dev->dev_addr, idprom->id_ethaddr, 6); | ||
3042 | } | ||
3043 | #else | ||
3044 | get_hme_mac_nonsparc(pdev, &dev->dev_addr[0]); | ||
3045 | #endif | ||
3046 | } | ||
3047 | |||
3048 | /* Layout registers. */ | ||
3049 | hp->gregs = (hpreg_base + 0x0000UL); | ||
3050 | hp->etxregs = (hpreg_base + 0x2000UL); | ||
3051 | hp->erxregs = (hpreg_base + 0x4000UL); | ||
3052 | hp->bigmacregs = (hpreg_base + 0x6000UL); | ||
3053 | hp->tcvregs = (hpreg_base + 0x7000UL); | ||
3054 | |||
3055 | #ifdef CONFIG_SPARC | ||
3056 | hp->hm_revision = of_getintprop_default(dp, "hm-rev", 0xff); | ||
3057 | if (hp->hm_revision == 0xff) | ||
3058 | hp->hm_revision = 0xc0 | (pdev->revision & 0x0f); | ||
3059 | #else | ||
3060 | /* This revision value works on non-sparc hosts. */ | ||
3061 | hp->hm_revision = 0x20; | ||
3062 | #endif | ||
3063 | |||
3064 | /* Now enable the feature flags we can. */ | ||
3065 | if (hp->hm_revision == 0x20 || hp->hm_revision == 0x21) | ||
3066 | hp->happy_flags = HFLAG_20_21; | ||
3067 | else if (hp->hm_revision != 0xa0 && hp->hm_revision != 0xc0) | ||
3068 | hp->happy_flags = HFLAG_NOT_A0; | ||
3069 | |||
3070 | if (qp != NULL) | ||
3071 | hp->happy_flags |= HFLAG_QUATTRO; | ||
3072 | |||
3073 | /* And of course, indicate this is PCI. */ | ||
3074 | hp->happy_flags |= HFLAG_PCI; | ||
3075 | |||
3076 | #ifdef CONFIG_SPARC | ||
3077 | /* Assume PCI happy meals can handle all burst sizes. */ | ||
3078 | hp->happy_bursts = DMA_BURSTBITS; | ||
3079 | #endif | ||
3080 | |||
3081 | hp->happy_block = (struct hmeal_init_block *) | ||
3082 | dma_alloc_coherent(&pdev->dev, PAGE_SIZE, &hp->hblock_dvma, GFP_KERNEL); | ||
3083 | |||
3084 | err = -ENODEV; | ||
3085 | if (!hp->happy_block) { | ||
3086 | printk(KERN_ERR "happymeal(PCI): Cannot get hme init block.\n"); | ||
3087 | goto err_out_iounmap; | ||
3088 | } | ||
3089 | |||
3090 | hp->linkcheck = 0; | ||
3091 | hp->timer_state = asleep; | ||
3092 | hp->timer_ticks = 0; | ||
3093 | |||
3094 | init_timer(&hp->happy_timer); | ||
3095 | |||
3096 | hp->dev = dev; | ||
3097 | dev->netdev_ops = &hme_netdev_ops; | ||
3098 | dev->watchdog_timeo = 5*HZ; | ||
3099 | dev->ethtool_ops = &hme_ethtool_ops; | ||
3100 | dev->irq = pdev->irq; | ||
3101 | dev->dma = 0; | ||
3102 | |||
3103 | /* Happy Meal can do it all... */ | ||
3104 | dev->hw_features = NETIF_F_SG | NETIF_F_HW_CSUM; | ||
3105 | dev->features |= dev->hw_features | NETIF_F_RXCSUM; | ||
3106 | |||
3107 | #if defined(CONFIG_SBUS) && defined(CONFIG_PCI) | ||
3108 | /* Hook up PCI register/descriptor accessors. */ | ||
3109 | hp->read_desc32 = pci_hme_read_desc32; | ||
3110 | hp->write_txd = pci_hme_write_txd; | ||
3111 | hp->write_rxd = pci_hme_write_rxd; | ||
3112 | hp->read32 = pci_hme_read32; | ||
3113 | hp->write32 = pci_hme_write32; | ||
3114 | #endif | ||
3115 | |||
3116 | /* Grrr, Happy Meal comes up by default not advertising | ||
3117 | * full duplex 100baseT capabilities, fix this. | ||
3118 | */ | ||
3119 | spin_lock_irq(&hp->happy_lock); | ||
3120 | happy_meal_set_initial_advertisement(hp); | ||
3121 | spin_unlock_irq(&hp->happy_lock); | ||
3122 | |||
3123 | err = register_netdev(hp->dev); | ||
3124 | if (err) { | ||
3125 | printk(KERN_ERR "happymeal(PCI): Cannot register net device, " | ||
3126 | "aborting.\n"); | ||
3127 | goto err_out_iounmap; | ||
3128 | } | ||
3129 | |||
3130 | dev_set_drvdata(&pdev->dev, hp); | ||
3131 | |||
3132 | if (!qfe_slot) { | ||
3133 | struct pci_dev *qpdev = qp->quattro_dev; | ||
3134 | |||
3135 | prom_name[0] = 0; | ||
3136 | if (!strncmp(dev->name, "eth", 3)) { | ||
3137 | int i = simple_strtoul(dev->name + 3, NULL, 10); | ||
3138 | sprintf(prom_name, "-%d", i + 3); | ||
3139 | } | ||
3140 | printk(KERN_INFO "%s%s: Quattro HME (PCI/CheerIO) 10/100baseT Ethernet ", dev->name, prom_name); | ||
3141 | if (qpdev->vendor == PCI_VENDOR_ID_DEC && | ||
3142 | qpdev->device == PCI_DEVICE_ID_DEC_21153) | ||
3143 | printk("DEC 21153 PCI Bridge\n"); | ||
3144 | else | ||
3145 | printk("unknown bridge %04x.%04x\n", | ||
3146 | qpdev->vendor, qpdev->device); | ||
3147 | } | ||
3148 | |||
3149 | if (qfe_slot != -1) | ||
3150 | printk(KERN_INFO "%s: Quattro HME slot %d (PCI/CheerIO) 10/100baseT Ethernet ", | ||
3151 | dev->name, qfe_slot); | ||
3152 | else | ||
3153 | printk(KERN_INFO "%s: HAPPY MEAL (PCI/CheerIO) 10/100baseT Ethernet ", | ||
3154 | dev->name); | ||
3155 | |||
3156 | printk("%pM\n", dev->dev_addr); | ||
3157 | |||
3158 | return 0; | ||
3159 | |||
3160 | err_out_iounmap: | ||
3161 | iounmap(hp->gregs); | ||
3162 | |||
3163 | err_out_free_res: | ||
3164 | pci_release_regions(pdev); | ||
3165 | |||
3166 | err_out_clear_quattro: | ||
3167 | if (qp != NULL) | ||
3168 | qp->happy_meals[qfe_slot] = NULL; | ||
3169 | |||
3170 | free_netdev(dev); | ||
3171 | |||
3172 | err_out: | ||
3173 | return err; | ||
3174 | } | ||
3175 | |||
3176 | static void __devexit happy_meal_pci_remove(struct pci_dev *pdev) | ||
3177 | { | ||
3178 | struct happy_meal *hp = dev_get_drvdata(&pdev->dev); | ||
3179 | struct net_device *net_dev = hp->dev; | ||
3180 | |||
3181 | unregister_netdev(net_dev); | ||
3182 | |||
3183 | dma_free_coherent(hp->dma_dev, PAGE_SIZE, | ||
3184 | hp->happy_block, hp->hblock_dvma); | ||
3185 | iounmap(hp->gregs); | ||
3186 | pci_release_regions(hp->happy_dev); | ||
3187 | |||
3188 | free_netdev(net_dev); | ||
3189 | |||
3190 | dev_set_drvdata(&pdev->dev, NULL); | ||
3191 | } | ||
3192 | |||
3193 | static DEFINE_PCI_DEVICE_TABLE(happymeal_pci_ids) = { | ||
3194 | { PCI_DEVICE(PCI_VENDOR_ID_SUN, PCI_DEVICE_ID_SUN_HAPPYMEAL) }, | ||
3195 | { } /* Terminating entry */ | ||
3196 | }; | ||
3197 | |||
3198 | MODULE_DEVICE_TABLE(pci, happymeal_pci_ids); | ||
3199 | |||
3200 | static struct pci_driver hme_pci_driver = { | ||
3201 | .name = "hme", | ||
3202 | .id_table = happymeal_pci_ids, | ||
3203 | .probe = happy_meal_pci_probe, | ||
3204 | .remove = __devexit_p(happy_meal_pci_remove), | ||
3205 | }; | ||
3206 | |||
3207 | static int __init happy_meal_pci_init(void) | ||
3208 | { | ||
3209 | return pci_register_driver(&hme_pci_driver); | ||
3210 | } | ||
3211 | |||
3212 | static void happy_meal_pci_exit(void) | ||
3213 | { | ||
3214 | pci_unregister_driver(&hme_pci_driver); | ||
3215 | |||
3216 | while (qfe_pci_list) { | ||
3217 | struct quattro *qfe = qfe_pci_list; | ||
3218 | struct quattro *next = qfe->next; | ||
3219 | |||
3220 | kfree(qfe); | ||
3221 | |||
3222 | qfe_pci_list = next; | ||
3223 | } | ||
3224 | } | ||
3225 | |||
3226 | #endif | ||
3227 | |||
3228 | #ifdef CONFIG_SBUS | ||
3229 | static const struct of_device_id hme_sbus_match[]; | ||
3230 | static int __devinit hme_sbus_probe(struct platform_device *op) | ||
3231 | { | ||
3232 | const struct of_device_id *match; | ||
3233 | struct device_node *dp = op->dev.of_node; | ||
3234 | const char *model = of_get_property(dp, "model", NULL); | ||
3235 | int is_qfe; | ||
3236 | |||
3237 | match = of_match_device(hme_sbus_match, &op->dev); | ||
3238 | if (!match) | ||
3239 | return -EINVAL; | ||
3240 | is_qfe = (match->data != NULL); | ||
3241 | |||
3242 | if (!is_qfe && model && !strcmp(model, "SUNW,sbus-qfe")) | ||
3243 | is_qfe = 1; | ||
3244 | |||
3245 | return happy_meal_sbus_probe_one(op, is_qfe); | ||
3246 | } | ||
3247 | |||
3248 | static int __devexit hme_sbus_remove(struct platform_device *op) | ||
3249 | { | ||
3250 | struct happy_meal *hp = dev_get_drvdata(&op->dev); | ||
3251 | struct net_device *net_dev = hp->dev; | ||
3252 | |||
3253 | unregister_netdev(net_dev); | ||
3254 | |||
3255 | /* XXX qfe parent interrupt... */ | ||
3256 | |||
3257 | of_iounmap(&op->resource[0], hp->gregs, GREG_REG_SIZE); | ||
3258 | of_iounmap(&op->resource[1], hp->etxregs, ETX_REG_SIZE); | ||
3259 | of_iounmap(&op->resource[2], hp->erxregs, ERX_REG_SIZE); | ||
3260 | of_iounmap(&op->resource[3], hp->bigmacregs, BMAC_REG_SIZE); | ||
3261 | of_iounmap(&op->resource[4], hp->tcvregs, TCVR_REG_SIZE); | ||
3262 | dma_free_coherent(hp->dma_dev, | ||
3263 | PAGE_SIZE, | ||
3264 | hp->happy_block, | ||
3265 | hp->hblock_dvma); | ||
3266 | |||
3267 | free_netdev(net_dev); | ||
3268 | |||
3269 | dev_set_drvdata(&op->dev, NULL); | ||
3270 | |||
3271 | return 0; | ||
3272 | } | ||
3273 | |||
3274 | static const struct of_device_id hme_sbus_match[] = { | ||
3275 | { | ||
3276 | .name = "SUNW,hme", | ||
3277 | }, | ||
3278 | { | ||
3279 | .name = "SUNW,qfe", | ||
3280 | .data = (void *) 1, | ||
3281 | }, | ||
3282 | { | ||
3283 | .name = "qfe", | ||
3284 | .data = (void *) 1, | ||
3285 | }, | ||
3286 | {}, | ||
3287 | }; | ||
3288 | |||
3289 | MODULE_DEVICE_TABLE(of, hme_sbus_match); | ||
3290 | |||
3291 | static struct platform_driver hme_sbus_driver = { | ||
3292 | .driver = { | ||
3293 | .name = "hme", | ||
3294 | .owner = THIS_MODULE, | ||
3295 | .of_match_table = hme_sbus_match, | ||
3296 | }, | ||
3297 | .probe = hme_sbus_probe, | ||
3298 | .remove = __devexit_p(hme_sbus_remove), | ||
3299 | }; | ||
3300 | |||
3301 | static int __init happy_meal_sbus_init(void) | ||
3302 | { | ||
3303 | int err; | ||
3304 | |||
3305 | err = platform_driver_register(&hme_sbus_driver); | ||
3306 | if (!err) | ||
3307 | err = quattro_sbus_register_irqs(); | ||
3308 | |||
3309 | return err; | ||
3310 | } | ||
3311 | |||
3312 | static void happy_meal_sbus_exit(void) | ||
3313 | { | ||
3314 | platform_driver_unregister(&hme_sbus_driver); | ||
3315 | quattro_sbus_free_irqs(); | ||
3316 | |||
3317 | while (qfe_sbus_list) { | ||
3318 | struct quattro *qfe = qfe_sbus_list; | ||
3319 | struct quattro *next = qfe->next; | ||
3320 | |||
3321 | kfree(qfe); | ||
3322 | |||
3323 | qfe_sbus_list = next; | ||
3324 | } | ||
3325 | } | ||
3326 | #endif | ||
3327 | |||
3328 | static int __init happy_meal_probe(void) | ||
3329 | { | ||
3330 | int err = 0; | ||
3331 | |||
3332 | #ifdef CONFIG_SBUS | ||
3333 | err = happy_meal_sbus_init(); | ||
3334 | #endif | ||
3335 | #ifdef CONFIG_PCI | ||
3336 | if (!err) { | ||
3337 | err = happy_meal_pci_init(); | ||
3338 | #ifdef CONFIG_SBUS | ||
3339 | if (err) | ||
3340 | happy_meal_sbus_exit(); | ||
3341 | #endif | ||
3342 | } | ||
3343 | #endif | ||
3344 | |||
3345 | return err; | ||
3346 | } | ||
3347 | |||
3348 | |||
3349 | static void __exit happy_meal_exit(void) | ||
3350 | { | ||
3351 | #ifdef CONFIG_SBUS | ||
3352 | happy_meal_sbus_exit(); | ||
3353 | #endif | ||
3354 | #ifdef CONFIG_PCI | ||
3355 | happy_meal_pci_exit(); | ||
3356 | #endif | ||
3357 | } | ||
3358 | |||
3359 | module_init(happy_meal_probe); | ||
3360 | module_exit(happy_meal_exit); | ||
diff --git a/drivers/net/ethernet/sun/sunhme.h b/drivers/net/ethernet/sun/sunhme.h new file mode 100644 index 000000000000..64f278360d89 --- /dev/null +++ b/drivers/net/ethernet/sun/sunhme.h | |||
@@ -0,0 +1,512 @@ | |||
1 | /* $Id: sunhme.h,v 1.33 2001/08/03 06:23:04 davem Exp $ | ||
2 | * sunhme.h: Definitions for Sparc HME/BigMac 10/100baseT ethernet driver. | ||
3 | * Also known as the "Happy Meal". | ||
4 | * | ||
5 | * Copyright (C) 1996, 1999 David S. Miller (davem@redhat.com) | ||
6 | */ | ||
7 | |||
8 | #ifndef _SUNHME_H | ||
9 | #define _SUNHME_H | ||
10 | |||
11 | #include <linux/pci.h> | ||
12 | |||
13 | /* Happy Meal global registers. */ | ||
14 | #define GREG_SWRESET 0x000UL /* Software Reset */ | ||
15 | #define GREG_CFG 0x004UL /* Config Register */ | ||
16 | #define GREG_STAT 0x108UL /* Status */ | ||
17 | #define GREG_IMASK 0x10cUL /* Interrupt Mask */ | ||
18 | #define GREG_REG_SIZE 0x110UL | ||
19 | |||
20 | /* Global reset register. */ | ||
21 | #define GREG_RESET_ETX 0x01 | ||
22 | #define GREG_RESET_ERX 0x02 | ||
23 | #define GREG_RESET_ALL 0x03 | ||
24 | |||
25 | /* Global config register. */ | ||
26 | #define GREG_CFG_BURSTMSK 0x03 | ||
27 | #define GREG_CFG_BURST16 0x00 | ||
28 | #define GREG_CFG_BURST32 0x01 | ||
29 | #define GREG_CFG_BURST64 0x02 | ||
30 | #define GREG_CFG_64BIT 0x04 | ||
31 | #define GREG_CFG_PARITY 0x08 | ||
32 | #define GREG_CFG_RESV 0x10 | ||
33 | |||
34 | /* Global status register. */ | ||
35 | #define GREG_STAT_GOTFRAME 0x00000001 /* Received a frame */ | ||
36 | #define GREG_STAT_RCNTEXP 0x00000002 /* Receive frame counter expired */ | ||
37 | #define GREG_STAT_ACNTEXP 0x00000004 /* Align-error counter expired */ | ||
38 | #define GREG_STAT_CCNTEXP 0x00000008 /* CRC-error counter expired */ | ||
39 | #define GREG_STAT_LCNTEXP 0x00000010 /* Length-error counter expired */ | ||
40 | #define GREG_STAT_RFIFOVF 0x00000020 /* Receive FIFO overflow */ | ||
41 | #define GREG_STAT_CVCNTEXP 0x00000040 /* Code-violation counter expired */ | ||
42 | #define GREG_STAT_STSTERR 0x00000080 /* Test error in XIF for SQE */ | ||
43 | #define GREG_STAT_SENTFRAME 0x00000100 /* Transmitted a frame */ | ||
44 | #define GREG_STAT_TFIFO_UND 0x00000200 /* Transmit FIFO underrun */ | ||
45 | #define GREG_STAT_MAXPKTERR 0x00000400 /* Max-packet size error */ | ||
46 | #define GREG_STAT_NCNTEXP 0x00000800 /* Normal-collision counter expired */ | ||
47 | #define GREG_STAT_ECNTEXP 0x00001000 /* Excess-collision counter expired */ | ||
48 | #define GREG_STAT_LCCNTEXP 0x00002000 /* Late-collision counter expired */ | ||
49 | #define GREG_STAT_FCNTEXP 0x00004000 /* First-collision counter expired */ | ||
50 | #define GREG_STAT_DTIMEXP 0x00008000 /* Defer-timer expired */ | ||
51 | #define GREG_STAT_RXTOHOST 0x00010000 /* Moved from receive-FIFO to host memory */ | ||
52 | #define GREG_STAT_NORXD 0x00020000 /* No more receive descriptors */ | ||
53 | #define GREG_STAT_RXERR 0x00040000 /* Error during receive dma */ | ||
54 | #define GREG_STAT_RXLATERR 0x00080000 /* Late error during receive dma */ | ||
55 | #define GREG_STAT_RXPERR 0x00100000 /* Parity error during receive dma */ | ||
56 | #define GREG_STAT_RXTERR 0x00200000 /* Tag error during receive dma */ | ||
57 | #define GREG_STAT_EOPERR 0x00400000 /* Transmit descriptor did not have EOP set */ | ||
58 | #define GREG_STAT_MIFIRQ 0x00800000 /* MIF is signaling an interrupt condition */ | ||
59 | #define GREG_STAT_HOSTTOTX 0x01000000 /* Moved from host memory to transmit-FIFO */ | ||
60 | #define GREG_STAT_TXALL 0x02000000 /* Transmitted all packets in the tx-fifo */ | ||
61 | #define GREG_STAT_TXEACK 0x04000000 /* Error during transmit dma */ | ||
62 | #define GREG_STAT_TXLERR 0x08000000 /* Late error during transmit dma */ | ||
63 | #define GREG_STAT_TXPERR 0x10000000 /* Parity error during transmit dma */ | ||
64 | #define GREG_STAT_TXTERR 0x20000000 /* Tag error during transmit dma */ | ||
65 | #define GREG_STAT_SLVERR 0x40000000 /* PIO access got an error */ | ||
66 | #define GREG_STAT_SLVPERR 0x80000000 /* PIO access got a parity error */ | ||
67 | |||
68 | /* All interesting error conditions. */ | ||
69 | #define GREG_STAT_ERRORS 0xfc7efefc | ||
70 | |||
71 | /* Global interrupt mask register. */ | ||
72 | #define GREG_IMASK_GOTFRAME 0x00000001 /* Received a frame */ | ||
73 | #define GREG_IMASK_RCNTEXP 0x00000002 /* Receive frame counter expired */ | ||
74 | #define GREG_IMASK_ACNTEXP 0x00000004 /* Align-error counter expired */ | ||
75 | #define GREG_IMASK_CCNTEXP 0x00000008 /* CRC-error counter expired */ | ||
76 | #define GREG_IMASK_LCNTEXP 0x00000010 /* Length-error counter expired */ | ||
77 | #define GREG_IMASK_RFIFOVF 0x00000020 /* Receive FIFO overflow */ | ||
78 | #define GREG_IMASK_CVCNTEXP 0x00000040 /* Code-violation counter expired */ | ||
79 | #define GREG_IMASK_STSTERR 0x00000080 /* Test error in XIF for SQE */ | ||
80 | #define GREG_IMASK_SENTFRAME 0x00000100 /* Transmitted a frame */ | ||
81 | #define GREG_IMASK_TFIFO_UND 0x00000200 /* Transmit FIFO underrun */ | ||
82 | #define GREG_IMASK_MAXPKTERR 0x00000400 /* Max-packet size error */ | ||
83 | #define GREG_IMASK_NCNTEXP 0x00000800 /* Normal-collision counter expired */ | ||
84 | #define GREG_IMASK_ECNTEXP 0x00001000 /* Excess-collision counter expired */ | ||
85 | #define GREG_IMASK_LCCNTEXP 0x00002000 /* Late-collision counter expired */ | ||
86 | #define GREG_IMASK_FCNTEXP 0x00004000 /* First-collision counter expired */ | ||
87 | #define GREG_IMASK_DTIMEXP 0x00008000 /* Defer-timer expired */ | ||
88 | #define GREG_IMASK_RXTOHOST 0x00010000 /* Moved from receive-FIFO to host memory */ | ||
89 | #define GREG_IMASK_NORXD 0x00020000 /* No more receive descriptors */ | ||
90 | #define GREG_IMASK_RXERR 0x00040000 /* Error during receive dma */ | ||
91 | #define GREG_IMASK_RXLATERR 0x00080000 /* Late error during receive dma */ | ||
92 | #define GREG_IMASK_RXPERR 0x00100000 /* Parity error during receive dma */ | ||
93 | #define GREG_IMASK_RXTERR 0x00200000 /* Tag error during receive dma */ | ||
94 | #define GREG_IMASK_EOPERR 0x00400000 /* Transmit descriptor did not have EOP set */ | ||
95 | #define GREG_IMASK_MIFIRQ 0x00800000 /* MIF is signaling an interrupt condition */ | ||
96 | #define GREG_IMASK_HOSTTOTX 0x01000000 /* Moved from host memory to transmit-FIFO */ | ||
97 | #define GREG_IMASK_TXALL 0x02000000 /* Transmitted all packets in the tx-fifo */ | ||
98 | #define GREG_IMASK_TXEACK 0x04000000 /* Error during transmit dma */ | ||
99 | #define GREG_IMASK_TXLERR 0x08000000 /* Late error during transmit dma */ | ||
100 | #define GREG_IMASK_TXPERR 0x10000000 /* Parity error during transmit dma */ | ||
101 | #define GREG_IMASK_TXTERR 0x20000000 /* Tag error during transmit dma */ | ||
102 | #define GREG_IMASK_SLVERR 0x40000000 /* PIO access got an error */ | ||
103 | #define GREG_IMASK_SLVPERR 0x80000000 /* PIO access got a parity error */ | ||
104 | |||
105 | /* Happy Meal external transmitter registers. */ | ||
106 | #define ETX_PENDING 0x00UL /* Transmit pending/wakeup register */ | ||
107 | #define ETX_CFG 0x04UL /* Transmit config register */ | ||
108 | #define ETX_RING 0x08UL /* Transmit ring pointer */ | ||
109 | #define ETX_BBASE 0x0cUL /* Transmit buffer base */ | ||
110 | #define ETX_BDISP 0x10UL /* Transmit buffer displacement */ | ||
111 | #define ETX_FIFOWPTR 0x14UL /* FIFO write ptr */ | ||
112 | #define ETX_FIFOSWPTR 0x18UL /* FIFO write ptr (shadow register) */ | ||
113 | #define ETX_FIFORPTR 0x1cUL /* FIFO read ptr */ | ||
114 | #define ETX_FIFOSRPTR 0x20UL /* FIFO read ptr (shadow register) */ | ||
115 | #define ETX_FIFOPCNT 0x24UL /* FIFO packet counter */ | ||
116 | #define ETX_SMACHINE 0x28UL /* Transmitter state machine */ | ||
117 | #define ETX_RSIZE 0x2cUL /* Ring descriptor size */ | ||
118 | #define ETX_BPTR 0x30UL /* Transmit data buffer ptr */ | ||
119 | #define ETX_REG_SIZE 0x34UL | ||
120 | |||
121 | /* ETX transmit pending register. */ | ||
122 | #define ETX_TP_DMAWAKEUP 0x00000001 /* Restart transmit dma */ | ||
123 | |||
124 | /* ETX config register. */ | ||
125 | #define ETX_CFG_DMAENABLE 0x00000001 /* Enable transmit dma */ | ||
126 | #define ETX_CFG_FIFOTHRESH 0x000003fe /* Transmit FIFO threshold */ | ||
127 | #define ETX_CFG_IRQDAFTER 0x00000400 /* Interrupt after TX-FIFO drained */ | ||
128 | #define ETX_CFG_IRQDBEFORE 0x00000000 /* Interrupt before TX-FIFO drained */ | ||
129 | |||
130 | #define ETX_RSIZE_SHIFT 4 | ||
131 | |||
132 | /* Happy Meal external receiver registers. */ | ||
133 | #define ERX_CFG 0x00UL /* Receiver config register */ | ||
134 | #define ERX_RING 0x04UL /* Receiver ring ptr */ | ||
135 | #define ERX_BPTR 0x08UL /* Receiver buffer ptr */ | ||
136 | #define ERX_FIFOWPTR 0x0cUL /* FIFO write ptr */ | ||
137 | #define ERX_FIFOSWPTR 0x10UL /* FIFO write ptr (shadow register) */ | ||
138 | #define ERX_FIFORPTR 0x14UL /* FIFO read ptr */ | ||
139 | #define ERX_FIFOSRPTR 0x18UL /* FIFO read ptr (shadow register) */ | ||
140 | #define ERX_SMACHINE 0x1cUL /* Receiver state machine */ | ||
141 | #define ERX_REG_SIZE 0x20UL | ||
142 | |||
143 | /* ERX config register. */ | ||
144 | #define ERX_CFG_DMAENABLE 0x00000001 /* Enable receive DMA */ | ||
145 | #define ERX_CFG_RESV1 0x00000006 /* Unused... */ | ||
146 | #define ERX_CFG_BYTEOFFSET 0x00000038 /* Receive first byte offset */ | ||
147 | #define ERX_CFG_RESV2 0x000001c0 /* Unused... */ | ||
148 | #define ERX_CFG_SIZE32 0x00000000 /* Receive ring size == 32 */ | ||
149 | #define ERX_CFG_SIZE64 0x00000200 /* Receive ring size == 64 */ | ||
150 | #define ERX_CFG_SIZE128 0x00000400 /* Receive ring size == 128 */ | ||
151 | #define ERX_CFG_SIZE256 0x00000600 /* Receive ring size == 256 */ | ||
152 | #define ERX_CFG_RESV3 0x0000f800 /* Unused... */ | ||
153 | #define ERX_CFG_CSUMSTART 0x007f0000 /* Offset of checksum start, | ||
154 | * in halfwords. */ | ||
155 | |||
156 | /* I'd like a Big Mac, small fries, small coke, and SparcLinux please. */ | ||
157 | #define BMAC_XIFCFG 0x0000UL /* XIF config register */ | ||
158 | /* 0x4-->0x204, reserved */ | ||
159 | #define BMAC_TXSWRESET 0x208UL /* Transmitter software reset */ | ||
160 | #define BMAC_TXCFG 0x20cUL /* Transmitter config register */ | ||
161 | #define BMAC_IGAP1 0x210UL /* Inter-packet gap 1 */ | ||
162 | #define BMAC_IGAP2 0x214UL /* Inter-packet gap 2 */ | ||
163 | #define BMAC_ALIMIT 0x218UL /* Transmit attempt limit */ | ||
164 | #define BMAC_STIME 0x21cUL /* Transmit slot time */ | ||
165 | #define BMAC_PLEN 0x220UL /* Size of transmit preamble */ | ||
166 | #define BMAC_PPAT 0x224UL /* Pattern for transmit preamble */ | ||
167 | #define BMAC_TXSDELIM 0x228UL /* Transmit delimiter */ | ||
168 | #define BMAC_JSIZE 0x22cUL /* Jam size */ | ||
169 | #define BMAC_TXMAX 0x230UL /* Transmit max pkt size */ | ||
170 | #define BMAC_TXMIN 0x234UL /* Transmit min pkt size */ | ||
171 | #define BMAC_PATTEMPT 0x238UL /* Count of transmit peak attempts */ | ||
172 | #define BMAC_DTCTR 0x23cUL /* Transmit defer timer */ | ||
173 | #define BMAC_NCCTR 0x240UL /* Transmit normal-collision counter */ | ||
174 | #define BMAC_FCCTR 0x244UL /* Transmit first-collision counter */ | ||
175 | #define BMAC_EXCTR 0x248UL /* Transmit excess-collision counter */ | ||
176 | #define BMAC_LTCTR 0x24cUL /* Transmit late-collision counter */ | ||
177 | #define BMAC_RSEED 0x250UL /* Transmit random number seed */ | ||
178 | #define BMAC_TXSMACHINE 0x254UL /* Transmit state machine */ | ||
179 | /* 0x258-->0x304, reserved */ | ||
180 | #define BMAC_RXSWRESET 0x308UL /* Receiver software reset */ | ||
181 | #define BMAC_RXCFG 0x30cUL /* Receiver config register */ | ||
182 | #define BMAC_RXMAX 0x310UL /* Receive max pkt size */ | ||
183 | #define BMAC_RXMIN 0x314UL /* Receive min pkt size */ | ||
184 | #define BMAC_MACADDR2 0x318UL /* Ether address register 2 */ | ||
185 | #define BMAC_MACADDR1 0x31cUL /* Ether address register 1 */ | ||
186 | #define BMAC_MACADDR0 0x320UL /* Ether address register 0 */ | ||
187 | #define BMAC_FRCTR 0x324UL /* Receive frame receive counter */ | ||
188 | #define BMAC_GLECTR 0x328UL /* Receive giant-length error counter */ | ||
189 | #define BMAC_UNALECTR 0x32cUL /* Receive unaligned error counter */ | ||
190 | #define BMAC_RCRCECTR 0x330UL /* Receive CRC error counter */ | ||
191 | #define BMAC_RXSMACHINE 0x334UL /* Receiver state machine */ | ||
192 | #define BMAC_RXCVALID 0x338UL /* Receiver code violation */ | ||
193 | /* 0x33c, reserved */ | ||
194 | #define BMAC_HTABLE3 0x340UL /* Hash table 3 */ | ||
195 | #define BMAC_HTABLE2 0x344UL /* Hash table 2 */ | ||
196 | #define BMAC_HTABLE1 0x348UL /* Hash table 1 */ | ||
197 | #define BMAC_HTABLE0 0x34cUL /* Hash table 0 */ | ||
198 | #define BMAC_AFILTER2 0x350UL /* Address filter 2 */ | ||
199 | #define BMAC_AFILTER1 0x354UL /* Address filter 1 */ | ||
200 | #define BMAC_AFILTER0 0x358UL /* Address filter 0 */ | ||
201 | #define BMAC_AFMASK 0x35cUL /* Address filter mask */ | ||
202 | #define BMAC_REG_SIZE 0x360UL | ||
203 | |||
204 | /* BigMac XIF config register. */ | ||
205 | #define BIGMAC_XCFG_ODENABLE 0x00000001 /* Output driver enable */ | ||
206 | #define BIGMAC_XCFG_XLBACK 0x00000002 /* Loopback-mode XIF enable */ | ||
207 | #define BIGMAC_XCFG_MLBACK 0x00000004 /* Loopback-mode MII enable */ | ||
208 | #define BIGMAC_XCFG_MIIDISAB 0x00000008 /* MII receive buffer disable */ | ||
209 | #define BIGMAC_XCFG_SQENABLE 0x00000010 /* SQE test enable */ | ||
210 | #define BIGMAC_XCFG_SQETWIN 0x000003e0 /* SQE time window */ | ||
211 | #define BIGMAC_XCFG_LANCE 0x00000010 /* Lance mode enable */ | ||
212 | #define BIGMAC_XCFG_LIPG0 0x000003e0 /* Lance mode IPG0 */ | ||
213 | |||
214 | /* BigMac transmit config register. */ | ||
215 | #define BIGMAC_TXCFG_ENABLE 0x00000001 /* Enable the transmitter */ | ||
216 | #define BIGMAC_TXCFG_SMODE 0x00000020 /* Enable slow transmit mode */ | ||
217 | #define BIGMAC_TXCFG_CIGN 0x00000040 /* Ignore transmit collisions */ | ||
218 | #define BIGMAC_TXCFG_FCSOFF 0x00000080 /* Do not emit FCS */ | ||
219 | #define BIGMAC_TXCFG_DBACKOFF 0x00000100 /* Disable backoff */ | ||
220 | #define BIGMAC_TXCFG_FULLDPLX 0x00000200 /* Enable full-duplex */ | ||
221 | #define BIGMAC_TXCFG_DGIVEUP 0x00000400 /* Don't give up on transmits */ | ||
222 | |||
223 | /* BigMac receive config register. */ | ||
224 | #define BIGMAC_RXCFG_ENABLE 0x00000001 /* Enable the receiver */ | ||
225 | #define BIGMAC_RXCFG_PSTRIP 0x00000020 /* Pad byte strip enable */ | ||
226 | #define BIGMAC_RXCFG_PMISC 0x00000040 /* Enable promiscuous mode */ | ||
227 | #define BIGMAC_RXCFG_DERR 0x00000080 /* Disable error checking */ | ||
228 | #define BIGMAC_RXCFG_DCRCS 0x00000100 /* Disable CRC stripping */ | ||
229 | #define BIGMAC_RXCFG_REJME 0x00000200 /* Reject packets addressed to me */ | ||
230 | #define BIGMAC_RXCFG_PGRP 0x00000400 /* Enable promisc group mode */ | ||
231 | #define BIGMAC_RXCFG_HENABLE 0x00000800 /* Enable the hash filter */ | ||
232 | #define BIGMAC_RXCFG_AENABLE 0x00001000 /* Enable the address filter */ | ||
233 | |||
234 | /* These are the "Management Interface" (ie. MIF) registers of the transceiver. */ | ||
235 | #define TCVR_BBCLOCK 0x00UL /* Bit bang clock register */ | ||
236 | #define TCVR_BBDATA 0x04UL /* Bit bang data register */ | ||
237 | #define TCVR_BBOENAB 0x08UL /* Bit bang output enable */ | ||
238 | #define TCVR_FRAME 0x0cUL /* Frame control/data register */ | ||
239 | #define TCVR_CFG 0x10UL /* MIF config register */ | ||
240 | #define TCVR_IMASK 0x14UL /* MIF interrupt mask */ | ||
241 | #define TCVR_STATUS 0x18UL /* MIF status */ | ||
242 | #define TCVR_SMACHINE 0x1cUL /* MIF state machine */ | ||
243 | #define TCVR_REG_SIZE 0x20UL | ||
244 | |||
245 | /* Frame commands. */ | ||
246 | #define FRAME_WRITE 0x50020000 | ||
247 | #define FRAME_READ 0x60020000 | ||
248 | |||
249 | /* Transceiver config register */ | ||
250 | #define TCV_CFG_PSELECT 0x00000001 /* Select PHY */ | ||
251 | #define TCV_CFG_PENABLE 0x00000002 /* Enable MIF polling */ | ||
252 | #define TCV_CFG_BENABLE 0x00000004 /* Enable the "bit banger" oh baby */ | ||
253 | #define TCV_CFG_PREGADDR 0x000000f8 /* Address of poll register */ | ||
254 | #define TCV_CFG_MDIO0 0x00000100 /* MDIO zero, data/attached */ | ||
255 | #define TCV_CFG_MDIO1 0x00000200 /* MDIO one, data/attached */ | ||
256 | #define TCV_CFG_PDADDR 0x00007c00 /* Device PHY address polling */ | ||
257 | |||
258 | /* Here are some PHY addresses. */ | ||
259 | #define TCV_PADDR_ETX 0 /* Internal transceiver */ | ||
260 | #define TCV_PADDR_ITX 1 /* External transceiver */ | ||
261 | |||
262 | /* Transceiver status register */ | ||
263 | #define TCV_STAT_BASIC 0xffff0000 /* The "basic" part */ | ||
264 | #define TCV_STAT_NORMAL 0x0000ffff /* The "non-basic" part */ | ||
265 | |||
266 | /* Inside the Happy Meal transceiver is the physical layer; it uses an | ||
267 | * implementation from National Semiconductor, part number DP83840VCE. | ||
268 | * You can retrieve the data sheets and programming docs for this beast | ||
269 | * from http://www.national.com/ | ||
270 | * | ||
271 | * The DP83840 is capable of both 10 and 100Mbps ethernet, in both | ||
272 | * half and full duplex mode. It also supports auto negotiation. | ||
273 | * | ||
274 | * But.... THIS THING IS A PAIN IN THE ASS TO PROGRAM! | ||
275 | * Debugging eeprom burnt code is more fun than programming this chip! | ||
276 | */ | ||
277 | |||
278 | /* Generic MII registers defined in linux/mii.h, these below | ||
279 | * are DP83840 specific. | ||
280 | */ | ||
281 | #define DP83840_CSCONFIG 0x17 /* CS configuration */ | ||
282 | |||
283 | /* The Carrier Sense config register. */ | ||
284 | #define CSCONFIG_RESV1 0x0001 /* Unused... */ | ||
285 | #define CSCONFIG_LED4 0x0002 /* Pin for full-dplx LED4 */ | ||
286 | #define CSCONFIG_LED1 0x0004 /* Pin for conn-status LED1 */ | ||
287 | #define CSCONFIG_RESV2 0x0008 /* Unused... */ | ||
288 | #define CSCONFIG_TCVDISAB 0x0010 /* Turns off the transceiver */ | ||
289 | #define CSCONFIG_DFBYPASS 0x0020 /* Bypass disconnect function */ | ||
290 | #define CSCONFIG_GLFORCE 0x0040 /* Good link force for 100mbps */ | ||
291 | #define CSCONFIG_CLKTRISTATE 0x0080 /* Tristate 25m clock */ | ||
292 | #define CSCONFIG_RESV3 0x0700 /* Unused... */ | ||
293 | #define CSCONFIG_ENCODE 0x0800 /* 1=MLT-3, 0=binary */ | ||
294 | #define CSCONFIG_RENABLE 0x1000 /* Repeater mode enable */ | ||
295 | #define CSCONFIG_TCDISABLE 0x2000 /* Disable timeout counter */ | ||
296 | #define CSCONFIG_RESV4 0x4000 /* Unused... */ | ||
297 | #define CSCONFIG_NDISABLE 0x8000 /* Disable NRZI */ | ||
298 | |||
299 | /* Happy Meal descriptor rings and such. | ||
300 | * All descriptor rings must be aligned on a 2K boundary. | ||
301 | * All receive buffers must be 64 byte aligned. | ||
302 | * Always write the address first before setting the ownership | ||
303 | * bits to avoid races with the hardware scanning the ring. | ||
304 | */ | ||
305 | typedef u32 __bitwise__ hme32; | ||
306 | |||
307 | struct happy_meal_rxd { | ||
308 | hme32 rx_flags; | ||
309 | hme32 rx_addr; | ||
310 | }; | ||
311 | |||
312 | #define RXFLAG_OWN 0x80000000 /* 1 = hardware, 0 = software */ | ||
313 | #define RXFLAG_OVERFLOW 0x40000000 /* 1 = buffer overflow */ | ||
314 | #define RXFLAG_SIZE 0x3fff0000 /* Size of the buffer */ | ||
315 | #define RXFLAG_CSUM 0x0000ffff /* HW computed checksum */ | ||
316 | |||
317 | struct happy_meal_txd { | ||
318 | hme32 tx_flags; | ||
319 | hme32 tx_addr; | ||
320 | }; | ||
321 | |||
322 | #define TXFLAG_OWN 0x80000000 /* 1 = hardware, 0 = software */ | ||
323 | #define TXFLAG_SOP 0x40000000 /* 1 = start of packet */ | ||
324 | #define TXFLAG_EOP 0x20000000 /* 1 = end of packet */ | ||
325 | #define TXFLAG_CSENABLE 0x10000000 /* 1 = enable hw-checksums */ | ||
326 | #define TXFLAG_CSLOCATION 0x0ff00000 /* Where to stick the csum */ | ||
327 | #define TXFLAG_CSBUFBEGIN 0x000fc000 /* Where to begin checksum */ | ||
328 | #define TXFLAG_SIZE 0x00003fff /* Size of the packet */ | ||
329 | |||
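The ring rules stated above (the address store strictly before the OWN-bit store) are worth seeing in one place. A minimal sketch, assuming a hypothetical helper name; the real driver routes these stores through write_rxd()/write_txd() plus the per-bus accessors on combined SBUS+PCI builds, and wmb() comes from the kernel's barrier headers:

	/* Sketch: hand one RX descriptor to the hardware safely. */
	static inline void hme_publish_rxd(struct happy_meal_rxd *rxd,
					   u32 dma_addr, u32 buf_len)
	{
		rxd->rx_addr = dma_addr;	/* 1: address becomes visible  */
		wmb();				/* 2: order the two stores      */
		rxd->rx_flags = RXFLAG_OWN |	/* 3: only now flip ownership   */
				((buf_len << 16) & RXFLAG_SIZE);
	}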
330 | #define TX_RING_SIZE 32 /* Must be >=16 and <=256, a multiple of 16 */ | ||
331 | #define RX_RING_SIZE 32 /* see ERX_CFG_SIZE* for possible values */ | ||
332 | |||
333 | #if (TX_RING_SIZE < 16 || TX_RING_SIZE > 256 || (TX_RING_SIZE % 16) != 0) | ||
334 | #error TX_RING_SIZE holds illegal value | ||
335 | #endif | ||
336 | |||
337 | #define TX_RING_MAXSIZE 256 | ||
338 | #define RX_RING_MAXSIZE 256 | ||
339 | |||
340 | /* We use a 14 byte offset for checksum computation. */ | ||
341 | #if (RX_RING_SIZE == 32) | ||
342 | #define ERX_CFG_DEFAULT(off) (ERX_CFG_DMAENABLE|((off)<<3)|ERX_CFG_SIZE32|((14/2)<<16)) | ||
343 | #else | ||
344 | #if (RX_RING_SIZE == 64) | ||
345 | #define ERX_CFG_DEFAULT(off) (ERX_CFG_DMAENABLE|((off)<<3)|ERX_CFG_SIZE64|((14/2)<<16)) | ||
346 | #else | ||
347 | #if (RX_RING_SIZE == 128) | ||
348 | #define ERX_CFG_DEFAULT(off) (ERX_CFG_DMAENABLE|((off)<<3)|ERX_CFG_SIZE128|((14/2)<<16)) | ||
349 | #else | ||
350 | #if (RX_RING_SIZE == 256) | ||
351 | #define ERX_CFG_DEFAULT(off) (ERX_CFG_DMAENABLE|((off)<<3)|ERX_CFG_SIZE256|((14/2)<<16)) | ||
352 | #else | ||
353 | #error RX_RING_SIZE holds illegal value | ||
354 | #endif | ||
355 | #endif | ||
356 | #endif | ||
357 | #endif | ||
358 | |||
359 | #define NEXT_RX(num) (((num) + 1) & (RX_RING_SIZE - 1)) | ||
360 | #define NEXT_TX(num) (((num) + 1) & (TX_RING_SIZE - 1)) | ||
361 | #define PREV_RX(num) (((num) - 1) & (RX_RING_SIZE - 1)) | ||
362 | #define PREV_TX(num) (((num) - 1) & (TX_RING_SIZE - 1)) | ||
363 | |||
364 | #define TX_BUFFS_AVAIL(hp) \ | ||
365 | (((hp)->tx_old <= (hp)->tx_new) ? \ | ||
366 | (hp)->tx_old + (TX_RING_SIZE - 1) - (hp)->tx_new : \ | ||
367 | (hp)->tx_old - (hp)->tx_new - 1) | ||
368 | |||
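A quick sanity check on TX_BUFFS_AVAIL: with TX_RING_SIZE = 32 and tx_old == tx_new (an empty ring) the first branch yields 31, and after 31 unreclaimed transmits (tx_old = 0, tx_new = 31) it yields 0. One entry is deliberately sacrificed so that a completely full ring is never indistinguishable from an empty one.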
369 | #define RX_OFFSET 2 | ||
370 | #define RX_BUF_ALLOC_SIZE (1546 + RX_OFFSET + 64) | ||
371 | |||
372 | #define RX_COPY_THRESHOLD 256 | ||
373 | |||
374 | struct hmeal_init_block { | ||
375 | struct happy_meal_rxd happy_meal_rxd[RX_RING_MAXSIZE]; | ||
376 | struct happy_meal_txd happy_meal_txd[TX_RING_MAXSIZE]; | ||
377 | }; | ||
378 | |||
379 | #define hblock_offset(mem, elem) \ | ||
380 | ((__u32)((unsigned long)(&(((struct hmeal_init_block *)0)->mem[elem])))) | ||
381 | |||
382 | /* Now software state stuff. */ | ||
383 | enum happy_transceiver { | ||
384 | external = 0, | ||
385 | internal = 1, | ||
386 | none = 2, | ||
387 | }; | ||
388 | |||
389 | /* Timer state engine. */ | ||
390 | enum happy_timer_state { | ||
391 | arbwait = 0, /* Waiting for auto negotiation to complete. */ | ||
392 | lupwait = 1, /* Auto-neg complete, awaiting link-up status. */ | ||
393 | ltrywait = 2, /* Forcing try of all modes, from fastest to slowest. */ | ||
394 | asleep = 3, /* Timer inactive. */ | ||
395 | }; | ||
396 | |||
397 | struct quattro; | ||
398 | |||
399 | /* Happy happy, joy joy! */ | ||
400 | struct happy_meal { | ||
401 | void __iomem *gregs; /* Happy meal global registers */ | ||
402 | struct hmeal_init_block *happy_block; /* RX and TX descriptors (CPU addr) */ | ||
403 | |||
404 | #if defined(CONFIG_SBUS) && defined(CONFIG_PCI) | ||
405 | u32 (*read_desc32)(hme32 *); | ||
406 | void (*write_txd)(struct happy_meal_txd *, u32, u32); | ||
407 | void (*write_rxd)(struct happy_meal_rxd *, u32, u32); | ||
408 | #endif | ||
409 | |||
410 | /* This is either a platform_device or a pci_dev. */ | ||
411 | void *happy_dev; | ||
412 | struct device *dma_dev; | ||
413 | |||
414 | spinlock_t happy_lock; | ||
415 | |||
416 | struct sk_buff *rx_skbs[RX_RING_SIZE]; | ||
417 | struct sk_buff *tx_skbs[TX_RING_SIZE]; | ||
418 | |||
419 | int rx_new, tx_new, rx_old, tx_old; | ||
420 | |||
421 | struct net_device_stats net_stats; /* Statistical counters */ | ||
422 | |||
423 | #if defined(CONFIG_SBUS) && defined(CONFIG_PCI) | ||
424 | u32 (*read32)(void __iomem *); | ||
425 | void (*write32)(void __iomem *, u32); | ||
426 | #endif | ||
427 | |||
428 | void __iomem *etxregs; /* External transmitter regs */ | ||
429 | void __iomem *erxregs; /* External receiver regs */ | ||
430 | void __iomem *bigmacregs; /* BIGMAC core regs */ | ||
431 | void __iomem *tcvregs; /* MIF transceiver regs */ | ||
432 | |||
433 | dma_addr_t hblock_dvma; /* DVMA visible address happy block */ | ||
434 | unsigned int happy_flags; /* Driver state flags */ | ||
435 | enum happy_transceiver tcvr_type; /* Kind of transceiver in use */ | ||
436 | unsigned int happy_bursts; /* Get your mind out of the gutter */ | ||
437 | unsigned int paddr; /* PHY address for transceiver */ | ||
438 | unsigned short hm_revision; /* Happy meal revision */ | ||
439 | unsigned short sw_bmcr; /* SW copy of BMCR */ | ||
440 | unsigned short sw_bmsr; /* SW copy of BMSR */ | ||
441 | unsigned short sw_physid1; /* SW copy of PHYSID1 */ | ||
442 | unsigned short sw_physid2; /* SW copy of PHYSID2 */ | ||
443 | unsigned short sw_advertise; /* SW copy of ADVERTISE */ | ||
444 | unsigned short sw_lpa; /* SW copy of LPA */ | ||
445 | unsigned short sw_expansion; /* SW copy of EXPANSION */ | ||
446 | unsigned short sw_csconfig; /* SW copy of CSCONFIG */ | ||
447 | unsigned int auto_speed; /* Auto-nego link speed */ | ||
448 | unsigned int forced_speed; /* Force mode link speed */ | ||
449 | unsigned int poll_data; /* MIF poll data */ | ||
450 | unsigned int poll_flag; /* MIF poll flag */ | ||
451 | unsigned int linkcheck; /* Have we checked the link yet? */ | ||
452 | unsigned int lnkup; /* Is the link up as far as we know? */ | ||
453 | unsigned int lnkdown; /* Trying to force the link down? */ | ||
454 | unsigned int lnkcnt; /* Counter for link-up attempts. */ | ||
455 | struct timer_list happy_timer; /* To watch the link when coming up. */ | ||
456 | enum happy_timer_state timer_state; /* State of the auto-neg timer. */ | ||
457 | unsigned int timer_ticks; /* Number of clicks at each state. */ | ||
458 | |||
459 | struct net_device *dev; /* Backpointer */ | ||
460 | struct quattro *qfe_parent; /* For Quattro cards */ | ||
461 | int qfe_ent; /* Which instance on quattro */ | ||
462 | }; | ||
463 | |||
464 | /* Here are the happy flags. */ | ||
465 | #define HFLAG_POLL 0x00000001 /* We are doing MIF polling */ | ||
466 | #define HFLAG_FENABLE 0x00000002 /* The MII frame is enabled */ | ||
467 | #define HFLAG_LANCE 0x00000004 /* We are using lance-mode */ | ||
468 | #define HFLAG_RXENABLE 0x00000008 /* Receiver is enabled */ | ||
469 | #define HFLAG_AUTO 0x00000010 /* Using auto-negotiation, 0 = force */ | ||
470 | #define HFLAG_FULL 0x00000020 /* Full duplex enable */ | ||
471 | #define HFLAG_MACFULL 0x00000040 /* Using full duplex in the MAC */ | ||
472 | #define HFLAG_POLLENABLE 0x00000080 /* Actually try MIF polling */ | ||
473 | #define HFLAG_RXCV 0x00000100 /* XXX RXCV ENABLE */ | ||
474 | #define HFLAG_INIT 0x00000200 /* Init called at least once */ | ||
475 | #define HFLAG_LINKUP 0x00000400 /* 1 = Link is up */ | ||
476 | #define HFLAG_PCI 0x00000800 /* PCI based Happy Meal */ | ||
477 | #define HFLAG_QUATTRO 0x00001000 /* On QFE/Quattro card */ | ||
478 | |||
479 | #define HFLAG_20_21 (HFLAG_POLLENABLE | HFLAG_FENABLE) | ||
480 | #define HFLAG_NOT_A0 (HFLAG_POLLENABLE | HFLAG_FENABLE | HFLAG_LANCE | HFLAG_RXCV) | ||
481 | |||
482 | /* Support for QFE/Quattro cards. */ | ||
483 | struct quattro { | ||
484 | struct net_device *happy_meals[4]; | ||
485 | |||
486 | /* This is either a sbus_dev or a pci_dev. */ | ||
487 | void *quattro_dev; | ||
488 | |||
489 | struct quattro *next; | ||
490 | |||
491 | /* PROM ranges, if any. */ | ||
492 | #ifdef CONFIG_SBUS | ||
493 | struct linux_prom_ranges ranges[8]; | ||
494 | #endif | ||
495 | int nranges; | ||
496 | }; | ||
497 | |||
498 | /* We use this to acquire receive skb's that we can DMA directly into. */ | ||
499 | #define ALIGNED_RX_SKB_ADDR(addr) \ | ||
500 | ((((unsigned long)(addr) + (64UL - 1UL)) & ~(64UL - 1UL)) - (unsigned long)(addr)) | ||
501 | #define happy_meal_alloc_skb(__length, __gfp_flags) \ | ||
502 | ({ struct sk_buff *__skb; \ | ||
503 | __skb = alloc_skb((__length) + 64, (__gfp_flags)); \ | ||
504 | if(__skb) { \ | ||
505 | int __offset = (int) ALIGNED_RX_SKB_ADDR(__skb->data); \ | ||
506 | if(__offset) \ | ||
507 | skb_reserve(__skb, __offset); \ | ||
508 | } \ | ||
509 | __skb; \ | ||
510 | }) | ||
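A usage sketch for the pair above (the call-site name is hypothetical). ALIGNED_RX_SKB_ADDR computes the distance from skb->data to the next 64-byte boundary, e.g. 24 for an address ending in 0x28 and 0 for one already aligned; the extra 64 bytes passed to alloc_skb() guarantee the skb_reserve() never overruns the buffer:

	/* Refill one 64-byte-aligned receive buffer; GFP_ATOMIC because
	 * ring refill runs from the RX interrupt path.
	 */
	static struct sk_buff *hme_new_rx_skb(void)
	{
		return happy_meal_alloc_skb(RX_BUF_ALLOC_SIZE, GFP_ATOMIC);
	}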
511 | |||
512 | #endif /* !(_SUNHME_H) */ | ||
diff --git a/drivers/net/ethernet/sun/sunqe.c b/drivers/net/ethernet/sun/sunqe.c new file mode 100644 index 000000000000..209c7f8df003 --- /dev/null +++ b/drivers/net/ethernet/sun/sunqe.c | |||
@@ -0,0 +1,1007 @@ | |||
1 | /* sunqe.c: Sparc QuadEthernet 10baseT SBUS card driver. | ||
2 | * Once again I am out to prove that every ethernet | ||
3 | * controller out there can be most efficiently programmed | ||
4 | * if you make it look like a LANCE. | ||
5 | * | ||
6 | * Copyright (C) 1996, 1999, 2003, 2006, 2008 David S. Miller (davem@davemloft.net) | ||
7 | */ | ||
8 | |||
9 | #include <linux/module.h> | ||
10 | #include <linux/kernel.h> | ||
11 | #include <linux/types.h> | ||
12 | #include <linux/errno.h> | ||
13 | #include <linux/fcntl.h> | ||
14 | #include <linux/interrupt.h> | ||
15 | #include <linux/ioport.h> | ||
16 | #include <linux/in.h> | ||
17 | #include <linux/slab.h> | ||
18 | #include <linux/string.h> | ||
19 | #include <linux/delay.h> | ||
20 | #include <linux/init.h> | ||
21 | #include <linux/crc32.h> | ||
22 | #include <linux/netdevice.h> | ||
23 | #include <linux/etherdevice.h> | ||
24 | #include <linux/skbuff.h> | ||
25 | #include <linux/ethtool.h> | ||
26 | #include <linux/bitops.h> | ||
27 | #include <linux/dma-mapping.h> | ||
28 | #include <linux/of.h> | ||
29 | #include <linux/of_device.h> | ||
30 | |||
31 | #include <asm/system.h> | ||
32 | #include <asm/io.h> | ||
33 | #include <asm/dma.h> | ||
34 | #include <asm/byteorder.h> | ||
35 | #include <asm/idprom.h> | ||
36 | #include <asm/openprom.h> | ||
37 | #include <asm/oplib.h> | ||
38 | #include <asm/auxio.h> | ||
39 | #include <asm/pgtable.h> | ||
40 | #include <asm/irq.h> | ||
41 | |||
42 | #include "sunqe.h" | ||
43 | |||
44 | #define DRV_NAME "sunqe" | ||
45 | #define DRV_VERSION "4.1" | ||
46 | #define DRV_RELDATE "August 27, 2008" | ||
47 | #define DRV_AUTHOR "David S. Miller (davem@davemloft.net)" | ||
48 | |||
49 | static char version[] = | ||
50 | DRV_NAME ".c:v" DRV_VERSION " " DRV_RELDATE " " DRV_AUTHOR "\n"; | ||
51 | |||
52 | MODULE_VERSION(DRV_VERSION); | ||
53 | MODULE_AUTHOR(DRV_AUTHOR); | ||
54 | MODULE_DESCRIPTION("Sun QuadEthernet 10baseT SBUS card driver"); | ||
55 | MODULE_LICENSE("GPL"); | ||
56 | |||
57 | static struct sunqec *root_qec_dev; | ||
58 | |||
59 | static void qe_set_multicast(struct net_device *dev); | ||
60 | |||
61 | #define QEC_RESET_TRIES 200 | ||
62 | |||
63 | static inline int qec_global_reset(void __iomem *gregs) | ||
64 | { | ||
65 | int tries = QEC_RESET_TRIES; | ||
66 | |||
67 | sbus_writel(GLOB_CTRL_RESET, gregs + GLOB_CTRL); | ||
68 | while (--tries) { | ||
69 | u32 tmp = sbus_readl(gregs + GLOB_CTRL); | ||
70 | if (tmp & GLOB_CTRL_RESET) { | ||
71 | udelay(20); | ||
72 | continue; | ||
73 | } | ||
74 | break; | ||
75 | } | ||
76 | if (tries) | ||
77 | return 0; | ||
78 | printk(KERN_ERR "QuadEther: AIEEE cannot reset the QEC!\n"); | ||
79 | return -1; | ||
80 | } | ||
81 | |||
82 | #define MACE_RESET_RETRIES 200 | ||
83 | #define QE_RESET_RETRIES 200 | ||
84 | |||
85 | static inline int qe_stop(struct sunqe *qep) | ||
86 | { | ||
87 | void __iomem *cregs = qep->qcregs; | ||
88 | void __iomem *mregs = qep->mregs; | ||
89 | int tries; | ||
90 | |||
91 | /* Reset the MACE, then the QEC channel. */ | ||
92 | sbus_writeb(MREGS_BCONFIG_RESET, mregs + MREGS_BCONFIG); | ||
93 | tries = MACE_RESET_RETRIES; | ||
94 | while (--tries) { | ||
95 | u8 tmp = sbus_readb(mregs + MREGS_BCONFIG); | ||
96 | if (tmp & MREGS_BCONFIG_RESET) { | ||
97 | udelay(20); | ||
98 | continue; | ||
99 | } | ||
100 | break; | ||
101 | } | ||
102 | if (!tries) { | ||
103 | printk(KERN_ERR "QuadEther: AIEEE cannot reset the MACE!\n"); | ||
104 | return -1; | ||
105 | } | ||
106 | |||
107 | sbus_writel(CREG_CTRL_RESET, cregs + CREG_CTRL); | ||
108 | tries = QE_RESET_RETRIES; | ||
109 | while (--tries) { | ||
110 | u32 tmp = sbus_readl(cregs + CREG_CTRL); | ||
111 | if (tmp & CREG_CTRL_RESET) { | ||
112 | udelay(20); | ||
113 | continue; | ||
114 | } | ||
115 | break; | ||
116 | } | ||
117 | if (!tries) { | ||
118 | printk(KERN_ERR "QuadEther: Cannot reset QE channel!\n"); | ||
119 | return -1; | ||
120 | } | ||
121 | return 0; | ||
122 | } | ||
123 | |||
124 | static void qe_init_rings(struct sunqe *qep) | ||
125 | { | ||
126 | struct qe_init_block *qb = qep->qe_block; | ||
127 | struct sunqe_buffers *qbufs = qep->buffers; | ||
128 | __u32 qbufs_dvma = qep->buffers_dvma; | ||
129 | int i; | ||
130 | |||
131 | qep->rx_new = qep->rx_old = qep->tx_new = qep->tx_old = 0; | ||
132 | memset(qb, 0, sizeof(struct qe_init_block)); | ||
133 | memset(qbufs, 0, sizeof(struct sunqe_buffers)); | ||
134 | for (i = 0; i < RX_RING_SIZE; i++) { | ||
135 | qb->qe_rxd[i].rx_addr = qbufs_dvma + qebuf_offset(rx_buf, i); | ||
136 | qb->qe_rxd[i].rx_flags = | ||
137 | (RXD_OWN | ((RXD_PKT_SZ) & RXD_LENGTH)); | ||
138 | } | ||
139 | } | ||
140 | |||
141 | static int qe_init(struct sunqe *qep, int from_irq) | ||
142 | { | ||
143 | struct sunqec *qecp = qep->parent; | ||
144 | void __iomem *cregs = qep->qcregs; | ||
145 | void __iomem *mregs = qep->mregs; | ||
146 | void __iomem *gregs = qecp->gregs; | ||
147 | unsigned char *e = &qep->dev->dev_addr[0]; | ||
148 | u32 tmp; | ||
149 | int i; | ||
150 | |||
151 | /* Shut it up. */ | ||
152 | if (qe_stop(qep)) | ||
153 | return -EAGAIN; | ||
154 | |||
155 | /* Setup initial rx/tx init block pointers. */ | ||
156 | sbus_writel(qep->qblock_dvma + qib_offset(qe_rxd, 0), cregs + CREG_RXDS); | ||
157 | sbus_writel(qep->qblock_dvma + qib_offset(qe_txd, 0), cregs + CREG_TXDS); | ||
158 | |||
159 | /* Enable/mask the various irq's. */ | ||
160 | sbus_writel(0, cregs + CREG_RIMASK); | ||
161 | sbus_writel(1, cregs + CREG_TIMASK); | ||
162 | |||
163 | sbus_writel(0, cregs + CREG_QMASK); | ||
164 | sbus_writel(CREG_MMASK_RXCOLL, cregs + CREG_MMASK); | ||
165 | |||
166 | /* Setup the FIFO pointers into QEC local memory. */ | ||
167 | tmp = qep->channel * sbus_readl(gregs + GLOB_MSIZE); | ||
168 | sbus_writel(tmp, cregs + CREG_RXRBUFPTR); | ||
169 | sbus_writel(tmp, cregs + CREG_RXWBUFPTR); | ||
170 | |||
171 | tmp = sbus_readl(cregs + CREG_RXRBUFPTR) + | ||
172 | sbus_readl(gregs + GLOB_RSIZE); | ||
173 | sbus_writel(tmp, cregs + CREG_TXRBUFPTR); | ||
174 | sbus_writel(tmp, cregs + CREG_TXWBUFPTR); | ||
175 | |||
176 | /* Clear the channel collision counter. */ | ||
177 | sbus_writel(0, cregs + CREG_CCNT); | ||
178 | |||
179 | /* For 10baseT, neither inter-frame spacing nor throttling seems to be necessary. */ | ||
180 | sbus_writel(0, cregs + CREG_PIPG); | ||
181 | |||
182 | /* Now dork with the AMD MACE. */ | ||
183 | sbus_writeb(MREGS_PHYCONFIG_AUTO, mregs + MREGS_PHYCONFIG); | ||
184 | sbus_writeb(MREGS_TXFCNTL_AUTOPAD, mregs + MREGS_TXFCNTL); | ||
185 | sbus_writeb(0, mregs + MREGS_RXFCNTL); | ||
186 | |||
187 | /* The QEC dma's the rx'd packets from local memory out to main memory, | ||
188 | * and therefore it interrupts when the packet reception is "complete". | ||
189 | * So don't listen for the MACE talking about it. | ||
190 | */ | ||
191 | sbus_writeb(MREGS_IMASK_COLL | MREGS_IMASK_RXIRQ, mregs + MREGS_IMASK); | ||
192 | sbus_writeb(MREGS_BCONFIG_BSWAP | MREGS_BCONFIG_64TS, mregs + MREGS_BCONFIG); | ||
193 | sbus_writeb((MREGS_FCONFIG_TXF16 | MREGS_FCONFIG_RXF32 | | ||
194 | MREGS_FCONFIG_RFWU | MREGS_FCONFIG_TFWU), | ||
195 | mregs + MREGS_FCONFIG); | ||
196 | |||
197 | /* The only usable interface on QuadEther is twisted pair. */ | ||
198 | sbus_writeb(MREGS_PLSCONFIG_TP, mregs + MREGS_PLSCONFIG); | ||
199 | |||
200 | /* Tell MACE we are changing the ether address. */ | ||
201 | sbus_writeb(MREGS_IACONFIG_ACHNGE | MREGS_IACONFIG_PARESET, | ||
202 | mregs + MREGS_IACONFIG); | ||
203 | while ((sbus_readb(mregs + MREGS_IACONFIG) & MREGS_IACONFIG_ACHNGE) != 0) | ||
204 | barrier(); | ||
205 | sbus_writeb(e[0], mregs + MREGS_ETHADDR); | ||
206 | sbus_writeb(e[1], mregs + MREGS_ETHADDR); | ||
207 | sbus_writeb(e[2], mregs + MREGS_ETHADDR); | ||
208 | sbus_writeb(e[3], mregs + MREGS_ETHADDR); | ||
209 | sbus_writeb(e[4], mregs + MREGS_ETHADDR); | ||
210 | sbus_writeb(e[5], mregs + MREGS_ETHADDR); | ||
211 | |||
212 | /* Clear out the address filter. */ | ||
213 | sbus_writeb(MREGS_IACONFIG_ACHNGE | MREGS_IACONFIG_LARESET, | ||
214 | mregs + MREGS_IACONFIG); | ||
215 | while ((sbus_readb(mregs + MREGS_IACONFIG) & MREGS_IACONFIG_ACHNGE) != 0) | ||
216 | barrier(); | ||
217 | for (i = 0; i < 8; i++) | ||
218 | sbus_writeb(0, mregs + MREGS_FILTER); | ||
219 | |||
220 | /* Address changes are now complete. */ | ||
221 | sbus_writeb(0, mregs + MREGS_IACONFIG); | ||
222 | |||
223 | qe_init_rings(qep); | ||
224 | |||
225 | /* Wait a little bit for the link to come up... */ | ||
226 | mdelay(5); | ||
227 | if (!(sbus_readb(mregs + MREGS_PHYCONFIG) & MREGS_PHYCONFIG_LTESTDIS)) { | ||
228 | int tries = 50; | ||
229 | |||
230 | while (--tries) { | ||
231 | u8 tmp; | ||
232 | |||
233 | mdelay(5); | ||
234 | barrier(); | ||
235 | tmp = sbus_readb(mregs + MREGS_PHYCONFIG); | ||
236 | if ((tmp & MREGS_PHYCONFIG_LSTAT) != 0) | ||
237 | break; | ||
238 | } | ||
239 | if (tries == 0) | ||
240 | printk(KERN_NOTICE "%s: Warning, link state is down.\n", qep->dev->name); | ||
241 | } | ||
242 | |||
243 | /* Missed packet counter is cleared on a read. */ | ||
244 | sbus_readb(mregs + MREGS_MPCNT); | ||
245 | |||
246 | /* Reload multicast information; this will enable the receiver | ||
247 | * and transmitter. | ||
248 | */ | ||
249 | qe_set_multicast(qep->dev); | ||
250 | |||
251 | /* QEC should now start to show interrupts. */ | ||
252 | return 0; | ||
253 | } | ||
254 | |||
255 | /* Grrr, certain error conditions completely lock up the AMD MACE, | ||
256 | * so when we get these we _must_ reset the chip. | ||
257 | */ | ||
258 | static int qe_is_bolixed(struct sunqe *qep, u32 qe_status) | ||
259 | { | ||
260 | struct net_device *dev = qep->dev; | ||
261 | int mace_hwbug_workaround = 0; | ||
262 | |||
263 | if (qe_status & CREG_STAT_EDEFER) { | ||
264 | printk(KERN_ERR "%s: Excessive transmit defers.\n", dev->name); | ||
265 | dev->stats.tx_errors++; | ||
266 | } | ||
267 | |||
268 | if (qe_status & CREG_STAT_CLOSS) { | ||
269 | printk(KERN_ERR "%s: Carrier lost, link down?\n", dev->name); | ||
270 | dev->stats.tx_errors++; | ||
271 | dev->stats.tx_carrier_errors++; | ||
272 | } | ||
273 | |||
274 | if (qe_status & CREG_STAT_ERETRIES) { | ||
275 | printk(KERN_ERR "%s: Excessive transmit retries (more than 16).\n", dev->name); | ||
276 | dev->stats.tx_errors++; | ||
277 | mace_hwbug_workaround = 1; | ||
278 | } | ||
279 | |||
280 | if (qe_status & CREG_STAT_LCOLL) { | ||
281 | printk(KERN_ERR "%s: Late transmit collision.\n", dev->name); | ||
282 | dev->stats.tx_errors++; | ||
283 | dev->stats.collisions++; | ||
284 | mace_hwbug_workaround = 1; | ||
285 | } | ||
286 | |||
287 | if (qe_status & CREG_STAT_FUFLOW) { | ||
288 | printk(KERN_ERR "%s: Transmit fifo underflow, driver bug.\n", dev->name); | ||
289 | dev->stats.tx_errors++; | ||
290 | mace_hwbug_workaround = 1; | ||
291 | } | ||
292 | |||
293 | if (qe_status & CREG_STAT_JERROR) { | ||
294 | printk(KERN_ERR "%s: Jabber error.\n", dev->name); | ||
295 | } | ||
296 | |||
297 | if (qe_status & CREG_STAT_BERROR) { | ||
298 | printk(KERN_ERR "%s: Babble error.\n", dev->name); | ||
299 | } | ||
300 | |||
301 | if (qe_status & CREG_STAT_CCOFLOW) { | ||
302 | dev->stats.tx_errors += 256; | ||
303 | dev->stats.collisions += 256; | ||
304 | } | ||
305 | |||
306 | if (qe_status & CREG_STAT_TXDERROR) { | ||
307 | printk(KERN_ERR "%s: Transmit descriptor is bogus, driver bug.\n", dev->name); | ||
308 | dev->stats.tx_errors++; | ||
309 | dev->stats.tx_aborted_errors++; | ||
310 | mace_hwbug_workaround = 1; | ||
311 | } | ||
312 | |||
313 | if (qe_status & CREG_STAT_TXLERR) { | ||
314 | printk(KERN_ERR "%s: Transmit late error.\n", dev->name); | ||
315 | dev->stats.tx_errors++; | ||
316 | mace_hwbug_workaround = 1; | ||
317 | } | ||
318 | |||
319 | if (qe_status & CREG_STAT_TXPERR) { | ||
320 | printk(KERN_ERR "%s: Transmit DMA parity error.\n", dev->name); | ||
321 | dev->stats.tx_errors++; | ||
322 | dev->stats.tx_aborted_errors++; | ||
323 | mace_hwbug_workaround = 1; | ||
324 | } | ||
325 | |||
326 | if (qe_status & CREG_STAT_TXSERR) { | ||
327 | printk(KERN_ERR "%s: Transmit DMA sbus error ack.\n", dev->name); | ||
328 | dev->stats.tx_errors++; | ||
329 | dev->stats.tx_aborted_errors++; | ||
330 | mace_hwbug_workaround = 1; | ||
331 | } | ||
332 | |||
333 | if (qe_status & CREG_STAT_RCCOFLOW) { | ||
334 | dev->stats.rx_errors += 256; | ||
335 | dev->stats.collisions += 256; | ||
336 | } | ||
337 | |||
338 | if (qe_status & CREG_STAT_RUOFLOW) { | ||
339 | dev->stats.rx_errors += 256; | ||
340 | dev->stats.rx_over_errors += 256; | ||
341 | } | ||
342 | |||
343 | if (qe_status & CREG_STAT_MCOFLOW) { | ||
344 | dev->stats.rx_errors += 256; | ||
345 | dev->stats.rx_missed_errors += 256; | ||
346 | } | ||
347 | |||
348 | if (qe_status & CREG_STAT_RXFOFLOW) { | ||
349 | printk(KERN_ERR "%s: Receive fifo overflow.\n", dev->name); | ||
350 | dev->stats.rx_errors++; | ||
351 | dev->stats.rx_over_errors++; | ||
352 | } | ||
353 | |||
354 | if (qe_status & CREG_STAT_RLCOLL) { | ||
355 | printk(KERN_ERR "%s: Late receive collision.\n", dev->name); | ||
356 | dev->stats.rx_errors++; | ||
357 | dev->stats.collisions++; | ||
358 | } | ||
359 | |||
360 | if (qe_status & CREG_STAT_FCOFLOW) { | ||
361 | dev->stats.rx_errors += 256; | ||
362 | dev->stats.rx_frame_errors += 256; | ||
363 | } | ||
364 | |||
365 | if (qe_status & CREG_STAT_CECOFLOW) { | ||
366 | dev->stats.rx_errors += 256; | ||
367 | dev->stats.rx_crc_errors += 256; | ||
368 | } | ||
369 | |||
370 | if (qe_status & CREG_STAT_RXDROP) { | ||
371 | printk(KERN_ERR "%s: Receive packet dropped.\n", dev->name); | ||
372 | dev->stats.rx_errors++; | ||
373 | dev->stats.rx_dropped++; | ||
374 | dev->stats.rx_missed_errors++; | ||
375 | } | ||
376 | |||
377 | if (qe_status & CREG_STAT_RXSMALL) { | ||
378 | printk(KERN_ERR "%s: Receive buffer too small, driver bug.\n", dev->name); | ||
379 | dev->stats.rx_errors++; | ||
380 | dev->stats.rx_length_errors++; | ||
381 | } | ||
382 | |||
383 | if (qe_status & CREG_STAT_RXLERR) { | ||
384 | printk(KERN_ERR "%s: Receive late error.\n", dev->name); | ||
385 | dev->stats.rx_errors++; | ||
386 | mace_hwbug_workaround = 1; | ||
387 | } | ||
388 | |||
389 | if (qe_status & CREG_STAT_RXPERR) { | ||
390 | printk(KERN_ERR "%s: Receive DMA parity error.\n", dev->name); | ||
391 | dev->stats.rx_errors++; | ||
392 | dev->stats.rx_missed_errors++; | ||
393 | mace_hwbug_workaround = 1; | ||
394 | } | ||
395 | |||
396 | if (qe_status & CREG_STAT_RXSERR) { | ||
397 | printk(KERN_ERR "%s: Receive DMA sbus error ack.\n", dev->name); | ||
398 | dev->stats.rx_errors++; | ||
399 | dev->stats.rx_missed_errors++; | ||
400 | mace_hwbug_workaround = 1; | ||
401 | } | ||
402 | |||
403 | if (mace_hwbug_workaround) | ||
404 | qe_init(qep, 1); | ||
405 | return mace_hwbug_workaround; | ||
406 | } | ||
407 | |||
408 | /* Per-QE receive interrupt service routine. Just like on the happy meal | ||
409 | * we receive directly into skb's with a small packet copy water mark. | ||
410 | */ | ||
411 | static void qe_rx(struct sunqe *qep) | ||
412 | { | ||
413 | struct qe_rxd *rxbase = &qep->qe_block->qe_rxd[0]; | ||
414 | struct net_device *dev = qep->dev; | ||
415 | struct qe_rxd *this; | ||
416 | struct sunqe_buffers *qbufs = qep->buffers; | ||
417 | __u32 qbufs_dvma = qep->buffers_dvma; | ||
418 | int elem = qep->rx_new, drops = 0; | ||
419 | u32 flags; | ||
420 | |||
421 | this = &rxbase[elem]; | ||
422 | while (!((flags = this->rx_flags) & RXD_OWN)) { | ||
423 | struct sk_buff *skb; | ||
424 | unsigned char *this_qbuf = | ||
425 | &qbufs->rx_buf[elem & (RX_RING_SIZE - 1)][0]; | ||
426 | __u32 this_qbuf_dvma = qbufs_dvma + | ||
427 | qebuf_offset(rx_buf, (elem & (RX_RING_SIZE - 1))); | ||
428 | struct qe_rxd *end_rxd = | ||
429 | &rxbase[(elem+RX_RING_SIZE)&(RX_RING_MAXSIZE-1)]; | ||
430 | int len = (flags & RXD_LENGTH) - 4; /* QE adds ether FCS size to len */ | ||
431 | |||
432 | /* Check for errors. */ | ||
433 | if (len < ETH_ZLEN) { | ||
434 | dev->stats.rx_errors++; | ||
435 | dev->stats.rx_length_errors++; | ||
436 | dev->stats.rx_dropped++; | ||
437 | } else { | ||
438 | skb = dev_alloc_skb(len + 2); | ||
439 | if (skb == NULL) { | ||
440 | drops++; | ||
441 | dev->stats.rx_dropped++; | ||
442 | } else { | ||
443 | skb_reserve(skb, 2); | ||
444 | skb_put(skb, len); | ||
445 | skb_copy_to_linear_data(skb, (unsigned char *) this_qbuf, | ||
446 | len); | ||
447 | skb->protocol = eth_type_trans(skb, qep->dev); | ||
448 | netif_rx(skb); | ||
449 | dev->stats.rx_packets++; | ||
450 | dev->stats.rx_bytes += len; | ||
451 | } | ||
452 | } | ||
453 | end_rxd->rx_addr = this_qbuf_dvma; | ||
454 | end_rxd->rx_flags = (RXD_OWN | ((RXD_PKT_SZ) & RXD_LENGTH)); | ||
455 | |||
456 | elem = NEXT_RX(elem); | ||
457 | this = &rxbase[elem]; | ||
458 | } | ||
459 | qep->rx_new = elem; | ||
460 | if (drops) | ||
461 | printk(KERN_NOTICE "%s: Memory squeeze, dropping packet.\n", qep->dev->name); | ||
462 | } | ||
463 | |||
464 | static void qe_tx_reclaim(struct sunqe *qep); | ||
465 | |||
466 | /* Interrupts for all QE's get filtered out via the QEC master controller, | ||
467 | * so we just run through each qe and check to see who is signaling | ||
468 | * and thus needs to be serviced. | ||
469 | */ | ||
470 | static irqreturn_t qec_interrupt(int irq, void *dev_id) | ||
471 | { | ||
472 | struct sunqec *qecp = dev_id; | ||
473 | u32 qec_status; | ||
474 | int channel = 0; | ||
475 | |||
476 | /* Latch the status now. */ | ||
477 | qec_status = sbus_readl(qecp->gregs + GLOB_STAT); | ||
478 | while (channel < 4) { | ||
479 | if (qec_status & 0xf) { | ||
480 | struct sunqe *qep = qecp->qes[channel]; | ||
481 | u32 qe_status; | ||
482 | |||
483 | qe_status = sbus_readl(qep->qcregs + CREG_STAT); | ||
484 | if (qe_status & CREG_STAT_ERRORS) { | ||
485 | if (qe_is_bolixed(qep, qe_status)) | ||
486 | goto next; | ||
487 | } | ||
488 | if (qe_status & CREG_STAT_RXIRQ) | ||
489 | qe_rx(qep); | ||
490 | if (netif_queue_stopped(qep->dev) && | ||
491 | (qe_status & CREG_STAT_TXIRQ)) { | ||
492 | spin_lock(&qep->lock); | ||
493 | qe_tx_reclaim(qep); | ||
494 | if (TX_BUFFS_AVAIL(qep) > 0) { | ||
495 | /* Wake net queue and return to | ||
496 | * lazy tx reclaim. | ||
497 | */ | ||
498 | netif_wake_queue(qep->dev); | ||
499 | sbus_writel(1, qep->qcregs + CREG_TIMASK); | ||
500 | } | ||
501 | spin_unlock(&qep->lock); | ||
502 | } | ||
503 | next: | ||
504 | ; | ||
505 | } | ||
506 | qec_status >>= 4; | ||
507 | channel++; | ||
508 | } | ||
509 | |||
510 | return IRQ_HANDLED; | ||
511 | } | ||
512 | |||
513 | static int qe_open(struct net_device *dev) | ||
514 | { | ||
515 | struct sunqe *qep = netdev_priv(dev); | ||
516 | |||
517 | qep->mconfig = (MREGS_MCONFIG_TXENAB | | ||
518 | MREGS_MCONFIG_RXENAB | | ||
519 | MREGS_MCONFIG_MBAENAB); | ||
520 | return qe_init(qep, 0); | ||
521 | } | ||
522 | |||
523 | static int qe_close(struct net_device *dev) | ||
524 | { | ||
525 | struct sunqe *qep = netdev_priv(dev); | ||
526 | |||
527 | qe_stop(qep); | ||
528 | return 0; | ||
529 | } | ||
530 | |||
531 | /* Reclaim TX'd frames from the ring. This must always run under | ||
532 | * the IRQ protected qep->lock. | ||
533 | */ | ||
534 | static void qe_tx_reclaim(struct sunqe *qep) | ||
535 | { | ||
536 | struct qe_txd *txbase = &qep->qe_block->qe_txd[0]; | ||
537 | int elem = qep->tx_old; | ||
538 | |||
539 | while (elem != qep->tx_new) { | ||
540 | u32 flags = txbase[elem].tx_flags; | ||
541 | |||
542 | if (flags & TXD_OWN) | ||
543 | break; | ||
544 | elem = NEXT_TX(elem); | ||
545 | } | ||
546 | qep->tx_old = elem; | ||
547 | } | ||
548 | |||
549 | static void qe_tx_timeout(struct net_device *dev) | ||
550 | { | ||
551 | struct sunqe *qep = netdev_priv(dev); | ||
552 | int tx_full; | ||
553 | |||
554 | spin_lock_irq(&qep->lock); | ||
555 | |||
556 | /* Try to reclaim, if that frees up some tx | ||
557 | * entries, we're fine. | ||
558 | */ | ||
559 | qe_tx_reclaim(qep); | ||
560 | tx_full = TX_BUFFS_AVAIL(qep) <= 0; | ||
561 | |||
562 | spin_unlock_irq(&qep->lock); | ||
563 | |||
564 | if (!tx_full) | ||
565 | goto out; | ||
566 | |||
567 | printk(KERN_ERR "%s: transmit timed out, resetting\n", dev->name); | ||
568 | qe_init(qep, 1); | ||
569 | |||
570 | out: | ||
571 | netif_wake_queue(dev); | ||
572 | } | ||
573 | |||
574 | /* Get a packet queued to go onto the wire. */ | ||
575 | static int qe_start_xmit(struct sk_buff *skb, struct net_device *dev) | ||
576 | { | ||
577 | struct sunqe *qep = netdev_priv(dev); | ||
578 | struct sunqe_buffers *qbufs = qep->buffers; | ||
579 | __u32 txbuf_dvma, qbufs_dvma = qep->buffers_dvma; | ||
580 | unsigned char *txbuf; | ||
581 | int len, entry; | ||
582 | |||
583 | spin_lock_irq(&qep->lock); | ||
584 | |||
585 | qe_tx_reclaim(qep); | ||
586 | |||
587 | len = skb->len; | ||
588 | entry = qep->tx_new; | ||
589 | |||
590 | txbuf = &qbufs->tx_buf[entry & (TX_RING_SIZE - 1)][0]; | ||
591 | txbuf_dvma = qbufs_dvma + | ||
592 | qebuf_offset(tx_buf, (entry & (TX_RING_SIZE - 1))); | ||
593 | |||
594 | /* Avoid a race... */ | ||
595 | qep->qe_block->qe_txd[entry].tx_flags = TXD_UPDATE; | ||
596 | |||
597 | skb_copy_from_linear_data(skb, txbuf, len); | ||
598 | |||
599 | qep->qe_block->qe_txd[entry].tx_addr = txbuf_dvma; | ||
600 | qep->qe_block->qe_txd[entry].tx_flags = | ||
601 | (TXD_OWN | TXD_SOP | TXD_EOP | (len & TXD_LENGTH)); | ||
602 | qep->tx_new = NEXT_TX(entry); | ||
603 | |||
604 | /* Get it going. */ | ||
605 | sbus_writel(CREG_CTRL_TWAKEUP, qep->qcregs + CREG_CTRL); | ||
606 | |||
607 | dev->stats.tx_packets++; | ||
608 | dev->stats.tx_bytes += len; | ||
609 | |||
610 | if (TX_BUFFS_AVAIL(qep) <= 0) { | ||
611 | /* Halt the net queue and enable tx interrupts. | ||
612 | * When the tx queue empties the tx irq handler | ||
613 | * will wake up the queue and return us back to | ||
614 | * the lazy tx reclaim scheme. | ||
615 | */ | ||
616 | netif_stop_queue(dev); | ||
617 | sbus_writel(0, qep->qcregs + CREG_TIMASK); | ||
618 | } | ||
619 | spin_unlock_irq(&qep->lock); | ||
620 | |||
621 | dev_kfree_skb(skb); | ||
622 | |||
623 | return NETDEV_TX_OK; | ||
624 | } | ||
625 | |||
626 | static void qe_set_multicast(struct net_device *dev) | ||
627 | { | ||
628 | struct sunqe *qep = netdev_priv(dev); | ||
629 | struct netdev_hw_addr *ha; | ||
630 | u8 new_mconfig = qep->mconfig; | ||
631 | int i; | ||
632 | u32 crc; | ||
633 | |||
634 | /* Lock out others. */ | ||
635 | netif_stop_queue(dev); | ||
636 | |||
637 | if ((dev->flags & IFF_ALLMULTI) || (netdev_mc_count(dev) > 64)) { | ||
638 | sbus_writeb(MREGS_IACONFIG_ACHNGE | MREGS_IACONFIG_LARESET, | ||
639 | qep->mregs + MREGS_IACONFIG); | ||
640 | while ((sbus_readb(qep->mregs + MREGS_IACONFIG) & MREGS_IACONFIG_ACHNGE) != 0) | ||
641 | barrier(); | ||
642 | for (i = 0; i < 8; i++) | ||
643 | sbus_writeb(0xff, qep->mregs + MREGS_FILTER); | ||
644 | sbus_writeb(0, qep->mregs + MREGS_IACONFIG); | ||
645 | } else if (dev->flags & IFF_PROMISC) { | ||
646 | new_mconfig |= MREGS_MCONFIG_PROMISC; | ||
647 | } else { | ||
648 | u16 hash_table[4]; | ||
649 | u8 *hbytes = (unsigned char *) &hash_table[0]; | ||
650 | |||
651 | memset(hash_table, 0, sizeof(hash_table)); | ||
652 | netdev_for_each_mc_addr(ha, dev) { | ||
653 | crc = ether_crc_le(6, ha->addr); | ||
654 | crc >>= 26; | ||
655 | hash_table[crc >> 4] |= 1 << (crc & 0xf); | ||
656 | } | ||
657 | /* Program the qe with the new filter value. */ | ||
658 | sbus_writeb(MREGS_IACONFIG_ACHNGE | MREGS_IACONFIG_LARESET, | ||
659 | qep->mregs + MREGS_IACONFIG); | ||
660 | while ((sbus_readb(qep->mregs + MREGS_IACONFIG) & MREGS_IACONFIG_ACHNGE) != 0) | ||
661 | barrier(); | ||
662 | for (i = 0; i < 8; i++) { | ||
663 | u8 tmp = *hbytes++; | ||
664 | sbus_writeb(tmp, qep->mregs + MREGS_FILTER); | ||
665 | } | ||
666 | sbus_writeb(0, qep->mregs + MREGS_IACONFIG); | ||
667 | } | ||
668 | |||
669 | /* Any change of the logical address filter, the physical address, | ||
670 | * or enabling/disabling promiscuous mode causes the MACE to disable | ||
671 | * the receiver. So we must re-enable them here or else the MACE | ||
672 | * refuses to listen to anything on the network. Sheesh, took | ||
673 | * me a day or two to find this bug. | ||
674 | */ | ||
675 | qep->mconfig = new_mconfig; | ||
676 | sbus_writeb(qep->mconfig, qep->mregs + MREGS_MCONFIG); | ||
677 | |||
678 | /* Let us get going again. */ | ||
679 | netif_wake_queue(dev); | ||
680 | } | ||
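For reference, the filter math inside qe_set_multicast() distilled into a standalone helper (a sketch with a made-up name, same logic as the loop above): the top six bits of the little-endian CRC-32 of the station address select one of 64 bits in the MACE logical address filter.

	/* Sketch: set the filter bit for one multicast address. */
	static inline void qe_hash_set(u16 hash_table[4], const u8 *addr)
	{
		u32 crc = ether_crc_le(6, addr);	/* from linux/crc32.h */

		crc >>= 26;				/* upper 6 bits: 0..63 */
		hash_table[crc >> 4] |= 1 << (crc & 0xf); /* pick word, then bit */
	}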
681 | |||
682 | /* Ethtool support... */ | ||
683 | static void qe_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info) | ||
684 | { | ||
685 | const struct linux_prom_registers *regs; | ||
686 | struct sunqe *qep = netdev_priv(dev); | ||
687 | struct platform_device *op; | ||
688 | |||
689 | strcpy(info->driver, DRV_NAME); | ||
690 | strcpy(info->version, DRV_VERSION); | ||
691 | |||
692 | op = qep->op; | ||
693 | regs = of_get_property(op->dev.of_node, "reg", NULL); | ||
694 | if (regs) | ||
695 | sprintf(info->bus_info, "SBUS:%d", regs->which_io); | ||
696 | |||
697 | } | ||
698 | |||
699 | static u32 qe_get_link(struct net_device *dev) | ||
700 | { | ||
701 | struct sunqe *qep = netdev_priv(dev); | ||
702 | void __iomem *mregs = qep->mregs; | ||
703 | u8 phyconfig; | ||
704 | |||
705 | spin_lock_irq(&qep->lock); | ||
706 | phyconfig = sbus_readb(mregs + MREGS_PHYCONFIG); | ||
707 | spin_unlock_irq(&qep->lock); | ||
708 | |||
709 | return phyconfig & MREGS_PHYCONFIG_LSTAT; | ||
710 | } | ||
711 | |||
712 | static const struct ethtool_ops qe_ethtool_ops = { | ||
713 | .get_drvinfo = qe_get_drvinfo, | ||
714 | .get_link = qe_get_link, | ||
715 | }; | ||
716 | |||
717 | /* This is only called once at boot time for each card probed. */ | ||
718 | static void qec_init_once(struct sunqec *qecp, struct platform_device *op) | ||
719 | { | ||
720 | u8 bsizes = qecp->qec_bursts; | ||
721 | |||
722 | if (sbus_can_burst64() && (bsizes & DMA_BURST64)) { | ||
723 | sbus_writel(GLOB_CTRL_B64, qecp->gregs + GLOB_CTRL); | ||
724 | } else if (bsizes & DMA_BURST32) { | ||
725 | sbus_writel(GLOB_CTRL_B32, qecp->gregs + GLOB_CTRL); | ||
726 | } else { | ||
727 | sbus_writel(GLOB_CTRL_B16, qecp->gregs + GLOB_CTRL); | ||
728 | } | ||
729 | |||
730 | /* Packet size is only used in 100baseT BigMAC configurations; | ||
731 | * set it to zero just to be on the safe side. | ||
732 | */ | ||
733 | sbus_writel(GLOB_PSIZE_2048, qecp->gregs + GLOB_PSIZE); | ||
734 | |||
735 | /* Set the local memsize register, divided up to one piece per QE channel. */ | ||
736 | sbus_writel((resource_size(&op->resource[1]) >> 2), | ||
737 | qecp->gregs + GLOB_MSIZE); | ||
738 | |||
739 | /* Divide up the local QEC memory amongst the 4 QE receiver and | ||
740 | * transmitter FIFOs. Basically it is (total / 2 / num_channels). | ||
741 | */ | ||
742 | sbus_writel((resource_size(&op->resource[1]) >> 2) >> 1, | ||
743 | qecp->gregs + GLOB_TSIZE); | ||
744 | sbus_writel((resource_size(&op->resource[1]) >> 2) >> 1, | ||
745 | qecp->gregs + GLOB_RSIZE); | ||
746 | } | ||
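To make the split concrete: on a QEC carrying 256 KB of local memory, GLOB_MSIZE gets 64 KB (total >> 2, one slice per channel) and GLOB_TSIZE/GLOB_RSIZE get 32 KB each, matching the (total / 2 / num_channels) formula from the comment.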
747 | |||
748 | static u8 __devinit qec_get_burst(struct device_node *dp) | ||
749 | { | ||
750 | u8 bsizes, bsizes_more; | ||
751 | |||
752 | /* Find and set the burst sizes for the QEC, since it | ||
753 | * does the actual dma for all 4 channels. | ||
754 | */ | ||
755 | bsizes = of_getintprop_default(dp, "burst-sizes", 0xff); | ||
756 | bsizes &= 0xff; | ||
757 | bsizes_more = of_getintprop_default(dp->parent, "burst-sizes", 0xff); | ||
758 | |||
759 | if (bsizes_more != 0xff) | ||
760 | bsizes &= bsizes_more; | ||
761 | if (bsizes == 0xff || (bsizes & DMA_BURST16) == 0 || | ||
762 | (bsizes & DMA_BURST32)==0) | ||
763 | bsizes = (DMA_BURST32 - 1); | ||
764 | |||
765 | return bsizes; | ||
766 | } | ||
767 | |||
768 | static struct sunqec * __devinit get_qec(struct platform_device *child) | ||
769 | { | ||
770 | struct platform_device *op = to_platform_device(child->dev.parent); | ||
771 | struct sunqec *qecp; | ||
772 | |||
773 | qecp = dev_get_drvdata(&op->dev); | ||
774 | if (!qecp) { | ||
775 | qecp = kzalloc(sizeof(struct sunqec), GFP_KERNEL); | ||
776 | if (qecp) { | ||
777 | u32 ctrl; | ||
778 | |||
779 | qecp->op = op; | ||
780 | qecp->gregs = of_ioremap(&op->resource[0], 0, | ||
781 | GLOB_REG_SIZE, | ||
782 | "QEC Global Registers"); | ||
783 | if (!qecp->gregs) | ||
784 | goto fail; | ||
785 | |||
786 | /* Make sure the QEC is in MACE mode. */ | ||
787 | ctrl = sbus_readl(qecp->gregs + GLOB_CTRL); | ||
788 | ctrl &= 0xf0000000; | ||
789 | if (ctrl != GLOB_CTRL_MMODE) { | ||
790 | printk(KERN_ERR "qec: Not in MACE mode!\n"); | ||
791 | goto fail; | ||
792 | } | ||
793 | |||
794 | if (qec_global_reset(qecp->gregs)) | ||
795 | goto fail; | ||
796 | |||
797 | qecp->qec_bursts = qec_get_burst(op->dev.of_node); | ||
798 | |||
799 | qec_init_once(qecp, op); | ||
800 | |||
801 | if (request_irq(op->archdata.irqs[0], qec_interrupt, | ||
802 | IRQF_SHARED, "qec", (void *) qecp)) { | ||
803 | printk(KERN_ERR "qec: Can't register irq.\n"); | ||
804 | goto fail; | ||
805 | } | ||
806 | |||
807 | dev_set_drvdata(&op->dev, qecp); | ||
808 | |||
809 | qecp->next_module = root_qec_dev; | ||
810 | root_qec_dev = qecp; | ||
811 | } | ||
812 | } | ||
813 | |||
814 | return qecp; | ||
815 | |||
816 | fail: | ||
817 | if (qecp->gregs) | ||
818 | of_iounmap(&op->resource[0], qecp->gregs, GLOB_REG_SIZE); | ||
819 | kfree(qecp); | ||
820 | return NULL; | ||
821 | } | ||
822 | |||
823 | static const struct net_device_ops qec_ops = { | ||
824 | .ndo_open = qe_open, | ||
825 | .ndo_stop = qe_close, | ||
826 | .ndo_start_xmit = qe_start_xmit, | ||
827 | .ndo_set_multicast_list = qe_set_multicast, | ||
828 | .ndo_tx_timeout = qe_tx_timeout, | ||
829 | .ndo_change_mtu = eth_change_mtu, | ||
830 | .ndo_set_mac_address = eth_mac_addr, | ||
831 | .ndo_validate_addr = eth_validate_addr, | ||
832 | }; | ||
833 | |||
834 | static int __devinit qec_ether_init(struct platform_device *op) | ||
835 | { | ||
836 | static unsigned version_printed; | ||
837 | struct net_device *dev; | ||
838 | struct sunqec *qecp; | ||
839 | struct sunqe *qe; | ||
840 | int i, res; | ||
841 | |||
842 | if (version_printed++ == 0) | ||
843 | printk(KERN_INFO "%s", version); | ||
844 | |||
845 | dev = alloc_etherdev(sizeof(struct sunqe)); | ||
846 | if (!dev) | ||
847 | return -ENOMEM; | ||
848 | |||
849 | memcpy(dev->dev_addr, idprom->id_ethaddr, 6); | ||
850 | |||
851 | qe = netdev_priv(dev); | ||
852 | |||
853 | res = -ENODEV; | ||
854 | |||
855 | i = of_getintprop_default(op->dev.of_node, "channel#", -1); | ||
856 | if (i == -1) | ||
857 | goto fail; | ||
858 | qe->channel = i; | ||
859 | spin_lock_init(&qe->lock); | ||
860 | |||
861 | qecp = get_qec(op); | ||
862 | if (!qecp) | ||
863 | goto fail; | ||
864 | |||
865 | qecp->qes[qe->channel] = qe; | ||
866 | qe->dev = dev; | ||
867 | qe->parent = qecp; | ||
868 | qe->op = op; | ||
869 | |||
870 | res = -ENOMEM; | ||
871 | qe->qcregs = of_ioremap(&op->resource[0], 0, | ||
872 | CREG_REG_SIZE, "QEC Channel Registers"); | ||
873 | if (!qe->qcregs) { | ||
874 | printk(KERN_ERR "qe: Cannot map channel registers.\n"); | ||
875 | goto fail; | ||
876 | } | ||
877 | |||
878 | qe->mregs = of_ioremap(&op->resource[1], 0, | ||
879 | MREGS_REG_SIZE, "QE MACE Registers"); | ||
880 | if (!qe->mregs) { | ||
881 | printk(KERN_ERR "qe: Cannot map MACE registers.\n"); | ||
882 | goto fail; | ||
883 | } | ||
884 | |||
885 | qe->qe_block = dma_alloc_coherent(&op->dev, PAGE_SIZE, | ||
886 | &qe->qblock_dvma, GFP_ATOMIC); | ||
887 | qe->buffers = dma_alloc_coherent(&op->dev, sizeof(struct sunqe_buffers), | ||
888 | &qe->buffers_dvma, GFP_ATOMIC); | ||
889 | if (qe->qe_block == NULL || qe->qblock_dvma == 0 || | ||
890 | qe->buffers == NULL || qe->buffers_dvma == 0) | ||
891 | goto fail; | ||
892 | |||
893 | /* Stop this QE. */ | ||
894 | qe_stop(qe); | ||
895 | |||
896 | SET_NETDEV_DEV(dev, &op->dev); | ||
897 | |||
898 | dev->watchdog_timeo = 5*HZ; | ||
899 | dev->irq = op->archdata.irqs[0]; | ||
900 | dev->dma = 0; | ||
901 | dev->ethtool_ops = &qe_ethtool_ops; | ||
902 | dev->netdev_ops = &qec_ops; | ||
903 | |||
904 | res = register_netdev(dev); | ||
905 | if (res) | ||
906 | goto fail; | ||
907 | |||
908 | dev_set_drvdata(&op->dev, qe); | ||
909 | |||
910 | printk(KERN_INFO "%s: qe channel[%d] ", dev->name, qe->channel); | ||
911 | for (i = 0; i < 6; i++) | ||
912 | printk ("%2.2x%c", | ||
913 | dev->dev_addr[i], | ||
914 | i == 5 ? ' ': ':'); | ||
915 | printk("\n"); | ||
916 | |||
917 | |||
918 | return 0; | ||
919 | |||
920 | fail: | ||
921 | if (qe->qcregs) | ||
922 | of_iounmap(&op->resource[0], qe->qcregs, CREG_REG_SIZE); | ||
923 | if (qe->mregs) | ||
924 | of_iounmap(&op->resource[1], qe->mregs, MREGS_REG_SIZE); | ||
925 | if (qe->qe_block) | ||
926 | dma_free_coherent(&op->dev, PAGE_SIZE, | ||
927 | qe->qe_block, qe->qblock_dvma); | ||
928 | if (qe->buffers) | ||
929 | dma_free_coherent(&op->dev, | ||
930 | sizeof(struct sunqe_buffers), | ||
931 | qe->buffers, | ||
932 | qe->buffers_dvma); | ||
933 | |||
934 | free_netdev(dev); | ||
935 | |||
936 | return res; | ||
937 | } | ||
938 | |||
939 | static int __devinit qec_sbus_probe(struct platform_device *op) | ||
940 | { | ||
941 | return qec_ether_init(op); | ||
942 | } | ||
943 | |||
944 | static int __devexit qec_sbus_remove(struct platform_device *op) | ||
945 | { | ||
946 | struct sunqe *qp = dev_get_drvdata(&op->dev); | ||
947 | struct net_device *net_dev = qp->dev; | ||
948 | |||
949 | unregister_netdev(net_dev); | ||
950 | |||
951 | of_iounmap(&op->resource[0], qp->qcregs, CREG_REG_SIZE); | ||
952 | of_iounmap(&op->resource[1], qp->mregs, MREGS_REG_SIZE); | ||
953 | dma_free_coherent(&op->dev, PAGE_SIZE, | ||
954 | qp->qe_block, qp->qblock_dvma); | ||
955 | dma_free_coherent(&op->dev, sizeof(struct sunqe_buffers), | ||
956 | qp->buffers, qp->buffers_dvma); | ||
957 | |||
958 | free_netdev(net_dev); | ||
959 | |||
960 | dev_set_drvdata(&op->dev, NULL); | ||
961 | |||
962 | return 0; | ||
963 | } | ||
964 | |||
965 | static const struct of_device_id qec_sbus_match[] = { | ||
966 | { | ||
967 | .name = "qe", | ||
968 | }, | ||
969 | {}, | ||
970 | }; | ||
971 | |||
972 | MODULE_DEVICE_TABLE(of, qec_sbus_match); | ||
973 | |||
974 | static struct platform_driver qec_sbus_driver = { | ||
975 | .driver = { | ||
976 | .name = "qec", | ||
977 | .owner = THIS_MODULE, | ||
978 | .of_match_table = qec_sbus_match, | ||
979 | }, | ||
980 | .probe = qec_sbus_probe, | ||
981 | .remove = __devexit_p(qec_sbus_remove), | ||
982 | }; | ||
983 | |||
984 | static int __init qec_init(void) | ||
985 | { | ||
986 | return platform_driver_register(&qec_sbus_driver); | ||
987 | } | ||
988 | |||
989 | static void __exit qec_exit(void) | ||
990 | { | ||
991 | platform_driver_unregister(&qec_sbus_driver); | ||
992 | |||
993 | while (root_qec_dev) { | ||
994 | struct sunqec *next = root_qec_dev->next_module; | ||
995 | struct platform_device *op = root_qec_dev->op; | ||
996 | |||
997 | free_irq(op->archdata.irqs[0], (void *) root_qec_dev); | ||
998 | of_iounmap(&op->resource[0], root_qec_dev->gregs, | ||
999 | GLOB_REG_SIZE); | ||
1000 | kfree(root_qec_dev); | ||
1001 | |||
1002 | root_qec_dev = next; | ||
1003 | } | ||
1004 | } | ||
1005 | |||
1006 | module_init(qec_init); | ||
1007 | module_exit(qec_exit); | ||
diff --git a/drivers/net/ethernet/sun/sunqe.h b/drivers/net/ethernet/sun/sunqe.h new file mode 100644 index 000000000000..581781b6b2fa --- /dev/null +++ b/drivers/net/ethernet/sun/sunqe.h | |||
@@ -0,0 +1,350 @@ | |||
1 | /* $Id: sunqe.h,v 1.13 2000/02/09 11:15:42 davem Exp $ | ||
2 | * sunqe.h: Definitions for the Sun QuadEthernet driver. | ||
3 | * | ||
4 | * Copyright (C) 1996 David S. Miller (davem@caip.rutgers.edu) | ||
5 | */ | ||
6 | |||
7 | #ifndef _SUNQE_H | ||
8 | #define _SUNQE_H | ||
9 | |||
10 | /* QEC global registers. */ | ||
11 | #define GLOB_CTRL 0x00UL /* Control */ | ||
12 | #define GLOB_STAT 0x04UL /* Status */ | ||
13 | #define GLOB_PSIZE 0x08UL /* Packet Size */ | ||
14 | #define GLOB_MSIZE 0x0cUL /* Local-memory Size */ | ||
15 | #define GLOB_RSIZE 0x10UL /* Receive partition size */ | ||
16 | #define GLOB_TSIZE 0x14UL /* Transmit partition size */ | ||
17 | #define GLOB_REG_SIZE 0x18UL | ||
18 | |||
19 | #define GLOB_CTRL_MMODE 0x40000000 /* MACE qec mode */ | ||
20 | #define GLOB_CTRL_BMODE 0x10000000 /* BigMAC qec mode */ | ||
21 | #define GLOB_CTRL_EPAR 0x00000020 /* Enable parity */ | ||
22 | #define GLOB_CTRL_ACNTRL 0x00000018 /* SBUS arbitration control */ | ||
23 | #define GLOB_CTRL_B64 0x00000004 /* 64 byte dvma bursts */ | ||
24 | #define GLOB_CTRL_B32 0x00000002 /* 32 byte dvma bursts */ | ||
25 | #define GLOB_CTRL_B16 0x00000000 /* 16 byte dvma bursts */ | ||
26 | #define GLOB_CTRL_RESET 0x00000001 /* Reset the QEC */ | ||
27 | |||
28 | #define GLOB_STAT_TX 0x00000008 /* BigMAC Transmit IRQ */ | ||
29 | #define GLOB_STAT_RX 0x00000004 /* BigMAC Receive IRQ */ | ||
30 | #define GLOB_STAT_BM 0x00000002 /* BigMAC Global IRQ */ | ||
31 | #define GLOB_STAT_ER 0x00000001 /* BigMAC Error IRQ */ | ||
32 | |||
33 | #define GLOB_PSIZE_2048 0x00 /* 2k packet size */ | ||
34 | #define GLOB_PSIZE_4096 0x01 /* 4k packet size */ | ||
35 | #define GLOB_PSIZE_6144 0x10 /* 6k packet size */ | ||
36 | #define GLOB_PSIZE_8192 0x11 /* 8k packet size */ | ||
37 | |||
38 | /* In MACE mode, there are four qe channels. Each channel has its own | ||
39 | * status bits in the QEC status register. This macro picks out the | ||
40 | * ones you want. | ||
41 | */ | ||
42 | #define GLOB_STAT_PER_QE(status, channel) (((status) >> ((channel) * 4)) & 0xf) | ||
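Usage sketch (hypothetical handler fragment): one could test a single channel with

	if (GLOB_STAT_PER_QE(qec_status, 2))
		/* channel 2 is signalling; go read its CREG_STAT */

qec_interrupt() in sunqe.c gets the same effect by shifting qec_status right four bits per loop iteration instead of invoking the macro.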
43 | |||
44 | /* The following registers are for per-qe channel information/status. */ | ||
45 | #define CREG_CTRL 0x00UL /* Control */ | ||
46 | #define CREG_STAT 0x04UL /* Status */ | ||
47 | #define CREG_RXDS 0x08UL /* RX descriptor ring ptr */ | ||
48 | #define CREG_TXDS 0x0cUL /* TX descriptor ring ptr */ | ||
49 | #define CREG_RIMASK 0x10UL /* RX Interrupt Mask */ | ||
50 | #define CREG_TIMASK 0x14UL /* TX Interrupt Mask */ | ||
51 | #define CREG_QMASK 0x18UL /* QEC Error Interrupt Mask */ | ||
52 | #define CREG_MMASK 0x1cUL /* MACE Error Interrupt Mask */ | ||
53 | #define CREG_RXWBUFPTR 0x20UL /* Local memory rx write ptr */ | ||
54 | #define CREG_RXRBUFPTR 0x24UL /* Local memory rx read ptr */ | ||
55 | #define CREG_TXWBUFPTR 0x28UL /* Local memory tx write ptr */ | ||
56 | #define CREG_TXRBUFPTR 0x2cUL /* Local memory tx read ptr */ | ||
57 | #define CREG_CCNT 0x30UL /* Collision Counter */ | ||
58 | #define CREG_PIPG 0x34UL /* Inter-Frame Gap */ | ||
59 | #define CREG_REG_SIZE 0x38UL | ||
60 | |||
61 | #define CREG_CTRL_RXOFF 0x00000004 /* Disable this qe's receiver*/ | ||
62 | #define CREG_CTRL_RESET 0x00000002 /* Reset this qe channel */ | ||
63 | #define CREG_CTRL_TWAKEUP 0x00000001 /* Transmitter Wakeup, 'go'. */ | ||
64 | |||
65 | #define CREG_STAT_EDEFER 0x10000000 /* Excessive Defers */ | ||
66 | #define CREG_STAT_CLOSS 0x08000000 /* Carrier Loss */ | ||
67 | #define CREG_STAT_ERETRIES 0x04000000 /* More than 16 retries */ | ||
68 | #define CREG_STAT_LCOLL 0x02000000 /* Late TX Collision */ | ||
69 | #define CREG_STAT_FUFLOW 0x01000000 /* FIFO Underflow */ | ||
70 | #define CREG_STAT_JERROR 0x00800000 /* Jabber Error */ | ||
71 | #define CREG_STAT_BERROR 0x00400000 /* Babble Error */ | ||
72 | #define CREG_STAT_TXIRQ 0x00200000 /* Transmit Interrupt */ | ||
73 | #define CREG_STAT_CCOFLOW 0x00100000 /* TX Coll-counter Overflow */ | ||
74 | #define CREG_STAT_TXDERROR 0x00080000 /* TX Descriptor is bogus */ | ||
75 | #define CREG_STAT_TXLERR 0x00040000 /* Late Transmit Error */ | ||
76 | #define CREG_STAT_TXPERR 0x00020000 /* Transmit Parity Error */ | ||
77 | #define CREG_STAT_TXSERR 0x00010000 /* Transmit SBUS error ack */ | ||
78 | #define CREG_STAT_RCCOFLOW 0x00001000 /* RX Coll-counter Overflow */ | ||
79 | #define CREG_STAT_RUOFLOW 0x00000800 /* Runt Counter Overflow */ | ||
80 | #define CREG_STAT_MCOFLOW 0x00000400 /* Missed Counter Overflow */ | ||
81 | #define CREG_STAT_RXFOFLOW 0x00000200 /* RX FIFO Overflow */ | ||
82 | #define CREG_STAT_RLCOLL 0x00000100 /* RX Late Collision */ | ||
83 | #define CREG_STAT_FCOFLOW 0x00000080 /* Frame Counter Overflow */ | ||
84 | #define CREG_STAT_CECOFLOW 0x00000040 /* CRC Error-counter Overflow*/ | ||
85 | #define CREG_STAT_RXIRQ 0x00000020 /* Receive Interrupt */ | ||
86 | #define CREG_STAT_RXDROP 0x00000010 /* Dropped a RX'd packet */ | ||
87 | #define CREG_STAT_RXSMALL 0x00000008 /* Receive buffer too small */ | ||
88 | #define CREG_STAT_RXLERR 0x00000004 /* Receive Late Error */ | ||
89 | #define CREG_STAT_RXPERR 0x00000002 /* Receive Parity Error */ | ||
90 | #define CREG_STAT_RXSERR 0x00000001 /* Receive SBUS Error ACK */ | ||
91 | |||
92 | #define CREG_STAT_ERRORS (CREG_STAT_EDEFER|CREG_STAT_CLOSS|CREG_STAT_ERETRIES| \ | ||
93 | CREG_STAT_LCOLL|CREG_STAT_FUFLOW|CREG_STAT_JERROR| \ | ||
94 | CREG_STAT_BERROR|CREG_STAT_CCOFLOW|CREG_STAT_TXDERROR| \ | ||
95 | CREG_STAT_TXLERR|CREG_STAT_TXPERR|CREG_STAT_TXSERR| \ | ||
96 | CREG_STAT_RCCOFLOW|CREG_STAT_RUOFLOW|CREG_STAT_MCOFLOW| \ | ||
97 | CREG_STAT_RXFOFLOW|CREG_STAT_RLCOLL|CREG_STAT_FCOFLOW| \ | ||
98 | CREG_STAT_CECOFLOW|CREG_STAT_RXDROP|CREG_STAT_RXSMALL| \ | ||
99 | CREG_STAT_RXLERR|CREG_STAT_RXPERR|CREG_STAT_RXSERR) | ||
100 | |||
101 | #define CREG_QMASK_COFLOW 0x00100000 /* CollCntr overflow */ | ||
102 | #define CREG_QMASK_TXDERROR 0x00080000 /* TXD error */ | ||
103 | #define CREG_QMASK_TXLERR 0x00040000 /* TX late error */ | ||
104 | #define CREG_QMASK_TXPERR 0x00020000 /* TX parity error */ | ||
105 | #define CREG_QMASK_TXSERR 0x00010000 /* TX sbus error ack */ | ||
106 | #define CREG_QMASK_RXDROP 0x00000010 /* RX drop */ | ||
107 | #define CREG_QMASK_RXBERROR 0x00000008 /* RX buffer error */ | ||
108 | #define CREG_QMASK_RXLEERR 0x00000004 /* RX late error */ | ||
109 | #define CREG_QMASK_RXPERR 0x00000002 /* RX parity error */ | ||
110 | #define CREG_QMASK_RXSERR 0x00000001 /* RX sbus error ack */ | ||
111 | |||
112 | #define CREG_MMASK_EDEFER 0x10000000 /* Excess defer */ | ||
113 | #define CREG_MMASK_CLOSS 0x08000000 /* Carrier loss */ | ||
114 | #define CREG_MMASK_ERETRY 0x04000000 /* Excess retry */ | ||
115 | #define CREG_MMASK_LCOLL 0x02000000 /* Late collision error */ | ||
116 | #define CREG_MMASK_UFLOW 0x01000000 /* Underflow */ | ||
117 | #define CREG_MMASK_JABBER 0x00800000 /* Jabber error */ | ||
118 | #define CREG_MMASK_BABBLE 0x00400000 /* Babble error */ | ||
119 | #define CREG_MMASK_OFLOW 0x00000800 /* Overflow */ | ||
120 | #define CREG_MMASK_RXCOLL 0x00000400 /* RX Coll-Cntr overflow */ | ||
121 | #define CREG_MMASK_RPKT 0x00000200 /* Runt pkt overflow */ | ||
122 | #define CREG_MMASK_MPKT 0x00000100 /* Missed pkt overflow */ | ||
123 | |||
124 | #define CREG_PIPG_TENAB 0x00000020 /* Enable Throttle */ | ||
125 | #define CREG_PIPG_MMODE 0x00000010 /* Manual Mode */ | ||
126 | #define CREG_PIPG_WMASK 0x0000000f /* SBUS Wait Mask */ | ||
127 | |||
128 | /* Per-channel AMD 79C940 MACE registers. */ | ||
129 | #define MREGS_RXFIFO 0x00UL /* Receive FIFO */ | ||
130 | #define MREGS_TXFIFO 0x01UL /* Transmit FIFO */ | ||
131 | #define MREGS_TXFCNTL 0x02UL /* Transmit Frame Control */ | ||
132 | #define MREGS_TXFSTAT 0x03UL /* Transmit Frame Status */ | ||
133 | #define MREGS_TXRCNT 0x04UL /* Transmit Retry Count */ | ||
134 | #define MREGS_RXFCNTL 0x05UL /* Receive Frame Control */ | ||
135 | #define MREGS_RXFSTAT 0x06UL /* Receive Frame Status */ | ||
136 | #define MREGS_FFCNT 0x07UL /* FIFO Frame Count */ | ||
137 | #define MREGS_IREG 0x08UL /* Interrupt Register */ | ||
138 | #define MREGS_IMASK 0x09UL /* Interrupt Mask */ | ||
139 | #define MREGS_POLL 0x0aUL /* POLL Register */ | ||
140 | #define MREGS_BCONFIG 0x0bUL /* BIU Config */ | ||
141 | #define MREGS_FCONFIG 0x0cUL /* FIFO Config */ | ||
142 | #define MREGS_MCONFIG 0x0dUL /* MAC Config */ | ||
143 | #define MREGS_PLSCONFIG 0x0eUL /* PLS Config */ | ||
144 | #define MREGS_PHYCONFIG 0x0fUL /* PHY Config */ | ||
145 | #define MREGS_CHIPID1 0x10UL /* Chip-ID, low bits */ | ||
146 | #define MREGS_CHIPID2 0x11UL /* Chip-ID, high bits */ | ||
147 | #define MREGS_IACONFIG 0x12UL /* Internal Address Config */ | ||
148 | /* 0x13UL, reserved */ | ||
149 | #define MREGS_FILTER 0x14UL /* Logical Address Filter */ | ||
150 | #define MREGS_ETHADDR 0x15UL /* Our Ethernet Address */ | ||
151 | /* 0x16UL, reserved */ | ||
152 | /* 0x17UL, reserved */ | ||
153 | #define MREGS_MPCNT 0x18UL /* Missed Packet Count */ | ||
154 | /* 0x19UL, reserved */ | ||
155 | #define MREGS_RPCNT 0x1aUL /* Runt Packet Count */ | ||
156 | #define MREGS_RCCNT 0x1bUL /* RX Collision Count */ | ||
157 | /* 0x1cUL, reserved */ | ||
158 | #define MREGS_UTEST 0x1dUL /* User Test */ | ||
159 | #define MREGS_RTEST1 0x1eUL /* Reserved Test 1 */ | ||
160 | #define MREGS_RTEST2 0x1fUL /* Reserved Test 2 */ | ||
161 | #define MREGS_REG_SIZE 0x20UL | ||
162 | |||
163 | #define MREGS_TXFCNTL_DRETRY 0x80 /* Retry disable */ | ||
164 | #define MREGS_TXFCNTL_DFCS 0x08 /* Disable TX FCS */ | ||
165 | #define MREGS_TXFCNTL_AUTOPAD 0x01 /* TX auto pad */ | ||
166 | |||
167 | #define MREGS_TXFSTAT_VALID 0x80 /* TX valid */ | ||
168 | #define MREGS_TXFSTAT_UNDERFLOW 0x40 /* TX underflow */ | ||
169 | #define MREGS_TXFSTAT_LCOLL 0x20 /* TX late collision */ | ||
170 | #define MREGS_TXFSTAT_MRETRY 0x10 /* TX > 1 retries */ | ||
171 | #define MREGS_TXFSTAT_ORETRY 0x08 /* TX 1 retry */ | ||
172 | #define MREGS_TXFSTAT_PDEFER 0x04 /* TX pkt deferred */ | ||
173 | #define MREGS_TXFSTAT_CLOSS 0x02 /* TX carrier lost */ | ||
174 | #define MREGS_TXFSTAT_RERROR 0x01 /* TX retry error */ | ||
175 | |||
176 | #define MREGS_TXRCNT_EDEFER 0x80 /* TX Excess defers */ | ||
177 | #define MREGS_TXRCNT_CMASK 0x0f /* TX retry count */ | ||
178 | |||
179 | #define MREGS_RXFCNTL_LOWLAT 0x08 /* RX low latency */ | ||
180 | #define MREGS_RXFCNTL_AREJECT 0x04 /* RX addr match rej */ | ||
181 | #define MREGS_RXFCNTL_AUTOSTRIP 0x01 /* RX auto strip */ | ||
182 | |||
183 | #define MREGS_RXFSTAT_OVERFLOW 0x80 /* RX overflow */ | ||
184 | #define MREGS_RXFSTAT_LCOLL 0x40 /* RX late collision */ | ||
185 | #define MREGS_RXFSTAT_FERROR 0x20 /* RX framing error */ | ||
186 | #define MREGS_RXFSTAT_FCSERROR 0x10 /* RX FCS error */ | ||
187 | #define MREGS_RXFSTAT_RBCNT 0x0f /* RX msg byte count */ | ||
188 | |||
189 | #define MREGS_FFCNT_RX 0xf0 /* RX FIFO frame cnt */ | ||
190 | #define MREGS_FFCNT_TX 0x0f /* TX FIFO frame cnt */ | ||
191 | |||
192 | #define MREGS_IREG_JABBER 0x80 /* IRQ Jabber error */ | ||
193 | #define MREGS_IREG_BABBLE 0x40 /* IRQ Babble error */ | ||
194 | #define MREGS_IREG_COLL 0x20 /* IRQ Collision error */ | ||
195 | #define MREGS_IREG_RCCO 0x10 /* IRQ Collision cnt overflow */ | ||
196 | #define MREGS_IREG_RPKTCO 0x08 /* IRQ Runt packet count overflow */ | ||
197 | #define MREGS_IREG_MPKTCO 0x04 /* IRQ missed packet cnt overflow */ | ||
198 | #define MREGS_IREG_RXIRQ 0x02 /* IRQ RX'd a packet */ | ||
199 | #define MREGS_IREG_TXIRQ 0x01 /* IRQ TX'd a packet */ | ||
200 | |||
201 | #define MREGS_IMASK_BABBLE 0x40 /* IMASK Babble errors */ | ||
202 | #define MREGS_IMASK_COLL 0x20 /* IMASK Collision errors */ | ||
203 | #define MREGS_IMASK_MPKTCO 0x04 /* IMASK Missed pkt cnt overflow */ | ||
204 | #define MREGS_IMASK_RXIRQ 0x02 /* IMASK RX interrupts */ | ||
205 | #define MREGS_IMASK_TXIRQ 0x01 /* IMASK TX interrupts */ | ||
206 | |||
207 | #define MREGS_POLL_TXVALID 0x80 /* TX is valid */ | ||
208 | #define MREGS_POLL_TDTR 0x40 /* TX data transfer request */ | ||
209 | #define MREGS_POLL_RDTR 0x20 /* RX data transfer request */ | ||
210 | |||
211 | #define MREGS_BCONFIG_BSWAP 0x40 /* Byte Swap */ | ||
212 | #define MREGS_BCONFIG_4TS 0x00 /* 4byte transmit start point */ | ||
213 | #define MREGS_BCONFIG_16TS 0x10 /* 16byte transmit start point */ | ||
214 | #define MREGS_BCONFIG_64TS 0x20 /* 64byte transmit start point */ | ||
215 | #define MREGS_BCONFIG_112TS 0x30 /* 112byte transmit start point */ | ||
216 | #define MREGS_BCONFIG_RESET 0x01 /* SW-Reset the MACE */ | ||
217 | |||
218 | #define MREGS_FCONFIG_TXF8 0x00 /* TX fifo 8 write cycles */ | ||
219 | #define MREGS_FCONFIG_TXF32 0x80 /* TX fifo 32 write cycles */ | ||
220 | #define MREGS_FCONFIG_TXF16 0x40 /* TX fifo 16 write cycles */ | ||
221 | #define MREGS_FCONFIG_RXF64 0x20 /* RX fifo 64 write cycles */ | ||
222 | #define MREGS_FCONFIG_RXF32 0x10 /* RX fifo 32 write cycles */ | ||
223 | #define MREGS_FCONFIG_RXF16 0x00 /* RX fifo 16 write cycles */ | ||
224 | #define MREGS_FCONFIG_TFWU 0x08 /* TX fifo watermark update */ | ||
225 | #define MREGS_FCONFIG_RFWU 0x04 /* RX fifo watermark update */ | ||
226 | #define MREGS_FCONFIG_TBENAB 0x02 /* TX burst enable */ | ||
227 | #define MREGS_FCONFIG_RBENAB 0x01 /* RX burst enable */ | ||
228 | |||
229 | #define MREGS_MCONFIG_PROMISC 0x80 /* Promiscuous mode enable */ | ||
230 | #define MREGS_MCONFIG_TPDDISAB 0x40 /* TX 2part deferral disable */ | ||
231 | #define MREGS_MCONFIG_MBAENAB 0x20 /* Modified backoff enable */ | ||
232 | #define MREGS_MCONFIG_RPADISAB 0x08 /* RX physical addr disable */ | ||
233 | #define MREGS_MCONFIG_RBDISAB 0x04 /* RX broadcast disable */ | ||
234 | #define MREGS_MCONFIG_TXENAB 0x02 /* Enable transmitter */ | ||
235 | #define MREGS_MCONFIG_RXENAB 0x01 /* Enable receiver */ | ||
236 | |||
237 | #define MREGS_PLSCONFIG_TXMS 0x08 /* TX mode select */ | ||
238 | #define MREGS_PLSCONFIG_GPSI 0x06 /* Use GPSI connector */ | ||
239 | #define MREGS_PLSCONFIG_DAI 0x04 /* Use DAI connector */ | ||
240 | #define MREGS_PLSCONFIG_TP 0x02 /* Use TwistedPair connector */ | ||
241 | #define MREGS_PLSCONFIG_AUI 0x00 /* Use AUI connector */ | ||
242 | #define MREGS_PLSCONFIG_IOENAB 0x01 /* PLS I/O enable */ | ||
243 | |||
244 | #define MREGS_PHYCONFIG_LSTAT 0x80 /* Link status */ | ||
245 | #define MREGS_PHYCONFIG_LTESTDIS 0x40 /* Disable link test logic */ | ||
246 | #define MREGS_PHYCONFIG_RXPOLARITY 0x20 /* RX polarity */ | ||
247 | #define MREGS_PHYCONFIG_APCDISAB 0x10 /* AutoPolarityCorrect disab */ | ||
248 | #define MREGS_PHYCONFIG_LTENAB 0x08 /* Select low threshold */ | ||
249 | #define MREGS_PHYCONFIG_AUTO 0x04 /* Connector port auto-sel */ | ||
250 | #define MREGS_PHYCONFIG_RWU 0x02 /* Remote WakeUp */ | ||
251 | #define MREGS_PHYCONFIG_AW 0x01 /* Auto Wakeup */ | ||
252 | |||
253 | #define MREGS_IACONFIG_ACHNGE 0x80 /* Do address change */ | ||
254 | #define MREGS_IACONFIG_PARESET 0x04 /* Physical address reset */ | ||
255 | #define MREGS_IACONFIG_LARESET 0x02 /* Logical address reset */ | ||
256 | |||
257 | #define MREGS_UTEST_RTRENAB 0x80 /* Enable resv test register */ | ||
258 | #define MREGS_UTEST_RTRDISAB 0x40 /* Disab resv test register */ | ||
259 | #define MREGS_UTEST_RPACCEPT 0x20 /* Accept runt packets */ | ||
260 | #define MREGS_UTEST_FCOLL 0x10 /* Force collision status */ | ||
261 | #define MREGS_UTEST_FCSENAB 0x08 /* Enable FCS on RX */ | ||
262 | #define MREGS_UTEST_INTLOOPM 0x06 /* Intern lpback w/MENDEC */ | ||
263 | #define MREGS_UTEST_INTLOOP 0x04 /* Intern lpback */ | ||
264 | #define MREGS_UTEST_EXTLOOP 0x02 /* Extern lpback */ | ||
265 | #define MREGS_UTEST_NOLOOP 0x00 /* No loopback */ | ||
266 | |||
267 | struct qe_rxd { | ||
268 | u32 rx_flags; | ||
269 | u32 rx_addr; | ||
270 | }; | ||
271 | |||
272 | #define RXD_OWN 0x80000000 /* Ownership. */ | ||
273 | #define RXD_UPDATE 0x10000000 /* Being Updated? */ | ||
274 | #define RXD_LENGTH 0x000007ff /* Packet Length. */ | ||
275 | |||
276 | struct qe_txd { | ||
277 | u32 tx_flags; | ||
278 | u32 tx_addr; | ||
279 | }; | ||
280 | |||
281 | #define TXD_OWN 0x80000000 /* Ownership. */ | ||
282 | #define TXD_SOP 0x40000000 /* Start Of Packet */ | ||
283 | #define TXD_EOP 0x20000000 /* End Of Packet */ | ||
284 | #define TXD_UPDATE 0x10000000 /* Being Updated? */ | ||
285 | #define TXD_LENGTH 0x000007ff /* Packet Length. */ | ||
286 | |||
287 | #define TX_RING_MAXSIZE 256 | ||
288 | #define RX_RING_MAXSIZE 256 | ||
289 | |||
290 | #define TX_RING_SIZE 16 | ||
291 | #define RX_RING_SIZE 16 | ||
292 | |||
293 | #define NEXT_RX(num) (((num) + 1) & (RX_RING_MAXSIZE - 1)) | ||
294 | #define NEXT_TX(num) (((num) + 1) & (TX_RING_MAXSIZE - 1)) | ||
295 | #define PREV_RX(num) (((num) - 1) & (RX_RING_MAXSIZE - 1)) | ||
296 | #define PREV_TX(num) (((num) - 1) & (TX_RING_MAXSIZE - 1)) | ||
297 | |||
298 | #define TX_BUFFS_AVAIL(qp) \ | ||
299 | (((qp)->tx_old <= (qp)->tx_new) ? \ | ||
300 | (qp)->tx_old + (TX_RING_SIZE - 1) - (qp)->tx_new : \ | ||
301 | (qp)->tx_old - (qp)->tx_new - 1) | ||
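/* [editor's note] A worked example of TX_BUFFS_AVAIL(), assuming
 * TX_RING_SIZE == 16 as defined above. One slot is always kept unused so
 * that tx_old == tx_new can only mean "ring empty":
 *
 *	tx_old == 0,  tx_new == 0  ->  0 + 15 - 0   == 15 slots free
 *	tx_old == 3,  tx_new == 14 ->  3 + 15 - 14  ==  4 slots free
 *	tx_old == 14, tx_new == 3  ->  14 - 3 - 1   == 10 slots free
 */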
302 | |||
303 | struct qe_init_block { | ||
304 | struct qe_rxd qe_rxd[RX_RING_MAXSIZE]; | ||
305 | struct qe_txd qe_txd[TX_RING_MAXSIZE]; | ||
306 | }; | ||
307 | |||
308 | #define qib_offset(mem, elem) \ | ||
309 | ((__u32)((unsigned long)(&(((struct qe_init_block *)0)->mem[elem])))) | ||
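/* [editor's note] qib_offset() (and qebuf_offset() below) is the classic
 * null-pointer offsetof idiom: cast address 0 to the struct type and take
 * the address of a member to get that member's byte offset. The result is
 * added to qblock_dvma to form the DVMA address of one descriptor. A
 * sketch of the equivalence, assuming offsetof() from <linux/stddef.h>:
 *
 *	qib_offset(qe_rxd, 3) == offsetof(struct qe_init_block, qe_rxd[3])
 *	                      == 3 * sizeof(struct qe_rxd)  ( == 24 bytes )
 */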
310 | |||
311 | struct sunqe; | ||
312 | |||
313 | struct sunqec { | ||
314 | void __iomem *gregs; /* QEC Global Registers */ | ||
315 | struct sunqe *qes[4]; /* Each child MACE */ | ||
316 | unsigned int qec_bursts; /* Support burst sizes */ | ||
317 | struct platform_device *op; /* QEC's OF device */ | ||
318 | struct sunqec *next_module; /* List of all QECs in system */ | ||
319 | }; | ||
320 | |||
321 | #define PKT_BUF_SZ 1664 | ||
322 | #define RXD_PKT_SZ 1664 | ||
323 | |||
324 | struct sunqe_buffers { | ||
325 | u8 tx_buf[TX_RING_SIZE][PKT_BUF_SZ]; | ||
326 | u8 __pad[2]; | ||
327 | u8 rx_buf[RX_RING_SIZE][PKT_BUF_SZ]; | ||
328 | }; | ||
329 | |||
330 | #define qebuf_offset(mem, elem) \ | ||
331 | ((__u32)((unsigned long)(&(((struct sunqe_buffers *)0)->mem[elem][0])))) | ||
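/* [editor's note] Same idiom as qib_offset(): e.g. qebuf_offset(rx_buf, 0)
 * == TX_RING_SIZE * PKT_BUF_SZ + 2 == 16 * 1664 + 2 == 26626, i.e. the
 * first RX buffer starts right after the TX buffers and the 2-byte pad.
 */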
332 | |||
333 | struct sunqe { | ||
334 | void __iomem *qcregs; /* QEC per-channel Registers */ | ||
335 | void __iomem *mregs; /* Per-channel MACE Registers */ | ||
336 | struct qe_init_block *qe_block; /* RX and TX descriptors */ | ||
337 | __u32 qblock_dvma; /* RX and TX descriptors */ | ||
338 | spinlock_t lock; /* Protects txfull state */ | ||
339 | int rx_new, rx_old; /* RX ring extents */ | ||
340 | int tx_new, tx_old; /* TX ring extents */ | ||
341 | struct sunqe_buffers *buffers; /* CPU visible address. */ | ||
342 | __u32 buffers_dvma; /* DVMA visible address. */ | ||
343 | struct sunqec *parent; | ||
344 | u8 mconfig; /* Base MACE mconfig value */ | ||
345 | struct platform_device *op; /* QE's OF device struct */ | ||
346 | struct net_device *dev; /* QE's netdevice struct */ | ||
347 | int channel; /* Who am I? */ | ||
348 | }; | ||
349 | |||
350 | #endif /* !(_SUNQE_H) */ | ||
diff --git a/drivers/net/ethernet/sun/sunvnet.c b/drivers/net/ethernet/sun/sunvnet.c new file mode 100644 index 000000000000..bf3c762de620 --- /dev/null +++ b/drivers/net/ethernet/sun/sunvnet.c | |||
@@ -0,0 +1,1284 @@ | |||
1 | /* sunvnet.c: Sun LDOM Virtual Network Driver. | ||
2 | * | ||
3 | * Copyright (C) 2007, 2008 David S. Miller <davem@davemloft.net> | ||
4 | */ | ||
5 | |||
6 | #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt | ||
7 | |||
8 | #include <linux/module.h> | ||
9 | #include <linux/kernel.h> | ||
10 | #include <linux/types.h> | ||
11 | #include <linux/slab.h> | ||
12 | #include <linux/delay.h> | ||
13 | #include <linux/init.h> | ||
14 | #include <linux/netdevice.h> | ||
15 | #include <linux/ethtool.h> | ||
16 | #include <linux/etherdevice.h> | ||
17 | #include <linux/mutex.h> | ||
18 | |||
19 | #include <asm/vio.h> | ||
20 | #include <asm/ldc.h> | ||
21 | |||
22 | #include "sunvnet.h" | ||
23 | |||
24 | #define DRV_MODULE_NAME "sunvnet" | ||
25 | #define DRV_MODULE_VERSION "1.0" | ||
26 | #define DRV_MODULE_RELDATE "June 25, 2007" | ||
27 | |||
28 | static char version[] __devinitdata = | ||
29 | DRV_MODULE_NAME ".c:v" DRV_MODULE_VERSION " (" DRV_MODULE_RELDATE ")\n"; | ||
30 | MODULE_AUTHOR("David S. Miller (davem@davemloft.net)"); | ||
31 | MODULE_DESCRIPTION("Sun LDOM virtual network driver"); | ||
32 | MODULE_LICENSE("GPL"); | ||
33 | MODULE_VERSION(DRV_MODULE_VERSION); | ||
34 | |||
35 | /* Ordered from largest major version to smallest */ | ||
36 | static struct vio_version vnet_versions[] = { | ||
37 | { .major = 1, .minor = 0 }, | ||
38 | }; | ||
39 | |||
40 | static inline u32 vnet_tx_dring_avail(struct vio_dring_state *dr) | ||
41 | { | ||
42 | return vio_dring_avail(dr, VNET_TX_RING_SIZE); | ||
43 | } | ||
44 | |||
45 | static int vnet_handle_unknown(struct vnet_port *port, void *arg) | ||
46 | { | ||
47 | struct vio_msg_tag *pkt = arg; | ||
48 | |||
49 | pr_err("Received unknown msg [%02x:%02x:%04x:%08x]\n", | ||
50 | pkt->type, pkt->stype, pkt->stype_env, pkt->sid); | ||
51 | pr_err("Resetting connection\n"); | ||
52 | |||
53 | ldc_disconnect(port->vio.lp); | ||
54 | |||
55 | return -ECONNRESET; | ||
56 | } | ||
57 | |||
58 | static int vnet_send_attr(struct vio_driver_state *vio) | ||
59 | { | ||
60 | struct vnet_port *port = to_vnet_port(vio); | ||
61 | struct net_device *dev = port->vp->dev; | ||
62 | struct vio_net_attr_info pkt; | ||
63 | int i; | ||
64 | |||
65 | memset(&pkt, 0, sizeof(pkt)); | ||
66 | pkt.tag.type = VIO_TYPE_CTRL; | ||
67 | pkt.tag.stype = VIO_SUBTYPE_INFO; | ||
68 | pkt.tag.stype_env = VIO_ATTR_INFO; | ||
69 | pkt.tag.sid = vio_send_sid(vio); | ||
70 | pkt.xfer_mode = VIO_DRING_MODE; | ||
71 | pkt.addr_type = VNET_ADDR_ETHERMAC; | ||
72 | pkt.ack_freq = 0; | ||
73 | for (i = 0; i < 6; i++) | ||
74 | pkt.addr |= (u64)dev->dev_addr[i] << ((5 - i) * 8); | ||
75 | pkt.mtu = ETH_FRAME_LEN; | ||
76 | |||
77 | viodbg(HS, "SEND NET ATTR xmode[0x%x] atype[0x%x] addr[%llx] " | ||
78 | "ackfreq[%u] mtu[%llu]\n", | ||
79 | pkt.xfer_mode, pkt.addr_type, | ||
80 | (unsigned long long) pkt.addr, | ||
81 | pkt.ack_freq, | ||
82 | (unsigned long long) pkt.mtu); | ||
83 | |||
84 | return vio_ldc_send(vio, &pkt, sizeof(pkt)); | ||
85 | } | ||
86 | |||
87 | static int handle_attr_info(struct vio_driver_state *vio, | ||
88 | struct vio_net_attr_info *pkt) | ||
89 | { | ||
90 | viodbg(HS, "GOT NET ATTR INFO xmode[0x%x] atype[0x%x] addr[%llx] " | ||
91 | "ackfreq[%u] mtu[%llu]\n", | ||
92 | pkt->xfer_mode, pkt->addr_type, | ||
93 | (unsigned long long) pkt->addr, | ||
94 | pkt->ack_freq, | ||
95 | (unsigned long long) pkt->mtu); | ||
96 | |||
97 | pkt->tag.sid = vio_send_sid(vio); | ||
98 | |||
99 | if (pkt->xfer_mode != VIO_DRING_MODE || | ||
100 | pkt->addr_type != VNET_ADDR_ETHERMAC || | ||
101 | pkt->mtu != ETH_FRAME_LEN) { | ||
102 | viodbg(HS, "SEND NET ATTR NACK\n"); | ||
103 | |||
104 | pkt->tag.stype = VIO_SUBTYPE_NACK; | ||
105 | |||
106 | (void) vio_ldc_send(vio, pkt, sizeof(*pkt)); | ||
107 | |||
108 | return -ECONNRESET; | ||
109 | } else { | ||
110 | viodbg(HS, "SEND NET ATTR ACK\n"); | ||
111 | |||
112 | pkt->tag.stype = VIO_SUBTYPE_ACK; | ||
113 | |||
114 | return vio_ldc_send(vio, pkt, sizeof(*pkt)); | ||
115 | } | ||
116 | |||
117 | } | ||
118 | |||
119 | static int handle_attr_ack(struct vio_driver_state *vio, | ||
120 | struct vio_net_attr_info *pkt) | ||
121 | { | ||
122 | viodbg(HS, "GOT NET ATTR ACK\n"); | ||
123 | |||
124 | return 0; | ||
125 | } | ||
126 | |||
127 | static int handle_attr_nack(struct vio_driver_state *vio, | ||
128 | struct vio_net_attr_info *pkt) | ||
129 | { | ||
130 | viodbg(HS, "GOT NET ATTR NACK\n"); | ||
131 | |||
132 | return -ECONNRESET; | ||
133 | } | ||
134 | |||
135 | static int vnet_handle_attr(struct vio_driver_state *vio, void *arg) | ||
136 | { | ||
137 | struct vio_net_attr_info *pkt = arg; | ||
138 | |||
139 | switch (pkt->tag.stype) { | ||
140 | case VIO_SUBTYPE_INFO: | ||
141 | return handle_attr_info(vio, pkt); | ||
142 | |||
143 | case VIO_SUBTYPE_ACK: | ||
144 | return handle_attr_ack(vio, pkt); | ||
145 | |||
146 | case VIO_SUBTYPE_NACK: | ||
147 | return handle_attr_nack(vio, pkt); | ||
148 | |||
149 | default: | ||
150 | return -ECONNRESET; | ||
151 | } | ||
152 | } | ||
153 | |||
154 | static void vnet_handshake_complete(struct vio_driver_state *vio) | ||
155 | { | ||
156 | struct vio_dring_state *dr; | ||
157 | |||
158 | dr = &vio->drings[VIO_DRIVER_RX_RING]; | ||
159 | dr->snd_nxt = dr->rcv_nxt = 1; | ||
160 | |||
161 | dr = &vio->drings[VIO_DRIVER_TX_RING]; | ||
162 | dr->snd_nxt = dr->rcv_nxt = 1; | ||
163 | } | ||
164 | |||
165 | /* The hypervisor interface that implements copying to/from imported | ||
166 | * memory from another domain requires that copies are done to 8-byte | ||
167 | * aligned buffers, and that the lengths of such copies are also 8-byte | ||
168 | * multiples. | ||
169 | * | ||
170 | * So we align skb->data to an 8-byte multiple and pad out the data | ||
171 | * area so we can round the copy length up to the next multiple of | ||
172 | * 8 for the copy. | ||
173 | * | ||
174 | * The transmitter puts the actual start of the packet 6 bytes into | ||
175 | * the buffer it sends over, so that the IP headers after the ethernet | ||
176 | * header are aligned properly. These 6 bytes are not in the descriptor | ||
177 | * length; they are simply implied. This offset is represented using | ||
178 | * the VNET_PACKET_SKIP macro. | ||
179 | */ | ||
180 | static struct sk_buff *alloc_and_align_skb(struct net_device *dev, | ||
181 | unsigned int len) | ||
182 | { | ||
183 | struct sk_buff *skb = netdev_alloc_skb(dev, len+VNET_PACKET_SKIP+8+8); | ||
184 | unsigned long addr, off; | ||
185 | |||
186 | if (unlikely(!skb)) | ||
187 | return NULL; | ||
188 | |||
189 | addr = (unsigned long) skb->data; | ||
190 | off = ((addr + 7UL) & ~7UL) - addr; | ||
191 | if (off) | ||
192 | skb_reserve(skb, off); | ||
193 | |||
194 | return skb; | ||
195 | } | ||
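/* [editor's note] The alignment math above with a concrete (hypothetical)
 * address: if netdev_alloc_skb() hands back skb->data == 0x1005, then
 * off = ((0x1005 + 7) & ~7) - 0x1005 == 3, and skb_reserve(skb, 3) bumps
 * skb->data to the 8-byte boundary 0x1008. The len+VNET_PACKET_SKIP+8+8
 * allocation leaves room for this reserve plus the round-up of the copy
 * length to a multiple of 8 in vnet_rx_one().
 */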
196 | |||
197 | static int vnet_rx_one(struct vnet_port *port, unsigned int len, | ||
198 | struct ldc_trans_cookie *cookies, int ncookies) | ||
199 | { | ||
200 | struct net_device *dev = port->vp->dev; | ||
201 | unsigned int copy_len; | ||
202 | struct sk_buff *skb; | ||
203 | int err; | ||
204 | |||
205 | err = -EMSGSIZE; | ||
206 | if (unlikely(len < ETH_ZLEN || len > ETH_FRAME_LEN)) { | ||
207 | dev->stats.rx_length_errors++; | ||
208 | goto out_dropped; | ||
209 | } | ||
210 | |||
211 | skb = alloc_and_align_skb(dev, len); | ||
212 | err = -ENOMEM; | ||
213 | if (unlikely(!skb)) { | ||
214 | dev->stats.rx_missed_errors++; | ||
215 | goto out_dropped; | ||
216 | } | ||
217 | |||
218 | copy_len = (len + VNET_PACKET_SKIP + 7U) & ~7U; | ||
219 | skb_put(skb, copy_len); | ||
220 | err = ldc_copy(port->vio.lp, LDC_COPY_IN, | ||
221 | skb->data, copy_len, 0, | ||
222 | cookies, ncookies); | ||
223 | if (unlikely(err < 0)) { | ||
224 | dev->stats.rx_frame_errors++; | ||
225 | goto out_free_skb; | ||
226 | } | ||
227 | |||
228 | skb_pull(skb, VNET_PACKET_SKIP); | ||
229 | skb_trim(skb, len); | ||
230 | skb->protocol = eth_type_trans(skb, dev); | ||
231 | |||
232 | dev->stats.rx_packets++; | ||
233 | dev->stats.rx_bytes += len; | ||
234 | |||
235 | netif_rx(skb); | ||
236 | |||
237 | return 0; | ||
238 | |||
239 | out_free_skb: | ||
240 | kfree_skb(skb); | ||
241 | |||
242 | out_dropped: | ||
243 | dev->stats.rx_dropped++; | ||
244 | return err; | ||
245 | } | ||
246 | |||
247 | static int vnet_send_ack(struct vnet_port *port, struct vio_dring_state *dr, | ||
248 | u32 start, u32 end, u8 vio_dring_state) | ||
249 | { | ||
250 | struct vio_dring_data hdr = { | ||
251 | .tag = { | ||
252 | .type = VIO_TYPE_DATA, | ||
253 | .stype = VIO_SUBTYPE_ACK, | ||
254 | .stype_env = VIO_DRING_DATA, | ||
255 | .sid = vio_send_sid(&port->vio), | ||
256 | }, | ||
257 | .dring_ident = dr->ident, | ||
258 | .start_idx = start, | ||
259 | .end_idx = end, | ||
260 | .state = vio_dring_state, | ||
261 | }; | ||
262 | int err, delay; | ||
263 | |||
264 | hdr.seq = dr->snd_nxt; | ||
265 | delay = 1; | ||
266 | do { | ||
267 | err = vio_ldc_send(&port->vio, &hdr, sizeof(hdr)); | ||
268 | if (err > 0) { | ||
269 | dr->snd_nxt++; | ||
270 | break; | ||
271 | } | ||
272 | udelay(delay); | ||
273 | if ((delay <<= 1) > 128) | ||
274 | delay = 128; | ||
275 | } while (err == -EAGAIN); | ||
276 | |||
277 | return err; | ||
278 | } | ||
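/* [editor's note] The send loop above is a bounded exponential backoff:
 * vio_ldc_send() returns -EAGAIN while the LDC channel is congested, so
 * the delay doubles 1, 2, 4, ... microseconds and saturates at 128. The
 * same loop appears again in __vnet_tx_trigger() below; a hypothetical
 * factored-out helper (not part of this driver; callers would still bump
 * dr->snd_nxt on success) could look like:
 *
 *	static int vnet_ldc_send_backoff(struct vio_driver_state *vio,
 *					 void *pkt, int len)
 *	{
 *		int err, delay = 1;
 *
 *		do {
 *			err = vio_ldc_send(vio, pkt, len);
 *			if (err > 0)
 *				break;
 *			udelay(delay);
 *			if ((delay <<= 1) > 128)
 *				delay = 128;
 *		} while (err == -EAGAIN);
 *		return err;
 *	}
 */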
279 | |||
280 | static u32 next_idx(u32 idx, struct vio_dring_state *dr) | ||
281 | { | ||
282 | if (++idx == dr->num_entries) | ||
283 | idx = 0; | ||
284 | return idx; | ||
285 | } | ||
286 | |||
287 | static u32 prev_idx(u32 idx, struct vio_dring_state *dr) | ||
288 | { | ||
289 | if (idx == 0) | ||
290 | idx = dr->num_entries - 1; | ||
291 | else | ||
292 | idx--; | ||
293 | |||
294 | return idx; | ||
295 | } | ||
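/* [editor's note] These wrap by comparison rather than by masking (as the
 * NEXT_TX()/NEXT_RX() macros in the older Sun drivers do) because
 * dr->num_entries is negotiated with the peer and need not be a power of
 * two. E.g. with num_entries == 512: next_idx(511, dr) == 0 and
 * prev_idx(0, dr) == 511.
 */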
296 | |||
297 | static struct vio_net_desc *get_rx_desc(struct vnet_port *port, | ||
298 | struct vio_dring_state *dr, | ||
299 | u32 index) | ||
300 | { | ||
301 | struct vio_net_desc *desc = port->vio.desc_buf; | ||
302 | int err; | ||
303 | |||
304 | err = ldc_get_dring_entry(port->vio.lp, desc, dr->entry_size, | ||
305 | (index * dr->entry_size), | ||
306 | dr->cookies, dr->ncookies); | ||
307 | if (err < 0) | ||
308 | return ERR_PTR(err); | ||
309 | |||
310 | return desc; | ||
311 | } | ||
312 | |||
313 | static int put_rx_desc(struct vnet_port *port, | ||
314 | struct vio_dring_state *dr, | ||
315 | struct vio_net_desc *desc, | ||
316 | u32 index) | ||
317 | { | ||
318 | int err; | ||
319 | |||
320 | err = ldc_put_dring_entry(port->vio.lp, desc, dr->entry_size, | ||
321 | (index * dr->entry_size), | ||
322 | dr->cookies, dr->ncookies); | ||
323 | if (err < 0) | ||
324 | return err; | ||
325 | |||
326 | return 0; | ||
327 | } | ||
328 | |||
329 | static int vnet_walk_rx_one(struct vnet_port *port, | ||
330 | struct vio_dring_state *dr, | ||
331 | u32 index, int *needs_ack) | ||
332 | { | ||
333 | struct vio_net_desc *desc = get_rx_desc(port, dr, index); | ||
334 | struct vio_driver_state *vio = &port->vio; | ||
335 | int err; | ||
336 | |||
337 | if (IS_ERR(desc)) | ||
338 | return PTR_ERR(desc); | ||
339 | |||
340 | viodbg(DATA, "vio_walk_rx_one desc[%02x:%02x:%08x:%08x:%llx:%llx]\n", | ||
341 | desc->hdr.state, desc->hdr.ack, | ||
342 | desc->size, desc->ncookies, | ||
343 | desc->cookies[0].cookie_addr, | ||
344 | desc->cookies[0].cookie_size); | ||
345 | |||
346 | if (desc->hdr.state != VIO_DESC_READY) | ||
347 | return 1; | ||
348 | err = vnet_rx_one(port, desc->size, desc->cookies, desc->ncookies); | ||
349 | if (err == -ECONNRESET) | ||
350 | return err; | ||
351 | desc->hdr.state = VIO_DESC_DONE; | ||
352 | err = put_rx_desc(port, dr, desc, index); | ||
353 | if (err < 0) | ||
354 | return err; | ||
355 | *needs_ack = desc->hdr.ack; | ||
356 | return 0; | ||
357 | } | ||
358 | |||
359 | static int vnet_walk_rx(struct vnet_port *port, struct vio_dring_state *dr, | ||
360 | u32 start, u32 end) | ||
361 | { | ||
362 | struct vio_driver_state *vio = &port->vio; | ||
363 | int ack_start = -1, ack_end = -1; | ||
364 | |||
365 | end = (end == (u32) -1) ? prev_idx(start, dr) : next_idx(end, dr); | ||
366 | |||
367 | viodbg(DATA, "vnet_walk_rx start[%08x] end[%08x]\n", start, end); | ||
368 | |||
369 | while (start != end) { | ||
370 | int ack = 0, err = vnet_walk_rx_one(port, dr, start, &ack); | ||
371 | if (err == -ECONNRESET) | ||
372 | return err; | ||
373 | if (err != 0) | ||
374 | break; | ||
375 | if (ack_start == -1) | ||
376 | ack_start = start; | ||
377 | ack_end = start; | ||
378 | start = next_idx(start, dr); | ||
379 | if (ack && start != end) { | ||
380 | err = vnet_send_ack(port, dr, ack_start, ack_end, | ||
381 | VIO_DRING_ACTIVE); | ||
382 | if (err == -ECONNRESET) | ||
383 | return err; | ||
384 | ack_start = -1; | ||
385 | } | ||
386 | } | ||
387 | if (unlikely(ack_start == -1)) | ||
388 | ack_start = ack_end = prev_idx(start, dr); | ||
389 | return vnet_send_ack(port, dr, ack_start, ack_end, VIO_DRING_STOPPED); | ||
390 | } | ||
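/* [editor's note] vnet_walk_rx() batches acknowledgements: processed
 * descriptors accumulate into the range [ack_start, ack_end], an ACTIVE
 * ack goes out mid-walk only when the producer asked for one
 * (desc->hdr.ack) and more descriptors remain, and a STOPPED ack always
 * closes out the walk. end == (u32) -1 is the sentinel the transmit side
 * sets in __vnet_tx_trigger(), meaning "consume everything from start up
 * to the descriptor just before it".
 */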
391 | |||
392 | static int vnet_rx(struct vnet_port *port, void *msgbuf) | ||
393 | { | ||
394 | struct vio_dring_data *pkt = msgbuf; | ||
395 | struct vio_dring_state *dr = &port->vio.drings[VIO_DRIVER_RX_RING]; | ||
396 | struct vio_driver_state *vio = &port->vio; | ||
397 | |||
398 | viodbg(DATA, "vnet_rx stype_env[%04x] seq[%016llx] rcv_nxt[%016llx]\n", | ||
399 | pkt->tag.stype_env, pkt->seq, dr->rcv_nxt); | ||
400 | |||
401 | if (unlikely(pkt->tag.stype_env != VIO_DRING_DATA)) | ||
402 | return 0; | ||
403 | if (unlikely(pkt->seq != dr->rcv_nxt)) { | ||
404 | pr_err("RX out of sequence seq[0x%llx] rcv_nxt[0x%llx]\n", | ||
405 | pkt->seq, dr->rcv_nxt); | ||
406 | return 0; | ||
407 | } | ||
408 | |||
409 | dr->rcv_nxt++; | ||
410 | |||
411 | /* XXX Validate pkt->start_idx and pkt->end_idx XXX */ | ||
412 | |||
413 | return vnet_walk_rx(port, dr, pkt->start_idx, pkt->end_idx); | ||
414 | } | ||
415 | |||
416 | static int idx_is_pending(struct vio_dring_state *dr, u32 end) | ||
417 | { | ||
418 | u32 idx = dr->cons; | ||
419 | int found = 0; | ||
420 | |||
421 | while (idx != dr->prod) { | ||
422 | if (idx == end) { | ||
423 | found = 1; | ||
424 | break; | ||
425 | } | ||
426 | idx = next_idx(idx, dr); | ||
427 | } | ||
428 | return found; | ||
429 | } | ||
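/* [editor's note] idx_is_pending() linearly scans the half-open interval
 * [cons, prod): with dr->cons == 5 and dr->prod == 9, indices 5..8 are
 * pending, so idx_is_pending(dr, 7) == 1 while idx_is_pending(dr, 9) == 0.
 * An ack for a descriptor we never handed out is thus ignored by
 * vnet_ack().
 */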
430 | |||
431 | static int vnet_ack(struct vnet_port *port, void *msgbuf) | ||
432 | { | ||
433 | struct vio_dring_state *dr = &port->vio.drings[VIO_DRIVER_TX_RING]; | ||
434 | struct vio_dring_data *pkt = msgbuf; | ||
435 | struct net_device *dev; | ||
436 | struct vnet *vp; | ||
437 | u32 end; | ||
438 | |||
439 | if (unlikely(pkt->tag.stype_env != VIO_DRING_DATA)) | ||
440 | return 0; | ||
441 | |||
442 | end = pkt->end_idx; | ||
443 | if (unlikely(!idx_is_pending(dr, end))) | ||
444 | return 0; | ||
445 | |||
446 | dr->cons = next_idx(end, dr); | ||
447 | |||
448 | vp = port->vp; | ||
449 | dev = vp->dev; | ||
450 | if (unlikely(netif_queue_stopped(dev) && | ||
451 | vnet_tx_dring_avail(dr) >= VNET_TX_WAKEUP_THRESH(dr))) | ||
452 | return 1; | ||
453 | |||
454 | return 0; | ||
455 | } | ||
456 | |||
457 | static int vnet_nack(struct vnet_port *port, void *msgbuf) | ||
458 | { | ||
459 | /* XXX just reset or similar XXX */ | ||
460 | return 0; | ||
461 | } | ||
462 | |||
463 | static int handle_mcast(struct vnet_port *port, void *msgbuf) | ||
464 | { | ||
465 | struct vio_net_mcast_info *pkt = msgbuf; | ||
466 | |||
467 | if (pkt->tag.stype != VIO_SUBTYPE_ACK) | ||
468 | pr_err("%s: Got unexpected MCAST reply [%02x:%02x:%04x:%08x]\n", | ||
469 | port->vp->dev->name, | ||
470 | pkt->tag.type, | ||
471 | pkt->tag.stype, | ||
472 | pkt->tag.stype_env, | ||
473 | pkt->tag.sid); | ||
474 | |||
475 | return 0; | ||
476 | } | ||
477 | |||
478 | static void maybe_tx_wakeup(struct vnet *vp) | ||
479 | { | ||
480 | struct net_device *dev = vp->dev; | ||
481 | |||
482 | netif_tx_lock(dev); | ||
483 | if (likely(netif_queue_stopped(dev))) { | ||
484 | struct vnet_port *port; | ||
485 | int wake = 1; | ||
486 | |||
487 | list_for_each_entry(port, &vp->port_list, list) { | ||
488 | struct vio_dring_state *dr; | ||
489 | |||
490 | dr = &port->vio.drings[VIO_DRIVER_TX_RING]; | ||
491 | if (vnet_tx_dring_avail(dr) < | ||
492 | VNET_TX_WAKEUP_THRESH(dr)) { | ||
493 | wake = 0; | ||
494 | break; | ||
495 | } | ||
496 | } | ||
497 | if (wake) | ||
498 | netif_wake_queue(dev); | ||
499 | } | ||
500 | netif_tx_unlock(dev); | ||
501 | } | ||
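/* [editor's note] The wakeup is deliberately conservative: the queue is
 * restarted only when *every* port's TX ring has at least
 * VNET_TX_WAKEUP_THRESH(dr) free entries (a quarter of the ring, per
 * sunvnet.h), since a single still-congested peer would immediately stall
 * vnet_start_xmit() again. netif_tx_lock() serializes the check against
 * the transmit path.
 */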
502 | |||
503 | static void vnet_event(void *arg, int event) | ||
504 | { | ||
505 | struct vnet_port *port = arg; | ||
506 | struct vio_driver_state *vio = &port->vio; | ||
507 | unsigned long flags; | ||
508 | int tx_wakeup, err; | ||
509 | |||
510 | spin_lock_irqsave(&vio->lock, flags); | ||
511 | |||
512 | if (unlikely(event == LDC_EVENT_RESET || | ||
513 | event == LDC_EVENT_UP)) { | ||
514 | vio_link_state_change(vio, event); | ||
515 | spin_unlock_irqrestore(&vio->lock, flags); | ||
516 | |||
517 | if (event == LDC_EVENT_RESET) | ||
518 | vio_port_up(vio); | ||
519 | return; | ||
520 | } | ||
521 | |||
522 | if (unlikely(event != LDC_EVENT_DATA_READY)) { | ||
523 | pr_warning("Unexpected LDC event %d\n", event); | ||
524 | spin_unlock_irqrestore(&vio->lock, flags); | ||
525 | return; | ||
526 | } | ||
527 | |||
528 | tx_wakeup = err = 0; | ||
529 | while (1) { | ||
530 | union { | ||
531 | struct vio_msg_tag tag; | ||
532 | u64 raw[8]; | ||
533 | } msgbuf; | ||
534 | |||
535 | err = ldc_read(vio->lp, &msgbuf, sizeof(msgbuf)); | ||
536 | if (unlikely(err < 0)) { | ||
537 | if (err == -ECONNRESET) | ||
538 | vio_conn_reset(vio); | ||
539 | break; | ||
540 | } | ||
541 | if (err == 0) | ||
542 | break; | ||
543 | viodbg(DATA, "TAG [%02x:%02x:%04x:%08x]\n", | ||
544 | msgbuf.tag.type, | ||
545 | msgbuf.tag.stype, | ||
546 | msgbuf.tag.stype_env, | ||
547 | msgbuf.tag.sid); | ||
548 | err = vio_validate_sid(vio, &msgbuf.tag); | ||
549 | if (err < 0) | ||
550 | break; | ||
551 | |||
552 | if (likely(msgbuf.tag.type == VIO_TYPE_DATA)) { | ||
553 | if (msgbuf.tag.stype == VIO_SUBTYPE_INFO) { | ||
554 | err = vnet_rx(port, &msgbuf); | ||
555 | } else if (msgbuf.tag.stype == VIO_SUBTYPE_ACK) { | ||
556 | err = vnet_ack(port, &msgbuf); | ||
557 | if (err > 0) | ||
558 | tx_wakeup |= err; | ||
559 | } else if (msgbuf.tag.stype == VIO_SUBTYPE_NACK) { | ||
560 | err = vnet_nack(port, &msgbuf); | ||
561 | } | ||
562 | } else if (msgbuf.tag.type == VIO_TYPE_CTRL) { | ||
563 | if (msgbuf.tag.stype_env == VNET_MCAST_INFO) | ||
564 | err = handle_mcast(port, &msgbuf); | ||
565 | else | ||
566 | err = vio_control_pkt_engine(vio, &msgbuf); | ||
567 | if (err) | ||
568 | break; | ||
569 | } else { | ||
570 | err = vnet_handle_unknown(port, &msgbuf); | ||
571 | } | ||
572 | if (err == -ECONNRESET) | ||
573 | break; | ||
574 | } | ||
575 | spin_unlock(&vio->lock); | ||
576 | if (unlikely(tx_wakeup && err != -ECONNRESET)) | ||
577 | maybe_tx_wakeup(port->vp); | ||
578 | local_irq_restore(flags); | ||
579 | } | ||
580 | |||
581 | static int __vnet_tx_trigger(struct vnet_port *port) | ||
582 | { | ||
583 | struct vio_dring_state *dr = &port->vio.drings[VIO_DRIVER_TX_RING]; | ||
584 | struct vio_dring_data hdr = { | ||
585 | .tag = { | ||
586 | .type = VIO_TYPE_DATA, | ||
587 | .stype = VIO_SUBTYPE_INFO, | ||
588 | .stype_env = VIO_DRING_DATA, | ||
589 | .sid = vio_send_sid(&port->vio), | ||
590 | }, | ||
591 | .dring_ident = dr->ident, | ||
592 | .start_idx = dr->prod, | ||
593 | .end_idx = (u32) -1, | ||
594 | }; | ||
595 | int err, delay; | ||
596 | |||
597 | hdr.seq = dr->snd_nxt; | ||
598 | delay = 1; | ||
599 | do { | ||
600 | err = vio_ldc_send(&port->vio, &hdr, sizeof(hdr)); | ||
601 | if (err > 0) { | ||
602 | dr->snd_nxt++; | ||
603 | break; | ||
604 | } | ||
605 | udelay(delay); | ||
606 | if ((delay <<= 1) > 128) | ||
607 | delay = 128; | ||
608 | } while (err == -EAGAIN); | ||
609 | |||
610 | return err; | ||
611 | } | ||
612 | |||
613 | struct vnet_port *__tx_port_find(struct vnet *vp, struct sk_buff *skb) | ||
614 | { | ||
615 | unsigned int hash = vnet_hashfn(skb->data); | ||
616 | struct hlist_head *hp = &vp->port_hash[hash]; | ||
617 | struct hlist_node *n; | ||
618 | struct vnet_port *port; | ||
619 | |||
620 | hlist_for_each_entry(port, n, hp, hash) { | ||
621 | if (!compare_ether_addr(port->raddr, skb->data)) | ||
622 | return port; | ||
623 | } | ||
624 | port = NULL; | ||
625 | if (!list_empty(&vp->port_list)) | ||
626 | port = list_entry(vp->port_list.next, struct vnet_port, list); | ||
627 | |||
628 | return port; | ||
629 | } | ||
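/* [editor's note] TX port selection hashes the destination MAC (the first
 * six bytes of skb->data) into port_hash; on a miss it falls back to the
 * head of port_list. vnet_port_probe() list_add()s switch ports at the
 * head and everything else at the tail, so traffic for unknown
 * destinations ends up at the virtual switch.
 */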
630 | |||
631 | struct vnet_port *tx_port_find(struct vnet *vp, struct sk_buff *skb) | ||
632 | { | ||
633 | struct vnet_port *ret; | ||
634 | unsigned long flags; | ||
635 | |||
636 | spin_lock_irqsave(&vp->lock, flags); | ||
637 | ret = __tx_port_find(vp, skb); | ||
638 | spin_unlock_irqrestore(&vp->lock, flags); | ||
639 | |||
640 | return ret; | ||
641 | } | ||
642 | |||
643 | static int vnet_start_xmit(struct sk_buff *skb, struct net_device *dev) | ||
644 | { | ||
645 | struct vnet *vp = netdev_priv(dev); | ||
646 | struct vnet_port *port = tx_port_find(vp, skb); | ||
647 | struct vio_dring_state *dr; | ||
648 | struct vio_net_desc *d; | ||
649 | unsigned long flags; | ||
650 | unsigned int len; | ||
651 | void *tx_buf; | ||
652 | int i, err; | ||
653 | |||
654 | if (unlikely(!port)) | ||
655 | goto out_dropped; | ||
656 | |||
657 | spin_lock_irqsave(&port->vio.lock, flags); | ||
658 | |||
659 | dr = &port->vio.drings[VIO_DRIVER_TX_RING]; | ||
660 | if (unlikely(vnet_tx_dring_avail(dr) < 2)) { | ||
661 | if (!netif_queue_stopped(dev)) { | ||
662 | netif_stop_queue(dev); | ||
663 | |||
664 | /* This is a hard error, log it. */ | ||
665 | netdev_err(dev, "BUG! Tx Ring full when queue awake!\n"); | ||
666 | dev->stats.tx_errors++; | ||
667 | } | ||
668 | spin_unlock_irqrestore(&port->vio.lock, flags); | ||
669 | return NETDEV_TX_BUSY; | ||
670 | } | ||
671 | |||
672 | d = vio_dring_cur(dr); | ||
673 | |||
674 | tx_buf = port->tx_bufs[dr->prod].buf; | ||
675 | skb_copy_from_linear_data(skb, tx_buf + VNET_PACKET_SKIP, skb->len); | ||
676 | |||
677 | len = skb->len; | ||
678 | if (len < ETH_ZLEN) { | ||
679 | len = ETH_ZLEN; | ||
680 | memset(tx_buf+VNET_PACKET_SKIP+skb->len, 0, len - skb->len); | ||
681 | } | ||
682 | |||
683 | d->hdr.ack = VIO_ACK_ENABLE; | ||
684 | d->size = len; | ||
685 | d->ncookies = port->tx_bufs[dr->prod].ncookies; | ||
686 | for (i = 0; i < d->ncookies; i++) | ||
687 | d->cookies[i] = port->tx_bufs[dr->prod].cookies[i]; | ||
688 | |||
689 | /* This has to be a non-SMP write barrier because we are writing | ||
690 | * to memory which is shared with the peer LDOM. | ||
691 | */ | ||
692 | wmb(); | ||
693 | |||
694 | d->hdr.state = VIO_DESC_READY; | ||
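	/* [editor's note] The required ordering is: (1) store size,
	 * ncookies and cookies, (2) wmb(), (3) store VIO_DESC_READY.
	 * The consumer in the peer LDOM reads hdr.state from the shared
	 * ring (see vnet_walk_rx_one() for the RX side here), so the
	 * payload stores must be visible before the READY store or the
	 * peer could read a half-written descriptor.
	 */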
695 | |||
696 | err = __vnet_tx_trigger(port); | ||
697 | if (unlikely(err < 0)) { | ||
698 | netdev_info(dev, "TX trigger error %d\n", err); | ||
699 | d->hdr.state = VIO_DESC_FREE; | ||
700 | dev->stats.tx_carrier_errors++; | ||
701 | goto out_dropped_unlock; | ||
702 | } | ||
703 | |||
704 | dev->stats.tx_packets++; | ||
705 | dev->stats.tx_bytes += skb->len; | ||
706 | |||
707 | dr->prod = (dr->prod + 1) & (VNET_TX_RING_SIZE - 1); | ||
708 | if (unlikely(vnet_tx_dring_avail(dr) < 2)) { | ||
709 | netif_stop_queue(dev); | ||
710 | if (vnet_tx_dring_avail(dr) > VNET_TX_WAKEUP_THRESH(dr)) | ||
711 | netif_wake_queue(dev); | ||
712 | } | ||
713 | |||
714 | spin_unlock_irqrestore(&port->vio.lock, flags); | ||
715 | |||
716 | dev_kfree_skb(skb); | ||
717 | |||
718 | return NETDEV_TX_OK; | ||
719 | |||
720 | out_dropped_unlock: | ||
721 | spin_unlock_irqrestore(&port->vio.lock, flags); | ||
722 | |||
723 | out_dropped: | ||
724 | dev_kfree_skb(skb); | ||
725 | dev->stats.tx_dropped++; | ||
726 | return NETDEV_TX_OK; | ||
727 | } | ||
728 | |||
729 | static void vnet_tx_timeout(struct net_device *dev) | ||
730 | { | ||
731 | /* XXX Implement me XXX */ | ||
732 | } | ||
733 | |||
734 | static int vnet_open(struct net_device *dev) | ||
735 | { | ||
736 | netif_carrier_on(dev); | ||
737 | netif_start_queue(dev); | ||
738 | |||
739 | return 0; | ||
740 | } | ||
741 | |||
742 | static int vnet_close(struct net_device *dev) | ||
743 | { | ||
744 | netif_stop_queue(dev); | ||
745 | netif_carrier_off(dev); | ||
746 | |||
747 | return 0; | ||
748 | } | ||
749 | |||
750 | static struct vnet_mcast_entry *__vnet_mc_find(struct vnet *vp, u8 *addr) | ||
751 | { | ||
752 | struct vnet_mcast_entry *m; | ||
753 | |||
754 | for (m = vp->mcast_list; m; m = m->next) { | ||
755 | if (!memcmp(m->addr, addr, ETH_ALEN)) | ||
756 | return m; | ||
757 | } | ||
758 | return NULL; | ||
759 | } | ||
760 | |||
761 | static void __update_mc_list(struct vnet *vp, struct net_device *dev) | ||
762 | { | ||
763 | struct netdev_hw_addr *ha; | ||
764 | |||
765 | netdev_for_each_mc_addr(ha, dev) { | ||
766 | struct vnet_mcast_entry *m; | ||
767 | |||
768 | m = __vnet_mc_find(vp, ha->addr); | ||
769 | if (m) { | ||
770 | m->hit = 1; | ||
771 | continue; | ||
772 | } | ||
773 | |||
774 | if (!m) { | ||
775 | m = kzalloc(sizeof(*m), GFP_ATOMIC); | ||
776 | if (!m) | ||
777 | continue; | ||
778 | memcpy(m->addr, ha->addr, ETH_ALEN); | ||
779 | m->hit = 1; | ||
780 | |||
781 | m->next = vp->mcast_list; | ||
782 | vp->mcast_list = m; | ||
783 | } | ||
784 | } | ||
785 | } | ||
786 | |||
787 | static void __send_mc_list(struct vnet *vp, struct vnet_port *port) | ||
788 | { | ||
789 | struct vio_net_mcast_info info; | ||
790 | struct vnet_mcast_entry *m, **pp; | ||
791 | int n_addrs; | ||
792 | |||
793 | memset(&info, 0, sizeof(info)); | ||
794 | |||
795 | info.tag.type = VIO_TYPE_CTRL; | ||
796 | info.tag.stype = VIO_SUBTYPE_INFO; | ||
797 | info.tag.stype_env = VNET_MCAST_INFO; | ||
798 | info.tag.sid = vio_send_sid(&port->vio); | ||
799 | info.set = 1; | ||
800 | |||
801 | n_addrs = 0; | ||
802 | for (m = vp->mcast_list; m; m = m->next) { | ||
803 | if (m->sent) | ||
804 | continue; | ||
805 | m->sent = 1; | ||
806 | memcpy(&info.mcast_addr[n_addrs * ETH_ALEN], | ||
807 | m->addr, ETH_ALEN); | ||
808 | if (++n_addrs == VNET_NUM_MCAST) { | ||
809 | info.count = n_addrs; | ||
810 | |||
811 | (void) vio_ldc_send(&port->vio, &info, | ||
812 | sizeof(info)); | ||
813 | n_addrs = 0; | ||
814 | } | ||
815 | } | ||
816 | if (n_addrs) { | ||
817 | info.count = n_addrs; | ||
818 | (void) vio_ldc_send(&port->vio, &info, sizeof(info)); | ||
819 | } | ||
820 | |||
821 | info.set = 0; | ||
822 | |||
823 | n_addrs = 0; | ||
824 | pp = &vp->mcast_list; | ||
825 | while ((m = *pp) != NULL) { | ||
826 | if (m->hit) { | ||
827 | m->hit = 0; | ||
828 | pp = &m->next; | ||
829 | continue; | ||
830 | } | ||
831 | |||
832 | memcpy(&info.mcast_addr[n_addrs * ETH_ALEN], | ||
833 | m->addr, ETH_ALEN); | ||
834 | if (++n_addrs == VNET_NUM_MCAST) { | ||
835 | info.count = n_addrs; | ||
836 | (void) vio_ldc_send(&port->vio, &info, | ||
837 | sizeof(info)); | ||
838 | n_addrs = 0; | ||
839 | } | ||
840 | |||
841 | *pp = m->next; | ||
842 | kfree(m); | ||
843 | } | ||
844 | if (n_addrs) { | ||
845 | info.count = n_addrs; | ||
846 | (void) vio_ldc_send(&port->vio, &info, sizeof(info)); | ||
847 | } | ||
848 | } | ||
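/* [editor's note] __update_mc_list() and __send_mc_list() together form a
 * small mark-and-sweep sync of the device's multicast list to the switch
 * port: addresses still present are marked hit = 1; the first pass above
 * sends any not-yet-sent addresses with set = 1, and the second pass sends
 * the unmarked (removed) addresses with set = 0 and unlinks them, batching
 * up to VNET_NUM_MCAST addresses per VIO control message.
 */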
849 | |||
850 | static void vnet_set_rx_mode(struct net_device *dev) | ||
851 | { | ||
852 | struct vnet *vp = netdev_priv(dev); | ||
853 | struct vnet_port *port; | ||
854 | unsigned long flags; | ||
855 | |||
856 | spin_lock_irqsave(&vp->lock, flags); | ||
857 | if (!list_empty(&vp->port_list)) { | ||
858 | port = list_entry(vp->port_list.next, struct vnet_port, list); | ||
859 | |||
860 | if (port->switch_port) { | ||
861 | __update_mc_list(vp, dev); | ||
862 | __send_mc_list(vp, port); | ||
863 | } | ||
864 | } | ||
865 | spin_unlock_irqrestore(&vp->lock, flags); | ||
866 | } | ||
867 | |||
868 | static int vnet_change_mtu(struct net_device *dev, int new_mtu) | ||
869 | { | ||
870 | if (new_mtu != ETH_DATA_LEN) | ||
871 | return -EINVAL; | ||
872 | |||
873 | dev->mtu = new_mtu; | ||
874 | return 0; | ||
875 | } | ||
876 | |||
877 | static int vnet_set_mac_addr(struct net_device *dev, void *p) | ||
878 | { | ||
879 | return -EINVAL; | ||
880 | } | ||
881 | |||
882 | static void vnet_get_drvinfo(struct net_device *dev, | ||
883 | struct ethtool_drvinfo *info) | ||
884 | { | ||
885 | strcpy(info->driver, DRV_MODULE_NAME); | ||
886 | strcpy(info->version, DRV_MODULE_VERSION); | ||
887 | } | ||
888 | |||
889 | static u32 vnet_get_msglevel(struct net_device *dev) | ||
890 | { | ||
891 | struct vnet *vp = netdev_priv(dev); | ||
892 | return vp->msg_enable; | ||
893 | } | ||
894 | |||
895 | static void vnet_set_msglevel(struct net_device *dev, u32 value) | ||
896 | { | ||
897 | struct vnet *vp = netdev_priv(dev); | ||
898 | vp->msg_enable = value; | ||
899 | } | ||
900 | |||
901 | static const struct ethtool_ops vnet_ethtool_ops = { | ||
902 | .get_drvinfo = vnet_get_drvinfo, | ||
903 | .get_msglevel = vnet_get_msglevel, | ||
904 | .set_msglevel = vnet_set_msglevel, | ||
905 | .get_link = ethtool_op_get_link, | ||
906 | }; | ||
907 | |||
908 | static void vnet_port_free_tx_bufs(struct vnet_port *port) | ||
909 | { | ||
910 | struct vio_dring_state *dr; | ||
911 | int i; | ||
912 | |||
913 | dr = &port->vio.drings[VIO_DRIVER_TX_RING]; | ||
914 | if (dr->base) { | ||
915 | ldc_free_exp_dring(port->vio.lp, dr->base, | ||
916 | (dr->entry_size * dr->num_entries), | ||
917 | dr->cookies, dr->ncookies); | ||
918 | dr->base = NULL; | ||
919 | dr->entry_size = 0; | ||
920 | dr->num_entries = 0; | ||
921 | dr->pending = 0; | ||
922 | dr->ncookies = 0; | ||
923 | } | ||
924 | |||
925 | for (i = 0; i < VNET_TX_RING_SIZE; i++) { | ||
926 | void *buf = port->tx_bufs[i].buf; | ||
927 | |||
928 | if (!buf) | ||
929 | continue; | ||
930 | |||
931 | ldc_unmap(port->vio.lp, | ||
932 | port->tx_bufs[i].cookies, | ||
933 | port->tx_bufs[i].ncookies); | ||
934 | |||
935 | kfree(buf); | ||
936 | port->tx_bufs[i].buf = NULL; | ||
937 | } | ||
938 | } | ||
939 | |||
940 | static int __devinit vnet_port_alloc_tx_bufs(struct vnet_port *port) | ||
941 | { | ||
942 | struct vio_dring_state *dr; | ||
943 | unsigned long len; | ||
944 | int i, err, ncookies; | ||
945 | void *dring; | ||
946 | |||
947 | for (i = 0; i < VNET_TX_RING_SIZE; i++) { | ||
948 | void *buf = kzalloc(ETH_FRAME_LEN + 8, GFP_KERNEL); | ||
949 | int map_len = (ETH_FRAME_LEN + 7) & ~7; | ||
950 | |||
951 | err = -ENOMEM; | ||
952 | if (!buf) { | ||
953 | pr_err("TX buffer allocation failure\n"); | ||
954 | goto err_out; | ||
955 | } | ||
956 | err = -EFAULT; | ||
957 | if ((unsigned long)buf & (8UL - 1)) { | ||
958 | pr_err("TX buffer misaligned\n"); | ||
959 | kfree(buf); | ||
960 | goto err_out; | ||
961 | } | ||
962 | |||
963 | err = ldc_map_single(port->vio.lp, buf, map_len, | ||
964 | port->tx_bufs[i].cookies, 2, | ||
965 | (LDC_MAP_SHADOW | | ||
966 | LDC_MAP_DIRECT | | ||
967 | LDC_MAP_RW)); | ||
968 | if (err < 0) { | ||
969 | kfree(buf); | ||
970 | goto err_out; | ||
971 | } | ||
972 | port->tx_bufs[i].buf = buf; | ||
973 | port->tx_bufs[i].ncookies = err; | ||
974 | } | ||
975 | |||
976 | dr = &port->vio.drings[VIO_DRIVER_TX_RING]; | ||
977 | |||
978 | len = (VNET_TX_RING_SIZE * | ||
979 | (sizeof(struct vio_net_desc) + | ||
980 | (sizeof(struct ldc_trans_cookie) * 2))); | ||
981 | |||
982 | ncookies = VIO_MAX_RING_COOKIES; | ||
983 | dring = ldc_alloc_exp_dring(port->vio.lp, len, | ||
984 | dr->cookies, &ncookies, | ||
985 | (LDC_MAP_SHADOW | | ||
986 | LDC_MAP_DIRECT | | ||
987 | LDC_MAP_RW)); | ||
988 | if (IS_ERR(dring)) { | ||
989 | err = PTR_ERR(dring); | ||
990 | goto err_out; | ||
991 | } | ||
992 | |||
993 | dr->base = dring; | ||
994 | dr->entry_size = (sizeof(struct vio_net_desc) + | ||
995 | (sizeof(struct ldc_trans_cookie) * 2)); | ||
996 | dr->num_entries = VNET_TX_RING_SIZE; | ||
997 | dr->prod = dr->cons = 0; | ||
998 | dr->pending = VNET_TX_RING_SIZE; | ||
999 | dr->ncookies = ncookies; | ||
1000 | |||
1001 | return 0; | ||
1002 | |||
1003 | err_out: | ||
1004 | vnet_port_free_tx_bufs(port); | ||
1005 | |||
1006 | return err; | ||
1007 | } | ||
1008 | |||
1009 | static LIST_HEAD(vnet_list); | ||
1010 | static DEFINE_MUTEX(vnet_list_mutex); | ||
1011 | |||
1012 | static const struct net_device_ops vnet_ops = { | ||
1013 | .ndo_open = vnet_open, | ||
1014 | .ndo_stop = vnet_close, | ||
1015 | .ndo_set_multicast_list = vnet_set_rx_mode, | ||
1016 | .ndo_set_mac_address = vnet_set_mac_addr, | ||
1017 | .ndo_validate_addr = eth_validate_addr, | ||
1018 | .ndo_tx_timeout = vnet_tx_timeout, | ||
1019 | .ndo_change_mtu = vnet_change_mtu, | ||
1020 | .ndo_start_xmit = vnet_start_xmit, | ||
1021 | }; | ||
1022 | |||
1023 | static struct vnet * __devinit vnet_new(const u64 *local_mac) | ||
1024 | { | ||
1025 | struct net_device *dev; | ||
1026 | struct vnet *vp; | ||
1027 | int err, i; | ||
1028 | |||
1029 | dev = alloc_etherdev(sizeof(*vp)); | ||
1030 | if (!dev) { | ||
1031 | pr_err("Etherdev alloc failed, aborting\n"); | ||
1032 | return ERR_PTR(-ENOMEM); | ||
1033 | } | ||
1034 | |||
1035 | for (i = 0; i < ETH_ALEN; i++) | ||
1036 | dev->dev_addr[i] = (*local_mac >> (5 - i) * 8) & 0xff; | ||
1037 | |||
1038 | memcpy(dev->perm_addr, dev->dev_addr, dev->addr_len); | ||
1039 | |||
1040 | vp = netdev_priv(dev); | ||
1041 | |||
1042 | spin_lock_init(&vp->lock); | ||
1043 | vp->dev = dev; | ||
1044 | |||
1045 | INIT_LIST_HEAD(&vp->port_list); | ||
1046 | for (i = 0; i < VNET_PORT_HASH_SIZE; i++) | ||
1047 | INIT_HLIST_HEAD(&vp->port_hash[i]); | ||
1048 | INIT_LIST_HEAD(&vp->list); | ||
1049 | vp->local_mac = *local_mac; | ||
1050 | |||
1051 | dev->netdev_ops = &vnet_ops; | ||
1052 | dev->ethtool_ops = &vnet_ethtool_ops; | ||
1053 | dev->watchdog_timeo = VNET_TX_TIMEOUT; | ||
1054 | |||
1055 | err = register_netdev(dev); | ||
1056 | if (err) { | ||
1057 | pr_err("Cannot register net device, aborting\n"); | ||
1058 | goto err_out_free_dev; | ||
1059 | } | ||
1060 | |||
1061 | netdev_info(dev, "Sun LDOM vnet %pM\n", dev->dev_addr); | ||
1062 | |||
1063 | list_add(&vp->list, &vnet_list); | ||
1064 | |||
1065 | return vp; | ||
1066 | |||
1067 | err_out_free_dev: | ||
1068 | free_netdev(dev); | ||
1069 | |||
1070 | return ERR_PTR(err); | ||
1071 | } | ||
1072 | |||
1073 | static struct vnet * __devinit vnet_find_or_create(const u64 *local_mac) | ||
1074 | { | ||
1075 | struct vnet *iter, *vp; | ||
1076 | |||
1077 | mutex_lock(&vnet_list_mutex); | ||
1078 | vp = NULL; | ||
1079 | list_for_each_entry(iter, &vnet_list, list) { | ||
1080 | if (iter->local_mac == *local_mac) { | ||
1081 | vp = iter; | ||
1082 | break; | ||
1083 | } | ||
1084 | } | ||
1085 | if (!vp) | ||
1086 | vp = vnet_new(local_mac); | ||
1087 | mutex_unlock(&vnet_list_mutex); | ||
1088 | |||
1089 | return vp; | ||
1090 | } | ||
1091 | |||
1092 | static const char *local_mac_prop = "local-mac-address"; | ||
1093 | |||
1094 | static struct vnet * __devinit vnet_find_parent(struct mdesc_handle *hp, | ||
1095 | u64 port_node) | ||
1096 | { | ||
1097 | const u64 *local_mac = NULL; | ||
1098 | u64 a; | ||
1099 | |||
1100 | mdesc_for_each_arc(a, hp, port_node, MDESC_ARC_TYPE_BACK) { | ||
1101 | u64 target = mdesc_arc_target(hp, a); | ||
1102 | const char *name; | ||
1103 | |||
1104 | name = mdesc_get_property(hp, target, "name", NULL); | ||
1105 | if (!name || strcmp(name, "network")) | ||
1106 | continue; | ||
1107 | |||
1108 | local_mac = mdesc_get_property(hp, target, | ||
1109 | local_mac_prop, NULL); | ||
1110 | if (local_mac) | ||
1111 | break; | ||
1112 | } | ||
1113 | if (!local_mac) | ||
1114 | return ERR_PTR(-ENODEV); | ||
1115 | |||
1116 | return vnet_find_or_create(local_mac); | ||
1117 | } | ||
1118 | |||
1119 | static struct ldc_channel_config vnet_ldc_cfg = { | ||
1120 | .event = vnet_event, | ||
1121 | .mtu = 64, | ||
1122 | .mode = LDC_MODE_UNRELIABLE, | ||
1123 | }; | ||
1124 | |||
1125 | static struct vio_driver_ops vnet_vio_ops = { | ||
1126 | .send_attr = vnet_send_attr, | ||
1127 | .handle_attr = vnet_handle_attr, | ||
1128 | .handshake_complete = vnet_handshake_complete, | ||
1129 | }; | ||
1130 | |||
1131 | static void __devinit print_version(void) | ||
1132 | { | ||
1133 | printk_once(KERN_INFO "%s", version); | ||
1134 | } | ||
1135 | |||
1136 | const char *remote_macaddr_prop = "remote-mac-address"; | ||
1137 | |||
1138 | static int __devinit vnet_port_probe(struct vio_dev *vdev, | ||
1139 | const struct vio_device_id *id) | ||
1140 | { | ||
1141 | struct mdesc_handle *hp; | ||
1142 | struct vnet_port *port; | ||
1143 | unsigned long flags; | ||
1144 | struct vnet *vp; | ||
1145 | const u64 *rmac; | ||
1146 | int len, i, err, switch_port; | ||
1147 | |||
1148 | print_version(); | ||
1149 | |||
1150 | hp = mdesc_grab(); | ||
1151 | |||
1152 | vp = vnet_find_parent(hp, vdev->mp); | ||
1153 | if (IS_ERR(vp)) { | ||
1154 | pr_err("Cannot find port parent vnet\n"); | ||
1155 | err = PTR_ERR(vp); | ||
1156 | goto err_out_put_mdesc; | ||
1157 | } | ||
1158 | |||
1159 | rmac = mdesc_get_property(hp, vdev->mp, remote_macaddr_prop, &len); | ||
1160 | err = -ENODEV; | ||
1161 | if (!rmac) { | ||
1162 | pr_err("Port lacks %s property\n", remote_macaddr_prop); | ||
1163 | goto err_out_put_mdesc; | ||
1164 | } | ||
1165 | |||
1166 | port = kzalloc(sizeof(*port), GFP_KERNEL); | ||
1167 | err = -ENOMEM; | ||
1168 | if (!port) { | ||
1169 | pr_err("Cannot allocate vnet_port\n"); | ||
1170 | goto err_out_put_mdesc; | ||
1171 | } | ||
1172 | |||
1173 | for (i = 0; i < ETH_ALEN; i++) | ||
1174 | port->raddr[i] = (*rmac >> (5 - i) * 8) & 0xff; | ||
1175 | |||
1176 | port->vp = vp; | ||
1177 | |||
1178 | err = vio_driver_init(&port->vio, vdev, VDEV_NETWORK, | ||
1179 | vnet_versions, ARRAY_SIZE(vnet_versions), | ||
1180 | &vnet_vio_ops, vp->dev->name); | ||
1181 | if (err) | ||
1182 | goto err_out_free_port; | ||
1183 | |||
1184 | err = vio_ldc_alloc(&port->vio, &vnet_ldc_cfg, port); | ||
1185 | if (err) | ||
1186 | goto err_out_free_port; | ||
1187 | |||
1188 | err = vnet_port_alloc_tx_bufs(port); | ||
1189 | if (err) | ||
1190 | goto err_out_free_ldc; | ||
1191 | |||
1192 | INIT_HLIST_NODE(&port->hash); | ||
1193 | INIT_LIST_HEAD(&port->list); | ||
1194 | |||
1195 | switch_port = 0; | ||
1196 | if (mdesc_get_property(hp, vdev->mp, "switch-port", NULL) != NULL) | ||
1197 | switch_port = 1; | ||
1198 | port->switch_port = switch_port; | ||
1199 | |||
1200 | spin_lock_irqsave(&vp->lock, flags); | ||
1201 | if (switch_port) | ||
1202 | list_add(&port->list, &vp->port_list); | ||
1203 | else | ||
1204 | list_add_tail(&port->list, &vp->port_list); | ||
1205 | hlist_add_head(&port->hash, &vp->port_hash[vnet_hashfn(port->raddr)]); | ||
1206 | spin_unlock_irqrestore(&vp->lock, flags); | ||
1207 | |||
1208 | dev_set_drvdata(&vdev->dev, port); | ||
1209 | |||
1210 | pr_info("%s: PORT ( remote-mac %pM%s )\n", | ||
1211 | vp->dev->name, port->raddr, switch_port ? " switch-port" : ""); | ||
1212 | |||
1213 | vio_port_up(&port->vio); | ||
1214 | |||
1215 | mdesc_release(hp); | ||
1216 | |||
1217 | return 0; | ||
1218 | |||
1219 | err_out_free_ldc: | ||
1220 | vio_ldc_free(&port->vio); | ||
1221 | |||
1222 | err_out_free_port: | ||
1223 | kfree(port); | ||
1224 | |||
1225 | err_out_put_mdesc: | ||
1226 | mdesc_release(hp); | ||
1227 | return err; | ||
1228 | } | ||
1229 | |||
1230 | static int vnet_port_remove(struct vio_dev *vdev) | ||
1231 | { | ||
1232 | struct vnet_port *port = dev_get_drvdata(&vdev->dev); | ||
1233 | |||
1234 | if (port) { | ||
1235 | struct vnet *vp = port->vp; | ||
1236 | unsigned long flags; | ||
1237 | |||
1238 | del_timer_sync(&port->vio.timer); | ||
1239 | |||
1240 | spin_lock_irqsave(&vp->lock, flags); | ||
1241 | list_del(&port->list); | ||
1242 | hlist_del(&port->hash); | ||
1243 | spin_unlock_irqrestore(&vp->lock, flags); | ||
1244 | |||
1245 | vnet_port_free_tx_bufs(port); | ||
1246 | vio_ldc_free(&port->vio); | ||
1247 | |||
1248 | dev_set_drvdata(&vdev->dev, NULL); | ||
1249 | |||
1250 | kfree(port); | ||
1251 | } | ||
1252 | return 0; | ||
1253 | } | ||
1254 | |||
1255 | static const struct vio_device_id vnet_port_match[] = { | ||
1256 | { | ||
1257 | .type = "vnet-port", | ||
1258 | }, | ||
1259 | {}, | ||
1260 | }; | ||
1261 | MODULE_DEVICE_TABLE(vio, vnet_port_match); | ||
1262 | |||
1263 | static struct vio_driver vnet_port_driver = { | ||
1264 | .id_table = vnet_port_match, | ||
1265 | .probe = vnet_port_probe, | ||
1266 | .remove = vnet_port_remove, | ||
1267 | .driver = { | ||
1268 | .name = "vnet_port", | ||
1269 | .owner = THIS_MODULE, | ||
1270 | } | ||
1271 | }; | ||
1272 | |||
1273 | static int __init vnet_init(void) | ||
1274 | { | ||
1275 | return vio_register_driver(&vnet_port_driver); | ||
1276 | } | ||
1277 | |||
1278 | static void __exit vnet_exit(void) | ||
1279 | { | ||
1280 | vio_unregister_driver(&vnet_port_driver); | ||
1281 | } | ||
1282 | |||
1283 | module_init(vnet_init); | ||
1284 | module_exit(vnet_exit); | ||
diff --git a/drivers/net/ethernet/sun/sunvnet.h b/drivers/net/ethernet/sun/sunvnet.h new file mode 100644 index 000000000000..d347a5bf24b0 --- /dev/null +++ b/drivers/net/ethernet/sun/sunvnet.h | |||
@@ -0,0 +1,83 @@ | |||
1 | #ifndef _SUNVNET_H | ||
2 | #define _SUNVNET_H | ||
3 | |||
4 | #define DESC_NCOOKIES(entry_size) \ | ||
5 | ((entry_size) - sizeof(struct vio_net_desc)) | ||
6 | |||
7 | /* length of time before we decide the hardware is borked, | ||
8 | * and dev->tx_timeout() should be called to fix the problem | ||
9 | */ | ||
10 | #define VNET_TX_TIMEOUT (5 * HZ) | ||
11 | |||
12 | #define VNET_TX_RING_SIZE 512 | ||
13 | #define VNET_TX_WAKEUP_THRESH(dr) ((dr)->pending / 4) | ||
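/* [editor's note] dr->pending is set to the full VNET_TX_RING_SIZE in
 * vnet_port_alloc_tx_bufs(), so the wakeup threshold works out to
 * 512 / 4 == 128 free descriptors. */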
14 | |||
15 | /* VNET packets are sent in buffers with the first 6 bytes skipped | ||
16 | * so that after the ethernet header the IPv4/IPv6 headers are aligned | ||
17 | * properly. | ||
18 | */ | ||
19 | #define VNET_PACKET_SKIP 6 | ||
20 | |||
21 | struct vnet_tx_entry { | ||
22 | void *buf; | ||
23 | unsigned int ncookies; | ||
24 | struct ldc_trans_cookie cookies[2]; | ||
25 | }; | ||
26 | |||
27 | struct vnet; | ||
28 | struct vnet_port { | ||
29 | struct vio_driver_state vio; | ||
30 | |||
31 | struct hlist_node hash; | ||
32 | u8 raddr[ETH_ALEN]; | ||
33 | u8 switch_port; | ||
34 | u8 __pad; | ||
35 | |||
36 | struct vnet *vp; | ||
37 | |||
38 | struct vnet_tx_entry tx_bufs[VNET_TX_RING_SIZE]; | ||
39 | |||
40 | struct list_head list; | ||
41 | }; | ||
42 | |||
43 | static inline struct vnet_port *to_vnet_port(struct vio_driver_state *vio) | ||
44 | { | ||
45 | return container_of(vio, struct vnet_port, vio); | ||
46 | } | ||
47 | |||
48 | #define VNET_PORT_HASH_SIZE 16 | ||
49 | #define VNET_PORT_HASH_MASK (VNET_PORT_HASH_SIZE - 1) | ||
50 | |||
51 | static inline unsigned int vnet_hashfn(u8 *mac) | ||
52 | { | ||
53 | unsigned int val = mac[4] ^ mac[5]; | ||
54 | |||
55 | return val & (VNET_PORT_HASH_MASK); | ||
56 | } | ||
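/* [editor's note] Example: for remote MAC 00:14:4f:fa:12:89 the hash is
 * (0x12 ^ 0x89) & 0x0f == 0x0b, selecting one of the 16 chains in
 * port_hash. Only the two low-order MAC bytes feed the hash.
 */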
57 | |||
58 | struct vnet_mcast_entry { | ||
59 | u8 addr[ETH_ALEN]; | ||
60 | u8 sent; | ||
61 | u8 hit; | ||
62 | struct vnet_mcast_entry *next; | ||
63 | }; | ||
64 | |||
65 | struct vnet { | ||
66 | /* Protects port_list and port_hash. */ | ||
67 | spinlock_t lock; | ||
68 | |||
69 | struct net_device *dev; | ||
70 | |||
71 | u32 msg_enable; | ||
72 | |||
73 | struct list_head port_list; | ||
74 | |||
75 | struct hlist_head port_hash[VNET_PORT_HASH_SIZE]; | ||
76 | |||
77 | struct vnet_mcast_entry *mcast_list; | ||
78 | |||
79 | struct list_head list; | ||
80 | u64 local_mac; | ||
81 | }; | ||
82 | |||
83 | #endif /* _SUNVNET_H */ | ||