author    | Becky Bruce <bgill@freescale.com>      | 2008-05-02 15:40:38 -0400
committer | Kumar Gala <galak@kernel.crashing.org> | 2008-05-13 09:53:48 -0400
commit    | 54ef0ec22a39071a4e7fbedd201cecac9ac6e8a7 (patch)
tree      | 57afda969b9ea0639ee1d4e18600bb3a42caa694 /drivers/net/fec_8xx/fec_main.c
parent    | bfd123bf91704b88093673e615cc93329f820ab4 (diff)
[POWERPC] Delete unused fec_8xx net driver
This driver has been superseded by fs_enet and is no longer in use.
Signed-off-by: Becky Bruce <becky.bruce@freescale.com>
Signed-off-by: Kumar Gala <galak@kernel.crashing.org>
Diffstat (limited to 'drivers/net/fec_8xx/fec_main.c')
-rw-r--r-- | drivers/net/fec_8xx/fec_main.c | 1264 |
1 file changed, 0 insertions, 1264 deletions
diff --git a/drivers/net/fec_8xx/fec_main.c b/drivers/net/fec_8xx/fec_main.c
deleted file mode 100644
index ca8d2e83ab03..000000000000
--- a/drivers/net/fec_8xx/fec_main.c
+++ /dev/null
@@ -1,1264 +0,0 @@
1 | /* | ||
2 | * Fast Ethernet Controller (FEC) driver for Motorola MPC8xx. | ||
3 | * | ||
4 | * Copyright (c) 2003 Intracom S.A. | ||
5 | * by Pantelis Antoniou <panto@intracom.gr> | ||
6 | * | ||
7 | * Heavily based on original FEC driver by Dan Malek <dan@embeddededge.com> | ||
8 | * and modifications by Joakim Tjernlund <joakim.tjernlund@lumentis.se> | ||
9 | * | ||
10 | * Released under the GPL | ||
11 | */ | ||
12 | |||
13 | #include <linux/module.h> | ||
14 | #include <linux/kernel.h> | ||
15 | #include <linux/types.h> | ||
16 | #include <linux/string.h> | ||
17 | #include <linux/ptrace.h> | ||
18 | #include <linux/errno.h> | ||
19 | #include <linux/ioport.h> | ||
20 | #include <linux/slab.h> | ||
21 | #include <linux/interrupt.h> | ||
22 | #include <linux/init.h> | ||
23 | #include <linux/delay.h> | ||
24 | #include <linux/netdevice.h> | ||
25 | #include <linux/etherdevice.h> | ||
26 | #include <linux/skbuff.h> | ||
27 | #include <linux/spinlock.h> | ||
28 | #include <linux/mii.h> | ||
29 | #include <linux/ethtool.h> | ||
30 | #include <linux/bitops.h> | ||
31 | #include <linux/dma-mapping.h> | ||
32 | |||
33 | #include <asm/8xx_immap.h> | ||
34 | #include <asm/pgtable.h> | ||
35 | #include <asm/mpc8xx.h> | ||
36 | #include <asm/irq.h> | ||
37 | #include <asm/uaccess.h> | ||
38 | #include <asm/cpm1.h> | ||
39 | |||
40 | #include "fec_8xx.h" | ||
41 | |||
42 | /*************************************************/ | ||
43 | |||
44 | #define FEC_MAX_MULTICAST_ADDRS 64 | ||
45 | |||
46 | /*************************************************/ | ||
47 | |||
48 | static char version[] __devinitdata = | ||
49 | DRV_MODULE_NAME ".c:v" DRV_MODULE_VERSION " (" DRV_MODULE_RELDATE ")" "\n"; | ||
50 | |||
51 | MODULE_AUTHOR("Pantelis Antoniou <panto@intracom.gr>"); | ||
52 | MODULE_DESCRIPTION("Motorola 8xx FEC ethernet driver"); | ||
53 | MODULE_LICENSE("GPL"); | ||
54 | |||
55 | int fec_8xx_debug = -1; /* -1 == use FEC_8XX_DEF_MSG_ENABLE as value */ | ||
56 | module_param(fec_8xx_debug, int, 0); | ||
57 | MODULE_PARM_DESC(fec_8xx_debug, | ||
58 | "FEC 8xx bitmapped debugging message enable value"); | ||
59 | |||
60 | |||
61 | /*************************************************/ | ||
62 | |||
63 | /* | ||
64 | * Delay to wait for FEC reset command to complete (in us) | ||
65 | */ | ||
66 | #define FEC_RESET_DELAY 50 | ||
67 | |||
68 | /*****************************************************************************************/ | ||
69 | |||
70 | static void fec_whack_reset(fec_t * fecp) | ||
71 | { | ||
72 | int i; | ||
73 | |||
74 | /* | ||
75 | * Whack a reset. We should wait for this. | ||
76 | */ | ||
77 | FW(fecp, ecntrl, FEC_ECNTRL_PINMUX | FEC_ECNTRL_RESET); | ||
78 | for (i = 0; | ||
79 | (FR(fecp, ecntrl) & FEC_ECNTRL_RESET) != 0 && i < FEC_RESET_DELAY; | ||
80 | i++) | ||
81 | udelay(1); | ||
82 | |||
83 | if (i == FEC_RESET_DELAY) | ||
84 | printk(KERN_WARNING "FEC Reset timeout!\n"); | ||
85 | |||
86 | } | ||
87 | |||
88 | /****************************************************************************/ | ||
89 | |||
90 | /* | ||
91 | * Transmitter timeout. | ||
92 | */ | ||
93 | #define TX_TIMEOUT (2*HZ) | ||
94 | |||
95 | /****************************************************************************/ | ||
96 | |||
97 | /* | ||
98 | * Returns the CRC needed when filling in the hash table for | ||
99 | * multicast group filtering | ||
100 | * pAddr must point to a MAC address (6 bytes) | ||
101 | */ | ||
102 | static __u32 fec_mulicast_calc_crc(char *pAddr) | ||
103 | { | ||
104 | u8 byte; | ||
105 | int byte_count; | ||
106 | int bit_count; | ||
107 | __u32 crc = 0xffffffff; | ||
108 | u8 msb; | ||
109 | |||
110 | for (byte_count = 0; byte_count < 6; byte_count++) { | ||
111 | byte = pAddr[byte_count]; | ||
112 | for (bit_count = 0; bit_count < 8; bit_count++) { | ||
113 | msb = crc >> 31; | ||
114 | crc <<= 1; | ||
115 | if (msb ^ (byte & 0x1)) { | ||
116 | crc ^= FEC_CRC_POLY; | ||
117 | } | ||
118 | byte >>= 1; | ||
119 | } | ||
120 | } | ||
121 | return (crc); | ||
122 | } | ||
123 | |||
124 | /* | ||
125 | * Set or clear the multicast filter for this adaptor. | ||
126 | * Skeleton taken from sunlance driver. | ||
127 | * The CPM Ethernet implementation allows Multicast as well as individual | ||
128 | * MAC address filtering. Some of the drivers check to make sure it is | ||
129 | * a group multicast address, and discard those that are not. I guess I | ||
130 | * will do the same for now, but just remove the test if you want | ||
131 | * individual filtering as well (do the upper net layers want or support | ||
132 | * this kind of feature?). | ||
133 | */ | ||
134 | static void fec_set_multicast_list(struct net_device *dev) | ||
135 | { | ||
136 | struct fec_enet_private *fep = netdev_priv(dev); | ||
137 | fec_t *fecp = fep->fecp; | ||
138 | struct dev_mc_list *pmc; | ||
139 | __u32 crc; | ||
140 | int temp; | ||
141 | __u32 csrVal; | ||
142 | int hash_index; | ||
143 | __u32 hthi, htlo; | ||
144 | unsigned long flags; | ||
145 | |||
146 | |||
147 | if ((dev->flags & IFF_PROMISC) != 0) { | ||
148 | |||
149 | spin_lock_irqsave(&fep->lock, flags); | ||
150 | FS(fecp, r_cntrl, FEC_RCNTRL_PROM); | ||
151 | spin_unlock_irqrestore(&fep->lock, flags); | ||
152 | |||
153 | /* | ||
154 | * Log any net taps. | ||
155 | */ | ||
156 | printk(KERN_WARNING DRV_MODULE_NAME | ||
157 | ": %s: Promiscuous mode enabled.\n", dev->name); | ||
158 | return; | ||
159 | |||
160 | } | ||
161 | |||
162 | if ((dev->flags & IFF_ALLMULTI) != 0 || | ||
163 | dev->mc_count > FEC_MAX_MULTICAST_ADDRS) { | ||
164 | /* | ||
165 | * Catch all multicast addresses, set the filter to all 1's. | ||
166 | */ | ||
167 | hthi = 0xffffffffU; | ||
168 | htlo = 0xffffffffU; | ||
169 | } else { | ||
170 | hthi = 0; | ||
171 | htlo = 0; | ||
172 | |||
173 | /* | ||
174 | * Now populate the hash table | ||
175 | */ | ||
176 | for (pmc = dev->mc_list; pmc != NULL; pmc = pmc->next) { | ||
177 | crc = fec_mulicast_calc_crc(pmc->dmi_addr); | ||
178 | temp = (crc & 0x3f) >> 1; | ||
179 | hash_index = ((temp & 0x01) << 4) | | ||
180 | ((temp & 0x02) << 2) | | ||
181 | ((temp & 0x04)) | | ||
182 | ((temp & 0x08) >> 2) | | ||
183 | ((temp & 0x10) >> 4); | ||
184 | csrVal = (1 << hash_index); | ||
185 | if (crc & 1) | ||
186 | hthi |= csrVal; | ||
187 | else | ||
188 | htlo |= csrVal; | ||
189 | } | ||
190 | } | ||
191 | |||
192 | spin_lock_irqsave(&fep->lock, flags); | ||
193 | FC(fecp, r_cntrl, FEC_RCNTRL_PROM); | ||
194 | FW(fecp, hash_table_high, hthi); | ||
195 | FW(fecp, hash_table_low, htlo); | ||
196 | spin_unlock_irqrestore(&fep->lock, flags); | ||
197 | } | ||
198 | |||
199 | static int fec_set_mac_address(struct net_device *dev, void *addr) | ||
200 | { | ||
201 | struct sockaddr *mac = addr; | ||
202 | struct fec_enet_private *fep = netdev_priv(dev); | ||
203 | struct fec *fecp = fep->fecp; | ||
204 | int i; | ||
205 | __u32 addrhi, addrlo; | ||
206 | unsigned long flags; | ||
207 | |||
208 | /* Get pointer to SCC area in parameter RAM. */ | ||
209 | for (i = 0; i < 6; i++) | ||
210 | dev->dev_addr[i] = mac->sa_data[i]; | ||
211 | |||
212 | /* | ||
213 | * Set station address. | ||
214 | */ | ||
215 | addrhi = ((__u32) dev->dev_addr[0] << 24) | | ||
216 | ((__u32) dev->dev_addr[1] << 16) | | ||
217 | ((__u32) dev->dev_addr[2] << 8) | | ||
218 | (__u32) dev->dev_addr[3]; | ||
219 | addrlo = ((__u32) dev->dev_addr[4] << 24) | | ||
220 | ((__u32) dev->dev_addr[5] << 16); | ||
221 | |||
222 | spin_lock_irqsave(&fep->lock, flags); | ||
223 | FW(fecp, addr_low, addrhi); | ||
224 | FW(fecp, addr_high, addrlo); | ||
225 | spin_unlock_irqrestore(&fep->lock, flags); | ||
226 | |||
227 | return 0; | ||
228 | } | ||
229 | |||
230 | /* | ||
231 | * This function is called to start or restart the FEC during a link | ||
232 | * change. This only happens when switching between half and full | ||
233 | * duplex. | ||
234 | */ | ||
235 | void fec_restart(struct net_device *dev, int duplex, int speed) | ||
236 | { | ||
237 | #ifdef CONFIG_DUET | ||
238 | immap_t *immap = (immap_t *) IMAP_ADDR; | ||
239 | __u32 cptr; | ||
240 | #endif | ||
241 | struct fec_enet_private *fep = netdev_priv(dev); | ||
242 | struct fec *fecp = fep->fecp; | ||
243 | const struct fec_platform_info *fpi = fep->fpi; | ||
244 | cbd_t *bdp; | ||
245 | struct sk_buff *skb; | ||
246 | int i; | ||
247 | __u32 addrhi, addrlo; | ||
248 | |||
249 | fec_whack_reset(fep->fecp); | ||
250 | |||
251 | /* | ||
252 | * Set station address. | ||
253 | */ | ||
254 | addrhi = ((__u32) dev->dev_addr[0] << 24) | | ||
255 | ((__u32) dev->dev_addr[1] << 16) | | ||
256 | ((__u32) dev->dev_addr[2] << 8) | | ||
257 | (__u32) dev->dev_addr[3]; | ||
258 | addrlo = ((__u32) dev->dev_addr[4] << 24) | | ||
259 | ((__u32) dev->dev_addr[5] << 16); | ||
260 | FW(fecp, addr_low, addrhi); | ||
261 | FW(fecp, addr_high, addrlo); | ||
262 | |||
263 | /* | ||
264 | * Reset all multicast. | ||
265 | */ | ||
266 | FW(fecp, hash_table_high, 0); | ||
267 | FW(fecp, hash_table_low, 0); | ||
268 | |||
269 | /* | ||
270 | * Set maximum receive buffer size. | ||
271 | */ | ||
272 | FW(fecp, r_buff_size, PKT_MAXBLR_SIZE); | ||
273 | FW(fecp, r_hash, PKT_MAXBUF_SIZE); | ||
274 | |||
275 | /* | ||
276 | * Set receive and transmit descriptor base. | ||
277 | */ | ||
278 | FW(fecp, r_des_start, iopa((__u32) (fep->rx_bd_base))); | ||
279 | FW(fecp, x_des_start, iopa((__u32) (fep->tx_bd_base))); | ||
280 | |||
281 | fep->dirty_tx = fep->cur_tx = fep->tx_bd_base; | ||
282 | fep->tx_free = fep->tx_ring; | ||
283 | fep->cur_rx = fep->rx_bd_base; | ||
284 | |||
285 | /* | ||
286 | * Reset SKB receive buffers | ||
287 | */ | ||
288 | for (i = 0; i < fep->rx_ring; i++) { | ||
289 | if ((skb = fep->rx_skbuff[i]) == NULL) | ||
290 | continue; | ||
291 | fep->rx_skbuff[i] = NULL; | ||
292 | dev_kfree_skb(skb); | ||
293 | } | ||
294 | |||
295 | /* | ||
296 | * Initialize the receive buffer descriptors. | ||
297 | */ | ||
298 | for (i = 0, bdp = fep->rx_bd_base; i < fep->rx_ring; i++, bdp++) { | ||
299 | skb = dev_alloc_skb(ENET_RX_FRSIZE); | ||
300 | if (skb == NULL) { | ||
301 | printk(KERN_WARNING DRV_MODULE_NAME | ||
302 | ": %s Memory squeeze, unable to allocate skb\n", | ||
303 | dev->name); | ||
304 | fep->stats.rx_dropped++; | ||
305 | break; | ||
306 | } | ||
307 | fep->rx_skbuff[i] = skb; | ||
308 | skb->dev = dev; | ||
309 | CBDW_BUFADDR(bdp, dma_map_single(NULL, skb->data, | ||
310 | L1_CACHE_ALIGN(PKT_MAXBUF_SIZE), | ||
311 | DMA_FROM_DEVICE)); | ||
312 | CBDW_DATLEN(bdp, 0); /* zero */ | ||
313 | CBDW_SC(bdp, BD_ENET_RX_EMPTY | | ||
314 | ((i < fep->rx_ring - 1) ? 0 : BD_SC_WRAP)); | ||
315 | } | ||
316 | /* | ||
317 | * if we failed, fillup remainder | ||
318 | */ | ||
319 | for (; i < fep->rx_ring; i++, bdp++) { | ||
320 | fep->rx_skbuff[i] = NULL; | ||
321 | CBDW_SC(bdp, (i < fep->rx_ring - 1) ? 0 : BD_SC_WRAP); | ||
322 | } | ||
323 | |||
324 | /* | ||
325 | * Reset SKB transmit buffers. | ||
326 | */ | ||
327 | for (i = 0; i < fep->tx_ring; i++) { | ||
328 | if ((skb = fep->tx_skbuff[i]) == NULL) | ||
329 | continue; | ||
330 | fep->tx_skbuff[i] = NULL; | ||
331 | dev_kfree_skb(skb); | ||
332 | } | ||
333 | |||
334 | /* | ||
335 | * ...and the same for transmit. | ||
336 | */ | ||
337 | for (i = 0, bdp = fep->tx_bd_base; i < fep->tx_ring; i++, bdp++) { | ||
338 | fep->tx_skbuff[i] = NULL; | ||
339 | CBDW_BUFADDR(bdp, virt_to_bus(NULL)); | ||
340 | CBDW_DATLEN(bdp, 0); | ||
341 | CBDW_SC(bdp, (i < fep->tx_ring - 1) ? 0 : BD_SC_WRAP); | ||
342 | } | ||
343 | |||
344 | /* | ||
345 | * Enable big endian and don't care about SDMA FC. | ||
346 | */ | ||
347 | FW(fecp, fun_code, 0x78000000); | ||
348 | |||
349 | /* | ||
350 | * Set MII speed. | ||
351 | */ | ||
352 | FW(fecp, mii_speed, fep->fec_phy_speed); | ||
353 | |||
354 | /* | ||
355 | * Clear any outstanding interrupt. | ||
356 | */ | ||
357 | FW(fecp, ievent, 0xffc0); | ||
358 | FW(fecp, ivec, (fpi->fec_irq / 2) << 29); | ||
359 | |||
360 | /* | ||
361 | * adjust to speed (only for DUET & RMII) | ||
362 | */ | ||
363 | #ifdef CONFIG_DUET | ||
364 | cptr = in_be32(&immap->im_cpm.cp_cptr); | ||
365 | switch (fpi->fec_no) { | ||
366 | case 0: | ||
367 | /* | ||
368 | * check if in RMII mode | ||
369 | */ | ||
370 | if ((cptr & 0x100) == 0) | ||
371 | break; | ||
372 | |||
373 | if (speed == 10) | ||
374 | cptr |= 0x0000010; | ||
375 | else if (speed == 100) | ||
376 | cptr &= ~0x0000010; | ||
377 | break; | ||
378 | case 1: | ||
379 | /* | ||
380 | * check if in RMII mode | ||
381 | */ | ||
382 | if ((cptr & 0x80) == 0) | ||
383 | break; | ||
384 | |||
385 | if (speed == 10) | ||
386 | cptr |= 0x0000008; | ||
387 | else if (speed == 100) | ||
388 | cptr &= ~0x0000008; | ||
389 | break; | ||
390 | default: | ||
391 | break; | ||
392 | } | ||
393 | out_be32(&immap->im_cpm.cp_cptr, cptr); | ||
394 | #endif | ||
395 | |||
396 | FW(fecp, r_cntrl, FEC_RCNTRL_MII_MODE); /* MII enable */ | ||
397 | /* | ||
398 | * adjust to duplex mode | ||
399 | */ | ||
400 | if (duplex) { | ||
401 | FC(fecp, r_cntrl, FEC_RCNTRL_DRT); | ||
402 | FS(fecp, x_cntrl, FEC_TCNTRL_FDEN); /* FD enable */ | ||
403 | } else { | ||
404 | FS(fecp, r_cntrl, FEC_RCNTRL_DRT); | ||
405 | FC(fecp, x_cntrl, FEC_TCNTRL_FDEN); /* FD disable */ | ||
406 | } | ||
407 | |||
408 | /* | ||
409 | * Enable interrupts we wish to service. | ||
410 | */ | ||
411 | FW(fecp, imask, FEC_ENET_TXF | FEC_ENET_TXB | | ||
412 | FEC_ENET_RXF | FEC_ENET_RXB); | ||
413 | |||
414 | /* | ||
415 | * And last, enable the transmit and receive processing. | ||
416 | */ | ||
417 | FW(fecp, ecntrl, FEC_ECNTRL_PINMUX | FEC_ECNTRL_ETHER_EN); | ||
418 | FW(fecp, r_des_active, 0x01000000); | ||
419 | } | ||
420 | |||
421 | void fec_stop(struct net_device *dev) | ||
422 | { | ||
423 | struct fec_enet_private *fep = netdev_priv(dev); | ||
424 | fec_t *fecp = fep->fecp; | ||
425 | struct sk_buff *skb; | ||
426 | int i; | ||
427 | |||
428 | if ((FR(fecp, ecntrl) & FEC_ECNTRL_ETHER_EN) == 0) | ||
429 | return; /* already down */ | ||
430 | |||
431 | FW(fecp, x_cntrl, 0x01); /* Graceful transmit stop */ | ||
432 | for (i = 0; ((FR(fecp, ievent) & 0x10000000) == 0) && | ||
433 | i < FEC_RESET_DELAY; i++) | ||
434 | udelay(1); | ||
435 | |||
436 | if (i == FEC_RESET_DELAY) | ||
437 | printk(KERN_WARNING DRV_MODULE_NAME | ||
438 | ": %s FEC timeout on graceful transmit stop\n", | ||
439 | dev->name); | ||
440 | /* | ||
441 | * Disable FEC. Let only MII interrupts. | ||
442 | */ | ||
443 | FW(fecp, imask, 0); | ||
444 | FW(fecp, ecntrl, ~FEC_ECNTRL_ETHER_EN); | ||
445 | |||
446 | /* | ||
447 | * Reset SKB transmit buffers. | ||
448 | */ | ||
449 | for (i = 0; i < fep->tx_ring; i++) { | ||
450 | if ((skb = fep->tx_skbuff[i]) == NULL) | ||
451 | continue; | ||
452 | fep->tx_skbuff[i] = NULL; | ||
453 | dev_kfree_skb(skb); | ||
454 | } | ||
455 | |||
456 | /* | ||
457 | * Reset SKB receive buffers | ||
458 | */ | ||
459 | for (i = 0; i < fep->rx_ring; i++) { | ||
460 | if ((skb = fep->rx_skbuff[i]) == NULL) | ||
461 | continue; | ||
462 | fep->rx_skbuff[i] = NULL; | ||
463 | dev_kfree_skb(skb); | ||
464 | } | ||
465 | } | ||
466 | |||
467 | /* common receive function */ | ||
468 | static int fec_enet_rx_common(struct fec_enet_private *fep, | ||
469 | struct net_device *dev, int budget) | ||
470 | { | ||
471 | fec_t *fecp = fep->fecp; | ||
472 | const struct fec_platform_info *fpi = fep->fpi; | ||
473 | cbd_t *bdp; | ||
474 | struct sk_buff *skb, *skbn, *skbt; | ||
475 | int received = 0; | ||
476 | __u16 pkt_len, sc; | ||
477 | int curidx; | ||
478 | |||
479 | /* | ||
480 | * First, grab all of the stats for the incoming packet. | ||
481 | * These get messed up if we get called due to a busy condition. | ||
482 | */ | ||
483 | bdp = fep->cur_rx; | ||
484 | |||
485 | /* clear RX status bits for napi*/ | ||
486 | if (fpi->use_napi) | ||
487 | FW(fecp, ievent, FEC_ENET_RXF | FEC_ENET_RXB); | ||
488 | |||
489 | while (((sc = CBDR_SC(bdp)) & BD_ENET_RX_EMPTY) == 0) { | ||
490 | |||
491 | curidx = bdp - fep->rx_bd_base; | ||
492 | |||
493 | /* | ||
494 | * Since we have allocated space to hold a complete frame, | ||
495 | * the last indicator should be set. | ||
496 | */ | ||
497 | if ((sc & BD_ENET_RX_LAST) == 0) | ||
498 | printk(KERN_WARNING DRV_MODULE_NAME | ||
499 | ": %s rcv is not +last\n", | ||
500 | dev->name); | ||
501 | |||
502 | /* | ||
503 | * Check for errors. | ||
504 | */ | ||
505 | if (sc & (BD_ENET_RX_LG | BD_ENET_RX_SH | BD_ENET_RX_CL | | ||
506 | BD_ENET_RX_NO | BD_ENET_RX_CR | BD_ENET_RX_OV)) { | ||
507 | fep->stats.rx_errors++; | ||
508 | /* Frame too long or too short. */ | ||
509 | if (sc & (BD_ENET_RX_LG | BD_ENET_RX_SH)) | ||
510 | fep->stats.rx_length_errors++; | ||
511 | /* Frame alignment */ | ||
512 | if (sc & (BD_ENET_RX_NO | BD_ENET_RX_CL)) | ||
513 | fep->stats.rx_frame_errors++; | ||
514 | /* CRC Error */ | ||
515 | if (sc & BD_ENET_RX_CR) | ||
516 | fep->stats.rx_crc_errors++; | ||
517 | /* FIFO overrun */ | ||
518 | if (sc & BD_ENET_RX_OV) | ||
519 | fep->stats.rx_crc_errors++; | ||
520 | |||
521 | skbn = fep->rx_skbuff[curidx]; | ||
522 | BUG_ON(skbn == NULL); | ||
523 | |||
524 | } else { | ||
525 | skb = fep->rx_skbuff[curidx]; | ||
526 | BUG_ON(skb == NULL); | ||
527 | |||
528 | /* | ||
529 | * Process the incoming frame. | ||
530 | */ | ||
531 | fep->stats.rx_packets++; | ||
532 | pkt_len = CBDR_DATLEN(bdp) - 4; /* remove CRC */ | ||
533 | fep->stats.rx_bytes += pkt_len + 4; | ||
534 | |||
535 | if (pkt_len <= fpi->rx_copybreak) { | ||
536 | /* +2 to make IP header L1 cache aligned */ | ||
537 | skbn = dev_alloc_skb(pkt_len + 2); | ||
538 | if (skbn != NULL) { | ||
539 | skb_reserve(skbn, 2); /* align IP header */ | ||
540 | skb_copy_from_linear_data(skb, | ||
541 | skbn->data, | ||
542 | pkt_len); | ||
543 | /* swap */ | ||
544 | skbt = skb; | ||
545 | skb = skbn; | ||
546 | skbn = skbt; | ||
547 | } | ||
548 | } else | ||
549 | skbn = dev_alloc_skb(ENET_RX_FRSIZE); | ||
550 | |||
551 | if (skbn != NULL) { | ||
552 | skb_put(skb, pkt_len); /* Make room */ | ||
553 | skb->protocol = eth_type_trans(skb, dev); | ||
554 | received++; | ||
555 | if (!fpi->use_napi) | ||
556 | netif_rx(skb); | ||
557 | else | ||
558 | netif_receive_skb(skb); | ||
559 | } else { | ||
560 | printk(KERN_WARNING DRV_MODULE_NAME | ||
561 | ": %s Memory squeeze, dropping packet.\n", | ||
562 | dev->name); | ||
563 | fep->stats.rx_dropped++; | ||
564 | skbn = skb; | ||
565 | } | ||
566 | } | ||
567 | |||
568 | fep->rx_skbuff[curidx] = skbn; | ||
569 | CBDW_BUFADDR(bdp, dma_map_single(NULL, skbn->data, | ||
570 | L1_CACHE_ALIGN(PKT_MAXBUF_SIZE), | ||
571 | DMA_FROM_DEVICE)); | ||
572 | CBDW_DATLEN(bdp, 0); | ||
573 | CBDW_SC(bdp, (sc & ~BD_ENET_RX_STATS) | BD_ENET_RX_EMPTY); | ||
574 | |||
575 | /* | ||
576 | * Update BD pointer to next entry. | ||
577 | */ | ||
578 | if ((sc & BD_ENET_RX_WRAP) == 0) | ||
579 | bdp++; | ||
580 | else | ||
581 | bdp = fep->rx_bd_base; | ||
582 | |||
583 | /* | ||
584 | * Doing this here will keep the FEC running while we process | ||
585 | * incoming frames. On a heavily loaded network, we should be | ||
586 | * able to keep up at the expense of system resources. | ||
587 | */ | ||
588 | FW(fecp, r_des_active, 0x01000000); | ||
589 | |||
590 | if (received >= budget) | ||
591 | break; | ||
592 | |||
593 | } | ||
594 | |||
595 | fep->cur_rx = bdp; | ||
596 | |||
597 | if (fpi->use_napi) { | ||
598 | if (received < budget) { | ||
599 | netif_rx_complete(dev, &fep->napi); | ||
600 | |||
601 | /* enable RX interrupt bits */ | ||
602 | FS(fecp, imask, FEC_ENET_RXF | FEC_ENET_RXB); | ||
603 | } | ||
604 | } | ||
605 | |||
606 | return received; | ||
607 | } | ||
608 | |||
609 | static void fec_enet_tx(struct net_device *dev) | ||
610 | { | ||
611 | struct fec_enet_private *fep = netdev_priv(dev); | ||
612 | cbd_t *bdp; | ||
613 | struct sk_buff *skb; | ||
614 | int dirtyidx, do_wake; | ||
615 | __u16 sc; | ||
616 | |||
617 | spin_lock(&fep->lock); | ||
618 | bdp = fep->dirty_tx; | ||
619 | |||
620 | do_wake = 0; | ||
621 | while (((sc = CBDR_SC(bdp)) & BD_ENET_TX_READY) == 0) { | ||
622 | |||
623 | dirtyidx = bdp - fep->tx_bd_base; | ||
624 | |||
625 | if (fep->tx_free == fep->tx_ring) | ||
626 | break; | ||
627 | |||
628 | skb = fep->tx_skbuff[dirtyidx]; | ||
629 | |||
630 | /* | ||
631 | * Check for errors. | ||
632 | */ | ||
633 | if (sc & (BD_ENET_TX_HB | BD_ENET_TX_LC | | ||
634 | BD_ENET_TX_RL | BD_ENET_TX_UN | BD_ENET_TX_CSL)) { | ||
635 | fep->stats.tx_errors++; | ||
636 | if (sc & BD_ENET_TX_HB) /* No heartbeat */ | ||
637 | fep->stats.tx_heartbeat_errors++; | ||
638 | if (sc & BD_ENET_TX_LC) /* Late collision */ | ||
639 | fep->stats.tx_window_errors++; | ||
640 | if (sc & BD_ENET_TX_RL) /* Retrans limit */ | ||
641 | fep->stats.tx_aborted_errors++; | ||
642 | if (sc & BD_ENET_TX_UN) /* Underrun */ | ||
643 | fep->stats.tx_fifo_errors++; | ||
644 | if (sc & BD_ENET_TX_CSL) /* Carrier lost */ | ||
645 | fep->stats.tx_carrier_errors++; | ||
646 | } else | ||
647 | fep->stats.tx_packets++; | ||
648 | |||
649 | if (sc & BD_ENET_TX_READY) | ||
650 | printk(KERN_WARNING DRV_MODULE_NAME | ||
651 | ": %s HEY! Enet xmit interrupt and TX_READY.\n", | ||
652 | dev->name); | ||
653 | |||
654 | /* | ||
655 | * Deferred means some collisions occurred during transmit, | ||
656 | * but we eventually sent the packet OK. | ||
657 | */ | ||
658 | if (sc & BD_ENET_TX_DEF) | ||
659 | fep->stats.collisions++; | ||
660 | |||
661 | /* | ||
662 | * Free the sk buffer associated with this last transmit. | ||
663 | */ | ||
664 | dev_kfree_skb_irq(skb); | ||
665 | fep->tx_skbuff[dirtyidx] = NULL; | ||
666 | |||
667 | /* | ||
668 | * Update pointer to next buffer descriptor to be transmitted. | ||
669 | */ | ||
670 | if ((sc & BD_ENET_TX_WRAP) == 0) | ||
671 | bdp++; | ||
672 | else | ||
673 | bdp = fep->tx_bd_base; | ||
674 | |||
675 | /* | ||
676 | * Since we have freed up a buffer, the ring is no longer | ||
677 | * full. | ||
678 | */ | ||
679 | if (!fep->tx_free++) | ||
680 | do_wake = 1; | ||
681 | } | ||
682 | |||
683 | fep->dirty_tx = bdp; | ||
684 | |||
685 | spin_unlock(&fep->lock); | ||
686 | |||
687 | if (do_wake && netif_queue_stopped(dev)) | ||
688 | netif_wake_queue(dev); | ||
689 | } | ||
690 | |||
691 | /* | ||
692 | * The interrupt handler. | ||
693 | * This is called from the MPC core interrupt. | ||
694 | */ | ||
695 | static irqreturn_t | ||
696 | fec_enet_interrupt(int irq, void *dev_id) | ||
697 | { | ||
698 | struct net_device *dev = dev_id; | ||
699 | struct fec_enet_private *fep; | ||
700 | const struct fec_platform_info *fpi; | ||
701 | fec_t *fecp; | ||
702 | __u32 int_events; | ||
703 | __u32 int_events_napi; | ||
704 | |||
705 | if (unlikely(dev == NULL)) | ||
706 | return IRQ_NONE; | ||
707 | |||
708 | fep = netdev_priv(dev); | ||
709 | fecp = fep->fecp; | ||
710 | fpi = fep->fpi; | ||
711 | |||
712 | /* | ||
713 | * Get the interrupt events that caused us to be here. | ||
714 | */ | ||
715 | while ((int_events = FR(fecp, ievent) & FR(fecp, imask)) != 0) { | ||
716 | |||
717 | if (!fpi->use_napi) | ||
718 | FW(fecp, ievent, int_events); | ||
719 | else { | ||
720 | int_events_napi = int_events & ~(FEC_ENET_RXF | FEC_ENET_RXB); | ||
721 | FW(fecp, ievent, int_events_napi); | ||
722 | } | ||
723 | |||
724 | if ((int_events & (FEC_ENET_HBERR | FEC_ENET_BABR | | ||
725 | FEC_ENET_BABT | FEC_ENET_EBERR)) != 0) | ||
726 | printk(KERN_WARNING DRV_MODULE_NAME | ||
727 | ": %s FEC ERROR(s) 0x%x\n", | ||
728 | dev->name, int_events); | ||
729 | |||
730 | if ((int_events & FEC_ENET_RXF) != 0) { | ||
731 | if (!fpi->use_napi) | ||
732 | fec_enet_rx_common(fep, dev, ~0); | ||
733 | else { | ||
734 | if (netif_rx_schedule_prep(dev, &fep->napi)) { | ||
735 | /* disable rx interrupts */ | ||
736 | FC(fecp, imask, FEC_ENET_RXF | FEC_ENET_RXB); | ||
737 | __netif_rx_schedule(dev, &fep->napi); | ||
738 | } else { | ||
739 | printk(KERN_ERR DRV_MODULE_NAME | ||
740 | ": %s driver bug! interrupt while in poll!\n", | ||
741 | dev->name); | ||
742 | FC(fecp, imask, FEC_ENET_RXF | FEC_ENET_RXB); | ||
743 | } | ||
744 | } | ||
745 | } | ||
746 | |||
747 | if ((int_events & FEC_ENET_TXF) != 0) | ||
748 | fec_enet_tx(dev); | ||
749 | } | ||
750 | |||
751 | return IRQ_HANDLED; | ||
752 | } | ||
753 | |||
754 | /* This interrupt occurs when the PHY detects a link change. */ | ||
755 | static irqreturn_t | ||
756 | fec_mii_link_interrupt(int irq, void *dev_id) | ||
757 | { | ||
758 | struct net_device *dev = dev_id; | ||
759 | struct fec_enet_private *fep; | ||
760 | const struct fec_platform_info *fpi; | ||
761 | |||
762 | if (unlikely(dev == NULL)) | ||
763 | return IRQ_NONE; | ||
764 | |||
765 | fep = netdev_priv(dev); | ||
766 | fpi = fep->fpi; | ||
767 | |||
768 | if (!fpi->use_mdio) | ||
769 | return IRQ_NONE; | ||
770 | |||
771 | /* | ||
772 | * Acknowledge the interrupt if possible. If we have not | ||
773 | * found the PHY yet we can't process or acknowledge the | ||
774 | * interrupt now. Instead we ignore this interrupt for now, | ||
775 | * which we can do since it is edge triggered. It will be | ||
776 | * acknowledged later by fec_enet_open(). | ||
777 | */ | ||
778 | if (!fep->phy) | ||
779 | return IRQ_NONE; | ||
780 | |||
781 | fec_mii_ack_int(dev); | ||
782 | fec_mii_link_status_change_check(dev, 0); | ||
783 | |||
784 | return IRQ_HANDLED; | ||
785 | } | ||
786 | |||
787 | |||
788 | /**********************************************************************************/ | ||
789 | |||
790 | static int fec_enet_start_xmit(struct sk_buff *skb, struct net_device *dev) | ||
791 | { | ||
792 | struct fec_enet_private *fep = netdev_priv(dev); | ||
793 | fec_t *fecp = fep->fecp; | ||
794 | cbd_t *bdp; | ||
795 | int curidx; | ||
796 | unsigned long flags; | ||
797 | |||
798 | spin_lock_irqsave(&fep->tx_lock, flags); | ||
799 | |||
800 | /* | ||
801 | * Fill in a Tx ring entry | ||
802 | */ | ||
803 | bdp = fep->cur_tx; | ||
804 | |||
805 | if (!fep->tx_free || (CBDR_SC(bdp) & BD_ENET_TX_READY)) { | ||
806 | netif_stop_queue(dev); | ||
807 | spin_unlock_irqrestore(&fep->tx_lock, flags); | ||
808 | |||
809 | /* | ||
810 | * Ooops. All transmit buffers are full. Bail out. | ||
811 | * This should not happen, since the tx queue should be stopped. | ||
812 | */ | ||
813 | printk(KERN_WARNING DRV_MODULE_NAME | ||
814 | ": %s tx queue full!.\n", dev->name); | ||
815 | return 1; | ||
816 | } | ||
817 | |||
818 | curidx = bdp - fep->tx_bd_base; | ||
819 | /* | ||
820 | * Clear all of the status flags. | ||
821 | */ | ||
822 | CBDC_SC(bdp, BD_ENET_TX_STATS); | ||
823 | |||
824 | /* | ||
825 | * Save skb pointer. | ||
826 | */ | ||
827 | fep->tx_skbuff[curidx] = skb; | ||
828 | |||
829 | fep->stats.tx_bytes += skb->len; | ||
830 | |||
831 | /* | ||
832 | * Push the data cache so the CPM does not get stale memory data. | ||
833 | */ | ||
834 | CBDW_BUFADDR(bdp, dma_map_single(NULL, skb->data, | ||
835 | skb->len, DMA_TO_DEVICE)); | ||
836 | CBDW_DATLEN(bdp, skb->len); | ||
837 | |||
838 | dev->trans_start = jiffies; | ||
839 | |||
840 | /* | ||
841 | * If this was the last BD in the ring, start at the beginning again. | ||
842 | */ | ||
843 | if ((CBDR_SC(bdp) & BD_ENET_TX_WRAP) == 0) | ||
844 | fep->cur_tx++; | ||
845 | else | ||
846 | fep->cur_tx = fep->tx_bd_base; | ||
847 | |||
848 | if (!--fep->tx_free) | ||
849 | netif_stop_queue(dev); | ||
850 | |||
851 | /* | ||
852 | * Trigger transmission start | ||
853 | */ | ||
854 | CBDS_SC(bdp, BD_ENET_TX_READY | BD_ENET_TX_INTR | | ||
855 | BD_ENET_TX_LAST | BD_ENET_TX_TC); | ||
856 | FW(fecp, x_des_active, 0x01000000); | ||
857 | |||
858 | spin_unlock_irqrestore(&fep->tx_lock, flags); | ||
859 | |||
860 | return 0; | ||
861 | } | ||
862 | |||
863 | static void fec_timeout(struct net_device *dev) | ||
864 | { | ||
865 | struct fec_enet_private *fep = netdev_priv(dev); | ||
866 | |||
867 | fep->stats.tx_errors++; | ||
868 | |||
869 | if (fep->tx_free) | ||
870 | netif_wake_queue(dev); | ||
871 | |||
872 | /* check link status again */ | ||
873 | fec_mii_link_status_change_check(dev, 0); | ||
874 | } | ||
875 | |||
876 | static int fec_enet_open(struct net_device *dev) | ||
877 | { | ||
878 | struct fec_enet_private *fep = netdev_priv(dev); | ||
879 | const struct fec_platform_info *fpi = fep->fpi; | ||
880 | unsigned long flags; | ||
881 | |||
882 | napi_enable(&fep->napi); | ||
883 | |||
884 | /* Install our interrupt handler. */ | ||
885 | if (request_irq(fpi->fec_irq, fec_enet_interrupt, 0, "fec", dev) != 0) { | ||
886 | printk(KERN_ERR DRV_MODULE_NAME | ||
887 | ": %s Could not allocate FEC IRQ!", dev->name); | ||
888 | napi_disable(&fep->napi); | ||
889 | return -EINVAL; | ||
890 | } | ||
891 | |||
892 | /* Install our phy interrupt handler */ | ||
893 | if (fpi->phy_irq != -1 && | ||
894 | request_irq(fpi->phy_irq, fec_mii_link_interrupt, 0, "fec-phy", | ||
895 | dev) != 0) { | ||
896 | printk(KERN_ERR DRV_MODULE_NAME | ||
897 | ": %s Could not allocate PHY IRQ!", dev->name); | ||
898 | free_irq(fpi->fec_irq, dev); | ||
899 | napi_disable(&fep->napi); | ||
900 | return -EINVAL; | ||
901 | } | ||
902 | |||
903 | if (fpi->use_mdio) { | ||
904 | fec_mii_startup(dev); | ||
905 | netif_carrier_off(dev); | ||
906 | fec_mii_link_status_change_check(dev, 1); | ||
907 | } else { | ||
908 | spin_lock_irqsave(&fep->lock, flags); | ||
909 | fec_restart(dev, 1, 100); /* XXX this sucks */ | ||
910 | spin_unlock_irqrestore(&fep->lock, flags); | ||
911 | |||
912 | netif_carrier_on(dev); | ||
913 | netif_start_queue(dev); | ||
914 | } | ||
915 | return 0; | ||
916 | } | ||
917 | |||
918 | static int fec_enet_close(struct net_device *dev) | ||
919 | { | ||
920 | struct fec_enet_private *fep = netdev_priv(dev); | ||
921 | const struct fec_platform_info *fpi = fep->fpi; | ||
922 | unsigned long flags; | ||
923 | |||
924 | netif_stop_queue(dev); | ||
925 | napi_disable(&fep->napi); | ||
926 | netif_carrier_off(dev); | ||
927 | |||
928 | if (fpi->use_mdio) | ||
929 | fec_mii_shutdown(dev); | ||
930 | |||
931 | spin_lock_irqsave(&fep->lock, flags); | ||
932 | fec_stop(dev); | ||
933 | spin_unlock_irqrestore(&fep->lock, flags); | ||
934 | |||
935 | /* release any irqs */ | ||
936 | if (fpi->phy_irq != -1) | ||
937 | free_irq(fpi->phy_irq, dev); | ||
938 | free_irq(fpi->fec_irq, dev); | ||
939 | |||
940 | return 0; | ||
941 | } | ||
942 | |||
943 | static struct net_device_stats *fec_enet_get_stats(struct net_device *dev) | ||
944 | { | ||
945 | struct fec_enet_private *fep = netdev_priv(dev); | ||
946 | return &fep->stats; | ||
947 | } | ||
948 | |||
949 | static int fec_enet_poll(struct napi_struct *napi, int budget) | ||
950 | { | ||
951 | struct fec_enet_private *fep = container_of(napi, struct fec_enet_private, napi); | ||
952 | struct net_device *dev = fep->dev; | ||
953 | |||
954 | return fec_enet_rx_common(fep, dev, budget); | ||
955 | } | ||
956 | |||
957 | /*************************************************************************/ | ||
958 | |||
959 | static void fec_get_drvinfo(struct net_device *dev, | ||
960 | struct ethtool_drvinfo *info) | ||
961 | { | ||
962 | strcpy(info->driver, DRV_MODULE_NAME); | ||
963 | strcpy(info->version, DRV_MODULE_VERSION); | ||
964 | } | ||
965 | |||
966 | static int fec_get_regs_len(struct net_device *dev) | ||
967 | { | ||
968 | return sizeof(fec_t); | ||
969 | } | ||
970 | |||
971 | static void fec_get_regs(struct net_device *dev, struct ethtool_regs *regs, | ||
972 | void *p) | ||
973 | { | ||
974 | struct fec_enet_private *fep = netdev_priv(dev); | ||
975 | unsigned long flags; | ||
976 | |||
977 | if (regs->len < sizeof(fec_t)) | ||
978 | return; | ||
979 | |||
980 | regs->version = 0; | ||
981 | spin_lock_irqsave(&fep->lock, flags); | ||
982 | memcpy_fromio(p, fep->fecp, sizeof(fec_t)); | ||
983 | spin_unlock_irqrestore(&fep->lock, flags); | ||
984 | } | ||
985 | |||
986 | static int fec_get_settings(struct net_device *dev, struct ethtool_cmd *cmd) | ||
987 | { | ||
988 | struct fec_enet_private *fep = netdev_priv(dev); | ||
989 | unsigned long flags; | ||
990 | int rc; | ||
991 | |||
992 | spin_lock_irqsave(&fep->lock, flags); | ||
993 | rc = mii_ethtool_gset(&fep->mii_if, cmd); | ||
994 | spin_unlock_irqrestore(&fep->lock, flags); | ||
995 | |||
996 | return rc; | ||
997 | } | ||
998 | |||
999 | static int fec_set_settings(struct net_device *dev, struct ethtool_cmd *cmd) | ||
1000 | { | ||
1001 | struct fec_enet_private *fep = netdev_priv(dev); | ||
1002 | unsigned long flags; | ||
1003 | int rc; | ||
1004 | |||
1005 | spin_lock_irqsave(&fep->lock, flags); | ||
1006 | rc = mii_ethtool_sset(&fep->mii_if, cmd); | ||
1007 | spin_unlock_irqrestore(&fep->lock, flags); | ||
1008 | |||
1009 | return rc; | ||
1010 | } | ||
1011 | |||
1012 | static int fec_nway_reset(struct net_device *dev) | ||
1013 | { | ||
1014 | struct fec_enet_private *fep = netdev_priv(dev); | ||
1015 | return mii_nway_restart(&fep->mii_if); | ||
1016 | } | ||
1017 | |||
1018 | static __u32 fec_get_msglevel(struct net_device *dev) | ||
1019 | { | ||
1020 | struct fec_enet_private *fep = netdev_priv(dev); | ||
1021 | return fep->msg_enable; | ||
1022 | } | ||
1023 | |||
1024 | static void fec_set_msglevel(struct net_device *dev, __u32 value) | ||
1025 | { | ||
1026 | struct fec_enet_private *fep = netdev_priv(dev); | ||
1027 | fep->msg_enable = value; | ||
1028 | } | ||
1029 | |||
1030 | static const struct ethtool_ops fec_ethtool_ops = { | ||
1031 | .get_drvinfo = fec_get_drvinfo, | ||
1032 | .get_regs_len = fec_get_regs_len, | ||
1033 | .get_settings = fec_get_settings, | ||
1034 | .set_settings = fec_set_settings, | ||
1035 | .nway_reset = fec_nway_reset, | ||
1036 | .get_link = ethtool_op_get_link, | ||
1037 | .get_msglevel = fec_get_msglevel, | ||
1038 | .set_msglevel = fec_set_msglevel, | ||
1039 | .set_tx_csum = ethtool_op_set_tx_csum, /* local! */ | ||
1040 | .set_sg = ethtool_op_set_sg, | ||
1041 | .get_regs = fec_get_regs, | ||
1042 | }; | ||
1043 | |||
1044 | static int fec_ioctl(struct net_device *dev, struct ifreq *rq, int cmd) | ||
1045 | { | ||
1046 | struct fec_enet_private *fep = netdev_priv(dev); | ||
1047 | struct mii_ioctl_data *mii = (struct mii_ioctl_data *)&rq->ifr_data; | ||
1048 | unsigned long flags; | ||
1049 | int rc; | ||
1050 | |||
1051 | if (!netif_running(dev)) | ||
1052 | return -EINVAL; | ||
1053 | |||
1054 | spin_lock_irqsave(&fep->lock, flags); | ||
1055 | rc = generic_mii_ioctl(&fep->mii_if, mii, cmd, NULL); | ||
1056 | spin_unlock_irqrestore(&fep->lock, flags); | ||
1057 | return rc; | ||
1058 | } | ||
1059 | |||
1060 | int fec_8xx_init_one(const struct fec_platform_info *fpi, | ||
1061 | struct net_device **devp) | ||
1062 | { | ||
1063 | immap_t *immap = (immap_t *) IMAP_ADDR; | ||
1064 | static int fec_8xx_version_printed = 0; | ||
1065 | struct net_device *dev = NULL; | ||
1066 | struct fec_enet_private *fep = NULL; | ||
1067 | fec_t *fecp = NULL; | ||
1068 | int i; | ||
1069 | int err = 0; | ||
1070 | int registered = 0; | ||
1071 | __u32 siel; | ||
1072 | |||
1073 | *devp = NULL; | ||
1074 | |||
1075 | switch (fpi->fec_no) { | ||
1076 | case 0: | ||
1077 | fecp = &((immap_t *) IMAP_ADDR)->im_cpm.cp_fec; | ||
1078 | break; | ||
1079 | #ifdef CONFIG_DUET | ||
1080 | case 1: | ||
1081 | fecp = &((immap_t *) IMAP_ADDR)->im_cpm.cp_fec2; | ||
1082 | break; | ||
1083 | #endif | ||
1084 | default: | ||
1085 | return -EINVAL; | ||
1086 | } | ||
1087 | |||
1088 | if (fec_8xx_version_printed++ == 0) | ||
1089 | printk(KERN_INFO "%s", version); | ||
1090 | |||
1091 | i = sizeof(*fep) + (sizeof(struct sk_buff **) * | ||
1092 | (fpi->rx_ring + fpi->tx_ring)); | ||
1093 | |||
1094 | dev = alloc_etherdev(i); | ||
1095 | if (!dev) { | ||
1096 | err = -ENOMEM; | ||
1097 | goto err; | ||
1098 | } | ||
1099 | |||
1100 | fep = netdev_priv(dev); | ||
1101 | fep->dev = dev; | ||
1102 | |||
1103 | /* partial reset of FEC */ | ||
1104 | fec_whack_reset(fecp); | ||
1105 | |||
1106 | /* point rx_skbuff, tx_skbuff */ | ||
1107 | fep->rx_skbuff = (struct sk_buff **)&fep[1]; | ||
1108 | fep->tx_skbuff = fep->rx_skbuff + fpi->rx_ring; | ||
1109 | |||
1110 | fep->fecp = fecp; | ||
1111 | fep->fpi = fpi; | ||
1112 | |||
1113 | /* init locks */ | ||
1114 | spin_lock_init(&fep->lock); | ||
1115 | spin_lock_init(&fep->tx_lock); | ||
1116 | |||
1117 | /* | ||
1118 | * Set the Ethernet address. | ||
1119 | */ | ||
1120 | for (i = 0; i < 6; i++) | ||
1121 | dev->dev_addr[i] = fpi->macaddr[i]; | ||
1122 | |||
1123 | fep->ring_base = dma_alloc_coherent(NULL, | ||
1124 | (fpi->tx_ring + fpi->rx_ring) * | ||
1125 | sizeof(cbd_t), &fep->ring_mem_addr, | ||
1126 | GFP_KERNEL); | ||
1127 | if (fep->ring_base == NULL) { | ||
1128 | printk(KERN_ERR DRV_MODULE_NAME | ||
1129 | ": %s dma alloc failed.\n", dev->name); | ||
1130 | err = -ENOMEM; | ||
1131 | goto err; | ||
1132 | } | ||
1133 | |||
1134 | /* | ||
1135 | * Set receive and transmit descriptor base. | ||
1136 | */ | ||
1137 | fep->rx_bd_base = fep->ring_base; | ||
1138 | fep->tx_bd_base = fep->rx_bd_base + fpi->rx_ring; | ||
1139 | |||
1140 | /* initialize ring size variables */ | ||
1141 | fep->tx_ring = fpi->tx_ring; | ||
1142 | fep->rx_ring = fpi->rx_ring; | ||
1143 | |||
1144 | /* SIU interrupt */ | ||
1145 | if (fpi->phy_irq != -1 && | ||
1146 | (fpi->phy_irq >= SIU_IRQ0 && fpi->phy_irq < SIU_LEVEL7)) { | ||
1147 | |||
1148 | siel = in_be32(&immap->im_siu_conf.sc_siel); | ||
1149 | if ((fpi->phy_irq & 1) == 0) | ||
1150 | siel |= (0x80000000 >> fpi->phy_irq); | ||
1151 | else | ||
1152 | siel &= ~(0x80000000 >> (fpi->phy_irq & ~1)); | ||
1153 | out_be32(&immap->im_siu_conf.sc_siel, siel); | ||
1154 | } | ||
1155 | |||
1156 | /* | ||
1157 | * The FEC Ethernet specific entries in the device structure. | ||
1158 | */ | ||
1159 | dev->open = fec_enet_open; | ||
1160 | dev->hard_start_xmit = fec_enet_start_xmit; | ||
1161 | dev->tx_timeout = fec_timeout; | ||
1162 | dev->watchdog_timeo = TX_TIMEOUT; | ||
1163 | dev->stop = fec_enet_close; | ||
1164 | dev->get_stats = fec_enet_get_stats; | ||
1165 | dev->set_multicast_list = fec_set_multicast_list; | ||
1166 | dev->set_mac_address = fec_set_mac_address; | ||
1167 | netif_napi_add(dev, &fep->napi, | ||
1168 | fec_enet_poll, fpi->napi_weight); | ||
1169 | |||
1170 | dev->ethtool_ops = &fec_ethtool_ops; | ||
1171 | dev->do_ioctl = fec_ioctl; | ||
1172 | |||
1173 | fep->fec_phy_speed = | ||
1174 | ((((fpi->sys_clk + 4999999) / 2500000) / 2) & 0x3F) << 1; | ||
1175 | |||
1176 | init_timer(&fep->phy_timer_list); | ||
1177 | |||
1178 | /* partial reset of FEC so that only MII works */ | ||
1179 | FW(fecp, mii_speed, fep->fec_phy_speed); | ||
1180 | FW(fecp, ievent, 0xffc0); | ||
1181 | FW(fecp, ivec, (fpi->fec_irq / 2) << 29); | ||
1182 | FW(fecp, imask, 0); | ||
1183 | FW(fecp, r_cntrl, FEC_RCNTRL_MII_MODE); /* MII enable */ | ||
1184 | FW(fecp, ecntrl, FEC_ECNTRL_PINMUX | FEC_ECNTRL_ETHER_EN); | ||
1185 | |||
1186 | netif_carrier_off(dev); | ||
1187 | |||
1188 | err = register_netdev(dev); | ||
1189 | if (err != 0) | ||
1190 | goto err; | ||
1191 | registered = 1; | ||
1192 | |||
1193 | if (fpi->use_mdio) { | ||
1194 | fep->mii_if.dev = dev; | ||
1195 | fep->mii_if.mdio_read = fec_mii_read; | ||
1196 | fep->mii_if.mdio_write = fec_mii_write; | ||
1197 | fep->mii_if.phy_id_mask = 0x1f; | ||
1198 | fep->mii_if.reg_num_mask = 0x1f; | ||
1199 | fep->mii_if.phy_id = fec_mii_phy_id_detect(dev); | ||
1200 | } | ||
1201 | |||
1202 | *devp = dev; | ||
1203 | |||
1204 | return 0; | ||
1205 | |||
1206 | err: | ||
1207 | if (dev != NULL) { | ||
1208 | if (fecp != NULL) | ||
1209 | fec_whack_reset(fecp); | ||
1210 | |||
1211 | if (registered) | ||
1212 | unregister_netdev(dev); | ||
1213 | |||
1214 | if (fep != NULL) { | ||
1215 | if (fep->ring_base) | ||
1216 | dma_free_coherent(NULL, | ||
1217 | (fpi->tx_ring + | ||
1218 | fpi->rx_ring) * | ||
1219 | sizeof(cbd_t), fep->ring_base, | ||
1220 | fep->ring_mem_addr); | ||
1221 | } | ||
1222 | free_netdev(dev); | ||
1223 | } | ||
1224 | return err; | ||
1225 | } | ||
1226 | |||
1227 | int fec_8xx_cleanup_one(struct net_device *dev) | ||
1228 | { | ||
1229 | struct fec_enet_private *fep = netdev_priv(dev); | ||
1230 | fec_t *fecp = fep->fecp; | ||
1231 | const struct fec_platform_info *fpi = fep->fpi; | ||
1232 | |||
1233 | fec_whack_reset(fecp); | ||
1234 | |||
1235 | unregister_netdev(dev); | ||
1236 | |||
1237 | dma_free_coherent(NULL, (fpi->tx_ring + fpi->rx_ring) * sizeof(cbd_t), | ||
1238 | fep->ring_base, fep->ring_mem_addr); | ||
1239 | |||
1240 | free_netdev(dev); | ||
1241 | |||
1242 | return 0; | ||
1243 | } | ||
1244 | |||
1245 | /**************************************************************************************/ | ||
1246 | /**************************************************************************************/ | ||
1247 | /**************************************************************************************/ | ||
1248 | |||
1249 | static int __init fec_8xx_init(void) | ||
1250 | { | ||
1251 | return fec_8xx_platform_init(); | ||
1252 | } | ||
1253 | |||
1254 | static void __exit fec_8xx_cleanup(void) | ||
1255 | { | ||
1256 | fec_8xx_platform_cleanup(); | ||
1257 | } | ||
1258 | |||
1259 | /**************************************************************************************/ | ||
1260 | /**************************************************************************************/ | ||
1261 | /**************************************************************************************/ | ||
1262 | |||
1263 | module_init(fec_8xx_init); | ||
1264 | module_exit(fec_8xx_cleanup); | ||