author		Jonathan Herman <hermanjl@cs.unc.edu>	2013-01-22 10:38:37 -0500
committer	Jonathan Herman <hermanjl@cs.unc.edu>	2013-01-22 10:38:37 -0500
commit		fcc9d2e5a6c89d22b8b773a64fb4ad21ac318446 (patch)
tree		a57612d1888735a2ec7972891b68c1ac5ec8faea /drivers/net/fs_enet/fs_enet-main.c
parent		8dea78da5cee153b8af9c07a2745f6c55057fe12 (diff)
Diffstat (limited to 'drivers/net/fs_enet/fs_enet-main.c')
-rw-r--r--	drivers/net/fs_enet/fs_enet-main.c	1196
1 file changed, 1196 insertions, 0 deletions
diff --git a/drivers/net/fs_enet/fs_enet-main.c b/drivers/net/fs_enet/fs_enet-main.c
new file mode 100644
index 00000000000..329ef231a09
--- /dev/null
+++ b/drivers/net/fs_enet/fs_enet-main.c
@@ -0,0 +1,1196 @@
/*
 * Combined Ethernet driver for Motorola MPC8xx and MPC82xx.
 *
 * Copyright (c) 2003 Intracom S.A.
 *  by Pantelis Antoniou <panto@intracom.gr>
 *
 * 2005 (c) MontaVista Software, Inc.
 * Vitaly Bordug <vbordug@ru.mvista.com>
 *
 * Heavily based on original FEC driver by Dan Malek <dan@embeddededge.com>
 * and modifications by Joakim Tjernlund <joakim.tjernlund@lumentis.se>
 *
 * This file is licensed under the terms of the GNU General Public License
 * version 2. This program is licensed "as is" without any warranty of any
 * kind, whether express or implied.
 */

#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/string.h>
#include <linux/ptrace.h>
#include <linux/errno.h>
#include <linux/ioport.h>
#include <linux/slab.h>
#include <linux/interrupt.h>
#include <linux/init.h>
#include <linux/delay.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/skbuff.h>
#include <linux/spinlock.h>
#include <linux/mii.h>
#include <linux/ethtool.h>
#include <linux/bitops.h>
#include <linux/fs.h>
#include <linux/platform_device.h>
#include <linux/phy.h>
#include <linux/of.h>
#include <linux/of_mdio.h>
#include <linux/of_platform.h>
#include <linux/of_gpio.h>
#include <linux/of_net.h>

#include <linux/vmalloc.h>
#include <asm/pgtable.h>
#include <asm/irq.h>
#include <asm/uaccess.h>

#include "fs_enet.h"

/*************************************************/

MODULE_AUTHOR("Pantelis Antoniou <panto@intracom.gr>");
MODULE_DESCRIPTION("Freescale Ethernet Driver");
MODULE_LICENSE("GPL");
MODULE_VERSION(DRV_MODULE_VERSION);

static int fs_enet_debug = -1; /* -1 == use FS_ENET_DEF_MSG_ENABLE as value */
module_param(fs_enet_debug, int, 0);
MODULE_PARM_DESC(fs_enet_debug,
		 "Freescale bitmapped debugging message enable value");

#ifdef CONFIG_NET_POLL_CONTROLLER
static void fs_enet_netpoll(struct net_device *dev);
#endif

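/*
 * Multicast filtering is MAC-specific (SCC, FCC, or FEC), so this wrapper
 * simply dispatches to the per-controller implementation in fep->ops.
 */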
static void fs_set_multicast_list(struct net_device *dev)
{
	struct fs_enet_private *fep = netdev_priv(dev);

	(*fep->ops->set_multicast_list)(dev);
}

static void skb_align(struct sk_buff *skb, int align)
{
	int off = ((unsigned long)skb->data) & (align - 1);

	if (off)
		skb_reserve(skb, align - off);
}

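/*
 * Receive path overview: both the NAPI and non-NAPI variants below walk the
 * rx buffer-descriptor (BD) ring until they reach a descriptor still marked
 * empty.  Frames at or below rx_copybreak are copied into a small fresh skb
 * so the full-size receive buffer can be recycled in place; larger frames
 * are handed up the stack and replaced with a newly allocated buffer.
 */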
/* NAPI receive function */
static int fs_enet_rx_napi(struct napi_struct *napi, int budget)
{
	struct fs_enet_private *fep = container_of(napi, struct fs_enet_private, napi);
	struct net_device *dev = fep->ndev;
	const struct fs_platform_info *fpi = fep->fpi;
	cbd_t __iomem *bdp;
	struct sk_buff *skb, *skbn, *skbt;
	int received = 0;
	u16 pkt_len, sc;
	int curidx;

	/*
	 * First, grab all of the stats for the incoming packet.
	 * These get messed up if we get called due to a busy condition.
	 */
	bdp = fep->cur_rx;

	/* clear RX status bits for napi */
	(*fep->ops->napi_clear_rx_event)(dev);

	while (((sc = CBDR_SC(bdp)) & BD_ENET_RX_EMPTY) == 0) {
		curidx = bdp - fep->rx_bd_base;

		/*
		 * Since we have allocated space to hold a complete frame,
		 * the last indicator should be set.
		 */
		if ((sc & BD_ENET_RX_LAST) == 0)
			dev_warn(fep->dev, "rcv is not +last\n");

		/*
		 * Check for errors.
		 */
		if (sc & (BD_ENET_RX_LG | BD_ENET_RX_SH | BD_ENET_RX_CL |
			  BD_ENET_RX_NO | BD_ENET_RX_CR | BD_ENET_RX_OV)) {
			fep->stats.rx_errors++;
			/* Frame too long or too short. */
			if (sc & (BD_ENET_RX_LG | BD_ENET_RX_SH))
				fep->stats.rx_length_errors++;
			/* Frame alignment */
			if (sc & (BD_ENET_RX_NO | BD_ENET_RX_CL))
				fep->stats.rx_frame_errors++;
			/* CRC Error */
			if (sc & BD_ENET_RX_CR)
				fep->stats.rx_crc_errors++;
			/* FIFO overrun */
			if (sc & BD_ENET_RX_OV)
				fep->stats.rx_fifo_errors++;

			skb = fep->rx_skbuff[curidx];

			dma_unmap_single(fep->dev, CBDR_BUFADDR(bdp),
				L1_CACHE_ALIGN(PKT_MAXBUF_SIZE),
				DMA_FROM_DEVICE);

			skbn = skb;

		} else {
			skb = fep->rx_skbuff[curidx];

			dma_unmap_single(fep->dev, CBDR_BUFADDR(bdp),
				L1_CACHE_ALIGN(PKT_MAXBUF_SIZE),
				DMA_FROM_DEVICE);

			/*
			 * Process the incoming frame.
			 */
			fep->stats.rx_packets++;
			pkt_len = CBDR_DATLEN(bdp) - 4;	/* remove CRC */
			fep->stats.rx_bytes += pkt_len + 4;
			if (pkt_len <= fpi->rx_copybreak) {
				/* +2 to make IP header L1 cache aligned */
				skbn = dev_alloc_skb(pkt_len + 2);
				if (skbn != NULL) {
					skb_reserve(skbn, 2);	/* align IP header */
					skb_copy_from_linear_data(skb,
						      skbn->data, pkt_len);
					/* swap */
					skbt = skb;
					skb = skbn;
					skbn = skbt;
				}
			} else {
				skbn = dev_alloc_skb(ENET_RX_FRSIZE);

				if (skbn)
					skb_align(skbn, ENET_RX_ALIGN);
			}

			if (skbn != NULL) {
				skb_put(skb, pkt_len);	/* Make room */
				skb->protocol = eth_type_trans(skb, dev);
				received++;
				netif_receive_skb(skb);
			} else {
				dev_warn(fep->dev,
					 "Memory squeeze, dropping packet.\n");
				fep->stats.rx_dropped++;
				skbn = skb;
			}
		}

		fep->rx_skbuff[curidx] = skbn;
		CBDW_BUFADDR(bdp, dma_map_single(fep->dev, skbn->data,
			     L1_CACHE_ALIGN(PKT_MAXBUF_SIZE),
			     DMA_FROM_DEVICE));
		CBDW_DATLEN(bdp, 0);
		CBDW_SC(bdp, (sc & ~BD_ENET_RX_STATS) | BD_ENET_RX_EMPTY);

		/*
		 * Update BD pointer to next entry.
		 */
		if ((sc & BD_ENET_RX_WRAP) == 0)
			bdp++;
		else
			bdp = fep->rx_bd_base;

		(*fep->ops->rx_bd_done)(dev);

		if (received >= budget)
			break;
	}

	fep->cur_rx = bdp;

	if (received < budget) {
		/* done */
		napi_complete(napi);
		(*fep->ops->napi_enable_rx)(dev);
	}
	return received;
}

/* non NAPI receive function */
static int fs_enet_rx_non_napi(struct net_device *dev)
{
	struct fs_enet_private *fep = netdev_priv(dev);
	const struct fs_platform_info *fpi = fep->fpi;
	cbd_t __iomem *bdp;
	struct sk_buff *skb, *skbn, *skbt;
	int received = 0;
	u16 pkt_len, sc;
	int curidx;
	/*
	 * First, grab all of the stats for the incoming packet.
	 * These get messed up if we get called due to a busy condition.
	 */
	bdp = fep->cur_rx;

	while (((sc = CBDR_SC(bdp)) & BD_ENET_RX_EMPTY) == 0) {

		curidx = bdp - fep->rx_bd_base;

		/*
		 * Since we have allocated space to hold a complete frame,
		 * the last indicator should be set.
		 */
		if ((sc & BD_ENET_RX_LAST) == 0)
			dev_warn(fep->dev, "rcv is not +last\n");

		/*
		 * Check for errors.
		 */
		if (sc & (BD_ENET_RX_LG | BD_ENET_RX_SH | BD_ENET_RX_CL |
			  BD_ENET_RX_NO | BD_ENET_RX_CR | BD_ENET_RX_OV)) {
			fep->stats.rx_errors++;
			/* Frame too long or too short. */
			if (sc & (BD_ENET_RX_LG | BD_ENET_RX_SH))
				fep->stats.rx_length_errors++;
			/* Frame alignment */
			if (sc & (BD_ENET_RX_NO | BD_ENET_RX_CL))
				fep->stats.rx_frame_errors++;
			/* CRC Error */
			if (sc & BD_ENET_RX_CR)
				fep->stats.rx_crc_errors++;
			/* FIFO overrun */
			if (sc & BD_ENET_RX_OV)
				fep->stats.rx_fifo_errors++;

			skb = fep->rx_skbuff[curidx];

			dma_unmap_single(fep->dev, CBDR_BUFADDR(bdp),
				L1_CACHE_ALIGN(PKT_MAXBUF_SIZE),
				DMA_FROM_DEVICE);

			skbn = skb;

		} else {

			skb = fep->rx_skbuff[curidx];

			dma_unmap_single(fep->dev, CBDR_BUFADDR(bdp),
				L1_CACHE_ALIGN(PKT_MAXBUF_SIZE),
				DMA_FROM_DEVICE);

			/*
			 * Process the incoming frame.
			 */
			fep->stats.rx_packets++;
			pkt_len = CBDR_DATLEN(bdp) - 4;	/* remove CRC */
			fep->stats.rx_bytes += pkt_len + 4;

			if (pkt_len <= fpi->rx_copybreak) {
				/* +2 to make IP header L1 cache aligned */
				skbn = dev_alloc_skb(pkt_len + 2);
				if (skbn != NULL) {
					skb_reserve(skbn, 2);	/* align IP header */
					skb_copy_from_linear_data(skb,
						      skbn->data, pkt_len);
					/* swap */
					skbt = skb;
					skb = skbn;
					skbn = skbt;
				}
			} else {
				skbn = dev_alloc_skb(ENET_RX_FRSIZE);

				if (skbn)
					skb_align(skbn, ENET_RX_ALIGN);
			}

			if (skbn != NULL) {
				skb_put(skb, pkt_len);	/* Make room */
				skb->protocol = eth_type_trans(skb, dev);
				received++;
				netif_rx(skb);
			} else {
				dev_warn(fep->dev,
					 "Memory squeeze, dropping packet.\n");
				fep->stats.rx_dropped++;
				skbn = skb;
			}
		}

		fep->rx_skbuff[curidx] = skbn;
		CBDW_BUFADDR(bdp, dma_map_single(fep->dev, skbn->data,
			     L1_CACHE_ALIGN(PKT_MAXBUF_SIZE),
			     DMA_FROM_DEVICE));
		CBDW_DATLEN(bdp, 0);
		CBDW_SC(bdp, (sc & ~BD_ENET_RX_STATS) | BD_ENET_RX_EMPTY);

		/*
		 * Update BD pointer to next entry.
		 */
		if ((sc & BD_ENET_RX_WRAP) == 0)
			bdp++;
		else
			bdp = fep->rx_bd_base;

		(*fep->ops->rx_bd_done)(dev);
	}

	fep->cur_rx = bdp;

	return 0;
}

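/*
 * Transmit completion: reclaim descriptors from dirty_tx up to the first
 * one still owned by the controller, record error statistics, unmap and
 * free the completed skbs, and wake the queue if the ring had been full.
 */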
static void fs_enet_tx(struct net_device *dev)
{
	struct fs_enet_private *fep = netdev_priv(dev);
	cbd_t __iomem *bdp;
	struct sk_buff *skb;
	int dirtyidx, do_wake, do_restart;
	u16 sc;

	spin_lock(&fep->tx_lock);
	bdp = fep->dirty_tx;

	do_wake = do_restart = 0;
	while (((sc = CBDR_SC(bdp)) & BD_ENET_TX_READY) == 0) {
		dirtyidx = bdp - fep->tx_bd_base;

		if (fep->tx_free == fep->tx_ring)
			break;

		skb = fep->tx_skbuff[dirtyidx];

		/*
		 * Check for errors.
		 */
		if (sc & (BD_ENET_TX_HB | BD_ENET_TX_LC |
			  BD_ENET_TX_RL | BD_ENET_TX_UN | BD_ENET_TX_CSL)) {

			if (sc & BD_ENET_TX_HB)	/* No heartbeat */
				fep->stats.tx_heartbeat_errors++;
			if (sc & BD_ENET_TX_LC)	/* Late collision */
				fep->stats.tx_window_errors++;
			if (sc & BD_ENET_TX_RL)	/* Retrans limit */
				fep->stats.tx_aborted_errors++;
			if (sc & BD_ENET_TX_UN)	/* Underrun */
				fep->stats.tx_fifo_errors++;
			if (sc & BD_ENET_TX_CSL)	/* Carrier lost */
				fep->stats.tx_carrier_errors++;

			if (sc & (BD_ENET_TX_LC | BD_ENET_TX_RL | BD_ENET_TX_UN)) {
				fep->stats.tx_errors++;
				do_restart = 1;
			}
		} else
			fep->stats.tx_packets++;

		if (sc & BD_ENET_TX_READY) {
			dev_warn(fep->dev,
				 "HEY! Enet xmit interrupt and TX_READY.\n");
		}

		/*
		 * Deferred means some collisions occurred during transmit,
		 * but we eventually sent the packet OK.
		 */
		if (sc & BD_ENET_TX_DEF)
			fep->stats.collisions++;

		/* unmap */
		dma_unmap_single(fep->dev, CBDR_BUFADDR(bdp),
				 skb->len, DMA_TO_DEVICE);

		/*
		 * Free the sk buffer associated with this last transmit.
		 */
		dev_kfree_skb_irq(skb);
		fep->tx_skbuff[dirtyidx] = NULL;

		/*
		 * Update pointer to next buffer descriptor to be transmitted.
		 */
		if ((sc & BD_ENET_TX_WRAP) == 0)
			bdp++;
		else
			bdp = fep->tx_bd_base;

		/*
		 * Since we have freed up a buffer, the ring is no longer
		 * full.
		 */
		if (!fep->tx_free++)
			do_wake = 1;
	}

	fep->dirty_tx = bdp;

	if (do_restart)
		(*fep->ops->tx_restart)(dev);

	spin_unlock(&fep->tx_lock);

	if (do_wake)
		netif_wake_queue(dev);
}

/*
 * The interrupt handler.
 * This is called from the MPC core interrupt.
 */
static irqreturn_t
fs_enet_interrupt(int irq, void *dev_id)
{
	struct net_device *dev = dev_id;
	struct fs_enet_private *fep;
	const struct fs_platform_info *fpi;
	u32 int_events;
	u32 int_clr_events;
	int nr, napi_ok;
	int handled;

	fep = netdev_priv(dev);
	fpi = fep->fpi;

	nr = 0;
	while ((int_events = (*fep->ops->get_int_events)(dev)) != 0) {
		nr++;

		int_clr_events = int_events;
		if (fpi->use_napi)
			int_clr_events &= ~fep->ev_napi_rx;

		(*fep->ops->clear_int_events)(dev, int_clr_events);

		if (int_events & fep->ev_err)
			(*fep->ops->ev_error)(dev, int_events);

		if (int_events & fep->ev_rx) {
			if (!fpi->use_napi)
				fs_enet_rx_non_napi(dev);
			else {
				napi_ok = napi_schedule_prep(&fep->napi);

				(*fep->ops->napi_disable_rx)(dev);
				(*fep->ops->clear_int_events)(dev, fep->ev_napi_rx);

				/* NOTE: it is possible for FCCs in NAPI mode */
				/* to submit a spurious interrupt while in poll */
				if (napi_ok)
					__napi_schedule(&fep->napi);
			}
		}

		if (int_events & fep->ev_tx)
			fs_enet_tx(dev);
	}

	handled = nr > 0;
	return IRQ_RETVAL(handled);
}

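/*
 * (Re)build both BD rings: every rx descriptor gets a freshly mapped
 * full-size skb and is marked empty, while tx descriptors start out clear
 * and unowned.  The last descriptor in each ring carries the WRAP bit so
 * the controller loops back to the ring base.
 */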
void fs_init_bds(struct net_device *dev)
{
	struct fs_enet_private *fep = netdev_priv(dev);
	cbd_t __iomem *bdp;
	struct sk_buff *skb;
	int i;

	fs_cleanup_bds(dev);

	fep->dirty_tx = fep->cur_tx = fep->tx_bd_base;
	fep->tx_free = fep->tx_ring;
	fep->cur_rx = fep->rx_bd_base;

	/*
	 * Initialize the receive buffer descriptors.
	 */
	for (i = 0, bdp = fep->rx_bd_base; i < fep->rx_ring; i++, bdp++) {
		skb = dev_alloc_skb(ENET_RX_FRSIZE);
		if (skb == NULL) {
			dev_warn(fep->dev,
				 "Memory squeeze, unable to allocate skb\n");
			break;
		}
		skb_align(skb, ENET_RX_ALIGN);
		fep->rx_skbuff[i] = skb;
		CBDW_BUFADDR(bdp,
			dma_map_single(fep->dev, skb->data,
				L1_CACHE_ALIGN(PKT_MAXBUF_SIZE),
				DMA_FROM_DEVICE));
		CBDW_DATLEN(bdp, 0);	/* zero */
		CBDW_SC(bdp, BD_ENET_RX_EMPTY |
			((i < fep->rx_ring - 1) ? 0 : BD_SC_WRAP));
	}
	/*
	 * If an allocation failed, fill up the remainder of the ring
	 * with descriptors that carry no buffer.
	 */
	for (; i < fep->rx_ring; i++, bdp++) {
		fep->rx_skbuff[i] = NULL;
		CBDW_SC(bdp, (i < fep->rx_ring - 1) ? 0 : BD_SC_WRAP);
	}

	/*
	 * ...and the same for transmit.
	 */
	for (i = 0, bdp = fep->tx_bd_base; i < fep->tx_ring; i++, bdp++) {
		fep->tx_skbuff[i] = NULL;
		CBDW_BUFADDR(bdp, 0);
		CBDW_DATLEN(bdp, 0);
		CBDW_SC(bdp, (i < fep->tx_ring - 1) ? 0 : BD_SC_WRAP);
	}
}

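/*
 * Unmap and free every skb still attached to either ring; fs_init_bds()
 * calls this first so it always starts from a clean slate.
 */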
void fs_cleanup_bds(struct net_device *dev)
{
	struct fs_enet_private *fep = netdev_priv(dev);
	struct sk_buff *skb;
	cbd_t __iomem *bdp;
	int i;

	/*
	 * Reset SKB transmit buffers.
	 */
	for (i = 0, bdp = fep->tx_bd_base; i < fep->tx_ring; i++, bdp++) {
		if ((skb = fep->tx_skbuff[i]) == NULL)
			continue;

		/* unmap */
		dma_unmap_single(fep->dev, CBDR_BUFADDR(bdp),
				 skb->len, DMA_TO_DEVICE);

		fep->tx_skbuff[i] = NULL;
		dev_kfree_skb(skb);
	}

	/*
	 * Reset SKB receive buffers
	 */
	for (i = 0, bdp = fep->rx_bd_base; i < fep->rx_ring; i++, bdp++) {
		if ((skb = fep->rx_skbuff[i]) == NULL)
			continue;

		/* unmap */
		dma_unmap_single(fep->dev, CBDR_BUFADDR(bdp),
				 L1_CACHE_ALIGN(PKT_MAXBUF_SIZE),
				 DMA_FROM_DEVICE);

		fep->rx_skbuff[i] = NULL;

		dev_kfree_skb(skb);
	}
}

/**********************************************************************************/

#ifdef CONFIG_FS_ENET_MPC5121_FEC
/*
 * The MPC5121 FEC requires 4-byte alignment for the TX data buffer!
 */
static struct sk_buff *tx_skb_align_workaround(struct net_device *dev,
					       struct sk_buff *skb)
{
	struct sk_buff *new_skb;
	struct fs_enet_private *fep = netdev_priv(dev);

	/* Alloc new skb */
	new_skb = dev_alloc_skb(skb->len + 4);
	if (!new_skb) {
		if (net_ratelimit()) {
			dev_warn(fep->dev,
				 "Memory squeeze, dropping tx packet.\n");
		}
		return NULL;
	}

	/* Make sure new skb is properly aligned */
	skb_align(new_skb, 4);

	/* Copy data to new skb ... */
	skb_copy_from_linear_data(skb, new_skb->data, skb->len);
	skb_put(new_skb, skb->len);

	/* ... and free an old one */
	dev_kfree_skb_any(skb);

	return new_skb;
}
#endif

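/*
 * Queue one frame for transmission: claim the descriptor at cur_tx, map the
 * skb for DMA, and hand ownership to the controller by setting TX_READY.
 * The queue is stopped when the last free descriptor is consumed;
 * fs_enet_tx() wakes it again once completions free ring space.
 */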
static int fs_enet_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
	struct fs_enet_private *fep = netdev_priv(dev);
	cbd_t __iomem *bdp;
	int curidx;
	u16 sc;
	unsigned long flags;

#ifdef CONFIG_FS_ENET_MPC5121_FEC
	if (((unsigned long)skb->data) & 0x3) {
		skb = tx_skb_align_workaround(dev, skb);
		if (!skb) {
			/*
			 * We have lost the packet due to a memory allocation
			 * error in tx_skb_align_workaround(). Hopefully the
			 * original skb is still valid, so try to transmit it
			 * later.
			 */
			return NETDEV_TX_BUSY;
		}
	}
#endif
	spin_lock_irqsave(&fep->tx_lock, flags);

	/*
	 * Fill in a Tx ring entry.
	 */
	bdp = fep->cur_tx;

	if (!fep->tx_free || (CBDR_SC(bdp) & BD_ENET_TX_READY)) {
		netif_stop_queue(dev);
		spin_unlock_irqrestore(&fep->tx_lock, flags);

		/*
		 * Oops. All transmit buffers are full. Bail out.
		 * This should not happen, since the tx queue should be stopped.
		 */
		dev_warn(fep->dev, "tx queue full!\n");
		return NETDEV_TX_BUSY;
	}

	curidx = bdp - fep->tx_bd_base;
	/*
	 * Clear all of the status flags.
	 */
	CBDC_SC(bdp, BD_ENET_TX_STATS);

	/*
	 * Save skb pointer.
	 */
	fep->tx_skbuff[curidx] = skb;

	fep->stats.tx_bytes += skb->len;

	/*
	 * Push the data cache so the CPM does not get stale memory data.
	 */
	CBDW_BUFADDR(bdp, dma_map_single(fep->dev,
				skb->data, skb->len, DMA_TO_DEVICE));
	CBDW_DATLEN(bdp, skb->len);

	/*
	 * If this was the last BD in the ring, start at the beginning again.
	 */
	if ((CBDR_SC(bdp) & BD_ENET_TX_WRAP) == 0)
		fep->cur_tx++;
	else
		fep->cur_tx = fep->tx_bd_base;

	if (!--fep->tx_free)
		netif_stop_queue(dev);

	/* Trigger transmission start */
	sc = BD_ENET_TX_READY | BD_ENET_TX_INTR |
	     BD_ENET_TX_LAST | BD_ENET_TX_TC;

	/* note that while FEC does not have this bit
	 * it marks it as available for software use
	 * yay for hw reuse :) */
	if (skb->len <= 60)
		sc |= BD_ENET_TX_PAD;
	CBDS_SC(bdp, sc);

	skb_tx_timestamp(skb);

	(*fep->ops->tx_kickstart)(dev);

	spin_unlock_irqrestore(&fep->tx_lock, flags);

	return NETDEV_TX_OK;
}

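/*
 * Transmit watchdog, invoked by the networking core when the tx queue has
 * been stopped for longer than watchdog_timeo: restart the MAC and PHY,
 * then wake the queue if ring space is available.
 */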
static void fs_timeout(struct net_device *dev)
{
	struct fs_enet_private *fep = netdev_priv(dev);
	unsigned long flags;
	int wake = 0;

	fep->stats.tx_errors++;

	spin_lock_irqsave(&fep->lock, flags);

	if (dev->flags & IFF_UP) {
		phy_stop(fep->phydev);
		(*fep->ops->stop)(dev);
		(*fep->ops->restart)(dev);
		phy_start(fep->phydev);
	}

	wake = fep->tx_free && !(CBDR_SC(fep->cur_tx) & BD_ENET_TX_READY);
	spin_unlock_irqrestore(&fep->lock, flags);

	if (wake)
		netif_wake_queue(dev);
}

/*-----------------------------------------------------------------------------
 * generic link-change handler - should be sufficient for most cases
 *-----------------------------------------------------------------------------*/
static void generic_adjust_link(struct net_device *dev)
{
	struct fs_enet_private *fep = netdev_priv(dev);
	struct phy_device *phydev = fep->phydev;
	int new_state = 0;

	if (phydev->link) {
		/* adjust to duplex mode */
		if (phydev->duplex != fep->oldduplex) {
			new_state = 1;
			fep->oldduplex = phydev->duplex;
		}

		if (phydev->speed != fep->oldspeed) {
			new_state = 1;
			fep->oldspeed = phydev->speed;
		}

		if (!fep->oldlink) {
			new_state = 1;
			fep->oldlink = 1;
		}

		if (new_state)
			fep->ops->restart(dev);
	} else if (fep->oldlink) {
		new_state = 1;
		fep->oldlink = 0;
		fep->oldspeed = 0;
		fep->oldduplex = -1;
	}

	if (new_state && netif_msg_link(fep))
		phy_print_status(phydev);
}


static void fs_adjust_link(struct net_device *dev)
{
	struct fs_enet_private *fep = netdev_priv(dev);
	unsigned long flags;

	spin_lock_irqsave(&fep->lock, flags);

	if (fep->ops->adjust_link)
		fep->ops->adjust_link(dev);
	else
		generic_adjust_link(dev);

	spin_unlock_irqrestore(&fep->lock, flags);
}

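/*
 * Bind the interface to the PHY named by the "phy-handle" device-tree
 * property, falling back to a fixed link when none is given;
 * fs_adjust_link() is registered as the link-state change callback.
 */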
static int fs_init_phy(struct net_device *dev)
{
	struct fs_enet_private *fep = netdev_priv(dev);
	struct phy_device *phydev;

	fep->oldlink = 0;
	fep->oldspeed = 0;
	fep->oldduplex = -1;

	phydev = of_phy_connect(dev, fep->fpi->phy_node, &fs_adjust_link, 0,
				PHY_INTERFACE_MODE_MII);
	if (!phydev) {
		phydev = of_phy_connect_fixed_link(dev, &fs_adjust_link,
						   PHY_INTERFACE_MODE_MII);
	}
	if (!phydev) {
		dev_err(&dev->dev, "Could not attach to PHY\n");
		return -ENODEV;
	}

	fep->phydev = phydev;

	return 0;
}

static int fs_enet_open(struct net_device *dev)
{
	struct fs_enet_private *fep = netdev_priv(dev);
	int r;
	int err;

	/*
	 * Initialize fep->cur_rx and the buffer descriptor rings;
	 * skipping this would cause a crash in fs_enet_rx_napi().
	 */
	fs_init_bds(fep->ndev);

	if (fep->fpi->use_napi)
		napi_enable(&fep->napi);

	/* Install our interrupt handler. */
	r = request_irq(fep->interrupt, fs_enet_interrupt, IRQF_SHARED,
			"fs_enet-mac", dev);
	if (r != 0) {
		dev_err(fep->dev, "Could not allocate FS_ENET IRQ!");
		if (fep->fpi->use_napi)
			napi_disable(&fep->napi);
		return -EINVAL;
	}

	err = fs_init_phy(dev);
	if (err) {
		free_irq(fep->interrupt, dev);
		if (fep->fpi->use_napi)
			napi_disable(&fep->napi);
		return err;
	}
	phy_start(fep->phydev);

	netif_start_queue(dev);

	return 0;
}

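/*
 * Tear down in roughly the reverse order of fs_enet_open(): quiesce the
 * queue and NAPI, stop the PHY and the MAC under the locks, then release
 * the PHY and the IRQ.
 */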
static int fs_enet_close(struct net_device *dev)
{
	struct fs_enet_private *fep = netdev_priv(dev);
	unsigned long flags;

	netif_stop_queue(dev);
	netif_carrier_off(dev);
	if (fep->fpi->use_napi)
		napi_disable(&fep->napi);
	phy_stop(fep->phydev);

	spin_lock_irqsave(&fep->lock, flags);
	spin_lock(&fep->tx_lock);
	(*fep->ops->stop)(dev);
	spin_unlock(&fep->tx_lock);
	spin_unlock_irqrestore(&fep->lock, flags);

	/* release any irqs */
	phy_disconnect(fep->phydev);
	fep->phydev = NULL;
	free_irq(fep->interrupt, dev);

	return 0;
}

static struct net_device_stats *fs_enet_get_stats(struct net_device *dev)
{
	struct fs_enet_private *fep = netdev_priv(dev);
	return &fep->stats;
}

/*************************************************************************/

static void fs_get_drvinfo(struct net_device *dev,
			   struct ethtool_drvinfo *info)
{
	strcpy(info->driver, DRV_MODULE_NAME);
	strcpy(info->version, DRV_MODULE_VERSION);
}

static int fs_get_regs_len(struct net_device *dev)
{
	struct fs_enet_private *fep = netdev_priv(dev);

	return (*fep->ops->get_regs_len)(dev);
}

static void fs_get_regs(struct net_device *dev, struct ethtool_regs *regs,
			void *p)
{
	struct fs_enet_private *fep = netdev_priv(dev);
	unsigned long flags;
	int r, len;

	len = regs->len;

	spin_lock_irqsave(&fep->lock, flags);
	r = (*fep->ops->get_regs)(dev, p, &len);
	spin_unlock_irqrestore(&fep->lock, flags);

	if (r == 0)
		regs->version = 0;
}

static int fs_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
{
	struct fs_enet_private *fep = netdev_priv(dev);

	if (!fep->phydev)
		return -ENODEV;

	return phy_ethtool_gset(fep->phydev, cmd);
}

static int fs_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
{
	struct fs_enet_private *fep = netdev_priv(dev);

	if (!fep->phydev)
		return -ENODEV;

	return phy_ethtool_sset(fep->phydev, cmd);
}

static int fs_nway_reset(struct net_device *dev)
{
	return 0;
}

static u32 fs_get_msglevel(struct net_device *dev)
{
	struct fs_enet_private *fep = netdev_priv(dev);
	return fep->msg_enable;
}

static void fs_set_msglevel(struct net_device *dev, u32 value)
{
	struct fs_enet_private *fep = netdev_priv(dev);
	fep->msg_enable = value;
}

static const struct ethtool_ops fs_ethtool_ops = {
	.get_drvinfo = fs_get_drvinfo,
	.get_regs_len = fs_get_regs_len,
	.get_settings = fs_get_settings,
	.set_settings = fs_set_settings,
	.nway_reset = fs_nway_reset,
	.get_link = ethtool_op_get_link,
	.get_msglevel = fs_get_msglevel,
	.set_msglevel = fs_set_msglevel,
	.get_regs = fs_get_regs,
};

static int fs_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
{
	struct fs_enet_private *fep = netdev_priv(dev);

	if (!netif_running(dev))
		return -EINVAL;

	return phy_mii_ioctl(fep->phydev, rq, cmd);
}

extern int fs_mii_connect(struct net_device *dev);
extern void fs_mii_disconnect(struct net_device *dev);

/**************************************************************************************/

#ifdef CONFIG_FS_ENET_HAS_FEC
#define IS_FEC(match) ((match)->data == &fs_fec_ops)
#else
#define IS_FEC(match) 0
#endif

static const struct net_device_ops fs_enet_netdev_ops = {
	.ndo_open		= fs_enet_open,
	.ndo_stop		= fs_enet_close,
	.ndo_get_stats		= fs_enet_get_stats,
	.ndo_start_xmit		= fs_enet_start_xmit,
	.ndo_tx_timeout		= fs_timeout,
	.ndo_set_multicast_list	= fs_set_multicast_list,
	.ndo_do_ioctl		= fs_ioctl,
	.ndo_validate_addr	= eth_validate_addr,
	.ndo_set_mac_address	= eth_mac_addr,
	.ndo_change_mtu		= eth_change_mtu,
#ifdef CONFIG_NET_POLL_CONTROLLER
	.ndo_poll_controller	= fs_enet_netpoll,
#endif
};

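/*
 * Probe: match the OF node to a controller type (SCC/FCC/FEC), allocate
 * the net_device with the rx/tx skb pointer arrays appended to the private
 * area, read the MAC address from the device tree, allocate the BD rings,
 * and register the interface.
 */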
static struct of_device_id fs_enet_match[];
static int __devinit fs_enet_probe(struct platform_device *ofdev)
{
	const struct of_device_id *match;
	struct net_device *ndev;
	struct fs_enet_private *fep;
	struct fs_platform_info *fpi;
	const u32 *data;
	const u8 *mac_addr;
	int privsize, len, ret = -ENODEV;

	match = of_match_device(fs_enet_match, &ofdev->dev);
	if (!match)
		return -EINVAL;

	fpi = kzalloc(sizeof(*fpi), GFP_KERNEL);
	if (!fpi)
		return -ENOMEM;

	if (!IS_FEC(match)) {
		data = of_get_property(ofdev->dev.of_node, "fsl,cpm-command", &len);
		if (!data || len != 4)
			goto out_free_fpi;

		fpi->cp_command = *data;
	}

	fpi->rx_ring = 32;
	fpi->tx_ring = 32;
	fpi->rx_copybreak = 240;
	fpi->use_napi = 1;
	fpi->napi_weight = 17;
	fpi->phy_node = of_parse_phandle(ofdev->dev.of_node, "phy-handle", 0);
	if ((!fpi->phy_node) && (!of_get_property(ofdev->dev.of_node, "fixed-link",
						  NULL)))
		goto out_free_fpi;

	privsize = sizeof(*fep) +
		   sizeof(struct sk_buff **) *
		   (fpi->rx_ring + fpi->tx_ring);

	ndev = alloc_etherdev(privsize);
	if (!ndev) {
		ret = -ENOMEM;
		goto out_put;
	}

	SET_NETDEV_DEV(ndev, &ofdev->dev);
	dev_set_drvdata(&ofdev->dev, ndev);

	fep = netdev_priv(ndev);
	fep->dev = &ofdev->dev;
	fep->ndev = ndev;
	fep->fpi = fpi;
	fep->ops = match->data;

	ret = fep->ops->setup_data(ndev);
	if (ret)
		goto out_free_dev;

	fep->rx_skbuff = (struct sk_buff **)&fep[1];
	fep->tx_skbuff = fep->rx_skbuff + fpi->rx_ring;

	spin_lock_init(&fep->lock);
	spin_lock_init(&fep->tx_lock);

	mac_addr = of_get_mac_address(ofdev->dev.of_node);
	if (mac_addr)
		memcpy(ndev->dev_addr, mac_addr, 6);

	ret = fep->ops->allocate_bd(ndev);
	if (ret)
		goto out_cleanup_data;

	fep->rx_bd_base = fep->ring_base;
	fep->tx_bd_base = fep->rx_bd_base + fpi->rx_ring;

	fep->tx_ring = fpi->tx_ring;
	fep->rx_ring = fpi->rx_ring;

	ndev->netdev_ops = &fs_enet_netdev_ops;
	ndev->watchdog_timeo = 2 * HZ;
	if (fpi->use_napi)
		netif_napi_add(ndev, &fep->napi, fs_enet_rx_napi,
			       fpi->napi_weight);

	ndev->ethtool_ops = &fs_ethtool_ops;

	init_timer(&fep->phy_timer_list);

	netif_carrier_off(ndev);

	ret = register_netdev(ndev);
	if (ret)
		goto out_free_bd;

	pr_info("%s: fs_enet: %pM\n", ndev->name, ndev->dev_addr);

	return 0;

out_free_bd:
	fep->ops->free_bd(ndev);
out_cleanup_data:
	fep->ops->cleanup_data(ndev);
out_free_dev:
	free_netdev(ndev);
	dev_set_drvdata(&ofdev->dev, NULL);
out_put:
	of_node_put(fpi->phy_node);
out_free_fpi:
	kfree(fpi);
	return ret;
}

static int fs_enet_remove(struct platform_device *ofdev)
{
	struct net_device *ndev = dev_get_drvdata(&ofdev->dev);
	struct fs_enet_private *fep = netdev_priv(ndev);

	unregister_netdev(ndev);

	fep->ops->free_bd(ndev);
	fep->ops->cleanup_data(ndev);
	dev_set_drvdata(fep->dev, NULL);
	of_node_put(fep->fpi->phy_node);
	free_netdev(ndev);
	return 0;
}

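/*
 * Device-tree match table: the .data pointer selects the ops vector that
 * fs_enet_probe() installs for the matched controller type.
 */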
static struct of_device_id fs_enet_match[] = {
#ifdef CONFIG_FS_ENET_HAS_SCC
	{
		.compatible = "fsl,cpm1-scc-enet",
		.data = (void *)&fs_scc_ops,
	},
	{
		.compatible = "fsl,cpm2-scc-enet",
		.data = (void *)&fs_scc_ops,
	},
#endif
#ifdef CONFIG_FS_ENET_HAS_FCC
	{
		.compatible = "fsl,cpm2-fcc-enet",
		.data = (void *)&fs_fcc_ops,
	},
#endif
#ifdef CONFIG_FS_ENET_HAS_FEC
#ifdef CONFIG_FS_ENET_MPC5121_FEC
	{
		.compatible = "fsl,mpc5121-fec",
		.data = (void *)&fs_fec_ops,
	},
#else
	{
		.compatible = "fsl,pq1-fec-enet",
		.data = (void *)&fs_fec_ops,
	},
#endif
#endif
	{}
};
MODULE_DEVICE_TABLE(of, fs_enet_match);

static struct platform_driver fs_enet_driver = {
	.driver = {
		.owner = THIS_MODULE,
		.name = "fs_enet",
		.of_match_table = fs_enet_match,
	},
	.probe = fs_enet_probe,
	.remove = fs_enet_remove,
};

static int __init fs_init(void)
{
	return platform_driver_register(&fs_enet_driver);
}

static void __exit fs_cleanup(void)
{
	platform_driver_unregister(&fs_enet_driver);
}

#ifdef CONFIG_NET_POLL_CONTROLLER
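/*
 * Polled operation for netconsole and similar users: run the interrupt
 * handler by hand with the device's IRQ disabled.
 */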
static void fs_enet_netpoll(struct net_device *dev)
{
	disable_irq(dev->irq);
	fs_enet_interrupt(dev->irq, dev);
	enable_irq(dev->irq);
}
#endif

/**************************************************************************************/

module_init(fs_init);
module_exit(fs_cleanup);