Diffstat (limited to 'drivers/net/ethernet/freescale/gianfar.c')
-rw-r--r-- | drivers/net/ethernet/freescale/gianfar.c | 3291 |
1 file changed, 3291 insertions, 0 deletions
diff --git a/drivers/net/ethernet/freescale/gianfar.c b/drivers/net/ethernet/freescale/gianfar.c
new file mode 100644
index 000000000000..2659daad783d
--- /dev/null
+++ b/drivers/net/ethernet/freescale/gianfar.c
@@ -0,0 +1,3291 @@
1 | /* | ||
2 | * drivers/net/gianfar.c | ||
3 | * | ||
4 | * Gianfar Ethernet Driver | ||
5 | * This driver is designed for the non-CPM ethernet controllers | ||
6 | * on the 85xx and 83xx family of integrated processors | ||
7 | * Based on 8260_io/fcc_enet.c | ||
8 | * | ||
9 | * Author: Andy Fleming | ||
10 | * Maintainer: Kumar Gala | ||
11 | * Modifier: Sandeep Gopalpet <sandeep.kumar@freescale.com> | ||
12 | * | ||
13 | * Copyright 2002-2009, 2011 Freescale Semiconductor, Inc. | ||
14 | * Copyright 2007 MontaVista Software, Inc. | ||
15 | * | ||
16 | * This program is free software; you can redistribute it and/or modify it | ||
17 | * under the terms of the GNU General Public License as published by the | ||
18 | * Free Software Foundation; either version 2 of the License, or (at your | ||
19 | * option) any later version. | ||
20 | * | ||
21 | * Gianfar: AKA Lambda Draconis, "Dragon" | ||
22 | * RA 11 31 24.2 | ||
23 | * Dec +69 19 52 | ||
24 | * V 3.84 | ||
25 | * B-V +1.62 | ||
26 | * | ||
27 | * Theory of operation | ||
28 | * | ||
29 | * The driver is initialized through of_device. Configuration information | ||
30 | * is therefore conveyed through an OF-style device tree. | ||
31 | * | ||
32 | * The Gianfar Ethernet Controller uses a ring of buffer | ||
33 | * descriptors. The beginning is indicated by a register | ||
34 | * pointing to the physical address of the start of the ring. | ||
35 | * The end is determined by a "wrap" bit being set in the | ||
36 | * last descriptor of the ring. | ||
37 | * | ||
38 | * When a packet is received, the RXF bit in the | ||
39 | * IEVENT register is set, triggering an interrupt when the | ||
40 | * corresponding bit in the IMASK register is also set (if | ||
41 | * interrupt coalescing is active, then the interrupt may not | ||
42 | * happen immediately, but will wait until either a set number | ||
43 | * of frames or an amount of time has passed). In NAPI, the | ||
44 | * interrupt handler will signal there is work to be done, and | ||
45 | * exit. This method will start at the last known empty | ||
46 | * descriptor, and process every subsequent descriptor until there | ||
47 | * are none left with data (NAPI will stop after a set number of | ||
48 | * packets to give time to other tasks, but will eventually | ||
49 | * process all the packets). The data arrives inside a | ||
50 | * pre-allocated skb, and so after the skb is passed up to the | ||
51 | * stack, a new skb must be allocated, and the address field in | ||
52 | * the buffer descriptor must be updated to indicate this new | ||
53 | * skb. | ||
54 | * | ||
55 | * When the kernel requests that a packet be transmitted, the | ||
56 | * driver starts where it left off last time, and points the | ||
57 | * descriptor at the buffer which was passed in. The driver | ||
58 | * then informs the DMA engine that there are packets ready to | ||
59 | * be transmitted. Once the controller is finished transmitting | ||
60 | * the packet, an interrupt may be triggered (under the same | ||
61 | * conditions as for reception, but depending on the TXF bit). | ||
62 | * The driver then cleans up the buffer. | ||
63 | */ | ||
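/*
 * Illustrative sketch, not part of the driver: one way the ring model
 * described above can be walked.  It reuses this driver's rxbd8 layout
 * and RXBD_* flags from gianfar.h; the handle_frame() callback is a
 * hypothetical stand-in for the real frame processing done later in
 * this file.
 */
#if 0
static void example_walk_rx_ring(struct gfar_priv_rx_q *rx_queue,
				 void (*handle_frame)(struct rxbd8 *bdp))
{
	struct rxbd8 *bdp = rx_queue->cur_rx;

	/* Stop at the first descriptor the hardware still owns (EMPTY) */
	while (!(bdp->status & RXBD_EMPTY)) {
		handle_frame(bdp);

		/* The WRAP bit marks the last descriptor; jump back to the
		 * base of the ring there, otherwise just advance. */
		if (bdp->status & RXBD_WRAP)
			bdp = rx_queue->rx_bd_base;
		else
			bdp++;
	}
	rx_queue->cur_rx = bdp;
}
#endif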
64 | |||
65 | #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt | ||
66 | #define DEBUG | ||
67 | |||
68 | #include <linux/kernel.h> | ||
69 | #include <linux/string.h> | ||
70 | #include <linux/errno.h> | ||
71 | #include <linux/unistd.h> | ||
72 | #include <linux/slab.h> | ||
73 | #include <linux/interrupt.h> | ||
74 | #include <linux/init.h> | ||
75 | #include <linux/delay.h> | ||
76 | #include <linux/netdevice.h> | ||
77 | #include <linux/etherdevice.h> | ||
78 | #include <linux/skbuff.h> | ||
79 | #include <linux/if_vlan.h> | ||
80 | #include <linux/spinlock.h> | ||
81 | #include <linux/mm.h> | ||
82 | #include <linux/of_mdio.h> | ||
83 | #include <linux/of_platform.h> | ||
84 | #include <linux/ip.h> | ||
85 | #include <linux/tcp.h> | ||
86 | #include <linux/udp.h> | ||
87 | #include <linux/in.h> | ||
88 | #include <linux/net_tstamp.h> | ||
89 | |||
90 | #include <asm/io.h> | ||
91 | #include <asm/reg.h> | ||
92 | #include <asm/irq.h> | ||
93 | #include <asm/uaccess.h> | ||
94 | #include <linux/module.h> | ||
95 | #include <linux/dma-mapping.h> | ||
96 | #include <linux/crc32.h> | ||
97 | #include <linux/mii.h> | ||
98 | #include <linux/phy.h> | ||
99 | #include <linux/phy_fixed.h> | ||
100 | #include <linux/of.h> | ||
101 | #include <linux/of_net.h> | ||
102 | |||
103 | #include "gianfar.h" | ||
104 | #include "fsl_pq_mdio.h" | ||
105 | |||
106 | #define TX_TIMEOUT (1*HZ) | ||
107 | #undef BRIEF_GFAR_ERRORS | ||
108 | #undef VERBOSE_GFAR_ERRORS | ||
109 | |||
110 | const char gfar_driver_name[] = "Gianfar Ethernet"; | ||
111 | const char gfar_driver_version[] = "1.3"; | ||
112 | |||
113 | static int gfar_enet_open(struct net_device *dev); | ||
114 | static int gfar_start_xmit(struct sk_buff *skb, struct net_device *dev); | ||
115 | static void gfar_reset_task(struct work_struct *work); | ||
116 | static void gfar_timeout(struct net_device *dev); | ||
117 | static int gfar_close(struct net_device *dev); | ||
118 | struct sk_buff *gfar_new_skb(struct net_device *dev); | ||
119 | static void gfar_new_rxbdp(struct gfar_priv_rx_q *rx_queue, struct rxbd8 *bdp, | ||
120 | struct sk_buff *skb); | ||
121 | static int gfar_set_mac_address(struct net_device *dev); | ||
122 | static int gfar_change_mtu(struct net_device *dev, int new_mtu); | ||
123 | static irqreturn_t gfar_error(int irq, void *dev_id); | ||
124 | static irqreturn_t gfar_transmit(int irq, void *dev_id); | ||
125 | static irqreturn_t gfar_interrupt(int irq, void *dev_id); | ||
126 | static void adjust_link(struct net_device *dev); | ||
127 | static void init_registers(struct net_device *dev); | ||
128 | static int init_phy(struct net_device *dev); | ||
129 | static int gfar_probe(struct platform_device *ofdev); | ||
130 | static int gfar_remove(struct platform_device *ofdev); | ||
131 | static void free_skb_resources(struct gfar_private *priv); | ||
132 | static void gfar_set_multi(struct net_device *dev); | ||
133 | static void gfar_set_hash_for_addr(struct net_device *dev, u8 *addr); | ||
134 | static void gfar_configure_serdes(struct net_device *dev); | ||
135 | static int gfar_poll(struct napi_struct *napi, int budget); | ||
136 | #ifdef CONFIG_NET_POLL_CONTROLLER | ||
137 | static void gfar_netpoll(struct net_device *dev); | ||
138 | #endif | ||
139 | int gfar_clean_rx_ring(struct gfar_priv_rx_q *rx_queue, int rx_work_limit); | ||
140 | static int gfar_clean_tx_ring(struct gfar_priv_tx_q *tx_queue); | ||
141 | static int gfar_process_frame(struct net_device *dev, struct sk_buff *skb, | ||
142 | int amount_pull); | ||
143 | void gfar_halt(struct net_device *dev); | ||
144 | static void gfar_halt_nodisable(struct net_device *dev); | ||
145 | void gfar_start(struct net_device *dev); | ||
146 | static void gfar_clear_exact_match(struct net_device *dev); | ||
147 | static void gfar_set_mac_for_addr(struct net_device *dev, int num, | ||
148 | const u8 *addr); | ||
149 | static int gfar_ioctl(struct net_device *dev, struct ifreq *rq, int cmd); | ||
150 | |||
151 | MODULE_AUTHOR("Freescale Semiconductor, Inc"); | ||
152 | MODULE_DESCRIPTION("Gianfar Ethernet Driver"); | ||
153 | MODULE_LICENSE("GPL"); | ||
154 | |||
155 | static void gfar_init_rxbdp(struct gfar_priv_rx_q *rx_queue, struct rxbd8 *bdp, | ||
156 | dma_addr_t buf) | ||
157 | { | ||
158 | u32 lstatus; | ||
159 | |||
160 | bdp->bufPtr = buf; | ||
161 | |||
162 | lstatus = BD_LFLAG(RXBD_EMPTY | RXBD_INTERRUPT); | ||
163 | if (bdp == rx_queue->rx_bd_base + rx_queue->rx_ring_size - 1) | ||
164 | lstatus |= BD_LFLAG(RXBD_WRAP); | ||
165 | |||
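/* eieio() orders the bufPtr store above before the lstatus store below,
 * so the hardware never sees an EMPTY descriptor whose buffer pointer is
 * not yet visible. */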
166 | eieio(); | ||
167 | |||
168 | bdp->lstatus = lstatus; | ||
169 | } | ||
170 | |||
171 | static int gfar_init_bds(struct net_device *ndev) | ||
172 | { | ||
173 | struct gfar_private *priv = netdev_priv(ndev); | ||
174 | struct gfar_priv_tx_q *tx_queue = NULL; | ||
175 | struct gfar_priv_rx_q *rx_queue = NULL; | ||
176 | struct txbd8 *txbdp; | ||
177 | struct rxbd8 *rxbdp; | ||
178 | int i, j; | ||
179 | |||
180 | for (i = 0; i < priv->num_tx_queues; i++) { | ||
181 | tx_queue = priv->tx_queue[i]; | ||
182 | /* Initialize some variables in our dev structure */ | ||
183 | tx_queue->num_txbdfree = tx_queue->tx_ring_size; | ||
184 | tx_queue->dirty_tx = tx_queue->tx_bd_base; | ||
185 | tx_queue->cur_tx = tx_queue->tx_bd_base; | ||
186 | tx_queue->skb_curtx = 0; | ||
187 | tx_queue->skb_dirtytx = 0; | ||
188 | |||
189 | /* Initialize Transmit Descriptor Ring */ | ||
190 | txbdp = tx_queue->tx_bd_base; | ||
191 | for (j = 0; j < tx_queue->tx_ring_size; j++) { | ||
192 | txbdp->lstatus = 0; | ||
193 | txbdp->bufPtr = 0; | ||
194 | txbdp++; | ||
195 | } | ||
196 | |||
197 | /* Set the last descriptor in the ring to indicate wrap */ | ||
198 | txbdp--; | ||
199 | txbdp->status |= TXBD_WRAP; | ||
200 | } | ||
201 | |||
202 | for (i = 0; i < priv->num_rx_queues; i++) { | ||
203 | rx_queue = priv->rx_queue[i]; | ||
204 | rx_queue->cur_rx = rx_queue->rx_bd_base; | ||
205 | rx_queue->skb_currx = 0; | ||
206 | rxbdp = rx_queue->rx_bd_base; | ||
207 | |||
208 | for (j = 0; j < rx_queue->rx_ring_size; j++) { | ||
209 | struct sk_buff *skb = rx_queue->rx_skbuff[j]; | ||
210 | |||
211 | if (skb) { | ||
212 | gfar_init_rxbdp(rx_queue, rxbdp, | ||
213 | rxbdp->bufPtr); | ||
214 | } else { | ||
215 | skb = gfar_new_skb(ndev); | ||
216 | if (!skb) { | ||
217 | netdev_err(ndev, "Can't allocate RX buffers\n"); | ||
218 | goto err_rxalloc_fail; | ||
219 | } | ||
220 | rx_queue->rx_skbuff[j] = skb; | ||
221 | |||
222 | gfar_new_rxbdp(rx_queue, rxbdp, skb); | ||
223 | } | ||
224 | |||
225 | rxbdp++; | ||
226 | } | ||
227 | |||
228 | } | ||
229 | |||
230 | return 0; | ||
231 | |||
232 | err_rxalloc_fail: | ||
233 | free_skb_resources(priv); | ||
234 | return -ENOMEM; | ||
235 | } | ||
236 | |||
237 | static int gfar_alloc_skb_resources(struct net_device *ndev) | ||
238 | { | ||
239 | void *vaddr; | ||
240 | dma_addr_t addr; | ||
241 | int i, j, k; | ||
242 | struct gfar_private *priv = netdev_priv(ndev); | ||
243 | struct device *dev = &priv->ofdev->dev; | ||
244 | struct gfar_priv_tx_q *tx_queue = NULL; | ||
245 | struct gfar_priv_rx_q *rx_queue = NULL; | ||
246 | |||
247 | priv->total_tx_ring_size = 0; | ||
248 | for (i = 0; i < priv->num_tx_queues; i++) | ||
249 | priv->total_tx_ring_size += priv->tx_queue[i]->tx_ring_size; | ||
250 | |||
251 | priv->total_rx_ring_size = 0; | ||
252 | for (i = 0; i < priv->num_rx_queues; i++) | ||
253 | priv->total_rx_ring_size += priv->rx_queue[i]->rx_ring_size; | ||
254 | |||
255 | /* Allocate memory for the buffer descriptors */ | ||
256 | vaddr = dma_alloc_coherent(dev, | ||
257 | sizeof(struct txbd8) * priv->total_tx_ring_size + | ||
258 | sizeof(struct rxbd8) * priv->total_rx_ring_size, | ||
259 | &addr, GFP_KERNEL); | ||
260 | if (!vaddr) { | ||
261 | netif_err(priv, ifup, ndev, | ||
262 | "Could not allocate buffer descriptors!\n"); | ||
263 | return -ENOMEM; | ||
264 | } | ||
265 | |||
266 | for (i = 0; i < priv->num_tx_queues; i++) { | ||
267 | tx_queue = priv->tx_queue[i]; | ||
268 | tx_queue->tx_bd_base = vaddr; | ||
269 | tx_queue->tx_bd_dma_base = addr; | ||
270 | tx_queue->dev = ndev; | ||
271 | /* enet DMA only understands physical addresses */ | ||
272 | addr += sizeof(struct txbd8) *tx_queue->tx_ring_size; | ||
273 | vaddr += sizeof(struct txbd8) *tx_queue->tx_ring_size; | ||
274 | } | ||
275 | |||
276 | /* Start the rx descriptor ring where the tx ring leaves off */ | ||
277 | for (i = 0; i < priv->num_rx_queues; i++) { | ||
278 | rx_queue = priv->rx_queue[i]; | ||
279 | rx_queue->rx_bd_base = vaddr; | ||
280 | rx_queue->rx_bd_dma_base = addr; | ||
281 | rx_queue->dev = ndev; | ||
282 | addr += sizeof (struct rxbd8) * rx_queue->rx_ring_size; | ||
283 | vaddr += sizeof (struct rxbd8) * rx_queue->rx_ring_size; | ||
284 | } | ||
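/*
 * Resulting layout of the single coherent block allocated above
 * (two TX and two RX queues shown):
 *
 *   vaddr/addr
 *   |
 *   v
 *   [ txbd ring 0 ][ txbd ring 1 ][ rxbd ring 0 ][ rxbd ring 1 ]
 *
 * Each queue's *_bd_base / *_bd_dma_base points at its own slice.
 */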
285 | |||
286 | /* Setup the skbuff rings */ | ||
287 | for (i = 0; i < priv->num_tx_queues; i++) { | ||
288 | tx_queue = priv->tx_queue[i]; | ||
289 | tx_queue->tx_skbuff = kmalloc(sizeof(*tx_queue->tx_skbuff) * | ||
290 | tx_queue->tx_ring_size, GFP_KERNEL); | ||
291 | if (!tx_queue->tx_skbuff) { | ||
292 | netif_err(priv, ifup, ndev, | ||
293 | "Could not allocate tx_skbuff\n"); | ||
294 | goto cleanup; | ||
295 | } | ||
296 | |||
297 | for (k = 0; k < tx_queue->tx_ring_size; k++) | ||
298 | tx_queue->tx_skbuff[k] = NULL; | ||
299 | } | ||
300 | |||
301 | for (i = 0; i < priv->num_rx_queues; i++) { | ||
302 | rx_queue = priv->rx_queue[i]; | ||
303 | rx_queue->rx_skbuff = kmalloc(sizeof(*rx_queue->rx_skbuff) * | ||
304 | rx_queue->rx_ring_size, GFP_KERNEL); | ||
305 | |||
306 | if (!rx_queue->rx_skbuff) { | ||
307 | netif_err(priv, ifup, ndev, | ||
308 | "Could not allocate rx_skbuff\n"); | ||
309 | goto cleanup; | ||
310 | } | ||
311 | |||
312 | for (j = 0; j < rx_queue->rx_ring_size; j++) | ||
313 | rx_queue->rx_skbuff[j] = NULL; | ||
314 | } | ||
315 | |||
316 | if (gfar_init_bds(ndev)) | ||
317 | goto cleanup; | ||
318 | |||
319 | return 0; | ||
320 | |||
321 | cleanup: | ||
322 | free_skb_resources(priv); | ||
323 | return -ENOMEM; | ||
324 | } | ||
325 | |||
326 | static void gfar_init_tx_rx_base(struct gfar_private *priv) | ||
327 | { | ||
328 | struct gfar __iomem *regs = priv->gfargrp[0].regs; | ||
329 | u32 __iomem *baddr; | ||
330 | int i; | ||
331 | |||
332 | baddr = ®s->tbase0; | ||
333 | for(i = 0; i < priv->num_tx_queues; i++) { | ||
334 | gfar_write(baddr, priv->tx_queue[i]->tx_bd_dma_base); | ||
335 | baddr += 2; | ||
336 | } | ||
337 | |||
338 | baddr = ®s->rbase0; | ||
339 | for(i = 0; i < priv->num_rx_queues; i++) { | ||
340 | gfar_write(baddr, priv->rx_queue[i]->rx_bd_dma_base); | ||
341 | baddr += 2; | ||
342 | } | ||
343 | } | ||
344 | |||
345 | static void gfar_init_mac(struct net_device *ndev) | ||
346 | { | ||
347 | struct gfar_private *priv = netdev_priv(ndev); | ||
348 | struct gfar __iomem *regs = priv->gfargrp[0].regs; | ||
349 | u32 rctrl = 0; | ||
350 | u32 tctrl = 0; | ||
351 | u32 attrs = 0; | ||
352 | |||
353 | /* write the tx/rx base registers */ | ||
354 | gfar_init_tx_rx_base(priv); | ||
355 | |||
356 | /* Configure the coalescing support */ | ||
357 | gfar_configure_coalescing(priv, 0xFF, 0xFF); | ||
358 | |||
359 | if (priv->rx_filer_enable) { | ||
360 | rctrl |= RCTRL_FILREN; | ||
361 | /* Program the RIR0 reg with the required distribution */ | ||
362 | gfar_write(®s->rir0, DEFAULT_RIR0); | ||
363 | } | ||
364 | |||
365 | if (ndev->features & NETIF_F_RXCSUM) | ||
366 | rctrl |= RCTRL_CHECKSUMMING; | ||
367 | |||
368 | if (priv->extended_hash) { | ||
369 | rctrl |= RCTRL_EXTHASH; | ||
370 | |||
371 | gfar_clear_exact_match(ndev); | ||
372 | rctrl |= RCTRL_EMEN; | ||
373 | } | ||
374 | |||
375 | if (priv->padding) { | ||
376 | rctrl &= ~RCTRL_PAL_MASK; | ||
377 | rctrl |= RCTRL_PADDING(priv->padding); | ||
378 | } | ||
379 | |||
380 | /* Insert receive time stamps into padding alignment bytes */ | ||
381 | if (priv->device_flags & FSL_GIANFAR_DEV_HAS_TIMER) { | ||
382 | rctrl &= ~RCTRL_PAL_MASK; | ||
383 | rctrl |= RCTRL_PADDING(8); | ||
384 | priv->padding = 8; | ||
385 | } | ||
386 | |||
387 | /* Enable HW time stamping if requested from user space */ | ||
388 | if (priv->hwts_rx_en) | ||
389 | rctrl |= RCTRL_PRSDEP_INIT | RCTRL_TS_ENABLE; | ||
390 | |||
391 | if (ndev->features & NETIF_F_HW_VLAN_RX) | ||
392 | rctrl |= RCTRL_VLEX | RCTRL_PRSDEP_INIT; | ||
393 | |||
394 | /* Init rctrl based on our settings */ | ||
395 | gfar_write(®s->rctrl, rctrl); | ||
396 | |||
397 | if (ndev->features & NETIF_F_IP_CSUM) | ||
398 | tctrl |= TCTRL_INIT_CSUM; | ||
399 | |||
400 | tctrl |= TCTRL_TXSCHED_PRIO; | ||
401 | |||
402 | gfar_write(®s->tctrl, tctrl); | ||
403 | |||
404 | /* Set the extraction length and index */ | ||
405 | attrs = ATTRELI_EL(priv->rx_stash_size) | | ||
406 | ATTRELI_EI(priv->rx_stash_index); | ||
407 | |||
408 | gfar_write(®s->attreli, attrs); | ||
409 | |||
410 | /* Start with defaults, and add stashing or locking | ||
411 | * depending on the appropriate variables */ | ||
412 | attrs = ATTR_INIT_SETTINGS; | ||
413 | |||
414 | if (priv->bd_stash_en) | ||
415 | attrs |= ATTR_BDSTASH; | ||
416 | |||
417 | if (priv->rx_stash_size != 0) | ||
418 | attrs |= ATTR_BUFSTASH; | ||
419 | |||
420 | gfar_write(®s->attr, attrs); | ||
421 | |||
422 | gfar_write(®s->fifo_tx_thr, priv->fifo_threshold); | ||
423 | gfar_write(®s->fifo_tx_starve, priv->fifo_starve); | ||
424 | gfar_write(®s->fifo_tx_starve_shutoff, priv->fifo_starve_off); | ||
425 | } | ||
426 | |||
427 | static struct net_device_stats *gfar_get_stats(struct net_device *dev) | ||
428 | { | ||
429 | struct gfar_private *priv = netdev_priv(dev); | ||
430 | unsigned long rx_packets = 0, rx_bytes = 0, rx_dropped = 0; | ||
431 | unsigned long tx_packets = 0, tx_bytes = 0; | ||
432 | int i = 0; | ||
433 | |||
434 | for (i = 0; i < priv->num_rx_queues; i++) { | ||
435 | rx_packets += priv->rx_queue[i]->stats.rx_packets; | ||
436 | rx_bytes += priv->rx_queue[i]->stats.rx_bytes; | ||
437 | rx_dropped += priv->rx_queue[i]->stats.rx_dropped; | ||
438 | } | ||
439 | |||
440 | dev->stats.rx_packets = rx_packets; | ||
441 | dev->stats.rx_bytes = rx_bytes; | ||
442 | dev->stats.rx_dropped = rx_dropped; | ||
443 | |||
444 | for (i = 0; i < priv->num_tx_queues; i++) { | ||
445 | tx_bytes += priv->tx_queue[i]->stats.tx_bytes; | ||
446 | tx_packets += priv->tx_queue[i]->stats.tx_packets; | ||
447 | } | ||
448 | |||
449 | dev->stats.tx_bytes = tx_bytes; | ||
450 | dev->stats.tx_packets = tx_packets; | ||
451 | |||
452 | return &dev->stats; | ||
453 | } | ||
454 | |||
455 | static const struct net_device_ops gfar_netdev_ops = { | ||
456 | .ndo_open = gfar_enet_open, | ||
457 | .ndo_start_xmit = gfar_start_xmit, | ||
458 | .ndo_stop = gfar_close, | ||
459 | .ndo_change_mtu = gfar_change_mtu, | ||
460 | .ndo_set_features = gfar_set_features, | ||
461 | .ndo_set_multicast_list = gfar_set_multi, | ||
462 | .ndo_tx_timeout = gfar_timeout, | ||
463 | .ndo_do_ioctl = gfar_ioctl, | ||
464 | .ndo_get_stats = gfar_get_stats, | ||
465 | .ndo_set_mac_address = eth_mac_addr, | ||
466 | .ndo_validate_addr = eth_validate_addr, | ||
467 | #ifdef CONFIG_NET_POLL_CONTROLLER | ||
468 | .ndo_poll_controller = gfar_netpoll, | ||
469 | #endif | ||
470 | }; | ||
471 | |||
472 | void lock_rx_qs(struct gfar_private *priv) | ||
473 | { | ||
474 | int i = 0x0; | ||
475 | |||
476 | for (i = 0; i < priv->num_rx_queues; i++) | ||
477 | spin_lock(&priv->rx_queue[i]->rxlock); | ||
478 | } | ||
479 | |||
480 | void lock_tx_qs(struct gfar_private *priv) | ||
481 | { | ||
482 | int i = 0x0; | ||
483 | |||
484 | for (i = 0; i < priv->num_tx_queues; i++) | ||
485 | spin_lock(&priv->tx_queue[i]->txlock); | ||
486 | } | ||
487 | |||
488 | void unlock_rx_qs(struct gfar_private *priv) | ||
489 | { | ||
490 | int i = 0x0; | ||
491 | |||
492 | for (i = 0; i < priv->num_rx_queues; i++) | ||
493 | spin_unlock(&priv->rx_queue[i]->rxlock); | ||
494 | } | ||
495 | |||
496 | void unlock_tx_qs(struct gfar_private *priv) | ||
497 | { | ||
498 | int i = 0x0; | ||
499 | |||
500 | for (i = 0; i < priv->num_tx_queues; i++) | ||
501 | spin_unlock(&priv->tx_queue[i]->txlock); | ||
502 | } | ||
503 | |||
504 | static bool gfar_is_vlan_on(struct gfar_private *priv) | ||
505 | { | ||
506 | return (priv->ndev->features & NETIF_F_HW_VLAN_RX) || | ||
507 | (priv->ndev->features & NETIF_F_HW_VLAN_TX); | ||
508 | } | ||
509 | |||
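/* The FCB referred to below is the 8-byte frame control block (struct
 * rxfcb/txfcb in gianfar.h) that the eTSEC prepends to received frames,
 * and that the driver prepends on transmit, to carry checksum, VLAN and
 * timestamp information. */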
510 | /* Returns 1 if incoming frames use an FCB */ | ||
511 | static inline int gfar_uses_fcb(struct gfar_private *priv) | ||
512 | { | ||
513 | return gfar_is_vlan_on(priv) || | ||
514 | (priv->ndev->features & NETIF_F_RXCSUM) || | ||
515 | (priv->device_flags & FSL_GIANFAR_DEV_HAS_TIMER); | ||
516 | } | ||
517 | |||
518 | static void free_tx_pointers(struct gfar_private *priv) | ||
519 | { | ||
520 | int i = 0; | ||
521 | |||
522 | for (i = 0; i < priv->num_tx_queues; i++) | ||
523 | kfree(priv->tx_queue[i]); | ||
524 | } | ||
525 | |||
526 | static void free_rx_pointers(struct gfar_private *priv) | ||
527 | { | ||
528 | int i = 0; | ||
529 | |||
530 | for (i = 0; i < priv->num_rx_queues; i++) | ||
531 | kfree(priv->rx_queue[i]); | ||
532 | } | ||
533 | |||
534 | static void unmap_group_regs(struct gfar_private *priv) | ||
535 | { | ||
536 | int i = 0; | ||
537 | |||
538 | for (i = 0; i < MAXGROUPS; i++) | ||
539 | if (priv->gfargrp[i].regs) | ||
540 | iounmap(priv->gfargrp[i].regs); | ||
541 | } | ||
542 | |||
543 | static void disable_napi(struct gfar_private *priv) | ||
544 | { | ||
545 | int i = 0; | ||
546 | |||
547 | for (i = 0; i < priv->num_grps; i++) | ||
548 | napi_disable(&priv->gfargrp[i].napi); | ||
549 | } | ||
550 | |||
551 | static void enable_napi(struct gfar_private *priv) | ||
552 | { | ||
553 | int i = 0; | ||
554 | |||
555 | for (i = 0; i < priv->num_grps; i++) | ||
556 | napi_enable(&priv->gfargrp[i].napi); | ||
557 | } | ||
558 | |||
559 | static int gfar_parse_group(struct device_node *np, | ||
560 | struct gfar_private *priv, const char *model) | ||
561 | { | ||
562 | u32 *queue_mask; | ||
563 | |||
564 | priv->gfargrp[priv->num_grps].regs = of_iomap(np, 0); | ||
565 | if (!priv->gfargrp[priv->num_grps].regs) | ||
566 | return -ENOMEM; | ||
567 | |||
568 | priv->gfargrp[priv->num_grps].interruptTransmit = | ||
569 | irq_of_parse_and_map(np, 0); | ||
570 | |||
571 | /* If we aren't the FEC we have multiple interrupts */ | ||
572 | if (model && strcasecmp(model, "FEC")) { | ||
573 | priv->gfargrp[priv->num_grps].interruptReceive = | ||
574 | irq_of_parse_and_map(np, 1); | ||
575 | priv->gfargrp[priv->num_grps].interruptError = | ||
576 | irq_of_parse_and_map(np,2); | ||
577 | if (priv->gfargrp[priv->num_grps].interruptTransmit == NO_IRQ || | ||
578 | priv->gfargrp[priv->num_grps].interruptReceive == NO_IRQ || | ||
579 | priv->gfargrp[priv->num_grps].interruptError == NO_IRQ) | ||
580 | return -EINVAL; | ||
581 | } | ||
582 | |||
583 | priv->gfargrp[priv->num_grps].grp_id = priv->num_grps; | ||
584 | priv->gfargrp[priv->num_grps].priv = priv; | ||
585 | spin_lock_init(&priv->gfargrp[priv->num_grps].grplock); | ||
586 | if(priv->mode == MQ_MG_MODE) { | ||
587 | queue_mask = (u32 *)of_get_property(np, | ||
588 | "fsl,rx-bit-map", NULL); | ||
589 | priv->gfargrp[priv->num_grps].rx_bit_map = | ||
590 | queue_mask ? *queue_mask :(DEFAULT_MAPPING >> priv->num_grps); | ||
591 | queue_mask = (u32 *)of_get_property(np, | ||
592 | "fsl,tx-bit-map", NULL); | ||
593 | priv->gfargrp[priv->num_grps].tx_bit_map = | ||
594 | queue_mask ? *queue_mask : (DEFAULT_MAPPING >> priv->num_grps); | ||
595 | } else { | ||
596 | priv->gfargrp[priv->num_grps].rx_bit_map = 0xFF; | ||
597 | priv->gfargrp[priv->num_grps].tx_bit_map = 0xFF; | ||
598 | } | ||
599 | priv->num_grps++; | ||
600 | |||
601 | return 0; | ||
602 | } | ||
603 | |||
604 | static int gfar_of_init(struct platform_device *ofdev, struct net_device **pdev) | ||
605 | { | ||
606 | const char *model; | ||
607 | const char *ctype; | ||
608 | const void *mac_addr; | ||
609 | int err = 0, i; | ||
610 | struct net_device *dev = NULL; | ||
611 | struct gfar_private *priv = NULL; | ||
612 | struct device_node *np = ofdev->dev.of_node; | ||
613 | struct device_node *child = NULL; | ||
614 | const u32 *stash; | ||
615 | const u32 *stash_len; | ||
616 | const u32 *stash_idx; | ||
617 | unsigned int num_tx_qs, num_rx_qs; | ||
618 | u32 *tx_queues, *rx_queues; | ||
619 | |||
620 | if (!np || !of_device_is_available(np)) | ||
621 | return -ENODEV; | ||
622 | |||
623 | /* parse the num of tx and rx queues */ | ||
624 | tx_queues = (u32 *)of_get_property(np, "fsl,num_tx_queues", NULL); | ||
625 | num_tx_qs = tx_queues ? *tx_queues : 1; | ||
626 | |||
627 | if (num_tx_qs > MAX_TX_QS) { | ||
628 | pr_err("num_tx_qs(=%d) greater than MAX_TX_QS(=%d)\n", | ||
629 | num_tx_qs, MAX_TX_QS); | ||
630 | pr_err("Cannot do alloc_etherdev, aborting\n"); | ||
631 | return -EINVAL; | ||
632 | } | ||
633 | |||
634 | rx_queues = (u32 *)of_get_property(np, "fsl,num_rx_queues", NULL); | ||
635 | num_rx_qs = rx_queues ? *rx_queues : 1; | ||
636 | |||
637 | if (num_rx_qs > MAX_RX_QS) { | ||
638 | pr_err("num_rx_qs(=%d) greater than MAX_RX_QS(=%d)\n", | ||
639 | num_rx_qs, MAX_RX_QS); | ||
640 | pr_err("Cannot do alloc_etherdev, aborting\n"); | ||
641 | return -EINVAL; | ||
642 | } | ||
643 | |||
644 | *pdev = alloc_etherdev_mq(sizeof(*priv), num_tx_qs); | ||
645 | dev = *pdev; | ||
646 | if (NULL == dev) | ||
647 | return -ENOMEM; | ||
648 | |||
649 | priv = netdev_priv(dev); | ||
650 | priv->node = ofdev->dev.of_node; | ||
651 | priv->ndev = dev; | ||
652 | |||
653 | priv->num_tx_queues = num_tx_qs; | ||
654 | netif_set_real_num_rx_queues(dev, num_rx_qs); | ||
655 | priv->num_rx_queues = num_rx_qs; | ||
656 | priv->num_grps = 0x0; | ||
657 | |||
658 | /* Init Rx queue filer rule set linked list */ | ||
659 | INIT_LIST_HEAD(&priv->rx_list.list); | ||
660 | priv->rx_list.count = 0; | ||
661 | mutex_init(&priv->rx_queue_access); | ||
662 | |||
663 | model = of_get_property(np, "model", NULL); | ||
664 | |||
665 | for (i = 0; i < MAXGROUPS; i++) | ||
666 | priv->gfargrp[i].regs = NULL; | ||
667 | |||
668 | /* Parse and initialize group specific information */ | ||
669 | if (of_device_is_compatible(np, "fsl,etsec2")) { | ||
670 | priv->mode = MQ_MG_MODE; | ||
671 | for_each_child_of_node(np, child) { | ||
672 | err = gfar_parse_group(child, priv, model); | ||
673 | if (err) | ||
674 | goto err_grp_init; | ||
675 | } | ||
676 | } else { | ||
677 | priv->mode = SQ_SG_MODE; | ||
678 | err = gfar_parse_group(np, priv, model); | ||
679 | if(err) | ||
680 | goto err_grp_init; | ||
681 | } | ||
682 | |||
683 | for (i = 0; i < priv->num_tx_queues; i++) | ||
684 | priv->tx_queue[i] = NULL; | ||
685 | for (i = 0; i < priv->num_rx_queues; i++) | ||
686 | priv->rx_queue[i] = NULL; | ||
687 | |||
688 | for (i = 0; i < priv->num_tx_queues; i++) { | ||
689 | priv->tx_queue[i] = kzalloc(sizeof(struct gfar_priv_tx_q), | ||
690 | GFP_KERNEL); | ||
691 | if (!priv->tx_queue[i]) { | ||
692 | err = -ENOMEM; | ||
693 | goto tx_alloc_failed; | ||
694 | } | ||
695 | priv->tx_queue[i]->tx_skbuff = NULL; | ||
696 | priv->tx_queue[i]->qindex = i; | ||
697 | priv->tx_queue[i]->dev = dev; | ||
698 | spin_lock_init(&(priv->tx_queue[i]->txlock)); | ||
699 | } | ||
700 | |||
701 | for (i = 0; i < priv->num_rx_queues; i++) { | ||
702 | priv->rx_queue[i] = kzalloc(sizeof(struct gfar_priv_rx_q), | ||
703 | GFP_KERNEL); | ||
704 | if (!priv->rx_queue[i]) { | ||
705 | err = -ENOMEM; | ||
706 | goto rx_alloc_failed; | ||
707 | } | ||
708 | priv->rx_queue[i]->rx_skbuff = NULL; | ||
709 | priv->rx_queue[i]->qindex = i; | ||
710 | priv->rx_queue[i]->dev = dev; | ||
711 | spin_lock_init(&(priv->rx_queue[i]->rxlock)); | ||
712 | } | ||
713 | |||
714 | |||
715 | stash = of_get_property(np, "bd-stash", NULL); | ||
716 | |||
717 | if (stash) { | ||
718 | priv->device_flags |= FSL_GIANFAR_DEV_HAS_BD_STASHING; | ||
719 | priv->bd_stash_en = 1; | ||
720 | } | ||
721 | |||
722 | stash_len = of_get_property(np, "rx-stash-len", NULL); | ||
723 | |||
724 | if (stash_len) | ||
725 | priv->rx_stash_size = *stash_len; | ||
726 | |||
727 | stash_idx = of_get_property(np, "rx-stash-idx", NULL); | ||
728 | |||
729 | if (stash_idx) | ||
730 | priv->rx_stash_index = *stash_idx; | ||
731 | |||
732 | if (stash_len || stash_idx) | ||
733 | priv->device_flags |= FSL_GIANFAR_DEV_HAS_BUF_STASHING; | ||
734 | |||
735 | mac_addr = of_get_mac_address(np); | ||
736 | if (mac_addr) | ||
737 | memcpy(dev->dev_addr, mac_addr, MAC_ADDR_LEN); | ||
738 | |||
739 | if (model && !strcasecmp(model, "TSEC")) | ||
740 | priv->device_flags = | ||
741 | FSL_GIANFAR_DEV_HAS_GIGABIT | | ||
742 | FSL_GIANFAR_DEV_HAS_COALESCE | | ||
743 | FSL_GIANFAR_DEV_HAS_RMON | | ||
744 | FSL_GIANFAR_DEV_HAS_MULTI_INTR; | ||
745 | if (model && !strcasecmp(model, "eTSEC")) | ||
746 | priv->device_flags = | ||
747 | FSL_GIANFAR_DEV_HAS_GIGABIT | | ||
748 | FSL_GIANFAR_DEV_HAS_COALESCE | | ||
749 | FSL_GIANFAR_DEV_HAS_RMON | | ||
750 | FSL_GIANFAR_DEV_HAS_MULTI_INTR | | ||
751 | FSL_GIANFAR_DEV_HAS_PADDING | | ||
752 | FSL_GIANFAR_DEV_HAS_CSUM | | ||
753 | FSL_GIANFAR_DEV_HAS_VLAN | | ||
754 | FSL_GIANFAR_DEV_HAS_MAGIC_PACKET | | ||
755 | FSL_GIANFAR_DEV_HAS_EXTENDED_HASH | | ||
756 | FSL_GIANFAR_DEV_HAS_TIMER; | ||
757 | |||
758 | ctype = of_get_property(np, "phy-connection-type", NULL); | ||
759 | |||
760 | /* We only care about rgmii-id. The rest are autodetected */ | ||
761 | if (ctype && !strcmp(ctype, "rgmii-id")) | ||
762 | priv->interface = PHY_INTERFACE_MODE_RGMII_ID; | ||
763 | else | ||
764 | priv->interface = PHY_INTERFACE_MODE_MII; | ||
765 | |||
766 | if (of_get_property(np, "fsl,magic-packet", NULL)) | ||
767 | priv->device_flags |= FSL_GIANFAR_DEV_HAS_MAGIC_PACKET; | ||
768 | |||
769 | priv->phy_node = of_parse_phandle(np, "phy-handle", 0); | ||
770 | |||
771 | /* Find the TBI PHY. If it's not there, we don't support SGMII */ | ||
772 | priv->tbi_node = of_parse_phandle(np, "tbi-handle", 0); | ||
773 | |||
774 | return 0; | ||
775 | |||
776 | rx_alloc_failed: | ||
777 | free_rx_pointers(priv); | ||
778 | tx_alloc_failed: | ||
779 | free_tx_pointers(priv); | ||
780 | err_grp_init: | ||
781 | unmap_group_regs(priv); | ||
782 | free_netdev(dev); | ||
783 | return err; | ||
784 | } | ||
785 | |||
786 | static int gfar_hwtstamp_ioctl(struct net_device *netdev, | ||
787 | struct ifreq *ifr, int cmd) | ||
788 | { | ||
789 | struct hwtstamp_config config; | ||
790 | struct gfar_private *priv = netdev_priv(netdev); | ||
791 | |||
792 | if (copy_from_user(&config, ifr->ifr_data, sizeof(config))) | ||
793 | return -EFAULT; | ||
794 | |||
795 | /* reserved for future extensions */ | ||
796 | if (config.flags) | ||
797 | return -EINVAL; | ||
798 | |||
799 | switch (config.tx_type) { | ||
800 | case HWTSTAMP_TX_OFF: | ||
801 | priv->hwts_tx_en = 0; | ||
802 | break; | ||
803 | case HWTSTAMP_TX_ON: | ||
804 | if (!(priv->device_flags & FSL_GIANFAR_DEV_HAS_TIMER)) | ||
805 | return -ERANGE; | ||
806 | priv->hwts_tx_en = 1; | ||
807 | break; | ||
808 | default: | ||
809 | return -ERANGE; | ||
810 | } | ||
811 | |||
812 | switch (config.rx_filter) { | ||
813 | case HWTSTAMP_FILTER_NONE: | ||
814 | if (priv->hwts_rx_en) { | ||
815 | stop_gfar(netdev); | ||
816 | priv->hwts_rx_en = 0; | ||
817 | startup_gfar(netdev); | ||
818 | } | ||
819 | break; | ||
820 | default: | ||
821 | if (!(priv->device_flags & FSL_GIANFAR_DEV_HAS_TIMER)) | ||
822 | return -ERANGE; | ||
823 | if (!priv->hwts_rx_en) { | ||
824 | stop_gfar(netdev); | ||
825 | priv->hwts_rx_en = 1; | ||
826 | startup_gfar(netdev); | ||
827 | } | ||
828 | config.rx_filter = HWTSTAMP_FILTER_ALL; | ||
829 | break; | ||
830 | } | ||
831 | |||
832 | return copy_to_user(ifr->ifr_data, &config, sizeof(config)) ? | ||
833 | -EFAULT : 0; | ||
834 | } | ||
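/*
 * Illustrative user-space sketch, not part of the driver: how an
 * application might exercise the SIOCSHWTSTAMP path handled above.
 * The socket/interface handling is an assumption for the example only.
 */
#if 0
#include <string.h>
#include <sys/ioctl.h>
#include <net/if.h>
#include <linux/sockios.h>
#include <linux/net_tstamp.h>

static int enable_hw_timestamping(int sock, const char *ifname)
{
	struct hwtstamp_config cfg = {
		.tx_type   = HWTSTAMP_TX_ON,
		.rx_filter = HWTSTAMP_FILTER_ALL,
	};
	struct ifreq ifr;

	memset(&ifr, 0, sizeof(ifr));
	strncpy(ifr.ifr_name, ifname, IFNAMSIZ - 1);
	ifr.ifr_data = (char *)&cfg;

	/* On return the driver may have rewritten cfg.rx_filter to what it
	 * actually enabled (HWTSTAMP_FILTER_ALL in this driver). */
	return ioctl(sock, SIOCSHWTSTAMP, &ifr);
}
#endif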
835 | |||
836 | /* Ioctl MII Interface */ | ||
837 | static int gfar_ioctl(struct net_device *dev, struct ifreq *rq, int cmd) | ||
838 | { | ||
839 | struct gfar_private *priv = netdev_priv(dev); | ||
840 | |||
841 | if (!netif_running(dev)) | ||
842 | return -EINVAL; | ||
843 | |||
844 | if (cmd == SIOCSHWTSTAMP) | ||
845 | return gfar_hwtstamp_ioctl(dev, rq, cmd); | ||
846 | |||
847 | if (!priv->phydev) | ||
848 | return -ENODEV; | ||
849 | |||
850 | return phy_mii_ioctl(priv->phydev, rq, cmd); | ||
851 | } | ||
852 | |||
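/* reverse_bitmap() below mirrors a queue bit map end to end: with
 * max_qs = 8, an input of 0xC0 (MSB-first, i.e. queues 0 and 1) becomes
 * 0x03, the LSB-first ordering that for_each_set_bit() walks. */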
853 | static unsigned int reverse_bitmap(unsigned int bit_map, unsigned int max_qs) | ||
854 | { | ||
855 | unsigned int new_bit_map = 0x0; | ||
856 | int mask = 0x1 << (max_qs - 1), i; | ||
857 | for (i = 0; i < max_qs; i++) { | ||
858 | if (bit_map & mask) | ||
859 | new_bit_map = new_bit_map + (1 << i); | ||
860 | mask = mask >> 0x1; | ||
861 | } | ||
862 | return new_bit_map; | ||
863 | } | ||
864 | |||
865 | static u32 cluster_entry_per_class(struct gfar_private *priv, u32 rqfar, | ||
866 | u32 class) | ||
867 | { | ||
868 | u32 rqfpr = FPR_FILER_MASK; | ||
869 | u32 rqfcr = 0x0; | ||
870 | |||
871 | rqfar--; | ||
872 | rqfcr = RQFCR_CLE | RQFCR_PID_MASK | RQFCR_CMP_EXACT; | ||
873 | priv->ftp_rqfpr[rqfar] = rqfpr; | ||
874 | priv->ftp_rqfcr[rqfar] = rqfcr; | ||
875 | gfar_write_filer(priv, rqfar, rqfcr, rqfpr); | ||
876 | |||
877 | rqfar--; | ||
878 | rqfcr = RQFCR_CMP_NOMATCH; | ||
879 | priv->ftp_rqfpr[rqfar] = rqfpr; | ||
880 | priv->ftp_rqfcr[rqfar] = rqfcr; | ||
881 | gfar_write_filer(priv, rqfar, rqfcr, rqfpr); | ||
882 | |||
883 | rqfar--; | ||
884 | rqfcr = RQFCR_CMP_EXACT | RQFCR_PID_PARSE | RQFCR_CLE | RQFCR_AND; | ||
885 | rqfpr = class; | ||
886 | priv->ftp_rqfcr[rqfar] = rqfcr; | ||
887 | priv->ftp_rqfpr[rqfar] = rqfpr; | ||
888 | gfar_write_filer(priv, rqfar, rqfcr, rqfpr); | ||
889 | |||
890 | rqfar--; | ||
891 | rqfcr = RQFCR_CMP_EXACT | RQFCR_PID_MASK | RQFCR_AND; | ||
892 | rqfpr = class; | ||
893 | priv->ftp_rqfcr[rqfar] = rqfcr; | ||
894 | priv->ftp_rqfpr[rqfar] = rqfpr; | ||
895 | gfar_write_filer(priv, rqfar, rqfcr, rqfpr); | ||
896 | |||
897 | return rqfar; | ||
898 | } | ||
899 | |||
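/*
 * gfar_init_filer_table() fills the RX filer from MAX_FILER_IDX downward:
 * a catch-all match rule in the last entry, one four-entry cluster per
 * traffic class (IPv4/IPv6, plain/UDP/TCP) below it, cur_filer_idx left
 * pointing at the lowest programmed entry, and every entry beneath that
 * set to no-match.
 */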
900 | static void gfar_init_filer_table(struct gfar_private *priv) | ||
901 | { | ||
902 | int i = 0x0; | ||
903 | u32 rqfar = MAX_FILER_IDX; | ||
904 | u32 rqfcr = 0x0; | ||
905 | u32 rqfpr = FPR_FILER_MASK; | ||
906 | |||
907 | /* Default rule */ | ||
908 | rqfcr = RQFCR_CMP_MATCH; | ||
909 | priv->ftp_rqfcr[rqfar] = rqfcr; | ||
910 | priv->ftp_rqfpr[rqfar] = rqfpr; | ||
911 | gfar_write_filer(priv, rqfar, rqfcr, rqfpr); | ||
912 | |||
913 | rqfar = cluster_entry_per_class(priv, rqfar, RQFPR_IPV6); | ||
914 | rqfar = cluster_entry_per_class(priv, rqfar, RQFPR_IPV6 | RQFPR_UDP); | ||
915 | rqfar = cluster_entry_per_class(priv, rqfar, RQFPR_IPV6 | RQFPR_TCP); | ||
916 | rqfar = cluster_entry_per_class(priv, rqfar, RQFPR_IPV4); | ||
917 | rqfar = cluster_entry_per_class(priv, rqfar, RQFPR_IPV4 | RQFPR_UDP); | ||
918 | rqfar = cluster_entry_per_class(priv, rqfar, RQFPR_IPV4 | RQFPR_TCP); | ||
919 | |||
920 | /* cur_filer_idx indicates the first non-masked rule */ | ||
921 | priv->cur_filer_idx = rqfar; | ||
922 | |||
923 | /* Rest are masked rules */ | ||
924 | rqfcr = RQFCR_CMP_NOMATCH; | ||
925 | for (i = 0; i < rqfar; i++) { | ||
926 | priv->ftp_rqfcr[i] = rqfcr; | ||
927 | priv->ftp_rqfpr[i] = rqfpr; | ||
928 | gfar_write_filer(priv, i, rqfcr, rqfpr); | ||
929 | } | ||
930 | } | ||
931 | |||
932 | static void gfar_detect_errata(struct gfar_private *priv) | ||
933 | { | ||
934 | struct device *dev = &priv->ofdev->dev; | ||
935 | unsigned int pvr = mfspr(SPRN_PVR); | ||
936 | unsigned int svr = mfspr(SPRN_SVR); | ||
937 | unsigned int mod = (svr >> 16) & 0xfff6; /* w/o E suffix */ | ||
938 | unsigned int rev = svr & 0xffff; | ||
939 | |||
940 | /* MPC8313 Rev 2.0 and higher; All MPC837x */ | ||
941 | if ((pvr == 0x80850010 && mod == 0x80b0 && rev >= 0x0020) || | ||
942 | (pvr == 0x80861010 && (mod & 0xfff9) == 0x80c0)) | ||
943 | priv->errata |= GFAR_ERRATA_74; | ||
944 | |||
945 | /* MPC8313 and MPC837x all rev */ | ||
946 | if ((pvr == 0x80850010 && mod == 0x80b0) || | ||
947 | (pvr == 0x80861010 && (mod & 0xfff9) == 0x80c0)) | ||
948 | priv->errata |= GFAR_ERRATA_76; | ||
949 | |||
950 | /* MPC8313 and MPC837x all rev */ | ||
951 | if ((pvr == 0x80850010 && mod == 0x80b0) || | ||
952 | (pvr == 0x80861010 && (mod & 0xfff9) == 0x80c0)) | ||
953 | priv->errata |= GFAR_ERRATA_A002; | ||
954 | |||
955 | /* MPC8313 Rev < 2.0, MPC8548 rev 2.0 */ | ||
956 | if ((pvr == 0x80850010 && mod == 0x80b0 && rev < 0x0020) || | ||
957 | (pvr == 0x80210020 && mod == 0x8030 && rev == 0x0020)) | ||
958 | priv->errata |= GFAR_ERRATA_12; | ||
959 | |||
960 | if (priv->errata) | ||
961 | dev_info(dev, "enabled errata workarounds, flags: 0x%x\n", | ||
962 | priv->errata); | ||
963 | } | ||
964 | |||
965 | /* Set up the ethernet device structure, private data, | ||
966 | * and anything else we need before we start */ | ||
967 | static int gfar_probe(struct platform_device *ofdev) | ||
968 | { | ||
969 | u32 tempval; | ||
970 | struct net_device *dev = NULL; | ||
971 | struct gfar_private *priv = NULL; | ||
972 | struct gfar __iomem *regs = NULL; | ||
973 | int err = 0, i, grp_idx = 0; | ||
974 | int len_devname; | ||
975 | u32 rstat = 0, tstat = 0, rqueue = 0, tqueue = 0; | ||
976 | u32 isrg = 0; | ||
977 | u32 __iomem *baddr; | ||
978 | |||
979 | err = gfar_of_init(ofdev, &dev); | ||
980 | |||
981 | if (err) | ||
982 | return err; | ||
983 | |||
984 | priv = netdev_priv(dev); | ||
985 | priv->ndev = dev; | ||
986 | priv->ofdev = ofdev; | ||
987 | priv->node = ofdev->dev.of_node; | ||
988 | SET_NETDEV_DEV(dev, &ofdev->dev); | ||
989 | |||
990 | spin_lock_init(&priv->bflock); | ||
991 | INIT_WORK(&priv->reset_task, gfar_reset_task); | ||
992 | |||
993 | dev_set_drvdata(&ofdev->dev, priv); | ||
994 | regs = priv->gfargrp[0].regs; | ||
995 | |||
996 | gfar_detect_errata(priv); | ||
997 | |||
998 | /* Stop the DMA engine now, in case it was running before */ | ||
999 | /* (The firmware could have used it, and left it running). */ | ||
1000 | gfar_halt(dev); | ||
1001 | |||
1002 | /* Reset MAC layer */ | ||
1003 | gfar_write(®s->maccfg1, MACCFG1_SOFT_RESET); | ||
1004 | |||
1005 | /* We need to delay at least 3 TX clocks */ | ||
1006 | udelay(2); | ||
1007 | |||
1008 | tempval = (MACCFG1_TX_FLOW | MACCFG1_RX_FLOW); | ||
1009 | gfar_write(®s->maccfg1, tempval); | ||
1010 | |||
1011 | /* Initialize MACCFG2. */ | ||
1012 | tempval = MACCFG2_INIT_SETTINGS; | ||
1013 | if (gfar_has_errata(priv, GFAR_ERRATA_74)) | ||
1014 | tempval |= MACCFG2_HUGEFRAME | MACCFG2_LENGTHCHECK; | ||
1015 | gfar_write(®s->maccfg2, tempval); | ||
1016 | |||
1017 | /* Initialize ECNTRL */ | ||
1018 | gfar_write(®s->ecntrl, ECNTRL_INIT_SETTINGS); | ||
1019 | |||
1020 | /* Set the dev->base_addr to the gfar reg region */ | ||
1021 | dev->base_addr = (unsigned long) regs; | ||
1022 | |||
1023 | SET_NETDEV_DEV(dev, &ofdev->dev); | ||
1024 | |||
1025 | /* Fill in the dev structure */ | ||
1026 | dev->watchdog_timeo = TX_TIMEOUT; | ||
1027 | dev->mtu = 1500; | ||
1028 | dev->netdev_ops = &gfar_netdev_ops; | ||
1029 | dev->ethtool_ops = &gfar_ethtool_ops; | ||
1030 | |||
1031 | /* Register NAPI; we register one NAPI context per group */ | ||
1032 | for (i = 0; i < priv->num_grps; i++) | ||
1033 | netif_napi_add(dev, &priv->gfargrp[i].napi, gfar_poll, GFAR_DEV_WEIGHT); | ||
1034 | |||
1035 | if (priv->device_flags & FSL_GIANFAR_DEV_HAS_CSUM) { | ||
1036 | dev->hw_features = NETIF_F_IP_CSUM | NETIF_F_SG | | ||
1037 | NETIF_F_RXCSUM; | ||
1038 | dev->features |= NETIF_F_IP_CSUM | NETIF_F_SG | | ||
1039 | NETIF_F_RXCSUM | NETIF_F_HIGHDMA; | ||
1040 | } | ||
1041 | |||
1042 | if (priv->device_flags & FSL_GIANFAR_DEV_HAS_VLAN) { | ||
1043 | dev->hw_features |= NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX; | ||
1044 | dev->features |= NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX; | ||
1045 | } | ||
1046 | |||
1047 | if (priv->device_flags & FSL_GIANFAR_DEV_HAS_EXTENDED_HASH) { | ||
1048 | priv->extended_hash = 1; | ||
1049 | priv->hash_width = 9; | ||
1050 | |||
1051 | priv->hash_regs[0] = ®s->igaddr0; | ||
1052 | priv->hash_regs[1] = ®s->igaddr1; | ||
1053 | priv->hash_regs[2] = ®s->igaddr2; | ||
1054 | priv->hash_regs[3] = ®s->igaddr3; | ||
1055 | priv->hash_regs[4] = ®s->igaddr4; | ||
1056 | priv->hash_regs[5] = ®s->igaddr5; | ||
1057 | priv->hash_regs[6] = ®s->igaddr6; | ||
1058 | priv->hash_regs[7] = ®s->igaddr7; | ||
1059 | priv->hash_regs[8] = ®s->gaddr0; | ||
1060 | priv->hash_regs[9] = ®s->gaddr1; | ||
1061 | priv->hash_regs[10] = ®s->gaddr2; | ||
1062 | priv->hash_regs[11] = ®s->gaddr3; | ||
1063 | priv->hash_regs[12] = ®s->gaddr4; | ||
1064 | priv->hash_regs[13] = ®s->gaddr5; | ||
1065 | priv->hash_regs[14] = ®s->gaddr6; | ||
1066 | priv->hash_regs[15] = ®s->gaddr7; | ||
1067 | |||
1068 | } else { | ||
1069 | priv->extended_hash = 0; | ||
1070 | priv->hash_width = 8; | ||
1071 | |||
1072 | priv->hash_regs[0] = ®s->gaddr0; | ||
1073 | priv->hash_regs[1] = ®s->gaddr1; | ||
1074 | priv->hash_regs[2] = ®s->gaddr2; | ||
1075 | priv->hash_regs[3] = ®s->gaddr3; | ||
1076 | priv->hash_regs[4] = ®s->gaddr4; | ||
1077 | priv->hash_regs[5] = ®s->gaddr5; | ||
1078 | priv->hash_regs[6] = ®s->gaddr6; | ||
1079 | priv->hash_regs[7] = ®s->gaddr7; | ||
1080 | } | ||
1081 | |||
1082 | if (priv->device_flags & FSL_GIANFAR_DEV_HAS_PADDING) | ||
1083 | priv->padding = DEFAULT_PADDING; | ||
1084 | else | ||
1085 | priv->padding = 0; | ||
1086 | |||
1087 | if (dev->features & NETIF_F_IP_CSUM || | ||
1088 | priv->device_flags & FSL_GIANFAR_DEV_HAS_TIMER) | ||
1089 | dev->hard_header_len += GMAC_FCB_LEN; | ||
1090 | |||
1091 | /* Program the isrg regs only if number of grps > 1 */ | ||
1092 | if (priv->num_grps > 1) { | ||
1093 | baddr = ®s->isrg0; | ||
1094 | for (i = 0; i < priv->num_grps; i++) { | ||
1095 | isrg |= (priv->gfargrp[i].rx_bit_map << ISRG_SHIFT_RX); | ||
1096 | isrg |= (priv->gfargrp[i].tx_bit_map << ISRG_SHIFT_TX); | ||
1097 | gfar_write(baddr, isrg); | ||
1098 | baddr++; | ||
1099 | isrg = 0x0; | ||
1100 | } | ||
1101 | } | ||
1102 | |||
1103 | /* Need to reverse the bit maps as bit_map's MSB is q0 | ||
1104 | * but for_each_set_bit parses from right to left, which | ||
1105 | * basically reverses the queue numbers */ | ||
1106 | for (i = 0; i< priv->num_grps; i++) { | ||
1107 | priv->gfargrp[i].tx_bit_map = reverse_bitmap( | ||
1108 | priv->gfargrp[i].tx_bit_map, MAX_TX_QS); | ||
1109 | priv->gfargrp[i].rx_bit_map = reverse_bitmap( | ||
1110 | priv->gfargrp[i].rx_bit_map, MAX_RX_QS); | ||
1111 | } | ||
1112 | |||
1113 | /* Calculate RSTAT, TSTAT, RQUEUE and TQUEUE values, | ||
1114 | * also assign queues to groups */ | ||
1115 | for (grp_idx = 0; grp_idx < priv->num_grps; grp_idx++) { | ||
1116 | priv->gfargrp[grp_idx].num_rx_queues = 0x0; | ||
1117 | for_each_set_bit(i, &priv->gfargrp[grp_idx].rx_bit_map, | ||
1118 | priv->num_rx_queues) { | ||
1119 | priv->gfargrp[grp_idx].num_rx_queues++; | ||
1120 | priv->rx_queue[i]->grp = &priv->gfargrp[grp_idx]; | ||
1121 | rstat = rstat | (RSTAT_CLEAR_RHALT >> i); | ||
1122 | rqueue = rqueue | ((RQUEUE_EN0 | RQUEUE_EX0) >> i); | ||
1123 | } | ||
1124 | priv->gfargrp[grp_idx].num_tx_queues = 0x0; | ||
1125 | for_each_set_bit(i, &priv->gfargrp[grp_idx].tx_bit_map, | ||
1126 | priv->num_tx_queues) { | ||
1127 | priv->gfargrp[grp_idx].num_tx_queues++; | ||
1128 | priv->tx_queue[i]->grp = &priv->gfargrp[grp_idx]; | ||
1129 | tstat = tstat | (TSTAT_CLEAR_THALT >> i); | ||
1130 | tqueue = tqueue | (TQUEUE_EN0 >> i); | ||
1131 | } | ||
1132 | priv->gfargrp[grp_idx].rstat = rstat; | ||
1133 | priv->gfargrp[grp_idx].tstat = tstat; | ||
1134 | rstat = tstat = 0; | ||
1135 | } | ||
1136 | |||
1137 | gfar_write(®s->rqueue, rqueue); | ||
1138 | gfar_write(®s->tqueue, tqueue); | ||
1139 | |||
1140 | priv->rx_buffer_size = DEFAULT_RX_BUFFER_SIZE; | ||
1141 | |||
1142 | /* Initializing some of the rx/tx queue level parameters */ | ||
1143 | for (i = 0; i < priv->num_tx_queues; i++) { | ||
1144 | priv->tx_queue[i]->tx_ring_size = DEFAULT_TX_RING_SIZE; | ||
1145 | priv->tx_queue[i]->num_txbdfree = DEFAULT_TX_RING_SIZE; | ||
1146 | priv->tx_queue[i]->txcoalescing = DEFAULT_TX_COALESCE; | ||
1147 | priv->tx_queue[i]->txic = DEFAULT_TXIC; | ||
1148 | } | ||
1149 | |||
1150 | for (i = 0; i < priv->num_rx_queues; i++) { | ||
1151 | priv->rx_queue[i]->rx_ring_size = DEFAULT_RX_RING_SIZE; | ||
1152 | priv->rx_queue[i]->rxcoalescing = DEFAULT_RX_COALESCE; | ||
1153 | priv->rx_queue[i]->rxic = DEFAULT_RXIC; | ||
1154 | } | ||
1155 | |||
1156 | /* always enable rx filer */ | ||
1157 | priv->rx_filer_enable = 1; | ||
1158 | /* Enable most messages by default */ | ||
1159 | priv->msg_enable = (NETIF_MSG_IFUP << 1 ) - 1; | ||
1160 | |||
1161 | /* Carrier starts down, phylib will bring it up */ | ||
1162 | netif_carrier_off(dev); | ||
1163 | |||
1164 | err = register_netdev(dev); | ||
1165 | |||
1166 | if (err) { | ||
1167 | pr_err("%s: Cannot register net device, aborting\n", dev->name); | ||
1168 | goto register_fail; | ||
1169 | } | ||
1170 | |||
1171 | device_init_wakeup(&dev->dev, | ||
1172 | priv->device_flags & FSL_GIANFAR_DEV_HAS_MAGIC_PACKET); | ||
1173 | |||
1174 | /* fill out IRQ number and name fields */ | ||
1175 | len_devname = strlen(dev->name); | ||
1176 | for (i = 0; i < priv->num_grps; i++) { | ||
1177 | strncpy(&priv->gfargrp[i].int_name_tx[0], dev->name, | ||
1178 | len_devname); | ||
1179 | if (priv->device_flags & FSL_GIANFAR_DEV_HAS_MULTI_INTR) { | ||
1180 | strncpy(&priv->gfargrp[i].int_name_tx[len_devname], | ||
1181 | "_g", sizeof("_g")); | ||
1182 | priv->gfargrp[i].int_name_tx[ | ||
1183 | strlen(priv->gfargrp[i].int_name_tx)] = i+48; | ||
1184 | strncpy(&priv->gfargrp[i].int_name_tx[strlen( | ||
1185 | priv->gfargrp[i].int_name_tx)], | ||
1186 | "_tx", sizeof("_tx") + 1); | ||
1187 | |||
1188 | strncpy(&priv->gfargrp[i].int_name_rx[0], dev->name, | ||
1189 | len_devname); | ||
1190 | strncpy(&priv->gfargrp[i].int_name_rx[len_devname], | ||
1191 | "_g", sizeof("_g")); | ||
1192 | priv->gfargrp[i].int_name_rx[ | ||
1193 | strlen(priv->gfargrp[i].int_name_rx)] = i+48; | ||
1194 | strncpy(&priv->gfargrp[i].int_name_rx[strlen( | ||
1195 | priv->gfargrp[i].int_name_rx)], | ||
1196 | "_rx", sizeof("_rx") + 1); | ||
1197 | |||
1198 | strncpy(&priv->gfargrp[i].int_name_er[0], dev->name, | ||
1199 | len_devname); | ||
1200 | strncpy(&priv->gfargrp[i].int_name_er[len_devname], | ||
1201 | "_g", sizeof("_g")); | ||
1202 | priv->gfargrp[i].int_name_er[strlen( | ||
1203 | priv->gfargrp[i].int_name_er)] = i+48; | ||
1204 | strncpy(&priv->gfargrp[i].int_name_er[strlen(\ | ||
1205 | priv->gfargrp[i].int_name_er)], | ||
1206 | "_er", sizeof("_er") + 1); | ||
1207 | } else | ||
1208 | priv->gfargrp[i].int_name_tx[len_devname] = '\0'; | ||
1209 | } | ||
1210 | |||
1211 | /* Initialize the filer table */ | ||
1212 | gfar_init_filer_table(priv); | ||
1213 | |||
1214 | /* Create all the sysfs files */ | ||
1215 | gfar_init_sysfs(dev); | ||
1216 | |||
1217 | /* Print out the device info */ | ||
1218 | netdev_info(dev, "mac: %pM\n", dev->dev_addr); | ||
1219 | |||
1220 | /* Even more device info helps when determining which kernel */ | ||
1221 | /* provided which set of benchmarks. */ | ||
1222 | netdev_info(dev, "Running with NAPI enabled\n"); | ||
1223 | for (i = 0; i < priv->num_rx_queues; i++) | ||
1224 | netdev_info(dev, "RX BD ring size for Q[%d]: %d\n", | ||
1225 | i, priv->rx_queue[i]->rx_ring_size); | ||
1226 | for(i = 0; i < priv->num_tx_queues; i++) | ||
1227 | netdev_info(dev, "TX BD ring size for Q[%d]: %d\n", | ||
1228 | i, priv->tx_queue[i]->tx_ring_size); | ||
1229 | |||
1230 | return 0; | ||
1231 | |||
1232 | register_fail: | ||
1233 | unmap_group_regs(priv); | ||
1234 | free_tx_pointers(priv); | ||
1235 | free_rx_pointers(priv); | ||
1236 | if (priv->phy_node) | ||
1237 | of_node_put(priv->phy_node); | ||
1238 | if (priv->tbi_node) | ||
1239 | of_node_put(priv->tbi_node); | ||
1240 | free_netdev(dev); | ||
1241 | return err; | ||
1242 | } | ||
1243 | |||
1244 | static int gfar_remove(struct platform_device *ofdev) | ||
1245 | { | ||
1246 | struct gfar_private *priv = dev_get_drvdata(&ofdev->dev); | ||
1247 | |||
1248 | if (priv->phy_node) | ||
1249 | of_node_put(priv->phy_node); | ||
1250 | if (priv->tbi_node) | ||
1251 | of_node_put(priv->tbi_node); | ||
1252 | |||
1253 | dev_set_drvdata(&ofdev->dev, NULL); | ||
1254 | |||
1255 | unregister_netdev(priv->ndev); | ||
1256 | unmap_group_regs(priv); | ||
1257 | free_netdev(priv->ndev); | ||
1258 | |||
1259 | return 0; | ||
1260 | } | ||
1261 | |||
1262 | #ifdef CONFIG_PM | ||
1263 | |||
1264 | static int gfar_suspend(struct device *dev) | ||
1265 | { | ||
1266 | struct gfar_private *priv = dev_get_drvdata(dev); | ||
1267 | struct net_device *ndev = priv->ndev; | ||
1268 | struct gfar __iomem *regs = priv->gfargrp[0].regs; | ||
1269 | unsigned long flags; | ||
1270 | u32 tempval; | ||
1271 | |||
1272 | int magic_packet = priv->wol_en && | ||
1273 | (priv->device_flags & FSL_GIANFAR_DEV_HAS_MAGIC_PACKET); | ||
1274 | |||
1275 | netif_device_detach(ndev); | ||
1276 | |||
1277 | if (netif_running(ndev)) { | ||
1278 | |||
1279 | local_irq_save(flags); | ||
1280 | lock_tx_qs(priv); | ||
1281 | lock_rx_qs(priv); | ||
1282 | |||
1283 | gfar_halt_nodisable(ndev); | ||
1284 | |||
1285 | /* Disable Tx, and Rx if wake-on-LAN is disabled. */ | ||
1286 | tempval = gfar_read(®s->maccfg1); | ||
1287 | |||
1288 | tempval &= ~MACCFG1_TX_EN; | ||
1289 | |||
1290 | if (!magic_packet) | ||
1291 | tempval &= ~MACCFG1_RX_EN; | ||
1292 | |||
1293 | gfar_write(®s->maccfg1, tempval); | ||
1294 | |||
1295 | unlock_rx_qs(priv); | ||
1296 | unlock_tx_qs(priv); | ||
1297 | local_irq_restore(flags); | ||
1298 | |||
1299 | disable_napi(priv); | ||
1300 | |||
1301 | if (magic_packet) { | ||
1302 | /* Enable interrupt on Magic Packet */ | ||
1303 | gfar_write(®s->imask, IMASK_MAG); | ||
1304 | |||
1305 | /* Enable Magic Packet mode */ | ||
1306 | tempval = gfar_read(®s->maccfg2); | ||
1307 | tempval |= MACCFG2_MPEN; | ||
1308 | gfar_write(®s->maccfg2, tempval); | ||
1309 | } else { | ||
1310 | phy_stop(priv->phydev); | ||
1311 | } | ||
1312 | } | ||
1313 | |||
1314 | return 0; | ||
1315 | } | ||
1316 | |||
1317 | static int gfar_resume(struct device *dev) | ||
1318 | { | ||
1319 | struct gfar_private *priv = dev_get_drvdata(dev); | ||
1320 | struct net_device *ndev = priv->ndev; | ||
1321 | struct gfar __iomem *regs = priv->gfargrp[0].regs; | ||
1322 | unsigned long flags; | ||
1323 | u32 tempval; | ||
1324 | int magic_packet = priv->wol_en && | ||
1325 | (priv->device_flags & FSL_GIANFAR_DEV_HAS_MAGIC_PACKET); | ||
1326 | |||
1327 | if (!netif_running(ndev)) { | ||
1328 | netif_device_attach(ndev); | ||
1329 | return 0; | ||
1330 | } | ||
1331 | |||
1332 | if (!magic_packet && priv->phydev) | ||
1333 | phy_start(priv->phydev); | ||
1334 | |||
1335 | /* Disable Magic Packet mode, in case something | ||
1336 | * else woke us up. | ||
1337 | */ | ||
1338 | local_irq_save(flags); | ||
1339 | lock_tx_qs(priv); | ||
1340 | lock_rx_qs(priv); | ||
1341 | |||
1342 | tempval = gfar_read(®s->maccfg2); | ||
1343 | tempval &= ~MACCFG2_MPEN; | ||
1344 | gfar_write(®s->maccfg2, tempval); | ||
1345 | |||
1346 | gfar_start(ndev); | ||
1347 | |||
1348 | unlock_rx_qs(priv); | ||
1349 | unlock_tx_qs(priv); | ||
1350 | local_irq_restore(flags); | ||
1351 | |||
1352 | netif_device_attach(ndev); | ||
1353 | |||
1354 | enable_napi(priv); | ||
1355 | |||
1356 | return 0; | ||
1357 | } | ||
1358 | |||
1359 | static int gfar_restore(struct device *dev) | ||
1360 | { | ||
1361 | struct gfar_private *priv = dev_get_drvdata(dev); | ||
1362 | struct net_device *ndev = priv->ndev; | ||
1363 | |||
1364 | if (!netif_running(ndev)) | ||
1365 | return 0; | ||
1366 | |||
1367 | gfar_init_bds(ndev); | ||
1368 | init_registers(ndev); | ||
1369 | gfar_set_mac_address(ndev); | ||
1370 | gfar_init_mac(ndev); | ||
1371 | gfar_start(ndev); | ||
1372 | |||
1373 | priv->oldlink = 0; | ||
1374 | priv->oldspeed = 0; | ||
1375 | priv->oldduplex = -1; | ||
1376 | |||
1377 | if (priv->phydev) | ||
1378 | phy_start(priv->phydev); | ||
1379 | |||
1380 | netif_device_attach(ndev); | ||
1381 | enable_napi(priv); | ||
1382 | |||
1383 | return 0; | ||
1384 | } | ||
1385 | |||
1386 | static struct dev_pm_ops gfar_pm_ops = { | ||
1387 | .suspend = gfar_suspend, | ||
1388 | .resume = gfar_resume, | ||
1389 | .freeze = gfar_suspend, | ||
1390 | .thaw = gfar_resume, | ||
1391 | .restore = gfar_restore, | ||
1392 | }; | ||
1393 | |||
1394 | #define GFAR_PM_OPS (&gfar_pm_ops) | ||
1395 | |||
1396 | #else | ||
1397 | |||
1398 | #define GFAR_PM_OPS NULL | ||
1399 | |||
1400 | #endif | ||
1401 | |||
1402 | /* Reads the controller's registers to determine what interface | ||
1403 | * connects it to the PHY. | ||
1404 | */ | ||
1405 | static phy_interface_t gfar_get_interface(struct net_device *dev) | ||
1406 | { | ||
1407 | struct gfar_private *priv = netdev_priv(dev); | ||
1408 | struct gfar __iomem *regs = priv->gfargrp[0].regs; | ||
1409 | u32 ecntrl; | ||
1410 | |||
1411 | ecntrl = gfar_read(®s->ecntrl); | ||
1412 | |||
1413 | if (ecntrl & ECNTRL_SGMII_MODE) | ||
1414 | return PHY_INTERFACE_MODE_SGMII; | ||
1415 | |||
1416 | if (ecntrl & ECNTRL_TBI_MODE) { | ||
1417 | if (ecntrl & ECNTRL_REDUCED_MODE) | ||
1418 | return PHY_INTERFACE_MODE_RTBI; | ||
1419 | else | ||
1420 | return PHY_INTERFACE_MODE_TBI; | ||
1421 | } | ||
1422 | |||
1423 | if (ecntrl & ECNTRL_REDUCED_MODE) { | ||
1424 | if (ecntrl & ECNTRL_REDUCED_MII_MODE) | ||
1425 | return PHY_INTERFACE_MODE_RMII; | ||
1426 | else { | ||
1427 | phy_interface_t interface = priv->interface; | ||
1428 | |||
1429 | /* | ||
1430 | * This isn't autodetected right now, so it must | ||
1431 | * be set by the device tree or platform code. | ||
1432 | */ | ||
1433 | if (interface == PHY_INTERFACE_MODE_RGMII_ID) | ||
1434 | return PHY_INTERFACE_MODE_RGMII_ID; | ||
1435 | |||
1436 | return PHY_INTERFACE_MODE_RGMII; | ||
1437 | } | ||
1438 | } | ||
1439 | |||
1440 | if (priv->device_flags & FSL_GIANFAR_DEV_HAS_GIGABIT) | ||
1441 | return PHY_INTERFACE_MODE_GMII; | ||
1442 | |||
1443 | return PHY_INTERFACE_MODE_MII; | ||
1444 | } | ||
1445 | |||
1446 | |||
1447 | /* Initializes driver's PHY state, and attaches to the PHY. | ||
1448 | * Returns 0 on success. | ||
1449 | */ | ||
1450 | static int init_phy(struct net_device *dev) | ||
1451 | { | ||
1452 | struct gfar_private *priv = netdev_priv(dev); | ||
1453 | uint gigabit_support = | ||
1454 | priv->device_flags & FSL_GIANFAR_DEV_HAS_GIGABIT ? | ||
1455 | SUPPORTED_1000baseT_Full : 0; | ||
1456 | phy_interface_t interface; | ||
1457 | |||
1458 | priv->oldlink = 0; | ||
1459 | priv->oldspeed = 0; | ||
1460 | priv->oldduplex = -1; | ||
1461 | |||
1462 | interface = gfar_get_interface(dev); | ||
1463 | |||
1464 | priv->phydev = of_phy_connect(dev, priv->phy_node, &adjust_link, 0, | ||
1465 | interface); | ||
1466 | if (!priv->phydev) | ||
1467 | priv->phydev = of_phy_connect_fixed_link(dev, &adjust_link, | ||
1468 | interface); | ||
1469 | if (!priv->phydev) { | ||
1470 | dev_err(&dev->dev, "could not attach to PHY\n"); | ||
1471 | return -ENODEV; | ||
1472 | } | ||
1473 | |||
1474 | if (interface == PHY_INTERFACE_MODE_SGMII) | ||
1475 | gfar_configure_serdes(dev); | ||
1476 | |||
1477 | /* Remove any features not supported by the controller */ | ||
1478 | priv->phydev->supported &= (GFAR_SUPPORTED | gigabit_support); | ||
1479 | priv->phydev->advertising = priv->phydev->supported; | ||
1480 | |||
1481 | return 0; | ||
1482 | } | ||
1483 | |||
1484 | /* | ||
1485 | * Initialize TBI PHY interface for communicating with the | ||
1486 | * SERDES lynx PHY on the chip. We communicate with this PHY | ||
1487 | * through the MDIO bus on each controller, treating it as a | ||
1488 | * "normal" PHY at the address found in the TBIPA register. We assume | ||
1489 | * that the TBIPA register is valid. Either the MDIO bus code will set | ||
1490 | * it to a value that doesn't conflict with other PHYs on the bus, or the | ||
1491 | * value doesn't matter, as there are no other PHYs on the bus. | ||
1492 | */ | ||
1493 | static void gfar_configure_serdes(struct net_device *dev) | ||
1494 | { | ||
1495 | struct gfar_private *priv = netdev_priv(dev); | ||
1496 | struct phy_device *tbiphy; | ||
1497 | |||
1498 | if (!priv->tbi_node) { | ||
1499 | dev_warn(&dev->dev, "error: SGMII mode requires that the " | ||
1500 | "device tree specify a tbi-handle\n"); | ||
1501 | return; | ||
1502 | } | ||
1503 | |||
1504 | tbiphy = of_phy_find_device(priv->tbi_node); | ||
1505 | if (!tbiphy) { | ||
1506 | dev_err(&dev->dev, "error: Could not get TBI device\n"); | ||
1507 | return; | ||
1508 | } | ||
1509 | |||
1510 | /* | ||
1511 | * If the link is already up, we must already be ok, and don't need to | ||
1512 | * configure and reset the TBI<->SerDes link. Maybe U-Boot configured | ||
1513 | * everything for us? Resetting it takes the link down and requires | ||
1514 | * several seconds for it to come back. | ||
1515 | */ | ||
1516 | if (phy_read(tbiphy, MII_BMSR) & BMSR_LSTATUS) | ||
1517 | return; | ||
1518 | |||
1519 | /* Single clk mode, mii mode off (for serdes communication) */ | ||
1520 | phy_write(tbiphy, MII_TBICON, TBICON_CLK_SELECT); | ||
1521 | |||
1522 | phy_write(tbiphy, MII_ADVERTISE, | ||
1523 | ADVERTISE_1000XFULL | ADVERTISE_1000XPAUSE | | ||
1524 | ADVERTISE_1000XPSE_ASYM); | ||
1525 | |||
1526 | phy_write(tbiphy, MII_BMCR, BMCR_ANENABLE | | ||
1527 | BMCR_ANRESTART | BMCR_FULLDPLX | BMCR_SPEED1000); | ||
1528 | } | ||
1529 | |||
1530 | static void init_registers(struct net_device *dev) | ||
1531 | { | ||
1532 | struct gfar_private *priv = netdev_priv(dev); | ||
1533 | struct gfar __iomem *regs = NULL; | ||
1534 | int i = 0; | ||
1535 | |||
1536 | for (i = 0; i < priv->num_grps; i++) { | ||
1537 | regs = priv->gfargrp[i].regs; | ||
1538 | /* Clear IEVENT */ | ||
1539 | gfar_write(®s->ievent, IEVENT_INIT_CLEAR); | ||
1540 | |||
1541 | /* Initialize IMASK */ | ||
1542 | gfar_write(®s->imask, IMASK_INIT_CLEAR); | ||
1543 | } | ||
1544 | |||
1545 | regs = priv->gfargrp[0].regs; | ||
1546 | /* Init hash registers to zero */ | ||
1547 | gfar_write(®s->igaddr0, 0); | ||
1548 | gfar_write(®s->igaddr1, 0); | ||
1549 | gfar_write(®s->igaddr2, 0); | ||
1550 | gfar_write(®s->igaddr3, 0); | ||
1551 | gfar_write(®s->igaddr4, 0); | ||
1552 | gfar_write(®s->igaddr5, 0); | ||
1553 | gfar_write(®s->igaddr6, 0); | ||
1554 | gfar_write(®s->igaddr7, 0); | ||
1555 | |||
1556 | gfar_write(®s->gaddr0, 0); | ||
1557 | gfar_write(®s->gaddr1, 0); | ||
1558 | gfar_write(®s->gaddr2, 0); | ||
1559 | gfar_write(®s->gaddr3, 0); | ||
1560 | gfar_write(®s->gaddr4, 0); | ||
1561 | gfar_write(®s->gaddr5, 0); | ||
1562 | gfar_write(®s->gaddr6, 0); | ||
1563 | gfar_write(®s->gaddr7, 0); | ||
1564 | |||
1565 | /* Zero out the rmon mib registers if the device has them */ | ||
1566 | if (priv->device_flags & FSL_GIANFAR_DEV_HAS_RMON) { | ||
1567 | memset_io(&(regs->rmon), 0, sizeof(struct rmon_mib)); | ||
1568 | |||
1569 | /* Mask off the CAM interrupts */ | ||
1570 | gfar_write(®s->rmon.cam1, 0xffffffff); | ||
1571 | gfar_write(®s->rmon.cam2, 0xffffffff); | ||
1572 | } | ||
1573 | |||
1574 | /* Initialize the max receive buffer length */ | ||
1575 | gfar_write(®s->mrblr, priv->rx_buffer_size); | ||
1576 | |||
1577 | /* Initialize the Minimum Frame Length Register */ | ||
1578 | gfar_write(®s->minflr, MINFLR_INIT_SETTINGS); | ||
1579 | } | ||
1580 | |||
1581 | static int __gfar_is_rx_idle(struct gfar_private *priv) | ||
1582 | { | ||
1583 | u32 res; | ||
1584 | |||
1585 | /* | ||
1586 | * Normally the TSEC should not hang on GRS commands, so we should | ||
1587 | * actually wait for IEVENT_GRSC flag. | ||
1588 | */ | ||
1589 | if (likely(!gfar_has_errata(priv, GFAR_ERRATA_A002))) | ||
1590 | return 0; | ||
1591 | |||
1592 | /* | ||
1593 | * Read the eTSEC register at offset 0xD1C. If bits 7-14 are | ||
1594 | * the same as bits 23-30, the eTSEC Rx is assumed to be idle | ||
1595 | * and the Rx can be safely reset. | ||
1596 | */ | ||
1597 | res = gfar_read((void __iomem *)priv->gfargrp[0].regs + 0xd1c); | ||
1598 | res &= 0x7f807f80; | ||
1599 | if ((res & 0xffff) == (res >> 16)) | ||
1600 | return 1; | ||
1601 | |||
1602 | return 0; | ||
1603 | } | ||
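/*
 * Illustrative sketch only (not part of the driver): the check above can be
 * read as "compare the two 8-bit fields that the 0x7f807f80 mask keeps, one
 * in each halfword of the register, and treat the Rx as idle when they are
 * equal". The helper below restates that test in isolation.
 */
static inline int gfar_rx_idle_fields_match(u32 reg)
{
	reg &= 0x7f807f80;			/* keep the two 8-bit fields */
	return (reg & 0xffff) == (reg >> 16);	/* lower field == upper field */
}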
1604 | |||
1605 | /* Halt the receive and transmit queues */ | ||
1606 | static void gfar_halt_nodisable(struct net_device *dev) | ||
1607 | { | ||
1608 | struct gfar_private *priv = netdev_priv(dev); | ||
1609 | struct gfar __iomem *regs = NULL; | ||
1610 | u32 tempval; | ||
1611 | int i = 0; | ||
1612 | |||
1613 | for (i = 0; i < priv->num_grps; i++) { | ||
1614 | regs = priv->gfargrp[i].regs; | ||
1615 | /* Mask all interrupts */ | ||
1616 | gfar_write(®s->imask, IMASK_INIT_CLEAR); | ||
1617 | |||
1618 | /* Clear all interrupts */ | ||
1619 | gfar_write(®s->ievent, IEVENT_INIT_CLEAR); | ||
1620 | } | ||
1621 | |||
1622 | regs = priv->gfargrp[0].regs; | ||
1623 | /* Stop the DMA, and wait for it to stop */ | ||
1624 | tempval = gfar_read(®s->dmactrl); | ||
1625 | if ((tempval & (DMACTRL_GRS | DMACTRL_GTS)) | ||
1626 | != (DMACTRL_GRS | DMACTRL_GTS)) { | ||
1627 | int ret; | ||
1628 | |||
1629 | tempval |= (DMACTRL_GRS | DMACTRL_GTS); | ||
1630 | gfar_write(®s->dmactrl, tempval); | ||
1631 | |||
1632 | do { | ||
1633 | ret = spin_event_timeout(((gfar_read(®s->ievent) & | ||
1634 | (IEVENT_GRSC | IEVENT_GTSC)) == | ||
1635 | (IEVENT_GRSC | IEVENT_GTSC)), 1000000, 0); | ||
1636 | if (!ret && !(gfar_read(®s->ievent) & IEVENT_GRSC)) | ||
1637 | ret = __gfar_is_rx_idle(priv); | ||
1638 | } while (!ret); | ||
1639 | } | ||
1640 | } | ||
1641 | |||
1642 | /* Halt the receive and transmit queues */ | ||
1643 | void gfar_halt(struct net_device *dev) | ||
1644 | { | ||
1645 | struct gfar_private *priv = netdev_priv(dev); | ||
1646 | struct gfar __iomem *regs = priv->gfargrp[0].regs; | ||
1647 | u32 tempval; | ||
1648 | |||
1649 | gfar_halt_nodisable(dev); | ||
1650 | |||
1651 | /* Disable Rx and Tx */ | ||
1652 | tempval = gfar_read(®s->maccfg1); | ||
1653 | tempval &= ~(MACCFG1_RX_EN | MACCFG1_TX_EN); | ||
1654 | gfar_write(®s->maccfg1, tempval); | ||
1655 | } | ||
1656 | |||
1657 | static void free_grp_irqs(struct gfar_priv_grp *grp) | ||
1658 | { | ||
1659 | free_irq(grp->interruptError, grp); | ||
1660 | free_irq(grp->interruptTransmit, grp); | ||
1661 | free_irq(grp->interruptReceive, grp); | ||
1662 | } | ||
1663 | |||
1664 | void stop_gfar(struct net_device *dev) | ||
1665 | { | ||
1666 | struct gfar_private *priv = netdev_priv(dev); | ||
1667 | unsigned long flags; | ||
1668 | int i; | ||
1669 | |||
1670 | phy_stop(priv->phydev); | ||
1671 | |||
1672 | |||
1673 | /* Lock it down */ | ||
1674 | local_irq_save(flags); | ||
1675 | lock_tx_qs(priv); | ||
1676 | lock_rx_qs(priv); | ||
1677 | |||
1678 | gfar_halt(dev); | ||
1679 | |||
1680 | unlock_rx_qs(priv); | ||
1681 | unlock_tx_qs(priv); | ||
1682 | local_irq_restore(flags); | ||
1683 | |||
1684 | /* Free the IRQs */ | ||
1685 | if (priv->device_flags & FSL_GIANFAR_DEV_HAS_MULTI_INTR) { | ||
1686 | for (i = 0; i < priv->num_grps; i++) | ||
1687 | free_grp_irqs(&priv->gfargrp[i]); | ||
1688 | } else { | ||
1689 | for (i = 0; i < priv->num_grps; i++) | ||
1690 | free_irq(priv->gfargrp[i].interruptTransmit, | ||
1691 | &priv->gfargrp[i]); | ||
1692 | } | ||
1693 | |||
1694 | free_skb_resources(priv); | ||
1695 | } | ||
1696 | |||
1697 | static void free_skb_tx_queue(struct gfar_priv_tx_q *tx_queue) | ||
1698 | { | ||
1699 | struct txbd8 *txbdp; | ||
1700 | struct gfar_private *priv = netdev_priv(tx_queue->dev); | ||
1701 | int i, j; | ||
1702 | |||
1703 | txbdp = tx_queue->tx_bd_base; | ||
1704 | |||
1705 | for (i = 0; i < tx_queue->tx_ring_size; i++) { | ||
1706 | if (!tx_queue->tx_skbuff[i]) | ||
1707 | continue; | ||
1708 | |||
1709 | dma_unmap_single(&priv->ofdev->dev, txbdp->bufPtr, | ||
1710 | txbdp->length, DMA_TO_DEVICE); | ||
1711 | txbdp->lstatus = 0; | ||
1712 | for (j = 0; j < skb_shinfo(tx_queue->tx_skbuff[i])->nr_frags; | ||
1713 | j++) { | ||
1714 | txbdp++; | ||
1715 | dma_unmap_page(&priv->ofdev->dev, txbdp->bufPtr, | ||
1716 | txbdp->length, DMA_TO_DEVICE); | ||
1717 | } | ||
1718 | txbdp++; | ||
1719 | dev_kfree_skb_any(tx_queue->tx_skbuff[i]); | ||
1720 | tx_queue->tx_skbuff[i] = NULL; | ||
1721 | } | ||
1722 | kfree(tx_queue->tx_skbuff); | ||
1723 | } | ||
1724 | |||
1725 | static void free_skb_rx_queue(struct gfar_priv_rx_q *rx_queue) | ||
1726 | { | ||
1727 | struct rxbd8 *rxbdp; | ||
1728 | struct gfar_private *priv = netdev_priv(rx_queue->dev); | ||
1729 | int i; | ||
1730 | |||
1731 | rxbdp = rx_queue->rx_bd_base; | ||
1732 | |||
1733 | for (i = 0; i < rx_queue->rx_ring_size; i++) { | ||
1734 | if (rx_queue->rx_skbuff[i]) { | ||
1735 | dma_unmap_single(&priv->ofdev->dev, | ||
1736 | rxbdp->bufPtr, priv->rx_buffer_size, | ||
1737 | DMA_FROM_DEVICE); | ||
1738 | dev_kfree_skb_any(rx_queue->rx_skbuff[i]); | ||
1739 | rx_queue->rx_skbuff[i] = NULL; | ||
1740 | } | ||
1741 | rxbdp->lstatus = 0; | ||
1742 | rxbdp->bufPtr = 0; | ||
1743 | rxbdp++; | ||
1744 | } | ||
1745 | kfree(rx_queue->rx_skbuff); | ||
1746 | } | ||
1747 | |||
1748 | /* If there are any tx skbs or rx skbs still around, free them. | ||
1749 | * Then free tx_skbuff and rx_skbuff */ | ||
1750 | static void free_skb_resources(struct gfar_private *priv) | ||
1751 | { | ||
1752 | struct gfar_priv_tx_q *tx_queue = NULL; | ||
1753 | struct gfar_priv_rx_q *rx_queue = NULL; | ||
1754 | int i; | ||
1755 | |||
1756 | /* Go through all the buffer descriptors and free their data buffers */ | ||
1757 | for (i = 0; i < priv->num_tx_queues; i++) { | ||
1758 | tx_queue = priv->tx_queue[i]; | ||
1759 | if (tx_queue->tx_skbuff) | ||
1760 | free_skb_tx_queue(tx_queue); | ||
1761 | } | ||
1762 | |||
1763 | for (i = 0; i < priv->num_rx_queues; i++) { | ||
1764 | rx_queue = priv->rx_queue[i]; | ||
1765 | if (rx_queue->rx_skbuff) | ||
1766 | free_skb_rx_queue(rx_queue); | ||
1767 | } | ||
1768 | |||
1769 | dma_free_coherent(&priv->ofdev->dev, | ||
1770 | sizeof(struct txbd8) * priv->total_tx_ring_size + | ||
1771 | sizeof(struct rxbd8) * priv->total_rx_ring_size, | ||
1772 | priv->tx_queue[0]->tx_bd_base, | ||
1773 | priv->tx_queue[0]->tx_bd_dma_base); | ||
1774 | skb_queue_purge(&priv->rx_recycle); | ||
1775 | } | ||
1776 | |||
1777 | void gfar_start(struct net_device *dev) | ||
1778 | { | ||
1779 | struct gfar_private *priv = netdev_priv(dev); | ||
1780 | struct gfar __iomem *regs = priv->gfargrp[0].regs; | ||
1781 | u32 tempval; | ||
1782 | int i = 0; | ||
1783 | |||
1784 | /* Enable Rx and Tx in MACCFG1 */ | ||
1785 | tempval = gfar_read(®s->maccfg1); | ||
1786 | tempval |= (MACCFG1_RX_EN | MACCFG1_TX_EN); | ||
1787 | gfar_write(®s->maccfg1, tempval); | ||
1788 | |||
1789 | /* Initialize DMACTRL to have WWR and WOP */ | ||
1790 | tempval = gfar_read(®s->dmactrl); | ||
1791 | tempval |= DMACTRL_INIT_SETTINGS; | ||
1792 | gfar_write(®s->dmactrl, tempval); | ||
1793 | |||
1794 | /* Make sure we aren't stopped */ | ||
1795 | tempval = gfar_read(®s->dmactrl); | ||
1796 | tempval &= ~(DMACTRL_GRS | DMACTRL_GTS); | ||
1797 | gfar_write(®s->dmactrl, tempval); | ||
1798 | |||
1799 | for (i = 0; i < priv->num_grps; i++) { | ||
1800 | regs = priv->gfargrp[i].regs; | ||
1801 | /* Clear THLT/RHLT, so that the DMA starts polling now */ | ||
1802 | gfar_write(®s->tstat, priv->gfargrp[i].tstat); | ||
1803 | gfar_write(®s->rstat, priv->gfargrp[i].rstat); | ||
1804 | /* Unmask the interrupts we look for */ | ||
1805 | gfar_write(®s->imask, IMASK_DEFAULT); | ||
1806 | } | ||
1807 | |||
1808 | dev->trans_start = jiffies; /* prevent tx timeout */ | ||
1809 | } | ||
1810 | |||
1811 | void gfar_configure_coalescing(struct gfar_private *priv, | ||
1812 | unsigned long tx_mask, unsigned long rx_mask) | ||
1813 | { | ||
1814 | struct gfar __iomem *regs = priv->gfargrp[0].regs; | ||
1815 | u32 __iomem *baddr; | ||
1816 | int i = 0; | ||
1817 | |||
1818 | /* Backward compatible case: even if we enable | ||
1819 | * multiple queues, there's only a single register to program | ||
1820 | */ | ||
1821 | gfar_write(®s->txic, 0); | ||
1822 | if (likely(priv->tx_queue[0]->txcoalescing)) | ||
1823 | gfar_write(®s->txic, priv->tx_queue[0]->txic); | ||
1824 | |||
1825 | gfar_write(®s->rxic, 0); | ||
1826 | if (unlikely(priv->rx_queue[0]->rxcoalescing)) | ||
1827 | gfar_write(®s->rxic, priv->rx_queue[0]->rxic); | ||
1828 | |||
1829 | if (priv->mode == MQ_MG_MODE) { | ||
1830 | baddr = ®s->txic0; | ||
1831 | for_each_set_bit(i, &tx_mask, priv->num_tx_queues) { | ||
1832 | if (likely(priv->tx_queue[i]->txcoalescing)) { | ||
1833 | gfar_write(baddr + i, 0); | ||
1834 | gfar_write(baddr + i, priv->tx_queue[i]->txic); | ||
1835 | } | ||
1836 | } | ||
1837 | |||
1838 | baddr = ®s->rxic0; | ||
1839 | for_each_set_bit(i, &rx_mask, priv->num_rx_queues) { | ||
1840 | if (likely(priv->rx_queue[i]->rxcoalescing)) { | ||
1841 | gfar_write(baddr + i, 0); | ||
1842 | gfar_write(baddr + i, priv->rx_queue[i]->rxic); | ||
1843 | } | ||
1844 | } | ||
1845 | } | ||
1846 | } | ||
1847 | |||
1848 | static int register_grp_irqs(struct gfar_priv_grp *grp) | ||
1849 | { | ||
1850 | struct gfar_private *priv = grp->priv; | ||
1851 | struct net_device *dev = priv->ndev; | ||
1852 | int err; | ||
1853 | |||
1854 | /* If the device has multiple interrupts, register for | ||
1855 | * them. Otherwise, only register for the one */ | ||
1856 | if (priv->device_flags & FSL_GIANFAR_DEV_HAS_MULTI_INTR) { | ||
1857 | /* Install our interrupt handlers for Error, | ||
1858 | * Transmit, and Receive */ | ||
1859 | if ((err = request_irq(grp->interruptError, gfar_error, 0, | ||
1860 | grp->int_name_er, grp)) < 0) { | ||
1861 | netif_err(priv, intr, dev, "Can't get IRQ %d\n", | ||
1862 | grp->interruptError); | ||
1863 | |||
1864 | goto err_irq_fail; | ||
1865 | } | ||
1866 | |||
1867 | if ((err = request_irq(grp->interruptTransmit, gfar_transmit, | ||
1868 | 0, grp->int_name_tx, grp)) < 0) { | ||
1869 | netif_err(priv, intr, dev, "Can't get IRQ %d\n", | ||
1870 | grp->interruptTransmit); | ||
1871 | goto tx_irq_fail; | ||
1872 | } | ||
1873 | |||
1874 | if ((err = request_irq(grp->interruptReceive, gfar_receive, 0, | ||
1875 | grp->int_name_rx, grp)) < 0) { | ||
1876 | netif_err(priv, intr, dev, "Can't get IRQ %d\n", | ||
1877 | grp->interruptReceive); | ||
1878 | goto rx_irq_fail; | ||
1879 | } | ||
1880 | } else { | ||
1881 | if ((err = request_irq(grp->interruptTransmit, gfar_interrupt, 0, | ||
1882 | grp->int_name_tx, grp)) < 0) { | ||
1883 | netif_err(priv, intr, dev, "Can't get IRQ %d\n", | ||
1884 | grp->interruptTransmit); | ||
1885 | goto err_irq_fail; | ||
1886 | } | ||
1887 | } | ||
1888 | |||
1889 | return 0; | ||
1890 | |||
1891 | rx_irq_fail: | ||
1892 | free_irq(grp->interruptTransmit, grp); | ||
1893 | tx_irq_fail: | ||
1894 | free_irq(grp->interruptError, grp); | ||
1895 | err_irq_fail: | ||
1896 | return err; | ||
1897 | |||
1898 | } | ||
1899 | |||
1900 | /* Bring the controller up and running */ | ||
1901 | int startup_gfar(struct net_device *ndev) | ||
1902 | { | ||
1903 | struct gfar_private *priv = netdev_priv(ndev); | ||
1904 | struct gfar __iomem *regs = NULL; | ||
1905 | int err, i, j; | ||
1906 | |||
1907 | for (i = 0; i < priv->num_grps; i++) { | ||
1908 | regs = priv->gfargrp[i].regs; | ||
1909 | gfar_write(®s->imask, IMASK_INIT_CLEAR); | ||
1910 | } | ||
1911 | |||
1912 | regs = priv->gfargrp[0].regs; | ||
1913 | err = gfar_alloc_skb_resources(ndev); | ||
1914 | if (err) | ||
1915 | return err; | ||
1916 | |||
1917 | gfar_init_mac(ndev); | ||
1918 | |||
1919 | for (i = 0; i < priv->num_grps; i++) { | ||
1920 | err = register_grp_irqs(&priv->gfargrp[i]); | ||
1921 | if (err) { | ||
1922 | for (j = 0; j < i; j++) | ||
1923 | free_grp_irqs(&priv->gfargrp[j]); | ||
1924 | goto irq_fail; | ||
1925 | } | ||
1926 | } | ||
1927 | |||
1928 | /* Start the controller */ | ||
1929 | gfar_start(ndev); | ||
1930 | |||
1931 | phy_start(priv->phydev); | ||
1932 | |||
1933 | gfar_configure_coalescing(priv, 0xFF, 0xFF); | ||
1934 | |||
1935 | return 0; | ||
1936 | |||
1937 | irq_fail: | ||
1938 | free_skb_resources(priv); | ||
1939 | return err; | ||
1940 | } | ||
1941 | |||
1942 | /* Called when something needs to use the ethernet device */ | ||
1943 | /* Returns 0 for success. */ | ||
1944 | static int gfar_enet_open(struct net_device *dev) | ||
1945 | { | ||
1946 | struct gfar_private *priv = netdev_priv(dev); | ||
1947 | int err; | ||
1948 | |||
1949 | enable_napi(priv); | ||
1950 | |||
1951 | skb_queue_head_init(&priv->rx_recycle); | ||
1952 | |||
1953 | /* Initialize a bunch of registers */ | ||
1954 | init_registers(dev); | ||
1955 | |||
1956 | gfar_set_mac_address(dev); | ||
1957 | |||
1958 | err = init_phy(dev); | ||
1959 | |||
1960 | if (err) { | ||
1961 | disable_napi(priv); | ||
1962 | return err; | ||
1963 | } | ||
1964 | |||
1965 | err = startup_gfar(dev); | ||
1966 | if (err) { | ||
1967 | disable_napi(priv); | ||
1968 | return err; | ||
1969 | } | ||
1970 | |||
1971 | netif_tx_start_all_queues(dev); | ||
1972 | |||
1973 | device_set_wakeup_enable(&dev->dev, priv->wol_en); | ||
1974 | |||
1975 | return err; | ||
1976 | } | ||
1977 | |||
1978 | static inline struct txfcb *gfar_add_fcb(struct sk_buff *skb) | ||
1979 | { | ||
1980 | struct txfcb *fcb = (struct txfcb *)skb_push(skb, GMAC_FCB_LEN); | ||
1981 | |||
1982 | memset(fcb, 0, GMAC_FCB_LEN); | ||
1983 | |||
1984 | return fcb; | ||
1985 | } | ||
1986 | |||
1987 | static inline void gfar_tx_checksum(struct sk_buff *skb, struct txfcb *fcb) | ||
1988 | { | ||
1989 | u8 flags = 0; | ||
1990 | |||
1991 | /* If we're here, it's an IP packet with a TCP or UDP | ||
1992 | * payload. We set it to checksum, using a pseudo-header | ||
1993 | * we provide | ||
1994 | */ | ||
1995 | flags = TXFCB_DEFAULT; | ||
1996 | |||
1997 | /* Tell the controller what the protocol is */ | ||
1998 | /* And provide the already calculated phcs */ | ||
1999 | if (ip_hdr(skb)->protocol == IPPROTO_UDP) { | ||
2000 | flags |= TXFCB_UDP; | ||
2001 | fcb->phcs = udp_hdr(skb)->check; | ||
2002 | } else | ||
2003 | fcb->phcs = tcp_hdr(skb)->check; | ||
2004 | |||
2005 | /* l3os is the distance between the start of the | ||
2006 | * frame (skb->data) and the start of the IP hdr. | ||
2007 | * l4os is the distance between the start of the | ||
2008 | * l3 hdr and the l4 hdr */ | ||
2009 | fcb->l3os = (u16)(skb_network_offset(skb) - GMAC_FCB_LEN); | ||
2010 | fcb->l4os = skb_network_header_len(skb); | ||
2011 | |||
2012 | fcb->flags = flags; | ||
2013 | } | ||
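/*
 * Worked example for the offsets above (illustrative only; it assumes an
 * untagged IPv4 frame without IP options and an 8-byte FCB, i.e.
 * GMAC_FCB_LEN == 8): after gfar_add_fcb() has pushed the FCB,
 * skb_network_offset() is ETH_HLEN + GMAC_FCB_LEN = 14 + 8 = 22, so
 *	fcb->l3os = 22 - 8 = 14		(the Ethernet header length)
 *	fcb->l4os = 20			(the IPv4 header length)
 * i.e. l3os is measured from the end of the FCB and l4os from the start of
 * the IP header, exactly as the comment above describes.
 */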
2014 | |||
2015 | inline void gfar_tx_vlan(struct sk_buff *skb, struct txfcb *fcb) | ||
2016 | { | ||
2017 | fcb->flags |= TXFCB_VLN; | ||
2018 | fcb->vlctl = vlan_tx_tag_get(skb); | ||
2019 | } | ||
2020 | |||
2021 | static inline struct txbd8 *skip_txbd(struct txbd8 *bdp, int stride, | ||
2022 | struct txbd8 *base, int ring_size) | ||
2023 | { | ||
2024 | struct txbd8 *new_bd = bdp + stride; | ||
2025 | |||
2026 | return (new_bd >= (base + ring_size)) ? (new_bd - ring_size) : new_bd; | ||
2027 | } | ||
2028 | |||
2029 | static inline struct txbd8 *next_txbd(struct txbd8 *bdp, struct txbd8 *base, | ||
2030 | int ring_size) | ||
2031 | { | ||
2032 | return skip_txbd(bdp, 1, base, ring_size); | ||
2033 | } | ||
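/*
 * Worked example (illustrative only): with a ring of 8 descriptors and
 * bdp == base + 6, skip_txbd(bdp, 3, base, 8) first computes base + 9,
 * notices that this runs past the end of the ring, and returns base + 1;
 * the stride simply wraps around modulo the ring size, and next_txbd() is
 * the stride-1 case.
 */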
2034 | |||
2035 | /* This is called by the kernel when a frame is ready for transmission. */ | ||
2036 | /* It is pointed to by the dev->hard_start_xmit function pointer */ | ||
2037 | static int gfar_start_xmit(struct sk_buff *skb, struct net_device *dev) | ||
2038 | { | ||
2039 | struct gfar_private *priv = netdev_priv(dev); | ||
2040 | struct gfar_priv_tx_q *tx_queue = NULL; | ||
2041 | struct netdev_queue *txq; | ||
2042 | struct gfar __iomem *regs = NULL; | ||
2043 | struct txfcb *fcb = NULL; | ||
2044 | struct txbd8 *txbdp, *txbdp_start, *base, *txbdp_tstamp = NULL; | ||
2045 | u32 lstatus; | ||
2046 | int i, rq = 0, do_tstamp = 0; | ||
2047 | u32 bufaddr; | ||
2048 | unsigned long flags; | ||
2049 | unsigned int nr_frags, nr_txbds, length; | ||
2050 | |||
2051 | /* | ||
2052 | * TOE=1 frames larger than 2500 bytes may see excess delays | ||
2053 | * before start of transmission. | ||
2054 | */ | ||
2055 | if (unlikely(gfar_has_errata(priv, GFAR_ERRATA_76) && | ||
2056 | skb->ip_summed == CHECKSUM_PARTIAL && | ||
2057 | skb->len > 2500)) { | ||
2058 | int ret; | ||
2059 | |||
2060 | ret = skb_checksum_help(skb); | ||
2061 | if (ret) | ||
2062 | return ret; | ||
2063 | } | ||
2064 | |||
2065 | rq = skb->queue_mapping; | ||
2066 | tx_queue = priv->tx_queue[rq]; | ||
2067 | txq = netdev_get_tx_queue(dev, rq); | ||
2068 | base = tx_queue->tx_bd_base; | ||
2069 | regs = tx_queue->grp->regs; | ||
2070 | |||
2071 | /* check if time stamp should be generated */ | ||
2072 | if (unlikely(skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP && | ||
2073 | priv->hwts_tx_en)) | ||
2074 | do_tstamp = 1; | ||
2075 | |||
2076 | /* make space for additional header when fcb is needed */ | ||
2077 | if (((skb->ip_summed == CHECKSUM_PARTIAL) || | ||
2078 | vlan_tx_tag_present(skb) || | ||
2079 | unlikely(do_tstamp)) && | ||
2080 | (skb_headroom(skb) < GMAC_FCB_LEN)) { | ||
2081 | struct sk_buff *skb_new; | ||
2082 | |||
2083 | skb_new = skb_realloc_headroom(skb, GMAC_FCB_LEN); | ||
2084 | if (!skb_new) { | ||
2085 | dev->stats.tx_errors++; | ||
2086 | kfree_skb(skb); | ||
2087 | return NETDEV_TX_OK; | ||
2088 | } | ||
2089 | kfree_skb(skb); | ||
2090 | skb = skb_new; | ||
2091 | } | ||
2092 | |||
2093 | /* total number of fragments in the SKB */ | ||
2094 | nr_frags = skb_shinfo(skb)->nr_frags; | ||
2095 | |||
2096 | /* calculate the required number of TxBDs for this skb */ | ||
2097 | if (unlikely(do_tstamp)) | ||
2098 | nr_txbds = nr_frags + 2; | ||
2099 | else | ||
2100 | nr_txbds = nr_frags + 1; | ||
2101 | |||
2102 | /* check if there is space to queue this packet */ | ||
2103 | if (nr_txbds > tx_queue->num_txbdfree) { | ||
2104 | /* no space, stop the queue */ | ||
2105 | netif_tx_stop_queue(txq); | ||
2106 | dev->stats.tx_fifo_errors++; | ||
2107 | return NETDEV_TX_BUSY; | ||
2108 | } | ||
2109 | |||
2110 | /* Update transmit stats */ | ||
2111 | tx_queue->stats.tx_bytes += skb->len; | ||
2112 | tx_queue->stats.tx_packets++; | ||
2113 | |||
2114 | txbdp = txbdp_start = tx_queue->cur_tx; | ||
2115 | lstatus = txbdp->lstatus; | ||
2116 | |||
2117 | /* Time stamp insertion requires one additional TxBD */ | ||
2118 | if (unlikely(do_tstamp)) | ||
2119 | txbdp_tstamp = txbdp = next_txbd(txbdp, base, | ||
2120 | tx_queue->tx_ring_size); | ||
2121 | |||
2122 | if (nr_frags == 0) { | ||
2123 | if (unlikely(do_tstamp)) | ||
2124 | txbdp_tstamp->lstatus |= BD_LFLAG(TXBD_LAST | | ||
2125 | TXBD_INTERRUPT); | ||
2126 | else | ||
2127 | lstatus |= BD_LFLAG(TXBD_LAST | TXBD_INTERRUPT); | ||
2128 | } else { | ||
2129 | /* Place the fragment addresses and lengths into the TxBDs */ | ||
2130 | for (i = 0; i < nr_frags; i++) { | ||
2131 | /* Point at the next BD, wrapping as needed */ | ||
2132 | txbdp = next_txbd(txbdp, base, tx_queue->tx_ring_size); | ||
2133 | |||
2134 | length = skb_shinfo(skb)->frags[i].size; | ||
2135 | |||
2136 | lstatus = txbdp->lstatus | length | | ||
2137 | BD_LFLAG(TXBD_READY); | ||
2138 | |||
2139 | /* Handle the last BD specially */ | ||
2140 | if (i == nr_frags - 1) | ||
2141 | lstatus |= BD_LFLAG(TXBD_LAST | TXBD_INTERRUPT); | ||
2142 | |||
2143 | bufaddr = dma_map_page(&priv->ofdev->dev, | ||
2144 | skb_shinfo(skb)->frags[i].page, | ||
2145 | skb_shinfo(skb)->frags[i].page_offset, | ||
2146 | length, | ||
2147 | DMA_TO_DEVICE); | ||
2148 | |||
2149 | /* set the TxBD length and buffer pointer */ | ||
2150 | txbdp->bufPtr = bufaddr; | ||
2151 | txbdp->lstatus = lstatus; | ||
2152 | } | ||
2153 | |||
2154 | lstatus = txbdp_start->lstatus; | ||
2155 | } | ||
2156 | |||
2157 | /* Set up checksumming */ | ||
2158 | if (CHECKSUM_PARTIAL == skb->ip_summed) { | ||
2159 | fcb = gfar_add_fcb(skb); | ||
2160 | /* as specified by errata */ | ||
2161 | if (unlikely(gfar_has_errata(priv, GFAR_ERRATA_12) | ||
2162 | && ((unsigned long)fcb % 0x20) > 0x18)) { | ||
2163 | __skb_pull(skb, GMAC_FCB_LEN); | ||
2164 | skb_checksum_help(skb); | ||
2165 | } else { | ||
2166 | lstatus |= BD_LFLAG(TXBD_TOE); | ||
2167 | gfar_tx_checksum(skb, fcb); | ||
2168 | } | ||
2169 | } | ||
2170 | |||
2171 | if (vlan_tx_tag_present(skb)) { | ||
2172 | if (unlikely(NULL == fcb)) { | ||
2173 | fcb = gfar_add_fcb(skb); | ||
2174 | lstatus |= BD_LFLAG(TXBD_TOE); | ||
2175 | } | ||
2176 | |||
2177 | gfar_tx_vlan(skb, fcb); | ||
2178 | } | ||
2179 | |||
2180 | /* Setup tx hardware time stamping if requested */ | ||
2181 | if (unlikely(do_tstamp)) { | ||
2182 | skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS; | ||
2183 | if (fcb == NULL) | ||
2184 | fcb = gfar_add_fcb(skb); | ||
2185 | fcb->ptp = 1; | ||
2186 | lstatus |= BD_LFLAG(TXBD_TOE); | ||
2187 | } | ||
2188 | |||
2189 | txbdp_start->bufPtr = dma_map_single(&priv->ofdev->dev, skb->data, | ||
2190 | skb_headlen(skb), DMA_TO_DEVICE); | ||
2191 | |||
2192 | /* | ||
2193 | * If time stamping is requested, one additional TxBD must be set up. The | ||
2194 | * first TxBD points to the FCB and must have a data length of | ||
2195 | * GMAC_FCB_LEN. The second TxBD points to the actual frame data with | ||
2196 | * the full frame length. | ||
2197 | */ | ||
2198 | if (unlikely(do_tstamp)) { | ||
2199 | txbdp_tstamp->bufPtr = txbdp_start->bufPtr + GMAC_FCB_LEN; | ||
2200 | txbdp_tstamp->lstatus |= BD_LFLAG(TXBD_READY) | | ||
2201 | (skb_headlen(skb) - GMAC_FCB_LEN); | ||
2202 | lstatus |= BD_LFLAG(TXBD_CRC | TXBD_READY) | GMAC_FCB_LEN; | ||
2203 | } else { | ||
2204 | lstatus |= BD_LFLAG(TXBD_CRC | TXBD_READY) | skb_headlen(skb); | ||
2205 | } | ||
2206 | |||
2207 | /* | ||
2208 | * We can work in parallel with gfar_clean_tx_ring(), except | ||
2209 | * when modifying num_txbdfree. Note that we didn't grab the lock | ||
2210 | * when we were reading num_txbdfree and checking for available | ||
2211 | * space; that's because outside of this function it can only grow, | ||
2212 | * and once we've got needed space, it cannot suddenly disappear. | ||
2213 | * | ||
2214 | * The lock also protects us from gfar_error(), which can modify | ||
2215 | * regs->tstat and thus retrigger the transfers, which is why we | ||
2216 | * also must grab the lock before setting ready bit for the first | ||
2217 | * to be transmitted BD. | ||
2218 | */ | ||
2219 | spin_lock_irqsave(&tx_queue->txlock, flags); | ||
2220 | |||
2221 | /* | ||
2222 | * The powerpc-specific eieio() is used, as wmb() has too strong | ||
2223 | * semantics (it requires synchronization between cacheable and | ||
2224 | * uncacheable mappings, which eieio doesn't provide and which we | ||
2225 | * don't need), thus requiring a more expensive sync instruction. At | ||
2226 | * some point, the set of architecture-independent barrier functions | ||
2227 | * should be expanded to include weaker barriers. | ||
2228 | */ | ||
2229 | eieio(); | ||
2230 | |||
2231 | txbdp_start->lstatus = lstatus; | ||
2232 | |||
2233 | eieio(); /* force lstatus write before tx_skbuff */ | ||
2234 | |||
2235 | tx_queue->tx_skbuff[tx_queue->skb_curtx] = skb; | ||
2236 | |||
2237 | /* Update the current skb pointer to the next entry we will use | ||
2238 | * (wrapping if necessary) */ | ||
2239 | tx_queue->skb_curtx = (tx_queue->skb_curtx + 1) & | ||
2240 | TX_RING_MOD_MASK(tx_queue->tx_ring_size); | ||
2241 | |||
2242 | tx_queue->cur_tx = next_txbd(txbdp, base, tx_queue->tx_ring_size); | ||
2243 | |||
2244 | /* reduce TxBD free count */ | ||
2245 | tx_queue->num_txbdfree -= (nr_txbds); | ||
2246 | |||
2247 | /* If the next BD still needs to be cleaned up, then the BDs | ||
2248 | * are full. We need to tell the kernel to stop sending us stuff. */ | ||
2249 | if (!tx_queue->num_txbdfree) { | ||
2250 | netif_tx_stop_queue(txq); | ||
2251 | |||
2252 | dev->stats.tx_fifo_errors++; | ||
2253 | } | ||
2254 | |||
2255 | /* Tell the DMA to go go go */ | ||
2256 | gfar_write(®s->tstat, TSTAT_CLEAR_THALT >> tx_queue->qindex); | ||
2257 | |||
2258 | /* Unlock priv */ | ||
2259 | spin_unlock_irqrestore(&tx_queue->txlock, flags); | ||
2260 | |||
2261 | return NETDEV_TX_OK; | ||
2262 | } | ||
2263 | |||
2264 | /* Stops the kernel queue, and halts the controller */ | ||
2265 | static int gfar_close(struct net_device *dev) | ||
2266 | { | ||
2267 | struct gfar_private *priv = netdev_priv(dev); | ||
2268 | |||
2269 | disable_napi(priv); | ||
2270 | |||
2271 | cancel_work_sync(&priv->reset_task); | ||
2272 | stop_gfar(dev); | ||
2273 | |||
2274 | /* Disconnect from the PHY */ | ||
2275 | phy_disconnect(priv->phydev); | ||
2276 | priv->phydev = NULL; | ||
2277 | |||
2278 | netif_tx_stop_all_queues(dev); | ||
2279 | |||
2280 | return 0; | ||
2281 | } | ||
2282 | |||
2283 | /* Changes the mac address if the controller is not running. */ | ||
2284 | static int gfar_set_mac_address(struct net_device *dev) | ||
2285 | { | ||
2286 | gfar_set_mac_for_addr(dev, 0, dev->dev_addr); | ||
2287 | |||
2288 | return 0; | ||
2289 | } | ||
2290 | |||
2291 | /* Check if rx parser should be activated */ | ||
2292 | void gfar_check_rx_parser_mode(struct gfar_private *priv) | ||
2293 | { | ||
2294 | struct gfar __iomem *regs; | ||
2295 | u32 tempval; | ||
2296 | |||
2297 | regs = priv->gfargrp[0].regs; | ||
2298 | |||
2299 | tempval = gfar_read(®s->rctrl); | ||
2300 | /* Enable the parser if any feature requires it; otherwise, disable it */ | ||
2301 | if (tempval & RCTRL_REQ_PARSER) | ||
2302 | tempval |= RCTRL_PRSDEP_INIT; | ||
2303 | else | ||
2304 | tempval &= ~RCTRL_PRSDEP_INIT; | ||
2305 | gfar_write(®s->rctrl, tempval); | ||
2306 | } | ||
2307 | |||
2308 | /* Enables and disables VLAN insertion/extraction */ | ||
2309 | void gfar_vlan_mode(struct net_device *dev, u32 features) | ||
2310 | { | ||
2311 | struct gfar_private *priv = netdev_priv(dev); | ||
2312 | struct gfar __iomem *regs = NULL; | ||
2313 | unsigned long flags; | ||
2314 | u32 tempval; | ||
2315 | |||
2316 | regs = priv->gfargrp[0].regs; | ||
2317 | local_irq_save(flags); | ||
2318 | lock_rx_qs(priv); | ||
2319 | |||
2320 | if (features & NETIF_F_HW_VLAN_TX) { | ||
2321 | /* Enable VLAN tag insertion */ | ||
2322 | tempval = gfar_read(®s->tctrl); | ||
2323 | tempval |= TCTRL_VLINS; | ||
2324 | gfar_write(®s->tctrl, tempval); | ||
2325 | } else { | ||
2326 | /* Disable VLAN tag insertion */ | ||
2327 | tempval = gfar_read(®s->tctrl); | ||
2328 | tempval &= ~TCTRL_VLINS; | ||
2329 | gfar_write(®s->tctrl, tempval); | ||
2330 | } | ||
2331 | |||
2332 | if (features & NETIF_F_HW_VLAN_RX) { | ||
2333 | /* Enable VLAN tag extraction */ | ||
2334 | tempval = gfar_read(®s->rctrl); | ||
2335 | tempval |= (RCTRL_VLEX | RCTRL_PRSDEP_INIT); | ||
2336 | gfar_write(®s->rctrl, tempval); | ||
2337 | } else { | ||
2338 | /* Disable VLAN tag extraction */ | ||
2339 | tempval = gfar_read(®s->rctrl); | ||
2340 | tempval &= ~RCTRL_VLEX; | ||
2341 | gfar_write(®s->rctrl, tempval); | ||
2342 | |||
2343 | gfar_check_rx_parser_mode(priv); | ||
2344 | } | ||
2345 | |||
2346 | gfar_change_mtu(dev, dev->mtu); | ||
2347 | |||
2348 | unlock_rx_qs(priv); | ||
2349 | local_irq_restore(flags); | ||
2350 | } | ||
2351 | |||
2352 | static int gfar_change_mtu(struct net_device *dev, int new_mtu) | ||
2353 | { | ||
2354 | int tempsize, tempval; | ||
2355 | struct gfar_private *priv = netdev_priv(dev); | ||
2356 | struct gfar __iomem *regs = priv->gfargrp[0].regs; | ||
2357 | int oldsize = priv->rx_buffer_size; | ||
2358 | int frame_size = new_mtu + ETH_HLEN; | ||
2359 | |||
2360 | if (gfar_is_vlan_on(priv)) | ||
2361 | frame_size += VLAN_HLEN; | ||
2362 | |||
2363 | if ((frame_size < 64) || (frame_size > JUMBO_FRAME_SIZE)) { | ||
2364 | netif_err(priv, drv, dev, "Invalid MTU setting\n"); | ||
2365 | return -EINVAL; | ||
2366 | } | ||
2367 | |||
2368 | if (gfar_uses_fcb(priv)) | ||
2369 | frame_size += GMAC_FCB_LEN; | ||
2370 | |||
2371 | frame_size += priv->padding; | ||
2372 | |||
2373 | tempsize = | ||
2374 | (frame_size & ~(INCREMENTAL_BUFFER_SIZE - 1)) + | ||
2375 | INCREMENTAL_BUFFER_SIZE; | ||
2376 | |||
2377 | /* Only stop and start the controller if it isn't already | ||
2378 | * stopped, and we changed something */ | ||
2379 | if ((oldsize != tempsize) && (dev->flags & IFF_UP)) | ||
2380 | stop_gfar(dev); | ||
2381 | |||
2382 | priv->rx_buffer_size = tempsize; | ||
2383 | |||
2384 | dev->mtu = new_mtu; | ||
2385 | |||
2386 | gfar_write(®s->mrblr, priv->rx_buffer_size); | ||
2387 | gfar_write(®s->maxfrm, priv->rx_buffer_size); | ||
2388 | |||
2389 | /* If the mtu is larger than the max size for standard | ||
2390 | * ethernet frames (ie, a jumbo frame), then set maccfg2 | ||
2391 | * to allow huge frames, and to check the length */ | ||
2392 | tempval = gfar_read(®s->maccfg2); | ||
2393 | |||
2394 | if (priv->rx_buffer_size > DEFAULT_RX_BUFFER_SIZE || | ||
2395 | gfar_has_errata(priv, GFAR_ERRATA_74)) | ||
2396 | tempval |= (MACCFG2_HUGEFRAME | MACCFG2_LENGTHCHECK); | ||
2397 | else | ||
2398 | tempval &= ~(MACCFG2_HUGEFRAME | MACCFG2_LENGTHCHECK); | ||
2399 | |||
2400 | gfar_write(®s->maccfg2, tempval); | ||
2401 | |||
2402 | if ((oldsize != tempsize) && (dev->flags & IFF_UP)) | ||
2403 | startup_gfar(dev); | ||
2404 | |||
2405 | return 0; | ||
2406 | } | ||
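/*
 * Worked example for the rx_buffer_size calculation above (illustrative
 * only; it assumes INCREMENTAL_BUFFER_SIZE is 512): a 1500-byte MTU on
 * plain Ethernet gives frame_size = 1514, and
 *	tempsize = (1514 & ~511) + 512 = 1024 + 512 = 1536
 * Note that a frame_size which is already a multiple of the increment is
 * still rounded up by one full increment (1536 would become 2048), so the
 * buffer always has some slack beyond frame_size.
 */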
2407 | |||
2408 | /* gfar_reset_task gets scheduled when a packet has not been | ||
2409 | * transmitted after a set amount of time. | ||
2410 | * For now, assume that clearing out all the structures, and | ||
2411 | * starting over will fix the problem. | ||
2412 | */ | ||
2413 | static void gfar_reset_task(struct work_struct *work) | ||
2414 | { | ||
2415 | struct gfar_private *priv = container_of(work, struct gfar_private, | ||
2416 | reset_task); | ||
2417 | struct net_device *dev = priv->ndev; | ||
2418 | |||
2419 | if (dev->flags & IFF_UP) { | ||
2420 | netif_tx_stop_all_queues(dev); | ||
2421 | stop_gfar(dev); | ||
2422 | startup_gfar(dev); | ||
2423 | netif_tx_start_all_queues(dev); | ||
2424 | } | ||
2425 | |||
2426 | netif_tx_schedule_all(dev); | ||
2427 | } | ||
2428 | |||
2429 | static void gfar_timeout(struct net_device *dev) | ||
2430 | { | ||
2431 | struct gfar_private *priv = netdev_priv(dev); | ||
2432 | |||
2433 | dev->stats.tx_errors++; | ||
2434 | schedule_work(&priv->reset_task); | ||
2435 | } | ||
2436 | |||
2437 | static void gfar_align_skb(struct sk_buff *skb) | ||
2438 | { | ||
2439 | /* We need the data buffer to be aligned properly. We will reserve | ||
2440 | * as many bytes as needed to align the data properly | ||
2441 | */ | ||
2442 | skb_reserve(skb, RXBUF_ALIGNMENT - | ||
2443 | (((unsigned long) skb->data) & (RXBUF_ALIGNMENT - 1))); | ||
2444 | } | ||
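/*
 * Worked example (illustrative only; it assumes RXBUF_ALIGNMENT is 64):
 * if skb->data sits 36 bytes past a 64-byte boundary, the expression above
 * reserves 64 - 36 = 28 bytes so the data buffer starts on the next
 * boundary. When the pointer is already aligned, a full RXBUF_ALIGNMENT
 * bytes are reserved, which is why gfar_alloc_skb() below asks
 * netdev_alloc_skb() for rx_buffer_size + RXBUF_ALIGNMENT bytes.
 */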
2445 | |||
2446 | /* Interrupt Handler for Transmit complete */ | ||
2447 | static int gfar_clean_tx_ring(struct gfar_priv_tx_q *tx_queue) | ||
2448 | { | ||
2449 | struct net_device *dev = tx_queue->dev; | ||
2450 | struct gfar_private *priv = netdev_priv(dev); | ||
2451 | struct gfar_priv_rx_q *rx_queue = NULL; | ||
2452 | struct txbd8 *bdp, *next = NULL; | ||
2453 | struct txbd8 *lbdp = NULL; | ||
2454 | struct txbd8 *base = tx_queue->tx_bd_base; | ||
2455 | struct sk_buff *skb; | ||
2456 | int skb_dirtytx; | ||
2457 | int tx_ring_size = tx_queue->tx_ring_size; | ||
2458 | int frags = 0, nr_txbds = 0; | ||
2459 | int i; | ||
2460 | int howmany = 0; | ||
2461 | u32 lstatus; | ||
2462 | size_t buflen; | ||
2463 | |||
2464 | rx_queue = priv->rx_queue[tx_queue->qindex]; | ||
2465 | bdp = tx_queue->dirty_tx; | ||
2466 | skb_dirtytx = tx_queue->skb_dirtytx; | ||
2467 | |||
2468 | while ((skb = tx_queue->tx_skbuff[skb_dirtytx])) { | ||
2469 | unsigned long flags; | ||
2470 | |||
2471 | frags = skb_shinfo(skb)->nr_frags; | ||
2472 | |||
2473 | /* | ||
2474 | * When time stamping, one additional TxBD must be freed. | ||
2475 | * Also, we need to dma_unmap_single() the TxPAL. | ||
2476 | */ | ||
2477 | if (unlikely(skb_shinfo(skb)->tx_flags & SKBTX_IN_PROGRESS)) | ||
2478 | nr_txbds = frags + 2; | ||
2479 | else | ||
2480 | nr_txbds = frags + 1; | ||
2481 | |||
2482 | lbdp = skip_txbd(bdp, nr_txbds - 1, base, tx_ring_size); | ||
2483 | |||
2484 | lstatus = lbdp->lstatus; | ||
2485 | |||
2486 | /* Only clean completed frames */ | ||
2487 | if ((lstatus & BD_LFLAG(TXBD_READY)) && | ||
2488 | (lstatus & BD_LENGTH_MASK)) | ||
2489 | break; | ||
2490 | |||
2491 | if (unlikely(skb_shinfo(skb)->tx_flags & SKBTX_IN_PROGRESS)) { | ||
2492 | next = next_txbd(bdp, base, tx_ring_size); | ||
2493 | buflen = next->length + GMAC_FCB_LEN; | ||
2494 | } else | ||
2495 | buflen = bdp->length; | ||
2496 | |||
2497 | dma_unmap_single(&priv->ofdev->dev, bdp->bufPtr, | ||
2498 | buflen, DMA_TO_DEVICE); | ||
2499 | |||
2500 | if (unlikely(skb_shinfo(skb)->tx_flags & SKBTX_IN_PROGRESS)) { | ||
2501 | struct skb_shared_hwtstamps shhwtstamps; | ||
2502 | u64 *ns = (u64 *)(((u32)skb->data + 0x10) & ~0x7); | ||
2503 | memset(&shhwtstamps, 0, sizeof(shhwtstamps)); | ||
2504 | shhwtstamps.hwtstamp = ns_to_ktime(*ns); | ||
2505 | skb_tstamp_tx(skb, &shhwtstamps); | ||
2506 | bdp->lstatus &= BD_LFLAG(TXBD_WRAP); | ||
2507 | bdp = next; | ||
2508 | } | ||
2509 | |||
2510 | bdp->lstatus &= BD_LFLAG(TXBD_WRAP); | ||
2511 | bdp = next_txbd(bdp, base, tx_ring_size); | ||
2512 | |||
2513 | for (i = 0; i < frags; i++) { | ||
2514 | dma_unmap_page(&priv->ofdev->dev, | ||
2515 | bdp->bufPtr, | ||
2516 | bdp->length, | ||
2517 | DMA_TO_DEVICE); | ||
2518 | bdp->lstatus &= BD_LFLAG(TXBD_WRAP); | ||
2519 | bdp = next_txbd(bdp, base, tx_ring_size); | ||
2520 | } | ||
2521 | |||
2522 | /* | ||
2523 | * If there's room in the queue (limit it to rx_buffer_size) | ||
2524 | * we add this skb back into the pool, if it's the right size | ||
2525 | */ | ||
2526 | if (skb_queue_len(&priv->rx_recycle) < rx_queue->rx_ring_size && | ||
2527 | skb_recycle_check(skb, priv->rx_buffer_size + | ||
2528 | RXBUF_ALIGNMENT)) { | ||
2529 | gfar_align_skb(skb); | ||
2530 | skb_queue_head(&priv->rx_recycle, skb); | ||
2531 | } else | ||
2532 | dev_kfree_skb_any(skb); | ||
2533 | |||
2534 | tx_queue->tx_skbuff[skb_dirtytx] = NULL; | ||
2535 | |||
2536 | skb_dirtytx = (skb_dirtytx + 1) & | ||
2537 | TX_RING_MOD_MASK(tx_ring_size); | ||
2538 | |||
2539 | howmany++; | ||
2540 | spin_lock_irqsave(&tx_queue->txlock, flags); | ||
2541 | tx_queue->num_txbdfree += nr_txbds; | ||
2542 | spin_unlock_irqrestore(&tx_queue->txlock, flags); | ||
2543 | } | ||
2544 | |||
2545 | /* If we freed a buffer, we can restart transmission, if necessary */ | ||
2546 | if (__netif_subqueue_stopped(dev, tx_queue->qindex) && tx_queue->num_txbdfree) | ||
2547 | netif_wake_subqueue(dev, tx_queue->qindex); | ||
2548 | |||
2549 | /* Update dirty indicators */ | ||
2550 | tx_queue->skb_dirtytx = skb_dirtytx; | ||
2551 | tx_queue->dirty_tx = bdp; | ||
2552 | |||
2553 | return howmany; | ||
2554 | } | ||
2555 | |||
2556 | static void gfar_schedule_cleanup(struct gfar_priv_grp *gfargrp) | ||
2557 | { | ||
2558 | unsigned long flags; | ||
2559 | |||
2560 | spin_lock_irqsave(&gfargrp->grplock, flags); | ||
2561 | if (napi_schedule_prep(&gfargrp->napi)) { | ||
2562 | gfar_write(&gfargrp->regs->imask, IMASK_RTX_DISABLED); | ||
2563 | __napi_schedule(&gfargrp->napi); | ||
2564 | } else { | ||
2565 | /* | ||
2566 | * Clear IEVENT, so interrupts aren't called again | ||
2567 | * because of the packets that have already arrived. | ||
2568 | */ | ||
2569 | gfar_write(&gfargrp->regs->ievent, IEVENT_RTX_MASK); | ||
2570 | } | ||
2571 | spin_unlock_irqrestore(&gfargrp->grplock, flags); | ||
2572 | |||
2573 | } | ||
2574 | |||
2575 | /* Interrupt Handler for Transmit complete */ | ||
2576 | static irqreturn_t gfar_transmit(int irq, void *grp_id) | ||
2577 | { | ||
2578 | gfar_schedule_cleanup((struct gfar_priv_grp *)grp_id); | ||
2579 | return IRQ_HANDLED; | ||
2580 | } | ||
2581 | |||
2582 | static void gfar_new_rxbdp(struct gfar_priv_rx_q *rx_queue, struct rxbd8 *bdp, | ||
2583 | struct sk_buff *skb) | ||
2584 | { | ||
2585 | struct net_device *dev = rx_queue->dev; | ||
2586 | struct gfar_private *priv = netdev_priv(dev); | ||
2587 | dma_addr_t buf; | ||
2588 | |||
2589 | buf = dma_map_single(&priv->ofdev->dev, skb->data, | ||
2590 | priv->rx_buffer_size, DMA_FROM_DEVICE); | ||
2591 | gfar_init_rxbdp(rx_queue, bdp, buf); | ||
2592 | } | ||
2593 | |||
2594 | static struct sk_buff *gfar_alloc_skb(struct net_device *dev) | ||
2595 | { | ||
2596 | struct gfar_private *priv = netdev_priv(dev); | ||
2597 | struct sk_buff *skb = NULL; | ||
2598 | |||
2599 | skb = netdev_alloc_skb(dev, priv->rx_buffer_size + RXBUF_ALIGNMENT); | ||
2600 | if (!skb) | ||
2601 | return NULL; | ||
2602 | |||
2603 | gfar_align_skb(skb); | ||
2604 | |||
2605 | return skb; | ||
2606 | } | ||
2607 | |||
2608 | struct sk_buff *gfar_new_skb(struct net_device *dev) | ||
2609 | { | ||
2610 | struct gfar_private *priv = netdev_priv(dev); | ||
2611 | struct sk_buff *skb = NULL; | ||
2612 | |||
2613 | skb = skb_dequeue(&priv->rx_recycle); | ||
2614 | if (!skb) | ||
2615 | skb = gfar_alloc_skb(dev); | ||
2616 | |||
2617 | return skb; | ||
2618 | } | ||
2619 | |||
2620 | static inline void count_errors(unsigned short status, struct net_device *dev) | ||
2621 | { | ||
2622 | struct gfar_private *priv = netdev_priv(dev); | ||
2623 | struct net_device_stats *stats = &dev->stats; | ||
2624 | struct gfar_extra_stats *estats = &priv->extra_stats; | ||
2625 | |||
2626 | /* If the packet was truncated, none of the other errors | ||
2627 | * matter */ | ||
2628 | if (status & RXBD_TRUNCATED) { | ||
2629 | stats->rx_length_errors++; | ||
2630 | |||
2631 | estats->rx_trunc++; | ||
2632 | |||
2633 | return; | ||
2634 | } | ||
2635 | /* Count the errors, if there were any */ | ||
2636 | if (status & (RXBD_LARGE | RXBD_SHORT)) { | ||
2637 | stats->rx_length_errors++; | ||
2638 | |||
2639 | if (status & RXBD_LARGE) | ||
2640 | estats->rx_large++; | ||
2641 | else | ||
2642 | estats->rx_short++; | ||
2643 | } | ||
2644 | if (status & RXBD_NONOCTET) { | ||
2645 | stats->rx_frame_errors++; | ||
2646 | estats->rx_nonoctet++; | ||
2647 | } | ||
2648 | if (status & RXBD_CRCERR) { | ||
2649 | estats->rx_crcerr++; | ||
2650 | stats->rx_crc_errors++; | ||
2651 | } | ||
2652 | if (status & RXBD_OVERRUN) { | ||
2653 | estats->rx_overrun++; | ||
2654 | stats->rx_crc_errors++; | ||
2655 | } | ||
2656 | } | ||
2657 | |||
2658 | irqreturn_t gfar_receive(int irq, void *grp_id) | ||
2659 | { | ||
2660 | gfar_schedule_cleanup((struct gfar_priv_grp *)grp_id); | ||
2661 | return IRQ_HANDLED; | ||
2662 | } | ||
2663 | |||
2664 | static inline void gfar_rx_checksum(struct sk_buff *skb, struct rxfcb *fcb) | ||
2665 | { | ||
2666 | /* If valid headers were found, and valid sums | ||
2667 | * were verified, then we tell the kernel that no | ||
2668 | * checksumming is necessary. Otherwise, it is */ | ||
2669 | if ((fcb->flags & RXFCB_CSUM_MASK) == (RXFCB_CIP | RXFCB_CTU)) | ||
2670 | skb->ip_summed = CHECKSUM_UNNECESSARY; | ||
2671 | else | ||
2672 | skb_checksum_none_assert(skb); | ||
2673 | } | ||
2674 | |||
2675 | |||
2676 | /* gfar_process_frame() -- handle one incoming packet if skb | ||
2677 | * isn't NULL. */ | ||
2678 | static int gfar_process_frame(struct net_device *dev, struct sk_buff *skb, | ||
2679 | int amount_pull) | ||
2680 | { | ||
2681 | struct gfar_private *priv = netdev_priv(dev); | ||
2682 | struct rxfcb *fcb = NULL; | ||
2683 | |||
2684 | int ret; | ||
2685 | |||
2686 | /* fcb is at the beginning if it exists */ | ||
2687 | fcb = (struct rxfcb *)skb->data; | ||
2688 | |||
2689 | /* Remove the FCB from the skb */ | ||
2690 | /* Remove the padded bytes, if there are any */ | ||
2691 | if (amount_pull) { | ||
2692 | skb_record_rx_queue(skb, fcb->rq); | ||
2693 | skb_pull(skb, amount_pull); | ||
2694 | } | ||
2695 | |||
2696 | /* Get receive timestamp from the skb */ | ||
2697 | if (priv->hwts_rx_en) { | ||
2698 | struct skb_shared_hwtstamps *shhwtstamps = skb_hwtstamps(skb); | ||
2699 | u64 *ns = (u64 *) skb->data; | ||
2700 | memset(shhwtstamps, 0, sizeof(*shhwtstamps)); | ||
2701 | shhwtstamps->hwtstamp = ns_to_ktime(*ns); | ||
2702 | } | ||
2703 | |||
2704 | if (priv->padding) | ||
2705 | skb_pull(skb, priv->padding); | ||
2706 | |||
2707 | if (dev->features & NETIF_F_RXCSUM) | ||
2708 | gfar_rx_checksum(skb, fcb); | ||
2709 | |||
2710 | /* Tell the skb what kind of packet this is */ | ||
2711 | skb->protocol = eth_type_trans(skb, dev); | ||
2712 | |||
2713 | /* Set vlan tag */ | ||
2714 | if (fcb->flags & RXFCB_VLN) | ||
2715 | __vlan_hwaccel_put_tag(skb, fcb->vlctl); | ||
2716 | |||
2717 | /* Send the packet up the stack */ | ||
2718 | ret = netif_receive_skb(skb); | ||
2719 | |||
2720 | if (NET_RX_DROP == ret) | ||
2721 | priv->extra_stats.kernel_dropped++; | ||
2722 | |||
2723 | return 0; | ||
2724 | } | ||
2725 | |||
2726 | /* gfar_clean_rx_ring() -- Processes each frame in the rx ring | ||
2727 | * until the budget/quota has been reached. Returns the number | ||
2728 | * of frames handled | ||
2729 | */ | ||
2730 | int gfar_clean_rx_ring(struct gfar_priv_rx_q *rx_queue, int rx_work_limit) | ||
2731 | { | ||
2732 | struct net_device *dev = rx_queue->dev; | ||
2733 | struct rxbd8 *bdp, *base; | ||
2734 | struct sk_buff *skb; | ||
2735 | int pkt_len; | ||
2736 | int amount_pull; | ||
2737 | int howmany = 0; | ||
2738 | struct gfar_private *priv = netdev_priv(dev); | ||
2739 | |||
2740 | /* Get the first full descriptor */ | ||
2741 | bdp = rx_queue->cur_rx; | ||
2742 | base = rx_queue->rx_bd_base; | ||
2743 | |||
2744 | amount_pull = (gfar_uses_fcb(priv) ? GMAC_FCB_LEN : 0); | ||
2745 | |||
2746 | while (!((bdp->status & RXBD_EMPTY) || (--rx_work_limit < 0))) { | ||
2747 | struct sk_buff *newskb; | ||
2748 | rmb(); | ||
2749 | |||
2750 | /* Add another skb for the future */ | ||
2751 | newskb = gfar_new_skb(dev); | ||
2752 | |||
2753 | skb = rx_queue->rx_skbuff[rx_queue->skb_currx]; | ||
2754 | |||
2755 | dma_unmap_single(&priv->ofdev->dev, bdp->bufPtr, | ||
2756 | priv->rx_buffer_size, DMA_FROM_DEVICE); | ||
2757 | |||
2758 | if (unlikely(!(bdp->status & RXBD_ERR) && | ||
2759 | bdp->length > priv->rx_buffer_size)) | ||
2760 | bdp->status = RXBD_LARGE; | ||
2761 | |||
2762 | /* We drop the frame if we failed to allocate a new buffer */ | ||
2763 | if (unlikely(!newskb || !(bdp->status & RXBD_LAST) || | ||
2764 | bdp->status & RXBD_ERR)) { | ||
2765 | count_errors(bdp->status, dev); | ||
2766 | |||
2767 | if (unlikely(!newskb)) | ||
2768 | newskb = skb; | ||
2769 | else if (skb) | ||
2770 | skb_queue_head(&priv->rx_recycle, skb); | ||
2771 | } else { | ||
2772 | /* Increment the number of packets */ | ||
2773 | rx_queue->stats.rx_packets++; | ||
2774 | howmany++; | ||
2775 | |||
2776 | if (likely(skb)) { | ||
2777 | pkt_len = bdp->length - ETH_FCS_LEN; | ||
2778 | /* Remove the FCS from the packet length */ | ||
2779 | skb_put(skb, pkt_len); | ||
2780 | rx_queue->stats.rx_bytes += pkt_len; | ||
2781 | skb_record_rx_queue(skb, rx_queue->qindex); | ||
2782 | gfar_process_frame(dev, skb, amount_pull); | ||
2783 | |||
2784 | } else { | ||
2785 | netif_warn(priv, rx_err, dev, "Missing skb!\n"); | ||
2786 | rx_queue->stats.rx_dropped++; | ||
2787 | priv->extra_stats.rx_skbmissing++; | ||
2788 | } | ||
2789 | |||
2790 | } | ||
2791 | |||
2792 | rx_queue->rx_skbuff[rx_queue->skb_currx] = newskb; | ||
2793 | |||
2794 | /* Setup the new bdp */ | ||
2795 | gfar_new_rxbdp(rx_queue, bdp, newskb); | ||
2796 | |||
2797 | /* Update to the next pointer */ | ||
2798 | bdp = next_bd(bdp, base, rx_queue->rx_ring_size); | ||
2799 | |||
2800 | /* update to point at the next skb */ | ||
2801 | rx_queue->skb_currx = | ||
2802 | (rx_queue->skb_currx + 1) & | ||
2803 | RX_RING_MOD_MASK(rx_queue->rx_ring_size); | ||
2804 | } | ||
2805 | |||
2806 | /* Update the current rxbd pointer to be the next one */ | ||
2807 | rx_queue->cur_rx = bdp; | ||
2808 | |||
2809 | return howmany; | ||
2810 | } | ||
2811 | |||
2812 | static int gfar_poll(struct napi_struct *napi, int budget) | ||
2813 | { | ||
2814 | struct gfar_priv_grp *gfargrp = container_of(napi, | ||
2815 | struct gfar_priv_grp, napi); | ||
2816 | struct gfar_private *priv = gfargrp->priv; | ||
2817 | struct gfar __iomem *regs = gfargrp->regs; | ||
2818 | struct gfar_priv_tx_q *tx_queue = NULL; | ||
2819 | struct gfar_priv_rx_q *rx_queue = NULL; | ||
2820 | int rx_cleaned = 0, budget_per_queue = 0, rx_cleaned_per_queue = 0; | ||
2821 | int tx_cleaned = 0, i, left_over_budget = budget; | ||
2822 | unsigned long serviced_queues = 0; | ||
2823 | int num_queues = 0; | ||
2824 | |||
2825 | num_queues = gfargrp->num_rx_queues; | ||
2826 | budget_per_queue = budget/num_queues; | ||
2827 | |||
2828 | /* Clear IEVENT, so interrupts aren't called again | ||
2829 | * because of the packets that have already arrived */ | ||
2830 | gfar_write(®s->ievent, IEVENT_RTX_MASK); | ||
2831 | |||
2832 | while (num_queues && left_over_budget) { | ||
2833 | |||
2834 | budget_per_queue = left_over_budget/num_queues; | ||
2835 | left_over_budget = 0; | ||
2836 | |||
2837 | for_each_set_bit(i, &gfargrp->rx_bit_map, priv->num_rx_queues) { | ||
2838 | if (test_bit(i, &serviced_queues)) | ||
2839 | continue; | ||
2840 | rx_queue = priv->rx_queue[i]; | ||
2841 | tx_queue = priv->tx_queue[rx_queue->qindex]; | ||
2842 | |||
2843 | tx_cleaned += gfar_clean_tx_ring(tx_queue); | ||
2844 | rx_cleaned_per_queue = gfar_clean_rx_ring(rx_queue, | ||
2845 | budget_per_queue); | ||
2846 | rx_cleaned += rx_cleaned_per_queue; | ||
2847 | if (rx_cleaned_per_queue < budget_per_queue) { | ||
2848 | left_over_budget = left_over_budget + | ||
2849 | (budget_per_queue - rx_cleaned_per_queue); | ||
2850 | set_bit(i, &serviced_queues); | ||
2851 | num_queues--; | ||
2852 | } | ||
2853 | } | ||
2854 | } | ||
2855 | |||
2856 | if (tx_cleaned) | ||
2857 | return budget; | ||
2858 | |||
2859 | if (rx_cleaned < budget) { | ||
2860 | napi_complete(napi); | ||
2861 | |||
2862 | /* Clear the halt bit in RSTAT */ | ||
2863 | gfar_write(®s->rstat, gfargrp->rstat); | ||
2864 | |||
2865 | gfar_write(®s->imask, IMASK_DEFAULT); | ||
2866 | |||
2867 | /* If we are coalescing interrupts, update the timer */ | ||
2868 | /* Otherwise, clear it */ | ||
2869 | gfar_configure_coalescing(priv, | ||
2870 | gfargrp->rx_bit_map, gfargrp->tx_bit_map); | ||
2871 | } | ||
2872 | |||
2873 | return rx_cleaned; | ||
2874 | } | ||
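/*
 * Worked example of the budget redistribution in gfar_poll() (illustrative
 * only): with budget = 64 and two rx queues in the group, each queue is
 * first offered 64 / 2 = 32 frames. If queue 0 cleans only 12 it is marked
 * serviced and its unused 20 slots are returned to left_over_budget, while
 * queue 1, having consumed its full 32, stays unserviced. On the next pass
 * of the while loop the single remaining queue is offered the leftover 20
 * by itself; the loop ends once every queue has under-run its share or the
 * leftover budget reaches zero.
 */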
2875 | |||
2876 | #ifdef CONFIG_NET_POLL_CONTROLLER | ||
2877 | /* | ||
2878 | * Polling 'interrupt' - used by things like netconsole to send skbs | ||
2879 | * without having to re-enable interrupts. It's not called while | ||
2880 | * the interrupt routine is executing. | ||
2881 | */ | ||
2882 | static void gfar_netpoll(struct net_device *dev) | ||
2883 | { | ||
2884 | struct gfar_private *priv = netdev_priv(dev); | ||
2885 | int i = 0; | ||
2886 | |||
2887 | /* If the device has multiple interrupts, run tx/rx */ | ||
2888 | if (priv->device_flags & FSL_GIANFAR_DEV_HAS_MULTI_INTR) { | ||
2889 | for (i = 0; i < priv->num_grps; i++) { | ||
2890 | disable_irq(priv->gfargrp[i].interruptTransmit); | ||
2891 | disable_irq(priv->gfargrp[i].interruptReceive); | ||
2892 | disable_irq(priv->gfargrp[i].interruptError); | ||
2893 | gfar_interrupt(priv->gfargrp[i].interruptTransmit, | ||
2894 | &priv->gfargrp[i]); | ||
2895 | enable_irq(priv->gfargrp[i].interruptError); | ||
2896 | enable_irq(priv->gfargrp[i].interruptReceive); | ||
2897 | enable_irq(priv->gfargrp[i].interruptTransmit); | ||
2898 | } | ||
2899 | } else { | ||
2900 | for (i = 0; i < priv->num_grps; i++) { | ||
2901 | disable_irq(priv->gfargrp[i].interruptTransmit); | ||
2902 | gfar_interrupt(priv->gfargrp[i].interruptTransmit, | ||
2903 | &priv->gfargrp[i]); | ||
2904 | enable_irq(priv->gfargrp[i].interruptTransmit); | ||
2905 | } | ||
2906 | } | ||
2907 | } | ||
2908 | #endif | ||
2909 | |||
2910 | /* The interrupt handler for devices with one interrupt */ | ||
2911 | static irqreturn_t gfar_interrupt(int irq, void *grp_id) | ||
2912 | { | ||
2913 | struct gfar_priv_grp *gfargrp = grp_id; | ||
2914 | |||
2915 | /* Save ievent for future reference */ | ||
2916 | u32 events = gfar_read(&gfargrp->regs->ievent); | ||
2917 | |||
2918 | /* Check for reception */ | ||
2919 | if (events & IEVENT_RX_MASK) | ||
2920 | gfar_receive(irq, grp_id); | ||
2921 | |||
2922 | /* Check for transmit completion */ | ||
2923 | if (events & IEVENT_TX_MASK) | ||
2924 | gfar_transmit(irq, grp_id); | ||
2925 | |||
2926 | /* Check for errors */ | ||
2927 | if (events & IEVENT_ERR_MASK) | ||
2928 | gfar_error(irq, grp_id); | ||
2929 | |||
2930 | return IRQ_HANDLED; | ||
2931 | } | ||
2932 | |||
2933 | /* Called every time the controller might need to be made | ||
2934 | * aware of new link state. The PHY code conveys this | ||
2935 | * information through variables in the phydev structure, and this | ||
2936 | * function converts those variables into the appropriate | ||
2937 | * register values, and can bring down the device if needed. | ||
2938 | */ | ||
2939 | static void adjust_link(struct net_device *dev) | ||
2940 | { | ||
2941 | struct gfar_private *priv = netdev_priv(dev); | ||
2942 | struct gfar __iomem *regs = priv->gfargrp[0].regs; | ||
2943 | unsigned long flags; | ||
2944 | struct phy_device *phydev = priv->phydev; | ||
2945 | int new_state = 0; | ||
2946 | |||
2947 | local_irq_save(flags); | ||
2948 | lock_tx_qs(priv); | ||
2949 | |||
2950 | if (phydev->link) { | ||
2951 | u32 tempval = gfar_read(®s->maccfg2); | ||
2952 | u32 ecntrl = gfar_read(®s->ecntrl); | ||
2953 | |||
2954 | /* Now we make sure that we can be in full duplex mode. | ||
2955 | * If not, we operate in half-duplex mode. */ | ||
2956 | if (phydev->duplex != priv->oldduplex) { | ||
2957 | new_state = 1; | ||
2958 | if (!(phydev->duplex)) | ||
2959 | tempval &= ~(MACCFG2_FULL_DUPLEX); | ||
2960 | else | ||
2961 | tempval |= MACCFG2_FULL_DUPLEX; | ||
2962 | |||
2963 | priv->oldduplex = phydev->duplex; | ||
2964 | } | ||
2965 | |||
2966 | if (phydev->speed != priv->oldspeed) { | ||
2967 | new_state = 1; | ||
2968 | switch (phydev->speed) { | ||
2969 | case 1000: | ||
2970 | tempval = | ||
2971 | ((tempval & ~(MACCFG2_IF)) | MACCFG2_GMII); | ||
2972 | |||
2973 | ecntrl &= ~(ECNTRL_R100); | ||
2974 | break; | ||
2975 | case 100: | ||
2976 | case 10: | ||
2977 | tempval = | ||
2978 | ((tempval & ~(MACCFG2_IF)) | MACCFG2_MII); | ||
2979 | |||
2980 | /* Reduced mode distinguishes | ||
2981 | * between 10 and 100 */ | ||
2982 | if (phydev->speed == SPEED_100) | ||
2983 | ecntrl |= ECNTRL_R100; | ||
2984 | else | ||
2985 | ecntrl &= ~(ECNTRL_R100); | ||
2986 | break; | ||
2987 | default: | ||
2988 | netif_warn(priv, link, dev, | ||
2989 | "Ack! Speed (%d) is not 10/100/1000!\n", | ||
2990 | phydev->speed); | ||
2991 | break; | ||
2992 | } | ||
2993 | |||
2994 | priv->oldspeed = phydev->speed; | ||
2995 | } | ||
2996 | |||
2997 | gfar_write(®s->maccfg2, tempval); | ||
2998 | gfar_write(®s->ecntrl, ecntrl); | ||
2999 | |||
3000 | if (!priv->oldlink) { | ||
3001 | new_state = 1; | ||
3002 | priv->oldlink = 1; | ||
3003 | } | ||
3004 | } else if (priv->oldlink) { | ||
3005 | new_state = 1; | ||
3006 | priv->oldlink = 0; | ||
3007 | priv->oldspeed = 0; | ||
3008 | priv->oldduplex = -1; | ||
3009 | } | ||
3010 | |||
3011 | if (new_state && netif_msg_link(priv)) | ||
3012 | phy_print_status(phydev); | ||
3013 | unlock_tx_qs(priv); | ||
3014 | local_irq_restore(flags); | ||
3015 | } | ||
3016 | |||
3017 | /* Update the hash table based on the current list of multicast | ||
3018 | * addresses we subscribe to. Also, change the promiscuity of | ||
3019 | * the device based on the flags (this function is called | ||
3020 | * whenever dev->flags is changed) */ | ||
3021 | static void gfar_set_multi(struct net_device *dev) | ||
3022 | { | ||
3023 | struct netdev_hw_addr *ha; | ||
3024 | struct gfar_private *priv = netdev_priv(dev); | ||
3025 | struct gfar __iomem *regs = priv->gfargrp[0].regs; | ||
3026 | u32 tempval; | ||
3027 | |||
3028 | if (dev->flags & IFF_PROMISC) { | ||
3029 | /* Set RCTRL to PROM */ | ||
3030 | tempval = gfar_read(®s->rctrl); | ||
3031 | tempval |= RCTRL_PROM; | ||
3032 | gfar_write(®s->rctrl, tempval); | ||
3033 | } else { | ||
3034 | /* Set RCTRL to not PROM */ | ||
3035 | tempval = gfar_read(®s->rctrl); | ||
3036 | tempval &= ~(RCTRL_PROM); | ||
3037 | gfar_write(®s->rctrl, tempval); | ||
3038 | } | ||
3039 | |||
3040 | if (dev->flags & IFF_ALLMULTI) { | ||
3041 | /* Set the hash to rx all multicast frames */ | ||
3042 | gfar_write(®s->igaddr0, 0xffffffff); | ||
3043 | gfar_write(®s->igaddr1, 0xffffffff); | ||
3044 | gfar_write(®s->igaddr2, 0xffffffff); | ||
3045 | gfar_write(®s->igaddr3, 0xffffffff); | ||
3046 | gfar_write(®s->igaddr4, 0xffffffff); | ||
3047 | gfar_write(®s->igaddr5, 0xffffffff); | ||
3048 | gfar_write(®s->igaddr6, 0xffffffff); | ||
3049 | gfar_write(®s->igaddr7, 0xffffffff); | ||
3050 | gfar_write(®s->gaddr0, 0xffffffff); | ||
3051 | gfar_write(®s->gaddr1, 0xffffffff); | ||
3052 | gfar_write(®s->gaddr2, 0xffffffff); | ||
3053 | gfar_write(®s->gaddr3, 0xffffffff); | ||
3054 | gfar_write(®s->gaddr4, 0xffffffff); | ||
3055 | gfar_write(®s->gaddr5, 0xffffffff); | ||
3056 | gfar_write(®s->gaddr6, 0xffffffff); | ||
3057 | gfar_write(®s->gaddr7, 0xffffffff); | ||
3058 | } else { | ||
3059 | int em_num; | ||
3060 | int idx; | ||
3061 | |||
3062 | /* zero out the hash */ | ||
3063 | gfar_write(®s->igaddr0, 0x0); | ||
3064 | gfar_write(®s->igaddr1, 0x0); | ||
3065 | gfar_write(®s->igaddr2, 0x0); | ||
3066 | gfar_write(®s->igaddr3, 0x0); | ||
3067 | gfar_write(®s->igaddr4, 0x0); | ||
3068 | gfar_write(®s->igaddr5, 0x0); | ||
3069 | gfar_write(®s->igaddr6, 0x0); | ||
3070 | gfar_write(®s->igaddr7, 0x0); | ||
3071 | gfar_write(®s->gaddr0, 0x0); | ||
3072 | gfar_write(®s->gaddr1, 0x0); | ||
3073 | gfar_write(®s->gaddr2, 0x0); | ||
3074 | gfar_write(®s->gaddr3, 0x0); | ||
3075 | gfar_write(®s->gaddr4, 0x0); | ||
3076 | gfar_write(®s->gaddr5, 0x0); | ||
3077 | gfar_write(®s->gaddr6, 0x0); | ||
3078 | gfar_write(®s->gaddr7, 0x0); | ||
3079 | |||
3080 | /* If we have extended hash tables, we need to | ||
3081 | * clear the exact match registers to prepare for | ||
3082 | * setting them */ | ||
3083 | if (priv->extended_hash) { | ||
3084 | em_num = GFAR_EM_NUM + 1; | ||
3085 | gfar_clear_exact_match(dev); | ||
3086 | idx = 1; | ||
3087 | } else { | ||
3088 | idx = 0; | ||
3089 | em_num = 0; | ||
3090 | } | ||
3091 | |||
3092 | if (netdev_mc_empty(dev)) | ||
3093 | return; | ||
3094 | |||
3095 | /* Parse the list, and set the appropriate bits */ | ||
3096 | netdev_for_each_mc_addr(ha, dev) { | ||
3097 | if (idx < em_num) { | ||
3098 | gfar_set_mac_for_addr(dev, idx, ha->addr); | ||
3099 | idx++; | ||
3100 | } else | ||
3101 | gfar_set_hash_for_addr(dev, ha->addr); | ||
3102 | } | ||
3103 | } | ||
3104 | } | ||
3105 | |||
3106 | |||
3107 | /* Clears each of the exact match registers to zero, so they don't | ||
3108 | * interfere with normal reception (entry 0, the station address, is skipped) */ | ||
3109 | static void gfar_clear_exact_match(struct net_device *dev) | ||
3110 | { | ||
3111 | int idx; | ||
3112 | static const u8 zero_arr[MAC_ADDR_LEN] = {0, 0, 0, 0, 0, 0}; | ||
3113 | |||
3114 | for (idx = 1; idx < GFAR_EM_NUM + 1; idx++) | ||
3115 | gfar_set_mac_for_addr(dev, idx, zero_arr); | ||
3116 | } | ||
3117 | |||
3118 | /* Set the appropriate hash bit for the given addr */ | ||
3119 | /* The algorithm works like so: | ||
3120 | * 1) Take the Destination Address (ie the multicast address), and | ||
3121 | * do a CRC on it (little endian), and reverse the bits of the | ||
3122 | * result. | ||
3123 | * 2) Use the 8 most significant bits as a hash into a 256-entry | ||
3124 | * table. The table is controlled through 8 32-bit registers: | ||
3125 | * gaddr0-7. gaddr0's MSB is entry 0, and gaddr7's LSB is | ||
3126 | * entry 255. This means that the 3 most significant bits of the | ||
3127 | * hash index indicate which gaddr register to use, and the other | ||
3128 | * 5 bits indicate which bit (using IBM bit numbering, which is | ||
3129 | * the usual convention on PowerPC (tm)) in that register holds | ||
3130 | * the entry. */ | ||
3131 | static void gfar_set_hash_for_addr(struct net_device *dev, u8 *addr) | ||
3132 | { | ||
3133 | u32 tempval; | ||
3134 | struct gfar_private *priv = netdev_priv(dev); | ||
3135 | u32 result = ether_crc(MAC_ADDR_LEN, addr); | ||
3136 | int width = priv->hash_width; | ||
3137 | u8 whichbit = (result >> (32 - width)) & 0x1f; | ||
3138 | u8 whichreg = result >> (32 - width + 5); | ||
3139 | u32 value = (1 << (31 - whichbit)); | ||
3140 | |||
3141 | tempval = gfar_read(priv->hash_regs[whichreg]); | ||
3142 | tempval |= value; | ||
3143 | gfar_write(priv->hash_regs[whichreg], tempval); | ||
3144 | } | ||
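/* A worked example of the index math above (the CRC value and the
 * hash_width of 8 are illustrative assumptions for the non-extended
 * case, where hash_regs[] is expected to point at gaddr0-7):
 *
 *   result   = 0xB4000000            top byte 0xB4 = binary 10110100
 *   whichreg = result >> 29          = 101b   = 5  -> hash_regs[5] (gaddr5)
 *   whichbit = (result >> 24) & 0x1f = 10100b = 20
 *   value    = 1 << (31 - 20)        = 0x00000800
 *
 * so bit 20 (IBM numbering) of gaddr5 is set for this address. */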
3145 | |||
3146 | |||
3147 | /* There are multiple MAC Address register pairs on some controllers. | ||
3148 | * This function sets the num'th pair to a given address. | ||
3149 | */ | ||
3150 | static void gfar_set_mac_for_addr(struct net_device *dev, int num, | ||
3151 | const u8 *addr) | ||
3152 | { | ||
3153 | struct gfar_private *priv = netdev_priv(dev); | ||
3154 | struct gfar __iomem *regs = priv->gfargrp[0].regs; | ||
3155 | u32 tempval; | ||
3156 | u32 __iomem *macptr = &regs->macstnaddr1; | ||
3157 | |||
3158 | macptr += num * 2; | ||
3159 | |||
3160 | /* The controller wants the address bytes reversed end to end: for a | ||
3161 | * station address of 0x12345678ABCD (in transmission order) the first | ||
3162 | * register gets 0xCDAB7856 and the second gets 0x34120000. Building | ||
3163 | * the values explicitly avoids reading past the end of a 6-byte | ||
3164 | * buffer, which the old u32 casts of tmpbuf did. */ | ||
3165 | tempval = (addr[5] << 24) | (addr[4] << 16) | | ||
3166 | (addr[3] << 8) | addr[2]; | ||
3167 | |||
3168 | gfar_write(macptr, tempval); | ||
3169 | |||
3170 | tempval = (addr[1] << 24) | (addr[0] << 16); | ||
3171 | gfar_write(macptr + 1, tempval); | ||
3172 | } | ||
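/* In this excerpt the exact-match pairs are driven from gfar_set_multi()
 * above: when extended hashing is available, subscribed multicast
 * addresses are programmed into entries 1..GFAR_EM_NUM through this
 * function, and only once those slots are used up does the driver fall
 * back to setting group hash bits with gfar_set_hash_for_addr(). */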
3173 | |||
3174 | /* GFAR error interrupt handler */ | ||
3175 | static irqreturn_t gfar_error(int irq, void *grp_id) | ||
3176 | { | ||
3177 | struct gfar_priv_grp *gfargrp = grp_id; | ||
3178 | struct gfar __iomem *regs = gfargrp->regs; | ||
3179 | struct gfar_private *priv = gfargrp->priv; | ||
3180 | struct net_device *dev = priv->ndev; | ||
3181 | |||
3182 | /* Save ievent for future reference */ | ||
3183 | u32 events = gfar_read(&regs->ievent); | ||
3184 | |||
3185 | /* Clear IEVENT */ | ||
3186 | gfar_write(&regs->ievent, events & IEVENT_ERR_MASK); | ||
3187 | |||
3188 | /* Magic Packet is not an error. */ | ||
3189 | if ((priv->device_flags & FSL_GIANFAR_DEV_HAS_MAGIC_PACKET) && | ||
3190 | (events & IEVENT_MAG)) | ||
3191 | events &= ~IEVENT_MAG; | ||
3192 | |||
3193 | /* Report the raw event bits if error messages are enabled */ | ||
3194 | if (netif_msg_rx_err(priv) || netif_msg_tx_err(priv)) | ||
3195 | netdev_dbg(dev, "error interrupt (ievent=0x%08x imask=0x%08x)\n", | ||
3196 | events, gfar_read(&regs->imask)); | ||
3197 | |||
3198 | /* Update the error counters */ | ||
3199 | if (events & IEVENT_TXE) { | ||
3200 | dev->stats.tx_errors++; | ||
3201 | |||
3202 | if (events & IEVENT_LC) | ||
3203 | dev->stats.tx_window_errors++; | ||
3204 | if (events & IEVENT_CRL) | ||
3205 | dev->stats.tx_aborted_errors++; | ||
3206 | if (events & IEVENT_XFUN) { | ||
3207 | unsigned long flags; | ||
3208 | |||
3209 | netif_dbg(priv, tx_err, dev, | ||
3210 | "TX FIFO underrun, packet dropped\n"); | ||
3211 | dev->stats.tx_dropped++; | ||
3212 | priv->extra_stats.tx_underrun++; | ||
3213 | |||
3214 | local_irq_save(flags); | ||
3215 | lock_tx_qs(priv); | ||
3216 | |||
3217 | /* Reactivate the Tx Queues */ | ||
3218 | gfar_write(&regs->tstat, gfargrp->tstat); | ||
3219 | |||
3220 | unlock_tx_qs(priv); | ||
3221 | local_irq_restore(flags); | ||
3222 | } | ||
3223 | netif_dbg(priv, tx_err, dev, "Transmit Error\n"); | ||
3224 | } | ||
3225 | if (events & IEVENT_BSY) { | ||
3226 | dev->stats.rx_errors++; | ||
3227 | priv->extra_stats.rx_bsy++; | ||
3228 | |||
3229 | gfar_receive(irq, grp_id); | ||
3230 | |||
3231 | netif_dbg(priv, rx_err, dev, "busy error (rstat: %x)\n", | ||
3232 | gfar_read(&regs->rstat)); | ||
3233 | } | ||
3234 | if (events & IEVENT_BABR) { | ||
3235 | dev->stats.rx_errors++; | ||
3236 | priv->extra_stats.rx_babr++; | ||
3237 | |||
3238 | netif_dbg(priv, rx_err, dev, "babbling RX error\n"); | ||
3239 | } | ||
3240 | if (events & IEVENT_EBERR) { | ||
3241 | priv->extra_stats.eberr++; | ||
3242 | netif_dbg(priv, rx_err, dev, "bus error\n"); | ||
3243 | } | ||
3244 | if (events & IEVENT_RXC) | ||
3245 | netif_dbg(priv, rx_status, dev, "control frame\n"); | ||
3246 | |||
3247 | if (events & IEVENT_BABT) { | ||
3248 | priv->extra_stats.tx_babt++; | ||
3249 | netif_dbg(priv, tx_err, dev, "babbling TX error\n"); | ||
3250 | } | ||
3251 | return IRQ_HANDLED; | ||
3252 | } | ||
3253 | |||
3254 | static const struct of_device_id gfar_match[] = | ||
3255 | { | ||
3256 | { | ||
3257 | .type = "network", | ||
3258 | .compatible = "gianfar", | ||
3259 | }, | ||
3260 | { | ||
3261 | .compatible = "fsl,etsec2", | ||
3262 | }, | ||
3263 | {}, | ||
3264 | }; | ||
3265 | MODULE_DEVICE_TABLE(of, gfar_match); | ||
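/* For reference, the first entry above only matches a node that carries
 * both device_type = "network" and compatible = "gianfar"; the second
 * matches on compatible = "fsl,etsec2" alone. A matching node might look
 * roughly like the sketch below (property values are illustrative; the
 * real ones come from the board's device tree, not from this file):
 *
 *	ethernet@24000 {
 *		device_type = "network";
 *		compatible = "gianfar";
 *		reg = <0x24000 0x1000>;
 *		local-mac-address = [ 00 00 00 00 00 00 ];
 *		phy-handle = <&phy0>;
 *	};
 */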
3266 | |||
3267 | /* Structure for a device driver */ | ||
3268 | static struct platform_driver gfar_driver = { | ||
3269 | .driver = { | ||
3270 | .name = "fsl-gianfar", | ||
3271 | .owner = THIS_MODULE, | ||
3272 | .pm = GFAR_PM_OPS, | ||
3273 | .of_match_table = gfar_match, | ||
3274 | }, | ||
3275 | .probe = gfar_probe, | ||
3276 | .remove = gfar_remove, | ||
3277 | }; | ||
3278 | |||
3279 | static int __init gfar_init(void) | ||
3280 | { | ||
3281 | return platform_driver_register(&gfar_driver); | ||
3282 | } | ||
3283 | |||
3284 | static void __exit gfar_exit(void) | ||
3285 | { | ||
3286 | platform_driver_unregister(&gfar_driver); | ||
3287 | } | ||
3288 | |||
3289 | module_init(gfar_init); | ||
3290 | module_exit(gfar_exit); | ||
3291 | |||