Diffstat (limited to 'drivers/net/gianfar.c')
-rw-r--r--  drivers/net/gianfar.c | 1875
1 files changed, 1297 insertions, 578 deletions
diff --git a/drivers/net/gianfar.c b/drivers/net/gianfar.c
index 5bf31f1509c9..e0620d084644 100644
--- a/drivers/net/gianfar.c
+++ b/drivers/net/gianfar.c
@@ -8,9 +8,10 @@
8 * 8 *
9 * Author: Andy Fleming 9 * Author: Andy Fleming
10 * Maintainer: Kumar Gala 10 * Maintainer: Kumar Gala
11 * Modifier: Sandeep Gopalpet <sandeep.kumar@freescale.com>
11 * 12 *
12 * Copyright (c) 2002-2006 Freescale Semiconductor, Inc. 13 * Copyright 2002-2009 Freescale Semiconductor, Inc.
13 * Copyright (c) 2007 MontaVista Software, Inc. 14 * Copyright 2007 MontaVista Software, Inc.
14 * 15 *
15 * This program is free software; you can redistribute it and/or modify it 16 * This program is free software; you can redistribute it and/or modify it
16 * under the terms of the GNU General Public License as published by the 17 * under the terms of the GNU General Public License as published by the
@@ -109,7 +110,7 @@ static void gfar_reset_task(struct work_struct *work);
109static void gfar_timeout(struct net_device *dev); 110static void gfar_timeout(struct net_device *dev);
110static int gfar_close(struct net_device *dev); 111static int gfar_close(struct net_device *dev);
111struct sk_buff *gfar_new_skb(struct net_device *dev); 112struct sk_buff *gfar_new_skb(struct net_device *dev);
112static void gfar_new_rxbdp(struct net_device *dev, struct rxbd8 *bdp, 113static void gfar_new_rxbdp(struct gfar_priv_rx_q *rx_queue, struct rxbd8 *bdp,
113 struct sk_buff *skb); 114 struct sk_buff *skb);
114static int gfar_set_mac_address(struct net_device *dev); 115static int gfar_set_mac_address(struct net_device *dev);
115static int gfar_change_mtu(struct net_device *dev, int new_mtu); 116static int gfar_change_mtu(struct net_device *dev, int new_mtu);
@@ -130,8 +131,8 @@ static int gfar_poll(struct napi_struct *napi, int budget);
130#ifdef CONFIG_NET_POLL_CONTROLLER 131#ifdef CONFIG_NET_POLL_CONTROLLER
131static void gfar_netpoll(struct net_device *dev); 132static void gfar_netpoll(struct net_device *dev);
132#endif 133#endif
133int gfar_clean_rx_ring(struct net_device *dev, int rx_work_limit); 134int gfar_clean_rx_ring(struct gfar_priv_rx_q *rx_queue, int rx_work_limit);
134static int gfar_clean_tx_ring(struct net_device *dev); 135static int gfar_clean_tx_ring(struct gfar_priv_tx_q *tx_queue);
135static int gfar_process_frame(struct net_device *dev, struct sk_buff *skb, 136static int gfar_process_frame(struct net_device *dev, struct sk_buff *skb,
136 int amount_pull); 137 int amount_pull);
137static void gfar_vlan_rx_register(struct net_device *netdev, 138static void gfar_vlan_rx_register(struct net_device *netdev,
@@ -142,11 +143,310 @@ void gfar_start(struct net_device *dev);
142static void gfar_clear_exact_match(struct net_device *dev); 143static void gfar_clear_exact_match(struct net_device *dev);
143static void gfar_set_mac_for_addr(struct net_device *dev, int num, u8 *addr); 144static void gfar_set_mac_for_addr(struct net_device *dev, int num, u8 *addr);
144static int gfar_ioctl(struct net_device *dev, struct ifreq *rq, int cmd); 145static int gfar_ioctl(struct net_device *dev, struct ifreq *rq, int cmd);
146u16 gfar_select_queue(struct net_device *dev, struct sk_buff *skb);
145 147
146MODULE_AUTHOR("Freescale Semiconductor, Inc"); 148MODULE_AUTHOR("Freescale Semiconductor, Inc");
147MODULE_DESCRIPTION("Gianfar Ethernet Driver"); 149MODULE_DESCRIPTION("Gianfar Ethernet Driver");
148MODULE_LICENSE("GPL"); 150MODULE_LICENSE("GPL");
149 151
152static void gfar_init_rxbdp(struct gfar_priv_rx_q *rx_queue, struct rxbd8 *bdp,
153 dma_addr_t buf)
154{
155 u32 lstatus;
156
157 bdp->bufPtr = buf;
158
159 lstatus = BD_LFLAG(RXBD_EMPTY | RXBD_INTERRUPT);
160 if (bdp == rx_queue->rx_bd_base + rx_queue->rx_ring_size - 1)
161 lstatus |= BD_LFLAG(RXBD_WRAP);
162
163 eieio();
164
165 bdp->lstatus = lstatus;
166}
167
168static int gfar_init_bds(struct net_device *ndev)
169{
170 struct gfar_private *priv = netdev_priv(ndev);
171 struct gfar_priv_tx_q *tx_queue = NULL;
172 struct gfar_priv_rx_q *rx_queue = NULL;
173 struct txbd8 *txbdp;
174 struct rxbd8 *rxbdp;
175 int i, j;
176
177 for (i = 0; i < priv->num_tx_queues; i++) {
178 tx_queue = priv->tx_queue[i];
179 /* Initialize some variables in our dev structure */
180 tx_queue->num_txbdfree = tx_queue->tx_ring_size;
181 tx_queue->dirty_tx = tx_queue->tx_bd_base;
182 tx_queue->cur_tx = tx_queue->tx_bd_base;
183 tx_queue->skb_curtx = 0;
184 tx_queue->skb_dirtytx = 0;
185
186 /* Initialize Transmit Descriptor Ring */
187 txbdp = tx_queue->tx_bd_base;
188 for (j = 0; j < tx_queue->tx_ring_size; j++) {
189 txbdp->lstatus = 0;
190 txbdp->bufPtr = 0;
191 txbdp++;
192 }
193
194 /* Set the last descriptor in the ring to indicate wrap */
195 txbdp--;
196 txbdp->status |= TXBD_WRAP;
197 }
198
199 for (i = 0; i < priv->num_rx_queues; i++) {
200 rx_queue = priv->rx_queue[i];
201 rx_queue->cur_rx = rx_queue->rx_bd_base;
202 rx_queue->skb_currx = 0;
203 rxbdp = rx_queue->rx_bd_base;
204
205 for (j = 0; j < rx_queue->rx_ring_size; j++) {
206 struct sk_buff *skb = rx_queue->rx_skbuff[j];
207
208 if (skb) {
209 gfar_init_rxbdp(rx_queue, rxbdp,
210 rxbdp->bufPtr);
211 } else {
212 skb = gfar_new_skb(ndev);
213 if (!skb) {
214 pr_err("%s: Can't allocate RX buffers\n",
215 ndev->name);
216 goto err_rxalloc_fail;
217 }
218 rx_queue->rx_skbuff[j] = skb;
219
220 gfar_new_rxbdp(rx_queue, rxbdp, skb);
221 }
222
223 rxbdp++;
224 }
225
226 }
227
228 return 0;
229
230err_rxalloc_fail:
231 free_skb_resources(priv);
232 return -ENOMEM;
233}
234
235static int gfar_alloc_skb_resources(struct net_device *ndev)
236{
237 void *vaddr;
238 dma_addr_t addr;
239 int i, j, k;
240 struct gfar_private *priv = netdev_priv(ndev);
241 struct device *dev = &priv->ofdev->dev;
242 struct gfar_priv_tx_q *tx_queue = NULL;
243 struct gfar_priv_rx_q *rx_queue = NULL;
244
245 priv->total_tx_ring_size = 0;
246 for (i = 0; i < priv->num_tx_queues; i++)
247 priv->total_tx_ring_size += priv->tx_queue[i]->tx_ring_size;
248
249 priv->total_rx_ring_size = 0;
250 for (i = 0; i < priv->num_rx_queues; i++)
251 priv->total_rx_ring_size += priv->rx_queue[i]->rx_ring_size;
252
253 /* Allocate memory for the buffer descriptors */
254 vaddr = dma_alloc_coherent(dev,
255 sizeof(struct txbd8) * priv->total_tx_ring_size +
256 sizeof(struct rxbd8) * priv->total_rx_ring_size,
257 &addr, GFP_KERNEL);
258 if (!vaddr) {
259 if (netif_msg_ifup(priv))
260 pr_err("%s: Could not allocate buffer descriptors!\n",
261 ndev->name);
262 return -ENOMEM;
263 }
264
265 for (i = 0; i < priv->num_tx_queues; i++) {
266 tx_queue = priv->tx_queue[i];
267 tx_queue->tx_bd_base = (struct txbd8 *) vaddr;
268 tx_queue->tx_bd_dma_base = addr;
269 tx_queue->dev = ndev;
270 /* enet DMA only understands physical addresses */
271 addr += sizeof(struct txbd8) *tx_queue->tx_ring_size;
272 vaddr += sizeof(struct txbd8) *tx_queue->tx_ring_size;
273 }
274
275 /* Start the rx descriptor ring where the tx ring leaves off */
276 for (i = 0; i < priv->num_rx_queues; i++) {
277 rx_queue = priv->rx_queue[i];
278 rx_queue->rx_bd_base = (struct rxbd8 *) vaddr;
279 rx_queue->rx_bd_dma_base = addr;
280 rx_queue->dev = ndev;
281 addr += sizeof (struct rxbd8) * rx_queue->rx_ring_size;
282 vaddr += sizeof (struct rxbd8) * rx_queue->rx_ring_size;
283 }
284
285 /* Setup the skbuff rings */
286 for (i = 0; i < priv->num_tx_queues; i++) {
287 tx_queue = priv->tx_queue[i];
288 tx_queue->tx_skbuff = kmalloc(sizeof(*tx_queue->tx_skbuff) *
289 tx_queue->tx_ring_size, GFP_KERNEL);
290 if (!tx_queue->tx_skbuff) {
291 if (netif_msg_ifup(priv))
292 pr_err("%s: Could not allocate tx_skbuff\n",
293 ndev->name);
294 goto cleanup;
295 }
296
297 for (k = 0; k < tx_queue->tx_ring_size; k++)
298 tx_queue->tx_skbuff[k] = NULL;
299 }
300
301 for (i = 0; i < priv->num_rx_queues; i++) {
302 rx_queue = priv->rx_queue[i];
303 rx_queue->rx_skbuff = kmalloc(sizeof(*rx_queue->rx_skbuff) *
304 rx_queue->rx_ring_size, GFP_KERNEL);
305
306 if (!rx_queue->rx_skbuff) {
307 if (netif_msg_ifup(priv))
308 pr_err("%s: Could not allocate rx_skbuff\n",
309 ndev->name);
310 goto cleanup;
311 }
312
313 for (j = 0; j < rx_queue->rx_ring_size; j++)
314 rx_queue->rx_skbuff[j] = NULL;
315 }
316
317 if (gfar_init_bds(ndev))
318 goto cleanup;
319
320 return 0;
321
322cleanup:
323 free_skb_resources(priv);
324 return -ENOMEM;
325}
326
327static void gfar_init_tx_rx_base(struct gfar_private *priv)
328{
329 struct gfar __iomem *regs = priv->gfargrp[0].regs;
330 u32 __iomem *baddr;
331 int i;
332
333 baddr = &regs->tbase0;
334 for(i = 0; i < priv->num_tx_queues; i++) {
335 gfar_write(baddr, priv->tx_queue[i]->tx_bd_dma_base);
336 baddr += 2;
337 }
338
339 baddr = &regs->rbase0;
340 for(i = 0; i < priv->num_rx_queues; i++) {
341 gfar_write(baddr, priv->rx_queue[i]->rx_bd_dma_base);
342 baddr += 2;
343 }
344}
345
346static void gfar_init_mac(struct net_device *ndev)
347{
348 struct gfar_private *priv = netdev_priv(ndev);
349 struct gfar __iomem *regs = priv->gfargrp[0].regs;
350 u32 rctrl = 0;
351 u32 tctrl = 0;
352 u32 attrs = 0;
353
354 /* write the tx/rx base registers */
355 gfar_init_tx_rx_base(priv);
356
357 /* Configure the coalescing support */
358 gfar_configure_coalescing(priv, 0xFF, 0xFF);
359
360 if (priv->rx_filer_enable) {
361 rctrl |= RCTRL_FILREN;
362 /* Program the RIR0 reg with the required distribution */
363 gfar_write(&regs->rir0, DEFAULT_RIR0);
364 }
365
366 if (priv->rx_csum_enable)
367 rctrl |= RCTRL_CHECKSUMMING;
368
369 if (priv->extended_hash) {
370 rctrl |= RCTRL_EXTHASH;
371
372 gfar_clear_exact_match(ndev);
373 rctrl |= RCTRL_EMEN;
374 }
375
376 if (priv->padding) {
377 rctrl &= ~RCTRL_PAL_MASK;
378 rctrl |= RCTRL_PADDING(priv->padding);
379 }
380
381 /* keep vlan related bits if it's enabled */
382 if (priv->vlgrp) {
383 rctrl |= RCTRL_VLEX | RCTRL_PRSDEP_INIT;
384 tctrl |= TCTRL_VLINS;
385 }
386
387 /* Init rctrl based on our settings */
388 gfar_write(&regs->rctrl, rctrl);
389
390 if (ndev->features & NETIF_F_IP_CSUM)
391 tctrl |= TCTRL_INIT_CSUM;
392
393 tctrl |= TCTRL_TXSCHED_PRIO;
394
395 gfar_write(&regs->tctrl, tctrl);
396
397 /* Set the extraction length and index */
398 attrs = ATTRELI_EL(priv->rx_stash_size) |
399 ATTRELI_EI(priv->rx_stash_index);
400
401 gfar_write(&regs->attreli, attrs);
402
403 /* Start with defaults, and add stashing or locking
404 * depending on the appropriate variables */
405 attrs = ATTR_INIT_SETTINGS;
406
407 if (priv->bd_stash_en)
408 attrs |= ATTR_BDSTASH;
409
410 if (priv->rx_stash_size != 0)
411 attrs |= ATTR_BUFSTASH;
412
413 gfar_write(&regs->attr, attrs);
414
415 gfar_write(&regs->fifo_tx_thr, priv->fifo_threshold);
416 gfar_write(&regs->fifo_tx_starve, priv->fifo_starve);
417 gfar_write(&regs->fifo_tx_starve_shutoff, priv->fifo_starve_off);
418}
419
420static struct net_device_stats *gfar_get_stats(struct net_device *dev)
421{
422 struct gfar_private *priv = netdev_priv(dev);
423 struct netdev_queue *txq;
424 unsigned long rx_packets = 0, rx_bytes = 0, rx_dropped = 0;
425 unsigned long tx_packets = 0, tx_bytes = 0;
426 int i = 0;
427
428 for (i = 0; i < priv->num_rx_queues; i++) {
429 rx_packets += priv->rx_queue[i]->stats.rx_packets;
430 rx_bytes += priv->rx_queue[i]->stats.rx_bytes;
431 rx_dropped += priv->rx_queue[i]->stats.rx_dropped;
432 }
433
434 dev->stats.rx_packets = rx_packets;
435 dev->stats.rx_bytes = rx_bytes;
436 dev->stats.rx_dropped = rx_dropped;
437
438 for (i = 0; i < priv->num_tx_queues; i++) {
439 txq = netdev_get_tx_queue(dev, i);
440 tx_bytes += txq->tx_bytes;
441 tx_packets += txq->tx_packets;
442 }
443
444 dev->stats.tx_bytes = tx_bytes;
445 dev->stats.tx_packets = tx_packets;
446
447 return &dev->stats;
448}
449
150static const struct net_device_ops gfar_netdev_ops = { 450static const struct net_device_ops gfar_netdev_ops = {
151 .ndo_open = gfar_enet_open, 451 .ndo_open = gfar_enet_open,
152 .ndo_start_xmit = gfar_start_xmit, 452 .ndo_start_xmit = gfar_start_xmit,
@@ -155,6 +455,8 @@ static const struct net_device_ops gfar_netdev_ops = {
155 .ndo_set_multicast_list = gfar_set_multi, 455 .ndo_set_multicast_list = gfar_set_multi,
156 .ndo_tx_timeout = gfar_timeout, 456 .ndo_tx_timeout = gfar_timeout,
157 .ndo_do_ioctl = gfar_ioctl, 457 .ndo_do_ioctl = gfar_ioctl,
458 .ndo_select_queue = gfar_select_queue,
459 .ndo_get_stats = gfar_get_stats,
158 .ndo_vlan_rx_register = gfar_vlan_rx_register, 460 .ndo_vlan_rx_register = gfar_vlan_rx_register,
159 .ndo_set_mac_address = eth_mac_addr, 461 .ndo_set_mac_address = eth_mac_addr,
160 .ndo_validate_addr = eth_validate_addr, 462 .ndo_validate_addr = eth_validate_addr,
@@ -163,56 +465,252 @@ static const struct net_device_ops gfar_netdev_ops = {
163#endif 465#endif
164}; 466};
165 467
468unsigned int ftp_rqfpr[MAX_FILER_IDX + 1];
469unsigned int ftp_rqfcr[MAX_FILER_IDX + 1];
470
471void lock_rx_qs(struct gfar_private *priv)
472{
473 int i = 0x0;
474
475 for (i = 0; i < priv->num_rx_queues; i++)
476 spin_lock(&priv->rx_queue[i]->rxlock);
477}
478
479void lock_tx_qs(struct gfar_private *priv)
480{
481 int i = 0x0;
482
483 for (i = 0; i < priv->num_tx_queues; i++)
484 spin_lock(&priv->tx_queue[i]->txlock);
485}
486
487void unlock_rx_qs(struct gfar_private *priv)
488{
489 int i = 0x0;
490
491 for (i = 0; i < priv->num_rx_queues; i++)
492 spin_unlock(&priv->rx_queue[i]->rxlock);
493}
494
495void unlock_tx_qs(struct gfar_private *priv)
496{
497 int i = 0x0;
498
499 for (i = 0; i < priv->num_tx_queues; i++)
500 spin_unlock(&priv->tx_queue[i]->txlock);
501}
502
166/* Returns 1 if incoming frames use an FCB */ 503/* Returns 1 if incoming frames use an FCB */
167static inline int gfar_uses_fcb(struct gfar_private *priv) 504static inline int gfar_uses_fcb(struct gfar_private *priv)
168{ 505{
169 return priv->vlgrp || priv->rx_csum_enable; 506 return priv->vlgrp || priv->rx_csum_enable;
170} 507}
171 508
172static int gfar_of_init(struct net_device *dev) 509u16 gfar_select_queue(struct net_device *dev, struct sk_buff *skb)
510{
511 return skb_get_queue_mapping(skb);
512}
513static void free_tx_pointers(struct gfar_private *priv)
514{
515 int i = 0;
516
517 for (i = 0; i < priv->num_tx_queues; i++)
518 kfree(priv->tx_queue[i]);
519}
520
521static void free_rx_pointers(struct gfar_private *priv)
522{
523 int i = 0;
524
525 for (i = 0; i < priv->num_rx_queues; i++)
526 kfree(priv->rx_queue[i]);
527}
528
529static void unmap_group_regs(struct gfar_private *priv)
530{
531 int i = 0;
532
533 for (i = 0; i < MAXGROUPS; i++)
534 if (priv->gfargrp[i].regs)
535 iounmap(priv->gfargrp[i].regs);
536}
537
538static void disable_napi(struct gfar_private *priv)
539{
540 int i = 0;
541
542 for (i = 0; i < priv->num_grps; i++)
543 napi_disable(&priv->gfargrp[i].napi);
544}
545
546static void enable_napi(struct gfar_private *priv)
547{
548 int i = 0;
549
550 for (i = 0; i < priv->num_grps; i++)
551 napi_enable(&priv->gfargrp[i].napi);
552}
553
554static int gfar_parse_group(struct device_node *np,
555 struct gfar_private *priv, const char *model)
556{
557 u32 *queue_mask;
558 u64 addr, size;
559
560 addr = of_translate_address(np,
561 of_get_address(np, 0, &size, NULL));
562 priv->gfargrp[priv->num_grps].regs = ioremap(addr, size);
563
564 if (!priv->gfargrp[priv->num_grps].regs)
565 return -ENOMEM;
566
567 priv->gfargrp[priv->num_grps].interruptTransmit =
568 irq_of_parse_and_map(np, 0);
569
570 /* If we aren't the FEC we have multiple interrupts */
571 if (model && strcasecmp(model, "FEC")) {
572 priv->gfargrp[priv->num_grps].interruptReceive =
573 irq_of_parse_and_map(np, 1);
574 priv->gfargrp[priv->num_grps].interruptError =
575 irq_of_parse_and_map(np,2);
576 if (priv->gfargrp[priv->num_grps].interruptTransmit < 0 ||
577 priv->gfargrp[priv->num_grps].interruptReceive < 0 ||
578 priv->gfargrp[priv->num_grps].interruptError < 0) {
579 return -EINVAL;
580 }
581 }
582
583 priv->gfargrp[priv->num_grps].grp_id = priv->num_grps;
584 priv->gfargrp[priv->num_grps].priv = priv;
585 spin_lock_init(&priv->gfargrp[priv->num_grps].grplock);
586 if(priv->mode == MQ_MG_MODE) {
587 queue_mask = (u32 *)of_get_property(np,
588 "fsl,rx-bit-map", NULL);
589 priv->gfargrp[priv->num_grps].rx_bit_map =
590 queue_mask ? *queue_mask :(DEFAULT_MAPPING >> priv->num_grps);
591 queue_mask = (u32 *)of_get_property(np,
592 "fsl,tx-bit-map", NULL);
593 priv->gfargrp[priv->num_grps].tx_bit_map =
594 queue_mask ? *queue_mask : (DEFAULT_MAPPING >> priv->num_grps);
595 } else {
596 priv->gfargrp[priv->num_grps].rx_bit_map = 0xFF;
597 priv->gfargrp[priv->num_grps].tx_bit_map = 0xFF;
598 }
599 priv->num_grps++;
600
601 return 0;
602}
603
604static int gfar_of_init(struct of_device *ofdev, struct net_device **pdev)
173{ 605{
174 const char *model; 606 const char *model;
175 const char *ctype; 607 const char *ctype;
176 const void *mac_addr; 608 const void *mac_addr;
177 u64 addr, size; 609 int err = 0, i;
178 int err = 0; 610 struct net_device *dev = NULL;
179 struct gfar_private *priv = netdev_priv(dev); 611 struct gfar_private *priv = NULL;
180 struct device_node *np = priv->node; 612 struct device_node *np = ofdev->node;
613 struct device_node *child = NULL;
181 const u32 *stash; 614 const u32 *stash;
182 const u32 *stash_len; 615 const u32 *stash_len;
183 const u32 *stash_idx; 616 const u32 *stash_idx;
617 unsigned int num_tx_qs, num_rx_qs;
618 u32 *tx_queues, *rx_queues;
184 619
185 if (!np || !of_device_is_available(np)) 620 if (!np || !of_device_is_available(np))
186 return -ENODEV; 621 return -ENODEV;
187 622
188 /* get a pointer to the register memory */ 623 /* parse the num of tx and rx queues */
189 addr = of_translate_address(np, of_get_address(np, 0, &size, NULL)); 624 tx_queues = (u32 *)of_get_property(np, "fsl,num_tx_queues", NULL);
190 priv->regs = ioremap(addr, size); 625 num_tx_qs = tx_queues ? *tx_queues : 1;
626
627 if (num_tx_qs > MAX_TX_QS) {
628 printk(KERN_ERR "num_tx_qs(=%d) greater than MAX_TX_QS(=%d)\n",
629 num_tx_qs, MAX_TX_QS);
630 printk(KERN_ERR "Cannot do alloc_etherdev, aborting\n");
631 return -EINVAL;
632 }
633
634 rx_queues = (u32 *)of_get_property(np, "fsl,num_rx_queues", NULL);
635 num_rx_qs = rx_queues ? *rx_queues : 1;
636
637 if (num_rx_qs > MAX_RX_QS) {
638 printk(KERN_ERR "num_rx_qs(=%d) greater than MAX_RX_QS(=%d)\n",
639 num_rx_qs, MAX_RX_QS);
640 printk(KERN_ERR "Cannot do alloc_etherdev, aborting\n");
641 return -EINVAL;
642 }
191 643
192 if (priv->regs == NULL) 644 *pdev = alloc_etherdev_mq(sizeof(*priv), num_tx_qs);
645 dev = *pdev;
646 if (NULL == dev)
193 return -ENOMEM; 647 return -ENOMEM;
194 648
195 priv->interruptTransmit = irq_of_parse_and_map(np, 0); 649 priv = netdev_priv(dev);
650 priv->node = ofdev->node;
651 priv->ndev = dev;
652
653 dev->num_tx_queues = num_tx_qs;
654 dev->real_num_tx_queues = num_tx_qs;
655 priv->num_tx_queues = num_tx_qs;
656 priv->num_rx_queues = num_rx_qs;
657 priv->num_grps = 0x0;
196 658
197 model = of_get_property(np, "model", NULL); 659 model = of_get_property(np, "model", NULL);
198 660
199 /* If we aren't the FEC we have multiple interrupts */ 661 for (i = 0; i < MAXGROUPS; i++)
200 if (model && strcasecmp(model, "FEC")) { 662 priv->gfargrp[i].regs = NULL;
201 priv->interruptReceive = irq_of_parse_and_map(np, 1); 663
664 /* Parse and initialize group specific information */
665 if (of_device_is_compatible(np, "fsl,etsec2")) {
666 priv->mode = MQ_MG_MODE;
667 for_each_child_of_node(np, child) {
668 err = gfar_parse_group(child, priv, model);
669 if (err)
670 goto err_grp_init;
671 }
672 } else {
673 priv->mode = SQ_SG_MODE;
674 err = gfar_parse_group(np, priv, model);
675 if(err)
676 goto err_grp_init;
677 }
202 678
203 priv->interruptError = irq_of_parse_and_map(np, 2); 679 for (i = 0; i < priv->num_tx_queues; i++)
680 priv->tx_queue[i] = NULL;
681 for (i = 0; i < priv->num_rx_queues; i++)
682 priv->rx_queue[i] = NULL;
683
684 for (i = 0; i < priv->num_tx_queues; i++) {
685 priv->tx_queue[i] = (struct gfar_priv_tx_q *)kmalloc(
686 sizeof (struct gfar_priv_tx_q), GFP_KERNEL);
687 if (!priv->tx_queue[i]) {
688 err = -ENOMEM;
689 goto tx_alloc_failed;
690 }
691 priv->tx_queue[i]->tx_skbuff = NULL;
692 priv->tx_queue[i]->qindex = i;
693 priv->tx_queue[i]->dev = dev;
694 spin_lock_init(&(priv->tx_queue[i]->txlock));
695 }
204 696
205 if (priv->interruptTransmit < 0 || 697 for (i = 0; i < priv->num_rx_queues; i++) {
206 priv->interruptReceive < 0 || 698 priv->rx_queue[i] = (struct gfar_priv_rx_q *)kmalloc(
207 priv->interruptError < 0) { 699 sizeof (struct gfar_priv_rx_q), GFP_KERNEL);
208 err = -EINVAL; 700 if (!priv->rx_queue[i]) {
209 goto err_out; 701 err = -ENOMEM;
702 goto rx_alloc_failed;
210 } 703 }
704 priv->rx_queue[i]->rx_skbuff = NULL;
705 priv->rx_queue[i]->qindex = i;
706 priv->rx_queue[i]->dev = dev;
707 spin_lock_init(&(priv->rx_queue[i]->rxlock));
211 } 708 }
212 709
710
213 stash = of_get_property(np, "bd-stash", NULL); 711 stash = of_get_property(np, "bd-stash", NULL);
214 712
215 if(stash) { 713 if (stash) {
216 priv->device_flags |= FSL_GIANFAR_DEV_HAS_BD_STASHING; 714 priv->device_flags |= FSL_GIANFAR_DEV_HAS_BD_STASHING;
217 priv->bd_stash_en = 1; 715 priv->bd_stash_en = 1;
218 } 716 }
@@ -270,8 +768,13 @@ static int gfar_of_init(struct net_device *dev)
270 768
271 return 0; 769 return 0;
272 770
273err_out: 771rx_alloc_failed:
274 iounmap(priv->regs); 772 free_rx_pointers(priv);
773tx_alloc_failed:
774 free_tx_pointers(priv);
775err_grp_init:
776 unmap_group_regs(priv);
777 free_netdev(dev);
275 return err; 778 return err;
276} 779}
277 780
@@ -289,6 +792,85 @@ static int gfar_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
289 return phy_mii_ioctl(priv->phydev, if_mii(rq), cmd); 792 return phy_mii_ioctl(priv->phydev, if_mii(rq), cmd);
290} 793}
291 794
795static unsigned int reverse_bitmap(unsigned int bit_map, unsigned int max_qs)
796{
797 unsigned int new_bit_map = 0x0;
798 int mask = 0x1 << (max_qs - 1), i;
799 for (i = 0; i < max_qs; i++) {
800 if (bit_map & mask)
801 new_bit_map = new_bit_map + (1 << i);
802 mask = mask >> 0x1;
803 }
804 return new_bit_map;
805}
806
807static u32 cluster_entry_per_class(struct gfar_private *priv, u32 rqfar,
808 u32 class)
809{
810 u32 rqfpr = FPR_FILER_MASK;
811 u32 rqfcr = 0x0;
812
813 rqfar--;
814 rqfcr = RQFCR_CLE | RQFCR_PID_MASK | RQFCR_CMP_EXACT;
815 ftp_rqfpr[rqfar] = rqfpr;
816 ftp_rqfcr[rqfar] = rqfcr;
817 gfar_write_filer(priv, rqfar, rqfcr, rqfpr);
818
819 rqfar--;
820 rqfcr = RQFCR_CMP_NOMATCH;
821 ftp_rqfpr[rqfar] = rqfpr;
822 ftp_rqfcr[rqfar] = rqfcr;
823 gfar_write_filer(priv, rqfar, rqfcr, rqfpr);
824
825 rqfar--;
826 rqfcr = RQFCR_CMP_EXACT | RQFCR_PID_PARSE | RQFCR_CLE | RQFCR_AND;
827 rqfpr = class;
828 ftp_rqfcr[rqfar] = rqfcr;
829 ftp_rqfpr[rqfar] = rqfpr;
830 gfar_write_filer(priv, rqfar, rqfcr, rqfpr);
831
832 rqfar--;
833 rqfcr = RQFCR_CMP_EXACT | RQFCR_PID_MASK | RQFCR_AND;
834 rqfpr = class;
835 ftp_rqfcr[rqfar] = rqfcr;
836 ftp_rqfpr[rqfar] = rqfpr;
837 gfar_write_filer(priv, rqfar, rqfcr, rqfpr);
838
839 return rqfar;
840}
841
842static void gfar_init_filer_table(struct gfar_private *priv)
843{
844 int i = 0x0;
845 u32 rqfar = MAX_FILER_IDX;
846 u32 rqfcr = 0x0;
847 u32 rqfpr = FPR_FILER_MASK;
848
849 /* Default rule */
850 rqfcr = RQFCR_CMP_MATCH;
851 ftp_rqfcr[rqfar] = rqfcr;
852 ftp_rqfpr[rqfar] = rqfpr;
853 gfar_write_filer(priv, rqfar, rqfcr, rqfpr);
854
855 rqfar = cluster_entry_per_class(priv, rqfar, RQFPR_IPV6);
856 rqfar = cluster_entry_per_class(priv, rqfar, RQFPR_IPV6 | RQFPR_UDP);
857 rqfar = cluster_entry_per_class(priv, rqfar, RQFPR_IPV6 | RQFPR_TCP);
858 rqfar = cluster_entry_per_class(priv, rqfar, RQFPR_IPV4);
859 rqfar = cluster_entry_per_class(priv, rqfar, RQFPR_IPV4 | RQFPR_UDP);
860 rqfar = cluster_entry_per_class(priv, rqfar, RQFPR_IPV4 | RQFPR_TCP);
861
862 /* cur_filer_idx indicates the first non-masked rule */
863 priv->cur_filer_idx = rqfar;
864
865 /* Rest are masked rules */
866 rqfcr = RQFCR_CMP_NOMATCH;
867 for (i = 0; i < rqfar; i++) {
868 ftp_rqfcr[i] = rqfcr;
869 ftp_rqfpr[i] = rqfpr;
870 gfar_write_filer(priv, i, rqfcr, rqfpr);
871 }
872}
873
292/* Set up the ethernet device structure, private data, 874/* Set up the ethernet device structure, private data,
293 * and anything else we need before we start */ 875 * and anything else we need before we start */
294static int gfar_probe(struct of_device *ofdev, 876static int gfar_probe(struct of_device *ofdev,
@@ -297,14 +879,17 @@ static int gfar_probe(struct of_device *ofdev,
297 u32 tempval; 879 u32 tempval;
298 struct net_device *dev = NULL; 880 struct net_device *dev = NULL;
299 struct gfar_private *priv = NULL; 881 struct gfar_private *priv = NULL;
300 int err = 0; 882 struct gfar __iomem *regs = NULL;
883 int err = 0, i, grp_idx = 0;
301 int len_devname; 884 int len_devname;
885 u32 rstat = 0, tstat = 0, rqueue = 0, tqueue = 0;
886 u32 isrg = 0;
887 u32 __iomem *baddr;
302 888
303 /* Create an ethernet device instance */ 889 err = gfar_of_init(ofdev, &dev);
304 dev = alloc_etherdev(sizeof (*priv));
305 890
306 if (NULL == dev) 891 if (err)
307 return -ENOMEM; 892 return err;
308 893
309 priv = netdev_priv(dev); 894 priv = netdev_priv(dev);
310 priv->ndev = dev; 895 priv->ndev = dev;
@@ -312,50 +897,46 @@ static int gfar_probe(struct of_device *ofdev,
312 priv->node = ofdev->node; 897 priv->node = ofdev->node;
313 SET_NETDEV_DEV(dev, &ofdev->dev); 898 SET_NETDEV_DEV(dev, &ofdev->dev);
314 899
315 err = gfar_of_init(dev);
316
317 if (err)
318 goto regs_fail;
319
320 spin_lock_init(&priv->txlock);
321 spin_lock_init(&priv->rxlock);
322 spin_lock_init(&priv->bflock); 900 spin_lock_init(&priv->bflock);
323 INIT_WORK(&priv->reset_task, gfar_reset_task); 901 INIT_WORK(&priv->reset_task, gfar_reset_task);
324 902
325 dev_set_drvdata(&ofdev->dev, priv); 903 dev_set_drvdata(&ofdev->dev, priv);
904 regs = priv->gfargrp[0].regs;
326 905
327 /* Stop the DMA engine now, in case it was running before */ 906 /* Stop the DMA engine now, in case it was running before */
328 /* (The firmware could have used it, and left it running). */ 907 /* (The firmware could have used it, and left it running). */
329 gfar_halt(dev); 908 gfar_halt(dev);
330 909
331 /* Reset MAC layer */ 910 /* Reset MAC layer */
332 gfar_write(&priv->regs->maccfg1, MACCFG1_SOFT_RESET); 911 gfar_write(&regs->maccfg1, MACCFG1_SOFT_RESET);
333 912
334 /* We need to delay at least 3 TX clocks */ 913 /* We need to delay at least 3 TX clocks */
335 udelay(2); 914 udelay(2);
336 915
337 tempval = (MACCFG1_TX_FLOW | MACCFG1_RX_FLOW); 916 tempval = (MACCFG1_TX_FLOW | MACCFG1_RX_FLOW);
338 gfar_write(&priv->regs->maccfg1, tempval); 917 gfar_write(&regs->maccfg1, tempval);
339 918
340 /* Initialize MACCFG2. */ 919 /* Initialize MACCFG2. */
341 gfar_write(&priv->regs->maccfg2, MACCFG2_INIT_SETTINGS); 920 gfar_write(&regs->maccfg2, MACCFG2_INIT_SETTINGS);
342 921
343 /* Initialize ECNTRL */ 922 /* Initialize ECNTRL */
344 gfar_write(&priv->regs->ecntrl, ECNTRL_INIT_SETTINGS); 923 gfar_write(&regs->ecntrl, ECNTRL_INIT_SETTINGS);
345 924
346 /* Set the dev->base_addr to the gfar reg region */ 925 /* Set the dev->base_addr to the gfar reg region */
347 dev->base_addr = (unsigned long) (priv->regs); 926 dev->base_addr = (unsigned long) regs;
348 927
349 SET_NETDEV_DEV(dev, &ofdev->dev); 928 SET_NETDEV_DEV(dev, &ofdev->dev);
350 929
351 /* Fill in the dev structure */ 930 /* Fill in the dev structure */
352 dev->watchdog_timeo = TX_TIMEOUT; 931 dev->watchdog_timeo = TX_TIMEOUT;
353 netif_napi_add(dev, &priv->napi, gfar_poll, GFAR_DEV_WEIGHT);
354 dev->mtu = 1500; 932 dev->mtu = 1500;
355
356 dev->netdev_ops = &gfar_netdev_ops; 933 dev->netdev_ops = &gfar_netdev_ops;
357 dev->ethtool_ops = &gfar_ethtool_ops; 934 dev->ethtool_ops = &gfar_ethtool_ops;
358 935
936 /* Register for napi ...We are registering NAPI for each grp */
937 for (i = 0; i < priv->num_grps; i++)
938 netif_napi_add(dev, &priv->gfargrp[i].napi, gfar_poll, GFAR_DEV_WEIGHT);
939
359 if (priv->device_flags & FSL_GIANFAR_DEV_HAS_CSUM) { 940 if (priv->device_flags & FSL_GIANFAR_DEV_HAS_CSUM) {
360 priv->rx_csum_enable = 1; 941 priv->rx_csum_enable = 1;
361 dev->features |= NETIF_F_IP_CSUM | NETIF_F_SG | NETIF_F_HIGHDMA; 942 dev->features |= NETIF_F_IP_CSUM | NETIF_F_SG | NETIF_F_HIGHDMA;
@@ -371,35 +952,35 @@ static int gfar_probe(struct of_device *ofdev,
371 priv->extended_hash = 1; 952 priv->extended_hash = 1;
372 priv->hash_width = 9; 953 priv->hash_width = 9;
373 954
374 priv->hash_regs[0] = &priv->regs->igaddr0; 955 priv->hash_regs[0] = &regs->igaddr0;
375 priv->hash_regs[1] = &priv->regs->igaddr1; 956 priv->hash_regs[1] = &regs->igaddr1;
376 priv->hash_regs[2] = &priv->regs->igaddr2; 957 priv->hash_regs[2] = &regs->igaddr2;
377 priv->hash_regs[3] = &priv->regs->igaddr3; 958 priv->hash_regs[3] = &regs->igaddr3;
378 priv->hash_regs[4] = &priv->regs->igaddr4; 959 priv->hash_regs[4] = &regs->igaddr4;
379 priv->hash_regs[5] = &priv->regs->igaddr5; 960 priv->hash_regs[5] = &regs->igaddr5;
380 priv->hash_regs[6] = &priv->regs->igaddr6; 961 priv->hash_regs[6] = &regs->igaddr6;
381 priv->hash_regs[7] = &priv->regs->igaddr7; 962 priv->hash_regs[7] = &regs->igaddr7;
382 priv->hash_regs[8] = &priv->regs->gaddr0; 963 priv->hash_regs[8] = &regs->gaddr0;
383 priv->hash_regs[9] = &priv->regs->gaddr1; 964 priv->hash_regs[9] = &regs->gaddr1;
384 priv->hash_regs[10] = &priv->regs->gaddr2; 965 priv->hash_regs[10] = &regs->gaddr2;
385 priv->hash_regs[11] = &priv->regs->gaddr3; 966 priv->hash_regs[11] = &regs->gaddr3;
386 priv->hash_regs[12] = &priv->regs->gaddr4; 967 priv->hash_regs[12] = &regs->gaddr4;
387 priv->hash_regs[13] = &priv->regs->gaddr5; 968 priv->hash_regs[13] = &regs->gaddr5;
388 priv->hash_regs[14] = &priv->regs->gaddr6; 969 priv->hash_regs[14] = &regs->gaddr6;
389 priv->hash_regs[15] = &priv->regs->gaddr7; 970 priv->hash_regs[15] = &regs->gaddr7;
390 971
391 } else { 972 } else {
392 priv->extended_hash = 0; 973 priv->extended_hash = 0;
393 priv->hash_width = 8; 974 priv->hash_width = 8;
394 975
395 priv->hash_regs[0] = &priv->regs->gaddr0; 976 priv->hash_regs[0] = &regs->gaddr0;
396 priv->hash_regs[1] = &priv->regs->gaddr1; 977 priv->hash_regs[1] = &regs->gaddr1;
397 priv->hash_regs[2] = &priv->regs->gaddr2; 978 priv->hash_regs[2] = &regs->gaddr2;
398 priv->hash_regs[3] = &priv->regs->gaddr3; 979 priv->hash_regs[3] = &regs->gaddr3;
399 priv->hash_regs[4] = &priv->regs->gaddr4; 980 priv->hash_regs[4] = &regs->gaddr4;
400 priv->hash_regs[5] = &priv->regs->gaddr5; 981 priv->hash_regs[5] = &regs->gaddr5;
401 priv->hash_regs[6] = &priv->regs->gaddr6; 982 priv->hash_regs[6] = &regs->gaddr6;
402 priv->hash_regs[7] = &priv->regs->gaddr7; 983 priv->hash_regs[7] = &regs->gaddr7;
403 } 984 }
404 985
405 if (priv->device_flags & FSL_GIANFAR_DEV_HAS_PADDING) 986 if (priv->device_flags & FSL_GIANFAR_DEV_HAS_PADDING)
@@ -410,16 +991,74 @@ static int gfar_probe(struct of_device *ofdev,
410 if (dev->features & NETIF_F_IP_CSUM) 991 if (dev->features & NETIF_F_IP_CSUM)
411 dev->hard_header_len += GMAC_FCB_LEN; 992 dev->hard_header_len += GMAC_FCB_LEN;
412 993
994 /* Program the isrg regs only if number of grps > 1 */
995 if (priv->num_grps > 1) {
996 baddr = &regs->isrg0;
997 for (i = 0; i < priv->num_grps; i++) {
998 isrg |= (priv->gfargrp[i].rx_bit_map << ISRG_SHIFT_RX);
999 isrg |= (priv->gfargrp[i].tx_bit_map << ISRG_SHIFT_TX);
1000 gfar_write(baddr, isrg);
1001 baddr++;
1002 isrg = 0x0;
1003 }
1004 }
1005
1006 /* Need to reverse the bit maps as bit_map's MSB is q0
1007 * but for_each_bit parses from right to left, which
1008 * basically reverses the queue numbers */
1009 for (i = 0; i< priv->num_grps; i++) {
1010 priv->gfargrp[i].tx_bit_map = reverse_bitmap(
1011 priv->gfargrp[i].tx_bit_map, MAX_TX_QS);
1012 priv->gfargrp[i].rx_bit_map = reverse_bitmap(
1013 priv->gfargrp[i].rx_bit_map, MAX_RX_QS);
1014 }
1015
1016 /* Calculate RSTAT, TSTAT, RQUEUE and TQUEUE values,
1017 * also assign queues to groups */
1018 for (grp_idx = 0; grp_idx < priv->num_grps; grp_idx++) {
1019 priv->gfargrp[grp_idx].num_rx_queues = 0x0;
1020 for_each_bit(i, &priv->gfargrp[grp_idx].rx_bit_map,
1021 priv->num_rx_queues) {
1022 priv->gfargrp[grp_idx].num_rx_queues++;
1023 priv->rx_queue[i]->grp = &priv->gfargrp[grp_idx];
1024 rstat = rstat | (RSTAT_CLEAR_RHALT >> i);
1025 rqueue = rqueue | ((RQUEUE_EN0 | RQUEUE_EX0) >> i);
1026 }
1027 priv->gfargrp[grp_idx].num_tx_queues = 0x0;
1028 for_each_bit (i, &priv->gfargrp[grp_idx].tx_bit_map,
1029 priv->num_tx_queues) {
1030 priv->gfargrp[grp_idx].num_tx_queues++;
1031 priv->tx_queue[i]->grp = &priv->gfargrp[grp_idx];
1032 tstat = tstat | (TSTAT_CLEAR_THALT >> i);
1033 tqueue = tqueue | (TQUEUE_EN0 >> i);
1034 }
1035 priv->gfargrp[grp_idx].rstat = rstat;
1036 priv->gfargrp[grp_idx].tstat = tstat;
1037 rstat = tstat = 0;
1038 }
1039
1040 gfar_write(&regs->rqueue, rqueue);
1041 gfar_write(&regs->tqueue, tqueue);
1042
413 priv->rx_buffer_size = DEFAULT_RX_BUFFER_SIZE; 1043 priv->rx_buffer_size = DEFAULT_RX_BUFFER_SIZE;
414 priv->tx_ring_size = DEFAULT_TX_RING_SIZE;
415 priv->rx_ring_size = DEFAULT_RX_RING_SIZE;
416 priv->num_txbdfree = DEFAULT_TX_RING_SIZE;
417 1044
418 priv->txcoalescing = DEFAULT_TX_COALESCE; 1045 /* Initializing some of the rx/tx queue level parameters */
419 priv->txic = DEFAULT_TXIC; 1046 for (i = 0; i < priv->num_tx_queues; i++) {
420 priv->rxcoalescing = DEFAULT_RX_COALESCE; 1047 priv->tx_queue[i]->tx_ring_size = DEFAULT_TX_RING_SIZE;
421 priv->rxic = DEFAULT_RXIC; 1048 priv->tx_queue[i]->num_txbdfree = DEFAULT_TX_RING_SIZE;
1049 priv->tx_queue[i]->txcoalescing = DEFAULT_TX_COALESCE;
1050 priv->tx_queue[i]->txic = DEFAULT_TXIC;
1051 }
1052
1053 for (i = 0; i < priv->num_rx_queues; i++) {
1054 priv->rx_queue[i]->rx_ring_size = DEFAULT_RX_RING_SIZE;
1055 priv->rx_queue[i]->rxcoalescing = DEFAULT_RX_COALESCE;
1056 priv->rx_queue[i]->rxic = DEFAULT_RXIC;
1057 }
422 1058
1059 /* enable filer if using multiple RX queues */
1060 if (priv->num_rx_queues > 1)
1061 priv->rx_filer_enable = 1;
423 /* Enable most messages by default */ 1062 /* Enable most messages by default */
424 priv->msg_enable = (NETIF_MSG_IFUP << 1 ) - 1; 1063 priv->msg_enable = (NETIF_MSG_IFUP << 1 ) - 1;
425 1064
@@ -439,20 +1078,43 @@ static int gfar_probe(struct of_device *ofdev,
439 1078
440 /* fill out IRQ number and name fields */ 1079 /* fill out IRQ number and name fields */
441 len_devname = strlen(dev->name); 1080 len_devname = strlen(dev->name);
442 strncpy(&priv->int_name_tx[0], dev->name, len_devname); 1081 for (i = 0; i < priv->num_grps; i++) {
443 if (priv->device_flags & FSL_GIANFAR_DEV_HAS_MULTI_INTR) { 1082 strncpy(&priv->gfargrp[i].int_name_tx[0], dev->name,
444 strncpy(&priv->int_name_tx[len_devname], 1083 len_devname);
445 "_tx", sizeof("_tx") + 1); 1084 if (priv->device_flags & FSL_GIANFAR_DEV_HAS_MULTI_INTR) {
446 1085 strncpy(&priv->gfargrp[i].int_name_tx[len_devname],
447 strncpy(&priv->int_name_rx[0], dev->name, len_devname); 1086 "_g", sizeof("_g"));
448 strncpy(&priv->int_name_rx[len_devname], 1087 priv->gfargrp[i].int_name_tx[
449 "_rx", sizeof("_rx") + 1); 1088 strlen(priv->gfargrp[i].int_name_tx)] = i+48;
1089 strncpy(&priv->gfargrp[i].int_name_tx[strlen(
1090 priv->gfargrp[i].int_name_tx)],
1091 "_tx", sizeof("_tx") + 1);
1092
1093 strncpy(&priv->gfargrp[i].int_name_rx[0], dev->name,
1094 len_devname);
1095 strncpy(&priv->gfargrp[i].int_name_rx[len_devname],
1096 "_g", sizeof("_g"));
1097 priv->gfargrp[i].int_name_rx[
1098 strlen(priv->gfargrp[i].int_name_rx)] = i+48;
1099 strncpy(&priv->gfargrp[i].int_name_rx[strlen(
1100 priv->gfargrp[i].int_name_rx)],
1101 "_rx", sizeof("_rx") + 1);
1102
1103 strncpy(&priv->gfargrp[i].int_name_er[0], dev->name,
1104 len_devname);
1105 strncpy(&priv->gfargrp[i].int_name_er[len_devname],
1106 "_g", sizeof("_g"));
1107 priv->gfargrp[i].int_name_er[strlen(
1108 priv->gfargrp[i].int_name_er)] = i+48;
1109 strncpy(&priv->gfargrp[i].int_name_er[strlen(\
1110 priv->gfargrp[i].int_name_er)],
1111 "_er", sizeof("_er") + 1);
1112 } else
1113 priv->gfargrp[i].int_name_tx[len_devname] = '\0';
1114 }
450 1115
451 strncpy(&priv->int_name_er[0], dev->name, len_devname); 1116 /* Initialize the filer table */
452 strncpy(&priv->int_name_er[len_devname], 1117 gfar_init_filer_table(priv);
453 "_er", sizeof("_er") + 1);
454 } else
455 priv->int_name_tx[len_devname] = '\0';
456 1118
457 /* Create all the sysfs files */ 1119 /* Create all the sysfs files */
458 gfar_init_sysfs(dev); 1120 gfar_init_sysfs(dev);
@@ -463,14 +1125,19 @@ static int gfar_probe(struct of_device *ofdev,
463 /* Even more device info helps when determining which kernel */ 1125 /* Even more device info helps when determining which kernel */
464 /* provided which set of benchmarks. */ 1126 /* provided which set of benchmarks. */
465 printk(KERN_INFO "%s: Running with NAPI enabled\n", dev->name); 1127 printk(KERN_INFO "%s: Running with NAPI enabled\n", dev->name);
466 printk(KERN_INFO "%s: %d/%d RX/TX BD ring size\n", 1128 for (i = 0; i < priv->num_rx_queues; i++)
467 dev->name, priv->rx_ring_size, priv->tx_ring_size); 1129 printk(KERN_INFO "%s: RX BD ring size for Q[%d]: %d\n",
1130 dev->name, i, priv->rx_queue[i]->rx_ring_size);
1131 for (i = 0; i < priv->num_tx_queues; i++)
1132 printk(KERN_INFO "%s: TX BD ring size for Q[%d]: %d\n",
1133 dev->name, i, priv->tx_queue[i]->tx_ring_size);
468 1134
469 return 0; 1135 return 0;
470 1136
471register_fail: 1137register_fail:
472 iounmap(priv->regs); 1138 unmap_group_regs(priv);
473regs_fail: 1139 free_tx_pointers(priv);
1140 free_rx_pointers(priv);
474 if (priv->phy_node) 1141 if (priv->phy_node)
475 of_node_put(priv->phy_node); 1142 of_node_put(priv->phy_node);
476 if (priv->tbi_node) 1143 if (priv->tbi_node)
@@ -491,54 +1158,59 @@ static int gfar_remove(struct of_device *ofdev)
491 dev_set_drvdata(&ofdev->dev, NULL); 1158 dev_set_drvdata(&ofdev->dev, NULL);
492 1159
493 unregister_netdev(priv->ndev); 1160 unregister_netdev(priv->ndev);
494 iounmap(priv->regs); 1161 unmap_group_regs(priv);
495 free_netdev(priv->ndev); 1162 free_netdev(priv->ndev);
496 1163
497 return 0; 1164 return 0;
498} 1165}
499 1166
500#ifdef CONFIG_PM 1167#ifdef CONFIG_PM
501static int gfar_suspend(struct of_device *ofdev, pm_message_t state) 1168
1169static int gfar_suspend(struct device *dev)
502{ 1170{
503 struct gfar_private *priv = dev_get_drvdata(&ofdev->dev); 1171 struct gfar_private *priv = dev_get_drvdata(dev);
504 struct net_device *dev = priv->ndev; 1172 struct net_device *ndev = priv->ndev;
1173 struct gfar __iomem *regs = priv->gfargrp[0].regs;
505 unsigned long flags; 1174 unsigned long flags;
506 u32 tempval; 1175 u32 tempval;
507 1176
508 int magic_packet = priv->wol_en && 1177 int magic_packet = priv->wol_en &&
509 (priv->device_flags & FSL_GIANFAR_DEV_HAS_MAGIC_PACKET); 1178 (priv->device_flags & FSL_GIANFAR_DEV_HAS_MAGIC_PACKET);
510 1179
511 netif_device_detach(dev); 1180 netif_device_detach(ndev);
1181
1182 if (netif_running(ndev)) {
512 1183
513 if (netif_running(dev)) { 1184 local_irq_save(flags);
514 spin_lock_irqsave(&priv->txlock, flags); 1185 lock_tx_qs(priv);
515 spin_lock(&priv->rxlock); 1186 lock_rx_qs(priv);
516 1187
517 gfar_halt_nodisable(dev); 1188 gfar_halt_nodisable(ndev);
518 1189
519 /* Disable Tx, and Rx if wake-on-LAN is disabled. */ 1190 /* Disable Tx, and Rx if wake-on-LAN is disabled. */
520 tempval = gfar_read(&priv->regs->maccfg1); 1191 tempval = gfar_read(&regs->maccfg1);
521 1192
522 tempval &= ~MACCFG1_TX_EN; 1193 tempval &= ~MACCFG1_TX_EN;
523 1194
524 if (!magic_packet) 1195 if (!magic_packet)
525 tempval &= ~MACCFG1_RX_EN; 1196 tempval &= ~MACCFG1_RX_EN;
526 1197
527 gfar_write(&priv->regs->maccfg1, tempval); 1198 gfar_write(&regs->maccfg1, tempval);
528 1199
529 spin_unlock(&priv->rxlock); 1200 unlock_rx_qs(priv);
530 spin_unlock_irqrestore(&priv->txlock, flags); 1201 unlock_tx_qs(priv);
1202 local_irq_restore(flags);
531 1203
532 napi_disable(&priv->napi); 1204 disable_napi(priv);
533 1205
534 if (magic_packet) { 1206 if (magic_packet) {
535 /* Enable interrupt on Magic Packet */ 1207 /* Enable interrupt on Magic Packet */
536 gfar_write(&priv->regs->imask, IMASK_MAG); 1208 gfar_write(&regs->imask, IMASK_MAG);
537 1209
538 /* Enable Magic Packet mode */ 1210 /* Enable Magic Packet mode */
539 tempval = gfar_read(&priv->regs->maccfg2); 1211 tempval = gfar_read(&regs->maccfg2);
540 tempval |= MACCFG2_MPEN; 1212 tempval |= MACCFG2_MPEN;
541 gfar_write(&priv->regs->maccfg2, tempval); 1213 gfar_write(&regs->maccfg2, tempval);
542 } else { 1214 } else {
543 phy_stop(priv->phydev); 1215 phy_stop(priv->phydev);
544 } 1216 }
@@ -547,17 +1219,18 @@ static int gfar_suspend(struct of_device *ofdev, pm_message_t state)
547 return 0; 1219 return 0;
548} 1220}
549 1221
550static int gfar_resume(struct of_device *ofdev) 1222static int gfar_resume(struct device *dev)
551{ 1223{
552 struct gfar_private *priv = dev_get_drvdata(&ofdev->dev); 1224 struct gfar_private *priv = dev_get_drvdata(dev);
553 struct net_device *dev = priv->ndev; 1225 struct net_device *ndev = priv->ndev;
1226 struct gfar __iomem *regs = priv->gfargrp[0].regs;
554 unsigned long flags; 1227 unsigned long flags;
555 u32 tempval; 1228 u32 tempval;
556 int magic_packet = priv->wol_en && 1229 int magic_packet = priv->wol_en &&
557 (priv->device_flags & FSL_GIANFAR_DEV_HAS_MAGIC_PACKET); 1230 (priv->device_flags & FSL_GIANFAR_DEV_HAS_MAGIC_PACKET);
558 1231
559 if (!netif_running(dev)) { 1232 if (!netif_running(ndev)) {
560 netif_device_attach(dev); 1233 netif_device_attach(ndev);
561 return 0; 1234 return 0;
562 } 1235 }
563 1236
@@ -567,28 +1240,80 @@ static int gfar_resume(struct of_device *ofdev)
567 /* Disable Magic Packet mode, in case something 1240 /* Disable Magic Packet mode, in case something
568 * else woke us up. 1241 * else woke us up.
569 */ 1242 */
1243 local_irq_save(flags);
1244 lock_tx_qs(priv);
1245 lock_rx_qs(priv);
570 1246
571 spin_lock_irqsave(&priv->txlock, flags); 1247 tempval = gfar_read(&regs->maccfg2);
572 spin_lock(&priv->rxlock);
573
574 tempval = gfar_read(&priv->regs->maccfg2);
575 tempval &= ~MACCFG2_MPEN; 1248 tempval &= ~MACCFG2_MPEN;
576 gfar_write(&priv->regs->maccfg2, tempval); 1249 gfar_write(&regs->maccfg2, tempval);
1250
1251 gfar_start(ndev);
577 1252
578 gfar_start(dev); 1253 unlock_rx_qs(priv);
1254 unlock_tx_qs(priv);
1255 local_irq_restore(flags);
579 1256
580 spin_unlock(&priv->rxlock); 1257 netif_device_attach(ndev);
581 spin_unlock_irqrestore(&priv->txlock, flags);
582 1258
583 netif_device_attach(dev); 1259 enable_napi(priv);
584 1260
585 napi_enable(&priv->napi); 1261 return 0;
1262}
1263
1264static int gfar_restore(struct device *dev)
1265{
1266 struct gfar_private *priv = dev_get_drvdata(dev);
1267 struct net_device *ndev = priv->ndev;
1268
1269 if (!netif_running(ndev))
1270 return 0;
1271
1272 gfar_init_bds(ndev);
1273 init_registers(ndev);
1274 gfar_set_mac_address(ndev);
1275 gfar_init_mac(ndev);
1276 gfar_start(ndev);
1277
1278 priv->oldlink = 0;
1279 priv->oldspeed = 0;
1280 priv->oldduplex = -1;
1281
1282 if (priv->phydev)
1283 phy_start(priv->phydev);
1284
1285 netif_device_attach(ndev);
1286 enable_napi(priv);
586 1287
587 return 0; 1288 return 0;
588} 1289}
1290
1291static struct dev_pm_ops gfar_pm_ops = {
1292 .suspend = gfar_suspend,
1293 .resume = gfar_resume,
1294 .freeze = gfar_suspend,
1295 .thaw = gfar_resume,
1296 .restore = gfar_restore,
1297};
1298
1299#define GFAR_PM_OPS (&gfar_pm_ops)
1300
1301static int gfar_legacy_suspend(struct of_device *ofdev, pm_message_t state)
1302{
1303 return gfar_suspend(&ofdev->dev);
1304}
1305
1306static int gfar_legacy_resume(struct of_device *ofdev)
1307{
1308 return gfar_resume(&ofdev->dev);
1309}
1310
589#else 1311#else
590#define gfar_suspend NULL 1312
591#define gfar_resume NULL 1313#define GFAR_PM_OPS NULL
1314#define gfar_legacy_suspend NULL
1315#define gfar_legacy_resume NULL
1316
592#endif 1317#endif
593 1318
594/* Reads the controller's registers to determine what interface 1319/* Reads the controller's registers to determine what interface
@@ -597,7 +1322,10 @@ static int gfar_resume(struct of_device *ofdev)
597static phy_interface_t gfar_get_interface(struct net_device *dev) 1322static phy_interface_t gfar_get_interface(struct net_device *dev)
598{ 1323{
599 struct gfar_private *priv = netdev_priv(dev); 1324 struct gfar_private *priv = netdev_priv(dev);
600 u32 ecntrl = gfar_read(&priv->regs->ecntrl); 1325 struct gfar __iomem *regs = priv->gfargrp[0].regs;
1326 u32 ecntrl;
1327
1328 ecntrl = gfar_read(&regs->ecntrl);
601 1329
602 if (ecntrl & ECNTRL_SGMII_MODE) 1330 if (ecntrl & ECNTRL_SGMII_MODE)
603 return PHY_INTERFACE_MODE_SGMII; 1331 return PHY_INTERFACE_MODE_SGMII;
@@ -719,46 +1447,52 @@ static void gfar_configure_serdes(struct net_device *dev)
719static void init_registers(struct net_device *dev) 1447static void init_registers(struct net_device *dev)
720{ 1448{
721 struct gfar_private *priv = netdev_priv(dev); 1449 struct gfar_private *priv = netdev_priv(dev);
1450 struct gfar __iomem *regs = NULL;
1451 int i = 0;
722 1452
723 /* Clear IEVENT */ 1453 for (i = 0; i < priv->num_grps; i++) {
724 gfar_write(&priv->regs->ievent, IEVENT_INIT_CLEAR); 1454 regs = priv->gfargrp[i].regs;
1455 /* Clear IEVENT */
1456 gfar_write(&regs->ievent, IEVENT_INIT_CLEAR);
725 1457
726 /* Initialize IMASK */ 1458 /* Initialize IMASK */
727 gfar_write(&priv->regs->imask, IMASK_INIT_CLEAR); 1459 gfar_write(&regs->imask, IMASK_INIT_CLEAR);
1460 }
728 1461
1462 regs = priv->gfargrp[0].regs;
729 /* Init hash registers to zero */ 1463 /* Init hash registers to zero */
730 gfar_write(&priv->regs->igaddr0, 0); 1464 gfar_write(&regs->igaddr0, 0);
731 gfar_write(&priv->regs->igaddr1, 0); 1465 gfar_write(&regs->igaddr1, 0);
732 gfar_write(&priv->regs->igaddr2, 0); 1466 gfar_write(&regs->igaddr2, 0);
733 gfar_write(&priv->regs->igaddr3, 0); 1467 gfar_write(&regs->igaddr3, 0);
734 gfar_write(&priv->regs->igaddr4, 0); 1468 gfar_write(&regs->igaddr4, 0);
735 gfar_write(&priv->regs->igaddr5, 0); 1469 gfar_write(&regs->igaddr5, 0);
736 gfar_write(&priv->regs->igaddr6, 0); 1470 gfar_write(&regs->igaddr6, 0);
737 gfar_write(&priv->regs->igaddr7, 0); 1471 gfar_write(&regs->igaddr7, 0);
738 1472
739 gfar_write(&priv->regs->gaddr0, 0); 1473 gfar_write(&regs->gaddr0, 0);
740 gfar_write(&priv->regs->gaddr1, 0); 1474 gfar_write(&regs->gaddr1, 0);
741 gfar_write(&priv->regs->gaddr2, 0); 1475 gfar_write(&regs->gaddr2, 0);
742 gfar_write(&priv->regs->gaddr3, 0); 1476 gfar_write(&regs->gaddr3, 0);
743 gfar_write(&priv->regs->gaddr4, 0); 1477 gfar_write(&regs->gaddr4, 0);
744 gfar_write(&priv->regs->gaddr5, 0); 1478 gfar_write(&regs->gaddr5, 0);
745 gfar_write(&priv->regs->gaddr6, 0); 1479 gfar_write(&regs->gaddr6, 0);
746 gfar_write(&priv->regs->gaddr7, 0); 1480 gfar_write(&regs->gaddr7, 0);
747 1481
748 /* Zero out the rmon mib registers if it has them */ 1482 /* Zero out the rmon mib registers if it has them */
749 if (priv->device_flags & FSL_GIANFAR_DEV_HAS_RMON) { 1483 if (priv->device_flags & FSL_GIANFAR_DEV_HAS_RMON) {
750 memset_io(&(priv->regs->rmon), 0, sizeof (struct rmon_mib)); 1484 memset_io(&(regs->rmon), 0, sizeof (struct rmon_mib));
751 1485
752 /* Mask off the CAM interrupts */ 1486 /* Mask off the CAM interrupts */
753 gfar_write(&priv->regs->rmon.cam1, 0xffffffff); 1487 gfar_write(&regs->rmon.cam1, 0xffffffff);
754 gfar_write(&priv->regs->rmon.cam2, 0xffffffff); 1488 gfar_write(&regs->rmon.cam2, 0xffffffff);
755 } 1489 }
756 1490
757 /* Initialize the max receive buffer length */ 1491 /* Initialize the max receive buffer length */
758 gfar_write(&priv->regs->mrblr, priv->rx_buffer_size); 1492 gfar_write(&regs->mrblr, priv->rx_buffer_size);
759 1493
760 /* Initialize the Minimum Frame Length Register */ 1494 /* Initialize the Minimum Frame Length Register */
761 gfar_write(&priv->regs->minflr, MINFLR_INIT_SETTINGS); 1495 gfar_write(&regs->minflr, MINFLR_INIT_SETTINGS);
762} 1496}
763 1497
764 1498
@@ -766,23 +1500,28 @@ static void init_registers(struct net_device *dev)
766static void gfar_halt_nodisable(struct net_device *dev) 1500static void gfar_halt_nodisable(struct net_device *dev)
767{ 1501{
768 struct gfar_private *priv = netdev_priv(dev); 1502 struct gfar_private *priv = netdev_priv(dev);
769 struct gfar __iomem *regs = priv->regs; 1503 struct gfar __iomem *regs = NULL;
770 u32 tempval; 1504 u32 tempval;
1505 int i = 0;
771 1506
772 /* Mask all interrupts */ 1507 for (i = 0; i < priv->num_grps; i++) {
773 gfar_write(&regs->imask, IMASK_INIT_CLEAR); 1508 regs = priv->gfargrp[i].regs;
1509 /* Mask all interrupts */
1510 gfar_write(&regs->imask, IMASK_INIT_CLEAR);
774 1511
775 /* Clear all interrupts */ 1512 /* Clear all interrupts */
776 gfar_write(&regs->ievent, IEVENT_INIT_CLEAR); 1513 gfar_write(&regs->ievent, IEVENT_INIT_CLEAR);
1514 }
777 1515
1516 regs = priv->gfargrp[0].regs;
778 /* Stop the DMA, and wait for it to stop */ 1517 /* Stop the DMA, and wait for it to stop */
779 tempval = gfar_read(&priv->regs->dmactrl); 1518 tempval = gfar_read(&regs->dmactrl);
780 if ((tempval & (DMACTRL_GRS | DMACTRL_GTS)) 1519 if ((tempval & (DMACTRL_GRS | DMACTRL_GTS))
781 != (DMACTRL_GRS | DMACTRL_GTS)) { 1520 != (DMACTRL_GRS | DMACTRL_GTS)) {
782 tempval |= (DMACTRL_GRS | DMACTRL_GTS); 1521 tempval |= (DMACTRL_GRS | DMACTRL_GTS);
783 gfar_write(&priv->regs->dmactrl, tempval); 1522 gfar_write(&regs->dmactrl, tempval);
784 1523
785 while (!(gfar_read(&priv->regs->ievent) & 1524 while (!(gfar_read(&regs->ievent) &
786 (IEVENT_GRSC | IEVENT_GTSC))) 1525 (IEVENT_GRSC | IEVENT_GTSC)))
787 cpu_relax(); 1526 cpu_relax();
788 } 1527 }
@@ -792,7 +1531,7 @@ static void gfar_halt_nodisable(struct net_device *dev)
792void gfar_halt(struct net_device *dev) 1531void gfar_halt(struct net_device *dev)
793{ 1532{
794 struct gfar_private *priv = netdev_priv(dev); 1533 struct gfar_private *priv = netdev_priv(dev);
795 struct gfar __iomem *regs = priv->regs; 1534 struct gfar __iomem *regs = priv->gfargrp[0].regs;
796 u32 tempval; 1535 u32 tempval;
797 1536
798 gfar_halt_nodisable(dev); 1537 gfar_halt_nodisable(dev);
@@ -803,101 +1542,131 @@ void gfar_halt(struct net_device *dev)
803 gfar_write(&regs->maccfg1, tempval); 1542 gfar_write(&regs->maccfg1, tempval);
804} 1543}
805 1544
1545static void free_grp_irqs(struct gfar_priv_grp *grp)
1546{
1547 free_irq(grp->interruptError, grp);
1548 free_irq(grp->interruptTransmit, grp);
1549 free_irq(grp->interruptReceive, grp);
1550}
1551
806void stop_gfar(struct net_device *dev) 1552void stop_gfar(struct net_device *dev)
807{ 1553{
808 struct gfar_private *priv = netdev_priv(dev); 1554 struct gfar_private *priv = netdev_priv(dev);
809 struct gfar __iomem *regs = priv->regs;
810 unsigned long flags; 1555 unsigned long flags;
1556 int i;
811 1557
812 phy_stop(priv->phydev); 1558 phy_stop(priv->phydev);
813 1559
1560
814 /* Lock it down */ 1561 /* Lock it down */
815 spin_lock_irqsave(&priv->txlock, flags); 1562 local_irq_save(flags);
816 spin_lock(&priv->rxlock); 1563 lock_tx_qs(priv);
1564 lock_rx_qs(priv);
817 1565
818 gfar_halt(dev); 1566 gfar_halt(dev);
819 1567
820 spin_unlock(&priv->rxlock); 1568 unlock_rx_qs(priv);
821 spin_unlock_irqrestore(&priv->txlock, flags); 1569 unlock_tx_qs(priv);
1570 local_irq_restore(flags);
822 1571
823 /* Free the IRQs */ 1572 /* Free the IRQs */
824 if (priv->device_flags & FSL_GIANFAR_DEV_HAS_MULTI_INTR) { 1573 if (priv->device_flags & FSL_GIANFAR_DEV_HAS_MULTI_INTR) {
825 free_irq(priv->interruptError, dev); 1574 for (i = 0; i < priv->num_grps; i++)
826 free_irq(priv->interruptTransmit, dev); 1575 free_grp_irqs(&priv->gfargrp[i]);
827 free_irq(priv->interruptReceive, dev);
828 } else { 1576 } else {
829 free_irq(priv->interruptTransmit, dev); 1577 for (i = 0; i < priv->num_grps; i++)
1578 free_irq(priv->gfargrp[i].interruptTransmit,
1579 &priv->gfargrp[i]);
830 } 1580 }
831 1581
832 free_skb_resources(priv); 1582 free_skb_resources(priv);
833
834 dma_free_coherent(&priv->ofdev->dev,
835 sizeof(struct txbd8)*priv->tx_ring_size
836 + sizeof(struct rxbd8)*priv->rx_ring_size,
837 priv->tx_bd_base,
838 gfar_read(&regs->tbase0));
839} 1583}
840 1584
841/* If there are any tx skbs or rx skbs still around, free them. 1585static void free_skb_tx_queue(struct gfar_priv_tx_q *tx_queue)
842 * Then free tx_skbuff and rx_skbuff */
843static void free_skb_resources(struct gfar_private *priv)
844{ 1586{
845 struct rxbd8 *rxbdp;
846 struct txbd8 *txbdp; 1587 struct txbd8 *txbdp;
1588 struct gfar_private *priv = netdev_priv(tx_queue->dev);
847 int i, j; 1589 int i, j;
848 1590
849 /* Go through all the buffer descriptors and free their data buffers */ 1591 txbdp = tx_queue->tx_bd_base;
850 txbdp = priv->tx_bd_base;
851 1592
852 for (i = 0; i < priv->tx_ring_size; i++) { 1593 for (i = 0; i < tx_queue->tx_ring_size; i++) {
853 if (!priv->tx_skbuff[i]) 1594 if (!tx_queue->tx_skbuff[i])
854 continue; 1595 continue;
855 1596
856 dma_unmap_single(&priv->ofdev->dev, txbdp->bufPtr, 1597 dma_unmap_single(&priv->ofdev->dev, txbdp->bufPtr,
857 txbdp->length, DMA_TO_DEVICE); 1598 txbdp->length, DMA_TO_DEVICE);
858 txbdp->lstatus = 0; 1599 txbdp->lstatus = 0;
859 for (j = 0; j < skb_shinfo(priv->tx_skbuff[i])->nr_frags; j++) { 1600 for (j = 0; j < skb_shinfo(tx_queue->tx_skbuff[i])->nr_frags;
1601 j++) {
860 txbdp++; 1602 txbdp++;
861 dma_unmap_page(&priv->ofdev->dev, txbdp->bufPtr, 1603 dma_unmap_page(&priv->ofdev->dev, txbdp->bufPtr,
862 txbdp->length, DMA_TO_DEVICE); 1604 txbdp->length, DMA_TO_DEVICE);
863 } 1605 }
864 txbdp++; 1606 txbdp++;
865 dev_kfree_skb_any(priv->tx_skbuff[i]); 1607 dev_kfree_skb_any(tx_queue->tx_skbuff[i]);
866 priv->tx_skbuff[i] = NULL; 1608 tx_queue->tx_skbuff[i] = NULL;
867 } 1609 }
1610 kfree(tx_queue->tx_skbuff);
1611}
868 1612
869 kfree(priv->tx_skbuff); 1613static void free_skb_rx_queue(struct gfar_priv_rx_q *rx_queue)
870 1614{
871 rxbdp = priv->rx_bd_base; 1615 struct rxbd8 *rxbdp;
1616 struct gfar_private *priv = netdev_priv(rx_queue->dev);
1617 int i;
872 1618
873 /* rx_skbuff is not guaranteed to be allocated, so only 1619 rxbdp = rx_queue->rx_bd_base;
874 * free it and its contents if it is allocated */
875 if(priv->rx_skbuff != NULL) {
876 for (i = 0; i < priv->rx_ring_size; i++) {
877 if (priv->rx_skbuff[i]) {
878 dma_unmap_single(&priv->ofdev->dev, rxbdp->bufPtr,
879 priv->rx_buffer_size,
880 DMA_FROM_DEVICE);
881 1620
882 dev_kfree_skb_any(priv->rx_skbuff[i]); 1621 for (i = 0; i < rx_queue->rx_ring_size; i++) {
883 priv->rx_skbuff[i] = NULL; 1622 if (rx_queue->rx_skbuff[i]) {
884 } 1623 dma_unmap_single(&priv->ofdev->dev,
1624 rxbdp->bufPtr, priv->rx_buffer_size,
1625 DMA_FROM_DEVICE);
1626 dev_kfree_skb_any(rx_queue->rx_skbuff[i]);
1627 rx_queue->rx_skbuff[i] = NULL;
1628 }
1629 rxbdp->lstatus = 0;
1630 rxbdp->bufPtr = 0;
1631 rxbdp++;
1632 }
1633 kfree(rx_queue->rx_skbuff);
1634}
885 1635
886 rxbdp->lstatus = 0; 1636/* If there are any tx skbs or rx skbs still around, free them.
887 rxbdp->bufPtr = 0; 1637 * Then free tx_skbuff and rx_skbuff */
1638static void free_skb_resources(struct gfar_private *priv)
1639{
1640 struct gfar_priv_tx_q *tx_queue = NULL;
1641 struct gfar_priv_rx_q *rx_queue = NULL;
1642 int i;
888 1643
889 rxbdp++; 1644 /* Go through all the buffer descriptors and free their data buffers */
890 } 1645 for (i = 0; i < priv->num_tx_queues; i++) {
1646 tx_queue = priv->tx_queue[i];
1647 if(!tx_queue->tx_skbuff)
1648 free_skb_tx_queue(tx_queue);
1649 }
891 1650
892 kfree(priv->rx_skbuff); 1651 for (i = 0; i < priv->num_rx_queues; i++) {
1652 rx_queue = priv->rx_queue[i];
1653 if (rx_queue->rx_skbuff)
1654 free_skb_rx_queue(rx_queue);
893 } 1655 }
1656
1657 dma_free_coherent(&priv->ofdev->dev,
1658 sizeof(struct txbd8) * priv->total_tx_ring_size +
1659 sizeof(struct rxbd8) * priv->total_rx_ring_size,
1660 priv->tx_queue[0]->tx_bd_base,
1661 priv->tx_queue[0]->tx_bd_dma_base);
894} 1662}
895 1663
896void gfar_start(struct net_device *dev) 1664void gfar_start(struct net_device *dev)
897{ 1665{
898 struct gfar_private *priv = netdev_priv(dev); 1666 struct gfar_private *priv = netdev_priv(dev);
899 struct gfar __iomem *regs = priv->regs; 1667 struct gfar __iomem *regs = priv->gfargrp[0].regs;
900 u32 tempval; 1668 u32 tempval;
1669 int i = 0;
901 1670
902 /* Enable Rx and Tx in MACCFG1 */ 1671 /* Enable Rx and Tx in MACCFG1 */
903 tempval = gfar_read(&regs->maccfg1); 1672 tempval = gfar_read(&regs->maccfg1);
@@ -905,269 +1674,159 @@ void gfar_start(struct net_device *dev)
905 gfar_write(&regs->maccfg1, tempval); 1674 gfar_write(&regs->maccfg1, tempval);
906 1675
907 /* Initialize DMACTRL to have WWR and WOP */ 1676 /* Initialize DMACTRL to have WWR and WOP */
908 tempval = gfar_read(&priv->regs->dmactrl); 1677 tempval = gfar_read(&regs->dmactrl);
909 tempval |= DMACTRL_INIT_SETTINGS; 1678 tempval |= DMACTRL_INIT_SETTINGS;
910 gfar_write(&priv->regs->dmactrl, tempval); 1679 gfar_write(&regs->dmactrl, tempval);
911 1680
912 /* Make sure we aren't stopped */ 1681 /* Make sure we aren't stopped */
913 tempval = gfar_read(&priv->regs->dmactrl); 1682 tempval = gfar_read(&regs->dmactrl);
914 tempval &= ~(DMACTRL_GRS | DMACTRL_GTS); 1683 tempval &= ~(DMACTRL_GRS | DMACTRL_GTS);
915 gfar_write(&priv->regs->dmactrl, tempval); 1684 gfar_write(&regs->dmactrl, tempval);
916 1685
917 /* Clear THLT/RHLT, so that the DMA starts polling now */ 1686 for (i = 0; i < priv->num_grps; i++) {
918 gfar_write(&regs->tstat, TSTAT_CLEAR_THALT); 1687 regs = priv->gfargrp[i].regs;
919 gfar_write(&regs->rstat, RSTAT_CLEAR_RHALT); 1688 /* Clear THLT/RHLT, so that the DMA starts polling now */
920 1689 gfar_write(&regs->tstat, priv->gfargrp[i].tstat);
921 /* Unmask the interrupts we look for */ 1690 gfar_write(&regs->rstat, priv->gfargrp[i].rstat);
922 gfar_write(&regs->imask, IMASK_DEFAULT); 1691 /* Unmask the interrupts we look for */
1692 gfar_write(&regs->imask, IMASK_DEFAULT);
1693 }
923 1694
924 dev->trans_start = jiffies; 1695 dev->trans_start = jiffies;
925} 1696}
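
gfar_start() still programs MACCFG1 and DMACTRL once through the first group's register block, but then loops over priv->num_grps to clear THLT/RHLT and unmask IMASK_DEFAULT per interrupt group, using each group's own tstat/rstat value. A rough stand-alone model of that per-group loop; the group array and the bit values below are illustrative only, not the hardware encoding:

#include <stdio.h>

#define NUM_GRPS 2

/* Illustrative per-group "clear halt" values. */
struct grp {
	unsigned int tstat;
	unsigned int rstat;
};

int main(void)
{
	struct grp grps[NUM_GRPS] = {
		{ 0x80000000u, 0x00800000u },
		{ 0x40000000u, 0x00400000u },
	};

	for (int i = 0; i < NUM_GRPS; i++) {
		/* models gfar_write(&regs->tstat, ...), the rstat write and the imask unmask */
		printf("group %d: write tstat=0x%08x rstat=0x%08x, unmask interrupts\n",
		       i, grps[i].tstat, grps[i].rstat);
	}
	return 0;
}
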
926 1697
927/* Bring the controller up and running */ 1698void gfar_configure_coalescing(struct gfar_private *priv,
928int startup_gfar(struct net_device *dev) 1699 unsigned long tx_mask, unsigned long rx_mask)
929{ 1700{
930 struct txbd8 *txbdp; 1701 struct gfar __iomem *regs = priv->gfargrp[0].regs;
931 struct rxbd8 *rxbdp; 1702 u32 __iomem *baddr;
932 dma_addr_t addr = 0; 1703 int i = 0;
933 unsigned long vaddr;
934 int i;
935 struct gfar_private *priv = netdev_priv(dev);
936 struct gfar __iomem *regs = priv->regs;
937 int err = 0;
938 u32 rctrl = 0;
939 u32 tctrl = 0;
940 u32 attrs = 0;
941
942 gfar_write(&regs->imask, IMASK_INIT_CLEAR);
943
944 /* Allocate memory for the buffer descriptors */
945 vaddr = (unsigned long) dma_alloc_coherent(&priv->ofdev->dev,
946 sizeof (struct txbd8) * priv->tx_ring_size +
947 sizeof (struct rxbd8) * priv->rx_ring_size,
948 &addr, GFP_KERNEL);
949
950 if (vaddr == 0) {
951 if (netif_msg_ifup(priv))
952 printk(KERN_ERR "%s: Could not allocate buffer descriptors!\n",
953 dev->name);
954 return -ENOMEM;
955 }
956
957 priv->tx_bd_base = (struct txbd8 *) vaddr;
958
959 /* enet DMA only understands physical addresses */
960 gfar_write(&regs->tbase0, addr);
961
962 /* Start the rx descriptor ring where the tx ring leaves off */
963 addr = addr + sizeof (struct txbd8) * priv->tx_ring_size;
964 vaddr = vaddr + sizeof (struct txbd8) * priv->tx_ring_size;
965 priv->rx_bd_base = (struct rxbd8 *) vaddr;
966 gfar_write(&regs->rbase0, addr);
967
968 /* Setup the skbuff rings */
969 priv->tx_skbuff =
970 (struct sk_buff **) kmalloc(sizeof (struct sk_buff *) *
971 priv->tx_ring_size, GFP_KERNEL);
972
973 if (NULL == priv->tx_skbuff) {
974 if (netif_msg_ifup(priv))
975 printk(KERN_ERR "%s: Could not allocate tx_skbuff\n",
976 dev->name);
977 err = -ENOMEM;
978 goto tx_skb_fail;
979 }
980
981 for (i = 0; i < priv->tx_ring_size; i++)
982 priv->tx_skbuff[i] = NULL;
983
984 priv->rx_skbuff =
985 (struct sk_buff **) kmalloc(sizeof (struct sk_buff *) *
986 priv->rx_ring_size, GFP_KERNEL);
987
988 if (NULL == priv->rx_skbuff) {
989 if (netif_msg_ifup(priv))
990 printk(KERN_ERR "%s: Could not allocate rx_skbuff\n",
991 dev->name);
992 err = -ENOMEM;
993 goto rx_skb_fail;
994 }
995
996 for (i = 0; i < priv->rx_ring_size; i++)
997 priv->rx_skbuff[i] = NULL;
998
999 /* Initialize some variables in our dev structure */
1000 priv->num_txbdfree = priv->tx_ring_size;
1001 priv->dirty_tx = priv->cur_tx = priv->tx_bd_base;
1002 priv->cur_rx = priv->rx_bd_base;
1003 priv->skb_curtx = priv->skb_dirtytx = 0;
1004 priv->skb_currx = 0;
1005
1006 /* Initialize Transmit Descriptor Ring */
1007 txbdp = priv->tx_bd_base;
1008 for (i = 0; i < priv->tx_ring_size; i++) {
1009 txbdp->lstatus = 0;
1010 txbdp->bufPtr = 0;
1011 txbdp++;
1012 }
1013
1014 /* Set the last descriptor in the ring to indicate wrap */
1015 txbdp--;
1016 txbdp->status |= TXBD_WRAP;
1017
1018 rxbdp = priv->rx_bd_base;
1019 for (i = 0; i < priv->rx_ring_size; i++) {
1020 struct sk_buff *skb;
1021
1022 skb = gfar_new_skb(dev);
1023 1704
1024 if (!skb) { 1705 /* Backward compatible case ---- even if we enable
1025 printk(KERN_ERR "%s: Can't allocate RX buffers\n", 1706 * multiple queues, there's only single reg to program
1026 dev->name); 1707 */
1708 gfar_write(&regs->txic, 0);
1709 if(likely(priv->tx_queue[0]->txcoalescing))
1710 gfar_write(&regs->txic, priv->tx_queue[0]->txic);
1027 1711
1028 goto err_rxalloc_fail; 1712 gfar_write(&regs->rxic, 0);
1713 if(unlikely(priv->rx_queue[0]->rxcoalescing))
1714 gfar_write(&regs->rxic, priv->rx_queue[0]->rxic);
1715
1716 if (priv->mode == MQ_MG_MODE) {
1717 baddr = &regs->txic0;
1718 for_each_bit (i, &tx_mask, priv->num_tx_queues) {
1719 if (likely(priv->tx_queue[i]->txcoalescing)) {
1720 gfar_write(baddr + i, 0);
1721 gfar_write(baddr + i, priv->tx_queue[i]->txic);
1722 }
1029 } 1723 }
1030 1724
1031 priv->rx_skbuff[i] = skb; 1725 baddr = &regs->rxic0;
1032 1726 for_each_bit (i, &rx_mask, priv->num_rx_queues) {
1033 gfar_new_rxbdp(dev, rxbdp, skb); 1727 if (likely(priv->rx_queue[i]->rxcoalescing)) {
1034 1728 gfar_write(baddr + i, 0);
1035 rxbdp++; 1729 gfar_write(baddr + i, priv->rx_queue[i]->rxic);
1730 }
1731 }
1036 } 1732 }
1733}
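
gfar_configure_coalescing() keeps the single-register behaviour for queue 0 and, in MQ_MG_MODE, walks the caller-supplied tx_mask/rx_mask bitmaps to program each selected queue's coalescing value into the per-queue register array starting at txic0/rxic0 (baddr + i). The bitmap walk is easy to model standalone; in this sketch the register file is a plain array and the per-queue settings are made-up numbers:

#include <stdio.h>

#define NUM_QUEUES 8

int main(void)
{
	unsigned long tx_mask = 0x05;             /* caller asks for queues 0 and 2 */
	unsigned int txic[NUM_QUEUES] = { 0 };    /* models the txic0.. register block */
	unsigned int setting[NUM_QUEUES] = {
		0x1000, 0, 0x2000, 0, 0, 0, 0, 0  /* illustrative coalescing values */
	};

	for (int i = 0; i < NUM_QUEUES; i++) {
		if (!(tx_mask & (1UL << i)))
			continue;                 /* queue not selected by the caller */
		txic[i] = 0;                      /* models the write of 0 ... */
		txic[i] = setting[i];             /* ... followed by the real value */
		printf("txic[%d] = 0x%x\n", i, txic[i]);
	}
	return 0;
}
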
1037 1734
1038 /* Set the last descriptor in the ring to wrap */ 1735static int register_grp_irqs(struct gfar_priv_grp *grp)
1039 rxbdp--; 1736{
1040 rxbdp->status |= RXBD_WRAP; 1737 struct gfar_private *priv = grp->priv;
1738 struct net_device *dev = priv->ndev;
1739 int err;
1041 1740
1042 /* If the device has multiple interrupts, register for 1741 /* If the device has multiple interrupts, register for
1043 * them. Otherwise, only register for the one */ 1742 * them. Otherwise, only register for the one */
1044 if (priv->device_flags & FSL_GIANFAR_DEV_HAS_MULTI_INTR) { 1743 if (priv->device_flags & FSL_GIANFAR_DEV_HAS_MULTI_INTR) {
1045 /* Install our interrupt handlers for Error, 1744 /* Install our interrupt handlers for Error,
1046 * Transmit, and Receive */ 1745 * Transmit, and Receive */
1047 if (request_irq(priv->interruptError, gfar_error, 1746 if ((err = request_irq(grp->interruptError, gfar_error, 0,
1048 0, priv->int_name_er, dev) < 0) { 1747 grp->int_name_er,grp)) < 0) {
1049 if (netif_msg_intr(priv)) 1748 if (netif_msg_intr(priv))
1050 printk(KERN_ERR "%s: Can't get IRQ %d\n", 1749 printk(KERN_ERR "%s: Can't get IRQ %d\n",
1051 dev->name, priv->interruptError); 1750 dev->name, grp->interruptError);
1052 1751
1053 err = -1; 1752 goto err_irq_fail;
1054 goto err_irq_fail;
1055 } 1753 }
1056 1754
1057 if (request_irq(priv->interruptTransmit, gfar_transmit, 1755 if ((err = request_irq(grp->interruptTransmit, gfar_transmit,
1058 0, priv->int_name_tx, dev) < 0) { 1756 0, grp->int_name_tx, grp)) < 0) {
1059 if (netif_msg_intr(priv)) 1757 if (netif_msg_intr(priv))
1060 printk(KERN_ERR "%s: Can't get IRQ %d\n", 1758 printk(KERN_ERR "%s: Can't get IRQ %d\n",
1061 dev->name, priv->interruptTransmit); 1759 dev->name, grp->interruptTransmit);
1062
1063 err = -1;
1064
1065 goto tx_irq_fail; 1760 goto tx_irq_fail;
1066 } 1761 }
1067 1762
1068 if (request_irq(priv->interruptReceive, gfar_receive, 1763 if ((err = request_irq(grp->interruptReceive, gfar_receive, 0,
1069 0, priv->int_name_rx, dev) < 0) { 1764 grp->int_name_rx, grp)) < 0) {
1070 if (netif_msg_intr(priv)) 1765 if (netif_msg_intr(priv))
1071 printk(KERN_ERR "%s: Can't get IRQ %d (receive0)\n", 1766 printk(KERN_ERR "%s: Can't get IRQ %d\n",
1072 dev->name, priv->interruptReceive); 1767 dev->name, grp->interruptReceive);
1073
1074 err = -1;
1075 goto rx_irq_fail; 1768 goto rx_irq_fail;
1076 } 1769 }
1077 } else { 1770 } else {
1078 if (request_irq(priv->interruptTransmit, gfar_interrupt, 1771 if ((err = request_irq(grp->interruptTransmit, gfar_interrupt, 0,
1079 0, priv->int_name_tx, dev) < 0) { 1772 grp->int_name_tx, grp)) < 0) {
1080 if (netif_msg_intr(priv)) 1773 if (netif_msg_intr(priv))
1081 printk(KERN_ERR "%s: Can't get IRQ %d\n", 1774 printk(KERN_ERR "%s: Can't get IRQ %d\n",
1082 dev->name, priv->interruptTransmit); 1775 dev->name, grp->interruptTransmit);
1083
1084 err = -1;
1085 goto err_irq_fail; 1776 goto err_irq_fail;
1086 } 1777 }
1087 } 1778 }
1088 1779
1089 phy_start(priv->phydev); 1780 return 0;
1090
1091 /* Configure the coalescing support */
1092 gfar_write(&regs->txic, 0);
1093 if (priv->txcoalescing)
1094 gfar_write(&regs->txic, priv->txic);
1095
1096 gfar_write(&regs->rxic, 0);
1097 if (priv->rxcoalescing)
1098 gfar_write(&regs->rxic, priv->rxic);
1099
1100 if (priv->rx_csum_enable)
1101 rctrl |= RCTRL_CHECKSUMMING;
1102 1781
1103 if (priv->extended_hash) { 1782rx_irq_fail:
1104 rctrl |= RCTRL_EXTHASH; 1783 free_irq(grp->interruptTransmit, grp);
1784tx_irq_fail:
1785 free_irq(grp->interruptError, grp);
1786err_irq_fail:
1787 return err;
1105 1788
1106 gfar_clear_exact_match(dev); 1789}
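
register_grp_irqs() requests the error, transmit and receive lines for one group (or a single combined line on simpler parts), passing the group as the IRQ cookie, and unwinds in reverse on failure: a failed receive request releases the transmit line, a failed transmit request releases the error line. The goto ladder can be exercised outside the kernel; acquire() and release() below are fake resources standing in for request_irq()/free_irq():

#include <stdio.h>

/* Pretend resource acquisition; fails when 'fail' is nonzero. */
static int acquire(const char *name, int fail)
{
	if (fail) {
		printf("acquire %s: failed\n", name);
		return -1;
	}
	printf("acquire %s: ok\n", name);
	return 0;
}

static void release(const char *name) { printf("release %s\n", name); }

static int register_three(int fail_rx)
{
	int err;

	if ((err = acquire("error", 0)) < 0)
		goto err_fail;
	if ((err = acquire("transmit", 0)) < 0)
		goto tx_fail;
	if ((err = acquire("receive", fail_rx)) < 0)
		goto rx_fail;
	return 0;

rx_fail:
	release("transmit");   /* mirrors free_irq(grp->interruptTransmit, grp) */
tx_fail:
	release("error");      /* mirrors free_irq(grp->interruptError, grp) */
err_fail:
	return err;
}

int main(void)
{
	register_three(1);     /* force the receive request to fail */
	return 0;
}
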
1107 rctrl |= RCTRL_EMEN;
1108 }
1109 1790
1110 if (priv->padding) { 1791/* Bring the controller up and running */
1111 rctrl &= ~RCTRL_PAL_MASK; 1792int startup_gfar(struct net_device *ndev)
1112 rctrl |= RCTRL_PADDING(priv->padding); 1793{
1113 } 1794 struct gfar_private *priv = netdev_priv(ndev);
1795 struct gfar __iomem *regs = NULL;
1796 int err, i, j;
1114 1797
1115 /* keep vlan related bits if it's enabled */ 1798 for (i = 0; i < priv->num_grps; i++) {
1116 if (priv->vlgrp) { 1799 regs= priv->gfargrp[i].regs;
1117 rctrl |= RCTRL_VLEX | RCTRL_PRSDEP_INIT; 1800 gfar_write(&regs->imask, IMASK_INIT_CLEAR);
1118 tctrl |= TCTRL_VLINS;
1119 } 1801 }
1120 1802
1121 /* Init rctrl based on our settings */ 1803 regs= priv->gfargrp[0].regs;
1122 gfar_write(&priv->regs->rctrl, rctrl); 1804 err = gfar_alloc_skb_resources(ndev);
1123 1805 if (err)
1124 if (dev->features & NETIF_F_IP_CSUM) 1806 return err;
1125 tctrl |= TCTRL_INIT_CSUM;
1126
1127 gfar_write(&priv->regs->tctrl, tctrl);
1128
1129 /* Set the extraction length and index */
1130 attrs = ATTRELI_EL(priv->rx_stash_size) |
1131 ATTRELI_EI(priv->rx_stash_index);
1132
1133 gfar_write(&priv->regs->attreli, attrs);
1134
1135 /* Start with defaults, and add stashing or locking
1136 * depending on the approprate variables */
1137 attrs = ATTR_INIT_SETTINGS;
1138 1807
1139 if (priv->bd_stash_en) 1808 gfar_init_mac(ndev);
1140 attrs |= ATTR_BDSTASH;
1141 1809
1142 if (priv->rx_stash_size != 0) 1810 for (i = 0; i < priv->num_grps; i++) {
1143 attrs |= ATTR_BUFSTASH; 1811 err = register_grp_irqs(&priv->gfargrp[i]);
1812 if (err) {
1813 for (j = 0; j < i; j++)
1814 free_grp_irqs(&priv->gfargrp[j]);
1815 goto irq_fail;
1816 }
1817 }
1144 1818
1145 gfar_write(&priv->regs->attr, attrs); 1819 /* Start the controller */
1820 gfar_start(ndev);
1146 1821
1147 gfar_write(&priv->regs->fifo_tx_thr, priv->fifo_threshold); 1822 phy_start(priv->phydev);
1148 gfar_write(&priv->regs->fifo_tx_starve, priv->fifo_starve);
1149 gfar_write(&priv->regs->fifo_tx_starve_shutoff, priv->fifo_starve_off);
1150 1823
1151 /* Start the controller */ 1824 gfar_configure_coalescing(priv, 0xFF, 0xFF);
1152 gfar_start(dev);
1153 1825
1154 return 0; 1826 return 0;
1155 1827
1156rx_irq_fail: 1828irq_fail:
1157 free_irq(priv->interruptTransmit, dev);
1158tx_irq_fail:
1159 free_irq(priv->interruptError, dev);
1160err_irq_fail:
1161err_rxalloc_fail:
1162rx_skb_fail:
1163 free_skb_resources(priv); 1829 free_skb_resources(priv);
1164tx_skb_fail:
1165 dma_free_coherent(&priv->ofdev->dev,
1166 sizeof(struct txbd8)*priv->tx_ring_size
1167 + sizeof(struct rxbd8)*priv->rx_ring_size,
1168 priv->tx_bd_base,
1169 gfar_read(&regs->tbase0));
1170
1171 return err; 1830 return err;
1172} 1831}
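
startup_gfar() now masks interrupts on every group, allocates the BD and skb resources, initializes the MAC, and then registers IRQs group by group; if group i fails, the groups that already succeeded (0..i-1) are torn down again with free_grp_irqs() before the skb resources are released. A small runnable model of that partial-rollback loop, with setup_group() and undo_group() as placeholders:

#include <stdio.h>

static int setup_group(int i)  { printf("setup group %d\n", i); return i == 2 ? -1 : 0; }
static void undo_group(int i)  { printf("undo group %d\n", i); }

int main(void)
{
	int i, j, err = 0;
	const int num_grps = 3;

	for (i = 0; i < num_grps; i++) {
		err = setup_group(i);          /* models register_grp_irqs() */
		if (err) {
			for (j = 0; j < i; j++)
				undo_group(j); /* models free_grp_irqs() on the groups that succeeded */
			break;
		}
	}
	if (err)
		printf("startup failed, resources released\n");
	return 0;
}
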
1173 1832
@@ -1178,7 +1837,7 @@ static int gfar_enet_open(struct net_device *dev)
1178 struct gfar_private *priv = netdev_priv(dev); 1837 struct gfar_private *priv = netdev_priv(dev);
1179 int err; 1838 int err;
1180 1839
1181 napi_enable(&priv->napi); 1840 enable_napi(priv);
1182 1841
1183 skb_queue_head_init(&priv->rx_recycle); 1842 skb_queue_head_init(&priv->rx_recycle);
1184 1843
@@ -1189,18 +1848,18 @@ static int gfar_enet_open(struct net_device *dev)
1189 1848
1190 err = init_phy(dev); 1849 err = init_phy(dev);
1191 1850
1192 if(err) { 1851 if (err) {
1193 napi_disable(&priv->napi); 1852 disable_napi(priv);
1194 return err; 1853 return err;
1195 } 1854 }
1196 1855
1197 err = startup_gfar(dev); 1856 err = startup_gfar(dev);
1198 if (err) { 1857 if (err) {
1199 napi_disable(&priv->napi); 1858 disable_napi(priv);
1200 return err; 1859 return err;
1201 } 1860 }
1202 1861
1203 netif_start_queue(dev); 1862 netif_tx_start_all_queues(dev);
1204 1863
1205 device_set_wakeup_enable(&dev->dev, priv->wol_en); 1864 device_set_wakeup_enable(&dev->dev, priv->wol_en);
1206 1865
@@ -1269,15 +1928,23 @@ static inline struct txbd8 *next_txbd(struct txbd8 *bdp, struct txbd8 *base,
1269static int gfar_start_xmit(struct sk_buff *skb, struct net_device *dev) 1928static int gfar_start_xmit(struct sk_buff *skb, struct net_device *dev)
1270{ 1929{
1271 struct gfar_private *priv = netdev_priv(dev); 1930 struct gfar_private *priv = netdev_priv(dev);
1931 struct gfar_priv_tx_q *tx_queue = NULL;
1932 struct netdev_queue *txq;
1933 struct gfar __iomem *regs = NULL;
1272 struct txfcb *fcb = NULL; 1934 struct txfcb *fcb = NULL;
1273 struct txbd8 *txbdp, *txbdp_start, *base; 1935 struct txbd8 *txbdp, *txbdp_start, *base;
1274 u32 lstatus; 1936 u32 lstatus;
1275 int i; 1937 int i, rq = 0;
1276 u32 bufaddr; 1938 u32 bufaddr;
1277 unsigned long flags; 1939 unsigned long flags;
1278 unsigned int nr_frags, length; 1940 unsigned int nr_frags, length;
1279 1941
1280 base = priv->tx_bd_base; 1942
1943 rq = skb->queue_mapping;
1944 tx_queue = priv->tx_queue[rq];
1945 txq = netdev_get_tx_queue(dev, rq);
1946 base = tx_queue->tx_bd_base;
1947 regs = tx_queue->grp->regs;
1281 1948
1282 /* make space for additional header when fcb is needed */ 1949 /* make space for additional header when fcb is needed */
1283 if (((skb->ip_summed == CHECKSUM_PARTIAL) || 1950 if (((skb->ip_summed == CHECKSUM_PARTIAL) ||
@@ -1298,21 +1965,19 @@ static int gfar_start_xmit(struct sk_buff *skb, struct net_device *dev)
1298 /* total number of fragments in the SKB */ 1965 /* total number of fragments in the SKB */
1299 nr_frags = skb_shinfo(skb)->nr_frags; 1966 nr_frags = skb_shinfo(skb)->nr_frags;
1300 1967
1301 spin_lock_irqsave(&priv->txlock, flags);
1302
1303 /* check if there is space to queue this packet */ 1968 /* check if there is space to queue this packet */
1304 if ((nr_frags+1) > priv->num_txbdfree) { 1969 if ((nr_frags+1) > tx_queue->num_txbdfree) {
1305 /* no space, stop the queue */ 1970 /* no space, stop the queue */
1306 netif_stop_queue(dev); 1971 netif_tx_stop_queue(txq);
1307 dev->stats.tx_fifo_errors++; 1972 dev->stats.tx_fifo_errors++;
1308 spin_unlock_irqrestore(&priv->txlock, flags);
1309 return NETDEV_TX_BUSY; 1973 return NETDEV_TX_BUSY;
1310 } 1974 }
1311 1975
1312 /* Update transmit stats */ 1976 /* Update transmit stats */
1313 dev->stats.tx_bytes += skb->len; 1977 txq->tx_bytes += skb->len;
1978 txq->tx_packets ++;
1314 1979
1315 txbdp = txbdp_start = priv->cur_tx; 1980 txbdp = txbdp_start = tx_queue->cur_tx;
1316 1981
1317 if (nr_frags == 0) { 1982 if (nr_frags == 0) {
1318 lstatus = txbdp->lstatus | BD_LFLAG(TXBD_LAST | TXBD_INTERRUPT); 1983 lstatus = txbdp->lstatus | BD_LFLAG(TXBD_LAST | TXBD_INTERRUPT);
@@ -1320,7 +1985,7 @@ static int gfar_start_xmit(struct sk_buff *skb, struct net_device *dev)
1320 /* Place the fragment addresses and lengths into the TxBDs */ 1985 /* Place the fragment addresses and lengths into the TxBDs */
1321 for (i = 0; i < nr_frags; i++) { 1986 for (i = 0; i < nr_frags; i++) {
1322 /* Point at the next BD, wrapping as needed */ 1987 /* Point at the next BD, wrapping as needed */
1323 txbdp = next_txbd(txbdp, base, priv->tx_ring_size); 1988 txbdp = next_txbd(txbdp, base, tx_queue->tx_ring_size);
1324 1989
1325 length = skb_shinfo(skb)->frags[i].size; 1990 length = skb_shinfo(skb)->frags[i].size;
1326 1991
@@ -1362,13 +2027,27 @@ static int gfar_start_xmit(struct sk_buff *skb, struct net_device *dev)
1362 } 2027 }
1363 2028
1364 /* setup the TxBD length and buffer pointer for the first BD */ 2029 /* setup the TxBD length and buffer pointer for the first BD */
1365 priv->tx_skbuff[priv->skb_curtx] = skb; 2030 tx_queue->tx_skbuff[tx_queue->skb_curtx] = skb;
1366 txbdp_start->bufPtr = dma_map_single(&priv->ofdev->dev, skb->data, 2031 txbdp_start->bufPtr = dma_map_single(&priv->ofdev->dev, skb->data,
1367 skb_headlen(skb), DMA_TO_DEVICE); 2032 skb_headlen(skb), DMA_TO_DEVICE);
1368 2033
1369 lstatus |= BD_LFLAG(TXBD_CRC | TXBD_READY) | skb_headlen(skb); 2034 lstatus |= BD_LFLAG(TXBD_CRC | TXBD_READY) | skb_headlen(skb);
1370 2035
1371 /* 2036 /*
2037 * We can work in parallel with gfar_clean_tx_ring(), except
2038 * when modifying num_txbdfree. Note that we didn't grab the lock
2039 * when we were reading the num_txbdfree and checking for available
2040 * space, that's because outside of this function it can only grow,
2041 * and once we've got needed space, it cannot suddenly disappear.
2042 *
2043 * The lock also protects us from gfar_error(), which can modify
2044 * regs->tstat and thus retrigger the transfers, which is why we
2045 * also must grab the lock before setting ready bit for the first
2046 * to be transmitted BD.
2047 */
2048 spin_lock_irqsave(&tx_queue->txlock, flags);
2049
2050 /*
1372 * The powerpc-specific eieio() is used, as wmb() has too strong 2051 * The powerpc-specific eieio() is used, as wmb() has too strong
1373 * semantics (it requires synchronization between cacheable and 2052 * semantics (it requires synchronization between cacheable and
1374 * uncacheable mappings, which eieio doesn't provide and which we 2053 * uncacheable mappings, which eieio doesn't provide and which we
@@ -1382,29 +2061,29 @@ static int gfar_start_xmit(struct sk_buff *skb, struct net_device *dev)
1382 2061
1383 /* Update the current skb pointer to the next entry we will use 2062 /* Update the current skb pointer to the next entry we will use
1384 * (wrapping if necessary) */ 2063 * (wrapping if necessary) */
1385 priv->skb_curtx = (priv->skb_curtx + 1) & 2064 tx_queue->skb_curtx = (tx_queue->skb_curtx + 1) &
1386 TX_RING_MOD_MASK(priv->tx_ring_size); 2065 TX_RING_MOD_MASK(tx_queue->tx_ring_size);
1387 2066
1388 priv->cur_tx = next_txbd(txbdp, base, priv->tx_ring_size); 2067 tx_queue->cur_tx = next_txbd(txbdp, base, tx_queue->tx_ring_size);
1389 2068
1390 /* reduce TxBD free count */ 2069 /* reduce TxBD free count */
1391 priv->num_txbdfree -= (nr_frags + 1); 2070 tx_queue->num_txbdfree -= (nr_frags + 1);
1392 2071
1393 dev->trans_start = jiffies; 2072 dev->trans_start = jiffies;
1394 2073
1395 /* If the next BD still needs to be cleaned up, then the bds 2074 /* If the next BD still needs to be cleaned up, then the bds
1396 are full. We need to tell the kernel to stop sending us stuff. */ 2075 are full. We need to tell the kernel to stop sending us stuff. */
1397 if (!priv->num_txbdfree) { 2076 if (!tx_queue->num_txbdfree) {
1398 netif_stop_queue(dev); 2077 netif_tx_stop_queue(txq);
1399 2078
1400 dev->stats.tx_fifo_errors++; 2079 dev->stats.tx_fifo_errors++;
1401 } 2080 }
1402 2081
1403 /* Tell the DMA to go go go */ 2082 /* Tell the DMA to go go go */
1404 gfar_write(&priv->regs->tstat, TSTAT_CLEAR_THALT); 2083 gfar_write(&regs->tstat, TSTAT_CLEAR_THALT >> tx_queue->qindex);
1405 2084
1406 /* Unlock priv */ 2085 /* Unlock priv */
1407 spin_unlock_irqrestore(&priv->txlock, flags); 2086 spin_unlock_irqrestore(&tx_queue->txlock, flags);
1408 2087
1409 return NETDEV_TX_OK; 2088 return NETDEV_TX_OK;
1410} 2089}
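
gfar_start_xmit() is multi-queue aware: the tx ring comes from skb->queue_mapping, byte and packet counters live in the per-queue netdev_queue, the free-descriptor check runs without the lock (num_txbdfree can only grow from outside this function), the txlock is held only while the first BD is armed and num_txbdfree is decremented, and the DMA is kicked with the per-queue halt-clear bit, TSTAT_CLEAR_THALT shifted right by the queue index. A compact model of the mask-based ring bookkeeping; the names and the 0x80000000 constant are illustrative:

#include <stdio.h>

#define RING_SIZE 8                      /* power of two, like the driver's rings */
#define RING_MASK(x) ((x) & (RING_SIZE - 1))

struct txq {
	int cur;        /* next BD slot to use (models cur_tx / skb_curtx) */
	int num_free;   /* models num_txbdfree */
};

/* Returns 0 on success, -1 if there is no room for nfrags+1 descriptors. */
static int queue_frame(struct txq *q, int nfrags)
{
	if (nfrags + 1 > q->num_free)
		return -1;                    /* driver would stop the subqueue, TX_BUSY */
	q->cur = RING_MASK(q->cur + nfrags + 1);
	q->num_free -= nfrags + 1;            /* done under txlock in the driver */
	return 0;
}

int main(void)
{
	struct txq q = { .cur = 0, .num_free = RING_SIZE };
	unsigned int clear_thalt = 0x80000000u;   /* illustrative top THLT bit */

	for (int qidx = 0; qidx < 4; qidx++)
		printf("queue %d halt-clear bit: 0x%08x\n", qidx, clear_thalt >> qidx);

	while (queue_frame(&q, 2) == 0)
		printf("queued, cur=%d free=%d\n", q.cur, q.num_free);
	printf("ring full, would stop the subqueue\n");
	return 0;
}
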
@@ -1414,7 +2093,7 @@ static int gfar_close(struct net_device *dev)
1414{ 2093{
1415 struct gfar_private *priv = netdev_priv(dev); 2094 struct gfar_private *priv = netdev_priv(dev);
1416 2095
1417 napi_disable(&priv->napi); 2096 disable_napi(priv);
1418 2097
1419 skb_queue_purge(&priv->rx_recycle); 2098 skb_queue_purge(&priv->rx_recycle);
1420 cancel_work_sync(&priv->reset_task); 2099 cancel_work_sync(&priv->reset_task);
@@ -1424,7 +2103,7 @@ static int gfar_close(struct net_device *dev)
1424 phy_disconnect(priv->phydev); 2103 phy_disconnect(priv->phydev);
1425 priv->phydev = NULL; 2104 priv->phydev = NULL;
1426 2105
1427 netif_stop_queue(dev); 2106 netif_tx_stop_all_queues(dev);
1428 2107
1429 return 0; 2108 return 0;
1430} 2109}
@@ -1443,50 +2122,55 @@ static void gfar_vlan_rx_register(struct net_device *dev,
1443 struct vlan_group *grp) 2122 struct vlan_group *grp)
1444{ 2123{
1445 struct gfar_private *priv = netdev_priv(dev); 2124 struct gfar_private *priv = netdev_priv(dev);
2125 struct gfar __iomem *regs = NULL;
1446 unsigned long flags; 2126 unsigned long flags;
1447 u32 tempval; 2127 u32 tempval;
1448 2128
1449 spin_lock_irqsave(&priv->rxlock, flags); 2129 regs = priv->gfargrp[0].regs;
2130 local_irq_save(flags);
2131 lock_rx_qs(priv);
1450 2132
1451 priv->vlgrp = grp; 2133 priv->vlgrp = grp;
1452 2134
1453 if (grp) { 2135 if (grp) {
1454 /* Enable VLAN tag insertion */ 2136 /* Enable VLAN tag insertion */
1455 tempval = gfar_read(&priv->regs->tctrl); 2137 tempval = gfar_read(&regs->tctrl);
1456 tempval |= TCTRL_VLINS; 2138 tempval |= TCTRL_VLINS;
1457 2139
1458 gfar_write(&priv->regs->tctrl, tempval); 2140 gfar_write(&regs->tctrl, tempval);
1459 2141
1460 /* Enable VLAN tag extraction */ 2142 /* Enable VLAN tag extraction */
1461 tempval = gfar_read(&priv->regs->rctrl); 2143 tempval = gfar_read(&regs->rctrl);
1462 tempval |= (RCTRL_VLEX | RCTRL_PRSDEP_INIT); 2144 tempval |= (RCTRL_VLEX | RCTRL_PRSDEP_INIT);
1463 gfar_write(&priv->regs->rctrl, tempval); 2145 gfar_write(&regs->rctrl, tempval);
1464 } else { 2146 } else {
1465 /* Disable VLAN tag insertion */ 2147 /* Disable VLAN tag insertion */
1466 tempval = gfar_read(&priv->regs->tctrl); 2148 tempval = gfar_read(&regs->tctrl);
1467 tempval &= ~TCTRL_VLINS; 2149 tempval &= ~TCTRL_VLINS;
1468 gfar_write(&priv->regs->tctrl, tempval); 2150 gfar_write(&regs->tctrl, tempval);
1469 2151
1470 /* Disable VLAN tag extraction */ 2152 /* Disable VLAN tag extraction */
1471 tempval = gfar_read(&priv->regs->rctrl); 2153 tempval = gfar_read(&regs->rctrl);
1472 tempval &= ~RCTRL_VLEX; 2154 tempval &= ~RCTRL_VLEX;
1473 /* If parse is no longer required, then disable parser */ 2155 /* If parse is no longer required, then disable parser */
1474 if (tempval & RCTRL_REQ_PARSER) 2156 if (tempval & RCTRL_REQ_PARSER)
1475 tempval |= RCTRL_PRSDEP_INIT; 2157 tempval |= RCTRL_PRSDEP_INIT;
1476 else 2158 else
1477 tempval &= ~RCTRL_PRSDEP_INIT; 2159 tempval &= ~RCTRL_PRSDEP_INIT;
1478 gfar_write(&priv->regs->rctrl, tempval); 2160 gfar_write(&regs->rctrl, tempval);
1479 } 2161 }
1480 2162
1481 gfar_change_mtu(dev, dev->mtu); 2163 gfar_change_mtu(dev, dev->mtu);
1482 2164
1483 spin_unlock_irqrestore(&priv->rxlock, flags); 2165 unlock_rx_qs(priv);
2166 local_irq_restore(flags);
1484} 2167}
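
With the single priv->rxlock/txlock gone, paths that must quiesce every queue at once (the VLAN register hook here, adjust_link further down) first disable local interrupts and then take every per-queue lock through the lock_rx_qs()/lock_tx_qs() helpers, dropping them again with the matching unlock helpers before restoring interrupts. A stand-alone illustration of the lock-them-all-in-one-order idea, using pthread mutexes in place of spinlocks:

#include <pthread.h>
#include <stdio.h>

#define NUM_QUEUES 4

static pthread_mutex_t qlock[NUM_QUEUES];

/* Always acquire in ascending index order so two "lock all" callers cannot deadlock. */
static void lock_all(void)
{
	for (int i = 0; i < NUM_QUEUES; i++)
		pthread_mutex_lock(&qlock[i]);
}

static void unlock_all(void)
{
	for (int i = NUM_QUEUES - 1; i >= 0; i--)
		pthread_mutex_unlock(&qlock[i]);
}

int main(void)
{
	for (int i = 0; i < NUM_QUEUES; i++)
		pthread_mutex_init(&qlock[i], NULL);

	lock_all();
	printf("all queues quiesced: safe to rewrite rctrl/tctrl\n");
	unlock_all();
	return 0;
}
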
1485 2168
1486static int gfar_change_mtu(struct net_device *dev, int new_mtu) 2169static int gfar_change_mtu(struct net_device *dev, int new_mtu)
1487{ 2170{
1488 int tempsize, tempval; 2171 int tempsize, tempval;
1489 struct gfar_private *priv = netdev_priv(dev); 2172 struct gfar_private *priv = netdev_priv(dev);
2173 struct gfar __iomem *regs = priv->gfargrp[0].regs;
1490 int oldsize = priv->rx_buffer_size; 2174 int oldsize = priv->rx_buffer_size;
1491 int frame_size = new_mtu + ETH_HLEN; 2175 int frame_size = new_mtu + ETH_HLEN;
1492 2176
@@ -1518,20 +2202,20 @@ static int gfar_change_mtu(struct net_device *dev, int new_mtu)
1518 2202
1519 dev->mtu = new_mtu; 2203 dev->mtu = new_mtu;
1520 2204
1521 gfar_write(&priv->regs->mrblr, priv->rx_buffer_size); 2205 gfar_write(&regs->mrblr, priv->rx_buffer_size);
1522 gfar_write(&priv->regs->maxfrm, priv->rx_buffer_size); 2206 gfar_write(&regs->maxfrm, priv->rx_buffer_size);
1523 2207
1524 /* If the mtu is larger than the max size for standard 2208 /* If the mtu is larger than the max size for standard
1525 * ethernet frames (ie, a jumbo frame), then set maccfg2 2209 * ethernet frames (ie, a jumbo frame), then set maccfg2
1526 * to allow huge frames, and to check the length */ 2210 * to allow huge frames, and to check the length */
1527 tempval = gfar_read(&priv->regs->maccfg2); 2211 tempval = gfar_read(&regs->maccfg2);
1528 2212
1529 if (priv->rx_buffer_size > DEFAULT_RX_BUFFER_SIZE) 2213 if (priv->rx_buffer_size > DEFAULT_RX_BUFFER_SIZE)
1530 tempval |= (MACCFG2_HUGEFRAME | MACCFG2_LENGTHCHECK); 2214 tempval |= (MACCFG2_HUGEFRAME | MACCFG2_LENGTHCHECK);
1531 else 2215 else
1532 tempval &= ~(MACCFG2_HUGEFRAME | MACCFG2_LENGTHCHECK); 2216 tempval &= ~(MACCFG2_HUGEFRAME | MACCFG2_LENGTHCHECK);
1533 2217
1534 gfar_write(&priv->regs->maccfg2, tempval); 2218 gfar_write(&regs->maccfg2, tempval);
1535 2219
1536 if ((oldsize != tempsize) && (dev->flags & IFF_UP)) 2220 if ((oldsize != tempsize) && (dev->flags & IFF_UP))
1537 startup_gfar(dev); 2221 startup_gfar(dev);
@@ -1551,10 +2235,10 @@ static void gfar_reset_task(struct work_struct *work)
1551 struct net_device *dev = priv->ndev; 2235 struct net_device *dev = priv->ndev;
1552 2236
1553 if (dev->flags & IFF_UP) { 2237 if (dev->flags & IFF_UP) {
1554 netif_stop_queue(dev); 2238 netif_tx_stop_all_queues(dev);
1555 stop_gfar(dev); 2239 stop_gfar(dev);
1556 startup_gfar(dev); 2240 startup_gfar(dev);
1557 netif_start_queue(dev); 2241 netif_tx_start_all_queues(dev);
1558 } 2242 }
1559 2243
1560 netif_tx_schedule_all(dev); 2244 netif_tx_schedule_all(dev);
@@ -1569,24 +2253,29 @@ static void gfar_timeout(struct net_device *dev)
1569} 2253}
1570 2254
1571/* Interrupt Handler for Transmit complete */ 2255/* Interrupt Handler for Transmit complete */
1572static int gfar_clean_tx_ring(struct net_device *dev) 2256static int gfar_clean_tx_ring(struct gfar_priv_tx_q *tx_queue)
1573{ 2257{
2258 struct net_device *dev = tx_queue->dev;
1574 struct gfar_private *priv = netdev_priv(dev); 2259 struct gfar_private *priv = netdev_priv(dev);
2260 struct gfar_priv_rx_q *rx_queue = NULL;
1575 struct txbd8 *bdp; 2261 struct txbd8 *bdp;
1576 struct txbd8 *lbdp = NULL; 2262 struct txbd8 *lbdp = NULL;
1577 struct txbd8 *base = priv->tx_bd_base; 2263 struct txbd8 *base = tx_queue->tx_bd_base;
1578 struct sk_buff *skb; 2264 struct sk_buff *skb;
1579 int skb_dirtytx; 2265 int skb_dirtytx;
1580 int tx_ring_size = priv->tx_ring_size; 2266 int tx_ring_size = tx_queue->tx_ring_size;
1581 int frags = 0; 2267 int frags = 0;
1582 int i; 2268 int i;
1583 int howmany = 0; 2269 int howmany = 0;
1584 u32 lstatus; 2270 u32 lstatus;
1585 2271
1586 bdp = priv->dirty_tx; 2272 rx_queue = priv->rx_queue[tx_queue->qindex];
1587 skb_dirtytx = priv->skb_dirtytx; 2273 bdp = tx_queue->dirty_tx;
2274 skb_dirtytx = tx_queue->skb_dirtytx;
2275
2276 while ((skb = tx_queue->tx_skbuff[skb_dirtytx])) {
2277 unsigned long flags;
1588 2278
1589 while ((skb = priv->tx_skbuff[skb_dirtytx])) {
1590 frags = skb_shinfo(skb)->nr_frags; 2279 frags = skb_shinfo(skb)->nr_frags;
1591 lbdp = skip_txbd(bdp, frags, base, tx_ring_size); 2280 lbdp = skip_txbd(bdp, frags, base, tx_ring_size);
1592 2281
@@ -1618,82 +2307,71 @@ static int gfar_clean_tx_ring(struct net_device *dev)
1618 * If there's room in the queue (limit it to rx_buffer_size) 2307 * If there's room in the queue (limit it to rx_buffer_size)
1619 * we add this skb back into the pool, if it's the right size 2308 * we add this skb back into the pool, if it's the right size
1620 */ 2309 */
1621 if (skb_queue_len(&priv->rx_recycle) < priv->rx_ring_size && 2310 if (skb_queue_len(&priv->rx_recycle) < rx_queue->rx_ring_size &&
1622 skb_recycle_check(skb, priv->rx_buffer_size + 2311 skb_recycle_check(skb, priv->rx_buffer_size +
1623 RXBUF_ALIGNMENT)) 2312 RXBUF_ALIGNMENT))
1624 __skb_queue_head(&priv->rx_recycle, skb); 2313 __skb_queue_head(&priv->rx_recycle, skb);
1625 else 2314 else
1626 dev_kfree_skb_any(skb); 2315 dev_kfree_skb_any(skb);
1627 2316
1628 priv->tx_skbuff[skb_dirtytx] = NULL; 2317 tx_queue->tx_skbuff[skb_dirtytx] = NULL;
1629 2318
1630 skb_dirtytx = (skb_dirtytx + 1) & 2319 skb_dirtytx = (skb_dirtytx + 1) &
1631 TX_RING_MOD_MASK(tx_ring_size); 2320 TX_RING_MOD_MASK(tx_ring_size);
1632 2321
1633 howmany++; 2322 howmany++;
1634 priv->num_txbdfree += frags + 1; 2323 spin_lock_irqsave(&tx_queue->txlock, flags);
2324 tx_queue->num_txbdfree += frags + 1;
2325 spin_unlock_irqrestore(&tx_queue->txlock, flags);
1635 } 2326 }
1636 2327
1637 /* If we freed a buffer, we can restart transmission, if necessary */ 2328 /* If we freed a buffer, we can restart transmission, if necessary */
1638 if (netif_queue_stopped(dev) && priv->num_txbdfree) 2329 if (__netif_subqueue_stopped(dev, tx_queue->qindex) && tx_queue->num_txbdfree)
1639 netif_wake_queue(dev); 2330 netif_wake_subqueue(dev, tx_queue->qindex);
1640 2331
1641 /* Update dirty indicators */ 2332 /* Update dirty indicators */
1642 priv->skb_dirtytx = skb_dirtytx; 2333 tx_queue->skb_dirtytx = skb_dirtytx;
1643 priv->dirty_tx = bdp; 2334 tx_queue->dirty_tx = bdp;
1644
1645 dev->stats.tx_packets += howmany;
1646 2335
1647 return howmany; 2336 return howmany;
1648} 2337}
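
Tx completion now cleans exactly one gfar_priv_tx_q: the dirty index walks that ring, the txlock is taken only for the brief num_txbdfree update so xmit and cleanup can overlap, and the matching subqueue is woken when descriptors are available again. Completed skbs are either pushed onto the shared rx_recycle pool (capped at the rx ring size, and only if skb_recycle_check() passes for the rx buffer size) or freed. The recycle-or-free decision, modeled as a tiny program with a fake pool:

#include <stdio.h>

#define POOL_LIMIT 4          /* models rx_ring_size as the recycle cap */

static int pool_len;

/* Decide what to do with a completed tx skb: keep it for rx reuse or free it. */
static void retire_skb(int right_size)
{
	if (pool_len < POOL_LIMIT && right_size) {
		pool_len++;                       /* __skb_queue_head(&rx_recycle, skb) */
		printf("recycled (pool=%d)\n", pool_len);
	} else {
		printf("freed\n");                /* dev_kfree_skb_any(skb) */
	}
}

int main(void)
{
	for (int i = 0; i < 6; i++)
		retire_skb(i != 2);               /* one skb fails the size check */
	return 0;
}
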
1649 2338
1650static void gfar_schedule_cleanup(struct net_device *dev) 2339static void gfar_schedule_cleanup(struct gfar_priv_grp *gfargrp)
1651{ 2340{
1652 struct gfar_private *priv = netdev_priv(dev);
1653 unsigned long flags; 2341 unsigned long flags;
1654 2342
1655 spin_lock_irqsave(&priv->txlock, flags); 2343 spin_lock_irqsave(&gfargrp->grplock, flags);
1656 spin_lock(&priv->rxlock); 2344 if (napi_schedule_prep(&gfargrp->napi)) {
1657 2345 gfar_write(&gfargrp->regs->imask, IMASK_RTX_DISABLED);
1658 if (napi_schedule_prep(&priv->napi)) { 2346 __napi_schedule(&gfargrp->napi);
1659 gfar_write(&priv->regs->imask, IMASK_RTX_DISABLED);
1660 __napi_schedule(&priv->napi);
1661 } else { 2347 } else {
1662 /* 2348 /*
1663 * Clear IEVENT, so interrupts aren't called again 2349 * Clear IEVENT, so interrupts aren't called again
1664 * because of the packets that have already arrived. 2350 * because of the packets that have already arrived.
1665 */ 2351 */
1666 gfar_write(&priv->regs->ievent, IEVENT_RTX_MASK); 2352 gfar_write(&gfargrp->regs->ievent, IEVENT_RTX_MASK);
1667 } 2353 }
2354 spin_unlock_irqrestore(&gfargrp->grplock, flags);
1668 2355
1669 spin_unlock(&priv->rxlock);
1670 spin_unlock_irqrestore(&priv->txlock, flags);
1671} 2356}
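
gfar_transmit() and gfar_receive() both funnel into gfar_schedule_cleanup(), which now works per interrupt group: under the group's grplock it either claims the NAPI context and masks that group's RX/TX interrupts (IMASK_RTX_DISABLED), or, if a poll is already pending, simply acks IEVENT so the packets that already arrived do not retrigger the line. A toy state model of that schedule-once-otherwise-ack decision:

#include <stdio.h>
#include <stdbool.h>

static bool napi_scheduled;   /* models napi_schedule_prep()'s claim on the poll */

static void schedule_cleanup(void)
{
	if (!napi_scheduled) {
		napi_scheduled = true;
		printf("mask RX/TX interrupts, schedule poll\n");
	} else {
		printf("poll already pending: just ack IEVENT\n");
	}
}

int main(void)
{
	schedule_cleanup();       /* first interrupt schedules the poll */
	schedule_cleanup();       /* second one only acknowledges the event */
	napi_scheduled = false;   /* models napi_complete() at the end of the poll */
	schedule_cleanup();
	return 0;
}
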
1672 2357
1673/* Interrupt Handler for Transmit complete */ 2358/* Interrupt Handler for Transmit complete */
1674static irqreturn_t gfar_transmit(int irq, void *dev_id) 2359static irqreturn_t gfar_transmit(int irq, void *grp_id)
1675{ 2360{
1676 gfar_schedule_cleanup((struct net_device *)dev_id); 2361 gfar_schedule_cleanup((struct gfar_priv_grp *)grp_id);
1677 return IRQ_HANDLED; 2362 return IRQ_HANDLED;
1678} 2363}
1679 2364
1680static void gfar_new_rxbdp(struct net_device *dev, struct rxbd8 *bdp, 2365static void gfar_new_rxbdp(struct gfar_priv_rx_q *rx_queue, struct rxbd8 *bdp,
1681 struct sk_buff *skb) 2366 struct sk_buff *skb)
1682{ 2367{
2368 struct net_device *dev = rx_queue->dev;
1683 struct gfar_private *priv = netdev_priv(dev); 2369 struct gfar_private *priv = netdev_priv(dev);
1684 u32 lstatus; 2370 dma_addr_t buf;
1685
1686 bdp->bufPtr = dma_map_single(&priv->ofdev->dev, skb->data,
1687 priv->rx_buffer_size, DMA_FROM_DEVICE);
1688
1689 lstatus = BD_LFLAG(RXBD_EMPTY | RXBD_INTERRUPT);
1690
1691 if (bdp == priv->rx_bd_base + priv->rx_ring_size - 1)
1692 lstatus |= BD_LFLAG(RXBD_WRAP);
1693
1694 eieio();
1695 2371
1696 bdp->lstatus = lstatus; 2372 buf = dma_map_single(&priv->ofdev->dev, skb->data,
2373 priv->rx_buffer_size, DMA_FROM_DEVICE);
2374 gfar_init_rxbdp(rx_queue, bdp, buf);
1697} 2375}
1698 2376
1699 2377
@@ -1760,9 +2438,9 @@ static inline void count_errors(unsigned short status, struct net_device *dev)
1760 } 2438 }
1761} 2439}
1762 2440
1763irqreturn_t gfar_receive(int irq, void *dev_id) 2441irqreturn_t gfar_receive(int irq, void *grp_id)
1764{ 2442{
1765 gfar_schedule_cleanup((struct net_device *)dev_id); 2443 gfar_schedule_cleanup((struct gfar_priv_grp *)grp_id);
1766 return IRQ_HANDLED; 2444 return IRQ_HANDLED;
1767} 2445}
1768 2446
@@ -1792,6 +2470,7 @@ static int gfar_process_frame(struct net_device *dev, struct sk_buff *skb,
1792 fcb = (struct rxfcb *)skb->data; 2470 fcb = (struct rxfcb *)skb->data;
1793 2471
1794 /* Remove the FCB from the skb */ 2472 /* Remove the FCB from the skb */
2473 skb_set_queue_mapping(skb, fcb->rq);
1795 /* Remove the padded bytes, if there are any */ 2474 /* Remove the padded bytes, if there are any */
1796 if (amount_pull) 2475 if (amount_pull)
1797 skb_pull(skb, amount_pull); 2476 skb_pull(skb, amount_pull);
@@ -1818,8 +2497,9 @@ static int gfar_process_frame(struct net_device *dev, struct sk_buff *skb,
1818 * until the budget/quota has been reached. Returns the number 2497 * until the budget/quota has been reached. Returns the number
1819 * of frames handled 2498 * of frames handled
1820 */ 2499 */
1821int gfar_clean_rx_ring(struct net_device *dev, int rx_work_limit) 2500int gfar_clean_rx_ring(struct gfar_priv_rx_q *rx_queue, int rx_work_limit)
1822{ 2501{
2502 struct net_device *dev = rx_queue->dev;
1823 struct rxbd8 *bdp, *base; 2503 struct rxbd8 *bdp, *base;
1824 struct sk_buff *skb; 2504 struct sk_buff *skb;
1825 int pkt_len; 2505 int pkt_len;
@@ -1828,8 +2508,8 @@ int gfar_clean_rx_ring(struct net_device *dev, int rx_work_limit)
1828 struct gfar_private *priv = netdev_priv(dev); 2508 struct gfar_private *priv = netdev_priv(dev);
1829 2509
1830 /* Get the first full descriptor */ 2510 /* Get the first full descriptor */
1831 bdp = priv->cur_rx; 2511 bdp = rx_queue->cur_rx;
1832 base = priv->rx_bd_base; 2512 base = rx_queue->rx_bd_base;
1833 2513
1834 amount_pull = (gfar_uses_fcb(priv) ? GMAC_FCB_LEN : 0) + 2514 amount_pull = (gfar_uses_fcb(priv) ? GMAC_FCB_LEN : 0) +
1835 priv->padding; 2515 priv->padding;
@@ -1841,7 +2521,7 @@ int gfar_clean_rx_ring(struct net_device *dev, int rx_work_limit)
1841 /* Add another skb for the future */ 2521 /* Add another skb for the future */
1842 newskb = gfar_new_skb(dev); 2522 newskb = gfar_new_skb(dev);
1843 2523
1844 skb = priv->rx_skbuff[priv->skb_currx]; 2524 skb = rx_queue->rx_skbuff[rx_queue->skb_currx];
1845 2525
1846 dma_unmap_single(&priv->ofdev->dev, bdp->bufPtr, 2526 dma_unmap_single(&priv->ofdev->dev, bdp->bufPtr,
1847 priv->rx_buffer_size, DMA_FROM_DEVICE); 2527 priv->rx_buffer_size, DMA_FROM_DEVICE);
@@ -1866,69 +2546,91 @@ int gfar_clean_rx_ring(struct net_device *dev, int rx_work_limit)
1866 } 2546 }
1867 } else { 2547 } else {
1868 /* Increment the number of packets */ 2548 /* Increment the number of packets */
1869 dev->stats.rx_packets++; 2549 rx_queue->stats.rx_packets++;
1870 howmany++; 2550 howmany++;
1871 2551
1872 if (likely(skb)) { 2552 if (likely(skb)) {
1873 pkt_len = bdp->length - ETH_FCS_LEN; 2553 pkt_len = bdp->length - ETH_FCS_LEN;
1874 /* Remove the FCS from the packet length */ 2554 /* Remove the FCS from the packet length */
1875 skb_put(skb, pkt_len); 2555 skb_put(skb, pkt_len);
1876 dev->stats.rx_bytes += pkt_len; 2556 rx_queue->stats.rx_bytes += pkt_len;
1877 2557
1878 if (in_irq() || irqs_disabled())
1879 printk("Interrupt problem!\n");
1880 gfar_process_frame(dev, skb, amount_pull); 2558 gfar_process_frame(dev, skb, amount_pull);
1881 2559
1882 } else { 2560 } else {
1883 if (netif_msg_rx_err(priv)) 2561 if (netif_msg_rx_err(priv))
1884 printk(KERN_WARNING 2562 printk(KERN_WARNING
1885 "%s: Missing skb!\n", dev->name); 2563 "%s: Missing skb!\n", dev->name);
1886 dev->stats.rx_dropped++; 2564 rx_queue->stats.rx_dropped++;
1887 priv->extra_stats.rx_skbmissing++; 2565 priv->extra_stats.rx_skbmissing++;
1888 } 2566 }
1889 2567
1890 } 2568 }
1891 2569
1892 priv->rx_skbuff[priv->skb_currx] = newskb; 2570 rx_queue->rx_skbuff[rx_queue->skb_currx] = newskb;
1893 2571
1894 /* Setup the new bdp */ 2572 /* Setup the new bdp */
1895 gfar_new_rxbdp(dev, bdp, newskb); 2573 gfar_new_rxbdp(rx_queue, bdp, newskb);
1896 2574
1897 /* Update to the next pointer */ 2575 /* Update to the next pointer */
1898 bdp = next_bd(bdp, base, priv->rx_ring_size); 2576 bdp = next_bd(bdp, base, rx_queue->rx_ring_size);
1899 2577
1900 /* update to point at the next skb */ 2578 /* update to point at the next skb */
1901 priv->skb_currx = 2579 rx_queue->skb_currx =
1902 (priv->skb_currx + 1) & 2580 (rx_queue->skb_currx + 1) &
1903 RX_RING_MOD_MASK(priv->rx_ring_size); 2581 RX_RING_MOD_MASK(rx_queue->rx_ring_size);
1904 } 2582 }
1905 2583
1906 /* Update the current rxbd pointer to be the next one */ 2584 /* Update the current rxbd pointer to be the next one */
1907 priv->cur_rx = bdp; 2585 rx_queue->cur_rx = bdp;
1908 2586
1909 return howmany; 2587 return howmany;
1910} 2588}
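
Rx cleanup likewise operates on a single gfar_priv_rx_q: packet and byte counts move into the per-queue stats, a replacement skb is installed with gfar_new_rxbdp(), and both the BD pointer and skb_currx wrap with the ring-size mask; gfar_process_frame() also stamps the skb with the rx queue it arrived on (fcb->rq). The consume-and-refill index arithmetic, shown as a runnable snippet with invented packet lengths:

#include <stdio.h>

#define RING_SIZE 4
#define RING_MASK(x) ((x) & (RING_SIZE - 1))

int main(void)
{
	int currx = 0;                        /* models skb_currx */
	long rx_packets = 0, rx_bytes = 0;    /* per-queue counters */
	int lengths[] = { 60, 1514, 128, 60, 300, 42 };

	for (int i = 0; i < 6; i++) {
		/* consume the filled buffer at slot currx ... */
		rx_packets++;
		rx_bytes += lengths[i];
		/* ... install a fresh buffer in the same slot and advance with wrap */
		currx = RING_MASK(currx + 1);
	}
	printf("queue stats: %ld packets, %ld bytes, next slot %d\n",
	       rx_packets, rx_bytes, currx);
	return 0;
}
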
1911 2589
1912static int gfar_poll(struct napi_struct *napi, int budget) 2590static int gfar_poll(struct napi_struct *napi, int budget)
1913{ 2591{
1914 struct gfar_private *priv = container_of(napi, struct gfar_private, napi); 2592 struct gfar_priv_grp *gfargrp = container_of(napi,
1915 struct net_device *dev = priv->ndev; 2593 struct gfar_priv_grp, napi);
1916 int tx_cleaned = 0; 2594 struct gfar_private *priv = gfargrp->priv;
1917 int rx_cleaned = 0; 2595 struct gfar __iomem *regs = gfargrp->regs;
1918 unsigned long flags; 2596 struct gfar_priv_tx_q *tx_queue = NULL;
2597 struct gfar_priv_rx_q *rx_queue = NULL;
2598 int rx_cleaned = 0, budget_per_queue = 0, rx_cleaned_per_queue = 0;
2599 int tx_cleaned = 0, i, left_over_budget = budget;
2600 unsigned long serviced_queues = 0;
2601 int num_queues = 0;
2602
2603 num_queues = gfargrp->num_rx_queues;
2604 budget_per_queue = budget/num_queues;
1919 2605
1920 /* Clear IEVENT, so interrupts aren't called again 2606 /* Clear IEVENT, so interrupts aren't called again
1921 * because of the packets that have already arrived */ 2607 * because of the packets that have already arrived */
1922 gfar_write(&priv->regs->ievent, IEVENT_RTX_MASK); 2608 gfar_write(&regs->ievent, IEVENT_RTX_MASK);
1923 2609
1924 /* If we fail to get the lock, don't bother with the TX BDs */ 2610 while (num_queues && left_over_budget) {
1925 if (spin_trylock_irqsave(&priv->txlock, flags)) { 2611
1926 tx_cleaned = gfar_clean_tx_ring(dev); 2612 budget_per_queue = left_over_budget/num_queues;
1927 spin_unlock_irqrestore(&priv->txlock, flags); 2613 left_over_budget = 0;
2614
2615 for_each_bit(i, &gfargrp->rx_bit_map, priv->num_rx_queues) {
2616 if (test_bit(i, &serviced_queues))
2617 continue;
2618 rx_queue = priv->rx_queue[i];
2619 tx_queue = priv->tx_queue[rx_queue->qindex];
2620
2621 tx_cleaned += gfar_clean_tx_ring(tx_queue);
2622 rx_cleaned_per_queue = gfar_clean_rx_ring(rx_queue,
2623 budget_per_queue);
2624 rx_cleaned += rx_cleaned_per_queue;
2625 if(rx_cleaned_per_queue < budget_per_queue) {
2626 left_over_budget = left_over_budget +
2627 (budget_per_queue - rx_cleaned_per_queue);
2628 set_bit(i, &serviced_queues);
2629 num_queues--;
2630 }
2631 }
1928 } 2632 }
1929 2633
1930 rx_cleaned = gfar_clean_rx_ring(dev, budget);
1931
1932 if (tx_cleaned) 2634 if (tx_cleaned)
1933 return budget; 2635 return budget;
1934 2636
@@ -1936,20 +2638,14 @@ static int gfar_poll(struct napi_struct *napi, int budget)
1936 napi_complete(napi); 2638 napi_complete(napi);
1937 2639
1938 /* Clear the halt bit in RSTAT */ 2640 /* Clear the halt bit in RSTAT */
1939 gfar_write(&priv->regs->rstat, RSTAT_CLEAR_RHALT); 2641 gfar_write(&regs->rstat, gfargrp->rstat);
1940 2642
1941 gfar_write(&priv->regs->imask, IMASK_DEFAULT); 2643 gfar_write(&regs->imask, IMASK_DEFAULT);
1942 2644
1943 /* If we are coalescing interrupts, update the timer */ 2645 /* If we are coalescing interrupts, update the timer */
1944 /* Otherwise, clear it */ 2646 /* Otherwise, clear it */
1945 if (likely(priv->rxcoalescing)) { 2647 gfar_configure_coalescing(priv,
1946 gfar_write(&priv->regs->rxic, 0); 2648 gfargrp->rx_bit_map, gfargrp->tx_bit_map);
1947 gfar_write(&priv->regs->rxic, priv->rxic);
1948 }
1949 if (likely(priv->txcoalescing)) {
1950 gfar_write(&priv->regs->txic, 0);
1951 gfar_write(&priv->regs->txic, priv->txic);
1952 }
1953 } 2649 }
1954 2650
1955 return rx_cleaned; 2651 return rx_cleaned;
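
gfar_poll() now belongs to a group and splits one NAPI budget across all rx queues the group owns: each pass divides the leftover budget evenly among the queues still needing service, a queue that uses less than its share returns the remainder and is marked done, and the loop repeats until either the budget or the queues are exhausted; if any tx work was done the full budget is returned so the poll stays scheduled. The redistribution logic, extracted into a stand-alone model (the per-queue demand numbers are made up):

#include <stdio.h>

#define NQ 3

int main(void)
{
	int demand[NQ] = { 2, 40, 5 };   /* packets waiting per rx queue (illustrative) */
	int done[NQ] = { 0 };
	int budget = 32, left_over = 32, num_queues = NQ, cleaned = 0;

	while (num_queues && left_over) {
		int per_queue = left_over / num_queues;

		left_over = 0;
		for (int i = 0; i < NQ; i++) {
			if (done[i])
				continue;
			int got = demand[i] < per_queue ? demand[i] : per_queue;

			demand[i] -= got;
			cleaned += got;
			if (got < per_queue) {   /* queue finished early: give back budget */
				left_over += per_queue - got;
				done[i] = 1;
				num_queues--;
			}
			printf("queue %d cleaned %d (leftover now %d)\n", i, got, left_over);
		}
	}
	printf("total cleaned %d of %d\n", cleaned, budget);
	return 0;
}
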
@@ -1964,44 +2660,50 @@ static int gfar_poll(struct napi_struct *napi, int budget)
1964static void gfar_netpoll(struct net_device *dev) 2660static void gfar_netpoll(struct net_device *dev)
1965{ 2661{
1966 struct gfar_private *priv = netdev_priv(dev); 2662 struct gfar_private *priv = netdev_priv(dev);
2663 int i = 0;
1967 2664
1968 /* If the device has multiple interrupts, run tx/rx */ 2665 /* If the device has multiple interrupts, run tx/rx */
1969 if (priv->device_flags & FSL_GIANFAR_DEV_HAS_MULTI_INTR) { 2666 if (priv->device_flags & FSL_GIANFAR_DEV_HAS_MULTI_INTR) {
1970 disable_irq(priv->interruptTransmit); 2667 for (i = 0; i < priv->num_grps; i++) {
1971 disable_irq(priv->interruptReceive); 2668 disable_irq(priv->gfargrp[i].interruptTransmit);
1972 disable_irq(priv->interruptError); 2669 disable_irq(priv->gfargrp[i].interruptReceive);
1973 gfar_interrupt(priv->interruptTransmit, dev); 2670 disable_irq(priv->gfargrp[i].interruptError);
1974 enable_irq(priv->interruptError); 2671 gfar_interrupt(priv->gfargrp[i].interruptTransmit,
1975 enable_irq(priv->interruptReceive); 2672 &priv->gfargrp[i]);
1976 enable_irq(priv->interruptTransmit); 2673 enable_irq(priv->gfargrp[i].interruptError);
2674 enable_irq(priv->gfargrp[i].interruptReceive);
2675 enable_irq(priv->gfargrp[i].interruptTransmit);
2676 }
1977 } else { 2677 } else {
1978 disable_irq(priv->interruptTransmit); 2678 for (i = 0; i < priv->num_grps; i++) {
1979 gfar_interrupt(priv->interruptTransmit, dev); 2679 disable_irq(priv->gfargrp[i].interruptTransmit);
1980 enable_irq(priv->interruptTransmit); 2680 gfar_interrupt(priv->gfargrp[i].interruptTransmit,
2681 &priv->gfargrp[i]);
2682 enable_irq(priv->gfargrp[i].interruptTransmit);
2683 }
1981 } 2684 }
1982} 2685}
1983#endif 2686#endif
1984 2687
1985/* The interrupt handler for devices with one interrupt */ 2688/* The interrupt handler for devices with one interrupt */
1986static irqreturn_t gfar_interrupt(int irq, void *dev_id) 2689static irqreturn_t gfar_interrupt(int irq, void *grp_id)
1987{ 2690{
1988 struct net_device *dev = dev_id; 2691 struct gfar_priv_grp *gfargrp = grp_id;
1989 struct gfar_private *priv = netdev_priv(dev);
1990 2692
1991 /* Save ievent for future reference */ 2693 /* Save ievent for future reference */
1992 u32 events = gfar_read(&priv->regs->ievent); 2694 u32 events = gfar_read(&gfargrp->regs->ievent);
1993 2695
1994 /* Check for reception */ 2696 /* Check for reception */
1995 if (events & IEVENT_RX_MASK) 2697 if (events & IEVENT_RX_MASK)
1996 gfar_receive(irq, dev_id); 2698 gfar_receive(irq, grp_id);
1997 2699
1998 /* Check for transmit completion */ 2700 /* Check for transmit completion */
1999 if (events & IEVENT_TX_MASK) 2701 if (events & IEVENT_TX_MASK)
2000 gfar_transmit(irq, dev_id); 2702 gfar_transmit(irq, grp_id);
2001 2703
2002 /* Check for errors */ 2704 /* Check for errors */
2003 if (events & IEVENT_ERR_MASK) 2705 if (events & IEVENT_ERR_MASK)
2004 gfar_error(irq, dev_id); 2706 gfar_error(irq, grp_id);
2005 2707
2006 return IRQ_HANDLED; 2708 return IRQ_HANDLED;
2007} 2709}
@@ -2015,12 +2717,14 @@ static irqreturn_t gfar_interrupt(int irq, void *dev_id)
2015static void adjust_link(struct net_device *dev) 2717static void adjust_link(struct net_device *dev)
2016{ 2718{
2017 struct gfar_private *priv = netdev_priv(dev); 2719 struct gfar_private *priv = netdev_priv(dev);
2018 struct gfar __iomem *regs = priv->regs; 2720 struct gfar __iomem *regs = priv->gfargrp[0].regs;
2019 unsigned long flags; 2721 unsigned long flags;
2020 struct phy_device *phydev = priv->phydev; 2722 struct phy_device *phydev = priv->phydev;
2021 int new_state = 0; 2723 int new_state = 0;
2022 2724
2023 spin_lock_irqsave(&priv->txlock, flags); 2725 local_irq_save(flags);
2726 lock_tx_qs(priv);
2727
2024 if (phydev->link) { 2728 if (phydev->link) {
2025 u32 tempval = gfar_read(&regs->maccfg2); 2729 u32 tempval = gfar_read(&regs->maccfg2);
2026 u32 ecntrl = gfar_read(&regs->ecntrl); 2730 u32 ecntrl = gfar_read(&regs->ecntrl);
@@ -2085,8 +2789,8 @@ static void adjust_link(struct net_device *dev)
2085 2789
2086 if (new_state && netif_msg_link(priv)) 2790 if (new_state && netif_msg_link(priv))
2087 phy_print_status(phydev); 2791 phy_print_status(phydev);
2088 2792 unlock_tx_qs(priv);
2089 spin_unlock_irqrestore(&priv->txlock, flags); 2793 local_irq_restore(flags);
2090} 2794}
2091 2795
2092/* Update the hash table based on the current list of multicast 2796/* Update the hash table based on the current list of multicast
@@ -2097,10 +2801,10 @@ static void gfar_set_multi(struct net_device *dev)
2097{ 2801{
2098 struct dev_mc_list *mc_ptr; 2802 struct dev_mc_list *mc_ptr;
2099 struct gfar_private *priv = netdev_priv(dev); 2803 struct gfar_private *priv = netdev_priv(dev);
2100 struct gfar __iomem *regs = priv->regs; 2804 struct gfar __iomem *regs = priv->gfargrp[0].regs;
2101 u32 tempval; 2805 u32 tempval;
2102 2806
2103 if(dev->flags & IFF_PROMISC) { 2807 if (dev->flags & IFF_PROMISC) {
2104 /* Set RCTRL to PROM */ 2808 /* Set RCTRL to PROM */
2105 tempval = gfar_read(&regs->rctrl); 2809 tempval = gfar_read(&regs->rctrl);
2106 tempval |= RCTRL_PROM; 2810 tempval |= RCTRL_PROM;
@@ -2112,7 +2816,7 @@ static void gfar_set_multi(struct net_device *dev)
2112 gfar_write(&regs->rctrl, tempval); 2816 gfar_write(&regs->rctrl, tempval);
2113 } 2817 }
2114 2818
2115 if(dev->flags & IFF_ALLMULTI) { 2819 if (dev->flags & IFF_ALLMULTI) {
2116 /* Set the hash to rx all multicast frames */ 2820 /* Set the hash to rx all multicast frames */
2117 gfar_write(&regs->igaddr0, 0xffffffff); 2821 gfar_write(&regs->igaddr0, 0xffffffff);
2118 gfar_write(&regs->igaddr1, 0xffffffff); 2822 gfar_write(&regs->igaddr1, 0xffffffff);
@@ -2164,7 +2868,7 @@ static void gfar_set_multi(struct net_device *dev)
2164 em_num = 0; 2868 em_num = 0;
2165 } 2869 }
2166 2870
2167 if(dev->mc_count == 0) 2871 if (dev->mc_count == 0)
2168 return; 2872 return;
2169 2873
2170 /* Parse the list, and set the appropriate bits */ 2874 /* Parse the list, and set the appropriate bits */
@@ -2230,10 +2934,11 @@ static void gfar_set_hash_for_addr(struct net_device *dev, u8 *addr)
2230static void gfar_set_mac_for_addr(struct net_device *dev, int num, u8 *addr) 2934static void gfar_set_mac_for_addr(struct net_device *dev, int num, u8 *addr)
2231{ 2935{
2232 struct gfar_private *priv = netdev_priv(dev); 2936 struct gfar_private *priv = netdev_priv(dev);
2937 struct gfar __iomem *regs = priv->gfargrp[0].regs;
2233 int idx; 2938 int idx;
2234 char tmpbuf[MAC_ADDR_LEN]; 2939 char tmpbuf[MAC_ADDR_LEN];
2235 u32 tempval; 2940 u32 tempval;
2236 u32 __iomem *macptr = &priv->regs->macstnaddr1; 2941 u32 __iomem *macptr = &regs->macstnaddr1;
2237 2942
2238 macptr += num*2; 2943 macptr += num*2;
2239 2944
@@ -2250,16 +2955,18 @@ static void gfar_set_mac_for_addr(struct net_device *dev, int num, u8 *addr)
2250} 2955}
2251 2956
2252/* GFAR error interrupt handler */ 2957/* GFAR error interrupt handler */
2253static irqreturn_t gfar_error(int irq, void *dev_id) 2958static irqreturn_t gfar_error(int irq, void *grp_id)
2254{ 2959{
2255 struct net_device *dev = dev_id; 2960 struct gfar_priv_grp *gfargrp = grp_id;
2256 struct gfar_private *priv = netdev_priv(dev); 2961 struct gfar __iomem *regs = gfargrp->regs;
2962 struct gfar_private *priv= gfargrp->priv;
2963 struct net_device *dev = priv->ndev;
2257 2964
2258 /* Save ievent for future reference */ 2965 /* Save ievent for future reference */
2259 u32 events = gfar_read(&priv->regs->ievent); 2966 u32 events = gfar_read(&regs->ievent);
2260 2967
2261 /* Clear IEVENT */ 2968 /* Clear IEVENT */
2262 gfar_write(&priv->regs->ievent, events & IEVENT_ERR_MASK); 2969 gfar_write(&regs->ievent, events & IEVENT_ERR_MASK);
2263 2970
2264 /* Magic Packet is not an error. */ 2971 /* Magic Packet is not an error. */
2265 if ((priv->device_flags & FSL_GIANFAR_DEV_HAS_MAGIC_PACKET) && 2972 if ((priv->device_flags & FSL_GIANFAR_DEV_HAS_MAGIC_PACKET) &&
@@ -2269,7 +2976,7 @@ static irqreturn_t gfar_error(int irq, void *dev_id)
2269 /* Hmm... */ 2976 /* Hmm... */
2270 if (netif_msg_rx_err(priv) || netif_msg_tx_err(priv)) 2977 if (netif_msg_rx_err(priv) || netif_msg_tx_err(priv))
2271 printk(KERN_DEBUG "%s: error interrupt (ievent=0x%08x imask=0x%08x)\n", 2978 printk(KERN_DEBUG "%s: error interrupt (ievent=0x%08x imask=0x%08x)\n",
2272 dev->name, events, gfar_read(&priv->regs->imask)); 2979 dev->name, events, gfar_read(&regs->imask));
2273 2980
2274 /* Update the error counters */ 2981 /* Update the error counters */
2275 if (events & IEVENT_TXE) { 2982 if (events & IEVENT_TXE) {
@@ -2280,14 +2987,22 @@ static irqreturn_t gfar_error(int irq, void *dev_id)
2280 if (events & IEVENT_CRL) 2987 if (events & IEVENT_CRL)
2281 dev->stats.tx_aborted_errors++; 2988 dev->stats.tx_aborted_errors++;
2282 if (events & IEVENT_XFUN) { 2989 if (events & IEVENT_XFUN) {
2990 unsigned long flags;
2991
2283 if (netif_msg_tx_err(priv)) 2992 if (netif_msg_tx_err(priv))
2284 printk(KERN_DEBUG "%s: TX FIFO underrun, " 2993 printk(KERN_DEBUG "%s: TX FIFO underrun, "
2285 "packet dropped.\n", dev->name); 2994 "packet dropped.\n", dev->name);
2286 dev->stats.tx_dropped++; 2995 dev->stats.tx_dropped++;
2287 priv->extra_stats.tx_underrun++; 2996 priv->extra_stats.tx_underrun++;
2288 2997
2998 local_irq_save(flags);
2999 lock_tx_qs(priv);
3000
2289 /* Reactivate the Tx Queues */ 3001 /* Reactivate the Tx Queues */
2290 gfar_write(&priv->regs->tstat, TSTAT_CLEAR_THALT); 3002 gfar_write(&regs->tstat, gfargrp->tstat);
3003
3004 unlock_tx_qs(priv);
3005 local_irq_restore(flags);
2291 } 3006 }
2292 if (netif_msg_tx_err(priv)) 3007 if (netif_msg_tx_err(priv))
2293 printk(KERN_DEBUG "%s: Transmit Error\n", dev->name); 3008 printk(KERN_DEBUG "%s: Transmit Error\n", dev->name);
@@ -2296,11 +3011,11 @@ static irqreturn_t gfar_error(int irq, void *dev_id)
2296 dev->stats.rx_errors++; 3011 dev->stats.rx_errors++;
2297 priv->extra_stats.rx_bsy++; 3012 priv->extra_stats.rx_bsy++;
2298 3013
2299 gfar_receive(irq, dev_id); 3014 gfar_receive(irq, grp_id);
2300 3015
2301 if (netif_msg_rx_err(priv)) 3016 if (netif_msg_rx_err(priv))
2302 printk(KERN_DEBUG "%s: busy error (rstat: %x)\n", 3017 printk(KERN_DEBUG "%s: busy error (rstat: %x)\n",
2303 dev->name, gfar_read(&priv->regs->rstat)); 3018 dev->name, gfar_read(&regs->rstat));
2304 } 3019 }
2305 if (events & IEVENT_BABR) { 3020 if (events & IEVENT_BABR) {
2306 dev->stats.rx_errors++; 3021 dev->stats.rx_errors++;
@@ -2331,6 +3046,9 @@ static struct of_device_id gfar_match[] =
2331 .type = "network", 3046 .type = "network",
2332 .compatible = "gianfar", 3047 .compatible = "gianfar",
2333 }, 3048 },
3049 {
3050 .compatible = "fsl,etsec2",
3051 },
2334 {}, 3052 {},
2335}; 3053};
2336MODULE_DEVICE_TABLE(of, gfar_match); 3054MODULE_DEVICE_TABLE(of, gfar_match);
@@ -2342,8 +3060,9 @@ static struct of_platform_driver gfar_driver = {
2342 3060
2343 .probe = gfar_probe, 3061 .probe = gfar_probe,
2344 .remove = gfar_remove, 3062 .remove = gfar_remove,
2345 .suspend = gfar_suspend, 3063 .suspend = gfar_legacy_suspend,
2346 .resume = gfar_resume, 3064 .resume = gfar_legacy_resume,
3065 .driver.pm = GFAR_PM_OPS,
2347}; 3066};
2348 3067
2349static int __init gfar_init(void) 3068static int __init gfar_init(void)