Diffstat (limited to 'drivers/net/gianfar.c')
-rw-r--r--  drivers/net/gianfar.c | 1886
1 file changed, 1300 insertions(+), 586 deletions(-)
diff --git a/drivers/net/gianfar.c b/drivers/net/gianfar.c
index 5bf31f1509c9..5d3763fb3472 100644
--- a/drivers/net/gianfar.c
+++ b/drivers/net/gianfar.c
@@ -8,9 +8,10 @@
8 * 8 *
9 * Author: Andy Fleming 9 * Author: Andy Fleming
10 * Maintainer: Kumar Gala 10 * Maintainer: Kumar Gala
11 * Modifier: Sandeep Gopalpet <sandeep.kumar@freescale.com>
11 * 12 *
12 * Copyright (c) 2002-2006 Freescale Semiconductor, Inc. 13 * Copyright 2002-2009 Freescale Semiconductor, Inc.
13 * Copyright (c) 2007 MontaVista Software, Inc. 14 * Copyright 2007 MontaVista Software, Inc.
14 * 15 *
15 * This program is free software; you can redistribute it and/or modify it 16 * This program is free software; you can redistribute it and/or modify it
16 * under the terms of the GNU General Public License as published by the 17 * under the terms of the GNU General Public License as published by the
@@ -109,7 +110,7 @@ static void gfar_reset_task(struct work_struct *work);
109static void gfar_timeout(struct net_device *dev); 110static void gfar_timeout(struct net_device *dev);
110static int gfar_close(struct net_device *dev); 111static int gfar_close(struct net_device *dev);
111struct sk_buff *gfar_new_skb(struct net_device *dev); 112struct sk_buff *gfar_new_skb(struct net_device *dev);
112static void gfar_new_rxbdp(struct net_device *dev, struct rxbd8 *bdp, 113static void gfar_new_rxbdp(struct gfar_priv_rx_q *rx_queue, struct rxbd8 *bdp,
113 struct sk_buff *skb); 114 struct sk_buff *skb);
114static int gfar_set_mac_address(struct net_device *dev); 115static int gfar_set_mac_address(struct net_device *dev);
115static int gfar_change_mtu(struct net_device *dev, int new_mtu); 116static int gfar_change_mtu(struct net_device *dev, int new_mtu);
@@ -130,8 +131,8 @@ static int gfar_poll(struct napi_struct *napi, int budget);
130#ifdef CONFIG_NET_POLL_CONTROLLER 131#ifdef CONFIG_NET_POLL_CONTROLLER
131static void gfar_netpoll(struct net_device *dev); 132static void gfar_netpoll(struct net_device *dev);
132#endif 133#endif
133int gfar_clean_rx_ring(struct net_device *dev, int rx_work_limit); 134int gfar_clean_rx_ring(struct gfar_priv_rx_q *rx_queue, int rx_work_limit);
134static int gfar_clean_tx_ring(struct net_device *dev); 135static int gfar_clean_tx_ring(struct gfar_priv_tx_q *tx_queue);
135static int gfar_process_frame(struct net_device *dev, struct sk_buff *skb, 136static int gfar_process_frame(struct net_device *dev, struct sk_buff *skb,
136 int amount_pull); 137 int amount_pull);
137static void gfar_vlan_rx_register(struct net_device *netdev, 138static void gfar_vlan_rx_register(struct net_device *netdev,
@@ -147,6 +148,304 @@ MODULE_AUTHOR("Freescale Semiconductor, Inc");
147MODULE_DESCRIPTION("Gianfar Ethernet Driver"); 148MODULE_DESCRIPTION("Gianfar Ethernet Driver");
148MODULE_LICENSE("GPL"); 149MODULE_LICENSE("GPL");
149 150
151static void gfar_init_rxbdp(struct gfar_priv_rx_q *rx_queue, struct rxbd8 *bdp,
152 dma_addr_t buf)
153{
154 u32 lstatus;
155
156 bdp->bufPtr = buf;
157
158 lstatus = BD_LFLAG(RXBD_EMPTY | RXBD_INTERRUPT);
159 if (bdp == rx_queue->rx_bd_base + rx_queue->rx_ring_size - 1)
160 lstatus |= BD_LFLAG(RXBD_WRAP);
161
162 eieio();
163
164 bdp->lstatus = lstatus;
165}
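/* Ordering note (annotation): bufPtr must be visible before lstatus marks
 * the BD as RXBD_EMPTY (i.e. owned by hardware), which is what the eieio()
 * barrier guarantees on PowerPC; RXBD_WRAP on the final descriptor makes
 * the controller wrap back to rx_bd_base. */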
166
167static int gfar_init_bds(struct net_device *ndev)
168{
169 struct gfar_private *priv = netdev_priv(ndev);
170 struct gfar_priv_tx_q *tx_queue = NULL;
171 struct gfar_priv_rx_q *rx_queue = NULL;
172 struct txbd8 *txbdp;
173 struct rxbd8 *rxbdp;
174 int i, j;
175
176 for (i = 0; i < priv->num_tx_queues; i++) {
177 tx_queue = priv->tx_queue[i];
178 /* Initialize some variables in our dev structure */
179 tx_queue->num_txbdfree = tx_queue->tx_ring_size;
180 tx_queue->dirty_tx = tx_queue->tx_bd_base;
181 tx_queue->cur_tx = tx_queue->tx_bd_base;
182 tx_queue->skb_curtx = 0;
183 tx_queue->skb_dirtytx = 0;
184
185 /* Initialize Transmit Descriptor Ring */
186 txbdp = tx_queue->tx_bd_base;
187 for (j = 0; j < tx_queue->tx_ring_size; j++) {
188 txbdp->lstatus = 0;
189 txbdp->bufPtr = 0;
190 txbdp++;
191 }
192
193 /* Set the last descriptor in the ring to indicate wrap */
194 txbdp--;
195 txbdp->status |= TXBD_WRAP;
196 }
197
198 for (i = 0; i < priv->num_rx_queues; i++) {
199 rx_queue = priv->rx_queue[i];
200 rx_queue->cur_rx = rx_queue->rx_bd_base;
201 rx_queue->skb_currx = 0;
202 rxbdp = rx_queue->rx_bd_base;
203
204 for (j = 0; j < rx_queue->rx_ring_size; j++) {
205 struct sk_buff *skb = rx_queue->rx_skbuff[j];
206
207 if (skb) {
208 gfar_init_rxbdp(rx_queue, rxbdp,
209 rxbdp->bufPtr);
210 } else {
211 skb = gfar_new_skb(ndev);
212 if (!skb) {
213 pr_err("%s: Can't allocate RX buffers\n",
214 ndev->name);
215 goto err_rxalloc_fail;
216 }
217 rx_queue->rx_skbuff[j] = skb;
218
219 gfar_new_rxbdp(rx_queue, rxbdp, skb);
220 }
221
222 rxbdp++;
223 }
224
225 }
226
227 return 0;
228
229err_rxalloc_fail:
230 free_skb_resources(priv);
231 return -ENOMEM;
232}
233
234static int gfar_alloc_skb_resources(struct net_device *ndev)
235{
236 void *vaddr;
237 dma_addr_t addr;
238 int i, j, k;
239 struct gfar_private *priv = netdev_priv(ndev);
240 struct device *dev = &priv->ofdev->dev;
241 struct gfar_priv_tx_q *tx_queue = NULL;
242 struct gfar_priv_rx_q *rx_queue = NULL;
243
244 priv->total_tx_ring_size = 0;
245 for (i = 0; i < priv->num_tx_queues; i++)
246 priv->total_tx_ring_size += priv->tx_queue[i]->tx_ring_size;
247
248 priv->total_rx_ring_size = 0;
249 for (i = 0; i < priv->num_rx_queues; i++)
250 priv->total_rx_ring_size += priv->rx_queue[i]->rx_ring_size;
251
252 /* Allocate memory for the buffer descriptors */
253 vaddr = dma_alloc_coherent(dev,
254 sizeof(struct txbd8) * priv->total_tx_ring_size +
255 sizeof(struct rxbd8) * priv->total_rx_ring_size,
256 &addr, GFP_KERNEL);
257 if (!vaddr) {
258 if (netif_msg_ifup(priv))
259 pr_err("%s: Could not allocate buffer descriptors!\n",
260 ndev->name);
261 return -ENOMEM;
262 }
263
264 for (i = 0; i < priv->num_tx_queues; i++) {
265 tx_queue = priv->tx_queue[i];
266 tx_queue->tx_bd_base = (struct txbd8 *) vaddr;
267 tx_queue->tx_bd_dma_base = addr;
268 tx_queue->dev = ndev;
269 /* enet DMA only understands physical addresses */
270 addr += sizeof(struct txbd8) *tx_queue->tx_ring_size;
271 vaddr += sizeof(struct txbd8) *tx_queue->tx_ring_size;
272 }
273
274 /* Start the rx descriptor ring where the tx ring leaves off */
275 for (i = 0; i < priv->num_rx_queues; i++) {
276 rx_queue = priv->rx_queue[i];
277 rx_queue->rx_bd_base = (struct rxbd8 *) vaddr;
278 rx_queue->rx_bd_dma_base = addr;
279 rx_queue->dev = ndev;
280 addr += sizeof (struct rxbd8) * rx_queue->rx_ring_size;
281 vaddr += sizeof (struct rxbd8) * rx_queue->rx_ring_size;
282 }
283
284 /* Setup the skbuff rings */
285 for (i = 0; i < priv->num_tx_queues; i++) {
286 tx_queue = priv->tx_queue[i];
287 tx_queue->tx_skbuff = kmalloc(sizeof(*tx_queue->tx_skbuff) *
288 tx_queue->tx_ring_size, GFP_KERNEL);
289 if (!tx_queue->tx_skbuff) {
290 if (netif_msg_ifup(priv))
291 pr_err("%s: Could not allocate tx_skbuff\n",
292 ndev->name);
293 goto cleanup;
294 }
295
296 for (k = 0; k < tx_queue->tx_ring_size; k++)
297 tx_queue->tx_skbuff[k] = NULL;
298 }
299
300 for (i = 0; i < priv->num_rx_queues; i++) {
301 rx_queue = priv->rx_queue[i];
302 rx_queue->rx_skbuff = kmalloc(sizeof(*rx_queue->rx_skbuff) *
303 rx_queue->rx_ring_size, GFP_KERNEL);
304
305 if (!rx_queue->rx_skbuff) {
306 if (netif_msg_ifup(priv))
307 pr_err("%s: Could not allocate rx_skbuff\n",
308 ndev->name);
309 goto cleanup;
310 }
311
312 for (j = 0; j < rx_queue->rx_ring_size; j++)
313 rx_queue->rx_skbuff[j] = NULL;
314 }
315
316 if (gfar_init_bds(ndev))
317 goto cleanup;
318
319 return 0;
320
321cleanup:
322 free_skb_resources(priv);
323 return -ENOMEM;
324}
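/* Annotation: resulting layout of the single coherent allocation (sketch,
 * shown for two TX and two RX queues; ring sizes are per-queue):
 *
 *   addr/vaddr -> [txbd ring 0][txbd ring 1][rxbd ring 0][rxbd ring 1]
 *
 * Each queue records its own slice in tx_bd_base/tx_bd_dma_base (or the rx
 * equivalents); free_skb_resources() later frees the whole block through
 * tx_queue[0]'s base pointers. */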
325
326static void gfar_init_tx_rx_base(struct gfar_private *priv)
327{
328 struct gfar __iomem *regs = priv->gfargrp[0].regs;
329 u32 __iomem *baddr;
330 int i;
331
332 baddr = &regs->tbase0;
333 for(i = 0; i < priv->num_tx_queues; i++) {
334 gfar_write(baddr, priv->tx_queue[i]->tx_bd_dma_base);
335 baddr += 2;
336 }
337
338 baddr = &regs->rbase0;
339 for(i = 0; i < priv->num_rx_queues; i++) {
340 gfar_write(baddr, priv->rx_queue[i]->rx_bd_dma_base);
341 baddr += 2;
342 }
343}
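/* Annotation: tbase0/rbase0 start arrays of per-queue base registers that
 * sit two u32s (8 bytes) apart in struct gfar, so baddr += 2 steps to the
 * next queue's base register. */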
344
345static void gfar_init_mac(struct net_device *ndev)
346{
347 struct gfar_private *priv = netdev_priv(ndev);
348 struct gfar __iomem *regs = priv->gfargrp[0].regs;
349 u32 rctrl = 0;
350 u32 tctrl = 0;
351 u32 attrs = 0;
352
353 /* write the tx/rx base registers */
354 gfar_init_tx_rx_base(priv);
355
356 /* Configure the coalescing support */
357 gfar_configure_coalescing(priv, 0xFF, 0xFF);
358
359 if (priv->rx_filer_enable) {
360 rctrl |= RCTRL_FILREN;
361 /* Program the RIR0 reg with the required distribution */
362 gfar_write(&regs->rir0, DEFAULT_RIR0);
363 }
364
365 if (priv->rx_csum_enable)
366 rctrl |= RCTRL_CHECKSUMMING;
367
368 if (priv->extended_hash) {
369 rctrl |= RCTRL_EXTHASH;
370
371 gfar_clear_exact_match(ndev);
372 rctrl |= RCTRL_EMEN;
373 }
374
375 if (priv->padding) {
376 rctrl &= ~RCTRL_PAL_MASK;
377 rctrl |= RCTRL_PADDING(priv->padding);
378 }
379
380 /* keep vlan related bits if it's enabled */
381 if (priv->vlgrp) {
382 rctrl |= RCTRL_VLEX | RCTRL_PRSDEP_INIT;
383 tctrl |= TCTRL_VLINS;
384 }
385
386 /* Init rctrl based on our settings */
387 gfar_write(&regs->rctrl, rctrl);
388
389 if (ndev->features & NETIF_F_IP_CSUM)
390 tctrl |= TCTRL_INIT_CSUM;
391
392 tctrl |= TCTRL_TXSCHED_PRIO;
393
394 gfar_write(&regs->tctrl, tctrl);
395
396 /* Set the extraction length and index */
397 attrs = ATTRELI_EL(priv->rx_stash_size) |
398 ATTRELI_EI(priv->rx_stash_index);
399
400 gfar_write(&regs->attreli, attrs);
401
402 /* Start with defaults, and add stashing or locking
403 * depending on the appropriate variables */
404 attrs = ATTR_INIT_SETTINGS;
405
406 if (priv->bd_stash_en)
407 attrs |= ATTR_BDSTASH;
408
409 if (priv->rx_stash_size != 0)
410 attrs |= ATTR_BUFSTASH;
411
412 gfar_write(&regs->attr, attrs);
413
414 gfar_write(&regs->fifo_tx_thr, priv->fifo_threshold);
415 gfar_write(&regs->fifo_tx_starve, priv->fifo_starve);
416 gfar_write(&regs->fifo_tx_starve_shutoff, priv->fifo_starve_off);
417}
418
419static struct net_device_stats *gfar_get_stats(struct net_device *dev)
420{
421 struct gfar_private *priv = netdev_priv(dev);
422 struct netdev_queue *txq;
423 unsigned long rx_packets = 0, rx_bytes = 0, rx_dropped = 0;
424 unsigned long tx_packets = 0, tx_bytes = 0;
425 int i = 0;
426
427 for (i = 0; i < priv->num_rx_queues; i++) {
428 rx_packets += priv->rx_queue[i]->stats.rx_packets;
429 rx_bytes += priv->rx_queue[i]->stats.rx_bytes;
430 rx_dropped += priv->rx_queue[i]->stats.rx_dropped;
431 }
432
433 dev->stats.rx_packets = rx_packets;
434 dev->stats.rx_bytes = rx_bytes;
435 dev->stats.rx_dropped = rx_dropped;
436
437 for (i = 0; i < priv->num_tx_queues; i++) {
438 txq = netdev_get_tx_queue(dev, i);
439 tx_bytes += txq->tx_bytes;
440 tx_packets += txq->tx_packets;
441 }
442
443 dev->stats.tx_bytes = tx_bytes;
444 dev->stats.tx_packets = tx_packets;
445
446 return &dev->stats;
447}
448
150static const struct net_device_ops gfar_netdev_ops = { 449static const struct net_device_ops gfar_netdev_ops = {
151 .ndo_open = gfar_enet_open, 450 .ndo_open = gfar_enet_open,
152 .ndo_start_xmit = gfar_start_xmit, 451 .ndo_start_xmit = gfar_start_xmit,
@@ -155,6 +454,7 @@ static const struct net_device_ops gfar_netdev_ops = {
155 .ndo_set_multicast_list = gfar_set_multi, 454 .ndo_set_multicast_list = gfar_set_multi,
156 .ndo_tx_timeout = gfar_timeout, 455 .ndo_tx_timeout = gfar_timeout,
157 .ndo_do_ioctl = gfar_ioctl, 456 .ndo_do_ioctl = gfar_ioctl,
457 .ndo_get_stats = gfar_get_stats,
158 .ndo_vlan_rx_register = gfar_vlan_rx_register, 458 .ndo_vlan_rx_register = gfar_vlan_rx_register,
159 .ndo_set_mac_address = eth_mac_addr, 459 .ndo_set_mac_address = eth_mac_addr,
160 .ndo_validate_addr = eth_validate_addr, 460 .ndo_validate_addr = eth_validate_addr,
@@ -163,56 +463,244 @@ static const struct net_device_ops gfar_netdev_ops = {
163#endif 463#endif
164}; 464};
165 465
466unsigned int ftp_rqfpr[MAX_FILER_IDX + 1];
467unsigned int ftp_rqfcr[MAX_FILER_IDX + 1];
468
469void lock_rx_qs(struct gfar_private *priv)
470{
471 int i = 0x0;
472
473 for (i = 0; i < priv->num_rx_queues; i++)
474 spin_lock(&priv->rx_queue[i]->rxlock);
475}
476
477void lock_tx_qs(struct gfar_private *priv)
478{
479 int i = 0x0;
480
481 for (i = 0; i < priv->num_tx_queues; i++)
482 spin_lock(&priv->tx_queue[i]->txlock);
483}
484
485void unlock_rx_qs(struct gfar_private *priv)
486{
487 int i = 0x0;
488
489 for (i = 0; i < priv->num_rx_queues; i++)
490 spin_unlock(&priv->rx_queue[i]->rxlock);
491}
492
493void unlock_tx_qs(struct gfar_private *priv)
494{
495 int i = 0x0;
496
497 for (i = 0; i < priv->num_tx_queues; i++)
498 spin_unlock(&priv->tx_queue[i]->txlock);
499}
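/* Annotation: these helpers take (or release) every per-queue lock in index
 * order, replacing the old single txlock/rxlock pair. Callers further down
 * (stop_gfar(), gfar_suspend(), gfar_resume()) use them as:
 *
 *   local_irq_save(flags);
 *   lock_tx_qs(priv);
 *   lock_rx_qs(priv);
 *   ...work that must quiesce all queues...
 *   unlock_rx_qs(priv);
 *   unlock_tx_qs(priv);
 *   local_irq_restore(flags);
 */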
500
166/* Returns 1 if incoming frames use an FCB */ 501/* Returns 1 if incoming frames use an FCB */
167static inline int gfar_uses_fcb(struct gfar_private *priv) 502static inline int gfar_uses_fcb(struct gfar_private *priv)
168{ 503{
169 return priv->vlgrp || priv->rx_csum_enable; 504 return priv->vlgrp || priv->rx_csum_enable;
170} 505}
171 506
172static int gfar_of_init(struct net_device *dev) 507static void free_tx_pointers(struct gfar_private *priv)
508{
509 int i = 0;
510
511 for (i = 0; i < priv->num_tx_queues; i++)
512 kfree(priv->tx_queue[i]);
513}
514
515static void free_rx_pointers(struct gfar_private *priv)
516{
517 int i = 0;
518
519 for (i = 0; i < priv->num_rx_queues; i++)
520 kfree(priv->rx_queue[i]);
521}
522
523static void unmap_group_regs(struct gfar_private *priv)
524{
525 int i = 0;
526
527 for (i = 0; i < MAXGROUPS; i++)
528 if (priv->gfargrp[i].regs)
529 iounmap(priv->gfargrp[i].regs);
530}
531
532static void disable_napi(struct gfar_private *priv)
533{
534 int i = 0;
535
536 for (i = 0; i < priv->num_grps; i++)
537 napi_disable(&priv->gfargrp[i].napi);
538}
539
540static void enable_napi(struct gfar_private *priv)
541{
542 int i = 0;
543
544 for (i = 0; i < priv->num_grps; i++)
545 napi_enable(&priv->gfargrp[i].napi);
546}
547
548static int gfar_parse_group(struct device_node *np,
549 struct gfar_private *priv, const char *model)
550{
551 u32 *queue_mask;
552
553 priv->gfargrp[priv->num_grps].regs = of_iomap(np, 0);
554 if (!priv->gfargrp[priv->num_grps].regs)
555 return -ENOMEM;
556
557 priv->gfargrp[priv->num_grps].interruptTransmit =
558 irq_of_parse_and_map(np, 0);
559
560 /* If we aren't the FEC we have multiple interrupts */
561 if (model && strcasecmp(model, "FEC")) {
562 priv->gfargrp[priv->num_grps].interruptReceive =
563 irq_of_parse_and_map(np, 1);
564 priv->gfargrp[priv->num_grps].interruptError =
565 irq_of_parse_and_map(np,2);
566 if (priv->gfargrp[priv->num_grps].interruptTransmit < 0 ||
567 priv->gfargrp[priv->num_grps].interruptReceive < 0 ||
568 priv->gfargrp[priv->num_grps].interruptError < 0) {
569 return -EINVAL;
570 }
571 }
572
573 priv->gfargrp[priv->num_grps].grp_id = priv->num_grps;
574 priv->gfargrp[priv->num_grps].priv = priv;
575 spin_lock_init(&priv->gfargrp[priv->num_grps].grplock);
576 if(priv->mode == MQ_MG_MODE) {
577 queue_mask = (u32 *)of_get_property(np,
578 "fsl,rx-bit-map", NULL);
579 priv->gfargrp[priv->num_grps].rx_bit_map =
580 queue_mask ? *queue_mask :(DEFAULT_MAPPING >> priv->num_grps);
581 queue_mask = (u32 *)of_get_property(np,
582 "fsl,tx-bit-map", NULL);
583 priv->gfargrp[priv->num_grps].tx_bit_map =
584 queue_mask ? *queue_mask : (DEFAULT_MAPPING >> priv->num_grps);
585 } else {
586 priv->gfargrp[priv->num_grps].rx_bit_map = 0xFF;
587 priv->gfargrp[priv->num_grps].tx_bit_map = 0xFF;
588 }
589 priv->num_grps++;
590
591 return 0;
592}
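/* Annotation: a "group" bundles one register block, its TX/RX/error IRQs
 * and the rx/tx queue bit maps it serves. In MQ_MG_MODE ("fsl,etsec2")
 * each child node contributes one group and the bit maps come from the
 * fsl,rx-bit-map / fsl,tx-bit-map properties; otherwise a single group
 * with bit maps of 0xFF covers every queue. */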
593
594static int gfar_of_init(struct of_device *ofdev, struct net_device **pdev)
173{ 595{
174 const char *model; 596 const char *model;
175 const char *ctype; 597 const char *ctype;
176 const void *mac_addr; 598 const void *mac_addr;
177 u64 addr, size; 599 int err = 0, i;
178 int err = 0; 600 struct net_device *dev = NULL;
179 struct gfar_private *priv = netdev_priv(dev); 601 struct gfar_private *priv = NULL;
180 struct device_node *np = priv->node; 602 struct device_node *np = ofdev->node;
603 struct device_node *child = NULL;
181 const u32 *stash; 604 const u32 *stash;
182 const u32 *stash_len; 605 const u32 *stash_len;
183 const u32 *stash_idx; 606 const u32 *stash_idx;
607 unsigned int num_tx_qs, num_rx_qs;
608 u32 *tx_queues, *rx_queues;
184 609
185 if (!np || !of_device_is_available(np)) 610 if (!np || !of_device_is_available(np))
186 return -ENODEV; 611 return -ENODEV;
187 612
188 /* get a pointer to the register memory */ 613 /* parse the num of tx and rx queues */
189 addr = of_translate_address(np, of_get_address(np, 0, &size, NULL)); 614 tx_queues = (u32 *)of_get_property(np, "fsl,num_tx_queues", NULL);
190 priv->regs = ioremap(addr, size); 615 num_tx_qs = tx_queues ? *tx_queues : 1;
616
617 if (num_tx_qs > MAX_TX_QS) {
618 printk(KERN_ERR "num_tx_qs(=%d) greater than MAX_TX_QS(=%d)\n",
619 num_tx_qs, MAX_TX_QS);
620 printk(KERN_ERR "Cannot do alloc_etherdev, aborting\n");
621 return -EINVAL;
622 }
623
624 rx_queues = (u32 *)of_get_property(np, "fsl,num_rx_queues", NULL);
625 num_rx_qs = rx_queues ? *rx_queues : 1;
626
627 if (num_rx_qs > MAX_RX_QS) {
628 printk(KERN_ERR "num_rx_qs(=%d) greater than MAX_RX_QS(=%d)\n",
629 num_rx_qs, MAX_RX_QS);
630 printk(KERN_ERR "Cannot do alloc_etherdev, aborting\n");
631 return -EINVAL;
632 }
191 633
192 if (priv->regs == NULL) 634 *pdev = alloc_etherdev_mq(sizeof(*priv), num_tx_qs);
635 dev = *pdev;
636 if (NULL == dev)
193 return -ENOMEM; 637 return -ENOMEM;
194 638
195 priv->interruptTransmit = irq_of_parse_and_map(np, 0); 639 priv = netdev_priv(dev);
640 priv->node = ofdev->node;
641 priv->ndev = dev;
642
643 dev->num_tx_queues = num_tx_qs;
644 dev->real_num_tx_queues = num_tx_qs;
645 priv->num_tx_queues = num_tx_qs;
646 priv->num_rx_queues = num_rx_qs;
647 priv->num_grps = 0x0;
196 648
197 model = of_get_property(np, "model", NULL); 649 model = of_get_property(np, "model", NULL);
198 650
199 /* If we aren't the FEC we have multiple interrupts */ 651 for (i = 0; i < MAXGROUPS; i++)
200 if (model && strcasecmp(model, "FEC")) { 652 priv->gfargrp[i].regs = NULL;
201 priv->interruptReceive = irq_of_parse_and_map(np, 1); 653
654 /* Parse and initialize group specific information */
655 if (of_device_is_compatible(np, "fsl,etsec2")) {
656 priv->mode = MQ_MG_MODE;
657 for_each_child_of_node(np, child) {
658 err = gfar_parse_group(child, priv, model);
659 if (err)
660 goto err_grp_init;
661 }
662 } else {
663 priv->mode = SQ_SG_MODE;
664 err = gfar_parse_group(np, priv, model);
665 if(err)
666 goto err_grp_init;
667 }
202 668
203 priv->interruptError = irq_of_parse_and_map(np, 2); 669 for (i = 0; i < priv->num_tx_queues; i++)
670 priv->tx_queue[i] = NULL;
671 for (i = 0; i < priv->num_rx_queues; i++)
672 priv->rx_queue[i] = NULL;
673
674 for (i = 0; i < priv->num_tx_queues; i++) {
675 priv->tx_queue[i] = (struct gfar_priv_tx_q *)kzalloc(
676 sizeof (struct gfar_priv_tx_q), GFP_KERNEL);
677 if (!priv->tx_queue[i]) {
678 err = -ENOMEM;
679 goto tx_alloc_failed;
680 }
681 priv->tx_queue[i]->tx_skbuff = NULL;
682 priv->tx_queue[i]->qindex = i;
683 priv->tx_queue[i]->dev = dev;
684 spin_lock_init(&(priv->tx_queue[i]->txlock));
685 }
204 686
205 if (priv->interruptTransmit < 0 || 687 for (i = 0; i < priv->num_rx_queues; i++) {
206 priv->interruptReceive < 0 || 688 priv->rx_queue[i] = (struct gfar_priv_rx_q *)kzalloc(
207 priv->interruptError < 0) { 689 sizeof (struct gfar_priv_rx_q), GFP_KERNEL);
208 err = -EINVAL; 690 if (!priv->rx_queue[i]) {
209 goto err_out; 691 err = -ENOMEM;
692 goto rx_alloc_failed;
210 } 693 }
694 priv->rx_queue[i]->rx_skbuff = NULL;
695 priv->rx_queue[i]->qindex = i;
696 priv->rx_queue[i]->dev = dev;
697 spin_lock_init(&(priv->rx_queue[i]->rxlock));
211 } 698 }
212 699
700
213 stash = of_get_property(np, "bd-stash", NULL); 701 stash = of_get_property(np, "bd-stash", NULL);
214 702
215 if(stash) { 703 if (stash) {
216 priv->device_flags |= FSL_GIANFAR_DEV_HAS_BD_STASHING; 704 priv->device_flags |= FSL_GIANFAR_DEV_HAS_BD_STASHING;
217 priv->bd_stash_en = 1; 705 priv->bd_stash_en = 1;
218 } 706 }
@@ -270,8 +758,13 @@ static int gfar_of_init(struct net_device *dev)
270 758
271 return 0; 759 return 0;
272 760
273err_out: 761rx_alloc_failed:
274 iounmap(priv->regs); 762 free_rx_pointers(priv);
763tx_alloc_failed:
764 free_tx_pointers(priv);
765err_grp_init:
766 unmap_group_regs(priv);
767 free_netdev(dev);
275 return err; 768 return err;
276} 769}
277 770
@@ -289,6 +782,85 @@ static int gfar_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
289 return phy_mii_ioctl(priv->phydev, if_mii(rq), cmd); 782 return phy_mii_ioctl(priv->phydev, if_mii(rq), cmd);
290} 783}
291 784
785static unsigned int reverse_bitmap(unsigned int bit_map, unsigned int max_qs)
786{
787 unsigned int new_bit_map = 0x0;
788 int mask = 0x1 << (max_qs - 1), i;
789 for (i = 0; i < max_qs; i++) {
790 if (bit_map & mask)
791 new_bit_map = new_bit_map + (1 << i);
792 mask = mask >> 0x1;
793 }
794 return new_bit_map;
795}
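/* Annotation, with illustrative values: the device-tree bit maps put queue
 * 0 at the MSB, so with max_qs == 8 a map of 0x80 (only queue 0 set)
 * becomes 0x01, i.e. bit 0 == queue 0, matching what for_each_set_bit()
 * expects later in gfar_probe(). */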
796
797static u32 cluster_entry_per_class(struct gfar_private *priv, u32 rqfar,
798 u32 class)
799{
800 u32 rqfpr = FPR_FILER_MASK;
801 u32 rqfcr = 0x0;
802
803 rqfar--;
804 rqfcr = RQFCR_CLE | RQFCR_PID_MASK | RQFCR_CMP_EXACT;
805 ftp_rqfpr[rqfar] = rqfpr;
806 ftp_rqfcr[rqfar] = rqfcr;
807 gfar_write_filer(priv, rqfar, rqfcr, rqfpr);
808
809 rqfar--;
810 rqfcr = RQFCR_CMP_NOMATCH;
811 ftp_rqfpr[rqfar] = rqfpr;
812 ftp_rqfcr[rqfar] = rqfcr;
813 gfar_write_filer(priv, rqfar, rqfcr, rqfpr);
814
815 rqfar--;
816 rqfcr = RQFCR_CMP_EXACT | RQFCR_PID_PARSE | RQFCR_CLE | RQFCR_AND;
817 rqfpr = class;
818 ftp_rqfcr[rqfar] = rqfcr;
819 ftp_rqfpr[rqfar] = rqfpr;
820 gfar_write_filer(priv, rqfar, rqfcr, rqfpr);
821
822 rqfar--;
823 rqfcr = RQFCR_CMP_EXACT | RQFCR_PID_MASK | RQFCR_AND;
824 rqfpr = class;
825 ftp_rqfcr[rqfar] = rqfcr;
826 ftp_rqfpr[rqfar] = rqfpr;
827 gfar_write_filer(priv, rqfar, rqfcr, rqfpr);
828
829 return rqfar;
830}
831
832static void gfar_init_filer_table(struct gfar_private *priv)
833{
834 int i = 0x0;
835 u32 rqfar = MAX_FILER_IDX;
836 u32 rqfcr = 0x0;
837 u32 rqfpr = FPR_FILER_MASK;
838
839 /* Default rule */
840 rqfcr = RQFCR_CMP_MATCH;
841 ftp_rqfcr[rqfar] = rqfcr;
842 ftp_rqfpr[rqfar] = rqfpr;
843 gfar_write_filer(priv, rqfar, rqfcr, rqfpr);
844
845 rqfar = cluster_entry_per_class(priv, rqfar, RQFPR_IPV6);
846 rqfar = cluster_entry_per_class(priv, rqfar, RQFPR_IPV6 | RQFPR_UDP);
847 rqfar = cluster_entry_per_class(priv, rqfar, RQFPR_IPV6 | RQFPR_TCP);
848 rqfar = cluster_entry_per_class(priv, rqfar, RQFPR_IPV4);
849 rqfar = cluster_entry_per_class(priv, rqfar, RQFPR_IPV4 | RQFPR_UDP);
850 rqfar = cluster_entry_per_class(priv, rqfar, RQFPR_IPV4 | RQFPR_TCP);
851
852 /* cur_filer_idx indicates the first non-masked rule */
853 priv->cur_filer_idx = rqfar;
854
855 /* Rest are masked rules */
856 rqfcr = RQFCR_CMP_NOMATCH;
857 for (i = 0; i < rqfar; i++) {
858 ftp_rqfcr[i] = rqfcr;
859 ftp_rqfpr[i] = rqfpr;
860 gfar_write_filer(priv, i, rqfcr, rqfpr);
861 }
862}
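/* Annotation: rough shape of the filer table this builds (indices relative
 * to MAX_FILER_IDX; each cluster_entry_per_class() call above consumes four
 * entries, walking downwards):
 *
 *   MAX_FILER_IDX             default CMP_MATCH rule
 *   MAX_FILER_IDX-4  .. -1    IPv6 cluster
 *   MAX_FILER_IDX-8  .. -5    IPv6|UDP cluster
 *   ...
 *   MAX_FILER_IDX-24 .. -21   IPv4|TCP cluster (cur_filer_idx = -24)
 *   0 .. cur_filer_idx-1      CMP_NOMATCH placeholders
 */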
863
292/* Set up the ethernet device structure, private data, 864/* Set up the ethernet device structure, private data,
293 * and anything else we need before we start */ 865 * and anything else we need before we start */
294static int gfar_probe(struct of_device *ofdev, 866static int gfar_probe(struct of_device *ofdev,
@@ -297,14 +869,17 @@ static int gfar_probe(struct of_device *ofdev,
297 u32 tempval; 869 u32 tempval;
298 struct net_device *dev = NULL; 870 struct net_device *dev = NULL;
299 struct gfar_private *priv = NULL; 871 struct gfar_private *priv = NULL;
300 int err = 0; 872 struct gfar __iomem *regs = NULL;
873 int err = 0, i, grp_idx = 0;
301 int len_devname; 874 int len_devname;
875 u32 rstat = 0, tstat = 0, rqueue = 0, tqueue = 0;
876 u32 isrg = 0;
877 u32 __iomem *baddr;
302 878
303 /* Create an ethernet device instance */ 879 err = gfar_of_init(ofdev, &dev);
304 dev = alloc_etherdev(sizeof (*priv));
305 880
306 if (NULL == dev) 881 if (err)
307 return -ENOMEM; 882 return err;
308 883
309 priv = netdev_priv(dev); 884 priv = netdev_priv(dev);
310 priv->ndev = dev; 885 priv->ndev = dev;
@@ -312,50 +887,46 @@ static int gfar_probe(struct of_device *ofdev,
312 priv->node = ofdev->node; 887 priv->node = ofdev->node;
313 SET_NETDEV_DEV(dev, &ofdev->dev); 888 SET_NETDEV_DEV(dev, &ofdev->dev);
314 889
315 err = gfar_of_init(dev);
316
317 if (err)
318 goto regs_fail;
319
320 spin_lock_init(&priv->txlock);
321 spin_lock_init(&priv->rxlock);
322 spin_lock_init(&priv->bflock); 890 spin_lock_init(&priv->bflock);
323 INIT_WORK(&priv->reset_task, gfar_reset_task); 891 INIT_WORK(&priv->reset_task, gfar_reset_task);
324 892
325 dev_set_drvdata(&ofdev->dev, priv); 893 dev_set_drvdata(&ofdev->dev, priv);
894 regs = priv->gfargrp[0].regs;
326 895
327 /* Stop the DMA engine now, in case it was running before */ 896 /* Stop the DMA engine now, in case it was running before */
328 /* (The firmware could have used it, and left it running). */ 897 /* (The firmware could have used it, and left it running). */
329 gfar_halt(dev); 898 gfar_halt(dev);
330 899
331 /* Reset MAC layer */ 900 /* Reset MAC layer */
332 gfar_write(&priv->regs->maccfg1, MACCFG1_SOFT_RESET); 901 gfar_write(&regs->maccfg1, MACCFG1_SOFT_RESET);
333 902
334 /* We need to delay at least 3 TX clocks */ 903 /* We need to delay at least 3 TX clocks */
335 udelay(2); 904 udelay(2);
336 905
337 tempval = (MACCFG1_TX_FLOW | MACCFG1_RX_FLOW); 906 tempval = (MACCFG1_TX_FLOW | MACCFG1_RX_FLOW);
338 gfar_write(&priv->regs->maccfg1, tempval); 907 gfar_write(&regs->maccfg1, tempval);
339 908
340 /* Initialize MACCFG2. */ 909 /* Initialize MACCFG2. */
341 gfar_write(&priv->regs->maccfg2, MACCFG2_INIT_SETTINGS); 910 gfar_write(&regs->maccfg2, MACCFG2_INIT_SETTINGS);
342 911
343 /* Initialize ECNTRL */ 912 /* Initialize ECNTRL */
344 gfar_write(&priv->regs->ecntrl, ECNTRL_INIT_SETTINGS); 913 gfar_write(&regs->ecntrl, ECNTRL_INIT_SETTINGS);
345 914
346 /* Set the dev->base_addr to the gfar reg region */ 915 /* Set the dev->base_addr to the gfar reg region */
347 dev->base_addr = (unsigned long) (priv->regs); 916 dev->base_addr = (unsigned long) regs;
348 917
349 SET_NETDEV_DEV(dev, &ofdev->dev); 918 SET_NETDEV_DEV(dev, &ofdev->dev);
350 919
351 /* Fill in the dev structure */ 920 /* Fill in the dev structure */
352 dev->watchdog_timeo = TX_TIMEOUT; 921 dev->watchdog_timeo = TX_TIMEOUT;
353 netif_napi_add(dev, &priv->napi, gfar_poll, GFAR_DEV_WEIGHT);
354 dev->mtu = 1500; 922 dev->mtu = 1500;
355
356 dev->netdev_ops = &gfar_netdev_ops; 923 dev->netdev_ops = &gfar_netdev_ops;
357 dev->ethtool_ops = &gfar_ethtool_ops; 924 dev->ethtool_ops = &gfar_ethtool_ops;
358 925
926 /* Register NAPI; one NAPI context is registered per group */
927 for (i = 0; i < priv->num_grps; i++)
928 netif_napi_add(dev, &priv->gfargrp[i].napi, gfar_poll, GFAR_DEV_WEIGHT);
929
359 if (priv->device_flags & FSL_GIANFAR_DEV_HAS_CSUM) { 930 if (priv->device_flags & FSL_GIANFAR_DEV_HAS_CSUM) {
360 priv->rx_csum_enable = 1; 931 priv->rx_csum_enable = 1;
361 dev->features |= NETIF_F_IP_CSUM | NETIF_F_SG | NETIF_F_HIGHDMA; 932 dev->features |= NETIF_F_IP_CSUM | NETIF_F_SG | NETIF_F_HIGHDMA;
@@ -371,35 +942,35 @@ static int gfar_probe(struct of_device *ofdev,
371 priv->extended_hash = 1; 942 priv->extended_hash = 1;
372 priv->hash_width = 9; 943 priv->hash_width = 9;
373 944
374 priv->hash_regs[0] = &priv->regs->igaddr0; 945 priv->hash_regs[0] = &regs->igaddr0;
375 priv->hash_regs[1] = &priv->regs->igaddr1; 946 priv->hash_regs[1] = &regs->igaddr1;
376 priv->hash_regs[2] = &priv->regs->igaddr2; 947 priv->hash_regs[2] = &regs->igaddr2;
377 priv->hash_regs[3] = &priv->regs->igaddr3; 948 priv->hash_regs[3] = &regs->igaddr3;
378 priv->hash_regs[4] = &priv->regs->igaddr4; 949 priv->hash_regs[4] = &regs->igaddr4;
379 priv->hash_regs[5] = &priv->regs->igaddr5; 950 priv->hash_regs[5] = &regs->igaddr5;
380 priv->hash_regs[6] = &priv->regs->igaddr6; 951 priv->hash_regs[6] = &regs->igaddr6;
381 priv->hash_regs[7] = &priv->regs->igaddr7; 952 priv->hash_regs[7] = &regs->igaddr7;
382 priv->hash_regs[8] = &priv->regs->gaddr0; 953 priv->hash_regs[8] = &regs->gaddr0;
383 priv->hash_regs[9] = &priv->regs->gaddr1; 954 priv->hash_regs[9] = &regs->gaddr1;
384 priv->hash_regs[10] = &priv->regs->gaddr2; 955 priv->hash_regs[10] = &regs->gaddr2;
385 priv->hash_regs[11] = &priv->regs->gaddr3; 956 priv->hash_regs[11] = &regs->gaddr3;
386 priv->hash_regs[12] = &priv->regs->gaddr4; 957 priv->hash_regs[12] = &regs->gaddr4;
387 priv->hash_regs[13] = &priv->regs->gaddr5; 958 priv->hash_regs[13] = &regs->gaddr5;
388 priv->hash_regs[14] = &priv->regs->gaddr6; 959 priv->hash_regs[14] = &regs->gaddr6;
389 priv->hash_regs[15] = &priv->regs->gaddr7; 960 priv->hash_regs[15] = &regs->gaddr7;
390 961
391 } else { 962 } else {
392 priv->extended_hash = 0; 963 priv->extended_hash = 0;
393 priv->hash_width = 8; 964 priv->hash_width = 8;
394 965
395 priv->hash_regs[0] = &priv->regs->gaddr0; 966 priv->hash_regs[0] = &regs->gaddr0;
396 priv->hash_regs[1] = &priv->regs->gaddr1; 967 priv->hash_regs[1] = &regs->gaddr1;
397 priv->hash_regs[2] = &priv->regs->gaddr2; 968 priv->hash_regs[2] = &regs->gaddr2;
398 priv->hash_regs[3] = &priv->regs->gaddr3; 969 priv->hash_regs[3] = &regs->gaddr3;
399 priv->hash_regs[4] = &priv->regs->gaddr4; 970 priv->hash_regs[4] = &regs->gaddr4;
400 priv->hash_regs[5] = &priv->regs->gaddr5; 971 priv->hash_regs[5] = &regs->gaddr5;
401 priv->hash_regs[6] = &priv->regs->gaddr6; 972 priv->hash_regs[6] = &regs->gaddr6;
402 priv->hash_regs[7] = &priv->regs->gaddr7; 973 priv->hash_regs[7] = &regs->gaddr7;
403 } 974 }
404 975
405 if (priv->device_flags & FSL_GIANFAR_DEV_HAS_PADDING) 976 if (priv->device_flags & FSL_GIANFAR_DEV_HAS_PADDING)
@@ -410,16 +981,74 @@ static int gfar_probe(struct of_device *ofdev,
410 if (dev->features & NETIF_F_IP_CSUM) 981 if (dev->features & NETIF_F_IP_CSUM)
411 dev->hard_header_len += GMAC_FCB_LEN; 982 dev->hard_header_len += GMAC_FCB_LEN;
412 983
984 /* Program the isrg regs only if number of grps > 1 */
985 if (priv->num_grps > 1) {
986 baddr = &regs->isrg0;
987 for (i = 0; i < priv->num_grps; i++) {
988 isrg |= (priv->gfargrp[i].rx_bit_map << ISRG_SHIFT_RX);
989 isrg |= (priv->gfargrp[i].tx_bit_map << ISRG_SHIFT_TX);
990 gfar_write(baddr, isrg);
991 baddr++;
992 isrg = 0x0;
993 }
994 }
995
996 /* Need to reverse the bit maps as bit_map's MSB is q0
997 * but, for_each_set_bit parses from right to left, which
998 * basically reverses the queue numbers */
999 for (i = 0; i< priv->num_grps; i++) {
1000 priv->gfargrp[i].tx_bit_map = reverse_bitmap(
1001 priv->gfargrp[i].tx_bit_map, MAX_TX_QS);
1002 priv->gfargrp[i].rx_bit_map = reverse_bitmap(
1003 priv->gfargrp[i].rx_bit_map, MAX_RX_QS);
1004 }
1005
1006 /* Calculate RSTAT, TSTAT, RQUEUE and TQUEUE values,
1007 * also assign queues to groups */
1008 for (grp_idx = 0; grp_idx < priv->num_grps; grp_idx++) {
1009 priv->gfargrp[grp_idx].num_rx_queues = 0x0;
1010 for_each_set_bit(i, &priv->gfargrp[grp_idx].rx_bit_map,
1011 priv->num_rx_queues) {
1012 priv->gfargrp[grp_idx].num_rx_queues++;
1013 priv->rx_queue[i]->grp = &priv->gfargrp[grp_idx];
1014 rstat = rstat | (RSTAT_CLEAR_RHALT >> i);
1015 rqueue = rqueue | ((RQUEUE_EN0 | RQUEUE_EX0) >> i);
1016 }
1017 priv->gfargrp[grp_idx].num_tx_queues = 0x0;
1018 for_each_set_bit(i, &priv->gfargrp[grp_idx].tx_bit_map,
1019 priv->num_tx_queues) {
1020 priv->gfargrp[grp_idx].num_tx_queues++;
1021 priv->tx_queue[i]->grp = &priv->gfargrp[grp_idx];
1022 tstat = tstat | (TSTAT_CLEAR_THALT >> i);
1023 tqueue = tqueue | (TQUEUE_EN0 >> i);
1024 }
1025 priv->gfargrp[grp_idx].rstat = rstat;
1026 priv->gfargrp[grp_idx].tstat = tstat;
1027 rstat = tstat =0;
1028 }
1029
1030 gfar_write(&regs->rqueue, rqueue);
1031 gfar_write(&regs->tqueue, tqueue);
1032
413 priv->rx_buffer_size = DEFAULT_RX_BUFFER_SIZE; 1033 priv->rx_buffer_size = DEFAULT_RX_BUFFER_SIZE;
414 priv->tx_ring_size = DEFAULT_TX_RING_SIZE;
415 priv->rx_ring_size = DEFAULT_RX_RING_SIZE;
416 priv->num_txbdfree = DEFAULT_TX_RING_SIZE;
417 1034
418 priv->txcoalescing = DEFAULT_TX_COALESCE; 1035 /* Initializing some of the rx/tx queue level parameters */
419 priv->txic = DEFAULT_TXIC; 1036 for (i = 0; i < priv->num_tx_queues; i++) {
420 priv->rxcoalescing = DEFAULT_RX_COALESCE; 1037 priv->tx_queue[i]->tx_ring_size = DEFAULT_TX_RING_SIZE;
421 priv->rxic = DEFAULT_RXIC; 1038 priv->tx_queue[i]->num_txbdfree = DEFAULT_TX_RING_SIZE;
1039 priv->tx_queue[i]->txcoalescing = DEFAULT_TX_COALESCE;
1040 priv->tx_queue[i]->txic = DEFAULT_TXIC;
1041 }
422 1042
1043 for (i = 0; i < priv->num_rx_queues; i++) {
1044 priv->rx_queue[i]->rx_ring_size = DEFAULT_RX_RING_SIZE;
1045 priv->rx_queue[i]->rxcoalescing = DEFAULT_RX_COALESCE;
1046 priv->rx_queue[i]->rxic = DEFAULT_RXIC;
1047 }
1048
1049 /* enable filer if using multiple RX queues*/
1050 if(priv->num_rx_queues > 1)
1051 priv->rx_filer_enable = 1;
423 /* Enable most messages by default */ 1052 /* Enable most messages by default */
424 priv->msg_enable = (NETIF_MSG_IFUP << 1 ) - 1; 1053 priv->msg_enable = (NETIF_MSG_IFUP << 1 ) - 1;
425 1054
@@ -439,20 +1068,43 @@ static int gfar_probe(struct of_device *ofdev,
439 1068
440 /* fill out IRQ number and name fields */ 1069 /* fill out IRQ number and name fields */
441 len_devname = strlen(dev->name); 1070 len_devname = strlen(dev->name);
442 strncpy(&priv->int_name_tx[0], dev->name, len_devname); 1071 for (i = 0; i < priv->num_grps; i++) {
443 if (priv->device_flags & FSL_GIANFAR_DEV_HAS_MULTI_INTR) { 1072 strncpy(&priv->gfargrp[i].int_name_tx[0], dev->name,
444 strncpy(&priv->int_name_tx[len_devname], 1073 len_devname);
445 "_tx", sizeof("_tx") + 1); 1074 if (priv->device_flags & FSL_GIANFAR_DEV_HAS_MULTI_INTR) {
446 1075 strncpy(&priv->gfargrp[i].int_name_tx[len_devname],
447 strncpy(&priv->int_name_rx[0], dev->name, len_devname); 1076 "_g", sizeof("_g"));
448 strncpy(&priv->int_name_rx[len_devname], 1077 priv->gfargrp[i].int_name_tx[
449 "_rx", sizeof("_rx") + 1); 1078 strlen(priv->gfargrp[i].int_name_tx)] = i+48;
1079 strncpy(&priv->gfargrp[i].int_name_tx[strlen(
1080 priv->gfargrp[i].int_name_tx)],
1081 "_tx", sizeof("_tx") + 1);
1082
1083 strncpy(&priv->gfargrp[i].int_name_rx[0], dev->name,
1084 len_devname);
1085 strncpy(&priv->gfargrp[i].int_name_rx[len_devname],
1086 "_g", sizeof("_g"));
1087 priv->gfargrp[i].int_name_rx[
1088 strlen(priv->gfargrp[i].int_name_rx)] = i+48;
1089 strncpy(&priv->gfargrp[i].int_name_rx[strlen(
1090 priv->gfargrp[i].int_name_rx)],
1091 "_rx", sizeof("_rx") + 1);
1092
1093 strncpy(&priv->gfargrp[i].int_name_er[0], dev->name,
1094 len_devname);
1095 strncpy(&priv->gfargrp[i].int_name_er[len_devname],
1096 "_g", sizeof("_g"));
1097 priv->gfargrp[i].int_name_er[strlen(
1098 priv->gfargrp[i].int_name_er)] = i+48;
1099 strncpy(&priv->gfargrp[i].int_name_er[strlen(\
1100 priv->gfargrp[i].int_name_er)],
1101 "_er", sizeof("_er") + 1);
1102 } else
1103 priv->gfargrp[i].int_name_tx[len_devname] = '\0';
1104 }
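/* Annotation: the i+48 above is the ASCII digit for the group index, so
 * with multiple interrupts group 0 of eth0 ends up with the IRQ names
 * "eth0_g0_tx", "eth0_g0_rx" and "eth0_g0_er"; single-interrupt (FEC-style)
 * devices keep the plain device name. */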
450 1105
451 strncpy(&priv->int_name_er[0], dev->name, len_devname); 1106 /* Initialize the filer table */
452 strncpy(&priv->int_name_er[len_devname], 1107 gfar_init_filer_table(priv);
453 "_er", sizeof("_er") + 1);
454 } else
455 priv->int_name_tx[len_devname] = '\0';
456 1108
457 /* Create all the sysfs files */ 1109 /* Create all the sysfs files */
458 gfar_init_sysfs(dev); 1110 gfar_init_sysfs(dev);
@@ -463,14 +1115,19 @@ static int gfar_probe(struct of_device *ofdev,
463 /* Even more device info helps when determining which kernel */ 1115 /* Even more device info helps when determining which kernel */
464 /* provided which set of benchmarks. */ 1116 /* provided which set of benchmarks. */
465 printk(KERN_INFO "%s: Running with NAPI enabled\n", dev->name); 1117 printk(KERN_INFO "%s: Running with NAPI enabled\n", dev->name);
466 printk(KERN_INFO "%s: %d/%d RX/TX BD ring size\n", 1118 for (i = 0; i < priv->num_rx_queues; i++)
467 dev->name, priv->rx_ring_size, priv->tx_ring_size); 1119 printk(KERN_INFO "%s: RX BD ring size for Q[%d]: %d\n",
1120 dev->name, i, priv->rx_queue[i]->rx_ring_size);
1121 for(i = 0; i < priv->num_tx_queues; i++)
1122 printk(KERN_INFO "%s: TX BD ring size for Q[%d]: %d\n",
1123 dev->name, i, priv->tx_queue[i]->tx_ring_size);
468 1124
469 return 0; 1125 return 0;
470 1126
471register_fail: 1127register_fail:
472 iounmap(priv->regs); 1128 unmap_group_regs(priv);
473regs_fail: 1129 free_tx_pointers(priv);
1130 free_rx_pointers(priv);
474 if (priv->phy_node) 1131 if (priv->phy_node)
475 of_node_put(priv->phy_node); 1132 of_node_put(priv->phy_node);
476 if (priv->tbi_node) 1133 if (priv->tbi_node)
@@ -491,54 +1148,59 @@ static int gfar_remove(struct of_device *ofdev)
491 dev_set_drvdata(&ofdev->dev, NULL); 1148 dev_set_drvdata(&ofdev->dev, NULL);
492 1149
493 unregister_netdev(priv->ndev); 1150 unregister_netdev(priv->ndev);
494 iounmap(priv->regs); 1151 unmap_group_regs(priv);
495 free_netdev(priv->ndev); 1152 free_netdev(priv->ndev);
496 1153
497 return 0; 1154 return 0;
498} 1155}
499 1156
500#ifdef CONFIG_PM 1157#ifdef CONFIG_PM
501static int gfar_suspend(struct of_device *ofdev, pm_message_t state) 1158
1159static int gfar_suspend(struct device *dev)
502{ 1160{
503 struct gfar_private *priv = dev_get_drvdata(&ofdev->dev); 1161 struct gfar_private *priv = dev_get_drvdata(dev);
504 struct net_device *dev = priv->ndev; 1162 struct net_device *ndev = priv->ndev;
1163 struct gfar __iomem *regs = priv->gfargrp[0].regs;
505 unsigned long flags; 1164 unsigned long flags;
506 u32 tempval; 1165 u32 tempval;
507 1166
508 int magic_packet = priv->wol_en && 1167 int magic_packet = priv->wol_en &&
509 (priv->device_flags & FSL_GIANFAR_DEV_HAS_MAGIC_PACKET); 1168 (priv->device_flags & FSL_GIANFAR_DEV_HAS_MAGIC_PACKET);
510 1169
511 netif_device_detach(dev); 1170 netif_device_detach(ndev);
1171
1172 if (netif_running(ndev)) {
512 1173
513 if (netif_running(dev)) { 1174 local_irq_save(flags);
514 spin_lock_irqsave(&priv->txlock, flags); 1175 lock_tx_qs(priv);
515 spin_lock(&priv->rxlock); 1176 lock_rx_qs(priv);
516 1177
517 gfar_halt_nodisable(dev); 1178 gfar_halt_nodisable(ndev);
518 1179
519 /* Disable Tx, and Rx if wake-on-LAN is disabled. */ 1180 /* Disable Tx, and Rx if wake-on-LAN is disabled. */
520 tempval = gfar_read(&priv->regs->maccfg1); 1181 tempval = gfar_read(&regs->maccfg1);
521 1182
522 tempval &= ~MACCFG1_TX_EN; 1183 tempval &= ~MACCFG1_TX_EN;
523 1184
524 if (!magic_packet) 1185 if (!magic_packet)
525 tempval &= ~MACCFG1_RX_EN; 1186 tempval &= ~MACCFG1_RX_EN;
526 1187
527 gfar_write(&priv->regs->maccfg1, tempval); 1188 gfar_write(&regs->maccfg1, tempval);
528 1189
529 spin_unlock(&priv->rxlock); 1190 unlock_rx_qs(priv);
530 spin_unlock_irqrestore(&priv->txlock, flags); 1191 unlock_tx_qs(priv);
1192 local_irq_restore(flags);
531 1193
532 napi_disable(&priv->napi); 1194 disable_napi(priv);
533 1195
534 if (magic_packet) { 1196 if (magic_packet) {
535 /* Enable interrupt on Magic Packet */ 1197 /* Enable interrupt on Magic Packet */
536 gfar_write(&priv->regs->imask, IMASK_MAG); 1198 gfar_write(&regs->imask, IMASK_MAG);
537 1199
538 /* Enable Magic Packet mode */ 1200 /* Enable Magic Packet mode */
539 tempval = gfar_read(&priv->regs->maccfg2); 1201 tempval = gfar_read(&regs->maccfg2);
540 tempval |= MACCFG2_MPEN; 1202 tempval |= MACCFG2_MPEN;
541 gfar_write(&priv->regs->maccfg2, tempval); 1203 gfar_write(&regs->maccfg2, tempval);
542 } else { 1204 } else {
543 phy_stop(priv->phydev); 1205 phy_stop(priv->phydev);
544 } 1206 }
@@ -547,17 +1209,18 @@ static int gfar_suspend(struct of_device *ofdev, pm_message_t state)
547 return 0; 1209 return 0;
548} 1210}
549 1211
550static int gfar_resume(struct of_device *ofdev) 1212static int gfar_resume(struct device *dev)
551{ 1213{
552 struct gfar_private *priv = dev_get_drvdata(&ofdev->dev); 1214 struct gfar_private *priv = dev_get_drvdata(dev);
553 struct net_device *dev = priv->ndev; 1215 struct net_device *ndev = priv->ndev;
1216 struct gfar __iomem *regs = priv->gfargrp[0].regs;
554 unsigned long flags; 1217 unsigned long flags;
555 u32 tempval; 1218 u32 tempval;
556 int magic_packet = priv->wol_en && 1219 int magic_packet = priv->wol_en &&
557 (priv->device_flags & FSL_GIANFAR_DEV_HAS_MAGIC_PACKET); 1220 (priv->device_flags & FSL_GIANFAR_DEV_HAS_MAGIC_PACKET);
558 1221
559 if (!netif_running(dev)) { 1222 if (!netif_running(ndev)) {
560 netif_device_attach(dev); 1223 netif_device_attach(ndev);
561 return 0; 1224 return 0;
562 } 1225 }
563 1226
@@ -567,28 +1230,80 @@ static int gfar_resume(struct of_device *ofdev)
567 /* Disable Magic Packet mode, in case something 1230 /* Disable Magic Packet mode, in case something
568 * else woke us up. 1231 * else woke us up.
569 */ 1232 */
1233 local_irq_save(flags);
1234 lock_tx_qs(priv);
1235 lock_rx_qs(priv);
570 1236
571 spin_lock_irqsave(&priv->txlock, flags); 1237 tempval = gfar_read(&regs->maccfg2);
572 spin_lock(&priv->rxlock);
573
574 tempval = gfar_read(&priv->regs->maccfg2);
575 tempval &= ~MACCFG2_MPEN; 1238 tempval &= ~MACCFG2_MPEN;
576 gfar_write(&priv->regs->maccfg2, tempval); 1239 gfar_write(&regs->maccfg2, tempval);
1240
1241 gfar_start(ndev);
1242
1243 unlock_rx_qs(priv);
1244 unlock_tx_qs(priv);
1245 local_irq_restore(flags);
1246
1247 netif_device_attach(ndev);
1248
1249 enable_napi(priv);
1250
1251 return 0;
1252}
577 1253
578 gfar_start(dev); 1254static int gfar_restore(struct device *dev)
1255{
1256 struct gfar_private *priv = dev_get_drvdata(dev);
1257 struct net_device *ndev = priv->ndev;
1258
1259 if (!netif_running(ndev))
1260 return 0;
1261
1262 gfar_init_bds(ndev);
1263 init_registers(ndev);
1264 gfar_set_mac_address(ndev);
1265 gfar_init_mac(ndev);
1266 gfar_start(ndev);
579 1267
580 spin_unlock(&priv->rxlock); 1268 priv->oldlink = 0;
581 spin_unlock_irqrestore(&priv->txlock, flags); 1269 priv->oldspeed = 0;
1270 priv->oldduplex = -1;
582 1271
583 netif_device_attach(dev); 1272 if (priv->phydev)
1273 phy_start(priv->phydev);
584 1274
585 napi_enable(&priv->napi); 1275 netif_device_attach(ndev);
1276 enable_napi(priv);
586 1277
587 return 0; 1278 return 0;
588} 1279}
1280
1281static struct dev_pm_ops gfar_pm_ops = {
1282 .suspend = gfar_suspend,
1283 .resume = gfar_resume,
1284 .freeze = gfar_suspend,
1285 .thaw = gfar_resume,
1286 .restore = gfar_restore,
1287};
1288
1289#define GFAR_PM_OPS (&gfar_pm_ops)
1290
1291static int gfar_legacy_suspend(struct of_device *ofdev, pm_message_t state)
1292{
1293 return gfar_suspend(&ofdev->dev);
1294}
1295
1296static int gfar_legacy_resume(struct of_device *ofdev)
1297{
1298 return gfar_resume(&ofdev->dev);
1299}
1300
589#else 1301#else
590#define gfar_suspend NULL 1302
591#define gfar_resume NULL 1303#define GFAR_PM_OPS NULL
1304#define gfar_legacy_suspend NULL
1305#define gfar_legacy_resume NULL
1306
592#endif 1307#endif
593 1308
594/* Reads the controller's registers to determine what interface 1309/* Reads the controller's registers to determine what interface
@@ -597,7 +1312,10 @@ static int gfar_resume(struct of_device *ofdev)
597static phy_interface_t gfar_get_interface(struct net_device *dev) 1312static phy_interface_t gfar_get_interface(struct net_device *dev)
598{ 1313{
599 struct gfar_private *priv = netdev_priv(dev); 1314 struct gfar_private *priv = netdev_priv(dev);
600 u32 ecntrl = gfar_read(&priv->regs->ecntrl); 1315 struct gfar __iomem *regs = priv->gfargrp[0].regs;
1316 u32 ecntrl;
1317
1318 ecntrl = gfar_read(&regs->ecntrl);
601 1319
602 if (ecntrl & ECNTRL_SGMII_MODE) 1320 if (ecntrl & ECNTRL_SGMII_MODE)
603 return PHY_INTERFACE_MODE_SGMII; 1321 return PHY_INTERFACE_MODE_SGMII;
@@ -719,46 +1437,52 @@ static void gfar_configure_serdes(struct net_device *dev)
719static void init_registers(struct net_device *dev) 1437static void init_registers(struct net_device *dev)
720{ 1438{
721 struct gfar_private *priv = netdev_priv(dev); 1439 struct gfar_private *priv = netdev_priv(dev);
1440 struct gfar __iomem *regs = NULL;
1441 int i = 0;
722 1442
723 /* Clear IEVENT */ 1443 for (i = 0; i < priv->num_grps; i++) {
724 gfar_write(&priv->regs->ievent, IEVENT_INIT_CLEAR); 1444 regs = priv->gfargrp[i].regs;
1445 /* Clear IEVENT */
1446 gfar_write(&regs->ievent, IEVENT_INIT_CLEAR);
725 1447
726 /* Initialize IMASK */ 1448 /* Initialize IMASK */
727 gfar_write(&priv->regs->imask, IMASK_INIT_CLEAR); 1449 gfar_write(&regs->imask, IMASK_INIT_CLEAR);
1450 }
728 1451
1452 regs = priv->gfargrp[0].regs;
729 /* Init hash registers to zero */ 1453 /* Init hash registers to zero */
730 gfar_write(&priv->regs->igaddr0, 0); 1454 gfar_write(&regs->igaddr0, 0);
731 gfar_write(&priv->regs->igaddr1, 0); 1455 gfar_write(&regs->igaddr1, 0);
732 gfar_write(&priv->regs->igaddr2, 0); 1456 gfar_write(&regs->igaddr2, 0);
733 gfar_write(&priv->regs->igaddr3, 0); 1457 gfar_write(&regs->igaddr3, 0);
734 gfar_write(&priv->regs->igaddr4, 0); 1458 gfar_write(&regs->igaddr4, 0);
735 gfar_write(&priv->regs->igaddr5, 0); 1459 gfar_write(&regs->igaddr5, 0);
736 gfar_write(&priv->regs->igaddr6, 0); 1460 gfar_write(&regs->igaddr6, 0);
737 gfar_write(&priv->regs->igaddr7, 0); 1461 gfar_write(&regs->igaddr7, 0);
738 1462
739 gfar_write(&priv->regs->gaddr0, 0); 1463 gfar_write(&regs->gaddr0, 0);
740 gfar_write(&priv->regs->gaddr1, 0); 1464 gfar_write(&regs->gaddr1, 0);
741 gfar_write(&priv->regs->gaddr2, 0); 1465 gfar_write(&regs->gaddr2, 0);
742 gfar_write(&priv->regs->gaddr3, 0); 1466 gfar_write(&regs->gaddr3, 0);
743 gfar_write(&priv->regs->gaddr4, 0); 1467 gfar_write(&regs->gaddr4, 0);
744 gfar_write(&priv->regs->gaddr5, 0); 1468 gfar_write(&regs->gaddr5, 0);
745 gfar_write(&priv->regs->gaddr6, 0); 1469 gfar_write(&regs->gaddr6, 0);
746 gfar_write(&priv->regs->gaddr7, 0); 1470 gfar_write(&regs->gaddr7, 0);
747 1471
748 /* Zero out the rmon mib registers if it has them */ 1472 /* Zero out the rmon mib registers if it has them */
749 if (priv->device_flags & FSL_GIANFAR_DEV_HAS_RMON) { 1473 if (priv->device_flags & FSL_GIANFAR_DEV_HAS_RMON) {
750 memset_io(&(priv->regs->rmon), 0, sizeof (struct rmon_mib)); 1474 memset_io(&(regs->rmon), 0, sizeof (struct rmon_mib));
751 1475
752 /* Mask off the CAM interrupts */ 1476 /* Mask off the CAM interrupts */
753 gfar_write(&priv->regs->rmon.cam1, 0xffffffff); 1477 gfar_write(&regs->rmon.cam1, 0xffffffff);
754 gfar_write(&priv->regs->rmon.cam2, 0xffffffff); 1478 gfar_write(&regs->rmon.cam2, 0xffffffff);
755 } 1479 }
756 1480
757 /* Initialize the max receive buffer length */ 1481 /* Initialize the max receive buffer length */
758 gfar_write(&priv->regs->mrblr, priv->rx_buffer_size); 1482 gfar_write(&regs->mrblr, priv->rx_buffer_size);
759 1483
760 /* Initialize the Minimum Frame Length Register */ 1484 /* Initialize the Minimum Frame Length Register */
761 gfar_write(&priv->regs->minflr, MINFLR_INIT_SETTINGS); 1485 gfar_write(&regs->minflr, MINFLR_INIT_SETTINGS);
762} 1486}
763 1487
764 1488
@@ -766,25 +1490,30 @@ static void init_registers(struct net_device *dev)
766static void gfar_halt_nodisable(struct net_device *dev) 1490static void gfar_halt_nodisable(struct net_device *dev)
767{ 1491{
768 struct gfar_private *priv = netdev_priv(dev); 1492 struct gfar_private *priv = netdev_priv(dev);
769 struct gfar __iomem *regs = priv->regs; 1493 struct gfar __iomem *regs = NULL;
770 u32 tempval; 1494 u32 tempval;
1495 int i = 0;
771 1496
772 /* Mask all interrupts */ 1497 for (i = 0; i < priv->num_grps; i++) {
773 gfar_write(&regs->imask, IMASK_INIT_CLEAR); 1498 regs = priv->gfargrp[i].regs;
1499 /* Mask all interrupts */
1500 gfar_write(&regs->imask, IMASK_INIT_CLEAR);
774 1501
775 /* Clear all interrupts */ 1502 /* Clear all interrupts */
776 gfar_write(&regs->ievent, IEVENT_INIT_CLEAR); 1503 gfar_write(&regs->ievent, IEVENT_INIT_CLEAR);
1504 }
777 1505
1506 regs = priv->gfargrp[0].regs;
778 /* Stop the DMA, and wait for it to stop */ 1507 /* Stop the DMA, and wait for it to stop */
779 tempval = gfar_read(&priv->regs->dmactrl); 1508 tempval = gfar_read(&regs->dmactrl);
780 if ((tempval & (DMACTRL_GRS | DMACTRL_GTS)) 1509 if ((tempval & (DMACTRL_GRS | DMACTRL_GTS))
781 != (DMACTRL_GRS | DMACTRL_GTS)) { 1510 != (DMACTRL_GRS | DMACTRL_GTS)) {
782 tempval |= (DMACTRL_GRS | DMACTRL_GTS); 1511 tempval |= (DMACTRL_GRS | DMACTRL_GTS);
783 gfar_write(&priv->regs->dmactrl, tempval); 1512 gfar_write(&regs->dmactrl, tempval);
784 1513
785 while (!(gfar_read(&priv->regs->ievent) & 1514 spin_event_timeout(((gfar_read(&regs->ievent) &
786 (IEVENT_GRSC | IEVENT_GTSC))) 1515 (IEVENT_GRSC | IEVENT_GTSC)) ==
787 cpu_relax(); 1516 (IEVENT_GRSC | IEVENT_GTSC)), -1, 0);
788 } 1517 }
789} 1518}
790 1519
@@ -792,7 +1521,7 @@ static void gfar_halt_nodisable(struct net_device *dev)
792void gfar_halt(struct net_device *dev) 1521void gfar_halt(struct net_device *dev)
793{ 1522{
794 struct gfar_private *priv = netdev_priv(dev); 1523 struct gfar_private *priv = netdev_priv(dev);
795 struct gfar __iomem *regs = priv->regs; 1524 struct gfar __iomem *regs = priv->gfargrp[0].regs;
796 u32 tempval; 1525 u32 tempval;
797 1526
798 gfar_halt_nodisable(dev); 1527 gfar_halt_nodisable(dev);
@@ -803,101 +1532,132 @@ void gfar_halt(struct net_device *dev)
803 gfar_write(&regs->maccfg1, tempval); 1532 gfar_write(&regs->maccfg1, tempval);
804} 1533}
805 1534
1535static void free_grp_irqs(struct gfar_priv_grp *grp)
1536{
1537 free_irq(grp->interruptError, grp);
1538 free_irq(grp->interruptTransmit, grp);
1539 free_irq(grp->interruptReceive, grp);
1540}
1541
806void stop_gfar(struct net_device *dev) 1542void stop_gfar(struct net_device *dev)
807{ 1543{
808 struct gfar_private *priv = netdev_priv(dev); 1544 struct gfar_private *priv = netdev_priv(dev);
809 struct gfar __iomem *regs = priv->regs;
810 unsigned long flags; 1545 unsigned long flags;
1546 int i;
811 1547
812 phy_stop(priv->phydev); 1548 phy_stop(priv->phydev);
813 1549
1550
814 /* Lock it down */ 1551 /* Lock it down */
815 spin_lock_irqsave(&priv->txlock, flags); 1552 local_irq_save(flags);
816 spin_lock(&priv->rxlock); 1553 lock_tx_qs(priv);
1554 lock_rx_qs(priv);
817 1555
818 gfar_halt(dev); 1556 gfar_halt(dev);
819 1557
820 spin_unlock(&priv->rxlock); 1558 unlock_rx_qs(priv);
821 spin_unlock_irqrestore(&priv->txlock, flags); 1559 unlock_tx_qs(priv);
1560 local_irq_restore(flags);
822 1561
823 /* Free the IRQs */ 1562 /* Free the IRQs */
824 if (priv->device_flags & FSL_GIANFAR_DEV_HAS_MULTI_INTR) { 1563 if (priv->device_flags & FSL_GIANFAR_DEV_HAS_MULTI_INTR) {
825 free_irq(priv->interruptError, dev); 1564 for (i = 0; i < priv->num_grps; i++)
826 free_irq(priv->interruptTransmit, dev); 1565 free_grp_irqs(&priv->gfargrp[i]);
827 free_irq(priv->interruptReceive, dev);
828 } else { 1566 } else {
829 free_irq(priv->interruptTransmit, dev); 1567 for (i = 0; i < priv->num_grps; i++)
1568 free_irq(priv->gfargrp[i].interruptTransmit,
1569 &priv->gfargrp[i]);
830 } 1570 }
831 1571
832 free_skb_resources(priv); 1572 free_skb_resources(priv);
833
834 dma_free_coherent(&priv->ofdev->dev,
835 sizeof(struct txbd8)*priv->tx_ring_size
836 + sizeof(struct rxbd8)*priv->rx_ring_size,
837 priv->tx_bd_base,
838 gfar_read(&regs->tbase0));
839} 1573}
840 1574
841/* If there are any tx skbs or rx skbs still around, free them. 1575static void free_skb_tx_queue(struct gfar_priv_tx_q *tx_queue)
842 * Then free tx_skbuff and rx_skbuff */
843static void free_skb_resources(struct gfar_private *priv)
844{ 1576{
845 struct rxbd8 *rxbdp;
846 struct txbd8 *txbdp; 1577 struct txbd8 *txbdp;
1578 struct gfar_private *priv = netdev_priv(tx_queue->dev);
847 int i, j; 1579 int i, j;
848 1580
849 /* Go through all the buffer descriptors and free their data buffers */ 1581 txbdp = tx_queue->tx_bd_base;
850 txbdp = priv->tx_bd_base;
851 1582
852 for (i = 0; i < priv->tx_ring_size; i++) { 1583 for (i = 0; i < tx_queue->tx_ring_size; i++) {
853 if (!priv->tx_skbuff[i]) 1584 if (!tx_queue->tx_skbuff[i])
854 continue; 1585 continue;
855 1586
856 dma_unmap_single(&priv->ofdev->dev, txbdp->bufPtr, 1587 dma_unmap_single(&priv->ofdev->dev, txbdp->bufPtr,
857 txbdp->length, DMA_TO_DEVICE); 1588 txbdp->length, DMA_TO_DEVICE);
858 txbdp->lstatus = 0; 1589 txbdp->lstatus = 0;
859 for (j = 0; j < skb_shinfo(priv->tx_skbuff[i])->nr_frags; j++) { 1590 for (j = 0; j < skb_shinfo(tx_queue->tx_skbuff[i])->nr_frags;
1591 j++) {
860 txbdp++; 1592 txbdp++;
861 dma_unmap_page(&priv->ofdev->dev, txbdp->bufPtr, 1593 dma_unmap_page(&priv->ofdev->dev, txbdp->bufPtr,
862 txbdp->length, DMA_TO_DEVICE); 1594 txbdp->length, DMA_TO_DEVICE);
863 } 1595 }
864 txbdp++; 1596 txbdp++;
865 dev_kfree_skb_any(priv->tx_skbuff[i]); 1597 dev_kfree_skb_any(tx_queue->tx_skbuff[i]);
866 priv->tx_skbuff[i] = NULL; 1598 tx_queue->tx_skbuff[i] = NULL;
867 } 1599 }
1600 kfree(tx_queue->tx_skbuff);
1601}
868 1602
869 kfree(priv->tx_skbuff); 1603static void free_skb_rx_queue(struct gfar_priv_rx_q *rx_queue)
870 1604{
871 rxbdp = priv->rx_bd_base; 1605 struct rxbd8 *rxbdp;
1606 struct gfar_private *priv = netdev_priv(rx_queue->dev);
1607 int i;
872 1608
873 /* rx_skbuff is not guaranteed to be allocated, so only 1609 rxbdp = rx_queue->rx_bd_base;
874 * free it and its contents if it is allocated */
875 if(priv->rx_skbuff != NULL) {
876 for (i = 0; i < priv->rx_ring_size; i++) {
877 if (priv->rx_skbuff[i]) {
878 dma_unmap_single(&priv->ofdev->dev, rxbdp->bufPtr,
879 priv->rx_buffer_size,
880 DMA_FROM_DEVICE);
881 1610
882 dev_kfree_skb_any(priv->rx_skbuff[i]); 1611 for (i = 0; i < rx_queue->rx_ring_size; i++) {
883 priv->rx_skbuff[i] = NULL; 1612 if (rx_queue->rx_skbuff[i]) {
884 } 1613 dma_unmap_single(&priv->ofdev->dev,
1614 rxbdp->bufPtr, priv->rx_buffer_size,
1615 DMA_FROM_DEVICE);
1616 dev_kfree_skb_any(rx_queue->rx_skbuff[i]);
1617 rx_queue->rx_skbuff[i] = NULL;
1618 }
1619 rxbdp->lstatus = 0;
1620 rxbdp->bufPtr = 0;
1621 rxbdp++;
1622 }
1623 kfree(rx_queue->rx_skbuff);
1624}
885 1625
886 rxbdp->lstatus = 0; 1626/* If there are any tx skbs or rx skbs still around, free them.
887 rxbdp->bufPtr = 0; 1627 * Then free tx_skbuff and rx_skbuff */
1628static void free_skb_resources(struct gfar_private *priv)
1629{
1630 struct gfar_priv_tx_q *tx_queue = NULL;
1631 struct gfar_priv_rx_q *rx_queue = NULL;
1632 int i;
888 1633
889 rxbdp++; 1634 /* Go through all the buffer descriptors and free their data buffers */
890 } 1635 for (i = 0; i < priv->num_tx_queues; i++) {
1636 tx_queue = priv->tx_queue[i];
1637 if(tx_queue->tx_skbuff)
1638 free_skb_tx_queue(tx_queue);
1639 }
891 1640
892 kfree(priv->rx_skbuff); 1641 for (i = 0; i < priv->num_rx_queues; i++) {
1642 rx_queue = priv->rx_queue[i];
1643 if(rx_queue->rx_skbuff)
1644 free_skb_rx_queue(rx_queue);
893 } 1645 }
1646
1647 dma_free_coherent(&priv->ofdev->dev,
1648 sizeof(struct txbd8) * priv->total_tx_ring_size +
1649 sizeof(struct rxbd8) * priv->total_rx_ring_size,
1650 priv->tx_queue[0]->tx_bd_base,
1651 priv->tx_queue[0]->tx_bd_dma_base);
1652 skb_queue_purge(&priv->rx_recycle);
894} 1653}
895 1654
896void gfar_start(struct net_device *dev) 1655void gfar_start(struct net_device *dev)
897{ 1656{
898 struct gfar_private *priv = netdev_priv(dev); 1657 struct gfar_private *priv = netdev_priv(dev);
899 struct gfar __iomem *regs = priv->regs; 1658 struct gfar __iomem *regs = priv->gfargrp[0].regs;
900 u32 tempval; 1659 u32 tempval;
1660 int i = 0;
901 1661
902 /* Enable Rx and Tx in MACCFG1 */ 1662 /* Enable Rx and Tx in MACCFG1 */
903 tempval = gfar_read(&regs->maccfg1); 1663 tempval = gfar_read(&regs->maccfg1);
@@ -905,269 +1665,159 @@ void gfar_start(struct net_device *dev)
905 gfar_write(&regs->maccfg1, tempval); 1665 gfar_write(&regs->maccfg1, tempval);
906 1666
907 /* Initialize DMACTRL to have WWR and WOP */ 1667 /* Initialize DMACTRL to have WWR and WOP */
908 tempval = gfar_read(&priv->regs->dmactrl); 1668 tempval = gfar_read(&regs->dmactrl);
909 tempval |= DMACTRL_INIT_SETTINGS; 1669 tempval |= DMACTRL_INIT_SETTINGS;
910 gfar_write(&priv->regs->dmactrl, tempval); 1670 gfar_write(&regs->dmactrl, tempval);
911 1671
912 /* Make sure we aren't stopped */ 1672 /* Make sure we aren't stopped */
913 tempval = gfar_read(&priv->regs->dmactrl); 1673 tempval = gfar_read(&regs->dmactrl);
914 tempval &= ~(DMACTRL_GRS | DMACTRL_GTS); 1674 tempval &= ~(DMACTRL_GRS | DMACTRL_GTS);
915 gfar_write(&priv->regs->dmactrl, tempval); 1675 gfar_write(&regs->dmactrl, tempval);
916 1676
917 /* Clear THLT/RHLT, so that the DMA starts polling now */ 1677 for (i = 0; i < priv->num_grps; i++) {
918 gfar_write(&regs->tstat, TSTAT_CLEAR_THALT); 1678 regs = priv->gfargrp[i].regs;
919 gfar_write(&regs->rstat, RSTAT_CLEAR_RHALT); 1679 /* Clear THLT/RHLT, so that the DMA starts polling now */
920 1680 gfar_write(&regs->tstat, priv->gfargrp[i].tstat);
921 /* Unmask the interrupts we look for */ 1681 gfar_write(&regs->rstat, priv->gfargrp[i].rstat);
922 gfar_write(&regs->imask, IMASK_DEFAULT); 1682 /* Unmask the interrupts we look for */
1683 gfar_write(&regs->imask, IMASK_DEFAULT);
1684 }
923 1685
924 dev->trans_start = jiffies; 1686 dev->trans_start = jiffies;
925} 1687}
926 1688
927/* Bring the controller up and running */ 1689void gfar_configure_coalescing(struct gfar_private *priv,
928int startup_gfar(struct net_device *dev) 1690 unsigned long tx_mask, unsigned long rx_mask)
929{ 1691{
930 struct txbd8 *txbdp; 1692 struct gfar __iomem *regs = priv->gfargrp[0].regs;
931 struct rxbd8 *rxbdp; 1693 u32 __iomem *baddr;
932 dma_addr_t addr = 0; 1694 int i = 0;
933 unsigned long vaddr;
934 int i;
935 struct gfar_private *priv = netdev_priv(dev);
936 struct gfar __iomem *regs = priv->regs;
937 int err = 0;
938 u32 rctrl = 0;
939 u32 tctrl = 0;
940 u32 attrs = 0;
941 1695
942 gfar_write(&regs->imask, IMASK_INIT_CLEAR); 1696 /* Backward compatible case ---- even if we enable
943 1697 * multiple queues, there's only single reg to program
944 /* Allocate memory for the buffer descriptors */ 1698 */
945 vaddr = (unsigned long) dma_alloc_coherent(&priv->ofdev->dev, 1699 gfar_write(&regs->txic, 0);
946 sizeof (struct txbd8) * priv->tx_ring_size + 1700 if(likely(priv->tx_queue[0]->txcoalescing))
947 sizeof (struct rxbd8) * priv->rx_ring_size, 1701 gfar_write(&regs->txic, priv->tx_queue[0]->txic);
948 &addr, GFP_KERNEL);
949
950 if (vaddr == 0) {
951 if (netif_msg_ifup(priv))
952 printk(KERN_ERR "%s: Could not allocate buffer descriptors!\n",
953 dev->name);
954 return -ENOMEM;
955 }
956
957 priv->tx_bd_base = (struct txbd8 *) vaddr;
958
959 /* enet DMA only understands physical addresses */
960 gfar_write(&regs->tbase0, addr);
961
962 /* Start the rx descriptor ring where the tx ring leaves off */
963 addr = addr + sizeof (struct txbd8) * priv->tx_ring_size;
964 vaddr = vaddr + sizeof (struct txbd8) * priv->tx_ring_size;
965 priv->rx_bd_base = (struct rxbd8 *) vaddr;
966 gfar_write(&regs->rbase0, addr);
967
968 /* Setup the skbuff rings */
969 priv->tx_skbuff =
970 (struct sk_buff **) kmalloc(sizeof (struct sk_buff *) *
971 priv->tx_ring_size, GFP_KERNEL);
972
973 if (NULL == priv->tx_skbuff) {
974 if (netif_msg_ifup(priv))
975 printk(KERN_ERR "%s: Could not allocate tx_skbuff\n",
976 dev->name);
977 err = -ENOMEM;
978 goto tx_skb_fail;
979 }
980
981 for (i = 0; i < priv->tx_ring_size; i++)
982 priv->tx_skbuff[i] = NULL;
983
984 priv->rx_skbuff =
985 (struct sk_buff **) kmalloc(sizeof (struct sk_buff *) *
986 priv->rx_ring_size, GFP_KERNEL);
987
988 if (NULL == priv->rx_skbuff) {
989 if (netif_msg_ifup(priv))
990 printk(KERN_ERR "%s: Could not allocate rx_skbuff\n",
991 dev->name);
992 err = -ENOMEM;
993 goto rx_skb_fail;
994 }
995
996 for (i = 0; i < priv->rx_ring_size; i++)
997 priv->rx_skbuff[i] = NULL;
998
999 /* Initialize some variables in our dev structure */
1000 priv->num_txbdfree = priv->tx_ring_size;
1001 priv->dirty_tx = priv->cur_tx = priv->tx_bd_base;
1002 priv->cur_rx = priv->rx_bd_base;
1003 priv->skb_curtx = priv->skb_dirtytx = 0;
1004 priv->skb_currx = 0;
1005
1006 /* Initialize Transmit Descriptor Ring */
1007 txbdp = priv->tx_bd_base;
1008 for (i = 0; i < priv->tx_ring_size; i++) {
1009 txbdp->lstatus = 0;
1010 txbdp->bufPtr = 0;
1011 txbdp++;
1012 }
1013
1014 /* Set the last descriptor in the ring to indicate wrap */
1015 txbdp--;
1016 txbdp->status |= TXBD_WRAP;
1017
1018 rxbdp = priv->rx_bd_base;
1019 for (i = 0; i < priv->rx_ring_size; i++) {
1020 struct sk_buff *skb;
1021
1022 skb = gfar_new_skb(dev);
1023
1024 if (!skb) {
1025 printk(KERN_ERR "%s: Can't allocate RX buffers\n",
1026 dev->name);
1027 1702
1028 goto err_rxalloc_fail; 1703 gfar_write(&regs->rxic, 0);
1704 if(unlikely(priv->rx_queue[0]->rxcoalescing))
1705 gfar_write(&regs->rxic, priv->rx_queue[0]->rxic);
1706
1707 if (priv->mode == MQ_MG_MODE) {
1708 baddr = &regs->txic0;
1709 for_each_set_bit(i, &tx_mask, priv->num_tx_queues) {
1710 if (likely(priv->tx_queue[i]->txcoalescing)) {
1711 gfar_write(baddr + i, 0);
1712 gfar_write(baddr + i, priv->tx_queue[i]->txic);
1713 }
1029 } 1714 }
1030 1715
1031 priv->rx_skbuff[i] = skb; 1716 baddr = &regs->rxic0;
1032 1717 for_each_set_bit(i, &rx_mask, priv->num_rx_queues) {
1033 gfar_new_rxbdp(dev, rxbdp, skb); 1718 if (likely(priv->rx_queue[i]->rxcoalescing)) {
1034 1719 gfar_write(baddr + i, 0);
1035 rxbdp++; 1720 gfar_write(baddr + i, priv->rx_queue[i]->rxic);
1721 }
1722 }
1036 } 1723 }
1724}
1037 1725
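In multi-group mode the tx_mask/rx_mask arguments select which per-queue coalescing registers get reprogrammed: only queues whose bit is set are visited, and each one writes its own txic0+i or rxic0+i slot. A stand-alone sketch of that bit-walk, with a plain loop replacing for_each_set_bit() and an array standing in for the register block (both assumptions, not driver code):

    #include <stdio.h>

    #define MAX_QUEUES 8

    int main(void)
    {
        unsigned long tx_mask = 0x5;                    /* queues 0 and 2 selected */
        unsigned int coalesce_setting[MAX_QUEUES] = { 0x11, 0x22, 0x33 };
        unsigned int txic_regs[MAX_QUEUES] = { 0 };     /* stands in for &regs->txic0 */

        /* Equivalent of for_each_set_bit(i, &tx_mask, num_tx_queues) */
        for (int i = 0; i < MAX_QUEUES; i++) {
            if (!(tx_mask & (1UL << i)))
                continue;
            /* Clear, then program the queue's own coalescing register. */
            txic_regs[i] = 0;
            txic_regs[i] = coalesce_setting[i];
            printf("queue %d -> txic0+%d = 0x%x\n", i, i, txic_regs[i]);
        }
        return 0;
    }
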
1038 /* Set the last descriptor in the ring to wrap */ 1726static int register_grp_irqs(struct gfar_priv_grp *grp)
1039 rxbdp--; 1727{
1040 rxbdp->status |= RXBD_WRAP; 1728 struct gfar_private *priv = grp->priv;
1729 struct net_device *dev = priv->ndev;
1730 int err;
1041 1731
1042 /* If the device has multiple interrupts, register for 1732 /* If the device has multiple interrupts, register for
1043 * them. Otherwise, only register for the one */ 1733 * them. Otherwise, only register for the one */
1044 if (priv->device_flags & FSL_GIANFAR_DEV_HAS_MULTI_INTR) { 1734 if (priv->device_flags & FSL_GIANFAR_DEV_HAS_MULTI_INTR) {
1045 /* Install our interrupt handlers for Error, 1735 /* Install our interrupt handlers for Error,
1046 * Transmit, and Receive */ 1736 * Transmit, and Receive */
1047 if (request_irq(priv->interruptError, gfar_error, 1737 if ((err = request_irq(grp->interruptError, gfar_error, 0,
1048 0, priv->int_name_er, dev) < 0) { 1738 grp->int_name_er,grp)) < 0) {
1049 if (netif_msg_intr(priv)) 1739 if (netif_msg_intr(priv))
1050 printk(KERN_ERR "%s: Can't get IRQ %d\n", 1740 printk(KERN_ERR "%s: Can't get IRQ %d\n",
1051 dev->name, priv->interruptError); 1741 dev->name, grp->interruptError);
1052 1742
1053 err = -1; 1743 goto err_irq_fail;
1054 goto err_irq_fail;
1055 } 1744 }
1056 1745
1057 if (request_irq(priv->interruptTransmit, gfar_transmit, 1746 if ((err = request_irq(grp->interruptTransmit, gfar_transmit,
1058 0, priv->int_name_tx, dev) < 0) { 1747 0, grp->int_name_tx, grp)) < 0) {
1059 if (netif_msg_intr(priv)) 1748 if (netif_msg_intr(priv))
1060 printk(KERN_ERR "%s: Can't get IRQ %d\n", 1749 printk(KERN_ERR "%s: Can't get IRQ %d\n",
1061 dev->name, priv->interruptTransmit); 1750 dev->name, grp->interruptTransmit);
1062
1063 err = -1;
1064
1065 goto tx_irq_fail; 1751 goto tx_irq_fail;
1066 } 1752 }
1067 1753
1068 if (request_irq(priv->interruptReceive, gfar_receive, 1754 if ((err = request_irq(grp->interruptReceive, gfar_receive, 0,
1069 0, priv->int_name_rx, dev) < 0) { 1755 grp->int_name_rx, grp)) < 0) {
1070 if (netif_msg_intr(priv)) 1756 if (netif_msg_intr(priv))
1071 printk(KERN_ERR "%s: Can't get IRQ %d (receive0)\n", 1757 printk(KERN_ERR "%s: Can't get IRQ %d\n",
1072 dev->name, priv->interruptReceive); 1758 dev->name, grp->interruptReceive);
1073
1074 err = -1;
1075 goto rx_irq_fail; 1759 goto rx_irq_fail;
1076 } 1760 }
1077 } else { 1761 } else {
1078 if (request_irq(priv->interruptTransmit, gfar_interrupt, 1762 if ((err = request_irq(grp->interruptTransmit, gfar_interrupt, 0,
1079 0, priv->int_name_tx, dev) < 0) { 1763 grp->int_name_tx, grp)) < 0) {
1080 if (netif_msg_intr(priv)) 1764 if (netif_msg_intr(priv))
1081 printk(KERN_ERR "%s: Can't get IRQ %d\n", 1765 printk(KERN_ERR "%s: Can't get IRQ %d\n",
1082 dev->name, priv->interruptTransmit); 1766 dev->name, grp->interruptTransmit);
1083
1084 err = -1;
1085 goto err_irq_fail; 1767 goto err_irq_fail;
1086 } 1768 }
1087 } 1769 }
1088 1770
1089 phy_start(priv->phydev); 1771 return 0;
1090
1091 /* Configure the coalescing support */
1092 gfar_write(&regs->txic, 0);
1093 if (priv->txcoalescing)
1094 gfar_write(&regs->txic, priv->txic);
1095
1096 gfar_write(&regs->rxic, 0);
1097 if (priv->rxcoalescing)
1098 gfar_write(&regs->rxic, priv->rxic);
1099
1100 if (priv->rx_csum_enable)
1101 rctrl |= RCTRL_CHECKSUMMING;
1102 1772
1103 if (priv->extended_hash) { 1773rx_irq_fail:
1104 rctrl |= RCTRL_EXTHASH; 1774 free_irq(grp->interruptTransmit, grp);
1775tx_irq_fail:
1776 free_irq(grp->interruptError, grp);
1777err_irq_fail:
1778 return err;
1105 1779
1106 gfar_clear_exact_match(dev); 1780}
1107 rctrl |= RCTRL_EMEN;
1108 }
1109 1781
1110 if (priv->padding) { 1782/* Bring the controller up and running */
1111 rctrl &= ~RCTRL_PAL_MASK; 1783int startup_gfar(struct net_device *ndev)
1112 rctrl |= RCTRL_PADDING(priv->padding); 1784{
1113 } 1785 struct gfar_private *priv = netdev_priv(ndev);
1786 struct gfar __iomem *regs = NULL;
1787 int err, i, j;
1114 1788
1115 /* keep vlan related bits if it's enabled */ 1789 for (i = 0; i < priv->num_grps; i++) {
1116 if (priv->vlgrp) { 1790 regs= priv->gfargrp[i].regs;
1117 rctrl |= RCTRL_VLEX | RCTRL_PRSDEP_INIT; 1791 gfar_write(&regs->imask, IMASK_INIT_CLEAR);
1118 tctrl |= TCTRL_VLINS;
1119 } 1792 }
1120 1793
1121 /* Init rctrl based on our settings */ 1794 regs= priv->gfargrp[0].regs;
1122 gfar_write(&priv->regs->rctrl, rctrl); 1795 err = gfar_alloc_skb_resources(ndev);
1123 1796 if (err)
1124 if (dev->features & NETIF_F_IP_CSUM) 1797 return err;
1125 tctrl |= TCTRL_INIT_CSUM;
1126
1127 gfar_write(&priv->regs->tctrl, tctrl);
1128
1129 /* Set the extraction length and index */
1130 attrs = ATTRELI_EL(priv->rx_stash_size) |
1131 ATTRELI_EI(priv->rx_stash_index);
1132
1133 gfar_write(&priv->regs->attreli, attrs);
1134
1135 /* Start with defaults, and add stashing or locking
1136 * depending on the appropriate variables */
1137 attrs = ATTR_INIT_SETTINGS;
1138 1798
1139 if (priv->bd_stash_en) 1799 gfar_init_mac(ndev);
1140 attrs |= ATTR_BDSTASH;
1141 1800
1142 if (priv->rx_stash_size != 0) 1801 for (i = 0; i < priv->num_grps; i++) {
1143 attrs |= ATTR_BUFSTASH; 1802 err = register_grp_irqs(&priv->gfargrp[i]);
1803 if (err) {
1804 for (j = 0; j < i; j++)
1805 free_grp_irqs(&priv->gfargrp[j]);
1806 goto irq_fail;
1807 }
1808 }
1144 1809
1145 gfar_write(&priv->regs->attr, attrs); 1810 /* Start the controller */
1811 gfar_start(ndev);
1146 1812
1147 gfar_write(&priv->regs->fifo_tx_thr, priv->fifo_threshold); 1813 phy_start(priv->phydev);
1148 gfar_write(&priv->regs->fifo_tx_starve, priv->fifo_starve);
1149 gfar_write(&priv->regs->fifo_tx_starve_shutoff, priv->fifo_starve_off);
1150 1814
1151 /* Start the controller */ 1815 gfar_configure_coalescing(priv, 0xFF, 0xFF);
1152 gfar_start(dev);
1153 1816
1154 return 0; 1817 return 0;
1155 1818
1156rx_irq_fail: 1819irq_fail:
1157 free_irq(priv->interruptTransmit, dev);
1158tx_irq_fail:
1159 free_irq(priv->interruptError, dev);
1160err_irq_fail:
1161err_rxalloc_fail:
1162rx_skb_fail:
1163 free_skb_resources(priv); 1820 free_skb_resources(priv);
1164tx_skb_fail:
1165 dma_free_coherent(&priv->ofdev->dev,
1166 sizeof(struct txbd8)*priv->tx_ring_size
1167 + sizeof(struct rxbd8)*priv->rx_ring_size,
1168 priv->tx_bd_base,
1169 gfar_read(&regs->tbase0));
1170
1171 return err; 1821 return err;
1172} 1822}
1173 1823
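startup_gfar() registers the IRQs of each interrupt group in turn and, if group i fails, releases groups 0..i-1 before falling through to free_skb_resources(). A compact sketch of that unwind pattern; the register/unregister helpers below are placeholders, not driver functions:

    #include <stdio.h>

    static int register_group(int i)
    {
        /* Pretend group 2 cannot get its IRQs. */
        return (i == 2) ? -1 : 0;
    }

    static void unregister_group(int i)
    {
        printf("freeing IRQs of group %d\n", i);
    }

    int main(void)
    {
        const int num_grps = 4;
        int err = 0;

        for (int i = 0; i < num_grps; i++) {
            err = register_group(i);
            if (err) {
                /* Unwind only the groups that really got registered. */
                for (int j = 0; j < i; j++)
                    unregister_group(j);
                goto irq_fail;
            }
        }
        printf("all groups registered\n");
        return 0;

    irq_fail:
        printf("startup failed, err=%d\n", err);
        return 1;
    }
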
@@ -1178,7 +1828,7 @@ static int gfar_enet_open(struct net_device *dev)
1178 struct gfar_private *priv = netdev_priv(dev); 1828 struct gfar_private *priv = netdev_priv(dev);
1179 int err; 1829 int err;
1180 1830
1181 napi_enable(&priv->napi); 1831 enable_napi(priv);
1182 1832
1183 skb_queue_head_init(&priv->rx_recycle); 1833 skb_queue_head_init(&priv->rx_recycle);
1184 1834
@@ -1189,18 +1839,18 @@ static int gfar_enet_open(struct net_device *dev)
1189 1839
1190 err = init_phy(dev); 1840 err = init_phy(dev);
1191 1841
1192 if(err) { 1842 if (err) {
1193 napi_disable(&priv->napi); 1843 disable_napi(priv);
1194 return err; 1844 return err;
1195 } 1845 }
1196 1846
1197 err = startup_gfar(dev); 1847 err = startup_gfar(dev);
1198 if (err) { 1848 if (err) {
1199 napi_disable(&priv->napi); 1849 disable_napi(priv);
1200 return err; 1850 return err;
1201 } 1851 }
1202 1852
1203 netif_start_queue(dev); 1853 netif_tx_start_all_queues(dev);
1204 1854
1205 device_set_wakeup_enable(&dev->dev, priv->wol_en); 1855 device_set_wakeup_enable(&dev->dev, priv->wol_en);
1206 1856
@@ -1269,15 +1919,23 @@ static inline struct txbd8 *next_txbd(struct txbd8 *bdp, struct txbd8 *base,
1269static int gfar_start_xmit(struct sk_buff *skb, struct net_device *dev) 1919static int gfar_start_xmit(struct sk_buff *skb, struct net_device *dev)
1270{ 1920{
1271 struct gfar_private *priv = netdev_priv(dev); 1921 struct gfar_private *priv = netdev_priv(dev);
1922 struct gfar_priv_tx_q *tx_queue = NULL;
1923 struct netdev_queue *txq;
1924 struct gfar __iomem *regs = NULL;
1272 struct txfcb *fcb = NULL; 1925 struct txfcb *fcb = NULL;
1273 struct txbd8 *txbdp, *txbdp_start, *base; 1926 struct txbd8 *txbdp, *txbdp_start, *base;
1274 u32 lstatus; 1927 u32 lstatus;
1275 int i; 1928 int i, rq = 0;
1276 u32 bufaddr; 1929 u32 bufaddr;
1277 unsigned long flags; 1930 unsigned long flags;
1278 unsigned int nr_frags, length; 1931 unsigned int nr_frags, length;
1279 1932
1280 base = priv->tx_bd_base; 1933
1934 rq = skb->queue_mapping;
1935 tx_queue = priv->tx_queue[rq];
1936 txq = netdev_get_tx_queue(dev, rq);
1937 base = tx_queue->tx_bd_base;
1938 regs = tx_queue->grp->regs;
1281 1939
1282 /* make space for additional header when fcb is needed */ 1940 /* make space for additional header when fcb is needed */
1283 if (((skb->ip_summed == CHECKSUM_PARTIAL) || 1941 if (((skb->ip_summed == CHECKSUM_PARTIAL) ||
@@ -1298,21 +1956,19 @@ static int gfar_start_xmit(struct sk_buff *skb, struct net_device *dev)
1298 /* total number of fragments in the SKB */ 1956 /* total number of fragments in the SKB */
1299 nr_frags = skb_shinfo(skb)->nr_frags; 1957 nr_frags = skb_shinfo(skb)->nr_frags;
1300 1958
1301 spin_lock_irqsave(&priv->txlock, flags);
1302
1303 /* check if there is space to queue this packet */ 1959 /* check if there is space to queue this packet */
1304 if ((nr_frags+1) > priv->num_txbdfree) { 1960 if ((nr_frags+1) > tx_queue->num_txbdfree) {
1305 /* no space, stop the queue */ 1961 /* no space, stop the queue */
1306 netif_stop_queue(dev); 1962 netif_tx_stop_queue(txq);
1307 dev->stats.tx_fifo_errors++; 1963 dev->stats.tx_fifo_errors++;
1308 spin_unlock_irqrestore(&priv->txlock, flags);
1309 return NETDEV_TX_BUSY; 1964 return NETDEV_TX_BUSY;
1310 } 1965 }
1311 1966
1312 /* Update transmit stats */ 1967 /* Update transmit stats */
1313 dev->stats.tx_bytes += skb->len; 1968 txq->tx_bytes += skb->len;
1969 txq->tx_packets ++;
1314 1970
1315 txbdp = txbdp_start = priv->cur_tx; 1971 txbdp = txbdp_start = tx_queue->cur_tx;
1316 1972
1317 if (nr_frags == 0) { 1973 if (nr_frags == 0) {
1318 lstatus = txbdp->lstatus | BD_LFLAG(TXBD_LAST | TXBD_INTERRUPT); 1974 lstatus = txbdp->lstatus | BD_LFLAG(TXBD_LAST | TXBD_INTERRUPT);
@@ -1320,7 +1976,7 @@ static int gfar_start_xmit(struct sk_buff *skb, struct net_device *dev)
1320 /* Place the fragment addresses and lengths into the TxBDs */ 1976 /* Place the fragment addresses and lengths into the TxBDs */
1321 for (i = 0; i < nr_frags; i++) { 1977 for (i = 0; i < nr_frags; i++) {
1322 /* Point at the next BD, wrapping as needed */ 1978 /* Point at the next BD, wrapping as needed */
1323 txbdp = next_txbd(txbdp, base, priv->tx_ring_size); 1979 txbdp = next_txbd(txbdp, base, tx_queue->tx_ring_size);
1324 1980
1325 length = skb_shinfo(skb)->frags[i].size; 1981 length = skb_shinfo(skb)->frags[i].size;
1326 1982
@@ -1362,13 +2018,26 @@ static int gfar_start_xmit(struct sk_buff *skb, struct net_device *dev)
1362 } 2018 }
1363 2019
1364 /* setup the TxBD length and buffer pointer for the first BD */ 2020 /* setup the TxBD length and buffer pointer for the first BD */
1365 priv->tx_skbuff[priv->skb_curtx] = skb;
1366 txbdp_start->bufPtr = dma_map_single(&priv->ofdev->dev, skb->data, 2021 txbdp_start->bufPtr = dma_map_single(&priv->ofdev->dev, skb->data,
1367 skb_headlen(skb), DMA_TO_DEVICE); 2022 skb_headlen(skb), DMA_TO_DEVICE);
1368 2023
1369 lstatus |= BD_LFLAG(TXBD_CRC | TXBD_READY) | skb_headlen(skb); 2024 lstatus |= BD_LFLAG(TXBD_CRC | TXBD_READY) | skb_headlen(skb);
1370 2025
1371 /* 2026 /*
2027 * We can work in parallel with gfar_clean_tx_ring(), except
2028 * when modifying num_txbdfree. Note that we didn't grab the lock
2029 * when we were reading the num_txbdfree and checking for available
2030 * space, that's because outside of this function it can only grow,
2031 * and once we've got needed space, it cannot suddenly disappear.
2032 *
2033 * The lock also protects us from gfar_error(), which can modify
2034 * regs->tstat and thus retrigger the transfers, which is why we
2035 * also must grab the lock before setting ready bit for the first
2036 * to be transmitted BD.
2037 */
2038 spin_lock_irqsave(&tx_queue->txlock, flags);
2039
2040 /*
1372 * The powerpc-specific eieio() is used, as wmb() has too strong 2041 * The powerpc-specific eieio() is used, as wmb() has too strong
1373 * semantics (it requires synchronization between cacheable and 2042 * semantics (it requires synchronization between cacheable and
1374 * uncacheable mappings, which eieio doesn't provide and which we 2043 * uncacheable mappings, which eieio doesn't provide and which we
@@ -1380,31 +2049,35 @@ static int gfar_start_xmit(struct sk_buff *skb, struct net_device *dev)
1380 2049
1381 txbdp_start->lstatus = lstatus; 2050 txbdp_start->lstatus = lstatus;
1382 2051
2052 eieio(); /* force lstatus write before tx_skbuff */
2053
2054 tx_queue->tx_skbuff[tx_queue->skb_curtx] = skb;
2055
1383 /* Update the current skb pointer to the next entry we will use 2056 /* Update the current skb pointer to the next entry we will use
1384 * (wrapping if necessary) */ 2057 * (wrapping if necessary) */
1385 priv->skb_curtx = (priv->skb_curtx + 1) & 2058 tx_queue->skb_curtx = (tx_queue->skb_curtx + 1) &
1386 TX_RING_MOD_MASK(priv->tx_ring_size); 2059 TX_RING_MOD_MASK(tx_queue->tx_ring_size);
1387 2060
1388 priv->cur_tx = next_txbd(txbdp, base, priv->tx_ring_size); 2061 tx_queue->cur_tx = next_txbd(txbdp, base, tx_queue->tx_ring_size);
1389 2062
1390 /* reduce TxBD free count */ 2063 /* reduce TxBD free count */
1391 priv->num_txbdfree -= (nr_frags + 1); 2064 tx_queue->num_txbdfree -= (nr_frags + 1);
1392 2065
1393 dev->trans_start = jiffies; 2066 dev->trans_start = jiffies;
1394 2067
1395 /* If the next BD still needs to be cleaned up, then the bds 2068 /* If the next BD still needs to be cleaned up, then the bds
1396 are full. We need to tell the kernel to stop sending us stuff. */ 2069 are full. We need to tell the kernel to stop sending us stuff. */
1397 if (!priv->num_txbdfree) { 2070 if (!tx_queue->num_txbdfree) {
1398 netif_stop_queue(dev); 2071 netif_tx_stop_queue(txq);
1399 2072
1400 dev->stats.tx_fifo_errors++; 2073 dev->stats.tx_fifo_errors++;
1401 } 2074 }
1402 2075
1403 /* Tell the DMA to go go go */ 2076 /* Tell the DMA to go go go */
1404 gfar_write(&priv->regs->tstat, TSTAT_CLEAR_THALT); 2077 gfar_write(&regs->tstat, TSTAT_CLEAR_THALT >> tx_queue->qindex);
1405 2078
1406 /* Unlock priv */ 2079 /* Unlock priv */
1407 spin_unlock_irqrestore(&priv->txlock, flags); 2080 spin_unlock_irqrestore(&tx_queue->txlock, flags);
1408 2081
1409 return NETDEV_TX_OK; 2082 return NETDEV_TX_OK;
1410} 2083}
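The new locking scheme in gfar_start_xmit() relies on num_txbdfree only ever growing outside this path (the cleanup side returns descriptors), so the free-space check can run unlocked; the spinlock is taken only around the final commit, where the READY bit is set and the free count is decremented. A user-space sketch of the idea, with a pthread mutex standing in for the per-queue spinlock and the function names purely illustrative:

    #include <pthread.h>
    #include <stdio.h>

    static pthread_mutex_t txlock = PTHREAD_MUTEX_INITIALIZER;
    static int num_txbdfree = 64;           /* free descriptors in the ring */

    /* Transmit path: unlocked check, locked commit. */
    static int xmit(int descs_needed)
    {
        /*
         * Safe to read without the lock: concurrent cleanup can only make
         * num_txbdfree larger, so a value that looks sufficient stays
         * sufficient until we decrement it ourselves.
         */
        if (descs_needed > num_txbdfree)
            return -1;                      /* would be NETDEV_TX_BUSY */

        pthread_mutex_lock(&txlock);
        /* set the READY bit on the first BD, then shrink the free count */
        num_txbdfree -= descs_needed;
        pthread_mutex_unlock(&txlock);
        return 0;
    }

    /* Cleanup path: only ever returns descriptors to the ring. */
    static void clean_tx_ring(int descs_reclaimed)
    {
        pthread_mutex_lock(&txlock);
        num_txbdfree += descs_reclaimed;
        pthread_mutex_unlock(&txlock);
    }

    int main(void)
    {
        printf("xmit(3) -> %d, free=%d\n", xmit(3), num_txbdfree);
        clean_tx_ring(3);
        printf("after clean, free=%d\n", num_txbdfree);
        return 0;
    }
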
@@ -1414,9 +2087,8 @@ static int gfar_close(struct net_device *dev)
1414{ 2087{
1415 struct gfar_private *priv = netdev_priv(dev); 2088 struct gfar_private *priv = netdev_priv(dev);
1416 2089
1417 napi_disable(&priv->napi); 2090 disable_napi(priv);
1418 2091
1419 skb_queue_purge(&priv->rx_recycle);
1420 cancel_work_sync(&priv->reset_task); 2092 cancel_work_sync(&priv->reset_task);
1421 stop_gfar(dev); 2093 stop_gfar(dev);
1422 2094
@@ -1424,7 +2096,7 @@ static int gfar_close(struct net_device *dev)
1424 phy_disconnect(priv->phydev); 2096 phy_disconnect(priv->phydev);
1425 priv->phydev = NULL; 2097 priv->phydev = NULL;
1426 2098
1427 netif_stop_queue(dev); 2099 netif_tx_stop_all_queues(dev);
1428 2100
1429 return 0; 2101 return 0;
1430} 2102}
@@ -1443,50 +2115,55 @@ static void gfar_vlan_rx_register(struct net_device *dev,
1443 struct vlan_group *grp) 2115 struct vlan_group *grp)
1444{ 2116{
1445 struct gfar_private *priv = netdev_priv(dev); 2117 struct gfar_private *priv = netdev_priv(dev);
2118 struct gfar __iomem *regs = NULL;
1446 unsigned long flags; 2119 unsigned long flags;
1447 u32 tempval; 2120 u32 tempval;
1448 2121
1449 spin_lock_irqsave(&priv->rxlock, flags); 2122 regs = priv->gfargrp[0].regs;
2123 local_irq_save(flags);
2124 lock_rx_qs(priv);
1450 2125
1451 priv->vlgrp = grp; 2126 priv->vlgrp = grp;
1452 2127
1453 if (grp) { 2128 if (grp) {
1454 /* Enable VLAN tag insertion */ 2129 /* Enable VLAN tag insertion */
1455 tempval = gfar_read(&priv->regs->tctrl); 2130 tempval = gfar_read(&regs->tctrl);
1456 tempval |= TCTRL_VLINS; 2131 tempval |= TCTRL_VLINS;
1457 2132
1458 gfar_write(&priv->regs->tctrl, tempval); 2133 gfar_write(&regs->tctrl, tempval);
1459 2134
1460 /* Enable VLAN tag extraction */ 2135 /* Enable VLAN tag extraction */
1461 tempval = gfar_read(&priv->regs->rctrl); 2136 tempval = gfar_read(&regs->rctrl);
1462 tempval |= (RCTRL_VLEX | RCTRL_PRSDEP_INIT); 2137 tempval |= (RCTRL_VLEX | RCTRL_PRSDEP_INIT);
1463 gfar_write(&priv->regs->rctrl, tempval); 2138 gfar_write(&regs->rctrl, tempval);
1464 } else { 2139 } else {
1465 /* Disable VLAN tag insertion */ 2140 /* Disable VLAN tag insertion */
1466 tempval = gfar_read(&priv->regs->tctrl); 2141 tempval = gfar_read(&regs->tctrl);
1467 tempval &= ~TCTRL_VLINS; 2142 tempval &= ~TCTRL_VLINS;
1468 gfar_write(&priv->regs->tctrl, tempval); 2143 gfar_write(&regs->tctrl, tempval);
1469 2144
1470 /* Disable VLAN tag extraction */ 2145 /* Disable VLAN tag extraction */
1471 tempval = gfar_read(&priv->regs->rctrl); 2146 tempval = gfar_read(&regs->rctrl);
1472 tempval &= ~RCTRL_VLEX; 2147 tempval &= ~RCTRL_VLEX;
1473 /* If parse is no longer required, then disable parser */ 2148 /* If parse is no longer required, then disable parser */
1474 if (tempval & RCTRL_REQ_PARSER) 2149 if (tempval & RCTRL_REQ_PARSER)
1475 tempval |= RCTRL_PRSDEP_INIT; 2150 tempval |= RCTRL_PRSDEP_INIT;
1476 else 2151 else
1477 tempval &= ~RCTRL_PRSDEP_INIT; 2152 tempval &= ~RCTRL_PRSDEP_INIT;
1478 gfar_write(&priv->regs->rctrl, tempval); 2153 gfar_write(&regs->rctrl, tempval);
1479 } 2154 }
1480 2155
1481 gfar_change_mtu(dev, dev->mtu); 2156 gfar_change_mtu(dev, dev->mtu);
1482 2157
1483 spin_unlock_irqrestore(&priv->rxlock, flags); 2158 unlock_rx_qs(priv);
2159 local_irq_restore(flags);
1484} 2160}
1485 2161
1486static int gfar_change_mtu(struct net_device *dev, int new_mtu) 2162static int gfar_change_mtu(struct net_device *dev, int new_mtu)
1487{ 2163{
1488 int tempsize, tempval; 2164 int tempsize, tempval;
1489 struct gfar_private *priv = netdev_priv(dev); 2165 struct gfar_private *priv = netdev_priv(dev);
2166 struct gfar __iomem *regs = priv->gfargrp[0].regs;
1490 int oldsize = priv->rx_buffer_size; 2167 int oldsize = priv->rx_buffer_size;
1491 int frame_size = new_mtu + ETH_HLEN; 2168 int frame_size = new_mtu + ETH_HLEN;
1492 2169
@@ -1518,20 +2195,20 @@ static int gfar_change_mtu(struct net_device *dev, int new_mtu)
1518 2195
1519 dev->mtu = new_mtu; 2196 dev->mtu = new_mtu;
1520 2197
1521 gfar_write(&priv->regs->mrblr, priv->rx_buffer_size); 2198 gfar_write(&regs->mrblr, priv->rx_buffer_size);
1522 gfar_write(&priv->regs->maxfrm, priv->rx_buffer_size); 2199 gfar_write(&regs->maxfrm, priv->rx_buffer_size);
1523 2200
1524 /* If the mtu is larger than the max size for standard 2201 /* If the mtu is larger than the max size for standard
1525 * ethernet frames (ie, a jumbo frame), then set maccfg2 2202 * ethernet frames (ie, a jumbo frame), then set maccfg2
1526 * to allow huge frames, and to check the length */ 2203 * to allow huge frames, and to check the length */
1527 tempval = gfar_read(&priv->regs->maccfg2); 2204 tempval = gfar_read(&regs->maccfg2);
1528 2205
1529 if (priv->rx_buffer_size > DEFAULT_RX_BUFFER_SIZE) 2206 if (priv->rx_buffer_size > DEFAULT_RX_BUFFER_SIZE)
1530 tempval |= (MACCFG2_HUGEFRAME | MACCFG2_LENGTHCHECK); 2207 tempval |= (MACCFG2_HUGEFRAME | MACCFG2_LENGTHCHECK);
1531 else 2208 else
1532 tempval &= ~(MACCFG2_HUGEFRAME | MACCFG2_LENGTHCHECK); 2209 tempval &= ~(MACCFG2_HUGEFRAME | MACCFG2_LENGTHCHECK);
1533 2210
1534 gfar_write(&priv->regs->maccfg2, tempval); 2211 gfar_write(&regs->maccfg2, tempval);
1535 2212
1536 if ((oldsize != tempsize) && (dev->flags & IFF_UP)) 2213 if ((oldsize != tempsize) && (dev->flags & IFF_UP))
1537 startup_gfar(dev); 2214 startup_gfar(dev);
@@ -1551,10 +2228,10 @@ static void gfar_reset_task(struct work_struct *work)
1551 struct net_device *dev = priv->ndev; 2228 struct net_device *dev = priv->ndev;
1552 2229
1553 if (dev->flags & IFF_UP) { 2230 if (dev->flags & IFF_UP) {
1554 netif_stop_queue(dev); 2231 netif_tx_stop_all_queues(dev);
1555 stop_gfar(dev); 2232 stop_gfar(dev);
1556 startup_gfar(dev); 2233 startup_gfar(dev);
1557 netif_start_queue(dev); 2234 netif_tx_start_all_queues(dev);
1558 } 2235 }
1559 2236
1560 netif_tx_schedule_all(dev); 2237 netif_tx_schedule_all(dev);
@@ -1569,24 +2246,29 @@ static void gfar_timeout(struct net_device *dev)
1569} 2246}
1570 2247
1571/* Interrupt Handler for Transmit complete */ 2248/* Interrupt Handler for Transmit complete */
1572static int gfar_clean_tx_ring(struct net_device *dev) 2249static int gfar_clean_tx_ring(struct gfar_priv_tx_q *tx_queue)
1573{ 2250{
2251 struct net_device *dev = tx_queue->dev;
1574 struct gfar_private *priv = netdev_priv(dev); 2252 struct gfar_private *priv = netdev_priv(dev);
2253 struct gfar_priv_rx_q *rx_queue = NULL;
1575 struct txbd8 *bdp; 2254 struct txbd8 *bdp;
1576 struct txbd8 *lbdp = NULL; 2255 struct txbd8 *lbdp = NULL;
1577 struct txbd8 *base = priv->tx_bd_base; 2256 struct txbd8 *base = tx_queue->tx_bd_base;
1578 struct sk_buff *skb; 2257 struct sk_buff *skb;
1579 int skb_dirtytx; 2258 int skb_dirtytx;
1580 int tx_ring_size = priv->tx_ring_size; 2259 int tx_ring_size = tx_queue->tx_ring_size;
1581 int frags = 0; 2260 int frags = 0;
1582 int i; 2261 int i;
1583 int howmany = 0; 2262 int howmany = 0;
1584 u32 lstatus; 2263 u32 lstatus;
1585 2264
1586 bdp = priv->dirty_tx; 2265 rx_queue = priv->rx_queue[tx_queue->qindex];
1587 skb_dirtytx = priv->skb_dirtytx; 2266 bdp = tx_queue->dirty_tx;
2267 skb_dirtytx = tx_queue->skb_dirtytx;
2268
2269 while ((skb = tx_queue->tx_skbuff[skb_dirtytx])) {
2270 unsigned long flags;
1588 2271
1589 while ((skb = priv->tx_skbuff[skb_dirtytx])) {
1590 frags = skb_shinfo(skb)->nr_frags; 2272 frags = skb_shinfo(skb)->nr_frags;
1591 lbdp = skip_txbd(bdp, frags, base, tx_ring_size); 2273 lbdp = skip_txbd(bdp, frags, base, tx_ring_size);
1592 2274
@@ -1618,82 +2300,71 @@ static int gfar_clean_tx_ring(struct net_device *dev)
1618 * If there's room in the queue (limit it to rx_buffer_size) 2300 * If there's room in the queue (limit it to rx_buffer_size)
1619 * we add this skb back into the pool, if it's the right size 2301 * we add this skb back into the pool, if it's the right size
1620 */ 2302 */
1621 if (skb_queue_len(&priv->rx_recycle) < priv->rx_ring_size && 2303 if (skb_queue_len(&priv->rx_recycle) < rx_queue->rx_ring_size &&
1622 skb_recycle_check(skb, priv->rx_buffer_size + 2304 skb_recycle_check(skb, priv->rx_buffer_size +
1623 RXBUF_ALIGNMENT)) 2305 RXBUF_ALIGNMENT))
1624 __skb_queue_head(&priv->rx_recycle, skb); 2306 __skb_queue_head(&priv->rx_recycle, skb);
1625 else 2307 else
1626 dev_kfree_skb_any(skb); 2308 dev_kfree_skb_any(skb);
1627 2309
1628 priv->tx_skbuff[skb_dirtytx] = NULL; 2310 tx_queue->tx_skbuff[skb_dirtytx] = NULL;
1629 2311
1630 skb_dirtytx = (skb_dirtytx + 1) & 2312 skb_dirtytx = (skb_dirtytx + 1) &
1631 TX_RING_MOD_MASK(tx_ring_size); 2313 TX_RING_MOD_MASK(tx_ring_size);
1632 2314
1633 howmany++; 2315 howmany++;
1634 priv->num_txbdfree += frags + 1; 2316 spin_lock_irqsave(&tx_queue->txlock, flags);
2317 tx_queue->num_txbdfree += frags + 1;
2318 spin_unlock_irqrestore(&tx_queue->txlock, flags);
1635 } 2319 }
1636 2320
1637 /* If we freed a buffer, we can restart transmission, if necessary */ 2321 /* If we freed a buffer, we can restart transmission, if necessary */
1638 if (netif_queue_stopped(dev) && priv->num_txbdfree) 2322 if (__netif_subqueue_stopped(dev, tx_queue->qindex) && tx_queue->num_txbdfree)
1639 netif_wake_queue(dev); 2323 netif_wake_subqueue(dev, tx_queue->qindex);
1640 2324
1641 /* Update dirty indicators */ 2325 /* Update dirty indicators */
1642 priv->skb_dirtytx = skb_dirtytx; 2326 tx_queue->skb_dirtytx = skb_dirtytx;
1643 priv->dirty_tx = bdp; 2327 tx_queue->dirty_tx = bdp;
1644
1645 dev->stats.tx_packets += howmany;
1646 2328
1647 return howmany; 2329 return howmany;
1648} 2330}
1649 2331
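Cleanup walks the ring with a dirty index that wraps using a power-of-two mask, reclaiming frags + 1 descriptors per completed skb (one for the head plus one per fragment). A tiny sketch of that index arithmetic, assuming an 8-entry ring:

    #include <stdio.h>

    #define TX_RING_SIZE 8                          /* must be a power of two */
    #define TX_RING_MOD_MASK(sz) ((sz) - 1)

    int main(void)
    {
        int skb_dirtytx = 6;                        /* next skb slot to reclaim */
        int num_txbdfree = 2;
        int frags = 2;                              /* skb had two fragments */

        /* One skb consumed frags + 1 descriptors; give them all back. */
        num_txbdfree += frags + 1;

        /* Advance the dirty slot, wrapping at the end of the ring. */
        skb_dirtytx = (skb_dirtytx + 1) & TX_RING_MOD_MASK(TX_RING_SIZE);

        printf("dirtytx=%d, free descriptors=%d\n", skb_dirtytx, num_txbdfree);
        return 0;
    }
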
1650static void gfar_schedule_cleanup(struct net_device *dev) 2332static void gfar_schedule_cleanup(struct gfar_priv_grp *gfargrp)
1651{ 2333{
1652 struct gfar_private *priv = netdev_priv(dev);
1653 unsigned long flags; 2334 unsigned long flags;
1654 2335
1655 spin_lock_irqsave(&priv->txlock, flags); 2336 spin_lock_irqsave(&gfargrp->grplock, flags);
1656 spin_lock(&priv->rxlock); 2337 if (napi_schedule_prep(&gfargrp->napi)) {
1657 2338 gfar_write(&gfargrp->regs->imask, IMASK_RTX_DISABLED);
1658 if (napi_schedule_prep(&priv->napi)) { 2339 __napi_schedule(&gfargrp->napi);
1659 gfar_write(&priv->regs->imask, IMASK_RTX_DISABLED);
1660 __napi_schedule(&priv->napi);
1661 } else { 2340 } else {
1662 /* 2341 /*
1663 * Clear IEVENT, so interrupts aren't called again 2342 * Clear IEVENT, so interrupts aren't called again
1664 * because of the packets that have already arrived. 2343 * because of the packets that have already arrived.
1665 */ 2344 */
1666 gfar_write(&priv->regs->ievent, IEVENT_RTX_MASK); 2345 gfar_write(&gfargrp->regs->ievent, IEVENT_RTX_MASK);
1667 } 2346 }
2347 spin_unlock_irqrestore(&gfargrp->grplock, flags);
1668 2348
1669 spin_unlock(&priv->rxlock);
1670 spin_unlock_irqrestore(&priv->txlock, flags);
1671} 2349}
1672 2350
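gfar_schedule_cleanup() now takes a single per-group lock and uses napi_schedule_prep() as a first-caller-wins gate: the winner masks the RX/TX interrupts and schedules the poll, while a later caller only acknowledges the events so they are not re-raised for packets already queued. A rough sketch of that gate, using a C11 atomic flag in place of the NAPI state bit (purely illustrative, not the NAPI implementation):

    #include <stdatomic.h>
    #include <stdbool.h>
    #include <stdio.h>

    static atomic_bool napi_scheduled = false;

    /* Models napi_schedule_prep(): true only for the first caller. */
    static bool schedule_prep(void)
    {
        bool expected = false;
        return atomic_compare_exchange_strong(&napi_scheduled, &expected, true);
    }

    static void handle_irq(void)
    {
        if (schedule_prep()) {
            printf("mask IMASK_RTX, schedule poll\n");
        } else {
            /* Poll already pending: just ack IEVENT so it doesn't refire. */
            printf("ack IEVENT_RTX only\n");
        }
    }

    int main(void)
    {
        handle_irq();   /* first interrupt schedules the poll */
        handle_irq();   /* second one only acknowledges */
        return 0;
    }
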
1673/* Interrupt Handler for Transmit complete */ 2351/* Interrupt Handler for Transmit complete */
1674static irqreturn_t gfar_transmit(int irq, void *dev_id) 2352static irqreturn_t gfar_transmit(int irq, void *grp_id)
1675{ 2353{
1676 gfar_schedule_cleanup((struct net_device *)dev_id); 2354 gfar_schedule_cleanup((struct gfar_priv_grp *)grp_id);
1677 return IRQ_HANDLED; 2355 return IRQ_HANDLED;
1678} 2356}
1679 2357
1680static void gfar_new_rxbdp(struct net_device *dev, struct rxbd8 *bdp, 2358static void gfar_new_rxbdp(struct gfar_priv_rx_q *rx_queue, struct rxbd8 *bdp,
1681 struct sk_buff *skb) 2359 struct sk_buff *skb)
1682{ 2360{
2361 struct net_device *dev = rx_queue->dev;
1683 struct gfar_private *priv = netdev_priv(dev); 2362 struct gfar_private *priv = netdev_priv(dev);
1684 u32 lstatus; 2363 dma_addr_t buf;
1685
1686 bdp->bufPtr = dma_map_single(&priv->ofdev->dev, skb->data,
1687 priv->rx_buffer_size, DMA_FROM_DEVICE);
1688 2364
1689 lstatus = BD_LFLAG(RXBD_EMPTY | RXBD_INTERRUPT); 2365 buf = dma_map_single(&priv->ofdev->dev, skb->data,
1690 2366 priv->rx_buffer_size, DMA_FROM_DEVICE);
1691 if (bdp == priv->rx_bd_base + priv->rx_ring_size - 1) 2367 gfar_init_rxbdp(rx_queue, bdp, buf);
1692 lstatus |= BD_LFLAG(RXBD_WRAP);
1693
1694 eieio();
1695
1696 bdp->lstatus = lstatus;
1697} 2368}
1698 2369
1699 2370
@@ -1718,6 +2389,7 @@ struct sk_buff * gfar_new_skb(struct net_device *dev)
1718 * as many bytes as needed to align the data properly 2389 * as many bytes as needed to align the data properly
1719 */ 2390 */
1720 skb_reserve(skb, alignamount); 2391 skb_reserve(skb, alignamount);
2392 GFAR_CB(skb)->alignamount = alignamount;
1721 2393
1722 return skb; 2394 return skb;
1723} 2395}
@@ -1760,9 +2432,9 @@ static inline void count_errors(unsigned short status, struct net_device *dev)
1760 } 2432 }
1761} 2433}
1762 2434
1763irqreturn_t gfar_receive(int irq, void *dev_id) 2435irqreturn_t gfar_receive(int irq, void *grp_id)
1764{ 2436{
1765 gfar_schedule_cleanup((struct net_device *)dev_id); 2437 gfar_schedule_cleanup((struct gfar_priv_grp *)grp_id);
1766 return IRQ_HANDLED; 2438 return IRQ_HANDLED;
1767} 2439}
1768 2440
@@ -1793,8 +2465,10 @@ static int gfar_process_frame(struct net_device *dev, struct sk_buff *skb,
1793 2465
1794 /* Remove the FCB from the skb */ 2466 /* Remove the FCB from the skb */
1795 /* Remove the padded bytes, if there are any */ 2467 /* Remove the padded bytes, if there are any */
1796 if (amount_pull) 2468 if (amount_pull) {
2469 skb_record_rx_queue(skb, fcb->rq);
1797 skb_pull(skb, amount_pull); 2470 skb_pull(skb, amount_pull);
2471 }
1798 2472
1799 if (priv->rx_csum_enable) 2473 if (priv->rx_csum_enable)
1800 gfar_rx_checksum(skb, fcb); 2474 gfar_rx_checksum(skb, fcb);
@@ -1818,8 +2492,9 @@ static int gfar_process_frame(struct net_device *dev, struct sk_buff *skb,
1818 * until the budget/quota has been reached. Returns the number 2492 * until the budget/quota has been reached. Returns the number
1819 * of frames handled 2493 * of frames handled
1820 */ 2494 */
1821int gfar_clean_rx_ring(struct net_device *dev, int rx_work_limit) 2495int gfar_clean_rx_ring(struct gfar_priv_rx_q *rx_queue, int rx_work_limit)
1822{ 2496{
2497 struct net_device *dev = rx_queue->dev;
1823 struct rxbd8 *bdp, *base; 2498 struct rxbd8 *bdp, *base;
1824 struct sk_buff *skb; 2499 struct sk_buff *skb;
1825 int pkt_len; 2500 int pkt_len;
@@ -1828,8 +2503,8 @@ int gfar_clean_rx_ring(struct net_device *dev, int rx_work_limit)
1828 struct gfar_private *priv = netdev_priv(dev); 2503 struct gfar_private *priv = netdev_priv(dev);
1829 2504
1830 /* Get the first full descriptor */ 2505 /* Get the first full descriptor */
1831 bdp = priv->cur_rx; 2506 bdp = rx_queue->cur_rx;
1832 base = priv->rx_bd_base; 2507 base = rx_queue->rx_bd_base;
1833 2508
1834 amount_pull = (gfar_uses_fcb(priv) ? GMAC_FCB_LEN : 0) + 2509 amount_pull = (gfar_uses_fcb(priv) ? GMAC_FCB_LEN : 0) +
1835 priv->padding; 2510 priv->padding;
@@ -1841,7 +2516,7 @@ int gfar_clean_rx_ring(struct net_device *dev, int rx_work_limit)
1841 /* Add another skb for the future */ 2516 /* Add another skb for the future */
1842 newskb = gfar_new_skb(dev); 2517 newskb = gfar_new_skb(dev);
1843 2518
1844 skb = priv->rx_skbuff[priv->skb_currx]; 2519 skb = rx_queue->rx_skbuff[rx_queue->skb_currx];
1845 2520
1846 dma_unmap_single(&priv->ofdev->dev, bdp->bufPtr, 2521 dma_unmap_single(&priv->ofdev->dev, bdp->bufPtr,
1847 priv->rx_buffer_size, DMA_FROM_DEVICE); 2522 priv->rx_buffer_size, DMA_FROM_DEVICE);
@@ -1855,80 +2530,102 @@ int gfar_clean_rx_ring(struct net_device *dev, int rx_work_limit)
1855 newskb = skb; 2530 newskb = skb;
1856 else if (skb) { 2531 else if (skb) {
1857 /* 2532 /*
1858 * We need to reset ->data to what it 2533 * We need to un-reserve() the skb to what it
1859 * was before gfar_new_skb() re-aligned 2534 * was before gfar_new_skb() re-aligned
1860 * it to an RXBUF_ALIGNMENT boundary 2535 * it to an RXBUF_ALIGNMENT boundary
1861 * before we put the skb back on the 2536 * before we put the skb back on the
1862 * recycle list. 2537 * recycle list.
1863 */ 2538 */
1864 skb->data = skb->head + NET_SKB_PAD; 2539 skb_reserve(skb, -GFAR_CB(skb)->alignamount);
1865 __skb_queue_head(&priv->rx_recycle, skb); 2540 __skb_queue_head(&priv->rx_recycle, skb);
1866 } 2541 }
1867 } else { 2542 } else {
1868 /* Increment the number of packets */ 2543 /* Increment the number of packets */
1869 dev->stats.rx_packets++; 2544 rx_queue->stats.rx_packets++;
1870 howmany++; 2545 howmany++;
1871 2546
1872 if (likely(skb)) { 2547 if (likely(skb)) {
1873 pkt_len = bdp->length - ETH_FCS_LEN; 2548 pkt_len = bdp->length - ETH_FCS_LEN;
1874 /* Remove the FCS from the packet length */ 2549 /* Remove the FCS from the packet length */
1875 skb_put(skb, pkt_len); 2550 skb_put(skb, pkt_len);
1876 dev->stats.rx_bytes += pkt_len; 2551 rx_queue->stats.rx_bytes += pkt_len;
1877 2552 skb_record_rx_queue(skb, rx_queue->qindex);
1878 if (in_irq() || irqs_disabled())
1879 printk("Interrupt problem!\n");
1880 gfar_process_frame(dev, skb, amount_pull); 2553 gfar_process_frame(dev, skb, amount_pull);
1881 2554
1882 } else { 2555 } else {
1883 if (netif_msg_rx_err(priv)) 2556 if (netif_msg_rx_err(priv))
1884 printk(KERN_WARNING 2557 printk(KERN_WARNING
1885 "%s: Missing skb!\n", dev->name); 2558 "%s: Missing skb!\n", dev->name);
1886 dev->stats.rx_dropped++; 2559 rx_queue->stats.rx_dropped++;
1887 priv->extra_stats.rx_skbmissing++; 2560 priv->extra_stats.rx_skbmissing++;
1888 } 2561 }
1889 2562
1890 } 2563 }
1891 2564
1892 priv->rx_skbuff[priv->skb_currx] = newskb; 2565 rx_queue->rx_skbuff[rx_queue->skb_currx] = newskb;
1893 2566
1894 /* Setup the new bdp */ 2567 /* Setup the new bdp */
1895 gfar_new_rxbdp(dev, bdp, newskb); 2568 gfar_new_rxbdp(rx_queue, bdp, newskb);
1896 2569
1897 /* Update to the next pointer */ 2570 /* Update to the next pointer */
1898 bdp = next_bd(bdp, base, priv->rx_ring_size); 2571 bdp = next_bd(bdp, base, rx_queue->rx_ring_size);
1899 2572
1900 /* update to point at the next skb */ 2573 /* update to point at the next skb */
1901 priv->skb_currx = 2574 rx_queue->skb_currx =
1902 (priv->skb_currx + 1) & 2575 (rx_queue->skb_currx + 1) &
1903 RX_RING_MOD_MASK(priv->rx_ring_size); 2576 RX_RING_MOD_MASK(rx_queue->rx_ring_size);
1904 } 2577 }
1905 2578
1906 /* Update the current rxbd pointer to be the next one */ 2579 /* Update the current rxbd pointer to be the next one */
1907 priv->cur_rx = bdp; 2580 rx_queue->cur_rx = bdp;
1908 2581
1909 return howmany; 2582 return howmany;
1910} 2583}
1911 2584
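When a received buffer goes back on the recycle list, the driver must first undo the alignment reserve that gfar_new_skb() applied, which is why the alignment amount is now stashed in the skb control block and removed with a negative skb_reserve(). A small pointer-arithmetic sketch of the same bookkeeping, using plain buffers instead of skbs and an arbitrary alignment value:

    #include <stdio.h>
    #include <stdint.h>

    #define RXBUF_ALIGNMENT 64

    struct fake_skb {
        char buf[2048];
        char *data;             /* current start of packet data */
        int alignamount;        /* what GFAR_CB(skb)->alignamount records */
    };

    static void new_skb(struct fake_skb *skb)
    {
        skb->data = skb->buf;
        /* Align ->data up to the next RXBUF_ALIGNMENT boundary. */
        int align = RXBUF_ALIGNMENT -
                (((uintptr_t)skb->data) & (RXBUF_ALIGNMENT - 1));
        skb->data += align;                 /* like skb_reserve(skb, align) */
        skb->alignamount = align;
    }

    static void recycle_skb(struct fake_skb *skb)
    {
        /* Undo the reserve so the next user starts from a clean buffer,
         * the equivalent of skb_reserve(skb, -alignamount). */
        skb->data -= skb->alignamount;
        skb->alignamount = 0;
    }

    int main(void)
    {
        struct fake_skb skb;
        new_skb(&skb);
        printf("reserved %d bytes for alignment\n", skb.alignamount);
        recycle_skb(&skb);
        printf("data back at buffer start: %s\n",
               skb.data == skb.buf ? "yes" : "no");
        return 0;
    }
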
1912static int gfar_poll(struct napi_struct *napi, int budget) 2585static int gfar_poll(struct napi_struct *napi, int budget)
1913{ 2586{
1914 struct gfar_private *priv = container_of(napi, struct gfar_private, napi); 2587 struct gfar_priv_grp *gfargrp = container_of(napi,
1915 struct net_device *dev = priv->ndev; 2588 struct gfar_priv_grp, napi);
1916 int tx_cleaned = 0; 2589 struct gfar_private *priv = gfargrp->priv;
1917 int rx_cleaned = 0; 2590 struct gfar __iomem *regs = gfargrp->regs;
1918 unsigned long flags; 2591 struct gfar_priv_tx_q *tx_queue = NULL;
2592 struct gfar_priv_rx_q *rx_queue = NULL;
2593 int rx_cleaned = 0, budget_per_queue = 0, rx_cleaned_per_queue = 0;
2594 int tx_cleaned = 0, i, left_over_budget = budget;
2595 unsigned long serviced_queues = 0;
2596 int num_queues = 0;
2597
2598 num_queues = gfargrp->num_rx_queues;
2599 budget_per_queue = budget/num_queues;
1919 2600
1920 /* Clear IEVENT, so interrupts aren't called again 2601 /* Clear IEVENT, so interrupts aren't called again
1921 * because of the packets that have already arrived */ 2602 * because of the packets that have already arrived */
1922 gfar_write(&priv->regs->ievent, IEVENT_RTX_MASK); 2603 gfar_write(&regs->ievent, IEVENT_RTX_MASK);
1923 2604
1924 /* If we fail to get the lock, don't bother with the TX BDs */ 2605 while (num_queues && left_over_budget) {
1925 if (spin_trylock_irqsave(&priv->txlock, flags)) { 2606
1926 tx_cleaned = gfar_clean_tx_ring(dev); 2607 budget_per_queue = left_over_budget/num_queues;
1927 spin_unlock_irqrestore(&priv->txlock, flags); 2608 left_over_budget = 0;
2609
2610 for_each_set_bit(i, &gfargrp->rx_bit_map, priv->num_rx_queues) {
2611 if (test_bit(i, &serviced_queues))
2612 continue;
2613 rx_queue = priv->rx_queue[i];
2614 tx_queue = priv->tx_queue[rx_queue->qindex];
2615
2616 tx_cleaned += gfar_clean_tx_ring(tx_queue);
2617 rx_cleaned_per_queue = gfar_clean_rx_ring(rx_queue,
2618 budget_per_queue);
2619 rx_cleaned += rx_cleaned_per_queue;
2620 if(rx_cleaned_per_queue < budget_per_queue) {
2621 left_over_budget = left_over_budget +
2622 (budget_per_queue - rx_cleaned_per_queue);
2623 set_bit(i, &serviced_queues);
2624 num_queues--;
2625 }
2626 }
1928 } 2627 }
1929 2628
1930 rx_cleaned = gfar_clean_rx_ring(dev, budget);
1931
1932 if (tx_cleaned) 2629 if (tx_cleaned)
1933 return budget; 2630 return budget;
1934 2631
@@ -1936,20 +2633,14 @@ static int gfar_poll(struct napi_struct *napi, int budget)
1936 napi_complete(napi); 2633 napi_complete(napi);
1937 2634
1938 /* Clear the halt bit in RSTAT */ 2635 /* Clear the halt bit in RSTAT */
1939 gfar_write(&priv->regs->rstat, RSTAT_CLEAR_RHALT); 2636 gfar_write(&regs->rstat, gfargrp->rstat);
1940 2637
1941 gfar_write(&priv->regs->imask, IMASK_DEFAULT); 2638 gfar_write(&regs->imask, IMASK_DEFAULT);
1942 2639
1943 /* If we are coalescing interrupts, update the timer */ 2640 /* If we are coalescing interrupts, update the timer */
1944 /* Otherwise, clear it */ 2641 /* Otherwise, clear it */
1945 if (likely(priv->rxcoalescing)) { 2642 gfar_configure_coalescing(priv,
1946 gfar_write(&priv->regs->rxic, 0); 2643 gfargrp->rx_bit_map, gfargrp->tx_bit_map);
1947 gfar_write(&priv->regs->rxic, priv->rxic);
1948 }
1949 if (likely(priv->txcoalescing)) {
1950 gfar_write(&priv->regs->txic, 0);
1951 gfar_write(&priv->regs->txic, priv->txic);
1952 }
1953 } 2644 }
1954 2645
1955 return rx_cleaned; 2646 return rx_cleaned;
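The NAPI poll now shares its budget across all RX queues of the group: the budget is split evenly, any queue that finishes under its share donates the remainder to a leftover pool, and queues that are still busy get another pass funded from that pool until everything is serviced or the budget runs out. A stand-alone sketch of that redistribution loop; the per-queue workloads are made up:

    #include <stdio.h>

    #define NQ 4

    /* Pretend each queue has this many packets waiting. */
    static int pending[NQ] = { 1, 10, 2, 7 };

    static int clean_rx_ring(int q, int budget)
    {
        int done = pending[q] < budget ? pending[q] : budget;
        pending[q] -= done;
        return done;
    }

    int main(void)
    {
        int budget = 16, rx_cleaned = 0;
        int serviced[NQ] = { 0 };
        int num_queues = NQ, left_over = budget;

        while (num_queues && left_over) {
            int per_queue = left_over / num_queues;
            left_over = 0;

            for (int q = 0; q < NQ; q++) {
                if (serviced[q])
                    continue;
                int done = clean_rx_ring(q, per_queue);
                rx_cleaned += done;
                if (done < per_queue) {
                    /* Queue is idle: return its unused budget. */
                    left_over += per_queue - done;
                    serviced[q] = 1;
                    num_queues--;
                }
            }
        }
        printf("cleaned %d of %d budget\n", rx_cleaned, budget);
        return 0;
    }
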
@@ -1964,44 +2655,50 @@ static int gfar_poll(struct napi_struct *napi, int budget)
1964static void gfar_netpoll(struct net_device *dev) 2655static void gfar_netpoll(struct net_device *dev)
1965{ 2656{
1966 struct gfar_private *priv = netdev_priv(dev); 2657 struct gfar_private *priv = netdev_priv(dev);
2658 int i = 0;
1967 2659
1968 /* If the device has multiple interrupts, run tx/rx */ 2660 /* If the device has multiple interrupts, run tx/rx */
1969 if (priv->device_flags & FSL_GIANFAR_DEV_HAS_MULTI_INTR) { 2661 if (priv->device_flags & FSL_GIANFAR_DEV_HAS_MULTI_INTR) {
1970 disable_irq(priv->interruptTransmit); 2662 for (i = 0; i < priv->num_grps; i++) {
1971 disable_irq(priv->interruptReceive); 2663 disable_irq(priv->gfargrp[i].interruptTransmit);
1972 disable_irq(priv->interruptError); 2664 disable_irq(priv->gfargrp[i].interruptReceive);
1973 gfar_interrupt(priv->interruptTransmit, dev); 2665 disable_irq(priv->gfargrp[i].interruptError);
1974 enable_irq(priv->interruptError); 2666 gfar_interrupt(priv->gfargrp[i].interruptTransmit,
1975 enable_irq(priv->interruptReceive); 2667 &priv->gfargrp[i]);
1976 enable_irq(priv->interruptTransmit); 2668 enable_irq(priv->gfargrp[i].interruptError);
2669 enable_irq(priv->gfargrp[i].interruptReceive);
2670 enable_irq(priv->gfargrp[i].interruptTransmit);
2671 }
1977 } else { 2672 } else {
1978 disable_irq(priv->interruptTransmit); 2673 for (i = 0; i < priv->num_grps; i++) {
1979 gfar_interrupt(priv->interruptTransmit, dev); 2674 disable_irq(priv->gfargrp[i].interruptTransmit);
1980 enable_irq(priv->interruptTransmit); 2675 gfar_interrupt(priv->gfargrp[i].interruptTransmit,
2676 &priv->gfargrp[i]);
2677 enable_irq(priv->gfargrp[i].interruptTransmit);
2678 }
1981 } 2679 }
1982} 2680}
1983#endif 2681#endif
1984 2682
1985/* The interrupt handler for devices with one interrupt */ 2683/* The interrupt handler for devices with one interrupt */
1986static irqreturn_t gfar_interrupt(int irq, void *dev_id) 2684static irqreturn_t gfar_interrupt(int irq, void *grp_id)
1987{ 2685{
1988 struct net_device *dev = dev_id; 2686 struct gfar_priv_grp *gfargrp = grp_id;
1989 struct gfar_private *priv = netdev_priv(dev);
1990 2687
1991 /* Save ievent for future reference */ 2688 /* Save ievent for future reference */
1992 u32 events = gfar_read(&priv->regs->ievent); 2689 u32 events = gfar_read(&gfargrp->regs->ievent);
1993 2690
1994 /* Check for reception */ 2691 /* Check for reception */
1995 if (events & IEVENT_RX_MASK) 2692 if (events & IEVENT_RX_MASK)
1996 gfar_receive(irq, dev_id); 2693 gfar_receive(irq, grp_id);
1997 2694
1998 /* Check for transmit completion */ 2695 /* Check for transmit completion */
1999 if (events & IEVENT_TX_MASK) 2696 if (events & IEVENT_TX_MASK)
2000 gfar_transmit(irq, dev_id); 2697 gfar_transmit(irq, grp_id);
2001 2698
2002 /* Check for errors */ 2699 /* Check for errors */
2003 if (events & IEVENT_ERR_MASK) 2700 if (events & IEVENT_ERR_MASK)
2004 gfar_error(irq, dev_id); 2701 gfar_error(irq, grp_id);
2005 2702
2006 return IRQ_HANDLED; 2703 return IRQ_HANDLED;
2007} 2704}
@@ -2015,12 +2712,14 @@ static irqreturn_t gfar_interrupt(int irq, void *dev_id)
2015static void adjust_link(struct net_device *dev) 2712static void adjust_link(struct net_device *dev)
2016{ 2713{
2017 struct gfar_private *priv = netdev_priv(dev); 2714 struct gfar_private *priv = netdev_priv(dev);
2018 struct gfar __iomem *regs = priv->regs; 2715 struct gfar __iomem *regs = priv->gfargrp[0].regs;
2019 unsigned long flags; 2716 unsigned long flags;
2020 struct phy_device *phydev = priv->phydev; 2717 struct phy_device *phydev = priv->phydev;
2021 int new_state = 0; 2718 int new_state = 0;
2022 2719
2023 spin_lock_irqsave(&priv->txlock, flags); 2720 local_irq_save(flags);
2721 lock_tx_qs(priv);
2722
2024 if (phydev->link) { 2723 if (phydev->link) {
2025 u32 tempval = gfar_read(&regs->maccfg2); 2724 u32 tempval = gfar_read(&regs->maccfg2);
2026 u32 ecntrl = gfar_read(&regs->ecntrl); 2725 u32 ecntrl = gfar_read(&regs->ecntrl);
@@ -2085,8 +2784,8 @@ static void adjust_link(struct net_device *dev)
2085 2784
2086 if (new_state && netif_msg_link(priv)) 2785 if (new_state && netif_msg_link(priv))
2087 phy_print_status(phydev); 2786 phy_print_status(phydev);
2088 2787 unlock_tx_qs(priv);
2089 spin_unlock_irqrestore(&priv->txlock, flags); 2788 local_irq_restore(flags);
2090} 2789}
2091 2790
2092/* Update the hash table based on the current list of multicast 2791/* Update the hash table based on the current list of multicast
@@ -2097,10 +2796,10 @@ static void gfar_set_multi(struct net_device *dev)
2097{ 2796{
2098 struct dev_mc_list *mc_ptr; 2797 struct dev_mc_list *mc_ptr;
2099 struct gfar_private *priv = netdev_priv(dev); 2798 struct gfar_private *priv = netdev_priv(dev);
2100 struct gfar __iomem *regs = priv->regs; 2799 struct gfar __iomem *regs = priv->gfargrp[0].regs;
2101 u32 tempval; 2800 u32 tempval;
2102 2801
2103 if(dev->flags & IFF_PROMISC) { 2802 if (dev->flags & IFF_PROMISC) {
2104 /* Set RCTRL to PROM */ 2803 /* Set RCTRL to PROM */
2105 tempval = gfar_read(&regs->rctrl); 2804 tempval = gfar_read(&regs->rctrl);
2106 tempval |= RCTRL_PROM; 2805 tempval |= RCTRL_PROM;
@@ -2112,7 +2811,7 @@ static void gfar_set_multi(struct net_device *dev)
2112 gfar_write(&regs->rctrl, tempval); 2811 gfar_write(&regs->rctrl, tempval);
2113 } 2812 }
2114 2813
2115 if(dev->flags & IFF_ALLMULTI) { 2814 if (dev->flags & IFF_ALLMULTI) {
2116 /* Set the hash to rx all multicast frames */ 2815 /* Set the hash to rx all multicast frames */
2117 gfar_write(&regs->igaddr0, 0xffffffff); 2816 gfar_write(&regs->igaddr0, 0xffffffff);
2118 gfar_write(&regs->igaddr1, 0xffffffff); 2817 gfar_write(&regs->igaddr1, 0xffffffff);
@@ -2164,11 +2863,11 @@ static void gfar_set_multi(struct net_device *dev)
2164 em_num = 0; 2863 em_num = 0;
2165 } 2864 }
2166 2865
2167 if(dev->mc_count == 0) 2866 if (netdev_mc_empty(dev))
2168 return; 2867 return;
2169 2868
2170 /* Parse the list, and set the appropriate bits */ 2869 /* Parse the list, and set the appropriate bits */
2171 for(mc_ptr = dev->mc_list; mc_ptr; mc_ptr = mc_ptr->next) { 2870 netdev_for_each_mc_addr(mc_ptr, dev) {
2172 if (idx < em_num) { 2871 if (idx < em_num) {
2173 gfar_set_mac_for_addr(dev, idx, 2872 gfar_set_mac_for_addr(dev, idx,
2174 mc_ptr->dmi_addr); 2873 mc_ptr->dmi_addr);
@@ -2230,10 +2929,11 @@ static void gfar_set_hash_for_addr(struct net_device *dev, u8 *addr)
2230static void gfar_set_mac_for_addr(struct net_device *dev, int num, u8 *addr) 2929static void gfar_set_mac_for_addr(struct net_device *dev, int num, u8 *addr)
2231{ 2930{
2232 struct gfar_private *priv = netdev_priv(dev); 2931 struct gfar_private *priv = netdev_priv(dev);
2932 struct gfar __iomem *regs = priv->gfargrp[0].regs;
2233 int idx; 2933 int idx;
2234 char tmpbuf[MAC_ADDR_LEN]; 2934 char tmpbuf[MAC_ADDR_LEN];
2235 u32 tempval; 2935 u32 tempval;
2236 u32 __iomem *macptr = &priv->regs->macstnaddr1; 2936 u32 __iomem *macptr = &regs->macstnaddr1;
2237 2937
2238 macptr += num*2; 2938 macptr += num*2;
2239 2939
@@ -2250,16 +2950,18 @@ static void gfar_set_mac_for_addr(struct net_device *dev, int num, u8 *addr)
2250} 2950}
2251 2951
2252/* GFAR error interrupt handler */ 2952/* GFAR error interrupt handler */
2253static irqreturn_t gfar_error(int irq, void *dev_id) 2953static irqreturn_t gfar_error(int irq, void *grp_id)
2254{ 2954{
2255 struct net_device *dev = dev_id; 2955 struct gfar_priv_grp *gfargrp = grp_id;
2256 struct gfar_private *priv = netdev_priv(dev); 2956 struct gfar __iomem *regs = gfargrp->regs;
2957 struct gfar_private *priv= gfargrp->priv;
2958 struct net_device *dev = priv->ndev;
2257 2959
2258 /* Save ievent for future reference */ 2960 /* Save ievent for future reference */
2259 u32 events = gfar_read(&priv->regs->ievent); 2961 u32 events = gfar_read(&regs->ievent);
2260 2962
2261 /* Clear IEVENT */ 2963 /* Clear IEVENT */
2262 gfar_write(&priv->regs->ievent, events & IEVENT_ERR_MASK); 2964 gfar_write(&regs->ievent, events & IEVENT_ERR_MASK);
2263 2965
2264 /* Magic Packet is not an error. */ 2966 /* Magic Packet is not an error. */
2265 if ((priv->device_flags & FSL_GIANFAR_DEV_HAS_MAGIC_PACKET) && 2967 if ((priv->device_flags & FSL_GIANFAR_DEV_HAS_MAGIC_PACKET) &&
@@ -2269,7 +2971,7 @@ static irqreturn_t gfar_error(int irq, void *dev_id)
2269 /* Hmm... */ 2971 /* Hmm... */
2270 if (netif_msg_rx_err(priv) || netif_msg_tx_err(priv)) 2972 if (netif_msg_rx_err(priv) || netif_msg_tx_err(priv))
2271 printk(KERN_DEBUG "%s: error interrupt (ievent=0x%08x imask=0x%08x)\n", 2973 printk(KERN_DEBUG "%s: error interrupt (ievent=0x%08x imask=0x%08x)\n",
2272 dev->name, events, gfar_read(&priv->regs->imask)); 2974 dev->name, events, gfar_read(&regs->imask));
2273 2975
2274 /* Update the error counters */ 2976 /* Update the error counters */
2275 if (events & IEVENT_TXE) { 2977 if (events & IEVENT_TXE) {
@@ -2280,14 +2982,22 @@ static irqreturn_t gfar_error(int irq, void *dev_id)
2280 if (events & IEVENT_CRL) 2982 if (events & IEVENT_CRL)
2281 dev->stats.tx_aborted_errors++; 2983 dev->stats.tx_aborted_errors++;
2282 if (events & IEVENT_XFUN) { 2984 if (events & IEVENT_XFUN) {
2985 unsigned long flags;
2986
2283 if (netif_msg_tx_err(priv)) 2987 if (netif_msg_tx_err(priv))
2284 printk(KERN_DEBUG "%s: TX FIFO underrun, " 2988 printk(KERN_DEBUG "%s: TX FIFO underrun, "
2285 "packet dropped.\n", dev->name); 2989 "packet dropped.\n", dev->name);
2286 dev->stats.tx_dropped++; 2990 dev->stats.tx_dropped++;
2287 priv->extra_stats.tx_underrun++; 2991 priv->extra_stats.tx_underrun++;
2288 2992
2993 local_irq_save(flags);
2994 lock_tx_qs(priv);
2995
2289 /* Reactivate the Tx Queues */ 2996 /* Reactivate the Tx Queues */
2290 gfar_write(&priv->regs->tstat, TSTAT_CLEAR_THALT); 2997 gfar_write(&regs->tstat, gfargrp->tstat);
2998
2999 unlock_tx_qs(priv);
3000 local_irq_restore(flags);
2291 } 3001 }
2292 if (netif_msg_tx_err(priv)) 3002 if (netif_msg_tx_err(priv))
2293 printk(KERN_DEBUG "%s: Transmit Error\n", dev->name); 3003 printk(KERN_DEBUG "%s: Transmit Error\n", dev->name);
@@ -2296,11 +3006,11 @@ static irqreturn_t gfar_error(int irq, void *dev_id)
2296 dev->stats.rx_errors++; 3006 dev->stats.rx_errors++;
2297 priv->extra_stats.rx_bsy++; 3007 priv->extra_stats.rx_bsy++;
2298 3008
2299 gfar_receive(irq, dev_id); 3009 gfar_receive(irq, grp_id);
2300 3010
2301 if (netif_msg_rx_err(priv)) 3011 if (netif_msg_rx_err(priv))
2302 printk(KERN_DEBUG "%s: busy error (rstat: %x)\n", 3012 printk(KERN_DEBUG "%s: busy error (rstat: %x)\n",
2303 dev->name, gfar_read(&priv->regs->rstat)); 3013 dev->name, gfar_read(&regs->rstat));
2304 } 3014 }
2305 if (events & IEVENT_BABR) { 3015 if (events & IEVENT_BABR) {
2306 dev->stats.rx_errors++; 3016 dev->stats.rx_errors++;
@@ -2331,6 +3041,9 @@ static struct of_device_id gfar_match[] =
2331 .type = "network", 3041 .type = "network",
2332 .compatible = "gianfar", 3042 .compatible = "gianfar",
2333 }, 3043 },
3044 {
3045 .compatible = "fsl,etsec2",
3046 },
2334 {}, 3047 {},
2335}; 3048};
2336MODULE_DEVICE_TABLE(of, gfar_match); 3049MODULE_DEVICE_TABLE(of, gfar_match);
@@ -2342,8 +3055,9 @@ static struct of_platform_driver gfar_driver = {
2342 3055
2343 .probe = gfar_probe, 3056 .probe = gfar_probe,
2344 .remove = gfar_remove, 3057 .remove = gfar_remove,
2345 .suspend = gfar_suspend, 3058 .suspend = gfar_legacy_suspend,
2346 .resume = gfar_resume, 3059 .resume = gfar_legacy_resume,
3060 .driver.pm = GFAR_PM_OPS,
2347}; 3061};
2348 3062
2349static int __init gfar_init(void) 3063static int __init gfar_init(void)