diff options
author | Sandeep Gopalpet <Sandeep.Kumar@freescale.com> | 2009-11-02 02:03:15 -0500 |
---|---|---|
committer | David S. Miller <davem@davemloft.net> | 2009-11-03 02:40:57 -0500 |
commit | fba4ed030cfae7efdb6b79a57b0c5a9d72c9de83 (patch) | |
tree | ddee54010c64517a01ea112ca16e5bc1fee0938c /drivers/net/gianfar_ethtool.c | |
parent | f4983704a63b3764418905a77d48105a8cbce97f (diff) |
gianfar: Add Multiple Queue Support
This patch introduces multiple Tx and Rx queues.
The incoming packets can be classified into different queues
based on filer rules (out of scope of this patch). The number
of queues enabled will be based on the DTS entries fsl,num_tx_queues
and fsl,num_rx_queues.
Although we are enabling multiple queues, the interrupt coalescing
is on per device level (etsec-1.7 doesn't support multiple rxics
and txics).
Signed-off-by: Sandeep Gopalpet <Sandeep.Kumar@freescale.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
Diffstat (limited to 'drivers/net/gianfar_ethtool.c')
-rw-r--r-- | drivers/net/gianfar_ethtool.c | 70 |
1 file changed, 37 insertions, 33 deletions
diff --git a/drivers/net/gianfar_ethtool.c b/drivers/net/gianfar_ethtool.c index c681b414767a..d3d26234f190 100644 --- a/drivers/net/gianfar_ethtool.c +++ b/drivers/net/gianfar_ethtool.c | |||
@@ -204,9 +204,11 @@ static int gfar_gsettings(struct net_device *dev, struct ethtool_cmd *cmd) | |||
204 | 204 | ||
205 | if (NULL == phydev) | 205 | if (NULL == phydev) |
206 | return -ENODEV; | 206 | return -ENODEV; |
207 | tx_queue = priv->tx_queue; | 207 | tx_queue = priv->tx_queue[0]; |
208 | rx_queue = priv->rx_queue; | 208 | rx_queue = priv->rx_queue[0]; |
209 | 209 | ||
210 | /* etsec-1.7 and older versions have only one txic | ||
211 | * and rxic regs although they support multiple queues */ | ||
210 | cmd->maxtxpkt = get_icft_value(tx_queue->txic); | 212 | cmd->maxtxpkt = get_icft_value(tx_queue->txic); |
211 | cmd->maxrxpkt = get_icft_value(rx_queue->rxic); | 213 | cmd->maxrxpkt = get_icft_value(rx_queue->rxic); |
212 | 214 | ||
@@ -298,8 +300,8 @@ static int gfar_gcoalesce(struct net_device *dev, struct ethtool_coalesce *cvals | |||
298 | if (NULL == priv->phydev) | 300 | if (NULL == priv->phydev) |
299 | return -ENODEV; | 301 | return -ENODEV; |
300 | 302 | ||
301 | rx_queue = priv->rx_queue; | 303 | rx_queue = priv->rx_queue[0]; |
302 | tx_queue = priv->tx_queue; | 304 | tx_queue = priv->tx_queue[0]; |
303 | 305 | ||
304 | rxtime = get_ictt_value(rx_queue->rxic); | 306 | rxtime = get_ictt_value(rx_queue->rxic); |
305 | rxcount = get_icft_value(rx_queue->rxic); | 307 | rxcount = get_icft_value(rx_queue->rxic); |
@@ -357,8 +359,8 @@ static int gfar_scoalesce(struct net_device *dev, struct ethtool_coalesce *cvals | |||
357 | if (!(priv->device_flags & FSL_GIANFAR_DEV_HAS_COALESCE)) | 359 | if (!(priv->device_flags & FSL_GIANFAR_DEV_HAS_COALESCE)) |
358 | return -EOPNOTSUPP; | 360 | return -EOPNOTSUPP; |
359 | 361 | ||
360 | tx_queue = priv->tx_queue; | 362 | tx_queue = priv->tx_queue[0]; |
361 | rx_queue = priv->rx_queue; | 363 | rx_queue = priv->rx_queue[0]; |
362 | 364 | ||
363 | /* Set up rx coalescing */ | 365 | /* Set up rx coalescing */ |
364 | if ((cvals->rx_coalesce_usecs == 0) || | 366 | if ((cvals->rx_coalesce_usecs == 0) || |
@@ -429,8 +431,8 @@ static void gfar_gringparam(struct net_device *dev, struct ethtool_ringparam *rv | |||
429 | struct gfar_priv_tx_q *tx_queue = NULL; | 431 | struct gfar_priv_tx_q *tx_queue = NULL; |
430 | struct gfar_priv_rx_q *rx_queue = NULL; | 432 | struct gfar_priv_rx_q *rx_queue = NULL; |
431 | 433 | ||
432 | tx_queue = priv->tx_queue; | 434 | tx_queue = priv->tx_queue[0]; |
433 | rx_queue = priv->rx_queue; | 435 | rx_queue = priv->rx_queue[0]; |
434 | 436 | ||
435 | rvals->rx_max_pending = GFAR_RX_MAX_RING_SIZE; | 437 | rvals->rx_max_pending = GFAR_RX_MAX_RING_SIZE; |
436 | rvals->rx_mini_max_pending = GFAR_RX_MAX_RING_SIZE; | 438 | rvals->rx_mini_max_pending = GFAR_RX_MAX_RING_SIZE; |
@@ -453,9 +455,7 @@ static void gfar_gringparam(struct net_device *dev, struct ethtool_ringparam *rv | |||
453 | static int gfar_sringparam(struct net_device *dev, struct ethtool_ringparam *rvals) | 455 | static int gfar_sringparam(struct net_device *dev, struct ethtool_ringparam *rvals) |
454 | { | 456 | { |
455 | struct gfar_private *priv = netdev_priv(dev); | 457 | struct gfar_private *priv = netdev_priv(dev); |
456 | struct gfar_priv_tx_q *tx_queue = NULL; | 458 | int err = 0, i = 0; |
457 | struct gfar_priv_rx_q *rx_queue = NULL; | ||
458 | int err = 0; | ||
459 | 459 | ||
460 | if (rvals->rx_pending > GFAR_RX_MAX_RING_SIZE) | 460 | if (rvals->rx_pending > GFAR_RX_MAX_RING_SIZE) |
461 | return -EINVAL; | 461 | return -EINVAL; |
@@ -475,37 +475,41 @@ static int gfar_sringparam(struct net_device *dev, struct ethtool_ringparam *rva | |||
475 | return -EINVAL; | 475 | return -EINVAL; |
476 | } | 476 | } |
477 | 477 | ||
478 | tx_queue = priv->tx_queue; | ||
479 | rx_queue = priv->rx_queue; | ||
480 | 478 | ||
481 | if (dev->flags & IFF_UP) { | 479 | if (dev->flags & IFF_UP) { |
482 | unsigned long flags; | 480 | unsigned long flags; |
483 | 481 | ||
484 | /* Halt TX and RX, and process the frames which | 482 | /* Halt TX and RX, and process the frames which |
485 | * have already been received */ | 483 | * have already been received */ |
486 | spin_lock_irqsave(&tx_queue->txlock, flags); | 484 | local_irq_save(flags); |
487 | spin_lock(&rx_queue->rxlock); | 485 | lock_tx_qs(priv); |
486 | lock_rx_qs(priv); | ||
488 | 487 | ||
489 | gfar_halt(dev); | 488 | gfar_halt(dev); |
490 | 489 | ||
491 | spin_unlock(&rx_queue->rxlock); | 490 | unlock_rx_qs(priv); |
492 | spin_unlock_irqrestore(&tx_queue->txlock, flags); | 491 | unlock_tx_qs(priv); |
492 | local_irq_restore(flags); | ||
493 | 493 | ||
494 | gfar_clean_rx_ring(rx_queue, rx_queue->rx_ring_size); | 494 | for (i = 0; i < priv->num_rx_queues; i++) |
495 | gfar_clean_rx_ring(priv->rx_queue[i], | ||
496 | priv->rx_queue[i]->rx_ring_size); | ||
495 | 497 | ||
496 | /* Now we take down the rings to rebuild them */ | 498 | /* Now we take down the rings to rebuild them */ |
497 | stop_gfar(dev); | 499 | stop_gfar(dev); |
498 | } | 500 | } |
499 | 501 | ||
500 | /* Change the size */ | 502 | /* Change the size */ |
501 | rx_queue->rx_ring_size = rvals->rx_pending; | 503 | for (i = 0; i < priv->num_rx_queues; i++) { |
502 | tx_queue->tx_ring_size = rvals->tx_pending; | 504 | priv->rx_queue[i]->rx_ring_size = rvals->rx_pending; |
503 | tx_queue->num_txbdfree = tx_queue->tx_ring_size; | 505 | priv->tx_queue[i]->tx_ring_size = rvals->tx_pending; |
506 | priv->tx_queue[i]->num_txbdfree = priv->tx_queue[i]->tx_ring_size; | ||
507 | } | ||
504 | 508 | ||
505 | /* Rebuild the rings with the new size */ | 509 | /* Rebuild the rings with the new size */ |
506 | if (dev->flags & IFF_UP) { | 510 | if (dev->flags & IFF_UP) { |
507 | err = startup_gfar(dev); | 511 | err = startup_gfar(dev); |
508 | netif_wake_queue(dev); | 512 | netif_tx_wake_all_queues(dev); |
509 | } | 513 | } |
510 | return err; | 514 | return err; |
511 | } | 515 | } |
@@ -513,29 +517,29 @@ static int gfar_sringparam(struct net_device *dev, struct ethtool_ringparam *rva | |||
513 | static int gfar_set_rx_csum(struct net_device *dev, uint32_t data) | 517 | static int gfar_set_rx_csum(struct net_device *dev, uint32_t data) |
514 | { | 518 | { |
515 | struct gfar_private *priv = netdev_priv(dev); | 519 | struct gfar_private *priv = netdev_priv(dev); |
516 | struct gfar_priv_rx_q *rx_queue = NULL; | ||
517 | struct gfar_priv_tx_q *tx_queue = NULL; | ||
518 | unsigned long flags; | 520 | unsigned long flags; |
519 | int err = 0; | 521 | int err = 0, i = 0; |
520 | 522 | ||
521 | if (!(priv->device_flags & FSL_GIANFAR_DEV_HAS_CSUM)) | 523 | if (!(priv->device_flags & FSL_GIANFAR_DEV_HAS_CSUM)) |
522 | return -EOPNOTSUPP; | 524 | return -EOPNOTSUPP; |
523 | 525 | ||
524 | tx_queue = priv->tx_queue; | ||
525 | rx_queue = priv->rx_queue; | ||
526 | 526 | ||
527 | if (dev->flags & IFF_UP) { | 527 | if (dev->flags & IFF_UP) { |
528 | /* Halt TX and RX, and process the frames which | 528 | /* Halt TX and RX, and process the frames which |
529 | * have already been received */ | 529 | * have already been received */ |
530 | spin_lock_irqsave(&tx_queue->txlock, flags); | 530 | local_irq_save(flags); |
531 | spin_lock(&rx_queue->rxlock); | 531 | lock_tx_qs(priv); |
532 | lock_rx_qs(priv); | ||
532 | 533 | ||
533 | gfar_halt(dev); | 534 | gfar_halt(dev); |
534 | 535 | ||
535 | spin_unlock(&rx_queue->rxlock); | 536 | unlock_tx_qs(priv); |
536 | spin_unlock_irqrestore(&tx_queue->txlock, flags); | 537 | unlock_rx_qs(priv); |
538 | local_irq_save(flags); | ||
537 | 539 | ||
538 | gfar_clean_rx_ring(rx_queue, rx_queue->rx_ring_size); | 540 | for (i = 0; i < priv->num_rx_queues; i++) |
541 | gfar_clean_rx_ring(priv->rx_queue[i], | ||
542 | priv->rx_queue[i]->rx_ring_size); | ||
539 | 543 | ||
540 | /* Now we take down the rings to rebuild them */ | 544 | /* Now we take down the rings to rebuild them */ |
541 | stop_gfar(dev); | 545 | stop_gfar(dev); |
@@ -547,7 +551,7 @@ static int gfar_set_rx_csum(struct net_device *dev, uint32_t data) | |||
547 | 551 | ||
548 | if (dev->flags & IFF_UP) { | 552 | if (dev->flags & IFF_UP) { |
549 | err = startup_gfar(dev); | 553 | err = startup_gfar(dev); |
550 | netif_wake_queue(dev); | 554 | netif_tx_wake_all_queues(dev); |
551 | } | 555 | } |
552 | return err; | 556 | return err; |
553 | } | 557 | } |