author     Paul Gortmaker <paul.gortmaker@windriver.com>  2012-01-06 13:51:03 -0500
committer  Paul Gortmaker <paul.gortmaker@windriver.com>  2012-03-15 15:37:48 -0400
commit     d8a0f1b0af67679bba886784de10d8c21acc4e0e
tree       c22a927a5b2545e7ab0e4bfcfeb34b27afb56f01  /drivers/net
parent     cdf485be3a63d1f34293740fb726088c6840ceea
gianfar: Add support for byte queue limits.
Add support for byte queue limits (BQL), based on the similar
modifications made to intel/igb/igb_main.c by Eric Dumazet in
commit bdbc063129e811264cd6c311d8c2d9b95de01231 ("igb: Add support
for byte queue limits").

A local variable for tx_queue->qindex was introduced in
gfar_clean_tx_ring, since it is now used often enough to warrant one,
and it also improves readability somewhat.
Signed-off-by: Paul Gortmaker <paul.gortmaker@windriver.com>
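
For readers coming from the igb change, the BQL API this patch wires up
consists of three calls. Below is a minimal sketch of the pattern, not
gianfar code: the my_* functions are invented stand-ins, and only
netdev_get_tx_queue() and the netdev_tx_*_queue() helpers are the real
<linux/netdevice.h> API.

#include <linux/netdevice.h>
#include <linux/skbuff.h>

/* xmit path: account every byte handed to the hardware ring */
static netdev_tx_t my_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
	struct netdev_queue *txq =
		netdev_get_tx_queue(dev, skb_get_queue_mapping(skb));

	/* ... map the skb and post it to the hardware ring ... */

	netdev_tx_sent_queue(txq, skb->len);
	return NETDEV_TX_OK;
}

/* completion path: report how many packets/bytes the hardware finished */
static int my_clean_tx_ring(struct net_device *dev, int qindex)
{
	struct netdev_queue *txq = netdev_get_tx_queue(dev, qindex);
	unsigned int howmany = 0, bytes_sent = 0;

	/* ... walk completed descriptors, summing howmany and bytes_sent ... */

	netdev_tx_completed_queue(txq, howmany, bytes_sent);
	return howmany;
}

/* teardown path: drop in-flight accounting when the ring is purged */
static void my_free_skb_resources(struct net_device *dev, int qindex)
{
	netdev_tx_reset_queue(netdev_get_tx_queue(dev, qindex));
}

BQL feeds the sent/completed byte counts into a dynamically sized
per-queue limit on in-flight data, so the reset call matters: if a ring
is purged outside the normal completion path (as free_skb_resources()
does in the first hunk below) without netdev_tx_reset_queue(), the
stale accounting could stall the queue.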
Diffstat (limited to 'drivers/net')
-rw-r--r--  drivers/net/ethernet/freescale/gianfar.c | 19 ++++++++++++++++---
1 file changed, 16 insertions(+), 3 deletions(-)
diff --git a/drivers/net/ethernet/freescale/gianfar.c b/drivers/net/ethernet/freescale/gianfar.c
index adb0ae4e4195..a4c934bbea17 100644
--- a/drivers/net/ethernet/freescale/gianfar.c
+++ b/drivers/net/ethernet/freescale/gianfar.c
@@ -1755,9 +1755,12 @@ static void free_skb_resources(struct gfar_private *priv)
 
 	/* Go through all the buffer descriptors and free their data buffers */
 	for (i = 0; i < priv->num_tx_queues; i++) {
+		struct netdev_queue *txq;
 		tx_queue = priv->tx_queue[i];
+		txq = netdev_get_tx_queue(tx_queue->dev, tx_queue->qindex);
 		if(tx_queue->tx_skbuff)
 			free_skb_tx_queue(tx_queue);
+		netdev_tx_reset_queue(txq);
 	}
 
 	for (i = 0; i < priv->num_rx_queues; i++) {
@@ -2217,6 +2220,8 @@ static int gfar_start_xmit(struct sk_buff *skb, struct net_device *dev)
 		lstatus |= BD_LFLAG(TXBD_CRC | TXBD_READY) | skb_headlen(skb);
 	}
 
+	netdev_tx_sent_queue(txq, skb->len);
+
 	/*
 	 * We can work in parallel with gfar_clean_tx_ring(), except
 	 * when modifying num_txbdfree. Note that we didn't grab the lock
@@ -2460,6 +2465,7 @@ static void gfar_align_skb(struct sk_buff *skb)
 static int gfar_clean_tx_ring(struct gfar_priv_tx_q *tx_queue)
 {
 	struct net_device *dev = tx_queue->dev;
+	struct netdev_queue *txq;
 	struct gfar_private *priv = netdev_priv(dev);
 	struct gfar_priv_rx_q *rx_queue = NULL;
 	struct txbd8 *bdp, *next = NULL;
@@ -2471,10 +2477,13 @@ static int gfar_clean_tx_ring(struct gfar_priv_tx_q *tx_queue)
 	int frags = 0, nr_txbds = 0;
 	int i;
 	int howmany = 0;
+	int tqi = tx_queue->qindex;
+	unsigned int bytes_sent = 0;
 	u32 lstatus;
 	size_t buflen;
 
-	rx_queue = priv->rx_queue[tx_queue->qindex];
+	rx_queue = priv->rx_queue[tqi];
+	txq = netdev_get_tx_queue(dev, tqi);
 	bdp = tx_queue->dirty_tx;
 	skb_dirtytx = tx_queue->skb_dirtytx;
 
@@ -2533,6 +2542,8 @@ static int gfar_clean_tx_ring(struct gfar_priv_tx_q *tx_queue)
 		bdp = next_txbd(bdp, base, tx_ring_size);
 	}
 
+	bytes_sent += skb->len;
+
 	/*
 	 * If there's room in the queue (limit it to rx_buffer_size)
 	 * we add this skb back into the pool, if it's the right size
@@ -2557,13 +2568,15 @@ static int gfar_clean_tx_ring(struct gfar_priv_tx_q *tx_queue)
 	}
 
 	/* If we freed a buffer, we can restart transmission, if necessary */
-	if (__netif_subqueue_stopped(dev, tx_queue->qindex) && tx_queue->num_txbdfree)
-		netif_wake_subqueue(dev, tx_queue->qindex);
+	if (__netif_subqueue_stopped(dev, tqi) && tx_queue->num_txbdfree)
+		netif_wake_subqueue(dev, tqi);
 
 	/* Update dirty indicators */
 	tx_queue->skb_dirtytx = skb_dirtytx;
 	tx_queue->dirty_tx = bdp;
 
+	netdev_tx_completed_queue(txq, howmany, bytes_sent);
+
 	return howmany;
 }
 
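
Note that the descriptor-space flow control kept in the last hunk
(__netif_subqueue_stopped()/netif_wake_subqueue() keyed on
num_txbdfree) is orthogonal to BQL: the driver still guards against
running out of hardware descriptors, while BQL separately throttles the
stack once the bytes reported by netdev_tx_sent_queue() outrun those
returned by netdev_tx_completed_queue(). The resulting per-queue limit
can be observed and tuned via sysfs under
/sys/class/net/<dev>/queues/tx-<n>/byte_queue_limits/ (limit,
limit_min, limit_max, hold_time, inflight).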