author     Tom Herbert <therbert@google.com>       2011-11-28 11:33:23 -0500
committer  David S. Miller <davem@davemloft.net>   2011-11-29 12:46:20 -0500
commit     b8bfca9439d4ed03446bc9a3fdaef81b364d32dd
tree       89d404ea2ca45030d4380c0129fbf32eaed1d8ab /drivers/net/ethernet/nvidia
parent     3f0cfa3bc11e7f00c9994e0f469cbc0e7da7b00c
forcedeth: Support for byte queue limits
Changes to forcedeth to use byte queue limits.
Signed-off-by: Tom Herbert <therbert@google.com>
Acked-by: Eric Dumazet <eric.dumazet@gmail.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
Diffstat (limited to 'drivers/net/ethernet/nvidia')
-rw-r--r--  drivers/net/ethernet/nvidia/forcedeth.c | 18
1 file changed, 18 insertions, 0 deletions
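
For context, the byte queue limits (BQL) interface that this patch wires into forcedeth consists of three driver-side calls: netdev_sent_queue() accounts each skb's length as it is posted to the hardware TX ring, netdev_completed_queue() reports the packets and bytes reclaimed in the TX completion path, and netdev_reset_queue() clears the accounting whenever the TX ring is (re)initialized. The sketch below illustrates that generic pattern; the function names (my_start_xmit, my_tx_complete, my_init_tx_ring) are illustrative placeholders, not forcedeth code. The actual patch applies the same three calls in nv_start_xmit()/nv_start_xmit_optimized(), nv_tx_done()/nv_tx_done_optimized() and nv_init_tx(), as the diff below shows.

/* Minimal sketch of the BQL driver pattern; names are illustrative. */
#include <linux/netdevice.h>
#include <linux/skbuff.h>

static netdev_tx_t my_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
	/* ... map the skb and post it to the hardware TX ring ... */

	/* Account the bytes handed to the hardware. */
	netdev_sent_queue(dev, skb->len);

	return NETDEV_TX_OK;
}

static void my_tx_complete(struct net_device *dev)
{
	unsigned int pkts_compl = 0, bytes_compl = 0;

	/*
	 * ... for each descriptor the hardware has finished:
	 *     bytes_compl += skb->len; pkts_compl++; free the skb ...
	 */

	/* Report reclaimed work so BQL can adjust the queue limit. */
	netdev_completed_queue(dev, pkts_compl, bytes_compl);
}

static void my_init_tx_ring(struct net_device *dev)
{
	/* ... allocate or reset the TX descriptors ... */

	/* Clear BQL state whenever the TX ring is reinitialized. */
	netdev_reset_queue(dev);
}
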
diff --git a/drivers/net/ethernet/nvidia/forcedeth.c b/drivers/net/ethernet/nvidia/forcedeth.c
index 8db0b376d5b7..5245dacc3a49 100644
--- a/drivers/net/ethernet/nvidia/forcedeth.c
+++ b/drivers/net/ethernet/nvidia/forcedeth.c
@@ -1928,6 +1928,7 @@ static void nv_init_tx(struct net_device *dev)
 	np->last_tx.ex = &np->tx_ring.ex[np->tx_ring_size-1];
 	np->get_tx_ctx = np->put_tx_ctx = np->first_tx_ctx = np->tx_skb;
 	np->last_tx_ctx = &np->tx_skb[np->tx_ring_size-1];
+	netdev_reset_queue(np->dev);
 	np->tx_pkts_in_progress = 0;
 	np->tx_change_owner = NULL;
 	np->tx_end_flip = NULL;
@@ -2276,6 +2277,9 @@ static netdev_tx_t nv_start_xmit(struct sk_buff *skb, struct net_device *dev)
 
 	/* set tx flags */
 	start_tx->flaglen |= cpu_to_le32(tx_flags | tx_flags_extra);
+
+	netdev_sent_queue(np->dev, skb->len);
+
 	np->put_tx.orig = put_tx;
 
 	spin_unlock_irqrestore(&np->lock, flags);
@@ -2420,6 +2424,9 @@ static netdev_tx_t nv_start_xmit_optimized(struct sk_buff *skb,
 
 	/* set tx flags */
 	start_tx->flaglen |= cpu_to_le32(tx_flags | tx_flags_extra);
+
+	netdev_sent_queue(np->dev, skb->len);
+
 	np->put_tx.ex = put_tx;
 
 	spin_unlock_irqrestore(&np->lock, flags);
@@ -2457,6 +2464,7 @@ static int nv_tx_done(struct net_device *dev, int limit)
 	u32 flags;
 	int tx_work = 0;
 	struct ring_desc *orig_get_tx = np->get_tx.orig;
+	unsigned int bytes_compl = 0;
 
 	while ((np->get_tx.orig != np->put_tx.orig) &&
 	       !((flags = le32_to_cpu(np->get_tx.orig->flaglen)) & NV_TX_VALID) &&
@@ -2476,6 +2484,7 @@ static int nv_tx_done(struct net_device *dev, int limit)
 					np->stat_tx_bytes += np->get_tx_ctx->skb->len;
 					u64_stats_update_end(&np->swstats_tx_syncp);
 				}
+				bytes_compl += np->get_tx_ctx->skb->len;
 				dev_kfree_skb_any(np->get_tx_ctx->skb);
 				np->get_tx_ctx->skb = NULL;
 				tx_work++;
@@ -2492,6 +2501,7 @@ static int nv_tx_done(struct net_device *dev, int limit)
 					np->stat_tx_bytes += np->get_tx_ctx->skb->len;
 					u64_stats_update_end(&np->swstats_tx_syncp);
 				}
+				bytes_compl += np->get_tx_ctx->skb->len;
 				dev_kfree_skb_any(np->get_tx_ctx->skb);
 				np->get_tx_ctx->skb = NULL;
 				tx_work++;
@@ -2502,6 +2512,9 @@ static int nv_tx_done(struct net_device *dev, int limit)
 		if (unlikely(np->get_tx_ctx++ == np->last_tx_ctx))
 			np->get_tx_ctx = np->first_tx_ctx;
 	}
+
+	netdev_completed_queue(np->dev, tx_work, bytes_compl);
+
 	if (unlikely((np->tx_stop == 1) && (np->get_tx.orig != orig_get_tx))) {
 		np->tx_stop = 0;
 		netif_wake_queue(dev);
@@ -2515,6 +2528,7 @@ static int nv_tx_done_optimized(struct net_device *dev, int limit)
 	u32 flags;
 	int tx_work = 0;
 	struct ring_desc_ex *orig_get_tx = np->get_tx.ex;
+	unsigned long bytes_cleaned = 0;
 
 	while ((np->get_tx.ex != np->put_tx.ex) &&
 	       !((flags = le32_to_cpu(np->get_tx.ex->flaglen)) & NV_TX2_VALID) &&
@@ -2538,6 +2552,7 @@ static int nv_tx_done_optimized(struct net_device *dev, int limit)
 				u64_stats_update_end(&np->swstats_tx_syncp);
 			}
 
+			bytes_cleaned += np->get_tx_ctx->skb->len;
 			dev_kfree_skb_any(np->get_tx_ctx->skb);
 			np->get_tx_ctx->skb = NULL;
 			tx_work++;
@@ -2545,6 +2560,9 @@ static int nv_tx_done_optimized(struct net_device *dev, int limit)
 			if (np->tx_limit)
 				nv_tx_flip_ownership(dev);
 		}
+
+		netdev_completed_queue(np->dev, tx_work, bytes_cleaned);
+
 		if (unlikely(np->get_tx.ex++ == np->last_tx.ex))
 			np->get_tx.ex = np->first_tx.ex;
 		if (unlikely(np->get_tx_ctx++ == np->last_tx_ctx))