author    Ayaz Abdulla <aabdulla@nvidia.com>      2009-03-05 03:02:10 -0500
committer David S. Miller <davem@davemloft.net>  2009-03-10 08:29:47 -0400
commit    33912e72d00c3627dbbb7c59463df9535176059f
tree      3cea30a214bddc35c730618a14d5cb07959ccc35 /drivers
parent    2daac3e8f831beba2012fdefda17770456be9b7e
forcedeth: add/modify tx done with limit
There are two tx_done routines that handle tx completion processing. Both functions now take a limit value and return the number of tx completions processed. A future patch will use this return value to determine the total amount of work done.

Signed-off-by: Ayaz Abdulla <aabdulla@nvidia.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
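To illustrate the intent, below is a minimal sketch of how a poll routine could combine the returned tx completion count with rx work to account for total work done against a budget. This sketch is not part of the patch: the pairing with nv_rx_process_optimized() and the napi_complete() call are assumptions made here for illustration, not code from this commit or its follow-up.

/* Hypothetical sketch only: shows how the tx_work return value could
 * feed into a NAPI poll routine's total-work accounting. The use of
 * nv_rx_process_optimized() and napi_complete() is assumed for
 * illustration, not taken from this patch.
 */
static int nv_napi_poll_sketch(struct napi_struct *napi, int budget)
{
	struct fe_priv *np = container_of(napi, struct fe_priv, napi);
	struct net_device *dev = np->dev;
	int tx_work, rx_work;

	spin_lock(&np->lock);
	/* reap at most one ring's worth of tx completions */
	tx_work = nv_tx_done_optimized(dev, np->tx_ring_size);
	spin_unlock(&np->lock);

	rx_work = nv_rx_process_optimized(dev, budget);

	if (tx_work + rx_work < budget) {
		/* all outstanding work handled; poll can complete */
		napi_complete(napi);
	}

	return rx_work;
}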
Diffstat (limited to 'drivers')
-rw-r--r--	drivers/net/forcedeth.c	20
1 file changed, 14 insertions(+), 6 deletions(-)
diff --git a/drivers/net/forcedeth.c b/drivers/net/forcedeth.c
index 74511f7e13e9..78c2fe185281 100644
--- a/drivers/net/forcedeth.c
+++ b/drivers/net/forcedeth.c
@@ -2397,14 +2397,16 @@ static inline void nv_tx_flip_ownership(struct net_device *dev)
  *
  * Caller must own np->lock.
  */
-static void nv_tx_done(struct net_device *dev)
+static int nv_tx_done(struct net_device *dev, int limit)
 {
 	struct fe_priv *np = netdev_priv(dev);
 	u32 flags;
+	int tx_work = 0;
 	struct ring_desc* orig_get_tx = np->get_tx.orig;
 
 	while ((np->get_tx.orig != np->put_tx.orig) &&
-	       !((flags = le32_to_cpu(np->get_tx.orig->flaglen)) & NV_TX_VALID)) {
+	       !((flags = le32_to_cpu(np->get_tx.orig->flaglen)) & NV_TX_VALID) &&
+	       (tx_work < limit)) {
 
 		dprintk(KERN_DEBUG "%s: nv_tx_done: flags 0x%x.\n",
 			dev->name, flags);
@@ -2430,6 +2432,7 @@ static void nv_tx_done(struct net_device *dev)
 				}
 				dev_kfree_skb_any(np->get_tx_ctx->skb);
 				np->get_tx_ctx->skb = NULL;
+				tx_work++;
 			}
 		} else {
 			if (flags & NV_TX2_LASTPACKET) {
@@ -2447,6 +2450,7 @@ static void nv_tx_done(struct net_device *dev)
 				}
 				dev_kfree_skb_any(np->get_tx_ctx->skb);
 				np->get_tx_ctx->skb = NULL;
+				tx_work++;
 			}
 		}
 		if (unlikely(np->get_tx.orig++ == np->last_tx.orig))
@@ -2458,17 +2462,19 @@ static void nv_tx_done(struct net_device *dev)
 		np->tx_stop = 0;
 		netif_wake_queue(dev);
 	}
+	return tx_work;
 }
 
-static void nv_tx_done_optimized(struct net_device *dev, int limit)
+static int nv_tx_done_optimized(struct net_device *dev, int limit)
 {
 	struct fe_priv *np = netdev_priv(dev);
 	u32 flags;
+	int tx_work = 0;
 	struct ring_desc_ex* orig_get_tx = np->get_tx.ex;
 
 	while ((np->get_tx.ex != np->put_tx.ex) &&
 	       !((flags = le32_to_cpu(np->get_tx.ex->flaglen)) & NV_TX_VALID) &&
-	       (limit-- > 0)) {
+	       (tx_work < limit)) {
 
 		dprintk(KERN_DEBUG "%s: nv_tx_done_optimized: flags 0x%x.\n",
 			dev->name, flags);
@@ -2492,6 +2498,7 @@ static void nv_tx_done_optimized(struct net_device *dev, int limit)
 
 		dev_kfree_skb_any(np->get_tx_ctx->skb);
 		np->get_tx_ctx->skb = NULL;
+		tx_work++;
 
 		if (np->tx_limit) {
 			nv_tx_flip_ownership(dev);
@@ -2506,6 +2513,7 @@ static void nv_tx_done_optimized(struct net_device *dev, int limit)
 		np->tx_stop = 0;
 		netif_wake_queue(dev);
 	}
+	return tx_work;
 }
 
 /*
@@ -2578,7 +2586,7 @@ static void nv_tx_timeout(struct net_device *dev)
 
 	/* 2) check that the packets were not sent already: */
 	if (!nv_optimized(np))
-		nv_tx_done(dev);
+		nv_tx_done(dev, np->tx_ring_size);
 	else
 		nv_tx_done_optimized(dev, np->tx_ring_size);
 
@@ -3433,7 +3441,7 @@ static irqreturn_t nv_nic_irq(int foo, void *data)
 		nv_msi_workaround(np);
 
 		spin_lock(&np->lock);
-		nv_tx_done(dev);
+		nv_tx_done(dev, np->tx_ring_size);
 		spin_unlock(&np->lock);
 
 #ifdef CONFIG_FORCEDETH_NAPI