author     Stephen Hemminger <shemminger@vyatta.com>    2009-08-27 09:55:11 -0400
committer  David S. Miller <davem@davemloft.net>        2009-08-29 02:31:55 -0400
commit     3791decb5aa0202d2a2473d6cf4947d98e846c7a (patch)
tree       43fa8ce25c787bf31b9d37545f6f840bbc8f1670 /net/core
parent     3bda06a3d7987bfeabb218ac2f17ce22c34f13b3 (diff)
pktgen: xmit logic reorganization
Do some reorganization of the transmit logic path:
* move the transmit-queue-full idle wait to a separate routine
* add a cpu_relax()
* eliminate some of the unneeded gotos
* if the queue is still stopped, go back to the main thread loop
* don't give up transmitting when the quantum is exhausted (be greedy)
Signed-off-by: Stephen Hemminger <shemminger@vyatta.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
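
The shape of the change is easiest to see outside the diff: the queue-full busy-wait moves into one small helper that accounts its own idle time, and the goto-out exits become early returns (guard clauses). Below is a minimal standalone C sketch of that pattern, not kernel code: fake_dev, now_us() and dev_idle() are hypothetical stand-ins for pktgen_dev, getCurUs(), the netif_running()/netif_tx_queue_stopped() checks, and the new idle() helper.

#include <stdio.h>
#include <stdint.h>

struct fake_dev {
        int running;            /* stand-in for netif_running(odev) */
        int queue_stopped;      /* stand-in for netif_tx_queue_stopped(txq) */
        uint64_t idle_acc;      /* microseconds spent waiting, as in pktgen_dev */
};

static uint64_t fake_clock;     /* stand-in for getCurUs(); advanced by hand */

static uint64_t now_us(void)
{
        return fake_clock;
}

/* The extracted helper, shaped like the new idle(): time the wait and
 * accumulate it in idle_acc instead of open-coding that at the call site. */
static void dev_idle(struct fake_dev *d)
{
        uint64_t start = now_us();

        fake_clock += 5;        /* pretend schedule()/cpu_relax() took 5 us */

        d->idle_acc += now_us() - start;
}

static void xmit(struct fake_dev *d)
{
        if (d->queue_stopped) {
                /* One guard clause with an early return replaces the old
                 * nested goto-out paths: wait while the device is up,
                 * stop for good when it is down. */
                if (d->running)
                        dev_idle(d);
                else
                        printf("device down: stopping\n");
                return;
        }
        printf("transmit one packet\n");
}

int main(void)
{
        struct fake_dev d = { .running = 1, .queue_stopped = 1 };

        xmit(&d);               /* queue full: idles and accounts the time */
        d.queue_stopped = 0;
        xmit(&d);               /* queue open: transmits */
        printf("idle_acc = %llu us\n", (unsigned long long)d.idle_acc);
        return 0;
}

Built with any C compiler (e.g. gcc sketch.c && ./a.out), the first xmit() call accounts the wait in idle_acc and the second transmits, mirroring how pktgen_xmit() now either idles, stops the device, or falls straight through to the send path.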
Diffstat (limited to 'net/core')
-rw-r--r--    net/core/pktgen.c    55
1 file changed, 24 insertions(+), 31 deletions(-)
diff --git a/net/core/pktgen.c b/net/core/pktgen.c
index 1b1f1262a275..89fd232a234b 100644
--- a/net/core/pktgen.c
+++ b/net/core/pktgen.c
@@ -3339,6 +3339,18 @@ static void pktgen_rem_thread(struct pktgen_thread *t)
         mutex_unlock(&pktgen_thread_lock);
 }
 
+static void idle(struct pktgen_dev *pkt_dev)
+{
+        u64 idle_start = getCurUs();
+
+        if (need_resched())
+                schedule();
+        else
+                cpu_relax();
+
+        pkt_dev->idle_acc += getCurUs() - idle_start;
+}
+
 static void pktgen_xmit(struct pktgen_dev *pkt_dev)
 {
         struct net_device *odev = pkt_dev->odev;
@@ -3361,7 +3373,7 @@ static void pktgen_xmit(struct pktgen_dev *pkt_dev)
                 if (pkt_dev->delay_us == 0x7FFFFFFF) {
                         pkt_dev->next_tx_us = getCurUs() + pkt_dev->delay_us;
                         pkt_dev->next_tx_ns = pkt_dev->delay_ns;
-                        goto out;
+                        return;
                 }
         }
 
@@ -3373,26 +3385,14 @@ static void pktgen_xmit(struct pktgen_dev *pkt_dev)
         }
 
         txq = netdev_get_tx_queue(odev, queue_map);
-        if (netif_tx_queue_stopped(txq) ||
-            netif_tx_queue_frozen(txq) ||
-            need_resched()) {
-                u64 idle_start = getCurUs();
-
-                if (!netif_running(odev)) {
-                        pktgen_stop_device(pkt_dev);
-                        goto out;
-                }
-                if (need_resched())
-                        schedule();
-
-                pkt_dev->idle_acc += getCurUs() - idle_start;
-
-                if (netif_tx_queue_stopped(txq) ||
-                    netif_tx_queue_frozen(txq)) {
-                        pkt_dev->next_tx_us = getCurUs();       /* TODO */
-                        pkt_dev->next_tx_ns = 0;
-                        goto out;       /* Try the next interface */
-                }
+        /* Did we saturate the queue already? */
+        if (netif_tx_queue_stopped(txq) || netif_tx_queue_frozen(txq)) {
+                /* If device is down, then all queues are permnantly frozen */
+                if (netif_running(odev))
+                        idle(pkt_dev);
+                else
+                        pktgen_stop_device(pkt_dev);
+                return;
         }
 
         if (pkt_dev->last_ok || !pkt_dev->skb) {
@@ -3407,7 +3407,7 @@ static void pktgen_xmit(struct pktgen_dev *pkt_dev)
                                "allocate skb in fill_packet.\n");
                         schedule();
                         pkt_dev->clone_count--; /* back out increment, OOM */
-                        goto out;
+                        return;
                 }
                 pkt_dev->allocated_skbs++;
                 pkt_dev->clone_count = 0;       /* reset counter */
@@ -3419,9 +3419,9 @@ static void pktgen_xmit(struct pktgen_dev *pkt_dev)
         txq = netdev_get_tx_queue(odev, queue_map);
 
         __netif_tx_lock_bh(txq);
-        if (!netif_tx_queue_stopped(txq) &&
-            !netif_tx_queue_frozen(txq)) {
-
+        if (unlikely(netif_tx_queue_stopped(txq) || netif_tx_queue_frozen(txq)))
+                pkt_dev->last_ok = 0;
+        else {
                 atomic_inc(&(pkt_dev->skb->users));
               retry_now:
                 ret = (*xmit)(pkt_dev->skb, odev);
@@ -3458,13 +3458,6 @@ static void pktgen_xmit(struct pktgen_dev *pkt_dev)
                                 pkt_dev->next_tx_ns -= 1000;
                         }
                 }
-
-        else {                  /* Retry it next time */
-                pkt_dev->last_ok = 0;
-                pkt_dev->next_tx_us = getCurUs();       /* TODO */
-                pkt_dev->next_tx_ns = 0;
-        }
-
         __netif_tx_unlock_bh(txq);
 
         /* If pkt_dev->count is zero, then run forever */
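
One detail in the locked-transmit hunk is worth calling out: the queue check's polarity is inverted and wrapped in unlikely(), so the common case (queue open, packet sent) stays on the straight-line branch, and the stopped/frozen case merely records last_ok = 0 for the next pass. The kernel's unlikely() is built on GCC's __builtin_expect in <linux/compiler.h>; the standalone copy below, under an assumed name, is only to illustrate the hint.

#include <stdio.h>

/* Same construction the kernel uses for unlikely(); redefined here under
 * another name so this sketch builds standalone. */
#define hint_unlikely(x)        __builtin_expect(!!(x), 0)

static int queue_stopped;

static void send_one(void)
{
        if (hint_unlikely(queue_stopped))
                printf("queue stopped: retry next pass\n");  /* cold path */
        else
                printf("sending\n");                         /* hot path */
}

int main(void)
{
        send_one();
        queue_stopped = 1;
        send_one();
        return 0;
}

The hint changes code layout, not semantics: both calls print the expected messages either way, but the compiler is free to place the stopped path out of line, keeping the hot transmit path compact.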