 net/core/pktgen.c | 151 ++++++++++++++++++++++-----------------------
 1 file changed, 72 insertions(+), 79 deletions(-)
diff --git a/net/core/pktgen.c b/net/core/pktgen.c
index 475f52530b6c..4d11c28ca8ca 100644
--- a/net/core/pktgen.c
+++ b/net/core/pktgen.c
@@ -2104,7 +2104,7 @@ static void pktgen_setup_inject(struct pktgen_dev *pkt_dev)
 
 static void spin(struct pktgen_dev *pkt_dev, ktime_t spin_until)
 {
-        ktime_t start;
+        ktime_t start_time, end_time;
         s32 remaining;
         struct hrtimer_sleeper t;
 
@@ -2115,7 +2115,7 @@ static void spin(struct pktgen_dev *pkt_dev, ktime_t spin_until)
         if (remaining <= 0)
                 return;
 
-        start = ktime_now();
+        start_time = ktime_now();
         if (remaining < 100)
                 udelay(remaining); /* really small just spin */
         else {
@@ -2134,7 +2134,10 @@ static void spin(struct pktgen_dev *pkt_dev, ktime_t spin_until)
         } while (t.task && pkt_dev->running && !signal_pending(current));
         __set_current_state(TASK_RUNNING);
         }
-        pkt_dev->idle_acc += ktime_to_ns(ktime_sub(ktime_now(), start));
+        end_time = ktime_now();
+
+        pkt_dev->idle_acc += ktime_to_ns(ktime_sub(end_time, start_time));
+        pkt_dev->next_tx = ktime_add_ns(end_time, pkt_dev->delay);
 }
 
 static inline void set_pkt_overhead(struct pktgen_dev *pkt_dev)
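
The spin() rework above reads the clock once on wakeup and uses that single end_time both to account idle time and to schedule the next transmit, instead of calling ktime_now() again later in the send path. A minimal userspace sketch of the same pattern, with clock_gettime() standing in for ktime_now() (struct pacer and its field names are illustrative, not from the kernel source):

#include <stdint.h>
#include <time.h>

struct pacer {
        uint64_t idle_acc;      /* total nanoseconds spent waiting */
        uint64_t next_tx;       /* absolute deadline of the next send */
        uint64_t delay;         /* inter-packet gap in nanoseconds */
};

static uint64_t now_ns(void)
{
        struct timespec ts;

        clock_gettime(CLOCK_MONOTONIC, &ts);
        return (uint64_t)ts.tv_sec * 1000000000ull + ts.tv_nsec;
}

static void spin_until(struct pacer *p, uint64_t deadline)
{
        uint64_t start = now_ns(), end;

        while (now_ns() < deadline)
                ;               /* busy wait; the kernel code sleeps on an
                                 * hrtimer and only spins for tiny delays */

        end = now_ns();
        p->idle_acc += end - start;
        p->next_tx = end + p->delay;    /* one clock read serves both */
}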
@@ -3364,19 +3367,29 @@ static void pktgen_rem_thread(struct pktgen_thread *t)
         mutex_unlock(&pktgen_thread_lock);
 }
 
-static void idle(struct pktgen_dev *pkt_dev)
+static void pktgen_resched(struct pktgen_dev *pkt_dev)
 {
         ktime_t idle_start = ktime_now();
+        schedule();
+        pkt_dev->idle_acc += ktime_to_ns(ktime_sub(ktime_now(), idle_start));
+}
 
-        if (need_resched())
-                schedule();
-        else
-                cpu_relax();
+static void pktgen_wait_for_skb(struct pktgen_dev *pkt_dev)
+{
+        ktime_t idle_start = ktime_now();
 
+        while (atomic_read(&(pkt_dev->skb->users)) != 1) {
+                if (signal_pending(current))
+                        break;
+
+                if (need_resched())
+                        pktgen_resched(pkt_dev);
+                else
+                        cpu_relax();
+        }
         pkt_dev->idle_acc += ktime_to_ns(ktime_sub(ktime_now(), idle_start));
 }
 
-
 static void pktgen_xmit(struct pktgen_dev *pkt_dev)
 {
         struct net_device *odev = pkt_dev->odev;
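
pktgen_wait_for_skb() spins until the driver drops its reference and pkt_dev->skb->users falls back to 1, yielding via pktgen_resched() whenever the scheduler has other work, and bailing out on a pending signal. A userspace analogue of that wait using C11 atomics, with sched_yield() in place of schedule() (the names below are illustrative):

#include <sched.h>
#include <stdatomic.h>

/* Block until we hold the only reference to a shared buffer,
 * yielding the CPU rather than burning it while we wait. */
static void wait_for_sole_owner(atomic_int *refcount)
{
        while (atomic_load(refcount) != 1)
                sched_yield();  /* the kernel version also checks
                                 * need_resched() and signal_pending() */
}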
@@ -3386,36 +3399,21 @@ static void pktgen_xmit(struct pktgen_dev *pkt_dev)
         u16 queue_map;
         int ret;
 
-        if (pkt_dev->delay) {
-                spin(pkt_dev, pkt_dev->next_tx);
-
-                /* This is max DELAY, this has special meaning of
-                 * "never transmit"
-                 */
-                if (pkt_dev->delay == ULLONG_MAX) {
-                        pkt_dev->next_tx = ktime_add_ns(ktime_now(), ULONG_MAX);
-                        return;
-                }
-        }
-
-        if (!pkt_dev->skb) {
-                set_cur_queue_map(pkt_dev);
-                queue_map = pkt_dev->cur_queue_map;
-        } else {
-                queue_map = skb_get_queue_mapping(pkt_dev->skb);
+        /* If device is offline, then don't send */
+        if (unlikely(!netif_running(odev) || !netif_carrier_ok(odev))) {
+                pktgen_stop_device(pkt_dev);
+                return;
         }
 
-        txq = netdev_get_tx_queue(odev, queue_map);
-        /* Did we saturate the queue already? */
-        if (netif_tx_queue_stopped(txq) || netif_tx_queue_frozen(txq)) {
-                /* If device is down, then all queues are permnantly frozen */
-                if (netif_running(odev))
-                        idle(pkt_dev);
-                else
-                        pktgen_stop_device(pkt_dev);
+        /* This is max DELAY, this has special meaning of
+         * "never transmit"
+         */
+        if (unlikely(pkt_dev->delay == ULLONG_MAX)) {
+                pkt_dev->next_tx = ktime_add_ns(ktime_now(), ULONG_MAX);
                 return;
         }
 
+        /* If no skb or clone count exhausted then get new one */
         if (!pkt_dev->skb || (pkt_dev->last_ok &&
                               ++pkt_dev->clone_count >= pkt_dev->clone_skb)) {
                 /* build a new pkt */
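
Reordered this way, pktgen_xmit() opens with flat guard clauses: return if the device is offline, return if the delay is the ULLONG_MAX "never transmit" sentinel, and only then touch the skb. The shape of that refactor in a self-contained sketch (struct fake_dev and may_transmit() are illustrative stand-ins, not kernel APIs):

#include <limits.h>
#include <stdbool.h>

struct fake_dev {
        bool running;                   /* netif_running() stand-in */
        bool carrier;                   /* netif_carrier_ok() stand-in */
        unsigned long long delay;       /* ns between packets */
};

#define NEVER_TX ULLONG_MAX             /* same sentinel the patch tests */

static bool may_transmit(const struct fake_dev *d)
{
        if (!d->running || !d->carrier)
                return false;           /* offline: caller stops the device */
        if (d->delay == NEVER_TX)
                return false;           /* "never transmit" delay */
        return true;                    /* safe to build/clone and send */
}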
@@ -3434,54 +3432,45 @@ static void pktgen_xmit(struct pktgen_dev *pkt_dev)
                 pkt_dev->clone_count = 0; /* reset counter */
         }
 
-        /* fill_packet() might have changed the queue */
+        if (pkt_dev->delay && pkt_dev->last_ok)
+                spin(pkt_dev, pkt_dev->next_tx);
+
         queue_map = skb_get_queue_mapping(pkt_dev->skb);
         txq = netdev_get_tx_queue(odev, queue_map);
 
         __netif_tx_lock_bh(txq);
-        if (unlikely(netif_tx_queue_stopped(txq) || netif_tx_queue_frozen(txq)))
-                pkt_dev->last_ok = 0;
-        else {
-                atomic_inc(&(pkt_dev->skb->users));
+        atomic_inc(&(pkt_dev->skb->users));
 
-        retry_now:
+        if (unlikely(netif_tx_queue_stopped(txq) || netif_tx_queue_frozen(txq)))
+                ret = NETDEV_TX_BUSY;
+        else
                 ret = (*xmit)(pkt_dev->skb, odev);
-                switch (ret) {
-                case NETDEV_TX_OK:
-                        txq_trans_update(txq);
-                        pkt_dev->last_ok = 1;
-                        pkt_dev->sofar++;
-                        pkt_dev->seq_num++;
-                        pkt_dev->tx_bytes += pkt_dev->cur_pkt_size;
-                        break;
-                case NETDEV_TX_LOCKED:
-                        cpu_relax();
-                        goto retry_now;
-                default: /* Drivers are not supposed to return other values! */
-                        if (net_ratelimit())
-                                pr_info("pktgen: %s xmit error: %d\n",
-                                        odev->name, ret);
-                        pkt_dev->errors++;
-                        /* fallthru */
-                case NETDEV_TX_BUSY:
-                        /* Retry it next time */
-                        atomic_dec(&(pkt_dev->skb->users));
-                        pkt_dev->last_ok = 0;
-                }
-
-                if (pkt_dev->delay)
-                        pkt_dev->next_tx = ktime_add_ns(ktime_now(),
-                                                        pkt_dev->delay);
+
+        switch (ret) {
+        case NETDEV_TX_OK:
+                txq_trans_update(txq);
+                pkt_dev->last_ok = 1;
+                pkt_dev->sofar++;
+                pkt_dev->seq_num++;
+                pkt_dev->tx_bytes += pkt_dev->cur_pkt_size;
+                break;
+        default: /* Drivers are not supposed to return other values! */
+                if (net_ratelimit())
+                        pr_info("pktgen: %s xmit error: %d\n",
+                                odev->name, ret);
+                pkt_dev->errors++;
+                /* fallthru */
+        case NETDEV_TX_LOCKED:
+        case NETDEV_TX_BUSY:
+                /* Retry it next time */
+                atomic_dec(&(pkt_dev->skb->users));
+                pkt_dev->last_ok = 0;
         }
         __netif_tx_unlock_bh(txq);
 
         /* If pkt_dev->count is zero, then run forever */
         if ((pkt_dev->count != 0) && (pkt_dev->sofar >= pkt_dev->count)) {
-                while (atomic_read(&(pkt_dev->skb->users)) != 1) {
-                        if (signal_pending(current))
-                                break;
-                        idle(pkt_dev);
-                }
+                pktgen_wait_for_skb(pkt_dev);
 
                 /* Done with this */
                 pktgen_stop_device(pkt_dev);
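
The restructured switch handles every return code in one pass: NETDEV_TX_OK updates the counters, an unknown value logs an error and then falls through into the shared cleanup that NETDEV_TX_LOCKED and NETDEV_TX_BUSY take, dropping the extra skb reference so the packet is retried on the next pass. The old retry_now: loop that spun on NETDEV_TX_LOCKED while holding the tx lock is gone. A compilable sketch of that fallthrough grouping (the enum and counters are illustrative):

#include <stdio.h>

enum tx_ret { TX_OK, TX_BUSY, TX_LOCKED, TX_BOGUS = 42 };

static void handle_tx(enum tx_ret ret, int *sent, int *errors, int *refs)
{
        switch (ret) {
        case TX_OK:
                (*sent)++;
                break;
        default:        /* drivers should not return anything else */
                fprintf(stderr, "unexpected xmit return: %d\n", ret);
                (*errors)++;
                /* fallthru */
        case TX_LOCKED:
        case TX_BUSY:
                (*refs)--;      /* undo the reference taken before xmit;
                                 * the packet will be retried next time */
        }
}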
@@ -3514,20 +3503,24 @@ static int pktgen_thread_worker(void *arg)
         while (!kthread_should_stop()) {
                 pkt_dev = next_to_run(t);
 
-                if (!pkt_dev &&
-                    (t->control & (T_STOP | T_RUN | T_REMDEVALL | T_REMDEV))
-                    == 0) {
-                        prepare_to_wait(&(t->queue), &wait,
-                                        TASK_INTERRUPTIBLE);
-                        schedule_timeout(HZ / 10);
-                        finish_wait(&(t->queue), &wait);
+                if (unlikely(!pkt_dev && t->control == 0)) {
+                        wait_event_interruptible_timeout(t->queue,
+                                                         t->control != 0,
+                                                         HZ/10);
+                        continue;
                 }
 
                 __set_current_state(TASK_RUNNING);
 
-                if (pkt_dev)
+                if (likely(pkt_dev)) {
                         pktgen_xmit(pkt_dev);
 
+                        if (need_resched())
+                                pktgen_resched(pkt_dev);
+                        else
+                                cpu_relax();
+                }
+
                 if (t->control & T_STOP) {
                         pktgen_stop(t);
                         t->control &= ~(T_STOP);
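
wait_event_interruptible_timeout() collapses the hand-rolled prepare_to_wait()/schedule_timeout()/finish_wait() sequence into one call: sleep until the condition (t->control != 0) becomes true or a tenth of a second passes, then loop and re-check. A rough pthreads analogue of that condition-or-timeout wait (struct waiter and the names are illustrative):

#include <pthread.h>
#include <time.h>

struct waiter {
        pthread_mutex_t lock;
        pthread_cond_t cond;
        int control;            /* the condition the worker waits on */
};

/* Sleep until w->control becomes nonzero or timeout_ns elapses;
 * the caller re-checks state afterwards, like the `continue` above. */
static void wait_for_control(struct waiter *w, long timeout_ns)
{
        struct timespec deadline;

        clock_gettime(CLOCK_REALTIME, &deadline);
        deadline.tv_nsec += timeout_ns;
        deadline.tv_sec += deadline.tv_nsec / 1000000000;
        deadline.tv_nsec %= 1000000000;

        pthread_mutex_lock(&w->lock);
        while (w->control == 0) {
                if (pthread_cond_timedwait(&w->cond, &w->lock, &deadline))
                        break;  /* timed out: give up and let the caller
                                 * re-evaluate, as the worker loop does */
        }
        pthread_mutex_unlock(&w->lock);
}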