Diffstat (limited to 'net/core/pktgen.c')
 net/core/pktgen.c | 160
 1 file changed, 76 insertions(+), 84 deletions(-)
diff --git a/net/core/pktgen.c b/net/core/pktgen.c
index 0bcecbf06581..4d11c28ca8ca 100644
--- a/net/core/pktgen.c
+++ b/net/core/pktgen.c
@@ -192,11 +192,10 @@
 #define F_QUEUE_MAP_CPU (1<<14)	/* queue map mirrors smp_processor_id() */
 
 /* Thread control flag bits */
-#define T_TERMINATE   (1<<0)
-#define T_STOP        (1<<1)	/* Stop run */
-#define T_RUN         (1<<2)	/* Start run */
-#define T_REMDEVALL   (1<<3)	/* Remove all devs */
-#define T_REMDEV      (1<<4)	/* Remove one dev */
+#define T_STOP        (1<<0)	/* Stop run */
+#define T_RUN         (1<<1)	/* Start run */
+#define T_REMDEVALL   (1<<2)	/* Remove all devs */
+#define T_REMDEV      (1<<3)	/* Remove one dev */
 
 /* If lock -- can be removed after some work */
 #define   if_lock(t)           spin_lock(&(t->if_lock));
@@ -2105,7 +2104,7 @@ static void pktgen_setup_inject(struct pktgen_dev *pkt_dev)
 
 static void spin(struct pktgen_dev *pkt_dev, ktime_t spin_until)
 {
-	ktime_t start;
+	ktime_t start_time, end_time;
 	s32 remaining;
 	struct hrtimer_sleeper t;
 
@@ -2116,7 +2115,7 @@ static void spin(struct pktgen_dev *pkt_dev, ktime_t spin_until)
 	if (remaining <= 0)
 		return;
 
-	start = ktime_now();
+	start_time = ktime_now();
 	if (remaining < 100)
 		udelay(remaining);	/* really small just spin */
 	else {
@@ -2135,7 +2134,10 @@ static void spin(struct pktgen_dev *pkt_dev, ktime_t spin_until)
 		} while (t.task && pkt_dev->running && !signal_pending(current));
 		__set_current_state(TASK_RUNNING);
 	}
-	pkt_dev->idle_acc += ktime_to_ns(ktime_sub(ktime_now(), start));
+	end_time = ktime_now();
+
+	pkt_dev->idle_acc += ktime_to_ns(ktime_sub(end_time, start_time));
+	pkt_dev->next_tx = ktime_add_ns(end_time, pkt_dev->delay);
 }
 
 static inline void set_pkt_overhead(struct pktgen_dev *pkt_dev)
@@ -3365,19 +3367,29 @@ static void pktgen_rem_thread(struct pktgen_thread *t)
 	mutex_unlock(&pktgen_thread_lock);
 }
 
-static void idle(struct pktgen_dev *pkt_dev)
+static void pktgen_resched(struct pktgen_dev *pkt_dev)
 {
 	ktime_t idle_start = ktime_now();
+	schedule();
+	pkt_dev->idle_acc += ktime_to_ns(ktime_sub(ktime_now(), idle_start));
+}
 
-	if (need_resched())
-		schedule();
-	else
-		cpu_relax();
+static void pktgen_wait_for_skb(struct pktgen_dev *pkt_dev)
+{
+	ktime_t idle_start = ktime_now();
 
+	while (atomic_read(&(pkt_dev->skb->users)) != 1) {
+		if (signal_pending(current))
+			break;
+
+		if (need_resched())
+			pktgen_resched(pkt_dev);
+		else
+			cpu_relax();
+	}
 	pkt_dev->idle_acc += ktime_to_ns(ktime_sub(ktime_now(), idle_start));
 }
 
-
 static void pktgen_xmit(struct pktgen_dev *pkt_dev)
 {
 	struct net_device *odev = pkt_dev->odev;
@@ -3387,36 +3399,21 @@ static void pktgen_xmit(struct pktgen_dev *pkt_dev)
 	u16 queue_map;
 	int ret;
 
-	if (pkt_dev->delay) {
-		spin(pkt_dev, pkt_dev->next_tx);
-
-		/* This is max DELAY, this has special meaning of
-		 * "never transmit"
-		 */
-		if (pkt_dev->delay == ULLONG_MAX) {
-			pkt_dev->next_tx = ktime_add_ns(ktime_now(), ULONG_MAX);
-			return;
-		}
-	}
-
-	if (!pkt_dev->skb) {
-		set_cur_queue_map(pkt_dev);
-		queue_map = pkt_dev->cur_queue_map;
-	} else {
-		queue_map = skb_get_queue_mapping(pkt_dev->skb);
+	/* If device is offline, then don't send */
+	if (unlikely(!netif_running(odev) || !netif_carrier_ok(odev))) {
+		pktgen_stop_device(pkt_dev);
+		return;
 	}
 
-	txq = netdev_get_tx_queue(odev, queue_map);
-	/* Did we saturate the queue already? */
-	if (netif_tx_queue_stopped(txq) || netif_tx_queue_frozen(txq)) {
-		/* If device is down, then all queues are permnantly frozen */
-		if (netif_running(odev))
-			idle(pkt_dev);
-		else
-			pktgen_stop_device(pkt_dev);
+	/* This is max DELAY, this has special meaning of
+	 * "never transmit"
+	 */
+	if (unlikely(pkt_dev->delay == ULLONG_MAX)) {
+		pkt_dev->next_tx = ktime_add_ns(ktime_now(), ULONG_MAX);
 		return;
 	}
 
+	/* If no skb or clone count exhausted then get new one */
 	if (!pkt_dev->skb || (pkt_dev->last_ok &&
 	    ++pkt_dev->clone_count >= pkt_dev->clone_skb)) {
 		/* build a new pkt */
@@ -3435,54 +3432,45 @@ static void pktgen_xmit(struct pktgen_dev *pkt_dev)
 		pkt_dev->clone_count = 0;	/* reset counter */
 	}
 
-	/* fill_packet() might have changed the queue */
+	if (pkt_dev->delay && pkt_dev->last_ok)
+		spin(pkt_dev, pkt_dev->next_tx);
+
 	queue_map = skb_get_queue_mapping(pkt_dev->skb);
 	txq = netdev_get_tx_queue(odev, queue_map);
 
 	__netif_tx_lock_bh(txq);
-	if (unlikely(netif_tx_queue_stopped(txq) || netif_tx_queue_frozen(txq)))
-		pkt_dev->last_ok = 0;
-	else {
-		atomic_inc(&(pkt_dev->skb->users));
+	atomic_inc(&(pkt_dev->skb->users));
 
-	retry_now:
+	if (unlikely(netif_tx_queue_stopped(txq) || netif_tx_queue_frozen(txq)))
+		ret = NETDEV_TX_BUSY;
+	else
 		ret = (*xmit)(pkt_dev->skb, odev);
-		switch (ret) {
-		case NETDEV_TX_OK:
-			txq_trans_update(txq);
-			pkt_dev->last_ok = 1;
-			pkt_dev->sofar++;
-			pkt_dev->seq_num++;
-			pkt_dev->tx_bytes += pkt_dev->cur_pkt_size;
-			break;
-		case NETDEV_TX_LOCKED:
-			cpu_relax();
-			goto retry_now;
-		default: /* Drivers are not supposed to return other values! */
-			if (net_ratelimit())
-				pr_info("pktgen: %s xmit error: %d\n",
-					odev->name, ret);
-			pkt_dev->errors++;
-			/* fallthru */
-		case NETDEV_TX_BUSY:
-			/* Retry it next time */
-			atomic_dec(&(pkt_dev->skb->users));
-			pkt_dev->last_ok = 0;
-		}
-
-		if (pkt_dev->delay)
-			pkt_dev->next_tx = ktime_add_ns(ktime_now(),
-							pkt_dev->delay);
+
+	switch (ret) {
+	case NETDEV_TX_OK:
+		txq_trans_update(txq);
+		pkt_dev->last_ok = 1;
+		pkt_dev->sofar++;
+		pkt_dev->seq_num++;
+		pkt_dev->tx_bytes += pkt_dev->cur_pkt_size;
+		break;
+	default: /* Drivers are not supposed to return other values! */
+		if (net_ratelimit())
+			pr_info("pktgen: %s xmit error: %d\n",
+				odev->name, ret);
+		pkt_dev->errors++;
+		/* fallthru */
+	case NETDEV_TX_LOCKED:
+	case NETDEV_TX_BUSY:
+		/* Retry it next time */
+		atomic_dec(&(pkt_dev->skb->users));
+		pkt_dev->last_ok = 0;
 	}
 	__netif_tx_unlock_bh(txq);
 
 	/* If pkt_dev->count is zero, then run forever */
 	if ((pkt_dev->count != 0) && (pkt_dev->sofar >= pkt_dev->count)) {
-		while (atomic_read(&(pkt_dev->skb->users)) != 1) {
-			if (signal_pending(current))
-				break;
-			idle(pkt_dev);
-		}
+		pktgen_wait_for_skb(pkt_dev);
 
 		/* Done with this */
 		pktgen_stop_device(pkt_dev);
@@ -3515,20 +3503,24 @@ static int pktgen_thread_worker(void *arg)
 	while (!kthread_should_stop()) {
 		pkt_dev = next_to_run(t);
 
-		if (!pkt_dev &&
-		    (t->control & (T_STOP | T_RUN | T_REMDEVALL | T_REMDEV))
-		    == 0) {
-			prepare_to_wait(&(t->queue), &wait,
-					TASK_INTERRUPTIBLE);
-			schedule_timeout(HZ / 10);
-			finish_wait(&(t->queue), &wait);
+		if (unlikely(!pkt_dev && t->control == 0)) {
+			wait_event_interruptible_timeout(t->queue,
+							 t->control != 0,
+							 HZ/10);
+			continue;
 		}
 
 		__set_current_state(TASK_RUNNING);
 
-		if (pkt_dev)
+		if (likely(pkt_dev)) {
 			pktgen_xmit(pkt_dev);
 
+			if (need_resched())
+				pktgen_resched(pkt_dev);
+			else
+				cpu_relax();
+		}
+
 		if (t->control & T_STOP) {
 			pktgen_stop(t);
 			t->control &= ~(T_STOP);