author	Stephen Hemminger <shemminger@vyatta.com>	2009-08-27 09:55:16 -0400
committer	David S. Miller <davem@davemloft.net>	2009-08-29 02:32:12 -0400
commit	fd29cf72621071d1d5f9bae634a4505b05f0e58b (patch)
tree	b5894ae4412740526255c1ac66edf4b85adc7712 /net/core
parent	5c9d191c166233e723e632e79bcca2127a5fece9 (diff)
pktgen: convert to use ktime_t
The kernel ktime_t is a nice generic infrastructure for managing high resolution times, as is done in pktgen.

Signed-off-by: Stephen Hemminger <shemminger@vyatta.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
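The timing pattern this conversion reduces to, as a minimal sketch: ktime_now() and delta_ns() below are the helpers the patch itself introduces, while wait_until() is a hypothetical, simplified stand-in for pktgen's spin()/idle() logic rather than code from the patch.

#include <linux/ktime.h>
#include <linux/sched.h>
#include <linux/time.h>

static inline ktime_t ktime_now(void)
{
	struct timespec ts;

	ktime_get_ts(&ts);
	return timespec_to_ktime(ts);
}

/* Signed difference a - b in nanoseconds. */
static inline s64 delta_ns(ktime_t a, ktime_t b)
{
	return ktime_to_ns(ktime_sub(a, b));
}

/* Hypothetical helper: wait until the absolute time 'until' and return
 * the nanoseconds spent, for the caller to add to its idle accumulator. */
static u64 wait_until(ktime_t until)
{
	ktime_t start = ktime_now();
	ktime_t now = start;

	while (delta_ns(until, now) > 0) {
		if (need_resched())
			schedule();
		else
			cpu_relax();
		now = ktime_now();
	}

	return ktime_to_ns(ktime_sub(now, start));
}

With this, a per-device delay is simply a u64 nanosecond count and the next transmit time an absolute ktime_t, e.g. pkt_dev->next_tx = ktime_add_ns(ktime_now(), pkt_dev->delay).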
Diffstat (limited to 'net/core')
-rw-r--r--	net/core/pktgen.c	184
1 file changed, 84 insertions, 100 deletions
diff --git a/net/core/pktgen.c b/net/core/pktgen.c
index b7302e18764a..bede00b54038 100644
--- a/net/core/pktgen.c
+++ b/net/core/pktgen.c
@@ -246,16 +246,14 @@ struct pktgen_dev {
 	int max_pkt_size;	/* = ETH_ZLEN; */
 	int pkt_overhead;	/* overhead for MPLS, VLANs, IPSEC etc */
 	int nfrags;
-	__u32 delay_us;		/* Default delay */
-	__u32 delay_ns;
+	u64 delay;		/* nano-seconds */
+
 	__u64 count;		/* Default No packets to send */
 	__u64 sofar;		/* How many pkts we've sent so far */
 	__u64 tx_bytes;		/* How many bytes we've transmitted */
 	__u64 errors;		/* Errors when trying to transmit, pkts will be re-sent */
 
 	/* runtime counters relating to clone_skb */
-	__u64 next_tx_us;	/* timestamp of when to tx next */
-	__u32 next_tx_ns;
 
 	__u64 allocated_skbs;
 	__u32 clone_count;
@@ -263,9 +261,11 @@ struct pktgen_dev {
 	 * Or a failed transmit of some sort? This will keep
 	 * sequence numbers in order, for example.
 	 */
-	__u64 started_at;	/* micro-seconds */
-	__u64 stopped_at;	/* micro-seconds */
-	__u64 idle_acc;		/* micro-seconds */
+	ktime_t next_tx;
+	ktime_t started_at;
+	ktime_t stopped_at;
+	u64 idle_acc;		/* nano-seconds */
+
 	__u32 seq_num;
 
 	int clone_skb;		/* Use multiple SKBs during packet gen. If this number
@@ -397,23 +397,20 @@ struct pktgen_thread {
 #define REMOVE 1
 #define FIND   0
 
-/** Convert to micro-seconds */
-static inline __u64 tv_to_us(const struct timeval *tv)
+static inline ktime_t ktime_now(void)
 {
-	__u64 us = tv->tv_usec;
-	us += (__u64) tv->tv_sec * (__u64) 1000000;
-	return us;
+	struct timespec ts;
+	ktime_get_ts(&ts);
+
+	return timespec_to_ktime(ts);
 }
 
-static __u64 getCurUs(void)
+/* This works even if 32 bit because of careful byte order choice */
+static inline int ktime_lt(const ktime_t cmp1, const ktime_t cmp2)
 {
-	struct timeval tv;
-	do_gettimeofday(&tv);
-	return tv_to_us(&tv);
+	return cmp1.tv64 < cmp2.tv64;
 }
 
-/* old include end */
-
 static const char version[] __initconst = VERSION;
 
 static int pktgen_remove_device(struct pktgen_thread *t, struct pktgen_dev *i);
@@ -510,9 +507,8 @@ static const struct file_operations pktgen_fops = {
 static int pktgen_if_show(struct seq_file *seq, void *v)
 {
 	const struct pktgen_dev *pkt_dev = seq->private;
-	__u64 sa;
-	__u64 stopped;
-	__u64 now = getCurUs();
+	ktime_t stopped;
+	u64 idle;
 
 	seq_printf(seq,
 		   "Params: count %llu min_pkt_size: %u max_pkt_size: %u\n",
@@ -520,9 +516,8 @@ static int pktgen_if_show(struct seq_file *seq, void *v)
 		   pkt_dev->max_pkt_size);
 
 	seq_printf(seq,
-		   " frags: %d delay: %u clone_skb: %d ifname: %s\n",
-		   pkt_dev->nfrags,
-		   1000 * pkt_dev->delay_us + pkt_dev->delay_ns,
+		   " frags: %d delay: %llu clone_skb: %d ifname: %s\n",
+		   pkt_dev->nfrags, (unsigned long long) pkt_dev->delay,
 		   pkt_dev->clone_skb, pkt_dev->odev->name);
 
 	seq_printf(seq, " flows: %u flowlen: %u\n", pkt_dev->cflows,
@@ -654,17 +649,21 @@ static int pktgen_if_show(struct seq_file *seq, void *v)
 
 	seq_puts(seq, "\n");
 
-	sa = pkt_dev->started_at;
-	stopped = pkt_dev->stopped_at;
-	if (pkt_dev->running)
-		stopped = now;	/* not really stopped, more like last-running-at */
+	/* not really stopped, more like last-running-at */
+	stopped = pkt_dev->running ? ktime_now() : pkt_dev->stopped_at;
+	idle = pkt_dev->idle_acc;
+	do_div(idle, NSEC_PER_USEC);
 
 	seq_printf(seq,
-		   "Current:\n pkts-sofar: %llu errors: %llu\n started: %lluus stopped: %lluus idle: %lluus\n",
+		   "Current:\n pkts-sofar: %llu errors: %llu\n",
 		   (unsigned long long)pkt_dev->sofar,
-		   (unsigned long long)pkt_dev->errors, (unsigned long long)sa,
-		   (unsigned long long)stopped,
-		   (unsigned long long)pkt_dev->idle_acc);
+		   (unsigned long long)pkt_dev->errors);
+
+	seq_printf(seq,
+		   " started: %lluus stopped: %lluus idle: %lluus\n",
+		   (unsigned long long) ktime_to_us(pkt_dev->started_at),
+		   (unsigned long long) ktime_to_us(stopped),
+		   (unsigned long long) idle);
 
 	seq_printf(seq,
 		   " seq_num: %d cur_dst_mac_offset: %d cur_src_mac_offset: %d\n",
@@ -950,15 +949,13 @@ static ssize_t pktgen_if_write(struct file *file,
 			return len;
 		}
 		i += len;
-		if (value == 0x7FFFFFFF) {
-			pkt_dev->delay_us = 0x7FFFFFFF;
-			pkt_dev->delay_ns = 0;
-		} else {
-			pkt_dev->delay_us = value / 1000;
-			pkt_dev->delay_ns = value % 1000;
-		}
-		sprintf(pg_result, "OK: delay=%u",
-			1000 * pkt_dev->delay_us + pkt_dev->delay_ns);
+		if (value == 0x7FFFFFFF)
+			pkt_dev->delay = ULLONG_MAX;
+		else
+			pkt_dev->delay = (u64)value * NSEC_PER_USEC;
+
+		sprintf(pg_result, "OK: delay=%llu",
+			(unsigned long long) pkt_dev->delay);
 		return count;
 	}
 	if (!strcmp(name, "udp_src_min")) {
@@ -2089,27 +2086,33 @@ static void pktgen_setup_inject(struct pktgen_dev *pkt_dev)
 	pkt_dev->nflows = 0;
 }
 
-static void spin(struct pktgen_dev *pkt_dev, __u64 spin_until_us)
+static inline s64 delta_ns(ktime_t a, ktime_t b)
 {
-	__u64 start;
-	__u64 now;
+	return ktime_to_ns(ktime_sub(a, b));
+}
+
+static void spin(struct pktgen_dev *pkt_dev, ktime_t spin_until)
+{
+	ktime_t start, now;
+	s64 dt;
+
+	start = now = ktime_now();
 
-	start = now = getCurUs();
-	while (now < spin_until_us) {
+	while ((dt = delta_ns(spin_until, now)) > 0) {
 		/* TODO: optimize sleeping behavior */
-		if (spin_until_us - now > jiffies_to_usecs(1) + 1)
+		if (dt > TICK_NSEC)
 			schedule_timeout_interruptible(1);
-		else if (spin_until_us - now > 100) {
+		else if (dt > 100*NSEC_PER_USEC) {
 			if (!pkt_dev->running)
 				return;
 			if (need_resched())
 				schedule();
 		}
 
-		now = getCurUs();
+		now = ktime_now();
 	}
 
-	pkt_dev->idle_acc += now - start;
+	pkt_dev->idle_acc += ktime_to_ns(ktime_sub(now, start));
 }
 
 static inline void set_pkt_overhead(struct pktgen_dev *pkt_dev)
@@ -3070,9 +3073,9 @@ static void pktgen_run(struct pktgen_thread *t)
 		pktgen_clear_counters(pkt_dev);
 		pkt_dev->running = 1;	/* Cranke yeself! */
 		pkt_dev->skb = NULL;
-		pkt_dev->started_at = getCurUs();
-		pkt_dev->next_tx_us = getCurUs();	/* Transmit immediately */
-		pkt_dev->next_tx_ns = 0;
+		pkt_dev->started_at =
+		pkt_dev->next_tx = ktime_now();
+
 		set_pkt_overhead(pkt_dev);
 
 		strcpy(pkt_dev->result, "Starting");
@@ -3188,28 +3191,21 @@ static void pktgen_reset_all_threads(void)
 
 static void show_results(struct pktgen_dev *pkt_dev, int nr_frags)
 {
-	__u64 total_us, bps, mbps, pps, idle;
+	__u64 bps, mbps, pps;
 	char *p = pkt_dev->result;
-
-	total_us = pkt_dev->stopped_at - pkt_dev->started_at;
-
-	idle = pkt_dev->idle_acc;
-
-	p += sprintf(p, "OK: %llu(c%llu+d%llu) usec, %llu (%dbyte,%dfrags)\n",
-		     (unsigned long long)total_us,
-		     (unsigned long long)(total_us - idle),
-		     (unsigned long long)idle,
+	ktime_t elapsed = ktime_sub(pkt_dev->stopped_at,
+				    pkt_dev->started_at);
+	ktime_t idle = ns_to_ktime(pkt_dev->idle_acc);
+
+	p += sprintf(p, "OK: %llu(c%llu+d%llu) nsec, %llu (%dbyte,%dfrags)\n",
+		     (unsigned long long)ktime_to_us(elapsed),
+		     (unsigned long long)ktime_to_us(ktime_sub(elapsed, idle)),
+		     (unsigned long long)ktime_to_us(idle),
 		     (unsigned long long)pkt_dev->sofar,
 		     pkt_dev->cur_pkt_size, nr_frags);
 
-	pps = pkt_dev->sofar * USEC_PER_SEC;
-
-	while ((total_us >> 32) != 0) {
-		pps >>= 1;
-		total_us >>= 1;
-	}
-
-	do_div(pps, total_us);
+	pps = div64_u64(pkt_dev->sofar * NSEC_PER_SEC,
+			ktime_to_ns(elapsed));
 
 	bps = pps * 8 * pkt_dev->cur_pkt_size;
 
@@ -3235,7 +3231,7 @@ static int pktgen_stop_device(struct pktgen_dev *pkt_dev)
 
 	kfree_skb(pkt_dev->skb);
 	pkt_dev->skb = NULL;
-	pkt_dev->stopped_at = getCurUs();
+	pkt_dev->stopped_at = ktime_now();
 	pkt_dev->running = 0;
 
 	show_results(pkt_dev, nr_frags);
@@ -3254,7 +3250,7 @@ static struct pktgen_dev *next_to_run(struct pktgen_thread *t)
 			continue;
 		if (best == NULL)
 			best = pkt_dev;
-		else if (pkt_dev->next_tx_us < best->next_tx_us)
+		else if (ktime_lt(pkt_dev->next_tx, best->next_tx))
 			best = pkt_dev;
 	}
 	if_unlock(t);
@@ -3343,16 +3339,17 @@ static void pktgen_rem_thread(struct pktgen_thread *t)
 
 static void idle(struct pktgen_dev *pkt_dev)
 {
-	u64 idle_start = getCurUs();
+	ktime_t idle_start = ktime_now();
 
 	if (need_resched())
 		schedule();
 	else
 		cpu_relax();
 
-	pkt_dev->idle_acc += getCurUs() - idle_start;
+	pkt_dev->idle_acc += ktime_to_ns(ktime_sub(ktime_now(), idle_start));
 }
 
+
 static void pktgen_xmit(struct pktgen_dev *pkt_dev)
 {
 	struct net_device *odev = pkt_dev->odev;
@@ -3362,19 +3359,15 @@ static void pktgen_xmit(struct pktgen_dev *pkt_dev)
 	u16 queue_map;
 	int ret;
 
-	if (pkt_dev->delay_us || pkt_dev->delay_ns) {
-		u64 now;
-
-		now = getCurUs();
-		if (now < pkt_dev->next_tx_us)
-			spin(pkt_dev, pkt_dev->next_tx_us);
+	if (pkt_dev->delay) {
+		if (ktime_lt(ktime_now(), pkt_dev->next_tx))
+			spin(pkt_dev, pkt_dev->next_tx);
 
 		/* This is max DELAY, this has special meaning of
 		 * "never transmit"
 		 */
-		if (pkt_dev->delay_us == 0x7FFFFFFF) {
-			pkt_dev->next_tx_us = getCurUs() + pkt_dev->delay_us;
-			pkt_dev->next_tx_ns = pkt_dev->delay_ns;
+		if (pkt_dev->delay == ULLONG_MAX) {
+			pkt_dev->next_tx = ktime_add_ns(ktime_now(), ULONG_MAX);
 			return;
 		}
 	}
@@ -3450,32 +3443,24 @@ static void pktgen_xmit(struct pktgen_dev *pkt_dev)
 			pkt_dev->last_ok = 0;
 		}
 
-		if (pkt_dev->delay_us || pkt_dev->delay_ns) {
-			pkt_dev->next_tx_us = getCurUs();
-			pkt_dev->next_tx_ns = 0;
-
-			pkt_dev->next_tx_us += pkt_dev->delay_us;
-			pkt_dev->next_tx_ns += pkt_dev->delay_ns;
-
-			if (pkt_dev->next_tx_ns > 1000) {
-				pkt_dev->next_tx_us++;
-				pkt_dev->next_tx_ns -= 1000;
-			}
-		}
+		if (pkt_dev->delay)
+			pkt_dev->next_tx = ktime_add_ns(ktime_now(),
+							pkt_dev->delay);
 	}
 	__netif_tx_unlock_bh(txq);
 
 	/* If pkt_dev->count is zero, then run forever */
 	if ((pkt_dev->count != 0) && (pkt_dev->sofar >= pkt_dev->count)) {
 		if (atomic_read(&(pkt_dev->skb->users)) != 1) {
-			u64 idle_start = getCurUs();
+			ktime_t idle_start = ktime_now();
 			while (atomic_read(&(pkt_dev->skb->users)) != 1) {
 				if (signal_pending(current)) {
 					break;
 				}
 				schedule();
 			}
-			pkt_dev->idle_acc += getCurUs() - idle_start;
+			pkt_dev->idle_acc += ktime_to_ns(ktime_sub(ktime_now(),
+								   idle_start));
 		}
 
 		/* Done with this */
@@ -3634,8 +3619,7 @@ static int pktgen_add_device(struct pktgen_thread *t, const char *ifname)
 	pkt_dev->max_pkt_size = ETH_ZLEN;
 	pkt_dev->nfrags = 0;
 	pkt_dev->clone_skb = pg_clone_skb_d;
-	pkt_dev->delay_us = pg_delay_d / 1000;
-	pkt_dev->delay_ns = pg_delay_d % 1000;
+	pkt_dev->delay = pg_delay_d;
 	pkt_dev->count = pg_count_d;
 	pkt_dev->sofar = 0;
 	pkt_dev->udp_src_min = 9;	/* sink port */