 net/sched/sch_netem.c | 89 +++++++++++++++++++++++++++++++++---------
 1 file changed, 69 insertions(+), 20 deletions(-)
diff --git a/net/sched/sch_netem.c b/net/sched/sch_netem.c
index 2c38e3d07924..84658f60a872 100644
--- a/net/sched/sch_netem.c
+++ b/net/sched/sch_netem.c
@@ -77,6 +77,10 @@ struct netem_sched_data {
 	/* internal t(ime)fifo qdisc uses t_root and sch->limit */
 	struct rb_root t_root;
 
+	/* a linear queue; reduces rbtree rebalancing when jitter is low */
+	struct sk_buff	*t_head;
+	struct sk_buff	*t_tail;
+
 	/* optional qdisc for classful handling (NULL at netem init) */
 	struct Qdisc	*qdisc;
 
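The new t_head/t_tail pair is a plain singly-linked queue kept alongside the rbtree: packets whose time_to_send is not earlier than the current tail's are appended in O(1), and only genuinely reordered packets pay for an rbtree insert. A rough userspace sketch of why low jitter makes the append path dominate; the fixed-spacing-plus-uniform-jitter model and every name below are illustrative assumptions, not kernel code:

/*
 * Standalone userspace sketch, not kernel code: counts how often an
 * arriving packet's send time is already >= the last in-order packet's,
 * i.e. how often the O(1) tail-append path would be taken.
 */
#include <stdio.h>
#include <stdlib.h>

int main(void)
{
	const unsigned long long spacing = 100;	/* us between arrivals */
	const unsigned long long jitter = 150;	/* max extra delay, us */
	unsigned long long tail_time = 0;	/* time_to_send at the tail */
	unsigned long appends = 0, tree_inserts = 0;

	srand(1);
	for (int i = 0; i < 1000000; i++) {
		unsigned long long t = (unsigned long long)i * spacing +
				       (unsigned long long)rand() % (jitter + 1);

		if (t >= tail_time) {	/* in order: O(1) list append */
			tail_time = t;
			appends++;
		} else {		/* reordered: rbtree insert */
			tree_inserts++;
		}
	}
	printf("appends=%lu tree_inserts=%lu\n", appends, tree_inserts);
	return 0;
}

With jitter smaller than the inter-arrival spacing every packet takes the append path; reordering, and hence tree inserts, only appear once the jitter exceeds the spacing.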
@@ -369,26 +373,39 @@ static void tfifo_reset(struct Qdisc *sch)
 		rb_erase(&skb->rbnode, &q->t_root);
 		rtnl_kfree_skbs(skb, skb);
 	}
+
+	rtnl_kfree_skbs(q->t_head, q->t_tail);
+	q->t_head = NULL;
+	q->t_tail = NULL;
 }
 
 static void tfifo_enqueue(struct sk_buff *nskb, struct Qdisc *sch)
 {
 	struct netem_sched_data *q = qdisc_priv(sch);
 	u64 tnext = netem_skb_cb(nskb)->time_to_send;
-	struct rb_node **p = &q->t_root.rb_node, *parent = NULL;
 
-	while (*p) {
-		struct sk_buff *skb;
-
-		parent = *p;
-		skb = rb_to_skb(parent);
-		if (tnext >= netem_skb_cb(skb)->time_to_send)
-			p = &parent->rb_right;
-		else
-			p = &parent->rb_left;
+	if (!q->t_tail || tnext >= netem_skb_cb(q->t_tail)->time_to_send) {
+		if (q->t_tail)
+			q->t_tail->next = nskb;
+		else
+			q->t_head = nskb;
+		q->t_tail = nskb;
+	} else {
+		struct rb_node **p = &q->t_root.rb_node, *parent = NULL;
+
+		while (*p) {
+			struct sk_buff *skb;
+
+			parent = *p;
+			skb = rb_to_skb(parent);
+			if (tnext >= netem_skb_cb(skb)->time_to_send)
+				p = &parent->rb_right;
+			else
+				p = &parent->rb_left;
+		}
+		rb_link_node(&nskb->rbnode, parent, p);
+		rb_insert_color(&nskb->rbnode, &q->t_root);
 	}
-	rb_link_node(&nskb->rbnode, parent, p);
-	rb_insert_color(&nskb->rbnode, &q->t_root);
 	sch->q.qlen++;
 }
 
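tfifo_enqueue() now tries the O(1) tail append first and falls back to the O(log n) rbtree walk only for out-of-order packets. A minimal userspace model of that split follows; struct pkt stands in for sk_buff, and a sorted singly-linked list stands in for the rbtree, purely to keep the sketch short:

/* Userspace sketch of the enqueue split; all names are illustrative. */
#include <stdio.h>

struct pkt {
	unsigned long long time_to_send;
	struct pkt *next;
};

static struct pkt *t_head, *t_tail;	/* in-order fast path */
static struct pkt *tree;		/* stand-in for q->t_root */

static void enqueue(struct pkt *p)
{
	if (!t_tail || p->time_to_send >= t_tail->time_to_send) {
		/* O(1): append to the linear queue, as in the patch */
		p->next = NULL;
		if (t_tail)
			t_tail->next = p;
		else
			t_head = p;
		t_tail = p;
	} else {
		/* fallback for reordered packets: kept sorted (an rbtree
		 * in netem, a sorted singly-linked list here for brevity)
		 */
		struct pkt **pp = &tree;

		while (*pp && (*pp)->time_to_send <= p->time_to_send)
			pp = &(*pp)->next;
		p->next = *pp;
		*pp = p;
	}
}

int main(void)
{
	unsigned long long times[] = { 10, 20, 15, 30, 25 };
	struct pkt p[5];

	for (int i = 0; i < 5; i++) {
		p[i].time_to_send = times[i];
		enqueue(&p[i]);
	}
	/* 10, 20, 30 took the list; 15, 25 fell back to the "tree" */
	for (struct pkt *c = t_head; c; c = c->next)
		printf("list %llu\n", c->time_to_send);
	for (struct pkt *c = tree; c; c = c->next)
		printf("tree %llu\n", c->time_to_send);
	return 0;
}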
@@ -530,9 +547,16 @@ static int netem_enqueue(struct sk_buff *skb, struct Qdisc *sch,
 				t_skb = skb_rb_last(&q->t_root);
 				t_last = netem_skb_cb(t_skb);
 				if (!last ||
-				    t_last->time_to_send > last->time_to_send) {
+				    t_last->time_to_send > last->time_to_send)
+					last = t_last;
+			}
+			if (q->t_tail) {
+				struct netem_skb_cb *t_last =
+					netem_skb_cb(q->t_tail);
+
+				if (!last ||
+				    t_last->time_to_send > last->time_to_send)
 					last = t_last;
-				}
 			}
 
 			if (last) {
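With two holding structures, the "last queued packet" used for the rate calculation must be the latest of up to three candidates: the tail of sch->q, the last entry of the rbtree, and the tail of the new linear queue. A toy version of that max-of-three; the names and the 0-means-no-candidate convention are made up for illustration:

/* Sketch of the "latest queued send time" computation, not kernel API. */
#include <stdio.h>

static unsigned long long latest3(unsigned long long fifo_tail,
				  unsigned long long rb_last,
				  unsigned long long list_tail)
{
	unsigned long long last = 0;

	if (fifo_tail > last)
		last = fifo_tail;
	if (rb_last > last)	/* skb_rb_last(&q->t_root) in the patch */
		last = rb_last;
	if (list_tail > last)	/* q->t_tail in the patch */
		last = list_tail;
	return last;
}

int main(void)
{
	/* rbtree holds a straggler at t=500, linear queue ends at t=400 */
	printf("last=%llu\n", latest3(0, 500, 400));	/* prints last=500 */
	return 0;
}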
@@ -611,11 +635,38 @@ static void get_slot_next(struct netem_sched_data *q, u64 now)
 	q->slot.bytes_left = q->slot_config.max_bytes;
 }
 
+static struct sk_buff *netem_peek(struct netem_sched_data *q)
+{
+	struct sk_buff *skb = skb_rb_first(&q->t_root);
+	u64 t1, t2;
+
+	if (!skb)
+		return q->t_head;
+	if (!q->t_head)
+		return skb;
+
+	t1 = netem_skb_cb(skb)->time_to_send;
+	t2 = netem_skb_cb(q->t_head)->time_to_send;
+	if (t1 < t2)
+		return skb;
+	return q->t_head;
+}
+
+static void netem_erase_head(struct netem_sched_data *q, struct sk_buff *skb)
+{
+	if (skb == q->t_head) {
+		q->t_head = skb->next;
+		if (!q->t_head)
+			q->t_tail = NULL;
+	} else {
+		rb_erase(&skb->rbnode, &q->t_root);
+	}
+}
+
 static struct sk_buff *netem_dequeue(struct Qdisc *sch)
 {
 	struct netem_sched_data *q = qdisc_priv(sch);
 	struct sk_buff *skb;
-	struct rb_node *p;
 
 tfifo_dequeue:
 	skb = __qdisc_dequeue_head(&sch->q);
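netem_peek() and netem_erase_head() turn dequeue into a two-way merge: peek returns whichever of the two heads is due first, and erase unlinks the packet from whichever structure owns it, so packets drain in global time_to_send order. A userspace model of that merge, with a second sorted list standing in for the rbtree (all names illustrative):

/* Userspace model of netem_peek()/netem_erase_head(), not kernel code. */
#include <stdio.h>

struct pkt {
	unsigned long long time_to_send;
	struct pkt *next;
};

static struct pkt *tree_head;	/* stands in for skb_rb_first(&q->t_root) */
static struct pkt *t_head;	/* the linear queue */

static struct pkt *peek(void)
{
	if (!tree_head)
		return t_head;
	if (!t_head)
		return tree_head;
	/* both non-empty: return the earlier head, as netem_peek() does */
	return tree_head->time_to_send < t_head->time_to_send ?
	       tree_head : t_head;
}

static void erase_head(struct pkt *p)
{
	if (p == t_head)
		t_head = p->next;	/* unlink from the linear queue */
	else
		tree_head = p->next;	/* rb_erase() in the real code */
}

int main(void)
{
	struct pkt a = { 15, NULL }, b = { 25, NULL };	/* "tree": 15, 25 */
	struct pkt c = { 10, NULL }, d = { 20, NULL }, e = { 30, NULL };

	a.next = &b;  tree_head = &a;
	c.next = &d;  d.next = &e;  t_head = &c;	/* list: 10, 20, 30 */

	for (struct pkt *p; (p = peek()); ) {
		erase_head(p);
		printf("%llu\n", p->time_to_send);	/* 10 15 20 25 30 */
	}
	return 0;
}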
@@ -625,20 +676,18 @@ deliver:
 		qdisc_bstats_update(sch, skb);
 		return skb;
 	}
-	p = rb_first(&q->t_root);
-	if (p) {
+	skb = netem_peek(q);
+	if (skb) {
 		u64 time_to_send;
 		u64 now = ktime_get_ns();
 
-		skb = rb_to_skb(p);
-
 		/* if more time remaining? */
 		time_to_send = netem_skb_cb(skb)->time_to_send;
 		if (q->slot.slot_next && q->slot.slot_next < time_to_send)
 			get_slot_next(q, now);
 
 		if (time_to_send <= now && q->slot.slot_next <= now) {
-			rb_erase(p, &q->t_root);
+			netem_erase_head(q, skb);
 			sch->q.qlen--;
 			qdisc_qstats_backlog_dec(sch, skb);
 			skb->next = NULL;