Diffstat (limited to 'net/sched/sch_netem.c')
-rw-r--r--  net/sched/sch_netem.c  20
1 file changed, 15 insertions(+), 5 deletions(-)
diff --git a/net/sched/sch_netem.c b/net/sched/sch_netem.c
index aaaf02175338..9f7b380cf0a3 100644
--- a/net/sched/sch_netem.c
+++ b/net/sched/sch_netem.c
@@ -413,6 +413,16 @@ static struct sk_buff *netem_segment(struct sk_buff *skb, struct Qdisc *sch,
 	return segs;
 }
 
+static void netem_enqueue_skb_head(struct qdisc_skb_head *qh, struct sk_buff *skb)
+{
+	skb->next = qh->head;
+
+	if (!qh->head)
+		qh->tail = skb;
+	qh->head = skb;
+	qh->qlen++;
+}
+
 /*
  * Insert one skb into qdisc.
  * Note: parent depends on return value to account for queue length.
@@ -502,7 +512,7 @@ static int netem_enqueue(struct sk_buff *skb, struct Qdisc *sch,
 			1<<(prandom_u32() % 8);
 	}
 
-	if (unlikely(skb_queue_len(&sch->q) >= sch->limit))
+	if (unlikely(sch->q.qlen >= sch->limit))
 		return qdisc_drop(skb, sch, to_free);
 
 	qdisc_qstats_backlog_inc(sch, skb);
@@ -522,8 +532,8 @@ static int netem_enqueue(struct sk_buff *skb, struct Qdisc *sch,
 	if (q->rate) {
 		struct sk_buff *last;
 
-		if (!skb_queue_empty(&sch->q))
-			last = skb_peek_tail(&sch->q);
+		if (sch->q.qlen)
+			last = sch->q.tail;
 		else
 			last = netem_rb_to_skb(rb_last(&q->t_root));
 		if (last) {
@@ -552,7 +562,7 @@ static int netem_enqueue(struct sk_buff *skb, struct Qdisc *sch,
 		cb->time_to_send = psched_get_time();
 		q->counter = 0;
 
-		__skb_queue_head(&sch->q, skb);
+		netem_enqueue_skb_head(&sch->q, skb);
 		sch->qstats.requeues++;
 	}
 
@@ -587,7 +597,7 @@ static struct sk_buff *netem_dequeue(struct Qdisc *sch)
 	struct rb_node *p;
 
 tfifo_dequeue:
-	skb = __skb_dequeue(&sch->q);
+	skb = __qdisc_dequeue_head(&sch->q);
 	if (skb) {
 		qdisc_qstats_backlog_dec(sch, skb);
 deliver:
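For reference, the new netem_enqueue_skb_head() helper performs the same head insertion that __skb_queue_head() used to do, but on the singly linked qdisc_skb_head list, so the tail pointer only needs updating when the list was empty. The stand-alone user-space sketch below illustrates that pattern; the node and list types are mock stand-ins for illustration, not kernel code.

	/* Illustrative sketch of head insertion into a singly linked
	 * head/tail list, mirroring netem_enqueue_skb_head(). */
	#include <assert.h>
	#include <stddef.h>

	struct node {
		struct node *next;
		int id;
	};

	struct head_tail_list {		/* plays the role of qdisc_skb_head */
		struct node *head;
		struct node *tail;
		unsigned int qlen;
	};

	static void enqueue_head(struct head_tail_list *q, struct node *n)
	{
		n->next = q->head;	/* new node points at the old head */
		if (!q->head)		/* list was empty: new node is also the tail */
			q->tail = n;
		q->head = n;
		q->qlen++;
	}

	int main(void)
	{
		struct head_tail_list q = { NULL, NULL, 0 };
		struct node a = { NULL, 1 }, b = { NULL, 2 };

		enqueue_head(&q, &a);	/* a becomes both head and tail */
		enqueue_head(&q, &b);	/* b becomes the new head; tail stays a */

		assert(q.head == &b && q.tail == &a && q.qlen == 2);
		return 0;
	}

Dequeueing from the head of such a list (as __qdisc_dequeue_head() does for sch->q) simply pops q->head, advances it to head->next, and decrements qlen, which is why the limit check and tail peek above can read sch->q.qlen and sch->q.tail directly.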