author    David S. Miller <davem@davemloft.net>	2016-09-19 01:47:23 -0400
committer David S. Miller <davem@davemloft.net>	2016-09-19 01:47:23 -0400
commit    029ac211464f9cf87fa7aa51a6f01e41642d76c3 (patch)
tree      123595224534ee49f1175b6ef8371728b6d31538
parent    106323b905a6bcd21ff83dd4e19566282fd5eb52 (diff)
parent    48da34b7a74201f15315cb1fc40bb9a7bd2b4940 (diff)

Merge branch 'net-sched-singly-linked-list'
Florian Westphal says:

====================
sched: convert queues to singly-linked list

During the Netfilter Workshop 2016, Eric Dumazet pointed out that qdisc
schedulers use doubly-linked skb lists even though a singly-linked list
would be enough.  The doubly-linked skb lists incur one extra write per
enqueue/dequeue operation (to update the ->prev pointer of the next list
element).

This series converts the qdiscs to a singly-linked version; the list head
maintains pointers to the first skb (for dequeue) and the last skb (for
enqueue).  Most qdiscs don't queue packets themselves and instead use a
leaf qdisc (typically pfifo_fast), so only a few schedulers needed
changes.

I briefly tested netem and htb and they seemed fine.  UDP_STREAM netperf
with 64-byte packets via veth + pfifo_fast shows a small (~2%)
improvement.
====================

Signed-off-by: David S. Miller <davem@davemloft.net>
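For orientation, the head/tail scheme the series adopts can be sketched in
full.  The following minimal, self-contained userspace program is not from
the patch (a stand-in struct node plays the role of struct sk_buff); it
mirrors the logic of the new __qdisc_enqueue_tail() and
__qdisc_dequeue_head() helpers shown in the diff below:

#include <assert.h>
#include <stddef.h>

struct node {			/* stand-in for struct sk_buff */
	struct node *next;
	int id;
};

struct skb_fifo {		/* mirrors struct qdisc_skb_head */
	struct node *head;	/* first element, for dequeue */
	struct node *tail;	/* last element, for enqueue */
	unsigned int qlen;
};

static void fifo_enqueue_tail(struct skb_fifo *q, struct node *n)
{
	n->next = NULL;
	if (q->tail)
		q->tail->next = n;	/* touch only the old tail */
	else
		q->head = n;		/* queue was empty */
	q->tail = n;
	q->qlen++;
}

static struct node *fifo_dequeue_head(struct skb_fifo *q)
{
	struct node *n = q->head;

	if (n) {
		q->head = n->next;
		if (!q->head)
			q->tail = NULL;	/* queue drained */
		n->next = NULL;
		q->qlen--;
	}
	return n;
}

int main(void)
{
	struct skb_fifo q = { NULL, NULL, 0 };
	struct node a = { NULL, 1 }, b = { NULL, 2 };

	fifo_enqueue_tail(&q, &a);
	fifo_enqueue_tail(&q, &b);
	assert(fifo_dequeue_head(&q)->id == 1);	/* FIFO order holds */
	assert(fifo_dequeue_head(&q)->id == 2);
	assert(!q.head && !q.tail && q.qlen == 0);
	return 0;
}

Neither operation ever writes a ->prev pointer of a neighbouring element,
which is exactly the write the series eliminates.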
 include/net/sch_generic.h | 72
 net/sched/sch_codel.c     |  4
 net/sched/sch_fifo.c      |  4
 net/sched/sch_generic.c   | 28
 net/sched/sch_htb.c       | 24
 net/sched/sch_netem.c     | 20
 net/sched/sch_pie.c       |  4
 7 files changed, 114 insertions(+), 42 deletions(-)
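The one-extra-write claim can also be made concrete on a toy model.  This
hypothetical micro-demo (userspace again, not part of the patch) counts
pointer writes per tail enqueue for a circular doubly-linked list in the
style of sk_buff_head versus the head/tail singly-linked scheme above:

#include <stdio.h>

static int writes;
#define SET(field, val) do { (field) = (val); writes++; } while (0)

struct dnode { struct dnode *next, *prev; };

/* sk_buff_head-style tail insert on a circular list with a sentinel
 * head: four pointer writes, including ->prev maintenance. */
static void dlist_enqueue(struct dnode *head, struct dnode *n)
{
	SET(n->next, head);
	SET(n->prev, head->prev);
	SET(head->prev->next, n);	/* old tail's ->next */
	SET(head->prev, n);
}

struct snode { struct snode *next; };
struct sfifo { struct snode *head, *tail; };

/* qdisc_skb_head-style tail insert: three pointer writes, and never a
 * ->prev write on any list element. */
static void sfifo_enqueue(struct sfifo *q, struct snode *n)
{
	SET(n->next, NULL);
	if (q->tail)
		SET(q->tail->next, n);
	else
		SET(q->head, n);
	SET(q->tail, n);
}

int main(void)
{
	struct dnode head = { &head, &head }, d1, d2;
	struct sfifo q = { NULL, NULL };
	struct snode s1, s2;

	writes = 0;
	dlist_enqueue(&head, &d1);
	dlist_enqueue(&head, &d2);
	printf("doubly-linked: %d pointer writes for 2 enqueues\n", writes);

	writes = 0;
	sfifo_enqueue(&q, &s1);
	sfifo_enqueue(&q, &s2);
	printf("singly-linked: %d pointer writes for 2 enqueues\n", writes);
	return 0;
}

On this model the doubly-linked enqueue performs four pointer writes
against three, and the write it saves is the per-element ->prev
maintenance that the commit message refers to.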
diff --git a/include/net/sch_generic.h b/include/net/sch_generic.h
index 52a2015667b4..e6aa0a249672 100644
--- a/include/net/sch_generic.h
+++ b/include/net/sch_generic.h
@@ -36,6 +36,14 @@ struct qdisc_size_table {
 	u16	data[];
 };
 
+/* similar to sk_buff_head, but skb->prev pointer is undefined. */
+struct qdisc_skb_head {
+	struct sk_buff	*head;
+	struct sk_buff	*tail;
+	__u32		qlen;
+	spinlock_t	lock;
+};
+
 struct Qdisc {
 	int 			(*enqueue)(struct sk_buff *skb,
 					   struct Qdisc *sch,
@@ -76,7 +84,7 @@ struct Qdisc {
  * For performance sake on SMP, we put highly modified fields at the end
  */
 	struct sk_buff		*gso_skb ____cacheline_aligned_in_smp;
-	struct sk_buff_head	q;
+	struct qdisc_skb_head	q;
 	struct gnet_stats_basic_packed bstats;
 	seqcount_t		running;
 	struct gnet_stats_queue	qstats;
@@ -600,10 +608,27 @@ static inline void qdisc_qstats_overlimit(struct Qdisc *sch)
 	sch->qstats.overlimits++;
 }
 
+static inline void qdisc_skb_head_init(struct qdisc_skb_head *qh)
+{
+	qh->head = NULL;
+	qh->tail = NULL;
+	qh->qlen = 0;
+}
+
 static inline int __qdisc_enqueue_tail(struct sk_buff *skb, struct Qdisc *sch,
-				       struct sk_buff_head *list)
+				       struct qdisc_skb_head *qh)
 {
-	__skb_queue_tail(list, skb);
+	struct sk_buff *last = qh->tail;
+
+	if (last) {
+		skb->next = NULL;
+		last->next = skb;
+		qh->tail = skb;
+	} else {
+		qh->tail = skb;
+		qh->head = skb;
+	}
+	qh->qlen++;
 	qdisc_qstats_backlog_inc(sch, skb);
 
 	return NET_XMIT_SUCCESS;
@@ -614,14 +639,16 @@ static inline int qdisc_enqueue_tail(struct sk_buff *skb, struct Qdisc *sch)
 	return __qdisc_enqueue_tail(skb, sch, &sch->q);
 }
 
-static inline struct sk_buff *__qdisc_dequeue_head(struct Qdisc *sch,
-						   struct sk_buff_head *list)
+static inline struct sk_buff *__qdisc_dequeue_head(struct qdisc_skb_head *qh)
 {
-	struct sk_buff *skb = __skb_dequeue(list);
+	struct sk_buff *skb = qh->head;
 
 	if (likely(skb != NULL)) {
-		qdisc_qstats_backlog_dec(sch, skb);
-		qdisc_bstats_update(sch, skb);
+		qh->head = skb->next;
+		qh->qlen--;
+		if (qh->head == NULL)
+			qh->tail = NULL;
+		skb->next = NULL;
 	}
 
 	return skb;
@@ -629,7 +656,14 @@ static inline struct sk_buff *__qdisc_dequeue_head(struct Qdisc *sch,
 
 static inline struct sk_buff *qdisc_dequeue_head(struct Qdisc *sch)
 {
-	return __qdisc_dequeue_head(sch, &sch->q);
+	struct sk_buff *skb = __qdisc_dequeue_head(&sch->q);
+
+	if (likely(skb != NULL)) {
+		qdisc_qstats_backlog_dec(sch, skb);
+		qdisc_bstats_update(sch, skb);
+	}
+
+	return skb;
 }
 
 /* Instead of calling kfree_skb() while root qdisc lock is held,
@@ -642,10 +676,10 @@ static inline void __qdisc_drop(struct sk_buff *skb, struct sk_buff **to_free)
 }
 
 static inline unsigned int __qdisc_queue_drop_head(struct Qdisc *sch,
-						   struct sk_buff_head *list,
+						   struct qdisc_skb_head *qh,
 						   struct sk_buff **to_free)
 {
-	struct sk_buff *skb = __skb_dequeue(list);
+	struct sk_buff *skb = __qdisc_dequeue_head(qh);
 
 	if (likely(skb != NULL)) {
 		unsigned int len = qdisc_pkt_len(skb);
@@ -666,7 +700,9 @@ static inline unsigned int qdisc_queue_drop_head(struct Qdisc *sch,
 
 static inline struct sk_buff *qdisc_peek_head(struct Qdisc *sch)
 {
-	return skb_peek(&sch->q);
+	const struct qdisc_skb_head *qh = &sch->q;
+
+	return qh->head;
 }
 
 /* generic pseudo peek method for non-work-conserving qdisc */
@@ -701,15 +737,19 @@ static inline struct sk_buff *qdisc_dequeue_peeked(struct Qdisc *sch)
 	return skb;
 }
 
-static inline void __qdisc_reset_queue(struct sk_buff_head *list)
+static inline void __qdisc_reset_queue(struct qdisc_skb_head *qh)
 {
 	/*
 	 * We do not know the backlog in bytes of this list, it
 	 * is up to the caller to correct it
 	 */
-	if (!skb_queue_empty(list)) {
-		rtnl_kfree_skbs(list->next, list->prev);
-		__skb_queue_head_init(list);
+	ASSERT_RTNL();
+	if (qh->qlen) {
+		rtnl_kfree_skbs(qh->head, qh->tail);
+
+		qh->head = NULL;
+		qh->tail = NULL;
+		qh->qlen = 0;
 	}
 }
 
diff --git a/net/sched/sch_codel.c b/net/sched/sch_codel.c
index 4002df3c7d9f..5bfa79ee657c 100644
--- a/net/sched/sch_codel.c
+++ b/net/sched/sch_codel.c
@@ -69,7 +69,7 @@ struct codel_sched_data {
 static struct sk_buff *dequeue_func(struct codel_vars *vars, void *ctx)
 {
 	struct Qdisc *sch = ctx;
-	struct sk_buff *skb = __skb_dequeue(&sch->q);
+	struct sk_buff *skb = __qdisc_dequeue_head(&sch->q);
 
 	if (skb)
 		sch->qstats.backlog -= qdisc_pkt_len(skb);
@@ -172,7 +172,7 @@ static int codel_change(struct Qdisc *sch, struct nlattr *opt)
 
 	qlen = sch->q.qlen;
 	while (sch->q.qlen > sch->limit) {
-		struct sk_buff *skb = __skb_dequeue(&sch->q);
+		struct sk_buff *skb = __qdisc_dequeue_head(&sch->q);
 
 		dropped += qdisc_pkt_len(skb);
 		qdisc_qstats_backlog_dec(sch, skb);
diff --git a/net/sched/sch_fifo.c b/net/sched/sch_fifo.c
index baeed6a78d28..1e37247656f8 100644
--- a/net/sched/sch_fifo.c
+++ b/net/sched/sch_fifo.c
@@ -31,7 +31,7 @@ static int bfifo_enqueue(struct sk_buff *skb, struct Qdisc *sch,
 static int pfifo_enqueue(struct sk_buff *skb, struct Qdisc *sch,
 			 struct sk_buff **to_free)
 {
-	if (likely(skb_queue_len(&sch->q) < sch->limit))
+	if (likely(sch->q.qlen < sch->limit))
 		return qdisc_enqueue_tail(skb, sch);
 
 	return qdisc_drop(skb, sch, to_free);
@@ -42,7 +42,7 @@ static int pfifo_tail_enqueue(struct sk_buff *skb, struct Qdisc *sch,
 {
 	unsigned int prev_backlog;
 
-	if (likely(skb_queue_len(&sch->q) < sch->limit))
+	if (likely(sch->q.qlen < sch->limit))
 		return qdisc_enqueue_tail(skb, sch);
 
 	prev_backlog = sch->qstats.backlog;
diff --git a/net/sched/sch_generic.c b/net/sched/sch_generic.c
index 0d21b567ff27..6cfb6e9038c2 100644
--- a/net/sched/sch_generic.c
+++ b/net/sched/sch_generic.c
@@ -466,7 +466,7 @@ static const u8 prio2band[TC_PRIO_MAX + 1] = {
  */
 struct pfifo_fast_priv {
 	u32 bitmap;
-	struct sk_buff_head q[PFIFO_FAST_BANDS];
+	struct qdisc_skb_head q[PFIFO_FAST_BANDS];
 };
 
 /*
@@ -477,7 +477,7 @@ struct pfifo_fast_priv {
  */
 static const int bitmap2band[] = {-1, 0, 1, 0, 2, 0, 1, 0};
 
-static inline struct sk_buff_head *band2list(struct pfifo_fast_priv *priv,
-					     int band)
+static inline struct qdisc_skb_head *band2list(struct pfifo_fast_priv *priv,
+					       int band)
 {
 	return priv->q + band;
@@ -486,10 +486,10 @@ static inline struct sk_buff_head *band2list(struct pfifo_fast_priv *priv,
 static int pfifo_fast_enqueue(struct sk_buff *skb, struct Qdisc *qdisc,
 			      struct sk_buff **to_free)
 {
-	if (skb_queue_len(&qdisc->q) < qdisc_dev(qdisc)->tx_queue_len) {
+	if (qdisc->q.qlen < qdisc_dev(qdisc)->tx_queue_len) {
 		int band = prio2band[skb->priority & TC_PRIO_MAX];
 		struct pfifo_fast_priv *priv = qdisc_priv(qdisc);
-		struct sk_buff_head *list = band2list(priv, band);
+		struct qdisc_skb_head *list = band2list(priv, band);
 
 		priv->bitmap |= (1 << band);
 		qdisc->q.qlen++;
@@ -505,11 +505,16 @@ static struct sk_buff *pfifo_fast_dequeue(struct Qdisc *qdisc)
 	int band = bitmap2band[priv->bitmap];
 
 	if (likely(band >= 0)) {
-		struct sk_buff_head *list = band2list(priv, band);
-		struct sk_buff *skb = __qdisc_dequeue_head(qdisc, list);
+		struct qdisc_skb_head *qh = band2list(priv, band);
+		struct sk_buff *skb = __qdisc_dequeue_head(qh);
+
+		if (likely(skb != NULL)) {
+			qdisc_qstats_backlog_dec(qdisc, skb);
+			qdisc_bstats_update(qdisc, skb);
+		}
 
 		qdisc->q.qlen--;
-		if (skb_queue_empty(list))
+		if (qh->qlen == 0)
 			priv->bitmap &= ~(1 << band);
 
 		return skb;
@@ -524,9 +529,9 @@ static struct sk_buff *pfifo_fast_peek(struct Qdisc *qdisc)
 	int band = bitmap2band[priv->bitmap];
 
 	if (band >= 0) {
-		struct sk_buff_head *list = band2list(priv, band);
+		struct qdisc_skb_head *qh = band2list(priv, band);
 
-		return skb_peek(list);
+		return qh->head;
 	}
 
 	return NULL;
@@ -564,7 +569,7 @@ static int pfifo_fast_init(struct Qdisc *qdisc, struct nlattr *opt)
 	struct pfifo_fast_priv *priv = qdisc_priv(qdisc);
 
 	for (prio = 0; prio < PFIFO_FAST_BANDS; prio++)
-		__skb_queue_head_init(band2list(priv, prio));
+		qdisc_skb_head_init(band2list(priv, prio));
 
 	/* Can by-pass the queue discipline */
 	qdisc->flags |= TCQ_F_CAN_BYPASS;
@@ -612,7 +617,8 @@ struct Qdisc *qdisc_alloc(struct netdev_queue *dev_queue,
 		sch = (struct Qdisc *) QDISC_ALIGN((unsigned long) p);
 		sch->padded = (char *) sch - (char *) p;
 	}
-	skb_queue_head_init(&sch->q);
+	qdisc_skb_head_init(&sch->q);
+	spin_lock_init(&sch->q.lock);
 
 	spin_lock_init(&sch->busylock);
 	lockdep_set_class(&sch->busylock,
diff --git a/net/sched/sch_htb.c b/net/sched/sch_htb.c
index 53dbfa187870..c798d0de8a9d 100644
--- a/net/sched/sch_htb.c
+++ b/net/sched/sch_htb.c
@@ -162,7 +162,7 @@ struct htb_sched {
 	struct work_struct	work;
 
 	/* non shaped skbs; let them go directly thru */
-	struct sk_buff_head	direct_queue;
+	struct qdisc_skb_head	direct_queue;
 	long			direct_pkts;
 
 	struct qdisc_watchdog	watchdog;
@@ -570,6 +570,22 @@ static inline void htb_deactivate(struct htb_sched *q, struct htb_class *cl)
 	list_del_init(&cl->un.leaf.drop_list);
 }
 
+static void htb_enqueue_tail(struct sk_buff *skb, struct Qdisc *sch,
+			     struct qdisc_skb_head *qh)
+{
+	struct sk_buff *last = qh->tail;
+
+	if (last) {
+		skb->next = NULL;
+		last->next = skb;
+		qh->tail = skb;
+	} else {
+		qh->tail = skb;
+		qh->head = skb;
+	}
+	qh->qlen++;
+}
+
 static int htb_enqueue(struct sk_buff *skb, struct Qdisc *sch,
 		       struct sk_buff **to_free)
 {
@@ -580,7 +596,7 @@ static int htb_enqueue(struct sk_buff *skb, struct Qdisc *sch,
 	if (cl == HTB_DIRECT) {
 		/* enqueue to helper queue */
 		if (q->direct_queue.qlen < q->direct_qlen) {
-			__skb_queue_tail(&q->direct_queue, skb);
+			htb_enqueue_tail(skb, sch, &q->direct_queue);
 			q->direct_pkts++;
 		} else {
 			return qdisc_drop(skb, sch, to_free);
@@ -888,7 +904,7 @@ static struct sk_buff *htb_dequeue(struct Qdisc *sch)
 	unsigned long start_at;
 
 	/* try to dequeue direct packets as high prio (!) to minimize cpu work */
-	skb = __skb_dequeue(&q->direct_queue);
+	skb = __qdisc_dequeue_head(&q->direct_queue);
 	if (skb != NULL) {
 ok:
 		qdisc_bstats_update(sch, skb);
@@ -1019,7 +1035,7 @@ static int htb_init(struct Qdisc *sch, struct nlattr *opt)
 
 	qdisc_watchdog_init(&q->watchdog, sch);
 	INIT_WORK(&q->work, htb_work_func);
-	__skb_queue_head_init(&q->direct_queue);
+	qdisc_skb_head_init(&q->direct_queue);
 
 	if (tb[TCA_HTB_DIRECT_QLEN])
 		q->direct_qlen = nla_get_u32(tb[TCA_HTB_DIRECT_QLEN]);
diff --git a/net/sched/sch_netem.c b/net/sched/sch_netem.c
index aaaf02175338..9f7b380cf0a3 100644
--- a/net/sched/sch_netem.c
+++ b/net/sched/sch_netem.c
@@ -413,6 +413,16 @@ static struct sk_buff *netem_segment(struct sk_buff *skb, struct Qdisc *sch,
 	return segs;
 }
 
+static void netem_enqueue_skb_head(struct qdisc_skb_head *qh, struct sk_buff *skb)
+{
+	skb->next = qh->head;
+
+	if (!qh->head)
+		qh->tail = skb;
+	qh->head = skb;
+	qh->qlen++;
+}
+
 /*
  * Insert one skb into qdisc.
  * Note: parent depends on return value to account for queue length.
@@ -502,7 +512,7 @@ static int netem_enqueue(struct sk_buff *skb, struct Qdisc *sch,
 			1<<(prandom_u32() % 8);
 	}
 
-	if (unlikely(skb_queue_len(&sch->q) >= sch->limit))
+	if (unlikely(sch->q.qlen >= sch->limit))
 		return qdisc_drop(skb, sch, to_free);
 
 	qdisc_qstats_backlog_inc(sch, skb);
@@ -522,8 +532,8 @@ static int netem_enqueue(struct sk_buff *skb, struct Qdisc *sch,
 	if (q->rate) {
 		struct sk_buff *last;
 
-		if (!skb_queue_empty(&sch->q))
-			last = skb_peek_tail(&sch->q);
+		if (sch->q.qlen)
+			last = sch->q.tail;
 		else
 			last = netem_rb_to_skb(rb_last(&q->t_root));
 		if (last) {
@@ -552,7 +562,7 @@ static int netem_enqueue(struct sk_buff *skb, struct Qdisc *sch,
 		cb->time_to_send = psched_get_time();
 		q->counter = 0;
 
-		__skb_queue_head(&sch->q, skb);
+		netem_enqueue_skb_head(&sch->q, skb);
 		sch->qstats.requeues++;
 	}
 
@@ -587,7 +597,7 @@ static struct sk_buff *netem_dequeue(struct Qdisc *sch)
 	struct rb_node *p;
 
 tfifo_dequeue:
-	skb = __skb_dequeue(&sch->q);
+	skb = __qdisc_dequeue_head(&sch->q);
 	if (skb) {
 		qdisc_qstats_backlog_dec(sch, skb);
 deliver:
diff --git a/net/sched/sch_pie.c b/net/sched/sch_pie.c
index a570b0bb254c..5c3a99d6aa82 100644
--- a/net/sched/sch_pie.c
+++ b/net/sched/sch_pie.c
@@ -231,7 +231,7 @@ static int pie_change(struct Qdisc *sch, struct nlattr *opt)
 	/* Drop excess packets if new limit is lower */
 	qlen = sch->q.qlen;
 	while (sch->q.qlen > sch->limit) {
-		struct sk_buff *skb = __skb_dequeue(&sch->q);
+		struct sk_buff *skb = __qdisc_dequeue_head(&sch->q);
 
 		dropped += qdisc_pkt_len(skb);
 		qdisc_qstats_backlog_dec(sch, skb);
@@ -511,7 +511,7 @@ static int pie_dump_stats(struct Qdisc *sch, struct gnet_dump *d)
 static struct sk_buff *pie_qdisc_dequeue(struct Qdisc *sch)
 {
 	struct sk_buff *skb;
-	skb = __qdisc_dequeue_head(sch, &sch->q);
+	skb = qdisc_dequeue_head(sch);
 
 	if (!skb)
 		return NULL;