author     Florian Westphal <fw@strlen.de>          2016-09-17 18:57:31 -0400
committer  David S. Miller <davem@davemloft.net>    2016-09-19 01:47:18 -0400
commit     97d0678f913369af0dc8b510a682a641654ab743 (patch)
tree       d88ecc69e4ae92f7f1d2ff39e79c32c61b067d8d /net
parent     1486587b2fcda08dee7eab23784d504eed772c45 (diff)
sched: don't use skb queue helpers
A followup change will replace the sk_buff_head in the qdisc struct with
a slightly different list.  Use of the sk_buff_head helpers will thus
cause compiler warnings.

Open-code these accesses in an extra change to ease review.

Signed-off-by: Florian Westphal <fw@strlen.de>
Signed-off-by: David S. Miller <davem@davemloft.net>
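For context, skb_queue_len() is a thin accessor that returns the qlen counter sk_buff_head keeps next to its list pointers, so reading sch->q.qlen directly is behaviour-preserving and merely drops the dependency on the sk_buff_head helpers. A minimal, self-contained sketch of that equivalence, using simplified stand-in types rather than the kernel's actual definitions:

/* Illustrative sketch only -- simplified stand-ins, not kernel headers. */
#include <stdio.h>

struct pkt;                           /* stand-in for struct sk_buff */

struct pkt_queue {                    /* stand-in for struct sk_buff_head */
        struct pkt *next, *prev;      /* circular list pointers (unused here) */
        unsigned int qlen;            /* cached element count */
};

/* What skb_queue_len() boils down to: return the cached counter. */
static unsigned int pkt_queue_len(const struct pkt_queue *q)
{
        return q->qlen;
}

int main(void)
{
        struct pkt_queue q = { .qlen = 3 };  /* list pointers irrelevant to this sketch */

        /* The helper call and the open-coded field read yield the same value,
         * which is why the patch can swap one for the other at every call site.
         */
        printf("helper=%u direct=%u\n", pkt_queue_len(&q), q.qlen);
        return 0;
}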
Diffstat (limited to 'net')
-rw-r--r--  net/sched/sch_fifo.c    | 4
-rw-r--r--  net/sched/sch_generic.c | 2
-rw-r--r--  net/sched/sch_netem.c   | 4
3 files changed, 5 insertions, 5 deletions
diff --git a/net/sched/sch_fifo.c b/net/sched/sch_fifo.c
index baeed6a78d28..1e37247656f8 100644
--- a/net/sched/sch_fifo.c
+++ b/net/sched/sch_fifo.c
@@ -31,7 +31,7 @@ static int bfifo_enqueue(struct sk_buff *skb, struct Qdisc *sch,
 static int pfifo_enqueue(struct sk_buff *skb, struct Qdisc *sch,
                          struct sk_buff **to_free)
 {
-        if (likely(skb_queue_len(&sch->q) < sch->limit))
+        if (likely(sch->q.qlen < sch->limit))
                 return qdisc_enqueue_tail(skb, sch);
 
         return qdisc_drop(skb, sch, to_free);
@@ -42,7 +42,7 @@ static int pfifo_tail_enqueue(struct sk_buff *skb, struct Qdisc *sch,
 {
         unsigned int prev_backlog;
 
-        if (likely(skb_queue_len(&sch->q) < sch->limit))
+        if (likely(sch->q.qlen < sch->limit))
                 return qdisc_enqueue_tail(skb, sch);
 
         prev_backlog = sch->qstats.backlog;
diff --git a/net/sched/sch_generic.c b/net/sched/sch_generic.c
index 0d21b567ff27..5e63bf638350 100644
--- a/net/sched/sch_generic.c
+++ b/net/sched/sch_generic.c
@@ -486,7 +486,7 @@ static inline struct sk_buff_head *band2list(struct pfifo_fast_priv *priv,
 static int pfifo_fast_enqueue(struct sk_buff *skb, struct Qdisc *qdisc,
                               struct sk_buff **to_free)
 {
-        if (skb_queue_len(&qdisc->q) < qdisc_dev(qdisc)->tx_queue_len) {
+        if (qdisc->q.qlen < qdisc_dev(qdisc)->tx_queue_len) {
                 int band = prio2band[skb->priority & TC_PRIO_MAX];
                 struct pfifo_fast_priv *priv = qdisc_priv(qdisc);
                 struct sk_buff_head *list = band2list(priv, band);
diff --git a/net/sched/sch_netem.c b/net/sched/sch_netem.c
index aaaf02175338..1832d7732dbc 100644
--- a/net/sched/sch_netem.c
+++ b/net/sched/sch_netem.c
@@ -502,7 +502,7 @@ static int netem_enqueue(struct sk_buff *skb, struct Qdisc *sch,
                         1<<(prandom_u32() % 8);
         }
 
-        if (unlikely(skb_queue_len(&sch->q) >= sch->limit))
+        if (unlikely(sch->q.qlen >= sch->limit))
                 return qdisc_drop(skb, sch, to_free);
 
         qdisc_qstats_backlog_inc(sch, skb);
@@ -522,7 +522,7 @@ static int netem_enqueue(struct sk_buff *skb, struct Qdisc *sch,
         if (q->rate) {
                 struct sk_buff *last;
 
-                if (!skb_queue_empty(&sch->q))
+                if (sch->q.qlen)
                         last = skb_peek_tail(&sch->q);
                 else
                         last = netem_rb_to_skb(rb_last(&q->t_root));
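The last hunk is the one substitution that changes the form of the test rather than just swapping a helper for a field read: skb_queue_empty() checks whether the list head's next pointer still points back at the head, while the replacement checks the cached qlen counter. The two agree whenever the queue is only manipulated through helpers that keep qlen in sync, which holds for sch->q. A standalone sketch of the two checks, again with simplified stand-in types rather than the kernel's headers:

/* Illustrative sketch only -- simplified stand-ins, not kernel headers. */
#include <assert.h>

struct node;                           /* stand-in for struct sk_buff */

struct node_queue {                    /* stand-in for struct sk_buff_head */
        struct node *next, *prev;      /* circular list: empty when next == head */
        unsigned int qlen;             /* count maintained on enqueue/dequeue */
};

/* Roughly what skb_queue_empty() does: the head points back at itself. */
static int empty_by_pointer(const struct node_queue *q)
{
        return q->next == (const struct node *)q;
}

/* The open-coded check the patch switches to: a zero length counter. */
static int empty_by_qlen(const struct node_queue *q)
{
        return q->qlen == 0;
}

int main(void)
{
        struct node_queue q;

        /* An initialised, empty queue: ring points at itself, qlen is zero. */
        q.next = (struct node *)&q;
        q.prev = (struct node *)&q;
        q.qlen = 0;

        assert(empty_by_pointer(&q) == empty_by_qlen(&q));
        return 0;
}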