Diffstat (limited to 'net/sched/sch_generic.c')
 net/sched/sch_generic.c | 22 +++++++++++++---------
 1 file changed, 13 insertions(+), 9 deletions(-)
diff --git a/net/sched/sch_generic.c b/net/sched/sch_generic.c
index 190570f21b20..7e3fbe9cc936 100644
--- a/net/sched/sch_generic.c
+++ b/net/sched/sch_generic.c
@@ -106,6 +106,14 @@ static inline void qdisc_enqueue_skb_bad_txq(struct Qdisc *q,
 
 	__skb_queue_tail(&q->skb_bad_txq, skb);
 
+	if (qdisc_is_percpu_stats(q)) {
+		qdisc_qstats_cpu_backlog_inc(q, skb);
+		qdisc_qstats_cpu_qlen_inc(q);
+	} else {
+		qdisc_qstats_backlog_inc(q, skb);
+		q->q.qlen++;
+	}
+
 	if (lock)
 		spin_unlock(lock);
 }
@@ -196,14 +204,6 @@ static void try_bulk_dequeue_skb_slow(struct Qdisc *q,
 			break;
 		if (unlikely(skb_get_queue_mapping(nskb) != mapping)) {
 			qdisc_enqueue_skb_bad_txq(q, nskb);
-
-			if (qdisc_is_percpu_stats(q)) {
-				qdisc_qstats_cpu_backlog_inc(q, nskb);
-				qdisc_qstats_cpu_qlen_inc(q);
-			} else {
-				qdisc_qstats_backlog_inc(q, nskb);
-				q->q.qlen++;
-			}
 			break;
 		}
 		skb->next = nskb;
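
Note on the two hunks above: they fold the backlog/qlen accounting into qdisc_enqueue_skb_bad_txq() itself instead of leaving it to the caller in try_bulk_dequeue_skb_slow(), branching on qdisc_is_percpu_stats() to pick between per-CPU counters and the shared q->q.qlen/backlog fields. A rough userspace analogue of that per-CPU-versus-shared split is sketched below; the queue type, field names, and the _Thread_local counters are illustrative assumptions, not kernel code.

#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>

/* Illustrative stand-in for a queue that, like a qdisc, keeps either
 * per-thread ("per-CPU") counters or shared counters under a lock.
 */
struct queue_stats {
	unsigned long qlen;
	unsigned long backlog;
};

struct queue {
	bool percpu_stats;          /* plays the role of qdisc_is_percpu_stats() */
	struct queue_stats shared;  /* used when percpu_stats == false */
	pthread_mutex_t lock;       /* protects 'shared' */
};

/* Per-thread counters; real code would sum them over all threads at read time. */
static _Thread_local struct queue_stats local_stats;

static void account_enqueue(struct queue *q, unsigned long pkt_len)
{
	if (q->percpu_stats) {
		/* No lock needed: each thread only touches its own counters. */
		local_stats.backlog += pkt_len;
		local_stats.qlen++;
	} else {
		pthread_mutex_lock(&q->lock);
		q->shared.backlog += pkt_len;
		q->shared.qlen++;
		pthread_mutex_unlock(&q->lock);
	}
}

int main(void)
{
	static struct queue locked_q = { .percpu_stats = false,
					 .lock = PTHREAD_MUTEX_INITIALIZER };
	static struct queue percpu_q = { .percpu_stats = true };

	account_enqueue(&locked_q, 1500);
	account_enqueue(&percpu_q, 1500);
	printf("shared: qlen=%lu backlog=%lu; this thread: qlen=%lu backlog=%lu\n",
	       locked_q.shared.qlen, locked_q.shared.backlog,
	       local_stats.qlen, local_stats.backlog);
	return 0;
}

Doing the accounting inside the helper also keeps it under the lock the helper may be holding at that point, which it releases right afterwards (the "if (lock) spin_unlock(lock);" visible in the context lines of the first hunk).
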
@@ -628,6 +628,7 @@ static int pfifo_fast_enqueue(struct sk_buff *skb, struct Qdisc *qdisc,
 	int band = prio2band[skb->priority & TC_PRIO_MAX];
 	struct pfifo_fast_priv *priv = qdisc_priv(qdisc);
 	struct skb_array *q = band2list(priv, band);
+	unsigned int pkt_len = qdisc_pkt_len(skb);
 	int err;
 
 	err = skb_array_produce(q, skb);
@@ -636,7 +637,10 @@ static int pfifo_fast_enqueue(struct sk_buff *skb, struct Qdisc *qdisc,
 		return qdisc_drop_cpu(skb, qdisc, to_free);
 
 	qdisc_qstats_cpu_qlen_inc(qdisc);
-	qdisc_qstats_cpu_backlog_inc(qdisc, skb);
+	/* Note: skb can not be used after skb_array_produce(),
+	 * so we better not use qdisc_qstats_cpu_backlog_inc()
+	 */
+	this_cpu_add(qdisc->cpu_qstats->backlog, pkt_len);
 	return NET_XMIT_SUCCESS;
 }
 
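
Note on the pfifo_fast hunks: they encode a general rule for lock-free producer/consumer queues. The moment skb_array_produce() publishes the skb, a consumer running on another CPU may dequeue and free it, so the producer has to snapshot whatever it still needs (here the packet length) beforehand and must not dereference the pointer afterwards. Below is a minimal userspace sketch of that pattern, assuming nothing beyond C11 atomics and pthreads; the ring, the pkt struct, and the backlog counter are made up for illustration and are not the kernel's skb_array API.

#include <pthread.h>
#include <stdatomic.h>
#include <stdio.h>
#include <stdlib.h>

#define RING_SIZE 64
#define NPKTS     1000

struct pkt {
	size_t len;
	char   data[256];
};

/* Single-producer/single-consumer ring of pkt pointers: a stand-in for the
 * skb_array used by pfifo_fast. Once a pointer is stored in the ring, the
 * consumer thread may free it at any moment.
 */
static struct pkt *_Atomic ring[RING_SIZE];
static _Atomic size_t head;   /* written only by the producer */
static _Atomic size_t tail;   /* written only by the consumer */
static _Atomic long backlog;  /* stands in for the per-CPU backlog counter */

static int produce(struct pkt *p)
{
	size_t h = atomic_load(&head);

	if (h - atomic_load(&tail) == RING_SIZE)
		return -1;                      /* ring full: caller drops */
	atomic_store(&ring[h % RING_SIZE], p);  /* p becomes visible here */
	atomic_store(&head, h + 1);
	return 0;
}

/* Mirrors the patched pfifo_fast_enqueue(): snapshot the length first,
 * publish, then account using the snapshot, never the (possibly freed) packet.
 */
static int enqueue(struct pkt *p)
{
	size_t len = p->len;    /* cached before produce(), like pkt_len */

	if (produce(p)) {
		free(p);
		return -1;
	}
	atomic_fetch_add(&backlog, (long)len);  /* NOT p->len: p may be gone */
	return 0;
}

static void *consumer(void *arg)
{
	(void)arg;
	for (int n = 0; n < NPKTS; ) {
		size_t t = atomic_load(&tail);

		if (t == atomic_load(&head))
			continue;               /* ring empty, spin */
		struct pkt *p = atomic_load(&ring[t % RING_SIZE]);

		atomic_fetch_sub(&backlog, (long)p->len);
		free(p);                        /* consumer owns p from here on */
		atomic_store(&tail, t + 1);
		n++;
	}
	return NULL;
}

int main(void)
{
	pthread_t thr;

	pthread_create(&thr, NULL, consumer, NULL);
	for (int i = 0; i < NPKTS; ) {
		struct pkt *p = calloc(1, sizeof(*p));

		p->len = 64;
		if (enqueue(p) == 0)
			i++;
	}
	pthread_join(thr, NULL);
	printf("backlog after drain: %ld bytes\n", (long)atomic_load(&backlog));
	return 0;
}

The enqueue() above is the analogue of the patched pfifo_fast_enqueue(): the length is read into a local before the publish and the backlog is bumped from that local, which is why the patch introduces pkt_len = qdisc_pkt_len(skb) ahead of skb_array_produce() and replaces qdisc_qstats_cpu_backlog_inc() with this_cpu_add() on the cached value.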