author    Jussi Kivilinna <jussi.kivilinna@mbnet.fi>   2008-07-20 03:08:27 -0400
committer David S. Miller <davem@davemloft.net>        2008-07-20 03:08:27 -0400
commit    0abf77e55a2459aa9905be4b226e4729d5b4f0cb
tree      0224961150c0c2c65b5ad407b1af8cf84266e919 /net/sched
parent    5f86173bdf15981ca49d0434f638b68f70a35644

net_sched: Add accessor function for packet length for qdiscs
Signed-off-by: Jussi Kivilinna <jussi.kivilinna@mbnet.fi>
Signed-off-by: David S. Miller <davem@davemloft.net>
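
Note: the accessor itself lands in include/net/sch_generic.h, which is outside
this diffstat (limited to net/sched). A minimal sketch of what it presumably
looks like, assuming the qdisc_skb_cb/pkt_len layout introduced alongside the
parent commit's qdisc_enqueue wrapper:

	/* Sketch only -- the real definition lives in include/net/sch_generic.h.
	 * qdisc_skb_cb overlays skb->cb and carries a cached packet length. */
	struct qdisc_skb_cb {
		unsigned int	pkt_len;
	};

	static inline struct qdisc_skb_cb *qdisc_skb_cb(struct sk_buff *skb)
	{
		return (struct qdisc_skb_cb *)skb->cb;
	}

	static inline unsigned int qdisc_pkt_len(struct sk_buff *skb)
	{
		return qdisc_skb_cb(skb)->pkt_len;
	}

With every skb->len read in qdisc and action code funneled through this one
helper, a later change to how packet length is accounted (for example adding
link-layer overhead) only has to update the cached value in one place.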
Diffstat (limited to 'net/sched')
-rw-r--r--   net/sched/act_gact.c      2
-rw-r--r--   net/sched/act_ipt.c       2
-rw-r--r--   net/sched/act_mirred.c    4
-rw-r--r--   net/sched/act_nat.c       2
-rw-r--r--   net/sched/act_pedit.c     2
-rw-r--r--   net/sched/act_police.c    8
-rw-r--r--   net/sched/act_simple.c    2
-rw-r--r--   net/sched/sch_atm.c       4
-rw-r--r--   net/sched/sch_cbq.c      14
-rw-r--r--   net/sched/sch_dsmark.c    2
-rw-r--r--   net/sched/sch_fifo.c      2
-rw-r--r--   net/sched/sch_gred.c     12
-rw-r--r--   net/sched/sch_hfsc.c     14
-rw-r--r--   net/sched/sch_htb.c       9
-rw-r--r--   net/sched/sch_ingress.c   2
-rw-r--r--   net/sched/sch_netem.c     6
-rw-r--r--   net/sched/sch_prio.c      2
-rw-r--r--   net/sched/sch_red.c       2
-rw-r--r--   net/sched/sch_sfq.c      16
-rw-r--r--   net/sched/sch_tbf.c       6
-rw-r--r--   net/sched/sch_teql.c      6
21 files changed, 58 insertions(+), 61 deletions(-)
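
Every hunk below follows the same mechanical pattern: a read of skb->len (or a
local copy of it) is replaced by qdisc_pkt_len(skb). The value the accessor
returns is presumably cached when the packet first enters the root qdisc, along
the lines of the wrapper added by the parent commit ("Add qdisc_enqueue
wrapper") -- a sketch, with names assumed from that commit:

	/* Sketch, assuming the parent commit's enqueue wrappers; the cached
	 * length equals skb->len at the point the root qdisc is entered. */
	static inline int qdisc_enqueue_root(struct sk_buff *skb, struct Qdisc *sch)
	{
		qdisc_skb_cb(skb)->pkt_len = skb->len;	/* cache the length once */
		return qdisc_enqueue(skb, sch);
	}

This also lets cbq_enqueue(), hfsc_enqueue() and teql_master_xmit() drop their
local length variables and read the cached value directly.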
diff --git a/net/sched/act_gact.c b/net/sched/act_gact.c
index 422872c4f14b..ac04289da5d7 100644
--- a/net/sched/act_gact.c
+++ b/net/sched/act_gact.c
@@ -139,7 +139,7 @@ static int tcf_gact(struct sk_buff *skb, struct tc_action *a, struct tcf_result
 #else
 	action = gact->tcf_action;
 #endif
-	gact->tcf_bstats.bytes += skb->len;
+	gact->tcf_bstats.bytes += qdisc_pkt_len(skb);
 	gact->tcf_bstats.packets++;
 	if (action == TC_ACT_SHOT)
 		gact->tcf_qstats.drops++;
diff --git a/net/sched/act_ipt.c b/net/sched/act_ipt.c
index da696fd3e341..d1263b3c96c3 100644
--- a/net/sched/act_ipt.c
+++ b/net/sched/act_ipt.c
@@ -205,7 +205,7 @@ static int tcf_ipt(struct sk_buff *skb, struct tc_action *a,
 	spin_lock(&ipt->tcf_lock);

 	ipt->tcf_tm.lastuse = jiffies;
-	ipt->tcf_bstats.bytes += skb->len;
+	ipt->tcf_bstats.bytes += qdisc_pkt_len(skb);
 	ipt->tcf_bstats.packets++;

 	/* yes, we have to worry about both in and out dev
diff --git a/net/sched/act_mirred.c b/net/sched/act_mirred.c
index 1aff005d95cd..70341c020b6d 100644
--- a/net/sched/act_mirred.c
+++ b/net/sched/act_mirred.c
@@ -164,7 +164,7 @@ bad_mirred:
 	if (skb2 != NULL)
 		kfree_skb(skb2);
 	m->tcf_qstats.overlimits++;
-	m->tcf_bstats.bytes += skb->len;
+	m->tcf_bstats.bytes += qdisc_pkt_len(skb);
 	m->tcf_bstats.packets++;
 	spin_unlock(&m->tcf_lock);
 	/* should we be asking for packet to be dropped?
@@ -184,7 +184,7 @@ bad_mirred:
 		goto bad_mirred;
 	}

-	m->tcf_bstats.bytes += skb2->len;
+	m->tcf_bstats.bytes += qdisc_pkt_len(skb2);
 	m->tcf_bstats.packets++;
 	if (!(at & AT_EGRESS))
 		if (m->tcfm_ok_push)
diff --git a/net/sched/act_nat.c b/net/sched/act_nat.c
index 0a3c8339767a..7b39ed485bca 100644
--- a/net/sched/act_nat.c
+++ b/net/sched/act_nat.c
@@ -124,7 +124,7 @@ static int tcf_nat(struct sk_buff *skb, struct tc_action *a,
 	egress = p->flags & TCA_NAT_FLAG_EGRESS;
 	action = p->tcf_action;

-	p->tcf_bstats.bytes += skb->len;
+	p->tcf_bstats.bytes += qdisc_pkt_len(skb);
 	p->tcf_bstats.packets++;

 	spin_unlock(&p->tcf_lock);
diff --git a/net/sched/act_pedit.c b/net/sched/act_pedit.c
index 3cc4cb9e500e..d5f4e3404864 100644
--- a/net/sched/act_pedit.c
+++ b/net/sched/act_pedit.c
@@ -182,7 +182,7 @@ static int tcf_pedit(struct sk_buff *skb, struct tc_action *a,
 bad:
 	p->tcf_qstats.overlimits++;
 done:
-	p->tcf_bstats.bytes += skb->len;
+	p->tcf_bstats.bytes += qdisc_pkt_len(skb);
 	p->tcf_bstats.packets++;
 	spin_unlock(&p->tcf_lock);
 	return p->tcf_action;
diff --git a/net/sched/act_police.c b/net/sched/act_police.c
index 0898120bbcc0..32c3f9d9fb7a 100644
--- a/net/sched/act_police.c
+++ b/net/sched/act_police.c
@@ -272,7 +272,7 @@ static int tcf_act_police(struct sk_buff *skb, struct tc_action *a,

 	spin_lock(&police->tcf_lock);

-	police->tcf_bstats.bytes += skb->len;
+	police->tcf_bstats.bytes += qdisc_pkt_len(skb);
 	police->tcf_bstats.packets++;

 	if (police->tcfp_ewma_rate &&
@@ -282,7 +282,7 @@ static int tcf_act_police(struct sk_buff *skb, struct tc_action *a,
 		return police->tcf_action;
 	}

-	if (skb->len <= police->tcfp_mtu) {
+	if (qdisc_pkt_len(skb) <= police->tcfp_mtu) {
 		if (police->tcfp_R_tab == NULL) {
 			spin_unlock(&police->tcf_lock);
 			return police->tcfp_result;
@@ -295,12 +295,12 @@ static int tcf_act_police(struct sk_buff *skb, struct tc_action *a,
 			ptoks = toks + police->tcfp_ptoks;
 			if (ptoks > (long)L2T_P(police, police->tcfp_mtu))
 				ptoks = (long)L2T_P(police, police->tcfp_mtu);
-			ptoks -= L2T_P(police, skb->len);
+			ptoks -= L2T_P(police, qdisc_pkt_len(skb));
 		}
 		toks += police->tcfp_toks;
 		if (toks > (long)police->tcfp_burst)
 			toks = police->tcfp_burst;
-		toks -= L2T(police, skb->len);
+		toks -= L2T(police, qdisc_pkt_len(skb));
 		if ((toks|ptoks) >= 0) {
 			police->tcfp_t_c = now;
 			police->tcfp_toks = toks;
diff --git a/net/sched/act_simple.c b/net/sched/act_simple.c
index 1d421d059caf..e7851ce92cfe 100644
--- a/net/sched/act_simple.c
+++ b/net/sched/act_simple.c
@@ -41,7 +41,7 @@ static int tcf_simp(struct sk_buff *skb, struct tc_action *a, struct tcf_result

 	spin_lock(&d->tcf_lock);
 	d->tcf_tm.lastuse = jiffies;
-	d->tcf_bstats.bytes += skb->len;
+	d->tcf_bstats.bytes += qdisc_pkt_len(skb);
 	d->tcf_bstats.packets++;

 	/* print policy string followed by _ then packet count
diff --git a/net/sched/sch_atm.c b/net/sched/sch_atm.c
index 68ed35e2a763..04faa835be17 100644
--- a/net/sched/sch_atm.c
+++ b/net/sched/sch_atm.c
@@ -437,9 +437,9 @@ drop: __maybe_unused
 		flow->qstats.drops++;
 		return ret;
 	}
-	sch->bstats.bytes += skb->len;
+	sch->bstats.bytes += qdisc_pkt_len(skb);
 	sch->bstats.packets++;
-	flow->bstats.bytes += skb->len;
+	flow->bstats.bytes += qdisc_pkt_len(skb);
 	flow->bstats.packets++;
 	/*
 	 * Okay, this may seem weird. We pretend we've dropped the packet if
diff --git a/net/sched/sch_cbq.c b/net/sched/sch_cbq.c
index 1afe3eece627..f1d2f8ec8b4c 100644
--- a/net/sched/sch_cbq.c
+++ b/net/sched/sch_cbq.c
@@ -370,7 +370,6 @@ static int
 cbq_enqueue(struct sk_buff *skb, struct Qdisc *sch)
 {
 	struct cbq_sched_data *q = qdisc_priv(sch);
-	int len = skb->len;
 	int uninitialized_var(ret);
 	struct cbq_class *cl = cbq_classify(skb, sch, &ret);

@@ -391,7 +390,7 @@ cbq_enqueue(struct sk_buff *skb, struct Qdisc *sch)
 	if (ret == NET_XMIT_SUCCESS) {
 		sch->q.qlen++;
 		sch->bstats.packets++;
-		sch->bstats.bytes+=len;
+		sch->bstats.bytes += qdisc_pkt_len(skb);
 		cbq_mark_toplevel(q, cl);
 		if (!cl->next_alive)
 			cbq_activate_class(cl);
@@ -658,7 +657,6 @@ static enum hrtimer_restart cbq_undelay(struct hrtimer *timer)
 #ifdef CONFIG_NET_CLS_ACT
 static int cbq_reshape_fail(struct sk_buff *skb, struct Qdisc *child)
 {
-	int len = skb->len;
 	struct Qdisc *sch = child->__parent;
 	struct cbq_sched_data *q = qdisc_priv(sch);
 	struct cbq_class *cl = q->rx_class;
@@ -675,7 +673,7 @@ static int cbq_reshape_fail(struct sk_buff *skb, struct Qdisc *child)
 	if (qdisc_enqueue(skb, cl->q) == 0) {
 		sch->q.qlen++;
 		sch->bstats.packets++;
-		sch->bstats.bytes+=len;
+		sch->bstats.bytes += qdisc_pkt_len(skb);
 		if (!cl->next_alive)
 			cbq_activate_class(cl);
 		return 0;
@@ -881,7 +879,7 @@ cbq_dequeue_prio(struct Qdisc *sch, int prio)
 		if (skb == NULL)
 			goto skip_class;

-		cl->deficit -= skb->len;
+		cl->deficit -= qdisc_pkt_len(skb);
 		q->tx_class = cl;
 		q->tx_borrowed = borrow;
 		if (borrow != cl) {
@@ -889,11 +887,11 @@ cbq_dequeue_prio(struct Qdisc *sch, int prio)
 			borrow->xstats.borrows++;
 			cl->xstats.borrows++;
 #else
-			borrow->xstats.borrows += skb->len;
-			cl->xstats.borrows += skb->len;
+			borrow->xstats.borrows += qdisc_pkt_len(skb);
+			cl->xstats.borrows += qdisc_pkt_len(skb);
 #endif
 		}
-		q->tx_len = skb->len;
+		q->tx_len = qdisc_pkt_len(skb);

 		if (cl->deficit <= 0) {
 			q->active[prio] = cl;
diff --git a/net/sched/sch_dsmark.c b/net/sched/sch_dsmark.c
index 44d347e831cf..a935676987e2 100644
--- a/net/sched/sch_dsmark.c
+++ b/net/sched/sch_dsmark.c
@@ -258,7 +258,7 @@ static int dsmark_enqueue(struct sk_buff *skb, struct Qdisc *sch)
 		return err;
 	}

-	sch->bstats.bytes += skb->len;
+	sch->bstats.bytes += qdisc_pkt_len(skb);
 	sch->bstats.packets++;
 	sch->q.qlen++;

diff --git a/net/sched/sch_fifo.c b/net/sched/sch_fifo.c
index 1d97fa42c902..23d258bfe8ac 100644
--- a/net/sched/sch_fifo.c
+++ b/net/sched/sch_fifo.c
@@ -27,7 +27,7 @@ static int bfifo_enqueue(struct sk_buff *skb, struct Qdisc* sch)
 {
 	struct fifo_sched_data *q = qdisc_priv(sch);

-	if (likely(sch->qstats.backlog + skb->len <= q->limit))
+	if (likely(sch->qstats.backlog + qdisc_pkt_len(skb) <= q->limit))
 		return qdisc_enqueue_tail(skb, sch);

 	return qdisc_reshape_fail(skb, sch);
diff --git a/net/sched/sch_gred.c b/net/sched/sch_gred.c
index 39fa28511f07..c1ad6b8de105 100644
--- a/net/sched/sch_gred.c
+++ b/net/sched/sch_gred.c
@@ -188,7 +188,7 @@ static int gred_enqueue(struct sk_buff *skb, struct Qdisc* sch)
 	}

 	q->packetsin++;
-	q->bytesin += skb->len;
+	q->bytesin += qdisc_pkt_len(skb);

 	if (gred_wred_mode(t))
 		gred_load_wred_set(t, q);
@@ -226,8 +226,8 @@ static int gred_enqueue(struct sk_buff *skb, struct Qdisc* sch)
 			break;
 	}

-	if (q->backlog + skb->len <= q->limit) {
-		q->backlog += skb->len;
+	if (q->backlog + qdisc_pkt_len(skb) <= q->limit) {
+		q->backlog += qdisc_pkt_len(skb);
 		return qdisc_enqueue_tail(skb, sch);
 	}

@@ -254,7 +254,7 @@ static int gred_requeue(struct sk_buff *skb, struct Qdisc* sch)
 	} else {
 		if (red_is_idling(&q->parms))
 			red_end_of_idle_period(&q->parms);
-		q->backlog += skb->len;
+		q->backlog += qdisc_pkt_len(skb);
 	}

 	return qdisc_requeue(skb, sch);
@@ -277,7 +277,7 @@ static struct sk_buff *gred_dequeue(struct Qdisc* sch)
 				       "VQ 0x%x after dequeue, screwing up "
 				       "backlog.\n", tc_index_to_dp(skb));
 		} else {
-			q->backlog -= skb->len;
+			q->backlog -= qdisc_pkt_len(skb);

 			if (!q->backlog && !gred_wred_mode(t))
 				red_start_of_idle_period(&q->parms);
@@ -299,7 +299,7 @@ static unsigned int gred_drop(struct Qdisc* sch)

 	skb = qdisc_dequeue_tail(sch);
 	if (skb) {
-		unsigned int len = skb->len;
+		unsigned int len = qdisc_pkt_len(skb);
 		struct gred_sched_data *q;
 		u16 dp = tc_index_to_dp(skb);

diff --git a/net/sched/sch_hfsc.c b/net/sched/sch_hfsc.c
index fd61ed6ee1e7..0ae7d19dcba8 100644
--- a/net/sched/sch_hfsc.c
+++ b/net/sched/sch_hfsc.c
@@ -895,7 +895,7 @@ qdisc_peek_len(struct Qdisc *sch)
 			printk("qdisc_peek_len: non work-conserving qdisc ?\n");
 		return 0;
 	}
-	len = skb->len;
+	len = qdisc_pkt_len(skb);
 	if (unlikely(sch->ops->requeue(skb, sch) != NET_XMIT_SUCCESS)) {
 		if (net_ratelimit())
 			printk("qdisc_peek_len: failed to requeue\n");
@@ -1574,7 +1574,6 @@ static int
 hfsc_enqueue(struct sk_buff *skb, struct Qdisc *sch)
 {
 	struct hfsc_class *cl;
-	unsigned int len;
 	int err;

 	cl = hfsc_classify(skb, sch, &err);
@@ -1585,7 +1584,6 @@ hfsc_enqueue(struct sk_buff *skb, struct Qdisc *sch)
 		return err;
 	}

-	len = skb->len;
 	err = qdisc_enqueue(skb, cl->qdisc);
 	if (unlikely(err != NET_XMIT_SUCCESS)) {
 		cl->qstats.drops++;
@@ -1594,12 +1592,12 @@ hfsc_enqueue(struct sk_buff *skb, struct Qdisc *sch)
 	}

 	if (cl->qdisc->q.qlen == 1)
-		set_active(cl, len);
+		set_active(cl, qdisc_pkt_len(skb));

 	cl->bstats.packets++;
-	cl->bstats.bytes += len;
+	cl->bstats.bytes += qdisc_pkt_len(skb);
 	sch->bstats.packets++;
-	sch->bstats.bytes += len;
+	sch->bstats.bytes += qdisc_pkt_len(skb);
 	sch->q.qlen++;

 	return NET_XMIT_SUCCESS;
@@ -1649,9 +1647,9 @@ hfsc_dequeue(struct Qdisc *sch)
 		return NULL;
 	}

-	update_vf(cl, skb->len, cur_time);
+	update_vf(cl, qdisc_pkt_len(skb), cur_time);
 	if (realtime)
-		cl->cl_cumul += skb->len;
+		cl->cl_cumul += qdisc_pkt_len(skb);

 	if (cl->qdisc->q.qlen != 0) {
 		if (cl->cl_flags & HFSC_RSC) {
diff --git a/net/sched/sch_htb.c b/net/sched/sch_htb.c
index 72b5a946178f..30c999c61b01 100644
--- a/net/sched/sch_htb.c
+++ b/net/sched/sch_htb.c
@@ -579,13 +579,13 @@ static int htb_enqueue(struct sk_buff *skb, struct Qdisc *sch)
 	} else {
 		cl->bstats.packets +=
 			skb_is_gso(skb)?skb_shinfo(skb)->gso_segs:1;
-		cl->bstats.bytes += skb->len;
+		cl->bstats.bytes += qdisc_pkt_len(skb);
 		htb_activate(q, cl);
 	}

 	sch->q.qlen++;
 	sch->bstats.packets += skb_is_gso(skb)?skb_shinfo(skb)->gso_segs:1;
-	sch->bstats.bytes += skb->len;
+	sch->bstats.bytes += qdisc_pkt_len(skb);
 	return NET_XMIT_SUCCESS;
 }

@@ -642,7 +642,7 @@ static int htb_requeue(struct sk_buff *skb, struct Qdisc *sch)
 static void htb_charge_class(struct htb_sched *q, struct htb_class *cl,
 			     int level, struct sk_buff *skb)
 {
-	int bytes = skb->len;
+	int bytes = qdisc_pkt_len(skb);
 	long toks, diff;
 	enum htb_cmode old_mode;

@@ -855,7 +855,8 @@ next:
 	} while (cl != start);

 	if (likely(skb != NULL)) {
-		if ((cl->un.leaf.deficit[level] -= skb->len) < 0) {
+		cl->un.leaf.deficit[level] -= qdisc_pkt_len(skb);
+		if (cl->un.leaf.deficit[level] < 0) {
 			cl->un.leaf.deficit[level] += cl->un.leaf.quantum;
 			htb_next_rb_node((level ? cl->parent->un.inner.ptr : q->
 					  ptr[0]) + prio);
diff --git a/net/sched/sch_ingress.c b/net/sched/sch_ingress.c
index 956c80ad5965..4a2b77374358 100644
--- a/net/sched/sch_ingress.c
+++ b/net/sched/sch_ingress.c
@@ -77,7 +77,7 @@ static int ingress_enqueue(struct sk_buff *skb, struct Qdisc *sch)
 	result = tc_classify(skb, p->filter_list, &res);

 	sch->bstats.packets++;
-	sch->bstats.bytes += skb->len;
+	sch->bstats.bytes += qdisc_pkt_len(skb);
 	switch (result) {
 	case TC_ACT_SHOT:
 		result = TC_ACT_SHOT;
diff --git a/net/sched/sch_netem.c b/net/sched/sch_netem.c
index 13c4821e42b8..ae49be00022f 100644
--- a/net/sched/sch_netem.c
+++ b/net/sched/sch_netem.c
@@ -237,7 +237,7 @@ static int netem_enqueue(struct sk_buff *skb, struct Qdisc *sch)

 	if (likely(ret == NET_XMIT_SUCCESS)) {
 		sch->q.qlen++;
-		sch->bstats.bytes += skb->len;
+		sch->bstats.bytes += qdisc_pkt_len(skb);
 		sch->bstats.packets++;
 	} else
 		sch->qstats.drops++;
@@ -481,8 +481,8 @@ static int tfifo_enqueue(struct sk_buff *nskb, struct Qdisc *sch)

 	__skb_queue_after(list, skb, nskb);

-	sch->qstats.backlog += nskb->len;
-	sch->bstats.bytes += nskb->len;
+	sch->qstats.backlog += qdisc_pkt_len(nskb);
+	sch->bstats.bytes += qdisc_pkt_len(nskb);
 	sch->bstats.packets++;

 	return NET_XMIT_SUCCESS;
diff --git a/net/sched/sch_prio.c b/net/sched/sch_prio.c
index d29c2f87fc0b..f849243eb095 100644
--- a/net/sched/sch_prio.c
+++ b/net/sched/sch_prio.c
@@ -83,7 +83,7 @@ prio_enqueue(struct sk_buff *skb, struct Qdisc *sch)

 	ret = qdisc_enqueue(skb, qdisc);
 	if (ret == NET_XMIT_SUCCESS) {
-		sch->bstats.bytes += skb->len;
+		sch->bstats.bytes += qdisc_pkt_len(skb);
 		sch->bstats.packets++;
 		sch->q.qlen++;
 		return NET_XMIT_SUCCESS;
diff --git a/net/sched/sch_red.c b/net/sched/sch_red.c
index b48a391bc129..3f2d1d7f3bbd 100644
--- a/net/sched/sch_red.c
+++ b/net/sched/sch_red.c
@@ -94,7 +94,7 @@ static int red_enqueue(struct sk_buff *skb, struct Qdisc* sch)

 	ret = qdisc_enqueue(skb, child);
 	if (likely(ret == NET_XMIT_SUCCESS)) {
-		sch->bstats.bytes += skb->len;
+		sch->bstats.bytes += qdisc_pkt_len(skb);
 		sch->bstats.packets++;
 		sch->q.qlen++;
 	} else {
diff --git a/net/sched/sch_sfq.c b/net/sched/sch_sfq.c
index 8458f630fac4..8589da666568 100644
--- a/net/sched/sch_sfq.c
+++ b/net/sched/sch_sfq.c
@@ -245,7 +245,7 @@ static unsigned int sfq_drop(struct Qdisc *sch)
 	if (d > 1) {
 		sfq_index x = q->dep[d + SFQ_DEPTH].next;
 		skb = q->qs[x].prev;
-		len = skb->len;
+		len = qdisc_pkt_len(skb);
 		__skb_unlink(skb, &q->qs[x]);
 		kfree_skb(skb);
 		sfq_dec(q, x);
@@ -261,7 +261,7 @@ static unsigned int sfq_drop(struct Qdisc *sch)
 		q->next[q->tail] = q->next[d];
 		q->allot[q->next[d]] += q->quantum;
 		skb = q->qs[d].prev;
-		len = skb->len;
+		len = qdisc_pkt_len(skb);
 		__skb_unlink(skb, &q->qs[d]);
 		kfree_skb(skb);
 		sfq_dec(q, d);
@@ -305,7 +305,7 @@ sfq_enqueue(struct sk_buff *skb, struct Qdisc *sch)
 	if (q->qs[x].qlen >= q->limit)
 		return qdisc_drop(skb, sch);

-	sch->qstats.backlog += skb->len;
+	sch->qstats.backlog += qdisc_pkt_len(skb);
 	__skb_queue_tail(&q->qs[x], skb);
 	sfq_inc(q, x);
 	if (q->qs[x].qlen == 1) {		/* The flow is new */
@@ -320,7 +320,7 @@ sfq_enqueue(struct sk_buff *skb, struct Qdisc *sch)
 		}
 	}
 	if (++sch->q.qlen <= q->limit) {
-		sch->bstats.bytes += skb->len;
+		sch->bstats.bytes += qdisc_pkt_len(skb);
 		sch->bstats.packets++;
 		return 0;
 	}
@@ -352,7 +352,7 @@ sfq_requeue(struct sk_buff *skb, struct Qdisc *sch)
 		q->hash[x] = hash;
 	}

-	sch->qstats.backlog += skb->len;
+	sch->qstats.backlog += qdisc_pkt_len(skb);
 	__skb_queue_head(&q->qs[x], skb);
 	/* If selected queue has length q->limit+1, this means that
 	 * all another queues are empty and we do simple tail drop.
@@ -363,7 +363,7 @@ sfq_requeue(struct sk_buff *skb, struct Qdisc *sch)
 		skb = q->qs[x].prev;
 		__skb_unlink(skb, &q->qs[x]);
 		sch->qstats.drops++;
-		sch->qstats.backlog -= skb->len;
+		sch->qstats.backlog -= qdisc_pkt_len(skb);
 		kfree_skb(skb);
 		return NET_XMIT_CN;
 	}
@@ -411,7 +411,7 @@ sfq_dequeue(struct Qdisc *sch)
 	skb = __skb_dequeue(&q->qs[a]);
 	sfq_dec(q, a);
 	sch->q.qlen--;
-	sch->qstats.backlog -= skb->len;
+	sch->qstats.backlog -= qdisc_pkt_len(skb);

 	/* Is the slot empty? */
 	if (q->qs[a].qlen == 0) {
@@ -423,7 +423,7 @@ sfq_dequeue(struct Qdisc *sch)
 		}
 		q->next[q->tail] = a;
 		q->allot[a] += q->quantum;
-	} else if ((q->allot[a] -= skb->len) <= 0) {
+	} else if ((q->allot[a] -= qdisc_pkt_len(skb)) <= 0) {
 		q->tail = a;
 		a = q->next[a];
 		q->allot[a] += q->quantum;
diff --git a/net/sched/sch_tbf.c b/net/sched/sch_tbf.c
index 7d705b86dae5..b296672f7632 100644
--- a/net/sched/sch_tbf.c
+++ b/net/sched/sch_tbf.c
@@ -123,7 +123,7 @@ static int tbf_enqueue(struct sk_buff *skb, struct Qdisc* sch)
 	struct tbf_sched_data *q = qdisc_priv(sch);
 	int ret;

-	if (skb->len > q->max_size) {
+	if (qdisc_pkt_len(skb) > q->max_size) {
 		sch->qstats.drops++;
 #ifdef CONFIG_NET_CLS_ACT
 		if (sch->reshape_fail == NULL || sch->reshape_fail(skb, sch))
@@ -140,7 +140,7 @@ static int tbf_enqueue(struct sk_buff *skb, struct Qdisc* sch)
 	}

 	sch->q.qlen++;
-	sch->bstats.bytes += skb->len;
+	sch->bstats.bytes += qdisc_pkt_len(skb);
 	sch->bstats.packets++;
 	return 0;
 }
@@ -181,7 +181,7 @@ static struct sk_buff *tbf_dequeue(struct Qdisc* sch)
 		psched_time_t now;
 		long toks;
 		long ptoks = 0;
-		unsigned int len = skb->len;
+		unsigned int len = qdisc_pkt_len(skb);

 		now = psched_get_time();
 		toks = psched_tdiff_bounded(now, q->t_c, q->buffer);
diff --git a/net/sched/sch_teql.c b/net/sched/sch_teql.c
index 8b0ff345f9da..537223642b6e 100644
--- a/net/sched/sch_teql.c
+++ b/net/sched/sch_teql.c
@@ -83,7 +83,7 @@ teql_enqueue(struct sk_buff *skb, struct Qdisc* sch)

 	if (q->q.qlen < dev->tx_queue_len) {
 		__skb_queue_tail(&q->q, skb);
-		sch->bstats.bytes += skb->len;
+		sch->bstats.bytes += qdisc_pkt_len(skb);
 		sch->bstats.packets++;
 		return 0;
 	}
@@ -278,7 +278,6 @@ static int teql_master_xmit(struct sk_buff *skb, struct net_device *dev)
 	struct Qdisc *start, *q;
 	int busy;
 	int nores;
-	int len = skb->len;
 	int subq = skb_get_queue_mapping(skb);
 	struct sk_buff *skb_res = NULL;

@@ -313,7 +312,8 @@ restart:
 				master->slaves = NEXT_SLAVE(q);
 				netif_wake_queue(dev);
 				master->stats.tx_packets++;
-				master->stats.tx_bytes += len;
+				master->stats.tx_bytes +=
+					qdisc_pkt_len(skb);
 				return 0;
 			}
 			netif_tx_unlock(slave);