author	Jarek Poplawski <jarkao2@gmail.com>	2008-08-05 01:31:03 -0400
committer	David S. Miller <davem@davemloft.net>	2008-08-05 01:31:03 -0400
commit	378a2f090f7a478704a372a4869b8a9ac206234e (patch)
tree	cf324a45a9dc21231d1d3225c51c9d5d2b57bbee /net/sched
parent	6e583ce5242f32e925dcb198f7123256d0798370 (diff)
net_sched: Add qdisc __NET_XMIT_STOLEN flag
Patrick McHardy <kaber@trash.net> noticed:
"The other problem that affects all qdiscs supporting actions is
TC_ACT_QUEUED/TC_ACT_STOLEN getting mapped to NET_XMIT_SUCCESS even
though the packet is not queued, corrupting upper qdiscs' qlen
counters."

and later explained:
"The reason why it translates it at all seems to be to not increase
the drops counter. Within a single qdisc this could be avoided by
other means easily, upper qdiscs would still increase the counter
when we return anything besides NET_XMIT_SUCCESS though.

This means we need a new NET_XMIT return value to indicate this to
the upper qdiscs. So I'd suggest to introduce NET_XMIT_STOLEN,
return that to upper qdiscs and translate it to NET_XMIT_SUCCESS
in dev_queue_xmit, similar to NET_XMIT_BYPASS."

David Miller <davem@davemloft.net> noticed:
"Maybe these NET_XMIT_* values being passed around should be a set of
bits. They could be composed of base meanings, combined with specific
attributes.

So you could say "NET_XMIT_DROP | __NET_XMIT_NO_DROP_COUNT"

The attributes get masked out by the top-level ->enqueue() caller,
such that the base meanings are the only thing that make their way
up into the stack.

If it's only about communication within the qdisc tree, let's simply
code it that way."

This patch is trying to realize these ideas.

Signed-off-by: Jarek Poplawski <jarkao2@gmail.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
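For context, the diffstat below is limited to net/sched, so the definitions
the hunks depend on (added by the same patch outside this directory) are not
visible here. A minimal sketch of the scheme (names as used in the hunks;
exact bit values and file placement are illustrative):

/* Attribute bits live above the low 16 bits that carry the base
 * NET_XMIT_* meanings (illustrative values). */
#define NET_XMIT_MASK		0x0000ffff	/* base meanings only */

enum net_xmit_qdisc_t {
	__NET_XMIT_STOLEN = 0x00010000,	/* packet taken by an action; qlen unchanged */
};

/* A stolen packet must not be counted as a drop by upper qdiscs. */
#define net_xmit_drop_count(e)	((e) & __NET_XMIT_STOLEN ? 0 : 1)

/* The top-level ->enqueue() caller masks the attribute bits out, so only
 * the base meanings (NET_XMIT_SUCCESS, NET_XMIT_DROP, ...) propagate up
 * into the stack, e.g.:
 *
 *	rc = qdisc_enqueue(skb, q) & NET_XMIT_MASK;
 */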
Diffstat (limited to 'net/sched')
-rw-r--r--	net/sched/sch_atm.c	12
-rw-r--r--	net/sched/sch_cbq.c	23
-rw-r--r--	net/sched/sch_dsmark.c	8
-rw-r--r--	net/sched/sch_hfsc.c	8
-rw-r--r--	net/sched/sch_htb.c	18
-rw-r--r--	net/sched/sch_netem.c	3
-rw-r--r--	net/sched/sch_prio.c	8
-rw-r--r--	net/sched/sch_red.c	2
-rw-r--r--	net/sched/sch_sfq.c	2
-rw-r--r--	net/sched/sch_tbf.c	3
10 files changed, 54 insertions, 33 deletions
diff --git a/net/sched/sch_atm.c b/net/sched/sch_atm.c
index 6b517b9dac5b..27dd773481bc 100644
--- a/net/sched/sch_atm.c
+++ b/net/sched/sch_atm.c
@@ -415,7 +415,7 @@ static int atm_tc_enqueue(struct sk_buff *skb, struct Qdisc *sch)
 	case TC_ACT_QUEUED:
 	case TC_ACT_STOLEN:
 		kfree_skb(skb);
-		return NET_XMIT_SUCCESS;
+		return NET_XMIT_SUCCESS | __NET_XMIT_STOLEN;
 	case TC_ACT_SHOT:
 		kfree_skb(skb);
 		goto drop;
@@ -432,9 +432,11 @@ static int atm_tc_enqueue(struct sk_buff *skb, struct Qdisc *sch)
 	ret = qdisc_enqueue(skb, flow->q);
 	if (ret != 0) {
 drop: __maybe_unused
-		sch->qstats.drops++;
-		if (flow)
-			flow->qstats.drops++;
+		if (net_xmit_drop_count(ret)) {
+			sch->qstats.drops++;
+			if (flow)
+				flow->qstats.drops++;
+		}
 		return ret;
 	}
 	sch->bstats.bytes += qdisc_pkt_len(skb);
@@ -530,7 +532,7 @@ static int atm_tc_requeue(struct sk_buff *skb, struct Qdisc *sch)
 	if (!ret) {
 		sch->q.qlen++;
 		sch->qstats.requeues++;
-	} else {
+	} else if (net_xmit_drop_count(ret)) {
 		sch->qstats.drops++;
 		p->link.qstats.drops++;
 	}
diff --git a/net/sched/sch_cbq.c b/net/sched/sch_cbq.c
index 14954bf4a683..765ae5659000 100644
--- a/net/sched/sch_cbq.c
+++ b/net/sched/sch_cbq.c
@@ -256,7 +256,7 @@ cbq_classify(struct sk_buff *skb, struct Qdisc *sch, int *qerr)
 	switch (result) {
 	case TC_ACT_QUEUED:
 	case TC_ACT_STOLEN:
-		*qerr = NET_XMIT_SUCCESS;
+		*qerr = NET_XMIT_SUCCESS | __NET_XMIT_STOLEN;
 	case TC_ACT_SHOT:
 		return NULL;
 	case TC_ACT_RECLASSIFY:
@@ -397,9 +397,11 @@ cbq_enqueue(struct sk_buff *skb, struct Qdisc *sch)
 		return ret;
 	}
 
-	sch->qstats.drops++;
-	cbq_mark_toplevel(q, cl);
-	cl->qstats.drops++;
+	if (net_xmit_drop_count(ret)) {
+		sch->qstats.drops++;
+		cbq_mark_toplevel(q, cl);
+		cl->qstats.drops++;
+	}
 	return ret;
 }
 
@@ -430,8 +432,10 @@ cbq_requeue(struct sk_buff *skb, struct Qdisc *sch)
 		cbq_activate_class(cl);
 		return 0;
 	}
-	sch->qstats.drops++;
-	cl->qstats.drops++;
+	if (net_xmit_drop_count(ret)) {
+		sch->qstats.drops++;
+		cl->qstats.drops++;
+	}
 	return ret;
 }
 
@@ -664,13 +668,15 @@ static int cbq_reshape_fail(struct sk_buff *skb, struct Qdisc *child)
 	q->rx_class = NULL;
 
 	if (cl && (cl = cbq_reclassify(skb, cl)) != NULL) {
+		int ret;
 
 		cbq_mark_toplevel(q, cl);
 
 		q->rx_class = cl;
 		cl->q->__parent = sch;
 
-		if (qdisc_enqueue(skb, cl->q) == 0) {
+		ret = qdisc_enqueue(skb, cl->q);
+		if (ret == NET_XMIT_SUCCESS) {
 			sch->q.qlen++;
 			sch->bstats.packets++;
 			sch->bstats.bytes += qdisc_pkt_len(skb);
@@ -678,7 +684,8 @@ static int cbq_reshape_fail(struct sk_buff *skb, struct Qdisc *child)
 				cbq_activate_class(cl);
 			return 0;
 		}
-		sch->qstats.drops++;
+		if (net_xmit_drop_count(ret))
+			sch->qstats.drops++;
 		return 0;
 	}
 
684 691
diff --git a/net/sched/sch_dsmark.c b/net/sched/sch_dsmark.c
index a935676987e2..7170275d9f99 100644
--- a/net/sched/sch_dsmark.c
+++ b/net/sched/sch_dsmark.c
@@ -236,7 +236,7 @@ static int dsmark_enqueue(struct sk_buff *skb, struct Qdisc *sch)
 	case TC_ACT_QUEUED:
 	case TC_ACT_STOLEN:
 		kfree_skb(skb);
-		return NET_XMIT_SUCCESS;
+		return NET_XMIT_SUCCESS | __NET_XMIT_STOLEN;
 
 	case TC_ACT_SHOT:
 		goto drop;
@@ -254,7 +254,8 @@ static int dsmark_enqueue(struct sk_buff *skb, struct Qdisc *sch)
 
 	err = qdisc_enqueue(skb, p->q);
 	if (err != NET_XMIT_SUCCESS) {
-		sch->qstats.drops++;
+		if (net_xmit_drop_count(err))
+			sch->qstats.drops++;
 		return err;
 	}
 
@@ -321,7 +322,8 @@ static int dsmark_requeue(struct sk_buff *skb, struct Qdisc *sch)
 
 	err = p->q->ops->requeue(skb, p->q);
 	if (err != NET_XMIT_SUCCESS) {
-		sch->qstats.drops++;
+		if (net_xmit_drop_count(err))
+			sch->qstats.drops++;
 		return err;
 	}
 
diff --git a/net/sched/sch_hfsc.c b/net/sched/sch_hfsc.c
index 0ae7d19dcba8..5cf9ae716118 100644
--- a/net/sched/sch_hfsc.c
+++ b/net/sched/sch_hfsc.c
@@ -1166,7 +1166,7 @@ hfsc_classify(struct sk_buff *skb, struct Qdisc *sch, int *qerr)
 	switch (result) {
 	case TC_ACT_QUEUED:
 	case TC_ACT_STOLEN:
-		*qerr = NET_XMIT_SUCCESS;
+		*qerr = NET_XMIT_SUCCESS | __NET_XMIT_STOLEN;
 	case TC_ACT_SHOT:
 		return NULL;
 	}
@@ -1586,8 +1586,10 @@ hfsc_enqueue(struct sk_buff *skb, struct Qdisc *sch)
 
 	err = qdisc_enqueue(skb, cl->qdisc);
 	if (unlikely(err != NET_XMIT_SUCCESS)) {
-		cl->qstats.drops++;
-		sch->qstats.drops++;
+		if (net_xmit_drop_count(err)) {
+			cl->qstats.drops++;
+			sch->qstats.drops++;
+		}
 		return err;
 	}
 
diff --git a/net/sched/sch_htb.c b/net/sched/sch_htb.c
index 75a40951c4f2..538d79b489ae 100644
--- a/net/sched/sch_htb.c
+++ b/net/sched/sch_htb.c
@@ -221,7 +221,7 @@ static struct htb_class *htb_classify(struct sk_buff *skb, struct Qdisc *sch,
 	switch (result) {
 	case TC_ACT_QUEUED:
 	case TC_ACT_STOLEN:
-		*qerr = NET_XMIT_SUCCESS;
+		*qerr = NET_XMIT_SUCCESS | __NET_XMIT_STOLEN;
 	case TC_ACT_SHOT:
 		return NULL;
 	}
@@ -572,9 +572,11 @@ static int htb_enqueue(struct sk_buff *skb, struct Qdisc *sch)
 		kfree_skb(skb);
 		return ret;
 #endif
-	} else if (qdisc_enqueue(skb, cl->un.leaf.q) != NET_XMIT_SUCCESS) {
-		sch->qstats.drops++;
-		cl->qstats.drops++;
+	} else if ((ret = qdisc_enqueue(skb, cl->un.leaf.q)) != NET_XMIT_SUCCESS) {
+		if (net_xmit_drop_count(ret)) {
+			sch->qstats.drops++;
+			cl->qstats.drops++;
+		}
 		return NET_XMIT_DROP;
 	} else {
 		cl->bstats.packets +=
@@ -615,10 +617,12 @@ static int htb_requeue(struct sk_buff *skb, struct Qdisc *sch)
 		kfree_skb(skb);
 		return ret;
 #endif
-	} else if (cl->un.leaf.q->ops->requeue(skb, cl->un.leaf.q) !=
+	} else if ((ret = cl->un.leaf.q->ops->requeue(skb, cl->un.leaf.q)) !=
 		   NET_XMIT_SUCCESS) {
-		sch->qstats.drops++;
-		cl->qstats.drops++;
+		if (net_xmit_drop_count(ret)) {
+			sch->qstats.drops++;
+			cl->qstats.drops++;
+		}
 		return NET_XMIT_DROP;
 	} else
 		htb_activate(q, cl);
diff --git a/net/sched/sch_netem.c b/net/sched/sch_netem.c
index a59085700678..6cd6f2bc749e 100644
--- a/net/sched/sch_netem.c
+++ b/net/sched/sch_netem.c
@@ -240,8 +240,9 @@ static int netem_enqueue(struct sk_buff *skb, struct Qdisc *sch)
 		sch->q.qlen++;
 		sch->bstats.bytes += qdisc_pkt_len(skb);
 		sch->bstats.packets++;
-	} else
+	} else if (net_xmit_drop_count(ret)) {
 		sch->qstats.drops++;
+	}
 
 	pr_debug("netem: enqueue ret %d\n", ret);
 	return ret;
diff --git a/net/sched/sch_prio.c b/net/sched/sch_prio.c
index f849243eb095..adb1a52b77d3 100644
--- a/net/sched/sch_prio.c
+++ b/net/sched/sch_prio.c
@@ -45,7 +45,7 @@ prio_classify(struct sk_buff *skb, struct Qdisc *sch, int *qerr)
 	switch (err) {
 	case TC_ACT_STOLEN:
 	case TC_ACT_QUEUED:
-		*qerr = NET_XMIT_SUCCESS;
+		*qerr = NET_XMIT_SUCCESS | __NET_XMIT_STOLEN;
 	case TC_ACT_SHOT:
 		return NULL;
 	}
@@ -88,7 +88,8 @@ prio_enqueue(struct sk_buff *skb, struct Qdisc *sch)
 		sch->q.qlen++;
 		return NET_XMIT_SUCCESS;
 	}
-	sch->qstats.drops++;
+	if (net_xmit_drop_count(ret))
+		sch->qstats.drops++;
 	return ret;
 }
 
@@ -114,7 +115,8 @@ prio_requeue(struct sk_buff *skb, struct Qdisc* sch)
 		sch->qstats.requeues++;
 		return 0;
 	}
-	sch->qstats.drops++;
+	if (net_xmit_drop_count(ret))
+		sch->qstats.drops++;
 	return NET_XMIT_DROP;
 }
 
diff --git a/net/sched/sch_red.c b/net/sched/sch_red.c
index 3f2d1d7f3bbd..5da05839e225 100644
--- a/net/sched/sch_red.c
+++ b/net/sched/sch_red.c
@@ -97,7 +97,7 @@ static int red_enqueue(struct sk_buff *skb, struct Qdisc* sch)
 		sch->bstats.bytes += qdisc_pkt_len(skb);
 		sch->bstats.packets++;
 		sch->q.qlen++;
-	} else {
+	} else if (net_xmit_drop_count(ret)) {
 		q->stats.pdrop++;
 		sch->qstats.drops++;
 	}
diff --git a/net/sched/sch_sfq.c b/net/sched/sch_sfq.c
index 8589da666568..3a456e1b829a 100644
--- a/net/sched/sch_sfq.c
+++ b/net/sched/sch_sfq.c
@@ -178,7 +178,7 @@ static unsigned int sfq_classify(struct sk_buff *skb, struct Qdisc *sch,
 	switch (result) {
 	case TC_ACT_STOLEN:
 	case TC_ACT_QUEUED:
-		*qerr = NET_XMIT_SUCCESS;
+		*qerr = NET_XMIT_SUCCESS | __NET_XMIT_STOLEN;
 	case TC_ACT_SHOT:
 		return 0;
 	}
diff --git a/net/sched/sch_tbf.c b/net/sched/sch_tbf.c
index b296672f7632..7d3b7ff3bf07 100644
--- a/net/sched/sch_tbf.c
+++ b/net/sched/sch_tbf.c
@@ -135,7 +135,8 @@ static int tbf_enqueue(struct sk_buff *skb, struct Qdisc* sch)
 
 	ret = qdisc_enqueue(skb, q->qdisc);
 	if (ret != 0) {
-		sch->qstats.drops++;
+		if (net_xmit_drop_count(ret))
+			sch->qstats.drops++;
 		return ret;
 	}
 