author     Jamal Hadi Salim <hadi@cyberus.ca>            2006-01-09 01:35:55 -0500
committer  David S. Miller <davem@sunset.davemloft.net>  2006-01-09 17:16:26 -0500
commit     29f1df6cc1c3ee3530939f0e38d80a9b50645ba5 (patch)
tree       ea2e82bf11a7f41df35ce380d0931125a5511c99 /net/sched
parent     253af4235d24ddfcd9f5403485e9273b33d8fa5e (diff)
[PKT_SCHED]: Fix qdisc return code.
The mapping between TC_ACTION_SHOT and the qdisc return codes is better
suited to NET_XMIT_BYPASS so as not to confuse TCP.

Signed-off-by: Jamal Hadi Salim <hadi@cyberus.ca>
Signed-off-by: David S. Miller <davem@davemloft.net>
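For illustration, a minimal user-space sketch of the pattern this patch makes uniform across cbq, hfsc, htb and prio follows. The mock_* names, struct mock_qdisc and the stand-in constant values are inventions for this sketch only; the real code uses struct Qdisc, tc_classify(), kfree_skb() and sch->qstats.drops as shown in the hunks below. The point is that *qerr now defaults to NET_XMIT_BYPASS, so a packet eaten by a classifier action (e.g. TC_ACT_SHOT) is still counted as a drop locally but is no longer reported upward as NET_XMIT_DROP.

/* Hedged sketch with stand-in definitions; the real code lives in
 * net/sched/sch_*.c. Constant values here are illustrative only. */
#include <stdio.h>

#define NET_XMIT_SUCCESS 0
#define NET_XMIT_DROP    1   /* dropped because of local congestion        */
#define NET_XMIT_BYPASS  4   /* eaten by a classifier action               */

struct mock_qdisc {
	unsigned int drops;      /* mirrors sch->qstats.drops                 */
};

/* Mirrors the *_classify() helpers: returns a class (non-NULL) or NULL when
 * a filter action consumed the packet.  The patch changes the default *qerr
 * from NET_XMIT_DROP to NET_XMIT_BYPASS. */
static void *mock_classify(int action_shot, int *qerr)
{
	*qerr = NET_XMIT_BYPASS;
	if (action_shot)
		return NULL;     /* the action ate the packet                 */
	return (void *)1;        /* pretend a class was found                 */
}

/* Mirrors the *_enqueue() error path touched by the patch. */
static int mock_enqueue(struct mock_qdisc *sch, int action_shot)
{
	int ret;
	void *cl = mock_classify(action_shot, &ret);

	if (cl == NULL) {
		if (ret == NET_XMIT_BYPASS)
			sch->drops++;    /* count the drop locally ...        */
		/* kfree_skb(skb) in the kernel */
		return ret;              /* ... but report BYPASS, not DROP   */
	}
	return NET_XMIT_SUCCESS;
}

int main(void)
{
	struct mock_qdisc sch = { 0 };
	int ret;

	ret = mock_enqueue(&sch, 1);     /* classifier shot the packet */
	printf("shot packet:    ret=%d drops=%u\n", ret, sch.drops);

	ret = mock_enqueue(&sch, 0);     /* packet mapped to a class   */
	printf("classed packet: ret=%d drops=%u\n", ret, sch.drops);
	return 0;
}

Run as-is, the sketch reports NET_XMIT_BYPASS and one counted drop for the shot packet, and NET_XMIT_SUCCESS with no further drops once a class is found, which is exactly the accounting the hunks below preserve while changing the return code seen by the caller.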
Diffstat (limited to 'net/sched')
-rw-r--r--  net/sched/sch_cbq.c   4
-rw-r--r--  net/sched/sch_hfsc.c  4
-rw-r--r--  net/sched/sch_htb.c   4
-rw-r--r--  net/sched/sch_prio.c  7
4 files changed, 10 insertions, 9 deletions
diff --git a/net/sched/sch_cbq.c b/net/sched/sch_cbq.c
index 09453f997d8c..6cd81708bf71 100644
--- a/net/sched/sch_cbq.c
+++ b/net/sched/sch_cbq.c
@@ -257,7 +257,7 @@ cbq_classify(struct sk_buff *skb, struct Qdisc *sch, int *qerr)
 	    (cl = cbq_class_lookup(q, prio)) != NULL)
 		return cl;
 
-	*qerr = NET_XMIT_DROP;
+	*qerr = NET_XMIT_BYPASS;
 	for (;;) {
 		int result = 0;
 		defmap = head->defaults;
@@ -413,7 +413,7 @@ cbq_enqueue(struct sk_buff *skb, struct Qdisc *sch)
 	q->rx_class = cl;
 #endif
 	if (cl == NULL) {
-		if (ret == NET_XMIT_DROP)
+		if (ret == NET_XMIT_BYPASS)
 			sch->qstats.drops++;
 		kfree_skb(skb);
 		return ret;
diff --git a/net/sched/sch_hfsc.c b/net/sched/sch_hfsc.c
index cad1758ec0df..91132f6871d7 100644
--- a/net/sched/sch_hfsc.c
+++ b/net/sched/sch_hfsc.c
@@ -1227,7 +1227,7 @@ hfsc_classify(struct sk_buff *skb, struct Qdisc *sch, int *qerr)
 		if (cl->level == 0)
 			return cl;
 
-	*qerr = NET_XMIT_DROP;
+	*qerr = NET_XMIT_BYPASS;
 	tcf = q->root.filter_list;
 	while (tcf && (result = tc_classify(skb, tcf, &res)) >= 0) {
 #ifdef CONFIG_NET_CLS_ACT
@@ -1643,7 +1643,7 @@ hfsc_enqueue(struct sk_buff *skb, struct Qdisc *sch)
 
 	cl = hfsc_classify(skb, sch, &err);
 	if (cl == NULL) {
-		if (err == NET_XMIT_DROP)
+		if (err == NET_XMIT_BYPASS)
 			sch->qstats.drops++;
 		kfree_skb(skb);
 		return err;
diff --git a/net/sched/sch_htb.c b/net/sched/sch_htb.c
index 558cc087e602..3ec95df4a85e 100644
--- a/net/sched/sch_htb.c
+++ b/net/sched/sch_htb.c
@@ -321,7 +321,7 @@ static struct htb_class *htb_classify(struct sk_buff *skb, struct Qdisc *sch, in
 	if ((cl = htb_find(skb->priority,sch)) != NULL && cl->level == 0)
 		return cl;
 
-	*qerr = NET_XMIT_DROP;
+	*qerr = NET_XMIT_BYPASS;
 	tcf = q->filter_list;
 	while (tcf && (result = tc_classify(skb, tcf, &res)) >= 0) {
 #ifdef CONFIG_NET_CLS_ACT
@@ -724,7 +724,7 @@ static int htb_enqueue(struct sk_buff *skb, struct Qdisc *sch)
 		}
 #ifdef CONFIG_NET_CLS_ACT
 	} else if (!cl) {
-		if (ret == NET_XMIT_DROP)
+		if (ret == NET_XMIT_BYPASS)
 			sch->qstats.drops++;
 		kfree_skb (skb);
 		return ret;
diff --git a/net/sched/sch_prio.c b/net/sched/sch_prio.c
index 3ac0f495bad0..5b3a3e48ed92 100644
--- a/net/sched/sch_prio.c
+++ b/net/sched/sch_prio.c
@@ -54,7 +54,7 @@ prio_classify(struct sk_buff *skb, struct Qdisc *sch, int *qerr)
 	u32 band = skb->priority;
 	struct tcf_result res;
 
-	*qerr = NET_XMIT_DROP;
+	*qerr = NET_XMIT_BYPASS;
 	if (TC_H_MAJ(skb->priority) != sch->handle) {
 #ifdef CONFIG_NET_CLS_ACT
 		switch (tc_classify(skb, q->filter_list, &res)) {
@@ -91,7 +91,8 @@ prio_enqueue(struct sk_buff *skb, struct Qdisc *sch)
 	qdisc = prio_classify(skb, sch, &ret);
 #ifdef CONFIG_NET_CLS_ACT
 	if (qdisc == NULL) {
-		if (ret == NET_XMIT_DROP)
+
+		if (ret == NET_XMIT_BYPASS)
 			sch->qstats.drops++;
 		kfree_skb(skb);
 		return ret;
@@ -118,7 +119,7 @@ prio_requeue(struct sk_buff *skb, struct Qdisc* sch)
 	qdisc = prio_classify(skb, sch, &ret);
 #ifdef CONFIG_NET_CLS_ACT
 	if (qdisc == NULL) {
-		if (ret == NET_XMIT_DROP)
+		if (ret == NET_XMIT_BYPASS)
 			sch->qstats.drops++;
 		kfree_skb(skb);
 		return ret;