author		Jarek Poplawski <jarkao2@gmail.com>	2008-08-05 01:39:11 -0400
committer	David S. Miller <davem@davemloft.net>	2008-08-05 01:39:11 -0400
commit		c27f339af90bb874a7a9c680b17abfd32d4a727b (patch)
tree		5af236e0b3835976c70c1d5daa2f30be11adf35b
parent		378a2f090f7a478704a372a4869b8a9ac206234e (diff)
net_sched: Add qdisc __NET_XMIT_BYPASS flag
Patrick McHardy <kaber@trash.net> noticed that it would be nice to handle
NET_XMIT_BYPASS by NET_XMIT_SUCCESS with an internal qdisc flag
__NET_XMIT_BYPASS and to remove the mapping from dev_queue_xmit().

David Miller <davem@davemloft.net> spotted a serious bug in the first
version of this patch.

Signed-off-by: Jarek Poplawski <jarkao2@gmail.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
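As a minimal illustration of the scheme this patch relies on (a userspace sketch, not kernel code): the public NET_XMIT_* status stays in the low bits of the return value, while qdisc-internal flags such as __NET_XMIT_BYPASS occupy the bits above them, so the packet scheduler can test them with a bitwise AND and the boundary to the rest of the stack can strip them before dev_queue_xmit() sees the result. The mask value and the enqueue_root_sketch() helper below are illustrative assumptions; only the flag values come from the patch itself.

/*
 * Userspace sketch of the return-value scheme: public NET_XMIT_* codes
 * in the low bits, qdisc-internal flags above NET_XMIT_MASK.
 */
#include <stdio.h>

#define NET_XMIT_SUCCESS 0x00000000
#define NET_XMIT_DROP    0x00000001
#define NET_XMIT_MASK    0x0000ffff	/* assumed: public status bits only */

enum net_xmit_qdisc_t {
	__NET_XMIT_STOLEN = 0x00010000,	/* skb consumed by a classifier action */
	__NET_XMIT_BYPASS = 0x00020000,	/* classifier requested bypass */
};

/* What a classify/enqueue path returns when the classifier bypasses. */
static int qdisc_enqueue_sketch(void)
{
	return NET_XMIT_SUCCESS | __NET_XMIT_BYPASS;
}

/* At the boundary to the rest of the stack the internal bits are stripped,
 * which is why dev_queue_xmit() no longer needs its own BYPASS mapping. */
static int enqueue_root_sketch(void)
{
	return qdisc_enqueue_sketch() & NET_XMIT_MASK;
}

int main(void)
{
	int ret = qdisc_enqueue_sketch();

	if (ret & __NET_XMIT_BYPASS)	/* scheduler-internal check */
		printf("qdisc sees the bypass and bumps qstats.drops\n");

	printf("caller sees %#x (NET_XMIT_SUCCESS)\n", enqueue_root_sketch());
	return 0;
}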
-rw-r--r--	include/net/sch_generic.h	6
-rw-r--r--	net/core/dev.c			1
-rw-r--r--	net/sched/sch_atm.c		2
-rw-r--r--	net/sched/sch_cbq.c		4
-rw-r--r--	net/sched/sch_dsmark.c		2
-rw-r--r--	net/sched/sch_hfsc.c		4
-rw-r--r--	net/sched/sch_htb.c		6
-rw-r--r--	net/sched/sch_netem.c		2
-rw-r--r--	net/sched/sch_prio.c		6
-rw-r--r--	net/sched/sch_sfq.c		6
10 files changed, 19 insertions(+), 20 deletions(-)
diff --git a/include/net/sch_generic.h b/include/net/sch_generic.h
index f15b045a85e9..a7abfda3e447 100644
--- a/include/net/sch_generic.h
+++ b/include/net/sch_generic.h
@@ -343,14 +343,14 @@ static inline unsigned int qdisc_pkt_len(struct sk_buff *skb)
 	return qdisc_skb_cb(skb)->pkt_len;
 }
 
-#ifdef CONFIG_NET_CLS_ACT
-/* additional qdisc xmit flags */
+/* additional qdisc xmit flags (NET_XMIT_MASK in linux/netdevice.h) */
 enum net_xmit_qdisc_t {
 	__NET_XMIT_STOLEN = 0x00010000,
+	__NET_XMIT_BYPASS = 0x00020000,
 };
 
+#ifdef CONFIG_NET_CLS_ACT
 #define net_xmit_drop_count(e)	((e) & __NET_XMIT_STOLEN ? 0 : 1)
-
 #else
 #define net_xmit_drop_count(e)	(1)
 #endif
diff --git a/net/core/dev.c b/net/core/dev.c
index fc6c9881eca8..01993ad74e76 100644
--- a/net/core/dev.c
+++ b/net/core/dev.c
@@ -1805,7 +1805,6 @@ gso:
 
 	spin_unlock(root_lock);
 
-	rc = rc == NET_XMIT_BYPASS ? NET_XMIT_SUCCESS : rc;
 	goto out;
 }
 
diff --git a/net/sched/sch_atm.c b/net/sched/sch_atm.c
index 27dd773481bc..43d37256c15e 100644
--- a/net/sched/sch_atm.c
+++ b/net/sched/sch_atm.c
@@ -457,7 +457,7 @@ drop: __maybe_unused
 		return 0;
 	}
 	tasklet_schedule(&p->task);
-	return NET_XMIT_BYPASS;
+	return NET_XMIT_SUCCESS | __NET_XMIT_BYPASS;
 }
 
 /*
diff --git a/net/sched/sch_cbq.c b/net/sched/sch_cbq.c
index 765ae5659000..4e261ce62f48 100644
--- a/net/sched/sch_cbq.c
+++ b/net/sched/sch_cbq.c
@@ -230,7 +230,7 @@ cbq_classify(struct sk_buff *skb, struct Qdisc *sch, int *qerr)
 	    (cl = cbq_class_lookup(q, prio)) != NULL)
 		return cl;
 
-	*qerr = NET_XMIT_BYPASS;
+	*qerr = NET_XMIT_SUCCESS | __NET_XMIT_BYPASS;
 	for (;;) {
 		int result = 0;
 		defmap = head->defaults;
@@ -377,7 +377,7 @@ cbq_enqueue(struct sk_buff *skb, struct Qdisc *sch)
 	q->rx_class = cl;
 #endif
 	if (cl == NULL) {
-		if (ret == NET_XMIT_BYPASS)
+		if (ret & __NET_XMIT_BYPASS)
 			sch->qstats.drops++;
 		kfree_skb(skb);
 		return ret;
diff --git a/net/sched/sch_dsmark.c b/net/sched/sch_dsmark.c
index 7170275d9f99..edd1298f85f6 100644
--- a/net/sched/sch_dsmark.c
+++ b/net/sched/sch_dsmark.c
@@ -268,7 +268,7 @@ static int dsmark_enqueue(struct sk_buff *skb, struct Qdisc *sch)
 drop:
 	kfree_skb(skb);
 	sch->qstats.drops++;
-	return NET_XMIT_BYPASS;
+	return NET_XMIT_SUCCESS | __NET_XMIT_BYPASS;
 }
 
 static struct sk_buff *dsmark_dequeue(struct Qdisc *sch)
diff --git a/net/sched/sch_hfsc.c b/net/sched/sch_hfsc.c
index 5cf9ae716118..c2b8d9cce3d2 100644
--- a/net/sched/sch_hfsc.c
+++ b/net/sched/sch_hfsc.c
@@ -1159,7 +1159,7 @@ hfsc_classify(struct sk_buff *skb, struct Qdisc *sch, int *qerr)
 	if (cl->level == 0)
 		return cl;
 
-	*qerr = NET_XMIT_BYPASS;
+	*qerr = NET_XMIT_SUCCESS | __NET_XMIT_BYPASS;
 	tcf = q->root.filter_list;
 	while (tcf && (result = tc_classify(skb, tcf, &res)) >= 0) {
 #ifdef CONFIG_NET_CLS_ACT
@@ -1578,7 +1578,7 @@ hfsc_enqueue(struct sk_buff *skb, struct Qdisc *sch)
 
 	cl = hfsc_classify(skb, sch, &err);
 	if (cl == NULL) {
-		if (err == NET_XMIT_BYPASS)
+		if (err & __NET_XMIT_BYPASS)
 			sch->qstats.drops++;
 		kfree_skb(skb);
 		return err;
diff --git a/net/sched/sch_htb.c b/net/sched/sch_htb.c
index 538d79b489ae..be35422711a3 100644
--- a/net/sched/sch_htb.c
+++ b/net/sched/sch_htb.c
@@ -214,7 +214,7 @@ static struct htb_class *htb_classify(struct sk_buff *skb, struct Qdisc *sch,
 	if ((cl = htb_find(skb->priority, sch)) != NULL && cl->level == 0)
 		return cl;
 
-	*qerr = NET_XMIT_BYPASS;
+	*qerr = NET_XMIT_SUCCESS | __NET_XMIT_BYPASS;
 	tcf = q->filter_list;
 	while (tcf && (result = tc_classify(skb, tcf, &res)) >= 0) {
 #ifdef CONFIG_NET_CLS_ACT
@@ -567,7 +567,7 @@ static int htb_enqueue(struct sk_buff *skb, struct Qdisc *sch)
 	}
 #ifdef CONFIG_NET_CLS_ACT
 	} else if (!cl) {
-		if (ret == NET_XMIT_BYPASS)
+		if (ret & __NET_XMIT_BYPASS)
 			sch->qstats.drops++;
 		kfree_skb(skb);
 		return ret;
@@ -612,7 +612,7 @@ static int htb_requeue(struct sk_buff *skb, struct Qdisc *sch)
 	}
 #ifdef CONFIG_NET_CLS_ACT
 	} else if (!cl) {
-		if (ret == NET_XMIT_BYPASS)
+		if (ret & __NET_XMIT_BYPASS)
 			sch->qstats.drops++;
 		kfree_skb(skb);
 		return ret;
diff --git a/net/sched/sch_netem.c b/net/sched/sch_netem.c
index 6cd6f2bc749e..fb0294d0b55e 100644
--- a/net/sched/sch_netem.c
+++ b/net/sched/sch_netem.c
@@ -176,7 +176,7 @@ static int netem_enqueue(struct sk_buff *skb, struct Qdisc *sch)
 	if (count == 0) {
 		sch->qstats.drops++;
 		kfree_skb(skb);
-		return NET_XMIT_BYPASS;
+		return NET_XMIT_SUCCESS | __NET_XMIT_BYPASS;
 	}
 
 	skb_orphan(skb);
diff --git a/net/sched/sch_prio.c b/net/sched/sch_prio.c
index adb1a52b77d3..eac197610edf 100644
--- a/net/sched/sch_prio.c
+++ b/net/sched/sch_prio.c
@@ -38,7 +38,7 @@ prio_classify(struct sk_buff *skb, struct Qdisc *sch, int *qerr)
 	struct tcf_result res;
 	int err;
 
-	*qerr = NET_XMIT_BYPASS;
+	*qerr = NET_XMIT_SUCCESS | __NET_XMIT_BYPASS;
 	if (TC_H_MAJ(skb->priority) != sch->handle) {
 		err = tc_classify(skb, q->filter_list, &res);
 #ifdef CONFIG_NET_CLS_ACT
@@ -74,7 +74,7 @@ prio_enqueue(struct sk_buff *skb, struct Qdisc *sch)
 #ifdef CONFIG_NET_CLS_ACT
 	if (qdisc == NULL) {
 
-		if (ret == NET_XMIT_BYPASS)
+		if (ret & __NET_XMIT_BYPASS)
 			sch->qstats.drops++;
 		kfree_skb(skb);
 		return ret;
@@ -103,7 +103,7 @@ prio_requeue(struct sk_buff *skb, struct Qdisc* sch)
 	qdisc = prio_classify(skb, sch, &ret);
 #ifdef CONFIG_NET_CLS_ACT
 	if (qdisc == NULL) {
-		if (ret == NET_XMIT_BYPASS)
+		if (ret & __NET_XMIT_BYPASS)
 			sch->qstats.drops++;
 		kfree_skb(skb);
 		return ret;
diff --git a/net/sched/sch_sfq.c b/net/sched/sch_sfq.c
index 3a456e1b829a..6e041d10dbdb 100644
--- a/net/sched/sch_sfq.c
+++ b/net/sched/sch_sfq.c
@@ -171,7 +171,7 @@ static unsigned int sfq_classify(struct sk_buff *skb, struct Qdisc *sch,
 	if (!q->filter_list)
 		return sfq_hash(q, skb) + 1;
 
-	*qerr = NET_XMIT_BYPASS;
+	*qerr = NET_XMIT_SUCCESS | __NET_XMIT_BYPASS;
 	result = tc_classify(skb, q->filter_list, &res);
 	if (result >= 0) {
 #ifdef CONFIG_NET_CLS_ACT
@@ -285,7 +285,7 @@ sfq_enqueue(struct sk_buff *skb, struct Qdisc *sch)
 
 	hash = sfq_classify(skb, sch, &ret);
 	if (hash == 0) {
-		if (ret == NET_XMIT_BYPASS)
+		if (ret & __NET_XMIT_BYPASS)
 			sch->qstats.drops++;
 		kfree_skb(skb);
 		return ret;
@@ -339,7 +339,7 @@ sfq_requeue(struct sk_buff *skb, struct Qdisc *sch)
 
 	hash = sfq_classify(skb, sch, &ret);
 	if (hash == 0) {
-		if (ret == NET_XMIT_BYPASS)
+		if (ret & __NET_XMIT_BYPASS)
 			sch->qstats.drops++;
 		kfree_skb(skb);
 		return ret;