author    Ben Greear <greearb@candelatech.com>    2010-08-10 04:45:40 -0400
committer David S. Miller <davem@davemloft.net>   2010-08-10 05:51:11 -0400
commit    9871e50edd25e2adf69b369817100821cb1e6de8 (patch)
tree      ba44998e2c45d3ff842c42dd7fcc4e2c0e69e4e6 /net/sched
parent    06d88e4a88cf6a90de6f0744e2cc320eb67aac81 (diff)
net: Use NET_XMIT_SUCCESS where possible.

This is based on work originally done by Patrick McHardy.

Signed-off-by: Ben Greear <greearb@candelatech.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
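For readers unfamiliar with the qdisc return-code convention, the sketch below shows the enqueue pattern these hunks converge on: compare the child qdisc's result against NET_XMIT_SUCCESS rather than a bare 0, count only real drops, and return the macro on success. It is an illustrative fragment only, not code from this patch; the function name and the example_sched_data structure are made up, while qdisc_enqueue(), net_xmit_drop_count(), qdisc_pkt_len(), and NET_XMIT_SUCCESS are the existing helpers and constants used in the hunks below.

    /* Illustrative only: a hypothetical classful enqueue following the
     * convention this patch standardizes on (qdisc API of this era).
     */
    static int example_enqueue(struct sk_buff *skb, struct Qdisc *sch)
    {
        struct example_sched_data *q = qdisc_priv(sch); /* hypothetical private data */
        int ret;

        ret = qdisc_enqueue(skb, q->qdisc);          /* hand skb to the child qdisc */
        if (ret != NET_XMIT_SUCCESS) {               /* test the macro, not a bare 0 */
            if (net_xmit_drop_count(ret))            /* NET_XMIT_CN is not a local drop */
                sch->qstats.drops++;
            return ret;
        }

        sch->q.qlen++;
        sch->bstats.bytes += qdisc_pkt_len(skb);
        sch->bstats.packets++;
        return NET_XMIT_SUCCESS;                     /* instead of returning 0 */
    }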
Diffstat (limited to 'net/sched')
-rw-r--r--    net/sched/sch_atm.c     4
-rw-r--r--    net/sched/sch_sfq.c     2
-rw-r--r--    net/sched/sch_tbf.c     4
-rw-r--r--    net/sched/sch_teql.c    2
4 files changed, 6 insertions(+), 6 deletions(-)
diff --git a/net/sched/sch_atm.c b/net/sched/sch_atm.c
index e114f23d5eae..340662789529 100644
--- a/net/sched/sch_atm.c
+++ b/net/sched/sch_atm.c
@@ -418,7 +418,7 @@ static int atm_tc_enqueue(struct sk_buff *skb, struct Qdisc *sch)
 	}
 
 	ret = qdisc_enqueue(skb, flow->q);
-	if (ret != 0) {
+	if (ret != NET_XMIT_SUCCESS) {
 drop: __maybe_unused
 		if (net_xmit_drop_count(ret)) {
 			sch->qstats.drops++;
@@ -442,7 +442,7 @@ drop: __maybe_unused
 	 */
 	if (flow == &p->link) {
 		sch->q.qlen++;
-		return 0;
+		return NET_XMIT_SUCCESS;
 	}
 	tasklet_schedule(&p->task);
 	return NET_XMIT_SUCCESS | __NET_XMIT_BYPASS;
diff --git a/net/sched/sch_sfq.c b/net/sched/sch_sfq.c
index d8e0ae5fa16a..b8bcb2096df8 100644
--- a/net/sched/sch_sfq.c
+++ b/net/sched/sch_sfq.c
@@ -334,7 +334,7 @@ sfq_enqueue(struct sk_buff *skb, struct Qdisc *sch)
 	if (++sch->q.qlen <= q->limit) {
 		sch->bstats.bytes += qdisc_pkt_len(skb);
 		sch->bstats.packets++;
-		return 0;
+		return NET_XMIT_SUCCESS;
 	}
 
 	sfq_drop(sch);
diff --git a/net/sched/sch_tbf.c b/net/sched/sch_tbf.c
index 0991c640cd3e..641a30d64635 100644
--- a/net/sched/sch_tbf.c
+++ b/net/sched/sch_tbf.c
@@ -127,7 +127,7 @@ static int tbf_enqueue(struct sk_buff *skb, struct Qdisc* sch)
 		return qdisc_reshape_fail(skb, sch);
 
 	ret = qdisc_enqueue(skb, q->qdisc);
-	if (ret != 0) {
+	if (ret != NET_XMIT_SUCCESS) {
 		if (net_xmit_drop_count(ret))
 			sch->qstats.drops++;
 		return ret;
@@ -136,7 +136,7 @@ static int tbf_enqueue(struct sk_buff *skb, struct Qdisc* sch)
 	sch->q.qlen++;
 	sch->bstats.bytes += qdisc_pkt_len(skb);
 	sch->bstats.packets++;
-	return 0;
+	return NET_XMIT_SUCCESS;
 }
 
 static unsigned int tbf_drop(struct Qdisc* sch)
diff --git a/net/sched/sch_teql.c b/net/sched/sch_teql.c
index 807643bdcbac..feaabc103ce6 100644
--- a/net/sched/sch_teql.c
+++ b/net/sched/sch_teql.c
@@ -85,7 +85,7 @@ teql_enqueue(struct sk_buff *skb, struct Qdisc* sch)
 		__skb_queue_tail(&q->q, skb);
 		sch->bstats.bytes += qdisc_pkt_len(skb);
 		sch->bstats.packets++;
-		return 0;
+		return NET_XMIT_SUCCESS;
 	}
 
 	kfree_skb(skb);