about summary refs log tree commit diff stats
path: root/net/sched
diff options
context:
space:
mode:
authorJussi Kivilinna <jussi.kivilinna@mbnet.fi>2008-07-20 03:08:04 -0400
committerDavid S. Miller <davem@davemloft.net>2008-07-20 03:08:04 -0400
commit5f86173bdf15981ca49d0434f638b68f70a35644 (patch)
treee6792339e577ed4a8261358e56df9f1a2b87f655 /net/sched
parentdb7a94d60f871ce6a52e97d82dea476cee0c4ea0 (diff)
net_sched: Add qdisc_enqueue wrapper
Signed-off-by: Jussi Kivilinna <jussi.kivilinna@mbnet.fi>
Signed-off-by: David S. Miller <davem@davemloft.net>
Diffstat (limited to 'net/sched')
-rw-r--r--net/sched/sch_atm.c2
-rw-r--r--net/sched/sch_cbq.c5
-rw-r--r--net/sched/sch_dsmark.c2
-rw-r--r--net/sched/sch_hfsc.c2
-rw-r--r--net/sched/sch_htb.c3
-rw-r--r--net/sched/sch_netem.c20
-rw-r--r--net/sched/sch_prio.c3
-rw-r--r--net/sched/sch_red.c2
-rw-r--r--net/sched/sch_tbf.c3
9 files changed, 24 insertions, 18 deletions
diff --git a/net/sched/sch_atm.c b/net/sched/sch_atm.c
index 0de757e3be4a..68ed35e2a763 100644
--- a/net/sched/sch_atm.c
+++ b/net/sched/sch_atm.c
@@ -429,7 +429,7 @@ static int atm_tc_enqueue(struct sk_buff *skb, struct Qdisc *sch)
429#endif 429#endif
430 } 430 }
431 431
432 ret = flow->q->enqueue(skb, flow->q); 432 ret = qdisc_enqueue(skb, flow->q);
433 if (ret != 0) { 433 if (ret != 0) {
434drop: __maybe_unused 434drop: __maybe_unused
435 sch->qstats.drops++; 435 sch->qstats.drops++;
diff --git a/net/sched/sch_cbq.c b/net/sched/sch_cbq.c
index a3953bbe2d79..1afe3eece627 100644
--- a/net/sched/sch_cbq.c
+++ b/net/sched/sch_cbq.c
@@ -387,7 +387,8 @@ cbq_enqueue(struct sk_buff *skb, struct Qdisc *sch)
387#ifdef CONFIG_NET_CLS_ACT 387#ifdef CONFIG_NET_CLS_ACT
388 cl->q->__parent = sch; 388 cl->q->__parent = sch;
389#endif 389#endif
390 if ((ret = cl->q->enqueue(skb, cl->q)) == NET_XMIT_SUCCESS) { 390 ret = qdisc_enqueue(skb, cl->q);
391 if (ret == NET_XMIT_SUCCESS) {
391 sch->q.qlen++; 392 sch->q.qlen++;
392 sch->bstats.packets++; 393 sch->bstats.packets++;
393 sch->bstats.bytes+=len; 394 sch->bstats.bytes+=len;
@@ -671,7 +672,7 @@ static int cbq_reshape_fail(struct sk_buff *skb, struct Qdisc *child)
671 q->rx_class = cl; 672 q->rx_class = cl;
672 cl->q->__parent = sch; 673 cl->q->__parent = sch;
673 674
674 if (cl->q->enqueue(skb, cl->q) == 0) { 675 if (qdisc_enqueue(skb, cl->q) == 0) {
675 sch->q.qlen++; 676 sch->q.qlen++;
676 sch->bstats.packets++; 677 sch->bstats.packets++;
677 sch->bstats.bytes+=len; 678 sch->bstats.bytes+=len;
diff --git a/net/sched/sch_dsmark.c b/net/sched/sch_dsmark.c
index 3aafbd17393a..44d347e831cf 100644
--- a/net/sched/sch_dsmark.c
+++ b/net/sched/sch_dsmark.c
@@ -252,7 +252,7 @@ static int dsmark_enqueue(struct sk_buff *skb, struct Qdisc *sch)
252 } 252 }
253 } 253 }
254 254
255 err = p->q->enqueue(skb, p->q); 255 err = qdisc_enqueue(skb, p->q);
256 if (err != NET_XMIT_SUCCESS) { 256 if (err != NET_XMIT_SUCCESS) {
257 sch->qstats.drops++; 257 sch->qstats.drops++;
258 return err; 258 return err;
diff --git a/net/sched/sch_hfsc.c b/net/sched/sch_hfsc.c
index 5090708ba384..fd61ed6ee1e7 100644
--- a/net/sched/sch_hfsc.c
+++ b/net/sched/sch_hfsc.c
@@ -1586,7 +1586,7 @@ hfsc_enqueue(struct sk_buff *skb, struct Qdisc *sch)
1586 } 1586 }
1587 1587
1588 len = skb->len; 1588 len = skb->len;
1589 err = cl->qdisc->enqueue(skb, cl->qdisc); 1589 err = qdisc_enqueue(skb, cl->qdisc);
1590 if (unlikely(err != NET_XMIT_SUCCESS)) { 1590 if (unlikely(err != NET_XMIT_SUCCESS)) {
1591 cl->qstats.drops++; 1591 cl->qstats.drops++;
1592 sch->qstats.drops++; 1592 sch->qstats.drops++;
diff --git a/net/sched/sch_htb.c b/net/sched/sch_htb.c
index ee48457eaa4a..72b5a946178f 100644
--- a/net/sched/sch_htb.c
+++ b/net/sched/sch_htb.c
@@ -572,8 +572,7 @@ static int htb_enqueue(struct sk_buff *skb, struct Qdisc *sch)
572 kfree_skb(skb); 572 kfree_skb(skb);
573 return ret; 573 return ret;
574#endif 574#endif
575 } else if (cl->un.leaf.q->enqueue(skb, cl->un.leaf.q) != 575 } else if (qdisc_enqueue(skb, cl->un.leaf.q) != NET_XMIT_SUCCESS) {
576 NET_XMIT_SUCCESS) {
577 sch->qstats.drops++; 576 sch->qstats.drops++;
578 cl->qstats.drops++; 577 cl->qstats.drops++;
579 return NET_XMIT_DROP; 578 return NET_XMIT_DROP;
diff --git a/net/sched/sch_netem.c b/net/sched/sch_netem.c
index c5ea40c9eb21..13c4821e42b8 100644
--- a/net/sched/sch_netem.c
+++ b/net/sched/sch_netem.c
@@ -82,6 +82,12 @@ struct netem_skb_cb {
82 psched_time_t time_to_send; 82 psched_time_t time_to_send;
83}; 83};
84 84
85static inline struct netem_skb_cb *netem_skb_cb(struct sk_buff *skb)
86{
87 BUILD_BUG_ON(sizeof(skb->cb) < sizeof(struct netem_skb_cb));
88 return (struct netem_skb_cb *)skb->cb;
89}
90
85/* init_crandom - initialize correlated random number generator 91/* init_crandom - initialize correlated random number generator
86 * Use entropy source for initial seed. 92 * Use entropy source for initial seed.
87 */ 93 */
@@ -184,7 +190,7 @@ static int netem_enqueue(struct sk_buff *skb, struct Qdisc *sch)
184 u32 dupsave = q->duplicate; /* prevent duplicating a dup... */ 190 u32 dupsave = q->duplicate; /* prevent duplicating a dup... */
185 q->duplicate = 0; 191 q->duplicate = 0;
186 192
187 rootq->enqueue(skb2, rootq); 193 qdisc_enqueue_root(skb2, rootq);
188 q->duplicate = dupsave; 194 q->duplicate = dupsave;
189 } 195 }
190 196
@@ -205,7 +211,7 @@ static int netem_enqueue(struct sk_buff *skb, struct Qdisc *sch)
205 skb->data[net_random() % skb_headlen(skb)] ^= 1<<(net_random() % 8); 211 skb->data[net_random() % skb_headlen(skb)] ^= 1<<(net_random() % 8);
206 } 212 }
207 213
208 cb = (struct netem_skb_cb *)skb->cb; 214 cb = netem_skb_cb(skb);
209 if (q->gap == 0 /* not doing reordering */ 215 if (q->gap == 0 /* not doing reordering */
210 || q->counter < q->gap /* inside last reordering gap */ 216 || q->counter < q->gap /* inside last reordering gap */
211 || q->reorder < get_crandom(&q->reorder_cor)) { 217 || q->reorder < get_crandom(&q->reorder_cor)) {
@@ -218,7 +224,7 @@ static int netem_enqueue(struct sk_buff *skb, struct Qdisc *sch)
218 now = psched_get_time(); 224 now = psched_get_time();
219 cb->time_to_send = now + delay; 225 cb->time_to_send = now + delay;
220 ++q->counter; 226 ++q->counter;
221 ret = q->qdisc->enqueue(skb, q->qdisc); 227 ret = qdisc_enqueue(skb, q->qdisc);
222 } else { 228 } else {
223 /* 229 /*
224 * Do re-ordering by putting one out of N packets at the front 230 * Do re-ordering by putting one out of N packets at the front
@@ -277,8 +283,7 @@ static struct sk_buff *netem_dequeue(struct Qdisc *sch)
277 283
278 skb = q->qdisc->dequeue(q->qdisc); 284 skb = q->qdisc->dequeue(q->qdisc);
279 if (skb) { 285 if (skb) {
280 const struct netem_skb_cb *cb 286 const struct netem_skb_cb *cb = netem_skb_cb(skb);
281 = (const struct netem_skb_cb *)skb->cb;
282 psched_time_t now = psched_get_time(); 287 psched_time_t now = psched_get_time();
283 288
284 /* if more time remaining? */ 289 /* if more time remaining? */
@@ -457,7 +462,7 @@ static int tfifo_enqueue(struct sk_buff *nskb, struct Qdisc *sch)
457{ 462{
458 struct fifo_sched_data *q = qdisc_priv(sch); 463 struct fifo_sched_data *q = qdisc_priv(sch);
459 struct sk_buff_head *list = &sch->q; 464 struct sk_buff_head *list = &sch->q;
460 psched_time_t tnext = ((struct netem_skb_cb *)nskb->cb)->time_to_send; 465 psched_time_t tnext = netem_skb_cb(nskb)->time_to_send;
461 struct sk_buff *skb; 466 struct sk_buff *skb;
462 467
463 if (likely(skb_queue_len(list) < q->limit)) { 468 if (likely(skb_queue_len(list) < q->limit)) {
@@ -468,8 +473,7 @@ static int tfifo_enqueue(struct sk_buff *nskb, struct Qdisc *sch)
468 } 473 }
469 474
470 skb_queue_reverse_walk(list, skb) { 475 skb_queue_reverse_walk(list, skb) {
471 const struct netem_skb_cb *cb 476 const struct netem_skb_cb *cb = netem_skb_cb(skb);
472 = (const struct netem_skb_cb *)skb->cb;
473 477
474 if (tnext >= cb->time_to_send) 478 if (tnext >= cb->time_to_send)
475 break; 479 break;
diff --git a/net/sched/sch_prio.c b/net/sched/sch_prio.c
index 536ca474dc69..d29c2f87fc0b 100644
--- a/net/sched/sch_prio.c
+++ b/net/sched/sch_prio.c
@@ -81,7 +81,8 @@ prio_enqueue(struct sk_buff *skb, struct Qdisc *sch)
81 } 81 }
82#endif 82#endif
83 83
84 if ((ret = qdisc->enqueue(skb, qdisc)) == NET_XMIT_SUCCESS) { 84 ret = qdisc_enqueue(skb, qdisc);
85 if (ret == NET_XMIT_SUCCESS) {
85 sch->bstats.bytes += skb->len; 86 sch->bstats.bytes += skb->len;
86 sch->bstats.packets++; 87 sch->bstats.packets++;
87 sch->q.qlen++; 88 sch->q.qlen++;
diff --git a/net/sched/sch_red.c b/net/sched/sch_red.c
index 77098acf0adc..b48a391bc129 100644
--- a/net/sched/sch_red.c
+++ b/net/sched/sch_red.c
@@ -92,7 +92,7 @@ static int red_enqueue(struct sk_buff *skb, struct Qdisc* sch)
92 break; 92 break;
93 } 93 }
94 94
95 ret = child->enqueue(skb, child); 95 ret = qdisc_enqueue(skb, child);
96 if (likely(ret == NET_XMIT_SUCCESS)) { 96 if (likely(ret == NET_XMIT_SUCCESS)) {
97 sch->bstats.bytes += skb->len; 97 sch->bstats.bytes += skb->len;
98 sch->bstats.packets++; 98 sch->bstats.packets++;
diff --git a/net/sched/sch_tbf.c b/net/sched/sch_tbf.c
index 444c227fcb6b..7d705b86dae5 100644
--- a/net/sched/sch_tbf.c
+++ b/net/sched/sch_tbf.c
@@ -133,7 +133,8 @@ static int tbf_enqueue(struct sk_buff *skb, struct Qdisc* sch)
133 return NET_XMIT_DROP; 133 return NET_XMIT_DROP;
134 } 134 }
135 135
136 if ((ret = q->qdisc->enqueue(skb, q->qdisc)) != 0) { 136 ret = qdisc_enqueue(skb, q->qdisc);
137 if (ret != 0) {
137 sch->qstats.drops++; 138 sch->qstats.drops++;
138 return ret; 139 return ret;
139 } 140 }