author     Jarek Poplawski <jarkao2@gmail.com>    2008-10-31 03:46:19 -0400
committer  David S. Miller <davem@davemloft.net>  2008-10-31 03:46:19 -0400
commit     03c05f0d4bb0c267edf12d614025a40e33c5a6f9 (patch)
tree       c23b79ed8b7dfcdabb872f5c114541ae2de8ac0f
parent     8e3af97899db433111287e07d5105189f56fe191 (diff)
pkt_sched: Use qdisc->ops->peek() instead of ->dequeue() & ->requeue()
Use qdisc->ops->peek() instead of the ->dequeue() & ->requeue() pair.
After this patch the only remaining user of qdisc->ops->requeue() is
netem_enqueue(). Based on ideas of Herbert Xu, Patrick McHardy and
David S. Miller.
Signed-off-by: Jarek Poplawski <jarkao2@gmail.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
 net/sched/sch_atm.c   | 11 +++++++----
 net/sched/sch_hfsc.c  | 12 ++----------
 net/sched/sch_netem.c | 13 +++++--------
 net/sched/sch_tbf.c   | 12 +++++-------
 4 files changed, 19 insertions(+), 29 deletions(-)
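Before the per-file diffs, here is a minimal user-space sketch of the peek-then-dequeue pattern the patch switches to. The toy_queue type and every helper name below are hypothetical stand-ins for a child qdisc's ->ops->peek() and ->dequeue(); this is not kernel code, just the shape of the idea:

/*
 * Hypothetical toy queue with peek()/dequeue(), standing in for a child
 * qdisc's ->ops->peek() and ->dequeue(); not the kernel API.
 */
#include <stdbool.h>
#include <stdio.h>
#include <stdlib.h>

struct pkt {
	int len;
	struct pkt *next;
};

struct toy_queue {
	struct pkt *head;
};

/* look at the head packet without removing it */
static struct pkt *toy_peek(struct toy_queue *q)
{
	return q->head;
}

/* remove and return the head packet */
static struct pkt *toy_dequeue(struct toy_queue *q)
{
	struct pkt *p = q->head;

	if (p)
		q->head = p->next;
	return p;
}

/* stand-in for a check like atm_may_send(): is there room for len bytes? */
static bool may_send(int budget, int len)
{
	return len <= budget;
}

/*
 * Peek-then-dequeue, as in the reworked sch_atm_dequeue(): a packet that
 * cannot be sent yet is simply left at the head of the queue, so there is
 * nothing to requeue and nothing to drop or reorder.
 */
static void drain(struct toy_queue *q, int budget)
{
	struct pkt *p;

	while ((p = toy_peek(q))) {
		if (!may_send(budget, p->len))
			break;		/* leave it queued for later */

		p = toy_dequeue(q);	/* commit: actually remove it */
		if (!p)
			break;		/* mirrors the defensive unlikely(!skb) check */

		budget -= p->len;
		printf("sent %d bytes, %d left in budget\n", p->len, budget);
		free(p);
	}
}

int main(void)
{
	struct toy_queue q = { NULL };
	struct pkt **tail = &q.head;
	int lens[] = { 300, 700, 900 }, i;

	for (i = 0; i < 3; i++) {
		struct pkt *p = malloc(sizeof(*p));

		p->len = lens[i];
		p->next = NULL;
		*tail = p;
		tail = &p->next;
	}

	/* a budget of 1000 sends 300 and 700; the 900-byte packet is peeked,
	 * rejected, and stays at the head of the queue */
	drain(&q, 1000);
	return 0;
}

The key property is that a packet which cannot be sent yet never leaves the child queue, so the old failure handling around ->requeue() (drops, qdisc_tree_decrease_qlen(), reordering) disappears from the callers in the diffs below.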
diff --git a/net/sched/sch_atm.c b/net/sched/sch_atm.c
index f9eac0818d18..2ee0c1a8efa9 100644
--- a/net/sched/sch_atm.c
+++ b/net/sched/sch_atm.c
@@ -480,11 +480,14 @@ static void sch_atm_dequeue(unsigned long data)
 		 * If traffic is properly shaped, this won't generate nasty
 		 * little bursts. Otherwise, it may ... (but that's okay)
 		 */
-		while ((skb = flow->q->dequeue(flow->q))) {
-			if (!atm_may_send(flow->vcc, skb->truesize)) {
-				(void)flow->q->ops->requeue(skb, flow->q);
+		while ((skb = flow->q->ops->peek(flow->q))) {
+			if (!atm_may_send(flow->vcc, skb->truesize))
 				break;
-			}
+
+			skb = flow->q->dequeue(flow->q);
+			if (unlikely(!skb))
+				break;
+
 			pr_debug("atm_tc_dequeue: sending on class %p\n", flow);
 			/* remove any LL header somebody else has attached */
 			skb_pull(skb, skb_network_offset(skb));
diff --git a/net/sched/sch_hfsc.c b/net/sched/sch_hfsc.c
index c1e77da8cd09..ddfc40887848 100644
--- a/net/sched/sch_hfsc.c
+++ b/net/sched/sch_hfsc.c
@@ -880,28 +880,20 @@ set_passive(struct hfsc_class *cl)
 	 */
 }
 
-/*
- * hack to get length of first packet in queue.
- */
 static unsigned int
 qdisc_peek_len(struct Qdisc *sch)
 {
 	struct sk_buff *skb;
 	unsigned int len;
 
-	skb = sch->dequeue(sch);
+	skb = sch->ops->peek(sch);
 	if (skb == NULL) {
 		if (net_ratelimit())
 			printk("qdisc_peek_len: non work-conserving qdisc ?\n");
 		return 0;
 	}
 	len = qdisc_pkt_len(skb);
-	if (unlikely(sch->ops->requeue(skb, sch) != NET_XMIT_SUCCESS)) {
-		if (net_ratelimit())
-			printk("qdisc_peek_len: failed to requeue\n");
-		qdisc_tree_decrease_qlen(sch, 1);
-		return 0;
-	}
+
 	return len;
 }
 
diff --git a/net/sched/sch_netem.c b/net/sched/sch_netem.c
index 2898d9dc31eb..74fbdb52baed 100644
--- a/net/sched/sch_netem.c
+++ b/net/sched/sch_netem.c
@@ -283,25 +283,22 @@ static struct sk_buff *netem_dequeue(struct Qdisc *sch)
 	if (sch->flags & TCQ_F_THROTTLED)
 		return NULL;
 
-	skb = q->qdisc->dequeue(q->qdisc);
+	skb = q->qdisc->ops->peek(q->qdisc);
 	if (skb) {
 		const struct netem_skb_cb *cb = netem_skb_cb(skb);
 		psched_time_t now = psched_get_time();
 
 		/* if more time remaining? */
 		if (cb->time_to_send <= now) {
+			skb = q->qdisc->dequeue(q->qdisc);
+			if (unlikely(!skb))
+				return NULL;
+
 			pr_debug("netem_dequeue: return skb=%p\n", skb);
 			sch->q.qlen--;
 			return skb;
 		}
 
-		if (unlikely(q->qdisc->ops->requeue(skb, q->qdisc) != NET_XMIT_SUCCESS)) {
-			qdisc_tree_decrease_qlen(q->qdisc, 1);
-			sch->qstats.drops++;
-			printk(KERN_ERR "netem: %s could not requeue\n",
-			       q->qdisc->ops->id);
-		}
-
 		qdisc_watchdog_schedule(&q->watchdog, cb->time_to_send);
 	}
 
diff --git a/net/sched/sch_tbf.c b/net/sched/sch_tbf.c
index 94c61598b86a..61fdc77a48d2 100644
--- a/net/sched/sch_tbf.c
+++ b/net/sched/sch_tbf.c
@@ -169,7 +169,7 @@ static struct sk_buff *tbf_dequeue(struct Qdisc* sch)
 	struct tbf_sched_data *q = qdisc_priv(sch);
 	struct sk_buff *skb;
 
-	skb = q->qdisc->dequeue(q->qdisc);
+	skb = q->qdisc->ops->peek(q->qdisc);
 
 	if (skb) {
 		psched_time_t now;
@@ -192,6 +192,10 @@ static struct sk_buff *tbf_dequeue(struct Qdisc* sch)
 		toks -= L2T(q, len);
 
 		if ((toks|ptoks) >= 0) {
+			skb = q->qdisc->dequeue(q->qdisc);
+			if (unlikely(!skb))
+				return NULL;
+
 			q->t_c = now;
 			q->tokens = toks;
 			q->ptokens = ptoks;
@@ -214,12 +218,6 @@ static struct sk_buff *tbf_dequeue(struct Qdisc* sch)
 		   (cf. CSZ, HPFQ, HFSC)
 		 */
 
-		if (q->qdisc->ops->requeue(skb, q->qdisc) != NET_XMIT_SUCCESS) {
-			/* When requeue fails skb is dropped */
-			qdisc_tree_decrease_qlen(q->qdisc, 1);
-			sch->qstats.drops++;
-		}
-
 		sch->qstats.overlimits++;
 	}
 	return NULL;
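For the tbf and hfsc callers, the useful part of peek() is that the head packet's size can be inspected, and the token or service-curve arithmetic done, before anything is removed from the child qdisc. A second minimal sketch of that caller-side flow, again with hypothetical types and names rather than the kernel API:

#include <stddef.h>
#include <stdio.h>

/* toy FIFO of packet lengths, head first; purely illustrative */
struct toy_fifo {
	const int *len;
	size_t count;
};

/* like qdisc_peek_len(): length of the head packet, 0 if empty */
static int fifo_peek_len(const struct toy_fifo *q)
{
	return q->count ? q->len[0] : 0;
}

/* remove the head packet and return its length */
static int fifo_dequeue(struct toy_fifo *q)
{
	int len = q->len[0];

	q->len++;
	q->count--;
	return len;
}

int main(void)
{
	static const int lens[] = { 400, 800 };
	struct toy_fifo q = { lens, 2 };
	int tokens = 1000;	/* stand-in for the tbf token bucket */
	int len;

	while ((len = fifo_peek_len(&q)) > 0) {
		if (tokens < len) {
			/* not enough tokens: the packet simply stays queued
			 * until more tokens arrive, no requeue needed */
			printf("throttled with %d tokens left\n", tokens);
			break;
		}
		fifo_dequeue(&q);	/* only now is the packet removed */
		tokens -= len;
		printf("sent %d bytes, %d tokens left\n", len, tokens);
	}
	return 0;
}

That is the shape tbf_dequeue() and qdisc_peek_len() take after this patch: peek, decide, and dequeue only once the packet is definitely going out.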