author		Jarek Poplawski <jarkao2@gmail.com>	2008-11-14 01:56:30 -0500
committer	David S. Miller <davem@davemloft.net>	2008-11-14 01:56:30 -0500
commit		f30ab418a1d3c5a8b83493e7d70d6876a74aa0ce (patch)
tree		271f0d093d2436b0d0ebdff151fc4f5b1fb15f21 /net/sched/sch_sfq.c
parent		38a7ddffa4b79d7b1fbc9bf2fa82b21b72622858 (diff)
pkt_sched: Remove qdisc->ops->requeue() etc.
After implementing qdisc->ops->peek() and changing sch_netem into a
classless qdisc, there are no more users of qdisc->ops->requeue(). This
patch removes the method together with its wrappers (qdisc_requeue())
and the now-unused qdisc->requeue queue. It also includes a few minor
fixes to warnings (htb_enqueue()) and to comments.
The idea to kill ->requeue() and a similar patch were first developed
by David S. Miller.
Signed-off-by: Jarek Poplawski <jarkao2@gmail.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
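
Background for the removal: once every qdisc provides ->peek(), a
work-limited parent qdisc can inspect its child's head packet without
dequeuing it, so it never has to hand a packet back. A rough sketch of
that pattern follows. This is illustrative only, not code from this
patch: the example_sched_data struct and the can_send_now() helper are
hypothetical stand-ins for qdisc-specific state such as a token bucket.

	#include <net/sch_generic.h>

	/* Hypothetical private data for a parent qdisc with one child. */
	struct example_sched_data {
		struct Qdisc *child;
	};

	/* Stand-in for a rate/credit check (e.g. a token bucket). */
	static bool can_send_now(const struct sk_buff *skb);

	static struct sk_buff *example_dequeue(struct Qdisc *sch)
	{
		struct example_sched_data *q = qdisc_priv(sch);
		struct sk_buff *skb;

		/* Inspect the child's head packet without removing it. */
		skb = q->child->ops->peek(q->child);
		if (skb == NULL)
			return NULL;

		/* Not allowed to send yet: the packet simply stays
		 * queued in the child, so ->requeue() is never needed. */
		if (!can_send_now(skb))
			return NULL;

		/* Commit: now actually remove the packet. */
		return q->child->ops->dequeue(q->child);
	}

Under the old scheme the parent dequeued first and, when the packet
could not be sent, pushed it back with ->requeue(), a failure path that
every qdisc had to implement; peeking before dequeuing eliminates that
path, which is what allows sfq_requeue() below to be deleted.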
Diffstat (limited to 'net/sched/sch_sfq.c')
-rw-r--r--	net/sched/sch_sfq.c | 63 ---------------------------------------------------
1 file changed, 0 insertions(+), 63 deletions(-)
diff --git a/net/sched/sch_sfq.c b/net/sched/sch_sfq.c
index 198b83d42ba8..ab8cfee3c9ce 100644
--- a/net/sched/sch_sfq.c
+++ b/net/sched/sch_sfq.c
@@ -329,68 +329,6 @@ sfq_enqueue(struct sk_buff *skb, struct Qdisc *sch)
 	return NET_XMIT_CN;
 }
 
-static int
-sfq_requeue(struct sk_buff *skb, struct Qdisc *sch)
-{
-	struct sfq_sched_data *q = qdisc_priv(sch);
-	unsigned int hash;
-	sfq_index x;
-	int ret;
-
-	hash = sfq_classify(skb, sch, &ret);
-	if (hash == 0) {
-		if (ret & __NET_XMIT_BYPASS)
-			sch->qstats.drops++;
-		kfree_skb(skb);
-		return ret;
-	}
-	hash--;
-
-	x = q->ht[hash];
-	if (x == SFQ_DEPTH) {
-		q->ht[hash] = x = q->dep[SFQ_DEPTH].next;
-		q->hash[x] = hash;
-	}
-
-	sch->qstats.backlog += qdisc_pkt_len(skb);
-	__skb_queue_head(&q->qs[x], skb);
-	/* If selected queue has length q->limit+1, this means that
-	 * all another queues are empty and we do simple tail drop.
-	 * This packet is still requeued at head of queue, tail packet
-	 * is dropped.
-	 */
-	if (q->qs[x].qlen > q->limit) {
-		skb = q->qs[x].prev;
-		__skb_unlink(skb, &q->qs[x]);
-		sch->qstats.drops++;
-		sch->qstats.backlog -= qdisc_pkt_len(skb);
-		kfree_skb(skb);
-		return NET_XMIT_CN;
-	}
-
-	sfq_inc(q, x);
-	if (q->qs[x].qlen == 1) {		/* The flow is new */
-		if (q->tail == SFQ_DEPTH) {	/* It is the first flow */
-			q->tail = x;
-			q->next[x] = x;
-			q->allot[x] = q->quantum;
-		} else {
-			q->next[x] = q->next[q->tail];
-			q->next[q->tail] = x;
-			q->tail = x;
-		}
-	}
-
-	if (++sch->q.qlen <= q->limit) {
-		sch->qstats.requeues++;
-		return 0;
-	}
-
-	sch->qstats.drops++;
-	sfq_drop(sch);
-	return NET_XMIT_CN;
-}
-
 static struct sk_buff *
 sfq_peek(struct Qdisc *sch)
 {
@@ -636,7 +574,6 @@ static struct Qdisc_ops sfq_qdisc_ops __read_mostly = {
 	.enqueue	=	sfq_enqueue,
 	.dequeue	=	sfq_dequeue,
 	.peek		=	sfq_peek,
-	.requeue	=	sfq_requeue,
 	.drop		=	sfq_drop,
 	.init		=	sfq_init,
 	.reset		=	sfq_reset,