author		Jarek Poplawski <jarkao2@gmail.com>	2008-11-14 01:56:30 -0500
committer	David S. Miller <davem@davemloft.net>	2008-11-14 01:56:30 -0500
commit		f30ab418a1d3c5a8b83493e7d70d6876a74aa0ce
tree		271f0d093d2436b0d0ebdff151fc4f5b1fb15f21 /net/sched/sch_generic.c
parent		38a7ddffa4b79d7b1fbc9bf2fa82b21b72622858
pkt_sched: Remove qdisc->ops->requeue() etc.
After implementing qdisc->ops->peek() and converting sch_netem into a classless qdisc, there are no more users of qdisc->ops->requeue(). This patch removes the method together with its wrappers (qdisc_requeue()) and the now-unused qdisc->requeue queue. A few minor fixes of warnings (htb_enqueue()) and comments are included along the way. The idea to kill ->requeue() and a similar patch were first developed by David S. Miller.

Signed-off-by: Jarek Poplawski <jarkao2@gmail.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
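For context, the reason ->requeue() can disappear is that a non-work-conserving scheduler no longer has to pull a packet out of its child speculatively and push it back when it turns out the packet cannot be sent yet: with ->peek() it can look at the head packet first and only dequeue once it decides to release it. Below is a minimal sketch of that pattern; my_sched_data, my_time_to_send() and my_dequeue() are hypothetical names that only mirror the Qdisc_ops calling convention, they are not part of this patch.

#include <net/sch_generic.h>

/* Hypothetical private data: one child qdisc feeding this scheduler. */
struct my_sched_data {
	struct Qdisc	*child;
};

/* Placeholder policy hook: may the head packet be released now? */
static bool my_time_to_send(struct Qdisc *sch, const struct sk_buff *skb)
{
	return true;
}

static struct sk_buff *my_dequeue(struct Qdisc *sch)
{
	struct my_sched_data *q = qdisc_priv(sch);
	struct sk_buff *skb;

	/* Inspect the child's head packet without removing it. */
	skb = q->child->ops->peek(q->child);
	if (!skb)
		return NULL;

	/* Too early to send: the packet simply stays queued in the
	 * child; no dequeue + ->requeue() round trip is needed. */
	if (!my_time_to_send(sch, skb))
		return NULL;

	/* Commit: actually remove the packet (accounting omitted). */
	return q->child->ops->dequeue(q->child);
}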
Diffstat (limited to 'net/sched/sch_generic.c')
-rw-r--r--	net/sched/sch_generic.c	| 23 -----------------------
1 file changed, 0 insertions(+), 23 deletions(-)
diff --git a/net/sched/sch_generic.c b/net/sched/sch_generic.c
index 1192da229835..80c8f3dbbea1 100644
--- a/net/sched/sch_generic.c
+++ b/net/sched/sch_generic.c
@@ -306,22 +306,12 @@ static struct sk_buff *noop_dequeue(struct Qdisc * qdisc)
 	return NULL;
 }
 
-static int noop_requeue(struct sk_buff *skb, struct Qdisc* qdisc)
-{
-	if (net_ratelimit())
-		printk(KERN_DEBUG "%s deferred output. It is buggy.\n",
-		       skb->dev->name);
-	kfree_skb(skb);
-	return NET_XMIT_CN;
-}
-
 struct Qdisc_ops noop_qdisc_ops __read_mostly = {
 	.id		=	"noop",
 	.priv_size	=	0,
 	.enqueue	=	noop_enqueue,
 	.dequeue	=	noop_dequeue,
 	.peek		=	noop_dequeue,
-	.requeue	=	noop_requeue,
 	.owner		=	THIS_MODULE,
 };
 
@@ -336,7 +326,6 @@ struct Qdisc noop_qdisc = {
 	.flags		=	TCQ_F_BUILTIN,
 	.ops		=	&noop_qdisc_ops,
 	.list		=	LIST_HEAD_INIT(noop_qdisc.list),
-	.requeue.lock	=	__SPIN_LOCK_UNLOCKED(noop_qdisc.q.lock),
 	.q.lock		=	__SPIN_LOCK_UNLOCKED(noop_qdisc.q.lock),
 	.dev_queue	=	&noop_netdev_queue,
 };
@@ -348,7 +337,6 @@ static struct Qdisc_ops noqueue_qdisc_ops __read_mostly = {
 	.enqueue	=	noop_enqueue,
 	.dequeue	=	noop_dequeue,
 	.peek		=	noop_dequeue,
-	.requeue	=	noop_requeue,
 	.owner		=	THIS_MODULE,
 };
 
@@ -364,7 +352,6 @@ static struct Qdisc noqueue_qdisc = {
 	.flags		=	TCQ_F_BUILTIN,
 	.ops		=	&noqueue_qdisc_ops,
 	.list		=	LIST_HEAD_INIT(noqueue_qdisc.list),
-	.requeue.lock	=	__SPIN_LOCK_UNLOCKED(noqueue_qdisc.q.lock),
 	.q.lock		=	__SPIN_LOCK_UNLOCKED(noqueue_qdisc.q.lock),
 	.dev_queue	=	&noqueue_netdev_queue,
 };
@@ -426,12 +413,6 @@ static struct sk_buff *pfifo_fast_peek(struct Qdisc* qdisc)
 	return NULL;
 }
 
-static int pfifo_fast_requeue(struct sk_buff *skb, struct Qdisc* qdisc)
-{
-	qdisc->q.qlen++;
-	return __qdisc_requeue(skb, qdisc, prio2list(skb, qdisc));
-}
-
 static void pfifo_fast_reset(struct Qdisc* qdisc)
 {
 	int prio;
@@ -473,7 +454,6 @@ static struct Qdisc_ops pfifo_fast_ops __read_mostly = {
 	.enqueue	=	pfifo_fast_enqueue,
 	.dequeue	=	pfifo_fast_dequeue,
 	.peek		=	pfifo_fast_peek,
-	.requeue	=	pfifo_fast_requeue,
 	.init		=	pfifo_fast_init,
 	.reset		=	pfifo_fast_reset,
 	.dump		=	pfifo_fast_dump,
@@ -499,7 +479,6 @@ struct Qdisc *qdisc_alloc(struct netdev_queue *dev_queue,
 	sch->padded = (char *) sch - (char *) p;
 
 	INIT_LIST_HEAD(&sch->list);
-	skb_queue_head_init(&sch->requeue);
 	skb_queue_head_init(&sch->q);
 	sch->ops = ops;
 	sch->enqueue = ops->enqueue;
@@ -571,8 +550,6 @@ void qdisc_destroy(struct Qdisc *qdisc)
 	dev_put(qdisc_dev(qdisc));
 
 	kfree_skb(qdisc->gso_skb);
-	__skb_queue_purge(&qdisc->requeue);
-
 	kfree((char *) qdisc - qdisc->padded);
 }
 EXPORT_SYMBOL(qdisc_destroy);
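Leaf qdiscs that keep their own skb lists, like pfifo_fast above, can implement ->peek() directly (for example with skb_peek() on the first non-empty band), so their requeue handlers are simply dropped. Qdiscs that can only produce packets through ->dequeue() instead rely on a generic helper that caches the peeked skb rather than requeueing it. The following is a rough sketch of such a helper, modelled on the qdisc_peek_dequeued() added as part of the same peek work; the exact shape here is an approximation and not part of this diff.

static inline struct sk_buff *qdisc_peek_dequeued(struct Qdisc *sch)
{
	/* Nothing cached yet: pull one packet via ->dequeue() and park it. */
	if (!sch->gso_skb) {
		sch->gso_skb = sch->dequeue(sch);
		if (sch->gso_skb)
			/* It is still logically part of the queue. */
			sch->q.qlen++;
	}

	return sch->gso_skb;
}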