about summary refs log tree commit diff stats
path: root/net/sched/sch_generic.c
diff options
context:
space:
mode:
Diffstat (limited to 'net/sched/sch_generic.c')
-rw-r--r-- net/sched/sch_generic.c | 40
1 file changed, 18 insertions(+), 22 deletions(-)
diff --git a/net/sched/sch_generic.c b/net/sched/sch_generic.c
index cdcd16fcfeda..5f5efe4e6072 100644
--- a/net/sched/sch_generic.c
+++ b/net/sched/sch_generic.c
@@ -224,7 +224,7 @@ static void dev_watchdog(unsigned long arg)
224 char drivername[64]; 224 char drivername[64];
225 WARN_ONCE(1, KERN_INFO "NETDEV WATCHDOG: %s (%s): transmit timed out\n", 225 WARN_ONCE(1, KERN_INFO "NETDEV WATCHDOG: %s (%s): transmit timed out\n",
226 dev->name, netdev_drivername(dev, drivername, 64)); 226 dev->name, netdev_drivername(dev, drivername, 64));
227 dev->tx_timeout(dev); 227 dev->netdev_ops->ndo_tx_timeout(dev);
228 } 228 }
229 if (!mod_timer(&dev->watchdog_timer, 229 if (!mod_timer(&dev->watchdog_timer,
230 round_jiffies(jiffies + 230 round_jiffies(jiffies +
@@ -239,7 +239,7 @@ static void dev_watchdog(unsigned long arg)
239 239
240void __netdev_watchdog_up(struct net_device *dev) 240void __netdev_watchdog_up(struct net_device *dev)
241{ 241{
242 if (dev->tx_timeout) { 242 if (dev->netdev_ops->ndo_tx_timeout) {
243 if (dev->watchdog_timeo <= 0) 243 if (dev->watchdog_timeo <= 0)
244 dev->watchdog_timeo = 5*HZ; 244 dev->watchdog_timeo = 5*HZ;
245 if (!mod_timer(&dev->watchdog_timer, 245 if (!mod_timer(&dev->watchdog_timer,
@@ -311,21 +311,12 @@ static struct sk_buff *noop_dequeue(struct Qdisc * qdisc)
311 return NULL; 311 return NULL;
312} 312}
313 313
314static int noop_requeue(struct sk_buff *skb, struct Qdisc* qdisc)
315{
316 if (net_ratelimit())
317 printk(KERN_DEBUG "%s deferred output. It is buggy.\n",
318 skb->dev->name);
319 kfree_skb(skb);
320 return NET_XMIT_CN;
321}
322
323struct Qdisc_ops noop_qdisc_ops __read_mostly = { 314struct Qdisc_ops noop_qdisc_ops __read_mostly = {
324 .id = "noop", 315 .id = "noop",
325 .priv_size = 0, 316 .priv_size = 0,
326 .enqueue = noop_enqueue, 317 .enqueue = noop_enqueue,
327 .dequeue = noop_dequeue, 318 .dequeue = noop_dequeue,
328 .requeue = noop_requeue, 319 .peek = noop_dequeue,
329 .owner = THIS_MODULE, 320 .owner = THIS_MODULE,
330}; 321};
331 322
@@ -340,7 +331,6 @@ struct Qdisc noop_qdisc = {
340 .flags = TCQ_F_BUILTIN, 331 .flags = TCQ_F_BUILTIN,
341 .ops = &noop_qdisc_ops, 332 .ops = &noop_qdisc_ops,
342 .list = LIST_HEAD_INIT(noop_qdisc.list), 333 .list = LIST_HEAD_INIT(noop_qdisc.list),
343 .requeue.lock = __SPIN_LOCK_UNLOCKED(noop_qdisc.q.lock),
344 .q.lock = __SPIN_LOCK_UNLOCKED(noop_qdisc.q.lock), 334 .q.lock = __SPIN_LOCK_UNLOCKED(noop_qdisc.q.lock),
345 .dev_queue = &noop_netdev_queue, 335 .dev_queue = &noop_netdev_queue,
346}; 336};
@@ -351,7 +341,7 @@ static struct Qdisc_ops noqueue_qdisc_ops __read_mostly = {
351 .priv_size = 0, 341 .priv_size = 0,
352 .enqueue = noop_enqueue, 342 .enqueue = noop_enqueue,
353 .dequeue = noop_dequeue, 343 .dequeue = noop_dequeue,
354 .requeue = noop_requeue, 344 .peek = noop_dequeue,
355 .owner = THIS_MODULE, 345 .owner = THIS_MODULE,
356}; 346};
357 347
@@ -367,7 +357,6 @@ static struct Qdisc noqueue_qdisc = {
367 .flags = TCQ_F_BUILTIN, 357 .flags = TCQ_F_BUILTIN,
368 .ops = &noqueue_qdisc_ops, 358 .ops = &noqueue_qdisc_ops,
369 .list = LIST_HEAD_INIT(noqueue_qdisc.list), 359 .list = LIST_HEAD_INIT(noqueue_qdisc.list),
370 .requeue.lock = __SPIN_LOCK_UNLOCKED(noqueue_qdisc.q.lock),
371 .q.lock = __SPIN_LOCK_UNLOCKED(noqueue_qdisc.q.lock), 360 .q.lock = __SPIN_LOCK_UNLOCKED(noqueue_qdisc.q.lock),
372 .dev_queue = &noqueue_netdev_queue, 361 .dev_queue = &noqueue_netdev_queue,
373}; 362};
@@ -416,10 +405,17 @@ static struct sk_buff *pfifo_fast_dequeue(struct Qdisc* qdisc)
416 return NULL; 405 return NULL;
417} 406}
418 407
419static int pfifo_fast_requeue(struct sk_buff *skb, struct Qdisc* qdisc) 408static struct sk_buff *pfifo_fast_peek(struct Qdisc* qdisc)
420{ 409{
421 qdisc->q.qlen++; 410 int prio;
422 return __qdisc_requeue(skb, qdisc, prio2list(skb, qdisc)); 411 struct sk_buff_head *list = qdisc_priv(qdisc);
412
413 for (prio = 0; prio < PFIFO_FAST_BANDS; prio++) {
414 if (!skb_queue_empty(list + prio))
415 return skb_peek(list + prio);
416 }
417
418 return NULL;
423} 419}
424 420
425static void pfifo_fast_reset(struct Qdisc* qdisc) 421static void pfifo_fast_reset(struct Qdisc* qdisc)
@@ -462,7 +458,7 @@ static struct Qdisc_ops pfifo_fast_ops __read_mostly = {
462 .priv_size = PFIFO_FAST_BANDS * sizeof(struct sk_buff_head), 458 .priv_size = PFIFO_FAST_BANDS * sizeof(struct sk_buff_head),
463 .enqueue = pfifo_fast_enqueue, 459 .enqueue = pfifo_fast_enqueue,
464 .dequeue = pfifo_fast_dequeue, 460 .dequeue = pfifo_fast_dequeue,
465 .requeue = pfifo_fast_requeue, 461 .peek = pfifo_fast_peek,
466 .init = pfifo_fast_init, 462 .init = pfifo_fast_init,
467 .reset = pfifo_fast_reset, 463 .reset = pfifo_fast_reset,
468 .dump = pfifo_fast_dump, 464 .dump = pfifo_fast_dump,
@@ -488,7 +484,6 @@ struct Qdisc *qdisc_alloc(struct netdev_queue *dev_queue,
488 sch->padded = (char *) sch - (char *) p; 484 sch->padded = (char *) sch - (char *) p;
489 485
490 INIT_LIST_HEAD(&sch->list); 486 INIT_LIST_HEAD(&sch->list);
491 skb_queue_head_init(&sch->requeue);
492 skb_queue_head_init(&sch->q); 487 skb_queue_head_init(&sch->q);
493 sch->ops = ops; 488 sch->ops = ops;
494 sch->enqueue = ops->enqueue; 489 sch->enqueue = ops->enqueue;
@@ -531,6 +526,9 @@ void qdisc_reset(struct Qdisc *qdisc)
531 526
532 if (ops->reset) 527 if (ops->reset)
533 ops->reset(qdisc); 528 ops->reset(qdisc);
529
530 kfree_skb(qdisc->gso_skb);
531 qdisc->gso_skb = NULL;
534} 532}
535EXPORT_SYMBOL(qdisc_reset); 533EXPORT_SYMBOL(qdisc_reset);
536 534
@@ -557,8 +555,6 @@ void qdisc_destroy(struct Qdisc *qdisc)
557 dev_put(qdisc_dev(qdisc)); 555 dev_put(qdisc_dev(qdisc));
558 556
559 kfree_skb(qdisc->gso_skb); 557 kfree_skb(qdisc->gso_skb);
560 __skb_queue_purge(&qdisc->requeue);
561
562 kfree((char *) qdisc - qdisc->padded); 558 kfree((char *) qdisc - qdisc->padded);
563} 559}
564EXPORT_SYMBOL(qdisc_destroy); 560EXPORT_SYMBOL(qdisc_destroy);