aboutsummaryrefslogtreecommitdiffstats
path: root/net
diff options
context:
space:
mode:
authorDavid S. Miller <davem@davemloft.net>2008-07-21 12:56:13 -0400
committerDavid S. Miller <davem@davemloft.net>2008-07-21 13:10:50 -0400
commitd3678b463df73f5060d7420915080e19baeb379b (patch)
tree4f4fc3e49bb0a9d21b9280f1c1a990eb03651564 /net
parent867d79fb9a4d5929ad8335c896fcfe11c3b2ef14 (diff)
Revert "pkt_sched: Make default qdisc nonshared-multiqueue safe."
This reverts commit a0c80b80e0fb48129e4e9d6a9ede914f9ff1850d. After discussions with Jamal and Herbert on netdev, we should provide at least minimal prioritization at the qdisc level even in multiqueue situations. Signed-off-by: David S. Miller <davem@davemloft.net>
Diffstat (limited to 'net')
-rw-r--r--net/sched/sch_generic.c99
1 files changed, 77 insertions, 22 deletions
diff --git a/net/sched/sch_generic.c b/net/sched/sch_generic.c
index 09dead335805..cb625b4d6da5 100644
--- a/net/sched/sch_generic.c
+++ b/net/sched/sch_generic.c
@@ -356,44 +356,99 @@ static struct Qdisc noqueue_qdisc = {
 };
 
 
-static int fifo_fast_enqueue(struct sk_buff *skb, struct Qdisc* qdisc)
+static const u8 prio2band[TC_PRIO_MAX+1] =
+	{ 1, 2, 2, 2, 1, 2, 0, 0 , 1, 1, 1, 1, 1, 1, 1, 1 };
+
+/* 3-band FIFO queue: old style, but should be a bit faster than
+   generic prio+fifo combination.
+ */
+
+#define PFIFO_FAST_BANDS 3
+
+static inline struct sk_buff_head *prio2list(struct sk_buff *skb,
+					     struct Qdisc *qdisc)
+{
+	struct sk_buff_head *list = qdisc_priv(qdisc);
+	return list + prio2band[skb->priority & TC_PRIO_MAX];
+}
+
+static int pfifo_fast_enqueue(struct sk_buff *skb, struct Qdisc* qdisc)
 {
-	struct sk_buff_head *list = &qdisc->q;
+	struct sk_buff_head *list = prio2list(skb, qdisc);
 
-	if (skb_queue_len(list) < qdisc_dev(qdisc)->tx_queue_len)
+	if (skb_queue_len(list) < qdisc_dev(qdisc)->tx_queue_len) {
+		qdisc->q.qlen++;
 		return __qdisc_enqueue_tail(skb, qdisc, list);
+	}
 
 	return qdisc_drop(skb, qdisc);
 }
 
-static struct sk_buff *fifo_fast_dequeue(struct Qdisc* qdisc)
+static struct sk_buff *pfifo_fast_dequeue(struct Qdisc* qdisc)
 {
-	struct sk_buff_head *list = &qdisc->q;
+	int prio;
+	struct sk_buff_head *list = qdisc_priv(qdisc);
 
-	if (!skb_queue_empty(list))
-		return __qdisc_dequeue_head(qdisc, list);
+	for (prio = 0; prio < PFIFO_FAST_BANDS; prio++) {
+		if (!skb_queue_empty(list + prio)) {
+			qdisc->q.qlen--;
+			return __qdisc_dequeue_head(qdisc, list + prio);
+		}
+	}
 
 	return NULL;
 }
 
-static int fifo_fast_requeue(struct sk_buff *skb, struct Qdisc* qdisc)
+static int pfifo_fast_requeue(struct sk_buff *skb, struct Qdisc* qdisc)
 {
-	return __qdisc_requeue(skb, qdisc, &qdisc->q);
+	qdisc->q.qlen++;
+	return __qdisc_requeue(skb, qdisc, prio2list(skb, qdisc));
 }
 
-static void fifo_fast_reset(struct Qdisc* qdisc)
+static void pfifo_fast_reset(struct Qdisc* qdisc)
 {
-	__qdisc_reset_queue(qdisc, &qdisc->q);
+	int prio;
+	struct sk_buff_head *list = qdisc_priv(qdisc);
+
+	for (prio = 0; prio < PFIFO_FAST_BANDS; prio++)
+		__qdisc_reset_queue(qdisc, list + prio);
+
 	qdisc->qstats.backlog = 0;
+	qdisc->q.qlen = 0;
 }
 
-static struct Qdisc_ops fifo_fast_ops __read_mostly = {
-	.id		=	"fifo_fast",
-	.priv_size	=	0,
-	.enqueue	=	fifo_fast_enqueue,
-	.dequeue	=	fifo_fast_dequeue,
-	.requeue	=	fifo_fast_requeue,
-	.reset		=	fifo_fast_reset,
+static int pfifo_fast_dump(struct Qdisc *qdisc, struct sk_buff *skb)
+{
+	struct tc_prio_qopt opt = { .bands = PFIFO_FAST_BANDS };
+
+	memcpy(&opt.priomap, prio2band, TC_PRIO_MAX+1);
+	NLA_PUT(skb, TCA_OPTIONS, sizeof(opt), &opt);
+	return skb->len;
+
+nla_put_failure:
+	return -1;
+}
+
+static int pfifo_fast_init(struct Qdisc *qdisc, struct nlattr *opt)
+{
+	int prio;
+	struct sk_buff_head *list = qdisc_priv(qdisc);
+
+	for (prio = 0; prio < PFIFO_FAST_BANDS; prio++)
+		skb_queue_head_init(list + prio);
+
+	return 0;
+}
+
+static struct Qdisc_ops pfifo_fast_ops __read_mostly = {
+	.id		=	"pfifo_fast",
+	.priv_size	=	PFIFO_FAST_BANDS * sizeof(struct sk_buff_head),
+	.enqueue	=	pfifo_fast_enqueue,
+	.dequeue	=	pfifo_fast_dequeue,
+	.requeue	=	pfifo_fast_requeue,
+	.init		=	pfifo_fast_init,
+	.reset		=	pfifo_fast_reset,
+	.dump		=	pfifo_fast_dump,
 	.owner		=	THIS_MODULE,
 };
 
@@ -522,7 +577,7 @@ static void attach_one_default_qdisc(struct net_device *dev,
 
 	if (dev->tx_queue_len) {
 		qdisc = qdisc_create_dflt(dev, dev_queue,
-					  &fifo_fast_ops, TC_H_ROOT);
+					  &pfifo_fast_ops, TC_H_ROOT);
 		if (!qdisc) {
 			printk(KERN_INFO "%s: activation failed\n", dev->name);
 			return;
@@ -550,9 +605,9 @@ void dev_activate(struct net_device *dev)
 	int need_watchdog;
 
 	/* No queueing discipline is attached to device;
-	 * create default one i.e. fifo_fast for devices,
-	 * which need queueing and noqueue_qdisc for
-	 * virtual interfaces.
+	   create default one i.e. pfifo_fast for devices,
+	   which need queueing and noqueue_qdisc for
+	   virtual interfaces
 	 */
 
 	if (dev_all_qdisc_sleeping_noop(dev))