path: root/net/sched/sch_generic.c
author	David S. Miller <davem@davemloft.net>	2008-07-17 04:46:06 -0400
committer	David S. Miller <davem@davemloft.net>	2008-07-17 22:21:33 -0400
commit	a0c80b80e0fb48129e4e9d6a9ede914f9ff1850d (patch)
tree	db2364cff1fad40b56cae6770ed73945b42e6e85 /net/sched/sch_generic.c
parent	93245dd6d356b864f6676396a9f3edecbd378ed0 (diff)
pkt_sched: Make default qdisc nonshared-multiqueue safe.
Instead of 'pfifo_fast' we have just plain 'fifo_fast'. No priority queues, just a straight FIFO. This is necessary in order to legally have a separate qdisc per queue in multi-TX-queue setups, and thus get full parallelization.

Signed-off-by: David S. Miller <davem@davemloft.net>
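For context, the behavioural difference: pfifo_fast mapped each packet to one of three bands via prio2band[skb->priority & TC_PRIO_MAX] and kept three sk_buff_head lists in its private data, while fifo_fast keeps a single list in qdisc->q. The userspace sketch below is illustrative only and not part of the commit; it reuses the prio2band[] table removed by this patch and assumes TC_PRIO_MAX == 15 as defined in the kernel headers.

/* Illustrative userspace sketch, not kernel code: shows which band a packet
 * of a given priority would have used under the old pfifo_fast (one of
 * three bands) versus the new fifo_fast (always the single FIFO).
 */
#include <stdio.h>

#define TC_PRIO_MAX 15

/* Table copied from the prio2band[] array removed by this patch. */
static const unsigned char prio2band[TC_PRIO_MAX + 1] =
        { 1, 2, 2, 2, 1, 2, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1 };

int main(void)
{
        unsigned int priority;

        for (priority = 0; priority <= TC_PRIO_MAX; priority++)
                printf("priority %2u: pfifo_fast band %u, fifo_fast band 0\n",
                       priority, prio2band[priority]);

        return 0;
}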
Diffstat (limited to 'net/sched/sch_generic.c')
-rw-r--r--	net/sched/sch_generic.c	99
1 file changed, 22 insertions(+), 77 deletions(-)
diff --git a/net/sched/sch_generic.c b/net/sched/sch_generic.c
index 8fc580b3e173..e244c462e6bd 100644
--- a/net/sched/sch_generic.c
+++ b/net/sched/sch_generic.c
@@ -349,99 +349,44 @@ static struct Qdisc noqueue_qdisc = {
 };
 
 
-static const u8 prio2band[TC_PRIO_MAX+1] =
-        { 1, 2, 2, 2, 1, 2, 0, 0 , 1, 1, 1, 1, 1, 1, 1, 1 };
-
-/* 3-band FIFO queue: old style, but should be a bit faster than
-   generic prio+fifo combination.
- */
-
-#define PFIFO_FAST_BANDS 3
-
-static inline struct sk_buff_head *prio2list(struct sk_buff *skb,
-                                             struct Qdisc *qdisc)
-{
-        struct sk_buff_head *list = qdisc_priv(qdisc);
-        return list + prio2band[skb->priority & TC_PRIO_MAX];
-}
-
-static int pfifo_fast_enqueue(struct sk_buff *skb, struct Qdisc* qdisc)
+static int fifo_fast_enqueue(struct sk_buff *skb, struct Qdisc* qdisc)
 {
-        struct sk_buff_head *list = prio2list(skb, qdisc);
+        struct sk_buff_head *list = &qdisc->q;
 
-        if (skb_queue_len(list) < qdisc_dev(qdisc)->tx_queue_len) {
-                qdisc->q.qlen++;
+        if (skb_queue_len(list) < qdisc_dev(qdisc)->tx_queue_len)
                 return __qdisc_enqueue_tail(skb, qdisc, list);
-        }
 
         return qdisc_drop(skb, qdisc);
 }
 
-static struct sk_buff *pfifo_fast_dequeue(struct Qdisc* qdisc)
+static struct sk_buff *fifo_fast_dequeue(struct Qdisc* qdisc)
 {
-        int prio;
-        struct sk_buff_head *list = qdisc_priv(qdisc);
+        struct sk_buff_head *list = &qdisc->q;
 
-        for (prio = 0; prio < PFIFO_FAST_BANDS; prio++) {
-                if (!skb_queue_empty(list + prio)) {
-                        qdisc->q.qlen--;
-                        return __qdisc_dequeue_head(qdisc, list + prio);
-                }
-        }
+        if (!skb_queue_empty(list))
+                return __qdisc_dequeue_head(qdisc, list);
 
         return NULL;
 }
 
-static int pfifo_fast_requeue(struct sk_buff *skb, struct Qdisc* qdisc)
+static int fifo_fast_requeue(struct sk_buff *skb, struct Qdisc* qdisc)
 {
-        qdisc->q.qlen++;
-        return __qdisc_requeue(skb, qdisc, prio2list(skb, qdisc));
+        return __qdisc_requeue(skb, qdisc, &qdisc->q);
 }
 
-static void pfifo_fast_reset(struct Qdisc* qdisc)
+static void fifo_fast_reset(struct Qdisc* qdisc)
 {
-        int prio;
-        struct sk_buff_head *list = qdisc_priv(qdisc);
-
-        for (prio = 0; prio < PFIFO_FAST_BANDS; prio++)
-                __qdisc_reset_queue(qdisc, list + prio);
-
+        __qdisc_reset_queue(qdisc, &qdisc->q);
         qdisc->qstats.backlog = 0;
-        qdisc->q.qlen = 0;
 }
 
-static int pfifo_fast_dump(struct Qdisc *qdisc, struct sk_buff *skb)
-{
-        struct tc_prio_qopt opt = { .bands = PFIFO_FAST_BANDS };
-
-        memcpy(&opt.priomap, prio2band, TC_PRIO_MAX+1);
-        NLA_PUT(skb, TCA_OPTIONS, sizeof(opt), &opt);
-        return skb->len;
-
-nla_put_failure:
-        return -1;
-}
-
-static int pfifo_fast_init(struct Qdisc *qdisc, struct nlattr *opt)
-{
-        int prio;
-        struct sk_buff_head *list = qdisc_priv(qdisc);
-
-        for (prio = 0; prio < PFIFO_FAST_BANDS; prio++)
-                skb_queue_head_init(list + prio);
-
-        return 0;
-}
-
-static struct Qdisc_ops pfifo_fast_ops __read_mostly = {
-        .id             =       "pfifo_fast",
-        .priv_size      =       PFIFO_FAST_BANDS * sizeof(struct sk_buff_head),
-        .enqueue        =       pfifo_fast_enqueue,
-        .dequeue        =       pfifo_fast_dequeue,
-        .requeue        =       pfifo_fast_requeue,
-        .init           =       pfifo_fast_init,
-        .reset          =       pfifo_fast_reset,
-        .dump           =       pfifo_fast_dump,
+static struct Qdisc_ops fifo_fast_ops __read_mostly = {
+        .id             =       "fifo_fast",
+        .priv_size      =       0,
+        .enqueue        =       fifo_fast_enqueue,
+        .dequeue        =       fifo_fast_dequeue,
+        .requeue        =       fifo_fast_requeue,
+        .reset          =       fifo_fast_reset,
         .owner          =       THIS_MODULE,
 };
 
447 392
@@ -570,7 +515,7 @@ static void attach_one_default_qdisc(struct net_device *dev,
 
         if (dev->tx_queue_len) {
                 qdisc = qdisc_create_dflt(dev, dev_queue,
-                                          &pfifo_fast_ops, TC_H_ROOT);
+                                          &fifo_fast_ops, TC_H_ROOT);
                 if (!qdisc) {
                         printk(KERN_INFO "%s: activation failed\n", dev->name);
                         return;
@@ -601,9 +546,9 @@ void dev_activate(struct net_device *dev)
         int need_watchdog;
 
         /* No queueing discipline is attached to device;
-           create default one i.e. pfifo_fast for devices,
-           which need queueing and noqueue_qdisc for
-           virtual interfaces
+         * create default one i.e. fifo_fast for devices,
+         * which need queueing and noqueue_qdisc for
+         * virtual interfaces.
          */
 
         if (dev_all_qdisc_sleeping_noop(dev))