author		David S. Miller <davem@davemloft.net>	2008-07-17 07:54:10 -0400
committer	David S. Miller <davem@davemloft.net>	2008-07-17 22:21:30 -0400
commit		99194cff398d056e5ee469647c294466c246c88a (patch)
tree		24d85fffc71915a61bcc062deb32a4fa82dc7b9a /net
parent		83874000929ed63aef30b44083a9f713135ff040 (diff)
pkt_sched: Add multiqueue handling to qdisc_graft().
Move the destruction of the old queue into qdisc_graft().

When operating on a root qdisc (ie. "parent == NULL"), apply the
operation to all queues.  The caller has grabbed a single implicit
reference for this graft, therefore when we apply the change to more
than one queue we must grab additional qdisc references.

Otherwise, we are operating on a class of a specific parent qdisc,
and therefore no multiqueue handling is necessary.

Signed-off-by: David S. Miller <davem@davemloft.net>
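The reference-counting rule in the message can be sketched as a small
stand-alone C program (the toy_qdisc type and graft_root_to_all_queues()
are invented for illustration and are not kernel interfaces); it only
models the counting logic of the new loop in qdisc_graft(), where the
caller's single implicit reference covers the first queue and every
further queue takes its own via atomic_inc(&new->refcnt):

#include <assert.h>
#include <stdio.h>

struct toy_qdisc {
	int refcnt;
};

/* Graft one qdisc as the root of every tx queue.  The caller donates a
 * single reference, which covers queue 0; each further queue takes its
 * own, mirroring "if (new && i > 0) atomic_inc(&new->refcnt)". */
static void graft_root_to_all_queues(struct toy_qdisc *new, unsigned int num_q)
{
	unsigned int i;

	for (i = 0; i < num_q; i++) {
		if (new && i > 0)
			new->refcnt++;
	}
}

int main(void)
{
	struct toy_qdisc q = { .refcnt = 1 };	/* the caller's implicit reference */

	graft_root_to_all_queues(&q, 4);	/* pretend dev->num_tx_queues == 4 */
	assert(q.refcnt == 4);			/* one reference per tx queue */
	printf("refcnt after graft: %d\n", q.refcnt);
	return 0;
}

For a four-queue device the model ends with a refcount of 4, i.e. one
reference per tx queue.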
Diffstat (limited to 'net')
-rw-r--r--	net/sched/sch_api.c | 101
1 file changed, 59 insertions(+), 42 deletions(-)
diff --git a/net/sched/sch_api.c b/net/sched/sch_api.c
index 74924893ef7f..b3ef8307204e 100644
--- a/net/sched/sch_api.c
+++ b/net/sched/sch_api.c
@@ -435,28 +435,22 @@ static u32 qdisc_alloc_handle(struct net_device *dev)
 	return i>0 ? autohandle : 0;
 }
 
-/* Attach toplevel qdisc to device dev */
+/* Attach toplevel qdisc to device queue. */
 
-static struct Qdisc *
-dev_graft_qdisc(struct net_device *dev, struct Qdisc *qdisc)
+static struct Qdisc *dev_graft_qdisc(struct netdev_queue *dev_queue,
+				     struct Qdisc *qdisc)
 {
-	struct netdev_queue *dev_queue;
 	spinlock_t *root_lock;
 	struct Qdisc *oqdisc;
 	int ingress;
 
-	if (dev->flags & IFF_UP)
-		dev_deactivate(dev);
-
 	ingress = 0;
 	if (qdisc && qdisc->flags&TCQ_F_INGRESS)
 		ingress = 1;
 
 	if (ingress) {
-		dev_queue = &dev->rx_queue;
 		oqdisc = dev_queue->qdisc;
 	} else {
-		dev_queue = netdev_get_tx_queue(dev, 0);
 		oqdisc = dev_queue->qdisc_sleeping;
 	}
 
@@ -487,9 +481,6 @@ dev_graft_qdisc(struct net_device *dev, struct Qdisc *qdisc)
 
 	spin_unlock_bh(root_lock);
 
-	if (dev->flags & IFF_UP)
-		dev_activate(dev);
-
 	return oqdisc;
 }
 
@@ -521,26 +512,66 @@ void qdisc_tree_decrease_qlen(struct Qdisc *sch, unsigned int n)
 }
 EXPORT_SYMBOL(qdisc_tree_decrease_qlen);
 
-/* Graft qdisc "new" to class "classid" of qdisc "parent" or
-   to device "dev".
+static void notify_and_destroy(struct sk_buff *skb, struct nlmsghdr *n, u32 clid,
+			       struct Qdisc *old, struct Qdisc *new)
+{
+	if (new || old)
+		qdisc_notify(skb, n, clid, old, new);
 
-   Old qdisc is not destroyed but returned in *old.
+	if (old) {
+		spin_lock_bh(&old->q.lock);
+		qdisc_destroy(old);
+		spin_unlock_bh(&old->q.lock);
+	}
+}
+
+/* Graft qdisc "new" to class "classid" of qdisc "parent" or
+ * to device "dev".
+ *
+ * When appropriate send a netlink notification using 'skb'
+ * and "n".
+ *
+ * On success, destroy old qdisc.
  */
 
 static int qdisc_graft(struct net_device *dev, struct Qdisc *parent,
-		       u32 classid,
-		       struct Qdisc *new, struct Qdisc **old)
+		       struct sk_buff *skb, struct nlmsghdr *n, u32 classid,
+		       struct Qdisc *new, struct Qdisc *old)
 {
+	struct Qdisc *q = old;
 	int err = 0;
-	struct Qdisc *q = *old;
-
 
 	if (parent == NULL) {
-		if (q && q->flags&TCQ_F_INGRESS) {
-			*old = dev_graft_qdisc(dev, q);
-		} else {
-			*old = dev_graft_qdisc(dev, new);
+		unsigned int i, num_q, ingress;
+
+		ingress = 0;
+		num_q = dev->num_tx_queues;
+		if (q && q->flags & TCQ_F_INGRESS) {
+			num_q = 1;
+			ingress = 1;
 		}
+
+		if (dev->flags & IFF_UP)
+			dev_deactivate(dev);
+
+		for (i = 0; i < num_q; i++) {
+			struct netdev_queue *dev_queue = &dev->rx_queue;
+
+			if (!ingress)
+				dev_queue = netdev_get_tx_queue(dev, i);
+
+			if (ingress) {
+				old = dev_graft_qdisc(dev_queue, q);
+			} else {
+				old = dev_graft_qdisc(dev_queue, new);
+				if (new && i > 0)
+					atomic_inc(&new->refcnt);
+			}
+			notify_and_destroy(skb, n, classid, old, new);
+		}
+
+		if (dev->flags & IFF_UP)
+			dev_activate(dev);
 	} else {
 		const struct Qdisc_class_ops *cops = parent->ops->cl_ops;
 
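The hunk above is the acquire side; the release side is the per-queue
notify_and_destroy() call.  An old root qdisc that was shared by every
tx queue holds one reference per queue, so each per-queue destroy drops
one reference and only the last drop actually frees it (the kernel's
qdisc_destroy() likewise returns early until the reference count
reaches zero).  A stand-alone sketch with invented toy_* names:

#include <assert.h>
#include <stdio.h>
#include <stdlib.h>

struct toy_qdisc {
	int refcnt;
};

static int frees;	/* how many times the qdisc was really torn down */

static void toy_qdisc_destroy(struct toy_qdisc *q)
{
	if (--q->refcnt > 0)
		return;		/* some other queue still references it */
	frees++;
	free(q);		/* last reference gone: really tear it down */
}

int main(void)
{
	unsigned int i, num_q = 4;
	/* An old root qdisc shared by all four tx queues: one ref per queue. */
	struct toy_qdisc *old = malloc(sizeof(*old));

	if (!old)
		return 1;
	old->refcnt = (int)num_q;

	for (i = 0; i < num_q; i++)	/* one notify_and_destroy() per queue */
		toy_qdisc_destroy(old);

	assert(frees == 1);		/* freed exactly once, on the last queue */
	printf("old root qdisc freed exactly once\n");
	return 0;
}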
@@ -549,10 +580,12 @@ static int qdisc_graft(struct net_device *dev, struct Qdisc *parent,
 		if (cops) {
 			unsigned long cl = cops->get(parent, classid);
 			if (cl) {
-				err = cops->graft(parent, cl, new, old);
+				err = cops->graft(parent, cl, new, &old);
 				cops->put(parent, cl);
 			}
 		}
+		if (!err)
+			notify_and_destroy(skb, n, classid, old, new);
 	}
 	return err;
 }
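For the class case in the hunk above, cops->graft() hands the displaced
qdisc back through &old, and on success qdisc_graft() now disposes of it
itself via notify_and_destroy() instead of returning it to the caller.
A stand-alone sketch of that out-parameter swap pattern (all toy_* names
invented for illustration):

#include <assert.h>
#include <stddef.h>
#include <stdio.h>

struct toy_qdisc {
	const char *id;
};

struct toy_class {
	struct toy_qdisc *leaf;
};

/* Attach "new" under the class and return the displaced qdisc via *old,
 * the same shape as cops->graft(parent, cl, new, &old). */
static int toy_graft(struct toy_class *cl, struct toy_qdisc *new,
		     struct toy_qdisc **old)
{
	*old = cl->leaf;
	cl->leaf = new;
	return 0;
}

int main(void)
{
	struct toy_qdisc pfifo = { .id = "pfifo" };
	struct toy_qdisc tbf = { .id = "tbf" };
	struct toy_class cl = { .leaf = &pfifo };
	struct toy_qdisc *old = NULL;
	int err;

	err = toy_graft(&cl, &tbf, &old);
	if (!err) {
		/* Only on success is the displaced qdisc notified about and
		 * destroyed, which notify_and_destroy() does in the patch. */
		assert(old == &pfifo);
		printf("displaced %s, now running %s\n", old->id, cl.leaf->id);
	}
	return 0;
}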
@@ -773,16 +806,8 @@ static int tc_get_qdisc(struct sk_buff *skb, struct nlmsghdr *n, void *arg)
 			return -EINVAL;
 		if (q->handle == 0)
 			return -ENOENT;
-		if ((err = qdisc_graft(dev, p, clid, NULL, &q)) != 0)
+		if ((err = qdisc_graft(dev, p, skb, n, clid, NULL, q)) != 0)
 			return err;
-		if (q) {
-			spinlock_t *root_lock = qdisc_root_lock(q);
-
-			qdisc_notify(skb, n, clid, q, NULL);
-			spin_unlock_bh(root_lock);
-			qdisc_destroy(q);
-			spin_unlock_bh(root_lock);
-		}
 	} else {
 		qdisc_notify(skb, n, clid, NULL, q);
 	}
@@ -923,10 +948,9 @@ create_n_graft:
 
 graft:
 	if (1) {
-		struct Qdisc *old_q = NULL;
 		spinlock_t *root_lock;
 
-		err = qdisc_graft(dev, p, clid, q, &old_q);
+		err = qdisc_graft(dev, p, skb, n, clid, q, NULL);
 		if (err) {
 			if (q) {
 				root_lock = qdisc_root_lock(q);
@@ -936,13 +960,6 @@ graft:
 			}
 			return err;
 		}
-		qdisc_notify(skb, n, clid, old_q, q);
-		if (old_q) {
-			root_lock = qdisc_root_lock(old_q);
-			spin_lock_bh(root_lock);
-			qdisc_destroy(old_q);
-			spin_unlock_bh(root_lock);
-		}
 	}
 	return 0;
 }