aboutsummaryrefslogtreecommitdiffstats
path: root/net/sched
diff options
context:
space:
mode:
authorDavid S. Miller <davem@davemloft.net>2008-07-17 03:47:45 -0400
committerDavid S. Miller <davem@davemloft.net>2008-07-17 22:21:22 -0400
commit8a34c5dc3a7c6431f1cd94c0904be81b296e08ca (patch)
treefd4f5f3872ab0470622d49d02c58334f5c24adec /net/sched
parent16361127ebed0fb8f9d7cc94c6e137eaf710f676 (diff)
pkt_sched: Perform bulk of qdisc destruction in RCU.
This allows less strict control of access to the qdisc attached to a netdev_queue. It is even allowed to enqueue into a qdisc which is in the process of being destroyed. The RCU handler will toss out those packets. We will need this to handle sharing of a qdisc amongst multiple TX queues. In such a setup the lock has to be shared, so will be inside of the qdisc itself. At which point the netdev_queue lock cannot be used to hard synchronize access to the ->qdisc pointer. One operation we have to keep inside of qdisc_destroy() is the list deletion. It is the only piece of state visible after the RCU quiesce period, so we have to undo it early and under the appropriate locking. The operations in the RCU handler do not need any locking because the qdisc tree is no longer visible to anything at that point. Signed-off-by: David S. Miller <davem@davemloft.net>
Diffstat (limited to 'net/sched')
-rw-r--r--net/sched/sch_generic.c20
1 files changed, 11 insertions, 9 deletions
diff --git a/net/sched/sch_generic.c b/net/sched/sch_generic.c
index 7e078c593194..082db8abe703 100644
--- a/net/sched/sch_generic.c
+++ b/net/sched/sch_generic.c
@@ -545,6 +545,17 @@ EXPORT_SYMBOL(qdisc_reset);
 static void __qdisc_destroy(struct rcu_head *head)
 {
 	struct Qdisc *qdisc = container_of(head, struct Qdisc, q_rcu);
+	const struct Qdisc_ops *ops = qdisc->ops;
+
+	gen_kill_estimator(&qdisc->bstats, &qdisc->rate_est);
+	if (ops->reset)
+		ops->reset(qdisc);
+	if (ops->destroy)
+		ops->destroy(qdisc);
+
+	module_put(ops->owner);
+	dev_put(qdisc_dev(qdisc));
+
 	kfree((char *) qdisc - qdisc->padded);
 }
 
@@ -552,21 +563,12 @@ static void __qdisc_destroy(struct rcu_head *head)
 
 void qdisc_destroy(struct Qdisc *qdisc)
 {
-	const struct Qdisc_ops *ops = qdisc->ops;
-
 	if (qdisc->flags & TCQ_F_BUILTIN ||
 	    !atomic_dec_and_test(&qdisc->refcnt))
 		return;
 
 	list_del(&qdisc->list);
-	gen_kill_estimator(&qdisc->bstats, &qdisc->rate_est);
-	if (ops->reset)
-		ops->reset(qdisc);
-	if (ops->destroy)
-		ops->destroy(qdisc);
 
-	module_put(ops->owner);
-	dev_put(qdisc_dev(qdisc));
 	call_rcu(&qdisc->q_rcu, __qdisc_destroy);
 }
 EXPORT_SYMBOL(qdisc_destroy);