aboutsummaryrefslogtreecommitdiffstats
path: root/net/sched/sch_htb.c
diff options
context:
space:
mode:
authorDavid S. Miller <davem@davemloft.net>2008-07-16 04:42:40 -0400
committerDavid S. Miller <davem@davemloft.net>2008-07-17 22:21:19 -0400
commit7698b4fcabcd790efc4f226bada1e7b5870653af (patch)
tree031ce7a911fc5bff995421a5615d9ab25416a479 /net/sched/sch_htb.c
parente2627c8c2241bce45e368e150654d076b58a4595 (diff)
pkt_sched: Add and use qdisc_root() and qdisc_root_lock().
When code wants to lock the qdisc tree state, the logic operation it's doing is locking the top-level qdisc that sits of the root of the netdev_queue. Add qdisc_root_lock() to represent this and convert the easiest cases. In order for this to work out in all cases, we have to hook up the noop_qdisc to a dummy netdev_queue. Signed-off-by: David S. Miller <davem@davemloft.net>
Diffstat (limited to 'net/sched/sch_htb.c')
-rw-r--r--net/sched/sch_htb.c18
1 file changed, 10 insertions, 8 deletions
diff --git a/net/sched/sch_htb.c b/net/sched/sch_htb.c
index c8ca54cc26b0..ee48457eaa4a 100644
--- a/net/sched/sch_htb.c
+++ b/net/sched/sch_htb.c
@@ -1039,11 +1039,12 @@ static int htb_init(struct Qdisc *sch, struct nlattr *opt)
1039 1039
1040static int htb_dump(struct Qdisc *sch, struct sk_buff *skb) 1040static int htb_dump(struct Qdisc *sch, struct sk_buff *skb)
1041{ 1041{
1042 spinlock_t *root_lock = qdisc_root_lock(sch);
1042 struct htb_sched *q = qdisc_priv(sch); 1043 struct htb_sched *q = qdisc_priv(sch);
1043 struct nlattr *nest; 1044 struct nlattr *nest;
1044 struct tc_htb_glob gopt; 1045 struct tc_htb_glob gopt;
1045 1046
1046 spin_lock_bh(&sch->dev_queue->lock); 1047 spin_lock_bh(root_lock);
1047 1048
1048 gopt.direct_pkts = q->direct_pkts; 1049 gopt.direct_pkts = q->direct_pkts;
1049 gopt.version = HTB_VER; 1050 gopt.version = HTB_VER;
@@ -1057,11 +1058,11 @@ static int htb_dump(struct Qdisc *sch, struct sk_buff *skb)
1057 NLA_PUT(skb, TCA_HTB_INIT, sizeof(gopt), &gopt); 1058 NLA_PUT(skb, TCA_HTB_INIT, sizeof(gopt), &gopt);
1058 nla_nest_end(skb, nest); 1059 nla_nest_end(skb, nest);
1059 1060
1060 spin_unlock_bh(&sch->dev_queue->lock); 1061 spin_unlock_bh(root_lock);
1061 return skb->len; 1062 return skb->len;
1062 1063
1063nla_put_failure: 1064nla_put_failure:
1064 spin_unlock_bh(&sch->dev_queue->lock); 1065 spin_unlock_bh(root_lock);
1065 nla_nest_cancel(skb, nest); 1066 nla_nest_cancel(skb, nest);
1066 return -1; 1067 return -1;
1067} 1068}
@@ -1070,10 +1071,11 @@ static int htb_dump_class(struct Qdisc *sch, unsigned long arg,
1070 struct sk_buff *skb, struct tcmsg *tcm) 1071 struct sk_buff *skb, struct tcmsg *tcm)
1071{ 1072{
1072 struct htb_class *cl = (struct htb_class *)arg; 1073 struct htb_class *cl = (struct htb_class *)arg;
1074 spinlock_t *root_lock = qdisc_root_lock(sch);
1073 struct nlattr *nest; 1075 struct nlattr *nest;
1074 struct tc_htb_opt opt; 1076 struct tc_htb_opt opt;
1075 1077
1076 spin_lock_bh(&sch->dev_queue->lock); 1078 spin_lock_bh(root_lock);
1077 tcm->tcm_parent = cl->parent ? cl->parent->common.classid : TC_H_ROOT; 1079 tcm->tcm_parent = cl->parent ? cl->parent->common.classid : TC_H_ROOT;
1078 tcm->tcm_handle = cl->common.classid; 1080 tcm->tcm_handle = cl->common.classid;
1079 if (!cl->level && cl->un.leaf.q) 1081 if (!cl->level && cl->un.leaf.q)
@@ -1095,11 +1097,11 @@ static int htb_dump_class(struct Qdisc *sch, unsigned long arg,
1095 NLA_PUT(skb, TCA_HTB_PARMS, sizeof(opt), &opt); 1097 NLA_PUT(skb, TCA_HTB_PARMS, sizeof(opt), &opt);
1096 1098
1097 nla_nest_end(skb, nest); 1099 nla_nest_end(skb, nest);
1098 spin_unlock_bh(&sch->dev_queue->lock); 1100 spin_unlock_bh(root_lock);
1099 return skb->len; 1101 return skb->len;
1100 1102
1101nla_put_failure: 1103nla_put_failure:
1102 spin_unlock_bh(&sch->dev_queue->lock); 1104 spin_unlock_bh(root_lock);
1103 nla_nest_cancel(skb, nest); 1105 nla_nest_cancel(skb, nest);
1104 return -1; 1106 return -1;
1105} 1107}
@@ -1365,7 +1367,7 @@ static int htb_change_class(struct Qdisc *sch, u32 classid,
1365 goto failure; 1367 goto failure;
1366 1368
1367 gen_new_estimator(&cl->bstats, &cl->rate_est, 1369 gen_new_estimator(&cl->bstats, &cl->rate_est,
1368 &sch->dev_queue->lock, 1370 qdisc_root_lock(sch),
1369 tca[TCA_RATE] ? : &est.nla); 1371 tca[TCA_RATE] ? : &est.nla);
1370 cl->refcnt = 1; 1372 cl->refcnt = 1;
1371 cl->children = 0; 1373 cl->children = 0;
@@ -1420,7 +1422,7 @@ static int htb_change_class(struct Qdisc *sch, u32 classid,
1420 } else { 1422 } else {
1421 if (tca[TCA_RATE]) 1423 if (tca[TCA_RATE])
1422 gen_replace_estimator(&cl->bstats, &cl->rate_est, 1424 gen_replace_estimator(&cl->bstats, &cl->rate_est,
1423 &sch->dev_queue->lock, 1425 qdisc_root_lock(sch),
1424 tca[TCA_RATE]); 1426 tca[TCA_RATE]);
1425 sch_tree_lock(sch); 1427 sch_tree_lock(sch);
1426 } 1428 }