author	Jarek Poplawski <jarkao2@gmail.com>	2010-09-02 16:22:11 -0400
committer	David S. Miller <davem@davemloft.net>	2010-09-02 16:22:11 -0400
commit	0b5d404e349c0236b11466c0a4785520c0be6982 (patch)
tree	f4e51049b1433982591204dd67d158ee84808ace
parent	7bcbf81a2296a8f71342445560dcbe16100b567c (diff)
pkt_sched: Fix lockdep warning on est_tree_lock in gen_estimator
This patch fixes a lockdep warning:

[ 516.287584] =========================================================
[ 516.288386] [ INFO: possible irq lock inversion dependency detected ]
[ 516.288386] 2.6.35b #7
[ 516.288386] ---------------------------------------------------------
[ 516.288386] swapper/0 just changed the state of lock:
[ 516.288386]  (&qdisc_tx_lock){+.-...}, at: [<c12eacda>] est_timer+0x62/0x1b4
[ 516.288386] but this lock took another, SOFTIRQ-unsafe lock in the past:
[ 516.288386]  (est_tree_lock){+.+...}
[ 516.288386]
[ 516.288386] and interrupts could create inverse lock ordering between them.
...

So, est_tree_lock needs BH protection because it's taken by
qdisc_tx_lock, which is used both in BH and process contexts.
(Full warning with this patch at netdev, 02 Sep 2010.)

Fixes commit: ae638c47dc040b8def16d05dc6acdd527628f231
("pkt_sched: gen_estimator: add a new lock")

Signed-off-by: Jarek Poplawski <jarkao2@gmail.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
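For reference, a minimal sketch (not part of this patch) of the inversion the warning describes. The lock and function names below are illustrative stand-ins only: outer_lock plays the role of qdisc_tx_lock (also taken from timer/softirq context by est_timer), inner_lock plays the role of est_tree_lock. Disabling BH while the inner lock is held, which is what the patch's switch to spin_lock_bh() does, closes the window the lockdep splat points at.

#include <linux/spinlock.h>

/* Illustrative stand-ins, not the real locks in gen_estimator.c. */
static DEFINE_SPINLOCK(outer_lock);	/* role of qdisc_tx_lock */
static DEFINE_SPINLOCK(inner_lock);	/* role of est_tree_lock */

/* Process context A: the recorded dependency outer_lock -> inner_lock. */
static void setup_path(void)
{
	spin_lock_bh(&outer_lock);
	spin_lock_bh(&inner_lock);
	/* ... register an estimator ... */
	spin_unlock_bh(&inner_lock);
	spin_unlock_bh(&outer_lock);
}

/*
 * Process context B: takes inner_lock on its own. With plain spin_lock(),
 * a softirq could interrupt this CPU here and spin on outer_lock, which
 * another CPU may hold while waiting in setup_path() for inner_lock:
 * neither side can make progress. spin_lock_bh() keeps softirqs off this
 * CPU for as long as inner_lock is held, so that cycle cannot form.
 */
static void teardown_path(void)
{
	spin_lock_bh(&inner_lock);
	/* ... unlink the estimator ... */
	spin_unlock_bh(&inner_lock);
}

/* Softirq (timer) context: only ever takes the outer lock. */
static void timer_path(void)
{
	spin_lock(&outer_lock);
	/* ... periodic stats update ... */
	spin_unlock(&outer_lock);
}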
-rw-r--r--	net/core/gen_estimator.c	12
1 files changed, 6 insertions, 6 deletions
diff --git a/net/core/gen_estimator.c b/net/core/gen_estimator.c
index 9fbe7f7429b0..6743146e4d6b 100644
--- a/net/core/gen_estimator.c
+++ b/net/core/gen_estimator.c
@@ -232,7 +232,7 @@ int gen_new_estimator(struct gnet_stats_basic_packed *bstats,
 	est->last_packets = bstats->packets;
 	est->avpps = rate_est->pps<<10;
 
-	spin_lock(&est_tree_lock);
+	spin_lock_bh(&est_tree_lock);
 	if (!elist[idx].timer.function) {
 		INIT_LIST_HEAD(&elist[idx].list);
 		setup_timer(&elist[idx].timer, est_timer, idx);
@@ -243,7 +243,7 @@ int gen_new_estimator(struct gnet_stats_basic_packed *bstats,
 
 	list_add_rcu(&est->list, &elist[idx].list);
 	gen_add_node(est);
-	spin_unlock(&est_tree_lock);
+	spin_unlock_bh(&est_tree_lock);
 
 	return 0;
 }
@@ -270,7 +270,7 @@ void gen_kill_estimator(struct gnet_stats_basic_packed *bstats,
 {
 	struct gen_estimator *e;
 
-	spin_lock(&est_tree_lock);
+	spin_lock_bh(&est_tree_lock);
 	while ((e = gen_find_node(bstats, rate_est))) {
 		rb_erase(&e->node, &est_root);
 
@@ -281,7 +281,7 @@ void gen_kill_estimator(struct gnet_stats_basic_packed *bstats,
 		list_del_rcu(&e->list);
 		call_rcu(&e->e_rcu, __gen_kill_estimator);
 	}
-	spin_unlock(&est_tree_lock);
+	spin_unlock_bh(&est_tree_lock);
 }
 EXPORT_SYMBOL(gen_kill_estimator);
 
@@ -320,9 +320,9 @@ bool gen_estimator_active(const struct gnet_stats_basic_packed *bstats,
 
 	ASSERT_RTNL();
 
-	spin_lock(&est_tree_lock);
+	spin_lock_bh(&est_tree_lock);
 	res = gen_find_node(bstats, rate_est) != NULL;
-	spin_unlock(&est_tree_lock);
+	spin_unlock_bh(&est_tree_lock);
 
 	return res;
 }