author     Herbert Xu <herbert@gondor.apana.org.au>  2007-11-26 06:07:34 -0500
committer  Herbert Xu <herbert@gondor.apana.org.au>  2007-11-26 06:07:34 -0500
commit     8053fc3de720e1027d690f892ff7d7c1737fdd9d (patch)
tree       0437efa29587beb5ebb174c402f7954cfde2269a
parent     7f9c33e515353ea91afc62341161fead19e78567 (diff)
[IPSEC]: Temporarily remove locks around copying of non-atomic fields
The change 050f009e16f908932070313c1745d09dc69fd62b ("[IPSEC]: Lock state when copying non-atomic fields to user-space") caused a regression.

Ingo Molnar reports that it causes a potential deadlock found by the lock validator, as it takes x->lock within xfrm_state_lock while numerous other sites take the locks in the opposite order.

For 2.6.24, the best fix is to simply remove the added locks, as that puts us back in the same state we have been in for years. For later kernels, a proper fix would be to reverse the locking order for every xfrm state user, such that if x->lock is taken together with xfrm_state_lock then it is taken within it.

Signed-off-by: Herbert Xu <herbert@gondor.apana.org.au>
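As a minimal sketch of the AB-BA ordering problem described above, the userspace program below uses pthread mutexes standing in for xfrm_state_lock and the per-state x->lock. The function names (dump_states, timer_or_input) and workloads are illustrative assumptions, not the actual xfrm call paths; with unlucky scheduling the two threads each hold one lock and wait forever for the other, which is the inversion the lock validator flags.

/* Illustration only: not kernel code. */
#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t xfrm_state_lock = PTHREAD_MUTEX_INITIALIZER; /* global "outer" lock */
static pthread_mutex_t x_lock          = PTHREAD_MUTEX_INITIALIZER; /* per-state "x->lock" */

/* Path A (what the reverted change did): x->lock taken inside xfrm_state_lock. */
static void *dump_states(void *arg)
{
	pthread_mutex_lock(&xfrm_state_lock);
	pthread_mutex_lock(&x_lock);		/* order: A then B */
	/* ... copy non-atomic state fields out ... */
	pthread_mutex_unlock(&x_lock);
	pthread_mutex_unlock(&xfrm_state_lock);
	return NULL;
}

/* Path B (the pre-existing sites): the same locks taken in the opposite order. */
static void *timer_or_input(void *arg)
{
	pthread_mutex_lock(&x_lock);
	pthread_mutex_lock(&xfrm_state_lock);	/* order: B then A -> potential deadlock */
	/* ... state expiry / insertion work ... */
	pthread_mutex_unlock(&xfrm_state_lock);
	pthread_mutex_unlock(&x_lock);
	return NULL;
}

int main(void)
{
	pthread_t a, b;

	/* Run both paths concurrently; each thread may block on the lock the other holds. */
	pthread_create(&a, NULL, dump_states, NULL);
	pthread_create(&b, NULL, timer_or_input, NULL);
	pthread_join(a, NULL);
	pthread_join(b, NULL);
	printf("no deadlock this run (timing dependent)\n");
	return 0;
}

The proper long-term fix mentioned above amounts to making every path look like dump_states, i.e. always nesting x->lock inside xfrm_state_lock; this patch instead drops the newly added locking so only the old, consistent order remains.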
-rw-r--r--  net/key/af_key.c       2
-rw-r--r--  net/xfrm/xfrm_user.c   2
2 files changed, 0 insertions, 4 deletions
diff --git a/net/key/af_key.c b/net/key/af_key.c
index 3b2d864ab942..878039b9557d 100644
--- a/net/key/af_key.c
+++ b/net/key/af_key.c
@@ -1015,9 +1015,7 @@ static inline struct sk_buff *pfkey_xfrm_state2msg(struct xfrm_state *x)
 {
 	struct sk_buff *skb;
 
-	spin_lock_bh(&x->lock);
 	skb = __pfkey_xfrm_state2msg(x, 1, 3);
-	spin_unlock_bh(&x->lock);
 
 	return skb;
 }
diff --git a/net/xfrm/xfrm_user.c b/net/xfrm/xfrm_user.c
index d41588d101d0..e75dbdcb08a4 100644
--- a/net/xfrm/xfrm_user.c
+++ b/net/xfrm/xfrm_user.c
@@ -507,7 +507,6 @@ static int copy_to_user_state_extra(struct xfrm_state *x,
 				    struct xfrm_usersa_info *p,
 				    struct sk_buff *skb)
 {
-	spin_lock_bh(&x->lock);
 	copy_to_user_state(x, p);
 
 	if (x->coaddr)
@@ -515,7 +514,6 @@ static int copy_to_user_state_extra(struct xfrm_state *x,
 
 	if (x->lastused)
 		NLA_PUT_U64(skb, XFRMA_LASTUSED, x->lastused);
-	spin_unlock_bh(&x->lock);
 
 	if (x->aalg)
 		NLA_PUT(skb, XFRMA_ALG_AUTH, alg_len(x->aalg), x->aalg);