path: root/net/atm
author		Arjan van de Ven <arjan@linux.intel.com>	2006-07-08 16:30:52 -0400
committer	David S. Miller <davem@davemloft.net>	2006-07-08 16:30:52 -0400
commit		1252ecf63f77ea147bd40f5462c7d9e3d3ae2815
tree		15f6fd4d0672ae3c0dc371394aa92874fd619dfd	/net/atm
parent		00181fc94648b4bb30d30ef95506055105316051
[ATM]: fix possible recursive locking in skb_migrate()
OK, this is a real potential deadlock: skb_migrate() takes the locks of two sk_buff_head lists without any lock ordering. The following patch fixes it by sorting the lock acquisition order by the address of the skb list. It is not pretty, but it is the best a minimally invasive fix can do.

Signed-off-by: Arjan van de Ven <arjan@linux.intel.com>
Signed-off-by: Chas Williams <chas@cmf.nrl.navy.mil>
Signed-off-by: David S. Miller <davem@davemloft.net>
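The idiom generalizes beyond skbuffs: whenever two locks of the same lock class must be held at once, pick a global acquisition order (here, by address) and annotate the inner acquisition so lockdep does not report it as recursive locking. Below is a minimal sketch of that pattern; lock_pair() and unlock_pair() are invented names for illustration, not part of this patch or of the kernel API.

#include <linux/spinlock.h>

/*
 * Take two spinlocks of the same class without risking an ABBA deadlock:
 * always acquire the lower-addressed lock first, and tell lockdep that
 * the second acquisition is an intentional same-class nesting.
 */
static void lock_pair(spinlock_t *a, spinlock_t *b, unsigned long *flags)
{
	if (a < b) {
		spin_lock_irqsave(a, *flags);
		spin_lock_nested(b, SINGLE_DEPTH_NESTING);
	} else {
		spin_lock_irqsave(b, *flags);
		spin_lock_nested(a, SINGLE_DEPTH_NESTING);
	}
}

static void unlock_pair(spinlock_t *a, spinlock_t *b, unsigned long flags)
{
	/*
	 * Release order does not matter for deadlock avoidance; mirror the
	 * acquire order so the irq-restoring unlock matches the saving lock.
	 */
	if (a < b) {
		spin_unlock(b);
		spin_unlock_irqrestore(a, flags);
	} else {
		spin_unlock(a);
		spin_unlock_irqrestore(b, flags);
	}
}

The patch below hand-inlines exactly this comparison rather than adding a helper, which keeps the change confined to skb_migrate().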
Diffstat (limited to 'net/atm')
-rw-r--r--	net/atm/ipcommon.c	17
1 file changed, 11 insertions(+), 6 deletions(-)
diff --git a/net/atm/ipcommon.c b/net/atm/ipcommon.c
index 4b1faca5013f..1d3de42fada0 100644
--- a/net/atm/ipcommon.c
+++ b/net/atm/ipcommon.c
@@ -25,22 +25,27 @@
 /*
  * skb_migrate appends the list at "from" to "to", emptying "from" in the
  * process. skb_migrate is atomic with respect to all other skb operations on
- * "from" and "to". Note that it locks both lists at the same time, so beware
- * of potential deadlocks.
+ * "from" and "to". Note that it locks both lists at the same time, so to deal
+ * with the lock ordering, the locks are taken in address order.
  *
  * This function should live in skbuff.c or skbuff.h.
  */


-void skb_migrate(struct sk_buff_head *from,struct sk_buff_head *to)
+void skb_migrate(struct sk_buff_head *from, struct sk_buff_head *to)
 {
 	unsigned long flags;
 	struct sk_buff *skb_from = (struct sk_buff *) from;
 	struct sk_buff *skb_to = (struct sk_buff *) to;
 	struct sk_buff *prev;

-	spin_lock_irqsave(&from->lock,flags);
-	spin_lock(&to->lock);
+	if ((unsigned long) from < (unsigned long) to) {
+		spin_lock_irqsave(&from->lock, flags);
+		spin_lock_nested(&to->lock, SINGLE_DEPTH_NESTING);
+	} else {
+		spin_lock_irqsave(&to->lock, flags);
+		spin_lock_nested(&from->lock, SINGLE_DEPTH_NESTING);
+	}
 	prev = from->prev;
 	from->next->prev = to->prev;
 	prev->next = skb_to;
@@ -51,7 +56,7 @@ void skb_migrate(struct sk_buff_head *from,struct sk_buff_head *to)
 	from->prev = skb_from;
 	from->next = skb_from;
 	from->qlen = 0;
-	spin_unlock_irqrestore(&from->lock,flags);
+	spin_unlock_irqrestore(&from->lock, flags);
 }
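For completeness, a hypothetical caller sketch showing the intended use of skb_migrate(): splicing one queue onto another in a single atomic step. publish_batch() is an invented name, and the extern prototype stands in for the declaration that presumably lived in net/atm/ipcommon.h at the time.

#include <linux/skbuff.h>

void skb_migrate(struct sk_buff_head *from, struct sk_buff_head *to);

/*
 * Splice a privately built queue onto a shared one. After the call,
 * "batch" is empty and reusable; concurrent readers of "shared" never
 * observe a half-migrated list, since both queue locks are held.
 */
static void publish_batch(struct sk_buff_head *batch,
			  struct sk_buff_head *shared)
{
	skb_migrate(batch, shared);
}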