 net/ipv4/ip_sockglue.c | 6 +++---
 net/ipv4/raw.c         | 4 ++--
 net/ipv6/datagram.c    | 6 +++---
 net/ipv6/raw.c         | 8 ++++----
 net/ipv6/udp.c         | 4 ++--
 5 files changed, 14 insertions(+), 14 deletions(-)
diff --git a/net/ipv4/ip_sockglue.c b/net/ipv4/ip_sockglue.c
index 47012b93cad2..f8b172f89811 100644
--- a/net/ipv4/ip_sockglue.c
+++ b/net/ipv4/ip_sockglue.c
@@ -360,14 +360,14 @@ int ip_recv_error(struct sock *sk, struct msghdr *msg, int len)
 	err = copied;
 
 	/* Reset and regenerate socket error */
-	spin_lock_irq(&sk->sk_error_queue.lock);
+	spin_lock_bh(&sk->sk_error_queue.lock);
 	sk->sk_err = 0;
 	if ((skb2 = skb_peek(&sk->sk_error_queue)) != NULL) {
 		sk->sk_err = SKB_EXT_ERR(skb2)->ee.ee_errno;
-		spin_unlock_irq(&sk->sk_error_queue.lock);
+		spin_unlock_bh(&sk->sk_error_queue.lock);
 		sk->sk_error_report(sk);
 	} else
-		spin_unlock_irq(&sk->sk_error_queue.lock);
+		spin_unlock_bh(&sk->sk_error_queue.lock);
 
 out_free_skb:
 	kfree_skb(skb);
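The change is the same in every hunk of this patch: spin_lock_irq()/spin_unlock_irq() on a socket queue lock becomes spin_lock_bh()/spin_unlock_bh(). These queues are filled from process or softirq context (protocol receive and ICMP error delivery), not from hard-IRQ context, so disabling bottom halves around the critical section is sufficient and local interrupts can stay enabled. Below is a minimal sketch of the consumer side of the error-queue pattern above; the helper name is illustrative, not a kernel API.

#include <net/sock.h>
#include <linux/skbuff.h>
#include <linux/errqueue.h>

/* Hypothetical helper mirroring the tail of ip_recv_error(): reset sk_err
 * and regenerate it from the next queued error, locking out only softirqs. */
static void sk_regenerate_error(struct sock *sk)
{
	struct sk_buff *skb2;

	spin_lock_bh(&sk->sk_error_queue.lock);	/* softirq producer is held off */
	sk->sk_err = 0;
	skb2 = skb_peek(&sk->sk_error_queue);
	if (skb2 != NULL)
		sk->sk_err = SKB_EXT_ERR(skb2)->ee.ee_errno;
	spin_unlock_bh(&sk->sk_error_queue.lock);

	if (skb2 != NULL)
		sk->sk_error_report(sk);	/* notify the owner outside the lock, as above */
}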
diff --git a/net/ipv4/raw.c b/net/ipv4/raw.c
index 5b1ec586bae6..8c1512021ee8 100644
--- a/net/ipv4/raw.c
+++ b/net/ipv4/raw.c
@@ -691,11 +691,11 @@ static int raw_ioctl(struct sock *sk, int cmd, unsigned long arg)
 		struct sk_buff *skb;
 		int amount = 0;
 
-		spin_lock_irq(&sk->sk_receive_queue.lock);
+		spin_lock_bh(&sk->sk_receive_queue.lock);
 		skb = skb_peek(&sk->sk_receive_queue);
 		if (skb != NULL)
 			amount = skb->len;
-		spin_unlock_irq(&sk->sk_receive_queue.lock);
+		spin_unlock_bh(&sk->sk_receive_queue.lock);
 		return put_user(amount, (int __user *)arg);
 	}
 
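raw_ioctl() gets the same swap around its SIOCINQ path, which peeks at the head of the receive queue to report how much data the next read would return. From userspace that counter is reached with ioctl(); a small sketch, assuming an already-open raw socket (the helper name is made up):

#include <sys/ioctl.h>
#include <linux/sockios.h>

/* Ask a raw socket how many bytes the next read would return. SIOCINQ is
 * served by the raw_ioctl() path shown above, which reports skb->len of
 * the first queued packet (0 if the queue is empty). Returns -1 with
 * errno set on failure. */
static int pending_bytes(int raw_fd)
{
	int amount = 0;

	if (ioctl(raw_fd, SIOCINQ, &amount) < 0)
		return -1;
	return amount;
}

The IPv6 counterpart further below, rawv6_ioctl(), reports skb->tail - skb->h.raw instead, i.e. the bytes past the transport header rather than the whole skb length.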
diff --git a/net/ipv6/datagram.c b/net/ipv6/datagram.c
index 65b9375df57d..5229365cd8b4 100644
--- a/net/ipv6/datagram.c
+++ b/net/ipv6/datagram.c
@@ -353,14 +353,14 @@ int ipv6_recv_error(struct sock *sk, struct msghdr *msg, int len)
 	err = copied;
 
 	/* Reset and regenerate socket error */
-	spin_lock_irq(&sk->sk_error_queue.lock);
+	spin_lock_bh(&sk->sk_error_queue.lock);
 	sk->sk_err = 0;
 	if ((skb2 = skb_peek(&sk->sk_error_queue)) != NULL) {
 		sk->sk_err = SKB_EXT_ERR(skb2)->ee.ee_errno;
-		spin_unlock_irq(&sk->sk_error_queue.lock);
+		spin_unlock_bh(&sk->sk_error_queue.lock);
 		sk->sk_error_report(sk);
 	} else {
-		spin_unlock_irq(&sk->sk_error_queue.lock);
+		spin_unlock_bh(&sk->sk_error_queue.lock);
 	}
 
 out_free_skb:
diff --git a/net/ipv6/raw.c b/net/ipv6/raw.c
index 617645bc5ed6..e2b848ec9851 100644
--- a/net/ipv6/raw.c
+++ b/net/ipv6/raw.c
@@ -434,12 +434,12 @@ csum_copy_err:
 	/* Clear queue. */
 	if (flags&MSG_PEEK) {
 		int clear = 0;
-		spin_lock_irq(&sk->sk_receive_queue.lock);
+		spin_lock_bh(&sk->sk_receive_queue.lock);
 		if (skb == skb_peek(&sk->sk_receive_queue)) {
 			__skb_unlink(skb, &sk->sk_receive_queue);
 			clear = 1;
 		}
-		spin_unlock_irq(&sk->sk_receive_queue.lock);
+		spin_unlock_bh(&sk->sk_receive_queue.lock);
 		if (clear)
 			kfree_skb(skb);
 	}
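The csum_copy_err paths here and in net/ipv6/udp.c below take the receive-queue lock for a peek-then-unlink discard: a datagram that failed checksum verification during a MSG_PEEK read is still owned by the queue, so the code retakes the lock, checks the skb is still at the head (a concurrent reader may already have taken it), unlinks it, and only then frees it. Distilled into a standalone sketch (hypothetical helper, same idea as the hunk above):

#include <net/sock.h>
#include <linux/skbuff.h>

/* Drop a previously peeked skb from a socket receive queue, but only if it
 * is still at the head; __skb_unlink() requires the caller to hold the
 * queue lock, and _bh is enough because the softirq receive path is the
 * only other writer. */
static void discard_peeked_skb(struct sock *sk, struct sk_buff *skb)
{
	int clear = 0;

	spin_lock_bh(&sk->sk_receive_queue.lock);
	if (skb == skb_peek(&sk->sk_receive_queue)) {
		__skb_unlink(skb, &sk->sk_receive_queue);
		clear = 1;
	}
	spin_unlock_bh(&sk->sk_receive_queue.lock);

	if (clear)
		kfree_skb(skb);
}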
@@ -971,11 +971,11 @@ static int rawv6_ioctl(struct sock *sk, int cmd, unsigned long arg)
 		struct sk_buff *skb;
 		int amount = 0;
 
-		spin_lock_irq(&sk->sk_receive_queue.lock);
+		spin_lock_bh(&sk->sk_receive_queue.lock);
 		skb = skb_peek(&sk->sk_receive_queue);
 		if (skb != NULL)
 			amount = skb->tail - skb->h.raw;
-		spin_unlock_irq(&sk->sk_receive_queue.lock);
+		spin_unlock_bh(&sk->sk_receive_queue.lock);
 		return put_user(amount, (int __user *)arg);
 	}
 
diff --git a/net/ipv6/udp.c b/net/ipv6/udp.c
index e251d0ba4f39..eff050ac7049 100644
--- a/net/ipv6/udp.c
+++ b/net/ipv6/udp.c
@@ -300,12 +300,12 @@ csum_copy_err:
 	/* Clear queue. */
 	if (flags&MSG_PEEK) {
 		int clear = 0;
-		spin_lock_irq(&sk->sk_receive_queue.lock);
+		spin_lock_bh(&sk->sk_receive_queue.lock);
 		if (skb == skb_peek(&sk->sk_receive_queue)) {
 			__skb_unlink(skb, &sk->sk_receive_queue);
 			clear = 1;
 		}
-		spin_unlock_irq(&sk->sk_receive_queue.lock);
+		spin_unlock_bh(&sk->sk_receive_queue.lock);
 		if (clear)
 			kfree_skb(skb);
 	}