author	Herbert Xu <herbert@gondor.apana.org.au>	2005-05-30 18:50:15 -0400
committer	David S. Miller <davem@davemloft.net>	2005-05-30 18:50:15 -0400
commit	208d89843b7b03978d8e748b8b991c1be81c4f43 (patch)
tree	10669e257624aa511eb6b14db296e40f9db777cc /net
parent	9bb7bc942d3da606f184ac6a4dfc7e4d470c831b (diff)
[IPV4]: Fix BUG() in 2.6.x, udp_poll(), fragments + CONFIG_HIGHMEM
Steven Hand <Steven.Hand@cl.cam.ac.uk> wrote:
>
> Reconstructed forward trace:
>
> net/ipv4/udp.c:1334     spin_lock_irq()
> net/ipv4/udp.c:1336     udp_checksum_complete()
> net/core/skbuff.c:1069  skb_shinfo(skb)->nr_frags > 1
> net/core/skbuff.c:1086  kunmap_skb_frag()
> net/core/skbuff.h:1087  local_bh_enable()
> kernel/softirq.c:0140   WARN_ON(irqs_disabled());

The receive queue lock is never taken in IRQs (and should never be) so
we can simply substitute bh for irq.

Signed-off-by: Herbert Xu <herbert@gondor.apana.org.au>
Signed-off-by: David S. Miller <davem@davemloft.net>
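For readers unfamiliar with the locking primitives involved, here is a minimal illustrative sketch of the pattern being changed; it is not part of the patch and the helper names are hypothetical, assuming only the standard <linux/spinlock.h> API. spin_lock_irq() disables hard interrupts, so when the CONFIG_HIGHMEM checksum path reaches kunmap_skb_frag() and its local_bh_enable(), the WARN_ON(irqs_disabled()) in kernel/softirq.c fires while the lock is held. spin_lock_bh() disables only bottom halves, which is sufficient because sk_receive_queue.lock is only ever taken from process and softirq context.

	/* Illustrative sketch only -- not part of this patch. */
	#include <linux/spinlock.h>
	#include <net/sock.h>

	static void peek_queue_old(struct sock *sk)	/* pre-patch pattern */
	{
		spin_lock_irq(&sk->sk_receive_queue.lock);	/* hard IRQs off */
		/*
		 * On a fragmented skb with CONFIG_HIGHMEM,
		 * udp_checksum_complete() ends in kunmap_skb_frag() ->
		 * local_bh_enable(), which hits WARN_ON(irqs_disabled())
		 * while this lock is held.
		 */
		spin_unlock_irq(&sk->sk_receive_queue.lock);
	}

	static void peek_queue_new(struct sock *sk)	/* post-patch pattern */
	{
		spin_lock_bh(&sk->sk_receive_queue.lock);	/* BHs off, IRQs stay on */
		/* Safe: this lock is never taken from hard-IRQ context. */
		spin_unlock_bh(&sk->sk_receive_queue.lock);
	}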
Diffstat (limited to 'net')
-rw-r--r--	net/ipv4/udp.c	12
1 file changed, 6 insertions(+), 6 deletions(-)
diff --git a/net/ipv4/udp.c b/net/ipv4/udp.c
index 4a6952e3fee9..7c24e64b443f 100644
--- a/net/ipv4/udp.c
+++ b/net/ipv4/udp.c
@@ -738,7 +738,7 @@ int udp_ioctl(struct sock *sk, int cmd, unsigned long arg)
 		unsigned long amount;
 
 		amount = 0;
-		spin_lock_irq(&sk->sk_receive_queue.lock);
+		spin_lock_bh(&sk->sk_receive_queue.lock);
 		skb = skb_peek(&sk->sk_receive_queue);
 		if (skb != NULL) {
 			/*
@@ -748,7 +748,7 @@ int udp_ioctl(struct sock *sk, int cmd, unsigned long arg)
 			 */
 			amount = skb->len - sizeof(struct udphdr);
 		}
-		spin_unlock_irq(&sk->sk_receive_queue.lock);
+		spin_unlock_bh(&sk->sk_receive_queue.lock);
 		return put_user(amount, (int __user *)arg);
 	}
 
@@ -848,12 +848,12 @@ csum_copy_err:
 	/* Clear queue. */
 	if (flags&MSG_PEEK) {
 		int clear = 0;
-		spin_lock_irq(&sk->sk_receive_queue.lock);
+		spin_lock_bh(&sk->sk_receive_queue.lock);
 		if (skb == skb_peek(&sk->sk_receive_queue)) {
 			__skb_unlink(skb, &sk->sk_receive_queue);
 			clear = 1;
 		}
-		spin_unlock_irq(&sk->sk_receive_queue.lock);
+		spin_unlock_bh(&sk->sk_receive_queue.lock);
 		if (clear)
 			kfree_skb(skb);
 	}
@@ -1334,7 +1334,7 @@ unsigned int udp_poll(struct file *file, struct socket *sock, poll_table *wait)
 		struct sk_buff_head *rcvq = &sk->sk_receive_queue;
 		struct sk_buff *skb;
 
-		spin_lock_irq(&rcvq->lock);
+		spin_lock_bh(&rcvq->lock);
 		while ((skb = skb_peek(rcvq)) != NULL) {
 			if (udp_checksum_complete(skb)) {
 				UDP_INC_STATS_BH(UDP_MIB_INERRORS);
@@ -1345,7 +1345,7 @@ unsigned int udp_poll(struct file *file, struct socket *sock, poll_table *wait)
 				break;
 			}
 		}
-		spin_unlock_irq(&rcvq->lock);
+		spin_unlock_bh(&rcvq->lock);
 
 		/* nothing to see, move along */
 		if (skb == NULL)