author     Benjamin LaHaise <bcrl@kvack.org>      2012-04-27 04:23:59 -0400
committer  David S. Miller <davem@davemloft.net>  2012-04-28 22:21:51 -0400
commit     cb80ef463d1881757ade3197cdf875a2ff856651 (patch)
tree       a2b8f2b4f087dfc90d9148a94fcc2aa6702eb5eb /net/ipv6
parent     f7ad74fef3af6c6e2ef7f01c5589d77fe7db3d7c (diff)
net/ipv6/udp: UDP encapsulation: move socket locking into udpv6_queue_rcv_skb()
In order to make sure that when the encap_rcv() hook is introduced it is
not called with the socket lock held, move socket locking from callers into
udpv6_queue_rcv_skb(), matching what happens in IPv4.
Signed-off-by: Benjamin LaHaise <bcrl@kvack.org>
Signed-off-by: David S. Miller <davem@davemloft.net>
Diffstat (limited to 'net/ipv6')
-rw-r--r--  net/ipv6/udp.c  97
1 files changed, 44 insertions, 53 deletions
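What the patch centralizes is the standard bottom-half delivery dance: take the socket lock, deliver straight to the receive queue when no user-context path owns the socket, otherwise push the packet onto the backlog, dropping it when the backlog is full. Below is a minimal userspace model of that control flow, with a pthread mutex standing in for bh_lock_sock(); it is an illustration of the pattern only, not kernel code.

/* Userspace model of the lock/deliver-or-backlog pattern that this patch
 * moves into udpv6_queue_rcv_skb().  Names mirror the kernel's, but every
 * definition here is an illustrative stand-in.
 */
#include <pthread.h>
#include <stdio.h>

struct sock {
	pthread_mutex_t lock;	/* stands in for bh_lock_sock()/bh_unlock_sock() */
	int owned_by_user;	/* stands in for sock_owned_by_user() */
	int backlog_len;
	int backlog_max;	/* stands in for the sk_rcvbuf-based limit */
};

/* stands in for __udpv6_queue_rcv_skb(): direct receive-queue delivery */
static int queue_direct(struct sock *sk, const char *pkt)
{
	printf("delivered: %s\n", pkt);
	return 0;
}

/* stands in for udpv6_queue_rcv_skb() after this patch: the caller no
 * longer takes the socket lock itself
 */
static int queue_rcv(struct sock *sk, const char *pkt)
{
	int rc = 0;

	pthread_mutex_lock(&sk->lock);
	if (!sk->owned_by_user) {
		rc = queue_direct(sk, pkt);	/* fast path */
	} else if (sk->backlog_len >= sk->backlog_max) {
		pthread_mutex_unlock(&sk->lock);
		return -1;			/* drop: backlog full */
	} else {
		sk->backlog_len++;		/* stands in for sk_add_backlog() */
	}
	pthread_mutex_unlock(&sk->lock);
	return rc;
}

int main(void)
{
	struct sock sk = { PTHREAD_MUTEX_INITIALIZER, 0, 0, 16 };
	return queue_rcv(&sk, "datagram");
}

Keeping this sequence inside udpv6_queue_rcv_skb() means every caller gets identical locking and drop accounting, instead of each call site repeating it.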
diff --git a/net/ipv6/udp.c b/net/ipv6/udp.c
index 6c0367ff7be..bc533ea8fc6 100644
--- a/net/ipv6/udp.c
+++ b/net/ipv6/udp.c
@@ -558,14 +558,25 @@ int udpv6_queue_rcv_skb(struct sock *sk, struct sk_buff *skb)
 		goto drop;
 	}
 
+	if (sk_rcvqueues_full(sk, skb, sk->sk_rcvbuf))
+		goto drop;
+
 	skb_dst_drop(skb);
 
-	rc = __udpv6_queue_rcv_skb(sk, skb);
+	bh_lock_sock(sk);
+	rc = 0;
+	if (!sock_owned_by_user(sk))
+		rc = __udpv6_queue_rcv_skb(sk, skb);
+	else if (sk_add_backlog(sk, skb, sk->sk_rcvbuf)) {
+		bh_unlock_sock(sk);
+		goto drop;
+	}
+	bh_unlock_sock(sk);
 
 	return rc;
 drop:
-	atomic_inc(&sk->sk_drops);
 	UDP6_INC_STATS_BH(sock_net(sk), UDP_MIB_INERRORS, is_udplite);
+	atomic_inc(&sk->sk_drops);
 	kfree_skb(skb);
 	return -1;
 }
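With locking now internal to udpv6_queue_rcv_skb(), a later patch in this series can run an encap_rcv() hook before bh_lock_sock() is taken, matching what IPv4's udp_queue_rcv_skb() already does for encapsulated sockets. A sketch of that shape, extending the userspace model above (the hook and its "ret <= 0 means consumed" convention are borrowed from the IPv4 side; no such hook exists for IPv6 at this commit, so the names here are hypothetical):

/* Hypothetical sketch, building on the model above: where an
 * encap_rcv()-style hook would sit once introduced, i.e. before any
 * socket locking.
 */
typedef int (*encap_rcv_t)(struct sock *sk, const char *pkt);

static int queue_rcv_with_encap(struct sock *sk, encap_rcv_t encap_rcv,
				const char *pkt)
{
	if (encap_rcv) {
		int ret = encap_rcv(sk, pkt);	/* runs without the socket lock */
		if (ret <= 0)
			return ret;		/* hook consumed the packet */
		/* ret > 0: fall through, treat as an ordinary UDP datagram */
	}
	return queue_rcv(sk, pkt);		/* locking handled inside, as above */
}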
@@ -614,37 +625,27 @@ static struct sock *udp_v6_mcast_next(struct net *net, struct sock *sk,
 static void flush_stack(struct sock **stack, unsigned int count,
 			struct sk_buff *skb, unsigned int final)
 {
-	unsigned int i;
+	struct sk_buff *skb1 = NULL;
 	struct sock *sk;
-	struct sk_buff *skb1;
+	unsigned int i;
 
 	for (i = 0; i < count; i++) {
-		skb1 = (i == final) ? skb : skb_clone(skb, GFP_ATOMIC);
-
 		sk = stack[i];
-		if (skb1) {
-			if (sk_rcvqueues_full(sk, skb1, sk->sk_rcvbuf)) {
-				kfree_skb(skb1);
-				goto drop;
-			}
-			bh_lock_sock(sk);
-			if (!sock_owned_by_user(sk))
-				udpv6_queue_rcv_skb(sk, skb1);
-			else if (sk_add_backlog(sk, skb1, sk->sk_rcvbuf)) {
-				kfree_skb(skb1);
-				bh_unlock_sock(sk);
-				goto drop;
-			}
-			bh_unlock_sock(sk);
-			continue;
+		if (likely(skb1 == NULL))
+			skb1 = (i == final) ? skb : skb_clone(skb, GFP_ATOMIC);
+		if (!skb1) {
+			atomic_inc(&sk->sk_drops);
+			UDP6_INC_STATS_BH(sock_net(sk), UDP_MIB_RCVBUFERRORS,
+					  IS_UDPLITE(sk));
+			UDP6_INC_STATS_BH(sock_net(sk), UDP_MIB_INERRORS,
+					  IS_UDPLITE(sk));
 		}
-drop:
-		atomic_inc(&sk->sk_drops);
-		UDP6_INC_STATS_BH(sock_net(sk),
-				UDP_MIB_RCVBUFERRORS, IS_UDPLITE(sk));
-		UDP6_INC_STATS_BH(sock_net(sk),
-				UDP_MIB_INERRORS, IS_UDPLITE(sk));
+
+		if (skb1 && udpv6_queue_rcv_skb(sk, skb1) <= 0)
+			skb1 = NULL;
 	}
+	if (unlikely(skb1))
+		kfree_skb(skb1);
 }
 /*
  * Note: called only from the BH handler context,
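The flush_stack() rework is a clone-reuse pattern: clone lazily, and if a delivery attempt does not consume the buffer, carry the same clone over to the next socket instead of freeing and re-cloning; at most one leftover clone is freed after the loop. Here is a compilable userspace model of that pattern, with strdup()/free() standing in for skb_clone()/kfree_skb() and deliver() for udpv6_queue_rcv_skb(), which returns <= 0 once it has taken ownership. The kernel additionally hands the original skb (rather than a clone) to the final socket and bumps per-socket drop statistics on clone failure; both are elided here.

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

/* stands in for udpv6_queue_rcv_skb(): <= 0 means it took ownership of
 * buf, > 0 means the caller still owns it and may reuse it
 */
static int deliver(int sk, char *buf)
{
	printf("sock %d got: %s\n", sk, buf);
	free(buf);
	return 0;			/* consumed */
}

static void flush_stack_model(const int *stack, unsigned int count,
			      const char *pkt)
{
	char *copy = NULL;
	unsigned int i;

	for (i = 0; i < count; i++) {
		/* clone lazily: only if the previous copy was consumed */
		if (copy == NULL)
			copy = strdup(pkt);	/* stands in for skb_clone() */
		if (copy == NULL)
			fprintf(stderr, "sock %d: clone failed, drop\n",
				stack[i]);

		/* if delivery consumed the copy, force a fresh clone next time */
		if (copy && deliver(stack[i], copy) <= 0)
			copy = NULL;
	}
	/* at most one unconsumed copy can be left over */
	if (copy)
		free(copy);
}

int main(void)
{
	int socks[] = { 3, 4, 5 };
	flush_stack_model(socks, 3, "mcast datagram");
	return 0;
}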
@@ -784,39 +785,29 @@ int __udp6_lib_rcv(struct sk_buff *skb, struct udp_table *udptable,
 	 * for sock caches... i'll skip this for now.
 	 */
 	sk = __udp6_lib_lookup_skb(skb, uh->source, uh->dest, udptable);
+	if (sk != NULL) {
+		int ret = udpv6_queue_rcv_skb(sk, skb);
+		sock_put(sk);
 
-	if (sk == NULL) {
-		if (!xfrm6_policy_check(NULL, XFRM_POLICY_IN, skb))
-			goto discard;
-
-		if (udp_lib_checksum_complete(skb))
-			goto discard;
-		UDP6_INC_STATS_BH(net, UDP_MIB_NOPORTS,
-				proto == IPPROTO_UDPLITE);
-
-		icmpv6_send(skb, ICMPV6_DEST_UNREACH, ICMPV6_PORT_UNREACH, 0);
+		/* a return value > 0 means to resubmit the input, but
+		 * it wants the return to be -protocol, or 0
+		 */
+		if (ret > 0)
+			return -ret;
 
-		kfree_skb(skb);
 		return 0;
 	}
 
-	/* deliver */
-
-	if (sk_rcvqueues_full(sk, skb, sk->sk_rcvbuf)) {
-		sock_put(sk);
+	if (!xfrm6_policy_check(NULL, XFRM_POLICY_IN, skb))
 		goto discard;
-	}
-	bh_lock_sock(sk);
-	if (!sock_owned_by_user(sk))
-		udpv6_queue_rcv_skb(sk, skb);
-	else if (sk_add_backlog(sk, skb, sk->sk_rcvbuf)) {
-		atomic_inc(&sk->sk_drops);
-		bh_unlock_sock(sk);
-		sock_put(sk);
+
+	if (udp_lib_checksum_complete(skb))
 		goto discard;
-	}
-	bh_unlock_sock(sk);
-	sock_put(sk);
+
+	UDP6_INC_STATS_BH(net, UDP_MIB_NOPORTS, proto == IPPROTO_UDPLITE);
+	icmpv6_send(skb, ICMPV6_DEST_UNREACH, ICMPV6_PORT_UNREACH, 0);
+
+	kfree_skb(skb);
 	return 0;
 
 short_packet:
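The last hunk also encodes the return convention spelled out in the new comment: a positive return from udpv6_queue_rcv_skb() is flipped to -protocol by __udp6_lib_rcv(), asking the IP input path to re-dispatch the packet, the same contract the IPv4 receive path uses (in ip_local_deliver_finish(), a negative handler return of -protocol triggers resubmission). A simplified standalone model of the dispatcher side of that contract; protocol numbers aside, none of this is the kernel's actual dispatch code:

#include <stdio.h>

/* stands in for __udp6_lib_rcv(): returns 0 when the packet was handled
 * (or dropped), or a negative -protocol asking the dispatcher to
 * re-dispatch, the case the new comment describes
 */
static int udp6_rcv_model(const char *pkt)
{
	(void)pkt;
	return 0;			/* common case: delivered */
}

/* simplified model of the IP input dispatch loop: a negative handler
 * return is read as "-protocol, resubmit"
 */
static int dispatch(int proto, const char *pkt)
{
	int ret;

resubmit:
	switch (proto) {
	case 17:			/* IPPROTO_UDP */
		ret = udp6_rcv_model(pkt);
		break;
	default:
		printf("no handler for protocol %d\n", proto);
		return 1;
	}
	if (ret < 0) {			/* handler asked: resubmit as -ret */
		proto = -ret;
		goto resubmit;
	}
	return 0;
}

int main(void)
{
	return dispatch(17, "datagram");
}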