diff options
author:    David Held <drheld@google.com>        2014-07-15 23:28:31 -0400
committer: David S. Miller <davem@davemloft.net> 2014-07-17 02:29:52 -0400
commit:    5cf3d46192fccf68b4a4759e4d7346e41c669a76 (patch)
tree:      b2a162b9ee42c0842e3178eefb7759d64ebb0416
parent:    3e1c0f0b06e38b50bfca197a6443d639353bb035 (diff)
udp: Simplify __udp*_lib_mcast_deliver.
Switch to using sk_nulls_for_each which shortens the code and makes it
easier to update.
Signed-off-by: David Held <drheld@google.com>
Acked-by: Eric Dumazet <edumazet@google.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
-rw-r--r-- | net/ipv4/udp.c | 48 | ||||
-rw-r--r-- | net/ipv6/udp.c | 88 |
2 files changed, 49 insertions(+), 87 deletions(-)
diff --git a/net/ipv4/udp.c b/net/ipv4/udp.c index 668af516f094..bbcc33737ef1 100644 --- a/net/ipv4/udp.c +++ b/net/ipv4/udp.c | |||
@@ -594,26 +594,6 @@ static inline bool __udp_is_mcast_sock(struct net *net, struct sock *sk, | |||
594 | return true; | 594 | return true; |
595 | } | 595 | } |
596 | 596 | ||
597 | static inline struct sock *udp_v4_mcast_next(struct net *net, struct sock *sk, | ||
598 | __be16 loc_port, __be32 loc_addr, | ||
599 | __be16 rmt_port, __be32 rmt_addr, | ||
600 | int dif) | ||
601 | { | ||
602 | struct hlist_nulls_node *node; | ||
603 | unsigned short hnum = ntohs(loc_port); | ||
604 | |||
605 | sk_nulls_for_each_from(sk, node) { | ||
606 | if (__udp_is_mcast_sock(net, sk, | ||
607 | loc_port, loc_addr, | ||
608 | rmt_port, rmt_addr, | ||
609 | dif, hnum)) | ||
610 | goto found; | ||
611 | } | ||
612 | sk = NULL; | ||
613 | found: | ||
614 | return sk; | ||
615 | } | ||
616 | |||
617 | /* | 597 | /* |
618 | * This routine is called by the ICMP module when it gets some | 598 | * This routine is called by the ICMP module when it gets some |
619 | * sort of error condition. If err < 0 then the socket should | 599 | * sort of error condition. If err < 0 then the socket should |
@@ -1667,23 +1647,23 @@ static int __udp4_lib_mcast_deliver(struct net *net, struct sk_buff *skb, | |||
1667 | struct udp_table *udptable) | 1647 | struct udp_table *udptable) |
1668 | { | 1648 | { |
1669 | struct sock *sk, *stack[256 / sizeof(struct sock *)]; | 1649 | struct sock *sk, *stack[256 / sizeof(struct sock *)]; |
1670 | struct udp_hslot *hslot = udp_hashslot(udptable, net, ntohs(uh->dest)); | 1650 | struct hlist_nulls_node *node; |
1671 | int dif; | 1651 | unsigned short hnum = ntohs(uh->dest); |
1652 | struct udp_hslot *hslot = udp_hashslot(udptable, net, hnum); | ||
1653 | int dif = skb->dev->ifindex; | ||
1672 | unsigned int i, count = 0; | 1654 | unsigned int i, count = 0; |
1673 | 1655 | ||
1674 | spin_lock(&hslot->lock); | 1656 | spin_lock(&hslot->lock); |
1675 | sk = sk_nulls_head(&hslot->head); | 1657 | sk_nulls_for_each(sk, node, &hslot->head) { |
1676 | dif = skb->dev->ifindex; | 1658 | if (__udp_is_mcast_sock(net, sk, |
1677 | sk = udp_v4_mcast_next(net, sk, uh->dest, daddr, uh->source, saddr, dif); | 1659 | uh->dest, daddr, |
1678 | while (sk) { | 1660 | uh->source, saddr, |
1679 | stack[count++] = sk; | 1661 | dif, hnum)) { |
1680 | sk = udp_v4_mcast_next(net, sk_nulls_next(sk), uh->dest, | 1662 | if (unlikely(count == ARRAY_SIZE(stack))) { |
1681 | daddr, uh->source, saddr, dif); | 1663 | flush_stack(stack, count, skb, ~0); |
1682 | if (unlikely(count == ARRAY_SIZE(stack))) { | 1664 | count = 0; |
1683 | if (!sk) | 1665 | } |
1684 | break; | 1666 | stack[count++] = sk; |
1685 | flush_stack(stack, count, skb, ~0); | ||
1686 | count = 0; | ||
1687 | } | 1667 | } |
1688 | } | 1668 | } |
1689 | /* | 1669 | /* |
diff --git a/net/ipv6/udp.c b/net/ipv6/udp.c index b4481df3d5fa..7d3bd80085be 100644 --- a/net/ipv6/udp.c +++ b/net/ipv6/udp.c | |||
@@ -702,43 +702,26 @@ drop: | |||
702 | return -1; | 702 | return -1; |
703 | } | 703 | } |
704 | 704 | ||
705 | static struct sock *udp_v6_mcast_next(struct net *net, struct sock *sk, | 705 | static bool __udp_v6_is_mcast_sock(struct net *net, struct sock *sk, |
706 | __be16 loc_port, const struct in6_addr *loc_addr, | 706 | __be16 loc_port, const struct in6_addr *loc_addr, |
707 | __be16 rmt_port, const struct in6_addr *rmt_addr, | 707 | __be16 rmt_port, const struct in6_addr *rmt_addr, |
708 | int dif) | 708 | int dif, unsigned short hnum) |
709 | { | 709 | { |
710 | struct hlist_nulls_node *node; | 710 | struct inet_sock *inet = inet_sk(sk); |
711 | unsigned short num = ntohs(loc_port); | ||
712 | |||
713 | sk_nulls_for_each_from(sk, node) { | ||
714 | struct inet_sock *inet = inet_sk(sk); | ||
715 | |||
716 | if (!net_eq(sock_net(sk), net)) | ||
717 | continue; | ||
718 | |||
719 | if (udp_sk(sk)->udp_port_hash == num && | ||
720 | sk->sk_family == PF_INET6) { | ||
721 | if (inet->inet_dport) { | ||
722 | if (inet->inet_dport != rmt_port) | ||
723 | continue; | ||
724 | } | ||
725 | if (!ipv6_addr_any(&sk->sk_v6_daddr) && | ||
726 | !ipv6_addr_equal(&sk->sk_v6_daddr, rmt_addr)) | ||
727 | continue; | ||
728 | |||
729 | if (sk->sk_bound_dev_if && sk->sk_bound_dev_if != dif) | ||
730 | continue; | ||
731 | 711 | ||
732 | if (!ipv6_addr_any(&sk->sk_v6_rcv_saddr)) { | 712 | if (!net_eq(sock_net(sk), net)) |
733 | if (!ipv6_addr_equal(&sk->sk_v6_rcv_saddr, loc_addr)) | 713 | return false; |
734 | continue; | 714 | |
735 | } | 715 | if (udp_sk(sk)->udp_port_hash != hnum || |
736 | if (!inet6_mc_check(sk, loc_addr, rmt_addr)) | 716 | sk->sk_family != PF_INET6 || |
737 | continue; | 717 | (inet->inet_dport && inet->inet_dport != rmt_port) || |
738 | return sk; | 718 | (!ipv6_addr_any(&sk->sk_v6_daddr) && |
739 | } | 719 | !ipv6_addr_equal(&sk->sk_v6_daddr, rmt_addr)) || |
740 | } | 720 | (sk->sk_bound_dev_if && sk->sk_bound_dev_if != dif)) |
741 | return NULL; | 721 | return false; |
722 | if (!inet6_mc_check(sk, loc_addr, rmt_addr)) | ||
723 | return false; | ||
724 | return true; | ||
742 | } | 725 | } |
743 | 726 | ||
744 | static void flush_stack(struct sock **stack, unsigned int count, | 727 | static void flush_stack(struct sock **stack, unsigned int count, |
@@ -787,28 +770,27 @@ static int __udp6_lib_mcast_deliver(struct net *net, struct sk_buff *skb, | |||
787 | { | 770 | { |
788 | struct sock *sk, *stack[256 / sizeof(struct sock *)]; | 771 | struct sock *sk, *stack[256 / sizeof(struct sock *)]; |
789 | const struct udphdr *uh = udp_hdr(skb); | 772 | const struct udphdr *uh = udp_hdr(skb); |
790 | struct udp_hslot *hslot = udp_hashslot(udptable, net, ntohs(uh->dest)); | 773 | struct hlist_nulls_node *node; |
791 | int dif; | 774 | unsigned short hnum = ntohs(uh->dest); |
775 | struct udp_hslot *hslot = udp_hashslot(udptable, net, hnum); | ||
776 | int dif = inet6_iif(skb); | ||
792 | unsigned int i, count = 0; | 777 | unsigned int i, count = 0; |
793 | 778 | ||
794 | spin_lock(&hslot->lock); | 779 | spin_lock(&hslot->lock); |
795 | sk = sk_nulls_head(&hslot->head); | 780 | sk_nulls_for_each(sk, node, &hslot->head) { |
796 | dif = inet6_iif(skb); | 781 | if (__udp_v6_is_mcast_sock(net, sk, |
797 | sk = udp_v6_mcast_next(net, sk, uh->dest, daddr, uh->source, saddr, dif); | 782 | uh->dest, daddr, |
798 | while (sk) { | 783 | uh->source, saddr, |
799 | /* If zero checksum and no_check is not on for | 784 | dif, hnum) && |
800 | * the socket then skip it. | 785 | /* If zero checksum and no_check is not on for |
801 | */ | 786 | * the socket then skip it. |
802 | if (uh->check || udp_sk(sk)->no_check6_rx) | 787 | */ |
788 | (uh->check || udp_sk(sk)->no_check6_rx)) { | ||
789 | if (unlikely(count == ARRAY_SIZE(stack))) { | ||
790 | flush_stack(stack, count, skb, ~0); | ||
791 | count = 0; | ||
792 | } | ||
803 | stack[count++] = sk; | 793 | stack[count++] = sk; |
804 | |||
805 | sk = udp_v6_mcast_next(net, sk_nulls_next(sk), uh->dest, daddr, | ||
806 | uh->source, saddr, dif); | ||
807 | if (unlikely(count == ARRAY_SIZE(stack))) { | ||
808 | if (!sk) | ||
809 | break; | ||
810 | flush_stack(stack, count, skb, ~0); | ||
811 | count = 0; | ||
812 | } | 794 | } |
813 | } | 795 | } |
814 | /* | 796 | /* |