author		Rick Jones <rick.jones2@hp.com>		2014-11-06 13:37:54 -0500
committer	David S. Miller <davem@davemloft.net>	2014-11-07 15:45:50 -0500
commit		36cbb2452cbafca64dcdd3578047433787900cf0 (patch)
tree		3e50381574c3850fc2a6c76a14777daa821f85a0 /net
parent		f46ad73ac6965f71d1a2ba217c4eb5b644e62949 (diff)
udp: Increment UDP_MIB_IGNOREDMULTI for arriving unmatched multicasts
As NIC multicast filtering isn't perfect, and some platforms are quite content to spew broadcasts, we should not trigger an event for skb:kfree_skb when we do not have a match for such an incoming datagram. We do, though, want to avoid sweeping the matter under the rug entirely, so increment a suitable statistic.

This incorporates feedback from David L. Stevens, Karl Neiss and Eric Dumazet.

V3 - use bool per David Miller

Signed-off-by: Rick Jones <rick.jones2@hp.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
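With this change the counter is exported alongside the existing UDP MIB entries: "IgnoredMulti" in the Udp section of /proc/net/snmp and "Udp6IgnoredMulti" in /proc/net/snmp6 (the names come from the proc.c hunks below). As a rough illustration only, not part of the patch, a minimal userspace reader for the IPv6 counter might look like the sketch that follows; it assumes the usual one-"name value"-pair-per-line layout of /proc/net/snmp6 on a kernel carrying this patch.

/*
 * Sketch: print the Udp6IgnoredMulti counter from /proc/net/snmp6.
 * Assumes a kernel with this patch applied; the counter name is taken
 * from the net/ipv6/proc.c hunk below. Error handling is kept minimal.
 */
#include <stdio.h>
#include <string.h>

int main(void)
{
	FILE *fp = fopen("/proc/net/snmp6", "r");
	char name[64];
	unsigned long long val;

	if (!fp) {
		perror("fopen /proc/net/snmp6");
		return 1;
	}
	/* Each line of snmp6 is "<counter name> <value>". */
	while (fscanf(fp, "%63s %llu", name, &val) == 2) {
		if (!strcmp(name, "Udp6IgnoredMulti")) {
			printf("Udp6IgnoredMulti = %llu\n", val);
			break;
		}
	}
	fclose(fp);
	return 0;
}

Polling the counter before and after a burst of unmatched multicast or broadcast traffic should show it advancing, rather than the skb:kfree_skb tracepoint firing for each dropped datagram.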
Diffstat (limited to 'net')
-rw-r--r--	net/ipv4/proc.c	 1
-rw-r--r--	net/ipv4/udp.c	12
-rw-r--r--	net/ipv6/proc.c	 1
-rw-r--r--	net/ipv6/udp.c	11
4 files changed, 19 insertions(+), 6 deletions(-)
diff --git a/net/ipv4/proc.c b/net/ipv4/proc.c
index f0d4eb8b99b9..6513ade8d6dc 100644
--- a/net/ipv4/proc.c
+++ b/net/ipv4/proc.c
@@ -181,6 +181,7 @@ static const struct snmp_mib snmp4_udp_list[] = {
 	SNMP_MIB_ITEM("RcvbufErrors", UDP_MIB_RCVBUFERRORS),
 	SNMP_MIB_ITEM("SndbufErrors", UDP_MIB_SNDBUFERRORS),
 	SNMP_MIB_ITEM("InCsumErrors", UDP_MIB_CSUMERRORS),
+	SNMP_MIB_ITEM("IgnoredMulti", UDP_MIB_IGNOREDMULTI),
 	SNMP_MIB_SENTINEL
 };
 
diff --git a/net/ipv4/udp.c b/net/ipv4/udp.c
index df19027f44f3..5d0fdca8e965 100644
--- a/net/ipv4/udp.c
+++ b/net/ipv4/udp.c
@@ -1647,7 +1647,8 @@ static void udp_sk_rx_dst_set(struct sock *sk, struct dst_entry *dst)
 static int __udp4_lib_mcast_deliver(struct net *net, struct sk_buff *skb,
 				    struct udphdr *uh,
 				    __be32 saddr, __be32 daddr,
-				    struct udp_table *udptable)
+				    struct udp_table *udptable,
+				    int proto)
 {
 	struct sock *sk, *stack[256 / sizeof(struct sock *)];
 	struct hlist_nulls_node *node;
@@ -1656,6 +1657,7 @@ static int __udp4_lib_mcast_deliver(struct net *net, struct sk_buff *skb,
 	int dif = skb->dev->ifindex;
 	unsigned int count = 0, offset = offsetof(typeof(*sk), sk_nulls_node);
 	unsigned int hash2 = 0, hash2_any = 0, use_hash2 = (hslot->count > 10);
+	bool inner_flushed = false;
 
 	if (use_hash2) {
 		hash2_any = udp4_portaddr_hash(net, htonl(INADDR_ANY), hnum) &
@@ -1674,6 +1676,7 @@ start_lookup:
 				     dif, hnum)) {
 			if (unlikely(count == ARRAY_SIZE(stack))) {
 				flush_stack(stack, count, skb, ~0);
+				inner_flushed = true;
 				count = 0;
 			}
 			stack[count++] = sk;
@@ -1695,7 +1698,10 @@ start_lookup:
 	if (count) {
 		flush_stack(stack, count, skb, count - 1);
 	} else {
-		kfree_skb(skb);
+		if (!inner_flushed)
+			UDP_INC_STATS_BH(net, UDP_MIB_IGNOREDMULTI,
+					 proto == IPPROTO_UDPLITE);
+		consume_skb(skb);
 	}
 	return 0;
 }
@@ -1781,7 +1787,7 @@ int __udp4_lib_rcv(struct sk_buff *skb, struct udp_table *udptable,
 
 	if (rt->rt_flags & (RTCF_BROADCAST|RTCF_MULTICAST))
 		return __udp4_lib_mcast_deliver(net, skb, uh,
-						saddr, daddr, udptable);
+						saddr, daddr, udptable, proto);
 
 	sk = __udp4_lib_lookup_skb(skb, uh->source, uh->dest, udptable);
 	if (sk != NULL) {
diff --git a/net/ipv6/proc.c b/net/ipv6/proc.c
index 1752cd0b4882..679253d0af84 100644
--- a/net/ipv6/proc.c
+++ b/net/ipv6/proc.c
@@ -136,6 +136,7 @@ static const struct snmp_mib snmp6_udp6_list[] = {
 	SNMP_MIB_ITEM("Udp6RcvbufErrors", UDP_MIB_RCVBUFERRORS),
 	SNMP_MIB_ITEM("Udp6SndbufErrors", UDP_MIB_SNDBUFERRORS),
 	SNMP_MIB_ITEM("Udp6InCsumErrors", UDP_MIB_CSUMERRORS),
+	SNMP_MIB_ITEM("Udp6IgnoredMulti", UDP_MIB_IGNOREDMULTI),
 	SNMP_MIB_SENTINEL
 };
 
diff --git a/net/ipv6/udp.c b/net/ipv6/udp.c
index 9b6809232b17..b756355e9739 100644
--- a/net/ipv6/udp.c
+++ b/net/ipv6/udp.c
@@ -771,7 +771,7 @@ static void udp6_csum_zero_error(struct sk_buff *skb)
  */
 static int __udp6_lib_mcast_deliver(struct net *net, struct sk_buff *skb,
 		const struct in6_addr *saddr, const struct in6_addr *daddr,
-		struct udp_table *udptable)
+		struct udp_table *udptable, int proto)
 {
 	struct sock *sk, *stack[256 / sizeof(struct sock *)];
 	const struct udphdr *uh = udp_hdr(skb);
@@ -781,6 +781,7 @@ static int __udp6_lib_mcast_deliver(struct net *net, struct sk_buff *skb,
 	int dif = inet6_iif(skb);
 	unsigned int count = 0, offset = offsetof(typeof(*sk), sk_nulls_node);
 	unsigned int hash2 = 0, hash2_any = 0, use_hash2 = (hslot->count > 10);
+	bool inner_flushed = false;
 
 	if (use_hash2) {
 		hash2_any = udp6_portaddr_hash(net, &in6addr_any, hnum) &
@@ -803,6 +804,7 @@ start_lookup:
 		    (uh->check || udp_sk(sk)->no_check6_rx)) {
 			if (unlikely(count == ARRAY_SIZE(stack))) {
 				flush_stack(stack, count, skb, ~0);
+				inner_flushed = true;
 				count = 0;
 			}
 			stack[count++] = sk;
@@ -821,7 +823,10 @@ start_lookup:
 	if (count) {
 		flush_stack(stack, count, skb, count - 1);
 	} else {
-		kfree_skb(skb);
+		if (!inner_flushed)
+			UDP_INC_STATS_BH(net, UDP_MIB_IGNOREDMULTI,
+					 proto == IPPROTO_UDPLITE);
+		consume_skb(skb);
 	}
 	return 0;
 }
@@ -873,7 +878,7 @@ int __udp6_lib_rcv(struct sk_buff *skb, struct udp_table *udptable,
 	 */
 	if (ipv6_addr_is_multicast(daddr))
 		return __udp6_lib_mcast_deliver(net, skb,
-				saddr, daddr, udptable);
+				saddr, daddr, udptable, proto);
 
 	/* Unicast */
 