Diffstat (limited to 'net/ipv6/udp.c')
-rw-r--r--  net/ipv6/udp.c  107
1 file changed, 28 insertions(+), 79 deletions(-)
diff --git a/net/ipv6/udp.c b/net/ipv6/udp.c
index 8d3432a70f3a..9662561701d1 100644
--- a/net/ipv6/udp.c
+++ b/net/ipv6/udp.c
@@ -23,7 +23,6 @@
  * 2 of the License, or (at your option) any later version.
  */
 
-#include <linux/config.h>
 #include <linux/errno.h>
 #include <linux/types.h>
 #include <linux/socket.h>
@@ -62,81 +61,9 @@
 
 DEFINE_SNMP_STAT(struct udp_mib, udp_stats_in6) __read_mostly;
 
-/* Grrr, addr_type already calculated by caller, but I don't want
- * to add some silly "cookie" argument to this method just for that.
- */
-static int udp_v6_get_port(struct sock *sk, unsigned short snum)
+static inline int udp_v6_get_port(struct sock *sk, unsigned short snum)
 {
-        struct sock *sk2;
-        struct hlist_node *node;
-
-        write_lock_bh(&udp_hash_lock);
-        if (snum == 0) {
-                int best_size_so_far, best, result, i;
-
-                if (udp_port_rover > sysctl_local_port_range[1] ||
-                    udp_port_rover < sysctl_local_port_range[0])
-                        udp_port_rover = sysctl_local_port_range[0];
-                best_size_so_far = 32767;
-                best = result = udp_port_rover;
-                for (i = 0; i < UDP_HTABLE_SIZE; i++, result++) {
-                        int size;
-                        struct hlist_head *list;
-
-                        list = &udp_hash[result & (UDP_HTABLE_SIZE - 1)];
-                        if (hlist_empty(list)) {
-                                if (result > sysctl_local_port_range[1])
-                                        result = sysctl_local_port_range[0] +
-                                                ((result - sysctl_local_port_range[0]) &
-                                                 (UDP_HTABLE_SIZE - 1));
-                                goto gotit;
-                        }
-                        size = 0;
-                        sk_for_each(sk2, node, list)
-                                if (++size >= best_size_so_far)
-                                        goto next;
-                        best_size_so_far = size;
-                        best = result;
-                next:;
-                }
-                result = best;
-                for(i = 0; i < (1 << 16) / UDP_HTABLE_SIZE; i++, result += UDP_HTABLE_SIZE) {
-                        if (result > sysctl_local_port_range[1])
-                                result = sysctl_local_port_range[0]
-                                        + ((result - sysctl_local_port_range[0]) &
-                                           (UDP_HTABLE_SIZE - 1));
-                        if (!udp_lport_inuse(result))
-                                break;
-                }
-                if (i >= (1 << 16) / UDP_HTABLE_SIZE)
-                        goto fail;
-gotit:
-                udp_port_rover = snum = result;
-        } else {
-                sk_for_each(sk2, node,
-                            &udp_hash[snum & (UDP_HTABLE_SIZE - 1)]) {
-                        if (inet_sk(sk2)->num == snum &&
-                            sk2 != sk &&
-                            (!sk2->sk_bound_dev_if ||
-                             !sk->sk_bound_dev_if ||
-                             sk2->sk_bound_dev_if == sk->sk_bound_dev_if) &&
-                            (!sk2->sk_reuse || !sk->sk_reuse) &&
-                            ipv6_rcv_saddr_equal(sk, sk2))
-                                goto fail;
-                }
-        }
-
-        inet_sk(sk)->num = snum;
-        if (sk_unhashed(sk)) {
-                sk_add_node(sk, &udp_hash[snum & (UDP_HTABLE_SIZE - 1)]);
-                sock_prot_inc_use(sk->sk_prot);
-        }
-        write_unlock_bh(&udp_hash_lock);
-        return 0;
-
-fail:
-        write_unlock_bh(&udp_hash_lock);
-        return 1;
+        return udp_get_port(sk, snum, ipv6_rcv_saddr_equal);
 }
 
 static void udp_v6_hash(struct sock *sk)
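The hunk above removes the duplicated IPv6 port-allocation loop and makes udp_v6_get_port a thin wrapper that delegates to a shared helper, passing ipv6_rcv_saddr_equal as the source-address comparison so the protocol-independent bind-conflict logic lives in one place. Below is a small standalone sketch of that callback pattern; the names (bound_socket, saddr_cmp_fn, get_port_generic) are illustrative only, not the kernel's interfaces.

/*
 * Standalone sketch of the callback-based port lookup the hunk above
 * switches to: a generic helper owns the bind-conflict scan and the
 * protocol supplies its own source-address comparison.  All names here
 * are illustrative, not kernel APIs.
 */
#include <stdio.h>
#include <string.h>

struct bound_socket {
    unsigned short port;
    unsigned char addr[16];     /* room for an IPv6 address */
};

typedef int (*saddr_cmp_fn)(const struct bound_socket *a,
                            const struct bound_socket *b);

static int v6_saddr_equal(const struct bound_socket *a,
                          const struct bound_socket *b)
{
    return memcmp(a->addr, b->addr, sizeof(a->addr)) == 0;
}

/* Return 0 if @want can bind among table[0..n), 1 on a conflict. */
static int get_port_generic(const struct bound_socket *table, int n,
                            const struct bound_socket *want, saddr_cmp_fn cmp)
{
    int i;

    for (i = 0; i < n; i++)
        if (table[i].port == want->port && cmp(&table[i], want))
            return 1;
    return 0;
}

int main(void)
{
    struct bound_socket inuse[] = { { 5353, { 0x20, 0x01, 0x0d, 0xb8 } } };
    struct bound_socket want   =   { 5353, { 0x20, 0x01, 0x0d, 0xb8 } };

    printf("conflict: %d\n",
           get_port_generic(inuse, 1, &want, v6_saddr_equal));
    return 0;
}

The design point is the same as in the patch: only the address comparison differs between the IPv4 and IPv6 paths, so it is the one piece handed in as a function pointer.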
@@ -346,6 +273,8 @@ out:
 
 static inline int udpv6_queue_rcv_skb(struct sock * sk, struct sk_buff *skb)
 {
+        int rc;
+
         if (!xfrm6_policy_check(sk, XFRM_POLICY_IN, skb)) {
                 kfree_skb(skb);
                 return -1;
@@ -357,7 +286,10 @@ static inline int udpv6_queue_rcv_skb(struct sock * sk, struct sk_buff *skb)
                 return 0;
         }
 
-        if (sock_queue_rcv_skb(sk,skb)<0) {
+        if ((rc = sock_queue_rcv_skb(sk,skb)) < 0) {
+                /* Note that an ENOMEM error is charged twice */
+                if (rc == -ENOMEM)
+                        UDP6_INC_STATS_BH(UDP_MIB_RCVBUFERRORS);
                 UDP6_INC_STATS_BH(UDP_MIB_INERRORS);
                 kfree_skb(skb);
                 return 0;
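Capturing the return value of sock_queue_rcv_skb lets the receive path distinguish an out-of-memory drop from other failures: -ENOMEM now bumps UDP_MIB_RCVBUFERRORS in addition to UDP_MIB_INERRORS, which is the deliberate double charge the in-line comment mentions. A minimal userspace sketch of that accounting pattern follows; the counters and queue_packet() are made-up stand-ins, not kernel APIs.

/*
 * Sketch of the error accounting from the hunk above: a failed queue
 * attempt always counts as an input error, and an out-of-memory failure
 * is additionally charged to a receive-buffer-error counter.
 */
#include <errno.h>
#include <stdio.h>

static unsigned long in_errors, rcvbuf_errors;

static int queue_packet(int simulate_enomem)
{
    return simulate_enomem ? -ENOMEM : 0;
}

static void deliver(int simulate_enomem)
{
    int rc;

    if ((rc = queue_packet(simulate_enomem)) < 0) {
        if (rc == -ENOMEM)      /* out of receive-buffer memory */
            rcvbuf_errors++;
        in_errors++;            /* every failure is an input error */
    }
}

int main(void)
{
    deliver(1);
    printf("in_errors=%lu rcvbuf_errors=%lu\n", in_errors, rcvbuf_errors);
    return 0;
}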
@@ -476,7 +408,7 @@ static int udpv6_rcv(struct sk_buff **pskb)
                 uh = skb->h.uh;
         }
 
-        if (skb->ip_summed == CHECKSUM_HW &&
+        if (skb->ip_summed == CHECKSUM_COMPLETE &&
             !csum_ipv6_magic(saddr, daddr, ulen, IPPROTO_UDP, skb->csum))
                 skb->ip_summed = CHECKSUM_UNNECESSARY;
 
@@ -783,7 +715,9 @@ do_udp_sendmsg:
                 connected = 0;
         }
 
-        err = ip6_dst_lookup(sk, &dst, fl);
+        security_sk_classify_flow(sk, fl);
+
+        err = ip6_sk_dst_lookup(sk, &dst, fl);
         if (err)
                 goto out;
         if (final_p)
@@ -841,7 +775,12 @@ do_append_data:
         if (connected) {
                 ip6_dst_store(sk, dst,
                               ipv6_addr_equal(&fl->fl6_dst, &np->daddr) ?
-                              &np->daddr : NULL);
+                              &np->daddr : NULL,
+#ifdef CONFIG_IPV6_SUBTREES
+                              ipv6_addr_equal(&fl->fl6_src, &np->saddr) ?
+                              &np->saddr :
+#endif
+                              NULL);
         } else {
                 dst_release(dst);
         }
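With CONFIG_IPV6_SUBTREES enabled, routes can be selected by source as well as destination address, so the cached dst handed to ip6_dst_store is now qualified by the flow's source address too; without subtrees that extra argument collapses to NULL. The fragment below sketches the same conditional-argument idiom in isolation; cache_route(), struct flow and CONFIG_DEMO_SUBTREES are illustrative stand-ins, not the kernel interfaces.

/*
 * Sketch of the conditional-argument idiom used in the hunk above: when
 * source-based (subtree) routing is compiled in, the cached route is
 * keyed on the flow's source address as well; otherwise that slot is NULL.
 */
#include <stdio.h>
#include <string.h>

struct flow {
    char dst[40];
    char src[40];
};

static void cache_route(const char *daddr, const char *saddr)
{
    printf("cache keyed on dst=%s src=%s\n",
           daddr ? daddr : "(any)", saddr ? saddr : "(any)");
}

int main(void)
{
    struct flow fl = { "2001:db8::1", "2001:db8::2" };
    const char *sk_daddr = "2001:db8::1";
    const char *sk_saddr = "2001:db8::2";

    cache_route(strcmp(fl.dst, sk_daddr) == 0 ? sk_daddr : NULL,
#ifdef CONFIG_DEMO_SUBTREES
                strcmp(fl.src, sk_saddr) == 0 ? sk_saddr :
#endif
                NULL);
    return 0;
}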
@@ -856,6 +795,16 @@ out:
                 UDP6_INC_STATS_USER(UDP_MIB_OUTDATAGRAMS);
                 return len;
         }
+        /*
+         * ENOBUFS = no kernel mem, SOCK_NOSPACE = no sndbuf space. Reporting
+         * ENOBUFS might not be good (it's not tunable per se), but otherwise
+         * we don't have a good statistic (IpOutDiscards but it can be too many
+         * things). We could add another new stat but at least for now that
+         * seems like overkill.
+         */
+        if (err == -ENOBUFS || test_bit(SOCK_NOSPACE, &sk->sk_socket->flags)) {
+                UDP6_INC_STATS_USER(UDP_MIB_SNDBUFERRORS);
+        }
         return err;
 
 do_confirm:
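The block added at the end of the send path treats -ENOBUFS, or a socket already flagged SOCK_NOSPACE, as a send-buffer error and bumps UDP_MIB_SNDBUFERRORS before the error is handed back, for the reason given in the comment: there is no better existing counter. A small standalone sketch of that accounting; sndbuf_errors and sock_nospace() are illustrative names, not kernel APIs.

/*
 * Sketch of the send-side accounting added at the end of the diff: an
 * -ENOBUFS result, or a socket out of send-buffer space, is counted as
 * a send-buffer error before the error is returned to the caller.
 */
#include <errno.h>
#include <stdio.h>

static unsigned long sndbuf_errors;

static int sock_nospace(void)
{
    return 0;   /* stand-in for test_bit(SOCK_NOSPACE, ...) */
}

static int finish_send(int err)
{
    if (err == -ENOBUFS || sock_nospace())
        sndbuf_errors++;        /* UDP_MIB_SNDBUFERRORS analogue */
    return err;
}

int main(void)
{
    finish_send(-ENOBUFS);
    printf("sndbuf_errors=%lu\n", sndbuf_errors);
    return 0;
}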