path: root/net/ipv6/udp.c
author     David S. Miller <davem@davemloft.net>  2008-03-06 19:22:02 -0500
committer  David S. Miller <davem@davemloft.net>  2008-03-06 19:22:02 -0500
commit     db8dac20d5199307dcfcf4e01dac4bda5edf9e89 (patch)
tree       3694d1aee5c0014fb45eec045a67ca150ca1231f /net/ipv6/udp.c
parent     ba0fa4599484b98dbb21d279fbfdb40e9c07d30d (diff)
[UDP]: Revert udplite and code split.
This reverts commit db1ed684f6c430c4cdad67d058688b8a1b5e607c ("[IPV6] UDP: Rename IPv6 UDP files."), commit 8be8af8fa4405652e6c0797db5465a4be8afb998 ("[IPV4] UDP: Move IPv4-specific bits to other file.") and commit e898d4db2749c6052072e9bc4448e396cbdeb06a ("[UDP]: Allow users to configure UDP-Lite.").

First, udplite is of such small cost, and it is a core protocol just like TCP and normal UDP are. We spent enormous amounts of effort to make udplite share as much code with core UDP as possible; all of that work is less valuable if we're just going to slap a config option on udplite support.

It is also causing build failures, as reported on linux-next, showing that the changeset was not tested very well. In fact, this is the second build failure resulting from the udplite change.

Finally, the config option provided was a bool instead of a modular option, meaning the udplite code does not even get build-tested by allmodconfig builds, and furthermore the user is not presented with a reasonable modular build option, which is particularly needed by distribution vendors.

Signed-off-by: David S. Miller <davem@davemloft.net>
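The bool-versus-modular complaint above is a Kconfig distinction: allmodconfig builds every tristate option as a module (=m), but it can only answer y or n to a bool, so a bool-only option never gets module build coverage and distributions cannot ship the code as a loadable module. Below is a minimal sketch of the two forms; the symbol name IP_UDPLITE, the prompt text, and the defaults are illustrative assumptions, not taken from the reverted changeset.

	# Variant 1 (what the reverted changeset reportedly used): a bool is
	# either built in or absent; allmodconfig cannot exercise it as =m.
	config IP_UDPLITE
		bool "UDP-Lite protocol support (RFC 3828)"
		depends on INET
		default y

	# Variant 2 (the modular option the commit message asks for): a
	# tristate can be y, m or n, so allmodconfig selects =m and the code
	# is at least build-tested on every allmodconfig run.
	config IP_UDPLITE
		tristate "UDP-Lite protocol support (RFC 3828)"
		depends on INET
		default m

A tristate also gives distribution vendors the modular build the commit message calls for, without forcing UDP-Lite into every kernel image.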
Diffstat (limited to 'net/ipv6/udp.c')
-rw-r--r--  net/ipv6/udp.c | 1065
1 files changed, 1065 insertions, 0 deletions
diff --git a/net/ipv6/udp.c b/net/ipv6/udp.c
new file mode 100644
index 000000000000..53739de829db
--- /dev/null
+++ b/net/ipv6/udp.c
@@ -0,0 +1,1065 @@
1/*
2 * UDP over IPv6
3 * Linux INET6 implementation
4 *
5 * Authors:
6 * Pedro Roque <roque@di.fc.ul.pt>
7 *
8 * Based on linux/ipv4/udp.c
9 *
10 * $Id: udp.c,v 1.65 2002/02/01 22:01:04 davem Exp $
11 *
12 * Fixes:
13 * Hideaki YOSHIFUJI : sin6_scope_id support
14 * YOSHIFUJI Hideaki @USAGI and: Support IPV6_V6ONLY socket option, which
15 * Alexey Kuznetsov allow both IPv4 and IPv6 sockets to bind
16 * a single port at the same time.
17 * Kazunori MIYAZAWA @USAGI: change process style to use ip6_append_data
18 * YOSHIFUJI Hideaki @USAGI: convert /proc/net/udp6 to seq_file.
19 *
20 * This program is free software; you can redistribute it and/or
21 * modify it under the terms of the GNU General Public License
22 * as published by the Free Software Foundation; either version
23 * 2 of the License, or (at your option) any later version.
24 */
25
26#include <linux/errno.h>
27#include <linux/types.h>
28#include <linux/socket.h>
29#include <linux/sockios.h>
30#include <linux/net.h>
31#include <linux/in6.h>
32#include <linux/netdevice.h>
33#include <linux/if_arp.h>
34#include <linux/ipv6.h>
35#include <linux/icmpv6.h>
36#include <linux/init.h>
37#include <linux/module.h>
38#include <linux/skbuff.h>
39#include <asm/uaccess.h>
40
41#include <net/ndisc.h>
42#include <net/protocol.h>
43#include <net/transp_v6.h>
44#include <net/ip6_route.h>
45#include <net/raw.h>
46#include <net/tcp_states.h>
47#include <net/ip6_checksum.h>
48#include <net/xfrm.h>
49
50#include <linux/proc_fs.h>
51#include <linux/seq_file.h>
52#include "udp_impl.h"
53
54static inline int udp_v6_get_port(struct sock *sk, unsigned short snum)
55{
56 return udp_get_port(sk, snum, ipv6_rcv_saddr_equal);
57}
58
59static struct sock *__udp6_lib_lookup(struct net *net,
60 struct in6_addr *saddr, __be16 sport,
61 struct in6_addr *daddr, __be16 dport,
62 int dif, struct hlist_head udptable[])
63{
64 struct sock *sk, *result = NULL;
65 struct hlist_node *node;
66 unsigned short hnum = ntohs(dport);
67 int badness = -1;
68
69 read_lock(&udp_hash_lock);
70 sk_for_each(sk, node, &udptable[hnum & (UDP_HTABLE_SIZE - 1)]) {
71 struct inet_sock *inet = inet_sk(sk);
72
73 if (sk->sk_net == net && sk->sk_hash == hnum &&
74 sk->sk_family == PF_INET6) {
75 struct ipv6_pinfo *np = inet6_sk(sk);
76 int score = 0;
77 if (inet->dport) {
78 if (inet->dport != sport)
79 continue;
80 score++;
81 }
82 if (!ipv6_addr_any(&np->rcv_saddr)) {
83 if (!ipv6_addr_equal(&np->rcv_saddr, daddr))
84 continue;
85 score++;
86 }
87 if (!ipv6_addr_any(&np->daddr)) {
88 if (!ipv6_addr_equal(&np->daddr, saddr))
89 continue;
90 score++;
91 }
92 if (sk->sk_bound_dev_if) {
93 if (sk->sk_bound_dev_if != dif)
94 continue;
95 score++;
96 }
97 if (score == 4) {
98 result = sk;
99 break;
100 } else if (score > badness) {
101 result = sk;
102 badness = score;
103 }
104 }
105 }
106 if (result)
107 sock_hold(result);
108 read_unlock(&udp_hash_lock);
109 return result;
110}
111
112/*
113 * This should be easy, if there is something there we
114 * return it, otherwise we block.
115 */
116
117int udpv6_recvmsg(struct kiocb *iocb, struct sock *sk,
118 struct msghdr *msg, size_t len,
119 int noblock, int flags, int *addr_len)
120{
121 struct ipv6_pinfo *np = inet6_sk(sk);
122 struct inet_sock *inet = inet_sk(sk);
123 struct sk_buff *skb;
124 unsigned int ulen, copied;
125 int peeked;
126 int err;
127 int is_udplite = IS_UDPLITE(sk);
128
129 if (addr_len)
130 *addr_len=sizeof(struct sockaddr_in6);
131
132 if (flags & MSG_ERRQUEUE)
133 return ipv6_recv_error(sk, msg, len);
134
135try_again:
136 skb = __skb_recv_datagram(sk, flags | (noblock ? MSG_DONTWAIT : 0),
137 &peeked, &err);
138 if (!skb)
139 goto out;
140
141 ulen = skb->len - sizeof(struct udphdr);
142 copied = len;
143 if (copied > ulen)
144 copied = ulen;
145 else if (copied < ulen)
146 msg->msg_flags |= MSG_TRUNC;
147
148 /*
149 * If checksum is needed at all, try to do it while copying the
150 * data. If the data is truncated, or if we only want a partial
151 * coverage checksum (UDP-Lite), do it before the copy.
152 */
153
154 if (copied < ulen || UDP_SKB_CB(skb)->partial_cov) {
155 if (udp_lib_checksum_complete(skb))
156 goto csum_copy_err;
157 }
158
159 if (skb_csum_unnecessary(skb))
160 err = skb_copy_datagram_iovec(skb, sizeof(struct udphdr),
161 msg->msg_iov, copied );
162 else {
163 err = skb_copy_and_csum_datagram_iovec(skb, sizeof(struct udphdr), msg->msg_iov);
164 if (err == -EINVAL)
165 goto csum_copy_err;
166 }
167 if (err)
168 goto out_free;
169
170 if (!peeked)
171 UDP6_INC_STATS_USER(UDP_MIB_INDATAGRAMS, is_udplite);
172
173 sock_recv_timestamp(msg, sk, skb);
174
175 /* Copy the address. */
176 if (msg->msg_name) {
177 struct sockaddr_in6 *sin6;
178
179 sin6 = (struct sockaddr_in6 *) msg->msg_name;
180 sin6->sin6_family = AF_INET6;
181 sin6->sin6_port = udp_hdr(skb)->source;
182 sin6->sin6_flowinfo = 0;
183 sin6->sin6_scope_id = 0;
184
185 if (skb->protocol == htons(ETH_P_IP))
186 ipv6_addr_set(&sin6->sin6_addr, 0, 0,
187 htonl(0xffff), ip_hdr(skb)->saddr);
188 else {
189 ipv6_addr_copy(&sin6->sin6_addr,
190 &ipv6_hdr(skb)->saddr);
191 if (ipv6_addr_type(&sin6->sin6_addr) & IPV6_ADDR_LINKLOCAL)
192 sin6->sin6_scope_id = IP6CB(skb)->iif;
193 }
194
195 }
196 if (skb->protocol == htons(ETH_P_IP)) {
197 if (inet->cmsg_flags)
198 ip_cmsg_recv(msg, skb);
199 } else {
200 if (np->rxopt.all)
201 datagram_recv_ctl(sk, msg, skb);
202 }
203
204 err = copied;
205 if (flags & MSG_TRUNC)
206 err = ulen;
207
208out_free:
209 lock_sock(sk);
210 skb_free_datagram(sk, skb);
211 release_sock(sk);
212out:
213 return err;
214
215csum_copy_err:
216 lock_sock(sk);
217 if (!skb_kill_datagram(sk, skb, flags))
218 UDP6_INC_STATS_USER(UDP_MIB_INERRORS, is_udplite);
219 release_sock(sk);
220
221 if (flags & MSG_DONTWAIT)
222 return -EAGAIN;
223 goto try_again;
224}
225
226void __udp6_lib_err(struct sk_buff *skb, struct inet6_skb_parm *opt,
227 int type, int code, int offset, __be32 info,
228 struct hlist_head udptable[] )
229{
230 struct ipv6_pinfo *np;
231 struct ipv6hdr *hdr = (struct ipv6hdr*)skb->data;
232 struct in6_addr *saddr = &hdr->saddr;
233 struct in6_addr *daddr = &hdr->daddr;
234 struct udphdr *uh = (struct udphdr*)(skb->data+offset);
235 struct sock *sk;
236 int err;
237
238 sk = __udp6_lib_lookup(skb->dev->nd_net, daddr, uh->dest,
239 saddr, uh->source, inet6_iif(skb), udptable);
240 if (sk == NULL)
241 return;
242
243 np = inet6_sk(sk);
244
245 if (!icmpv6_err_convert(type, code, &err) && !np->recverr)
246 goto out;
247
248 if (sk->sk_state != TCP_ESTABLISHED && !np->recverr)
249 goto out;
250
251 if (np->recverr)
252 ipv6_icmp_error(sk, skb, err, uh->dest, ntohl(info), (u8 *)(uh+1));
253
254 sk->sk_err = err;
255 sk->sk_error_report(sk);
256out:
257 sock_put(sk);
258}
259
260static __inline__ void udpv6_err(struct sk_buff *skb,
261 struct inet6_skb_parm *opt, int type,
262 int code, int offset, __be32 info )
263{
264 __udp6_lib_err(skb, opt, type, code, offset, info, udp_hash);
265}
266
267int udpv6_queue_rcv_skb(struct sock * sk, struct sk_buff *skb)
268{
269 struct udp_sock *up = udp_sk(sk);
270 int rc;
271 int is_udplite = IS_UDPLITE(sk);
272
273 if (!xfrm6_policy_check(sk, XFRM_POLICY_IN, skb))
274 goto drop;
275
276 /*
277 * UDP-Lite specific tests, ignored on UDP sockets (see net/ipv4/udp.c).
278 */
279 if ((is_udplite & UDPLITE_RECV_CC) && UDP_SKB_CB(skb)->partial_cov) {
280
281 if (up->pcrlen == 0) { /* full coverage was set */
282 LIMIT_NETDEBUG(KERN_WARNING "UDPLITE6: partial coverage"
283 " %d while full coverage %d requested\n",
284 UDP_SKB_CB(skb)->cscov, skb->len);
285 goto drop;
286 }
287 if (UDP_SKB_CB(skb)->cscov < up->pcrlen) {
288 LIMIT_NETDEBUG(KERN_WARNING "UDPLITE6: coverage %d "
289 "too small, need min %d\n",
290 UDP_SKB_CB(skb)->cscov, up->pcrlen);
291 goto drop;
292 }
293 }
294
295 if (sk->sk_filter) {
296 if (udp_lib_checksum_complete(skb))
297 goto drop;
298 }
299
300 if ((rc = sock_queue_rcv_skb(sk,skb)) < 0) {
301 /* Note that an ENOMEM error is charged twice */
302 if (rc == -ENOMEM)
303 UDP6_INC_STATS_BH(UDP_MIB_RCVBUFERRORS, is_udplite);
304 goto drop;
305 }
306
307 return 0;
308drop:
309 UDP6_INC_STATS_BH(UDP_MIB_INERRORS, is_udplite);
310 kfree_skb(skb);
311 return -1;
312}
313
314static struct sock *udp_v6_mcast_next(struct sock *sk,
315 __be16 loc_port, struct in6_addr *loc_addr,
316 __be16 rmt_port, struct in6_addr *rmt_addr,
317 int dif)
318{
319 struct hlist_node *node;
320 struct sock *s = sk;
321 unsigned short num = ntohs(loc_port);
322
323 sk_for_each_from(s, node) {
324 struct inet_sock *inet = inet_sk(s);
325
326 if (s->sk_hash == num && s->sk_family == PF_INET6) {
327 struct ipv6_pinfo *np = inet6_sk(s);
328 if (inet->dport) {
329 if (inet->dport != rmt_port)
330 continue;
331 }
332 if (!ipv6_addr_any(&np->daddr) &&
333 !ipv6_addr_equal(&np->daddr, rmt_addr))
334 continue;
335
336 if (s->sk_bound_dev_if && s->sk_bound_dev_if != dif)
337 continue;
338
339 if (!ipv6_addr_any(&np->rcv_saddr)) {
340 if (!ipv6_addr_equal(&np->rcv_saddr, loc_addr))
341 continue;
342 }
343 if (!inet6_mc_check(s, loc_addr, rmt_addr))
344 continue;
345 return s;
346 }
347 }
348 return NULL;
349}
350
351/*
352 * Note: called only from the BH handler context,
353 * so we don't need to lock the hashes.
354 */
355static int __udp6_lib_mcast_deliver(struct sk_buff *skb, struct in6_addr *saddr,
356 struct in6_addr *daddr, struct hlist_head udptable[])
357{
358 struct sock *sk, *sk2;
359 const struct udphdr *uh = udp_hdr(skb);
360 int dif;
361
362 read_lock(&udp_hash_lock);
363 sk = sk_head(&udptable[ntohs(uh->dest) & (UDP_HTABLE_SIZE - 1)]);
364 dif = inet6_iif(skb);
365 sk = udp_v6_mcast_next(sk, uh->dest, daddr, uh->source, saddr, dif);
366 if (!sk) {
367 kfree_skb(skb);
368 goto out;
369 }
370
371 sk2 = sk;
372 while ((sk2 = udp_v6_mcast_next(sk_next(sk2), uh->dest, daddr,
373 uh->source, saddr, dif))) {
374 struct sk_buff *buff = skb_clone(skb, GFP_ATOMIC);
375 if (buff) {
376 bh_lock_sock_nested(sk2);
377 if (!sock_owned_by_user(sk2))
378 udpv6_queue_rcv_skb(sk2, buff);
379 else
380 sk_add_backlog(sk2, buff);
381 bh_unlock_sock(sk2);
382 }
383 }
384 bh_lock_sock_nested(sk);
385 if (!sock_owned_by_user(sk))
386 udpv6_queue_rcv_skb(sk, skb);
387 else
388 sk_add_backlog(sk, skb);
389 bh_unlock_sock(sk);
390out:
391 read_unlock(&udp_hash_lock);
392 return 0;
393}
394
395static inline int udp6_csum_init(struct sk_buff *skb, struct udphdr *uh,
396 int proto)
397{
398 int err;
399
400 UDP_SKB_CB(skb)->partial_cov = 0;
401 UDP_SKB_CB(skb)->cscov = skb->len;
402
403 if (proto == IPPROTO_UDPLITE) {
404 err = udplite_checksum_init(skb, uh);
405 if (err)
406 return err;
407 }
408
409 if (uh->check == 0) {
410 /* RFC 2460 section 8.1 says that we SHOULD log
411 this error. Well, it is reasonable.
412 */
413 LIMIT_NETDEBUG(KERN_INFO "IPv6: udp checksum is 0\n");
414 return 1;
415 }
416 if (skb->ip_summed == CHECKSUM_COMPLETE &&
417 !csum_ipv6_magic(&ipv6_hdr(skb)->saddr, &ipv6_hdr(skb)->daddr,
418 skb->len, proto, skb->csum))
419 skb->ip_summed = CHECKSUM_UNNECESSARY;
420
421 if (!skb_csum_unnecessary(skb))
422 skb->csum = ~csum_unfold(csum_ipv6_magic(&ipv6_hdr(skb)->saddr,
423 &ipv6_hdr(skb)->daddr,
424 skb->len, proto, 0));
425
426 return 0;
427}
428
429int __udp6_lib_rcv(struct sk_buff *skb, struct hlist_head udptable[],
430 int proto)
431{
432 struct sock *sk;
433 struct udphdr *uh;
434 struct net_device *dev = skb->dev;
435 struct in6_addr *saddr, *daddr;
436 u32 ulen = 0;
437
438 if (!pskb_may_pull(skb, sizeof(struct udphdr)))
439 goto short_packet;
440
441 saddr = &ipv6_hdr(skb)->saddr;
442 daddr = &ipv6_hdr(skb)->daddr;
443 uh = udp_hdr(skb);
444
445 ulen = ntohs(uh->len);
446 if (ulen > skb->len)
447 goto short_packet;
448
449 if (proto == IPPROTO_UDP) {
450 /* UDP validates ulen. */
451
452 /* Check for jumbo payload */
453 if (ulen == 0)
454 ulen = skb->len;
455
456 if (ulen < sizeof(*uh))
457 goto short_packet;
458
459 if (ulen < skb->len) {
460 if (pskb_trim_rcsum(skb, ulen))
461 goto short_packet;
462 saddr = &ipv6_hdr(skb)->saddr;
463 daddr = &ipv6_hdr(skb)->daddr;
464 uh = udp_hdr(skb);
465 }
466 }
467
468 if (udp6_csum_init(skb, uh, proto))
469 goto discard;
470
471 /*
472 * Multicast receive code
473 */
474 if (ipv6_addr_is_multicast(daddr))
475 return __udp6_lib_mcast_deliver(skb, saddr, daddr, udptable);
476
477 /* Unicast */
478
479 /*
480 * check socket cache ... must talk to Alan about his plans
481 * for sock caches... i'll skip this for now.
482 */
483 sk = __udp6_lib_lookup(skb->dev->nd_net, saddr, uh->source,
484 daddr, uh->dest, inet6_iif(skb), udptable);
485
486 if (sk == NULL) {
487 if (!xfrm6_policy_check(NULL, XFRM_POLICY_IN, skb))
488 goto discard;
489
490 if (udp_lib_checksum_complete(skb))
491 goto discard;
492 UDP6_INC_STATS_BH(UDP_MIB_NOPORTS, proto == IPPROTO_UDPLITE);
493
494 icmpv6_send(skb, ICMPV6_DEST_UNREACH, ICMPV6_PORT_UNREACH, 0, dev);
495
496 kfree_skb(skb);
497 return 0;
498 }
499
500 /* deliver */
501
502 bh_lock_sock_nested(sk);
503 if (!sock_owned_by_user(sk))
504 udpv6_queue_rcv_skb(sk, skb);
505 else
506 sk_add_backlog(sk, skb);
507 bh_unlock_sock(sk);
508 sock_put(sk);
509 return 0;
510
511short_packet:
512 LIMIT_NETDEBUG(KERN_DEBUG "UDP%sv6: short packet: %d/%u\n",
513 proto == IPPROTO_UDPLITE ? "-Lite" : "",
514 ulen, skb->len);
515
516discard:
517 UDP6_INC_STATS_BH(UDP_MIB_INERRORS, proto == IPPROTO_UDPLITE);
518 kfree_skb(skb);
519 return 0;
520}
521
522static __inline__ int udpv6_rcv(struct sk_buff *skb)
523{
524 return __udp6_lib_rcv(skb, udp_hash, IPPROTO_UDP);
525}
526
527/*
528 * Throw away all pending data and cancel the corking. Socket is locked.
529 */
530static void udp_v6_flush_pending_frames(struct sock *sk)
531{
532 struct udp_sock *up = udp_sk(sk);
533
534 if (up->pending) {
535 up->len = 0;
536 up->pending = 0;
537 ip6_flush_pending_frames(sk);
538 }
539}
540
541/*
542 * Sending
543 */
544
545static int udp_v6_push_pending_frames(struct sock *sk)
546{
547 struct sk_buff *skb;
548 struct udphdr *uh;
549 struct udp_sock *up = udp_sk(sk);
550 struct inet_sock *inet = inet_sk(sk);
551 struct flowi *fl = &inet->cork.fl;
552 int err = 0;
553 int is_udplite = IS_UDPLITE(sk);
554 __wsum csum = 0;
555
556 /* Grab the skbuff where UDP header space exists. */
557 if ((skb = skb_peek(&sk->sk_write_queue)) == NULL)
558 goto out;
559
560 /*
561 * Create a UDP header
562 */
563 uh = udp_hdr(skb);
564 uh->source = fl->fl_ip_sport;
565 uh->dest = fl->fl_ip_dport;
566 uh->len = htons(up->len);
567 uh->check = 0;
568
569 if (is_udplite)
570 csum = udplite_csum_outgoing(sk, skb);
571 else
572 csum = udp_csum_outgoing(sk, skb);
573
574 /* add protocol-dependent pseudo-header */
575 uh->check = csum_ipv6_magic(&fl->fl6_src, &fl->fl6_dst,
576 up->len, fl->proto, csum );
577 if (uh->check == 0)
578 uh->check = CSUM_MANGLED_0;
579
580 err = ip6_push_pending_frames(sk);
581out:
582 up->len = 0;
583 up->pending = 0;
584 if (!err)
585 UDP6_INC_STATS_USER(UDP_MIB_OUTDATAGRAMS, is_udplite);
586 return err;
587}
588
589int udpv6_sendmsg(struct kiocb *iocb, struct sock *sk,
590 struct msghdr *msg, size_t len)
591{
592 struct ipv6_txoptions opt_space;
593 struct udp_sock *up = udp_sk(sk);
594 struct inet_sock *inet = inet_sk(sk);
595 struct ipv6_pinfo *np = inet6_sk(sk);
596 struct sockaddr_in6 *sin6 = (struct sockaddr_in6 *) msg->msg_name;
597 struct in6_addr *daddr, *final_p = NULL, final;
598 struct ipv6_txoptions *opt = NULL;
599 struct ip6_flowlabel *flowlabel = NULL;
600 struct flowi fl;
601 struct dst_entry *dst;
602 int addr_len = msg->msg_namelen;
603 int ulen = len;
604 int hlimit = -1;
605 int tclass = -1;
606 int corkreq = up->corkflag || msg->msg_flags&MSG_MORE;
607 int err;
608 int connected = 0;
609 int is_udplite = IS_UDPLITE(sk);
610 int (*getfrag)(void *, char *, int, int, int, struct sk_buff *);
611
612 /* destination address check */
613 if (sin6) {
614 if (addr_len < offsetof(struct sockaddr, sa_data))
615 return -EINVAL;
616
617 switch (sin6->sin6_family) {
618 case AF_INET6:
619 if (addr_len < SIN6_LEN_RFC2133)
620 return -EINVAL;
621 daddr = &sin6->sin6_addr;
622 break;
623 case AF_INET:
624 goto do_udp_sendmsg;
625 case AF_UNSPEC:
626 msg->msg_name = sin6 = NULL;
627 msg->msg_namelen = addr_len = 0;
628 daddr = NULL;
629 break;
630 default:
631 return -EINVAL;
632 }
633 } else if (!up->pending) {
634 if (sk->sk_state != TCP_ESTABLISHED)
635 return -EDESTADDRREQ;
636 daddr = &np->daddr;
637 } else
638 daddr = NULL;
639
640 if (daddr) {
641 if (ipv6_addr_v4mapped(daddr)) {
642 struct sockaddr_in sin;
643 sin.sin_family = AF_INET;
644 sin.sin_port = sin6 ? sin6->sin6_port : inet->dport;
645 sin.sin_addr.s_addr = daddr->s6_addr32[3];
646 msg->msg_name = &sin;
647 msg->msg_namelen = sizeof(sin);
648do_udp_sendmsg:
649 if (__ipv6_only_sock(sk))
650 return -ENETUNREACH;
651 return udp_sendmsg(iocb, sk, msg, len);
652 }
653 }
654
655 if (up->pending == AF_INET)
656 return udp_sendmsg(iocb, sk, msg, len);
657
658 /* Rough check on arithmetic overflow,
659 better check is made in ip6_append_data().
660 */
661 if (len > INT_MAX - sizeof(struct udphdr))
662 return -EMSGSIZE;
663
664 if (up->pending) {
665 /*
666 * There are pending frames.
667 * The socket lock must be held while it's corked.
668 */
669 lock_sock(sk);
670 if (likely(up->pending)) {
671 if (unlikely(up->pending != AF_INET6)) {
672 release_sock(sk);
673 return -EAFNOSUPPORT;
674 }
675 dst = NULL;
676 goto do_append_data;
677 }
678 release_sock(sk);
679 }
680 ulen += sizeof(struct udphdr);
681
682 memset(&fl, 0, sizeof(fl));
683
684 if (sin6) {
685 if (sin6->sin6_port == 0)
686 return -EINVAL;
687
688 fl.fl_ip_dport = sin6->sin6_port;
689 daddr = &sin6->sin6_addr;
690
691 if (np->sndflow) {
692 fl.fl6_flowlabel = sin6->sin6_flowinfo&IPV6_FLOWINFO_MASK;
693 if (fl.fl6_flowlabel&IPV6_FLOWLABEL_MASK) {
694 flowlabel = fl6_sock_lookup(sk, fl.fl6_flowlabel);
695 if (flowlabel == NULL)
696 return -EINVAL;
697 daddr = &flowlabel->dst;
698 }
699 }
700
701 /*
702 * Otherwise it will be difficult to maintain
703 * sk->sk_dst_cache.
704 */
705 if (sk->sk_state == TCP_ESTABLISHED &&
706 ipv6_addr_equal(daddr, &np->daddr))
707 daddr = &np->daddr;
708
709 if (addr_len >= sizeof(struct sockaddr_in6) &&
710 sin6->sin6_scope_id &&
711 ipv6_addr_type(daddr)&IPV6_ADDR_LINKLOCAL)
712 fl.oif = sin6->sin6_scope_id;
713 } else {
714 if (sk->sk_state != TCP_ESTABLISHED)
715 return -EDESTADDRREQ;
716
717 fl.fl_ip_dport = inet->dport;
718 daddr = &np->daddr;
719 fl.fl6_flowlabel = np->flow_label;
720 connected = 1;
721 }
722
723 if (!fl.oif)
724 fl.oif = sk->sk_bound_dev_if;
725
726 if (msg->msg_controllen) {
727 opt = &opt_space;
728 memset(opt, 0, sizeof(struct ipv6_txoptions));
729 opt->tot_len = sizeof(*opt);
730
731 err = datagram_send_ctl(msg, &fl, opt, &hlimit, &tclass);
732 if (err < 0) {
733 fl6_sock_release(flowlabel);
734 return err;
735 }
736 if ((fl.fl6_flowlabel&IPV6_FLOWLABEL_MASK) && !flowlabel) {
737 flowlabel = fl6_sock_lookup(sk, fl.fl6_flowlabel);
738 if (flowlabel == NULL)
739 return -EINVAL;
740 }
741 if (!(opt->opt_nflen|opt->opt_flen))
742 opt = NULL;
743 connected = 0;
744 }
745 if (opt == NULL)
746 opt = np->opt;
747 if (flowlabel)
748 opt = fl6_merge_options(&opt_space, flowlabel, opt);
749 opt = ipv6_fixup_options(&opt_space, opt);
750
751 fl.proto = sk->sk_protocol;
752 ipv6_addr_copy(&fl.fl6_dst, daddr);
753 if (ipv6_addr_any(&fl.fl6_src) && !ipv6_addr_any(&np->saddr))
754 ipv6_addr_copy(&fl.fl6_src, &np->saddr);
755 fl.fl_ip_sport = inet->sport;
756
757 /* merge ip6_build_xmit from ip6_output */
758 if (opt && opt->srcrt) {
759 struct rt0_hdr *rt0 = (struct rt0_hdr *) opt->srcrt;
760 ipv6_addr_copy(&final, &fl.fl6_dst);
761 ipv6_addr_copy(&fl.fl6_dst, rt0->addr);
762 final_p = &final;
763 connected = 0;
764 }
765
766 if (!fl.oif && ipv6_addr_is_multicast(&fl.fl6_dst)) {
767 fl.oif = np->mcast_oif;
768 connected = 0;
769 }
770
771 security_sk_classify_flow(sk, &fl);
772
773 err = ip6_sk_dst_lookup(sk, &dst, &fl);
774 if (err)
775 goto out;
776 if (final_p)
777 ipv6_addr_copy(&fl.fl6_dst, final_p);
778
779 if ((err = __xfrm_lookup(&dst, &fl, sk, XFRM_LOOKUP_WAIT)) < 0) {
780 if (err == -EREMOTE)
781 err = ip6_dst_blackhole(sk, &dst, &fl);
782 if (err < 0)
783 goto out;
784 }
785
786 if (hlimit < 0) {
787 if (ipv6_addr_is_multicast(&fl.fl6_dst))
788 hlimit = np->mcast_hops;
789 else
790 hlimit = np->hop_limit;
791 if (hlimit < 0)
792 hlimit = dst_metric(dst, RTAX_HOPLIMIT);
793 if (hlimit < 0)
794 hlimit = ipv6_get_hoplimit(dst->dev);
795 }
796
797 if (tclass < 0) {
798 tclass = np->tclass;
799 if (tclass < 0)
800 tclass = 0;
801 }
802
803 if (msg->msg_flags&MSG_CONFIRM)
804 goto do_confirm;
805back_from_confirm:
806
807 lock_sock(sk);
808 if (unlikely(up->pending)) {
809 /* The socket is already corked while preparing it. */
810 /* ... which is an evident application bug. --ANK */
811 release_sock(sk);
812
813 LIMIT_NETDEBUG(KERN_DEBUG "udp cork app bug 2\n");
814 err = -EINVAL;
815 goto out;
816 }
817
818 up->pending = AF_INET6;
819
820do_append_data:
821 up->len += ulen;
822 getfrag = is_udplite ? udplite_getfrag : ip_generic_getfrag;
823 err = ip6_append_data(sk, getfrag, msg->msg_iov, ulen,
824 sizeof(struct udphdr), hlimit, tclass, opt, &fl,
825 (struct rt6_info*)dst,
826 corkreq ? msg->msg_flags|MSG_MORE : msg->msg_flags);
827 if (err)
828 udp_v6_flush_pending_frames(sk);
829 else if (!corkreq)
830 err = udp_v6_push_pending_frames(sk);
831 else if (unlikely(skb_queue_empty(&sk->sk_write_queue)))
832 up->pending = 0;
833
834 if (dst) {
835 if (connected) {
836 ip6_dst_store(sk, dst,
837 ipv6_addr_equal(&fl.fl6_dst, &np->daddr) ?
838 &np->daddr : NULL,
839#ifdef CONFIG_IPV6_SUBTREES
840 ipv6_addr_equal(&fl.fl6_src, &np->saddr) ?
841 &np->saddr :
842#endif
843 NULL);
844 } else {
845 dst_release(dst);
846 }
847 }
848
849 if (err > 0)
850 err = np->recverr ? net_xmit_errno(err) : 0;
851 release_sock(sk);
852out:
853 fl6_sock_release(flowlabel);
854 if (!err)
855 return len;
856 /*
857 * ENOBUFS = no kernel mem, SOCK_NOSPACE = no sndbuf space. Reporting
858 * ENOBUFS might not be good (it's not tunable per se), but otherwise
859 * we don't have a good statistic (IpOutDiscards but it can be too many
860 * things). We could add another new stat but at least for now that
861 * seems like overkill.
862 */
863 if (err == -ENOBUFS || test_bit(SOCK_NOSPACE, &sk->sk_socket->flags)) {
864 UDP6_INC_STATS_USER(UDP_MIB_SNDBUFERRORS, is_udplite);
865 }
866 return err;
867
868do_confirm:
869 dst_confirm(dst);
870 if (!(msg->msg_flags&MSG_PROBE) || len)
871 goto back_from_confirm;
872 err = 0;
873 goto out;
874}
875
876int udpv6_destroy_sock(struct sock *sk)
877{
878 lock_sock(sk);
879 udp_v6_flush_pending_frames(sk);
880 release_sock(sk);
881
882 inet6_destroy_sock(sk);
883
884 return 0;
885}
886
887/*
888 * Socket option code for UDP
889 */
890int udpv6_setsockopt(struct sock *sk, int level, int optname,
891 char __user *optval, int optlen)
892{
893 if (level == SOL_UDP || level == SOL_UDPLITE)
894 return udp_lib_setsockopt(sk, level, optname, optval, optlen,
895 udp_v6_push_pending_frames);
896 return ipv6_setsockopt(sk, level, optname, optval, optlen);
897}
898
899#ifdef CONFIG_COMPAT
900int compat_udpv6_setsockopt(struct sock *sk, int level, int optname,
901 char __user *optval, int optlen)
902{
903 if (level == SOL_UDP || level == SOL_UDPLITE)
904 return udp_lib_setsockopt(sk, level, optname, optval, optlen,
905 udp_v6_push_pending_frames);
906 return compat_ipv6_setsockopt(sk, level, optname, optval, optlen);
907}
908#endif
909
910int udpv6_getsockopt(struct sock *sk, int level, int optname,
911 char __user *optval, int __user *optlen)
912{
913 if (level == SOL_UDP || level == SOL_UDPLITE)
914 return udp_lib_getsockopt(sk, level, optname, optval, optlen);
915 return ipv6_getsockopt(sk, level, optname, optval, optlen);
916}
917
918#ifdef CONFIG_COMPAT
919int compat_udpv6_getsockopt(struct sock *sk, int level, int optname,
920 char __user *optval, int __user *optlen)
921{
922 if (level == SOL_UDP || level == SOL_UDPLITE)
923 return udp_lib_getsockopt(sk, level, optname, optval, optlen);
924 return compat_ipv6_getsockopt(sk, level, optname, optval, optlen);
925}
926#endif
927
928static struct inet6_protocol udpv6_protocol = {
929 .handler = udpv6_rcv,
930 .err_handler = udpv6_err,
931 .flags = INET6_PROTO_NOPOLICY|INET6_PROTO_FINAL,
932};
933
934/* ------------------------------------------------------------------------ */
935#ifdef CONFIG_PROC_FS
936
937static void udp6_sock_seq_show(struct seq_file *seq, struct sock *sp, int bucket)
938{
939 struct inet_sock *inet = inet_sk(sp);
940 struct ipv6_pinfo *np = inet6_sk(sp);
941 struct in6_addr *dest, *src;
942 __u16 destp, srcp;
943
944 dest = &np->daddr;
945 src = &np->rcv_saddr;
946 destp = ntohs(inet->dport);
947 srcp = ntohs(inet->sport);
948 seq_printf(seq,
949 "%4d: %08X%08X%08X%08X:%04X %08X%08X%08X%08X:%04X "
950 "%02X %08X:%08X %02X:%08lX %08X %5d %8d %lu %d %p\n",
951 bucket,
952 src->s6_addr32[0], src->s6_addr32[1],
953 src->s6_addr32[2], src->s6_addr32[3], srcp,
954 dest->s6_addr32[0], dest->s6_addr32[1],
955 dest->s6_addr32[2], dest->s6_addr32[3], destp,
956 sp->sk_state,
957 atomic_read(&sp->sk_wmem_alloc),
958 atomic_read(&sp->sk_rmem_alloc),
959 0, 0L, 0,
960 sock_i_uid(sp), 0,
961 sock_i_ino(sp),
962 atomic_read(&sp->sk_refcnt), sp);
963}
964
965int udp6_seq_show(struct seq_file *seq, void *v)
966{
967 if (v == SEQ_START_TOKEN)
968 seq_printf(seq,
969 " sl "
970 "local_address "
971 "remote_address "
972 "st tx_queue rx_queue tr tm->when retrnsmt"
973 " uid timeout inode\n");
974 else
975 udp6_sock_seq_show(seq, v, ((struct udp_iter_state *)seq->private)->bucket);
976 return 0;
977}
978
979static struct file_operations udp6_seq_fops;
980static struct udp_seq_afinfo udp6_seq_afinfo = {
981 .owner = THIS_MODULE,
982 .name = "udp6",
983 .family = AF_INET6,
984 .hashtable = udp_hash,
985 .seq_show = udp6_seq_show,
986 .seq_fops = &udp6_seq_fops,
987};
988
989int __init udp6_proc_init(void)
990{
991 return udp_proc_register(&udp6_seq_afinfo);
992}
993
994void udp6_proc_exit(void) {
995 udp_proc_unregister(&udp6_seq_afinfo);
996}
997#endif /* CONFIG_PROC_FS */
998
999/* ------------------------------------------------------------------------ */
1000
1001DEFINE_PROTO_INUSE(udpv6)
1002
1003struct proto udpv6_prot = {
1004 .name = "UDPv6",
1005 .owner = THIS_MODULE,
1006 .close = udp_lib_close,
1007 .connect = ip6_datagram_connect,
1008 .disconnect = udp_disconnect,
1009 .ioctl = udp_ioctl,
1010 .destroy = udpv6_destroy_sock,
1011 .setsockopt = udpv6_setsockopt,
1012 .getsockopt = udpv6_getsockopt,
1013 .sendmsg = udpv6_sendmsg,
1014 .recvmsg = udpv6_recvmsg,
1015 .backlog_rcv = udpv6_queue_rcv_skb,
1016 .hash = udp_lib_hash,
1017 .unhash = udp_lib_unhash,
1018 .get_port = udp_v6_get_port,
1019 .memory_allocated = &udp_memory_allocated,
1020 .sysctl_mem = sysctl_udp_mem,
1021 .sysctl_wmem = &sysctl_udp_wmem_min,
1022 .sysctl_rmem = &sysctl_udp_rmem_min,
1023 .obj_size = sizeof(struct udp6_sock),
1024#ifdef CONFIG_COMPAT
1025 .compat_setsockopt = compat_udpv6_setsockopt,
1026 .compat_getsockopt = compat_udpv6_getsockopt,
1027#endif
1028 REF_PROTO_INUSE(udpv6)
1029};
1030
1031static struct inet_protosw udpv6_protosw = {
1032 .type = SOCK_DGRAM,
1033 .protocol = IPPROTO_UDP,
1034 .prot = &udpv6_prot,
1035 .ops = &inet6_dgram_ops,
1036 .capability =-1,
1037 .no_check = UDP_CSUM_DEFAULT,
1038 .flags = INET_PROTOSW_PERMANENT,
1039};
1040
1041
1042int __init udpv6_init(void)
1043{
1044 int ret;
1045
1046 ret = inet6_add_protocol(&udpv6_protocol, IPPROTO_UDP);
1047 if (ret)
1048 goto out;
1049
1050 ret = inet6_register_protosw(&udpv6_protosw);
1051 if (ret)
1052 goto out_udpv6_protocol;
1053out:
1054 return ret;
1055
1056out_udpv6_protocol:
1057 inet6_del_protocol(&udpv6_protocol, IPPROTO_UDP);
1058 goto out;
1059}
1060
1061void udpv6_exit(void)
1062{
1063 inet6_unregister_protosw(&udpv6_protosw);
1064 inet6_del_protocol(&udpv6_protocol, IPPROTO_UDP);
1065}