Diffstat (limited to 'net/dccp/ipv6.c')
 net/dccp/ipv6.c | 1438 ++++++++++++++++++++++++++++++++++++++++++++++++++++++
 1 file changed, 1438 insertions(+), 0 deletions(-)
diff --git a/net/dccp/ipv6.c b/net/dccp/ipv6.c
new file mode 100644
index 000000000000..a7d2aee5b3af
--- /dev/null
+++ b/net/dccp/ipv6.c
@@ -0,0 +1,1438 @@
1/*
2 * DCCP over IPv6
3 * Linux INET6 implementation
4 *
5 * Based on net/dccp6/ipv6.c
6 *
7 * Arnaldo Carvalho de Melo <acme@ghostprotocols.net>
8 *
9 * This program is free software; you can redistribute it and/or
10 * modify it under the terms of the GNU General Public License
11 * as published by the Free Software Foundation; either version
12 * 2 of the License, or (at your option) any later version.
13 */
14
15#include <linux/config.h>
16#include <linux/module.h>
17#include <linux/random.h>
18#include <linux/xfrm.h>
19
20#include <net/addrconf.h>
21#include <net/inet_common.h>
22#include <net/inet_hashtables.h>
23#include <net/inet6_connection_sock.h>
24#include <net/inet6_hashtables.h>
25#include <net/ip6_route.h>
26#include <net/ipv6.h>
27#include <net/protocol.h>
28#include <net/transp_v6.h>
29#include <net/xfrm.h>
30
31#include "dccp.h"
32#include "ipv6.h"
33
34static void dccp_v6_ctl_send_reset(struct sk_buff *skb);
35static void dccp_v6_reqsk_send_ack(struct sk_buff *skb,
36 struct request_sock *req);
37static void dccp_v6_send_check(struct sock *sk, int len, struct sk_buff *skb);
38
39static int dccp_v6_do_rcv(struct sock *sk, struct sk_buff *skb);
40
41static struct inet_connection_sock_af_ops dccp_ipv6_mapped;
42static struct inet_connection_sock_af_ops dccp_ipv6_af_ops;
43
44static int dccp_v6_get_port(struct sock *sk, unsigned short snum)
45{
46 return inet_csk_get_port(&dccp_hashinfo, sk, snum,
47 inet6_csk_bind_conflict);
48}
49
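/*
 * v6-mapped sockets (icsk_af_ops == &dccp_ipv6_mapped) carry IPv4 traffic,
 * so they are hashed through the plain IPv4 dccp_prot hash; native IPv6
 * sockets go into the IPv6 established hash with BHs disabled.
 */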
50static void dccp_v6_hash(struct sock *sk)
51{
52 if (sk->sk_state != DCCP_CLOSED) {
53 if (inet_csk(sk)->icsk_af_ops == &dccp_ipv6_mapped) {
54 dccp_prot.hash(sk);
55 return;
56 }
57 local_bh_disable();
58 __inet6_hash(&dccp_hashinfo, sk);
59 local_bh_enable();
60 }
61}
62
63static inline u16 dccp_v6_check(struct dccp_hdr *dh, int len,
64 struct in6_addr *saddr,
65 struct in6_addr *daddr,
66 unsigned long base)
67{
68 return csum_ipv6_magic(saddr, daddr, len, IPPROTO_DCCP, base);
69}
70
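/*
 * Pick the initial sequence number for a new connection: IPv6 packets reuse
 * the secure TCPv6 ISN generator, IPv4 (v6-mapped) packets use the DCCP one.
 * Both are keyed on the address/port 4-tuple of the incoming packet.
 */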
71static __u32 dccp_v6_init_sequence(struct sock *sk, struct sk_buff *skb)
72{
73 const struct dccp_hdr *dh = dccp_hdr(skb);
74
75 if (skb->protocol == htons(ETH_P_IPV6))
76 return secure_tcpv6_sequence_number(skb->nh.ipv6h->daddr.s6_addr32,
77 skb->nh.ipv6h->saddr.s6_addr32,
78 dh->dccph_dport,
79 dh->dccph_sport);
80 else
81 return secure_dccp_sequence_number(skb->nh.iph->daddr,
82 skb->nh.iph->saddr,
83 dh->dccph_dport,
84 dh->dccph_sport);
85}
86
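/*
 * Check that the (saddr, daddr, lport, dport) 4-tuple is not already in use:
 * walk the TIME-WAIT chain and then the established chain under the ehash
 * bucket lock.  If unique, hash the socket in place; a matching TIME-WAIT
 * socket is handed back via *twp (or descheduled here) so that its port can
 * be recycled.
 */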
87static int __dccp_v6_check_established(struct sock *sk, const __u16 lport,
88 struct inet_timewait_sock **twp)
89{
90 struct inet_sock *inet = inet_sk(sk);
91 const struct ipv6_pinfo *np = inet6_sk(sk);
92 const struct in6_addr *daddr = &np->rcv_saddr;
93 const struct in6_addr *saddr = &np->daddr;
94 const int dif = sk->sk_bound_dev_if;
95 const u32 ports = INET_COMBINED_PORTS(inet->dport, lport);
96 const unsigned int hash = inet6_ehashfn(daddr, inet->num,
97 saddr, inet->dport);
98 struct inet_ehash_bucket *head = inet_ehash_bucket(&dccp_hashinfo, hash);
99 struct sock *sk2;
100 const struct hlist_node *node;
101 struct inet_timewait_sock *tw;
102
103 prefetch(head->chain.first);
104 write_lock(&head->lock);
105
106 /* Check TIME-WAIT sockets first. */
107 sk_for_each(sk2, node, &(head + dccp_hashinfo.ehash_size)->chain) {
108 const struct inet6_timewait_sock *tw6 = inet6_twsk(sk2);
109
110 tw = inet_twsk(sk2);
111
112 if (*((__u32 *)&(tw->tw_dport)) == ports &&
113 sk2->sk_family == PF_INET6 &&
114 ipv6_addr_equal(&tw6->tw_v6_daddr, saddr) &&
115 ipv6_addr_equal(&tw6->tw_v6_rcv_saddr, daddr) &&
116 sk2->sk_bound_dev_if == sk->sk_bound_dev_if)
117 goto not_unique;
118 }
119 tw = NULL;
120
121 /* And established part... */
122 sk_for_each(sk2, node, &head->chain) {
123 if (INET6_MATCH(sk2, hash, saddr, daddr, ports, dif))
124 goto not_unique;
125 }
126
127 BUG_TRAP(sk_unhashed(sk));
128 __sk_add_node(sk, &head->chain);
129 sk->sk_hash = hash;
130 sock_prot_inc_use(sk->sk_prot);
131 write_unlock(&head->lock);
132
133 if (twp) {
134 *twp = tw;
135 NET_INC_STATS_BH(LINUX_MIB_TIMEWAITRECYCLED);
136 } else if (tw) {
137 /* Silly. Should hash-dance instead... */
138 inet_twsk_deschedule(tw, &dccp_death_row);
139 NET_INC_STATS_BH(LINUX_MIB_TIMEWAITRECYCLED);
140
141 inet_twsk_put(tw);
142 }
143 return 0;
144
145not_unique:
146 write_unlock(&head->lock);
147 return -EADDRNOTAVAIL;
148}
149
150static inline u32 dccp_v6_port_offset(const struct sock *sk)
151{
152 const struct inet_sock *inet = inet_sk(sk);
153 const struct ipv6_pinfo *np = inet6_sk(sk);
154
155 return secure_tcpv6_port_ephemeral(np->rcv_saddr.s6_addr32,
156 np->daddr.s6_addr32,
157 inet->dport);
158}
159
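/*
 * Bind the connecting socket to a local port and hash it.  With no port
 * bound yet, scan the local port range starting at a per-destination offset,
 * skipping bind buckets marked fastreuse and reusing ports whose only
 * conflict is a recyclable TIME-WAIT socket; otherwise just confirm that the
 * already-bound port is still unique in the established hash.
 */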
160static int dccp_v6_hash_connect(struct sock *sk)
161{
162 const unsigned short snum = inet_sk(sk)->num;
163 struct inet_bind_hashbucket *head;
164 struct inet_bind_bucket *tb;
165 int ret;
166
167 if (snum == 0) {
168 int low = sysctl_local_port_range[0];
169 int high = sysctl_local_port_range[1];
170 int range = high - low;
171 int i;
172 int port;
173 static u32 hint;
174 u32 offset = hint + dccp_v6_port_offset(sk);
175 struct hlist_node *node;
176 struct inet_timewait_sock *tw = NULL;
177
178 local_bh_disable();
179 for (i = 1; i <= range; i++) {
180 port = low + (i + offset) % range;
181 head = &dccp_hashinfo.bhash[inet_bhashfn(port,
182 dccp_hashinfo.bhash_size)];
183 spin_lock(&head->lock);
184
185 /* Does not bother with rcv_saddr checks,
186 * because the established check is already
187 * unique enough.
188 */
189 inet_bind_bucket_for_each(tb, node, &head->chain) {
190 if (tb->port == port) {
191 BUG_TRAP(!hlist_empty(&tb->owners));
192 if (tb->fastreuse >= 0)
193 goto next_port;
194 if (!__dccp_v6_check_established(sk,
195 port,
196 &tw))
197 goto ok;
198 goto next_port;
199 }
200 }
201
202 tb = inet_bind_bucket_create(dccp_hashinfo.bind_bucket_cachep,
203 head, port);
204 if (!tb) {
205 spin_unlock(&head->lock);
206 break;
207 }
208 tb->fastreuse = -1;
209 goto ok;
210
211 next_port:
212 spin_unlock(&head->lock);
213 }
214 local_bh_enable();
215
216 return -EADDRNOTAVAIL;
217ok:
218 hint += i;
219
220 /* Head lock still held and bh's disabled */
221 inet_bind_hash(sk, tb, port);
222 if (sk_unhashed(sk)) {
223 inet_sk(sk)->sport = htons(port);
224 __inet6_hash(&dccp_hashinfo, sk);
225 }
226 spin_unlock(&head->lock);
227
228 if (tw) {
229 inet_twsk_deschedule(tw, &dccp_death_row);
230 inet_twsk_put(tw);
231 }
232
233 ret = 0;
234 goto out;
235 }
236
237 head = &dccp_hashinfo.bhash[inet_bhashfn(snum,
238 dccp_hashinfo.bhash_size)];
239 tb = inet_csk(sk)->icsk_bind_hash;
240 spin_lock_bh(&head->lock);
241
242 if (sk_head(&tb->owners) == sk && !sk->sk_bind_node.next) {
243 __inet6_hash(&dccp_hashinfo, sk);
244 spin_unlock_bh(&head->lock);
245 return 0;
246 } else {
247 spin_unlock(&head->lock);
248 /* No definite answer... Walk to established hash table */
249 ret = __dccp_v6_check_established(sk, snum, NULL);
250out:
251 local_bh_enable();
252 return ret;
253 }
254}
255
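/*
 * Active open.  Resolves the destination (flow label, v6-mapped and
 * link-local cases), routes the flow, binds a local port via
 * dccp_v6_hash_connect() and finally sends the Request through
 * dccp_connect().
 */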
256static int dccp_v6_connect(struct sock *sk, struct sockaddr *uaddr,
257 int addr_len)
258{
259 struct sockaddr_in6 *usin = (struct sockaddr_in6 *) uaddr;
260 struct inet_sock *inet = inet_sk(sk);
261 struct ipv6_pinfo *np = inet6_sk(sk);
262 struct dccp_sock *dp = dccp_sk(sk);
263 struct in6_addr *saddr = NULL, *final_p = NULL, final;
264 struct flowi fl;
265 struct dst_entry *dst;
266 int addr_type;
267 int err;
268
269 dp->dccps_role = DCCP_ROLE_CLIENT;
270
271 if (addr_len < SIN6_LEN_RFC2133)
272 return -EINVAL;
273
274 if (usin->sin6_family != AF_INET6)
275 return -EAFNOSUPPORT;
276
277 memset(&fl, 0, sizeof(fl));
278
279 if (np->sndflow) {
280 fl.fl6_flowlabel = usin->sin6_flowinfo & IPV6_FLOWINFO_MASK;
281 IP6_ECN_flow_init(fl.fl6_flowlabel);
282 if (fl.fl6_flowlabel & IPV6_FLOWLABEL_MASK) {
283 struct ip6_flowlabel *flowlabel;
284 flowlabel = fl6_sock_lookup(sk, fl.fl6_flowlabel);
285 if (flowlabel == NULL)
286 return -EINVAL;
287 ipv6_addr_copy(&usin->sin6_addr, &flowlabel->dst);
288 fl6_sock_release(flowlabel);
289 }
290 }
291
292 /*
293 * connect() to INADDR_ANY means loopback (BSD'ism).
294 */
295
296 if (ipv6_addr_any(&usin->sin6_addr))
297 usin->sin6_addr.s6_addr[15] = 0x1;
298
299 addr_type = ipv6_addr_type(&usin->sin6_addr);
300
301 if (addr_type & IPV6_ADDR_MULTICAST)
302 return -ENETUNREACH;
303
304 if (addr_type & IPV6_ADDR_LINKLOCAL) {
305 if (addr_len >= sizeof(struct sockaddr_in6) &&
306 usin->sin6_scope_id) {
307 /* If interface is set while binding, indices
308 * must coincide.
309 */
310 if (sk->sk_bound_dev_if &&
311 sk->sk_bound_dev_if != usin->sin6_scope_id)
312 return -EINVAL;
313
314 sk->sk_bound_dev_if = usin->sin6_scope_id;
315 }
316
317 /* Connect to link-local address requires an interface */
318 if (!sk->sk_bound_dev_if)
319 return -EINVAL;
320 }
321
322 ipv6_addr_copy(&np->daddr, &usin->sin6_addr);
323 np->flow_label = fl.fl6_flowlabel;
324
325 /*
326 * DCCP over IPv4
327 */
328
329 if (addr_type == IPV6_ADDR_MAPPED) {
330 u32 exthdrlen = dp->dccps_ext_header_len;
331 struct sockaddr_in sin;
332
333 SOCK_DEBUG(sk, "connect: ipv4 mapped\n");
334
335 if (__ipv6_only_sock(sk))
336 return -ENETUNREACH;
337
338 sin.sin_family = AF_INET;
339 sin.sin_port = usin->sin6_port;
340 sin.sin_addr.s_addr = usin->sin6_addr.s6_addr32[3];
341
342 inet_csk(sk)->icsk_af_ops = &dccp_ipv6_mapped;
343 sk->sk_backlog_rcv = dccp_v4_do_rcv;
344
345 err = dccp_v4_connect(sk, (struct sockaddr *)&sin, sizeof(sin));
346
347 if (err) {
348 dp->dccps_ext_header_len = exthdrlen;
349 inet_csk(sk)->icsk_af_ops = &dccp_ipv6_af_ops;
350 sk->sk_backlog_rcv = dccp_v6_do_rcv;
351 goto failure;
352 } else {
353 ipv6_addr_set(&np->saddr, 0, 0, htonl(0x0000FFFF),
354 inet->saddr);
355 ipv6_addr_set(&np->rcv_saddr, 0, 0, htonl(0x0000FFFF),
356 inet->rcv_saddr);
357 }
358
359 return err;
360 }
361
362 if (!ipv6_addr_any(&np->rcv_saddr))
363 saddr = &np->rcv_saddr;
364
365 fl.proto = IPPROTO_DCCP;
366 ipv6_addr_copy(&fl.fl6_dst, &np->daddr);
367 ipv6_addr_copy(&fl.fl6_src, saddr ? saddr : &np->saddr);
368 fl.oif = sk->sk_bound_dev_if;
369 fl.fl_ip_dport = usin->sin6_port;
370 fl.fl_ip_sport = inet->sport;
371
372 if (np->opt && np->opt->srcrt) {
373 struct rt0_hdr *rt0 = (struct rt0_hdr *)np->opt->srcrt;
374 ipv6_addr_copy(&final, &fl.fl6_dst);
375 ipv6_addr_copy(&fl.fl6_dst, rt0->addr);
376 final_p = &final;
377 }
378
379 err = ip6_dst_lookup(sk, &dst, &fl);
380 if (err)
381 goto failure;
382 if (final_p)
383 ipv6_addr_copy(&fl.fl6_dst, final_p);
384
385 if ((err = xfrm_lookup(&dst, &fl, sk, 0)) < 0)
386 goto failure;
387
388 if (saddr == NULL) {
389 saddr = &fl.fl6_src;
390 ipv6_addr_copy(&np->rcv_saddr, saddr);
391 }
392
393 /* set the source address */
394 ipv6_addr_copy(&np->saddr, saddr);
395 inet->rcv_saddr = LOOPBACK4_IPV6;
396
397 ip6_dst_store(sk, dst, NULL);
398
399 dp->dccps_ext_header_len = 0;
400 if (np->opt)
401 dp->dccps_ext_header_len = np->opt->opt_flen + np->opt->opt_nflen;
402
403 inet->dport = usin->sin6_port;
404
405 dccp_set_state(sk, DCCP_REQUESTING);
406 err = dccp_v6_hash_connect(sk);
407 if (err)
408 goto late_failure;
409 /* FIXME */
410#if 0
411 dp->dccps_gar = secure_dccp_v6_sequence_number(np->saddr.s6_addr32,
412 np->daddr.s6_addr32,
413 inet->sport,
414 inet->dport);
415#endif
416 err = dccp_connect(sk);
417 if (err)
418 goto late_failure;
419
420 return 0;
421
422late_failure:
423 dccp_set_state(sk, DCCP_CLOSED);
424 __sk_dst_reset(sk);
425failure:
426 inet->dport = 0;
427 sk->sk_route_caps = 0;
428 return err;
429}
430
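/*
 * ICMPv6 error handler.  For PKT_TOOBIG, re-validate (or re-route) the
 * cached dst and shrink the MSS to the new path MTU; other errors are
 * converted to an errno and either reported on the socket or, for a
 * half-open request whose initial sequence matches the erroring packet,
 * cause the request to be dropped.
 */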
431static void dccp_v6_err(struct sk_buff *skb, struct inet6_skb_parm *opt,
432 int type, int code, int offset, __u32 info)
433{
434 struct ipv6hdr *hdr = (struct ipv6hdr *)skb->data;
435 const struct dccp_hdr *dh = (struct dccp_hdr *)(skb->data + offset);
436 struct ipv6_pinfo *np;
437 struct sock *sk;
438 int err;
439 __u64 seq;
440
441 sk = inet6_lookup(&dccp_hashinfo, &hdr->daddr, dh->dccph_dport,
442 &hdr->saddr, dh->dccph_sport, skb->dev->ifindex);
443
444 if (sk == NULL) {
445 ICMP6_INC_STATS_BH(__in6_dev_get(skb->dev), ICMP6_MIB_INERRORS);
446 return;
447 }
448
449 if (sk->sk_state == DCCP_TIME_WAIT) {
450 inet_twsk_put((struct inet_timewait_sock *)sk);
451 return;
452 }
453
454 bh_lock_sock(sk);
455 if (sock_owned_by_user(sk))
456 NET_INC_STATS_BH(LINUX_MIB_LOCKDROPPEDICMPS);
457
458 if (sk->sk_state == DCCP_CLOSED)
459 goto out;
460
461 np = inet6_sk(sk);
462
463 if (type == ICMPV6_PKT_TOOBIG) {
464 struct dccp_sock *dp = dccp_sk(sk);
465 struct dst_entry *dst = NULL;
466
467 if (sock_owned_by_user(sk))
468 goto out;
469 if ((1 << sk->sk_state) & (DCCPF_LISTEN | DCCPF_CLOSED))
470 goto out;
471
472 /* icmp should have updated the destination cache entry */
473 dst = __sk_dst_check(sk, np->dst_cookie);
474
475 if (dst == NULL) {
476 struct inet_sock *inet = inet_sk(sk);
477 struct flowi fl;
478
479 /* BUGGG_FUTURE: Again, it is not clear how
480 to handle rthdr case. Ignore this complexity
481 for now.
482 */
483 memset(&fl, 0, sizeof(fl));
484 fl.proto = IPPROTO_DCCP;
485 ipv6_addr_copy(&fl.fl6_dst, &np->daddr);
486 ipv6_addr_copy(&fl.fl6_src, &np->saddr);
487 fl.oif = sk->sk_bound_dev_if;
488 fl.fl_ip_dport = inet->dport;
489 fl.fl_ip_sport = inet->sport;
490
491 if ((err = ip6_dst_lookup(sk, &dst, &fl))) {
492 sk->sk_err_soft = -err;
493 goto out;
494 }
495
496 if ((err = xfrm_lookup(&dst, &fl, sk, 0)) < 0) {
497 sk->sk_err_soft = -err;
498 goto out;
499 }
500
501 } else
502 dst_hold(dst);
503
504 if (dp->dccps_pmtu_cookie > dst_mtu(dst)) {
505 dccp_sync_mss(sk, dst_mtu(dst));
506 } /* else let the usual retransmit timer handle it */
507 dst_release(dst);
508 goto out;
509 }
510
511 icmpv6_err_convert(type, code, &err);
512
513 seq = DCCP_SKB_CB(skb)->dccpd_seq;
514 /* Might be for a request_sock */
515 switch (sk->sk_state) {
516 struct request_sock *req, **prev;
517 case DCCP_LISTEN:
518 if (sock_owned_by_user(sk))
519 goto out;
520
521 req = inet6_csk_search_req(sk, &prev, dh->dccph_dport,
522 &hdr->daddr, &hdr->saddr,
523 inet6_iif(skb));
524 if (!req)
525 goto out;
526
527 /* ICMPs are not backlogged, hence we cannot get
528 * an established socket here.
529 */
530 BUG_TRAP(req->sk == NULL);
531
532 if (seq != dccp_rsk(req)->dreq_iss) {
533 NET_INC_STATS_BH(LINUX_MIB_OUTOFWINDOWICMPS);
534 goto out;
535 }
536
537 inet_csk_reqsk_queue_drop(sk, req, prev);
538 goto out;
539
540 case DCCP_REQUESTING:
541 case DCCP_RESPOND: /* Cannot happen.
542 It can, if SYNs are crossed. --ANK */
543 if (!sock_owned_by_user(sk)) {
544 DCCP_INC_STATS_BH(DCCP_MIB_ATTEMPTFAILS);
545 sk->sk_err = err;
546 /*
547 * Wake people up to see the error
548 * (see connect in sock.c)
549 */
550 sk->sk_error_report(sk);
551
552 dccp_done(sk);
553 } else
554 sk->sk_err_soft = err;
555 goto out;
556 }
557
558 if (!sock_owned_by_user(sk) && np->recverr) {
559 sk->sk_err = err;
560 sk->sk_error_report(sk);
561 } else
562 sk->sk_err_soft = err;
563
564out:
565 bh_unlock_sock(sk);
566 sock_put(sk);
567}
568
569
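/*
 * Build and send the Response for a request_sock.  When no dst is supplied,
 * the route lookup is done here, honouring any source routing header taken
 * from the listener's options or the saved Request pktoptions; the packet is
 * then checksummed and transmitted with ip6_xmit().
 */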
570static int dccp_v6_send_response(struct sock *sk, struct request_sock *req,
571 struct dst_entry *dst)
572{
573 struct inet6_request_sock *ireq6 = inet6_rsk(req);
574 struct ipv6_pinfo *np = inet6_sk(sk);
575 struct sk_buff *skb;
576 struct ipv6_txoptions *opt = NULL;
577 struct in6_addr *final_p = NULL, final;
578 struct flowi fl;
579 int err = -1;
580
581 memset(&fl, 0, sizeof(fl));
582 fl.proto = IPPROTO_DCCP;
583 ipv6_addr_copy(&fl.fl6_dst, &ireq6->rmt_addr);
584 ipv6_addr_copy(&fl.fl6_src, &ireq6->loc_addr);
585 fl.fl6_flowlabel = 0;
586 fl.oif = ireq6->iif;
587 fl.fl_ip_dport = inet_rsk(req)->rmt_port;
588 fl.fl_ip_sport = inet_sk(sk)->sport;
589
590 if (dst == NULL) {
591 opt = np->opt;
592 if (opt == NULL &&
593 np->rxopt.bits.osrcrt == 2 &&
594 ireq6->pktopts) {
595 struct sk_buff *pktopts = ireq6->pktopts;
596 struct inet6_skb_parm *rxopt = IP6CB(pktopts);
597 if (rxopt->srcrt)
598 opt = ipv6_invert_rthdr(sk,
599 (struct ipv6_rt_hdr *)(pktopts->nh.raw +
600 rxopt->srcrt));
601 }
602
603 if (opt && opt->srcrt) {
604 struct rt0_hdr *rt0 = (struct rt0_hdr *)opt->srcrt;
605 ipv6_addr_copy(&final, &fl.fl6_dst);
606 ipv6_addr_copy(&fl.fl6_dst, rt0->addr);
607 final_p = &final;
608 }
609
610 err = ip6_dst_lookup(sk, &dst, &fl);
611 if (err)
612 goto done;
613 if (final_p)
614 ipv6_addr_copy(&fl.fl6_dst, final_p);
615 if ((err = xfrm_lookup(&dst, &fl, sk, 0)) < 0)
616 goto done;
617 }
618
619 skb = dccp_make_response(sk, dst, req);
620 if (skb != NULL) {
621 struct dccp_hdr *dh = dccp_hdr(skb);
622 dh->dccph_checksum = dccp_v6_check(dh, skb->len,
623 &ireq6->loc_addr,
624 &ireq6->rmt_addr,
625 csum_partial((char *)dh,
626 skb->len,
627 skb->csum));
628 ipv6_addr_copy(&fl.fl6_dst, &ireq6->rmt_addr);
629 err = ip6_xmit(sk, skb, &fl, opt, 0);
630 if (err == NET_XMIT_CN)
631 err = 0;
632 }
633
634done:
635 if (opt && opt != np->opt)
636 sock_kfree_s(sk, opt, opt->tot_len);
637 return err;
638}
639
640static void dccp_v6_reqsk_destructor(struct request_sock *req)
641{
642 if (inet6_rsk(req)->pktopts != NULL)
643 kfree_skb(inet6_rsk(req)->pktopts);
644}
645
646static struct request_sock_ops dccp6_request_sock_ops = {
647 .family = AF_INET6,
648 .obj_size = sizeof(struct dccp6_request_sock),
649 .rtx_syn_ack = dccp_v6_send_response,
650 .send_ack = dccp_v6_reqsk_send_ack,
651 .destructor = dccp_v6_reqsk_destructor,
652 .send_reset = dccp_v6_ctl_send_reset,
653};
654
655static void dccp_v6_send_check(struct sock *sk, int len, struct sk_buff *skb)
656{
657 struct ipv6_pinfo *np = inet6_sk(sk);
658 struct dccp_hdr *dh = dccp_hdr(skb);
659
660 dh->dccph_checksum = csum_ipv6_magic(&np->saddr, &np->daddr,
661 len, IPPROTO_DCCP,
662 csum_partial((char *)dh,
663 dh->dccph_doff << 2,
664 skb->csum));
665}
666
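/*
 * Send a stateless Reset in reply to rxskb: ports are swapped, the sequence
 * number is derived from the incoming packet's acknowledgement number (when
 * present), and the packet is routed and transmitted without a socket.
 */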
667static void dccp_v6_ctl_send_reset(struct sk_buff *rxskb)
668{
669 struct dccp_hdr *rxdh = dccp_hdr(rxskb), *dh;
670 const int dccp_hdr_reset_len = sizeof(struct dccp_hdr) +
671 sizeof(struct dccp_hdr_ext) +
672 sizeof(struct dccp_hdr_reset);
673 struct sk_buff *skb;
674 struct flowi fl;
675 u64 seqno;
676
677 if (rxdh->dccph_type == DCCP_PKT_RESET)
678 return;
679
680 if (!ipv6_unicast_destination(rxskb))
681 return;
682
683 /*
684 * We need to grab some memory, and put together a Reset,
685 * and then put it into the queue to be sent.
686 */
687
688 skb = alloc_skb(MAX_HEADER + sizeof(struct ipv6hdr) +
689 dccp_hdr_reset_len, GFP_ATOMIC);
690 if (skb == NULL)
691 return;
692
693 skb_reserve(skb, MAX_HEADER + sizeof(struct ipv6hdr) +
694 dccp_hdr_reset_len);
695
696 skb->h.raw = skb_push(skb, dccp_hdr_reset_len);
697 dh = dccp_hdr(skb);
698 memset(dh, 0, dccp_hdr_reset_len);
699
700 /* Swap the send and the receive. */
701 dh->dccph_type = DCCP_PKT_RESET;
702 dh->dccph_sport = rxdh->dccph_dport;
703 dh->dccph_dport = rxdh->dccph_sport;
704 dh->dccph_doff = dccp_hdr_reset_len / 4;
705 dh->dccph_x = 1;
706 dccp_hdr_reset(skb)->dccph_reset_code =
707 DCCP_SKB_CB(rxskb)->dccpd_reset_code;
708
709 /* See "8.3.1. Abnormal Termination" in draft-ietf-dccp-spec-11 */
710 seqno = 0;
711 if (DCCP_SKB_CB(rxskb)->dccpd_ack_seq != DCCP_PKT_WITHOUT_ACK_SEQ)
712 dccp_set_seqno(&seqno, DCCP_SKB_CB(rxskb)->dccpd_ack_seq + 1);
713
714 dccp_hdr_set_seq(dh, seqno);
715 dccp_hdr_set_ack(dccp_hdr_ack_bits(skb),
716 DCCP_SKB_CB(rxskb)->dccpd_seq);
717
718 memset(&fl, 0, sizeof(fl));
719 ipv6_addr_copy(&fl.fl6_dst, &rxskb->nh.ipv6h->saddr);
720 ipv6_addr_copy(&fl.fl6_src, &rxskb->nh.ipv6h->daddr);
721 dh->dccph_checksum = csum_ipv6_magic(&fl.fl6_src, &fl.fl6_dst,
722 sizeof(*dh), IPPROTO_DCCP,
723 skb->csum);
724 fl.proto = IPPROTO_DCCP;
725 fl.oif = inet6_iif(rxskb);
726 fl.fl_ip_dport = dh->dccph_dport;
727 fl.fl_ip_sport = dh->dccph_sport;
728
729 /* sk = NULL, but it is safe for now. RST socket required. */
730 if (!ip6_dst_lookup(NULL, &skb->dst, &fl)) {
731 if (xfrm_lookup(&skb->dst, &fl, NULL, 0) >= 0) {
732 ip6_xmit(NULL, skb, &fl, NULL, 0);
733 DCCP_INC_STATS_BH(DCCP_MIB_OUTSEGS);
734 DCCP_INC_STATS_BH(DCCP_MIB_OUTRSTS);
735 return;
736 }
737 }
738
739 kfree_skb(skb);
740}
741
742static void dccp_v6_ctl_send_ack(struct sk_buff *rxskb)
743{
744 struct flowi fl;
745 struct dccp_hdr *rxdh = dccp_hdr(rxskb), *dh;
746 const int dccp_hdr_ack_len = sizeof(struct dccp_hdr) +
747 sizeof(struct dccp_hdr_ext) +
748 sizeof(struct dccp_hdr_ack_bits);
749 struct sk_buff *skb;
750
751 skb = alloc_skb(MAX_HEADER + sizeof(struct ipv6hdr) +
752 dccp_hdr_ack_len, GFP_ATOMIC);
753 if (skb == NULL)
754 return;
755
756 skb_reserve(skb, MAX_HEADER + sizeof(struct ipv6hdr) +
757 dccp_hdr_ack_len);
758
759 skb->h.raw = skb_push(skb, dccp_hdr_ack_len);
760 dh = dccp_hdr(skb);
761 memset(dh, 0, dccp_hdr_ack_len);
762
763 /* Build DCCP header and checksum it. */
764 dh->dccph_type = DCCP_PKT_ACK;
765 dh->dccph_sport = rxdh->dccph_dport;
766 dh->dccph_dport = rxdh->dccph_sport;
767 dh->dccph_doff = dccp_hdr_ack_len / 4;
768 dh->dccph_x = 1;
769
770 dccp_hdr_set_seq(dh, DCCP_SKB_CB(rxskb)->dccpd_ack_seq);
771 dccp_hdr_set_ack(dccp_hdr_ack_bits(skb),
772 DCCP_SKB_CB(rxskb)->dccpd_seq);
773
774 memset(&fl, 0, sizeof(fl));
775 ipv6_addr_copy(&fl.fl6_dst, &rxskb->nh.ipv6h->saddr);
776 ipv6_addr_copy(&fl.fl6_src, &rxskb->nh.ipv6h->daddr);
777
778 /* FIXME: calculate checksum, IPv4 also should... */
779
780 fl.proto = IPPROTO_DCCP;
781 fl.oif = inet6_iif(rxskb);
782 fl.fl_ip_dport = dh->dccph_dport;
783 fl.fl_ip_sport = dh->dccph_sport;
784
785 if (!ip6_dst_lookup(NULL, &skb->dst, &fl)) {
786 if (xfrm_lookup(&skb->dst, &fl, NULL, 0) >= 0) {
787 ip6_xmit(NULL, skb, &fl, NULL, 0);
788 DCCP_INC_STATS_BH(DCCP_MIB_OUTSEGS);
789 return;
790 }
791 }
792
793 kfree_skb(skb);
794}
795
796static void dccp_v6_reqsk_send_ack(struct sk_buff *skb,
797 struct request_sock *req)
798{
799 dccp_v6_ctl_send_ack(skb);
800}
801
802static struct sock *dccp_v6_hnd_req(struct sock *sk,struct sk_buff *skb)
803{
804 const struct dccp_hdr *dh = dccp_hdr(skb);
805 const struct ipv6hdr *iph = skb->nh.ipv6h;
806 struct sock *nsk;
807 struct request_sock **prev;
808 /* Find possible connection requests. */
809 struct request_sock *req = inet6_csk_search_req(sk, &prev,
810 dh->dccph_sport,
811 &iph->saddr,
812 &iph->daddr,
813 inet6_iif(skb));
814 if (req != NULL)
815 return dccp_check_req(sk, skb, req, prev);
816
817 nsk = __inet6_lookup_established(&dccp_hashinfo,
818 &iph->saddr, dh->dccph_sport,
819 &iph->daddr, ntohs(dh->dccph_dport),
820 inet6_iif(skb));
821
822 if (nsk != NULL) {
823 if (nsk->sk_state != DCCP_TIME_WAIT) {
824 bh_lock_sock(nsk);
825 return nsk;
826 }
827 inet_twsk_put((struct inet_timewait_sock *)nsk);
828 return NULL;
829 }
830
831 return sk;
832}
833
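/*
 * Handle a Request arriving on a listening socket: validate the service
 * code, allocate a request_sock, record the peer addresses, interface and
 * any packet options, choose the initial sequence number, send the Response
 * and queue the request on the listener with the initial retransmit timeout.
 */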
834static int dccp_v6_conn_request(struct sock *sk, struct sk_buff *skb)
835{
836 struct inet_request_sock *ireq;
837 struct dccp_sock dp;
838 struct request_sock *req;
839 struct dccp_request_sock *dreq;
840 struct inet6_request_sock *ireq6;
841 struct ipv6_pinfo *np = inet6_sk(sk);
842 const __u32 service = dccp_hdr_request(skb)->dccph_req_service;
843 struct dccp_skb_cb *dcb = DCCP_SKB_CB(skb);
844 __u8 reset_code = DCCP_RESET_CODE_TOO_BUSY;
845
846 if (skb->protocol == htons(ETH_P_IP))
847 return dccp_v4_conn_request(sk, skb);
848
849 if (!ipv6_unicast_destination(skb))
850 goto drop;
851
852 if (dccp_bad_service_code(sk, service)) {
853 reset_code = DCCP_RESET_CODE_BAD_SERVICE_CODE;
854 goto drop;
855 }
856 /*
857 * There are no SYN attacks on IPv6, yet...
858 */
859 if (inet_csk_reqsk_queue_is_full(sk))
860 goto drop;
861
862 if (sk_acceptq_is_full(sk) && inet_csk_reqsk_queue_young(sk) > 1)
863 goto drop;
864
865 req = inet6_reqsk_alloc(sk->sk_prot->rsk_prot);
866 if (req == NULL)
867 goto drop;
868
869 /* FIXME: process options */
870
871 dccp_openreq_init(req, &dp, skb);
872
873 ireq6 = inet6_rsk(req);
874 ireq = inet_rsk(req);
875 ipv6_addr_copy(&ireq6->rmt_addr, &skb->nh.ipv6h->saddr);
876 ipv6_addr_copy(&ireq6->loc_addr, &skb->nh.ipv6h->daddr);
877 req->rcv_wnd = 100; /* Fake, option parsing will get the
878 right value */
879 ireq6->pktopts = NULL;
880
881 if (ipv6_opt_accepted(sk, skb) ||
882 np->rxopt.bits.rxinfo || np->rxopt.bits.rxoinfo ||
883 np->rxopt.bits.rxhlim || np->rxopt.bits.rxohlim) {
884 atomic_inc(&skb->users);
885 ireq6->pktopts = skb;
886 }
887 ireq6->iif = sk->sk_bound_dev_if;
888
889 /* So that link locals have meaning */
890 if (!sk->sk_bound_dev_if &&
891 ipv6_addr_type(&ireq6->rmt_addr) & IPV6_ADDR_LINKLOCAL)
892 ireq6->iif = inet6_iif(skb);
893
894 /*
895 * Step 3: Process LISTEN state
896 *
897 * Set S.ISR, S.GSR, S.SWL, S.SWH from packet or Init Cookie
898 *
899 * In fact we defer setting S.GSR, S.SWL, S.SWH to
900 * dccp_create_openreq_child.
901 */
902 dreq = dccp_rsk(req);
903 dreq->dreq_isr = dcb->dccpd_seq;
904 dreq->dreq_iss = dccp_v6_init_sequence(sk, skb);
905 dreq->dreq_service = service;
906
907 if (dccp_v6_send_response(sk, req, NULL))
908 goto drop_and_free;
909
910 inet6_csk_reqsk_queue_hash_add(sk, req, DCCP_TIMEOUT_INIT);
911 return 0;
912
913drop_and_free:
914 reqsk_free(req);
915drop:
916 DCCP_INC_STATS_BH(DCCP_MIB_ATTEMPTFAILS);
917 dcb->dccpd_reset_code = reset_code;
918 return -1;
919}
920
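/*
 * Create the child socket once the handshake completes.  The v6-mapped case
 * delegates to the IPv4 code and then fixes up the ipv6_pinfo and af_ops;
 * the native IPv6 case routes the flow if needed, clones the listener's
 * IPv6 state and options into the child, and hashes it.
 */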
921static struct sock *dccp_v6_request_recv_sock(struct sock *sk,
922 struct sk_buff *skb,
923 struct request_sock *req,
924 struct dst_entry *dst)
925{
926 struct inet6_request_sock *ireq6 = inet6_rsk(req);
927 struct ipv6_pinfo *newnp, *np = inet6_sk(sk);
928 struct inet_sock *newinet;
929 struct dccp_sock *newdp;
930 struct dccp6_sock *newdp6;
931 struct sock *newsk;
932 struct ipv6_txoptions *opt;
933
934 if (skb->protocol == htons(ETH_P_IP)) {
935 /*
936 * v6 mapped
937 */
938
939 newsk = dccp_v4_request_recv_sock(sk, skb, req, dst);
940 if (newsk == NULL)
941 return NULL;
942
943 newdp6 = (struct dccp6_sock *)newsk;
944 newdp = dccp_sk(newsk);
945 newinet = inet_sk(newsk);
946 newinet->pinet6 = &newdp6->inet6;
947 newnp = inet6_sk(newsk);
948
949 memcpy(newnp, np, sizeof(struct ipv6_pinfo));
950
951 ipv6_addr_set(&newnp->daddr, 0, 0, htonl(0x0000FFFF),
952 newinet->daddr);
953
954 ipv6_addr_set(&newnp->saddr, 0, 0, htonl(0x0000FFFF),
955 newinet->saddr);
956
957 ipv6_addr_copy(&newnp->rcv_saddr, &newnp->saddr);
958
959 inet_csk(newsk)->icsk_af_ops = &dccp_ipv6_mapped;
960 newsk->sk_backlog_rcv = dccp_v4_do_rcv;
961 newnp->pktoptions = NULL;
962 newnp->opt = NULL;
963 newnp->mcast_oif = inet6_iif(skb);
964 newnp->mcast_hops = skb->nh.ipv6h->hop_limit;
965
966 /*
967 * No need to charge this sock to the relevant IPv6 refcnt debug socks count
968 * here, dccp_create_openreq_child now does this for us, see the comment in
969 * that function for the gory details. -acme
970 */
971
972 /* It is a tricky place. Until this moment the IPv4 socket
973 worked with the IPv6 icsk.icsk_af_ops.
974 Sync it now.
975 */
976 dccp_sync_mss(newsk, newdp->dccps_pmtu_cookie);
977
978 return newsk;
979 }
980
981 opt = np->opt;
982
983 if (sk_acceptq_is_full(sk))
984 goto out_overflow;
985
986 if (np->rxopt.bits.osrcrt == 2 &&
987 opt == NULL && ireq6->pktopts) {
988 struct inet6_skb_parm *rxopt = IP6CB(ireq6->pktopts);
989 if (rxopt->srcrt)
990 opt = ipv6_invert_rthdr(sk,
991 (struct ipv6_rt_hdr *)(ireq6->pktopts->nh.raw +
992 rxopt->srcrt));
993 }
994
995 if (dst == NULL) {
996 struct in6_addr *final_p = NULL, final;
997 struct flowi fl;
998
999 memset(&fl, 0, sizeof(fl));
1000 fl.proto = IPPROTO_DCCP;
1001 ipv6_addr_copy(&fl.fl6_dst, &ireq6->rmt_addr);
1002 if (opt && opt->srcrt) {
1003 struct rt0_hdr *rt0 = (struct rt0_hdr *) opt->srcrt;
1004 ipv6_addr_copy(&final, &fl.fl6_dst);
1005 ipv6_addr_copy(&fl.fl6_dst, rt0->addr);
1006 final_p = &final;
1007 }
1008 ipv6_addr_copy(&fl.fl6_src, &ireq6->loc_addr);
1009 fl.oif = sk->sk_bound_dev_if;
1010 fl.fl_ip_dport = inet_rsk(req)->rmt_port;
1011 fl.fl_ip_sport = inet_sk(sk)->sport;
1012
1013 if (ip6_dst_lookup(sk, &dst, &fl))
1014 goto out;
1015
1016 if (final_p)
1017 ipv6_addr_copy(&fl.fl6_dst, final_p);
1018
1019 if ((xfrm_lookup(&dst, &fl, sk, 0)) < 0)
1020 goto out;
1021 }
1022
1023 newsk = dccp_create_openreq_child(sk, req, skb);
1024 if (newsk == NULL)
1025 goto out;
1026
1027 /*
1028 * No need to charge this sock to the relevant IPv6 refcnt debug socks
1029 * count here, dccp_create_openreq_child now does this for us, see the
1030 * comment in that function for the gory details. -acme
1031 */
1032
1033 ip6_dst_store(newsk, dst, NULL);
1034 newsk->sk_route_caps = dst->dev->features &
1035 ~(NETIF_F_IP_CSUM | NETIF_F_TSO);
1036
1037 newdp6 = (struct dccp6_sock *)newsk;
1038 newinet = inet_sk(newsk);
1039 newinet->pinet6 = &newdp6->inet6;
1040 newdp = dccp_sk(newsk);
1041 newnp = inet6_sk(newsk);
1042
1043 memcpy(newnp, np, sizeof(struct ipv6_pinfo));
1044
1045 ipv6_addr_copy(&newnp->daddr, &ireq6->rmt_addr);
1046 ipv6_addr_copy(&newnp->saddr, &ireq6->loc_addr);
1047 ipv6_addr_copy(&newnp->rcv_saddr, &ireq6->loc_addr);
1048 newsk->sk_bound_dev_if = ireq6->iif;
1049
1050 /* Now IPv6 options...
1051
1052 First: no IPv4 options.
1053 */
1054 newinet->opt = NULL;
1055
1056 /* Clone RX bits */
1057 newnp->rxopt.all = np->rxopt.all;
1058
1059 /* Clone pktoptions received with SYN */
1060 newnp->pktoptions = NULL;
1061 if (ireq6->pktopts != NULL) {
1062 newnp->pktoptions = skb_clone(ireq6->pktopts, GFP_ATOMIC);
1063 kfree_skb(ireq6->pktopts);
1064 ireq6->pktopts = NULL;
1065 if (newnp->pktoptions)
1066 skb_set_owner_r(newnp->pktoptions, newsk);
1067 }
1068 newnp->opt = NULL;
1069 newnp->mcast_oif = inet6_iif(skb);
1070 newnp->mcast_hops = skb->nh.ipv6h->hop_limit;
1071
1072 /* Clone native IPv6 options from listening socket (if any)
1073
1074 Yes, keeping reference count would be much more clever,
1075 but we do one more thing here: reattach optmem
1076 to newsk.
1077 */
1078 if (opt) {
1079 newnp->opt = ipv6_dup_options(newsk, opt);
1080 if (opt != np->opt)
1081 sock_kfree_s(sk, opt, opt->tot_len);
1082 }
1083
1084 newdp->dccps_ext_header_len = 0;
1085 if (newnp->opt)
1086 newdp->dccps_ext_header_len = newnp->opt->opt_nflen +
1087 newnp->opt->opt_flen;
1088
1089 dccp_sync_mss(newsk, dst_mtu(dst));
1090
1091 newinet->daddr = newinet->saddr = newinet->rcv_saddr = LOOPBACK4_IPV6;
1092
1093 __inet6_hash(&dccp_hashinfo, newsk);
1094 inet_inherit_port(&dccp_hashinfo, sk, newsk);
1095
1096 return newsk;
1097
1098out_overflow:
1099 NET_INC_STATS_BH(LINUX_MIB_LISTENOVERFLOWS);
1100out:
1101 NET_INC_STATS_BH(LINUX_MIB_LISTENDROPS);
1102 if (opt && opt != np->opt)
1103 sock_kfree_s(sk, opt, opt->tot_len);
1104 dst_release(dst);
1105 return NULL;
1106}
1107
1108/* The socket must have its spinlock held when we get
1109 * here.
1110 *
1111 * We have a potential double-lock case here, so even when
1112 * doing backlog processing we use the BH locking scheme.
1113 * This is because we cannot sleep with the original spinlock
1114 * held.
1115 */
1116static int dccp_v6_do_rcv(struct sock *sk, struct sk_buff *skb)
1117{
1118 struct ipv6_pinfo *np = inet6_sk(sk);
1119 struct sk_buff *opt_skb = NULL;
1120
1121 /* Imagine: socket is IPv6. An IPv4 packet arrives,
1122 goes to the IPv4 receive handler and is backlogged.
1123 From the backlog it always goes here. Kerboom...
1124 Fortunately, dccp_rcv_established and rcv_established
1125 handle them correctly, but that is not the case with
1126 dccp_v6_hnd_req and dccp_v6_ctl_send_reset(). --ANK
1127 */
1128
1129 if (skb->protocol == htons(ETH_P_IP))
1130 return dccp_v4_do_rcv(sk, skb);
1131
1132 if (sk_filter(sk, skb, 0))
1133 goto discard;
1134
1135 /*
1136 * socket locking is here for SMP purposes as backlog rcv
1137 * is currently called with bh processing disabled.
1138 */
1139
1140 /* Do Stevens' IPV6_PKTOPTIONS.
1141
1142 Yes, guys, it is the only place in our code where we
1143 can do this without affecting IPv4.
1144 The rest of the code is protocol independent,
1145 and I do not like the idea of uglifying IPv4.
1146
1147 Actually, the whole idea behind IPV6_PKTOPTIONS
1148 does not look very well thought out. For now we latch
1149 the options received in the last packet enqueued
1150 by tcp. Feel free to propose a better solution.
1151 --ANK (980728)
1152 */
1153 if (np->rxopt.all)
1154 opt_skb = skb_clone(skb, GFP_ATOMIC);
1155
1156 if (sk->sk_state == DCCP_OPEN) { /* Fast path */
1157 if (dccp_rcv_established(sk, skb, dccp_hdr(skb), skb->len))
1158 goto reset;
1159 return 0;
1160 }
1161
1162 if (sk->sk_state == DCCP_LISTEN) {
1163 struct sock *nsk = dccp_v6_hnd_req(sk, skb);
1164 if (!nsk)
1165 goto discard;
1166
1167 /*
1168 * Queue it on the new socket if the new socket is active,
1169 * otherwise we just short-circuit this and continue with
1170 * the new socket.
1171 */
1172 if(nsk != sk) {
1173 if (dccp_child_process(sk, nsk, skb))
1174 goto reset;
1175 if (opt_skb)
1176 __kfree_skb(opt_skb);
1177 return 0;
1178 }
1179 }
1180
1181 if (dccp_rcv_state_process(sk, skb, dccp_hdr(skb), skb->len))
1182 goto reset;
1183 return 0;
1184
1185reset:
1186 dccp_v6_ctl_send_reset(skb);
1187discard:
1188 if (opt_skb)
1189 __kfree_skb(opt_skb);
1190 kfree_skb(skb);
1191 return 0;
1192}
1193
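/*
 * Protocol entry point for received DCCPv6 packets: validate the header,
 * look up the owning socket and either process the packet directly or put
 * it on the socket backlog; packets with no matching connection get a
 * Reset(No Connection) unless they are themselves Resets.
 */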
1194static int dccp_v6_rcv(struct sk_buff **pskb, unsigned int *nhoffp)
1195{
1196 const struct dccp_hdr *dh;
1197 struct sk_buff *skb = *pskb;
1198 struct sock *sk;
1199 int rc;
1200
1201 /* Step 1: Check header basics: */
1202
1203 if (dccp_invalid_packet(skb))
1204 goto discard_it;
1205
1206 dh = dccp_hdr(skb);
1207
1208 DCCP_SKB_CB(skb)->dccpd_seq = dccp_hdr_seq(skb);
1209 DCCP_SKB_CB(skb)->dccpd_type = dh->dccph_type;
1210
1211 if (dccp_packet_without_ack(skb))
1212 DCCP_SKB_CB(skb)->dccpd_ack_seq = DCCP_PKT_WITHOUT_ACK_SEQ;
1213 else
1214 DCCP_SKB_CB(skb)->dccpd_ack_seq = dccp_hdr_ack_seq(skb);
1215
1216 /* Step 2:
1217 * Look up flow ID in table and get corresponding socket */
1218 sk = __inet6_lookup(&dccp_hashinfo, &skb->nh.ipv6h->saddr,
1219 dh->dccph_sport,
1220 &skb->nh.ipv6h->daddr, ntohs(dh->dccph_dport),
1221 inet6_iif(skb));
1222 /*
1223 * Step 2:
1224 * If no socket ...
1225 * Generate Reset(No Connection) unless P.type == Reset
1226 * Drop packet and return
1227 */
1228 if (sk == NULL)
1229 goto no_dccp_socket;
1230
1231 /*
1232 * Step 2:
1233 * ... or S.state == TIMEWAIT,
1234 * Generate Reset(No Connection) unless P.type == Reset
1235 * Drop packet and return
1236 */
1237
1238 if (sk->sk_state == DCCP_TIME_WAIT)
1239 goto do_time_wait;
1240
1241 if (!xfrm6_policy_check(sk, XFRM_POLICY_IN, skb))
1242 goto discard_and_relse;
1243
1244 if (sk_filter(sk, skb, 0))
1245 goto discard_and_relse;
1246
1247 skb->dev = NULL;
1248
1249 bh_lock_sock(sk);
1250 rc = 0;
1251 if (!sock_owned_by_user(sk))
1252 rc = dccp_v6_do_rcv(sk, skb);
1253 else
1254 sk_add_backlog(sk, skb);
1255 bh_unlock_sock(sk);
1256
1257 sock_put(sk);
1258 return rc ? -1 : 0;
1259
1260no_dccp_socket:
1261 if (!xfrm6_policy_check(NULL, XFRM_POLICY_IN, skb))
1262 goto discard_it;
1263 /*
1264 * Step 2:
1265 * Generate Reset(No Connection) unless P.type == Reset
1266 * Drop packet and return
1267 */
1268 if (dh->dccph_type != DCCP_PKT_RESET) {
1269 DCCP_SKB_CB(skb)->dccpd_reset_code =
1270 DCCP_RESET_CODE_NO_CONNECTION;
1271 dccp_v6_ctl_send_reset(skb);
1272 }
1273discard_it:
1274
1275 /*
1276 * Discard frame
1277 */
1278
1279 kfree_skb(skb);
1280 return 0;
1281
1282discard_and_relse:
1283 sock_put(sk);
1284 goto discard_it;
1285
1286do_time_wait:
1287 inet_twsk_put((struct inet_timewait_sock *)sk);
1288 goto no_dccp_socket;
1289}
1290
1291static struct inet_connection_sock_af_ops dccp_ipv6_af_ops = {
1292 .queue_xmit = inet6_csk_xmit,
1293 .send_check = dccp_v6_send_check,
1294 .rebuild_header = inet6_sk_rebuild_header,
1295 .conn_request = dccp_v6_conn_request,
1296 .syn_recv_sock = dccp_v6_request_recv_sock,
1297 .net_header_len = sizeof(struct ipv6hdr),
1298 .setsockopt = ipv6_setsockopt,
1299 .getsockopt = ipv6_getsockopt,
1300 .addr2sockaddr = inet6_csk_addr2sockaddr,
1301 .sockaddr_len = sizeof(struct sockaddr_in6)
1302};
1303
1304/*
1305 * DCCP over IPv4 via INET6 API
1306 */
1307static struct inet_connection_sock_af_ops dccp_ipv6_mapped = {
1308 .queue_xmit = ip_queue_xmit,
1309 .send_check = dccp_v4_send_check,
1310 .rebuild_header = inet_sk_rebuild_header,
1311 .conn_request = dccp_v6_conn_request,
1312 .syn_recv_sock = dccp_v6_request_recv_sock,
1313 .net_header_len = sizeof(struct iphdr),
1314 .setsockopt = ipv6_setsockopt,
1315 .getsockopt = ipv6_getsockopt,
1316 .addr2sockaddr = inet6_csk_addr2sockaddr,
1317 .sockaddr_len = sizeof(struct sockaddr_in6)
1318};
1319
1320/* NOTE: A lot of things are set to zero explicitly by the call to
1321 * sk_alloc(), so they need not be done here.
1322 */
1323static int dccp_v6_init_sock(struct sock *sk)
1324{
1325 int err = dccp_v4_init_sock(sk);
1326
1327 if (err == 0)
1328 inet_csk(sk)->icsk_af_ops = &dccp_ipv6_af_ops;
1329
1330 return err;
1331}
1332
1333static int dccp_v6_destroy_sock(struct sock *sk)
1334{
1335 dccp_v4_destroy_sock(sk);
1336 return inet6_destroy_sock(sk);
1337}
1338
1339static struct proto dccp_v6_prot = {
1340 .name = "DCCPv6",
1341 .owner = THIS_MODULE,
1342 .close = dccp_close,
1343 .connect = dccp_v6_connect,
1344 .disconnect = dccp_disconnect,
1345 .ioctl = dccp_ioctl,
1346 .init = dccp_v6_init_sock,
1347 .setsockopt = dccp_setsockopt,
1348 .getsockopt = dccp_getsockopt,
1349 .sendmsg = dccp_sendmsg,
1350 .recvmsg = dccp_recvmsg,
1351 .backlog_rcv = dccp_v6_do_rcv,
1352 .hash = dccp_v6_hash,
1353 .unhash = dccp_unhash,
1354 .accept = inet_csk_accept,
1355 .get_port = dccp_v6_get_port,
1356 .shutdown = dccp_shutdown,
1357 .destroy = dccp_v6_destroy_sock,
1358 .orphan_count = &dccp_orphan_count,
1359 .max_header = MAX_DCCP_HEADER,
1360 .obj_size = sizeof(struct dccp6_sock),
1361 .rsk_prot = &dccp6_request_sock_ops,
1362 .twsk_obj_size = sizeof(struct dccp6_timewait_sock),
1363};
1364
1365static struct inet6_protocol dccp_v6_protocol = {
1366 .handler = dccp_v6_rcv,
1367 .err_handler = dccp_v6_err,
1368 .flags = INET6_PROTO_NOPOLICY | INET6_PROTO_FINAL,
1369};
1370
1371static struct proto_ops inet6_dccp_ops = {
1372 .family = PF_INET6,
1373 .owner = THIS_MODULE,
1374 .release = inet6_release,
1375 .bind = inet6_bind,
1376 .connect = inet_stream_connect,
1377 .socketpair = sock_no_socketpair,
1378 .accept = inet_accept,
1379 .getname = inet6_getname,
1380 .poll = dccp_poll,
1381 .ioctl = inet6_ioctl,
1382 .listen = inet_dccp_listen,
1383 .shutdown = inet_shutdown,
1384 .setsockopt = sock_common_setsockopt,
1385 .getsockopt = sock_common_getsockopt,
1386 .sendmsg = inet_sendmsg,
1387 .recvmsg = sock_common_recvmsg,
1388 .mmap = sock_no_mmap,
1389 .sendpage = sock_no_sendpage,
1390};
1391
1392static struct inet_protosw dccp_v6_protosw = {
1393 .type = SOCK_DCCP,
1394 .protocol = IPPROTO_DCCP,
1395 .prot = &dccp_v6_prot,
1396 .ops = &inet6_dccp_ops,
1397 .capability = -1,
1398};
1399
1400static int __init dccp_v6_init(void)
1401{
1402 int err = proto_register(&dccp_v6_prot, 1);
1403
1404 if (err != 0)
1405 goto out;
1406
1407 err = inet6_add_protocol(&dccp_v6_protocol, IPPROTO_DCCP);
1408 if (err != 0)
1409 goto out_unregister_proto;
1410
1411 inet6_register_protosw(&dccp_v6_protosw);
1412out:
1413 return err;
1414out_unregister_proto:
1415 proto_unregister(&dccp_v6_prot);
1416 goto out;
1417}
1418
1419static void __exit dccp_v6_exit(void)
1420{
1421 inet6_del_protocol(&dccp_v6_protocol, IPPROTO_DCCP);
1422 inet6_unregister_protosw(&dccp_v6_protosw);
1423 proto_unregister(&dccp_v6_prot);
1424}
1425
1426module_init(dccp_v6_init);
1427module_exit(dccp_v6_exit);
1428
1429/*
1430 * __stringify doesn't like enums, so use the SOCK_DCCP (6) and IPPROTO_DCCP (33)
1431 * values directly. Also cover the case where the protocol is not specified,
1432 * i.e. net-pf-PF_INET6-proto-0-type-SOCK_DCCP
1433 */
1434MODULE_ALIAS("net-pf-" __stringify(PF_INET6) "-proto-33-type-6");
1435MODULE_ALIAS("net-pf-" __stringify(PF_INET6) "-proto-0-type-6");
1436MODULE_LICENSE("GPL");
1437MODULE_AUTHOR("Arnaldo Carvalho de Melo <acme@mandriva.com>");
1438MODULE_DESCRIPTION("DCCPv6 - Datagram Congestion Controlled Protocol");