Diffstat (limited to 'net/dccp/proto.c')
-rw-r--r--  net/dccp/proto.c  818
1 file changed, 818 insertions, 0 deletions
diff --git a/net/dccp/proto.c b/net/dccp/proto.c
new file mode 100644
index 000000000000..70284e6afe05
--- /dev/null
+++ b/net/dccp/proto.c
@@ -0,0 +1,818 @@
/*
 *  net/dccp/proto.c
 *
 *  An implementation of the DCCP protocol
 *  Arnaldo Carvalho de Melo <acme@conectiva.com.br>
 *
 *	This program is free software; you can redistribute it and/or modify it
 *	under the terms of the GNU General Public License version 2 as
 *	published by the Free Software Foundation.
 */

#include <linux/config.h>
#include <linux/dccp.h>
#include <linux/module.h>
#include <linux/types.h>
#include <linux/sched.h>
#include <linux/kernel.h>
#include <linux/skbuff.h>
#include <linux/netdevice.h>
#include <linux/in.h>
#include <linux/if_arp.h>
#include <linux/init.h>
#include <linux/random.h>
#include <net/checksum.h>

#include <net/inet_common.h>
#include <net/ip.h>
#include <net/protocol.h>
#include <net/sock.h>
#include <net/xfrm.h>

#include <asm/semaphore.h>
#include <linux/spinlock.h>
#include <linux/timer.h>
#include <linux/delay.h>
#include <linux/poll.h>

#include "ccid.h"
#include "dccp.h"

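/*
 * Per-CPU SNMP (MIB) counters for DCCP.  By the usual SNMP_STAT convention
 * there are two sets: index [0] is updated from softirq (BH) context and
 * [1] from process context.
 */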
DEFINE_SNMP_STAT(struct dccp_mib, dccp_statistics);

atomic_t dccp_orphan_count = ATOMIC_INIT(0);

static struct net_protocol dccp_protocol = {
        .handler     = dccp_v4_rcv,
        .err_handler = dccp_v4_err,
};

const char *dccp_packet_name(const int type)
{
        static const char *dccp_packet_names[] = {
                [DCCP_PKT_REQUEST]  = "REQUEST",
                [DCCP_PKT_RESPONSE] = "RESPONSE",
                [DCCP_PKT_DATA]     = "DATA",
                [DCCP_PKT_ACK]      = "ACK",
                [DCCP_PKT_DATAACK]  = "DATAACK",
                [DCCP_PKT_CLOSEREQ] = "CLOSEREQ",
                [DCCP_PKT_CLOSE]    = "CLOSE",
                [DCCP_PKT_RESET]    = "RESET",
                [DCCP_PKT_SYNC]     = "SYNC",
                [DCCP_PKT_SYNCACK]  = "SYNCACK",
        };

        if (type >= DCCP_NR_PKT_TYPES)
                return "INVALID";
        else
                return dccp_packet_names[type];
}

EXPORT_SYMBOL_GPL(dccp_packet_name);

const char *dccp_state_name(const int state)
{
        static const char *dccp_state_names[] = {
                [DCCP_OPEN]       = "OPEN",
                [DCCP_REQUESTING] = "REQUESTING",
                [DCCP_PARTOPEN]   = "PARTOPEN",
                [DCCP_LISTEN]     = "LISTEN",
                [DCCP_RESPOND]    = "RESPOND",
                [DCCP_CLOSING]    = "CLOSING",
                [DCCP_TIME_WAIT]  = "TIME_WAIT",
                [DCCP_CLOSED]     = "CLOSED",
        };

        if (state >= DCCP_MAX_STATES)
                return "INVALID STATE!";
        else
                return dccp_state_names[state];
}

EXPORT_SYMBOL_GPL(dccp_state_name);

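/*
 * Switch the socket into the listening role and start accepting connection
 * requests; the request queue hash size is borrowed from TCP's
 * TCP_SYNQ_HSIZE.
 */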
static inline int dccp_listen_start(struct sock *sk)
{
        dccp_sk(sk)->dccps_role = DCCP_ROLE_LISTEN;
        return inet_csk_listen_start(sk, TCP_SYNQ_HSIZE);
}

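/*
 * Abort the connection (the RFC 793-style ABORT noted below), purge any
 * queued data and reset the socket so that it can be reused, much like
 * tcp_disconnect() does for TCP.
 */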
int dccp_disconnect(struct sock *sk, int flags)
{
        struct inet_connection_sock *icsk = inet_csk(sk);
        struct inet_sock *inet = inet_sk(sk);
        int err = 0;
        const int old_state = sk->sk_state;

        if (old_state != DCCP_CLOSED)
                dccp_set_state(sk, DCCP_CLOSED);

        /* ABORT function of RFC 793 */
        if (old_state == DCCP_LISTEN) {
                inet_csk_listen_stop(sk);
                /* FIXME: do the active reset thing */
        } else if (old_state == DCCP_REQUESTING)
                sk->sk_err = ECONNRESET;

        dccp_clear_xmit_timers(sk);
        __skb_queue_purge(&sk->sk_receive_queue);
        if (sk->sk_send_head != NULL) {
                __kfree_skb(sk->sk_send_head);
                sk->sk_send_head = NULL;
        }

        inet->dport = 0;

        if (!(sk->sk_userlocks & SOCK_BINDADDR_LOCK))
                inet_reset_saddr(sk);

        sk->sk_shutdown = 0;
        sock_reset_flag(sk, SOCK_DONE);

        icsk->icsk_backoff = 0;
        inet_csk_delack_init(sk);
        __sk_dst_reset(sk);

        BUG_TRAP(!inet->num || icsk->icsk_bind_hash);

        sk->sk_error_report(sk);
        return err;
}

int dccp_ioctl(struct sock *sk, int cmd, unsigned long arg)
{
        dccp_pr_debug("entry\n");
        return -ENOIOCTLCMD;
}

int dccp_setsockopt(struct sock *sk, int level, int optname,
                    char *optval, int optlen)
{
        dccp_pr_debug("entry\n");

        if (level != SOL_DCCP)
                return ip_setsockopt(sk, level, optname, optval, optlen);

        return -EOPNOTSUPP;
}

int dccp_getsockopt(struct sock *sk, int level, int optname,
                    char *optval, int *optlen)
{
        dccp_pr_debug("entry\n");

        if (level != SOL_DCCP)
                return ip_getsockopt(sk, level, optname, optval, optlen);

        return -EOPNOTSUPP;
}

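/*
 * DCCP is datagram-oriented: each sendmsg() call produces at most one
 * packet, so the payload must fit in the cached MSS.  Once the handshake
 * has progressed far enough, the TX CCID is asked when the packet may be
 * transmitted; in the PARTOPEN state an ack is piggybacked to help
 * complete the handshake (see 8.1.5 below).
 */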
int dccp_sendmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *msg,
                 size_t len)
{
        const struct dccp_sock *dp = dccp_sk(sk);
        const int flags = msg->msg_flags;
        const int noblock = flags & MSG_DONTWAIT;
        struct sk_buff *skb;
        int rc, size;
        long timeo;

        if (len > dp->dccps_mss_cache)
                return -EMSGSIZE;

        lock_sock(sk);

        timeo = sock_sndtimeo(sk, noblock);

        /*
         * We have to use sk_stream_wait_connect here to set sk_write_pending,
         * so that the trick in dccp_rcv_request_sent_state_process works.
         */
        /* Wait for a connection to finish. */
        if ((1 << sk->sk_state) & ~(DCCPF_OPEN | DCCPF_PARTOPEN |
                                    DCCPF_CLOSING))
                if ((rc = sk_stream_wait_connect(sk, &timeo)) != 0)
                        goto out_err;

        size = sk->sk_prot->max_header + len;
        release_sock(sk);
        skb = sock_alloc_send_skb(sk, size, noblock, &rc);
        lock_sock(sk);

        if (skb == NULL)
                goto out_release;

        skb_reserve(skb, sk->sk_prot->max_header);
        rc = memcpy_fromiovec(skb_put(skb, len), msg->msg_iov, len);
        if (rc == 0) {
                struct dccp_skb_cb *dcb = DCCP_SKB_CB(skb);
                const struct dccp_ackpkts *ap = dp->dccps_hc_rx_ackpkts;
                long delay;

                /*
                 * XXX: This is just to match the Waikato tree CA interaction
                 * points.  After the CCID3 code is stable and I have a better
                 * understanding of its behaviour I'll change this to look
                 * more like TCP.
                 */
                while (1) {
                        rc = ccid_hc_tx_send_packet(dp->dccps_hc_tx_ccid, sk,
                                                    skb, len, &delay);
                        if (rc == 0)
                                break;
                        if (rc != -EAGAIN)
                                goto out_discard;
                        if (delay > timeo)
                                goto out_discard;
                        release_sock(sk);
                        delay = schedule_timeout(delay);
                        lock_sock(sk);
                        timeo -= delay;
                        if (signal_pending(current))
                                goto out_interrupted;
                        rc = -EPIPE;
                        if (sk->sk_state != DCCP_PARTOPEN &&
                            sk->sk_state != DCCP_OPEN)
                                goto out_discard;
                }

                if (sk->sk_state == DCCP_PARTOPEN) {
                        /* See 8.1.5.  Handshake Completion */
                        inet_csk_schedule_ack(sk);
                        inet_csk_reset_xmit_timer(sk, ICSK_TIME_DACK,
                                                  inet_csk(sk)->icsk_rto,
                                                  TCP_RTO_MAX);
                        dcb->dccpd_type = DCCP_PKT_DATAACK;
                        /*
                         * FIXME: we really should have a dccps_ack_pending
                         * or use icsk.
                         */
                } else if (inet_csk_ack_scheduled(sk) ||
                           (dp->dccps_options.dccpo_send_ack_vector &&
                            ap->dccpap_buf_ackno != DCCP_MAX_SEQNO + 1 &&
                            ap->dccpap_ack_seqno == DCCP_MAX_SEQNO + 1))
                        dcb->dccpd_type = DCCP_PKT_DATAACK;
                else
                        dcb->dccpd_type = DCCP_PKT_DATA;
                dccp_transmit_skb(sk, skb);
                ccid_hc_tx_packet_sent(dp->dccps_hc_tx_ccid, sk, 0, len);
        } else {
out_discard:
                kfree_skb(skb);
        }
out_release:
        release_sock(sk);
        return rc ? : len;
out_err:
        rc = sk_stream_error(sk, flags, rc);
        goto out_release;
out_interrupted:
        rc = sock_intr_errno(timeo);
        goto out_discard;
}

EXPORT_SYMBOL(dccp_sendmsg);

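/*
 * The receive path loosely follows tcp_recvmsg(): walk the receive queue,
 * copying DATA/DATAACK payloads to userspace; RESET and CLOSE packets are
 * treated like TCP treats a FIN.
 */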
int dccp_recvmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *msg,
                 size_t len, int nonblock, int flags, int *addr_len)
{
        const struct dccp_hdr *dh;
        int copied = 0;
        unsigned long used;
        int err;
        int target;             /* Read at least this many bytes */
        long timeo;

        lock_sock(sk);

        err = -ENOTCONN;
        if (sk->sk_state == DCCP_LISTEN)
                goto out;

        timeo = sock_rcvtimeo(sk, nonblock);

        /* Urgent data needs to be handled specially. */
        if (flags & MSG_OOB)
                goto recv_urg;

        /* FIXME */
#if 0
        seq = &tp->copied_seq;
        if (flags & MSG_PEEK) {
                peek_seq = tp->copied_seq;
                seq = &peek_seq;
        }
#endif

        target = sock_rcvlowat(sk, flags & MSG_WAITALL, len);

        do {
                struct sk_buff *skb;
                u32 offset;

                /* FIXME */
#if 0
                /*
                 * Are we at urgent data?  Stop if we have read anything or
                 * have SIGURG pending.
                 */
                if (tp->urg_data && tp->urg_seq == *seq) {
                        if (copied)
                                break;
                        if (signal_pending(current)) {
                                copied = timeo ? sock_intr_errno(timeo) :
                                                 -EAGAIN;
                                break;
                        }
                }
#endif

                /* Next get a buffer. */

                skb = skb_peek(&sk->sk_receive_queue);
                do {
                        if (!skb)
                                break;

                        offset = 0;
                        dh = dccp_hdr(skb);

                        if (dh->dccph_type == DCCP_PKT_DATA ||
                            dh->dccph_type == DCCP_PKT_DATAACK)
                                goto found_ok_skb;

                        if (dh->dccph_type == DCCP_PKT_RESET ||
                            dh->dccph_type == DCCP_PKT_CLOSE) {
                                dccp_pr_debug("found fin ok!\n");
                                goto found_fin_ok;
                        }
                        dccp_pr_debug("packet_type=%s\n",
                                      dccp_packet_name(dh->dccph_type));
                        BUG_TRAP(flags & MSG_PEEK);
                        skb = skb->next;
                } while (skb != (struct sk_buff *)&sk->sk_receive_queue);

                /* Well, if we have backlog, try to process it now. */
                if (copied >= target && !sk->sk_backlog.tail)
                        break;

                if (copied) {
                        if (sk->sk_err ||
                            sk->sk_state == DCCP_CLOSED ||
                            (sk->sk_shutdown & RCV_SHUTDOWN) ||
                            !timeo ||
                            signal_pending(current) ||
                            (flags & MSG_PEEK))
                                break;
                } else {
                        if (sock_flag(sk, SOCK_DONE))
                                break;

                        if (sk->sk_err) {
                                copied = sock_error(sk);
                                break;
                        }

                        if (sk->sk_shutdown & RCV_SHUTDOWN)
                                break;

                        if (sk->sk_state == DCCP_CLOSED) {
                                if (!sock_flag(sk, SOCK_DONE)) {
                                        /*
                                         * This occurs when the user tries to
                                         * read from a never-connected socket.
                                         */
                                        copied = -ENOTCONN;
                                        break;
                                }
                                break;
                        }

                        if (!timeo) {
                                copied = -EAGAIN;
                                break;
                        }

                        if (signal_pending(current)) {
                                copied = sock_intr_errno(timeo);
                                break;
                        }
                }

                /* FIXME: cleanup_rbuf(sk, copied); */

                if (copied >= target) {
                        /* Do not sleep, just process backlog. */
                        release_sock(sk);
                        lock_sock(sk);
                } else
                        sk_wait_data(sk, &timeo);

                continue;

        found_ok_skb:
                /* Ok, so how much can we use? */
                used = skb->len - offset;
                if (len < used)
                        used = len;

                if (!(flags & MSG_TRUNC)) {
                        err = skb_copy_datagram_iovec(skb, offset,
                                                      msg->msg_iov, used);
                        if (err) {
                                /* Exception.  Bailout! */
                                if (!copied)
                                        copied = -EFAULT;
                                break;
                        }
                }

                copied += used;
                len -= used;

                /* FIXME: tcp_rcv_space_adjust(sk); */

/* skip_copy: */
                if (used + offset < skb->len)
                        continue;

                if (!(flags & MSG_PEEK))
                        sk_eat_skb(sk, skb);
                continue;
        found_fin_ok:
                if (!(flags & MSG_PEEK))
                        sk_eat_skb(sk, skb);
                break;

        } while (len > 0);

        /*
         * According to UNIX98, msg_name/msg_namelen are ignored on connected
         * sockets.  I was just happy when I found this 8) --ANK
         */

        /* Clean up data we have read: this will do ACK frames. */
        /* FIXME: cleanup_rbuf(sk, copied); */

        release_sock(sk);
        return copied;

out:
        release_sock(sk);
        return err;

recv_urg:
        /* FIXME: err = tcp_recv_urg(sk, timeo, msg, len, flags, addr_len); */
        goto out;
}

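/*
 * listen() entry point.  If the socket is already listening, only the
 * backlog is adjusted; otherwise the socket is moved into the listening
 * state via dccp_listen_start().
 */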
static int inet_dccp_listen(struct socket *sock, int backlog)
{
        struct sock *sk = sock->sk;
        unsigned char old_state;
        int err;

        lock_sock(sk);

        err = -EINVAL;
        if (sock->state != SS_UNCONNECTED || sock->type != SOCK_DCCP)
                goto out;

        old_state = sk->sk_state;
        if (!((1 << old_state) & (DCCPF_CLOSED | DCCPF_LISTEN)))
                goto out;

        /*
         * Really, if the socket is already in the listen state we can only
         * allow the backlog to be adjusted.
         */
        if (old_state != DCCP_LISTEN) {
                /*
                 * FIXME: here it probably should be sk->sk_prot->listen_start,
                 * see tcp_listen_start.
                 */
                err = dccp_listen_start(sk);
                if (err)
                        goto out;
        }
        sk->sk_max_ack_backlog = backlog;
        err = 0;

out:
        release_sock(sk);
        return err;
}

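/*
 * Per-state transition table for close(): the DCCP_STATE_MASK bits hold the
 * next state, and the DCCP_ACTION_FIN flag tells dccp_close_state() that a
 * CLOSE packet has to be sent as well.
 */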
static const unsigned char dccp_new_state[] = {
        /* current state:       new state:                      action: */
        [0]               = DCCP_CLOSED,
        [DCCP_OPEN]       = DCCP_CLOSING | DCCP_ACTION_FIN,
        [DCCP_REQUESTING] = DCCP_CLOSED,
        [DCCP_PARTOPEN]   = DCCP_CLOSING | DCCP_ACTION_FIN,
        [DCCP_LISTEN]     = DCCP_CLOSED,
        [DCCP_RESPOND]    = DCCP_CLOSED,
        [DCCP_CLOSING]    = DCCP_CLOSED,
        [DCCP_TIME_WAIT]  = DCCP_CLOSED,
        [DCCP_CLOSED]     = DCCP_CLOSED,
};

static int dccp_close_state(struct sock *sk)
{
        const int next = dccp_new_state[sk->sk_state];
        const int ns = next & DCCP_STATE_MASK;

        if (ns != sk->sk_state)
                dccp_set_state(sk, ns);

        return next & DCCP_ACTION_FIN;
}

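/*
 * close() entry point, structured like tcp_close(): drain the receive
 * queue, send a CLOSE if the state table asks for one, then orphan the
 * socket and leave it to the protocol to finish the teardown.
 */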
void dccp_close(struct sock *sk, long timeout)
{
        struct sk_buff *skb;

        lock_sock(sk);

        sk->sk_shutdown = SHUTDOWN_MASK;

        if (sk->sk_state == DCCP_LISTEN) {
                dccp_set_state(sk, DCCP_CLOSED);

                /* Special case. */
                inet_csk_listen_stop(sk);

                goto adjudge_to_death;
        }

        /*
         * We need to flush the receive buffers.  We do this only on the
         * descriptor close, not protocol-sourced closes, because the reader
         * process may not have drained the data yet!
         */
        /* FIXME: check for unread data */
        while ((skb = __skb_dequeue(&sk->sk_receive_queue)) != NULL)
                __kfree_skb(skb);

        if (sock_flag(sk, SOCK_LINGER) && !sk->sk_lingertime) {
                /* Check zero linger _after_ checking for unread data. */
                sk->sk_prot->disconnect(sk, 0);
        } else if (dccp_close_state(sk)) {
                dccp_send_close(sk);
        }

        sk_stream_wait_close(sk, timeout);

adjudge_to_death:
        release_sock(sk);
        /*
         * Now the socket is owned by the kernel and we acquire the BH lock
         * to finish the close.  No need to check for user refs.
         */
        local_bh_disable();
        bh_lock_sock(sk);
        BUG_TRAP(!sock_owned_by_user(sk));

        sock_hold(sk);
        sock_orphan(sk);

        if (sk->sk_state != DCCP_CLOSED)
                dccp_set_state(sk, DCCP_CLOSED);

        atomic_inc(&dccp_orphan_count);
        if (sk->sk_state == DCCP_CLOSED)
                inet_csk_destroy_sock(sk);

        /* Otherwise, the socket is reprieved until protocol close. */

        bh_unlock_sock(sk);
        local_bh_enable();
        sock_put(sk);
}

void dccp_shutdown(struct sock *sk, int how)
{
        dccp_pr_debug("entry\n");
}

struct proto_ops inet_dccp_ops = {
        .family      = PF_INET,
        .owner       = THIS_MODULE,
        .release     = inet_release,
        .bind        = inet_bind,
        .connect     = inet_stream_connect,
        .socketpair  = sock_no_socketpair,
        .accept      = inet_accept,
        .getname     = inet_getname,
        .poll        = sock_no_poll,
        .ioctl       = inet_ioctl,
        /* FIXME: work on inet_listen to rename it to sock_common_listen */
        .listen      = inet_dccp_listen,
        .shutdown    = inet_shutdown,
        .setsockopt  = sock_common_setsockopt,
        .getsockopt  = sock_common_getsockopt,
        .sendmsg     = inet_sendmsg,
        .recvmsg     = sock_common_recvmsg,
        .mmap        = sock_no_mmap,
        .sendpage    = sock_no_sendpage,
};

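/*
 * Describes the SOCK_DCCP/IPPROTO_DCCP socket type to the inet layer;
 * .capability == -1 means no special capability is required to create
 * such a socket.
 */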
extern struct net_proto_family inet_family_ops;

static struct inet_protosw dccp_v4_protosw = {
        .type       = SOCK_DCCP,
        .protocol   = IPPROTO_DCCP,
        .prot       = &dccp_v4_prot,
        .ops        = &inet_dccp_ops,
        .capability = -1,
        .no_check   = 0,
        .flags      = 0,
};

/*
 * This is the global socket data structure used for responding to
 * the Out-of-the-blue (OOTB) packets.  A control sock will be created
 * for this socket at initialization time.
 */
struct socket *dccp_ctl_socket;

static char dccp_ctl_socket_err_msg[] __initdata =
        KERN_ERR "DCCP: Failed to create the control socket.\n";

static int __init dccp_ctl_sock_init(void)
{
        int rc = sock_create_kern(PF_INET, SOCK_DCCP, IPPROTO_DCCP,
                                  &dccp_ctl_socket);
        if (rc < 0)
                printk(dccp_ctl_socket_err_msg);
        else {
                dccp_ctl_socket->sk->sk_allocation = GFP_ATOMIC;
                inet_sk(dccp_ctl_socket->sk)->uc_ttl = -1;

                /*
                 * Unhash it so that IP input processing does not even see it;
                 * we do not wish this socket to see incoming packets.
                 */
                dccp_ctl_socket->sk->sk_prot->unhash(dccp_ctl_socket->sk);
        }

        return rc;
}

static void __exit dccp_ctl_sock_exit(void)
{
        if (dccp_ctl_socket != NULL)
                sock_release(dccp_ctl_socket);
}

static int __init init_dccp_v4_mibs(void)
{
        int rc = -ENOMEM;

        dccp_statistics[0] = alloc_percpu(struct dccp_mib);
        if (dccp_statistics[0] == NULL)
                goto out;

        dccp_statistics[1] = alloc_percpu(struct dccp_mib);
        if (dccp_statistics[1] == NULL)
                goto out_free_one;

        rc = 0;
out:
        return rc;
out_free_one:
        free_percpu(dccp_statistics[0]);
        dccp_statistics[0] = NULL;
        goto out;
}

static int thash_entries;
module_param(thash_entries, int, 0444);
MODULE_PARM_DESC(thash_entries, "Number of ehash buckets");

int dccp_debug;
module_param(dccp_debug, int, 0444);
MODULE_PARM_DESC(dccp_debug, "Enable debug messages");

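/*
 * Module initialization: register the protocol with the socket layer, size
 * and allocate the established ("ehash") and bind ("bhash") hash tables and
 * the MIB counters, then hook DCCP into the IPv4 stack and create the
 * control socket for OOTB responses.
 */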
static int __init dccp_init(void)
{
        unsigned long goal;
        int ehash_order, bhash_order, i;
        int rc = proto_register(&dccp_v4_prot, 1);

        if (rc)
                goto out;

        dccp_hashinfo.bind_bucket_cachep =
                kmem_cache_create("dccp_bind_bucket",
                                  sizeof(struct inet_bind_bucket), 0,
                                  SLAB_HWCACHE_ALIGN, NULL, NULL);
        if (!dccp_hashinfo.bind_bucket_cachep)
                goto out_proto_unregister;

        /*
         * Size and allocate the main established and bind bucket
         * hash tables.
         *
         * The methodology is similar to that of the buffer cache.
         */
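        /*
         * Roughly one page of hash table per 2MB of memory on machines with
         * 512MB or more (assuming 4KB pages), one per 8MB on smaller ones;
         * a nonzero thash_entries module parameter overrides the estimate.
         */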
        if (num_physpages >= (128 * 1024))
                goal = num_physpages >> (21 - PAGE_SHIFT);
        else
                goal = num_physpages >> (23 - PAGE_SHIFT);

        if (thash_entries)
                goal = (thash_entries * sizeof(struct inet_ehash_bucket)) >>
                        PAGE_SHIFT;
        for (ehash_order = 0; (1UL << ehash_order) < goal; ehash_order++)
                ;
        do {
                dccp_hashinfo.ehash_size = (1UL << ehash_order) * PAGE_SIZE /
                                           sizeof(struct inet_ehash_bucket);
                dccp_hashinfo.ehash_size >>= 1;
                while (dccp_hashinfo.ehash_size &
                       (dccp_hashinfo.ehash_size - 1))
                        dccp_hashinfo.ehash_size--;
                dccp_hashinfo.ehash = (struct inet_ehash_bucket *)
                        __get_free_pages(GFP_ATOMIC, ehash_order);
        } while (!dccp_hashinfo.ehash && --ehash_order > 0);

        if (!dccp_hashinfo.ehash) {
                printk(KERN_CRIT "Failed to allocate DCCP "
                                 "established hash table\n");
                goto out_free_bind_bucket_cachep;
        }

        for (i = 0; i < (dccp_hashinfo.ehash_size << 1); i++) {
                rwlock_init(&dccp_hashinfo.ehash[i].lock);
                INIT_HLIST_HEAD(&dccp_hashinfo.ehash[i].chain);
        }

        bhash_order = ehash_order;

        do {
                dccp_hashinfo.bhash_size = (1UL << bhash_order) * PAGE_SIZE /
                                           sizeof(struct inet_bind_hashbucket);
                if ((dccp_hashinfo.bhash_size > (64 * 1024)) &&
                    bhash_order > 0)
                        continue;
                dccp_hashinfo.bhash = (struct inet_bind_hashbucket *)
                        __get_free_pages(GFP_ATOMIC, bhash_order);
        } while (!dccp_hashinfo.bhash && --bhash_order >= 0);

        if (!dccp_hashinfo.bhash) {
                printk(KERN_CRIT "Failed to allocate DCCP bind hash table\n");
                goto out_free_dccp_ehash;
        }

        for (i = 0; i < dccp_hashinfo.bhash_size; i++) {
                spin_lock_init(&dccp_hashinfo.bhash[i].lock);
                INIT_HLIST_HEAD(&dccp_hashinfo.bhash[i].chain);
        }

        if (init_dccp_v4_mibs())
                goto out_free_dccp_bhash;

        rc = -EAGAIN;
        if (inet_add_protocol(&dccp_protocol, IPPROTO_DCCP))
                goto out_free_dccp_v4_mibs;

        inet_register_protosw(&dccp_v4_protosw);

        rc = dccp_ctl_sock_init();
        if (rc)
                goto out_unregister_protosw;
out:
        return rc;
out_unregister_protosw:
        inet_unregister_protosw(&dccp_v4_protosw);
        inet_del_protocol(&dccp_protocol, IPPROTO_DCCP);
out_free_dccp_v4_mibs:
        free_percpu(dccp_statistics[0]);
        free_percpu(dccp_statistics[1]);
        dccp_statistics[0] = dccp_statistics[1] = NULL;
out_free_dccp_bhash:
        free_pages((unsigned long)dccp_hashinfo.bhash, bhash_order);
        dccp_hashinfo.bhash = NULL;
out_free_dccp_ehash:
        free_pages((unsigned long)dccp_hashinfo.ehash, ehash_order);
        dccp_hashinfo.ehash = NULL;
out_free_bind_bucket_cachep:
        kmem_cache_destroy(dccp_hashinfo.bind_bucket_cachep);
        dccp_hashinfo.bind_bucket_cachep = NULL;
out_proto_unregister:
        proto_unregister(&dccp_v4_prot);
        goto out;
}

static const char dccp_del_proto_err_msg[] __exitdata =
        KERN_ERR "can't remove dccp net_protocol\n";

static void __exit dccp_fini(void)
{
        /* Free the control endpoint. */
        dccp_ctl_sock_exit();

        inet_unregister_protosw(&dccp_v4_protosw);

        if (inet_del_protocol(&dccp_protocol, IPPROTO_DCCP) < 0)
                printk(dccp_del_proto_err_msg);

        proto_unregister(&dccp_v4_prot);

        kmem_cache_destroy(dccp_hashinfo.bind_bucket_cachep);
}

module_init(dccp_init);
module_exit(dccp_fini);

/* __stringify doesn't like enums, so use the SOCK_DCCP (6) value directly */
MODULE_ALIAS("net-pf-" __stringify(PF_INET) "-6");
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Arnaldo Carvalho de Melo <acme@conectiva.com.br>");
MODULE_DESCRIPTION("DCCP - Datagram Congestion Control Protocol");