author     David Howells <dhowells@redhat.com>    2007-04-26 18:48:28 -0400
committer  David S. Miller <davem@davemloft.net>  2007-04-26 18:48:28 -0400
commit     17926a79320afa9b95df6b977b40cca6d8713cea (patch)
tree       5cedff43b69520ad17b86783d3752053686ec99c /net/rxrpc/af_rxrpc.c
parent     e19dff1fdd99a25819af74cf0710e147fff4fd3a (diff)
[AF_RXRPC]: Provide secure RxRPC sockets for use by userspace and kernel both
Provide AF_RXRPC sockets that can be used to talk to AFS servers, or serve
answers to AFS clients.  KerberosIV security is fully supported.  The patches
and some example test programs can be found in:

	http://people.redhat.com/~dhowells/rxrpc/

This will eventually replace the old implementation of kernel-only RxRPC
currently resident in net/rxrpc/.

Signed-off-by: David Howells <dhowells@redhat.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
Diffstat (limited to 'net/rxrpc/af_rxrpc.c')
-rw-r--r--  net/rxrpc/af_rxrpc.c  754
1 files changed, 754 insertions(+), 0 deletions(-)
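
As a rough illustration of the userspace API described in the commit message, here is a minimal client sketch that creates an AF_RXRPC socket and targets it at a server, following the checks made by rxrpc_create(), rxrpc_validate_address() and rxrpc_connect() in the patch below.  The struct sockaddr_rxrpc declaration, the AF_RXRPC value of 33, and the AFS VL service ID/port (52 on UDP 7003) are assumptions for illustration only; they come from related headers and AFS convention, not from this file.

/*
 * Illustrative client sketch only -- not part of the patch.  The
 * sockaddr_rxrpc layout mirrors the fields validated by
 * rxrpc_validate_address(); AF_RXRPC (33) and the AFS VL service
 * (service 52 on UDP port 7003) are assumed values for the example.
 */
#include <stdio.h>
#include <string.h>
#include <unistd.h>
#include <sys/socket.h>
#include <netinet/in.h>
#include <arpa/inet.h>

#ifndef AF_RXRPC
#define AF_RXRPC 33			/* assumed address family number */
#endif

struct sockaddr_rxrpc {			/* assumed to match the kernel's layout */
	sa_family_t	srx_family;	/* AF_RXRPC */
	unsigned short	srx_service;	/* service ID to contact */
	unsigned short	transport_type;	/* SOCK_DGRAM */
	unsigned short	transport_len;	/* length of transport address used */
	union {
		sa_family_t		family;
		struct sockaddr_in	sin;	/* IPv4 transport address */
		struct sockaddr_in6	sin6;	/* IPv6 transport address */
	} transport;
};

int main(void)
{
	struct sockaddr_rxrpc srx;
	int fd;

	/* the protocol argument selects the transport family;
	 * rxrpc_create() only accepts PF_INET (UDP over IPv4) here */
	fd = socket(AF_RXRPC, SOCK_DGRAM, PF_INET);
	if (fd < 0) {
		perror("socket");
		return 1;
	}

	memset(&srx, 0, sizeof(srx));
	srx.srx_family = AF_RXRPC;
	srx.srx_service = 52;			/* example: AFS VL service */
	srx.transport_type = SOCK_DGRAM;
	srx.transport_len = sizeof(srx.transport.sin);
	srx.transport.sin.sin_family = AF_INET;
	srx.transport.sin.sin_port = htons(7003);
	inet_pton(AF_INET, "192.0.2.1", &srx.transport.sin.sin_addr);

	/* rxrpc_connect() merely records the target; no connection
	 * negotiation happens until a call is made */
	if (connect(fd, (struct sockaddr *)&srx, sizeof(srx)) < 0) {
		perror("connect");
		close(fd);
		return 1;
	}

	close(fd);
	return 0;
}

Actual calls would then be issued with sendmsg()/recvmsg(), using the control-data interface handled by rxrpc_sendmsg() in the patch.
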
diff --git a/net/rxrpc/af_rxrpc.c b/net/rxrpc/af_rxrpc.c
new file mode 100644
index 00000000000..bfa8822e228
--- /dev/null
+++ b/net/rxrpc/af_rxrpc.c
@@ -0,0 +1,754 @@
1/* AF_RXRPC implementation
2 *
3 * Copyright (C) 2007 Red Hat, Inc. All Rights Reserved.
4 * Written by David Howells (dhowells@redhat.com)
5 *
6 * This program is free software; you can redistribute it and/or
7 * modify it under the terms of the GNU General Public License
8 * as published by the Free Software Foundation; either version
9 * 2 of the License, or (at your option) any later version.
10 */
11
12#include <linux/module.h>
13#include <linux/net.h>
14#include <linux/skbuff.h>
15#include <linux/poll.h>
16#include <linux/proc_fs.h>
17#include <net/sock.h>
18#include <net/af_rxrpc.h>
19#include "ar-internal.h"
20
21MODULE_DESCRIPTION("RxRPC network protocol");
22MODULE_AUTHOR("Red Hat, Inc.");
23MODULE_LICENSE("GPL");
24MODULE_ALIAS_NETPROTO(PF_RXRPC);
25
26unsigned rxrpc_debug; // = RXRPC_DEBUG_KPROTO;
27module_param_named(debug, rxrpc_debug, uint, S_IWUSR | S_IRUGO);
28MODULE_PARM_DESC(rxrpc_debug, "RxRPC debugging mask");
29
30static int sysctl_rxrpc_max_qlen __read_mostly = 10;
31
32static struct proto rxrpc_proto;
33static const struct proto_ops rxrpc_rpc_ops;
34
35/* local epoch for detecting local-end reset */
36__be32 rxrpc_epoch;
37
38/* current debugging ID */
39atomic_t rxrpc_debug_id;
40
41/* count of skbs currently in use */
42atomic_t rxrpc_n_skbs;
43
44static void rxrpc_sock_destructor(struct sock *);
45
46/*
47 * see if an RxRPC socket is currently writable
48 */
49static inline int rxrpc_writable(struct sock *sk)
50{
51 return atomic_read(&sk->sk_wmem_alloc) < (size_t) sk->sk_sndbuf;
52}
53
54/*
55 * wait for write bufferage to become available
56 */
57static void rxrpc_write_space(struct sock *sk)
58{
59 _enter("%p", sk);
60 read_lock(&sk->sk_callback_lock);
61 if (rxrpc_writable(sk)) {
62 if (sk->sk_sleep && waitqueue_active(sk->sk_sleep))
63 wake_up_interruptible(sk->sk_sleep);
64 sk_wake_async(sk, 2, POLL_OUT);
65 }
66 read_unlock(&sk->sk_callback_lock);
67}
68
69/*
70 * validate an RxRPC address
71 */
72static int rxrpc_validate_address(struct rxrpc_sock *rx,
73 struct sockaddr_rxrpc *srx,
74 int len)
75{
76 if (len < sizeof(struct sockaddr_rxrpc))
77 return -EINVAL;
78
79 if (srx->srx_family != AF_RXRPC)
80 return -EAFNOSUPPORT;
81
82 if (srx->transport_type != SOCK_DGRAM)
83 return -ESOCKTNOSUPPORT;
84
85 len -= offsetof(struct sockaddr_rxrpc, transport);
86 if (srx->transport_len < sizeof(sa_family_t) ||
87 srx->transport_len > len)
88 return -EINVAL;
89
90 if (srx->transport.family != rx->proto)
91 return -EAFNOSUPPORT;
92
93 switch (srx->transport.family) {
94 case AF_INET:
95 _debug("INET: %x @ %u.%u.%u.%u",
96 ntohs(srx->transport.sin.sin_port),
97 NIPQUAD(srx->transport.sin.sin_addr));
98 if (srx->transport_len > 8)
99 memset((void *)&srx->transport + 8, 0,
100 srx->transport_len - 8);
101 break;
102
103 case AF_INET6:
104 default:
105 return -EAFNOSUPPORT;
106 }
107
108 return 0;
109}
110
111/*
112 * bind a local address to an RxRPC socket
113 */
114static int rxrpc_bind(struct socket *sock, struct sockaddr *saddr, int len)
115{
116 struct sockaddr_rxrpc *srx = (struct sockaddr_rxrpc *) saddr;
117 struct sock *sk = sock->sk;
118 struct rxrpc_local *local;
119 struct rxrpc_sock *rx = rxrpc_sk(sk), *prx;
120 __be16 service_id;
121 int ret;
122
123 _enter("%p,%p,%d", rx, saddr, len);
124
125 ret = rxrpc_validate_address(rx, srx, len);
126 if (ret < 0)
127 goto error;
128
129 lock_sock(&rx->sk);
130
131 if (rx->sk.sk_state != RXRPC_UNCONNECTED) {
132 ret = -EINVAL;
133 goto error_unlock;
134 }
135
136 memcpy(&rx->srx, srx, sizeof(rx->srx));
137
138 /* find a local transport endpoint if we don't have one already */
139 local = rxrpc_lookup_local(&rx->srx);
140 if (IS_ERR(local)) {
141 ret = PTR_ERR(local);
142 goto error_unlock;
143 }
144
145 rx->local = local;
146 if (srx->srx_service) {
147 service_id = htons(srx->srx_service);
148 write_lock_bh(&local->services_lock);
149 list_for_each_entry(prx, &local->services, listen_link) {
150 if (prx->service_id == service_id)
151 goto service_in_use;
152 }
153
154 rx->service_id = service_id;
155 list_add_tail(&rx->listen_link, &local->services);
156 write_unlock_bh(&local->services_lock);
157
158 rx->sk.sk_state = RXRPC_SERVER_BOUND;
159 } else {
160 rx->sk.sk_state = RXRPC_CLIENT_BOUND;
161 }
162
163 release_sock(&rx->sk);
164 _leave(" = 0");
165 return 0;
166
167service_in_use:
168 ret = -EADDRINUSE;
169 write_unlock_bh(&local->services_lock);
170error_unlock:
171 release_sock(&rx->sk);
172error:
173 _leave(" = %d", ret);
174 return ret;
175}
176
177/*
178 * set the number of pending calls permitted on a listening socket
179 */
180static int rxrpc_listen(struct socket *sock, int backlog)
181{
182 struct sock *sk = sock->sk;
183 struct rxrpc_sock *rx = rxrpc_sk(sk);
184 int ret;
185
186 _enter("%p,%d", rx, backlog);
187
188 lock_sock(&rx->sk);
189
190 switch (rx->sk.sk_state) {
191 case RXRPC_UNCONNECTED:
192 ret = -EADDRNOTAVAIL;
193 break;
194 case RXRPC_CLIENT_BOUND:
195 case RXRPC_CLIENT_CONNECTED:
196 default:
197 ret = -EBUSY;
198 break;
199 case RXRPC_SERVER_BOUND:
200 ASSERT(rx->local != NULL);
201 sk->sk_max_ack_backlog = backlog;
202 rx->sk.sk_state = RXRPC_SERVER_LISTENING;
203 ret = 0;
204 break;
205 }
206
207 release_sock(&rx->sk);
208 _leave(" = %d", ret);
209 return ret;
210}
211
212/*
213 * find a transport by address
214 */
215static struct rxrpc_transport *rxrpc_name_to_transport(struct socket *sock,
216 struct sockaddr *addr,
217 int addr_len, int flags)
218{
219 struct sockaddr_rxrpc *srx = (struct sockaddr_rxrpc *) addr;
220 struct rxrpc_transport *trans;
221 struct rxrpc_sock *rx = rxrpc_sk(sock->sk);
222 struct rxrpc_peer *peer;
223
224 _enter("%p,%p,%d,%d", rx, addr, addr_len, flags);
225
226 ASSERT(rx->local != NULL);
227 ASSERT(rx->sk.sk_state > RXRPC_UNCONNECTED);
228
229 if (rx->srx.transport_type != srx->transport_type)
230 return ERR_PTR(-ESOCKTNOSUPPORT);
231 if (rx->srx.transport.family != srx->transport.family)
232 return ERR_PTR(-EAFNOSUPPORT);
233
234 /* find a remote transport endpoint from the local one */
235 peer = rxrpc_get_peer(srx, GFP_KERNEL);
236 if (IS_ERR(peer))
237 return ERR_PTR(PTR_ERR(peer));
238
239 /* find a transport */
240 trans = rxrpc_get_transport(rx->local, peer, GFP_KERNEL);
241 rxrpc_put_peer(peer);
242 _leave(" = %p", trans);
243 return trans;
244}
245
246/*
247 * connect an RxRPC socket
248 * - this just targets it at a specific destination; no actual connection
249 * negotiation takes place
250 */
251static int rxrpc_connect(struct socket *sock, struct sockaddr *addr,
252 int addr_len, int flags)
253{
254 struct sockaddr_rxrpc *srx = (struct sockaddr_rxrpc *) addr;
255 struct sock *sk = sock->sk;
256 struct rxrpc_transport *trans;
257 struct rxrpc_local *local;
258 struct rxrpc_sock *rx = rxrpc_sk(sk);
259 int ret;
260
261 _enter("%p,%p,%d,%d", rx, addr, addr_len, flags);
262
263 ret = rxrpc_validate_address(rx, srx, addr_len);
264 if (ret < 0) {
265 _leave(" = %d [bad addr]", ret);
266 return ret;
267 }
268
269 lock_sock(&rx->sk);
270
271 switch (rx->sk.sk_state) {
272 case RXRPC_UNCONNECTED:
273 /* find a local transport endpoint if we don't have one already */
274 ASSERTCMP(rx->local, ==, NULL);
275 rx->srx.srx_family = AF_RXRPC;
276 rx->srx.srx_service = 0;
277 rx->srx.transport_type = srx->transport_type;
278 rx->srx.transport_len = sizeof(sa_family_t);
279 rx->srx.transport.family = srx->transport.family;
280 local = rxrpc_lookup_local(&rx->srx);
281 if (IS_ERR(local)) {
282 release_sock(&rx->sk);
283 return PTR_ERR(local);
284 }
285 rx->local = local;
286 rx->sk.sk_state = RXRPC_CLIENT_BOUND;
287 case RXRPC_CLIENT_BOUND:
288 break;
289 case RXRPC_CLIENT_CONNECTED:
290 release_sock(&rx->sk);
291 return -EISCONN;
292 default:
293 release_sock(&rx->sk);
294 return -EBUSY; /* server sockets can't connect as well */
295 }
296
297 trans = rxrpc_name_to_transport(sock, addr, addr_len, flags);
298 if (IS_ERR(trans)) {
299 release_sock(&rx->sk);
300 _leave(" = %ld", PTR_ERR(trans));
301 return PTR_ERR(trans);
302 }
303
304 rx->trans = trans;
305 rx->service_id = htons(srx->srx_service);
306 rx->sk.sk_state = RXRPC_CLIENT_CONNECTED;
307
308 release_sock(&rx->sk);
309 return 0;
310}
311
312/*
313 * send a message through an RxRPC socket
314 * - in a client this does a number of things:
315 * - finds/sets up a connection for the security specified (if any)
316 * - initiates a call (ID in control data)
317 * - ends the request phase of a call (if MSG_MORE is not set)
318 * - sends a call data packet
319 * - may send an abort (abort code in control data)
320 */
321static int rxrpc_sendmsg(struct kiocb *iocb, struct socket *sock,
322 struct msghdr *m, size_t len)
323{
324 struct rxrpc_transport *trans;
325 struct rxrpc_sock *rx = rxrpc_sk(sock->sk);
326 int ret;
327
328 _enter(",{%d},,%zu", rx->sk.sk_state, len);
329
330 if (m->msg_flags & MSG_OOB)
331 return -EOPNOTSUPP;
332
333 if (m->msg_name) {
334 ret = rxrpc_validate_address(rx, m->msg_name, m->msg_namelen);
335 if (ret < 0) {
336 _leave(" = %d [bad addr]", ret);
337 return ret;
338 }
339 }
340
341 trans = NULL;
342 lock_sock(&rx->sk);
343
344 if (m->msg_name) {
345 ret = -EISCONN;
346 trans = rxrpc_name_to_transport(sock, m->msg_name,
347 m->msg_namelen, 0);
348 if (IS_ERR(trans)) {
349 ret = PTR_ERR(trans);
350 trans = NULL;
351 goto out;
352 }
353 } else {
354 trans = rx->trans;
355 if (trans)
356 atomic_inc(&trans->usage);
357 }
358
359 switch (rx->sk.sk_state) {
360 case RXRPC_SERVER_LISTENING:
361 if (!m->msg_name) {
362 ret = rxrpc_server_sendmsg(iocb, rx, m, len);
363 break;
364 }
365 case RXRPC_SERVER_BOUND:
366 case RXRPC_CLIENT_BOUND:
367 if (!m->msg_name) {
368 ret = -ENOTCONN;
369 break;
370 }
371 case RXRPC_CLIENT_CONNECTED:
372 ret = rxrpc_client_sendmsg(iocb, rx, trans, m, len);
373 break;
374 default:
375 ret = -ENOTCONN;
376 break;
377 }
378
379out:
380 release_sock(&rx->sk);
381 if (trans)
382 rxrpc_put_transport(trans);
383 _leave(" = %d", ret);
384 return ret;
385}
386
387/*
388 * set RxRPC socket options
389 */
390static int rxrpc_setsockopt(struct socket *sock, int level, int optname,
391 char __user *optval, int optlen)
392{
393 struct rxrpc_sock *rx = rxrpc_sk(sock->sk);
394 unsigned min_sec_level;
395 int ret;
396
397 _enter(",%d,%d,,%d", level, optname, optlen);
398
399 lock_sock(&rx->sk);
400 ret = -EOPNOTSUPP;
401
402 if (level == SOL_RXRPC) {
403 switch (optname) {
404 case RXRPC_EXCLUSIVE_CONNECTION:
405 ret = -EINVAL;
406 if (optlen != 0)
407 goto error;
408 ret = -EISCONN;
409 if (rx->sk.sk_state != RXRPC_UNCONNECTED)
410 goto error;
411 set_bit(RXRPC_SOCK_EXCLUSIVE_CONN, &rx->flags);
412 goto success;
413
414 case RXRPC_SECURITY_KEY:
415 ret = -EINVAL;
416 if (rx->key)
417 goto error;
418 ret = -EISCONN;
419 if (rx->sk.sk_state != RXRPC_UNCONNECTED)
420 goto error;
421 ret = rxrpc_request_key(rx, optval, optlen);
422 goto error;
423
424 case RXRPC_SECURITY_KEYRING:
425 ret = -EINVAL;
426 if (rx->key)
427 goto error;
428 ret = -EISCONN;
429 if (rx->sk.sk_state != RXRPC_UNCONNECTED)
430 goto error;
431 ret = rxrpc_server_keyring(rx, optval, optlen);
432 goto error;
433
434 case RXRPC_MIN_SECURITY_LEVEL:
435 ret = -EINVAL;
436 if (optlen != sizeof(unsigned))
437 goto error;
438 ret = -EISCONN;
439 if (rx->sk.sk_state != RXRPC_UNCONNECTED)
440 goto error;
441 ret = get_user(min_sec_level,
442 (unsigned __user *) optval);
443 if (ret < 0)
444 goto error;
445 ret = -EINVAL;
446 if (min_sec_level > RXRPC_SECURITY_MAX)
447 goto error;
448 rx->min_sec_level = min_sec_level;
449 goto success;
450
451 default:
452 break;
453 }
454 }
455
456success:
457 ret = 0;
458error:
459 release_sock(&rx->sk);
460 return ret;
461}
462
463/*
464 * permit an RxRPC socket to be polled
465 */
466static unsigned int rxrpc_poll(struct file *file, struct socket *sock,
467 poll_table *wait)
468{
469 unsigned int mask;
470 struct sock *sk = sock->sk;
471
472 poll_wait(file, sk->sk_sleep, wait);
473 mask = 0;
474
475 /* the socket is readable if there are any messages waiting on the Rx
476 * queue */
477 if (!skb_queue_empty(&sk->sk_receive_queue))
478 mask |= POLLIN | POLLRDNORM;
479
480 /* the socket is writable if there is space to add new data to the
481 * socket; there is no guarantee that any particular call in progress
482 * on the socket may have space in the Tx ACK window */
483 if (rxrpc_writable(sk))
484 mask |= POLLOUT | POLLWRNORM;
485
486 return mask;
487}
488
489/*
490 * create an RxRPC socket
491 */
492static int rxrpc_create(struct socket *sock, int protocol)
493{
494 struct rxrpc_sock *rx;
495 struct sock *sk;
496
497 _enter("%p,%d", sock, protocol);
498
499 /* we support transport protocol UDP only */
500 if (protocol != PF_INET)
501 return -EPROTONOSUPPORT;
502
503 if (sock->type != SOCK_DGRAM)
504 return -ESOCKTNOSUPPORT;
505
506 sock->ops = &rxrpc_rpc_ops;
507 sock->state = SS_UNCONNECTED;
508
509 sk = sk_alloc(PF_RXRPC, GFP_KERNEL, &rxrpc_proto, 1);
510 if (!sk)
511 return -ENOMEM;
512
513 sock_init_data(sock, sk);
514 sk->sk_state = RXRPC_UNCONNECTED;
515 sk->sk_write_space = rxrpc_write_space;
516 sk->sk_max_ack_backlog = sysctl_rxrpc_max_qlen;
517 sk->sk_destruct = rxrpc_sock_destructor;
518
519 rx = rxrpc_sk(sk);
520 rx->proto = protocol;
521 rx->calls = RB_ROOT;
522
523 INIT_LIST_HEAD(&rx->listen_link);
524 INIT_LIST_HEAD(&rx->secureq);
525 INIT_LIST_HEAD(&rx->acceptq);
526 rwlock_init(&rx->call_lock);
527 memset(&rx->srx, 0, sizeof(rx->srx));
528
529 _leave(" = 0 [%p]", rx);
530 return 0;
531}
532
533/*
534 * RxRPC socket destructor
535 */
536static void rxrpc_sock_destructor(struct sock *sk)
537{
538 _enter("%p", sk);
539
540 rxrpc_purge_queue(&sk->sk_receive_queue);
541
542 BUG_TRAP(!atomic_read(&sk->sk_wmem_alloc));
543 BUG_TRAP(sk_unhashed(sk));
544 BUG_TRAP(!sk->sk_socket);
545
546 if (!sock_flag(sk, SOCK_DEAD)) {
547 printk("Attempt to release alive rxrpc socket: %p\n", sk);
548 return;
549 }
550}
551
552/*
553 * release an RxRPC socket
554 */
555static int rxrpc_release_sock(struct sock *sk)
556{
557 struct rxrpc_sock *rx = rxrpc_sk(sk);
558
559 _enter("%p{%d,%d}", sk, sk->sk_state, atomic_read(&sk->sk_refcnt));
560
561 /* declare the socket closed for business */
562 sock_orphan(sk);
563 sk->sk_shutdown = SHUTDOWN_MASK;
564
565 spin_lock_bh(&sk->sk_receive_queue.lock);
566 sk->sk_state = RXRPC_CLOSE;
567 spin_unlock_bh(&sk->sk_receive_queue.lock);
568
569 ASSERTCMP(rx->listen_link.next, !=, LIST_POISON1);
570
571 if (!list_empty(&rx->listen_link)) {
572 write_lock_bh(&rx->local->services_lock);
573 list_del(&rx->listen_link);
574 write_unlock_bh(&rx->local->services_lock);
575 }
576
577 /* try to flush out this socket */
578 rxrpc_release_calls_on_socket(rx);
579 flush_scheduled_work();
580 rxrpc_purge_queue(&sk->sk_receive_queue);
581
582 if (rx->conn) {
583 rxrpc_put_connection(rx->conn);
584 rx->conn = NULL;
585 }
586
587 if (rx->bundle) {
588 rxrpc_put_bundle(rx->trans, rx->bundle);
589 rx->bundle = NULL;
590 }
591 if (rx->trans) {
592 rxrpc_put_transport(rx->trans);
593 rx->trans = NULL;
594 }
595 if (rx->local) {
596 rxrpc_put_local(rx->local);
597 rx->local = NULL;
598 }
599
600 key_put(rx->key);
601 rx->key = NULL;
602 key_put(rx->securities);
603 rx->securities = NULL;
604 sock_put(sk);
605
606 _leave(" = 0");
607 return 0;
608}
609
610/*
611 * release an RxRPC BSD socket on close() or equivalent
612 */
613static int rxrpc_release(struct socket *sock)
614{
615 struct sock *sk = sock->sk;
616
617 _enter("%p{%p}", sock, sk);
618
619 if (!sk)
620 return 0;
621
622 sock->sk = NULL;
623
624 return rxrpc_release_sock(sk);
625}
626
627/*
628 * RxRPC network protocol
629 */
630static const struct proto_ops rxrpc_rpc_ops = {
631 .family = PF_UNIX,
632 .owner = THIS_MODULE,
633 .release = rxrpc_release,
634 .bind = rxrpc_bind,
635 .connect = rxrpc_connect,
636 .socketpair = sock_no_socketpair,
637 .accept = sock_no_accept,
638 .getname = sock_no_getname,
639 .poll = rxrpc_poll,
640 .ioctl = sock_no_ioctl,
641 .listen = rxrpc_listen,
642 .shutdown = sock_no_shutdown,
643 .setsockopt = rxrpc_setsockopt,
644 .getsockopt = sock_no_getsockopt,
645 .sendmsg = rxrpc_sendmsg,
646 .recvmsg = rxrpc_recvmsg,
647 .mmap = sock_no_mmap,
648 .sendpage = sock_no_sendpage,
649};
650
651static struct proto rxrpc_proto = {
652 .name = "RXRPC",
653 .owner = THIS_MODULE,
654 .obj_size = sizeof(struct rxrpc_sock),
655 .max_header = sizeof(struct rxrpc_header),
656};
657
658static struct net_proto_family rxrpc_family_ops = {
659 .family = PF_RXRPC,
660 .create = rxrpc_create,
661 .owner = THIS_MODULE,
662};
663
664/*
665 * initialise and register the RxRPC protocol
666 */
667static int __init af_rxrpc_init(void)
668{
669 struct sk_buff *dummy_skb;
670 int ret = -1;
671
672 BUILD_BUG_ON(sizeof(struct rxrpc_skb_priv) > sizeof(dummy_skb->cb));
673
674 rxrpc_epoch = htonl(xtime.tv_sec);
675
676 rxrpc_call_jar = kmem_cache_create(
677 "rxrpc_call_jar", sizeof(struct rxrpc_call), 0,
678 SLAB_HWCACHE_ALIGN, NULL, NULL);
679 if (!rxrpc_call_jar) {
680 printk(KERN_NOTICE "RxRPC: Failed to allocate call jar\n");
681 ret = -ENOMEM;
682 goto error_call_jar;
683 }
684
685 ret = proto_register(&rxrpc_proto, 1);
686 if (ret < 0) {
687 printk(KERN_CRIT "RxRPC: Cannot register protocol\n");
688 goto error_proto;
689 }
690
691 ret = sock_register(&rxrpc_family_ops);
692 if (ret < 0) {
693 printk(KERN_CRIT "RxRPC: Cannot register socket family\n");
694 goto error_sock;
695 }
696
697 ret = register_key_type(&key_type_rxrpc);
698 if (ret < 0) {
699 printk(KERN_CRIT "RxRPC: Cannot register client key type\n");
700 goto error_key_type;
701 }
702
703 ret = register_key_type(&key_type_rxrpc_s);
704 if (ret < 0) {
705 printk(KERN_CRIT "RxRPC: Cannot register server key type\n");
706 goto error_key_type_s;
707 }
708
709#ifdef CONFIG_PROC_FS
710 proc_net_fops_create("rxrpc_calls", 0, &rxrpc_call_seq_fops);
711 proc_net_fops_create("rxrpc_conns", 0, &rxrpc_connection_seq_fops);
712#endif
713 return 0;
714
715error_key_type_s:
716 unregister_key_type(&key_type_rxrpc);
717error_key_type:
718 sock_unregister(PF_RXRPC);
719error_sock:
720 proto_unregister(&rxrpc_proto);
721error_proto:
722 kmem_cache_destroy(rxrpc_call_jar);
723error_call_jar:
724 return ret;
725}
726
727/*
728 * unregister the RxRPC protocol
729 */
730static void __exit af_rxrpc_exit(void)
731{
732 _enter("");
733 unregister_key_type(&key_type_rxrpc_s);
734 unregister_key_type(&key_type_rxrpc);
735 sock_unregister(PF_RXRPC);
736 proto_unregister(&rxrpc_proto);
737 rxrpc_destroy_all_calls();
738 rxrpc_destroy_all_connections();
739 rxrpc_destroy_all_transports();
740 rxrpc_destroy_all_peers();
741 rxrpc_destroy_all_locals();
742
743 ASSERTCMP(atomic_read(&rxrpc_n_skbs), ==, 0);
744
745 _debug("flush scheduled work");
746 flush_scheduled_work();
747 proc_net_remove("rxrpc_conns");
748 proc_net_remove("rxrpc_calls");
749 kmem_cache_destroy(rxrpc_call_jar);
750 _leave("");
751}
752
753module_init(af_rxrpc_init);
754module_exit(af_rxrpc_exit);
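
For completeness, a matching server-side sketch, reusing the headers and the assumed struct sockaddr_rxrpc declaration from the client sketch above.  Binding with a non-zero srx_service registers the socket against that service in rxrpc_bind(), and listen() moves it to RXRPC_SERVER_LISTENING so incoming calls can be queued.  The service ID and UDP port below are placeholders.

/* Illustrative server sketch only -- not part of the patch. */
int example_rxrpc_serve(unsigned short service, unsigned short udp_port)
{
	struct sockaddr_rxrpc srx;
	int fd;

	fd = socket(AF_RXRPC, SOCK_DGRAM, PF_INET);
	if (fd < 0)
		return -1;

	memset(&srx, 0, sizeof(srx));
	srx.srx_family = AF_RXRPC;
	srx.srx_service = service;	/* non-zero => rxrpc_bind() registers a server */
	srx.transport_type = SOCK_DGRAM;
	srx.transport_len = sizeof(srx.transport.sin);
	srx.transport.sin.sin_family = AF_INET;
	srx.transport.sin.sin_port = htons(udp_port);
	srx.transport.sin.sin_addr.s_addr = htonl(INADDR_ANY);

	/* bind() takes the socket from RXRPC_UNCONNECTED to
	 * RXRPC_SERVER_BOUND; listen() then permits incoming calls */
	if (bind(fd, (struct sockaddr *)&srx, sizeof(srx)) < 0 ||
	    listen(fd, 5) < 0) {
		close(fd);
		return -1;
	}

	return fd;
}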