Diffstat (limited to 'net/rxrpc/af_rxrpc.c')
-rw-r--r-- | net/rxrpc/af_rxrpc.c | 879
1 file changed, 879 insertions, 0 deletions
diff --git a/net/rxrpc/af_rxrpc.c b/net/rxrpc/af_rxrpc.c
new file mode 100644
index 000000000000..2c57df9c131b
--- /dev/null
+++ b/net/rxrpc/af_rxrpc.c
@@ -0,0 +1,879 @@
/* AF_RXRPC implementation
 *
 * Copyright (C) 2007 Red Hat, Inc. All Rights Reserved.
 * Written by David Howells (dhowells@redhat.com)
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 */

#include <linux/module.h>
#include <linux/net.h>
#include <linux/skbuff.h>
#include <linux/poll.h>
#include <linux/proc_fs.h>
#include <net/sock.h>
#include <net/af_rxrpc.h>
#include "ar-internal.h"

MODULE_DESCRIPTION("RxRPC network protocol");
MODULE_AUTHOR("Red Hat, Inc.");
MODULE_LICENSE("GPL");
MODULE_ALIAS_NETPROTO(PF_RXRPC);

unsigned rxrpc_debug; // = RXRPC_DEBUG_KPROTO;
module_param_named(debug, rxrpc_debug, uint, S_IWUSR | S_IRUGO);
MODULE_PARM_DESC(rxrpc_debug, "RxRPC debugging mask");

static int sysctl_rxrpc_max_qlen __read_mostly = 10;

static struct proto rxrpc_proto;
static const struct proto_ops rxrpc_rpc_ops;

/* local epoch for detecting local-end reset */
__be32 rxrpc_epoch;

/* current debugging ID */
atomic_t rxrpc_debug_id;

/* count of skbs currently in use */
atomic_t rxrpc_n_skbs;

struct workqueue_struct *rxrpc_workqueue;

static void rxrpc_sock_destructor(struct sock *);

/*
 * see if an RxRPC socket is currently writable
 */
static inline int rxrpc_writable(struct sock *sk)
{
        return atomic_read(&sk->sk_wmem_alloc) < (size_t) sk->sk_sndbuf;
}

/*
 * wait for write bufferage to become available
 */
static void rxrpc_write_space(struct sock *sk)
{
        _enter("%p", sk);
        read_lock(&sk->sk_callback_lock);
        if (rxrpc_writable(sk)) {
                if (sk->sk_sleep && waitqueue_active(sk->sk_sleep))
                        wake_up_interruptible(sk->sk_sleep);
                sk_wake_async(sk, 2, POLL_OUT);
        }
        read_unlock(&sk->sk_callback_lock);
}

/*
 * validate an RxRPC address
 */
static int rxrpc_validate_address(struct rxrpc_sock *rx,
                                  struct sockaddr_rxrpc *srx,
                                  int len)
{
        if (len < sizeof(struct sockaddr_rxrpc))
                return -EINVAL;

        if (srx->srx_family != AF_RXRPC)
                return -EAFNOSUPPORT;

        if (srx->transport_type != SOCK_DGRAM)
                return -ESOCKTNOSUPPORT;

        len -= offsetof(struct sockaddr_rxrpc, transport);
        if (srx->transport_len < sizeof(sa_family_t) ||
            srx->transport_len > len)
                return -EINVAL;

        if (srx->transport.family != rx->proto)
                return -EAFNOSUPPORT;

        switch (srx->transport.family) {
        case AF_INET:
                _debug("INET: %x @ %u.%u.%u.%u",
                       ntohs(srx->transport.sin.sin_port),
                       NIPQUAD(srx->transport.sin.sin_addr));
                if (srx->transport_len > 8)
                        memset((void *)&srx->transport + 8, 0,
                               srx->transport_len - 8);
                break;

        case AF_INET6:
        default:
                return -EAFNOSUPPORT;
        }

        return 0;
}

/*
 * bind a local address to an RxRPC socket
 */
static int rxrpc_bind(struct socket *sock, struct sockaddr *saddr, int len)
{
        struct sockaddr_rxrpc *srx = (struct sockaddr_rxrpc *) saddr;
        struct sock *sk = sock->sk;
        struct rxrpc_local *local;
        struct rxrpc_sock *rx = rxrpc_sk(sk), *prx;
        __be16 service_id;
        int ret;

        _enter("%p,%p,%d", rx, saddr, len);

        ret = rxrpc_validate_address(rx, srx, len);
        if (ret < 0)
                goto error;

        lock_sock(&rx->sk);

        if (rx->sk.sk_state != RXRPC_UNCONNECTED) {
                ret = -EINVAL;
                goto error_unlock;
        }

        memcpy(&rx->srx, srx, sizeof(rx->srx));

        /* find a local transport endpoint if we don't have one already */
        local = rxrpc_lookup_local(&rx->srx);
        if (IS_ERR(local)) {
                ret = PTR_ERR(local);
                goto error_unlock;
        }

        rx->local = local;
        if (srx->srx_service) {
                service_id = htons(srx->srx_service);
                write_lock_bh(&local->services_lock);
                list_for_each_entry(prx, &local->services, listen_link) {
                        if (prx->service_id == service_id)
                                goto service_in_use;
                }

                rx->service_id = service_id;
                list_add_tail(&rx->listen_link, &local->services);
                write_unlock_bh(&local->services_lock);

                rx->sk.sk_state = RXRPC_SERVER_BOUND;
        } else {
                rx->sk.sk_state = RXRPC_CLIENT_BOUND;
        }

        release_sock(&rx->sk);
        _leave(" = 0");
        return 0;

service_in_use:
        ret = -EADDRINUSE;
        write_unlock_bh(&local->services_lock);
error_unlock:
        release_sock(&rx->sk);
error:
        _leave(" = %d", ret);
        return ret;
}

/*
 * set the number of pending calls permitted on a listening socket
 */
static int rxrpc_listen(struct socket *sock, int backlog)
{
        struct sock *sk = sock->sk;
        struct rxrpc_sock *rx = rxrpc_sk(sk);
        int ret;

        _enter("%p,%d", rx, backlog);

        lock_sock(&rx->sk);

        switch (rx->sk.sk_state) {
        case RXRPC_UNCONNECTED:
                ret = -EADDRNOTAVAIL;
                break;
        case RXRPC_CLIENT_BOUND:
        case RXRPC_CLIENT_CONNECTED:
        default:
                ret = -EBUSY;
                break;
        case RXRPC_SERVER_BOUND:
                ASSERT(rx->local != NULL);
                sk->sk_max_ack_backlog = backlog;
                rx->sk.sk_state = RXRPC_SERVER_LISTENING;
                ret = 0;
                break;
        }

        release_sock(&rx->sk);
        _leave(" = %d", ret);
        return ret;
}

/*
 * find a transport by address
 */
static struct rxrpc_transport *rxrpc_name_to_transport(struct socket *sock,
                                                       struct sockaddr *addr,
                                                       int addr_len, int flags,
                                                       gfp_t gfp)
{
        struct sockaddr_rxrpc *srx = (struct sockaddr_rxrpc *) addr;
        struct rxrpc_transport *trans;
        struct rxrpc_sock *rx = rxrpc_sk(sock->sk);
        struct rxrpc_peer *peer;

        _enter("%p,%p,%d,%d", rx, addr, addr_len, flags);

        ASSERT(rx->local != NULL);
        ASSERT(rx->sk.sk_state > RXRPC_UNCONNECTED);

        if (rx->srx.transport_type != srx->transport_type)
                return ERR_PTR(-ESOCKTNOSUPPORT);
        if (rx->srx.transport.family != srx->transport.family)
                return ERR_PTR(-EAFNOSUPPORT);

        /* find a remote transport endpoint from the local one */
        peer = rxrpc_get_peer(srx, gfp);
        if (IS_ERR(peer))
                return ERR_PTR(PTR_ERR(peer));

        /* find a transport */
        trans = rxrpc_get_transport(rx->local, peer, gfp);
        rxrpc_put_peer(peer);
        _leave(" = %p", trans);
        return trans;
}

/**
 * rxrpc_kernel_begin_call - Allow a kernel service to begin a call
 * @sock: The socket on which to make the call
 * @srx: The address of the peer to contact (defaults to socket setting)
 * @key: The security context to use (defaults to socket setting)
 * @user_call_ID: The ID to use
 *
 * Allow a kernel service to begin a call on the nominated socket. This just
 * sets up all the internal tracking structures and allocates connection and
 * call IDs as appropriate. The call to be used is returned.
 *
 * The default socket destination address and security may be overridden by
 * supplying @srx and @key.
 */
struct rxrpc_call *rxrpc_kernel_begin_call(struct socket *sock,
                                           struct sockaddr_rxrpc *srx,
                                           struct key *key,
                                           unsigned long user_call_ID,
                                           gfp_t gfp)
{
        struct rxrpc_conn_bundle *bundle;
        struct rxrpc_transport *trans;
        struct rxrpc_call *call;
        struct rxrpc_sock *rx = rxrpc_sk(sock->sk);
        __be16 service_id;

        _enter(",,%x,%lx", key_serial(key), user_call_ID);

        lock_sock(&rx->sk);

        if (srx) {
                trans = rxrpc_name_to_transport(sock, (struct sockaddr *) srx,
                                                sizeof(*srx), 0, gfp);
                if (IS_ERR(trans)) {
                        call = ERR_PTR(PTR_ERR(trans));
                        trans = NULL;
                        goto out;
                }
        } else {
                trans = rx->trans;
                if (!trans) {
                        call = ERR_PTR(-ENOTCONN);
                        goto out;
                }
                atomic_inc(&trans->usage);
        }

        service_id = rx->service_id;
        if (srx)
                service_id = htons(srx->srx_service);

        if (!key)
                key = rx->key;
        if (key && !key->payload.data)
                key = NULL; /* a no-security key */

        bundle = rxrpc_get_bundle(rx, trans, key, service_id, gfp);
        if (IS_ERR(bundle)) {
                call = ERR_PTR(PTR_ERR(bundle));
                goto out;
        }

        call = rxrpc_get_client_call(rx, trans, bundle, user_call_ID, true,
                                     gfp);
        rxrpc_put_bundle(trans, bundle);
out:
        rxrpc_put_transport(trans);
        release_sock(&rx->sk);
        _leave(" = %p", call);
        return call;
}

EXPORT_SYMBOL(rxrpc_kernel_begin_call);

/**
 * rxrpc_kernel_end_call - Allow a kernel service to end a call it was using
 * @call: The call to end
 *
 * Allow a kernel service to end a call it was using. The call must be
 * complete before this is called (the call should be aborted if necessary).
 */
void rxrpc_kernel_end_call(struct rxrpc_call *call)
{
        _enter("%d{%d}", call->debug_id, atomic_read(&call->usage));
        rxrpc_remove_user_ID(call->socket, call);
        rxrpc_put_call(call);
}

EXPORT_SYMBOL(rxrpc_kernel_end_call);

/**
 * rxrpc_kernel_intercept_rx_messages - Intercept received RxRPC messages
 * @sock: The socket to intercept received messages on
 * @interceptor: The function to pass the messages to
 *
 * Allow a kernel service to intercept messages heading for the Rx queue on an
 * RxRPC socket. They get passed to the specified function instead.
 * @interceptor should free the socket buffers it is given. @interceptor is
 * called with the socket receive queue spinlock held and softirqs disabled -
 * this ensures that the messages will be delivered in the right order.
 */
void rxrpc_kernel_intercept_rx_messages(struct socket *sock,
                                        rxrpc_interceptor_t interceptor)
{
        struct rxrpc_sock *rx = rxrpc_sk(sock->sk);

        _enter("");
        rx->interceptor = interceptor;
}

EXPORT_SYMBOL(rxrpc_kernel_intercept_rx_messages);

/*
 * connect an RxRPC socket
 * - this just targets it at a specific destination; no actual connection
 *   negotiation takes place
 */
static int rxrpc_connect(struct socket *sock, struct sockaddr *addr,
                         int addr_len, int flags)
{
        struct sockaddr_rxrpc *srx = (struct sockaddr_rxrpc *) addr;
        struct sock *sk = sock->sk;
        struct rxrpc_transport *trans;
        struct rxrpc_local *local;
        struct rxrpc_sock *rx = rxrpc_sk(sk);
        int ret;

        _enter("%p,%p,%d,%d", rx, addr, addr_len, flags);

        ret = rxrpc_validate_address(rx, srx, addr_len);
        if (ret < 0) {
                _leave(" = %d [bad addr]", ret);
                return ret;
        }

        lock_sock(&rx->sk);

        switch (rx->sk.sk_state) {
        case RXRPC_UNCONNECTED:
                /* find a local transport endpoint if we don't have one already */
                ASSERTCMP(rx->local, ==, NULL);
                rx->srx.srx_family = AF_RXRPC;
                rx->srx.srx_service = 0;
                rx->srx.transport_type = srx->transport_type;
                rx->srx.transport_len = sizeof(sa_family_t);
                rx->srx.transport.family = srx->transport.family;
                local = rxrpc_lookup_local(&rx->srx);
                if (IS_ERR(local)) {
                        release_sock(&rx->sk);
                        return PTR_ERR(local);
                }
                rx->local = local;
                rx->sk.sk_state = RXRPC_CLIENT_BOUND;
        case RXRPC_CLIENT_BOUND:
                break;
        case RXRPC_CLIENT_CONNECTED:
                release_sock(&rx->sk);
                return -EISCONN;
        default:
                release_sock(&rx->sk);
                return -EBUSY; /* server sockets can't connect as well */
        }

        trans = rxrpc_name_to_transport(sock, addr, addr_len, flags,
                                        GFP_KERNEL);
        if (IS_ERR(trans)) {
                release_sock(&rx->sk);
                _leave(" = %ld", PTR_ERR(trans));
                return PTR_ERR(trans);
        }

        rx->trans = trans;
        rx->service_id = htons(srx->srx_service);
        rx->sk.sk_state = RXRPC_CLIENT_CONNECTED;

        release_sock(&rx->sk);
        return 0;
}

/*
 * send a message through an RxRPC socket
 * - in a client this does a number of things:
 *   - finds/sets up a connection for the security specified (if any)
 *   - initiates a call (ID in control data)
 *   - ends the request phase of a call (if MSG_MORE is not set)
 *   - sends a call data packet
 *   - may send an abort (abort code in control data)
 */
static int rxrpc_sendmsg(struct kiocb *iocb, struct socket *sock,
                         struct msghdr *m, size_t len)
{
        struct rxrpc_transport *trans;
        struct rxrpc_sock *rx = rxrpc_sk(sock->sk);
        int ret;

        _enter(",{%d},,%zu", rx->sk.sk_state, len);

        if (m->msg_flags & MSG_OOB)
                return -EOPNOTSUPP;

        if (m->msg_name) {
                ret = rxrpc_validate_address(rx, m->msg_name, m->msg_namelen);
                if (ret < 0) {
                        _leave(" = %d [bad addr]", ret);
                        return ret;
                }
        }

        trans = NULL;
        lock_sock(&rx->sk);

        if (m->msg_name) {
                ret = -EISCONN;
                trans = rxrpc_name_to_transport(sock, m->msg_name,
                                                m->msg_namelen, 0, GFP_KERNEL);
                if (IS_ERR(trans)) {
                        ret = PTR_ERR(trans);
                        trans = NULL;
                        goto out;
                }
        } else {
                trans = rx->trans;
                if (trans)
                        atomic_inc(&trans->usage);
        }

        switch (rx->sk.sk_state) {
        case RXRPC_SERVER_LISTENING:
                if (!m->msg_name) {
                        ret = rxrpc_server_sendmsg(iocb, rx, m, len);
                        break;
                }
        case RXRPC_SERVER_BOUND:
        case RXRPC_CLIENT_BOUND:
                if (!m->msg_name) {
                        ret = -ENOTCONN;
                        break;
                }
        case RXRPC_CLIENT_CONNECTED:
                ret = rxrpc_client_sendmsg(iocb, rx, trans, m, len);
                break;
        default:
                ret = -ENOTCONN;
                break;
        }

out:
        release_sock(&rx->sk);
        if (trans)
                rxrpc_put_transport(trans);
        _leave(" = %d", ret);
        return ret;
}

/*
 * set RxRPC socket options
 */
static int rxrpc_setsockopt(struct socket *sock, int level, int optname,
                            char __user *optval, int optlen)
{
        struct rxrpc_sock *rx = rxrpc_sk(sock->sk);
        unsigned min_sec_level;
        int ret;

        _enter(",%d,%d,,%d", level, optname, optlen);

        lock_sock(&rx->sk);
        ret = -EOPNOTSUPP;

        if (level == SOL_RXRPC) {
                switch (optname) {
                case RXRPC_EXCLUSIVE_CONNECTION:
                        ret = -EINVAL;
                        if (optlen != 0)
                                goto error;
                        ret = -EISCONN;
                        if (rx->sk.sk_state != RXRPC_UNCONNECTED)
                                goto error;
                        set_bit(RXRPC_SOCK_EXCLUSIVE_CONN, &rx->flags);
                        goto success;

                case RXRPC_SECURITY_KEY:
                        ret = -EINVAL;
                        if (rx->key)
                                goto error;
                        ret = -EISCONN;
                        if (rx->sk.sk_state != RXRPC_UNCONNECTED)
                                goto error;
                        ret = rxrpc_request_key(rx, optval, optlen);
                        goto error;

                case RXRPC_SECURITY_KEYRING:
                        ret = -EINVAL;
                        if (rx->key)
                                goto error;
                        ret = -EISCONN;
                        if (rx->sk.sk_state != RXRPC_UNCONNECTED)
                                goto error;
                        ret = rxrpc_server_keyring(rx, optval, optlen);
                        goto error;

                case RXRPC_MIN_SECURITY_LEVEL:
                        ret = -EINVAL;
                        if (optlen != sizeof(unsigned))
                                goto error;
                        ret = -EISCONN;
                        if (rx->sk.sk_state != RXRPC_UNCONNECTED)
                                goto error;
                        ret = get_user(min_sec_level,
                                       (unsigned __user *) optval);
                        if (ret < 0)
                                goto error;
                        ret = -EINVAL;
                        if (min_sec_level > RXRPC_SECURITY_MAX)
                                goto error;
                        rx->min_sec_level = min_sec_level;
                        goto success;

                default:
                        break;
                }
        }

success:
        ret = 0;
error:
        release_sock(&rx->sk);
        return ret;
}

/*
 * permit an RxRPC socket to be polled
 */
static unsigned int rxrpc_poll(struct file *file, struct socket *sock,
                               poll_table *wait)
{
        unsigned int mask;
        struct sock *sk = sock->sk;

        poll_wait(file, sk->sk_sleep, wait);
        mask = 0;

        /* the socket is readable if there are any messages waiting on the Rx
         * queue */
        if (!skb_queue_empty(&sk->sk_receive_queue))
                mask |= POLLIN | POLLRDNORM;

        /* the socket is writable if there is space to add new data to the
         * socket; there is no guarantee that any particular call in progress
         * on the socket may have space in the Tx ACK window */
        if (rxrpc_writable(sk))
                mask |= POLLOUT | POLLWRNORM;

        return mask;
}

/*
 * create an RxRPC socket
 */
static int rxrpc_create(struct socket *sock, int protocol)
{
        struct rxrpc_sock *rx;
        struct sock *sk;

        _enter("%p,%d", sock, protocol);

        /* we support transport protocol UDP only */
        if (protocol != PF_INET)
                return -EPROTONOSUPPORT;

        if (sock->type != SOCK_DGRAM)
                return -ESOCKTNOSUPPORT;

        sock->ops = &rxrpc_rpc_ops;
        sock->state = SS_UNCONNECTED;

        sk = sk_alloc(PF_RXRPC, GFP_KERNEL, &rxrpc_proto, 1);
        if (!sk)
                return -ENOMEM;

        sock_init_data(sock, sk);
        sk->sk_state = RXRPC_UNCONNECTED;
        sk->sk_write_space = rxrpc_write_space;
        sk->sk_max_ack_backlog = sysctl_rxrpc_max_qlen;
        sk->sk_destruct = rxrpc_sock_destructor;

        rx = rxrpc_sk(sk);
        rx->proto = protocol;
        rx->calls = RB_ROOT;

        INIT_LIST_HEAD(&rx->listen_link);
        INIT_LIST_HEAD(&rx->secureq);
        INIT_LIST_HEAD(&rx->acceptq);
        rwlock_init(&rx->call_lock);
        memset(&rx->srx, 0, sizeof(rx->srx));

        _leave(" = 0 [%p]", rx);
        return 0;
}

/*
 * RxRPC socket destructor
 */
static void rxrpc_sock_destructor(struct sock *sk)
{
        _enter("%p", sk);

        rxrpc_purge_queue(&sk->sk_receive_queue);

        BUG_TRAP(!atomic_read(&sk->sk_wmem_alloc));
        BUG_TRAP(sk_unhashed(sk));
        BUG_TRAP(!sk->sk_socket);

        if (!sock_flag(sk, SOCK_DEAD)) {
                printk("Attempt to release alive rxrpc socket: %p\n", sk);
                return;
        }
}

/*
 * release an RxRPC socket
 */
static int rxrpc_release_sock(struct sock *sk)
{
        struct rxrpc_sock *rx = rxrpc_sk(sk);

        _enter("%p{%d,%d}", sk, sk->sk_state, atomic_read(&sk->sk_refcnt));

        /* declare the socket closed for business */
        sock_orphan(sk);
        sk->sk_shutdown = SHUTDOWN_MASK;

        spin_lock_bh(&sk->sk_receive_queue.lock);
        sk->sk_state = RXRPC_CLOSE;
        spin_unlock_bh(&sk->sk_receive_queue.lock);

        ASSERTCMP(rx->listen_link.next, !=, LIST_POISON1);

        if (!list_empty(&rx->listen_link)) {
                write_lock_bh(&rx->local->services_lock);
                list_del(&rx->listen_link);
                write_unlock_bh(&rx->local->services_lock);
        }

        /* try to flush out this socket */
        rxrpc_release_calls_on_socket(rx);
        flush_workqueue(rxrpc_workqueue);
        rxrpc_purge_queue(&sk->sk_receive_queue);

        if (rx->conn) {
                rxrpc_put_connection(rx->conn);
                rx->conn = NULL;
        }

        if (rx->bundle) {
                rxrpc_put_bundle(rx->trans, rx->bundle);
                rx->bundle = NULL;
        }
        if (rx->trans) {
                rxrpc_put_transport(rx->trans);
                rx->trans = NULL;
        }
        if (rx->local) {
                rxrpc_put_local(rx->local);
                rx->local = NULL;
        }

        key_put(rx->key);
        rx->key = NULL;
        key_put(rx->securities);
        rx->securities = NULL;
        sock_put(sk);

        _leave(" = 0");
        return 0;
}

/*
 * release an RxRPC BSD socket on close() or equivalent
 */
static int rxrpc_release(struct socket *sock)
{
        struct sock *sk = sock->sk;

        _enter("%p{%p}", sock, sk);

        if (!sk)
                return 0;

        sock->sk = NULL;

        return rxrpc_release_sock(sk);
}

/*
 * RxRPC network protocol
 */
static const struct proto_ops rxrpc_rpc_ops = {
        .family         = PF_UNIX,
        .owner          = THIS_MODULE,
        .release        = rxrpc_release,
        .bind           = rxrpc_bind,
        .connect        = rxrpc_connect,
        .socketpair     = sock_no_socketpair,
        .accept         = sock_no_accept,
        .getname        = sock_no_getname,
        .poll           = rxrpc_poll,
        .ioctl          = sock_no_ioctl,
        .listen         = rxrpc_listen,
        .shutdown       = sock_no_shutdown,
        .setsockopt     = rxrpc_setsockopt,
        .getsockopt     = sock_no_getsockopt,
        .sendmsg        = rxrpc_sendmsg,
        .recvmsg        = rxrpc_recvmsg,
        .mmap           = sock_no_mmap,
        .sendpage       = sock_no_sendpage,
};

static struct proto rxrpc_proto = {
        .name           = "RXRPC",
        .owner          = THIS_MODULE,
        .obj_size       = sizeof(struct rxrpc_sock),
        .max_header     = sizeof(struct rxrpc_header),
};

static struct net_proto_family rxrpc_family_ops = {
        .family = PF_RXRPC,
        .create = rxrpc_create,
        .owner  = THIS_MODULE,
};

/*
 * initialise and register the RxRPC protocol
 */
static int __init af_rxrpc_init(void)
{
        struct sk_buff *dummy_skb;
        int ret = -1;

        BUILD_BUG_ON(sizeof(struct rxrpc_skb_priv) > sizeof(dummy_skb->cb));

        rxrpc_epoch = htonl(xtime.tv_sec);

        ret = -ENOMEM;
        rxrpc_call_jar = kmem_cache_create(
                "rxrpc_call_jar", sizeof(struct rxrpc_call), 0,
                SLAB_HWCACHE_ALIGN, NULL, NULL);
        if (!rxrpc_call_jar) {
                printk(KERN_NOTICE "RxRPC: Failed to allocate call jar\n");
                goto error_call_jar;
        }

        rxrpc_workqueue = create_workqueue("krxrpcd");
        if (!rxrpc_workqueue) {
                printk(KERN_NOTICE "RxRPC: Failed to allocate work queue\n");
                goto error_work_queue;
        }

        ret = proto_register(&rxrpc_proto, 1);
        if (ret < 0) {
                printk(KERN_CRIT "RxRPC: Cannot register protocol\n");
                goto error_proto;
        }

        ret = sock_register(&rxrpc_family_ops);
        if (ret < 0) {
                printk(KERN_CRIT "RxRPC: Cannot register socket family\n");
                goto error_sock;
        }

        ret = register_key_type(&key_type_rxrpc);
        if (ret < 0) {
                printk(KERN_CRIT "RxRPC: Cannot register client key type\n");
                goto error_key_type;
        }

        ret = register_key_type(&key_type_rxrpc_s);
        if (ret < 0) {
                printk(KERN_CRIT "RxRPC: Cannot register server key type\n");
                goto error_key_type_s;
        }

#ifdef CONFIG_PROC_FS
        proc_net_fops_create("rxrpc_calls", 0, &rxrpc_call_seq_fops);
        proc_net_fops_create("rxrpc_conns", 0, &rxrpc_connection_seq_fops);
#endif
        return 0;

error_key_type_s:
        unregister_key_type(&key_type_rxrpc);
error_key_type:
        sock_unregister(PF_RXRPC);
error_sock:
        proto_unregister(&rxrpc_proto);
error_proto:
        destroy_workqueue(rxrpc_workqueue);
error_work_queue:
        kmem_cache_destroy(rxrpc_call_jar);
error_call_jar:
        return ret;
}

/*
 * unregister the RxRPC protocol
 */
static void __exit af_rxrpc_exit(void)
{
        _enter("");
        unregister_key_type(&key_type_rxrpc_s);
        unregister_key_type(&key_type_rxrpc);
        sock_unregister(PF_RXRPC);
        proto_unregister(&rxrpc_proto);
        rxrpc_destroy_all_calls();
        rxrpc_destroy_all_connections();
        rxrpc_destroy_all_transports();
        rxrpc_destroy_all_peers();
        rxrpc_destroy_all_locals();

        ASSERTCMP(atomic_read(&rxrpc_n_skbs), ==, 0);

        _debug("flush scheduled work");
        flush_workqueue(rxrpc_workqueue);
        proc_net_remove("rxrpc_conns");
        proc_net_remove("rxrpc_calls");
        destroy_workqueue(rxrpc_workqueue);
        kmem_cache_destroy(rxrpc_call_jar);
        _leave("");
}

module_init(af_rxrpc_init);
module_exit(af_rxrpc_exit);