Diffstat (limited to 'net')
-rw-r--r--  net/Kconfig               |    1
-rw-r--r--  net/Makefile              |    1
-rw-r--r--  net/core/sock.c           |    6
-rw-r--r--  net/rxrpc/Kconfig         |   37
-rw-r--r--  net/rxrpc/Makefile        |   31
-rw-r--r--  net/rxrpc/af_rxrpc.c      |  754
-rw-r--r--  net/rxrpc/ar-accept.c     |  399
-rw-r--r--  net/rxrpc/ar-ack.c        | 1250
-rw-r--r--  net/rxrpc/ar-call.c       |  787
-rw-r--r--  net/rxrpc/ar-connection.c |  895
-rw-r--r--  net/rxrpc/ar-connevent.c  |  387
-rw-r--r--  net/rxrpc/ar-error.c      |  253
-rw-r--r--  net/rxrpc/ar-input.c      |  791
-rw-r--r--  net/rxrpc/ar-internal.h   |  842
-rw-r--r--  net/rxrpc/ar-key.c        |  334
-rw-r--r--  net/rxrpc/ar-local.c      |  309
-rw-r--r--  net/rxrpc/ar-output.c     |  658
-rw-r--r--  net/rxrpc/ar-peer.c       |  273
-rw-r--r--  net/rxrpc/ar-proc.c       |  247
-rw-r--r--  net/rxrpc/ar-recvmsg.c    |  366
-rw-r--r--  net/rxrpc/ar-security.c   |  258
-rw-r--r--  net/rxrpc/ar-skbuff.c     |  118
-rw-r--r--  net/rxrpc/ar-transport.c  |  276
-rw-r--r--  net/rxrpc/rxkad.c         | 1153
24 files changed, 10423 insertions(+), 3 deletions(-)
diff --git a/net/Kconfig b/net/Kconfig
index ae1817dc51b8..2fc8e77b1e62 100644
--- a/net/Kconfig
+++ b/net/Kconfig
@@ -212,6 +212,7 @@ endmenu
 source "net/ax25/Kconfig"
 source "net/irda/Kconfig"
 source "net/bluetooth/Kconfig"
+source "net/rxrpc/Kconfig"
 
 config FIB_RULES
 	bool
diff --git a/net/Makefile b/net/Makefile
index 29bbe19d87f1..6b74d4118c5b 100644
--- a/net/Makefile
+++ b/net/Makefile
@@ -38,6 +38,7 @@ obj-$(CONFIG_IRDA) += irda/
 obj-$(CONFIG_BT) += bluetooth/
 obj-$(CONFIG_SUNRPC) += sunrpc/
 obj-$(CONFIG_RXRPC) += rxrpc/
+obj-$(CONFIG_AF_RXRPC) += rxrpc/
 obj-$(CONFIG_ATM) += atm/
 obj-$(CONFIG_DECNET) += decnet/
 obj-$(CONFIG_ECONET) += econet/
diff --git a/net/core/sock.c b/net/core/sock.c
index 043bdc05d211..22183c2ef284 100644
--- a/net/core/sock.c
+++ b/net/core/sock.c
@@ -154,7 +154,8 @@ static const char *af_family_key_strings[AF_MAX+1] = {
 	"sk_lock-21" , "sk_lock-AF_SNA" , "sk_lock-AF_IRDA" ,
 	"sk_lock-AF_PPPOX" , "sk_lock-AF_WANPIPE" , "sk_lock-AF_LLC" ,
 	"sk_lock-27" , "sk_lock-28" , "sk_lock-29" ,
-	"sk_lock-AF_TIPC" , "sk_lock-AF_BLUETOOTH", "sk_lock-AF_MAX"
+	"sk_lock-AF_TIPC" , "sk_lock-AF_BLUETOOTH", "sk_lock-IUCV" ,
+	"sk_lock-AF_RXRPC" , "sk_lock-AF_MAX"
 };
 static const char *af_family_slock_key_strings[AF_MAX+1] = {
 	"slock-AF_UNSPEC", "slock-AF_UNIX" , "slock-AF_INET" ,
@@ -167,7 +168,8 @@ static const char *af_family_slock_key_strings[AF_MAX+1] = {
 	"slock-21" , "slock-AF_SNA" , "slock-AF_IRDA" ,
 	"slock-AF_PPPOX" , "slock-AF_WANPIPE" , "slock-AF_LLC" ,
 	"slock-27" , "slock-28" , "slock-29" ,
-	"slock-AF_TIPC" , "slock-AF_BLUETOOTH", "slock-AF_MAX"
+	"slock-AF_TIPC" , "slock-AF_BLUETOOTH", "slock-AF_IUCV" ,
+	"slock-AF_RXRPC" , "slock-AF_MAX"
 };
 #endif
 
diff --git a/net/rxrpc/Kconfig b/net/rxrpc/Kconfig
new file mode 100644
index 000000000000..d72380e304ae
--- /dev/null
+++ b/net/rxrpc/Kconfig
@@ -0,0 +1,37 @@
+#
+# RxRPC session sockets
+#
+
+config AF_RXRPC
+	tristate "RxRPC session sockets"
+	depends on EXPERIMENTAL
+	help
+	  Say Y or M here to include support for RxRPC session sockets (just
+	  the transport part, not the presentation part: (un)marshalling is
+	  left to the application).
+
+	  These are used for AFS kernel filesystem and userspace utilities.
+
+	  This module at the moment only supports client operations and is
+	  currently incomplete.
+
+	  See Documentation/networking/rxrpc.txt.
+
+
+config AF_RXRPC_DEBUG
+	bool "RxRPC dynamic debugging"
+	depends on AF_RXRPC
+	help
+	  Say Y here to make runtime controllable debugging messages appear.
+
+	  See Documentation/networking/rxrpc.txt.
+
+
+config RXKAD
+	tristate "RxRPC Kerberos security"
+	depends on AF_RXRPC && KEYS
+	help
+	  Provide kerberos 4 and AFS kaserver security handling for AF_RXRPC
+	  through the use of the key retention service.
+
+	  See Documentation/networking/rxrpc.txt.
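
As a rough illustration of what "just the transport part" means for a user of
these sockets, a minimal client setup might look like the sketch below. This is
an editor's sketch only: the sockaddr_rxrpc field order is an assumption based
on how this patch uses it, the AF_RXRPC value and the service ID/port are
hypothetical, and error handling is elided.

	#include <string.h>
	#include <sys/socket.h>
	#include <netinet/in.h>
	#include <arpa/inet.h>

	#ifndef AF_RXRPC
	#define AF_RXRPC 33			/* assumed family number */
	#endif

	/* local copy of the layout this patch's <net/af_rxrpc.h> appears
	 * to use; field order is an assumption */
	struct sockaddr_rxrpc {
		sa_family_t	srx_family;	/* AF_RXRPC */
		unsigned short	srx_service;	/* service desired */
		unsigned short	transport_type;	/* SOCK_DGRAM */
		unsigned short	transport_len;
		union {
			sa_family_t		family;
			struct sockaddr_in	sin;
		} transport;
	};

	int main(void)
	{
		struct sockaddr_rxrpc srx;

		/* note the third argument: per rxrpc_create() it names the
		 * transport family, and only PF_INET is accepted */
		int fd = socket(AF_RXRPC, SOCK_DGRAM, PF_INET);

		memset(&srx, 0, sizeof(srx));
		srx.srx_family = AF_RXRPC;
		srx.srx_service = 52;			/* hypothetical */
		srx.transport_type = SOCK_DGRAM;
		srx.transport_len = sizeof(srx.transport.sin);
		srx.transport.sin.sin_family = AF_INET;
		srx.transport.sin.sin_port = htons(7000);	/* hypothetical */
		srx.transport.sin.sin_addr.s_addr = htonl(INADDR_LOOPBACK);

		/* "connect" merely records the target; no packets flow yet */
		connect(fd, (struct sockaddr *) &srx, sizeof(srx));
		return 0;
	}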
diff --git a/net/rxrpc/Makefile b/net/rxrpc/Makefile
index 6efcb6f162a0..07bf82ffec6a 100644
--- a/net/rxrpc/Makefile
+++ b/net/rxrpc/Makefile
@@ -4,6 +4,35 @@
 
 #CFLAGS += -finstrument-functions
 
+af-rxrpc-objs := \
+	af_rxrpc.o \
+	ar-accept.o \
+	ar-ack.o \
+	ar-call.o \
+	ar-connection.o \
+	ar-connevent.o \
+	ar-error.o \
+	ar-input.o \
+	ar-key.o \
+	ar-local.o \
+	ar-output.o \
+	ar-peer.o \
+	ar-recvmsg.o \
+	ar-security.o \
+	ar-skbuff.o \
+	ar-transport.o
+
+ifeq ($(CONFIG_PROC_FS),y)
+af-rxrpc-objs += ar-proc.o
+endif
+
+obj-$(CONFIG_AF_RXRPC) += af-rxrpc.o
+
+obj-$(CONFIG_RXKAD) += rxkad.o
+
+#
+# obsolete RxRPC interface, still used by fs/afs/
+#
 rxrpc-objs := \
 	call.o \
 	connection.o \
@@ -22,4 +51,4 @@ ifeq ($(CONFIG_SYSCTL),y)
 rxrpc-objs += sysctl.o
 endif
 
-obj-$(CONFIG_RXRPC) := rxrpc.o
+obj-$(CONFIG_RXRPC) += rxrpc.o
diff --git a/net/rxrpc/af_rxrpc.c b/net/rxrpc/af_rxrpc.c
new file mode 100644
index 000000000000..bfa8822e2286
--- /dev/null
+++ b/net/rxrpc/af_rxrpc.c
@@ -0,0 +1,754 @@
+/* AF_RXRPC implementation
+ *
+ * Copyright (C) 2007 Red Hat, Inc. All Rights Reserved.
+ * Written by David Howells (dhowells@redhat.com)
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ */
+
+#include <linux/module.h>
+#include <linux/net.h>
+#include <linux/skbuff.h>
+#include <linux/poll.h>
+#include <linux/proc_fs.h>
+#include <net/sock.h>
+#include <net/af_rxrpc.h>
+#include "ar-internal.h"
+
+MODULE_DESCRIPTION("RxRPC network protocol");
+MODULE_AUTHOR("Red Hat, Inc.");
+MODULE_LICENSE("GPL");
+MODULE_ALIAS_NETPROTO(PF_RXRPC);
+
+unsigned rxrpc_debug; // = RXRPC_DEBUG_KPROTO;
+module_param_named(debug, rxrpc_debug, uint, S_IWUSR | S_IRUGO);
+MODULE_PARM_DESC(rxrpc_debug, "RxRPC debugging mask");
+
+static int sysctl_rxrpc_max_qlen __read_mostly = 10;
+
+static struct proto rxrpc_proto;
+static const struct proto_ops rxrpc_rpc_ops;
+
+/* local epoch for detecting local-end reset */
+__be32 rxrpc_epoch;
+
+/* current debugging ID */
+atomic_t rxrpc_debug_id;
+
+/* count of skbs currently in use */
+atomic_t rxrpc_n_skbs;
+
+static void rxrpc_sock_destructor(struct sock *);
+
+/*
+ * see if an RxRPC socket is currently writable
+ */
+static inline int rxrpc_writable(struct sock *sk)
+{
+	return atomic_read(&sk->sk_wmem_alloc) < (size_t) sk->sk_sndbuf;
+}
+
+/*
+ * wait for write bufferage to become available
+ */
+static void rxrpc_write_space(struct sock *sk)
+{
+	_enter("%p", sk);
+	read_lock(&sk->sk_callback_lock);
+	if (rxrpc_writable(sk)) {
+		if (sk->sk_sleep && waitqueue_active(sk->sk_sleep))
+			wake_up_interruptible(sk->sk_sleep);
+		sk_wake_async(sk, 2, POLL_OUT);
+	}
+	read_unlock(&sk->sk_callback_lock);
+}
+
+/*
+ * validate an RxRPC address
+ */
+static int rxrpc_validate_address(struct rxrpc_sock *rx,
+				  struct sockaddr_rxrpc *srx,
+				  int len)
+{
+	if (len < sizeof(struct sockaddr_rxrpc))
+		return -EINVAL;
+
+	if (srx->srx_family != AF_RXRPC)
+		return -EAFNOSUPPORT;
+
+	if (srx->transport_type != SOCK_DGRAM)
+		return -ESOCKTNOSUPPORT;
+
+	len -= offsetof(struct sockaddr_rxrpc, transport);
+	if (srx->transport_len < sizeof(sa_family_t) ||
+	    srx->transport_len > len)
+		return -EINVAL;
+
+	if (srx->transport.family != rx->proto)
+		return -EAFNOSUPPORT;
+
+	switch (srx->transport.family) {
+	case AF_INET:
+		_debug("INET: %x @ %u.%u.%u.%u",
+		       ntohs(srx->transport.sin.sin_port),
+		       NIPQUAD(srx->transport.sin.sin_addr));
+		if (srx->transport_len > 8)
+			memset((void *)&srx->transport + 8, 0,
+			       srx->transport_len - 8);
+		break;
+
+	case AF_INET6:
+	default:
+		return -EAFNOSUPPORT;
+	}
+
+	return 0;
+}
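+
+/* Illustration (editor's sketch; values hypothetical): an address that
+ * passes the checks above would be built as
+ *
+ *	struct sockaddr_rxrpc srx = {
+ *		.srx_family	= AF_RXRPC,
+ *		.srx_service	= 52,
+ *		.transport_type	= SOCK_DGRAM,
+ *		.transport_len	= sizeof(struct sockaddr_in),
+ *	};
+ *	srx.transport.sin.sin_family = AF_INET;	(must match rx->proto)
+ *	srx.transport.sin.sin_port = htons(7000);
+ *	srx.transport.sin.sin_addr.s_addr = htonl(INADDR_LOOPBACK);
+ *
+ * transport_len must cover at least the family field but not overrun the
+ * caller-supplied length, and any tail beyond the IPv4 address is zeroed.
+ */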
+
+/*
+ * bind a local address to an RxRPC socket
+ */
+static int rxrpc_bind(struct socket *sock, struct sockaddr *saddr, int len)
+{
+	struct sockaddr_rxrpc *srx = (struct sockaddr_rxrpc *) saddr;
+	struct sock *sk = sock->sk;
+	struct rxrpc_local *local;
+	struct rxrpc_sock *rx = rxrpc_sk(sk), *prx;
+	__be16 service_id;
+	int ret;
+
+	_enter("%p,%p,%d", rx, saddr, len);
+
+	ret = rxrpc_validate_address(rx, srx, len);
+	if (ret < 0)
+		goto error;
+
+	lock_sock(&rx->sk);
+
+	if (rx->sk.sk_state != RXRPC_UNCONNECTED) {
+		ret = -EINVAL;
+		goto error_unlock;
+	}
+
+	memcpy(&rx->srx, srx, sizeof(rx->srx));
+
+	/* find a local transport endpoint if we don't have one already */
+	local = rxrpc_lookup_local(&rx->srx);
+	if (IS_ERR(local)) {
+		ret = PTR_ERR(local);
+		goto error_unlock;
+	}
+
+	rx->local = local;
+	if (srx->srx_service) {
+		service_id = htons(srx->srx_service);
+		write_lock_bh(&local->services_lock);
+		list_for_each_entry(prx, &local->services, listen_link) {
+			if (prx->service_id == service_id)
+				goto service_in_use;
+		}
+
+		rx->service_id = service_id;
+		list_add_tail(&rx->listen_link, &local->services);
+		write_unlock_bh(&local->services_lock);
+
+		rx->sk.sk_state = RXRPC_SERVER_BOUND;
+	} else {
+		rx->sk.sk_state = RXRPC_CLIENT_BOUND;
+	}
+
+	release_sock(&rx->sk);
+	_leave(" = 0");
+	return 0;
+
+service_in_use:
+	ret = -EADDRINUSE;
+	write_unlock_bh(&local->services_lock);
+error_unlock:
+	release_sock(&rx->sk);
+error:
+	_leave(" = %d", ret);
+	return ret;
+}
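+
+/* Sketch of the server-side usage this implies (service ID hypothetical):
+ * binding with a non-zero srx_service claims that service on the local
+ * endpoint and moves the socket to RXRPC_SERVER_BOUND, after which
+ * listen() may be called:
+ *
+ *	srx.srx_service = 52;
+ *	bind(fd, (struct sockaddr *) &srx, sizeof(srx));
+ *	listen(fd, 10);
+ *
+ * Binding with srx_service == 0 instead yields a client-bound socket, and
+ * a second bind of the same service ID on one endpoint gets -EADDRINUSE.
+ */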
+
+/*
+ * set the number of pending calls permitted on a listening socket
+ */
+static int rxrpc_listen(struct socket *sock, int backlog)
+{
+	struct sock *sk = sock->sk;
+	struct rxrpc_sock *rx = rxrpc_sk(sk);
+	int ret;
+
+	_enter("%p,%d", rx, backlog);
+
+	lock_sock(&rx->sk);
+
+	switch (rx->sk.sk_state) {
+	case RXRPC_UNCONNECTED:
+		ret = -EADDRNOTAVAIL;
+		break;
+	case RXRPC_CLIENT_BOUND:
+	case RXRPC_CLIENT_CONNECTED:
+	default:
+		ret = -EBUSY;
+		break;
+	case RXRPC_SERVER_BOUND:
+		ASSERT(rx->local != NULL);
+		sk->sk_max_ack_backlog = backlog;
+		rx->sk.sk_state = RXRPC_SERVER_LISTENING;
+		ret = 0;
+		break;
+	}
+
+	release_sock(&rx->sk);
+	_leave(" = %d", ret);
+	return ret;
+}
+
+/*
+ * find a transport by address
+ */
+static struct rxrpc_transport *rxrpc_name_to_transport(struct socket *sock,
+						       struct sockaddr *addr,
+						       int addr_len, int flags)
+{
+	struct sockaddr_rxrpc *srx = (struct sockaddr_rxrpc *) addr;
+	struct rxrpc_transport *trans;
+	struct rxrpc_sock *rx = rxrpc_sk(sock->sk);
+	struct rxrpc_peer *peer;
+
+	_enter("%p,%p,%d,%d", rx, addr, addr_len, flags);
+
+	ASSERT(rx->local != NULL);
+	ASSERT(rx->sk.sk_state > RXRPC_UNCONNECTED);
+
+	if (rx->srx.transport_type != srx->transport_type)
+		return ERR_PTR(-ESOCKTNOSUPPORT);
+	if (rx->srx.transport.family != srx->transport.family)
+		return ERR_PTR(-EAFNOSUPPORT);
+
+	/* find a remote transport endpoint from the local one */
+	peer = rxrpc_get_peer(srx, GFP_KERNEL);
+	if (IS_ERR(peer))
+		return ERR_PTR(PTR_ERR(peer));
+
+	/* find a transport */
+	trans = rxrpc_get_transport(rx->local, peer, GFP_KERNEL);
+	rxrpc_put_peer(peer);
+	_leave(" = %p", trans);
+	return trans;
+}
+
+/*
+ * connect an RxRPC socket
+ * - this just targets it at a specific destination; no actual connection
+ *   negotiation takes place
+ */
+static int rxrpc_connect(struct socket *sock, struct sockaddr *addr,
+			 int addr_len, int flags)
+{
+	struct sockaddr_rxrpc *srx = (struct sockaddr_rxrpc *) addr;
+	struct sock *sk = sock->sk;
+	struct rxrpc_transport *trans;
+	struct rxrpc_local *local;
+	struct rxrpc_sock *rx = rxrpc_sk(sk);
+	int ret;
+
+	_enter("%p,%p,%d,%d", rx, addr, addr_len, flags);
+
+	ret = rxrpc_validate_address(rx, srx, addr_len);
+	if (ret < 0) {
+		_leave(" = %d [bad addr]", ret);
+		return ret;
+	}
+
+	lock_sock(&rx->sk);
+
+	switch (rx->sk.sk_state) {
+	case RXRPC_UNCONNECTED:
+		/* find a local transport endpoint if we don't have one already */
+		ASSERTCMP(rx->local, ==, NULL);
+		rx->srx.srx_family = AF_RXRPC;
+		rx->srx.srx_service = 0;
+		rx->srx.transport_type = srx->transport_type;
+		rx->srx.transport_len = sizeof(sa_family_t);
+		rx->srx.transport.family = srx->transport.family;
+		local = rxrpc_lookup_local(&rx->srx);
+		if (IS_ERR(local)) {
+			release_sock(&rx->sk);
+			return PTR_ERR(local);
+		}
+		rx->local = local;
+		rx->sk.sk_state = RXRPC_CLIENT_BOUND;
+	case RXRPC_CLIENT_BOUND:
+		break;
+	case RXRPC_CLIENT_CONNECTED:
+		release_sock(&rx->sk);
+		return -EISCONN;
+	default:
+		release_sock(&rx->sk);
+		return -EBUSY; /* server sockets can't connect as well */
+	}
+
+	trans = rxrpc_name_to_transport(sock, addr, addr_len, flags);
+	if (IS_ERR(trans)) {
+		release_sock(&rx->sk);
+		_leave(" = %ld", PTR_ERR(trans));
+		return PTR_ERR(trans);
+	}
+
+	rx->trans = trans;
+	rx->service_id = htons(srx->srx_service);
+	rx->sk.sk_state = RXRPC_CLIENT_CONNECTED;
+
+	release_sock(&rx->sk);
+	return 0;
+}
+
+/*
+ * send a message through an RxRPC socket
+ * - in a client this does a number of things:
+ *   - finds/sets up a connection for the security specified (if any)
+ *   - initiates a call (ID in control data)
+ *   - ends the request phase of a call (if MSG_MORE is not set)
+ *   - sends a call data packet
+ *   - may send an abort (abort code in control data)
+ */
+static int rxrpc_sendmsg(struct kiocb *iocb, struct socket *sock,
+			 struct msghdr *m, size_t len)
+{
+	struct rxrpc_transport *trans;
+	struct rxrpc_sock *rx = rxrpc_sk(sock->sk);
+	int ret;
+
+	_enter(",{%d},,%zu", rx->sk.sk_state, len);
+
+	if (m->msg_flags & MSG_OOB)
+		return -EOPNOTSUPP;
+
+	if (m->msg_name) {
+		ret = rxrpc_validate_address(rx, m->msg_name, m->msg_namelen);
+		if (ret < 0) {
+			_leave(" = %d [bad addr]", ret);
+			return ret;
+		}
+	}
+
+	trans = NULL;
+	lock_sock(&rx->sk);
+
+	if (m->msg_name) {
+		ret = -EISCONN;
+		trans = rxrpc_name_to_transport(sock, m->msg_name,
+						m->msg_namelen, 0);
+		if (IS_ERR(trans)) {
+			ret = PTR_ERR(trans);
+			trans = NULL;
+			goto out;
+		}
+	} else {
+		trans = rx->trans;
+		if (trans)
+			atomic_inc(&trans->usage);
+	}
+
+	switch (rx->sk.sk_state) {
+	case RXRPC_SERVER_LISTENING:
+		if (!m->msg_name) {
+			ret = rxrpc_server_sendmsg(iocb, rx, m, len);
+			break;
+		}
+	case RXRPC_SERVER_BOUND:
+	case RXRPC_CLIENT_BOUND:
+		if (!m->msg_name) {
+			ret = -ENOTCONN;
+			break;
+		}
+	case RXRPC_CLIENT_CONNECTED:
+		ret = rxrpc_client_sendmsg(iocb, rx, trans, m, len);
+		break;
+	default:
+		ret = -ENOTCONN;
+		break;
+	}
+
+out:
+	release_sock(&rx->sk);
+	if (trans)
+		rxrpc_put_transport(trans);
+	_leave(" = %d", ret);
+	return ret;
+}
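+
+/* Sketch of the client-side call initiation described above. The control
+ * message carries the user's call ID; the constant names below follow
+ * Documentation/networking/rxrpc.txt but should be treated as assumptions
+ * in this context:
+ *
+ *	struct msghdr msg;
+ *	struct cmsghdr *cmsg;
+ *	unsigned long call_id = 1;		(hypothetical user call ID)
+ *	char ctrl[CMSG_SPACE(sizeof(call_id))];
+ *
+ *	msg.msg_control = ctrl;
+ *	msg.msg_controllen = sizeof(ctrl);
+ *	cmsg = CMSG_FIRSTHDR(&msg);
+ *	cmsg->cmsg_level = SOL_RXRPC;
+ *	cmsg->cmsg_type = RXRPC_USER_CALL_ID;
+ *	cmsg->cmsg_len = CMSG_LEN(sizeof(call_id));
+ *	memcpy(CMSG_DATA(cmsg), &call_id, sizeof(call_id));
+ *
+ * Sending without MSG_MORE then ends the request phase of that call.
+ */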
+
+/*
+ * set RxRPC socket options
+ */
+static int rxrpc_setsockopt(struct socket *sock, int level, int optname,
+			    char __user *optval, int optlen)
+{
+	struct rxrpc_sock *rx = rxrpc_sk(sock->sk);
+	unsigned min_sec_level;
+	int ret;
+
+	_enter(",%d,%d,,%d", level, optname, optlen);
+
+	lock_sock(&rx->sk);
+	ret = -EOPNOTSUPP;
+
+	if (level == SOL_RXRPC) {
+		switch (optname) {
+		case RXRPC_EXCLUSIVE_CONNECTION:
+			ret = -EINVAL;
+			if (optlen != 0)
+				goto error;
+			ret = -EISCONN;
+			if (rx->sk.sk_state != RXRPC_UNCONNECTED)
+				goto error;
+			set_bit(RXRPC_SOCK_EXCLUSIVE_CONN, &rx->flags);
+			goto success;
+
+		case RXRPC_SECURITY_KEY:
+			ret = -EINVAL;
+			if (rx->key)
+				goto error;
+			ret = -EISCONN;
+			if (rx->sk.sk_state != RXRPC_UNCONNECTED)
+				goto error;
+			ret = rxrpc_request_key(rx, optval, optlen);
+			goto error;
+
+		case RXRPC_SECURITY_KEYRING:
+			ret = -EINVAL;
+			if (rx->key)
+				goto error;
+			ret = -EISCONN;
+			if (rx->sk.sk_state != RXRPC_UNCONNECTED)
+				goto error;
+			ret = rxrpc_server_keyring(rx, optval, optlen);
+			goto error;
+
+		case RXRPC_MIN_SECURITY_LEVEL:
+			ret = -EINVAL;
+			if (optlen != sizeof(unsigned))
+				goto error;
+			ret = -EISCONN;
+			if (rx->sk.sk_state != RXRPC_UNCONNECTED)
+				goto error;
+			ret = get_user(min_sec_level,
+				       (unsigned __user *) optval);
+			if (ret < 0)
+				goto error;
+			ret = -EINVAL;
+			if (min_sec_level > RXRPC_SECURITY_MAX)
+				goto error;
+			rx->min_sec_level = min_sec_level;
+			goto success;
+
+		default:
+			break;
+		}
+	}
+
+success:
+	ret = 0;
+error:
+	release_sock(&rx->sk);
+	return ret;
+}
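+
+/* Illustration (sketch; constant names assumed): all options must be set
+ * before bind/connect, and only the minimum security level takes a value:
+ *
+ *	unsigned level = RXRPC_SECURITY_ENCRYPT;
+ *	setsockopt(fd, SOL_RXRPC, RXRPC_MIN_SECURITY_LEVEL,
+ *		   &level, sizeof(level));
+ *	setsockopt(fd, SOL_RXRPC, RXRPC_EXCLUSIVE_CONNECTION, NULL, 0);
+ */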
+
+/*
+ * permit an RxRPC socket to be polled
+ */
+static unsigned int rxrpc_poll(struct file *file, struct socket *sock,
+			       poll_table *wait)
+{
+	unsigned int mask;
+	struct sock *sk = sock->sk;
+
+	poll_wait(file, sk->sk_sleep, wait);
+	mask = 0;
+
+	/* the socket is readable if there are any messages waiting on the Rx
+	 * queue */
+	if (!skb_queue_empty(&sk->sk_receive_queue))
+		mask |= POLLIN | POLLRDNORM;
+
+	/* the socket is writable if there is space to add new data to the
+	 * socket; there is no guarantee that any particular call in progress
+	 * on the socket may have space in the Tx ACK window */
+	if (rxrpc_writable(sk))
+		mask |= POLLOUT | POLLWRNORM;
+
+	return mask;
+}
+
+/*
+ * create an RxRPC socket
+ */
+static int rxrpc_create(struct socket *sock, int protocol)
+{
+	struct rxrpc_sock *rx;
+	struct sock *sk;
+
+	_enter("%p,%d", sock, protocol);
+
+	/* we support transport protocol UDP only */
+	if (protocol != PF_INET)
+		return -EPROTONOSUPPORT;
+
+	if (sock->type != SOCK_DGRAM)
+		return -ESOCKTNOSUPPORT;
+
+	sock->ops = &rxrpc_rpc_ops;
+	sock->state = SS_UNCONNECTED;
+
+	sk = sk_alloc(PF_RXRPC, GFP_KERNEL, &rxrpc_proto, 1);
+	if (!sk)
+		return -ENOMEM;
+
+	sock_init_data(sock, sk);
+	sk->sk_state = RXRPC_UNCONNECTED;
+	sk->sk_write_space = rxrpc_write_space;
+	sk->sk_max_ack_backlog = sysctl_rxrpc_max_qlen;
+	sk->sk_destruct = rxrpc_sock_destructor;
+
+	rx = rxrpc_sk(sk);
+	rx->proto = protocol;
+	rx->calls = RB_ROOT;
+
+	INIT_LIST_HEAD(&rx->listen_link);
+	INIT_LIST_HEAD(&rx->secureq);
+	INIT_LIST_HEAD(&rx->acceptq);
+	rwlock_init(&rx->call_lock);
+	memset(&rx->srx, 0, sizeof(rx->srx));
+
+	_leave(" = 0 [%p]", rx);
+	return 0;
+}
+
+/*
+ * RxRPC socket destructor
+ */
+static void rxrpc_sock_destructor(struct sock *sk)
+{
+	_enter("%p", sk);
+
+	rxrpc_purge_queue(&sk->sk_receive_queue);
+
+	BUG_TRAP(!atomic_read(&sk->sk_wmem_alloc));
+	BUG_TRAP(sk_unhashed(sk));
+	BUG_TRAP(!sk->sk_socket);
+
+	if (!sock_flag(sk, SOCK_DEAD)) {
+		printk("Attempt to release alive rxrpc socket: %p\n", sk);
+		return;
+	}
+}
+
+/*
+ * release an RxRPC socket
+ */
+static int rxrpc_release_sock(struct sock *sk)
+{
+	struct rxrpc_sock *rx = rxrpc_sk(sk);
+
+	_enter("%p{%d,%d}", sk, sk->sk_state, atomic_read(&sk->sk_refcnt));
+
+	/* declare the socket closed for business */
+	sock_orphan(sk);
+	sk->sk_shutdown = SHUTDOWN_MASK;
+
+	spin_lock_bh(&sk->sk_receive_queue.lock);
+	sk->sk_state = RXRPC_CLOSE;
+	spin_unlock_bh(&sk->sk_receive_queue.lock);
+
+	ASSERTCMP(rx->listen_link.next, !=, LIST_POISON1);
+
+	if (!list_empty(&rx->listen_link)) {
+		write_lock_bh(&rx->local->services_lock);
+		list_del(&rx->listen_link);
+		write_unlock_bh(&rx->local->services_lock);
+	}
+
+	/* try to flush out this socket */
+	rxrpc_release_calls_on_socket(rx);
+	flush_scheduled_work();
+	rxrpc_purge_queue(&sk->sk_receive_queue);
+
+	if (rx->conn) {
+		rxrpc_put_connection(rx->conn);
+		rx->conn = NULL;
+	}
+
+	if (rx->bundle) {
+		rxrpc_put_bundle(rx->trans, rx->bundle);
+		rx->bundle = NULL;
+	}
+	if (rx->trans) {
+		rxrpc_put_transport(rx->trans);
+		rx->trans = NULL;
+	}
+	if (rx->local) {
+		rxrpc_put_local(rx->local);
+		rx->local = NULL;
+	}
+
+	key_put(rx->key);
+	rx->key = NULL;
+	key_put(rx->securities);
+	rx->securities = NULL;
+	sock_put(sk);
+
+	_leave(" = 0");
+	return 0;
+}
+
+/*
+ * release an RxRPC BSD socket on close() or equivalent
+ */
+static int rxrpc_release(struct socket *sock)
+{
+	struct sock *sk = sock->sk;
+
+	_enter("%p{%p}", sock, sk);
+
+	if (!sk)
+		return 0;
+
+	sock->sk = NULL;
+
+	return rxrpc_release_sock(sk);
+}
+
+/*
+ * RxRPC network protocol
+ */
+static const struct proto_ops rxrpc_rpc_ops = {
+	.family = PF_UNIX,
+	.owner = THIS_MODULE,
+	.release = rxrpc_release,
+	.bind = rxrpc_bind,
+	.connect = rxrpc_connect,
+	.socketpair = sock_no_socketpair,
+	.accept = sock_no_accept,
+	.getname = sock_no_getname,
+	.poll = rxrpc_poll,
+	.ioctl = sock_no_ioctl,
+	.listen = rxrpc_listen,
+	.shutdown = sock_no_shutdown,
+	.setsockopt = rxrpc_setsockopt,
+	.getsockopt = sock_no_getsockopt,
+	.sendmsg = rxrpc_sendmsg,
+	.recvmsg = rxrpc_recvmsg,
+	.mmap = sock_no_mmap,
+	.sendpage = sock_no_sendpage,
+};
+
+static struct proto rxrpc_proto = {
+	.name = "RXRPC",
+	.owner = THIS_MODULE,
+	.obj_size = sizeof(struct rxrpc_sock),
+	.max_header = sizeof(struct rxrpc_header),
+};
+
+static struct net_proto_family rxrpc_family_ops = {
+	.family = PF_RXRPC,
+	.create = rxrpc_create,
+	.owner = THIS_MODULE,
+};
+
+/*
+ * initialise and register the RxRPC protocol
+ */
+static int __init af_rxrpc_init(void)
+{
+	struct sk_buff *dummy_skb;
+	int ret = -1;
+
+	BUILD_BUG_ON(sizeof(struct rxrpc_skb_priv) > sizeof(dummy_skb->cb));
+
+	rxrpc_epoch = htonl(xtime.tv_sec);
+
+	rxrpc_call_jar = kmem_cache_create(
+		"rxrpc_call_jar", sizeof(struct rxrpc_call), 0,
+		SLAB_HWCACHE_ALIGN, NULL, NULL);
+	if (!rxrpc_call_jar) {
+		printk(KERN_NOTICE "RxRPC: Failed to allocate call jar\n");
+		ret = -ENOMEM;
+		goto error_call_jar;
+	}
+
+	ret = proto_register(&rxrpc_proto, 1);
+	if (ret < 0) {
+		printk(KERN_CRIT "RxRPC: Cannot register protocol\n");
+		goto error_proto;
+	}
+
+	ret = sock_register(&rxrpc_family_ops);
+	if (ret < 0) {
+		printk(KERN_CRIT "RxRPC: Cannot register socket family\n");
+		goto error_sock;
+	}
+
+	ret = register_key_type(&key_type_rxrpc);
+	if (ret < 0) {
+		printk(KERN_CRIT "RxRPC: Cannot register client key type\n");
+		goto error_key_type;
+	}
+
+	ret = register_key_type(&key_type_rxrpc_s);
+	if (ret < 0) {
+		printk(KERN_CRIT "RxRPC: Cannot register server key type\n");
+		goto error_key_type_s;
+	}
+
+#ifdef CONFIG_PROC_FS
+	proc_net_fops_create("rxrpc_calls", 0, &rxrpc_call_seq_fops);
+	proc_net_fops_create("rxrpc_conns", 0, &rxrpc_connection_seq_fops);
+#endif
+	return 0;
+
+error_key_type_s:
+	unregister_key_type(&key_type_rxrpc);
+error_key_type:
+	sock_unregister(PF_RXRPC);
+error_sock:
+	proto_unregister(&rxrpc_proto);
+error_proto:
+	kmem_cache_destroy(rxrpc_call_jar);
+error_call_jar:
+	return ret;
+}
+
+/*
+ * unregister the RxRPC protocol
+ */
+static void __exit af_rxrpc_exit(void)
+{
+	_enter("");
+	unregister_key_type(&key_type_rxrpc_s);
+	unregister_key_type(&key_type_rxrpc);
+	sock_unregister(PF_RXRPC);
+	proto_unregister(&rxrpc_proto);
+	rxrpc_destroy_all_calls();
+	rxrpc_destroy_all_connections();
+	rxrpc_destroy_all_transports();
+	rxrpc_destroy_all_peers();
+	rxrpc_destroy_all_locals();
+
+	ASSERTCMP(atomic_read(&rxrpc_n_skbs), ==, 0);
+
+	_debug("flush scheduled work");
+	flush_scheduled_work();
+	proc_net_remove("rxrpc_conns");
+	proc_net_remove("rxrpc_calls");
+	kmem_cache_destroy(rxrpc_call_jar);
+	_leave("");
+}
+
+module_init(af_rxrpc_init);
+module_exit(af_rxrpc_exit);
diff --git a/net/rxrpc/ar-accept.c b/net/rxrpc/ar-accept.c
new file mode 100644
index 000000000000..e7af780cd6f9
--- /dev/null
+++ b/net/rxrpc/ar-accept.c
@@ -0,0 +1,399 @@
+/* incoming call handling
+ *
+ * Copyright (C) 2007 Red Hat, Inc. All Rights Reserved.
+ * Written by David Howells (dhowells@redhat.com)
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ */
+
+#include <linux/module.h>
+#include <linux/net.h>
+#include <linux/skbuff.h>
+#include <linux/errqueue.h>
+#include <linux/udp.h>
+#include <linux/in.h>
+#include <linux/in6.h>
+#include <linux/icmp.h>
+#include <net/sock.h>
+#include <net/af_rxrpc.h>
+#include <net/ip.h>
+#include "ar-internal.h"
+
+/*
+ * generate a connection-level abort
+ */
+static int rxrpc_busy(struct rxrpc_local *local, struct sockaddr_rxrpc *srx,
+		      struct rxrpc_header *hdr)
+{
+	struct msghdr msg;
+	struct kvec iov[1];
+	size_t len;
+	int ret;
+
+	_enter("%d,,", local->debug_id);
+
+	msg.msg_name = &srx->transport.sin;
+	msg.msg_namelen = sizeof(srx->transport.sin);
+	msg.msg_control = NULL;
+	msg.msg_controllen = 0;
+	msg.msg_flags = 0;
+
+	hdr->seq = 0;
+	hdr->type = RXRPC_PACKET_TYPE_BUSY;
+	hdr->flags = 0;
+	hdr->userStatus = 0;
+	hdr->_rsvd = 0;
+
+	iov[0].iov_base = hdr;
+	iov[0].iov_len = sizeof(*hdr);
+
+	len = iov[0].iov_len;
+
+	hdr->serial = htonl(1);
+	_proto("Tx BUSY %%%u", ntohl(hdr->serial));
+
+	ret = kernel_sendmsg(local->socket, &msg, iov, 1, len);
+	if (ret < 0) {
+		_leave(" = -EAGAIN [sendmsg failed: %d]", ret);
+		return -EAGAIN;
+	}
+
+	_leave(" = 0");
+	return 0;
+}
+
+/*
+ * accept an incoming call that needs peer, transport and/or connection setting
+ * up
+ */
+static int rxrpc_accept_incoming_call(struct rxrpc_local *local,
+				      struct rxrpc_sock *rx,
+				      struct sk_buff *skb,
+				      struct sockaddr_rxrpc *srx)
+{
+	struct rxrpc_connection *conn;
+	struct rxrpc_transport *trans;
+	struct rxrpc_skb_priv *sp, *nsp;
+	struct rxrpc_peer *peer;
+	struct rxrpc_call *call;
+	struct sk_buff *notification;
+	int ret;
+
+	_enter("");
+
+	sp = rxrpc_skb(skb);
+
+	/* get a notification message to send to the server app */
+	notification = alloc_skb(0, GFP_NOFS);
+	rxrpc_new_skb(notification);
+	notification->mark = RXRPC_SKB_MARK_NEW_CALL;
+
+	peer = rxrpc_get_peer(srx, GFP_NOIO);
+	if (IS_ERR(peer)) {
+		_debug("no peer");
+		ret = -EBUSY;
+		goto error;
+	}
+
+	trans = rxrpc_get_transport(local, peer, GFP_NOIO);
+	rxrpc_put_peer(peer);
+	if (!trans) {
+		_debug("no trans");
+		ret = -EBUSY;
+		goto error;
+	}
+
+	conn = rxrpc_incoming_connection(trans, &sp->hdr, GFP_NOIO);
+	rxrpc_put_transport(trans);
+	if (IS_ERR(conn)) {
+		_debug("no conn");
+		ret = PTR_ERR(conn);
+		goto error;
+	}
+
+	call = rxrpc_incoming_call(rx, conn, &sp->hdr, GFP_NOIO);
+	rxrpc_put_connection(conn);
+	if (IS_ERR(call)) {
+		_debug("no call");
+		ret = PTR_ERR(call);
+		goto error;
+	}
+
+	/* attach the call to the socket */
+	read_lock_bh(&local->services_lock);
+	if (rx->sk.sk_state == RXRPC_CLOSE)
+		goto invalid_service;
+
+	write_lock(&rx->call_lock);
+	if (!test_and_set_bit(RXRPC_CALL_INIT_ACCEPT, &call->flags)) {
+		rxrpc_get_call(call);
+
+		spin_lock(&call->conn->state_lock);
+		if (sp->hdr.securityIndex > 0 &&
+		    call->conn->state == RXRPC_CONN_SERVER_UNSECURED) {
+			_debug("await conn sec");
+			list_add_tail(&call->accept_link, &rx->secureq);
+			call->conn->state = RXRPC_CONN_SERVER_CHALLENGING;
+			atomic_inc(&call->conn->usage);
+			set_bit(RXRPC_CONN_CHALLENGE, &call->conn->events);
+			schedule_work(&call->conn->processor);
+		} else {
+			_debug("conn ready");
+			call->state = RXRPC_CALL_SERVER_ACCEPTING;
+			list_add_tail(&call->accept_link, &rx->acceptq);
+			rxrpc_get_call(call);
+			nsp = rxrpc_skb(notification);
+			nsp->call = call;
+
+			ASSERTCMP(atomic_read(&call->usage), >=, 3);
+
+			_debug("notify");
+			spin_lock(&call->lock);
+			ret = rxrpc_queue_rcv_skb(call, notification, true,
+						  false);
+			spin_unlock(&call->lock);
+			notification = NULL;
+			if (ret < 0)
+				BUG();
+		}
+		spin_unlock(&call->conn->state_lock);
+
+		_debug("queued");
+	}
+	write_unlock(&rx->call_lock);
+
+	_debug("process");
+	rxrpc_fast_process_packet(call, skb);
+
+	_debug("done");
+	read_unlock_bh(&local->services_lock);
+	rxrpc_free_skb(notification);
+	rxrpc_put_call(call);
+	_leave(" = 0");
+	return 0;
+
+invalid_service:
+	_debug("invalid");
+	read_unlock_bh(&local->services_lock);
+
+	read_lock_bh(&call->state_lock);
+	if (!test_bit(RXRPC_CALL_RELEASE, &call->flags) &&
+	    !test_and_set_bit(RXRPC_CALL_RELEASE, &call->events)) {
+		rxrpc_get_call(call);
+		schedule_work(&call->processor);
+	}
+	read_unlock_bh(&call->state_lock);
+	rxrpc_put_call(call);
+	ret = -ECONNREFUSED;
+error:
+	rxrpc_free_skb(notification);
+	_leave(" = %d", ret);
+	return ret;
+}
+
+/*
+ * accept incoming calls that need peer, transport and/or connection setting up
+ * - the packets we get are all incoming client DATA packets that have seq == 1
+ */
+void rxrpc_accept_incoming_calls(struct work_struct *work)
+{
+	struct rxrpc_local *local =
+		container_of(work, struct rxrpc_local, acceptor);
+	struct rxrpc_skb_priv *sp;
+	struct sockaddr_rxrpc srx;
+	struct rxrpc_sock *rx;
+	struct sk_buff *skb;
+	__be16 service_id;
+	int ret;
+
+	_enter("%d", local->debug_id);
+
+	read_lock_bh(&rxrpc_local_lock);
+	if (atomic_read(&local->usage) > 0)
+		rxrpc_get_local(local);
+	else
+		local = NULL;
+	read_unlock_bh(&rxrpc_local_lock);
+	if (!local) {
+		_leave(" [local dead]");
+		return;
+	}
+
+process_next_packet:
+	skb = skb_dequeue(&local->accept_queue);
+	if (!skb) {
+		rxrpc_put_local(local);
+		_leave("\n");
+		return;
+	}
+
+	_net("incoming call skb %p", skb);
+
+	sp = rxrpc_skb(skb);
+
+	/* determine the remote address */
+	memset(&srx, 0, sizeof(srx));
+	srx.srx_family = AF_RXRPC;
+	srx.transport.family = local->srx.transport.family;
+	srx.transport_type = local->srx.transport_type;
+	switch (srx.transport.family) {
+	case AF_INET:
+		srx.transport_len = sizeof(struct sockaddr_in);
+		srx.transport.sin.sin_port = udp_hdr(skb)->source;
+		srx.transport.sin.sin_addr.s_addr = ip_hdr(skb)->saddr;
+		break;
+	default:
+		goto busy;
+	}
+
+	/* get the socket providing the service */
+	service_id = sp->hdr.serviceId;
+	read_lock_bh(&local->services_lock);
+	list_for_each_entry(rx, &local->services, listen_link) {
+		if (rx->service_id == service_id &&
+		    rx->sk.sk_state != RXRPC_CLOSE)
+			goto found_service;
+	}
+	read_unlock_bh(&local->services_lock);
+	goto invalid_service;
+
+found_service:
+	_debug("found service %hd", ntohs(rx->service_id));
+	if (sk_acceptq_is_full(&rx->sk))
+		goto backlog_full;
+	sk_acceptq_added(&rx->sk);
+	sock_hold(&rx->sk);
+	read_unlock_bh(&local->services_lock);
+
+	ret = rxrpc_accept_incoming_call(local, rx, skb, &srx);
+	if (ret < 0)
+		sk_acceptq_removed(&rx->sk);
+	sock_put(&rx->sk);
+	switch (ret) {
+	case -ECONNRESET: /* old calls are ignored */
+	case -ECONNABORTED: /* aborted calls are reaborted or ignored */
+	case 0:
+		goto process_next_packet;
+	case -ECONNREFUSED:
+		goto invalid_service;
+	case -EBUSY:
+		goto busy;
+	case -EKEYREJECTED:
+		goto security_mismatch;
+	default:
+		BUG();
+	}
+
+backlog_full:
+	read_unlock_bh(&local->services_lock);
+busy:
+	rxrpc_busy(local, &srx, &sp->hdr);
+	rxrpc_free_skb(skb);
+	goto process_next_packet;
+
+invalid_service:
+	skb->priority = RX_INVALID_OPERATION;
+	rxrpc_reject_packet(local, skb);
+	goto process_next_packet;
+
+	/* can't change connection security type mid-flow */
+security_mismatch:
+	skb->priority = RX_PROTOCOL_ERROR;
+	rxrpc_reject_packet(local, skb);
+	goto process_next_packet;
+}
+
+/*
+ * handle acceptance of a call by userspace
+ * - assign the user call ID to the call at the front of the queue
+ */
+int rxrpc_accept_call(struct rxrpc_sock *rx, unsigned long user_call_ID)
+{
+	struct rxrpc_call *call;
+	struct rb_node *parent, **pp;
+	int ret;
+
+	_enter(",%lx", user_call_ID);
+
+	ASSERT(!irqs_disabled());
+
+	write_lock(&rx->call_lock);
+
+	ret = -ENODATA;
+	if (list_empty(&rx->acceptq))
+		goto out;
+
+	/* check the user ID isn't already in use */
+	ret = -EBADSLT;
+	pp = &rx->calls.rb_node;
+	parent = NULL;
+	while (*pp) {
+		parent = *pp;
+		call = rb_entry(parent, struct rxrpc_call, sock_node);
+
+		if (user_call_ID < call->user_call_ID)
+			pp = &(*pp)->rb_left;
+		else if (user_call_ID > call->user_call_ID)
+			pp = &(*pp)->rb_right;
+		else
+			goto out;
+	}
+
+	/* dequeue the first call and check it's still valid */
+	call = list_entry(rx->acceptq.next, struct rxrpc_call, accept_link);
+	list_del_init(&call->accept_link);
+	sk_acceptq_removed(&rx->sk);
+
+	write_lock_bh(&call->state_lock);
+	switch (call->state) {
+	case RXRPC_CALL_SERVER_ACCEPTING:
+		call->state = RXRPC_CALL_SERVER_RECV_REQUEST;
+		break;
+	case RXRPC_CALL_REMOTELY_ABORTED:
+	case RXRPC_CALL_LOCALLY_ABORTED:
+		ret = -ECONNABORTED;
+		goto out_release;
+	case RXRPC_CALL_NETWORK_ERROR:
+		ret = call->conn->error;
+		goto out_release;
+	case RXRPC_CALL_DEAD:
+		ret = -ETIME;
+		goto out_discard;
+	default:
+		BUG();
+	}
+
+	/* formalise the acceptance */
+	call->user_call_ID = user_call_ID;
+	rb_link_node(&call->sock_node, parent, pp);
+	rb_insert_color(&call->sock_node, &rx->calls);
+	if (test_and_set_bit(RXRPC_CALL_HAS_USERID, &call->flags))
+		BUG();
+	if (test_and_set_bit(RXRPC_CALL_ACCEPTED, &call->events))
+		BUG();
+	schedule_work(&call->processor);
+
+	write_unlock_bh(&call->state_lock);
+	write_unlock(&rx->call_lock);
+	_leave(" = 0");
+	return 0;
+
+	/* if the call is already dying or dead, then we leave the socket's ref
+	 * on it to be released by rxrpc_dead_call_expired() as induced by
+	 * rxrpc_release_call() */
+out_release:
+	_debug("release %p", call);
+	if (!test_bit(RXRPC_CALL_RELEASED, &call->flags) &&
+	    !test_and_set_bit(RXRPC_CALL_RELEASE, &call->events))
+		schedule_work(&call->processor);
+out_discard:
+	write_unlock_bh(&call->state_lock);
+	_debug("discard %p", call);
+out:
+	write_unlock(&rx->call_lock);
+	_leave(" = %d", ret);
+	return ret;
+}
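+
+/* Sketch of the resulting userspace accept flow (constant names per
+ * Documentation/networking/rxrpc.txt; treat them as assumptions here):
+ * recvmsg() on the listening socket yields a new-call notification; the
+ * server then answers with a sendmsg() carrying an RXRPC_ACCEPT control
+ * message plus its chosen RXRPC_USER_CALL_ID, which is what lands in
+ * rxrpc_accept_call() above and keys the call into the rx->calls rbtree.
+ */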
diff --git a/net/rxrpc/ar-ack.c b/net/rxrpc/ar-ack.c
new file mode 100644
index 000000000000..8f7764eca96c
--- /dev/null
+++ b/net/rxrpc/ar-ack.c
@@ -0,0 +1,1250 @@
+/* Management of Tx window, Tx resend, ACKs and out-of-sequence reception
+ *
+ * Copyright (C) 2007 Red Hat, Inc. All Rights Reserved.
+ * Written by David Howells (dhowells@redhat.com)
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ */
+
+#include <linux/module.h>
+#include <linux/circ_buf.h>
+#include <linux/net.h>
+#include <linux/skbuff.h>
+#include <linux/udp.h>
+#include <net/sock.h>
+#include <net/af_rxrpc.h>
+#include "ar-internal.h"
+
+static unsigned rxrpc_ack_defer = 1;
+
+static const char *rxrpc_acks[] = {
+	"---", "REQ", "DUP", "OOS", "WIN", "MEM", "PNG", "PNR", "DLY", "IDL",
+	"-?-"
+};
+
+static const s8 rxrpc_ack_priority[] = {
+	[0] = 0,
+	[RXRPC_ACK_DELAY] = 1,
+	[RXRPC_ACK_REQUESTED] = 2,
+	[RXRPC_ACK_IDLE] = 3,
+	[RXRPC_ACK_PING_RESPONSE] = 4,
+	[RXRPC_ACK_DUPLICATE] = 5,
+	[RXRPC_ACK_OUT_OF_SEQUENCE] = 6,
+	[RXRPC_ACK_EXCEEDS_WINDOW] = 7,
+	[RXRPC_ACK_NOSPACE] = 8,
+};
+
+/*
+ * propose an ACK be sent
+ */
+void __rxrpc_propose_ACK(struct rxrpc_call *call, uint8_t ack_reason,
+			 __be32 serial, bool immediate)
+{
+	unsigned long expiry;
+	s8 prior = rxrpc_ack_priority[ack_reason];
+
+	ASSERTCMP(prior, >, 0);
+
+	_enter("{%d},%s,%%%x,%u",
+	       call->debug_id, rxrpc_acks[ack_reason], ntohl(serial),
+	       immediate);
+
+	if (prior < rxrpc_ack_priority[call->ackr_reason]) {
+		if (immediate)
+			goto cancel_timer;
+		return;
+	}
+
+	/* update DELAY, IDLE, REQUESTED and PING_RESPONSE ACK serial
+	 * numbers */
+	if (prior == rxrpc_ack_priority[call->ackr_reason]) {
+		if (prior <= 4)
+			call->ackr_serial = serial;
+		if (immediate)
+			goto cancel_timer;
+		return;
+	}
+
+	call->ackr_reason = ack_reason;
+	call->ackr_serial = serial;
+
+	switch (ack_reason) {
+	case RXRPC_ACK_DELAY:
+		_debug("run delay timer");
+		call->ack_timer.expires = jiffies + rxrpc_ack_timeout * HZ;
+		add_timer(&call->ack_timer);
+		return;
+
+	case RXRPC_ACK_IDLE:
+		if (!immediate) {
+			_debug("run defer timer");
+			expiry = 1;
+			goto run_timer;
+		}
+		goto cancel_timer;
+
+	case RXRPC_ACK_REQUESTED:
+		if (!rxrpc_ack_defer)
+			goto cancel_timer;
+		if (!immediate || serial == cpu_to_be32(1)) {
+			_debug("run defer timer");
+			expiry = rxrpc_ack_defer;
+			goto run_timer;
+		}
+
+	default:
+		_debug("immediate ACK");
+		goto cancel_timer;
+	}
+
+run_timer:
+	expiry += jiffies;
+	if (!timer_pending(&call->ack_timer) ||
+	    time_after(call->ack_timer.expires, expiry))
+		mod_timer(&call->ack_timer, expiry);
+	return;
+
+cancel_timer:
+	_debug("cancel timer %%%u", ntohl(serial));
+	try_to_del_timer_sync(&call->ack_timer);
+	read_lock_bh(&call->state_lock);
+	if (call->state <= RXRPC_CALL_COMPLETE &&
+	    !test_and_set_bit(RXRPC_CALL_ACK, &call->events))
+		schedule_work(&call->processor);
+	read_unlock_bh(&call->state_lock);
+}
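+
+/* Worked example of the priority rules above: if a DELAY ACK (priority 1)
+ * is pending and an IDLE ACK (priority 3) is proposed, the pending reason
+ * is upgraded and the timer rearmed; a subsequent DELAY proposal is then
+ * dropped (1 < 3), and a second IDLE proposal merely refreshes
+ * ackr_serial, since reasons with priority <= 4 track the latest serial.
+ */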
+
+/*
+ * propose an ACK be sent, locking the call structure
+ */
+void rxrpc_propose_ACK(struct rxrpc_call *call, uint8_t ack_reason,
+		       __be32 serial, bool immediate)
+{
+	s8 prior = rxrpc_ack_priority[ack_reason];
+
+	if (prior > rxrpc_ack_priority[call->ackr_reason]) {
+		spin_lock_bh(&call->lock);
+		__rxrpc_propose_ACK(call, ack_reason, serial, immediate);
+		spin_unlock_bh(&call->lock);
+	}
+}
+
+/*
+ * set the resend timer
+ */
+static void rxrpc_set_resend(struct rxrpc_call *call, u8 resend,
+			     unsigned long resend_at)
+{
+	read_lock_bh(&call->state_lock);
+	if (call->state >= RXRPC_CALL_COMPLETE)
+		resend = 0;
+
+	if (resend & 1) {
+		_debug("SET RESEND");
+		set_bit(RXRPC_CALL_RESEND, &call->events);
+	}
+
+	if (resend & 2) {
+		_debug("MODIFY RESEND TIMER");
+		set_bit(RXRPC_CALL_RUN_RTIMER, &call->flags);
+		mod_timer(&call->resend_timer, resend_at);
+	} else {
+		_debug("KILL RESEND TIMER");
+		del_timer_sync(&call->resend_timer);
+		clear_bit(RXRPC_CALL_RESEND_TIMER, &call->events);
+		clear_bit(RXRPC_CALL_RUN_RTIMER, &call->flags);
+	}
+	read_unlock_bh(&call->state_lock);
+}
+
+/*
+ * resend packets
+ */
+static void rxrpc_resend(struct rxrpc_call *call)
+{
+	struct rxrpc_skb_priv *sp;
+	struct rxrpc_header *hdr;
+	struct sk_buff *txb;
+	unsigned long *p_txb, resend_at;
+	int loop, stop;
+	u8 resend;
+
+	_enter("{%d,%d,%d,%d},",
+	       call->acks_hard, call->acks_unacked,
+	       atomic_read(&call->sequence),
+	       CIRC_CNT(call->acks_head, call->acks_tail, call->acks_winsz));
+
+	stop = 0;
+	resend = 0;
+	resend_at = 0;
+
+	for (loop = call->acks_tail;
+	     loop != call->acks_head || stop;
+	     loop = (loop + 1) & (call->acks_winsz - 1)
+	     ) {
+		p_txb = call->acks_window + loop;
+		smp_read_barrier_depends();
+		if (*p_txb & 1)
+			continue;
+
+		txb = (struct sk_buff *) *p_txb;
+		sp = rxrpc_skb(txb);
+
+		if (sp->need_resend) {
+			sp->need_resend = 0;
+
+			/* each Tx packet has a new serial number */
+			sp->hdr.serial =
+				htonl(atomic_inc_return(&call->conn->serial));
+
+			hdr = (struct rxrpc_header *) txb->head;
+			hdr->serial = sp->hdr.serial;
+
+			_proto("Tx DATA %%%u { #%d }",
+			       ntohl(sp->hdr.serial), ntohl(sp->hdr.seq));
+			if (rxrpc_send_packet(call->conn->trans, txb) < 0) {
+				stop = 0;
+				sp->resend_at = jiffies + 3;
+			} else {
+				sp->resend_at =
+					jiffies + rxrpc_resend_timeout * HZ;
+			}
+		}
+
+		if (time_after_eq(jiffies + 1, sp->resend_at)) {
+			sp->need_resend = 1;
+			resend |= 1;
+		} else if (resend & 2) {
+			if (time_before(sp->resend_at, resend_at))
+				resend_at = sp->resend_at;
+		} else {
+			resend_at = sp->resend_at;
+			resend |= 2;
+		}
+	}
+
+	rxrpc_set_resend(call, resend, resend_at);
+	_leave("");
+}
+
+/*
+ * handle resend timer expiry
+ */
+static void rxrpc_resend_timer(struct rxrpc_call *call)
+{
+	struct rxrpc_skb_priv *sp;
+	struct sk_buff *txb;
+	unsigned long *p_txb, resend_at;
+	int loop;
+	u8 resend;
+
+	_enter("%d,%d,%d",
+	       call->acks_tail, call->acks_unacked, call->acks_head);
+
+	resend = 0;
+	resend_at = 0;
+
+	for (loop = call->acks_unacked;
+	     loop != call->acks_head;
+	     loop = (loop + 1) & (call->acks_winsz - 1)
+	     ) {
+		p_txb = call->acks_window + loop;
+		smp_read_barrier_depends();
+		txb = (struct sk_buff *) (*p_txb & ~1);
+		sp = rxrpc_skb(txb);
+
+		ASSERT(!(*p_txb & 1));
+
+		if (sp->need_resend) {
+			;
+		} else if (time_after_eq(jiffies + 1, sp->resend_at)) {
+			sp->need_resend = 1;
+			resend |= 1;
+		} else if (resend & 2) {
+			if (time_before(sp->resend_at, resend_at))
+				resend_at = sp->resend_at;
+		} else {
+			resend_at = sp->resend_at;
+			resend |= 2;
+		}
+	}
+
+	rxrpc_set_resend(call, resend, resend_at);
+	_leave("");
+}
+
+/*
+ * process soft ACKs of our transmitted packets
+ * - these indicate packets the peer has or has not received, but hasn't yet
+ *   given to the consumer, and so can still be discarded and re-requested
+ */
+static int rxrpc_process_soft_ACKs(struct rxrpc_call *call,
+				   struct rxrpc_ackpacket *ack,
+				   struct sk_buff *skb)
+{
+	struct rxrpc_skb_priv *sp;
+	struct sk_buff *txb;
+	unsigned long *p_txb, resend_at;
+	int loop;
+	u8 sacks[RXRPC_MAXACKS], resend;
+
+	_enter("{%d,%d},{%d},",
+	       call->acks_hard,
+	       CIRC_CNT(call->acks_head, call->acks_tail, call->acks_winsz),
+	       ack->nAcks);
+
+	if (skb_copy_bits(skb, 0, sacks, ack->nAcks) < 0)
+		goto protocol_error;
+
+	resend = 0;
+	resend_at = 0;
+	for (loop = 0; loop < ack->nAcks; loop++) {
+		p_txb = call->acks_window;
+		p_txb += (call->acks_tail + loop) & (call->acks_winsz - 1);
+		smp_read_barrier_depends();
+		txb = (struct sk_buff *) (*p_txb & ~1);
+		sp = rxrpc_skb(txb);
+
+		switch (sacks[loop]) {
+		case RXRPC_ACK_TYPE_ACK:
+			sp->need_resend = 0;
+			*p_txb |= 1;
+			break;
+		case RXRPC_ACK_TYPE_NACK:
+			sp->need_resend = 1;
+			*p_txb &= ~1;
+			resend = 1;
+			break;
+		default:
+			_debug("Unsupported ACK type %d", sacks[loop]);
+			goto protocol_error;
+		}
+	}
+
+	smp_mb();
+	call->acks_unacked = (call->acks_tail + loop) & (call->acks_winsz - 1);
+
+	/* anything not explicitly ACK'd is implicitly NACK'd, but may just not
+	 * have been received or processed yet by the far end */
+	for (loop = call->acks_unacked;
+	     loop != call->acks_head;
+	     loop = (loop + 1) & (call->acks_winsz - 1)
+	     ) {
+		p_txb = call->acks_window + loop;
+		smp_read_barrier_depends();
+		txb = (struct sk_buff *) (*p_txb & ~1);
+		sp = rxrpc_skb(txb);
+
+		if (*p_txb & 1) {
+			/* packet must have been discarded */
+			sp->need_resend = 1;
+			*p_txb &= ~1;
+			resend |= 1;
+		} else if (sp->need_resend) {
+			;
+		} else if (time_after_eq(jiffies + 1, sp->resend_at)) {
+			sp->need_resend = 1;
+			resend |= 1;
+		} else if (resend & 2) {
+			if (time_before(sp->resend_at, resend_at))
+				resend_at = sp->resend_at;
+		} else {
+			resend_at = sp->resend_at;
+			resend |= 2;
+		}
+	}
+
+	rxrpc_set_resend(call, resend, resend_at);
+	_leave(" = 0");
+	return 0;
+
+protocol_error:
+	_leave(" = -EPROTO");
+	return -EPROTO;
+}
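+
+/* Note on the window encoding used above: each acks_window[] slot holds a
+ * struct sk_buff pointer with bit 0 borrowed as a "soft-ACK'd" tag, which
+ * is why entries are masked with ~1 before dereferencing and why finding
+ * bit 0 set on a slot past acks_unacked means the peer must since have
+ * discarded that packet and it needs resending.
+ */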
+
+/*
+ * discard hard-ACK'd packets from the Tx window
+ */
+static void rxrpc_rotate_tx_window(struct rxrpc_call *call, u32 hard)
+{
+	struct rxrpc_skb_priv *sp;
+	unsigned long _skb;
+	int tail = call->acks_tail, old_tail;
+	int win = CIRC_CNT(call->acks_head, tail, call->acks_winsz);
+
+	_enter("{%u,%u},%u", call->acks_hard, win, hard);
+
+	ASSERTCMP(hard - call->acks_hard, <=, win);
+
+	while (call->acks_hard < hard) {
+		smp_read_barrier_depends();
+		_skb = call->acks_window[tail] & ~1;
+		sp = rxrpc_skb((struct sk_buff *) _skb);
+		rxrpc_free_skb((struct sk_buff *) _skb);
+		old_tail = tail;
+		tail = (tail + 1) & (call->acks_winsz - 1);
+		call->acks_tail = tail;
+		if (call->acks_unacked == old_tail)
+			call->acks_unacked = tail;
+		call->acks_hard++;
+	}
+
+	wake_up(&call->tx_waitq);
+}
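+
+/* Example of the circular-buffer arithmetic: with acks_winsz = 16,
+ * acks_head = 3 and acks_tail = 13, CIRC_CNT(3, 13, 16) = (3 - 13) & 15
+ * = 6 packets are in flight; hard-ACKing two of them advances acks_tail
+ * through 14 to 15 and bumps acks_hard by 2, waking any sender blocked
+ * on tx_waitq for window space.
+ */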
+
+/*
+ * clear the Tx window in the event of a failure
+ */
+static void rxrpc_clear_tx_window(struct rxrpc_call *call)
+{
+	rxrpc_rotate_tx_window(call, atomic_read(&call->sequence));
+}
+
+/*
+ * drain the out of sequence received packet queue into the packet Rx queue
+ */
+static int rxrpc_drain_rx_oos_queue(struct rxrpc_call *call)
+{
+	struct rxrpc_skb_priv *sp;
+	struct sk_buff *skb;
+	bool terminal;
+	int ret;
+
+	_enter("{%d,%d}", call->rx_data_post, call->rx_first_oos);
+
+	spin_lock_bh(&call->lock);
+
+	ret = -ECONNRESET;
+	if (test_bit(RXRPC_CALL_RELEASED, &call->flags))
+		goto socket_unavailable;
+
+	skb = skb_dequeue(&call->rx_oos_queue);
+	if (skb) {
+		sp = rxrpc_skb(skb);
+
+		_debug("drain OOS packet %d [%d]",
+		       ntohl(sp->hdr.seq), call->rx_first_oos);
+
+		if (ntohl(sp->hdr.seq) != call->rx_first_oos) {
+			skb_queue_head(&call->rx_oos_queue, skb);
+			call->rx_first_oos = ntohl(rxrpc_skb(skb)->hdr.seq);
+			_debug("requeue %p {%u}", skb, call->rx_first_oos);
+		} else {
+			skb->mark = RXRPC_SKB_MARK_DATA;
+			terminal = ((sp->hdr.flags & RXRPC_LAST_PACKET) &&
+				    !(sp->hdr.flags & RXRPC_CLIENT_INITIATED));
+			ret = rxrpc_queue_rcv_skb(call, skb, true, terminal);
+			BUG_ON(ret < 0);
+			_debug("drain #%u", call->rx_data_post);
+			call->rx_data_post++;
+
+			/* find out what the next packet is */
+			skb = skb_peek(&call->rx_oos_queue);
+			if (skb)
+				call->rx_first_oos =
+					ntohl(rxrpc_skb(skb)->hdr.seq);
+			else
+				call->rx_first_oos = 0;
+			_debug("peek %p {%u}", skb, call->rx_first_oos);
+		}
+	}
+
+	ret = 0;
+socket_unavailable:
+	spin_unlock_bh(&call->lock);
+	_leave(" = %d", ret);
+	return ret;
+}
+
+/*
+ * insert an out of sequence packet into the buffer
+ */
+static void rxrpc_insert_oos_packet(struct rxrpc_call *call,
+				    struct sk_buff *skb)
+{
+	struct rxrpc_skb_priv *sp, *psp;
+	struct sk_buff *p;
+	u32 seq;
+
+	sp = rxrpc_skb(skb);
+	seq = ntohl(sp->hdr.seq);
+	_enter(",,{%u}", seq);
+
+	skb->destructor = rxrpc_packet_destructor;
+	ASSERTCMP(sp->call, ==, NULL);
+	sp->call = call;
+	rxrpc_get_call(call);
+
+	/* insert into the buffer in sequence order */
+	spin_lock_bh(&call->lock);
+
+	skb_queue_walk(&call->rx_oos_queue, p) {
+		psp = rxrpc_skb(p);
+		if (ntohl(psp->hdr.seq) > seq) {
+			_debug("insert oos #%u before #%u",
+			       seq, ntohl(psp->hdr.seq));
+			skb_insert(p, skb, &call->rx_oos_queue);
+			goto inserted;
+		}
+	}
+
+	_debug("append oos #%u", seq);
+	skb_queue_tail(&call->rx_oos_queue, skb);
+inserted:
+
+	/* we might now have a new front to the queue */
+	if (call->rx_first_oos == 0 || seq < call->rx_first_oos)
+		call->rx_first_oos = seq;
+
+	read_lock(&call->state_lock);
+	if (call->state < RXRPC_CALL_COMPLETE &&
+	    call->rx_data_post == call->rx_first_oos) {
+		_debug("drain rx oos now");
+		set_bit(RXRPC_CALL_DRAIN_RX_OOS, &call->events);
+	}
+	read_unlock(&call->state_lock);
+
+	spin_unlock_bh(&call->lock);
+	_leave(" [stored #%u]", call->rx_first_oos);
+}
+
+/*
+ * clear the Tx window on final ACK reception
+ */
+static void rxrpc_zap_tx_window(struct rxrpc_call *call)
+{
+	struct rxrpc_skb_priv *sp;
+	struct sk_buff *skb;
+	unsigned long _skb, *acks_window;
+	uint8_t winsz = call->acks_winsz;
+	int tail;
+
+	acks_window = call->acks_window;
+	call->acks_window = NULL;
+
+	while (CIRC_CNT(call->acks_head, call->acks_tail, winsz) > 0) {
+		tail = call->acks_tail;
+		smp_read_barrier_depends();
+		_skb = acks_window[tail] & ~1;
+		smp_mb();
+		call->acks_tail = (call->acks_tail + 1) & (winsz - 1);
+
+		skb = (struct sk_buff *) _skb;
+		sp = rxrpc_skb(skb);
+		_debug("+++ clear Tx %u", ntohl(sp->hdr.seq));
+		rxrpc_free_skb(skb);
+	}
+
+	kfree(acks_window);
+}
+
+/*
+ * process packets in the reception queue
+ */
+static int rxrpc_process_rx_queue(struct rxrpc_call *call,
+				  u32 *_abort_code)
+{
+	struct rxrpc_ackpacket ack;
+	struct rxrpc_skb_priv *sp;
+	struct sk_buff *skb;
+	bool post_ACK;
+	int latest;
+	u32 hard, tx;
+
+	_enter("");
+
+process_further:
+	skb = skb_dequeue(&call->rx_queue);
+	if (!skb)
+		return -EAGAIN;
+
+	_net("deferred skb %p", skb);
+
+	sp = rxrpc_skb(skb);
+
+	_debug("process %s [st %d]", rxrpc_pkts[sp->hdr.type], call->state);
+
+	post_ACK = false;
+
+	switch (sp->hdr.type) {
+		/* data packets that wind up here have been received out of
+		 * order, need security processing or are jumbo packets */
+	case RXRPC_PACKET_TYPE_DATA:
+		_proto("OOSQ DATA %%%u { #%u }",
+		       ntohl(sp->hdr.serial), ntohl(sp->hdr.seq));
+
+		/* secured packets must be verified and possibly decrypted */
+		if (rxrpc_verify_packet(call, skb, _abort_code) < 0)
+			goto protocol_error;
+
+		rxrpc_insert_oos_packet(call, skb);
+		goto process_further;
+
+		/* partial ACK to process */
+	case RXRPC_PACKET_TYPE_ACK:
+		if (skb_copy_bits(skb, 0, &ack, sizeof(ack)) < 0) {
+			_debug("extraction failure");
+			goto protocol_error;
+		}
+		if (!skb_pull(skb, sizeof(ack)))
+			BUG();
+
+		latest = ntohl(sp->hdr.serial);
+		hard = ntohl(ack.firstPacket);
+		tx = atomic_read(&call->sequence);
+
+		_proto("Rx ACK %%%u { m=%hu f=#%u p=#%u s=%%%u r=%s n=%u }",
+		       latest,
+		       ntohs(ack.maxSkew),
+		       hard,
+		       ntohl(ack.previousPacket),
+		       ntohl(ack.serial),
+		       rxrpc_acks[ack.reason],
+		       ack.nAcks);
+
+		if (ack.reason == RXRPC_ACK_PING) {
+			_proto("Rx ACK %%%u PING Request", latest);
+			rxrpc_propose_ACK(call, RXRPC_ACK_PING_RESPONSE,
+					  sp->hdr.serial, true);
+		}
+
+		/* discard any out-of-order or duplicate ACKs */
+		if (latest - call->acks_latest <= 0) {
+			_debug("discard ACK %d <= %d",
+			       latest, call->acks_latest);
+			goto discard;
+		}
+		call->acks_latest = latest;
+
+		if (call->state != RXRPC_CALL_CLIENT_SEND_REQUEST &&
+		    call->state != RXRPC_CALL_CLIENT_AWAIT_REPLY &&
+		    call->state != RXRPC_CALL_SERVER_SEND_REPLY &&
+		    call->state != RXRPC_CALL_SERVER_AWAIT_ACK)
+			goto discard;
+
+		_debug("Tx=%d H=%u S=%d", tx, call->acks_hard, call->state);
+
+		if (hard > 0) {
+			if (hard - 1 > tx) {
+				_debug("hard-ACK'd packet %d not transmitted"
+				       " (%d top)",
+				       hard - 1, tx);
+				goto protocol_error;
+			}
+
+			if ((call->state == RXRPC_CALL_CLIENT_AWAIT_REPLY ||
+			     call->state == RXRPC_CALL_SERVER_AWAIT_ACK) &&
+			    hard > tx)
+				goto all_acked;
+
+			smp_rmb();
+			rxrpc_rotate_tx_window(call, hard - 1);
+		}
+
+		if (ack.nAcks > 0) {
+			if (hard - 1 + ack.nAcks > tx) {
+				_debug("soft-ACK'd packet %d+%d not"
+				       " transmitted (%d top)",
+				       hard - 1, ack.nAcks, tx);
+				goto protocol_error;
+			}
+
+			if (rxrpc_process_soft_ACKs(call, &ack, skb) < 0)
+				goto protocol_error;
+		}
+		goto discard;
+
+		/* complete ACK to process */
+	case RXRPC_PACKET_TYPE_ACKALL:
+		goto all_acked;
+
+		/* abort and busy are handled elsewhere */
+	case RXRPC_PACKET_TYPE_BUSY:
+	case RXRPC_PACKET_TYPE_ABORT:
+		BUG();
+
+		/* connection level events - also handled elsewhere */
+	case RXRPC_PACKET_TYPE_CHALLENGE:
+	case RXRPC_PACKET_TYPE_RESPONSE:
+	case RXRPC_PACKET_TYPE_DEBUG:
+		BUG();
+	}
+
+	/* if we've had a hard ACK that covers all the packets we've sent, then
+	 * that ends that phase of the operation */
+all_acked:
+	write_lock_bh(&call->state_lock);
+	_debug("ack all %d", call->state);
+
+	switch (call->state) {
+	case RXRPC_CALL_CLIENT_AWAIT_REPLY:
+		call->state = RXRPC_CALL_CLIENT_RECV_REPLY;
+		break;
+	case RXRPC_CALL_SERVER_AWAIT_ACK:
+		_debug("srv complete");
+		call->state = RXRPC_CALL_COMPLETE;
+		post_ACK = true;
+		break;
+	case RXRPC_CALL_CLIENT_SEND_REQUEST:
+	case RXRPC_CALL_SERVER_RECV_REQUEST:
+		goto protocol_error_unlock; /* can't occur yet */
+	default:
+		write_unlock_bh(&call->state_lock);
+		goto discard; /* assume packet left over from earlier phase */
+	}
+
+	write_unlock_bh(&call->state_lock);
+
+	/* if all the packets we sent are hard-ACK'd, then we can discard
+	 * whatever we've got left */
+	_debug("clear Tx %d",
+	       CIRC_CNT(call->acks_head, call->acks_tail, call->acks_winsz));
+
+	del_timer_sync(&call->resend_timer);
+	clear_bit(RXRPC_CALL_RUN_RTIMER, &call->flags);
+	clear_bit(RXRPC_CALL_RESEND_TIMER, &call->events);
+
+	if (call->acks_window)
+		rxrpc_zap_tx_window(call);
+
+	if (post_ACK) {
+		/* post the final ACK message for userspace to pick up */
+		_debug("post ACK");
+		skb->mark = RXRPC_SKB_MARK_FINAL_ACK;
+		sp->call = call;
+		rxrpc_get_call(call);
+		spin_lock_bh(&call->lock);
+		if (rxrpc_queue_rcv_skb(call, skb, true, true) < 0)
+			BUG();
+		spin_unlock_bh(&call->lock);
+		goto process_further;
+	}
+
+discard:
+	rxrpc_free_skb(skb);
+	goto process_further;
+
+protocol_error_unlock:
+	write_unlock_bh(&call->state_lock);
+protocol_error:
+	rxrpc_free_skb(skb);
+	_leave(" = -EPROTO");
+	return -EPROTO;
+}
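+
+/* Worked example of the hard-ACK handling above: if packets #1-#8 have
+ * been transmitted (tx = 8) and an ACK arrives with firstPacket = 6, then
+ * hard = 6, packets #1-#5 are hard-ACK'd and rotated out of the Tx window
+ * via rxrpc_rotate_tx_window(call, 5), and any soft-ACK bitmap in the
+ * packet describes #6 onwards; firstPacket = 10 would instead trip the
+ * "not transmitted" protocol error since hard - 1 > tx.
+ */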
+
+/*
+ * post a message to the socket Rx queue for recvmsg() to pick up
+ */
+static int rxrpc_post_message(struct rxrpc_call *call, u32 mark, u32 error,
+			      bool fatal)
+{
+	struct rxrpc_skb_priv *sp;
+	struct sk_buff *skb;
+	int ret;
+
+	_enter("{%d,%lx},%u,%u,%d",
+	       call->debug_id, call->flags, mark, error, fatal);
+
+	/* remove timers and things for fatal messages */
+	if (fatal) {
+		del_timer_sync(&call->resend_timer);
+		del_timer_sync(&call->ack_timer);
+		clear_bit(RXRPC_CALL_RUN_RTIMER, &call->flags);
+	}
+
+	if (mark != RXRPC_SKB_MARK_NEW_CALL &&
+	    !test_bit(RXRPC_CALL_HAS_USERID, &call->flags)) {
+		_leave("[no userid]");
+		return 0;
+	}
+
+	if (!test_bit(RXRPC_CALL_TERMINAL_MSG, &call->flags)) {
+		skb = alloc_skb(0, GFP_NOFS);
+		if (!skb)
+			return -ENOMEM;
+
+		rxrpc_new_skb(skb);
+
+		skb->mark = mark;
+
+		sp = rxrpc_skb(skb);
+		memset(sp, 0, sizeof(*sp));
+		sp->error = error;
+		sp->call = call;
+		rxrpc_get_call(call);
+
+		spin_lock_bh(&call->lock);
+		ret = rxrpc_queue_rcv_skb(call, skb, true, fatal);
+		spin_unlock_bh(&call->lock);
+		if (ret < 0)
+			BUG();
+	}
+
+	return 0;
+}
+
+/*
+ * handle background processing of incoming call packets and ACK / abort
+ * generation
+ */
+void rxrpc_process_call(struct work_struct *work)
+{
+	struct rxrpc_call *call =
+		container_of(work, struct rxrpc_call, processor);
+	struct rxrpc_ackpacket ack;
+	struct rxrpc_ackinfo ackinfo;
+	struct rxrpc_header hdr;
+	struct msghdr msg;
+	struct kvec iov[5];
+	unsigned long bits;
+	__be32 data;
+	size_t len;
+	int genbit, loop, nbit, ioc, ret;
+	u32 abort_code = RX_PROTOCOL_ERROR;
+	u8 *acks = NULL;
+
+	//printk("\n--------------------\n");
+	_enter("{%d,%s,%lx} [%lu]",
+	       call->debug_id, rxrpc_call_states[call->state], call->events,
+	       (jiffies - call->creation_jif) / (HZ / 10));
+
+	if (test_and_set_bit(RXRPC_CALL_PROC_BUSY, &call->flags)) {
+		_debug("XXXXXXXXXXXXX RUNNING ON MULTIPLE CPUS XXXXXXXXXXXXX");
+		return;
+	}
+
+	/* there's a good chance we're going to have to send a message, so set
+	 * one up in advance */
+	msg.msg_name = &call->conn->trans->peer->srx.transport.sin;
+	msg.msg_namelen = sizeof(call->conn->trans->peer->srx.transport.sin);
+	msg.msg_control = NULL;
+	msg.msg_controllen = 0;
+	msg.msg_flags = 0;
+
+	hdr.epoch = call->conn->epoch;
+	hdr.cid = call->cid;
+	hdr.callNumber = call->call_id;
+	hdr.seq = 0;
+	hdr.type = RXRPC_PACKET_TYPE_ACK;
+	hdr.flags = call->conn->out_clientflag;
+	hdr.userStatus = 0;
+	hdr.securityIndex = call->conn->security_ix;
+	hdr._rsvd = 0;
+	hdr.serviceId = call->conn->service_id;
+
+	memset(iov, 0, sizeof(iov));
+	iov[0].iov_base = &hdr;
+	iov[0].iov_len = sizeof(hdr);
+
+	/* deal with events of a final nature */
+	if (test_bit(RXRPC_CALL_RELEASE, &call->events)) {
+		rxrpc_release_call(call);
+		clear_bit(RXRPC_CALL_RELEASE, &call->events);
+	}
+
+	if (test_bit(RXRPC_CALL_RCVD_ERROR, &call->events)) {
+		int error;
+
+		clear_bit(RXRPC_CALL_CONN_ABORT, &call->events);
+		clear_bit(RXRPC_CALL_REJECT_BUSY, &call->events);
+		clear_bit(RXRPC_CALL_ABORT, &call->events);
+
+		error = call->conn->trans->peer->net_error;
+		_debug("post net error %d", error);
+
+		if (rxrpc_post_message(call, RXRPC_SKB_MARK_NET_ERROR,
+				       error, true) < 0)
+			goto no_mem;
+		clear_bit(RXRPC_CALL_RCVD_ERROR, &call->events);
+		goto kill_ACKs;
+	}
+
+	if (test_bit(RXRPC_CALL_CONN_ABORT, &call->events)) {
+		ASSERTCMP(call->state, >, RXRPC_CALL_COMPLETE);
+
+		clear_bit(RXRPC_CALL_REJECT_BUSY, &call->events);
+		clear_bit(RXRPC_CALL_ABORT, &call->events);
+
+		_debug("post conn abort");
+
+		if (rxrpc_post_message(call, RXRPC_SKB_MARK_LOCAL_ERROR,
+				       call->conn->error, true) < 0)
+			goto no_mem;
+		clear_bit(RXRPC_CALL_CONN_ABORT, &call->events);
+		goto kill_ACKs;
+	}
+
+	if (test_bit(RXRPC_CALL_REJECT_BUSY, &call->events)) {
+		hdr.type = RXRPC_PACKET_TYPE_BUSY;
+		genbit = RXRPC_CALL_REJECT_BUSY;
+		goto send_message;
+	}
+
+	if (test_bit(RXRPC_CALL_ABORT, &call->events)) {
+		ASSERTCMP(call->state, >, RXRPC_CALL_COMPLETE);
+
+		if (rxrpc_post_message(call, RXRPC_SKB_MARK_LOCAL_ERROR,
+				       ECONNABORTED, true) < 0)
+			goto no_mem;
+		hdr.type = RXRPC_PACKET_TYPE_ABORT;
+		data = htonl(call->abort_code);
+		iov[1].iov_base = &data;
+		iov[1].iov_len = sizeof(data);
+		genbit = RXRPC_CALL_ABORT;
+		goto send_message;
+	}
+
+	if (test_bit(RXRPC_CALL_ACK_FINAL, &call->events)) {
+		hdr.type = RXRPC_PACKET_TYPE_ACKALL;
+		genbit = RXRPC_CALL_ACK_FINAL;
+		goto send_message;
+	}
+
+	if (call->events & ((1 << RXRPC_CALL_RCVD_BUSY) |
+			    (1 << RXRPC_CALL_RCVD_ABORT))
+	    ) {
+		u32 mark;
+
+		if (test_bit(RXRPC_CALL_RCVD_ABORT, &call->events))
+			mark = RXRPC_SKB_MARK_REMOTE_ABORT;
+		else
+			mark = RXRPC_SKB_MARK_BUSY;
+
+		_debug("post abort/busy");
+		rxrpc_clear_tx_window(call);
+		if (rxrpc_post_message(call, mark, ECONNABORTED, true) < 0)
+			goto no_mem;
+
+		clear_bit(RXRPC_CALL_RCVD_BUSY, &call->events);
+		clear_bit(RXRPC_CALL_RCVD_ABORT, &call->events);
+		goto kill_ACKs;
925 }
926
927 if (test_and_clear_bit(RXRPC_CALL_RCVD_ACKALL, &call->events)) {
928 _debug("do implicit ackall");
929 rxrpc_clear_tx_window(call);
930 }
931
932 if (test_bit(RXRPC_CALL_LIFE_TIMER, &call->events)) {
933 write_lock_bh(&call->state_lock);
934 if (call->state <= RXRPC_CALL_COMPLETE) {
935 call->state = RXRPC_CALL_LOCALLY_ABORTED;
936 call->abort_code = RX_CALL_TIMEOUT;
937 set_bit(RXRPC_CALL_ABORT, &call->events);
938 }
939 write_unlock_bh(&call->state_lock);
940
941 _debug("post timeout");
942 if (rxrpc_post_message(call, RXRPC_SKB_MARK_LOCAL_ERROR,
943 ETIME, true) < 0)
944 goto no_mem;
945
946 clear_bit(RXRPC_CALL_LIFE_TIMER, &call->events);
947 goto kill_ACKs;
948 }
949
950 /* deal with assorted inbound messages */
951 if (!skb_queue_empty(&call->rx_queue)) {
952 switch (rxrpc_process_rx_queue(call, &abort_code)) {
953 case 0:
954 case -EAGAIN:
955 break;
956 case -ENOMEM:
957 goto no_mem;
958 case -EKEYEXPIRED:
959 case -EKEYREJECTED:
960 case -EPROTO:
961 rxrpc_abort_call(call, abort_code);
962 goto kill_ACKs;
963 }
964 }
965
966 /* handle resending */
967 if (test_and_clear_bit(RXRPC_CALL_RESEND_TIMER, &call->events))
968 rxrpc_resend_timer(call);
969 if (test_and_clear_bit(RXRPC_CALL_RESEND, &call->events))
970 rxrpc_resend(call);
971
972 /* consider sending an ordinary ACK */
973 if (test_bit(RXRPC_CALL_ACK, &call->events)) {
974 __be32 pad;
975
976 _debug("send ACK: window: %d - %d { %lx }",
977 call->rx_data_eaten, call->ackr_win_top,
978 call->ackr_window[0]);
979
980 if (call->state > RXRPC_CALL_SERVER_ACK_REQUEST &&
981 call->ackr_reason != RXRPC_ACK_PING_RESPONSE) {
982 /* ACK by sending reply DATA packet in this state */
983 clear_bit(RXRPC_CALL_ACK, &call->events);
984 goto maybe_reschedule;
985 }
986
987 genbit = RXRPC_CALL_ACK;
988
989 acks = kzalloc(call->ackr_win_top - call->rx_data_eaten,
990 GFP_NOFS);
991 if (!acks)
992 goto no_mem;
993
994 //hdr.flags = RXRPC_SLOW_START_OK;
995 ack.bufferSpace = htons(8);
996 ack.maxSkew = 0;
997 ack.serial = 0;
998 ack.reason = 0;
999
1000 ackinfo.rxMTU = htonl(5692);
1001// ackinfo.rxMTU = htonl(call->conn->trans->peer->maxdata);
1002 ackinfo.maxMTU = htonl(call->conn->trans->peer->maxdata);
1003 ackinfo.rwind = htonl(32);
1004 ackinfo.jumbo_max = htonl(4);
1005
1006 spin_lock_bh(&call->lock);
1007 ack.reason = call->ackr_reason;
1008 ack.serial = call->ackr_serial;
1009 ack.previousPacket = call->ackr_prev_seq;
1010 ack.firstPacket = htonl(call->rx_data_eaten + 1);
1011
1012 ack.nAcks = 0;
1013 for (loop = 0; loop < RXRPC_ACKR_WINDOW_ASZ; loop++) {
1014 nbit = loop * BITS_PER_LONG;
1015 for (bits = call->ackr_window[loop];
1016 bits; bits >>= 1) {
1017 _debug("- l=%d n=%d b=%lx", loop, nbit, bits);
1018 if (bits & 1) {
1019 acks[nbit] = RXRPC_ACK_TYPE_ACK;
1020 ack.nAcks = nbit + 1;
1021 }
1022 nbit++;
1023 }
1024 }
1025 call->ackr_reason = 0;
1026 spin_unlock_bh(&call->lock);
1027
1028 pad = 0;
1029
1030 iov[1].iov_base = &ack;
1031 iov[1].iov_len = sizeof(ack);
1032 iov[2].iov_base = acks;
1033 iov[2].iov_len = ack.nAcks;
1034 iov[3].iov_base = &pad;
1035 iov[3].iov_len = 3;
1036 iov[4].iov_base = &ackinfo;
1037 iov[4].iov_len = sizeof(ackinfo);
1038
1039 switch (ack.reason) {
1040 case RXRPC_ACK_REQUESTED:
1041 case RXRPC_ACK_DUPLICATE:
1042 case RXRPC_ACK_OUT_OF_SEQUENCE:
1043 case RXRPC_ACK_EXCEEDS_WINDOW:
1044 case RXRPC_ACK_NOSPACE:
1045 case RXRPC_ACK_PING:
1046 case RXRPC_ACK_PING_RESPONSE:
1047 goto send_ACK_with_skew;
1048 case RXRPC_ACK_DELAY:
1049 case RXRPC_ACK_IDLE:
1050 goto send_ACK;
1051 }
1052 }
1053
1054 /* handle completion of security negotiations on an incoming
1055 * connection */
1056 if (test_and_clear_bit(RXRPC_CALL_SECURED, &call->events)) {
1057 _debug("secured");
1058 spin_lock_bh(&call->lock);
1059
1060 if (call->state == RXRPC_CALL_SERVER_SECURING) {
1061 _debug("securing");
1062 write_lock(&call->conn->lock);
1063 if (!test_bit(RXRPC_CALL_RELEASED, &call->flags) &&
1064 !test_bit(RXRPC_CALL_RELEASE, &call->events)) {
1065 _debug("not released");
1066 call->state = RXRPC_CALL_SERVER_ACCEPTING;
1067 list_move_tail(&call->accept_link,
1068 &call->socket->acceptq);
1069 }
1070 write_unlock(&call->conn->lock);
1071 read_lock(&call->state_lock);
1072 if (call->state < RXRPC_CALL_COMPLETE)
1073 set_bit(RXRPC_CALL_POST_ACCEPT, &call->events);
1074 read_unlock(&call->state_lock);
1075 }
1076
1077 spin_unlock_bh(&call->lock);
1078 if (!test_bit(RXRPC_CALL_POST_ACCEPT, &call->events))
1079 goto maybe_reschedule;
1080 }
1081
1082 /* post a notification of an acceptable incoming call to the app */
1083 if (test_bit(RXRPC_CALL_POST_ACCEPT, &call->events)) {
1084 _debug("post accept");
1085 if (rxrpc_post_message(call, RXRPC_SKB_MARK_NEW_CALL,
1086 0, false) < 0)
1087 goto no_mem;
1088 clear_bit(RXRPC_CALL_POST_ACCEPT, &call->events);
1089 goto maybe_reschedule;
1090 }
1091
1092 /* handle incoming call acceptance */
1093 if (test_and_clear_bit(RXRPC_CALL_ACCEPTED, &call->events)) {
1094 _debug("accepted");
1095 ASSERTCMP(call->rx_data_post, ==, 0);
1096 call->rx_data_post = 1;
1097 read_lock_bh(&call->state_lock);
1098 if (call->state < RXRPC_CALL_COMPLETE)
1099 set_bit(RXRPC_CALL_DRAIN_RX_OOS, &call->events);
1100 read_unlock_bh(&call->state_lock);
1101 }
1102
1103 /* drain the out of sequence received packet queue into the packet Rx
1104 * queue */
1105 if (test_and_clear_bit(RXRPC_CALL_DRAIN_RX_OOS, &call->events)) {
1106 while (call->rx_data_post == call->rx_first_oos)
1107 if (rxrpc_drain_rx_oos_queue(call) < 0)
1108 break;
1109 goto maybe_reschedule;
1110 }
1111
1112 /* other events may have been raised since we started checking */
1113 goto maybe_reschedule;
1114
1115send_ACK_with_skew:
1116 ack.maxSkew = htons(atomic_read(&call->conn->hi_serial) -
1117 ntohl(ack.serial));
1118send_ACK:
1119 hdr.serial = htonl(atomic_inc_return(&call->conn->serial));
1120 _proto("Tx ACK %%%u { m=%hu f=#%u p=#%u s=%%%u r=%s n=%u }",
1121 ntohl(hdr.serial),
1122 ntohs(ack.maxSkew),
1123 ntohl(ack.firstPacket),
1124 ntohl(ack.previousPacket),
1125 ntohl(ack.serial),
1126 rxrpc_acks[ack.reason],
1127 ack.nAcks);
1128
1129 del_timer_sync(&call->ack_timer);
1130 if (ack.nAcks > 0)
1131 set_bit(RXRPC_CALL_TX_SOFT_ACK, &call->flags);
1132 goto send_message_2;
1133
1134send_message:
1135 _debug("send message");
1136
1137 hdr.serial = htonl(atomic_inc_return(&call->conn->serial));
1138 _proto("Tx %s %%%u", rxrpc_pkts[hdr.type], ntohl(hdr.serial));
1139send_message_2:
1140
1141 len = iov[0].iov_len;
1142 ioc = 1;
1143 if (iov[4].iov_len) {
1144 ioc = 5;
1145 len += iov[4].iov_len;
1146 len += iov[3].iov_len;
1147 len += iov[2].iov_len;
1148 len += iov[1].iov_len;
1149 } else if (iov[3].iov_len) {
1150 ioc = 4;
1151 len += iov[3].iov_len;
1152 len += iov[2].iov_len;
1153 len += iov[1].iov_len;
1154 } else if (iov[2].iov_len) {
1155 ioc = 3;
1156 len += iov[2].iov_len;
1157 len += iov[1].iov_len;
1158 } else if (iov[1].iov_len) {
1159 ioc = 2;
1160 len += iov[1].iov_len;
1161 }
1162
1163 ret = kernel_sendmsg(call->conn->trans->local->socket,
1164 &msg, iov, ioc, len);
1165 if (ret < 0) {
1166 _debug("sendmsg failed: %d", ret);
1167 read_lock_bh(&call->state_lock);
1168 if (call->state < RXRPC_CALL_DEAD)
1169 schedule_work(&call->processor);
1170 read_unlock_bh(&call->state_lock);
1171 goto error;
1172 }
1173
1174 switch (genbit) {
1175 case RXRPC_CALL_ABORT:
1176 clear_bit(genbit, &call->events);
1177 clear_bit(RXRPC_CALL_RCVD_ABORT, &call->events);
1178 goto kill_ACKs;
1179
1180 case RXRPC_CALL_ACK_FINAL:
1181 write_lock_bh(&call->state_lock);
1182 if (call->state == RXRPC_CALL_CLIENT_FINAL_ACK)
1183 call->state = RXRPC_CALL_COMPLETE;
1184 write_unlock_bh(&call->state_lock);
1185 goto kill_ACKs;
1186
1187 default:
1188 clear_bit(genbit, &call->events);
1189 switch (call->state) {
1190 case RXRPC_CALL_CLIENT_AWAIT_REPLY:
1191 case RXRPC_CALL_CLIENT_RECV_REPLY:
1192 case RXRPC_CALL_SERVER_RECV_REQUEST:
1193 case RXRPC_CALL_SERVER_ACK_REQUEST:
1194 _debug("start ACK timer");
1195 rxrpc_propose_ACK(call, RXRPC_ACK_DELAY,
1196 call->ackr_serial, false);
1197 default:
1198 break;
1199 }
1200 goto maybe_reschedule;
1201 }
1202
1203kill_ACKs:
1204 del_timer_sync(&call->ack_timer);
1205 if (test_and_clear_bit(RXRPC_CALL_ACK_FINAL, &call->events))
1206 rxrpc_put_call(call);
1207 clear_bit(RXRPC_CALL_ACK, &call->events);
1208
1209maybe_reschedule:
1210 if (call->events || !skb_queue_empty(&call->rx_queue)) {
1211 read_lock_bh(&call->state_lock);
1212 if (call->state < RXRPC_CALL_DEAD)
1213 schedule_work(&call->processor);
1214 read_unlock_bh(&call->state_lock);
1215 }
1216
1217 /* don't leave aborted connections on the accept queue */
1218 if (call->state >= RXRPC_CALL_COMPLETE &&
1219 !list_empty(&call->accept_link)) {
1220 _debug("X unlinking once-pending call %p { e=%lx f=%lx c=%x }",
1221 call, call->events, call->flags,
1222 ntohl(call->conn->cid));
1223
1224 read_lock_bh(&call->state_lock);
1225 if (!test_bit(RXRPC_CALL_RELEASED, &call->flags) &&
1226 !test_and_set_bit(RXRPC_CALL_RELEASE, &call->events))
1227 schedule_work(&call->processor);
1228 read_unlock_bh(&call->state_lock);
1229 }
1230
1231error:
1232 clear_bit(RXRPC_CALL_PROC_BUSY, &call->flags);
1233 kfree(acks);
1234
1235 /* because we don't want two CPUs both processing the work item for one
1236 * call at the same time, we use a flag to note when it's busy; however,
1237 * this leaves a race between clearing the flag and the work pending bit
1238 * being set, in which case we must requeue the work item ourselves */
1239 if (call->events && !work_pending(&call->processor)) {
1240 _debug("jumpstart %x", ntohl(call->conn->cid));
1241 schedule_work(&call->processor);
1242 }
1243
1244 _leave("");
1245 return;
1246
1247no_mem:
1248 _debug("out of memory");
1249 goto maybe_reschedule;
1250}
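
The soft-ACK assembly in rxrpc_process_call() above (the loop over call->ackr_window) turns a bitmap of received packets into one ACK-type byte per packet, with nAcks left one past the highest acknowledged slot; slots never written stay zero and read as NACKs, which is why the buffer comes from kzalloc(). The following is a minimal standalone sketch of that transformation, not the kernel code itself; the constants and the sample window are invented for illustration.

	#include <stdio.h>
	#include <string.h>

	#define ACK_TYPE_NACK	0	/* stand-ins for RXRPC_ACK_TYPE_* */
	#define ACK_TYPE_ACK	1
	#define WINDOW_WORDS	2
	#define BITS_PER_LONG	(8 * (int) sizeof(unsigned long))

	int main(void)
	{
		/* bits 0, 1 and 3 of word 0 and bit 0 of word 1 are "received" */
		unsigned long window[WINDOW_WORDS] = { 0x0b, 0x01 };
		unsigned char acks[WINDOW_WORDS * BITS_PER_LONG];
		int loop, nbit, n_acks = 0;

		memset(acks, ACK_TYPE_NACK, sizeof(acks));

		for (loop = 0; loop < WINDOW_WORDS; loop++) {
			unsigned long bits;

			nbit = loop * BITS_PER_LONG;
			for (bits = window[loop]; bits; bits >>= 1) {
				if (bits & 1) {
					acks[nbit] = ACK_TYPE_ACK;
					n_acks = nbit + 1; /* highest ACKed + 1 */
				}
				nbit++;
			}
		}

		printf("nAcks = %d:", n_acks);
		for (nbit = 0; nbit < n_acks; nbit++)
			printf(" %d", acks[nbit]);
		printf("\n");
		return 0;
	}

On a 64-bit machine this prints nAcks = 65, with ACKs at slots 0, 1, 3 and 64 and NACKs in the gaps, which is the shape of the buffer the kernel loop hands to the wire in iov[2].
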
diff --git a/net/rxrpc/ar-call.c b/net/rxrpc/ar-call.c
new file mode 100644
index 000000000000..ac31cceda2f1
--- /dev/null
+++ b/net/rxrpc/ar-call.c
@@ -0,0 +1,787 @@
1/* RxRPC individual remote procedure call handling
2 *
3 * Copyright (C) 2007 Red Hat, Inc. All Rights Reserved.
4 * Written by David Howells (dhowells@redhat.com)
5 *
6 * This program is free software; you can redistribute it and/or
7 * modify it under the terms of the GNU General Public License
8 * as published by the Free Software Foundation; either version
9 * 2 of the License, or (at your option) any later version.
10 */
11
12#include <linux/module.h>
13#include <linux/circ_buf.h>
14#include <net/sock.h>
15#include <net/af_rxrpc.h>
16#include "ar-internal.h"
17
18struct kmem_cache *rxrpc_call_jar;
19LIST_HEAD(rxrpc_calls);
20DEFINE_RWLOCK(rxrpc_call_lock);
21static unsigned rxrpc_call_max_lifetime = 60;
22static unsigned rxrpc_dead_call_timeout = 10;
23
24static void rxrpc_destroy_call(struct work_struct *work);
25static void rxrpc_call_life_expired(unsigned long _call);
26static void rxrpc_dead_call_expired(unsigned long _call);
27static void rxrpc_ack_time_expired(unsigned long _call);
28static void rxrpc_resend_time_expired(unsigned long _call);
29
30/*
31 * allocate a new call
32 */
33static struct rxrpc_call *rxrpc_alloc_call(gfp_t gfp)
34{
35 struct rxrpc_call *call;
36
37 call = kmem_cache_zalloc(rxrpc_call_jar, gfp);
38 if (!call)
39 return NULL;
40
41 call->acks_winsz = 16;
42 call->acks_window = kmalloc(call->acks_winsz * sizeof(unsigned long),
43 gfp);
44 if (!call->acks_window) {
45 kmem_cache_free(rxrpc_call_jar, call);
46 return NULL;
47 }
48
49 setup_timer(&call->lifetimer, &rxrpc_call_life_expired,
50 (unsigned long) call);
51 setup_timer(&call->deadspan, &rxrpc_dead_call_expired,
52 (unsigned long) call);
53 setup_timer(&call->ack_timer, &rxrpc_ack_time_expired,
54 (unsigned long) call);
55 setup_timer(&call->resend_timer, &rxrpc_resend_time_expired,
56 (unsigned long) call);
57 INIT_WORK(&call->destroyer, &rxrpc_destroy_call);
58 INIT_WORK(&call->processor, &rxrpc_process_call);
59 INIT_LIST_HEAD(&call->accept_link);
60 skb_queue_head_init(&call->rx_queue);
61 skb_queue_head_init(&call->rx_oos_queue);
62 init_waitqueue_head(&call->tx_waitq);
63 spin_lock_init(&call->lock);
64 rwlock_init(&call->state_lock);
65 atomic_set(&call->usage, 1);
66 call->debug_id = atomic_inc_return(&rxrpc_debug_id);
67 call->state = RXRPC_CALL_CLIENT_SEND_REQUEST;
68
69 memset(&call->sock_node, 0xed, sizeof(call->sock_node));
70
71 call->rx_data_expect = 1;
72 call->rx_data_eaten = 0;
73 call->rx_first_oos = 0;
74 call->ackr_win_top = call->rx_data_eaten + 1 + RXRPC_MAXACKS;
75 call->creation_jif = jiffies;
76 return call;
77}
78
79/*
80 * allocate a new client call and attempt to get a connection slot for it
81 */
82static struct rxrpc_call *rxrpc_alloc_client_call(
83 struct rxrpc_sock *rx,
84 struct rxrpc_transport *trans,
85 struct rxrpc_conn_bundle *bundle,
86 gfp_t gfp)
87{
88 struct rxrpc_call *call;
89 int ret;
90
91 _enter("");
92
93 ASSERT(rx != NULL);
94 ASSERT(trans != NULL);
95 ASSERT(bundle != NULL);
96
97 call = rxrpc_alloc_call(gfp);
98 if (!call)
99 return ERR_PTR(-ENOMEM);
100
101 sock_hold(&rx->sk);
102 call->socket = rx;
103 call->rx_data_post = 1;
104
105 ret = rxrpc_connect_call(rx, trans, bundle, call, gfp);
106 if (ret < 0) {
107 kmem_cache_free(rxrpc_call_jar, call);
108 return ERR_PTR(ret);
109 }
110
111 spin_lock(&call->conn->trans->peer->lock);
112 list_add(&call->error_link, &call->conn->trans->peer->error_targets);
113 spin_unlock(&call->conn->trans->peer->lock);
114
115 call->lifetimer.expires = jiffies + rxrpc_call_max_lifetime * HZ;
116 add_timer(&call->lifetimer);
117
118 _leave(" = %p", call);
119 return call;
120}
121
122/*
123 * set up a call for the given data
124 * - called in process context with IRQs enabled
125 */
126struct rxrpc_call *rxrpc_get_client_call(struct rxrpc_sock *rx,
127 struct rxrpc_transport *trans,
128 struct rxrpc_conn_bundle *bundle,
129 unsigned long user_call_ID,
130 int create,
131 gfp_t gfp)
132{
133 struct rxrpc_call *call, *candidate;
134 struct rb_node *p, *parent, **pp;
135
136 _enter("%p,%d,%d,%lx,%d",
137 rx, trans ? trans->debug_id : -1, bundle ? bundle->debug_id : -1,
138 user_call_ID, create);
139
140 /* search the extant calls first for one that matches the specified
141 * user ID */
142 read_lock(&rx->call_lock);
143
144 p = rx->calls.rb_node;
145 while (p) {
146 call = rb_entry(p, struct rxrpc_call, sock_node);
147
148 if (user_call_ID < call->user_call_ID)
149 p = p->rb_left;
150 else if (user_call_ID > call->user_call_ID)
151 p = p->rb_right;
152 else
153 goto found_extant_call;
154 }
155
156 read_unlock(&rx->call_lock);
157
158 if (!create || !trans)
159 return ERR_PTR(-EBADSLT);
160
161 /* not yet present - create a candidate for a new record and then
162 * redo the search */
163 candidate = rxrpc_alloc_client_call(rx, trans, bundle, gfp);
164 if (IS_ERR(candidate)) {
165 _leave(" = %ld", PTR_ERR(candidate));
166 return candidate;
167 }
168
169 candidate->user_call_ID = user_call_ID;
170 __set_bit(RXRPC_CALL_HAS_USERID, &candidate->flags);
171
172 write_lock(&rx->call_lock);
173
174 pp = &rx->calls.rb_node;
175 parent = NULL;
176 while (*pp) {
177 parent = *pp;
178 call = rb_entry(parent, struct rxrpc_call, sock_node);
179
180 if (user_call_ID < call->user_call_ID)
181 pp = &(*pp)->rb_left;
182 else if (user_call_ID > call->user_call_ID)
183 pp = &(*pp)->rb_right;
184 else
185 goto found_extant_second;
186 }
187
188 /* second search also failed; add the new call */
189 call = candidate;
190 candidate = NULL;
191 rxrpc_get_call(call);
192
193 rb_link_node(&call->sock_node, parent, pp);
194 rb_insert_color(&call->sock_node, &rx->calls);
195 write_unlock(&rx->call_lock);
196
197 write_lock_bh(&rxrpc_call_lock);
198 list_add_tail(&call->link, &rxrpc_calls);
199 write_unlock_bh(&rxrpc_call_lock);
200
201 _net("CALL new %d on CONN %d", call->debug_id, call->conn->debug_id);
202
203 _leave(" = %p [new]", call);
204 return call;
205
206 /* we found the call in the list immediately */
207found_extant_call:
208 rxrpc_get_call(call);
209 read_unlock(&rx->call_lock);
210 _leave(" = %p [extant %d]", call, atomic_read(&call->usage));
211 return call;
212
213 /* we found the call on the second time through the list */
214found_extant_second:
215 rxrpc_get_call(call);
216 write_unlock(&rx->call_lock);
217 rxrpc_put_call(candidate);
218 _leave(" = %p [second %d]", call, atomic_read(&call->usage));
219 return call;
220}
221
222/*
223 * set up an incoming call
224 * - called in process context with IRQs enabled
225 */
226struct rxrpc_call *rxrpc_incoming_call(struct rxrpc_sock *rx,
227 struct rxrpc_connection *conn,
228 struct rxrpc_header *hdr,
229 gfp_t gfp)
230{
231 struct rxrpc_call *call, *candidate;
232 struct rb_node **p, *parent;
233 __be32 call_id;
234
235 _enter(",%d,,%x", conn->debug_id, gfp);
236
237 ASSERT(rx != NULL);
238
239 candidate = rxrpc_alloc_call(gfp);
240 if (!candidate)
241 return ERR_PTR(-ENOMEM);
242
243 candidate->socket = rx;
244 candidate->conn = conn;
245 candidate->cid = hdr->cid;
246 candidate->call_id = hdr->callNumber;
247 candidate->channel = ntohl(hdr->cid) & RXRPC_CHANNELMASK;
248 candidate->rx_data_post = 0;
249 candidate->state = RXRPC_CALL_SERVER_ACCEPTING;
250 if (conn->security_ix > 0)
251 candidate->state = RXRPC_CALL_SERVER_SECURING;
252
253 write_lock_bh(&conn->lock);
254
255 /* set the channel for this call */
256 call = conn->channels[candidate->channel];
257 _debug("channel[%u] is %p", candidate->channel, call);
258 if (call && call->call_id == hdr->callNumber) {
259 /* already set; must've been a duplicate packet */
260 _debug("extant call [%d]", call->state);
261 ASSERTCMP(call->conn, ==, conn);
262
263 read_lock(&call->state_lock);
264 switch (call->state) {
265 case RXRPC_CALL_LOCALLY_ABORTED:
266 if (!test_and_set_bit(RXRPC_CALL_ABORT, &call->events))
267 schedule_work(&call->processor); /* fall through */
268 case RXRPC_CALL_REMOTELY_ABORTED:
269 read_unlock(&call->state_lock);
270 goto aborted_call;
271 default:
272 rxrpc_get_call(call);
273 read_unlock(&call->state_lock);
274 goto extant_call;
275 }
276 }
277
278 if (call) {
279 /* it seems the channel is still in use from the previous call
280 * - ditch the old binding if its call is now complete */
281 _debug("CALL: %u { %s }",
282 call->debug_id, rxrpc_call_states[call->state]);
283
284 if (call->state >= RXRPC_CALL_COMPLETE) {
285 conn->channels[call->channel] = NULL;
286 } else {
287 write_unlock_bh(&conn->lock);
288 kmem_cache_free(rxrpc_call_jar, candidate);
289 _leave(" = -EBUSY");
290 return ERR_PTR(-EBUSY);
291 }
292 }
293
294 /* check the call number isn't duplicate */
295 _debug("check dup");
296 call_id = hdr->callNumber;
297 p = &conn->calls.rb_node;
298 parent = NULL;
299 while (*p) {
300 parent = *p;
301 call = rb_entry(parent, struct rxrpc_call, conn_node);
302
303 if (call_id < call->call_id)
304 p = &(*p)->rb_left;
305 else if (call_id > call->call_id)
306 p = &(*p)->rb_right;
307 else
308 goto old_call;
309 }
310
311 /* make the call available */
312 _debug("new call");
313 call = candidate;
314 candidate = NULL;
315 rb_link_node(&call->conn_node, parent, p);
316 rb_insert_color(&call->conn_node, &conn->calls);
317 conn->channels[call->channel] = call;
318 sock_hold(&rx->sk);
319 atomic_inc(&conn->usage);
320 write_unlock_bh(&conn->lock);
321
322 spin_lock(&conn->trans->peer->lock);
323 list_add(&call->error_link, &conn->trans->peer->error_targets);
324 spin_unlock(&conn->trans->peer->lock);
325
326 write_lock_bh(&rxrpc_call_lock);
327 list_add_tail(&call->link, &rxrpc_calls);
328 write_unlock_bh(&rxrpc_call_lock);
329
330 _net("CALL incoming %d on CONN %d", call->debug_id, call->conn->debug_id);
331
332 call->lifetimer.expires = jiffies + rxrpc_call_max_lifetime * HZ;
333 add_timer(&call->lifetimer);
334 _leave(" = %p {%d} [new]", call, call->debug_id);
335 return call;
336
337extant_call:
338 write_unlock_bh(&conn->lock);
339 kmem_cache_free(rxrpc_call_jar, candidate);
340 _leave(" = %p {%d} [extant]", call, call ? call->debug_id : -1);
341 return call;
342
343aborted_call:
344 write_unlock_bh(&conn->lock);
345 kmem_cache_free(rxrpc_call_jar, candidate);
346 _leave(" = -ECONNABORTED");
347 return ERR_PTR(-ECONNABORTED);
348
349old_call:
350 write_unlock_bh(&conn->lock);
351 kmem_cache_free(rxrpc_call_jar, candidate);
352 _leave(" = -ECONNRESET [old]");
353 return ERR_PTR(-ECONNRESET);
354}
355
356/*
357 * find an extant server call
358 * - called in process context with IRQs enabled
359 */
360struct rxrpc_call *rxrpc_find_server_call(struct rxrpc_sock *rx,
361 unsigned long user_call_ID)
362{
363 struct rxrpc_call *call;
364 struct rb_node *p;
365
366 _enter("%p,%lx", rx, user_call_ID);
367
368 /* search the extant calls for one that matches the specified user
369 * ID */
370 read_lock(&rx->call_lock);
371
372 p = rx->calls.rb_node;
373 while (p) {
374 call = rb_entry(p, struct rxrpc_call, sock_node);
375
376 if (user_call_ID < call->user_call_ID)
377 p = p->rb_left;
378 else if (user_call_ID > call->user_call_ID)
379 p = p->rb_right;
380 else
381 goto found_extant_call;
382 }
383
384 read_unlock(&rx->call_lock);
385 _leave(" = NULL");
386 return NULL;
387
388 /* we found the call in the list immediately */
389found_extant_call:
390 rxrpc_get_call(call);
391 read_unlock(&rx->call_lock);
392 _leave(" = %p [%d]", call, atomic_read(&call->usage));
393 return call;
394}
395
396/*
397 * detach a call from a socket and set up for release
398 */
399void rxrpc_release_call(struct rxrpc_call *call)
400{
401 struct rxrpc_sock *rx = call->socket;
402
403 _enter("{%d,%d,%d,%d}",
404 call->debug_id, atomic_read(&call->usage),
405 atomic_read(&call->ackr_not_idle),
406 call->rx_first_oos);
407
408 spin_lock_bh(&call->lock);
409 if (test_and_set_bit(RXRPC_CALL_RELEASED, &call->flags))
410 BUG();
411 spin_unlock_bh(&call->lock);
412
413 /* dissociate from the socket
414 * - the socket's ref on the call is passed to the death timer
415 */
416 _debug("RELEASE CALL %p (%d CONN %p)",
417 call, call->debug_id, call->conn);
418
419 write_lock_bh(&rx->call_lock);
420 if (!list_empty(&call->accept_link)) {
421 _debug("unlinking once-pending call %p { e=%lx f=%lx }",
422 call, call->events, call->flags);
423 ASSERT(!test_bit(RXRPC_CALL_HAS_USERID, &call->flags));
424 list_del_init(&call->accept_link);
425 sk_acceptq_removed(&rx->sk);
426 } else if (test_bit(RXRPC_CALL_HAS_USERID, &call->flags)) {
427 rb_erase(&call->sock_node, &rx->calls);
428 memset(&call->sock_node, 0xdd, sizeof(call->sock_node));
429 clear_bit(RXRPC_CALL_HAS_USERID, &call->flags);
430 }
431 write_unlock_bh(&rx->call_lock);
432
433 if (call->conn->out_clientflag)
434 spin_lock(&call->conn->trans->client_lock);
435 write_lock_bh(&call->conn->lock);
436
437 /* free up the channel for reuse */
438 if (call->conn->out_clientflag) {
439 call->conn->avail_calls++;
440 if (call->conn->avail_calls == RXRPC_MAXCALLS)
441 list_move_tail(&call->conn->bundle_link,
442 &call->conn->bundle->unused_conns);
443 else if (call->conn->avail_calls == 1)
444 list_move_tail(&call->conn->bundle_link,
445 &call->conn->bundle->avail_conns);
446 }
447
448 write_lock(&call->state_lock);
449 if (call->conn->channels[call->channel] == call)
450 call->conn->channels[call->channel] = NULL;
451
452 if (call->state < RXRPC_CALL_COMPLETE &&
453 call->state != RXRPC_CALL_CLIENT_FINAL_ACK) {
454 _debug("+++ ABORTING STATE %d +++\n", call->state);
455 call->state = RXRPC_CALL_LOCALLY_ABORTED;
456 call->abort_code = RX_CALL_DEAD;
457 set_bit(RXRPC_CALL_ABORT, &call->events);
458 schedule_work(&call->processor);
459 }
460 write_unlock(&call->state_lock);
461 write_unlock_bh(&call->conn->lock);
462 if (call->conn->out_clientflag)
463 spin_unlock(&call->conn->trans->client_lock);
464
465 if (!skb_queue_empty(&call->rx_queue) ||
466 !skb_queue_empty(&call->rx_oos_queue)) {
467 struct rxrpc_skb_priv *sp;
468 struct sk_buff *skb;
469
470 _debug("purge Rx queues");
471
472 spin_lock_bh(&call->lock);
473 while ((skb = skb_dequeue(&call->rx_queue)) ||
474 (skb = skb_dequeue(&call->rx_oos_queue))) {
475 sp = rxrpc_skb(skb);
476 if (sp->call) {
477 ASSERTCMP(sp->call, ==, call);
478 rxrpc_put_call(call);
479 sp->call = NULL;
480 }
481 skb->destructor = NULL;
482 spin_unlock_bh(&call->lock);
483
484 _debug("- zap %s %%%u #%u",
485 rxrpc_pkts[sp->hdr.type],
486 ntohl(sp->hdr.serial),
487 ntohl(sp->hdr.seq));
488 rxrpc_free_skb(skb);
489 spin_lock_bh(&call->lock);
490 }
491 spin_unlock_bh(&call->lock);
492
493 ASSERTCMP(call->state, !=, RXRPC_CALL_COMPLETE);
494 }
495
496 del_timer_sync(&call->resend_timer);
497 del_timer_sync(&call->ack_timer);
498 del_timer_sync(&call->lifetimer);
499 call->deadspan.expires = jiffies + rxrpc_dead_call_timeout * HZ;
500 add_timer(&call->deadspan);
501
502 _leave("");
503}
504
505/*
506 * handle a dead call being ready for reaping
507 */
508static void rxrpc_dead_call_expired(unsigned long _call)
509{
510 struct rxrpc_call *call = (struct rxrpc_call *) _call;
511
512 _enter("{%d}", call->debug_id);
513
514 write_lock_bh(&call->state_lock);
515 call->state = RXRPC_CALL_DEAD;
516 write_unlock_bh(&call->state_lock);
517 rxrpc_put_call(call);
518}
519
520/*
521 * mark a call as to be released, aborting it if it's still in progress
522 * - called with softirqs disabled
523 */
524static void rxrpc_mark_call_released(struct rxrpc_call *call)
525{
526 bool sched;
527
528 write_lock(&call->state_lock);
529 if (call->state < RXRPC_CALL_DEAD) {
530 sched = false;
531 if (call->state < RXRPC_CALL_COMPLETE) {
532 _debug("abort call %p", call);
533 call->state = RXRPC_CALL_LOCALLY_ABORTED;
534 call->abort_code = RX_CALL_DEAD;
535 if (!test_and_set_bit(RXRPC_CALL_ABORT, &call->events))
536 sched = true;
537 }
538 if (!test_and_set_bit(RXRPC_CALL_RELEASE, &call->events))
539 sched = true;
540 if (sched)
541 schedule_work(&call->processor);
542 }
543 write_unlock(&call->state_lock);
544}
545
546/*
547 * release all the calls associated with a socket
548 */
549void rxrpc_release_calls_on_socket(struct rxrpc_sock *rx)
550{
551 struct rxrpc_call *call;
552 struct rb_node *p;
553
554 _enter("%p", rx);
555
556 read_lock_bh(&rx->call_lock);
557
558 /* mark all the calls as no longer wanting incoming packets */
559 for (p = rb_first(&rx->calls); p; p = rb_next(p)) {
560 call = rb_entry(p, struct rxrpc_call, sock_node);
561 rxrpc_mark_call_released(call);
562 }
563
564 /* kill the not-yet-accepted incoming calls */
565 list_for_each_entry(call, &rx->secureq, accept_link) {
566 rxrpc_mark_call_released(call);
567 }
568
569 list_for_each_entry(call, &rx->acceptq, accept_link) {
570 rxrpc_mark_call_released(call);
571 }
572
573 read_unlock_bh(&rx->call_lock);
574 _leave("");
575}
576
577/*
578 * release a call
579 */
580void __rxrpc_put_call(struct rxrpc_call *call)
581{
582 ASSERT(call != NULL);
583
584 _enter("%p{u=%d}", call, atomic_read(&call->usage));
585
586 ASSERTCMP(atomic_read(&call->usage), >, 0);
587
588 if (atomic_dec_and_test(&call->usage)) {
589 _debug("call %d dead", call->debug_id);
590 ASSERTCMP(call->state, ==, RXRPC_CALL_DEAD);
591 schedule_work(&call->destroyer);
592 }
593 _leave("");
594}
595
596/*
597 * clean up a call
598 */
599static void rxrpc_cleanup_call(struct rxrpc_call *call)
600{
601 _net("DESTROY CALL %d", call->debug_id);
602
603 ASSERT(call->socket);
604
605 memset(&call->sock_node, 0xcd, sizeof(call->sock_node));
606
607 del_timer_sync(&call->lifetimer);
608 del_timer_sync(&call->deadspan);
609 del_timer_sync(&call->ack_timer);
610 del_timer_sync(&call->resend_timer);
611
612 ASSERT(test_bit(RXRPC_CALL_RELEASED, &call->flags));
613 ASSERTCMP(call->events, ==, 0);
614 if (work_pending(&call->processor)) {
615 _debug("defer destroy");
616 schedule_work(&call->destroyer);
617 return;
618 }
619
620 if (call->conn) {
621 spin_lock(&call->conn->trans->peer->lock);
622 list_del(&call->error_link);
623 spin_unlock(&call->conn->trans->peer->lock);
624
625 write_lock_bh(&call->conn->lock);
626 rb_erase(&call->conn_node, &call->conn->calls);
627 write_unlock_bh(&call->conn->lock);
628 rxrpc_put_connection(call->conn);
629 }
630
631 if (call->acks_window) {
632 _debug("kill Tx window %d",
633 CIRC_CNT(call->acks_head, call->acks_tail,
634 call->acks_winsz));
635 smp_mb();
636 while (CIRC_CNT(call->acks_head, call->acks_tail,
637 call->acks_winsz) > 0) {
638 struct rxrpc_skb_priv *sp;
639 unsigned long _skb;
640
641 _skb = call->acks_window[call->acks_tail] & ~1;
642 sp = rxrpc_skb((struct sk_buff *) _skb);
643 _debug("+++ clear Tx %u", ntohl(sp->hdr.seq));
644 rxrpc_free_skb((struct sk_buff *) _skb);
645 call->acks_tail =
646 (call->acks_tail + 1) & (call->acks_winsz - 1);
647 }
648
649 kfree(call->acks_window);
650 }
651
652 rxrpc_free_skb(call->tx_pending);
653
654 rxrpc_purge_queue(&call->rx_queue);
655 ASSERT(skb_queue_empty(&call->rx_oos_queue));
656 sock_put(&call->socket->sk);
657 kmem_cache_free(rxrpc_call_jar, call);
658}
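
The Tx window drained in rxrpc_cleanup_call() above is a power-of-two ring of unsigned longs holding sk_buff pointers, with bit 0 of each entry borrowed as a per-entry flag (hence the "& ~1" mask) and CIRC_CNT() giving the number of occupied slots. Below is a standalone sketch of the same ring discipline, with small integers standing in for pointers and invented values; CIRC_CNT is redefined here to mirror the kernel's linux/circ_buf.h macro.

	#include <stdio.h>

	#define CIRC_CNT(head, tail, size) (((head) - (tail)) & ((size) - 1))
	#define WINSZ 8			/* must be a power of two */

	int main(void)
	{
		unsigned long window[WINSZ];
		unsigned int head = 0, tail = 0;
		unsigned long seq;

		/* produce: stash "packets" 1..5, tagging odd ones via bit 0 */
		for (seq = 1; seq <= 5; seq++)
			window[head++ & (WINSZ - 1)] = (seq << 1) | (seq & 1);

		/* consume: mask off the flag bit, as the kernel does with ~1 */
		while (CIRC_CNT(head, tail, WINSZ) > 0) {
			unsigned long entry = window[tail & (WINSZ - 1)];

			printf("slot %u: value %lu, flag %lu\n",
			       tail & (WINSZ - 1), entry >> 1, entry & 1);
			tail++;
		}
		return 0;
	}

Stealing the low bit of an aligned pointer for a flag is what lets the kernel keep one machine word per window slot instead of a separate flag array.
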
659
660/*
661 * destroy a call
662 */
663static void rxrpc_destroy_call(struct work_struct *work)
664{
665 struct rxrpc_call *call =
666 container_of(work, struct rxrpc_call, destroyer);
667
668 _enter("%p{%d,%d,%p}",
669 call, atomic_read(&call->usage), call->channel, call->conn);
670
671 ASSERTCMP(call->state, ==, RXRPC_CALL_DEAD);
672
673 write_lock_bh(&rxrpc_call_lock);
674 list_del_init(&call->link);
675 write_unlock_bh(&rxrpc_call_lock);
676
677 rxrpc_cleanup_call(call);
678 _leave("");
679}
680
681/*
682 * preemptively destroy all the call records rather than waiting for them
683 * to time out
684 */
685void __exit rxrpc_destroy_all_calls(void)
686{
687 struct rxrpc_call *call;
688
689 _enter("");
690 write_lock_bh(&rxrpc_call_lock);
691
692 while (!list_empty(&rxrpc_calls)) {
693 call = list_entry(rxrpc_calls.next, struct rxrpc_call, link);
694 _debug("Zapping call %p", call);
695
696 list_del_init(&call->link);
697
698 switch (atomic_read(&call->usage)) {
699 case 0:
700 ASSERTCMP(call->state, ==, RXRPC_CALL_DEAD);
701 break;
702 case 1:
703 if (del_timer_sync(&call->deadspan) != 0 &&
704 call->state != RXRPC_CALL_DEAD)
705 rxrpc_dead_call_expired((unsigned long) call);
706 if (call->state != RXRPC_CALL_DEAD)
707 break;
708 default:
709 printk(KERN_ERR "RXRPC:"
710 " Call %p still in use (%d,%d,%s,%lx,%lx)!\n",
711 call, atomic_read(&call->usage),
712 atomic_read(&call->ackr_not_idle),
713 rxrpc_call_states[call->state],
714 call->flags, call->events);
715 if (!skb_queue_empty(&call->rx_queue))
716 printk(KERN_ERR"RXRPC: Rx queue occupied\n");
717 if (!skb_queue_empty(&call->rx_oos_queue))
718 printk(KERN_ERR"RXRPC: OOS queue occupied\n");
719 break;
720 }
721
722 write_unlock_bh(&rxrpc_call_lock);
723 cond_resched();
724 write_lock_bh(&rxrpc_call_lock);
725 }
726
727 write_unlock_bh(&rxrpc_call_lock);
728 _leave("");
729}
730
731/*
732 * handle call lifetime being exceeded
733 */
734static void rxrpc_call_life_expired(unsigned long _call)
735{
736 struct rxrpc_call *call = (struct rxrpc_call *) _call;
737
738 if (call->state >= RXRPC_CALL_COMPLETE)
739 return;
740
741 _enter("{%d}", call->debug_id);
742 read_lock_bh(&call->state_lock);
743 if (call->state < RXRPC_CALL_COMPLETE) {
744 set_bit(RXRPC_CALL_LIFE_TIMER, &call->events);
745 schedule_work(&call->processor);
746 }
747 read_unlock_bh(&call->state_lock);
748}
749
750/*
751 * handle resend timer expiry
752 */
753static void rxrpc_resend_time_expired(unsigned long _call)
754{
755 struct rxrpc_call *call = (struct rxrpc_call *) _call;
756
757 _enter("{%d}", call->debug_id);
758
759 if (call->state >= RXRPC_CALL_COMPLETE)
760 return;
761
762 read_lock_bh(&call->state_lock);
763 clear_bit(RXRPC_CALL_RUN_RTIMER, &call->flags);
764 if (call->state < RXRPC_CALL_COMPLETE &&
765 !test_and_set_bit(RXRPC_CALL_RESEND_TIMER, &call->events))
766 schedule_work(&call->processor);
767 read_unlock_bh(&call->state_lock);
768}
769
770/*
771 * handle ACK timer expiry
772 */
773static void rxrpc_ack_time_expired(unsigned long _call)
774{
775 struct rxrpc_call *call = (struct rxrpc_call *) _call;
776
777 _enter("{%d}", call->debug_id);
778
779 if (call->state >= RXRPC_CALL_COMPLETE)
780 return;
781
782 read_lock_bh(&call->state_lock);
783 if (call->state < RXRPC_CALL_COMPLETE &&
784 !test_and_set_bit(RXRPC_CALL_ACK, &call->events))
785 schedule_work(&call->processor);
786 read_unlock_bh(&call->state_lock);
787}
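
rxrpc_get_client_call() above, like rxrpc_get_bundle() and rxrpc_incoming_connection() later in this patch, follows one concurrency pattern throughout: search the tree optimistically under the read lock; on a miss, allocate a candidate outside any lock (the allocation may sleep); then retake the lock for writing and search again before linking the candidate in, discarding it if another thread won the race. Here is a compressed userspace sketch of the pattern, using pthreads and a plain linked list where the kernel uses an rbtree; all names in it are invented.

	#include <pthread.h>
	#include <stdlib.h>

	struct call {
		unsigned long	id;
		struct call	*next;
	};

	static struct call *calls;
	static pthread_rwlock_t call_lock = PTHREAD_RWLOCK_INITIALIZER;

	/* linear scan standing in for the kernel's rbtree walk */
	static struct call *find_call(unsigned long id)
	{
		struct call *c;

		for (c = calls; c; c = c->next)
			if (c->id == id)
				return c;
		return NULL;
	}

	struct call *get_call(unsigned long id)
	{
		struct call *c, *candidate;

		/* fast path: optimistic search under the read lock */
		pthread_rwlock_rdlock(&call_lock);
		c = find_call(id);
		pthread_rwlock_unlock(&call_lock);
		if (c)
			return c;

		/* the allocation may block, so do it with no lock held */
		candidate = calloc(1, sizeof(*candidate));
		if (!candidate)
			return NULL;
		candidate->id = id;

		/* slow path: retake the lock for writing and search again */
		pthread_rwlock_wrlock(&call_lock);
		c = find_call(id);
		if (c) {
			/* another thread inserted it first: discard ours */
			pthread_rwlock_unlock(&call_lock);
			free(candidate);
			return c;
		}
		candidate->next = calls;
		calls = candidate;
		pthread_rwlock_unlock(&call_lock);
		return candidate;
	}

	int main(void)
	{
		struct call *a = get_call(42), *b = get_call(42);

		return a == b ? 0 : 1;	/* same object both times */
	}

The second search is not optional: between dropping the read lock and taking the write lock, another thread may insert the same ID, which is exactly the found_extant_second case above.
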
diff --git a/net/rxrpc/ar-connection.c b/net/rxrpc/ar-connection.c
new file mode 100644
index 000000000000..01eb33c30571
--- /dev/null
+++ b/net/rxrpc/ar-connection.c
@@ -0,0 +1,895 @@
1/* RxRPC virtual connection handler
2 *
3 * Copyright (C) 2007 Red Hat, Inc. All Rights Reserved.
4 * Written by David Howells (dhowells@redhat.com)
5 *
6 * This program is free software; you can redistribute it and/or
7 * modify it under the terms of the GNU General Public License
8 * as published by the Free Software Foundation; either version
9 * 2 of the License, or (at your option) any later version.
10 */
11
12#include <linux/module.h>
13#include <linux/net.h>
14#include <linux/skbuff.h>
15#include <linux/crypto.h>
16#include <net/sock.h>
17#include <net/af_rxrpc.h>
18#include "ar-internal.h"
19
20static void rxrpc_connection_reaper(struct work_struct *work);
21
22LIST_HEAD(rxrpc_connections);
23DEFINE_RWLOCK(rxrpc_connection_lock);
24static unsigned long rxrpc_connection_timeout = 10 * 60;
25static DECLARE_DELAYED_WORK(rxrpc_connection_reap, rxrpc_connection_reaper);
26
27/*
28 * allocate a new client connection bundle
29 */
30static struct rxrpc_conn_bundle *rxrpc_alloc_bundle(gfp_t gfp)
31{
32 struct rxrpc_conn_bundle *bundle;
33
34 _enter("");
35
36 bundle = kzalloc(sizeof(struct rxrpc_conn_bundle), gfp);
37 if (bundle) {
38 INIT_LIST_HEAD(&bundle->unused_conns);
39 INIT_LIST_HEAD(&bundle->avail_conns);
40 INIT_LIST_HEAD(&bundle->busy_conns);
41 init_waitqueue_head(&bundle->chanwait);
42 atomic_set(&bundle->usage, 1);
43 }
44
45 _leave(" = %p", bundle);
46 return bundle;
47}
48
49/*
50 * compare bundle parameters with what we're looking for
51 * - return -ve, 0 or +ve
52 */
53static inline
54int rxrpc_cmp_bundle(const struct rxrpc_conn_bundle *bundle,
55 struct key *key, __be16 service_id)
56{
57 return (bundle->service_id - service_id) ?:
58 ((unsigned long) bundle->key - (unsigned long) key);
59}
60
61/*
62 * get bundle of client connections that a client socket can make use of
63 */
64struct rxrpc_conn_bundle *rxrpc_get_bundle(struct rxrpc_sock *rx,
65 struct rxrpc_transport *trans,
66 struct key *key,
67 __be16 service_id,
68 gfp_t gfp)
69{
70 struct rxrpc_conn_bundle *bundle, *candidate;
71 struct rb_node *p, *parent, **pp;
72
73 _enter("%p{%x},%x,%hx,",
74 rx, key_serial(key), trans->debug_id, ntohl(service_id));
75
76 if (rx->trans == trans && rx->bundle) {
77 atomic_inc(&rx->bundle->usage);
78 return rx->bundle;
79 }
80
81 /* search the extant bundles first for one that matches the specified
82 * user ID */
83 spin_lock(&trans->client_lock);
84
85 p = trans->bundles.rb_node;
86 while (p) {
87 bundle = rb_entry(p, struct rxrpc_conn_bundle, node);
88
89 if (rxrpc_cmp_bundle(bundle, key, service_id) < 0)
90 p = p->rb_left;
91 else if (rxrpc_cmp_bundle(bundle, key, service_id) > 0)
92 p = p->rb_right;
93 else
94 goto found_extant_bundle;
95 }
96
97 spin_unlock(&trans->client_lock);
98
99 /* not yet present - create a candidate for a new record and then
100 * redo the search */
101 candidate = rxrpc_alloc_bundle(gfp);
102 if (!candidate) {
103 _leave(" = -ENOMEM");
104 return ERR_PTR(-ENOMEM);
105 }
106
107 candidate->key = key_get(key);
108 candidate->service_id = service_id;
109
110 spin_lock(&trans->client_lock);
111
112 pp = &trans->bundles.rb_node;
113 parent = NULL;
114 while (*pp) {
115 parent = *pp;
116 bundle = rb_entry(parent, struct rxrpc_conn_bundle, node);
117
118 if (rxrpc_cmp_bundle(bundle, key, service_id) < 0)
119 pp = &(*pp)->rb_left;
120 else if (rxrpc_cmp_bundle(bundle, key, service_id) > 0)
121 pp = &(*pp)->rb_right;
122 else
123 goto found_extant_second;
124 }
125
126 /* second search also failed; add the new bundle */
127 bundle = candidate;
128 candidate = NULL;
129
130 rb_link_node(&bundle->node, parent, pp);
131 rb_insert_color(&bundle->node, &trans->bundles);
132 spin_unlock(&trans->client_lock);
133 _net("BUNDLE new on trans %d", trans->debug_id);
134 if (!rx->bundle && rx->sk.sk_state == RXRPC_CLIENT_CONNECTED) {
135 atomic_inc(&bundle->usage);
136 rx->bundle = bundle;
137 }
138 _leave(" = %p [new]", bundle);
139 return bundle;
140
141 /* we found the bundle in the list immediately */
142found_extant_bundle:
143 atomic_inc(&bundle->usage);
144 spin_unlock(&trans->client_lock);
145 _net("BUNDLE old on trans %d", trans->debug_id);
146 if (!rx->bundle && rx->sk.sk_state == RXRPC_CLIENT_CONNECTED) {
147 atomic_inc(&bundle->usage);
148 rx->bundle = bundle;
149 }
150 _leave(" = %p [extant %d]", bundle, atomic_read(&bundle->usage));
151 return bundle;
152
153 /* we found the bundle on the second time through the list */
154found_extant_second:
155 atomic_inc(&bundle->usage);
156 spin_unlock(&trans->client_lock);
157 kfree(candidate);
158 _net("BUNDLE old2 on trans %d", trans->debug_id);
159 if (!rx->bundle && rx->sk.sk_state == RXRPC_CLIENT_CONNECTED) {
160 atomic_inc(&bundle->usage);
161 rx->bundle = bundle;
162 }
163 _leave(" = %p [second %d]", bundle, atomic_read(&bundle->usage));
164 return bundle;
165}
166
167/*
168 * release a bundle
169 */
170void rxrpc_put_bundle(struct rxrpc_transport *trans,
171 struct rxrpc_conn_bundle *bundle)
172{
173 _enter("%p,%p{%d}", trans, bundle, atomic_read(&bundle->usage));
174
175 if (atomic_dec_and_lock(&bundle->usage, &trans->client_lock)) {
176 _debug("Destroy bundle");
177 rb_erase(&bundle->node, &trans->bundles);
178 spin_unlock(&trans->client_lock);
179 ASSERT(list_empty(&bundle->unused_conns));
180 ASSERT(list_empty(&bundle->avail_conns));
181 ASSERT(list_empty(&bundle->busy_conns));
182 ASSERTCMP(bundle->num_conns, ==, 0);
183 key_put(bundle->key);
184 kfree(bundle);
185 }
186
187 _leave("");
188}
189
190/*
191 * allocate a new connection
192 */
193static struct rxrpc_connection *rxrpc_alloc_connection(gfp_t gfp)
194{
195 struct rxrpc_connection *conn;
196
197 _enter("");
198
199 conn = kzalloc(sizeof(struct rxrpc_connection), gfp);
200 if (conn) {
201 INIT_WORK(&conn->processor, &rxrpc_process_connection);
202 INIT_LIST_HEAD(&conn->bundle_link);
203 conn->calls = RB_ROOT;
204 skb_queue_head_init(&conn->rx_queue);
205 rwlock_init(&conn->lock);
206 spin_lock_init(&conn->state_lock);
207 atomic_set(&conn->usage, 1);
208 conn->debug_id = atomic_inc_return(&rxrpc_debug_id);
209 conn->avail_calls = RXRPC_MAXCALLS;
210 conn->size_align = 4;
211 conn->header_size = sizeof(struct rxrpc_header);
212 }
213
214 _leave(" = %p{%d}", conn, conn ? conn->debug_id : -1);
215 return conn;
216}
217
218/*
219 * assign a connection ID to a connection and add it to the transport's
220 * connection lookup tree
221 * - called with transport client lock held
222 */
223static void rxrpc_assign_connection_id(struct rxrpc_connection *conn)
224{
225 struct rxrpc_connection *xconn;
226 struct rb_node *parent, **p;
227 __be32 epoch;
228 u32 real_conn_id;
229
230 _enter("");
231
232 epoch = conn->epoch;
233
234 write_lock_bh(&conn->trans->conn_lock);
235
236 conn->trans->conn_idcounter += RXRPC_CID_INC;
237 if (conn->trans->conn_idcounter < RXRPC_CID_INC)
238 conn->trans->conn_idcounter = RXRPC_CID_INC;
239 real_conn_id = conn->trans->conn_idcounter;
240
241attempt_insertion:
242 parent = NULL;
243 p = &conn->trans->client_conns.rb_node;
244
245 while (*p) {
246 parent = *p;
247 xconn = rb_entry(parent, struct rxrpc_connection, node);
248
249 if (epoch < xconn->epoch)
250 p = &(*p)->rb_left;
251 else if (epoch > xconn->epoch)
252 p = &(*p)->rb_right;
253 else if (real_conn_id < xconn->real_conn_id)
254 p = &(*p)->rb_left;
255 else if (real_conn_id > xconn->real_conn_id)
256 p = &(*p)->rb_right;
257 else
258 goto id_exists;
259 }
260
261 /* we've found a suitable hole - arrange for this connection to occupy
262 * it */
263 rb_link_node(&conn->node, parent, p);
264 rb_insert_color(&conn->node, &conn->trans->client_conns);
265
266 conn->real_conn_id = real_conn_id;
267 conn->cid = htonl(real_conn_id);
268 write_unlock_bh(&conn->trans->conn_lock);
269 _leave(" [CONNID %x CID %x]", real_conn_id, ntohl(conn->cid));
270 return;
271
272 /* we found a connection with the proposed ID - walk the tree from that
273 * point looking for the next unused ID */
274id_exists:
275 for (;;) {
276 real_conn_id += RXRPC_CID_INC;
277 if (real_conn_id < RXRPC_CID_INC) {
278 real_conn_id = RXRPC_CID_INC;
279 conn->trans->conn_idcounter = real_conn_id;
280 goto attempt_insertion;
281 }
282
283 parent = rb_next(parent);
284 if (!parent)
285 goto attempt_insertion;
286
287 xconn = rb_entry(parent, struct rxrpc_connection, node);
288 if (epoch < xconn->epoch ||
289 real_conn_id < xconn->real_conn_id)
290 goto attempt_insertion;
291 }
292}
293
294/*
295 * add a call to a connection's call-by-ID tree
296 */
297static void rxrpc_add_call_ID_to_conn(struct rxrpc_connection *conn,
298 struct rxrpc_call *call)
299{
300 struct rxrpc_call *xcall;
301 struct rb_node *parent, **p;
302 __be32 call_id;
303
304 write_lock_bh(&conn->lock);
305
306 call_id = call->call_id;
307 p = &conn->calls.rb_node;
308 parent = NULL;
309 while (*p) {
310 parent = *p;
311 xcall = rb_entry(parent, struct rxrpc_call, conn_node);
312
313 if (call_id < xcall->call_id)
314 p = &(*p)->rb_left;
315 else if (call_id > xcall->call_id)
316 p = &(*p)->rb_right;
317 else
318 BUG();
319 }
320
321 rb_link_node(&call->conn_node, parent, p);
322 rb_insert_color(&call->conn_node, &conn->calls);
323
324 write_unlock_bh(&conn->lock);
325}
326
327/*
328 * connect a call on an exclusive connection
329 */
330static int rxrpc_connect_exclusive(struct rxrpc_sock *rx,
331 struct rxrpc_transport *trans,
332 __be16 service_id,
333 struct rxrpc_call *call,
334 gfp_t gfp)
335{
336 struct rxrpc_connection *conn;
337 int chan, ret;
338
339 _enter("");
340
341 conn = rx->conn;
342 if (!conn) {
343 /* not yet present - create a candidate for a new connection
344 * and then redo the check */
345 conn = rxrpc_alloc_connection(gfp);
346 if (!conn) {
347 _leave(" = -ENOMEM");
348 return -ENOMEM;
349 }
350
351 conn->trans = trans;
352 conn->bundle = NULL;
353 conn->service_id = service_id;
354 conn->epoch = rxrpc_epoch;
355 conn->in_clientflag = 0;
356 conn->out_clientflag = RXRPC_CLIENT_INITIATED;
357 conn->cid = 0;
358 conn->state = RXRPC_CONN_CLIENT;
359 conn->avail_calls = RXRPC_MAXCALLS;
360 conn->security_level = rx->min_sec_level;
361 conn->key = key_get(rx->key);
362
363 ret = rxrpc_init_client_conn_security(conn);
364 if (ret < 0) {
365 key_put(conn->key);
366 kfree(conn);
367 _leave(" = %d [key]", ret);
368 return ret;
369 }
370
371 write_lock_bh(&rxrpc_connection_lock);
372 list_add_tail(&conn->link, &rxrpc_connections);
373 write_unlock_bh(&rxrpc_connection_lock);
374
375 spin_lock(&trans->client_lock);
376 atomic_inc(&trans->usage);
377
378 _net("CONNECT EXCL new %d on TRANS %d",
379 conn->debug_id, conn->trans->debug_id);
380
381 rxrpc_assign_connection_id(conn);
382 rx->conn = conn;
383 }
384
385 /* we've got a connection with a free channel and we can now attach the
386 * call to it
387 * - we're holding the transport's client lock
388 * - we're holding a reference on the connection
389 */
390 for (chan = 0; chan < RXRPC_MAXCALLS; chan++)
391 if (!conn->channels[chan])
392 goto found_channel;
393 goto no_free_channels;
394
395found_channel:
396 atomic_inc(&conn->usage);
397 conn->channels[chan] = call;
398 call->conn = conn;
399 call->channel = chan;
400 call->cid = conn->cid | htonl(chan);
401 call->call_id = htonl(++conn->call_counter);
402
403 _net("CONNECT client on conn %d chan %d as call %x",
404 conn->debug_id, chan, ntohl(call->call_id));
405
406 spin_unlock(&trans->client_lock);
407
408 rxrpc_add_call_ID_to_conn(conn, call);
409 _leave(" = 0");
410 return 0;
411
412no_free_channels:
413 spin_unlock(&trans->client_lock);
414 _leave(" = -ENOSR");
415 return -ENOSR;
416}
417
418/*
419 * find a connection for a call
420 * - called in process context with IRQs enabled
421 */
422int rxrpc_connect_call(struct rxrpc_sock *rx,
423 struct rxrpc_transport *trans,
424 struct rxrpc_conn_bundle *bundle,
425 struct rxrpc_call *call,
426 gfp_t gfp)
427{
428 struct rxrpc_connection *conn, *candidate;
429 int chan, ret;
430
431 DECLARE_WAITQUEUE(myself, current);
432
433 _enter("%p,%lx,", rx, call->user_call_ID);
434
435 if (test_bit(RXRPC_SOCK_EXCLUSIVE_CONN, &rx->flags))
436 return rxrpc_connect_exclusive(rx, trans, bundle->service_id,
437 call, gfp);
438
439 spin_lock(&trans->client_lock);
440 for (;;) {
441 /* see if the bundle has a call slot available */
442 if (!list_empty(&bundle->avail_conns)) {
443 _debug("avail");
444 conn = list_entry(bundle->avail_conns.next,
445 struct rxrpc_connection,
446 bundle_link);
447 if (--conn->avail_calls == 0)
448 list_move(&conn->bundle_link,
449 &bundle->busy_conns);
450 atomic_inc(&conn->usage);
451 break;
452 }
453
454 if (!list_empty(&bundle->unused_conns)) {
455 _debug("unused");
456 conn = list_entry(bundle->unused_conns.next,
457 struct rxrpc_connection,
458 bundle_link);
459 atomic_inc(&conn->usage);
460 list_move(&conn->bundle_link, &bundle->avail_conns);
461 break;
462 }
463
464 /* need to allocate a new connection */
465 _debug("get new conn [%d]", bundle->num_conns);
466
467 spin_unlock(&trans->client_lock);
468
469 if (signal_pending(current))
470 goto interrupted;
471
472 if (bundle->num_conns >= 20) {
473 _debug("too many conns");
474
475 if (!(gfp & __GFP_WAIT)) {
476 _leave(" = -EAGAIN");
477 return -EAGAIN;
478 }
479
480 add_wait_queue(&bundle->chanwait, &myself);
481 for (;;) {
482 set_current_state(TASK_INTERRUPTIBLE);
483 if (bundle->num_conns < 20 ||
484 !list_empty(&bundle->unused_conns) ||
485 !list_empty(&bundle->avail_conns))
486 break;
487 if (signal_pending(current))
488 goto interrupted_dequeue;
489 schedule();
490 }
491 remove_wait_queue(&bundle->chanwait, &myself);
492 __set_current_state(TASK_RUNNING);
493 spin_lock(&trans->client_lock);
494 continue;
495 }
496
497 /* not yet present - create a candidate for a new connection and then
498 * redo the check */
499 candidate = rxrpc_alloc_connection(gfp);
500 if (!candidate) {
501 _leave(" = -ENOMEM");
502 return -ENOMEM;
503 }
504
505 candidate->trans = trans;
506 candidate->bundle = bundle;
507 candidate->service_id = bundle->service_id;
508 candidate->epoch = rxrpc_epoch;
509 candidate->in_clientflag = 0;
510 candidate->out_clientflag = RXRPC_CLIENT_INITIATED;
511 candidate->cid = 0;
512 candidate->state = RXRPC_CONN_CLIENT;
513 candidate->avail_calls = RXRPC_MAXCALLS;
514 candidate->security_level = rx->min_sec_level;
515 candidate->key = key_get(rx->key);
516
517 ret = rxrpc_init_client_conn_security(candidate);
518 if (ret < 0) {
519 key_put(candidate->key);
520 kfree(candidate);
521 _leave(" = %d [key]", ret);
522 return ret;
523 }
524
525 write_lock_bh(&rxrpc_connection_lock);
526 list_add_tail(&candidate->link, &rxrpc_connections);
527 write_unlock_bh(&rxrpc_connection_lock);
528
529 spin_lock(&trans->client_lock);
530
531 list_add(&candidate->bundle_link, &bundle->unused_conns);
532 bundle->num_conns++;
533 atomic_inc(&bundle->usage);
534 atomic_inc(&trans->usage);
535
536 _net("CONNECT new %d on TRANS %d",
537 candidate->debug_id, candidate->trans->debug_id);
538
539 rxrpc_assign_connection_id(candidate);
540 if (candidate->security)
541 candidate->security->prime_packet_security(candidate);
542
543 /* leave the candidate lurking in zombie mode attached to the
544 * bundle until we're ready for it */
545 rxrpc_put_connection(candidate);
546 candidate = NULL;
547 }
548
549 /* we've got a connection with a free channel and we can now attach the
550 * call to it
551 * - we're holding the transport's client lock
552 * - we're holding a reference on the connection
553 * - we're holding a reference on the bundle
554 */
555 for (chan = 0; chan < RXRPC_MAXCALLS; chan++)
556 if (!conn->channels[chan])
557 goto found_channel;
558 BUG();
559
560found_channel:
561 conn->channels[chan] = call;
562 call->conn = conn;
563 call->channel = chan;
564 call->cid = conn->cid | htonl(chan);
565 call->call_id = htonl(++conn->call_counter);
566
567 _net("CONNECT client on conn %d chan %d as call %x",
568 conn->debug_id, chan, ntohl(call->call_id));
569
570 spin_unlock(&trans->client_lock);
571
572 rxrpc_add_call_ID_to_conn(conn, call);
573
574 _leave(" = 0");
575 return 0;
576
577interrupted_dequeue:
578 remove_wait_queue(&bundle->chanwait, &myself);
579 __set_current_state(TASK_RUNNING);
580interrupted:
581 _leave(" = -ERESTARTSYS");
582 return -ERESTARTSYS;
583}
584
585/*
586 * get a record of an incoming connection
587 */
588struct rxrpc_connection *
589rxrpc_incoming_connection(struct rxrpc_transport *trans,
590 struct rxrpc_header *hdr,
591 gfp_t gfp)
592{
593 struct rxrpc_connection *conn, *candidate = NULL;
594 struct rb_node *p, **pp;
595 const char *new = "old";
596 __be32 epoch;
597 u32 conn_id;
598
599 _enter("");
600
601 ASSERT(hdr->flags & RXRPC_CLIENT_INITIATED);
602
603 epoch = hdr->epoch;
604 conn_id = ntohl(hdr->cid) & RXRPC_CIDMASK;
605
606 /* search the connection list first */
607 read_lock_bh(&trans->conn_lock);
608
609 p = trans->server_conns.rb_node;
610 while (p) {
611 conn = rb_entry(p, struct rxrpc_connection, node);
612
613 _debug("maybe %x", conn->real_conn_id);
614
615 if (epoch < conn->epoch)
616 p = p->rb_left;
617 else if (epoch > conn->epoch)
618 p = p->rb_right;
619 else if (conn_id < conn->real_conn_id)
620 p = p->rb_left;
621 else if (conn_id > conn->real_conn_id)
622 p = p->rb_right;
623 else
624 goto found_extant_connection;
625 }
626 read_unlock_bh(&trans->conn_lock);
627
628 /* not yet present - create a candidate for a new record and then
629 * redo the search */
630 candidate = rxrpc_alloc_connection(gfp);
631 if (!candidate) {
632 _leave(" = -ENOMEM");
633 return ERR_PTR(-ENOMEM);
634 }
635
636 candidate->trans = trans;
637 candidate->epoch = hdr->epoch;
638 candidate->cid = hdr->cid & __constant_cpu_to_be32(RXRPC_CIDMASK);
639 candidate->service_id = hdr->serviceId;
640 candidate->security_ix = hdr->securityIndex;
641 candidate->in_clientflag = RXRPC_CLIENT_INITIATED;
642 candidate->out_clientflag = 0;
643 candidate->real_conn_id = conn_id;
644 candidate->state = RXRPC_CONN_SERVER;
645 if (candidate->service_id)
646 candidate->state = RXRPC_CONN_SERVER_UNSECURED;
647
648 write_lock_bh(&trans->conn_lock);
649
650 pp = &trans->server_conns.rb_node;
651 p = NULL;
652 while (*pp) {
653 p = *pp;
654 conn = rb_entry(p, struct rxrpc_connection, node);
655
656 if (epoch < conn->epoch)
657 pp = &(*pp)->rb_left;
658 else if (epoch > conn->epoch)
659 pp = &(*pp)->rb_right;
660 else if (conn_id < conn->real_conn_id)
661 pp = &(*pp)->rb_left;
662 else if (conn_id > conn->real_conn_id)
663 pp = &(*pp)->rb_right;
664 else
665 goto found_extant_second;
666 }
667
668 /* we can now add the new candidate to the list */
669 conn = candidate;
670 candidate = NULL;
671 rb_link_node(&conn->node, p, pp);
672 rb_insert_color(&conn->node, &trans->server_conns);
673 atomic_inc(&conn->trans->usage);
674
675 write_unlock_bh(&trans->conn_lock);
676
677 write_lock_bh(&rxrpc_connection_lock);
678 list_add_tail(&conn->link, &rxrpc_connections);
679 write_unlock_bh(&rxrpc_connection_lock);
680
681 new = "new";
682
683success:
684 _net("CONNECTION %s %d {%x}", new, conn->debug_id, conn->real_conn_id);
685
686 _leave(" = %p {u=%d}", conn, atomic_read(&conn->usage));
687 return conn;
688
689 /* we found the connection in the list immediately */
690found_extant_connection:
691 if (hdr->securityIndex != conn->security_ix) {
692 read_unlock_bh(&trans->conn_lock);
693 goto security_mismatch;
694 }
695 atomic_inc(&conn->usage);
696 read_unlock_bh(&trans->conn_lock);
697 goto success;
698
699 /* we found the connection on the second time through the list */
700found_extant_second:
701 if (hdr->securityIndex != conn->security_ix) {
702 write_unlock_bh(&trans->conn_lock);
703 goto security_mismatch;
704 }
705 atomic_inc(&conn->usage);
706 write_unlock_bh(&trans->conn_lock);
707 kfree(candidate);
708 goto success;
709
710security_mismatch:
711 kfree(candidate);
712 _leave(" = -EKEYREJECTED");
713 return ERR_PTR(-EKEYREJECTED);
714}
715
716/*
717 * find a connection based on transport and RxRPC connection ID for an incoming
718 * packet
719 */
720struct rxrpc_connection *rxrpc_find_connection(struct rxrpc_transport *trans,
721 struct rxrpc_header *hdr)
722{
723 struct rxrpc_connection *conn;
724 struct rb_node *p;
725 __be32 epoch;
726 u32 conn_id;
727
728 _enter(",{%x,%x}", ntohl(hdr->cid), hdr->flags);
729
730 read_lock_bh(&trans->conn_lock);
731
732 conn_id = ntohl(hdr->cid) & RXRPC_CIDMASK;
733 epoch = hdr->epoch;
734
735 if (hdr->flags & RXRPC_CLIENT_INITIATED)
736 p = trans->server_conns.rb_node;
737 else
738 p = trans->client_conns.rb_node;
739
740 while (p) {
741 conn = rb_entry(p, struct rxrpc_connection, node);
742
743 _debug("maybe %x", conn->real_conn_id);
744
745 if (epoch < conn->epoch)
746 p = p->rb_left;
747 else if (epoch > conn->epoch)
748 p = p->rb_right;
749 else if (conn_id < conn->real_conn_id)
750 p = p->rb_left;
751 else if (conn_id > conn->real_conn_id)
752 p = p->rb_right;
753 else
754 goto found;
755 }
756
757 read_unlock_bh(&trans->conn_lock);
758 _leave(" = NULL");
759 return NULL;
760
761found:
762 atomic_inc(&conn->usage);
763 read_unlock_bh(&trans->conn_lock);
764 _leave(" = %p", conn);
765 return conn;
766}
767
768/*
769 * release a virtual connection
770 */
771void rxrpc_put_connection(struct rxrpc_connection *conn)
772{
773 _enter("%p{u=%d,d=%d}",
774 conn, atomic_read(&conn->usage), conn->debug_id);
775
776 ASSERTCMP(atomic_read(&conn->usage), >, 0);
777
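	/* note the time of the last put; the reaper will leave the connection
	 * alone for rxrpc_connection_timeout seconds after this in case it
	 * gets reused */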
778 conn->put_time = xtime.tv_sec;
779 if (atomic_dec_and_test(&conn->usage)) {
780 _debug("zombie");
781 schedule_delayed_work(&rxrpc_connection_reap, 0);
782 }
783
784 _leave("");
785}
786
787/*
788 * destroy a virtual connection
789 */
790static void rxrpc_destroy_connection(struct rxrpc_connection *conn)
791{
792 _enter("%p{%d}", conn, atomic_read(&conn->usage));
793
794 ASSERTCMP(atomic_read(&conn->usage), ==, 0);
795
796 _net("DESTROY CONN %d", conn->debug_id);
797
798 if (conn->bundle)
799 rxrpc_put_bundle(conn->trans, conn->bundle);
800
801 ASSERT(RB_EMPTY_ROOT(&conn->calls));
802 rxrpc_purge_queue(&conn->rx_queue);
803
804 rxrpc_clear_conn_security(conn);
805 rxrpc_put_transport(conn->trans);
806 kfree(conn);
807 _leave("");
808}
809
810/*
811 * reap dead connections
812 */
813void rxrpc_connection_reaper(struct work_struct *work)
814{
815 struct rxrpc_connection *conn, *_p;
816 unsigned long now, earliest, reap_time;
817
818 LIST_HEAD(graveyard);
819
820 _enter("");
821
822 now = xtime.tv_sec;
823 earliest = ULONG_MAX;
824
825 write_lock_bh(&rxrpc_connection_lock);
826 list_for_each_entry_safe(conn, _p, &rxrpc_connections, link) {
827 _debug("reap CONN %d { u=%d,t=%ld }",
828 conn->debug_id, atomic_read(&conn->usage),
829 (long) now - (long) conn->put_time);
830
831 if (likely(atomic_read(&conn->usage) > 0))
832 continue;
833
834 spin_lock(&conn->trans->client_lock);
835 write_lock(&conn->trans->conn_lock);
836 reap_time = conn->put_time + rxrpc_connection_timeout;
837
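		/* recheck the usage count under the locks: the connection may
		 * have been regained whilst we were waiting for them */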
838 if (atomic_read(&conn->usage) > 0) {
839 ;
840 } else if (reap_time <= now) {
841 list_move_tail(&conn->link, &graveyard);
842 if (conn->out_clientflag)
843 rb_erase(&conn->node,
844 &conn->trans->client_conns);
845 else
846 rb_erase(&conn->node,
847 &conn->trans->server_conns);
848 if (conn->bundle) {
849 list_del_init(&conn->bundle_link);
850 conn->bundle->num_conns--;
851 }
852
853 } else if (reap_time < earliest) {
854 earliest = reap_time;
855 }
856
857 write_unlock(&conn->trans->conn_lock);
858 spin_unlock(&conn->trans->client_lock);
859 }
860 write_unlock_bh(&rxrpc_connection_lock);
861
862 if (earliest != ULONG_MAX) {
863 _debug("reschedule reaper %ld", (long) earliest - now);
864 ASSERTCMP(earliest, >, now);
865 schedule_delayed_work(&rxrpc_connection_reap,
866 (earliest - now) * HZ);
867 }
868
869 /* then destroy all those pulled out */
870 while (!list_empty(&graveyard)) {
871 conn = list_entry(graveyard.next, struct rxrpc_connection,
872 link);
873 list_del_init(&conn->link);
874
875 ASSERTCMP(atomic_read(&conn->usage), ==, 0);
876 rxrpc_destroy_connection(conn);
877 }
878
879 _leave("");
880}
881
882/*
883 * preemptively destroy all the connection records rather than waiting for them
884 * to time out
885 */
886void __exit rxrpc_destroy_all_connections(void)
887{
888 _enter("");
889
890 rxrpc_connection_timeout = 0;
891 cancel_delayed_work(&rxrpc_connection_reap);
892 schedule_delayed_work(&rxrpc_connection_reap, 0);
893
894 _leave("");
895}
diff --git a/net/rxrpc/ar-connevent.c b/net/rxrpc/ar-connevent.c
new file mode 100644
index 000000000000..4b02815c1ded
--- /dev/null
+++ b/net/rxrpc/ar-connevent.c
@@ -0,0 +1,387 @@
1/* connection-level event handling
2 *
3 * Copyright (C) 2007 Red Hat, Inc. All Rights Reserved.
4 * Written by David Howells (dhowells@redhat.com)
5 *
6 * This program is free software; you can redistribute it and/or
7 * modify it under the terms of the GNU General Public License
8 * as published by the Free Software Foundation; either version
9 * 2 of the License, or (at your option) any later version.
10 */
11
12#include <linux/module.h>
13#include <linux/net.h>
14#include <linux/skbuff.h>
15#include <linux/errqueue.h>
16#include <linux/udp.h>
17#include <linux/in.h>
18#include <linux/in6.h>
19#include <linux/icmp.h>
20#include <net/sock.h>
21#include <net/af_rxrpc.h>
22#include <net/ip.h>
23#include "ar-internal.h"
24
25/*
26 * pass a connection-level abort onto all calls on that connection
27 */
28static void rxrpc_abort_calls(struct rxrpc_connection *conn, int state,
29 u32 abort_code)
30{
31 struct rxrpc_call *call;
32 struct rb_node *p;
33
34 _enter("{%d},%x", conn->debug_id, abort_code);
35
36 read_lock_bh(&conn->lock);
37
38 for (p = rb_first(&conn->calls); p; p = rb_next(p)) {
39 call = rb_entry(p, struct rxrpc_call, conn_node);
40 write_lock(&call->state_lock);
41 if (call->state <= RXRPC_CALL_COMPLETE) {
42 call->state = state;
43 call->abort_code = abort_code;
44 if (state == RXRPC_CALL_LOCALLY_ABORTED)
45 set_bit(RXRPC_CALL_CONN_ABORT, &call->events);
46 else
47 set_bit(RXRPC_CALL_RCVD_ABORT, &call->events);
48 schedule_work(&call->processor);
49 }
50 write_unlock(&call->state_lock);
51 }
52
53 read_unlock_bh(&conn->lock);
54 _leave("");
55}
56
57/*
58 * generate a connection-level abort
59 */
60static int rxrpc_abort_connection(struct rxrpc_connection *conn,
61 u32 error, u32 abort_code)
62{
63 struct rxrpc_header hdr;
64 struct msghdr msg;
65 struct kvec iov[2];
66 __be32 word;
67 size_t len;
68 int ret;
69
70 _enter("%d,,%u,%u", conn->debug_id, error, abort_code);
71
72 /* generate a connection-level abort */
73 spin_lock_bh(&conn->state_lock);
74 if (conn->state < RXRPC_CONN_REMOTELY_ABORTED) {
75 conn->state = RXRPC_CONN_LOCALLY_ABORTED;
76 conn->error = error;
77 spin_unlock_bh(&conn->state_lock);
78 } else {
79 spin_unlock_bh(&conn->state_lock);
80 _leave(" = 0 [already dead]");
81 return 0;
82 }
83
84 rxrpc_abort_calls(conn, RXRPC_CALL_LOCALLY_ABORTED, abort_code);
85
86 msg.msg_name = &conn->trans->peer->srx.transport.sin;
87 msg.msg_namelen = sizeof(conn->trans->peer->srx.transport.sin);
88 msg.msg_control = NULL;
89 msg.msg_controllen = 0;
90 msg.msg_flags = 0;
91
92 hdr.epoch = conn->epoch;
93 hdr.cid = conn->cid;
94 hdr.callNumber = 0;
95 hdr.seq = 0;
96 hdr.type = RXRPC_PACKET_TYPE_ABORT;
97 hdr.flags = conn->out_clientflag;
98 hdr.userStatus = 0;
99 hdr.securityIndex = conn->security_ix;
100 hdr._rsvd = 0;
101 hdr.serviceId = conn->service_id;
102
103 word = htonl(abort_code);
104
105 iov[0].iov_base = &hdr;
106 iov[0].iov_len = sizeof(hdr);
107 iov[1].iov_base = &word;
108 iov[1].iov_len = sizeof(word);
109
110 len = iov[0].iov_len + iov[1].iov_len;
111
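	/* the serial number is assigned at the last moment so that it reflects
	 * the order of despatch on this connection */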
112 hdr.serial = htonl(atomic_inc_return(&conn->serial));
113 _proto("Tx CONN ABORT %%%u { %d }", ntohl(hdr.serial), abort_code);
114
115 ret = kernel_sendmsg(conn->trans->local->socket, &msg, iov, 2, len);
116 if (ret < 0) {
117 _debug("sendmsg failed: %d", ret);
118 return -EAGAIN;
119 }
120
121 _leave(" = 0");
122 return 0;
123}
124
125/*
126 * mark a call as being on a now-secured channel
127 * - must be called with softirqs disabled
128 */
129void rxrpc_call_is_secure(struct rxrpc_call *call)
130{
131 _enter("%p", call);
132 if (call) {
133 read_lock(&call->state_lock);
134 if (call->state < RXRPC_CALL_COMPLETE &&
135 !test_and_set_bit(RXRPC_CALL_SECURED, &call->events))
136 schedule_work(&call->processor);
137 read_unlock(&call->state_lock);
138 }
139}
140
141/*
142 * connection-level Rx packet processor
143 */
144static int rxrpc_process_event(struct rxrpc_connection *conn,
145 struct sk_buff *skb,
146 u32 *_abort_code)
147{
148 struct rxrpc_skb_priv *sp = rxrpc_skb(skb);
149 __be32 tmp;
150 u32 serial;
151 int loop, ret;
152
153 if (conn->state >= RXRPC_CONN_REMOTELY_ABORTED)
154 return -ECONNABORTED;
155
156 serial = ntohl(sp->hdr.serial);
157
158 switch (sp->hdr.type) {
159 case RXRPC_PACKET_TYPE_ABORT:
160 if (skb_copy_bits(skb, 0, &tmp, sizeof(tmp)) < 0)
161 return -EPROTO;
162 _proto("Rx ABORT %%%u { ac=%d }", serial, ntohl(tmp));
163
164 conn->state = RXRPC_CONN_REMOTELY_ABORTED;
165 rxrpc_abort_calls(conn, RXRPC_CALL_REMOTELY_ABORTED,
166 ntohl(tmp));
167 return -ECONNABORTED;
168
169 case RXRPC_PACKET_TYPE_CHALLENGE:
170 if (conn->security)
171 return conn->security->respond_to_challenge(
172 conn, skb, _abort_code);
173 return -EPROTO;
174
175 case RXRPC_PACKET_TYPE_RESPONSE:
176 if (!conn->security)
177 return -EPROTO;
178
179 ret = conn->security->verify_response(conn, skb, _abort_code);
180 if (ret < 0)
181 return ret;
182
183 ret = conn->security->init_connection_security(conn);
184 if (ret < 0)
185 return ret;
186
187 conn->security->prime_packet_security(conn);
188 read_lock_bh(&conn->lock);
189 spin_lock(&conn->state_lock);
190
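		/* the response checked out, so release any calls that were held
		 * up pending the challenge/response exchange */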
191 if (conn->state == RXRPC_CONN_SERVER_CHALLENGING) {
192 conn->state = RXRPC_CONN_SERVER;
193 for (loop = 0; loop < RXRPC_MAXCALLS; loop++)
194 rxrpc_call_is_secure(conn->channels[loop]);
195 }
196
197 spin_unlock(&conn->state_lock);
198 read_unlock_bh(&conn->lock);
199 return 0;
200
201 default:
202 return -EPROTO;
203 }
204}
205
206/*
207 * set up security and issue a challenge
208 */
209static void rxrpc_secure_connection(struct rxrpc_connection *conn)
210{
211 u32 abort_code;
212 int ret;
213
214 _enter("{%d}", conn->debug_id);
215
216 ASSERT(conn->security_ix != 0);
217
218 if (!conn->key) {
219 _debug("set up security");
220 ret = rxrpc_init_server_conn_security(conn);
221 switch (ret) {
222 case 0:
223 break;
224 case -ENOENT:
225 abort_code = RX_CALL_DEAD;
226 goto abort;
227 default:
228 abort_code = RXKADNOAUTH;
229 goto abort;
230 }
231 }
232
233 ASSERT(conn->security != NULL);
234
235 if (conn->security->issue_challenge(conn) < 0) {
236 abort_code = RX_CALL_DEAD;
237 ret = -ENOMEM;
238 goto abort;
239 }
240
241 _leave("");
242 return;
243
244abort:
245 _debug("abort %d, %d", ret, abort_code);
246 rxrpc_abort_connection(conn, -ret, abort_code);
247 _leave(" [aborted]");
248}
249
250/*
251 * connection-level event processor
252 */
253void rxrpc_process_connection(struct work_struct *work)
254{
255 struct rxrpc_connection *conn =
256 container_of(work, struct rxrpc_connection, processor);
257 struct rxrpc_skb_priv *sp;
258 struct sk_buff *skb;
259 u32 abort_code = RX_PROTOCOL_ERROR;
260 int ret;
261
262 _enter("{%d}", conn->debug_id);
263
264 atomic_inc(&conn->usage);
265
266 if (test_and_clear_bit(RXRPC_CONN_CHALLENGE, &conn->events)) {
267 rxrpc_secure_connection(conn);
268 rxrpc_put_connection(conn);
269 }
270
271 /* go through the conn-level event packets, releasing the ref on this
272 * connection that each one has when we've finished with it */
273 while ((skb = skb_dequeue(&conn->rx_queue))) {
274 sp = rxrpc_skb(skb);
275
276 ret = rxrpc_process_event(conn, skb, &abort_code);
277 switch (ret) {
278 case -EPROTO:
279 case -EKEYEXPIRED:
280 case -EKEYREJECTED:
281 goto protocol_error;
282 case -EAGAIN:
283 goto requeue_and_leave;
284 case -ECONNABORTED:
285 default:
286 rxrpc_put_connection(conn);
287 rxrpc_free_skb(skb);
288 break;
289 }
290 }
291
292out:
293 rxrpc_put_connection(conn);
294 _leave("");
295 return;
296
297requeue_and_leave:
298 skb_queue_head(&conn->rx_queue, skb);
299 goto out;
300
301protocol_error:
302 if (rxrpc_abort_connection(conn, -ret, abort_code) < 0)
303 goto requeue_and_leave;
304 rxrpc_put_connection(conn);
305 rxrpc_free_skb(skb);
306 _leave(" [EPROTO]");
307 goto out;
308}
309
310/*
311 * reject packets through the local endpoint
312 */
313void rxrpc_reject_packets(struct work_struct *work)
314{
315 union {
316 struct sockaddr sa;
317 struct sockaddr_in sin;
318 } sa;
319 struct rxrpc_skb_priv *sp;
320 struct rxrpc_header hdr;
321 struct rxrpc_local *local;
322 struct sk_buff *skb;
323 struct msghdr msg;
324 struct kvec iov[2];
325 size_t size;
326 __be32 code;
327
328 local = container_of(work, struct rxrpc_local, rejecter);
329 rxrpc_get_local(local);
330
331 _enter("%d", local->debug_id);
332
333 iov[0].iov_base = &hdr;
334 iov[0].iov_len = sizeof(hdr);
335 iov[1].iov_base = &code;
336 iov[1].iov_len = sizeof(code);
337 size = sizeof(hdr) + sizeof(code);
338
339 msg.msg_name = &sa;
340 msg.msg_control = NULL;
341 msg.msg_controllen = 0;
342 msg.msg_flags = 0;
343
344 memset(&sa, 0, sizeof(sa));
345 sa.sa.sa_family = local->srx.transport.family;
346 switch (sa.sa.sa_family) {
347 case AF_INET:
348 msg.msg_namelen = sizeof(sa.sin);
349 break;
350 default:
351 msg.msg_namelen = 0;
352 break;
353 }
354
355 memset(&hdr, 0, sizeof(hdr));
356 hdr.type = RXRPC_PACKET_TYPE_ABORT;
357
358 while ((skb = skb_dequeue(&local->reject_queue))) {
359 sp = rxrpc_skb(skb);
360 switch (sa.sa.sa_family) {
361 case AF_INET:
362 sa.sin.sin_port = udp_hdr(skb)->source;
363 sa.sin.sin_addr.s_addr = ip_hdr(skb)->saddr;
364 code = htonl(skb->priority);
365
366 hdr.epoch = sp->hdr.epoch;
367 hdr.cid = sp->hdr.cid;
368 hdr.callNumber = sp->hdr.callNumber;
369 hdr.serviceId = sp->hdr.serviceId;
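			/* the abort goes back the way the offending packet
			 * came: flip the client-initiated flag and clear all
			 * the other flags */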
370 hdr.flags = sp->hdr.flags;
371 hdr.flags ^= RXRPC_CLIENT_INITIATED;
372 hdr.flags &= RXRPC_CLIENT_INITIATED;
373
374 kernel_sendmsg(local->socket, &msg, iov, 2, size);
375 break;
376
377 default:
378 break;
379 }
380
381 rxrpc_free_skb(skb);
382 rxrpc_put_local(local);
383 }
384
385 rxrpc_put_local(local);
386 _leave("");
387}
diff --git a/net/rxrpc/ar-error.c b/net/rxrpc/ar-error.c
new file mode 100644
index 000000000000..f5539e2f7b58
--- /dev/null
+++ b/net/rxrpc/ar-error.c
@@ -0,0 +1,253 @@
1/* Error message handling (ICMP)
2 *
3 * Copyright (C) 2007 Red Hat, Inc. All Rights Reserved.
4 * Written by David Howells (dhowells@redhat.com)
5 *
6 * This program is free software; you can redistribute it and/or
7 * modify it under the terms of the GNU General Public License
8 * as published by the Free Software Foundation; either version
9 * 2 of the License, or (at your option) any later version.
10 */
11
12#include <linux/module.h>
13#include <linux/net.h>
14#include <linux/skbuff.h>
15#include <linux/errqueue.h>
16#include <linux/udp.h>
17#include <linux/in.h>
18#include <linux/in6.h>
19#include <linux/icmp.h>
20#include <net/sock.h>
21#include <net/af_rxrpc.h>
22#include <net/ip.h>
23#include "ar-internal.h"
24
25/*
26 * handle an error received on the local endpoint
27 */
28void rxrpc_UDP_error_report(struct sock *sk)
29{
30 struct sock_exterr_skb *serr;
31 struct rxrpc_transport *trans;
32 struct rxrpc_local *local = sk->sk_user_data;
33 struct rxrpc_peer *peer;
34 struct sk_buff *skb;
35 __be32 addr;
36 __be16 port;
37
38 _enter("%p{%d}", sk, local->debug_id);
39
40 skb = skb_dequeue(&sk->sk_error_queue);
41 if (!skb) {
42 _leave("UDP socket errqueue empty");
43 return;
44 }
45
46 rxrpc_new_skb(skb);
47
48 serr = SKB_EXT_ERR(skb);
49 addr = *(__be32 *)(skb_network_header(skb) + serr->addr_offset);
50 port = serr->port;
51
52 _net("Rx UDP Error from "NIPQUAD_FMT":%hu",
53 NIPQUAD(addr), ntohs(port));
54 _debug("Msg l:%d d:%d", skb->len, skb->data_len);
55
56 peer = rxrpc_find_peer(local, addr, port);
57 if (IS_ERR(peer)) {
58 rxrpc_free_skb(skb);
59 _leave(" [no peer]");
60 return;
61 }
62
63 trans = rxrpc_find_transport(local, peer);
64 if (!trans) {
65 rxrpc_put_peer(peer);
66 rxrpc_free_skb(skb);
67 _leave(" [no trans]");
68 return;
69 }
70
71 if (serr->ee.ee_origin == SO_EE_ORIGIN_ICMP &&
72 serr->ee.ee_type == ICMP_DEST_UNREACH &&
73 serr->ee.ee_code == ICMP_FRAG_NEEDED
74 ) {
75 u32 mtu = serr->ee.ee_info;
76
77 _net("Rx Received ICMP Fragmentation Needed (%d)", mtu);
78
79 /* wind down the local interface MTU */
80 if (mtu > 0 && peer->if_mtu == 65535 && mtu < peer->if_mtu) {
81 peer->if_mtu = mtu;
82 _net("I/F MTU %u", mtu);
83 }
84
85 /* ip_rt_frag_needed() may have eaten the info */
86 if (mtu == 0)
87 mtu = ntohs(icmp_hdr(skb)->un.frag.mtu);
88
89		if (mtu == 0) {
90			/* they didn't give us a size, estimate one from the
			 * I/F MTU; otherwise the arithmetic below operates on
			 * zero and can never produce a useful value */
			mtu = peer->if_mtu;
91 if (mtu > 1500) {
92 mtu >>= 1;
93 if (mtu < 1500)
94 mtu = 1500;
95 } else {
96 mtu -= 100;
97 if (mtu < peer->hdrsize)
98 mtu = peer->hdrsize + 4;
99 }
100 }
101
102 if (mtu < peer->mtu) {
103 peer->mtu = mtu;
104 peer->maxdata = peer->mtu - peer->hdrsize;
105 _net("Net MTU %u (maxdata %u)",
106 peer->mtu, peer->maxdata);
107 }
108 }
109
110 rxrpc_put_peer(peer);
111
112 /* pass the transport ref to error_handler to release */
113 skb_queue_tail(&trans->error_queue, skb);
114 schedule_work(&trans->error_handler);
115
116 /* reset and regenerate socket error */
117 spin_lock_bh(&sk->sk_error_queue.lock);
118 sk->sk_err = 0;
119 skb = skb_peek(&sk->sk_error_queue);
120 if (skb) {
121 sk->sk_err = SKB_EXT_ERR(skb)->ee.ee_errno;
122 spin_unlock_bh(&sk->sk_error_queue.lock);
123 sk->sk_error_report(sk);
124 } else {
125 spin_unlock_bh(&sk->sk_error_queue.lock);
126 }
127
128 _leave("");
129}
130
131/*
132 * deal with UDP error messages
133 */
134void rxrpc_UDP_error_handler(struct work_struct *work)
135{
136 struct sock_extended_err *ee;
137 struct sock_exterr_skb *serr;
138 struct rxrpc_transport *trans =
139 container_of(work, struct rxrpc_transport, error_handler);
140 struct sk_buff *skb;
141 int local, err;
142
143 _enter("");
144
145 skb = skb_dequeue(&trans->error_queue);
146 if (!skb)
147 return;
148
149 serr = SKB_EXT_ERR(skb);
150 ee = &serr->ee;
151
152 _net("Rx Error o=%d t=%d c=%d e=%d",
153 ee->ee_origin, ee->ee_type, ee->ee_code, ee->ee_errno);
154
155 err = ee->ee_errno;
156
157 switch (ee->ee_origin) {
158 case SO_EE_ORIGIN_ICMP:
159 local = 0;
160 switch (ee->ee_type) {
161 case ICMP_DEST_UNREACH:
162 switch (ee->ee_code) {
163 case ICMP_NET_UNREACH:
164 _net("Rx Received ICMP Network Unreachable");
165 err = ENETUNREACH;
166 break;
167 case ICMP_HOST_UNREACH:
168 _net("Rx Received ICMP Host Unreachable");
169 err = EHOSTUNREACH;
170 break;
171 case ICMP_PORT_UNREACH:
172 _net("Rx Received ICMP Port Unreachable");
173 err = ECONNREFUSED;
174 break;
175 case ICMP_FRAG_NEEDED:
176 _net("Rx Received ICMP Fragmentation Needed (%d)",
177 ee->ee_info);
178 err = 0; /* dealt with elsewhere */
179 break;
180 case ICMP_NET_UNKNOWN:
181 _net("Rx Received ICMP Unknown Network");
182 err = ENETUNREACH;
183 break;
184 case ICMP_HOST_UNKNOWN:
185 _net("Rx Received ICMP Unknown Host");
186 err = EHOSTUNREACH;
187 break;
188 default:
189 _net("Rx Received ICMP DestUnreach code=%u",
190 ee->ee_code);
191 break;
192 }
193 break;
194
195 case ICMP_TIME_EXCEEDED:
196 _net("Rx Received ICMP TTL Exceeded");
197 break;
198
199 default:
200 _proto("Rx Received ICMP error { type=%u code=%u }",
201 ee->ee_type, ee->ee_code);
202 break;
203 }
204 break;
205
206 case SO_EE_ORIGIN_LOCAL:
207 _proto("Rx Received local error { error=%d }",
208 ee->ee_errno);
209 local = 1;
210 break;
211
212 case SO_EE_ORIGIN_NONE:
213 case SO_EE_ORIGIN_ICMP6:
214 default:
215 _proto("Rx Received error report { orig=%u }",
216 ee->ee_origin);
217 local = 0;
218 break;
219 }
220
221 /* terminate all the affected calls if there's an unrecoverable
222 * error */
223 if (err) {
224 struct rxrpc_call *call, *_n;
225
226 _debug("ISSUE ERROR %d", err);
227
228 spin_lock_bh(&trans->peer->lock);
229 trans->peer->net_error = err;
230
231 list_for_each_entry_safe(call, _n, &trans->peer->error_targets,
232 error_link) {
233 write_lock(&call->state_lock);
234 if (call->state != RXRPC_CALL_COMPLETE &&
235 call->state < RXRPC_CALL_NETWORK_ERROR) {
236 call->state = RXRPC_CALL_NETWORK_ERROR;
237 set_bit(RXRPC_CALL_RCVD_ERROR, &call->events);
238 schedule_work(&call->processor);
239 }
240 write_unlock(&call->state_lock);
241 list_del_init(&call->error_link);
242 }
243
244 spin_unlock_bh(&trans->peer->lock);
245 }
246
247 if (!skb_queue_empty(&trans->error_queue))
248 schedule_work(&trans->error_handler);
249
250 rxrpc_free_skb(skb);
251 rxrpc_put_transport(trans);
252 _leave("");
253}
diff --git a/net/rxrpc/ar-input.c b/net/rxrpc/ar-input.c
new file mode 100644
index 000000000000..323c3454561c
--- /dev/null
+++ b/net/rxrpc/ar-input.c
@@ -0,0 +1,791 @@
1/* RxRPC packet reception
2 *
3 * Copyright (C) 2007 Red Hat, Inc. All Rights Reserved.
4 * Written by David Howells (dhowells@redhat.com)
5 *
6 * This program is free software; you can redistribute it and/or
7 * modify it under the terms of the GNU General Public License
8 * as published by the Free Software Foundation; either version
9 * 2 of the License, or (at your option) any later version.
10 */
11
12#include <linux/module.h>
13#include <linux/net.h>
14#include <linux/skbuff.h>
15#include <linux/errqueue.h>
16#include <linux/udp.h>
17#include <linux/in.h>
18#include <linux/in6.h>
19#include <linux/icmp.h>
20#include <net/sock.h>
21#include <net/af_rxrpc.h>
22#include <net/ip.h>
23#include "ar-internal.h"
24
25unsigned long rxrpc_ack_timeout = 1;
26
27const char *rxrpc_pkts[] = {
28 "?00",
29 "DATA", "ACK", "BUSY", "ABORT", "ACKALL", "CHALL", "RESP", "DEBUG",
30 "?09", "?10", "?11", "?12", "?13", "?14", "?15"
31};
32
33/*
34 * queue a packet for recvmsg to pass to userspace
35 * - the caller must hold a lock on call->lock
36 * - must not be called with interrupts disabled (sk_filter() disables BH's)
37 * - eats the packet whether successful or not
38 * - there must be just one reference to the packet, which the caller passes to
39 * this function
40 */
41int rxrpc_queue_rcv_skb(struct rxrpc_call *call, struct sk_buff *skb,
42 bool force, bool terminal)
43{
44 struct rxrpc_skb_priv *sp;
45 struct sock *sk;
46 int skb_len, ret;
47
48 _enter(",,%d,%d", force, terminal);
49
50 ASSERT(!irqs_disabled());
51
52 sp = rxrpc_skb(skb);
53 ASSERTCMP(sp->call, ==, call);
54
55 /* if we've already posted the terminal message for a call, then we
56 * don't post any more */
57 if (test_bit(RXRPC_CALL_TERMINAL_MSG, &call->flags)) {
58 _debug("already terminated");
59 ASSERTCMP(call->state, >=, RXRPC_CALL_COMPLETE);
60 skb->destructor = NULL;
61 sp->call = NULL;
62 rxrpc_put_call(call);
63 rxrpc_free_skb(skb);
64 return 0;
65 }
66
67 sk = &call->socket->sk;
68
69 if (!force) {
70 /* cast skb->rcvbuf to unsigned... It's pointless, but
71 * reduces number of warnings when compiling with -W
72 * --ANK */
73// ret = -ENOBUFS;
74// if (atomic_read(&sk->sk_rmem_alloc) + skb->truesize >=
75// (unsigned) sk->sk_rcvbuf)
76// goto out;
77
78 ret = sk_filter(sk, skb);
79 if (ret < 0)
80 goto out;
81 }
82
83 spin_lock_bh(&sk->sk_receive_queue.lock);
84 if (!test_bit(RXRPC_CALL_TERMINAL_MSG, &call->flags) &&
85 !test_bit(RXRPC_CALL_RELEASED, &call->flags) &&
86 call->socket->sk.sk_state != RXRPC_CLOSE) {
87 skb->destructor = rxrpc_packet_destructor;
88 skb->dev = NULL;
89 skb->sk = sk;
90 atomic_add(skb->truesize, &sk->sk_rmem_alloc);
91
92 /* Cache the SKB length before we tack it onto the receive
93 * queue. Once it is added it no longer belongs to us and
94 * may be freed by other threads of control pulling packets
95 * from the queue.
96 */
97 skb_len = skb->len;
98
99 _net("post skb %p", skb);
100 __skb_queue_tail(&sk->sk_receive_queue, skb);
101 spin_unlock_bh(&sk->sk_receive_queue.lock);
102
103 if (!sock_flag(sk, SOCK_DEAD))
104 sk->sk_data_ready(sk, skb_len);
105
106 if (terminal) {
107 _debug("<<<< TERMINAL MESSAGE >>>>");
108 set_bit(RXRPC_CALL_TERMINAL_MSG, &call->flags);
109 }
110
111 skb = NULL;
112 } else {
113 spin_unlock_bh(&sk->sk_receive_queue.lock);
114 }
115 ret = 0;
116
117out:
118 /* release the socket buffer */
119 if (skb) {
120 skb->destructor = NULL;
121 sp->call = NULL;
122 rxrpc_put_call(call);
123 rxrpc_free_skb(skb);
124 }
125
126 _leave(" = %d", ret);
127 return ret;
128}
129
130/*
131 * process a DATA packet, posting the packet to the appropriate queue
132 * - eats the packet if successful
133 */
134static int rxrpc_fast_process_data(struct rxrpc_call *call,
135 struct sk_buff *skb, u32 seq)
136{
137 struct rxrpc_skb_priv *sp;
138 bool terminal;
139 int ret, ackbit, ack;
140
141 _enter("{%u,%u},,{%u}", call->rx_data_post, call->rx_first_oos, seq);
142
143 sp = rxrpc_skb(skb);
144 ASSERTCMP(sp->call, ==, NULL);
145
146 spin_lock(&call->lock);
147
148 if (call->state > RXRPC_CALL_COMPLETE)
149 goto discard;
150
151 ASSERTCMP(call->rx_data_expect, >=, call->rx_data_post);
152 ASSERTCMP(call->rx_data_post, >=, call->rx_data_recv);
153 ASSERTCMP(call->rx_data_recv, >=, call->rx_data_eaten);
154
155 if (seq < call->rx_data_post) {
156 _debug("dup #%u [-%u]", seq, call->rx_data_post);
157 ack = RXRPC_ACK_DUPLICATE;
158 ret = -ENOBUFS;
159 goto discard_and_ack;
160 }
161
162 /* we may already have the packet in the out of sequence queue */
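	/* (bit n of ackr_window corresponds to packet rx_data_eaten + 1 + n) */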
163 ackbit = seq - (call->rx_data_eaten + 1);
164 ASSERTCMP(ackbit, >=, 0);
165	if (__test_and_set_bit(ackbit, call->ackr_window)) {
166 _debug("dup oos #%u [%u,%u]",
167 seq, call->rx_data_eaten, call->rx_data_post);
168 ack = RXRPC_ACK_DUPLICATE;
169 goto discard_and_ack;
170 }
171
172 if (seq >= call->ackr_win_top) {
173 _debug("exceed #%u [%u]", seq, call->ackr_win_top);
174		__clear_bit(ackbit, call->ackr_window);
175 ack = RXRPC_ACK_EXCEEDS_WINDOW;
176 goto discard_and_ack;
177 }
178
179 if (seq == call->rx_data_expect) {
180 clear_bit(RXRPC_CALL_EXPECT_OOS, &call->flags);
181 call->rx_data_expect++;
182 } else if (seq > call->rx_data_expect) {
183 _debug("oos #%u [%u]", seq, call->rx_data_expect);
184 call->rx_data_expect = seq + 1;
185 if (test_and_set_bit(RXRPC_CALL_EXPECT_OOS, &call->flags)) {
186 ack = RXRPC_ACK_OUT_OF_SEQUENCE;
187 goto enqueue_and_ack;
188 }
189 goto enqueue_packet;
190 }
191
192 if (seq != call->rx_data_post) {
193 _debug("ahead #%u [%u]", seq, call->rx_data_post);
194 goto enqueue_packet;
195 }
196
197 if (test_bit(RXRPC_CALL_RCVD_LAST, &call->flags))
198 goto protocol_error;
199
200	/* if the packet needs to have security processing applied to it, then
201	 * it goes down the slow path */
202 if (call->conn->security)
203 goto enqueue_packet;
204
205 sp->call = call;
206 rxrpc_get_call(call);
207 terminal = ((sp->hdr.flags & RXRPC_LAST_PACKET) &&
208 !(sp->hdr.flags & RXRPC_CLIENT_INITIATED));
209 ret = rxrpc_queue_rcv_skb(call, skb, false, terminal);
210 if (ret < 0) {
211 if (ret == -ENOMEM || ret == -ENOBUFS) {
212			__clear_bit(ackbit, call->ackr_window);
213 ack = RXRPC_ACK_NOSPACE;
214 goto discard_and_ack;
215 }
216 goto out;
217 }
218
219 skb = NULL;
220
221 _debug("post #%u", seq);
222 ASSERTCMP(call->rx_data_post, ==, seq);
223 call->rx_data_post++;
224
225 if (sp->hdr.flags & RXRPC_LAST_PACKET)
226 set_bit(RXRPC_CALL_RCVD_LAST, &call->flags);
227
228 /* if we've reached an out of sequence packet then we need to drain
229 * that queue into the socket Rx queue now */
230 if (call->rx_data_post == call->rx_first_oos) {
231 _debug("drain rx oos now");
232 read_lock(&call->state_lock);
233 if (call->state < RXRPC_CALL_COMPLETE &&
234 !test_and_set_bit(RXRPC_CALL_DRAIN_RX_OOS, &call->events))
235 schedule_work(&call->processor);
236 read_unlock(&call->state_lock);
237 }
238
239 spin_unlock(&call->lock);
240 atomic_inc(&call->ackr_not_idle);
241 rxrpc_propose_ACK(call, RXRPC_ACK_DELAY, sp->hdr.serial, false);
242 _leave(" = 0 [posted]");
243 return 0;
244
245protocol_error:
246 ret = -EBADMSG;
247out:
248 spin_unlock(&call->lock);
249 _leave(" = %d", ret);
250 return ret;
251
252discard_and_ack:
253 _debug("discard and ACK packet %p", skb);
254 __rxrpc_propose_ACK(call, ack, sp->hdr.serial, true);
255discard:
256 spin_unlock(&call->lock);
257 rxrpc_free_skb(skb);
258 _leave(" = 0 [discarded]");
259 return 0;
260
261enqueue_and_ack:
262 __rxrpc_propose_ACK(call, ack, sp->hdr.serial, true);
263enqueue_packet:
264 _net("defer skb %p", skb);
265 spin_unlock(&call->lock);
266 skb_queue_tail(&call->rx_queue, skb);
267 atomic_inc(&call->ackr_not_idle);
268 read_lock(&call->state_lock);
269 if (call->state < RXRPC_CALL_DEAD)
270 schedule_work(&call->processor);
271 read_unlock(&call->state_lock);
272 _leave(" = 0 [queued]");
273 return 0;
274}
275
276/*
277 * assume an implicit ACKALL of the transmission phase of a client socket upon
278 * reception of the first reply packet
279 */
280static void rxrpc_assume_implicit_ackall(struct rxrpc_call *call, u32 serial)
281{
282 write_lock_bh(&call->state_lock);
283
284 switch (call->state) {
285 case RXRPC_CALL_CLIENT_AWAIT_REPLY:
286 call->state = RXRPC_CALL_CLIENT_RECV_REPLY;
287 call->acks_latest = serial;
288
289 _debug("implicit ACKALL %%%u", call->acks_latest);
290 set_bit(RXRPC_CALL_RCVD_ACKALL, &call->events);
291 write_unlock_bh(&call->state_lock);
292
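		/* try_to_del_timer_sync() only returns -1 if the timer handler
		 * is running at this moment, in which case the pending resend
		 * events are left alone for it */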
293 if (try_to_del_timer_sync(&call->resend_timer) >= 0) {
294 clear_bit(RXRPC_CALL_RESEND_TIMER, &call->events);
295 clear_bit(RXRPC_CALL_RESEND, &call->events);
296 clear_bit(RXRPC_CALL_RUN_RTIMER, &call->flags);
297 }
298 break;
299
300 default:
301 write_unlock_bh(&call->state_lock);
302 break;
303 }
304}
305
306/*
307 * post an incoming packet to the nominated call to deal with
308 * - must get rid of the sk_buff, either by freeing it or by queuing it
309 */
310void rxrpc_fast_process_packet(struct rxrpc_call *call, struct sk_buff *skb)
311{
312 struct rxrpc_skb_priv *sp = rxrpc_skb(skb);
313 __be32 _abort_code;
314 u32 serial, hi_serial, seq, abort_code;
315
316 _enter("%p,%p", call, skb);
317
318 ASSERT(!irqs_disabled());
319
320#if 0 // INJECT RX ERROR
321 if (sp->hdr.type == RXRPC_PACKET_TYPE_DATA) {
322 static int skip = 0;
323 if (++skip == 3) {
324 printk("DROPPED 3RD PACKET!!!!!!!!!!!!!\n");
325 skip = 0;
326 goto free_packet;
327 }
328 }
329#endif
330
331 /* track the latest serial number on this connection for ACK packet
332 * information */
333 serial = ntohl(sp->hdr.serial);
334 hi_serial = atomic_read(&call->conn->hi_serial);
335 while (serial > hi_serial)
336 hi_serial = atomic_cmpxchg(&call->conn->hi_serial, hi_serial,
337 serial);
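	/* (a lockless maximum: the cmpxchg loops until hi_serial is at least
	 * the serial just seen) */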
338
339 /* request ACK generation for any ACK or DATA packet that requests
340 * it */
341 if (sp->hdr.flags & RXRPC_REQUEST_ACK) {
342 _proto("ACK Requested on %%%u", serial);
343 rxrpc_propose_ACK(call, RXRPC_ACK_REQUESTED, sp->hdr.serial,
344 !(sp->hdr.flags & RXRPC_MORE_PACKETS));
345 }
346
347 switch (sp->hdr.type) {
348 case RXRPC_PACKET_TYPE_ABORT:
349 _debug("abort");
350
351 if (skb_copy_bits(skb, 0, &_abort_code,
352 sizeof(_abort_code)) < 0)
353 goto protocol_error;
354
355 abort_code = ntohl(_abort_code);
356 _proto("Rx ABORT %%%u { %x }", serial, abort_code);
357
358 write_lock_bh(&call->state_lock);
359 if (call->state < RXRPC_CALL_COMPLETE) {
360 call->state = RXRPC_CALL_REMOTELY_ABORTED;
361 call->abort_code = abort_code;
362 set_bit(RXRPC_CALL_RCVD_ABORT, &call->events);
363 schedule_work(&call->processor);
364 }
365 goto free_packet_unlock;
366
367 case RXRPC_PACKET_TYPE_BUSY:
368 _proto("Rx BUSY %%%u", serial);
369
370 if (call->conn->out_clientflag)
371 goto protocol_error;
372
373 write_lock_bh(&call->state_lock);
374 switch (call->state) {
375 case RXRPC_CALL_CLIENT_SEND_REQUEST:
376 call->state = RXRPC_CALL_SERVER_BUSY;
377 set_bit(RXRPC_CALL_RCVD_BUSY, &call->events);
378 schedule_work(&call->processor);
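			/* fall through */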
379 case RXRPC_CALL_SERVER_BUSY:
380 goto free_packet_unlock;
381 default:
382 goto protocol_error_locked;
383 }
384
385 default:
386 _proto("Rx %s %%%u", rxrpc_pkts[sp->hdr.type], serial);
387 goto protocol_error;
388
389 case RXRPC_PACKET_TYPE_DATA:
390 seq = ntohl(sp->hdr.seq);
391
392 _proto("Rx DATA %%%u { #%u }", serial, seq);
393
394 if (seq == 0)
395 goto protocol_error;
396
397 call->ackr_prev_seq = sp->hdr.seq;
398
399 /* received data implicitly ACKs all of the request packets we
400 * sent when we're acting as a client */
401 if (call->state == RXRPC_CALL_CLIENT_AWAIT_REPLY)
402 rxrpc_assume_implicit_ackall(call, serial);
403
404 switch (rxrpc_fast_process_data(call, skb, seq)) {
405 case 0:
406 skb = NULL;
407 goto done;
408
409 default:
410 BUG();
411
412 /* data packet received beyond the last packet */
413 case -EBADMSG:
414 goto protocol_error;
415 }
416
417 case RXRPC_PACKET_TYPE_ACK:
418 /* ACK processing is done in process context */
419 read_lock_bh(&call->state_lock);
420 if (call->state < RXRPC_CALL_DEAD) {
421 skb_queue_tail(&call->rx_queue, skb);
422 schedule_work(&call->processor);
423 skb = NULL;
424 }
425 read_unlock_bh(&call->state_lock);
426 goto free_packet;
427 }
428
429protocol_error:
430 _debug("protocol error");
431 write_lock_bh(&call->state_lock);
432protocol_error_locked:
433 if (call->state <= RXRPC_CALL_COMPLETE) {
434 call->state = RXRPC_CALL_LOCALLY_ABORTED;
435 call->abort_code = RX_PROTOCOL_ERROR;
436 set_bit(RXRPC_CALL_ABORT, &call->events);
437 schedule_work(&call->processor);
438 }
439free_packet_unlock:
440 write_unlock_bh(&call->state_lock);
441free_packet:
442 rxrpc_free_skb(skb);
443done:
444 _leave("");
445}
446
447/*
448 * split up a jumbo data packet
449 */
450static void rxrpc_process_jumbo_packet(struct rxrpc_call *call,
451 struct sk_buff *jumbo)
452{
453 struct rxrpc_jumbo_header jhdr;
454 struct rxrpc_skb_priv *sp;
455 struct sk_buff *part;
456
457 _enter(",{%u,%u}", jumbo->data_len, jumbo->len);
458
459 sp = rxrpc_skb(jumbo);
460
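	/* every subpacket bar the last is RXRPC_JUMBO_DATALEN in size and is
	 * followed by a secondary jumbo header holding the next subpacket's
	 * flags */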
461 do {
462 sp->hdr.flags &= ~RXRPC_JUMBO_PACKET;
463
464 /* make a clone to represent the first subpacket in what's left
465 * of the jumbo packet */
466 part = skb_clone(jumbo, GFP_ATOMIC);
467 if (!part) {
468 /* simply ditch the tail in the event of ENOMEM */
469 pskb_trim(jumbo, RXRPC_JUMBO_DATALEN);
470 break;
471 }
472 rxrpc_new_skb(part);
473
474 pskb_trim(part, RXRPC_JUMBO_DATALEN);
475
476 if (!pskb_pull(jumbo, RXRPC_JUMBO_DATALEN))
477 goto protocol_error;
478
479 if (skb_copy_bits(jumbo, 0, &jhdr, sizeof(jhdr)) < 0)
480 goto protocol_error;
481 if (!pskb_pull(jumbo, sizeof(jhdr)))
482 BUG();
483
484 sp->hdr.seq = htonl(ntohl(sp->hdr.seq) + 1);
485 sp->hdr.serial = htonl(ntohl(sp->hdr.serial) + 1);
486 sp->hdr.flags = jhdr.flags;
487 sp->hdr._rsvd = jhdr._rsvd;
488
489 _proto("Rx DATA Jumbo %%%u", ntohl(sp->hdr.serial) - 1);
490
491 rxrpc_fast_process_packet(call, part);
492 part = NULL;
493
494 } while (sp->hdr.flags & RXRPC_JUMBO_PACKET);
495
496 rxrpc_fast_process_packet(call, jumbo);
497 _leave("");
498 return;
499
500protocol_error:
501 _debug("protocol error");
502 rxrpc_free_skb(part);
503 rxrpc_free_skb(jumbo);
504 write_lock_bh(&call->state_lock);
505 if (call->state <= RXRPC_CALL_COMPLETE) {
506 call->state = RXRPC_CALL_LOCALLY_ABORTED;
507 call->abort_code = RX_PROTOCOL_ERROR;
508 set_bit(RXRPC_CALL_ABORT, &call->events);
509 schedule_work(&call->processor);
510 }
511 write_unlock_bh(&call->state_lock);
512 _leave("");
513}
514
515/*
516 * post an incoming packet to the appropriate call/socket to deal with
517 * - must get rid of the sk_buff, either by freeing it or by queuing it
518 */
519static void rxrpc_post_packet_to_call(struct rxrpc_connection *conn,
520 struct sk_buff *skb)
521{
522 struct rxrpc_skb_priv *sp;
523 struct rxrpc_call *call;
524 struct rb_node *p;
525 __be32 call_id;
526
527 _enter("%p,%p", conn, skb);
528
529 read_lock_bh(&conn->lock);
530
531 sp = rxrpc_skb(skb);
532
533 /* look at extant calls by channel number first */
534 call = conn->channels[ntohl(sp->hdr.cid) & RXRPC_CHANNELMASK];
535 if (!call || call->call_id != sp->hdr.callNumber)
536 goto call_not_extant;
537
538 _debug("extant call [%d]", call->state);
539 ASSERTCMP(call->conn, ==, conn);
540
541 read_lock(&call->state_lock);
542 switch (call->state) {
543 case RXRPC_CALL_LOCALLY_ABORTED:
544 if (!test_and_set_bit(RXRPC_CALL_ABORT, &call->events))
545 schedule_work(&call->processor);
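		/* fall through */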
546 case RXRPC_CALL_REMOTELY_ABORTED:
547 case RXRPC_CALL_NETWORK_ERROR:
548 case RXRPC_CALL_DEAD:
549 goto free_unlock;
550 default:
551 break;
552 }
553
554 read_unlock(&call->state_lock);
555 rxrpc_get_call(call);
556 read_unlock_bh(&conn->lock);
557
558 if (sp->hdr.type == RXRPC_PACKET_TYPE_DATA &&
559 sp->hdr.flags & RXRPC_JUMBO_PACKET)
560 rxrpc_process_jumbo_packet(call, skb);
561 else
562 rxrpc_fast_process_packet(call, skb);
563
564 rxrpc_put_call(call);
565 goto done;
566
567call_not_extant:
568	/* search the completed calls in case the one we're dealing with is
569	 * there */
570 _debug("call not extant");
571
572 call_id = sp->hdr.callNumber;
573 p = conn->calls.rb_node;
574 while (p) {
575 call = rb_entry(p, struct rxrpc_call, conn_node);
576
577 if (call_id < call->call_id)
578 p = p->rb_left;
579 else if (call_id > call->call_id)
580 p = p->rb_right;
581 else
582 goto found_completed_call;
583 }
584
585dead_call:
586	/* it's either a really old call that we no longer remember or it's a
587	 * new incoming call */
588 read_unlock_bh(&conn->lock);
589
590 if (sp->hdr.flags & RXRPC_CLIENT_INITIATED &&
591 sp->hdr.seq == __constant_cpu_to_be32(1)) {
592 _debug("incoming call");
593 skb_queue_tail(&conn->trans->local->accept_queue, skb);
594 schedule_work(&conn->trans->local->acceptor);
595 goto done;
596 }
597
598 _debug("dead call");
599 skb->priority = RX_CALL_DEAD;
600 rxrpc_reject_packet(conn->trans->local, skb);
601 goto done;
602
603 /* resend last packet of a completed call
604 * - client calls may have been aborted or ACK'd
605 * - server calls may have been aborted
606 */
607found_completed_call:
608 _debug("completed call");
609
610 if (atomic_read(&call->usage) == 0)
611 goto dead_call;
612
613 /* synchronise any state changes */
614 read_lock(&call->state_lock);
615 ASSERTIFCMP(call->state != RXRPC_CALL_CLIENT_FINAL_ACK,
616 call->state, >=, RXRPC_CALL_COMPLETE);
617
618 if (call->state == RXRPC_CALL_LOCALLY_ABORTED ||
619 call->state == RXRPC_CALL_REMOTELY_ABORTED ||
620 call->state == RXRPC_CALL_DEAD) {
621 read_unlock(&call->state_lock);
622 goto dead_call;
623 }
624
625 if (call->conn->in_clientflag) {
626 read_unlock(&call->state_lock);
627 goto dead_call; /* complete server call */
628 }
629
630 _debug("final ack again");
631 rxrpc_get_call(call);
632 set_bit(RXRPC_CALL_ACK_FINAL, &call->events);
633 schedule_work(&call->processor);
634
635free_unlock:
636 read_unlock(&call->state_lock);
637 read_unlock_bh(&conn->lock);
638 rxrpc_free_skb(skb);
639done:
640 _leave("");
641}
642
643/*
644 * post connection-level events to the connection
645 * - this includes challenges, responses and some aborts
646 */
647static void rxrpc_post_packet_to_conn(struct rxrpc_connection *conn,
648 struct sk_buff *skb)
649{
650 _enter("%p,%p", conn, skb);
651
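	/* the queued packet carries a ref on the connection that the event
	 * processor will release once the packet has been dealt with */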
652 atomic_inc(&conn->usage);
653 skb_queue_tail(&conn->rx_queue, skb);
654 schedule_work(&conn->processor);
655}
656
657/*
658 * handle data received on the local endpoint
659 * - may be called in interrupt context
660 */
661void rxrpc_data_ready(struct sock *sk, int count)
662{
663 struct rxrpc_connection *conn;
664 struct rxrpc_transport *trans;
665 struct rxrpc_skb_priv *sp;
666 struct rxrpc_local *local;
667 struct rxrpc_peer *peer;
668 struct sk_buff *skb;
669 int ret;
670
671 _enter("%p, %d", sk, count);
672
673 ASSERT(!irqs_disabled());
674
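	/* look up and pin the local endpoint; it may be in the process of
	 * being destroyed in process context */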
675 read_lock_bh(&rxrpc_local_lock);
676 local = sk->sk_user_data;
677 if (local && atomic_read(&local->usage) > 0)
678 rxrpc_get_local(local);
679 else
680 local = NULL;
681 read_unlock_bh(&rxrpc_local_lock);
682 if (!local) {
683 _leave(" [local dead]");
684 return;
685 }
686
687 skb = skb_recv_datagram(sk, 0, 1, &ret);
688 if (!skb) {
689 rxrpc_put_local(local);
690 if (ret == -EAGAIN)
691 return;
692 _debug("UDP socket error %d", ret);
693 return;
694 }
695
696 rxrpc_new_skb(skb);
697
698 _net("recv skb %p", skb);
699
700 /* we'll probably need to checksum it (didn't call sock_recvmsg) */
701 if (skb_checksum_complete(skb)) {
702 rxrpc_free_skb(skb);
703 rxrpc_put_local(local);
704 _leave(" [CSUM failed]");
705 return;
706 }
707
708 /* the socket buffer we have is owned by UDP, with UDP's data all over
709 * it, but we really want our own */
710 skb_orphan(skb);
711 sp = rxrpc_skb(skb);
712 memset(sp, 0, sizeof(*sp));
713
714 _net("Rx UDP packet from %08x:%04hu",
715 ntohl(ip_hdr(skb)->saddr), ntohs(udp_hdr(skb)->source));
716
717 /* dig out the RxRPC connection details */
718 if (skb_copy_bits(skb, sizeof(struct udphdr), &sp->hdr,
719 sizeof(sp->hdr)) < 0)
720 goto bad_message;
721 if (!pskb_pull(skb, sizeof(struct udphdr) + sizeof(sp->hdr)))
722 BUG();
723
724 _net("Rx RxRPC %s ep=%x call=%x:%x",
725 sp->hdr.flags & RXRPC_CLIENT_INITIATED ? "ToServer" : "ToClient",
726 ntohl(sp->hdr.epoch),
727 ntohl(sp->hdr.cid),
728 ntohl(sp->hdr.callNumber));
729
730 if (sp->hdr.type == 0 || sp->hdr.type >= RXRPC_N_PACKET_TYPES) {
731 _proto("Rx Bad Packet Type %u", sp->hdr.type);
732 goto bad_message;
733 }
734
735 if (sp->hdr.type == RXRPC_PACKET_TYPE_DATA &&
736 (sp->hdr.callNumber == 0 || sp->hdr.seq == 0))
737 goto bad_message;
738
739 peer = rxrpc_find_peer(local, ip_hdr(skb)->saddr, udp_hdr(skb)->source);
740 if (IS_ERR(peer))
741 goto cant_route_call;
742
743 trans = rxrpc_find_transport(local, peer);
744 rxrpc_put_peer(peer);
745 if (!trans)
746 goto cant_route_call;
747
748 conn = rxrpc_find_connection(trans, &sp->hdr);
749 rxrpc_put_transport(trans);
750 if (!conn)
751 goto cant_route_call;
752
753 _debug("CONN %p {%d}", conn, conn->debug_id);
754
755 if (sp->hdr.callNumber == 0)
756 rxrpc_post_packet_to_conn(conn, skb);
757 else
758 rxrpc_post_packet_to_call(conn, skb);
759 rxrpc_put_connection(conn);
760 rxrpc_put_local(local);
761 return;
762
763cant_route_call:
764 _debug("can't route call");
765 if (sp->hdr.flags & RXRPC_CLIENT_INITIATED &&
766 sp->hdr.type == RXRPC_PACKET_TYPE_DATA) {
767 if (sp->hdr.seq == __constant_cpu_to_be32(1)) {
768 _debug("first packet");
769 skb_queue_tail(&local->accept_queue, skb);
770 schedule_work(&local->acceptor);
771 rxrpc_put_local(local);
772 _leave(" [incoming]");
773 return;
774 }
775 skb->priority = RX_INVALID_OPERATION;
776 } else {
777 skb->priority = RX_CALL_DEAD;
778 }
779
780 _debug("reject");
781 rxrpc_reject_packet(local, skb);
782 rxrpc_put_local(local);
783 _leave(" [no call]");
784 return;
785
786bad_message:
787 skb->priority = RX_PROTOCOL_ERROR;
788 rxrpc_reject_packet(local, skb);
789 rxrpc_put_local(local);
790 _leave(" [badmsg]");
791}
diff --git a/net/rxrpc/ar-internal.h b/net/rxrpc/ar-internal.h
new file mode 100644
index 000000000000..7bfbf471c81e
--- /dev/null
+++ b/net/rxrpc/ar-internal.h
@@ -0,0 +1,842 @@
1/* AF_RXRPC internal definitions
2 *
3 * Copyright (C) 2007 Red Hat, Inc. All Rights Reserved.
4 * Written by David Howells (dhowells@redhat.com)
5 *
6 * This program is free software; you can redistribute it and/or
7 * modify it under the terms of the GNU General Public License
8 * as published by the Free Software Foundation; either version
9 * 2 of the License, or (at your option) any later version.
10 */
11
12#include <rxrpc/packet.h>
13
14#if 0
15#define CHECK_SLAB_OKAY(X) \
16 BUG_ON(atomic_read((X)) >> (sizeof(atomic_t) - 2) == \
17 (POISON_FREE << 8 | POISON_FREE))
18#else
19#define CHECK_SLAB_OKAY(X) do {} while(0)
20#endif
21
22extern atomic_t rxrpc_n_skbs;
23
24#define FCRYPT_BSIZE 8
25struct rxrpc_crypt {
26 union {
27 u8 x[FCRYPT_BSIZE];
28 u32 n[2];
29 };
30} __attribute__((aligned(8)));
31
32extern __be32 rxrpc_epoch; /* local epoch for detecting local-end reset */
33extern atomic_t rxrpc_debug_id; /* current debugging ID */
34
35/*
36 * sk_state for RxRPC sockets
37 */
38enum {
39 RXRPC_UNCONNECTED = 0,
40 RXRPC_CLIENT_BOUND, /* client local address bound */
41 RXRPC_CLIENT_CONNECTED, /* client is connected */
42 RXRPC_SERVER_BOUND, /* server local address bound */
43 RXRPC_SERVER_LISTENING, /* server listening for connections */
44 RXRPC_CLOSE, /* socket is being closed */
45};
46
47/*
48 * RxRPC socket definition
49 */
50struct rxrpc_sock {
51 /* WARNING: sk has to be the first member */
52 struct sock sk;
53 struct rxrpc_local *local; /* local endpoint */
54 struct rxrpc_transport *trans; /* transport handler */
55 struct rxrpc_conn_bundle *bundle; /* virtual connection bundle */
56 struct rxrpc_connection *conn; /* exclusive virtual connection */
57 struct list_head listen_link; /* link in the local endpoint's listen list */
58 struct list_head secureq; /* calls awaiting connection security clearance */
59 struct list_head acceptq; /* calls awaiting acceptance */
60 struct key *key; /* security for this socket */
61 struct key *securities; /* list of server security descriptors */
62 struct rb_root calls; /* outstanding calls on this socket */
63 unsigned long flags;
64#define RXRPC_SOCK_EXCLUSIVE_CONN 1 /* exclusive connection for a client socket */
65 rwlock_t call_lock; /* lock for calls */
66 u32 min_sec_level; /* minimum security level */
67#define RXRPC_SECURITY_MAX RXRPC_SECURITY_ENCRYPT
68 struct sockaddr_rxrpc srx; /* local address */
69 sa_family_t proto; /* protocol created with */
70 __be16 service_id; /* service ID of local/remote service */
71};
72
73#define rxrpc_sk(__sk) container_of((__sk), struct rxrpc_sock, sk)
74
75/*
76 * RxRPC socket buffer private variables
77 * - max 48 bytes (struct sk_buff::cb)
78 */
79struct rxrpc_skb_priv {
80 struct rxrpc_call *call; /* call with which associated */
81 unsigned long resend_at; /* time in jiffies at which to resend */
82 union {
83 unsigned offset; /* offset into buffer of next read */
84 int remain; /* amount of space remaining for next write */
85 u32 error; /* network error code */
86 bool need_resend; /* T if needs resending */
87 };
88
89 struct rxrpc_header hdr; /* RxRPC packet header from this packet */
90};
91
92#define rxrpc_skb(__skb) ((struct rxrpc_skb_priv *) &(__skb)->cb)
93
94enum {
95 RXRPC_SKB_MARK_DATA, /* data message */
96 RXRPC_SKB_MARK_FINAL_ACK, /* final ACK received message */
97 RXRPC_SKB_MARK_BUSY, /* server busy message */
98 RXRPC_SKB_MARK_REMOTE_ABORT, /* remote abort message */
99 RXRPC_SKB_MARK_NET_ERROR, /* network error message */
100 RXRPC_SKB_MARK_LOCAL_ERROR, /* local error message */
101	RXRPC_SKB_MARK_NEW_CALL,	/* incoming new call notification */
102};
103
104enum rxrpc_command {
105 RXRPC_CMD_SEND_DATA, /* send data message */
106 RXRPC_CMD_SEND_ABORT, /* request abort generation */
107 RXRPC_CMD_ACCEPT, /* [server] accept incoming call */
108 RXRPC_CMD_REJECT_BUSY, /* [server] reject a call as busy */
109};
110
111/*
112 * RxRPC security module interface
113 */
114struct rxrpc_security {
115 struct module *owner; /* providing module */
116 struct list_head link; /* link in master list */
117 const char *name; /* name of this service */
118 u8 security_index; /* security type provided */
119
120 /* initialise a connection's security */
121 int (*init_connection_security)(struct rxrpc_connection *);
122
123 /* prime a connection's packet security */
124 void (*prime_packet_security)(struct rxrpc_connection *);
125
126 /* impose security on a packet */
127 int (*secure_packet)(const struct rxrpc_call *,
128 struct sk_buff *,
129 size_t,
130 void *);
131
132 /* verify the security on a received packet */
133 int (*verify_packet)(const struct rxrpc_call *, struct sk_buff *,
134 u32 *);
135
136 /* issue a challenge */
137 int (*issue_challenge)(struct rxrpc_connection *);
138
139 /* respond to a challenge */
140 int (*respond_to_challenge)(struct rxrpc_connection *,
141 struct sk_buff *,
142 u32 *);
143
144 /* verify a response */
145 int (*verify_response)(struct rxrpc_connection *,
146 struct sk_buff *,
147 u32 *);
148
149 /* clear connection security */
150 void (*clear)(struct rxrpc_connection *);
151};
152
153/*
154 * RxRPC local transport endpoint definition
155 * - matched by local port, address and protocol type
156 */
157struct rxrpc_local {
158 struct socket *socket; /* my UDP socket */
159 struct work_struct destroyer; /* endpoint destroyer */
160 struct work_struct acceptor; /* incoming call processor */
161 struct work_struct rejecter; /* packet reject writer */
162 struct list_head services; /* services listening on this endpoint */
163 struct list_head link; /* link in endpoint list */
164 struct rw_semaphore defrag_sem; /* control re-enablement of IP DF bit */
165 struct sk_buff_head accept_queue; /* incoming calls awaiting acceptance */
166 struct sk_buff_head reject_queue; /* packets awaiting rejection */
167 spinlock_t lock; /* access lock */
168 rwlock_t services_lock; /* lock for services list */
169 atomic_t usage;
170 int debug_id; /* debug ID for printks */
171 volatile char error_rcvd; /* T if received ICMP error outstanding */
172 struct sockaddr_rxrpc srx; /* local address */
173};
174
175/*
176 * RxRPC remote transport endpoint definition
177 * - matched by remote port, address and protocol type
178 * - holds the path MTU, RTT and network-error state for the remote endpoint
179 */
180struct rxrpc_peer {
181 struct work_struct destroyer; /* peer destroyer */
182 struct list_head link; /* link in master peer list */
183 struct list_head error_targets; /* targets for net error distribution */
184 spinlock_t lock; /* access lock */
185 atomic_t usage;
186 unsigned if_mtu; /* interface MTU for this peer */
187 unsigned mtu; /* network MTU for this peer */
188 unsigned maxdata; /* data size (MTU - hdrsize) */
189 unsigned short hdrsize; /* header size (IP + UDP + RxRPC) */
190 int debug_id; /* debug ID for printks */
191 int net_error; /* network error distributed */
192 struct sockaddr_rxrpc srx; /* remote address */
193
194 /* calculated RTT cache */
195#define RXRPC_RTT_CACHE_SIZE 32
196 suseconds_t rtt; /* current RTT estimate (in uS) */
197 unsigned rtt_point; /* next entry at which to insert */
198 unsigned rtt_usage; /* amount of cache actually used */
199 suseconds_t rtt_cache[RXRPC_RTT_CACHE_SIZE]; /* calculated RTT cache */
200};
201
202/*
203 * RxRPC point-to-point transport / connection manager definition
204 * - handles a bundle of connections between two endpoints
205 * - matched by { local, peer }
206 */
207struct rxrpc_transport {
208 struct rxrpc_local *local; /* local transport endpoint */
209 struct rxrpc_peer *peer; /* remote transport endpoint */
210 struct work_struct error_handler; /* network error distributor */
211 struct rb_root bundles; /* client connection bundles on this transport */
212 struct rb_root client_conns; /* client connections on this transport */
213 struct rb_root server_conns; /* server connections on this transport */
214 struct list_head link; /* link in master session list */
215 struct sk_buff_head error_queue; /* error packets awaiting processing */
216 time_t put_time; /* time at which to reap */
217 spinlock_t client_lock; /* client connection allocation lock */
218 rwlock_t conn_lock; /* lock for active/dead connections */
219 atomic_t usage;
220 int debug_id; /* debug ID for printks */
221 unsigned int conn_idcounter; /* connection ID counter (client) */
222};
223
224/*
225 * RxRPC client connection bundle
226 * - matched by { transport, service_id, key }
227 */
228struct rxrpc_conn_bundle {
229 struct rb_node node; /* node in transport's lookup tree */
230 struct list_head unused_conns; /* unused connections in this bundle */
231 struct list_head avail_conns; /* available connections in this bundle */
232 struct list_head busy_conns; /* busy connections in this bundle */
233 struct key *key; /* security for this bundle */
234 wait_queue_head_t chanwait; /* wait for channel to become available */
235 atomic_t usage;
236 int debug_id; /* debug ID for printks */
237 unsigned short num_conns; /* number of connections in this bundle */
238 __be16 service_id; /* service ID */
239 uint8_t security_ix; /* security type */
240};
241
242/*
243 * RxRPC connection definition
244 * - matched by { transport, service_id, conn_id, direction, key }
245 * - each connection can only handle four simultaneous calls
246 */
247struct rxrpc_connection {
248 struct rxrpc_transport *trans; /* transport session */
249 struct rxrpc_conn_bundle *bundle; /* connection bundle (client) */
250 struct work_struct processor; /* connection event processor */
251 struct rb_node node; /* node in transport's lookup tree */
252 struct list_head link; /* link in master connection list */
253 struct list_head bundle_link; /* link in bundle */
254 struct rb_root calls; /* calls on this connection */
255 struct sk_buff_head rx_queue; /* received conn-level packets */
256 struct rxrpc_call *channels[RXRPC_MAXCALLS]; /* channels (active calls) */
257 struct rxrpc_security *security; /* applied security module */
258 struct key *key; /* security for this connection (client) */
259 struct key *server_key; /* security for this service */
260 struct crypto_blkcipher *cipher; /* encryption handle */
261 struct rxrpc_crypt csum_iv; /* packet checksum base */
262 unsigned long events;
263#define RXRPC_CONN_CHALLENGE 0 /* send challenge packet */
264 time_t put_time; /* time at which to reap */
265 rwlock_t lock; /* access lock */
266 spinlock_t state_lock; /* state-change lock */
267 atomic_t usage;
268 u32 real_conn_id; /* connection ID (host-endian) */
269 enum { /* current state of connection */
270 RXRPC_CONN_UNUSED, /* - connection not yet attempted */
271 RXRPC_CONN_CLIENT, /* - client connection */
272 RXRPC_CONN_SERVER_UNSECURED, /* - server unsecured connection */
273 RXRPC_CONN_SERVER_CHALLENGING, /* - server challenging for security */
274 RXRPC_CONN_SERVER, /* - server secured connection */
275 RXRPC_CONN_REMOTELY_ABORTED, /* - conn aborted by peer */
276 RXRPC_CONN_LOCALLY_ABORTED, /* - conn aborted locally */
277 RXRPC_CONN_NETWORK_ERROR, /* - conn terminated by network error */
278 } state;
279 int error; /* error code for local abort */
280 int debug_id; /* debug ID for printks */
281 unsigned call_counter; /* call ID counter */
282 atomic_t serial; /* packet serial number counter */
283 atomic_t hi_serial; /* highest serial number received */
284 u8 avail_calls; /* number of calls available */
285 u8 size_align; /* data size alignment (for security) */
286 u8 header_size; /* rxrpc + security header size */
287 u8 security_size; /* security header size */
288 u32 security_level; /* security level negotiated */
289 u32 security_nonce; /* response re-use preventer */
290
291 /* the following are all in net order */
292 __be32 epoch; /* epoch of this connection */
293 __be32 cid; /* connection ID */
294 __be16 service_id; /* service ID */
295 u8 security_ix; /* security type */
296 u8 in_clientflag; /* RXRPC_CLIENT_INITIATED if we are server */
297 u8 out_clientflag; /* RXRPC_CLIENT_INITIATED if we are client */
298};
299
300/*
301 * RxRPC call definition
302 * - matched by { connection, call_id }
303 */
304struct rxrpc_call {
305 struct rxrpc_connection *conn; /* connection carrying call */
306 struct rxrpc_sock *socket; /* socket responsible */
307 struct timer_list lifetimer; /* lifetime remaining on call */
308 struct timer_list deadspan; /* reap timer for re-ACK'ing, etc */
309 struct timer_list ack_timer; /* ACK generation timer */
310 struct timer_list resend_timer; /* Tx resend timer */
311 struct work_struct destroyer; /* call destroyer */
312 struct work_struct processor; /* packet processor and ACK generator */
313 struct list_head link; /* link in master call list */
314 struct list_head error_link; /* link in error distribution list */
315 struct list_head accept_link; /* calls awaiting acceptance */
316 struct rb_node sock_node; /* node in socket call tree */
317 struct rb_node conn_node; /* node in connection call tree */
318 struct sk_buff_head rx_queue; /* received packets */
319 struct sk_buff_head rx_oos_queue; /* packets received out of sequence */
320 struct sk_buff *tx_pending; /* Tx socket buffer being filled */
321 wait_queue_head_t tx_waitq; /* wait for Tx window space to become available */
322 unsigned long user_call_ID; /* user-defined call ID */
323 unsigned long creation_jif; /* time of call creation */
324 unsigned long flags;
325#define RXRPC_CALL_RELEASED 0 /* call has been released - no more message to userspace */
326#define RXRPC_CALL_TERMINAL_MSG 1 /* call has given the socket its final message */
327#define RXRPC_CALL_RCVD_LAST 2 /* all packets received */
328#define RXRPC_CALL_RUN_RTIMER 3 /* Tx resend timer started */
329#define RXRPC_CALL_TX_SOFT_ACK 4 /* sent some soft ACKs */
330#define RXRPC_CALL_PROC_BUSY 5 /* the processor is busy */
331#define RXRPC_CALL_INIT_ACCEPT 6 /* acceptance was initiated */
332#define RXRPC_CALL_HAS_USERID 7 /* has a user ID attached */
333#define RXRPC_CALL_EXPECT_OOS 8 /* expect out of sequence packets */
334 unsigned long events;
335#define RXRPC_CALL_RCVD_ACKALL 0 /* ACKALL or reply received */
336#define RXRPC_CALL_RCVD_BUSY 1 /* busy packet received */
337#define RXRPC_CALL_RCVD_ABORT 2 /* abort packet received */
338#define RXRPC_CALL_RCVD_ERROR 3 /* network error received */
339#define RXRPC_CALL_ACK_FINAL 4 /* need to generate final ACK (and release call) */
340#define RXRPC_CALL_ACK 5 /* need to generate ACK */
341#define RXRPC_CALL_REJECT_BUSY 6 /* need to generate busy message */
342#define RXRPC_CALL_ABORT 7 /* need to generate abort */
343#define RXRPC_CALL_CONN_ABORT 8 /* local connection abort generated */
344#define RXRPC_CALL_RESEND_TIMER 9 /* Tx resend timer expired */
345#define RXRPC_CALL_RESEND 10 /* Tx resend required */
346#define RXRPC_CALL_DRAIN_RX_OOS 11 /* drain the Rx out of sequence queue */
347#define RXRPC_CALL_LIFE_TIMER 12 /* call's lifetimer ran out */
348#define RXRPC_CALL_ACCEPTED 13 /* incoming call accepted by userspace app */
349#define RXRPC_CALL_SECURED 14 /* incoming call's connection is now secure */
350#define RXRPC_CALL_POST_ACCEPT 15 /* need to post an "accept?" message to the app */
351#define RXRPC_CALL_RELEASE 16 /* need to release the call's resources */
352
353 spinlock_t lock;
354 rwlock_t state_lock; /* lock for state transition */
355 atomic_t usage;
356 atomic_t sequence; /* Tx data packet sequence counter */
357 u32 abort_code; /* local/remote abort code */
358 enum { /* current state of call */
359 RXRPC_CALL_CLIENT_SEND_REQUEST, /* - client sending request phase */
360 RXRPC_CALL_CLIENT_AWAIT_REPLY, /* - client awaiting reply */
361 RXRPC_CALL_CLIENT_RECV_REPLY, /* - client receiving reply phase */
362 RXRPC_CALL_CLIENT_FINAL_ACK, /* - client sending final ACK phase */
363 RXRPC_CALL_SERVER_SECURING, /* - server securing request connection */
364 RXRPC_CALL_SERVER_ACCEPTING, /* - server accepting request */
365 RXRPC_CALL_SERVER_RECV_REQUEST, /* - server receiving request */
366 RXRPC_CALL_SERVER_ACK_REQUEST, /* - server pending ACK of request */
367 RXRPC_CALL_SERVER_SEND_REPLY, /* - server sending reply */
368 RXRPC_CALL_SERVER_AWAIT_ACK, /* - server awaiting final ACK */
369 RXRPC_CALL_COMPLETE, /* - call completed */
370 RXRPC_CALL_SERVER_BUSY, /* - call rejected by busy server */
371 RXRPC_CALL_REMOTELY_ABORTED, /* - call aborted by peer */
372 RXRPC_CALL_LOCALLY_ABORTED, /* - call aborted locally on error or close */
373 RXRPC_CALL_NETWORK_ERROR, /* - call terminated by network error */
374 RXRPC_CALL_DEAD, /* - call is dead */
375 } state;
376 int debug_id; /* debug ID for printks */
377 u8 channel; /* connection channel occupied by this call */
378
379 /* transmission-phase ACK management */
380 uint8_t acks_head; /* offset into window of first entry */
381 uint8_t acks_tail; /* offset into window of last entry */
382 uint8_t acks_winsz; /* size of un-ACK'd window */
383 uint8_t acks_unacked; /* lowest unacked packet in last ACK received */
384 int acks_latest; /* serial number of latest ACK received */
385 rxrpc_seq_t acks_hard; /* highest definitively ACK'd msg seq */
386 unsigned long *acks_window; /* sent packet window
387 * - elements are pointers with LSB set if ACK'd
388 */
389
390 /* receive-phase ACK management */
391 rxrpc_seq_t rx_data_expect; /* next data seq ID expected to be received */
392 rxrpc_seq_t rx_data_post; /* next data seq ID expected to be posted */
393 rxrpc_seq_t rx_data_recv; /* last data seq ID encountered by recvmsg */
394 rxrpc_seq_t rx_data_eaten; /* last data seq ID consumed by recvmsg */
395 rxrpc_seq_t rx_first_oos; /* first packet in rx_oos_queue (or 0) */
396 rxrpc_seq_t ackr_win_top; /* top of ACK window (rx_data_eaten is bottom) */
397 rxrpc_seq_net_t ackr_prev_seq; /* previous sequence number received */
398 uint8_t ackr_reason; /* reason to ACK */
399 __be32 ackr_serial; /* serial of packet being ACK'd */
400 atomic_t ackr_not_idle; /* number of packets in Rx queue */
401
402 /* received packet records, 1 bit per record */
403#define RXRPC_ACKR_WINDOW_ASZ DIV_ROUND_UP(RXRPC_MAXACKS, BITS_PER_LONG)
404 unsigned long ackr_window[RXRPC_ACKR_WINDOW_ASZ + 1];
405
406 /* the following should all be in net order */
407 __be32 cid; /* connection ID + channel index */
408 __be32 call_id; /* call ID on connection */
409};
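
The ackr_window bitmap above records one bit per received data packet so that duplicate and out-of-sequence arrivals can be detected cheaply. As a rough sketch of the idea (the window size and the masking scheme here are assumptions for illustration, not necessarily what ar-ack.c and ar-input.c do):

	/* Illustrative only: track receipt of sequence numbers in a
	 * power-of-two bitmap window using the standard bitops helpers.
	 * EX_WINDOW is an assumed size; the real code sizes its window
	 * from RXRPC_MAXACKS. */
	#include <linux/types.h>
	#include <linux/bitops.h>

	#define EX_WINDOW 64			/* assumed, power of two */
	static unsigned long ex_window[EX_WINDOW / BITS_PER_LONG];

	static void ex_mark_received(u32 seq)
	{
		__set_bit(seq & (EX_WINDOW - 1), ex_window);
	}

	static bool ex_seen_before(u32 seq)
	{
		return test_bit(seq & (EX_WINDOW - 1), ex_window);
	}

Bits must be cleared again as the window advances past old sequence numbers, otherwise a wrapped index would misreport a fresh packet as a duplicate.
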
410
411/*
412 * RxRPC key for Kerberos (type-2 security)
413 */
414struct rxkad_key {
415 u16 security_index; /* RxRPC header security index */
416 u16 ticket_len; /* length of ticket[] */
417 u32 expiry; /* time at which expires */
418 u32 kvno; /* key version number */
419 u8 session_key[8]; /* DES session key */
420 u8 ticket[0]; /* the encrypted ticket */
421};
422
423struct rxrpc_key_payload {
424 struct rxkad_key k;
425};
426
427/*
428 * locally abort an RxRPC call
429 */
430static inline void rxrpc_abort_call(struct rxrpc_call *call, u32 abort_code)
431{
432 write_lock_bh(&call->state_lock);
433 if (call->state < RXRPC_CALL_COMPLETE) {
434 call->abort_code = abort_code;
435 call->state = RXRPC_CALL_LOCALLY_ABORTED;
436 set_bit(RXRPC_CALL_ABORT, &call->events);
437 }
438 write_unlock_bh(&call->state_lock);
439}
440
441/*
442 * put a packet up for transport-level abort
443 */
444static inline
445void rxrpc_reject_packet(struct rxrpc_local *local, struct sk_buff *skb)
446{
447 CHECK_SLAB_OKAY(&local->usage);
448 if (!atomic_inc_not_zero(&local->usage)) {
449 printk("resurrected on reject\n");
450 BUG();
451 }
452 skb_queue_tail(&local->reject_queue, skb);
453 schedule_work(&local->rejecter);
454}
455
456/*
457 * ar-accept.c
458 */
459extern void rxrpc_accept_incoming_calls(struct work_struct *);
460extern int rxrpc_accept_call(struct rxrpc_sock *, unsigned long);
461
462/*
463 * ar-ack.c
464 */
465extern void __rxrpc_propose_ACK(struct rxrpc_call *, uint8_t, __be32, bool);
466extern void rxrpc_propose_ACK(struct rxrpc_call *, uint8_t, __be32, bool);
467extern void rxrpc_process_call(struct work_struct *);
468
469/*
470 * ar-call.c
471 */
472extern struct kmem_cache *rxrpc_call_jar;
473extern struct list_head rxrpc_calls;
474extern rwlock_t rxrpc_call_lock;
475
476extern struct rxrpc_call *rxrpc_get_client_call(struct rxrpc_sock *,
477 struct rxrpc_transport *,
478 struct rxrpc_conn_bundle *,
479 unsigned long, int, gfp_t);
480extern struct rxrpc_call *rxrpc_incoming_call(struct rxrpc_sock *,
481 struct rxrpc_connection *,
482 struct rxrpc_header *, gfp_t);
483extern struct rxrpc_call *rxrpc_find_server_call(struct rxrpc_sock *,
484 unsigned long);
485extern void rxrpc_release_call(struct rxrpc_call *);
486extern void rxrpc_release_calls_on_socket(struct rxrpc_sock *);
487extern void __rxrpc_put_call(struct rxrpc_call *);
488extern void __exit rxrpc_destroy_all_calls(void);
489
490/*
491 * ar-connection.c
492 */
493extern struct list_head rxrpc_connections;
494extern rwlock_t rxrpc_connection_lock;
495
496extern struct rxrpc_conn_bundle *rxrpc_get_bundle(struct rxrpc_sock *,
497 struct rxrpc_transport *,
498 struct key *,
499 __be16, gfp_t);
500extern void rxrpc_put_bundle(struct rxrpc_transport *,
501 struct rxrpc_conn_bundle *);
502extern int rxrpc_connect_call(struct rxrpc_sock *, struct rxrpc_transport *,
503 struct rxrpc_conn_bundle *, struct rxrpc_call *,
504 gfp_t);
505extern void rxrpc_put_connection(struct rxrpc_connection *);
506extern void __exit rxrpc_destroy_all_connections(void);
507extern struct rxrpc_connection *rxrpc_find_connection(struct rxrpc_transport *,
508 struct rxrpc_header *);
509extern struct rxrpc_connection *
510rxrpc_incoming_connection(struct rxrpc_transport *, struct rxrpc_header *,
511 gfp_t);
512
513/*
514 * ar-connevent.c
515 */
516extern void rxrpc_process_connection(struct work_struct *);
517extern void rxrpc_reject_packets(struct work_struct *);
518
519/*
520 * ar-error.c
521 */
522extern void rxrpc_UDP_error_report(struct sock *);
523extern void rxrpc_UDP_error_handler(struct work_struct *);
524
525/*
526 * ar-input.c
527 */
528extern unsigned long rxrpc_ack_timeout;
529extern const char *rxrpc_pkts[];
530
531extern void rxrpc_data_ready(struct sock *, int);
532extern int rxrpc_queue_rcv_skb(struct rxrpc_call *, struct sk_buff *, bool,
533 bool);
534extern void rxrpc_fast_process_packet(struct rxrpc_call *, struct sk_buff *);
535
536/*
537 * ar-local.c
538 */
539extern rwlock_t rxrpc_local_lock;
540extern struct rxrpc_local *rxrpc_lookup_local(struct sockaddr_rxrpc *);
541extern void rxrpc_put_local(struct rxrpc_local *);
542extern void __exit rxrpc_destroy_all_locals(void);
543
544/*
545 * ar-key.c
546 */
547extern struct key_type key_type_rxrpc;
548extern struct key_type key_type_rxrpc_s;
549
550extern int rxrpc_request_key(struct rxrpc_sock *, char __user *, int);
551extern int rxrpc_server_keyring(struct rxrpc_sock *, char __user *, int);
552extern int rxrpc_get_server_data_key(struct rxrpc_connection *, const void *,
553 time_t, u32);
554
555/*
556 * ar-output.c
557 */
558extern int rxrpc_resend_timeout;
559
560extern int rxrpc_send_packet(struct rxrpc_transport *, struct sk_buff *);
561extern int rxrpc_client_sendmsg(struct kiocb *, struct rxrpc_sock *,
562 struct rxrpc_transport *, struct msghdr *,
563 size_t);
564extern int rxrpc_server_sendmsg(struct kiocb *, struct rxrpc_sock *,
565 struct msghdr *, size_t);
566
567/*
568 * ar-peer.c
569 */
570extern struct rxrpc_peer *rxrpc_get_peer(struct sockaddr_rxrpc *, gfp_t);
571extern void rxrpc_put_peer(struct rxrpc_peer *);
572extern struct rxrpc_peer *rxrpc_find_peer(struct rxrpc_local *,
573 __be32, __be16);
574extern void __exit rxrpc_destroy_all_peers(void);
575
576/*
577 * ar-proc.c
578 */
579extern const char *rxrpc_call_states[];
580extern struct file_operations rxrpc_call_seq_fops;
581extern struct file_operations rxrpc_connection_seq_fops;
582
583/*
584 * ar-recvmsg.c
585 */
586extern int rxrpc_recvmsg(struct kiocb *, struct socket *, struct msghdr *,
587 size_t, int);
588
589/*
590 * ar-security.c
591 */
592extern int rxrpc_register_security(struct rxrpc_security *);
593extern void rxrpc_unregister_security(struct rxrpc_security *);
594extern int rxrpc_init_client_conn_security(struct rxrpc_connection *);
595extern int rxrpc_init_server_conn_security(struct rxrpc_connection *);
596extern int rxrpc_secure_packet(const struct rxrpc_call *, struct sk_buff *,
597 size_t, void *);
598extern int rxrpc_verify_packet(const struct rxrpc_call *, struct sk_buff *,
599 u32 *);
600extern void rxrpc_clear_conn_security(struct rxrpc_connection *);
601
602/*
603 * ar-skbuff.c
604 */
605extern void rxrpc_packet_destructor(struct sk_buff *);
606
607/*
608 * ar-transport.c
609 */
610extern struct rxrpc_transport *rxrpc_get_transport(struct rxrpc_local *,
611 struct rxrpc_peer *,
612 gfp_t);
613extern void rxrpc_put_transport(struct rxrpc_transport *);
614extern void __exit rxrpc_destroy_all_transports(void);
615extern struct rxrpc_transport *rxrpc_find_transport(struct rxrpc_local *,
616 struct rxrpc_peer *);
617
618/*
619 * debug tracing
620 */
621extern unsigned rxrpc_debug;
622
623#define dbgprintk(FMT,...) \
624 printk("[%x%-6.6s] "FMT"\n", smp_processor_id(), current->comm ,##__VA_ARGS__)
625
626/* make sure we maintain the format strings, even when debugging is disabled */
627static inline __attribute__((format(printf,1,2)))
628void _dbprintk(const char *fmt, ...)
629{
630}
631
632#define kenter(FMT,...) dbgprintk("==> %s("FMT")",__FUNCTION__ ,##__VA_ARGS__)
633#define kleave(FMT,...) dbgprintk("<== %s()"FMT"",__FUNCTION__ ,##__VA_ARGS__)
634#define kdebug(FMT,...) dbgprintk(" "FMT ,##__VA_ARGS__)
635#define kproto(FMT,...) dbgprintk("### "FMT ,##__VA_ARGS__)
636#define knet(FMT,...) dbgprintk("@@@ "FMT ,##__VA_ARGS__)
637
638
639#if defined(__KDEBUG)
640#define _enter(FMT,...) kenter(FMT,##__VA_ARGS__)
641#define _leave(FMT,...) kleave(FMT,##__VA_ARGS__)
642#define _debug(FMT,...) kdebug(FMT,##__VA_ARGS__)
643#define _proto(FMT,...) kproto(FMT,##__VA_ARGS__)
644#define _net(FMT,...) knet(FMT,##__VA_ARGS__)
645
646#elif defined(CONFIG_AF_RXRPC_DEBUG)
647#define RXRPC_DEBUG_KENTER 0x01
648#define RXRPC_DEBUG_KLEAVE 0x02
649#define RXRPC_DEBUG_KDEBUG 0x04
650#define RXRPC_DEBUG_KPROTO 0x08
651#define RXRPC_DEBUG_KNET 0x10
652
653#define _enter(FMT,...) \
654do { \
655 if (unlikely(rxrpc_debug & RXRPC_DEBUG_KENTER)) \
656 kenter(FMT,##__VA_ARGS__); \
657} while (0)
658
659#define _leave(FMT,...) \
660do { \
661 if (unlikely(rxrpc_debug & RXRPC_DEBUG_KLEAVE)) \
662 kleave(FMT,##__VA_ARGS__); \
663} while (0)
664
665#define _debug(FMT,...) \
666do { \
667 if (unlikely(rxrpc_debug & RXRPC_DEBUG_KDEBUG)) \
668 kdebug(FMT,##__VA_ARGS__); \
669} while (0)
670
671#define _proto(FMT,...) \
672do { \
673 if (unlikely(rxrpc_debug & RXRPC_DEBUG_KPROTO)) \
674 kproto(FMT,##__VA_ARGS__); \
675} while (0)
676
677#define _net(FMT,...) \
678do { \
679 if (unlikely(rxrpc_debug & RXRPC_DEBUG_KNET)) \
680 knet(FMT,##__VA_ARGS__); \
681} while (0)
682
683#else
684#define _enter(FMT,...) _dbprintk("==> %s("FMT")",__FUNCTION__ ,##__VA_ARGS__)
685#define _leave(FMT,...) _dbprintk("<== %s()"FMT"",__FUNCTION__ ,##__VA_ARGS__)
686#define _debug(FMT,...) _dbprintk(" "FMT ,##__VA_ARGS__)
687#define _proto(FMT,...) _dbprintk("### "FMT ,##__VA_ARGS__)
688#define _net(FMT,...) _dbprintk("@@@ "FMT ,##__VA_ARGS__)
689#endif
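
Each trace category can thus be switched on independently at run time by setting bits in rxrpc_debug when the kernel is built with CONFIG_AF_RXRPC_DEBUG. A minimal sketch follows; how rxrpc_debug is exposed to the administrator (e.g. as a module parameter) is defined outside this excerpt, so treat that part as an assumption:

	/* Sketch: turn on protocol and wire tracing only; the other
	 * categories stay compiled in but silent. */
	static void ex_enable_rxrpc_tracing(void)
	{
		rxrpc_debug = RXRPC_DEBUG_KPROTO | RXRPC_DEBUG_KNET;

		_proto("Tx PING");	/* emitted: KPROTO bit is set */
		_enter("");		/* dropped: KENTER bit is clear */
	}
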
690
691/*
692 * debug assertion checking
693 */
694#if 1 // defined(__KDEBUGALL)
695
696#define ASSERT(X) \
697do { \
698 if (unlikely(!(X))) { \
699 printk(KERN_ERR "\n"); \
700 printk(KERN_ERR "RxRPC: Assertion failed\n"); \
701 BUG(); \
702 } \
703} while(0)
704
705#define ASSERTCMP(X, OP, Y) \
706do { \
707 if (unlikely(!((X) OP (Y)))) { \
708 printk(KERN_ERR "\n"); \
709 printk(KERN_ERR "RxRPC: Assertion failed\n"); \
710 printk(KERN_ERR "%lu " #OP " %lu is false\n", \
711 (unsigned long)(X), (unsigned long)(Y)); \
712 printk(KERN_ERR "0x%lx " #OP " 0x%lx is false\n", \
713 (unsigned long)(X), (unsigned long)(Y)); \
714 BUG(); \
715 } \
716} while(0)
717
718#define ASSERTIF(C, X) \
719do { \
720 if (unlikely((C) && !(X))) { \
721 printk(KERN_ERR "\n"); \
722 printk(KERN_ERR "RxRPC: Assertion failed\n"); \
723 BUG(); \
724 } \
725} while(0)
726
727#define ASSERTIFCMP(C, X, OP, Y) \
728do { \
729 if (unlikely((C) && !((X) OP (Y)))) { \
730 printk(KERN_ERR "\n"); \
731 printk(KERN_ERR "RxRPC: Assertion failed\n"); \
732 printk(KERN_ERR "%lu " #OP " %lu is false\n", \
733 (unsigned long)(X), (unsigned long)(Y)); \
734 printk(KERN_ERR "0x%lx " #OP " 0x%lx is false\n", \
735 (unsigned long)(X), (unsigned long)(Y)); \
736 BUG(); \
737 } \
738} while(0)
739
740#else
741
742#define ASSERT(X) \
743do { \
744} while(0)
745
746#define ASSERTCMP(X, OP, Y) \
747do { \
748} while(0)
749
750#define ASSERTIF(C, X) \
751do { \
752} while(0)
753
754#define ASSERTIFCMP(C, X, OP, Y) \
755do { \
756} while(0)
757
758#endif /* __KDEBUGALL */
759
760/*
761 * socket buffer accounting / leak finding
762 */
763static inline void __rxrpc_new_skb(struct sk_buff *skb, const char *fn)
764{
765 //_net("new skb %p %s [%d]", skb, fn, atomic_read(&rxrpc_n_skbs));
766 //atomic_inc(&rxrpc_n_skbs);
767}
768
769#define rxrpc_new_skb(skb) __rxrpc_new_skb((skb), __func__)
770
771static inline void __rxrpc_kill_skb(struct sk_buff *skb, const char *fn)
772{
773 //_net("kill skb %p %s [%d]", skb, fn, atomic_read(&rxrpc_n_skbs));
774 //atomic_dec(&rxrpc_n_skbs);
775}
776
777#define rxrpc_kill_skb(skb) __rxrpc_kill_skb((skb), __func__)
778
779static inline void __rxrpc_free_skb(struct sk_buff *skb, const char *fn)
780{
781 if (skb) {
782 CHECK_SLAB_OKAY(&skb->users);
783 //_net("free skb %p %s [%d]",
784 // skb, fn, atomic_read(&rxrpc_n_skbs));
785 //atomic_dec(&rxrpc_n_skbs);
786 kfree_skb(skb);
787 }
788}
789
790#define rxrpc_free_skb(skb) __rxrpc_free_skb((skb), __func__)
791
792static inline void rxrpc_purge_queue(struct sk_buff_head *list)
793{
794 struct sk_buff *skb;
795 while ((skb = skb_dequeue((list))) != NULL)
796 rxrpc_free_skb(skb);
797}
798
799static inline void __rxrpc__atomic_inc(atomic_t *v)
800{
801 CHECK_SLAB_OKAY(v);
802 atomic_inc(v);
803}
804
805#define atomic_inc(v) __rxrpc__atomic_inc((v))
806
807static inline void __rxrpc__atomic_dec(atomic_t *v)
808{
809 CHECK_SLAB_OKAY(v);
810 atomic_dec(v);
811}
812
813#define atomic_dec(v) __rxrpc__atomic_dec((v))
814
815static inline int __rxrpc__atomic_dec_and_test(atomic_t *v)
816{
817 CHECK_SLAB_OKAY(v);
818 return atomic_dec_and_test(v);
819}
820
821#define atomic_dec_and_test(v) __rxrpc__atomic_dec_and_test((v))
822
823static inline void __rxrpc_get_local(struct rxrpc_local *local, const char *f)
824{
825 CHECK_SLAB_OKAY(&local->usage);
826 if (atomic_inc_return(&local->usage) == 1)
827 printk("resurrected (%s)\n", f);
828}
829
830#define rxrpc_get_local(LOCAL) __rxrpc_get_local((LOCAL), __func__)
831
832#define rxrpc_get_call(CALL) \
833do { \
834 CHECK_SLAB_OKAY(&(CALL)->usage); \
835 if (atomic_inc_return(&(CALL)->usage) == 1) \
836 BUG(); \
837} while(0)
838
839#define rxrpc_put_call(CALL) \
840do { \
841 __rxrpc_put_call(CALL); \
842} while(0)
diff --git a/net/rxrpc/ar-key.c b/net/rxrpc/ar-key.c
new file mode 100644
index 000000000000..7e049ff6ae60
--- /dev/null
+++ b/net/rxrpc/ar-key.c
@@ -0,0 +1,334 @@
1/* RxRPC key management
2 *
3 * Copyright (C) 2007 Red Hat, Inc. All Rights Reserved.
4 * Written by David Howells (dhowells@redhat.com)
5 *
6 * This program is free software; you can redistribute it and/or
7 * modify it under the terms of the GNU General Public License
8 * as published by the Free Software Foundation; either version
9 * 2 of the License, or (at your option) any later version.
10 *
11 * RxRPC keys should have a description describing their purpose:
12 *	"afs@CAMBRIDGE.REDHAT.COM"
13 */
14
15#include <linux/module.h>
16#include <linux/net.h>
17#include <linux/skbuff.h>
18#include <linux/key.h>
19#include <linux/crypto.h>
20#include <net/sock.h>
21#include <net/af_rxrpc.h>
22#include <keys/rxrpc-type.h>
23#include <keys/user-type.h>
24#include "ar-internal.h"
25
26static int rxrpc_instantiate(struct key *, const void *, size_t);
27static int rxrpc_instantiate_s(struct key *, const void *, size_t);
28static void rxrpc_destroy(struct key *);
29static void rxrpc_destroy_s(struct key *);
30static void rxrpc_describe(const struct key *, struct seq_file *);
31
32/*
33 * rxrpc defined keys take an arbitrary string as the description and an
34 * arbitrary blob of data as the payload
35 */
36struct key_type key_type_rxrpc = {
37 .name = "rxrpc",
38 .instantiate = rxrpc_instantiate,
39 .match = user_match,
40 .destroy = rxrpc_destroy,
41 .describe = rxrpc_describe,
42};
43
44EXPORT_SYMBOL(key_type_rxrpc);
45
46/*
47 * rxrpc server defined keys take "<serviceId>:<securityIndex>" as the
48 * description and an 8-byte decryption key as the payload
49 */
50struct key_type key_type_rxrpc_s = {
51 .name = "rxrpc_s",
52 .instantiate = rxrpc_instantiate_s,
53 .match = user_match,
54 .destroy = rxrpc_destroy_s,
55 .describe = rxrpc_describe,
56};
57
58/*
59 * instantiate an rxrpc defined key
60 * data should be of the form:
61 * OFFSET LEN CONTENT
62 * 0 4 key interface version number
63 * 4 2 security index (type)
64 * 6 2 ticket length
65 * 8 4 key expiry time (time_t)
66 * 12 4 kvno
67 * 16 8 session key
68 * 24 [len] ticket
69 *
70 * if no data is provided, then a no-security key is made
71 */
72static int rxrpc_instantiate(struct key *key, const void *data, size_t datalen)
73{
74 const struct rxkad_key *tsec;
75 struct rxrpc_key_payload *upayload;
76 size_t plen;
77 u32 kver;
78 int ret;
79
80 _enter("{%x},,%zu", key_serial(key), datalen);
81
82 /* handle a no-security key */
83 if (!data && datalen == 0)
84 return 0;
85
86 /* get the key interface version number */
87 ret = -EINVAL;
88 if (datalen <= 4 || !data)
89 goto error;
90 memcpy(&kver, data, sizeof(kver));
91 data += sizeof(kver);
92 datalen -= sizeof(kver);
93
94 _debug("KEY I/F VERSION: %u", kver);
95
96 ret = -EKEYREJECTED;
97 if (kver != 1)
98 goto error;
99
100 /* deal with a version 1 key */
101 ret = -EINVAL;
102 if (datalen < sizeof(*tsec))
103 goto error;
104
105 tsec = data;
106 if (datalen != sizeof(*tsec) + tsec->ticket_len)
107 goto error;
108
109 _debug("SCIX: %u", tsec->security_index);
110 _debug("TLEN: %u", tsec->ticket_len);
111 _debug("EXPY: %x", tsec->expiry);
112 _debug("KVNO: %u", tsec->kvno);
113 _debug("SKEY: %02x%02x%02x%02x%02x%02x%02x%02x",
114 tsec->session_key[0], tsec->session_key[1],
115 tsec->session_key[2], tsec->session_key[3],
116 tsec->session_key[4], tsec->session_key[5],
117 tsec->session_key[6], tsec->session_key[7]);
118 if (tsec->ticket_len >= 8)
119 _debug("TCKT: %02x%02x%02x%02x%02x%02x%02x%02x",
120 tsec->ticket[0], tsec->ticket[1],
121 tsec->ticket[2], tsec->ticket[3],
122 tsec->ticket[4], tsec->ticket[5],
123 tsec->ticket[6], tsec->ticket[7]);
124
125 ret = -EPROTONOSUPPORT;
126 if (tsec->security_index != 2)
127 goto error;
128
129 key->type_data.x[0] = tsec->security_index;
130
131 plen = sizeof(*upayload) + tsec->ticket_len;
132 ret = key_payload_reserve(key, plen);
133 if (ret < 0)
134 goto error;
135
136 ret = -ENOMEM;
137 upayload = kmalloc(plen, GFP_KERNEL);
138 if (!upayload)
139 goto error;
140
141 /* attach the data */
142 memcpy(&upayload->k, tsec, sizeof(*tsec));
143 memcpy(&upayload->k.ticket, (void *)tsec + sizeof(*tsec),
144 tsec->ticket_len);
145 key->payload.data = upayload;
146 key->expiry = tsec->expiry;
147 ret = 0;
148
149error:
150 return ret;
151}
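
From userspace, a version-1 payload matching the layout table above can be packed into a struct and handed to add_key(2). A hedged sketch, assuming the keyutils library is available; the struct name, description string, and kvno value are illustrative, and only the "rxrpc" type name and the field layout come from this file:

	/* Userspace sketch: pack and load a version-1 rxkad key blob.
	 * Error handling trimmed for brevity. */
	#include <keyutils.h>
	#include <stdint.h>
	#include <string.h>

	struct ex_rxrpc_key_v1 {
		uint32_t kver;			/* 0: interface version, 1 */
		uint16_t security_index;	/* 4: 2 = rxkad */
		uint16_t ticket_len;		/* 6 */
		uint32_t expiry;		/* 8: time_t */
		uint32_t kvno;			/* 12 */
		uint8_t  session_key[8];	/* 16: DES session key */
		uint8_t  ticket[256];		/* 24: encrypted ticket */
	} __attribute__((packed));

	static key_serial_t ex_load_rxrpc_key(const uint8_t skey[8],
					      const void *ticket,
					      uint16_t ticket_len,
					      uint32_t expiry)
	{
		struct ex_rxrpc_key_v1 blob = {
			.kver		= 1,
			.security_index	= 2,
			.ticket_len	= ticket_len,
			.expiry		= expiry,
			.kvno		= 1,
		};

		memcpy(blob.session_key, skey, 8);
		memcpy(blob.ticket, ticket, ticket_len);

		/* payload length is 24 bytes of header plus the ticket */
		return add_key("rxrpc", "afs@EXAMPLE.COM", &blob,
			       24 + ticket_len, KEY_SPEC_SESSION_KEYRING);
	}
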
152
153/*
154 * instantiate a server secret key
155 * data should be a pointer to the 8-byte secret key
156 */
157static int rxrpc_instantiate_s(struct key *key, const void *data,
158 size_t datalen)
159{
160 struct crypto_blkcipher *ci;
161
162 _enter("{%x},,%zu", key_serial(key), datalen);
163
164 if (datalen != 8)
165 return -EINVAL;
166
167 memcpy(&key->type_data, data, 8);
168
169 ci = crypto_alloc_blkcipher("pcbc(des)", 0, CRYPTO_ALG_ASYNC);
170 if (IS_ERR(ci)) {
171 _leave(" = %ld", PTR_ERR(ci));
172 return PTR_ERR(ci);
173 }
174
175 if (crypto_blkcipher_setkey(ci, data, 8) < 0)
176 BUG();
177
178 key->payload.data = ci;
179 _leave(" = 0");
180 return 0;
181}
182
183/*
184 * dispose of the data dangling from the corpse of an rxrpc key
185 */
186static void rxrpc_destroy(struct key *key)
187{
188 kfree(key->payload.data);
189}
190
191/*
192 * dispose of the data dangling from the corpse of an rxrpc server key
193 */
194static void rxrpc_destroy_s(struct key *key)
195{
196 if (key->payload.data) {
197 crypto_free_blkcipher(key->payload.data);
198 key->payload.data = NULL;
199 }
200}
201
202/*
203 * describe the rxrpc key
204 */
205static void rxrpc_describe(const struct key *key, struct seq_file *m)
206{
207 seq_puts(m, key->description);
208}
209
210/*
211 * grab the security key for a socket
212 */
213int rxrpc_request_key(struct rxrpc_sock *rx, char __user *optval, int optlen)
214{
215 struct key *key;
216 char *description;
217
218 _enter("");
219
220 if (optlen <= 0 || optlen > PAGE_SIZE - 1)
221 return -EINVAL;
222
223 description = kmalloc(optlen + 1, GFP_KERNEL);
224 if (!description)
225 return -ENOMEM;
226
227 if (copy_from_user(description, optval, optlen)) {
228 kfree(description);
229 return -EFAULT;
230 }
231 description[optlen] = 0;
232
233 key = request_key(&key_type_rxrpc, description, NULL);
234 if (IS_ERR(key)) {
235 kfree(description);
236 _leave(" = %ld", PTR_ERR(key));
237 return PTR_ERR(key);
238 }
239
240 rx->key = key;
241 kfree(description);
242 _leave(" = 0 [key %x]", key->serial);
243 return 0;
244}
245
246/*
247 * grab the security keyring for a server socket
248 */
249int rxrpc_server_keyring(struct rxrpc_sock *rx, char __user *optval,
250 int optlen)
251{
252 struct key *key;
253 char *description;
254
255 _enter("");
256
257 if (optlen <= 0 || optlen > PAGE_SIZE - 1)
258 return -EINVAL;
259
260 description = kmalloc(optlen + 1, GFP_KERNEL);
261 if (!description)
262 return -ENOMEM;
263
264 if (copy_from_user(description, optval, optlen)) {
265 kfree(description);
266 return -EFAULT;
267 }
268 description[optlen] = 0;
269
270 key = request_key(&key_type_keyring, description, NULL);
271 if (IS_ERR(key)) {
272 kfree(description);
273 _leave(" = %ld", PTR_ERR(key));
274 return PTR_ERR(key);
275 }
276
277 rx->securities = key;
278 kfree(description);
279 _leave(" = 0 [key %x]", key->serial);
280 return 0;
281}
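
Both helpers above are reached from setsockopt() on an AF_RXRPC socket: the kernel looks the named key up in the caller's keyrings and pins it to the socket. A userspace sketch; the option names RXRPC_SECURITY_KEY and RXRPC_SECURITY_KEYRING are defined in af_rxrpc.c elsewhere in this patch, so treat their spelling here as an assumption:

	/* Sketch: attach a client security key by description. */
	#include <sys/socket.h>

	static int ex_set_client_key(int fd)
	{
		static const char desc[] = "afs@EXAMPLE.COM";

		return setsockopt(fd, SOL_RXRPC, RXRPC_SECURITY_KEY,
				  desc, sizeof(desc) - 1);
	}
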
282
283/*
284 * generate a server data key
285 */
286int rxrpc_get_server_data_key(struct rxrpc_connection *conn,
287 const void *session_key,
288 time_t expiry,
289 u32 kvno)
290{
291 struct key *key;
292 int ret;
293
294 struct {
295 u32 kver;
296 struct rxkad_key tsec;
297 } data;
298
299 _enter("");
300
301 key = key_alloc(&key_type_rxrpc, "x", 0, 0, current, 0,
302 KEY_ALLOC_NOT_IN_QUOTA);
303 if (IS_ERR(key)) {
304 _leave(" = -ENOMEM [alloc %ld]", PTR_ERR(key));
305 return -ENOMEM;
306 }
307
308 _debug("key %d", key_serial(key));
309
310 data.kver = 1;
311 data.tsec.security_index = 2;
312 data.tsec.ticket_len = 0;
313 data.tsec.expiry = expiry;
314	data.tsec.kvno = kvno;
315
316 memcpy(&data.tsec.session_key, session_key,
317 sizeof(data.tsec.session_key));
318
319 ret = key_instantiate_and_link(key, &data, sizeof(data), NULL, NULL);
320 if (ret < 0)
321 goto error;
322
323 conn->key = key;
324 _leave(" = 0 [%d]", key_serial(key));
325 return 0;
326
327error:
328 key_revoke(key);
329 key_put(key);
330 _leave(" = -ENOMEM [ins %d]", ret);
331 return -ENOMEM;
332}
333
334EXPORT_SYMBOL(rxrpc_get_server_data_key);
diff --git a/net/rxrpc/ar-local.c b/net/rxrpc/ar-local.c
new file mode 100644
index 000000000000..a20a2c0fe105
--- /dev/null
+++ b/net/rxrpc/ar-local.c
@@ -0,0 +1,309 @@
1/* AF_RXRPC local endpoint management
2 *
3 * Copyright (C) 2007 Red Hat, Inc. All Rights Reserved.
4 * Written by David Howells (dhowells@redhat.com)
5 *
6 * This program is free software; you can redistribute it and/or
7 * modify it under the terms of the GNU General Public License
8 * as published by the Free Software Foundation; either version
9 * 2 of the License, or (at your option) any later version.
10 */
11
12#include <linux/module.h>
13#include <linux/net.h>
14#include <linux/skbuff.h>
15#include <net/sock.h>
16#include <net/af_rxrpc.h>
17#include "ar-internal.h"
18
19static LIST_HEAD(rxrpc_locals);
20DEFINE_RWLOCK(rxrpc_local_lock);
21static DECLARE_RWSEM(rxrpc_local_sem);
22static DECLARE_WAIT_QUEUE_HEAD(rxrpc_local_wq);
23
24static void rxrpc_destroy_local(struct work_struct *work);
25
26/*
27 * allocate a new local
28 */
29static
30struct rxrpc_local *rxrpc_alloc_local(struct sockaddr_rxrpc *srx)
31{
32 struct rxrpc_local *local;
33
34 local = kzalloc(sizeof(struct rxrpc_local), GFP_KERNEL);
35 if (local) {
36 INIT_WORK(&local->destroyer, &rxrpc_destroy_local);
37 INIT_WORK(&local->acceptor, &rxrpc_accept_incoming_calls);
38 INIT_WORK(&local->rejecter, &rxrpc_reject_packets);
39 INIT_LIST_HEAD(&local->services);
40 INIT_LIST_HEAD(&local->link);
41 init_rwsem(&local->defrag_sem);
42 skb_queue_head_init(&local->accept_queue);
43 skb_queue_head_init(&local->reject_queue);
44 spin_lock_init(&local->lock);
45 rwlock_init(&local->services_lock);
46 atomic_set(&local->usage, 1);
47 local->debug_id = atomic_inc_return(&rxrpc_debug_id);
48 memcpy(&local->srx, srx, sizeof(*srx));
49 }
50
51 _leave(" = %p", local);
52 return local;
53}
54
55/*
56 * create the local socket
57 * - must be called with rxrpc_local_sem writelocked
58 */
59static int rxrpc_create_local(struct rxrpc_local *local)
60{
61 struct sock *sock;
62 int ret, opt;
63
64 _enter("%p{%d}", local, local->srx.transport_type);
65
66 /* create a socket to represent the local endpoint */
67 ret = sock_create_kern(PF_INET, local->srx.transport_type, IPPROTO_UDP,
68 &local->socket);
69 if (ret < 0) {
70 _leave(" = %d [socket]", ret);
71 return ret;
72 }
73
74 /* if a local address was supplied then bind it */
75 if (local->srx.transport_len > sizeof(sa_family_t)) {
76 _debug("bind");
77 ret = kernel_bind(local->socket,
78 (struct sockaddr *) &local->srx.transport,
79 local->srx.transport_len);
80 if (ret < 0) {
81 _debug("bind failed");
82 goto error;
83 }
84 }
85
86 /* we want to receive ICMP errors */
87 opt = 1;
88 ret = kernel_setsockopt(local->socket, SOL_IP, IP_RECVERR,
89 (char *) &opt, sizeof(opt));
90 if (ret < 0) {
91 _debug("setsockopt failed");
92 goto error;
93 }
94
95 /* we want to set the don't fragment bit */
96 opt = IP_PMTUDISC_DO;
97 ret = kernel_setsockopt(local->socket, SOL_IP, IP_MTU_DISCOVER,
98 (char *) &opt, sizeof(opt));
99 if (ret < 0) {
100 _debug("setsockopt failed");
101 goto error;
102 }
103
104 write_lock_bh(&rxrpc_local_lock);
105 list_add(&local->link, &rxrpc_locals);
106 write_unlock_bh(&rxrpc_local_lock);
107
108 /* set the socket up */
109 sock = local->socket->sk;
110 sock->sk_user_data = local;
111 sock->sk_data_ready = rxrpc_data_ready;
112 sock->sk_error_report = rxrpc_UDP_error_report;
113 _leave(" = 0");
114 return 0;
115
116error:
117 local->socket->ops->shutdown(local->socket, 2);
118 local->socket->sk->sk_user_data = NULL;
119 sock_release(local->socket);
120 local->socket = NULL;
121
122 _leave(" = %d", ret);
123 return ret;
124}
125
126/*
127 * create a new local endpoint using the specified UDP address
128 */
129struct rxrpc_local *rxrpc_lookup_local(struct sockaddr_rxrpc *srx)
130{
131 struct rxrpc_local *local;
132 int ret;
133
134 _enter("{%d,%u,%u.%u.%u.%u+%hu}",
135 srx->transport_type,
136 srx->transport.family,
137 NIPQUAD(srx->transport.sin.sin_addr),
138 ntohs(srx->transport.sin.sin_port));
139
140 down_write(&rxrpc_local_sem);
141
142	/* see if we have a suitable local endpoint already */
143 read_lock_bh(&rxrpc_local_lock);
144
145 list_for_each_entry(local, &rxrpc_locals, link) {
146 _debug("CMP {%d,%u,%u.%u.%u.%u+%hu}",
147 local->srx.transport_type,
148 local->srx.transport.family,
149 NIPQUAD(local->srx.transport.sin.sin_addr),
150 ntohs(local->srx.transport.sin.sin_port));
151
152 if (local->srx.transport_type != srx->transport_type ||
153 local->srx.transport.family != srx->transport.family)
154 continue;
155
156 switch (srx->transport.family) {
157 case AF_INET:
158 if (local->srx.transport.sin.sin_port !=
159 srx->transport.sin.sin_port)
160 continue;
161 if (memcmp(&local->srx.transport.sin.sin_addr,
162 &srx->transport.sin.sin_addr,
163 sizeof(struct in_addr)) != 0)
164 continue;
165 goto found_local;
166
167 default:
168 BUG();
169 }
170 }
171
172 read_unlock_bh(&rxrpc_local_lock);
173
174 /* we didn't find one, so we need to create one */
175 local = rxrpc_alloc_local(srx);
176 if (!local) {
177 up_write(&rxrpc_local_sem);
178 return ERR_PTR(-ENOMEM);
179 }
180
181 ret = rxrpc_create_local(local);
182 if (ret < 0) {
183 up_write(&rxrpc_local_sem);
184 kfree(local);
185 _leave(" = %d", ret);
186 return ERR_PTR(ret);
187 }
188
189 up_write(&rxrpc_local_sem);
190
191 _net("LOCAL new %d {%d,%u,%u.%u.%u.%u+%hu}",
192 local->debug_id,
193 local->srx.transport_type,
194 local->srx.transport.family,
195 NIPQUAD(local->srx.transport.sin.sin_addr),
196 ntohs(local->srx.transport.sin.sin_port));
197
198 _leave(" = %p [new]", local);
199 return local;
200
201found_local:
202 rxrpc_get_local(local);
203 read_unlock_bh(&rxrpc_local_lock);
204 up_write(&rxrpc_local_sem);
205
206 _net("LOCAL old %d {%d,%u,%u.%u.%u.%u+%hu}",
207 local->debug_id,
208 local->srx.transport_type,
209 local->srx.transport.family,
210 NIPQUAD(local->srx.transport.sin.sin_addr),
211 ntohs(local->srx.transport.sin.sin_port));
212
213 _leave(" = %p [reuse]", local);
214 return local;
215}
216
217/*
218 * release a local endpoint
219 */
220void rxrpc_put_local(struct rxrpc_local *local)
221{
222 _enter("%p{u=%d}", local, atomic_read(&local->usage));
223
224 ASSERTCMP(atomic_read(&local->usage), >, 0);
225
226 /* to prevent a race, the decrement and the dequeue must be effectively
227 * atomic */
228 write_lock_bh(&rxrpc_local_lock);
229 if (unlikely(atomic_dec_and_test(&local->usage))) {
230 _debug("destroy local");
231 schedule_work(&local->destroyer);
232 }
233 write_unlock_bh(&rxrpc_local_lock);
234 _leave("");
235}
236
237/*
238 * destroy a local endpoint
239 */
240static void rxrpc_destroy_local(struct work_struct *work)
241{
242 struct rxrpc_local *local =
243 container_of(work, struct rxrpc_local, destroyer);
244
245 _enter("%p{%d}", local, atomic_read(&local->usage));
246
247 down_write(&rxrpc_local_sem);
248
249 write_lock_bh(&rxrpc_local_lock);
250 if (atomic_read(&local->usage) > 0) {
251 write_unlock_bh(&rxrpc_local_lock);
252		up_write(&rxrpc_local_sem);
253 _leave(" [resurrected]");
254 return;
255 }
256
257 list_del(&local->link);
258 local->socket->sk->sk_user_data = NULL;
259 write_unlock_bh(&rxrpc_local_lock);
260
261 downgrade_write(&rxrpc_local_sem);
262
263 ASSERT(list_empty(&local->services));
264 ASSERT(!work_pending(&local->acceptor));
265 ASSERT(!work_pending(&local->rejecter));
266
267 /* finish cleaning up the local descriptor */
268 rxrpc_purge_queue(&local->accept_queue);
269 rxrpc_purge_queue(&local->reject_queue);
270 local->socket->ops->shutdown(local->socket, 2);
271 sock_release(local->socket);
272
273 up_read(&rxrpc_local_sem);
274
275 _net("DESTROY LOCAL %d", local->debug_id);
276 kfree(local);
277
278 if (list_empty(&rxrpc_locals))
279 wake_up_all(&rxrpc_local_wq);
280
281 _leave("");
282}
283
284/*
285 * preemptively destroy all local endpoints rather than waiting for them
286 * to be destroyed
287 */
288void __exit rxrpc_destroy_all_locals(void)
289{
290 DECLARE_WAITQUEUE(myself,current);
291
292 _enter("");
293
294 /* we simply have to wait for them to go away */
295 if (!list_empty(&rxrpc_locals)) {
296 set_current_state(TASK_UNINTERRUPTIBLE);
297 add_wait_queue(&rxrpc_local_wq, &myself);
298
299 while (!list_empty(&rxrpc_locals)) {
300 schedule();
301 set_current_state(TASK_UNINTERRUPTIBLE);
302 }
303
304 remove_wait_queue(&rxrpc_local_wq, &myself);
305 set_current_state(TASK_RUNNING);
306 }
307
308 _leave("");
309}
diff --git a/net/rxrpc/ar-output.c b/net/rxrpc/ar-output.c
new file mode 100644
index 000000000000..67aa9510f09b
--- /dev/null
+++ b/net/rxrpc/ar-output.c
@@ -0,0 +1,658 @@
1/* RxRPC packet transmission
2 *
3 * Copyright (C) 2007 Red Hat, Inc. All Rights Reserved.
4 * Written by David Howells (dhowells@redhat.com)
5 *
6 * This program is free software; you can redistribute it and/or
7 * modify it under the terms of the GNU General Public License
8 * as published by the Free Software Foundation; either version
9 * 2 of the License, or (at your option) any later version.
10 */
11
12#include <linux/net.h>
13#include <linux/skbuff.h>
14#include <linux/circ_buf.h>
15#include <net/sock.h>
16#include <net/af_rxrpc.h>
17#include "ar-internal.h"
18
19int rxrpc_resend_timeout = 4;
20
21static int rxrpc_send_data(struct kiocb *iocb,
22 struct rxrpc_sock *rx,
23 struct rxrpc_call *call,
24 struct msghdr *msg, size_t len);
25
26/*
27 * extract control messages from the sendmsg() control buffer
28 */
29static int rxrpc_sendmsg_cmsg(struct rxrpc_sock *rx, struct msghdr *msg,
30 unsigned long *user_call_ID,
31 enum rxrpc_command *command,
32 u32 *abort_code,
33 bool server)
34{
35 struct cmsghdr *cmsg;
36 int len;
37
38 *command = RXRPC_CMD_SEND_DATA;
39
40 if (msg->msg_controllen == 0)
41 return -EINVAL;
42
43 for (cmsg = CMSG_FIRSTHDR(msg); cmsg; cmsg = CMSG_NXTHDR(msg, cmsg)) {
44 if (!CMSG_OK(msg, cmsg))
45 return -EINVAL;
46
47 len = cmsg->cmsg_len - CMSG_ALIGN(sizeof(struct cmsghdr));
48 _debug("CMSG %d, %d, %d",
49 cmsg->cmsg_level, cmsg->cmsg_type, len);
50
51 if (cmsg->cmsg_level != SOL_RXRPC)
52 continue;
53
54 switch (cmsg->cmsg_type) {
55 case RXRPC_USER_CALL_ID:
56 if (msg->msg_flags & MSG_CMSG_COMPAT) {
57 if (len != sizeof(u32))
58 return -EINVAL;
59 *user_call_ID = *(u32 *) CMSG_DATA(cmsg);
60 } else {
61 if (len != sizeof(unsigned long))
62 return -EINVAL;
63 *user_call_ID = *(unsigned long *)
64 CMSG_DATA(cmsg);
65 }
66 _debug("User Call ID %lx", *user_call_ID);
67 break;
68
69 case RXRPC_ABORT:
70 if (*command != RXRPC_CMD_SEND_DATA)
71 return -EINVAL;
72 *command = RXRPC_CMD_SEND_ABORT;
73 if (len != sizeof(*abort_code))
74 return -EINVAL;
75 *abort_code = *(unsigned int *) CMSG_DATA(cmsg);
76 _debug("Abort %x", *abort_code);
77 if (*abort_code == 0)
78 return -EINVAL;
79 break;
80
81 case RXRPC_ACCEPT:
82 if (*command != RXRPC_CMD_SEND_DATA)
83 return -EINVAL;
84 *command = RXRPC_CMD_ACCEPT;
85 if (len != 0)
86 return -EINVAL;
87 if (!server)
88 return -EISCONN;
89 break;
90
91 default:
92 return -EINVAL;
93 }
94 }
95
96 _leave(" = 0");
97 return 0;
98}
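
On the sending side, userspace supplies those control messages through the normal CMSG macros. A sketch of issuing a call with a caller-chosen call ID (error handling omitted; SOL_RXRPC and RXRPC_USER_CALL_ID are the constants parsed above, exported by this patch's headers):

	#include <string.h>
	#include <sys/socket.h>

	static ssize_t ex_send_request(int fd, const void *buf, size_t n,
				       unsigned long call_id)
	{
		char control[CMSG_SPACE(sizeof(call_id))];
		struct iovec iov = {
			.iov_base = (void *) buf,
			.iov_len  = n,
		};
		struct msghdr msg = {
			.msg_iov	= &iov,
			.msg_iovlen	= 1,
			.msg_control	= control,
			.msg_controllen	= sizeof(control),
		};
		struct cmsghdr *cmsg = CMSG_FIRSTHDR(&msg);

		cmsg->cmsg_level = SOL_RXRPC;
		cmsg->cmsg_type	 = RXRPC_USER_CALL_ID;
		cmsg->cmsg_len	 = CMSG_LEN(sizeof(call_id));
		memcpy(CMSG_DATA(cmsg), &call_id, sizeof(call_id));
		msg.msg_controllen = cmsg->cmsg_len;

		return sendmsg(fd, &msg, 0);	/* set MSG_MORE to continue */
	}
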
99
100/*
101 * abort a call, sending an ABORT packet to the peer
102 */
103static void rxrpc_send_abort(struct rxrpc_call *call, u32 abort_code)
104{
105 write_lock_bh(&call->state_lock);
106
107 if (call->state <= RXRPC_CALL_COMPLETE) {
108 call->state = RXRPC_CALL_LOCALLY_ABORTED;
109 call->abort_code = abort_code;
110 set_bit(RXRPC_CALL_ABORT, &call->events);
111 del_timer_sync(&call->resend_timer);
112 del_timer_sync(&call->ack_timer);
113 clear_bit(RXRPC_CALL_RESEND_TIMER, &call->events);
114 clear_bit(RXRPC_CALL_ACK, &call->events);
115 clear_bit(RXRPC_CALL_RUN_RTIMER, &call->flags);
116 schedule_work(&call->processor);
117 }
118
119 write_unlock_bh(&call->state_lock);
120}
121
122/*
123 * send a message forming part of a client call through an RxRPC socket
124 * - caller holds the socket locked
125 * - the socket may be either a client socket or a server socket
126 */
127int rxrpc_client_sendmsg(struct kiocb *iocb, struct rxrpc_sock *rx,
128 struct rxrpc_transport *trans, struct msghdr *msg,
129 size_t len)
130{
131 struct rxrpc_conn_bundle *bundle;
132 enum rxrpc_command cmd;
133 struct rxrpc_call *call;
134 unsigned long user_call_ID = 0;
135 struct key *key;
136 __be16 service_id;
137 u32 abort_code = 0;
138 int ret;
139
140 _enter("");
141
142 ASSERT(trans != NULL);
143
144 ret = rxrpc_sendmsg_cmsg(rx, msg, &user_call_ID, &cmd, &abort_code,
145 false);
146 if (ret < 0)
147 return ret;
148
149 bundle = NULL;
150 if (trans) {
151 service_id = rx->service_id;
152 if (msg->msg_name) {
153 struct sockaddr_rxrpc *srx =
154 (struct sockaddr_rxrpc *) msg->msg_name;
155 service_id = htons(srx->srx_service);
156 }
157 key = rx->key;
158 if (key && !rx->key->payload.data)
159 key = NULL;
160 bundle = rxrpc_get_bundle(rx, trans, key, service_id,
161 GFP_KERNEL);
162 if (IS_ERR(bundle))
163 return PTR_ERR(bundle);
164 }
165
166 call = rxrpc_get_client_call(rx, trans, bundle, user_call_ID,
167 abort_code == 0, GFP_KERNEL);
168 if (trans)
169 rxrpc_put_bundle(trans, bundle);
170 if (IS_ERR(call)) {
171 _leave(" = %ld", PTR_ERR(call));
172 return PTR_ERR(call);
173 }
174
175 _debug("CALL %d USR %lx ST %d on CONN %p",
176 call->debug_id, call->user_call_ID, call->state, call->conn);
177
178 if (call->state >= RXRPC_CALL_COMPLETE) {
179 /* it's too late for this call */
180 ret = -ESHUTDOWN;
181 } else if (cmd == RXRPC_CMD_SEND_ABORT) {
182 rxrpc_send_abort(call, abort_code);
183 } else if (cmd != RXRPC_CMD_SEND_DATA) {
184 ret = -EINVAL;
185 } else if (call->state != RXRPC_CALL_CLIENT_SEND_REQUEST) {
186 /* request phase complete for this client call */
187 ret = -EPROTO;
188 } else {
189 ret = rxrpc_send_data(iocb, rx, call, msg, len);
190 }
191
192 rxrpc_put_call(call);
193 _leave(" = %d", ret);
194 return ret;
195}
196
197/*
198 * send a message through a server socket
199 * - caller holds the socket locked
200 */
201int rxrpc_server_sendmsg(struct kiocb *iocb, struct rxrpc_sock *rx,
202 struct msghdr *msg, size_t len)
203{
204 enum rxrpc_command cmd;
205 struct rxrpc_call *call;
206 unsigned long user_call_ID = 0;
207 u32 abort_code = 0;
208 int ret;
209
210 _enter("");
211
212 ret = rxrpc_sendmsg_cmsg(rx, msg, &user_call_ID, &cmd, &abort_code,
213 true);
214 if (ret < 0)
215 return ret;
216
217 if (cmd == RXRPC_CMD_ACCEPT)
218 return rxrpc_accept_call(rx, user_call_ID);
219
220 call = rxrpc_find_server_call(rx, user_call_ID);
221 if (!call)
222 return -EBADSLT;
223 if (call->state >= RXRPC_CALL_COMPLETE) {
224 ret = -ESHUTDOWN;
225 goto out;
226 }
227
228 switch (cmd) {
229 case RXRPC_CMD_SEND_DATA:
230 if (call->state != RXRPC_CALL_CLIENT_SEND_REQUEST &&
231 call->state != RXRPC_CALL_SERVER_ACK_REQUEST &&
232 call->state != RXRPC_CALL_SERVER_SEND_REPLY) {
233 /* Tx phase not yet begun for this call */
234 ret = -EPROTO;
235 break;
236 }
237
238 ret = rxrpc_send_data(iocb, rx, call, msg, len);
239 break;
240
241 case RXRPC_CMD_SEND_ABORT:
242 rxrpc_send_abort(call, abort_code);
243 break;
244 default:
245 BUG();
246 }
247
248 out:
249 rxrpc_put_call(call);
250 _leave(" = %d", ret);
251 return ret;
252}
253
254/*
255 * send a packet through the transport endpoint
256 */
257int rxrpc_send_packet(struct rxrpc_transport *trans, struct sk_buff *skb)
258{
259 struct kvec iov[1];
260 struct msghdr msg;
261 int ret, opt;
262
263 _enter(",{%d}", skb->len);
264
265 iov[0].iov_base = skb->head;
266 iov[0].iov_len = skb->len;
267
268 msg.msg_name = &trans->peer->srx.transport.sin;
269 msg.msg_namelen = sizeof(trans->peer->srx.transport.sin);
270 msg.msg_control = NULL;
271 msg.msg_controllen = 0;
272 msg.msg_flags = 0;
273
274 /* send the packet with the don't fragment bit set if we currently
275 * think it's small enough */
276 if (skb->len - sizeof(struct rxrpc_header) < trans->peer->maxdata) {
277 down_read(&trans->local->defrag_sem);
278 /* send the packet by UDP
279 * - returns -EMSGSIZE if UDP would have to fragment the packet
280 * to go out of the interface
281 * - in which case, we'll have processed the ICMP error
282 * message and update the peer record
283 */
284 ret = kernel_sendmsg(trans->local->socket, &msg, iov, 1,
285 iov[0].iov_len);
286
287 up_read(&trans->local->defrag_sem);
288 if (ret == -EMSGSIZE)
289 goto send_fragmentable;
290
291 _leave(" = %d [%u]", ret, trans->peer->maxdata);
292 return ret;
293 }
294
295send_fragmentable:
296 /* attempt to send this message with fragmentation enabled */
297 _debug("send fragment");
298
299 down_write(&trans->local->defrag_sem);
300 opt = IP_PMTUDISC_DONT;
301 ret = kernel_setsockopt(trans->local->socket, SOL_IP, IP_MTU_DISCOVER,
302 (char *) &opt, sizeof(opt));
303 if (ret == 0) {
304 ret = kernel_sendmsg(trans->local->socket, &msg, iov, 1,
305 iov[0].iov_len);
306
307 opt = IP_PMTUDISC_DO;
308 kernel_setsockopt(trans->local->socket, SOL_IP,
309 IP_MTU_DISCOVER, (char *) &opt, sizeof(opt));
310 }
311
312 up_write(&trans->local->defrag_sem);
313 _leave(" = %d [frag %u]", ret, trans->peer->maxdata);
314 return ret;
315}
316
317/*
318 * wait for space to appear in the transmit/ACK window
319 * - caller holds the socket locked
320 */
321static int rxrpc_wait_for_tx_window(struct rxrpc_sock *rx,
322 struct rxrpc_call *call,
323 long *timeo)
324{
325 DECLARE_WAITQUEUE(myself, current);
326 int ret;
327
328 _enter(",{%d},%ld",
329 CIRC_SPACE(call->acks_head, call->acks_tail, call->acks_winsz),
330 *timeo);
331
332 add_wait_queue(&call->tx_waitq, &myself);
333
334 for (;;) {
335 set_current_state(TASK_INTERRUPTIBLE);
336 ret = 0;
337 if (CIRC_SPACE(call->acks_head, call->acks_tail,
338 call->acks_winsz) > 0)
339 break;
340 if (signal_pending(current)) {
341 ret = sock_intr_errno(*timeo);
342 break;
343 }
344
345 release_sock(&rx->sk);
346 *timeo = schedule_timeout(*timeo);
347 lock_sock(&rx->sk);
348 }
349
350 remove_wait_queue(&call->tx_waitq, &myself);
351 set_current_state(TASK_RUNNING);
352 _leave(" = %d", ret);
353 return ret;
354}
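
CIRC_SPACE() comes from <linux/circ_buf.h> and, for a power-of-two sized ring, reports how many slots the producer may still fill without catching up to the consumer. The Tx window above is exactly that shape: acks_head is the producer index and acks_tail the consumer index. A condensed sketch of the producer step, with illustrative names:

	#include <linux/circ_buf.h>

	static int ex_window_add(unsigned long *ring, unsigned *head,
				 unsigned tail, unsigned size,
				 unsigned long entry)
	{
		if (CIRC_SPACE(*head, tail, size) < 1)
			return -EAGAIN;		/* full: caller must wait */

		ring[*head] = entry;
		smp_wmb();			/* publish slot before head */
		*head = (*head + 1) & (size - 1);
		return 0;
	}
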
355
356/*
357 * attempt to schedule an instant Tx resend
358 */
359static inline void rxrpc_instant_resend(struct rxrpc_call *call)
360{
361 read_lock_bh(&call->state_lock);
362 if (try_to_del_timer_sync(&call->resend_timer) >= 0) {
363 clear_bit(RXRPC_CALL_RUN_RTIMER, &call->flags);
364 if (call->state < RXRPC_CALL_COMPLETE &&
365 !test_and_set_bit(RXRPC_CALL_RESEND_TIMER, &call->events))
366 schedule_work(&call->processor);
367 }
368 read_unlock_bh(&call->state_lock);
369}
370
371/*
372 * queue a packet for transmission, set the resend timer and attempt
373 * to send the packet immediately
374 */
375static void rxrpc_queue_packet(struct rxrpc_call *call, struct sk_buff *skb,
376 bool last)
377{
378 struct rxrpc_skb_priv *sp = rxrpc_skb(skb);
379 int ret;
380
381 _net("queue skb %p [%d]", skb, call->acks_head);
382
383 ASSERT(call->acks_window != NULL);
384 call->acks_window[call->acks_head] = (unsigned long) skb;
385 smp_wmb();
386 call->acks_head = (call->acks_head + 1) & (call->acks_winsz - 1);
387
388 if (last || call->state == RXRPC_CALL_SERVER_ACK_REQUEST) {
389 _debug("________awaiting reply/ACK__________");
390 write_lock_bh(&call->state_lock);
391 switch (call->state) {
392 case RXRPC_CALL_CLIENT_SEND_REQUEST:
393 call->state = RXRPC_CALL_CLIENT_AWAIT_REPLY;
394 break;
395 case RXRPC_CALL_SERVER_ACK_REQUEST:
396 call->state = RXRPC_CALL_SERVER_SEND_REPLY;
397 if (!last)
398				break;	/* when last, fall through to AWAIT_ACK */
399 case RXRPC_CALL_SERVER_SEND_REPLY:
400 call->state = RXRPC_CALL_SERVER_AWAIT_ACK;
401 break;
402 default:
403 break;
404 }
405 write_unlock_bh(&call->state_lock);
406 }
407
408 _proto("Tx DATA %%%u { #%u }",
409 ntohl(sp->hdr.serial), ntohl(sp->hdr.seq));
410
411 sp->need_resend = 0;
412 sp->resend_at = jiffies + rxrpc_resend_timeout * HZ;
413 if (!test_and_set_bit(RXRPC_CALL_RUN_RTIMER, &call->flags)) {
414 _debug("run timer");
415 call->resend_timer.expires = sp->resend_at;
416 add_timer(&call->resend_timer);
417 }
418
419 /* attempt to cancel the rx-ACK timer, deferring reply transmission if
420 * we're ACK'ing the request phase of an incoming call */
421 ret = -EAGAIN;
422 if (try_to_del_timer_sync(&call->ack_timer) >= 0) {
423 /* the packet may be freed by rxrpc_process_call() before this
424 * returns */
425 ret = rxrpc_send_packet(call->conn->trans, skb);
426 _net("sent skb %p", skb);
427 } else {
428 _debug("failed to delete ACK timer");
429 }
430
431 if (ret < 0) {
432 _debug("need instant resend %d", ret);
433 sp->need_resend = 1;
434 rxrpc_instant_resend(call);
435 }
436
437 _leave("");
438}
439
440/*
441 * send data through a socket
442 * - must be called in process context
443 * - caller holds the socket locked
444 */
445static int rxrpc_send_data(struct kiocb *iocb,
446 struct rxrpc_sock *rx,
447 struct rxrpc_call *call,
448 struct msghdr *msg, size_t len)
449{
450 struct rxrpc_skb_priv *sp;
451 unsigned char __user *from;
452 struct sk_buff *skb;
453 struct iovec *iov;
454 struct sock *sk = &rx->sk;
455 long timeo;
456 bool more;
457 int ret, ioc, segment, copied;
458
459 _enter(",,,{%zu},%zu", msg->msg_iovlen, len);
460
461 timeo = sock_sndtimeo(sk, msg->msg_flags & MSG_DONTWAIT);
462
463 /* this should be in poll */
464 clear_bit(SOCK_ASYNC_NOSPACE, &sk->sk_socket->flags);
465
466 if (sk->sk_err || (sk->sk_shutdown & SEND_SHUTDOWN))
467 return -EPIPE;
468
469 iov = msg->msg_iov;
470 ioc = msg->msg_iovlen - 1;
471 from = iov->iov_base;
472 segment = iov->iov_len;
473 iov++;
474 more = msg->msg_flags & MSG_MORE;
475
476 skb = call->tx_pending;
477 call->tx_pending = NULL;
478
479 copied = 0;
480 do {
481 int copy;
482
483 if (segment > len)
484 segment = len;
485
486 _debug("SEGMENT %d @%p", segment, from);
487
488 if (!skb) {
489 size_t size, chunk, max, space;
490
491 _debug("alloc");
492
493 if (CIRC_SPACE(call->acks_head, call->acks_tail,
494 call->acks_winsz) <= 0) {
495 ret = -EAGAIN;
496 if (msg->msg_flags & MSG_DONTWAIT)
497 goto maybe_error;
498 ret = rxrpc_wait_for_tx_window(rx, call,
499 &timeo);
500 if (ret < 0)
501 goto maybe_error;
502 }
503
504 max = call->conn->trans->peer->maxdata;
505 max -= call->conn->security_size;
506 max &= ~(call->conn->size_align - 1UL);
507
508 chunk = max;
509 if (chunk > len)
510 chunk = len;
511
512 space = chunk + call->conn->size_align;
513 space &= ~(call->conn->size_align - 1UL);
514
515 size = space + call->conn->header_size;
516
517 _debug("SIZE: %zu/%zu/%zu", chunk, space, size);
518
519 /* create a buffer that we can retain until it's ACK'd */
520 skb = sock_alloc_send_skb(
521 sk, size, msg->msg_flags & MSG_DONTWAIT, &ret);
522 if (!skb)
523 goto maybe_error;
524
525 rxrpc_new_skb(skb);
526
527 _debug("ALLOC SEND %p", skb);
528
529 ASSERTCMP(skb->mark, ==, 0);
530
531 _debug("HS: %u", call->conn->header_size);
532 skb_reserve(skb, call->conn->header_size);
533 skb->len += call->conn->header_size;
534
535 sp = rxrpc_skb(skb);
536 sp->remain = chunk;
537 if (sp->remain > skb_tailroom(skb))
538 sp->remain = skb_tailroom(skb);
539
540 _net("skb: hr %d, tr %d, hl %d, rm %d",
541 skb_headroom(skb),
542 skb_tailroom(skb),
543 skb_headlen(skb),
544 sp->remain);
545
546 skb->ip_summed = CHECKSUM_UNNECESSARY;
547 }
548
549 _debug("append");
550 sp = rxrpc_skb(skb);
551
552 /* append next segment of data to the current buffer */
553 copy = skb_tailroom(skb);
554 ASSERTCMP(copy, >, 0);
555 if (copy > segment)
556 copy = segment;
557 if (copy > sp->remain)
558 copy = sp->remain;
559
560 _debug("add");
561 ret = skb_add_data(skb, from, copy);
562 _debug("added");
563 if (ret < 0)
564 goto efault;
565 sp->remain -= copy;
566 skb->mark += copy;
567		copied += copy;
568 len -= copy;
569 segment -= copy;
570 from += copy;
571 while (segment == 0 && ioc > 0) {
572 from = iov->iov_base;
573 segment = iov->iov_len;
574 iov++;
575 ioc--;
576 }
577 if (len == 0) {
578 segment = 0;
579 ioc = 0;
580 }
581
582 /* check for the far side aborting the call or a network error
583 * occurring */
584 if (call->state > RXRPC_CALL_COMPLETE)
585 goto call_aborted;
586
587 /* add the packet to the send queue if it's now full */
588 if (sp->remain <= 0 || (segment == 0 && !more)) {
589 struct rxrpc_connection *conn = call->conn;
590 size_t pad;
591
592 /* pad out if we're using security */
593 if (conn->security) {
594 pad = conn->security_size + skb->mark;
595 pad = conn->size_align - pad;
596 pad &= conn->size_align - 1;
597 _debug("pad %zu", pad);
598 if (pad)
599 memset(skb_put(skb, pad), 0, pad);
600 }
601
602 sp->hdr.epoch = conn->epoch;
603 sp->hdr.cid = call->cid;
604 sp->hdr.callNumber = call->call_id;
605 sp->hdr.seq =
606 htonl(atomic_inc_return(&call->sequence));
607 sp->hdr.serial =
608 htonl(atomic_inc_return(&conn->serial));
609 sp->hdr.type = RXRPC_PACKET_TYPE_DATA;
610 sp->hdr.userStatus = 0;
611 sp->hdr.securityIndex = conn->security_ix;
612 sp->hdr._rsvd = 0;
613 sp->hdr.serviceId = conn->service_id;
614
615 sp->hdr.flags = conn->out_clientflag;
616 if (len == 0 && !more)
617 sp->hdr.flags |= RXRPC_LAST_PACKET;
618 else if (CIRC_SPACE(call->acks_head, call->acks_tail,
619 call->acks_winsz) > 1)
620 sp->hdr.flags |= RXRPC_MORE_PACKETS;
621
622 ret = rxrpc_secure_packet(
623 call, skb, skb->mark,
624 skb->head + sizeof(struct rxrpc_header));
625 if (ret < 0)
626 goto out;
627
628 memcpy(skb->head, &sp->hdr,
629 sizeof(struct rxrpc_header));
630 rxrpc_queue_packet(call, skb, segment == 0 && !more);
631 skb = NULL;
632 }
633
634 } while (segment > 0);
635
636out:
637 call->tx_pending = skb;
638 _leave(" = %d", ret);
639 return ret;
640
641call_aborted:
642 rxrpc_free_skb(skb);
643 if (call->state == RXRPC_CALL_NETWORK_ERROR)
644 ret = call->conn->trans->peer->net_error;
645 else
646 ret = -ECONNABORTED;
647 _leave(" = %d", ret);
648 return ret;
649
650maybe_error:
651 if (copied)
652 ret = copied;
653 goto out;
654
655efault:
656 ret = -EFAULT;
657 goto out;
658}
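
The padding step in rxrpc_send_data() rounds the secured payload up so that security_size plus the data length is a whole number of size_align blocks. A worked sketch of the same arithmetic with concrete numbers (the values are examples only):

	/* Equivalent form of the pad computation above. With
	 * security_size = 4, mark = 21, align = 8:
	 * (4 + 21) & 7 = 1, so pad = (8 - 1) & 7 = 7,
	 * and 4 + 21 + 7 = 32, a whole number of 8-byte blocks. */
	static size_t ex_pad(size_t security_size, size_t mark, size_t align)
	{
		size_t pad = align - ((security_size + mark) & (align - 1));

		return pad & (align - 1);
	}
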
diff --git a/net/rxrpc/ar-peer.c b/net/rxrpc/ar-peer.c
new file mode 100644
index 000000000000..69ac355546ae
--- /dev/null
+++ b/net/rxrpc/ar-peer.c
@@ -0,0 +1,273 @@
1/* RxRPC remote transport endpoint management
2 *
3 * Copyright (C) 2007 Red Hat, Inc. All Rights Reserved.
4 * Written by David Howells (dhowells@redhat.com)
5 *
6 * This program is free software; you can redistribute it and/or
7 * modify it under the terms of the GNU General Public License
8 * as published by the Free Software Foundation; either version
9 * 2 of the License, or (at your option) any later version.
10 */
11
12#include <linux/module.h>
13#include <linux/net.h>
14#include <linux/skbuff.h>
15#include <linux/udp.h>
16#include <linux/in.h>
17#include <linux/in6.h>
18#include <linux/icmp.h>
19#include <net/sock.h>
20#include <net/af_rxrpc.h>
21#include <net/ip.h>
22#include "ar-internal.h"
23
24static LIST_HEAD(rxrpc_peers);
25static DEFINE_RWLOCK(rxrpc_peer_lock);
26static DECLARE_WAIT_QUEUE_HEAD(rxrpc_peer_wq);
27
28static void rxrpc_destroy_peer(struct work_struct *work);
29
30/*
31 * allocate a new peer
32 */
33static struct rxrpc_peer *rxrpc_alloc_peer(struct sockaddr_rxrpc *srx,
34 gfp_t gfp)
35{
36 struct rxrpc_peer *peer;
37
38 _enter("");
39
40 peer = kzalloc(sizeof(struct rxrpc_peer), gfp);
41 if (peer) {
42 INIT_WORK(&peer->destroyer, &rxrpc_destroy_peer);
43 INIT_LIST_HEAD(&peer->link);
44 INIT_LIST_HEAD(&peer->error_targets);
45 spin_lock_init(&peer->lock);
46 atomic_set(&peer->usage, 1);
47 peer->debug_id = atomic_inc_return(&rxrpc_debug_id);
48 memcpy(&peer->srx, srx, sizeof(*srx));
49
50 peer->mtu = peer->if_mtu = 65535;
51
52 if (srx->transport.family == AF_INET) {
53 peer->hdrsize = sizeof(struct iphdr);
54 switch (srx->transport_type) {
55 case SOCK_DGRAM:
56 peer->hdrsize += sizeof(struct udphdr);
57 break;
58 default:
59 BUG();
60 break;
61 }
62 } else {
63 BUG();
64 }
65
66 peer->hdrsize += sizeof(struct rxrpc_header);
67 peer->maxdata = peer->mtu - peer->hdrsize;
68 }
69
70 _leave(" = %p", peer);
71 return peer;
72}
73
74/*
75 * obtain a remote transport endpoint for the specified address
76 */
77struct rxrpc_peer *rxrpc_get_peer(struct sockaddr_rxrpc *srx, gfp_t gfp)
78{
79 struct rxrpc_peer *peer, *candidate;
80 const char *new = "old";
81 int usage;
82
83 _enter("{%d,%d,%u.%u.%u.%u+%hu}",
84 srx->transport_type,
85 srx->transport_len,
86 NIPQUAD(srx->transport.sin.sin_addr),
87 ntohs(srx->transport.sin.sin_port));
88
89 /* search the peer list first */
90 read_lock_bh(&rxrpc_peer_lock);
91 list_for_each_entry(peer, &rxrpc_peers, link) {
92 _debug("check PEER %d { u=%d t=%d l=%d }",
93 peer->debug_id,
94 atomic_read(&peer->usage),
95 peer->srx.transport_type,
96 peer->srx.transport_len);
97
98 if (atomic_read(&peer->usage) > 0 &&
99 peer->srx.transport_type == srx->transport_type &&
100 peer->srx.transport_len == srx->transport_len &&
101 memcmp(&peer->srx.transport,
102 &srx->transport,
103 srx->transport_len) == 0)
104 goto found_extant_peer;
105 }
106 read_unlock_bh(&rxrpc_peer_lock);
107
108 /* not yet present - create a candidate for a new record and then
109 * redo the search */
110 candidate = rxrpc_alloc_peer(srx, gfp);
111 if (!candidate) {
112 _leave(" = -ENOMEM");
113 return ERR_PTR(-ENOMEM);
114 }
115
116 write_lock_bh(&rxrpc_peer_lock);
117
118 list_for_each_entry(peer, &rxrpc_peers, link) {
119 if (atomic_read(&peer->usage) > 0 &&
120 peer->srx.transport_type == srx->transport_type &&
121 peer->srx.transport_len == srx->transport_len &&
122 memcmp(&peer->srx.transport,
123 &srx->transport,
124 srx->transport_len) == 0)
125 goto found_extant_second;
126 }
127
128 /* we can now add the new candidate to the list */
129 peer = candidate;
130 candidate = NULL;
131
132 list_add_tail(&peer->link, &rxrpc_peers);
133 write_unlock_bh(&rxrpc_peer_lock);
134 new = "new";
135
136success:
137 _net("PEER %s %d {%d,%u,%u.%u.%u.%u+%hu}",
138 new,
139 peer->debug_id,
140 peer->srx.transport_type,
141 peer->srx.transport.family,
142 NIPQUAD(peer->srx.transport.sin.sin_addr),
143 ntohs(peer->srx.transport.sin.sin_port));
144
145 _leave(" = %p {u=%d}", peer, atomic_read(&peer->usage));
146 return peer;
147
148 /* we found the peer in the list immediately */
149found_extant_peer:
150 usage = atomic_inc_return(&peer->usage);
151 read_unlock_bh(&rxrpc_peer_lock);
152 goto success;
153
154 /* we found the peer on the second time through the list */
155found_extant_second:
156 usage = atomic_inc_return(&peer->usage);
157 write_unlock_bh(&rxrpc_peer_lock);
158 kfree(candidate);
159 goto success;
160}
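
rxrpc_get_peer() above uses a common two-phase pattern: search under the read lock, allocate a candidate with no locks held (the allocation may sleep), then search again under the write lock and only insert the candidate if nobody raced in ahead. A condensed sketch of the shape, with purely illustrative names:

	static struct ex_obj *ex_get(struct ex_key *key)
	{
		struct ex_obj *obj, *candidate;

		read_lock_bh(&ex_lock);
		obj = ex_find_and_get(key);	/* fast path: takes a ref */
		read_unlock_bh(&ex_lock);
		if (obj)
			return obj;

		candidate = ex_alloc(key);	/* may sleep: no locks held */
		if (!candidate)
			return ERR_PTR(-ENOMEM);

		write_lock_bh(&ex_lock);
		obj = ex_find_and_get(key);	/* did someone beat us? */
		if (!obj) {
			ex_insert(candidate);	/* we won: publish it */
			obj = candidate;
			candidate = NULL;
		}
		write_unlock_bh(&ex_lock);

		kfree(candidate);		/* no-op if we won the race */
		return obj;
	}
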
161
162/*
163 * find the peer associated with a packet
164 */
165struct rxrpc_peer *rxrpc_find_peer(struct rxrpc_local *local,
166 __be32 addr, __be16 port)
167{
168 struct rxrpc_peer *peer;
169
170 _enter("");
171
172 /* search the peer list */
173 read_lock_bh(&rxrpc_peer_lock);
174
175 if (local->srx.transport.family == AF_INET &&
176 local->srx.transport_type == SOCK_DGRAM
177 ) {
178 list_for_each_entry(peer, &rxrpc_peers, link) {
179 if (atomic_read(&peer->usage) > 0 &&
180 peer->srx.transport_type == SOCK_DGRAM &&
181 peer->srx.transport.family == AF_INET &&
182 peer->srx.transport.sin.sin_port == port &&
183 peer->srx.transport.sin.sin_addr.s_addr == addr)
184 goto found_UDP_peer;
185 }
186
187 goto new_UDP_peer;
188 }
189
190 read_unlock_bh(&rxrpc_peer_lock);
191 _leave(" = -EAFNOSUPPORT");
192 return ERR_PTR(-EAFNOSUPPORT);
193
194found_UDP_peer:
195 _net("Rx UDP DGRAM from peer %d", peer->debug_id);
196 atomic_inc(&peer->usage);
197 read_unlock_bh(&rxrpc_peer_lock);
198 _leave(" = %p", peer);
199 return peer;
200
201new_UDP_peer:
202 _net("Rx UDP DGRAM from NEW peer %d", peer->debug_id);
203 read_unlock_bh(&rxrpc_peer_lock);
204 _leave(" = -EBUSY [new]");
205 return ERR_PTR(-EBUSY);
206}
207
208/*
209 * release a remote transport endpoint
210 */
211void rxrpc_put_peer(struct rxrpc_peer *peer)
212{
213 _enter("%p{u=%d}", peer, atomic_read(&peer->usage));
214
215 ASSERTCMP(atomic_read(&peer->usage), >, 0);
216
217 if (likely(!atomic_dec_and_test(&peer->usage))) {
218 _leave(" [in use]");
219 return;
220 }
221
222 schedule_work(&peer->destroyer);
223 _leave("");
224}
225
226/*
227 * destroy a remote transport endpoint
228 */
229static void rxrpc_destroy_peer(struct work_struct *work)
230{
231 struct rxrpc_peer *peer =
232 container_of(work, struct rxrpc_peer, destroyer);
233
234 _enter("%p{%d}", peer, atomic_read(&peer->usage));
235
236 write_lock_bh(&rxrpc_peer_lock);
237 list_del(&peer->link);
238 write_unlock_bh(&rxrpc_peer_lock);
239
240 _net("DESTROY PEER %d", peer->debug_id);
241 kfree(peer);
242
243 if (list_empty(&rxrpc_peers))
244 wake_up_all(&rxrpc_peer_wq);
245 _leave("");
246}
247
248/*
249 * wait for the remaining peer records to go away - peers are destroyed as
250 * soon as their usage counts reach zero, so all we can do is wait
251 */
252void __exit rxrpc_destroy_all_peers(void)
253{
254	DECLARE_WAITQUEUE(myself, current);
255
256 _enter("");
257
258 /* we simply have to wait for them to go away */
259 if (!list_empty(&rxrpc_peers)) {
260 set_current_state(TASK_UNINTERRUPTIBLE);
261 add_wait_queue(&rxrpc_peer_wq, &myself);
262
263 while (!list_empty(&rxrpc_peers)) {
264 schedule();
265 set_current_state(TASK_UNINTERRUPTIBLE);
266 }
267
268 remove_wait_queue(&rxrpc_peer_wq, &myself);
269 set_current_state(TASK_RUNNING);
270 }
271
272 _leave("");
273}
diff --git a/net/rxrpc/ar-proc.c b/net/rxrpc/ar-proc.c
new file mode 100644
index 000000000000..58f4b4e5cece
--- /dev/null
+++ b/net/rxrpc/ar-proc.c
@@ -0,0 +1,247 @@
1/* /proc/net/ support for AF_RXRPC
2 *
3 * Copyright (C) 2007 Red Hat, Inc. All Rights Reserved.
4 * Written by David Howells (dhowells@redhat.com)
5 *
6 * This program is free software; you can redistribute it and/or
7 * modify it under the terms of the GNU General Public License
8 * as published by the Free Software Foundation; either version
9 * 2 of the License, or (at your option) any later version.
10 */
11
12#include <linux/module.h>
13#include <net/sock.h>
14#include <net/af_rxrpc.h>
15#include "ar-internal.h"
16
17static const char *rxrpc_conn_states[] = {
18 [RXRPC_CONN_UNUSED] = "Unused ",
19 [RXRPC_CONN_CLIENT] = "Client ",
20 [RXRPC_CONN_SERVER_UNSECURED] = "SvUnsec ",
21 [RXRPC_CONN_SERVER_CHALLENGING] = "SvChall ",
22 [RXRPC_CONN_SERVER] = "SvSecure",
23 [RXRPC_CONN_REMOTELY_ABORTED] = "RmtAbort",
24 [RXRPC_CONN_LOCALLY_ABORTED] = "LocAbort",
25 [RXRPC_CONN_NETWORK_ERROR] = "NetError",
26};
27
28const char *rxrpc_call_states[] = {
29 [RXRPC_CALL_CLIENT_SEND_REQUEST] = "ClSndReq",
30 [RXRPC_CALL_CLIENT_AWAIT_REPLY] = "ClAwtRpl",
31 [RXRPC_CALL_CLIENT_RECV_REPLY] = "ClRcvRpl",
32 [RXRPC_CALL_CLIENT_FINAL_ACK] = "ClFnlACK",
33 [RXRPC_CALL_SERVER_SECURING] = "SvSecure",
34 [RXRPC_CALL_SERVER_ACCEPTING] = "SvAccept",
35 [RXRPC_CALL_SERVER_RECV_REQUEST] = "SvRcvReq",
36 [RXRPC_CALL_SERVER_ACK_REQUEST] = "SvAckReq",
37 [RXRPC_CALL_SERVER_SEND_REPLY] = "SvSndRpl",
38 [RXRPC_CALL_SERVER_AWAIT_ACK] = "SvAwtACK",
39 [RXRPC_CALL_COMPLETE] = "Complete",
40 [RXRPC_CALL_SERVER_BUSY] = "SvBusy ",
41 [RXRPC_CALL_REMOTELY_ABORTED] = "RmtAbort",
42 [RXRPC_CALL_LOCALLY_ABORTED] = "LocAbort",
43 [RXRPC_CALL_NETWORK_ERROR] = "NetError",
44 [RXRPC_CALL_DEAD] = "Dead ",
45};
46
47/*
48 * generate a list of extant and dead calls in /proc/net/rxrpc_calls
49 */
50static void *rxrpc_call_seq_start(struct seq_file *seq, loff_t *_pos)
51{
52 struct list_head *_p;
53 loff_t pos = *_pos;
54
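	/* position 0 produces the header line; positions 1..n step through the
	 * global list of calls */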
55 read_lock(&rxrpc_call_lock);
56 if (!pos)
57 return SEQ_START_TOKEN;
58 pos--;
59
60 list_for_each(_p, &rxrpc_calls)
61 if (!pos--)
62 break;
63
64 return _p != &rxrpc_calls ? _p : NULL;
65}
66
67static void *rxrpc_call_seq_next(struct seq_file *seq, void *v, loff_t *pos)
68{
69 struct list_head *_p;
70
71 (*pos)++;
72
73 _p = v;
74 _p = (v == SEQ_START_TOKEN) ? rxrpc_calls.next : _p->next;
75
76 return _p != &rxrpc_calls ? _p : NULL;
77}
78
79static void rxrpc_call_seq_stop(struct seq_file *seq, void *v)
80{
81 read_unlock(&rxrpc_call_lock);
82}
83
84static int rxrpc_call_seq_show(struct seq_file *seq, void *v)
85{
86 struct rxrpc_transport *trans;
87 struct rxrpc_call *call;
88 char lbuff[4 + 4 + 4 + 4 + 5 + 1], rbuff[4 + 4 + 4 + 4 + 5 + 1];
89
90 if (v == SEQ_START_TOKEN) {
91 seq_puts(seq,
92 "Proto Local Remote "
93 " SvID ConnID CallID End Use State Abort "
94 " UserID\n");
95 return 0;
96 }
97
98 call = list_entry(v, struct rxrpc_call, link);
99 trans = call->conn->trans;
100
101 sprintf(lbuff, NIPQUAD_FMT":%u",
102 NIPQUAD(trans->local->srx.transport.sin.sin_addr),
103 ntohs(trans->local->srx.transport.sin.sin_port));
104
105 sprintf(rbuff, NIPQUAD_FMT":%u",
106 NIPQUAD(trans->peer->srx.transport.sin.sin_addr),
107 ntohs(trans->peer->srx.transport.sin.sin_port));
108
109 seq_printf(seq,
110 "UDP %-22.22s %-22.22s %4x %08x %08x %s %3u"
111 " %-8.8s %08x %lx\n",
112 lbuff,
113 rbuff,
114 ntohs(call->conn->service_id),
115 ntohl(call->conn->cid),
116 ntohl(call->call_id),
117 call->conn->in_clientflag ? "Svc" : "Clt",
118 atomic_read(&call->usage),
119 rxrpc_call_states[call->state],
120 call->abort_code,
121 call->user_call_ID);
122
123 return 0;
124}
125
126static struct seq_operations rxrpc_call_seq_ops = {
127 .start = rxrpc_call_seq_start,
128 .next = rxrpc_call_seq_next,
129 .stop = rxrpc_call_seq_stop,
130 .show = rxrpc_call_seq_show,
131};
132
133static int rxrpc_call_seq_open(struct inode *inode, struct file *file)
134{
135 return seq_open(file, &rxrpc_call_seq_ops);
136}
137
138struct file_operations rxrpc_call_seq_fops = {
139 .owner = THIS_MODULE,
140 .open = rxrpc_call_seq_open,
141 .read = seq_read,
142 .llseek = seq_lseek,
143	.release	= seq_release,	/* seq_open() was used, so there's no private data */
144};
145
146/*
147 * generate a list of extant virtual connections in /proc/net/rxrpc_conns
148 */
149static void *rxrpc_connection_seq_start(struct seq_file *seq, loff_t *_pos)
150{
151 struct list_head *_p;
152 loff_t pos = *_pos;
153
154 read_lock(&rxrpc_connection_lock);
155 if (!pos)
156 return SEQ_START_TOKEN;
157 pos--;
158
159 list_for_each(_p, &rxrpc_connections)
160 if (!pos--)
161 break;
162
163 return _p != &rxrpc_connections ? _p : NULL;
164}
165
166static void *rxrpc_connection_seq_next(struct seq_file *seq, void *v,
167 loff_t *pos)
168{
169 struct list_head *_p;
170
171 (*pos)++;
172
173 _p = v;
174 _p = (v == SEQ_START_TOKEN) ? rxrpc_connections.next : _p->next;
175
176 return _p != &rxrpc_connections ? _p : NULL;
177}
178
179static void rxrpc_connection_seq_stop(struct seq_file *seq, void *v)
180{
181 read_unlock(&rxrpc_connection_lock);
182}
183
184static int rxrpc_connection_seq_show(struct seq_file *seq, void *v)
185{
186 struct rxrpc_connection *conn;
187 struct rxrpc_transport *trans;
188 char lbuff[4 + 4 + 4 + 4 + 5 + 1], rbuff[4 + 4 + 4 + 4 + 5 + 1];
189
190 if (v == SEQ_START_TOKEN) {
191 seq_puts(seq,
192 "Proto Local Remote "
193 " SvID ConnID Calls End Use State Key "
194 " Serial ISerial\n"
195 );
196 return 0;
197 }
198
199 conn = list_entry(v, struct rxrpc_connection, link);
200 trans = conn->trans;
201
202 sprintf(lbuff, NIPQUAD_FMT":%u",
203 NIPQUAD(trans->local->srx.transport.sin.sin_addr),
204 ntohs(trans->local->srx.transport.sin.sin_port));
205
206 sprintf(rbuff, NIPQUAD_FMT":%u",
207 NIPQUAD(trans->peer->srx.transport.sin.sin_addr),
208 ntohs(trans->peer->srx.transport.sin.sin_port));
209
210 seq_printf(seq,
211 "UDP %-22.22s %-22.22s %4x %08x %08x %s %3u"
212 " %s %08x %08x %08x\n",
213 lbuff,
214 rbuff,
215 ntohs(conn->service_id),
216 ntohl(conn->cid),
217 conn->call_counter,
218 conn->in_clientflag ? "Svc" : "Clt",
219 atomic_read(&conn->usage),
220 rxrpc_conn_states[conn->state],
221 key_serial(conn->key),
222 atomic_read(&conn->serial),
223 atomic_read(&conn->hi_serial));
224
225 return 0;
226}
227
228static struct seq_operations rxrpc_connection_seq_ops = {
229 .start = rxrpc_connection_seq_start,
230 .next = rxrpc_connection_seq_next,
231 .stop = rxrpc_connection_seq_stop,
232 .show = rxrpc_connection_seq_show,
233};
234
236static int rxrpc_connection_seq_open(struct inode *inode, struct file *file)
237{
238 return seq_open(file, &rxrpc_connection_seq_ops);
239}
240
241struct file_operations rxrpc_connection_seq_fops = {
242 .owner = THIS_MODULE,
243 .open = rxrpc_connection_seq_open,
244 .read = seq_read,
245 .llseek = seq_lseek,
246	.release	= seq_release,	/* seq_open() was used, so there's no private data */
247};
diff --git a/net/rxrpc/ar-recvmsg.c b/net/rxrpc/ar-recvmsg.c
new file mode 100644
index 000000000000..e947d5c15900
--- /dev/null
+++ b/net/rxrpc/ar-recvmsg.c
@@ -0,0 +1,366 @@
1/* RxRPC recvmsg() implementation
2 *
3 * Copyright (C) 2007 Red Hat, Inc. All Rights Reserved.
4 * Written by David Howells (dhowells@redhat.com)
5 *
6 * This program is free software; you can redistribute it and/or
7 * modify it under the terms of the GNU General Public License
8 * as published by the Free Software Foundation; either version
9 * 2 of the License, or (at your option) any later version.
10 */
11
12#include <linux/net.h>
13#include <linux/skbuff.h>
14#include <net/sock.h>
15#include <net/af_rxrpc.h>
16#include "ar-internal.h"
17
18/*
19 * remove a call's user ID from the socket tree to make the user ID available
20 * again and so that it won't be seen again in association with that call
21 */
22static void rxrpc_remove_user_ID(struct rxrpc_sock *rx, struct rxrpc_call *call)
23{
24 _debug("RELEASE CALL %d", call->debug_id);
25
26 if (test_bit(RXRPC_CALL_HAS_USERID, &call->flags)) {
27 write_lock_bh(&rx->call_lock);
28 rb_erase(&call->sock_node, &call->socket->calls);
29 clear_bit(RXRPC_CALL_HAS_USERID, &call->flags);
30 write_unlock_bh(&rx->call_lock);
31 }
32
33 read_lock_bh(&call->state_lock);
34 if (!test_bit(RXRPC_CALL_RELEASED, &call->flags) &&
35 !test_and_set_bit(RXRPC_CALL_RELEASE, &call->events))
36 schedule_work(&call->processor);
37 read_unlock_bh(&call->state_lock);
38}
39
40/*
41 * receive a message from an RxRPC socket
42 * - we need to be careful about two or more threads calling recvmsg
43 * simultaneously
44 */
45int rxrpc_recvmsg(struct kiocb *iocb, struct socket *sock,
46 struct msghdr *msg, size_t len, int flags)
47{
48 struct rxrpc_skb_priv *sp;
49 struct rxrpc_call *call = NULL, *continue_call = NULL;
50 struct rxrpc_sock *rx = rxrpc_sk(sock->sk);
51 struct sk_buff *skb;
52 long timeo;
53 int copy, ret, ullen, offset, copied = 0;
54 u32 abort_code;
55
56 DEFINE_WAIT(wait);
57
58 _enter(",,,%zu,%d", len, flags);
59
60 if (flags & (MSG_OOB | MSG_TRUNC))
61 return -EOPNOTSUPP;
62
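	/* the user call ID is delivered as an unsigned long, which is only 32
	 * bits wide for compat callers */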
63 ullen = msg->msg_flags & MSG_CMSG_COMPAT ? 4 : sizeof(unsigned long);
64
65 timeo = sock_rcvtimeo(&rx->sk, flags & MSG_DONTWAIT);
66 msg->msg_flags |= MSG_MORE;
67
68 lock_sock(&rx->sk);
69
70 for (;;) {
71 /* return immediately if a client socket has no outstanding
72 * calls */
73 if (RB_EMPTY_ROOT(&rx->calls)) {
74 if (copied)
75 goto out;
76 if (rx->sk.sk_state != RXRPC_SERVER_LISTENING) {
77 release_sock(&rx->sk);
78 if (continue_call)
79 rxrpc_put_call(continue_call);
80 return -ENODATA;
81 }
82 }
83
84 /* get the next message on the Rx queue */
85 skb = skb_peek(&rx->sk.sk_receive_queue);
86 if (!skb) {
87 /* nothing remains on the queue */
88 if (copied &&
89 (msg->msg_flags & MSG_PEEK || timeo == 0))
90 goto out;
91
92 /* wait for a message to turn up */
93 release_sock(&rx->sk);
94 prepare_to_wait_exclusive(rx->sk.sk_sleep, &wait,
95 TASK_INTERRUPTIBLE);
96 ret = sock_error(&rx->sk);
97 if (ret)
98 goto wait_error;
99
100 if (skb_queue_empty(&rx->sk.sk_receive_queue)) {
101 if (signal_pending(current))
102 goto wait_interrupted;
103 timeo = schedule_timeout(timeo);
104 }
105 finish_wait(rx->sk.sk_sleep, &wait);
106 lock_sock(&rx->sk);
107 continue;
108 }
109
110 peek_next_packet:
111 sp = rxrpc_skb(skb);
112 call = sp->call;
113 ASSERT(call != NULL);
114
115 _debug("next pkt %s", rxrpc_pkts[sp->hdr.type]);
116
117 /* make sure we wait for the state to be updated in this call */
118 spin_lock_bh(&call->lock);
119 spin_unlock_bh(&call->lock);
120
121 if (test_bit(RXRPC_CALL_RELEASED, &call->flags)) {
122 _debug("packet from released call");
123 if (skb_dequeue(&rx->sk.sk_receive_queue) != skb)
124 BUG();
125 rxrpc_free_skb(skb);
126 continue;
127 }
128
129 /* determine whether to continue last data receive */
130 if (continue_call) {
131 _debug("maybe cont");
132 if (call != continue_call ||
133 skb->mark != RXRPC_SKB_MARK_DATA) {
134 release_sock(&rx->sk);
135 rxrpc_put_call(continue_call);
136 _leave(" = %d [noncont]", copied);
137 return copied;
138 }
139 }
140
141 rxrpc_get_call(call);
142
143 /* copy the peer address and timestamp */
144 if (!continue_call) {
145		if (msg->msg_name && msg->msg_namelen > 0) {
146			memcpy(msg->msg_name, &call->conn->trans->peer->srx,
147			       sizeof(call->conn->trans->peer->srx));
			msg->msg_namelen = sizeof(call->conn->trans->peer->srx);
		}
148 sock_recv_timestamp(msg, &rx->sk, skb);
149 }
150
151 /* receive the message */
152 if (skb->mark != RXRPC_SKB_MARK_DATA)
153 goto receive_non_data_message;
154
155 _debug("recvmsg DATA #%u { %d, %d }",
156 ntohl(sp->hdr.seq), skb->len, sp->offset);
157
158 if (!continue_call) {
159 /* only set the control data once per recvmsg() */
160 ret = put_cmsg(msg, SOL_RXRPC, RXRPC_USER_CALL_ID,
161 ullen, &call->user_call_ID);
162 if (ret < 0)
163 goto copy_error;
164 ASSERT(test_bit(RXRPC_CALL_HAS_USERID, &call->flags));
165 }
166
167 ASSERTCMP(ntohl(sp->hdr.seq), >=, call->rx_data_recv);
168 ASSERTCMP(ntohl(sp->hdr.seq), <=, call->rx_data_recv + 1);
169 call->rx_data_recv = ntohl(sp->hdr.seq);
170
171 ASSERTCMP(ntohl(sp->hdr.seq), >, call->rx_data_eaten);
172
173 offset = sp->offset;
174 copy = skb->len - offset;
175 if (copy > len - copied)
176 copy = len - copied;
177
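	/* if the UDP checksum has already been verified (or isn't needed),
	 * just copy; otherwise verify the checksum whilst copying */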
178 if (skb->ip_summed == CHECKSUM_UNNECESSARY) {
179 ret = skb_copy_datagram_iovec(skb, offset,
180 msg->msg_iov, copy);
181 } else {
182 ret = skb_copy_and_csum_datagram_iovec(skb, offset,
183 msg->msg_iov);
184 if (ret == -EINVAL)
185 goto csum_copy_error;
186 }
187
188 if (ret < 0)
189 goto copy_error;
190
191 /* handle piecemeal consumption of data packets */
192 _debug("copied %d+%d", copy, copied);
193
194 offset += copy;
195 copied += copy;
196
197 if (!(flags & MSG_PEEK))
198 sp->offset = offset;
199
200 if (sp->offset < skb->len) {
201 _debug("buffer full");
202 ASSERTCMP(copied, ==, len);
203 break;
204 }
205
206 /* we transferred the whole data packet */
207 if (sp->hdr.flags & RXRPC_LAST_PACKET) {
208 _debug("last");
209 if (call->conn->out_clientflag) {
210 /* last byte of reply received */
211 ret = copied;
212 goto terminal_message;
213 }
214
215 /* last bit of request received */
216 if (!(flags & MSG_PEEK)) {
217 _debug("eat packet");
218 if (skb_dequeue(&rx->sk.sk_receive_queue) !=
219 skb)
220 BUG();
221 rxrpc_free_skb(skb);
222 }
223 msg->msg_flags &= ~MSG_MORE;
224 break;
225 }
226
227 /* move on to the next data message */
228 _debug("next");
229 if (!continue_call)
230 continue_call = sp->call;
231 else
232 rxrpc_put_call(call);
233 call = NULL;
234
235 if (flags & MSG_PEEK) {
236 _debug("peek next");
237 skb = skb->next;
238 if (skb == (struct sk_buff *) &rx->sk.sk_receive_queue)
239 break;
240 goto peek_next_packet;
241 }
242
243 _debug("eat packet");
244 if (skb_dequeue(&rx->sk.sk_receive_queue) != skb)
245 BUG();
246 rxrpc_free_skb(skb);
247 }
248
249 /* end of non-terminal data packet reception for the moment */
250 _debug("end rcv data");
251out:
252 release_sock(&rx->sk);
253 if (call)
254 rxrpc_put_call(call);
255 if (continue_call)
256 rxrpc_put_call(continue_call);
257 _leave(" = %d [data]", copied);
258 return copied;
259
260 /* handle non-DATA messages such as aborts, incoming connections and
261 * final ACKs */
262receive_non_data_message:
263 _debug("non-data");
264
265 if (skb->mark == RXRPC_SKB_MARK_NEW_CALL) {
266 _debug("RECV NEW CALL");
267 ret = put_cmsg(msg, SOL_RXRPC, RXRPC_NEW_CALL, 0, &abort_code);
268 if (ret < 0)
269 goto copy_error;
270 if (!(flags & MSG_PEEK)) {
271 if (skb_dequeue(&rx->sk.sk_receive_queue) != skb)
272 BUG();
273 rxrpc_free_skb(skb);
274 }
275 goto out;
276 }
277
278 ret = put_cmsg(msg, SOL_RXRPC, RXRPC_USER_CALL_ID,
279 ullen, &call->user_call_ID);
280 if (ret < 0)
281 goto copy_error;
282 ASSERT(test_bit(RXRPC_CALL_HAS_USERID, &call->flags));
283
284 switch (skb->mark) {
285 case RXRPC_SKB_MARK_DATA:
286 BUG();
287 case RXRPC_SKB_MARK_FINAL_ACK:
288 ret = put_cmsg(msg, SOL_RXRPC, RXRPC_ACK, 0, &abort_code);
289 break;
290 case RXRPC_SKB_MARK_BUSY:
291 ret = put_cmsg(msg, SOL_RXRPC, RXRPC_BUSY, 0, &abort_code);
292 break;
293 case RXRPC_SKB_MARK_REMOTE_ABORT:
294 abort_code = call->abort_code;
295 ret = put_cmsg(msg, SOL_RXRPC, RXRPC_ABORT, 4, &abort_code);
296 break;
297 case RXRPC_SKB_MARK_NET_ERROR:
298 _debug("RECV NET ERROR %d", sp->error);
299 abort_code = sp->error;
300 ret = put_cmsg(msg, SOL_RXRPC, RXRPC_NET_ERROR, 4, &abort_code);
301 break;
302 case RXRPC_SKB_MARK_LOCAL_ERROR:
303 _debug("RECV LOCAL ERROR %d", sp->error);
304 abort_code = sp->error;
305 ret = put_cmsg(msg, SOL_RXRPC, RXRPC_LOCAL_ERROR, 4,
306 &abort_code);
307 break;
308 default:
309 BUG();
310 break;
311 }
312
313 if (ret < 0)
314 goto copy_error;
315
316terminal_message:
317 _debug("terminal");
318 msg->msg_flags &= ~MSG_MORE;
319 msg->msg_flags |= MSG_EOR;
320
321 if (!(flags & MSG_PEEK)) {
322 _net("free terminal skb %p", skb);
323 if (skb_dequeue(&rx->sk.sk_receive_queue) != skb)
324 BUG();
325 rxrpc_free_skb(skb);
326 rxrpc_remove_user_ID(rx, call);
327 }
328
329 release_sock(&rx->sk);
330 rxrpc_put_call(call);
331 if (continue_call)
332 rxrpc_put_call(continue_call);
333 _leave(" = %d", ret);
334 return ret;
335
336copy_error:
337 _debug("copy error");
338 release_sock(&rx->sk);
339 rxrpc_put_call(call);
340 if (continue_call)
341 rxrpc_put_call(continue_call);
342 _leave(" = %d", ret);
343 return ret;
344
345csum_copy_error:
346 _debug("csum error");
347 release_sock(&rx->sk);
348 if (continue_call)
349 rxrpc_put_call(continue_call);
350 rxrpc_kill_skb(skb);
351 skb_kill_datagram(&rx->sk, skb, flags);
352 rxrpc_put_call(call);
353 return -EAGAIN;
354
355wait_interrupted:
356 ret = sock_intr_errno(timeo);
357wait_error:
358 finish_wait(rx->sk.sk_sleep, &wait);
359 if (continue_call)
360 rxrpc_put_call(continue_call);
361	if (!copied)
362		copied = ret;
363 _leave(" = %d [waitfail %d]", copied, ret);
364 return copied;
365
366}
diff --git a/net/rxrpc/ar-security.c b/net/rxrpc/ar-security.c
new file mode 100644
index 000000000000..60d1d364430a
--- /dev/null
+++ b/net/rxrpc/ar-security.c
@@ -0,0 +1,258 @@
1/* RxRPC security handling
2 *
3 * Copyright (C) 2007 Red Hat, Inc. All Rights Reserved.
4 * Written by David Howells (dhowells@redhat.com)
5 *
6 * This program is free software; you can redistribute it and/or
7 * modify it under the terms of the GNU General Public License
8 * as published by the Free Software Foundation; either version
9 * 2 of the License, or (at your option) any later version.
10 */
11
12#include <linux/module.h>
13#include <linux/net.h>
14#include <linux/skbuff.h>
15#include <linux/udp.h>
16#include <linux/crypto.h>
17#include <net/sock.h>
18#include <net/af_rxrpc.h>
19#include "ar-internal.h"
20
21static LIST_HEAD(rxrpc_security_methods);
22static DECLARE_RWSEM(rxrpc_security_sem);
23
24/*
25 * get an RxRPC security module
26 */
27static struct rxrpc_security *rxrpc_security_get(struct rxrpc_security *sec)
28{
29 return try_module_get(sec->owner) ? sec : NULL;
30}
31
32/*
33 * release an RxRPC security module
34 */
35static void rxrpc_security_put(struct rxrpc_security *sec)
36{
37 module_put(sec->owner);
38}
39
40/*
41 * look up an rxrpc security module
42 */
43struct rxrpc_security *rxrpc_security_lookup(u8 security_index)
44{
45 struct rxrpc_security *sec = NULL;
46
47 _enter("");
48
49 down_read(&rxrpc_security_sem);
50
51 list_for_each_entry(sec, &rxrpc_security_methods, link) {
52 if (sec->security_index == security_index) {
53 if (unlikely(!rxrpc_security_get(sec)))
54 break;
55 goto out;
56 }
57 }
58
59 sec = NULL;
60out:
61 up_read(&rxrpc_security_sem);
62 _leave(" = %p [%s]", sec, sec ? sec->name : "");
63 return sec;
64}
65
66/**
67 * rxrpc_register_security - register an RxRPC security handler
68 * @sec: security module
69 *
70 * register an RxRPC security handler for use by RxRPC
71 */
72int rxrpc_register_security(struct rxrpc_security *sec)
73{
74 struct rxrpc_security *psec;
75 int ret;
76
77 _enter("");
78 down_write(&rxrpc_security_sem);
79
80 ret = -EEXIST;
81 list_for_each_entry(psec, &rxrpc_security_methods, link) {
82 if (psec->security_index == sec->security_index)
83 goto out;
84 }
85
86 list_add(&sec->link, &rxrpc_security_methods);
87
88 printk(KERN_NOTICE "RxRPC: Registered security type %d '%s'\n",
89 sec->security_index, sec->name);
90 ret = 0;
91
92out:
93 up_write(&rxrpc_security_sem);
94 _leave(" = %d", ret);
95 return ret;
96}
97
98EXPORT_SYMBOL_GPL(rxrpc_register_security);
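/*
 * Illustrative use only (the names below are hypothetical): a security module
 * would typically register itself from its module_init function:
 *
 *	static int __init example_rxkad_init(void)
 *	{
 *		return rxrpc_register_security(&example_rxkad_ops);
 *	}
 */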
99
100/**
101 * rxrpc_unregister_security - unregister an RxRPC security handler
102 * @sec: security module
103 *
104 * unregister an RxRPC security handler
105 */
106void rxrpc_unregister_security(struct rxrpc_security *sec)
107{
109 _enter("");
110 down_write(&rxrpc_security_sem);
111 list_del_init(&sec->link);
112 up_write(&rxrpc_security_sem);
113
114 printk(KERN_NOTICE "RxRPC: Unregistered security type %d '%s'\n",
115 sec->security_index, sec->name);
116}
117
118EXPORT_SYMBOL_GPL(rxrpc_unregister_security);
119
120/*
121 * initialise the security on a client connection
122 */
123int rxrpc_init_client_conn_security(struct rxrpc_connection *conn)
124{
125 struct rxrpc_security *sec;
126 struct key *key = conn->key;
127 int ret;
128
129 _enter("{%d},{%x}", conn->debug_id, key_serial(key));
130
131 if (!key)
132 return 0;
133
134 ret = key_validate(key);
135 if (ret < 0)
136 return ret;
137
138 sec = rxrpc_security_lookup(key->type_data.x[0]);
139 if (!sec)
140 return -EKEYREJECTED;
141 conn->security = sec;
142
143 ret = conn->security->init_connection_security(conn);
144 if (ret < 0) {
145 rxrpc_security_put(conn->security);
146 conn->security = NULL;
147 return ret;
148 }
149
150 _leave(" = 0");
151 return 0;
152}
153
154/*
155 * initialise the security on a server connection
156 */
157int rxrpc_init_server_conn_security(struct rxrpc_connection *conn)
158{
159 struct rxrpc_security *sec;
160 struct rxrpc_local *local = conn->trans->local;
161 struct rxrpc_sock *rx;
162 struct key *key;
163 key_ref_t kref;
164 char kdesc[5+1+3+1];
165
166 _enter("");
167
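	/* server keys are described as "<serviceID>:<securityIndex>" - hence
	 * kdesc's 5+1+3+1 sizing */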
168 sprintf(kdesc, "%u:%u", ntohs(conn->service_id), conn->security_ix);
169
170 sec = rxrpc_security_lookup(conn->security_ix);
171 if (!sec) {
172 _leave(" = -ENOKEY [lookup]");
173 return -ENOKEY;
174 }
175
176 /* find the service */
177 read_lock_bh(&local->services_lock);
178 list_for_each_entry(rx, &local->services, listen_link) {
179 if (rx->service_id == conn->service_id)
180 goto found_service;
181 }
182
183 /* the service appears to have died */
184 read_unlock_bh(&local->services_lock);
185 rxrpc_security_put(sec);
186 _leave(" = -ENOENT");
187 return -ENOENT;
188
189found_service:
190 if (!rx->securities) {
191 read_unlock_bh(&local->services_lock);
192 rxrpc_security_put(sec);
193 _leave(" = -ENOKEY");
194 return -ENOKEY;
195 }
196
197 /* look through the service's keyring */
198 kref = keyring_search(make_key_ref(rx->securities, 1UL),
199 &key_type_rxrpc_s, kdesc);
200 if (IS_ERR(kref)) {
201 read_unlock_bh(&local->services_lock);
202 rxrpc_security_put(sec);
203 _leave(" = %ld [search]", PTR_ERR(kref));
204 return PTR_ERR(kref);
205 }
206
207 key = key_ref_to_ptr(kref);
208 read_unlock_bh(&local->services_lock);
209
210 conn->server_key = key;
211 conn->security = sec;
212
213 _leave(" = 0");
214 return 0;
215}
216
217/*
218 * secure a packet prior to transmission
219 */
220int rxrpc_secure_packet(const struct rxrpc_call *call,
221 struct sk_buff *skb,
222 size_t data_size,
223 void *sechdr)
224{
225 if (call->conn->security)
226 return call->conn->security->secure_packet(
227 call, skb, data_size, sechdr);
228 return 0;
229}
230
231/*
232 * verify the security on a received packet
233 */
234int rxrpc_verify_packet(const struct rxrpc_call *call, struct sk_buff *skb,
235 u32 *_abort_code)
236{
237 if (call->conn->security)
238 return call->conn->security->verify_packet(
239 call, skb, _abort_code);
240 return 0;
241}
242
243/*
244 * clear connection security
245 */
246void rxrpc_clear_conn_security(struct rxrpc_connection *conn)
247{
248 _enter("{%d}", conn->debug_id);
249
250 if (conn->security) {
251 conn->security->clear(conn);
252 rxrpc_security_put(conn->security);
253 conn->security = NULL;
254 }
255
256 key_put(conn->key);
257 key_put(conn->server_key);
258}
diff --git a/net/rxrpc/ar-skbuff.c b/net/rxrpc/ar-skbuff.c
new file mode 100644
index 000000000000..d73f6fc76011
--- /dev/null
+++ b/net/rxrpc/ar-skbuff.c
@@ -0,0 +1,118 @@
1/* ar-skbuff.c: socket buffer destruction handling
2 *
3 * Copyright (C) 2007 Red Hat, Inc. All Rights Reserved.
4 * Written by David Howells (dhowells@redhat.com)
5 *
6 * This program is free software; you can redistribute it and/or
7 * modify it under the terms of the GNU General Public License
8 * as published by the Free Software Foundation; either version
9 * 2 of the License, or (at your option) any later version.
10 */
11
12#include <linux/module.h>
13#include <linux/net.h>
14#include <linux/skbuff.h>
15#include <net/sock.h>
16#include <net/af_rxrpc.h>
17#include "ar-internal.h"
18
19/*
20 * set up for the ACK at the end of the receive phase when we discard the final
21 * receive phase data packet
22 * - called with softirqs disabled
23 */
24static void rxrpc_request_final_ACK(struct rxrpc_call *call)
25{
26 /* the call may be aborted before we have a chance to ACK it */
27 write_lock(&call->state_lock);
28
29 switch (call->state) {
30 case RXRPC_CALL_CLIENT_RECV_REPLY:
31 call->state = RXRPC_CALL_CLIENT_FINAL_ACK;
32 _debug("request final ACK");
33
34 /* get an extra ref on the call for the final-ACK generator to
35 * release */
36 rxrpc_get_call(call);
37 set_bit(RXRPC_CALL_ACK_FINAL, &call->events);
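		/* unless the ACK timer's handler is actually running, kick the
		 * call processor to transmit the final ACK now */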
38 if (try_to_del_timer_sync(&call->ack_timer) >= 0)
39 schedule_work(&call->processor);
40 break;
41
42 case RXRPC_CALL_SERVER_RECV_REQUEST:
43 call->state = RXRPC_CALL_SERVER_ACK_REQUEST;
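		/* fall through */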
44 default:
45 break;
46 }
47
48 write_unlock(&call->state_lock);
49}
50
51/*
52 * drop the bottom ACK off of the call ACK window and advance the window
53 */
54static void rxrpc_hard_ACK_data(struct rxrpc_call *call,
55 struct rxrpc_skb_priv *sp)
56{
57 int loop;
58 u32 seq;
59
60 spin_lock_bh(&call->lock);
61
62 _debug("hard ACK #%u", ntohl(sp->hdr.seq));
63
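	/* shift the soft-ACK window down one packet now that the bottom one
	 * has been consumed */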
64 for (loop = 0; loop < RXRPC_ACKR_WINDOW_ASZ; loop++) {
65 call->ackr_window[loop] >>= 1;
66 call->ackr_window[loop] |=
67 call->ackr_window[loop + 1] << (BITS_PER_LONG - 1);
68 }
69
70 seq = ntohl(sp->hdr.seq);
71 ASSERTCMP(seq, ==, call->rx_data_eaten + 1);
72 call->rx_data_eaten = seq;
73
74 if (call->ackr_win_top < UINT_MAX)
75 call->ackr_win_top++;
76
77 ASSERTIFCMP(call->state <= RXRPC_CALL_COMPLETE,
78 call->rx_data_post, >=, call->rx_data_recv);
79 ASSERTIFCMP(call->state <= RXRPC_CALL_COMPLETE,
80 call->rx_data_recv, >=, call->rx_data_eaten);
81
82 if (sp->hdr.flags & RXRPC_LAST_PACKET) {
83 rxrpc_request_final_ACK(call);
84 } else if (atomic_dec_and_test(&call->ackr_not_idle) &&
85 test_and_clear_bit(RXRPC_CALL_TX_SOFT_ACK, &call->flags)) {
86 _debug("send Rx idle ACK");
87 __rxrpc_propose_ACK(call, RXRPC_ACK_IDLE, sp->hdr.serial,
88 true);
89 }
90
91 spin_unlock_bh(&call->lock);
92}
93
94/*
95 * destroy a packet that has an RxRPC control buffer
96 * - advance the hard-ACK state of the parent call (done here in case something
97 * in the kernel bypasses recvmsg() and steals the packet directly off of the
98 * socket receive queue)
99 */
100void rxrpc_packet_destructor(struct sk_buff *skb)
101{
102 struct rxrpc_skb_priv *sp = rxrpc_skb(skb);
103 struct rxrpc_call *call = sp->call;
104
105 _enter("%p{%p}", skb, call);
106
107 if (call) {
108 /* send the final ACK on a client call */
109 if (sp->hdr.type == RXRPC_PACKET_TYPE_DATA)
110 rxrpc_hard_ACK_data(call, sp);
111 rxrpc_put_call(call);
112 sp->call = NULL;
113 }
114
115 if (skb->sk)
116 sock_rfree(skb);
117 _leave("");
118}
diff --git a/net/rxrpc/ar-transport.c b/net/rxrpc/ar-transport.c
new file mode 100644
index 000000000000..9b4e5cb545d2
--- /dev/null
+++ b/net/rxrpc/ar-transport.c
@@ -0,0 +1,276 @@
1/* RxRPC point-to-point transport session management
2 *
3 * Copyright (C) 2007 Red Hat, Inc. All Rights Reserved.
4 * Written by David Howells (dhowells@redhat.com)
5 *
6 * This program is free software; you can redistribute it and/or
7 * modify it under the terms of the GNU General Public License
8 * as published by the Free Software Foundation; either version
9 * 2 of the License, or (at your option) any later version.
10 */
11
12#include <linux/module.h>
13#include <linux/net.h>
14#include <linux/skbuff.h>
15#include <net/sock.h>
16#include <net/af_rxrpc.h>
17#include "ar-internal.h"
18
19static void rxrpc_transport_reaper(struct work_struct *work);
20
21static LIST_HEAD(rxrpc_transports);
22static DEFINE_RWLOCK(rxrpc_transport_lock);
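/* unused transports linger for a day before being reaped */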
23static unsigned long rxrpc_transport_timeout = 3600 * 24;
24static DECLARE_DELAYED_WORK(rxrpc_transport_reap, rxrpc_transport_reaper);
25
26/*
27 * allocate a new transport session manager
28 */
29static struct rxrpc_transport *rxrpc_alloc_transport(struct rxrpc_local *local,
30 struct rxrpc_peer *peer,
31 gfp_t gfp)
32{
33 struct rxrpc_transport *trans;
34
35 _enter("");
36
37 trans = kzalloc(sizeof(struct rxrpc_transport), gfp);
38 if (trans) {
39 trans->local = local;
40 trans->peer = peer;
41 INIT_LIST_HEAD(&trans->link);
42 trans->bundles = RB_ROOT;
43 trans->client_conns = RB_ROOT;
44 trans->server_conns = RB_ROOT;
45 skb_queue_head_init(&trans->error_queue);
46 spin_lock_init(&trans->client_lock);
47 rwlock_init(&trans->conn_lock);
48 atomic_set(&trans->usage, 1);
49 trans->debug_id = atomic_inc_return(&rxrpc_debug_id);
50
51 if (peer->srx.transport.family == AF_INET) {
52 switch (peer->srx.transport_type) {
53 case SOCK_DGRAM:
54 INIT_WORK(&trans->error_handler,
55 rxrpc_UDP_error_handler);
56 break;
57 default:
58 BUG();
59 break;
60 }
61 } else {
62 BUG();
63 }
64 }
65
66 _leave(" = %p", trans);
67 return trans;
68}
69
70/*
71 * obtain a transport session for the nominated endpoints
72 */
73struct rxrpc_transport *rxrpc_get_transport(struct rxrpc_local *local,
74 struct rxrpc_peer *peer,
75 gfp_t gfp)
76{
77 struct rxrpc_transport *trans, *candidate;
78 const char *new = "old";
79 int usage;
80
81 _enter("{%u.%u.%u.%u+%hu},{%u.%u.%u.%u+%hu},",
82 NIPQUAD(local->srx.transport.sin.sin_addr),
83 ntohs(local->srx.transport.sin.sin_port),
84 NIPQUAD(peer->srx.transport.sin.sin_addr),
85 ntohs(peer->srx.transport.sin.sin_port));
86
87 /* search the transport list first */
88 read_lock_bh(&rxrpc_transport_lock);
89 list_for_each_entry(trans, &rxrpc_transports, link) {
90 if (trans->local == local && trans->peer == peer)
91 goto found_extant_transport;
92 }
93 read_unlock_bh(&rxrpc_transport_lock);
94
95 /* not yet present - create a candidate for a new record and then
96 * redo the search */
97 candidate = rxrpc_alloc_transport(local, peer, gfp);
98 if (!candidate) {
99 _leave(" = -ENOMEM");
100 return ERR_PTR(-ENOMEM);
101 }
102
103 write_lock_bh(&rxrpc_transport_lock);
104
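	/* another thread may have added a matching transport whilst the lock
	 * was dropped, so repeat the search before adding ours */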
105 list_for_each_entry(trans, &rxrpc_transports, link) {
106 if (trans->local == local && trans->peer == peer)
107 goto found_extant_second;
108 }
109
110 /* we can now add the new candidate to the list */
111 trans = candidate;
112 candidate = NULL;
113
114 rxrpc_get_local(trans->local);
115 atomic_inc(&trans->peer->usage);
116 list_add_tail(&trans->link, &rxrpc_transports);
117 write_unlock_bh(&rxrpc_transport_lock);
118 new = "new";
119
120success:
121 _net("TRANSPORT %s %d local %d -> peer %d",
122 new,
123 trans->debug_id,
124 trans->local->debug_id,
125 trans->peer->debug_id);
126
127 _leave(" = %p {u=%d}", trans, atomic_read(&trans->usage));
128 return trans;
129
130 /* we found the transport in the list immediately */
131found_extant_transport:
132 usage = atomic_inc_return(&trans->usage);
133 read_unlock_bh(&rxrpc_transport_lock);
134 goto success;
135
136 /* we found the transport on the second time through the list */
137found_extant_second:
138 usage = atomic_inc_return(&trans->usage);
139 write_unlock_bh(&rxrpc_transport_lock);
140 kfree(candidate);
141 goto success;
142}
143
144/*
145 * find the transport connecting two endpoints
146 */
147struct rxrpc_transport *rxrpc_find_transport(struct rxrpc_local *local,
148 struct rxrpc_peer *peer)
149{
150 struct rxrpc_transport *trans;
151
152 _enter("{%u.%u.%u.%u+%hu},{%u.%u.%u.%u+%hu},",
153 NIPQUAD(local->srx.transport.sin.sin_addr),
154 ntohs(local->srx.transport.sin.sin_port),
155 NIPQUAD(peer->srx.transport.sin.sin_addr),
156 ntohs(peer->srx.transport.sin.sin_port));
157
158 /* search the transport list */
159 read_lock_bh(&rxrpc_transport_lock);
160
161 list_for_each_entry(trans, &rxrpc_transports, link) {
162 if (trans->local == local && trans->peer == peer)
163 goto found_extant_transport;
164 }
165
166 read_unlock_bh(&rxrpc_transport_lock);
167 _leave(" = NULL");
168 return NULL;
169
170found_extant_transport:
171 atomic_inc(&trans->usage);
172 read_unlock_bh(&rxrpc_transport_lock);
173 _leave(" = %p", trans);
174 return trans;
175}
176
177/*
178 * release a transport session
179 */
180void rxrpc_put_transport(struct rxrpc_transport *trans)
181{
182 _enter("%p{u=%d}", trans, atomic_read(&trans->usage));
183
184 ASSERTCMP(atomic_read(&trans->usage), >, 0);
185
186 trans->put_time = xtime.tv_sec;
187 if (unlikely(atomic_dec_and_test(&trans->usage)))
188 _debug("zombie");
189 /* let the reaper determine the timeout to avoid a race with
190 * overextending the timeout if the reaper is running at the
191 * same time */
192 schedule_delayed_work(&rxrpc_transport_reap, 0);
193 _leave("");
194}
195
196/*
197 * clean up a transport session
198 */
199static void rxrpc_cleanup_transport(struct rxrpc_transport *trans)
200{
201 _net("DESTROY TRANS %d", trans->debug_id);
202
203 rxrpc_purge_queue(&trans->error_queue);
204
205 rxrpc_put_local(trans->local);
206 rxrpc_put_peer(trans->peer);
207 kfree(trans);
208}
209
210/*
211 * reap dead transports that have passed their expiry date
212 */
213static void rxrpc_transport_reaper(struct work_struct *work)
214{
215 struct rxrpc_transport *trans, *_p;
216 unsigned long now, earliest, reap_time;
217
218 LIST_HEAD(graveyard);
219
220 _enter("");
221
222 now = xtime.tv_sec;
223 earliest = ULONG_MAX;
224
225 /* extract all the transports that have been dead too long */
226 write_lock_bh(&rxrpc_transport_lock);
227 list_for_each_entry_safe(trans, _p, &rxrpc_transports, link) {
228 _debug("reap TRANS %d { u=%d t=%ld }",
229 trans->debug_id, atomic_read(&trans->usage),
230 (long) now - (long) trans->put_time);
231
232 if (likely(atomic_read(&trans->usage) > 0))
233 continue;
234
235 reap_time = trans->put_time + rxrpc_transport_timeout;
236 if (reap_time <= now)
237 list_move_tail(&trans->link, &graveyard);
238 else if (reap_time < earliest)
239 earliest = reap_time;
240 }
241 write_unlock_bh(&rxrpc_transport_lock);
242
243 if (earliest != ULONG_MAX) {
244 _debug("reschedule reaper %ld", (long) earliest - now);
245 ASSERTCMP(earliest, >, now);
246 schedule_delayed_work(&rxrpc_transport_reap,
247 (earliest - now) * HZ);
248 }
249
250 /* then destroy all those pulled out */
251 while (!list_empty(&graveyard)) {
252 trans = list_entry(graveyard.next, struct rxrpc_transport,
253 link);
254 list_del_init(&trans->link);
255
256 ASSERTCMP(atomic_read(&trans->usage), ==, 0);
257 rxrpc_cleanup_transport(trans);
258 }
259
260 _leave("");
261}
262
263/*
264 * preemptively destroy all the transport session records rather than waiting
265 * for them to time out
266 */
267void __exit rxrpc_destroy_all_transports(void)
268{
269 _enter("");
270
271 rxrpc_transport_timeout = 0;
272 cancel_delayed_work(&rxrpc_transport_reap);
273 schedule_delayed_work(&rxrpc_transport_reap, 0);
274
275 _leave("");
276}
diff --git a/net/rxrpc/rxkad.c b/net/rxrpc/rxkad.c
new file mode 100644
index 000000000000..1eaf529efac1
--- /dev/null
+++ b/net/rxrpc/rxkad.c
@@ -0,0 +1,1153 @@
1/* Kerberos-based RxRPC security
2 *
3 * Copyright (C) 2007 Red Hat, Inc. All Rights Reserved.
4 * Written by David Howells (dhowells@redhat.com)
5 *
6 * This program is free software; you can redistribute it and/or
7 * modify it under the terms of the GNU General Public License
8 * as published by the Free Software Foundation; either version
9 * 2 of the License, or (at your option) any later version.
10 */
11
12#include <linux/module.h>
13#include <linux/net.h>
14#include <linux/skbuff.h>
15#include <linux/udp.h>
16#include <linux/crypto.h>
17#include <linux/scatterlist.h>
18#include <linux/ctype.h>
19#include <net/sock.h>
20#include <net/af_rxrpc.h>
21#include "ar-internal.h"
22
23#define RXKAD_VERSION 2
24#define MAXKRB5TICKETLEN 1024
25#define RXKAD_TKT_TYPE_KERBEROS_V5 256
26#define ANAME_SZ 40 /* size of authentication name */
27#define INST_SZ 40 /* size of principal's instance */
28#define REALM_SZ 40 /* size of principal's auth domain */
29#define SNAME_SZ 40 /* size of service name */
30
31unsigned rxrpc_debug;
32module_param_named(debug, rxrpc_debug, uint, S_IWUSR | S_IRUGO);
33MODULE_PARM_DESC(debug, "rxkad debugging mask");
34
35struct rxkad_level1_hdr {
36 __be32 data_size; /* true data size (excluding padding) */
37};
38
39struct rxkad_level2_hdr {
40 __be32 data_size; /* true data size (excluding padding) */
41 __be32 checksum; /* decrypted data checksum */
42};
43
44MODULE_DESCRIPTION("RxRPC network protocol type-2 security (Kerberos)");
45MODULE_AUTHOR("Red Hat, Inc.");
46MODULE_LICENSE("GPL");
47
48/*
49 * this holds a pinned cipher so that keventd doesn't get called by the cipher
50 * alloc routine, but since we have it to hand, we use it to decrypt RESPONSE
51 * packets
52 */
53static struct crypto_blkcipher *rxkad_ci;
54static DEFINE_MUTEX(rxkad_ci_mutex);
55
56/*
57 * initialise connection security
58 */
59static int rxkad_init_connection_security(struct rxrpc_connection *conn)
60{
61 struct rxrpc_key_payload *payload;
62 struct crypto_blkcipher *ci;
63 int ret;
64
65 _enter("{%d},{%x}", conn->debug_id, key_serial(conn->key));
66
67 payload = conn->key->payload.data;
68 conn->security_ix = payload->k.security_index;
69
70 ci = crypto_alloc_blkcipher("pcbc(fcrypt)", 0, CRYPTO_ALG_ASYNC);
71 if (IS_ERR(ci)) {
72 _debug("no cipher");
73 ret = PTR_ERR(ci);
74 goto error;
75 }
76
77 if (crypto_blkcipher_setkey(ci, payload->k.session_key,
78 sizeof(payload->k.session_key)) < 0)
79 BUG();
80
81 switch (conn->security_level) {
82 case RXRPC_SECURITY_PLAIN:
83 break;
84 case RXRPC_SECURITY_AUTH:
85 conn->size_align = 8;
86 conn->security_size = sizeof(struct rxkad_level1_hdr);
87 conn->header_size += sizeof(struct rxkad_level1_hdr);
88 break;
89 case RXRPC_SECURITY_ENCRYPT:
90 conn->size_align = 8;
91 conn->security_size = sizeof(struct rxkad_level2_hdr);
92 conn->header_size += sizeof(struct rxkad_level2_hdr);
93 break;
94	default:
		crypto_free_blkcipher(ci);	/* don't leak the cipher on rejection */
95		ret = -EKEYREJECTED;
96		goto error;
97 }
98
99 conn->cipher = ci;
100 ret = 0;
101error:
102 _leave(" = %d", ret);
103 return ret;
104}
105
106/*
107 * prime the encryption state with the invariant parts of a connection's
108 * description
109 */
110static void rxkad_prime_packet_security(struct rxrpc_connection *conn)
111{
112 struct rxrpc_key_payload *payload;
113 struct blkcipher_desc desc;
114 struct scatterlist sg[2];
115 struct rxrpc_crypt iv;
116 struct {
117 __be32 x[4];
118 } tmpbuf __attribute__((aligned(16))); /* must all be in same page */
119
120 _enter("");
121
122 if (!conn->key)
123 return;
124
125 payload = conn->key->payload.data;
126 memcpy(&iv, payload->k.session_key, sizeof(iv));
127
128 desc.tfm = conn->cipher;
129 desc.info = iv.x;
130 desc.flags = 0;
131
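	/* encrypt the invariant connection description; the last two words of
	 * the result seed the IV used for the per-packet header checksums */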
132 tmpbuf.x[0] = conn->epoch;
133 tmpbuf.x[1] = conn->cid;
134 tmpbuf.x[2] = 0;
135 tmpbuf.x[3] = htonl(conn->security_ix);
136
137 memset(sg, 0, sizeof(sg));
138 sg_set_buf(&sg[0], &tmpbuf, sizeof(tmpbuf));
139 sg_set_buf(&sg[1], &tmpbuf, sizeof(tmpbuf));
140 crypto_blkcipher_encrypt_iv(&desc, &sg[0], &sg[1], sizeof(tmpbuf));
141
142 memcpy(&conn->csum_iv, &tmpbuf.x[2], sizeof(conn->csum_iv));
143 ASSERTCMP(conn->csum_iv.n[0], ==, tmpbuf.x[2]);
144
145 _leave("");
146}
147
148/*
149 * partially encrypt a packet (level 1 security)
150 */
151static int rxkad_secure_packet_auth(const struct rxrpc_call *call,
152 struct sk_buff *skb,
153 u32 data_size,
154 void *sechdr)
155{
156 struct rxrpc_skb_priv *sp;
157 struct blkcipher_desc desc;
158 struct rxrpc_crypt iv;
159 struct scatterlist sg[2];
160 struct {
161 struct rxkad_level1_hdr hdr;
162 __be32 first; /* first four bytes of data and padding */
163 } tmpbuf __attribute__((aligned(8))); /* must all be in same page */
164 u16 check;
165
166 sp = rxrpc_skb(skb);
167
168 _enter("");
169
170 check = ntohl(sp->hdr.seq ^ sp->hdr.callNumber);
171 data_size |= (u32) check << 16;
172
173 tmpbuf.hdr.data_size = htonl(data_size);
174 memcpy(&tmpbuf.first, sechdr + 4, sizeof(tmpbuf.first));
175
176 /* start the encryption afresh */
177 memset(&iv, 0, sizeof(iv));
178 desc.tfm = call->conn->cipher;
179 desc.info = iv.x;
180 desc.flags = 0;
181
182 memset(sg, 0, sizeof(sg));
183 sg_set_buf(&sg[0], &tmpbuf, sizeof(tmpbuf));
184 sg_set_buf(&sg[1], &tmpbuf, sizeof(tmpbuf));
185 crypto_blkcipher_encrypt_iv(&desc, &sg[0], &sg[1], sizeof(tmpbuf));
186
187 memcpy(sechdr, &tmpbuf, sizeof(tmpbuf));
188
189 _leave(" = 0");
190 return 0;
191}
192
193/*
194 * wholly encrypt a packet (level 2 security)
195 */
196static int rxkad_secure_packet_encrypt(const struct rxrpc_call *call,
197 struct sk_buff *skb,
198 u32 data_size,
199 void *sechdr)
200{
201 const struct rxrpc_key_payload *payload;
202 struct rxkad_level2_hdr rxkhdr
203 __attribute__((aligned(8))); /* must be all on one page */
204 struct rxrpc_skb_priv *sp;
205 struct blkcipher_desc desc;
206 struct rxrpc_crypt iv;
207 struct scatterlist sg[16];
208 struct sk_buff *trailer;
209 unsigned len;
210 u16 check;
211 int nsg;
212
213 sp = rxrpc_skb(skb);
214
215 _enter("");
216
217 check = ntohl(sp->hdr.seq ^ sp->hdr.callNumber);
218
219 rxkhdr.data_size = htonl(data_size | (u32) check << 16);
220 rxkhdr.checksum = 0;
221
222 /* encrypt from the session key */
223 payload = call->conn->key->payload.data;
224 memcpy(&iv, payload->k.session_key, sizeof(iv));
225 desc.tfm = call->conn->cipher;
226 desc.info = iv.x;
227 desc.flags = 0;
228
229 memset(sg, 0, sizeof(sg[0]) * 2);
230 sg_set_buf(&sg[0], sechdr, sizeof(rxkhdr));
231 sg_set_buf(&sg[1], &rxkhdr, sizeof(rxkhdr));
232 crypto_blkcipher_encrypt_iv(&desc, &sg[0], &sg[1], sizeof(rxkhdr));
233
234 /* we want to encrypt the skbuff in-place */
235 nsg = skb_cow_data(skb, 0, &trailer);
236 if (nsg < 0 || nsg > 16)
237 return -ENOMEM;
238
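	/* round the data length up to the cipher's alignment */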
239 len = data_size + call->conn->size_align - 1;
240 len &= ~(call->conn->size_align - 1);
241
242 skb_to_sgvec(skb, sg, 0, len);
243 crypto_blkcipher_encrypt_iv(&desc, sg, sg, len);
244
245 _leave(" = 0");
246 return 0;
247}
248
249/*
250 * checksum an RxRPC packet header and apply the requested level of security
251 */
252static int rxkad_secure_packet(const struct rxrpc_call *call,
253 struct sk_buff *skb,
254 size_t data_size,
255 void *sechdr)
256{
257 struct rxrpc_skb_priv *sp;
258 struct blkcipher_desc desc;
259 struct rxrpc_crypt iv;
260 struct scatterlist sg[2];
261 struct {
262 __be32 x[2];
263 } tmpbuf __attribute__((aligned(8))); /* must all be in same page */
264 __be32 x;
265 int ret;
266
267 sp = rxrpc_skb(skb);
268
269 _enter("{%d{%x}},{#%u},%zu,",
270 call->debug_id, key_serial(call->conn->key), ntohl(sp->hdr.seq),
271 data_size);
272
273 if (!call->conn->cipher)
274 return 0;
275
276 ret = key_validate(call->conn->key);
277 if (ret < 0)
278 return ret;
279
280 /* continue encrypting from where we left off */
281 memcpy(&iv, call->conn->csum_iv.x, sizeof(iv));
282 desc.tfm = call->conn->cipher;
283 desc.info = iv.x;
284 desc.flags = 0;
285
286 /* calculate the security checksum */
287 x = htonl(call->channel << (32 - RXRPC_CIDSHIFT));
288 x |= sp->hdr.seq & __constant_cpu_to_be32(0x3fffffff);
289 tmpbuf.x[0] = sp->hdr.callNumber;
290 tmpbuf.x[1] = x;
291
292 memset(&sg, 0, sizeof(sg));
293 sg_set_buf(&sg[0], &tmpbuf, sizeof(tmpbuf));
294 sg_set_buf(&sg[1], &tmpbuf, sizeof(tmpbuf));
295 crypto_blkcipher_encrypt_iv(&desc, &sg[0], &sg[1], sizeof(tmpbuf));
296
297 x = ntohl(tmpbuf.x[1]);
298 x = (x >> 16) & 0xffff;
299 if (x == 0)
300 x = 1; /* zero checksums are not permitted */
301 sp->hdr.cksum = htons(x);
302
303 switch (call->conn->security_level) {
304 case RXRPC_SECURITY_PLAIN:
305 ret = 0;
306 break;
307 case RXRPC_SECURITY_AUTH:
308 ret = rxkad_secure_packet_auth(call, skb, data_size, sechdr);
309 break;
310 case RXRPC_SECURITY_ENCRYPT:
311 ret = rxkad_secure_packet_encrypt(call, skb, data_size,
312 sechdr);
313 break;
314 default:
315 ret = -EPERM;
316 break;
317 }
318
319 _leave(" = %d [set %hx]", ret, x);
320 return ret;
321}
322
323/*
324 * decrypt partial encryption on a packet (level 1 security)
325 */
326static int rxkad_verify_packet_auth(const struct rxrpc_call *call,
327 struct sk_buff *skb,
328 u32 *_abort_code)
329{
330 struct rxkad_level1_hdr sechdr;
331 struct rxrpc_skb_priv *sp;
332 struct blkcipher_desc desc;
333 struct rxrpc_crypt iv;
334 struct scatterlist sg[2];
335 struct sk_buff *trailer;
336 u32 data_size, buf;
337 u16 check;
338
339 _enter("");
340
341 sp = rxrpc_skb(skb);
342
343 /* we want to decrypt the skbuff in-place */
344 if (skb_cow_data(skb, 0, &trailer) < 0)
345 goto nomem;
346
347 skb_to_sgvec(skb, sg, 0, 8);
348
349 /* start the decryption afresh */
350 memset(&iv, 0, sizeof(iv));
351 desc.tfm = call->conn->cipher;
352 desc.info = iv.x;
353 desc.flags = 0;
354
355 crypto_blkcipher_decrypt_iv(&desc, sg, sg, 8);
356
357 /* remove the decrypted packet length */
358 if (skb_copy_bits(skb, 0, &sechdr, sizeof(sechdr)) < 0)
359 goto datalen_error;
360 if (!skb_pull(skb, sizeof(sechdr)))
361 BUG();
362
363 buf = ntohl(sechdr.data_size);
364 data_size = buf & 0xffff;
365
366 check = buf >> 16;
367 check ^= ntohl(sp->hdr.seq ^ sp->hdr.callNumber);
368 check &= 0xffff;
369 if (check != 0) {
370 *_abort_code = RXKADSEALEDINCON;
371 goto protocol_error;
372 }
373
374 /* shorten the packet to remove the padding */
375 if (data_size > skb->len)
376 goto datalen_error;
377 else if (data_size < skb->len)
378 skb->len = data_size;
379
380 _leave(" = 0 [dlen=%x]", data_size);
381 return 0;
382
383datalen_error:
384 *_abort_code = RXKADDATALEN;
385protocol_error:
386 _leave(" = -EPROTO");
387 return -EPROTO;
388
389nomem:
390 _leave(" = -ENOMEM");
391 return -ENOMEM;
392}
393
394/*
395 * wholly decrypt a packet (level 2 security)
396 */
397static int rxkad_verify_packet_encrypt(const struct rxrpc_call *call,
398 struct sk_buff *skb,
399 u32 *_abort_code)
400{
401 const struct rxrpc_key_payload *payload;
402 struct rxkad_level2_hdr sechdr;
403 struct rxrpc_skb_priv *sp;
404 struct blkcipher_desc desc;
405 struct rxrpc_crypt iv;
406 struct scatterlist _sg[4], *sg;
407 struct sk_buff *trailer;
408 u32 data_size, buf;
409 u16 check;
410 int nsg;
411
412 _enter(",{%d}", skb->len);
413
414 sp = rxrpc_skb(skb);
415
416 /* we want to decrypt the skbuff in-place */
417 nsg = skb_cow_data(skb, 0, &trailer);
418 if (nsg < 0)
419 goto nomem;
420
421 sg = _sg;
422 if (unlikely(nsg > 4)) {
423 sg = kmalloc(sizeof(*sg) * nsg, GFP_NOIO);
424 if (!sg)
425 goto nomem;
426 }
427
428 skb_to_sgvec(skb, sg, 0, skb->len);
429
430 /* decrypt from the session key */
431 payload = call->conn->key->payload.data;
432 memcpy(&iv, payload->k.session_key, sizeof(iv));
433 desc.tfm = call->conn->cipher;
434 desc.info = iv.x;
435 desc.flags = 0;
436
437 crypto_blkcipher_decrypt_iv(&desc, sg, sg, skb->len);
438 if (sg != _sg)
439 kfree(sg);
440
441 /* remove the decrypted packet length */
442 if (skb_copy_bits(skb, 0, &sechdr, sizeof(sechdr)) < 0)
443 goto datalen_error;
444 if (!skb_pull(skb, sizeof(sechdr)))
445 BUG();
446
447 buf = ntohl(sechdr.data_size);
448 data_size = buf & 0xffff;
449
450 check = buf >> 16;
451 check ^= ntohl(sp->hdr.seq ^ sp->hdr.callNumber);
452 check &= 0xffff;
453 if (check != 0) {
454 *_abort_code = RXKADSEALEDINCON;
455 goto protocol_error;
456 }
457
458 /* shorten the packet to remove the padding */
459 if (data_size > skb->len)
460 goto datalen_error;
461 else if (data_size < skb->len)
462 skb->len = data_size;
463
464 _leave(" = 0 [dlen=%x]", data_size);
465 return 0;
466
467datalen_error:
468 *_abort_code = RXKADDATALEN;
469protocol_error:
470 _leave(" = -EPROTO");
471 return -EPROTO;
472
473nomem:
474 _leave(" = -ENOMEM");
475 return -ENOMEM;
476}
477
478/*
479 * verify the security on a received packet
480 */
481static int rxkad_verify_packet(const struct rxrpc_call *call,
482 struct sk_buff *skb,
483 u32 *_abort_code)
484{
485 struct blkcipher_desc desc;
486 struct rxrpc_skb_priv *sp;
487 struct rxrpc_crypt iv;
488 struct scatterlist sg[2];
489 struct {
490 __be32 x[2];
491 } tmpbuf __attribute__((aligned(8))); /* must all be in same page */
492 __be32 x;
493 __be16 cksum;
494 int ret;
495
496 sp = rxrpc_skb(skb);
497
498 _enter("{%d{%x}},{#%u}",
499 call->debug_id, key_serial(call->conn->key),
500 ntohl(sp->hdr.seq));
501
502 if (!call->conn->cipher)
503 return 0;
504
505 if (sp->hdr.securityIndex != 2) {
506 *_abort_code = RXKADINCONSISTENCY;
507 _leave(" = -EPROTO [not rxkad]");
508 return -EPROTO;
509 }
510
511 /* continue encrypting from where we left off */
512 memcpy(&iv, call->conn->csum_iv.x, sizeof(iv));
513 desc.tfm = call->conn->cipher;
514 desc.info = iv.x;
515 desc.flags = 0;
516
517 /* validate the security checksum */
518 x = htonl(call->channel << (32 - RXRPC_CIDSHIFT));
519 x |= sp->hdr.seq & __constant_cpu_to_be32(0x3fffffff);
520 tmpbuf.x[0] = call->call_id;
521 tmpbuf.x[1] = x;
522
523 memset(&sg, 0, sizeof(sg));
524 sg_set_buf(&sg[0], &tmpbuf, sizeof(tmpbuf));
525 sg_set_buf(&sg[1], &tmpbuf, sizeof(tmpbuf));
526 crypto_blkcipher_encrypt_iv(&desc, &sg[0], &sg[1], sizeof(tmpbuf));
527
528 x = ntohl(tmpbuf.x[1]);
529 x = (x >> 16) & 0xffff;
530 if (x == 0)
531 x = 1; /* zero checksums are not permitted */
532
533 cksum = htons(x);
534 if (sp->hdr.cksum != cksum) {
535 *_abort_code = RXKADSEALEDINCON;
536 _leave(" = -EPROTO [csum failed]");
537 return -EPROTO;
538 }
539
540 switch (call->conn->security_level) {
541 case RXRPC_SECURITY_PLAIN:
542 ret = 0;
543 break;
544 case RXRPC_SECURITY_AUTH:
545 ret = rxkad_verify_packet_auth(call, skb, _abort_code);
546 break;
547 case RXRPC_SECURITY_ENCRYPT:
548 ret = rxkad_verify_packet_encrypt(call, skb, _abort_code);
549 break;
550 default:
551 ret = -ENOANO;
552 break;
553 }
554
555 _leave(" = %d", ret);
556 return ret;
557}
558
559/*
560 * issue a challenge
561 */
562static int rxkad_issue_challenge(struct rxrpc_connection *conn)
563{
564 struct rxkad_challenge challenge;
565 struct rxrpc_header hdr;
566 struct msghdr msg;
567 struct kvec iov[2];
568 size_t len;
569 int ret;
570
571 _enter("{%d,%x}", conn->debug_id, key_serial(conn->key));
572
573 ret = key_validate(conn->key);
574 if (ret < 0)
575 return ret;
576
577 get_random_bytes(&conn->security_nonce, sizeof(conn->security_nonce));
578
579 challenge.version = htonl(2);
580 challenge.nonce = htonl(conn->security_nonce);
581 challenge.min_level = htonl(0);
582 challenge.__padding = 0;
583
584 msg.msg_name = &conn->trans->peer->srx.transport.sin;
585 msg.msg_namelen = sizeof(conn->trans->peer->srx.transport.sin);
586 msg.msg_control = NULL;
587 msg.msg_controllen = 0;
588 msg.msg_flags = 0;
589
590 hdr.epoch = conn->epoch;
591 hdr.cid = conn->cid;
592 hdr.callNumber = 0;
593 hdr.seq = 0;
594 hdr.type = RXRPC_PACKET_TYPE_CHALLENGE;
595 hdr.flags = conn->out_clientflag;
596 hdr.userStatus = 0;
597 hdr.securityIndex = conn->security_ix;
598 hdr._rsvd = 0;
599 hdr.serviceId = conn->service_id;
600
601 iov[0].iov_base = &hdr;
602 iov[0].iov_len = sizeof(hdr);
603 iov[1].iov_base = &challenge;
604 iov[1].iov_len = sizeof(challenge);
605
606 len = iov[0].iov_len + iov[1].iov_len;
607
608 hdr.serial = htonl(atomic_inc_return(&conn->serial));
609 _proto("Tx CHALLENGE %%%u", ntohl(hdr.serial));
610
611 ret = kernel_sendmsg(conn->trans->local->socket, &msg, iov, 2, len);
612 if (ret < 0) {
613 _debug("sendmsg failed: %d", ret);
614 return -EAGAIN;
615 }
616
617 _leave(" = 0");
618 return 0;
619}
620
621/*
622 * send a Kerberos security response
623 */
624static int rxkad_send_response(struct rxrpc_connection *conn,
625 struct rxrpc_header *hdr,
626 struct rxkad_response *resp,
627 const struct rxkad_key *s2)
628{
629 struct msghdr msg;
630 struct kvec iov[3];
631 size_t len;
632 int ret;
633
634 _enter("");
635
636 msg.msg_name = &conn->trans->peer->srx.transport.sin;
637 msg.msg_namelen = sizeof(conn->trans->peer->srx.transport.sin);
638 msg.msg_control = NULL;
639 msg.msg_controllen = 0;
640 msg.msg_flags = 0;
641
642 hdr->epoch = conn->epoch;
643 hdr->seq = 0;
644 hdr->type = RXRPC_PACKET_TYPE_RESPONSE;
645 hdr->flags = conn->out_clientflag;
646 hdr->userStatus = 0;
647 hdr->_rsvd = 0;
648
649 iov[0].iov_base = hdr;
650 iov[0].iov_len = sizeof(*hdr);
651 iov[1].iov_base = resp;
652 iov[1].iov_len = sizeof(*resp);
653 iov[2].iov_base = (void *) s2->ticket;
654 iov[2].iov_len = s2->ticket_len;
655
656 len = iov[0].iov_len + iov[1].iov_len + iov[2].iov_len;
657
658 hdr->serial = htonl(atomic_inc_return(&conn->serial));
659 _proto("Tx RESPONSE %%%u", ntohl(hdr->serial));
660
661 ret = kernel_sendmsg(conn->trans->local->socket, &msg, iov, 3, len);
662 if (ret < 0) {
663 _debug("sendmsg failed: %d", ret);
664 return -EAGAIN;
665 }
666
667 _leave(" = 0");
668 return 0;
669}
670
671/*
672 * calculate the response checksum
673 */
674static void rxkad_calc_response_checksum(struct rxkad_response *response)
675{
676 u32 csum = 1000003;
677 int loop;
678 u8 *p = (u8 *) response;
679
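	/* simple multiplicative hash over the entire response; note that the
	 * checksum field itself is still zero at this point */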
680 for (loop = sizeof(*response); loop > 0; loop--)
681 csum = csum * 0x10204081 + *p++;
682
683 response->encrypted.checksum = htonl(csum);
684}
685
686/*
687 * load a scatterlist with a potentially split-page buffer
688 */
689static void rxkad_sg_set_buf2(struct scatterlist sg[2],
690 void *buf, size_t buflen)
691{
693	memset(sg, 0, sizeof(*sg) * 2);	/* sizeof(sg) would only be the size of a pointer here */
694
695 sg_set_buf(&sg[0], buf, buflen);
696 if (sg[0].offset + buflen > PAGE_SIZE) {
697 /* the buffer was split over two pages */
698 sg[0].length = PAGE_SIZE - sg[0].offset;
699 sg_set_buf(&sg[1], buf + sg[0].length, buflen - sg[0].length);
700 }
701
702 ASSERTCMP(sg[0].length + sg[1].length, ==, buflen);
703}
704
705/*
706 * encrypt the response packet
707 */
708static void rxkad_encrypt_response(struct rxrpc_connection *conn,
709 struct rxkad_response *resp,
710 const struct rxkad_key *s2)
711{
712 struct blkcipher_desc desc;
713 struct rxrpc_crypt iv;
714 struct scatterlist ssg[2], dsg[2];
715
716	/* encrypt the response using the Kerberos session key */
717 memcpy(&iv, s2->session_key, sizeof(iv));
718 desc.tfm = conn->cipher;
719 desc.info = iv.x;
720 desc.flags = 0;
721
722 rxkad_sg_set_buf2(ssg, &resp->encrypted, sizeof(resp->encrypted));
723 memcpy(dsg, ssg, sizeof(dsg));
724 crypto_blkcipher_encrypt_iv(&desc, dsg, ssg, sizeof(resp->encrypted));
725}
726
727/*
728 * respond to a challenge packet
729 */
730static int rxkad_respond_to_challenge(struct rxrpc_connection *conn,
731 struct sk_buff *skb,
732 u32 *_abort_code)
733{
734 const struct rxrpc_key_payload *payload;
735 struct rxkad_challenge challenge;
736 struct rxkad_response resp
737 __attribute__((aligned(8))); /* must be aligned for crypto */
738 struct rxrpc_skb_priv *sp;
739 u32 version, nonce, min_level, abort_code;
740 int ret;
741
742 _enter("{%d,%x}", conn->debug_id, key_serial(conn->key));
743
744 if (!conn->key) {
745 _leave(" = -EPROTO [no key]");
746 return -EPROTO;
747 }
748
749 ret = key_validate(conn->key);
750 if (ret < 0) {
751 *_abort_code = RXKADEXPIRED;
752 return ret;
753 }
754
755 abort_code = RXKADPACKETSHORT;
756 sp = rxrpc_skb(skb);
757 if (skb_copy_bits(skb, 0, &challenge, sizeof(challenge)) < 0)
758 goto protocol_error;
759
760 version = ntohl(challenge.version);
761 nonce = ntohl(challenge.nonce);
762 min_level = ntohl(challenge.min_level);
763
764 _proto("Rx CHALLENGE %%%u { v=%u n=%u ml=%u }",
765 ntohl(sp->hdr.serial), version, nonce, min_level);
766
767 abort_code = RXKADINCONSISTENCY;
768 if (version != RXKAD_VERSION)
769 goto protocol_error;
770
771 abort_code = RXKADLEVELFAIL;
772 if (conn->security_level < min_level)
773 goto protocol_error;
774
775 payload = conn->key->payload.data;
776
777 /* build the response packet */
778 memset(&resp, 0, sizeof(resp));
779
780 resp.version = RXKAD_VERSION;
781 resp.encrypted.epoch = conn->epoch;
782 resp.encrypted.cid = conn->cid;
783 resp.encrypted.securityIndex = htonl(conn->security_ix);
784 resp.encrypted.call_id[0] =
785 (conn->channels[0] ? conn->channels[0]->call_id : 0);
786 resp.encrypted.call_id[1] =
787 (conn->channels[1] ? conn->channels[1]->call_id : 0);
788 resp.encrypted.call_id[2] =
789 (conn->channels[2] ? conn->channels[2]->call_id : 0);
790 resp.encrypted.call_id[3] =
791 (conn->channels[3] ? conn->channels[3]->call_id : 0);
792 resp.encrypted.inc_nonce = htonl(nonce + 1);
793 resp.encrypted.level = htonl(conn->security_level);
794 resp.kvno = htonl(payload->k.kvno);
795 resp.ticket_len = htonl(payload->k.ticket_len);
796
797 /* calculate the response checksum and then do the encryption */
798 rxkad_calc_response_checksum(&resp);
799 rxkad_encrypt_response(conn, &resp, &payload->k);
800 return rxkad_send_response(conn, &sp->hdr, &resp, &payload->k);
801
802protocol_error:
803 *_abort_code = abort_code;
804 _leave(" = -EPROTO [%d]", abort_code);
805 return -EPROTO;
806}
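
/*
 * In summary: the server's CHALLENGE carries { version, nonce, min_level };
 * the client proves possession of the session key by returning a RESPONSE
 * whose encrypted section holds nonce + 1 and the current call ID of each
 * channel, along with the kvno and the ticket from which the server can
 * recover that same key.
 */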
807
808/*
809 * decrypt the kerberos IV ticket in the response
810 */
811static int rxkad_decrypt_ticket(struct rxrpc_connection *conn,
812 void *ticket, size_t ticket_len,
813 struct rxrpc_crypt *_session_key,
814 time_t *_expiry,
815 u32 *_abort_code)
816{
817 struct blkcipher_desc desc;
818 struct rxrpc_crypt iv, key;
819 struct scatterlist ssg[1], dsg[1];
820 struct in_addr addr;
821 unsigned life;
822 time_t issue, now;
823 bool little_endian;
824 int ret;
825 u8 *p, *q, *name, *end;
826
827 _enter("{%d},{%x}", conn->debug_id, key_serial(conn->server_key));
828
829 *_expiry = 0;
830
831 ret = key_validate(conn->server_key);
832 if (ret < 0) {
833 switch (ret) {
834 case -EKEYEXPIRED:
835 *_abort_code = RXKADEXPIRED;
836 goto error;
837 default:
838 *_abort_code = RXKADNOAUTH;
839 goto error;
840 }
841 }
842
843 ASSERT(conn->server_key->payload.data != NULL);
844 ASSERTCMP((unsigned long) ticket & 7UL, ==, 0);
845
846 memcpy(&iv, &conn->server_key->type_data, sizeof(iv));
847
848 desc.tfm = conn->server_key->payload.data;
849 desc.info = iv.x;
850 desc.flags = 0;
851
852 sg_init_one(&ssg[0], ticket, ticket_len);
853 memcpy(dsg, ssg, sizeof(dsg));
854 crypto_blkcipher_decrypt_iv(&desc, dsg, ssg, ticket_len);
855
856 p = ticket;
857 end = p + ticket_len;
858
859#define Z(size) \
860 ({ \
861 u8 *__str = p; \
862 q = memchr(p, 0, end - p); \
863 if (!q || q - p > (size)) \
864 goto bad_ticket; \
865 for (; p < q; p++) \
866 if (!isprint(*p)) \
867 goto bad_ticket; \
868 p++; \
869 __str; \
870 })
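
/*
 * Z(size) consumes one NUL-terminated string of at most size printable
 * characters from the ticket at p, advances p past the terminator and
 * evaluates to the start of the string; an unterminated, oversized or
 * non-printable field bails out to bad_ticket.
 */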
871
872 /* extract the ticket flags */
873 _debug("KIV FLAGS: %x", *p);
874 little_endian = *p & 1;
875 p++;
876
877 /* extract the authentication name */
878 name = Z(ANAME_SZ);
879 _debug("KIV ANAME: %s", name);
880
881 /* extract the principal's instance */
882 name = Z(INST_SZ);
883 _debug("KIV INST : %s", name);
884
885 /* extract the principal's authentication domain */
886 name = Z(REALM_SZ);
887 _debug("KIV REALM: %s", name);
888
889 if (end - p < 4 + 8 + 4 + 2)
890 goto bad_ticket;
891
892 /* get the IPv4 address of the entity that requested the ticket */
893 memcpy(&addr, p, sizeof(addr));
894 p += 4;
895 _debug("KIV ADDR : "NIPQUAD_FMT, NIPQUAD(addr));
896
897 /* get the session key from the ticket */
898 memcpy(&key, p, sizeof(key));
899 p += 8;
900 _debug("KIV KEY : %08x %08x", ntohl(key.n[0]), ntohl(key.n[1]));
901 memcpy(_session_key, &key, sizeof(key));
902
903	/* get the ticket's lifetime, sent in units of 5 minutes */
904 life = *p++ * 5 * 60;
905 _debug("KIV LIFE : %u", life);
906
907 /* get the issue time of the ticket */
908 if (little_endian) {
909 __le32 stamp;
910 memcpy(&stamp, p, 4);
911 issue = le32_to_cpu(stamp);
912 } else {
913 __be32 stamp;
914 memcpy(&stamp, p, 4);
915 issue = be32_to_cpu(stamp);
916 }
917 p += 4;
918 now = xtime.tv_sec;
919 _debug("KIV ISSUE: %lx [%lx]", issue, now);
920
921 /* check the ticket is in date */
922 if (issue > now) {
923 *_abort_code = RXKADNOAUTH;
924 ret = -EKEYREJECTED;
925 goto error;
926 }
927
928 if (issue < now - life) {
929 *_abort_code = RXKADEXPIRED;
930 ret = -EKEYEXPIRED;
931 goto error;
932 }
933
934 *_expiry = issue + life;
935
936 /* get the service name */
937 name = Z(SNAME_SZ);
938 _debug("KIV SNAME: %s", name);
939
940 /* get the service instance name */
941 name = Z(INST_SZ);
942 _debug("KIV SINST: %s", name);
943
944 ret = 0;
945error:
946 _leave(" = %d", ret);
947 return ret;
948
949bad_ticket:
950 *_abort_code = RXKADBADTICKET;
951 ret = -EBADMSG;
952 goto error;
953}
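
/*
 * Layout of the decrypted KerberosIV ticket as parsed above (strings are
 * NUL-terminated):
 *
 *	flags		1 byte	bit 0: timestamps are little-endian
 *	name/inst/realm	var	client principal
 *	address		4 bytes	IPv4 address of the requester
 *	session key	8 bytes	copied out to the caller
 *	lifetime	1 byte	in units of five minutes
 *	issue time	4 bytes	byte order given by the flags bit
 *	sname/sinst	var	service principal
 */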
954
955/*
956 * decrypt the response packet
957 */
958static void rxkad_decrypt_response(struct rxrpc_connection *conn,
959 struct rxkad_response *resp,
960 const struct rxrpc_crypt *session_key)
961{
962 struct blkcipher_desc desc;
963 struct scatterlist ssg[2], dsg[2];
964 struct rxrpc_crypt iv;
965
966 _enter(",,%08x%08x",
967 ntohl(session_key->n[0]), ntohl(session_key->n[1]));
968
969 ASSERT(rxkad_ci != NULL);
970
971 mutex_lock(&rxkad_ci_mutex);
972 if (crypto_blkcipher_setkey(rxkad_ci, session_key->x,
973 sizeof(*session_key)) < 0)
974 BUG();
975
976 memcpy(&iv, session_key, sizeof(iv));
977 desc.tfm = rxkad_ci;
978 desc.info = iv.x;
979 desc.flags = 0;
980
981 rxkad_sg_set_buf2(ssg, &resp->encrypted, sizeof(resp->encrypted));
982 memcpy(dsg, ssg, sizeof(dsg));
983 crypto_blkcipher_decrypt_iv(&desc, dsg, ssg, sizeof(resp->encrypted));
984 mutex_unlock(&rxkad_ci_mutex);
985
986 _leave("");
987}
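
/*
 * rxkad_ci is a single cipher instance shared by all connections (it is
 * allocated once in rxkad_init() below), which is why the setkey and the
 * decrypt above must be carried out as one unit under rxkad_ci_mutex.
 */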
988
989/*
990 * verify a response
991 */
992static int rxkad_verify_response(struct rxrpc_connection *conn,
993 struct sk_buff *skb,
994 u32 *_abort_code)
995{
996 struct rxkad_response response
997 __attribute__((aligned(8))); /* must be aligned for crypto */
998 struct rxrpc_skb_priv *sp;
999 struct rxrpc_crypt session_key;
1000 time_t expiry;
1001 void *ticket;
1002 u32 abort_code, version, kvno, ticket_len, csum, level;
1003 int ret;
1004
1005 _enter("{%d,%x}", conn->debug_id, key_serial(conn->server_key));
1006
1007 abort_code = RXKADPACKETSHORT;
1008 if (skb_copy_bits(skb, 0, &response, sizeof(response)) < 0)
1009 goto protocol_error;
1010 if (!pskb_pull(skb, sizeof(response)))
1011 BUG();
1012
1013 version = ntohl(response.version);
1014 ticket_len = ntohl(response.ticket_len);
1015 kvno = ntohl(response.kvno);
1016 sp = rxrpc_skb(skb);
1017 _proto("Rx RESPONSE %%%u { v=%u kv=%u tl=%u }",
1018 ntohl(sp->hdr.serial), version, kvno, ticket_len);
1019
1020 abort_code = RXKADINCONSISTENCY;
1021 if (version != RXKAD_VERSION)
1022		goto protocol_error;

1023 abort_code = RXKADTICKETLEN;
1024 if (ticket_len < 4 || ticket_len > MAXKRB5TICKETLEN)
1025 goto protocol_error;
1026
1027 abort_code = RXKADUNKNOWNKEY;
1028	if (kvno >= RXKAD_TKT_TYPE_KERBEROS_V5) /* kvno in this range denotes a krb5 ticket */
1029 goto protocol_error;
1030
1031 /* extract the kerberos ticket and decrypt and decode it */
1032 ticket = kmalloc(ticket_len, GFP_NOFS);
1033 if (!ticket)
1034 return -ENOMEM;
1035
1036 abort_code = RXKADPACKETSHORT;
1037 if (skb_copy_bits(skb, 0, ticket, ticket_len) < 0)
1038 goto protocol_error_free;
1039
1040 ret = rxkad_decrypt_ticket(conn, ticket, ticket_len, &session_key,
1041 &expiry, &abort_code);
1042 if (ret < 0) {
1043 *_abort_code = abort_code;
1044 kfree(ticket);
1045 return ret;
1046 }
1047
1048 /* use the session key from inside the ticket to decrypt the
1049 * response */
1050 rxkad_decrypt_response(conn, &response, &session_key);
1051
1052 abort_code = RXKADSEALEDINCON;
1053 if (response.encrypted.epoch != conn->epoch)
1054 goto protocol_error_free;
1055 if (response.encrypted.cid != conn->cid)
1056 goto protocol_error_free;
1057 if (ntohl(response.encrypted.securityIndex) != conn->security_ix)
1058 goto protocol_error_free;
1059 csum = response.encrypted.checksum;
1060 response.encrypted.checksum = 0;
1061 rxkad_calc_response_checksum(&response);
1062 if (response.encrypted.checksum != csum)
1063 goto protocol_error_free;
1064
1065 if (ntohl(response.encrypted.call_id[0]) > INT_MAX ||
1066 ntohl(response.encrypted.call_id[1]) > INT_MAX ||
1067 ntohl(response.encrypted.call_id[2]) > INT_MAX ||
1068 ntohl(response.encrypted.call_id[3]) > INT_MAX)
1069 goto protocol_error_free;
1070
1071 abort_code = RXKADOUTOFSEQUENCE;
1072 if (response.encrypted.inc_nonce != htonl(conn->security_nonce + 1))
1073 goto protocol_error_free;
1074
1075 abort_code = RXKADLEVELFAIL;
1076 level = ntohl(response.encrypted.level);
1077 if (level > RXRPC_SECURITY_ENCRYPT)
1078 goto protocol_error_free;
1079 conn->security_level = level;
1080
1081 /* create a key to hold the security data and expiration time - after
1082 * this the connection security can be handled in exactly the same way
1083 * as for a client connection */
1084 ret = rxrpc_get_server_data_key(conn, &session_key, expiry, kvno);
1085 if (ret < 0) {
1086 kfree(ticket);
1087 return ret;
1088 }
1089
1090 kfree(ticket);
1091 _leave(" = 0");
1092 return 0;
1093
1094protocol_error_free:
1095 kfree(ticket);
1096protocol_error:
1097 *_abort_code = abort_code;
1098 _leave(" = -EPROTO [%d]", abort_code);
1099 return -EPROTO;
1100}
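
/*
 * Every check above must pass before the connection is secured: the
 * ticket must decrypt under the server key, the encrypted section must
 * match this connection's epoch, cid and security index, the checksum
 * must verify, the call IDs must be in range, the nonce must have been
 * incremented and the requested security level must be one we support.
 */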
1101
1102/*
1103 * clear the connection security
1104 */
1105static void rxkad_clear(struct rxrpc_connection *conn)
1106{
1107 _enter("");
1108
1109 if (conn->cipher)
1110 crypto_free_blkcipher(conn->cipher);
1111}
1112
1113/*
1114 * RxRPC Kerberos-based security
1115 */
1116static struct rxrpc_security rxkad = {
1117 .owner = THIS_MODULE,
1118 .name = "rxkad",
1119 .security_index = RXKAD_VERSION,
1120 .init_connection_security = rxkad_init_connection_security,
1121 .prime_packet_security = rxkad_prime_packet_security,
1122 .secure_packet = rxkad_secure_packet,
1123 .verify_packet = rxkad_verify_packet,
1124 .issue_challenge = rxkad_issue_challenge,
1125 .respond_to_challenge = rxkad_respond_to_challenge,
1126 .verify_response = rxkad_verify_response,
1127 .clear = rxkad_clear,
1128};
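
/*
 * The security_index (RXKAD_VERSION, i.e. 2) is the value carried in the
 * securityIndex field of the rxrpc packet header, and is what maps
 * incoming secured connections onto this table of operations.
 */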
1129
1130static __init int rxkad_init(void)
1131{
1132 _enter("");
1133
1134 /* pin the cipher we need so that the crypto layer doesn't invoke
1135 * keventd to go get it */
1136 rxkad_ci = crypto_alloc_blkcipher("pcbc(fcrypt)", 0, CRYPTO_ALG_ASYNC);
1137 if (IS_ERR(rxkad_ci))
1138 return PTR_ERR(rxkad_ci);
1139
1140 return rxrpc_register_security(&rxkad);
1141}
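
/*
 * "pcbc(fcrypt)" selects the FCrypt block cipher wrapped in the
 * propagating-CBC mode template, the combination rxkad uses on the wire;
 * holding this reference for the module's lifetime means that later
 * per-connection allocations find the algorithm already loaded.
 */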
1142
1143module_init(rxkad_init);
1144
1145static __exit void rxkad_exit(void)
1146{
1147 _enter("");
1148
1149 rxrpc_unregister_security(&rxkad);
1150 crypto_free_blkcipher(rxkad_ci);
1151}
1152
1153module_exit(rxkad_exit);