diff options
author | Linus Torvalds <torvalds@woody.linux-foundation.org> | 2007-04-27 12:26:46 -0400 |
---|---|---|
committer | Linus Torvalds <torvalds@woody.linux-foundation.org> | 2007-04-27 12:26:46 -0400 |
commit | 15c54033964a943de7b0763efd3bd0ede7326395 (patch) | |
tree | 840b292612d1b5396d5bab5bde537a9013db3ceb /net/rxrpc | |
parent | ad5da3cf39a5b11a198929be1f2644e17ecd767e (diff) | |
parent | 912a41a4ab935ce8c4308428ec13fc7f8b1f18f4 (diff) |
Merge master.kernel.org:/pub/scm/linux/kernel/git/davem/net-2.6
* master.kernel.org:/pub/scm/linux/kernel/git/davem/net-2.6: (448 commits)
[IPV4] nl_fib_lookup: Initialise res.r before fib_res_put(&res)
[IPV6]: Fix thinko in ipv6_rthdr_rcv() changes.
[IPV4]: Add multipath cached to feature-removal-schedule.txt
[WIRELESS] cfg80211: Clarify locking comment.
[WIRELESS] cfg80211: Fix locking in wiphy_new.
[WEXT] net_device: Don't include wext bits if not required.
[WEXT]: Misc code cleanups.
[WEXT]: Reduce inline abuse.
[WEXT]: Move EXPORT_SYMBOL statements where they belong.
[WEXT]: Cleanup early ioctl call path.
[WEXT]: Remove options.
[WEXT]: Remove dead debug code.
[WEXT]: Clean up how wext is called.
[WEXT]: Move to net/wireless
[AFS]: Eliminate cmpxchg() usage in vlocation code.
[RXRPC]: Fix pointers passed to bitops.
[RXRPC]: Remove bogus atomic_* overrides.
[AFS]: Fix u64 printing in debug logging.
[AFS]: Add "directory write" support.
[AFS]: Implement the CB.InitCallBackState3 operation.
...
Diffstat (limited to 'net/rxrpc')
33 files changed, 10821 insertions, 6110 deletions
diff --git a/net/rxrpc/Kconfig b/net/rxrpc/Kconfig new file mode 100644 index 000000000000..d72380e304ae --- /dev/null +++ b/net/rxrpc/Kconfig | |||
@@ -0,0 +1,37 @@ | |||
#
# RxRPC session sockets
#
# AF_RXRPC provides the Rx RPC transport only; (un)marshalling of call
# payloads is left to the consumer (eg. the AFS filesystem).
#

config AF_RXRPC
	tristate "RxRPC session sockets"
	depends on EXPERIMENTAL
	help
	  Say Y or M here to include support for RxRPC session sockets (just
	  the transport part, not the presentation part: (un)marshalling is
	  left to the application).

	  These are used for AFS kernel filesystem and userspace utilities.

	  This module at the moment only supports client operations and is
	  currently incomplete.

	  See Documentation/networking/rxrpc.txt.


config AF_RXRPC_DEBUG
	bool "RxRPC dynamic debugging"
	depends on AF_RXRPC
	help
	  Say Y here to make runtime controllable debugging messages appear.

	  See Documentation/networking/rxrpc.txt.


# rxkad is a separate module layered on AF_RXRPC; it needs the key retention
# service (CONFIG_KEYS) to store Kerberos tickets
config RXKAD
	tristate "RxRPC Kerberos security"
	depends on AF_RXRPC && KEYS
	help
	  Provide kerberos 4 and AFS kaserver security handling for AF_RXRPC
	  through the use of the key retention service.

	  See Documentation/networking/rxrpc.txt.
diff --git a/net/rxrpc/Makefile b/net/rxrpc/Makefile index 6efcb6f162a0..c46867c61c98 100644 --- a/net/rxrpc/Makefile +++ b/net/rxrpc/Makefile | |||
@@ -1,25 +1,29 @@ | |||
1 | # | 1 | # |
2 | # Makefile for Linux kernel Rx RPC | 2 | # Makefile for Linux kernel RxRPC |
3 | # | 3 | # |
4 | 4 | ||
5 | #CFLAGS += -finstrument-functions | 5 | af-rxrpc-objs := \ |
6 | 6 | af_rxrpc.o \ | |
7 | rxrpc-objs := \ | 7 | ar-accept.o \ |
8 | call.o \ | 8 | ar-ack.o \ |
9 | connection.o \ | 9 | ar-call.o \ |
10 | krxiod.o \ | 10 | ar-connection.o \ |
11 | krxsecd.o \ | 11 | ar-connevent.o \ |
12 | krxtimod.o \ | 12 | ar-error.o \ |
13 | main.o \ | 13 | ar-input.o \ |
14 | peer.o \ | 14 | ar-key.o \ |
15 | rxrpc_syms.o \ | 15 | ar-local.o \ |
16 | transport.o | 16 | ar-output.o \ |
17 | ar-peer.o \ | ||
18 | ar-recvmsg.o \ | ||
19 | ar-security.o \ | ||
20 | ar-skbuff.o \ | ||
21 | ar-transport.o | ||
17 | 22 | ||
18 | ifeq ($(CONFIG_PROC_FS),y) | 23 | ifeq ($(CONFIG_PROC_FS),y) |
19 | rxrpc-objs += proc.o | 24 | af-rxrpc-objs += ar-proc.o |
20 | endif | ||
21 | ifeq ($(CONFIG_SYSCTL),y) | ||
22 | rxrpc-objs += sysctl.o | ||
23 | endif | 25 | endif |
24 | 26 | ||
25 | obj-$(CONFIG_RXRPC) := rxrpc.o | 27 | obj-$(CONFIG_AF_RXRPC) += af-rxrpc.o |
28 | |||
29 | obj-$(CONFIG_RXKAD) += rxkad.o | ||
diff --git a/net/rxrpc/af_rxrpc.c b/net/rxrpc/af_rxrpc.c new file mode 100644 index 000000000000..2c57df9c131b --- /dev/null +++ b/net/rxrpc/af_rxrpc.c | |||
@@ -0,0 +1,879 @@ | |||
1 | /* AF_RXRPC implementation | ||
2 | * | ||
3 | * Copyright (C) 2007 Red Hat, Inc. All Rights Reserved. | ||
4 | * Written by David Howells (dhowells@redhat.com) | ||
5 | * | ||
6 | * This program is free software; you can redistribute it and/or | ||
7 | * modify it under the terms of the GNU General Public License | ||
8 | * as published by the Free Software Foundation; either version | ||
9 | * 2 of the License, or (at your option) any later version. | ||
10 | */ | ||
11 | |||
12 | #include <linux/module.h> | ||
13 | #include <linux/net.h> | ||
14 | #include <linux/skbuff.h> | ||
15 | #include <linux/poll.h> | ||
16 | #include <linux/proc_fs.h> | ||
17 | #include <net/sock.h> | ||
18 | #include <net/af_rxrpc.h> | ||
19 | #include "ar-internal.h" | ||
20 | |||
21 | MODULE_DESCRIPTION("RxRPC network protocol"); | ||
22 | MODULE_AUTHOR("Red Hat, Inc."); | ||
23 | MODULE_LICENSE("GPL"); | ||
24 | MODULE_ALIAS_NETPROTO(PF_RXRPC); | ||
25 | |||
26 | unsigned rxrpc_debug; // = RXRPC_DEBUG_KPROTO; | ||
27 | module_param_named(debug, rxrpc_debug, uint, S_IWUSR | S_IRUGO); | ||
28 | MODULE_PARM_DESC(rxrpc_debug, "RxRPC debugging mask"); | ||
29 | |||
30 | static int sysctl_rxrpc_max_qlen __read_mostly = 10; | ||
31 | |||
32 | static struct proto rxrpc_proto; | ||
33 | static const struct proto_ops rxrpc_rpc_ops; | ||
34 | |||
35 | /* local epoch for detecting local-end reset */ | ||
36 | __be32 rxrpc_epoch; | ||
37 | |||
38 | /* current debugging ID */ | ||
39 | atomic_t rxrpc_debug_id; | ||
40 | |||
41 | /* count of skbs currently in use */ | ||
42 | atomic_t rxrpc_n_skbs; | ||
43 | |||
44 | struct workqueue_struct *rxrpc_workqueue; | ||
45 | |||
46 | static void rxrpc_sock_destructor(struct sock *); | ||
47 | |||
48 | /* | ||
49 | * see if an RxRPC socket is currently writable | ||
50 | */ | ||
51 | static inline int rxrpc_writable(struct sock *sk) | ||
52 | { | ||
53 | return atomic_read(&sk->sk_wmem_alloc) < (size_t) sk->sk_sndbuf; | ||
54 | } | ||
55 | |||
/*
 * wait for write bufferage to become available
 * - sk_write_space callback: wakes any process sleeping on the socket's wait
 *   queue and kicks async waiters once the socket becomes writable again
 */
static void rxrpc_write_space(struct sock *sk)
{
	_enter("%p", sk);
	/* callback lock guards sk_sleep against concurrent teardown */
	read_lock(&sk->sk_callback_lock);
	if (rxrpc_writable(sk)) {
		if (sk->sk_sleep && waitqueue_active(sk->sk_sleep))
			wake_up_interruptible(sk->sk_sleep);
		sk_wake_async(sk, 2, POLL_OUT);
	}
	read_unlock(&sk->sk_callback_lock);
}
70 | |||
/*
 * validate an RxRPC address
 * - checks the sockaddr is large enough, is family AF_RXRPC, names a
 *   SOCK_DGRAM transport and matches the socket's transport family
 * - zero-pads the unused tail of an AF_INET transport address so that
 *   addresses can later be compared with memcmp()
 */
static int rxrpc_validate_address(struct rxrpc_sock *rx,
				  struct sockaddr_rxrpc *srx,
				  int len)
{
	if (len < sizeof(struct sockaddr_rxrpc))
		return -EINVAL;

	if (srx->srx_family != AF_RXRPC)
		return -EAFNOSUPPORT;

	if (srx->transport_type != SOCK_DGRAM)
		return -ESOCKTNOSUPPORT;

	/* the embedded transport address must fit in the remaining space */
	len -= offsetof(struct sockaddr_rxrpc, transport);
	if (srx->transport_len < sizeof(sa_family_t) ||
	    srx->transport_len > len)
		return -EINVAL;

	if (srx->transport.family != rx->proto)
		return -EAFNOSUPPORT;

	switch (srx->transport.family) {
	case AF_INET:
		_debug("INET: %x @ %u.%u.%u.%u",
		       ntohs(srx->transport.sin.sin_port),
		       NIPQUAD(srx->transport.sin.sin_addr));
		/* the first 8 bytes cover family + port + IPv4 address;
		 * clear anything beyond them */
		if (srx->transport_len > 8)
			memset((void *)&srx->transport + 8, 0,
			       srx->transport_len - 8);
		break;

	case AF_INET6:
	default:
		/* only the IPv4 transport is supported at present */
		return -EAFNOSUPPORT;
	}

	return 0;
}
112 | |||
/*
 * bind a local address to an RxRPC socket
 * - may only be done once, and only whilst the socket is unconnected
 * - a non-zero srx_service makes this a server socket: the service ID is
 *   registered against the local endpoint (at most one socket per service
 *   ID per endpoint)
 */
static int rxrpc_bind(struct socket *sock, struct sockaddr *saddr, int len)
{
	struct sockaddr_rxrpc *srx = (struct sockaddr_rxrpc *) saddr;
	struct sock *sk = sock->sk;
	struct rxrpc_local *local;
	struct rxrpc_sock *rx = rxrpc_sk(sk), *prx;
	__be16 service_id;
	int ret;

	_enter("%p,%p,%d", rx, saddr, len);

	ret = rxrpc_validate_address(rx, srx, len);
	if (ret < 0)
		goto error;

	lock_sock(&rx->sk);

	/* rebinding or binding after connect/listen is not permitted */
	if (rx->sk.sk_state != RXRPC_UNCONNECTED) {
		ret = -EINVAL;
		goto error_unlock;
	}

	memcpy(&rx->srx, srx, sizeof(rx->srx));

	/* find a local transport endpoint if we don't have one already */
	local = rxrpc_lookup_local(&rx->srx);
	if (IS_ERR(local)) {
		ret = PTR_ERR(local);
		goto error_unlock;
	}

	/* the local ref is held by rx->local until socket release */
	rx->local = local;
	if (srx->srx_service) {
		/* server bind: refuse a service ID already claimed on this
		 * local endpoint */
		service_id = htons(srx->srx_service);
		write_lock_bh(&local->services_lock);
		list_for_each_entry(prx, &local->services, listen_link) {
			if (prx->service_id == service_id)
				goto service_in_use;
		}

		rx->service_id = service_id;
		list_add_tail(&rx->listen_link, &local->services);
		write_unlock_bh(&local->services_lock);

		rx->sk.sk_state = RXRPC_SERVER_BOUND;
	} else {
		rx->sk.sk_state = RXRPC_CLIENT_BOUND;
	}

	release_sock(&rx->sk);
	_leave(" = 0");
	return 0;

service_in_use:
	ret = -EADDRINUSE;
	write_unlock_bh(&local->services_lock);
error_unlock:
	release_sock(&rx->sk);
error:
	_leave(" = %d", ret);
	return ret;
}
178 | |||
/*
 * set the number of pending calls permitted on a listening socket
 * - the socket must previously have been bound with a service ID
 *   (RXRPC_SERVER_BOUND); unbound and client sockets are rejected
 */
static int rxrpc_listen(struct socket *sock, int backlog)
{
	struct sock *sk = sock->sk;
	struct rxrpc_sock *rx = rxrpc_sk(sk);
	int ret;

	_enter("%p,%d", rx, backlog);

	lock_sock(&rx->sk);

	switch (rx->sk.sk_state) {
	case RXRPC_UNCONNECTED:
		/* not yet bound to a local address */
		ret = -EADDRNOTAVAIL;
		break;
	case RXRPC_CLIENT_BOUND:
	case RXRPC_CLIENT_CONNECTED:
	default:
		/* client sockets (and anything else) can't listen */
		ret = -EBUSY;
		break;
	case RXRPC_SERVER_BOUND:
		ASSERT(rx->local != NULL);
		sk->sk_max_ack_backlog = backlog;
		rx->sk.sk_state = RXRPC_SERVER_LISTENING;
		ret = 0;
		break;
	}

	release_sock(&rx->sk);
	_leave(" = %d", ret);
	return ret;
}
213 | |||
/*
 * find a transport by address
 * - looks up (or creates) the remote peer record for @addr, then a transport
 *   pairing that peer with the socket's local endpoint
 * - returns a transport with a usage reference held, or an ERR_PTR()
 */
static struct rxrpc_transport *rxrpc_name_to_transport(struct socket *sock,
						       struct sockaddr *addr,
						       int addr_len, int flags,
						       gfp_t gfp)
{
	struct sockaddr_rxrpc *srx = (struct sockaddr_rxrpc *) addr;
	struct rxrpc_transport *trans;
	struct rxrpc_sock *rx = rxrpc_sk(sock->sk);
	struct rxrpc_peer *peer;

	_enter("%p,%p,%d,%d", rx, addr, addr_len, flags);

	ASSERT(rx->local != NULL);
	ASSERT(rx->sk.sk_state > RXRPC_UNCONNECTED);

	/* the target must match the transport the socket was bound with */
	if (rx->srx.transport_type != srx->transport_type)
		return ERR_PTR(-ESOCKTNOSUPPORT);
	if (rx->srx.transport.family != srx->transport.family)
		return ERR_PTR(-EAFNOSUPPORT);

	/* find a remote transport endpoint from the local one */
	peer = rxrpc_get_peer(srx, gfp);
	if (IS_ERR(peer))
		return ERR_PTR(PTR_ERR(peer));

	/* find a transport; the transport holds its own peer ref, so ours
	 * can be dropped */
	trans = rxrpc_get_transport(rx->local, peer, gfp);
	rxrpc_put_peer(peer);
	_leave(" = %p", trans);
	return trans;
}
248 | |||
249 | /** | ||
250 | * rxrpc_kernel_begin_call - Allow a kernel service to begin a call | ||
251 | * @sock: The socket on which to make the call | ||
252 | * @srx: The address of the peer to contact (defaults to socket setting) | ||
253 | * @key: The security context to use (defaults to socket setting) | ||
254 | * @user_call_ID: The ID to use | ||
255 | * | ||
256 | * Allow a kernel service to begin a call on the nominated socket. This just | ||
257 | * sets up all the internal tracking structures and allocates connection and | ||
258 | * call IDs as appropriate. The call to be used is returned. | ||
259 | * | ||
260 | * The default socket destination address and security may be overridden by | ||
261 | * supplying @srx and @key. | ||
262 | */ | ||
263 | struct rxrpc_call *rxrpc_kernel_begin_call(struct socket *sock, | ||
264 | struct sockaddr_rxrpc *srx, | ||
265 | struct key *key, | ||
266 | unsigned long user_call_ID, | ||
267 | gfp_t gfp) | ||
268 | { | ||
269 | struct rxrpc_conn_bundle *bundle; | ||
270 | struct rxrpc_transport *trans; | ||
271 | struct rxrpc_call *call; | ||
272 | struct rxrpc_sock *rx = rxrpc_sk(sock->sk); | ||
273 | __be16 service_id; | ||
274 | |||
275 | _enter(",,%x,%lx", key_serial(key), user_call_ID); | ||
276 | |||
277 | lock_sock(&rx->sk); | ||
278 | |||
279 | if (srx) { | ||
280 | trans = rxrpc_name_to_transport(sock, (struct sockaddr *) srx, | ||
281 | sizeof(*srx), 0, gfp); | ||
282 | if (IS_ERR(trans)) { | ||
283 | call = ERR_PTR(PTR_ERR(trans)); | ||
284 | trans = NULL; | ||
285 | goto out; | ||
286 | } | ||
287 | } else { | ||
288 | trans = rx->trans; | ||
289 | if (!trans) { | ||
290 | call = ERR_PTR(-ENOTCONN); | ||
291 | goto out; | ||
292 | } | ||
293 | atomic_inc(&trans->usage); | ||
294 | } | ||
295 | |||
296 | service_id = rx->service_id; | ||
297 | if (srx) | ||
298 | service_id = htons(srx->srx_service); | ||
299 | |||
300 | if (!key) | ||
301 | key = rx->key; | ||
302 | if (key && !key->payload.data) | ||
303 | key = NULL; /* a no-security key */ | ||
304 | |||
305 | bundle = rxrpc_get_bundle(rx, trans, key, service_id, gfp); | ||
306 | if (IS_ERR(bundle)) { | ||
307 | call = ERR_PTR(PTR_ERR(bundle)); | ||
308 | goto out; | ||
309 | } | ||
310 | |||
311 | call = rxrpc_get_client_call(rx, trans, bundle, user_call_ID, true, | ||
312 | gfp); | ||
313 | rxrpc_put_bundle(trans, bundle); | ||
314 | out: | ||
315 | rxrpc_put_transport(trans); | ||
316 | release_sock(&rx->sk); | ||
317 | _leave(" = %p", call); | ||
318 | return call; | ||
319 | } | ||
320 | |||
321 | EXPORT_SYMBOL(rxrpc_kernel_begin_call); | ||
322 | |||
/**
 * rxrpc_kernel_end_call - Allow a kernel service to end a call it was using
 * @call: The call to end
 *
 * Allow a kernel service to end a call it was using. The call must be
 * complete before this is called (the call should be aborted if necessary).
 */
void rxrpc_kernel_end_call(struct rxrpc_call *call)
{
	_enter("%d{%d}", call->debug_id, atomic_read(&call->usage));
	/* detach the user-ID mapping on the owning socket, then drop the
	 * caller's reference on the call */
	rxrpc_remove_user_ID(call->socket, call);
	rxrpc_put_call(call);
}

EXPORT_SYMBOL(rxrpc_kernel_end_call);
338 | |||
339 | /** | ||
340 | * rxrpc_kernel_intercept_rx_messages - Intercept received RxRPC messages | ||
341 | * @sock: The socket to intercept received messages on | ||
342 | * @interceptor: The function to pass the messages to | ||
343 | * | ||
344 | * Allow a kernel service to intercept messages heading for the Rx queue on an | ||
345 | * RxRPC socket. They get passed to the specified function instead. | ||
346 | * @interceptor should free the socket buffers it is given. @interceptor is | ||
347 | * called with the socket receive queue spinlock held and softirqs disabled - | ||
348 | * this ensures that the messages will be delivered in the right order. | ||
349 | */ | ||
350 | void rxrpc_kernel_intercept_rx_messages(struct socket *sock, | ||
351 | rxrpc_interceptor_t interceptor) | ||
352 | { | ||
353 | struct rxrpc_sock *rx = rxrpc_sk(sock->sk); | ||
354 | |||
355 | _enter(""); | ||
356 | rx->interceptor = interceptor; | ||
357 | } | ||
358 | |||
359 | EXPORT_SYMBOL(rxrpc_kernel_intercept_rx_messages); | ||
360 | |||
/*
 * connect an RxRPC socket
 * - this just targets it at a specific destination; no actual connection
 *   negotiation takes place
 * - an unbound socket is implicitly bound to a wildcard local endpoint of
 *   the same transport family first
 */
static int rxrpc_connect(struct socket *sock, struct sockaddr *addr,
			 int addr_len, int flags)
{
	struct sockaddr_rxrpc *srx = (struct sockaddr_rxrpc *) addr;
	struct sock *sk = sock->sk;
	struct rxrpc_transport *trans;
	struct rxrpc_local *local;
	struct rxrpc_sock *rx = rxrpc_sk(sk);
	int ret;

	_enter("%p,%p,%d,%d", rx, addr, addr_len, flags);

	ret = rxrpc_validate_address(rx, srx, addr_len);
	if (ret < 0) {
		_leave(" = %d [bad addr]", ret);
		return ret;
	}

	lock_sock(&rx->sk);

	switch (rx->sk.sk_state) {
	case RXRPC_UNCONNECTED:
		/* find a local transport endpoint if we don't have one already */
		ASSERTCMP(rx->local, ==, NULL);
		rx->srx.srx_family = AF_RXRPC;
		rx->srx.srx_service = 0;
		rx->srx.transport_type = srx->transport_type;
		rx->srx.transport_len = sizeof(sa_family_t);
		rx->srx.transport.family = srx->transport.family;
		local = rxrpc_lookup_local(&rx->srx);
		if (IS_ERR(local)) {
			release_sock(&rx->sk);
			return PTR_ERR(local);
		}
		rx->local = local;
		rx->sk.sk_state = RXRPC_CLIENT_BOUND;
		/* fall through - now bound, proceed to connect */
	case RXRPC_CLIENT_BOUND:
		break;
	case RXRPC_CLIENT_CONNECTED:
		release_sock(&rx->sk);
		return -EISCONN;
	default:
		release_sock(&rx->sk);
		return -EBUSY; /* server sockets can't connect as well */
	}

	trans = rxrpc_name_to_transport(sock, addr, addr_len, flags,
					GFP_KERNEL);
	if (IS_ERR(trans)) {
		release_sock(&rx->sk);
		_leave(" = %ld", PTR_ERR(trans));
		return PTR_ERR(trans);
	}

	/* the transport ref is held by rx->trans until release */
	rx->trans = trans;
	rx->service_id = htons(srx->srx_service);
	rx->sk.sk_state = RXRPC_CLIENT_CONNECTED;

	release_sock(&rx->sk);
	return 0;
}
427 | |||
/*
 * send a message through an RxRPC socket
 * - in a client this does a number of things:
 *   - finds/sets up a connection for the security specified (if any)
 *   - initiates a call (ID in control data)
 *   - ends the request phase of a call (if MSG_MORE is not set)
 *   - sends a call data packet
 *   - may send an abort (abort code in control data)
 */
static int rxrpc_sendmsg(struct kiocb *iocb, struct socket *sock,
			 struct msghdr *m, size_t len)
{
	struct rxrpc_transport *trans;
	struct rxrpc_sock *rx = rxrpc_sk(sock->sk);
	int ret;

	_enter(",{%d},,%zu", rx->sk.sk_state, len);

	if (m->msg_flags & MSG_OOB)
		return -EOPNOTSUPP;

	if (m->msg_name) {
		ret = rxrpc_validate_address(rx, m->msg_name, m->msg_namelen);
		if (ret < 0) {
			_leave(" = %d [bad addr]", ret);
			return ret;
		}
	}

	trans = NULL;
	lock_sock(&rx->sk);

	if (m->msg_name) {
		/* explicitly addressed send: look up a transport for the
		 * named peer */
		ret = -EISCONN;
		trans = rxrpc_name_to_transport(sock, m->msg_name,
						m->msg_namelen, 0, GFP_KERNEL);
		if (IS_ERR(trans)) {
			ret = PTR_ERR(trans);
			trans = NULL;
			goto out;
		}
	} else {
		/* otherwise use the connected destination, if any */
		trans = rx->trans;
		if (trans)
			atomic_inc(&trans->usage);
	}

	switch (rx->sk.sk_state) {
	case RXRPC_SERVER_LISTENING:
		if (!m->msg_name) {
			ret = rxrpc_server_sendmsg(iocb, rx, m, len);
			break;
		}
		/* fall through - an addressed send on a server socket is
		 * handled like one on a bound socket */
	case RXRPC_SERVER_BOUND:
	case RXRPC_CLIENT_BOUND:
		if (!m->msg_name) {
			ret = -ENOTCONN;
			break;
		}
		/* fall through - an address was supplied, so send as a
		 * client */
	case RXRPC_CLIENT_CONNECTED:
		ret = rxrpc_client_sendmsg(iocb, rx, trans, m, len);
		break;
	default:
		ret = -ENOTCONN;
		break;
	}

out:
	release_sock(&rx->sk);
	if (trans)
		rxrpc_put_transport(trans);
	_leave(" = %d", ret);
	return ret;
}
502 | |||
503 | /* | ||
504 | * set RxRPC socket options | ||
505 | */ | ||
506 | static int rxrpc_setsockopt(struct socket *sock, int level, int optname, | ||
507 | char __user *optval, int optlen) | ||
508 | { | ||
509 | struct rxrpc_sock *rx = rxrpc_sk(sock->sk); | ||
510 | unsigned min_sec_level; | ||
511 | int ret; | ||
512 | |||
513 | _enter(",%d,%d,,%d", level, optname, optlen); | ||
514 | |||
515 | lock_sock(&rx->sk); | ||
516 | ret = -EOPNOTSUPP; | ||
517 | |||
518 | if (level == SOL_RXRPC) { | ||
519 | switch (optname) { | ||
520 | case RXRPC_EXCLUSIVE_CONNECTION: | ||
521 | ret = -EINVAL; | ||
522 | if (optlen != 0) | ||
523 | goto error; | ||
524 | ret = -EISCONN; | ||
525 | if (rx->sk.sk_state != RXRPC_UNCONNECTED) | ||
526 | goto error; | ||
527 | set_bit(RXRPC_SOCK_EXCLUSIVE_CONN, &rx->flags); | ||
528 | goto success; | ||
529 | |||
530 | case RXRPC_SECURITY_KEY: | ||
531 | ret = -EINVAL; | ||
532 | if (rx->key) | ||
533 | goto error; | ||
534 | ret = -EISCONN; | ||
535 | if (rx->sk.sk_state != RXRPC_UNCONNECTED) | ||
536 | goto error; | ||
537 | ret = rxrpc_request_key(rx, optval, optlen); | ||
538 | goto error; | ||
539 | |||
540 | case RXRPC_SECURITY_KEYRING: | ||
541 | ret = -EINVAL; | ||
542 | if (rx->key) | ||
543 | goto error; | ||
544 | ret = -EISCONN; | ||
545 | if (rx->sk.sk_state != RXRPC_UNCONNECTED) | ||
546 | goto error; | ||
547 | ret = rxrpc_server_keyring(rx, optval, optlen); | ||
548 | goto error; | ||
549 | |||
550 | case RXRPC_MIN_SECURITY_LEVEL: | ||
551 | ret = -EINVAL; | ||
552 | if (optlen != sizeof(unsigned)) | ||
553 | goto error; | ||
554 | ret = -EISCONN; | ||
555 | if (rx->sk.sk_state != RXRPC_UNCONNECTED) | ||
556 | goto error; | ||
557 | ret = get_user(min_sec_level, | ||
558 | (unsigned __user *) optval); | ||
559 | if (ret < 0) | ||
560 | goto error; | ||
561 | ret = -EINVAL; | ||
562 | if (min_sec_level > RXRPC_SECURITY_MAX) | ||
563 | goto error; | ||
564 | rx->min_sec_level = min_sec_level; | ||
565 | goto success; | ||
566 | |||
567 | default: | ||
568 | break; | ||
569 | } | ||
570 | } | ||
571 | |||
572 | success: | ||
573 | ret = 0; | ||
574 | error: | ||
575 | release_sock(&rx->sk); | ||
576 | return ret; | ||
577 | } | ||
578 | |||
/*
 * permit an RxRPC socket to be polled
 * - readable when messages are queued on the Rx queue; writable when there
 *   is send-buffer space available
 */
static unsigned int rxrpc_poll(struct file *file, struct socket *sock,
			       poll_table *wait)
{
	unsigned int mask;
	struct sock *sk = sock->sk;

	poll_wait(file, sk->sk_sleep, wait);
	mask = 0;

	/* the socket is readable if there are any messages waiting on the Rx
	 * queue */
	if (!skb_queue_empty(&sk->sk_receive_queue))
		mask |= POLLIN | POLLRDNORM;

	/* the socket is writable if there is space to add new data to the
	 * socket; there is no guarantee that any particular call in progress
	 * on the socket may have space in the Tx ACK window */
	if (rxrpc_writable(sk))
		mask |= POLLOUT | POLLWRNORM;

	return mask;
}
604 | |||
/*
 * create an RxRPC socket
 * - @protocol names the transport protocol family (only PF_INET, ie. UDP
 *   over IPv4, is supported at present)
 */
static int rxrpc_create(struct socket *sock, int protocol)
{
	struct rxrpc_sock *rx;
	struct sock *sk;

	_enter("%p,%d", sock, protocol);

	/* we support transport protocol UDP only */
	if (protocol != PF_INET)
		return -EPROTONOSUPPORT;

	if (sock->type != SOCK_DGRAM)
		return -ESOCKTNOSUPPORT;

	sock->ops = &rxrpc_rpc_ops;
	sock->state = SS_UNCONNECTED;

	sk = sk_alloc(PF_RXRPC, GFP_KERNEL, &rxrpc_proto, 1);
	if (!sk)
		return -ENOMEM;

	/* wire up the generic socket and our callbacks */
	sock_init_data(sock, sk);
	sk->sk_state = RXRPC_UNCONNECTED;
	sk->sk_write_space = rxrpc_write_space;
	sk->sk_max_ack_backlog = sysctl_rxrpc_max_qlen;
	sk->sk_destruct = rxrpc_sock_destructor;

	rx = rxrpc_sk(sk);
	rx->proto = protocol;
	rx->calls = RB_ROOT;

	INIT_LIST_HEAD(&rx->listen_link);
	INIT_LIST_HEAD(&rx->secureq);
	INIT_LIST_HEAD(&rx->acceptq);
	rwlock_init(&rx->call_lock);
	memset(&rx->srx, 0, sizeof(rx->srx));

	_leave(" = 0 [%p]", rx);
	return 0;
}
648 | |||
/*
 * RxRPC socket destructor
 * - called when the last reference on the sock is dropped; sanity-checks
 *   that the socket really is dead and has no outstanding buffers
 */
static void rxrpc_sock_destructor(struct sock *sk)
{
	_enter("%p", sk);

	rxrpc_purge_queue(&sk->sk_receive_queue);

	BUG_TRAP(!atomic_read(&sk->sk_wmem_alloc));
	BUG_TRAP(sk_unhashed(sk));
	BUG_TRAP(!sk->sk_socket);

	if (!sock_flag(sk, SOCK_DEAD)) {
		printk("Attempt to release alive rxrpc socket: %p\n", sk);
		return;
	}
}
667 | |||
/*
 * release an RxRPC socket
 * - marks the socket closed, unhooks any service listen, flushes calls and
 *   pending work, then drops every reference the socket holds (connection,
 *   bundle, transport, local endpoint, keys) before the final sock_put()
 */
static int rxrpc_release_sock(struct sock *sk)
{
	struct rxrpc_sock *rx = rxrpc_sk(sk);

	_enter("%p{%d,%d}", sk, sk->sk_state, atomic_read(&sk->sk_refcnt));

	/* declare the socket closed for business */
	sock_orphan(sk);
	sk->sk_shutdown = SHUTDOWN_MASK;

	/* Rx-queue lock serialises the state change against softirq input */
	spin_lock_bh(&sk->sk_receive_queue.lock);
	sk->sk_state = RXRPC_CLOSE;
	spin_unlock_bh(&sk->sk_receive_queue.lock);

	ASSERTCMP(rx->listen_link.next, !=, LIST_POISON1);

	if (!list_empty(&rx->listen_link)) {
		/* unadvertise the service ID on the local endpoint */
		write_lock_bh(&rx->local->services_lock);
		list_del(&rx->listen_link);
		write_unlock_bh(&rx->local->services_lock);
	}

	/* try to flush out this socket */
	rxrpc_release_calls_on_socket(rx);
	flush_workqueue(rxrpc_workqueue);
	rxrpc_purge_queue(&sk->sk_receive_queue);

	if (rx->conn) {
		rxrpc_put_connection(rx->conn);
		rx->conn = NULL;
	}

	if (rx->bundle) {
		rxrpc_put_bundle(rx->trans, rx->bundle);
		rx->bundle = NULL;
	}
	if (rx->trans) {
		rxrpc_put_transport(rx->trans);
		rx->trans = NULL;
	}
	if (rx->local) {
		rxrpc_put_local(rx->local);
		rx->local = NULL;
	}

	key_put(rx->key);
	rx->key = NULL;
	key_put(rx->securities);
	rx->securities = NULL;
	sock_put(sk);

	_leave(" = 0");
	return 0;
}
725 | |||
/*
 * release an RxRPC BSD socket on close() or equivalent
 * - detaches the sock from the socket and hands it to rxrpc_release_sock()
 */
static int rxrpc_release(struct socket *sock)
{
	struct sock *sk = sock->sk;

	_enter("%p{%p}", sock, sk);

	/* may be called a second time on the same socket - nothing to do */
	if (!sk)
		return 0;

	sock->sk = NULL;

	return rxrpc_release_sock(sk);
}
742 | |||
743 | /* | ||
744 | * RxRPC network protocol | ||
745 | */ | ||
746 | static const struct proto_ops rxrpc_rpc_ops = { | ||
747 | .family = PF_UNIX, | ||
748 | .owner = THIS_MODULE, | ||
749 | .release = rxrpc_release, | ||
750 | .bind = rxrpc_bind, | ||
751 | .connect = rxrpc_connect, | ||
752 | .socketpair = sock_no_socketpair, | ||
753 | .accept = sock_no_accept, | ||
754 | .getname = sock_no_getname, | ||
755 | .poll = rxrpc_poll, | ||
756 | .ioctl = sock_no_ioctl, | ||
757 | .listen = rxrpc_listen, | ||
758 | .shutdown = sock_no_shutdown, | ||
759 | .setsockopt = rxrpc_setsockopt, | ||
760 | .getsockopt = sock_no_getsockopt, | ||
761 | .sendmsg = rxrpc_sendmsg, | ||
762 | .recvmsg = rxrpc_recvmsg, | ||
763 | .mmap = sock_no_mmap, | ||
764 | .sendpage = sock_no_sendpage, | ||
765 | }; | ||
766 | |||
/* slab/proto description for rxrpc socks; obj_size makes sk_alloc()
 * allocate a full struct rxrpc_sock per socket */
static struct proto rxrpc_proto = {
	.name		= "RXRPC",
	.owner		= THIS_MODULE,
	.obj_size	= sizeof(struct rxrpc_sock),
	.max_header	= sizeof(struct rxrpc_header),
};
773 | |||
/* registration of PF_RXRPC with the socket layer; routes socket(PF_RXRPC,
 * ...) to rxrpc_create() */
static struct net_proto_family rxrpc_family_ops = {
	.family	= PF_RXRPC,
	.create = rxrpc_create,
	.owner	= THIS_MODULE,
};
779 | |||
/*
 * initialise and register the RxRPC protocol
 * - sets up the call slab cache, the krxrpcd workqueue, the protocol and
 *   socket family registrations, the rxrpc key types and the /proc files
 * - the error labels unwind in strict reverse order of setup
 */
static int __init af_rxrpc_init(void)
{
	struct sk_buff *dummy_skb;
	int ret = -1;

	/* the per-call state must fit in an sk_buff's control buffer */
	BUILD_BUG_ON(sizeof(struct rxrpc_skb_priv) > sizeof(dummy_skb->cb));

	/* seed the epoch from the boot time so a restarted endpoint is
	 * distinguishable from the previous incarnation */
	rxrpc_epoch = htonl(xtime.tv_sec);

	ret = -ENOMEM;
	rxrpc_call_jar = kmem_cache_create(
		"rxrpc_call_jar", sizeof(struct rxrpc_call), 0,
		SLAB_HWCACHE_ALIGN, NULL, NULL);
	if (!rxrpc_call_jar) {
		printk(KERN_NOTICE "RxRPC: Failed to allocate call jar\n");
		goto error_call_jar;
	}

	rxrpc_workqueue = create_workqueue("krxrpcd");
	if (!rxrpc_workqueue) {
		printk(KERN_NOTICE "RxRPC: Failed to allocate work queue\n");
		goto error_work_queue;
	}

	ret = proto_register(&rxrpc_proto, 1);
	if (ret < 0) {
		printk(KERN_CRIT "RxRPC: Cannot register protocol\n");
		goto error_proto;
	}

	ret = sock_register(&rxrpc_family_ops);
	if (ret < 0) {
		printk(KERN_CRIT "RxRPC: Cannot register socket family\n");
		goto error_sock;
	}

	ret = register_key_type(&key_type_rxrpc);
	if (ret < 0) {
		printk(KERN_CRIT "RxRPC: Cannot register client key type\n");
		goto error_key_type;
	}

	ret = register_key_type(&key_type_rxrpc_s);
	if (ret < 0) {
		printk(KERN_CRIT "RxRPC: Cannot register server key type\n");
		goto error_key_type_s;
	}

#ifdef CONFIG_PROC_FS
	proc_net_fops_create("rxrpc_calls", 0, &rxrpc_call_seq_fops);
	proc_net_fops_create("rxrpc_conns", 0, &rxrpc_connection_seq_fops);
#endif
	return 0;

error_key_type_s:
	unregister_key_type(&key_type_rxrpc);
error_key_type:
	sock_unregister(PF_RXRPC);
error_sock:
	proto_unregister(&rxrpc_proto);
error_proto:
	destroy_workqueue(rxrpc_workqueue);
error_work_queue:
	kmem_cache_destroy(rxrpc_call_jar);
error_call_jar:
	return ret;
}
850 | |||
/*
 * unregister the RxRPC protocol
 * - tears down registrations first so no new sockets can appear, then
 *   destroys all remaining protocol objects and checks nothing leaked
 */
static void __exit af_rxrpc_exit(void)
{
	_enter("");
	unregister_key_type(&key_type_rxrpc_s);
	unregister_key_type(&key_type_rxrpc);
	sock_unregister(PF_RXRPC);
	proto_unregister(&rxrpc_proto);
	rxrpc_destroy_all_calls();
	rxrpc_destroy_all_connections();
	rxrpc_destroy_all_transports();
	rxrpc_destroy_all_peers();
	rxrpc_destroy_all_locals();

	/* every skb should have been released by now */
	ASSERTCMP(atomic_read(&rxrpc_n_skbs), ==, 0);

	_debug("flush scheduled work");
	flush_workqueue(rxrpc_workqueue);
	proc_net_remove("rxrpc_conns");
	proc_net_remove("rxrpc_calls");
	destroy_workqueue(rxrpc_workqueue);
	kmem_cache_destroy(rxrpc_call_jar);
	_leave("");
}
877 | |||
878 | module_init(af_rxrpc_init); | ||
879 | module_exit(af_rxrpc_exit); | ||
diff --git a/net/rxrpc/ar-accept.c b/net/rxrpc/ar-accept.c new file mode 100644 index 000000000000..92a87fde8bfe --- /dev/null +++ b/net/rxrpc/ar-accept.c | |||
@@ -0,0 +1,504 @@ | |||
1 | /* incoming call handling | ||
2 | * | ||
3 | * Copyright (C) 2007 Red Hat, Inc. All Rights Reserved. | ||
4 | * Written by David Howells (dhowells@redhat.com) | ||
5 | * | ||
6 | * This program is free software; you can redistribute it and/or | ||
7 | * modify it under the terms of the GNU General Public License | ||
8 | * as published by the Free Software Foundation; either version | ||
9 | * 2 of the License, or (at your option) any later version. | ||
10 | */ | ||
11 | |||
12 | #include <linux/module.h> | ||
13 | #include <linux/net.h> | ||
14 | #include <linux/skbuff.h> | ||
15 | #include <linux/errqueue.h> | ||
16 | #include <linux/udp.h> | ||
17 | #include <linux/in.h> | ||
18 | #include <linux/in6.h> | ||
19 | #include <linux/icmp.h> | ||
20 | #include <net/sock.h> | ||
21 | #include <net/af_rxrpc.h> | ||
22 | #include <net/ip.h> | ||
23 | #include "ar-internal.h" | ||
24 | |||
/*
 * reply to an incoming call with a BUSY packet, reusing the header of the
 * received packet as a template for the reply
 * - returns 0 on success or -EAGAIN if the transmission failed
 */
static int rxrpc_busy(struct rxrpc_local *local, struct sockaddr_rxrpc *srx,
		      struct rxrpc_header *hdr)
{
	struct msghdr msg;
	struct kvec iov[1];
	size_t len;
	int ret;

	_enter("%d,,", local->debug_id);

	/* address the reply to the source of the incoming packet */
	msg.msg_name = &srx->transport.sin;
	msg.msg_namelen = sizeof(srx->transport.sin);
	msg.msg_control = NULL;
	msg.msg_controllen = 0;
	msg.msg_flags = 0;

	/* convert the received header in place into a BUSY header; the
	 * connection/call identifiers it carries are retained */
	hdr->seq = 0;
	hdr->type = RXRPC_PACKET_TYPE_BUSY;
	hdr->flags = 0;
	hdr->userStatus = 0;
	hdr->_rsvd = 0;

	iov[0].iov_base = hdr;
	iov[0].iov_len = sizeof(*hdr);

	len = iov[0].iov_len;

	hdr->serial = htonl(1);
	_proto("Tx BUSY %%%u", ntohl(hdr->serial));

	ret = kernel_sendmsg(local->socket, &msg, iov, 1, len);
	if (ret < 0) {
		_leave(" = -EAGAIN [sendmsg failed: %d]", ret);
		return -EAGAIN;
	}

	_leave(" = 0");
	return 0;
}
67 | |||
68 | /* | ||
69 | * accept an incoming call that needs peer, transport and/or connection setting | ||
70 | * up | ||
71 | */ | ||
72 | static int rxrpc_accept_incoming_call(struct rxrpc_local *local, | ||
73 | struct rxrpc_sock *rx, | ||
74 | struct sk_buff *skb, | ||
75 | struct sockaddr_rxrpc *srx) | ||
76 | { | ||
77 | struct rxrpc_connection *conn; | ||
78 | struct rxrpc_transport *trans; | ||
79 | struct rxrpc_skb_priv *sp, *nsp; | ||
80 | struct rxrpc_peer *peer; | ||
81 | struct rxrpc_call *call; | ||
82 | struct sk_buff *notification; | ||
83 | int ret; | ||
84 | |||
85 | _enter(""); | ||
86 | |||
87 | sp = rxrpc_skb(skb); | ||
88 | |||
89 | /* get a notification message to send to the server app */ | ||
90 | notification = alloc_skb(0, GFP_NOFS); | ||
91 | rxrpc_new_skb(notification); | ||
92 | notification->mark = RXRPC_SKB_MARK_NEW_CALL; | ||
93 | |||
94 | peer = rxrpc_get_peer(srx, GFP_NOIO); | ||
95 | if (IS_ERR(peer)) { | ||
96 | _debug("no peer"); | ||
97 | ret = -EBUSY; | ||
98 | goto error; | ||
99 | } | ||
100 | |||
101 | trans = rxrpc_get_transport(local, peer, GFP_NOIO); | ||
102 | rxrpc_put_peer(peer); | ||
103 | if (!trans) { | ||
104 | _debug("no trans"); | ||
105 | ret = -EBUSY; | ||
106 | goto error; | ||
107 | } | ||
108 | |||
109 | conn = rxrpc_incoming_connection(trans, &sp->hdr, GFP_NOIO); | ||
110 | rxrpc_put_transport(trans); | ||
111 | if (IS_ERR(conn)) { | ||
112 | _debug("no conn"); | ||
113 | ret = PTR_ERR(conn); | ||
114 | goto error; | ||
115 | } | ||
116 | |||
117 | call = rxrpc_incoming_call(rx, conn, &sp->hdr, GFP_NOIO); | ||
118 | rxrpc_put_connection(conn); | ||
119 | if (IS_ERR(call)) { | ||
120 | _debug("no call"); | ||
121 | ret = PTR_ERR(call); | ||
122 | goto error; | ||
123 | } | ||
124 | |||
125 | /* attach the call to the socket */ | ||
126 | read_lock_bh(&local->services_lock); | ||
127 | if (rx->sk.sk_state == RXRPC_CLOSE) | ||
128 | goto invalid_service; | ||
129 | |||
130 | write_lock(&rx->call_lock); | ||
131 | if (!test_and_set_bit(RXRPC_CALL_INIT_ACCEPT, &call->flags)) { | ||
132 | rxrpc_get_call(call); | ||
133 | |||
134 | spin_lock(&call->conn->state_lock); | ||
135 | if (sp->hdr.securityIndex > 0 && | ||
136 | call->conn->state == RXRPC_CONN_SERVER_UNSECURED) { | ||
137 | _debug("await conn sec"); | ||
138 | list_add_tail(&call->accept_link, &rx->secureq); | ||
139 | call->conn->state = RXRPC_CONN_SERVER_CHALLENGING; | ||
140 | atomic_inc(&call->conn->usage); | ||
141 | set_bit(RXRPC_CONN_CHALLENGE, &call->conn->events); | ||
142 | rxrpc_queue_conn(call->conn); | ||
143 | } else { | ||
144 | _debug("conn ready"); | ||
145 | call->state = RXRPC_CALL_SERVER_ACCEPTING; | ||
146 | list_add_tail(&call->accept_link, &rx->acceptq); | ||
147 | rxrpc_get_call(call); | ||
148 | nsp = rxrpc_skb(notification); | ||
149 | nsp->call = call; | ||
150 | |||
151 | ASSERTCMP(atomic_read(&call->usage), >=, 3); | ||
152 | |||
153 | _debug("notify"); | ||
154 | spin_lock(&call->lock); | ||
155 | ret = rxrpc_queue_rcv_skb(call, notification, true, | ||
156 | false); | ||
157 | spin_unlock(&call->lock); | ||
158 | notification = NULL; | ||
159 | if (ret < 0) | ||
160 | BUG(); | ||
161 | } | ||
162 | spin_unlock(&call->conn->state_lock); | ||
163 | |||
164 | _debug("queued"); | ||
165 | } | ||
166 | write_unlock(&rx->call_lock); | ||
167 | |||
168 | _debug("process"); | ||
169 | rxrpc_fast_process_packet(call, skb); | ||
170 | |||
171 | _debug("done"); | ||
172 | read_unlock_bh(&local->services_lock); | ||
173 | rxrpc_free_skb(notification); | ||
174 | rxrpc_put_call(call); | ||
175 | _leave(" = 0"); | ||
176 | return 0; | ||
177 | |||
178 | invalid_service: | ||
179 | _debug("invalid"); | ||
180 | read_unlock_bh(&local->services_lock); | ||
181 | |||
182 | read_lock_bh(&call->state_lock); | ||
183 | if (!test_bit(RXRPC_CALL_RELEASE, &call->flags) && | ||
184 | !test_and_set_bit(RXRPC_CALL_RELEASE, &call->events)) { | ||
185 | rxrpc_get_call(call); | ||
186 | rxrpc_queue_call(call); | ||
187 | } | ||
188 | read_unlock_bh(&call->state_lock); | ||
189 | rxrpc_put_call(call); | ||
190 | ret = -ECONNREFUSED; | ||
191 | error: | ||
192 | rxrpc_free_skb(notification); | ||
193 | _leave(" = %d", ret); | ||
194 | return ret; | ||
195 | } | ||
196 | |||
/*
 * accept incoming calls that need peer, transport and/or connection setting up
 * - the packets we get are all incoming client DATA packets that have seq == 1
 */
void rxrpc_accept_incoming_calls(struct work_struct *work)
{
	struct rxrpc_local *local =
		container_of(work, struct rxrpc_local, acceptor);
	struct rxrpc_skb_priv *sp;
	struct sockaddr_rxrpc srx;
	struct rxrpc_sock *rx;
	struct sk_buff *skb;
	__be16 service_id;
	int ret;

	_enter("%d", local->debug_id);

	/* pin the local endpoint whilst we work on it; if its usage count has
	 * already hit zero, it's being torn down and we must not touch it */
	read_lock_bh(&rxrpc_local_lock);
	if (atomic_read(&local->usage) > 0)
		rxrpc_get_local(local);
	else
		local = NULL;
	read_unlock_bh(&rxrpc_local_lock);
	if (!local) {
		_leave(" [local dead]");
		return;
	}

process_next_packet:
	skb = skb_dequeue(&local->accept_queue);
	if (!skb) {
		/* queue drained - drop the ref we took above and finish */
		rxrpc_put_local(local);
		_leave("\n");
		return;
	}

	_net("incoming call skb %p", skb);

	sp = rxrpc_skb(skb);

	/* determine the remote address */
	memset(&srx, 0, sizeof(srx));
	srx.srx_family = AF_RXRPC;
	srx.transport.family = local->srx.transport.family;
	srx.transport_type = local->srx.transport_type;
	switch (srx.transport.family) {
	case AF_INET:
		srx.transport_len = sizeof(struct sockaddr_in);
		srx.transport.sin.sin_port = udp_hdr(skb)->source;
		srx.transport.sin.sin_addr.s_addr = ip_hdr(skb)->saddr;
		break;
	default:
		/* unsupported address family - just tell the peer we're
		 * busy */
		goto busy;
	}

	/* get the socket providing the service */
	service_id = sp->hdr.serviceId;
	read_lock_bh(&local->services_lock);
	list_for_each_entry(rx, &local->services, listen_link) {
		if (rx->service_id == service_id &&
		    rx->sk.sk_state != RXRPC_CLOSE)
			goto found_service;
	}
	read_unlock_bh(&local->services_lock);
	goto invalid_service;

found_service:
	_debug("found service %hd", ntohs(rx->service_id));
	if (sk_acceptq_is_full(&rx->sk))
		goto backlog_full;
	/* reserve a backlog slot and pin the socket before dropping the
	 * services lock */
	sk_acceptq_added(&rx->sk);
	sock_hold(&rx->sk);
	read_unlock_bh(&local->services_lock);

	ret = rxrpc_accept_incoming_call(local, rx, skb, &srx);
	if (ret < 0)
		sk_acceptq_removed(&rx->sk);
	sock_put(&rx->sk);
	switch (ret) {
	case -ECONNRESET: /* old calls are ignored */
	case -ECONNABORTED: /* aborted calls are reaborted or ignored */
	case 0:
		goto process_next_packet;
	case -ECONNREFUSED:
		goto invalid_service;
	case -EBUSY:
		goto busy;
	case -EKEYREJECTED:
		goto security_mismatch;
	default:
		BUG();
	}

backlog_full:
	read_unlock_bh(&local->services_lock);
busy:
	rxrpc_busy(local, &srx, &sp->hdr);
	rxrpc_free_skb(skb);
	goto process_next_packet;

invalid_service:
	skb->priority = RX_INVALID_OPERATION;
	rxrpc_reject_packet(local, skb);
	goto process_next_packet;

	/* can't change connection security type mid-flow */
security_mismatch:
	skb->priority = RX_PROTOCOL_ERROR;
	rxrpc_reject_packet(local, skb);
	goto process_next_packet;
}
308 | |||
/*
 * handle acceptance of a call by userspace
 * - assign the user call ID to the call at the front of the queue
 * - returns the call with an extra ref held for the caller, or an ERR_PTR:
 *   -ENODATA if no call is awaiting acceptance, -EBADSLT if the user call ID
 *   is already in use, or an error reflecting the call's demise
 */
struct rxrpc_call *rxrpc_accept_call(struct rxrpc_sock *rx,
				     unsigned long user_call_ID)
{
	struct rxrpc_call *call;
	struct rb_node *parent, **pp;
	int ret;

	_enter(",%lx", user_call_ID);

	ASSERT(!irqs_disabled());

	write_lock(&rx->call_lock);

	ret = -ENODATA;
	if (list_empty(&rx->acceptq))
		goto out;

	/* check the user ID isn't already in use */
	ret = -EBADSLT;
	pp = &rx->calls.rb_node;
	parent = NULL;
	while (*pp) {
		parent = *pp;
		call = rb_entry(parent, struct rxrpc_call, sock_node);

		if (user_call_ID < call->user_call_ID)
			pp = &(*pp)->rb_left;
		else if (user_call_ID > call->user_call_ID)
			pp = &(*pp)->rb_right;
		else
			goto out;
	}

	/* dequeue the first call and check it's still valid */
	call = list_entry(rx->acceptq.next, struct rxrpc_call, accept_link);
	list_del_init(&call->accept_link);
	sk_acceptq_removed(&rx->sk);

	write_lock_bh(&call->state_lock);
	switch (call->state) {
	case RXRPC_CALL_SERVER_ACCEPTING:
		call->state = RXRPC_CALL_SERVER_RECV_REQUEST;
		break;
	case RXRPC_CALL_REMOTELY_ABORTED:
	case RXRPC_CALL_LOCALLY_ABORTED:
		ret = -ECONNABORTED;
		goto out_release;
	case RXRPC_CALL_NETWORK_ERROR:
		ret = call->conn->error;
		goto out_release;
	case RXRPC_CALL_DEAD:
		ret = -ETIME;
		goto out_discard;
	default:
		BUG();
	}

	/* formalise the acceptance: record the user's ID against the call and
	 * insert it into the socket's ID-indexed tree (parent/pp were left
	 * pointing at the insertion slot by the search loop above) */
	call->user_call_ID = user_call_ID;
	rb_link_node(&call->sock_node, parent, pp);
	rb_insert_color(&call->sock_node, &rx->calls);
	if (test_and_set_bit(RXRPC_CALL_HAS_USERID, &call->flags))
		BUG();
	if (test_and_set_bit(RXRPC_CALL_ACCEPTED, &call->events))
		BUG();
	/* kick the call processor to act on the ACCEPTED event */
	rxrpc_queue_call(call);

	/* extra ref handed back to the caller */
	rxrpc_get_call(call);
	write_unlock_bh(&call->state_lock);
	write_unlock(&rx->call_lock);
	_leave(" = %p{%d}", call, call->debug_id);
	return call;

	/* if the call is already dying or dead, then we leave the socket's ref
	 * on it to be released by rxrpc_dead_call_expired() as induced by
	 * rxrpc_release_call() */
out_release:
	_debug("release %p", call);
	if (!test_bit(RXRPC_CALL_RELEASED, &call->flags) &&
	    !test_and_set_bit(RXRPC_CALL_RELEASE, &call->events))
		rxrpc_queue_call(call);
out_discard:
	write_unlock_bh(&call->state_lock);
	_debug("discard %p", call);
out:
	write_unlock(&rx->call_lock);
	_leave(" = %d", ret);
	return ERR_PTR(ret);
}
402 | |||
403 | /* | ||
404 | * handle rejectance of a call by userspace | ||
405 | * - reject the call at the front of the queue | ||
406 | */ | ||
407 | int rxrpc_reject_call(struct rxrpc_sock *rx) | ||
408 | { | ||
409 | struct rxrpc_call *call; | ||
410 | int ret; | ||
411 | |||
412 | _enter(""); | ||
413 | |||
414 | ASSERT(!irqs_disabled()); | ||
415 | |||
416 | write_lock(&rx->call_lock); | ||
417 | |||
418 | ret = -ENODATA; | ||
419 | if (list_empty(&rx->acceptq)) | ||
420 | goto out; | ||
421 | |||
422 | /* dequeue the first call and check it's still valid */ | ||
423 | call = list_entry(rx->acceptq.next, struct rxrpc_call, accept_link); | ||
424 | list_del_init(&call->accept_link); | ||
425 | sk_acceptq_removed(&rx->sk); | ||
426 | |||
427 | write_lock_bh(&call->state_lock); | ||
428 | switch (call->state) { | ||
429 | case RXRPC_CALL_SERVER_ACCEPTING: | ||
430 | call->state = RXRPC_CALL_SERVER_BUSY; | ||
431 | if (test_and_set_bit(RXRPC_CALL_REJECT_BUSY, &call->events)) | ||
432 | rxrpc_queue_call(call); | ||
433 | ret = 0; | ||
434 | goto out_release; | ||
435 | case RXRPC_CALL_REMOTELY_ABORTED: | ||
436 | case RXRPC_CALL_LOCALLY_ABORTED: | ||
437 | ret = -ECONNABORTED; | ||
438 | goto out_release; | ||
439 | case RXRPC_CALL_NETWORK_ERROR: | ||
440 | ret = call->conn->error; | ||
441 | goto out_release; | ||
442 | case RXRPC_CALL_DEAD: | ||
443 | ret = -ETIME; | ||
444 | goto out_discard; | ||
445 | default: | ||
446 | BUG(); | ||
447 | } | ||
448 | |||
449 | /* if the call is already dying or dead, then we leave the socket's ref | ||
450 | * on it to be released by rxrpc_dead_call_expired() as induced by | ||
451 | * rxrpc_release_call() */ | ||
452 | out_release: | ||
453 | _debug("release %p", call); | ||
454 | if (!test_bit(RXRPC_CALL_RELEASED, &call->flags) && | ||
455 | !test_and_set_bit(RXRPC_CALL_RELEASE, &call->events)) | ||
456 | rxrpc_queue_call(call); | ||
457 | out_discard: | ||
458 | write_unlock_bh(&call->state_lock); | ||
459 | _debug("discard %p", call); | ||
460 | out: | ||
461 | write_unlock(&rx->call_lock); | ||
462 | _leave(" = %d", ret); | ||
463 | return ret; | ||
464 | } | ||
465 | |||
466 | /** | ||
467 | * rxrpc_kernel_accept_call - Allow a kernel service to accept an incoming call | ||
468 | * @sock: The socket on which the impending call is waiting | ||
469 | * @user_call_ID: The tag to attach to the call | ||
470 | * | ||
471 | * Allow a kernel service to accept an incoming call, assuming the incoming | ||
472 | * call is still valid. | ||
473 | */ | ||
474 | struct rxrpc_call *rxrpc_kernel_accept_call(struct socket *sock, | ||
475 | unsigned long user_call_ID) | ||
476 | { | ||
477 | struct rxrpc_call *call; | ||
478 | |||
479 | _enter(",%lx", user_call_ID); | ||
480 | call = rxrpc_accept_call(rxrpc_sk(sock->sk), user_call_ID); | ||
481 | _leave(" = %p", call); | ||
482 | return call; | ||
483 | } | ||
484 | |||
485 | EXPORT_SYMBOL(rxrpc_kernel_accept_call); | ||
486 | |||
487 | /** | ||
488 | * rxrpc_kernel_reject_call - Allow a kernel service to reject an incoming call | ||
489 | * @sock: The socket on which the impending call is waiting | ||
490 | * | ||
491 | * Allow a kernel service to reject an incoming call with a BUSY message, | ||
492 | * assuming the incoming call is still valid. | ||
493 | */ | ||
494 | int rxrpc_kernel_reject_call(struct socket *sock) | ||
495 | { | ||
496 | int ret; | ||
497 | |||
498 | _enter(""); | ||
499 | ret = rxrpc_reject_call(rxrpc_sk(sock->sk)); | ||
500 | _leave(" = %d", ret); | ||
501 | return ret; | ||
502 | } | ||
503 | |||
504 | EXPORT_SYMBOL(rxrpc_kernel_reject_call); | ||
diff --git a/net/rxrpc/ar-ack.c b/net/rxrpc/ar-ack.c new file mode 100644 index 000000000000..fc07a926df56 --- /dev/null +++ b/net/rxrpc/ar-ack.c | |||
@@ -0,0 +1,1250 @@ | |||
1 | /* Management of Tx window, Tx resend, ACKs and out-of-sequence reception | ||
2 | * | ||
3 | * Copyright (C) 2007 Red Hat, Inc. All Rights Reserved. | ||
4 | * Written by David Howells (dhowells@redhat.com) | ||
5 | * | ||
6 | * This program is free software; you can redistribute it and/or | ||
7 | * modify it under the terms of the GNU General Public License | ||
8 | * as published by the Free Software Foundation; either version | ||
9 | * 2 of the License, or (at your option) any later version. | ||
10 | */ | ||
11 | |||
12 | #include <linux/module.h> | ||
13 | #include <linux/circ_buf.h> | ||
14 | #include <linux/net.h> | ||
15 | #include <linux/skbuff.h> | ||
16 | #include <linux/udp.h> | ||
17 | #include <net/sock.h> | ||
18 | #include <net/af_rxrpc.h> | ||
19 | #include "ar-internal.h" | ||
20 | |||
/* deferral period (in jiffies) applied to REQUESTED-type ACKs before they are
 * transmitted; 0 makes such ACKs go out immediately */
static unsigned rxrpc_ack_defer = 1;

/* short names for the ACK reason codes, indexed by reason; the final entry
 * covers out-of-range values */
static const char *rxrpc_acks[] = {
	"---", "REQ", "DUP", "OOS", "WIN", "MEM", "PNG", "PNR", "DLY", "IDL",
	"-?-"
};

/* relative priority of each ACK reason: a proposed ACK only displaces a
 * pending one of strictly lower priority */
static const s8 rxrpc_ack_priority[] = {
	[0] = 0,
	[RXRPC_ACK_DELAY] = 1,
	[RXRPC_ACK_REQUESTED] = 2,
	[RXRPC_ACK_IDLE] = 3,
	[RXRPC_ACK_PING_RESPONSE] = 4,
	[RXRPC_ACK_DUPLICATE] = 5,
	[RXRPC_ACK_OUT_OF_SEQUENCE] = 6,
	[RXRPC_ACK_EXCEEDS_WINDOW] = 7,
	[RXRPC_ACK_NOSPACE] = 8,
};
39 | |||
/*
 * propose an ACK be sent
 * - a proposal of lower priority than the one already pending is dropped
 *   (though an immediate request still cancels any deferral timer)
 * - caller holds call->lock (see rxrpc_propose_ACK())
 */
void __rxrpc_propose_ACK(struct rxrpc_call *call, uint8_t ack_reason,
			 __be32 serial, bool immediate)
{
	unsigned long expiry;
	s8 prior = rxrpc_ack_priority[ack_reason];

	ASSERTCMP(prior, >, 0);

	_enter("{%d},%s,%%%x,%u",
	       call->debug_id, rxrpc_acks[ack_reason], ntohl(serial),
	       immediate);

	if (prior < rxrpc_ack_priority[call->ackr_reason]) {
		if (immediate)
			goto cancel_timer;
		return;
	}

	/* update DELAY, IDLE, REQUESTED and PING_RESPONSE ACK serial
	 * numbers */
	if (prior == rxrpc_ack_priority[call->ackr_reason]) {
		if (prior <= 4)
			call->ackr_serial = serial;
		if (immediate)
			goto cancel_timer;
		return;
	}

	/* the new proposal outranks the pending one - adopt it */
	call->ackr_reason = ack_reason;
	call->ackr_serial = serial;

	switch (ack_reason) {
	case RXRPC_ACK_DELAY:
		_debug("run delay timer");
		call->ack_timer.expires = jiffies + rxrpc_ack_timeout * HZ;
		add_timer(&call->ack_timer);
		return;

	case RXRPC_ACK_IDLE:
		if (!immediate) {
			_debug("run defer timer");
			expiry = 1;
			goto run_timer;
		}
		goto cancel_timer;

	case RXRPC_ACK_REQUESTED:
		if (!rxrpc_ack_defer)
			/* deferral disabled - ACK immediately */
			goto cancel_timer;
		if (!immediate || serial == cpu_to_be32(1)) {
			_debug("run defer timer");
			expiry = rxrpc_ack_defer;
			goto run_timer;
		}
		/* fall through - an immediate ACK was demanded */

	default:
		_debug("immediate ACK");
		goto cancel_timer;
	}

run_timer:
	/* only bring the timer forward, never push it back */
	expiry += jiffies;
	if (!timer_pending(&call->ack_timer) ||
	    time_after(call->ack_timer.expires, expiry))
		mod_timer(&call->ack_timer, expiry);
	return;

cancel_timer:
	_debug("cancel timer %%%u", ntohl(serial));
	try_to_del_timer_sync(&call->ack_timer);
	/* kick the call processor to transmit the ACK straight away */
	read_lock_bh(&call->state_lock);
	if (call->state <= RXRPC_CALL_COMPLETE &&
	    !test_and_set_bit(RXRPC_CALL_ACK, &call->events))
		rxrpc_queue_call(call);
	read_unlock_bh(&call->state_lock);
}
119 | |||
120 | /* | ||
121 | * propose an ACK be sent, locking the call structure | ||
122 | */ | ||
123 | void rxrpc_propose_ACK(struct rxrpc_call *call, uint8_t ack_reason, | ||
124 | __be32 serial, bool immediate) | ||
125 | { | ||
126 | s8 prior = rxrpc_ack_priority[ack_reason]; | ||
127 | |||
128 | if (prior > rxrpc_ack_priority[call->ackr_reason]) { | ||
129 | spin_lock_bh(&call->lock); | ||
130 | __rxrpc_propose_ACK(call, ack_reason, serial, immediate); | ||
131 | spin_unlock_bh(&call->lock); | ||
132 | } | ||
133 | } | ||
134 | |||
/*
 * set the resend timer
 * - resend is a pair of flags: bit 0 requests that resending begin now (the
 *   RESEND event is raised); bit 1 requests that the resend timer be set to
 *   fire at resend_at
 * - both requests are suppressed once the call is complete
 */
static void rxrpc_set_resend(struct rxrpc_call *call, u8 resend,
			     unsigned long resend_at)
{
	read_lock_bh(&call->state_lock);
	if (call->state >= RXRPC_CALL_COMPLETE)
		resend = 0;

	if (resend & 1) {
		_debug("SET RESEND");
		set_bit(RXRPC_CALL_RESEND, &call->events);
	}

	if (resend & 2) {
		_debug("MODIFY RESEND TIMER");
		set_bit(RXRPC_CALL_RUN_RTIMER, &call->flags);
		mod_timer(&call->resend_timer, resend_at);
	} else {
		/* nothing awaiting a future retry - stop the timer */
		_debug("KILL RESEND TIMER");
		del_timer_sync(&call->resend_timer);
		clear_bit(RXRPC_CALL_RESEND_TIMER, &call->events);
		clear_bit(RXRPC_CALL_RUN_RTIMER, &call->flags);
	}
	read_unlock_bh(&call->state_lock);
}
162 | |||
163 | /* | ||
164 | * resend packets | ||
165 | */ | ||
166 | static void rxrpc_resend(struct rxrpc_call *call) | ||
167 | { | ||
168 | struct rxrpc_skb_priv *sp; | ||
169 | struct rxrpc_header *hdr; | ||
170 | struct sk_buff *txb; | ||
171 | unsigned long *p_txb, resend_at; | ||
172 | int loop, stop; | ||
173 | u8 resend; | ||
174 | |||
175 | _enter("{%d,%d,%d,%d},", | ||
176 | call->acks_hard, call->acks_unacked, | ||
177 | atomic_read(&call->sequence), | ||
178 | CIRC_CNT(call->acks_head, call->acks_tail, call->acks_winsz)); | ||
179 | |||
180 | stop = 0; | ||
181 | resend = 0; | ||
182 | resend_at = 0; | ||
183 | |||
184 | for (loop = call->acks_tail; | ||
185 | loop != call->acks_head || stop; | ||
186 | loop = (loop + 1) & (call->acks_winsz - 1) | ||
187 | ) { | ||
188 | p_txb = call->acks_window + loop; | ||
189 | smp_read_barrier_depends(); | ||
190 | if (*p_txb & 1) | ||
191 | continue; | ||
192 | |||
193 | txb = (struct sk_buff *) *p_txb; | ||
194 | sp = rxrpc_skb(txb); | ||
195 | |||
196 | if (sp->need_resend) { | ||
197 | sp->need_resend = 0; | ||
198 | |||
199 | /* each Tx packet has a new serial number */ | ||
200 | sp->hdr.serial = | ||
201 | htonl(atomic_inc_return(&call->conn->serial)); | ||
202 | |||
203 | hdr = (struct rxrpc_header *) txb->head; | ||
204 | hdr->serial = sp->hdr.serial; | ||
205 | |||
206 | _proto("Tx DATA %%%u { #%d }", | ||
207 | ntohl(sp->hdr.serial), ntohl(sp->hdr.seq)); | ||
208 | if (rxrpc_send_packet(call->conn->trans, txb) < 0) { | ||
209 | stop = 0; | ||
210 | sp->resend_at = jiffies + 3; | ||
211 | } else { | ||
212 | sp->resend_at = | ||
213 | jiffies + rxrpc_resend_timeout * HZ; | ||
214 | } | ||
215 | } | ||
216 | |||
217 | if (time_after_eq(jiffies + 1, sp->resend_at)) { | ||
218 | sp->need_resend = 1; | ||
219 | resend |= 1; | ||
220 | } else if (resend & 2) { | ||
221 | if (time_before(sp->resend_at, resend_at)) | ||
222 | resend_at = sp->resend_at; | ||
223 | } else { | ||
224 | resend_at = sp->resend_at; | ||
225 | resend |= 2; | ||
226 | } | ||
227 | } | ||
228 | |||
229 | rxrpc_set_resend(call, resend, resend_at); | ||
230 | _leave(""); | ||
231 | } | ||
232 | |||
/*
 * handle resend timer expiry
 * - walk the not-yet-soft-ACK'd section of the Tx window, flagging packets
 *   whose retry time has passed and working out when next to fire the timer
 */
static void rxrpc_resend_timer(struct rxrpc_call *call)
{
	struct rxrpc_skb_priv *sp;
	struct sk_buff *txb;
	unsigned long *p_txb, resend_at;
	int loop;
	u8 resend;

	_enter("%d,%d,%d",
	       call->acks_tail, call->acks_unacked, call->acks_head);

	resend = 0;
	resend_at = 0;

	for (loop = call->acks_unacked;
	     loop != call->acks_head;
	     loop = (loop + 1) & (call->acks_winsz - 1)
	     ) {
		p_txb = call->acks_window + loop;
		smp_read_barrier_depends();
		/* bit 0 of a window slot is the soft-ACK mark; it should be
		 * clear throughout the unacked region */
		txb = (struct sk_buff *) (*p_txb & ~1);
		sp = rxrpc_skb(txb);

		ASSERT(!(*p_txb & 1));

		if (sp->need_resend) {
			/* already flagged - nothing more to do */
			;
		} else if (time_after_eq(jiffies + 1, sp->resend_at)) {
			sp->need_resend = 1;
			resend |= 1;
		} else if (resend & 2) {
			if (time_before(sp->resend_at, resend_at))
				resend_at = sp->resend_at;
		} else {
			/* first future retry time encountered */
			resend_at = sp->resend_at;
			resend |= 2;
		}
	}

	rxrpc_set_resend(call, resend, resend_at);
	_leave("");
}
278 | |||
/*
 * process soft ACKs of our transmitted packets
 * - these indicate packets the peer has or has not received, but hasn't yet
 *   given to the consumer, and so can still be discarded and re-requested
 * - returns 0 on success or -EPROTO on a malformed ACK
 */
static int rxrpc_process_soft_ACKs(struct rxrpc_call *call,
				   struct rxrpc_ackpacket *ack,
				   struct sk_buff *skb)
{
	struct rxrpc_skb_priv *sp;
	struct sk_buff *txb;
	unsigned long *p_txb, resend_at;
	int loop;
	u8 sacks[RXRPC_MAXACKS], resend;

	_enter("{%d,%d},{%d},",
	       call->acks_hard,
	       CIRC_CNT(call->acks_head, call->acks_tail, call->acks_winsz),
	       ack->nAcks);

	/* the per-packet ACK/NACK bytes follow the ACK packet header */
	if (skb_copy_bits(skb, 0, sacks, ack->nAcks) < 0)
		goto protocol_error;

	resend = 0;
	resend_at = 0;
	for (loop = 0; loop < ack->nAcks; loop++) {
		p_txb = call->acks_window;
		p_txb += (call->acks_tail + loop) & (call->acks_winsz - 1);
		smp_read_barrier_depends();
		txb = (struct sk_buff *) (*p_txb & ~1);
		sp = rxrpc_skb(txb);

		switch (sacks[loop]) {
		case RXRPC_ACK_TYPE_ACK:
			/* mark the slot soft-ACK'd by setting bit 0 */
			sp->need_resend = 0;
			*p_txb |= 1;
			break;
		case RXRPC_ACK_TYPE_NACK:
			sp->need_resend = 1;
			*p_txb &= ~1;
			resend = 1;
			break;
		default:
			_debug("Unsupported ACK type %d", sacks[loop]);
			goto protocol_error;
		}
	}

	smp_mb();
	call->acks_unacked = (call->acks_tail + loop) & (call->acks_winsz - 1);

	/* anything not explicitly ACK'd is implicitly NACK'd, but may just not
	 * have been received or processed yet by the far end */
	for (loop = call->acks_unacked;
	     loop != call->acks_head;
	     loop = (loop + 1) & (call->acks_winsz - 1)
	     ) {
		p_txb = call->acks_window + loop;
		smp_read_barrier_depends();
		txb = (struct sk_buff *) (*p_txb & ~1);
		sp = rxrpc_skb(txb);

		if (*p_txb & 1) {
			/* packet must have been discarded */
			sp->need_resend = 1;
			*p_txb &= ~1;
			resend |= 1;
		} else if (sp->need_resend) {
			;
		} else if (time_after_eq(jiffies + 1, sp->resend_at)) {
			sp->need_resend = 1;
			resend |= 1;
		} else if (resend & 2) {
			if (time_before(sp->resend_at, resend_at))
				resend_at = sp->resend_at;
		} else {
			resend_at = sp->resend_at;
			resend |= 2;
		}
	}

	rxrpc_set_resend(call, resend, resend_at);
	_leave(" = 0");
	return 0;

protocol_error:
	_leave(" = -EPROTO");
	return -EPROTO;
}
368 | |||
369 | /* | ||
370 | * discard hard-ACK'd packets from the Tx window | ||
371 | */ | ||
372 | static void rxrpc_rotate_tx_window(struct rxrpc_call *call, u32 hard) | ||
373 | { | ||
374 | struct rxrpc_skb_priv *sp; | ||
375 | unsigned long _skb; | ||
376 | int tail = call->acks_tail, old_tail; | ||
377 | int win = CIRC_CNT(call->acks_head, tail, call->acks_winsz); | ||
378 | |||
379 | _enter("{%u,%u},%u", call->acks_hard, win, hard); | ||
380 | |||
381 | ASSERTCMP(hard - call->acks_hard, <=, win); | ||
382 | |||
383 | while (call->acks_hard < hard) { | ||
384 | smp_read_barrier_depends(); | ||
385 | _skb = call->acks_window[tail] & ~1; | ||
386 | sp = rxrpc_skb((struct sk_buff *) _skb); | ||
387 | rxrpc_free_skb((struct sk_buff *) _skb); | ||
388 | old_tail = tail; | ||
389 | tail = (tail + 1) & (call->acks_winsz - 1); | ||
390 | call->acks_tail = tail; | ||
391 | if (call->acks_unacked == old_tail) | ||
392 | call->acks_unacked = tail; | ||
393 | call->acks_hard++; | ||
394 | } | ||
395 | |||
396 | wake_up(&call->tx_waitq); | ||
397 | } | ||
398 | |||
/*
 * clear the Tx window in the event of a failure
 * - everything up to the current Tx sequence count is treated as hard-ACK'd
 *   so that rxrpc_rotate_tx_window() frees all pending Tx packets
 */
static void rxrpc_clear_tx_window(struct rxrpc_call *call)
{
	rxrpc_rotate_tx_window(call, atomic_read(&call->sequence));
}
406 | |||
/*
 * drain the out of sequence received packet queue into the packet Rx queue
 * - moves at most one packet per invocation: the head of the OOS queue, and
 *   only if its sequence number matches the next expected data packet
 * - returns 0 on success or -ECONNRESET if the call has been released
 */
static int rxrpc_drain_rx_oos_queue(struct rxrpc_call *call)
{
	struct rxrpc_skb_priv *sp;
	struct sk_buff *skb;
	bool terminal;
	int ret;

	_enter("{%d,%d}", call->rx_data_post, call->rx_first_oos);

	spin_lock_bh(&call->lock);

	ret = -ECONNRESET;
	if (test_bit(RXRPC_CALL_RELEASED, &call->flags))
		goto socket_unavailable;

	skb = skb_dequeue(&call->rx_oos_queue);
	if (skb) {
		sp = rxrpc_skb(skb);

		_debug("drain OOS packet %d [%d]",
		       ntohl(sp->hdr.seq), call->rx_first_oos);

		if (ntohl(sp->hdr.seq) != call->rx_first_oos) {
			/* not the packet we expected - put it back and
			 * resynchronise rx_first_oos with the actual head */
			skb_queue_head(&call->rx_oos_queue, skb);
			call->rx_first_oos = ntohl(rxrpc_skb(skb)->hdr.seq);
			_debug("requeue %p {%u}", skb, call->rx_first_oos);
		} else {
			skb->mark = RXRPC_SKB_MARK_DATA;
			/* the last packet of a server-bound call terminates
			 * the incoming data stream */
			terminal = ((sp->hdr.flags & RXRPC_LAST_PACKET) &&
				!(sp->hdr.flags & RXRPC_CLIENT_INITIATED));
			ret = rxrpc_queue_rcv_skb(call, skb, true, terminal);
			BUG_ON(ret < 0);
			_debug("drain #%u", call->rx_data_post);
			call->rx_data_post++;

			/* find out what the next packet is */
			skb = skb_peek(&call->rx_oos_queue);
			if (skb)
				call->rx_first_oos =
					ntohl(rxrpc_skb(skb)->hdr.seq);
			else
				call->rx_first_oos = 0;
			_debug("peek %p {%u}", skb, call->rx_first_oos);
		}
	}

	ret = 0;
socket_unavailable:
	spin_unlock_bh(&call->lock);
	_leave(" = %d", ret);
	return ret;
}
462 | |||
463 | /* | ||
464 | * insert an out of sequence packet into the buffer | ||
465 | */ | ||
466 | static void rxrpc_insert_oos_packet(struct rxrpc_call *call, | ||
467 | struct sk_buff *skb) | ||
468 | { | ||
469 | struct rxrpc_skb_priv *sp, *psp; | ||
470 | struct sk_buff *p; | ||
471 | u32 seq; | ||
472 | |||
473 | sp = rxrpc_skb(skb); | ||
474 | seq = ntohl(sp->hdr.seq); | ||
475 | _enter(",,{%u}", seq); | ||
476 | |||
477 | skb->destructor = rxrpc_packet_destructor; | ||
478 | ASSERTCMP(sp->call, ==, NULL); | ||
479 | sp->call = call; | ||
480 | rxrpc_get_call(call); | ||
481 | |||
482 | /* insert into the buffer in sequence order */ | ||
483 | spin_lock_bh(&call->lock); | ||
484 | |||
485 | skb_queue_walk(&call->rx_oos_queue, p) { | ||
486 | psp = rxrpc_skb(p); | ||
487 | if (ntohl(psp->hdr.seq) > seq) { | ||
488 | _debug("insert oos #%u before #%u", | ||
489 | seq, ntohl(psp->hdr.seq)); | ||
490 | skb_insert(p, skb, &call->rx_oos_queue); | ||
491 | goto inserted; | ||
492 | } | ||
493 | } | ||
494 | |||
495 | _debug("append oos #%u", seq); | ||
496 | skb_queue_tail(&call->rx_oos_queue, skb); | ||
497 | inserted: | ||
498 | |||
499 | /* we might now have a new front to the queue */ | ||
500 | if (call->rx_first_oos == 0 || seq < call->rx_first_oos) | ||
501 | call->rx_first_oos = seq; | ||
502 | |||
503 | read_lock(&call->state_lock); | ||
504 | if (call->state < RXRPC_CALL_COMPLETE && | ||
505 | call->rx_data_post == call->rx_first_oos) { | ||
506 | _debug("drain rx oos now"); | ||
507 | set_bit(RXRPC_CALL_DRAIN_RX_OOS, &call->events); | ||
508 | } | ||
509 | read_unlock(&call->state_lock); | ||
510 | |||
511 | spin_unlock_bh(&call->lock); | ||
512 | _leave(" [stored #%u]", call->rx_first_oos); | ||
513 | } | ||
514 | |||
515 | /* | ||
516 | * clear the Tx window on final ACK reception | ||
517 | */ | ||
518 | static void rxrpc_zap_tx_window(struct rxrpc_call *call) | ||
519 | { | ||
520 | struct rxrpc_skb_priv *sp; | ||
521 | struct sk_buff *skb; | ||
522 | unsigned long _skb, *acks_window; | ||
523 | uint8_t winsz = call->acks_winsz; | ||
524 | int tail; | ||
525 | |||
526 | acks_window = call->acks_window; | ||
527 | call->acks_window = NULL; | ||
528 | |||
529 | while (CIRC_CNT(call->acks_head, call->acks_tail, winsz) > 0) { | ||
530 | tail = call->acks_tail; | ||
531 | smp_read_barrier_depends(); | ||
532 | _skb = acks_window[tail] & ~1; | ||
533 | smp_mb(); | ||
534 | call->acks_tail = (call->acks_tail + 1) & (winsz - 1); | ||
535 | |||
536 | skb = (struct sk_buff *) _skb; | ||
537 | sp = rxrpc_skb(skb); | ||
538 | _debug("+++ clear Tx %u", ntohl(sp->hdr.seq)); | ||
539 | rxrpc_free_skb(skb); | ||
540 | } | ||
541 | |||
542 | kfree(acks_window); | ||
543 | } | ||
544 | |||
545 | /* | ||
546 | * process packets in the reception queue | ||
547 | */ | ||
548 | static int rxrpc_process_rx_queue(struct rxrpc_call *call, | ||
549 | u32 *_abort_code) | ||
550 | { | ||
551 | struct rxrpc_ackpacket ack; | ||
552 | struct rxrpc_skb_priv *sp; | ||
553 | struct sk_buff *skb; | ||
554 | bool post_ACK; | ||
555 | int latest; | ||
556 | u32 hard, tx; | ||
557 | |||
558 | _enter(""); | ||
559 | |||
560 | process_further: | ||
561 | skb = skb_dequeue(&call->rx_queue); | ||
562 | if (!skb) | ||
563 | return -EAGAIN; | ||
564 | |||
565 | _net("deferred skb %p", skb); | ||
566 | |||
567 | sp = rxrpc_skb(skb); | ||
568 | |||
569 | _debug("process %s [st %d]", rxrpc_pkts[sp->hdr.type], call->state); | ||
570 | |||
571 | post_ACK = false; | ||
572 | |||
573 | switch (sp->hdr.type) { | ||
574 | /* data packets that wind up here have been received out of | ||
575 | * order, need security processing or are jumbo packets */ | ||
576 | case RXRPC_PACKET_TYPE_DATA: | ||
577 | _proto("OOSQ DATA %%%u { #%u }", | ||
578 | ntohl(sp->hdr.serial), ntohl(sp->hdr.seq)); | ||
579 | |||
580 | /* secured packets must be verified and possibly decrypted */ | ||
581 | if (rxrpc_verify_packet(call, skb, _abort_code) < 0) | ||
582 | goto protocol_error; | ||
583 | |||
584 | rxrpc_insert_oos_packet(call, skb); | ||
585 | goto process_further; | ||
586 | |||
587 | /* partial ACK to process */ | ||
588 | case RXRPC_PACKET_TYPE_ACK: | ||
589 | if (skb_copy_bits(skb, 0, &ack, sizeof(ack)) < 0) { | ||
590 | _debug("extraction failure"); | ||
591 | goto protocol_error; | ||
592 | } | ||
593 | if (!skb_pull(skb, sizeof(ack))) | ||
594 | BUG(); | ||
595 | |||
596 | latest = ntohl(sp->hdr.serial); | ||
597 | hard = ntohl(ack.firstPacket); | ||
598 | tx = atomic_read(&call->sequence); | ||
599 | |||
600 | _proto("Rx ACK %%%u { m=%hu f=#%u p=#%u s=%%%u r=%s n=%u }", | ||
601 | latest, | ||
602 | ntohs(ack.maxSkew), | ||
603 | hard, | ||
604 | ntohl(ack.previousPacket), | ||
605 | ntohl(ack.serial), | ||
606 | rxrpc_acks[ack.reason], | ||
607 | ack.nAcks); | ||
608 | |||
609 | if (ack.reason == RXRPC_ACK_PING) { | ||
610 | _proto("Rx ACK %%%u PING Request", latest); | ||
611 | rxrpc_propose_ACK(call, RXRPC_ACK_PING_RESPONSE, | ||
612 | sp->hdr.serial, true); | ||
613 | } | ||
614 | |||
615 | /* discard any out-of-order or duplicate ACKs */ | ||
616 | if (latest - call->acks_latest <= 0) { | ||
617 | _debug("discard ACK %d <= %d", | ||
618 | latest, call->acks_latest); | ||
619 | goto discard; | ||
620 | } | ||
621 | call->acks_latest = latest; | ||
622 | |||
623 | if (call->state != RXRPC_CALL_CLIENT_SEND_REQUEST && | ||
624 | call->state != RXRPC_CALL_CLIENT_AWAIT_REPLY && | ||
625 | call->state != RXRPC_CALL_SERVER_SEND_REPLY && | ||
626 | call->state != RXRPC_CALL_SERVER_AWAIT_ACK) | ||
627 | goto discard; | ||
628 | |||
629 | _debug("Tx=%d H=%u S=%d", tx, call->acks_hard, call->state); | ||
630 | |||
631 | if (hard > 0) { | ||
632 | if (hard - 1 > tx) { | ||
633 | _debug("hard-ACK'd packet %d not transmitted" | ||
634 | " (%d top)", | ||
635 | hard - 1, tx); | ||
636 | goto protocol_error; | ||
637 | } | ||
638 | |||
639 | if ((call->state == RXRPC_CALL_CLIENT_AWAIT_REPLY || | ||
640 | call->state == RXRPC_CALL_SERVER_AWAIT_ACK) && | ||
641 | hard > tx) | ||
642 | goto all_acked; | ||
643 | |||
644 | smp_rmb(); | ||
645 | rxrpc_rotate_tx_window(call, hard - 1); | ||
646 | } | ||
647 | |||
648 | if (ack.nAcks > 0) { | ||
649 | if (hard - 1 + ack.nAcks > tx) { | ||
650 | _debug("soft-ACK'd packet %d+%d not" | ||
651 | " transmitted (%d top)", | ||
652 | hard - 1, ack.nAcks, tx); | ||
653 | goto protocol_error; | ||
654 | } | ||
655 | |||
656 | if (rxrpc_process_soft_ACKs(call, &ack, skb) < 0) | ||
657 | goto protocol_error; | ||
658 | } | ||
659 | goto discard; | ||
660 | |||
661 | /* complete ACK to process */ | ||
662 | case RXRPC_PACKET_TYPE_ACKALL: | ||
663 | goto all_acked; | ||
664 | |||
665 | /* abort and busy are handled elsewhere */ | ||
666 | case RXRPC_PACKET_TYPE_BUSY: | ||
667 | case RXRPC_PACKET_TYPE_ABORT: | ||
668 | BUG(); | ||
669 | |||
670 | /* connection level events - also handled elsewhere */ | ||
671 | case RXRPC_PACKET_TYPE_CHALLENGE: | ||
672 | case RXRPC_PACKET_TYPE_RESPONSE: | ||
673 | case RXRPC_PACKET_TYPE_DEBUG: | ||
674 | BUG(); | ||
675 | } | ||
676 | |||
677 | /* if we've had a hard ACK that covers all the packets we've sent, then | ||
678 | * that ends that phase of the operation */ | ||
679 | all_acked: | ||
680 | write_lock_bh(&call->state_lock); | ||
681 | _debug("ack all %d", call->state); | ||
682 | |||
683 | switch (call->state) { | ||
684 | case RXRPC_CALL_CLIENT_AWAIT_REPLY: | ||
685 | call->state = RXRPC_CALL_CLIENT_RECV_REPLY; | ||
686 | break; | ||
687 | case RXRPC_CALL_SERVER_AWAIT_ACK: | ||
688 | _debug("srv complete"); | ||
689 | call->state = RXRPC_CALL_COMPLETE; | ||
690 | post_ACK = true; | ||
691 | break; | ||
692 | case RXRPC_CALL_CLIENT_SEND_REQUEST: | ||
693 | case RXRPC_CALL_SERVER_RECV_REQUEST: | ||
694 | goto protocol_error_unlock; /* can't occur yet */ | ||
695 | default: | ||
696 | write_unlock_bh(&call->state_lock); | ||
697 | goto discard; /* assume packet left over from earlier phase */ | ||
698 | } | ||
699 | |||
700 | write_unlock_bh(&call->state_lock); | ||
701 | |||
702 | /* if all the packets we sent are hard-ACK'd, then we can discard | ||
703 | * whatever we've got left */ | ||
704 | _debug("clear Tx %d", | ||
705 | CIRC_CNT(call->acks_head, call->acks_tail, call->acks_winsz)); | ||
706 | |||
707 | del_timer_sync(&call->resend_timer); | ||
708 | clear_bit(RXRPC_CALL_RUN_RTIMER, &call->flags); | ||
709 | clear_bit(RXRPC_CALL_RESEND_TIMER, &call->events); | ||
710 | |||
711 | if (call->acks_window) | ||
712 | rxrpc_zap_tx_window(call); | ||
713 | |||
714 | if (post_ACK) { | ||
715 | /* post the final ACK message for userspace to pick up */ | ||
716 | _debug("post ACK"); | ||
717 | skb->mark = RXRPC_SKB_MARK_FINAL_ACK; | ||
718 | sp->call = call; | ||
719 | rxrpc_get_call(call); | ||
720 | spin_lock_bh(&call->lock); | ||
721 | if (rxrpc_queue_rcv_skb(call, skb, true, true) < 0) | ||
722 | BUG(); | ||
723 | spin_unlock_bh(&call->lock); | ||
724 | goto process_further; | ||
725 | } | ||
726 | |||
727 | discard: | ||
728 | rxrpc_free_skb(skb); | ||
729 | goto process_further; | ||
730 | |||
731 | protocol_error_unlock: | ||
732 | write_unlock_bh(&call->state_lock); | ||
733 | protocol_error: | ||
734 | rxrpc_free_skb(skb); | ||
735 | _leave(" = -EPROTO"); | ||
736 | return -EPROTO; | ||
737 | } | ||
738 | |||
739 | /* | ||
740 | * post a message to the socket Rx queue for recvmsg() to pick up | ||
741 | */ | ||
742 | static int rxrpc_post_message(struct rxrpc_call *call, u32 mark, u32 error, | ||
743 | bool fatal) | ||
744 | { | ||
745 | struct rxrpc_skb_priv *sp; | ||
746 | struct sk_buff *skb; | ||
747 | int ret; | ||
748 | |||
749 | _enter("{%d,%lx},%u,%u,%d", | ||
750 | call->debug_id, call->flags, mark, error, fatal); | ||
751 | |||
752 | /* remove timers and things for fatal messages */ | ||
753 | if (fatal) { | ||
754 | del_timer_sync(&call->resend_timer); | ||
755 | del_timer_sync(&call->ack_timer); | ||
756 | clear_bit(RXRPC_CALL_RUN_RTIMER, &call->flags); | ||
757 | } | ||
758 | |||
759 | if (mark != RXRPC_SKB_MARK_NEW_CALL && | ||
760 | !test_bit(RXRPC_CALL_HAS_USERID, &call->flags)) { | ||
761 | _leave("[no userid]"); | ||
762 | return 0; | ||
763 | } | ||
764 | |||
765 | if (!test_bit(RXRPC_CALL_TERMINAL_MSG, &call->flags)) { | ||
766 | skb = alloc_skb(0, GFP_NOFS); | ||
767 | if (!skb) | ||
768 | return -ENOMEM; | ||
769 | |||
770 | rxrpc_new_skb(skb); | ||
771 | |||
772 | skb->mark = mark; | ||
773 | |||
774 | sp = rxrpc_skb(skb); | ||
775 | memset(sp, 0, sizeof(*sp)); | ||
776 | sp->error = error; | ||
777 | sp->call = call; | ||
778 | rxrpc_get_call(call); | ||
779 | |||
780 | spin_lock_bh(&call->lock); | ||
781 | ret = rxrpc_queue_rcv_skb(call, skb, true, fatal); | ||
782 | spin_unlock_bh(&call->lock); | ||
783 | if (ret < 0) | ||
784 | BUG(); | ||
785 | } | ||
786 | |||
787 | return 0; | ||
788 | } | ||
789 | |||
790 | /* | ||
791 | * handle background processing of incoming call packets and ACK / abort | ||
792 | * generation | ||
793 | */ | ||
794 | void rxrpc_process_call(struct work_struct *work) | ||
795 | { | ||
796 | struct rxrpc_call *call = | ||
797 | container_of(work, struct rxrpc_call, processor); | ||
798 | struct rxrpc_ackpacket ack; | ||
799 | struct rxrpc_ackinfo ackinfo; | ||
800 | struct rxrpc_header hdr; | ||
801 | struct msghdr msg; | ||
802 | struct kvec iov[5]; | ||
803 | unsigned long bits; | ||
804 | __be32 data; | ||
805 | size_t len; | ||
806 | int genbit, loop, nbit, ioc, ret; | ||
807 | u32 abort_code = RX_PROTOCOL_ERROR; | ||
808 | u8 *acks = NULL; | ||
809 | |||
810 | //printk("\n--------------------\n"); | ||
811 | _enter("{%d,%s,%lx} [%lu]", | ||
812 | call->debug_id, rxrpc_call_states[call->state], call->events, | ||
813 | (jiffies - call->creation_jif) / (HZ / 10)); | ||
814 | |||
815 | if (test_and_set_bit(RXRPC_CALL_PROC_BUSY, &call->flags)) { | ||
816 | _debug("XXXXXXXXXXXXX RUNNING ON MULTIPLE CPUS XXXXXXXXXXXXX"); | ||
817 | return; | ||
818 | } | ||
819 | |||
820 | /* there's a good chance we're going to have to send a message, so set | ||
821 | * one up in advance */ | ||
822 | msg.msg_name = &call->conn->trans->peer->srx.transport.sin; | ||
823 | msg.msg_namelen = sizeof(call->conn->trans->peer->srx.transport.sin); | ||
824 | msg.msg_control = NULL; | ||
825 | msg.msg_controllen = 0; | ||
826 | msg.msg_flags = 0; | ||
827 | |||
828 | hdr.epoch = call->conn->epoch; | ||
829 | hdr.cid = call->cid; | ||
830 | hdr.callNumber = call->call_id; | ||
831 | hdr.seq = 0; | ||
832 | hdr.type = RXRPC_PACKET_TYPE_ACK; | ||
833 | hdr.flags = call->conn->out_clientflag; | ||
834 | hdr.userStatus = 0; | ||
835 | hdr.securityIndex = call->conn->security_ix; | ||
836 | hdr._rsvd = 0; | ||
837 | hdr.serviceId = call->conn->service_id; | ||
838 | |||
839 | memset(iov, 0, sizeof(iov)); | ||
840 | iov[0].iov_base = &hdr; | ||
841 | iov[0].iov_len = sizeof(hdr); | ||
842 | |||
843 | /* deal with events of a final nature */ | ||
844 | if (test_bit(RXRPC_CALL_RELEASE, &call->events)) { | ||
845 | rxrpc_release_call(call); | ||
846 | clear_bit(RXRPC_CALL_RELEASE, &call->events); | ||
847 | } | ||
848 | |||
849 | if (test_bit(RXRPC_CALL_RCVD_ERROR, &call->events)) { | ||
850 | int error; | ||
851 | |||
852 | clear_bit(RXRPC_CALL_CONN_ABORT, &call->events); | ||
853 | clear_bit(RXRPC_CALL_REJECT_BUSY, &call->events); | ||
854 | clear_bit(RXRPC_CALL_ABORT, &call->events); | ||
855 | |||
856 | error = call->conn->trans->peer->net_error; | ||
857 | _debug("post net error %d", error); | ||
858 | |||
859 | if (rxrpc_post_message(call, RXRPC_SKB_MARK_NET_ERROR, | ||
860 | error, true) < 0) | ||
861 | goto no_mem; | ||
862 | clear_bit(RXRPC_CALL_RCVD_ERROR, &call->events); | ||
863 | goto kill_ACKs; | ||
864 | } | ||
865 | |||
866 | if (test_bit(RXRPC_CALL_CONN_ABORT, &call->events)) { | ||
867 | ASSERTCMP(call->state, >, RXRPC_CALL_COMPLETE); | ||
868 | |||
869 | clear_bit(RXRPC_CALL_REJECT_BUSY, &call->events); | ||
870 | clear_bit(RXRPC_CALL_ABORT, &call->events); | ||
871 | |||
872 | _debug("post conn abort"); | ||
873 | |||
874 | if (rxrpc_post_message(call, RXRPC_SKB_MARK_LOCAL_ERROR, | ||
875 | call->conn->error, true) < 0) | ||
876 | goto no_mem; | ||
877 | clear_bit(RXRPC_CALL_CONN_ABORT, &call->events); | ||
878 | goto kill_ACKs; | ||
879 | } | ||
880 | |||
881 | if (test_bit(RXRPC_CALL_REJECT_BUSY, &call->events)) { | ||
882 | hdr.type = RXRPC_PACKET_TYPE_BUSY; | ||
883 | genbit = RXRPC_CALL_REJECT_BUSY; | ||
884 | goto send_message; | ||
885 | } | ||
886 | |||
887 | if (test_bit(RXRPC_CALL_ABORT, &call->events)) { | ||
888 | ASSERTCMP(call->state, >, RXRPC_CALL_COMPLETE); | ||
889 | |||
890 | if (rxrpc_post_message(call, RXRPC_SKB_MARK_LOCAL_ERROR, | ||
891 | ECONNABORTED, true) < 0) | ||
892 | goto no_mem; | ||
893 | hdr.type = RXRPC_PACKET_TYPE_ABORT; | ||
894 | data = htonl(call->abort_code); | ||
895 | iov[1].iov_base = &data; | ||
896 | iov[1].iov_len = sizeof(data); | ||
897 | genbit = RXRPC_CALL_ABORT; | ||
898 | goto send_message; | ||
899 | } | ||
900 | |||
901 | if (test_bit(RXRPC_CALL_ACK_FINAL, &call->events)) { | ||
902 | hdr.type = RXRPC_PACKET_TYPE_ACKALL; | ||
903 | genbit = RXRPC_CALL_ACK_FINAL; | ||
904 | goto send_message; | ||
905 | } | ||
906 | |||
907 | if (call->events & ((1 << RXRPC_CALL_RCVD_BUSY) | | ||
908 | (1 << RXRPC_CALL_RCVD_ABORT)) | ||
909 | ) { | ||
910 | u32 mark; | ||
911 | |||
912 | if (test_bit(RXRPC_CALL_RCVD_ABORT, &call->events)) | ||
913 | mark = RXRPC_SKB_MARK_REMOTE_ABORT; | ||
914 | else | ||
915 | mark = RXRPC_SKB_MARK_BUSY; | ||
916 | |||
917 | _debug("post abort/busy"); | ||
918 | rxrpc_clear_tx_window(call); | ||
919 | if (rxrpc_post_message(call, mark, ECONNABORTED, true) < 0) | ||
920 | goto no_mem; | ||
921 | |||
922 | clear_bit(RXRPC_CALL_RCVD_BUSY, &call->events); | ||
923 | clear_bit(RXRPC_CALL_RCVD_ABORT, &call->events); | ||
924 | goto kill_ACKs; | ||
925 | } | ||
926 | |||
927 | if (test_and_clear_bit(RXRPC_CALL_RCVD_ACKALL, &call->events)) { | ||
928 | _debug("do implicit ackall"); | ||
929 | rxrpc_clear_tx_window(call); | ||
930 | } | ||
931 | |||
932 | if (test_bit(RXRPC_CALL_LIFE_TIMER, &call->events)) { | ||
933 | write_lock_bh(&call->state_lock); | ||
934 | if (call->state <= RXRPC_CALL_COMPLETE) { | ||
935 | call->state = RXRPC_CALL_LOCALLY_ABORTED; | ||
936 | call->abort_code = RX_CALL_TIMEOUT; | ||
937 | set_bit(RXRPC_CALL_ABORT, &call->events); | ||
938 | } | ||
939 | write_unlock_bh(&call->state_lock); | ||
940 | |||
941 | _debug("post timeout"); | ||
942 | if (rxrpc_post_message(call, RXRPC_SKB_MARK_LOCAL_ERROR, | ||
943 | ETIME, true) < 0) | ||
944 | goto no_mem; | ||
945 | |||
946 | clear_bit(RXRPC_CALL_LIFE_TIMER, &call->events); | ||
947 | goto kill_ACKs; | ||
948 | } | ||
949 | |||
950 | /* deal with assorted inbound messages */ | ||
951 | if (!skb_queue_empty(&call->rx_queue)) { | ||
952 | switch (rxrpc_process_rx_queue(call, &abort_code)) { | ||
953 | case 0: | ||
954 | case -EAGAIN: | ||
955 | break; | ||
956 | case -ENOMEM: | ||
957 | goto no_mem; | ||
958 | case -EKEYEXPIRED: | ||
959 | case -EKEYREJECTED: | ||
960 | case -EPROTO: | ||
961 | rxrpc_abort_call(call, abort_code); | ||
962 | goto kill_ACKs; | ||
963 | } | ||
964 | } | ||
965 | |||
966 | /* handle resending */ | ||
967 | if (test_and_clear_bit(RXRPC_CALL_RESEND_TIMER, &call->events)) | ||
968 | rxrpc_resend_timer(call); | ||
969 | if (test_and_clear_bit(RXRPC_CALL_RESEND, &call->events)) | ||
970 | rxrpc_resend(call); | ||
971 | |||
972 | /* consider sending an ordinary ACK */ | ||
973 | if (test_bit(RXRPC_CALL_ACK, &call->events)) { | ||
974 | __be32 pad; | ||
975 | |||
976 | _debug("send ACK: window: %d - %d { %lx }", | ||
977 | call->rx_data_eaten, call->ackr_win_top, | ||
978 | call->ackr_window[0]); | ||
979 | |||
980 | if (call->state > RXRPC_CALL_SERVER_ACK_REQUEST && | ||
981 | call->ackr_reason != RXRPC_ACK_PING_RESPONSE) { | ||
982 | /* ACK by sending reply DATA packet in this state */ | ||
983 | clear_bit(RXRPC_CALL_ACK, &call->events); | ||
984 | goto maybe_reschedule; | ||
985 | } | ||
986 | |||
987 | genbit = RXRPC_CALL_ACK; | ||
988 | |||
989 | acks = kzalloc(call->ackr_win_top - call->rx_data_eaten, | ||
990 | GFP_NOFS); | ||
991 | if (!acks) | ||
992 | goto no_mem; | ||
993 | |||
994 | //hdr.flags = RXRPC_SLOW_START_OK; | ||
995 | ack.bufferSpace = htons(8); | ||
996 | ack.maxSkew = 0; | ||
997 | ack.serial = 0; | ||
998 | ack.reason = 0; | ||
999 | |||
1000 | ackinfo.rxMTU = htonl(5692); | ||
1001 | // ackinfo.rxMTU = htonl(call->conn->trans->peer->maxdata); | ||
1002 | ackinfo.maxMTU = htonl(call->conn->trans->peer->maxdata); | ||
1003 | ackinfo.rwind = htonl(32); | ||
1004 | ackinfo.jumbo_max = htonl(4); | ||
1005 | |||
1006 | spin_lock_bh(&call->lock); | ||
1007 | ack.reason = call->ackr_reason; | ||
1008 | ack.serial = call->ackr_serial; | ||
1009 | ack.previousPacket = call->ackr_prev_seq; | ||
1010 | ack.firstPacket = htonl(call->rx_data_eaten + 1); | ||
1011 | |||
1012 | ack.nAcks = 0; | ||
1013 | for (loop = 0; loop < RXRPC_ACKR_WINDOW_ASZ; loop++) { | ||
1014 | nbit = loop * BITS_PER_LONG; | ||
1015 | for (bits = call->ackr_window[loop]; bits; bits >>= 1 | ||
1016 | ) { | ||
1017 | _debug("- l=%d n=%d b=%lx", loop, nbit, bits); | ||
1018 | if (bits & 1) { | ||
1019 | acks[nbit] = RXRPC_ACK_TYPE_ACK; | ||
1020 | ack.nAcks = nbit + 1; | ||
1021 | } | ||
1022 | nbit++; | ||
1023 | } | ||
1024 | } | ||
1025 | call->ackr_reason = 0; | ||
1026 | spin_unlock_bh(&call->lock); | ||
1027 | |||
1028 | pad = 0; | ||
1029 | |||
1030 | iov[1].iov_base = &ack; | ||
1031 | iov[1].iov_len = sizeof(ack); | ||
1032 | iov[2].iov_base = acks; | ||
1033 | iov[2].iov_len = ack.nAcks; | ||
1034 | iov[3].iov_base = &pad; | ||
1035 | iov[3].iov_len = 3; | ||
1036 | iov[4].iov_base = &ackinfo; | ||
1037 | iov[4].iov_len = sizeof(ackinfo); | ||
1038 | |||
1039 | switch (ack.reason) { | ||
1040 | case RXRPC_ACK_REQUESTED: | ||
1041 | case RXRPC_ACK_DUPLICATE: | ||
1042 | case RXRPC_ACK_OUT_OF_SEQUENCE: | ||
1043 | case RXRPC_ACK_EXCEEDS_WINDOW: | ||
1044 | case RXRPC_ACK_NOSPACE: | ||
1045 | case RXRPC_ACK_PING: | ||
1046 | case RXRPC_ACK_PING_RESPONSE: | ||
1047 | goto send_ACK_with_skew; | ||
1048 | case RXRPC_ACK_DELAY: | ||
1049 | case RXRPC_ACK_IDLE: | ||
1050 | goto send_ACK; | ||
1051 | } | ||
1052 | } | ||
1053 | |||
1054 | /* handle completion of security negotiations on an incoming | ||
1055 | * connection */ | ||
1056 | if (test_and_clear_bit(RXRPC_CALL_SECURED, &call->events)) { | ||
1057 | _debug("secured"); | ||
1058 | spin_lock_bh(&call->lock); | ||
1059 | |||
1060 | if (call->state == RXRPC_CALL_SERVER_SECURING) { | ||
1061 | _debug("securing"); | ||
1062 | write_lock(&call->conn->lock); | ||
1063 | if (!test_bit(RXRPC_CALL_RELEASED, &call->flags) && | ||
1064 | !test_bit(RXRPC_CALL_RELEASE, &call->events)) { | ||
1065 | _debug("not released"); | ||
1066 | call->state = RXRPC_CALL_SERVER_ACCEPTING; | ||
1067 | list_move_tail(&call->accept_link, | ||
1068 | &call->socket->acceptq); | ||
1069 | } | ||
1070 | write_unlock(&call->conn->lock); | ||
1071 | read_lock(&call->state_lock); | ||
1072 | if (call->state < RXRPC_CALL_COMPLETE) | ||
1073 | set_bit(RXRPC_CALL_POST_ACCEPT, &call->events); | ||
1074 | read_unlock(&call->state_lock); | ||
1075 | } | ||
1076 | |||
1077 | spin_unlock_bh(&call->lock); | ||
1078 | if (!test_bit(RXRPC_CALL_POST_ACCEPT, &call->events)) | ||
1079 | goto maybe_reschedule; | ||
1080 | } | ||
1081 | |||
1082 | /* post a notification of an acceptable connection to the app */ | ||
1083 | if (test_bit(RXRPC_CALL_POST_ACCEPT, &call->events)) { | ||
1084 | _debug("post accept"); | ||
1085 | if (rxrpc_post_message(call, RXRPC_SKB_MARK_NEW_CALL, | ||
1086 | 0, false) < 0) | ||
1087 | goto no_mem; | ||
1088 | clear_bit(RXRPC_CALL_POST_ACCEPT, &call->events); | ||
1089 | goto maybe_reschedule; | ||
1090 | } | ||
1091 | |||
1092 | /* handle incoming call acceptance */ | ||
1093 | if (test_and_clear_bit(RXRPC_CALL_ACCEPTED, &call->events)) { | ||
1094 | _debug("accepted"); | ||
1095 | ASSERTCMP(call->rx_data_post, ==, 0); | ||
1096 | call->rx_data_post = 1; | ||
1097 | read_lock_bh(&call->state_lock); | ||
1098 | if (call->state < RXRPC_CALL_COMPLETE) | ||
1099 | set_bit(RXRPC_CALL_DRAIN_RX_OOS, &call->events); | ||
1100 | read_unlock_bh(&call->state_lock); | ||
1101 | } | ||
1102 | |||
1103 | /* drain the out of sequence received packet queue into the packet Rx | ||
1104 | * queue */ | ||
1105 | if (test_and_clear_bit(RXRPC_CALL_DRAIN_RX_OOS, &call->events)) { | ||
1106 | while (call->rx_data_post == call->rx_first_oos) | ||
1107 | if (rxrpc_drain_rx_oos_queue(call) < 0) | ||
1108 | break; | ||
1109 | goto maybe_reschedule; | ||
1110 | } | ||
1111 | |||
1112 | /* other events may have been raised since we started checking */ | ||
1113 | goto maybe_reschedule; | ||
1114 | |||
1115 | send_ACK_with_skew: | ||
1116 | ack.maxSkew = htons(atomic_read(&call->conn->hi_serial) - | ||
1117 | ntohl(ack.serial)); | ||
1118 | send_ACK: | ||
1119 | hdr.serial = htonl(atomic_inc_return(&call->conn->serial)); | ||
1120 | _proto("Tx ACK %%%u { m=%hu f=#%u p=#%u s=%%%u r=%s n=%u }", | ||
1121 | ntohl(hdr.serial), | ||
1122 | ntohs(ack.maxSkew), | ||
1123 | ntohl(ack.firstPacket), | ||
1124 | ntohl(ack.previousPacket), | ||
1125 | ntohl(ack.serial), | ||
1126 | rxrpc_acks[ack.reason], | ||
1127 | ack.nAcks); | ||
1128 | |||
1129 | del_timer_sync(&call->ack_timer); | ||
1130 | if (ack.nAcks > 0) | ||
1131 | set_bit(RXRPC_CALL_TX_SOFT_ACK, &call->flags); | ||
1132 | goto send_message_2; | ||
1133 | |||
1134 | send_message: | ||
1135 | _debug("send message"); | ||
1136 | |||
1137 | hdr.serial = htonl(atomic_inc_return(&call->conn->serial)); | ||
1138 | _proto("Tx %s %%%u", rxrpc_pkts[hdr.type], ntohl(hdr.serial)); | ||
1139 | send_message_2: | ||
1140 | |||
1141 | len = iov[0].iov_len; | ||
1142 | ioc = 1; | ||
1143 | if (iov[4].iov_len) { | ||
1144 | ioc = 5; | ||
1145 | len += iov[4].iov_len; | ||
1146 | len += iov[3].iov_len; | ||
1147 | len += iov[2].iov_len; | ||
1148 | len += iov[1].iov_len; | ||
1149 | } else if (iov[3].iov_len) { | ||
1150 | ioc = 4; | ||
1151 | len += iov[3].iov_len; | ||
1152 | len += iov[2].iov_len; | ||
1153 | len += iov[1].iov_len; | ||
1154 | } else if (iov[2].iov_len) { | ||
1155 | ioc = 3; | ||
1156 | len += iov[2].iov_len; | ||
1157 | len += iov[1].iov_len; | ||
1158 | } else if (iov[1].iov_len) { | ||
1159 | ioc = 2; | ||
1160 | len += iov[1].iov_len; | ||
1161 | } | ||
1162 | |||
1163 | ret = kernel_sendmsg(call->conn->trans->local->socket, | ||
1164 | &msg, iov, ioc, len); | ||
1165 | if (ret < 0) { | ||
1166 | _debug("sendmsg failed: %d", ret); | ||
1167 | read_lock_bh(&call->state_lock); | ||
1168 | if (call->state < RXRPC_CALL_DEAD) | ||
1169 | rxrpc_queue_call(call); | ||
1170 | read_unlock_bh(&call->state_lock); | ||
1171 | goto error; | ||
1172 | } | ||
1173 | |||
1174 | switch (genbit) { | ||
1175 | case RXRPC_CALL_ABORT: | ||
1176 | clear_bit(genbit, &call->events); | ||
1177 | clear_bit(RXRPC_CALL_RCVD_ABORT, &call->events); | ||
1178 | goto kill_ACKs; | ||
1179 | |||
1180 | case RXRPC_CALL_ACK_FINAL: | ||
1181 | write_lock_bh(&call->state_lock); | ||
1182 | if (call->state == RXRPC_CALL_CLIENT_FINAL_ACK) | ||
1183 | call->state = RXRPC_CALL_COMPLETE; | ||
1184 | write_unlock_bh(&call->state_lock); | ||
1185 | goto kill_ACKs; | ||
1186 | |||
1187 | default: | ||
1188 | clear_bit(genbit, &call->events); | ||
1189 | switch (call->state) { | ||
1190 | case RXRPC_CALL_CLIENT_AWAIT_REPLY: | ||
1191 | case RXRPC_CALL_CLIENT_RECV_REPLY: | ||
1192 | case RXRPC_CALL_SERVER_RECV_REQUEST: | ||
1193 | case RXRPC_CALL_SERVER_ACK_REQUEST: | ||
1194 | _debug("start ACK timer"); | ||
1195 | rxrpc_propose_ACK(call, RXRPC_ACK_DELAY, | ||
1196 | call->ackr_serial, false); | ||
1197 | default: | ||
1198 | break; | ||
1199 | } | ||
1200 | goto maybe_reschedule; | ||
1201 | } | ||
1202 | |||
1203 | kill_ACKs: | ||
1204 | del_timer_sync(&call->ack_timer); | ||
1205 | if (test_and_clear_bit(RXRPC_CALL_ACK_FINAL, &call->events)) | ||
1206 | rxrpc_put_call(call); | ||
1207 | clear_bit(RXRPC_CALL_ACK, &call->events); | ||
1208 | |||
1209 | maybe_reschedule: | ||
1210 | if (call->events || !skb_queue_empty(&call->rx_queue)) { | ||
1211 | read_lock_bh(&call->state_lock); | ||
1212 | if (call->state < RXRPC_CALL_DEAD) | ||
1213 | rxrpc_queue_call(call); | ||
1214 | read_unlock_bh(&call->state_lock); | ||
1215 | } | ||
1216 | |||
1217 | /* don't leave aborted connections on the accept queue */ | ||
1218 | if (call->state >= RXRPC_CALL_COMPLETE && | ||
1219 | !list_empty(&call->accept_link)) { | ||
1220 | _debug("X unlinking once-pending call %p { e=%lx f=%lx c=%x }", | ||
1221 | call, call->events, call->flags, | ||
1222 | ntohl(call->conn->cid)); | ||
1223 | |||
1224 | read_lock_bh(&call->state_lock); | ||
1225 | if (!test_bit(RXRPC_CALL_RELEASED, &call->flags) && | ||
1226 | !test_and_set_bit(RXRPC_CALL_RELEASE, &call->events)) | ||
1227 | rxrpc_queue_call(call); | ||
1228 | read_unlock_bh(&call->state_lock); | ||
1229 | } | ||
1230 | |||
1231 | error: | ||
1232 | clear_bit(RXRPC_CALL_PROC_BUSY, &call->flags); | ||
1233 | kfree(acks); | ||
1234 | |||
1235 | /* because we don't want two CPUs both processing the work item for one | ||
1236 | * call at the same time, we use a flag to note when it's busy; however | ||
1237 | * this means there's a race between clearing the flag and setting the | ||
1238 | * work pending bit and the work item being processed again */ | ||
1239 | if (call->events && !work_pending(&call->processor)) { | ||
1240 | _debug("jumpstart %x", ntohl(call->conn->cid)); | ||
1241 | rxrpc_queue_call(call); | ||
1242 | } | ||
1243 | |||
1244 | _leave(""); | ||
1245 | return; | ||
1246 | |||
1247 | no_mem: | ||
1248 | _debug("out of memory"); | ||
1249 | goto maybe_reschedule; | ||
1250 | } | ||
diff --git a/net/rxrpc/ar-call.c b/net/rxrpc/ar-call.c new file mode 100644 index 000000000000..4d92d88ff1fc --- /dev/null +++ b/net/rxrpc/ar-call.c | |||
@@ -0,0 +1,804 @@ | |||
1 | /* RxRPC individual remote procedure call handling | ||
2 | * | ||
3 | * Copyright (C) 2007 Red Hat, Inc. All Rights Reserved. | ||
4 | * Written by David Howells (dhowells@redhat.com) | ||
5 | * | ||
6 | * This program is free software; you can redistribute it and/or | ||
7 | * modify it under the terms of the GNU General Public License | ||
8 | * as published by the Free Software Foundation; either version | ||
9 | * 2 of the License, or (at your option) any later version. | ||
10 | */ | ||
11 | |||
12 | #include <linux/module.h> | ||
13 | #include <linux/circ_buf.h> | ||
14 | #include <net/sock.h> | ||
15 | #include <net/af_rxrpc.h> | ||
16 | #include "ar-internal.h" | ||
17 | |||
struct kmem_cache *rxrpc_call_jar;	/* slab cache for struct rxrpc_call */
LIST_HEAD(rxrpc_calls);			/* global list of all extant calls */
DEFINE_RWLOCK(rxrpc_call_lock);		/* guards rxrpc_calls */
static unsigned rxrpc_call_max_lifetime = 60;	/* max call lifetime (seconds; multiplied by HZ at arming) */
static unsigned rxrpc_dead_call_timeout = 2;	/* time a released call lingers before DEAD (seconds) */

static void rxrpc_destroy_call(struct work_struct *work);
static void rxrpc_call_life_expired(unsigned long _call);
static void rxrpc_dead_call_expired(unsigned long _call);
static void rxrpc_ack_time_expired(unsigned long _call);
static void rxrpc_resend_time_expired(unsigned long _call);
29 | |||
30 | /* | ||
31 | * allocate a new call | ||
32 | */ | ||
33 | static struct rxrpc_call *rxrpc_alloc_call(gfp_t gfp) | ||
34 | { | ||
35 | struct rxrpc_call *call; | ||
36 | |||
37 | call = kmem_cache_zalloc(rxrpc_call_jar, gfp); | ||
38 | if (!call) | ||
39 | return NULL; | ||
40 | |||
41 | call->acks_winsz = 16; | ||
42 | call->acks_window = kmalloc(call->acks_winsz * sizeof(unsigned long), | ||
43 | gfp); | ||
44 | if (!call->acks_window) { | ||
45 | kmem_cache_free(rxrpc_call_jar, call); | ||
46 | return NULL; | ||
47 | } | ||
48 | |||
49 | setup_timer(&call->lifetimer, &rxrpc_call_life_expired, | ||
50 | (unsigned long) call); | ||
51 | setup_timer(&call->deadspan, &rxrpc_dead_call_expired, | ||
52 | (unsigned long) call); | ||
53 | setup_timer(&call->ack_timer, &rxrpc_ack_time_expired, | ||
54 | (unsigned long) call); | ||
55 | setup_timer(&call->resend_timer, &rxrpc_resend_time_expired, | ||
56 | (unsigned long) call); | ||
57 | INIT_WORK(&call->destroyer, &rxrpc_destroy_call); | ||
58 | INIT_WORK(&call->processor, &rxrpc_process_call); | ||
59 | INIT_LIST_HEAD(&call->accept_link); | ||
60 | skb_queue_head_init(&call->rx_queue); | ||
61 | skb_queue_head_init(&call->rx_oos_queue); | ||
62 | init_waitqueue_head(&call->tx_waitq); | ||
63 | spin_lock_init(&call->lock); | ||
64 | rwlock_init(&call->state_lock); | ||
65 | atomic_set(&call->usage, 1); | ||
66 | call->debug_id = atomic_inc_return(&rxrpc_debug_id); | ||
67 | call->state = RXRPC_CALL_CLIENT_SEND_REQUEST; | ||
68 | |||
69 | memset(&call->sock_node, 0xed, sizeof(call->sock_node)); | ||
70 | |||
71 | call->rx_data_expect = 1; | ||
72 | call->rx_data_eaten = 0; | ||
73 | call->rx_first_oos = 0; | ||
74 | call->ackr_win_top = call->rx_data_eaten + 1 + RXRPC_MAXACKS; | ||
75 | call->creation_jif = jiffies; | ||
76 | return call; | ||
77 | } | ||
78 | |||
79 | /* | ||
80 | * allocate a new client call and attempt to to get a connection slot for it | ||
81 | */ | ||
82 | static struct rxrpc_call *rxrpc_alloc_client_call( | ||
83 | struct rxrpc_sock *rx, | ||
84 | struct rxrpc_transport *trans, | ||
85 | struct rxrpc_conn_bundle *bundle, | ||
86 | gfp_t gfp) | ||
87 | { | ||
88 | struct rxrpc_call *call; | ||
89 | int ret; | ||
90 | |||
91 | _enter(""); | ||
92 | |||
93 | ASSERT(rx != NULL); | ||
94 | ASSERT(trans != NULL); | ||
95 | ASSERT(bundle != NULL); | ||
96 | |||
97 | call = rxrpc_alloc_call(gfp); | ||
98 | if (!call) | ||
99 | return ERR_PTR(-ENOMEM); | ||
100 | |||
101 | sock_hold(&rx->sk); | ||
102 | call->socket = rx; | ||
103 | call->rx_data_post = 1; | ||
104 | |||
105 | ret = rxrpc_connect_call(rx, trans, bundle, call, gfp); | ||
106 | if (ret < 0) { | ||
107 | kmem_cache_free(rxrpc_call_jar, call); | ||
108 | return ERR_PTR(ret); | ||
109 | } | ||
110 | |||
111 | spin_lock(&call->conn->trans->peer->lock); | ||
112 | list_add(&call->error_link, &call->conn->trans->peer->error_targets); | ||
113 | spin_unlock(&call->conn->trans->peer->lock); | ||
114 | |||
115 | call->lifetimer.expires = jiffies + rxrpc_call_max_lifetime * HZ; | ||
116 | add_timer(&call->lifetimer); | ||
117 | |||
118 | _leave(" = %p", call); | ||
119 | return call; | ||
120 | } | ||
121 | |||
/*
 * set up a call for the given data
 * - called in process context with IRQs enabled
 * - looks up the call with the given user ID on socket rx; if not found and
 *   @create is set, a new client call is allocated on trans/bundle
 * - returns the call with a usage reference held for the caller, or an
 *   ERR_PTR() (-EBADSLT if absent and not creating)
 */
struct rxrpc_call *rxrpc_get_client_call(struct rxrpc_sock *rx,
					 struct rxrpc_transport *trans,
					 struct rxrpc_conn_bundle *bundle,
					 unsigned long user_call_ID,
					 int create,
					 gfp_t gfp)
{
	struct rxrpc_call *call, *candidate;
	struct rb_node *p, *parent, **pp;

	_enter("%p,%d,%d,%lx,%d",
	       rx, trans ? trans->debug_id : -1, bundle ? bundle->debug_id : -1,
	       user_call_ID, create);

	/* search the extant calls first for one that matches the specified
	 * user ID */
	read_lock(&rx->call_lock);

	p = rx->calls.rb_node;
	while (p) {
		call = rb_entry(p, struct rxrpc_call, sock_node);

		if (user_call_ID < call->user_call_ID)
			p = p->rb_left;
		else if (user_call_ID > call->user_call_ID)
			p = p->rb_right;
		else
			goto found_extant_call;
	}

	read_unlock(&rx->call_lock);

	if (!create || !trans)
		return ERR_PTR(-EBADSLT);

	/* not yet present - create a candidate for a new record and then
	 * redo the search (the allocation may sleep, so the tree can change
	 * while the lock is dropped) */
	candidate = rxrpc_alloc_client_call(rx, trans, bundle, gfp);
	if (IS_ERR(candidate)) {
		_leave(" = %ld", PTR_ERR(candidate));
		return candidate;
	}

	candidate->user_call_ID = user_call_ID;
	/* tree not yet published, so the non-atomic set is sufficient */
	__set_bit(RXRPC_CALL_HAS_USERID, &candidate->flags);

	write_lock(&rx->call_lock);

	pp = &rx->calls.rb_node;
	parent = NULL;
	while (*pp) {
		parent = *pp;
		call = rb_entry(parent, struct rxrpc_call, sock_node);

		if (user_call_ID < call->user_call_ID)
			pp = &(*pp)->rb_left;
		else if (user_call_ID > call->user_call_ID)
			pp = &(*pp)->rb_right;
		else
			goto found_extant_second;
	}

	/* second search also failed; add the new call */
	call = candidate;
	candidate = NULL;
	rxrpc_get_call(call);	/* extra ref returned to the caller */

	rb_link_node(&call->sock_node, parent, pp);
	rb_insert_color(&call->sock_node, &rx->calls);
	write_unlock(&rx->call_lock);

	/* publish on the global list of calls */
	write_lock_bh(&rxrpc_call_lock);
	list_add_tail(&call->link, &rxrpc_calls);
	write_unlock_bh(&rxrpc_call_lock);

	_net("CALL new %d on CONN %d", call->debug_id, call->conn->debug_id);

	_leave(" = %p [new]", call);
	return call;

	/* we found the call in the list immediately */
found_extant_call:
	rxrpc_get_call(call);
	read_unlock(&rx->call_lock);
	_leave(" = %p [extant %d]", call, atomic_read(&call->usage));
	return call;

	/* we found the call on the second time through the list */
found_extant_second:
	rxrpc_get_call(call);
	write_unlock(&rx->call_lock);
	/* NOTE(review): candidate holds its initial usage of 1 here, so this
	 * put drops it to zero with state still CLIENT_SEND_REQUEST, and
	 * __rxrpc_put_call asserts the state is DEAD - confirm this race
	 * path is actually safe */
	rxrpc_put_call(candidate);
	_leave(" = %p [second %d]", call, atomic_read(&call->usage));
	return call;
}
221 | |||
/*
 * set up an incoming call
 * - called in process context with IRQs enabled
 * - hdr fields are in network byte order (cid is masked with ntohl below)
 * - binds the call into the connection's channel slot and call-ID tree;
 *   duplicate packets for an extant call return that call instead
 * - returns the call with a reference held, or an ERR_PTR()
 */
struct rxrpc_call *rxrpc_incoming_call(struct rxrpc_sock *rx,
				       struct rxrpc_connection *conn,
				       struct rxrpc_header *hdr,
				       gfp_t gfp)
{
	struct rxrpc_call *call, *candidate;
	struct rb_node **p, *parent;
	__be32 call_id;

	_enter(",%d,,%x", conn->debug_id, gfp);

	ASSERT(rx != NULL);

	/* NOTE(review): allocation failure is reported as -EBUSY rather than
	 * -ENOMEM - confirm callers rely on this */
	candidate = rxrpc_alloc_call(gfp);
	if (!candidate)
		return ERR_PTR(-EBUSY);

	candidate->socket = rx;
	candidate->conn = conn;
	candidate->cid = hdr->cid;
	candidate->call_id = hdr->callNumber;
	candidate->channel = ntohl(hdr->cid) & RXRPC_CHANNELMASK;
	candidate->rx_data_post = 0;
	/* a secured connection must complete security negotiation before the
	 * call can be offered for acceptance */
	candidate->state = RXRPC_CALL_SERVER_ACCEPTING;
	if (conn->security_ix > 0)
		candidate->state = RXRPC_CALL_SERVER_SECURING;

	write_lock_bh(&conn->lock);

	/* set the channel for this call */
	call = conn->channels[candidate->channel];
	_debug("channel[%u] is %p", candidate->channel, call);
	if (call && call->call_id == hdr->callNumber) {
		/* already set; must've been a duplicate packet */
		_debug("extant call [%d]", call->state);
		ASSERTCMP(call->conn, ==, conn);

		read_lock(&call->state_lock);
		switch (call->state) {
		case RXRPC_CALL_LOCALLY_ABORTED:
			/* re-notify the peer of the abort */
			if (!test_and_set_bit(RXRPC_CALL_ABORT, &call->events))
				rxrpc_queue_call(call);
			/* fall through */
		case RXRPC_CALL_REMOTELY_ABORTED:
			read_unlock(&call->state_lock);
			goto aborted_call;
		default:
			rxrpc_get_call(call);
			read_unlock(&call->state_lock);
			goto extant_call;
		}
	}

	if (call) {
		/* it seems the channel is still in use from the previous call
		 * - ditch the old binding if its call is now complete */
		_debug("CALL: %u { %s }",
		       call->debug_id, rxrpc_call_states[call->state]);

		if (call->state >= RXRPC_CALL_COMPLETE) {
			conn->channels[call->channel] = NULL;
		} else {
			/* channel genuinely busy with a live call */
			write_unlock_bh(&conn->lock);
			kmem_cache_free(rxrpc_call_jar, candidate);
			_leave(" = -EBUSY");
			return ERR_PTR(-EBUSY);
		}
	}

	/* check the call number isn't duplicate */
	_debug("check dup");
	call_id = hdr->callNumber;
	p = &conn->calls.rb_node;
	parent = NULL;
	while (*p) {
		parent = *p;
		call = rb_entry(parent, struct rxrpc_call, conn_node);

		if (call_id < call->call_id)
			p = &(*p)->rb_left;
		else if (call_id > call->call_id)
			p = &(*p)->rb_right;
		else
			goto old_call;
	}

	/* make the call available */
	_debug("new call");
	call = candidate;
	candidate = NULL;
	rb_link_node(&call->conn_node, parent, p);
	rb_insert_color(&call->conn_node, &conn->calls);
	conn->channels[call->channel] = call;
	sock_hold(&rx->sk);		/* the call pins the socket... */
	atomic_inc(&conn->usage);	/* ...and the connection */
	write_unlock_bh(&conn->lock);

	/* register for notification of transmission errors on this peer */
	spin_lock(&conn->trans->peer->lock);
	list_add(&call->error_link, &conn->trans->peer->error_targets);
	spin_unlock(&conn->trans->peer->lock);

	/* publish on the global list of calls */
	write_lock_bh(&rxrpc_call_lock);
	list_add_tail(&call->link, &rxrpc_calls);
	write_unlock_bh(&rxrpc_call_lock);

	_net("CALL incoming %d on CONN %d", call->debug_id, call->conn->debug_id);

	/* bound the call's lifetime (rxrpc_call_max_lifetime seconds) */
	call->lifetimer.expires = jiffies + rxrpc_call_max_lifetime * HZ;
	add_timer(&call->lifetimer);
	_leave(" = %p {%d} [new]", call, call->debug_id);
	return call;

	/* NOTE(review): the error/extant exits below free the unused
	 * candidate with kmem_cache_free() only; candidate->acks_window
	 * (kmalloc'd in rxrpc_alloc_call) appears to be leaked - confirm */
	extant_call:
	write_unlock_bh(&conn->lock);
	kmem_cache_free(rxrpc_call_jar, candidate);
	_leave(" = %p {%d} [extant]", call, call ? call->debug_id : -1);
	return call;

	aborted_call:
	write_unlock_bh(&conn->lock);
	kmem_cache_free(rxrpc_call_jar, candidate);
	_leave(" = -ECONNABORTED");
	return ERR_PTR(-ECONNABORTED);

	old_call:
	write_unlock_bh(&conn->lock);
	kmem_cache_free(rxrpc_call_jar, candidate);
	_leave(" = -ECONNRESET [old]");
	return ERR_PTR(-ECONNRESET);
}
355 | |||
356 | /* | ||
357 | * find an extant server call | ||
358 | * - called in process context with IRQs enabled | ||
359 | */ | ||
360 | struct rxrpc_call *rxrpc_find_server_call(struct rxrpc_sock *rx, | ||
361 | unsigned long user_call_ID) | ||
362 | { | ||
363 | struct rxrpc_call *call; | ||
364 | struct rb_node *p; | ||
365 | |||
366 | _enter("%p,%lx", rx, user_call_ID); | ||
367 | |||
368 | /* search the extant calls for one that matches the specified user | ||
369 | * ID */ | ||
370 | read_lock(&rx->call_lock); | ||
371 | |||
372 | p = rx->calls.rb_node; | ||
373 | while (p) { | ||
374 | call = rb_entry(p, struct rxrpc_call, sock_node); | ||
375 | |||
376 | if (user_call_ID < call->user_call_ID) | ||
377 | p = p->rb_left; | ||
378 | else if (user_call_ID > call->user_call_ID) | ||
379 | p = p->rb_right; | ||
380 | else | ||
381 | goto found_extant_call; | ||
382 | } | ||
383 | |||
384 | read_unlock(&rx->call_lock); | ||
385 | _leave(" = NULL"); | ||
386 | return NULL; | ||
387 | |||
388 | /* we found the call in the list immediately */ | ||
389 | found_extant_call: | ||
390 | rxrpc_get_call(call); | ||
391 | read_unlock(&rx->call_lock); | ||
392 | _leave(" = %p [%d]", call, atomic_read(&call->usage)); | ||
393 | return call; | ||
394 | } | ||
395 | |||
/*
 * detach a call from a socket and set up for release
 * - unlinks the call from the socket's accept queue or user-ID tree,
 *   frees up its connection channel, aborts the call if still live,
 *   purges the Rx queues and finally arms the dead-call timer
 * - the socket's ref on the call is passed to the death timer
 */
void rxrpc_release_call(struct rxrpc_call *call)
{
	struct rxrpc_connection *conn = call->conn;
	struct rxrpc_sock *rx = call->socket;

	_enter("{%d,%d,%d,%d}",
	       call->debug_id, atomic_read(&call->usage),
	       atomic_read(&call->ackr_not_idle),
	       call->rx_first_oos);

	/* a call must only be released once */
	spin_lock_bh(&call->lock);
	if (test_and_set_bit(RXRPC_CALL_RELEASED, &call->flags))
		BUG();
	spin_unlock_bh(&call->lock);

	/* dissociate from the socket
	 * - the socket's ref on the call is passed to the death timer
	 */
	_debug("RELEASE CALL %p (%d CONN %p)", call, call->debug_id, conn);

	write_lock_bh(&rx->call_lock);
	if (!list_empty(&call->accept_link)) {
		/* still on the to-be-accepted queue: a call there cannot yet
		 * have a user ID */
		_debug("unlinking once-pending call %p { e=%lx f=%lx }",
		       call, call->events, call->flags);
		ASSERT(!test_bit(RXRPC_CALL_HAS_USERID, &call->flags));
		list_del_init(&call->accept_link);
		sk_acceptq_removed(&rx->sk);
	} else if (test_bit(RXRPC_CALL_HAS_USERID, &call->flags)) {
		rb_erase(&call->sock_node, &rx->calls);
		/* poison the erased node so stale use shows up in a dump */
		memset(&call->sock_node, 0xdd, sizeof(call->sock_node));
		clear_bit(RXRPC_CALL_HAS_USERID, &call->flags);
	}
	write_unlock_bh(&rx->call_lock);

	/* free up the channel for reuse
	 * - lock order: client_lock, then conn->lock, then state_lock */
	spin_lock(&conn->trans->client_lock);
	write_lock_bh(&conn->lock);
	write_lock(&call->state_lock);

	if (conn->channels[call->channel] == call)
		conn->channels[call->channel] = NULL;

	if (conn->out_clientflag && conn->bundle) {
		/* move the client conn between the bundle's lists according
		 * to how many of its channels are now free */
		conn->avail_calls++;
		switch (conn->avail_calls) {
		case 1:
			/* first free channel: conn becomes available again */
			list_move_tail(&conn->bundle_link,
				       &conn->bundle->avail_conns);
			/* fall through */
		case 2 ... RXRPC_MAXCALLS - 1:
			/* at least one channel must actually be unbound */
			ASSERT(conn->channels[0] == NULL ||
			       conn->channels[1] == NULL ||
			       conn->channels[2] == NULL ||
			       conn->channels[3] == NULL);
			break;
		case RXRPC_MAXCALLS:
			/* all channels free: conn is wholly unused */
			list_move_tail(&conn->bundle_link,
				       &conn->bundle->unused_conns);
			ASSERT(conn->channels[0] == NULL &&
			       conn->channels[1] == NULL &&
			       conn->channels[2] == NULL &&
			       conn->channels[3] == NULL);
			break;
		default:
			printk(KERN_ERR "RxRPC: conn->avail_calls=%d\n",
			       conn->avail_calls);
			BUG();
		}
	}

	spin_unlock(&conn->trans->client_lock);

	/* abort the call if it's still in progress (final-ACK phase is left
	 * to complete on its own) */
	if (call->state < RXRPC_CALL_COMPLETE &&
	    call->state != RXRPC_CALL_CLIENT_FINAL_ACK) {
		_debug("+++ ABORTING STATE %d +++\n", call->state);
		call->state = RXRPC_CALL_LOCALLY_ABORTED;
		call->abort_code = RX_CALL_DEAD;
		set_bit(RXRPC_CALL_ABORT, &call->events);
		rxrpc_queue_call(call);
	}
	write_unlock(&call->state_lock);
	write_unlock_bh(&conn->lock);

	/* clean up the Rx queue */
	if (!skb_queue_empty(&call->rx_queue) ||
	    !skb_queue_empty(&call->rx_oos_queue)) {
		struct rxrpc_skb_priv *sp;
		struct sk_buff *skb;

		_debug("purge Rx queues");

		spin_lock_bh(&call->lock);
		while ((skb = skb_dequeue(&call->rx_queue)) ||
		       (skb = skb_dequeue(&call->rx_oos_queue))) {
			sp = rxrpc_skb(skb);
			if (sp->call) {
				ASSERTCMP(sp->call, ==, call);
				/* drop the skb's ref on the call */
				rxrpc_put_call(call);
				sp->call = NULL;
			}
			skb->destructor = NULL;
			/* drop the lock across the free: the destructor has
			 * been cleared, so freeing can't recurse on it */
			spin_unlock_bh(&call->lock);

			_debug("- zap %s %%%u #%u",
			       rxrpc_pkts[sp->hdr.type],
			       ntohl(sp->hdr.serial),
			       ntohl(sp->hdr.seq));
			rxrpc_free_skb(skb);
			spin_lock_bh(&call->lock);
		}
		spin_unlock_bh(&call->lock);

		ASSERTCMP(call->state, !=, RXRPC_CALL_COMPLETE);
	}

	/* stop the live timers, then give the call its dead-time before the
	 * record is actually reaped */
	del_timer_sync(&call->resend_timer);
	del_timer_sync(&call->ack_timer);
	del_timer_sync(&call->lifetimer);
	call->deadspan.expires = jiffies + rxrpc_dead_call_timeout * HZ;
	add_timer(&call->deadspan);

	_leave("");
}
521 | |||
522 | /* | ||
523 | * handle a dead call being ready for reaping | ||
524 | */ | ||
525 | static void rxrpc_dead_call_expired(unsigned long _call) | ||
526 | { | ||
527 | struct rxrpc_call *call = (struct rxrpc_call *) _call; | ||
528 | |||
529 | _enter("{%d}", call->debug_id); | ||
530 | |||
531 | write_lock_bh(&call->state_lock); | ||
532 | call->state = RXRPC_CALL_DEAD; | ||
533 | write_unlock_bh(&call->state_lock); | ||
534 | rxrpc_put_call(call); | ||
535 | } | ||
536 | |||
537 | /* | ||
538 | * mark a call as to be released, aborting it if it's still in progress | ||
539 | * - called with softirqs disabled | ||
540 | */ | ||
541 | static void rxrpc_mark_call_released(struct rxrpc_call *call) | ||
542 | { | ||
543 | bool sched; | ||
544 | |||
545 | write_lock(&call->state_lock); | ||
546 | if (call->state < RXRPC_CALL_DEAD) { | ||
547 | sched = false; | ||
548 | if (call->state < RXRPC_CALL_COMPLETE) { | ||
549 | _debug("abort call %p", call); | ||
550 | call->state = RXRPC_CALL_LOCALLY_ABORTED; | ||
551 | call->abort_code = RX_CALL_DEAD; | ||
552 | if (!test_and_set_bit(RXRPC_CALL_ABORT, &call->events)) | ||
553 | sched = true; | ||
554 | } | ||
555 | if (!test_and_set_bit(RXRPC_CALL_RELEASE, &call->events)) | ||
556 | sched = true; | ||
557 | if (sched) | ||
558 | rxrpc_queue_call(call); | ||
559 | } | ||
560 | write_unlock(&call->state_lock); | ||
561 | } | ||
562 | |||
563 | /* | ||
564 | * release all the calls associated with a socket | ||
565 | */ | ||
566 | void rxrpc_release_calls_on_socket(struct rxrpc_sock *rx) | ||
567 | { | ||
568 | struct rxrpc_call *call; | ||
569 | struct rb_node *p; | ||
570 | |||
571 | _enter("%p", rx); | ||
572 | |||
573 | read_lock_bh(&rx->call_lock); | ||
574 | |||
575 | /* mark all the calls as no longer wanting incoming packets */ | ||
576 | for (p = rb_first(&rx->calls); p; p = rb_next(p)) { | ||
577 | call = rb_entry(p, struct rxrpc_call, sock_node); | ||
578 | rxrpc_mark_call_released(call); | ||
579 | } | ||
580 | |||
581 | /* kill the not-yet-accepted incoming calls */ | ||
582 | list_for_each_entry(call, &rx->secureq, accept_link) { | ||
583 | rxrpc_mark_call_released(call); | ||
584 | } | ||
585 | |||
586 | list_for_each_entry(call, &rx->acceptq, accept_link) { | ||
587 | rxrpc_mark_call_released(call); | ||
588 | } | ||
589 | |||
590 | read_unlock_bh(&rx->call_lock); | ||
591 | _leave(""); | ||
592 | } | ||
593 | |||
594 | /* | ||
595 | * release a call | ||
596 | */ | ||
597 | void __rxrpc_put_call(struct rxrpc_call *call) | ||
598 | { | ||
599 | ASSERT(call != NULL); | ||
600 | |||
601 | _enter("%p{u=%d}", call, atomic_read(&call->usage)); | ||
602 | |||
603 | ASSERTCMP(atomic_read(&call->usage), >, 0); | ||
604 | |||
605 | if (atomic_dec_and_test(&call->usage)) { | ||
606 | _debug("call %d dead", call->debug_id); | ||
607 | ASSERTCMP(call->state, ==, RXRPC_CALL_DEAD); | ||
608 | rxrpc_queue_work(&call->destroyer); | ||
609 | } | ||
610 | _leave(""); | ||
611 | } | ||
612 | |||
/*
 * clean up a call: tear down its timers, drop its connection linkage,
 * discard any unacknowledged Tx packets and queued Rx packets, then free
 * the record
 * - destruction is re-deferred if the processor work item is still pending
 */
static void rxrpc_cleanup_call(struct rxrpc_call *call)
{
	_net("DESTROY CALL %d", call->debug_id);

	ASSERT(call->socket);

	/* poison the tree node so stale use shows up in a dump */
	memset(&call->sock_node, 0xcd, sizeof(call->sock_node));

	del_timer_sync(&call->lifetimer);
	del_timer_sync(&call->deadspan);
	del_timer_sync(&call->ack_timer);
	del_timer_sync(&call->resend_timer);

	ASSERT(test_bit(RXRPC_CALL_RELEASED, &call->flags));
	ASSERTCMP(call->events, ==, 0);
	if (work_pending(&call->processor)) {
		/* the processor still holds an implicit interest in the call;
		 * requeue the destroyer to run after it */
		_debug("defer destroy");
		rxrpc_queue_work(&call->destroyer);
		return;
	}

	if (call->conn) {
		spin_lock(&call->conn->trans->peer->lock);
		list_del(&call->error_link);
		spin_unlock(&call->conn->trans->peer->lock);

		write_lock_bh(&call->conn->lock);
		rb_erase(&call->conn_node, &call->conn->calls);
		write_unlock_bh(&call->conn->lock);
		rxrpc_put_connection(call->conn);
	}

	if (call->acks_window) {
		/* discard any skbs left in the Tx ACK ring */
		_debug("kill Tx window %d",
		       CIRC_CNT(call->acks_head, call->acks_tail,
				call->acks_winsz));
		smp_mb();
		while (CIRC_CNT(call->acks_head, call->acks_tail,
				call->acks_winsz) > 0) {
			struct rxrpc_skb_priv *sp;
			unsigned long _skb;

			/* ring entries are skb pointers with bit 0 used as a
			 * flag, so mask it off before dereferencing
			 * (NOTE(review): flag semantics not visible here -
			 * confirm against the Tx-window producer) */
			_skb = call->acks_window[call->acks_tail] & ~1;
			sp = rxrpc_skb((struct sk_buff *) _skb);
			_debug("+++ clear Tx %u", ntohl(sp->hdr.seq));
			rxrpc_free_skb((struct sk_buff *) _skb);
			call->acks_tail =
				(call->acks_tail + 1) & (call->acks_winsz - 1);
		}

		kfree(call->acks_window);
	}

	rxrpc_free_skb(call->tx_pending);

	rxrpc_purge_queue(&call->rx_queue);
	ASSERT(skb_queue_empty(&call->rx_oos_queue));
	sock_put(&call->socket->sk);
	kmem_cache_free(rxrpc_call_jar, call);
}
676 | |||
677 | /* | ||
678 | * destroy a call | ||
679 | */ | ||
680 | static void rxrpc_destroy_call(struct work_struct *work) | ||
681 | { | ||
682 | struct rxrpc_call *call = | ||
683 | container_of(work, struct rxrpc_call, destroyer); | ||
684 | |||
685 | _enter("%p{%d,%d,%p}", | ||
686 | call, atomic_read(&call->usage), call->channel, call->conn); | ||
687 | |||
688 | ASSERTCMP(call->state, ==, RXRPC_CALL_DEAD); | ||
689 | |||
690 | write_lock_bh(&rxrpc_call_lock); | ||
691 | list_del_init(&call->link); | ||
692 | write_unlock_bh(&rxrpc_call_lock); | ||
693 | |||
694 | rxrpc_cleanup_call(call); | ||
695 | _leave(""); | ||
696 | } | ||
697 | |||
/*
 * preemptively destroy all the call records from a transport endpoint rather
 * than waiting for them to time out
 * - module-exit path: drains the global call list, forcing dead-timer expiry
 *   where needed and complaining about calls that are still in use
 */
void __exit rxrpc_destroy_all_calls(void)
{
	struct rxrpc_call *call;

	_enter("");
	write_lock_bh(&rxrpc_call_lock);

	while (!list_empty(&rxrpc_calls)) {
		call = list_entry(rxrpc_calls.next, struct rxrpc_call, link);
		_debug("Zapping call %p", call);

		list_del_init(&call->link);

		switch (atomic_read(&call->usage)) {
		case 0:
			/* already fully put; just check its state */
			ASSERTCMP(call->state, ==, RXRPC_CALL_DEAD);
			break;
		case 1:
			/* only the deadspan timer's ref remains: expire it
			 * now rather than waiting */
			if (del_timer_sync(&call->deadspan) != 0 &&
			    call->state != RXRPC_CALL_DEAD)
				rxrpc_dead_call_expired((unsigned long) call);
			if (call->state != RXRPC_CALL_DEAD)
				break;
			/* fall through - still not dead: report it below */
		default:
			printk(KERN_ERR "RXRPC:"
			       " Call %p still in use (%d,%d,%s,%lx,%lx)!\n",
			       call, atomic_read(&call->usage),
			       atomic_read(&call->ackr_not_idle),
			       rxrpc_call_states[call->state],
			       call->flags, call->events);
			if (!skb_queue_empty(&call->rx_queue))
				printk(KERN_ERR"RXRPC: Rx queue occupied\n");
			if (!skb_queue_empty(&call->rx_oos_queue))
				printk(KERN_ERR"RXRPC: OOS queue occupied\n");
			break;
		}

		/* drop the lock to let the destroyer work items make
		 * progress before taking the next call off the list */
		write_unlock_bh(&rxrpc_call_lock);
		cond_resched();
		write_lock_bh(&rxrpc_call_lock);
	}

	write_unlock_bh(&rxrpc_call_lock);
	_leave("");
}
747 | |||
748 | /* | ||
749 | * handle call lifetime being exceeded | ||
750 | */ | ||
751 | static void rxrpc_call_life_expired(unsigned long _call) | ||
752 | { | ||
753 | struct rxrpc_call *call = (struct rxrpc_call *) _call; | ||
754 | |||
755 | if (call->state >= RXRPC_CALL_COMPLETE) | ||
756 | return; | ||
757 | |||
758 | _enter("{%d}", call->debug_id); | ||
759 | read_lock_bh(&call->state_lock); | ||
760 | if (call->state < RXRPC_CALL_COMPLETE) { | ||
761 | set_bit(RXRPC_CALL_LIFE_TIMER, &call->events); | ||
762 | rxrpc_queue_call(call); | ||
763 | } | ||
764 | read_unlock_bh(&call->state_lock); | ||
765 | } | ||
766 | |||
767 | /* | ||
768 | * handle resend timer expiry | ||
769 | */ | ||
770 | static void rxrpc_resend_time_expired(unsigned long _call) | ||
771 | { | ||
772 | struct rxrpc_call *call = (struct rxrpc_call *) _call; | ||
773 | |||
774 | _enter("{%d}", call->debug_id); | ||
775 | |||
776 | if (call->state >= RXRPC_CALL_COMPLETE) | ||
777 | return; | ||
778 | |||
779 | read_lock_bh(&call->state_lock); | ||
780 | clear_bit(RXRPC_CALL_RUN_RTIMER, &call->flags); | ||
781 | if (call->state < RXRPC_CALL_COMPLETE && | ||
782 | !test_and_set_bit(RXRPC_CALL_RESEND_TIMER, &call->events)) | ||
783 | rxrpc_queue_call(call); | ||
784 | read_unlock_bh(&call->state_lock); | ||
785 | } | ||
786 | |||
787 | /* | ||
788 | * handle ACK timer expiry | ||
789 | */ | ||
790 | static void rxrpc_ack_time_expired(unsigned long _call) | ||
791 | { | ||
792 | struct rxrpc_call *call = (struct rxrpc_call *) _call; | ||
793 | |||
794 | _enter("{%d}", call->debug_id); | ||
795 | |||
796 | if (call->state >= RXRPC_CALL_COMPLETE) | ||
797 | return; | ||
798 | |||
799 | read_lock_bh(&call->state_lock); | ||
800 | if (call->state < RXRPC_CALL_COMPLETE && | ||
801 | !test_and_set_bit(RXRPC_CALL_ACK, &call->events)) | ||
802 | rxrpc_queue_call(call); | ||
803 | read_unlock_bh(&call->state_lock); | ||
804 | } | ||
diff --git a/net/rxrpc/ar-connection.c b/net/rxrpc/ar-connection.c new file mode 100644 index 000000000000..43cb3e051ece --- /dev/null +++ b/net/rxrpc/ar-connection.c | |||
@@ -0,0 +1,911 @@ | |||
1 | /* RxRPC virtual connection handler | ||
2 | * | ||
3 | * Copyright (C) 2007 Red Hat, Inc. All Rights Reserved. | ||
4 | * Written by David Howells (dhowells@redhat.com) | ||
5 | * | ||
6 | * This program is free software; you can redistribute it and/or | ||
7 | * modify it under the terms of the GNU General Public License | ||
8 | * as published by the Free Software Foundation; either version | ||
9 | * 2 of the License, or (at your option) any later version. | ||
10 | */ | ||
11 | |||
12 | #include <linux/module.h> | ||
13 | #include <linux/net.h> | ||
14 | #include <linux/skbuff.h> | ||
15 | #include <linux/crypto.h> | ||
16 | #include <net/sock.h> | ||
17 | #include <net/af_rxrpc.h> | ||
18 | #include "ar-internal.h" | ||
19 | |||
20 | static void rxrpc_connection_reaper(struct work_struct *work); | ||
21 | |||
22 | LIST_HEAD(rxrpc_connections); | ||
23 | DEFINE_RWLOCK(rxrpc_connection_lock); | ||
24 | static unsigned long rxrpc_connection_timeout = 10 * 60; | ||
25 | static DECLARE_DELAYED_WORK(rxrpc_connection_reap, rxrpc_connection_reaper); | ||
26 | |||
27 | /* | ||
28 | * allocate a new client connection bundle | ||
29 | */ | ||
30 | static struct rxrpc_conn_bundle *rxrpc_alloc_bundle(gfp_t gfp) | ||
31 | { | ||
32 | struct rxrpc_conn_bundle *bundle; | ||
33 | |||
34 | _enter(""); | ||
35 | |||
36 | bundle = kzalloc(sizeof(struct rxrpc_conn_bundle), gfp); | ||
37 | if (bundle) { | ||
38 | INIT_LIST_HEAD(&bundle->unused_conns); | ||
39 | INIT_LIST_HEAD(&bundle->avail_conns); | ||
40 | INIT_LIST_HEAD(&bundle->busy_conns); | ||
41 | init_waitqueue_head(&bundle->chanwait); | ||
42 | atomic_set(&bundle->usage, 1); | ||
43 | } | ||
44 | |||
45 | _leave(" = %p", bundle); | ||
46 | return bundle; | ||
47 | } | ||
48 | |||
49 | /* | ||
50 | * compare bundle parameters with what we're looking for | ||
51 | * - return -ve, 0 or +ve | ||
52 | */ | ||
53 | static inline | ||
54 | int rxrpc_cmp_bundle(const struct rxrpc_conn_bundle *bundle, | ||
55 | struct key *key, __be16 service_id) | ||
56 | { | ||
57 | return (bundle->service_id - service_id) ?: | ||
58 | ((unsigned long) bundle->key - (unsigned long) key); | ||
59 | } | ||
60 | |||
61 | /* | ||
62 | * get bundle of client connections that a client socket can make use of | ||
63 | */ | ||
64 | struct rxrpc_conn_bundle *rxrpc_get_bundle(struct rxrpc_sock *rx, | ||
65 | struct rxrpc_transport *trans, | ||
66 | struct key *key, | ||
67 | __be16 service_id, | ||
68 | gfp_t gfp) | ||
69 | { | ||
70 | struct rxrpc_conn_bundle *bundle, *candidate; | ||
71 | struct rb_node *p, *parent, **pp; | ||
72 | |||
73 | _enter("%p{%x},%x,%hx,", | ||
74 | rx, key_serial(key), trans->debug_id, ntohl(service_id)); | ||
75 | |||
76 | if (rx->trans == trans && rx->bundle) { | ||
77 | atomic_inc(&rx->bundle->usage); | ||
78 | return rx->bundle; | ||
79 | } | ||
80 | |||
81 | /* search the extant bundles first for one that matches the specified | ||
82 | * user ID */ | ||
83 | spin_lock(&trans->client_lock); | ||
84 | |||
85 | p = trans->bundles.rb_node; | ||
86 | while (p) { | ||
87 | bundle = rb_entry(p, struct rxrpc_conn_bundle, node); | ||
88 | |||
89 | if (rxrpc_cmp_bundle(bundle, key, service_id) < 0) | ||
90 | p = p->rb_left; | ||
91 | else if (rxrpc_cmp_bundle(bundle, key, service_id) > 0) | ||
92 | p = p->rb_right; | ||
93 | else | ||
94 | goto found_extant_bundle; | ||
95 | } | ||
96 | |||
97 | spin_unlock(&trans->client_lock); | ||
98 | |||
99 | /* not yet present - create a candidate for a new record and then | ||
100 | * redo the search */ | ||
101 | candidate = rxrpc_alloc_bundle(gfp); | ||
102 | if (!candidate) { | ||
103 | _leave(" = -ENOMEM"); | ||
104 | return ERR_PTR(-ENOMEM); | ||
105 | } | ||
106 | |||
107 | candidate->key = key_get(key); | ||
108 | candidate->service_id = service_id; | ||
109 | |||
110 | spin_lock(&trans->client_lock); | ||
111 | |||
112 | pp = &trans->bundles.rb_node; | ||
113 | parent = NULL; | ||
114 | while (*pp) { | ||
115 | parent = *pp; | ||
116 | bundle = rb_entry(parent, struct rxrpc_conn_bundle, node); | ||
117 | |||
118 | if (rxrpc_cmp_bundle(bundle, key, service_id) < 0) | ||
119 | pp = &(*pp)->rb_left; | ||
120 | else if (rxrpc_cmp_bundle(bundle, key, service_id) > 0) | ||
121 | pp = &(*pp)->rb_right; | ||
122 | else | ||
123 | goto found_extant_second; | ||
124 | } | ||
125 | |||
126 | /* second search also failed; add the new bundle */ | ||
127 | bundle = candidate; | ||
128 | candidate = NULL; | ||
129 | |||
130 | rb_link_node(&bundle->node, parent, pp); | ||
131 | rb_insert_color(&bundle->node, &trans->bundles); | ||
132 | spin_unlock(&trans->client_lock); | ||
133 | _net("BUNDLE new on trans %d", trans->debug_id); | ||
134 | if (!rx->bundle && rx->sk.sk_state == RXRPC_CLIENT_CONNECTED) { | ||
135 | atomic_inc(&bundle->usage); | ||
136 | rx->bundle = bundle; | ||
137 | } | ||
138 | _leave(" = %p [new]", bundle); | ||
139 | return bundle; | ||
140 | |||
141 | /* we found the bundle in the list immediately */ | ||
142 | found_extant_bundle: | ||
143 | atomic_inc(&bundle->usage); | ||
144 | spin_unlock(&trans->client_lock); | ||
145 | _net("BUNDLE old on trans %d", trans->debug_id); | ||
146 | if (!rx->bundle && rx->sk.sk_state == RXRPC_CLIENT_CONNECTED) { | ||
147 | atomic_inc(&bundle->usage); | ||
148 | rx->bundle = bundle; | ||
149 | } | ||
150 | _leave(" = %p [extant %d]", bundle, atomic_read(&bundle->usage)); | ||
151 | return bundle; | ||
152 | |||
153 | /* we found the bundle on the second time through the list */ | ||
154 | found_extant_second: | ||
155 | atomic_inc(&bundle->usage); | ||
156 | spin_unlock(&trans->client_lock); | ||
157 | kfree(candidate); | ||
158 | _net("BUNDLE old2 on trans %d", trans->debug_id); | ||
159 | if (!rx->bundle && rx->sk.sk_state == RXRPC_CLIENT_CONNECTED) { | ||
160 | atomic_inc(&bundle->usage); | ||
161 | rx->bundle = bundle; | ||
162 | } | ||
163 | _leave(" = %p [second %d]", bundle, atomic_read(&bundle->usage)); | ||
164 | return bundle; | ||
165 | } | ||
166 | |||
167 | /* | ||
168 | * release a bundle | ||
169 | */ | ||
170 | void rxrpc_put_bundle(struct rxrpc_transport *trans, | ||
171 | struct rxrpc_conn_bundle *bundle) | ||
172 | { | ||
173 | _enter("%p,%p{%d}",trans, bundle, atomic_read(&bundle->usage)); | ||
174 | |||
175 | if (atomic_dec_and_lock(&bundle->usage, &trans->client_lock)) { | ||
176 | _debug("Destroy bundle"); | ||
177 | rb_erase(&bundle->node, &trans->bundles); | ||
178 | spin_unlock(&trans->client_lock); | ||
179 | ASSERT(list_empty(&bundle->unused_conns)); | ||
180 | ASSERT(list_empty(&bundle->avail_conns)); | ||
181 | ASSERT(list_empty(&bundle->busy_conns)); | ||
182 | ASSERTCMP(bundle->num_conns, ==, 0); | ||
183 | key_put(bundle->key); | ||
184 | kfree(bundle); | ||
185 | } | ||
186 | |||
187 | _leave(""); | ||
188 | } | ||
189 | |||
190 | /* | ||
191 | * allocate a new connection | ||
192 | */ | ||
193 | static struct rxrpc_connection *rxrpc_alloc_connection(gfp_t gfp) | ||
194 | { | ||
195 | struct rxrpc_connection *conn; | ||
196 | |||
197 | _enter(""); | ||
198 | |||
199 | conn = kzalloc(sizeof(struct rxrpc_connection), gfp); | ||
200 | if (conn) { | ||
201 | INIT_WORK(&conn->processor, &rxrpc_process_connection); | ||
202 | INIT_LIST_HEAD(&conn->bundle_link); | ||
203 | conn->calls = RB_ROOT; | ||
204 | skb_queue_head_init(&conn->rx_queue); | ||
205 | rwlock_init(&conn->lock); | ||
206 | spin_lock_init(&conn->state_lock); | ||
207 | atomic_set(&conn->usage, 1); | ||
208 | conn->debug_id = atomic_inc_return(&rxrpc_debug_id); | ||
209 | conn->avail_calls = RXRPC_MAXCALLS; | ||
210 | conn->size_align = 4; | ||
211 | conn->header_size = sizeof(struct rxrpc_header); | ||
212 | } | ||
213 | |||
214 | _leave(" = %p{%d}", conn, conn->debug_id); | ||
215 | return conn; | ||
216 | } | ||
217 | |||
/*
 * assign a connection ID to a connection and add it to the transport's
 * connection lookup tree
 * - called with transport client lock held
 * - the tree is keyed on (epoch, connection ID); IDs are allocated from a
 *   per-transport counter that steps by RXRPC_CID_INC so the low bits stay
 *   free for the channel number
 */
static void rxrpc_assign_connection_id(struct rxrpc_connection *conn)
{
	struct rxrpc_connection *xconn;
	struct rb_node *parent, **p;
	__be32 epoch;
	u32 real_conn_id;

	_enter("");

	epoch = conn->epoch;

	write_lock_bh(&conn->trans->conn_lock);

	/* bump the counter, wrapping back to the first valid ID (never 0) */
	conn->trans->conn_idcounter += RXRPC_CID_INC;
	if (conn->trans->conn_idcounter < RXRPC_CID_INC)
		conn->trans->conn_idcounter = RXRPC_CID_INC;
	real_conn_id = conn->trans->conn_idcounter;

attempt_insertion:
	parent = NULL;
	p = &conn->trans->client_conns.rb_node;

	/* descend the tree looking for a free slot for the proposed ID
	 * (NOTE: epoch values are __be32 and are compared in their stored
	 * representation - consistent with the other tree walks here) */
	while (*p) {
		parent = *p;
		xconn = rb_entry(parent, struct rxrpc_connection, node);

		if (epoch < xconn->epoch)
			p = &(*p)->rb_left;
		else if (epoch > xconn->epoch)
			p = &(*p)->rb_right;
		else if (real_conn_id < xconn->real_conn_id)
			p = &(*p)->rb_left;
		else if (real_conn_id > xconn->real_conn_id)
			p = &(*p)->rb_right;
		else
			goto id_exists;
	}

	/* we've found a suitable hole - arrange for this connection to occupy
	 * it */
	rb_link_node(&conn->node, parent, p);
	rb_insert_color(&conn->node, &conn->trans->client_conns);

	conn->real_conn_id = real_conn_id;
	conn->cid = htonl(real_conn_id);
	write_unlock_bh(&conn->trans->conn_lock);
	_leave(" [CONNID %x CID %x]", real_conn_id, ntohl(conn->cid));
	return;

	/* we found a connection with the proposed ID - walk the tree from that
	 * point looking for the next unused ID */
id_exists:
	for (;;) {
		real_conn_id += RXRPC_CID_INC;
		if (real_conn_id < RXRPC_CID_INC) {
			/* the ID space wrapped; restart the search from the
			 * beginning and reset the counter */
			real_conn_id = RXRPC_CID_INC;
			conn->trans->conn_idcounter = real_conn_id;
			goto attempt_insertion;
		}

		/* ran off the end of the tree - the candidate ID is free */
		parent = rb_next(parent);
		if (!parent)
			goto attempt_insertion;

		/* a gap before the next in-order node means the candidate ID
		 * is unused */
		xconn = rb_entry(parent, struct rxrpc_connection, node);
		if (epoch < xconn->epoch ||
		    real_conn_id < xconn->real_conn_id)
			goto attempt_insertion;
	}
}
293 | |||
294 | /* | ||
295 | * add a call to a connection's call-by-ID tree | ||
296 | */ | ||
297 | static void rxrpc_add_call_ID_to_conn(struct rxrpc_connection *conn, | ||
298 | struct rxrpc_call *call) | ||
299 | { | ||
300 | struct rxrpc_call *xcall; | ||
301 | struct rb_node *parent, **p; | ||
302 | __be32 call_id; | ||
303 | |||
304 | write_lock_bh(&conn->lock); | ||
305 | |||
306 | call_id = call->call_id; | ||
307 | p = &conn->calls.rb_node; | ||
308 | parent = NULL; | ||
309 | while (*p) { | ||
310 | parent = *p; | ||
311 | xcall = rb_entry(parent, struct rxrpc_call, conn_node); | ||
312 | |||
313 | if (call_id < xcall->call_id) | ||
314 | p = &(*p)->rb_left; | ||
315 | else if (call_id > xcall->call_id) | ||
316 | p = &(*p)->rb_right; | ||
317 | else | ||
318 | BUG(); | ||
319 | } | ||
320 | |||
321 | rb_link_node(&call->conn_node, parent, p); | ||
322 | rb_insert_color(&call->conn_node, &conn->calls); | ||
323 | |||
324 | write_unlock_bh(&conn->lock); | ||
325 | } | ||
326 | |||
327 | /* | ||
328 | * connect a call on an exclusive connection | ||
329 | */ | ||
330 | static int rxrpc_connect_exclusive(struct rxrpc_sock *rx, | ||
331 | struct rxrpc_transport *trans, | ||
332 | __be16 service_id, | ||
333 | struct rxrpc_call *call, | ||
334 | gfp_t gfp) | ||
335 | { | ||
336 | struct rxrpc_connection *conn; | ||
337 | int chan, ret; | ||
338 | |||
339 | _enter(""); | ||
340 | |||
341 | conn = rx->conn; | ||
342 | if (!conn) { | ||
343 | /* not yet present - create a candidate for a new connection | ||
344 | * and then redo the check */ | ||
345 | conn = rxrpc_alloc_connection(gfp); | ||
346 | if (IS_ERR(conn)) { | ||
347 | _leave(" = %ld", PTR_ERR(conn)); | ||
348 | return PTR_ERR(conn); | ||
349 | } | ||
350 | |||
351 | conn->trans = trans; | ||
352 | conn->bundle = NULL; | ||
353 | conn->service_id = service_id; | ||
354 | conn->epoch = rxrpc_epoch; | ||
355 | conn->in_clientflag = 0; | ||
356 | conn->out_clientflag = RXRPC_CLIENT_INITIATED; | ||
357 | conn->cid = 0; | ||
358 | conn->state = RXRPC_CONN_CLIENT; | ||
359 | conn->avail_calls = RXRPC_MAXCALLS - 1; | ||
360 | conn->security_level = rx->min_sec_level; | ||
361 | conn->key = key_get(rx->key); | ||
362 | |||
363 | ret = rxrpc_init_client_conn_security(conn); | ||
364 | if (ret < 0) { | ||
365 | key_put(conn->key); | ||
366 | kfree(conn); | ||
367 | _leave(" = %d [key]", ret); | ||
368 | return ret; | ||
369 | } | ||
370 | |||
371 | write_lock_bh(&rxrpc_connection_lock); | ||
372 | list_add_tail(&conn->link, &rxrpc_connections); | ||
373 | write_unlock_bh(&rxrpc_connection_lock); | ||
374 | |||
375 | spin_lock(&trans->client_lock); | ||
376 | atomic_inc(&trans->usage); | ||
377 | |||
378 | _net("CONNECT EXCL new %d on TRANS %d", | ||
379 | conn->debug_id, conn->trans->debug_id); | ||
380 | |||
381 | rxrpc_assign_connection_id(conn); | ||
382 | rx->conn = conn; | ||
383 | } | ||
384 | |||
385 | /* we've got a connection with a free channel and we can now attach the | ||
386 | * call to it | ||
387 | * - we're holding the transport's client lock | ||
388 | * - we're holding a reference on the connection | ||
389 | */ | ||
390 | for (chan = 0; chan < RXRPC_MAXCALLS; chan++) | ||
391 | if (!conn->channels[chan]) | ||
392 | goto found_channel; | ||
393 | goto no_free_channels; | ||
394 | |||
395 | found_channel: | ||
396 | atomic_inc(&conn->usage); | ||
397 | conn->channels[chan] = call; | ||
398 | call->conn = conn; | ||
399 | call->channel = chan; | ||
400 | call->cid = conn->cid | htonl(chan); | ||
401 | call->call_id = htonl(++conn->call_counter); | ||
402 | |||
403 | _net("CONNECT client on conn %d chan %d as call %x", | ||
404 | conn->debug_id, chan, ntohl(call->call_id)); | ||
405 | |||
406 | spin_unlock(&trans->client_lock); | ||
407 | |||
408 | rxrpc_add_call_ID_to_conn(conn, call); | ||
409 | _leave(" = 0"); | ||
410 | return 0; | ||
411 | |||
412 | no_free_channels: | ||
413 | spin_unlock(&trans->client_lock); | ||
414 | _leave(" = -ENOSR"); | ||
415 | return -ENOSR; | ||
416 | } | ||
417 | |||
418 | /* | ||
419 | * find a connection for a call | ||
420 | * - called in process context with IRQs enabled | ||
421 | */ | ||
422 | int rxrpc_connect_call(struct rxrpc_sock *rx, | ||
423 | struct rxrpc_transport *trans, | ||
424 | struct rxrpc_conn_bundle *bundle, | ||
425 | struct rxrpc_call *call, | ||
426 | gfp_t gfp) | ||
427 | { | ||
428 | struct rxrpc_connection *conn, *candidate; | ||
429 | int chan, ret; | ||
430 | |||
431 | DECLARE_WAITQUEUE(myself, current); | ||
432 | |||
433 | _enter("%p,%lx,", rx, call->user_call_ID); | ||
434 | |||
435 | if (test_bit(RXRPC_SOCK_EXCLUSIVE_CONN, &rx->flags)) | ||
436 | return rxrpc_connect_exclusive(rx, trans, bundle->service_id, | ||
437 | call, gfp); | ||
438 | |||
439 | spin_lock(&trans->client_lock); | ||
440 | for (;;) { | ||
441 | /* see if the bundle has a call slot available */ | ||
442 | if (!list_empty(&bundle->avail_conns)) { | ||
443 | _debug("avail"); | ||
444 | conn = list_entry(bundle->avail_conns.next, | ||
445 | struct rxrpc_connection, | ||
446 | bundle_link); | ||
447 | if (--conn->avail_calls == 0) | ||
448 | list_move(&conn->bundle_link, | ||
449 | &bundle->busy_conns); | ||
450 | ASSERTCMP(conn->avail_calls, <, RXRPC_MAXCALLS); | ||
451 | ASSERT(conn->channels[0] == NULL || | ||
452 | conn->channels[1] == NULL || | ||
453 | conn->channels[2] == NULL || | ||
454 | conn->channels[3] == NULL); | ||
455 | atomic_inc(&conn->usage); | ||
456 | break; | ||
457 | } | ||
458 | |||
459 | if (!list_empty(&bundle->unused_conns)) { | ||
460 | _debug("unused"); | ||
461 | conn = list_entry(bundle->unused_conns.next, | ||
462 | struct rxrpc_connection, | ||
463 | bundle_link); | ||
464 | ASSERTCMP(conn->avail_calls, ==, RXRPC_MAXCALLS); | ||
465 | conn->avail_calls = RXRPC_MAXCALLS - 1; | ||
466 | ASSERT(conn->channels[0] == NULL && | ||
467 | conn->channels[1] == NULL && | ||
468 | conn->channels[2] == NULL && | ||
469 | conn->channels[3] == NULL); | ||
470 | atomic_inc(&conn->usage); | ||
471 | list_move(&conn->bundle_link, &bundle->avail_conns); | ||
472 | break; | ||
473 | } | ||
474 | |||
475 | /* need to allocate a new connection */ | ||
476 | _debug("get new conn [%d]", bundle->num_conns); | ||
477 | |||
478 | spin_unlock(&trans->client_lock); | ||
479 | |||
480 | if (signal_pending(current)) | ||
481 | goto interrupted; | ||
482 | |||
483 | if (bundle->num_conns >= 20) { | ||
484 | _debug("too many conns"); | ||
485 | |||
486 | if (!(gfp & __GFP_WAIT)) { | ||
487 | _leave(" = -EAGAIN"); | ||
488 | return -EAGAIN; | ||
489 | } | ||
490 | |||
491 | add_wait_queue(&bundle->chanwait, &myself); | ||
492 | for (;;) { | ||
493 | set_current_state(TASK_INTERRUPTIBLE); | ||
494 | if (bundle->num_conns < 20 || | ||
495 | !list_empty(&bundle->unused_conns) || | ||
496 | !list_empty(&bundle->avail_conns)) | ||
497 | break; | ||
498 | if (signal_pending(current)) | ||
499 | goto interrupted_dequeue; | ||
500 | schedule(); | ||
501 | } | ||
502 | remove_wait_queue(&bundle->chanwait, &myself); | ||
503 | __set_current_state(TASK_RUNNING); | ||
504 | spin_lock(&trans->client_lock); | ||
505 | continue; | ||
506 | } | ||
507 | |||
508 | /* not yet present - create a candidate for a new connection and then | ||
509 | * redo the check */ | ||
510 | candidate = rxrpc_alloc_connection(gfp); | ||
511 | if (IS_ERR(candidate)) { | ||
512 | _leave(" = %ld", PTR_ERR(candidate)); | ||
513 | return PTR_ERR(candidate); | ||
514 | } | ||
515 | |||
516 | candidate->trans = trans; | ||
517 | candidate->bundle = bundle; | ||
518 | candidate->service_id = bundle->service_id; | ||
519 | candidate->epoch = rxrpc_epoch; | ||
520 | candidate->in_clientflag = 0; | ||
521 | candidate->out_clientflag = RXRPC_CLIENT_INITIATED; | ||
522 | candidate->cid = 0; | ||
523 | candidate->state = RXRPC_CONN_CLIENT; | ||
524 | candidate->avail_calls = RXRPC_MAXCALLS; | ||
525 | candidate->security_level = rx->min_sec_level; | ||
526 | candidate->key = key_get(bundle->key); | ||
527 | |||
528 | ret = rxrpc_init_client_conn_security(candidate); | ||
529 | if (ret < 0) { | ||
530 | key_put(candidate->key); | ||
531 | kfree(candidate); | ||
532 | _leave(" = %d [key]", ret); | ||
533 | return ret; | ||
534 | } | ||
535 | |||
536 | write_lock_bh(&rxrpc_connection_lock); | ||
537 | list_add_tail(&candidate->link, &rxrpc_connections); | ||
538 | write_unlock_bh(&rxrpc_connection_lock); | ||
539 | |||
540 | spin_lock(&trans->client_lock); | ||
541 | |||
542 | list_add(&candidate->bundle_link, &bundle->unused_conns); | ||
543 | bundle->num_conns++; | ||
544 | atomic_inc(&bundle->usage); | ||
545 | atomic_inc(&trans->usage); | ||
546 | |||
547 | _net("CONNECT new %d on TRANS %d", | ||
548 | candidate->debug_id, candidate->trans->debug_id); | ||
549 | |||
550 | rxrpc_assign_connection_id(candidate); | ||
551 | if (candidate->security) | ||
552 | candidate->security->prime_packet_security(candidate); | ||
553 | |||
554 | /* leave the candidate lurking in zombie mode attached to the | ||
555 | * bundle until we're ready for it */ | ||
556 | rxrpc_put_connection(candidate); | ||
557 | candidate = NULL; | ||
558 | } | ||
559 | |||
560 | /* we've got a connection with a free channel and we can now attach the | ||
561 | * call to it | ||
562 | * - we're holding the transport's client lock | ||
563 | * - we're holding a reference on the connection | ||
564 | * - we're holding a reference on the bundle | ||
565 | */ | ||
566 | for (chan = 0; chan < RXRPC_MAXCALLS; chan++) | ||
567 | if (!conn->channels[chan]) | ||
568 | goto found_channel; | ||
569 | ASSERT(conn->channels[0] == NULL || | ||
570 | conn->channels[1] == NULL || | ||
571 | conn->channels[2] == NULL || | ||
572 | conn->channels[3] == NULL); | ||
573 | BUG(); | ||
574 | |||
575 | found_channel: | ||
576 | conn->channels[chan] = call; | ||
577 | call->conn = conn; | ||
578 | call->channel = chan; | ||
579 | call->cid = conn->cid | htonl(chan); | ||
580 | call->call_id = htonl(++conn->call_counter); | ||
581 | |||
582 | _net("CONNECT client on conn %d chan %d as call %x", | ||
583 | conn->debug_id, chan, ntohl(call->call_id)); | ||
584 | |||
585 | ASSERTCMP(conn->avail_calls, <, RXRPC_MAXCALLS); | ||
586 | spin_unlock(&trans->client_lock); | ||
587 | |||
588 | rxrpc_add_call_ID_to_conn(conn, call); | ||
589 | |||
590 | _leave(" = 0"); | ||
591 | return 0; | ||
592 | |||
593 | interrupted_dequeue: | ||
594 | remove_wait_queue(&bundle->chanwait, &myself); | ||
595 | __set_current_state(TASK_RUNNING); | ||
596 | interrupted: | ||
597 | _leave(" = -ERESTARTSYS"); | ||
598 | return -ERESTARTSYS; | ||
599 | } | ||
600 | |||
/*
 * get a record of an incoming connection
 * - looks up the server connection matching the packet's (epoch, conn ID),
 *   creating a new record if none exists
 * - returns the connection with a reference held, or ERR_PTR(-ENOMEM) /
 *   ERR_PTR(-EKEYREJECTED) if the security index doesn't match
 */
struct rxrpc_connection *
rxrpc_incoming_connection(struct rxrpc_transport *trans,
			  struct rxrpc_header *hdr,
			  gfp_t gfp)
{
	struct rxrpc_connection *conn, *candidate = NULL;
	struct rb_node *p, **pp;
	const char *new = "old";
	__be32 epoch;
	u32 conn_id;

	_enter("");

	/* only client-initiated packets should reach this function */
	ASSERT(hdr->flags & RXRPC_CLIENT_INITIATED);

	epoch = hdr->epoch;
	conn_id = ntohl(hdr->cid) & RXRPC_CIDMASK;

	/* search the connection list first */
	read_lock_bh(&trans->conn_lock);

	/* tree is ordered by (epoch, conn ID) */
	p = trans->server_conns.rb_node;
	while (p) {
		conn = rb_entry(p, struct rxrpc_connection, node);

		_debug("maybe %x", conn->real_conn_id);

		if (epoch < conn->epoch)
			p = p->rb_left;
		else if (epoch > conn->epoch)
			p = p->rb_right;
		else if (conn_id < conn->real_conn_id)
			p = p->rb_left;
		else if (conn_id > conn->real_conn_id)
			p = p->rb_right;
		else
			goto found_extant_connection;
	}
	read_unlock_bh(&trans->conn_lock);

	/* not yet present - create a candidate for a new record and then
	 * redo the search */
	candidate = rxrpc_alloc_connection(gfp);
	if (!candidate) {
		_leave(" = -ENOMEM");
		return ERR_PTR(-ENOMEM);
	}

	candidate->trans = trans;
	candidate->epoch = hdr->epoch;
	/* mask off the channel bits to get the connection's own CID */
	candidate->cid = hdr->cid & __constant_cpu_to_be32(RXRPC_CIDMASK);
	candidate->service_id = hdr->serviceId;
	candidate->security_ix = hdr->securityIndex;
	candidate->in_clientflag = RXRPC_CLIENT_INITIATED;
	candidate->out_clientflag = 0;
	candidate->real_conn_id = conn_id;
	candidate->state = RXRPC_CONN_SERVER;
	/* a service connection starts out unsecured until a security
	 * exchange has taken place */
	if (candidate->service_id)
		candidate->state = RXRPC_CONN_SERVER_UNSECURED;

	write_lock_bh(&trans->conn_lock);

	/* repeat the search under the write lock in case someone else
	 * inserted the same connection while the lock was dropped */
	pp = &trans->server_conns.rb_node;
	p = NULL;
	while (*pp) {
		p = *pp;
		conn = rb_entry(p, struct rxrpc_connection, node);

		if (epoch < conn->epoch)
			pp = &(*pp)->rb_left;
		else if (epoch > conn->epoch)
			pp = &(*pp)->rb_right;
		else if (conn_id < conn->real_conn_id)
			pp = &(*pp)->rb_left;
		else if (conn_id > conn->real_conn_id)
			pp = &(*pp)->rb_right;
		else
			goto found_extant_second;
	}

	/* we can now add the new candidate to the list */
	conn = candidate;
	candidate = NULL;
	rb_link_node(&conn->node, p, pp);
	rb_insert_color(&conn->node, &trans->server_conns);
	atomic_inc(&conn->trans->usage);

	write_unlock_bh(&trans->conn_lock);

	write_lock_bh(&rxrpc_connection_lock);
	list_add_tail(&conn->link, &rxrpc_connections);
	write_unlock_bh(&rxrpc_connection_lock);

	new = "new";

success:
	/* the reference returned is the allocation ref (new conn) or the
	 * atomic_inc taken below (extant conn) */
	_net("CONNECTION %s %d {%x}", new, conn->debug_id, conn->real_conn_id);

	_leave(" = %p {u=%d}", conn, atomic_read(&conn->usage));
	return conn;

	/* we found the connection in the list immediately */
found_extant_connection:
	if (hdr->securityIndex != conn->security_ix) {
		read_unlock_bh(&trans->conn_lock);
		goto security_mismatch;
	}
	atomic_inc(&conn->usage);
	read_unlock_bh(&trans->conn_lock);
	goto success;

	/* we found the connection on the second time through the list */
found_extant_second:
	if (hdr->securityIndex != conn->security_ix) {
		write_unlock_bh(&trans->conn_lock);
		goto security_mismatch;
	}
	atomic_inc(&conn->usage);
	write_unlock_bh(&trans->conn_lock);
	kfree(candidate);
	goto success;

security_mismatch:
	/* candidate may be NULL here (first-search hit); kfree(NULL) is a
	 * no-op so this is safe */
	kfree(candidate);
	_leave(" = -EKEYREJECTED");
	return ERR_PTR(-EKEYREJECTED);
}
731 | |||
732 | /* | ||
733 | * find a connection based on transport and RxRPC connection ID for an incoming | ||
734 | * packet | ||
735 | */ | ||
736 | struct rxrpc_connection *rxrpc_find_connection(struct rxrpc_transport *trans, | ||
737 | struct rxrpc_header *hdr) | ||
738 | { | ||
739 | struct rxrpc_connection *conn; | ||
740 | struct rb_node *p; | ||
741 | __be32 epoch; | ||
742 | u32 conn_id; | ||
743 | |||
744 | _enter(",{%x,%x}", ntohl(hdr->cid), hdr->flags); | ||
745 | |||
746 | read_lock_bh(&trans->conn_lock); | ||
747 | |||
748 | conn_id = ntohl(hdr->cid) & RXRPC_CIDMASK; | ||
749 | epoch = hdr->epoch; | ||
750 | |||
751 | if (hdr->flags & RXRPC_CLIENT_INITIATED) | ||
752 | p = trans->server_conns.rb_node; | ||
753 | else | ||
754 | p = trans->client_conns.rb_node; | ||
755 | |||
756 | while (p) { | ||
757 | conn = rb_entry(p, struct rxrpc_connection, node); | ||
758 | |||
759 | _debug("maybe %x", conn->real_conn_id); | ||
760 | |||
761 | if (epoch < conn->epoch) | ||
762 | p = p->rb_left; | ||
763 | else if (epoch > conn->epoch) | ||
764 | p = p->rb_right; | ||
765 | else if (conn_id < conn->real_conn_id) | ||
766 | p = p->rb_left; | ||
767 | else if (conn_id > conn->real_conn_id) | ||
768 | p = p->rb_right; | ||
769 | else | ||
770 | goto found; | ||
771 | } | ||
772 | |||
773 | read_unlock_bh(&trans->conn_lock); | ||
774 | _leave(" = NULL"); | ||
775 | return NULL; | ||
776 | |||
777 | found: | ||
778 | atomic_inc(&conn->usage); | ||
779 | read_unlock_bh(&trans->conn_lock); | ||
780 | _leave(" = %p", conn); | ||
781 | return conn; | ||
782 | } | ||
783 | |||
/*
 * release a virtual connection
 * - the connection is not freed here; the last put just stamps the release
 *   time and pokes the reaper, which destroys it after the timeout
 */
void rxrpc_put_connection(struct rxrpc_connection *conn)
{
	_enter("%p{u=%d,d=%d}",
	       conn, atomic_read(&conn->usage), conn->debug_id);

	ASSERTCMP(atomic_read(&conn->usage), >, 0);

	/* stamp before dropping the ref; the reaper compares put_time +
	 * rxrpc_connection_timeout against the current time */
	conn->put_time = xtime.tv_sec;
	if (atomic_dec_and_test(&conn->usage)) {
		_debug("zombie");
		rxrpc_queue_delayed_work(&rxrpc_connection_reap, 0);
	}

	_leave("");
}
802 | |||
/*
 * destroy a virtual connection
 * - called from the reaper once the usage count has been seen to be zero
 */
static void rxrpc_destroy_connection(struct rxrpc_connection *conn)
{
	_enter("%p{%d}", conn, atomic_read(&conn->usage));

	ASSERTCMP(atomic_read(&conn->usage), ==, 0);

	_net("DESTROY CONN %d", conn->debug_id);

	/* drop the ref on the bundle the connection belonged to, if any */
	if (conn->bundle)
		rxrpc_put_bundle(conn->trans, conn->bundle);

	/* all calls should have been detached from the conn by now */
	ASSERT(RB_EMPTY_ROOT(&conn->calls));
	rxrpc_purge_queue(&conn->rx_queue);

	rxrpc_clear_conn_security(conn);
	rxrpc_put_transport(conn->trans);
	kfree(conn);
	_leave("");
}
825 | |||
826 | /* | ||
827 | * reap dead connections | ||
828 | */ | ||
829 | void rxrpc_connection_reaper(struct work_struct *work) | ||
830 | { | ||
831 | struct rxrpc_connection *conn, *_p; | ||
832 | unsigned long now, earliest, reap_time; | ||
833 | |||
834 | LIST_HEAD(graveyard); | ||
835 | |||
836 | _enter(""); | ||
837 | |||
838 | now = xtime.tv_sec; | ||
839 | earliest = ULONG_MAX; | ||
840 | |||
841 | write_lock_bh(&rxrpc_connection_lock); | ||
842 | list_for_each_entry_safe(conn, _p, &rxrpc_connections, link) { | ||
843 | _debug("reap CONN %d { u=%d,t=%ld }", | ||
844 | conn->debug_id, atomic_read(&conn->usage), | ||
845 | (long) now - (long) conn->put_time); | ||
846 | |||
847 | if (likely(atomic_read(&conn->usage) > 0)) | ||
848 | continue; | ||
849 | |||
850 | spin_lock(&conn->trans->client_lock); | ||
851 | write_lock(&conn->trans->conn_lock); | ||
852 | reap_time = conn->put_time + rxrpc_connection_timeout; | ||
853 | |||
854 | if (atomic_read(&conn->usage) > 0) { | ||
855 | ; | ||
856 | } else if (reap_time <= now) { | ||
857 | list_move_tail(&conn->link, &graveyard); | ||
858 | if (conn->out_clientflag) | ||
859 | rb_erase(&conn->node, | ||
860 | &conn->trans->client_conns); | ||
861 | else | ||
862 | rb_erase(&conn->node, | ||
863 | &conn->trans->server_conns); | ||
864 | if (conn->bundle) { | ||
865 | list_del_init(&conn->bundle_link); | ||
866 | conn->bundle->num_conns--; | ||
867 | } | ||
868 | |||
869 | } else if (reap_time < earliest) { | ||
870 | earliest = reap_time; | ||
871 | } | ||
872 | |||
873 | write_unlock(&conn->trans->conn_lock); | ||
874 | spin_unlock(&conn->trans->client_lock); | ||
875 | } | ||
876 | write_unlock_bh(&rxrpc_connection_lock); | ||
877 | |||
878 | if (earliest != ULONG_MAX) { | ||
879 | _debug("reschedule reaper %ld", (long) earliest - now); | ||
880 | ASSERTCMP(earliest, >, now); | ||
881 | rxrpc_queue_delayed_work(&rxrpc_connection_reap, | ||
882 | (earliest - now) * HZ); | ||
883 | } | ||
884 | |||
885 | /* then destroy all those pulled out */ | ||
886 | while (!list_empty(&graveyard)) { | ||
887 | conn = list_entry(graveyard.next, struct rxrpc_connection, | ||
888 | link); | ||
889 | list_del_init(&conn->link); | ||
890 | |||
891 | ASSERTCMP(atomic_read(&conn->usage), ==, 0); | ||
892 | rxrpc_destroy_connection(conn); | ||
893 | } | ||
894 | |||
895 | _leave(""); | ||
896 | } | ||
897 | |||
/*
 * preemptively destroy all the connection records rather than waiting for them
 * to time out
 */
void __exit rxrpc_destroy_all_connections(void)
{
	_enter("");

	/* zeroing the timeout makes every unreferenced connection immediately
	 * reapable; then cancel any pending run and force one right now */
	rxrpc_connection_timeout = 0;
	cancel_delayed_work(&rxrpc_connection_reap);
	rxrpc_queue_delayed_work(&rxrpc_connection_reap, 0);

	_leave("");
}
diff --git a/net/rxrpc/ar-connevent.c b/net/rxrpc/ar-connevent.c new file mode 100644 index 000000000000..1ada43d51165 --- /dev/null +++ b/net/rxrpc/ar-connevent.c | |||
@@ -0,0 +1,403 @@ | |||
1 | /* connection-level event handling | ||
2 | * | ||
3 | * Copyright (C) 2007 Red Hat, Inc. All Rights Reserved. | ||
4 | * Written by David Howells (dhowells@redhat.com) | ||
5 | * | ||
6 | * This program is free software; you can redistribute it and/or | ||
7 | * modify it under the terms of the GNU General Public License | ||
8 | * as published by the Free Software Foundation; either version | ||
9 | * 2 of the License, or (at your option) any later version. | ||
10 | */ | ||
11 | |||
12 | #include <linux/module.h> | ||
13 | #include <linux/net.h> | ||
14 | #include <linux/skbuff.h> | ||
15 | #include <linux/errqueue.h> | ||
16 | #include <linux/udp.h> | ||
17 | #include <linux/in.h> | ||
18 | #include <linux/in6.h> | ||
19 | #include <linux/icmp.h> | ||
20 | #include <net/sock.h> | ||
21 | #include <net/af_rxrpc.h> | ||
22 | #include <net/ip.h> | ||
23 | #include "ar-internal.h" | ||
24 | |||
25 | /* | ||
26 | * pass a connection-level abort onto all calls on that connection | ||
27 | */ | ||
28 | static void rxrpc_abort_calls(struct rxrpc_connection *conn, int state, | ||
29 | u32 abort_code) | ||
30 | { | ||
31 | struct rxrpc_call *call; | ||
32 | struct rb_node *p; | ||
33 | |||
34 | _enter("{%d},%x", conn->debug_id, abort_code); | ||
35 | |||
36 | read_lock_bh(&conn->lock); | ||
37 | |||
38 | for (p = rb_first(&conn->calls); p; p = rb_next(p)) { | ||
39 | call = rb_entry(p, struct rxrpc_call, conn_node); | ||
40 | write_lock(&call->state_lock); | ||
41 | if (call->state <= RXRPC_CALL_COMPLETE) { | ||
42 | call->state = state; | ||
43 | call->abort_code = abort_code; | ||
44 | if (state == RXRPC_CALL_LOCALLY_ABORTED) | ||
45 | set_bit(RXRPC_CALL_CONN_ABORT, &call->events); | ||
46 | else | ||
47 | set_bit(RXRPC_CALL_RCVD_ABORT, &call->events); | ||
48 | rxrpc_queue_call(call); | ||
49 | } | ||
50 | write_unlock(&call->state_lock); | ||
51 | } | ||
52 | |||
53 | read_unlock_bh(&conn->lock); | ||
54 | _leave(""); | ||
55 | } | ||
56 | |||
57 | /* | ||
58 | * generate a connection-level abort | ||
59 | */ | ||
60 | static int rxrpc_abort_connection(struct rxrpc_connection *conn, | ||
61 | u32 error, u32 abort_code) | ||
62 | { | ||
63 | struct rxrpc_header hdr; | ||
64 | struct msghdr msg; | ||
65 | struct kvec iov[2]; | ||
66 | __be32 word; | ||
67 | size_t len; | ||
68 | int ret; | ||
69 | |||
70 | _enter("%d,,%u,%u", conn->debug_id, error, abort_code); | ||
71 | |||
72 | /* generate a connection-level abort */ | ||
73 | spin_lock_bh(&conn->state_lock); | ||
74 | if (conn->state < RXRPC_CONN_REMOTELY_ABORTED) { | ||
75 | conn->state = RXRPC_CONN_LOCALLY_ABORTED; | ||
76 | conn->error = error; | ||
77 | spin_unlock_bh(&conn->state_lock); | ||
78 | } else { | ||
79 | spin_unlock_bh(&conn->state_lock); | ||
80 | _leave(" = 0 [already dead]"); | ||
81 | return 0; | ||
82 | } | ||
83 | |||
84 | rxrpc_abort_calls(conn, RXRPC_CALL_LOCALLY_ABORTED, abort_code); | ||
85 | |||
86 | msg.msg_name = &conn->trans->peer->srx.transport.sin; | ||
87 | msg.msg_namelen = sizeof(conn->trans->peer->srx.transport.sin); | ||
88 | msg.msg_control = NULL; | ||
89 | msg.msg_controllen = 0; | ||
90 | msg.msg_flags = 0; | ||
91 | |||
92 | hdr.epoch = conn->epoch; | ||
93 | hdr.cid = conn->cid; | ||
94 | hdr.callNumber = 0; | ||
95 | hdr.seq = 0; | ||
96 | hdr.type = RXRPC_PACKET_TYPE_ABORT; | ||
97 | hdr.flags = conn->out_clientflag; | ||
98 | hdr.userStatus = 0; | ||
99 | hdr.securityIndex = conn->security_ix; | ||
100 | hdr._rsvd = 0; | ||
101 | hdr.serviceId = conn->service_id; | ||
102 | |||
103 | word = htonl(abort_code); | ||
104 | |||
105 | iov[0].iov_base = &hdr; | ||
106 | iov[0].iov_len = sizeof(hdr); | ||
107 | iov[1].iov_base = &word; | ||
108 | iov[1].iov_len = sizeof(word); | ||
109 | |||
110 | len = iov[0].iov_len + iov[1].iov_len; | ||
111 | |||
112 | hdr.serial = htonl(atomic_inc_return(&conn->serial)); | ||
113 | _proto("Tx CONN ABORT %%%u { %d }", ntohl(hdr.serial), abort_code); | ||
114 | |||
115 | ret = kernel_sendmsg(conn->trans->local->socket, &msg, iov, 2, len); | ||
116 | if (ret < 0) { | ||
117 | _debug("sendmsg failed: %d", ret); | ||
118 | return -EAGAIN; | ||
119 | } | ||
120 | |||
121 | _leave(" = 0"); | ||
122 | return 0; | ||
123 | } | ||
124 | |||
125 | /* | ||
126 | * mark a call as being on a now-secured channel | ||
127 | * - must be called with softirqs disabled | ||
128 | */ | ||
129 | void rxrpc_call_is_secure(struct rxrpc_call *call) | ||
130 | { | ||
131 | _enter("%p", call); | ||
132 | if (call) { | ||
133 | read_lock(&call->state_lock); | ||
134 | if (call->state < RXRPC_CALL_COMPLETE && | ||
135 | !test_and_set_bit(RXRPC_CALL_SECURED, &call->events)) | ||
136 | rxrpc_queue_call(call); | ||
137 | read_unlock(&call->state_lock); | ||
138 | } | ||
139 | } | ||
140 | |||
/*
 * connection-level Rx packet processor
 *
 * Dispatches on the packet type.  Returns 0 on success, -ECONNABORTED if
 * the connection is (or becomes) aborted, -EPROTO on a protocol violation,
 * or a negative error from the security module's callbacks.
 */
static int rxrpc_process_event(struct rxrpc_connection *conn,
			       struct sk_buff *skb,
			       u32 *_abort_code)
{
	struct rxrpc_skb_priv *sp = rxrpc_skb(skb);
	__be32 tmp;
	u32 serial;
	int loop, ret;

	/* an already-aborted connection processes nothing further */
	if (conn->state >= RXRPC_CONN_REMOTELY_ABORTED)
		return -ECONNABORTED;

	serial = ntohl(sp->hdr.serial);

	switch (sp->hdr.type) {
	case RXRPC_PACKET_TYPE_ABORT:
		/* the abort code is carried as a big-endian u32 payload */
		if (skb_copy_bits(skb, 0, &tmp, sizeof(tmp)) < 0)
			return -EPROTO;
		_proto("Rx ABORT %%%u { ac=%d }", serial, ntohl(tmp));

		conn->state = RXRPC_CONN_REMOTELY_ABORTED;
		/* fan the abort out to every call on this connection */
		rxrpc_abort_calls(conn, RXRPC_CALL_REMOTELY_ABORTED,
				  ntohl(tmp));
		return -ECONNABORTED;

	case RXRPC_PACKET_TYPE_CHALLENGE:
		/* a challenge is only meaningful on a secured connection */
		if (conn->security)
			return conn->security->respond_to_challenge(
				conn, skb, _abort_code);
		return -EPROTO;

	case RXRPC_PACKET_TYPE_RESPONSE:
		if (!conn->security)
			return -EPROTO;

		/* validate the response, then bring the connection's
		 * security machinery online */
		ret = conn->security->verify_response(conn, skb, _abort_code);
		if (ret < 0)
			return ret;

		ret = conn->security->init_connection_security(conn);
		if (ret < 0)
			return ret;

		conn->security->prime_packet_security(conn);
		read_lock_bh(&conn->lock);
		spin_lock(&conn->state_lock);

		/* flip a server connection out of the challenging state and
		 * let every occupied channel know it is now secure */
		if (conn->state == RXRPC_CONN_SERVER_CHALLENGING) {
			conn->state = RXRPC_CONN_SERVER;
			for (loop = 0; loop < RXRPC_MAXCALLS; loop++)
				rxrpc_call_is_secure(conn->channels[loop]);
		}

		spin_unlock(&conn->state_lock);
		read_unlock_bh(&conn->lock);
		return 0;

	default:
		return -EPROTO;
	}
}
205 | |||
206 | /* | ||
207 | * set up security and issue a challenge | ||
208 | */ | ||
209 | static void rxrpc_secure_connection(struct rxrpc_connection *conn) | ||
210 | { | ||
211 | u32 abort_code; | ||
212 | int ret; | ||
213 | |||
214 | _enter("{%d}", conn->debug_id); | ||
215 | |||
216 | ASSERT(conn->security_ix != 0); | ||
217 | |||
218 | if (!conn->key) { | ||
219 | _debug("set up security"); | ||
220 | ret = rxrpc_init_server_conn_security(conn); | ||
221 | switch (ret) { | ||
222 | case 0: | ||
223 | break; | ||
224 | case -ENOENT: | ||
225 | abort_code = RX_CALL_DEAD; | ||
226 | goto abort; | ||
227 | default: | ||
228 | abort_code = RXKADNOAUTH; | ||
229 | goto abort; | ||
230 | } | ||
231 | } | ||
232 | |||
233 | ASSERT(conn->security != NULL); | ||
234 | |||
235 | if (conn->security->issue_challenge(conn) < 0) { | ||
236 | abort_code = RX_CALL_DEAD; | ||
237 | ret = -ENOMEM; | ||
238 | goto abort; | ||
239 | } | ||
240 | |||
241 | _leave(""); | ||
242 | return; | ||
243 | |||
244 | abort: | ||
245 | _debug("abort %d, %d", ret, abort_code); | ||
246 | rxrpc_abort_connection(conn, -ret, abort_code); | ||
247 | _leave(" [aborted]"); | ||
248 | } | ||
249 | |||
/*
 * connection-level event processor (work item)
 *
 * Reference counting: we take one ref on the connection for the duration of
 * this function.  The CHALLENGE event holds its own ref, dropped after
 * securing.  Each packet on conn->rx_queue also holds a ref on the
 * connection, dropped as each packet is disposed of.
 */
void rxrpc_process_connection(struct work_struct *work)
{
	struct rxrpc_connection *conn =
		container_of(work, struct rxrpc_connection, processor);
	struct rxrpc_skb_priv *sp;
	struct sk_buff *skb;
	u32 abort_code = RX_PROTOCOL_ERROR;
	int ret;

	_enter("{%d}", conn->debug_id);

	/* pin the connection while we work on it */
	atomic_inc(&conn->usage);

	if (test_and_clear_bit(RXRPC_CONN_CHALLENGE, &conn->events)) {
		rxrpc_secure_connection(conn);
		/* drop the ref the CHALLENGE event was holding */
		rxrpc_put_connection(conn);
	}

	/* go through the conn-level event packets, releasing the ref on this
	 * connection that each one has when we've finished with it */
	while ((skb = skb_dequeue(&conn->rx_queue))) {
		sp = rxrpc_skb(skb);

		ret = rxrpc_process_event(conn, skb, &abort_code);
		switch (ret) {
		case -EPROTO:
		case -EKEYEXPIRED:
		case -EKEYREJECTED:
			goto protocol_error;
		case -EAGAIN:
			/* transient failure: put the packet back and retry
			 * on the next work-queue run */
			goto requeue_and_leave;
		case -ECONNABORTED:
		default:
			rxrpc_put_connection(conn);
			rxrpc_free_skb(skb);
			break;
		}
	}

out:
	/* drop the ref we took at the top */
	rxrpc_put_connection(conn);
	_leave("");
	return;

requeue_and_leave:
	/* the requeued skb keeps its ref on the connection */
	skb_queue_head(&conn->rx_queue, skb);
	goto out;

protocol_error:
	/* try to abort; if the abort itself can't be sent, retry later */
	if (rxrpc_abort_connection(conn, -ret, abort_code) < 0)
		goto requeue_and_leave;
	rxrpc_put_connection(conn);
	rxrpc_free_skb(skb);
	_leave(" [EPROTO]");
	goto out;
}
309 | |||
310 | /* | ||
311 | * put a packet up for transport-level abort | ||
312 | */ | ||
313 | void rxrpc_reject_packet(struct rxrpc_local *local, struct sk_buff *skb) | ||
314 | { | ||
315 | CHECK_SLAB_OKAY(&local->usage); | ||
316 | |||
317 | if (!atomic_inc_not_zero(&local->usage)) { | ||
318 | printk("resurrected on reject\n"); | ||
319 | BUG(); | ||
320 | } | ||
321 | |||
322 | skb_queue_tail(&local->reject_queue, skb); | ||
323 | rxrpc_queue_work(&local->rejecter); | ||
324 | } | ||
325 | |||
/*
 * reject packets through the local endpoint (work item)
 *
 * For each queued skb, send an ABORT packet back to the sender; the abort
 * code was stashed in skb->priority by whoever queued the packet.  Each
 * queued skb carries a ref on the local endpoint (taken by
 * rxrpc_reject_packet()), dropped here as the skb is consumed.
 */
void rxrpc_reject_packets(struct work_struct *work)
{
	union {
		struct sockaddr sa;
		struct sockaddr_in sin;
	} sa;
	struct rxrpc_skb_priv *sp;
	struct rxrpc_header hdr;
	struct rxrpc_local *local;
	struct sk_buff *skb;
	struct msghdr msg;
	struct kvec iov[2];
	size_t size;
	__be32 code;

	local = container_of(work, struct rxrpc_local, rejecter);
	/* hold the endpoint while this work item runs */
	rxrpc_get_local(local);

	_enter("%d", local->debug_id);

	/* header + abort code; filled in per packet below */
	iov[0].iov_base = &hdr;
	iov[0].iov_len = sizeof(hdr);
	iov[1].iov_base = &code;
	iov[1].iov_len = sizeof(code);
	size = sizeof(hdr) + sizeof(code);

	msg.msg_name = &sa;
	msg.msg_control = NULL;
	msg.msg_controllen = 0;
	msg.msg_flags = 0;

	memset(&sa, 0, sizeof(sa));
	sa.sa.sa_family = local->srx.transport.family;
	switch (sa.sa.sa_family) {
	case AF_INET:
		msg.msg_namelen = sizeof(sa.sin);
		break;
	default:
		/* non-IPv4 families are not handled; sends are skipped */
		msg.msg_namelen = 0;
		break;
	}

	memset(&hdr, 0, sizeof(hdr));
	hdr.type = RXRPC_PACKET_TYPE_ABORT;

	while ((skb = skb_dequeue(&local->reject_queue))) {
		sp = rxrpc_skb(skb);
		switch (sa.sa.sa_family) {
		case AF_INET:
			/* reply to the source of the offending packet */
			sa.sin.sin_port = udp_hdr(skb)->source;
			sa.sin.sin_addr.s_addr = ip_hdr(skb)->saddr;
			code = htonl(skb->priority);

			/* echo the original packet's identifiers, flipping
			 * the client-initiated flag so the abort appears to
			 * come from the other side of the call */
			hdr.epoch = sp->hdr.epoch;
			hdr.cid = sp->hdr.cid;
			hdr.callNumber = sp->hdr.callNumber;
			hdr.serviceId = sp->hdr.serviceId;
			hdr.flags = sp->hdr.flags;
			hdr.flags ^= RXRPC_CLIENT_INITIATED;
			hdr.flags &= RXRPC_CLIENT_INITIATED;

			/* best-effort: send result is deliberately ignored */
			kernel_sendmsg(local->socket, &msg, iov, 2, size);
			break;

		default:
			break;
		}

		rxrpc_free_skb(skb);
		/* drop the per-packet ref taken by rxrpc_reject_packet() */
		rxrpc_put_local(local);
	}

	/* drop the work-item ref taken at the top */
	rxrpc_put_local(local);
	_leave("");
}
diff --git a/net/rxrpc/ar-error.c b/net/rxrpc/ar-error.c new file mode 100644 index 000000000000..2c27df1ffa17 --- /dev/null +++ b/net/rxrpc/ar-error.c | |||
@@ -0,0 +1,253 @@ | |||
1 | /* Error message handling (ICMP) | ||
2 | * | ||
3 | * Copyright (C) 2007 Red Hat, Inc. All Rights Reserved. | ||
4 | * Written by David Howells (dhowells@redhat.com) | ||
5 | * | ||
6 | * This program is free software; you can redistribute it and/or | ||
7 | * modify it under the terms of the GNU General Public License | ||
8 | * as published by the Free Software Foundation; either version | ||
9 | * 2 of the License, or (at your option) any later version. | ||
10 | */ | ||
11 | |||
12 | #include <linux/module.h> | ||
13 | #include <linux/net.h> | ||
14 | #include <linux/skbuff.h> | ||
15 | #include <linux/errqueue.h> | ||
16 | #include <linux/udp.h> | ||
17 | #include <linux/in.h> | ||
18 | #include <linux/in6.h> | ||
19 | #include <linux/icmp.h> | ||
20 | #include <net/sock.h> | ||
21 | #include <net/af_rxrpc.h> | ||
22 | #include <net/ip.h> | ||
23 | #include "ar-internal.h" | ||
24 | |||
25 | /* | ||
26 | * handle an error received on the local endpoint | ||
27 | */ | ||
28 | void rxrpc_UDP_error_report(struct sock *sk) | ||
29 | { | ||
30 | struct sock_exterr_skb *serr; | ||
31 | struct rxrpc_transport *trans; | ||
32 | struct rxrpc_local *local = sk->sk_user_data; | ||
33 | struct rxrpc_peer *peer; | ||
34 | struct sk_buff *skb; | ||
35 | __be32 addr; | ||
36 | __be16 port; | ||
37 | |||
38 | _enter("%p{%d}", sk, local->debug_id); | ||
39 | |||
40 | skb = skb_dequeue(&sk->sk_error_queue); | ||
41 | if (!skb) { | ||
42 | _leave("UDP socket errqueue empty"); | ||
43 | return; | ||
44 | } | ||
45 | |||
46 | rxrpc_new_skb(skb); | ||
47 | |||
48 | serr = SKB_EXT_ERR(skb); | ||
49 | addr = *(__be32 *)(skb_network_header(skb) + serr->addr_offset); | ||
50 | port = serr->port; | ||
51 | |||
52 | _net("Rx UDP Error from "NIPQUAD_FMT":%hu", | ||
53 | NIPQUAD(addr), ntohs(port)); | ||
54 | _debug("Msg l:%d d:%d", skb->len, skb->data_len); | ||
55 | |||
56 | peer = rxrpc_find_peer(local, addr, port); | ||
57 | if (IS_ERR(peer)) { | ||
58 | rxrpc_free_skb(skb); | ||
59 | _leave(" [no peer]"); | ||
60 | return; | ||
61 | } | ||
62 | |||
63 | trans = rxrpc_find_transport(local, peer); | ||
64 | if (!trans) { | ||
65 | rxrpc_put_peer(peer); | ||
66 | rxrpc_free_skb(skb); | ||
67 | _leave(" [no trans]"); | ||
68 | return; | ||
69 | } | ||
70 | |||
71 | if (serr->ee.ee_origin == SO_EE_ORIGIN_ICMP && | ||
72 | serr->ee.ee_type == ICMP_DEST_UNREACH && | ||
73 | serr->ee.ee_code == ICMP_FRAG_NEEDED | ||
74 | ) { | ||
75 | u32 mtu = serr->ee.ee_info; | ||
76 | |||
77 | _net("Rx Received ICMP Fragmentation Needed (%d)", mtu); | ||
78 | |||
79 | /* wind down the local interface MTU */ | ||
80 | if (mtu > 0 && peer->if_mtu == 65535 && mtu < peer->if_mtu) { | ||
81 | peer->if_mtu = mtu; | ||
82 | _net("I/F MTU %u", mtu); | ||
83 | } | ||
84 | |||
85 | /* ip_rt_frag_needed() may have eaten the info */ | ||
86 | if (mtu == 0) | ||
87 | mtu = ntohs(icmp_hdr(skb)->un.frag.mtu); | ||
88 | |||
89 | if (mtu == 0) { | ||
90 | /* they didn't give us a size, estimate one */ | ||
91 | if (mtu > 1500) { | ||
92 | mtu >>= 1; | ||
93 | if (mtu < 1500) | ||
94 | mtu = 1500; | ||
95 | } else { | ||
96 | mtu -= 100; | ||
97 | if (mtu < peer->hdrsize) | ||
98 | mtu = peer->hdrsize + 4; | ||
99 | } | ||
100 | } | ||
101 | |||
102 | if (mtu < peer->mtu) { | ||
103 | peer->mtu = mtu; | ||
104 | peer->maxdata = peer->mtu - peer->hdrsize; | ||
105 | _net("Net MTU %u (maxdata %u)", | ||
106 | peer->mtu, peer->maxdata); | ||
107 | } | ||
108 | } | ||
109 | |||
110 | rxrpc_put_peer(peer); | ||
111 | |||
112 | /* pass the transport ref to error_handler to release */ | ||
113 | skb_queue_tail(&trans->error_queue, skb); | ||
114 | rxrpc_queue_work(&trans->error_handler); | ||
115 | |||
116 | /* reset and regenerate socket error */ | ||
117 | spin_lock_bh(&sk->sk_error_queue.lock); | ||
118 | sk->sk_err = 0; | ||
119 | skb = skb_peek(&sk->sk_error_queue); | ||
120 | if (skb) { | ||
121 | sk->sk_err = SKB_EXT_ERR(skb)->ee.ee_errno; | ||
122 | spin_unlock_bh(&sk->sk_error_queue.lock); | ||
123 | sk->sk_error_report(sk); | ||
124 | } else { | ||
125 | spin_unlock_bh(&sk->sk_error_queue.lock); | ||
126 | } | ||
127 | |||
128 | _leave(""); | ||
129 | } | ||
130 | |||
/*
 * deal with UDP error messages (work item)
 *
 * Translates an extended socket error report into an errno, and if the
 * error is unrecoverable, marks every call targeting the peer with a
 * network error and queues it for event processing.  Consumes one skb per
 * run (and the transport ref passed with it); requeues itself if more
 * reports are pending.
 *
 * Note: the original code kept an "int local" flag, assigned in every
 * switch arm but never read; it has been removed as dead code.
 */
void rxrpc_UDP_error_handler(struct work_struct *work)
{
	struct sock_extended_err *ee;
	struct sock_exterr_skb *serr;
	struct rxrpc_transport *trans =
		container_of(work, struct rxrpc_transport, error_handler);
	struct sk_buff *skb;
	int err;

	_enter("");

	skb = skb_dequeue(&trans->error_queue);
	if (!skb)
		return;

	serr = SKB_EXT_ERR(skb);
	ee = &serr->ee;

	_net("Rx Error o=%d t=%d c=%d e=%d",
	     ee->ee_origin, ee->ee_type, ee->ee_code, ee->ee_errno);

	err = ee->ee_errno;

	switch (ee->ee_origin) {
	case SO_EE_ORIGIN_ICMP:
		switch (ee->ee_type) {
		case ICMP_DEST_UNREACH:
			/* map the unreachable subcode onto a local errno */
			switch (ee->ee_code) {
			case ICMP_NET_UNREACH:
				_net("Rx Received ICMP Network Unreachable");
				err = ENETUNREACH;
				break;
			case ICMP_HOST_UNREACH:
				_net("Rx Received ICMP Host Unreachable");
				err = EHOSTUNREACH;
				break;
			case ICMP_PORT_UNREACH:
				_net("Rx Received ICMP Port Unreachable");
				err = ECONNREFUSED;
				break;
			case ICMP_FRAG_NEEDED:
				_net("Rx Received ICMP Fragmentation Needed (%d)",
				     ee->ee_info);
				err = 0; /* dealt with elsewhere */
				break;
			case ICMP_NET_UNKNOWN:
				_net("Rx Received ICMP Unknown Network");
				err = ENETUNREACH;
				break;
			case ICMP_HOST_UNKNOWN:
				_net("Rx Received ICMP Unknown Host");
				err = EHOSTUNREACH;
				break;
			default:
				_net("Rx Received ICMP DestUnreach code=%u",
				     ee->ee_code);
				break;
			}
			break;

		case ICMP_TIME_EXCEEDED:
			_net("Rx Received ICMP TTL Exceeded");
			break;

		default:
			_proto("Rx Received ICMP error { type=%u code=%u }",
			       ee->ee_type, ee->ee_code);
			break;
		}
		break;

	case SO_EE_ORIGIN_LOCAL:
		_proto("Rx Received local error { error=%d }",
		       ee->ee_errno);
		break;

	case SO_EE_ORIGIN_NONE:
	case SO_EE_ORIGIN_ICMP6:
	default:
		_proto("Rx Received error report { orig=%u }",
		       ee->ee_origin);
		break;
	}

	/* terminate all the affected calls if there's an unrecoverable
	 * error */
	if (err) {
		struct rxrpc_call *call, *_n;

		_debug("ISSUE ERROR %d", err);

		spin_lock_bh(&trans->peer->lock);
		trans->peer->net_error = err;

		list_for_each_entry_safe(call, _n, &trans->peer->error_targets,
					 error_link) {
			write_lock(&call->state_lock);
			if (call->state != RXRPC_CALL_COMPLETE &&
			    call->state < RXRPC_CALL_NETWORK_ERROR) {
				call->state = RXRPC_CALL_NETWORK_ERROR;
				set_bit(RXRPC_CALL_RCVD_ERROR, &call->events);
				rxrpc_queue_call(call);
			}
			write_unlock(&call->state_lock);
			list_del_init(&call->error_link);
		}

		spin_unlock_bh(&trans->peer->lock);
	}

	/* more reports pending: run again */
	if (!skb_queue_empty(&trans->error_queue))
		rxrpc_queue_work(&trans->error_handler);

	rxrpc_free_skb(skb);
	rxrpc_put_transport(trans);
	_leave("");
}
diff --git a/net/rxrpc/ar-input.c b/net/rxrpc/ar-input.c new file mode 100644 index 000000000000..91b5bbb003e2 --- /dev/null +++ b/net/rxrpc/ar-input.c | |||
@@ -0,0 +1,797 @@ | |||
1 | /* RxRPC packet reception | ||
2 | * | ||
3 | * Copyright (C) 2007 Red Hat, Inc. All Rights Reserved. | ||
4 | * Written by David Howells (dhowells@redhat.com) | ||
5 | * | ||
6 | * This program is free software; you can redistribute it and/or | ||
7 | * modify it under the terms of the GNU General Public License | ||
8 | * as published by the Free Software Foundation; either version | ||
9 | * 2 of the License, or (at your option) any later version. | ||
10 | */ | ||
11 | |||
12 | #include <linux/module.h> | ||
13 | #include <linux/net.h> | ||
14 | #include <linux/skbuff.h> | ||
15 | #include <linux/errqueue.h> | ||
16 | #include <linux/udp.h> | ||
17 | #include <linux/in.h> | ||
18 | #include <linux/in6.h> | ||
19 | #include <linux/icmp.h> | ||
20 | #include <net/sock.h> | ||
21 | #include <net/af_rxrpc.h> | ||
22 | #include <net/ip.h> | ||
23 | #include "ar-internal.h" | ||
24 | |||
/* delay before a deferred ACK is generated (units appear to be seconds;
 * multiplied by HZ at use sites — TODO confirm against callers) */
unsigned long rxrpc_ack_timeout = 1;

/* human-readable names for RxRPC packet types, indexed by sp->hdr.type;
 * unassigned type numbers get "?NN" placeholders */
const char *rxrpc_pkts[] = {
	"?00",
	"DATA", "ACK", "BUSY", "ABORT", "ACKALL", "CHALL", "RESP", "DEBUG",
	"?09", "?10", "?11", "?12", "?13", "?14", "?15"
};
32 | |||
/*
 * queue a packet for recvmsg to pass to userspace
 * - the caller must hold a lock on call->lock
 * - must not be called with interrupts disabled (sk_filter() disables BH's)
 * - eats the packet whether successful or not
 * - there must be just one reference to the packet, which the caller passes to
 *   this function
 *
 * @force:    skip the socket filter / buffer checks
 * @terminal: this is the final message for the call; no more will be posted
 * Returns 0 on success (including silent discard) or a negative sk_filter
 * error.
 */
int rxrpc_queue_rcv_skb(struct rxrpc_call *call, struct sk_buff *skb,
			bool force, bool terminal)
{
	struct rxrpc_skb_priv *sp;
	struct rxrpc_sock *rx = call->socket;
	struct sock *sk;
	int skb_len, ret;

	_enter(",,%d,%d", force, terminal);

	ASSERT(!irqs_disabled());

	sp = rxrpc_skb(skb);
	ASSERTCMP(sp->call, ==, call);

	/* if we've already posted the terminal message for a call, then we
	 * don't post any more */
	if (test_bit(RXRPC_CALL_TERMINAL_MSG, &call->flags)) {
		_debug("already terminated");
		ASSERTCMP(call->state, >=, RXRPC_CALL_COMPLETE);
		/* release the call ref the skb was carrying and drop it */
		skb->destructor = NULL;
		sp->call = NULL;
		rxrpc_put_call(call);
		rxrpc_free_skb(skb);
		return 0;
	}

	sk = &rx->sk;

	if (!force) {
		/* cast skb->rcvbuf to unsigned...  It's pointless, but
		 * reduces number of warnings when compiling with -W
		 * --ANK */
//		ret = -ENOBUFS;
//		if (atomic_read(&sk->sk_rmem_alloc) + skb->truesize >=
//		    (unsigned) sk->sk_rcvbuf)
//			goto out;

		ret = sk_filter(sk, skb);
		if (ret < 0)
			goto out;
	}

	spin_lock_bh(&sk->sk_receive_queue.lock);
	/* only post if the call is still live and the socket is open */
	if (!test_bit(RXRPC_CALL_TERMINAL_MSG, &call->flags) &&
	    !test_bit(RXRPC_CALL_RELEASED, &call->flags) &&
	    call->socket->sk.sk_state != RXRPC_CLOSE) {
		/* charge the skb to the socket's receive buffer */
		skb->destructor = rxrpc_packet_destructor;
		skb->dev = NULL;
		skb->sk = sk;
		atomic_add(skb->truesize, &sk->sk_rmem_alloc);

		if (terminal) {
			_debug("<<<< TERMINAL MESSAGE >>>>");
			set_bit(RXRPC_CALL_TERMINAL_MSG, &call->flags);
		}

		/* allow interception by a kernel service */
		if (rx->interceptor) {
			rx->interceptor(sk, call->user_call_ID, skb);
			spin_unlock_bh(&sk->sk_receive_queue.lock);
		} else {

			/* Cache the SKB length before we tack it onto the
			 * receive queue.  Once it is added it no longer
			 * belongs to us and may be freed by other threads of
			 * control pulling packets from the queue */
			skb_len = skb->len;

			_net("post skb %p", skb);
			__skb_queue_tail(&sk->sk_receive_queue, skb);
			spin_unlock_bh(&sk->sk_receive_queue.lock);

			if (!sock_flag(sk, SOCK_DEAD))
				sk->sk_data_ready(sk, skb_len);
		}
		/* ownership handed off; don't free below */
		skb = NULL;
	} else {
		spin_unlock_bh(&sk->sk_receive_queue.lock);
	}
	ret = 0;

out:
	/* release the socket buffer */
	if (skb) {
		skb->destructor = NULL;
		sp->call = NULL;
		rxrpc_put_call(call);
		rxrpc_free_skb(skb);
	}

	_leave(" = %d", ret);
	return ret;
}
135 | |||
/*
 * process a DATA packet, posting the packet to the appropriate queue
 * - eats the packet if successful
 *
 * @seq: the packet's sequence number (host byte order)
 * Fast path: packets arriving exactly in sequence on an unsecured
 * connection are posted straight to the socket; everything else (out of
 * sequence, duplicate, window overflow, security required) is discarded,
 * ACKed, or deferred to call->rx_queue for the slow path.
 */
static int rxrpc_fast_process_data(struct rxrpc_call *call,
				   struct sk_buff *skb, u32 seq)
{
	struct rxrpc_skb_priv *sp;
	bool terminal;
	int ret, ackbit, ack;

	_enter("{%u,%u},,{%u}", call->rx_data_post, call->rx_first_oos, seq);

	sp = rxrpc_skb(skb);
	ASSERTCMP(sp->call, ==, NULL);

	spin_lock(&call->lock);

	/* a call past COMPLETE no longer accepts data */
	if (call->state > RXRPC_CALL_COMPLETE)
		goto discard;

	/* sanity: the rx counters must be monotonically ordered */
	ASSERTCMP(call->rx_data_expect, >=, call->rx_data_post);
	ASSERTCMP(call->rx_data_post, >=, call->rx_data_recv);
	ASSERTCMP(call->rx_data_recv, >=, call->rx_data_eaten);

	/* packet precedes what we've already posted: duplicate */
	if (seq < call->rx_data_post) {
		_debug("dup #%u [-%u]", seq, call->rx_data_post);
		ack = RXRPC_ACK_DUPLICATE;
		ret = -ENOBUFS;
		goto discard_and_ack;
	}

	/* we may already have the packet in the out of sequence queue */
	ackbit = seq - (call->rx_data_eaten + 1);
	ASSERTCMP(ackbit, >=, 0);
	if (__test_and_set_bit(ackbit, call->ackr_window)) {
		_debug("dup oos #%u [%u,%u]",
		       seq, call->rx_data_eaten, call->rx_data_post);
		ack = RXRPC_ACK_DUPLICATE;
		goto discard_and_ack;
	}

	/* packet is beyond the advertised receive window */
	if (seq >= call->ackr_win_top) {
		_debug("exceed #%u [%u]", seq, call->ackr_win_top);
		__clear_bit(ackbit, call->ackr_window);
		ack = RXRPC_ACK_EXCEEDS_WINDOW;
		goto discard_and_ack;
	}

	if (seq == call->rx_data_expect) {
		clear_bit(RXRPC_CALL_EXPECT_OOS, &call->flags);
		call->rx_data_expect++;
	} else if (seq > call->rx_data_expect) {
		/* a gap: defer the packet; ACK out-of-sequence only the
		 * first time (EXPECT_OOS flag) */
		_debug("oos #%u [%u]", seq, call->rx_data_expect);
		call->rx_data_expect = seq + 1;
		if (test_and_set_bit(RXRPC_CALL_EXPECT_OOS, &call->flags)) {
			ack = RXRPC_ACK_OUT_OF_SEQUENCE;
			goto enqueue_and_ack;
		}
		goto enqueue_packet;
	}

	/* in window but not the next one to post: defer it */
	if (seq != call->rx_data_post) {
		_debug("ahead #%u [%u]", seq, call->rx_data_post);
		goto enqueue_packet;
	}

	/* data after the packet flagged as last is a protocol violation */
	if (test_bit(RXRPC_CALL_RCVD_LAST, &call->flags))
		goto protocol_error;

	/* if the packet need security things doing to it, then it goes down
	 * the slow path */
	if (call->conn->security)
		goto enqueue_packet;

	/* fast-path post: skb takes a ref on the call */
	sp->call = call;
	rxrpc_get_call(call);
	terminal = ((sp->hdr.flags & RXRPC_LAST_PACKET) &&
		    !(sp->hdr.flags & RXRPC_CLIENT_INITIATED));
	ret = rxrpc_queue_rcv_skb(call, skb, false, terminal);
	if (ret < 0) {
		if (ret == -ENOMEM || ret == -ENOBUFS) {
			/* no room: undo the window bit and ask for resend */
			__clear_bit(ackbit, call->ackr_window);
			ack = RXRPC_ACK_NOSPACE;
			goto discard_and_ack;
		}
		goto out;
	}

	skb = NULL;

	_debug("post #%u", seq);
	ASSERTCMP(call->rx_data_post, ==, seq);
	call->rx_data_post++;

	if (sp->hdr.flags & RXRPC_LAST_PACKET)
		set_bit(RXRPC_CALL_RCVD_LAST, &call->flags);

	/* if we've reached an out of sequence packet then we need to drain
	 * that queue into the socket Rx queue now */
	if (call->rx_data_post == call->rx_first_oos) {
		_debug("drain rx oos now");
		read_lock(&call->state_lock);
		if (call->state < RXRPC_CALL_COMPLETE &&
		    !test_and_set_bit(RXRPC_CALL_DRAIN_RX_OOS, &call->events))
			rxrpc_queue_call(call);
		read_unlock(&call->state_lock);
	}

	spin_unlock(&call->lock);
	atomic_inc(&call->ackr_not_idle);
	rxrpc_propose_ACK(call, RXRPC_ACK_DELAY, sp->hdr.serial, false);
	_leave(" = 0 [posted]");
	return 0;

protocol_error:
	ret = -EBADMSG;
out:
	spin_unlock(&call->lock);
	_leave(" = %d", ret);
	return ret;

discard_and_ack:
	_debug("discard and ACK packet %p", skb);
	__rxrpc_propose_ACK(call, ack, sp->hdr.serial, true);
discard:
	spin_unlock(&call->lock);
	rxrpc_free_skb(skb);
	_leave(" = 0 [discarded]");
	return 0;

enqueue_and_ack:
	__rxrpc_propose_ACK(call, ack, sp->hdr.serial, true);
enqueue_packet:
	/* defer to the call's rx queue for slow-path processing */
	_net("defer skb %p", skb);
	spin_unlock(&call->lock);
	skb_queue_tail(&call->rx_queue, skb);
	atomic_inc(&call->ackr_not_idle);
	read_lock(&call->state_lock);
	if (call->state < RXRPC_CALL_DEAD)
		rxrpc_queue_call(call);
	read_unlock(&call->state_lock);
	_leave(" = 0 [queued]");
	return 0;
}
281 | |||
/*
 * assume an implicit ACKALL of the transmission phase of a client socket upon
 * reception of the first reply packet
 *
 * @serial: serial number of the packet that triggered the implicit ACKALL
 */
static void rxrpc_assume_implicit_ackall(struct rxrpc_call *call, u32 serial)
{
	write_lock_bh(&call->state_lock);

	switch (call->state) {
	case RXRPC_CALL_CLIENT_AWAIT_REPLY:
		/* the Tx phase is done; move into the receive phase */
		call->state = RXRPC_CALL_CLIENT_RECV_REPLY;
		call->acks_latest = serial;

		_debug("implicit ACKALL %%%u", call->acks_latest);
		set_bit(RXRPC_CALL_RCVD_ACKALL, &call->events);
		write_unlock_bh(&call->state_lock);

		/* stop any pending retransmission; >= 0 means the timer is
		 * no longer queued so the event bits can safely be cleared */
		if (try_to_del_timer_sync(&call->resend_timer) >= 0) {
			clear_bit(RXRPC_CALL_RESEND_TIMER, &call->events);
			clear_bit(RXRPC_CALL_RESEND, &call->events);
			clear_bit(RXRPC_CALL_RUN_RTIMER, &call->flags);
		}
		break;

	default:
		/* any other state: nothing to do */
		write_unlock_bh(&call->state_lock);
		break;
	}
}
311 | |||
312 | /* | ||
313 | * post an incoming packet to the nominated call to deal with | ||
314 | * - must get rid of the sk_buff, either by freeing it or by queuing it | ||
315 | */ | ||
316 | void rxrpc_fast_process_packet(struct rxrpc_call *call, struct sk_buff *skb) | ||
317 | { | ||
318 | struct rxrpc_skb_priv *sp = rxrpc_skb(skb); | ||
319 | __be32 _abort_code; | ||
320 | u32 serial, hi_serial, seq, abort_code; | ||
321 | |||
322 | _enter("%p,%p", call, skb); | ||
323 | |||
324 | ASSERT(!irqs_disabled()); | ||
325 | |||
326 | #if 0 // INJECT RX ERROR | ||
327 | if (sp->hdr.type == RXRPC_PACKET_TYPE_DATA) { | ||
328 | static int skip = 0; | ||
329 | if (++skip == 3) { | ||
330 | printk("DROPPED 3RD PACKET!!!!!!!!!!!!!\n"); | ||
331 | skip = 0; | ||
332 | goto free_packet; | ||
333 | } | ||
334 | } | ||
335 | #endif | ||
336 | |||
337 | /* track the latest serial number on this connection for ACK packet | ||
338 | * information */ | ||
339 | serial = ntohl(sp->hdr.serial); | ||
340 | hi_serial = atomic_read(&call->conn->hi_serial); | ||
341 | while (serial > hi_serial) | ||
342 | hi_serial = atomic_cmpxchg(&call->conn->hi_serial, hi_serial, | ||
343 | serial); | ||
344 | |||
345 | /* request ACK generation for any ACK or DATA packet that requests | ||
346 | * it */ | ||
347 | if (sp->hdr.flags & RXRPC_REQUEST_ACK) { | ||
348 | _proto("ACK Requested on %%%u", serial); | ||
349 | rxrpc_propose_ACK(call, RXRPC_ACK_REQUESTED, sp->hdr.serial, | ||
350 | !(sp->hdr.flags & RXRPC_MORE_PACKETS)); | ||
351 | } | ||
352 | |||
353 | switch (sp->hdr.type) { | ||
354 | case RXRPC_PACKET_TYPE_ABORT: | ||
355 | _debug("abort"); | ||
356 | |||
357 | if (skb_copy_bits(skb, 0, &_abort_code, | ||
358 | sizeof(_abort_code)) < 0) | ||
359 | goto protocol_error; | ||
360 | |||
361 | abort_code = ntohl(_abort_code); | ||
362 | _proto("Rx ABORT %%%u { %x }", serial, abort_code); | ||
363 | |||
364 | write_lock_bh(&call->state_lock); | ||
365 | if (call->state < RXRPC_CALL_COMPLETE) { | ||
366 | call->state = RXRPC_CALL_REMOTELY_ABORTED; | ||
367 | call->abort_code = abort_code; | ||
368 | set_bit(RXRPC_CALL_RCVD_ABORT, &call->events); | ||
369 | rxrpc_queue_call(call); | ||
370 | } | ||
371 | goto free_packet_unlock; | ||
372 | |||
373 | case RXRPC_PACKET_TYPE_BUSY: | ||
374 | _proto("Rx BUSY %%%u", serial); | ||
375 | |||
376 | if (call->conn->out_clientflag) | ||
377 | goto protocol_error; | ||
378 | |||
379 | write_lock_bh(&call->state_lock); | ||
380 | switch (call->state) { | ||
381 | case RXRPC_CALL_CLIENT_SEND_REQUEST: | ||
382 | call->state = RXRPC_CALL_SERVER_BUSY; | ||
383 | set_bit(RXRPC_CALL_RCVD_BUSY, &call->events); | ||
384 | rxrpc_queue_call(call); | ||
385 | case RXRPC_CALL_SERVER_BUSY: | ||
386 | goto free_packet_unlock; | ||
387 | default: | ||
388 | goto protocol_error_locked; | ||
389 | } | ||
390 | |||
391 | default: | ||
392 | _proto("Rx %s %%%u", rxrpc_pkts[sp->hdr.type], serial); | ||
393 | goto protocol_error; | ||
394 | |||
395 | case RXRPC_PACKET_TYPE_DATA: | ||
396 | seq = ntohl(sp->hdr.seq); | ||
397 | |||
398 | _proto("Rx DATA %%%u { #%u }", serial, seq); | ||
399 | |||
400 | if (seq == 0) | ||
401 | goto protocol_error; | ||
402 | |||
403 | call->ackr_prev_seq = sp->hdr.seq; | ||
404 | |||
405 | /* received data implicitly ACKs all of the request packets we | ||
406 | * sent when we're acting as a client */ | ||
407 | if (call->state == RXRPC_CALL_CLIENT_AWAIT_REPLY) | ||
408 | rxrpc_assume_implicit_ackall(call, serial); | ||
409 | |||
410 | switch (rxrpc_fast_process_data(call, skb, seq)) { | ||
411 | case 0: | ||
412 | skb = NULL; | ||
413 | goto done; | ||
414 | |||
415 | default: | ||
416 | BUG(); | ||
417 | |||
418 | /* data packet received beyond the last packet */ | ||
419 | case -EBADMSG: | ||
420 | goto protocol_error; | ||
421 | } | ||
422 | |||
423 | case RXRPC_PACKET_TYPE_ACK: | ||
424 | /* ACK processing is done in process context */ | ||
425 | read_lock_bh(&call->state_lock); | ||
426 | if (call->state < RXRPC_CALL_DEAD) { | ||
427 | skb_queue_tail(&call->rx_queue, skb); | ||
428 | rxrpc_queue_call(call); | ||
429 | skb = NULL; | ||
430 | } | ||
431 | read_unlock_bh(&call->state_lock); | ||
432 | goto free_packet; | ||
433 | } | ||
434 | |||
435 | protocol_error: | ||
436 | _debug("protocol error"); | ||
437 | write_lock_bh(&call->state_lock); | ||
438 | protocol_error_locked: | ||
439 | if (call->state <= RXRPC_CALL_COMPLETE) { | ||
440 | call->state = RXRPC_CALL_LOCALLY_ABORTED; | ||
441 | call->abort_code = RX_PROTOCOL_ERROR; | ||
442 | set_bit(RXRPC_CALL_ABORT, &call->events); | ||
443 | rxrpc_queue_call(call); | ||
444 | } | ||
445 | free_packet_unlock: | ||
446 | write_unlock_bh(&call->state_lock); | ||
447 | free_packet: | ||
448 | rxrpc_free_skb(skb); | ||
449 | done: | ||
450 | _leave(""); | ||
451 | } | ||
452 | |||
453 | /* | ||
454 | * split up a jumbo data packet | ||
455 | */ | ||
456 | static void rxrpc_process_jumbo_packet(struct rxrpc_call *call, | ||
457 | struct sk_buff *jumbo) | ||
458 | { | ||
459 | struct rxrpc_jumbo_header jhdr; | ||
460 | struct rxrpc_skb_priv *sp; | ||
461 | struct sk_buff *part; | ||
462 | |||
463 | _enter(",{%u,%u}", jumbo->data_len, jumbo->len); | ||
464 | |||
465 | sp = rxrpc_skb(jumbo); | ||
466 | |||
467 | do { | ||
468 | sp->hdr.flags &= ~RXRPC_JUMBO_PACKET; | ||
469 | |||
470 | /* make a clone to represent the first subpacket in what's left | ||
471 | * of the jumbo packet */ | ||
472 | part = skb_clone(jumbo, GFP_ATOMIC); | ||
473 | if (!part) { | ||
474 | /* simply ditch the tail in the event of ENOMEM */ | ||
475 | pskb_trim(jumbo, RXRPC_JUMBO_DATALEN); | ||
476 | break; | ||
477 | } | ||
478 | rxrpc_new_skb(part); | ||
479 | |||
480 | pskb_trim(part, RXRPC_JUMBO_DATALEN); | ||
481 | |||
482 | if (!pskb_pull(jumbo, RXRPC_JUMBO_DATALEN)) | ||
483 | goto protocol_error; | ||
484 | |||
485 | if (skb_copy_bits(jumbo, 0, &jhdr, sizeof(jhdr)) < 0) | ||
486 | goto protocol_error; | ||
487 | if (!pskb_pull(jumbo, sizeof(jhdr))) | ||
488 | BUG(); | ||
489 | |||
490 | sp->hdr.seq = htonl(ntohl(sp->hdr.seq) + 1); | ||
491 | sp->hdr.serial = htonl(ntohl(sp->hdr.serial) + 1); | ||
492 | sp->hdr.flags = jhdr.flags; | ||
493 | sp->hdr._rsvd = jhdr._rsvd; | ||
494 | |||
495 | _proto("Rx DATA Jumbo %%%u", ntohl(sp->hdr.serial) - 1); | ||
496 | |||
497 | rxrpc_fast_process_packet(call, part); | ||
498 | part = NULL; | ||
499 | |||
500 | } while (sp->hdr.flags & RXRPC_JUMBO_PACKET); | ||
501 | |||
502 | rxrpc_fast_process_packet(call, jumbo); | ||
503 | _leave(""); | ||
504 | return; | ||
505 | |||
506 | protocol_error: | ||
507 | _debug("protocol error"); | ||
508 | rxrpc_free_skb(part); | ||
509 | rxrpc_free_skb(jumbo); | ||
510 | write_lock_bh(&call->state_lock); | ||
511 | if (call->state <= RXRPC_CALL_COMPLETE) { | ||
512 | call->state = RXRPC_CALL_LOCALLY_ABORTED; | ||
513 | call->abort_code = RX_PROTOCOL_ERROR; | ||
514 | set_bit(RXRPC_CALL_ABORT, &call->events); | ||
515 | rxrpc_queue_call(call); | ||
516 | } | ||
517 | write_unlock_bh(&call->state_lock); | ||
518 | _leave(""); | ||
519 | } | ||
520 | |||
521 | /* | ||
522 | * post an incoming packet to the appropriate call/socket to deal with | ||
523 | * - must get rid of the sk_buff, either by freeing it or by queuing it | ||
524 | */ | ||
525 | static void rxrpc_post_packet_to_call(struct rxrpc_connection *conn, | ||
526 | struct sk_buff *skb) | ||
527 | { | ||
528 | struct rxrpc_skb_priv *sp; | ||
529 | struct rxrpc_call *call; | ||
530 | struct rb_node *p; | ||
531 | __be32 call_id; | ||
532 | |||
533 | _enter("%p,%p", conn, skb); | ||
534 | |||
535 | read_lock_bh(&conn->lock); | ||
536 | |||
537 | sp = rxrpc_skb(skb); | ||
538 | |||
539 | /* look at extant calls by channel number first */ | ||
540 | call = conn->channels[ntohl(sp->hdr.cid) & RXRPC_CHANNELMASK]; | ||
541 | if (!call || call->call_id != sp->hdr.callNumber) | ||
542 | goto call_not_extant; | ||
543 | |||
544 | _debug("extant call [%d]", call->state); | ||
545 | ASSERTCMP(call->conn, ==, conn); | ||
546 | |||
547 | read_lock(&call->state_lock); | ||
548 | switch (call->state) { | ||
549 | case RXRPC_CALL_LOCALLY_ABORTED: | ||
550 | if (!test_and_set_bit(RXRPC_CALL_ABORT, &call->events)) | ||
551 | rxrpc_queue_call(call); | ||
552 | case RXRPC_CALL_REMOTELY_ABORTED: | ||
553 | case RXRPC_CALL_NETWORK_ERROR: | ||
554 | case RXRPC_CALL_DEAD: | ||
555 | goto free_unlock; | ||
556 | default: | ||
557 | break; | ||
558 | } | ||
559 | |||
560 | read_unlock(&call->state_lock); | ||
561 | rxrpc_get_call(call); | ||
562 | read_unlock_bh(&conn->lock); | ||
563 | |||
564 | if (sp->hdr.type == RXRPC_PACKET_TYPE_DATA && | ||
565 | sp->hdr.flags & RXRPC_JUMBO_PACKET) | ||
566 | rxrpc_process_jumbo_packet(call, skb); | ||
567 | else | ||
568 | rxrpc_fast_process_packet(call, skb); | ||
569 | |||
570 | rxrpc_put_call(call); | ||
571 | goto done; | ||
572 | |||
573 | call_not_extant: | ||
574 | /* search the completed calls in case what we're dealing with is | ||
575 | * there */ | ||
576 | _debug("call not extant"); | ||
577 | |||
578 | call_id = sp->hdr.callNumber; | ||
579 | p = conn->calls.rb_node; | ||
580 | while (p) { | ||
581 | call = rb_entry(p, struct rxrpc_call, conn_node); | ||
582 | |||
583 | if (call_id < call->call_id) | ||
584 | p = p->rb_left; | ||
585 | else if (call_id > call->call_id) | ||
586 | p = p->rb_right; | ||
587 | else | ||
588 | goto found_completed_call; | ||
589 | } | ||
590 | |||
591 | dead_call: | ||
592 | /* it's a either a really old call that we no longer remember or its a | ||
593 | * new incoming call */ | ||
594 | read_unlock_bh(&conn->lock); | ||
595 | |||
596 | if (sp->hdr.flags & RXRPC_CLIENT_INITIATED && | ||
597 | sp->hdr.seq == __constant_cpu_to_be32(1)) { | ||
598 | _debug("incoming call"); | ||
599 | skb_queue_tail(&conn->trans->local->accept_queue, skb); | ||
600 | rxrpc_queue_work(&conn->trans->local->acceptor); | ||
601 | goto done; | ||
602 | } | ||
603 | |||
604 | _debug("dead call"); | ||
605 | skb->priority = RX_CALL_DEAD; | ||
606 | rxrpc_reject_packet(conn->trans->local, skb); | ||
607 | goto done; | ||
608 | |||
609 | /* resend last packet of a completed call | ||
610 | * - client calls may have been aborted or ACK'd | ||
611 | * - server calls may have been aborted | ||
612 | */ | ||
613 | found_completed_call: | ||
614 | _debug("completed call"); | ||
615 | |||
616 | if (atomic_read(&call->usage) == 0) | ||
617 | goto dead_call; | ||
618 | |||
619 | /* synchronise any state changes */ | ||
620 | read_lock(&call->state_lock); | ||
621 | ASSERTIFCMP(call->state != RXRPC_CALL_CLIENT_FINAL_ACK, | ||
622 | call->state, >=, RXRPC_CALL_COMPLETE); | ||
623 | |||
624 | if (call->state == RXRPC_CALL_LOCALLY_ABORTED || | ||
625 | call->state == RXRPC_CALL_REMOTELY_ABORTED || | ||
626 | call->state == RXRPC_CALL_DEAD) { | ||
627 | read_unlock(&call->state_lock); | ||
628 | goto dead_call; | ||
629 | } | ||
630 | |||
631 | if (call->conn->in_clientflag) { | ||
632 | read_unlock(&call->state_lock); | ||
633 | goto dead_call; /* complete server call */ | ||
634 | } | ||
635 | |||
636 | _debug("final ack again"); | ||
637 | rxrpc_get_call(call); | ||
638 | set_bit(RXRPC_CALL_ACK_FINAL, &call->events); | ||
639 | rxrpc_queue_call(call); | ||
640 | |||
641 | free_unlock: | ||
642 | read_unlock(&call->state_lock); | ||
643 | read_unlock_bh(&conn->lock); | ||
644 | rxrpc_free_skb(skb); | ||
645 | done: | ||
646 | _leave(""); | ||
647 | } | ||
648 | |||
649 | /* | ||
650 | * post connection-level events to the connection | ||
651 | * - this includes challenges, responses and some aborts | ||
652 | */ | ||
653 | static void rxrpc_post_packet_to_conn(struct rxrpc_connection *conn, | ||
654 | struct sk_buff *skb) | ||
655 | { | ||
656 | _enter("%p,%p", conn, skb); | ||
657 | |||
658 | atomic_inc(&conn->usage); | ||
659 | skb_queue_tail(&conn->rx_queue, skb); | ||
660 | rxrpc_queue_conn(conn); | ||
661 | } | ||
662 | |||
663 | /* | ||
664 | * handle data received on the local endpoint | ||
665 | * - may be called in interrupt context | ||
666 | */ | ||
667 | void rxrpc_data_ready(struct sock *sk, int count) | ||
668 | { | ||
669 | struct rxrpc_connection *conn; | ||
670 | struct rxrpc_transport *trans; | ||
671 | struct rxrpc_skb_priv *sp; | ||
672 | struct rxrpc_local *local; | ||
673 | struct rxrpc_peer *peer; | ||
674 | struct sk_buff *skb; | ||
675 | int ret; | ||
676 | |||
677 | _enter("%p, %d", sk, count); | ||
678 | |||
679 | ASSERT(!irqs_disabled()); | ||
680 | |||
681 | read_lock_bh(&rxrpc_local_lock); | ||
682 | local = sk->sk_user_data; | ||
683 | if (local && atomic_read(&local->usage) > 0) | ||
684 | rxrpc_get_local(local); | ||
685 | else | ||
686 | local = NULL; | ||
687 | read_unlock_bh(&rxrpc_local_lock); | ||
688 | if (!local) { | ||
689 | _leave(" [local dead]"); | ||
690 | return; | ||
691 | } | ||
692 | |||
693 | skb = skb_recv_datagram(sk, 0, 1, &ret); | ||
694 | if (!skb) { | ||
695 | rxrpc_put_local(local); | ||
696 | if (ret == -EAGAIN) | ||
697 | return; | ||
698 | _debug("UDP socket error %d", ret); | ||
699 | return; | ||
700 | } | ||
701 | |||
702 | rxrpc_new_skb(skb); | ||
703 | |||
704 | _net("recv skb %p", skb); | ||
705 | |||
706 | /* we'll probably need to checksum it (didn't call sock_recvmsg) */ | ||
707 | if (skb_checksum_complete(skb)) { | ||
708 | rxrpc_free_skb(skb); | ||
709 | rxrpc_put_local(local); | ||
710 | _leave(" [CSUM failed]"); | ||
711 | return; | ||
712 | } | ||
713 | |||
714 | /* the socket buffer we have is owned by UDP, with UDP's data all over | ||
715 | * it, but we really want our own */ | ||
716 | skb_orphan(skb); | ||
717 | sp = rxrpc_skb(skb); | ||
718 | memset(sp, 0, sizeof(*sp)); | ||
719 | |||
720 | _net("Rx UDP packet from %08x:%04hu", | ||
721 | ntohl(ip_hdr(skb)->saddr), ntohs(udp_hdr(skb)->source)); | ||
722 | |||
723 | /* dig out the RxRPC connection details */ | ||
724 | if (skb_copy_bits(skb, sizeof(struct udphdr), &sp->hdr, | ||
725 | sizeof(sp->hdr)) < 0) | ||
726 | goto bad_message; | ||
727 | if (!pskb_pull(skb, sizeof(struct udphdr) + sizeof(sp->hdr))) | ||
728 | BUG(); | ||
729 | |||
730 | _net("Rx RxRPC %s ep=%x call=%x:%x", | ||
731 | sp->hdr.flags & RXRPC_CLIENT_INITIATED ? "ToServer" : "ToClient", | ||
732 | ntohl(sp->hdr.epoch), | ||
733 | ntohl(sp->hdr.cid), | ||
734 | ntohl(sp->hdr.callNumber)); | ||
735 | |||
736 | if (sp->hdr.type == 0 || sp->hdr.type >= RXRPC_N_PACKET_TYPES) { | ||
737 | _proto("Rx Bad Packet Type %u", sp->hdr.type); | ||
738 | goto bad_message; | ||
739 | } | ||
740 | |||
741 | if (sp->hdr.type == RXRPC_PACKET_TYPE_DATA && | ||
742 | (sp->hdr.callNumber == 0 || sp->hdr.seq == 0)) | ||
743 | goto bad_message; | ||
744 | |||
745 | peer = rxrpc_find_peer(local, ip_hdr(skb)->saddr, udp_hdr(skb)->source); | ||
746 | if (IS_ERR(peer)) | ||
747 | goto cant_route_call; | ||
748 | |||
749 | trans = rxrpc_find_transport(local, peer); | ||
750 | rxrpc_put_peer(peer); | ||
751 | if (!trans) | ||
752 | goto cant_route_call; | ||
753 | |||
754 | conn = rxrpc_find_connection(trans, &sp->hdr); | ||
755 | rxrpc_put_transport(trans); | ||
756 | if (!conn) | ||
757 | goto cant_route_call; | ||
758 | |||
759 | _debug("CONN %p {%d}", conn, conn->debug_id); | ||
760 | |||
761 | if (sp->hdr.callNumber == 0) | ||
762 | rxrpc_post_packet_to_conn(conn, skb); | ||
763 | else | ||
764 | rxrpc_post_packet_to_call(conn, skb); | ||
765 | rxrpc_put_connection(conn); | ||
766 | rxrpc_put_local(local); | ||
767 | return; | ||
768 | |||
769 | cant_route_call: | ||
770 | _debug("can't route call"); | ||
771 | if (sp->hdr.flags & RXRPC_CLIENT_INITIATED && | ||
772 | sp->hdr.type == RXRPC_PACKET_TYPE_DATA) { | ||
773 | if (sp->hdr.seq == __constant_cpu_to_be32(1)) { | ||
774 | _debug("first packet"); | ||
775 | skb_queue_tail(&local->accept_queue, skb); | ||
776 | rxrpc_queue_work(&local->acceptor); | ||
777 | rxrpc_put_local(local); | ||
778 | _leave(" [incoming]"); | ||
779 | return; | ||
780 | } | ||
781 | skb->priority = RX_INVALID_OPERATION; | ||
782 | } else { | ||
783 | skb->priority = RX_CALL_DEAD; | ||
784 | } | ||
785 | |||
786 | _debug("reject"); | ||
787 | rxrpc_reject_packet(local, skb); | ||
788 | rxrpc_put_local(local); | ||
789 | _leave(" [no call]"); | ||
790 | return; | ||
791 | |||
792 | bad_message: | ||
793 | skb->priority = RX_PROTOCOL_ERROR; | ||
794 | rxrpc_reject_packet(local, skb); | ||
795 | rxrpc_put_local(local); | ||
796 | _leave(" [badmsg]"); | ||
797 | } | ||
diff --git a/net/rxrpc/ar-internal.h b/net/rxrpc/ar-internal.h new file mode 100644 index 000000000000..58aaf892238e --- /dev/null +++ b/net/rxrpc/ar-internal.h | |||
@@ -0,0 +1,808 @@ | |||
1 | /* AF_RXRPC internal definitions | ||
2 | * | ||
3 | * Copyright (C) 2007 Red Hat, Inc. All Rights Reserved. | ||
4 | * Written by David Howells (dhowells@redhat.com) | ||
5 | * | ||
6 | * This program is free software; you can redistribute it and/or | ||
7 | * modify it under the terms of the GNU General Public License | ||
8 | * as published by the Free Software Foundation; either version | ||
9 | * 2 of the License, or (at your option) any later version. | ||
10 | */ | ||
11 | |||
12 | #include <rxrpc/packet.h> | ||
13 | |||
/* debug aid: assert that an atomic_t embedded in a slab object doesn't carry
 * the slab free-poison pattern (i.e. the object wasn't used after free);
 * currently compiled out */
#if 0
#define CHECK_SLAB_OKAY(X) \
	BUG_ON(atomic_read((X)) >> (sizeof(atomic_t) - 2) == \
	       (POISON_FREE << 8 | POISON_FREE))
#else
#define CHECK_SLAB_OKAY(X) do {} while(0)
#endif
21 | |||
#define FCRYPT_BSIZE 8
/* an fcrypt cipher block, viewable as raw bytes or as two 32-bit words;
 * 8-byte aligned for the crypto layer */
struct rxrpc_crypt {
	union {
		u8	x[FCRYPT_BSIZE];
		u32	n[2];
	};
} __attribute__((aligned(8)));
29 | |||
/* all deferred work runs on the rxrpc_workqueue */
#define rxrpc_queue_work(WS)	queue_work(rxrpc_workqueue, (WS))
#define rxrpc_queue_delayed_work(WS,D)	\
	queue_delayed_work(rxrpc_workqueue, (WS), (D))

/* kick a call's or connection's event processor work item */
#define rxrpc_queue_call(CALL)	rxrpc_queue_work(&(CALL)->processor)
#define rxrpc_queue_conn(CONN)	rxrpc_queue_work(&(CONN)->processor)
36 | |||
37 | /* | ||
38 | * sk_state for RxRPC sockets | ||
39 | */ | ||
40 | enum { | ||
41 | RXRPC_UNCONNECTED = 0, | ||
42 | RXRPC_CLIENT_BOUND, /* client local address bound */ | ||
43 | RXRPC_CLIENT_CONNECTED, /* client is connected */ | ||
44 | RXRPC_SERVER_BOUND, /* server local address bound */ | ||
45 | RXRPC_SERVER_LISTENING, /* server listening for connections */ | ||
46 | RXRPC_CLOSE, /* socket is being closed */ | ||
47 | }; | ||
48 | |||
49 | /* | ||
50 | * RxRPC socket definition | ||
51 | */ | ||
52 | struct rxrpc_sock { | ||
53 | /* WARNING: sk has to be the first member */ | ||
54 | struct sock sk; | ||
55 | rxrpc_interceptor_t interceptor; /* kernel service Rx interceptor function */ | ||
56 | struct rxrpc_local *local; /* local endpoint */ | ||
57 | struct rxrpc_transport *trans; /* transport handler */ | ||
58 | struct rxrpc_conn_bundle *bundle; /* virtual connection bundle */ | ||
59 | struct rxrpc_connection *conn; /* exclusive virtual connection */ | ||
60 | struct list_head listen_link; /* link in the local endpoint's listen list */ | ||
61 | struct list_head secureq; /* calls awaiting connection security clearance */ | ||
62 | struct list_head acceptq; /* calls awaiting acceptance */ | ||
63 | struct key *key; /* security for this socket */ | ||
64 | struct key *securities; /* list of server security descriptors */ | ||
65 | struct rb_root calls; /* outstanding calls on this socket */ | ||
66 | unsigned long flags; | ||
67 | #define RXRPC_SOCK_EXCLUSIVE_CONN 1 /* exclusive connection for a client socket */ | ||
68 | rwlock_t call_lock; /* lock for calls */ | ||
69 | u32 min_sec_level; /* minimum security level */ | ||
70 | #define RXRPC_SECURITY_MAX RXRPC_SECURITY_ENCRYPT | ||
71 | struct sockaddr_rxrpc srx; /* local address */ | ||
72 | sa_family_t proto; /* protocol created with */ | ||
73 | __be16 service_id; /* service ID of local/remote service */ | ||
74 | }; | ||
75 | |||
76 | #define rxrpc_sk(__sk) container_of((__sk), struct rxrpc_sock, sk) | ||
77 | |||
78 | /* | ||
79 | * RxRPC socket buffer private variables | ||
80 | * - max 48 bytes (struct sk_buff::cb) | ||
81 | */ | ||
82 | struct rxrpc_skb_priv { | ||
83 | struct rxrpc_call *call; /* call with which associated */ | ||
84 | unsigned long resend_at; /* time in jiffies at which to resend */ | ||
85 | union { | ||
86 | unsigned offset; /* offset into buffer of next read */ | ||
87 | int remain; /* amount of space remaining for next write */ | ||
88 | u32 error; /* network error code */ | ||
89 | bool need_resend; /* T if needs resending */ | ||
90 | }; | ||
91 | |||
92 | struct rxrpc_header hdr; /* RxRPC packet header from this packet */ | ||
93 | }; | ||
94 | |||
95 | #define rxrpc_skb(__skb) ((struct rxrpc_skb_priv *) &(__skb)->cb) | ||
96 | |||
/* sendmsg() control-message commands */
enum rxrpc_command {
	RXRPC_CMD_SEND_DATA,		/* send data message */
	RXRPC_CMD_SEND_ABORT,		/* request abort generation */
	RXRPC_CMD_ACCEPT,		/* [server] accept incoming call */
	RXRPC_CMD_REJECT_BUSY,		/* [server] reject a call as busy */
};
103 | |||
104 | /* | ||
105 | * RxRPC security module interface | ||
106 | */ | ||
107 | struct rxrpc_security { | ||
108 | struct module *owner; /* providing module */ | ||
109 | struct list_head link; /* link in master list */ | ||
110 | const char *name; /* name of this service */ | ||
111 | u8 security_index; /* security type provided */ | ||
112 | |||
113 | /* initialise a connection's security */ | ||
114 | int (*init_connection_security)(struct rxrpc_connection *); | ||
115 | |||
116 | /* prime a connection's packet security */ | ||
117 | void (*prime_packet_security)(struct rxrpc_connection *); | ||
118 | |||
119 | /* impose security on a packet */ | ||
120 | int (*secure_packet)(const struct rxrpc_call *, | ||
121 | struct sk_buff *, | ||
122 | size_t, | ||
123 | void *); | ||
124 | |||
125 | /* verify the security on a received packet */ | ||
126 | int (*verify_packet)(const struct rxrpc_call *, struct sk_buff *, | ||
127 | u32 *); | ||
128 | |||
129 | /* issue a challenge */ | ||
130 | int (*issue_challenge)(struct rxrpc_connection *); | ||
131 | |||
132 | /* respond to a challenge */ | ||
133 | int (*respond_to_challenge)(struct rxrpc_connection *, | ||
134 | struct sk_buff *, | ||
135 | u32 *); | ||
136 | |||
137 | /* verify a response */ | ||
138 | int (*verify_response)(struct rxrpc_connection *, | ||
139 | struct sk_buff *, | ||
140 | u32 *); | ||
141 | |||
142 | /* clear connection security */ | ||
143 | void (*clear)(struct rxrpc_connection *); | ||
144 | }; | ||
145 | |||
146 | /* | ||
147 | * RxRPC local transport endpoint definition | ||
148 | * - matched by local port, address and protocol type | ||
149 | */ | ||
150 | struct rxrpc_local { | ||
151 | struct socket *socket; /* my UDP socket */ | ||
152 | struct work_struct destroyer; /* endpoint destroyer */ | ||
153 | struct work_struct acceptor; /* incoming call processor */ | ||
154 | struct work_struct rejecter; /* packet reject writer */ | ||
155 | struct list_head services; /* services listening on this endpoint */ | ||
156 | struct list_head link; /* link in endpoint list */ | ||
157 | struct rw_semaphore defrag_sem; /* control re-enablement of IP DF bit */ | ||
158 | struct sk_buff_head accept_queue; /* incoming calls awaiting acceptance */ | ||
159 | struct sk_buff_head reject_queue; /* packets awaiting rejection */ | ||
160 | spinlock_t lock; /* access lock */ | ||
161 | rwlock_t services_lock; /* lock for services list */ | ||
162 | atomic_t usage; | ||
163 | int debug_id; /* debug ID for printks */ | ||
164 | volatile char error_rcvd; /* T if received ICMP error outstanding */ | ||
165 | struct sockaddr_rxrpc srx; /* local address */ | ||
166 | }; | ||
167 | |||
168 | /* | ||
169 | * RxRPC remote transport endpoint definition | ||
170 | * - matched by remote port, address and protocol type | ||
171 | * - holds the connection ID counter for connections between the two endpoints | ||
172 | */ | ||
173 | struct rxrpc_peer { | ||
174 | struct work_struct destroyer; /* peer destroyer */ | ||
175 | struct list_head link; /* link in master peer list */ | ||
176 | struct list_head error_targets; /* targets for net error distribution */ | ||
177 | spinlock_t lock; /* access lock */ | ||
178 | atomic_t usage; | ||
179 | unsigned if_mtu; /* interface MTU for this peer */ | ||
180 | unsigned mtu; /* network MTU for this peer */ | ||
181 | unsigned maxdata; /* data size (MTU - hdrsize) */ | ||
182 | unsigned short hdrsize; /* header size (IP + UDP + RxRPC) */ | ||
183 | int debug_id; /* debug ID for printks */ | ||
184 | int net_error; /* network error distributed */ | ||
185 | struct sockaddr_rxrpc srx; /* remote address */ | ||
186 | |||
187 | /* calculated RTT cache */ | ||
188 | #define RXRPC_RTT_CACHE_SIZE 32 | ||
189 | suseconds_t rtt; /* current RTT estimate (in uS) */ | ||
190 | unsigned rtt_point; /* next entry at which to insert */ | ||
191 | unsigned rtt_usage; /* amount of cache actually used */ | ||
192 | suseconds_t rtt_cache[RXRPC_RTT_CACHE_SIZE]; /* calculated RTT cache */ | ||
193 | }; | ||
194 | |||
195 | /* | ||
196 | * RxRPC point-to-point transport / connection manager definition | ||
197 | * - handles a bundle of connections between two endpoints | ||
198 | * - matched by { local, peer } | ||
199 | */ | ||
200 | struct rxrpc_transport { | ||
201 | struct rxrpc_local *local; /* local transport endpoint */ | ||
202 | struct rxrpc_peer *peer; /* remote transport endpoint */ | ||
203 | struct work_struct error_handler; /* network error distributor */ | ||
204 | struct rb_root bundles; /* client connection bundles on this transport */ | ||
205 | struct rb_root client_conns; /* client connections on this transport */ | ||
206 | struct rb_root server_conns; /* server connections on this transport */ | ||
207 | struct list_head link; /* link in master session list */ | ||
208 | struct sk_buff_head error_queue; /* error packets awaiting processing */ | ||
209 | time_t put_time; /* time at which to reap */ | ||
210 | spinlock_t client_lock; /* client connection allocation lock */ | ||
211 | rwlock_t conn_lock; /* lock for active/dead connections */ | ||
212 | atomic_t usage; | ||
213 | int debug_id; /* debug ID for printks */ | ||
214 | unsigned int conn_idcounter; /* connection ID counter (client) */ | ||
215 | }; | ||
216 | |||
217 | /* | ||
218 | * RxRPC client connection bundle | ||
219 | * - matched by { transport, service_id, key } | ||
220 | */ | ||
221 | struct rxrpc_conn_bundle { | ||
222 | struct rb_node node; /* node in transport's lookup tree */ | ||
223 | struct list_head unused_conns; /* unused connections in this bundle */ | ||
224 | struct list_head avail_conns; /* available connections in this bundle */ | ||
225 | struct list_head busy_conns; /* busy connections in this bundle */ | ||
226 | struct key *key; /* security for this bundle */ | ||
227 | wait_queue_head_t chanwait; /* wait for channel to become available */ | ||
228 | atomic_t usage; | ||
229 | int debug_id; /* debug ID for printks */ | ||
230 | unsigned short num_conns; /* number of connections in this bundle */ | ||
231 | __be16 service_id; /* service ID */ | ||
232 | uint8_t security_ix; /* security type */ | ||
233 | }; | ||
234 | |||
235 | /* | ||
236 | * RxRPC connection definition | ||
237 | * - matched by { transport, service_id, conn_id, direction, key } | ||
238 | * - each connection can only handle four simultaneous calls | ||
239 | */ | ||
240 | struct rxrpc_connection { | ||
241 | struct rxrpc_transport *trans; /* transport session */ | ||
242 | struct rxrpc_conn_bundle *bundle; /* connection bundle (client) */ | ||
243 | struct work_struct processor; /* connection event processor */ | ||
244 | struct rb_node node; /* node in transport's lookup tree */ | ||
245 | struct list_head link; /* link in master connection list */ | ||
246 | struct list_head bundle_link; /* link in bundle */ | ||
247 | struct rb_root calls; /* calls on this connection */ | ||
248 | struct sk_buff_head rx_queue; /* received conn-level packets */ | ||
249 | struct rxrpc_call *channels[RXRPC_MAXCALLS]; /* channels (active calls) */ | ||
250 | struct rxrpc_security *security; /* applied security module */ | ||
251 | struct key *key; /* security for this connection (client) */ | ||
252 | struct key *server_key; /* security for this service */ | ||
253 | struct crypto_blkcipher *cipher; /* encryption handle */ | ||
254 | struct rxrpc_crypt csum_iv; /* packet checksum base */ | ||
255 | unsigned long events; | ||
256 | #define RXRPC_CONN_CHALLENGE 0 /* send challenge packet */ | ||
257 | time_t put_time; /* time at which to reap */ | ||
258 | rwlock_t lock; /* access lock */ | ||
259 | spinlock_t state_lock; /* state-change lock */ | ||
260 | atomic_t usage; | ||
261 | u32 real_conn_id; /* connection ID (host-endian) */ | ||
262 | enum { /* current state of connection */ | ||
263 | RXRPC_CONN_UNUSED, /* - connection not yet attempted */ | ||
264 | RXRPC_CONN_CLIENT, /* - client connection */ | ||
265 | RXRPC_CONN_SERVER_UNSECURED, /* - server unsecured connection */ | ||
266 | RXRPC_CONN_SERVER_CHALLENGING, /* - server challenging for security */ | ||
267 | RXRPC_CONN_SERVER, /* - server secured connection */ | ||
268 | RXRPC_CONN_REMOTELY_ABORTED, /* - conn aborted by peer */ | ||
269 | RXRPC_CONN_LOCALLY_ABORTED, /* - conn aborted locally */ | ||
270 | RXRPC_CONN_NETWORK_ERROR, /* - conn terminated by network error */ | ||
271 | } state; | ||
272 | int error; /* error code for local abort */ | ||
273 | int debug_id; /* debug ID for printks */ | ||
274 | unsigned call_counter; /* call ID counter */ | ||
275 | atomic_t serial; /* packet serial number counter */ | ||
276 | atomic_t hi_serial; /* highest serial number received */ | ||
277 | u8 avail_calls; /* number of calls available */ | ||
278 | u8 size_align; /* data size alignment (for security) */ | ||
279 | u8 header_size; /* rxrpc + security header size */ | ||
280 | u8 security_size; /* security header size */ | ||
281 | u32 security_level; /* security level negotiated */ | ||
282 | u32 security_nonce; /* response re-use preventer */ | ||
283 | |||
284 | /* the following are all in net order */ | ||
285 | __be32 epoch; /* epoch of this connection */ | ||
286 | __be32 cid; /* connection ID */ | ||
287 | __be16 service_id; /* service ID */ | ||
288 | u8 security_ix; /* security type */ | ||
289 | u8 in_clientflag; /* RXRPC_CLIENT_INITIATED if we are server */ | ||
290 | u8 out_clientflag; /* RXRPC_CLIENT_INITIATED if we are client */ | ||
291 | }; | ||
292 | |||
293 | /* | ||
294 | * RxRPC call definition | ||
295 | * - matched by { connection, call_id } | ||
296 | */ | ||
297 | struct rxrpc_call { | ||
298 | struct rxrpc_connection *conn; /* connection carrying call */ | ||
299 | struct rxrpc_sock *socket; /* socket responsible */ | ||
300 | struct timer_list lifetimer; /* lifetime remaining on call */ | ||
301 | struct timer_list deadspan; /* reap timer for re-ACK'ing, etc */ | ||
302 | struct timer_list ack_timer; /* ACK generation timer */ | ||
303 | struct timer_list resend_timer; /* Tx resend timer */ | ||
304 | struct work_struct destroyer; /* call destroyer */ | ||
305 | struct work_struct processor; /* packet processor and ACK generator */ | ||
306 | struct list_head link; /* link in master call list */ | ||
307 | struct list_head error_link; /* link in error distribution list */ | ||
308 | struct list_head accept_link; /* calls awaiting acceptance */ | ||
309 | struct rb_node sock_node; /* node in socket call tree */ | ||
310 | struct rb_node conn_node; /* node in connection call tree */ | ||
311 | struct sk_buff_head rx_queue; /* received packets */ | ||
312 | struct sk_buff_head rx_oos_queue; /* packets received out of sequence */ | ||
313 | struct sk_buff *tx_pending; /* Tx socket buffer being filled */ | ||
314 | wait_queue_head_t tx_waitq; /* wait for Tx window space to become available */ | ||
315 | unsigned long user_call_ID; /* user-defined call ID */ | ||
316 | unsigned long creation_jif; /* time of call creation */ | ||
317 | unsigned long flags; | ||
318 | #define RXRPC_CALL_RELEASED 0 /* call has been released - no more message to userspace */ | ||
319 | #define RXRPC_CALL_TERMINAL_MSG 1 /* call has given the socket its final message */ | ||
320 | #define RXRPC_CALL_RCVD_LAST 2 /* all packets received */ | ||
321 | #define RXRPC_CALL_RUN_RTIMER 3 /* Tx resend timer started */ | ||
322 | #define RXRPC_CALL_TX_SOFT_ACK 4 /* sent some soft ACKs */ | ||
323 | #define RXRPC_CALL_PROC_BUSY 5 /* the processor is busy */ | ||
324 | #define RXRPC_CALL_INIT_ACCEPT 6 /* acceptance was initiated */ | ||
325 | #define RXRPC_CALL_HAS_USERID 7 /* has a user ID attached */ | ||
326 | #define RXRPC_CALL_EXPECT_OOS 8 /* expect out of sequence packets */ | ||
327 | unsigned long events; | ||
328 | #define RXRPC_CALL_RCVD_ACKALL 0 /* ACKALL or reply received */ | ||
329 | #define RXRPC_CALL_RCVD_BUSY 1 /* busy packet received */ | ||
330 | #define RXRPC_CALL_RCVD_ABORT 2 /* abort packet received */ | ||
331 | #define RXRPC_CALL_RCVD_ERROR 3 /* network error received */ | ||
332 | #define RXRPC_CALL_ACK_FINAL 4 /* need to generate final ACK (and release call) */ | ||
333 | #define RXRPC_CALL_ACK 5 /* need to generate ACK */ | ||
334 | #define RXRPC_CALL_REJECT_BUSY 6 /* need to generate busy message */ | ||
335 | #define RXRPC_CALL_ABORT 7 /* need to generate abort */ | ||
336 | #define RXRPC_CALL_CONN_ABORT 8 /* local connection abort generated */ | ||
337 | #define RXRPC_CALL_RESEND_TIMER 9 /* Tx resend timer expired */ | ||
338 | #define RXRPC_CALL_RESEND 10 /* Tx resend required */ | ||
339 | #define RXRPC_CALL_DRAIN_RX_OOS 11 /* drain the Rx out of sequence queue */ | ||
340 | #define RXRPC_CALL_LIFE_TIMER 12 /* call's lifetimer ran out */ | ||
341 | #define RXRPC_CALL_ACCEPTED 13 /* incoming call accepted by userspace app */ | ||
342 | #define RXRPC_CALL_SECURED 14 /* incoming call's connection is now secure */ | ||
343 | #define RXRPC_CALL_POST_ACCEPT 15 /* need to post an "accept?" message to the app */ | ||
344 | #define RXRPC_CALL_RELEASE 16 /* need to release the call's resources */ | ||
345 | |||
346 | spinlock_t lock; | ||
347 | rwlock_t state_lock; /* lock for state transition */ | ||
348 | atomic_t usage; | ||
349 | atomic_t sequence; /* Tx data packet sequence counter */ | ||
350 | u32 abort_code; /* local/remote abort code */ | ||
351 | enum { /* current state of call */ | ||
352 | RXRPC_CALL_CLIENT_SEND_REQUEST, /* - client sending request phase */ | ||
353 | RXRPC_CALL_CLIENT_AWAIT_REPLY, /* - client awaiting reply */ | ||
354 | RXRPC_CALL_CLIENT_RECV_REPLY, /* - client receiving reply phase */ | ||
355 | RXRPC_CALL_CLIENT_FINAL_ACK, /* - client sending final ACK phase */ | ||
356 | RXRPC_CALL_SERVER_SECURING, /* - server securing request connection */ | ||
357 | RXRPC_CALL_SERVER_ACCEPTING, /* - server accepting request */ | ||
358 | RXRPC_CALL_SERVER_RECV_REQUEST, /* - server receiving request */ | ||
359 | RXRPC_CALL_SERVER_ACK_REQUEST, /* - server pending ACK of request */ | ||
360 | RXRPC_CALL_SERVER_SEND_REPLY, /* - server sending reply */ | ||
361 | RXRPC_CALL_SERVER_AWAIT_ACK, /* - server awaiting final ACK */ | ||
362 | RXRPC_CALL_COMPLETE, /* - call completed */ | ||
363 | RXRPC_CALL_SERVER_BUSY, /* - call rejected by busy server */ | ||
364 | RXRPC_CALL_REMOTELY_ABORTED, /* - call aborted by peer */ | ||
365 | RXRPC_CALL_LOCALLY_ABORTED, /* - call aborted locally on error or close */ | ||
366 | RXRPC_CALL_NETWORK_ERROR, /* - call terminated by network error */ | ||
367 | RXRPC_CALL_DEAD, /* - call is dead */ | ||
368 | } state; | ||
369 | int debug_id; /* debug ID for printks */ | ||
370 | u8 channel; /* connection channel occupied by this call */ | ||
371 | |||
372 | /* transmission-phase ACK management */ | ||
373 | uint8_t acks_head; /* offset into window of first entry */ | ||
374 | uint8_t acks_tail; /* offset into window of last entry */ | ||
375 | uint8_t acks_winsz; /* size of un-ACK'd window */ | ||
376 | uint8_t acks_unacked; /* lowest unacked packet in last ACK received */ | ||
377 | int acks_latest; /* serial number of latest ACK received */ | ||
378 | rxrpc_seq_t acks_hard; /* highest definitively ACK'd msg seq */ | ||
379 | unsigned long *acks_window; /* sent packet window | ||
380 | * - elements are pointers with LSB set if ACK'd | ||
381 | */ | ||
382 | |||
383 | /* receive-phase ACK management */ | ||
384 | rxrpc_seq_t rx_data_expect; /* next data seq ID expected to be received */ | ||
385 | rxrpc_seq_t rx_data_post; /* next data seq ID expected to be posted */ | ||
386 | rxrpc_seq_t rx_data_recv; /* last data seq ID encountered by recvmsg */ | ||
387 | rxrpc_seq_t rx_data_eaten; /* last data seq ID consumed by recvmsg */ | ||
388 | rxrpc_seq_t rx_first_oos; /* first packet in rx_oos_queue (or 0) */ | ||
389 | rxrpc_seq_t ackr_win_top; /* top of ACK window (rx_data_eaten is bottom) */ | ||
390 | rxrpc_seq_net_t ackr_prev_seq; /* previous sequence number received */ | ||
391 | uint8_t ackr_reason; /* reason to ACK */ | ||
392 | __be32 ackr_serial; /* serial of packet being ACK'd */ | ||
393 | atomic_t ackr_not_idle; /* number of packets in Rx queue */ | ||
394 | |||
395 | /* received packet records, 1 bit per record */ | ||
396 | #define RXRPC_ACKR_WINDOW_ASZ DIV_ROUND_UP(RXRPC_MAXACKS, BITS_PER_LONG) | ||
397 | unsigned long ackr_window[RXRPC_ACKR_WINDOW_ASZ + 1]; | ||
398 | |||
399 | /* the following should all be in net order */ | ||
400 | __be32 cid; /* connection ID + channel index */ | ||
401 | __be32 call_id; /* call ID on connection */ | ||
402 | }; | ||
403 | |||
404 | /* | ||
405 | * RxRPC key for Kerberos (type-2 security) | ||
406 | */ | ||
407 | struct rxkad_key { | ||
408 | u16 security_index; /* RxRPC header security index */ | ||
409 | u16 ticket_len; /* length of ticket[] */ | ||
410 | u32 expiry; /* time at which expires */ | ||
411 | u32 kvno; /* key version number */ | ||
412 | u8 session_key[8]; /* DES session key */ | ||
413 | u8 ticket[0]; /* the encrypted ticket */ | ||
414 | }; | ||
415 | |||
416 | struct rxrpc_key_payload { | ||
417 | struct rxkad_key k; | ||
418 | }; | ||
419 | |||
420 | /* | ||
421 | * locally abort an RxRPC call | ||
422 | */ | ||
423 | static inline void rxrpc_abort_call(struct rxrpc_call *call, u32 abort_code) | ||
424 | { | ||
425 | write_lock_bh(&call->state_lock); | ||
426 | if (call->state < RXRPC_CALL_COMPLETE) { | ||
427 | call->abort_code = abort_code; | ||
428 | call->state = RXRPC_CALL_LOCALLY_ABORTED; | ||
429 | set_bit(RXRPC_CALL_ABORT, &call->events); | ||
430 | } | ||
431 | write_unlock_bh(&call->state_lock); | ||
432 | } | ||
433 | |||
434 | /* | ||
435 | * af_rxrpc.c | ||
436 | */ | ||
437 | extern atomic_t rxrpc_n_skbs; | ||
438 | extern __be32 rxrpc_epoch; | ||
439 | extern atomic_t rxrpc_debug_id; | ||
440 | extern struct workqueue_struct *rxrpc_workqueue; | ||
441 | |||
442 | /* | ||
443 | * ar-accept.c | ||
444 | */ | ||
445 | extern void rxrpc_accept_incoming_calls(struct work_struct *); | ||
446 | extern struct rxrpc_call *rxrpc_accept_call(struct rxrpc_sock *, | ||
447 | unsigned long); | ||
448 | extern int rxrpc_reject_call(struct rxrpc_sock *); | ||
449 | |||
450 | /* | ||
451 | * ar-ack.c | ||
452 | */ | ||
453 | extern void __rxrpc_propose_ACK(struct rxrpc_call *, uint8_t, __be32, bool); | ||
454 | extern void rxrpc_propose_ACK(struct rxrpc_call *, uint8_t, __be32, bool); | ||
455 | extern void rxrpc_process_call(struct work_struct *); | ||
456 | |||
457 | /* | ||
458 | * ar-call.c | ||
459 | */ | ||
460 | extern struct kmem_cache *rxrpc_call_jar; | ||
461 | extern struct list_head rxrpc_calls; | ||
462 | extern rwlock_t rxrpc_call_lock; | ||
463 | |||
464 | extern struct rxrpc_call *rxrpc_get_client_call(struct rxrpc_sock *, | ||
465 | struct rxrpc_transport *, | ||
466 | struct rxrpc_conn_bundle *, | ||
467 | unsigned long, int, gfp_t); | ||
468 | extern struct rxrpc_call *rxrpc_incoming_call(struct rxrpc_sock *, | ||
469 | struct rxrpc_connection *, | ||
470 | struct rxrpc_header *, gfp_t); | ||
471 | extern struct rxrpc_call *rxrpc_find_server_call(struct rxrpc_sock *, | ||
472 | unsigned long); | ||
473 | extern void rxrpc_release_call(struct rxrpc_call *); | ||
474 | extern void rxrpc_release_calls_on_socket(struct rxrpc_sock *); | ||
475 | extern void __rxrpc_put_call(struct rxrpc_call *); | ||
476 | extern void __exit rxrpc_destroy_all_calls(void); | ||
477 | |||
478 | /* | ||
479 | * ar-connection.c | ||
480 | */ | ||
481 | extern struct list_head rxrpc_connections; | ||
482 | extern rwlock_t rxrpc_connection_lock; | ||
483 | |||
484 | extern struct rxrpc_conn_bundle *rxrpc_get_bundle(struct rxrpc_sock *, | ||
485 | struct rxrpc_transport *, | ||
486 | struct key *, | ||
487 | __be16, gfp_t); | ||
488 | extern void rxrpc_put_bundle(struct rxrpc_transport *, | ||
489 | struct rxrpc_conn_bundle *); | ||
490 | extern int rxrpc_connect_call(struct rxrpc_sock *, struct rxrpc_transport *, | ||
491 | struct rxrpc_conn_bundle *, struct rxrpc_call *, | ||
492 | gfp_t); | ||
493 | extern void rxrpc_put_connection(struct rxrpc_connection *); | ||
494 | extern void __exit rxrpc_destroy_all_connections(void); | ||
495 | extern struct rxrpc_connection *rxrpc_find_connection(struct rxrpc_transport *, | ||
496 | struct rxrpc_header *); | ||
497 | extern struct rxrpc_connection * | ||
498 | rxrpc_incoming_connection(struct rxrpc_transport *, struct rxrpc_header *, | ||
499 | gfp_t); | ||
500 | |||
501 | /* | ||
502 | * ar-connevent.c | ||
503 | */ | ||
504 | extern void rxrpc_process_connection(struct work_struct *); | ||
505 | extern void rxrpc_reject_packet(struct rxrpc_local *, struct sk_buff *); | ||
506 | extern void rxrpc_reject_packets(struct work_struct *); | ||
507 | |||
508 | /* | ||
509 | * ar-error.c | ||
510 | */ | ||
511 | extern void rxrpc_UDP_error_report(struct sock *); | ||
512 | extern void rxrpc_UDP_error_handler(struct work_struct *); | ||
513 | |||
514 | /* | ||
515 | * ar-input.c | ||
516 | */ | ||
517 | extern unsigned long rxrpc_ack_timeout; | ||
518 | extern const char *rxrpc_pkts[]; | ||
519 | |||
520 | extern void rxrpc_data_ready(struct sock *, int); | ||
521 | extern int rxrpc_queue_rcv_skb(struct rxrpc_call *, struct sk_buff *, bool, | ||
522 | bool); | ||
523 | extern void rxrpc_fast_process_packet(struct rxrpc_call *, struct sk_buff *); | ||
524 | |||
525 | /* | ||
526 | * ar-local.c | ||
527 | */ | ||
528 | extern rwlock_t rxrpc_local_lock; | ||
529 | extern struct rxrpc_local *rxrpc_lookup_local(struct sockaddr_rxrpc *); | ||
530 | extern void rxrpc_put_local(struct rxrpc_local *); | ||
531 | extern void __exit rxrpc_destroy_all_locals(void); | ||
532 | |||
533 | /* | ||
534 | * ar-key.c | ||
535 | */ | ||
536 | extern struct key_type key_type_rxrpc; | ||
537 | extern struct key_type key_type_rxrpc_s; | ||
538 | |||
539 | extern int rxrpc_request_key(struct rxrpc_sock *, char __user *, int); | ||
540 | extern int rxrpc_server_keyring(struct rxrpc_sock *, char __user *, int); | ||
541 | extern int rxrpc_get_server_data_key(struct rxrpc_connection *, const void *, | ||
542 | time_t, u32); | ||
543 | |||
544 | /* | ||
545 | * ar-output.c | ||
546 | */ | ||
547 | extern int rxrpc_resend_timeout; | ||
548 | |||
549 | extern int rxrpc_send_packet(struct rxrpc_transport *, struct sk_buff *); | ||
550 | extern int rxrpc_client_sendmsg(struct kiocb *, struct rxrpc_sock *, | ||
551 | struct rxrpc_transport *, struct msghdr *, | ||
552 | size_t); | ||
553 | extern int rxrpc_server_sendmsg(struct kiocb *, struct rxrpc_sock *, | ||
554 | struct msghdr *, size_t); | ||
555 | |||
556 | /* | ||
557 | * ar-peer.c | ||
558 | */ | ||
559 | extern struct rxrpc_peer *rxrpc_get_peer(struct sockaddr_rxrpc *, gfp_t); | ||
560 | extern void rxrpc_put_peer(struct rxrpc_peer *); | ||
561 | extern struct rxrpc_peer *rxrpc_find_peer(struct rxrpc_local *, | ||
562 | __be32, __be16); | ||
563 | extern void __exit rxrpc_destroy_all_peers(void); | ||
564 | |||
565 | /* | ||
566 | * ar-proc.c | ||
567 | */ | ||
568 | extern const char *rxrpc_call_states[]; | ||
569 | extern struct file_operations rxrpc_call_seq_fops; | ||
570 | extern struct file_operations rxrpc_connection_seq_fops; | ||
571 | |||
572 | /* | ||
573 | * ar-recvmsg.c | ||
574 | */ | ||
575 | extern void rxrpc_remove_user_ID(struct rxrpc_sock *, struct rxrpc_call *); | ||
576 | extern int rxrpc_recvmsg(struct kiocb *, struct socket *, struct msghdr *, | ||
577 | size_t, int); | ||
578 | |||
579 | /* | ||
580 | * ar-security.c | ||
581 | */ | ||
582 | extern int rxrpc_register_security(struct rxrpc_security *); | ||
583 | extern void rxrpc_unregister_security(struct rxrpc_security *); | ||
584 | extern int rxrpc_init_client_conn_security(struct rxrpc_connection *); | ||
585 | extern int rxrpc_init_server_conn_security(struct rxrpc_connection *); | ||
586 | extern int rxrpc_secure_packet(const struct rxrpc_call *, struct sk_buff *, | ||
587 | size_t, void *); | ||
588 | extern int rxrpc_verify_packet(const struct rxrpc_call *, struct sk_buff *, | ||
589 | u32 *); | ||
590 | extern void rxrpc_clear_conn_security(struct rxrpc_connection *); | ||
591 | |||
592 | /* | ||
593 | * ar-skbuff.c | ||
594 | */ | ||
595 | extern void rxrpc_packet_destructor(struct sk_buff *); | ||
596 | |||
597 | /* | ||
598 | * ar-transport.c | ||
599 | */ | ||
600 | extern struct rxrpc_transport *rxrpc_get_transport(struct rxrpc_local *, | ||
601 | struct rxrpc_peer *, | ||
602 | gfp_t); | ||
603 | extern void rxrpc_put_transport(struct rxrpc_transport *); | ||
604 | extern void __exit rxrpc_destroy_all_transports(void); | ||
605 | extern struct rxrpc_transport *rxrpc_find_transport(struct rxrpc_local *, | ||
606 | struct rxrpc_peer *); | ||
607 | |||
608 | /* | ||
609 | * debug tracing | ||
610 | */ | ||
611 | extern unsigned rxrpc_debug; | ||
612 | |||
613 | #define dbgprintk(FMT,...) \ | ||
614 | printk("[%x%-6.6s] "FMT"\n", smp_processor_id(), current->comm ,##__VA_ARGS__) | ||
615 | |||
616 | /* make sure we maintain the format strings, even when debugging is disabled */ | ||
617 | static inline __attribute__((format(printf,1,2))) | ||
618 | void _dbprintk(const char *fmt, ...) | ||
619 | { | ||
620 | } | ||
621 | |||
622 | #define kenter(FMT,...) dbgprintk("==> %s("FMT")",__FUNCTION__ ,##__VA_ARGS__) | ||
623 | #define kleave(FMT,...) dbgprintk("<== %s()"FMT"",__FUNCTION__ ,##__VA_ARGS__) | ||
624 | #define kdebug(FMT,...) dbgprintk(" "FMT ,##__VA_ARGS__) | ||
625 | #define kproto(FMT,...) dbgprintk("### "FMT ,##__VA_ARGS__) | ||
626 | #define knet(FMT,...) dbgprintk("@@@ "FMT ,##__VA_ARGS__) | ||
627 | |||
628 | |||
629 | #if defined(__KDEBUG) | ||
630 | #define _enter(FMT,...) kenter(FMT,##__VA_ARGS__) | ||
631 | #define _leave(FMT,...) kleave(FMT,##__VA_ARGS__) | ||
632 | #define _debug(FMT,...) kdebug(FMT,##__VA_ARGS__) | ||
633 | #define _proto(FMT,...) kproto(FMT,##__VA_ARGS__) | ||
634 | #define _net(FMT,...) knet(FMT,##__VA_ARGS__) | ||
635 | |||
636 | #elif defined(CONFIG_AF_RXRPC_DEBUG) | ||
637 | #define RXRPC_DEBUG_KENTER 0x01 | ||
638 | #define RXRPC_DEBUG_KLEAVE 0x02 | ||
639 | #define RXRPC_DEBUG_KDEBUG 0x04 | ||
640 | #define RXRPC_DEBUG_KPROTO 0x08 | ||
641 | #define RXRPC_DEBUG_KNET 0x10 | ||
642 | |||
643 | #define _enter(FMT,...) \ | ||
644 | do { \ | ||
645 | if (unlikely(rxrpc_debug & RXRPC_DEBUG_KENTER)) \ | ||
646 | kenter(FMT,##__VA_ARGS__); \ | ||
647 | } while (0) | ||
648 | |||
649 | #define _leave(FMT,...) \ | ||
650 | do { \ | ||
651 | if (unlikely(rxrpc_debug & RXRPC_DEBUG_KLEAVE)) \ | ||
652 | kleave(FMT,##__VA_ARGS__); \ | ||
653 | } while (0) | ||
654 | |||
655 | #define _debug(FMT,...) \ | ||
656 | do { \ | ||
657 | if (unlikely(rxrpc_debug & RXRPC_DEBUG_KDEBUG)) \ | ||
658 | kdebug(FMT,##__VA_ARGS__); \ | ||
659 | } while (0) | ||
660 | |||
661 | #define _proto(FMT,...) \ | ||
662 | do { \ | ||
663 | if (unlikely(rxrpc_debug & RXRPC_DEBUG_KPROTO)) \ | ||
664 | kproto(FMT,##__VA_ARGS__); \ | ||
665 | } while (0) | ||
666 | |||
667 | #define _net(FMT,...) \ | ||
668 | do { \ | ||
669 | if (unlikely(rxrpc_debug & RXRPC_DEBUG_KNET)) \ | ||
670 | knet(FMT,##__VA_ARGS__); \ | ||
671 | } while (0) | ||
672 | |||
673 | #else | ||
674 | #define _enter(FMT,...) _dbprintk("==> %s("FMT")",__FUNCTION__ ,##__VA_ARGS__) | ||
675 | #define _leave(FMT,...) _dbprintk("<== %s()"FMT"",__FUNCTION__ ,##__VA_ARGS__) | ||
676 | #define _debug(FMT,...) _dbprintk(" "FMT ,##__VA_ARGS__) | ||
677 | #define _proto(FMT,...) _dbprintk("### "FMT ,##__VA_ARGS__) | ||
678 | #define _net(FMT,...) _dbprintk("@@@ "FMT ,##__VA_ARGS__) | ||
679 | #endif | ||
680 | |||
681 | /* | ||
682 | * debug assertion checking | ||
683 | */ | ||
684 | #if 1 // defined(__KDEBUGALL) | ||
685 | |||
686 | #define ASSERT(X) \ | ||
687 | do { \ | ||
688 | if (unlikely(!(X))) { \ | ||
689 | printk(KERN_ERR "\n"); \ | ||
690 | printk(KERN_ERR "RxRPC: Assertion failed\n"); \ | ||
691 | BUG(); \ | ||
692 | } \ | ||
693 | } while(0) | ||
694 | |||
695 | #define ASSERTCMP(X, OP, Y) \ | ||
696 | do { \ | ||
697 | if (unlikely(!((X) OP (Y)))) { \ | ||
698 | printk(KERN_ERR "\n"); \ | ||
699 | printk(KERN_ERR "RxRPC: Assertion failed\n"); \ | ||
700 | printk(KERN_ERR "%lu " #OP " %lu is false\n", \ | ||
701 | (unsigned long)(X), (unsigned long)(Y)); \ | ||
702 | printk(KERN_ERR "0x%lx " #OP " 0x%lx is false\n", \ | ||
703 | (unsigned long)(X), (unsigned long)(Y)); \ | ||
704 | BUG(); \ | ||
705 | } \ | ||
706 | } while(0) | ||
707 | |||
708 | #define ASSERTIF(C, X) \ | ||
709 | do { \ | ||
710 | if (unlikely((C) && !(X))) { \ | ||
711 | printk(KERN_ERR "\n"); \ | ||
712 | printk(KERN_ERR "RxRPC: Assertion failed\n"); \ | ||
713 | BUG(); \ | ||
714 | } \ | ||
715 | } while(0) | ||
716 | |||
717 | #define ASSERTIFCMP(C, X, OP, Y) \ | ||
718 | do { \ | ||
719 | if (unlikely((C) && !((X) OP (Y)))) { \ | ||
720 | printk(KERN_ERR "\n"); \ | ||
721 | printk(KERN_ERR "RxRPC: Assertion failed\n"); \ | ||
722 | printk(KERN_ERR "%lu " #OP " %lu is false\n", \ | ||
723 | (unsigned long)(X), (unsigned long)(Y)); \ | ||
724 | printk(KERN_ERR "0x%lx " #OP " 0x%lx is false\n", \ | ||
725 | (unsigned long)(X), (unsigned long)(Y)); \ | ||
726 | BUG(); \ | ||
727 | } \ | ||
728 | } while(0) | ||
729 | |||
730 | #else | ||
731 | |||
732 | #define ASSERT(X) \ | ||
733 | do { \ | ||
734 | } while(0) | ||
735 | |||
736 | #define ASSERTCMP(X, OP, Y) \ | ||
737 | do { \ | ||
738 | } while(0) | ||
739 | |||
740 | #define ASSERTIF(C, X) \ | ||
741 | do { \ | ||
742 | } while(0) | ||
743 | |||
744 | #define ASSERTIFCMP(C, X, OP, Y) \ | ||
745 | do { \ | ||
746 | } while(0) | ||
747 | |||
748 | #endif /* __KDEBUGALL */ | ||
749 | |||
750 | /* | ||
751 | * socket buffer accounting / leak finding | ||
752 | */ | ||
753 | static inline void __rxrpc_new_skb(struct sk_buff *skb, const char *fn) | ||
754 | { | ||
755 | //_net("new skb %p %s [%d]", skb, fn, atomic_read(&rxrpc_n_skbs)); | ||
756 | //atomic_inc(&rxrpc_n_skbs); | ||
757 | } | ||
758 | |||
759 | #define rxrpc_new_skb(skb) __rxrpc_new_skb((skb), __func__) | ||
760 | |||
761 | static inline void __rxrpc_kill_skb(struct sk_buff *skb, const char *fn) | ||
762 | { | ||
763 | //_net("kill skb %p %s [%d]", skb, fn, atomic_read(&rxrpc_n_skbs)); | ||
764 | //atomic_dec(&rxrpc_n_skbs); | ||
765 | } | ||
766 | |||
767 | #define rxrpc_kill_skb(skb) __rxrpc_kill_skb((skb), __func__) | ||
768 | |||
769 | static inline void __rxrpc_free_skb(struct sk_buff *skb, const char *fn) | ||
770 | { | ||
771 | if (skb) { | ||
772 | CHECK_SLAB_OKAY(&skb->users); | ||
773 | //_net("free skb %p %s [%d]", | ||
774 | // skb, fn, atomic_read(&rxrpc_n_skbs)); | ||
775 | //atomic_dec(&rxrpc_n_skbs); | ||
776 | kfree_skb(skb); | ||
777 | } | ||
778 | } | ||
779 | |||
780 | #define rxrpc_free_skb(skb) __rxrpc_free_skb((skb), __func__) | ||
781 | |||
782 | static inline void rxrpc_purge_queue(struct sk_buff_head *list) | ||
783 | { | ||
784 | struct sk_buff *skb; | ||
785 | while ((skb = skb_dequeue((list))) != NULL) | ||
786 | rxrpc_free_skb(skb); | ||
787 | } | ||
788 | |||
789 | static inline void __rxrpc_get_local(struct rxrpc_local *local, const char *f) | ||
790 | { | ||
791 | CHECK_SLAB_OKAY(&local->usage); | ||
792 | if (atomic_inc_return(&local->usage) == 1) | ||
793 | printk("resurrected (%s)\n", f); | ||
794 | } | ||
795 | |||
796 | #define rxrpc_get_local(LOCAL) __rxrpc_get_local((LOCAL), __func__) | ||
797 | |||
798 | #define rxrpc_get_call(CALL) \ | ||
799 | do { \ | ||
800 | CHECK_SLAB_OKAY(&(CALL)->usage); \ | ||
801 | if (atomic_inc_return(&(CALL)->usage) == 1) \ | ||
802 | BUG(); \ | ||
803 | } while(0) | ||
804 | |||
805 | #define rxrpc_put_call(CALL) \ | ||
806 | do { \ | ||
807 | __rxrpc_put_call(CALL); \ | ||
808 | } while(0) | ||
diff --git a/net/rxrpc/ar-key.c b/net/rxrpc/ar-key.c new file mode 100644 index 000000000000..7e049ff6ae60 --- /dev/null +++ b/net/rxrpc/ar-key.c | |||
@@ -0,0 +1,334 @@ | |||
1 | /* RxRPC key management | ||
2 | * | ||
3 | * Copyright (C) 2007 Red Hat, Inc. All Rights Reserved. | ||
4 | * Written by David Howells (dhowells@redhat.com) | ||
5 | * | ||
6 | * This program is free software; you can redistribute it and/or | ||
7 | * modify it under the terms of the GNU General Public License | ||
8 | * as published by the Free Software Foundation; either version | ||
9 | * 2 of the License, or (at your option) any later version. | ||
10 | * | ||
11 | * RxRPC keys should have a description of describing their purpose: | ||
12 | * "afs@CAMBRIDGE.REDHAT.COM> | ||
13 | */ | ||
14 | |||
15 | #include <linux/module.h> | ||
16 | #include <linux/net.h> | ||
17 | #include <linux/skbuff.h> | ||
18 | #include <linux/key.h> | ||
19 | #include <linux/crypto.h> | ||
20 | #include <net/sock.h> | ||
21 | #include <net/af_rxrpc.h> | ||
22 | #include <keys/rxrpc-type.h> | ||
23 | #include <keys/user-type.h> | ||
24 | #include "ar-internal.h" | ||
25 | |||
26 | static int rxrpc_instantiate(struct key *, const void *, size_t); | ||
27 | static int rxrpc_instantiate_s(struct key *, const void *, size_t); | ||
28 | static void rxrpc_destroy(struct key *); | ||
29 | static void rxrpc_destroy_s(struct key *); | ||
30 | static void rxrpc_describe(const struct key *, struct seq_file *); | ||
31 | |||
32 | /* | ||
33 | * rxrpc defined keys take an arbitrary string as the description and an | ||
34 | * arbitrary blob of data as the payload | ||
35 | */ | ||
36 | struct key_type key_type_rxrpc = { | ||
37 | .name = "rxrpc", | ||
38 | .instantiate = rxrpc_instantiate, | ||
39 | .match = user_match, | ||
40 | .destroy = rxrpc_destroy, | ||
41 | .describe = rxrpc_describe, | ||
42 | }; | ||
43 | |||
44 | EXPORT_SYMBOL(key_type_rxrpc); | ||
45 | |||
46 | /* | ||
47 | * rxrpc server defined keys take "<serviceId>:<securityIndex>" as the | ||
48 | * description and an 8-byte decryption key as the payload | ||
49 | */ | ||
50 | struct key_type key_type_rxrpc_s = { | ||
51 | .name = "rxrpc_s", | ||
52 | .instantiate = rxrpc_instantiate_s, | ||
53 | .match = user_match, | ||
54 | .destroy = rxrpc_destroy_s, | ||
55 | .describe = rxrpc_describe, | ||
56 | }; | ||
57 | |||
58 | /* | ||
59 | * instantiate an rxrpc defined key | ||
60 | * data should be of the form: | ||
61 | * OFFSET LEN CONTENT | ||
62 | * 0 4 key interface version number | ||
63 | * 4 2 security index (type) | ||
64 | * 6 2 ticket length | ||
65 | * 8 4 key expiry time (time_t) | ||
66 | * 12 4 kvno | ||
67 | * 16 8 session key | ||
68 | * 24 [len] ticket | ||
69 | * | ||
70 | * if no data is provided, then a no-security key is made | ||
71 | */ | ||
72 | static int rxrpc_instantiate(struct key *key, const void *data, size_t datalen) | ||
73 | { | ||
74 | const struct rxkad_key *tsec; | ||
75 | struct rxrpc_key_payload *upayload; | ||
76 | size_t plen; | ||
77 | u32 kver; | ||
78 | int ret; | ||
79 | |||
80 | _enter("{%x},,%zu", key_serial(key), datalen); | ||
81 | |||
82 | /* handle a no-security key */ | ||
83 | if (!data && datalen == 0) | ||
84 | return 0; | ||
85 | |||
86 | /* get the key interface version number */ | ||
87 | ret = -EINVAL; | ||
88 | if (datalen <= 4 || !data) | ||
89 | goto error; | ||
90 | memcpy(&kver, data, sizeof(kver)); | ||
91 | data += sizeof(kver); | ||
92 | datalen -= sizeof(kver); | ||
93 | |||
94 | _debug("KEY I/F VERSION: %u", kver); | ||
95 | |||
96 | ret = -EKEYREJECTED; | ||
97 | if (kver != 1) | ||
98 | goto error; | ||
99 | |||
100 | /* deal with a version 1 key */ | ||
101 | ret = -EINVAL; | ||
102 | if (datalen < sizeof(*tsec)) | ||
103 | goto error; | ||
104 | |||
105 | tsec = data; | ||
106 | if (datalen != sizeof(*tsec) + tsec->ticket_len) | ||
107 | goto error; | ||
108 | |||
109 | _debug("SCIX: %u", tsec->security_index); | ||
110 | _debug("TLEN: %u", tsec->ticket_len); | ||
111 | _debug("EXPY: %x", tsec->expiry); | ||
112 | _debug("KVNO: %u", tsec->kvno); | ||
113 | _debug("SKEY: %02x%02x%02x%02x%02x%02x%02x%02x", | ||
114 | tsec->session_key[0], tsec->session_key[1], | ||
115 | tsec->session_key[2], tsec->session_key[3], | ||
116 | tsec->session_key[4], tsec->session_key[5], | ||
117 | tsec->session_key[6], tsec->session_key[7]); | ||
118 | if (tsec->ticket_len >= 8) | ||
119 | _debug("TCKT: %02x%02x%02x%02x%02x%02x%02x%02x", | ||
120 | tsec->ticket[0], tsec->ticket[1], | ||
121 | tsec->ticket[2], tsec->ticket[3], | ||
122 | tsec->ticket[4], tsec->ticket[5], | ||
123 | tsec->ticket[6], tsec->ticket[7]); | ||
124 | |||
125 | ret = -EPROTONOSUPPORT; | ||
126 | if (tsec->security_index != 2) | ||
127 | goto error; | ||
128 | |||
129 | key->type_data.x[0] = tsec->security_index; | ||
130 | |||
131 | plen = sizeof(*upayload) + tsec->ticket_len; | ||
132 | ret = key_payload_reserve(key, plen); | ||
133 | if (ret < 0) | ||
134 | goto error; | ||
135 | |||
136 | ret = -ENOMEM; | ||
137 | upayload = kmalloc(plen, GFP_KERNEL); | ||
138 | if (!upayload) | ||
139 | goto error; | ||
140 | |||
141 | /* attach the data */ | ||
142 | memcpy(&upayload->k, tsec, sizeof(*tsec)); | ||
143 | memcpy(&upayload->k.ticket, (void *)tsec + sizeof(*tsec), | ||
144 | tsec->ticket_len); | ||
145 | key->payload.data = upayload; | ||
146 | key->expiry = tsec->expiry; | ||
147 | ret = 0; | ||
148 | |||
149 | error: | ||
150 | return ret; | ||
151 | } | ||
152 | |||
153 | /* | ||
154 | * instantiate a server secret key | ||
155 | * data should be a pointer to the 8-byte secret key | ||
156 | */ | ||
157 | static int rxrpc_instantiate_s(struct key *key, const void *data, | ||
158 | size_t datalen) | ||
159 | { | ||
160 | struct crypto_blkcipher *ci; | ||
161 | |||
162 | _enter("{%x},,%zu", key_serial(key), datalen); | ||
163 | |||
164 | if (datalen != 8) | ||
165 | return -EINVAL; | ||
166 | |||
167 | memcpy(&key->type_data, data, 8); | ||
168 | |||
169 | ci = crypto_alloc_blkcipher("pcbc(des)", 0, CRYPTO_ALG_ASYNC); | ||
170 | if (IS_ERR(ci)) { | ||
171 | _leave(" = %ld", PTR_ERR(ci)); | ||
172 | return PTR_ERR(ci); | ||
173 | } | ||
174 | |||
175 | if (crypto_blkcipher_setkey(ci, data, 8) < 0) | ||
176 | BUG(); | ||
177 | |||
178 | key->payload.data = ci; | ||
179 | _leave(" = 0"); | ||
180 | return 0; | ||
181 | } | ||
182 | |||
183 | /* | ||
184 | * dispose of the data dangling from the corpse of a rxrpc key | ||
185 | */ | ||
186 | static void rxrpc_destroy(struct key *key) | ||
187 | { | ||
188 | kfree(key->payload.data); | ||
189 | } | ||
190 | |||
191 | /* | ||
192 | * dispose of the data dangling from the corpse of a rxrpc key | ||
193 | */ | ||
194 | static void rxrpc_destroy_s(struct key *key) | ||
195 | { | ||
196 | if (key->payload.data) { | ||
197 | crypto_free_blkcipher(key->payload.data); | ||
198 | key->payload.data = NULL; | ||
199 | } | ||
200 | } | ||
201 | |||
202 | /* | ||
203 | * describe the rxrpc key | ||
204 | */ | ||
205 | static void rxrpc_describe(const struct key *key, struct seq_file *m) | ||
206 | { | ||
207 | seq_puts(m, key->description); | ||
208 | } | ||
209 | |||
210 | /* | ||
211 | * grab the security key for a socket | ||
212 | */ | ||
213 | int rxrpc_request_key(struct rxrpc_sock *rx, char __user *optval, int optlen) | ||
214 | { | ||
215 | struct key *key; | ||
216 | char *description; | ||
217 | |||
218 | _enter(""); | ||
219 | |||
220 | if (optlen <= 0 || optlen > PAGE_SIZE - 1) | ||
221 | return -EINVAL; | ||
222 | |||
223 | description = kmalloc(optlen + 1, GFP_KERNEL); | ||
224 | if (!description) | ||
225 | return -ENOMEM; | ||
226 | |||
227 | if (copy_from_user(description, optval, optlen)) { | ||
228 | kfree(description); | ||
229 | return -EFAULT; | ||
230 | } | ||
231 | description[optlen] = 0; | ||
232 | |||
233 | key = request_key(&key_type_rxrpc, description, NULL); | ||
234 | if (IS_ERR(key)) { | ||
235 | kfree(description); | ||
236 | _leave(" = %ld", PTR_ERR(key)); | ||
237 | return PTR_ERR(key); | ||
238 | } | ||
239 | |||
240 | rx->key = key; | ||
241 | kfree(description); | ||
242 | _leave(" = 0 [key %x]", key->serial); | ||
243 | return 0; | ||
244 | } | ||
245 | |||
246 | /* | ||
247 | * grab the security keyring for a server socket | ||
248 | */ | ||
249 | int rxrpc_server_keyring(struct rxrpc_sock *rx, char __user *optval, | ||
250 | int optlen) | ||
251 | { | ||
252 | struct key *key; | ||
253 | char *description; | ||
254 | |||
255 | _enter(""); | ||
256 | |||
257 | if (optlen <= 0 || optlen > PAGE_SIZE - 1) | ||
258 | return -EINVAL; | ||
259 | |||
260 | description = kmalloc(optlen + 1, GFP_KERNEL); | ||
261 | if (!description) | ||
262 | return -ENOMEM; | ||
263 | |||
264 | if (copy_from_user(description, optval, optlen)) { | ||
265 | kfree(description); | ||
266 | return -EFAULT; | ||
267 | } | ||
268 | description[optlen] = 0; | ||
269 | |||
270 | key = request_key(&key_type_keyring, description, NULL); | ||
271 | if (IS_ERR(key)) { | ||
272 | kfree(description); | ||
273 | _leave(" = %ld", PTR_ERR(key)); | ||
274 | return PTR_ERR(key); | ||
275 | } | ||
276 | |||
277 | rx->securities = key; | ||
278 | kfree(description); | ||
279 | _leave(" = 0 [key %x]", key->serial); | ||
280 | return 0; | ||
281 | } | ||
282 | |||
283 | /* | ||
284 | * generate a server data key | ||
285 | */ | ||
286 | int rxrpc_get_server_data_key(struct rxrpc_connection *conn, | ||
287 | const void *session_key, | ||
288 | time_t expiry, | ||
289 | u32 kvno) | ||
290 | { | ||
291 | struct key *key; | ||
292 | int ret; | ||
293 | |||
294 | struct { | ||
295 | u32 kver; | ||
296 | struct rxkad_key tsec; | ||
297 | } data; | ||
298 | |||
299 | _enter(""); | ||
300 | |||
301 | key = key_alloc(&key_type_rxrpc, "x", 0, 0, current, 0, | ||
302 | KEY_ALLOC_NOT_IN_QUOTA); | ||
303 | if (IS_ERR(key)) { | ||
304 | _leave(" = -ENOMEM [alloc %ld]", PTR_ERR(key)); | ||
305 | return -ENOMEM; | ||
306 | } | ||
307 | |||
308 | _debug("key %d", key_serial(key)); | ||
309 | |||
310 | data.kver = 1; | ||
311 | data.tsec.security_index = 2; | ||
312 | data.tsec.ticket_len = 0; | ||
313 | data.tsec.expiry = expiry; | ||
314 | data.tsec.kvno = 0; | ||
315 | |||
316 | memcpy(&data.tsec.session_key, session_key, | ||
317 | sizeof(data.tsec.session_key)); | ||
318 | |||
319 | ret = key_instantiate_and_link(key, &data, sizeof(data), NULL, NULL); | ||
320 | if (ret < 0) | ||
321 | goto error; | ||
322 | |||
323 | conn->key = key; | ||
324 | _leave(" = 0 [%d]", key_serial(key)); | ||
325 | return 0; | ||
326 | |||
327 | error: | ||
328 | key_revoke(key); | ||
329 | key_put(key); | ||
330 | _leave(" = -ENOMEM [ins %d]", ret); | ||
331 | return -ENOMEM; | ||
332 | } | ||
333 | |||
334 | EXPORT_SYMBOL(rxrpc_get_server_data_key); | ||
diff --git a/net/rxrpc/ar-local.c b/net/rxrpc/ar-local.c new file mode 100644 index 000000000000..fe03f71f17da --- /dev/null +++ b/net/rxrpc/ar-local.c | |||
@@ -0,0 +1,309 @@ | |||
1 | /* AF_RXRPC local endpoint management | ||
2 | * | ||
3 | * Copyright (C) 2007 Red Hat, Inc. All Rights Reserved. | ||
4 | * Written by David Howells (dhowells@redhat.com) | ||
5 | * | ||
6 | * This program is free software; you can redistribute it and/or | ||
7 | * modify it under the terms of the GNU General Public License | ||
8 | * as published by the Free Software Foundation; either version | ||
9 | * 2 of the License, or (at your option) any later version. | ||
10 | */ | ||
11 | |||
12 | #include <linux/module.h> | ||
13 | #include <linux/net.h> | ||
14 | #include <linux/skbuff.h> | ||
15 | #include <net/sock.h> | ||
16 | #include <net/af_rxrpc.h> | ||
17 | #include "ar-internal.h" | ||
18 | |||
19 | static LIST_HEAD(rxrpc_locals); | ||
20 | DEFINE_RWLOCK(rxrpc_local_lock); | ||
21 | static DECLARE_RWSEM(rxrpc_local_sem); | ||
22 | static DECLARE_WAIT_QUEUE_HEAD(rxrpc_local_wq); | ||
23 | |||
24 | static void rxrpc_destroy_local(struct work_struct *work); | ||
25 | |||
26 | /* | ||
27 | * allocate a new local | ||
28 | */ | ||
29 | static | ||
30 | struct rxrpc_local *rxrpc_alloc_local(struct sockaddr_rxrpc *srx) | ||
31 | { | ||
32 | struct rxrpc_local *local; | ||
33 | |||
34 | local = kzalloc(sizeof(struct rxrpc_local), GFP_KERNEL); | ||
35 | if (local) { | ||
36 | INIT_WORK(&local->destroyer, &rxrpc_destroy_local); | ||
37 | INIT_WORK(&local->acceptor, &rxrpc_accept_incoming_calls); | ||
38 | INIT_WORK(&local->rejecter, &rxrpc_reject_packets); | ||
39 | INIT_LIST_HEAD(&local->services); | ||
40 | INIT_LIST_HEAD(&local->link); | ||
41 | init_rwsem(&local->defrag_sem); | ||
42 | skb_queue_head_init(&local->accept_queue); | ||
43 | skb_queue_head_init(&local->reject_queue); | ||
44 | spin_lock_init(&local->lock); | ||
45 | rwlock_init(&local->services_lock); | ||
46 | atomic_set(&local->usage, 1); | ||
47 | local->debug_id = atomic_inc_return(&rxrpc_debug_id); | ||
48 | memcpy(&local->srx, srx, sizeof(*srx)); | ||
49 | } | ||
50 | |||
51 | _leave(" = %p", local); | ||
52 | return local; | ||
53 | } | ||
54 | |||
/*
 * create the local socket
 * - opens the UDP transport socket, binds it if an address was supplied,
 *   enables ICMP error reception and path-MTU discovery, then publishes the
 *   endpoint and hooks up the data/error callbacks
 * - must be called with rxrpc_local_sem writelocked
 */
static int rxrpc_create_local(struct rxrpc_local *local)
{
	struct sock *sock;
	int ret, opt;

	_enter("%p{%d}", local, local->srx.transport_type);

	/* create a socket to represent the local endpoint */
	ret = sock_create_kern(PF_INET, local->srx.transport_type, IPPROTO_UDP,
			       &local->socket);
	if (ret < 0) {
		_leave(" = %d [socket]", ret);
		return ret;
	}

	/* if a local address was supplied then bind it */
	if (local->srx.transport_len > sizeof(sa_family_t)) {
		_debug("bind");
		ret = kernel_bind(local->socket,
				  (struct sockaddr *) &local->srx.transport,
				  local->srx.transport_len);
		if (ret < 0) {
			_debug("bind failed");
			goto error;
		}
	}

	/* we want to receive ICMP errors */
	opt = 1;
	ret = kernel_setsockopt(local->socket, SOL_IP, IP_RECVERR,
				(char *) &opt, sizeof(opt));
	if (ret < 0) {
		_debug("setsockopt failed");
		goto error;
	}

	/* we want to set the don't fragment bit */
	opt = IP_PMTUDISC_DO;
	ret = kernel_setsockopt(local->socket, SOL_IP, IP_MTU_DISCOVER,
				(char *) &opt, sizeof(opt));
	if (ret < 0) {
		_debug("setsockopt failed");
		goto error;
	}

	/* publish on the global endpoint list; duplicates are prevented by
	 * the caller holding rxrpc_local_sem for writing */
	write_lock_bh(&rxrpc_local_lock);
	list_add(&local->link, &rxrpc_locals);
	write_unlock_bh(&rxrpc_local_lock);

	/* set the socket up */
	sock = local->socket->sk;
	sock->sk_user_data = local;
	sock->sk_data_ready = rxrpc_data_ready;
	sock->sk_error_report = rxrpc_UDP_error_report;
	_leave(" = 0");
	return 0;

error:
	/* the callbacks were never installed, so no packets can have been
	 * delivered to this endpoint yet; just tear the socket down */
	local->socket->ops->shutdown(local->socket, 2);
	local->socket->sk->sk_user_data = NULL;
	sock_release(local->socket);
	local->socket = NULL;

	_leave(" = %d", ret);
	return ret;
}
125 | |||
/*
 * create a new local endpoint using the specified UDP address
 * - reuses an existing endpoint with the same transport parameters if one is
 *   found, taking a usage ref on it; otherwise allocates and creates a new
 *   one
 * - returns the endpoint or an ERR_PTR
 */
struct rxrpc_local *rxrpc_lookup_local(struct sockaddr_rxrpc *srx)
{
	struct rxrpc_local *local;
	int ret;

	_enter("{%d,%u,%u.%u.%u.%u+%hu}",
	       srx->transport_type,
	       srx->transport.family,
	       NIPQUAD(srx->transport.sin.sin_addr),
	       ntohs(srx->transport.sin.sin_port));

	/* serialise endpoint creation/destruction; held across the search so
	 * that a concurrent lookup can't create a duplicate endpoint */
	down_write(&rxrpc_local_sem);

	/* see if we have a suitable local local endpoint already */
	read_lock_bh(&rxrpc_local_lock);

	list_for_each_entry(local, &rxrpc_locals, link) {
		_debug("CMP {%d,%u,%u.%u.%u.%u+%hu}",
		       local->srx.transport_type,
		       local->srx.transport.family,
		       NIPQUAD(local->srx.transport.sin.sin_addr),
		       ntohs(local->srx.transport.sin.sin_port));

		if (local->srx.transport_type != srx->transport_type ||
		    local->srx.transport.family != srx->transport.family)
			continue;

		switch (srx->transport.family) {
		case AF_INET:
			if (local->srx.transport.sin.sin_port !=
			    srx->transport.sin.sin_port)
				continue;
			if (memcmp(&local->srx.transport.sin.sin_addr,
				   &srx->transport.sin.sin_addr,
				   sizeof(struct in_addr)) != 0)
				continue;
			goto found_local;

		default:
			/* only AF_INET endpoints are ever added to the list,
			 * so anything else here is a bug */
			BUG();
		}
	}

	read_unlock_bh(&rxrpc_local_lock);

	/* we didn't find one, so we need to create one */
	local = rxrpc_alloc_local(srx);
	if (!local) {
		up_write(&rxrpc_local_sem);
		return ERR_PTR(-ENOMEM);
	}

	ret = rxrpc_create_local(local);
	if (ret < 0) {
		/* rxrpc_create_local() only publishes the endpoint on
		 * success, so a plain kfree() is safe here */
		up_write(&rxrpc_local_sem);
		kfree(local);
		_leave(" = %d", ret);
		return ERR_PTR(ret);
	}

	up_write(&rxrpc_local_sem);

	_net("LOCAL new %d {%d,%u,%u.%u.%u.%u+%hu}",
	     local->debug_id,
	     local->srx.transport_type,
	     local->srx.transport.family,
	     NIPQUAD(local->srx.transport.sin.sin_addr),
	     ntohs(local->srx.transport.sin.sin_port));

	_leave(" = %p [new]", local);
	return local;

found_local:
	/* take a usage ref before dropping the list lock so the endpoint
	 * can't be destroyed under us */
	rxrpc_get_local(local);
	read_unlock_bh(&rxrpc_local_lock);
	up_write(&rxrpc_local_sem);

	_net("LOCAL old %d {%d,%u,%u.%u.%u.%u+%hu}",
	     local->debug_id,
	     local->srx.transport_type,
	     local->srx.transport.family,
	     NIPQUAD(local->srx.transport.sin.sin_addr),
	     ntohs(local->srx.transport.sin.sin_port));

	_leave(" = %p [reuse]", local);
	return local;
}
216 | |||
/*
 * release a local endpoint
 * - queues the endpoint for destruction when the last usage ref is dropped
 */
void rxrpc_put_local(struct rxrpc_local *local)
{
	_enter("%p{u=%d}", local, atomic_read(&local->usage));

	ASSERTCMP(atomic_read(&local->usage), >, 0);

	/* to prevent a race, the decrement and the dequeue must be effectively
	 * atomic */
	write_lock_bh(&rxrpc_local_lock);
	if (unlikely(atomic_dec_and_test(&local->usage))) {
		_debug("destroy local");
		/* final ref gone: defer actual destruction to the work queue,
		 * as it needs to sleep and we may be in BH context */
		rxrpc_queue_work(&local->destroyer);
	}
	write_unlock_bh(&rxrpc_local_lock);
	_leave("");
}
236 | |||
237 | /* | ||
238 | * destroy a local endpoint | ||
239 | */ | ||
240 | static void rxrpc_destroy_local(struct work_struct *work) | ||
241 | { | ||
242 | struct rxrpc_local *local = | ||
243 | container_of(work, struct rxrpc_local, destroyer); | ||
244 | |||
245 | _enter("%p{%d}", local, atomic_read(&local->usage)); | ||
246 | |||
247 | down_write(&rxrpc_local_sem); | ||
248 | |||
249 | write_lock_bh(&rxrpc_local_lock); | ||
250 | if (atomic_read(&local->usage) > 0) { | ||
251 | write_unlock_bh(&rxrpc_local_lock); | ||
252 | up_read(&rxrpc_local_sem); | ||
253 | _leave(" [resurrected]"); | ||
254 | return; | ||
255 | } | ||
256 | |||
257 | list_del(&local->link); | ||
258 | local->socket->sk->sk_user_data = NULL; | ||
259 | write_unlock_bh(&rxrpc_local_lock); | ||
260 | |||
261 | downgrade_write(&rxrpc_local_sem); | ||
262 | |||
263 | ASSERT(list_empty(&local->services)); | ||
264 | ASSERT(!work_pending(&local->acceptor)); | ||
265 | ASSERT(!work_pending(&local->rejecter)); | ||
266 | |||
267 | /* finish cleaning up the local descriptor */ | ||
268 | rxrpc_purge_queue(&local->accept_queue); | ||
269 | rxrpc_purge_queue(&local->reject_queue); | ||
270 | local->socket->ops->shutdown(local->socket, 2); | ||
271 | sock_release(local->socket); | ||
272 | |||
273 | up_read(&rxrpc_local_sem); | ||
274 | |||
275 | _net("DESTROY LOCAL %d", local->debug_id); | ||
276 | kfree(local); | ||
277 | |||
278 | if (list_empty(&rxrpc_locals)) | ||
279 | wake_up_all(&rxrpc_local_wq); | ||
280 | |||
281 | _leave(""); | ||
282 | } | ||
283 | |||
284 | /* | ||
285 | * preemptively destroy all local local endpoint rather than waiting for | ||
286 | * them to be destroyed | ||
287 | */ | ||
288 | void __exit rxrpc_destroy_all_locals(void) | ||
289 | { | ||
290 | DECLARE_WAITQUEUE(myself,current); | ||
291 | |||
292 | _enter(""); | ||
293 | |||
294 | /* we simply have to wait for them to go away */ | ||
295 | if (!list_empty(&rxrpc_locals)) { | ||
296 | set_current_state(TASK_UNINTERRUPTIBLE); | ||
297 | add_wait_queue(&rxrpc_local_wq, &myself); | ||
298 | |||
299 | while (!list_empty(&rxrpc_locals)) { | ||
300 | schedule(); | ||
301 | set_current_state(TASK_UNINTERRUPTIBLE); | ||
302 | } | ||
303 | |||
304 | remove_wait_queue(&rxrpc_local_wq, &myself); | ||
305 | set_current_state(TASK_RUNNING); | ||
306 | } | ||
307 | |||
308 | _leave(""); | ||
309 | } | ||
diff --git a/net/rxrpc/ar-output.c b/net/rxrpc/ar-output.c new file mode 100644 index 000000000000..5cdde4a48ed1 --- /dev/null +++ b/net/rxrpc/ar-output.c | |||
@@ -0,0 +1,734 @@ | |||
1 | /* RxRPC packet transmission | ||
2 | * | ||
3 | * Copyright (C) 2007 Red Hat, Inc. All Rights Reserved. | ||
4 | * Written by David Howells (dhowells@redhat.com) | ||
5 | * | ||
6 | * This program is free software; you can redistribute it and/or | ||
7 | * modify it under the terms of the GNU General Public License | ||
8 | * as published by the Free Software Foundation; either version | ||
9 | * 2 of the License, or (at your option) any later version. | ||
10 | */ | ||
11 | |||
12 | #include <linux/net.h> | ||
13 | #include <linux/skbuff.h> | ||
14 | #include <linux/circ_buf.h> | ||
15 | #include <net/sock.h> | ||
16 | #include <net/af_rxrpc.h> | ||
17 | #include "ar-internal.h" | ||
18 | |||
19 | int rxrpc_resend_timeout = 4; | ||
20 | |||
21 | static int rxrpc_send_data(struct kiocb *iocb, | ||
22 | struct rxrpc_sock *rx, | ||
23 | struct rxrpc_call *call, | ||
24 | struct msghdr *msg, size_t len); | ||
25 | |||
26 | /* | ||
27 | * extract control messages from the sendmsg() control buffer | ||
28 | */ | ||
29 | static int rxrpc_sendmsg_cmsg(struct rxrpc_sock *rx, struct msghdr *msg, | ||
30 | unsigned long *user_call_ID, | ||
31 | enum rxrpc_command *command, | ||
32 | u32 *abort_code, | ||
33 | bool server) | ||
34 | { | ||
35 | struct cmsghdr *cmsg; | ||
36 | int len; | ||
37 | |||
38 | *command = RXRPC_CMD_SEND_DATA; | ||
39 | |||
40 | if (msg->msg_controllen == 0) | ||
41 | return -EINVAL; | ||
42 | |||
43 | for (cmsg = CMSG_FIRSTHDR(msg); cmsg; cmsg = CMSG_NXTHDR(msg, cmsg)) { | ||
44 | if (!CMSG_OK(msg, cmsg)) | ||
45 | return -EINVAL; | ||
46 | |||
47 | len = cmsg->cmsg_len - CMSG_ALIGN(sizeof(struct cmsghdr)); | ||
48 | _debug("CMSG %d, %d, %d", | ||
49 | cmsg->cmsg_level, cmsg->cmsg_type, len); | ||
50 | |||
51 | if (cmsg->cmsg_level != SOL_RXRPC) | ||
52 | continue; | ||
53 | |||
54 | switch (cmsg->cmsg_type) { | ||
55 | case RXRPC_USER_CALL_ID: | ||
56 | if (msg->msg_flags & MSG_CMSG_COMPAT) { | ||
57 | if (len != sizeof(u32)) | ||
58 | return -EINVAL; | ||
59 | *user_call_ID = *(u32 *) CMSG_DATA(cmsg); | ||
60 | } else { | ||
61 | if (len != sizeof(unsigned long)) | ||
62 | return -EINVAL; | ||
63 | *user_call_ID = *(unsigned long *) | ||
64 | CMSG_DATA(cmsg); | ||
65 | } | ||
66 | _debug("User Call ID %lx", *user_call_ID); | ||
67 | break; | ||
68 | |||
69 | case RXRPC_ABORT: | ||
70 | if (*command != RXRPC_CMD_SEND_DATA) | ||
71 | return -EINVAL; | ||
72 | *command = RXRPC_CMD_SEND_ABORT; | ||
73 | if (len != sizeof(*abort_code)) | ||
74 | return -EINVAL; | ||
75 | *abort_code = *(unsigned int *) CMSG_DATA(cmsg); | ||
76 | _debug("Abort %x", *abort_code); | ||
77 | if (*abort_code == 0) | ||
78 | return -EINVAL; | ||
79 | break; | ||
80 | |||
81 | case RXRPC_ACCEPT: | ||
82 | if (*command != RXRPC_CMD_SEND_DATA) | ||
83 | return -EINVAL; | ||
84 | *command = RXRPC_CMD_ACCEPT; | ||
85 | if (len != 0) | ||
86 | return -EINVAL; | ||
87 | if (!server) | ||
88 | return -EISCONN; | ||
89 | break; | ||
90 | |||
91 | default: | ||
92 | return -EINVAL; | ||
93 | } | ||
94 | } | ||
95 | |||
96 | _leave(" = 0"); | ||
97 | return 0; | ||
98 | } | ||
99 | |||
/*
 * abort a call, sending an ABORT packet to the peer
 * - marks the call locally aborted and kicks the call processor to transmit
 *   the ABORT packet
 */
static void rxrpc_send_abort(struct rxrpc_call *call, u32 abort_code)
{
	write_lock_bh(&call->state_lock);

	/* NOTE(review): the test is <= RXRPC_CALL_COMPLETE, so a call in the
	 * COMPLETE state itself can still be flipped to locally-aborted -
	 * presumably deliberate; confirm against the state enum ordering */
	if (call->state <= RXRPC_CALL_COMPLETE) {
		call->state = RXRPC_CALL_LOCALLY_ABORTED;
		call->abort_code = abort_code;
		set_bit(RXRPC_CALL_ABORT, &call->events);
		/* cancel pending resend/ACK timers and clear their events so
		 * that only the ABORT gets transmitted.
		 * NOTE(review): del_timer_sync() is called with state_lock
		 * held and BHs disabled - this relies on the timer handlers
		 * never taking state_lock; verify */
		del_timer_sync(&call->resend_timer);
		del_timer_sync(&call->ack_timer);
		clear_bit(RXRPC_CALL_RESEND_TIMER, &call->events);
		clear_bit(RXRPC_CALL_ACK, &call->events);
		clear_bit(RXRPC_CALL_RUN_RTIMER, &call->flags);
		/* get the call processor to send the ABORT packet */
		rxrpc_queue_call(call);
	}

	write_unlock_bh(&call->state_lock);
}
121 | |||
/*
 * send a message forming part of a client call through an RxRPC socket
 * - caller holds the socket locked
 * - the socket may be either a client socket or a server socket
 */
int rxrpc_client_sendmsg(struct kiocb *iocb, struct rxrpc_sock *rx,
			 struct rxrpc_transport *trans, struct msghdr *msg,
			 size_t len)
{
	struct rxrpc_conn_bundle *bundle;
	enum rxrpc_command cmd;
	struct rxrpc_call *call;
	unsigned long user_call_ID = 0;
	struct key *key;
	__be16 service_id;
	u32 abort_code = 0;
	int ret;

	_enter("");

	ASSERT(trans != NULL);

	/* parse the control data; on success ret is 0, which is also the
	 * value returned after a successful SEND_ABORT below */
	ret = rxrpc_sendmsg_cmsg(rx, msg, &user_call_ID, &cmd, &abort_code,
				 false);
	if (ret < 0)
		return ret;

	bundle = NULL;
	if (trans) {
		/* an explicit target in msg_name overrides the socket's
		 * default service ID */
		service_id = rx->service_id;
		if (msg->msg_name) {
			struct sockaddr_rxrpc *srx =
				(struct sockaddr_rxrpc *) msg->msg_name;
			service_id = htons(srx->srx_service);
		}
		key = rx->key;
		if (key && !rx->key->payload.data)
			key = NULL;
		bundle = rxrpc_get_bundle(rx, trans, key, service_id,
					  GFP_KERNEL);
		if (IS_ERR(bundle))
			return PTR_ERR(bundle);
	}

	/* NOTE(review): "abort_code == 0" is passed as a boolean argument -
	 * true unless an abort was requested; confirm this matches the
	 * corresponding parameter of rxrpc_get_client_call() */
	call = rxrpc_get_client_call(rx, trans, bundle, user_call_ID,
				     abort_code == 0, GFP_KERNEL);
	/* the call now holds its own ref on the bundle, so drop ours */
	if (trans)
		rxrpc_put_bundle(trans, bundle);
	if (IS_ERR(call)) {
		_leave(" = %ld", PTR_ERR(call));
		return PTR_ERR(call);
	}

	_debug("CALL %d USR %lx ST %d on CONN %p",
	       call->debug_id, call->user_call_ID, call->state, call->conn);

	if (call->state >= RXRPC_CALL_COMPLETE) {
		/* it's too late for this call */
		ret = -ESHUTDOWN;
	} else if (cmd == RXRPC_CMD_SEND_ABORT) {
		/* ret remains 0 from the cmsg parse above */
		rxrpc_send_abort(call, abort_code);
	} else if (cmd != RXRPC_CMD_SEND_DATA) {
		ret = -EINVAL;
	} else if (call->state != RXRPC_CALL_CLIENT_SEND_REQUEST) {
		/* request phase complete for this client call */
		ret = -EPROTO;
	} else {
		ret = rxrpc_send_data(iocb, rx, call, msg, len);
	}

	rxrpc_put_call(call);
	_leave(" = %d", ret);
	return ret;
}
196 | |||
/**
 * rxrpc_kernel_send_data - Allow a kernel service to send data on a call
 * @call: The call to send data through
 * @msg: The data to send
 * @len: The amount of data to send
 *
 * Allow a kernel service to send data on a call.  The call must be in a state
 * appropriate to sending data.  No control data should be supplied in @msg,
 * nor should an address be supplied.  MSG_MORE should be flagged if there's
 * more data to come, otherwise this data will end the transmission phase.
 */
int rxrpc_kernel_send_data(struct rxrpc_call *call, struct msghdr *msg,
			   size_t len)
{
	int ret;

	_enter("{%d,%s},", call->debug_id, rxrpc_call_states[call->state]);

	ASSERTCMP(msg->msg_name, ==, NULL);
	ASSERTCMP(msg->msg_control, ==, NULL);

	lock_sock(&call->socket->sk);

	_debug("CALL %d USR %lx ST %d on CONN %p",
	       call->debug_id, call->user_call_ID, call->state, call->conn);

	if (call->state >= RXRPC_CALL_COMPLETE) {
		ret = -ESHUTDOWN; /* it's too late for this call */
	} else if (call->state != RXRPC_CALL_CLIENT_SEND_REQUEST &&
		   call->state != RXRPC_CALL_SERVER_ACK_REQUEST &&
		   call->state != RXRPC_CALL_SERVER_SEND_REPLY) {
		ret = -EPROTO; /* request phase complete for this client call */
	} else {
		/* the data lives in kernel space, but rxrpc_send_data()
		 * copies it with user-space accessors, so temporarily widen
		 * the addressing limit to cover kernel addresses */
		mm_segment_t oldfs = get_fs();
		set_fs(KERNEL_DS);
		ret = rxrpc_send_data(NULL, call->socket, call, msg, len);
		set_fs(oldfs);
	}

	release_sock(&call->socket->sk);
	_leave(" = %d", ret);
	return ret;
}

EXPORT_SYMBOL(rxrpc_kernel_send_data);
242 | |||
243 | /* | ||
244 | * rxrpc_kernel_abort_call - Allow a kernel service to abort a call | ||
245 | * @call: The call to be aborted | ||
246 | * @abort_code: The abort code to stick into the ABORT packet | ||
247 | * | ||
248 | * Allow a kernel service to abort a call, if it's still in an abortable state. | ||
249 | */ | ||
250 | void rxrpc_kernel_abort_call(struct rxrpc_call *call, u32 abort_code) | ||
251 | { | ||
252 | _enter("{%d},%d", call->debug_id, abort_code); | ||
253 | |||
254 | lock_sock(&call->socket->sk); | ||
255 | |||
256 | _debug("CALL %d USR %lx ST %d on CONN %p", | ||
257 | call->debug_id, call->user_call_ID, call->state, call->conn); | ||
258 | |||
259 | if (call->state < RXRPC_CALL_COMPLETE) | ||
260 | rxrpc_send_abort(call, abort_code); | ||
261 | |||
262 | release_sock(&call->socket->sk); | ||
263 | _leave(""); | ||
264 | } | ||
265 | |||
266 | EXPORT_SYMBOL(rxrpc_kernel_abort_call); | ||
267 | |||
/*
 * send a message through a server socket
 * - caller holds the socket locked
 */
int rxrpc_server_sendmsg(struct kiocb *iocb, struct rxrpc_sock *rx,
			 struct msghdr *msg, size_t len)
{
	enum rxrpc_command cmd;
	struct rxrpc_call *call;
	unsigned long user_call_ID = 0;
	u32 abort_code = 0;
	int ret;

	_enter("");

	/* on success ret is 0 from here on, which the SEND_ABORT path below
	 * relies on as its return value */
	ret = rxrpc_sendmsg_cmsg(rx, msg, &user_call_ID, &cmd, &abort_code,
				 true);
	if (ret < 0)
		return ret;

	if (cmd == RXRPC_CMD_ACCEPT) {
		/* the ref was only needed to confirm the accept succeeded;
		 * the call stays attached to user_call_ID */
		call = rxrpc_accept_call(rx, user_call_ID);
		if (IS_ERR(call))
			return PTR_ERR(call);
		rxrpc_put_call(call);
		return 0;
	}

	call = rxrpc_find_server_call(rx, user_call_ID);
	if (!call)
		return -EBADSLT;
	if (call->state >= RXRPC_CALL_COMPLETE) {
		ret = -ESHUTDOWN;
		goto out;
	}

	switch (cmd) {
	case RXRPC_CMD_SEND_DATA:
		if (call->state != RXRPC_CALL_CLIENT_SEND_REQUEST &&
		    call->state != RXRPC_CALL_SERVER_ACK_REQUEST &&
		    call->state != RXRPC_CALL_SERVER_SEND_REPLY) {
			/* Tx phase not yet begun for this call */
			ret = -EPROTO;
			break;
		}

		ret = rxrpc_send_data(iocb, rx, call, msg, len);
		break;

	case RXRPC_CMD_SEND_ABORT:
		rxrpc_send_abort(call, abort_code);
		break;
	default:
		/* rxrpc_sendmsg_cmsg() only produces the commands above */
		BUG();
	}

out:
	rxrpc_put_call(call);
	_leave(" = %d", ret);
	return ret;
}
329 | |||
/*
 * send a packet through the transport endpoint
 * - tries a non-fragmenting send first for packets believed to fit in the
 *   path MTU, and falls back to a fragmentable send otherwise
 */
int rxrpc_send_packet(struct rxrpc_transport *trans, struct sk_buff *skb)
{
	struct kvec iov[1];
	struct msghdr msg;
	int ret, opt;

	_enter(",{%d}", skb->len);

	iov[0].iov_base = skb->head;
	iov[0].iov_len = skb->len;

	msg.msg_name = &trans->peer->srx.transport.sin;
	msg.msg_namelen = sizeof(trans->peer->srx.transport.sin);
	msg.msg_control = NULL;
	msg.msg_controllen = 0;
	msg.msg_flags = 0;

	/* send the packet with the don't fragment bit set if we currently
	 * think it's small enough */
	if (skb->len - sizeof(struct rxrpc_header) < trans->peer->maxdata) {
		/* taken for reading to exclude the fragmentable sender below,
		 * which flips IP_MTU_DISCOVER on the shared socket */
		down_read(&trans->local->defrag_sem);
		/* send the packet by UDP
		 * - returns -EMSGSIZE if UDP would have to fragment the packet
		 *   to go out of the interface
		 * - in which case, we'll have processed the ICMP error
		 *   message and update the peer record
		 */
		ret = kernel_sendmsg(trans->local->socket, &msg, iov, 1,
				     iov[0].iov_len);

		up_read(&trans->local->defrag_sem);
		if (ret == -EMSGSIZE)
			goto send_fragmentable;

		_leave(" = %d [%u]", ret, trans->peer->maxdata);
		return ret;
	}

	/* deliberate fall-through: an oversized packet goes straight to the
	 * fragmentable path */
send_fragmentable:
	/* attempt to send this message with fragmentation enabled */
	_debug("send fragment");

	/* exclude the non-fragmenting senders above whilst DF is disabled */
	down_write(&trans->local->defrag_sem);
	opt = IP_PMTUDISC_DONT;
	ret = kernel_setsockopt(trans->local->socket, SOL_IP, IP_MTU_DISCOVER,
				(char *) &opt, sizeof(opt));
	if (ret == 0) {
		ret = kernel_sendmsg(trans->local->socket, &msg, iov, 1,
				     iov[0].iov_len);

		/* restore DF for subsequent non-fragmenting sends */
		opt = IP_PMTUDISC_DO;
		kernel_setsockopt(trans->local->socket, SOL_IP,
				  IP_MTU_DISCOVER, (char *) &opt, sizeof(opt));
	}

	up_write(&trans->local->defrag_sem);
	_leave(" = %d [frag %u]", ret, trans->peer->maxdata);
	return ret;
}
392 | |||
/*
 * wait for space to appear in the transmit/ACK window
 * - caller holds the socket locked
 * - returns 0 once space is available, or a sock_intr_errno() code if a
 *   signal arrives first
 */
static int rxrpc_wait_for_tx_window(struct rxrpc_sock *rx,
				    struct rxrpc_call *call,
				    long *timeo)
{
	DECLARE_WAITQUEUE(myself, current);
	int ret;

	_enter(",{%d},%ld",
	       CIRC_SPACE(call->acks_head, call->acks_tail, call->acks_winsz),
	       *timeo);

	add_wait_queue(&call->tx_waitq, &myself);

	for (;;) {
		/* set the task state before re-checking the condition so a
		 * wake-up between check and schedule() isn't lost */
		set_current_state(TASK_INTERRUPTIBLE);
		ret = 0;
		if (CIRC_SPACE(call->acks_head, call->acks_tail,
			       call->acks_winsz) > 0)
			break;
		if (signal_pending(current)) {
			ret = sock_intr_errno(*timeo);
			break;
		}

		/* drop the socket lock while asleep so the call processor can
		 * advance acks_tail and wake us */
		release_sock(&rx->sk);
		*timeo = schedule_timeout(*timeo);
		lock_sock(&rx->sk);
	}

	/* NOTE(review): if *timeo is exhausted without a signal the loop just
	 * re-checks rather than returning an error - confirm callers only
	 * pass infinite or signal-bounded timeouts */
	remove_wait_queue(&call->tx_waitq, &myself);
	set_current_state(TASK_RUNNING);
	_leave(" = %d", ret);
	return ret;
}
431 | |||
/*
 * attempt to schedule an instant Tx resend
 */
static inline void rxrpc_instant_resend(struct rxrpc_call *call)
{
	read_lock_bh(&call->state_lock);
	/* only proceed if the resend timer isn't currently running its
	 * handler (try_to_del_timer_sync() returns -1 in that case - the
	 * handler will requeue the call itself) */
	if (try_to_del_timer_sync(&call->resend_timer) >= 0) {
		clear_bit(RXRPC_CALL_RUN_RTIMER, &call->flags);
		if (call->state < RXRPC_CALL_COMPLETE &&
		    !test_and_set_bit(RXRPC_CALL_RESEND_TIMER, &call->events))
			rxrpc_queue_call(call);
	}
	read_unlock_bh(&call->state_lock);
}
446 | |||
/*
 * queue a packet for transmission, set the resend timer and attempt
 * to send the packet immediately
 */
static void rxrpc_queue_packet(struct rxrpc_call *call, struct sk_buff *skb,
			       bool last)
{
	struct rxrpc_skb_priv *sp = rxrpc_skb(skb);
	int ret;

	_net("queue skb %p [%d]", skb, call->acks_head);

	ASSERT(call->acks_window != NULL);
	call->acks_window[call->acks_head] = (unsigned long) skb;
	/* publish the skb in the window before advancing the head index */
	smp_wmb();
	call->acks_head = (call->acks_head + 1) & (call->acks_winsz - 1);

	if (last || call->state == RXRPC_CALL_SERVER_ACK_REQUEST) {
		_debug("________awaiting reply/ACK__________");
		write_lock_bh(&call->state_lock);
		switch (call->state) {
		case RXRPC_CALL_CLIENT_SEND_REQUEST:
			call->state = RXRPC_CALL_CLIENT_AWAIT_REPLY;
			break;
		case RXRPC_CALL_SERVER_ACK_REQUEST:
			call->state = RXRPC_CALL_SERVER_SEND_REPLY;
			if (!last)
				break;
			/* fall through - the last packet of the reply also
			 * moves the call straight to awaiting the final ACK */
		case RXRPC_CALL_SERVER_SEND_REPLY:
			call->state = RXRPC_CALL_SERVER_AWAIT_ACK;
			break;
		default:
			break;
		}
		write_unlock_bh(&call->state_lock);
	}

	_proto("Tx DATA %%%u { #%u }",
	       ntohl(sp->hdr.serial), ntohl(sp->hdr.seq));

	/* arm the resend timer if it isn't already running */
	sp->need_resend = 0;
	sp->resend_at = jiffies + rxrpc_resend_timeout * HZ;
	if (!test_and_set_bit(RXRPC_CALL_RUN_RTIMER, &call->flags)) {
		_debug("run timer");
		call->resend_timer.expires = sp->resend_at;
		add_timer(&call->resend_timer);
	}

	/* attempt to cancel the rx-ACK timer, deferring reply transmission if
	 * we're ACK'ing the request phase of an incoming call */
	ret = -EAGAIN;
	if (try_to_del_timer_sync(&call->ack_timer) >= 0) {
		/* the packet may be freed by rxrpc_process_call() before this
		 * returns */
		ret = rxrpc_send_packet(call->conn->trans, skb);
		_net("sent skb %p", skb);
	} else {
		_debug("failed to delete ACK timer");
	}

	if (ret < 0) {
		/* immediate transmission failed or was deferred; mark the
		 * packet for resend and kick the resend machinery */
		_debug("need instant resend %d", ret);
		sp->need_resend = 1;
		rxrpc_instant_resend(call);
	}

	_leave("");
}
515 | |||
516 | /* | ||
517 | * send data through a socket | ||
518 | * - must be called in process context | ||
519 | * - caller holds the socket locked | ||
520 | */ | ||
521 | static int rxrpc_send_data(struct kiocb *iocb, | ||
522 | struct rxrpc_sock *rx, | ||
523 | struct rxrpc_call *call, | ||
524 | struct msghdr *msg, size_t len) | ||
525 | { | ||
526 | struct rxrpc_skb_priv *sp; | ||
527 | unsigned char __user *from; | ||
528 | struct sk_buff *skb; | ||
529 | struct iovec *iov; | ||
530 | struct sock *sk = &rx->sk; | ||
531 | long timeo; | ||
532 | bool more; | ||
533 | int ret, ioc, segment, copied; | ||
534 | |||
535 | _enter(",,,{%zu},%zu", msg->msg_iovlen, len); | ||
536 | |||
537 | timeo = sock_sndtimeo(sk, msg->msg_flags & MSG_DONTWAIT); | ||
538 | |||
539 | /* this should be in poll */ | ||
540 | clear_bit(SOCK_ASYNC_NOSPACE, &sk->sk_socket->flags); | ||
541 | |||
542 | if (sk->sk_err || (sk->sk_shutdown & SEND_SHUTDOWN)) | ||
543 | return -EPIPE; | ||
544 | |||
545 | iov = msg->msg_iov; | ||
546 | ioc = msg->msg_iovlen - 1; | ||
547 | from = iov->iov_base; | ||
548 | segment = iov->iov_len; | ||
549 | iov++; | ||
550 | more = msg->msg_flags & MSG_MORE; | ||
551 | |||
552 | skb = call->tx_pending; | ||
553 | call->tx_pending = NULL; | ||
554 | |||
555 | copied = 0; | ||
556 | do { | ||
557 | int copy; | ||
558 | |||
559 | if (segment > len) | ||
560 | segment = len; | ||
561 | |||
562 | _debug("SEGMENT %d @%p", segment, from); | ||
563 | |||
564 | if (!skb) { | ||
565 | size_t size, chunk, max, space; | ||
566 | |||
567 | _debug("alloc"); | ||
568 | |||
569 | if (CIRC_SPACE(call->acks_head, call->acks_tail, | ||
570 | call->acks_winsz) <= 0) { | ||
571 | ret = -EAGAIN; | ||
572 | if (msg->msg_flags & MSG_DONTWAIT) | ||
573 | goto maybe_error; | ||
574 | ret = rxrpc_wait_for_tx_window(rx, call, | ||
575 | &timeo); | ||
576 | if (ret < 0) | ||
577 | goto maybe_error; | ||
578 | } | ||
579 | |||
580 | max = call->conn->trans->peer->maxdata; | ||
581 | max -= call->conn->security_size; | ||
582 | max &= ~(call->conn->size_align - 1UL); | ||
583 | |||
584 | chunk = max; | ||
585 | if (chunk > len) | ||
586 | chunk = len; | ||
587 | |||
588 | space = chunk + call->conn->size_align; | ||
589 | space &= ~(call->conn->size_align - 1UL); | ||
590 | |||
591 | size = space + call->conn->header_size; | ||
592 | |||
593 | _debug("SIZE: %zu/%zu/%zu", chunk, space, size); | ||
594 | |||
595 | /* create a buffer that we can retain until it's ACK'd */ | ||
596 | skb = sock_alloc_send_skb( | ||
597 | sk, size, msg->msg_flags & MSG_DONTWAIT, &ret); | ||
598 | if (!skb) | ||
599 | goto maybe_error; | ||
600 | |||
601 | rxrpc_new_skb(skb); | ||
602 | |||
603 | _debug("ALLOC SEND %p", skb); | ||
604 | |||
605 | ASSERTCMP(skb->mark, ==, 0); | ||
606 | |||
607 | _debug("HS: %u", call->conn->header_size); | ||
608 | skb_reserve(skb, call->conn->header_size); | ||
609 | skb->len += call->conn->header_size; | ||
610 | |||
611 | sp = rxrpc_skb(skb); | ||
612 | sp->remain = chunk; | ||
613 | if (sp->remain > skb_tailroom(skb)) | ||
614 | sp->remain = skb_tailroom(skb); | ||
615 | |||
616 | _net("skb: hr %d, tr %d, hl %d, rm %d", | ||
617 | skb_headroom(skb), | ||
618 | skb_tailroom(skb), | ||
619 | skb_headlen(skb), | ||
620 | sp->remain); | ||
621 | |||
622 | skb->ip_summed = CHECKSUM_UNNECESSARY; | ||
623 | } | ||
624 | |||
625 | _debug("append"); | ||
626 | sp = rxrpc_skb(skb); | ||
627 | |||
628 | /* append next segment of data to the current buffer */ | ||
629 | copy = skb_tailroom(skb); | ||
630 | ASSERTCMP(copy, >, 0); | ||
631 | if (copy > segment) | ||
632 | copy = segment; | ||
633 | if (copy > sp->remain) | ||
634 | copy = sp->remain; | ||
635 | |||
636 | _debug("add"); | ||
637 | ret = skb_add_data(skb, from, copy); | ||
638 | _debug("added"); | ||
639 | if (ret < 0) | ||
640 | goto efault; | ||
641 | sp->remain -= copy; | ||
642 | skb->mark += copy; | ||
643 | |||
644 | len -= copy; | ||
645 | segment -= copy; | ||
646 | from += copy; | ||
647 | while (segment == 0 && ioc > 0) { | ||
648 | from = iov->iov_base; | ||
649 | segment = iov->iov_len; | ||
650 | iov++; | ||
651 | ioc--; | ||
652 | } | ||
653 | if (len == 0) { | ||
654 | segment = 0; | ||
655 | ioc = 0; | ||
656 | } | ||
657 | |||
658 | /* check for the far side aborting the call or a network error | ||
659 | * occurring */ | ||
660 | if (call->state > RXRPC_CALL_COMPLETE) | ||
661 | goto call_aborted; | ||
662 | |||
663 | /* add the packet to the send queue if it's now full */ | ||
664 | if (sp->remain <= 0 || (segment == 0 && !more)) { | ||
665 | struct rxrpc_connection *conn = call->conn; | ||
666 | size_t pad; | ||
667 | |||
668 | /* pad out if we're using security */ | ||
669 | if (conn->security) { | ||
670 | pad = conn->security_size + skb->mark; | ||
671 | pad = conn->size_align - pad; | ||
672 | pad &= conn->size_align - 1; | ||
673 | _debug("pad %zu", pad); | ||
674 | if (pad) | ||
675 | memset(skb_put(skb, pad), 0, pad); | ||
676 | } | ||
677 | |||
678 | sp->hdr.epoch = conn->epoch; | ||
679 | sp->hdr.cid = call->cid; | ||
680 | sp->hdr.callNumber = call->call_id; | ||
681 | sp->hdr.seq = | ||
682 | htonl(atomic_inc_return(&call->sequence)); | ||
683 | sp->hdr.serial = | ||
684 | htonl(atomic_inc_return(&conn->serial)); | ||
685 | sp->hdr.type = RXRPC_PACKET_TYPE_DATA; | ||
686 | sp->hdr.userStatus = 0; | ||
687 | sp->hdr.securityIndex = conn->security_ix; | ||
688 | sp->hdr._rsvd = 0; | ||
689 | sp->hdr.serviceId = conn->service_id; | ||
690 | |||
691 | sp->hdr.flags = conn->out_clientflag; | ||
692 | if (len == 0 && !more) | ||
693 | sp->hdr.flags |= RXRPC_LAST_PACKET; | ||
694 | else if (CIRC_SPACE(call->acks_head, call->acks_tail, | ||
695 | call->acks_winsz) > 1) | ||
696 | sp->hdr.flags |= RXRPC_MORE_PACKETS; | ||
697 | |||
698 | ret = rxrpc_secure_packet( | ||
699 | call, skb, skb->mark, | ||
700 | skb->head + sizeof(struct rxrpc_header)); | ||
701 | if (ret < 0) | ||
702 | goto out; | ||
703 | |||
704 | memcpy(skb->head, &sp->hdr, | ||
705 | sizeof(struct rxrpc_header)); | ||
706 | rxrpc_queue_packet(call, skb, segment == 0 && !more); | ||
707 | skb = NULL; | ||
708 | } | ||
709 | |||
710 | } while (segment > 0); | ||
711 | |||
712 | out: | ||
713 | call->tx_pending = skb; | ||
714 | _leave(" = %d", ret); | ||
715 | return ret; | ||
716 | |||
717 | call_aborted: | ||
718 | rxrpc_free_skb(skb); | ||
719 | if (call->state == RXRPC_CALL_NETWORK_ERROR) | ||
720 | ret = call->conn->trans->peer->net_error; | ||
721 | else | ||
722 | ret = -ECONNABORTED; | ||
723 | _leave(" = %d", ret); | ||
724 | return ret; | ||
725 | |||
726 | maybe_error: | ||
727 | if (copied) | ||
728 | ret = copied; | ||
729 | goto out; | ||
730 | |||
731 | efault: | ||
732 | ret = -EFAULT; | ||
733 | goto out; | ||
734 | } | ||
diff --git a/net/rxrpc/ar-peer.c b/net/rxrpc/ar-peer.c new file mode 100644 index 000000000000..d399de4a7fe2 --- /dev/null +++ b/net/rxrpc/ar-peer.c | |||
@@ -0,0 +1,273 @@ | |||
1 | /* RxRPC remote transport endpoint management | ||
2 | * | ||
3 | * Copyright (C) 2007 Red Hat, Inc. All Rights Reserved. | ||
4 | * Written by David Howells (dhowells@redhat.com) | ||
5 | * | ||
6 | * This program is free software; you can redistribute it and/or | ||
7 | * modify it under the terms of the GNU General Public License | ||
8 | * as published by the Free Software Foundation; either version | ||
9 | * 2 of the License, or (at your option) any later version. | ||
10 | */ | ||
11 | |||
12 | #include <linux/module.h> | ||
13 | #include <linux/net.h> | ||
14 | #include <linux/skbuff.h> | ||
15 | #include <linux/udp.h> | ||
16 | #include <linux/in.h> | ||
17 | #include <linux/in6.h> | ||
18 | #include <linux/icmp.h> | ||
19 | #include <net/sock.h> | ||
20 | #include <net/af_rxrpc.h> | ||
21 | #include <net/ip.h> | ||
22 | #include "ar-internal.h" | ||
23 | |||
24 | static LIST_HEAD(rxrpc_peers); | ||
25 | static DEFINE_RWLOCK(rxrpc_peer_lock); | ||
26 | static DECLARE_WAIT_QUEUE_HEAD(rxrpc_peer_wq); | ||
27 | |||
28 | static void rxrpc_destroy_peer(struct work_struct *work); | ||
29 | |||
30 | /* | ||
31 | * allocate a new peer | ||
32 | */ | ||
33 | static struct rxrpc_peer *rxrpc_alloc_peer(struct sockaddr_rxrpc *srx, | ||
34 | gfp_t gfp) | ||
35 | { | ||
36 | struct rxrpc_peer *peer; | ||
37 | |||
38 | _enter(""); | ||
39 | |||
40 | peer = kzalloc(sizeof(struct rxrpc_peer), gfp); | ||
41 | if (peer) { | ||
42 | INIT_WORK(&peer->destroyer, &rxrpc_destroy_peer); | ||
43 | INIT_LIST_HEAD(&peer->link); | ||
44 | INIT_LIST_HEAD(&peer->error_targets); | ||
45 | spin_lock_init(&peer->lock); | ||
46 | atomic_set(&peer->usage, 1); | ||
47 | peer->debug_id = atomic_inc_return(&rxrpc_debug_id); | ||
48 | memcpy(&peer->srx, srx, sizeof(*srx)); | ||
49 | |||
50 | peer->mtu = peer->if_mtu = 65535; | ||
51 | |||
52 | if (srx->transport.family == AF_INET) { | ||
53 | peer->hdrsize = sizeof(struct iphdr); | ||
54 | switch (srx->transport_type) { | ||
55 | case SOCK_DGRAM: | ||
56 | peer->hdrsize += sizeof(struct udphdr); | ||
57 | break; | ||
58 | default: | ||
59 | BUG(); | ||
60 | break; | ||
61 | } | ||
62 | } else { | ||
63 | BUG(); | ||
64 | } | ||
65 | |||
66 | peer->hdrsize += sizeof(struct rxrpc_header); | ||
67 | peer->maxdata = peer->mtu - peer->hdrsize; | ||
68 | } | ||
69 | |||
70 | _leave(" = %p", peer); | ||
71 | return peer; | ||
72 | } | ||
73 | |||
74 | /* | ||
75 | * obtain a remote transport endpoint for the specified address | ||
76 | */ | ||
77 | struct rxrpc_peer *rxrpc_get_peer(struct sockaddr_rxrpc *srx, gfp_t gfp) | ||
78 | { | ||
79 | struct rxrpc_peer *peer, *candidate; | ||
80 | const char *new = "old"; | ||
81 | int usage; | ||
82 | |||
83 | _enter("{%d,%d,%u.%u.%u.%u+%hu}", | ||
84 | srx->transport_type, | ||
85 | srx->transport_len, | ||
86 | NIPQUAD(srx->transport.sin.sin_addr), | ||
87 | ntohs(srx->transport.sin.sin_port)); | ||
88 | |||
89 | /* search the peer list first */ | ||
90 | read_lock_bh(&rxrpc_peer_lock); | ||
91 | list_for_each_entry(peer, &rxrpc_peers, link) { | ||
92 | _debug("check PEER %d { u=%d t=%d l=%d }", | ||
93 | peer->debug_id, | ||
94 | atomic_read(&peer->usage), | ||
95 | peer->srx.transport_type, | ||
96 | peer->srx.transport_len); | ||
97 | |||
98 | if (atomic_read(&peer->usage) > 0 && | ||
99 | peer->srx.transport_type == srx->transport_type && | ||
100 | peer->srx.transport_len == srx->transport_len && | ||
101 | memcmp(&peer->srx.transport, | ||
102 | &srx->transport, | ||
103 | srx->transport_len) == 0) | ||
104 | goto found_extant_peer; | ||
105 | } | ||
106 | read_unlock_bh(&rxrpc_peer_lock); | ||
107 | |||
108 | /* not yet present - create a candidate for a new record and then | ||
109 | * redo the search */ | ||
110 | candidate = rxrpc_alloc_peer(srx, gfp); | ||
111 | if (!candidate) { | ||
112 | _leave(" = -ENOMEM"); | ||
113 | return ERR_PTR(-ENOMEM); | ||
114 | } | ||
115 | |||
116 | write_lock_bh(&rxrpc_peer_lock); | ||
117 | |||
118 | list_for_each_entry(peer, &rxrpc_peers, link) { | ||
119 | if (atomic_read(&peer->usage) > 0 && | ||
120 | peer->srx.transport_type == srx->transport_type && | ||
121 | peer->srx.transport_len == srx->transport_len && | ||
122 | memcmp(&peer->srx.transport, | ||
123 | &srx->transport, | ||
124 | srx->transport_len) == 0) | ||
125 | goto found_extant_second; | ||
126 | } | ||
127 | |||
128 | /* we can now add the new candidate to the list */ | ||
129 | peer = candidate; | ||
130 | candidate = NULL; | ||
131 | |||
132 | list_add_tail(&peer->link, &rxrpc_peers); | ||
133 | write_unlock_bh(&rxrpc_peer_lock); | ||
134 | new = "new"; | ||
135 | |||
136 | success: | ||
137 | _net("PEER %s %d {%d,%u,%u.%u.%u.%u+%hu}", | ||
138 | new, | ||
139 | peer->debug_id, | ||
140 | peer->srx.transport_type, | ||
141 | peer->srx.transport.family, | ||
142 | NIPQUAD(peer->srx.transport.sin.sin_addr), | ||
143 | ntohs(peer->srx.transport.sin.sin_port)); | ||
144 | |||
145 | _leave(" = %p {u=%d}", peer, atomic_read(&peer->usage)); | ||
146 | return peer; | ||
147 | |||
148 | /* we found the peer in the list immediately */ | ||
149 | found_extant_peer: | ||
150 | usage = atomic_inc_return(&peer->usage); | ||
151 | read_unlock_bh(&rxrpc_peer_lock); | ||
152 | goto success; | ||
153 | |||
154 | /* we found the peer on the second time through the list */ | ||
155 | found_extant_second: | ||
156 | usage = atomic_inc_return(&peer->usage); | ||
157 | write_unlock_bh(&rxrpc_peer_lock); | ||
158 | kfree(candidate); | ||
159 | goto success; | ||
160 | } | ||
161 | |||
162 | /* | ||
163 | * find the peer associated with a packet | ||
164 | */ | ||
165 | struct rxrpc_peer *rxrpc_find_peer(struct rxrpc_local *local, | ||
166 | __be32 addr, __be16 port) | ||
167 | { | ||
168 | struct rxrpc_peer *peer; | ||
169 | |||
170 | _enter(""); | ||
171 | |||
172 | /* search the peer list */ | ||
173 | read_lock_bh(&rxrpc_peer_lock); | ||
174 | |||
175 | if (local->srx.transport.family == AF_INET && | ||
176 | local->srx.transport_type == SOCK_DGRAM | ||
177 | ) { | ||
178 | list_for_each_entry(peer, &rxrpc_peers, link) { | ||
179 | if (atomic_read(&peer->usage) > 0 && | ||
180 | peer->srx.transport_type == SOCK_DGRAM && | ||
181 | peer->srx.transport.family == AF_INET && | ||
182 | peer->srx.transport.sin.sin_port == port && | ||
183 | peer->srx.transport.sin.sin_addr.s_addr == addr) | ||
184 | goto found_UDP_peer; | ||
185 | } | ||
186 | |||
187 | goto new_UDP_peer; | ||
188 | } | ||
189 | |||
190 | read_unlock_bh(&rxrpc_peer_lock); | ||
191 | _leave(" = -EAFNOSUPPORT"); | ||
192 | return ERR_PTR(-EAFNOSUPPORT); | ||
193 | |||
194 | found_UDP_peer: | ||
195 | _net("Rx UDP DGRAM from peer %d", peer->debug_id); | ||
196 | atomic_inc(&peer->usage); | ||
197 | read_unlock_bh(&rxrpc_peer_lock); | ||
198 | _leave(" = %p", peer); | ||
199 | return peer; | ||
200 | |||
201 | new_UDP_peer: | ||
202 | _net("Rx UDP DGRAM from NEW peer %d", peer->debug_id); | ||
203 | read_unlock_bh(&rxrpc_peer_lock); | ||
204 | _leave(" = -EBUSY [new]"); | ||
205 | return ERR_PTR(-EBUSY); | ||
206 | } | ||
207 | |||
208 | /* | ||
209 | * release a remote transport endpoint | ||
210 | */ | ||
211 | void rxrpc_put_peer(struct rxrpc_peer *peer) | ||
212 | { | ||
213 | _enter("%p{u=%d}", peer, atomic_read(&peer->usage)); | ||
214 | |||
215 | ASSERTCMP(atomic_read(&peer->usage), >, 0); | ||
216 | |||
217 | if (likely(!atomic_dec_and_test(&peer->usage))) { | ||
218 | _leave(" [in use]"); | ||
219 | return; | ||
220 | } | ||
221 | |||
222 | rxrpc_queue_work(&peer->destroyer); | ||
223 | _leave(""); | ||
224 | } | ||
225 | |||
226 | /* | ||
227 | * destroy a remote transport endpoint | ||
228 | */ | ||
229 | static void rxrpc_destroy_peer(struct work_struct *work) | ||
230 | { | ||
231 | struct rxrpc_peer *peer = | ||
232 | container_of(work, struct rxrpc_peer, destroyer); | ||
233 | |||
234 | _enter("%p{%d}", peer, atomic_read(&peer->usage)); | ||
235 | |||
236 | write_lock_bh(&rxrpc_peer_lock); | ||
237 | list_del(&peer->link); | ||
238 | write_unlock_bh(&rxrpc_peer_lock); | ||
239 | |||
240 | _net("DESTROY PEER %d", peer->debug_id); | ||
241 | kfree(peer); | ||
242 | |||
243 | if (list_empty(&rxrpc_peers)) | ||
244 | wake_up_all(&rxrpc_peer_wq); | ||
245 | _leave(""); | ||
246 | } | ||
247 | |||
248 | /* | ||
249 | * preemptively destroy all the peer records from a transport endpoint rather | ||
250 | * than waiting for them to time out | ||
251 | */ | ||
252 | void __exit rxrpc_destroy_all_peers(void) | ||
253 | { | ||
254 | DECLARE_WAITQUEUE(myself,current); | ||
255 | |||
256 | _enter(""); | ||
257 | |||
258 | /* we simply have to wait for them to go away */ | ||
259 | if (!list_empty(&rxrpc_peers)) { | ||
260 | set_current_state(TASK_UNINTERRUPTIBLE); | ||
261 | add_wait_queue(&rxrpc_peer_wq, &myself); | ||
262 | |||
263 | while (!list_empty(&rxrpc_peers)) { | ||
264 | schedule(); | ||
265 | set_current_state(TASK_UNINTERRUPTIBLE); | ||
266 | } | ||
267 | |||
268 | remove_wait_queue(&rxrpc_peer_wq, &myself); | ||
269 | set_current_state(TASK_RUNNING); | ||
270 | } | ||
271 | |||
272 | _leave(""); | ||
273 | } | ||
diff --git a/net/rxrpc/ar-proc.c b/net/rxrpc/ar-proc.c new file mode 100644 index 000000000000..58f4b4e5cece --- /dev/null +++ b/net/rxrpc/ar-proc.c | |||
@@ -0,0 +1,247 @@ | |||
1 | /* /proc/net/ support for AF_RXRPC | ||
2 | * | ||
3 | * Copyright (C) 2007 Red Hat, Inc. All Rights Reserved. | ||
4 | * Written by David Howells (dhowells@redhat.com) | ||
5 | * | ||
6 | * This program is free software; you can redistribute it and/or | ||
7 | * modify it under the terms of the GNU General Public License | ||
8 | * as published by the Free Software Foundation; either version | ||
9 | * 2 of the License, or (at your option) any later version. | ||
10 | */ | ||
11 | |||
12 | #include <linux/module.h> | ||
13 | #include <net/sock.h> | ||
14 | #include <net/af_rxrpc.h> | ||
15 | #include "ar-internal.h" | ||
16 | |||
/* textual representations of the connection states for /proc/net/rxrpc_conns;
 * each name is padded to eight characters to keep the columns aligned */
static const char *rxrpc_conn_states[] = {
	[RXRPC_CONN_UNUSED]		= "Unused  ",
	[RXRPC_CONN_CLIENT]		= "Client  ",
	[RXRPC_CONN_SERVER_UNSECURED]	= "SvUnsec ",
	[RXRPC_CONN_SERVER_CHALLENGING]	= "SvChall ",
	[RXRPC_CONN_SERVER]		= "SvSecure",
	[RXRPC_CONN_REMOTELY_ABORTED]	= "RmtAbort",
	[RXRPC_CONN_LOCALLY_ABORTED]	= "LocAbort",
	[RXRPC_CONN_NETWORK_ERROR]	= "NetError",
};
27 | |||
/* textual representations of the call states, padded to eight characters for
 * column alignment; non-static as other parts of the module use it too */
const char *rxrpc_call_states[] = {
	[RXRPC_CALL_CLIENT_SEND_REQUEST]	= "ClSndReq",
	[RXRPC_CALL_CLIENT_AWAIT_REPLY]		= "ClAwtRpl",
	[RXRPC_CALL_CLIENT_RECV_REPLY]		= "ClRcvRpl",
	[RXRPC_CALL_CLIENT_FINAL_ACK]		= "ClFnlACK",
	[RXRPC_CALL_SERVER_SECURING]		= "SvSecure",
	[RXRPC_CALL_SERVER_ACCEPTING]		= "SvAccept",
	[RXRPC_CALL_SERVER_RECV_REQUEST]	= "SvRcvReq",
	[RXRPC_CALL_SERVER_ACK_REQUEST]		= "SvAckReq",
	[RXRPC_CALL_SERVER_SEND_REPLY]		= "SvSndRpl",
	[RXRPC_CALL_SERVER_AWAIT_ACK]		= "SvAwtACK",
	[RXRPC_CALL_COMPLETE]			= "Complete",
	[RXRPC_CALL_SERVER_BUSY]		= "SvBusy  ",
	[RXRPC_CALL_REMOTELY_ABORTED]		= "RmtAbort",
	[RXRPC_CALL_LOCALLY_ABORTED]		= "LocAbort",
	[RXRPC_CALL_NETWORK_ERROR]		= "NetError",
	[RXRPC_CALL_DEAD]			= "Dead    ",
};
46 | |||
47 | /* | ||
48 | * generate a list of extant and dead calls in /proc/net/rxrpc_calls | ||
49 | */ | ||
50 | static void *rxrpc_call_seq_start(struct seq_file *seq, loff_t *_pos) | ||
51 | { | ||
52 | struct list_head *_p; | ||
53 | loff_t pos = *_pos; | ||
54 | |||
55 | read_lock(&rxrpc_call_lock); | ||
56 | if (!pos) | ||
57 | return SEQ_START_TOKEN; | ||
58 | pos--; | ||
59 | |||
60 | list_for_each(_p, &rxrpc_calls) | ||
61 | if (!pos--) | ||
62 | break; | ||
63 | |||
64 | return _p != &rxrpc_calls ? _p : NULL; | ||
65 | } | ||
66 | |||
67 | static void *rxrpc_call_seq_next(struct seq_file *seq, void *v, loff_t *pos) | ||
68 | { | ||
69 | struct list_head *_p; | ||
70 | |||
71 | (*pos)++; | ||
72 | |||
73 | _p = v; | ||
74 | _p = (v == SEQ_START_TOKEN) ? rxrpc_calls.next : _p->next; | ||
75 | |||
76 | return _p != &rxrpc_calls ? _p : NULL; | ||
77 | } | ||
78 | |||
/* release the lock taken by rxrpc_call_seq_start() */
static void rxrpc_call_seq_stop(struct seq_file *seq, void *v)
{
	read_unlock(&rxrpc_call_lock);
}
83 | |||
/* display one call record (or the column headers for the start token) */
static int rxrpc_call_seq_show(struct seq_file *seq, void *v)
{
	struct rxrpc_transport *trans;
	struct rxrpc_call *call;
	/* sized for a dotted-quad IPv4 address plus ":port" and a NUL */
	char lbuff[4 + 4 + 4 + 4 + 5 + 1], rbuff[4 + 4 + 4 + 4 + 5 + 1];

	if (v == SEQ_START_TOKEN) {
		seq_puts(seq,
			 "Proto Local                  Remote                "
			 " SvID ConnID   CallID   End Use State    Abort   "
			 " UserID\n");
		return 0;
	}

	call = list_entry(v, struct rxrpc_call, link);
	trans = call->conn->trans;

	/* pre-format the local and remote endpoint addresses so they can be
	 * padded as single fields below */
	sprintf(lbuff, NIPQUAD_FMT":%u",
		NIPQUAD(trans->local->srx.transport.sin.sin_addr),
		ntohs(trans->local->srx.transport.sin.sin_port));

	sprintf(rbuff, NIPQUAD_FMT":%u",
		NIPQUAD(trans->peer->srx.transport.sin.sin_addr),
		ntohs(trans->peer->srx.transport.sin.sin_port));

	seq_printf(seq,
		   "UDP   %-22.22s %-22.22s %4x %08x %08x %s %3u"
		   " %-8.8s %08x %lx\n",
		   lbuff,
		   rbuff,
		   ntohs(call->conn->service_id),
		   ntohl(call->conn->cid),
		   ntohl(call->call_id),
		   call->conn->in_clientflag ? "Svc" : "Clt",
		   atomic_read(&call->usage),
		   rxrpc_call_states[call->state],
		   call->abort_code,
		   call->user_call_ID);

	return 0;
}
125 | |||
/* seq_file iterator operations for /proc/net/rxrpc_calls */
static struct seq_operations rxrpc_call_seq_ops = {
	.start  = rxrpc_call_seq_start,
	.next   = rxrpc_call_seq_next,
	.stop   = rxrpc_call_seq_stop,
	.show   = rxrpc_call_seq_show,
};
132 | |||
/* open /proc/net/rxrpc_calls, binding the call iterator operations */
static int rxrpc_call_seq_open(struct inode *inode, struct file *file)
{
	return seq_open(file, &rxrpc_call_seq_ops);
}
137 | |||
138 | struct file_operations rxrpc_call_seq_fops = { | ||
139 | .owner = THIS_MODULE, | ||
140 | .open = rxrpc_call_seq_open, | ||
141 | .read = seq_read, | ||
142 | .llseek = seq_lseek, | ||
143 | .release = seq_release_private, | ||
144 | }; | ||
145 | |||
146 | /* | ||
147 | * generate a list of extant virtual connections in /proc/net/rxrpc_conns | ||
148 | */ | ||
149 | static void *rxrpc_connection_seq_start(struct seq_file *seq, loff_t *_pos) | ||
150 | { | ||
151 | struct list_head *_p; | ||
152 | loff_t pos = *_pos; | ||
153 | |||
154 | read_lock(&rxrpc_connection_lock); | ||
155 | if (!pos) | ||
156 | return SEQ_START_TOKEN; | ||
157 | pos--; | ||
158 | |||
159 | list_for_each(_p, &rxrpc_connections) | ||
160 | if (!pos--) | ||
161 | break; | ||
162 | |||
163 | return _p != &rxrpc_connections ? _p : NULL; | ||
164 | } | ||
165 | |||
166 | static void *rxrpc_connection_seq_next(struct seq_file *seq, void *v, | ||
167 | loff_t *pos) | ||
168 | { | ||
169 | struct list_head *_p; | ||
170 | |||
171 | (*pos)++; | ||
172 | |||
173 | _p = v; | ||
174 | _p = (v == SEQ_START_TOKEN) ? rxrpc_connections.next : _p->next; | ||
175 | |||
176 | return _p != &rxrpc_connections ? _p : NULL; | ||
177 | } | ||
178 | |||
/* release the lock taken by rxrpc_connection_seq_start() */
static void rxrpc_connection_seq_stop(struct seq_file *seq, void *v)
{
	read_unlock(&rxrpc_connection_lock);
}
183 | |||
/* display one connection record (or the column headers for the start token) */
static int rxrpc_connection_seq_show(struct seq_file *seq, void *v)
{
	struct rxrpc_connection *conn;
	struct rxrpc_transport *trans;
	/* sized for a dotted-quad IPv4 address plus ":port" and a NUL */
	char lbuff[4 + 4 + 4 + 4 + 5 + 1], rbuff[4 + 4 + 4 + 4 + 5 + 1];

	if (v == SEQ_START_TOKEN) {
		seq_puts(seq,
			 "Proto Local                  Remote                "
			 " SvID ConnID   Calls    End Use State    Key     "
			 " Serial   ISerial\n"
			 );
		return 0;
	}

	conn = list_entry(v, struct rxrpc_connection, link);
	trans = conn->trans;

	/* pre-format the local and remote endpoint addresses so they can be
	 * padded as single fields below */
	sprintf(lbuff, NIPQUAD_FMT":%u",
		NIPQUAD(trans->local->srx.transport.sin.sin_addr),
		ntohs(trans->local->srx.transport.sin.sin_port));

	sprintf(rbuff, NIPQUAD_FMT":%u",
		NIPQUAD(trans->peer->srx.transport.sin.sin_addr),
		ntohs(trans->peer->srx.transport.sin.sin_port));

	seq_printf(seq,
		   "UDP   %-22.22s %-22.22s %4x %08x %08x %s %3u"
		   " %s %08x %08x %08x\n",
		   lbuff,
		   rbuff,
		   ntohs(conn->service_id),
		   ntohl(conn->cid),
		   conn->call_counter,
		   conn->in_clientflag ? "Svc" : "Clt",
		   atomic_read(&conn->usage),
		   rxrpc_conn_states[conn->state],
		   key_serial(conn->key),
		   atomic_read(&conn->serial),
		   atomic_read(&conn->hi_serial));

	return 0;
}
227 | |||
/* seq_file iterator operations for /proc/net/rxrpc_conns */
static struct seq_operations rxrpc_connection_seq_ops = {
	.start  = rxrpc_connection_seq_start,
	.next   = rxrpc_connection_seq_next,
	.stop   = rxrpc_connection_seq_stop,
	.show   = rxrpc_connection_seq_show,
};
234 | |||
235 | |||
/* open /proc/net/rxrpc_conns, binding the connection iterator operations */
static int rxrpc_connection_seq_open(struct inode *inode, struct file *file)
{
	return seq_open(file, &rxrpc_connection_seq_ops);
}
240 | |||
241 | struct file_operations rxrpc_connection_seq_fops = { | ||
242 | .owner = THIS_MODULE, | ||
243 | .open = rxrpc_connection_seq_open, | ||
244 | .read = seq_read, | ||
245 | .llseek = seq_lseek, | ||
246 | .release = seq_release_private, | ||
247 | }; | ||
diff --git a/net/rxrpc/ar-recvmsg.c b/net/rxrpc/ar-recvmsg.c new file mode 100644 index 000000000000..f19121d4795b --- /dev/null +++ b/net/rxrpc/ar-recvmsg.c | |||
@@ -0,0 +1,437 @@ | |||
1 | /* RxRPC recvmsg() implementation | ||
2 | * | ||
3 | * Copyright (C) 2007 Red Hat, Inc. All Rights Reserved. | ||
4 | * Written by David Howells (dhowells@redhat.com) | ||
5 | * | ||
6 | * This program is free software; you can redistribute it and/or | ||
7 | * modify it under the terms of the GNU General Public License | ||
8 | * as published by the Free Software Foundation; either version | ||
9 | * 2 of the License, or (at your option) any later version. | ||
10 | */ | ||
11 | |||
12 | #include <linux/net.h> | ||
13 | #include <linux/skbuff.h> | ||
14 | #include <net/sock.h> | ||
15 | #include <net/af_rxrpc.h> | ||
16 | #include "ar-internal.h" | ||
17 | |||
18 | /* | ||
19 | * removal a call's user ID from the socket tree to make the user ID available | ||
20 | * again and so that it won't be seen again in association with that call | ||
21 | */ | ||
22 | void rxrpc_remove_user_ID(struct rxrpc_sock *rx, struct rxrpc_call *call) | ||
23 | { | ||
24 | _debug("RELEASE CALL %d", call->debug_id); | ||
25 | |||
26 | if (test_bit(RXRPC_CALL_HAS_USERID, &call->flags)) { | ||
27 | write_lock_bh(&rx->call_lock); | ||
28 | rb_erase(&call->sock_node, &call->socket->calls); | ||
29 | clear_bit(RXRPC_CALL_HAS_USERID, &call->flags); | ||
30 | write_unlock_bh(&rx->call_lock); | ||
31 | } | ||
32 | |||
33 | read_lock_bh(&call->state_lock); | ||
34 | if (!test_bit(RXRPC_CALL_RELEASED, &call->flags) && | ||
35 | !test_and_set_bit(RXRPC_CALL_RELEASE, &call->events)) | ||
36 | rxrpc_queue_call(call); | ||
37 | read_unlock_bh(&call->state_lock); | ||
38 | } | ||
39 | |||
40 | /* | ||
41 | * receive a message from an RxRPC socket | ||
42 | * - we need to be careful about two or more threads calling recvmsg | ||
43 | * simultaneously | ||
44 | */ | ||
45 | int rxrpc_recvmsg(struct kiocb *iocb, struct socket *sock, | ||
46 | struct msghdr *msg, size_t len, int flags) | ||
47 | { | ||
48 | struct rxrpc_skb_priv *sp; | ||
49 | struct rxrpc_call *call = NULL, *continue_call = NULL; | ||
50 | struct rxrpc_sock *rx = rxrpc_sk(sock->sk); | ||
51 | struct sk_buff *skb; | ||
52 | long timeo; | ||
53 | int copy, ret, ullen, offset, copied = 0; | ||
54 | u32 abort_code; | ||
55 | |||
56 | DEFINE_WAIT(wait); | ||
57 | |||
58 | _enter(",,,%zu,%d", len, flags); | ||
59 | |||
60 | if (flags & (MSG_OOB | MSG_TRUNC)) | ||
61 | return -EOPNOTSUPP; | ||
62 | |||
63 | ullen = msg->msg_flags & MSG_CMSG_COMPAT ? 4 : sizeof(unsigned long); | ||
64 | |||
65 | timeo = sock_rcvtimeo(&rx->sk, flags & MSG_DONTWAIT); | ||
66 | msg->msg_flags |= MSG_MORE; | ||
67 | |||
68 | lock_sock(&rx->sk); | ||
69 | |||
70 | for (;;) { | ||
71 | /* return immediately if a client socket has no outstanding | ||
72 | * calls */ | ||
73 | if (RB_EMPTY_ROOT(&rx->calls)) { | ||
74 | if (copied) | ||
75 | goto out; | ||
76 | if (rx->sk.sk_state != RXRPC_SERVER_LISTENING) { | ||
77 | release_sock(&rx->sk); | ||
78 | if (continue_call) | ||
79 | rxrpc_put_call(continue_call); | ||
80 | return -ENODATA; | ||
81 | } | ||
82 | } | ||
83 | |||
84 | /* get the next message on the Rx queue */ | ||
85 | skb = skb_peek(&rx->sk.sk_receive_queue); | ||
86 | if (!skb) { | ||
87 | /* nothing remains on the queue */ | ||
88 | if (copied && | ||
89 | (msg->msg_flags & MSG_PEEK || timeo == 0)) | ||
90 | goto out; | ||
91 | |||
92 | /* wait for a message to turn up */ | ||
93 | release_sock(&rx->sk); | ||
94 | prepare_to_wait_exclusive(rx->sk.sk_sleep, &wait, | ||
95 | TASK_INTERRUPTIBLE); | ||
96 | ret = sock_error(&rx->sk); | ||
97 | if (ret) | ||
98 | goto wait_error; | ||
99 | |||
100 | if (skb_queue_empty(&rx->sk.sk_receive_queue)) { | ||
101 | if (signal_pending(current)) | ||
102 | goto wait_interrupted; | ||
103 | timeo = schedule_timeout(timeo); | ||
104 | } | ||
105 | finish_wait(rx->sk.sk_sleep, &wait); | ||
106 | lock_sock(&rx->sk); | ||
107 | continue; | ||
108 | } | ||
109 | |||
110 | peek_next_packet: | ||
111 | sp = rxrpc_skb(skb); | ||
112 | call = sp->call; | ||
113 | ASSERT(call != NULL); | ||
114 | |||
115 | _debug("next pkt %s", rxrpc_pkts[sp->hdr.type]); | ||
116 | |||
117 | /* make sure we wait for the state to be updated in this call */ | ||
118 | spin_lock_bh(&call->lock); | ||
119 | spin_unlock_bh(&call->lock); | ||
120 | |||
121 | if (test_bit(RXRPC_CALL_RELEASED, &call->flags)) { | ||
122 | _debug("packet from released call"); | ||
123 | if (skb_dequeue(&rx->sk.sk_receive_queue) != skb) | ||
124 | BUG(); | ||
125 | rxrpc_free_skb(skb); | ||
126 | continue; | ||
127 | } | ||
128 | |||
129 | /* determine whether to continue last data receive */ | ||
130 | if (continue_call) { | ||
131 | _debug("maybe cont"); | ||
132 | if (call != continue_call || | ||
133 | skb->mark != RXRPC_SKB_MARK_DATA) { | ||
134 | release_sock(&rx->sk); | ||
135 | rxrpc_put_call(continue_call); | ||
136 | _leave(" = %d [noncont]", copied); | ||
137 | return copied; | ||
138 | } | ||
139 | } | ||
140 | |||
141 | rxrpc_get_call(call); | ||
142 | |||
143 | /* copy the peer address and timestamp */ | ||
144 | if (!continue_call) { | ||
145 | if (msg->msg_name && msg->msg_namelen > 0) | ||
146 | memcpy(&msg->msg_name, &call->conn->trans->peer->srx, | ||
147 | sizeof(call->conn->trans->peer->srx)); | ||
148 | sock_recv_timestamp(msg, &rx->sk, skb); | ||
149 | } | ||
150 | |||
151 | /* receive the message */ | ||
152 | if (skb->mark != RXRPC_SKB_MARK_DATA) | ||
153 | goto receive_non_data_message; | ||
154 | |||
155 | _debug("recvmsg DATA #%u { %d, %d }", | ||
156 | ntohl(sp->hdr.seq), skb->len, sp->offset); | ||
157 | |||
158 | if (!continue_call) { | ||
159 | /* only set the control data once per recvmsg() */ | ||
160 | ret = put_cmsg(msg, SOL_RXRPC, RXRPC_USER_CALL_ID, | ||
161 | ullen, &call->user_call_ID); | ||
162 | if (ret < 0) | ||
163 | goto copy_error; | ||
164 | ASSERT(test_bit(RXRPC_CALL_HAS_USERID, &call->flags)); | ||
165 | } | ||
166 | |||
167 | ASSERTCMP(ntohl(sp->hdr.seq), >=, call->rx_data_recv); | ||
168 | ASSERTCMP(ntohl(sp->hdr.seq), <=, call->rx_data_recv + 1); | ||
169 | call->rx_data_recv = ntohl(sp->hdr.seq); | ||
170 | |||
171 | ASSERTCMP(ntohl(sp->hdr.seq), >, call->rx_data_eaten); | ||
172 | |||
173 | offset = sp->offset; | ||
174 | copy = skb->len - offset; | ||
175 | if (copy > len - copied) | ||
176 | copy = len - copied; | ||
177 | |||
178 | if (skb->ip_summed == CHECKSUM_UNNECESSARY) { | ||
179 | ret = skb_copy_datagram_iovec(skb, offset, | ||
180 | msg->msg_iov, copy); | ||
181 | } else { | ||
182 | ret = skb_copy_and_csum_datagram_iovec(skb, offset, | ||
183 | msg->msg_iov); | ||
184 | if (ret == -EINVAL) | ||
185 | goto csum_copy_error; | ||
186 | } | ||
187 | |||
188 | if (ret < 0) | ||
189 | goto copy_error; | ||
190 | |||
191 | /* handle piecemeal consumption of data packets */ | ||
192 | _debug("copied %d+%d", copy, copied); | ||
193 | |||
194 | offset += copy; | ||
195 | copied += copy; | ||
196 | |||
197 | if (!(flags & MSG_PEEK)) | ||
198 | sp->offset = offset; | ||
199 | |||
200 | if (sp->offset < skb->len) { | ||
201 | _debug("buffer full"); | ||
202 | ASSERTCMP(copied, ==, len); | ||
203 | break; | ||
204 | } | ||
205 | |||
206 | /* we transferred the whole data packet */ | ||
207 | if (sp->hdr.flags & RXRPC_LAST_PACKET) { | ||
208 | _debug("last"); | ||
209 | if (call->conn->out_clientflag) { | ||
210 | /* last byte of reply received */ | ||
211 | ret = copied; | ||
212 | goto terminal_message; | ||
213 | } | ||
214 | |||
215 | /* last bit of request received */ | ||
216 | if (!(flags & MSG_PEEK)) { | ||
217 | _debug("eat packet"); | ||
218 | if (skb_dequeue(&rx->sk.sk_receive_queue) != | ||
219 | skb) | ||
220 | BUG(); | ||
221 | rxrpc_free_skb(skb); | ||
222 | } | ||
223 | msg->msg_flags &= ~MSG_MORE; | ||
224 | break; | ||
225 | } | ||
226 | |||
227 | /* move on to the next data message */ | ||
228 | _debug("next"); | ||
229 | if (!continue_call) | ||
230 | continue_call = sp->call; | ||
231 | else | ||
232 | rxrpc_put_call(call); | ||
233 | call = NULL; | ||
234 | |||
235 | if (flags & MSG_PEEK) { | ||
236 | _debug("peek next"); | ||
237 | skb = skb->next; | ||
238 | if (skb == (struct sk_buff *) &rx->sk.sk_receive_queue) | ||
239 | break; | ||
240 | goto peek_next_packet; | ||
241 | } | ||
242 | |||
243 | _debug("eat packet"); | ||
244 | if (skb_dequeue(&rx->sk.sk_receive_queue) != skb) | ||
245 | BUG(); | ||
246 | rxrpc_free_skb(skb); | ||
247 | } | ||
248 | |||
249 | /* end of non-terminal data packet reception for the moment */ | ||
250 | _debug("end rcv data"); | ||
251 | out: | ||
252 | release_sock(&rx->sk); | ||
253 | if (call) | ||
254 | rxrpc_put_call(call); | ||
255 | if (continue_call) | ||
256 | rxrpc_put_call(continue_call); | ||
257 | _leave(" = %d [data]", copied); | ||
258 | return copied; | ||
259 | |||
260 | /* handle non-DATA messages such as aborts, incoming connections and | ||
261 | * final ACKs */ | ||
262 | receive_non_data_message: | ||
263 | _debug("non-data"); | ||
264 | |||
265 | if (skb->mark == RXRPC_SKB_MARK_NEW_CALL) { | ||
266 | _debug("RECV NEW CALL"); | ||
267 | ret = put_cmsg(msg, SOL_RXRPC, RXRPC_NEW_CALL, 0, &abort_code); | ||
268 | if (ret < 0) | ||
269 | goto copy_error; | ||
270 | if (!(flags & MSG_PEEK)) { | ||
271 | if (skb_dequeue(&rx->sk.sk_receive_queue) != skb) | ||
272 | BUG(); | ||
273 | rxrpc_free_skb(skb); | ||
274 | } | ||
275 | goto out; | ||
276 | } | ||
277 | |||
278 | ret = put_cmsg(msg, SOL_RXRPC, RXRPC_USER_CALL_ID, | ||
279 | ullen, &call->user_call_ID); | ||
280 | if (ret < 0) | ||
281 | goto copy_error; | ||
282 | ASSERT(test_bit(RXRPC_CALL_HAS_USERID, &call->flags)); | ||
283 | |||
284 | switch (skb->mark) { | ||
285 | case RXRPC_SKB_MARK_DATA: | ||
286 | BUG(); | ||
287 | case RXRPC_SKB_MARK_FINAL_ACK: | ||
288 | ret = put_cmsg(msg, SOL_RXRPC, RXRPC_ACK, 0, &abort_code); | ||
289 | break; | ||
290 | case RXRPC_SKB_MARK_BUSY: | ||
291 | ret = put_cmsg(msg, SOL_RXRPC, RXRPC_BUSY, 0, &abort_code); | ||
292 | break; | ||
293 | case RXRPC_SKB_MARK_REMOTE_ABORT: | ||
294 | abort_code = call->abort_code; | ||
295 | ret = put_cmsg(msg, SOL_RXRPC, RXRPC_ABORT, 4, &abort_code); | ||
296 | break; | ||
297 | case RXRPC_SKB_MARK_NET_ERROR: | ||
298 | _debug("RECV NET ERROR %d", sp->error); | ||
299 | abort_code = sp->error; | ||
300 | ret = put_cmsg(msg, SOL_RXRPC, RXRPC_NET_ERROR, 4, &abort_code); | ||
301 | break; | ||
302 | case RXRPC_SKB_MARK_LOCAL_ERROR: | ||
303 | _debug("RECV LOCAL ERROR %d", sp->error); | ||
304 | abort_code = sp->error; | ||
305 | ret = put_cmsg(msg, SOL_RXRPC, RXRPC_LOCAL_ERROR, 4, | ||
306 | &abort_code); | ||
307 | break; | ||
308 | default: | ||
309 | BUG(); | ||
310 | break; | ||
311 | } | ||
312 | |||
313 | if (ret < 0) | ||
314 | goto copy_error; | ||
315 | |||
316 | terminal_message: | ||
317 | _debug("terminal"); | ||
318 | msg->msg_flags &= ~MSG_MORE; | ||
319 | msg->msg_flags |= MSG_EOR; | ||
320 | |||
321 | if (!(flags & MSG_PEEK)) { | ||
322 | _net("free terminal skb %p", skb); | ||
323 | if (skb_dequeue(&rx->sk.sk_receive_queue) != skb) | ||
324 | BUG(); | ||
325 | rxrpc_free_skb(skb); | ||
326 | rxrpc_remove_user_ID(rx, call); | ||
327 | } | ||
328 | |||
329 | release_sock(&rx->sk); | ||
330 | rxrpc_put_call(call); | ||
331 | if (continue_call) | ||
332 | rxrpc_put_call(continue_call); | ||
333 | _leave(" = %d", ret); | ||
334 | return ret; | ||
335 | |||
336 | copy_error: | ||
337 | _debug("copy error"); | ||
338 | release_sock(&rx->sk); | ||
339 | rxrpc_put_call(call); | ||
340 | if (continue_call) | ||
341 | rxrpc_put_call(continue_call); | ||
342 | _leave(" = %d", ret); | ||
343 | return ret; | ||
344 | |||
345 | csum_copy_error: | ||
346 | _debug("csum error"); | ||
347 | release_sock(&rx->sk); | ||
348 | if (continue_call) | ||
349 | rxrpc_put_call(continue_call); | ||
350 | rxrpc_kill_skb(skb); | ||
351 | skb_kill_datagram(&rx->sk, skb, flags); | ||
352 | rxrpc_put_call(call); | ||
353 | return -EAGAIN; | ||
354 | |||
355 | wait_interrupted: | ||
356 | ret = sock_intr_errno(timeo); | ||
357 | wait_error: | ||
358 | finish_wait(rx->sk.sk_sleep, &wait); | ||
359 | if (continue_call) | ||
360 | rxrpc_put_call(continue_call); | ||
361 | if (copied) | ||
362 | copied = ret; | ||
363 | _leave(" = %d [waitfail %d]", copied, ret); | ||
364 | return copied; | ||
365 | |||
366 | } | ||
367 | |||
368 | /** | ||
369 | * rxrpc_kernel_data_delivered - Record delivery of data message | ||
370 | * @skb: Message holding data | ||
371 | * | ||
372 | * Record the delivery of a data message. This permits RxRPC to keep its | ||
373 | * tracking correct. The socket buffer will be deleted. | ||
374 | */ | ||
375 | void rxrpc_kernel_data_delivered(struct sk_buff *skb) | ||
376 | { | ||
377 | struct rxrpc_skb_priv *sp = rxrpc_skb(skb); | ||
378 | struct rxrpc_call *call = sp->call; | ||
379 | |||
380 | ASSERTCMP(ntohl(sp->hdr.seq), >=, call->rx_data_recv); | ||
381 | ASSERTCMP(ntohl(sp->hdr.seq), <=, call->rx_data_recv + 1); | ||
382 | call->rx_data_recv = ntohl(sp->hdr.seq); | ||
383 | |||
384 | ASSERTCMP(ntohl(sp->hdr.seq), >, call->rx_data_eaten); | ||
385 | rxrpc_free_skb(skb); | ||
386 | } | ||
387 | |||
388 | EXPORT_SYMBOL(rxrpc_kernel_data_delivered); | ||
389 | |||
390 | /** | ||
391 | * rxrpc_kernel_is_data_last - Determine if data message is last one | ||
392 | * @skb: Message holding data | ||
393 | * | ||
394 | * Determine if data message is last one for the parent call. | ||
395 | */ | ||
396 | bool rxrpc_kernel_is_data_last(struct sk_buff *skb) | ||
397 | { | ||
398 | struct rxrpc_skb_priv *sp = rxrpc_skb(skb); | ||
399 | |||
400 | ASSERTCMP(skb->mark, ==, RXRPC_SKB_MARK_DATA); | ||
401 | |||
402 | return sp->hdr.flags & RXRPC_LAST_PACKET; | ||
403 | } | ||
404 | |||
405 | EXPORT_SYMBOL(rxrpc_kernel_is_data_last); | ||
406 | |||
407 | /** | ||
408 | * rxrpc_kernel_get_abort_code - Get the abort code from an RxRPC abort message | ||
409 | * @skb: Message indicating an abort | ||
410 | * | ||
411 | * Get the abort code from an RxRPC abort message. | ||
412 | */ | ||
413 | u32 rxrpc_kernel_get_abort_code(struct sk_buff *skb) | ||
414 | { | ||
415 | struct rxrpc_skb_priv *sp = rxrpc_skb(skb); | ||
416 | |||
417 | ASSERTCMP(skb->mark, ==, RXRPC_SKB_MARK_REMOTE_ABORT); | ||
418 | |||
419 | return sp->call->abort_code; | ||
420 | } | ||
421 | |||
422 | EXPORT_SYMBOL(rxrpc_kernel_get_abort_code); | ||
423 | |||
424 | /** | ||
425 | * rxrpc_kernel_get_error - Get the error number from an RxRPC error message | ||
426 | * @skb: Message indicating an error | ||
427 | * | ||
428 | * Get the error number from an RxRPC error message. | ||
429 | */ | ||
430 | int rxrpc_kernel_get_error_number(struct sk_buff *skb) | ||
431 | { | ||
432 | struct rxrpc_skb_priv *sp = rxrpc_skb(skb); | ||
433 | |||
434 | return sp->error; | ||
435 | } | ||
436 | |||
437 | EXPORT_SYMBOL(rxrpc_kernel_get_error_number); | ||
diff --git a/net/rxrpc/ar-security.c b/net/rxrpc/ar-security.c new file mode 100644 index 000000000000..60d1d364430a --- /dev/null +++ b/net/rxrpc/ar-security.c | |||
@@ -0,0 +1,258 @@ | |||
1 | /* RxRPC security handling | ||
2 | * | ||
3 | * Copyright (C) 2007 Red Hat, Inc. All Rights Reserved. | ||
4 | * Written by David Howells (dhowells@redhat.com) | ||
5 | * | ||
6 | * This program is free software; you can redistribute it and/or | ||
7 | * modify it under the terms of the GNU General Public License | ||
8 | * as published by the Free Software Foundation; either version | ||
9 | * 2 of the License, or (at your option) any later version. | ||
10 | */ | ||
11 | |||
12 | #include <linux/module.h> | ||
13 | #include <linux/net.h> | ||
14 | #include <linux/skbuff.h> | ||
15 | #include <linux/udp.h> | ||
16 | #include <linux/crypto.h> | ||
17 | #include <net/sock.h> | ||
18 | #include <net/af_rxrpc.h> | ||
19 | #include "ar-internal.h" | ||
20 | |||
21 | static LIST_HEAD(rxrpc_security_methods); | ||
22 | static DECLARE_RWSEM(rxrpc_security_sem); | ||
23 | |||
24 | /* | ||
25 | * get an RxRPC security module | ||
26 | */ | ||
27 | static struct rxrpc_security *rxrpc_security_get(struct rxrpc_security *sec) | ||
28 | { | ||
29 | return try_module_get(sec->owner) ? sec : NULL; | ||
30 | } | ||
31 | |||
/*
 * release an RxRPC security module
 * - drops the module reference taken by rxrpc_security_get()
 */
static void rxrpc_security_put(struct rxrpc_security *sec)
{
	module_put(sec->owner);
}
39 | |||
/*
 * look up an rxrpc security module
 * - on success the module is returned with a reference held on it
 * - returns NULL if the index is unknown, or if the matching module is being
 *   unloaded (try_module_get() failure)
 */
struct rxrpc_security *rxrpc_security_lookup(u8 security_index)
{
	struct rxrpc_security *sec = NULL;

	_enter("");

	down_read(&rxrpc_security_sem);

	list_for_each_entry(sec, &rxrpc_security_methods, link) {
		if (sec->security_index == security_index) {
			if (unlikely(!rxrpc_security_get(sec)))
				break; /* module dying - treat as not found */
			goto out;
		}
	}

	/* not found, or found but unloading (break lands here too) */
	sec = NULL;
out:
	up_read(&rxrpc_security_sem);
	_leave(" = %p [%s]", sec, sec ? sec->name : "");
	return sec;
}
65 | |||
/**
 * rxrpc_register_security - register an RxRPC security handler
 * @sec: security module
 *
 * register an RxRPC security handler for use by RxRPC
 *
 * Returns 0 on success or -EEXIST if a handler with the same security index
 * is already registered.
 */
int rxrpc_register_security(struct rxrpc_security *sec)
{
	struct rxrpc_security *psec;
	int ret;

	_enter("");
	down_write(&rxrpc_security_sem);

	/* reject a duplicate security index */
	ret = -EEXIST;
	list_for_each_entry(psec, &rxrpc_security_methods, link) {
		if (psec->security_index == sec->security_index)
			goto out;
	}

	list_add(&sec->link, &rxrpc_security_methods);

	printk(KERN_NOTICE "RxRPC: Registered security type %d '%s'\n",
	       sec->security_index, sec->name);
	ret = 0;

out:
	up_write(&rxrpc_security_sem);
	_leave(" = %d", ret);
	return ret;
}

EXPORT_SYMBOL_GPL(rxrpc_register_security);
99 | |||
/**
 * rxrpc_unregister_security - unregister an RxRPC security handler
 * @sec: security module
 *
 * unregister an RxRPC security handler
 */
void rxrpc_unregister_security(struct rxrpc_security *sec)
{

	_enter("");
	down_write(&rxrpc_security_sem);
	/* list_del_init() so a repeated unregister is harmless */
	list_del_init(&sec->link);
	up_write(&rxrpc_security_sem);

	printk(KERN_NOTICE "RxRPC: Unregistered security type %d '%s'\n",
	       sec->security_index, sec->name);
}

EXPORT_SYMBOL_GPL(rxrpc_unregister_security);
119 | |||
/*
 * initialise the security on a client connection
 * - a keyless connection needs no security and succeeds immediately
 * - otherwise the connection's key selects the security module
 */
int rxrpc_init_client_conn_security(struct rxrpc_connection *conn)
{
	struct rxrpc_security *sec;
	struct key *key = conn->key;
	int ret;

	_enter("{%d},{%x}", conn->debug_id, key_serial(key));

	/* anonymous connections don't use security */
	if (!key)
		return 0;

	ret = key_validate(key);
	if (ret < 0)
		return ret;

	/* type_data.x[0] carries the security index for rxrpc keys -
	 * presumably set when the key was instantiated; confirm against
	 * the rxrpc key type */
	sec = rxrpc_security_lookup(key->type_data.x[0]);
	if (!sec)
		return -EKEYREJECTED;
	conn->security = sec;

	ret = conn->security->init_connection_security(conn);
	if (ret < 0) {
		/* drop the module ref taken by the lookup on failure */
		rxrpc_security_put(conn->security);
		conn->security = NULL;
		return ret;
	}

	_leave(" = 0");
	return 0;
}
153 | |||
/*
 * initialise the security on a server connection
 * - looks up the security module for the connection's security index, then
 *   searches the listening socket's keyring for a matching server key
 * - on success, ownership of the key ref and the module ref passes to @conn
 */
int rxrpc_init_server_conn_security(struct rxrpc_connection *conn)
{
	struct rxrpc_security *sec;
	struct rxrpc_local *local = conn->trans->local;
	struct rxrpc_sock *rx;
	struct key *key;
	key_ref_t kref;
	char kdesc[5+1+3+1];	/* "65535" ':' "255" NUL */

	_enter("");

	sprintf(kdesc, "%u:%u", ntohs(conn->service_id), conn->security_ix);

	sec = rxrpc_security_lookup(conn->security_ix);
	if (!sec) {
		_leave(" = -ENOKEY [lookup]");
		return -ENOKEY;
	}

	/* find the service */
	read_lock_bh(&local->services_lock);
	list_for_each_entry(rx, &local->services, listen_link) {
		if (rx->service_id == conn->service_id)
			goto found_service;
	}

	/* the service appears to have died */
	read_unlock_bh(&local->services_lock);
	rxrpc_security_put(sec);
	_leave(" = -ENOENT");
	return -ENOENT;

found_service:
	if (!rx->securities) {
		read_unlock_bh(&local->services_lock);
		rxrpc_security_put(sec);
		_leave(" = -ENOKEY");
		return -ENOKEY;
	}

	/* look through the service's keyring; the keyring search is done
	 * under the services lock so rx can't go away beneath us */
	kref = keyring_search(make_key_ref(rx->securities, 1UL),
			      &key_type_rxrpc_s, kdesc);
	if (IS_ERR(kref)) {
		read_unlock_bh(&local->services_lock);
		rxrpc_security_put(sec);
		_leave(" = %ld [search]", PTR_ERR(kref));
		return PTR_ERR(kref);
	}

	key = key_ref_to_ptr(kref);
	read_unlock_bh(&local->services_lock);

	conn->server_key = key;
	conn->security = sec;

	_leave(" = 0");
	return 0;
}
216 | |||
/*
 * secure a packet prior to transmission
 * - a no-op (returns 0) on connections with no security module
 */
int rxrpc_secure_packet(const struct rxrpc_call *call,
			struct sk_buff *skb,
			size_t data_size,
			void *sechdr)
{
	if (call->conn->security)
		return call->conn->security->secure_packet(
			call, skb, data_size, sechdr);
	return 0;
}
230 | |||
/*
 * verify the security on a received packet
 * (original comment was a copy-paste of the transmit-side one)
 * - a no-op (returns 0) on connections with no security module
 */
int rxrpc_verify_packet(const struct rxrpc_call *call, struct sk_buff *skb,
			u32 *_abort_code)
{
	if (call->conn->security)
		return call->conn->security->verify_packet(
			call, skb, _abort_code);
	return 0;
}
242 | |||
/*
 * clear connection security
 * - releases the security module and drops both the client and the server
 *   key references (key_put(NULL) is a no-op)
 */
void rxrpc_clear_conn_security(struct rxrpc_connection *conn)
{
	_enter("{%d}", conn->debug_id);

	if (conn->security) {
		conn->security->clear(conn);
		rxrpc_security_put(conn->security);
		conn->security = NULL;
	}

	key_put(conn->key);
	key_put(conn->server_key);
}
diff --git a/net/rxrpc/ar-skbuff.c b/net/rxrpc/ar-skbuff.c new file mode 100644 index 000000000000..de755e04d29c --- /dev/null +++ b/net/rxrpc/ar-skbuff.c | |||
@@ -0,0 +1,132 @@ | |||
1 | /* ar-skbuff.c: socket buffer destruction handling | ||
2 | * | ||
3 | * Copyright (C) 2007 Red Hat, Inc. All Rights Reserved. | ||
4 | * Written by David Howells (dhowells@redhat.com) | ||
5 | * | ||
6 | * This program is free software; you can redistribute it and/or | ||
7 | * modify it under the terms of the GNU General Public License | ||
8 | * as published by the Free Software Foundation; either version | ||
9 | * 2 of the License, or (at your option) any later version. | ||
10 | */ | ||
11 | |||
12 | #include <linux/module.h> | ||
13 | #include <linux/net.h> | ||
14 | #include <linux/skbuff.h> | ||
15 | #include <net/sock.h> | ||
16 | #include <net/af_rxrpc.h> | ||
17 | #include "ar-internal.h" | ||
18 | |||
/*
 * set up for the ACK at the end of the receive phase when we discard the final
 * receive phase data packet
 * - called with softirqs disabled
 */
static void rxrpc_request_final_ACK(struct rxrpc_call *call)
{
	/* the call may be aborted before we have a chance to ACK it */
	write_lock(&call->state_lock);

	switch (call->state) {
	case RXRPC_CALL_CLIENT_RECV_REPLY:
		call->state = RXRPC_CALL_CLIENT_FINAL_ACK;
		_debug("request final ACK");

		/* get an extra ref on the call for the final-ACK generator to
		 * release */
		rxrpc_get_call(call);
		set_bit(RXRPC_CALL_ACK_FINAL, &call->events);
		if (try_to_del_timer_sync(&call->ack_timer) >= 0)
			rxrpc_queue_call(call);
		break;

	case RXRPC_CALL_SERVER_RECV_REQUEST:
		call->state = RXRPC_CALL_SERVER_ACK_REQUEST;
		/* fall through */
	default:
		break;
	}

	write_unlock(&call->state_lock);
}
50 | |||
/*
 * drop the bottom ACK off of the call ACK window and advance the window
 */
static void rxrpc_hard_ACK_data(struct rxrpc_call *call,
				struct rxrpc_skb_priv *sp)
{
	int loop;
	u32 seq;

	spin_lock_bh(&call->lock);

	_debug("hard ACK #%u", ntohl(sp->hdr.seq));

	/* shift the soft-ACK bitmap down one packet, pulling the bottom bit
	 * of each following word into the top bit of its predecessor
	 * NOTE(review): the last iteration reads ackr_window[ASZ] - relies
	 * on the array having RXRPC_ACKR_WINDOW_ASZ + 1 elements; confirm
	 * against the struct definition */
	for (loop = 0; loop < RXRPC_ACKR_WINDOW_ASZ; loop++) {
		call->ackr_window[loop] >>= 1;
		call->ackr_window[loop] |=
			call->ackr_window[loop + 1] << (BITS_PER_LONG - 1);
	}

	/* packets must be consumed strictly in sequence */
	seq = ntohl(sp->hdr.seq);
	ASSERTCMP(seq, ==, call->rx_data_eaten + 1);
	call->rx_data_eaten = seq;

	if (call->ackr_win_top < UINT_MAX)
		call->ackr_win_top++;

	ASSERTIFCMP(call->state <= RXRPC_CALL_COMPLETE,
		    call->rx_data_post, >=, call->rx_data_recv);
	ASSERTIFCMP(call->state <= RXRPC_CALL_COMPLETE,
		    call->rx_data_recv, >=, call->rx_data_eaten);

	if (sp->hdr.flags & RXRPC_LAST_PACKET) {
		rxrpc_request_final_ACK(call);
	} else if (atomic_dec_and_test(&call->ackr_not_idle) &&
		   test_and_clear_bit(RXRPC_CALL_TX_SOFT_ACK, &call->flags)) {
		/* everything soft-ACK'd has now been consumed */
		_debug("send Rx idle ACK");
		__rxrpc_propose_ACK(call, RXRPC_ACK_IDLE, sp->hdr.serial,
				    true);
	}

	spin_unlock_bh(&call->lock);
}
93 | |||
/*
 * destroy a packet that has an RxRPC control buffer
 * - advance the hard-ACK state of the parent call (done here in case something
 *   in the kernel bypasses recvmsg() and steals the packet directly off of the
 *   socket receive queue)
 */
void rxrpc_packet_destructor(struct sk_buff *skb)
{
	struct rxrpc_skb_priv *sp = rxrpc_skb(skb);
	struct rxrpc_call *call = sp->call;

	_enter("%p{%p}", skb, call);

	if (call) {
		/* send the final ACK on a client call */
		if (sp->hdr.type == RXRPC_PACKET_TYPE_DATA)
			rxrpc_hard_ACK_data(call, sp);
		/* drop the skb's ref on the call */
		rxrpc_put_call(call);
		sp->call = NULL;
	}

	/* release the socket's receive-memory accounting for this skb */
	if (skb->sk)
		sock_rfree(skb);
	_leave("");
}
119 | |||
/**
 * rxrpc_kernel_free_skb - Free an RxRPC socket buffer
 * @skb: The socket buffer to be freed
 *
 * Let RxRPC free its own socket buffer, permitting it to maintain debug
 * accounting.
 */
void rxrpc_kernel_free_skb(struct sk_buff *skb)
{
	/* thin wrapper so in-kernel users go through rxrpc's accounting */
	rxrpc_free_skb(skb);
}

EXPORT_SYMBOL(rxrpc_kernel_free_skb);
diff --git a/net/rxrpc/ar-transport.c b/net/rxrpc/ar-transport.c new file mode 100644 index 000000000000..d43d78f19302 --- /dev/null +++ b/net/rxrpc/ar-transport.c | |||
@@ -0,0 +1,276 @@ | |||
1 | /* RxRPC point-to-point transport session management | ||
2 | * | ||
3 | * Copyright (C) 2007 Red Hat, Inc. All Rights Reserved. | ||
4 | * Written by David Howells (dhowells@redhat.com) | ||
5 | * | ||
6 | * This program is free software; you can redistribute it and/or | ||
7 | * modify it under the terms of the GNU General Public License | ||
8 | * as published by the Free Software Foundation; either version | ||
9 | * 2 of the License, or (at your option) any later version. | ||
10 | */ | ||
11 | |||
12 | #include <linux/module.h> | ||
13 | #include <linux/net.h> | ||
14 | #include <linux/skbuff.h> | ||
15 | #include <net/sock.h> | ||
16 | #include <net/af_rxrpc.h> | ||
17 | #include "ar-internal.h" | ||
18 | |||
19 | static void rxrpc_transport_reaper(struct work_struct *work); | ||
20 | |||
21 | static LIST_HEAD(rxrpc_transports); | ||
22 | static DEFINE_RWLOCK(rxrpc_transport_lock); | ||
23 | static unsigned long rxrpc_transport_timeout = 3600 * 24; | ||
24 | static DECLARE_DELAYED_WORK(rxrpc_transport_reap, rxrpc_transport_reaper); | ||
25 | |||
/*
 * allocate a new transport session manager
 * - returns NULL on allocation failure
 * - only AF_INET/SOCK_DGRAM peers are supported; anything else is a BUG()
 */
static struct rxrpc_transport *rxrpc_alloc_transport(struct rxrpc_local *local,
						     struct rxrpc_peer *peer,
						     gfp_t gfp)
{
	struct rxrpc_transport *trans;

	_enter("");

	trans = kzalloc(sizeof(struct rxrpc_transport), gfp);
	if (trans) {
		/* the local and peer refs are only taken by the caller
		 * (rxrpc_get_transport) once insertion into the global
		 * list is certain */
		trans->local = local;
		trans->peer = peer;
		INIT_LIST_HEAD(&trans->link);
		trans->bundles = RB_ROOT;
		trans->client_conns = RB_ROOT;
		trans->server_conns = RB_ROOT;
		skb_queue_head_init(&trans->error_queue);
		spin_lock_init(&trans->client_lock);
		rwlock_init(&trans->conn_lock);
		atomic_set(&trans->usage, 1);
		trans->debug_id = atomic_inc_return(&rxrpc_debug_id);

		if (peer->srx.transport.family == AF_INET) {
			switch (peer->srx.transport_type) {
			case SOCK_DGRAM:
				INIT_WORK(&trans->error_handler,
					  rxrpc_UDP_error_handler);
				break;
			default:
				BUG();
				break;
			}
		} else {
			BUG();
		}
	}

	_leave(" = %p", trans);
	return trans;
}
69 | |||
70 | /* | ||
71 | * obtain a transport session for the nominated endpoints | ||
72 | */ | ||
73 | struct rxrpc_transport *rxrpc_get_transport(struct rxrpc_local *local, | ||
74 | struct rxrpc_peer *peer, | ||
75 | gfp_t gfp) | ||
76 | { | ||
77 | struct rxrpc_transport *trans, *candidate; | ||
78 | const char *new = "old"; | ||
79 | int usage; | ||
80 | |||
81 | _enter("{%u.%u.%u.%u+%hu},{%u.%u.%u.%u+%hu},", | ||
82 | NIPQUAD(local->srx.transport.sin.sin_addr), | ||
83 | ntohs(local->srx.transport.sin.sin_port), | ||
84 | NIPQUAD(peer->srx.transport.sin.sin_addr), | ||
85 | ntohs(peer->srx.transport.sin.sin_port)); | ||
86 | |||
87 | /* search the transport list first */ | ||
88 | read_lock_bh(&rxrpc_transport_lock); | ||
89 | list_for_each_entry(trans, &rxrpc_transports, link) { | ||
90 | if (trans->local == local && trans->peer == peer) | ||
91 | goto found_extant_transport; | ||
92 | } | ||
93 | read_unlock_bh(&rxrpc_transport_lock); | ||
94 | |||
95 | /* not yet present - create a candidate for a new record and then | ||
96 | * redo the search */ | ||
97 | candidate = rxrpc_alloc_transport(local, peer, gfp); | ||
98 | if (!candidate) { | ||
99 | _leave(" = -ENOMEM"); | ||
100 | return ERR_PTR(-ENOMEM); | ||
101 | } | ||
102 | |||
103 | write_lock_bh(&rxrpc_transport_lock); | ||
104 | |||
105 | list_for_each_entry(trans, &rxrpc_transports, link) { | ||
106 | if (trans->local == local && trans->peer == peer) | ||
107 | goto found_extant_second; | ||
108 | } | ||
109 | |||
110 | /* we can now add the new candidate to the list */ | ||
111 | trans = candidate; | ||
112 | candidate = NULL; | ||
113 | |||
114 | rxrpc_get_local(trans->local); | ||
115 | atomic_inc(&trans->peer->usage); | ||
116 | list_add_tail(&trans->link, &rxrpc_transports); | ||
117 | write_unlock_bh(&rxrpc_transport_lock); | ||
118 | new = "new"; | ||
119 | |||
120 | success: | ||
121 | _net("TRANSPORT %s %d local %d -> peer %d", | ||
122 | new, | ||
123 | trans->debug_id, | ||
124 | trans->local->debug_id, | ||
125 | trans->peer->debug_id); | ||
126 | |||
127 | _leave(" = %p {u=%d}", trans, atomic_read(&trans->usage)); | ||
128 | return trans; | ||
129 | |||
130 | /* we found the transport in the list immediately */ | ||
131 | found_extant_transport: | ||
132 | usage = atomic_inc_return(&trans->usage); | ||
133 | read_unlock_bh(&rxrpc_transport_lock); | ||
134 | goto success; | ||
135 | |||
136 | /* we found the transport on the second time through the list */ | ||
137 | found_extant_second: | ||
138 | usage = atomic_inc_return(&trans->usage); | ||
139 | write_unlock_bh(&rxrpc_transport_lock); | ||
140 | kfree(candidate); | ||
141 | goto success; | ||
142 | } | ||
143 | |||
/*
 * find the transport connecting two endpoints
 * - returns the transport with its usage count incremented, or NULL if no
 *   matching record exists
 */
struct rxrpc_transport *rxrpc_find_transport(struct rxrpc_local *local,
					     struct rxrpc_peer *peer)
{
	struct rxrpc_transport *trans;

	_enter("{%u.%u.%u.%u+%hu},{%u.%u.%u.%u+%hu},",
	       NIPQUAD(local->srx.transport.sin.sin_addr),
	       ntohs(local->srx.transport.sin.sin_port),
	       NIPQUAD(peer->srx.transport.sin.sin_addr),
	       ntohs(peer->srx.transport.sin.sin_port));

	/* search the transport list */
	read_lock_bh(&rxrpc_transport_lock);

	list_for_each_entry(trans, &rxrpc_transports, link) {
		if (trans->local == local && trans->peer == peer)
			goto found_extant_transport;
	}

	read_unlock_bh(&rxrpc_transport_lock);
	_leave(" = NULL");
	return NULL;

found_extant_transport:
	/* take the ref whilst still holding the lock so the record can't be
	 * reaped beneath us */
	atomic_inc(&trans->usage);
	read_unlock_bh(&rxrpc_transport_lock);
	_leave(" = %p", trans);
	return trans;
}
176 | |||
/*
 * release a transport session
 * - the record is not freed here; the put time is stamped and the reaper is
 *   kicked so the record can linger and be reused for a while
 */
void rxrpc_put_transport(struct rxrpc_transport *trans)
{
	_enter("%p{u=%d}", trans, atomic_read(&trans->usage));

	ASSERTCMP(atomic_read(&trans->usage), >, 0);

	/* NOTE(review): xtime is read without synchronisation - presumably
	 * second granularity makes a torn read tolerable; confirm */
	trans->put_time = xtime.tv_sec;
	if (unlikely(atomic_dec_and_test(&trans->usage)))
		_debug("zombie");
	/* let the reaper determine the timeout to avoid a race with
	 * overextending the timeout if the reaper is running at the
	 * same time */
	rxrpc_queue_delayed_work(&rxrpc_transport_reap, 0);
	_leave("");
}
195 | |||
/*
 * clean up a transport session
 * - releases the refs on the local endpoint and the peer taken when the
 *   record was inserted, and frees the record itself
 */
static void rxrpc_cleanup_transport(struct rxrpc_transport *trans)
{
	_net("DESTROY TRANS %d", trans->debug_id);

	/* discard any queued-but-unprocessed ICMP error packets */
	rxrpc_purge_queue(&trans->error_queue);

	rxrpc_put_local(trans->local);
	rxrpc_put_peer(trans->peer);
	kfree(trans);
}
209 | |||
/*
 * reap dead transports that have passed their expiry date
 * - moves unreferenced, expired records to a local graveyard under the lock,
 *   then destroys them outside it; reschedules itself for the next earliest
 *   expiry
 */
static void rxrpc_transport_reaper(struct work_struct *work)
{
	struct rxrpc_transport *trans, *_p;
	unsigned long now, earliest, reap_time;

	LIST_HEAD(graveyard);

	_enter("");

	now = xtime.tv_sec;
	earliest = ULONG_MAX;

	/* extract all the transports that have been dead too long */
	write_lock_bh(&rxrpc_transport_lock);
	list_for_each_entry_safe(trans, _p, &rxrpc_transports, link) {
		_debug("reap TRANS %d { u=%d t=%ld }",
		       trans->debug_id, atomic_read(&trans->usage),
		       (long) now - (long) trans->put_time);

		/* still referenced - skip */
		if (likely(atomic_read(&trans->usage) > 0))
			continue;

		reap_time = trans->put_time + rxrpc_transport_timeout;
		if (reap_time <= now)
			list_move_tail(&trans->link, &graveyard);
		else if (reap_time < earliest)
			earliest = reap_time;
	}
	write_unlock_bh(&rxrpc_transport_lock);

	/* come back when the next record falls due */
	if (earliest != ULONG_MAX) {
		_debug("reschedule reaper %ld", (long) earliest - now);
		ASSERTCMP(earliest, >, now);
		rxrpc_queue_delayed_work(&rxrpc_transport_reap,
					 (earliest - now) * HZ);
	}

	/* then destroy all those pulled out */
	while (!list_empty(&graveyard)) {
		trans = list_entry(graveyard.next, struct rxrpc_transport,
				   link);
		list_del_init(&trans->link);

		ASSERTCMP(atomic_read(&trans->usage), ==, 0);
		rxrpc_cleanup_transport(trans);
	}

	_leave("");
}
262 | |||
/*
 * preemptively destroy all the transport session records rather than waiting
 * for them to time out
 * - zeroes the timeout so everything unreferenced is expired, then requeues
 *   the reaper to run immediately
 */
void __exit rxrpc_destroy_all_transports(void)
{
	_enter("");

	rxrpc_transport_timeout = 0;
	cancel_delayed_work(&rxrpc_transport_reap);
	rxrpc_queue_delayed_work(&rxrpc_transport_reap, 0);

	_leave("");
}
diff --git a/net/rxrpc/call.c b/net/rxrpc/call.c deleted file mode 100644 index d07122b57e0d..000000000000 --- a/net/rxrpc/call.c +++ /dev/null | |||
@@ -1,2277 +0,0 @@ | |||
1 | /* call.c: Rx call routines | ||
2 | * | ||
3 | * Copyright (C) 2002 Red Hat, Inc. All Rights Reserved. | ||
4 | * Written by David Howells (dhowells@redhat.com) | ||
5 | * | ||
6 | * This program is free software; you can redistribute it and/or | ||
7 | * modify it under the terms of the GNU General Public License | ||
8 | * as published by the Free Software Foundation; either version | ||
9 | * 2 of the License, or (at your option) any later version. | ||
10 | */ | ||
11 | |||
12 | #include <linux/sched.h> | ||
13 | #include <linux/slab.h> | ||
14 | #include <linux/module.h> | ||
15 | #include <rxrpc/rxrpc.h> | ||
16 | #include <rxrpc/transport.h> | ||
17 | #include <rxrpc/peer.h> | ||
18 | #include <rxrpc/connection.h> | ||
19 | #include <rxrpc/call.h> | ||
20 | #include <rxrpc/message.h> | ||
21 | #include "internal.h" | ||
22 | |||
__RXACCT_DECL(atomic_t rxrpc_call_count);
__RXACCT_DECL(atomic_t rxrpc_message_count);

/* list of all calls in existence, guarded by rxrpc_calls_sem */
LIST_HEAD(rxrpc_calls);
DECLARE_RWSEM(rxrpc_calls_sem);

/* timeouts and resend ceiling, expressed in jiffies */
unsigned rxrpc_call_rcv_timeout = HZ/3;
static unsigned rxrpc_call_acks_timeout = HZ/3;
static unsigned rxrpc_call_dfr_ack_timeout = HZ/20;
static unsigned short rxrpc_call_max_resend = HZ/10;

/* names indexed by call->app_call_state (see _state macro below) */
const char *rxrpc_call_states[] = {
	"COMPLETE",
	"ERROR",
	"SRVR_RCV_OPID",
	"SRVR_RCV_ARGS",
	"SRVR_GOT_ARGS",
	"SRVR_SND_REPLY",
	"SRVR_RCV_FINAL_ACK",
	"CLNT_SND_ARGS",
	"CLNT_RCV_REPLY",
	"CLNT_GOT_REPLY"
};

/* names indexed by call->app_err_state */
const char *rxrpc_call_error_states[] = {
	"NO_ERROR",
	"LOCAL_ABORT",
	"PEER_ABORT",
	"LOCAL_ERROR",
	"REMOTE_ERROR"
};

/* packet type names indexed by RxRPC packet type number */
const char *rxrpc_pkts[] = {
	"?00",
	"data", "ack", "busy", "abort", "ackall", "chall", "resp", "debug",
	"?09", "?10", "?11", "?12", "?13", "?14", "?15"
};

/* ACK reason names for debug logging */
static const char *rxrpc_acks[] = {
	"---", "REQ", "DUP", "SEQ", "WIN", "MEM", "PNG", "PNR", "DLY", "IDL",
	"-?-"
};

static const char _acktype[] = "NA-";

/* forward declarations */
static void rxrpc_call_receive_packet(struct rxrpc_call *call);
static void rxrpc_call_receive_data_packet(struct rxrpc_call *call,
					   struct rxrpc_message *msg);
static void rxrpc_call_receive_ack_packet(struct rxrpc_call *call,
					  struct rxrpc_message *msg);
static void rxrpc_call_definitively_ACK(struct rxrpc_call *call,
					rxrpc_seq_t higest); /* sic: "higest" */
static void rxrpc_call_resend(struct rxrpc_call *call, rxrpc_seq_t highest);
static int __rxrpc_call_read_data(struct rxrpc_call *call);

static int rxrpc_call_record_ACK(struct rxrpc_call *call,
				 struct rxrpc_message *msg,
				 rxrpc_seq_t seq,
				 size_t count);

static int rxrpc_call_flush(struct rxrpc_call *call);

/* log the current application call state of @call */
#define _state(call) \
	_debug("[[[ state %s ]]]", rxrpc_call_states[call->app_call_state]);
87 | |||
/* default attention handler - just wake anyone waiting on the call */
static void rxrpc_call_default_attn_func(struct rxrpc_call *call)
{
	wake_up(&call->waitq);
}
92 | |||
/* default error handler - just wake anyone waiting on the call */
static void rxrpc_call_default_error_func(struct rxrpc_call *call)
{
	wake_up(&call->waitq);
}
97 | |||
/* default abort/errno mapping
 * - a local abort derives its abort code from the errno; both kinds of abort
 *   then report -ECONNABORTED to the application */
static void rxrpc_call_default_aemap_func(struct rxrpc_call *call)
{
	switch (call->app_err_state) {
	case RXRPC_ESTATE_LOCAL_ABORT:
		call->app_abort_code = -call->app_errno;
		/* fall through */
	case RXRPC_ESTATE_PEER_ABORT:
		call->app_errno = -ECONNABORTED;
		/* fall through */
	default:
		break;
	}
}
109 | |||
/* ACK-expectation timer expired - flag the call and hand it to krxiod */
static void __rxrpc_call_acks_timeout(unsigned long _call)
{
	struct rxrpc_call *call = (struct rxrpc_call *) _call;

	_debug("ACKS TIMEOUT %05lu", jiffies - call->cjif);

	call->flags |= RXRPC_CALL_ACKS_TIMO;
	rxrpc_krxiod_queue_call(call);
}
119 | |||
/* receive timer expired - flag the call and hand it to krxiod */
static void __rxrpc_call_rcv_timeout(unsigned long _call)
{
	struct rxrpc_call *call = (struct rxrpc_call *) _call;

	_debug("RCV TIMEOUT %05lu", jiffies - call->cjif);

	call->flags |= RXRPC_CALL_RCV_TIMO;
	rxrpc_krxiod_queue_call(call);
}
129 | |||
/* deferred-ACK timer expired - flag the call and hand it to krxiod */
static void __rxrpc_call_ackr_timeout(unsigned long _call)
{
	struct rxrpc_call *call = (struct rxrpc_call *) _call;

	_debug("ACKR TIMEOUT %05lu",jiffies - call->cjif);

	call->flags |= RXRPC_CALL_ACKR_TIMO;
	rxrpc_krxiod_queue_call(call);
}
139 | |||
/*****************************************************************************/
/*
 * calculate a timeout based on an RTT value
 * - converts the peer's RTT (microseconds) to jiffies, pads it, clamps the
 *   result to [HZ/25, HZ] and returns it as an absolute jiffies deadline
 * - NOTE(review): the @val parameter is never used - presumably vestigial;
 *   confirm before relying on it
 */
static inline unsigned long __rxrpc_rtt_based_timeout(struct rxrpc_call *call,
						      unsigned long val)
{
	unsigned long expiry = call->conn->peer->rtt / (1000000 / HZ);

	expiry += 10;
	if (expiry < HZ / 25)
		expiry = HZ / 25;
	if (expiry > HZ)
		expiry = HZ;

	_leave(" = %lu jiffies", expiry);
	return jiffies + expiry;
} /* end __rxrpc_rtt_based_timeout() */
158 | |||
/*****************************************************************************/
/*
 * create a new call record
 * - allocates a whole zeroed page per call and initialises lists, timers and
 *   default application callbacks; returns 0 or -ENOMEM
 */
static inline int __rxrpc_create_call(struct rxrpc_connection *conn,
				      struct rxrpc_call **_call)
{
	struct rxrpc_call *call;

	_enter("%p", conn);

	/* allocate and initialise a call record */
	call = (struct rxrpc_call *) get_zeroed_page(GFP_KERNEL);
	if (!call) {
		_leave(" ENOMEM");
		return -ENOMEM;
	}

	atomic_set(&call->usage, 1);

	init_waitqueue_head(&call->waitq);
	spin_lock_init(&call->lock);
	INIT_LIST_HEAD(&call->link);
	INIT_LIST_HEAD(&call->acks_pendq);
	INIT_LIST_HEAD(&call->rcv_receiveq);
	INIT_LIST_HEAD(&call->rcv_krxiodq_lk);
	INIT_LIST_HEAD(&call->app_readyq);
	INIT_LIST_HEAD(&call->app_unreadyq);
	INIT_LIST_HEAD(&call->app_link);
	INIT_LIST_HEAD(&call->app_attn_link);

	init_timer(&call->acks_timeout);
	call->acks_timeout.data = (unsigned long) call;
	call->acks_timeout.function = __rxrpc_call_acks_timeout;

	init_timer(&call->rcv_timeout);
	call->rcv_timeout.data = (unsigned long) call;
	call->rcv_timeout.function = __rxrpc_call_rcv_timeout;

	init_timer(&call->ackr_dfr_timo);
	call->ackr_dfr_timo.data = (unsigned long) call;
	call->ackr_dfr_timo.function = __rxrpc_call_ackr_timeout;

	/* note: no ref is taken on conn here - the callers own one */
	call->conn = conn;
	call->ackr_win_bot = 1;
	call->ackr_win_top = call->ackr_win_bot + RXRPC_CALL_ACK_WINDOW_SIZE - 1;
	call->ackr_prev_seq = 0;
	call->app_mark = RXRPC_APP_MARK_EOF;
	call->app_attn_func = rxrpc_call_default_attn_func;
	call->app_error_func = rxrpc_call_default_error_func;
	call->app_aemap_func = rxrpc_call_default_aemap_func;
	call->app_scr_alloc = call->app_scratch;

	/* record creation time for the timeout debug messages */
	call->cjif = jiffies;

	_leave(" = 0 (%p)", call);

	*_call = call;

	return 0;
} /* end __rxrpc_create_call() */
220 | |||
221 | /*****************************************************************************/ | ||
222 | /* | ||
223 | * create a new call record for outgoing calls | ||
224 | */ | ||
225 | int rxrpc_create_call(struct rxrpc_connection *conn, | ||
226 | rxrpc_call_attn_func_t attn, | ||
227 | rxrpc_call_error_func_t error, | ||
228 | rxrpc_call_aemap_func_t aemap, | ||
229 | struct rxrpc_call **_call) | ||
230 | { | ||
231 | DECLARE_WAITQUEUE(myself, current); | ||
232 | |||
233 | struct rxrpc_call *call; | ||
234 | int ret, cix, loop; | ||
235 | |||
236 | _enter("%p", conn); | ||
237 | |||
238 | /* allocate and initialise a call record */ | ||
239 | ret = __rxrpc_create_call(conn, &call); | ||
240 | if (ret < 0) { | ||
241 | _leave(" = %d", ret); | ||
242 | return ret; | ||
243 | } | ||
244 | |||
245 | call->app_call_state = RXRPC_CSTATE_CLNT_SND_ARGS; | ||
246 | if (attn) | ||
247 | call->app_attn_func = attn; | ||
248 | if (error) | ||
249 | call->app_error_func = error; | ||
250 | if (aemap) | ||
251 | call->app_aemap_func = aemap; | ||
252 | |||
253 | _state(call); | ||
254 | |||
255 | spin_lock(&conn->lock); | ||
256 | set_current_state(TASK_INTERRUPTIBLE); | ||
257 | add_wait_queue(&conn->chanwait, &myself); | ||
258 | |||
259 | try_again: | ||
260 | /* try to find an unused channel */ | ||
261 | for (cix = 0; cix < 4; cix++) | ||
262 | if (!conn->channels[cix]) | ||
263 | goto obtained_chan; | ||
264 | |||
265 | /* no free channels - wait for one to become available */ | ||
266 | ret = -EINTR; | ||
267 | if (signal_pending(current)) | ||
268 | goto error_unwait; | ||
269 | |||
270 | spin_unlock(&conn->lock); | ||
271 | |||
272 | schedule(); | ||
273 | set_current_state(TASK_INTERRUPTIBLE); | ||
274 | |||
275 | spin_lock(&conn->lock); | ||
276 | goto try_again; | ||
277 | |||
278 | /* got a channel - now attach to the connection */ | ||
279 | obtained_chan: | ||
280 | remove_wait_queue(&conn->chanwait, &myself); | ||
281 | set_current_state(TASK_RUNNING); | ||
282 | |||
283 | /* concoct a unique call number */ | ||
284 | next_callid: | ||
285 | call->call_id = htonl(++conn->call_counter); | ||
286 | for (loop = 0; loop < 4; loop++) | ||
287 | if (conn->channels[loop] && | ||
288 | conn->channels[loop]->call_id == call->call_id) | ||
289 | goto next_callid; | ||
290 | |||
291 | rxrpc_get_connection(conn); | ||
292 | conn->channels[cix] = call; /* assign _after_ done callid check loop */ | ||
293 | do_gettimeofday(&conn->atime); | ||
294 | call->chan_ix = htonl(cix); | ||
295 | |||
296 | spin_unlock(&conn->lock); | ||
297 | |||
298 | down_write(&rxrpc_calls_sem); | ||
299 | list_add_tail(&call->call_link, &rxrpc_calls); | ||
300 | up_write(&rxrpc_calls_sem); | ||
301 | |||
302 | __RXACCT(atomic_inc(&rxrpc_call_count)); | ||
303 | *_call = call; | ||
304 | |||
305 | _leave(" = 0 (call=%p cix=%u)", call, cix); | ||
306 | return 0; | ||
307 | |||
308 | error_unwait: | ||
309 | remove_wait_queue(&conn->chanwait, &myself); | ||
310 | set_current_state(TASK_RUNNING); | ||
311 | spin_unlock(&conn->lock); | ||
312 | |||
313 | free_page((unsigned long) call); | ||
314 | _leave(" = %d", ret); | ||
315 | return ret; | ||
316 | } /* end rxrpc_create_call() */ | ||
317 | |||
318 | /*****************************************************************************/ | ||
319 | /* | ||
320 | * create a new call record for incoming calls | ||
321 | */ | ||
322 | int rxrpc_incoming_call(struct rxrpc_connection *conn, | ||
323 | struct rxrpc_message *msg, | ||
324 | struct rxrpc_call **_call) | ||
325 | { | ||
326 | struct rxrpc_call *call; | ||
327 | unsigned cix; | ||
328 | int ret; | ||
329 | |||
330 | cix = ntohl(msg->hdr.cid) & RXRPC_CHANNELMASK; | ||
331 | |||
332 | _enter("%p,%u,%u", conn, ntohl(msg->hdr.callNumber), cix); | ||
333 | |||
334 | /* allocate and initialise a call record */ | ||
335 | ret = __rxrpc_create_call(conn, &call); | ||
336 | if (ret < 0) { | ||
337 | _leave(" = %d", ret); | ||
338 | return ret; | ||
339 | } | ||
340 | |||
341 | call->pkt_rcv_count = 1; | ||
342 | call->app_call_state = RXRPC_CSTATE_SRVR_RCV_OPID; | ||
343 | call->app_mark = sizeof(uint32_t); | ||
344 | |||
345 | _state(call); | ||
346 | |||
347 | /* attach to the connection */ | ||
348 | ret = -EBUSY; | ||
349 | call->chan_ix = htonl(cix); | ||
350 | call->call_id = msg->hdr.callNumber; | ||
351 | |||
352 | spin_lock(&conn->lock); | ||
353 | |||
354 | if (!conn->channels[cix] || | ||
355 | conn->channels[cix]->app_call_state == RXRPC_CSTATE_COMPLETE || | ||
356 | conn->channels[cix]->app_call_state == RXRPC_CSTATE_ERROR | ||
357 | ) { | ||
358 | conn->channels[cix] = call; | ||
359 | rxrpc_get_connection(conn); | ||
360 | ret = 0; | ||
361 | } | ||
362 | |||
363 | spin_unlock(&conn->lock); | ||
364 | |||
365 | if (ret < 0) { | ||
366 | free_page((unsigned long) call); | ||
367 | call = NULL; | ||
368 | } | ||
369 | |||
370 | if (ret == 0) { | ||
371 | down_write(&rxrpc_calls_sem); | ||
372 | list_add_tail(&call->call_link, &rxrpc_calls); | ||
373 | up_write(&rxrpc_calls_sem); | ||
374 | __RXACCT(atomic_inc(&rxrpc_call_count)); | ||
375 | *_call = call; | ||
376 | } | ||
377 | |||
378 | _leave(" = %d [%p]", ret, call); | ||
379 | return ret; | ||
380 | } /* end rxrpc_incoming_call() */ | ||
381 | |||
382 | /*****************************************************************************/ | ||
383 | /* | ||
384 | * free a call record | ||
385 | */ | ||
386 | void rxrpc_put_call(struct rxrpc_call *call) | ||
387 | { | ||
388 | struct rxrpc_connection *conn = call->conn; | ||
389 | struct rxrpc_message *msg; | ||
390 | |||
391 | _enter("%p{u=%d}",call,atomic_read(&call->usage)); | ||
392 | |||
393 | /* sanity check */ | ||
394 | if (atomic_read(&call->usage) <= 0) | ||
395 | BUG(); | ||
396 | |||
397 | /* to prevent a race, the decrement and the de-list must be effectively | ||
398 | * atomic */ | ||
399 | spin_lock(&conn->lock); | ||
400 | if (likely(!atomic_dec_and_test(&call->usage))) { | ||
401 | spin_unlock(&conn->lock); | ||
402 | _leave(""); | ||
403 | return; | ||
404 | } | ||
405 | |||
406 | if (conn->channels[ntohl(call->chan_ix)] == call) | ||
407 | conn->channels[ntohl(call->chan_ix)] = NULL; | ||
408 | |||
409 | spin_unlock(&conn->lock); | ||
410 | |||
411 | wake_up(&conn->chanwait); | ||
412 | |||
413 | rxrpc_put_connection(conn); | ||
414 | |||
415 | /* clear the timers and dequeue from krxiod */ | ||
416 | del_timer_sync(&call->acks_timeout); | ||
417 | del_timer_sync(&call->rcv_timeout); | ||
418 | del_timer_sync(&call->ackr_dfr_timo); | ||
419 | |||
420 | rxrpc_krxiod_dequeue_call(call); | ||
421 | |||
422 | /* clean up the contents of the struct */ | ||
423 | if (call->snd_nextmsg) | ||
424 | rxrpc_put_message(call->snd_nextmsg); | ||
425 | |||
426 | if (call->snd_ping) | ||
427 | rxrpc_put_message(call->snd_ping); | ||
428 | |||
429 | while (!list_empty(&call->acks_pendq)) { | ||
430 | msg = list_entry(call->acks_pendq.next, | ||
431 | struct rxrpc_message, link); | ||
432 | list_del(&msg->link); | ||
433 | rxrpc_put_message(msg); | ||
434 | } | ||
435 | |||
436 | while (!list_empty(&call->rcv_receiveq)) { | ||
437 | msg = list_entry(call->rcv_receiveq.next, | ||
438 | struct rxrpc_message, link); | ||
439 | list_del(&msg->link); | ||
440 | rxrpc_put_message(msg); | ||
441 | } | ||
442 | |||
443 | while (!list_empty(&call->app_readyq)) { | ||
444 | msg = list_entry(call->app_readyq.next, | ||
445 | struct rxrpc_message, link); | ||
446 | list_del(&msg->link); | ||
447 | rxrpc_put_message(msg); | ||
448 | } | ||
449 | |||
450 | while (!list_empty(&call->app_unreadyq)) { | ||
451 | msg = list_entry(call->app_unreadyq.next, | ||
452 | struct rxrpc_message, link); | ||
453 | list_del(&msg->link); | ||
454 | rxrpc_put_message(msg); | ||
455 | } | ||
456 | |||
457 | module_put(call->owner); | ||
458 | |||
459 | down_write(&rxrpc_calls_sem); | ||
460 | list_del(&call->call_link); | ||
461 | up_write(&rxrpc_calls_sem); | ||
462 | |||
463 | __RXACCT(atomic_dec(&rxrpc_call_count)); | ||
464 | free_page((unsigned long) call); | ||
465 | |||
466 | _leave(" [destroyed]"); | ||
467 | } /* end rxrpc_put_call() */ | ||
468 | |||
469 | /*****************************************************************************/ | ||
470 | /* | ||
471 | * actually generate a normal ACK | ||
472 | */ | ||
473 | static inline int __rxrpc_call_gen_normal_ACK(struct rxrpc_call *call, | ||
474 | rxrpc_seq_t seq) | ||
475 | { | ||
476 | struct rxrpc_message *msg; | ||
477 | struct kvec diov[3]; | ||
478 | __be32 aux[4]; | ||
479 | int delta, ret; | ||
480 | |||
481 | /* ACKs default to DELAY */ | ||
482 | if (!call->ackr.reason) | ||
483 | call->ackr.reason = RXRPC_ACK_DELAY; | ||
484 | |||
485 | _proto("Rx %05lu Sending ACK { m=%hu f=#%u p=#%u s=%%%u r=%s n=%u }", | ||
486 | jiffies - call->cjif, | ||
487 | ntohs(call->ackr.maxSkew), | ||
488 | ntohl(call->ackr.firstPacket), | ||
489 | ntohl(call->ackr.previousPacket), | ||
490 | ntohl(call->ackr.serial), | ||
491 | rxrpc_acks[call->ackr.reason], | ||
492 | call->ackr.nAcks); | ||
493 | |||
494 | aux[0] = htonl(call->conn->peer->if_mtu); /* interface MTU */ | ||
495 | aux[1] = htonl(1444); /* max MTU */ | ||
496 | aux[2] = htonl(16); /* rwind */ | ||
497 | aux[3] = htonl(4); /* max packets */ | ||
498 | |||
499 | diov[0].iov_len = sizeof(struct rxrpc_ackpacket); | ||
500 | diov[0].iov_base = &call->ackr; | ||
501 | diov[1].iov_len = call->ackr_pend_cnt + 3; | ||
502 | diov[1].iov_base = call->ackr_array; | ||
503 | diov[2].iov_len = sizeof(aux); | ||
504 | diov[2].iov_base = &aux; | ||
505 | |||
506 | /* build and send the message */ | ||
507 | ret = rxrpc_conn_newmsg(call->conn,call, RXRPC_PACKET_TYPE_ACK, | ||
508 | 3, diov, GFP_KERNEL, &msg); | ||
509 | if (ret < 0) | ||
510 | goto out; | ||
511 | |||
512 | msg->seq = seq; | ||
513 | msg->hdr.seq = htonl(seq); | ||
514 | msg->hdr.flags |= RXRPC_SLOW_START_OK; | ||
515 | |||
516 | ret = rxrpc_conn_sendmsg(call->conn, msg); | ||
517 | rxrpc_put_message(msg); | ||
518 | if (ret < 0) | ||
519 | goto out; | ||
520 | call->pkt_snd_count++; | ||
521 | |||
522 | /* count how many actual ACKs there were at the front */ | ||
523 | for (delta = 0; delta < call->ackr_pend_cnt; delta++) | ||
524 | if (call->ackr_array[delta] != RXRPC_ACK_TYPE_ACK) | ||
525 | break; | ||
526 | |||
527 | call->ackr_pend_cnt -= delta; /* all ACK'd to this point */ | ||
528 | |||
529 | /* crank the ACK window around */ | ||
530 | if (delta == 0) { | ||
531 | /* un-ACK'd window */ | ||
532 | } | ||
533 | else if (delta < RXRPC_CALL_ACK_WINDOW_SIZE) { | ||
534 | /* partially ACK'd window | ||
535 | * - shuffle down to avoid losing out-of-sequence packets | ||
536 | */ | ||
537 | call->ackr_win_bot += delta; | ||
538 | call->ackr_win_top += delta; | ||
539 | |||
540 | memmove(&call->ackr_array[0], | ||
541 | &call->ackr_array[delta], | ||
542 | call->ackr_pend_cnt); | ||
543 | |||
544 | memset(&call->ackr_array[call->ackr_pend_cnt], | ||
545 | RXRPC_ACK_TYPE_NACK, | ||
546 | sizeof(call->ackr_array) - call->ackr_pend_cnt); | ||
547 | } | ||
548 | else { | ||
549 | /* fully ACK'd window | ||
550 | * - just clear the whole thing | ||
551 | */ | ||
552 | memset(&call->ackr_array, | ||
553 | RXRPC_ACK_TYPE_NACK, | ||
554 | sizeof(call->ackr_array)); | ||
555 | } | ||
556 | |||
557 | /* clear this ACK */ | ||
558 | memset(&call->ackr, 0, sizeof(call->ackr)); | ||
559 | |||
560 | out: | ||
561 | if (!call->app_call_state) | ||
562 | printk("___ STATE 0 ___\n"); | ||
563 | return ret; | ||
564 | } /* end __rxrpc_call_gen_normal_ACK() */ | ||
565 | |||
566 | /*****************************************************************************/ | ||
567 | /* | ||
568 | * note the reception of a packet in the call's ACK records and generate an | ||
569 | * appropriate ACK packet if necessary | ||
570 | * - returns 0 if packet should be processed, 1 if packet should be ignored | ||
571 | * and -ve on an error | ||
572 | */ | ||
573 | static int rxrpc_call_generate_ACK(struct rxrpc_call *call, | ||
574 | struct rxrpc_header *hdr, | ||
575 | struct rxrpc_ackpacket *ack) | ||
576 | { | ||
577 | struct rxrpc_message *msg; | ||
578 | rxrpc_seq_t seq; | ||
579 | unsigned offset; | ||
580 | int ret = 0, err; | ||
581 | u8 special_ACK, do_ACK, force; | ||
582 | |||
583 | _enter("%p,%p { seq=%d tp=%d fl=%02x }", | ||
584 | call, hdr, ntohl(hdr->seq), hdr->type, hdr->flags); | ||
585 | |||
586 | seq = ntohl(hdr->seq); | ||
587 | offset = seq - call->ackr_win_bot; | ||
588 | do_ACK = RXRPC_ACK_DELAY; | ||
589 | special_ACK = 0; | ||
590 | force = (seq == 1); | ||
591 | |||
592 | if (call->ackr_high_seq < seq) | ||
593 | call->ackr_high_seq = seq; | ||
594 | |||
595 | /* deal with generation of obvious special ACKs first */ | ||
596 | if (ack && ack->reason == RXRPC_ACK_PING) { | ||
597 | special_ACK = RXRPC_ACK_PING_RESPONSE; | ||
598 | ret = 1; | ||
599 | goto gen_ACK; | ||
600 | } | ||
601 | |||
602 | if (seq < call->ackr_win_bot) { | ||
603 | special_ACK = RXRPC_ACK_DUPLICATE; | ||
604 | ret = 1; | ||
605 | goto gen_ACK; | ||
606 | } | ||
607 | |||
608 | if (seq >= call->ackr_win_top) { | ||
609 | special_ACK = RXRPC_ACK_EXCEEDS_WINDOW; | ||
610 | ret = 1; | ||
611 | goto gen_ACK; | ||
612 | } | ||
613 | |||
614 | if (call->ackr_array[offset] != RXRPC_ACK_TYPE_NACK) { | ||
615 | special_ACK = RXRPC_ACK_DUPLICATE; | ||
616 | ret = 1; | ||
617 | goto gen_ACK; | ||
618 | } | ||
619 | |||
620 | /* okay... it's a normal data packet inside the ACK window */ | ||
621 | call->ackr_array[offset] = RXRPC_ACK_TYPE_ACK; | ||
622 | |||
623 | if (offset < call->ackr_pend_cnt) { | ||
624 | } | ||
625 | else if (offset > call->ackr_pend_cnt) { | ||
626 | do_ACK = RXRPC_ACK_OUT_OF_SEQUENCE; | ||
627 | call->ackr_pend_cnt = offset; | ||
628 | goto gen_ACK; | ||
629 | } | ||
630 | |||
631 | if (hdr->flags & RXRPC_REQUEST_ACK) { | ||
632 | do_ACK = RXRPC_ACK_REQUESTED; | ||
633 | } | ||
634 | |||
635 | /* generate an ACK on the final packet of a reply just received */ | ||
636 | if (hdr->flags & RXRPC_LAST_PACKET) { | ||
637 | if (call->conn->out_clientflag) | ||
638 | force = 1; | ||
639 | } | ||
640 | else if (!(hdr->flags & RXRPC_MORE_PACKETS)) { | ||
641 | do_ACK = RXRPC_ACK_REQUESTED; | ||
642 | } | ||
643 | |||
644 | /* re-ACK packets previously received out-of-order */ | ||
645 | for (offset++; offset < RXRPC_CALL_ACK_WINDOW_SIZE; offset++) | ||
646 | if (call->ackr_array[offset] != RXRPC_ACK_TYPE_ACK) | ||
647 | break; | ||
648 | |||
649 | call->ackr_pend_cnt = offset; | ||
650 | |||
651 | /* generate an ACK if we fill up the window */ | ||
652 | if (call->ackr_pend_cnt >= RXRPC_CALL_ACK_WINDOW_SIZE) | ||
653 | force = 1; | ||
654 | |||
655 | gen_ACK: | ||
656 | _debug("%05lu ACKs pend=%u norm=%s special=%s%s", | ||
657 | jiffies - call->cjif, | ||
658 | call->ackr_pend_cnt, | ||
659 | rxrpc_acks[do_ACK], | ||
660 | rxrpc_acks[special_ACK], | ||
661 | force ? " immediate" : | ||
662 | do_ACK == RXRPC_ACK_REQUESTED ? " merge-req" : | ||
663 | hdr->flags & RXRPC_LAST_PACKET ? " finalise" : | ||
664 | " defer" | ||
665 | ); | ||
666 | |||
667 | /* send any pending normal ACKs if need be */ | ||
668 | if (call->ackr_pend_cnt > 0) { | ||
669 | /* fill out the appropriate form */ | ||
670 | call->ackr.bufferSpace = htons(RXRPC_CALL_ACK_WINDOW_SIZE); | ||
671 | call->ackr.maxSkew = htons(min(call->ackr_high_seq - seq, | ||
672 | 65535U)); | ||
673 | call->ackr.firstPacket = htonl(call->ackr_win_bot); | ||
674 | call->ackr.previousPacket = call->ackr_prev_seq; | ||
675 | call->ackr.serial = hdr->serial; | ||
676 | call->ackr.nAcks = call->ackr_pend_cnt; | ||
677 | |||
678 | if (do_ACK == RXRPC_ACK_REQUESTED) | ||
679 | call->ackr.reason = do_ACK; | ||
680 | |||
681 | /* generate the ACK immediately if necessary */ | ||
682 | if (special_ACK || force) { | ||
683 | err = __rxrpc_call_gen_normal_ACK( | ||
684 | call, do_ACK == RXRPC_ACK_DELAY ? 0 : seq); | ||
685 | if (err < 0) { | ||
686 | ret = err; | ||
687 | goto out; | ||
688 | } | ||
689 | } | ||
690 | } | ||
691 | |||
692 | if (call->ackr.reason == RXRPC_ACK_REQUESTED) | ||
693 | call->ackr_dfr_seq = seq; | ||
694 | |||
695 | /* start the ACK timer if not running if there are any pending deferred | ||
696 | * ACKs */ | ||
697 | if (call->ackr_pend_cnt > 0 && | ||
698 | call->ackr.reason != RXRPC_ACK_REQUESTED && | ||
699 | !timer_pending(&call->ackr_dfr_timo) | ||
700 | ) { | ||
701 | unsigned long timo; | ||
702 | |||
703 | timo = rxrpc_call_dfr_ack_timeout + jiffies; | ||
704 | |||
705 | _debug("START ACKR TIMER for cj=%lu", timo - call->cjif); | ||
706 | |||
707 | spin_lock(&call->lock); | ||
708 | mod_timer(&call->ackr_dfr_timo, timo); | ||
709 | spin_unlock(&call->lock); | ||
710 | } | ||
711 | else if ((call->ackr_pend_cnt == 0 || | ||
712 | call->ackr.reason == RXRPC_ACK_REQUESTED) && | ||
713 | timer_pending(&call->ackr_dfr_timo) | ||
714 | ) { | ||
715 | /* stop timer if no pending ACKs */ | ||
716 | _debug("CLEAR ACKR TIMER"); | ||
717 | del_timer_sync(&call->ackr_dfr_timo); | ||
718 | } | ||
719 | |||
720 | /* send a special ACK if one is required */ | ||
721 | if (special_ACK) { | ||
722 | struct rxrpc_ackpacket ack; | ||
723 | struct kvec diov[2]; | ||
724 | uint8_t acks[1] = { RXRPC_ACK_TYPE_ACK }; | ||
725 | |||
726 | /* fill out the appropriate form */ | ||
727 | ack.bufferSpace = htons(RXRPC_CALL_ACK_WINDOW_SIZE); | ||
728 | ack.maxSkew = htons(min(call->ackr_high_seq - seq, | ||
729 | 65535U)); | ||
730 | ack.firstPacket = htonl(call->ackr_win_bot); | ||
731 | ack.previousPacket = call->ackr_prev_seq; | ||
732 | ack.serial = hdr->serial; | ||
733 | ack.reason = special_ACK; | ||
734 | ack.nAcks = 0; | ||
735 | |||
736 | _proto("Rx Sending s-ACK" | ||
737 | " { m=%hu f=#%u p=#%u s=%%%u r=%s n=%u }", | ||
738 | ntohs(ack.maxSkew), | ||
739 | ntohl(ack.firstPacket), | ||
740 | ntohl(ack.previousPacket), | ||
741 | ntohl(ack.serial), | ||
742 | rxrpc_acks[ack.reason], | ||
743 | ack.nAcks); | ||
744 | |||
745 | diov[0].iov_len = sizeof(struct rxrpc_ackpacket); | ||
746 | diov[0].iov_base = &ack; | ||
747 | diov[1].iov_len = sizeof(acks); | ||
748 | diov[1].iov_base = acks; | ||
749 | |||
750 | /* build and send the message */ | ||
751 | err = rxrpc_conn_newmsg(call->conn,call, RXRPC_PACKET_TYPE_ACK, | ||
752 | hdr->seq ? 2 : 1, diov, | ||
753 | GFP_KERNEL, | ||
754 | &msg); | ||
755 | if (err < 0) { | ||
756 | ret = err; | ||
757 | goto out; | ||
758 | } | ||
759 | |||
760 | msg->seq = seq; | ||
761 | msg->hdr.seq = htonl(seq); | ||
762 | msg->hdr.flags |= RXRPC_SLOW_START_OK; | ||
763 | |||
764 | err = rxrpc_conn_sendmsg(call->conn, msg); | ||
765 | rxrpc_put_message(msg); | ||
766 | if (err < 0) { | ||
767 | ret = err; | ||
768 | goto out; | ||
769 | } | ||
770 | call->pkt_snd_count++; | ||
771 | } | ||
772 | |||
773 | out: | ||
774 | if (hdr->seq) | ||
775 | call->ackr_prev_seq = hdr->seq; | ||
776 | |||
777 | _leave(" = %d", ret); | ||
778 | return ret; | ||
779 | } /* end rxrpc_call_generate_ACK() */ | ||
780 | |||
781 | /*****************************************************************************/ | ||
782 | /* | ||
783 | * handle work to be done on a call | ||
784 | * - includes packet reception and timeout processing | ||
785 | */ | ||
786 | void rxrpc_call_do_stuff(struct rxrpc_call *call) | ||
787 | { | ||
788 | _enter("%p{flags=%lx}", call, call->flags); | ||
789 | |||
790 | /* handle packet reception */ | ||
791 | if (call->flags & RXRPC_CALL_RCV_PKT) { | ||
792 | _debug("- receive packet"); | ||
793 | call->flags &= ~RXRPC_CALL_RCV_PKT; | ||
794 | rxrpc_call_receive_packet(call); | ||
795 | } | ||
796 | |||
797 | /* handle overdue ACKs */ | ||
798 | if (call->flags & RXRPC_CALL_ACKS_TIMO) { | ||
799 | _debug("- overdue ACK timeout"); | ||
800 | call->flags &= ~RXRPC_CALL_ACKS_TIMO; | ||
801 | rxrpc_call_resend(call, call->snd_seq_count); | ||
802 | } | ||
803 | |||
804 | /* handle lack of reception */ | ||
805 | if (call->flags & RXRPC_CALL_RCV_TIMO) { | ||
806 | _debug("- reception timeout"); | ||
807 | call->flags &= ~RXRPC_CALL_RCV_TIMO; | ||
808 | rxrpc_call_abort(call, -EIO); | ||
809 | } | ||
810 | |||
811 | /* handle deferred ACKs */ | ||
812 | if (call->flags & RXRPC_CALL_ACKR_TIMO || | ||
813 | (call->ackr.nAcks > 0 && call->ackr.reason == RXRPC_ACK_REQUESTED) | ||
814 | ) { | ||
815 | _debug("- deferred ACK timeout: cj=%05lu r=%s n=%u", | ||
816 | jiffies - call->cjif, | ||
817 | rxrpc_acks[call->ackr.reason], | ||
818 | call->ackr.nAcks); | ||
819 | |||
820 | call->flags &= ~RXRPC_CALL_ACKR_TIMO; | ||
821 | |||
822 | if (call->ackr.nAcks > 0 && | ||
823 | call->app_call_state != RXRPC_CSTATE_ERROR) { | ||
824 | /* generate ACK */ | ||
825 | __rxrpc_call_gen_normal_ACK(call, call->ackr_dfr_seq); | ||
826 | call->ackr_dfr_seq = 0; | ||
827 | } | ||
828 | } | ||
829 | |||
830 | _leave(""); | ||
831 | |||
832 | } /* end rxrpc_call_do_stuff() */ | ||
833 | |||
834 | /*****************************************************************************/ | ||
835 | /* | ||
836 | * send an abort message at call or connection level | ||
837 | * - must be called with call->lock held | ||
838 | * - the supplied error code is sent as the packet data | ||
839 | */ | ||
840 | static int __rxrpc_call_abort(struct rxrpc_call *call, int errno) | ||
841 | { | ||
842 | struct rxrpc_connection *conn = call->conn; | ||
843 | struct rxrpc_message *msg; | ||
844 | struct kvec diov[1]; | ||
845 | int ret; | ||
846 | __be32 _error; | ||
847 | |||
848 | _enter("%p{%08x},%p{%d},%d", | ||
849 | conn, ntohl(conn->conn_id), call, ntohl(call->call_id), errno); | ||
850 | |||
851 | /* if this call is already aborted, then just wake up any waiters */ | ||
852 | if (call->app_call_state == RXRPC_CSTATE_ERROR) { | ||
853 | spin_unlock(&call->lock); | ||
854 | call->app_error_func(call); | ||
855 | _leave(" = 0"); | ||
856 | return 0; | ||
857 | } | ||
858 | |||
859 | rxrpc_get_call(call); | ||
860 | |||
861 | /* change the state _with_ the lock still held */ | ||
862 | call->app_call_state = RXRPC_CSTATE_ERROR; | ||
863 | call->app_err_state = RXRPC_ESTATE_LOCAL_ABORT; | ||
864 | call->app_errno = errno; | ||
865 | call->app_mark = RXRPC_APP_MARK_EOF; | ||
866 | call->app_read_buf = NULL; | ||
867 | call->app_async_read = 0; | ||
868 | |||
869 | _state(call); | ||
870 | |||
871 | /* ask the app to translate the error code */ | ||
872 | call->app_aemap_func(call); | ||
873 | |||
874 | spin_unlock(&call->lock); | ||
875 | |||
876 | /* flush any outstanding ACKs */ | ||
877 | del_timer_sync(&call->acks_timeout); | ||
878 | del_timer_sync(&call->rcv_timeout); | ||
879 | del_timer_sync(&call->ackr_dfr_timo); | ||
880 | |||
881 | if (rxrpc_call_is_ack_pending(call)) | ||
882 | __rxrpc_call_gen_normal_ACK(call, 0); | ||
883 | |||
884 | /* send the abort packet only if we actually traded some other | ||
885 | * packets */ | ||
886 | ret = 0; | ||
887 | if (call->pkt_snd_count || call->pkt_rcv_count) { | ||
888 | /* actually send the abort */ | ||
889 | _proto("Rx Sending Call ABORT { data=%d }", | ||
890 | call->app_abort_code); | ||
891 | |||
892 | _error = htonl(call->app_abort_code); | ||
893 | |||
894 | diov[0].iov_len = sizeof(_error); | ||
895 | diov[0].iov_base = &_error; | ||
896 | |||
897 | ret = rxrpc_conn_newmsg(conn, call, RXRPC_PACKET_TYPE_ABORT, | ||
898 | 1, diov, GFP_KERNEL, &msg); | ||
899 | if (ret == 0) { | ||
900 | ret = rxrpc_conn_sendmsg(conn, msg); | ||
901 | rxrpc_put_message(msg); | ||
902 | } | ||
903 | } | ||
904 | |||
905 | /* tell the app layer to let go */ | ||
906 | call->app_error_func(call); | ||
907 | |||
908 | rxrpc_put_call(call); | ||
909 | |||
910 | _leave(" = %d", ret); | ||
911 | return ret; | ||
912 | } /* end __rxrpc_call_abort() */ | ||
913 | |||
914 | /*****************************************************************************/ | ||
915 | /* | ||
916 | * send an abort message at call or connection level | ||
917 | * - the supplied error code is sent as the packet data | ||
918 | */ | ||
919 | int rxrpc_call_abort(struct rxrpc_call *call, int error) | ||
920 | { | ||
921 | spin_lock(&call->lock); | ||
922 | |||
923 | return __rxrpc_call_abort(call, error); | ||
924 | |||
925 | } /* end rxrpc_call_abort() */ | ||
926 | |||
927 | /*****************************************************************************/ | ||
928 | /* | ||
929 | * process packets waiting for this call | ||
930 | */ | ||
931 | static void rxrpc_call_receive_packet(struct rxrpc_call *call) | ||
932 | { | ||
933 | struct rxrpc_message *msg; | ||
934 | struct list_head *_p; | ||
935 | |||
936 | _enter("%p", call); | ||
937 | |||
938 | rxrpc_get_call(call); /* must not go away too soon if aborted by | ||
939 | * app-layer */ | ||
940 | |||
941 | while (!list_empty(&call->rcv_receiveq)) { | ||
942 | /* try to get next packet */ | ||
943 | _p = NULL; | ||
944 | spin_lock(&call->lock); | ||
945 | if (!list_empty(&call->rcv_receiveq)) { | ||
946 | _p = call->rcv_receiveq.next; | ||
947 | list_del_init(_p); | ||
948 | } | ||
949 | spin_unlock(&call->lock); | ||
950 | |||
951 | if (!_p) | ||
952 | break; | ||
953 | |||
954 | msg = list_entry(_p, struct rxrpc_message, link); | ||
955 | |||
956 | _proto("Rx %05lu Received %s packet (%%%u,#%u,%c%c%c%c%c)", | ||
957 | jiffies - call->cjif, | ||
958 | rxrpc_pkts[msg->hdr.type], | ||
959 | ntohl(msg->hdr.serial), | ||
960 | msg->seq, | ||
961 | msg->hdr.flags & RXRPC_JUMBO_PACKET ? 'j' : '-', | ||
962 | msg->hdr.flags & RXRPC_MORE_PACKETS ? 'm' : '-', | ||
963 | msg->hdr.flags & RXRPC_LAST_PACKET ? 'l' : '-', | ||
964 | msg->hdr.flags & RXRPC_REQUEST_ACK ? 'r' : '-', | ||
965 | msg->hdr.flags & RXRPC_CLIENT_INITIATED ? 'C' : 'S' | ||
966 | ); | ||
967 | |||
968 | switch (msg->hdr.type) { | ||
969 | /* deal with data packets */ | ||
970 | case RXRPC_PACKET_TYPE_DATA: | ||
971 | /* ACK the packet if necessary */ | ||
972 | switch (rxrpc_call_generate_ACK(call, &msg->hdr, | ||
973 | NULL)) { | ||
974 | case 0: /* useful packet */ | ||
975 | rxrpc_call_receive_data_packet(call, msg); | ||
976 | break; | ||
977 | case 1: /* duplicate or out-of-window packet */ | ||
978 | break; | ||
979 | default: | ||
980 | rxrpc_put_message(msg); | ||
981 | goto out; | ||
982 | } | ||
983 | break; | ||
984 | |||
985 | /* deal with ACK packets */ | ||
986 | case RXRPC_PACKET_TYPE_ACK: | ||
987 | rxrpc_call_receive_ack_packet(call, msg); | ||
988 | break; | ||
989 | |||
990 | /* deal with abort packets */ | ||
991 | case RXRPC_PACKET_TYPE_ABORT: { | ||
992 | __be32 _dbuf, *dp; | ||
993 | |||
994 | dp = skb_header_pointer(msg->pkt, msg->offset, | ||
995 | sizeof(_dbuf), &_dbuf); | ||
996 | if (dp == NULL) | ||
997 | printk("Rx Received short ABORT packet\n"); | ||
998 | |||
999 | _proto("Rx Received Call ABORT { data=%d }", | ||
1000 | (dp ? ntohl(*dp) : 0)); | ||
1001 | |||
1002 | spin_lock(&call->lock); | ||
1003 | call->app_call_state = RXRPC_CSTATE_ERROR; | ||
1004 | call->app_err_state = RXRPC_ESTATE_PEER_ABORT; | ||
1005 | call->app_abort_code = (dp ? ntohl(*dp) : 0); | ||
1006 | call->app_errno = -ECONNABORTED; | ||
1007 | call->app_mark = RXRPC_APP_MARK_EOF; | ||
1008 | call->app_read_buf = NULL; | ||
1009 | call->app_async_read = 0; | ||
1010 | |||
1011 | /* ask the app to translate the error code */ | ||
1012 | call->app_aemap_func(call); | ||
1013 | _state(call); | ||
1014 | spin_unlock(&call->lock); | ||
1015 | call->app_error_func(call); | ||
1016 | break; | ||
1017 | } | ||
1018 | default: | ||
1019 | /* deal with other packet types */ | ||
1020 | _proto("Rx Unsupported packet type %u (#%u)", | ||
1021 | msg->hdr.type, msg->seq); | ||
1022 | break; | ||
1023 | } | ||
1024 | |||
1025 | rxrpc_put_message(msg); | ||
1026 | } | ||
1027 | |||
1028 | out: | ||
1029 | rxrpc_put_call(call); | ||
1030 | _leave(""); | ||
1031 | } /* end rxrpc_call_receive_packet() */ | ||
1032 | |||
1033 | /*****************************************************************************/ | ||
1034 | /* | ||
1035 | * process next data packet | ||
1036 | * - as the next data packet arrives: | ||
1037 | * - it is queued on app_readyq _if_ it is the next one expected | ||
1038 | * (app_ready_seq+1) | ||
1039 | * - it is queued on app_unreadyq _if_ it is not the next one expected | ||
1040 | * - if a packet placed on app_readyq completely fills a hole leading up to | ||
1041 | * the first packet on app_unreadyq, then packets now in sequence are | ||
1042 | * tranferred to app_readyq | ||
1043 | * - the application layer can only see packets on app_readyq | ||
1044 | * (app_ready_qty bytes) | ||
1045 | * - the application layer is prodded every time a new packet arrives | ||
1046 | */ | ||
1047 | static void rxrpc_call_receive_data_packet(struct rxrpc_call *call, | ||
1048 | struct rxrpc_message *msg) | ||
1049 | { | ||
1050 | const struct rxrpc_operation *optbl, *op; | ||
1051 | struct rxrpc_message *pmsg; | ||
1052 | struct list_head *_p; | ||
1053 | int ret, lo, hi, rmtimo; | ||
1054 | __be32 opid; | ||
1055 | |||
1056 | _enter("%p{%u},%p{%u}", call, ntohl(call->call_id), msg, msg->seq); | ||
1057 | |||
1058 | rxrpc_get_message(msg); | ||
1059 | |||
1060 | /* add to the unready queue if we'd have to create a hole in the ready | ||
1061 | * queue otherwise */ | ||
1062 | if (msg->seq != call->app_ready_seq + 1) { | ||
1063 | _debug("Call add packet %d to unreadyq", msg->seq); | ||
1064 | |||
1065 | /* insert in seq order */ | ||
1066 | list_for_each(_p, &call->app_unreadyq) { | ||
1067 | pmsg = list_entry(_p, struct rxrpc_message, link); | ||
1068 | if (pmsg->seq > msg->seq) | ||
1069 | break; | ||
1070 | } | ||
1071 | |||
1072 | list_add_tail(&msg->link, _p); | ||
1073 | |||
1074 | _leave(" [unreadyq]"); | ||
1075 | return; | ||
1076 | } | ||
1077 | |||
1078 | /* next in sequence - simply append into the call's ready queue */ | ||
1079 | _debug("Call add packet %d to readyq (+%Zd => %Zd bytes)", | ||
1080 | msg->seq, msg->dsize, call->app_ready_qty); | ||
1081 | |||
1082 | spin_lock(&call->lock); | ||
1083 | call->app_ready_seq = msg->seq; | ||
1084 | call->app_ready_qty += msg->dsize; | ||
1085 | list_add_tail(&msg->link, &call->app_readyq); | ||
1086 | |||
1087 | /* move unready packets to the readyq if we got rid of a hole */ | ||
1088 | while (!list_empty(&call->app_unreadyq)) { | ||
1089 | pmsg = list_entry(call->app_unreadyq.next, | ||
1090 | struct rxrpc_message, link); | ||
1091 | |||
1092 | if (pmsg->seq != call->app_ready_seq + 1) | ||
1093 | break; | ||
1094 | |||
1095 | /* next in sequence - just move list-to-list */ | ||
1096 | _debug("Call transfer packet %d to readyq (+%Zd => %Zd bytes)", | ||
1097 | pmsg->seq, pmsg->dsize, call->app_ready_qty); | ||
1098 | |||
1099 | call->app_ready_seq = pmsg->seq; | ||
1100 | call->app_ready_qty += pmsg->dsize; | ||
1101 | list_move_tail(&pmsg->link, &call->app_readyq); | ||
1102 | } | ||
1103 | |||
1104 | /* see if we've got the last packet yet */ | ||
1105 | if (!list_empty(&call->app_readyq)) { | ||
1106 | pmsg = list_entry(call->app_readyq.prev, | ||
1107 | struct rxrpc_message, link); | ||
1108 | if (pmsg->hdr.flags & RXRPC_LAST_PACKET) { | ||
1109 | call->app_last_rcv = 1; | ||
1110 | _debug("Last packet on readyq"); | ||
1111 | } | ||
1112 | } | ||
1113 | |||
1114 | switch (call->app_call_state) { | ||
1115 | /* do nothing if call already aborted */ | ||
1116 | case RXRPC_CSTATE_ERROR: | ||
1117 | spin_unlock(&call->lock); | ||
1118 | _leave(" [error]"); | ||
1119 | return; | ||
1120 | |||
1121 | /* extract the operation ID from an incoming call if that's not | ||
1122 | * yet been done */ | ||
1123 | case RXRPC_CSTATE_SRVR_RCV_OPID: | ||
1124 | spin_unlock(&call->lock); | ||
1125 | |||
1126 | /* handle as yet insufficient data for the operation ID */ | ||
1127 | if (call->app_ready_qty < 4) { | ||
1128 | if (call->app_last_rcv) | ||
1129 | /* trouble - last packet seen */ | ||
1130 | rxrpc_call_abort(call, -EINVAL); | ||
1131 | |||
1132 | _leave(""); | ||
1133 | return; | ||
1134 | } | ||
1135 | |||
1136 | /* pull the operation ID out of the buffer */ | ||
1137 | ret = rxrpc_call_read_data(call, &opid, sizeof(opid), 0); | ||
1138 | if (ret < 0) { | ||
1139 | printk("Unexpected error from read-data: %d\n", ret); | ||
1140 | if (call->app_call_state != RXRPC_CSTATE_ERROR) | ||
1141 | rxrpc_call_abort(call, ret); | ||
1142 | _leave(""); | ||
1143 | return; | ||
1144 | } | ||
1145 | call->app_opcode = ntohl(opid); | ||
1146 | |||
1147 | /* locate the operation in the available ops table */ | ||
1148 | optbl = call->conn->service->ops_begin; | ||
1149 | lo = 0; | ||
1150 | hi = call->conn->service->ops_end - optbl; | ||
1151 | |||
1152 | while (lo < hi) { | ||
1153 | int mid = (hi + lo) / 2; | ||
1154 | op = &optbl[mid]; | ||
1155 | if (call->app_opcode == op->id) | ||
1156 | goto found_op; | ||
1157 | if (call->app_opcode > op->id) | ||
1158 | lo = mid + 1; | ||
1159 | else | ||
1160 | hi = mid; | ||
1161 | } | ||
1162 | |||
1163 | /* search failed */ | ||
1164 | kproto("Rx Client requested operation %d from %s service", | ||
1165 | call->app_opcode, call->conn->service->name); | ||
1166 | rxrpc_call_abort(call, -EINVAL); | ||
1167 | _leave(" [inval]"); | ||
1168 | return; | ||
1169 | |||
1170 | found_op: | ||
1171 | _proto("Rx Client requested operation %s from %s service", | ||
1172 | op->name, call->conn->service->name); | ||
1173 | |||
1174 | /* we're now waiting for the argument block (unless the call | ||
1175 | * was aborted) */ | ||
1176 | spin_lock(&call->lock); | ||
1177 | if (call->app_call_state == RXRPC_CSTATE_SRVR_RCV_OPID || | ||
1178 | call->app_call_state == RXRPC_CSTATE_SRVR_SND_REPLY) { | ||
1179 | if (!call->app_last_rcv) | ||
1180 | call->app_call_state = | ||
1181 | RXRPC_CSTATE_SRVR_RCV_ARGS; | ||
1182 | else if (call->app_ready_qty > 0) | ||
1183 | call->app_call_state = | ||
1184 | RXRPC_CSTATE_SRVR_GOT_ARGS; | ||
1185 | else | ||
1186 | call->app_call_state = | ||
1187 | RXRPC_CSTATE_SRVR_SND_REPLY; | ||
1188 | call->app_mark = op->asize; | ||
1189 | call->app_user = op->user; | ||
1190 | } | ||
1191 | spin_unlock(&call->lock); | ||
1192 | |||
1193 | _state(call); | ||
1194 | break; | ||
1195 | |||
1196 | case RXRPC_CSTATE_SRVR_RCV_ARGS: | ||
1197 | /* change state if just received last packet of arg block */ | ||
1198 | if (call->app_last_rcv) | ||
1199 | call->app_call_state = RXRPC_CSTATE_SRVR_GOT_ARGS; | ||
1200 | spin_unlock(&call->lock); | ||
1201 | |||
1202 | _state(call); | ||
1203 | break; | ||
1204 | |||
1205 | case RXRPC_CSTATE_CLNT_RCV_REPLY: | ||
1206 | /* change state if just received last packet of reply block */ | ||
1207 | rmtimo = 0; | ||
1208 | if (call->app_last_rcv) { | ||
1209 | call->app_call_state = RXRPC_CSTATE_CLNT_GOT_REPLY; | ||
1210 | rmtimo = 1; | ||
1211 | } | ||
1212 | spin_unlock(&call->lock); | ||
1213 | |||
1214 | if (rmtimo) { | ||
1215 | del_timer_sync(&call->acks_timeout); | ||
1216 | del_timer_sync(&call->rcv_timeout); | ||
1217 | del_timer_sync(&call->ackr_dfr_timo); | ||
1218 | } | ||
1219 | |||
1220 | _state(call); | ||
1221 | break; | ||
1222 | |||
1223 | default: | ||
1224 | /* deal with data reception in an unexpected state */ | ||
1225 | printk("Unexpected state [[[ %u ]]]\n", call->app_call_state); | ||
1226 | __rxrpc_call_abort(call, -EBADMSG); | ||
1227 | _leave(""); | ||
1228 | return; | ||
1229 | } | ||
1230 | |||
1231 | if (call->app_call_state == RXRPC_CSTATE_CLNT_RCV_REPLY && | ||
1232 | call->app_last_rcv) | ||
1233 | BUG(); | ||
1234 | |||
1235 | /* otherwise just invoke the data function whenever we can satisfy its desire for more | ||
1236 | * data | ||
1237 | */ | ||
1238 | _proto("Rx Received Op Data: st=%u qty=%Zu mk=%Zu%s", | ||
1239 | call->app_call_state, call->app_ready_qty, call->app_mark, | ||
1240 | call->app_last_rcv ? " last-rcvd" : ""); | ||
1241 | |||
1242 | spin_lock(&call->lock); | ||
1243 | |||
1244 | ret = __rxrpc_call_read_data(call); | ||
1245 | switch (ret) { | ||
1246 | case 0: | ||
1247 | spin_unlock(&call->lock); | ||
1248 | call->app_attn_func(call); | ||
1249 | break; | ||
1250 | case -EAGAIN: | ||
1251 | spin_unlock(&call->lock); | ||
1252 | break; | ||
1253 | case -ECONNABORTED: | ||
1254 | spin_unlock(&call->lock); | ||
1255 | break; | ||
1256 | default: | ||
1257 | __rxrpc_call_abort(call, ret); | ||
1258 | break; | ||
1259 | } | ||
1260 | |||
1261 | _state(call); | ||
1262 | |||
1263 | _leave(""); | ||
1264 | |||
1265 | } /* end rxrpc_call_receive_data_packet() */ | ||
1266 | |||
/*****************************************************************************/
/*
 * received an ACK packet
 * - validates that the ACK doesn't cover sequence numbers this end hasn't
 *   sent yet (aborting the call with -EINVAL if it does)
 * - performs RTT calculation against the packet that prompted the ACK (either
 *   the retained ping or a data packet still on the pending-ACK queue)
 * - then dispatches on the ACK reason: hard-ACK recording, ping response
 *   generation, or ping-response RTT handling
 */
static void rxrpc_call_receive_ack_packet(struct rxrpc_call *call,
					  struct rxrpc_message *msg)
{
	struct rxrpc_ackpacket _ack, *ap;
	rxrpc_serial_net_t serial;	/* kept in network byte order */
	rxrpc_seq_t seq;
	int ret;

	_enter("%p{%u},%p{%u}", call, ntohl(call->call_id), msg, msg->seq);

	/* extract the basic ACK record */
	ap = skb_header_pointer(msg->pkt, msg->offset, sizeof(_ack), &_ack);
	if (ap == NULL) {
		/* packet too short to hold an ACK record - just drop it */
		printk("Rx Received short ACK packet\n");
		return;
	}
	msg->offset += sizeof(_ack);

	serial = ap->serial;
	seq = ntohl(ap->firstPacket);

	_proto("Rx Received ACK %%%d { b=%hu m=%hu f=%u p=%u s=%u r=%s n=%u }",
	       ntohl(msg->hdr.serial),
	       ntohs(ap->bufferSpace),
	       ntohs(ap->maxSkew),
	       seq,
	       ntohl(ap->previousPacket),
	       ntohl(serial),
	       rxrpc_acks[ap->reason],
	       call->ackr.nAcks
	       );

	/* check the other side isn't ACK'ing a sequence number I haven't sent
	 * yet */
	if (ap->nAcks > 0 &&
	    (seq > call->snd_seq_count ||
	     seq + ap->nAcks - 1 > call->snd_seq_count)) {
		printk("Received ACK (#%u-#%u) for unsent packet\n",
		       seq, seq + ap->nAcks - 1);
		rxrpc_call_abort(call, -EINVAL);
		_leave("");
		return;
	}

	/* deal with RTT calculation */
	if (serial) {
		struct rxrpc_message *rttmsg;

		/* find the prompting packet */
		spin_lock(&call->lock);
		if (call->snd_ping && call->snd_ping->hdr.serial == serial) {
			/* it was a ping packet - take ownership of it and
			 * clear the retained-ping slot before dropping the
			 * lock */
			rttmsg = call->snd_ping;
			call->snd_ping = NULL;
			spin_unlock(&call->lock);

			if (rttmsg) {
				rttmsg->rttdone = 1;
				rxrpc_peer_calculate_rtt(call->conn->peer,
							 rttmsg, msg);
				rxrpc_put_message(rttmsg);
			}
		}
		else {
			struct list_head *_p;

			/* it ought to be a data packet - look in the pending
			 * ACK list */
			list_for_each(_p, &call->acks_pendq) {
				rttmsg = list_entry(_p, struct rxrpc_message,
						    link);
				if (rttmsg->hdr.serial == serial) {
					if (rttmsg->rttdone)
						/* never do RTT twice without
						 * resending */
						break;

					rttmsg->rttdone = 1;
					rxrpc_peer_calculate_rtt(
						call->conn->peer, rttmsg, msg);
					break;
				}
			}
			spin_unlock(&call->lock);
		}
	}

	switch (ap->reason) {
		/* deal with negative/positive acknowledgement of data
		 * packets */
	case RXRPC_ACK_REQUESTED:
	case RXRPC_ACK_DELAY:
	case RXRPC_ACK_IDLE:
		/* everything before firstPacket is definitively ACK'd */
		rxrpc_call_definitively_ACK(call, seq - 1);
		/* fall through - the hard-ACK table still needs recording */

	case RXRPC_ACK_DUPLICATE:
	case RXRPC_ACK_OUT_OF_SEQUENCE:
	case RXRPC_ACK_EXCEEDS_WINDOW:
		call->snd_resend_cnt = 0;
		ret = rxrpc_call_record_ACK(call, msg, seq, ap->nAcks);
		if (ret < 0)
			rxrpc_call_abort(call, ret);
		break;

		/* respond to ping packets immediately */
	case RXRPC_ACK_PING:
		rxrpc_call_generate_ACK(call, &msg->hdr, ap);
		break;

		/* only record RTT on ping response packets */
	case RXRPC_ACK_PING_RESPONSE:
		if (call->snd_ping) {
			struct rxrpc_message *rttmsg;

			/* only do RTT stuff if the response matches the
			 * retained ping */
			rttmsg = NULL;
			spin_lock(&call->lock);
			if (call->snd_ping &&
			    call->snd_ping->hdr.serial == ap->serial) {
				rttmsg = call->snd_ping;
				call->snd_ping = NULL;
			}
			spin_unlock(&call->lock);

			if (rttmsg) {
				rttmsg->rttdone = 1;
				rxrpc_peer_calculate_rtt(call->conn->peer,
							 rttmsg, msg);
				rxrpc_put_message(rttmsg);
			}
		}
		break;

	default:
		printk("Unsupported ACK reason %u\n", ap->reason);
		break;
	}

	_leave("");
} /* end rxrpc_call_receive_ack_packet() */
1412 | |||
/*****************************************************************************/
/*
 * record definitive ACKs for all messages up to and including the one with the
 * 'highest' seq
 * - definitively ACK'd packets are dequeued from the pending-ACK queue and
 *   discarded (state set to RXRPC_MSG_DONE, reference dropped)
 * - if everything this end sent is now definitively ACK'd, the call is marked
 *   complete, its timers are stopped and the app attention function is called
 */
static void rxrpc_call_definitively_ACK(struct rxrpc_call *call,
					rxrpc_seq_t highest)
{
	struct rxrpc_message *msg;
	int now_complete;

	_enter("%p{ads=%u},%u", call, call->acks_dftv_seq, highest);

	/* step the definitive-ACK counter up to 'highest', discarding the
	 * retained copy of each packet as it becomes definitively ACK'd */
	while (call->acks_dftv_seq < highest) {
		call->acks_dftv_seq++;

		_proto("Definitive ACK on packet #%u", call->acks_dftv_seq);

		/* discard those at front of queue until message with highest
		 * ACK is found */
		spin_lock(&call->lock);
		msg = NULL;
		if (!list_empty(&call->acks_pendq)) {
			msg = list_entry(call->acks_pendq.next,
					 struct rxrpc_message, link);
			list_del_init(&msg->link); /* dequeue */
			if (msg->state == RXRPC_MSG_SENT)
				call->acks_pend_cnt--;
		}
		spin_unlock(&call->lock);

		/* insanity check */
		if (!msg)
			panic("%s(): acks_pendq unexpectedly empty\n",
			      __FUNCTION__);

		/* the head of the queue is required to be the packet the
		 * counter just stepped onto - anything else means the queue
		 * is corrupt */
		if (msg->seq != call->acks_dftv_seq)
			panic("%s(): Packet #%u expected at front of acks_pendq"
			      " (#%u found)\n",
			      __FUNCTION__, call->acks_dftv_seq, msg->seq);

		/* discard the message */
		msg->state = RXRPC_MSG_DONE;
		rxrpc_put_message(msg);
	}

	/* if all sent packets are definitively ACK'd then prod any sleepers just in case */
	now_complete = 0;
	spin_lock(&call->lock);
	if (call->acks_dftv_seq == call->snd_seq_count) {
		if (call->app_call_state != RXRPC_CSTATE_COMPLETE) {
			call->app_call_state = RXRPC_CSTATE_COMPLETE;
			_state(call);
			now_complete = 1;
		}
	}
	spin_unlock(&call->lock);

	if (now_complete) {
		/* the call just completed - no further timeouts or deferred
		 * ACKs are needed */
		del_timer_sync(&call->acks_timeout);
		del_timer_sync(&call->rcv_timeout);
		del_timer_sync(&call->ackr_dfr_timo);
		call->app_attn_func(call);
	}

	_leave("");
} /* end rxrpc_call_definitively_ACK() */
1480 | |||
/*****************************************************************************/
/*
 * record the specified amount of ACKs/NAKs
 * - 'seq' is the sequence number of the first packet covered by the hard-ACK
 *   table carried in 'msg'; the table holds one byte per packet, 'count' of
 *   them, read from the skb at msg->offset
 * - ACK'd packets on the pending queue move to RXRPC_MSG_ACKED (provisional -
 *   they may later revert); NAK'd packets revert to RXRPC_MSG_SENT and
 *   trigger a resend up to the highest seq covered
 * - returns 0, or -EINVAL if the table is short or holds an unknown ACK type
 * - panics if the pending-ACK queue doesn't contain the packets the table
 *   refers to
 */
static int rxrpc_call_record_ACK(struct rxrpc_call *call,
				 struct rxrpc_message *msg,
				 rxrpc_seq_t seq,
				 size_t count)
{
	struct rxrpc_message *dmsg;
	struct list_head *_p;
	rxrpc_seq_t highest;
	unsigned ix;
	size_t chunk;
	char resend, now_complete;
	u8 acks[16];

	_enter("%p{apc=%u ads=%u},%p,%u,%Zu",
	       call, call->acks_pend_cnt, call->acks_dftv_seq,
	       msg, seq, count);

	/* handle re-ACK'ing of definitively ACK'd packets (may be out-of-order
	 * ACKs) - skip the part of the table that's already definitive */
	if (seq <= call->acks_dftv_seq) {
		unsigned delta = call->acks_dftv_seq - seq;

		if (count <= delta) {
			_leave(" = 0 [all definitively ACK'd]");
			return 0;
		}

		seq += delta;
		count -= delta;
		msg->offset += delta;	/* one table byte per packet */
	}

	highest = seq + count - 1;
	resend = 0;
	while (count > 0) {
		/* extract up to 16 ACK slots at a time */
		chunk = min(count, sizeof(acks));
		count -= chunk;

		/* 2 is neither ACK nor NACK; unread slots render as such in
		 * the trace below */
		memset(acks, 2, sizeof(acks));

		if (skb_copy_bits(msg->pkt, msg->offset, &acks, chunk) < 0) {
			printk("Rx Received short ACK packet\n");
			_leave(" = -EINVAL");
			return -EINVAL;
		}
		msg->offset += chunk;

		/* check that the ACK set is valid */
		for (ix = 0; ix < chunk; ix++) {
			switch (acks[ix]) {
			case RXRPC_ACK_TYPE_ACK:
				break;
			case RXRPC_ACK_TYPE_NACK:
				resend = 1;
				break;
			default:
				printk("Rx Received unsupported ACK state"
				       " %u\n", acks[ix]);
				_leave(" = -EINVAL");
				return -EINVAL;
			}
		}

		_proto("Rx ACK of packets #%u-#%u "
		       "[%c%c%c%c%c%c%c%c%c%c%c%c%c%c%c%c] (pend=%u)",
		       seq, (unsigned) (seq + chunk - 1),
		       _acktype[acks[0x0]],
		       _acktype[acks[0x1]],
		       _acktype[acks[0x2]],
		       _acktype[acks[0x3]],
		       _acktype[acks[0x4]],
		       _acktype[acks[0x5]],
		       _acktype[acks[0x6]],
		       _acktype[acks[0x7]],
		       _acktype[acks[0x8]],
		       _acktype[acks[0x9]],
		       _acktype[acks[0xA]],
		       _acktype[acks[0xB]],
		       _acktype[acks[0xC]],
		       _acktype[acks[0xD]],
		       _acktype[acks[0xE]],
		       _acktype[acks[0xF]],
		       call->acks_pend_cnt
		       );

		/* mark the packets in the ACK queue as being provisionally
		 * ACK'd */
		ix = 0;
		spin_lock(&call->lock);

		/* find the first packet ACK'd/NAK'd here */
		list_for_each(_p, &call->acks_pendq) {
			dmsg = list_entry(_p, struct rxrpc_message, link);
			if (dmsg->seq == seq)
				goto found_first;
			_debug("- %u: skipping #%u", ix, dmsg->seq);
		}
		goto bad_queue;

	found_first:
		/* walk the queue in step with the ACK table, adjusting each
		 * packet's state and the pending count accordingly */
		do {
			_debug("- %u: processing #%u (%c) apc=%u",
			       ix, dmsg->seq, _acktype[acks[ix]],
			       call->acks_pend_cnt);

			if (acks[ix] == RXRPC_ACK_TYPE_ACK) {
				if (dmsg->state == RXRPC_MSG_SENT)
					call->acks_pend_cnt--;
				dmsg->state = RXRPC_MSG_ACKED;
			}
			else {
				/* NAK - revert a previous provisional ACK */
				if (dmsg->state == RXRPC_MSG_ACKED)
					call->acks_pend_cnt++;
				dmsg->state = RXRPC_MSG_SENT;
			}
			ix++;
			seq++;

			_p = dmsg->link.next;
			dmsg = list_entry(_p, struct rxrpc_message, link);
		} while(ix < chunk &&
			_p != &call->acks_pendq &&
			dmsg->seq == seq);

		/* queue ran out (or skipped a seq) before the table did */
		if (ix < chunk)
			goto bad_queue;

		spin_unlock(&call->lock);
	}

	if (resend)
		rxrpc_call_resend(call, highest);

	/* if all packets are provisionally ACK'd, then wake up anyone who's
	 * waiting for that */
	now_complete = 0;
	spin_lock(&call->lock);
	if (call->acks_pend_cnt == 0) {
		if (call->app_call_state == RXRPC_CSTATE_SRVR_RCV_FINAL_ACK) {
			call->app_call_state = RXRPC_CSTATE_COMPLETE;
			_state(call);
		}
		now_complete = 1;
	}
	spin_unlock(&call->lock);

	if (now_complete) {
		_debug("- wake up waiters");
		del_timer_sync(&call->acks_timeout);
		del_timer_sync(&call->rcv_timeout);
		del_timer_sync(&call->ackr_dfr_timo);
		call->app_attn_func(call);
	}

	_leave(" = 0 (apc=%u)", call->acks_pend_cnt);
	return 0;

bad_queue:
	panic("%s(): acks_pendq in bad state (packet #%u absent)\n",
	      __FUNCTION__, seq);

} /* end rxrpc_call_record_ACK() */
1648 | |||
1649 | /*****************************************************************************/ | ||
1650 | /* | ||
1651 | * transfer data from the ready packet queue to the asynchronous read buffer | ||
1652 | * - since this func is the only one going to look at packets queued on | ||
1653 | * app_readyq, we don't need a lock to modify or access them, only to modify | ||
1654 | * the queue pointers | ||
1655 | * - called with call->lock held | ||
1656 | * - the buffer must be in kernel space | ||
1657 | * - returns: | ||
1658 | * 0 if buffer filled | ||
1659 | * -EAGAIN if buffer not filled and more data to come | ||
1660 | * -EBADMSG if last packet received and insufficient data left | ||
1661 | * -ECONNABORTED if the call has in an error state | ||
1662 | */ | ||
1663 | static int __rxrpc_call_read_data(struct rxrpc_call *call) | ||
1664 | { | ||
1665 | struct rxrpc_message *msg; | ||
1666 | size_t qty; | ||
1667 | int ret; | ||
1668 | |||
1669 | _enter("%p{as=%d buf=%p qty=%Zu/%Zu}", | ||
1670 | call, | ||
1671 | call->app_async_read, call->app_read_buf, | ||
1672 | call->app_ready_qty, call->app_mark); | ||
1673 | |||
1674 | /* check the state */ | ||
1675 | switch (call->app_call_state) { | ||
1676 | case RXRPC_CSTATE_SRVR_RCV_ARGS: | ||
1677 | case RXRPC_CSTATE_CLNT_RCV_REPLY: | ||
1678 | if (call->app_last_rcv) { | ||
1679 | printk("%s(%p,%p,%Zd):" | ||
1680 | " Inconsistent call state (%s, last pkt)", | ||
1681 | __FUNCTION__, | ||
1682 | call, call->app_read_buf, call->app_mark, | ||
1683 | rxrpc_call_states[call->app_call_state]); | ||
1684 | BUG(); | ||
1685 | } | ||
1686 | break; | ||
1687 | |||
1688 | case RXRPC_CSTATE_SRVR_RCV_OPID: | ||
1689 | case RXRPC_CSTATE_SRVR_GOT_ARGS: | ||
1690 | case RXRPC_CSTATE_CLNT_GOT_REPLY: | ||
1691 | break; | ||
1692 | |||
1693 | case RXRPC_CSTATE_SRVR_SND_REPLY: | ||
1694 | if (!call->app_last_rcv) { | ||
1695 | printk("%s(%p,%p,%Zd):" | ||
1696 | " Inconsistent call state (%s, not last pkt)", | ||
1697 | __FUNCTION__, | ||
1698 | call, call->app_read_buf, call->app_mark, | ||
1699 | rxrpc_call_states[call->app_call_state]); | ||
1700 | BUG(); | ||
1701 | } | ||
1702 | _debug("Trying to read data from call in SND_REPLY state"); | ||
1703 | break; | ||
1704 | |||
1705 | case RXRPC_CSTATE_ERROR: | ||
1706 | _leave(" = -ECONNABORTED"); | ||
1707 | return -ECONNABORTED; | ||
1708 | |||
1709 | default: | ||
1710 | printk("reading in unexpected state [[[ %u ]]]\n", | ||
1711 | call->app_call_state); | ||
1712 | BUG(); | ||
1713 | } | ||
1714 | |||
1715 | /* handle the case of not having an async buffer */ | ||
1716 | if (!call->app_async_read) { | ||
1717 | if (call->app_mark == RXRPC_APP_MARK_EOF) { | ||
1718 | ret = call->app_last_rcv ? 0 : -EAGAIN; | ||
1719 | } | ||
1720 | else { | ||
1721 | if (call->app_mark >= call->app_ready_qty) { | ||
1722 | call->app_mark = RXRPC_APP_MARK_EOF; | ||
1723 | ret = 0; | ||
1724 | } | ||
1725 | else { | ||
1726 | ret = call->app_last_rcv ? -EBADMSG : -EAGAIN; | ||
1727 | } | ||
1728 | } | ||
1729 | |||
1730 | _leave(" = %d [no buf]", ret); | ||
1731 | return 0; | ||
1732 | } | ||
1733 | |||
1734 | while (!list_empty(&call->app_readyq) && call->app_mark > 0) { | ||
1735 | msg = list_entry(call->app_readyq.next, | ||
1736 | struct rxrpc_message, link); | ||
1737 | |||
1738 | /* drag as much data as we need out of this packet */ | ||
1739 | qty = min(call->app_mark, msg->dsize); | ||
1740 | |||
1741 | _debug("reading %Zu from skb=%p off=%lu", | ||
1742 | qty, msg->pkt, msg->offset); | ||
1743 | |||
1744 | if (call->app_read_buf) | ||
1745 | if (skb_copy_bits(msg->pkt, msg->offset, | ||
1746 | call->app_read_buf, qty) < 0) | ||
1747 | panic("%s: Failed to copy data from packet:" | ||
1748 | " (%p,%p,%Zd)", | ||
1749 | __FUNCTION__, | ||
1750 | call, call->app_read_buf, qty); | ||
1751 | |||
1752 | /* if that packet is now empty, discard it */ | ||
1753 | call->app_ready_qty -= qty; | ||
1754 | msg->dsize -= qty; | ||
1755 | |||
1756 | if (msg->dsize == 0) { | ||
1757 | list_del_init(&msg->link); | ||
1758 | rxrpc_put_message(msg); | ||
1759 | } | ||
1760 | else { | ||
1761 | msg->offset += qty; | ||
1762 | } | ||
1763 | |||
1764 | call->app_mark -= qty; | ||
1765 | if (call->app_read_buf) | ||
1766 | call->app_read_buf += qty; | ||
1767 | } | ||
1768 | |||
1769 | if (call->app_mark == 0) { | ||
1770 | call->app_async_read = 0; | ||
1771 | call->app_mark = RXRPC_APP_MARK_EOF; | ||
1772 | call->app_read_buf = NULL; | ||
1773 | |||
1774 | /* adjust the state if used up all packets */ | ||
1775 | if (list_empty(&call->app_readyq) && call->app_last_rcv) { | ||
1776 | switch (call->app_call_state) { | ||
1777 | case RXRPC_CSTATE_SRVR_RCV_OPID: | ||
1778 | call->app_call_state = RXRPC_CSTATE_SRVR_SND_REPLY; | ||
1779 | call->app_mark = RXRPC_APP_MARK_EOF; | ||
1780 | _state(call); | ||
1781 | del_timer_sync(&call->rcv_timeout); | ||
1782 | break; | ||
1783 | case RXRPC_CSTATE_SRVR_GOT_ARGS: | ||
1784 | call->app_call_state = RXRPC_CSTATE_SRVR_SND_REPLY; | ||
1785 | _state(call); | ||
1786 | del_timer_sync(&call->rcv_timeout); | ||
1787 | break; | ||
1788 | default: | ||
1789 | call->app_call_state = RXRPC_CSTATE_COMPLETE; | ||
1790 | _state(call); | ||
1791 | del_timer_sync(&call->acks_timeout); | ||
1792 | del_timer_sync(&call->ackr_dfr_timo); | ||
1793 | del_timer_sync(&call->rcv_timeout); | ||
1794 | break; | ||
1795 | } | ||
1796 | } | ||
1797 | |||
1798 | _leave(" = 0"); | ||
1799 | return 0; | ||
1800 | } | ||
1801 | |||
1802 | if (call->app_last_rcv) { | ||
1803 | _debug("Insufficient data (%Zu/%Zu)", | ||
1804 | call->app_ready_qty, call->app_mark); | ||
1805 | call->app_async_read = 0; | ||
1806 | call->app_mark = RXRPC_APP_MARK_EOF; | ||
1807 | call->app_read_buf = NULL; | ||
1808 | |||
1809 | _leave(" = -EBADMSG"); | ||
1810 | return -EBADMSG; | ||
1811 | } | ||
1812 | |||
1813 | _leave(" = -EAGAIN"); | ||
1814 | return -EAGAIN; | ||
1815 | } /* end __rxrpc_call_read_data() */ | ||
1816 | |||
/*****************************************************************************/
/*
 * attempt to read the specified amount of data from the call's ready queue
 * into the buffer provided
 * - since this func is the only one going to look at packets queued on
 *   app_readyq, we don't need a lock to modify or access them, only to modify
 *   the queue pointers
 * - if the buffer pointer is NULL, then data is merely drained, not copied
 * - if flags&RXRPC_CALL_READ_BLOCK, then the function will wait until there is
 *   enough data or an error will be generated
 * - note that the caller must have added the calling task to the call's wait
 *   queue beforehand
 * - if flags&RXRPC_CALL_READ_ALL, then an error will be generated if this
 *   function doesn't read all available data
 * - returns 0 on success, or -EBUSY / -EAGAIN / -EINTR / -ECONNABORTED /
 *   -EBADMSG / another negative error on failure
 */
int rxrpc_call_read_data(struct rxrpc_call *call,
			 void *buffer, size_t size, int flags)
{
	int ret;

	_enter("%p{arq=%Zu},%p,%Zd,%x",
	       call, call->app_ready_qty, buffer, size, flags);

	spin_lock(&call->lock);

	/* only one read may be outstanding at a time */
	if (unlikely(!!call->app_read_buf)) {
		spin_unlock(&call->lock);
		_leave(" = -EBUSY");
		return -EBUSY;
	}

	/* install the async read request */
	call->app_mark = size;
	call->app_read_buf = buffer;
	call->app_async_read = 1;
	call->app_read_count++;

	/* read as much data as possible
	 * NOTE(review): the paths below that call __rxrpc_call_abort() return
	 * without an explicit spin_unlock, while every other path unlocks -
	 * this implies __rxrpc_call_abort() releases call->lock on behalf of
	 * the caller; verify against its definition */
	ret = __rxrpc_call_read_data(call);
	switch (ret) {
	case 0:
		/* read satisfied immediately - but READ_ALL demands that
		 * nothing be left over */
		if (flags & RXRPC_CALL_READ_ALL &&
		    (!call->app_last_rcv || call->app_ready_qty > 0)) {
			_leave(" = -EBADMSG");
			__rxrpc_call_abort(call, -EBADMSG);
			return -EBADMSG;
		}

		spin_unlock(&call->lock);
		call->app_attn_func(call);
		_leave(" = 0");
		return ret;

	case -ECONNABORTED:
		spin_unlock(&call->lock);
		_leave(" = %d [aborted]", ret);
		return ret;

	default:
		__rxrpc_call_abort(call, ret);
		_leave(" = %d", ret);
		return ret;

	case -EAGAIN:
		spin_unlock(&call->lock);

		if (!(flags & RXRPC_CALL_READ_BLOCK)) {
			_leave(" = -EAGAIN");
			return -EAGAIN;
		}

		/* wait for the data to arrive - app_async_read is cleared by
		 * __rxrpc_call_read_data() once the read is satisfied */
		_debug("blocking for data arrival");

		for (;;) {
			set_current_state(TASK_INTERRUPTIBLE);
			if (!call->app_async_read || signal_pending(current))
				break;
			schedule();
		}
		set_current_state(TASK_RUNNING);

		if (signal_pending(current)) {
			_leave(" = -EINTR");
			return -EINTR;
		}

		if (call->app_call_state == RXRPC_CSTATE_ERROR) {
			_leave(" = -ECONNABORTED");
			return -ECONNABORTED;
		}

		_leave(" = 0");
		return 0;
	}

} /* end rxrpc_call_read_data() */
1913 | |||
/*****************************************************************************/
/*
 * write data to a call
 * - the data may not be sent immediately if it doesn't fill a buffer
 * - if we can't queue all the data for buffering now, siov[] will have been
 *   adjusted to take account of what has been sent
 * - if dup_data is set the data is copied into freshly kmalloc'd buffers
 *   attached to the message; otherwise the supplied kvecs are attached
 *   directly (the caller must then keep them valid until sent)
 * - *size_sent is set to the number of bytes actually queued
 * - returns 0 on success or a negative error (-EINVAL for a bad state or a
 *   NULL iov_base, -ENOMEM, the call's app_errno, or a flush error)
 */
int rxrpc_call_write_data(struct rxrpc_call *call,
			  size_t sioc,
			  struct kvec *siov,
			  u8 rxhdr_flags,
			  gfp_t alloc_flags,
			  int dup_data,
			  size_t *size_sent)
{
	struct rxrpc_message *msg;
	struct kvec *sptr;
	size_t space, size, chunk, tmp;
	char *buf;
	int ret;

	_enter("%p,%Zu,%p,%02x,%x,%d,%p",
	       call, sioc, siov, rxhdr_flags, alloc_flags, dup_data,
	       size_sent);

	*size_sent = 0;
	size = 0;
	ret = -EINVAL;

	/* can't send more if we've sent last packet from this end */
	switch (call->app_call_state) {
	case RXRPC_CSTATE_SRVR_SND_REPLY:
	case RXRPC_CSTATE_CLNT_SND_ARGS:
		break;
	case RXRPC_CSTATE_ERROR:
		ret = call->app_errno;
		/* fall through - any state other than the two above is an
		 * error exit */
	default:
		goto out;
	}

	/* calculate how much data we've been given */
	sptr = siov;
	for (; sioc > 0; sptr++, sioc--) {
		if (!sptr->iov_len)
			continue;

		/* a non-empty element must carry a buffer */
		if (!sptr->iov_base)
			goto out;

		size += sptr->iov_len;
	}

	_debug("- size=%Zu mtu=%Zu", size, call->conn->mtu_size);

	do {
		/* make sure there's a message under construction */
		if (!call->snd_nextmsg) {
			/* no - allocate a message with no data yet attached */
			ret = rxrpc_conn_newmsg(call->conn, call,
						RXRPC_PACKET_TYPE_DATA,
						0, NULL, alloc_flags,
						&call->snd_nextmsg);
			if (ret < 0)
				goto out;
			_debug("- allocated new message [ds=%Zu]",
			       call->snd_nextmsg->dsize);
		}

		msg = call->snd_nextmsg;
		msg->hdr.flags |= rxhdr_flags;

		/* deal with zero-length terminal packet */
		if (size == 0) {
			if (rxhdr_flags & RXRPC_LAST_PACKET) {
				ret = rxrpc_call_flush(call);
				if (ret < 0)
					goto out;
			}
			break;
		}

		/* work out how much space current packet has available */
		space = call->conn->mtu_size - msg->dsize;
		chunk = min(space, size);

		_debug("- [before] space=%Zu chunk=%Zu", space, chunk);

		/* skip over exhausted iovec elements */
		while (!siov->iov_len)
			siov++;

		/* if we are going to have to duplicate the data then coalesce
		 * it too */
		if (dup_data) {
			/* don't allocate more that 1 page at a time */
			if (chunk > PAGE_SIZE)
				chunk = PAGE_SIZE;

			/* allocate a data buffer and attach to the message */
			buf = kmalloc(chunk, alloc_flags);
			if (unlikely(!buf)) {
				if (msg->dsize ==
				    sizeof(struct rxrpc_header)) {
					/* discard an empty msg and wind back
					 * the seq counter */
					rxrpc_put_message(msg);
					call->snd_nextmsg = NULL;
					call->snd_seq_count--;
				}

				ret = -ENOMEM;
				goto out;
			}

			/* attach the buffer and flag it to be kfree'd when
			 * the message is released */
			tmp = msg->dcount++;
			set_bit(tmp, &msg->dfree);
			msg->data[tmp].iov_base = buf;
			msg->data[tmp].iov_len = chunk;
			msg->dsize += chunk;
			*size_sent += chunk;
			size -= chunk;

			/* load the buffer with data */
			while (chunk > 0) {
				tmp = min(chunk, siov->iov_len);
				memcpy(buf, siov->iov_base, tmp);
				buf += tmp;
				siov->iov_base += tmp;
				siov->iov_len -= tmp;
				if (!siov->iov_len)
					siov++;
				chunk -= tmp;
			}
		}
		else {
			/* we want to attach the supplied buffers directly */
			while (chunk > 0 &&
			       msg->dcount < RXRPC_MSG_MAX_IOCS) {
				tmp = msg->dcount++;
				msg->data[tmp].iov_base = siov->iov_base;
				msg->data[tmp].iov_len = siov->iov_len;
				msg->dsize += siov->iov_len;
				*size_sent += siov->iov_len;
				size -= siov->iov_len;
				chunk -= siov->iov_len;
				siov++;
			}
		}

		_debug("- [loaded] chunk=%Zu size=%Zu", chunk, size);

		/* dispatch the message when full, final or requesting ACK */
		if (msg->dsize >= call->conn->mtu_size || rxhdr_flags) {
			ret = rxrpc_call_flush(call);
			if (ret < 0)
				goto out;
		}

	} while(size > 0);

	ret = 0;
 out:
	_leave(" = %d (%Zd queued, %Zd rem)", ret, *size_sent, size);
	return ret;

} /* end rxrpc_call_write_data() */
2079 | |||
/*****************************************************************************/
/*
 * flush outstanding packets to the network
 * - dispatches the message currently under construction (if any), setting its
 *   MORE_PACKETS/REQUEST_ACK header flags, queueing it on the pending-ACK
 *   queue, advancing the call state if it was the last packet, and arming the
 *   ACK timeout before handing it to the connection for transmission
 * - returns 0 on success, -EINVAL for an unexpected call state, the call's
 *   app_errno if the call is in error, or the sendmsg error
 */
static int rxrpc_call_flush(struct rxrpc_call *call)
{
	struct rxrpc_message *msg;
	int ret = 0;

	_enter("%p", call);

	/* pin the call for the duration */
	rxrpc_get_call(call);

	/* if there's a packet under construction, then dispatch it now */
	if (call->snd_nextmsg) {
		msg = call->snd_nextmsg;
		call->snd_nextmsg = NULL;

		if (msg->hdr.flags & RXRPC_LAST_PACKET) {
			msg->hdr.flags &= ~RXRPC_MORE_PACKETS;
			if (call->app_call_state != RXRPC_CSTATE_CLNT_SND_ARGS)
				msg->hdr.flags |= RXRPC_REQUEST_ACK;
		}
		else {
			msg->hdr.flags |= RXRPC_MORE_PACKETS;
		}

		_proto("Sending DATA message { ds=%Zu dc=%u df=%02lu }",
		       msg->dsize, msg->dcount, msg->dfree);

		/* queue and adjust call state */
		spin_lock(&call->lock);
		list_add_tail(&msg->link, &call->acks_pendq);

		/* decide what to do depending on current state and if this is
		 * the last packet */
		ret = -EINVAL;
		switch (call->app_call_state) {
		case RXRPC_CSTATE_SRVR_SND_REPLY:
			if (msg->hdr.flags & RXRPC_LAST_PACKET) {
				call->app_call_state =
					RXRPC_CSTATE_SRVR_RCV_FINAL_ACK;
				_state(call);
			}
			break;

		case RXRPC_CSTATE_CLNT_SND_ARGS:
			if (msg->hdr.flags & RXRPC_LAST_PACKET) {
				call->app_call_state =
					RXRPC_CSTATE_CLNT_RCV_REPLY;
				_state(call);
			}
			break;

		case RXRPC_CSTATE_ERROR:
			ret = call->app_errno;
			/* fall through - any other state is an error exit */
		default:
			spin_unlock(&call->lock);
			goto out;
		}

		call->acks_pend_cnt++;

		/* (re)arm the ACK timeout based on the measured RTT */
		mod_timer(&call->acks_timeout,
			  __rxrpc_rtt_based_timeout(call,
						    rxrpc_call_acks_timeout));

		spin_unlock(&call->lock);

		ret = rxrpc_conn_sendmsg(call->conn, msg);
		if (ret == 0)
			call->pkt_snd_count++;
	}

 out:
	rxrpc_put_call(call);

	_leave(" = %d", ret);
	return ret;

} /* end rxrpc_call_flush() */
2161 | |||
2162 | /*****************************************************************************/ | ||
2163 | /* | ||
2164 | * resend NAK'd or unacknowledged packets up to the highest one specified | ||
2165 | */ | ||
2166 | static void rxrpc_call_resend(struct rxrpc_call *call, rxrpc_seq_t highest) | ||
2167 | { | ||
2168 | struct rxrpc_message *msg; | ||
2169 | struct list_head *_p; | ||
2170 | rxrpc_seq_t seq = 0; | ||
2171 | |||
2172 | _enter("%p,%u", call, highest); | ||
2173 | |||
2174 | _proto("Rx Resend required"); | ||
2175 | |||
2176 | /* handle too many resends */ | ||
2177 | if (call->snd_resend_cnt >= rxrpc_call_max_resend) { | ||
2178 | _debug("Aborting due to too many resends (rcv=%d)", | ||
2179 | call->pkt_rcv_count); | ||
2180 | rxrpc_call_abort(call, | ||
2181 | call->pkt_rcv_count > 0 ? -EIO : -ETIMEDOUT); | ||
2182 | _leave(""); | ||
2183 | return; | ||
2184 | } | ||
2185 | |||
2186 | spin_lock(&call->lock); | ||
2187 | call->snd_resend_cnt++; | ||
2188 | for (;;) { | ||
2189 | /* determine which the next packet we might need to ACK is */ | ||
2190 | if (seq <= call->acks_dftv_seq) | ||
2191 | seq = call->acks_dftv_seq; | ||
2192 | seq++; | ||
2193 | |||
2194 | if (seq > highest) | ||
2195 | break; | ||
2196 | |||
2197 | /* look for the packet in the pending-ACK queue */ | ||
2198 | list_for_each(_p, &call->acks_pendq) { | ||
2199 | msg = list_entry(_p, struct rxrpc_message, link); | ||
2200 | if (msg->seq == seq) | ||
2201 | goto found_msg; | ||
2202 | } | ||
2203 | |||
2204 | panic("%s(%p,%d):" | ||
2205 | " Inconsistent pending-ACK queue (ds=%u sc=%u sq=%u)\n", | ||
2206 | __FUNCTION__, call, highest, | ||
2207 | call->acks_dftv_seq, call->snd_seq_count, seq); | ||
2208 | |||
2209 | found_msg: | ||
2210 | if (msg->state != RXRPC_MSG_SENT) | ||
2211 | continue; /* only un-ACK'd packets */ | ||
2212 | |||
2213 | rxrpc_get_message(msg); | ||
2214 | spin_unlock(&call->lock); | ||
2215 | |||
2216 | /* send each message again (and ignore any errors we might | ||
2217 | * incur) */ | ||
2218 | _proto("Resending DATA message { ds=%Zu dc=%u df=%02lu }", | ||
2219 | msg->dsize, msg->dcount, msg->dfree); | ||
2220 | |||
2221 | if (rxrpc_conn_sendmsg(call->conn, msg) == 0) | ||
2222 | call->pkt_snd_count++; | ||
2223 | |||
2224 | rxrpc_put_message(msg); | ||
2225 | |||
2226 | spin_lock(&call->lock); | ||
2227 | } | ||
2228 | |||
2229 | /* reset the timeout */ | ||
2230 | mod_timer(&call->acks_timeout, | ||
2231 | __rxrpc_rtt_based_timeout(call, rxrpc_call_acks_timeout)); | ||
2232 | |||
2233 | spin_unlock(&call->lock); | ||
2234 | |||
2235 | _leave(""); | ||
2236 | } /* end rxrpc_call_resend() */ | ||
2237 | |||
2238 | /*****************************************************************************/ | ||
2239 | /* | ||
2240 | * handle an ICMP error being applied to a call | ||
2241 | */ | ||
2242 | void rxrpc_call_handle_error(struct rxrpc_call *call, int local, int errno) | ||
2243 | { | ||
2244 | _enter("%p{%u},%d", call, ntohl(call->call_id), errno); | ||
2245 | |||
2246 | /* if this call is already aborted, then just wake up any waiters */ | ||
2247 | if (call->app_call_state == RXRPC_CSTATE_ERROR) { | ||
2248 | call->app_error_func(call); | ||
2249 | } | ||
2250 | else { | ||
2251 | /* tell the app layer what happened */ | ||
2252 | spin_lock(&call->lock); | ||
2253 | call->app_call_state = RXRPC_CSTATE_ERROR; | ||
2254 | _state(call); | ||
2255 | if (local) | ||
2256 | call->app_err_state = RXRPC_ESTATE_LOCAL_ERROR; | ||
2257 | else | ||
2258 | call->app_err_state = RXRPC_ESTATE_REMOTE_ERROR; | ||
2259 | call->app_errno = errno; | ||
2260 | call->app_mark = RXRPC_APP_MARK_EOF; | ||
2261 | call->app_read_buf = NULL; | ||
2262 | call->app_async_read = 0; | ||
2263 | |||
2264 | /* map the error */ | ||
2265 | call->app_aemap_func(call); | ||
2266 | |||
2267 | del_timer_sync(&call->acks_timeout); | ||
2268 | del_timer_sync(&call->rcv_timeout); | ||
2269 | del_timer_sync(&call->ackr_dfr_timo); | ||
2270 | |||
2271 | spin_unlock(&call->lock); | ||
2272 | |||
2273 | call->app_error_func(call); | ||
2274 | } | ||
2275 | |||
2276 | _leave(""); | ||
2277 | } /* end rxrpc_call_handle_error() */ | ||
diff --git a/net/rxrpc/connection.c b/net/rxrpc/connection.c deleted file mode 100644 index a7c929a9fdca..000000000000 --- a/net/rxrpc/connection.c +++ /dev/null | |||
@@ -1,777 +0,0 @@ | |||
1 | /* connection.c: Rx connection routines | ||
2 | * | ||
3 | * Copyright (C) 2002 Red Hat, Inc. All Rights Reserved. | ||
4 | * Written by David Howells (dhowells@redhat.com) | ||
5 | * | ||
6 | * This program is free software; you can redistribute it and/or | ||
7 | * modify it under the terms of the GNU General Public License | ||
8 | * as published by the Free Software Foundation; either version | ||
9 | * 2 of the License, or (at your option) any later version. | ||
10 | */ | ||
11 | |||
12 | #include <linux/sched.h> | ||
13 | #include <linux/slab.h> | ||
14 | #include <linux/module.h> | ||
15 | #include <rxrpc/rxrpc.h> | ||
16 | #include <rxrpc/transport.h> | ||
17 | #include <rxrpc/peer.h> | ||
18 | #include <rxrpc/connection.h> | ||
19 | #include <rxrpc/call.h> | ||
20 | #include <rxrpc/message.h> | ||
21 | #include <linux/udp.h> | ||
22 | #include <linux/ip.h> | ||
23 | #include <net/sock.h> | ||
24 | #include <asm/uaccess.h> | ||
25 | #include "internal.h" | ||
26 | |||
27 | __RXACCT_DECL(atomic_t rxrpc_connection_count); | ||
28 | |||
29 | LIST_HEAD(rxrpc_conns); | ||
30 | DECLARE_RWSEM(rxrpc_conns_sem); | ||
31 | unsigned long rxrpc_conn_timeout = 60 * 60; | ||
32 | |||
33 | static void rxrpc_conn_do_timeout(struct rxrpc_connection *conn); | ||
34 | |||
35 | static void __rxrpc_conn_timeout(rxrpc_timer_t *timer) | ||
36 | { | ||
37 | struct rxrpc_connection *conn = | ||
38 | list_entry(timer, struct rxrpc_connection, timeout); | ||
39 | |||
40 | _debug("Rx CONN TIMEOUT [%p{u=%d}]", conn, atomic_read(&conn->usage)); | ||
41 | |||
42 | rxrpc_conn_do_timeout(conn); | ||
43 | } | ||
44 | |||
45 | static const struct rxrpc_timer_ops rxrpc_conn_timer_ops = { | ||
46 | .timed_out = __rxrpc_conn_timeout, | ||
47 | }; | ||
48 | |||
49 | /*****************************************************************************/ | ||
50 | /* | ||
51 | * create a new connection record | ||
52 | */ | ||
53 | static inline int __rxrpc_create_connection(struct rxrpc_peer *peer, | ||
54 | struct rxrpc_connection **_conn) | ||
55 | { | ||
56 | struct rxrpc_connection *conn; | ||
57 | |||
58 | _enter("%p",peer); | ||
59 | |||
60 | /* allocate and initialise a connection record */ | ||
61 | conn = kzalloc(sizeof(struct rxrpc_connection), GFP_KERNEL); | ||
62 | if (!conn) { | ||
63 | _leave(" = -ENOMEM"); | ||
64 | return -ENOMEM; | ||
65 | } | ||
66 | |||
67 | atomic_set(&conn->usage, 1); | ||
68 | |||
69 | INIT_LIST_HEAD(&conn->link); | ||
70 | INIT_LIST_HEAD(&conn->id_link); | ||
71 | init_waitqueue_head(&conn->chanwait); | ||
72 | spin_lock_init(&conn->lock); | ||
73 | rxrpc_timer_init(&conn->timeout, &rxrpc_conn_timer_ops); | ||
74 | |||
75 | do_gettimeofday(&conn->atime); | ||
76 | conn->mtu_size = 1024; | ||
77 | conn->peer = peer; | ||
78 | conn->trans = peer->trans; | ||
79 | |||
80 | __RXACCT(atomic_inc(&rxrpc_connection_count)); | ||
81 | *_conn = conn; | ||
82 | _leave(" = 0 (%p)", conn); | ||
83 | |||
84 | return 0; | ||
85 | } /* end __rxrpc_create_connection() */ | ||
86 | |||
87 | /*****************************************************************************/ | ||
88 | /* | ||
89 | * create a new connection record for outgoing connections | ||
90 | */ | ||
91 | int rxrpc_create_connection(struct rxrpc_transport *trans, | ||
92 | __be16 port, | ||
93 | __be32 addr, | ||
94 | uint16_t service_id, | ||
95 | void *security, | ||
96 | struct rxrpc_connection **_conn) | ||
97 | { | ||
98 | struct rxrpc_connection *candidate, *conn; | ||
99 | struct rxrpc_peer *peer; | ||
100 | struct list_head *_p; | ||
101 | __be32 connid; | ||
102 | int ret; | ||
103 | |||
104 | _enter("%p{%hu},%u,%hu", trans, trans->port, ntohs(port), service_id); | ||
105 | |||
106 | /* get a peer record */ | ||
107 | ret = rxrpc_peer_lookup(trans, addr, &peer); | ||
108 | if (ret < 0) { | ||
109 | _leave(" = %d", ret); | ||
110 | return ret; | ||
111 | } | ||
112 | |||
113 | /* allocate and initialise a connection record */ | ||
114 | ret = __rxrpc_create_connection(peer, &candidate); | ||
115 | if (ret < 0) { | ||
116 | rxrpc_put_peer(peer); | ||
117 | _leave(" = %d", ret); | ||
118 | return ret; | ||
119 | } | ||
120 | |||
121 | /* fill in the specific bits */ | ||
122 | candidate->addr.sin_family = AF_INET; | ||
123 | candidate->addr.sin_port = port; | ||
124 | candidate->addr.sin_addr.s_addr = addr; | ||
125 | |||
126 | candidate->in_epoch = rxrpc_epoch; | ||
127 | candidate->out_epoch = rxrpc_epoch; | ||
128 | candidate->in_clientflag = 0; | ||
129 | candidate->out_clientflag = RXRPC_CLIENT_INITIATED; | ||
130 | candidate->service_id = htons(service_id); | ||
131 | |||
132 | /* invent a unique connection ID */ | ||
133 | write_lock(&peer->conn_idlock); | ||
134 | |||
135 | try_next_id: | ||
136 | connid = htonl(peer->conn_idcounter & RXRPC_CIDMASK); | ||
137 | peer->conn_idcounter += RXRPC_MAXCALLS; | ||
138 | |||
139 | list_for_each(_p, &peer->conn_idlist) { | ||
140 | conn = list_entry(_p, struct rxrpc_connection, id_link); | ||
141 | if (connid == conn->conn_id) | ||
142 | goto try_next_id; | ||
143 | if (connid > conn->conn_id) | ||
144 | break; | ||
145 | } | ||
146 | |||
147 | _debug("selected candidate conn ID %x.%u", | ||
148 | ntohl(peer->addr.s_addr), ntohl(connid)); | ||
149 | |||
150 | candidate->conn_id = connid; | ||
151 | list_add_tail(&candidate->id_link, _p); | ||
152 | |||
153 | write_unlock(&peer->conn_idlock); | ||
154 | |||
155 | /* attach to peer */ | ||
156 | candidate->peer = peer; | ||
157 | |||
158 | write_lock(&peer->conn_lock); | ||
159 | |||
160 | /* search the peer's transport graveyard list */ | ||
161 | spin_lock(&peer->conn_gylock); | ||
162 | list_for_each(_p, &peer->conn_graveyard) { | ||
163 | conn = list_entry(_p, struct rxrpc_connection, link); | ||
164 | if (conn->addr.sin_port == candidate->addr.sin_port && | ||
165 | conn->security_ix == candidate->security_ix && | ||
166 | conn->service_id == candidate->service_id && | ||
167 | conn->in_clientflag == 0) | ||
168 | goto found_in_graveyard; | ||
169 | } | ||
170 | spin_unlock(&peer->conn_gylock); | ||
171 | |||
172 | /* pick the new candidate */ | ||
173 | _debug("created connection: {%08x} [out]", ntohl(candidate->conn_id)); | ||
174 | atomic_inc(&peer->conn_count); | ||
175 | conn = candidate; | ||
176 | candidate = NULL; | ||
177 | |||
178 | make_active: | ||
179 | list_add_tail(&conn->link, &peer->conn_active); | ||
180 | write_unlock(&peer->conn_lock); | ||
181 | |||
182 | if (candidate) { | ||
183 | write_lock(&peer->conn_idlock); | ||
184 | list_del(&candidate->id_link); | ||
185 | write_unlock(&peer->conn_idlock); | ||
186 | |||
187 | __RXACCT(atomic_dec(&rxrpc_connection_count)); | ||
188 | kfree(candidate); | ||
189 | } | ||
190 | else { | ||
191 | down_write(&rxrpc_conns_sem); | ||
192 | list_add_tail(&conn->proc_link, &rxrpc_conns); | ||
193 | up_write(&rxrpc_conns_sem); | ||
194 | } | ||
195 | |||
196 | *_conn = conn; | ||
197 | _leave(" = 0 (%p)", conn); | ||
198 | |||
199 | return 0; | ||
200 | |||
201 | /* handle resurrecting a connection from the graveyard */ | ||
202 | found_in_graveyard: | ||
203 | _debug("resurrecting connection: {%08x} [out]", ntohl(conn->conn_id)); | ||
204 | rxrpc_get_connection(conn); | ||
205 | rxrpc_krxtimod_del_timer(&conn->timeout); | ||
206 | list_del_init(&conn->link); | ||
207 | spin_unlock(&peer->conn_gylock); | ||
208 | goto make_active; | ||
209 | } /* end rxrpc_create_connection() */ | ||
210 | |||
211 | /*****************************************************************************/ | ||
212 | /* | ||
213 | * lookup the connection for an incoming packet | ||
214 | * - create a new connection record for unrecorded incoming connections | ||
215 | */ | ||
216 | int rxrpc_connection_lookup(struct rxrpc_peer *peer, | ||
217 | struct rxrpc_message *msg, | ||
218 | struct rxrpc_connection **_conn) | ||
219 | { | ||
220 | struct rxrpc_connection *conn, *candidate = NULL; | ||
221 | struct list_head *_p; | ||
222 | struct sk_buff *pkt = msg->pkt; | ||
223 | int ret, fresh = 0; | ||
224 | __be32 x_epoch, x_connid; | ||
225 | __be16 x_port, x_servid; | ||
226 | __u32 x_secix; | ||
227 | u8 x_clflag; | ||
228 | |||
229 | _enter("%p{{%hu}},%u,%hu", | ||
230 | peer, | ||
231 | peer->trans->port, | ||
232 | ntohs(pkt->h.uh->source), | ||
233 | ntohs(msg->hdr.serviceId)); | ||
234 | |||
235 | x_port = pkt->h.uh->source; | ||
236 | x_epoch = msg->hdr.epoch; | ||
237 | x_clflag = msg->hdr.flags & RXRPC_CLIENT_INITIATED; | ||
238 | x_connid = htonl(ntohl(msg->hdr.cid) & RXRPC_CIDMASK); | ||
239 | x_servid = msg->hdr.serviceId; | ||
240 | x_secix = msg->hdr.securityIndex; | ||
241 | |||
242 | /* [common case] search the transport's active list first */ | ||
243 | read_lock(&peer->conn_lock); | ||
244 | list_for_each(_p, &peer->conn_active) { | ||
245 | conn = list_entry(_p, struct rxrpc_connection, link); | ||
246 | if (conn->addr.sin_port == x_port && | ||
247 | conn->in_epoch == x_epoch && | ||
248 | conn->conn_id == x_connid && | ||
249 | conn->security_ix == x_secix && | ||
250 | conn->service_id == x_servid && | ||
251 | conn->in_clientflag == x_clflag) | ||
252 | goto found_active; | ||
253 | } | ||
254 | read_unlock(&peer->conn_lock); | ||
255 | |||
256 | /* [uncommon case] not active | ||
257 | * - create a candidate for a new record if an inbound connection | ||
258 | * - only examine the graveyard for an outbound connection | ||
259 | */ | ||
260 | if (x_clflag) { | ||
261 | ret = __rxrpc_create_connection(peer, &candidate); | ||
262 | if (ret < 0) { | ||
263 | _leave(" = %d", ret); | ||
264 | return ret; | ||
265 | } | ||
266 | |||
267 | /* fill in the specifics */ | ||
268 | candidate->addr.sin_family = AF_INET; | ||
269 | candidate->addr.sin_port = x_port; | ||
270 | candidate->addr.sin_addr.s_addr = pkt->nh.iph->saddr; | ||
271 | candidate->in_epoch = x_epoch; | ||
272 | candidate->out_epoch = x_epoch; | ||
273 | candidate->in_clientflag = RXRPC_CLIENT_INITIATED; | ||
274 | candidate->out_clientflag = 0; | ||
275 | candidate->conn_id = x_connid; | ||
276 | candidate->service_id = x_servid; | ||
277 | candidate->security_ix = x_secix; | ||
278 | } | ||
279 | |||
280 | /* search the active list again, just in case it appeared whilst we | ||
281 | * were busy */ | ||
282 | write_lock(&peer->conn_lock); | ||
283 | list_for_each(_p, &peer->conn_active) { | ||
284 | conn = list_entry(_p, struct rxrpc_connection, link); | ||
285 | if (conn->addr.sin_port == x_port && | ||
286 | conn->in_epoch == x_epoch && | ||
287 | conn->conn_id == x_connid && | ||
288 | conn->security_ix == x_secix && | ||
289 | conn->service_id == x_servid && | ||
290 | conn->in_clientflag == x_clflag) | ||
291 | goto found_active_second_chance; | ||
292 | } | ||
293 | |||
294 | /* search the transport's graveyard list */ | ||
295 | spin_lock(&peer->conn_gylock); | ||
296 | list_for_each(_p, &peer->conn_graveyard) { | ||
297 | conn = list_entry(_p, struct rxrpc_connection, link); | ||
298 | if (conn->addr.sin_port == x_port && | ||
299 | conn->in_epoch == x_epoch && | ||
300 | conn->conn_id == x_connid && | ||
301 | conn->security_ix == x_secix && | ||
302 | conn->service_id == x_servid && | ||
303 | conn->in_clientflag == x_clflag) | ||
304 | goto found_in_graveyard; | ||
305 | } | ||
306 | spin_unlock(&peer->conn_gylock); | ||
307 | |||
308 | /* outbound connections aren't created here */ | ||
309 | if (!x_clflag) { | ||
310 | write_unlock(&peer->conn_lock); | ||
311 | _leave(" = -ENOENT"); | ||
312 | return -ENOENT; | ||
313 | } | ||
314 | |||
315 | /* we can now add the new candidate to the list */ | ||
316 | _debug("created connection: {%08x} [in]", ntohl(candidate->conn_id)); | ||
317 | rxrpc_get_peer(peer); | ||
318 | conn = candidate; | ||
319 | candidate = NULL; | ||
320 | atomic_inc(&peer->conn_count); | ||
321 | fresh = 1; | ||
322 | |||
323 | make_active: | ||
324 | list_add_tail(&conn->link, &peer->conn_active); | ||
325 | |||
326 | success_uwfree: | ||
327 | write_unlock(&peer->conn_lock); | ||
328 | |||
329 | if (candidate) { | ||
330 | write_lock(&peer->conn_idlock); | ||
331 | list_del(&candidate->id_link); | ||
332 | write_unlock(&peer->conn_idlock); | ||
333 | |||
334 | __RXACCT(atomic_dec(&rxrpc_connection_count)); | ||
335 | kfree(candidate); | ||
336 | } | ||
337 | |||
338 | if (fresh) { | ||
339 | down_write(&rxrpc_conns_sem); | ||
340 | list_add_tail(&conn->proc_link, &rxrpc_conns); | ||
341 | up_write(&rxrpc_conns_sem); | ||
342 | } | ||
343 | |||
344 | success: | ||
345 | *_conn = conn; | ||
346 | _leave(" = 0 (%p)", conn); | ||
347 | return 0; | ||
348 | |||
349 | /* handle the connection being found in the active list straight off */ | ||
350 | found_active: | ||
351 | rxrpc_get_connection(conn); | ||
352 | read_unlock(&peer->conn_lock); | ||
353 | goto success; | ||
354 | |||
355 | /* handle resurrecting a connection from the graveyard */ | ||
356 | found_in_graveyard: | ||
357 | _debug("resurrecting connection: {%08x} [in]", ntohl(conn->conn_id)); | ||
358 | rxrpc_get_peer(peer); | ||
359 | rxrpc_get_connection(conn); | ||
360 | rxrpc_krxtimod_del_timer(&conn->timeout); | ||
361 | list_del_init(&conn->link); | ||
362 | spin_unlock(&peer->conn_gylock); | ||
363 | goto make_active; | ||
364 | |||
365 | /* handle finding the connection on the second time through the active | ||
366 | * list */ | ||
367 | found_active_second_chance: | ||
368 | rxrpc_get_connection(conn); | ||
369 | goto success_uwfree; | ||
370 | |||
371 | } /* end rxrpc_connection_lookup() */ | ||
372 | |||
373 | /*****************************************************************************/ | ||
374 | /* | ||
375 | * finish using a connection record | ||
376 | * - it will be transferred to the peer's connection graveyard when refcount | ||
377 | * reaches 0 | ||
378 | */ | ||
379 | void rxrpc_put_connection(struct rxrpc_connection *conn) | ||
380 | { | ||
381 | struct rxrpc_peer *peer; | ||
382 | |||
383 | if (!conn) | ||
384 | return; | ||
385 | |||
386 | _enter("%p{u=%d p=%hu}", | ||
387 | conn, atomic_read(&conn->usage), ntohs(conn->addr.sin_port)); | ||
388 | |||
389 | peer = conn->peer; | ||
390 | spin_lock(&peer->conn_gylock); | ||
391 | |||
392 | /* sanity check */ | ||
393 | if (atomic_read(&conn->usage) <= 0) | ||
394 | BUG(); | ||
395 | |||
396 | if (likely(!atomic_dec_and_test(&conn->usage))) { | ||
397 | spin_unlock(&peer->conn_gylock); | ||
398 | _leave(""); | ||
399 | return; | ||
400 | } | ||
401 | |||
402 | /* move to graveyard queue */ | ||
403 | _debug("burying connection: {%08x}", ntohl(conn->conn_id)); | ||
404 | list_move_tail(&conn->link, &peer->conn_graveyard); | ||
405 | |||
406 | rxrpc_krxtimod_add_timer(&conn->timeout, rxrpc_conn_timeout * HZ); | ||
407 | |||
408 | spin_unlock(&peer->conn_gylock); | ||
409 | |||
410 | rxrpc_put_peer(conn->peer); | ||
411 | |||
412 | _leave(" [killed]"); | ||
413 | } /* end rxrpc_put_connection() */ | ||
414 | |||
415 | /*****************************************************************************/ | ||
416 | /* | ||
417 | * free a connection record | ||
418 | */ | ||
419 | static void rxrpc_conn_do_timeout(struct rxrpc_connection *conn) | ||
420 | { | ||
421 | struct rxrpc_peer *peer; | ||
422 | |||
423 | _enter("%p{u=%d p=%hu}", | ||
424 | conn, atomic_read(&conn->usage), ntohs(conn->addr.sin_port)); | ||
425 | |||
426 | peer = conn->peer; | ||
427 | |||
428 | if (atomic_read(&conn->usage) < 0) | ||
429 | BUG(); | ||
430 | |||
431 | /* remove from graveyard if still dead */ | ||
432 | spin_lock(&peer->conn_gylock); | ||
433 | if (atomic_read(&conn->usage) == 0) { | ||
434 | list_del_init(&conn->link); | ||
435 | } | ||
436 | else { | ||
437 | conn = NULL; | ||
438 | } | ||
439 | spin_unlock(&peer->conn_gylock); | ||
440 | |||
441 | if (!conn) { | ||
442 | _leave(""); | ||
443 | return; /* resurrected */ | ||
444 | } | ||
445 | |||
446 | _debug("--- Destroying Connection %p{%08x} ---", | ||
447 | conn, ntohl(conn->conn_id)); | ||
448 | |||
449 | down_write(&rxrpc_conns_sem); | ||
450 | list_del(&conn->proc_link); | ||
451 | up_write(&rxrpc_conns_sem); | ||
452 | |||
453 | write_lock(&peer->conn_idlock); | ||
454 | list_del(&conn->id_link); | ||
455 | write_unlock(&peer->conn_idlock); | ||
456 | |||
457 | __RXACCT(atomic_dec(&rxrpc_connection_count)); | ||
458 | kfree(conn); | ||
459 | |||
460 | /* if the graveyard is now empty, wake up anyone waiting for that */ | ||
461 | if (atomic_dec_and_test(&peer->conn_count)) | ||
462 | wake_up(&peer->conn_gy_waitq); | ||
463 | |||
464 | _leave(" [destroyed]"); | ||
465 | } /* end rxrpc_conn_do_timeout() */ | ||
466 | |||
467 | /*****************************************************************************/ | ||
468 | /* | ||
469 | * clear all connection records from a peer endpoint | ||
470 | */ | ||
471 | void rxrpc_conn_clearall(struct rxrpc_peer *peer) | ||
472 | { | ||
473 | DECLARE_WAITQUEUE(myself, current); | ||
474 | |||
475 | struct rxrpc_connection *conn; | ||
476 | int err; | ||
477 | |||
478 | _enter("%p", peer); | ||
479 | |||
480 | /* there shouldn't be any active conns remaining */ | ||
481 | if (!list_empty(&peer->conn_active)) | ||
482 | BUG(); | ||
483 | |||
484 | /* manually timeout all conns in the graveyard */ | ||
485 | spin_lock(&peer->conn_gylock); | ||
486 | while (!list_empty(&peer->conn_graveyard)) { | ||
487 | conn = list_entry(peer->conn_graveyard.next, | ||
488 | struct rxrpc_connection, link); | ||
489 | err = rxrpc_krxtimod_del_timer(&conn->timeout); | ||
490 | spin_unlock(&peer->conn_gylock); | ||
491 | |||
492 | if (err == 0) | ||
493 | rxrpc_conn_do_timeout(conn); | ||
494 | |||
495 | spin_lock(&peer->conn_gylock); | ||
496 | } | ||
497 | spin_unlock(&peer->conn_gylock); | ||
498 | |||
499 | /* wait for the the conn graveyard to be completely cleared */ | ||
500 | set_current_state(TASK_UNINTERRUPTIBLE); | ||
501 | add_wait_queue(&peer->conn_gy_waitq, &myself); | ||
502 | |||
503 | while (atomic_read(&peer->conn_count) != 0) { | ||
504 | schedule(); | ||
505 | set_current_state(TASK_UNINTERRUPTIBLE); | ||
506 | } | ||
507 | |||
508 | remove_wait_queue(&peer->conn_gy_waitq, &myself); | ||
509 | set_current_state(TASK_RUNNING); | ||
510 | |||
511 | _leave(""); | ||
512 | } /* end rxrpc_conn_clearall() */ | ||
513 | |||
514 | /*****************************************************************************/ | ||
515 | /* | ||
516 | * allocate and prepare a message for sending out through the transport | ||
517 | * endpoint | ||
518 | */ | ||
519 | int rxrpc_conn_newmsg(struct rxrpc_connection *conn, | ||
520 | struct rxrpc_call *call, | ||
521 | uint8_t type, | ||
522 | int dcount, | ||
523 | struct kvec diov[], | ||
524 | gfp_t alloc_flags, | ||
525 | struct rxrpc_message **_msg) | ||
526 | { | ||
527 | struct rxrpc_message *msg; | ||
528 | int loop; | ||
529 | |||
530 | _enter("%p{%d},%p,%u", conn, ntohs(conn->addr.sin_port), call, type); | ||
531 | |||
532 | if (dcount > 3) { | ||
533 | _leave(" = -EINVAL"); | ||
534 | return -EINVAL; | ||
535 | } | ||
536 | |||
537 | msg = kzalloc(sizeof(struct rxrpc_message), alloc_flags); | ||
538 | if (!msg) { | ||
539 | _leave(" = -ENOMEM"); | ||
540 | return -ENOMEM; | ||
541 | } | ||
542 | |||
543 | atomic_set(&msg->usage, 1); | ||
544 | |||
545 | INIT_LIST_HEAD(&msg->link); | ||
546 | |||
547 | msg->state = RXRPC_MSG_PREPARED; | ||
548 | |||
549 | msg->hdr.epoch = conn->out_epoch; | ||
550 | msg->hdr.cid = conn->conn_id | (call ? call->chan_ix : 0); | ||
551 | msg->hdr.callNumber = call ? call->call_id : 0; | ||
552 | msg->hdr.type = type; | ||
553 | msg->hdr.flags = conn->out_clientflag; | ||
554 | msg->hdr.securityIndex = conn->security_ix; | ||
555 | msg->hdr.serviceId = conn->service_id; | ||
556 | |||
557 | /* generate sequence numbers for data packets */ | ||
558 | if (call) { | ||
559 | switch (type) { | ||
560 | case RXRPC_PACKET_TYPE_DATA: | ||
561 | msg->seq = ++call->snd_seq_count; | ||
562 | msg->hdr.seq = htonl(msg->seq); | ||
563 | break; | ||
564 | case RXRPC_PACKET_TYPE_ACK: | ||
565 | /* ACK sequence numbers are complicated. The following | ||
566 | * may be wrong: | ||
567 | * - jumbo packet ACKs should have a seq number | ||
568 | * - normal ACKs should not | ||
569 | */ | ||
570 | default: | ||
571 | break; | ||
572 | } | ||
573 | } | ||
574 | |||
575 | msg->dcount = dcount + 1; | ||
576 | msg->dsize = sizeof(msg->hdr); | ||
577 | msg->data[0].iov_len = sizeof(msg->hdr); | ||
578 | msg->data[0].iov_base = &msg->hdr; | ||
579 | |||
580 | for (loop=0; loop < dcount; loop++) { | ||
581 | msg->dsize += diov[loop].iov_len; | ||
582 | msg->data[loop+1].iov_len = diov[loop].iov_len; | ||
583 | msg->data[loop+1].iov_base = diov[loop].iov_base; | ||
584 | } | ||
585 | |||
586 | __RXACCT(atomic_inc(&rxrpc_message_count)); | ||
587 | *_msg = msg; | ||
588 | _leave(" = 0 (%p) #%d", msg, atomic_read(&rxrpc_message_count)); | ||
589 | return 0; | ||
590 | } /* end rxrpc_conn_newmsg() */ | ||
591 | |||
592 | /*****************************************************************************/ | ||
593 | /* | ||
594 | * free a message | ||
595 | */ | ||
596 | void __rxrpc_put_message(struct rxrpc_message *msg) | ||
597 | { | ||
598 | int loop; | ||
599 | |||
600 | _enter("%p #%d", msg, atomic_read(&rxrpc_message_count)); | ||
601 | |||
602 | if (msg->pkt) | ||
603 | kfree_skb(msg->pkt); | ||
604 | rxrpc_put_connection(msg->conn); | ||
605 | |||
606 | for (loop = 0; loop < 8; loop++) | ||
607 | if (test_bit(loop, &msg->dfree)) | ||
608 | kfree(msg->data[loop].iov_base); | ||
609 | |||
610 | __RXACCT(atomic_dec(&rxrpc_message_count)); | ||
611 | kfree(msg); | ||
612 | |||
613 | _leave(""); | ||
614 | } /* end __rxrpc_put_message() */ | ||
615 | |||
616 | /*****************************************************************************/ | ||
617 | /* | ||
618 | * send a message out through the transport endpoint | ||
619 | */ | ||
620 | int rxrpc_conn_sendmsg(struct rxrpc_connection *conn, | ||
621 | struct rxrpc_message *msg) | ||
622 | { | ||
623 | struct msghdr msghdr; | ||
624 | int ret; | ||
625 | |||
626 | _enter("%p{%d}", conn, ntohs(conn->addr.sin_port)); | ||
627 | |||
628 | /* fill in some fields in the header */ | ||
629 | spin_lock(&conn->lock); | ||
630 | msg->hdr.serial = htonl(++conn->serial_counter); | ||
631 | msg->rttdone = 0; | ||
632 | spin_unlock(&conn->lock); | ||
633 | |||
634 | /* set up the message to be transmitted */ | ||
635 | msghdr.msg_name = &conn->addr; | ||
636 | msghdr.msg_namelen = sizeof(conn->addr); | ||
637 | msghdr.msg_control = NULL; | ||
638 | msghdr.msg_controllen = 0; | ||
639 | msghdr.msg_flags = MSG_CONFIRM | MSG_DONTWAIT; | ||
640 | |||
641 | _net("Sending message type %d of %Zd bytes to %08x:%d", | ||
642 | msg->hdr.type, | ||
643 | msg->dsize, | ||
644 | ntohl(conn->addr.sin_addr.s_addr), | ||
645 | ntohs(conn->addr.sin_port)); | ||
646 | |||
647 | /* send the message */ | ||
648 | ret = kernel_sendmsg(conn->trans->socket, &msghdr, | ||
649 | msg->data, msg->dcount, msg->dsize); | ||
650 | if (ret < 0) { | ||
651 | msg->state = RXRPC_MSG_ERROR; | ||
652 | } else { | ||
653 | msg->state = RXRPC_MSG_SENT; | ||
654 | ret = 0; | ||
655 | |||
656 | spin_lock(&conn->lock); | ||
657 | do_gettimeofday(&conn->atime); | ||
658 | msg->stamp = conn->atime; | ||
659 | spin_unlock(&conn->lock); | ||
660 | } | ||
661 | |||
662 | _leave(" = %d", ret); | ||
663 | |||
664 | return ret; | ||
665 | } /* end rxrpc_conn_sendmsg() */ | ||
666 | |||
667 | /*****************************************************************************/ | ||
668 | /* | ||
669 | * deal with a subsequent call packet | ||
670 | */ | ||
671 | int rxrpc_conn_receive_call_packet(struct rxrpc_connection *conn, | ||
672 | struct rxrpc_call *call, | ||
673 | struct rxrpc_message *msg) | ||
674 | { | ||
675 | struct rxrpc_message *pmsg; | ||
676 | struct dst_entry *dst; | ||
677 | struct list_head *_p; | ||
678 | unsigned cix, seq; | ||
679 | int ret = 0; | ||
680 | |||
681 | _enter("%p,%p,%p", conn, call, msg); | ||
682 | |||
683 | if (!call) { | ||
684 | cix = ntohl(msg->hdr.cid) & RXRPC_CHANNELMASK; | ||
685 | |||
686 | spin_lock(&conn->lock); | ||
687 | call = conn->channels[cix]; | ||
688 | |||
689 | if (!call || call->call_id != msg->hdr.callNumber) { | ||
690 | spin_unlock(&conn->lock); | ||
691 | rxrpc_trans_immediate_abort(conn->trans, msg, -ENOENT); | ||
692 | goto out; | ||
693 | } | ||
694 | else { | ||
695 | rxrpc_get_call(call); | ||
696 | spin_unlock(&conn->lock); | ||
697 | } | ||
698 | } | ||
699 | else { | ||
700 | rxrpc_get_call(call); | ||
701 | } | ||
702 | |||
703 | _proto("Received packet %%%u [%u] on call %hu:%u:%u", | ||
704 | ntohl(msg->hdr.serial), | ||
705 | ntohl(msg->hdr.seq), | ||
706 | ntohs(msg->hdr.serviceId), | ||
707 | ntohl(conn->conn_id), | ||
708 | ntohl(call->call_id)); | ||
709 | |||
710 | call->pkt_rcv_count++; | ||
711 | |||
712 | dst = msg->pkt->dst; | ||
713 | if (dst && dst->dev) | ||
714 | conn->peer->if_mtu = | ||
715 | dst->dev->mtu - dst->dev->hard_header_len; | ||
716 | |||
717 | /* queue on the call in seq order */ | ||
718 | rxrpc_get_message(msg); | ||
719 | seq = msg->seq; | ||
720 | |||
721 | spin_lock(&call->lock); | ||
722 | list_for_each(_p, &call->rcv_receiveq) { | ||
723 | pmsg = list_entry(_p, struct rxrpc_message, link); | ||
724 | if (pmsg->seq > seq) | ||
725 | break; | ||
726 | } | ||
727 | list_add_tail(&msg->link, _p); | ||
728 | |||
729 | /* reset the activity timeout */ | ||
730 | call->flags |= RXRPC_CALL_RCV_PKT; | ||
731 | mod_timer(&call->rcv_timeout,jiffies + rxrpc_call_rcv_timeout * HZ); | ||
732 | |||
733 | spin_unlock(&call->lock); | ||
734 | |||
735 | rxrpc_krxiod_queue_call(call); | ||
736 | |||
737 | rxrpc_put_call(call); | ||
738 | out: | ||
739 | _leave(" = %d", ret); | ||
740 | return ret; | ||
741 | } /* end rxrpc_conn_receive_call_packet() */ | ||
742 | |||
743 | /*****************************************************************************/ | ||
744 | /* | ||
745 | * handle an ICMP error being applied to a connection | ||
746 | */ | ||
747 | void rxrpc_conn_handle_error(struct rxrpc_connection *conn, | ||
748 | int local, int errno) | ||
749 | { | ||
750 | struct rxrpc_call *calls[4]; | ||
751 | int loop; | ||
752 | |||
753 | _enter("%p{%d},%d", conn, ntohs(conn->addr.sin_port), errno); | ||
754 | |||
755 | /* get a ref to all my calls in one go */ | ||
756 | memset(calls, 0, sizeof(calls)); | ||
757 | spin_lock(&conn->lock); | ||
758 | |||
759 | for (loop = 3; loop >= 0; loop--) { | ||
760 | if (conn->channels[loop]) { | ||
761 | calls[loop] = conn->channels[loop]; | ||
762 | rxrpc_get_call(calls[loop]); | ||
763 | } | ||
764 | } | ||
765 | |||
766 | spin_unlock(&conn->lock); | ||
767 | |||
768 | /* now kick them all */ | ||
769 | for (loop = 3; loop >= 0; loop--) { | ||
770 | if (calls[loop]) { | ||
771 | rxrpc_call_handle_error(calls[loop], local, errno); | ||
772 | rxrpc_put_call(calls[loop]); | ||
773 | } | ||
774 | } | ||
775 | |||
776 | _leave(""); | ||
777 | } /* end rxrpc_conn_handle_error() */ | ||
diff --git a/net/rxrpc/internal.h b/net/rxrpc/internal.h deleted file mode 100644 index cc0c5795a103..000000000000 --- a/net/rxrpc/internal.h +++ /dev/null | |||
@@ -1,106 +0,0 @@ | |||
1 | /* internal.h: internal Rx RPC stuff | ||
2 | * | ||
3 | * Copyright (c) 2002 David Howells (dhowells@redhat.com). | ||
4 | */ | ||
5 | |||
6 | #ifndef RXRPC_INTERNAL_H | ||
7 | #define RXRPC_INTERNAL_H | ||
8 | |||
9 | #include <linux/compiler.h> | ||
10 | #include <linux/kernel.h> | ||
11 | |||
12 | /* | ||
13 | * debug accounting | ||
14 | */ | ||
15 | #if 1 | ||
16 | #define __RXACCT_DECL(X) X | ||
17 | #define __RXACCT(X) do { X; } while(0) | ||
18 | #else | ||
19 | #define __RXACCT_DECL(X) | ||
20 | #define __RXACCT(X) do { } while(0) | ||
21 | #endif | ||
22 | |||
23 | __RXACCT_DECL(extern atomic_t rxrpc_transport_count); | ||
24 | __RXACCT_DECL(extern atomic_t rxrpc_peer_count); | ||
25 | __RXACCT_DECL(extern atomic_t rxrpc_connection_count); | ||
26 | __RXACCT_DECL(extern atomic_t rxrpc_call_count); | ||
27 | __RXACCT_DECL(extern atomic_t rxrpc_message_count); | ||
28 | |||
29 | /* | ||
30 | * debug tracing | ||
31 | */ | ||
32 | #define kenter(FMT, a...) printk("==> %s("FMT")\n",__FUNCTION__ , ##a) | ||
33 | #define kleave(FMT, a...) printk("<== %s()"FMT"\n",__FUNCTION__ , ##a) | ||
34 | #define kdebug(FMT, a...) printk(" "FMT"\n" , ##a) | ||
35 | #define kproto(FMT, a...) printk("### "FMT"\n" , ##a) | ||
36 | #define knet(FMT, a...) printk(" "FMT"\n" , ##a) | ||
37 | |||
38 | #if 0 | ||
39 | #define _enter(FMT, a...) kenter(FMT , ##a) | ||
40 | #define _leave(FMT, a...) kleave(FMT , ##a) | ||
41 | #define _debug(FMT, a...) kdebug(FMT , ##a) | ||
42 | #define _proto(FMT, a...) kproto(FMT , ##a) | ||
43 | #define _net(FMT, a...) knet(FMT , ##a) | ||
44 | #else | ||
45 | #define _enter(FMT, a...) do { if (rxrpc_ktrace) kenter(FMT , ##a); } while(0) | ||
46 | #define _leave(FMT, a...) do { if (rxrpc_ktrace) kleave(FMT , ##a); } while(0) | ||
47 | #define _debug(FMT, a...) do { if (rxrpc_kdebug) kdebug(FMT , ##a); } while(0) | ||
48 | #define _proto(FMT, a...) do { if (rxrpc_kproto) kproto(FMT , ##a); } while(0) | ||
49 | #define _net(FMT, a...) do { if (rxrpc_knet) knet (FMT , ##a); } while(0) | ||
50 | #endif | ||
51 | |||
52 | static inline void rxrpc_discard_my_signals(void) | ||
53 | { | ||
54 | while (signal_pending(current)) { | ||
55 | siginfo_t sinfo; | ||
56 | |||
57 | spin_lock_irq(¤t->sighand->siglock); | ||
58 | dequeue_signal(current, ¤t->blocked, &sinfo); | ||
59 | spin_unlock_irq(¤t->sighand->siglock); | ||
60 | } | ||
61 | } | ||
62 | |||
63 | /* | ||
64 | * call.c | ||
65 | */ | ||
66 | extern struct list_head rxrpc_calls; | ||
67 | extern struct rw_semaphore rxrpc_calls_sem; | ||
68 | |||
69 | /* | ||
70 | * connection.c | ||
71 | */ | ||
72 | extern struct list_head rxrpc_conns; | ||
73 | extern struct rw_semaphore rxrpc_conns_sem; | ||
74 | extern unsigned long rxrpc_conn_timeout; | ||
75 | |||
76 | extern void rxrpc_conn_clearall(struct rxrpc_peer *peer); | ||
77 | |||
78 | /* | ||
79 | * peer.c | ||
80 | */ | ||
81 | extern struct list_head rxrpc_peers; | ||
82 | extern struct rw_semaphore rxrpc_peers_sem; | ||
83 | extern unsigned long rxrpc_peer_timeout; | ||
84 | |||
85 | extern void rxrpc_peer_calculate_rtt(struct rxrpc_peer *peer, | ||
86 | struct rxrpc_message *msg, | ||
87 | struct rxrpc_message *resp); | ||
88 | |||
89 | extern void rxrpc_peer_clearall(struct rxrpc_transport *trans); | ||
90 | |||
91 | |||
92 | /* | ||
93 | * proc.c | ||
94 | */ | ||
95 | #ifdef CONFIG_PROC_FS | ||
96 | extern int rxrpc_proc_init(void); | ||
97 | extern void rxrpc_proc_cleanup(void); | ||
98 | #endif | ||
99 | |||
100 | /* | ||
101 | * transport.c | ||
102 | */ | ||
103 | extern struct list_head rxrpc_proc_transports; | ||
104 | extern struct rw_semaphore rxrpc_proc_transports_sem; | ||
105 | |||
106 | #endif /* RXRPC_INTERNAL_H */ | ||
diff --git a/net/rxrpc/krxiod.c b/net/rxrpc/krxiod.c deleted file mode 100644 index bbbcd6c24048..000000000000 --- a/net/rxrpc/krxiod.c +++ /dev/null | |||
@@ -1,262 +0,0 @@ | |||
1 | /* krxiod.c: Rx I/O daemon | ||
2 | * | ||
3 | * Copyright (C) 2002 Red Hat, Inc. All Rights Reserved. | ||
4 | * Written by David Howells (dhowells@redhat.com) | ||
5 | * | ||
6 | * This program is free software; you can redistribute it and/or | ||
7 | * modify it under the terms of the GNU General Public License | ||
8 | * as published by the Free Software Foundation; either version | ||
9 | * 2 of the License, or (at your option) any later version. | ||
10 | */ | ||
11 | |||
12 | #include <linux/sched.h> | ||
13 | #include <linux/completion.h> | ||
14 | #include <linux/spinlock.h> | ||
15 | #include <linux/init.h> | ||
16 | #include <linux/freezer.h> | ||
17 | #include <rxrpc/krxiod.h> | ||
18 | #include <rxrpc/transport.h> | ||
19 | #include <rxrpc/peer.h> | ||
20 | #include <rxrpc/call.h> | ||
21 | #include "internal.h" | ||
22 | |||
23 | static DECLARE_WAIT_QUEUE_HEAD(rxrpc_krxiod_sleepq); | ||
24 | static DECLARE_COMPLETION(rxrpc_krxiod_dead); | ||
25 | |||
26 | static atomic_t rxrpc_krxiod_qcount = ATOMIC_INIT(0); | ||
27 | |||
28 | static LIST_HEAD(rxrpc_krxiod_transportq); | ||
29 | static DEFINE_SPINLOCK(rxrpc_krxiod_transportq_lock); | ||
30 | |||
31 | static LIST_HEAD(rxrpc_krxiod_callq); | ||
32 | static DEFINE_SPINLOCK(rxrpc_krxiod_callq_lock); | ||
33 | |||
34 | static volatile int rxrpc_krxiod_die; | ||
35 | |||
36 | /*****************************************************************************/ | ||
37 | /* | ||
38 | * Rx I/O daemon | ||
39 | */ | ||
40 | static int rxrpc_krxiod(void *arg) | ||
41 | { | ||
42 | DECLARE_WAITQUEUE(krxiod,current); | ||
43 | |||
44 | printk("Started krxiod %d\n",current->pid); | ||
45 | |||
46 | daemonize("krxiod"); | ||
47 | |||
48 | /* loop around waiting for work to do */ | ||
49 | do { | ||
50 | /* wait for work or to be told to exit */ | ||
51 | _debug("### Begin Wait"); | ||
52 | if (!atomic_read(&rxrpc_krxiod_qcount)) { | ||
53 | set_current_state(TASK_INTERRUPTIBLE); | ||
54 | |||
55 | add_wait_queue(&rxrpc_krxiod_sleepq, &krxiod); | ||
56 | |||
57 | for (;;) { | ||
58 | set_current_state(TASK_INTERRUPTIBLE); | ||
59 | if (atomic_read(&rxrpc_krxiod_qcount) || | ||
60 | rxrpc_krxiod_die || | ||
61 | signal_pending(current)) | ||
62 | break; | ||
63 | |||
64 | schedule(); | ||
65 | } | ||
66 | |||
67 | remove_wait_queue(&rxrpc_krxiod_sleepq, &krxiod); | ||
68 | set_current_state(TASK_RUNNING); | ||
69 | } | ||
70 | _debug("### End Wait"); | ||
71 | |||
72 | /* do work if been given some to do */ | ||
73 | _debug("### Begin Work"); | ||
74 | |||
75 | /* see if there's a transport in need of attention */ | ||
76 | if (!list_empty(&rxrpc_krxiod_transportq)) { | ||
77 | struct rxrpc_transport *trans = NULL; | ||
78 | |||
79 | spin_lock_irq(&rxrpc_krxiod_transportq_lock); | ||
80 | |||
81 | if (!list_empty(&rxrpc_krxiod_transportq)) { | ||
82 | trans = list_entry( | ||
83 | rxrpc_krxiod_transportq.next, | ||
84 | struct rxrpc_transport, | ||
85 | krxiodq_link); | ||
86 | |||
87 | list_del_init(&trans->krxiodq_link); | ||
88 | atomic_dec(&rxrpc_krxiod_qcount); | ||
89 | |||
90 | /* make sure it hasn't gone away and doesn't go | ||
91 | * away */ | ||
92 | if (atomic_read(&trans->usage)>0) | ||
93 | rxrpc_get_transport(trans); | ||
94 | else | ||
95 | trans = NULL; | ||
96 | } | ||
97 | |||
98 | spin_unlock_irq(&rxrpc_krxiod_transportq_lock); | ||
99 | |||
100 | if (trans) { | ||
101 | rxrpc_trans_receive_packet(trans); | ||
102 | rxrpc_put_transport(trans); | ||
103 | } | ||
104 | } | ||
105 | |||
106 | /* see if there's a call in need of attention */ | ||
107 | if (!list_empty(&rxrpc_krxiod_callq)) { | ||
108 | struct rxrpc_call *call = NULL; | ||
109 | |||
110 | spin_lock_irq(&rxrpc_krxiod_callq_lock); | ||
111 | |||
112 | if (!list_empty(&rxrpc_krxiod_callq)) { | ||
113 | call = list_entry(rxrpc_krxiod_callq.next, | ||
114 | struct rxrpc_call, | ||
115 | rcv_krxiodq_lk); | ||
116 | list_del_init(&call->rcv_krxiodq_lk); | ||
117 | atomic_dec(&rxrpc_krxiod_qcount); | ||
118 | |||
119 | /* make sure it hasn't gone away and doesn't go | ||
120 | * away */ | ||
121 | if (atomic_read(&call->usage) > 0) { | ||
122 | _debug("@@@ KRXIOD" | ||
123 | " Begin Attend Call %p", call); | ||
124 | rxrpc_get_call(call); | ||
125 | } | ||
126 | else { | ||
127 | call = NULL; | ||
128 | } | ||
129 | } | ||
130 | |||
131 | spin_unlock_irq(&rxrpc_krxiod_callq_lock); | ||
132 | |||
133 | if (call) { | ||
134 | rxrpc_call_do_stuff(call); | ||
135 | rxrpc_put_call(call); | ||
136 | _debug("@@@ KRXIOD End Attend Call %p", call); | ||
137 | } | ||
138 | } | ||
139 | |||
140 | _debug("### End Work"); | ||
141 | |||
142 | try_to_freeze(); | ||
143 | |||
144 | /* discard pending signals */ | ||
145 | rxrpc_discard_my_signals(); | ||
146 | |||
147 | } while (!rxrpc_krxiod_die); | ||
148 | |||
149 | /* and that's all */ | ||
150 | complete_and_exit(&rxrpc_krxiod_dead, 0); | ||
151 | |||
152 | } /* end rxrpc_krxiod() */ | ||
153 | |||
154 | /*****************************************************************************/ | ||
155 | /* | ||
156 | * start up a krxiod daemon | ||
157 | */ | ||
158 | int __init rxrpc_krxiod_init(void) | ||
159 | { | ||
160 | return kernel_thread(rxrpc_krxiod, NULL, 0); | ||
161 | |||
162 | } /* end rxrpc_krxiod_init() */ | ||
163 | |||
164 | /*****************************************************************************/ | ||
165 | /* | ||
166 | * kill the krxiod daemon and wait for it to complete | ||
167 | */ | ||
168 | void rxrpc_krxiod_kill(void) | ||
169 | { | ||
170 | rxrpc_krxiod_die = 1; | ||
171 | wake_up_all(&rxrpc_krxiod_sleepq); | ||
172 | wait_for_completion(&rxrpc_krxiod_dead); | ||
173 | |||
174 | } /* end rxrpc_krxiod_kill() */ | ||
175 | |||
176 | /*****************************************************************************/ | ||
177 | /* | ||
178 | * queue a transport for attention by krxiod | ||
179 | */ | ||
180 | void rxrpc_krxiod_queue_transport(struct rxrpc_transport *trans) | ||
181 | { | ||
182 | unsigned long flags; | ||
183 | |||
184 | _enter(""); | ||
185 | |||
186 | if (list_empty(&trans->krxiodq_link)) { | ||
187 | spin_lock_irqsave(&rxrpc_krxiod_transportq_lock, flags); | ||
188 | |||
189 | if (list_empty(&trans->krxiodq_link)) { | ||
190 | if (atomic_read(&trans->usage) > 0) { | ||
191 | list_add_tail(&trans->krxiodq_link, | ||
192 | &rxrpc_krxiod_transportq); | ||
193 | atomic_inc(&rxrpc_krxiod_qcount); | ||
194 | } | ||
195 | } | ||
196 | |||
197 | spin_unlock_irqrestore(&rxrpc_krxiod_transportq_lock, flags); | ||
198 | wake_up_all(&rxrpc_krxiod_sleepq); | ||
199 | } | ||
200 | |||
201 | _leave(""); | ||
202 | |||
203 | } /* end rxrpc_krxiod_queue_transport() */ | ||
204 | |||
205 | /*****************************************************************************/ | ||
206 | /* | ||
207 | * dequeue a transport from krxiod's attention queue | ||
208 | */ | ||
209 | void rxrpc_krxiod_dequeue_transport(struct rxrpc_transport *trans) | ||
210 | { | ||
211 | unsigned long flags; | ||
212 | |||
213 | _enter(""); | ||
214 | |||
215 | spin_lock_irqsave(&rxrpc_krxiod_transportq_lock, flags); | ||
216 | if (!list_empty(&trans->krxiodq_link)) { | ||
217 | list_del_init(&trans->krxiodq_link); | ||
218 | atomic_dec(&rxrpc_krxiod_qcount); | ||
219 | } | ||
220 | spin_unlock_irqrestore(&rxrpc_krxiod_transportq_lock, flags); | ||
221 | |||
222 | _leave(""); | ||
223 | |||
224 | } /* end rxrpc_krxiod_dequeue_transport() */ | ||
225 | |||
226 | /*****************************************************************************/ | ||
227 | /* | ||
228 | * queue a call for attention by krxiod | ||
229 | */ | ||
230 | void rxrpc_krxiod_queue_call(struct rxrpc_call *call) | ||
231 | { | ||
232 | unsigned long flags; | ||
233 | |||
234 | if (list_empty(&call->rcv_krxiodq_lk)) { | ||
235 | spin_lock_irqsave(&rxrpc_krxiod_callq_lock, flags); | ||
236 | if (atomic_read(&call->usage) > 0) { | ||
237 | list_add_tail(&call->rcv_krxiodq_lk, | ||
238 | &rxrpc_krxiod_callq); | ||
239 | atomic_inc(&rxrpc_krxiod_qcount); | ||
240 | } | ||
241 | spin_unlock_irqrestore(&rxrpc_krxiod_callq_lock, flags); | ||
242 | } | ||
243 | wake_up_all(&rxrpc_krxiod_sleepq); | ||
244 | |||
245 | } /* end rxrpc_krxiod_queue_call() */ | ||
246 | |||
247 | /*****************************************************************************/ | ||
248 | /* | ||
249 | * dequeue a call from krxiod's attention queue | ||
250 | */ | ||
251 | void rxrpc_krxiod_dequeue_call(struct rxrpc_call *call) | ||
252 | { | ||
253 | unsigned long flags; | ||
254 | |||
255 | spin_lock_irqsave(&rxrpc_krxiod_callq_lock, flags); | ||
256 | if (!list_empty(&call->rcv_krxiodq_lk)) { | ||
257 | list_del_init(&call->rcv_krxiodq_lk); | ||
258 | atomic_dec(&rxrpc_krxiod_qcount); | ||
259 | } | ||
260 | spin_unlock_irqrestore(&rxrpc_krxiod_callq_lock, flags); | ||
261 | |||
262 | } /* end rxrpc_krxiod_dequeue_call() */ | ||
diff --git a/net/rxrpc/krxsecd.c b/net/rxrpc/krxsecd.c deleted file mode 100644 index 9a1e7f5e034c..000000000000 --- a/net/rxrpc/krxsecd.c +++ /dev/null | |||
@@ -1,270 +0,0 @@ | |||
1 | /* krxsecd.c: Rx security daemon | ||
2 | * | ||
3 | * Copyright (C) 2002 Red Hat, Inc. All Rights Reserved. | ||
4 | * Written by David Howells (dhowells@redhat.com) | ||
5 | * | ||
6 | * This program is free software; you can redistribute it and/or | ||
7 | * modify it under the terms of the GNU General Public License | ||
8 | * as published by the Free Software Foundation; either version | ||
9 | * 2 of the License, or (at your option) any later version. | ||
10 | * | ||
11 | * This daemon deals with: | ||
12 | * - consulting the application as to whether inbound peers and calls should be authorised | ||
13 | * - generating security challenges for inbound connections | ||
14 | * - responding to security challenges on outbound connections | ||
15 | */ | ||
16 | |||
17 | #include <linux/module.h> | ||
18 | #include <linux/sched.h> | ||
19 | #include <linux/completion.h> | ||
20 | #include <linux/spinlock.h> | ||
21 | #include <linux/init.h> | ||
22 | #include <rxrpc/krxsecd.h> | ||
23 | #include <rxrpc/transport.h> | ||
24 | #include <rxrpc/connection.h> | ||
25 | #include <rxrpc/message.h> | ||
26 | #include <rxrpc/peer.h> | ||
27 | #include <rxrpc/call.h> | ||
28 | #include <linux/udp.h> | ||
29 | #include <linux/ip.h> | ||
30 | #include <linux/freezer.h> | ||
31 | #include <net/sock.h> | ||
32 | #include "internal.h" | ||
33 | |||
34 | static DECLARE_WAIT_QUEUE_HEAD(rxrpc_krxsecd_sleepq); | ||
35 | static DECLARE_COMPLETION(rxrpc_krxsecd_dead); | ||
36 | static volatile int rxrpc_krxsecd_die; | ||
37 | |||
38 | static atomic_t rxrpc_krxsecd_qcount; | ||
39 | |||
40 | /* queue of unprocessed inbound messages with seqno #1 and | ||
41 | * RXRPC_CLIENT_INITIATED flag set */ | ||
42 | static LIST_HEAD(rxrpc_krxsecd_initmsgq); | ||
43 | static DEFINE_SPINLOCK(rxrpc_krxsecd_initmsgq_lock); | ||
44 | |||
45 | static void rxrpc_krxsecd_process_incoming_call(struct rxrpc_message *msg); | ||
46 | |||
47 | /*****************************************************************************/ | ||
48 | /* | ||
49 | * Rx security daemon | ||
50 | */ | ||
51 | static int rxrpc_krxsecd(void *arg) | ||
52 | { | ||
53 | DECLARE_WAITQUEUE(krxsecd, current); | ||
54 | |||
55 | int die; | ||
56 | |||
57 | printk("Started krxsecd %d\n", current->pid); | ||
58 | |||
59 | daemonize("krxsecd"); | ||
60 | |||
61 | /* loop around waiting for work to do */ | ||
62 | do { | ||
63 | /* wait for work or to be told to exit */ | ||
64 | _debug("### Begin Wait"); | ||
65 | if (!atomic_read(&rxrpc_krxsecd_qcount)) { | ||
66 | set_current_state(TASK_INTERRUPTIBLE); | ||
67 | |||
68 | add_wait_queue(&rxrpc_krxsecd_sleepq, &krxsecd); | ||
69 | |||
70 | for (;;) { | ||
71 | set_current_state(TASK_INTERRUPTIBLE); | ||
72 | if (atomic_read(&rxrpc_krxsecd_qcount) || | ||
73 | rxrpc_krxsecd_die || | ||
74 | signal_pending(current)) | ||
75 | break; | ||
76 | |||
77 | schedule(); | ||
78 | } | ||
79 | |||
80 | remove_wait_queue(&rxrpc_krxsecd_sleepq, &krxsecd); | ||
81 | set_current_state(TASK_RUNNING); | ||
82 | } | ||
83 | die = rxrpc_krxsecd_die; | ||
84 | _debug("### End Wait"); | ||
85 | |||
86 | /* see if there're incoming calls in need of authenticating */ | ||
87 | _debug("### Begin Inbound Calls"); | ||
88 | |||
89 | if (!list_empty(&rxrpc_krxsecd_initmsgq)) { | ||
90 | struct rxrpc_message *msg = NULL; | ||
91 | |||
92 | spin_lock(&rxrpc_krxsecd_initmsgq_lock); | ||
93 | |||
94 | if (!list_empty(&rxrpc_krxsecd_initmsgq)) { | ||
95 | msg = list_entry(rxrpc_krxsecd_initmsgq.next, | ||
96 | struct rxrpc_message, link); | ||
97 | list_del_init(&msg->link); | ||
98 | atomic_dec(&rxrpc_krxsecd_qcount); | ||
99 | } | ||
100 | |||
101 | spin_unlock(&rxrpc_krxsecd_initmsgq_lock); | ||
102 | |||
103 | if (msg) { | ||
104 | rxrpc_krxsecd_process_incoming_call(msg); | ||
105 | rxrpc_put_message(msg); | ||
106 | } | ||
107 | } | ||
108 | |||
109 | _debug("### End Inbound Calls"); | ||
110 | |||
111 | try_to_freeze(); | ||
112 | |||
113 | /* discard pending signals */ | ||
114 | rxrpc_discard_my_signals(); | ||
115 | |||
116 | } while (!die); | ||
117 | |||
118 | /* and that's all */ | ||
119 | complete_and_exit(&rxrpc_krxsecd_dead, 0); | ||
120 | |||
121 | } /* end rxrpc_krxsecd() */ | ||
122 | |||
123 | /*****************************************************************************/ | ||
124 | /* | ||
125 | * start up a krxsecd daemon | ||
126 | */ | ||
127 | int __init rxrpc_krxsecd_init(void) | ||
128 | { | ||
129 | return kernel_thread(rxrpc_krxsecd, NULL, 0); | ||
130 | |||
131 | } /* end rxrpc_krxsecd_init() */ | ||
132 | |||
133 | /*****************************************************************************/ | ||
134 | /* | ||
135 | * kill the krxsecd daemon and wait for it to complete | ||
136 | */ | ||
137 | void rxrpc_krxsecd_kill(void) | ||
138 | { | ||
139 | rxrpc_krxsecd_die = 1; | ||
140 | wake_up_all(&rxrpc_krxsecd_sleepq); | ||
141 | wait_for_completion(&rxrpc_krxsecd_dead); | ||
142 | |||
143 | } /* end rxrpc_krxsecd_kill() */ | ||
144 | |||
145 | /*****************************************************************************/ | ||
146 | /* | ||
147 | * clear all pending incoming calls for the specified transport | ||
148 | */ | ||
149 | void rxrpc_krxsecd_clear_transport(struct rxrpc_transport *trans) | ||
150 | { | ||
151 | LIST_HEAD(tmp); | ||
152 | |||
153 | struct rxrpc_message *msg; | ||
154 | struct list_head *_p, *_n; | ||
155 | |||
156 | _enter("%p",trans); | ||
157 | |||
158 | /* move all the messages for this transport onto a temp list */ | ||
159 | spin_lock(&rxrpc_krxsecd_initmsgq_lock); | ||
160 | |||
161 | list_for_each_safe(_p, _n, &rxrpc_krxsecd_initmsgq) { | ||
162 | msg = list_entry(_p, struct rxrpc_message, link); | ||
163 | if (msg->trans == trans) { | ||
164 | list_move_tail(&msg->link, &tmp); | ||
165 | atomic_dec(&rxrpc_krxsecd_qcount); | ||
166 | } | ||
167 | } | ||
168 | |||
169 | spin_unlock(&rxrpc_krxsecd_initmsgq_lock); | ||
170 | |||
171 | /* zap all messages on the temp list */ | ||
172 | while (!list_empty(&tmp)) { | ||
173 | msg = list_entry(tmp.next, struct rxrpc_message, link); | ||
174 | list_del_init(&msg->link); | ||
175 | rxrpc_put_message(msg); | ||
176 | } | ||
177 | |||
178 | _leave(""); | ||
179 | } /* end rxrpc_krxsecd_clear_transport() */ | ||
180 | |||
181 | /*****************************************************************************/ | ||
182 | /* | ||
183 | * queue a message on the incoming calls list | ||
184 | */ | ||
185 | void rxrpc_krxsecd_queue_incoming_call(struct rxrpc_message *msg) | ||
186 | { | ||
187 | _enter("%p", msg); | ||
188 | |||
189 | /* queue for processing by krxsecd */ | ||
190 | spin_lock(&rxrpc_krxsecd_initmsgq_lock); | ||
191 | |||
192 | if (!rxrpc_krxsecd_die) { | ||
193 | rxrpc_get_message(msg); | ||
194 | list_add_tail(&msg->link, &rxrpc_krxsecd_initmsgq); | ||
195 | atomic_inc(&rxrpc_krxsecd_qcount); | ||
196 | } | ||
197 | |||
198 | spin_unlock(&rxrpc_krxsecd_initmsgq_lock); | ||
199 | |||
200 | wake_up(&rxrpc_krxsecd_sleepq); | ||
201 | |||
202 | _leave(""); | ||
203 | } /* end rxrpc_krxsecd_queue_incoming_call() */ | ||
204 | |||
205 | /*****************************************************************************/ | ||
206 | /* | ||
207 | * process the initial message of an incoming call | ||
208 | */ | ||
209 | void rxrpc_krxsecd_process_incoming_call(struct rxrpc_message *msg) | ||
210 | { | ||
211 | struct rxrpc_transport *trans = msg->trans; | ||
212 | struct rxrpc_service *srv; | ||
213 | struct rxrpc_call *call; | ||
214 | struct list_head *_p; | ||
215 | unsigned short sid; | ||
216 | int ret; | ||
217 | |||
218 | _enter("%p{tr=%p}", msg, trans); | ||
219 | |||
220 | ret = rxrpc_incoming_call(msg->conn, msg, &call); | ||
221 | if (ret < 0) | ||
222 | goto out; | ||
223 | |||
224 | /* find the matching service on the transport */ | ||
225 | sid = ntohs(msg->hdr.serviceId); | ||
226 | srv = NULL; | ||
227 | |||
228 | spin_lock(&trans->lock); | ||
229 | list_for_each(_p, &trans->services) { | ||
230 | srv = list_entry(_p, struct rxrpc_service, link); | ||
231 | if (srv->service_id == sid && try_module_get(srv->owner)) { | ||
232 | /* found a match (made sure it won't vanish) */ | ||
233 | _debug("found service '%s'", srv->name); | ||
234 | call->owner = srv->owner; | ||
235 | break; | ||
236 | } | ||
237 | } | ||
238 | spin_unlock(&trans->lock); | ||
239 | |||
240 | /* report the new connection | ||
241 | * - the func must inc the call's usage count to keep it | ||
242 | */ | ||
243 | ret = -ENOENT; | ||
244 | if (_p != &trans->services) { | ||
245 | /* attempt to accept the call */ | ||
246 | call->conn->service = srv; | ||
247 | call->app_attn_func = srv->attn_func; | ||
248 | call->app_error_func = srv->error_func; | ||
249 | call->app_aemap_func = srv->aemap_func; | ||
250 | |||
251 | ret = srv->new_call(call); | ||
252 | |||
253 | /* send an abort if an error occurred */ | ||
254 | if (ret < 0) { | ||
255 | rxrpc_call_abort(call, ret); | ||
256 | } | ||
257 | else { | ||
258 | /* formally receive and ACK the new packet */ | ||
259 | ret = rxrpc_conn_receive_call_packet(call->conn, | ||
260 | call, msg); | ||
261 | } | ||
262 | } | ||
263 | |||
264 | rxrpc_put_call(call); | ||
265 | out: | ||
266 | if (ret < 0) | ||
267 | rxrpc_trans_immediate_abort(trans, msg, ret); | ||
268 | |||
269 | _leave(" (%d)", ret); | ||
270 | } /* end rxrpc_krxsecd_process_incoming_call() */ | ||
diff --git a/net/rxrpc/krxtimod.c b/net/rxrpc/krxtimod.c deleted file mode 100644 index 9a9b6132dba4..000000000000 --- a/net/rxrpc/krxtimod.c +++ /dev/null | |||
@@ -1,204 +0,0 @@ | |||
1 | /* krxtimod.c: RXRPC timeout daemon | ||
2 | * | ||
3 | * Copyright (C) 2002 Red Hat, Inc. All Rights Reserved. | ||
4 | * Written by David Howells (dhowells@redhat.com) | ||
5 | * | ||
6 | * This program is free software; you can redistribute it and/or | ||
7 | * modify it under the terms of the GNU General Public License | ||
8 | * as published by the Free Software Foundation; either version | ||
9 | * 2 of the License, or (at your option) any later version. | ||
10 | */ | ||
11 | |||
12 | #include <linux/module.h> | ||
13 | #include <linux/init.h> | ||
14 | #include <linux/sched.h> | ||
15 | #include <linux/completion.h> | ||
16 | #include <linux/freezer.h> | ||
17 | #include <rxrpc/rxrpc.h> | ||
18 | #include <rxrpc/krxtimod.h> | ||
19 | #include <asm/errno.h> | ||
20 | #include "internal.h" | ||
21 | |||
22 | static DECLARE_COMPLETION(krxtimod_alive); | ||
23 | static DECLARE_COMPLETION(krxtimod_dead); | ||
24 | static DECLARE_WAIT_QUEUE_HEAD(krxtimod_sleepq); | ||
25 | static int krxtimod_die; | ||
26 | |||
27 | static LIST_HEAD(krxtimod_list); | ||
28 | static DEFINE_SPINLOCK(krxtimod_lock); | ||
29 | |||
30 | static int krxtimod(void *arg); | ||
31 | |||
32 | /*****************************************************************************/ | ||
33 | /* | ||
34 | * start the timeout daemon | ||
35 | */ | ||
36 | int rxrpc_krxtimod_start(void) | ||
37 | { | ||
38 | int ret; | ||
39 | |||
40 | ret = kernel_thread(krxtimod, NULL, 0); | ||
41 | if (ret < 0) | ||
42 | return ret; | ||
43 | |||
44 | wait_for_completion(&krxtimod_alive); | ||
45 | |||
46 | return ret; | ||
47 | } /* end rxrpc_krxtimod_start() */ | ||
48 | |||
49 | /*****************************************************************************/ | ||
50 | /* | ||
51 | * stop the timeout daemon | ||
52 | */ | ||
53 | void rxrpc_krxtimod_kill(void) | ||
54 | { | ||
55 | /* get rid of my daemon */ | ||
56 | krxtimod_die = 1; | ||
57 | wake_up(&krxtimod_sleepq); | ||
58 | wait_for_completion(&krxtimod_dead); | ||
59 | |||
60 | } /* end rxrpc_krxtimod_kill() */ | ||
61 | |||
62 | /*****************************************************************************/ | ||
63 | /* | ||
64 | * timeout processing daemon | ||
65 | */ | ||
66 | static int krxtimod(void *arg) | ||
67 | { | ||
68 | DECLARE_WAITQUEUE(myself, current); | ||
69 | |||
70 | rxrpc_timer_t *timer; | ||
71 | |||
72 | printk("Started krxtimod %d\n", current->pid); | ||
73 | |||
74 | daemonize("krxtimod"); | ||
75 | |||
76 | complete(&krxtimod_alive); | ||
77 | |||
78 | /* loop around looking for things to attend to */ | ||
79 | loop: | ||
80 | set_current_state(TASK_INTERRUPTIBLE); | ||
81 | add_wait_queue(&krxtimod_sleepq, &myself); | ||
82 | |||
83 | for (;;) { | ||
84 | unsigned long jif; | ||
85 | long timeout; | ||
86 | |||
87 | /* deal with the server being asked to die */ | ||
88 | if (krxtimod_die) { | ||
89 | remove_wait_queue(&krxtimod_sleepq, &myself); | ||
90 | _leave(""); | ||
91 | complete_and_exit(&krxtimod_dead, 0); | ||
92 | } | ||
93 | |||
94 | try_to_freeze(); | ||
95 | |||
96 | /* discard pending signals */ | ||
97 | rxrpc_discard_my_signals(); | ||
98 | |||
99 | /* work out the time to elapse before the next event */ | ||
100 | spin_lock(&krxtimod_lock); | ||
101 | if (list_empty(&krxtimod_list)) { | ||
102 | timeout = MAX_SCHEDULE_TIMEOUT; | ||
103 | } | ||
104 | else { | ||
105 | timer = list_entry(krxtimod_list.next, | ||
106 | rxrpc_timer_t, link); | ||
107 | timeout = timer->timo_jif; | ||
108 | jif = jiffies; | ||
109 | |||
110 | if (time_before_eq((unsigned long) timeout, jif)) | ||
111 | goto immediate; | ||
112 | |||
113 | else { | ||
114 | timeout = (long) timeout - (long) jiffies; | ||
115 | } | ||
116 | } | ||
117 | spin_unlock(&krxtimod_lock); | ||
118 | |||
119 | schedule_timeout(timeout); | ||
120 | |||
121 | set_current_state(TASK_INTERRUPTIBLE); | ||
122 | } | ||
123 | |||
124 | /* the thing on the front of the queue needs processing | ||
125 | * - we come here with the lock held and timer pointing to the expired | ||
126 | * entry | ||
127 | */ | ||
128 | immediate: | ||
129 | remove_wait_queue(&krxtimod_sleepq, &myself); | ||
130 | set_current_state(TASK_RUNNING); | ||
131 | |||
132 | _debug("@@@ Begin Timeout of %p", timer); | ||
133 | |||
134 | /* dequeue the timer */ | ||
135 | list_del_init(&timer->link); | ||
136 | spin_unlock(&krxtimod_lock); | ||
137 | |||
138 | /* call the timeout function */ | ||
139 | timer->ops->timed_out(timer); | ||
140 | |||
141 | _debug("@@@ End Timeout"); | ||
142 | goto loop; | ||
143 | |||
144 | } /* end krxtimod() */ | ||
145 | |||
146 | /*****************************************************************************/ | ||
147 | /* | ||
148 | * (re-)queue a timer | ||
149 | */ | ||
150 | void rxrpc_krxtimod_add_timer(rxrpc_timer_t *timer, unsigned long timeout) | ||
151 | { | ||
152 | struct list_head *_p; | ||
153 | rxrpc_timer_t *ptimer; | ||
154 | |||
155 | _enter("%p,%lu", timer, timeout); | ||
156 | |||
157 | spin_lock(&krxtimod_lock); | ||
158 | |||
159 | list_del(&timer->link); | ||
160 | |||
161 | /* the timer was deferred or reset - put it back in the queue at the | ||
162 | * right place */ | ||
163 | timer->timo_jif = jiffies + timeout; | ||
164 | |||
165 | list_for_each(_p, &krxtimod_list) { | ||
166 | ptimer = list_entry(_p, rxrpc_timer_t, link); | ||
167 | if (time_before(timer->timo_jif, ptimer->timo_jif)) | ||
168 | break; | ||
169 | } | ||
170 | |||
171 | list_add_tail(&timer->link, _p); /* insert before stopping point */ | ||
172 | |||
173 | spin_unlock(&krxtimod_lock); | ||
174 | |||
175 | wake_up(&krxtimod_sleepq); | ||
176 | |||
177 | _leave(""); | ||
178 | } /* end rxrpc_krxtimod_add_timer() */ | ||
179 | |||
180 | /*****************************************************************************/ | ||
181 | /* | ||
182 | * dequeue a timer | ||
183 | * - returns 0 if the timer was deleted or -ENOENT if it wasn't queued | ||
184 | */ | ||
185 | int rxrpc_krxtimod_del_timer(rxrpc_timer_t *timer) | ||
186 | { | ||
187 | int ret = 0; | ||
188 | |||
189 | _enter("%p", timer); | ||
190 | |||
191 | spin_lock(&krxtimod_lock); | ||
192 | |||
193 | if (list_empty(&timer->link)) | ||
194 | ret = -ENOENT; | ||
195 | else | ||
196 | list_del_init(&timer->link); | ||
197 | |||
198 | spin_unlock(&krxtimod_lock); | ||
199 | |||
200 | wake_up(&krxtimod_sleepq); | ||
201 | |||
202 | _leave(" = %d", ret); | ||
203 | return ret; | ||
204 | } /* end rxrpc_krxtimod_del_timer() */ | ||
diff --git a/net/rxrpc/main.c b/net/rxrpc/main.c deleted file mode 100644 index baec1f7fd8b9..000000000000 --- a/net/rxrpc/main.c +++ /dev/null | |||
@@ -1,180 +0,0 @@ | |||
1 | /* main.c: Rx RPC interface | ||
2 | * | ||
3 | * Copyright (C) 2002 Red Hat, Inc. All Rights Reserved. | ||
4 | * Written by David Howells (dhowells@redhat.com) | ||
5 | * | ||
6 | * This program is free software; you can redistribute it and/or | ||
7 | * modify it under the terms of the GNU General Public License | ||
8 | * as published by the Free Software Foundation; either version | ||
9 | * 2 of the License, or (at your option) any later version. | ||
10 | */ | ||
11 | |||
12 | #include <linux/module.h> | ||
13 | #include <linux/init.h> | ||
14 | #include <linux/sched.h> | ||
15 | #include <rxrpc/rxrpc.h> | ||
16 | #include <rxrpc/krxiod.h> | ||
17 | #include <rxrpc/krxsecd.h> | ||
18 | #include <rxrpc/krxtimod.h> | ||
19 | #include <rxrpc/transport.h> | ||
20 | #include <rxrpc/connection.h> | ||
21 | #include <rxrpc/call.h> | ||
22 | #include <rxrpc/message.h> | ||
23 | #include "internal.h" | ||
24 | |||
25 | MODULE_DESCRIPTION("Rx RPC implementation"); | ||
26 | MODULE_AUTHOR("Red Hat, Inc."); | ||
27 | MODULE_LICENSE("GPL"); | ||
28 | |||
29 | __be32 rxrpc_epoch; | ||
30 | |||
31 | /*****************************************************************************/ | ||
32 | /* | ||
33 | * initialise the Rx module | ||
34 | */ | ||
35 | static int __init rxrpc_initialise(void) | ||
36 | { | ||
37 | int ret; | ||
38 | |||
39 | /* my epoch value */ | ||
40 | rxrpc_epoch = htonl(xtime.tv_sec); | ||
41 | |||
42 | /* register the /proc interface */ | ||
43 | #ifdef CONFIG_PROC_FS | ||
44 | ret = rxrpc_proc_init(); | ||
45 | if (ret<0) | ||
46 | return ret; | ||
47 | #endif | ||
48 | |||
49 | /* register the sysctl files */ | ||
50 | #ifdef CONFIG_SYSCTL | ||
51 | ret = rxrpc_sysctl_init(); | ||
52 | if (ret<0) | ||
53 | goto error_proc; | ||
54 | #endif | ||
55 | |||
56 | /* start the krxtimod daemon */ | ||
57 | ret = rxrpc_krxtimod_start(); | ||
58 | if (ret<0) | ||
59 | goto error_sysctl; | ||
60 | |||
61 | /* start the krxiod daemon */ | ||
62 | ret = rxrpc_krxiod_init(); | ||
63 | if (ret<0) | ||
64 | goto error_krxtimod; | ||
65 | |||
66 | /* start the krxsecd daemon */ | ||
67 | ret = rxrpc_krxsecd_init(); | ||
68 | if (ret<0) | ||
69 | goto error_krxiod; | ||
70 | |||
71 | kdebug("\n\n"); | ||
72 | |||
73 | return 0; | ||
74 | |||
75 | error_krxiod: | ||
76 | rxrpc_krxiod_kill(); | ||
77 | error_krxtimod: | ||
78 | rxrpc_krxtimod_kill(); | ||
79 | error_sysctl: | ||
80 | #ifdef CONFIG_SYSCTL | ||
81 | rxrpc_sysctl_cleanup(); | ||
82 | error_proc: | ||
83 | #endif | ||
84 | #ifdef CONFIG_PROC_FS | ||
85 | rxrpc_proc_cleanup(); | ||
86 | #endif | ||
87 | return ret; | ||
88 | } /* end rxrpc_initialise() */ | ||
89 | |||
90 | module_init(rxrpc_initialise); | ||
91 | |||
92 | /*****************************************************************************/ | ||
93 | /* | ||
94 | * clean up the Rx module | ||
95 | */ | ||
96 | static void __exit rxrpc_cleanup(void) | ||
97 | { | ||
98 | kenter(""); | ||
99 | |||
100 | __RXACCT(printk("Outstanding Messages : %d\n", | ||
101 | atomic_read(&rxrpc_message_count))); | ||
102 | __RXACCT(printk("Outstanding Calls : %d\n", | ||
103 | atomic_read(&rxrpc_call_count))); | ||
104 | __RXACCT(printk("Outstanding Connections: %d\n", | ||
105 | atomic_read(&rxrpc_connection_count))); | ||
106 | __RXACCT(printk("Outstanding Peers : %d\n", | ||
107 | atomic_read(&rxrpc_peer_count))); | ||
108 | __RXACCT(printk("Outstanding Transports : %d\n", | ||
109 | atomic_read(&rxrpc_transport_count))); | ||
110 | |||
111 | rxrpc_krxsecd_kill(); | ||
112 | rxrpc_krxiod_kill(); | ||
113 | rxrpc_krxtimod_kill(); | ||
114 | #ifdef CONFIG_SYSCTL | ||
115 | rxrpc_sysctl_cleanup(); | ||
116 | #endif | ||
117 | #ifdef CONFIG_PROC_FS | ||
118 | rxrpc_proc_cleanup(); | ||
119 | #endif | ||
120 | |||
121 | __RXACCT(printk("Outstanding Messages : %d\n", | ||
122 | atomic_read(&rxrpc_message_count))); | ||
123 | __RXACCT(printk("Outstanding Calls : %d\n", | ||
124 | atomic_read(&rxrpc_call_count))); | ||
125 | __RXACCT(printk("Outstanding Connections: %d\n", | ||
126 | atomic_read(&rxrpc_connection_count))); | ||
127 | __RXACCT(printk("Outstanding Peers : %d\n", | ||
128 | atomic_read(&rxrpc_peer_count))); | ||
129 | __RXACCT(printk("Outstanding Transports : %d\n", | ||
130 | atomic_read(&rxrpc_transport_count))); | ||
131 | |||
132 | kleave(""); | ||
133 | } /* end rxrpc_cleanup() */ | ||
134 | |||
135 | module_exit(rxrpc_cleanup); | ||
136 | |||
137 | /*****************************************************************************/ | ||
138 | /* | ||
139 | * clear the dead space between task_struct and kernel stack | ||
140 | * - called by supplying -finstrument-functions to gcc | ||
141 | */ | ||
142 | #if 0 | ||
143 | void __cyg_profile_func_enter (void *this_fn, void *call_site) | ||
144 | __attribute__((no_instrument_function)); | ||
145 | |||
146 | void __cyg_profile_func_enter (void *this_fn, void *call_site) | ||
147 | { | ||
148 | asm volatile(" movl %%esp,%%edi \n" | ||
149 | " andl %0,%%edi \n" | ||
150 | " addl %1,%%edi \n" | ||
151 | " movl %%esp,%%ecx \n" | ||
152 | " subl %%edi,%%ecx \n" | ||
153 | " shrl $2,%%ecx \n" | ||
154 | " movl $0xedededed,%%eax \n" | ||
155 | " rep stosl \n" | ||
156 | : | ||
157 | : "i"(~(THREAD_SIZE-1)), "i"(sizeof(struct thread_info)) | ||
158 | : "eax", "ecx", "edi", "memory", "cc" | ||
159 | ); | ||
160 | } | ||
161 | |||
162 | void __cyg_profile_func_exit(void *this_fn, void *call_site) | ||
163 | __attribute__((no_instrument_function)); | ||
164 | |||
165 | void __cyg_profile_func_exit(void *this_fn, void *call_site) | ||
166 | { | ||
167 | asm volatile(" movl %%esp,%%edi \n" | ||
168 | " andl %0,%%edi \n" | ||
169 | " addl %1,%%edi \n" | ||
170 | " movl %%esp,%%ecx \n" | ||
171 | " subl %%edi,%%ecx \n" | ||
172 | " shrl $2,%%ecx \n" | ||
173 | " movl $0xdadadada,%%eax \n" | ||
174 | " rep stosl \n" | ||
175 | : | ||
176 | : "i"(~(THREAD_SIZE-1)), "i"(sizeof(struct thread_info)) | ||
177 | : "eax", "ecx", "edi", "memory", "cc" | ||
178 | ); | ||
179 | } | ||
180 | #endif | ||
diff --git a/net/rxrpc/peer.c b/net/rxrpc/peer.c deleted file mode 100644 index 8a275157a3bb..000000000000 --- a/net/rxrpc/peer.c +++ /dev/null | |||
@@ -1,398 +0,0 @@ | |||
1 | /* peer.c: Rx RPC peer management | ||
2 | * | ||
3 | * Copyright (C) 2002 Red Hat, Inc. All Rights Reserved. | ||
4 | * Written by David Howells (dhowells@redhat.com) | ||
5 | * | ||
6 | * This program is free software; you can redistribute it and/or | ||
7 | * modify it under the terms of the GNU General Public License | ||
8 | * as published by the Free Software Foundation; either version | ||
9 | * 2 of the License, or (at your option) any later version. | ||
10 | */ | ||
11 | |||
12 | #include <linux/sched.h> | ||
13 | #include <linux/slab.h> | ||
14 | #include <linux/module.h> | ||
15 | #include <rxrpc/rxrpc.h> | ||
16 | #include <rxrpc/transport.h> | ||
17 | #include <rxrpc/peer.h> | ||
18 | #include <rxrpc/connection.h> | ||
19 | #include <rxrpc/call.h> | ||
20 | #include <rxrpc/message.h> | ||
21 | #include <linux/udp.h> | ||
22 | #include <linux/ip.h> | ||
23 | #include <net/sock.h> | ||
24 | #include <asm/uaccess.h> | ||
25 | #include <asm/div64.h> | ||
26 | #include "internal.h" | ||
27 | |||
28 | __RXACCT_DECL(atomic_t rxrpc_peer_count); | ||
29 | LIST_HEAD(rxrpc_peers); | ||
30 | DECLARE_RWSEM(rxrpc_peers_sem); | ||
31 | unsigned long rxrpc_peer_timeout = 12 * 60 * 60; | ||
32 | |||
33 | static void rxrpc_peer_do_timeout(struct rxrpc_peer *peer); | ||
34 | |||
35 | static void __rxrpc_peer_timeout(rxrpc_timer_t *timer) | ||
36 | { | ||
37 | struct rxrpc_peer *peer = | ||
38 | list_entry(timer, struct rxrpc_peer, timeout); | ||
39 | |||
40 | _debug("Rx PEER TIMEOUT [%p{u=%d}]", peer, atomic_read(&peer->usage)); | ||
41 | |||
42 | rxrpc_peer_do_timeout(peer); | ||
43 | } | ||
44 | |||
45 | static const struct rxrpc_timer_ops rxrpc_peer_timer_ops = { | ||
46 | .timed_out = __rxrpc_peer_timeout, | ||
47 | }; | ||
48 | |||
49 | /*****************************************************************************/ | ||
50 | /* | ||
51 | * create a peer record | ||
52 | */ | ||
53 | static int __rxrpc_create_peer(struct rxrpc_transport *trans, __be32 addr, | ||
54 | struct rxrpc_peer **_peer) | ||
55 | { | ||
56 | struct rxrpc_peer *peer; | ||
57 | |||
58 | _enter("%p,%08x", trans, ntohl(addr)); | ||
59 | |||
60 | /* allocate and initialise a peer record */ | ||
61 | peer = kzalloc(sizeof(struct rxrpc_peer), GFP_KERNEL); | ||
62 | if (!peer) { | ||
63 | _leave(" = -ENOMEM"); | ||
64 | return -ENOMEM; | ||
65 | } | ||
66 | |||
67 | atomic_set(&peer->usage, 1); | ||
68 | |||
69 | INIT_LIST_HEAD(&peer->link); | ||
70 | INIT_LIST_HEAD(&peer->proc_link); | ||
71 | INIT_LIST_HEAD(&peer->conn_idlist); | ||
72 | INIT_LIST_HEAD(&peer->conn_active); | ||
73 | INIT_LIST_HEAD(&peer->conn_graveyard); | ||
74 | spin_lock_init(&peer->conn_gylock); | ||
75 | init_waitqueue_head(&peer->conn_gy_waitq); | ||
76 | rwlock_init(&peer->conn_idlock); | ||
77 | rwlock_init(&peer->conn_lock); | ||
78 | atomic_set(&peer->conn_count, 0); | ||
79 | spin_lock_init(&peer->lock); | ||
80 | rxrpc_timer_init(&peer->timeout, &rxrpc_peer_timer_ops); | ||
81 | |||
82 | peer->addr.s_addr = addr; | ||
83 | |||
84 | peer->trans = trans; | ||
85 | peer->ops = trans->peer_ops; | ||
86 | |||
87 | __RXACCT(atomic_inc(&rxrpc_peer_count)); | ||
88 | *_peer = peer; | ||
89 | _leave(" = 0 (%p)", peer); | ||
90 | |||
91 | return 0; | ||
92 | } /* end __rxrpc_create_peer() */ | ||
93 | |||
94 | /*****************************************************************************/ | ||
95 | /* | ||
96 | * find a peer record on the specified transport | ||
97 | * - returns (if successful) with peer record usage incremented | ||
98 | * - resurrects it from the graveyard if found there | ||
99 | */ | ||
100 | int rxrpc_peer_lookup(struct rxrpc_transport *trans, __be32 addr, | ||
101 | struct rxrpc_peer **_peer) | ||
102 | { | ||
103 | struct rxrpc_peer *peer, *candidate = NULL; | ||
104 | struct list_head *_p; | ||
105 | int ret; | ||
106 | |||
107 | _enter("%p{%hu},%08x", trans, trans->port, ntohl(addr)); | ||
108 | |||
109 | /* [common case] search the transport's active list first */ | ||
110 | read_lock(&trans->peer_lock); | ||
111 | list_for_each(_p, &trans->peer_active) { | ||
112 | peer = list_entry(_p, struct rxrpc_peer, link); | ||
113 | if (peer->addr.s_addr == addr) | ||
114 | goto found_active; | ||
115 | } | ||
116 | read_unlock(&trans->peer_lock); | ||
117 | |||
118 | /* [uncommon case] not active - create a candidate for a new record */ | ||
119 | ret = __rxrpc_create_peer(trans, addr, &candidate); | ||
120 | if (ret < 0) { | ||
121 | _leave(" = %d", ret); | ||
122 | return ret; | ||
123 | } | ||
124 | |||
125 | /* search the active list again, just in case it appeared whilst we | ||
126 | * were busy */ | ||
127 | write_lock(&trans->peer_lock); | ||
128 | list_for_each(_p, &trans->peer_active) { | ||
129 | peer = list_entry(_p, struct rxrpc_peer, link); | ||
130 | if (peer->addr.s_addr == addr) | ||
131 | goto found_active_second_chance; | ||
132 | } | ||
133 | |||
134 | /* search the transport's graveyard list */ | ||
135 | spin_lock(&trans->peer_gylock); | ||
136 | list_for_each(_p, &trans->peer_graveyard) { | ||
137 | peer = list_entry(_p, struct rxrpc_peer, link); | ||
138 | if (peer->addr.s_addr == addr) | ||
139 | goto found_in_graveyard; | ||
140 | } | ||
141 | spin_unlock(&trans->peer_gylock); | ||
142 | |||
143 | /* we can now add the new candidate to the list | ||
144 | * - tell the application layer that this peer has been added | ||
145 | */ | ||
146 | rxrpc_get_transport(trans); | ||
147 | peer = candidate; | ||
148 | candidate = NULL; | ||
149 | |||
150 | if (peer->ops && peer->ops->adding) { | ||
151 | ret = peer->ops->adding(peer); | ||
152 | if (ret < 0) { | ||
153 | write_unlock(&trans->peer_lock); | ||
154 | __RXACCT(atomic_dec(&rxrpc_peer_count)); | ||
155 | kfree(peer); | ||
156 | rxrpc_put_transport(trans); | ||
157 | _leave(" = %d", ret); | ||
158 | return ret; | ||
159 | } | ||
160 | } | ||
161 | |||
162 | atomic_inc(&trans->peer_count); | ||
163 | |||
164 | make_active: | ||
165 | list_add_tail(&peer->link, &trans->peer_active); | ||
166 | |||
167 | success_uwfree: | ||
168 | write_unlock(&trans->peer_lock); | ||
169 | |||
170 | if (candidate) { | ||
171 | __RXACCT(atomic_dec(&rxrpc_peer_count)); | ||
172 | kfree(candidate); | ||
173 | } | ||
174 | |||
175 | if (list_empty(&peer->proc_link)) { | ||
176 | down_write(&rxrpc_peers_sem); | ||
177 | list_add_tail(&peer->proc_link, &rxrpc_peers); | ||
178 | up_write(&rxrpc_peers_sem); | ||
179 | } | ||
180 | |||
181 | success: | ||
182 | *_peer = peer; | ||
183 | |||
184 | _leave(" = 0 (%p{u=%d cc=%d})", | ||
185 | peer, | ||
186 | atomic_read(&peer->usage), | ||
187 | atomic_read(&peer->conn_count)); | ||
188 | return 0; | ||
189 | |||
190 | /* handle the peer being found in the active list straight off */ | ||
191 | found_active: | ||
192 | rxrpc_get_peer(peer); | ||
193 | read_unlock(&trans->peer_lock); | ||
194 | goto success; | ||
195 | |||
196 | /* handle resurrecting a peer from the graveyard */ | ||
197 | found_in_graveyard: | ||
198 | rxrpc_get_peer(peer); | ||
199 | rxrpc_get_transport(peer->trans); | ||
200 | rxrpc_krxtimod_del_timer(&peer->timeout); | ||
201 | list_del_init(&peer->link); | ||
202 | spin_unlock(&trans->peer_gylock); | ||
203 | goto make_active; | ||
204 | |||
205 | /* handle finding the peer on the second time through the active | ||
206 | * list */ | ||
207 | found_active_second_chance: | ||
208 | rxrpc_get_peer(peer); | ||
209 | goto success_uwfree; | ||
210 | |||
211 | } /* end rxrpc_peer_lookup() */ | ||
212 | |||
213 | /*****************************************************************************/ | ||
214 | /* | ||
215 | * finish with a peer record | ||
216 | * - it gets sent to the graveyard from where it can be resurrected or timed | ||
217 | * out | ||
218 | */ | ||
219 | void rxrpc_put_peer(struct rxrpc_peer *peer) | ||
220 | { | ||
221 | struct rxrpc_transport *trans = peer->trans; | ||
222 | |||
223 | _enter("%p{cc=%d a=%08x}", | ||
224 | peer, | ||
225 | atomic_read(&peer->conn_count), | ||
226 | ntohl(peer->addr.s_addr)); | ||
227 | |||
228 | /* sanity check */ | ||
229 | if (atomic_read(&peer->usage) <= 0) | ||
230 | BUG(); | ||
231 | |||
232 | write_lock(&trans->peer_lock); | ||
233 | spin_lock(&trans->peer_gylock); | ||
234 | if (likely(!atomic_dec_and_test(&peer->usage))) { | ||
235 | spin_unlock(&trans->peer_gylock); | ||
236 | write_unlock(&trans->peer_lock); | ||
237 | _leave(""); | ||
238 | return; | ||
239 | } | ||
240 | |||
241 | /* move to graveyard queue */ | ||
242 | list_del(&peer->link); | ||
243 | write_unlock(&trans->peer_lock); | ||
244 | |||
245 | list_add_tail(&peer->link, &trans->peer_graveyard); | ||
246 | |||
247 | BUG_ON(!list_empty(&peer->conn_active)); | ||
248 | |||
249 | rxrpc_krxtimod_add_timer(&peer->timeout, rxrpc_peer_timeout * HZ); | ||
250 | |||
251 | spin_unlock(&trans->peer_gylock); | ||
252 | |||
253 | rxrpc_put_transport(trans); | ||
254 | |||
255 | _leave(" [killed]"); | ||
256 | } /* end rxrpc_put_peer() */ | ||
257 | |||
258 | /*****************************************************************************/ | ||
259 | /* | ||
260 | * handle a peer timing out in the graveyard | ||
261 | * - called from krxtimod | ||
262 | */ | ||
263 | static void rxrpc_peer_do_timeout(struct rxrpc_peer *peer) | ||
264 | { | ||
265 | struct rxrpc_transport *trans = peer->trans; | ||
266 | |||
267 | _enter("%p{u=%d cc=%d a=%08x}", | ||
268 | peer, | ||
269 | atomic_read(&peer->usage), | ||
270 | atomic_read(&peer->conn_count), | ||
271 | ntohl(peer->addr.s_addr)); | ||
272 | |||
273 | BUG_ON(atomic_read(&peer->usage) < 0); | ||
274 | |||
275 | /* remove from graveyard if still dead */ | ||
276 | spin_lock(&trans->peer_gylock); | ||
277 | if (atomic_read(&peer->usage) == 0) | ||
278 | list_del_init(&peer->link); | ||
279 | else | ||
280 | peer = NULL; | ||
281 | spin_unlock(&trans->peer_gylock); | ||
282 | |||
283 | if (!peer) { | ||
284 | _leave(""); | ||
285 | return; /* resurrected */ | ||
286 | } | ||
287 | |||
288 | /* clear all connections on this peer */ | ||
289 | rxrpc_conn_clearall(peer); | ||
290 | |||
291 | BUG_ON(!list_empty(&peer->conn_active)); | ||
292 | BUG_ON(!list_empty(&peer->conn_graveyard)); | ||
293 | |||
294 | /* inform the application layer */ | ||
295 | if (peer->ops && peer->ops->discarding) | ||
296 | peer->ops->discarding(peer); | ||
297 | |||
298 | if (!list_empty(&peer->proc_link)) { | ||
299 | down_write(&rxrpc_peers_sem); | ||
300 | list_del(&peer->proc_link); | ||
301 | up_write(&rxrpc_peers_sem); | ||
302 | } | ||
303 | |||
304 | __RXACCT(atomic_dec(&rxrpc_peer_count)); | ||
305 | kfree(peer); | ||
306 | |||
307 | /* if the graveyard is now empty, wake up anyone waiting for that */ | ||
308 | if (atomic_dec_and_test(&trans->peer_count)) | ||
309 | wake_up(&trans->peer_gy_waitq); | ||
310 | |||
311 | _leave(" [destroyed]"); | ||
312 | } /* end rxrpc_peer_do_timeout() */ | ||
313 | |||
314 | /*****************************************************************************/ | ||
315 | /* | ||
316 | * clear all peer records from a transport endpoint | ||
317 | */ | ||
318 | void rxrpc_peer_clearall(struct rxrpc_transport *trans) | ||
319 | { | ||
320 | DECLARE_WAITQUEUE(myself,current); | ||
321 | |||
322 | struct rxrpc_peer *peer; | ||
323 | int err; | ||
324 | |||
325 | _enter("%p",trans); | ||
326 | |||
327 | /* there shouldn't be any active peers remaining */ | ||
328 | BUG_ON(!list_empty(&trans->peer_active)); | ||
329 | |||
330 | /* manually timeout all peers in the graveyard */ | ||
331 | spin_lock(&trans->peer_gylock); | ||
332 | while (!list_empty(&trans->peer_graveyard)) { | ||
333 | peer = list_entry(trans->peer_graveyard.next, | ||
334 | struct rxrpc_peer, link); | ||
335 | _debug("Clearing peer %p\n", peer); | ||
336 | err = rxrpc_krxtimod_del_timer(&peer->timeout); | ||
337 | spin_unlock(&trans->peer_gylock); | ||
338 | |||
339 | if (err == 0) | ||
340 | rxrpc_peer_do_timeout(peer); | ||
341 | |||
342 | spin_lock(&trans->peer_gylock); | ||
343 | } | ||
344 | spin_unlock(&trans->peer_gylock); | ||
345 | |||
346 | /* wait for the the peer graveyard to be completely cleared */ | ||
347 | set_current_state(TASK_UNINTERRUPTIBLE); | ||
348 | add_wait_queue(&trans->peer_gy_waitq, &myself); | ||
349 | |||
350 | while (atomic_read(&trans->peer_count) != 0) { | ||
351 | schedule(); | ||
352 | set_current_state(TASK_UNINTERRUPTIBLE); | ||
353 | } | ||
354 | |||
355 | remove_wait_queue(&trans->peer_gy_waitq, &myself); | ||
356 | set_current_state(TASK_RUNNING); | ||
357 | |||
358 | _leave(""); | ||
359 | } /* end rxrpc_peer_clearall() */ | ||
360 | |||
361 | /*****************************************************************************/ | ||
362 | /* | ||
363 | * calculate and cache the Round-Trip-Time for a message and its response | ||
364 | */ | ||
365 | void rxrpc_peer_calculate_rtt(struct rxrpc_peer *peer, | ||
366 | struct rxrpc_message *msg, | ||
367 | struct rxrpc_message *resp) | ||
368 | { | ||
369 | unsigned long long rtt; | ||
370 | int loop; | ||
371 | |||
372 | _enter("%p,%p,%p", peer, msg, resp); | ||
373 | |||
374 | /* calculate the latest RTT */ | ||
375 | rtt = resp->stamp.tv_sec - msg->stamp.tv_sec; | ||
376 | rtt *= 1000000UL; | ||
377 | rtt += resp->stamp.tv_usec - msg->stamp.tv_usec; | ||
378 | |||
379 | /* add to cache */ | ||
380 | peer->rtt_cache[peer->rtt_point] = rtt; | ||
381 | peer->rtt_point++; | ||
382 | peer->rtt_point %= RXRPC_RTT_CACHE_SIZE; | ||
383 | |||
384 | if (peer->rtt_usage < RXRPC_RTT_CACHE_SIZE) | ||
385 | peer->rtt_usage++; | ||
386 | |||
387 | /* recalculate RTT */ | ||
388 | rtt = 0; | ||
389 | for (loop = peer->rtt_usage - 1; loop >= 0; loop--) | ||
390 | rtt += peer->rtt_cache[loop]; | ||
391 | |||
392 | do_div(rtt, peer->rtt_usage); | ||
393 | peer->rtt = rtt; | ||
394 | |||
395 | _leave(" RTT=%lu.%lums", | ||
396 | (long) (peer->rtt / 1000), (long) (peer->rtt % 1000)); | ||
397 | |||
398 | } /* end rxrpc_peer_calculate_rtt() */ | ||
diff --git a/net/rxrpc/proc.c b/net/rxrpc/proc.c deleted file mode 100644 index 8551c879e456..000000000000 --- a/net/rxrpc/proc.c +++ /dev/null | |||
@@ -1,617 +0,0 @@ | |||
1 | /* proc.c: /proc interface for RxRPC | ||
2 | * | ||
3 | * Copyright (C) 2002 Red Hat, Inc. All Rights Reserved. | ||
4 | * Written by David Howells (dhowells@redhat.com) | ||
5 | * | ||
6 | * This program is free software; you can redistribute it and/or | ||
7 | * modify it under the terms of the GNU General Public License | ||
8 | * as published by the Free Software Foundation; either version | ||
9 | * 2 of the License, or (at your option) any later version. | ||
10 | */ | ||
11 | |||
12 | #include <linux/sched.h> | ||
13 | #include <linux/slab.h> | ||
14 | #include <linux/module.h> | ||
15 | #include <linux/proc_fs.h> | ||
16 | #include <linux/seq_file.h> | ||
17 | #include <rxrpc/rxrpc.h> | ||
18 | #include <rxrpc/transport.h> | ||
19 | #include <rxrpc/peer.h> | ||
20 | #include <rxrpc/connection.h> | ||
21 | #include <rxrpc/call.h> | ||
22 | #include <rxrpc/message.h> | ||
23 | #include "internal.h" | ||
24 | |||
25 | static struct proc_dir_entry *proc_rxrpc; | ||
26 | |||
27 | static int rxrpc_proc_transports_open(struct inode *inode, struct file *file); | ||
28 | static void *rxrpc_proc_transports_start(struct seq_file *p, loff_t *pos); | ||
29 | static void *rxrpc_proc_transports_next(struct seq_file *p, void *v, loff_t *pos); | ||
30 | static void rxrpc_proc_transports_stop(struct seq_file *p, void *v); | ||
31 | static int rxrpc_proc_transports_show(struct seq_file *m, void *v); | ||
32 | |||
33 | static struct seq_operations rxrpc_proc_transports_ops = { | ||
34 | .start = rxrpc_proc_transports_start, | ||
35 | .next = rxrpc_proc_transports_next, | ||
36 | .stop = rxrpc_proc_transports_stop, | ||
37 | .show = rxrpc_proc_transports_show, | ||
38 | }; | ||
39 | |||
40 | static const struct file_operations rxrpc_proc_transports_fops = { | ||
41 | .open = rxrpc_proc_transports_open, | ||
42 | .read = seq_read, | ||
43 | .llseek = seq_lseek, | ||
44 | .release = seq_release, | ||
45 | }; | ||
46 | |||
47 | static int rxrpc_proc_peers_open(struct inode *inode, struct file *file); | ||
48 | static void *rxrpc_proc_peers_start(struct seq_file *p, loff_t *pos); | ||
49 | static void *rxrpc_proc_peers_next(struct seq_file *p, void *v, loff_t *pos); | ||
50 | static void rxrpc_proc_peers_stop(struct seq_file *p, void *v); | ||
51 | static int rxrpc_proc_peers_show(struct seq_file *m, void *v); | ||
52 | |||
53 | static struct seq_operations rxrpc_proc_peers_ops = { | ||
54 | .start = rxrpc_proc_peers_start, | ||
55 | .next = rxrpc_proc_peers_next, | ||
56 | .stop = rxrpc_proc_peers_stop, | ||
57 | .show = rxrpc_proc_peers_show, | ||
58 | }; | ||
59 | |||
60 | static const struct file_operations rxrpc_proc_peers_fops = { | ||
61 | .open = rxrpc_proc_peers_open, | ||
62 | .read = seq_read, | ||
63 | .llseek = seq_lseek, | ||
64 | .release = seq_release, | ||
65 | }; | ||
66 | |||
67 | static int rxrpc_proc_conns_open(struct inode *inode, struct file *file); | ||
68 | static void *rxrpc_proc_conns_start(struct seq_file *p, loff_t *pos); | ||
69 | static void *rxrpc_proc_conns_next(struct seq_file *p, void *v, loff_t *pos); | ||
70 | static void rxrpc_proc_conns_stop(struct seq_file *p, void *v); | ||
71 | static int rxrpc_proc_conns_show(struct seq_file *m, void *v); | ||
72 | |||
73 | static struct seq_operations rxrpc_proc_conns_ops = { | ||
74 | .start = rxrpc_proc_conns_start, | ||
75 | .next = rxrpc_proc_conns_next, | ||
76 | .stop = rxrpc_proc_conns_stop, | ||
77 | .show = rxrpc_proc_conns_show, | ||
78 | }; | ||
79 | |||
80 | static const struct file_operations rxrpc_proc_conns_fops = { | ||
81 | .open = rxrpc_proc_conns_open, | ||
82 | .read = seq_read, | ||
83 | .llseek = seq_lseek, | ||
84 | .release = seq_release, | ||
85 | }; | ||
86 | |||
87 | static int rxrpc_proc_calls_open(struct inode *inode, struct file *file); | ||
88 | static void *rxrpc_proc_calls_start(struct seq_file *p, loff_t *pos); | ||
89 | static void *rxrpc_proc_calls_next(struct seq_file *p, void *v, loff_t *pos); | ||
90 | static void rxrpc_proc_calls_stop(struct seq_file *p, void *v); | ||
91 | static int rxrpc_proc_calls_show(struct seq_file *m, void *v); | ||
92 | |||
93 | static struct seq_operations rxrpc_proc_calls_ops = { | ||
94 | .start = rxrpc_proc_calls_start, | ||
95 | .next = rxrpc_proc_calls_next, | ||
96 | .stop = rxrpc_proc_calls_stop, | ||
97 | .show = rxrpc_proc_calls_show, | ||
98 | }; | ||
99 | |||
100 | static const struct file_operations rxrpc_proc_calls_fops = { | ||
101 | .open = rxrpc_proc_calls_open, | ||
102 | .read = seq_read, | ||
103 | .llseek = seq_lseek, | ||
104 | .release = seq_release, | ||
105 | }; | ||
106 | |||
107 | static const char *rxrpc_call_states7[] = { | ||
108 | "complet", | ||
109 | "error ", | ||
110 | "rcv_op ", | ||
111 | "rcv_arg", | ||
112 | "got_arg", | ||
113 | "snd_rpl", | ||
114 | "fin_ack", | ||
115 | "snd_arg", | ||
116 | "rcv_rpl", | ||
117 | "got_rpl" | ||
118 | }; | ||
119 | |||
120 | static const char *rxrpc_call_error_states7[] = { | ||
121 | "no_err ", | ||
122 | "loc_abt", | ||
123 | "rmt_abt", | ||
124 | "loc_err", | ||
125 | "rmt_err" | ||
126 | }; | ||
127 | |||
128 | /*****************************************************************************/ | ||
129 | /* | ||
130 | * initialise the /proc/net/rxrpc/ directory | ||
131 | */ | ||
132 | int rxrpc_proc_init(void) | ||
133 | { | ||
134 | struct proc_dir_entry *p; | ||
135 | |||
136 | proc_rxrpc = proc_mkdir("rxrpc", proc_net); | ||
137 | if (!proc_rxrpc) | ||
138 | goto error; | ||
139 | proc_rxrpc->owner = THIS_MODULE; | ||
140 | |||
141 | p = create_proc_entry("calls", 0, proc_rxrpc); | ||
142 | if (!p) | ||
143 | goto error_proc; | ||
144 | p->proc_fops = &rxrpc_proc_calls_fops; | ||
145 | p->owner = THIS_MODULE; | ||
146 | |||
147 | p = create_proc_entry("connections", 0, proc_rxrpc); | ||
148 | if (!p) | ||
149 | goto error_calls; | ||
150 | p->proc_fops = &rxrpc_proc_conns_fops; | ||
151 | p->owner = THIS_MODULE; | ||
152 | |||
153 | p = create_proc_entry("peers", 0, proc_rxrpc); | ||
154 | if (!p) | ||
155 | goto error_calls; | ||
156 | p->proc_fops = &rxrpc_proc_peers_fops; | ||
157 | p->owner = THIS_MODULE; | ||
158 | |||
159 | p = create_proc_entry("transports", 0, proc_rxrpc); | ||
160 | if (!p) | ||
161 | goto error_conns; | ||
162 | p->proc_fops = &rxrpc_proc_transports_fops; | ||
163 | p->owner = THIS_MODULE; | ||
164 | |||
165 | return 0; | ||
166 | |||
167 | error_conns: | ||
168 | remove_proc_entry("connections", proc_rxrpc); | ||
169 | error_calls: | ||
170 | remove_proc_entry("calls", proc_rxrpc); | ||
171 | error_proc: | ||
172 | remove_proc_entry("rxrpc", proc_net); | ||
173 | error: | ||
174 | return -ENOMEM; | ||
175 | } /* end rxrpc_proc_init() */ | ||
176 | |||
177 | /*****************************************************************************/ | ||
178 | /* | ||
179 | * clean up the /proc/net/rxrpc/ directory | ||
180 | */ | ||
181 | void rxrpc_proc_cleanup(void) | ||
182 | { | ||
183 | remove_proc_entry("transports", proc_rxrpc); | ||
184 | remove_proc_entry("peers", proc_rxrpc); | ||
185 | remove_proc_entry("connections", proc_rxrpc); | ||
186 | remove_proc_entry("calls", proc_rxrpc); | ||
187 | |||
188 | remove_proc_entry("rxrpc", proc_net); | ||
189 | |||
190 | } /* end rxrpc_proc_cleanup() */ | ||
191 | |||
192 | /*****************************************************************************/ | ||
193 | /* | ||
194 | * open "/proc/net/rxrpc/transports" which provides a summary of extant transports | ||
195 | */ | ||
196 | static int rxrpc_proc_transports_open(struct inode *inode, struct file *file) | ||
197 | { | ||
198 | struct seq_file *m; | ||
199 | int ret; | ||
200 | |||
201 | ret = seq_open(file, &rxrpc_proc_transports_ops); | ||
202 | if (ret < 0) | ||
203 | return ret; | ||
204 | |||
205 | m = file->private_data; | ||
206 | m->private = PDE(inode)->data; | ||
207 | |||
208 | return 0; | ||
209 | } /* end rxrpc_proc_transports_open() */ | ||
210 | |||
211 | /*****************************************************************************/ | ||
212 | /* | ||
213 | * set up the iterator to start reading from the transports list and return the first item | ||
214 | */ | ||
215 | static void *rxrpc_proc_transports_start(struct seq_file *m, loff_t *_pos) | ||
216 | { | ||
217 | struct list_head *_p; | ||
218 | loff_t pos = *_pos; | ||
219 | |||
220 | /* lock the list against modification */ | ||
221 | down_read(&rxrpc_proc_transports_sem); | ||
222 | |||
223 | /* allow for the header line */ | ||
224 | if (!pos) | ||
225 | return SEQ_START_TOKEN; | ||
226 | pos--; | ||
227 | |||
228 | /* find the n'th element in the list */ | ||
229 | list_for_each(_p, &rxrpc_proc_transports) | ||
230 | if (!pos--) | ||
231 | break; | ||
232 | |||
233 | return _p != &rxrpc_proc_transports ? _p : NULL; | ||
234 | } /* end rxrpc_proc_transports_start() */ | ||
235 | |||
236 | /*****************************************************************************/ | ||
237 | /* | ||
238 | * move to next call in transports list | ||
239 | */ | ||
240 | static void *rxrpc_proc_transports_next(struct seq_file *p, void *v, loff_t *pos) | ||
241 | { | ||
242 | struct list_head *_p; | ||
243 | |||
244 | (*pos)++; | ||
245 | |||
246 | _p = v; | ||
247 | _p = (v == SEQ_START_TOKEN) ? rxrpc_proc_transports.next : _p->next; | ||
248 | |||
249 | return _p != &rxrpc_proc_transports ? _p : NULL; | ||
250 | } /* end rxrpc_proc_transports_next() */ | ||
251 | |||
252 | /*****************************************************************************/ | ||
253 | /* | ||
254 | * clean up after reading from the transports list | ||
255 | */ | ||
256 | static void rxrpc_proc_transports_stop(struct seq_file *p, void *v) | ||
257 | { | ||
258 | up_read(&rxrpc_proc_transports_sem); | ||
259 | |||
260 | } /* end rxrpc_proc_transports_stop() */ | ||
261 | |||
262 | /*****************************************************************************/ | ||
263 | /* | ||
264 | * display a header line followed by a load of call lines | ||
265 | */ | ||
266 | static int rxrpc_proc_transports_show(struct seq_file *m, void *v) | ||
267 | { | ||
268 | struct rxrpc_transport *trans = | ||
269 | list_entry(v, struct rxrpc_transport, proc_link); | ||
270 | |||
271 | /* display header on line 1 */ | ||
272 | if (v == SEQ_START_TOKEN) { | ||
273 | seq_puts(m, "LOCAL USE\n"); | ||
274 | return 0; | ||
275 | } | ||
276 | |||
277 | /* display one transport per line on subsequent lines */ | ||
278 | seq_printf(m, "%5hu %3d\n", | ||
279 | trans->port, | ||
280 | atomic_read(&trans->usage) | ||
281 | ); | ||
282 | |||
283 | return 0; | ||
284 | } /* end rxrpc_proc_transports_show() */ | ||
285 | |||
286 | /*****************************************************************************/ | ||
287 | /* | ||
288 | * open "/proc/net/rxrpc/peers" which provides a summary of extant peers | ||
289 | */ | ||
290 | static int rxrpc_proc_peers_open(struct inode *inode, struct file *file) | ||
291 | { | ||
292 | struct seq_file *m; | ||
293 | int ret; | ||
294 | |||
295 | ret = seq_open(file, &rxrpc_proc_peers_ops); | ||
296 | if (ret < 0) | ||
297 | return ret; | ||
298 | |||
299 | m = file->private_data; | ||
300 | m->private = PDE(inode)->data; | ||
301 | |||
302 | return 0; | ||
303 | } /* end rxrpc_proc_peers_open() */ | ||
304 | |||
305 | /*****************************************************************************/ | ||
306 | /* | ||
307 | * set up the iterator to start reading from the peers list and return the | ||
308 | * first item | ||
309 | */ | ||
310 | static void *rxrpc_proc_peers_start(struct seq_file *m, loff_t *_pos) | ||
311 | { | ||
312 | struct list_head *_p; | ||
313 | loff_t pos = *_pos; | ||
314 | |||
315 | /* lock the list against modification */ | ||
316 | down_read(&rxrpc_peers_sem); | ||
317 | |||
318 | /* allow for the header line */ | ||
319 | if (!pos) | ||
320 | return SEQ_START_TOKEN; | ||
321 | pos--; | ||
322 | |||
323 | /* find the n'th element in the list */ | ||
324 | list_for_each(_p, &rxrpc_peers) | ||
325 | if (!pos--) | ||
326 | break; | ||
327 | |||
328 | return _p != &rxrpc_peers ? _p : NULL; | ||
329 | } /* end rxrpc_proc_peers_start() */ | ||
330 | |||
331 | /*****************************************************************************/ | ||
332 | /* | ||
333 | * move to next conn in peers list | ||
334 | */ | ||
335 | static void *rxrpc_proc_peers_next(struct seq_file *p, void *v, loff_t *pos) | ||
336 | { | ||
337 | struct list_head *_p; | ||
338 | |||
339 | (*pos)++; | ||
340 | |||
341 | _p = v; | ||
342 | _p = (v == SEQ_START_TOKEN) ? rxrpc_peers.next : _p->next; | ||
343 | |||
344 | return _p != &rxrpc_peers ? _p : NULL; | ||
345 | } /* end rxrpc_proc_peers_next() */ | ||
346 | |||
347 | /*****************************************************************************/ | ||
348 | /* | ||
349 | * clean up after reading from the peers list | ||
350 | */ | ||
351 | static void rxrpc_proc_peers_stop(struct seq_file *p, void *v) | ||
352 | { | ||
353 | up_read(&rxrpc_peers_sem); | ||
354 | |||
355 | } /* end rxrpc_proc_peers_stop() */ | ||
356 | |||
357 | /*****************************************************************************/ | ||
358 | /* | ||
359 | * display a header line followed by a load of conn lines | ||
360 | */ | ||
361 | static int rxrpc_proc_peers_show(struct seq_file *m, void *v) | ||
362 | { | ||
363 | struct rxrpc_peer *peer = list_entry(v, struct rxrpc_peer, proc_link); | ||
364 | long timeout; | ||
365 | |||
366 | /* display header on line 1 */ | ||
367 | if (v == SEQ_START_TOKEN) { | ||
368 | seq_puts(m, "LOCAL REMOTE USAGE CONNS TIMEOUT" | ||
369 | " MTU RTT(uS)\n"); | ||
370 | return 0; | ||
371 | } | ||
372 | |||
373 | /* display one peer per line on subsequent lines */ | ||
374 | timeout = 0; | ||
375 | if (!list_empty(&peer->timeout.link)) | ||
376 | timeout = (long) peer->timeout.timo_jif - | ||
377 | (long) jiffies; | ||
378 | |||
379 | seq_printf(m, "%5hu %08x %5d %5d %8ld %5Zu %7lu\n", | ||
380 | peer->trans->port, | ||
381 | ntohl(peer->addr.s_addr), | ||
382 | atomic_read(&peer->usage), | ||
383 | atomic_read(&peer->conn_count), | ||
384 | timeout, | ||
385 | peer->if_mtu, | ||
386 | (long) peer->rtt | ||
387 | ); | ||
388 | |||
389 | return 0; | ||
390 | } /* end rxrpc_proc_peers_show() */ | ||
391 | |||
392 | /*****************************************************************************/ | ||
393 | /* | ||
394 | * open "/proc/net/rxrpc/connections" which provides a summary of extant | ||
395 | * connections | ||
396 | */ | ||
397 | static int rxrpc_proc_conns_open(struct inode *inode, struct file *file) | ||
398 | { | ||
399 | struct seq_file *m; | ||
400 | int ret; | ||
401 | |||
402 | ret = seq_open(file, &rxrpc_proc_conns_ops); | ||
403 | if (ret < 0) | ||
404 | return ret; | ||
405 | |||
406 | m = file->private_data; | ||
407 | m->private = PDE(inode)->data; | ||
408 | |||
409 | return 0; | ||
410 | } /* end rxrpc_proc_conns_open() */ | ||
411 | |||
412 | /*****************************************************************************/ | ||
413 | /* | ||
414 | * set up the iterator to start reading from the conns list and return the | ||
415 | * first item | ||
416 | */ | ||
417 | static void *rxrpc_proc_conns_start(struct seq_file *m, loff_t *_pos) | ||
418 | { | ||
419 | struct list_head *_p; | ||
420 | loff_t pos = *_pos; | ||
421 | |||
422 | /* lock the list against modification */ | ||
423 | down_read(&rxrpc_conns_sem); | ||
424 | |||
425 | /* allow for the header line */ | ||
426 | if (!pos) | ||
427 | return SEQ_START_TOKEN; | ||
428 | pos--; | ||
429 | |||
430 | /* find the n'th element in the list */ | ||
431 | list_for_each(_p, &rxrpc_conns) | ||
432 | if (!pos--) | ||
433 | break; | ||
434 | |||
435 | return _p != &rxrpc_conns ? _p : NULL; | ||
436 | } /* end rxrpc_proc_conns_start() */ | ||
437 | |||
438 | /*****************************************************************************/ | ||
439 | /* | ||
440 | * move to next conn in conns list | ||
441 | */ | ||
442 | static void *rxrpc_proc_conns_next(struct seq_file *p, void *v, loff_t *pos) | ||
443 | { | ||
444 | struct list_head *_p; | ||
445 | |||
446 | (*pos)++; | ||
447 | |||
448 | _p = v; | ||
449 | _p = (v == SEQ_START_TOKEN) ? rxrpc_conns.next : _p->next; | ||
450 | |||
451 | return _p != &rxrpc_conns ? _p : NULL; | ||
452 | } /* end rxrpc_proc_conns_next() */ | ||
453 | |||
454 | /*****************************************************************************/ | ||
455 | /* | ||
456 | * clean up after reading from the conns list | ||
457 | */ | ||
458 | static void rxrpc_proc_conns_stop(struct seq_file *p, void *v) | ||
459 | { | ||
460 | up_read(&rxrpc_conns_sem); | ||
461 | |||
462 | } /* end rxrpc_proc_conns_stop() */ | ||
463 | |||
464 | /*****************************************************************************/ | ||
465 | /* | ||
466 | * display a header line followed by a load of conn lines | ||
467 | */ | ||
static int rxrpc_proc_conns_show(struct seq_file *m, void *v)
{
	struct rxrpc_connection *conn;
	long timeout;

	/* list_entry() here is only pointer arithmetic; the result is not
	 * dereferenced until after the SEQ_START_TOKEN check below, so
	 * computing it first is safe */
	conn = list_entry(v, struct rxrpc_connection, proc_link);

	/* display header on line 1 */
	if (v == SEQ_START_TOKEN) {
		seq_puts(m,
			 "LOCAL REMOTE RPORT SRVC CONN  END SERIALNO "
			 "CALLNO   MTU  TIMEOUT"
			 "\n");
		return 0;
	}

	/* display one conn per line on subsequent lines */
	/* timeout is jiffies-relative; negative means already expired */
	timeout = 0;
	if (!list_empty(&conn->timeout.link))
		timeout = (long) conn->timeout.timo_jif -
			(long) jiffies;

	seq_printf(m,
		   "%5hu %08x %5hu %04hx %08x %-3.3s %08x %08x %5Zu %8ld\n",
		   conn->trans->port,
		   ntohl(conn->addr.sin_addr.s_addr),
		   ntohs(conn->addr.sin_port),
		   ntohs(conn->service_id),
		   ntohl(conn->conn_id),
		   conn->out_clientflag ? "CLT" : "SRV",
		   conn->serial_counter,
		   conn->call_counter,
		   conn->mtu_size,
		   timeout
		   );

	return 0;
} /* end rxrpc_proc_conns_show() */
506 | |||
507 | /*****************************************************************************/ | ||
508 | /* | ||
509 | * open "/proc/net/rxrpc/calls" which provides a summary of extant calls | ||
510 | */ | ||
/*
 * open "/proc/net/rxrpc/calls": attach the calls seq_ops and stash the
 * proc_dir_entry's private data on the seq_file
 */
static int rxrpc_proc_calls_open(struct inode *inode, struct file *file)
{
	struct seq_file *m;
	int ret;

	ret = seq_open(file, &rxrpc_proc_calls_ops);
	if (ret < 0)
		return ret;

	/* seq_open() stored the seq_file in file->private_data */
	m = file->private_data;
	m->private = PDE(inode)->data;

	return 0;
} /* end rxrpc_proc_calls_open() */
525 | |||
526 | /*****************************************************************************/ | ||
527 | /* | ||
528 | * set up the iterator to start reading from the calls list and return the | ||
529 | * first item | ||
530 | */ | ||
/*
 * seq_file start hook: take the calls-list read lock (released by the stop
 * hook), then position the iterator; position 0 is the header line and
 * positions 1..n map onto the n'th list element
 */
static void *rxrpc_proc_calls_start(struct seq_file *m, loff_t *_pos)
{
	struct list_head *_p;
	loff_t pos = *_pos;

	/* lock the list against modification */
	down_read(&rxrpc_calls_sem);

	/* allow for the header line */
	if (!pos)
		return SEQ_START_TOKEN;
	pos--;

	/* find the n'th element in the list */
	list_for_each(_p, &rxrpc_calls)
		if (!pos--)
			break;

	/* _p is the list head itself if pos ran past the end */
	return _p != &rxrpc_calls ? _p : NULL;
} /* end rxrpc_proc_calls_start() */
551 | |||
552 | /*****************************************************************************/ | ||
553 | /* | ||
554 | * move to next call in calls list | ||
555 | */ | ||
556 | static void *rxrpc_proc_calls_next(struct seq_file *p, void *v, loff_t *pos) | ||
557 | { | ||
558 | struct list_head *_p; | ||
559 | |||
560 | (*pos)++; | ||
561 | |||
562 | _p = v; | ||
563 | _p = (v == SEQ_START_TOKEN) ? rxrpc_calls.next : _p->next; | ||
564 | |||
565 | return _p != &rxrpc_calls ? _p : NULL; | ||
566 | } /* end rxrpc_proc_calls_next() */ | ||
567 | |||
568 | /*****************************************************************************/ | ||
569 | /* | ||
570 | * clean up after reading from the calls list | ||
571 | */ | ||
/*
 * seq_file stop hook: drop the read lock on the calls list that
 * rxrpc_proc_calls_start() took
 */
static void rxrpc_proc_calls_stop(struct seq_file *p, void *v)
{
	up_read(&rxrpc_calls_sem);

} /* end rxrpc_proc_calls_stop() */
577 | |||
578 | /*****************************************************************************/ | ||
579 | /* | ||
580 | * display a header line followed by a load of call lines | ||
581 | */ | ||
static int rxrpc_proc_calls_show(struct seq_file *m, void *v)
{
	/* safe even for SEQ_START_TOKEN: list_entry() is pointer arithmetic
	 * only, and call isn't dereferenced until after the check below */
	struct rxrpc_call *call = list_entry(v, struct rxrpc_call, call_link);

	/* display header on line 1 */
	if (v == SEQ_START_TOKEN) {
		seq_puts(m,
			 "LOCAL REMOT SRVC CONN     CALL     DIR USE "
			 " L STATE   OPCODE ABORT    ERRNO\n"
			 );
		return 0;
	}

	/* display one call per line on subsequent lines */
	/* state column: normal state names unless the call has errored,
	 * in which case the error-state name is shown instead */
	seq_printf(m,
		   "%5hu %5hu %04hx %08x %08x %s %3u%c"
		   " %c %-7.7s %6d %08x %5d\n",
		   call->conn->trans->port,
		   ntohs(call->conn->addr.sin_port),
		   ntohs(call->conn->service_id),
		   ntohl(call->conn->conn_id),
		   ntohl(call->call_id),
		   call->conn->service ? "SVC" : "CLT",
		   atomic_read(&call->usage),
		   waitqueue_active(&call->waitq) ? 'w' : ' ',
		   call->app_last_rcv ? 'Y' : '-',
		   (call->app_call_state!=RXRPC_CSTATE_ERROR ?
		    rxrpc_call_states7[call->app_call_state] :
		    rxrpc_call_error_states7[call->app_err_state]),
		   call->app_opcode,
		   call->app_abort_code,
		   call->app_errno
		   );

	return 0;
} /* end rxrpc_proc_calls_show() */
diff --git a/net/rxrpc/rxkad.c b/net/rxrpc/rxkad.c new file mode 100644 index 000000000000..1eaf529efac1 --- /dev/null +++ b/net/rxrpc/rxkad.c | |||
@@ -0,0 +1,1153 @@ | |||
1 | /* Kerberos-based RxRPC security | ||
2 | * | ||
3 | * Copyright (C) 2007 Red Hat, Inc. All Rights Reserved. | ||
4 | * Written by David Howells (dhowells@redhat.com) | ||
5 | * | ||
6 | * This program is free software; you can redistribute it and/or | ||
7 | * modify it under the terms of the GNU General Public License | ||
8 | * as published by the Free Software Foundation; either version | ||
9 | * 2 of the License, or (at your option) any later version. | ||
10 | */ | ||
11 | |||
12 | #include <linux/module.h> | ||
13 | #include <linux/net.h> | ||
14 | #include <linux/skbuff.h> | ||
15 | #include <linux/udp.h> | ||
16 | #include <linux/crypto.h> | ||
17 | #include <linux/scatterlist.h> | ||
18 | #include <linux/ctype.h> | ||
19 | #include <net/sock.h> | ||
20 | #include <net/af_rxrpc.h> | ||
21 | #include "ar-internal.h" | ||
22 | |||
23 | #define RXKAD_VERSION 2 | ||
24 | #define MAXKRB5TICKETLEN 1024 | ||
25 | #define RXKAD_TKT_TYPE_KERBEROS_V5 256 | ||
26 | #define ANAME_SZ 40 /* size of authentication name */ | ||
27 | #define INST_SZ 40 /* size of principal's instance */ | ||
28 | #define REALM_SZ 40 /* size of principal's auth domain */ | ||
29 | #define SNAME_SZ 40 /* size of service name */ | ||
30 | |||
31 | unsigned rxrpc_debug; | ||
32 | module_param_named(debug, rxrpc_debug, uint, S_IWUSR | S_IRUGO); | ||
33 | MODULE_PARM_DESC(rxrpc_debug, "rxkad debugging mask"); | ||
34 | |||
/* security header prepended to the payload at level 1 (auth) security */
struct rxkad_level1_hdr {
	__be32	data_size;	/* true data size (excluding padding) */
};

/* security header prepended to the payload at level 2 (encrypt) security */
struct rxkad_level2_hdr {
	__be32	data_size;	/* true data size (excluding padding) */
	__be32	checksum;	/* decrypted data checksum */
};
43 | |||
44 | MODULE_DESCRIPTION("RxRPC network protocol type-2 security (Kerberos)"); | ||
45 | MODULE_AUTHOR("Red Hat, Inc."); | ||
46 | MODULE_LICENSE("GPL"); | ||
47 | |||
48 | /* | ||
49 | * this holds a pinned cipher so that keventd doesn't get called by the cipher | ||
50 | * alloc routine, but since we have it to hand, we use it to decrypt RESPONSE | ||
51 | * packets | ||
52 | */ | ||
53 | static struct crypto_blkcipher *rxkad_ci; | ||
54 | static DEFINE_MUTEX(rxkad_ci_mutex); | ||
55 | |||
56 | /* | ||
57 | * initialise connection security | ||
58 | */ | ||
59 | static int rxkad_init_connection_security(struct rxrpc_connection *conn) | ||
60 | { | ||
61 | struct rxrpc_key_payload *payload; | ||
62 | struct crypto_blkcipher *ci; | ||
63 | int ret; | ||
64 | |||
65 | _enter("{%d},{%x}", conn->debug_id, key_serial(conn->key)); | ||
66 | |||
67 | payload = conn->key->payload.data; | ||
68 | conn->security_ix = payload->k.security_index; | ||
69 | |||
70 | ci = crypto_alloc_blkcipher("pcbc(fcrypt)", 0, CRYPTO_ALG_ASYNC); | ||
71 | if (IS_ERR(ci)) { | ||
72 | _debug("no cipher"); | ||
73 | ret = PTR_ERR(ci); | ||
74 | goto error; | ||
75 | } | ||
76 | |||
77 | if (crypto_blkcipher_setkey(ci, payload->k.session_key, | ||
78 | sizeof(payload->k.session_key)) < 0) | ||
79 | BUG(); | ||
80 | |||
81 | switch (conn->security_level) { | ||
82 | case RXRPC_SECURITY_PLAIN: | ||
83 | break; | ||
84 | case RXRPC_SECURITY_AUTH: | ||
85 | conn->size_align = 8; | ||
86 | conn->security_size = sizeof(struct rxkad_level1_hdr); | ||
87 | conn->header_size += sizeof(struct rxkad_level1_hdr); | ||
88 | break; | ||
89 | case RXRPC_SECURITY_ENCRYPT: | ||
90 | conn->size_align = 8; | ||
91 | conn->security_size = sizeof(struct rxkad_level2_hdr); | ||
92 | conn->header_size += sizeof(struct rxkad_level2_hdr); | ||
93 | break; | ||
94 | default: | ||
95 | ret = -EKEYREJECTED; | ||
96 | goto error; | ||
97 | } | ||
98 | |||
99 | conn->cipher = ci; | ||
100 | ret = 0; | ||
101 | error: | ||
102 | _leave(" = %d", ret); | ||
103 | return ret; | ||
104 | } | ||
105 | |||
106 | /* | ||
107 | * prime the encryption state with the invariant parts of a connection's | ||
108 | * description | ||
109 | */ | ||
/*
 * prime the encryption state with the invariant parts of a connection's
 * description: encrypt {epoch, cid, 0, security_ix} under the session key
 * and keep half of the result as the per-connection checksum IV
 */
static void rxkad_prime_packet_security(struct rxrpc_connection *conn)
{
	struct rxrpc_key_payload *payload;
	struct blkcipher_desc desc;
	struct scatterlist sg[2];
	struct rxrpc_crypt iv;
	struct {
		__be32 x[4];
	} tmpbuf __attribute__((aligned(16))); /* must all be in same page */

	_enter("");

	/* service-side connections with no key get no priming */
	if (!conn->key)
		return;

	payload = conn->key->payload.data;
	memcpy(&iv, payload->k.session_key, sizeof(iv));

	desc.tfm = conn->cipher;
	desc.info = iv.x;
	desc.flags = 0;

	/* epoch and cid are already big-endian on the connection */
	tmpbuf.x[0] = conn->epoch;
	tmpbuf.x[1] = conn->cid;
	tmpbuf.x[2] = 0;
	tmpbuf.x[3] = htonl(conn->security_ix);

	/* encrypt tmpbuf in place (src and dst scatterlists alias it) */
	memset(sg, 0, sizeof(sg));
	sg_set_buf(&sg[0], &tmpbuf, sizeof(tmpbuf));
	sg_set_buf(&sg[1], &tmpbuf, sizeof(tmpbuf));
	crypto_blkcipher_encrypt_iv(&desc, &sg[0], &sg[1], sizeof(tmpbuf));

	/* the second half of the ciphertext becomes the checksum IV */
	memcpy(&conn->csum_iv, &tmpbuf.x[2], sizeof(conn->csum_iv));
	ASSERTCMP(conn->csum_iv.n[0], ==, tmpbuf.x[2]);

	_leave("");
}
147 | |||
148 | /* | ||
149 | * partially encrypt a packet (level 1 security) | ||
150 | */ | ||
/*
 * partially encrypt a packet (level 1 security): encrypt only the level-1
 * header plus the first four bytes of payload, with the seq/callNumber
 * check value folded into the top 16 bits of data_size
 */
static int rxkad_secure_packet_auth(const struct rxrpc_call *call,
				    struct sk_buff *skb,
				    u32 data_size,
				    void *sechdr)
{
	struct rxrpc_skb_priv *sp;
	struct blkcipher_desc desc;
	struct rxrpc_crypt iv;
	struct scatterlist sg[2];
	struct {
		struct rxkad_level1_hdr hdr;
		__be32	first;	/* first four bytes of data and padding */
	} tmpbuf __attribute__((aligned(8))); /* must all be in same page */
	u16 check;

	sp = rxrpc_skb(skb);

	_enter("");

	/* check is the low 16 bits of seq XOR callNumber (u32 truncated) */
	check = ntohl(sp->hdr.seq ^ sp->hdr.callNumber);
	data_size |= (u32) check << 16;

	tmpbuf.hdr.data_size = htonl(data_size);
	/* the four data bytes following the header slot in sechdr */
	memcpy(&tmpbuf.first, sechdr + 4, sizeof(tmpbuf.first));

	/* start the encryption afresh */
	memset(&iv, 0, sizeof(iv));
	desc.tfm = call->conn->cipher;
	desc.info = iv.x;
	desc.flags = 0;

	/* encrypt tmpbuf in place (src and dst alias the same buffer) */
	memset(sg, 0, sizeof(sg));
	sg_set_buf(&sg[0], &tmpbuf, sizeof(tmpbuf));
	sg_set_buf(&sg[1], &tmpbuf, sizeof(tmpbuf));
	crypto_blkcipher_encrypt_iv(&desc, &sg[0], &sg[1], sizeof(tmpbuf));

	/* write the ciphertext back over the security header area */
	memcpy(sechdr, &tmpbuf, sizeof(tmpbuf));

	_leave(" = 0");
	return 0;
}
192 | |||
193 | /* | ||
194 | * wholly encrypt a packet (level 2 security) | ||
195 | */ | ||
/*
 * wholly encrypt a packet (level 2 security): encrypt the level-2 header
 * into sechdr, then encrypt the whole skb payload in place, padded up to
 * the connection's size alignment
 */
static int rxkad_secure_packet_encrypt(const struct rxrpc_call *call,
					struct sk_buff *skb,
					u32 data_size,
					void *sechdr)
{
	const struct rxrpc_key_payload *payload;
	struct rxkad_level2_hdr rxkhdr
		__attribute__((aligned(8))); /* must be all on one page */
	struct rxrpc_skb_priv *sp;
	struct blkcipher_desc desc;
	struct rxrpc_crypt iv;
	struct scatterlist sg[16];
	struct sk_buff *trailer;
	unsigned len;
	u16 check;
	int nsg;

	sp = rxrpc_skb(skb);

	_enter("");

	/* check is the low 16 bits of seq XOR callNumber (u32 truncated) */
	check = ntohl(sp->hdr.seq ^ sp->hdr.callNumber);

	rxkhdr.data_size = htonl(data_size | (u32) check << 16);
	rxkhdr.checksum = 0;

	/* encrypt from the session key */
	payload = call->conn->key->payload.data;
	memcpy(&iv, payload->k.session_key, sizeof(iv));
	desc.tfm = call->conn->cipher;
	desc.info = iv.x;
	desc.flags = 0;

	/* encrypt the header: plaintext from rxkhdr, ciphertext to sechdr */
	memset(sg, 0, sizeof(sg[0]) * 2);
	sg_set_buf(&sg[0], sechdr, sizeof(rxkhdr));
	sg_set_buf(&sg[1], &rxkhdr, sizeof(rxkhdr));
	crypto_blkcipher_encrypt_iv(&desc, &sg[0], &sg[1], sizeof(rxkhdr));

	/* we want to encrypt the skbuff in-place */
	nsg = skb_cow_data(skb, 0, &trailer);
	/* refuse skbs needing more than the 16 on-stack sg entries */
	if (nsg < 0 || nsg > 16)
		return -ENOMEM;

	/* round the length up to the connection's size alignment (a power
	 * of two) so the padding is encrypted too */
	len = data_size + call->conn->size_align - 1;
	len &= ~(call->conn->size_align - 1);

	skb_to_sgvec(skb, sg, 0, len);
	crypto_blkcipher_encrypt_iv(&desc, sg, sg, len);

	_leave(" = 0");
	return 0;
}
248 | |||
249 | /* | ||
250 | * checksum an RxRPC packet header | ||
251 | */ | ||
/*
 * secure an outgoing packet: compute the rxkad header checksum from the
 * channel/seq/callNumber under the primed checksum IV, stamp it into the
 * header, then apply level-dependent payload encryption
 */
static int rxkad_secure_packet(const struct rxrpc_call *call,
			       struct sk_buff *skb,
			       size_t data_size,
			       void *sechdr)
{
	struct rxrpc_skb_priv *sp;
	struct blkcipher_desc desc;
	struct rxrpc_crypt iv;
	struct scatterlist sg[2];
	struct {
		__be32 x[2];
	} tmpbuf __attribute__((aligned(8))); /* must all be in same page */
	__be32 x;
	int ret;

	sp = rxrpc_skb(skb);

	_enter("{%d{%x}},{#%u},%zu,",
	       call->debug_id, key_serial(call->conn->key), ntohl(sp->hdr.seq),
	       data_size);

	/* unsecured connections need no work */
	if (!call->conn->cipher)
		return 0;

	ret = key_validate(call->conn->key);
	if (ret < 0)
		return ret;

	/* continue encrypting from where we left off */
	memcpy(&iv, call->conn->csum_iv.x, sizeof(iv));
	desc.tfm = call->conn->cipher;
	desc.info = iv.x;
	desc.flags = 0;

	/* calculate the security checksum: encrypt {callNumber,
	 * channel|seq} and take 16 bits of the result */
	x = htonl(call->channel << (32 - RXRPC_CIDSHIFT));
	x |= sp->hdr.seq & __constant_cpu_to_be32(0x3fffffff);
	tmpbuf.x[0] = sp->hdr.callNumber;
	tmpbuf.x[1] = x;

	memset(&sg, 0, sizeof(sg));
	sg_set_buf(&sg[0], &tmpbuf, sizeof(tmpbuf));
	sg_set_buf(&sg[1], &tmpbuf, sizeof(tmpbuf));
	crypto_blkcipher_encrypt_iv(&desc, &sg[0], &sg[1], sizeof(tmpbuf));

	x = ntohl(tmpbuf.x[1]);
	x = (x >> 16) & 0xffff;
	if (x == 0)
		x = 1; /* zero checksums are not permitted */
	sp->hdr.cksum = htons(x);

	/* apply the payload protection appropriate to the security level */
	switch (call->conn->security_level) {
	case RXRPC_SECURITY_PLAIN:
		ret = 0;
		break;
	case RXRPC_SECURITY_AUTH:
		ret = rxkad_secure_packet_auth(call, skb, data_size, sechdr);
		break;
	case RXRPC_SECURITY_ENCRYPT:
		ret = rxkad_secure_packet_encrypt(call, skb, data_size,
						  sechdr);
		break;
	default:
		ret = -EPERM;
		break;
	}

	_leave(" = %d [set %hx]", ret, x);
	return ret;
}
322 | |||
323 | /* | ||
324 | * decrypt partial encryption on a packet (level 1 security) | ||
325 | */ | ||
/*
 * decrypt partial encryption on a packet (level 1 security): decrypt the
 * first 8 bytes in place, verify the embedded check value and strip the
 * level-1 header and any padding
 */
static int rxkad_verify_packet_auth(const struct rxrpc_call *call,
				    struct sk_buff *skb,
				    u32 *_abort_code)
{
	struct rxkad_level1_hdr sechdr;
	struct rxrpc_skb_priv *sp;
	struct blkcipher_desc desc;
	struct rxrpc_crypt iv;
	struct scatterlist sg[2];
	struct sk_buff *trailer;
	u32 data_size, buf;
	u16 check;

	_enter("");

	sp = rxrpc_skb(skb);

	/* we want to decrypt the skbuff in-place */
	if (skb_cow_data(skb, 0, &trailer) < 0)
		goto nomem;

	/* NOTE(review): only 8 bytes are mapped, but skb_cow_data's nsg is
	 * not checked against the 2-entry sg array — assumes the first 8
	 * bytes never span more than two fragments; confirm */
	skb_to_sgvec(skb, sg, 0, 8);

	/* start the decryption afresh */
	memset(&iv, 0, sizeof(iv));
	desc.tfm = call->conn->cipher;
	desc.info = iv.x;
	desc.flags = 0;

	crypto_blkcipher_decrypt_iv(&desc, sg, sg, 8);

	/* remove the decrypted packet length */
	if (skb_copy_bits(skb, 0, &sechdr, sizeof(sechdr)) < 0)
		goto datalen_error;
	if (!skb_pull(skb, sizeof(sechdr)))
		BUG();

	/* low 16 bits are the true data size, high 16 the check value */
	buf = ntohl(sechdr.data_size);
	data_size = buf & 0xffff;

	check = buf >> 16;
	check ^= ntohl(sp->hdr.seq ^ sp->hdr.callNumber);
	check &= 0xffff;
	if (check != 0) {
		*_abort_code = RXKADSEALEDINCON;
		goto protocol_error;
	}

	/* shorten the packet to remove the padding */
	if (data_size > skb->len)
		goto datalen_error;
	else if (data_size < skb->len)
		skb->len = data_size;

	_leave(" = 0 [dlen=%x]", data_size);
	return 0;

datalen_error:
	*_abort_code = RXKADDATALEN;
protocol_error:
	_leave(" = -EPROTO");
	return -EPROTO;

nomem:
	_leave(" = -ENOMEM");
	return -ENOMEM;
}
393 | |||
394 | /* | ||
395 | * wholly decrypt a packet (level 2 security) | ||
396 | */ | ||
/*
 * wholly decrypt a packet (level 2 security): decrypt the entire payload
 * in place under the session key, verify the embedded check value and
 * strip the level-2 header and padding
 */
static int rxkad_verify_packet_encrypt(const struct rxrpc_call *call,
				       struct sk_buff *skb,
				       u32 *_abort_code)
{
	const struct rxrpc_key_payload *payload;
	struct rxkad_level2_hdr sechdr;
	struct rxrpc_skb_priv *sp;
	struct blkcipher_desc desc;
	struct rxrpc_crypt iv;
	struct scatterlist _sg[4], *sg;
	struct sk_buff *trailer;
	u32 data_size, buf;
	u16 check;
	int nsg;

	_enter(",{%d}", skb->len);

	sp = rxrpc_skb(skb);

	/* we want to decrypt the skbuff in-place */
	nsg = skb_cow_data(skb, 0, &trailer);
	if (nsg < 0)
		goto nomem;

	/* use the on-stack scatterlist unless the skb is too fragmented */
	sg = _sg;
	if (unlikely(nsg > 4)) {
		/* GFP_NOIO: may be called on the network receive path */
		sg = kmalloc(sizeof(*sg) * nsg, GFP_NOIO);
		if (!sg)
			goto nomem;
	}

	skb_to_sgvec(skb, sg, 0, skb->len);

	/* decrypt from the session key */
	payload = call->conn->key->payload.data;
	memcpy(&iv, payload->k.session_key, sizeof(iv));
	desc.tfm = call->conn->cipher;
	desc.info = iv.x;
	desc.flags = 0;

	crypto_blkcipher_decrypt_iv(&desc, sg, sg, skb->len);
	if (sg != _sg)
		kfree(sg);

	/* remove the decrypted packet length */
	if (skb_copy_bits(skb, 0, &sechdr, sizeof(sechdr)) < 0)
		goto datalen_error;
	if (!skb_pull(skb, sizeof(sechdr)))
		BUG();

	/* low 16 bits are the true data size, high 16 the check value */
	buf = ntohl(sechdr.data_size);
	data_size = buf & 0xffff;

	check = buf >> 16;
	check ^= ntohl(sp->hdr.seq ^ sp->hdr.callNumber);
	check &= 0xffff;
	if (check != 0) {
		*_abort_code = RXKADSEALEDINCON;
		goto protocol_error;
	}

	/* shorten the packet to remove the padding */
	if (data_size > skb->len)
		goto datalen_error;
	else if (data_size < skb->len)
		skb->len = data_size;

	_leave(" = 0 [dlen=%x]", data_size);
	return 0;

datalen_error:
	*_abort_code = RXKADDATALEN;
protocol_error:
	_leave(" = -EPROTO");
	return -EPROTO;

nomem:
	_leave(" = -ENOMEM");
	return -ENOMEM;
}
477 | |||
478 | /* | ||
479 | * verify the security on a received packet | ||
480 | */ | ||
/*
 * verify the security on a received packet: recompute the header checksum
 * the same way the sender did and compare, then apply level-dependent
 * payload decryption/verification
 */
static int rxkad_verify_packet(const struct rxrpc_call *call,
			       struct sk_buff *skb,
			       u32 *_abort_code)
{
	struct blkcipher_desc desc;
	struct rxrpc_skb_priv *sp;
	struct rxrpc_crypt iv;
	struct scatterlist sg[2];
	struct {
		__be32 x[2];
	} tmpbuf __attribute__((aligned(8))); /* must all be in same page */
	__be32 x;
	__be16 cksum;
	int ret;

	sp = rxrpc_skb(skb);

	_enter("{%d{%x}},{#%u}",
	       call->debug_id, key_serial(call->conn->key),
	       ntohl(sp->hdr.seq));

	/* unsecured connections need no verification */
	if (!call->conn->cipher)
		return 0;

	/* rxkad is security index 2; reject anything else */
	if (sp->hdr.securityIndex != 2) {
		*_abort_code = RXKADINCONSISTENCY;
		_leave(" = -EPROTO [not rxkad]");
		return -EPROTO;
	}

	/* continue encrypting from where we left off */
	memcpy(&iv, call->conn->csum_iv.x, sizeof(iv));
	desc.tfm = call->conn->cipher;
	desc.info = iv.x;
	desc.flags = 0;

	/* validate the security checksum: mirror of the calculation in
	 * rxkad_secure_packet() */
	x = htonl(call->channel << (32 - RXRPC_CIDSHIFT));
	x |= sp->hdr.seq & __constant_cpu_to_be32(0x3fffffff);
	tmpbuf.x[0] = call->call_id;
	tmpbuf.x[1] = x;

	memset(&sg, 0, sizeof(sg));
	sg_set_buf(&sg[0], &tmpbuf, sizeof(tmpbuf));
	sg_set_buf(&sg[1], &tmpbuf, sizeof(tmpbuf));
	crypto_blkcipher_encrypt_iv(&desc, &sg[0], &sg[1], sizeof(tmpbuf));

	x = ntohl(tmpbuf.x[1]);
	x = (x >> 16) & 0xffff;
	if (x == 0)
		x = 1; /* zero checksums are not permitted */

	cksum = htons(x);
	if (sp->hdr.cksum != cksum) {
		*_abort_code = RXKADSEALEDINCON;
		_leave(" = -EPROTO [csum failed]");
		return -EPROTO;
	}

	/* decrypt/verify the payload appropriate to the security level */
	switch (call->conn->security_level) {
	case RXRPC_SECURITY_PLAIN:
		ret = 0;
		break;
	case RXRPC_SECURITY_AUTH:
		ret = rxkad_verify_packet_auth(call, skb, _abort_code);
		break;
	case RXRPC_SECURITY_ENCRYPT:
		ret = rxkad_verify_packet_encrypt(call, skb, _abort_code);
		break;
	default:
		ret = -ENOANO;
		break;
	}

	_leave(" = %d", ret);
	return ret;
}
558 | |||
559 | /* | ||
560 | * issue a challenge | ||
561 | */ | ||
/*
 * issue a challenge: generate a random nonce, record it on the connection
 * and transmit a CHALLENGE packet to the peer
 *
 * Returns 0 on success, a key-validation error, or -EAGAIN if the send
 * fails (caller may retry).
 */
static int rxkad_issue_challenge(struct rxrpc_connection *conn)
{
	struct rxkad_challenge challenge;
	struct rxrpc_header hdr;
	struct msghdr msg;
	struct kvec iov[2];
	size_t len;
	int ret;

	_enter("{%d,%x}", conn->debug_id, key_serial(conn->key));

	ret = key_validate(conn->key);
	if (ret < 0)
		return ret;

	/* the nonce is remembered so the RESPONSE can be checked against it */
	get_random_bytes(&conn->security_nonce, sizeof(conn->security_nonce));

	challenge.version	= htonl(2);
	challenge.nonce		= htonl(conn->security_nonce);
	challenge.min_level	= htonl(0);
	challenge.__padding	= 0;

	msg.msg_name	= &conn->trans->peer->srx.transport.sin;
	msg.msg_namelen	= sizeof(conn->trans->peer->srx.transport.sin);
	msg.msg_control	= NULL;
	msg.msg_controllen = 0;
	msg.msg_flags	= 0;

	/* connection-level packet: no call number or sequence number */
	hdr.epoch	= conn->epoch;
	hdr.cid		= conn->cid;
	hdr.callNumber	= 0;
	hdr.seq		= 0;
	hdr.type	= RXRPC_PACKET_TYPE_CHALLENGE;
	hdr.flags	= conn->out_clientflag;
	hdr.userStatus	= 0;
	hdr.securityIndex = conn->security_ix;
	hdr._rsvd	= 0;
	hdr.serviceId	= conn->service_id;

	iov[0].iov_base	= &hdr;
	iov[0].iov_len	= sizeof(hdr);
	iov[1].iov_base	= &challenge;
	iov[1].iov_len	= sizeof(challenge);

	len = iov[0].iov_len + iov[1].iov_len;

	/* assign the serial number as late as possible */
	hdr.serial = htonl(atomic_inc_return(&conn->serial));
	_proto("Tx CHALLENGE %%%u", ntohl(hdr.serial));

	ret = kernel_sendmsg(conn->trans->local->socket, &msg, iov, 2, len);
	if (ret < 0) {
		_debug("sendmsg failed: %d", ret);
		return -EAGAIN;
	}

	_leave(" = 0");
	return 0;
}
620 | |||
621 | /* | ||
622 | * send a Kerberos security response | ||
623 | */ | ||
/*
 * send a Kerberos security response: transmit the supplied header (reused
 * from the challenge, retyped as RESPONSE), the response block and the
 * opaque Kerberos ticket as one datagram
 *
 * Returns 0 on success or -EAGAIN if the send fails.
 */
static int rxkad_send_response(struct rxrpc_connection *conn,
			       struct rxrpc_header *hdr,
			       struct rxkad_response *resp,
			       const struct rxkad_key *s2)
{
	struct msghdr msg;
	struct kvec iov[3];
	size_t len;
	int ret;

	_enter("");

	msg.msg_name	= &conn->trans->peer->srx.transport.sin;
	msg.msg_namelen	= sizeof(conn->trans->peer->srx.transport.sin);
	msg.msg_control	= NULL;
	msg.msg_controllen = 0;
	msg.msg_flags	= 0;

	/* retype the incoming header; cid/callNumber etc. are kept as-is */
	hdr->epoch	= conn->epoch;
	hdr->seq	= 0;
	hdr->type	= RXRPC_PACKET_TYPE_RESPONSE;
	hdr->flags	= conn->out_clientflag;
	hdr->userStatus	= 0;
	hdr->_rsvd	= 0;

	iov[0].iov_base	= hdr;
	iov[0].iov_len	= sizeof(*hdr);
	iov[1].iov_base	= resp;
	iov[1].iov_len	= sizeof(*resp);
	iov[2].iov_base	= (void *) s2->ticket;
	iov[2].iov_len	= s2->ticket_len;

	len = iov[0].iov_len + iov[1].iov_len + iov[2].iov_len;

	/* assign the serial number as late as possible */
	hdr->serial = htonl(atomic_inc_return(&conn->serial));
	_proto("Tx RESPONSE %%%u", ntohl(hdr->serial));

	ret = kernel_sendmsg(conn->trans->local->socket, &msg, iov, 3, len);
	if (ret < 0) {
		_debug("sendmsg failed: %d", ret);
		return -EAGAIN;
	}

	_leave(" = 0");
	return 0;
}
670 | |||
671 | /* | ||
672 | * calculate the response checksum | ||
673 | */ | ||
674 | static void rxkad_calc_response_checksum(struct rxkad_response *response) | ||
675 | { | ||
676 | u32 csum = 1000003; | ||
677 | int loop; | ||
678 | u8 *p = (u8 *) response; | ||
679 | |||
680 | for (loop = sizeof(*response); loop > 0; loop--) | ||
681 | csum = csum * 0x10204081 + *p++; | ||
682 | |||
683 | response->encrypted.checksum = htonl(csum); | ||
684 | } | ||
685 | |||
686 | /* | ||
687 | * load a scatterlist with a potentially split-page buffer | ||
688 | */ | ||
689 | static void rxkad_sg_set_buf2(struct scatterlist sg[2], | ||
690 | void *buf, size_t buflen) | ||
691 | { | ||
692 | |||
693 | memset(sg, 0, sizeof(sg)); | ||
694 | |||
695 | sg_set_buf(&sg[0], buf, buflen); | ||
696 | if (sg[0].offset + buflen > PAGE_SIZE) { | ||
697 | /* the buffer was split over two pages */ | ||
698 | sg[0].length = PAGE_SIZE - sg[0].offset; | ||
699 | sg_set_buf(&sg[1], buf + sg[0].length, buflen - sg[0].length); | ||
700 | } | ||
701 | |||
702 | ASSERTCMP(sg[0].length + sg[1].length, ==, buflen); | ||
703 | } | ||
704 | |||
705 | /* | ||
706 | * encrypt the response packet | ||
707 | */ | ||
/*
 * encrypt the response packet: encrypt only the resp->encrypted section in
 * place under the session key (which also serves as the IV)
 */
static void rxkad_encrypt_response(struct rxrpc_connection *conn,
				   struct rxkad_response *resp,
				   const struct rxkad_key *s2)
{
	struct blkcipher_desc desc;
	struct rxrpc_crypt iv;
	struct scatterlist ssg[2], dsg[2];

	/* continue encrypting from where we left off */
	memcpy(&iv, s2->session_key, sizeof(iv));
	desc.tfm = conn->cipher;
	desc.info = iv.x;
	desc.flags = 0;

	/* the encrypted section may straddle a page boundary, hence the
	 * two-entry scatterlists; dst aliases src for in-place encryption */
	rxkad_sg_set_buf2(ssg, &resp->encrypted, sizeof(resp->encrypted));
	memcpy(dsg, ssg, sizeof(dsg));
	crypto_blkcipher_encrypt_iv(&desc, dsg, ssg, sizeof(resp->encrypted));
}
726 | |||
727 | /* | ||
728 | * respond to a challenge packet | ||
729 | */ | ||
/*
 * respond to a challenge packet: validate the challenge, build a response
 * describing our connection state (current call IDs, nonce + 1, security
 * level), checksum and encrypt it, and send it with our Kerberos ticket
 *
 * Returns 0 on success, -EPROTO (with *_abort_code set) on protocol
 * error, or a key-validation/send error.
 */
static int rxkad_respond_to_challenge(struct rxrpc_connection *conn,
				      struct sk_buff *skb,
				      u32 *_abort_code)
{
	const struct rxrpc_key_payload *payload;
	struct rxkad_challenge challenge;
	struct rxkad_response resp
		__attribute__((aligned(8))); /* must be aligned for crypto */
	struct rxrpc_skb_priv *sp;
	u32 version, nonce, min_level, abort_code;
	int ret;

	_enter("{%d,%x}", conn->debug_id, key_serial(conn->key));

	/* we can't respond without a key to encrypt with */
	if (!conn->key) {
		_leave(" = -EPROTO [no key]");
		return -EPROTO;
	}

	ret = key_validate(conn->key);
	if (ret < 0) {
		*_abort_code = RXKADEXPIRED;
		return ret;
	}

	abort_code = RXKADPACKETSHORT;
	sp = rxrpc_skb(skb);
	if (skb_copy_bits(skb, 0, &challenge, sizeof(challenge)) < 0)
		goto protocol_error;

	version = ntohl(challenge.version);
	nonce = ntohl(challenge.nonce);
	min_level = ntohl(challenge.min_level);

	_proto("Rx CHALLENGE %%%u { v=%u n=%u ml=%u }",
	       ntohl(sp->hdr.serial), version, nonce, min_level);

	abort_code = RXKADINCONSISTENCY;
	if (version != RXKAD_VERSION)
		goto protocol_error;

	/* refuse to downgrade below the peer's minimum security level */
	abort_code = RXKADLEVELFAIL;
	if (conn->security_level < min_level)
		goto protocol_error;

	payload = conn->key->payload.data;

	/* build the response packet */
	memset(&resp, 0, sizeof(resp));

	resp.version = RXKAD_VERSION;
	resp.encrypted.epoch = conn->epoch;
	resp.encrypted.cid = conn->cid;
	resp.encrypted.securityIndex = htonl(conn->security_ix);
	/* report the current call ID on each of the four channels (0 if
	 * the channel is idle) */
	resp.encrypted.call_id[0] =
		(conn->channels[0] ? conn->channels[0]->call_id : 0);
	resp.encrypted.call_id[1] =
		(conn->channels[1] ? conn->channels[1]->call_id : 0);
	resp.encrypted.call_id[2] =
		(conn->channels[2] ? conn->channels[2]->call_id : 0);
	resp.encrypted.call_id[3] =
		(conn->channels[3] ? conn->channels[3]->call_id : 0);
	/* echoing nonce + 1 proves we could decrypt the challenge */
	resp.encrypted.inc_nonce = htonl(nonce + 1);
	resp.encrypted.level = htonl(conn->security_level);
	resp.kvno = htonl(payload->k.kvno);
	resp.ticket_len = htonl(payload->k.ticket_len);

	/* calculate the response checksum and then do the encryption */
	rxkad_calc_response_checksum(&resp);
	rxkad_encrypt_response(conn, &resp, &payload->k);
	return rxkad_send_response(conn, &sp->hdr, &resp, &payload->k);

protocol_error:
	*_abort_code = abort_code;
	_leave(" = -EPROTO [%d]", abort_code);
	return -EPROTO;
}
807 | |||
808 | /* | ||
809 | * decrypt the kerberos IV ticket in the response | ||
810 | */ | ||
811 | static int rxkad_decrypt_ticket(struct rxrpc_connection *conn, | ||
812 | void *ticket, size_t ticket_len, | ||
813 | struct rxrpc_crypt *_session_key, | ||
814 | time_t *_expiry, | ||
815 | u32 *_abort_code) | ||
816 | { | ||
817 | struct blkcipher_desc desc; | ||
818 | struct rxrpc_crypt iv, key; | ||
819 | struct scatterlist ssg[1], dsg[1]; | ||
820 | struct in_addr addr; | ||
821 | unsigned life; | ||
822 | time_t issue, now; | ||
823 | bool little_endian; | ||
824 | int ret; | ||
825 | u8 *p, *q, *name, *end; | ||
826 | |||
827 | _enter("{%d},{%x}", conn->debug_id, key_serial(conn->server_key)); | ||
828 | |||
829 | *_expiry = 0; | ||
830 | |||
831 | ret = key_validate(conn->server_key); | ||
832 | if (ret < 0) { | ||
833 | switch (ret) { | ||
834 | case -EKEYEXPIRED: | ||
835 | *_abort_code = RXKADEXPIRED; | ||
836 | goto error; | ||
837 | default: | ||
838 | *_abort_code = RXKADNOAUTH; | ||
839 | goto error; | ||
840 | } | ||
841 | } | ||
842 | |||
843 | ASSERT(conn->server_key->payload.data != NULL); | ||
844 | ASSERTCMP((unsigned long) ticket & 7UL, ==, 0); | ||
845 | |||
846 | memcpy(&iv, &conn->server_key->type_data, sizeof(iv)); | ||
847 | |||
848 | desc.tfm = conn->server_key->payload.data; | ||
849 | desc.info = iv.x; | ||
850 | desc.flags = 0; | ||
851 | |||
852 | sg_init_one(&ssg[0], ticket, ticket_len); | ||
853 | memcpy(dsg, ssg, sizeof(dsg)); | ||
854 | crypto_blkcipher_decrypt_iv(&desc, dsg, ssg, ticket_len); | ||
855 | |||
856 | p = ticket; | ||
857 | end = p + ticket_len; | ||
858 | |||
859 | #define Z(size) \ | ||
860 | ({ \ | ||
861 | u8 *__str = p; \ | ||
862 | q = memchr(p, 0, end - p); \ | ||
863 | if (!q || q - p > (size)) \ | ||
864 | goto bad_ticket; \ | ||
865 | for (; p < q; p++) \ | ||
866 | if (!isprint(*p)) \ | ||
867 | goto bad_ticket; \ | ||
868 | p++; \ | ||
869 | __str; \ | ||
870 | }) | ||
871 | |||
872 | /* extract the ticket flags */ | ||
873 | _debug("KIV FLAGS: %x", *p); | ||
874 | little_endian = *p & 1; | ||
875 | p++; | ||
876 | |||
877 | /* extract the authentication name */ | ||
878 | name = Z(ANAME_SZ); | ||
879 | _debug("KIV ANAME: %s", name); | ||
880 | |||
881 | /* extract the principal's instance */ | ||
882 | name = Z(INST_SZ); | ||
883 | _debug("KIV INST : %s", name); | ||
884 | |||
885 | /* extract the principal's authentication domain */ | ||
886 | name = Z(REALM_SZ); | ||
887 | _debug("KIV REALM: %s", name); | ||
888 | |||
889 | if (end - p < 4 + 8 + 4 + 2) | ||
890 | goto bad_ticket; | ||
891 | |||
892 | /* get the IPv4 address of the entity that requested the ticket */ | ||
893 | memcpy(&addr, p, sizeof(addr)); | ||
894 | p += 4; | ||
895 | _debug("KIV ADDR : "NIPQUAD_FMT, NIPQUAD(addr)); | ||
896 | |||
897 | /* get the session key from the ticket */ | ||
898 | memcpy(&key, p, sizeof(key)); | ||
899 | p += 8; | ||
900 | _debug("KIV KEY : %08x %08x", ntohl(key.n[0]), ntohl(key.n[1])); | ||
901 | memcpy(_session_key, &key, sizeof(key)); | ||
902 | |||
903 | /* get the ticket's lifetime */ | ||
904 | life = *p++ * 5 * 60; | ||
905 | _debug("KIV LIFE : %u", life); | ||
906 | |||
907 | /* get the issue time of the ticket */ | ||
908 | if (little_endian) { | ||
909 | __le32 stamp; | ||
910 | memcpy(&stamp, p, 4); | ||
911 | issue = le32_to_cpu(stamp); | ||
912 | } else { | ||
913 | __be32 stamp; | ||
914 | memcpy(&stamp, p, 4); | ||
915 | issue = be32_to_cpu(stamp); | ||
916 | } | ||
917 | p += 4; | ||
918 | now = xtime.tv_sec; | ||
919 | _debug("KIV ISSUE: %lx [%lx]", issue, now); | ||
920 | |||
921 | /* check the ticket is in date */ | ||
922 | if (issue > now) { | ||
923 | *_abort_code = RXKADNOAUTH; | ||
924 | ret = -EKEYREJECTED; | ||
925 | goto error; | ||
926 | } | ||
927 | |||
928 | if (issue < now - life) { | ||
929 | *_abort_code = RXKADEXPIRED; | ||
930 | ret = -EKEYEXPIRED; | ||
931 | goto error; | ||
932 | } | ||
933 | |||
934 | *_expiry = issue + life; | ||
935 | |||
936 | /* get the service name */ | ||
937 | name = Z(SNAME_SZ); | ||
938 | _debug("KIV SNAME: %s", name); | ||
939 | |||
940 | /* get the service instance name */ | ||
941 | name = Z(INST_SZ); | ||
942 | _debug("KIV SINST: %s", name); | ||
943 | |||
944 | ret = 0; | ||
945 | error: | ||
946 | _leave(" = %d", ret); | ||
947 | return ret; | ||
948 | |||
949 | bad_ticket: | ||
950 | *_abort_code = RXKADBADTICKET; | ||
951 | ret = -EBADMSG; | ||
952 | goto error; | ||
953 | } | ||
954 | |||
955 | /* | ||
956 | * decrypt the response packet | ||
957 | */ | ||
958 | static void rxkad_decrypt_response(struct rxrpc_connection *conn, | ||
959 | struct rxkad_response *resp, | ||
960 | const struct rxrpc_crypt *session_key) | ||
961 | { | ||
962 | struct blkcipher_desc desc; | ||
963 | struct scatterlist ssg[2], dsg[2]; | ||
964 | struct rxrpc_crypt iv; | ||
965 | |||
966 | _enter(",,%08x%08x", | ||
967 | ntohl(session_key->n[0]), ntohl(session_key->n[1])); | ||
968 | |||
969 | ASSERT(rxkad_ci != NULL); | ||
970 | |||
971 | mutex_lock(&rxkad_ci_mutex); | ||
972 | if (crypto_blkcipher_setkey(rxkad_ci, session_key->x, | ||
973 | sizeof(*session_key)) < 0) | ||
974 | BUG(); | ||
975 | |||
976 | memcpy(&iv, session_key, sizeof(iv)); | ||
977 | desc.tfm = rxkad_ci; | ||
978 | desc.info = iv.x; | ||
979 | desc.flags = 0; | ||
980 | |||
981 | rxkad_sg_set_buf2(ssg, &resp->encrypted, sizeof(resp->encrypted)); | ||
982 | memcpy(dsg, ssg, sizeof(dsg)); | ||
983 | crypto_blkcipher_decrypt_iv(&desc, dsg, ssg, sizeof(resp->encrypted)); | ||
984 | mutex_unlock(&rxkad_ci_mutex); | ||
985 | |||
986 | _leave(""); | ||
987 | } | ||
988 | |||
989 | /* | ||
990 | * verify a response | ||
991 | */ | ||
992 | static int rxkad_verify_response(struct rxrpc_connection *conn, | ||
993 | struct sk_buff *skb, | ||
994 | u32 *_abort_code) | ||
995 | { | ||
996 | struct rxkad_response response | ||
997 | __attribute__((aligned(8))); /* must be aligned for crypto */ | ||
998 | struct rxrpc_skb_priv *sp; | ||
999 | struct rxrpc_crypt session_key; | ||
1000 | time_t expiry; | ||
1001 | void *ticket; | ||
1002 | u32 abort_code, version, kvno, ticket_len, csum, level; | ||
1003 | int ret; | ||
1004 | |||
1005 | _enter("{%d,%x}", conn->debug_id, key_serial(conn->server_key)); | ||
1006 | |||
1007 | abort_code = RXKADPACKETSHORT; | ||
1008 | if (skb_copy_bits(skb, 0, &response, sizeof(response)) < 0) | ||
1009 | goto protocol_error; | ||
1010 | if (!pskb_pull(skb, sizeof(response))) | ||
1011 | BUG(); | ||
1012 | |||
1013 | version = ntohl(response.version); | ||
1014 | ticket_len = ntohl(response.ticket_len); | ||
1015 | kvno = ntohl(response.kvno); | ||
1016 | sp = rxrpc_skb(skb); | ||
1017 | _proto("Rx RESPONSE %%%u { v=%u kv=%u tl=%u }", | ||
1018 | ntohl(sp->hdr.serial), version, kvno, ticket_len); | ||
1019 | |||
1020 | abort_code = RXKADINCONSISTENCY; | ||
1021 | if (version != RXKAD_VERSION) | ||
1022 | goto protocol_error; | ||
1023 | abort_code = RXKADTICKETLEN; | ||
1024 | if (ticket_len < 4 || ticket_len > MAXKRB5TICKETLEN) | ||
1025 | goto protocol_error; | ||
1026 | |||
1027 | abort_code = RXKADUNKNOWNKEY; | ||
1028 | if (kvno >= RXKAD_TKT_TYPE_KERBEROS_V5) | ||
1029 | goto protocol_error; | ||
1030 | |||
1031 | /* extract the kerberos ticket and decrypt and decode it */ | ||
1032 | ticket = kmalloc(ticket_len, GFP_NOFS); | ||
1033 | if (!ticket) | ||
1034 | return -ENOMEM; | ||
1035 | |||
1036 | abort_code = RXKADPACKETSHORT; | ||
1037 | if (skb_copy_bits(skb, 0, ticket, ticket_len) < 0) | ||
1038 | goto protocol_error_free; | ||
1039 | |||
1040 | ret = rxkad_decrypt_ticket(conn, ticket, ticket_len, &session_key, | ||
1041 | &expiry, &abort_code); | ||
1042 | if (ret < 0) { | ||
1043 | *_abort_code = abort_code; | ||
1044 | kfree(ticket); | ||
1045 | return ret; | ||
1046 | } | ||
1047 | |||
1048 | /* use the session key from inside the ticket to decrypt the | ||
1049 | * response */ | ||
1050 | rxkad_decrypt_response(conn, &response, &session_key); | ||
1051 | |||
1052 | abort_code = RXKADSEALEDINCON; | ||
1053 | if (response.encrypted.epoch != conn->epoch) | ||
1054 | goto protocol_error_free; | ||
1055 | if (response.encrypted.cid != conn->cid) | ||
1056 | goto protocol_error_free; | ||
1057 | if (ntohl(response.encrypted.securityIndex) != conn->security_ix) | ||
1058 | goto protocol_error_free; | ||
1059 | csum = response.encrypted.checksum; | ||
1060 | response.encrypted.checksum = 0; | ||
1061 | rxkad_calc_response_checksum(&response); | ||
1062 | if (response.encrypted.checksum != csum) | ||
1063 | goto protocol_error_free; | ||
1064 | |||
1065 | if (ntohl(response.encrypted.call_id[0]) > INT_MAX || | ||
1066 | ntohl(response.encrypted.call_id[1]) > INT_MAX || | ||
1067 | ntohl(response.encrypted.call_id[2]) > INT_MAX || | ||
1068 | ntohl(response.encrypted.call_id[3]) > INT_MAX) | ||
1069 | goto protocol_error_free; | ||
1070 | |||
1071 | abort_code = RXKADOUTOFSEQUENCE; | ||
1072 | if (response.encrypted.inc_nonce != htonl(conn->security_nonce + 1)) | ||
1073 | goto protocol_error_free; | ||
1074 | |||
1075 | abort_code = RXKADLEVELFAIL; | ||
1076 | level = ntohl(response.encrypted.level); | ||
1077 | if (level > RXRPC_SECURITY_ENCRYPT) | ||
1078 | goto protocol_error_free; | ||
1079 | conn->security_level = level; | ||
1080 | |||
1081 | /* create a key to hold the security data and expiration time - after | ||
1082 | * this the connection security can be handled in exactly the same way | ||
1083 | * as for a client connection */ | ||
1084 | ret = rxrpc_get_server_data_key(conn, &session_key, expiry, kvno); | ||
1085 | if (ret < 0) { | ||
1086 | kfree(ticket); | ||
1087 | return ret; | ||
1088 | } | ||
1089 | |||
1090 | kfree(ticket); | ||
1091 | _leave(" = 0"); | ||
1092 | return 0; | ||
1093 | |||
1094 | protocol_error_free: | ||
1095 | kfree(ticket); | ||
1096 | protocol_error: | ||
1097 | *_abort_code = abort_code; | ||
1098 | _leave(" = -EPROTO [%d]", abort_code); | ||
1099 | return -EPROTO; | ||
1100 | } | ||
1101 | |||
1102 | /* | ||
1103 | * clear the connection security | ||
1104 | */ | ||
1105 | static void rxkad_clear(struct rxrpc_connection *conn) | ||
1106 | { | ||
1107 | _enter(""); | ||
1108 | |||
1109 | if (conn->cipher) | ||
1110 | crypto_free_blkcipher(conn->cipher); | ||
1111 | } | ||
1112 | |||
1113 | /* | ||
1114 | * RxRPC Kerberos-based security | ||
1115 | */ | ||
1116 | static struct rxrpc_security rxkad = { | ||
1117 | .owner = THIS_MODULE, | ||
1118 | .name = "rxkad", | ||
1119 | .security_index = RXKAD_VERSION, | ||
1120 | .init_connection_security = rxkad_init_connection_security, | ||
1121 | .prime_packet_security = rxkad_prime_packet_security, | ||
1122 | .secure_packet = rxkad_secure_packet, | ||
1123 | .verify_packet = rxkad_verify_packet, | ||
1124 | .issue_challenge = rxkad_issue_challenge, | ||
1125 | .respond_to_challenge = rxkad_respond_to_challenge, | ||
1126 | .verify_response = rxkad_verify_response, | ||
1127 | .clear = rxkad_clear, | ||
1128 | }; | ||
1129 | |||
1130 | static __init int rxkad_init(void) | ||
1131 | { | ||
1132 | _enter(""); | ||
1133 | |||
1134 | /* pin the cipher we need so that the crypto layer doesn't invoke | ||
1135 | * keventd to go get it */ | ||
1136 | rxkad_ci = crypto_alloc_blkcipher("pcbc(fcrypt)", 0, CRYPTO_ALG_ASYNC); | ||
1137 | if (IS_ERR(rxkad_ci)) | ||
1138 | return PTR_ERR(rxkad_ci); | ||
1139 | |||
1140 | return rxrpc_register_security(&rxkad); | ||
1141 | } | ||
1142 | |||
1143 | module_init(rxkad_init); | ||
1144 | |||
1145 | static __exit void rxkad_exit(void) | ||
1146 | { | ||
1147 | _enter(""); | ||
1148 | |||
1149 | rxrpc_unregister_security(&rxkad); | ||
1150 | crypto_free_blkcipher(rxkad_ci); | ||
1151 | } | ||
1152 | |||
1153 | module_exit(rxkad_exit); | ||
diff --git a/net/rxrpc/rxrpc_syms.c b/net/rxrpc/rxrpc_syms.c deleted file mode 100644 index 9896fd87a4d4..000000000000 --- a/net/rxrpc/rxrpc_syms.c +++ /dev/null | |||
@@ -1,34 +0,0 @@ | |||
1 | /* rxrpc_syms.c: exported Rx RPC layer interface symbols | ||
2 | * | ||
3 | * Copyright (C) 2002 Red Hat, Inc. All Rights Reserved. | ||
4 | * Written by David Howells (dhowells@redhat.com) | ||
5 | * | ||
6 | * This program is free software; you can redistribute it and/or | ||
7 | * modify it under the terms of the GNU General Public License | ||
8 | * as published by the Free Software Foundation; either version | ||
9 | * 2 of the License, or (at your option) any later version. | ||
10 | */ | ||
11 | |||
12 | #include <linux/module.h> | ||
13 | |||
14 | #include <rxrpc/transport.h> | ||
15 | #include <rxrpc/connection.h> | ||
16 | #include <rxrpc/call.h> | ||
17 | #include <rxrpc/krxiod.h> | ||
18 | |||
19 | /* call.c */ | ||
20 | EXPORT_SYMBOL(rxrpc_create_call); | ||
21 | EXPORT_SYMBOL(rxrpc_put_call); | ||
22 | EXPORT_SYMBOL(rxrpc_call_abort); | ||
23 | EXPORT_SYMBOL(rxrpc_call_read_data); | ||
24 | EXPORT_SYMBOL(rxrpc_call_write_data); | ||
25 | |||
26 | /* connection.c */ | ||
27 | EXPORT_SYMBOL(rxrpc_create_connection); | ||
28 | EXPORT_SYMBOL(rxrpc_put_connection); | ||
29 | |||
30 | /* transport.c */ | ||
31 | EXPORT_SYMBOL(rxrpc_create_transport); | ||
32 | EXPORT_SYMBOL(rxrpc_put_transport); | ||
33 | EXPORT_SYMBOL(rxrpc_add_service); | ||
34 | EXPORT_SYMBOL(rxrpc_del_service); | ||
diff --git a/net/rxrpc/sysctl.c b/net/rxrpc/sysctl.c deleted file mode 100644 index 884290754af7..000000000000 --- a/net/rxrpc/sysctl.c +++ /dev/null | |||
@@ -1,121 +0,0 @@ | |||
1 | /* sysctl.c: Rx RPC control | ||
2 | * | ||
3 | * Copyright (C) 2002 Red Hat, Inc. All Rights Reserved. | ||
4 | * Written by David Howells (dhowells@redhat.com) | ||
5 | * | ||
6 | * This program is free software; you can redistribute it and/or | ||
7 | * modify it under the terms of the GNU General Public License | ||
8 | * as published by the Free Software Foundation; either version | ||
9 | * 2 of the License, or (at your option) any later version. | ||
10 | */ | ||
11 | |||
12 | #include <linux/sched.h> | ||
13 | #include <linux/slab.h> | ||
14 | #include <linux/module.h> | ||
15 | #include <linux/sysctl.h> | ||
16 | #include <rxrpc/types.h> | ||
17 | #include <rxrpc/rxrpc.h> | ||
18 | #include <asm/errno.h> | ||
19 | #include "internal.h" | ||
20 | |||
21 | int rxrpc_ktrace; | ||
22 | int rxrpc_kdebug; | ||
23 | int rxrpc_kproto; | ||
24 | int rxrpc_knet; | ||
25 | |||
26 | #ifdef CONFIG_SYSCTL | ||
27 | static struct ctl_table_header *rxrpc_sysctl = NULL; | ||
28 | |||
29 | static ctl_table rxrpc_sysctl_table[] = { | ||
30 | { | ||
31 | .ctl_name = 1, | ||
32 | .procname = "kdebug", | ||
33 | .data = &rxrpc_kdebug, | ||
34 | .maxlen = sizeof(int), | ||
35 | .mode = 0644, | ||
36 | .proc_handler = &proc_dointvec | ||
37 | }, | ||
38 | { | ||
39 | .ctl_name = 2, | ||
40 | .procname = "ktrace", | ||
41 | .data = &rxrpc_ktrace, | ||
42 | .maxlen = sizeof(int), | ||
43 | .mode = 0644, | ||
44 | .proc_handler = &proc_dointvec | ||
45 | }, | ||
46 | { | ||
47 | .ctl_name = 3, | ||
48 | .procname = "kproto", | ||
49 | .data = &rxrpc_kproto, | ||
50 | .maxlen = sizeof(int), | ||
51 | .mode = 0644, | ||
52 | .proc_handler = &proc_dointvec | ||
53 | }, | ||
54 | { | ||
55 | .ctl_name = 4, | ||
56 | .procname = "knet", | ||
57 | .data = &rxrpc_knet, | ||
58 | .maxlen = sizeof(int), | ||
59 | .mode = 0644, | ||
60 | .proc_handler = &proc_dointvec | ||
61 | }, | ||
62 | { | ||
63 | .ctl_name = 5, | ||
64 | .procname = "peertimo", | ||
65 | .data = &rxrpc_peer_timeout, | ||
66 | .maxlen = sizeof(unsigned long), | ||
67 | .mode = 0644, | ||
68 | .proc_handler = &proc_doulongvec_minmax | ||
69 | }, | ||
70 | { | ||
71 | .ctl_name = 6, | ||
72 | .procname = "conntimo", | ||
73 | .data = &rxrpc_conn_timeout, | ||
74 | .maxlen = sizeof(unsigned long), | ||
75 | .mode = 0644, | ||
76 | .proc_handler = &proc_doulongvec_minmax | ||
77 | }, | ||
78 | { .ctl_name = 0 } | ||
79 | }; | ||
80 | |||
81 | static ctl_table rxrpc_dir_sysctl_table[] = { | ||
82 | { | ||
83 | .ctl_name = 1, | ||
84 | .procname = "rxrpc", | ||
85 | .maxlen = 0, | ||
86 | .mode = 0555, | ||
87 | .child = rxrpc_sysctl_table | ||
88 | }, | ||
89 | { .ctl_name = 0 } | ||
90 | }; | ||
91 | #endif /* CONFIG_SYSCTL */ | ||
92 | |||
93 | /*****************************************************************************/ | ||
94 | /* | ||
95 | * initialise the sysctl stuff for Rx RPC | ||
96 | */ | ||
97 | int rxrpc_sysctl_init(void) | ||
98 | { | ||
99 | #ifdef CONFIG_SYSCTL | ||
100 | rxrpc_sysctl = register_sysctl_table(rxrpc_dir_sysctl_table); | ||
101 | if (!rxrpc_sysctl) | ||
102 | return -ENOMEM; | ||
103 | #endif /* CONFIG_SYSCTL */ | ||
104 | |||
105 | return 0; | ||
106 | } /* end rxrpc_sysctl_init() */ | ||
107 | |||
108 | /*****************************************************************************/ | ||
109 | /* | ||
110 | * clean up the sysctl stuff for Rx RPC | ||
111 | */ | ||
112 | void rxrpc_sysctl_cleanup(void) | ||
113 | { | ||
114 | #ifdef CONFIG_SYSCTL | ||
115 | if (rxrpc_sysctl) { | ||
116 | unregister_sysctl_table(rxrpc_sysctl); | ||
117 | rxrpc_sysctl = NULL; | ||
118 | } | ||
119 | #endif /* CONFIG_SYSCTL */ | ||
120 | |||
121 | } /* end rxrpc_sysctl_cleanup() */ | ||
diff --git a/net/rxrpc/transport.c b/net/rxrpc/transport.c deleted file mode 100644 index 8e57be2df936..000000000000 --- a/net/rxrpc/transport.c +++ /dev/null | |||
@@ -1,846 +0,0 @@ | |||
1 | /* transport.c: Rx Transport routines | ||
2 | * | ||
3 | * Copyright (C) 2002 Red Hat, Inc. All Rights Reserved. | ||
4 | * Written by David Howells (dhowells@redhat.com) | ||
5 | * | ||
6 | * This program is free software; you can redistribute it and/or | ||
7 | * modify it under the terms of the GNU General Public License | ||
8 | * as published by the Free Software Foundation; either version | ||
9 | * 2 of the License, or (at your option) any later version. | ||
10 | */ | ||
11 | |||
12 | #include <linux/slab.h> | ||
13 | #include <linux/module.h> | ||
14 | #include <rxrpc/transport.h> | ||
15 | #include <rxrpc/peer.h> | ||
16 | #include <rxrpc/connection.h> | ||
17 | #include <rxrpc/call.h> | ||
18 | #include <rxrpc/message.h> | ||
19 | #include <rxrpc/krxiod.h> | ||
20 | #include <rxrpc/krxsecd.h> | ||
21 | #include <linux/udp.h> | ||
22 | #include <linux/in.h> | ||
23 | #include <linux/in6.h> | ||
24 | #include <linux/icmp.h> | ||
25 | #include <linux/skbuff.h> | ||
26 | #include <net/sock.h> | ||
27 | #include <net/ip.h> | ||
28 | #if defined(CONFIG_IPV6) || defined (CONFIG_IPV6_MODULE) | ||
29 | #include <linux/ipv6.h> /* this should _really_ be in errqueue.h.. */ | ||
30 | #endif | ||
31 | #include <linux/errqueue.h> | ||
32 | #include <asm/uaccess.h> | ||
33 | #include "internal.h" | ||
34 | |||
35 | struct errormsg { | ||
36 | struct cmsghdr cmsg; /* control message header */ | ||
37 | struct sock_extended_err ee; /* extended error information */ | ||
38 | struct sockaddr_in icmp_src; /* ICMP packet source address */ | ||
39 | }; | ||
40 | |||
41 | static DEFINE_SPINLOCK(rxrpc_transports_lock); | ||
42 | static struct list_head rxrpc_transports = LIST_HEAD_INIT(rxrpc_transports); | ||
43 | |||
44 | __RXACCT_DECL(atomic_t rxrpc_transport_count); | ||
45 | LIST_HEAD(rxrpc_proc_transports); | ||
46 | DECLARE_RWSEM(rxrpc_proc_transports_sem); | ||
47 | |||
48 | static void rxrpc_data_ready(struct sock *sk, int count); | ||
49 | static void rxrpc_error_report(struct sock *sk); | ||
50 | static int rxrpc_trans_receive_new_call(struct rxrpc_transport *trans, | ||
51 | struct list_head *msgq); | ||
52 | static void rxrpc_trans_receive_error_report(struct rxrpc_transport *trans); | ||
53 | |||
54 | /*****************************************************************************/ | ||
55 | /* | ||
56 | * create a new transport endpoint using the specified UDP port | ||
57 | */ | ||
58 | int rxrpc_create_transport(unsigned short port, | ||
59 | struct rxrpc_transport **_trans) | ||
60 | { | ||
61 | struct rxrpc_transport *trans; | ||
62 | struct sockaddr_in sin; | ||
63 | mm_segment_t oldfs; | ||
64 | struct sock *sock; | ||
65 | int ret, opt; | ||
66 | |||
67 | _enter("%hu", port); | ||
68 | |||
69 | trans = kzalloc(sizeof(struct rxrpc_transport), GFP_KERNEL); | ||
70 | if (!trans) | ||
71 | return -ENOMEM; | ||
72 | |||
73 | atomic_set(&trans->usage, 1); | ||
74 | INIT_LIST_HEAD(&trans->services); | ||
75 | INIT_LIST_HEAD(&trans->link); | ||
76 | INIT_LIST_HEAD(&trans->krxiodq_link); | ||
77 | spin_lock_init(&trans->lock); | ||
78 | INIT_LIST_HEAD(&trans->peer_active); | ||
79 | INIT_LIST_HEAD(&trans->peer_graveyard); | ||
80 | spin_lock_init(&trans->peer_gylock); | ||
81 | init_waitqueue_head(&trans->peer_gy_waitq); | ||
82 | rwlock_init(&trans->peer_lock); | ||
83 | atomic_set(&trans->peer_count, 0); | ||
84 | trans->port = port; | ||
85 | |||
86 | /* create a UDP socket to be my actual transport endpoint */ | ||
87 | ret = sock_create_kern(PF_INET, SOCK_DGRAM, IPPROTO_UDP, &trans->socket); | ||
88 | if (ret < 0) | ||
89 | goto error; | ||
90 | |||
91 | /* use the specified port */ | ||
92 | if (port) { | ||
93 | memset(&sin, 0, sizeof(sin)); | ||
94 | sin.sin_family = AF_INET; | ||
95 | sin.sin_port = htons(port); | ||
96 | ret = trans->socket->ops->bind(trans->socket, | ||
97 | (struct sockaddr *) &sin, | ||
98 | sizeof(sin)); | ||
99 | if (ret < 0) | ||
100 | goto error; | ||
101 | } | ||
102 | |||
103 | opt = 1; | ||
104 | oldfs = get_fs(); | ||
105 | set_fs(KERNEL_DS); | ||
106 | ret = trans->socket->ops->setsockopt(trans->socket, SOL_IP, IP_RECVERR, | ||
107 | (char *) &opt, sizeof(opt)); | ||
108 | set_fs(oldfs); | ||
109 | |||
110 | spin_lock(&rxrpc_transports_lock); | ||
111 | list_add(&trans->link, &rxrpc_transports); | ||
112 | spin_unlock(&rxrpc_transports_lock); | ||
113 | |||
114 | /* set the socket up */ | ||
115 | sock = trans->socket->sk; | ||
116 | sock->sk_user_data = trans; | ||
117 | sock->sk_data_ready = rxrpc_data_ready; | ||
118 | sock->sk_error_report = rxrpc_error_report; | ||
119 | |||
120 | down_write(&rxrpc_proc_transports_sem); | ||
121 | list_add_tail(&trans->proc_link, &rxrpc_proc_transports); | ||
122 | up_write(&rxrpc_proc_transports_sem); | ||
123 | |||
124 | __RXACCT(atomic_inc(&rxrpc_transport_count)); | ||
125 | |||
126 | *_trans = trans; | ||
127 | _leave(" = 0 (%p)", trans); | ||
128 | return 0; | ||
129 | |||
130 | error: | ||
131 | /* finish cleaning up the transport (not really needed here, but...) */ | ||
132 | if (trans->socket) | ||
133 | trans->socket->ops->shutdown(trans->socket, 2); | ||
134 | |||
135 | /* close the socket */ | ||
136 | if (trans->socket) { | ||
137 | trans->socket->sk->sk_user_data = NULL; | ||
138 | sock_release(trans->socket); | ||
139 | trans->socket = NULL; | ||
140 | } | ||
141 | |||
142 | kfree(trans); | ||
143 | |||
144 | |||
145 | _leave(" = %d", ret); | ||
146 | return ret; | ||
147 | } /* end rxrpc_create_transport() */ | ||
148 | |||
149 | /*****************************************************************************/ | ||
150 | /* | ||
151 | * destroy a transport endpoint | ||
152 | */ | ||
153 | void rxrpc_put_transport(struct rxrpc_transport *trans) | ||
154 | { | ||
155 | _enter("%p{u=%d p=%hu}", | ||
156 | trans, atomic_read(&trans->usage), trans->port); | ||
157 | |||
158 | BUG_ON(atomic_read(&trans->usage) <= 0); | ||
159 | |||
160 | /* to prevent a race, the decrement and the dequeue must be | ||
161 | * effectively atomic */ | ||
162 | spin_lock(&rxrpc_transports_lock); | ||
163 | if (likely(!atomic_dec_and_test(&trans->usage))) { | ||
164 | spin_unlock(&rxrpc_transports_lock); | ||
165 | _leave(""); | ||
166 | return; | ||
167 | } | ||
168 | |||
169 | list_del(&trans->link); | ||
170 | spin_unlock(&rxrpc_transports_lock); | ||
171 | |||
172 | /* finish cleaning up the transport */ | ||
173 | if (trans->socket) | ||
174 | trans->socket->ops->shutdown(trans->socket, 2); | ||
175 | |||
176 | rxrpc_krxsecd_clear_transport(trans); | ||
177 | rxrpc_krxiod_dequeue_transport(trans); | ||
178 | |||
179 | /* discard all peer information */ | ||
180 | rxrpc_peer_clearall(trans); | ||
181 | |||
182 | down_write(&rxrpc_proc_transports_sem); | ||
183 | list_del(&trans->proc_link); | ||
184 | up_write(&rxrpc_proc_transports_sem); | ||
185 | __RXACCT(atomic_dec(&rxrpc_transport_count)); | ||
186 | |||
187 | /* close the socket */ | ||
188 | if (trans->socket) { | ||
189 | trans->socket->sk->sk_user_data = NULL; | ||
190 | sock_release(trans->socket); | ||
191 | trans->socket = NULL; | ||
192 | } | ||
193 | |||
194 | kfree(trans); | ||
195 | |||
196 | _leave(""); | ||
197 | } /* end rxrpc_put_transport() */ | ||
198 | |||
199 | /*****************************************************************************/ | ||
200 | /* | ||
201 | * add a service to a transport to be listened upon | ||
202 | */ | ||
203 | int rxrpc_add_service(struct rxrpc_transport *trans, | ||
204 | struct rxrpc_service *newsrv) | ||
205 | { | ||
206 | struct rxrpc_service *srv; | ||
207 | struct list_head *_p; | ||
208 | int ret = -EEXIST; | ||
209 | |||
210 | _enter("%p{%hu},%p{%hu}", | ||
211 | trans, trans->port, newsrv, newsrv->service_id); | ||
212 | |||
213 | /* verify that the service ID is not already present */ | ||
214 | spin_lock(&trans->lock); | ||
215 | |||
216 | list_for_each(_p, &trans->services) { | ||
217 | srv = list_entry(_p, struct rxrpc_service, link); | ||
218 | if (srv->service_id == newsrv->service_id) | ||
219 | goto out; | ||
220 | } | ||
221 | |||
222 | /* okay - add the transport to the list */ | ||
223 | list_add_tail(&newsrv->link, &trans->services); | ||
224 | rxrpc_get_transport(trans); | ||
225 | ret = 0; | ||
226 | |||
227 | out: | ||
228 | spin_unlock(&trans->lock); | ||
229 | |||
230 | _leave("= %d", ret); | ||
231 | return ret; | ||
232 | } /* end rxrpc_add_service() */ | ||
233 | |||
234 | /*****************************************************************************/ | ||
235 | /* | ||
236 | * remove a service from a transport | ||
237 | */ | ||
238 | void rxrpc_del_service(struct rxrpc_transport *trans, struct rxrpc_service *srv) | ||
239 | { | ||
240 | _enter("%p{%hu},%p{%hu}", trans, trans->port, srv, srv->service_id); | ||
241 | |||
242 | spin_lock(&trans->lock); | ||
243 | list_del(&srv->link); | ||
244 | spin_unlock(&trans->lock); | ||
245 | |||
246 | rxrpc_put_transport(trans); | ||
247 | |||
248 | _leave(""); | ||
249 | } /* end rxrpc_del_service() */ | ||
250 | |||
251 | /*****************************************************************************/ | ||
252 | /* | ||
253 | * INET callback when data has been received on the socket. | ||
254 | */ | ||
255 | static void rxrpc_data_ready(struct sock *sk, int count) | ||
256 | { | ||
257 | struct rxrpc_transport *trans; | ||
258 | |||
259 | _enter("%p{t=%p},%d", sk, sk->sk_user_data, count); | ||
260 | |||
261 | /* queue the transport for attention by krxiod */ | ||
262 | trans = (struct rxrpc_transport *) sk->sk_user_data; | ||
263 | if (trans) | ||
264 | rxrpc_krxiod_queue_transport(trans); | ||
265 | |||
266 | /* wake up anyone waiting on the socket */ | ||
267 | if (sk->sk_sleep && waitqueue_active(sk->sk_sleep)) | ||
268 | wake_up_interruptible(sk->sk_sleep); | ||
269 | |||
270 | _leave(""); | ||
271 | } /* end rxrpc_data_ready() */ | ||
272 | |||
273 | /*****************************************************************************/ | ||
274 | /* | ||
275 | * INET callback when an ICMP error packet is received | ||
276 | * - sk->err is error (EHOSTUNREACH, EPROTO or EMSGSIZE) | ||
277 | */ | ||
278 | static void rxrpc_error_report(struct sock *sk) | ||
279 | { | ||
280 | struct rxrpc_transport *trans; | ||
281 | |||
282 | _enter("%p{t=%p}", sk, sk->sk_user_data); | ||
283 | |||
284 | /* queue the transport for attention by krxiod */ | ||
285 | trans = (struct rxrpc_transport *) sk->sk_user_data; | ||
286 | if (trans) { | ||
287 | trans->error_rcvd = 1; | ||
288 | rxrpc_krxiod_queue_transport(trans); | ||
289 | } | ||
290 | |||
291 | /* wake up anyone waiting on the socket */ | ||
292 | if (sk->sk_sleep && waitqueue_active(sk->sk_sleep)) | ||
293 | wake_up_interruptible(sk->sk_sleep); | ||
294 | |||
295 | _leave(""); | ||
296 | } /* end rxrpc_error_report() */ | ||
297 | |||
298 | /*****************************************************************************/ | ||
299 | /* | ||
300 | * split a message up, allocating message records and filling them in | ||
301 | * from the contents of a socket buffer | ||
302 | */ | ||
303 | static int rxrpc_incoming_msg(struct rxrpc_transport *trans, | ||
304 | struct sk_buff *pkt, | ||
305 | struct list_head *msgq) | ||
306 | { | ||
307 | struct rxrpc_message *msg; | ||
308 | int ret; | ||
309 | |||
310 | _enter(""); | ||
311 | |||
312 | msg = kzalloc(sizeof(struct rxrpc_message), GFP_KERNEL); | ||
313 | if (!msg) { | ||
314 | _leave(" = -ENOMEM"); | ||
315 | return -ENOMEM; | ||
316 | } | ||
317 | |||
318 | atomic_set(&msg->usage, 1); | ||
319 | list_add_tail(&msg->link,msgq); | ||
320 | |||
321 | /* dig out the Rx routing parameters */ | ||
322 | if (skb_copy_bits(pkt, sizeof(struct udphdr), | ||
323 | &msg->hdr, sizeof(msg->hdr)) < 0) { | ||
324 | ret = -EBADMSG; | ||
325 | goto error; | ||
326 | } | ||
327 | |||
328 | msg->trans = trans; | ||
329 | msg->state = RXRPC_MSG_RECEIVED; | ||
330 | skb_get_timestamp(pkt, &msg->stamp); | ||
331 | if (msg->stamp.tv_sec == 0) { | ||
332 | do_gettimeofday(&msg->stamp); | ||
333 | if (pkt->sk) | ||
334 | sock_enable_timestamp(pkt->sk); | ||
335 | } | ||
336 | msg->seq = ntohl(msg->hdr.seq); | ||
337 | |||
338 | /* attach the packet */ | ||
339 | skb_get(pkt); | ||
340 | msg->pkt = pkt; | ||
341 | |||
342 | msg->offset = sizeof(struct udphdr) + sizeof(struct rxrpc_header); | ||
343 | msg->dsize = msg->pkt->len - msg->offset; | ||
344 | |||
345 | _net("Rx Received packet from %s (%08x;%08x,%1x,%d,%s,%02x,%d,%d)", | ||
346 | msg->hdr.flags & RXRPC_CLIENT_INITIATED ? "client" : "server", | ||
347 | ntohl(msg->hdr.epoch), | ||
348 | (ntohl(msg->hdr.cid) & RXRPC_CIDMASK) >> RXRPC_CIDSHIFT, | ||
349 | ntohl(msg->hdr.cid) & RXRPC_CHANNELMASK, | ||
350 | ntohl(msg->hdr.callNumber), | ||
351 | rxrpc_pkts[msg->hdr.type], | ||
352 | msg->hdr.flags, | ||
353 | ntohs(msg->hdr.serviceId), | ||
354 | msg->hdr.securityIndex); | ||
355 | |||
356 | __RXACCT(atomic_inc(&rxrpc_message_count)); | ||
357 | |||
358 | /* split off jumbo packets */ | ||
359 | while (msg->hdr.type == RXRPC_PACKET_TYPE_DATA && | ||
360 | msg->hdr.flags & RXRPC_JUMBO_PACKET | ||
361 | ) { | ||
362 | struct rxrpc_jumbo_header jumbo; | ||
363 | struct rxrpc_message *jumbomsg = msg; | ||
364 | |||
365 | _debug("split jumbo packet"); | ||
366 | |||
367 | /* quick sanity check */ | ||
368 | ret = -EBADMSG; | ||
369 | if (msg->dsize < | ||
370 | RXRPC_JUMBO_DATALEN + sizeof(struct rxrpc_jumbo_header)) | ||
371 | goto error; | ||
372 | if (msg->hdr.flags & RXRPC_LAST_PACKET) | ||
373 | goto error; | ||
374 | |||
375 | /* dig out the secondary header */ | ||
376 | if (skb_copy_bits(pkt, msg->offset + RXRPC_JUMBO_DATALEN, | ||
377 | &jumbo, sizeof(jumbo)) < 0) | ||
378 | goto error; | ||
379 | |||
380 | /* allocate a new message record */ | ||
381 | ret = -ENOMEM; | ||
382 | msg = kmemdup(jumbomsg, sizeof(struct rxrpc_message), GFP_KERNEL); | ||
383 | if (!msg) | ||
384 | goto error; | ||
385 | |||
386 | list_add_tail(&msg->link, msgq); | ||
387 | |||
388 | /* adjust the jumbo packet */ | ||
389 | jumbomsg->dsize = RXRPC_JUMBO_DATALEN; | ||
390 | |||
391 | /* attach the packet here too */ | ||
392 | skb_get(pkt); | ||
393 | |||
394 | /* adjust the parameters */ | ||
395 | msg->seq++; | ||
396 | msg->hdr.seq = htonl(msg->seq); | ||
397 | msg->hdr.serial = htonl(ntohl(msg->hdr.serial) + 1); | ||
398 | msg->offset += RXRPC_JUMBO_DATALEN + | ||
399 | sizeof(struct rxrpc_jumbo_header); | ||
400 | msg->dsize -= RXRPC_JUMBO_DATALEN + | ||
401 | sizeof(struct rxrpc_jumbo_header); | ||
402 | msg->hdr.flags = jumbo.flags; | ||
403 | msg->hdr._rsvd = jumbo._rsvd; | ||
404 | |||
405 | _net("Rx Split jumbo packet from %s" | ||
406 | " (%08x;%08x,%1x,%d,%s,%02x,%d,%d)", | ||
407 | msg->hdr.flags & RXRPC_CLIENT_INITIATED ? "client" : "server", | ||
408 | ntohl(msg->hdr.epoch), | ||
409 | (ntohl(msg->hdr.cid) & RXRPC_CIDMASK) >> RXRPC_CIDSHIFT, | ||
410 | ntohl(msg->hdr.cid) & RXRPC_CHANNELMASK, | ||
411 | ntohl(msg->hdr.callNumber), | ||
412 | rxrpc_pkts[msg->hdr.type], | ||
413 | msg->hdr.flags, | ||
414 | ntohs(msg->hdr.serviceId), | ||
415 | msg->hdr.securityIndex); | ||
416 | |||
417 | __RXACCT(atomic_inc(&rxrpc_message_count)); | ||
418 | } | ||
419 | |||
420 | _leave(" = 0 #%d", atomic_read(&rxrpc_message_count)); | ||
421 | return 0; | ||
422 | |||
423 | error: | ||
424 | while (!list_empty(msgq)) { | ||
425 | msg = list_entry(msgq->next, struct rxrpc_message, link); | ||
426 | list_del_init(&msg->link); | ||
427 | |||
428 | rxrpc_put_message(msg); | ||
429 | } | ||
430 | |||
431 | _leave(" = %d", ret); | ||
432 | return ret; | ||
433 | } /* end rxrpc_incoming_msg() */ | ||
434 | |||
/*****************************************************************************/
/*
 * receive and process packets arriving on a transport's UDP socket
 * - called from krxiod in process context
 * - loops until the socket's receive queue is drained (skb_recv_datagram
 *   returns -EAGAIN), dispatching each datagram to the appropriate
 *   connection or to the new-call path
 */
void rxrpc_trans_receive_packet(struct rxrpc_transport *trans)
{
	struct rxrpc_message *msg;
	struct rxrpc_peer *peer;
	struct sk_buff *pkt;
	int ret;
	__be32 addr;
	__be16 port;

	/* a single datagram may be split into several messages (jumbo
	 * packets), all queued here */
	LIST_HEAD(msgq);

	_enter("%p{%d}", trans, trans->port);

	for (;;) {
		/* deal with outstanding errors first */
		if (trans->error_rcvd)
			rxrpc_trans_receive_error_report(trans);

		/* attempt to receive a packet (non-blocking) */
		pkt = skb_recv_datagram(trans->socket->sk, 0, 1, &ret);
		if (!pkt) {
			if (ret == -EAGAIN) {
				/* queue drained - we're done */
				_leave(" EAGAIN");
				return;
			}

			/* an icmp error may have occurred */
			rxrpc_krxiod_queue_transport(trans);
			_leave(" error %d\n", ret);
			return;
		}

		/* we'll probably need to checksum it (didn't call
		 * sock_recvmsg) */
		if (skb_checksum_complete(pkt)) {
			kfree_skb(pkt);
			/* requeue ourselves to try the next packet */
			rxrpc_krxiod_queue_transport(trans);
			_leave(" CSUM failed");
			return;
		}

		/* note the source address before the headers are consumed */
		addr = pkt->nh.iph->saddr;
		port = pkt->h.uh->source;

		_net("Rx Received UDP packet from %08x:%04hu",
		     ntohl(addr), ntohs(port));

		/* unmarshall the Rx parameters and split jumbo packets */
		ret = rxrpc_incoming_msg(trans, pkt, &msgq);
		if (ret < 0) {
			kfree_skb(pkt);
			rxrpc_krxiod_queue_transport(trans);
			_leave(" bad packet");
			return;
		}

		/* rxrpc_incoming_msg() succeeding guarantees at least one
		 * message on the queue */
		BUG_ON(list_empty(&msgq));

		msg = list_entry(msgq.next, struct rxrpc_message, link);

		/* locate the record for the peer from which it
		 * originated */
		ret = rxrpc_peer_lookup(trans, addr, &peer);
		if (ret < 0) {
			kdebug("Rx No connections from that peer");
			rxrpc_trans_immediate_abort(trans, msg, -EINVAL);
			goto finished_msg;
		}

		/* try and find a matching connection */
		ret = rxrpc_connection_lookup(peer, msg, &msg->conn);
		if (ret < 0) {
			kdebug("Rx Unknown Connection");
			rxrpc_trans_immediate_abort(trans, msg, -EINVAL);
			rxrpc_put_peer(peer);
			goto finished_msg;
		}
		/* the connection holds what we need; drop the peer ref taken
		 * by rxrpc_peer_lookup() */
		rxrpc_put_peer(peer);

		/* deal with the first packet of a new call */
		if (msg->hdr.flags & RXRPC_CLIENT_INITIATED &&
		    msg->hdr.type == RXRPC_PACKET_TYPE_DATA &&
		    ntohl(msg->hdr.seq) == 1
		    ) {
			_debug("Rx New server call");
			/* hands off the first message; any remainder is
			 * discarded at finished_msg */
			rxrpc_trans_receive_new_call(trans, &msgq);
			goto finished_msg;
		}

		/* deal with subsequent packet(s) of call */
		_debug("Rx Call packet");
		while (!list_empty(&msgq)) {
			msg = list_entry(msgq.next, struct rxrpc_message, link);
			list_del_init(&msg->link);

			ret = rxrpc_conn_receive_call_packet(msg->conn, NULL, msg);
			if (ret < 0) {
				rxrpc_trans_immediate_abort(trans, msg, ret);
				rxrpc_put_message(msg);
				goto finished_msg;
			}

			rxrpc_put_message(msg);
		}

		goto finished_msg;

		/* dispose of the packets: drop any messages still queued,
		 * then release the datagram itself */
	finished_msg:
		while (!list_empty(&msgq)) {
			msg = list_entry(msgq.next, struct rxrpc_message, link);
			list_del_init(&msg->link);

			rxrpc_put_message(msg);
		}
		kfree_skb(pkt);
	}

	/* not reached - the loop only exits via return */
	_leave("");

} /* end rxrpc_trans_receive_packet() */
561 | |||
562 | /*****************************************************************************/ | ||
563 | /* | ||
564 | * accept a new call from a client trying to connect to one of my services | ||
565 | * - called in process context | ||
566 | */ | ||
567 | static int rxrpc_trans_receive_new_call(struct rxrpc_transport *trans, | ||
568 | struct list_head *msgq) | ||
569 | { | ||
570 | struct rxrpc_message *msg; | ||
571 | |||
572 | _enter(""); | ||
573 | |||
574 | /* only bother with the first packet */ | ||
575 | msg = list_entry(msgq->next, struct rxrpc_message, link); | ||
576 | list_del_init(&msg->link); | ||
577 | rxrpc_krxsecd_queue_incoming_call(msg); | ||
578 | rxrpc_put_message(msg); | ||
579 | |||
580 | _leave(" = 0"); | ||
581 | |||
582 | return 0; | ||
583 | } /* end rxrpc_trans_receive_new_call() */ | ||
584 | |||
585 | /*****************************************************************************/ | ||
586 | /* | ||
587 | * perform an immediate abort without connection or call structures | ||
588 | */ | ||
589 | int rxrpc_trans_immediate_abort(struct rxrpc_transport *trans, | ||
590 | struct rxrpc_message *msg, | ||
591 | int error) | ||
592 | { | ||
593 | struct rxrpc_header ahdr; | ||
594 | struct sockaddr_in sin; | ||
595 | struct msghdr msghdr; | ||
596 | struct kvec iov[2]; | ||
597 | __be32 _error; | ||
598 | int len, ret; | ||
599 | |||
600 | _enter("%p,%p,%d", trans, msg, error); | ||
601 | |||
602 | /* don't abort an abort packet */ | ||
603 | if (msg->hdr.type == RXRPC_PACKET_TYPE_ABORT) { | ||
604 | _leave(" = 0"); | ||
605 | return 0; | ||
606 | } | ||
607 | |||
608 | _error = htonl(-error); | ||
609 | |||
610 | /* set up the message to be transmitted */ | ||
611 | memcpy(&ahdr, &msg->hdr, sizeof(ahdr)); | ||
612 | ahdr.epoch = msg->hdr.epoch; | ||
613 | ahdr.serial = htonl(1); | ||
614 | ahdr.seq = 0; | ||
615 | ahdr.type = RXRPC_PACKET_TYPE_ABORT; | ||
616 | ahdr.flags = RXRPC_LAST_PACKET; | ||
617 | ahdr.flags |= ~msg->hdr.flags & RXRPC_CLIENT_INITIATED; | ||
618 | |||
619 | iov[0].iov_len = sizeof(ahdr); | ||
620 | iov[0].iov_base = &ahdr; | ||
621 | iov[1].iov_len = sizeof(_error); | ||
622 | iov[1].iov_base = &_error; | ||
623 | |||
624 | len = sizeof(ahdr) + sizeof(_error); | ||
625 | |||
626 | memset(&sin,0,sizeof(sin)); | ||
627 | sin.sin_family = AF_INET; | ||
628 | sin.sin_port = msg->pkt->h.uh->source; | ||
629 | sin.sin_addr.s_addr = msg->pkt->nh.iph->saddr; | ||
630 | |||
631 | msghdr.msg_name = &sin; | ||
632 | msghdr.msg_namelen = sizeof(sin); | ||
633 | msghdr.msg_control = NULL; | ||
634 | msghdr.msg_controllen = 0; | ||
635 | msghdr.msg_flags = MSG_DONTWAIT; | ||
636 | |||
637 | _net("Sending message type %d of %d bytes to %08x:%d", | ||
638 | ahdr.type, | ||
639 | len, | ||
640 | ntohl(sin.sin_addr.s_addr), | ||
641 | ntohs(sin.sin_port)); | ||
642 | |||
643 | /* send the message */ | ||
644 | ret = kernel_sendmsg(trans->socket, &msghdr, iov, 2, len); | ||
645 | |||
646 | _leave(" = %d", ret); | ||
647 | return ret; | ||
648 | } /* end rxrpc_trans_immediate_abort() */ | ||
649 | |||
/*****************************************************************************/
/*
 * receive an ICMP error report and percolate it to all connections
 * heading to the affected host or port
 * - drains the socket's MSG_ERRQUEUE in a loop until it returns -EAGAIN
 * - translates ICMP unreachable codes into errnos; port == 0 marks an
 *   error that affects the whole host rather than one service port
 */
static void rxrpc_trans_receive_error_report(struct rxrpc_transport *trans)
{
	struct rxrpc_connection *conn;
	struct sockaddr_in sin;
	struct rxrpc_peer *peer;
	struct list_head connq, *_p;
	struct errormsg emsg;
	struct msghdr msg;
	__be16 port;
	int local, err;

	_enter("%p", trans);

	for (;;) {
		/* clear the flag before receiving so a report arriving
		 * mid-loop re-marks the transport */
		trans->error_rcvd = 0;

		/* try and receive an error message */
		msg.msg_name = &sin;
		msg.msg_namelen = sizeof(sin);
		msg.msg_control = &emsg;
		msg.msg_controllen = sizeof(emsg);
		msg.msg_flags = 0;

		err = kernel_recvmsg(trans->socket, &msg, NULL, 0, 0,
				     MSG_ERRQUEUE | MSG_DONTWAIT | MSG_TRUNC);

		if (err == -EAGAIN) {
			/* error queue drained */
			_leave("");
			return;
		}

		if (err < 0) {
			printk("%s: unable to recv an error report: %d\n",
			       __FUNCTION__, err);
			_leave("");
			return;
		}

		/* recvmsg advanced msg_control past the cmsgs it wrote;
		 * recompute how much control data we actually got */
		msg.msg_controllen = (char *) msg.msg_control - (char *) &emsg;

		if (msg.msg_controllen < sizeof(emsg.cmsg) ||
		    msg.msg_namelen < sizeof(sin)) {
			printk("%s: short control message"
			       " (nlen=%u clen=%Zu fl=%x)\n",
			       __FUNCTION__,
			       msg.msg_namelen,
			       msg.msg_controllen,
			       msg.msg_flags);
			continue;
		}

		_net("Rx Received control message"
		     " { len=%Zu level=%u type=%u }",
		     emsg.cmsg.cmsg_len,
		     emsg.cmsg.cmsg_level,
		     emsg.cmsg.cmsg_type);

		if (sin.sin_family != AF_INET) {
			printk("Rx Ignoring error report with non-INET address"
			       " (fam=%u)",
			       sin.sin_family);
			continue;
		}

		_net("Rx Received message pertaining to host addr=%x port=%hu",
		     ntohl(sin.sin_addr.s_addr), ntohs(sin.sin_port));

		/* only IP_RECVERR extended error reports are of interest */
		if (emsg.cmsg.cmsg_level != SOL_IP ||
		    emsg.cmsg.cmsg_type != IP_RECVERR) {
			printk("Rx Ignoring unknown error report"
			       " { level=%u type=%u }",
			       emsg.cmsg.cmsg_level,
			       emsg.cmsg.cmsg_type);
			continue;
		}

		/* make sure the sock_extended_err payload is present too */
		if (msg.msg_controllen < sizeof(emsg.cmsg) + sizeof(emsg.ee)) {
			printk("%s: short error message (%Zu)\n",
			       __FUNCTION__, msg.msg_controllen);
			_leave("");
			return;
		}

		port = sin.sin_port;

		/* map the extended error onto (local, err, port); port is
		 * zeroed when the error affects the whole host */
		switch (emsg.ee.ee_origin) {
		case SO_EE_ORIGIN_ICMP:
			local = 0;
			switch (emsg.ee.ee_type) {
			case ICMP_DEST_UNREACH:
				switch (emsg.ee.ee_code) {
				case ICMP_NET_UNREACH:
					_net("Rx Received ICMP Network Unreachable");
					port = 0;
					err = -ENETUNREACH;
					break;
				case ICMP_HOST_UNREACH:
					_net("Rx Received ICMP Host Unreachable");
					port = 0;
					err = -EHOSTUNREACH;
					break;
				case ICMP_PORT_UNREACH:
					_net("Rx Received ICMP Port Unreachable");
					err = -ECONNREFUSED;
					break;
				case ICMP_NET_UNKNOWN:
					_net("Rx Received ICMP Unknown Network");
					port = 0;
					err = -ENETUNREACH;
					break;
				case ICMP_HOST_UNKNOWN:
					_net("Rx Received ICMP Unknown Host");
					port = 0;
					err = -EHOSTUNREACH;
					break;
				default:
					_net("Rx Received ICMP DestUnreach { code=%u }",
					     emsg.ee.ee_code);
					err = emsg.ee.ee_errno;
					break;
				}
				break;

			case ICMP_TIME_EXCEEDED:
				_net("Rx Received ICMP TTL Exceeded");
				err = emsg.ee.ee_errno;
				break;

			default:
				_proto("Rx Received ICMP error { type=%u code=%u }",
				       emsg.ee.ee_type, emsg.ee.ee_code);
				err = emsg.ee.ee_errno;
				break;
			}
			break;

		case SO_EE_ORIGIN_LOCAL:
			_proto("Rx Received local error { error=%d }",
			       emsg.ee.ee_errno);
			local = 1;
			err = emsg.ee.ee_errno;
			break;

		case SO_EE_ORIGIN_NONE:
		case SO_EE_ORIGIN_ICMP6:
		default:
			_proto("Rx Received error report { orig=%u }",
			       emsg.ee.ee_origin);
			local = 0;
			err = emsg.ee.ee_errno;
			break;
		}

		/* find all the connections between this transport and the
		 * affected destination */
		INIT_LIST_HEAD(&connq);

		if (rxrpc_peer_lookup(trans, sin.sin_addr.s_addr,
				      &peer) == 0) {
			/* harvest matching connections under the lock,
			 * taking a ref on each so they can be serviced
			 * safely after it is dropped */
			read_lock(&peer->conn_lock);
			list_for_each(_p, &peer->conn_active) {
				conn = list_entry(_p, struct rxrpc_connection,
						  link);
				if (port && conn->addr.sin_port != port)
					continue;
				/* non-empty err_link means it's already
				 * queued for error handling */
				if (!list_empty(&conn->err_link))
					continue;

				rxrpc_get_connection(conn);
				list_add_tail(&conn->err_link, &connq);
			}
			read_unlock(&peer->conn_lock);

			/* service all those connections */
			while (!list_empty(&connq)) {
				conn = list_entry(connq.next,
						  struct rxrpc_connection,
						  err_link);
				list_del(&conn->err_link);

				rxrpc_conn_handle_error(conn, local, err);

				rxrpc_put_connection(conn);
			}

			rxrpc_put_peer(peer);
		}
	}

	/* not reached - the loop only exits via return */
	_leave("");
	return;
} /* end rxrpc_trans_receive_error_report() */