diff options
author | David Howells <dhowells@redhat.com> | 2007-04-26 18:55:48 -0400 |
---|---|---|
committer | David S. Miller <davem@davemloft.net> | 2007-04-26 18:55:48 -0400 |
commit | 63b6be55e8b51cb718468794d343058e96c7462c (patch) | |
tree | 83c06275ba6b7ca1ffbf8c2e52497d7bceed567c /net | |
parent | 08e0e7c82eeadec6f4871a386b86bf0f0fbcb4eb (diff) |
[AF_RXRPC]: Delete the old RxRPC code.
Delete the old RxRPC code as it's now no longer used.
Signed-off-by: David Howells <dhowells@redhat.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
Diffstat (limited to 'net')
-rw-r--r-- | net/rxrpc/Makefile | 27 | ||||
-rw-r--r-- | net/rxrpc/call.c | 2277 | ||||
-rw-r--r-- | net/rxrpc/connection.c | 777 | ||||
-rw-r--r-- | net/rxrpc/internal.h | 106 | ||||
-rw-r--r-- | net/rxrpc/krxiod.c | 262 | ||||
-rw-r--r-- | net/rxrpc/krxsecd.c | 270 | ||||
-rw-r--r-- | net/rxrpc/krxtimod.c | 204 | ||||
-rw-r--r-- | net/rxrpc/main.c | 180 | ||||
-rw-r--r-- | net/rxrpc/peer.c | 398 | ||||
-rw-r--r-- | net/rxrpc/proc.c | 617 | ||||
-rw-r--r-- | net/rxrpc/rxrpc_syms.c | 34 | ||||
-rw-r--r-- | net/rxrpc/sysctl.c | 121 | ||||
-rw-r--r-- | net/rxrpc/transport.c | 846 |
13 files changed, 1 insertions, 6118 deletions
diff --git a/net/rxrpc/Makefile b/net/rxrpc/Makefile index 07bf82ffec6a..c46867c61c98 100644 --- a/net/rxrpc/Makefile +++ b/net/rxrpc/Makefile | |||
@@ -1,9 +1,7 @@ | |||
1 | # | 1 | # |
2 | # Makefile for Linux kernel Rx RPC | 2 | # Makefile for Linux kernel RxRPC |
3 | # | 3 | # |
4 | 4 | ||
5 | #CFLAGS += -finstrument-functions | ||
6 | |||
7 | af-rxrpc-objs := \ | 5 | af-rxrpc-objs := \ |
8 | af_rxrpc.o \ | 6 | af_rxrpc.o \ |
9 | ar-accept.o \ | 7 | ar-accept.o \ |
@@ -29,26 +27,3 @@ endif | |||
29 | obj-$(CONFIG_AF_RXRPC) += af-rxrpc.o | 27 | obj-$(CONFIG_AF_RXRPC) += af-rxrpc.o |
30 | 28 | ||
31 | obj-$(CONFIG_RXKAD) += rxkad.o | 29 | obj-$(CONFIG_RXKAD) += rxkad.o |
32 | |||
33 | # | ||
34 | # obsolete RxRPC interface, still used by fs/afs/ | ||
35 | # | ||
36 | rxrpc-objs := \ | ||
37 | call.o \ | ||
38 | connection.o \ | ||
39 | krxiod.o \ | ||
40 | krxsecd.o \ | ||
41 | krxtimod.o \ | ||
42 | main.o \ | ||
43 | peer.o \ | ||
44 | rxrpc_syms.o \ | ||
45 | transport.o | ||
46 | |||
47 | ifeq ($(CONFIG_PROC_FS),y) | ||
48 | rxrpc-objs += proc.o | ||
49 | endif | ||
50 | ifeq ($(CONFIG_SYSCTL),y) | ||
51 | rxrpc-objs += sysctl.o | ||
52 | endif | ||
53 | |||
54 | obj-$(CONFIG_RXRPC) += rxrpc.o | ||
diff --git a/net/rxrpc/call.c b/net/rxrpc/call.c deleted file mode 100644 index d07122b57e0d..000000000000 --- a/net/rxrpc/call.c +++ /dev/null | |||
@@ -1,2277 +0,0 @@ | |||
1 | /* call.c: Rx call routines | ||
2 | * | ||
3 | * Copyright (C) 2002 Red Hat, Inc. All Rights Reserved. | ||
4 | * Written by David Howells (dhowells@redhat.com) | ||
5 | * | ||
6 | * This program is free software; you can redistribute it and/or | ||
7 | * modify it under the terms of the GNU General Public License | ||
8 | * as published by the Free Software Foundation; either version | ||
9 | * 2 of the License, or (at your option) any later version. | ||
10 | */ | ||
11 | |||
12 | #include <linux/sched.h> | ||
13 | #include <linux/slab.h> | ||
14 | #include <linux/module.h> | ||
15 | #include <rxrpc/rxrpc.h> | ||
16 | #include <rxrpc/transport.h> | ||
17 | #include <rxrpc/peer.h> | ||
18 | #include <rxrpc/connection.h> | ||
19 | #include <rxrpc/call.h> | ||
20 | #include <rxrpc/message.h> | ||
21 | #include "internal.h" | ||
22 | |||
23 | __RXACCT_DECL(atomic_t rxrpc_call_count); | ||
24 | __RXACCT_DECL(atomic_t rxrpc_message_count); | ||
25 | |||
26 | LIST_HEAD(rxrpc_calls); | ||
27 | DECLARE_RWSEM(rxrpc_calls_sem); | ||
28 | |||
29 | unsigned rxrpc_call_rcv_timeout = HZ/3; | ||
30 | static unsigned rxrpc_call_acks_timeout = HZ/3; | ||
31 | static unsigned rxrpc_call_dfr_ack_timeout = HZ/20; | ||
32 | static unsigned short rxrpc_call_max_resend = HZ/10; | ||
33 | |||
34 | const char *rxrpc_call_states[] = { | ||
35 | "COMPLETE", | ||
36 | "ERROR", | ||
37 | "SRVR_RCV_OPID", | ||
38 | "SRVR_RCV_ARGS", | ||
39 | "SRVR_GOT_ARGS", | ||
40 | "SRVR_SND_REPLY", | ||
41 | "SRVR_RCV_FINAL_ACK", | ||
42 | "CLNT_SND_ARGS", | ||
43 | "CLNT_RCV_REPLY", | ||
44 | "CLNT_GOT_REPLY" | ||
45 | }; | ||
46 | |||
47 | const char *rxrpc_call_error_states[] = { | ||
48 | "NO_ERROR", | ||
49 | "LOCAL_ABORT", | ||
50 | "PEER_ABORT", | ||
51 | "LOCAL_ERROR", | ||
52 | "REMOTE_ERROR" | ||
53 | }; | ||
54 | |||
55 | const char *rxrpc_pkts[] = { | ||
56 | "?00", | ||
57 | "data", "ack", "busy", "abort", "ackall", "chall", "resp", "debug", | ||
58 | "?09", "?10", "?11", "?12", "?13", "?14", "?15" | ||
59 | }; | ||
60 | |||
61 | static const char *rxrpc_acks[] = { | ||
62 | "---", "REQ", "DUP", "SEQ", "WIN", "MEM", "PNG", "PNR", "DLY", "IDL", | ||
63 | "-?-" | ||
64 | }; | ||
65 | |||
66 | static const char _acktype[] = "NA-"; | ||
67 | |||
68 | static void rxrpc_call_receive_packet(struct rxrpc_call *call); | ||
69 | static void rxrpc_call_receive_data_packet(struct rxrpc_call *call, | ||
70 | struct rxrpc_message *msg); | ||
71 | static void rxrpc_call_receive_ack_packet(struct rxrpc_call *call, | ||
72 | struct rxrpc_message *msg); | ||
73 | static void rxrpc_call_definitively_ACK(struct rxrpc_call *call, | ||
74 | rxrpc_seq_t higest); | ||
75 | static void rxrpc_call_resend(struct rxrpc_call *call, rxrpc_seq_t highest); | ||
76 | static int __rxrpc_call_read_data(struct rxrpc_call *call); | ||
77 | |||
78 | static int rxrpc_call_record_ACK(struct rxrpc_call *call, | ||
79 | struct rxrpc_message *msg, | ||
80 | rxrpc_seq_t seq, | ||
81 | size_t count); | ||
82 | |||
83 | static int rxrpc_call_flush(struct rxrpc_call *call); | ||
84 | |||
85 | #define _state(call) \ | ||
86 | _debug("[[[ state %s ]]]", rxrpc_call_states[call->app_call_state]); | ||
87 | |||
88 | static void rxrpc_call_default_attn_func(struct rxrpc_call *call) | ||
89 | { | ||
90 | wake_up(&call->waitq); | ||
91 | } | ||
92 | |||
93 | static void rxrpc_call_default_error_func(struct rxrpc_call *call) | ||
94 | { | ||
95 | wake_up(&call->waitq); | ||
96 | } | ||
97 | |||
98 | static void rxrpc_call_default_aemap_func(struct rxrpc_call *call) | ||
99 | { | ||
100 | switch (call->app_err_state) { | ||
101 | case RXRPC_ESTATE_LOCAL_ABORT: | ||
102 | call->app_abort_code = -call->app_errno; | ||
103 | case RXRPC_ESTATE_PEER_ABORT: | ||
104 | call->app_errno = -ECONNABORTED; | ||
105 | default: | ||
106 | break; | ||
107 | } | ||
108 | } | ||
109 | |||
110 | static void __rxrpc_call_acks_timeout(unsigned long _call) | ||
111 | { | ||
112 | struct rxrpc_call *call = (struct rxrpc_call *) _call; | ||
113 | |||
114 | _debug("ACKS TIMEOUT %05lu", jiffies - call->cjif); | ||
115 | |||
116 | call->flags |= RXRPC_CALL_ACKS_TIMO; | ||
117 | rxrpc_krxiod_queue_call(call); | ||
118 | } | ||
119 | |||
120 | static void __rxrpc_call_rcv_timeout(unsigned long _call) | ||
121 | { | ||
122 | struct rxrpc_call *call = (struct rxrpc_call *) _call; | ||
123 | |||
124 | _debug("RCV TIMEOUT %05lu", jiffies - call->cjif); | ||
125 | |||
126 | call->flags |= RXRPC_CALL_RCV_TIMO; | ||
127 | rxrpc_krxiod_queue_call(call); | ||
128 | } | ||
129 | |||
130 | static void __rxrpc_call_ackr_timeout(unsigned long _call) | ||
131 | { | ||
132 | struct rxrpc_call *call = (struct rxrpc_call *) _call; | ||
133 | |||
134 | _debug("ACKR TIMEOUT %05lu",jiffies - call->cjif); | ||
135 | |||
136 | call->flags |= RXRPC_CALL_ACKR_TIMO; | ||
137 | rxrpc_krxiod_queue_call(call); | ||
138 | } | ||
139 | |||
140 | /*****************************************************************************/ | ||
141 | /* | ||
142 | * calculate a timeout based on an RTT value | ||
143 | */ | ||
144 | static inline unsigned long __rxrpc_rtt_based_timeout(struct rxrpc_call *call, | ||
145 | unsigned long val) | ||
146 | { | ||
147 | unsigned long expiry = call->conn->peer->rtt / (1000000 / HZ); | ||
148 | |||
149 | expiry += 10; | ||
150 | if (expiry < HZ / 25) | ||
151 | expiry = HZ / 25; | ||
152 | if (expiry > HZ) | ||
153 | expiry = HZ; | ||
154 | |||
155 | _leave(" = %lu jiffies", expiry); | ||
156 | return jiffies + expiry; | ||
157 | } /* end __rxrpc_rtt_based_timeout() */ | ||
158 | |||
159 | /*****************************************************************************/ | ||
160 | /* | ||
161 | * create a new call record | ||
162 | */ | ||
163 | static inline int __rxrpc_create_call(struct rxrpc_connection *conn, | ||
164 | struct rxrpc_call **_call) | ||
165 | { | ||
166 | struct rxrpc_call *call; | ||
167 | |||
168 | _enter("%p", conn); | ||
169 | |||
170 | /* allocate and initialise a call record */ | ||
171 | call = (struct rxrpc_call *) get_zeroed_page(GFP_KERNEL); | ||
172 | if (!call) { | ||
173 | _leave(" ENOMEM"); | ||
174 | return -ENOMEM; | ||
175 | } | ||
176 | |||
177 | atomic_set(&call->usage, 1); | ||
178 | |||
179 | init_waitqueue_head(&call->waitq); | ||
180 | spin_lock_init(&call->lock); | ||
181 | INIT_LIST_HEAD(&call->link); | ||
182 | INIT_LIST_HEAD(&call->acks_pendq); | ||
183 | INIT_LIST_HEAD(&call->rcv_receiveq); | ||
184 | INIT_LIST_HEAD(&call->rcv_krxiodq_lk); | ||
185 | INIT_LIST_HEAD(&call->app_readyq); | ||
186 | INIT_LIST_HEAD(&call->app_unreadyq); | ||
187 | INIT_LIST_HEAD(&call->app_link); | ||
188 | INIT_LIST_HEAD(&call->app_attn_link); | ||
189 | |||
190 | init_timer(&call->acks_timeout); | ||
191 | call->acks_timeout.data = (unsigned long) call; | ||
192 | call->acks_timeout.function = __rxrpc_call_acks_timeout; | ||
193 | |||
194 | init_timer(&call->rcv_timeout); | ||
195 | call->rcv_timeout.data = (unsigned long) call; | ||
196 | call->rcv_timeout.function = __rxrpc_call_rcv_timeout; | ||
197 | |||
198 | init_timer(&call->ackr_dfr_timo); | ||
199 | call->ackr_dfr_timo.data = (unsigned long) call; | ||
200 | call->ackr_dfr_timo.function = __rxrpc_call_ackr_timeout; | ||
201 | |||
202 | call->conn = conn; | ||
203 | call->ackr_win_bot = 1; | ||
204 | call->ackr_win_top = call->ackr_win_bot + RXRPC_CALL_ACK_WINDOW_SIZE - 1; | ||
205 | call->ackr_prev_seq = 0; | ||
206 | call->app_mark = RXRPC_APP_MARK_EOF; | ||
207 | call->app_attn_func = rxrpc_call_default_attn_func; | ||
208 | call->app_error_func = rxrpc_call_default_error_func; | ||
209 | call->app_aemap_func = rxrpc_call_default_aemap_func; | ||
210 | call->app_scr_alloc = call->app_scratch; | ||
211 | |||
212 | call->cjif = jiffies; | ||
213 | |||
214 | _leave(" = 0 (%p)", call); | ||
215 | |||
216 | *_call = call; | ||
217 | |||
218 | return 0; | ||
219 | } /* end __rxrpc_create_call() */ | ||
220 | |||
221 | /*****************************************************************************/ | ||
222 | /* | ||
223 | * create a new call record for outgoing calls | ||
224 | */ | ||
225 | int rxrpc_create_call(struct rxrpc_connection *conn, | ||
226 | rxrpc_call_attn_func_t attn, | ||
227 | rxrpc_call_error_func_t error, | ||
228 | rxrpc_call_aemap_func_t aemap, | ||
229 | struct rxrpc_call **_call) | ||
230 | { | ||
231 | DECLARE_WAITQUEUE(myself, current); | ||
232 | |||
233 | struct rxrpc_call *call; | ||
234 | int ret, cix, loop; | ||
235 | |||
236 | _enter("%p", conn); | ||
237 | |||
238 | /* allocate and initialise a call record */ | ||
239 | ret = __rxrpc_create_call(conn, &call); | ||
240 | if (ret < 0) { | ||
241 | _leave(" = %d", ret); | ||
242 | return ret; | ||
243 | } | ||
244 | |||
245 | call->app_call_state = RXRPC_CSTATE_CLNT_SND_ARGS; | ||
246 | if (attn) | ||
247 | call->app_attn_func = attn; | ||
248 | if (error) | ||
249 | call->app_error_func = error; | ||
250 | if (aemap) | ||
251 | call->app_aemap_func = aemap; | ||
252 | |||
253 | _state(call); | ||
254 | |||
255 | spin_lock(&conn->lock); | ||
256 | set_current_state(TASK_INTERRUPTIBLE); | ||
257 | add_wait_queue(&conn->chanwait, &myself); | ||
258 | |||
259 | try_again: | ||
260 | /* try to find an unused channel */ | ||
261 | for (cix = 0; cix < 4; cix++) | ||
262 | if (!conn->channels[cix]) | ||
263 | goto obtained_chan; | ||
264 | |||
265 | /* no free channels - wait for one to become available */ | ||
266 | ret = -EINTR; | ||
267 | if (signal_pending(current)) | ||
268 | goto error_unwait; | ||
269 | |||
270 | spin_unlock(&conn->lock); | ||
271 | |||
272 | schedule(); | ||
273 | set_current_state(TASK_INTERRUPTIBLE); | ||
274 | |||
275 | spin_lock(&conn->lock); | ||
276 | goto try_again; | ||
277 | |||
278 | /* got a channel - now attach to the connection */ | ||
279 | obtained_chan: | ||
280 | remove_wait_queue(&conn->chanwait, &myself); | ||
281 | set_current_state(TASK_RUNNING); | ||
282 | |||
283 | /* concoct a unique call number */ | ||
284 | next_callid: | ||
285 | call->call_id = htonl(++conn->call_counter); | ||
286 | for (loop = 0; loop < 4; loop++) | ||
287 | if (conn->channels[loop] && | ||
288 | conn->channels[loop]->call_id == call->call_id) | ||
289 | goto next_callid; | ||
290 | |||
291 | rxrpc_get_connection(conn); | ||
292 | conn->channels[cix] = call; /* assign _after_ done callid check loop */ | ||
293 | do_gettimeofday(&conn->atime); | ||
294 | call->chan_ix = htonl(cix); | ||
295 | |||
296 | spin_unlock(&conn->lock); | ||
297 | |||
298 | down_write(&rxrpc_calls_sem); | ||
299 | list_add_tail(&call->call_link, &rxrpc_calls); | ||
300 | up_write(&rxrpc_calls_sem); | ||
301 | |||
302 | __RXACCT(atomic_inc(&rxrpc_call_count)); | ||
303 | *_call = call; | ||
304 | |||
305 | _leave(" = 0 (call=%p cix=%u)", call, cix); | ||
306 | return 0; | ||
307 | |||
308 | error_unwait: | ||
309 | remove_wait_queue(&conn->chanwait, &myself); | ||
310 | set_current_state(TASK_RUNNING); | ||
311 | spin_unlock(&conn->lock); | ||
312 | |||
313 | free_page((unsigned long) call); | ||
314 | _leave(" = %d", ret); | ||
315 | return ret; | ||
316 | } /* end rxrpc_create_call() */ | ||
317 | |||
318 | /*****************************************************************************/ | ||
319 | /* | ||
320 | * create a new call record for incoming calls | ||
321 | */ | ||
322 | int rxrpc_incoming_call(struct rxrpc_connection *conn, | ||
323 | struct rxrpc_message *msg, | ||
324 | struct rxrpc_call **_call) | ||
325 | { | ||
326 | struct rxrpc_call *call; | ||
327 | unsigned cix; | ||
328 | int ret; | ||
329 | |||
330 | cix = ntohl(msg->hdr.cid) & RXRPC_CHANNELMASK; | ||
331 | |||
332 | _enter("%p,%u,%u", conn, ntohl(msg->hdr.callNumber), cix); | ||
333 | |||
334 | /* allocate and initialise a call record */ | ||
335 | ret = __rxrpc_create_call(conn, &call); | ||
336 | if (ret < 0) { | ||
337 | _leave(" = %d", ret); | ||
338 | return ret; | ||
339 | } | ||
340 | |||
341 | call->pkt_rcv_count = 1; | ||
342 | call->app_call_state = RXRPC_CSTATE_SRVR_RCV_OPID; | ||
343 | call->app_mark = sizeof(uint32_t); | ||
344 | |||
345 | _state(call); | ||
346 | |||
347 | /* attach to the connection */ | ||
348 | ret = -EBUSY; | ||
349 | call->chan_ix = htonl(cix); | ||
350 | call->call_id = msg->hdr.callNumber; | ||
351 | |||
352 | spin_lock(&conn->lock); | ||
353 | |||
354 | if (!conn->channels[cix] || | ||
355 | conn->channels[cix]->app_call_state == RXRPC_CSTATE_COMPLETE || | ||
356 | conn->channels[cix]->app_call_state == RXRPC_CSTATE_ERROR | ||
357 | ) { | ||
358 | conn->channels[cix] = call; | ||
359 | rxrpc_get_connection(conn); | ||
360 | ret = 0; | ||
361 | } | ||
362 | |||
363 | spin_unlock(&conn->lock); | ||
364 | |||
365 | if (ret < 0) { | ||
366 | free_page((unsigned long) call); | ||
367 | call = NULL; | ||
368 | } | ||
369 | |||
370 | if (ret == 0) { | ||
371 | down_write(&rxrpc_calls_sem); | ||
372 | list_add_tail(&call->call_link, &rxrpc_calls); | ||
373 | up_write(&rxrpc_calls_sem); | ||
374 | __RXACCT(atomic_inc(&rxrpc_call_count)); | ||
375 | *_call = call; | ||
376 | } | ||
377 | |||
378 | _leave(" = %d [%p]", ret, call); | ||
379 | return ret; | ||
380 | } /* end rxrpc_incoming_call() */ | ||
381 | |||
382 | /*****************************************************************************/ | ||
383 | /* | ||
384 | * free a call record | ||
385 | */ | ||
386 | void rxrpc_put_call(struct rxrpc_call *call) | ||
387 | { | ||
388 | struct rxrpc_connection *conn = call->conn; | ||
389 | struct rxrpc_message *msg; | ||
390 | |||
391 | _enter("%p{u=%d}",call,atomic_read(&call->usage)); | ||
392 | |||
393 | /* sanity check */ | ||
394 | if (atomic_read(&call->usage) <= 0) | ||
395 | BUG(); | ||
396 | |||
397 | /* to prevent a race, the decrement and the de-list must be effectively | ||
398 | * atomic */ | ||
399 | spin_lock(&conn->lock); | ||
400 | if (likely(!atomic_dec_and_test(&call->usage))) { | ||
401 | spin_unlock(&conn->lock); | ||
402 | _leave(""); | ||
403 | return; | ||
404 | } | ||
405 | |||
406 | if (conn->channels[ntohl(call->chan_ix)] == call) | ||
407 | conn->channels[ntohl(call->chan_ix)] = NULL; | ||
408 | |||
409 | spin_unlock(&conn->lock); | ||
410 | |||
411 | wake_up(&conn->chanwait); | ||
412 | |||
413 | rxrpc_put_connection(conn); | ||
414 | |||
415 | /* clear the timers and dequeue from krxiod */ | ||
416 | del_timer_sync(&call->acks_timeout); | ||
417 | del_timer_sync(&call->rcv_timeout); | ||
418 | del_timer_sync(&call->ackr_dfr_timo); | ||
419 | |||
420 | rxrpc_krxiod_dequeue_call(call); | ||
421 | |||
422 | /* clean up the contents of the struct */ | ||
423 | if (call->snd_nextmsg) | ||
424 | rxrpc_put_message(call->snd_nextmsg); | ||
425 | |||
426 | if (call->snd_ping) | ||
427 | rxrpc_put_message(call->snd_ping); | ||
428 | |||
429 | while (!list_empty(&call->acks_pendq)) { | ||
430 | msg = list_entry(call->acks_pendq.next, | ||
431 | struct rxrpc_message, link); | ||
432 | list_del(&msg->link); | ||
433 | rxrpc_put_message(msg); | ||
434 | } | ||
435 | |||
436 | while (!list_empty(&call->rcv_receiveq)) { | ||
437 | msg = list_entry(call->rcv_receiveq.next, | ||
438 | struct rxrpc_message, link); | ||
439 | list_del(&msg->link); | ||
440 | rxrpc_put_message(msg); | ||
441 | } | ||
442 | |||
443 | while (!list_empty(&call->app_readyq)) { | ||
444 | msg = list_entry(call->app_readyq.next, | ||
445 | struct rxrpc_message, link); | ||
446 | list_del(&msg->link); | ||
447 | rxrpc_put_message(msg); | ||
448 | } | ||
449 | |||
450 | while (!list_empty(&call->app_unreadyq)) { | ||
451 | msg = list_entry(call->app_unreadyq.next, | ||
452 | struct rxrpc_message, link); | ||
453 | list_del(&msg->link); | ||
454 | rxrpc_put_message(msg); | ||
455 | } | ||
456 | |||
457 | module_put(call->owner); | ||
458 | |||
459 | down_write(&rxrpc_calls_sem); | ||
460 | list_del(&call->call_link); | ||
461 | up_write(&rxrpc_calls_sem); | ||
462 | |||
463 | __RXACCT(atomic_dec(&rxrpc_call_count)); | ||
464 | free_page((unsigned long) call); | ||
465 | |||
466 | _leave(" [destroyed]"); | ||
467 | } /* end rxrpc_put_call() */ | ||
468 | |||
469 | /*****************************************************************************/ | ||
470 | /* | ||
471 | * actually generate a normal ACK | ||
472 | */ | ||
473 | static inline int __rxrpc_call_gen_normal_ACK(struct rxrpc_call *call, | ||
474 | rxrpc_seq_t seq) | ||
475 | { | ||
476 | struct rxrpc_message *msg; | ||
477 | struct kvec diov[3]; | ||
478 | __be32 aux[4]; | ||
479 | int delta, ret; | ||
480 | |||
481 | /* ACKs default to DELAY */ | ||
482 | if (!call->ackr.reason) | ||
483 | call->ackr.reason = RXRPC_ACK_DELAY; | ||
484 | |||
485 | _proto("Rx %05lu Sending ACK { m=%hu f=#%u p=#%u s=%%%u r=%s n=%u }", | ||
486 | jiffies - call->cjif, | ||
487 | ntohs(call->ackr.maxSkew), | ||
488 | ntohl(call->ackr.firstPacket), | ||
489 | ntohl(call->ackr.previousPacket), | ||
490 | ntohl(call->ackr.serial), | ||
491 | rxrpc_acks[call->ackr.reason], | ||
492 | call->ackr.nAcks); | ||
493 | |||
494 | aux[0] = htonl(call->conn->peer->if_mtu); /* interface MTU */ | ||
495 | aux[1] = htonl(1444); /* max MTU */ | ||
496 | aux[2] = htonl(16); /* rwind */ | ||
497 | aux[3] = htonl(4); /* max packets */ | ||
498 | |||
499 | diov[0].iov_len = sizeof(struct rxrpc_ackpacket); | ||
500 | diov[0].iov_base = &call->ackr; | ||
501 | diov[1].iov_len = call->ackr_pend_cnt + 3; | ||
502 | diov[1].iov_base = call->ackr_array; | ||
503 | diov[2].iov_len = sizeof(aux); | ||
504 | diov[2].iov_base = &aux; | ||
505 | |||
506 | /* build and send the message */ | ||
507 | ret = rxrpc_conn_newmsg(call->conn,call, RXRPC_PACKET_TYPE_ACK, | ||
508 | 3, diov, GFP_KERNEL, &msg); | ||
509 | if (ret < 0) | ||
510 | goto out; | ||
511 | |||
512 | msg->seq = seq; | ||
513 | msg->hdr.seq = htonl(seq); | ||
514 | msg->hdr.flags |= RXRPC_SLOW_START_OK; | ||
515 | |||
516 | ret = rxrpc_conn_sendmsg(call->conn, msg); | ||
517 | rxrpc_put_message(msg); | ||
518 | if (ret < 0) | ||
519 | goto out; | ||
520 | call->pkt_snd_count++; | ||
521 | |||
522 | /* count how many actual ACKs there were at the front */ | ||
523 | for (delta = 0; delta < call->ackr_pend_cnt; delta++) | ||
524 | if (call->ackr_array[delta] != RXRPC_ACK_TYPE_ACK) | ||
525 | break; | ||
526 | |||
527 | call->ackr_pend_cnt -= delta; /* all ACK'd to this point */ | ||
528 | |||
529 | /* crank the ACK window around */ | ||
530 | if (delta == 0) { | ||
531 | /* un-ACK'd window */ | ||
532 | } | ||
533 | else if (delta < RXRPC_CALL_ACK_WINDOW_SIZE) { | ||
534 | /* partially ACK'd window | ||
535 | * - shuffle down to avoid losing out-of-sequence packets | ||
536 | */ | ||
537 | call->ackr_win_bot += delta; | ||
538 | call->ackr_win_top += delta; | ||
539 | |||
540 | memmove(&call->ackr_array[0], | ||
541 | &call->ackr_array[delta], | ||
542 | call->ackr_pend_cnt); | ||
543 | |||
544 | memset(&call->ackr_array[call->ackr_pend_cnt], | ||
545 | RXRPC_ACK_TYPE_NACK, | ||
546 | sizeof(call->ackr_array) - call->ackr_pend_cnt); | ||
547 | } | ||
548 | else { | ||
549 | /* fully ACK'd window | ||
550 | * - just clear the whole thing | ||
551 | */ | ||
552 | memset(&call->ackr_array, | ||
553 | RXRPC_ACK_TYPE_NACK, | ||
554 | sizeof(call->ackr_array)); | ||
555 | } | ||
556 | |||
557 | /* clear this ACK */ | ||
558 | memset(&call->ackr, 0, sizeof(call->ackr)); | ||
559 | |||
560 | out: | ||
561 | if (!call->app_call_state) | ||
562 | printk("___ STATE 0 ___\n"); | ||
563 | return ret; | ||
564 | } /* end __rxrpc_call_gen_normal_ACK() */ | ||
565 | |||
566 | /*****************************************************************************/ | ||
567 | /* | ||
568 | * note the reception of a packet in the call's ACK records and generate an | ||
569 | * appropriate ACK packet if necessary | ||
570 | * - returns 0 if packet should be processed, 1 if packet should be ignored | ||
571 | * and -ve on an error | ||
572 | */ | ||
573 | static int rxrpc_call_generate_ACK(struct rxrpc_call *call, | ||
574 | struct rxrpc_header *hdr, | ||
575 | struct rxrpc_ackpacket *ack) | ||
576 | { | ||
577 | struct rxrpc_message *msg; | ||
578 | rxrpc_seq_t seq; | ||
579 | unsigned offset; | ||
580 | int ret = 0, err; | ||
581 | u8 special_ACK, do_ACK, force; | ||
582 | |||
583 | _enter("%p,%p { seq=%d tp=%d fl=%02x }", | ||
584 | call, hdr, ntohl(hdr->seq), hdr->type, hdr->flags); | ||
585 | |||
586 | seq = ntohl(hdr->seq); | ||
587 | offset = seq - call->ackr_win_bot; | ||
588 | do_ACK = RXRPC_ACK_DELAY; | ||
589 | special_ACK = 0; | ||
590 | force = (seq == 1); | ||
591 | |||
592 | if (call->ackr_high_seq < seq) | ||
593 | call->ackr_high_seq = seq; | ||
594 | |||
595 | /* deal with generation of obvious special ACKs first */ | ||
596 | if (ack && ack->reason == RXRPC_ACK_PING) { | ||
597 | special_ACK = RXRPC_ACK_PING_RESPONSE; | ||
598 | ret = 1; | ||
599 | goto gen_ACK; | ||
600 | } | ||
601 | |||
602 | if (seq < call->ackr_win_bot) { | ||
603 | special_ACK = RXRPC_ACK_DUPLICATE; | ||
604 | ret = 1; | ||
605 | goto gen_ACK; | ||
606 | } | ||
607 | |||
608 | if (seq >= call->ackr_win_top) { | ||
609 | special_ACK = RXRPC_ACK_EXCEEDS_WINDOW; | ||
610 | ret = 1; | ||
611 | goto gen_ACK; | ||
612 | } | ||
613 | |||
614 | if (call->ackr_array[offset] != RXRPC_ACK_TYPE_NACK) { | ||
615 | special_ACK = RXRPC_ACK_DUPLICATE; | ||
616 | ret = 1; | ||
617 | goto gen_ACK; | ||
618 | } | ||
619 | |||
620 | /* okay... it's a normal data packet inside the ACK window */ | ||
621 | call->ackr_array[offset] = RXRPC_ACK_TYPE_ACK; | ||
622 | |||
623 | if (offset < call->ackr_pend_cnt) { | ||
624 | } | ||
625 | else if (offset > call->ackr_pend_cnt) { | ||
626 | do_ACK = RXRPC_ACK_OUT_OF_SEQUENCE; | ||
627 | call->ackr_pend_cnt = offset; | ||
628 | goto gen_ACK; | ||
629 | } | ||
630 | |||
631 | if (hdr->flags & RXRPC_REQUEST_ACK) { | ||
632 | do_ACK = RXRPC_ACK_REQUESTED; | ||
633 | } | ||
634 | |||
635 | /* generate an ACK on the final packet of a reply just received */ | ||
636 | if (hdr->flags & RXRPC_LAST_PACKET) { | ||
637 | if (call->conn->out_clientflag) | ||
638 | force = 1; | ||
639 | } | ||
640 | else if (!(hdr->flags & RXRPC_MORE_PACKETS)) { | ||
641 | do_ACK = RXRPC_ACK_REQUESTED; | ||
642 | } | ||
643 | |||
644 | /* re-ACK packets previously received out-of-order */ | ||
645 | for (offset++; offset < RXRPC_CALL_ACK_WINDOW_SIZE; offset++) | ||
646 | if (call->ackr_array[offset] != RXRPC_ACK_TYPE_ACK) | ||
647 | break; | ||
648 | |||
649 | call->ackr_pend_cnt = offset; | ||
650 | |||
651 | /* generate an ACK if we fill up the window */ | ||
652 | if (call->ackr_pend_cnt >= RXRPC_CALL_ACK_WINDOW_SIZE) | ||
653 | force = 1; | ||
654 | |||
655 | gen_ACK: | ||
656 | _debug("%05lu ACKs pend=%u norm=%s special=%s%s", | ||
657 | jiffies - call->cjif, | ||
658 | call->ackr_pend_cnt, | ||
659 | rxrpc_acks[do_ACK], | ||
660 | rxrpc_acks[special_ACK], | ||
661 | force ? " immediate" : | ||
662 | do_ACK == RXRPC_ACK_REQUESTED ? " merge-req" : | ||
663 | hdr->flags & RXRPC_LAST_PACKET ? " finalise" : | ||
664 | " defer" | ||
665 | ); | ||
666 | |||
667 | /* send any pending normal ACKs if need be */ | ||
668 | if (call->ackr_pend_cnt > 0) { | ||
669 | /* fill out the appropriate form */ | ||
670 | call->ackr.bufferSpace = htons(RXRPC_CALL_ACK_WINDOW_SIZE); | ||
671 | call->ackr.maxSkew = htons(min(call->ackr_high_seq - seq, | ||
672 | 65535U)); | ||
673 | call->ackr.firstPacket = htonl(call->ackr_win_bot); | ||
674 | call->ackr.previousPacket = call->ackr_prev_seq; | ||
675 | call->ackr.serial = hdr->serial; | ||
676 | call->ackr.nAcks = call->ackr_pend_cnt; | ||
677 | |||
678 | if (do_ACK == RXRPC_ACK_REQUESTED) | ||
679 | call->ackr.reason = do_ACK; | ||
680 | |||
681 | /* generate the ACK immediately if necessary */ | ||
682 | if (special_ACK || force) { | ||
683 | err = __rxrpc_call_gen_normal_ACK( | ||
684 | call, do_ACK == RXRPC_ACK_DELAY ? 0 : seq); | ||
685 | if (err < 0) { | ||
686 | ret = err; | ||
687 | goto out; | ||
688 | } | ||
689 | } | ||
690 | } | ||
691 | |||
692 | if (call->ackr.reason == RXRPC_ACK_REQUESTED) | ||
693 | call->ackr_dfr_seq = seq; | ||
694 | |||
695 | /* start the ACK timer if not running if there are any pending deferred | ||
696 | * ACKs */ | ||
697 | if (call->ackr_pend_cnt > 0 && | ||
698 | call->ackr.reason != RXRPC_ACK_REQUESTED && | ||
699 | !timer_pending(&call->ackr_dfr_timo) | ||
700 | ) { | ||
701 | unsigned long timo; | ||
702 | |||
703 | timo = rxrpc_call_dfr_ack_timeout + jiffies; | ||
704 | |||
705 | _debug("START ACKR TIMER for cj=%lu", timo - call->cjif); | ||
706 | |||
707 | spin_lock(&call->lock); | ||
708 | mod_timer(&call->ackr_dfr_timo, timo); | ||
709 | spin_unlock(&call->lock); | ||
710 | } | ||
711 | else if ((call->ackr_pend_cnt == 0 || | ||
712 | call->ackr.reason == RXRPC_ACK_REQUESTED) && | ||
713 | timer_pending(&call->ackr_dfr_timo) | ||
714 | ) { | ||
715 | /* stop timer if no pending ACKs */ | ||
716 | _debug("CLEAR ACKR TIMER"); | ||
717 | del_timer_sync(&call->ackr_dfr_timo); | ||
718 | } | ||
719 | |||
720 | /* send a special ACK if one is required */ | ||
721 | if (special_ACK) { | ||
722 | struct rxrpc_ackpacket ack; | ||
723 | struct kvec diov[2]; | ||
724 | uint8_t acks[1] = { RXRPC_ACK_TYPE_ACK }; | ||
725 | |||
726 | /* fill out the appropriate form */ | ||
727 | ack.bufferSpace = htons(RXRPC_CALL_ACK_WINDOW_SIZE); | ||
728 | ack.maxSkew = htons(min(call->ackr_high_seq - seq, | ||
729 | 65535U)); | ||
730 | ack.firstPacket = htonl(call->ackr_win_bot); | ||
731 | ack.previousPacket = call->ackr_prev_seq; | ||
732 | ack.serial = hdr->serial; | ||
733 | ack.reason = special_ACK; | ||
734 | ack.nAcks = 0; | ||
735 | |||
736 | _proto("Rx Sending s-ACK" | ||
737 | " { m=%hu f=#%u p=#%u s=%%%u r=%s n=%u }", | ||
738 | ntohs(ack.maxSkew), | ||
739 | ntohl(ack.firstPacket), | ||
740 | ntohl(ack.previousPacket), | ||
741 | ntohl(ack.serial), | ||
742 | rxrpc_acks[ack.reason], | ||
743 | ack.nAcks); | ||
744 | |||
745 | diov[0].iov_len = sizeof(struct rxrpc_ackpacket); | ||
746 | diov[0].iov_base = &ack; | ||
747 | diov[1].iov_len = sizeof(acks); | ||
748 | diov[1].iov_base = acks; | ||
749 | |||
750 | /* build and send the message */ | ||
751 | err = rxrpc_conn_newmsg(call->conn,call, RXRPC_PACKET_TYPE_ACK, | ||
752 | hdr->seq ? 2 : 1, diov, | ||
753 | GFP_KERNEL, | ||
754 | &msg); | ||
755 | if (err < 0) { | ||
756 | ret = err; | ||
757 | goto out; | ||
758 | } | ||
759 | |||
760 | msg->seq = seq; | ||
761 | msg->hdr.seq = htonl(seq); | ||
762 | msg->hdr.flags |= RXRPC_SLOW_START_OK; | ||
763 | |||
764 | err = rxrpc_conn_sendmsg(call->conn, msg); | ||
765 | rxrpc_put_message(msg); | ||
766 | if (err < 0) { | ||
767 | ret = err; | ||
768 | goto out; | ||
769 | } | ||
770 | call->pkt_snd_count++; | ||
771 | } | ||
772 | |||
773 | out: | ||
774 | if (hdr->seq) | ||
775 | call->ackr_prev_seq = hdr->seq; | ||
776 | |||
777 | _leave(" = %d", ret); | ||
778 | return ret; | ||
779 | } /* end rxrpc_call_generate_ACK() */ | ||
780 | |||
781 | /*****************************************************************************/ | ||
782 | /* | ||
783 | * handle work to be done on a call | ||
784 | * - includes packet reception and timeout processing | ||
785 | */ | ||
786 | void rxrpc_call_do_stuff(struct rxrpc_call *call) | ||
787 | { | ||
788 | _enter("%p{flags=%lx}", call, call->flags); | ||
789 | |||
790 | /* handle packet reception */ | ||
791 | if (call->flags & RXRPC_CALL_RCV_PKT) { | ||
792 | _debug("- receive packet"); | ||
793 | call->flags &= ~RXRPC_CALL_RCV_PKT; | ||
794 | rxrpc_call_receive_packet(call); | ||
795 | } | ||
796 | |||
797 | /* handle overdue ACKs */ | ||
798 | if (call->flags & RXRPC_CALL_ACKS_TIMO) { | ||
799 | _debug("- overdue ACK timeout"); | ||
800 | call->flags &= ~RXRPC_CALL_ACKS_TIMO; | ||
801 | rxrpc_call_resend(call, call->snd_seq_count); | ||
802 | } | ||
803 | |||
804 | /* handle lack of reception */ | ||
805 | if (call->flags & RXRPC_CALL_RCV_TIMO) { | ||
806 | _debug("- reception timeout"); | ||
807 | call->flags &= ~RXRPC_CALL_RCV_TIMO; | ||
808 | rxrpc_call_abort(call, -EIO); | ||
809 | } | ||
810 | |||
811 | /* handle deferred ACKs */ | ||
812 | if (call->flags & RXRPC_CALL_ACKR_TIMO || | ||
813 | (call->ackr.nAcks > 0 && call->ackr.reason == RXRPC_ACK_REQUESTED) | ||
814 | ) { | ||
815 | _debug("- deferred ACK timeout: cj=%05lu r=%s n=%u", | ||
816 | jiffies - call->cjif, | ||
817 | rxrpc_acks[call->ackr.reason], | ||
818 | call->ackr.nAcks); | ||
819 | |||
820 | call->flags &= ~RXRPC_CALL_ACKR_TIMO; | ||
821 | |||
822 | if (call->ackr.nAcks > 0 && | ||
823 | call->app_call_state != RXRPC_CSTATE_ERROR) { | ||
824 | /* generate ACK */ | ||
825 | __rxrpc_call_gen_normal_ACK(call, call->ackr_dfr_seq); | ||
826 | call->ackr_dfr_seq = 0; | ||
827 | } | ||
828 | } | ||
829 | |||
830 | _leave(""); | ||
831 | |||
832 | } /* end rxrpc_call_do_stuff() */ | ||
833 | |||
834 | /*****************************************************************************/ | ||
835 | /* | ||
836 | * send an abort message at call or connection level | ||
837 | * - must be called with call->lock held | ||
838 | * - the supplied error code is sent as the packet data | ||
839 | */ | ||
840 | static int __rxrpc_call_abort(struct rxrpc_call *call, int errno) | ||
841 | { | ||
842 | struct rxrpc_connection *conn = call->conn; | ||
843 | struct rxrpc_message *msg; | ||
844 | struct kvec diov[1]; | ||
845 | int ret; | ||
846 | __be32 _error; | ||
847 | |||
848 | _enter("%p{%08x},%p{%d},%d", | ||
849 | conn, ntohl(conn->conn_id), call, ntohl(call->call_id), errno); | ||
850 | |||
851 | /* if this call is already aborted, then just wake up any waiters */ | ||
852 | if (call->app_call_state == RXRPC_CSTATE_ERROR) { | ||
853 | spin_unlock(&call->lock); | ||
854 | call->app_error_func(call); | ||
855 | _leave(" = 0"); | ||
856 | return 0; | ||
857 | } | ||
858 | |||
859 | rxrpc_get_call(call); | ||
860 | |||
861 | /* change the state _with_ the lock still held */ | ||
862 | call->app_call_state = RXRPC_CSTATE_ERROR; | ||
863 | call->app_err_state = RXRPC_ESTATE_LOCAL_ABORT; | ||
864 | call->app_errno = errno; | ||
865 | call->app_mark = RXRPC_APP_MARK_EOF; | ||
866 | call->app_read_buf = NULL; | ||
867 | call->app_async_read = 0; | ||
868 | |||
869 | _state(call); | ||
870 | |||
871 | /* ask the app to translate the error code */ | ||
872 | call->app_aemap_func(call); | ||
873 | |||
874 | spin_unlock(&call->lock); | ||
875 | |||
876 | /* flush any outstanding ACKs */ | ||
877 | del_timer_sync(&call->acks_timeout); | ||
878 | del_timer_sync(&call->rcv_timeout); | ||
879 | del_timer_sync(&call->ackr_dfr_timo); | ||
880 | |||
881 | if (rxrpc_call_is_ack_pending(call)) | ||
882 | __rxrpc_call_gen_normal_ACK(call, 0); | ||
883 | |||
884 | /* send the abort packet only if we actually traded some other | ||
885 | * packets */ | ||
886 | ret = 0; | ||
887 | if (call->pkt_snd_count || call->pkt_rcv_count) { | ||
888 | /* actually send the abort */ | ||
889 | _proto("Rx Sending Call ABORT { data=%d }", | ||
890 | call->app_abort_code); | ||
891 | |||
892 | _error = htonl(call->app_abort_code); | ||
893 | |||
894 | diov[0].iov_len = sizeof(_error); | ||
895 | diov[0].iov_base = &_error; | ||
896 | |||
897 | ret = rxrpc_conn_newmsg(conn, call, RXRPC_PACKET_TYPE_ABORT, | ||
898 | 1, diov, GFP_KERNEL, &msg); | ||
899 | if (ret == 0) { | ||
900 | ret = rxrpc_conn_sendmsg(conn, msg); | ||
901 | rxrpc_put_message(msg); | ||
902 | } | ||
903 | } | ||
904 | |||
905 | /* tell the app layer to let go */ | ||
906 | call->app_error_func(call); | ||
907 | |||
908 | rxrpc_put_call(call); | ||
909 | |||
910 | _leave(" = %d", ret); | ||
911 | return ret; | ||
912 | } /* end __rxrpc_call_abort() */ | ||
913 | |||
914 | /*****************************************************************************/ | ||
915 | /* | ||
916 | * send an abort message at call or connection level | ||
917 | * - the supplied error code is sent as the packet data | ||
918 | */ | ||
919 | int rxrpc_call_abort(struct rxrpc_call *call, int error) | ||
920 | { | ||
921 | spin_lock(&call->lock); | ||
922 | |||
923 | return __rxrpc_call_abort(call, error); | ||
924 | |||
925 | } /* end rxrpc_call_abort() */ | ||
926 | |||
927 | /*****************************************************************************/ | ||
928 | /* | ||
929 | * process packets waiting for this call | ||
930 | */ | ||
931 | static void rxrpc_call_receive_packet(struct rxrpc_call *call) | ||
932 | { | ||
933 | struct rxrpc_message *msg; | ||
934 | struct list_head *_p; | ||
935 | |||
936 | _enter("%p", call); | ||
937 | |||
938 | rxrpc_get_call(call); /* must not go away too soon if aborted by | ||
939 | * app-layer */ | ||
940 | |||
941 | while (!list_empty(&call->rcv_receiveq)) { | ||
942 | /* try to get next packet */ | ||
943 | _p = NULL; | ||
944 | spin_lock(&call->lock); | ||
945 | if (!list_empty(&call->rcv_receiveq)) { | ||
946 | _p = call->rcv_receiveq.next; | ||
947 | list_del_init(_p); | ||
948 | } | ||
949 | spin_unlock(&call->lock); | ||
950 | |||
951 | if (!_p) | ||
952 | break; | ||
953 | |||
954 | msg = list_entry(_p, struct rxrpc_message, link); | ||
955 | |||
956 | _proto("Rx %05lu Received %s packet (%%%u,#%u,%c%c%c%c%c)", | ||
957 | jiffies - call->cjif, | ||
958 | rxrpc_pkts[msg->hdr.type], | ||
959 | ntohl(msg->hdr.serial), | ||
960 | msg->seq, | ||
961 | msg->hdr.flags & RXRPC_JUMBO_PACKET ? 'j' : '-', | ||
962 | msg->hdr.flags & RXRPC_MORE_PACKETS ? 'm' : '-', | ||
963 | msg->hdr.flags & RXRPC_LAST_PACKET ? 'l' : '-', | ||
964 | msg->hdr.flags & RXRPC_REQUEST_ACK ? 'r' : '-', | ||
965 | msg->hdr.flags & RXRPC_CLIENT_INITIATED ? 'C' : 'S' | ||
966 | ); | ||
967 | |||
968 | switch (msg->hdr.type) { | ||
969 | /* deal with data packets */ | ||
970 | case RXRPC_PACKET_TYPE_DATA: | ||
971 | /* ACK the packet if necessary */ | ||
972 | switch (rxrpc_call_generate_ACK(call, &msg->hdr, | ||
973 | NULL)) { | ||
974 | case 0: /* useful packet */ | ||
975 | rxrpc_call_receive_data_packet(call, msg); | ||
976 | break; | ||
977 | case 1: /* duplicate or out-of-window packet */ | ||
978 | break; | ||
979 | default: | ||
980 | rxrpc_put_message(msg); | ||
981 | goto out; | ||
982 | } | ||
983 | break; | ||
984 | |||
985 | /* deal with ACK packets */ | ||
986 | case RXRPC_PACKET_TYPE_ACK: | ||
987 | rxrpc_call_receive_ack_packet(call, msg); | ||
988 | break; | ||
989 | |||
990 | /* deal with abort packets */ | ||
991 | case RXRPC_PACKET_TYPE_ABORT: { | ||
992 | __be32 _dbuf, *dp; | ||
993 | |||
994 | dp = skb_header_pointer(msg->pkt, msg->offset, | ||
995 | sizeof(_dbuf), &_dbuf); | ||
996 | if (dp == NULL) | ||
997 | printk("Rx Received short ABORT packet\n"); | ||
998 | |||
999 | _proto("Rx Received Call ABORT { data=%d }", | ||
1000 | (dp ? ntohl(*dp) : 0)); | ||
1001 | |||
1002 | spin_lock(&call->lock); | ||
1003 | call->app_call_state = RXRPC_CSTATE_ERROR; | ||
1004 | call->app_err_state = RXRPC_ESTATE_PEER_ABORT; | ||
1005 | call->app_abort_code = (dp ? ntohl(*dp) : 0); | ||
1006 | call->app_errno = -ECONNABORTED; | ||
1007 | call->app_mark = RXRPC_APP_MARK_EOF; | ||
1008 | call->app_read_buf = NULL; | ||
1009 | call->app_async_read = 0; | ||
1010 | |||
1011 | /* ask the app to translate the error code */ | ||
1012 | call->app_aemap_func(call); | ||
1013 | _state(call); | ||
1014 | spin_unlock(&call->lock); | ||
1015 | call->app_error_func(call); | ||
1016 | break; | ||
1017 | } | ||
1018 | default: | ||
1019 | /* deal with other packet types */ | ||
1020 | _proto("Rx Unsupported packet type %u (#%u)", | ||
1021 | msg->hdr.type, msg->seq); | ||
1022 | break; | ||
1023 | } | ||
1024 | |||
1025 | rxrpc_put_message(msg); | ||
1026 | } | ||
1027 | |||
1028 | out: | ||
1029 | rxrpc_put_call(call); | ||
1030 | _leave(""); | ||
1031 | } /* end rxrpc_call_receive_packet() */ | ||
1032 | |||
1033 | /*****************************************************************************/ | ||
1034 | /* | ||
1035 | * process next data packet | ||
1036 | * - as the next data packet arrives: | ||
1037 | * - it is queued on app_readyq _if_ it is the next one expected | ||
1038 | * (app_ready_seq+1) | ||
1039 | * - it is queued on app_unreadyq _if_ it is not the next one expected | ||
1040 | * - if a packet placed on app_readyq completely fills a hole leading up to | ||
1041 | * the first packet on app_unreadyq, then packets now in sequence are | ||
1042 | * tranferred to app_readyq | ||
1043 | * - the application layer can only see packets on app_readyq | ||
1044 | * (app_ready_qty bytes) | ||
1045 | * - the application layer is prodded every time a new packet arrives | ||
1046 | */ | ||
1047 | static void rxrpc_call_receive_data_packet(struct rxrpc_call *call, | ||
1048 | struct rxrpc_message *msg) | ||
1049 | { | ||
1050 | const struct rxrpc_operation *optbl, *op; | ||
1051 | struct rxrpc_message *pmsg; | ||
1052 | struct list_head *_p; | ||
1053 | int ret, lo, hi, rmtimo; | ||
1054 | __be32 opid; | ||
1055 | |||
1056 | _enter("%p{%u},%p{%u}", call, ntohl(call->call_id), msg, msg->seq); | ||
1057 | |||
1058 | rxrpc_get_message(msg); | ||
1059 | |||
1060 | /* add to the unready queue if we'd have to create a hole in the ready | ||
1061 | * queue otherwise */ | ||
1062 | if (msg->seq != call->app_ready_seq + 1) { | ||
1063 | _debug("Call add packet %d to unreadyq", msg->seq); | ||
1064 | |||
1065 | /* insert in seq order */ | ||
1066 | list_for_each(_p, &call->app_unreadyq) { | ||
1067 | pmsg = list_entry(_p, struct rxrpc_message, link); | ||
1068 | if (pmsg->seq > msg->seq) | ||
1069 | break; | ||
1070 | } | ||
1071 | |||
1072 | list_add_tail(&msg->link, _p); | ||
1073 | |||
1074 | _leave(" [unreadyq]"); | ||
1075 | return; | ||
1076 | } | ||
1077 | |||
1078 | /* next in sequence - simply append into the call's ready queue */ | ||
1079 | _debug("Call add packet %d to readyq (+%Zd => %Zd bytes)", | ||
1080 | msg->seq, msg->dsize, call->app_ready_qty); | ||
1081 | |||
1082 | spin_lock(&call->lock); | ||
1083 | call->app_ready_seq = msg->seq; | ||
1084 | call->app_ready_qty += msg->dsize; | ||
1085 | list_add_tail(&msg->link, &call->app_readyq); | ||
1086 | |||
1087 | /* move unready packets to the readyq if we got rid of a hole */ | ||
1088 | while (!list_empty(&call->app_unreadyq)) { | ||
1089 | pmsg = list_entry(call->app_unreadyq.next, | ||
1090 | struct rxrpc_message, link); | ||
1091 | |||
1092 | if (pmsg->seq != call->app_ready_seq + 1) | ||
1093 | break; | ||
1094 | |||
1095 | /* next in sequence - just move list-to-list */ | ||
1096 | _debug("Call transfer packet %d to readyq (+%Zd => %Zd bytes)", | ||
1097 | pmsg->seq, pmsg->dsize, call->app_ready_qty); | ||
1098 | |||
1099 | call->app_ready_seq = pmsg->seq; | ||
1100 | call->app_ready_qty += pmsg->dsize; | ||
1101 | list_move_tail(&pmsg->link, &call->app_readyq); | ||
1102 | } | ||
1103 | |||
1104 | /* see if we've got the last packet yet */ | ||
1105 | if (!list_empty(&call->app_readyq)) { | ||
1106 | pmsg = list_entry(call->app_readyq.prev, | ||
1107 | struct rxrpc_message, link); | ||
1108 | if (pmsg->hdr.flags & RXRPC_LAST_PACKET) { | ||
1109 | call->app_last_rcv = 1; | ||
1110 | _debug("Last packet on readyq"); | ||
1111 | } | ||
1112 | } | ||
1113 | |||
1114 | switch (call->app_call_state) { | ||
1115 | /* do nothing if call already aborted */ | ||
1116 | case RXRPC_CSTATE_ERROR: | ||
1117 | spin_unlock(&call->lock); | ||
1118 | _leave(" [error]"); | ||
1119 | return; | ||
1120 | |||
1121 | /* extract the operation ID from an incoming call if that's not | ||
1122 | * yet been done */ | ||
1123 | case RXRPC_CSTATE_SRVR_RCV_OPID: | ||
1124 | spin_unlock(&call->lock); | ||
1125 | |||
1126 | /* handle as yet insufficient data for the operation ID */ | ||
1127 | if (call->app_ready_qty < 4) { | ||
1128 | if (call->app_last_rcv) | ||
1129 | /* trouble - last packet seen */ | ||
1130 | rxrpc_call_abort(call, -EINVAL); | ||
1131 | |||
1132 | _leave(""); | ||
1133 | return; | ||
1134 | } | ||
1135 | |||
1136 | /* pull the operation ID out of the buffer */ | ||
1137 | ret = rxrpc_call_read_data(call, &opid, sizeof(opid), 0); | ||
1138 | if (ret < 0) { | ||
1139 | printk("Unexpected error from read-data: %d\n", ret); | ||
1140 | if (call->app_call_state != RXRPC_CSTATE_ERROR) | ||
1141 | rxrpc_call_abort(call, ret); | ||
1142 | _leave(""); | ||
1143 | return; | ||
1144 | } | ||
1145 | call->app_opcode = ntohl(opid); | ||
1146 | |||
1147 | /* locate the operation in the available ops table */ | ||
1148 | optbl = call->conn->service->ops_begin; | ||
1149 | lo = 0; | ||
1150 | hi = call->conn->service->ops_end - optbl; | ||
1151 | |||
1152 | while (lo < hi) { | ||
1153 | int mid = (hi + lo) / 2; | ||
1154 | op = &optbl[mid]; | ||
1155 | if (call->app_opcode == op->id) | ||
1156 | goto found_op; | ||
1157 | if (call->app_opcode > op->id) | ||
1158 | lo = mid + 1; | ||
1159 | else | ||
1160 | hi = mid; | ||
1161 | } | ||
1162 | |||
1163 | /* search failed */ | ||
1164 | kproto("Rx Client requested operation %d from %s service", | ||
1165 | call->app_opcode, call->conn->service->name); | ||
1166 | rxrpc_call_abort(call, -EINVAL); | ||
1167 | _leave(" [inval]"); | ||
1168 | return; | ||
1169 | |||
1170 | found_op: | ||
1171 | _proto("Rx Client requested operation %s from %s service", | ||
1172 | op->name, call->conn->service->name); | ||
1173 | |||
1174 | /* we're now waiting for the argument block (unless the call | ||
1175 | * was aborted) */ | ||
1176 | spin_lock(&call->lock); | ||
1177 | if (call->app_call_state == RXRPC_CSTATE_SRVR_RCV_OPID || | ||
1178 | call->app_call_state == RXRPC_CSTATE_SRVR_SND_REPLY) { | ||
1179 | if (!call->app_last_rcv) | ||
1180 | call->app_call_state = | ||
1181 | RXRPC_CSTATE_SRVR_RCV_ARGS; | ||
1182 | else if (call->app_ready_qty > 0) | ||
1183 | call->app_call_state = | ||
1184 | RXRPC_CSTATE_SRVR_GOT_ARGS; | ||
1185 | else | ||
1186 | call->app_call_state = | ||
1187 | RXRPC_CSTATE_SRVR_SND_REPLY; | ||
1188 | call->app_mark = op->asize; | ||
1189 | call->app_user = op->user; | ||
1190 | } | ||
1191 | spin_unlock(&call->lock); | ||
1192 | |||
1193 | _state(call); | ||
1194 | break; | ||
1195 | |||
1196 | case RXRPC_CSTATE_SRVR_RCV_ARGS: | ||
1197 | /* change state if just received last packet of arg block */ | ||
1198 | if (call->app_last_rcv) | ||
1199 | call->app_call_state = RXRPC_CSTATE_SRVR_GOT_ARGS; | ||
1200 | spin_unlock(&call->lock); | ||
1201 | |||
1202 | _state(call); | ||
1203 | break; | ||
1204 | |||
1205 | case RXRPC_CSTATE_CLNT_RCV_REPLY: | ||
1206 | /* change state if just received last packet of reply block */ | ||
1207 | rmtimo = 0; | ||
1208 | if (call->app_last_rcv) { | ||
1209 | call->app_call_state = RXRPC_CSTATE_CLNT_GOT_REPLY; | ||
1210 | rmtimo = 1; | ||
1211 | } | ||
1212 | spin_unlock(&call->lock); | ||
1213 | |||
1214 | if (rmtimo) { | ||
1215 | del_timer_sync(&call->acks_timeout); | ||
1216 | del_timer_sync(&call->rcv_timeout); | ||
1217 | del_timer_sync(&call->ackr_dfr_timo); | ||
1218 | } | ||
1219 | |||
1220 | _state(call); | ||
1221 | break; | ||
1222 | |||
1223 | default: | ||
1224 | /* deal with data reception in an unexpected state */ | ||
1225 | printk("Unexpected state [[[ %u ]]]\n", call->app_call_state); | ||
1226 | __rxrpc_call_abort(call, -EBADMSG); | ||
1227 | _leave(""); | ||
1228 | return; | ||
1229 | } | ||
1230 | |||
1231 | if (call->app_call_state == RXRPC_CSTATE_CLNT_RCV_REPLY && | ||
1232 | call->app_last_rcv) | ||
1233 | BUG(); | ||
1234 | |||
1235 | /* otherwise just invoke the data function whenever we can satisfy its desire for more | ||
1236 | * data | ||
1237 | */ | ||
1238 | _proto("Rx Received Op Data: st=%u qty=%Zu mk=%Zu%s", | ||
1239 | call->app_call_state, call->app_ready_qty, call->app_mark, | ||
1240 | call->app_last_rcv ? " last-rcvd" : ""); | ||
1241 | |||
1242 | spin_lock(&call->lock); | ||
1243 | |||
1244 | ret = __rxrpc_call_read_data(call); | ||
1245 | switch (ret) { | ||
1246 | case 0: | ||
1247 | spin_unlock(&call->lock); | ||
1248 | call->app_attn_func(call); | ||
1249 | break; | ||
1250 | case -EAGAIN: | ||
1251 | spin_unlock(&call->lock); | ||
1252 | break; | ||
1253 | case -ECONNABORTED: | ||
1254 | spin_unlock(&call->lock); | ||
1255 | break; | ||
1256 | default: | ||
1257 | __rxrpc_call_abort(call, ret); | ||
1258 | break; | ||
1259 | } | ||
1260 | |||
1261 | _state(call); | ||
1262 | |||
1263 | _leave(""); | ||
1264 | |||
1265 | } /* end rxrpc_call_receive_data_packet() */ | ||
1266 | |||
1267 | /*****************************************************************************/ | ||
1268 | /* | ||
1269 | * received an ACK packet | ||
1270 | */ | ||
1271 | static void rxrpc_call_receive_ack_packet(struct rxrpc_call *call, | ||
1272 | struct rxrpc_message *msg) | ||
1273 | { | ||
1274 | struct rxrpc_ackpacket _ack, *ap; | ||
1275 | rxrpc_serial_net_t serial; | ||
1276 | rxrpc_seq_t seq; | ||
1277 | int ret; | ||
1278 | |||
1279 | _enter("%p{%u},%p{%u}", call, ntohl(call->call_id), msg, msg->seq); | ||
1280 | |||
1281 | /* extract the basic ACK record */ | ||
1282 | ap = skb_header_pointer(msg->pkt, msg->offset, sizeof(_ack), &_ack); | ||
1283 | if (ap == NULL) { | ||
1284 | printk("Rx Received short ACK packet\n"); | ||
1285 | return; | ||
1286 | } | ||
1287 | msg->offset += sizeof(_ack); | ||
1288 | |||
1289 | serial = ap->serial; | ||
1290 | seq = ntohl(ap->firstPacket); | ||
1291 | |||
1292 | _proto("Rx Received ACK %%%d { b=%hu m=%hu f=%u p=%u s=%u r=%s n=%u }", | ||
1293 | ntohl(msg->hdr.serial), | ||
1294 | ntohs(ap->bufferSpace), | ||
1295 | ntohs(ap->maxSkew), | ||
1296 | seq, | ||
1297 | ntohl(ap->previousPacket), | ||
1298 | ntohl(serial), | ||
1299 | rxrpc_acks[ap->reason], | ||
1300 | call->ackr.nAcks | ||
1301 | ); | ||
1302 | |||
1303 | /* check the other side isn't ACK'ing a sequence number I haven't sent | ||
1304 | * yet */ | ||
1305 | if (ap->nAcks > 0 && | ||
1306 | (seq > call->snd_seq_count || | ||
1307 | seq + ap->nAcks - 1 > call->snd_seq_count)) { | ||
1308 | printk("Received ACK (#%u-#%u) for unsent packet\n", | ||
1309 | seq, seq + ap->nAcks - 1); | ||
1310 | rxrpc_call_abort(call, -EINVAL); | ||
1311 | _leave(""); | ||
1312 | return; | ||
1313 | } | ||
1314 | |||
1315 | /* deal with RTT calculation */ | ||
1316 | if (serial) { | ||
1317 | struct rxrpc_message *rttmsg; | ||
1318 | |||
1319 | /* find the prompting packet */ | ||
1320 | spin_lock(&call->lock); | ||
1321 | if (call->snd_ping && call->snd_ping->hdr.serial == serial) { | ||
1322 | /* it was a ping packet */ | ||
1323 | rttmsg = call->snd_ping; | ||
1324 | call->snd_ping = NULL; | ||
1325 | spin_unlock(&call->lock); | ||
1326 | |||
1327 | if (rttmsg) { | ||
1328 | rttmsg->rttdone = 1; | ||
1329 | rxrpc_peer_calculate_rtt(call->conn->peer, | ||
1330 | rttmsg, msg); | ||
1331 | rxrpc_put_message(rttmsg); | ||
1332 | } | ||
1333 | } | ||
1334 | else { | ||
1335 | struct list_head *_p; | ||
1336 | |||
1337 | /* it ought to be a data packet - look in the pending | ||
1338 | * ACK list */ | ||
1339 | list_for_each(_p, &call->acks_pendq) { | ||
1340 | rttmsg = list_entry(_p, struct rxrpc_message, | ||
1341 | link); | ||
1342 | if (rttmsg->hdr.serial == serial) { | ||
1343 | if (rttmsg->rttdone) | ||
1344 | /* never do RTT twice without | ||
1345 | * resending */ | ||
1346 | break; | ||
1347 | |||
1348 | rttmsg->rttdone = 1; | ||
1349 | rxrpc_peer_calculate_rtt( | ||
1350 | call->conn->peer, rttmsg, msg); | ||
1351 | break; | ||
1352 | } | ||
1353 | } | ||
1354 | spin_unlock(&call->lock); | ||
1355 | } | ||
1356 | } | ||
1357 | |||
1358 | switch (ap->reason) { | ||
1359 | /* deal with negative/positive acknowledgement of data | ||
1360 | * packets */ | ||
1361 | case RXRPC_ACK_REQUESTED: | ||
1362 | case RXRPC_ACK_DELAY: | ||
1363 | case RXRPC_ACK_IDLE: | ||
1364 | rxrpc_call_definitively_ACK(call, seq - 1); | ||
1365 | |||
1366 | case RXRPC_ACK_DUPLICATE: | ||
1367 | case RXRPC_ACK_OUT_OF_SEQUENCE: | ||
1368 | case RXRPC_ACK_EXCEEDS_WINDOW: | ||
1369 | call->snd_resend_cnt = 0; | ||
1370 | ret = rxrpc_call_record_ACK(call, msg, seq, ap->nAcks); | ||
1371 | if (ret < 0) | ||
1372 | rxrpc_call_abort(call, ret); | ||
1373 | break; | ||
1374 | |||
1375 | /* respond to ping packets immediately */ | ||
1376 | case RXRPC_ACK_PING: | ||
1377 | rxrpc_call_generate_ACK(call, &msg->hdr, ap); | ||
1378 | break; | ||
1379 | |||
1380 | /* only record RTT on ping response packets */ | ||
1381 | case RXRPC_ACK_PING_RESPONSE: | ||
1382 | if (call->snd_ping) { | ||
1383 | struct rxrpc_message *rttmsg; | ||
1384 | |||
1385 | /* only do RTT stuff if the response matches the | ||
1386 | * retained ping */ | ||
1387 | rttmsg = NULL; | ||
1388 | spin_lock(&call->lock); | ||
1389 | if (call->snd_ping && | ||
1390 | call->snd_ping->hdr.serial == ap->serial) { | ||
1391 | rttmsg = call->snd_ping; | ||
1392 | call->snd_ping = NULL; | ||
1393 | } | ||
1394 | spin_unlock(&call->lock); | ||
1395 | |||
1396 | if (rttmsg) { | ||
1397 | rttmsg->rttdone = 1; | ||
1398 | rxrpc_peer_calculate_rtt(call->conn->peer, | ||
1399 | rttmsg, msg); | ||
1400 | rxrpc_put_message(rttmsg); | ||
1401 | } | ||
1402 | } | ||
1403 | break; | ||
1404 | |||
1405 | default: | ||
1406 | printk("Unsupported ACK reason %u\n", ap->reason); | ||
1407 | break; | ||
1408 | } | ||
1409 | |||
1410 | _leave(""); | ||
1411 | } /* end rxrpc_call_receive_ack_packet() */ | ||
1412 | |||
1413 | /*****************************************************************************/ | ||
1414 | /* | ||
1415 | * record definitive ACKs for all messages up to and including the one with the | ||
1416 | * 'highest' seq | ||
1417 | */ | ||
1418 | static void rxrpc_call_definitively_ACK(struct rxrpc_call *call, | ||
1419 | rxrpc_seq_t highest) | ||
1420 | { | ||
1421 | struct rxrpc_message *msg; | ||
1422 | int now_complete; | ||
1423 | |||
1424 | _enter("%p{ads=%u},%u", call, call->acks_dftv_seq, highest); | ||
1425 | |||
1426 | while (call->acks_dftv_seq < highest) { | ||
1427 | call->acks_dftv_seq++; | ||
1428 | |||
1429 | _proto("Definitive ACK on packet #%u", call->acks_dftv_seq); | ||
1430 | |||
1431 | /* discard those at front of queue until message with highest | ||
1432 | * ACK is found */ | ||
1433 | spin_lock(&call->lock); | ||
1434 | msg = NULL; | ||
1435 | if (!list_empty(&call->acks_pendq)) { | ||
1436 | msg = list_entry(call->acks_pendq.next, | ||
1437 | struct rxrpc_message, link); | ||
1438 | list_del_init(&msg->link); /* dequeue */ | ||
1439 | if (msg->state == RXRPC_MSG_SENT) | ||
1440 | call->acks_pend_cnt--; | ||
1441 | } | ||
1442 | spin_unlock(&call->lock); | ||
1443 | |||
1444 | /* insanity check */ | ||
1445 | if (!msg) | ||
1446 | panic("%s(): acks_pendq unexpectedly empty\n", | ||
1447 | __FUNCTION__); | ||
1448 | |||
1449 | if (msg->seq != call->acks_dftv_seq) | ||
1450 | panic("%s(): Packet #%u expected at front of acks_pendq" | ||
1451 | " (#%u found)\n", | ||
1452 | __FUNCTION__, call->acks_dftv_seq, msg->seq); | ||
1453 | |||
1454 | /* discard the message */ | ||
1455 | msg->state = RXRPC_MSG_DONE; | ||
1456 | rxrpc_put_message(msg); | ||
1457 | } | ||
1458 | |||
1459 | /* if all sent packets are definitively ACK'd then prod any sleepers just in case */ | ||
1460 | now_complete = 0; | ||
1461 | spin_lock(&call->lock); | ||
1462 | if (call->acks_dftv_seq == call->snd_seq_count) { | ||
1463 | if (call->app_call_state != RXRPC_CSTATE_COMPLETE) { | ||
1464 | call->app_call_state = RXRPC_CSTATE_COMPLETE; | ||
1465 | _state(call); | ||
1466 | now_complete = 1; | ||
1467 | } | ||
1468 | } | ||
1469 | spin_unlock(&call->lock); | ||
1470 | |||
1471 | if (now_complete) { | ||
1472 | del_timer_sync(&call->acks_timeout); | ||
1473 | del_timer_sync(&call->rcv_timeout); | ||
1474 | del_timer_sync(&call->ackr_dfr_timo); | ||
1475 | call->app_attn_func(call); | ||
1476 | } | ||
1477 | |||
1478 | _leave(""); | ||
1479 | } /* end rxrpc_call_definitively_ACK() */ | ||
1480 | |||
1481 | /*****************************************************************************/ | ||
1482 | /* | ||
1483 | * record the specified amount of ACKs/NAKs | ||
1484 | */ | ||
1485 | static int rxrpc_call_record_ACK(struct rxrpc_call *call, | ||
1486 | struct rxrpc_message *msg, | ||
1487 | rxrpc_seq_t seq, | ||
1488 | size_t count) | ||
1489 | { | ||
1490 | struct rxrpc_message *dmsg; | ||
1491 | struct list_head *_p; | ||
1492 | rxrpc_seq_t highest; | ||
1493 | unsigned ix; | ||
1494 | size_t chunk; | ||
1495 | char resend, now_complete; | ||
1496 | u8 acks[16]; | ||
1497 | |||
1498 | _enter("%p{apc=%u ads=%u},%p,%u,%Zu", | ||
1499 | call, call->acks_pend_cnt, call->acks_dftv_seq, | ||
1500 | msg, seq, count); | ||
1501 | |||
1502 | /* handle re-ACK'ing of definitively ACK'd packets (may be out-of-order | ||
1503 | * ACKs) */ | ||
1504 | if (seq <= call->acks_dftv_seq) { | ||
1505 | unsigned delta = call->acks_dftv_seq - seq; | ||
1506 | |||
1507 | if (count <= delta) { | ||
1508 | _leave(" = 0 [all definitively ACK'd]"); | ||
1509 | return 0; | ||
1510 | } | ||
1511 | |||
1512 | seq += delta; | ||
1513 | count -= delta; | ||
1514 | msg->offset += delta; | ||
1515 | } | ||
1516 | |||
1517 | highest = seq + count - 1; | ||
1518 | resend = 0; | ||
1519 | while (count > 0) { | ||
1520 | /* extract up to 16 ACK slots at a time */ | ||
1521 | chunk = min(count, sizeof(acks)); | ||
1522 | count -= chunk; | ||
1523 | |||
1524 | memset(acks, 2, sizeof(acks)); | ||
1525 | |||
1526 | if (skb_copy_bits(msg->pkt, msg->offset, &acks, chunk) < 0) { | ||
1527 | printk("Rx Received short ACK packet\n"); | ||
1528 | _leave(" = -EINVAL"); | ||
1529 | return -EINVAL; | ||
1530 | } | ||
1531 | msg->offset += chunk; | ||
1532 | |||
1533 | /* check that the ACK set is valid */ | ||
1534 | for (ix = 0; ix < chunk; ix++) { | ||
1535 | switch (acks[ix]) { | ||
1536 | case RXRPC_ACK_TYPE_ACK: | ||
1537 | break; | ||
1538 | case RXRPC_ACK_TYPE_NACK: | ||
1539 | resend = 1; | ||
1540 | break; | ||
1541 | default: | ||
1542 | printk("Rx Received unsupported ACK state" | ||
1543 | " %u\n", acks[ix]); | ||
1544 | _leave(" = -EINVAL"); | ||
1545 | return -EINVAL; | ||
1546 | } | ||
1547 | } | ||
1548 | |||
1549 | _proto("Rx ACK of packets #%u-#%u " | ||
1550 | "[%c%c%c%c%c%c%c%c%c%c%c%c%c%c%c%c] (pend=%u)", | ||
1551 | seq, (unsigned) (seq + chunk - 1), | ||
1552 | _acktype[acks[0x0]], | ||
1553 | _acktype[acks[0x1]], | ||
1554 | _acktype[acks[0x2]], | ||
1555 | _acktype[acks[0x3]], | ||
1556 | _acktype[acks[0x4]], | ||
1557 | _acktype[acks[0x5]], | ||
1558 | _acktype[acks[0x6]], | ||
1559 | _acktype[acks[0x7]], | ||
1560 | _acktype[acks[0x8]], | ||
1561 | _acktype[acks[0x9]], | ||
1562 | _acktype[acks[0xA]], | ||
1563 | _acktype[acks[0xB]], | ||
1564 | _acktype[acks[0xC]], | ||
1565 | _acktype[acks[0xD]], | ||
1566 | _acktype[acks[0xE]], | ||
1567 | _acktype[acks[0xF]], | ||
1568 | call->acks_pend_cnt | ||
1569 | ); | ||
1570 | |||
1571 | /* mark the packets in the ACK queue as being provisionally | ||
1572 | * ACK'd */ | ||
1573 | ix = 0; | ||
1574 | spin_lock(&call->lock); | ||
1575 | |||
1576 | /* find the first packet ACK'd/NAK'd here */ | ||
1577 | list_for_each(_p, &call->acks_pendq) { | ||
1578 | dmsg = list_entry(_p, struct rxrpc_message, link); | ||
1579 | if (dmsg->seq == seq) | ||
1580 | goto found_first; | ||
1581 | _debug("- %u: skipping #%u", ix, dmsg->seq); | ||
1582 | } | ||
1583 | goto bad_queue; | ||
1584 | |||
1585 | found_first: | ||
1586 | do { | ||
1587 | _debug("- %u: processing #%u (%c) apc=%u", | ||
1588 | ix, dmsg->seq, _acktype[acks[ix]], | ||
1589 | call->acks_pend_cnt); | ||
1590 | |||
1591 | if (acks[ix] == RXRPC_ACK_TYPE_ACK) { | ||
1592 | if (dmsg->state == RXRPC_MSG_SENT) | ||
1593 | call->acks_pend_cnt--; | ||
1594 | dmsg->state = RXRPC_MSG_ACKED; | ||
1595 | } | ||
1596 | else { | ||
1597 | if (dmsg->state == RXRPC_MSG_ACKED) | ||
1598 | call->acks_pend_cnt++; | ||
1599 | dmsg->state = RXRPC_MSG_SENT; | ||
1600 | } | ||
1601 | ix++; | ||
1602 | seq++; | ||
1603 | |||
1604 | _p = dmsg->link.next; | ||
1605 | dmsg = list_entry(_p, struct rxrpc_message, link); | ||
1606 | } while(ix < chunk && | ||
1607 | _p != &call->acks_pendq && | ||
1608 | dmsg->seq == seq); | ||
1609 | |||
1610 | if (ix < chunk) | ||
1611 | goto bad_queue; | ||
1612 | |||
1613 | spin_unlock(&call->lock); | ||
1614 | } | ||
1615 | |||
1616 | if (resend) | ||
1617 | rxrpc_call_resend(call, highest); | ||
1618 | |||
1619 | /* if all packets are provisionally ACK'd, then wake up anyone who's | ||
1620 | * waiting for that */ | ||
1621 | now_complete = 0; | ||
1622 | spin_lock(&call->lock); | ||
1623 | if (call->acks_pend_cnt == 0) { | ||
1624 | if (call->app_call_state == RXRPC_CSTATE_SRVR_RCV_FINAL_ACK) { | ||
1625 | call->app_call_state = RXRPC_CSTATE_COMPLETE; | ||
1626 | _state(call); | ||
1627 | } | ||
1628 | now_complete = 1; | ||
1629 | } | ||
1630 | spin_unlock(&call->lock); | ||
1631 | |||
1632 | if (now_complete) { | ||
1633 | _debug("- wake up waiters"); | ||
1634 | del_timer_sync(&call->acks_timeout); | ||
1635 | del_timer_sync(&call->rcv_timeout); | ||
1636 | del_timer_sync(&call->ackr_dfr_timo); | ||
1637 | call->app_attn_func(call); | ||
1638 | } | ||
1639 | |||
1640 | _leave(" = 0 (apc=%u)", call->acks_pend_cnt); | ||
1641 | return 0; | ||
1642 | |||
1643 | bad_queue: | ||
1644 | panic("%s(): acks_pendq in bad state (packet #%u absent)\n", | ||
1645 | __FUNCTION__, seq); | ||
1646 | |||
1647 | } /* end rxrpc_call_record_ACK() */ | ||
1648 | |||
1649 | /*****************************************************************************/ | ||
1650 | /* | ||
1651 | * transfer data from the ready packet queue to the asynchronous read buffer | ||
1652 | * - since this func is the only one going to look at packets queued on | ||
1653 | * app_readyq, we don't need a lock to modify or access them, only to modify | ||
1654 | * the queue pointers | ||
1655 | * - called with call->lock held | ||
1656 | * - the buffer must be in kernel space | ||
1657 | * - returns: | ||
1658 | * 0 if buffer filled | ||
1659 | * -EAGAIN if buffer not filled and more data to come | ||
1660 | * -EBADMSG if last packet received and insufficient data left | ||
1661 | * -ECONNABORTED if the call has in an error state | ||
1662 | */ | ||
1663 | static int __rxrpc_call_read_data(struct rxrpc_call *call) | ||
1664 | { | ||
1665 | struct rxrpc_message *msg; | ||
1666 | size_t qty; | ||
1667 | int ret; | ||
1668 | |||
1669 | _enter("%p{as=%d buf=%p qty=%Zu/%Zu}", | ||
1670 | call, | ||
1671 | call->app_async_read, call->app_read_buf, | ||
1672 | call->app_ready_qty, call->app_mark); | ||
1673 | |||
1674 | /* check the state */ | ||
1675 | switch (call->app_call_state) { | ||
1676 | case RXRPC_CSTATE_SRVR_RCV_ARGS: | ||
1677 | case RXRPC_CSTATE_CLNT_RCV_REPLY: | ||
1678 | if (call->app_last_rcv) { | ||
1679 | printk("%s(%p,%p,%Zd):" | ||
1680 | " Inconsistent call state (%s, last pkt)", | ||
1681 | __FUNCTION__, | ||
1682 | call, call->app_read_buf, call->app_mark, | ||
1683 | rxrpc_call_states[call->app_call_state]); | ||
1684 | BUG(); | ||
1685 | } | ||
1686 | break; | ||
1687 | |||
1688 | case RXRPC_CSTATE_SRVR_RCV_OPID: | ||
1689 | case RXRPC_CSTATE_SRVR_GOT_ARGS: | ||
1690 | case RXRPC_CSTATE_CLNT_GOT_REPLY: | ||
1691 | break; | ||
1692 | |||
1693 | case RXRPC_CSTATE_SRVR_SND_REPLY: | ||
1694 | if (!call->app_last_rcv) { | ||
1695 | printk("%s(%p,%p,%Zd):" | ||
1696 | " Inconsistent call state (%s, not last pkt)", | ||
1697 | __FUNCTION__, | ||
1698 | call, call->app_read_buf, call->app_mark, | ||
1699 | rxrpc_call_states[call->app_call_state]); | ||
1700 | BUG(); | ||
1701 | } | ||
1702 | _debug("Trying to read data from call in SND_REPLY state"); | ||
1703 | break; | ||
1704 | |||
1705 | case RXRPC_CSTATE_ERROR: | ||
1706 | _leave(" = -ECONNABORTED"); | ||
1707 | return -ECONNABORTED; | ||
1708 | |||
1709 | default: | ||
1710 | printk("reading in unexpected state [[[ %u ]]]\n", | ||
1711 | call->app_call_state); | ||
1712 | BUG(); | ||
1713 | } | ||
1714 | |||
1715 | /* handle the case of not having an async buffer */ | ||
1716 | if (!call->app_async_read) { | ||
1717 | if (call->app_mark == RXRPC_APP_MARK_EOF) { | ||
1718 | ret = call->app_last_rcv ? 0 : -EAGAIN; | ||
1719 | } | ||
1720 | else { | ||
1721 | if (call->app_mark >= call->app_ready_qty) { | ||
1722 | call->app_mark = RXRPC_APP_MARK_EOF; | ||
1723 | ret = 0; | ||
1724 | } | ||
1725 | else { | ||
1726 | ret = call->app_last_rcv ? -EBADMSG : -EAGAIN; | ||
1727 | } | ||
1728 | } | ||
1729 | |||
1730 | _leave(" = %d [no buf]", ret); | ||
1731 | return 0; | ||
1732 | } | ||
1733 | |||
1734 | while (!list_empty(&call->app_readyq) && call->app_mark > 0) { | ||
1735 | msg = list_entry(call->app_readyq.next, | ||
1736 | struct rxrpc_message, link); | ||
1737 | |||
1738 | /* drag as much data as we need out of this packet */ | ||
1739 | qty = min(call->app_mark, msg->dsize); | ||
1740 | |||
1741 | _debug("reading %Zu from skb=%p off=%lu", | ||
1742 | qty, msg->pkt, msg->offset); | ||
1743 | |||
1744 | if (call->app_read_buf) | ||
1745 | if (skb_copy_bits(msg->pkt, msg->offset, | ||
1746 | call->app_read_buf, qty) < 0) | ||
1747 | panic("%s: Failed to copy data from packet:" | ||
1748 | " (%p,%p,%Zd)", | ||
1749 | __FUNCTION__, | ||
1750 | call, call->app_read_buf, qty); | ||
1751 | |||
1752 | /* if that packet is now empty, discard it */ | ||
1753 | call->app_ready_qty -= qty; | ||
1754 | msg->dsize -= qty; | ||
1755 | |||
1756 | if (msg->dsize == 0) { | ||
1757 | list_del_init(&msg->link); | ||
1758 | rxrpc_put_message(msg); | ||
1759 | } | ||
1760 | else { | ||
1761 | msg->offset += qty; | ||
1762 | } | ||
1763 | |||
1764 | call->app_mark -= qty; | ||
1765 | if (call->app_read_buf) | ||
1766 | call->app_read_buf += qty; | ||
1767 | } | ||
1768 | |||
1769 | if (call->app_mark == 0) { | ||
1770 | call->app_async_read = 0; | ||
1771 | call->app_mark = RXRPC_APP_MARK_EOF; | ||
1772 | call->app_read_buf = NULL; | ||
1773 | |||
1774 | /* adjust the state if used up all packets */ | ||
1775 | if (list_empty(&call->app_readyq) && call->app_last_rcv) { | ||
1776 | switch (call->app_call_state) { | ||
1777 | case RXRPC_CSTATE_SRVR_RCV_OPID: | ||
1778 | call->app_call_state = RXRPC_CSTATE_SRVR_SND_REPLY; | ||
1779 | call->app_mark = RXRPC_APP_MARK_EOF; | ||
1780 | _state(call); | ||
1781 | del_timer_sync(&call->rcv_timeout); | ||
1782 | break; | ||
1783 | case RXRPC_CSTATE_SRVR_GOT_ARGS: | ||
1784 | call->app_call_state = RXRPC_CSTATE_SRVR_SND_REPLY; | ||
1785 | _state(call); | ||
1786 | del_timer_sync(&call->rcv_timeout); | ||
1787 | break; | ||
1788 | default: | ||
1789 | call->app_call_state = RXRPC_CSTATE_COMPLETE; | ||
1790 | _state(call); | ||
1791 | del_timer_sync(&call->acks_timeout); | ||
1792 | del_timer_sync(&call->ackr_dfr_timo); | ||
1793 | del_timer_sync(&call->rcv_timeout); | ||
1794 | break; | ||
1795 | } | ||
1796 | } | ||
1797 | |||
1798 | _leave(" = 0"); | ||
1799 | return 0; | ||
1800 | } | ||
1801 | |||
1802 | if (call->app_last_rcv) { | ||
1803 | _debug("Insufficient data (%Zu/%Zu)", | ||
1804 | call->app_ready_qty, call->app_mark); | ||
1805 | call->app_async_read = 0; | ||
1806 | call->app_mark = RXRPC_APP_MARK_EOF; | ||
1807 | call->app_read_buf = NULL; | ||
1808 | |||
1809 | _leave(" = -EBADMSG"); | ||
1810 | return -EBADMSG; | ||
1811 | } | ||
1812 | |||
1813 | _leave(" = -EAGAIN"); | ||
1814 | return -EAGAIN; | ||
1815 | } /* end __rxrpc_call_read_data() */ | ||
1816 | |||
1817 | /*****************************************************************************/ | ||
1818 | /* | ||
1819 | * attempt to read the specified amount of data from the call's ready queue | ||
1820 | * into the buffer provided | ||
1821 | * - since this func is the only one going to look at packets queued on | ||
1822 | * app_readyq, we don't need a lock to modify or access them, only to modify | ||
1823 | * the queue pointers | ||
1824 | * - if the buffer pointer is NULL, then data is merely drained, not copied | ||
1825 | * - if flags&RXRPC_CALL_READ_BLOCK, then the function will wait until there is | ||
1826 | * enough data or an error will be generated | ||
1827 | * - note that the caller must have added the calling task to the call's wait | ||
1828 | * queue beforehand | ||
1829 | * - if flags&RXRPC_CALL_READ_ALL, then an error will be generated if this | ||
1830 | * function doesn't read all available data | ||
1831 | */ | ||
1832 | int rxrpc_call_read_data(struct rxrpc_call *call, | ||
1833 | void *buffer, size_t size, int flags) | ||
1834 | { | ||
1835 | int ret; | ||
1836 | |||
1837 | _enter("%p{arq=%Zu},%p,%Zd,%x", | ||
1838 | call, call->app_ready_qty, buffer, size, flags); | ||
1839 | |||
1840 | spin_lock(&call->lock); | ||
1841 | |||
1842 | if (unlikely(!!call->app_read_buf)) { | ||
1843 | spin_unlock(&call->lock); | ||
1844 | _leave(" = -EBUSY"); | ||
1845 | return -EBUSY; | ||
1846 | } | ||
1847 | |||
1848 | call->app_mark = size; | ||
1849 | call->app_read_buf = buffer; | ||
1850 | call->app_async_read = 1; | ||
1851 | call->app_read_count++; | ||
1852 | |||
1853 | /* read as much data as possible */ | ||
1854 | ret = __rxrpc_call_read_data(call); | ||
1855 | switch (ret) { | ||
1856 | case 0: | ||
1857 | if (flags & RXRPC_CALL_READ_ALL && | ||
1858 | (!call->app_last_rcv || call->app_ready_qty > 0)) { | ||
1859 | _leave(" = -EBADMSG"); | ||
1860 | __rxrpc_call_abort(call, -EBADMSG); | ||
1861 | return -EBADMSG; | ||
1862 | } | ||
1863 | |||
1864 | spin_unlock(&call->lock); | ||
1865 | call->app_attn_func(call); | ||
1866 | _leave(" = 0"); | ||
1867 | return ret; | ||
1868 | |||
1869 | case -ECONNABORTED: | ||
1870 | spin_unlock(&call->lock); | ||
1871 | _leave(" = %d [aborted]", ret); | ||
1872 | return ret; | ||
1873 | |||
1874 | default: | ||
1875 | __rxrpc_call_abort(call, ret); | ||
1876 | _leave(" = %d", ret); | ||
1877 | return ret; | ||
1878 | |||
1879 | case -EAGAIN: | ||
1880 | spin_unlock(&call->lock); | ||
1881 | |||
1882 | if (!(flags & RXRPC_CALL_READ_BLOCK)) { | ||
1883 | _leave(" = -EAGAIN"); | ||
1884 | return -EAGAIN; | ||
1885 | } | ||
1886 | |||
1887 | /* wait for the data to arrive */ | ||
1888 | _debug("blocking for data arrival"); | ||
1889 | |||
1890 | for (;;) { | ||
1891 | set_current_state(TASK_INTERRUPTIBLE); | ||
1892 | if (!call->app_async_read || signal_pending(current)) | ||
1893 | break; | ||
1894 | schedule(); | ||
1895 | } | ||
1896 | set_current_state(TASK_RUNNING); | ||
1897 | |||
1898 | if (signal_pending(current)) { | ||
1899 | _leave(" = -EINTR"); | ||
1900 | return -EINTR; | ||
1901 | } | ||
1902 | |||
1903 | if (call->app_call_state == RXRPC_CSTATE_ERROR) { | ||
1904 | _leave(" = -ECONNABORTED"); | ||
1905 | return -ECONNABORTED; | ||
1906 | } | ||
1907 | |||
1908 | _leave(" = 0"); | ||
1909 | return 0; | ||
1910 | } | ||
1911 | |||
1912 | } /* end rxrpc_call_read_data() */ | ||
1913 | |||
1914 | /*****************************************************************************/ | ||
1915 | /* | ||
1916 | * write data to a call | ||
1917 | * - the data may not be sent immediately if it doesn't fill a buffer | ||
1918 | * - if we can't queue all the data for buffering now, siov[] will have been | ||
1919 | * adjusted to take account of what has been sent | ||
1920 | */ | ||
1921 | int rxrpc_call_write_data(struct rxrpc_call *call, | ||
1922 | size_t sioc, | ||
1923 | struct kvec *siov, | ||
1924 | u8 rxhdr_flags, | ||
1925 | gfp_t alloc_flags, | ||
1926 | int dup_data, | ||
1927 | size_t *size_sent) | ||
1928 | { | ||
1929 | struct rxrpc_message *msg; | ||
1930 | struct kvec *sptr; | ||
1931 | size_t space, size, chunk, tmp; | ||
1932 | char *buf; | ||
1933 | int ret; | ||
1934 | |||
1935 | _enter("%p,%Zu,%p,%02x,%x,%d,%p", | ||
1936 | call, sioc, siov, rxhdr_flags, alloc_flags, dup_data, | ||
1937 | size_sent); | ||
1938 | |||
1939 | *size_sent = 0; | ||
1940 | size = 0; | ||
1941 | ret = -EINVAL; | ||
1942 | |||
1943 | /* can't send more if we've sent last packet from this end */ | ||
1944 | switch (call->app_call_state) { | ||
1945 | case RXRPC_CSTATE_SRVR_SND_REPLY: | ||
1946 | case RXRPC_CSTATE_CLNT_SND_ARGS: | ||
1947 | break; | ||
1948 | case RXRPC_CSTATE_ERROR: | ||
1949 | ret = call->app_errno; | ||
1950 | default: | ||
1951 | goto out; | ||
1952 | } | ||
1953 | |||
1954 | /* calculate how much data we've been given */ | ||
1955 | sptr = siov; | ||
1956 | for (; sioc > 0; sptr++, sioc--) { | ||
1957 | if (!sptr->iov_len) | ||
1958 | continue; | ||
1959 | |||
1960 | if (!sptr->iov_base) | ||
1961 | goto out; | ||
1962 | |||
1963 | size += sptr->iov_len; | ||
1964 | } | ||
1965 | |||
1966 | _debug("- size=%Zu mtu=%Zu", size, call->conn->mtu_size); | ||
1967 | |||
1968 | do { | ||
1969 | /* make sure there's a message under construction */ | ||
1970 | if (!call->snd_nextmsg) { | ||
1971 | /* no - allocate a message with no data yet attached */ | ||
1972 | ret = rxrpc_conn_newmsg(call->conn, call, | ||
1973 | RXRPC_PACKET_TYPE_DATA, | ||
1974 | 0, NULL, alloc_flags, | ||
1975 | &call->snd_nextmsg); | ||
1976 | if (ret < 0) | ||
1977 | goto out; | ||
1978 | _debug("- allocated new message [ds=%Zu]", | ||
1979 | call->snd_nextmsg->dsize); | ||
1980 | } | ||
1981 | |||
1982 | msg = call->snd_nextmsg; | ||
1983 | msg->hdr.flags |= rxhdr_flags; | ||
1984 | |||
1985 | /* deal with zero-length terminal packet */ | ||
1986 | if (size == 0) { | ||
1987 | if (rxhdr_flags & RXRPC_LAST_PACKET) { | ||
1988 | ret = rxrpc_call_flush(call); | ||
1989 | if (ret < 0) | ||
1990 | goto out; | ||
1991 | } | ||
1992 | break; | ||
1993 | } | ||
1994 | |||
1995 | /* work out how much space current packet has available */ | ||
1996 | space = call->conn->mtu_size - msg->dsize; | ||
1997 | chunk = min(space, size); | ||
1998 | |||
1999 | _debug("- [before] space=%Zu chunk=%Zu", space, chunk); | ||
2000 | |||
2001 | while (!siov->iov_len) | ||
2002 | siov++; | ||
2003 | |||
2004 | /* if we are going to have to duplicate the data then coalesce | ||
2005 | * it too */ | ||
2006 | if (dup_data) { | ||
2007 | /* don't allocate more that 1 page at a time */ | ||
2008 | if (chunk > PAGE_SIZE) | ||
2009 | chunk = PAGE_SIZE; | ||
2010 | |||
2011 | /* allocate a data buffer and attach to the message */ | ||
2012 | buf = kmalloc(chunk, alloc_flags); | ||
2013 | if (unlikely(!buf)) { | ||
2014 | if (msg->dsize == | ||
2015 | sizeof(struct rxrpc_header)) { | ||
2016 | /* discard an empty msg and wind back | ||
2017 | * the seq counter */ | ||
2018 | rxrpc_put_message(msg); | ||
2019 | call->snd_nextmsg = NULL; | ||
2020 | call->snd_seq_count--; | ||
2021 | } | ||
2022 | |||
2023 | ret = -ENOMEM; | ||
2024 | goto out; | ||
2025 | } | ||
2026 | |||
2027 | tmp = msg->dcount++; | ||
2028 | set_bit(tmp, &msg->dfree); | ||
2029 | msg->data[tmp].iov_base = buf; | ||
2030 | msg->data[tmp].iov_len = chunk; | ||
2031 | msg->dsize += chunk; | ||
2032 | *size_sent += chunk; | ||
2033 | size -= chunk; | ||
2034 | |||
2035 | /* load the buffer with data */ | ||
2036 | while (chunk > 0) { | ||
2037 | tmp = min(chunk, siov->iov_len); | ||
2038 | memcpy(buf, siov->iov_base, tmp); | ||
2039 | buf += tmp; | ||
2040 | siov->iov_base += tmp; | ||
2041 | siov->iov_len -= tmp; | ||
2042 | if (!siov->iov_len) | ||
2043 | siov++; | ||
2044 | chunk -= tmp; | ||
2045 | } | ||
2046 | } | ||
2047 | else { | ||
2048 | /* we want to attach the supplied buffers directly */ | ||
2049 | while (chunk > 0 && | ||
2050 | msg->dcount < RXRPC_MSG_MAX_IOCS) { | ||
2051 | tmp = msg->dcount++; | ||
2052 | msg->data[tmp].iov_base = siov->iov_base; | ||
2053 | msg->data[tmp].iov_len = siov->iov_len; | ||
2054 | msg->dsize += siov->iov_len; | ||
2055 | *size_sent += siov->iov_len; | ||
2056 | size -= siov->iov_len; | ||
2057 | chunk -= siov->iov_len; | ||
2058 | siov++; | ||
2059 | } | ||
2060 | } | ||
2061 | |||
2062 | _debug("- [loaded] chunk=%Zu size=%Zu", chunk, size); | ||
2063 | |||
2064 | /* dispatch the message when full, final or requesting ACK */ | ||
2065 | if (msg->dsize >= call->conn->mtu_size || rxhdr_flags) { | ||
2066 | ret = rxrpc_call_flush(call); | ||
2067 | if (ret < 0) | ||
2068 | goto out; | ||
2069 | } | ||
2070 | |||
2071 | } while(size > 0); | ||
2072 | |||
2073 | ret = 0; | ||
2074 | out: | ||
2075 | _leave(" = %d (%Zd queued, %Zd rem)", ret, *size_sent, size); | ||
2076 | return ret; | ||
2077 | |||
2078 | } /* end rxrpc_call_write_data() */ | ||
2079 | |||
2080 | /*****************************************************************************/ | ||
2081 | /* | ||
2082 | * flush outstanding packets to the network | ||
2083 | */ | ||
2084 | static int rxrpc_call_flush(struct rxrpc_call *call) | ||
2085 | { | ||
2086 | struct rxrpc_message *msg; | ||
2087 | int ret = 0; | ||
2088 | |||
2089 | _enter("%p", call); | ||
2090 | |||
2091 | rxrpc_get_call(call); | ||
2092 | |||
2093 | /* if there's a packet under construction, then dispatch it now */ | ||
2094 | if (call->snd_nextmsg) { | ||
2095 | msg = call->snd_nextmsg; | ||
2096 | call->snd_nextmsg = NULL; | ||
2097 | |||
2098 | if (msg->hdr.flags & RXRPC_LAST_PACKET) { | ||
2099 | msg->hdr.flags &= ~RXRPC_MORE_PACKETS; | ||
2100 | if (call->app_call_state != RXRPC_CSTATE_CLNT_SND_ARGS) | ||
2101 | msg->hdr.flags |= RXRPC_REQUEST_ACK; | ||
2102 | } | ||
2103 | else { | ||
2104 | msg->hdr.flags |= RXRPC_MORE_PACKETS; | ||
2105 | } | ||
2106 | |||
2107 | _proto("Sending DATA message { ds=%Zu dc=%u df=%02lu }", | ||
2108 | msg->dsize, msg->dcount, msg->dfree); | ||
2109 | |||
2110 | /* queue and adjust call state */ | ||
2111 | spin_lock(&call->lock); | ||
2112 | list_add_tail(&msg->link, &call->acks_pendq); | ||
2113 | |||
2114 | /* decide what to do depending on current state and if this is | ||
2115 | * the last packet */ | ||
2116 | ret = -EINVAL; | ||
2117 | switch (call->app_call_state) { | ||
2118 | case RXRPC_CSTATE_SRVR_SND_REPLY: | ||
2119 | if (msg->hdr.flags & RXRPC_LAST_PACKET) { | ||
2120 | call->app_call_state = | ||
2121 | RXRPC_CSTATE_SRVR_RCV_FINAL_ACK; | ||
2122 | _state(call); | ||
2123 | } | ||
2124 | break; | ||
2125 | |||
2126 | case RXRPC_CSTATE_CLNT_SND_ARGS: | ||
2127 | if (msg->hdr.flags & RXRPC_LAST_PACKET) { | ||
2128 | call->app_call_state = | ||
2129 | RXRPC_CSTATE_CLNT_RCV_REPLY; | ||
2130 | _state(call); | ||
2131 | } | ||
2132 | break; | ||
2133 | |||
2134 | case RXRPC_CSTATE_ERROR: | ||
2135 | ret = call->app_errno; | ||
2136 | default: | ||
2137 | spin_unlock(&call->lock); | ||
2138 | goto out; | ||
2139 | } | ||
2140 | |||
2141 | call->acks_pend_cnt++; | ||
2142 | |||
2143 | mod_timer(&call->acks_timeout, | ||
2144 | __rxrpc_rtt_based_timeout(call, | ||
2145 | rxrpc_call_acks_timeout)); | ||
2146 | |||
2147 | spin_unlock(&call->lock); | ||
2148 | |||
2149 | ret = rxrpc_conn_sendmsg(call->conn, msg); | ||
2150 | if (ret == 0) | ||
2151 | call->pkt_snd_count++; | ||
2152 | } | ||
2153 | |||
2154 | out: | ||
2155 | rxrpc_put_call(call); | ||
2156 | |||
2157 | _leave(" = %d", ret); | ||
2158 | return ret; | ||
2159 | |||
2160 | } /* end rxrpc_call_flush() */ | ||
2161 | |||
2162 | /*****************************************************************************/ | ||
2163 | /* | ||
2164 | * resend NAK'd or unacknowledged packets up to the highest one specified | ||
2165 | */ | ||
2166 | static void rxrpc_call_resend(struct rxrpc_call *call, rxrpc_seq_t highest) | ||
2167 | { | ||
2168 | struct rxrpc_message *msg; | ||
2169 | struct list_head *_p; | ||
2170 | rxrpc_seq_t seq = 0; | ||
2171 | |||
2172 | _enter("%p,%u", call, highest); | ||
2173 | |||
2174 | _proto("Rx Resend required"); | ||
2175 | |||
2176 | /* handle too many resends */ | ||
2177 | if (call->snd_resend_cnt >= rxrpc_call_max_resend) { | ||
2178 | _debug("Aborting due to too many resends (rcv=%d)", | ||
2179 | call->pkt_rcv_count); | ||
2180 | rxrpc_call_abort(call, | ||
2181 | call->pkt_rcv_count > 0 ? -EIO : -ETIMEDOUT); | ||
2182 | _leave(""); | ||
2183 | return; | ||
2184 | } | ||
2185 | |||
2186 | spin_lock(&call->lock); | ||
2187 | call->snd_resend_cnt++; | ||
2188 | for (;;) { | ||
2189 | /* determine which the next packet we might need to ACK is */ | ||
2190 | if (seq <= call->acks_dftv_seq) | ||
2191 | seq = call->acks_dftv_seq; | ||
2192 | seq++; | ||
2193 | |||
2194 | if (seq > highest) | ||
2195 | break; | ||
2196 | |||
2197 | /* look for the packet in the pending-ACK queue */ | ||
2198 | list_for_each(_p, &call->acks_pendq) { | ||
2199 | msg = list_entry(_p, struct rxrpc_message, link); | ||
2200 | if (msg->seq == seq) | ||
2201 | goto found_msg; | ||
2202 | } | ||
2203 | |||
2204 | panic("%s(%p,%d):" | ||
2205 | " Inconsistent pending-ACK queue (ds=%u sc=%u sq=%u)\n", | ||
2206 | __FUNCTION__, call, highest, | ||
2207 | call->acks_dftv_seq, call->snd_seq_count, seq); | ||
2208 | |||
2209 | found_msg: | ||
2210 | if (msg->state != RXRPC_MSG_SENT) | ||
2211 | continue; /* only un-ACK'd packets */ | ||
2212 | |||
2213 | rxrpc_get_message(msg); | ||
2214 | spin_unlock(&call->lock); | ||
2215 | |||
2216 | /* send each message again (and ignore any errors we might | ||
2217 | * incur) */ | ||
2218 | _proto("Resending DATA message { ds=%Zu dc=%u df=%02lu }", | ||
2219 | msg->dsize, msg->dcount, msg->dfree); | ||
2220 | |||
2221 | if (rxrpc_conn_sendmsg(call->conn, msg) == 0) | ||
2222 | call->pkt_snd_count++; | ||
2223 | |||
2224 | rxrpc_put_message(msg); | ||
2225 | |||
2226 | spin_lock(&call->lock); | ||
2227 | } | ||
2228 | |||
2229 | /* reset the timeout */ | ||
2230 | mod_timer(&call->acks_timeout, | ||
2231 | __rxrpc_rtt_based_timeout(call, rxrpc_call_acks_timeout)); | ||
2232 | |||
2233 | spin_unlock(&call->lock); | ||
2234 | |||
2235 | _leave(""); | ||
2236 | } /* end rxrpc_call_resend() */ | ||
2237 | |||
2238 | /*****************************************************************************/ | ||
2239 | /* | ||
2240 | * handle an ICMP error being applied to a call | ||
2241 | */ | ||
2242 | void rxrpc_call_handle_error(struct rxrpc_call *call, int local, int errno) | ||
2243 | { | ||
2244 | _enter("%p{%u},%d", call, ntohl(call->call_id), errno); | ||
2245 | |||
2246 | /* if this call is already aborted, then just wake up any waiters */ | ||
2247 | if (call->app_call_state == RXRPC_CSTATE_ERROR) { | ||
2248 | call->app_error_func(call); | ||
2249 | } | ||
2250 | else { | ||
2251 | /* tell the app layer what happened */ | ||
2252 | spin_lock(&call->lock); | ||
2253 | call->app_call_state = RXRPC_CSTATE_ERROR; | ||
2254 | _state(call); | ||
2255 | if (local) | ||
2256 | call->app_err_state = RXRPC_ESTATE_LOCAL_ERROR; | ||
2257 | else | ||
2258 | call->app_err_state = RXRPC_ESTATE_REMOTE_ERROR; | ||
2259 | call->app_errno = errno; | ||
2260 | call->app_mark = RXRPC_APP_MARK_EOF; | ||
2261 | call->app_read_buf = NULL; | ||
2262 | call->app_async_read = 0; | ||
2263 | |||
2264 | /* map the error */ | ||
2265 | call->app_aemap_func(call); | ||
2266 | |||
2267 | del_timer_sync(&call->acks_timeout); | ||
2268 | del_timer_sync(&call->rcv_timeout); | ||
2269 | del_timer_sync(&call->ackr_dfr_timo); | ||
2270 | |||
2271 | spin_unlock(&call->lock); | ||
2272 | |||
2273 | call->app_error_func(call); | ||
2274 | } | ||
2275 | |||
2276 | _leave(""); | ||
2277 | } /* end rxrpc_call_handle_error() */ | ||
diff --git a/net/rxrpc/connection.c b/net/rxrpc/connection.c deleted file mode 100644 index 665a99952440..000000000000 --- a/net/rxrpc/connection.c +++ /dev/null | |||
@@ -1,777 +0,0 @@ | |||
1 | /* connection.c: Rx connection routines | ||
2 | * | ||
3 | * Copyright (C) 2002 Red Hat, Inc. All Rights Reserved. | ||
4 | * Written by David Howells (dhowells@redhat.com) | ||
5 | * | ||
6 | * This program is free software; you can redistribute it and/or | ||
7 | * modify it under the terms of the GNU General Public License | ||
8 | * as published by the Free Software Foundation; either version | ||
9 | * 2 of the License, or (at your option) any later version. | ||
10 | */ | ||
11 | |||
12 | #include <linux/sched.h> | ||
13 | #include <linux/slab.h> | ||
14 | #include <linux/module.h> | ||
15 | #include <rxrpc/rxrpc.h> | ||
16 | #include <rxrpc/transport.h> | ||
17 | #include <rxrpc/peer.h> | ||
18 | #include <rxrpc/connection.h> | ||
19 | #include <rxrpc/call.h> | ||
20 | #include <rxrpc/message.h> | ||
21 | #include <linux/udp.h> | ||
22 | #include <linux/ip.h> | ||
23 | #include <net/sock.h> | ||
24 | #include <asm/uaccess.h> | ||
25 | #include "internal.h" | ||
26 | |||
27 | __RXACCT_DECL(atomic_t rxrpc_connection_count); | ||
28 | |||
29 | LIST_HEAD(rxrpc_conns); | ||
30 | DECLARE_RWSEM(rxrpc_conns_sem); | ||
31 | unsigned long rxrpc_conn_timeout = 60 * 60; | ||
32 | |||
33 | static void rxrpc_conn_do_timeout(struct rxrpc_connection *conn); | ||
34 | |||
35 | static void __rxrpc_conn_timeout(rxrpc_timer_t *timer) | ||
36 | { | ||
37 | struct rxrpc_connection *conn = | ||
38 | list_entry(timer, struct rxrpc_connection, timeout); | ||
39 | |||
40 | _debug("Rx CONN TIMEOUT [%p{u=%d}]", conn, atomic_read(&conn->usage)); | ||
41 | |||
42 | rxrpc_conn_do_timeout(conn); | ||
43 | } | ||
44 | |||
45 | static const struct rxrpc_timer_ops rxrpc_conn_timer_ops = { | ||
46 | .timed_out = __rxrpc_conn_timeout, | ||
47 | }; | ||
48 | |||
49 | /*****************************************************************************/ | ||
50 | /* | ||
51 | * create a new connection record | ||
52 | */ | ||
53 | static inline int __rxrpc_create_connection(struct rxrpc_peer *peer, | ||
54 | struct rxrpc_connection **_conn) | ||
55 | { | ||
56 | struct rxrpc_connection *conn; | ||
57 | |||
58 | _enter("%p",peer); | ||
59 | |||
60 | /* allocate and initialise a connection record */ | ||
61 | conn = kzalloc(sizeof(struct rxrpc_connection), GFP_KERNEL); | ||
62 | if (!conn) { | ||
63 | _leave(" = -ENOMEM"); | ||
64 | return -ENOMEM; | ||
65 | } | ||
66 | |||
67 | atomic_set(&conn->usage, 1); | ||
68 | |||
69 | INIT_LIST_HEAD(&conn->link); | ||
70 | INIT_LIST_HEAD(&conn->id_link); | ||
71 | init_waitqueue_head(&conn->chanwait); | ||
72 | spin_lock_init(&conn->lock); | ||
73 | rxrpc_timer_init(&conn->timeout, &rxrpc_conn_timer_ops); | ||
74 | |||
75 | do_gettimeofday(&conn->atime); | ||
76 | conn->mtu_size = 1024; | ||
77 | conn->peer = peer; | ||
78 | conn->trans = peer->trans; | ||
79 | |||
80 | __RXACCT(atomic_inc(&rxrpc_connection_count)); | ||
81 | *_conn = conn; | ||
82 | _leave(" = 0 (%p)", conn); | ||
83 | |||
84 | return 0; | ||
85 | } /* end __rxrpc_create_connection() */ | ||
86 | |||
87 | /*****************************************************************************/ | ||
88 | /* | ||
89 | * create a new connection record for outgoing connections | ||
90 | */ | ||
91 | int rxrpc_create_connection(struct rxrpc_transport *trans, | ||
92 | __be16 port, | ||
93 | __be32 addr, | ||
94 | uint16_t service_id, | ||
95 | void *security, | ||
96 | struct rxrpc_connection **_conn) | ||
97 | { | ||
98 | struct rxrpc_connection *candidate, *conn; | ||
99 | struct rxrpc_peer *peer; | ||
100 | struct list_head *_p; | ||
101 | __be32 connid; | ||
102 | int ret; | ||
103 | |||
104 | _enter("%p{%hu},%u,%hu", trans, trans->port, ntohs(port), service_id); | ||
105 | |||
106 | /* get a peer record */ | ||
107 | ret = rxrpc_peer_lookup(trans, addr, &peer); | ||
108 | if (ret < 0) { | ||
109 | _leave(" = %d", ret); | ||
110 | return ret; | ||
111 | } | ||
112 | |||
113 | /* allocate and initialise a connection record */ | ||
114 | ret = __rxrpc_create_connection(peer, &candidate); | ||
115 | if (ret < 0) { | ||
116 | rxrpc_put_peer(peer); | ||
117 | _leave(" = %d", ret); | ||
118 | return ret; | ||
119 | } | ||
120 | |||
121 | /* fill in the specific bits */ | ||
122 | candidate->addr.sin_family = AF_INET; | ||
123 | candidate->addr.sin_port = port; | ||
124 | candidate->addr.sin_addr.s_addr = addr; | ||
125 | |||
126 | candidate->in_epoch = rxrpc_epoch; | ||
127 | candidate->out_epoch = rxrpc_epoch; | ||
128 | candidate->in_clientflag = 0; | ||
129 | candidate->out_clientflag = RXRPC_CLIENT_INITIATED; | ||
130 | candidate->service_id = htons(service_id); | ||
131 | |||
132 | /* invent a unique connection ID */ | ||
133 | write_lock(&peer->conn_idlock); | ||
134 | |||
135 | try_next_id: | ||
136 | connid = htonl(peer->conn_idcounter & RXRPC_CIDMASK); | ||
137 | peer->conn_idcounter += RXRPC_MAXCALLS; | ||
138 | |||
139 | list_for_each(_p, &peer->conn_idlist) { | ||
140 | conn = list_entry(_p, struct rxrpc_connection, id_link); | ||
141 | if (connid == conn->conn_id) | ||
142 | goto try_next_id; | ||
143 | if (connid > conn->conn_id) | ||
144 | break; | ||
145 | } | ||
146 | |||
147 | _debug("selected candidate conn ID %x.%u", | ||
148 | ntohl(peer->addr.s_addr), ntohl(connid)); | ||
149 | |||
150 | candidate->conn_id = connid; | ||
151 | list_add_tail(&candidate->id_link, _p); | ||
152 | |||
153 | write_unlock(&peer->conn_idlock); | ||
154 | |||
155 | /* attach to peer */ | ||
156 | candidate->peer = peer; | ||
157 | |||
158 | write_lock(&peer->conn_lock); | ||
159 | |||
160 | /* search the peer's transport graveyard list */ | ||
161 | spin_lock(&peer->conn_gylock); | ||
162 | list_for_each(_p, &peer->conn_graveyard) { | ||
163 | conn = list_entry(_p, struct rxrpc_connection, link); | ||
164 | if (conn->addr.sin_port == candidate->addr.sin_port && | ||
165 | conn->security_ix == candidate->security_ix && | ||
166 | conn->service_id == candidate->service_id && | ||
167 | conn->in_clientflag == 0) | ||
168 | goto found_in_graveyard; | ||
169 | } | ||
170 | spin_unlock(&peer->conn_gylock); | ||
171 | |||
172 | /* pick the new candidate */ | ||
173 | _debug("created connection: {%08x} [out]", ntohl(candidate->conn_id)); | ||
174 | atomic_inc(&peer->conn_count); | ||
175 | conn = candidate; | ||
176 | candidate = NULL; | ||
177 | |||
178 | make_active: | ||
179 | list_add_tail(&conn->link, &peer->conn_active); | ||
180 | write_unlock(&peer->conn_lock); | ||
181 | |||
182 | if (candidate) { | ||
183 | write_lock(&peer->conn_idlock); | ||
184 | list_del(&candidate->id_link); | ||
185 | write_unlock(&peer->conn_idlock); | ||
186 | |||
187 | __RXACCT(atomic_dec(&rxrpc_connection_count)); | ||
188 | kfree(candidate); | ||
189 | } | ||
190 | else { | ||
191 | down_write(&rxrpc_conns_sem); | ||
192 | list_add_tail(&conn->proc_link, &rxrpc_conns); | ||
193 | up_write(&rxrpc_conns_sem); | ||
194 | } | ||
195 | |||
196 | *_conn = conn; | ||
197 | _leave(" = 0 (%p)", conn); | ||
198 | |||
199 | return 0; | ||
200 | |||
201 | /* handle resurrecting a connection from the graveyard */ | ||
202 | found_in_graveyard: | ||
203 | _debug("resurrecting connection: {%08x} [out]", ntohl(conn->conn_id)); | ||
204 | rxrpc_get_connection(conn); | ||
205 | rxrpc_krxtimod_del_timer(&conn->timeout); | ||
206 | list_del_init(&conn->link); | ||
207 | spin_unlock(&peer->conn_gylock); | ||
208 | goto make_active; | ||
209 | } /* end rxrpc_create_connection() */ | ||
210 | |||
211 | /*****************************************************************************/ | ||
212 | /* | ||
213 | * lookup the connection for an incoming packet | ||
214 | * - create a new connection record for unrecorded incoming connections | ||
215 | */ | ||
216 | int rxrpc_connection_lookup(struct rxrpc_peer *peer, | ||
217 | struct rxrpc_message *msg, | ||
218 | struct rxrpc_connection **_conn) | ||
219 | { | ||
220 | struct rxrpc_connection *conn, *candidate = NULL; | ||
221 | struct list_head *_p; | ||
222 | struct sk_buff *pkt = msg->pkt; | ||
223 | int ret, fresh = 0; | ||
224 | __be32 x_epoch, x_connid; | ||
225 | __be16 x_port, x_servid; | ||
226 | __u32 x_secix; | ||
227 | u8 x_clflag; | ||
228 | |||
229 | _enter("%p{{%hu}},%u,%hu", | ||
230 | peer, | ||
231 | peer->trans->port, | ||
232 | ntohs(udp_hdr(pkt)->source), | ||
233 | ntohs(msg->hdr.serviceId)); | ||
234 | |||
235 | x_port = udp_hdr(pkt)->source; | ||
236 | x_epoch = msg->hdr.epoch; | ||
237 | x_clflag = msg->hdr.flags & RXRPC_CLIENT_INITIATED; | ||
238 | x_connid = htonl(ntohl(msg->hdr.cid) & RXRPC_CIDMASK); | ||
239 | x_servid = msg->hdr.serviceId; | ||
240 | x_secix = msg->hdr.securityIndex; | ||
241 | |||
242 | /* [common case] search the transport's active list first */ | ||
243 | read_lock(&peer->conn_lock); | ||
244 | list_for_each(_p, &peer->conn_active) { | ||
245 | conn = list_entry(_p, struct rxrpc_connection, link); | ||
246 | if (conn->addr.sin_port == x_port && | ||
247 | conn->in_epoch == x_epoch && | ||
248 | conn->conn_id == x_connid && | ||
249 | conn->security_ix == x_secix && | ||
250 | conn->service_id == x_servid && | ||
251 | conn->in_clientflag == x_clflag) | ||
252 | goto found_active; | ||
253 | } | ||
254 | read_unlock(&peer->conn_lock); | ||
255 | |||
256 | /* [uncommon case] not active | ||
257 | * - create a candidate for a new record if an inbound connection | ||
258 | * - only examine the graveyard for an outbound connection | ||
259 | */ | ||
260 | if (x_clflag) { | ||
261 | ret = __rxrpc_create_connection(peer, &candidate); | ||
262 | if (ret < 0) { | ||
263 | _leave(" = %d", ret); | ||
264 | return ret; | ||
265 | } | ||
266 | |||
267 | /* fill in the specifics */ | ||
268 | candidate->addr.sin_family = AF_INET; | ||
269 | candidate->addr.sin_port = x_port; | ||
270 | candidate->addr.sin_addr.s_addr = ip_hdr(pkt)->saddr; | ||
271 | candidate->in_epoch = x_epoch; | ||
272 | candidate->out_epoch = x_epoch; | ||
273 | candidate->in_clientflag = RXRPC_CLIENT_INITIATED; | ||
274 | candidate->out_clientflag = 0; | ||
275 | candidate->conn_id = x_connid; | ||
276 | candidate->service_id = x_servid; | ||
277 | candidate->security_ix = x_secix; | ||
278 | } | ||
279 | |||
280 | /* search the active list again, just in case it appeared whilst we | ||
281 | * were busy */ | ||
282 | write_lock(&peer->conn_lock); | ||
283 | list_for_each(_p, &peer->conn_active) { | ||
284 | conn = list_entry(_p, struct rxrpc_connection, link); | ||
285 | if (conn->addr.sin_port == x_port && | ||
286 | conn->in_epoch == x_epoch && | ||
287 | conn->conn_id == x_connid && | ||
288 | conn->security_ix == x_secix && | ||
289 | conn->service_id == x_servid && | ||
290 | conn->in_clientflag == x_clflag) | ||
291 | goto found_active_second_chance; | ||
292 | } | ||
293 | |||
294 | /* search the transport's graveyard list */ | ||
295 | spin_lock(&peer->conn_gylock); | ||
296 | list_for_each(_p, &peer->conn_graveyard) { | ||
297 | conn = list_entry(_p, struct rxrpc_connection, link); | ||
298 | if (conn->addr.sin_port == x_port && | ||
299 | conn->in_epoch == x_epoch && | ||
300 | conn->conn_id == x_connid && | ||
301 | conn->security_ix == x_secix && | ||
302 | conn->service_id == x_servid && | ||
303 | conn->in_clientflag == x_clflag) | ||
304 | goto found_in_graveyard; | ||
305 | } | ||
306 | spin_unlock(&peer->conn_gylock); | ||
307 | |||
308 | /* outbound connections aren't created here */ | ||
309 | if (!x_clflag) { | ||
310 | write_unlock(&peer->conn_lock); | ||
311 | _leave(" = -ENOENT"); | ||
312 | return -ENOENT; | ||
313 | } | ||
314 | |||
315 | /* we can now add the new candidate to the list */ | ||
316 | _debug("created connection: {%08x} [in]", ntohl(candidate->conn_id)); | ||
317 | rxrpc_get_peer(peer); | ||
318 | conn = candidate; | ||
319 | candidate = NULL; | ||
320 | atomic_inc(&peer->conn_count); | ||
321 | fresh = 1; | ||
322 | |||
323 | make_active: | ||
324 | list_add_tail(&conn->link, &peer->conn_active); | ||
325 | |||
326 | success_uwfree: | ||
327 | write_unlock(&peer->conn_lock); | ||
328 | |||
329 | if (candidate) { | ||
330 | write_lock(&peer->conn_idlock); | ||
331 | list_del(&candidate->id_link); | ||
332 | write_unlock(&peer->conn_idlock); | ||
333 | |||
334 | __RXACCT(atomic_dec(&rxrpc_connection_count)); | ||
335 | kfree(candidate); | ||
336 | } | ||
337 | |||
338 | if (fresh) { | ||
339 | down_write(&rxrpc_conns_sem); | ||
340 | list_add_tail(&conn->proc_link, &rxrpc_conns); | ||
341 | up_write(&rxrpc_conns_sem); | ||
342 | } | ||
343 | |||
344 | success: | ||
345 | *_conn = conn; | ||
346 | _leave(" = 0 (%p)", conn); | ||
347 | return 0; | ||
348 | |||
349 | /* handle the connection being found in the active list straight off */ | ||
350 | found_active: | ||
351 | rxrpc_get_connection(conn); | ||
352 | read_unlock(&peer->conn_lock); | ||
353 | goto success; | ||
354 | |||
355 | /* handle resurrecting a connection from the graveyard */ | ||
356 | found_in_graveyard: | ||
357 | _debug("resurrecting connection: {%08x} [in]", ntohl(conn->conn_id)); | ||
358 | rxrpc_get_peer(peer); | ||
359 | rxrpc_get_connection(conn); | ||
360 | rxrpc_krxtimod_del_timer(&conn->timeout); | ||
361 | list_del_init(&conn->link); | ||
362 | spin_unlock(&peer->conn_gylock); | ||
363 | goto make_active; | ||
364 | |||
365 | /* handle finding the connection on the second time through the active | ||
366 | * list */ | ||
367 | found_active_second_chance: | ||
368 | rxrpc_get_connection(conn); | ||
369 | goto success_uwfree; | ||
370 | |||
371 | } /* end rxrpc_connection_lookup() */ | ||
372 | |||
/*****************************************************************************/
/*
 * finish using a connection record
 * - it will be transferred to the peer's connection graveyard when refcount
 *   reaches 0
 */
void rxrpc_put_connection(struct rxrpc_connection *conn)
{
	struct rxrpc_peer *peer;

	/* tolerate NULL so callers may put unconditionally */
	if (!conn)
		return;

	_enter("%p{u=%d p=%hu}",
	       conn, atomic_read(&conn->usage), ntohs(conn->addr.sin_port));

	peer = conn->peer;

	/* the graveyard lock covers the refcount drop to zero, so a
	 * concurrent graveyard lookup can't see a half-buried record */
	spin_lock(&peer->conn_gylock);

	/* sanity check */
	if (atomic_read(&conn->usage) <= 0)
		BUG();

	if (likely(!atomic_dec_and_test(&conn->usage))) {
		/* still in use elsewhere - nothing more to do */
		spin_unlock(&peer->conn_gylock);
		_leave("");
		return;
	}

	/* move to graveyard queue */
	_debug("burying connection: {%08x}", ntohl(conn->conn_id));
	list_move_tail(&conn->link, &peer->conn_graveyard);

	/* actual destruction is deferred to the timeout handler, unless the
	 * connection gets resurrected from the graveyard first */
	rxrpc_krxtimod_add_timer(&conn->timeout, rxrpc_conn_timeout * HZ);

	spin_unlock(&peer->conn_gylock);

	rxrpc_put_peer(conn->peer);

	_leave(" [killed]");
} /* end rxrpc_put_connection() */
414 | |||
/*****************************************************************************/
/*
 * free a connection record
 * - invoked when the graveyard timeout added by rxrpc_put_connection()
 *   expires
 */
static void rxrpc_conn_do_timeout(struct rxrpc_connection *conn)
{
	struct rxrpc_peer *peer;

	_enter("%p{u=%d p=%hu}",
	       conn, atomic_read(&conn->usage), ntohs(conn->addr.sin_port));

	peer = conn->peer;

	if (atomic_read(&conn->usage) < 0)
		BUG();

	/* remove from graveyard if still dead
	 * - a non-zero usage count means the connection was resurrected by a
	 *   lookup after the timer had already fired, so it must be left alone
	 */
	spin_lock(&peer->conn_gylock);
	if (atomic_read(&conn->usage) == 0) {
		list_del_init(&conn->link);
	}
	else {
		conn = NULL;
	}
	spin_unlock(&peer->conn_gylock);

	if (!conn) {
		_leave("");
		return; /* resurrected */
	}

	_debug("--- Destroying Connection %p{%08x} ---",
	       conn, ntohl(conn->conn_id));

	/* unlink from the /proc listing */
	down_write(&rxrpc_conns_sem);
	list_del(&conn->proc_link);
	up_write(&rxrpc_conns_sem);

	/* unlink from the peer's connection-ID index */
	write_lock(&peer->conn_idlock);
	list_del(&conn->id_link);
	write_unlock(&peer->conn_idlock);

	__RXACCT(atomic_dec(&rxrpc_connection_count));
	kfree(conn);

	/* if the graveyard is now empty, wake up anyone waiting for that */
	if (atomic_dec_and_test(&peer->conn_count))
		wake_up(&peer->conn_gy_waitq);

	_leave(" [destroyed]");
} /* end rxrpc_conn_do_timeout() */
466 | |||
/*****************************************************************************/
/*
 * clear all connection records from a peer endpoint
 * - called during peer teardown; blocks until every graveyarded connection
 *   has been destroyed
 */
void rxrpc_conn_clearall(struct rxrpc_peer *peer)
{
	DECLARE_WAITQUEUE(myself, current);

	struct rxrpc_connection *conn;
	int err;

	_enter("%p", peer);

	/* there shouldn't be any active conns remaining */
	if (!list_empty(&peer->conn_active))
		BUG();

	/* manually timeout all conns in the graveyard
	 * - the lock is dropped around each rxrpc_conn_do_timeout() call, so
	 *   the list head is re-read on every iteration
	 * - err != 0 means the timer expired by itself and the timeout
	 *   handler is (or will be) running it for us
	 */
	spin_lock(&peer->conn_gylock);
	while (!list_empty(&peer->conn_graveyard)) {
		conn = list_entry(peer->conn_graveyard.next,
				  struct rxrpc_connection, link);
		err = rxrpc_krxtimod_del_timer(&conn->timeout);
		spin_unlock(&peer->conn_gylock);

		if (err == 0)
			rxrpc_conn_do_timeout(conn);

		spin_lock(&peer->conn_gylock);
	}
	spin_unlock(&peer->conn_gylock);

	/* wait for the conn graveyard to be completely cleared */
	set_current_state(TASK_UNINTERRUPTIBLE);
	add_wait_queue(&peer->conn_gy_waitq, &myself);

	while (atomic_read(&peer->conn_count) != 0) {
		schedule();
		set_current_state(TASK_UNINTERRUPTIBLE);
	}

	remove_wait_queue(&peer->conn_gy_waitq, &myself);
	set_current_state(TASK_RUNNING);

	_leave("");
} /* end rxrpc_conn_clearall() */
513 | |||
/*****************************************************************************/
/*
 * allocate and prepare a message for sending out through the transport
 * endpoint
 * - dcount: number of caller-supplied kvecs (max 3; slot 0 of msg->data is
 *   reserved for the wire header, which presumably leaves 4 slots in
 *   total - confirm against struct rxrpc_message)
 * - on success, *_msg holds a message with one reference owned by the caller
 */
int rxrpc_conn_newmsg(struct rxrpc_connection *conn,
		      struct rxrpc_call *call,
		      uint8_t type,
		      int dcount,
		      struct kvec diov[],
		      gfp_t alloc_flags,
		      struct rxrpc_message **_msg)
{
	struct rxrpc_message *msg;
	int loop;

	_enter("%p{%d},%p,%u", conn, ntohs(conn->addr.sin_port), call, type);

	if (dcount > 3) {
		_leave(" = -EINVAL");
		return -EINVAL;
	}

	msg = kzalloc(sizeof(struct rxrpc_message), alloc_flags);
	if (!msg) {
		_leave(" = -ENOMEM");
		return -ENOMEM;
	}

	atomic_set(&msg->usage, 1);

	INIT_LIST_HEAD(&msg->link);

	msg->state = RXRPC_MSG_PREPARED;

	/* fill in the wire header from the connection/call state */
	msg->hdr.epoch		= conn->out_epoch;
	msg->hdr.cid		= conn->conn_id | (call ? call->chan_ix : 0);
	msg->hdr.callNumber	= call ? call->call_id : 0;
	msg->hdr.type		= type;
	msg->hdr.flags		= conn->out_clientflag;
	msg->hdr.securityIndex	= conn->security_ix;
	msg->hdr.serviceId	= conn->service_id;

	/* generate sequence numbers for data packets */
	if (call) {
		switch (type) {
		case RXRPC_PACKET_TYPE_DATA:
			msg->seq = ++call->snd_seq_count;
			msg->hdr.seq = htonl(msg->seq);
			break;
		case RXRPC_PACKET_TYPE_ACK:
			/* ACK sequence numbers are complicated. The following
			 * may be wrong:
			 * - jumbo packet ACKs should have a seq number
			 * - normal ACKs should not
			 */
		default:
			break;
		}
	}

	/* slot 0 carries the header; the caller's kvecs follow it */
	msg->dcount = dcount + 1;
	msg->dsize = sizeof(msg->hdr);
	msg->data[0].iov_len = sizeof(msg->hdr);
	msg->data[0].iov_base = &msg->hdr;

	for (loop=0; loop < dcount; loop++) {
		msg->dsize += diov[loop].iov_len;
		msg->data[loop+1].iov_len = diov[loop].iov_len;
		msg->data[loop+1].iov_base = diov[loop].iov_base;
	}

	__RXACCT(atomic_inc(&rxrpc_message_count));
	*_msg = msg;
	_leave(" = 0 (%p) #%d", msg, atomic_read(&rxrpc_message_count));
	return 0;
} /* end rxrpc_conn_newmsg() */
591 | |||
/*****************************************************************************/
/*
 * free a message
 * - called when the message's usage count has reached zero
 */
void __rxrpc_put_message(struct rxrpc_message *msg)
{
	int loop;

	_enter("%p #%d", msg, atomic_read(&rxrpc_message_count));

	if (msg->pkt)
		kfree_skb(msg->pkt);
	rxrpc_put_connection(msg->conn);

	/* free any data buffers whose ownership was passed to the message
	 * (one dfree bit per msg->data[] slot) */
	for (loop = 0; loop < 8; loop++)
		if (test_bit(loop, &msg->dfree))
			kfree(msg->data[loop].iov_base);

	__RXACCT(atomic_dec(&rxrpc_message_count));
	kfree(msg);

	_leave("");
} /* end __rxrpc_put_message() */
615 | |||
/*****************************************************************************/
/*
 * send a message out through the transport endpoint
 * - returns 0 on success, or the negative error from kernel_sendmsg()
 */
int rxrpc_conn_sendmsg(struct rxrpc_connection *conn,
		       struct rxrpc_message *msg)
{
	struct msghdr msghdr;
	int ret;

	_enter("%p{%d}", conn, ntohs(conn->addr.sin_port));

	/* fill in some fields in the header
	 * - the serial counter is shared across the connection, hence the
	 *   lock around its increment */
	spin_lock(&conn->lock);
	msg->hdr.serial = htonl(++conn->serial_counter);
	msg->rttdone = 0;
	spin_unlock(&conn->lock);

	/* set up the message to be transmitted */
	msghdr.msg_name = &conn->addr;
	msghdr.msg_namelen = sizeof(conn->addr);
	msghdr.msg_control = NULL;
	msghdr.msg_controllen = 0;
	msghdr.msg_flags = MSG_CONFIRM | MSG_DONTWAIT;

	_net("Sending message type %d of %Zd bytes to %08x:%d",
	     msg->hdr.type,
	     msg->dsize,
	     ntohl(conn->addr.sin_addr.s_addr),
	     ntohs(conn->addr.sin_port));

	/* send the message */
	ret = kernel_sendmsg(conn->trans->socket, &msghdr,
			     msg->data, msg->dcount, msg->dsize);
	if (ret < 0) {
		msg->state = RXRPC_MSG_ERROR;
	} else {
		msg->state = RXRPC_MSG_SENT;
		ret = 0;

		/* stamp the message with the send time for RTT estimation */
		spin_lock(&conn->lock);
		do_gettimeofday(&conn->atime);
		msg->stamp = conn->atime;
		spin_unlock(&conn->lock);
	}

	_leave(" = %d", ret);

	return ret;
} /* end rxrpc_conn_sendmsg() */
666 | |||
/*****************************************************************************/
/*
 * deal with a subsequent call packet
 * - if call is NULL, the target call is looked up from the channel index in
 *   the packet's connection ID
 * - queues the message on the call's receive queue in sequence order and
 *   kicks krxiod to process it
 */
int rxrpc_conn_receive_call_packet(struct rxrpc_connection *conn,
				   struct rxrpc_call *call,
				   struct rxrpc_message *msg)
{
	struct rxrpc_message *pmsg;
	struct dst_entry *dst;
	struct list_head *_p;
	unsigned cix, seq;
	int ret = 0;

	_enter("%p,%p,%p", conn, call, msg);

	if (!call) {
		cix = ntohl(msg->hdr.cid) & RXRPC_CHANNELMASK;

		spin_lock(&conn->lock);
		call = conn->channels[cix];

		/* abort if the channel is empty or holds a different call */
		if (!call || call->call_id != msg->hdr.callNumber) {
			spin_unlock(&conn->lock);
			rxrpc_trans_immediate_abort(conn->trans, msg, -ENOENT);
			goto out;
		}
		else {
			rxrpc_get_call(call);
			spin_unlock(&conn->lock);
		}
	}
	else {
		rxrpc_get_call(call);
	}

	_proto("Received packet %%%u [%u] on call %hu:%u:%u",
	       ntohl(msg->hdr.serial),
	       ntohl(msg->hdr.seq),
	       ntohs(msg->hdr.serviceId),
	       ntohl(conn->conn_id),
	       ntohl(call->call_id));

	call->pkt_rcv_count++;

	/* refresh the peer's interface MTU estimate from the packet's route */
	dst = msg->pkt->dst;
	if (dst && dst->dev)
		conn->peer->if_mtu =
			dst->dev->mtu - dst->dev->hard_header_len;

	/* queue on the call in seq order
	 * - linear scan for the first queued message with a higher seq */
	rxrpc_get_message(msg);
	seq = msg->seq;

	spin_lock(&call->lock);
	list_for_each(_p, &call->rcv_receiveq) {
		pmsg = list_entry(_p, struct rxrpc_message, link);
		if (pmsg->seq > seq)
			break;
	}
	list_add_tail(&msg->link, _p);

	/* reset the activity timeout */
	call->flags |= RXRPC_CALL_RCV_PKT;
	mod_timer(&call->rcv_timeout,jiffies + rxrpc_call_rcv_timeout * HZ);

	spin_unlock(&call->lock);

	rxrpc_krxiod_queue_call(call);

	rxrpc_put_call(call);
 out:
	_leave(" = %d", ret);
	return ret;
} /* end rxrpc_conn_receive_call_packet() */
742 | |||
/*****************************************************************************/
/*
 * handle an ICMP error being applied to a connection
 * - propagates the error to every call currently occupying one of the
 *   connection's four channels
 */
void rxrpc_conn_handle_error(struct rxrpc_connection *conn,
			     int local, int errno)
{
	struct rxrpc_call *calls[4];
	int loop;

	_enter("%p{%d},%d", conn, ntohs(conn->addr.sin_port), errno);

	/* get a ref to all my calls in one go
	 * - refs are taken under the lock so the calls can then be kicked
	 *   without holding it */
	memset(calls, 0, sizeof(calls));
	spin_lock(&conn->lock);

	for (loop = 3; loop >= 0; loop--) {
		if (conn->channels[loop]) {
			calls[loop] = conn->channels[loop];
			rxrpc_get_call(calls[loop]);
		}
	}

	spin_unlock(&conn->lock);

	/* now kick them all */
	for (loop = 3; loop >= 0; loop--) {
		if (calls[loop]) {
			rxrpc_call_handle_error(calls[loop], local, errno);
			rxrpc_put_call(calls[loop]);
		}
	}

	_leave("");
} /* end rxrpc_conn_handle_error() */
diff --git a/net/rxrpc/internal.h b/net/rxrpc/internal.h deleted file mode 100644 index cc0c5795a103..000000000000 --- a/net/rxrpc/internal.h +++ /dev/null | |||
@@ -1,106 +0,0 @@ | |||
/* internal.h: internal Rx RPC stuff
 *
 * Copyright (c) 2002 David Howells (dhowells@redhat.com).
 */

#ifndef RXRPC_INTERNAL_H
#define RXRPC_INTERNAL_H

#include <linux/compiler.h>
#include <linux/kernel.h>

/*
 * debug accounting
 * - when enabled (the "#if 1" below), the __RXACCT* macros maintain global
 *   object counters that can be used to spot leaks
 */
#if 1
#define __RXACCT_DECL(X) X
#define __RXACCT(X) do { X; } while(0)
#else
#define __RXACCT_DECL(X)
#define __RXACCT(X) do { } while(0)
#endif

__RXACCT_DECL(extern atomic_t rxrpc_transport_count);
__RXACCT_DECL(extern atomic_t rxrpc_peer_count);
__RXACCT_DECL(extern atomic_t rxrpc_connection_count);
__RXACCT_DECL(extern atomic_t rxrpc_call_count);
__RXACCT_DECL(extern atomic_t rxrpc_message_count);

/*
 * debug tracing
 * - the underscore-prefixed wrappers are gated at runtime by the rxrpc_k*
 *   flags in the "#else" branch; flipping to the "#if 0" branch forces all
 *   tracing on unconditionally
 */
#define kenter(FMT, a...)	printk("==> %s("FMT")\n",__FUNCTION__ , ##a)
#define kleave(FMT, a...)	printk("<== %s()"FMT"\n",__FUNCTION__ , ##a)
#define kdebug(FMT, a...)	printk("    "FMT"\n" , ##a)
#define kproto(FMT, a...)	printk("### "FMT"\n" , ##a)
#define knet(FMT, a...)		printk("    "FMT"\n" , ##a)

#if 0
#define _enter(FMT, a...)	kenter(FMT , ##a)
#define _leave(FMT, a...)	kleave(FMT , ##a)
#define _debug(FMT, a...)	kdebug(FMT , ##a)
#define _proto(FMT, a...)	kproto(FMT , ##a)
#define _net(FMT, a...)		knet(FMT , ##a)
#else
#define _enter(FMT, a...)	do { if (rxrpc_ktrace) kenter(FMT , ##a); } while(0)
#define _leave(FMT, a...)	do { if (rxrpc_ktrace) kleave(FMT , ##a); } while(0)
#define _debug(FMT, a...)	do { if (rxrpc_kdebug) kdebug(FMT , ##a); } while(0)
#define _proto(FMT, a...)	do { if (rxrpc_kproto) kproto(FMT , ##a); } while(0)
#define _net(FMT, a...)		do { if (rxrpc_knet)   knet  (FMT , ##a); } while(0)
#endif

/* dequeue and discard any signals pending on the calling kernel thread,
 * so daemon loops woken by a signal can carry on */
static inline void rxrpc_discard_my_signals(void)
{
	while (signal_pending(current)) {
		siginfo_t sinfo;

		spin_lock_irq(&current->sighand->siglock);
		dequeue_signal(current, &current->blocked, &sinfo);
		spin_unlock_irq(&current->sighand->siglock);
	}
}

/*
 * call.c
 */
extern struct list_head rxrpc_calls;
extern struct rw_semaphore rxrpc_calls_sem;

/*
 * connection.c
 */
extern struct list_head rxrpc_conns;
extern struct rw_semaphore rxrpc_conns_sem;
extern unsigned long rxrpc_conn_timeout;

extern void rxrpc_conn_clearall(struct rxrpc_peer *peer);

/*
 * peer.c
 */
extern struct list_head rxrpc_peers;
extern struct rw_semaphore rxrpc_peers_sem;
extern unsigned long rxrpc_peer_timeout;

extern void rxrpc_peer_calculate_rtt(struct rxrpc_peer *peer,
				     struct rxrpc_message *msg,
				     struct rxrpc_message *resp);

extern void rxrpc_peer_clearall(struct rxrpc_transport *trans);


/*
 * proc.c
 */
#ifdef CONFIG_PROC_FS
extern int rxrpc_proc_init(void);
extern void rxrpc_proc_cleanup(void);
#endif

/*
 * transport.c
 */
extern struct list_head rxrpc_proc_transports;
extern struct rw_semaphore rxrpc_proc_transports_sem;

#endif /* RXRPC_INTERNAL_H */
diff --git a/net/rxrpc/krxiod.c b/net/rxrpc/krxiod.c deleted file mode 100644 index bbbcd6c24048..000000000000 --- a/net/rxrpc/krxiod.c +++ /dev/null | |||
@@ -1,262 +0,0 @@ | |||
1 | /* krxiod.c: Rx I/O daemon | ||
2 | * | ||
3 | * Copyright (C) 2002 Red Hat, Inc. All Rights Reserved. | ||
4 | * Written by David Howells (dhowells@redhat.com) | ||
5 | * | ||
6 | * This program is free software; you can redistribute it and/or | ||
7 | * modify it under the terms of the GNU General Public License | ||
8 | * as published by the Free Software Foundation; either version | ||
9 | * 2 of the License, or (at your option) any later version. | ||
10 | */ | ||
11 | |||
12 | #include <linux/sched.h> | ||
13 | #include <linux/completion.h> | ||
14 | #include <linux/spinlock.h> | ||
15 | #include <linux/init.h> | ||
16 | #include <linux/freezer.h> | ||
17 | #include <rxrpc/krxiod.h> | ||
18 | #include <rxrpc/transport.h> | ||
19 | #include <rxrpc/peer.h> | ||
20 | #include <rxrpc/call.h> | ||
21 | #include "internal.h" | ||
22 | |||
23 | static DECLARE_WAIT_QUEUE_HEAD(rxrpc_krxiod_sleepq); | ||
24 | static DECLARE_COMPLETION(rxrpc_krxiod_dead); | ||
25 | |||
26 | static atomic_t rxrpc_krxiod_qcount = ATOMIC_INIT(0); | ||
27 | |||
28 | static LIST_HEAD(rxrpc_krxiod_transportq); | ||
29 | static DEFINE_SPINLOCK(rxrpc_krxiod_transportq_lock); | ||
30 | |||
31 | static LIST_HEAD(rxrpc_krxiod_callq); | ||
32 | static DEFINE_SPINLOCK(rxrpc_krxiod_callq_lock); | ||
33 | |||
34 | static volatile int rxrpc_krxiod_die; | ||
35 | |||
/*****************************************************************************/
/*
 * Rx I/O daemon
 * - main loop of the krxiod kernel thread: sleeps until work is queued (or
 *   it is told to die), then services at most one transport and one call
 *   per pass
 */
static int rxrpc_krxiod(void *arg)
{
	DECLARE_WAITQUEUE(krxiod,current);

	printk("Started krxiod %d\n",current->pid);

	daemonize("krxiod");

	/* loop around waiting for work to do */
	do {
		/* wait for work or to be told to exit */
		_debug("### Begin Wait");
		if (!atomic_read(&rxrpc_krxiod_qcount)) {
			set_current_state(TASK_INTERRUPTIBLE);

			add_wait_queue(&rxrpc_krxiod_sleepq, &krxiod);

			for (;;) {
				set_current_state(TASK_INTERRUPTIBLE);
				if (atomic_read(&rxrpc_krxiod_qcount) ||
				    rxrpc_krxiod_die ||
				    signal_pending(current))
					break;

				schedule();
			}

			remove_wait_queue(&rxrpc_krxiod_sleepq, &krxiod);
			set_current_state(TASK_RUNNING);
		}
		_debug("### End Wait");

		/* do work if been given some to do */
		_debug("### Begin Work");

		/* see if there's a transport in need of attention
		 * - the unlocked peek is re-checked under the lock */
		if (!list_empty(&rxrpc_krxiod_transportq)) {
			struct rxrpc_transport *trans = NULL;

			spin_lock_irq(&rxrpc_krxiod_transportq_lock);

			if (!list_empty(&rxrpc_krxiod_transportq)) {
				trans = list_entry(
					rxrpc_krxiod_transportq.next,
					struct rxrpc_transport,
					krxiodq_link);

				list_del_init(&trans->krxiodq_link);
				atomic_dec(&rxrpc_krxiod_qcount);

				/* make sure it hasn't gone away and doesn't go
				 * away */
				if (atomic_read(&trans->usage)>0)
					rxrpc_get_transport(trans);
				else
					trans = NULL;
			}

			spin_unlock_irq(&rxrpc_krxiod_transportq_lock);

			if (trans) {
				rxrpc_trans_receive_packet(trans);
				rxrpc_put_transport(trans);
			}
		}

		/* see if there's a call in need of attention
		 * - same unlocked-peek-then-recheck pattern as above */
		if (!list_empty(&rxrpc_krxiod_callq)) {
			struct rxrpc_call *call = NULL;

			spin_lock_irq(&rxrpc_krxiod_callq_lock);

			if (!list_empty(&rxrpc_krxiod_callq)) {
				call = list_entry(rxrpc_krxiod_callq.next,
						  struct rxrpc_call,
						  rcv_krxiodq_lk);
				list_del_init(&call->rcv_krxiodq_lk);
				atomic_dec(&rxrpc_krxiod_qcount);

				/* make sure it hasn't gone away and doesn't go
				 * away */
				if (atomic_read(&call->usage) > 0) {
					_debug("@@@ KRXIOD"
					       " Begin Attend Call %p", call);
					rxrpc_get_call(call);
				}
				else {
					call = NULL;
				}
			}

			spin_unlock_irq(&rxrpc_krxiod_callq_lock);

			if (call) {
				rxrpc_call_do_stuff(call);
				rxrpc_put_call(call);
				_debug("@@@ KRXIOD End Attend Call %p", call);
			}
		}

		_debug("### End Work");

		try_to_freeze();

		/* discard pending signals */
		rxrpc_discard_my_signals();

	} while (!rxrpc_krxiod_die);

	/* and that's all */
	complete_and_exit(&rxrpc_krxiod_dead, 0);

} /* end rxrpc_krxiod() */
153 | |||
/*****************************************************************************/
/*
 * start up a krxiod daemon
 * - returns the new thread's pid, or a negative error from kernel_thread()
 */
int __init rxrpc_krxiod_init(void)
{
	return kernel_thread(rxrpc_krxiod, NULL, 0);

} /* end rxrpc_krxiod_init() */
163 | |||
/*****************************************************************************/
/*
 * kill the krxiod daemon and wait for it to complete
 * - blocks until the daemon has signalled rxrpc_krxiod_dead
 */
void rxrpc_krxiod_kill(void)
{
	rxrpc_krxiod_die = 1;
	wake_up_all(&rxrpc_krxiod_sleepq);
	wait_for_completion(&rxrpc_krxiod_dead);

} /* end rxrpc_krxiod_kill() */
175 | |||
/*****************************************************************************/
/*
 * queue a transport for attention by krxiod
 * - a transport already on the queue is left where it is
 */
void rxrpc_krxiod_queue_transport(struct rxrpc_transport *trans)
{
	unsigned long flags;

	_enter("");

	/* unlocked peek first; re-checked under the lock to close the race
	 * with a concurrent queue attempt */
	if (list_empty(&trans->krxiodq_link)) {
		spin_lock_irqsave(&rxrpc_krxiod_transportq_lock, flags);

		if (list_empty(&trans->krxiodq_link)) {
			/* only queue transports that are still alive */
			if (atomic_read(&trans->usage) > 0) {
				list_add_tail(&trans->krxiodq_link,
					      &rxrpc_krxiod_transportq);
				atomic_inc(&rxrpc_krxiod_qcount);
			}
		}

		spin_unlock_irqrestore(&rxrpc_krxiod_transportq_lock, flags);
		wake_up_all(&rxrpc_krxiod_sleepq);
	}

	_leave("");

} /* end rxrpc_krxiod_queue_transport() */
204 | |||
/*****************************************************************************/
/*
 * dequeue a transport from krxiod's attention queue
 * - a no-op if the transport is not currently queued
 */
void rxrpc_krxiod_dequeue_transport(struct rxrpc_transport *trans)
{
	unsigned long flags;

	_enter("");

	spin_lock_irqsave(&rxrpc_krxiod_transportq_lock, flags);
	if (!list_empty(&trans->krxiodq_link)) {
		list_del_init(&trans->krxiodq_link);
		atomic_dec(&rxrpc_krxiod_qcount);
	}
	spin_unlock_irqrestore(&rxrpc_krxiod_transportq_lock, flags);

	_leave("");

} /* end rxrpc_krxiod_dequeue_transport() */
225 | |||
226 | /*****************************************************************************/ | ||
227 | /* | ||
228 | * queue a call for attention by krxiod | ||
229 | */ | ||
230 | void rxrpc_krxiod_queue_call(struct rxrpc_call *call) | ||
231 | { | ||
232 | unsigned long flags; | ||
233 | |||
234 | if (list_empty(&call->rcv_krxiodq_lk)) { | ||
235 | spin_lock_irqsave(&rxrpc_krxiod_callq_lock, flags); | ||
236 | if (atomic_read(&call->usage) > 0) { | ||
237 | list_add_tail(&call->rcv_krxiodq_lk, | ||
238 | &rxrpc_krxiod_callq); | ||
239 | atomic_inc(&rxrpc_krxiod_qcount); | ||
240 | } | ||
241 | spin_unlock_irqrestore(&rxrpc_krxiod_callq_lock, flags); | ||
242 | } | ||
243 | wake_up_all(&rxrpc_krxiod_sleepq); | ||
244 | |||
245 | } /* end rxrpc_krxiod_queue_call() */ | ||
246 | |||
/*****************************************************************************/
/*
 * dequeue a call from krxiod's attention queue
 * - a no-op if the call is not currently queued
 */
void rxrpc_krxiod_dequeue_call(struct rxrpc_call *call)
{
	unsigned long flags;

	spin_lock_irqsave(&rxrpc_krxiod_callq_lock, flags);
	if (!list_empty(&call->rcv_krxiodq_lk)) {
		list_del_init(&call->rcv_krxiodq_lk);
		atomic_dec(&rxrpc_krxiod_qcount);
	}
	spin_unlock_irqrestore(&rxrpc_krxiod_callq_lock, flags);

} /* end rxrpc_krxiod_dequeue_call() */
diff --git a/net/rxrpc/krxsecd.c b/net/rxrpc/krxsecd.c deleted file mode 100644 index 9a1e7f5e034c..000000000000 --- a/net/rxrpc/krxsecd.c +++ /dev/null | |||
@@ -1,270 +0,0 @@ | |||
1 | /* krxsecd.c: Rx security daemon | ||
2 | * | ||
3 | * Copyright (C) 2002 Red Hat, Inc. All Rights Reserved. | ||
4 | * Written by David Howells (dhowells@redhat.com) | ||
5 | * | ||
6 | * This program is free software; you can redistribute it and/or | ||
7 | * modify it under the terms of the GNU General Public License | ||
8 | * as published by the Free Software Foundation; either version | ||
9 | * 2 of the License, or (at your option) any later version. | ||
10 | * | ||
11 | * This daemon deals with: | ||
12 | * - consulting the application as to whether inbound peers and calls should be authorised | ||
13 | * - generating security challenges for inbound connections | ||
14 | * - responding to security challenges on outbound connections | ||
15 | */ | ||
16 | |||
17 | #include <linux/module.h> | ||
18 | #include <linux/sched.h> | ||
19 | #include <linux/completion.h> | ||
20 | #include <linux/spinlock.h> | ||
21 | #include <linux/init.h> | ||
22 | #include <rxrpc/krxsecd.h> | ||
23 | #include <rxrpc/transport.h> | ||
24 | #include <rxrpc/connection.h> | ||
25 | #include <rxrpc/message.h> | ||
26 | #include <rxrpc/peer.h> | ||
27 | #include <rxrpc/call.h> | ||
28 | #include <linux/udp.h> | ||
29 | #include <linux/ip.h> | ||
30 | #include <linux/freezer.h> | ||
31 | #include <net/sock.h> | ||
32 | #include "internal.h" | ||
33 | |||
34 | static DECLARE_WAIT_QUEUE_HEAD(rxrpc_krxsecd_sleepq); | ||
35 | static DECLARE_COMPLETION(rxrpc_krxsecd_dead); | ||
36 | static volatile int rxrpc_krxsecd_die; | ||
37 | |||
38 | static atomic_t rxrpc_krxsecd_qcount; | ||
39 | |||
40 | /* queue of unprocessed inbound messages with seqno #1 and | ||
41 | * RXRPC_CLIENT_INITIATED flag set */ | ||
42 | static LIST_HEAD(rxrpc_krxsecd_initmsgq); | ||
43 | static DEFINE_SPINLOCK(rxrpc_krxsecd_initmsgq_lock); | ||
44 | |||
45 | static void rxrpc_krxsecd_process_incoming_call(struct rxrpc_message *msg); | ||
46 | |||
/*****************************************************************************/
/*
 * Rx security daemon
 * - main loop of the krxsecd kernel thread: sleeps until an initial
 *   inbound-call message is queued (or it is told to die), then processes
 *   at most one such message per pass
 */
static int rxrpc_krxsecd(void *arg)
{
	DECLARE_WAITQUEUE(krxsecd, current);

	int die;

	printk("Started krxsecd %d\n", current->pid);

	daemonize("krxsecd");

	/* loop around waiting for work to do */
	do {
		/* wait for work or to be told to exit */
		_debug("### Begin Wait");
		if (!atomic_read(&rxrpc_krxsecd_qcount)) {
			set_current_state(TASK_INTERRUPTIBLE);

			add_wait_queue(&rxrpc_krxsecd_sleepq, &krxsecd);

			for (;;) {
				set_current_state(TASK_INTERRUPTIBLE);
				if (atomic_read(&rxrpc_krxsecd_qcount) ||
				    rxrpc_krxsecd_die ||
				    signal_pending(current))
					break;

				schedule();
			}

			remove_wait_queue(&rxrpc_krxsecd_sleepq, &krxsecd);
			set_current_state(TASK_RUNNING);
		}
		/* sample the die flag once per pass so queued work is still
		 * drained on the final iteration */
		die = rxrpc_krxsecd_die;
		_debug("### End Wait");

		/* see if there're incoming calls in need of authenticating
		 * - unlocked peek, re-checked under the lock */
		_debug("### Begin Inbound Calls");

		if (!list_empty(&rxrpc_krxsecd_initmsgq)) {
			struct rxrpc_message *msg = NULL;

			spin_lock(&rxrpc_krxsecd_initmsgq_lock);

			if (!list_empty(&rxrpc_krxsecd_initmsgq)) {
				msg = list_entry(rxrpc_krxsecd_initmsgq.next,
						 struct rxrpc_message, link);
				list_del_init(&msg->link);
				atomic_dec(&rxrpc_krxsecd_qcount);
			}

			spin_unlock(&rxrpc_krxsecd_initmsgq_lock);

			if (msg) {
				rxrpc_krxsecd_process_incoming_call(msg);
				rxrpc_put_message(msg);
			}
		}

		_debug("### End Inbound Calls");

		try_to_freeze();

		/* discard pending signals */
		rxrpc_discard_my_signals();

	} while (!die);

	/* and that's all */
	complete_and_exit(&rxrpc_krxsecd_dead, 0);

} /* end rxrpc_krxsecd() */
122 | |||
/*****************************************************************************/
/*
 * start up a krxsecd daemon
 * - returns the new thread's pid, or a negative error from kernel_thread()
 */
int __init rxrpc_krxsecd_init(void)
{
	return kernel_thread(rxrpc_krxsecd, NULL, 0);

} /* end rxrpc_krxsecd_init() */
132 | |||
/*****************************************************************************/
/*
 * kill the krxsecd daemon and wait for it to complete
 * - blocks until the daemon has signalled rxrpc_krxsecd_dead
 */
void rxrpc_krxsecd_kill(void)
{
	rxrpc_krxsecd_die = 1;
	wake_up_all(&rxrpc_krxsecd_sleepq);
	wait_for_completion(&rxrpc_krxsecd_dead);

} /* end rxrpc_krxsecd_kill() */
144 | |||
/*****************************************************************************/
/*
 * clear all pending incoming calls for the specified transport
 * - drops the queue's reference on each discarded message
 */
void rxrpc_krxsecd_clear_transport(struct rxrpc_transport *trans)
{
	LIST_HEAD(tmp);

	struct rxrpc_message *msg;
	struct list_head *_p, *_n;

	_enter("%p",trans);

	/* move all the messages for this transport onto a temp list
	 * - done under the lock so the list stays consistent; the actual
	 *   puts happen after the lock is dropped */
	spin_lock(&rxrpc_krxsecd_initmsgq_lock);

	list_for_each_safe(_p, _n, &rxrpc_krxsecd_initmsgq) {
		msg = list_entry(_p, struct rxrpc_message, link);
		if (msg->trans == trans) {
			list_move_tail(&msg->link, &tmp);
			atomic_dec(&rxrpc_krxsecd_qcount);
		}
	}

	spin_unlock(&rxrpc_krxsecd_initmsgq_lock);

	/* zap all messages on the temp list */
	while (!list_empty(&tmp)) {
		msg = list_entry(tmp.next, struct rxrpc_message, link);
		list_del_init(&msg->link);
		rxrpc_put_message(msg);
	}

	_leave("");
} /* end rxrpc_krxsecd_clear_transport() */
180 | |||
/*****************************************************************************/
/*
 * queue a message on the incoming calls list
 * - takes an extra reference on the message for the queue
 * - silently discarded if the daemon is already shutting down
 */
void rxrpc_krxsecd_queue_incoming_call(struct rxrpc_message *msg)
{
	_enter("%p", msg);

	/* queue for processing by krxsecd */
	spin_lock(&rxrpc_krxsecd_initmsgq_lock);

	if (!rxrpc_krxsecd_die) {
		rxrpc_get_message(msg);
		list_add_tail(&msg->link, &rxrpc_krxsecd_initmsgq);
		atomic_inc(&rxrpc_krxsecd_qcount);
	}

	spin_unlock(&rxrpc_krxsecd_initmsgq_lock);

	wake_up(&rxrpc_krxsecd_sleepq);

	_leave("");
} /* end rxrpc_krxsecd_queue_incoming_call() */
204 | |||
/*****************************************************************************/
/*
 * process the initial message of an incoming call
 * - creates the call record, matches it against the registered services on
 *   the transport, and hands it to the matching service's new_call handler
 * - aborts the call/transport on any failure
 */
void rxrpc_krxsecd_process_incoming_call(struct rxrpc_message *msg)
{
	struct rxrpc_transport *trans = msg->trans;
	struct rxrpc_service *srv;
	struct rxrpc_call *call;
	struct list_head *_p;
	unsigned short sid;
	int ret;

	_enter("%p{tr=%p}", msg, trans);

	ret = rxrpc_incoming_call(msg->conn, msg, &call);
	if (ret < 0)
		goto out;

	/* find the matching service on the transport */
	sid = ntohs(msg->hdr.serviceId);
	srv = NULL;

	spin_lock(&trans->lock);
	list_for_each(_p, &trans->services) {
		srv = list_entry(_p, struct rxrpc_service, link);
		if (srv->service_id == sid && try_module_get(srv->owner)) {
			/* found a match (made sure it won't vanish) */
			_debug("found service '%s'", srv->name);
			call->owner = srv->owner;
			break;
		}
	}
	spin_unlock(&trans->lock);

	/* report the new connection
	 * - the func must inc the call's usage count to keep it
	 * - _p == &trans->services means the loop ran off the end without a
	 *   match, so the call is refused with -ENOENT
	 */
	ret = -ENOENT;
	if (_p != &trans->services) {
		/* attempt to accept the call */
		call->conn->service = srv;
		call->app_attn_func = srv->attn_func;
		call->app_error_func = srv->error_func;
		call->app_aemap_func = srv->aemap_func;

		ret = srv->new_call(call);

		/* send an abort if an error occurred */
		if (ret < 0) {
			rxrpc_call_abort(call, ret);
		}
		else {
			/* formally receive and ACK the new packet */
			ret = rxrpc_conn_receive_call_packet(call->conn,
							     call, msg);
		}
	}

	rxrpc_put_call(call);
 out:
	if (ret < 0)
		rxrpc_trans_immediate_abort(trans, msg, ret);

	_leave(" (%d)", ret);
} /* end rxrpc_krxsecd_process_incoming_call() */
diff --git a/net/rxrpc/krxtimod.c b/net/rxrpc/krxtimod.c deleted file mode 100644 index 9a9b6132dba4..000000000000 --- a/net/rxrpc/krxtimod.c +++ /dev/null | |||
@@ -1,204 +0,0 @@ | |||
1 | /* krxtimod.c: RXRPC timeout daemon | ||
2 | * | ||
3 | * Copyright (C) 2002 Red Hat, Inc. All Rights Reserved. | ||
4 | * Written by David Howells (dhowells@redhat.com) | ||
5 | * | ||
6 | * This program is free software; you can redistribute it and/or | ||
7 | * modify it under the terms of the GNU General Public License | ||
8 | * as published by the Free Software Foundation; either version | ||
9 | * 2 of the License, or (at your option) any later version. | ||
10 | */ | ||
11 | |||
12 | #include <linux/module.h> | ||
13 | #include <linux/init.h> | ||
14 | #include <linux/sched.h> | ||
15 | #include <linux/completion.h> | ||
16 | #include <linux/freezer.h> | ||
17 | #include <rxrpc/rxrpc.h> | ||
18 | #include <rxrpc/krxtimod.h> | ||
19 | #include <asm/errno.h> | ||
20 | #include "internal.h" | ||
21 | |||
22 | static DECLARE_COMPLETION(krxtimod_alive); | ||
23 | static DECLARE_COMPLETION(krxtimod_dead); | ||
24 | static DECLARE_WAIT_QUEUE_HEAD(krxtimod_sleepq); | ||
25 | static int krxtimod_die; | ||
26 | |||
27 | static LIST_HEAD(krxtimod_list); | ||
28 | static DEFINE_SPINLOCK(krxtimod_lock); | ||
29 | |||
30 | static int krxtimod(void *arg); | ||
31 | |||
32 | /*****************************************************************************/ | ||
33 | /* | ||
34 | * start the timeout daemon | ||
35 | */ | ||
36 | int rxrpc_krxtimod_start(void) | ||
37 | { | ||
38 | int ret; | ||
39 | |||
40 | ret = kernel_thread(krxtimod, NULL, 0); | ||
41 | if (ret < 0) | ||
42 | return ret; | ||
43 | |||
44 | wait_for_completion(&krxtimod_alive); | ||
45 | |||
46 | return ret; | ||
47 | } /* end rxrpc_krxtimod_start() */ | ||
48 | |||
49 | /*****************************************************************************/ | ||
50 | /* | ||
51 | * stop the timeout daemon | ||
52 | */ | ||
53 | void rxrpc_krxtimod_kill(void) | ||
54 | { | ||
55 | /* get rid of my daemon */ | ||
56 | krxtimod_die = 1; | ||
57 | wake_up(&krxtimod_sleepq); | ||
58 | wait_for_completion(&krxtimod_dead); | ||
59 | |||
60 | } /* end rxrpc_krxtimod_kill() */ | ||
61 | |||
62 | /*****************************************************************************/ | ||
63 | /* | ||
64 | * timeout processing daemon | ||
65 | */ | ||
66 | static int krxtimod(void *arg) | ||
67 | { | ||
68 | DECLARE_WAITQUEUE(myself, current); | ||
69 | |||
70 | rxrpc_timer_t *timer; | ||
71 | |||
72 | printk("Started krxtimod %d\n", current->pid); | ||
73 | |||
74 | daemonize("krxtimod"); | ||
75 | |||
76 | complete(&krxtimod_alive); | ||
77 | |||
78 | /* loop around looking for things to attend to */ | ||
79 | loop: | ||
80 | set_current_state(TASK_INTERRUPTIBLE); | ||
81 | add_wait_queue(&krxtimod_sleepq, &myself); | ||
82 | |||
83 | for (;;) { | ||
84 | unsigned long jif; | ||
85 | long timeout; | ||
86 | |||
87 | /* deal with the server being asked to die */ | ||
88 | if (krxtimod_die) { | ||
89 | remove_wait_queue(&krxtimod_sleepq, &myself); | ||
90 | _leave(""); | ||
91 | complete_and_exit(&krxtimod_dead, 0); | ||
92 | } | ||
93 | |||
94 | try_to_freeze(); | ||
95 | |||
96 | /* discard pending signals */ | ||
97 | rxrpc_discard_my_signals(); | ||
98 | |||
99 | /* work out the time to elapse before the next event */ | ||
100 | spin_lock(&krxtimod_lock); | ||
101 | if (list_empty(&krxtimod_list)) { | ||
102 | timeout = MAX_SCHEDULE_TIMEOUT; | ||
103 | } | ||
104 | else { | ||
105 | timer = list_entry(krxtimod_list.next, | ||
106 | rxrpc_timer_t, link); | ||
107 | timeout = timer->timo_jif; | ||
108 | jif = jiffies; | ||
109 | |||
110 | if (time_before_eq((unsigned long) timeout, jif)) | ||
111 | goto immediate; | ||
112 | |||
113 | else { | ||
114 | timeout = (long) timeout - (long) jiffies; | ||
115 | } | ||
116 | } | ||
117 | spin_unlock(&krxtimod_lock); | ||
118 | |||
119 | schedule_timeout(timeout); | ||
120 | |||
121 | set_current_state(TASK_INTERRUPTIBLE); | ||
122 | } | ||
123 | |||
124 | /* the thing on the front of the queue needs processing | ||
125 | * - we come here with the lock held and timer pointing to the expired | ||
126 | * entry | ||
127 | */ | ||
128 | immediate: | ||
129 | remove_wait_queue(&krxtimod_sleepq, &myself); | ||
130 | set_current_state(TASK_RUNNING); | ||
131 | |||
132 | _debug("@@@ Begin Timeout of %p", timer); | ||
133 | |||
134 | /* dequeue the timer */ | ||
135 | list_del_init(&timer->link); | ||
136 | spin_unlock(&krxtimod_lock); | ||
137 | |||
138 | /* call the timeout function */ | ||
139 | timer->ops->timed_out(timer); | ||
140 | |||
141 | _debug("@@@ End Timeout"); | ||
142 | goto loop; | ||
143 | |||
144 | } /* end krxtimod() */ | ||
145 | |||
146 | /*****************************************************************************/ | ||
147 | /* | ||
148 | * (re-)queue a timer | ||
149 | */ | ||
150 | void rxrpc_krxtimod_add_timer(rxrpc_timer_t *timer, unsigned long timeout) | ||
151 | { | ||
152 | struct list_head *_p; | ||
153 | rxrpc_timer_t *ptimer; | ||
154 | |||
155 | _enter("%p,%lu", timer, timeout); | ||
156 | |||
157 | spin_lock(&krxtimod_lock); | ||
158 | |||
159 | list_del(&timer->link); | ||
160 | |||
161 | /* the timer was deferred or reset - put it back in the queue at the | ||
162 | * right place */ | ||
163 | timer->timo_jif = jiffies + timeout; | ||
164 | |||
165 | list_for_each(_p, &krxtimod_list) { | ||
166 | ptimer = list_entry(_p, rxrpc_timer_t, link); | ||
167 | if (time_before(timer->timo_jif, ptimer->timo_jif)) | ||
168 | break; | ||
169 | } | ||
170 | |||
171 | list_add_tail(&timer->link, _p); /* insert before stopping point */ | ||
172 | |||
173 | spin_unlock(&krxtimod_lock); | ||
174 | |||
175 | wake_up(&krxtimod_sleepq); | ||
176 | |||
177 | _leave(""); | ||
178 | } /* end rxrpc_krxtimod_add_timer() */ | ||
179 | |||
180 | /*****************************************************************************/ | ||
181 | /* | ||
182 | * dequeue a timer | ||
183 | * - returns 0 if the timer was deleted or -ENOENT if it wasn't queued | ||
184 | */ | ||
185 | int rxrpc_krxtimod_del_timer(rxrpc_timer_t *timer) | ||
186 | { | ||
187 | int ret = 0; | ||
188 | |||
189 | _enter("%p", timer); | ||
190 | |||
191 | spin_lock(&krxtimod_lock); | ||
192 | |||
193 | if (list_empty(&timer->link)) | ||
194 | ret = -ENOENT; | ||
195 | else | ||
196 | list_del_init(&timer->link); | ||
197 | |||
198 | spin_unlock(&krxtimod_lock); | ||
199 | |||
200 | wake_up(&krxtimod_sleepq); | ||
201 | |||
202 | _leave(" = %d", ret); | ||
203 | return ret; | ||
204 | } /* end rxrpc_krxtimod_del_timer() */ | ||
diff --git a/net/rxrpc/main.c b/net/rxrpc/main.c deleted file mode 100644 index cead31b5bdf5..000000000000 --- a/net/rxrpc/main.c +++ /dev/null | |||
@@ -1,180 +0,0 @@ | |||
1 | /* main.c: Rx RPC interface | ||
2 | * | ||
3 | * Copyright (C) 2002 Red Hat, Inc. All Rights Reserved. | ||
4 | * Written by David Howells (dhowells@redhat.com) | ||
5 | * | ||
6 | * This program is free software; you can redistribute it and/or | ||
7 | * modify it under the terms of the GNU General Public License | ||
8 | * as published by the Free Software Foundation; either version | ||
9 | * 2 of the License, or (at your option) any later version. | ||
10 | */ | ||
11 | |||
12 | #include <linux/module.h> | ||
13 | #include <linux/init.h> | ||
14 | #include <linux/sched.h> | ||
15 | #include <rxrpc/rxrpc.h> | ||
16 | #include <rxrpc/krxiod.h> | ||
17 | #include <rxrpc/krxsecd.h> | ||
18 | #include <rxrpc/krxtimod.h> | ||
19 | #include <rxrpc/transport.h> | ||
20 | #include <rxrpc/connection.h> | ||
21 | #include <rxrpc/call.h> | ||
22 | #include <rxrpc/message.h> | ||
23 | #include "internal.h" | ||
24 | |||
25 | MODULE_DESCRIPTION("Rx RPC implementation"); | ||
26 | MODULE_AUTHOR("Red Hat, Inc."); | ||
27 | MODULE_LICENSE("GPL"); | ||
28 | |||
29 | __be32 rxrpc_epoch; | ||
30 | |||
31 | /*****************************************************************************/ | ||
32 | /* | ||
33 | * initialise the Rx module | ||
34 | */ | ||
35 | static int __init rxrpc_initialise(void) | ||
36 | { | ||
37 | int ret; | ||
38 | |||
39 | /* my epoch value */ | ||
40 | rxrpc_epoch = htonl(get_seconds()); | ||
41 | |||
42 | /* register the /proc interface */ | ||
43 | #ifdef CONFIG_PROC_FS | ||
44 | ret = rxrpc_proc_init(); | ||
45 | if (ret<0) | ||
46 | return ret; | ||
47 | #endif | ||
48 | |||
49 | /* register the sysctl files */ | ||
50 | #ifdef CONFIG_SYSCTL | ||
51 | ret = rxrpc_sysctl_init(); | ||
52 | if (ret<0) | ||
53 | goto error_proc; | ||
54 | #endif | ||
55 | |||
56 | /* start the krxtimod daemon */ | ||
57 | ret = rxrpc_krxtimod_start(); | ||
58 | if (ret<0) | ||
59 | goto error_sysctl; | ||
60 | |||
61 | /* start the krxiod daemon */ | ||
62 | ret = rxrpc_krxiod_init(); | ||
63 | if (ret<0) | ||
64 | goto error_krxtimod; | ||
65 | |||
66 | /* start the krxsecd daemon */ | ||
67 | ret = rxrpc_krxsecd_init(); | ||
68 | if (ret<0) | ||
69 | goto error_krxiod; | ||
70 | |||
71 | kdebug("\n\n"); | ||
72 | |||
73 | return 0; | ||
74 | |||
75 | error_krxiod: | ||
76 | rxrpc_krxiod_kill(); | ||
77 | error_krxtimod: | ||
78 | rxrpc_krxtimod_kill(); | ||
79 | error_sysctl: | ||
80 | #ifdef CONFIG_SYSCTL | ||
81 | rxrpc_sysctl_cleanup(); | ||
82 | error_proc: | ||
83 | #endif | ||
84 | #ifdef CONFIG_PROC_FS | ||
85 | rxrpc_proc_cleanup(); | ||
86 | #endif | ||
87 | return ret; | ||
88 | } /* end rxrpc_initialise() */ | ||
89 | |||
90 | module_init(rxrpc_initialise); | ||
91 | |||
92 | /*****************************************************************************/ | ||
93 | /* | ||
94 | * clean up the Rx module | ||
95 | */ | ||
96 | static void __exit rxrpc_cleanup(void) | ||
97 | { | ||
98 | kenter(""); | ||
99 | |||
100 | __RXACCT(printk("Outstanding Messages : %d\n", | ||
101 | atomic_read(&rxrpc_message_count))); | ||
102 | __RXACCT(printk("Outstanding Calls : %d\n", | ||
103 | atomic_read(&rxrpc_call_count))); | ||
104 | __RXACCT(printk("Outstanding Connections: %d\n", | ||
105 | atomic_read(&rxrpc_connection_count))); | ||
106 | __RXACCT(printk("Outstanding Peers : %d\n", | ||
107 | atomic_read(&rxrpc_peer_count))); | ||
108 | __RXACCT(printk("Outstanding Transports : %d\n", | ||
109 | atomic_read(&rxrpc_transport_count))); | ||
110 | |||
111 | rxrpc_krxsecd_kill(); | ||
112 | rxrpc_krxiod_kill(); | ||
113 | rxrpc_krxtimod_kill(); | ||
114 | #ifdef CONFIG_SYSCTL | ||
115 | rxrpc_sysctl_cleanup(); | ||
116 | #endif | ||
117 | #ifdef CONFIG_PROC_FS | ||
118 | rxrpc_proc_cleanup(); | ||
119 | #endif | ||
120 | |||
121 | __RXACCT(printk("Outstanding Messages : %d\n", | ||
122 | atomic_read(&rxrpc_message_count))); | ||
123 | __RXACCT(printk("Outstanding Calls : %d\n", | ||
124 | atomic_read(&rxrpc_call_count))); | ||
125 | __RXACCT(printk("Outstanding Connections: %d\n", | ||
126 | atomic_read(&rxrpc_connection_count))); | ||
127 | __RXACCT(printk("Outstanding Peers : %d\n", | ||
128 | atomic_read(&rxrpc_peer_count))); | ||
129 | __RXACCT(printk("Outstanding Transports : %d\n", | ||
130 | atomic_read(&rxrpc_transport_count))); | ||
131 | |||
132 | kleave(""); | ||
133 | } /* end rxrpc_cleanup() */ | ||
134 | |||
135 | module_exit(rxrpc_cleanup); | ||
136 | |||
137 | /*****************************************************************************/ | ||
138 | /* | ||
139 | * clear the dead space between task_struct and kernel stack | ||
140 | * - called by supplying -finstrument-functions to gcc | ||
141 | */ | ||
142 | #if 0 | ||
143 | void __cyg_profile_func_enter (void *this_fn, void *call_site) | ||
144 | __attribute__((no_instrument_function)); | ||
145 | |||
146 | void __cyg_profile_func_enter (void *this_fn, void *call_site) | ||
147 | { | ||
148 | asm volatile(" movl %%esp,%%edi \n" | ||
149 | " andl %0,%%edi \n" | ||
150 | " addl %1,%%edi \n" | ||
151 | " movl %%esp,%%ecx \n" | ||
152 | " subl %%edi,%%ecx \n" | ||
153 | " shrl $2,%%ecx \n" | ||
154 | " movl $0xedededed,%%eax \n" | ||
155 | " rep stosl \n" | ||
156 | : | ||
157 | : "i"(~(THREAD_SIZE-1)), "i"(sizeof(struct thread_info)) | ||
158 | : "eax", "ecx", "edi", "memory", "cc" | ||
159 | ); | ||
160 | } | ||
161 | |||
162 | void __cyg_profile_func_exit(void *this_fn, void *call_site) | ||
163 | __attribute__((no_instrument_function)); | ||
164 | |||
165 | void __cyg_profile_func_exit(void *this_fn, void *call_site) | ||
166 | { | ||
167 | asm volatile(" movl %%esp,%%edi \n" | ||
168 | " andl %0,%%edi \n" | ||
169 | " addl %1,%%edi \n" | ||
170 | " movl %%esp,%%ecx \n" | ||
171 | " subl %%edi,%%ecx \n" | ||
172 | " shrl $2,%%ecx \n" | ||
173 | " movl $0xdadadada,%%eax \n" | ||
174 | " rep stosl \n" | ||
175 | : | ||
176 | : "i"(~(THREAD_SIZE-1)), "i"(sizeof(struct thread_info)) | ||
177 | : "eax", "ecx", "edi", "memory", "cc" | ||
178 | ); | ||
179 | } | ||
180 | #endif | ||
diff --git a/net/rxrpc/peer.c b/net/rxrpc/peer.c deleted file mode 100644 index 8a275157a3bb..000000000000 --- a/net/rxrpc/peer.c +++ /dev/null | |||
@@ -1,398 +0,0 @@ | |||
1 | /* peer.c: Rx RPC peer management | ||
2 | * | ||
3 | * Copyright (C) 2002 Red Hat, Inc. All Rights Reserved. | ||
4 | * Written by David Howells (dhowells@redhat.com) | ||
5 | * | ||
6 | * This program is free software; you can redistribute it and/or | ||
7 | * modify it under the terms of the GNU General Public License | ||
8 | * as published by the Free Software Foundation; either version | ||
9 | * 2 of the License, or (at your option) any later version. | ||
10 | */ | ||
11 | |||
12 | #include <linux/sched.h> | ||
13 | #include <linux/slab.h> | ||
14 | #include <linux/module.h> | ||
15 | #include <rxrpc/rxrpc.h> | ||
16 | #include <rxrpc/transport.h> | ||
17 | #include <rxrpc/peer.h> | ||
18 | #include <rxrpc/connection.h> | ||
19 | #include <rxrpc/call.h> | ||
20 | #include <rxrpc/message.h> | ||
21 | #include <linux/udp.h> | ||
22 | #include <linux/ip.h> | ||
23 | #include <net/sock.h> | ||
24 | #include <asm/uaccess.h> | ||
25 | #include <asm/div64.h> | ||
26 | #include "internal.h" | ||
27 | |||
28 | __RXACCT_DECL(atomic_t rxrpc_peer_count); | ||
29 | LIST_HEAD(rxrpc_peers); | ||
30 | DECLARE_RWSEM(rxrpc_peers_sem); | ||
31 | unsigned long rxrpc_peer_timeout = 12 * 60 * 60; | ||
32 | |||
33 | static void rxrpc_peer_do_timeout(struct rxrpc_peer *peer); | ||
34 | |||
35 | static void __rxrpc_peer_timeout(rxrpc_timer_t *timer) | ||
36 | { | ||
37 | struct rxrpc_peer *peer = | ||
38 | list_entry(timer, struct rxrpc_peer, timeout); | ||
39 | |||
40 | _debug("Rx PEER TIMEOUT [%p{u=%d}]", peer, atomic_read(&peer->usage)); | ||
41 | |||
42 | rxrpc_peer_do_timeout(peer); | ||
43 | } | ||
44 | |||
45 | static const struct rxrpc_timer_ops rxrpc_peer_timer_ops = { | ||
46 | .timed_out = __rxrpc_peer_timeout, | ||
47 | }; | ||
48 | |||
49 | /*****************************************************************************/ | ||
50 | /* | ||
51 | * create a peer record | ||
52 | */ | ||
53 | static int __rxrpc_create_peer(struct rxrpc_transport *trans, __be32 addr, | ||
54 | struct rxrpc_peer **_peer) | ||
55 | { | ||
56 | struct rxrpc_peer *peer; | ||
57 | |||
58 | _enter("%p,%08x", trans, ntohl(addr)); | ||
59 | |||
60 | /* allocate and initialise a peer record */ | ||
61 | peer = kzalloc(sizeof(struct rxrpc_peer), GFP_KERNEL); | ||
62 | if (!peer) { | ||
63 | _leave(" = -ENOMEM"); | ||
64 | return -ENOMEM; | ||
65 | } | ||
66 | |||
67 | atomic_set(&peer->usage, 1); | ||
68 | |||
69 | INIT_LIST_HEAD(&peer->link); | ||
70 | INIT_LIST_HEAD(&peer->proc_link); | ||
71 | INIT_LIST_HEAD(&peer->conn_idlist); | ||
72 | INIT_LIST_HEAD(&peer->conn_active); | ||
73 | INIT_LIST_HEAD(&peer->conn_graveyard); | ||
74 | spin_lock_init(&peer->conn_gylock); | ||
75 | init_waitqueue_head(&peer->conn_gy_waitq); | ||
76 | rwlock_init(&peer->conn_idlock); | ||
77 | rwlock_init(&peer->conn_lock); | ||
78 | atomic_set(&peer->conn_count, 0); | ||
79 | spin_lock_init(&peer->lock); | ||
80 | rxrpc_timer_init(&peer->timeout, &rxrpc_peer_timer_ops); | ||
81 | |||
82 | peer->addr.s_addr = addr; | ||
83 | |||
84 | peer->trans = trans; | ||
85 | peer->ops = trans->peer_ops; | ||
86 | |||
87 | __RXACCT(atomic_inc(&rxrpc_peer_count)); | ||
88 | *_peer = peer; | ||
89 | _leave(" = 0 (%p)", peer); | ||
90 | |||
91 | return 0; | ||
92 | } /* end __rxrpc_create_peer() */ | ||
93 | |||
94 | /*****************************************************************************/ | ||
95 | /* | ||
96 | * find a peer record on the specified transport | ||
97 | * - returns (if successful) with peer record usage incremented | ||
98 | * - resurrects it from the graveyard if found there | ||
99 | */ | ||
100 | int rxrpc_peer_lookup(struct rxrpc_transport *trans, __be32 addr, | ||
101 | struct rxrpc_peer **_peer) | ||
102 | { | ||
103 | struct rxrpc_peer *peer, *candidate = NULL; | ||
104 | struct list_head *_p; | ||
105 | int ret; | ||
106 | |||
107 | _enter("%p{%hu},%08x", trans, trans->port, ntohl(addr)); | ||
108 | |||
109 | /* [common case] search the transport's active list first */ | ||
110 | read_lock(&trans->peer_lock); | ||
111 | list_for_each(_p, &trans->peer_active) { | ||
112 | peer = list_entry(_p, struct rxrpc_peer, link); | ||
113 | if (peer->addr.s_addr == addr) | ||
114 | goto found_active; | ||
115 | } | ||
116 | read_unlock(&trans->peer_lock); | ||
117 | |||
118 | /* [uncommon case] not active - create a candidate for a new record */ | ||
119 | ret = __rxrpc_create_peer(trans, addr, &candidate); | ||
120 | if (ret < 0) { | ||
121 | _leave(" = %d", ret); | ||
122 | return ret; | ||
123 | } | ||
124 | |||
125 | /* search the active list again, just in case it appeared whilst we | ||
126 | * were busy */ | ||
127 | write_lock(&trans->peer_lock); | ||
128 | list_for_each(_p, &trans->peer_active) { | ||
129 | peer = list_entry(_p, struct rxrpc_peer, link); | ||
130 | if (peer->addr.s_addr == addr) | ||
131 | goto found_active_second_chance; | ||
132 | } | ||
133 | |||
134 | /* search the transport's graveyard list */ | ||
135 | spin_lock(&trans->peer_gylock); | ||
136 | list_for_each(_p, &trans->peer_graveyard) { | ||
137 | peer = list_entry(_p, struct rxrpc_peer, link); | ||
138 | if (peer->addr.s_addr == addr) | ||
139 | goto found_in_graveyard; | ||
140 | } | ||
141 | spin_unlock(&trans->peer_gylock); | ||
142 | |||
143 | /* we can now add the new candidate to the list | ||
144 | * - tell the application layer that this peer has been added | ||
145 | */ | ||
146 | rxrpc_get_transport(trans); | ||
147 | peer = candidate; | ||
148 | candidate = NULL; | ||
149 | |||
150 | if (peer->ops && peer->ops->adding) { | ||
151 | ret = peer->ops->adding(peer); | ||
152 | if (ret < 0) { | ||
153 | write_unlock(&trans->peer_lock); | ||
154 | __RXACCT(atomic_dec(&rxrpc_peer_count)); | ||
155 | kfree(peer); | ||
156 | rxrpc_put_transport(trans); | ||
157 | _leave(" = %d", ret); | ||
158 | return ret; | ||
159 | } | ||
160 | } | ||
161 | |||
162 | atomic_inc(&trans->peer_count); | ||
163 | |||
164 | make_active: | ||
165 | list_add_tail(&peer->link, &trans->peer_active); | ||
166 | |||
167 | success_uwfree: | ||
168 | write_unlock(&trans->peer_lock); | ||
169 | |||
170 | if (candidate) { | ||
171 | __RXACCT(atomic_dec(&rxrpc_peer_count)); | ||
172 | kfree(candidate); | ||
173 | } | ||
174 | |||
175 | if (list_empty(&peer->proc_link)) { | ||
176 | down_write(&rxrpc_peers_sem); | ||
177 | list_add_tail(&peer->proc_link, &rxrpc_peers); | ||
178 | up_write(&rxrpc_peers_sem); | ||
179 | } | ||
180 | |||
181 | success: | ||
182 | *_peer = peer; | ||
183 | |||
184 | _leave(" = 0 (%p{u=%d cc=%d})", | ||
185 | peer, | ||
186 | atomic_read(&peer->usage), | ||
187 | atomic_read(&peer->conn_count)); | ||
188 | return 0; | ||
189 | |||
190 | /* handle the peer being found in the active list straight off */ | ||
191 | found_active: | ||
192 | rxrpc_get_peer(peer); | ||
193 | read_unlock(&trans->peer_lock); | ||
194 | goto success; | ||
195 | |||
196 | /* handle resurrecting a peer from the graveyard */ | ||
197 | found_in_graveyard: | ||
198 | rxrpc_get_peer(peer); | ||
199 | rxrpc_get_transport(peer->trans); | ||
200 | rxrpc_krxtimod_del_timer(&peer->timeout); | ||
201 | list_del_init(&peer->link); | ||
202 | spin_unlock(&trans->peer_gylock); | ||
203 | goto make_active; | ||
204 | |||
205 | /* handle finding the peer on the second time through the active | ||
206 | * list */ | ||
207 | found_active_second_chance: | ||
208 | rxrpc_get_peer(peer); | ||
209 | goto success_uwfree; | ||
210 | |||
211 | } /* end rxrpc_peer_lookup() */ | ||
212 | |||
213 | /*****************************************************************************/ | ||
214 | /* | ||
215 | * finish with a peer record | ||
216 | * - it gets sent to the graveyard from where it can be resurrected or timed | ||
217 | * out | ||
218 | */ | ||
219 | void rxrpc_put_peer(struct rxrpc_peer *peer) | ||
220 | { | ||
221 | struct rxrpc_transport *trans = peer->trans; | ||
222 | |||
223 | _enter("%p{cc=%d a=%08x}", | ||
224 | peer, | ||
225 | atomic_read(&peer->conn_count), | ||
226 | ntohl(peer->addr.s_addr)); | ||
227 | |||
228 | /* sanity check */ | ||
229 | if (atomic_read(&peer->usage) <= 0) | ||
230 | BUG(); | ||
231 | |||
232 | write_lock(&trans->peer_lock); | ||
233 | spin_lock(&trans->peer_gylock); | ||
234 | if (likely(!atomic_dec_and_test(&peer->usage))) { | ||
235 | spin_unlock(&trans->peer_gylock); | ||
236 | write_unlock(&trans->peer_lock); | ||
237 | _leave(""); | ||
238 | return; | ||
239 | } | ||
240 | |||
241 | /* move to graveyard queue */ | ||
242 | list_del(&peer->link); | ||
243 | write_unlock(&trans->peer_lock); | ||
244 | |||
245 | list_add_tail(&peer->link, &trans->peer_graveyard); | ||
246 | |||
247 | BUG_ON(!list_empty(&peer->conn_active)); | ||
248 | |||
249 | rxrpc_krxtimod_add_timer(&peer->timeout, rxrpc_peer_timeout * HZ); | ||
250 | |||
251 | spin_unlock(&trans->peer_gylock); | ||
252 | |||
253 | rxrpc_put_transport(trans); | ||
254 | |||
255 | _leave(" [killed]"); | ||
256 | } /* end rxrpc_put_peer() */ | ||
257 | |||
258 | /*****************************************************************************/ | ||
259 | /* | ||
260 | * handle a peer timing out in the graveyard | ||
261 | * - called from krxtimod | ||
262 | */ | ||
263 | static void rxrpc_peer_do_timeout(struct rxrpc_peer *peer) | ||
264 | { | ||
265 | struct rxrpc_transport *trans = peer->trans; | ||
266 | |||
267 | _enter("%p{u=%d cc=%d a=%08x}", | ||
268 | peer, | ||
269 | atomic_read(&peer->usage), | ||
270 | atomic_read(&peer->conn_count), | ||
271 | ntohl(peer->addr.s_addr)); | ||
272 | |||
273 | BUG_ON(atomic_read(&peer->usage) < 0); | ||
274 | |||
275 | /* remove from graveyard if still dead */ | ||
276 | spin_lock(&trans->peer_gylock); | ||
277 | if (atomic_read(&peer->usage) == 0) | ||
278 | list_del_init(&peer->link); | ||
279 | else | ||
280 | peer = NULL; | ||
281 | spin_unlock(&trans->peer_gylock); | ||
282 | |||
283 | if (!peer) { | ||
284 | _leave(""); | ||
285 | return; /* resurrected */ | ||
286 | } | ||
287 | |||
288 | /* clear all connections on this peer */ | ||
289 | rxrpc_conn_clearall(peer); | ||
290 | |||
291 | BUG_ON(!list_empty(&peer->conn_active)); | ||
292 | BUG_ON(!list_empty(&peer->conn_graveyard)); | ||
293 | |||
294 | /* inform the application layer */ | ||
295 | if (peer->ops && peer->ops->discarding) | ||
296 | peer->ops->discarding(peer); | ||
297 | |||
298 | if (!list_empty(&peer->proc_link)) { | ||
299 | down_write(&rxrpc_peers_sem); | ||
300 | list_del(&peer->proc_link); | ||
301 | up_write(&rxrpc_peers_sem); | ||
302 | } | ||
303 | |||
304 | __RXACCT(atomic_dec(&rxrpc_peer_count)); | ||
305 | kfree(peer); | ||
306 | |||
307 | /* if the graveyard is now empty, wake up anyone waiting for that */ | ||
308 | if (atomic_dec_and_test(&trans->peer_count)) | ||
309 | wake_up(&trans->peer_gy_waitq); | ||
310 | |||
311 | _leave(" [destroyed]"); | ||
312 | } /* end rxrpc_peer_do_timeout() */ | ||
313 | |||
314 | /*****************************************************************************/ | ||
315 | /* | ||
316 | * clear all peer records from a transport endpoint | ||
317 | */ | ||
318 | void rxrpc_peer_clearall(struct rxrpc_transport *trans) | ||
319 | { | ||
320 | DECLARE_WAITQUEUE(myself,current); | ||
321 | |||
322 | struct rxrpc_peer *peer; | ||
323 | int err; | ||
324 | |||
325 | _enter("%p",trans); | ||
326 | |||
327 | /* there shouldn't be any active peers remaining */ | ||
328 | BUG_ON(!list_empty(&trans->peer_active)); | ||
329 | |||
330 | /* manually timeout all peers in the graveyard */ | ||
331 | spin_lock(&trans->peer_gylock); | ||
332 | while (!list_empty(&trans->peer_graveyard)) { | ||
333 | peer = list_entry(trans->peer_graveyard.next, | ||
334 | struct rxrpc_peer, link); | ||
335 | _debug("Clearing peer %p\n", peer); | ||
336 | err = rxrpc_krxtimod_del_timer(&peer->timeout); | ||
337 | spin_unlock(&trans->peer_gylock); | ||
338 | |||
339 | if (err == 0) | ||
340 | rxrpc_peer_do_timeout(peer); | ||
341 | |||
342 | spin_lock(&trans->peer_gylock); | ||
343 | } | ||
344 | spin_unlock(&trans->peer_gylock); | ||
345 | |||
346 | /* wait for the the peer graveyard to be completely cleared */ | ||
347 | set_current_state(TASK_UNINTERRUPTIBLE); | ||
348 | add_wait_queue(&trans->peer_gy_waitq, &myself); | ||
349 | |||
350 | while (atomic_read(&trans->peer_count) != 0) { | ||
351 | schedule(); | ||
352 | set_current_state(TASK_UNINTERRUPTIBLE); | ||
353 | } | ||
354 | |||
355 | remove_wait_queue(&trans->peer_gy_waitq, &myself); | ||
356 | set_current_state(TASK_RUNNING); | ||
357 | |||
358 | _leave(""); | ||
359 | } /* end rxrpc_peer_clearall() */ | ||
360 | |||
361 | /*****************************************************************************/ | ||
362 | /* | ||
363 | * calculate and cache the Round-Trip-Time for a message and its response | ||
364 | */ | ||
365 | void rxrpc_peer_calculate_rtt(struct rxrpc_peer *peer, | ||
366 | struct rxrpc_message *msg, | ||
367 | struct rxrpc_message *resp) | ||
368 | { | ||
369 | unsigned long long rtt; | ||
370 | int loop; | ||
371 | |||
372 | _enter("%p,%p,%p", peer, msg, resp); | ||
373 | |||
374 | /* calculate the latest RTT */ | ||
375 | rtt = resp->stamp.tv_sec - msg->stamp.tv_sec; | ||
376 | rtt *= 1000000UL; | ||
377 | rtt += resp->stamp.tv_usec - msg->stamp.tv_usec; | ||
378 | |||
379 | /* add to cache */ | ||
380 | peer->rtt_cache[peer->rtt_point] = rtt; | ||
381 | peer->rtt_point++; | ||
382 | peer->rtt_point %= RXRPC_RTT_CACHE_SIZE; | ||
383 | |||
384 | if (peer->rtt_usage < RXRPC_RTT_CACHE_SIZE) | ||
385 | peer->rtt_usage++; | ||
386 | |||
387 | /* recalculate RTT */ | ||
388 | rtt = 0; | ||
389 | for (loop = peer->rtt_usage - 1; loop >= 0; loop--) | ||
390 | rtt += peer->rtt_cache[loop]; | ||
391 | |||
392 | do_div(rtt, peer->rtt_usage); | ||
393 | peer->rtt = rtt; | ||
394 | |||
395 | _leave(" RTT=%lu.%lums", | ||
396 | (long) (peer->rtt / 1000), (long) (peer->rtt % 1000)); | ||
397 | |||
398 | } /* end rxrpc_peer_calculate_rtt() */ | ||
diff --git a/net/rxrpc/proc.c b/net/rxrpc/proc.c deleted file mode 100644 index 8551c879e456..000000000000 --- a/net/rxrpc/proc.c +++ /dev/null | |||
@@ -1,617 +0,0 @@ | |||
1 | /* proc.c: /proc interface for RxRPC | ||
2 | * | ||
3 | * Copyright (C) 2002 Red Hat, Inc. All Rights Reserved. | ||
4 | * Written by David Howells (dhowells@redhat.com) | ||
5 | * | ||
6 | * This program is free software; you can redistribute it and/or | ||
7 | * modify it under the terms of the GNU General Public License | ||
8 | * as published by the Free Software Foundation; either version | ||
9 | * 2 of the License, or (at your option) any later version. | ||
10 | */ | ||
11 | |||
12 | #include <linux/sched.h> | ||
13 | #include <linux/slab.h> | ||
14 | #include <linux/module.h> | ||
15 | #include <linux/proc_fs.h> | ||
16 | #include <linux/seq_file.h> | ||
17 | #include <rxrpc/rxrpc.h> | ||
18 | #include <rxrpc/transport.h> | ||
19 | #include <rxrpc/peer.h> | ||
20 | #include <rxrpc/connection.h> | ||
21 | #include <rxrpc/call.h> | ||
22 | #include <rxrpc/message.h> | ||
23 | #include "internal.h" | ||
24 | |||
25 | static struct proc_dir_entry *proc_rxrpc; | ||
26 | |||
27 | static int rxrpc_proc_transports_open(struct inode *inode, struct file *file); | ||
28 | static void *rxrpc_proc_transports_start(struct seq_file *p, loff_t *pos); | ||
29 | static void *rxrpc_proc_transports_next(struct seq_file *p, void *v, loff_t *pos); | ||
30 | static void rxrpc_proc_transports_stop(struct seq_file *p, void *v); | ||
31 | static int rxrpc_proc_transports_show(struct seq_file *m, void *v); | ||
32 | |||
33 | static struct seq_operations rxrpc_proc_transports_ops = { | ||
34 | .start = rxrpc_proc_transports_start, | ||
35 | .next = rxrpc_proc_transports_next, | ||
36 | .stop = rxrpc_proc_transports_stop, | ||
37 | .show = rxrpc_proc_transports_show, | ||
38 | }; | ||
39 | |||
40 | static const struct file_operations rxrpc_proc_transports_fops = { | ||
41 | .open = rxrpc_proc_transports_open, | ||
42 | .read = seq_read, | ||
43 | .llseek = seq_lseek, | ||
44 | .release = seq_release, | ||
45 | }; | ||
46 | |||
47 | static int rxrpc_proc_peers_open(struct inode *inode, struct file *file); | ||
48 | static void *rxrpc_proc_peers_start(struct seq_file *p, loff_t *pos); | ||
49 | static void *rxrpc_proc_peers_next(struct seq_file *p, void *v, loff_t *pos); | ||
50 | static void rxrpc_proc_peers_stop(struct seq_file *p, void *v); | ||
51 | static int rxrpc_proc_peers_show(struct seq_file *m, void *v); | ||
52 | |||
53 | static struct seq_operations rxrpc_proc_peers_ops = { | ||
54 | .start = rxrpc_proc_peers_start, | ||
55 | .next = rxrpc_proc_peers_next, | ||
56 | .stop = rxrpc_proc_peers_stop, | ||
57 | .show = rxrpc_proc_peers_show, | ||
58 | }; | ||
59 | |||
60 | static const struct file_operations rxrpc_proc_peers_fops = { | ||
61 | .open = rxrpc_proc_peers_open, | ||
62 | .read = seq_read, | ||
63 | .llseek = seq_lseek, | ||
64 | .release = seq_release, | ||
65 | }; | ||
66 | |||
67 | static int rxrpc_proc_conns_open(struct inode *inode, struct file *file); | ||
68 | static void *rxrpc_proc_conns_start(struct seq_file *p, loff_t *pos); | ||
69 | static void *rxrpc_proc_conns_next(struct seq_file *p, void *v, loff_t *pos); | ||
70 | static void rxrpc_proc_conns_stop(struct seq_file *p, void *v); | ||
71 | static int rxrpc_proc_conns_show(struct seq_file *m, void *v); | ||
72 | |||
73 | static struct seq_operations rxrpc_proc_conns_ops = { | ||
74 | .start = rxrpc_proc_conns_start, | ||
75 | .next = rxrpc_proc_conns_next, | ||
76 | .stop = rxrpc_proc_conns_stop, | ||
77 | .show = rxrpc_proc_conns_show, | ||
78 | }; | ||
79 | |||
80 | static const struct file_operations rxrpc_proc_conns_fops = { | ||
81 | .open = rxrpc_proc_conns_open, | ||
82 | .read = seq_read, | ||
83 | .llseek = seq_lseek, | ||
84 | .release = seq_release, | ||
85 | }; | ||
86 | |||
87 | static int rxrpc_proc_calls_open(struct inode *inode, struct file *file); | ||
88 | static void *rxrpc_proc_calls_start(struct seq_file *p, loff_t *pos); | ||
89 | static void *rxrpc_proc_calls_next(struct seq_file *p, void *v, loff_t *pos); | ||
90 | static void rxrpc_proc_calls_stop(struct seq_file *p, void *v); | ||
91 | static int rxrpc_proc_calls_show(struct seq_file *m, void *v); | ||
92 | |||
93 | static struct seq_operations rxrpc_proc_calls_ops = { | ||
94 | .start = rxrpc_proc_calls_start, | ||
95 | .next = rxrpc_proc_calls_next, | ||
96 | .stop = rxrpc_proc_calls_stop, | ||
97 | .show = rxrpc_proc_calls_show, | ||
98 | }; | ||
99 | |||
100 | static const struct file_operations rxrpc_proc_calls_fops = { | ||
101 | .open = rxrpc_proc_calls_open, | ||
102 | .read = seq_read, | ||
103 | .llseek = seq_lseek, | ||
104 | .release = seq_release, | ||
105 | }; | ||
106 | |||
107 | static const char *rxrpc_call_states7[] = { | ||
108 | "complet", | ||
109 | "error ", | ||
110 | "rcv_op ", | ||
111 | "rcv_arg", | ||
112 | "got_arg", | ||
113 | "snd_rpl", | ||
114 | "fin_ack", | ||
115 | "snd_arg", | ||
116 | "rcv_rpl", | ||
117 | "got_rpl" | ||
118 | }; | ||
119 | |||
120 | static const char *rxrpc_call_error_states7[] = { | ||
121 | "no_err ", | ||
122 | "loc_abt", | ||
123 | "rmt_abt", | ||
124 | "loc_err", | ||
125 | "rmt_err" | ||
126 | }; | ||
127 | |||
128 | /*****************************************************************************/ | ||
129 | /* | ||
130 | * initialise the /proc/net/rxrpc/ directory | ||
131 | */ | ||
132 | int rxrpc_proc_init(void) | ||
133 | { | ||
134 | struct proc_dir_entry *p; | ||
135 | |||
136 | proc_rxrpc = proc_mkdir("rxrpc", proc_net); | ||
137 | if (!proc_rxrpc) | ||
138 | goto error; | ||
139 | proc_rxrpc->owner = THIS_MODULE; | ||
140 | |||
141 | p = create_proc_entry("calls", 0, proc_rxrpc); | ||
142 | if (!p) | ||
143 | goto error_proc; | ||
144 | p->proc_fops = &rxrpc_proc_calls_fops; | ||
145 | p->owner = THIS_MODULE; | ||
146 | |||
147 | p = create_proc_entry("connections", 0, proc_rxrpc); | ||
148 | if (!p) | ||
149 | goto error_calls; | ||
150 | p->proc_fops = &rxrpc_proc_conns_fops; | ||
151 | p->owner = THIS_MODULE; | ||
152 | |||
153 | p = create_proc_entry("peers", 0, proc_rxrpc); | ||
154 | if (!p) | ||
155 | goto error_calls; | ||
156 | p->proc_fops = &rxrpc_proc_peers_fops; | ||
157 | p->owner = THIS_MODULE; | ||
158 | |||
159 | p = create_proc_entry("transports", 0, proc_rxrpc); | ||
160 | if (!p) | ||
161 | goto error_conns; | ||
162 | p->proc_fops = &rxrpc_proc_transports_fops; | ||
163 | p->owner = THIS_MODULE; | ||
164 | |||
165 | return 0; | ||
166 | |||
167 | error_conns: | ||
168 | remove_proc_entry("connections", proc_rxrpc); | ||
169 | error_calls: | ||
170 | remove_proc_entry("calls", proc_rxrpc); | ||
171 | error_proc: | ||
172 | remove_proc_entry("rxrpc", proc_net); | ||
173 | error: | ||
174 | return -ENOMEM; | ||
175 | } /* end rxrpc_proc_init() */ | ||
176 | |||
177 | /*****************************************************************************/ | ||
178 | /* | ||
179 | * clean up the /proc/net/rxrpc/ directory | ||
180 | */ | ||
181 | void rxrpc_proc_cleanup(void) | ||
182 | { | ||
183 | remove_proc_entry("transports", proc_rxrpc); | ||
184 | remove_proc_entry("peers", proc_rxrpc); | ||
185 | remove_proc_entry("connections", proc_rxrpc); | ||
186 | remove_proc_entry("calls", proc_rxrpc); | ||
187 | |||
188 | remove_proc_entry("rxrpc", proc_net); | ||
189 | |||
190 | } /* end rxrpc_proc_cleanup() */ | ||
191 | |||
192 | /*****************************************************************************/ | ||
193 | /* | ||
194 | * open "/proc/net/rxrpc/transports" which provides a summary of extant transports | ||
195 | */ | ||
196 | static int rxrpc_proc_transports_open(struct inode *inode, struct file *file) | ||
197 | { | ||
198 | struct seq_file *m; | ||
199 | int ret; | ||
200 | |||
201 | ret = seq_open(file, &rxrpc_proc_transports_ops); | ||
202 | if (ret < 0) | ||
203 | return ret; | ||
204 | |||
205 | m = file->private_data; | ||
206 | m->private = PDE(inode)->data; | ||
207 | |||
208 | return 0; | ||
209 | } /* end rxrpc_proc_transports_open() */ | ||
210 | |||
211 | /*****************************************************************************/ | ||
212 | /* | ||
213 | * set up the iterator to start reading from the transports list and return the first item | ||
214 | */ | ||
215 | static void *rxrpc_proc_transports_start(struct seq_file *m, loff_t *_pos) | ||
216 | { | ||
217 | struct list_head *_p; | ||
218 | loff_t pos = *_pos; | ||
219 | |||
220 | /* lock the list against modification */ | ||
221 | down_read(&rxrpc_proc_transports_sem); | ||
222 | |||
223 | /* allow for the header line */ | ||
224 | if (!pos) | ||
225 | return SEQ_START_TOKEN; | ||
226 | pos--; | ||
227 | |||
228 | /* find the n'th element in the list */ | ||
229 | list_for_each(_p, &rxrpc_proc_transports) | ||
230 | if (!pos--) | ||
231 | break; | ||
232 | |||
233 | return _p != &rxrpc_proc_transports ? _p : NULL; | ||
234 | } /* end rxrpc_proc_transports_start() */ | ||
235 | |||
236 | /*****************************************************************************/ | ||
237 | /* | ||
238 | * move to next transport in transports list | ||
239 | */ | ||
240 | static void *rxrpc_proc_transports_next(struct seq_file *p, void *v, loff_t *pos) | ||
241 | { | ||
242 | struct list_head *_p; | ||
243 | |||
244 | (*pos)++; | ||
245 | |||
246 | _p = v; | ||
247 | _p = (v == SEQ_START_TOKEN) ? rxrpc_proc_transports.next : _p->next; | ||
248 | |||
249 | return _p != &rxrpc_proc_transports ? _p : NULL; | ||
250 | } /* end rxrpc_proc_transports_next() */ | ||
251 | |||
252 | /*****************************************************************************/ | ||
253 | /* | ||
254 | * clean up after reading from the transports list | ||
255 | */ | ||
256 | static void rxrpc_proc_transports_stop(struct seq_file *p, void *v) | ||
257 | { | ||
258 | up_read(&rxrpc_proc_transports_sem); | ||
259 | |||
260 | } /* end rxrpc_proc_transports_stop() */ | ||
261 | |||
262 | /*****************************************************************************/ | ||
263 | /* | ||
264 | * display a header line followed by a load of transport lines | ||
265 | */ | ||
266 | static int rxrpc_proc_transports_show(struct seq_file *m, void *v) | ||
267 | { | ||
268 | struct rxrpc_transport *trans = | ||
269 | list_entry(v, struct rxrpc_transport, proc_link); | ||
270 | |||
271 | /* display header on line 1 */ | ||
272 | if (v == SEQ_START_TOKEN) { | ||
273 | seq_puts(m, "LOCAL USE\n"); | ||
274 | return 0; | ||
275 | } | ||
276 | |||
277 | /* display one transport per line on subsequent lines */ | ||
278 | seq_printf(m, "%5hu %3d\n", | ||
279 | trans->port, | ||
280 | atomic_read(&trans->usage) | ||
281 | ); | ||
282 | |||
283 | return 0; | ||
284 | } /* end rxrpc_proc_transports_show() */ | ||
285 | |||
286 | /*****************************************************************************/ | ||
287 | /* | ||
288 | * open "/proc/net/rxrpc/peers" which provides a summary of extant peers | ||
289 | */ | ||
290 | static int rxrpc_proc_peers_open(struct inode *inode, struct file *file) | ||
291 | { | ||
292 | struct seq_file *m; | ||
293 | int ret; | ||
294 | |||
295 | ret = seq_open(file, &rxrpc_proc_peers_ops); | ||
296 | if (ret < 0) | ||
297 | return ret; | ||
298 | |||
299 | m = file->private_data; | ||
300 | m->private = PDE(inode)->data; | ||
301 | |||
302 | return 0; | ||
303 | } /* end rxrpc_proc_peers_open() */ | ||
304 | |||
305 | /*****************************************************************************/ | ||
306 | /* | ||
307 | * set up the iterator to start reading from the peers list and return the | ||
308 | * first item | ||
309 | */ | ||
310 | static void *rxrpc_proc_peers_start(struct seq_file *m, loff_t *_pos) | ||
311 | { | ||
312 | struct list_head *_p; | ||
313 | loff_t pos = *_pos; | ||
314 | |||
315 | /* lock the list against modification */ | ||
316 | down_read(&rxrpc_peers_sem); | ||
317 | |||
318 | /* allow for the header line */ | ||
319 | if (!pos) | ||
320 | return SEQ_START_TOKEN; | ||
321 | pos--; | ||
322 | |||
323 | /* find the n'th element in the list */ | ||
324 | list_for_each(_p, &rxrpc_peers) | ||
325 | if (!pos--) | ||
326 | break; | ||
327 | |||
328 | return _p != &rxrpc_peers ? _p : NULL; | ||
329 | } /* end rxrpc_proc_peers_start() */ | ||
330 | |||
331 | /*****************************************************************************/ | ||
332 | /* | ||
333 | * move to next peer in peers list | ||
334 | */ | ||
335 | static void *rxrpc_proc_peers_next(struct seq_file *p, void *v, loff_t *pos) | ||
336 | { | ||
337 | struct list_head *_p; | ||
338 | |||
339 | (*pos)++; | ||
340 | |||
341 | _p = v; | ||
342 | _p = (v == SEQ_START_TOKEN) ? rxrpc_peers.next : _p->next; | ||
343 | |||
344 | return _p != &rxrpc_peers ? _p : NULL; | ||
345 | } /* end rxrpc_proc_peers_next() */ | ||
346 | |||
347 | /*****************************************************************************/ | ||
348 | /* | ||
349 | * clean up after reading from the peers list | ||
350 | */ | ||
351 | static void rxrpc_proc_peers_stop(struct seq_file *p, void *v) | ||
352 | { | ||
353 | up_read(&rxrpc_peers_sem); | ||
354 | |||
355 | } /* end rxrpc_proc_peers_stop() */ | ||
356 | |||
357 | /*****************************************************************************/ | ||
358 | /* | ||
359 | * display a header line followed by a load of peer lines | ||
360 | */ | ||
361 | static int rxrpc_proc_peers_show(struct seq_file *m, void *v) | ||
362 | { | ||
363 | struct rxrpc_peer *peer = list_entry(v, struct rxrpc_peer, proc_link); | ||
364 | long timeout; | ||
365 | |||
366 | /* display header on line 1 */ | ||
367 | if (v == SEQ_START_TOKEN) { | ||
368 | seq_puts(m, "LOCAL REMOTE USAGE CONNS TIMEOUT" | ||
369 | " MTU RTT(uS)\n"); | ||
370 | return 0; | ||
371 | } | ||
372 | |||
373 | /* display one peer per line on subsequent lines */ | ||
374 | timeout = 0; | ||
375 | if (!list_empty(&peer->timeout.link)) | ||
376 | timeout = (long) peer->timeout.timo_jif - | ||
377 | (long) jiffies; | ||
378 | |||
379 | seq_printf(m, "%5hu %08x %5d %5d %8ld %5Zu %7lu\n", | ||
380 | peer->trans->port, | ||
381 | ntohl(peer->addr.s_addr), | ||
382 | atomic_read(&peer->usage), | ||
383 | atomic_read(&peer->conn_count), | ||
384 | timeout, | ||
385 | peer->if_mtu, | ||
386 | (long) peer->rtt | ||
387 | ); | ||
388 | |||
389 | return 0; | ||
390 | } /* end rxrpc_proc_peers_show() */ | ||
391 | |||
392 | /*****************************************************************************/ | ||
393 | /* | ||
394 | * open "/proc/net/rxrpc/connections" which provides a summary of extant | ||
395 | * connections | ||
396 | */ | ||
397 | static int rxrpc_proc_conns_open(struct inode *inode, struct file *file) | ||
398 | { | ||
399 | struct seq_file *m; | ||
400 | int ret; | ||
401 | |||
402 | ret = seq_open(file, &rxrpc_proc_conns_ops); | ||
403 | if (ret < 0) | ||
404 | return ret; | ||
405 | |||
406 | m = file->private_data; | ||
407 | m->private = PDE(inode)->data; | ||
408 | |||
409 | return 0; | ||
410 | } /* end rxrpc_proc_conns_open() */ | ||
411 | |||
412 | /*****************************************************************************/ | ||
413 | /* | ||
414 | * set up the iterator to start reading from the conns list and return the | ||
415 | * first item | ||
416 | */ | ||
417 | static void *rxrpc_proc_conns_start(struct seq_file *m, loff_t *_pos) | ||
418 | { | ||
419 | struct list_head *_p; | ||
420 | loff_t pos = *_pos; | ||
421 | |||
422 | /* lock the list against modification */ | ||
423 | down_read(&rxrpc_conns_sem); | ||
424 | |||
425 | /* allow for the header line */ | ||
426 | if (!pos) | ||
427 | return SEQ_START_TOKEN; | ||
428 | pos--; | ||
429 | |||
430 | /* find the n'th element in the list */ | ||
431 | list_for_each(_p, &rxrpc_conns) | ||
432 | if (!pos--) | ||
433 | break; | ||
434 | |||
435 | return _p != &rxrpc_conns ? _p : NULL; | ||
436 | } /* end rxrpc_proc_conns_start() */ | ||
437 | |||
438 | /*****************************************************************************/ | ||
439 | /* | ||
440 | * move to next conn in conns list | ||
441 | */ | ||
442 | static void *rxrpc_proc_conns_next(struct seq_file *p, void *v, loff_t *pos) | ||
443 | { | ||
444 | struct list_head *_p; | ||
445 | |||
446 | (*pos)++; | ||
447 | |||
448 | _p = v; | ||
449 | _p = (v == SEQ_START_TOKEN) ? rxrpc_conns.next : _p->next; | ||
450 | |||
451 | return _p != &rxrpc_conns ? _p : NULL; | ||
452 | } /* end rxrpc_proc_conns_next() */ | ||
453 | |||
454 | /*****************************************************************************/ | ||
455 | /* | ||
456 | * clean up after reading from the conns list | ||
457 | */ | ||
458 | static void rxrpc_proc_conns_stop(struct seq_file *p, void *v) | ||
459 | { | ||
460 | up_read(&rxrpc_conns_sem); | ||
461 | |||
462 | } /* end rxrpc_proc_conns_stop() */ | ||
463 | |||
464 | /*****************************************************************************/ | ||
465 | /* | ||
466 | * display a header line followed by a load of conn lines | ||
467 | */ | ||
468 | static int rxrpc_proc_conns_show(struct seq_file *m, void *v) | ||
469 | { | ||
470 | struct rxrpc_connection *conn; | ||
471 | long timeout; | ||
472 | |||
473 | conn = list_entry(v, struct rxrpc_connection, proc_link); | ||
474 | |||
475 | /* display header on line 1 */ | ||
476 | if (v == SEQ_START_TOKEN) { | ||
477 | seq_puts(m, | ||
478 | "LOCAL REMOTE RPORT SRVC CONN END SERIALNO " | ||
479 | "CALLNO MTU TIMEOUT" | ||
480 | "\n"); | ||
481 | return 0; | ||
482 | } | ||
483 | |||
484 | /* display one conn per line on subsequent lines */ | ||
485 | timeout = 0; | ||
486 | if (!list_empty(&conn->timeout.link)) | ||
487 | timeout = (long) conn->timeout.timo_jif - | ||
488 | (long) jiffies; | ||
489 | |||
490 | seq_printf(m, | ||
491 | "%5hu %08x %5hu %04hx %08x %-3.3s %08x %08x %5Zu %8ld\n", | ||
492 | conn->trans->port, | ||
493 | ntohl(conn->addr.sin_addr.s_addr), | ||
494 | ntohs(conn->addr.sin_port), | ||
495 | ntohs(conn->service_id), | ||
496 | ntohl(conn->conn_id), | ||
497 | conn->out_clientflag ? "CLT" : "SRV", | ||
498 | conn->serial_counter, | ||
499 | conn->call_counter, | ||
500 | conn->mtu_size, | ||
501 | timeout | ||
502 | ); | ||
503 | |||
504 | return 0; | ||
505 | } /* end rxrpc_proc_conns_show() */ | ||
506 | |||
507 | /*****************************************************************************/ | ||
508 | /* | ||
509 | * open "/proc/net/rxrpc/calls" which provides a summary of extant calls | ||
510 | */ | ||
511 | static int rxrpc_proc_calls_open(struct inode *inode, struct file *file) | ||
512 | { | ||
513 | struct seq_file *m; | ||
514 | int ret; | ||
515 | |||
516 | ret = seq_open(file, &rxrpc_proc_calls_ops); | ||
517 | if (ret < 0) | ||
518 | return ret; | ||
519 | |||
520 | m = file->private_data; | ||
521 | m->private = PDE(inode)->data; | ||
522 | |||
523 | return 0; | ||
524 | } /* end rxrpc_proc_calls_open() */ | ||
525 | |||
526 | /*****************************************************************************/ | ||
527 | /* | ||
528 | * set up the iterator to start reading from the calls list and return the | ||
529 | * first item | ||
530 | */ | ||
531 | static void *rxrpc_proc_calls_start(struct seq_file *m, loff_t *_pos) | ||
532 | { | ||
533 | struct list_head *_p; | ||
534 | loff_t pos = *_pos; | ||
535 | |||
536 | /* lock the list against modification */ | ||
537 | down_read(&rxrpc_calls_sem); | ||
538 | |||
539 | /* allow for the header line */ | ||
540 | if (!pos) | ||
541 | return SEQ_START_TOKEN; | ||
542 | pos--; | ||
543 | |||
544 | /* find the n'th element in the list */ | ||
545 | list_for_each(_p, &rxrpc_calls) | ||
546 | if (!pos--) | ||
547 | break; | ||
548 | |||
549 | return _p != &rxrpc_calls ? _p : NULL; | ||
550 | } /* end rxrpc_proc_calls_start() */ | ||
551 | |||
552 | /*****************************************************************************/ | ||
553 | /* | ||
554 | * move to next call in calls list | ||
555 | */ | ||
556 | static void *rxrpc_proc_calls_next(struct seq_file *p, void *v, loff_t *pos) | ||
557 | { | ||
558 | struct list_head *_p; | ||
559 | |||
560 | (*pos)++; | ||
561 | |||
562 | _p = v; | ||
563 | _p = (v == SEQ_START_TOKEN) ? rxrpc_calls.next : _p->next; | ||
564 | |||
565 | return _p != &rxrpc_calls ? _p : NULL; | ||
566 | } /* end rxrpc_proc_calls_next() */ | ||
567 | |||
568 | /*****************************************************************************/ | ||
569 | /* | ||
570 | * clean up after reading from the calls list | ||
571 | */ | ||
572 | static void rxrpc_proc_calls_stop(struct seq_file *p, void *v) | ||
573 | { | ||
574 | up_read(&rxrpc_calls_sem); | ||
575 | |||
576 | } /* end rxrpc_proc_calls_stop() */ | ||
577 | |||
578 | /*****************************************************************************/ | ||
579 | /* | ||
580 | * display a header line followed by a load of call lines | ||
581 | */ | ||
582 | static int rxrpc_proc_calls_show(struct seq_file *m, void *v) | ||
583 | { | ||
584 | struct rxrpc_call *call = list_entry(v, struct rxrpc_call, call_link); | ||
585 | |||
586 | /* display header on line 1 */ | ||
587 | if (v == SEQ_START_TOKEN) { | ||
588 | seq_puts(m, | ||
589 | "LOCAL REMOT SRVC CONN CALL DIR USE " | ||
590 | " L STATE OPCODE ABORT ERRNO\n" | ||
591 | ); | ||
592 | return 0; | ||
593 | } | ||
594 | |||
595 | /* display one call per line on subsequent lines */ | ||
596 | seq_printf(m, | ||
597 | "%5hu %5hu %04hx %08x %08x %s %3u%c" | ||
598 | " %c %-7.7s %6d %08x %5d\n", | ||
599 | call->conn->trans->port, | ||
600 | ntohs(call->conn->addr.sin_port), | ||
601 | ntohs(call->conn->service_id), | ||
602 | ntohl(call->conn->conn_id), | ||
603 | ntohl(call->call_id), | ||
604 | call->conn->service ? "SVC" : "CLT", | ||
605 | atomic_read(&call->usage), | ||
606 | waitqueue_active(&call->waitq) ? 'w' : ' ', | ||
607 | call->app_last_rcv ? 'Y' : '-', | ||
608 | (call->app_call_state!=RXRPC_CSTATE_ERROR ? | ||
609 | rxrpc_call_states7[call->app_call_state] : | ||
610 | rxrpc_call_error_states7[call->app_err_state]), | ||
611 | call->app_opcode, | ||
612 | call->app_abort_code, | ||
613 | call->app_errno | ||
614 | ); | ||
615 | |||
616 | return 0; | ||
617 | } /* end rxrpc_proc_calls_show() */ | ||
diff --git a/net/rxrpc/rxrpc_syms.c b/net/rxrpc/rxrpc_syms.c deleted file mode 100644 index 9896fd87a4d4..000000000000 --- a/net/rxrpc/rxrpc_syms.c +++ /dev/null | |||
@@ -1,34 +0,0 @@ | |||
1 | /* rxrpc_syms.c: exported Rx RPC layer interface symbols | ||
2 | * | ||
3 | * Copyright (C) 2002 Red Hat, Inc. All Rights Reserved. | ||
4 | * Written by David Howells (dhowells@redhat.com) | ||
5 | * | ||
6 | * This program is free software; you can redistribute it and/or | ||
7 | * modify it under the terms of the GNU General Public License | ||
8 | * as published by the Free Software Foundation; either version | ||
9 | * 2 of the License, or (at your option) any later version. | ||
10 | */ | ||
11 | |||
12 | #include <linux/module.h> | ||
13 | |||
14 | #include <rxrpc/transport.h> | ||
15 | #include <rxrpc/connection.h> | ||
16 | #include <rxrpc/call.h> | ||
17 | #include <rxrpc/krxiod.h> | ||
18 | |||
19 | /* call.c */ | ||
20 | EXPORT_SYMBOL(rxrpc_create_call); | ||
21 | EXPORT_SYMBOL(rxrpc_put_call); | ||
22 | EXPORT_SYMBOL(rxrpc_call_abort); | ||
23 | EXPORT_SYMBOL(rxrpc_call_read_data); | ||
24 | EXPORT_SYMBOL(rxrpc_call_write_data); | ||
25 | |||
26 | /* connection.c */ | ||
27 | EXPORT_SYMBOL(rxrpc_create_connection); | ||
28 | EXPORT_SYMBOL(rxrpc_put_connection); | ||
29 | |||
30 | /* transport.c */ | ||
31 | EXPORT_SYMBOL(rxrpc_create_transport); | ||
32 | EXPORT_SYMBOL(rxrpc_put_transport); | ||
33 | EXPORT_SYMBOL(rxrpc_add_service); | ||
34 | EXPORT_SYMBOL(rxrpc_del_service); | ||
diff --git a/net/rxrpc/sysctl.c b/net/rxrpc/sysctl.c deleted file mode 100644 index 884290754af7..000000000000 --- a/net/rxrpc/sysctl.c +++ /dev/null | |||
@@ -1,121 +0,0 @@ | |||
1 | /* sysctl.c: Rx RPC control | ||
2 | * | ||
3 | * Copyright (C) 2002 Red Hat, Inc. All Rights Reserved. | ||
4 | * Written by David Howells (dhowells@redhat.com) | ||
5 | * | ||
6 | * This program is free software; you can redistribute it and/or | ||
7 | * modify it under the terms of the GNU General Public License | ||
8 | * as published by the Free Software Foundation; either version | ||
9 | * 2 of the License, or (at your option) any later version. | ||
10 | */ | ||
11 | |||
12 | #include <linux/sched.h> | ||
13 | #include <linux/slab.h> | ||
14 | #include <linux/module.h> | ||
15 | #include <linux/sysctl.h> | ||
16 | #include <rxrpc/types.h> | ||
17 | #include <rxrpc/rxrpc.h> | ||
18 | #include <asm/errno.h> | ||
19 | #include "internal.h" | ||
20 | |||
21 | int rxrpc_ktrace; | ||
22 | int rxrpc_kdebug; | ||
23 | int rxrpc_kproto; | ||
24 | int rxrpc_knet; | ||
25 | |||
26 | #ifdef CONFIG_SYSCTL | ||
27 | static struct ctl_table_header *rxrpc_sysctl = NULL; | ||
28 | |||
29 | static ctl_table rxrpc_sysctl_table[] = { | ||
30 | { | ||
31 | .ctl_name = 1, | ||
32 | .procname = "kdebug", | ||
33 | .data = &rxrpc_kdebug, | ||
34 | .maxlen = sizeof(int), | ||
35 | .mode = 0644, | ||
36 | .proc_handler = &proc_dointvec | ||
37 | }, | ||
38 | { | ||
39 | .ctl_name = 2, | ||
40 | .procname = "ktrace", | ||
41 | .data = &rxrpc_ktrace, | ||
42 | .maxlen = sizeof(int), | ||
43 | .mode = 0644, | ||
44 | .proc_handler = &proc_dointvec | ||
45 | }, | ||
46 | { | ||
47 | .ctl_name = 3, | ||
48 | .procname = "kproto", | ||
49 | .data = &rxrpc_kproto, | ||
50 | .maxlen = sizeof(int), | ||
51 | .mode = 0644, | ||
52 | .proc_handler = &proc_dointvec | ||
53 | }, | ||
54 | { | ||
55 | .ctl_name = 4, | ||
56 | .procname = "knet", | ||
57 | .data = &rxrpc_knet, | ||
58 | .maxlen = sizeof(int), | ||
59 | .mode = 0644, | ||
60 | .proc_handler = &proc_dointvec | ||
61 | }, | ||
62 | { | ||
63 | .ctl_name = 5, | ||
64 | .procname = "peertimo", | ||
65 | .data = &rxrpc_peer_timeout, | ||
66 | .maxlen = sizeof(unsigned long), | ||
67 | .mode = 0644, | ||
68 | .proc_handler = &proc_doulongvec_minmax | ||
69 | }, | ||
70 | { | ||
71 | .ctl_name = 6, | ||
72 | .procname = "conntimo", | ||
73 | .data = &rxrpc_conn_timeout, | ||
74 | .maxlen = sizeof(unsigned long), | ||
75 | .mode = 0644, | ||
76 | .proc_handler = &proc_doulongvec_minmax | ||
77 | }, | ||
78 | { .ctl_name = 0 } | ||
79 | }; | ||
80 | |||
81 | static ctl_table rxrpc_dir_sysctl_table[] = { | ||
82 | { | ||
83 | .ctl_name = 1, | ||
84 | .procname = "rxrpc", | ||
85 | .maxlen = 0, | ||
86 | .mode = 0555, | ||
87 | .child = rxrpc_sysctl_table | ||
88 | }, | ||
89 | { .ctl_name = 0 } | ||
90 | }; | ||
91 | #endif /* CONFIG_SYSCTL */ | ||
92 | |||
93 | /*****************************************************************************/ | ||
94 | /* | ||
95 | * initialise the sysctl stuff for Rx RPC | ||
96 | */ | ||
97 | int rxrpc_sysctl_init(void) | ||
98 | { | ||
99 | #ifdef CONFIG_SYSCTL | ||
100 | rxrpc_sysctl = register_sysctl_table(rxrpc_dir_sysctl_table); | ||
101 | if (!rxrpc_sysctl) | ||
102 | return -ENOMEM; | ||
103 | #endif /* CONFIG_SYSCTL */ | ||
104 | |||
105 | return 0; | ||
106 | } /* end rxrpc_sysctl_init() */ | ||
107 | |||
108 | /*****************************************************************************/ | ||
109 | /* | ||
110 | * clean up the sysctl stuff for Rx RPC | ||
111 | */ | ||
112 | void rxrpc_sysctl_cleanup(void) | ||
113 | { | ||
114 | #ifdef CONFIG_SYSCTL | ||
115 | if (rxrpc_sysctl) { | ||
116 | unregister_sysctl_table(rxrpc_sysctl); | ||
117 | rxrpc_sysctl = NULL; | ||
118 | } | ||
119 | #endif /* CONFIG_SYSCTL */ | ||
120 | |||
121 | } /* end rxrpc_sysctl_cleanup() */ | ||
diff --git a/net/rxrpc/transport.c b/net/rxrpc/transport.c deleted file mode 100644 index 62398fd01f85..000000000000 --- a/net/rxrpc/transport.c +++ /dev/null | |||
@@ -1,846 +0,0 @@ | |||
1 | /* transport.c: Rx Transport routines | ||
2 | * | ||
3 | * Copyright (C) 2002 Red Hat, Inc. All Rights Reserved. | ||
4 | * Written by David Howells (dhowells@redhat.com) | ||
5 | * | ||
6 | * This program is free software; you can redistribute it and/or | ||
7 | * modify it under the terms of the GNU General Public License | ||
8 | * as published by the Free Software Foundation; either version | ||
9 | * 2 of the License, or (at your option) any later version. | ||
10 | */ | ||
11 | |||
12 | #include <linux/slab.h> | ||
13 | #include <linux/module.h> | ||
14 | #include <rxrpc/transport.h> | ||
15 | #include <rxrpc/peer.h> | ||
16 | #include <rxrpc/connection.h> | ||
17 | #include <rxrpc/call.h> | ||
18 | #include <rxrpc/message.h> | ||
19 | #include <rxrpc/krxiod.h> | ||
20 | #include <rxrpc/krxsecd.h> | ||
21 | #include <linux/udp.h> | ||
22 | #include <linux/in.h> | ||
23 | #include <linux/in6.h> | ||
24 | #include <linux/icmp.h> | ||
25 | #include <linux/skbuff.h> | ||
26 | #include <net/sock.h> | ||
27 | #include <net/ip.h> | ||
28 | #if defined(CONFIG_IPV6) || defined (CONFIG_IPV6_MODULE) | ||
29 | #include <linux/ipv6.h> /* this should _really_ be in errqueue.h.. */ | ||
30 | #endif | ||
31 | #include <linux/errqueue.h> | ||
32 | #include <asm/uaccess.h> | ||
33 | #include "internal.h" | ||
34 | |||
35 | struct errormsg { | ||
36 | struct cmsghdr cmsg; /* control message header */ | ||
37 | struct sock_extended_err ee; /* extended error information */ | ||
38 | struct sockaddr_in icmp_src; /* ICMP packet source address */ | ||
39 | }; | ||
40 | |||
41 | static DEFINE_SPINLOCK(rxrpc_transports_lock); | ||
42 | static struct list_head rxrpc_transports = LIST_HEAD_INIT(rxrpc_transports); | ||
43 | |||
44 | __RXACCT_DECL(atomic_t rxrpc_transport_count); | ||
45 | LIST_HEAD(rxrpc_proc_transports); | ||
46 | DECLARE_RWSEM(rxrpc_proc_transports_sem); | ||
47 | |||
48 | static void rxrpc_data_ready(struct sock *sk, int count); | ||
49 | static void rxrpc_error_report(struct sock *sk); | ||
50 | static int rxrpc_trans_receive_new_call(struct rxrpc_transport *trans, | ||
51 | struct list_head *msgq); | ||
52 | static void rxrpc_trans_receive_error_report(struct rxrpc_transport *trans); | ||
53 | |||
54 | /*****************************************************************************/ | ||
55 | /* | ||
56 | * create a new transport endpoint using the specified UDP port | ||
57 | */ | ||
58 | int rxrpc_create_transport(unsigned short port, | ||
59 | struct rxrpc_transport **_trans) | ||
60 | { | ||
61 | struct rxrpc_transport *trans; | ||
62 | struct sockaddr_in sin; | ||
63 | mm_segment_t oldfs; | ||
64 | struct sock *sock; | ||
65 | int ret, opt; | ||
66 | |||
67 | _enter("%hu", port); | ||
68 | |||
69 | trans = kzalloc(sizeof(struct rxrpc_transport), GFP_KERNEL); | ||
70 | if (!trans) | ||
71 | return -ENOMEM; | ||
72 | |||
73 | atomic_set(&trans->usage, 1); | ||
74 | INIT_LIST_HEAD(&trans->services); | ||
75 | INIT_LIST_HEAD(&trans->link); | ||
76 | INIT_LIST_HEAD(&trans->krxiodq_link); | ||
77 | spin_lock_init(&trans->lock); | ||
78 | INIT_LIST_HEAD(&trans->peer_active); | ||
79 | INIT_LIST_HEAD(&trans->peer_graveyard); | ||
80 | spin_lock_init(&trans->peer_gylock); | ||
81 | init_waitqueue_head(&trans->peer_gy_waitq); | ||
82 | rwlock_init(&trans->peer_lock); | ||
83 | atomic_set(&trans->peer_count, 0); | ||
84 | trans->port = port; | ||
85 | |||
86 | /* create a UDP socket to be my actual transport endpoint */ | ||
87 | ret = sock_create_kern(PF_INET, SOCK_DGRAM, IPPROTO_UDP, &trans->socket); | ||
88 | if (ret < 0) | ||
89 | goto error; | ||
90 | |||
91 | /* use the specified port */ | ||
92 | if (port) { | ||
93 | memset(&sin, 0, sizeof(sin)); | ||
94 | sin.sin_family = AF_INET; | ||
95 | sin.sin_port = htons(port); | ||
96 | ret = trans->socket->ops->bind(trans->socket, | ||
97 | (struct sockaddr *) &sin, | ||
98 | sizeof(sin)); | ||
99 | if (ret < 0) | ||
100 | goto error; | ||
101 | } | ||
102 | |||
103 | opt = 1; | ||
104 | oldfs = get_fs(); | ||
105 | set_fs(KERNEL_DS); | ||
106 | ret = trans->socket->ops->setsockopt(trans->socket, SOL_IP, IP_RECVERR, | ||
107 | (char *) &opt, sizeof(opt)); | ||
108 | set_fs(oldfs); | ||
109 | |||
110 | spin_lock(&rxrpc_transports_lock); | ||
111 | list_add(&trans->link, &rxrpc_transports); | ||
112 | spin_unlock(&rxrpc_transports_lock); | ||
113 | |||
114 | /* set the socket up */ | ||
115 | sock = trans->socket->sk; | ||
116 | sock->sk_user_data = trans; | ||
117 | sock->sk_data_ready = rxrpc_data_ready; | ||
118 | sock->sk_error_report = rxrpc_error_report; | ||
119 | |||
120 | down_write(&rxrpc_proc_transports_sem); | ||
121 | list_add_tail(&trans->proc_link, &rxrpc_proc_transports); | ||
122 | up_write(&rxrpc_proc_transports_sem); | ||
123 | |||
124 | __RXACCT(atomic_inc(&rxrpc_transport_count)); | ||
125 | |||
126 | *_trans = trans; | ||
127 | _leave(" = 0 (%p)", trans); | ||
128 | return 0; | ||
129 | |||
130 | error: | ||
131 | /* finish cleaning up the transport (not really needed here, but...) */ | ||
132 | if (trans->socket) | ||
133 | trans->socket->ops->shutdown(trans->socket, 2); | ||
134 | |||
135 | /* close the socket */ | ||
136 | if (trans->socket) { | ||
137 | trans->socket->sk->sk_user_data = NULL; | ||
138 | sock_release(trans->socket); | ||
139 | trans->socket = NULL; | ||
140 | } | ||
141 | |||
142 | kfree(trans); | ||
143 | |||
144 | |||
145 | _leave(" = %d", ret); | ||
146 | return ret; | ||
147 | } /* end rxrpc_create_transport() */ | ||
148 | |||
149 | /*****************************************************************************/ | ||
150 | /* | ||
151 | * destroy a transport endpoint | ||
152 | */ | ||
153 | void rxrpc_put_transport(struct rxrpc_transport *trans) | ||
154 | { | ||
155 | _enter("%p{u=%d p=%hu}", | ||
156 | trans, atomic_read(&trans->usage), trans->port); | ||
157 | |||
158 | BUG_ON(atomic_read(&trans->usage) <= 0); | ||
159 | |||
160 | /* to prevent a race, the decrement and the dequeue must be | ||
161 | * effectively atomic */ | ||
162 | spin_lock(&rxrpc_transports_lock); | ||
163 | if (likely(!atomic_dec_and_test(&trans->usage))) { | ||
164 | spin_unlock(&rxrpc_transports_lock); | ||
165 | _leave(""); | ||
166 | return; | ||
167 | } | ||
168 | |||
169 | list_del(&trans->link); | ||
170 | spin_unlock(&rxrpc_transports_lock); | ||
171 | |||
172 | /* finish cleaning up the transport */ | ||
173 | if (trans->socket) | ||
174 | trans->socket->ops->shutdown(trans->socket, 2); | ||
175 | |||
176 | rxrpc_krxsecd_clear_transport(trans); | ||
177 | rxrpc_krxiod_dequeue_transport(trans); | ||
178 | |||
179 | /* discard all peer information */ | ||
180 | rxrpc_peer_clearall(trans); | ||
181 | |||
182 | down_write(&rxrpc_proc_transports_sem); | ||
183 | list_del(&trans->proc_link); | ||
184 | up_write(&rxrpc_proc_transports_sem); | ||
185 | __RXACCT(atomic_dec(&rxrpc_transport_count)); | ||
186 | |||
187 | /* close the socket */ | ||
188 | if (trans->socket) { | ||
189 | trans->socket->sk->sk_user_data = NULL; | ||
190 | sock_release(trans->socket); | ||
191 | trans->socket = NULL; | ||
192 | } | ||
193 | |||
194 | kfree(trans); | ||
195 | |||
196 | _leave(""); | ||
197 | } /* end rxrpc_put_transport() */ | ||
198 | |||
199 | /*****************************************************************************/ | ||
200 | /* | ||
201 | * add a service to a transport to be listened upon | ||
202 | */ | ||
203 | int rxrpc_add_service(struct rxrpc_transport *trans, | ||
204 | struct rxrpc_service *newsrv) | ||
205 | { | ||
206 | struct rxrpc_service *srv; | ||
207 | struct list_head *_p; | ||
208 | int ret = -EEXIST; | ||
209 | |||
210 | _enter("%p{%hu},%p{%hu}", | ||
211 | trans, trans->port, newsrv, newsrv->service_id); | ||
212 | |||
213 | /* verify that the service ID is not already present */ | ||
214 | spin_lock(&trans->lock); | ||
215 | |||
216 | list_for_each(_p, &trans->services) { | ||
217 | srv = list_entry(_p, struct rxrpc_service, link); | ||
218 | if (srv->service_id == newsrv->service_id) | ||
219 | goto out; | ||
220 | } | ||
221 | |||
222 | /* okay - add the transport to the list */ | ||
223 | list_add_tail(&newsrv->link, &trans->services); | ||
224 | rxrpc_get_transport(trans); | ||
225 | ret = 0; | ||
226 | |||
227 | out: | ||
228 | spin_unlock(&trans->lock); | ||
229 | |||
230 | _leave("= %d", ret); | ||
231 | return ret; | ||
232 | } /* end rxrpc_add_service() */ | ||
233 | |||
234 | /*****************************************************************************/ | ||
235 | /* | ||
236 | * remove a service from a transport | ||
237 | */ | ||
238 | void rxrpc_del_service(struct rxrpc_transport *trans, struct rxrpc_service *srv) | ||
239 | { | ||
240 | _enter("%p{%hu},%p{%hu}", trans, trans->port, srv, srv->service_id); | ||
241 | |||
242 | spin_lock(&trans->lock); | ||
243 | list_del(&srv->link); | ||
244 | spin_unlock(&trans->lock); | ||
245 | |||
246 | rxrpc_put_transport(trans); | ||
247 | |||
248 | _leave(""); | ||
249 | } /* end rxrpc_del_service() */ | ||
250 | |||
251 | /*****************************************************************************/ | ||
252 | /* | ||
253 | * INET callback when data has been received on the socket. | ||
254 | */ | ||
255 | static void rxrpc_data_ready(struct sock *sk, int count) | ||
256 | { | ||
257 | struct rxrpc_transport *trans; | ||
258 | |||
259 | _enter("%p{t=%p},%d", sk, sk->sk_user_data, count); | ||
260 | |||
261 | /* queue the transport for attention by krxiod */ | ||
262 | trans = (struct rxrpc_transport *) sk->sk_user_data; | ||
263 | if (trans) | ||
264 | rxrpc_krxiod_queue_transport(trans); | ||
265 | |||
266 | /* wake up anyone waiting on the socket */ | ||
267 | if (sk->sk_sleep && waitqueue_active(sk->sk_sleep)) | ||
268 | wake_up_interruptible(sk->sk_sleep); | ||
269 | |||
270 | _leave(""); | ||
271 | } /* end rxrpc_data_ready() */ | ||
272 | |||
273 | /*****************************************************************************/ | ||
274 | /* | ||
275 | * INET callback when an ICMP error packet is received | ||
276 | * - sk->err is error (EHOSTUNREACH, EPROTO or EMSGSIZE) | ||
277 | */ | ||
278 | static void rxrpc_error_report(struct sock *sk) | ||
279 | { | ||
280 | struct rxrpc_transport *trans; | ||
281 | |||
282 | _enter("%p{t=%p}", sk, sk->sk_user_data); | ||
283 | |||
284 | /* queue the transport for attention by krxiod */ | ||
285 | trans = (struct rxrpc_transport *) sk->sk_user_data; | ||
286 | if (trans) { | ||
287 | trans->error_rcvd = 1; | ||
288 | rxrpc_krxiod_queue_transport(trans); | ||
289 | } | ||
290 | |||
291 | /* wake up anyone waiting on the socket */ | ||
292 | if (sk->sk_sleep && waitqueue_active(sk->sk_sleep)) | ||
293 | wake_up_interruptible(sk->sk_sleep); | ||
294 | |||
295 | _leave(""); | ||
296 | } /* end rxrpc_error_report() */ | ||
297 | |||
298 | /*****************************************************************************/ | ||
299 | /* | ||
300 | * split a message up, allocating message records and filling them in | ||
301 | * from the contents of a socket buffer | ||
302 | */ | ||
303 | static int rxrpc_incoming_msg(struct rxrpc_transport *trans, | ||
304 | struct sk_buff *pkt, | ||
305 | struct list_head *msgq) | ||
306 | { | ||
307 | struct rxrpc_message *msg; | ||
308 | int ret; | ||
309 | |||
310 | _enter(""); | ||
311 | |||
312 | msg = kzalloc(sizeof(struct rxrpc_message), GFP_KERNEL); | ||
313 | if (!msg) { | ||
314 | _leave(" = -ENOMEM"); | ||
315 | return -ENOMEM; | ||
316 | } | ||
317 | |||
318 | atomic_set(&msg->usage, 1); | ||
319 | list_add_tail(&msg->link,msgq); | ||
320 | |||
321 | /* dig out the Rx routing parameters */ | ||
322 | if (skb_copy_bits(pkt, sizeof(struct udphdr), | ||
323 | &msg->hdr, sizeof(msg->hdr)) < 0) { | ||
324 | ret = -EBADMSG; | ||
325 | goto error; | ||
326 | } | ||
327 | |||
328 | msg->trans = trans; | ||
329 | msg->state = RXRPC_MSG_RECEIVED; | ||
330 | skb_get_timestamp(pkt, &msg->stamp); | ||
331 | if (msg->stamp.tv_sec == 0) { | ||
332 | do_gettimeofday(&msg->stamp); | ||
333 | if (pkt->sk) | ||
334 | sock_enable_timestamp(pkt->sk); | ||
335 | } | ||
336 | msg->seq = ntohl(msg->hdr.seq); | ||
337 | |||
338 | /* attach the packet */ | ||
339 | skb_get(pkt); | ||
340 | msg->pkt = pkt; | ||
341 | |||
342 | msg->offset = sizeof(struct udphdr) + sizeof(struct rxrpc_header); | ||
343 | msg->dsize = msg->pkt->len - msg->offset; | ||
344 | |||
345 | _net("Rx Received packet from %s (%08x;%08x,%1x,%d,%s,%02x,%d,%d)", | ||
346 | msg->hdr.flags & RXRPC_CLIENT_INITIATED ? "client" : "server", | ||
347 | ntohl(msg->hdr.epoch), | ||
348 | (ntohl(msg->hdr.cid) & RXRPC_CIDMASK) >> RXRPC_CIDSHIFT, | ||
349 | ntohl(msg->hdr.cid) & RXRPC_CHANNELMASK, | ||
350 | ntohl(msg->hdr.callNumber), | ||
351 | rxrpc_pkts[msg->hdr.type], | ||
352 | msg->hdr.flags, | ||
353 | ntohs(msg->hdr.serviceId), | ||
354 | msg->hdr.securityIndex); | ||
355 | |||
356 | __RXACCT(atomic_inc(&rxrpc_message_count)); | ||
357 | |||
358 | /* split off jumbo packets */ | ||
359 | while (msg->hdr.type == RXRPC_PACKET_TYPE_DATA && | ||
360 | msg->hdr.flags & RXRPC_JUMBO_PACKET | ||
361 | ) { | ||
362 | struct rxrpc_jumbo_header jumbo; | ||
363 | struct rxrpc_message *jumbomsg = msg; | ||
364 | |||
365 | _debug("split jumbo packet"); | ||
366 | |||
367 | /* quick sanity check */ | ||
368 | ret = -EBADMSG; | ||
369 | if (msg->dsize < | ||
370 | RXRPC_JUMBO_DATALEN + sizeof(struct rxrpc_jumbo_header)) | ||
371 | goto error; | ||
372 | if (msg->hdr.flags & RXRPC_LAST_PACKET) | ||
373 | goto error; | ||
374 | |||
375 | /* dig out the secondary header */ | ||
376 | if (skb_copy_bits(pkt, msg->offset + RXRPC_JUMBO_DATALEN, | ||
377 | &jumbo, sizeof(jumbo)) < 0) | ||
378 | goto error; | ||
379 | |||
380 | /* allocate a new message record */ | ||
381 | ret = -ENOMEM; | ||
382 | msg = kmemdup(jumbomsg, sizeof(struct rxrpc_message), GFP_KERNEL); | ||
383 | if (!msg) | ||
384 | goto error; | ||
385 | |||
386 | list_add_tail(&msg->link, msgq); | ||
387 | |||
388 | /* adjust the jumbo packet */ | ||
389 | jumbomsg->dsize = RXRPC_JUMBO_DATALEN; | ||
390 | |||
391 | /* attach the packet here too */ | ||
392 | skb_get(pkt); | ||
393 | |||
394 | /* adjust the parameters */ | ||
395 | msg->seq++; | ||
396 | msg->hdr.seq = htonl(msg->seq); | ||
397 | msg->hdr.serial = htonl(ntohl(msg->hdr.serial) + 1); | ||
398 | msg->offset += RXRPC_JUMBO_DATALEN + | ||
399 | sizeof(struct rxrpc_jumbo_header); | ||
400 | msg->dsize -= RXRPC_JUMBO_DATALEN + | ||
401 | sizeof(struct rxrpc_jumbo_header); | ||
402 | msg->hdr.flags = jumbo.flags; | ||
403 | msg->hdr._rsvd = jumbo._rsvd; | ||
404 | |||
405 | _net("Rx Split jumbo packet from %s" | ||
406 | " (%08x;%08x,%1x,%d,%s,%02x,%d,%d)", | ||
407 | msg->hdr.flags & RXRPC_CLIENT_INITIATED ? "client" : "server", | ||
408 | ntohl(msg->hdr.epoch), | ||
409 | (ntohl(msg->hdr.cid) & RXRPC_CIDMASK) >> RXRPC_CIDSHIFT, | ||
410 | ntohl(msg->hdr.cid) & RXRPC_CHANNELMASK, | ||
411 | ntohl(msg->hdr.callNumber), | ||
412 | rxrpc_pkts[msg->hdr.type], | ||
413 | msg->hdr.flags, | ||
414 | ntohs(msg->hdr.serviceId), | ||
415 | msg->hdr.securityIndex); | ||
416 | |||
417 | __RXACCT(atomic_inc(&rxrpc_message_count)); | ||
418 | } | ||
419 | |||
420 | _leave(" = 0 #%d", atomic_read(&rxrpc_message_count)); | ||
421 | return 0; | ||
422 | |||
423 | error: | ||
424 | while (!list_empty(msgq)) { | ||
425 | msg = list_entry(msgq->next, struct rxrpc_message, link); | ||
426 | list_del_init(&msg->link); | ||
427 | |||
428 | rxrpc_put_message(msg); | ||
429 | } | ||
430 | |||
431 | _leave(" = %d", ret); | ||
432 | return ret; | ||
433 | } /* end rxrpc_incoming_msg() */ | ||
434 | |||
435 | /*****************************************************************************/ | ||
436 | /* | ||
437 | * accept a new call | ||
438 | * - called from krxiod in process context | ||
439 | */ | ||
440 | void rxrpc_trans_receive_packet(struct rxrpc_transport *trans) | ||
441 | { | ||
442 | struct rxrpc_message *msg; | ||
443 | struct rxrpc_peer *peer; | ||
444 | struct sk_buff *pkt; | ||
445 | int ret; | ||
446 | __be32 addr; | ||
447 | __be16 port; | ||
448 | |||
449 | LIST_HEAD(msgq); | ||
450 | |||
451 | _enter("%p{%d}", trans, trans->port); | ||
452 | |||
453 | for (;;) { | ||
454 | /* deal with outstanting errors first */ | ||
455 | if (trans->error_rcvd) | ||
456 | rxrpc_trans_receive_error_report(trans); | ||
457 | |||
458 | /* attempt to receive a packet */ | ||
459 | pkt = skb_recv_datagram(trans->socket->sk, 0, 1, &ret); | ||
460 | if (!pkt) { | ||
461 | if (ret == -EAGAIN) { | ||
462 | _leave(" EAGAIN"); | ||
463 | return; | ||
464 | } | ||
465 | |||
466 | /* an icmp error may have occurred */ | ||
467 | rxrpc_krxiod_queue_transport(trans); | ||
468 | _leave(" error %d\n", ret); | ||
469 | return; | ||
470 | } | ||
471 | |||
472 | /* we'll probably need to checksum it (didn't call | ||
473 | * sock_recvmsg) */ | ||
474 | if (skb_checksum_complete(pkt)) { | ||
475 | kfree_skb(pkt); | ||
476 | rxrpc_krxiod_queue_transport(trans); | ||
477 | _leave(" CSUM failed"); | ||
478 | return; | ||
479 | } | ||
480 | |||
481 | addr = ip_hdr(pkt)->saddr; | ||
482 | port = udp_hdr(pkt)->source; | ||
483 | |||
484 | _net("Rx Received UDP packet from %08x:%04hu", | ||
485 | ntohl(addr), ntohs(port)); | ||
486 | |||
487 | /* unmarshall the Rx parameters and split jumbo packets */ | ||
488 | ret = rxrpc_incoming_msg(trans, pkt, &msgq); | ||
489 | if (ret < 0) { | ||
490 | kfree_skb(pkt); | ||
491 | rxrpc_krxiod_queue_transport(trans); | ||
492 | _leave(" bad packet"); | ||
493 | return; | ||
494 | } | ||
495 | |||
496 | BUG_ON(list_empty(&msgq)); | ||
497 | |||
498 | msg = list_entry(msgq.next, struct rxrpc_message, link); | ||
499 | |||
500 | /* locate the record for the peer from which it | ||
501 | * originated */ | ||
502 | ret = rxrpc_peer_lookup(trans, addr, &peer); | ||
503 | if (ret < 0) { | ||
504 | kdebug("Rx No connections from that peer"); | ||
505 | rxrpc_trans_immediate_abort(trans, msg, -EINVAL); | ||
506 | goto finished_msg; | ||
507 | } | ||
508 | |||
509 | /* try and find a matching connection */ | ||
510 | ret = rxrpc_connection_lookup(peer, msg, &msg->conn); | ||
511 | if (ret < 0) { | ||
512 | kdebug("Rx Unknown Connection"); | ||
513 | rxrpc_trans_immediate_abort(trans, msg, -EINVAL); | ||
514 | rxrpc_put_peer(peer); | ||
515 | goto finished_msg; | ||
516 | } | ||
517 | rxrpc_put_peer(peer); | ||
518 | |||
519 | /* deal with the first packet of a new call */ | ||
520 | if (msg->hdr.flags & RXRPC_CLIENT_INITIATED && | ||
521 | msg->hdr.type == RXRPC_PACKET_TYPE_DATA && | ||
522 | ntohl(msg->hdr.seq) == 1 | ||
523 | ) { | ||
524 | _debug("Rx New server call"); | ||
525 | rxrpc_trans_receive_new_call(trans, &msgq); | ||
526 | goto finished_msg; | ||
527 | } | ||
528 | |||
529 | /* deal with subsequent packet(s) of call */ | ||
530 | _debug("Rx Call packet"); | ||
531 | while (!list_empty(&msgq)) { | ||
532 | msg = list_entry(msgq.next, struct rxrpc_message, link); | ||
533 | list_del_init(&msg->link); | ||
534 | |||
535 | ret = rxrpc_conn_receive_call_packet(msg->conn, NULL, msg); | ||
536 | if (ret < 0) { | ||
537 | rxrpc_trans_immediate_abort(trans, msg, ret); | ||
538 | rxrpc_put_message(msg); | ||
539 | goto finished_msg; | ||
540 | } | ||
541 | |||
542 | rxrpc_put_message(msg); | ||
543 | } | ||
544 | |||
545 | goto finished_msg; | ||
546 | |||
547 | /* dispose of the packets */ | ||
548 | finished_msg: | ||
549 | while (!list_empty(&msgq)) { | ||
550 | msg = list_entry(msgq.next, struct rxrpc_message, link); | ||
551 | list_del_init(&msg->link); | ||
552 | |||
553 | rxrpc_put_message(msg); | ||
554 | } | ||
555 | kfree_skb(pkt); | ||
556 | } | ||
557 | |||
558 | _leave(""); | ||
559 | |||
560 | } /* end rxrpc_trans_receive_packet() */ | ||
561 | |||
562 | /*****************************************************************************/ | ||
563 | /* | ||
564 | * accept a new call from a client trying to connect to one of my services | ||
565 | * - called in process context | ||
566 | */ | ||
567 | static int rxrpc_trans_receive_new_call(struct rxrpc_transport *trans, | ||
568 | struct list_head *msgq) | ||
569 | { | ||
570 | struct rxrpc_message *msg; | ||
571 | |||
572 | _enter(""); | ||
573 | |||
574 | /* only bother with the first packet */ | ||
575 | msg = list_entry(msgq->next, struct rxrpc_message, link); | ||
576 | list_del_init(&msg->link); | ||
577 | rxrpc_krxsecd_queue_incoming_call(msg); | ||
578 | rxrpc_put_message(msg); | ||
579 | |||
580 | _leave(" = 0"); | ||
581 | |||
582 | return 0; | ||
583 | } /* end rxrpc_trans_receive_new_call() */ | ||
584 | |||
585 | /*****************************************************************************/ | ||
586 | /* | ||
587 | * perform an immediate abort without connection or call structures | ||
588 | */ | ||
589 | int rxrpc_trans_immediate_abort(struct rxrpc_transport *trans, | ||
590 | struct rxrpc_message *msg, | ||
591 | int error) | ||
592 | { | ||
593 | struct rxrpc_header ahdr; | ||
594 | struct sockaddr_in sin; | ||
595 | struct msghdr msghdr; | ||
596 | struct kvec iov[2]; | ||
597 | __be32 _error; | ||
598 | int len, ret; | ||
599 | |||
600 | _enter("%p,%p,%d", trans, msg, error); | ||
601 | |||
602 | /* don't abort an abort packet */ | ||
603 | if (msg->hdr.type == RXRPC_PACKET_TYPE_ABORT) { | ||
604 | _leave(" = 0"); | ||
605 | return 0; | ||
606 | } | ||
607 | |||
608 | _error = htonl(-error); | ||
609 | |||
610 | /* set up the message to be transmitted */ | ||
611 | memcpy(&ahdr, &msg->hdr, sizeof(ahdr)); | ||
612 | ahdr.epoch = msg->hdr.epoch; | ||
613 | ahdr.serial = htonl(1); | ||
614 | ahdr.seq = 0; | ||
615 | ahdr.type = RXRPC_PACKET_TYPE_ABORT; | ||
616 | ahdr.flags = RXRPC_LAST_PACKET; | ||
617 | ahdr.flags |= ~msg->hdr.flags & RXRPC_CLIENT_INITIATED; | ||
618 | |||
619 | iov[0].iov_len = sizeof(ahdr); | ||
620 | iov[0].iov_base = &ahdr; | ||
621 | iov[1].iov_len = sizeof(_error); | ||
622 | iov[1].iov_base = &_error; | ||
623 | |||
624 | len = sizeof(ahdr) + sizeof(_error); | ||
625 | |||
626 | memset(&sin,0,sizeof(sin)); | ||
627 | sin.sin_family = AF_INET; | ||
628 | sin.sin_port = udp_hdr(msg->pkt)->source; | ||
629 | sin.sin_addr.s_addr = ip_hdr(msg->pkt)->saddr; | ||
630 | |||
631 | msghdr.msg_name = &sin; | ||
632 | msghdr.msg_namelen = sizeof(sin); | ||
633 | msghdr.msg_control = NULL; | ||
634 | msghdr.msg_controllen = 0; | ||
635 | msghdr.msg_flags = MSG_DONTWAIT; | ||
636 | |||
637 | _net("Sending message type %d of %d bytes to %08x:%d", | ||
638 | ahdr.type, | ||
639 | len, | ||
640 | ntohl(sin.sin_addr.s_addr), | ||
641 | ntohs(sin.sin_port)); | ||
642 | |||
643 | /* send the message */ | ||
644 | ret = kernel_sendmsg(trans->socket, &msghdr, iov, 2, len); | ||
645 | |||
646 | _leave(" = %d", ret); | ||
647 | return ret; | ||
648 | } /* end rxrpc_trans_immediate_abort() */ | ||
649 | |||
650 | /*****************************************************************************/ | ||
651 | /* | ||
652 | * receive an ICMP error report and percolate it to all connections | ||
653 | * heading to the affected host or port | ||
654 | */ | ||
655 | static void rxrpc_trans_receive_error_report(struct rxrpc_transport *trans) | ||
656 | { | ||
657 | struct rxrpc_connection *conn; | ||
658 | struct sockaddr_in sin; | ||
659 | struct rxrpc_peer *peer; | ||
660 | struct list_head connq, *_p; | ||
661 | struct errormsg emsg; | ||
662 | struct msghdr msg; | ||
663 | __be16 port; | ||
664 | int local, err; | ||
665 | |||
666 | _enter("%p", trans); | ||
667 | |||
668 | for (;;) { | ||
669 | trans->error_rcvd = 0; | ||
670 | |||
671 | /* try and receive an error message */ | ||
672 | msg.msg_name = &sin; | ||
673 | msg.msg_namelen = sizeof(sin); | ||
674 | msg.msg_control = &emsg; | ||
675 | msg.msg_controllen = sizeof(emsg); | ||
676 | msg.msg_flags = 0; | ||
677 | |||
678 | err = kernel_recvmsg(trans->socket, &msg, NULL, 0, 0, | ||
679 | MSG_ERRQUEUE | MSG_DONTWAIT | MSG_TRUNC); | ||
680 | |||
681 | if (err == -EAGAIN) { | ||
682 | _leave(""); | ||
683 | return; | ||
684 | } | ||
685 | |||
686 | if (err < 0) { | ||
687 | printk("%s: unable to recv an error report: %d\n", | ||
688 | __FUNCTION__, err); | ||
689 | _leave(""); | ||
690 | return; | ||
691 | } | ||
692 | |||
693 | msg.msg_controllen = (char *) msg.msg_control - (char *) &emsg; | ||
694 | |||
695 | if (msg.msg_controllen < sizeof(emsg.cmsg) || | ||
696 | msg.msg_namelen < sizeof(sin)) { | ||
697 | printk("%s: short control message" | ||
698 | " (nlen=%u clen=%Zu fl=%x)\n", | ||
699 | __FUNCTION__, | ||
700 | msg.msg_namelen, | ||
701 | msg.msg_controllen, | ||
702 | msg.msg_flags); | ||
703 | continue; | ||
704 | } | ||
705 | |||
706 | _net("Rx Received control message" | ||
707 | " { len=%Zu level=%u type=%u }", | ||
708 | emsg.cmsg.cmsg_len, | ||
709 | emsg.cmsg.cmsg_level, | ||
710 | emsg.cmsg.cmsg_type); | ||
711 | |||
712 | if (sin.sin_family != AF_INET) { | ||
713 | printk("Rx Ignoring error report with non-INET address" | ||
714 | " (fam=%u)", | ||
715 | sin.sin_family); | ||
716 | continue; | ||
717 | } | ||
718 | |||
719 | _net("Rx Received message pertaining to host addr=%x port=%hu", | ||
720 | ntohl(sin.sin_addr.s_addr), ntohs(sin.sin_port)); | ||
721 | |||
722 | if (emsg.cmsg.cmsg_level != SOL_IP || | ||
723 | emsg.cmsg.cmsg_type != IP_RECVERR) { | ||
724 | printk("Rx Ignoring unknown error report" | ||
725 | " { level=%u type=%u }", | ||
726 | emsg.cmsg.cmsg_level, | ||
727 | emsg.cmsg.cmsg_type); | ||
728 | continue; | ||
729 | } | ||
730 | |||
731 | if (msg.msg_controllen < sizeof(emsg.cmsg) + sizeof(emsg.ee)) { | ||
732 | printk("%s: short error message (%Zu)\n", | ||
733 | __FUNCTION__, msg.msg_controllen); | ||
734 | _leave(""); | ||
735 | return; | ||
736 | } | ||
737 | |||
738 | port = sin.sin_port; | ||
739 | |||
740 | switch (emsg.ee.ee_origin) { | ||
741 | case SO_EE_ORIGIN_ICMP: | ||
742 | local = 0; | ||
743 | switch (emsg.ee.ee_type) { | ||
744 | case ICMP_DEST_UNREACH: | ||
745 | switch (emsg.ee.ee_code) { | ||
746 | case ICMP_NET_UNREACH: | ||
747 | _net("Rx Received ICMP Network Unreachable"); | ||
748 | port = 0; | ||
749 | err = -ENETUNREACH; | ||
750 | break; | ||
751 | case ICMP_HOST_UNREACH: | ||
752 | _net("Rx Received ICMP Host Unreachable"); | ||
753 | port = 0; | ||
754 | err = -EHOSTUNREACH; | ||
755 | break; | ||
756 | case ICMP_PORT_UNREACH: | ||
757 | _net("Rx Received ICMP Port Unreachable"); | ||
758 | err = -ECONNREFUSED; | ||
759 | break; | ||
760 | case ICMP_NET_UNKNOWN: | ||
761 | _net("Rx Received ICMP Unknown Network"); | ||
762 | port = 0; | ||
763 | err = -ENETUNREACH; | ||
764 | break; | ||
765 | case ICMP_HOST_UNKNOWN: | ||
766 | _net("Rx Received ICMP Unknown Host"); | ||
767 | port = 0; | ||
768 | err = -EHOSTUNREACH; | ||
769 | break; | ||
770 | default: | ||
771 | _net("Rx Received ICMP DestUnreach { code=%u }", | ||
772 | emsg.ee.ee_code); | ||
773 | err = emsg.ee.ee_errno; | ||
774 | break; | ||
775 | } | ||
776 | break; | ||
777 | |||
778 | case ICMP_TIME_EXCEEDED: | ||
779 | _net("Rx Received ICMP TTL Exceeded"); | ||
780 | err = emsg.ee.ee_errno; | ||
781 | break; | ||
782 | |||
783 | default: | ||
784 | _proto("Rx Received ICMP error { type=%u code=%u }", | ||
785 | emsg.ee.ee_type, emsg.ee.ee_code); | ||
786 | err = emsg.ee.ee_errno; | ||
787 | break; | ||
788 | } | ||
789 | break; | ||
790 | |||
791 | case SO_EE_ORIGIN_LOCAL: | ||
792 | _proto("Rx Received local error { error=%d }", | ||
793 | emsg.ee.ee_errno); | ||
794 | local = 1; | ||
795 | err = emsg.ee.ee_errno; | ||
796 | break; | ||
797 | |||
798 | case SO_EE_ORIGIN_NONE: | ||
799 | case SO_EE_ORIGIN_ICMP6: | ||
800 | default: | ||
801 | _proto("Rx Received error report { orig=%u }", | ||
802 | emsg.ee.ee_origin); | ||
803 | local = 0; | ||
804 | err = emsg.ee.ee_errno; | ||
805 | break; | ||
806 | } | ||
807 | |||
808 | /* find all the connections between this transport and the | ||
809 | * affected destination */ | ||
810 | INIT_LIST_HEAD(&connq); | ||
811 | |||
812 | if (rxrpc_peer_lookup(trans, sin.sin_addr.s_addr, | ||
813 | &peer) == 0) { | ||
814 | read_lock(&peer->conn_lock); | ||
815 | list_for_each(_p, &peer->conn_active) { | ||
816 | conn = list_entry(_p, struct rxrpc_connection, | ||
817 | link); | ||
818 | if (port && conn->addr.sin_port != port) | ||
819 | continue; | ||
820 | if (!list_empty(&conn->err_link)) | ||
821 | continue; | ||
822 | |||
823 | rxrpc_get_connection(conn); | ||
824 | list_add_tail(&conn->err_link, &connq); | ||
825 | } | ||
826 | read_unlock(&peer->conn_lock); | ||
827 | |||
828 | /* service all those connections */ | ||
829 | while (!list_empty(&connq)) { | ||
830 | conn = list_entry(connq.next, | ||
831 | struct rxrpc_connection, | ||
832 | err_link); | ||
833 | list_del(&conn->err_link); | ||
834 | |||
835 | rxrpc_conn_handle_error(conn, local, err); | ||
836 | |||
837 | rxrpc_put_connection(conn); | ||
838 | } | ||
839 | |||
840 | rxrpc_put_peer(peer); | ||
841 | } | ||
842 | } | ||
843 | |||
844 | _leave(""); | ||
845 | return; | ||
846 | } /* end rxrpc_trans_receive_error_report() */ | ||