author    Linus Torvalds <torvalds@ppc970.osdl.org>  2005-04-16 18:20:36 -0400
committer Linus Torvalds <torvalds@ppc970.osdl.org>  2005-04-16 18:20:36 -0400
commit    1da177e4c3f41524e886b7f1b8a0c1fc7321cac2
tree      0bba044c4ce775e45a88a51686b5d9f90697ea9d /net/rxrpc
Linux-2.6.12-rc2 (tag: v2.6.12-rc2)
Initial git repository build. I'm not bothering with the full history, even though we have it. We can create a separate "historical" git archive of that later if we want to, and in the meantime it's about 3.2GB when imported into git - space that would just make the early git days unnecessarily complicated, when we don't have a lot of good infrastructure for it.

Let it rip!
Diffstat (limited to 'net/rxrpc')
-rw-r--r--  net/rxrpc/Makefile        25
-rw-r--r--  net/rxrpc/call.c        2278
-rw-r--r--  net/rxrpc/connection.c   778
-rw-r--r--  net/rxrpc/internal.h     106
-rw-r--r--  net/rxrpc/krxiod.c       261
-rw-r--r--  net/rxrpc/krxsecd.c      270
-rw-r--r--  net/rxrpc/krxtimod.c     203
-rw-r--r--  net/rxrpc/main.c         180
-rw-r--r--  net/rxrpc/peer.c         399
-rw-r--r--  net/rxrpc/proc.c         617
-rw-r--r--  net/rxrpc/rxrpc_syms.c    35
-rw-r--r--  net/rxrpc/sysctl.c       122
-rw-r--r--  net/rxrpc/transport.c    854
13 files changed, 6128 insertions, 0 deletions
diff --git a/net/rxrpc/Makefile b/net/rxrpc/Makefile
new file mode 100644
index 000000000000..6efcb6f162a0
--- /dev/null
+++ b/net/rxrpc/Makefile
@@ -0,0 +1,25 @@
1#
2# Makefile for Linux kernel Rx RPC
3#
4
5#CFLAGS += -finstrument-functions
6
7rxrpc-objs := \
8 call.o \
9 connection.o \
10 krxiod.o \
11 krxsecd.o \
12 krxtimod.o \
13 main.o \
14 peer.o \
15 rxrpc_syms.o \
16 transport.o
17
18ifeq ($(CONFIG_PROC_FS),y)
19rxrpc-objs += proc.o
20endif
21ifeq ($(CONFIG_SYSCTL),y)
22rxrpc-objs += sysctl.o
23endif
24
25obj-$(CONFIG_RXRPC) := rxrpc.o
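
The two ifeq blocks above fold proc.o and sysctl.o into the composite rxrpc.o object only when the corresponding config options are enabled. For comparison, a minimal sketch of the same conditional list written with the kbuild per-option suffix idiom (illustrative only, not part of this commit):

    # illustrative equivalent using rxrpc-$(CONFIG_...) accumulation
    rxrpc-y := call.o connection.o krxiod.o krxsecd.o krxtimod.o \
               main.o peer.o rxrpc_syms.o transport.o
    rxrpc-$(CONFIG_PROC_FS) += proc.o
    rxrpc-$(CONFIG_SYSCTL)  += sysctl.o
    obj-$(CONFIG_RXRPC)     += rxrpc.o
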
diff --git a/net/rxrpc/call.c b/net/rxrpc/call.c
new file mode 100644
index 000000000000..5cfd4cadee42
--- /dev/null
+++ b/net/rxrpc/call.c
@@ -0,0 +1,2278 @@
1/* call.c: Rx call routines
2 *
3 * Copyright (C) 2002 Red Hat, Inc. All Rights Reserved.
4 * Written by David Howells (dhowells@redhat.com)
5 *
6 * This program is free software; you can redistribute it and/or
7 * modify it under the terms of the GNU General Public License
8 * as published by the Free Software Foundation; either version
9 * 2 of the License, or (at your option) any later version.
10 */
11
12#include <linux/sched.h>
13#include <linux/slab.h>
14#include <linux/module.h>
15#include <rxrpc/rxrpc.h>
16#include <rxrpc/transport.h>
17#include <rxrpc/peer.h>
18#include <rxrpc/connection.h>
19#include <rxrpc/call.h>
20#include <rxrpc/message.h>
21#include "internal.h"
22
23__RXACCT_DECL(atomic_t rxrpc_call_count);
24__RXACCT_DECL(atomic_t rxrpc_message_count);
25
26LIST_HEAD(rxrpc_calls);
27DECLARE_RWSEM(rxrpc_calls_sem);
28
29unsigned rxrpc_call_rcv_timeout = HZ/3;
30static unsigned rxrpc_call_acks_timeout = HZ/3;
31static unsigned rxrpc_call_dfr_ack_timeout = HZ/20;
32static unsigned short rxrpc_call_max_resend = HZ/10;
33
34const char *rxrpc_call_states[] = {
35 "COMPLETE",
36 "ERROR",
37 "SRVR_RCV_OPID",
38 "SRVR_RCV_ARGS",
39 "SRVR_GOT_ARGS",
40 "SRVR_SND_REPLY",
41 "SRVR_RCV_FINAL_ACK",
42 "CLNT_SND_ARGS",
43 "CLNT_RCV_REPLY",
44 "CLNT_GOT_REPLY"
45};
46
47const char *rxrpc_call_error_states[] = {
48 "NO_ERROR",
49 "LOCAL_ABORT",
50 "PEER_ABORT",
51 "LOCAL_ERROR",
52 "REMOTE_ERROR"
53};
54
55const char *rxrpc_pkts[] = {
56 "?00",
57 "data", "ack", "busy", "abort", "ackall", "chall", "resp", "debug",
58 "?09", "?10", "?11", "?12", "?13", "?14", "?15"
59};
60
61static const char *rxrpc_acks[] = {
62 "---", "REQ", "DUP", "SEQ", "WIN", "MEM", "PNG", "PNR", "DLY", "IDL",
63 "-?-"
64};
65
66static const char _acktype[] = "NA-";
67
68static void rxrpc_call_receive_packet(struct rxrpc_call *call);
69static void rxrpc_call_receive_data_packet(struct rxrpc_call *call,
70 struct rxrpc_message *msg);
71static void rxrpc_call_receive_ack_packet(struct rxrpc_call *call,
72 struct rxrpc_message *msg);
73static void rxrpc_call_definitively_ACK(struct rxrpc_call *call,
74 rxrpc_seq_t highest);
75static void rxrpc_call_resend(struct rxrpc_call *call, rxrpc_seq_t highest);
76static int __rxrpc_call_read_data(struct rxrpc_call *call);
77
78static int rxrpc_call_record_ACK(struct rxrpc_call *call,
79 struct rxrpc_message *msg,
80 rxrpc_seq_t seq,
81 size_t count);
82
83static int rxrpc_call_flush(struct rxrpc_call *call);
84
85#define _state(call) \
86 _debug("[[[ state %s ]]]", rxrpc_call_states[call->app_call_state]);
87
88static void rxrpc_call_default_attn_func(struct rxrpc_call *call)
89{
90 wake_up(&call->waitq);
91}
92
93static void rxrpc_call_default_error_func(struct rxrpc_call *call)
94{
95 wake_up(&call->waitq);
96}
97
98static void rxrpc_call_default_aemap_func(struct rxrpc_call *call)
99{
100 switch (call->app_err_state) {
101 case RXRPC_ESTATE_LOCAL_ABORT:
102 call->app_abort_code = -call->app_errno; /* fall through */
103 case RXRPC_ESTATE_PEER_ABORT:
104 call->app_errno = -ECONNABORTED; /* fall through */
105 default:
106 break;
107 }
108}
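
The three hooks above are the defaults installed by __rxrpc_create_call(); an application can override them when it creates a call. A minimal sketch of a custom abort-error map, assuming a hypothetical service-defined abort code (the MYSVC_* names below are invented for illustration and do not exist in this commit):

    #define MYSVC_ABORT_BAD_HANDLE 17	/* hypothetical service abort code */

    static void mysvc_aemap(struct rxrpc_call *call)
    {
    	switch (call->app_err_state) {
    	case RXRPC_ESTATE_PEER_ABORT:
    		/* translate the peer's abort code into a local errno */
    		call->app_errno =
    			call->app_abort_code == MYSVC_ABORT_BAD_HANDLE ?
    			-EBADF : -ECONNABORTED;
    		break;
    	case RXRPC_ESTATE_LOCAL_ABORT:
    		call->app_abort_code = -call->app_errno;
    		break;
    	default:
    		break;
    	}
    }
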
109
110static void __rxrpc_call_acks_timeout(unsigned long _call)
111{
112 struct rxrpc_call *call = (struct rxrpc_call *) _call;
113
114 _debug("ACKS TIMEOUT %05lu", jiffies - call->cjif);
115
116 call->flags |= RXRPC_CALL_ACKS_TIMO;
117 rxrpc_krxiod_queue_call(call);
118}
119
120static void __rxrpc_call_rcv_timeout(unsigned long _call)
121{
122 struct rxrpc_call *call = (struct rxrpc_call *) _call;
123
124 _debug("RCV TIMEOUT %05lu", jiffies - call->cjif);
125
126 call->flags |= RXRPC_CALL_RCV_TIMO;
127 rxrpc_krxiod_queue_call(call);
128}
129
130static void __rxrpc_call_ackr_timeout(unsigned long _call)
131{
132 struct rxrpc_call *call = (struct rxrpc_call *) _call;
133
134 _debug("ACKR TIMEOUT %05lu",jiffies - call->cjif);
135
136 call->flags |= RXRPC_CALL_ACKR_TIMO;
137 rxrpc_krxiod_queue_call(call);
138}
139
140/*****************************************************************************/
141/*
142 * calculate a timeout based on an RTT value
143 */
144static inline unsigned long __rxrpc_rtt_based_timeout(struct rxrpc_call *call,
145 unsigned long val)
146{
147 unsigned long expiry = call->conn->peer->rtt / (1000000 / HZ);
148
149 expiry += 10;
150 if (expiry < HZ / 25)
151 expiry = HZ / 25;
152 if (expiry > HZ)
153 expiry = HZ;
154
155 _leave(" = %lu jiffies", expiry);
156 return jiffies + expiry;
157} /* end __rxrpc_rtt_based_timeout() */
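
A worked example of the clamping above, assuming HZ is 1000 and a measured peer RTT of 5000 microseconds:

    /*
     * expiry = 5000 / (1000000 / 1000)	-> 5 jiffies
     * expiry += 10				-> 15 jiffies
     * floor of HZ/25 (40 jiffies)		-> raised to 40 jiffies (40ms)
     * ceiling of HZ (1000 jiffies)		-> not reached
     * returned value: jiffies + 40
     */
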
158
159/*****************************************************************************/
160/*
161 * create a new call record
162 */
163static inline int __rxrpc_create_call(struct rxrpc_connection *conn,
164 struct rxrpc_call **_call)
165{
166 struct rxrpc_call *call;
167
168 _enter("%p", conn);
169
170 /* allocate and initialise a call record */
171 call = (struct rxrpc_call *) get_zeroed_page(GFP_KERNEL);
172 if (!call) {
173 _leave(" ENOMEM");
174 return -ENOMEM;
175 }
176
177 atomic_set(&call->usage, 1);
178
179 init_waitqueue_head(&call->waitq);
180 spin_lock_init(&call->lock);
181 INIT_LIST_HEAD(&call->link);
182 INIT_LIST_HEAD(&call->acks_pendq);
183 INIT_LIST_HEAD(&call->rcv_receiveq);
184 INIT_LIST_HEAD(&call->rcv_krxiodq_lk);
185 INIT_LIST_HEAD(&call->app_readyq);
186 INIT_LIST_HEAD(&call->app_unreadyq);
187 INIT_LIST_HEAD(&call->app_link);
188 INIT_LIST_HEAD(&call->app_attn_link);
189
190 init_timer(&call->acks_timeout);
191 call->acks_timeout.data = (unsigned long) call;
192 call->acks_timeout.function = __rxrpc_call_acks_timeout;
193
194 init_timer(&call->rcv_timeout);
195 call->rcv_timeout.data = (unsigned long) call;
196 call->rcv_timeout.function = __rxrpc_call_rcv_timeout;
197
198 init_timer(&call->ackr_dfr_timo);
199 call->ackr_dfr_timo.data = (unsigned long) call;
200 call->ackr_dfr_timo.function = __rxrpc_call_ackr_timeout;
201
202 call->conn = conn;
203 call->ackr_win_bot = 1;
204 call->ackr_win_top = call->ackr_win_bot + RXRPC_CALL_ACK_WINDOW_SIZE - 1;
205 call->ackr_prev_seq = 0;
206 call->app_mark = RXRPC_APP_MARK_EOF;
207 call->app_attn_func = rxrpc_call_default_attn_func;
208 call->app_error_func = rxrpc_call_default_error_func;
209 call->app_aemap_func = rxrpc_call_default_aemap_func;
210 call->app_scr_alloc = call->app_scratch;
211
212 call->cjif = jiffies;
213
214 _leave(" = 0 (%p)", call);
215
216 *_call = call;
217
218 return 0;
219} /* end __rxrpc_create_call() */
220
221/*****************************************************************************/
222/*
223 * create a new call record for outgoing calls
224 */
225int rxrpc_create_call(struct rxrpc_connection *conn,
226 rxrpc_call_attn_func_t attn,
227 rxrpc_call_error_func_t error,
228 rxrpc_call_aemap_func_t aemap,
229 struct rxrpc_call **_call)
230{
231 DECLARE_WAITQUEUE(myself, current);
232
233 struct rxrpc_call *call;
234 int ret, cix, loop;
235
236 _enter("%p", conn);
237
238 /* allocate and initialise a call record */
239 ret = __rxrpc_create_call(conn, &call);
240 if (ret < 0) {
241 _leave(" = %d", ret);
242 return ret;
243 }
244
245 call->app_call_state = RXRPC_CSTATE_CLNT_SND_ARGS;
246 if (attn)
247 call->app_attn_func = attn;
248 if (error)
249 call->app_error_func = error;
250 if (aemap)
251 call->app_aemap_func = aemap;
252
253 _state(call);
254
255 spin_lock(&conn->lock);
256 set_current_state(TASK_INTERRUPTIBLE);
257 add_wait_queue(&conn->chanwait, &myself);
258
259 try_again:
260 /* try to find an unused channel */
261 for (cix = 0; cix < 4; cix++)
262 if (!conn->channels[cix])
263 goto obtained_chan;
264
265 /* no free channels - wait for one to become available */
266 ret = -EINTR;
267 if (signal_pending(current))
268 goto error_unwait;
269
270 spin_unlock(&conn->lock);
271
272 schedule();
273 set_current_state(TASK_INTERRUPTIBLE);
274
275 spin_lock(&conn->lock);
276 goto try_again;
277
278 /* got a channel - now attach to the connection */
279 obtained_chan:
280 remove_wait_queue(&conn->chanwait, &myself);
281 set_current_state(TASK_RUNNING);
282
283 /* concoct a unique call number */
284 next_callid:
285 call->call_id = htonl(++conn->call_counter);
286 for (loop = 0; loop < 4; loop++)
287 if (conn->channels[loop] &&
288 conn->channels[loop]->call_id == call->call_id)
289 goto next_callid;
290
291 rxrpc_get_connection(conn);
292 conn->channels[cix] = call; /* assign _after_ done callid check loop */
293 do_gettimeofday(&conn->atime);
294 call->chan_ix = htonl(cix);
295
296 spin_unlock(&conn->lock);
297
298 down_write(&rxrpc_calls_sem);
299 list_add_tail(&call->call_link, &rxrpc_calls);
300 up_write(&rxrpc_calls_sem);
301
302 __RXACCT(atomic_inc(&rxrpc_call_count));
303 *_call = call;
304
305 _leave(" = 0 (call=%p cix=%u)", call, cix);
306 return 0;
307
308 error_unwait:
309 remove_wait_queue(&conn->chanwait, &myself);
310 set_current_state(TASK_RUNNING);
311 spin_unlock(&conn->lock);
312
313 free_page((unsigned long) call);
314 _leave(" = %d", ret);
315 return ret;
316} /* end rxrpc_create_call() */
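
A minimal usage sketch for the client side, assuming conn is an already-established rxrpc connection; passing NULL for the three hooks keeps the wake-up defaults shown earlier:

    struct rxrpc_call *call;
    int ret;

    ret = rxrpc_create_call(conn, NULL, NULL, NULL, &call);
    if (ret < 0)
    	return ret;	/* e.g. -EINTR while waiting for a free channel */

    /* ... write the arguments, read the reply ... */

    rxrpc_put_call(call);	/* release the reference taken at creation */
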
317
318/*****************************************************************************/
319/*
320 * create a new call record for incoming calls
321 */
322int rxrpc_incoming_call(struct rxrpc_connection *conn,
323 struct rxrpc_message *msg,
324 struct rxrpc_call **_call)
325{
326 struct rxrpc_call *call;
327 unsigned cix;
328 int ret;
329
330 cix = ntohl(msg->hdr.cid) & RXRPC_CHANNELMASK;
331
332 _enter("%p,%u,%u", conn, ntohl(msg->hdr.callNumber), cix);
333
334 /* allocate and initialise a call record */
335 ret = __rxrpc_create_call(conn, &call);
336 if (ret < 0) {
337 _leave(" = %d", ret);
338 return ret;
339 }
340
341 call->pkt_rcv_count = 1;
342 call->app_call_state = RXRPC_CSTATE_SRVR_RCV_OPID;
343 call->app_mark = sizeof(uint32_t);
344
345 _state(call);
346
347 /* attach to the connection */
348 ret = -EBUSY;
349 call->chan_ix = htonl(cix);
350 call->call_id = msg->hdr.callNumber;
351
352 spin_lock(&conn->lock);
353
354 if (!conn->channels[cix] ||
355 conn->channels[cix]->app_call_state == RXRPC_CSTATE_COMPLETE ||
356 conn->channels[cix]->app_call_state == RXRPC_CSTATE_ERROR
357 ) {
358 conn->channels[cix] = call;
359 rxrpc_get_connection(conn);
360 ret = 0;
361 }
362
363 spin_unlock(&conn->lock);
364
365 if (ret < 0) {
366 free_page((unsigned long) call);
367 call = NULL;
368 }
369
370 if (ret == 0) {
371 down_write(&rxrpc_calls_sem);
372 list_add_tail(&call->call_link, &rxrpc_calls);
373 up_write(&rxrpc_calls_sem);
374 __RXACCT(atomic_inc(&rxrpc_call_count));
375 *_call = call;
376 }
377
378 _leave(" = %d [%p]", ret, call);
379 return ret;
380} /* end rxrpc_incoming_call() */
381
382/*****************************************************************************/
383/*
384 * free a call record
385 */
386void rxrpc_put_call(struct rxrpc_call *call)
387{
388 struct rxrpc_connection *conn = call->conn;
389 struct rxrpc_message *msg;
390
391 _enter("%p{u=%d}",call,atomic_read(&call->usage));
392
393 /* sanity check */
394 if (atomic_read(&call->usage) <= 0)
395 BUG();
396
397 /* to prevent a race, the decrement and the de-list must be effectively
398 * atomic */
399 spin_lock(&conn->lock);
400 if (likely(!atomic_dec_and_test(&call->usage))) {
401 spin_unlock(&conn->lock);
402 _leave("");
403 return;
404 }
405
406 if (conn->channels[ntohl(call->chan_ix)] == call)
407 conn->channels[ntohl(call->chan_ix)] = NULL;
408
409 spin_unlock(&conn->lock);
410
411 wake_up(&conn->chanwait);
412
413 rxrpc_put_connection(conn);
414
415 /* clear the timers and dequeue from krxiod */
416 del_timer_sync(&call->acks_timeout);
417 del_timer_sync(&call->rcv_timeout);
418 del_timer_sync(&call->ackr_dfr_timo);
419
420 rxrpc_krxiod_dequeue_call(call);
421
422 /* clean up the contents of the struct */
423 if (call->snd_nextmsg)
424 rxrpc_put_message(call->snd_nextmsg);
425
426 if (call->snd_ping)
427 rxrpc_put_message(call->snd_ping);
428
429 while (!list_empty(&call->acks_pendq)) {
430 msg = list_entry(call->acks_pendq.next,
431 struct rxrpc_message, link);
432 list_del(&msg->link);
433 rxrpc_put_message(msg);
434 }
435
436 while (!list_empty(&call->rcv_receiveq)) {
437 msg = list_entry(call->rcv_receiveq.next,
438 struct rxrpc_message, link);
439 list_del(&msg->link);
440 rxrpc_put_message(msg);
441 }
442
443 while (!list_empty(&call->app_readyq)) {
444 msg = list_entry(call->app_readyq.next,
445 struct rxrpc_message, link);
446 list_del(&msg->link);
447 rxrpc_put_message(msg);
448 }
449
450 while (!list_empty(&call->app_unreadyq)) {
451 msg = list_entry(call->app_unreadyq.next,
452 struct rxrpc_message, link);
453 list_del(&msg->link);
454 rxrpc_put_message(msg);
455 }
456
457 module_put(call->owner);
458
459 down_write(&rxrpc_calls_sem);
460 list_del(&call->call_link);
461 up_write(&rxrpc_calls_sem);
462
463 __RXACCT(atomic_dec(&rxrpc_call_count));
464 free_page((unsigned long) call);
465
466 _leave(" [destroyed]");
467} /* end rxrpc_put_call() */
468
469/*****************************************************************************/
470/*
471 * actually generate a normal ACK
472 */
473static inline int __rxrpc_call_gen_normal_ACK(struct rxrpc_call *call,
474 rxrpc_seq_t seq)
475{
476 struct rxrpc_message *msg;
477 struct kvec diov[3];
478 __be32 aux[4];
479 int delta, ret;
480
481 /* ACKs default to DELAY */
482 if (!call->ackr.reason)
483 call->ackr.reason = RXRPC_ACK_DELAY;
484
485 _proto("Rx %05lu Sending ACK { m=%hu f=#%u p=#%u s=%%%u r=%s n=%u }",
486 jiffies - call->cjif,
487 ntohs(call->ackr.maxSkew),
488 ntohl(call->ackr.firstPacket),
489 ntohl(call->ackr.previousPacket),
490 ntohl(call->ackr.serial),
491 rxrpc_acks[call->ackr.reason],
492 call->ackr.nAcks);
493
494 aux[0] = htonl(call->conn->peer->if_mtu); /* interface MTU */
495 aux[1] = htonl(1444); /* max MTU */
496 aux[2] = htonl(16); /* rwind */
497 aux[3] = htonl(4); /* max packets */
498
499 diov[0].iov_len = sizeof(struct rxrpc_ackpacket);
500 diov[0].iov_base = &call->ackr;
501 diov[1].iov_len = call->ackr_pend_cnt + 3;
502 diov[1].iov_base = call->ackr_array;
503 diov[2].iov_len = sizeof(aux);
504 diov[2].iov_base = &aux;
505
506 /* build and send the message */
507 ret = rxrpc_conn_newmsg(call->conn,call, RXRPC_PACKET_TYPE_ACK,
508 3, diov, GFP_KERNEL, &msg);
509 if (ret < 0)
510 goto out;
511
512 msg->seq = seq;
513 msg->hdr.seq = htonl(seq);
514 msg->hdr.flags |= RXRPC_SLOW_START_OK;
515
516 ret = rxrpc_conn_sendmsg(call->conn, msg);
517 rxrpc_put_message(msg);
518 if (ret < 0)
519 goto out;
520 call->pkt_snd_count++;
521
522 /* count how many actual ACKs there were at the front */
523 for (delta = 0; delta < call->ackr_pend_cnt; delta++)
524 if (call->ackr_array[delta] != RXRPC_ACK_TYPE_ACK)
525 break;
526
527 call->ackr_pend_cnt -= delta; /* all ACK'd to this point */
528
529 /* crank the ACK window around */
530 if (delta == 0) {
531 /* un-ACK'd window */
532 }
533 else if (delta < RXRPC_CALL_ACK_WINDOW_SIZE) {
534 /* partially ACK'd window
535 * - shuffle down to avoid losing out-of-sequence packets
536 */
537 call->ackr_win_bot += delta;
538 call->ackr_win_top += delta;
539
540 memmove(&call->ackr_array[0],
541 &call->ackr_array[delta],
542 call->ackr_pend_cnt);
543
544 memset(&call->ackr_array[call->ackr_pend_cnt],
545 RXRPC_ACK_TYPE_NACK,
546 sizeof(call->ackr_array) - call->ackr_pend_cnt);
547 }
548 else {
549 /* fully ACK'd window
550 * - just clear the whole thing
551 */
552 memset(&call->ackr_array,
553 RXRPC_ACK_TYPE_NACK,
554 sizeof(call->ackr_array));
555 }
556
557 /* clear this ACK */
558 memset(&call->ackr, 0, sizeof(call->ackr));
559
560 out:
561 if (!call->app_call_state)
562 printk("___ STATE 0 ___\n");
563 return ret;
564} /* end __rxrpc_call_gen_normal_ACK() */
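
To make the window crank above concrete, an illustrative trace (assuming RXRPC_CALL_ACK_WINDOW_SIZE is 16, as the sizes used elsewhere in this file suggest):

    /*
     * ackr_array before: A A A N A N N N ...  (A = ACK'd, N = not yet)
     * the leading-run scan stops at slot 3, so delta == 3:
     *   ackr_pend_cnt -= 3
     *   ackr_win_bot: 1 -> 4, ackr_win_top: 16 -> 19
     *   memmove() shifts the surviving slots down by three and the tail
     *   is re-initialised to RXRPC_ACK_TYPE_NACK
     * the out-of-sequence ACK recorded at old slot 4 is preserved.
     */
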
565
566/*****************************************************************************/
567/*
568 * note the reception of a packet in the call's ACK records and generate an
569 * appropriate ACK packet if necessary
570 * - returns 0 if packet should be processed, 1 if packet should be ignored
571 * and -ve on an error
572 */
573static int rxrpc_call_generate_ACK(struct rxrpc_call *call,
574 struct rxrpc_header *hdr,
575 struct rxrpc_ackpacket *ack)
576{
577 struct rxrpc_message *msg;
578 rxrpc_seq_t seq;
579 unsigned offset;
580 int ret = 0, err;
581 u8 special_ACK, do_ACK, force;
582
583 _enter("%p,%p { seq=%d tp=%d fl=%02x }",
584 call, hdr, ntohl(hdr->seq), hdr->type, hdr->flags);
585
586 seq = ntohl(hdr->seq);
587 offset = seq - call->ackr_win_bot;
588 do_ACK = RXRPC_ACK_DELAY;
589 special_ACK = 0;
590 force = (seq == 1);
591
592 if (call->ackr_high_seq < seq)
593 call->ackr_high_seq = seq;
594
595 /* deal with generation of obvious special ACKs first */
596 if (ack && ack->reason == RXRPC_ACK_PING) {
597 special_ACK = RXRPC_ACK_PING_RESPONSE;
598 ret = 1;
599 goto gen_ACK;
600 }
601
602 if (seq < call->ackr_win_bot) {
603 special_ACK = RXRPC_ACK_DUPLICATE;
604 ret = 1;
605 goto gen_ACK;
606 }
607
608 if (seq >= call->ackr_win_top) {
609 special_ACK = RXRPC_ACK_EXCEEDS_WINDOW;
610 ret = 1;
611 goto gen_ACK;
612 }
613
614 if (call->ackr_array[offset] != RXRPC_ACK_TYPE_NACK) {
615 special_ACK = RXRPC_ACK_DUPLICATE;
616 ret = 1;
617 goto gen_ACK;
618 }
619
620 /* okay... it's a normal data packet inside the ACK window */
621 call->ackr_array[offset] = RXRPC_ACK_TYPE_ACK;
622
623 if (offset < call->ackr_pend_cnt) { /* already marked pending - nothing more to do */
624 }
625 else if (offset > call->ackr_pend_cnt) {
626 do_ACK = RXRPC_ACK_OUT_OF_SEQUENCE;
627 call->ackr_pend_cnt = offset;
628 goto gen_ACK;
629 }
630
631 if (hdr->flags & RXRPC_REQUEST_ACK) {
632 do_ACK = RXRPC_ACK_REQUESTED;
633 }
634
635 /* generate an ACK on the final packet of a reply just received */
636 if (hdr->flags & RXRPC_LAST_PACKET) {
637 if (call->conn->out_clientflag)
638 force = 1;
639 }
640 else if (!(hdr->flags & RXRPC_MORE_PACKETS)) {
641 do_ACK = RXRPC_ACK_REQUESTED;
642 }
643
644 /* re-ACK packets previously received out-of-order */
645 for (offset++; offset < RXRPC_CALL_ACK_WINDOW_SIZE; offset++)
646 if (call->ackr_array[offset] != RXRPC_ACK_TYPE_ACK)
647 break;
648
649 call->ackr_pend_cnt = offset;
650
651 /* generate an ACK if we fill up the window */
652 if (call->ackr_pend_cnt >= RXRPC_CALL_ACK_WINDOW_SIZE)
653 force = 1;
654
655 gen_ACK:
656 _debug("%05lu ACKs pend=%u norm=%s special=%s%s",
657 jiffies - call->cjif,
658 call->ackr_pend_cnt,
659 rxrpc_acks[do_ACK],
660 rxrpc_acks[special_ACK],
661 force ? " immediate" :
662 do_ACK == RXRPC_ACK_REQUESTED ? " merge-req" :
663 hdr->flags & RXRPC_LAST_PACKET ? " finalise" :
664 " defer"
665 );
666
667 /* send any pending normal ACKs if need be */
668 if (call->ackr_pend_cnt > 0) {
669 /* fill out the appropriate form */
670 call->ackr.bufferSpace = htons(RXRPC_CALL_ACK_WINDOW_SIZE);
671 call->ackr.maxSkew = htons(min(call->ackr_high_seq - seq,
672 65535U));
673 call->ackr.firstPacket = htonl(call->ackr_win_bot);
674 call->ackr.previousPacket = call->ackr_prev_seq;
675 call->ackr.serial = hdr->serial;
676 call->ackr.nAcks = call->ackr_pend_cnt;
677
678 if (do_ACK == RXRPC_ACK_REQUESTED)
679 call->ackr.reason = do_ACK;
680
681 /* generate the ACK immediately if necessary */
682 if (special_ACK || force) {
683 err = __rxrpc_call_gen_normal_ACK(
684 call, do_ACK == RXRPC_ACK_DELAY ? 0 : seq);
685 if (err < 0) {
686 ret = err;
687 goto out;
688 }
689 }
690 }
691
692 if (call->ackr.reason == RXRPC_ACK_REQUESTED)
693 call->ackr_dfr_seq = seq;
694
695 /* start the ACK timer if not running if there are any pending deferred
696 * ACKs */
697 if (call->ackr_pend_cnt > 0 &&
698 call->ackr.reason != RXRPC_ACK_REQUESTED &&
699 !timer_pending(&call->ackr_dfr_timo)
700 ) {
701 unsigned long timo;
702
703 timo = rxrpc_call_dfr_ack_timeout + jiffies;
704
705 _debug("START ACKR TIMER for cj=%lu", timo - call->cjif);
706
707 spin_lock(&call->lock);
708 mod_timer(&call->ackr_dfr_timo, timo);
709 spin_unlock(&call->lock);
710 }
711 else if ((call->ackr_pend_cnt == 0 ||
712 call->ackr.reason == RXRPC_ACK_REQUESTED) &&
713 timer_pending(&call->ackr_dfr_timo)
714 ) {
715 /* stop timer if no pending ACKs */
716 _debug("CLEAR ACKR TIMER");
717 del_timer_sync(&call->ackr_dfr_timo);
718 }
719
720 /* send a special ACK if one is required */
721 if (special_ACK) {
722 struct rxrpc_ackpacket ack;
723 struct kvec diov[2];
724 uint8_t acks[1] = { RXRPC_ACK_TYPE_ACK };
725
726 /* fill out the appropriate form */
727 ack.bufferSpace = htons(RXRPC_CALL_ACK_WINDOW_SIZE);
728 ack.maxSkew = htons(min(call->ackr_high_seq - seq,
729 65535U));
730 ack.firstPacket = htonl(call->ackr_win_bot);
731 ack.previousPacket = call->ackr_prev_seq;
732 ack.serial = hdr->serial;
733 ack.reason = special_ACK;
734 ack.nAcks = 0;
735
736 _proto("Rx Sending s-ACK"
737 " { m=%hu f=#%u p=#%u s=%%%u r=%s n=%u }",
738 ntohs(ack.maxSkew),
739 ntohl(ack.firstPacket),
740 ntohl(ack.previousPacket),
741 ntohl(ack.serial),
742 rxrpc_acks[ack.reason],
743 ack.nAcks);
744
745 diov[0].iov_len = sizeof(struct rxrpc_ackpacket);
746 diov[0].iov_base = &ack;
747 diov[1].iov_len = sizeof(acks);
748 diov[1].iov_base = acks;
749
750 /* build and send the message */
751 err = rxrpc_conn_newmsg(call->conn,call, RXRPC_PACKET_TYPE_ACK,
752 hdr->seq ? 2 : 1, diov,
753 GFP_KERNEL,
754 &msg);
755 if (err < 0) {
756 ret = err;
757 goto out;
758 }
759
760 msg->seq = seq;
761 msg->hdr.seq = htonl(seq);
762 msg->hdr.flags |= RXRPC_SLOW_START_OK;
763
764 err = rxrpc_conn_sendmsg(call->conn, msg);
765 rxrpc_put_message(msg);
766 if (err < 0) {
767 ret = err;
768 goto out;
769 }
770 call->pkt_snd_count++;
771 }
772
773 out:
774 if (hdr->seq)
775 call->ackr_prev_seq = hdr->seq;
776
777 _leave(" = %d", ret);
778 return ret;
779} /* end rxrpc_call_generate_ACK() */
780
781/*****************************************************************************/
782/*
783 * handle work to be done on a call
784 * - includes packet reception and timeout processing
785 */
786void rxrpc_call_do_stuff(struct rxrpc_call *call)
787{
788 _enter("%p{flags=%lx}", call, call->flags);
789
790 /* handle packet reception */
791 if (call->flags & RXRPC_CALL_RCV_PKT) {
792 _debug("- receive packet");
793 call->flags &= ~RXRPC_CALL_RCV_PKT;
794 rxrpc_call_receive_packet(call);
795 }
796
797 /* handle overdue ACKs */
798 if (call->flags & RXRPC_CALL_ACKS_TIMO) {
799 _debug("- overdue ACK timeout");
800 call->flags &= ~RXRPC_CALL_ACKS_TIMO;
801 rxrpc_call_resend(call, call->snd_seq_count);
802 }
803
804 /* handle lack of reception */
805 if (call->flags & RXRPC_CALL_RCV_TIMO) {
806 _debug("- reception timeout");
807 call->flags &= ~RXRPC_CALL_RCV_TIMO;
808 rxrpc_call_abort(call, -EIO);
809 }
810
811 /* handle deferred ACKs */
812 if (call->flags & RXRPC_CALL_ACKR_TIMO ||
813 (call->ackr.nAcks > 0 && call->ackr.reason == RXRPC_ACK_REQUESTED)
814 ) {
815 _debug("- deferred ACK timeout: cj=%05lu r=%s n=%u",
816 jiffies - call->cjif,
817 rxrpc_acks[call->ackr.reason],
818 call->ackr.nAcks);
819
820 call->flags &= ~RXRPC_CALL_ACKR_TIMO;
821
822 if (call->ackr.nAcks > 0 &&
823 call->app_call_state != RXRPC_CSTATE_ERROR) {
824 /* generate ACK */
825 __rxrpc_call_gen_normal_ACK(call, call->ackr_dfr_seq);
826 call->ackr_dfr_seq = 0;
827 }
828 }
829
830 _leave("");
831
832} /* end rxrpc_call_do_stuff() */
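
A loose sketch of how the krxiod daemon might drive this entry point for each queued call (the queue and helper names here are hypothetical; the real loop lives in krxiod.c, which is not shown in this section):

    for (;;) {
    	struct rxrpc_call *call = krxiod_dequeue_call();  /* hypothetical */
    	if (!call)
    		break;
    	rxrpc_call_do_stuff(call);	/* acts on the RXRPC_CALL_* flags */
    	rxrpc_put_call(call);
    }
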
833
834/*****************************************************************************/
835/*
836 * send an abort message at call or connection level
837 * - must be called with call->lock held
838 * - the supplied error code is sent as the packet data
839 */
840static int __rxrpc_call_abort(struct rxrpc_call *call, int errno)
841{
842 struct rxrpc_connection *conn = call->conn;
843 struct rxrpc_message *msg;
844 struct kvec diov[1];
845 int ret;
846 __be32 _error;
847
848 _enter("%p{%08x},%p{%d},%d",
849 conn, ntohl(conn->conn_id), call, ntohl(call->call_id), errno);
850
851 /* if this call is already aborted, then just wake up any waiters */
852 if (call->app_call_state == RXRPC_CSTATE_ERROR) {
853 spin_unlock(&call->lock);
854 call->app_error_func(call);
855 _leave(" = 0");
856 return 0;
857 }
858
859 rxrpc_get_call(call);
860
861 /* change the state _with_ the lock still held */
862 call->app_call_state = RXRPC_CSTATE_ERROR;
863 call->app_err_state = RXRPC_ESTATE_LOCAL_ABORT;
864 call->app_errno = errno;
865 call->app_mark = RXRPC_APP_MARK_EOF;
866 call->app_read_buf = NULL;
867 call->app_async_read = 0;
868
869 _state(call);
870
871 /* ask the app to translate the error code */
872 call->app_aemap_func(call);
873
874 spin_unlock(&call->lock);
875
876 /* flush any outstanding ACKs */
877 del_timer_sync(&call->acks_timeout);
878 del_timer_sync(&call->rcv_timeout);
879 del_timer_sync(&call->ackr_dfr_timo);
880
881 if (rxrpc_call_is_ack_pending(call))
882 __rxrpc_call_gen_normal_ACK(call, 0);
883
884 /* send the abort packet only if we actually traded some other
885 * packets */
886 ret = 0;
887 if (call->pkt_snd_count || call->pkt_rcv_count) {
888 /* actually send the abort */
889 _proto("Rx Sending Call ABORT { data=%d }",
890 call->app_abort_code);
891
892 _error = htonl(call->app_abort_code);
893
894 diov[0].iov_len = sizeof(_error);
895 diov[0].iov_base = &_error;
896
897 ret = rxrpc_conn_newmsg(conn, call, RXRPC_PACKET_TYPE_ABORT,
898 1, diov, GFP_KERNEL, &msg);
899 if (ret == 0) {
900 ret = rxrpc_conn_sendmsg(conn, msg);
901 rxrpc_put_message(msg);
902 }
903 }
904
905 /* tell the app layer to let go */
906 call->app_error_func(call);
907
908 rxrpc_put_call(call);
909
910 _leave(" = %d", ret);
911 return ret;
912} /* end __rxrpc_call_abort() */
913
914/*****************************************************************************/
915/*
916 * send an abort message at call or connection level
917 * - the supplied error code is sent as the packet data
918 */
919int rxrpc_call_abort(struct rxrpc_call *call, int error)
920{
921 spin_lock(&call->lock);
922 /* note: __rxrpc_call_abort() releases call->lock */
923 return __rxrpc_call_abort(call, error);
924
925} /* end rxrpc_call_abort() */
926
927/*****************************************************************************/
928/*
929 * process packets waiting for this call
930 */
931static void rxrpc_call_receive_packet(struct rxrpc_call *call)
932{
933 struct rxrpc_message *msg;
934 struct list_head *_p;
935
936 _enter("%p", call);
937
938 rxrpc_get_call(call); /* must not go away too soon if aborted by
939 * app-layer */
940
941 while (!list_empty(&call->rcv_receiveq)) {
942 /* try to get next packet */
943 _p = NULL;
944 spin_lock(&call->lock);
945 if (!list_empty(&call->rcv_receiveq)) {
946 _p = call->rcv_receiveq.next;
947 list_del_init(_p);
948 }
949 spin_unlock(&call->lock);
950
951 if (!_p)
952 break;
953
954 msg = list_entry(_p, struct rxrpc_message, link);
955
956 _proto("Rx %05lu Received %s packet (%%%u,#%u,%c%c%c%c%c)",
957 jiffies - call->cjif,
958 rxrpc_pkts[msg->hdr.type],
959 ntohl(msg->hdr.serial),
960 msg->seq,
961 msg->hdr.flags & RXRPC_JUMBO_PACKET ? 'j' : '-',
962 msg->hdr.flags & RXRPC_MORE_PACKETS ? 'm' : '-',
963 msg->hdr.flags & RXRPC_LAST_PACKET ? 'l' : '-',
964 msg->hdr.flags & RXRPC_REQUEST_ACK ? 'r' : '-',
965 msg->hdr.flags & RXRPC_CLIENT_INITIATED ? 'C' : 'S'
966 );
967
968 switch (msg->hdr.type) {
969 /* deal with data packets */
970 case RXRPC_PACKET_TYPE_DATA:
971 /* ACK the packet if necessary */
972 switch (rxrpc_call_generate_ACK(call, &msg->hdr,
973 NULL)) {
974 case 0: /* useful packet */
975 rxrpc_call_receive_data_packet(call, msg);
976 break;
977 case 1: /* duplicate or out-of-window packet */
978 break;
979 default:
980 rxrpc_put_message(msg);
981 goto out;
982 }
983 break;
984
985 /* deal with ACK packets */
986 case RXRPC_PACKET_TYPE_ACK:
987 rxrpc_call_receive_ack_packet(call, msg);
988 break;
989
990 /* deal with abort packets */
991 case RXRPC_PACKET_TYPE_ABORT: {
992 __be32 _dbuf, *dp;
993
994 dp = skb_header_pointer(msg->pkt, msg->offset,
995 sizeof(_dbuf), &_dbuf);
996 if (dp == NULL)
997 printk("Rx Received short ABORT packet\n");
998
999 _proto("Rx Received Call ABORT { data=%d }",
1000 (dp ? ntohl(*dp) : 0));
1001
1002 spin_lock(&call->lock);
1003 call->app_call_state = RXRPC_CSTATE_ERROR;
1004 call->app_err_state = RXRPC_ESTATE_PEER_ABORT;
1005 call->app_abort_code = (dp ? ntohl(*dp) : 0);
1006 call->app_errno = -ECONNABORTED;
1007 call->app_mark = RXRPC_APP_MARK_EOF;
1008 call->app_read_buf = NULL;
1009 call->app_async_read = 0;
1010
1011 /* ask the app to translate the error code */
1012 call->app_aemap_func(call);
1013 _state(call);
1014 spin_unlock(&call->lock);
1015 call->app_error_func(call);
1016 break;
1017 }
1018 default:
1019 /* deal with other packet types */
1020 _proto("Rx Unsupported packet type %u (#%u)",
1021 msg->hdr.type, msg->seq);
1022 break;
1023 }
1024
1025 rxrpc_put_message(msg);
1026 }
1027
1028 out:
1029 rxrpc_put_call(call);
1030 _leave("");
1031} /* end rxrpc_call_receive_packet() */
1032
1033/*****************************************************************************/
1034/*
1035 * process next data packet
1036 * - as the next data packet arrives:
1037 * - it is queued on app_readyq _if_ it is the next one expected
1038 * (app_ready_seq+1)
1039 * - it is queued on app_unreadyq _if_ it is not the next one expected
1040 * - if a packet placed on app_readyq completely fills a hole leading up to
1041 * the first packet on app_unreadyq, then packets now in sequence are
1042 * transferred to app_readyq
1043 * - the application layer can only see packets on app_readyq
1044 * (app_ready_qty bytes)
1045 * - the application layer is prodded every time a new packet arrives
1046 */
1047static void rxrpc_call_receive_data_packet(struct rxrpc_call *call,
1048 struct rxrpc_message *msg)
1049{
1050 const struct rxrpc_operation *optbl, *op;
1051 struct rxrpc_message *pmsg;
1052 struct list_head *_p;
1053 int ret, lo, hi, rmtimo;
1054 __be32 opid;
1055
1056 _enter("%p{%u},%p{%u}", call, ntohl(call->call_id), msg, msg->seq);
1057
1058 rxrpc_get_message(msg);
1059
1060 /* add to the unready queue if we'd have to create a hole in the ready
1061 * queue otherwise */
1062 if (msg->seq != call->app_ready_seq + 1) {
1063 _debug("Call add packet %d to unreadyq", msg->seq);
1064
1065 /* insert in seq order */
1066 list_for_each(_p, &call->app_unreadyq) {
1067 pmsg = list_entry(_p, struct rxrpc_message, link);
1068 if (pmsg->seq > msg->seq)
1069 break;
1070 }
1071
1072 list_add_tail(&msg->link, _p);
1073
1074 _leave(" [unreadyq]");
1075 return;
1076 }
1077
1078 /* next in sequence - simply append into the call's ready queue */
1079 _debug("Call add packet %d to readyq (+%Zd => %Zd bytes)",
1080 msg->seq, msg->dsize, call->app_ready_qty);
1081
1082 spin_lock(&call->lock);
1083 call->app_ready_seq = msg->seq;
1084 call->app_ready_qty += msg->dsize;
1085 list_add_tail(&msg->link, &call->app_readyq);
1086
1087 /* move unready packets to the readyq if we got rid of a hole */
1088 while (!list_empty(&call->app_unreadyq)) {
1089 pmsg = list_entry(call->app_unreadyq.next,
1090 struct rxrpc_message, link);
1091
1092 if (pmsg->seq != call->app_ready_seq + 1)
1093 break;
1094
1095 /* next in sequence - just move list-to-list */
1096 _debug("Call transfer packet %d to readyq (+%Zd => %Zd bytes)",
1097 pmsg->seq, pmsg->dsize, call->app_ready_qty);
1098
1099 call->app_ready_seq = pmsg->seq;
1100 call->app_ready_qty += pmsg->dsize;
1101 list_del_init(&pmsg->link);
1102 list_add_tail(&pmsg->link, &call->app_readyq);
1103 }
1104
1105 /* see if we've got the last packet yet */
1106 if (!list_empty(&call->app_readyq)) {
1107 pmsg = list_entry(call->app_readyq.prev,
1108 struct rxrpc_message, link);
1109 if (pmsg->hdr.flags & RXRPC_LAST_PACKET) {
1110 call->app_last_rcv = 1;
1111 _debug("Last packet on readyq");
1112 }
1113 }
1114
1115 switch (call->app_call_state) {
1116 /* do nothing if call already aborted */
1117 case RXRPC_CSTATE_ERROR:
1118 spin_unlock(&call->lock);
1119 _leave(" [error]");
1120 return;
1121
1122 /* extract the operation ID from an incoming call if that's not
1123 * yet been done */
1124 case RXRPC_CSTATE_SRVR_RCV_OPID:
1125 spin_unlock(&call->lock);
1126
1127 /* handle as yet insufficient data for the operation ID */
1128 if (call->app_ready_qty < 4) {
1129 if (call->app_last_rcv)
1130 /* trouble - last packet seen */
1131 rxrpc_call_abort(call, -EINVAL);
1132
1133 _leave("");
1134 return;
1135 }
1136
1137 /* pull the operation ID out of the buffer */
1138 ret = rxrpc_call_read_data(call, &opid, sizeof(opid), 0);
1139 if (ret < 0) {
1140 printk("Unexpected error from read-data: %d\n", ret);
1141 if (call->app_call_state != RXRPC_CSTATE_ERROR)
1142 rxrpc_call_abort(call, ret);
1143 _leave("");
1144 return;
1145 }
1146 call->app_opcode = ntohl(opid);
1147
1148 /* locate the operation in the available ops table */
1149 optbl = call->conn->service->ops_begin;
1150 lo = 0;
1151 hi = call->conn->service->ops_end - optbl;
1152
1153 while (lo < hi) {
1154 int mid = (hi + lo) / 2;
1155 op = &optbl[mid];
1156 if (call->app_opcode == op->id)
1157 goto found_op;
1158 if (call->app_opcode > op->id)
1159 lo = mid + 1;
1160 else
1161 hi = mid;
1162 }
1163
1164 /* search failed */
1165 kproto("Rx Client requested operation %d from %s service",
1166 call->app_opcode, call->conn->service->name);
1167 rxrpc_call_abort(call, -EINVAL);
1168 _leave(" [inval]");
1169 return;
1170
1171 found_op:
1172 _proto("Rx Client requested operation %s from %s service",
1173 op->name, call->conn->service->name);
1174
1175 /* we're now waiting for the argument block (unless the call
1176 * was aborted) */
1177 spin_lock(&call->lock);
1178 if (call->app_call_state == RXRPC_CSTATE_SRVR_RCV_OPID ||
1179 call->app_call_state == RXRPC_CSTATE_SRVR_SND_REPLY) {
1180 if (!call->app_last_rcv)
1181 call->app_call_state =
1182 RXRPC_CSTATE_SRVR_RCV_ARGS;
1183 else if (call->app_ready_qty > 0)
1184 call->app_call_state =
1185 RXRPC_CSTATE_SRVR_GOT_ARGS;
1186 else
1187 call->app_call_state =
1188 RXRPC_CSTATE_SRVR_SND_REPLY;
1189 call->app_mark = op->asize;
1190 call->app_user = op->user;
1191 }
1192 spin_unlock(&call->lock);
1193
1194 _state(call);
1195 break;
1196
1197 case RXRPC_CSTATE_SRVR_RCV_ARGS:
1198 /* change state if just received last packet of arg block */
1199 if (call->app_last_rcv)
1200 call->app_call_state = RXRPC_CSTATE_SRVR_GOT_ARGS;
1201 spin_unlock(&call->lock);
1202
1203 _state(call);
1204 break;
1205
1206 case RXRPC_CSTATE_CLNT_RCV_REPLY:
1207 /* change state if just received last packet of reply block */
1208 rmtimo = 0;
1209 if (call->app_last_rcv) {
1210 call->app_call_state = RXRPC_CSTATE_CLNT_GOT_REPLY;
1211 rmtimo = 1;
1212 }
1213 spin_unlock(&call->lock);
1214
1215 if (rmtimo) {
1216 del_timer_sync(&call->acks_timeout);
1217 del_timer_sync(&call->rcv_timeout);
1218 del_timer_sync(&call->ackr_dfr_timo);
1219 }
1220
1221 _state(call);
1222 break;
1223
1224 default:
1225 /* deal with data reception in an unexpected state */
1226 printk("Unexpected state [[[ %u ]]]\n", call->app_call_state);
1227 __rxrpc_call_abort(call, -EBADMSG);
1228 _leave("");
1229 return;
1230 }
1231
1232 if (call->app_call_state == RXRPC_CSTATE_CLNT_RCV_REPLY &&
1233 call->app_last_rcv)
1234 BUG();
1235
1236 /* otherwise just invoke the data function whenever we can satisfy its desire for more
1237 * data
1238 */
1239 _proto("Rx Received Op Data: st=%u qty=%Zu mk=%Zu%s",
1240 call->app_call_state, call->app_ready_qty, call->app_mark,
1241 call->app_last_rcv ? " last-rcvd" : "");
1242
1243 spin_lock(&call->lock);
1244
1245 ret = __rxrpc_call_read_data(call);
1246 switch (ret) {
1247 case 0:
1248 spin_unlock(&call->lock);
1249 call->app_attn_func(call);
1250 break;
1251 case -EAGAIN:
1252 spin_unlock(&call->lock);
1253 break;
1254 case -ECONNABORTED:
1255 spin_unlock(&call->lock);
1256 break;
1257 default:
1258 __rxrpc_call_abort(call, ret);
1259 break;
1260 }
1261
1262 _state(call);
1263
1264 _leave("");
1265
1266} /* end rxrpc_call_receive_data_packet() */
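
An illustrative trace of the reassembly rules described at the top of this function, for packets arriving in the order #1, #3, #4, #2 with app_ready_seq initially 0:

    /*
     * #1: seq == app_ready_seq + 1	-> app_readyq, app_ready_seq = 1
     * #3: out of sequence		-> app_unreadyq (sorted: 3)
     * #4: out of sequence		-> app_unreadyq (sorted: 3, 4)
     * #2: fills the hole		-> app_readyq, then #3 and #4 are
     *     drained across in turn	-> app_ready_seq = 4
     * the application layer only ever sees app_readyq
     */
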
1267
1268/*****************************************************************************/
1269/*
1270 * received an ACK packet
1271 */
1272static void rxrpc_call_receive_ack_packet(struct rxrpc_call *call,
1273 struct rxrpc_message *msg)
1274{
1275 struct rxrpc_ackpacket _ack, *ap;
1276 rxrpc_serial_net_t serial;
1277 rxrpc_seq_t seq;
1278 int ret;
1279
1280 _enter("%p{%u},%p{%u}", call, ntohl(call->call_id), msg, msg->seq);
1281
1282 /* extract the basic ACK record */
1283 ap = skb_header_pointer(msg->pkt, msg->offset, sizeof(_ack), &_ack);
1284 if (ap == NULL) {
1285 printk("Rx Received short ACK packet\n");
1286 return;
1287 }
1288 msg->offset += sizeof(_ack);
1289
1290 serial = ap->serial;
1291 seq = ntohl(ap->firstPacket);
1292
1293 _proto("Rx Received ACK %%%d { b=%hu m=%hu f=%u p=%u s=%u r=%s n=%u }",
1294 ntohl(msg->hdr.serial),
1295 ntohs(ap->bufferSpace),
1296 ntohs(ap->maxSkew),
1297 seq,
1298 ntohl(ap->previousPacket),
1299 ntohl(serial),
1300 rxrpc_acks[ap->reason],
1301 call->ackr.nAcks
1302 );
1303
1304 /* check the other side isn't ACK'ing a sequence number I haven't sent
1305 * yet */
1306 if (ap->nAcks > 0 &&
1307 (seq > call->snd_seq_count ||
1308 seq + ap->nAcks - 1 > call->snd_seq_count)) {
1309 printk("Received ACK (#%u-#%u) for unsent packet\n",
1310 seq, seq + ap->nAcks - 1);
1311 rxrpc_call_abort(call, -EINVAL);
1312 _leave("");
1313 return;
1314 }
1315
1316 /* deal with RTT calculation */
1317 if (serial) {
1318 struct rxrpc_message *rttmsg;
1319
1320 /* find the prompting packet */
1321 spin_lock(&call->lock);
1322 if (call->snd_ping && call->snd_ping->hdr.serial == serial) {
1323 /* it was a ping packet */
1324 rttmsg = call->snd_ping;
1325 call->snd_ping = NULL;
1326 spin_unlock(&call->lock);
1327
1328 if (rttmsg) {
1329 rttmsg->rttdone = 1;
1330 rxrpc_peer_calculate_rtt(call->conn->peer,
1331 rttmsg, msg);
1332 rxrpc_put_message(rttmsg);
1333 }
1334 }
1335 else {
1336 struct list_head *_p;
1337
1338 /* it ought to be a data packet - look in the pending
1339 * ACK list */
1340 list_for_each(_p, &call->acks_pendq) {
1341 rttmsg = list_entry(_p, struct rxrpc_message,
1342 link);
1343 if (rttmsg->hdr.serial == serial) {
1344 if (rttmsg->rttdone)
1345 /* never do RTT twice without
1346 * resending */
1347 break;
1348
1349 rttmsg->rttdone = 1;
1350 rxrpc_peer_calculate_rtt(
1351 call->conn->peer, rttmsg, msg);
1352 break;
1353 }
1354 }
1355 spin_unlock(&call->lock);
1356 }
1357 }
1358
1359 switch (ap->reason) {
1360 /* deal with negative/positive acknowledgement of data
1361 * packets */
1362 case RXRPC_ACK_REQUESTED:
1363 case RXRPC_ACK_DELAY:
1364 case RXRPC_ACK_IDLE:
1365 rxrpc_call_definitively_ACK(call, seq - 1); /* fall through */
1366
1367 case RXRPC_ACK_DUPLICATE:
1368 case RXRPC_ACK_OUT_OF_SEQUENCE:
1369 case RXRPC_ACK_EXCEEDS_WINDOW:
1370 call->snd_resend_cnt = 0;
1371 ret = rxrpc_call_record_ACK(call, msg, seq, ap->nAcks);
1372 if (ret < 0)
1373 rxrpc_call_abort(call, ret);
1374 break;
1375
1376 /* respond to ping packets immediately */
1377 case RXRPC_ACK_PING:
1378 rxrpc_call_generate_ACK(call, &msg->hdr, ap);
1379 break;
1380
1381 /* only record RTT on ping response packets */
1382 case RXRPC_ACK_PING_RESPONSE:
1383 if (call->snd_ping) {
1384 struct rxrpc_message *rttmsg;
1385
1386 /* only do RTT stuff if the response matches the
1387 * retained ping */
1388 rttmsg = NULL;
1389 spin_lock(&call->lock);
1390 if (call->snd_ping &&
1391 call->snd_ping->hdr.serial == ap->serial) {
1392 rttmsg = call->snd_ping;
1393 call->snd_ping = NULL;
1394 }
1395 spin_unlock(&call->lock);
1396
1397 if (rttmsg) {
1398 rttmsg->rttdone = 1;
1399 rxrpc_peer_calculate_rtt(call->conn->peer,
1400 rttmsg, msg);
1401 rxrpc_put_message(rttmsg);
1402 }
1403 }
1404 break;
1405
1406 default:
1407 printk("Unsupported ACK reason %u\n", ap->reason);
1408 break;
1409 }
1410
1411 _leave("");
1412} /* end rxrpc_call_receive_ack_packet() */
1413
1414/*****************************************************************************/
1415/*
1416 * record definitive ACKs for all messages up to and including the one with the
1417 * 'highest' seq
1418 */
1419static void rxrpc_call_definitively_ACK(struct rxrpc_call *call,
1420 rxrpc_seq_t highest)
1421{
1422 struct rxrpc_message *msg;
1423 int now_complete;
1424
1425 _enter("%p{ads=%u},%u", call, call->acks_dftv_seq, highest);
1426
1427 while (call->acks_dftv_seq < highest) {
1428 call->acks_dftv_seq++;
1429
1430 _proto("Definitive ACK on packet #%u", call->acks_dftv_seq);
1431
1432 /* discard those at front of queue until message with highest
1433 * ACK is found */
1434 spin_lock(&call->lock);
1435 msg = NULL;
1436 if (!list_empty(&call->acks_pendq)) {
1437 msg = list_entry(call->acks_pendq.next,
1438 struct rxrpc_message, link);
1439 list_del_init(&msg->link); /* dequeue */
1440 if (msg->state == RXRPC_MSG_SENT)
1441 call->acks_pend_cnt--;
1442 }
1443 spin_unlock(&call->lock);
1444
1445 /* insanity check */
1446 if (!msg)
1447 panic("%s(): acks_pendq unexpectedly empty\n",
1448 __FUNCTION__);
1449
1450 if (msg->seq != call->acks_dftv_seq)
1451 panic("%s(): Packet #%u expected at front of acks_pendq"
1452 " (#%u found)\n",
1453 __FUNCTION__, call->acks_dftv_seq, msg->seq);
1454
1455 /* discard the message */
1456 msg->state = RXRPC_MSG_DONE;
1457 rxrpc_put_message(msg);
1458 }
1459
1460 /* if all sent packets are definitively ACK'd then prod any sleepers just in case */
1461 now_complete = 0;
1462 spin_lock(&call->lock);
1463 if (call->acks_dftv_seq == call->snd_seq_count) {
1464 if (call->app_call_state != RXRPC_CSTATE_COMPLETE) {
1465 call->app_call_state = RXRPC_CSTATE_COMPLETE;
1466 _state(call);
1467 now_complete = 1;
1468 }
1469 }
1470 spin_unlock(&call->lock);
1471
1472 if (now_complete) {
1473 del_timer_sync(&call->acks_timeout);
1474 del_timer_sync(&call->rcv_timeout);
1475 del_timer_sync(&call->ackr_dfr_timo);
1476 call->app_attn_func(call);
1477 }
1478
1479 _leave("");
1480} /* end rxrpc_call_definitively_ACK() */
1481
1482/*****************************************************************************/
1483/*
1484 * record the specified amount of ACKs/NAKs
1485 */
1486static int rxrpc_call_record_ACK(struct rxrpc_call *call,
1487 struct rxrpc_message *msg,
1488 rxrpc_seq_t seq,
1489 size_t count)
1490{
1491 struct rxrpc_message *dmsg;
1492 struct list_head *_p;
1493 rxrpc_seq_t highest;
1494 unsigned ix;
1495 size_t chunk;
1496 char resend, now_complete;
1497 u8 acks[16];
1498
1499 _enter("%p{apc=%u ads=%u},%p,%u,%Zu",
1500 call, call->acks_pend_cnt, call->acks_dftv_seq,
1501 msg, seq, count);
1502
1503 /* handle re-ACK'ing of definitively ACK'd packets (may be out-of-order
1504 * ACKs) */
1505 if (seq <= call->acks_dftv_seq) {
1506 unsigned delta = call->acks_dftv_seq - seq;
1507
1508 if (count <= delta) {
1509 _leave(" = 0 [all definitively ACK'd]");
1510 return 0;
1511 }
1512
1513 seq += delta;
1514 count -= delta;
1515 msg->offset += delta;
1516 }
1517
1518 highest = seq + count - 1;
1519 resend = 0;
1520 while (count > 0) {
1521 /* extract up to 16 ACK slots at a time */
1522 chunk = min(count, sizeof(acks));
1523 count -= chunk;
1524
1525 memset(acks, 2, sizeof(acks));
1526
1527 if (skb_copy_bits(msg->pkt, msg->offset, &acks, chunk) < 0) {
1528 printk("Rx Received short ACK packet\n");
1529 _leave(" = -EINVAL");
1530 return -EINVAL;
1531 }
1532 msg->offset += chunk;
1533
1534 /* check that the ACK set is valid */
1535 for (ix = 0; ix < chunk; ix++) {
1536 switch (acks[ix]) {
1537 case RXRPC_ACK_TYPE_ACK:
1538 break;
1539 case RXRPC_ACK_TYPE_NACK:
1540 resend = 1;
1541 break;
1542 default:
1543 printk("Rx Received unsupported ACK state"
1544 " %u\n", acks[ix]);
1545 _leave(" = -EINVAL");
1546 return -EINVAL;
1547 }
1548 }
1549
1550 _proto("Rx ACK of packets #%u-#%u "
1551 "[%c%c%c%c%c%c%c%c%c%c%c%c%c%c%c%c] (pend=%u)",
1552 seq, (unsigned) (seq + chunk - 1),
1553 _acktype[acks[0x0]],
1554 _acktype[acks[0x1]],
1555 _acktype[acks[0x2]],
1556 _acktype[acks[0x3]],
1557 _acktype[acks[0x4]],
1558 _acktype[acks[0x5]],
1559 _acktype[acks[0x6]],
1560 _acktype[acks[0x7]],
1561 _acktype[acks[0x8]],
1562 _acktype[acks[0x9]],
1563 _acktype[acks[0xA]],
1564 _acktype[acks[0xB]],
1565 _acktype[acks[0xC]],
1566 _acktype[acks[0xD]],
1567 _acktype[acks[0xE]],
1568 _acktype[acks[0xF]],
1569 call->acks_pend_cnt
1570 );
1571
1572 /* mark the packets in the ACK queue as being provisionally
1573 * ACK'd */
1574 ix = 0;
1575 spin_lock(&call->lock);
1576
1577 /* find the first packet ACK'd/NAK'd here */
1578 list_for_each(_p, &call->acks_pendq) {
1579 dmsg = list_entry(_p, struct rxrpc_message, link);
1580 if (dmsg->seq == seq)
1581 goto found_first;
1582 _debug("- %u: skipping #%u", ix, dmsg->seq);
1583 }
1584 goto bad_queue;
1585
1586 found_first:
1587 do {
1588 _debug("- %u: processing #%u (%c) apc=%u",
1589 ix, dmsg->seq, _acktype[acks[ix]],
1590 call->acks_pend_cnt);
1591
1592 if (acks[ix] == RXRPC_ACK_TYPE_ACK) {
1593 if (dmsg->state == RXRPC_MSG_SENT)
1594 call->acks_pend_cnt--;
1595 dmsg->state = RXRPC_MSG_ACKED;
1596 }
1597 else {
1598 if (dmsg->state == RXRPC_MSG_ACKED)
1599 call->acks_pend_cnt++;
1600 dmsg->state = RXRPC_MSG_SENT;
1601 }
1602 ix++;
1603 seq++;
1604
1605 _p = dmsg->link.next;
1606 dmsg = list_entry(_p, struct rxrpc_message, link);
1607 } while(ix < chunk &&
1608 _p != &call->acks_pendq &&
1609 dmsg->seq == seq);
1610
1611 if (ix < chunk)
1612 goto bad_queue;
1613
1614 spin_unlock(&call->lock);
1615 }
1616
1617 if (resend)
1618 rxrpc_call_resend(call, highest);
1619
1620 /* if all packets are provisionally ACK'd, then wake up anyone who's
1621 * waiting for that */
1622 now_complete = 0;
1623 spin_lock(&call->lock);
1624 if (call->acks_pend_cnt == 0) {
1625 if (call->app_call_state == RXRPC_CSTATE_SRVR_RCV_FINAL_ACK) {
1626 call->app_call_state = RXRPC_CSTATE_COMPLETE;
1627 _state(call);
1628 }
1629 now_complete = 1;
1630 }
1631 spin_unlock(&call->lock);
1632
1633 if (now_complete) {
1634 _debug("- wake up waiters");
1635 del_timer_sync(&call->acks_timeout);
1636 del_timer_sync(&call->rcv_timeout);
1637 del_timer_sync(&call->ackr_dfr_timo);
1638 call->app_attn_func(call);
1639 }
1640
1641 _leave(" = 0 (apc=%u)", call->acks_pend_cnt);
1642 return 0;
1643
1644 bad_queue:
1645 panic("%s(): acks_pendq in bad state (packet #%u absent)\n",
1646 __FUNCTION__, seq);
1647
1648} /* end rxrpc_call_record_ACK() */
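
An illustrative walk of the chunking loop above:

    /*
     * example: an ACK covering 40 packets is consumed from the skb in
     * min(count, sizeof(acks)) == 16-slot bites: 16, 16, then 8; each
     * bite is validated (only RXRPC_ACK_TYPE_ACK / _NACK slot values
     * are legal), the matching acks_pendq entries are flipped between
     * RXRPC_MSG_ACKED and RXRPC_MSG_SENT, and any NACK sets 'resend'
     * so rxrpc_call_resend(call, highest) runs once after the loop.
     */
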
1649
1650/*****************************************************************************/
1651/*
1652 * transfer data from the ready packet queue to the asynchronous read buffer
1653 * - since this func is the only one going to look at packets queued on
1654 * app_readyq, we don't need a lock to modify or access them, only to modify
1655 * the queue pointers
1656 * - called with call->lock held
1657 * - the buffer must be in kernel space
1658 * - returns:
1659 * 0 if buffer filled
1660 * -EAGAIN if buffer not filled and more data to come
1661 * -EBADMSG if last packet received and insufficient data left
1662 * -ECONNABORTED if the call is in an error state
1663 */
1664static int __rxrpc_call_read_data(struct rxrpc_call *call)
1665{
1666 struct rxrpc_message *msg;
1667 size_t qty;
1668 int ret;
1669
1670 _enter("%p{as=%d buf=%p qty=%Zu/%Zu}",
1671 call,
1672 call->app_async_read, call->app_read_buf,
1673 call->app_ready_qty, call->app_mark);
1674
1675 /* check the state */
1676 switch (call->app_call_state) {
1677 case RXRPC_CSTATE_SRVR_RCV_ARGS:
1678 case RXRPC_CSTATE_CLNT_RCV_REPLY:
1679 if (call->app_last_rcv) {
1680 printk("%s(%p,%p,%Zd):"
1681 " Inconsistent call state (%s, last pkt)",
1682 __FUNCTION__,
1683 call, call->app_read_buf, call->app_mark,
1684 rxrpc_call_states[call->app_call_state]);
1685 BUG();
1686 }
1687 break;
1688
1689 case RXRPC_CSTATE_SRVR_RCV_OPID:
1690 case RXRPC_CSTATE_SRVR_GOT_ARGS:
1691 case RXRPC_CSTATE_CLNT_GOT_REPLY:
1692 break;
1693
1694 case RXRPC_CSTATE_SRVR_SND_REPLY:
1695 if (!call->app_last_rcv) {
1696 printk("%s(%p,%p,%Zd):"
1697 " Inconsistent call state (%s, not last pkt)",
1698 __FUNCTION__,
1699 call, call->app_read_buf, call->app_mark,
1700 rxrpc_call_states[call->app_call_state]);
1701 BUG();
1702 }
1703 _debug("Trying to read data from call in SND_REPLY state");
1704 break;
1705
1706 case RXRPC_CSTATE_ERROR:
1707 _leave(" = -ECONNABORTED");
1708 return -ECONNABORTED;
1709
1710 default:
1711 printk("reading in unexpected state [[[ %u ]]]\n",
1712 call->app_call_state);
1713 BUG();
1714 }
1715
1716 /* handle the case of not having an async buffer */
1717 if (!call->app_async_read) {
1718 if (call->app_mark == RXRPC_APP_MARK_EOF) {
1719 ret = call->app_last_rcv ? 0 : -EAGAIN;
1720 }
1721 else {
1722 if (call->app_mark >= call->app_ready_qty) {
1723 call->app_mark = RXRPC_APP_MARK_EOF;
1724 ret = 0;
1725 }
1726 else {
1727 ret = call->app_last_rcv ? -EBADMSG : -EAGAIN;
1728 }
1729 }
1730
1731 _leave(" = %d [no buf]", ret);
1732 return ret;
1733 }
1734
1735 while (!list_empty(&call->app_readyq) && call->app_mark > 0) {
1736 msg = list_entry(call->app_readyq.next,
1737 struct rxrpc_message, link);
1738
1739 /* drag as much data as we need out of this packet */
1740 qty = min(call->app_mark, msg->dsize);
1741
1742 _debug("reading %Zu from skb=%p off=%lu",
1743 qty, msg->pkt, msg->offset);
1744
1745 if (call->app_read_buf)
1746 if (skb_copy_bits(msg->pkt, msg->offset,
1747 call->app_read_buf, qty) < 0)
1748 panic("%s: Failed to copy data from packet:"
1749 " (%p,%p,%Zd)",
1750 __FUNCTION__,
1751 call, call->app_read_buf, qty);
1752
1753 /* if that packet is now empty, discard it */
1754 call->app_ready_qty -= qty;
1755 msg->dsize -= qty;
1756
1757 if (msg->dsize == 0) {
1758 list_del_init(&msg->link);
1759 rxrpc_put_message(msg);
1760 }
1761 else {
1762 msg->offset += qty;
1763 }
1764
1765 call->app_mark -= qty;
1766 if (call->app_read_buf)
1767 call->app_read_buf += qty;
1768 }
1769
1770 if (call->app_mark == 0) {
1771 call->app_async_read = 0;
1772 call->app_mark = RXRPC_APP_MARK_EOF;
1773 call->app_read_buf = NULL;
1774
1775 /* adjust the state if used up all packets */
1776 if (list_empty(&call->app_readyq) && call->app_last_rcv) {
1777 switch (call->app_call_state) {
1778 case RXRPC_CSTATE_SRVR_RCV_OPID:
1779 call->app_call_state = RXRPC_CSTATE_SRVR_SND_REPLY;
1780 call->app_mark = RXRPC_APP_MARK_EOF;
1781 _state(call);
1782 del_timer_sync(&call->rcv_timeout);
1783 break;
1784 case RXRPC_CSTATE_SRVR_GOT_ARGS:
1785 call->app_call_state = RXRPC_CSTATE_SRVR_SND_REPLY;
1786 _state(call);
1787 del_timer_sync(&call->rcv_timeout);
1788 break;
1789 default:
1790 call->app_call_state = RXRPC_CSTATE_COMPLETE;
1791 _state(call);
1792 del_timer_sync(&call->acks_timeout);
1793 del_timer_sync(&call->ackr_dfr_timo);
1794 del_timer_sync(&call->rcv_timeout);
1795 break;
1796 }
1797 }
1798
1799 _leave(" = 0");
1800 return 0;
1801 }
1802
1803 if (call->app_last_rcv) {
1804 _debug("Insufficient data (%Zu/%Zu)",
1805 call->app_ready_qty, call->app_mark);
1806 call->app_async_read = 0;
1807 call->app_mark = RXRPC_APP_MARK_EOF;
1808 call->app_read_buf = NULL;
1809
1810 _leave(" = -EBADMSG");
1811 return -EBADMSG;
1812 }
1813
1814 _leave(" = -EAGAIN");
1815 return -EAGAIN;
1816} /* end __rxrpc_call_read_data() */
1817
1818/*****************************************************************************/
1819/*
1820 * attempt to read the specified amount of data from the call's ready queue
1821 * into the buffer provided
1822 * - since this func is the only one going to look at packets queued on
1823 * app_readyq, we don't need a lock to modify or access them, only to modify
1824 * the queue pointers
1825 * - if the buffer pointer is NULL, then data is merely drained, not copied
1826 * - if flags&RXRPC_CALL_READ_BLOCK, then the function will wait until there is
1827 * enough data or an error will be generated
1828 * - note that the caller must have added the calling task to the call's wait
1829 * queue beforehand
1830 * - if flags&RXRPC_CALL_READ_ALL, then an error will be generated if this
1831 * function doesn't read all available data
1832 */
1833int rxrpc_call_read_data(struct rxrpc_call *call,
1834 void *buffer, size_t size, int flags)
1835{
1836 int ret;
1837
1838 _enter("%p{arq=%Zu},%p,%Zd,%x",
1839 call, call->app_ready_qty, buffer, size, flags);
1840
1841 spin_lock(&call->lock);
1842
1843 if (unlikely(!!call->app_read_buf)) {
1844 spin_unlock(&call->lock);
1845 _leave(" = -EBUSY");
1846 return -EBUSY;
1847 }
1848
1849 call->app_mark = size;
1850 call->app_read_buf = buffer;
1851 call->app_async_read = 1;
1852 call->app_read_count++;
1853
1854 /* read as much data as possible */
1855 ret = __rxrpc_call_read_data(call);
1856 switch (ret) {
1857 case 0:
1858 if (flags & RXRPC_CALL_READ_ALL &&
1859 (!call->app_last_rcv || call->app_ready_qty > 0)) {
1860 _leave(" = -EBADMSG");
1861 __rxrpc_call_abort(call, -EBADMSG);
1862 return -EBADMSG;
1863 }
1864
1865 spin_unlock(&call->lock);
1866 call->app_attn_func(call);
1867 _leave(" = 0");
1868 return ret;
1869
1870 case -ECONNABORTED:
1871 spin_unlock(&call->lock);
1872 _leave(" = %d [aborted]", ret);
1873 return ret;
1874
1875 default:
1876 __rxrpc_call_abort(call, ret);
1877 _leave(" = %d", ret);
1878 return ret;
1879
1880 case -EAGAIN:
1881 spin_unlock(&call->lock);
1882
1883 if (!(flags & RXRPC_CALL_READ_BLOCK)) {
1884 _leave(" = -EAGAIN");
1885 return -EAGAIN;
1886 }
1887
1888 /* wait for the data to arrive */
1889 _debug("blocking for data arrival");
1890
1891 for (;;) {
1892 set_current_state(TASK_INTERRUPTIBLE);
1893 if (!call->app_async_read || signal_pending(current))
1894 break;
1895 schedule();
1896 }
1897 set_current_state(TASK_RUNNING);
1898
1899 if (signal_pending(current)) {
1900 _leave(" = -EINTR");
1901 return -EINTR;
1902 }
1903
1904 if (call->app_call_state == RXRPC_CSTATE_ERROR) {
1905 _leave(" = -ECONNABORTED");
1906 return -ECONNABORTED;
1907 }
1908
1909 _leave(" = 0");
1910 return 0;
1911 }
1912
1913} /* end rxrpc_call_read_data() */
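
A usage sketch of a blocking read, assuming the caller already holds a reference on the call; as the comment block above requires, the task must be on call->waitq before asking for RXRPC_CALL_READ_BLOCK behaviour:

    DECLARE_WAITQUEUE(myself, current);
    int ret;

    add_wait_queue(&call->waitq, &myself);
    ret = rxrpc_call_read_data(call, buffer, count,
    			       RXRPC_CALL_READ_BLOCK);
    remove_wait_queue(&call->waitq, &myself);
    /* ret: 0 on success, -EINTR if signalled, -ECONNABORTED on abort */
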
1914
1915/*****************************************************************************/
1916/*
1917 * write data to a call
1918 * - the data may not be sent immediately if it doesn't fill a buffer
1919 * - if we can't queue all the data for buffering now, siov[] will have been
1920 * adjusted to take account of what has been sent
1921 */
1922int rxrpc_call_write_data(struct rxrpc_call *call,
1923 size_t sioc,
1924 struct kvec *siov,
1925 u8 rxhdr_flags,
1926 int alloc_flags,
1927 int dup_data,
1928 size_t *size_sent)
1929{
1930 struct rxrpc_message *msg;
1931 struct kvec *sptr;
1932 size_t space, size, chunk, tmp;
1933 char *buf;
1934 int ret;
1935
1936 _enter("%p,%Zu,%p,%02x,%x,%d,%p",
1937 call, sioc, siov, rxhdr_flags, alloc_flags, dup_data,
1938 size_sent);
1939
1940 *size_sent = 0;
1941 size = 0;
1942 ret = -EINVAL;
1943
1944 /* can't send more if we've sent last packet from this end */
1945 switch (call->app_call_state) {
1946 case RXRPC_CSTATE_SRVR_SND_REPLY:
1947 case RXRPC_CSTATE_CLNT_SND_ARGS:
1948 break;
1949 case RXRPC_CSTATE_ERROR:
1950 ret = call->app_errno; /* fall through */
1951 default:
1952 goto out;
1953 }
1954
1955 /* calculate how much data we've been given */
1956 sptr = siov;
1957 for (; sioc > 0; sptr++, sioc--) {
1958 if (!sptr->iov_len)
1959 continue;
1960
1961 if (!sptr->iov_base)
1962 goto out;
1963
1964 size += sptr->iov_len;
1965 }
1966
1967 _debug("- size=%Zu mtu=%Zu", size, call->conn->mtu_size);
1968
1969 do {
1970 /* make sure there's a message under construction */
1971 if (!call->snd_nextmsg) {
1972 /* no - allocate a message with no data yet attached */
1973 ret = rxrpc_conn_newmsg(call->conn, call,
1974 RXRPC_PACKET_TYPE_DATA,
1975 0, NULL, alloc_flags,
1976 &call->snd_nextmsg);
1977 if (ret < 0)
1978 goto out;
1979 _debug("- allocated new message [ds=%Zu]",
1980 call->snd_nextmsg->dsize);
1981 }
1982
1983 msg = call->snd_nextmsg;
1984 msg->hdr.flags |= rxhdr_flags;
1985
1986 /* deal with zero-length terminal packet */
1987 if (size == 0) {
1988 if (rxhdr_flags & RXRPC_LAST_PACKET) {
1989 ret = rxrpc_call_flush(call);
1990 if (ret < 0)
1991 goto out;
1992 }
1993 break;
1994 }
1995
1996 /* work out how much space current packet has available */
1997 space = call->conn->mtu_size - msg->dsize;
1998 chunk = min(space, size);
1999
2000 _debug("- [before] space=%Zu chunk=%Zu", space, chunk);
2001
2002 while (!siov->iov_len)
2003 siov++;
2004
2005 /* if we are going to have to duplicate the data then coalesce
2006 * it too */
2007 if (dup_data) {
2008			/* don't allocate more than 1 page at a time */
2009 if (chunk > PAGE_SIZE)
2010 chunk = PAGE_SIZE;
2011
2012 /* allocate a data buffer and attach to the message */
2013 buf = kmalloc(chunk, alloc_flags);
2014 if (unlikely(!buf)) {
2015 if (msg->dsize ==
2016 sizeof(struct rxrpc_header)) {
2017 /* discard an empty msg and wind back
2018 * the seq counter */
2019 rxrpc_put_message(msg);
2020 call->snd_nextmsg = NULL;
2021 call->snd_seq_count--;
2022 }
2023
2024 ret = -ENOMEM;
2025 goto out;
2026 }
2027
2028 tmp = msg->dcount++;
2029 set_bit(tmp, &msg->dfree);
2030 msg->data[tmp].iov_base = buf;
2031 msg->data[tmp].iov_len = chunk;
2032 msg->dsize += chunk;
2033 *size_sent += chunk;
2034 size -= chunk;
2035
2036 /* load the buffer with data */
2037 while (chunk > 0) {
2038 tmp = min(chunk, siov->iov_len);
2039 memcpy(buf, siov->iov_base, tmp);
2040 buf += tmp;
2041 siov->iov_base += tmp;
2042 siov->iov_len -= tmp;
2043 if (!siov->iov_len)
2044 siov++;
2045 chunk -= tmp;
2046 }
2047 }
2048 else {
2049 /* we want to attach the supplied buffers directly */
2050 while (chunk > 0 &&
2051 msg->dcount < RXRPC_MSG_MAX_IOCS) {
2052 tmp = msg->dcount++;
2053 msg->data[tmp].iov_base = siov->iov_base;
2054 msg->data[tmp].iov_len = siov->iov_len;
2055 msg->dsize += siov->iov_len;
2056 *size_sent += siov->iov_len;
2057 size -= siov->iov_len;
2058 chunk -= siov->iov_len;
2059 siov++;
2060 }
2061 }
2062
2063 _debug("- [loaded] chunk=%Zu size=%Zu", chunk, size);
2064
2065 /* dispatch the message when full, final or requesting ACK */
2066 if (msg->dsize >= call->conn->mtu_size || rxhdr_flags) {
2067 ret = rxrpc_call_flush(call);
2068 if (ret < 0)
2069 goto out;
2070 }
2071
2072	} while (size > 0);
2073
2074 ret = 0;
2075 out:
2076 _leave(" = %d (%Zd queued, %Zd rem)", ret, *size_sent, size);
2077 return ret;
2078
2079} /* end rxrpc_call_write_data() */
2080
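
/*
 * Editor's illustrative sketch - not part of the original file.  It shows
 * a minimal use of rxrpc_call_write_data(): one kvec sent as the final
 * packet.  With dup_data set the payload is copied into the message, so a
 * short-lived buffer is safe; the function name is hypothetical.
 */
static int example_send_final_blob(struct rxrpc_call *call,
				   void *blob, size_t len)
{
	struct kvec iov[1];
	size_t sent;

	iov[0].iov_base = blob;
	iov[0].iov_len = len;

	return rxrpc_call_write_data(call, 1, iov, RXRPC_LAST_PACKET,
				     GFP_KERNEL, 1 /* dup_data */, &sent);
}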
2081/*****************************************************************************/
2082/*
2083 * flush outstanding packets to the network
2084 */
2085static int rxrpc_call_flush(struct rxrpc_call *call)
2086{
2087 struct rxrpc_message *msg;
2088 int ret = 0;
2089
2090 _enter("%p", call);
2091
2092 rxrpc_get_call(call);
2093
2094 /* if there's a packet under construction, then dispatch it now */
2095 if (call->snd_nextmsg) {
2096 msg = call->snd_nextmsg;
2097 call->snd_nextmsg = NULL;
2098
2099 if (msg->hdr.flags & RXRPC_LAST_PACKET) {
2100 msg->hdr.flags &= ~RXRPC_MORE_PACKETS;
2101 if (call->app_call_state != RXRPC_CSTATE_CLNT_SND_ARGS)
2102 msg->hdr.flags |= RXRPC_REQUEST_ACK;
2103 }
2104 else {
2105 msg->hdr.flags |= RXRPC_MORE_PACKETS;
2106 }
2107
2108 _proto("Sending DATA message { ds=%Zu dc=%u df=%02lu }",
2109 msg->dsize, msg->dcount, msg->dfree);
2110
2111 /* queue and adjust call state */
2112 spin_lock(&call->lock);
2113 list_add_tail(&msg->link, &call->acks_pendq);
2114
2115 /* decide what to do depending on current state and if this is
2116 * the last packet */
2117 ret = -EINVAL;
2118 switch (call->app_call_state) {
2119 case RXRPC_CSTATE_SRVR_SND_REPLY:
2120 if (msg->hdr.flags & RXRPC_LAST_PACKET) {
2121 call->app_call_state =
2122 RXRPC_CSTATE_SRVR_RCV_FINAL_ACK;
2123 _state(call);
2124 }
2125 break;
2126
2127 case RXRPC_CSTATE_CLNT_SND_ARGS:
2128 if (msg->hdr.flags & RXRPC_LAST_PACKET) {
2129 call->app_call_state =
2130 RXRPC_CSTATE_CLNT_RCV_REPLY;
2131 _state(call);
2132 }
2133 break;
2134
2135 case RXRPC_CSTATE_ERROR:
2136			ret = call->app_errno;	/* fall through */
2137 default:
2138 spin_unlock(&call->lock);
2139 goto out;
2140 }
2141
2142 call->acks_pend_cnt++;
2143
2144 mod_timer(&call->acks_timeout,
2145 __rxrpc_rtt_based_timeout(call,
2146 rxrpc_call_acks_timeout));
2147
2148 spin_unlock(&call->lock);
2149
2150 ret = rxrpc_conn_sendmsg(call->conn, msg);
2151 if (ret == 0)
2152 call->pkt_snd_count++;
2153 }
2154
2155 out:
2156 rxrpc_put_call(call);
2157
2158 _leave(" = %d", ret);
2159 return ret;
2160
2161} /* end rxrpc_call_flush() */
2162
2163/*****************************************************************************/
2164/*
2165 * resend NAK'd or unacknowledged packets up to the highest one specified
2166 */
2167static void rxrpc_call_resend(struct rxrpc_call *call, rxrpc_seq_t highest)
2168{
2169 struct rxrpc_message *msg;
2170 struct list_head *_p;
2171 rxrpc_seq_t seq = 0;
2172
2173 _enter("%p,%u", call, highest);
2174
2175 _proto("Rx Resend required");
2176
2177 /* handle too many resends */
2178 if (call->snd_resend_cnt >= rxrpc_call_max_resend) {
2179 _debug("Aborting due to too many resends (rcv=%d)",
2180 call->pkt_rcv_count);
2181 rxrpc_call_abort(call,
2182 call->pkt_rcv_count > 0 ? -EIO : -ETIMEDOUT);
2183 _leave("");
2184 return;
2185 }
2186
2187 spin_lock(&call->lock);
2188 call->snd_resend_cnt++;
2189 for (;;) {
2190		/* determine the next packet we might need to ACK */
2191 if (seq <= call->acks_dftv_seq)
2192 seq = call->acks_dftv_seq;
2193 seq++;
2194
2195 if (seq > highest)
2196 break;
2197
2198 /* look for the packet in the pending-ACK queue */
2199 list_for_each(_p, &call->acks_pendq) {
2200 msg = list_entry(_p, struct rxrpc_message, link);
2201 if (msg->seq == seq)
2202 goto found_msg;
2203 }
2204
2205 panic("%s(%p,%d):"
2206 " Inconsistent pending-ACK queue (ds=%u sc=%u sq=%u)\n",
2207 __FUNCTION__, call, highest,
2208 call->acks_dftv_seq, call->snd_seq_count, seq);
2209
2210 found_msg:
2211 if (msg->state != RXRPC_MSG_SENT)
2212 continue; /* only un-ACK'd packets */
2213
2214 rxrpc_get_message(msg);
2215 spin_unlock(&call->lock);
2216
2217 /* send each message again (and ignore any errors we might
2218 * incur) */
2219 _proto("Resending DATA message { ds=%Zu dc=%u df=%02lu }",
2220 msg->dsize, msg->dcount, msg->dfree);
2221
2222 if (rxrpc_conn_sendmsg(call->conn, msg) == 0)
2223 call->pkt_snd_count++;
2224
2225 rxrpc_put_message(msg);
2226
2227 spin_lock(&call->lock);
2228 }
2229
2230 /* reset the timeout */
2231 mod_timer(&call->acks_timeout,
2232 __rxrpc_rtt_based_timeout(call, rxrpc_call_acks_timeout));
2233
2234 spin_unlock(&call->lock);
2235
2236 _leave("");
2237} /* end rxrpc_call_resend() */
2238
2239/*****************************************************************************/
2240/*
2241 * handle an ICMP error being applied to a call
2242 */
2243void rxrpc_call_handle_error(struct rxrpc_call *call, int local, int errno)
2244{
2245 _enter("%p{%u},%d", call, ntohl(call->call_id), errno);
2246
2247 /* if this call is already aborted, then just wake up any waiters */
2248 if (call->app_call_state == RXRPC_CSTATE_ERROR) {
2249 call->app_error_func(call);
2250 }
2251 else {
2252 /* tell the app layer what happened */
2253 spin_lock(&call->lock);
2254 call->app_call_state = RXRPC_CSTATE_ERROR;
2255 _state(call);
2256 if (local)
2257 call->app_err_state = RXRPC_ESTATE_LOCAL_ERROR;
2258 else
2259 call->app_err_state = RXRPC_ESTATE_REMOTE_ERROR;
2260 call->app_errno = errno;
2261 call->app_mark = RXRPC_APP_MARK_EOF;
2262 call->app_read_buf = NULL;
2263 call->app_async_read = 0;
2264
2265 /* map the error */
2266 call->app_aemap_func(call);
2267
2268 del_timer_sync(&call->acks_timeout);
2269 del_timer_sync(&call->rcv_timeout);
2270 del_timer_sync(&call->ackr_dfr_timo);
2271
2272 spin_unlock(&call->lock);
2273
2274 call->app_error_func(call);
2275 }
2276
2277 _leave("");
2278} /* end rxrpc_call_handle_error() */
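
/*
 * Editor's illustrative sketch - not part of the original file.  It shows
 * the sort of app_error_func that rxrpc_call_handle_error() invokes above.
 * The wait queue is hypothetical service-side state (rxrpc_call's own wait
 * queue field is not visible in this patch); the handler simply wakes
 * whoever is blocked on the call.
 */
static DECLARE_WAIT_QUEUE_HEAD(example_call_waitq);

static void example_error_func(struct rxrpc_call *call)
{
	/* by this point app_call_state is RXRPC_CSTATE_ERROR and app_errno
	 * holds the mapped error */
	wake_up(&example_call_waitq);
}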
diff --git a/net/rxrpc/connection.c b/net/rxrpc/connection.c
new file mode 100644
index 000000000000..61463c74f8cc
--- /dev/null
+++ b/net/rxrpc/connection.c
@@ -0,0 +1,778 @@
1/* connection.c: Rx connection routines
2 *
3 * Copyright (C) 2002 Red Hat, Inc. All Rights Reserved.
4 * Written by David Howells (dhowells@redhat.com)
5 *
6 * This program is free software; you can redistribute it and/or
7 * modify it under the terms of the GNU General Public License
8 * as published by the Free Software Foundation; either version
9 * 2 of the License, or (at your option) any later version.
10 */
11
12#include <linux/sched.h>
13#include <linux/slab.h>
14#include <linux/module.h>
15#include <rxrpc/rxrpc.h>
16#include <rxrpc/transport.h>
17#include <rxrpc/peer.h>
18#include <rxrpc/connection.h>
19#include <rxrpc/call.h>
20#include <rxrpc/message.h>
21#include <linux/udp.h>
22#include <linux/ip.h>
23#include <net/sock.h>
24#include <asm/uaccess.h>
25#include "internal.h"
26
27__RXACCT_DECL(atomic_t rxrpc_connection_count);
28
29LIST_HEAD(rxrpc_conns);
30DECLARE_RWSEM(rxrpc_conns_sem);
31unsigned long rxrpc_conn_timeout = 60 * 60;
32
33static void rxrpc_conn_do_timeout(struct rxrpc_connection *conn);
34
35static void __rxrpc_conn_timeout(rxrpc_timer_t *timer)
36{
37 struct rxrpc_connection *conn =
38 list_entry(timer, struct rxrpc_connection, timeout);
39
40 _debug("Rx CONN TIMEOUT [%p{u=%d}]", conn, atomic_read(&conn->usage));
41
42 rxrpc_conn_do_timeout(conn);
43}
44
45static const struct rxrpc_timer_ops rxrpc_conn_timer_ops = {
46 .timed_out = __rxrpc_conn_timeout,
47};
48
49/*****************************************************************************/
50/*
51 * create a new connection record
52 */
53static inline int __rxrpc_create_connection(struct rxrpc_peer *peer,
54 struct rxrpc_connection **_conn)
55{
56 struct rxrpc_connection *conn;
57
58	_enter("%p", peer);
59
60 /* allocate and initialise a connection record */
61 conn = kmalloc(sizeof(struct rxrpc_connection), GFP_KERNEL);
62 if (!conn) {
63 _leave(" = -ENOMEM");
64 return -ENOMEM;
65 }
66
67 memset(conn, 0, sizeof(struct rxrpc_connection));
68 atomic_set(&conn->usage, 1);
69
70 INIT_LIST_HEAD(&conn->link);
71 INIT_LIST_HEAD(&conn->id_link);
72 init_waitqueue_head(&conn->chanwait);
73 spin_lock_init(&conn->lock);
74 rxrpc_timer_init(&conn->timeout, &rxrpc_conn_timer_ops);
75
76 do_gettimeofday(&conn->atime);
77 conn->mtu_size = 1024;
78 conn->peer = peer;
79 conn->trans = peer->trans;
80
81 __RXACCT(atomic_inc(&rxrpc_connection_count));
82 *_conn = conn;
83 _leave(" = 0 (%p)", conn);
84
85 return 0;
86} /* end __rxrpc_create_connection() */
87
88/*****************************************************************************/
89/*
90 * create a new connection record for outgoing connections
91 */
92int rxrpc_create_connection(struct rxrpc_transport *trans,
93 __be16 port,
94 __be32 addr,
95 uint16_t service_id,
96 void *security,
97 struct rxrpc_connection **_conn)
98{
99 struct rxrpc_connection *candidate, *conn;
100 struct rxrpc_peer *peer;
101 struct list_head *_p;
102 __be32 connid;
103 int ret;
104
105 _enter("%p{%hu},%u,%hu", trans, trans->port, ntohs(port), service_id);
106
107 /* get a peer record */
108 ret = rxrpc_peer_lookup(trans, addr, &peer);
109 if (ret < 0) {
110 _leave(" = %d", ret);
111 return ret;
112 }
113
114 /* allocate and initialise a connection record */
115 ret = __rxrpc_create_connection(peer, &candidate);
116 if (ret < 0) {
117 rxrpc_put_peer(peer);
118 _leave(" = %d", ret);
119 return ret;
120 }
121
122 /* fill in the specific bits */
123 candidate->addr.sin_family = AF_INET;
124 candidate->addr.sin_port = port;
125 candidate->addr.sin_addr.s_addr = addr;
126
127 candidate->in_epoch = rxrpc_epoch;
128 candidate->out_epoch = rxrpc_epoch;
129 candidate->in_clientflag = 0;
130 candidate->out_clientflag = RXRPC_CLIENT_INITIATED;
131 candidate->service_id = htons(service_id);
132
133 /* invent a unique connection ID */
134 write_lock(&peer->conn_idlock);
135
136 try_next_id:
137 connid = htonl(peer->conn_idcounter & RXRPC_CIDMASK);
138 peer->conn_idcounter += RXRPC_MAXCALLS;
139
140 list_for_each(_p, &peer->conn_idlist) {
141 conn = list_entry(_p, struct rxrpc_connection, id_link);
142 if (connid == conn->conn_id)
143 goto try_next_id;
144 if (connid > conn->conn_id)
145 break;
146 }
147
148 _debug("selected candidate conn ID %x.%u",
149 ntohl(peer->addr.s_addr), ntohl(connid));
150
151 candidate->conn_id = connid;
152 list_add_tail(&candidate->id_link, _p);
153
154 write_unlock(&peer->conn_idlock);
155
156 /* attach to peer */
157 candidate->peer = peer;
158
159 write_lock(&peer->conn_lock);
160
161 /* search the peer's transport graveyard list */
162 spin_lock(&peer->conn_gylock);
163 list_for_each(_p, &peer->conn_graveyard) {
164 conn = list_entry(_p, struct rxrpc_connection, link);
165 if (conn->addr.sin_port == candidate->addr.sin_port &&
166 conn->security_ix == candidate->security_ix &&
167 conn->service_id == candidate->service_id &&
168 conn->in_clientflag == 0)
169 goto found_in_graveyard;
170 }
171 spin_unlock(&peer->conn_gylock);
172
173 /* pick the new candidate */
174 _debug("created connection: {%08x} [out]", ntohl(candidate->conn_id));
175 atomic_inc(&peer->conn_count);
176 conn = candidate;
177 candidate = NULL;
178
179 make_active:
180 list_add_tail(&conn->link, &peer->conn_active);
181 write_unlock(&peer->conn_lock);
182
183 if (candidate) {
184 write_lock(&peer->conn_idlock);
185 list_del(&candidate->id_link);
186 write_unlock(&peer->conn_idlock);
187
188 __RXACCT(atomic_dec(&rxrpc_connection_count));
189 kfree(candidate);
190 }
191 else {
192 down_write(&rxrpc_conns_sem);
193 list_add_tail(&conn->proc_link, &rxrpc_conns);
194 up_write(&rxrpc_conns_sem);
195 }
196
197 *_conn = conn;
198 _leave(" = 0 (%p)", conn);
199
200 return 0;
201
202 /* handle resurrecting a connection from the graveyard */
203 found_in_graveyard:
204 _debug("resurrecting connection: {%08x} [out]", ntohl(conn->conn_id));
205 rxrpc_get_connection(conn);
206 rxrpc_krxtimod_del_timer(&conn->timeout);
207 list_del_init(&conn->link);
208 spin_unlock(&peer->conn_gylock);
209 goto make_active;
210} /* end rxrpc_create_connection() */
211
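
/*
 * Editor's illustrative sketch - not part of the original file.  It shows
 * a plausible call to rxrpc_create_connection(); note that the port and
 * address are passed in network byte order whilst the service ID is host
 * order (it is converted with htons() inside).  The function name and the
 * port/service values are illustrative only.
 */
static int example_connect(struct rxrpc_transport *trans,
			   struct rxrpc_connection **_conn)
{
	return rxrpc_create_connection(trans,
				       htons(7000),		/* Rx port */
				       htonl(0x7f000001),	/* 127.0.0.1 */
				       1,			/* service ID */
				       NULL,			/* no security */
				       _conn);
}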
212/*****************************************************************************/
213/*
214 * lookup the connection for an incoming packet
215 * - create a new connection record for unrecorded incoming connections
216 */
217int rxrpc_connection_lookup(struct rxrpc_peer *peer,
218 struct rxrpc_message *msg,
219 struct rxrpc_connection **_conn)
220{
221 struct rxrpc_connection *conn, *candidate = NULL;
222 struct list_head *_p;
223 int ret, fresh = 0;
224 __be32 x_epoch, x_connid;
225 __be16 x_port, x_servid;
226 __u32 x_secix;
227 u8 x_clflag;
228
229 _enter("%p{{%hu}},%u,%hu",
230 peer,
231 peer->trans->port,
232 ntohs(msg->pkt->h.uh->source),
233 ntohs(msg->hdr.serviceId));
234
235 x_port = msg->pkt->h.uh->source;
236 x_epoch = msg->hdr.epoch;
237 x_clflag = msg->hdr.flags & RXRPC_CLIENT_INITIATED;
238 x_connid = htonl(ntohl(msg->hdr.cid) & RXRPC_CIDMASK);
239 x_servid = msg->hdr.serviceId;
240 x_secix = msg->hdr.securityIndex;
241
242 /* [common case] search the transport's active list first */
243 read_lock(&peer->conn_lock);
244 list_for_each(_p, &peer->conn_active) {
245 conn = list_entry(_p, struct rxrpc_connection, link);
246 if (conn->addr.sin_port == x_port &&
247 conn->in_epoch == x_epoch &&
248 conn->conn_id == x_connid &&
249 conn->security_ix == x_secix &&
250 conn->service_id == x_servid &&
251 conn->in_clientflag == x_clflag)
252 goto found_active;
253 }
254 read_unlock(&peer->conn_lock);
255
256 /* [uncommon case] not active
257 * - create a candidate for a new record if an inbound connection
258 * - only examine the graveyard for an outbound connection
259 */
260 if (x_clflag) {
261 ret = __rxrpc_create_connection(peer, &candidate);
262 if (ret < 0) {
263 _leave(" = %d", ret);
264 return ret;
265 }
266
267 /* fill in the specifics */
268 candidate->addr.sin_family = AF_INET;
269 candidate->addr.sin_port = x_port;
270 candidate->addr.sin_addr.s_addr = msg->pkt->nh.iph->saddr;
271 candidate->in_epoch = x_epoch;
272 candidate->out_epoch = x_epoch;
273 candidate->in_clientflag = RXRPC_CLIENT_INITIATED;
274 candidate->out_clientflag = 0;
275 candidate->conn_id = x_connid;
276 candidate->service_id = x_servid;
277 candidate->security_ix = x_secix;
278 }
279
280 /* search the active list again, just in case it appeared whilst we
281 * were busy */
282 write_lock(&peer->conn_lock);
283 list_for_each(_p, &peer->conn_active) {
284 conn = list_entry(_p, struct rxrpc_connection, link);
285 if (conn->addr.sin_port == x_port &&
286 conn->in_epoch == x_epoch &&
287 conn->conn_id == x_connid &&
288 conn->security_ix == x_secix &&
289 conn->service_id == x_servid &&
290 conn->in_clientflag == x_clflag)
291 goto found_active_second_chance;
292 }
293
294 /* search the transport's graveyard list */
295 spin_lock(&peer->conn_gylock);
296 list_for_each(_p, &peer->conn_graveyard) {
297 conn = list_entry(_p, struct rxrpc_connection, link);
298 if (conn->addr.sin_port == x_port &&
299 conn->in_epoch == x_epoch &&
300 conn->conn_id == x_connid &&
301 conn->security_ix == x_secix &&
302 conn->service_id == x_servid &&
303 conn->in_clientflag == x_clflag)
304 goto found_in_graveyard;
305 }
306 spin_unlock(&peer->conn_gylock);
307
308 /* outbound connections aren't created here */
309 if (!x_clflag) {
310 write_unlock(&peer->conn_lock);
311 _leave(" = -ENOENT");
312 return -ENOENT;
313 }
314
315 /* we can now add the new candidate to the list */
316 _debug("created connection: {%08x} [in]", ntohl(candidate->conn_id));
317 rxrpc_get_peer(peer);
318 conn = candidate;
319 candidate = NULL;
320 atomic_inc(&peer->conn_count);
321 fresh = 1;
322
323 make_active:
324 list_add_tail(&conn->link, &peer->conn_active);
325
326 success_uwfree:
327 write_unlock(&peer->conn_lock);
328
329 if (candidate) {
330 write_lock(&peer->conn_idlock);
331 list_del(&candidate->id_link);
332 write_unlock(&peer->conn_idlock);
333
334 __RXACCT(atomic_dec(&rxrpc_connection_count));
335 kfree(candidate);
336 }
337
338 if (fresh) {
339 down_write(&rxrpc_conns_sem);
340 list_add_tail(&conn->proc_link, &rxrpc_conns);
341 up_write(&rxrpc_conns_sem);
342 }
343
344 success:
345 *_conn = conn;
346 _leave(" = 0 (%p)", conn);
347 return 0;
348
349 /* handle the connection being found in the active list straight off */
350 found_active:
351 rxrpc_get_connection(conn);
352 read_unlock(&peer->conn_lock);
353 goto success;
354
355 /* handle resurrecting a connection from the graveyard */
356 found_in_graveyard:
357 _debug("resurrecting connection: {%08x} [in]", ntohl(conn->conn_id));
358 rxrpc_get_peer(peer);
359 rxrpc_get_connection(conn);
360 rxrpc_krxtimod_del_timer(&conn->timeout);
361 list_del_init(&conn->link);
362 spin_unlock(&peer->conn_gylock);
363 goto make_active;
364
365 /* handle finding the connection on the second time through the active
366 * list */
367 found_active_second_chance:
368 rxrpc_get_connection(conn);
369 goto success_uwfree;
370
371} /* end rxrpc_connection_lookup() */
372
373/*****************************************************************************/
374/*
375 * finish using a connection record
376 * - it will be transferred to the peer's connection graveyard when refcount
377 * reaches 0
378 */
379void rxrpc_put_connection(struct rxrpc_connection *conn)
380{
381 struct rxrpc_peer *peer;
382
383 if (!conn)
384 return;
385
386 _enter("%p{u=%d p=%hu}",
387 conn, atomic_read(&conn->usage), ntohs(conn->addr.sin_port));
388
389 peer = conn->peer;
390 spin_lock(&peer->conn_gylock);
391
392 /* sanity check */
393 if (atomic_read(&conn->usage) <= 0)
394 BUG();
395
396 if (likely(!atomic_dec_and_test(&conn->usage))) {
397 spin_unlock(&peer->conn_gylock);
398 _leave("");
399 return;
400 }
401
402 /* move to graveyard queue */
403 _debug("burying connection: {%08x}", ntohl(conn->conn_id));
404 list_del(&conn->link);
405 list_add_tail(&conn->link, &peer->conn_graveyard);
406
407 rxrpc_krxtimod_add_timer(&conn->timeout, rxrpc_conn_timeout * HZ);
408
409 spin_unlock(&peer->conn_gylock);
410
411 rxrpc_put_peer(conn->peer);
412
413 _leave(" [killed]");
414} /* end rxrpc_put_connection() */
415
416/*****************************************************************************/
417/*
418 * destroy a connection record once its graveyard timeout expires (unless
419 *   it has been resurrected in the meantime)
419 */
420static void rxrpc_conn_do_timeout(struct rxrpc_connection *conn)
421{
422 struct rxrpc_peer *peer;
423
424 _enter("%p{u=%d p=%hu}",
425 conn, atomic_read(&conn->usage), ntohs(conn->addr.sin_port));
426
427 peer = conn->peer;
428
429 if (atomic_read(&conn->usage) < 0)
430 BUG();
431
432 /* remove from graveyard if still dead */
433 spin_lock(&peer->conn_gylock);
434 if (atomic_read(&conn->usage) == 0) {
435 list_del_init(&conn->link);
436 }
437 else {
438 conn = NULL;
439 }
440 spin_unlock(&peer->conn_gylock);
441
442 if (!conn) {
443 _leave("");
444 return; /* resurrected */
445 }
446
447 _debug("--- Destroying Connection %p{%08x} ---",
448 conn, ntohl(conn->conn_id));
449
450 down_write(&rxrpc_conns_sem);
451 list_del(&conn->proc_link);
452 up_write(&rxrpc_conns_sem);
453
454 write_lock(&peer->conn_idlock);
455 list_del(&conn->id_link);
456 write_unlock(&peer->conn_idlock);
457
458 __RXACCT(atomic_dec(&rxrpc_connection_count));
459 kfree(conn);
460
461 /* if the graveyard is now empty, wake up anyone waiting for that */
462 if (atomic_dec_and_test(&peer->conn_count))
463 wake_up(&peer->conn_gy_waitq);
464
465 _leave(" [destroyed]");
466} /* end rxrpc_conn_do_timeout() */
467
468/*****************************************************************************/
469/*
470 * clear all connection records from a peer endpoint
471 */
472void rxrpc_conn_clearall(struct rxrpc_peer *peer)
473{
474 DECLARE_WAITQUEUE(myself, current);
475
476 struct rxrpc_connection *conn;
477 int err;
478
479 _enter("%p", peer);
480
481 /* there shouldn't be any active conns remaining */
482 if (!list_empty(&peer->conn_active))
483 BUG();
484
485 /* manually timeout all conns in the graveyard */
486 spin_lock(&peer->conn_gylock);
487 while (!list_empty(&peer->conn_graveyard)) {
488 conn = list_entry(peer->conn_graveyard.next,
489 struct rxrpc_connection, link);
490 err = rxrpc_krxtimod_del_timer(&conn->timeout);
491 spin_unlock(&peer->conn_gylock);
492
493 if (err == 0)
494 rxrpc_conn_do_timeout(conn);
495
496 spin_lock(&peer->conn_gylock);
497 }
498 spin_unlock(&peer->conn_gylock);
499
500	/* wait for the conn graveyard to be completely cleared */
501 set_current_state(TASK_UNINTERRUPTIBLE);
502 add_wait_queue(&peer->conn_gy_waitq, &myself);
503
504 while (atomic_read(&peer->conn_count) != 0) {
505 schedule();
506 set_current_state(TASK_UNINTERRUPTIBLE);
507 }
508
509 remove_wait_queue(&peer->conn_gy_waitq, &myself);
510 set_current_state(TASK_RUNNING);
511
512 _leave("");
513} /* end rxrpc_conn_clearall() */
514
515/*****************************************************************************/
516/*
517 * allocate and prepare a message for sending out through the transport
518 * endpoint
519 */
520int rxrpc_conn_newmsg(struct rxrpc_connection *conn,
521 struct rxrpc_call *call,
522 uint8_t type,
523 int dcount,
524 struct kvec diov[],
525 int alloc_flags,
526 struct rxrpc_message **_msg)
527{
528 struct rxrpc_message *msg;
529 int loop;
530
531 _enter("%p{%d},%p,%u", conn, ntohs(conn->addr.sin_port), call, type);
532
533 if (dcount > 3) {
534 _leave(" = -EINVAL");
535 return -EINVAL;
536 }
537
538 msg = kmalloc(sizeof(struct rxrpc_message), alloc_flags);
539 if (!msg) {
540 _leave(" = -ENOMEM");
541 return -ENOMEM;
542 }
543
544 memset(msg, 0, sizeof(*msg));
545 atomic_set(&msg->usage, 1);
546
547 INIT_LIST_HEAD(&msg->link);
548
549 msg->state = RXRPC_MSG_PREPARED;
550
551 msg->hdr.epoch = conn->out_epoch;
552 msg->hdr.cid = conn->conn_id | (call ? call->chan_ix : 0);
553 msg->hdr.callNumber = call ? call->call_id : 0;
554 msg->hdr.type = type;
555 msg->hdr.flags = conn->out_clientflag;
556 msg->hdr.securityIndex = conn->security_ix;
557 msg->hdr.serviceId = conn->service_id;
558
559 /* generate sequence numbers for data packets */
560 if (call) {
561 switch (type) {
562 case RXRPC_PACKET_TYPE_DATA:
563 msg->seq = ++call->snd_seq_count;
564 msg->hdr.seq = htonl(msg->seq);
565 break;
566 case RXRPC_PACKET_TYPE_ACK:
567 /* ACK sequence numbers are complicated. The following
568 * may be wrong:
569 * - jumbo packet ACKs should have a seq number
570 * - normal ACKs should not
571 */
572 default:
573 break;
574 }
575 }
576
577 msg->dcount = dcount + 1;
578 msg->dsize = sizeof(msg->hdr);
579 msg->data[0].iov_len = sizeof(msg->hdr);
580 msg->data[0].iov_base = &msg->hdr;
581
582	for (loop = 0; loop < dcount; loop++) {
583 msg->dsize += diov[loop].iov_len;
584 msg->data[loop+1].iov_len = diov[loop].iov_len;
585 msg->data[loop+1].iov_base = diov[loop].iov_base;
586 }
587
588 __RXACCT(atomic_inc(&rxrpc_message_count));
589 *_msg = msg;
590 _leave(" = 0 (%p) #%d", msg, atomic_read(&rxrpc_message_count));
591 return 0;
592} /* end rxrpc_conn_newmsg() */
593
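
/*
 * Editor's illustrative sketch - not part of the original file.  It shows
 * rxrpc_conn_newmsg() being used to build a one-fragment DATA message; the
 * Rx header always occupies data[0], which is why at most three caller
 * iovecs are accepted above.  The function name is hypothetical.
 */
static int example_new_data_msg(struct rxrpc_connection *conn,
				struct rxrpc_call *call,
				void *payload, size_t len,
				struct rxrpc_message **_msg)
{
	struct kvec iov[1];

	iov[0].iov_base = payload;
	iov[0].iov_len = len;

	return rxrpc_conn_newmsg(conn, call, RXRPC_PACKET_TYPE_DATA,
				 1, iov, GFP_KERNEL, _msg);
}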
594/*****************************************************************************/
595/*
596 * free a message
597 */
598void __rxrpc_put_message(struct rxrpc_message *msg)
599{
600 int loop;
601
602 _enter("%p #%d", msg, atomic_read(&rxrpc_message_count));
603
604 if (msg->pkt)
605 kfree_skb(msg->pkt);
606 rxrpc_put_connection(msg->conn);
607
608 for (loop = 0; loop < 8; loop++)
609 if (test_bit(loop, &msg->dfree))
610 kfree(msg->data[loop].iov_base);
611
612 __RXACCT(atomic_dec(&rxrpc_message_count));
613 kfree(msg);
614
615 _leave("");
616} /* end __rxrpc_put_message() */
617
618/*****************************************************************************/
619/*
620 * send a message out through the transport endpoint
621 */
622int rxrpc_conn_sendmsg(struct rxrpc_connection *conn,
623 struct rxrpc_message *msg)
624{
625 struct msghdr msghdr;
626 int ret;
627
628 _enter("%p{%d}", conn, ntohs(conn->addr.sin_port));
629
630 /* fill in some fields in the header */
631 spin_lock(&conn->lock);
632 msg->hdr.serial = htonl(++conn->serial_counter);
633 msg->rttdone = 0;
634 spin_unlock(&conn->lock);
635
636 /* set up the message to be transmitted */
637 msghdr.msg_name = &conn->addr;
638 msghdr.msg_namelen = sizeof(conn->addr);
639 msghdr.msg_control = NULL;
640 msghdr.msg_controllen = 0;
641 msghdr.msg_flags = MSG_CONFIRM | MSG_DONTWAIT;
642
643 _net("Sending message type %d of %Zd bytes to %08x:%d",
644 msg->hdr.type,
645 msg->dsize,
646 ntohl(conn->addr.sin_addr.s_addr),
647 ntohs(conn->addr.sin_port));
648
649 /* send the message */
650 ret = kernel_sendmsg(conn->trans->socket, &msghdr,
651 msg->data, msg->dcount, msg->dsize);
652 if (ret < 0) {
653 msg->state = RXRPC_MSG_ERROR;
654 } else {
655 msg->state = RXRPC_MSG_SENT;
656 ret = 0;
657
658 spin_lock(&conn->lock);
659 do_gettimeofday(&conn->atime);
660 msg->stamp = conn->atime;
661 spin_unlock(&conn->lock);
662 }
663
664 _leave(" = %d", ret);
665
666 return ret;
667} /* end rxrpc_conn_sendmsg() */
668
669/*****************************************************************************/
670/*
671 * deal with a subsequent call packet
672 */
673int rxrpc_conn_receive_call_packet(struct rxrpc_connection *conn,
674 struct rxrpc_call *call,
675 struct rxrpc_message *msg)
676{
677 struct rxrpc_message *pmsg;
678 struct list_head *_p;
679 unsigned cix, seq;
680 int ret = 0;
681
682 _enter("%p,%p,%p", conn, call, msg);
683
684 if (!call) {
685 cix = ntohl(msg->hdr.cid) & RXRPC_CHANNELMASK;
686
687 spin_lock(&conn->lock);
688 call = conn->channels[cix];
689
690 if (!call || call->call_id != msg->hdr.callNumber) {
691 spin_unlock(&conn->lock);
692 rxrpc_trans_immediate_abort(conn->trans, msg, -ENOENT);
693 goto out;
694 }
695 else {
696 rxrpc_get_call(call);
697 spin_unlock(&conn->lock);
698 }
699 }
700 else {
701 rxrpc_get_call(call);
702 }
703
704 _proto("Received packet %%%u [%u] on call %hu:%u:%u",
705 ntohl(msg->hdr.serial),
706 ntohl(msg->hdr.seq),
707 ntohs(msg->hdr.serviceId),
708 ntohl(conn->conn_id),
709 ntohl(call->call_id));
710
711 call->pkt_rcv_count++;
712
713 if (msg->pkt->dst && msg->pkt->dst->dev)
714 conn->peer->if_mtu =
715 msg->pkt->dst->dev->mtu -
716 msg->pkt->dst->dev->hard_header_len;
717
718 /* queue on the call in seq order */
719 rxrpc_get_message(msg);
720 seq = msg->seq;
721
722 spin_lock(&call->lock);
723 list_for_each(_p, &call->rcv_receiveq) {
724 pmsg = list_entry(_p, struct rxrpc_message, link);
725 if (pmsg->seq > seq)
726 break;
727 }
728 list_add_tail(&msg->link, _p);
729
730 /* reset the activity timeout */
731 call->flags |= RXRPC_CALL_RCV_PKT;
732	mod_timer(&call->rcv_timeout, jiffies + rxrpc_call_rcv_timeout * HZ);
733
734 spin_unlock(&call->lock);
735
736 rxrpc_krxiod_queue_call(call);
737
738 rxrpc_put_call(call);
739 out:
740 _leave(" = %d", ret);
741 return ret;
742} /* end rxrpc_conn_receive_call_packet() */
743
744/*****************************************************************************/
745/*
746 * handle an ICMP error being applied to a connection
747 */
748void rxrpc_conn_handle_error(struct rxrpc_connection *conn,
749 int local, int errno)
750{
751 struct rxrpc_call *calls[4];
752 int loop;
753
754 _enter("%p{%d},%d", conn, ntohs(conn->addr.sin_port), errno);
755
756 /* get a ref to all my calls in one go */
757 memset(calls, 0, sizeof(calls));
758 spin_lock(&conn->lock);
759
760 for (loop = 3; loop >= 0; loop--) {
761 if (conn->channels[loop]) {
762 calls[loop] = conn->channels[loop];
763 rxrpc_get_call(calls[loop]);
764 }
765 }
766
767 spin_unlock(&conn->lock);
768
769 /* now kick them all */
770 for (loop = 3; loop >= 0; loop--) {
771 if (calls[loop]) {
772 rxrpc_call_handle_error(calls[loop], local, errno);
773 rxrpc_put_call(calls[loop]);
774 }
775 }
776
777 _leave("");
778} /* end rxrpc_conn_handle_error() */
diff --git a/net/rxrpc/internal.h b/net/rxrpc/internal.h
new file mode 100644
index 000000000000..70e52f6b0b64
--- /dev/null
+++ b/net/rxrpc/internal.h
@@ -0,0 +1,106 @@
1/* internal.h: internal Rx RPC stuff
2 *
3 * Copyright (c) 2002 David Howells (dhowells@redhat.com).
4 */
5
6#ifndef RXRPC_INTERNAL_H
7#define RXRPC_INTERNAL_H
8
9#include <linux/compiler.h>
10#include <linux/kernel.h>
11
12/*
13 * debug accounting
14 */
15#if 1
16#define __RXACCT_DECL(X) X
17#define __RXACCT(X) do { X; } while(0)
18#else
19#define __RXACCT_DECL(X)
20#define __RXACCT(X) do { } while(0)
21#endif
22
23__RXACCT_DECL(extern atomic_t rxrpc_transport_count);
24__RXACCT_DECL(extern atomic_t rxrpc_peer_count);
25__RXACCT_DECL(extern atomic_t rxrpc_connection_count);
26__RXACCT_DECL(extern atomic_t rxrpc_call_count);
27__RXACCT_DECL(extern atomic_t rxrpc_message_count);
28
29/*
30 * debug tracing
31 */
32#define kenter(FMT, a...) printk("==> %s("FMT")\n",__FUNCTION__ , ##a)
33#define kleave(FMT, a...) printk("<== %s()"FMT"\n",__FUNCTION__ , ##a)
34#define kdebug(FMT, a...) printk(" "FMT"\n" , ##a)
35#define kproto(FMT, a...) printk("### "FMT"\n" , ##a)
36#define knet(FMT, a...) printk(" "FMT"\n" , ##a)
37
38#if 0
39#define _enter(FMT, a...) kenter(FMT , ##a)
40#define _leave(FMT, a...) kleave(FMT , ##a)
41#define _debug(FMT, a...) kdebug(FMT , ##a)
42#define _proto(FMT, a...) kproto(FMT , ##a)
43#define _net(FMT, a...) knet(FMT , ##a)
44#else
45#define _enter(FMT, a...) do { if (rxrpc_ktrace) kenter(FMT , ##a); } while(0)
46#define _leave(FMT, a...) do { if (rxrpc_ktrace) kleave(FMT , ##a); } while(0)
47#define _debug(FMT, a...) do { if (rxrpc_kdebug) kdebug(FMT , ##a); } while(0)
48#define _proto(FMT, a...) do { if (rxrpc_kproto) kproto(FMT , ##a); } while(0)
49#define _net(FMT, a...) do { if (rxrpc_knet) knet (FMT , ##a); } while(0)
50#endif
51
52static inline void rxrpc_discard_my_signals(void)
53{
54 while (signal_pending(current)) {
55 siginfo_t sinfo;
56
57 spin_lock_irq(&current->sighand->siglock);
58 dequeue_signal(current, &current->blocked, &sinfo);
59 spin_unlock_irq(&current->sighand->siglock);
60 }
61}
62
63/*
64 * call.c
65 */
66extern struct list_head rxrpc_calls;
67extern struct rw_semaphore rxrpc_calls_sem;
68
69/*
70 * connection.c
71 */
72extern struct list_head rxrpc_conns;
73extern struct rw_semaphore rxrpc_conns_sem;
74extern unsigned long rxrpc_conn_timeout;
75
76extern void rxrpc_conn_clearall(struct rxrpc_peer *peer);
77
78/*
79 * peer.c
80 */
81extern struct list_head rxrpc_peers;
82extern struct rw_semaphore rxrpc_peers_sem;
83extern unsigned long rxrpc_peer_timeout;
84
85extern void rxrpc_peer_calculate_rtt(struct rxrpc_peer *peer,
86 struct rxrpc_message *msg,
87 struct rxrpc_message *resp);
88
89extern void rxrpc_peer_clearall(struct rxrpc_transport *trans);
90
91
92/*
93 * proc.c
94 */
95#ifdef CONFIG_PROC_FS
96extern int rxrpc_proc_init(void);
97extern void rxrpc_proc_cleanup(void);
98#endif
99
100/*
101 * transport.c
102 */
103extern struct list_head rxrpc_proc_transports;
104extern struct rw_semaphore rxrpc_proc_transports_sem;
105
106#endif /* RXRPC_INTERNAL_H */
diff --git a/net/rxrpc/krxiod.c b/net/rxrpc/krxiod.c
new file mode 100644
index 000000000000..2b537f425a17
--- /dev/null
+++ b/net/rxrpc/krxiod.c
@@ -0,0 +1,261 @@
1/* krxiod.c: Rx I/O daemon
2 *
3 * Copyright (C) 2002 Red Hat, Inc. All Rights Reserved.
4 * Written by David Howells (dhowells@redhat.com)
5 *
6 * This program is free software; you can redistribute it and/or
7 * modify it under the terms of the GNU General Public License
8 * as published by the Free Software Foundation; either version
9 * 2 of the License, or (at your option) any later version.
10 */
11
12#include <linux/sched.h>
13#include <linux/completion.h>
14#include <linux/spinlock.h>
15#include <linux/init.h>
16#include <rxrpc/krxiod.h>
17#include <rxrpc/transport.h>
18#include <rxrpc/peer.h>
19#include <rxrpc/call.h>
20#include "internal.h"
21
22static DECLARE_WAIT_QUEUE_HEAD(rxrpc_krxiod_sleepq);
23static DECLARE_COMPLETION(rxrpc_krxiod_dead);
24
25static atomic_t rxrpc_krxiod_qcount = ATOMIC_INIT(0);
26
27static LIST_HEAD(rxrpc_krxiod_transportq);
28static DEFINE_SPINLOCK(rxrpc_krxiod_transportq_lock);
29
30static LIST_HEAD(rxrpc_krxiod_callq);
31static DEFINE_SPINLOCK(rxrpc_krxiod_callq_lock);
32
33static volatile int rxrpc_krxiod_die;
34
35/*****************************************************************************/
36/*
37 * Rx I/O daemon
38 */
39static int rxrpc_krxiod(void *arg)
40{
41	DECLARE_WAITQUEUE(krxiod, current);
42
43	printk("Started krxiod %d\n", current->pid);
44
45 daemonize("krxiod");
46
47 /* loop around waiting for work to do */
48 do {
49 /* wait for work or to be told to exit */
50 _debug("### Begin Wait");
51 if (!atomic_read(&rxrpc_krxiod_qcount)) {
52 set_current_state(TASK_INTERRUPTIBLE);
53
54 add_wait_queue(&rxrpc_krxiod_sleepq, &krxiod);
55
56 for (;;) {
57 set_current_state(TASK_INTERRUPTIBLE);
58 if (atomic_read(&rxrpc_krxiod_qcount) ||
59 rxrpc_krxiod_die ||
60 signal_pending(current))
61 break;
62
63 schedule();
64 }
65
66 remove_wait_queue(&rxrpc_krxiod_sleepq, &krxiod);
67 set_current_state(TASK_RUNNING);
68 }
69 _debug("### End Wait");
70
71		/* do work if we've been given some to do */
72 _debug("### Begin Work");
73
74 /* see if there's a transport in need of attention */
75 if (!list_empty(&rxrpc_krxiod_transportq)) {
76 struct rxrpc_transport *trans = NULL;
77
78 spin_lock_irq(&rxrpc_krxiod_transportq_lock);
79
80 if (!list_empty(&rxrpc_krxiod_transportq)) {
81 trans = list_entry(
82 rxrpc_krxiod_transportq.next,
83 struct rxrpc_transport,
84 krxiodq_link);
85
86 list_del_init(&trans->krxiodq_link);
87 atomic_dec(&rxrpc_krxiod_qcount);
88
89 /* make sure it hasn't gone away and doesn't go
90 * away */
91				if (atomic_read(&trans->usage) > 0)
92 rxrpc_get_transport(trans);
93 else
94 trans = NULL;
95 }
96
97 spin_unlock_irq(&rxrpc_krxiod_transportq_lock);
98
99 if (trans) {
100 rxrpc_trans_receive_packet(trans);
101 rxrpc_put_transport(trans);
102 }
103 }
104
105 /* see if there's a call in need of attention */
106 if (!list_empty(&rxrpc_krxiod_callq)) {
107 struct rxrpc_call *call = NULL;
108
109 spin_lock_irq(&rxrpc_krxiod_callq_lock);
110
111 if (!list_empty(&rxrpc_krxiod_callq)) {
112 call = list_entry(rxrpc_krxiod_callq.next,
113 struct rxrpc_call,
114 rcv_krxiodq_lk);
115 list_del_init(&call->rcv_krxiodq_lk);
116 atomic_dec(&rxrpc_krxiod_qcount);
117
118 /* make sure it hasn't gone away and doesn't go
119 * away */
120 if (atomic_read(&call->usage) > 0) {
121 _debug("@@@ KRXIOD"
122 " Begin Attend Call %p", call);
123 rxrpc_get_call(call);
124 }
125 else {
126 call = NULL;
127 }
128 }
129
130 spin_unlock_irq(&rxrpc_krxiod_callq_lock);
131
132 if (call) {
133 rxrpc_call_do_stuff(call);
134 rxrpc_put_call(call);
135 _debug("@@@ KRXIOD End Attend Call %p", call);
136 }
137 }
138
139 _debug("### End Work");
140
141 try_to_freeze(PF_FREEZE);
142
143 /* discard pending signals */
144 rxrpc_discard_my_signals();
145
146 } while (!rxrpc_krxiod_die);
147
148 /* and that's all */
149 complete_and_exit(&rxrpc_krxiod_dead, 0);
150
151} /* end rxrpc_krxiod() */
152
153/*****************************************************************************/
154/*
155 * start up a krxiod daemon
156 */
157int __init rxrpc_krxiod_init(void)
158{
159 return kernel_thread(rxrpc_krxiod, NULL, 0);
160
161} /* end rxrpc_krxiod_init() */
162
163/*****************************************************************************/
164/*
165 * kill the krxiod daemon and wait for it to complete
166 */
167void rxrpc_krxiod_kill(void)
168{
169 rxrpc_krxiod_die = 1;
170 wake_up_all(&rxrpc_krxiod_sleepq);
171 wait_for_completion(&rxrpc_krxiod_dead);
172
173} /* end rxrpc_krxiod_kill() */
174
175/*****************************************************************************/
176/*
177 * queue a transport for attention by krxiod
178 */
179void rxrpc_krxiod_queue_transport(struct rxrpc_transport *trans)
180{
181 unsigned long flags;
182
183 _enter("");
184
185 if (list_empty(&trans->krxiodq_link)) {
186 spin_lock_irqsave(&rxrpc_krxiod_transportq_lock, flags);
187
188 if (list_empty(&trans->krxiodq_link)) {
189 if (atomic_read(&trans->usage) > 0) {
190 list_add_tail(&trans->krxiodq_link,
191 &rxrpc_krxiod_transportq);
192 atomic_inc(&rxrpc_krxiod_qcount);
193 }
194 }
195
196 spin_unlock_irqrestore(&rxrpc_krxiod_transportq_lock, flags);
197 wake_up_all(&rxrpc_krxiod_sleepq);
198 }
199
200 _leave("");
201
202} /* end rxrpc_krxiod_queue_transport() */
203
204/*****************************************************************************/
205/*
206 * dequeue a transport from krxiod's attention queue
207 */
208void rxrpc_krxiod_dequeue_transport(struct rxrpc_transport *trans)
209{
210 unsigned long flags;
211
212 _enter("");
213
214 spin_lock_irqsave(&rxrpc_krxiod_transportq_lock, flags);
215 if (!list_empty(&trans->krxiodq_link)) {
216 list_del_init(&trans->krxiodq_link);
217 atomic_dec(&rxrpc_krxiod_qcount);
218 }
219 spin_unlock_irqrestore(&rxrpc_krxiod_transportq_lock, flags);
220
221 _leave("");
222
223} /* end rxrpc_krxiod_dequeue_transport() */
224
225/*****************************************************************************/
226/*
227 * queue a call for attention by krxiod
228 */
229void rxrpc_krxiod_queue_call(struct rxrpc_call *call)
230{
231 unsigned long flags;
232
233 if (list_empty(&call->rcv_krxiodq_lk)) {
234 spin_lock_irqsave(&rxrpc_krxiod_callq_lock, flags);
235 if (atomic_read(&call->usage) > 0) {
236 list_add_tail(&call->rcv_krxiodq_lk,
237 &rxrpc_krxiod_callq);
238 atomic_inc(&rxrpc_krxiod_qcount);
239 }
240 spin_unlock_irqrestore(&rxrpc_krxiod_callq_lock, flags);
241 }
242 wake_up_all(&rxrpc_krxiod_sleepq);
243
244} /* end rxrpc_krxiod_queue_call() */
245
246/*****************************************************************************/
247/*
248 * dequeue a call from krxiod's attention queue
249 */
250void rxrpc_krxiod_dequeue_call(struct rxrpc_call *call)
251{
252 unsigned long flags;
253
254 spin_lock_irqsave(&rxrpc_krxiod_callq_lock, flags);
255 if (!list_empty(&call->rcv_krxiodq_lk)) {
256 list_del_init(&call->rcv_krxiodq_lk);
257 atomic_dec(&rxrpc_krxiod_qcount);
258 }
259 spin_unlock_irqrestore(&rxrpc_krxiod_callq_lock, flags);
260
261} /* end rxrpc_krxiod_dequeue_call() */
diff --git a/net/rxrpc/krxsecd.c b/net/rxrpc/krxsecd.c
new file mode 100644
index 000000000000..6020c89d9228
--- /dev/null
+++ b/net/rxrpc/krxsecd.c
@@ -0,0 +1,270 @@
1/* krxsecd.c: Rx security daemon
2 *
3 * Copyright (C) 2002 Red Hat, Inc. All Rights Reserved.
4 * Written by David Howells (dhowells@redhat.com)
5 *
6 * This program is free software; you can redistribute it and/or
7 * modify it under the terms of the GNU General Public License
8 * as published by the Free Software Foundation; either version
9 * 2 of the License, or (at your option) any later version.
10 *
11 * This daemon deals with:
12 * - consulting the application as to whether inbound peers and calls should be authorised
13 * - generating security challenges for inbound connections
14 * - responding to security challenges on outbound connections
15 */
16
17#include <linux/module.h>
18#include <linux/sched.h>
19#include <linux/completion.h>
20#include <linux/spinlock.h>
21#include <linux/init.h>
22#include <rxrpc/krxsecd.h>
23#include <rxrpc/transport.h>
24#include <rxrpc/connection.h>
25#include <rxrpc/message.h>
26#include <rxrpc/peer.h>
27#include <rxrpc/call.h>
28#include <linux/udp.h>
29#include <linux/ip.h>
30#include <net/sock.h>
31#include "internal.h"
32
33static DECLARE_WAIT_QUEUE_HEAD(rxrpc_krxsecd_sleepq);
34static DECLARE_COMPLETION(rxrpc_krxsecd_dead);
35static volatile int rxrpc_krxsecd_die;
36
37static atomic_t rxrpc_krxsecd_qcount = ATOMIC_INIT(0);
38
39/* queue of unprocessed inbound messages with seqno #1 and
40 * RXRPC_CLIENT_INITIATED flag set */
41static LIST_HEAD(rxrpc_krxsecd_initmsgq);
42static DEFINE_SPINLOCK(rxrpc_krxsecd_initmsgq_lock);
43
44static void rxrpc_krxsecd_process_incoming_call(struct rxrpc_message *msg);
45
46/*****************************************************************************/
47/*
48 * Rx security daemon
49 */
50static int rxrpc_krxsecd(void *arg)
51{
52 DECLARE_WAITQUEUE(krxsecd, current);
53
54 int die;
55
56 printk("Started krxsecd %d\n", current->pid);
57
58 daemonize("krxsecd");
59
60 /* loop around waiting for work to do */
61 do {
62 /* wait for work or to be told to exit */
63 _debug("### Begin Wait");
64 if (!atomic_read(&rxrpc_krxsecd_qcount)) {
65 set_current_state(TASK_INTERRUPTIBLE);
66
67 add_wait_queue(&rxrpc_krxsecd_sleepq, &krxsecd);
68
69 for (;;) {
70 set_current_state(TASK_INTERRUPTIBLE);
71 if (atomic_read(&rxrpc_krxsecd_qcount) ||
72 rxrpc_krxsecd_die ||
73 signal_pending(current))
74 break;
75
76 schedule();
77 }
78
79 remove_wait_queue(&rxrpc_krxsecd_sleepq, &krxsecd);
80 set_current_state(TASK_RUNNING);
81 }
82 die = rxrpc_krxsecd_die;
83 _debug("### End Wait");
84
85		/* see if there are incoming calls in need of authentication */
86 _debug("### Begin Inbound Calls");
87
88 if (!list_empty(&rxrpc_krxsecd_initmsgq)) {
89 struct rxrpc_message *msg = NULL;
90
91 spin_lock(&rxrpc_krxsecd_initmsgq_lock);
92
93 if (!list_empty(&rxrpc_krxsecd_initmsgq)) {
94 msg = list_entry(rxrpc_krxsecd_initmsgq.next,
95 struct rxrpc_message, link);
96 list_del_init(&msg->link);
97 atomic_dec(&rxrpc_krxsecd_qcount);
98 }
99
100 spin_unlock(&rxrpc_krxsecd_initmsgq_lock);
101
102 if (msg) {
103 rxrpc_krxsecd_process_incoming_call(msg);
104 rxrpc_put_message(msg);
105 }
106 }
107
108 _debug("### End Inbound Calls");
109
110 try_to_freeze(PF_FREEZE);
111
112 /* discard pending signals */
113 rxrpc_discard_my_signals();
114
115 } while (!die);
116
117 /* and that's all */
118 complete_and_exit(&rxrpc_krxsecd_dead, 0);
119
120} /* end rxrpc_krxsecd() */
121
122/*****************************************************************************/
123/*
124 * start up a krxsecd daemon
125 */
126int __init rxrpc_krxsecd_init(void)
127{
128 return kernel_thread(rxrpc_krxsecd, NULL, 0);
129
130} /* end rxrpc_krxsecd_init() */
131
132/*****************************************************************************/
133/*
134 * kill the krxsecd daemon and wait for it to complete
135 */
136void rxrpc_krxsecd_kill(void)
137{
138 rxrpc_krxsecd_die = 1;
139 wake_up_all(&rxrpc_krxsecd_sleepq);
140 wait_for_completion(&rxrpc_krxsecd_dead);
141
142} /* end rxrpc_krxsecd_kill() */
143
144/*****************************************************************************/
145/*
146 * clear all pending incoming calls for the specified transport
147 */
148void rxrpc_krxsecd_clear_transport(struct rxrpc_transport *trans)
149{
150 LIST_HEAD(tmp);
151
152 struct rxrpc_message *msg;
153 struct list_head *_p, *_n;
154
155	_enter("%p", trans);
156
157 /* move all the messages for this transport onto a temp list */
158 spin_lock(&rxrpc_krxsecd_initmsgq_lock);
159
160 list_for_each_safe(_p, _n, &rxrpc_krxsecd_initmsgq) {
161 msg = list_entry(_p, struct rxrpc_message, link);
162 if (msg->trans == trans) {
163 list_del(&msg->link);
164 list_add_tail(&msg->link, &tmp);
165 atomic_dec(&rxrpc_krxsecd_qcount);
166 }
167 }
168
169 spin_unlock(&rxrpc_krxsecd_initmsgq_lock);
170
171 /* zap all messages on the temp list */
172 while (!list_empty(&tmp)) {
173 msg = list_entry(tmp.next, struct rxrpc_message, link);
174 list_del_init(&msg->link);
175 rxrpc_put_message(msg);
176 }
177
178 _leave("");
179} /* end rxrpc_krxsecd_clear_transport() */
180
181/*****************************************************************************/
182/*
183 * queue a message on the incoming calls list
184 */
185void rxrpc_krxsecd_queue_incoming_call(struct rxrpc_message *msg)
186{
187 _enter("%p", msg);
188
189 /* queue for processing by krxsecd */
190 spin_lock(&rxrpc_krxsecd_initmsgq_lock);
191
192 if (!rxrpc_krxsecd_die) {
193 rxrpc_get_message(msg);
194 list_add_tail(&msg->link, &rxrpc_krxsecd_initmsgq);
195 atomic_inc(&rxrpc_krxsecd_qcount);
196 }
197
198 spin_unlock(&rxrpc_krxsecd_initmsgq_lock);
199
200 wake_up(&rxrpc_krxsecd_sleepq);
201
202 _leave("");
203} /* end rxrpc_krxsecd_queue_incoming_call() */
204
205/*****************************************************************************/
206/*
207 * process the initial message of an incoming call
208 */
209void rxrpc_krxsecd_process_incoming_call(struct rxrpc_message *msg)
210{
211 struct rxrpc_transport *trans = msg->trans;
212 struct rxrpc_service *srv;
213 struct rxrpc_call *call;
214 struct list_head *_p;
215 unsigned short sid;
216 int ret;
217
218 _enter("%p{tr=%p}", msg, trans);
219
220 ret = rxrpc_incoming_call(msg->conn, msg, &call);
221 if (ret < 0)
222 goto out;
223
224 /* find the matching service on the transport */
225 sid = ntohs(msg->hdr.serviceId);
226 srv = NULL;
227
228 spin_lock(&trans->lock);
229 list_for_each(_p, &trans->services) {
230 srv = list_entry(_p, struct rxrpc_service, link);
231 if (srv->service_id == sid && try_module_get(srv->owner)) {
232 /* found a match (made sure it won't vanish) */
233 _debug("found service '%s'", srv->name);
234 call->owner = srv->owner;
235 break;
236 }
237 }
238 spin_unlock(&trans->lock);
239
240 /* report the new connection
241	 * - the new_call handler must inc the call's usage count if it wants to keep it
242 */
243 ret = -ENOENT;
244 if (_p != &trans->services) {
245 /* attempt to accept the call */
246 call->conn->service = srv;
247 call->app_attn_func = srv->attn_func;
248 call->app_error_func = srv->error_func;
249 call->app_aemap_func = srv->aemap_func;
250
251 ret = srv->new_call(call);
252
253 /* send an abort if an error occurred */
254 if (ret < 0) {
255 rxrpc_call_abort(call, ret);
256 }
257 else {
258 /* formally receive and ACK the new packet */
259 ret = rxrpc_conn_receive_call_packet(call->conn,
260 call, msg);
261 }
262 }
263
264 rxrpc_put_call(call);
265 out:
266 if (ret < 0)
267 rxrpc_trans_immediate_abort(trans, msg, ret);
268
269 _leave(" (%d)", ret);
270} /* end rxrpc_krxsecd_process_incoming_call() */
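
/*
 * Editor's illustrative sketch - not part of the original file.  It shows
 * the shape of a service record that the matching loop above would find;
 * only the fields this file actually consults are filled in, the callback
 * signatures are inferred from how they are invoked, and all the example_*
 * names are hypothetical.  Registering the record with a transport happens
 * elsewhere.
 */
static void example_attn(struct rxrpc_call *call) { }	/* data/state attention */
static void example_error(struct rxrpc_call *call) { }	/* error notification */
static void example_aemap(struct rxrpc_call *call) { }	/* abort code -> errno */

static int example_new_call(struct rxrpc_call *call)
{
	return 0;	/* 0 accepts the call; a negative errno aborts it */
}

static struct rxrpc_service example_service = {
	.name		= "example",
	.owner		= THIS_MODULE,
	.service_id	= 1234,	/* matched against ntohs(hdr.serviceId) */
	.attn_func	= example_attn,
	.error_func	= example_error,
	.aemap_func	= example_aemap,
	.new_call	= example_new_call,
};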
diff --git a/net/rxrpc/krxtimod.c b/net/rxrpc/krxtimod.c
new file mode 100644
index 000000000000..249c2b0290bb
--- /dev/null
+++ b/net/rxrpc/krxtimod.c
@@ -0,0 +1,203 @@
1/* krxtimod.c: RXRPC timeout daemon
2 *
3 * Copyright (C) 2002 Red Hat, Inc. All Rights Reserved.
4 * Written by David Howells (dhowells@redhat.com)
5 *
6 * This program is free software; you can redistribute it and/or
7 * modify it under the terms of the GNU General Public License
8 * as published by the Free Software Foundation; either version
9 * 2 of the License, or (at your option) any later version.
10 */
11
12#include <linux/module.h>
13#include <linux/init.h>
14#include <linux/sched.h>
15#include <linux/completion.h>
16#include <rxrpc/rxrpc.h>
17#include <rxrpc/krxtimod.h>
18#include <asm/errno.h>
19#include "internal.h"
20
21static DECLARE_COMPLETION(krxtimod_alive);
22static DECLARE_COMPLETION(krxtimod_dead);
23static DECLARE_WAIT_QUEUE_HEAD(krxtimod_sleepq);
24static int krxtimod_die;
25
26static LIST_HEAD(krxtimod_list);
27static DEFINE_SPINLOCK(krxtimod_lock);
28
29static int krxtimod(void *arg);
30
31/*****************************************************************************/
32/*
33 * start the timeout daemon
34 */
35int rxrpc_krxtimod_start(void)
36{
37 int ret;
38
39 ret = kernel_thread(krxtimod, NULL, 0);
40 if (ret < 0)
41 return ret;
42
43 wait_for_completion(&krxtimod_alive);
44
45 return ret;
46} /* end rxrpc_krxtimod_start() */
47
48/*****************************************************************************/
49/*
50 * stop the timeout daemon
51 */
52void rxrpc_krxtimod_kill(void)
53{
54 /* get rid of my daemon */
55 krxtimod_die = 1;
56 wake_up(&krxtimod_sleepq);
57 wait_for_completion(&krxtimod_dead);
58
59} /* end rxrpc_krxtimod_kill() */
60
61/*****************************************************************************/
62/*
63 * timeout processing daemon
64 */
65static int krxtimod(void *arg)
66{
67 DECLARE_WAITQUEUE(myself, current);
68
69 rxrpc_timer_t *timer;
70
71 printk("Started krxtimod %d\n", current->pid);
72
73 daemonize("krxtimod");
74
75 complete(&krxtimod_alive);
76
77 /* loop around looking for things to attend to */
78 loop:
79 set_current_state(TASK_INTERRUPTIBLE);
80 add_wait_queue(&krxtimod_sleepq, &myself);
81
82 for (;;) {
83 unsigned long jif;
84 signed long timeout;
85
86 /* deal with the server being asked to die */
87 if (krxtimod_die) {
88 remove_wait_queue(&krxtimod_sleepq, &myself);
89 _leave("");
90 complete_and_exit(&krxtimod_dead, 0);
91 }
92
93 try_to_freeze(PF_FREEZE);
94
95 /* discard pending signals */
96 rxrpc_discard_my_signals();
97
98 /* work out the time to elapse before the next event */
99 spin_lock(&krxtimod_lock);
100 if (list_empty(&krxtimod_list)) {
101 timeout = MAX_SCHEDULE_TIMEOUT;
102 }
103 else {
104 timer = list_entry(krxtimod_list.next,
105 rxrpc_timer_t, link);
106 timeout = timer->timo_jif;
107 jif = jiffies;
108
109			if (time_before_eq((unsigned long) timeout, jif))
110				goto immediate;
111
112			/* not yet due - work out how long until it is */
113			timeout = (long) timeout - (long) jiffies;
114		}
115
116		spin_unlock(&krxtimod_lock);
117
118 schedule_timeout(timeout);
119
120 set_current_state(TASK_INTERRUPTIBLE);
121 }
122
123 /* the thing on the front of the queue needs processing
124 * - we come here with the lock held and timer pointing to the expired
125 * entry
126 */
127 immediate:
128 remove_wait_queue(&krxtimod_sleepq, &myself);
129 set_current_state(TASK_RUNNING);
130
131 _debug("@@@ Begin Timeout of %p", timer);
132
133 /* dequeue the timer */
134 list_del_init(&timer->link);
135 spin_unlock(&krxtimod_lock);
136
137 /* call the timeout function */
138 timer->ops->timed_out(timer);
139
140 _debug("@@@ End Timeout");
141 goto loop;
142
143} /* end krxtimod() */
144
145/*****************************************************************************/
146/*
147 * (re-)queue a timer
148 */
149void rxrpc_krxtimod_add_timer(rxrpc_timer_t *timer, unsigned long timeout)
150{
151 struct list_head *_p;
152 rxrpc_timer_t *ptimer;
153
154 _enter("%p,%lu", timer, timeout);
155
156 spin_lock(&krxtimod_lock);
157
158 list_del(&timer->link);
159
160 /* the timer was deferred or reset - put it back in the queue at the
161 * right place */
162 timer->timo_jif = jiffies + timeout;
163
164 list_for_each(_p, &krxtimod_list) {
165 ptimer = list_entry(_p, rxrpc_timer_t, link);
166 if (time_before(timer->timo_jif, ptimer->timo_jif))
167 break;
168 }
169
170 list_add_tail(&timer->link, _p); /* insert before stopping point */
171
172 spin_unlock(&krxtimod_lock);
173
174 wake_up(&krxtimod_sleepq);
175
176 _leave("");
177} /* end rxrpc_krxtimod_add_timer() */
178
179/*****************************************************************************/
180/*
181 * dequeue a timer
182 * - returns 0 if the timer was deleted or -ENOENT if it wasn't queued
183 */
184int rxrpc_krxtimod_del_timer(rxrpc_timer_t *timer)
185{
186 int ret = 0;
187
188 _enter("%p", timer);
189
190 spin_lock(&krxtimod_lock);
191
192 if (list_empty(&timer->link))
193 ret = -ENOENT;
194 else
195 list_del_init(&timer->link);
196
197 spin_unlock(&krxtimod_lock);
198
199 wake_up(&krxtimod_sleepq);
200
201 _leave(" = %d", ret);
202 return ret;
203} /* end rxrpc_krxtimod_del_timer() */
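
/*
 * Editor's illustrative sketch - not part of the original file.  It shows
 * the timer idiom used by connection.c and peer.c against the interface
 * above: supply a timed_out op, then (re)queue the timer with a jiffies
 * delta.  The example_* names are hypothetical.
 */
static void example_timed_out(rxrpc_timer_t *timer)
{
	/* runs in krxtimod context once the timeout falls due */
}

static const struct rxrpc_timer_ops example_timer_ops = {
	.timed_out	= example_timed_out,
};

static void example_arm(rxrpc_timer_t *timer)
{
	rxrpc_timer_init(timer, &example_timer_ops);	/* once, at setup */
	rxrpc_krxtimod_add_timer(timer, 5 * HZ);	/* fire in ~5 seconds */
}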
diff --git a/net/rxrpc/main.c b/net/rxrpc/main.c
new file mode 100644
index 000000000000..36fdcbcd80d1
--- /dev/null
+++ b/net/rxrpc/main.c
@@ -0,0 +1,180 @@
1/* main.c: Rx RPC interface
2 *
3 * Copyright (C) 2002 Red Hat, Inc. All Rights Reserved.
4 * Written by David Howells (dhowells@redhat.com)
5 *
6 * This program is free software; you can redistribute it and/or
7 * modify it under the terms of the GNU General Public License
8 * as published by the Free Software Foundation; either version
9 * 2 of the License, or (at your option) any later version.
10 */
11
12#include <linux/module.h>
13#include <linux/init.h>
14#include <linux/sched.h>
15#include <rxrpc/rxrpc.h>
16#include <rxrpc/krxiod.h>
17#include <rxrpc/krxsecd.h>
18#include <rxrpc/krxtimod.h>
19#include <rxrpc/transport.h>
20#include <rxrpc/connection.h>
21#include <rxrpc/call.h>
22#include <rxrpc/message.h>
23#include "internal.h"
24
25MODULE_DESCRIPTION("Rx RPC implementation");
26MODULE_AUTHOR("Red Hat, Inc.");
27MODULE_LICENSE("GPL");
28
29__be32 rxrpc_epoch;
30
31/*****************************************************************************/
32/*
33 * initialise the Rx module
34 */
35static int __init rxrpc_initialise(void)
36{
37 int ret;
38
39 /* my epoch value */
40 rxrpc_epoch = htonl(xtime.tv_sec);
41
42 /* register the /proc interface */
43#ifdef CONFIG_PROC_FS
44 ret = rxrpc_proc_init();
45	if (ret < 0)
46 return ret;
47#endif
48
49 /* register the sysctl files */
50#ifdef CONFIG_SYSCTL
51 ret = rxrpc_sysctl_init();
52	if (ret < 0)
53 goto error_proc;
54#endif
55
56 /* start the krxtimod daemon */
57 ret = rxrpc_krxtimod_start();
58	if (ret < 0)
59 goto error_sysctl;
60
61 /* start the krxiod daemon */
62 ret = rxrpc_krxiod_init();
63	if (ret < 0)
64 goto error_krxtimod;
65
66 /* start the krxsecd daemon */
67 ret = rxrpc_krxsecd_init();
68	if (ret < 0)
69 goto error_krxiod;
70
71 kdebug("\n\n");
72
73 return 0;
74
75 error_krxiod:
76 rxrpc_krxiod_kill();
77 error_krxtimod:
78 rxrpc_krxtimod_kill();
79 error_sysctl:
80#ifdef CONFIG_SYSCTL
81 rxrpc_sysctl_cleanup();
82#endif
83 error_proc:
84#ifdef CONFIG_PROC_FS
85 rxrpc_proc_cleanup();
86#endif
87 return ret;
88} /* end rxrpc_initialise() */
89
90module_init(rxrpc_initialise);
91
92/*****************************************************************************/
93/*
94 * clean up the Rx module
95 */
96static void __exit rxrpc_cleanup(void)
97{
98 kenter("");
99
100 __RXACCT(printk("Outstanding Messages : %d\n",
101 atomic_read(&rxrpc_message_count)));
102 __RXACCT(printk("Outstanding Calls : %d\n",
103 atomic_read(&rxrpc_call_count)));
104 __RXACCT(printk("Outstanding Connections: %d\n",
105 atomic_read(&rxrpc_connection_count)));
106 __RXACCT(printk("Outstanding Peers : %d\n",
107 atomic_read(&rxrpc_peer_count)));
108 __RXACCT(printk("Outstanding Transports : %d\n",
109 atomic_read(&rxrpc_transport_count)));
110
111 rxrpc_krxsecd_kill();
112 rxrpc_krxiod_kill();
113 rxrpc_krxtimod_kill();
114#ifdef CONFIG_SYSCTL
115 rxrpc_sysctl_cleanup();
116#endif
117#ifdef CONFIG_PROC_FS
118 rxrpc_proc_cleanup();
119#endif
120
121 __RXACCT(printk("Outstanding Messages : %d\n",
122 atomic_read(&rxrpc_message_count)));
123 __RXACCT(printk("Outstanding Calls : %d\n",
124 atomic_read(&rxrpc_call_count)));
125 __RXACCT(printk("Outstanding Connections: %d\n",
126 atomic_read(&rxrpc_connection_count)));
127 __RXACCT(printk("Outstanding Peers : %d\n",
128 atomic_read(&rxrpc_peer_count)));
129 __RXACCT(printk("Outstanding Transports : %d\n",
130 atomic_read(&rxrpc_transport_count)));
131
132 kleave("");
133} /* end rxrpc_cleanup() */
134
135module_exit(rxrpc_cleanup);
136
137/*****************************************************************************/
138/*
139 * clear the dead space between task_struct and kernel stack
140 * - these hooks run on every function entry/exit when built with -finstrument-functions
141 */
142#if 0
143void __cyg_profile_func_enter (void *this_fn, void *call_site)
144__attribute__((no_instrument_function));
145
146void __cyg_profile_func_enter (void *this_fn, void *call_site)
147{
148 asm volatile(" movl %%esp,%%edi \n"
149 " andl %0,%%edi \n"
150 " addl %1,%%edi \n"
151 " movl %%esp,%%ecx \n"
152 " subl %%edi,%%ecx \n"
153 " shrl $2,%%ecx \n"
154 " movl $0xedededed,%%eax \n"
155 " rep stosl \n"
156 :
157 : "i"(~(THREAD_SIZE-1)), "i"(sizeof(struct thread_info))
158 : "eax", "ecx", "edi", "memory", "cc"
159 );
160}
161
162void __cyg_profile_func_exit(void *this_fn, void *call_site)
163__attribute__((no_instrument_function));
164
165void __cyg_profile_func_exit(void *this_fn, void *call_site)
166{
167 asm volatile(" movl %%esp,%%edi \n"
168 " andl %0,%%edi \n"
169 " addl %1,%%edi \n"
170 " movl %%esp,%%ecx \n"
171 " subl %%edi,%%ecx \n"
172 " shrl $2,%%ecx \n"
173 " movl $0xdadadada,%%eax \n"
174 " rep stosl \n"
175 :
176 : "i"(~(THREAD_SIZE-1)), "i"(sizeof(struct thread_info))
177 : "eax", "ecx", "edi", "memory", "cc"
178 );
179}
180#endif
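
The error unwinding in rxrpc_initialise() above follows the usual goto-ladder idiom: each facility that comes up successfully gains a label that tears it down again, in reverse order. A minimal sketch of the pattern, using hypothetical setup_*/teardown_* stand-ins rather than anything from this patch:

	static int __init example_init(void)
	{
		int ret;

		ret = setup_a();
		if (ret < 0)
			return ret;	/* nothing to unwind yet */

		ret = setup_b();
		if (ret < 0)
			goto error_a;

		ret = setup_c();
		if (ret < 0)
			goto error_b;

		return 0;

	 error_b:
		teardown_b();
	 error_a:
		teardown_a();
		return ret;
	}
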
diff --git a/net/rxrpc/peer.c b/net/rxrpc/peer.c
new file mode 100644
index 000000000000..ed38f5b17c1b
--- /dev/null
+++ b/net/rxrpc/peer.c
@@ -0,0 +1,399 @@
1/* peer.c: Rx RPC peer management
2 *
3 * Copyright (C) 2002 Red Hat, Inc. All Rights Reserved.
4 * Written by David Howells (dhowells@redhat.com)
5 *
6 * This program is free software; you can redistribute it and/or
7 * modify it under the terms of the GNU General Public License
8 * as published by the Free Software Foundation; either version
9 * 2 of the License, or (at your option) any later version.
10 */
11
12#include <linux/sched.h>
13#include <linux/slab.h>
14#include <linux/module.h>
15#include <rxrpc/rxrpc.h>
16#include <rxrpc/transport.h>
17#include <rxrpc/peer.h>
18#include <rxrpc/connection.h>
19#include <rxrpc/call.h>
20#include <rxrpc/message.h>
21#include <linux/udp.h>
22#include <linux/ip.h>
23#include <net/sock.h>
24#include <asm/uaccess.h>
25#include <asm/div64.h>
26#include "internal.h"
27
28__RXACCT_DECL(atomic_t rxrpc_peer_count);
29LIST_HEAD(rxrpc_peers);
30DECLARE_RWSEM(rxrpc_peers_sem);
31unsigned long rxrpc_peer_timeout = 12 * 60 * 60;
32
33static void rxrpc_peer_do_timeout(struct rxrpc_peer *peer);
34
35static void __rxrpc_peer_timeout(rxrpc_timer_t *timer)
36{
37 struct rxrpc_peer *peer =
38 list_entry(timer, struct rxrpc_peer, timeout);
39
40 _debug("Rx PEER TIMEOUT [%p{u=%d}]", peer, atomic_read(&peer->usage));
41
42 rxrpc_peer_do_timeout(peer);
43}
44
45static const struct rxrpc_timer_ops rxrpc_peer_timer_ops = {
46 .timed_out = __rxrpc_peer_timeout,
47};
48
49/*****************************************************************************/
50/*
51 * create a peer record
52 */
53static int __rxrpc_create_peer(struct rxrpc_transport *trans, __be32 addr,
54 struct rxrpc_peer **_peer)
55{
56 struct rxrpc_peer *peer;
57
58 _enter("%p,%08x", trans, ntohl(addr));
59
60 /* allocate and initialise a peer record */
61 peer = kmalloc(sizeof(struct rxrpc_peer), GFP_KERNEL);
62 if (!peer) {
63 _leave(" = -ENOMEM");
64 return -ENOMEM;
65 }
66
67 memset(peer, 0, sizeof(struct rxrpc_peer));
68 atomic_set(&peer->usage, 1);
69
70 INIT_LIST_HEAD(&peer->link);
71 INIT_LIST_HEAD(&peer->proc_link);
72 INIT_LIST_HEAD(&peer->conn_idlist);
73 INIT_LIST_HEAD(&peer->conn_active);
74 INIT_LIST_HEAD(&peer->conn_graveyard);
75 spin_lock_init(&peer->conn_gylock);
76 init_waitqueue_head(&peer->conn_gy_waitq);
77 rwlock_init(&peer->conn_idlock);
78 rwlock_init(&peer->conn_lock);
79 atomic_set(&peer->conn_count, 0);
80 spin_lock_init(&peer->lock);
81 rxrpc_timer_init(&peer->timeout, &rxrpc_peer_timer_ops);
82
83 peer->addr.s_addr = addr;
84
85 peer->trans = trans;
86 peer->ops = trans->peer_ops;
87
88 __RXACCT(atomic_inc(&rxrpc_peer_count));
89 *_peer = peer;
90 _leave(" = 0 (%p)", peer);
91
92 return 0;
93} /* end __rxrpc_create_peer() */
94
95/*****************************************************************************/
96/*
97 * find a peer record on the specified transport
98 * - returns (if successful) with peer record usage incremented
99 * - resurrects it from the graveyard if found there
100 */
101int rxrpc_peer_lookup(struct rxrpc_transport *trans, __be32 addr,
102 struct rxrpc_peer **_peer)
103{
104 struct rxrpc_peer *peer, *candidate = NULL;
105 struct list_head *_p;
106 int ret;
107
108 _enter("%p{%hu},%08x", trans, trans->port, ntohl(addr));
109
110 /* [common case] search the transport's active list first */
111 read_lock(&trans->peer_lock);
112 list_for_each(_p, &trans->peer_active) {
113 peer = list_entry(_p, struct rxrpc_peer, link);
114 if (peer->addr.s_addr == addr)
115 goto found_active;
116 }
117 read_unlock(&trans->peer_lock);
118
119 /* [uncommon case] not active - create a candidate for a new record */
120 ret = __rxrpc_create_peer(trans, addr, &candidate);
121 if (ret < 0) {
122 _leave(" = %d", ret);
123 return ret;
124 }
125
126 /* search the active list again, just in case it appeared whilst we
127 * were busy */
128 write_lock(&trans->peer_lock);
129 list_for_each(_p, &trans->peer_active) {
130 peer = list_entry(_p, struct rxrpc_peer, link);
131 if (peer->addr.s_addr == addr)
132 goto found_active_second_chance;
133 }
134
135 /* search the transport's graveyard list */
136 spin_lock(&trans->peer_gylock);
137 list_for_each(_p, &trans->peer_graveyard) {
138 peer = list_entry(_p, struct rxrpc_peer, link);
139 if (peer->addr.s_addr == addr)
140 goto found_in_graveyard;
141 }
142 spin_unlock(&trans->peer_gylock);
143
144 /* we can now add the new candidate to the list
145 * - tell the application layer that this peer has been added
146 */
147 rxrpc_get_transport(trans);
148 peer = candidate;
149 candidate = NULL;
150
151 if (peer->ops && peer->ops->adding) {
152 ret = peer->ops->adding(peer);
153 if (ret < 0) {
154 write_unlock(&trans->peer_lock);
155 __RXACCT(atomic_dec(&rxrpc_peer_count));
156 kfree(peer);
157 rxrpc_put_transport(trans);
158 _leave(" = %d", ret);
159 return ret;
160 }
161 }
162
163 atomic_inc(&trans->peer_count);
164
165 make_active:
166 list_add_tail(&peer->link, &trans->peer_active);
167
168 success_uwfree:
169 write_unlock(&trans->peer_lock);
170
171 if (candidate) {
172 __RXACCT(atomic_dec(&rxrpc_peer_count));
173 kfree(candidate);
174 }
175
176 if (list_empty(&peer->proc_link)) {
177 down_write(&rxrpc_peers_sem);
178 list_add_tail(&peer->proc_link, &rxrpc_peers);
179 up_write(&rxrpc_peers_sem);
180 }
181
182 success:
183 *_peer = peer;
184
185 _leave(" = 0 (%p{u=%d cc=%d})",
186 peer,
187 atomic_read(&peer->usage),
188 atomic_read(&peer->conn_count));
189 return 0;
190
191 /* handle the peer being found in the active list straight off */
192 found_active:
193 rxrpc_get_peer(peer);
194 read_unlock(&trans->peer_lock);
195 goto success;
196
197 /* handle resurrecting a peer from the graveyard */
198 found_in_graveyard:
199 rxrpc_get_peer(peer);
200 rxrpc_get_transport(peer->trans);
201 rxrpc_krxtimod_del_timer(&peer->timeout);
202 list_del_init(&peer->link);
203 spin_unlock(&trans->peer_gylock);
204 goto make_active;
205
206 /* handle finding the peer on the second time through the active
207 * list */
208 found_active_second_chance:
209 rxrpc_get_peer(peer);
210 goto success_uwfree;
211
212} /* end rxrpc_peer_lookup() */
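
Note that rxrpc_peer_lookup() returns with the peer's usage count already held, so each successful call must be balanced by rxrpc_put_peer(). A minimal caller sketch, assuming an existing transport `trans`; the address is purely illustrative:

	struct rxrpc_peer *peer;
	int ret;

	ret = rxrpc_peer_lookup(trans, htonl(0x0a000001), &peer); /* 10.0.0.1 */
	if (ret == 0) {
		/* ... use the peer record ... */
		rxrpc_put_peer(peer);	/* the last put sends it to the graveyard */
	}
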
213
214/*****************************************************************************/
215/*
216 * finish with a peer record
217 * - it gets sent to the graveyard from where it can be resurrected or timed
218 * out
219 */
220void rxrpc_put_peer(struct rxrpc_peer *peer)
221{
222 struct rxrpc_transport *trans = peer->trans;
223
224 _enter("%p{cc=%d a=%08x}",
225 peer,
226 atomic_read(&peer->conn_count),
227 ntohl(peer->addr.s_addr));
228
229 /* sanity check */
230 if (atomic_read(&peer->usage) <= 0)
231 BUG();
232
233 write_lock(&trans->peer_lock);
234 spin_lock(&trans->peer_gylock);
235 if (likely(!atomic_dec_and_test(&peer->usage))) {
236 spin_unlock(&trans->peer_gylock);
237 write_unlock(&trans->peer_lock);
238 _leave("");
239 return;
240 }
241
242 /* move to graveyard queue */
243 list_del(&peer->link);
244 write_unlock(&trans->peer_lock);
245
246 list_add_tail(&peer->link, &trans->peer_graveyard);
247
248 BUG_ON(!list_empty(&peer->conn_active));
249
250 rxrpc_krxtimod_add_timer(&peer->timeout, rxrpc_peer_timeout * HZ);
251
252 spin_unlock(&trans->peer_gylock);
253
254 rxrpc_put_transport(trans);
255
256 _leave(" [killed]");
257} /* end rxrpc_put_peer() */
258
259/*****************************************************************************/
260/*
261 * handle a peer timing out in the graveyard
262 * - called from krxtimod
263 */
264static void rxrpc_peer_do_timeout(struct rxrpc_peer *peer)
265{
266 struct rxrpc_transport *trans = peer->trans;
267
268 _enter("%p{u=%d cc=%d a=%08x}",
269 peer,
270 atomic_read(&peer->usage),
271 atomic_read(&peer->conn_count),
272 ntohl(peer->addr.s_addr));
273
274 BUG_ON(atomic_read(&peer->usage) < 0);
275
276 /* remove from graveyard if still dead */
277 spin_lock(&trans->peer_gylock);
278 if (atomic_read(&peer->usage) == 0)
279 list_del_init(&peer->link);
280 else
281 peer = NULL;
282 spin_unlock(&trans->peer_gylock);
283
284 if (!peer) {
285 _leave("");
286 return; /* resurrected */
287 }
288
289 /* clear all connections on this peer */
290 rxrpc_conn_clearall(peer);
291
292 BUG_ON(!list_empty(&peer->conn_active));
293 BUG_ON(!list_empty(&peer->conn_graveyard));
294
295 /* inform the application layer */
296 if (peer->ops && peer->ops->discarding)
297 peer->ops->discarding(peer);
298
299 if (!list_empty(&peer->proc_link)) {
300 down_write(&rxrpc_peers_sem);
301 list_del(&peer->proc_link);
302 up_write(&rxrpc_peers_sem);
303 }
304
305 __RXACCT(atomic_dec(&rxrpc_peer_count));
306 kfree(peer);
307
308	/* if that was the transport's last peer, wake up anyone waiting
	 * for the graveyard to clear */
309 if (atomic_dec_and_test(&trans->peer_count))
310 wake_up(&trans->peer_gy_waitq);
311
312 _leave(" [destroyed]");
313} /* end rxrpc_peer_do_timeout() */
314
315/*****************************************************************************/
316/*
317 * clear all peer records from a transport endpoint
318 */
319void rxrpc_peer_clearall(struct rxrpc_transport *trans)
320{
321	DECLARE_WAITQUEUE(myself, current);
322
323 struct rxrpc_peer *peer;
324 int err;
325
326	_enter("%p", trans);
327
328 /* there shouldn't be any active peers remaining */
329 BUG_ON(!list_empty(&trans->peer_active));
330
331 /* manually timeout all peers in the graveyard */
332 spin_lock(&trans->peer_gylock);
333 while (!list_empty(&trans->peer_graveyard)) {
334 peer = list_entry(trans->peer_graveyard.next,
335 struct rxrpc_peer, link);
336 _debug("Clearing peer %p\n", peer);
337 err = rxrpc_krxtimod_del_timer(&peer->timeout);
338 spin_unlock(&trans->peer_gylock);
339
340 if (err == 0)
341 rxrpc_peer_do_timeout(peer);
342
343 spin_lock(&trans->peer_gylock);
344 }
345 spin_unlock(&trans->peer_gylock);
346
347	/* wait for the peer graveyard to be completely cleared */
348 set_current_state(TASK_UNINTERRUPTIBLE);
349 add_wait_queue(&trans->peer_gy_waitq, &myself);
350
351 while (atomic_read(&trans->peer_count) != 0) {
352 schedule();
353 set_current_state(TASK_UNINTERRUPTIBLE);
354 }
355
356 remove_wait_queue(&trans->peer_gy_waitq, &myself);
357 set_current_state(TASK_RUNNING);
358
359 _leave("");
360} /* end rxrpc_peer_clearall() */
361
362/*****************************************************************************/
363/*
364 * calculate and cache the Round-Trip-Time for a message and its response
365 */
366void rxrpc_peer_calculate_rtt(struct rxrpc_peer *peer,
367 struct rxrpc_message *msg,
368 struct rxrpc_message *resp)
369{
370 unsigned long long rtt;
371 int loop;
372
373 _enter("%p,%p,%p", peer, msg, resp);
374
375 /* calculate the latest RTT */
376 rtt = resp->stamp.tv_sec - msg->stamp.tv_sec;
377 rtt *= 1000000UL;
378 rtt += resp->stamp.tv_usec - msg->stamp.tv_usec;
379
380 /* add to cache */
381 peer->rtt_cache[peer->rtt_point] = rtt;
382 peer->rtt_point++;
383 peer->rtt_point %= RXRPC_RTT_CACHE_SIZE;
384
385 if (peer->rtt_usage < RXRPC_RTT_CACHE_SIZE)
386 peer->rtt_usage++;
387
388 /* recalculate RTT */
389 rtt = 0;
390 for (loop = peer->rtt_usage - 1; loop >= 0; loop--)
391 rtt += peer->rtt_cache[loop];
392
393 do_div(rtt, peer->rtt_usage);
394 peer->rtt = rtt;
395
396 _leave(" RTT=%lu.%lums",
397 (long) (peer->rtt / 1000), (long) (peer->rtt % 1000));
398
399} /* end rxrpc_peer_calculate_rtt() */
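
In other words, the function above keeps a ring of up to RXRPC_RTT_CACHE_SIZE microsecond samples and publishes their arithmetic mean:

	rtt_sample = 10^6 * (resp.tv_sec - msg.tv_sec) + (resp.tv_usec - msg.tv_usec)
	peer->rtt  = (rtt_cache[0] + ... + rtt_cache[n-1]) / n,   n = min(samples seen, RXRPC_RTT_CACHE_SIZE)

do_div() is used for the final division because the accumulator is 64-bit and the code must also build on 32-bit targets.
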
diff --git a/net/rxrpc/proc.c b/net/rxrpc/proc.c
new file mode 100644
index 000000000000..3b5ecd8e2401
--- /dev/null
+++ b/net/rxrpc/proc.c
@@ -0,0 +1,617 @@
1/* proc.c: /proc interface for RxRPC
2 *
3 * Copyright (C) 2002 Red Hat, Inc. All Rights Reserved.
4 * Written by David Howells (dhowells@redhat.com)
5 *
6 * This program is free software; you can redistribute it and/or
7 * modify it under the terms of the GNU General Public License
8 * as published by the Free Software Foundation; either version
9 * 2 of the License, or (at your option) any later version.
10 */
11
12#include <linux/sched.h>
13#include <linux/slab.h>
14#include <linux/module.h>
15#include <linux/proc_fs.h>
16#include <linux/seq_file.h>
17#include <rxrpc/rxrpc.h>
18#include <rxrpc/transport.h>
19#include <rxrpc/peer.h>
20#include <rxrpc/connection.h>
21#include <rxrpc/call.h>
22#include <rxrpc/message.h>
23#include "internal.h"
24
25static struct proc_dir_entry *proc_rxrpc;
26
27static int rxrpc_proc_transports_open(struct inode *inode, struct file *file);
28static void *rxrpc_proc_transports_start(struct seq_file *p, loff_t *pos);
29static void *rxrpc_proc_transports_next(struct seq_file *p, void *v, loff_t *pos);
30static void rxrpc_proc_transports_stop(struct seq_file *p, void *v);
31static int rxrpc_proc_transports_show(struct seq_file *m, void *v);
32
33static struct seq_operations rxrpc_proc_transports_ops = {
34 .start = rxrpc_proc_transports_start,
35 .next = rxrpc_proc_transports_next,
36 .stop = rxrpc_proc_transports_stop,
37 .show = rxrpc_proc_transports_show,
38};
39
40static struct file_operations rxrpc_proc_transports_fops = {
41 .open = rxrpc_proc_transports_open,
42 .read = seq_read,
43 .llseek = seq_lseek,
44 .release = seq_release,
45};
46
47static int rxrpc_proc_peers_open(struct inode *inode, struct file *file);
48static void *rxrpc_proc_peers_start(struct seq_file *p, loff_t *pos);
49static void *rxrpc_proc_peers_next(struct seq_file *p, void *v, loff_t *pos);
50static void rxrpc_proc_peers_stop(struct seq_file *p, void *v);
51static int rxrpc_proc_peers_show(struct seq_file *m, void *v);
52
53static struct seq_operations rxrpc_proc_peers_ops = {
54 .start = rxrpc_proc_peers_start,
55 .next = rxrpc_proc_peers_next,
56 .stop = rxrpc_proc_peers_stop,
57 .show = rxrpc_proc_peers_show,
58};
59
60static struct file_operations rxrpc_proc_peers_fops = {
61 .open = rxrpc_proc_peers_open,
62 .read = seq_read,
63 .llseek = seq_lseek,
64 .release = seq_release,
65};
66
67static int rxrpc_proc_conns_open(struct inode *inode, struct file *file);
68static void *rxrpc_proc_conns_start(struct seq_file *p, loff_t *pos);
69static void *rxrpc_proc_conns_next(struct seq_file *p, void *v, loff_t *pos);
70static void rxrpc_proc_conns_stop(struct seq_file *p, void *v);
71static int rxrpc_proc_conns_show(struct seq_file *m, void *v);
72
73static struct seq_operations rxrpc_proc_conns_ops = {
74 .start = rxrpc_proc_conns_start,
75 .next = rxrpc_proc_conns_next,
76 .stop = rxrpc_proc_conns_stop,
77 .show = rxrpc_proc_conns_show,
78};
79
80static struct file_operations rxrpc_proc_conns_fops = {
81 .open = rxrpc_proc_conns_open,
82 .read = seq_read,
83 .llseek = seq_lseek,
84 .release = seq_release,
85};
86
87static int rxrpc_proc_calls_open(struct inode *inode, struct file *file);
88static void *rxrpc_proc_calls_start(struct seq_file *p, loff_t *pos);
89static void *rxrpc_proc_calls_next(struct seq_file *p, void *v, loff_t *pos);
90static void rxrpc_proc_calls_stop(struct seq_file *p, void *v);
91static int rxrpc_proc_calls_show(struct seq_file *m, void *v);
92
93static struct seq_operations rxrpc_proc_calls_ops = {
94 .start = rxrpc_proc_calls_start,
95 .next = rxrpc_proc_calls_next,
96 .stop = rxrpc_proc_calls_stop,
97 .show = rxrpc_proc_calls_show,
98};
99
100static struct file_operations rxrpc_proc_calls_fops = {
101 .open = rxrpc_proc_calls_open,
102 .read = seq_read,
103 .llseek = seq_lseek,
104 .release = seq_release,
105};
106
107static const char *rxrpc_call_states7[] = {
108 "complet",
109 "error ",
110 "rcv_op ",
111 "rcv_arg",
112 "got_arg",
113 "snd_rpl",
114 "fin_ack",
115 "snd_arg",
116 "rcv_rpl",
117 "got_rpl"
118};
119
120static const char *rxrpc_call_error_states7[] = {
121 "no_err ",
122 "loc_abt",
123 "rmt_abt",
124 "loc_err",
125 "rmt_err"
126};
127
128/*****************************************************************************/
129/*
130 * initialise the /proc/net/rxrpc/ directory
131 */
132int rxrpc_proc_init(void)
133{
134 struct proc_dir_entry *p;
135
136 proc_rxrpc = proc_mkdir("rxrpc", proc_net);
137 if (!proc_rxrpc)
138 goto error;
139 proc_rxrpc->owner = THIS_MODULE;
140
141 p = create_proc_entry("calls", 0, proc_rxrpc);
142 if (!p)
143 goto error_proc;
144 p->proc_fops = &rxrpc_proc_calls_fops;
145 p->owner = THIS_MODULE;
146
147 p = create_proc_entry("connections", 0, proc_rxrpc);
148 if (!p)
149 goto error_calls;
150 p->proc_fops = &rxrpc_proc_conns_fops;
151 p->owner = THIS_MODULE;
152
153 p = create_proc_entry("peers", 0, proc_rxrpc);
154 if (!p)
155 goto error_calls;
156 p->proc_fops = &rxrpc_proc_peers_fops;
157 p->owner = THIS_MODULE;
158
159 p = create_proc_entry("transports", 0, proc_rxrpc);
160 if (!p)
161 goto error_conns;
162 p->proc_fops = &rxrpc_proc_transports_fops;
163 p->owner = THIS_MODULE;
164
165 return 0;
166
167 error_conns:
168 remove_proc_entry("connections", proc_rxrpc);
169 error_calls:
170 remove_proc_entry("calls", proc_rxrpc);
171 error_proc:
172 remove_proc_entry("rxrpc", proc_net);
173 error:
174 return -ENOMEM;
175} /* end rxrpc_proc_init() */
176
177/*****************************************************************************/
178/*
179 * clean up the /proc/net/rxrpc/ directory
180 */
181void rxrpc_proc_cleanup(void)
182{
183 remove_proc_entry("transports", proc_rxrpc);
184 remove_proc_entry("peers", proc_rxrpc);
185 remove_proc_entry("connections", proc_rxrpc);
186 remove_proc_entry("calls", proc_rxrpc);
187
188 remove_proc_entry("rxrpc", proc_net);
189
190} /* end rxrpc_proc_cleanup() */
191
192/*****************************************************************************/
193/*
194 * open "/proc/net/rxrpc/transports" which provides a summary of extant transports
195 */
196static int rxrpc_proc_transports_open(struct inode *inode, struct file *file)
197{
198 struct seq_file *m;
199 int ret;
200
201 ret = seq_open(file, &rxrpc_proc_transports_ops);
202 if (ret < 0)
203 return ret;
204
205 m = file->private_data;
206 m->private = PDE(inode)->data;
207
208 return 0;
209} /* end rxrpc_proc_transports_open() */
210
211/*****************************************************************************/
212/*
213 * set up the iterator to start reading from the transports list and return the first item
214 */
215static void *rxrpc_proc_transports_start(struct seq_file *m, loff_t *_pos)
216{
217 struct list_head *_p;
218 loff_t pos = *_pos;
219
220 /* lock the list against modification */
221 down_read(&rxrpc_proc_transports_sem);
222
223 /* allow for the header line */
224 if (!pos)
225 return SEQ_START_TOKEN;
226 pos--;
227
228 /* find the n'th element in the list */
229 list_for_each(_p, &rxrpc_proc_transports)
230 if (!pos--)
231 break;
232
233 return _p != &rxrpc_proc_transports ? _p : NULL;
234} /* end rxrpc_proc_transports_start() */
235
236/*****************************************************************************/
237/*
238 * move to next transport in the transports list
239 */
240static void *rxrpc_proc_transports_next(struct seq_file *p, void *v, loff_t *pos)
241{
242 struct list_head *_p;
243
244 (*pos)++;
245
246 _p = v;
247 _p = (v == SEQ_START_TOKEN) ? rxrpc_proc_transports.next : _p->next;
248
249 return _p != &rxrpc_proc_transports ? _p : NULL;
250} /* end rxrpc_proc_transports_next() */
251
252/*****************************************************************************/
253/*
254 * clean up after reading from the transports list
255 */
256static void rxrpc_proc_transports_stop(struct seq_file *p, void *v)
257{
258 up_read(&rxrpc_proc_transports_sem);
259
260} /* end rxrpc_proc_transports_stop() */
261
262/*****************************************************************************/
263/*
264 * display a header line followed by a load of transport lines
265 */
266static int rxrpc_proc_transports_show(struct seq_file *m, void *v)
267{
268 struct rxrpc_transport *trans =
269 list_entry(v, struct rxrpc_transport, proc_link);
270
271 /* display header on line 1 */
272 if (v == SEQ_START_TOKEN) {
273 seq_puts(m, "LOCAL USE\n");
274 return 0;
275 }
276
277 /* display one transport per line on subsequent lines */
278 seq_printf(m, "%5hu %3d\n",
279 trans->port,
280 atomic_read(&trans->usage)
281 );
282
283 return 0;
284} /* end rxrpc_proc_transports_show() */
285
286/*****************************************************************************/
287/*
288 * open "/proc/net/rxrpc/peers" which provides a summary of extant peers
289 */
290static int rxrpc_proc_peers_open(struct inode *inode, struct file *file)
291{
292 struct seq_file *m;
293 int ret;
294
295 ret = seq_open(file, &rxrpc_proc_peers_ops);
296 if (ret < 0)
297 return ret;
298
299 m = file->private_data;
300 m->private = PDE(inode)->data;
301
302 return 0;
303} /* end rxrpc_proc_peers_open() */
304
305/*****************************************************************************/
306/*
307 * set up the iterator to start reading from the peers list and return the
308 * first item
309 */
310static void *rxrpc_proc_peers_start(struct seq_file *m, loff_t *_pos)
311{
312 struct list_head *_p;
313 loff_t pos = *_pos;
314
315 /* lock the list against modification */
316 down_read(&rxrpc_peers_sem);
317
318 /* allow for the header line */
319 if (!pos)
320 return SEQ_START_TOKEN;
321 pos--;
322
323 /* find the n'th element in the list */
324 list_for_each(_p, &rxrpc_peers)
325 if (!pos--)
326 break;
327
328 return _p != &rxrpc_peers ? _p : NULL;
329} /* end rxrpc_proc_peers_start() */
330
331/*****************************************************************************/
332/*
333 * move to next peer in the peers list
334 */
335static void *rxrpc_proc_peers_next(struct seq_file *p, void *v, loff_t *pos)
336{
337 struct list_head *_p;
338
339 (*pos)++;
340
341 _p = v;
342 _p = (v == SEQ_START_TOKEN) ? rxrpc_peers.next : _p->next;
343
344 return _p != &rxrpc_peers ? _p : NULL;
345} /* end rxrpc_proc_peers_next() */
346
347/*****************************************************************************/
348/*
349 * clean up after reading from the peers list
350 */
351static void rxrpc_proc_peers_stop(struct seq_file *p, void *v)
352{
353 up_read(&rxrpc_peers_sem);
354
355} /* end rxrpc_proc_peers_stop() */
356
357/*****************************************************************************/
358/*
359 * display a header line followed by a load of peer lines
360 */
361static int rxrpc_proc_peers_show(struct seq_file *m, void *v)
362{
363 struct rxrpc_peer *peer = list_entry(v, struct rxrpc_peer, proc_link);
364 signed long timeout;
365
366 /* display header on line 1 */
367 if (v == SEQ_START_TOKEN) {
368 seq_puts(m, "LOCAL REMOTE USAGE CONNS TIMEOUT"
369 " MTU RTT(uS)\n");
370 return 0;
371 }
372
373 /* display one peer per line on subsequent lines */
374 timeout = 0;
375 if (!list_empty(&peer->timeout.link))
376 timeout = (signed long) peer->timeout.timo_jif -
377 (signed long) jiffies;
378
379 seq_printf(m, "%5hu %08x %5d %5d %8ld %5Zu %7lu\n",
380 peer->trans->port,
381 ntohl(peer->addr.s_addr),
382 atomic_read(&peer->usage),
383 atomic_read(&peer->conn_count),
384 timeout,
385 peer->if_mtu,
386 (long) peer->rtt
387 );
388
389 return 0;
390} /* end rxrpc_proc_peers_show() */
391
392/*****************************************************************************/
393/*
394 * open "/proc/net/rxrpc/connections" which provides a summary of extant
395 * connections
396 */
397static int rxrpc_proc_conns_open(struct inode *inode, struct file *file)
398{
399 struct seq_file *m;
400 int ret;
401
402 ret = seq_open(file, &rxrpc_proc_conns_ops);
403 if (ret < 0)
404 return ret;
405
406 m = file->private_data;
407 m->private = PDE(inode)->data;
408
409 return 0;
410} /* end rxrpc_proc_conns_open() */
411
412/*****************************************************************************/
413/*
414 * set up the iterator to start reading from the conns list and return the
415 * first item
416 */
417static void *rxrpc_proc_conns_start(struct seq_file *m, loff_t *_pos)
418{
419 struct list_head *_p;
420 loff_t pos = *_pos;
421
422 /* lock the list against modification */
423 down_read(&rxrpc_conns_sem);
424
425 /* allow for the header line */
426 if (!pos)
427 return SEQ_START_TOKEN;
428 pos--;
429
430 /* find the n'th element in the list */
431 list_for_each(_p, &rxrpc_conns)
432 if (!pos--)
433 break;
434
435 return _p != &rxrpc_conns ? _p : NULL;
436} /* end rxrpc_proc_conns_start() */
437
438/*****************************************************************************/
439/*
440 * move to next conn in conns list
441 */
442static void *rxrpc_proc_conns_next(struct seq_file *p, void *v, loff_t *pos)
443{
444 struct list_head *_p;
445
446 (*pos)++;
447
448 _p = v;
449 _p = (v == SEQ_START_TOKEN) ? rxrpc_conns.next : _p->next;
450
451 return _p != &rxrpc_conns ? _p : NULL;
452} /* end rxrpc_proc_conns_next() */
453
454/*****************************************************************************/
455/*
456 * clean up after reading from the conns list
457 */
458static void rxrpc_proc_conns_stop(struct seq_file *p, void *v)
459{
460 up_read(&rxrpc_conns_sem);
461
462} /* end rxrpc_proc_conns_stop() */
463
464/*****************************************************************************/
465/*
466 * display a header line followed by a load of conn lines
467 */
468static int rxrpc_proc_conns_show(struct seq_file *m, void *v)
469{
470 struct rxrpc_connection *conn;
471 signed long timeout;
472
473 conn = list_entry(v, struct rxrpc_connection, proc_link);
474
475 /* display header on line 1 */
476 if (v == SEQ_START_TOKEN) {
477 seq_puts(m,
478 "LOCAL REMOTE RPORT SRVC CONN END SERIALNO "
479 "CALLNO MTU TIMEOUT"
480 "\n");
481 return 0;
482 }
483
484 /* display one conn per line on subsequent lines */
485 timeout = 0;
486 if (!list_empty(&conn->timeout.link))
487 timeout = (signed long) conn->timeout.timo_jif -
488 (signed long) jiffies;
489
490 seq_printf(m,
491 "%5hu %08x %5hu %04hx %08x %-3.3s %08x %08x %5Zu %8ld\n",
492 conn->trans->port,
493 ntohl(conn->addr.sin_addr.s_addr),
494 ntohs(conn->addr.sin_port),
495 ntohs(conn->service_id),
496 ntohl(conn->conn_id),
497 conn->out_clientflag ? "CLT" : "SRV",
498 conn->serial_counter,
499 conn->call_counter,
500 conn->mtu_size,
501 timeout
502 );
503
504 return 0;
505} /* end rxrpc_proc_conns_show() */
506
507/*****************************************************************************/
508/*
509 * open "/proc/net/rxrpc/calls" which provides a summary of extant calls
510 */
511static int rxrpc_proc_calls_open(struct inode *inode, struct file *file)
512{
513 struct seq_file *m;
514 int ret;
515
516 ret = seq_open(file, &rxrpc_proc_calls_ops);
517 if (ret < 0)
518 return ret;
519
520 m = file->private_data;
521 m->private = PDE(inode)->data;
522
523 return 0;
524} /* end rxrpc_proc_calls_open() */
525
526/*****************************************************************************/
527/*
528 * set up the iterator to start reading from the calls list and return the
529 * first item
530 */
531static void *rxrpc_proc_calls_start(struct seq_file *m, loff_t *_pos)
532{
533 struct list_head *_p;
534 loff_t pos = *_pos;
535
536 /* lock the list against modification */
537 down_read(&rxrpc_calls_sem);
538
539 /* allow for the header line */
540 if (!pos)
541 return SEQ_START_TOKEN;
542 pos--;
543
544 /* find the n'th element in the list */
545 list_for_each(_p, &rxrpc_calls)
546 if (!pos--)
547 break;
548
549 return _p != &rxrpc_calls ? _p : NULL;
550} /* end rxrpc_proc_calls_start() */
551
552/*****************************************************************************/
553/*
554 * move to next call in calls list
555 */
556static void *rxrpc_proc_calls_next(struct seq_file *p, void *v, loff_t *pos)
557{
558 struct list_head *_p;
559
560 (*pos)++;
561
562 _p = v;
563 _p = (v == SEQ_START_TOKEN) ? rxrpc_calls.next : _p->next;
564
565 return _p != &rxrpc_calls ? _p : NULL;
566} /* end rxrpc_proc_calls_next() */
567
568/*****************************************************************************/
569/*
570 * clean up after reading from the calls list
571 */
572static void rxrpc_proc_calls_stop(struct seq_file *p, void *v)
573{
574 up_read(&rxrpc_calls_sem);
575
576} /* end rxrpc_proc_calls_stop() */
577
578/*****************************************************************************/
579/*
580 * display a header line followed by a load of call lines
581 */
582static int rxrpc_proc_calls_show(struct seq_file *m, void *v)
583{
584 struct rxrpc_call *call = list_entry(v, struct rxrpc_call, call_link);
585
586 /* display header on line 1 */
587 if (v == SEQ_START_TOKEN) {
588 seq_puts(m,
589 "LOCAL REMOT SRVC CONN CALL DIR USE "
590 " L STATE OPCODE ABORT ERRNO\n"
591 );
592 return 0;
593 }
594
595 /* display one call per line on subsequent lines */
596 seq_printf(m,
597 "%5hu %5hu %04hx %08x %08x %s %3u%c"
598 " %c %-7.7s %6d %08x %5d\n",
599 call->conn->trans->port,
600 ntohs(call->conn->addr.sin_port),
601 ntohs(call->conn->service_id),
602 ntohl(call->conn->conn_id),
603 ntohl(call->call_id),
604 call->conn->service ? "SVC" : "CLT",
605 atomic_read(&call->usage),
606 waitqueue_active(&call->waitq) ? 'w' : ' ',
607 call->app_last_rcv ? 'Y' : '-',
608	   (call->app_call_state != RXRPC_CSTATE_ERROR ?
609 rxrpc_call_states7[call->app_call_state] :
610 rxrpc_call_error_states7[call->app_err_state]),
611 call->app_opcode,
612 call->app_abort_code,
613 call->app_errno
614 );
615
616 return 0;
617} /* end rxrpc_proc_calls_show() */
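
Going by the header strings and seq_printf() formats above, reading /proc/net/rxrpc/transports would produce output shaped like this (the values are purely illustrative, not taken from a real run):

	LOCAL USE
	 7001   1

The peers, connections and calls files follow the same scheme: a fixed header line emitted for SEQ_START_TOKEN, then one fixed-width record per list entry.
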
diff --git a/net/rxrpc/rxrpc_syms.c b/net/rxrpc/rxrpc_syms.c
new file mode 100644
index 000000000000..56adf16fed0c
--- /dev/null
+++ b/net/rxrpc/rxrpc_syms.c
@@ -0,0 +1,35 @@
1/* rxrpc_syms.c: exported Rx RPC layer interface symbols
2 *
3 * Copyright (C) 2002 Red Hat, Inc. All Rights Reserved.
4 * Written by David Howells (dhowells@redhat.com)
5 *
6 * This program is free software; you can redistribute it and/or
7 * modify it under the terms of the GNU General Public License
8 * as published by the Free Software Foundation; either version
9 * 2 of the License, or (at your option) any later version.
10 */
11
12#include <linux/config.h>
13#include <linux/module.h>
14
15#include <rxrpc/transport.h>
16#include <rxrpc/connection.h>
17#include <rxrpc/call.h>
18#include <rxrpc/krxiod.h>
19
20/* call.c */
21EXPORT_SYMBOL(rxrpc_create_call);
22EXPORT_SYMBOL(rxrpc_put_call);
23EXPORT_SYMBOL(rxrpc_call_abort);
24EXPORT_SYMBOL(rxrpc_call_read_data);
25EXPORT_SYMBOL(rxrpc_call_write_data);
26
27/* connection.c */
28EXPORT_SYMBOL(rxrpc_create_connection);
29EXPORT_SYMBOL(rxrpc_put_connection);
30
31/* transport.c */
32EXPORT_SYMBOL(rxrpc_create_transport);
33EXPORT_SYMBOL(rxrpc_put_transport);
34EXPORT_SYMBOL(rxrpc_add_service);
35EXPORT_SYMBOL(rxrpc_del_service);
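
The list above is the layer's public surface. A minimal in-kernel consumer sketch; only rxrpc_create_transport() and rxrpc_put_transport() have their signatures confirmed by this patch (transport.c), and the port number and module names are arbitrary stand-ins:

	#include <linux/module.h>
	#include <linux/init.h>
	#include <rxrpc/transport.h>

	static struct rxrpc_transport *my_trans;

	static int __init my_rx_user_init(void)
	{
		/* bind a UDP transport endpoint on an arbitrary port */
		return rxrpc_create_transport(7001, &my_trans);
	}

	static void __exit my_rx_user_exit(void)
	{
		rxrpc_put_transport(my_trans);
	}

	module_init(my_rx_user_init);
	module_exit(my_rx_user_exit);
	MODULE_LICENSE("GPL");
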
diff --git a/net/rxrpc/sysctl.c b/net/rxrpc/sysctl.c
new file mode 100644
index 000000000000..fbf98729c748
--- /dev/null
+++ b/net/rxrpc/sysctl.c
@@ -0,0 +1,122 @@
1/* sysctl.c: Rx RPC control
2 *
3 * Copyright (C) 2002 Red Hat, Inc. All Rights Reserved.
4 * Written by David Howells (dhowells@redhat.com)
5 *
6 * This program is free software; you can redistribute it and/or
7 * modify it under the terms of the GNU General Public License
8 * as published by the Free Software Foundation; either version
9 * 2 of the License, or (at your option) any later version.
10 */
11
12#include <linux/config.h>
13#include <linux/sched.h>
14#include <linux/slab.h>
15#include <linux/module.h>
16#include <linux/sysctl.h>
17#include <rxrpc/types.h>
18#include <rxrpc/rxrpc.h>
19#include <asm/errno.h>
20#include "internal.h"
21
22int rxrpc_ktrace;
23int rxrpc_kdebug;
24int rxrpc_kproto;
25int rxrpc_knet;
26
27#ifdef CONFIG_SYSCTL
28static struct ctl_table_header *rxrpc_sysctl = NULL;
29
30static ctl_table rxrpc_sysctl_table[] = {
31 {
32 .ctl_name = 1,
33 .procname = "kdebug",
34 .data = &rxrpc_kdebug,
35 .maxlen = sizeof(int),
36 .mode = 0644,
37 .proc_handler = &proc_dointvec
38 },
39 {
40 .ctl_name = 2,
41 .procname = "ktrace",
42 .data = &rxrpc_ktrace,
43 .maxlen = sizeof(int),
44 .mode = 0644,
45 .proc_handler = &proc_dointvec
46 },
47 {
48 .ctl_name = 3,
49 .procname = "kproto",
50 .data = &rxrpc_kproto,
51 .maxlen = sizeof(int),
52 .mode = 0644,
53 .proc_handler = &proc_dointvec
54 },
55 {
56 .ctl_name = 4,
57 .procname = "knet",
58 .data = &rxrpc_knet,
59 .maxlen = sizeof(int),
60 .mode = 0644,
61 .proc_handler = &proc_dointvec
62 },
63 {
64 .ctl_name = 5,
65 .procname = "peertimo",
66 .data = &rxrpc_peer_timeout,
67 .maxlen = sizeof(unsigned long),
68 .mode = 0644,
69 .proc_handler = &proc_doulongvec_minmax
70 },
71 {
72 .ctl_name = 6,
73 .procname = "conntimo",
74 .data = &rxrpc_conn_timeout,
75 .maxlen = sizeof(unsigned long),
76 .mode = 0644,
77 .proc_handler = &proc_doulongvec_minmax
78 },
79 { .ctl_name = 0 }
80};
81
82static ctl_table rxrpc_dir_sysctl_table[] = {
83 {
84 .ctl_name = 1,
85 .procname = "rxrpc",
86 .maxlen = 0,
87 .mode = 0555,
88 .child = rxrpc_sysctl_table
89 },
90 { .ctl_name = 0 }
91};
92#endif /* CONFIG_SYSCTL */
93
94/*****************************************************************************/
95/*
96 * initialise the sysctl stuff for Rx RPC
97 */
98int rxrpc_sysctl_init(void)
99{
100#ifdef CONFIG_SYSCTL
101 rxrpc_sysctl = register_sysctl_table(rxrpc_dir_sysctl_table, 0);
102 if (!rxrpc_sysctl)
103 return -ENOMEM;
104#endif /* CONFIG_SYSCTL */
105
106 return 0;
107} /* end rxrpc_sysctl_init() */
108
109/*****************************************************************************/
110/*
111 * clean up the sysctl stuff for Rx RPC
112 */
113void rxrpc_sysctl_cleanup(void)
114{
115#ifdef CONFIG_SYSCTL
116 if (rxrpc_sysctl) {
117 unregister_sysctl_table(rxrpc_sysctl);
118 rxrpc_sysctl = NULL;
119 }
120#endif /* CONFIG_SYSCTL */
121
122} /* end rxrpc_sysctl_cleanup() */
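
Since the parent entry in rxrpc_dir_sysctl_table sits at the top level (it has no "net" ancestor), registration surfaces the table as files directly under /proc/sys/rxrpc/:

	/proc/sys/rxrpc/kdebug
	/proc/sys/rxrpc/ktrace
	/proc/sys/rxrpc/kproto
	/proc/sys/rxrpc/knet
	/proc/sys/rxrpc/peertimo
	/proc/sys/rxrpc/conntimo

each mode 0644 and handled by the stock proc_dointvec/proc_doulongvec_minmax helpers.
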
diff --git a/net/rxrpc/transport.c b/net/rxrpc/transport.c
new file mode 100644
index 000000000000..9bce7794130a
--- /dev/null
+++ b/net/rxrpc/transport.c
@@ -0,0 +1,854 @@
1/* transport.c: Rx Transport routines
2 *
3 * Copyright (C) 2002 Red Hat, Inc. All Rights Reserved.
4 * Written by David Howells (dhowells@redhat.com)
5 *
6 * This program is free software; you can redistribute it and/or
7 * modify it under the terms of the GNU General Public License
8 * as published by the Free Software Foundation; either version
9 * 2 of the License, or (at your option) any later version.
10 */
11
12#include <linux/sched.h>
13#include <linux/slab.h>
14#include <linux/module.h>
15#include <rxrpc/transport.h>
16#include <rxrpc/peer.h>
17#include <rxrpc/connection.h>
18#include <rxrpc/call.h>
19#include <rxrpc/message.h>
20#include <rxrpc/krxiod.h>
21#include <rxrpc/krxsecd.h>
22#include <linux/udp.h>
23#include <linux/in.h>
24#include <linux/in6.h>
25#include <linux/icmp.h>
26#include <net/sock.h>
27#include <net/ip.h>
28#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
29#include <linux/ipv6.h> /* this should _really_ be in errqueue.h.. */
30#endif
31#include <linux/errqueue.h>
32#include <asm/uaccess.h>
33#include <asm/checksum.h>
34#include "internal.h"
35
36struct errormsg {
37 struct cmsghdr cmsg; /* control message header */
38 struct sock_extended_err ee; /* extended error information */
39 struct sockaddr_in icmp_src; /* ICMP packet source address */
40};
41
42static DEFINE_SPINLOCK(rxrpc_transports_lock);
43static struct list_head rxrpc_transports = LIST_HEAD_INIT(rxrpc_transports);
44
45__RXACCT_DECL(atomic_t rxrpc_transport_count);
46LIST_HEAD(rxrpc_proc_transports);
47DECLARE_RWSEM(rxrpc_proc_transports_sem);
48
49static void rxrpc_data_ready(struct sock *sk, int count);
50static void rxrpc_error_report(struct sock *sk);
51static int rxrpc_trans_receive_new_call(struct rxrpc_transport *trans,
52 struct list_head *msgq);
53static void rxrpc_trans_receive_error_report(struct rxrpc_transport *trans);
54
55/*****************************************************************************/
56/*
57 * create a new transport endpoint using the specified UDP port
58 */
59int rxrpc_create_transport(unsigned short port,
60 struct rxrpc_transport **_trans)
61{
62 struct rxrpc_transport *trans;
63 struct sockaddr_in sin;
64 mm_segment_t oldfs;
65 struct sock *sock;
66 int ret, opt;
67
68 _enter("%hu", port);
69
70 trans = kmalloc(sizeof(struct rxrpc_transport), GFP_KERNEL);
71 if (!trans)
72 return -ENOMEM;
73
74 memset(trans, 0, sizeof(struct rxrpc_transport));
75 atomic_set(&trans->usage, 1);
76 INIT_LIST_HEAD(&trans->services);
77 INIT_LIST_HEAD(&trans->link);
78 INIT_LIST_HEAD(&trans->krxiodq_link);
79 spin_lock_init(&trans->lock);
80 INIT_LIST_HEAD(&trans->peer_active);
81 INIT_LIST_HEAD(&trans->peer_graveyard);
82 spin_lock_init(&trans->peer_gylock);
83 init_waitqueue_head(&trans->peer_gy_waitq);
84 rwlock_init(&trans->peer_lock);
85 atomic_set(&trans->peer_count, 0);
86 trans->port = port;
87
88 /* create a UDP socket to be my actual transport endpoint */
89 ret = sock_create_kern(PF_INET, SOCK_DGRAM, IPPROTO_UDP, &trans->socket);
90 if (ret < 0)
91 goto error;
92
93 /* use the specified port */
94 if (port) {
95 memset(&sin, 0, sizeof(sin));
96 sin.sin_family = AF_INET;
97 sin.sin_port = htons(port);
98 ret = trans->socket->ops->bind(trans->socket,
99 (struct sockaddr *) &sin,
100 sizeof(sin));
101 if (ret < 0)
102 goto error;
103 }
104
105 opt = 1;
106 oldfs = get_fs();
107 set_fs(KERNEL_DS);
108 ret = trans->socket->ops->setsockopt(trans->socket, SOL_IP, IP_RECVERR,
109 (char *) &opt, sizeof(opt));
110 set_fs(oldfs);
111
112 spin_lock(&rxrpc_transports_lock);
113 list_add(&trans->link, &rxrpc_transports);
114 spin_unlock(&rxrpc_transports_lock);
115
116 /* set the socket up */
117 sock = trans->socket->sk;
118 sock->sk_user_data = trans;
119 sock->sk_data_ready = rxrpc_data_ready;
120 sock->sk_error_report = rxrpc_error_report;
121
122 down_write(&rxrpc_proc_transports_sem);
123 list_add_tail(&trans->proc_link, &rxrpc_proc_transports);
124 up_write(&rxrpc_proc_transports_sem);
125
126 __RXACCT(atomic_inc(&rxrpc_transport_count));
127
128 *_trans = trans;
129 _leave(" = 0 (%p)", trans);
130 return 0;
131
132 error:
133 /* finish cleaning up the transport (not really needed here, but...) */
134 if (trans->socket)
135 trans->socket->ops->shutdown(trans->socket, 2);
136
137 /* close the socket */
138 if (trans->socket) {
139 trans->socket->sk->sk_user_data = NULL;
140 sock_release(trans->socket);
141 trans->socket = NULL;
142 }
143
144 kfree(trans);
145
146
147 _leave(" = %d", ret);
148 return ret;
149} /* end rxrpc_create_transport() */
150
151/*****************************************************************************/
152/*
153 * destroy a transport endpoint
154 */
155void rxrpc_put_transport(struct rxrpc_transport *trans)
156{
157 _enter("%p{u=%d p=%hu}",
158 trans, atomic_read(&trans->usage), trans->port);
159
160 BUG_ON(atomic_read(&trans->usage) <= 0);
161
162 /* to prevent a race, the decrement and the dequeue must be
163 * effectively atomic */
164 spin_lock(&rxrpc_transports_lock);
165 if (likely(!atomic_dec_and_test(&trans->usage))) {
166 spin_unlock(&rxrpc_transports_lock);
167 _leave("");
168 return;
169 }
170
171 list_del(&trans->link);
172 spin_unlock(&rxrpc_transports_lock);
173
174 /* finish cleaning up the transport */
175 if (trans->socket)
176 trans->socket->ops->shutdown(trans->socket, 2);
177
178 rxrpc_krxsecd_clear_transport(trans);
179 rxrpc_krxiod_dequeue_transport(trans);
180
181 /* discard all peer information */
182 rxrpc_peer_clearall(trans);
183
184 down_write(&rxrpc_proc_transports_sem);
185 list_del(&trans->proc_link);
186 up_write(&rxrpc_proc_transports_sem);
187 __RXACCT(atomic_dec(&rxrpc_transport_count));
188
189 /* close the socket */
190 if (trans->socket) {
191 trans->socket->sk->sk_user_data = NULL;
192 sock_release(trans->socket);
193 trans->socket = NULL;
194 }
195
196 kfree(trans);
197
198 _leave("");
199} /* end rxrpc_put_transport() */
200
201/*****************************************************************************/
202/*
203 * add a service to a transport to be listened upon
204 */
205int rxrpc_add_service(struct rxrpc_transport *trans,
206 struct rxrpc_service *newsrv)
207{
208 struct rxrpc_service *srv;
209 struct list_head *_p;
210 int ret = -EEXIST;
211
212 _enter("%p{%hu},%p{%hu}",
213 trans, trans->port, newsrv, newsrv->service_id);
214
215 /* verify that the service ID is not already present */
216 spin_lock(&trans->lock);
217
218 list_for_each(_p, &trans->services) {
219 srv = list_entry(_p, struct rxrpc_service, link);
220 if (srv->service_id == newsrv->service_id)
221 goto out;
222 }
223
224	/* okay - add the service to the transport's list */
225 list_add_tail(&newsrv->link, &trans->services);
226 rxrpc_get_transport(trans);
227 ret = 0;
228
229 out:
230 spin_unlock(&trans->lock);
231
232	_leave(" = %d", ret);
233 return ret;
234} /* end rxrpc_add_service() */
235
236/*****************************************************************************/
237/*
238 * remove a service from a transport
239 */
240void rxrpc_del_service(struct rxrpc_transport *trans, struct rxrpc_service *srv)
241{
242 _enter("%p{%hu},%p{%hu}", trans, trans->port, srv, srv->service_id);
243
244 spin_lock(&trans->lock);
245 list_del(&srv->link);
246 spin_unlock(&trans->lock);
247
248 rxrpc_put_transport(trans);
249
250 _leave("");
251} /* end rxrpc_del_service() */
252
253/*****************************************************************************/
254/*
255 * INET callback when data has been received on the socket.
256 */
257static void rxrpc_data_ready(struct sock *sk, int count)
258{
259 struct rxrpc_transport *trans;
260
261 _enter("%p{t=%p},%d", sk, sk->sk_user_data, count);
262
263 /* queue the transport for attention by krxiod */
264 trans = (struct rxrpc_transport *) sk->sk_user_data;
265 if (trans)
266 rxrpc_krxiod_queue_transport(trans);
267
268 /* wake up anyone waiting on the socket */
269 if (sk->sk_sleep && waitqueue_active(sk->sk_sleep))
270 wake_up_interruptible(sk->sk_sleep);
271
272 _leave("");
273} /* end rxrpc_data_ready() */
274
275/*****************************************************************************/
276/*
277 * INET callback when an ICMP error packet is received
278 * - sk->err is error (EHOSTUNREACH, EPROTO or EMSGSIZE)
279 */
280static void rxrpc_error_report(struct sock *sk)
281{
282 struct rxrpc_transport *trans;
283
284 _enter("%p{t=%p}", sk, sk->sk_user_data);
285
286 /* queue the transport for attention by krxiod */
287 trans = (struct rxrpc_transport *) sk->sk_user_data;
288 if (trans) {
289 trans->error_rcvd = 1;
290 rxrpc_krxiod_queue_transport(trans);
291 }
292
293 /* wake up anyone waiting on the socket */
294 if (sk->sk_sleep && waitqueue_active(sk->sk_sleep))
295 wake_up_interruptible(sk->sk_sleep);
296
297 _leave("");
298} /* end rxrpc_error_report() */
299
300/*****************************************************************************/
301/*
302 * split a message up, allocating message records and filling them in
303 * from the contents of a socket buffer
304 */
305static int rxrpc_incoming_msg(struct rxrpc_transport *trans,
306 struct sk_buff *pkt,
307 struct list_head *msgq)
308{
309 struct rxrpc_message *msg;
310 int ret;
311
312 _enter("");
313
314 msg = kmalloc(sizeof(struct rxrpc_message), GFP_KERNEL);
315 if (!msg) {
316 _leave(" = -ENOMEM");
317 return -ENOMEM;
318 }
319
320 memset(msg, 0, sizeof(*msg));
321 atomic_set(&msg->usage, 1);
322	list_add_tail(&msg->link, msgq);
323
324 /* dig out the Rx routing parameters */
325 if (skb_copy_bits(pkt, sizeof(struct udphdr),
326 &msg->hdr, sizeof(msg->hdr)) < 0) {
327 ret = -EBADMSG;
328 goto error;
329 }
330
331 msg->trans = trans;
332 msg->state = RXRPC_MSG_RECEIVED;
333 msg->stamp = pkt->stamp;
334 if (msg->stamp.tv_sec == 0) {
335 do_gettimeofday(&msg->stamp);
336 if (pkt->sk)
337 sock_enable_timestamp(pkt->sk);
338 }
339 msg->seq = ntohl(msg->hdr.seq);
340
341 /* attach the packet */
342 skb_get(pkt);
343 msg->pkt = pkt;
344
345 msg->offset = sizeof(struct udphdr) + sizeof(struct rxrpc_header);
346 msg->dsize = msg->pkt->len - msg->offset;
347
348 _net("Rx Received packet from %s (%08x;%08x,%1x,%d,%s,%02x,%d,%d)",
349 msg->hdr.flags & RXRPC_CLIENT_INITIATED ? "client" : "server",
350 ntohl(msg->hdr.epoch),
351 (ntohl(msg->hdr.cid) & RXRPC_CIDMASK) >> RXRPC_CIDSHIFT,
352 ntohl(msg->hdr.cid) & RXRPC_CHANNELMASK,
353 ntohl(msg->hdr.callNumber),
354 rxrpc_pkts[msg->hdr.type],
355 msg->hdr.flags,
356 ntohs(msg->hdr.serviceId),
357 msg->hdr.securityIndex);
358
359 __RXACCT(atomic_inc(&rxrpc_message_count));
360
361 /* split off jumbo packets */
362 while (msg->hdr.type == RXRPC_PACKET_TYPE_DATA &&
363 msg->hdr.flags & RXRPC_JUMBO_PACKET
364 ) {
365 struct rxrpc_jumbo_header jumbo;
366 struct rxrpc_message *jumbomsg = msg;
367
368 _debug("split jumbo packet");
369
370 /* quick sanity check */
371 ret = -EBADMSG;
372 if (msg->dsize <
373 RXRPC_JUMBO_DATALEN + sizeof(struct rxrpc_jumbo_header))
374 goto error;
375 if (msg->hdr.flags & RXRPC_LAST_PACKET)
376 goto error;
377
378 /* dig out the secondary header */
379 if (skb_copy_bits(pkt, msg->offset + RXRPC_JUMBO_DATALEN,
380 &jumbo, sizeof(jumbo)) < 0)
381 goto error;
382
383 /* allocate a new message record */
384 ret = -ENOMEM;
385 msg = kmalloc(sizeof(struct rxrpc_message), GFP_KERNEL);
386 if (!msg)
387 goto error;
388
389 memcpy(msg, jumbomsg, sizeof(*msg));
390 list_add_tail(&msg->link, msgq);
391
392 /* adjust the jumbo packet */
393 jumbomsg->dsize = RXRPC_JUMBO_DATALEN;
394
395 /* attach the packet here too */
396 skb_get(pkt);
397
398 /* adjust the parameters */
399 msg->seq++;
400 msg->hdr.seq = htonl(msg->seq);
401 msg->hdr.serial = htonl(ntohl(msg->hdr.serial) + 1);
402 msg->offset += RXRPC_JUMBO_DATALEN +
403 sizeof(struct rxrpc_jumbo_header);
404 msg->dsize -= RXRPC_JUMBO_DATALEN +
405 sizeof(struct rxrpc_jumbo_header);
406 msg->hdr.flags = jumbo.flags;
407 msg->hdr._rsvd = jumbo._rsvd;
408
409 _net("Rx Split jumbo packet from %s"
410 " (%08x;%08x,%1x,%d,%s,%02x,%d,%d)",
411 msg->hdr.flags & RXRPC_CLIENT_INITIATED ? "client" : "server",
412 ntohl(msg->hdr.epoch),
413 (ntohl(msg->hdr.cid) & RXRPC_CIDMASK) >> RXRPC_CIDSHIFT,
414 ntohl(msg->hdr.cid) & RXRPC_CHANNELMASK,
415 ntohl(msg->hdr.callNumber),
416 rxrpc_pkts[msg->hdr.type],
417 msg->hdr.flags,
418 ntohs(msg->hdr.serviceId),
419 msg->hdr.securityIndex);
420
421 __RXACCT(atomic_inc(&rxrpc_message_count));
422 }
423
424 _leave(" = 0 #%d", atomic_read(&rxrpc_message_count));
425 return 0;
426
427 error:
428 while (!list_empty(msgq)) {
429 msg = list_entry(msgq->next, struct rxrpc_message, link);
430 list_del_init(&msg->link);
431
432 rxrpc_put_message(msg);
433 }
434
435 _leave(" = %d", ret);
436 return ret;
437} /* end rxrpc_incoming_msg() */
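
The splitting loop above assumes the jumbo wire layout below; every message split off shares the one skb (pinned by an extra skb_get()) and differs only in its offset/dsize window and a seq/serial each bumped by one:

	+--------------+---------------------+--------------+---------------------+----
	| rxrpc_header | RXRPC_JUMBO_DATALEN | jumbo header | RXRPC_JUMBO_DATALEN | ...
	+--------------+---------------------+--------------+---------------------+----

with the final segment taking its flags (and _rsvd) from the last jumbo header rather than the main one.
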
438
439/*****************************************************************************/
440/*
441 * receive and process incoming packets on a transport
442 * - called from krxiod in process context
443 */
444void rxrpc_trans_receive_packet(struct rxrpc_transport *trans)
445{
446 struct rxrpc_message *msg;
447 struct rxrpc_peer *peer;
448 struct sk_buff *pkt;
449 int ret;
450 __be32 addr;
451 __be16 port;
452
453 LIST_HEAD(msgq);
454
455 _enter("%p{%d}", trans, trans->port);
456
457 for (;;) {
458	/* deal with outstanding errors first */
459 if (trans->error_rcvd)
460 rxrpc_trans_receive_error_report(trans);
461
462 /* attempt to receive a packet */
463 pkt = skb_recv_datagram(trans->socket->sk, 0, 1, &ret);
464 if (!pkt) {
465 if (ret == -EAGAIN) {
466 _leave(" EAGAIN");
467 return;
468 }
469
470 /* an icmp error may have occurred */
471 rxrpc_krxiod_queue_transport(trans);
472 _leave(" error %d\n", ret);
473 return;
474 }
475
476 /* we'll probably need to checksum it (didn't call
477 * sock_recvmsg) */
478 if (pkt->ip_summed != CHECKSUM_UNNECESSARY) {
479 if ((unsigned short)
480 csum_fold(skb_checksum(pkt, 0, pkt->len,
481 pkt->csum))) {
482 kfree_skb(pkt);
483 rxrpc_krxiod_queue_transport(trans);
484 _leave(" CSUM failed");
485 return;
486 }
487 }
488
489 addr = pkt->nh.iph->saddr;
490 port = pkt->h.uh->source;
491
492 _net("Rx Received UDP packet from %08x:%04hu",
493 ntohl(addr), ntohs(port));
494
495 /* unmarshall the Rx parameters and split jumbo packets */
496 ret = rxrpc_incoming_msg(trans, pkt, &msgq);
497 if (ret < 0) {
498 kfree_skb(pkt);
499 rxrpc_krxiod_queue_transport(trans);
500 _leave(" bad packet");
501 return;
502 }
503
504 BUG_ON(list_empty(&msgq));
505
506 msg = list_entry(msgq.next, struct rxrpc_message, link);
507
508 /* locate the record for the peer from which it
509 * originated */
510 ret = rxrpc_peer_lookup(trans, addr, &peer);
511 if (ret < 0) {
512 kdebug("Rx No connections from that peer");
513 rxrpc_trans_immediate_abort(trans, msg, -EINVAL);
514 goto finished_msg;
515 }
516
517 /* try and find a matching connection */
518 ret = rxrpc_connection_lookup(peer, msg, &msg->conn);
519 if (ret < 0) {
520 kdebug("Rx Unknown Connection");
521 rxrpc_trans_immediate_abort(trans, msg, -EINVAL);
522 rxrpc_put_peer(peer);
523 goto finished_msg;
524 }
525 rxrpc_put_peer(peer);
526
527 /* deal with the first packet of a new call */
528 if (msg->hdr.flags & RXRPC_CLIENT_INITIATED &&
529 msg->hdr.type == RXRPC_PACKET_TYPE_DATA &&
530 ntohl(msg->hdr.seq) == 1
531 ) {
532 _debug("Rx New server call");
533 rxrpc_trans_receive_new_call(trans, &msgq);
534 goto finished_msg;
535 }
536
537 /* deal with subsequent packet(s) of call */
538 _debug("Rx Call packet");
539 while (!list_empty(&msgq)) {
540 msg = list_entry(msgq.next, struct rxrpc_message, link);
541 list_del_init(&msg->link);
542
543 ret = rxrpc_conn_receive_call_packet(msg->conn, NULL, msg);
544 if (ret < 0) {
545 rxrpc_trans_immediate_abort(trans, msg, ret);
546 rxrpc_put_message(msg);
547 goto finished_msg;
548 }
549
550 rxrpc_put_message(msg);
551 }
552
553 goto finished_msg;
554
555 /* dispose of the packets */
556 finished_msg:
557 while (!list_empty(&msgq)) {
558 msg = list_entry(msgq.next, struct rxrpc_message, link);
559 list_del_init(&msg->link);
560
561 rxrpc_put_message(msg);
562 }
563 kfree_skb(pkt);
564 }
565
566 _leave("");
567
568} /* end rxrpc_trans_receive_packet() */
569
570/*****************************************************************************/
571/*
572 * accept a new call from a client trying to connect to one of my services
573 * - called in process context
574 */
575static int rxrpc_trans_receive_new_call(struct rxrpc_transport *trans,
576 struct list_head *msgq)
577{
578 struct rxrpc_message *msg;
579
580 _enter("");
581
582 /* only bother with the first packet */
583 msg = list_entry(msgq->next, struct rxrpc_message, link);
584 list_del_init(&msg->link);
585 rxrpc_krxsecd_queue_incoming_call(msg);
586 rxrpc_put_message(msg);
587
588 _leave(" = 0");
589
590 return 0;
591} /* end rxrpc_trans_receive_new_call() */
592
593/*****************************************************************************/
594/*
595 * perform an immediate abort without connection or call structures
596 */
597int rxrpc_trans_immediate_abort(struct rxrpc_transport *trans,
598 struct rxrpc_message *msg,
599 int error)
600{
601 struct rxrpc_header ahdr;
602 struct sockaddr_in sin;
603 struct msghdr msghdr;
604 struct kvec iov[2];
605 __be32 _error;
606 int len, ret;
607
608 _enter("%p,%p,%d", trans, msg, error);
609
610 /* don't abort an abort packet */
611 if (msg->hdr.type == RXRPC_PACKET_TYPE_ABORT) {
612 _leave(" = 0");
613 return 0;
614 }
615
616 _error = htonl(-error);
617
618 /* set up the message to be transmitted */
619 memcpy(&ahdr, &msg->hdr, sizeof(ahdr));
620 ahdr.epoch = msg->hdr.epoch;
621 ahdr.serial = htonl(1);
622 ahdr.seq = 0;
623 ahdr.type = RXRPC_PACKET_TYPE_ABORT;
624 ahdr.flags = RXRPC_LAST_PACKET;
625 ahdr.flags |= ~msg->hdr.flags & RXRPC_CLIENT_INITIATED;
626
627 iov[0].iov_len = sizeof(ahdr);
628 iov[0].iov_base = &ahdr;
629 iov[1].iov_len = sizeof(_error);
630 iov[1].iov_base = &_error;
631
632 len = sizeof(ahdr) + sizeof(_error);
633
634	memset(&sin, 0, sizeof(sin));
635 sin.sin_family = AF_INET;
636 sin.sin_port = msg->pkt->h.uh->source;
637 sin.sin_addr.s_addr = msg->pkt->nh.iph->saddr;
638
639 msghdr.msg_name = &sin;
640 msghdr.msg_namelen = sizeof(sin);
641 msghdr.msg_control = NULL;
642 msghdr.msg_controllen = 0;
643 msghdr.msg_flags = MSG_DONTWAIT;
644
645 _net("Sending message type %d of %d bytes to %08x:%d",
646 ahdr.type,
647 len,
648 ntohl(sin.sin_addr.s_addr),
649 ntohs(sin.sin_port));
650
651 /* send the message */
652 ret = kernel_sendmsg(trans->socket, &msghdr, iov, 2, len);
653
654 _leave(" = %d", ret);
655 return ret;
656} /* end rxrpc_trans_immediate_abort() */
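
On the wire, the abort built above is just the two kvec segments: the copied-and-rewritten header, then the 4-byte abort code. The CLIENT_INITIATED sense is flipped so the abort travels back the way the offending packet came:

	iov[0] = struct rxrpc_header { type = RXRPC_PACKET_TYPE_ABORT, seq = 0, serial = htonl(1),
	                               flags = RXRPC_LAST_PACKET | (~original flags & RXRPC_CLIENT_INITIATED) }
	iov[1] = __be32 htonl(-error)

A receiver therefore finds the errno-derived abort code immediately after the standard header.
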
657
658/*****************************************************************************/
659/*
660 * receive an ICMP error report and percolate it to all connections
661 * heading to the affected host or port
662 */
663static void rxrpc_trans_receive_error_report(struct rxrpc_transport *trans)
664{
665 struct rxrpc_connection *conn;
666 struct sockaddr_in sin;
667 struct rxrpc_peer *peer;
668 struct list_head connq, *_p;
669 struct errormsg emsg;
670 struct msghdr msg;
671 __be16 port;
672 int local, err;
673
674 _enter("%p", trans);
675
676 for (;;) {
677 trans->error_rcvd = 0;
678
679 /* try and receive an error message */
680 msg.msg_name = &sin;
681 msg.msg_namelen = sizeof(sin);
682 msg.msg_control = &emsg;
683 msg.msg_controllen = sizeof(emsg);
684 msg.msg_flags = 0;
685
686 err = kernel_recvmsg(trans->socket, &msg, NULL, 0, 0,
687 MSG_ERRQUEUE | MSG_DONTWAIT | MSG_TRUNC);
688
689 if (err == -EAGAIN) {
690 _leave("");
691 return;
692 }
693
694 if (err < 0) {
695 printk("%s: unable to recv an error report: %d\n",
696 __FUNCTION__, err);
697 _leave("");
698 return;
699 }
700
701 msg.msg_controllen = (char *) msg.msg_control - (char *) &emsg;
702
703 if (msg.msg_controllen < sizeof(emsg.cmsg) ||
704 msg.msg_namelen < sizeof(sin)) {
705 printk("%s: short control message"
706 " (nlen=%u clen=%Zu fl=%x)\n",
707 __FUNCTION__,
708 msg.msg_namelen,
709 msg.msg_controllen,
710 msg.msg_flags);
711 continue;
712 }
713
714 _net("Rx Received control message"
715 " { len=%Zu level=%u type=%u }",
716 emsg.cmsg.cmsg_len,
717 emsg.cmsg.cmsg_level,
718 emsg.cmsg.cmsg_type);
719
720 if (sin.sin_family != AF_INET) {
721 printk("Rx Ignoring error report with non-INET address"
722 " (fam=%u)",
723 sin.sin_family);
724 continue;
725 }
726
727 _net("Rx Received message pertaining to host addr=%x port=%hu",
728 ntohl(sin.sin_addr.s_addr), ntohs(sin.sin_port));
729
730 if (emsg.cmsg.cmsg_level != SOL_IP ||
731 emsg.cmsg.cmsg_type != IP_RECVERR) {
732 printk("Rx Ignoring unknown error report"
733 " { level=%u type=%u }",
734 emsg.cmsg.cmsg_level,
735 emsg.cmsg.cmsg_type);
736 continue;
737 }
738
739 if (msg.msg_controllen < sizeof(emsg.cmsg) + sizeof(emsg.ee)) {
740 printk("%s: short error message (%Zu)\n",
741 __FUNCTION__, msg.msg_controllen);
742 _leave("");
743 return;
744 }
745
746 port = sin.sin_port;
747
748 switch (emsg.ee.ee_origin) {
749 case SO_EE_ORIGIN_ICMP:
750 local = 0;
751 switch (emsg.ee.ee_type) {
752 case ICMP_DEST_UNREACH:
753 switch (emsg.ee.ee_code) {
754 case ICMP_NET_UNREACH:
755 _net("Rx Received ICMP Network Unreachable");
756 port = 0;
757 err = -ENETUNREACH;
758 break;
759 case ICMP_HOST_UNREACH:
760 _net("Rx Received ICMP Host Unreachable");
761 port = 0;
762 err = -EHOSTUNREACH;
763 break;
764 case ICMP_PORT_UNREACH:
765 _net("Rx Received ICMP Port Unreachable");
766 err = -ECONNREFUSED;
767 break;
768 case ICMP_NET_UNKNOWN:
769 _net("Rx Received ICMP Unknown Network");
770 port = 0;
771 err = -ENETUNREACH;
772 break;
773 case ICMP_HOST_UNKNOWN:
774 _net("Rx Received ICMP Unknown Host");
775 port = 0;
776 err = -EHOSTUNREACH;
777 break;
778 default:
779 _net("Rx Received ICMP DestUnreach { code=%u }",
780 emsg.ee.ee_code);
781 err = emsg.ee.ee_errno;
782 break;
783 }
784 break;
785
786 case ICMP_TIME_EXCEEDED:
787 _net("Rx Received ICMP TTL Exceeded");
788 err = emsg.ee.ee_errno;
789 break;
790
791 default:
792 _proto("Rx Received ICMP error { type=%u code=%u }",
793 emsg.ee.ee_type, emsg.ee.ee_code);
794 err = emsg.ee.ee_errno;
795 break;
796 }
797 break;
798
799 case SO_EE_ORIGIN_LOCAL:
800 _proto("Rx Received local error { error=%d }",
801 emsg.ee.ee_errno);
802 local = 1;
803 err = emsg.ee.ee_errno;
804 break;
805
806 case SO_EE_ORIGIN_NONE:
807 case SO_EE_ORIGIN_ICMP6:
808 default:
809 _proto("Rx Received error report { orig=%u }",
810 emsg.ee.ee_origin);
811 local = 0;
812 err = emsg.ee.ee_errno;
813 break;
814 }
815
816 /* find all the connections between this transport and the
817 * affected destination */
818 INIT_LIST_HEAD(&connq);
819
820 if (rxrpc_peer_lookup(trans, sin.sin_addr.s_addr,
821 &peer) == 0) {
822 read_lock(&peer->conn_lock);
823 list_for_each(_p, &peer->conn_active) {
824 conn = list_entry(_p, struct rxrpc_connection,
825 link);
826 if (port && conn->addr.sin_port != port)
827 continue;
828 if (!list_empty(&conn->err_link))
829 continue;
830
831 rxrpc_get_connection(conn);
832 list_add_tail(&conn->err_link, &connq);
833 }
834 read_unlock(&peer->conn_lock);
835
836 /* service all those connections */
837 while (!list_empty(&connq)) {
838 conn = list_entry(connq.next,
839 struct rxrpc_connection,
840 err_link);
841 list_del(&conn->err_link);
842
843 rxrpc_conn_handle_error(conn, local, err);
844
845 rxrpc_put_connection(conn);
846 }
847
848 rxrpc_put_peer(peer);
849 }
850 }
851
852 _leave("");
853 return;
854} /* end rxrpc_trans_receive_error_report() */