Diffstat (limited to 'net/rxrpc/ar-connection.c')
-rw-r--r--  net/rxrpc/ar-connection.c  895
1 files changed, 895 insertions, 0 deletions
diff --git a/net/rxrpc/ar-connection.c b/net/rxrpc/ar-connection.c
new file mode 100644
index 000000000000..01eb33c30571
--- /dev/null
+++ b/net/rxrpc/ar-connection.c
@@ -0,0 +1,895 @@
1/* RxRPC virtual connection handler
2 *
3 * Copyright (C) 2007 Red Hat, Inc. All Rights Reserved.
4 * Written by David Howells (dhowells@redhat.com)
5 *
6 * This program is free software; you can redistribute it and/or
7 * modify it under the terms of the GNU General Public License
8 * as published by the Free Software Foundation; either version
9 * 2 of the License, or (at your option) any later version.
10 */
11
12#include <linux/module.h>
13#include <linux/net.h>
14#include <linux/skbuff.h>
15#include <linux/crypto.h>
16#include <net/sock.h>
17#include <net/af_rxrpc.h>
18#include "ar-internal.h"
19
20static void rxrpc_connection_reaper(struct work_struct *work);
21
22LIST_HEAD(rxrpc_connections);
23DEFINE_RWLOCK(rxrpc_connection_lock);
24static unsigned long rxrpc_connection_timeout = 10 * 60;
25static DECLARE_DELAYED_WORK(rxrpc_connection_reap, rxrpc_connection_reaper);
26
27/*
28 * allocate a new client connection bundle
29 */
30static struct rxrpc_conn_bundle *rxrpc_alloc_bundle(gfp_t gfp)
31{
32 struct rxrpc_conn_bundle *bundle;
33
34 _enter("");
35
36 bundle = kzalloc(sizeof(struct rxrpc_conn_bundle), gfp);
37 if (bundle) {
38 INIT_LIST_HEAD(&bundle->unused_conns);
39 INIT_LIST_HEAD(&bundle->avail_conns);
40 INIT_LIST_HEAD(&bundle->busy_conns);
41 init_waitqueue_head(&bundle->chanwait);
42 atomic_set(&bundle->usage, 1);
43 }
44
45 _leave(" = %p", bundle);
46 return bundle;
47}
48
49/*
50 * compare bundle parameters with what we're looking for
51 * - return -ve, 0 or +ve
52 */
53static inline
54int rxrpc_cmp_bundle(const struct rxrpc_conn_bundle *bundle,
55 struct key *key, __be16 service_id)
56{
57 return (bundle->service_id - service_id) ?:
58 ((unsigned long) bundle->key - (unsigned long) key);
59}
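/*
 * Note: the "?:" above is the GCC conditional-with-omitted-middle-operand
 * extension, so the key pointers are only compared when the two service IDs
 * are equal; bundles therefore sort in the rb-tree by service ID first and
 * then by key pointer.
 */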
60
61/*
62 * get bundle of client connections that a client socket can make use of
63 */
64struct rxrpc_conn_bundle *rxrpc_get_bundle(struct rxrpc_sock *rx,
65 struct rxrpc_transport *trans,
66 struct key *key,
67 __be16 service_id,
68 gfp_t gfp)
69{
70 struct rxrpc_conn_bundle *bundle, *candidate;
71 struct rb_node *p, *parent, **pp;
72
73 _enter("%p{%x},%x,%hx,",
74 rx, key_serial(key), trans->debug_id, ntohs(service_id));
75
76 if (rx->trans == trans && rx->bundle) {
77 atomic_inc(&rx->bundle->usage);
78 return rx->bundle;
79 }
80
81 /* search the extant bundles first for one that matches the specified
82 * user ID */
83 spin_lock(&trans->client_lock);
84
85 p = trans->bundles.rb_node;
86 while (p) {
87 bundle = rb_entry(p, struct rxrpc_conn_bundle, node);
88
89 if (rxrpc_cmp_bundle(bundle, key, service_id) < 0)
90 p = p->rb_left;
91 else if (rxrpc_cmp_bundle(bundle, key, service_id) > 0)
92 p = p->rb_right;
93 else
94 goto found_extant_bundle;
95 }
96
97 spin_unlock(&trans->client_lock);
98
99 /* not yet present - create a candidate for a new record and then
100 * redo the search */
101 candidate = rxrpc_alloc_bundle(gfp);
102 if (!candidate) {
103 _leave(" = -ENOMEM");
104 return ERR_PTR(-ENOMEM);
105 }
106
107 candidate->key = key_get(key);
108 candidate->service_id = service_id;
109
110 spin_lock(&trans->client_lock);
111
112 pp = &trans->bundles.rb_node;
113 parent = NULL;
114 while (*pp) {
115 parent = *pp;
116 bundle = rb_entry(parent, struct rxrpc_conn_bundle, node);
117
118 if (rxrpc_cmp_bundle(bundle, key, service_id) < 0)
119 pp = &(*pp)->rb_left;
120 else if (rxrpc_cmp_bundle(bundle, key, service_id) > 0)
121 pp = &(*pp)->rb_right;
122 else
123 goto found_extant_second;
124 }
125
126 /* second search also failed; add the new bundle */
127 bundle = candidate;
128 candidate = NULL;
129
130 rb_link_node(&bundle->node, parent, pp);
131 rb_insert_color(&bundle->node, &trans->bundles);
132 spin_unlock(&trans->client_lock);
133 _net("BUNDLE new on trans %d", trans->debug_id);
134 if (!rx->bundle && rx->sk.sk_state == RXRPC_CLIENT_CONNECTED) {
135 atomic_inc(&bundle->usage);
136 rx->bundle = bundle;
137 }
138 _leave(" = %p [new]", bundle);
139 return bundle;
140
141 /* we found the bundle in the list immediately */
142found_extant_bundle:
143 atomic_inc(&bundle->usage);
144 spin_unlock(&trans->client_lock);
145 _net("BUNDLE old on trans %d", trans->debug_id);
146 if (!rx->bundle && rx->sk.sk_state == RXRPC_CLIENT_CONNECTED) {
147 atomic_inc(&bundle->usage);
148 rx->bundle = bundle;
149 }
150 _leave(" = %p [extant %d]", bundle, atomic_read(&bundle->usage));
151 return bundle;
152
153 /* we found the bundle on the second time through the list */
154found_extant_second:
155 atomic_inc(&bundle->usage);
156 spin_unlock(&trans->client_lock);
157 kfree(candidate);
158 _net("BUNDLE old2 on trans %d", trans->debug_id);
159 if (!rx->bundle && rx->sk.sk_state == RXRPC_CLIENT_CONNECTED) {
160 atomic_inc(&bundle->usage);
161 rx->bundle = bundle;
162 }
163 _leave(" = %p [second %d]", bundle, atomic_read(&bundle->usage));
164 return bundle;
165}
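/*
 * Note: rxrpc_get_bundle() follows the search / unlock / allocate-candidate /
 * relock / re-search pattern used throughout this file: the second search
 * catches a bundle inserted by another thread while client_lock was dropped,
 * in which case the freshly allocated candidate is simply freed again.
 */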
166
167/*
168 * release a bundle
169 */
170void rxrpc_put_bundle(struct rxrpc_transport *trans,
171 struct rxrpc_conn_bundle *bundle)
172{
173 _enter("%p,%p{%d}", trans, bundle, atomic_read(&bundle->usage));
174
175 if (atomic_dec_and_lock(&bundle->usage, &trans->client_lock)) {
176 _debug("Destroy bundle");
177 rb_erase(&bundle->node, &trans->bundles);
178 spin_unlock(&trans->client_lock);
179 ASSERT(list_empty(&bundle->unused_conns));
180 ASSERT(list_empty(&bundle->avail_conns));
181 ASSERT(list_empty(&bundle->busy_conns));
182 ASSERTCMP(bundle->num_conns, ==, 0);
183 key_put(bundle->key);
184 kfree(bundle);
185 }
186
187 _leave("");
188}
189
190/*
191 * allocate a new connection
192 */
193static struct rxrpc_connection *rxrpc_alloc_connection(gfp_t gfp)
194{
195 struct rxrpc_connection *conn;
196
197 _enter("");
198
199 conn = kzalloc(sizeof(struct rxrpc_connection), gfp);
200 if (conn) {
201 INIT_WORK(&conn->processor, &rxrpc_process_connection);
202 INIT_LIST_HEAD(&conn->bundle_link);
203 conn->calls = RB_ROOT;
204 skb_queue_head_init(&conn->rx_queue);
205 rwlock_init(&conn->lock);
206 spin_lock_init(&conn->state_lock);
207 atomic_set(&conn->usage, 1);
208 conn->debug_id = atomic_inc_return(&rxrpc_debug_id);
209 conn->avail_calls = RXRPC_MAXCALLS;
210 conn->size_align = 4;
211 conn->header_size = sizeof(struct rxrpc_header);
212 }
213
214 _leave(" = %p{%d}", conn, conn ? conn->debug_id : 0);
215 return conn;
216}
217
218/*
219 * assign a connection ID to a connection and add it to the transport's
220 * connection lookup tree
221 * - called with transport client lock held
222 */
223static void rxrpc_assign_connection_id(struct rxrpc_connection *conn)
224{
225 struct rxrpc_connection *xconn;
226 struct rb_node *parent, **p;
227 __be32 epoch;
228 u32 real_conn_id;
229
230 _enter("");
231
232 epoch = conn->epoch;
233
234 write_lock_bh(&conn->trans->conn_lock);
235
236 conn->trans->conn_idcounter += RXRPC_CID_INC;
237 if (conn->trans->conn_idcounter < RXRPC_CID_INC)
238 conn->trans->conn_idcounter = RXRPC_CID_INC;
239 real_conn_id = conn->trans->conn_idcounter;
240
241attempt_insertion:
242 parent = NULL;
243 p = &conn->trans->client_conns.rb_node;
244
245 while (*p) {
246 parent = *p;
247 xconn = rb_entry(parent, struct rxrpc_connection, node);
248
249 if (epoch < xconn->epoch)
250 p = &(*p)->rb_left;
251 else if (epoch > xconn->epoch)
252 p = &(*p)->rb_right;
253 else if (real_conn_id < xconn->real_conn_id)
254 p = &(*p)->rb_left;
255 else if (real_conn_id > xconn->real_conn_id)
256 p = &(*p)->rb_right;
257 else
258 goto id_exists;
259 }
260
261 /* we've found a suitable hole - arrange for this connection to occupy
262 * it */
263 rb_link_node(&conn->node, parent, p);
264 rb_insert_color(&conn->node, &conn->trans->client_conns);
265
266 conn->real_conn_id = real_conn_id;
267 conn->cid = htonl(real_conn_id);
268 write_unlock_bh(&conn->trans->conn_lock);
269 _leave(" [CONNID %x CID %x]", real_conn_id, ntohl(conn->cid));
270 return;
271
272 /* we found a connection with the proposed ID - walk the tree from that
273 * point looking for the next unused ID */
274id_exists:
275 for (;;) {
276 real_conn_id += RXRPC_CID_INC;
277 if (real_conn_id < RXRPC_CID_INC) {
278 real_conn_id = RXRPC_CID_INC;
279 conn->trans->conn_idcounter = real_conn_id;
280 goto attempt_insertion;
281 }
282
283 parent = rb_next(parent);
284 if (!parent)
285 goto attempt_insertion;
286
287 xconn = rb_entry(parent, struct rxrpc_connection, node);
288 if (epoch < xconn->epoch ||
289 real_conn_id < xconn->real_conn_id)
290 goto attempt_insertion;
291 }
292}
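/*
 * Note: conn_idcounter advances in steps of RXRPC_CID_INC, so every CID
 * handed out here has its low-order bits clear; those bits are filled in
 * later with the channel number (call->cid = conn->cid | htonl(chan) in the
 * connect paths below).  If the counter wraps, it restarts at RXRPC_CID_INC
 * and the insertion is retried.
 */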
293
294/*
295 * add a call to a connection's call-by-ID tree
296 */
297static void rxrpc_add_call_ID_to_conn(struct rxrpc_connection *conn,
298 struct rxrpc_call *call)
299{
300 struct rxrpc_call *xcall;
301 struct rb_node *parent, **p;
302 __be32 call_id;
303
304 write_lock_bh(&conn->lock);
305
306 call_id = call->call_id;
307 p = &conn->calls.rb_node;
308 parent = NULL;
309 while (*p) {
310 parent = *p;
311 xcall = rb_entry(parent, struct rxrpc_call, conn_node);
312
313 if (call_id < xcall->call_id)
314 p = &(*p)->rb_left;
315 else if (call_id > xcall->call_id)
316 p = &(*p)->rb_right;
317 else
318 BUG();
319 }
320
321 rb_link_node(&call->conn_node, parent, p);
322 rb_insert_color(&call->conn_node, &conn->calls);
323
324 write_unlock_bh(&conn->lock);
325}
326
327/*
328 * connect a call on an exclusive connection
329 */
330static int rxrpc_connect_exclusive(struct rxrpc_sock *rx,
331 struct rxrpc_transport *trans,
332 __be16 service_id,
333 struct rxrpc_call *call,
334 gfp_t gfp)
335{
336 struct rxrpc_connection *conn;
337 int chan, ret;
338
339 _enter("");
340
341 conn = rx->conn;
342 if (!conn) {
343 /* not yet present - create a candidate for a new connection
344 * and then redo the check */
345 conn = rxrpc_alloc_connection(gfp);
346 if (!conn) {
347 _leave(" = -ENOMEM");
348 return -ENOMEM;
349 }
350
351 conn->trans = trans;
352 conn->bundle = NULL;
353 conn->service_id = service_id;
354 conn->epoch = rxrpc_epoch;
355 conn->in_clientflag = 0;
356 conn->out_clientflag = RXRPC_CLIENT_INITIATED;
357 conn->cid = 0;
358 conn->state = RXRPC_CONN_CLIENT;
359 conn->avail_calls = RXRPC_MAXCALLS;
360 conn->security_level = rx->min_sec_level;
361 conn->key = key_get(rx->key);
362
363 ret = rxrpc_init_client_conn_security(conn);
364 if (ret < 0) {
365 key_put(conn->key);
366 kfree(conn);
367 _leave(" = %d [key]", ret);
368 return ret;
369 }
370
371 write_lock_bh(&rxrpc_connection_lock);
372 list_add_tail(&conn->link, &rxrpc_connections);
373 write_unlock_bh(&rxrpc_connection_lock);
374
375 spin_lock(&trans->client_lock);
376 atomic_inc(&trans->usage);
377
378 _net("CONNECT EXCL new %d on TRANS %d",
379 conn->debug_id, conn->trans->debug_id);
380
381 rxrpc_assign_connection_id(conn);
382 rx->conn = conn;
383 }
384
385 /* we've got a connection with a free channel and we can now attach the
386 * call to it
387 * - we're holding the transport's client lock
388 * - we're holding a reference on the connection
389 */
390 for (chan = 0; chan < RXRPC_MAXCALLS; chan++)
391 if (!conn->channels[chan])
392 goto found_channel;
393 goto no_free_channels;
394
395found_channel:
396 atomic_inc(&conn->usage);
397 conn->channels[chan] = call;
398 call->conn = conn;
399 call->channel = chan;
400 call->cid = conn->cid | htonl(chan);
401 call->call_id = htonl(++conn->call_counter);
402
403 _net("CONNECT client on conn %d chan %d as call %x",
404 conn->debug_id, chan, ntohl(call->call_id));
405
406 spin_unlock(&trans->client_lock);
407
408 rxrpc_add_call_ID_to_conn(conn, call);
409 _leave(" = 0");
410 return 0;
411
412no_free_channels:
413 spin_unlock(&trans->client_lock);
414 _leave(" = -ENOSR");
415 return -ENOSR;
416}
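/*
 * Note: a socket marked RXRPC_SOCK_EXCLUSIVE_CONN keeps one private client
 * connection cached in rx->conn instead of sharing a bundle; each call takes
 * one of the RXRPC_MAXCALLS channel slots on that connection and -ENOSR is
 * returned once every slot is occupied.
 */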
417
418/*
419 * find a connection for a call
420 * - called in process context with IRQs enabled
421 */
422int rxrpc_connect_call(struct rxrpc_sock *rx,
423 struct rxrpc_transport *trans,
424 struct rxrpc_conn_bundle *bundle,
425 struct rxrpc_call *call,
426 gfp_t gfp)
427{
428 struct rxrpc_connection *conn, *candidate;
429 int chan, ret;
430
431 DECLARE_WAITQUEUE(myself, current);
432
433 _enter("%p,%lx,", rx, call->user_call_ID);
434
435 if (test_bit(RXRPC_SOCK_EXCLUSIVE_CONN, &rx->flags))
436 return rxrpc_connect_exclusive(rx, trans, bundle->service_id,
437 call, gfp);
438
439 spin_lock(&trans->client_lock);
440 for (;;) {
441 /* see if the bundle has a call slot available */
442 if (!list_empty(&bundle->avail_conns)) {
443 _debug("avail");
444 conn = list_entry(bundle->avail_conns.next,
445 struct rxrpc_connection,
446 bundle_link);
447 if (--conn->avail_calls == 0)
448 list_move(&conn->bundle_link,
449 &bundle->busy_conns);
450 atomic_inc(&conn->usage);
451 break;
452 }
453
454 if (!list_empty(&bundle->unused_conns)) {
455 _debug("unused");
456 conn = list_entry(bundle->unused_conns.next,
457 struct rxrpc_connection,
458 bundle_link);
459 atomic_inc(&conn->usage);
460 list_move(&conn->bundle_link, &bundle->avail_conns);
461 break;
462 }
463
464 /* need to allocate a new connection */
465 _debug("get new conn [%d]", bundle->num_conns);
466
467 spin_unlock(&trans->client_lock);
468
469 if (signal_pending(current))
470 goto interrupted;
471
472 if (bundle->num_conns >= 20) {
473 _debug("too many conns");
474
475 if (!(gfp & __GFP_WAIT)) {
476 _leave(" = -EAGAIN");
477 return -EAGAIN;
478 }
479
480 add_wait_queue(&bundle->chanwait, &myself);
481 for (;;) {
482 set_current_state(TASK_INTERRUPTIBLE);
483 if (bundle->num_conns < 20 ||
484 !list_empty(&bundle->unused_conns) ||
485 !list_empty(&bundle->avail_conns))
486 break;
487 if (signal_pending(current))
488 goto interrupted_dequeue;
489 schedule();
490 }
491 remove_wait_queue(&bundle->chanwait, &myself);
492 __set_current_state(TASK_RUNNING);
493 spin_lock(&trans->client_lock);
494 continue;
495 }
496
497 /* not yet present - create a candidate for a new connection and then
498 * redo the check */
499 candidate = rxrpc_alloc_connection(gfp);
500 if (!candidate) {
501 _leave(" = -ENOMEM");
502 return -ENOMEM;
503 }
504
505 candidate->trans = trans;
506 candidate->bundle = bundle;
507 candidate->service_id = bundle->service_id;
508 candidate->epoch = rxrpc_epoch;
509 candidate->in_clientflag = 0;
510 candidate->out_clientflag = RXRPC_CLIENT_INITIATED;
511 candidate->cid = 0;
512 candidate->state = RXRPC_CONN_CLIENT;
513 candidate->avail_calls = RXRPC_MAXCALLS;
514 candidate->security_level = rx->min_sec_level;
515 candidate->key = key_get(rx->key);
516
517 ret = rxrpc_init_client_conn_security(candidate);
518 if (ret < 0) {
519 key_put(candidate->key);
520 kfree(candidate);
521 _leave(" = %d [key]", ret);
522 return ret;
523 }
524
525 write_lock_bh(&rxrpc_connection_lock);
526 list_add_tail(&candidate->link, &rxrpc_connections);
527 write_unlock_bh(&rxrpc_connection_lock);
528
529 spin_lock(&trans->client_lock);
530
531 list_add(&candidate->bundle_link, &bundle->unused_conns);
532 bundle->num_conns++;
533 atomic_inc(&bundle->usage);
534 atomic_inc(&trans->usage);
535
536 _net("CONNECT new %d on TRANS %d",
537 candidate->debug_id, candidate->trans->debug_id);
538
539 rxrpc_assign_connection_id(candidate);
540 if (candidate->security)
541 candidate->security->prime_packet_security(candidate);
542
543 /* leave the candidate lurking in zombie mode attached to the
544 * bundle until we're ready for it */
545 rxrpc_put_connection(candidate);
546 candidate = NULL;
547 }
548
549 /* we've got a connection with a free channel and we can now attach the
550 * call to it
551 * - we're holding the transport's client lock
552 * - we're holding a reference on the connection
553 * - we're holding a reference on the bundle
554 */
555 for (chan = 0; chan < RXRPC_MAXCALLS; chan++)
556 if (!conn->channels[chan])
557 goto found_channel;
558 BUG();
559
560found_channel:
561 conn->channels[chan] = call;
562 call->conn = conn;
563 call->channel = chan;
564 call->cid = conn->cid | htonl(chan);
565 call->call_id = htonl(++conn->call_counter);
566
567 _net("CONNECT client on conn %d chan %d as call %x",
568 conn->debug_id, chan, ntohl(call->call_id));
569
570 spin_unlock(&trans->client_lock);
571
572 rxrpc_add_call_ID_to_conn(conn, call);
573
574 _leave(" = 0");
575 return 0;
576
577interrupted_dequeue:
578 remove_wait_queue(&bundle->chanwait, &myself);
579 __set_current_state(TASK_RUNNING);
580interrupted:
581 _leave(" = -ERESTARTSYS");
582 return -ERESTARTSYS;
583}
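/*
 * Note: the non-exclusive path above prefers a connection that already has a
 * spare channel (avail_conns), then a completely unused connection, and only
 * then allocates a new one.  A bundle is capped at 20 connections; past that
 * the caller sleeps on bundle->chanwait until a channel frees up, or gets
 * -EAGAIN straight away if the gfp mode forbids waiting (__GFP_WAIT clear).
 */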
584
585/*
586 * get a record of an incoming connection
587 */
588struct rxrpc_connection *
589rxrpc_incoming_connection(struct rxrpc_transport *trans,
590 struct rxrpc_header *hdr,
591 gfp_t gfp)
592{
593 struct rxrpc_connection *conn, *candidate = NULL;
594 struct rb_node *p, **pp;
595 const char *new = "old";
596 __be32 epoch;
597 u32 conn_id;
598
599 _enter("");
600
601 ASSERT(hdr->flags & RXRPC_CLIENT_INITIATED);
602
603 epoch = hdr->epoch;
604 conn_id = ntohl(hdr->cid) & RXRPC_CIDMASK;
605
606 /* search the connection list first */
607 read_lock_bh(&trans->conn_lock);
608
609 p = trans->server_conns.rb_node;
610 while (p) {
611 conn = rb_entry(p, struct rxrpc_connection, node);
612
613 _debug("maybe %x", conn->real_conn_id);
614
615 if (epoch < conn->epoch)
616 p = p->rb_left;
617 else if (epoch > conn->epoch)
618 p = p->rb_right;
619 else if (conn_id < conn->real_conn_id)
620 p = p->rb_left;
621 else if (conn_id > conn->real_conn_id)
622 p = p->rb_right;
623 else
624 goto found_extant_connection;
625 }
626 read_unlock_bh(&trans->conn_lock);
627
628 /* not yet present - create a candidate for a new record and then
629 * redo the search */
630 candidate = rxrpc_alloc_connection(gfp);
631 if (!candidate) {
632 _leave(" = -ENOMEM");
633 return ERR_PTR(-ENOMEM);
634 }
635
636 candidate->trans = trans;
637 candidate->epoch = hdr->epoch;
638 candidate->cid = hdr->cid & __constant_cpu_to_be32(RXRPC_CIDMASK);
639 candidate->service_id = hdr->serviceId;
640 candidate->security_ix = hdr->securityIndex;
641 candidate->in_clientflag = RXRPC_CLIENT_INITIATED;
642 candidate->out_clientflag = 0;
643 candidate->real_conn_id = conn_id;
644 candidate->state = RXRPC_CONN_SERVER;
645 if (candidate->service_id)
646 candidate->state = RXRPC_CONN_SERVER_UNSECURED;
647
648 write_lock_bh(&trans->conn_lock);
649
650 pp = &trans->server_conns.rb_node;
651 p = NULL;
652 while (*pp) {
653 p = *pp;
654 conn = rb_entry(p, struct rxrpc_connection, node);
655
656 if (epoch < conn->epoch)
657 pp = &(*pp)->rb_left;
658 else if (epoch > conn->epoch)
659 pp = &(*pp)->rb_right;
660 else if (conn_id < conn->real_conn_id)
661 pp = &(*pp)->rb_left;
662 else if (conn_id > conn->real_conn_id)
663 pp = &(*pp)->rb_right;
664 else
665 goto found_extant_second;
666 }
667
668 /* we can now add the new candidate to the list */
669 conn = candidate;
670 candidate = NULL;
671 rb_link_node(&conn->node, p, pp);
672 rb_insert_color(&conn->node, &trans->server_conns);
673 atomic_inc(&conn->trans->usage);
674
675 write_unlock_bh(&trans->conn_lock);
676
677 write_lock_bh(&rxrpc_connection_lock);
678 list_add_tail(&conn->link, &rxrpc_connections);
679 write_unlock_bh(&rxrpc_connection_lock);
680
681 new = "new";
682
683success:
684 _net("CONNECTION %s %d {%x}", new, conn->debug_id, conn->real_conn_id);
685
686 _leave(" = %p {u=%d}", conn, atomic_read(&conn->usage));
687 return conn;
688
689 /* we found the connection in the list immediately */
690found_extant_connection:
691 if (hdr->securityIndex != conn->security_ix) {
692 read_unlock_bh(&trans->conn_lock);
693 goto security_mismatch;
694 }
695 atomic_inc(&conn->usage);
696 read_unlock_bh(&trans->conn_lock);
697 goto success;
698
699 /* we found the connection on the second time through the list */
700found_extant_second:
701 if (hdr->securityIndex != conn->security_ix) {
702 write_unlock_bh(&trans->conn_lock);
703 goto security_mismatch;
704 }
705 atomic_inc(&conn->usage);
706 write_unlock_bh(&trans->conn_lock);
707 kfree(candidate);
708 goto success;
709
710security_mismatch:
711 kfree(candidate);
712 _leave(" = -EKEYREJECTED");
713 return ERR_PTR(-EKEYREJECTED);
714}
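/*
 * Note: incoming connections are keyed by (epoch, connection ID) in the
 * server_conns tree.  A match on those fields that disagrees on the security
 * index is rejected with -EKEYREJECTED rather than being reused.
 */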
715
716/*
717 * find a connection based on transport and RxRPC connection ID for an incoming
718 * packet
719 */
720struct rxrpc_connection *rxrpc_find_connection(struct rxrpc_transport *trans,
721 struct rxrpc_header *hdr)
722{
723 struct rxrpc_connection *conn;
724 struct rb_node *p;
725 __be32 epoch;
726 u32 conn_id;
727
728 _enter(",{%x,%x}", ntohl(hdr->cid), hdr->flags);
729
730 read_lock_bh(&trans->conn_lock);
731
732 conn_id = ntohl(hdr->cid) & RXRPC_CIDMASK;
733 epoch = hdr->epoch;
734
735 if (hdr->flags & RXRPC_CLIENT_INITIATED)
736 p = trans->server_conns.rb_node;
737 else
738 p = trans->client_conns.rb_node;
739
740 while (p) {
741 conn = rb_entry(p, struct rxrpc_connection, node);
742
743 _debug("maybe %x", conn->real_conn_id);
744
745 if (epoch < conn->epoch)
746 p = p->rb_left;
747 else if (epoch > conn->epoch)
748 p = p->rb_right;
749 else if (conn_id < conn->real_conn_id)
750 p = p->rb_left;
751 else if (conn_id > conn->real_conn_id)
752 p = p->rb_right;
753 else
754 goto found;
755 }
756
757 read_unlock_bh(&trans->conn_lock);
758 _leave(" = NULL");
759 return NULL;
760
761found:
762 atomic_inc(&conn->usage);
763 read_unlock_bh(&trans->conn_lock);
764 _leave(" = %p", conn);
765 return conn;
766}
767
768/*
769 * release a virtual connection
770 */
771void rxrpc_put_connection(struct rxrpc_connection *conn)
772{
773 _enter("%p{u=%d,d=%d}",
774 conn, atomic_read(&conn->usage), conn->debug_id);
775
776 ASSERTCMP(atomic_read(&conn->usage), >, 0);
777
778 conn->put_time = xtime.tv_sec;
779 if (atomic_dec_and_test(&conn->usage)) {
780 _debug("zombie");
781 schedule_delayed_work(&rxrpc_connection_reap, 0);
782 }
783
784 _leave("");
785}
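/*
 * Note: dropping the last reference does not free the connection at once;
 * put_time is stamped on every put and the reaper below is only scheduled
 * when the usage count reaches zero.  rxrpc_connection_reaper() then destroys
 * connections that have sat unused for rxrpc_connection_timeout seconds
 * (10 minutes by default).
 */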
786
787/*
788 * destroy a virtual connection
789 */
790static void rxrpc_destroy_connection(struct rxrpc_connection *conn)
791{
792 _enter("%p{%d}", conn, atomic_read(&conn->usage));
793
794 ASSERTCMP(atomic_read(&conn->usage), ==, 0);
795
796 _net("DESTROY CONN %d", conn->debug_id);
797
798 if (conn->bundle)
799 rxrpc_put_bundle(conn->trans, conn->bundle);
800
801 ASSERT(RB_EMPTY_ROOT(&conn->calls));
802 rxrpc_purge_queue(&conn->rx_queue);
803
804 rxrpc_clear_conn_security(conn);
805 rxrpc_put_transport(conn->trans);
806 kfree(conn);
807 _leave("");
808}
809
810/*
811 * reap dead connections
812 */
813 static void rxrpc_connection_reaper(struct work_struct *work)
814{
815 struct rxrpc_connection *conn, *_p;
816 unsigned long now, earliest, reap_time;
817
818 LIST_HEAD(graveyard);
819
820 _enter("");
821
822 now = xtime.tv_sec;
823 earliest = ULONG_MAX;
824
825 write_lock_bh(&rxrpc_connection_lock);
826 list_for_each_entry_safe(conn, _p, &rxrpc_connections, link) {
827 _debug("reap CONN %d { u=%d,t=%ld }",
828 conn->debug_id, atomic_read(&conn->usage),
829 (long) now - (long) conn->put_time);
830
831 if (likely(atomic_read(&conn->usage) > 0))
832 continue;
833
834 spin_lock(&conn->trans->client_lock);
835 write_lock(&conn->trans->conn_lock);
836 reap_time = conn->put_time + rxrpc_connection_timeout;
837
838 if (atomic_read(&conn->usage) > 0) {
839 ;
840 } else if (reap_time <= now) {
841 list_move_tail(&conn->link, &graveyard);
842 if (conn->out_clientflag)
843 rb_erase(&conn->node,
844 &conn->trans->client_conns);
845 else
846 rb_erase(&conn->node,
847 &conn->trans->server_conns);
848 if (conn->bundle) {
849 list_del_init(&conn->bundle_link);
850 conn->bundle->num_conns--;
851 }
852
853 } else if (reap_time < earliest) {
854 earliest = reap_time;
855 }
856
857 write_unlock(&conn->trans->conn_lock);
858 spin_unlock(&conn->trans->client_lock);
859 }
860 write_unlock_bh(&rxrpc_connection_lock);
861
862 if (earliest != ULONG_MAX) {
863 _debug("reschedule reaper %ld", (long) earliest - now);
864 ASSERTCMP(earliest, >, now);
865 schedule_delayed_work(&rxrpc_connection_reap,
866 (earliest - now) * HZ);
867 }
868
869 /* then destroy all those pulled out */
870 while (!list_empty(&graveyard)) {
871 conn = list_entry(graveyard.next, struct rxrpc_connection,
872 link);
873 list_del_init(&conn->link);
874
875 ASSERTCMP(atomic_read(&conn->usage), ==, 0);
876 rxrpc_destroy_connection(conn);
877 }
878
879 _leave("");
880}
881
882/*
883 * preemptively destroy all the connection records rather than waiting for them
884 * to time out
885 */
886void __exit rxrpc_destroy_all_connections(void)
887{
888 _enter("");
889
890 rxrpc_connection_timeout = 0;
891 cancel_delayed_work(&rxrpc_connection_reap);
892 schedule_delayed_work(&rxrpc_connection_reap, 0);
893
894 _leave("");
895}