path: root/net/rxrpc/ar-input.c
author	David Howells <dhowells@redhat.com>	2007-04-26 18:48:28 -0400
committer	David S. Miller <davem@davemloft.net>	2007-04-26 18:48:28 -0400
commit	17926a79320afa9b95df6b977b40cca6d8713cea (patch)
tree	5cedff43b69520ad17b86783d3752053686ec99c /net/rxrpc/ar-input.c
parent	e19dff1fdd99a25819af74cf0710e147fff4fd3a (diff)
[AF_RXRPC]: Provide secure RxRPC sockets for use by userspace and kernel both
Provide AF_RXRPC sockets that can be used to talk to AFS servers, or serve answers to AFS clients.  KerberosIV security is fully supported.  The patches and some example test programs can be found in:

	http://people.redhat.com/~dhowells/rxrpc/

This will eventually replace the old implementation of kernel-only RxRPC currently resident in net/rxrpc/.

Signed-off-by: David Howells <dhowells@redhat.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
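As a rough sketch of how such a socket is driven from userspace (hedged: modelled on the example programs referenced above and on the userspace definitions added by this patch series; the service ID, port and peer address are placeholders, and names such as sockaddr_rxrpc, SOL_RXRPC and RXRPC_USER_CALL_ID should be checked against linux/rxrpc.h):

/* Hedged sketch of an AF_RXRPC client, not a real AFS client.  Constant and
 * structure names come from the userspace header added by this series
 * (linux/rxrpc.h); the service ID, port and peer address are placeholders. */
#include <stdio.h>
#include <string.h>
#include <unistd.h>
#include <sys/socket.h>
#include <arpa/inet.h>
#include <linux/rxrpc.h>

#ifndef AF_RXRPC
#define AF_RXRPC 33		/* may be absent from older libc headers */
#endif

int main(void)
{
	int fd = socket(AF_RXRPC, SOCK_DGRAM, PF_INET);
	if (fd < 0) {
		perror("socket(AF_RXRPC)");
		return 1;
	}

	/* Nominate the peer to call (placeholder address and service ID). */
	struct sockaddr_rxrpc srx;
	memset(&srx, 0, sizeof(srx));
	srx.srx_family = AF_RXRPC;
	srx.srx_service = 52;			/* placeholder service ID */
	srx.transport_type = SOCK_DGRAM;
	srx.transport_len = sizeof(srx.transport.sin);
	srx.transport.sin.sin_family = AF_INET;
	srx.transport.sin.sin_port = htons(7003);
	inet_pton(AF_INET, "192.0.2.1", &srx.transport.sin.sin_addr);

	if (connect(fd, (struct sockaddr *)&srx, sizeof(srx)) < 0) {
		perror("connect");
		return 1;
	}

	/* Start a call: the request data goes out via sendmsg(), tagged with a
	 * user call ID so the reply can be matched back to this call. */
	unsigned long call_id = 0x12345;		/* arbitrary user tag */
	unsigned char request[4] = { 0, 0, 0, 1 };	/* placeholder RPC body */

	union {
		char buf[CMSG_SPACE(sizeof(call_id))];
		struct cmsghdr align;
	} control;
	struct iovec iov = { .iov_base = request, .iov_len = sizeof(request) };
	struct msghdr msg;
	memset(&msg, 0, sizeof(msg));
	msg.msg_iov = &iov;
	msg.msg_iovlen = 1;
	msg.msg_control = control.buf;
	msg.msg_controllen = sizeof(control.buf);

	struct cmsghdr *cmsg = CMSG_FIRSTHDR(&msg);
	cmsg->cmsg_level = SOL_RXRPC;
	cmsg->cmsg_type = RXRPC_USER_CALL_ID;
	cmsg->cmsg_len = CMSG_LEN(sizeof(call_id));
	memcpy(CMSG_DATA(cmsg), &call_id, sizeof(call_id));

	if (sendmsg(fd, &msg, 0) < 0)
		perror("sendmsg");

	close(fd);
	return 0;
}

The reply then comes back through recvmsg() on the same socket; the packets it wakes up for are the ones that rxrpc_queue_rcv_skb() in the file below places on the socket's receive queue before calling sk_data_ready().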
Diffstat (limited to 'net/rxrpc/ar-input.c')
-rw-r--r--	net/rxrpc/ar-input.c	791
1 file changed, 791 insertions, 0 deletions
diff --git a/net/rxrpc/ar-input.c b/net/rxrpc/ar-input.c
new file mode 100644
index 000000000000..323c3454561c
--- /dev/null
+++ b/net/rxrpc/ar-input.c
@@ -0,0 +1,791 @@
1/* RxRPC packet reception
2 *
3 * Copyright (C) 2007 Red Hat, Inc. All Rights Reserved.
4 * Written by David Howells (dhowells@redhat.com)
5 *
6 * This program is free software; you can redistribute it and/or
7 * modify it under the terms of the GNU General Public License
8 * as published by the Free Software Foundation; either version
9 * 2 of the License, or (at your option) any later version.
10 */
11
12#include <linux/module.h>
13#include <linux/net.h>
14#include <linux/skbuff.h>
15#include <linux/errqueue.h>
16#include <linux/udp.h>
17#include <linux/in.h>
18#include <linux/in6.h>
19#include <linux/icmp.h>
20#include <net/sock.h>
21#include <net/af_rxrpc.h>
22#include <net/ip.h>
23#include "ar-internal.h"
24
25unsigned long rxrpc_ack_timeout = 1;
26
27const char *rxrpc_pkts[] = {
28 "?00",
29 "DATA", "ACK", "BUSY", "ABORT", "ACKALL", "CHALL", "RESP", "DEBUG",
30 "?09", "?10", "?11", "?12", "?13", "?14", "?15"
31};
32
33/*
34 * queue a packet for recvmsg to pass to userspace
35 * - the caller must hold a lock on call->lock
36 * - must not be called with interrupts disabled (sk_filter() disables BH's)
37 * - eats the packet whether successful or not
38 * - there must be just one reference to the packet, which the caller passes to
39 * this function
40 */
41int rxrpc_queue_rcv_skb(struct rxrpc_call *call, struct sk_buff *skb,
42 bool force, bool terminal)
43{
44 struct rxrpc_skb_priv *sp;
45 struct sock *sk;
46 int skb_len, ret;
47
48 _enter(",,%d,%d", force, terminal);
49
50 ASSERT(!irqs_disabled());
51
52 sp = rxrpc_skb(skb);
53 ASSERTCMP(sp->call, ==, call);
54
55 /* if we've already posted the terminal message for a call, then we
56 * don't post any more */
57 if (test_bit(RXRPC_CALL_TERMINAL_MSG, &call->flags)) {
58 _debug("already terminated");
59 ASSERTCMP(call->state, >=, RXRPC_CALL_COMPLETE);
60 skb->destructor = NULL;
61 sp->call = NULL;
62 rxrpc_put_call(call);
63 rxrpc_free_skb(skb);
64 return 0;
65 }
66
67 sk = &call->socket->sk;
68
69 if (!force) {
70 /* cast skb->rcvbuf to unsigned... It's pointless, but
71 * reduces number of warnings when compiling with -W
72 * --ANK */
73// ret = -ENOBUFS;
74// if (atomic_read(&sk->sk_rmem_alloc) + skb->truesize >=
75// (unsigned) sk->sk_rcvbuf)
76// goto out;
77
78 ret = sk_filter(sk, skb);
79 if (ret < 0)
80 goto out;
81 }
82
83 spin_lock_bh(&sk->sk_receive_queue.lock);
84 if (!test_bit(RXRPC_CALL_TERMINAL_MSG, &call->flags) &&
85 !test_bit(RXRPC_CALL_RELEASED, &call->flags) &&
86 call->socket->sk.sk_state != RXRPC_CLOSE) {
87 skb->destructor = rxrpc_packet_destructor;
88 skb->dev = NULL;
89 skb->sk = sk;
90 atomic_add(skb->truesize, &sk->sk_rmem_alloc);
91
92 /* Cache the SKB length before we tack it onto the receive
93 * queue. Once it is added it no longer belongs to us and
94 * may be freed by other threads of control pulling packets
95 * from the queue.
96 */
97 skb_len = skb->len;
98
99 _net("post skb %p", skb);
100 __skb_queue_tail(&sk->sk_receive_queue, skb);
101 spin_unlock_bh(&sk->sk_receive_queue.lock);
102
103 if (!sock_flag(sk, SOCK_DEAD))
104 sk->sk_data_ready(sk, skb_len);
105
106 if (terminal) {
107 _debug("<<<< TERMINAL MESSAGE >>>>");
108 set_bit(RXRPC_CALL_TERMINAL_MSG, &call->flags);
109 }
110
111 skb = NULL;
112 } else {
113 spin_unlock_bh(&sk->sk_receive_queue.lock);
114 }
115 ret = 0;
116
117out:
118 /* release the socket buffer */
119 if (skb) {
120 skb->destructor = NULL;
121 sp->call = NULL;
122 rxrpc_put_call(call);
123 rxrpc_free_skb(skb);
124 }
125
126 _leave(" = %d", ret);
127 return ret;
128}
129
130/*
131 * process a DATA packet, posting the packet to the appropriate queue
132 * - eats the packet if successful
133 */
134static int rxrpc_fast_process_data(struct rxrpc_call *call,
135 struct sk_buff *skb, u32 seq)
136{
137 struct rxrpc_skb_priv *sp;
138 bool terminal;
139 int ret, ackbit, ack;
140
141 _enter("{%u,%u},,{%u}", call->rx_data_post, call->rx_first_oos, seq);
142
143 sp = rxrpc_skb(skb);
144 ASSERTCMP(sp->call, ==, NULL);
145
146 spin_lock(&call->lock);
147
148 if (call->state > RXRPC_CALL_COMPLETE)
149 goto discard;
150
151 ASSERTCMP(call->rx_data_expect, >=, call->rx_data_post);
152 ASSERTCMP(call->rx_data_post, >=, call->rx_data_recv);
153 ASSERTCMP(call->rx_data_recv, >=, call->rx_data_eaten);
154
155 if (seq < call->rx_data_post) {
156 _debug("dup #%u [-%u]", seq, call->rx_data_post);
157 ack = RXRPC_ACK_DUPLICATE;
158 ret = -ENOBUFS;
159 goto discard_and_ack;
160 }
161
162 /* we may already have the packet in the out of sequence queue */
163 ackbit = seq - (call->rx_data_eaten + 1);
164 ASSERTCMP(ackbit, >=, 0);
165 if (__test_and_set_bit(ackbit, &call->ackr_window)) {
166 _debug("dup oos #%u [%u,%u]",
167 seq, call->rx_data_eaten, call->rx_data_post);
168 ack = RXRPC_ACK_DUPLICATE;
169 goto discard_and_ack;
170 }
171
172 if (seq >= call->ackr_win_top) {
173 _debug("exceed #%u [%u]", seq, call->ackr_win_top);
174 __clear_bit(ackbit, &call->ackr_window);
175 ack = RXRPC_ACK_EXCEEDS_WINDOW;
176 goto discard_and_ack;
177 }
178
179 if (seq == call->rx_data_expect) {
180 clear_bit(RXRPC_CALL_EXPECT_OOS, &call->flags);
181 call->rx_data_expect++;
182 } else if (seq > call->rx_data_expect) {
183 _debug("oos #%u [%u]", seq, call->rx_data_expect);
184 call->rx_data_expect = seq + 1;
185 if (test_and_set_bit(RXRPC_CALL_EXPECT_OOS, &call->flags)) {
186 ack = RXRPC_ACK_OUT_OF_SEQUENCE;
187 goto enqueue_and_ack;
188 }
189 goto enqueue_packet;
190 }
191
192 if (seq != call->rx_data_post) {
193 _debug("ahead #%u [%u]", seq, call->rx_data_post);
194 goto enqueue_packet;
195 }
196
197 if (test_bit(RXRPC_CALL_RCVD_LAST, &call->flags))
198 goto protocol_error;
199
200 /* if the packet needs security things doing to it, then it goes down
201 * the slow path */
202 if (call->conn->security)
203 goto enqueue_packet;
204
205 sp->call = call;
206 rxrpc_get_call(call);
207 terminal = ((sp->hdr.flags & RXRPC_LAST_PACKET) &&
208 !(sp->hdr.flags & RXRPC_CLIENT_INITIATED));
209 ret = rxrpc_queue_rcv_skb(call, skb, false, terminal);
210 if (ret < 0) {
211 if (ret == -ENOMEM || ret == -ENOBUFS) {
212 __clear_bit(ackbit, &call->ackr_window);
213 ack = RXRPC_ACK_NOSPACE;
214 goto discard_and_ack;
215 }
216 goto out;
217 }
218
219 skb = NULL;
220
221 _debug("post #%u", seq);
222 ASSERTCMP(call->rx_data_post, ==, seq);
223 call->rx_data_post++;
224
225 if (sp->hdr.flags & RXRPC_LAST_PACKET)
226 set_bit(RXRPC_CALL_RCVD_LAST, &call->flags);
227
228 /* if we've reached an out of sequence packet then we need to drain
229 * that queue into the socket Rx queue now */
230 if (call->rx_data_post == call->rx_first_oos) {
231 _debug("drain rx oos now");
232 read_lock(&call->state_lock);
233 if (call->state < RXRPC_CALL_COMPLETE &&
234 !test_and_set_bit(RXRPC_CALL_DRAIN_RX_OOS, &call->events))
235 schedule_work(&call->processor);
236 read_unlock(&call->state_lock);
237 }
238
239 spin_unlock(&call->lock);
240 atomic_inc(&call->ackr_not_idle);
241 rxrpc_propose_ACK(call, RXRPC_ACK_DELAY, sp->hdr.serial, false);
242 _leave(" = 0 [posted]");
243 return 0;
244
245protocol_error:
246 ret = -EBADMSG;
247out:
248 spin_unlock(&call->lock);
249 _leave(" = %d", ret);
250 return ret;
251
252discard_and_ack:
253 _debug("discard and ACK packet %p", skb);
254 __rxrpc_propose_ACK(call, ack, sp->hdr.serial, true);
255discard:
256 spin_unlock(&call->lock);
257 rxrpc_free_skb(skb);
258 _leave(" = 0 [discarded]");
259 return 0;
260
261enqueue_and_ack:
262 __rxrpc_propose_ACK(call, ack, sp->hdr.serial, true);
263enqueue_packet:
264 _net("defer skb %p", skb);
265 spin_unlock(&call->lock);
266 skb_queue_tail(&call->rx_queue, skb);
267 atomic_inc(&call->ackr_not_idle);
268 read_lock(&call->state_lock);
269 if (call->state < RXRPC_CALL_DEAD)
270 schedule_work(&call->processor);
271 read_unlock(&call->state_lock);
272 _leave(" = 0 [queued]");
273 return 0;
274}
275
276/*
277 * assume an implicit ACKALL of the transmission phase of a client socket upon
278 * reception of the first reply packet
279 */
280static void rxrpc_assume_implicit_ackall(struct rxrpc_call *call, u32 serial)
281{
282 write_lock_bh(&call->state_lock);
283
284 switch (call->state) {
285 case RXRPC_CALL_CLIENT_AWAIT_REPLY:
286 call->state = RXRPC_CALL_CLIENT_RECV_REPLY;
287 call->acks_latest = serial;
288
289 _debug("implicit ACKALL %%%u", call->acks_latest);
290 set_bit(RXRPC_CALL_RCVD_ACKALL, &call->events);
291 write_unlock_bh(&call->state_lock);
292
293 if (try_to_del_timer_sync(&call->resend_timer) >= 0) {
294 clear_bit(RXRPC_CALL_RESEND_TIMER, &call->events);
295 clear_bit(RXRPC_CALL_RESEND, &call->events);
296 clear_bit(RXRPC_CALL_RUN_RTIMER, &call->flags);
297 }
298 break;
299
300 default:
301 write_unlock_bh(&call->state_lock);
302 break;
303 }
304}
305
306/*
307 * post an incoming packet to the nominated call to deal with
308 * - must get rid of the sk_buff, either by freeing it or by queuing it
309 */
310void rxrpc_fast_process_packet(struct rxrpc_call *call, struct sk_buff *skb)
311{
312 struct rxrpc_skb_priv *sp = rxrpc_skb(skb);
313 __be32 _abort_code;
314 u32 serial, hi_serial, seq, abort_code;
315
316 _enter("%p,%p", call, skb);
317
318 ASSERT(!irqs_disabled());
319
320#if 0 // INJECT RX ERROR
321 if (sp->hdr.type == RXRPC_PACKET_TYPE_DATA) {
322 static int skip = 0;
323 if (++skip == 3) {
324 printk("DROPPED 3RD PACKET!!!!!!!!!!!!!\n");
325 skip = 0;
326 goto free_packet;
327 }
328 }
329#endif
330
331 /* track the latest serial number on this connection for ACK packet
332 * information */
333 serial = ntohl(sp->hdr.serial);
334 hi_serial = atomic_read(&call->conn->hi_serial);
335 while (serial > hi_serial)
336 hi_serial = atomic_cmpxchg(&call->conn->hi_serial, hi_serial,
337 serial);
338
339 /* request ACK generation for any ACK or DATA packet that requests
340 * it */
341 if (sp->hdr.flags & RXRPC_REQUEST_ACK) {
342 _proto("ACK Requested on %%%u", serial);
343 rxrpc_propose_ACK(call, RXRPC_ACK_REQUESTED, sp->hdr.serial,
344 !(sp->hdr.flags & RXRPC_MORE_PACKETS));
345 }
346
347 switch (sp->hdr.type) {
348 case RXRPC_PACKET_TYPE_ABORT:
349 _debug("abort");
350
351 if (skb_copy_bits(skb, 0, &_abort_code,
352 sizeof(_abort_code)) < 0)
353 goto protocol_error;
354
355 abort_code = ntohl(_abort_code);
356 _proto("Rx ABORT %%%u { %x }", serial, abort_code);
357
358 write_lock_bh(&call->state_lock);
359 if (call->state < RXRPC_CALL_COMPLETE) {
360 call->state = RXRPC_CALL_REMOTELY_ABORTED;
361 call->abort_code = abort_code;
362 set_bit(RXRPC_CALL_RCVD_ABORT, &call->events);
363 schedule_work(&call->processor);
364 }
365 goto free_packet_unlock;
366
367 case RXRPC_PACKET_TYPE_BUSY:
368 _proto("Rx BUSY %%%u", serial);
369
370 if (call->conn->out_clientflag)
371 goto protocol_error;
372
373 write_lock_bh(&call->state_lock);
374 switch (call->state) {
375 case RXRPC_CALL_CLIENT_SEND_REQUEST:
376 call->state = RXRPC_CALL_SERVER_BUSY;
377 set_bit(RXRPC_CALL_RCVD_BUSY, &call->events);
378 schedule_work(&call->processor);
379 case RXRPC_CALL_SERVER_BUSY:
380 goto free_packet_unlock;
381 default:
382 goto protocol_error_locked;
383 }
384
385 default:
386 _proto("Rx %s %%%u", rxrpc_pkts[sp->hdr.type], serial);
387 goto protocol_error;
388
389 case RXRPC_PACKET_TYPE_DATA:
390 seq = ntohl(sp->hdr.seq);
391
392 _proto("Rx DATA %%%u { #%u }", serial, seq);
393
394 if (seq == 0)
395 goto protocol_error;
396
397 call->ackr_prev_seq = sp->hdr.seq;
398
399 /* received data implicitly ACKs all of the request packets we
400 * sent when we're acting as a client */
401 if (call->state == RXRPC_CALL_CLIENT_AWAIT_REPLY)
402 rxrpc_assume_implicit_ackall(call, serial);
403
404 switch (rxrpc_fast_process_data(call, skb, seq)) {
405 case 0:
406 skb = NULL;
407 goto done;
408
409 default:
410 BUG();
411
412 /* data packet received beyond the last packet */
413 case -EBADMSG:
414 goto protocol_error;
415 }
416
417 case RXRPC_PACKET_TYPE_ACK:
418 /* ACK processing is done in process context */
419 read_lock_bh(&call->state_lock);
420 if (call->state < RXRPC_CALL_DEAD) {
421 skb_queue_tail(&call->rx_queue, skb);
422 schedule_work(&call->processor);
423 skb = NULL;
424 }
425 read_unlock_bh(&call->state_lock);
426 goto free_packet;
427 }
428
429protocol_error:
430 _debug("protocol error");
431 write_lock_bh(&call->state_lock);
432protocol_error_locked:
433 if (call->state <= RXRPC_CALL_COMPLETE) {
434 call->state = RXRPC_CALL_LOCALLY_ABORTED;
435 call->abort_code = RX_PROTOCOL_ERROR;
436 set_bit(RXRPC_CALL_ABORT, &call->events);
437 schedule_work(&call->processor);
438 }
439free_packet_unlock:
440 write_unlock_bh(&call->state_lock);
441free_packet:
442 rxrpc_free_skb(skb);
443done:
444 _leave("");
445}
446
447/*
448 * split up a jumbo data packet
449 */
450static void rxrpc_process_jumbo_packet(struct rxrpc_call *call,
451 struct sk_buff *jumbo)
452{
453 struct rxrpc_jumbo_header jhdr;
454 struct rxrpc_skb_priv *sp;
455 struct sk_buff *part;
456
457 _enter(",{%u,%u}", jumbo->data_len, jumbo->len);
458
459 sp = rxrpc_skb(jumbo);
460
461 do {
462 sp->hdr.flags &= ~RXRPC_JUMBO_PACKET;
463
464 /* make a clone to represent the first subpacket in what's left
465 * of the jumbo packet */
466 part = skb_clone(jumbo, GFP_ATOMIC);
467 if (!part) {
468 /* simply ditch the tail in the event of ENOMEM */
469 pskb_trim(jumbo, RXRPC_JUMBO_DATALEN);
470 break;
471 }
472 rxrpc_new_skb(part);
473
474 pskb_trim(part, RXRPC_JUMBO_DATALEN);
475
476 if (!pskb_pull(jumbo, RXRPC_JUMBO_DATALEN))
477 goto protocol_error;
478
479 if (skb_copy_bits(jumbo, 0, &jhdr, sizeof(jhdr)) < 0)
480 goto protocol_error;
481 if (!pskb_pull(jumbo, sizeof(jhdr)))
482 BUG();
483
484 sp->hdr.seq = htonl(ntohl(sp->hdr.seq) + 1);
485 sp->hdr.serial = htonl(ntohl(sp->hdr.serial) + 1);
486 sp->hdr.flags = jhdr.flags;
487 sp->hdr._rsvd = jhdr._rsvd;
488
489 _proto("Rx DATA Jumbo %%%u", ntohl(sp->hdr.serial) - 1);
490
491 rxrpc_fast_process_packet(call, part);
492 part = NULL;
493
494 } while (sp->hdr.flags & RXRPC_JUMBO_PACKET);
495
496 rxrpc_fast_process_packet(call, jumbo);
497 _leave("");
498 return;
499
500protocol_error:
501 _debug("protocol error");
502 rxrpc_free_skb(part);
503 rxrpc_free_skb(jumbo);
504 write_lock_bh(&call->state_lock);
505 if (call->state <= RXRPC_CALL_COMPLETE) {
506 call->state = RXRPC_CALL_LOCALLY_ABORTED;
507 call->abort_code = RX_PROTOCOL_ERROR;
508 set_bit(RXRPC_CALL_ABORT, &call->events);
509 schedule_work(&call->processor);
510 }
511 write_unlock_bh(&call->state_lock);
512 _leave("");
513}
514
515/*
516 * post an incoming packet to the appropriate call/socket to deal with
517 * - must get rid of the sk_buff, either by freeing it or by queuing it
518 */
519static void rxrpc_post_packet_to_call(struct rxrpc_connection *conn,
520 struct sk_buff *skb)
521{
522 struct rxrpc_skb_priv *sp;
523 struct rxrpc_call *call;
524 struct rb_node *p;
525 __be32 call_id;
526
527 _enter("%p,%p", conn, skb);
528
529 read_lock_bh(&conn->lock);
530
531 sp = rxrpc_skb(skb);
532
533 /* look at extant calls by channel number first */
534 call = conn->channels[ntohl(sp->hdr.cid) & RXRPC_CHANNELMASK];
535 if (!call || call->call_id != sp->hdr.callNumber)
536 goto call_not_extant;
537
538 _debug("extant call [%d]", call->state);
539 ASSERTCMP(call->conn, ==, conn);
540
541 read_lock(&call->state_lock);
542 switch (call->state) {
543 case RXRPC_CALL_LOCALLY_ABORTED:
544 if (!test_and_set_bit(RXRPC_CALL_ABORT, &call->events))
545 schedule_work(&call->processor);
546 case RXRPC_CALL_REMOTELY_ABORTED:
547 case RXRPC_CALL_NETWORK_ERROR:
548 case RXRPC_CALL_DEAD:
549 goto free_unlock;
550 default:
551 break;
552 }
553
554 read_unlock(&call->state_lock);
555 rxrpc_get_call(call);
556 read_unlock_bh(&conn->lock);
557
558 if (sp->hdr.type == RXRPC_PACKET_TYPE_DATA &&
559 sp->hdr.flags & RXRPC_JUMBO_PACKET)
560 rxrpc_process_jumbo_packet(call, skb);
561 else
562 rxrpc_fast_process_packet(call, skb);
563
564 rxrpc_put_call(call);
565 goto done;
566
567call_not_extant:
568 /* search the completed calls in case what we're dealing with is
569 * there */
570 _debug("call not extant");
571
572 call_id = sp->hdr.callNumber;
573 p = conn->calls.rb_node;
574 while (p) {
575 call = rb_entry(p, struct rxrpc_call, conn_node);
576
577 if (call_id < call->call_id)
578 p = p->rb_left;
579 else if (call_id > call->call_id)
580 p = p->rb_right;
581 else
582 goto found_completed_call;
583 }
584
585dead_call:
586 /* it's either a really old call that we no longer remember or it's a
587 * new incoming call */
588 read_unlock_bh(&conn->lock);
589
590 if (sp->hdr.flags & RXRPC_CLIENT_INITIATED &&
591 sp->hdr.seq == __constant_cpu_to_be32(1)) {
592 _debug("incoming call");
593 skb_queue_tail(&conn->trans->local->accept_queue, skb);
594 schedule_work(&conn->trans->local->acceptor);
595 goto done;
596 }
597
598 _debug("dead call");
599 skb->priority = RX_CALL_DEAD;
600 rxrpc_reject_packet(conn->trans->local, skb);
601 goto done;
602
603 /* resend last packet of a completed call
604 * - client calls may have been aborted or ACK'd
605 * - server calls may have been aborted
606 */
607found_completed_call:
608 _debug("completed call");
609
610 if (atomic_read(&call->usage) == 0)
611 goto dead_call;
612
613 /* synchronise any state changes */
614 read_lock(&call->state_lock);
615 ASSERTIFCMP(call->state != RXRPC_CALL_CLIENT_FINAL_ACK,
616 call->state, >=, RXRPC_CALL_COMPLETE);
617
618 if (call->state == RXRPC_CALL_LOCALLY_ABORTED ||
619 call->state == RXRPC_CALL_REMOTELY_ABORTED ||
620 call->state == RXRPC_CALL_DEAD) {
621 read_unlock(&call->state_lock);
622 goto dead_call;
623 }
624
625 if (call->conn->in_clientflag) {
626 read_unlock(&call->state_lock);
627 goto dead_call; /* complete server call */
628 }
629
630 _debug("final ack again");
631 rxrpc_get_call(call);
632 set_bit(RXRPC_CALL_ACK_FINAL, &call->events);
633 schedule_work(&call->processor);
634
635free_unlock:
636 read_unlock(&call->state_lock);
637 read_unlock_bh(&conn->lock);
638 rxrpc_free_skb(skb);
639done:
640 _leave("");
641}
642
643/*
644 * post connection-level events to the connection
645 * - this includes challenges, responses and some aborts
646 */
647static void rxrpc_post_packet_to_conn(struct rxrpc_connection *conn,
648 struct sk_buff *skb)
649{
650 _enter("%p,%p", conn, skb);
651
652 atomic_inc(&conn->usage);
653 skb_queue_tail(&conn->rx_queue, skb);
654 schedule_work(&conn->processor);
655}
656
657/*
658 * handle data received on the local endpoint
659 * - may be called in interrupt context
660 */
661void rxrpc_data_ready(struct sock *sk, int count)
662{
663 struct rxrpc_connection *conn;
664 struct rxrpc_transport *trans;
665 struct rxrpc_skb_priv *sp;
666 struct rxrpc_local *local;
667 struct rxrpc_peer *peer;
668 struct sk_buff *skb;
669 int ret;
670
671 _enter("%p, %d", sk, count);
672
673 ASSERT(!irqs_disabled());
674
675 read_lock_bh(&rxrpc_local_lock);
676 local = sk->sk_user_data;
677 if (local && atomic_read(&local->usage) > 0)
678 rxrpc_get_local(local);
679 else
680 local = NULL;
681 read_unlock_bh(&rxrpc_local_lock);
682 if (!local) {
683 _leave(" [local dead]");
684 return;
685 }
686
687 skb = skb_recv_datagram(sk, 0, 1, &ret);
688 if (!skb) {
689 rxrpc_put_local(local);
690 if (ret == -EAGAIN)
691 return;
692 _debug("UDP socket error %d", ret);
693 return;
694 }
695
696 rxrpc_new_skb(skb);
697
698 _net("recv skb %p", skb);
699
700 /* we'll probably need to checksum it (didn't call sock_recvmsg) */
701 if (skb_checksum_complete(skb)) {
702 rxrpc_free_skb(skb);
703 rxrpc_put_local(local);
704 _leave(" [CSUM failed]");
705 return;
706 }
707
708 /* the socket buffer we have is owned by UDP, with UDP's data all over
709 * it, but we really want our own */
710 skb_orphan(skb);
711 sp = rxrpc_skb(skb);
712 memset(sp, 0, sizeof(*sp));
713
714 _net("Rx UDP packet from %08x:%04hu",
715 ntohl(ip_hdr(skb)->saddr), ntohs(udp_hdr(skb)->source));
716
717 /* dig out the RxRPC connection details */
718 if (skb_copy_bits(skb, sizeof(struct udphdr), &sp->hdr,
719 sizeof(sp->hdr)) < 0)
720 goto bad_message;
721 if (!pskb_pull(skb, sizeof(struct udphdr) + sizeof(sp->hdr)))
722 BUG();
723
724 _net("Rx RxRPC %s ep=%x call=%x:%x",
725 sp->hdr.flags & RXRPC_CLIENT_INITIATED ? "ToServer" : "ToClient",
726 ntohl(sp->hdr.epoch),
727 ntohl(sp->hdr.cid),
728 ntohl(sp->hdr.callNumber));
729
730 if (sp->hdr.type == 0 || sp->hdr.type >= RXRPC_N_PACKET_TYPES) {
731 _proto("Rx Bad Packet Type %u", sp->hdr.type);
732 goto bad_message;
733 }
734
735 if (sp->hdr.type == RXRPC_PACKET_TYPE_DATA &&
736 (sp->hdr.callNumber == 0 || sp->hdr.seq == 0))
737 goto bad_message;
738
739 peer = rxrpc_find_peer(local, ip_hdr(skb)->saddr, udp_hdr(skb)->source);
740 if (IS_ERR(peer))
741 goto cant_route_call;
742
743 trans = rxrpc_find_transport(local, peer);
744 rxrpc_put_peer(peer);
745 if (!trans)
746 goto cant_route_call;
747
748 conn = rxrpc_find_connection(trans, &sp->hdr);
749 rxrpc_put_transport(trans);
750 if (!conn)
751 goto cant_route_call;
752
753 _debug("CONN %p {%d}", conn, conn->debug_id);
754
755 if (sp->hdr.callNumber == 0)
756 rxrpc_post_packet_to_conn(conn, skb);
757 else
758 rxrpc_post_packet_to_call(conn, skb);
759 rxrpc_put_connection(conn);
760 rxrpc_put_local(local);
761 return;
762
763cant_route_call:
764 _debug("can't route call");
765 if (sp->hdr.flags & RXRPC_CLIENT_INITIATED &&
766 sp->hdr.type == RXRPC_PACKET_TYPE_DATA) {
767 if (sp->hdr.seq == __constant_cpu_to_be32(1)) {
768 _debug("first packet");
769 skb_queue_tail(&local->accept_queue, skb);
770 schedule_work(&local->acceptor);
771 rxrpc_put_local(local);
772 _leave(" [incoming]");
773 return;
774 }
775 skb->priority = RX_INVALID_OPERATION;
776 } else {
777 skb->priority = RX_CALL_DEAD;
778 }
779
780 _debug("reject");
781 rxrpc_reject_packet(local, skb);
782 rxrpc_put_local(local);
783 _leave(" [no call]");
784 return;
785
786bad_message:
787 skb->priority = RX_PROTOCOL_ERROR;
788 rxrpc_reject_packet(local, skb);
789 rxrpc_put_local(local);
790 _leave(" [badmsg]");
791}