path: root/net/rxrpc/ar-ack.c
author	David Howells <dhowells@redhat.com>	2007-04-26 18:48:28 -0400
committer	David S. Miller <davem@davemloft.net>	2007-04-26 18:48:28 -0400
commit	17926a79320afa9b95df6b977b40cca6d8713cea (patch)
tree	5cedff43b69520ad17b86783d3752053686ec99c	/net/rxrpc/ar-ack.c
parent	e19dff1fdd99a25819af74cf0710e147fff4fd3a (diff)
[AF_RXRPC]: Provide secure RxRPC sockets for use by userspace and kernel both
Provide AF_RXRPC sockets that can be used to talk to AFS servers, or serve
answers to AFS clients.  KerberosIV security is fully supported.  The patches
and some example test programs can be found in:

	http://people.redhat.com/~dhowells/rxrpc/

This will eventually replace the old implementation of kernel-only RxRPC
currently resident in net/rxrpc/.

Signed-off-by: David Howells <dhowells@redhat.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
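As a quick illustration of the userspace API this patch introduces, here is a
minimal client-socket sketch along the lines of the example programs linked
above (the uapi header path and error handling are glossed over; the
connect()/sendmsg() call setup that would follow is omitted):

	#include <sys/socket.h>
	#include <netinet/in.h>
	#include <linux/rxrpc.h>	/* struct sockaddr_rxrpc (assumed path) */

	int rxrpc_client_socket(void)
	{
		struct sockaddr_rxrpc srx = {
			.srx_family	= AF_RXRPC,
			.srx_service	= 0,	/* 0 = acting as a client */
			.transport_type	= SOCK_DGRAM,
			.transport_len	= sizeof(srx.transport.sin),
		};
		int fd;

		srx.transport.sin.sin_family = AF_INET;

		fd = socket(AF_RXRPC, SOCK_DGRAM, PF_INET);
		if (fd < 0 || bind(fd, (struct sockaddr *) &srx, sizeof(srx)) < 0)
			return -1;
		return fd;
	}

A server would instead bind with its real service ID and then listen(); all of
its calls are multiplexed over that one socket.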
Diffstat (limited to 'net/rxrpc/ar-ack.c')
-rw-r--r--	net/rxrpc/ar-ack.c	1250
1 file changed, 1250 insertions(+), 0 deletions(-)
diff --git a/net/rxrpc/ar-ack.c b/net/rxrpc/ar-ack.c
new file mode 100644
index 000000000000..8f7764eca96c
--- /dev/null
+++ b/net/rxrpc/ar-ack.c
@@ -0,0 +1,1250 @@
1/* Management of Tx window, Tx resend, ACKs and out-of-sequence reception
2 *
3 * Copyright (C) 2007 Red Hat, Inc. All Rights Reserved.
4 * Written by David Howells (dhowells@redhat.com)
5 *
6 * This program is free software; you can redistribute it and/or
7 * modify it under the terms of the GNU General Public License
8 * as published by the Free Software Foundation; either version
9 * 2 of the License, or (at your option) any later version.
10 */
11
12#include <linux/module.h>
13#include <linux/circ_buf.h>
14#include <linux/net.h>
15#include <linux/skbuff.h>
16#include <linux/udp.h>
17#include <net/sock.h>
18#include <net/af_rxrpc.h>
19#include "ar-internal.h"
20
21static unsigned rxrpc_ack_defer = 1;
22
23static const char *rxrpc_acks[] = {
24 "---", "REQ", "DUP", "OOS", "WIN", "MEM", "PNG", "PNR", "DLY", "IDL",
25 "-?-"
26};
27
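/* a proposed ACK may only displace a pending ACK of strictly lower
 * priority; an equal-priority proposal just refreshes the serial number
 * (see __rxrpc_propose_ACK below)
 */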
28static const s8 rxrpc_ack_priority[] = {
29 [0] = 0,
30 [RXRPC_ACK_DELAY] = 1,
31 [RXRPC_ACK_REQUESTED] = 2,
32 [RXRPC_ACK_IDLE] = 3,
33 [RXRPC_ACK_PING_RESPONSE] = 4,
34 [RXRPC_ACK_DUPLICATE] = 5,
35 [RXRPC_ACK_OUT_OF_SEQUENCE] = 6,
36 [RXRPC_ACK_EXCEEDS_WINDOW] = 7,
37 [RXRPC_ACK_NOSPACE] = 8,
38};
39
40/*
41 * propose an ACK be sent
42 */
43void __rxrpc_propose_ACK(struct rxrpc_call *call, uint8_t ack_reason,
44 __be32 serial, bool immediate)
45{
46 unsigned long expiry;
47 s8 prior = rxrpc_ack_priority[ack_reason];
48
49 ASSERTCMP(prior, >, 0);
50
51 _enter("{%d},%s,%%%x,%u",
52 call->debug_id, rxrpc_acks[ack_reason], ntohl(serial),
53 immediate);
54
55 if (prior < rxrpc_ack_priority[call->ackr_reason]) {
56 if (immediate)
57 goto cancel_timer;
58 return;
59 }
60
61 /* update DELAY, IDLE, REQUESTED and PING_RESPONSE ACK serial
62 * numbers */
63 if (prior == rxrpc_ack_priority[call->ackr_reason]) {
64 if (prior <= 4)
65 call->ackr_serial = serial;
66 if (immediate)
67 goto cancel_timer;
68 return;
69 }
70
71 call->ackr_reason = ack_reason;
72 call->ackr_serial = serial;
73
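 /* decide how urgently to send: DELAY ACKs wait out the ack timer, IDLE
 * and REQUESTED ACKs are only briefly deferred, and anything else goes
 * out immediately
 */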
74 switch (ack_reason) {
75 case RXRPC_ACK_DELAY:
76 _debug("run delay timer");
77 call->ack_timer.expires = jiffies + rxrpc_ack_timeout * HZ;
78 add_timer(&call->ack_timer);
79 return;
80
81 case RXRPC_ACK_IDLE:
82 if (!immediate) {
83 _debug("run defer timer");
84 expiry = 1;
85 goto run_timer;
86 }
87 goto cancel_timer;
88
89 case RXRPC_ACK_REQUESTED:
90 if (!rxrpc_ack_defer)
91 goto cancel_timer;
92 if (!immediate || serial == cpu_to_be32(1)) {
93 _debug("run defer timer");
94 expiry = rxrpc_ack_defer;
95 goto run_timer;
96 }
97
98 default:
99 _debug("immediate ACK");
100 goto cancel_timer;
101 }
102
103run_timer:
104 expiry += jiffies;
105 if (!timer_pending(&call->ack_timer) ||
106 time_after(call->ack_timer.expires, expiry))
107 mod_timer(&call->ack_timer, expiry);
108 return;
109
110cancel_timer:
111 _debug("cancel timer %%%u", ntohl(serial));
112 try_to_del_timer_sync(&call->ack_timer);
113 read_lock_bh(&call->state_lock);
114 if (call->state <= RXRPC_CALL_COMPLETE &&
115 !test_and_set_bit(RXRPC_CALL_ACK, &call->events))
116 schedule_work(&call->processor);
117 read_unlock_bh(&call->state_lock);
118}
119
120/*
121 * propose an ACK be sent, locking the call structure
122 */
123void rxrpc_propose_ACK(struct rxrpc_call *call, uint8_t ack_reason,
124 __be32 serial, bool immediate)
125{
126 s8 prior = rxrpc_ack_priority[ack_reason];
127
128 if (prior > rxrpc_ack_priority[call->ackr_reason]) {
129 spin_lock_bh(&call->lock);
130 __rxrpc_propose_ACK(call, ack_reason, serial, immediate);
131 spin_unlock_bh(&call->lock);
132 }
133}
134
135/*
136 * set the resend timer
137 */
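/* (resend is a bitmask: bit 0 raises the RXRPC_CALL_RESEND event for an
 * immediate resend pass; bit 1 arms the resend timer for resend_at)
 */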
138static void rxrpc_set_resend(struct rxrpc_call *call, u8 resend,
139 unsigned long resend_at)
140{
141 read_lock_bh(&call->state_lock);
142 if (call->state >= RXRPC_CALL_COMPLETE)
143 resend = 0;
144
145 if (resend & 1) {
146 _debug("SET RESEND");
147 set_bit(RXRPC_CALL_RESEND, &call->events);
148 }
149
150 if (resend & 2) {
151 _debug("MODIFY RESEND TIMER");
152 set_bit(RXRPC_CALL_RUN_RTIMER, &call->flags);
153 mod_timer(&call->resend_timer, resend_at);
154 } else {
155 _debug("KILL RESEND TIMER");
156 del_timer_sync(&call->resend_timer);
157 clear_bit(RXRPC_CALL_RESEND_TIMER, &call->events);
158 clear_bit(RXRPC_CALL_RUN_RTIMER, &call->flags);
159 }
160 read_unlock_bh(&call->state_lock);
161}
162
163/*
164 * resend packets
165 */
166static void rxrpc_resend(struct rxrpc_call *call)
167{
168 struct rxrpc_skb_priv *sp;
169 struct rxrpc_header *hdr;
170 struct sk_buff *txb;
171 unsigned long *p_txb, resend_at;
172 int loop, stop;
173 u8 resend;
174
175 _enter("{%d,%d,%d,%d},",
176 call->acks_hard, call->acks_unacked,
177 atomic_read(&call->sequence),
178 CIRC_CNT(call->acks_head, call->acks_tail, call->acks_winsz));
179
180 stop = 0;
181 resend = 0;
182 resend_at = 0;
183
184 for (loop = call->acks_tail;
185 loop != call->acks_head && !stop;
186 loop = (loop + 1) & (call->acks_winsz - 1)
187 ) {
188 p_txb = call->acks_window + loop;
189 smp_read_barrier_depends();
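 /* bit 0 of a Tx window slot is set once that packet has been
 * soft-ACK'd; such packets need no resend consideration
 */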
190 if (*p_txb & 1)
191 continue;
192
193 txb = (struct sk_buff *) *p_txb;
194 sp = rxrpc_skb(txb);
195
196 if (sp->need_resend) {
197 sp->need_resend = 0;
198
199 /* each Tx packet has a new serial number */
200 sp->hdr.serial =
201 htonl(atomic_inc_return(&call->conn->serial));
202
203 hdr = (struct rxrpc_header *) txb->head;
204 hdr->serial = sp->hdr.serial;
205
206 _proto("Tx DATA %%%u { #%d }",
207 ntohl(sp->hdr.serial), ntohl(sp->hdr.seq));
208 if (rxrpc_send_packet(call->conn->trans, txb) < 0) {
209 stop = 1;
210 sp->resend_at = jiffies + 3;
211 } else {
212 sp->resend_at =
213 jiffies + rxrpc_resend_timeout * HZ;
214 }
215 }
216
217 if (time_after_eq(jiffies + 1, sp->resend_at)) {
218 sp->need_resend = 1;
219 resend |= 1;
220 } else if (resend & 2) {
221 if (time_before(sp->resend_at, resend_at))
222 resend_at = sp->resend_at;
223 } else {
224 resend_at = sp->resend_at;
225 resend |= 2;
226 }
227 }
228
229 rxrpc_set_resend(call, resend, resend_at);
230 _leave("");
231}
232
233/*
234 * handle resend timer expiry
235 */
236static void rxrpc_resend_timer(struct rxrpc_call *call)
237{
238 struct rxrpc_skb_priv *sp;
239 struct sk_buff *txb;
240 unsigned long *p_txb, resend_at;
241 int loop;
242 u8 resend;
243
244 _enter("%d,%d,%d",
245 call->acks_tail, call->acks_unacked, call->acks_head);
246
247 resend = 0;
248 resend_at = 0;
249
250 for (loop = call->acks_unacked;
251 loop != call->acks_head;
252 loop = (loop + 1) & (call->acks_winsz - 1)
253 ) {
254 p_txb = call->acks_window + loop;
255 smp_read_barrier_depends();
256 txb = (struct sk_buff *) (*p_txb & ~1);
257 sp = rxrpc_skb(txb);
258
259 ASSERT(!(*p_txb & 1));
260
261 if (sp->need_resend) {
262 ;
263 } else if (time_after_eq(jiffies + 1, sp->resend_at)) {
264 sp->need_resend = 1;
265 resend |= 1;
266 } else if (resend & 2) {
267 if (time_before(sp->resend_at, resend_at))
268 resend_at = sp->resend_at;
269 } else {
270 resend_at = sp->resend_at;
271 resend |= 2;
272 }
273 }
274
275 rxrpc_set_resend(call, resend, resend_at);
276 _leave("");
277}
278
279/*
280 * process soft ACKs of our transmitted packets
281 * - these indicate packets the peer has or has not received, but hasn't yet
282 * given to the consumer, and so can still be discarded and re-requested
283 */
284static int rxrpc_process_soft_ACKs(struct rxrpc_call *call,
285 struct rxrpc_ackpacket *ack,
286 struct sk_buff *skb)
287{
288 struct rxrpc_skb_priv *sp;
289 struct sk_buff *txb;
290 unsigned long *p_txb, resend_at;
291 int loop;
292 u8 sacks[RXRPC_MAXACKS], resend;
293
294 _enter("{%d,%d},{%d},",
295 call->acks_hard,
296 CIRC_CNT(call->acks_head, call->acks_tail, call->acks_winsz),
297 ack->nAcks);
298
299 if (skb_copy_bits(skb, 0, sacks, ack->nAcks) < 0)
300 goto protocol_error;
301
302 resend = 0;
303 resend_at = 0;
304 for (loop = 0; loop < ack->nAcks; loop++) {
305 p_txb = call->acks_window;
306 p_txb += (call->acks_tail + loop) & (call->acks_winsz - 1);
307 smp_read_barrier_depends();
308 txb = (struct sk_buff *) (*p_txb & ~1);
309 sp = rxrpc_skb(txb);
310
311 switch (sacks[loop]) {
312 case RXRPC_ACK_TYPE_ACK:
313 sp->need_resend = 0;
314 *p_txb |= 1;
315 break;
316 case RXRPC_ACK_TYPE_NACK:
317 sp->need_resend = 1;
318 *p_txb &= ~1;
319 resend = 1;
320 break;
321 default:
322 _debug("Unsupported ACK type %d", sacks[loop]);
323 goto protocol_error;
324 }
325 }
326
327 smp_mb();
328 call->acks_unacked = (call->acks_tail + loop) & (call->acks_winsz - 1);
329
330 /* anything not explicitly ACK'd is implicitly NACK'd, but may just not
331 * have been received or processed yet by the far end */
332 for (loop = call->acks_unacked;
333 loop != call->acks_head;
334 loop = (loop + 1) & (call->acks_winsz - 1)
335 ) {
336 p_txb = call->acks_window + loop;
337 smp_read_barrier_depends();
338 txb = (struct sk_buff *) (*p_txb & ~1);
339 sp = rxrpc_skb(txb);
340
341 if (*p_txb & 1) {
342 /* packet must have been discarded */
343 sp->need_resend = 1;
344 *p_txb &= ~1;
345 resend |= 1;
346 } else if (sp->need_resend) {
347 ;
348 } else if (time_after_eq(jiffies + 1, sp->resend_at)) {
349 sp->need_resend = 1;
350 resend |= 1;
351 } else if (resend & 2) {
352 if (time_before(sp->resend_at, resend_at))
353 resend_at = sp->resend_at;
354 } else {
355 resend_at = sp->resend_at;
356 resend |= 2;
357 }
358 }
359
360 rxrpc_set_resend(call, resend, resend_at);
361 _leave(" = 0");
362 return 0;
363
364protocol_error:
365 _leave(" = -EPROTO");
366 return -EPROTO;
367}
368
369/*
370 * discard hard-ACK'd packets from the Tx window
371 */
372static void rxrpc_rotate_tx_window(struct rxrpc_call *call, u32 hard)
373{
374 struct rxrpc_skb_priv *sp;
375 unsigned long _skb;
376 int tail = call->acks_tail, old_tail;
377 int win = CIRC_CNT(call->acks_head, tail, call->acks_winsz);
378
379 _enter("{%u,%u},%u", call->acks_hard, win, hard);
380
381 ASSERTCMP(hard - call->acks_hard, <=, win);
382
383 while (call->acks_hard < hard) {
384 smp_read_barrier_depends();
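 /* bit 0 of each slot is the soft-ACK tag; mask it off to recover the
 * skb pointer before freeing
 */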
385 _skb = call->acks_window[tail] & ~1;
386 sp = rxrpc_skb((struct sk_buff *) _skb);
387 rxrpc_free_skb((struct sk_buff *) _skb);
388 old_tail = tail;
389 tail = (tail + 1) & (call->acks_winsz - 1);
390 call->acks_tail = tail;
391 if (call->acks_unacked == old_tail)
392 call->acks_unacked = tail;
393 call->acks_hard++;
394 }
395
396 wake_up(&call->tx_waitq);
397}
398
399/*
400 * clear the Tx window in the event of a failure
401 */
402static void rxrpc_clear_tx_window(struct rxrpc_call *call)
403{
404 rxrpc_rotate_tx_window(call, atomic_read(&call->sequence));
405}
406
407/*
408 * drain the out of sequence received packet queue into the packet Rx queue
409 */
410static int rxrpc_drain_rx_oos_queue(struct rxrpc_call *call)
411{
412 struct rxrpc_skb_priv *sp;
413 struct sk_buff *skb;
414 bool terminal;
415 int ret;
416
417 _enter("{%d,%d}", call->rx_data_post, call->rx_first_oos);
418
419 spin_lock_bh(&call->lock);
420
421 ret = -ECONNRESET;
422 if (test_bit(RXRPC_CALL_RELEASED, &call->flags))
423 goto socket_unavailable;
424
425 skb = skb_dequeue(&call->rx_oos_queue);
426 if (skb) {
427 sp = rxrpc_skb(skb);
428
429 _debug("drain OOS packet %d [%d]",
430 ntohl(sp->hdr.seq), call->rx_first_oos);
431
432 if (ntohl(sp->hdr.seq) != call->rx_first_oos) {
433 skb_queue_head(&call->rx_oos_queue, skb);
434 call->rx_first_oos = ntohl(rxrpc_skb(skb)->hdr.seq);
435 _debug("requeue %p {%u}", skb, call->rx_first_oos);
436 } else {
437 skb->mark = RXRPC_SKB_MARK_DATA;
438 terminal = ((sp->hdr.flags & RXRPC_LAST_PACKET) &&
439 !(sp->hdr.flags & RXRPC_CLIENT_INITIATED));
440 ret = rxrpc_queue_rcv_skb(call, skb, true, terminal);
441 BUG_ON(ret < 0);
442 _debug("drain #%u", call->rx_data_post);
443 call->rx_data_post++;
444
445 /* find out what the next packet is */
446 skb = skb_peek(&call->rx_oos_queue);
447 if (skb)
448 call->rx_first_oos =
449 ntohl(rxrpc_skb(skb)->hdr.seq);
450 else
451 call->rx_first_oos = 0;
452 _debug("peek %p {%u}", skb, call->rx_first_oos);
453 }
454 }
455
456 ret = 0;
457socket_unavailable:
458 spin_unlock_bh(&call->lock);
459 _leave(" = %d", ret);
460 return ret;
461}
462
463/*
464 * insert an out of sequence packet into the buffer
465 */
466static void rxrpc_insert_oos_packet(struct rxrpc_call *call,
467 struct sk_buff *skb)
468{
469 struct rxrpc_skb_priv *sp, *psp;
470 struct sk_buff *p;
471 u32 seq;
472
473 sp = rxrpc_skb(skb);
474 seq = ntohl(sp->hdr.seq);
475 _enter(",,{%u}", seq);
476
477 skb->destructor = rxrpc_packet_destructor;
478 ASSERTCMP(sp->call, ==, NULL);
479 sp->call = call;
480 rxrpc_get_call(call);
481
482 /* insert into the buffer in sequence order */
483 spin_lock_bh(&call->lock);
484
485 skb_queue_walk(&call->rx_oos_queue, p) {
486 psp = rxrpc_skb(p);
487 if (ntohl(psp->hdr.seq) > seq) {
488 _debug("insert oos #%u before #%u",
489 seq, ntohl(psp->hdr.seq));
490 skb_insert(p, skb, &call->rx_oos_queue);
491 goto inserted;
492 }
493 }
494
495 _debug("append oos #%u", seq);
496 skb_queue_tail(&call->rx_oos_queue, skb);
497inserted:
498
499 /* we might now have a new front to the queue */
500 if (call->rx_first_oos == 0 || seq < call->rx_first_oos)
501 call->rx_first_oos = seq;
502
503 read_lock(&call->state_lock);
504 if (call->state < RXRPC_CALL_COMPLETE &&
505 call->rx_data_post == call->rx_first_oos) {
506 _debug("drain rx oos now");
507 set_bit(RXRPC_CALL_DRAIN_RX_OOS, &call->events);
508 }
509 read_unlock(&call->state_lock);
510
511 spin_unlock_bh(&call->lock);
512 _leave(" [stored #%u]", call->rx_first_oos);
513}
514
515/*
516 * clear the Tx window on final ACK reception
517 */
518static void rxrpc_zap_tx_window(struct rxrpc_call *call)
519{
520 struct rxrpc_skb_priv *sp;
521 struct sk_buff *skb;
522 unsigned long _skb, *acks_window;
523 uint8_t winsz = call->acks_winsz;
524 int tail;
525
526 acks_window = call->acks_window;
527 call->acks_window = NULL;
528
529 while (CIRC_CNT(call->acks_head, call->acks_tail, winsz) > 0) {
530 tail = call->acks_tail;
531 smp_read_barrier_depends();
532 _skb = acks_window[tail] & ~1;
533 smp_mb();
534 call->acks_tail = (call->acks_tail + 1) & (winsz - 1);
535
536 skb = (struct sk_buff *) _skb;
537 sp = rxrpc_skb(skb);
538 _debug("+++ clear Tx %u", ntohl(sp->hdr.seq));
539 rxrpc_free_skb(skb);
540 }
541
542 kfree(acks_window);
543}
544
545/*
546 * process packets in the reception queue
547 */
548static int rxrpc_process_rx_queue(struct rxrpc_call *call,
549 u32 *_abort_code)
550{
551 struct rxrpc_ackpacket ack;
552 struct rxrpc_skb_priv *sp;
553 struct sk_buff *skb;
554 bool post_ACK;
555 int latest;
556 u32 hard, tx;
557
558 _enter("");
559
560process_further:
561 skb = skb_dequeue(&call->rx_queue);
562 if (!skb)
563 return -EAGAIN;
564
565 _net("deferred skb %p", skb);
566
567 sp = rxrpc_skb(skb);
568
569 _debug("process %s [st %d]", rxrpc_pkts[sp->hdr.type], call->state);
570
571 post_ACK = false;
572
573 switch (sp->hdr.type) {
574 /* data packets that wind up here have been received out of
575 * order, need security processing or are jumbo packets */
576 case RXRPC_PACKET_TYPE_DATA:
577 _proto("OOSQ DATA %%%u { #%u }",
578 ntohl(sp->hdr.serial), ntohl(sp->hdr.seq));
579
580 /* secured packets must be verified and possibly decrypted */
581 if (rxrpc_verify_packet(call, skb, _abort_code) < 0)
582 goto protocol_error;
583
584 rxrpc_insert_oos_packet(call, skb);
585 goto process_further;
586
587 /* partial ACK to process */
588 case RXRPC_PACKET_TYPE_ACK:
589 if (skb_copy_bits(skb, 0, &ack, sizeof(ack)) < 0) {
590 _debug("extraction failure");
591 goto protocol_error;
592 }
593 if (!skb_pull(skb, sizeof(ack)))
594 BUG();
595
596 latest = ntohl(sp->hdr.serial);
597 hard = ntohl(ack.firstPacket);
598 tx = atomic_read(&call->sequence);
599
600 _proto("Rx ACK %%%u { m=%hu f=#%u p=#%u s=%%%u r=%s n=%u }",
601 latest,
602 ntohs(ack.maxSkew),
603 hard,
604 ntohl(ack.previousPacket),
605 ntohl(ack.serial),
606 rxrpc_acks[ack.reason],
607 ack.nAcks);
608
609 if (ack.reason == RXRPC_ACK_PING) {
610 _proto("Rx ACK %%%u PING Request", latest);
611 rxrpc_propose_ACK(call, RXRPC_ACK_PING_RESPONSE,
612 sp->hdr.serial, true);
613 }
614
615 /* discard any out-of-order or duplicate ACKs */
616 if (latest - call->acks_latest <= 0) {
617 _debug("discard ACK %d <= %d",
618 latest, call->acks_latest);
619 goto discard;
620 }
621 call->acks_latest = latest;
622
623 if (call->state != RXRPC_CALL_CLIENT_SEND_REQUEST &&
624 call->state != RXRPC_CALL_CLIENT_AWAIT_REPLY &&
625 call->state != RXRPC_CALL_SERVER_SEND_REPLY &&
626 call->state != RXRPC_CALL_SERVER_AWAIT_ACK)
627 goto discard;
628
629 _debug("Tx=%d H=%u S=%d", tx, call->acks_hard, call->state);
630
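 /* ack.firstPacket is the first sequence number not yet hard-ACK'd, so
 * everything below it can now be discarded from the Tx window
 */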
631 if (hard > 0) {
632 if (hard - 1 > tx) {
633 _debug("hard-ACK'd packet %d not transmitted"
634 " (%d top)",
635 hard - 1, tx);
636 goto protocol_error;
637 }
638
639 if ((call->state == RXRPC_CALL_CLIENT_AWAIT_REPLY ||
640 call->state == RXRPC_CALL_SERVER_AWAIT_ACK) &&
641 hard > tx)
642 goto all_acked;
643
644 smp_rmb();
645 rxrpc_rotate_tx_window(call, hard - 1);
646 }
647
648 if (ack.nAcks > 0) {
649 if (hard - 1 + ack.nAcks > tx) {
650 _debug("soft-ACK'd packet %d+%d not"
651 " transmitted (%d top)",
652 hard - 1, ack.nAcks, tx);
653 goto protocol_error;
654 }
655
656 if (rxrpc_process_soft_ACKs(call, &ack, skb) < 0)
657 goto protocol_error;
658 }
659 goto discard;
660
661 /* complete ACK to process */
662 case RXRPC_PACKET_TYPE_ACKALL:
663 goto all_acked;
664
665 /* abort and busy are handled elsewhere */
666 case RXRPC_PACKET_TYPE_BUSY:
667 case RXRPC_PACKET_TYPE_ABORT:
668 BUG();
669
670 /* connection level events - also handled elsewhere */
671 case RXRPC_PACKET_TYPE_CHALLENGE:
672 case RXRPC_PACKET_TYPE_RESPONSE:
673 case RXRPC_PACKET_TYPE_DEBUG:
674 BUG();
675 }
676
677 /* if we've had a hard ACK that covers all the packets we've sent, then
678 * that ends that phase of the operation */
679all_acked:
680 write_lock_bh(&call->state_lock);
681 _debug("ack all %d", call->state);
682
683 switch (call->state) {
684 case RXRPC_CALL_CLIENT_AWAIT_REPLY:
685 call->state = RXRPC_CALL_CLIENT_RECV_REPLY;
686 break;
687 case RXRPC_CALL_SERVER_AWAIT_ACK:
688 _debug("srv complete");
689 call->state = RXRPC_CALL_COMPLETE;
690 post_ACK = true;
691 break;
692 case RXRPC_CALL_CLIENT_SEND_REQUEST:
693 case RXRPC_CALL_SERVER_RECV_REQUEST:
694 goto protocol_error_unlock; /* can't occur yet */
695 default:
696 write_unlock_bh(&call->state_lock);
697 goto discard; /* assume packet left over from earlier phase */
698 }
699
700 write_unlock_bh(&call->state_lock);
701
702 /* if all the packets we sent are hard-ACK'd, then we can discard
703 * whatever we've got left */
704 _debug("clear Tx %d",
705 CIRC_CNT(call->acks_head, call->acks_tail, call->acks_winsz));
706
707 del_timer_sync(&call->resend_timer);
708 clear_bit(RXRPC_CALL_RUN_RTIMER, &call->flags);
709 clear_bit(RXRPC_CALL_RESEND_TIMER, &call->events);
710
711 if (call->acks_window)
712 rxrpc_zap_tx_window(call);
713
714 if (post_ACK) {
715 /* post the final ACK message for userspace to pick up */
716 _debug("post ACK");
717 skb->mark = RXRPC_SKB_MARK_FINAL_ACK;
718 sp->call = call;
719 rxrpc_get_call(call);
720 spin_lock_bh(&call->lock);
721 if (rxrpc_queue_rcv_skb(call, skb, true, true) < 0)
722 BUG();
723 spin_unlock_bh(&call->lock);
724 goto process_further;
725 }
726
727discard:
728 rxrpc_free_skb(skb);
729 goto process_further;
730
731protocol_error_unlock:
732 write_unlock_bh(&call->state_lock);
733protocol_error:
734 rxrpc_free_skb(skb);
735 _leave(" = -EPROTO");
736 return -EPROTO;
737}
738
739/*
740 * post a message to the socket Rx queue for recvmsg() to pick up
741 */
742static int rxrpc_post_message(struct rxrpc_call *call, u32 mark, u32 error,
743 bool fatal)
744{
745 struct rxrpc_skb_priv *sp;
746 struct sk_buff *skb;
747 int ret;
748
749 _enter("{%d,%lx},%u,%u,%d",
750 call->debug_id, call->flags, mark, error, fatal);
751
752 /* remove timers and things for fatal messages */
753 if (fatal) {
754 del_timer_sync(&call->resend_timer);
755 del_timer_sync(&call->ack_timer);
756 clear_bit(RXRPC_CALL_RUN_RTIMER, &call->flags);
757 }
758
759 if (mark != RXRPC_SKB_MARK_NEW_CALL &&
760 !test_bit(RXRPC_CALL_HAS_USERID, &call->flags)) {
761 _leave("[no userid]");
762 return 0;
763 }
764
765 if (!test_bit(RXRPC_CALL_TERMINAL_MSG, &call->flags)) {
766 skb = alloc_skb(0, GFP_NOFS);
767 if (!skb)
768 return -ENOMEM;
769
770 rxrpc_new_skb(skb);
771
772 skb->mark = mark;
773
774 sp = rxrpc_skb(skb);
775 memset(sp, 0, sizeof(*sp));
776 sp->error = error;
777 sp->call = call;
778 rxrpc_get_call(call);
779
780 spin_lock_bh(&call->lock);
781 ret = rxrpc_queue_rcv_skb(call, skb, true, fatal);
782 spin_unlock_bh(&call->lock);
783 if (ret < 0)
784 BUG();
785 }
786
787 return 0;
788}
789
790/*
791 * handle background processing of incoming call packets and ACK / abort
792 * generation
793 */
794void rxrpc_process_call(struct work_struct *work)
795{
796 struct rxrpc_call *call =
797 container_of(work, struct rxrpc_call, processor);
798 struct rxrpc_ackpacket ack;
799 struct rxrpc_ackinfo ackinfo;
800 struct rxrpc_header hdr;
801 struct msghdr msg;
802 struct kvec iov[5];
803 unsigned long bits;
804 __be32 data;
805 size_t len;
806 int genbit, loop, nbit, ioc, ret;
807 u32 abort_code = RX_PROTOCOL_ERROR;
808 u8 *acks = NULL;
809
810 //printk("\n--------------------\n");
811 _enter("{%d,%s,%lx} [%lu]",
812 call->debug_id, rxrpc_call_states[call->state], call->events,
813 (jiffies - call->creation_jif) / (HZ / 10));
814
815 if (test_and_set_bit(RXRPC_CALL_PROC_BUSY, &call->flags)) {
816 _debug("XXXXXXXXXXXXX RUNNING ON MULTIPLE CPUS XXXXXXXXXXXXX");
817 return;
818 }
819
820 /* there's a good chance we're going to have to send a message, so set
821 * one up in advance */
822 msg.msg_name = &call->conn->trans->peer->srx.transport.sin;
823 msg.msg_namelen = sizeof(call->conn->trans->peer->srx.transport.sin);
824 msg.msg_control = NULL;
825 msg.msg_controllen = 0;
826 msg.msg_flags = 0;
827
828 hdr.epoch = call->conn->epoch;
829 hdr.cid = call->cid;
830 hdr.callNumber = call->call_id;
831 hdr.seq = 0;
832 hdr.type = RXRPC_PACKET_TYPE_ACK;
833 hdr.flags = call->conn->out_clientflag;
834 hdr.userStatus = 0;
835 hdr.securityIndex = call->conn->security_ix;
836 hdr._rsvd = 0;
837 hdr.serviceId = call->conn->service_id;
838
839 memset(iov, 0, sizeof(iov));
840 iov[0].iov_base = &hdr;
841 iov[0].iov_len = sizeof(hdr);
842
843 /* deal with events of a final nature */
844 if (test_bit(RXRPC_CALL_RELEASE, &call->events)) {
845 rxrpc_release_call(call);
846 clear_bit(RXRPC_CALL_RELEASE, &call->events);
847 }
848
849 if (test_bit(RXRPC_CALL_RCVD_ERROR, &call->events)) {
850 int error;
851
852 clear_bit(RXRPC_CALL_CONN_ABORT, &call->events);
853 clear_bit(RXRPC_CALL_REJECT_BUSY, &call->events);
854 clear_bit(RXRPC_CALL_ABORT, &call->events);
855
856 error = call->conn->trans->peer->net_error;
857 _debug("post net error %d", error);
858
859 if (rxrpc_post_message(call, RXRPC_SKB_MARK_NET_ERROR,
860 error, true) < 0)
861 goto no_mem;
862 clear_bit(RXRPC_CALL_RCVD_ERROR, &call->events);
863 goto kill_ACKs;
864 }
865
866 if (test_bit(RXRPC_CALL_CONN_ABORT, &call->events)) {
867 ASSERTCMP(call->state, >, RXRPC_CALL_COMPLETE);
868
869 clear_bit(RXRPC_CALL_REJECT_BUSY, &call->events);
870 clear_bit(RXRPC_CALL_ABORT, &call->events);
871
872 _debug("post conn abort");
873
874 if (rxrpc_post_message(call, RXRPC_SKB_MARK_LOCAL_ERROR,
875 call->conn->error, true) < 0)
876 goto no_mem;
877 clear_bit(RXRPC_CALL_CONN_ABORT, &call->events);
878 goto kill_ACKs;
879 }
880
881 if (test_bit(RXRPC_CALL_REJECT_BUSY, &call->events)) {
882 hdr.type = RXRPC_PACKET_TYPE_BUSY;
883 genbit = RXRPC_CALL_REJECT_BUSY;
884 goto send_message;
885 }
886
887 if (test_bit(RXRPC_CALL_ABORT, &call->events)) {
888 ASSERTCMP(call->state, >, RXRPC_CALL_COMPLETE);
889
890 if (rxrpc_post_message(call, RXRPC_SKB_MARK_LOCAL_ERROR,
891 ECONNABORTED, true) < 0)
892 goto no_mem;
893 hdr.type = RXRPC_PACKET_TYPE_ABORT;
894 data = htonl(call->abort_code);
895 iov[1].iov_base = &data;
896 iov[1].iov_len = sizeof(data);
897 genbit = RXRPC_CALL_ABORT;
898 goto send_message;
899 }
900
901 if (test_bit(RXRPC_CALL_ACK_FINAL, &call->events)) {
902 hdr.type = RXRPC_PACKET_TYPE_ACKALL;
903 genbit = RXRPC_CALL_ACK_FINAL;
904 goto send_message;
905 }
906
907 if (call->events & ((1 << RXRPC_CALL_RCVD_BUSY) |
908 (1 << RXRPC_CALL_RCVD_ABORT))
909 ) {
910 u32 mark;
911
912 if (test_bit(RXRPC_CALL_RCVD_ABORT, &call->events))
913 mark = RXRPC_SKB_MARK_REMOTE_ABORT;
914 else
915 mark = RXRPC_SKB_MARK_BUSY;
916
917 _debug("post abort/busy");
918 rxrpc_clear_tx_window(call);
919 if (rxrpc_post_message(call, mark, ECONNABORTED, true) < 0)
920 goto no_mem;
921
922 clear_bit(RXRPC_CALL_RCVD_BUSY, &call->events);
923 clear_bit(RXRPC_CALL_RCVD_ABORT, &call->events);
924 goto kill_ACKs;
925 }
926
927 if (test_and_clear_bit(RXRPC_CALL_RCVD_ACKALL, &call->events)) {
928 _debug("do implicit ackall");
929 rxrpc_clear_tx_window(call);
930 }
931
932 if (test_bit(RXRPC_CALL_LIFE_TIMER, &call->events)) {
933 write_lock_bh(&call->state_lock);
934 if (call->state <= RXRPC_CALL_COMPLETE) {
935 call->state = RXRPC_CALL_LOCALLY_ABORTED;
936 call->abort_code = RX_CALL_TIMEOUT;
937 set_bit(RXRPC_CALL_ABORT, &call->events);
938 }
939 write_unlock_bh(&call->state_lock);
940
941 _debug("post timeout");
942 if (rxrpc_post_message(call, RXRPC_SKB_MARK_LOCAL_ERROR,
943 ETIME, true) < 0)
944 goto no_mem;
945
946 clear_bit(RXRPC_CALL_LIFE_TIMER, &call->events);
947 goto kill_ACKs;
948 }
949
950 /* deal with assorted inbound messages */
951 if (!skb_queue_empty(&call->rx_queue)) {
952 switch (rxrpc_process_rx_queue(call, &abort_code)) {
953 case 0:
954 case -EAGAIN:
955 break;
956 case -ENOMEM:
957 goto no_mem;
958 case -EKEYEXPIRED:
959 case -EKEYREJECTED:
960 case -EPROTO:
961 rxrpc_abort_call(call, abort_code);
962 goto kill_ACKs;
963 }
964 }
965
966 /* handle resending */
967 if (test_and_clear_bit(RXRPC_CALL_RESEND_TIMER, &call->events))
968 rxrpc_resend_timer(call);
969 if (test_and_clear_bit(RXRPC_CALL_RESEND, &call->events))
970 rxrpc_resend(call);
971
972 /* consider sending an ordinary ACK */
973 if (test_bit(RXRPC_CALL_ACK, &call->events)) {
974 __be32 pad;
975
976 _debug("send ACK: window: %d - %d { %lx }",
977 call->rx_data_eaten, call->ackr_win_top,
978 call->ackr_window[0]);
979
980 if (call->state > RXRPC_CALL_SERVER_ACK_REQUEST &&
981 call->ackr_reason != RXRPC_ACK_PING_RESPONSE) {
982 /* ACK by sending reply DATA packet in this state */
983 clear_bit(RXRPC_CALL_ACK, &call->events);
984 goto maybe_reschedule;
985 }
986
987 genbit = RXRPC_CALL_ACK;
988
989 acks = kzalloc(call->ackr_win_top - call->rx_data_eaten,
990 GFP_NOFS);
991 if (!acks)
992 goto no_mem;
993
994 //hdr.flags = RXRPC_SLOW_START_OK;
995 ack.bufferSpace = htons(8);
996 ack.maxSkew = 0;
997 ack.serial = 0;
998 ack.reason = 0;
999
1000 ackinfo.rxMTU = htonl(5692);
1001// ackinfo.rxMTU = htonl(call->conn->trans->peer->maxdata);
1002 ackinfo.maxMTU = htonl(call->conn->trans->peer->maxdata);
1003 ackinfo.rwind = htonl(32);
1004 ackinfo.jumbo_max = htonl(4);
1005
1006 spin_lock_bh(&call->lock);
1007 ack.reason = call->ackr_reason;
1008 ack.serial = call->ackr_serial;
1009 ack.previousPacket = call->ackr_prev_seq;
1010 ack.firstPacket = htonl(call->rx_data_eaten + 1);
1011
1012 ack.nAcks = 0;
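 /* translate the Rx window bitmap into the wire-format run of
 * per-packet ACK bytes; zeroed entries read as RXRPC_ACK_TYPE_NACK
 */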
1013 for (loop = 0; loop < RXRPC_ACKR_WINDOW_ASZ; loop++) {
1014 nbit = loop * BITS_PER_LONG;
1015 for (bits = call->ackr_window[loop]; bits; bits >>= 1
1016 ) {
1017 _debug("- l=%d n=%d b=%lx", loop, nbit, bits);
1018 if (bits & 1) {
1019 acks[nbit] = RXRPC_ACK_TYPE_ACK;
1020 ack.nAcks = nbit + 1;
1021 }
1022 nbit++;
1023 }
1024 }
1025 call->ackr_reason = 0;
1026 spin_unlock_bh(&call->lock);
1027
1028 pad = 0;
1029
1030 iov[1].iov_base = &ack;
1031 iov[1].iov_len = sizeof(ack);
1032 iov[2].iov_base = acks;
1033 iov[2].iov_len = ack.nAcks;
1034 iov[3].iov_base = &pad;
1035 iov[3].iov_len = 3;
1036 iov[4].iov_base = &ackinfo;
1037 iov[4].iov_len = sizeof(ackinfo);
1038
1039 switch (ack.reason) {
1040 case RXRPC_ACK_REQUESTED:
1041 case RXRPC_ACK_DUPLICATE:
1042 case RXRPC_ACK_OUT_OF_SEQUENCE:
1043 case RXRPC_ACK_EXCEEDS_WINDOW:
1044 case RXRPC_ACK_NOSPACE:
1045 case RXRPC_ACK_PING:
1046 case RXRPC_ACK_PING_RESPONSE:
1047 goto send_ACK_with_skew;
1048 case RXRPC_ACK_DELAY:
1049 case RXRPC_ACK_IDLE:
1050 goto send_ACK;
1051 }
1052 }
1053
1054 /* handle completion of security negotiations on an incoming
1055 * connection */
1056 if (test_and_clear_bit(RXRPC_CALL_SECURED, &call->events)) {
1057 _debug("secured");
1058 spin_lock_bh(&call->lock);
1059
1060 if (call->state == RXRPC_CALL_SERVER_SECURING) {
1061 _debug("securing");
1062 write_lock(&call->conn->lock);
1063 if (!test_bit(RXRPC_CALL_RELEASED, &call->flags) &&
1064 !test_bit(RXRPC_CALL_RELEASE, &call->events)) {
1065 _debug("not released");
1066 call->state = RXRPC_CALL_SERVER_ACCEPTING;
1067 list_move_tail(&call->accept_link,
1068 &call->socket->acceptq);
1069 }
1070 write_unlock(&call->conn->lock);
1071 read_lock(&call->state_lock);
1072 if (call->state < RXRPC_CALL_COMPLETE)
1073 set_bit(RXRPC_CALL_POST_ACCEPT, &call->events);
1074 read_unlock(&call->state_lock);
1075 }
1076
1077 spin_unlock_bh(&call->lock);
1078 if (!test_bit(RXRPC_CALL_POST_ACCEPT, &call->events))
1079 goto maybe_reschedule;
1080 }
1081
1082 /* post a notification of an acceptable connection to the app */
1083 if (test_bit(RXRPC_CALL_POST_ACCEPT, &call->events)) {
1084 _debug("post accept");
1085 if (rxrpc_post_message(call, RXRPC_SKB_MARK_NEW_CALL,
1086 0, false) < 0)
1087 goto no_mem;
1088 clear_bit(RXRPC_CALL_POST_ACCEPT, &call->events);
1089 goto maybe_reschedule;
1090 }
1091
1092 /* handle incoming call acceptance */
1093 if (test_and_clear_bit(RXRPC_CALL_ACCEPTED, &call->events)) {
1094 _debug("accepted");
1095 ASSERTCMP(call->rx_data_post, ==, 0);
1096 call->rx_data_post = 1;
1097 read_lock_bh(&call->state_lock);
1098 if (call->state < RXRPC_CALL_COMPLETE)
1099 set_bit(RXRPC_CALL_DRAIN_RX_OOS, &call->events);
1100 read_unlock_bh(&call->state_lock);
1101 }
1102
1103 /* drain the out of sequence received packet queue into the packet Rx
1104 * queue */
1105 if (test_and_clear_bit(RXRPC_CALL_DRAIN_RX_OOS, &call->events)) {
1106 while (call->rx_data_post == call->rx_first_oos)
1107 if (rxrpc_drain_rx_oos_queue(call) < 0)
1108 break;
1109 goto maybe_reschedule;
1110 }
1111
1112 /* other events may have been raised since we started checking */
1113 goto maybe_reschedule;
1114
1115send_ACK_with_skew:
1116 ack.maxSkew = htons(atomic_read(&call->conn->hi_serial) -
1117 ntohl(ack.serial));
1118send_ACK:
1119 hdr.serial = htonl(atomic_inc_return(&call->conn->serial));
1120 _proto("Tx ACK %%%u { m=%hu f=#%u p=#%u s=%%%u r=%s n=%u }",
1121 ntohl(hdr.serial),
1122 ntohs(ack.maxSkew),
1123 ntohl(ack.firstPacket),
1124 ntohl(ack.previousPacket),
1125 ntohl(ack.serial),
1126 rxrpc_acks[ack.reason],
1127 ack.nAcks);
1128
1129 del_timer_sync(&call->ack_timer);
1130 if (ack.nAcks > 0)
1131 set_bit(RXRPC_CALL_TX_SOFT_ACK, &call->flags);
1132 goto send_message_2;
1133
1134send_message:
1135 _debug("send message");
1136
1137 hdr.serial = htonl(atomic_inc_return(&call->conn->serial));
1138 _proto("Tx %s %%%u", rxrpc_pkts[hdr.type], ntohl(hdr.serial));
1139send_message_2:
1140
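 /* the optional iovs are filled contiguously from iov[1], so probe from
 * the tail to find how many to hand to kernel_sendmsg()
 */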
1141 len = iov[0].iov_len;
1142 ioc = 1;
1143 if (iov[4].iov_len) {
1144 ioc = 5;
1145 len += iov[4].iov_len;
1146 len += iov[3].iov_len;
1147 len += iov[2].iov_len;
1148 len += iov[1].iov_len;
1149 } else if (iov[3].iov_len) {
1150 ioc = 4;
1151 len += iov[3].iov_len;
1152 len += iov[2].iov_len;
1153 len += iov[1].iov_len;
1154 } else if (iov[2].iov_len) {
1155 ioc = 3;
1156 len += iov[2].iov_len;
1157 len += iov[1].iov_len;
1158 } else if (iov[1].iov_len) {
1159 ioc = 2;
1160 len += iov[1].iov_len;
1161 }
1162
1163 ret = kernel_sendmsg(call->conn->trans->local->socket,
1164 &msg, iov, ioc, len);
1165 if (ret < 0) {
1166 _debug("sendmsg failed: %d", ret);
1167 read_lock_bh(&call->state_lock);
1168 if (call->state < RXRPC_CALL_DEAD)
1169 schedule_work(&call->processor);
1170 read_unlock_bh(&call->state_lock);
1171 goto error;
1172 }
1173
1174 switch (genbit) {
1175 case RXRPC_CALL_ABORT:
1176 clear_bit(genbit, &call->events);
1177 clear_bit(RXRPC_CALL_RCVD_ABORT, &call->events);
1178 goto kill_ACKs;
1179
1180 case RXRPC_CALL_ACK_FINAL:
1181 write_lock_bh(&call->state_lock);
1182 if (call->state == RXRPC_CALL_CLIENT_FINAL_ACK)
1183 call->state = RXRPC_CALL_COMPLETE;
1184 write_unlock_bh(&call->state_lock);
1185 goto kill_ACKs;
1186
1187 default:
1188 clear_bit(genbit, &call->events);
1189 switch (call->state) {
1190 case RXRPC_CALL_CLIENT_AWAIT_REPLY:
1191 case RXRPC_CALL_CLIENT_RECV_REPLY:
1192 case RXRPC_CALL_SERVER_RECV_REQUEST:
1193 case RXRPC_CALL_SERVER_ACK_REQUEST:
1194 _debug("start ACK timer");
1195 rxrpc_propose_ACK(call, RXRPC_ACK_DELAY,
1196 call->ackr_serial, false);
1197 default:
1198 break;
1199 }
1200 goto maybe_reschedule;
1201 }
1202
1203kill_ACKs:
1204 del_timer_sync(&call->ack_timer);
1205 if (test_and_clear_bit(RXRPC_CALL_ACK_FINAL, &call->events))
1206 rxrpc_put_call(call);
1207 clear_bit(RXRPC_CALL_ACK, &call->events);
1208
1209maybe_reschedule:
1210 if (call->events || !skb_queue_empty(&call->rx_queue)) {
1211 read_lock_bh(&call->state_lock);
1212 if (call->state < RXRPC_CALL_DEAD)
1213 schedule_work(&call->processor);
1214 read_unlock_bh(&call->state_lock);
1215 }
1216
1217 /* don't leave aborted connections on the accept queue */
1218 if (call->state >= RXRPC_CALL_COMPLETE &&
1219 !list_empty(&call->accept_link)) {
1220 _debug("X unlinking once-pending call %p { e=%lx f=%lx c=%x }",
1221 call, call->events, call->flags,
1222 ntohl(call->conn->cid));
1223
1224 read_lock_bh(&call->state_lock);
1225 if (!test_bit(RXRPC_CALL_RELEASED, &call->flags) &&
1226 !test_and_set_bit(RXRPC_CALL_RELEASE, &call->events))
1227 schedule_work(&call->processor);
1228 read_unlock_bh(&call->state_lock);
1229 }
1230
1231error:
1232 clear_bit(RXRPC_CALL_PROC_BUSY, &call->flags);
1233 kfree(acks);
1234
1235 /* because we don't want two CPUs both processing the work item for one
1236 * call at the same time, we use a flag to note when it's busy; however
1237 * this means there's a race between clearing the flag and setting the
1238 * work pending bit and the work item being processed again */
1239 if (call->events && !work_pending(&call->processor)) {
1240 _debug("jumpstart %x", ntohl(call->conn->cid));
1241 schedule_work(&call->processor);
1242 }
1243
1244 _leave("");
1245 return;
1246
1247no_mem:
1248 _debug("out of memory");
1249 goto maybe_reschedule;
1250}