author    Linus Walleij <linus.walleij@linaro.org>  2019-09-05 05:40:54 -0400
committer Linus Walleij <linus.walleij@linaro.org>  2019-09-05 05:40:54 -0400
commit    151a41014bff92f353263cadc051435dc9c3258e (patch)
tree      aa082a0745edd5b7051668f455dfc0ee1e4a9de0 /net/rxrpc/input.c
parent    ae0755b56da9db4190288155ea884331993ed51b (diff)
parent    089cf7f6ecb266b6a4164919a2e69bd2f938374a (diff)
Merge tag 'v5.3-rc7' into devel
Linux 5.3-rc7
Diffstat (limited to 'net/rxrpc/input.c')
 net/rxrpc/input.c | 359 +++++++++++++++++++++++++++-----------------------
 1 file changed, 193 insertions(+), 166 deletions(-)
diff --git a/net/rxrpc/input.c b/net/rxrpc/input.c
index 5bd6f1546e5c..d122c53c8697 100644
--- a/net/rxrpc/input.c
+++ b/net/rxrpc/input.c
@@ -196,15 +196,14 @@ send_extra_data:
  * Ping the other end to fill our RTT cache and to retrieve the rwind
  * and MTU parameters.
  */
-static void rxrpc_send_ping(struct rxrpc_call *call, struct sk_buff *skb,
-			    int skew)
+static void rxrpc_send_ping(struct rxrpc_call *call, struct sk_buff *skb)
 {
 	struct rxrpc_skb_priv *sp = rxrpc_skb(skb);
 	ktime_t now = skb->tstamp;
 
 	if (call->peer->rtt_usage < 3 ||
 	    ktime_before(ktime_add_ms(call->peer->rtt_last_req, 1000), now))
-		rxrpc_propose_ACK(call, RXRPC_ACK_PING, skew, sp->hdr.serial,
+		rxrpc_propose_ACK(call, RXRPC_ACK_PING, sp->hdr.serial,
 				  true, true,
 				  rxrpc_propose_ack_ping_for_params);
 }
@@ -234,7 +233,7 @@ static bool rxrpc_rotate_tx_window(struct rxrpc_call *call, rxrpc_seq_t to,
 		ix = call->tx_hard_ack & RXRPC_RXTX_BUFF_MASK;
 		skb = call->rxtx_buffer[ix];
 		annotation = call->rxtx_annotations[ix];
-		rxrpc_see_skb(skb, rxrpc_skb_tx_rotated);
+		rxrpc_see_skb(skb, rxrpc_skb_rotated);
 		call->rxtx_buffer[ix] = NULL;
 		call->rxtx_annotations[ix] = 0;
 		skb->next = list;
@@ -259,7 +258,7 @@ static bool rxrpc_rotate_tx_window(struct rxrpc_call *call, rxrpc_seq_t to,
 		skb = list;
 		list = skb->next;
 		skb_mark_not_on_list(skb);
-		rxrpc_free_skb(skb, rxrpc_skb_tx_freed);
+		rxrpc_free_skb(skb, rxrpc_skb_freed);
 	}
 
 	return rot_last;
@@ -348,7 +347,7 @@ static bool rxrpc_receiving_reply(struct rxrpc_call *call)
 }
 
 /*
- * Scan a jumbo packet to validate its structure and to work out how many
+ * Scan a data packet to validate its structure and to work out how many
  * subpackets it contains.
  *
  * A jumbo packet is a collection of consecutive packets glued together with
@@ -359,16 +358,21 @@ static bool rxrpc_receiving_reply(struct rxrpc_call *call)
  * the last are RXRPC_JUMBO_DATALEN in size.  The last subpacket may be of any
  * size.
  */
-static bool rxrpc_validate_jumbo(struct sk_buff *skb)
+static bool rxrpc_validate_data(struct sk_buff *skb)
 {
 	struct rxrpc_skb_priv *sp = rxrpc_skb(skb);
 	unsigned int offset = sizeof(struct rxrpc_wire_header);
 	unsigned int len = skb->len;
-	int nr_jumbo = 1;
 	u8 flags = sp->hdr.flags;
 
-	do {
-		nr_jumbo++;
+	for (;;) {
+		if (flags & RXRPC_REQUEST_ACK)
+			__set_bit(sp->nr_subpackets, sp->rx_req_ack);
+		sp->nr_subpackets++;
+
+		if (!(flags & RXRPC_JUMBO_PACKET))
+			break;
+
 		if (len - offset < RXRPC_JUMBO_SUBPKTLEN)
 			goto protocol_error;
 		if (flags & RXRPC_LAST_PACKET)
@@ -377,9 +381,10 @@ static bool rxrpc_validate_jumbo(struct sk_buff *skb)
 		if (skb_copy_bits(skb, offset, &flags, 1) < 0)
 			goto protocol_error;
 		offset += sizeof(struct rxrpc_jumbo_header);
-	} while (flags & RXRPC_JUMBO_PACKET);
+	}
 
-	sp->nr_jumbo = nr_jumbo;
+	if (flags & RXRPC_LAST_PACKET)
+		sp->rx_flags |= RXRPC_SKB_INCL_LAST;
 	return true;
 
 protocol_error:
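
Note: rxrpc_validate_data() above counts subpackets by hopping over each subpacket's data and reading the flags byte of the trailing jumbo header. A minimal userspace sketch of the same walk, with assumed constants standing in for the UAPI definitions (on the wire RXRPC_JUMBO_DATALEN is 1412 and the jumbo header is 4 bytes; FLAG_JUMBO stands in for RXRPC_JUMBO_PACKET):

    #include <stddef.h>

    #define JUMBO_DATALEN   1412    /* assumed: RXRPC_JUMBO_DATALEN */
    #define JUMBO_HDRLEN    4       /* assumed: sizeof(struct rxrpc_jumbo_header) */
    #define JUMBO_SUBPKTLEN (JUMBO_DATALEN + JUMBO_HDRLEN)
    #define FLAG_JUMBO      0x20    /* assumed: RXRPC_JUMBO_PACKET */

    /* Count the subpackets in a payload the way the loop above does;
     * returns -1 for a truncated (protocol-error) packet.  The length
     * check for the final, variable-sized subpacket is omitted here.
     */
    static int count_subpackets(const unsigned char *payload, size_t len,
                                unsigned char flags)
    {
            size_t offset = 0;
            int n = 0;

            for (;;) {
                    n++;                            /* current subpacket */
                    if (!(flags & FLAG_JUMBO))
                            return n;               /* no further jumbo header */
                    if (len - offset < JUMBO_SUBPKTLEN)
                            return -1;              /* truncated subpacket */
                    offset += JUMBO_DATALEN;        /* skip this subpacket's data */
                    flags = payload[offset];        /* flags byte of next jumbo header */
                    offset += JUMBO_HDRLEN;
            }
    }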
@@ -400,10 +405,10 @@ protocol_error:
  * (that information is encoded in the ACK packet).
  */
 static void rxrpc_input_dup_data(struct rxrpc_call *call, rxrpc_seq_t seq,
-				 u8 annotation, bool *_jumbo_bad)
+				 bool is_jumbo, bool *_jumbo_bad)
 {
 	/* Discard normal packets that are duplicates. */
-	if (annotation == 0)
+	if (!is_jumbo)
 		return;
 
 	/* Skip jumbo subpackets that are duplicates.  When we've had three or
@@ -417,30 +422,30 @@ static void rxrpc_input_dup_data(struct rxrpc_call *call, rxrpc_seq_t seq,
 }
 
 /*
- * Process a DATA packet, adding the packet to the Rx ring.
+ * Process a DATA packet, adding the packet to the Rx ring.  The caller's
+ * packet ref must be passed on or discarded.
  */
-static void rxrpc_input_data(struct rxrpc_call *call, struct sk_buff *skb,
-			     u16 skew)
+static void rxrpc_input_data(struct rxrpc_call *call, struct sk_buff *skb)
 {
 	struct rxrpc_skb_priv *sp = rxrpc_skb(skb);
 	enum rxrpc_call_state state;
-	unsigned int offset = sizeof(struct rxrpc_wire_header);
-	unsigned int ix;
+	unsigned int j;
 	rxrpc_serial_t serial = sp->hdr.serial, ack_serial = 0;
-	rxrpc_seq_t seq = sp->hdr.seq, hard_ack;
-	bool immediate_ack = false, jumbo_bad = false, queued;
-	u16 len;
-	u8 ack = 0, flags, annotation = 0;
+	rxrpc_seq_t seq0 = sp->hdr.seq, hard_ack;
+	bool immediate_ack = false, jumbo_bad = false;
+	u8 ack = 0;
 
 	_enter("{%u,%u},{%u,%u}",
-	       call->rx_hard_ack, call->rx_top, skb->len, seq);
+	       call->rx_hard_ack, call->rx_top, skb->len, seq0);
 
-	_proto("Rx DATA %%%u { #%u f=%02x }",
-	       sp->hdr.serial, seq, sp->hdr.flags);
+	_proto("Rx DATA %%%u { #%u f=%02x n=%u }",
+	       sp->hdr.serial, seq0, sp->hdr.flags, sp->nr_subpackets);
 
 	state = READ_ONCE(call->state);
-	if (state >= RXRPC_CALL_COMPLETE)
+	if (state >= RXRPC_CALL_COMPLETE) {
+		rxrpc_free_skb(skb, rxrpc_skb_freed);
 		return;
+	}
 
 	if (call->state == RXRPC_CALL_SERVER_RECV_REQUEST) {
 		unsigned long timo = READ_ONCE(call->next_req_timo);
@@ -465,156 +470,157 @@ static void rxrpc_input_data(struct rxrpc_call *call, struct sk_buff *skb,
 	    !rxrpc_receiving_reply(call))
 		goto unlock;
 
-	call->ackr_prev_seq = seq;
-
+	call->ackr_prev_seq = seq0;
 	hard_ack = READ_ONCE(call->rx_hard_ack);
-	if (after(seq, hard_ack + call->rx_winsize)) {
-		ack = RXRPC_ACK_EXCEEDS_WINDOW;
-		ack_serial = serial;
-		goto ack;
-	}
 
-	flags = sp->hdr.flags;
-	if (flags & RXRPC_JUMBO_PACKET) {
+	if (sp->nr_subpackets > 1) {
 		if (call->nr_jumbo_bad > 3) {
 			ack = RXRPC_ACK_NOSPACE;
 			ack_serial = serial;
 			goto ack;
 		}
-		annotation = 1;
 	}
 
-next_subpacket:
-	queued = false;
-	ix = seq & RXRPC_RXTX_BUFF_MASK;
-	len = skb->len;
-	if (flags & RXRPC_JUMBO_PACKET)
-		len = RXRPC_JUMBO_DATALEN;
-
-	if (flags & RXRPC_LAST_PACKET) {
-		if (test_bit(RXRPC_CALL_RX_LAST, &call->flags) &&
-		    seq != call->rx_top) {
-			rxrpc_proto_abort("LSN", call, seq);
-			goto unlock;
-		}
-	} else {
-		if (test_bit(RXRPC_CALL_RX_LAST, &call->flags) &&
-		    after_eq(seq, call->rx_top)) {
-			rxrpc_proto_abort("LSA", call, seq);
-			goto unlock;
+	for (j = 0; j < sp->nr_subpackets; j++) {
+		rxrpc_serial_t serial = sp->hdr.serial + j;
+		rxrpc_seq_t seq = seq0 + j;
+		unsigned int ix = seq & RXRPC_RXTX_BUFF_MASK;
+		bool terminal = (j == sp->nr_subpackets - 1);
+		bool last = terminal && (sp->rx_flags & RXRPC_SKB_INCL_LAST);
+		u8 flags, annotation = j;
+
+		_proto("Rx DATA+%u %%%u { #%x t=%u l=%u }",
+		       j, serial, seq, terminal, last);
+
+		if (last) {
+			if (test_bit(RXRPC_CALL_RX_LAST, &call->flags) &&
+			    seq != call->rx_top) {
+				rxrpc_proto_abort("LSN", call, seq);
+				goto unlock;
+			}
+		} else {
+			if (test_bit(RXRPC_CALL_RX_LAST, &call->flags) &&
+			    after_eq(seq, call->rx_top)) {
+				rxrpc_proto_abort("LSA", call, seq);
+				goto unlock;
+			}
 		}
-	}
-
-	trace_rxrpc_rx_data(call->debug_id, seq, serial, flags, annotation);
-	if (before_eq(seq, hard_ack)) {
-		ack = RXRPC_ACK_DUPLICATE;
-		ack_serial = serial;
-		goto skip;
-	}
 
-	if (flags & RXRPC_REQUEST_ACK && !ack) {
-		ack = RXRPC_ACK_REQUESTED;
-		ack_serial = serial;
-	}
+		flags = 0;
+		if (last)
+			flags |= RXRPC_LAST_PACKET;
+		if (!terminal)
+			flags |= RXRPC_JUMBO_PACKET;
+		if (test_bit(j, sp->rx_req_ack))
+			flags |= RXRPC_REQUEST_ACK;
+		trace_rxrpc_rx_data(call->debug_id, seq, serial, flags, annotation);
 
-	if (call->rxtx_buffer[ix]) {
-		rxrpc_input_dup_data(call, seq, annotation, &jumbo_bad);
-		if (ack != RXRPC_ACK_DUPLICATE) {
+		if (before_eq(seq, hard_ack)) {
 			ack = RXRPC_ACK_DUPLICATE;
 			ack_serial = serial;
+			continue;
 		}
-		immediate_ack = true;
-		goto skip;
-	}
-
-	/* Queue the packet.  We use a couple of memory barriers here as need
-	 * to make sure that rx_top is perceived to be set after the buffer
-	 * pointer and that the buffer pointer is set after the annotation and
-	 * the skb data.
-	 *
-	 * Barriers against rxrpc_recvmsg_data() and rxrpc_rotate_rx_window()
-	 * and also rxrpc_fill_out_ack().
-	 */
-	rxrpc_get_skb(skb, rxrpc_skb_rx_got);
-	call->rxtx_annotations[ix] = annotation;
-	smp_wmb();
-	call->rxtx_buffer[ix] = skb;
-	if (after(seq, call->rx_top)) {
-		smp_store_release(&call->rx_top, seq);
-	} else if (before(seq, call->rx_top)) {
-		/* Send an immediate ACK if we fill in a hole */
-		if (!ack) {
-			ack = RXRPC_ACK_DELAY;
-			ack_serial = serial;
-		}
-		immediate_ack = true;
-	}
-	if (flags & RXRPC_LAST_PACKET) {
-		set_bit(RXRPC_CALL_RX_LAST, &call->flags);
-		trace_rxrpc_receive(call, rxrpc_receive_queue_last, serial, seq);
-	} else {
-		trace_rxrpc_receive(call, rxrpc_receive_queue, serial, seq);
-	}
-	queued = true;
 
-	if (after_eq(seq, call->rx_expect_next)) {
-		if (after(seq, call->rx_expect_next)) {
-			_net("OOS %u > %u", seq, call->rx_expect_next);
-			ack = RXRPC_ACK_OUT_OF_SEQUENCE;
-			ack_serial = serial;
+		if (call->rxtx_buffer[ix]) {
+			rxrpc_input_dup_data(call, seq, sp->nr_subpackets > 1,
+					     &jumbo_bad);
+			if (ack != RXRPC_ACK_DUPLICATE) {
+				ack = RXRPC_ACK_DUPLICATE;
+				ack_serial = serial;
+			}
+			immediate_ack = true;
+			continue;
 		}
-		call->rx_expect_next = seq + 1;
-	}
 
-skip:
-	offset += len;
-	if (flags & RXRPC_JUMBO_PACKET) {
-		if (skb_copy_bits(skb, offset, &flags, 1) < 0) {
-			rxrpc_proto_abort("XJF", call, seq);
-			goto unlock;
-		}
-		offset += sizeof(struct rxrpc_jumbo_header);
-		seq++;
-		serial++;
-		annotation++;
-		if (flags & RXRPC_JUMBO_PACKET)
-			annotation |= RXRPC_RX_ANNO_JLAST;
 		if (after(seq, hard_ack + call->rx_winsize)) {
 			ack = RXRPC_ACK_EXCEEDS_WINDOW;
 			ack_serial = serial;
-			if (!jumbo_bad) {
-				call->nr_jumbo_bad++;
-				jumbo_bad = true;
+			if (flags & RXRPC_JUMBO_PACKET) {
+				if (!jumbo_bad) {
+					call->nr_jumbo_bad++;
+					jumbo_bad = true;
+				}
 			}
+
 			goto ack;
 		}
 
-		_proto("Rx DATA Jumbo %%%u", serial);
-		goto next_subpacket;
-	}
+		if (flags & RXRPC_REQUEST_ACK && !ack) {
+			ack = RXRPC_ACK_REQUESTED;
+			ack_serial = serial;
+		}
+
+		/* Queue the packet.  We use a couple of memory barriers here as need
+		 * to make sure that rx_top is perceived to be set after the buffer
+		 * pointer and that the buffer pointer is set after the annotation and
+		 * the skb data.
+		 *
+		 * Barriers against rxrpc_recvmsg_data() and rxrpc_rotate_rx_window()
+		 * and also rxrpc_fill_out_ack().
+		 */
+		if (!terminal)
+			rxrpc_get_skb(skb, rxrpc_skb_got);
+		call->rxtx_annotations[ix] = annotation;
+		smp_wmb();
+		call->rxtx_buffer[ix] = skb;
+		if (after(seq, call->rx_top)) {
+			smp_store_release(&call->rx_top, seq);
+		} else if (before(seq, call->rx_top)) {
+			/* Send an immediate ACK if we fill in a hole */
+			if (!ack) {
+				ack = RXRPC_ACK_DELAY;
+				ack_serial = serial;
+			}
+			immediate_ack = true;
+		}
 
-	if (queued && flags & RXRPC_LAST_PACKET && !ack) {
-		ack = RXRPC_ACK_DELAY;
-		ack_serial = serial;
+		if (terminal) {
+			/* From this point on, we're not allowed to touch the
+			 * packet any longer as its ref now belongs to the Rx
+			 * ring.
+			 */
+			skb = NULL;
+		}
+
+		if (last) {
+			set_bit(RXRPC_CALL_RX_LAST, &call->flags);
+			if (!ack) {
+				ack = RXRPC_ACK_DELAY;
+				ack_serial = serial;
+			}
+			trace_rxrpc_receive(call, rxrpc_receive_queue_last, serial, seq);
+		} else {
+			trace_rxrpc_receive(call, rxrpc_receive_queue, serial, seq);
+		}
+
+		if (after_eq(seq, call->rx_expect_next)) {
+			if (after(seq, call->rx_expect_next)) {
+				_net("OOS %u > %u", seq, call->rx_expect_next);
+				ack = RXRPC_ACK_OUT_OF_SEQUENCE;
+				ack_serial = serial;
+			}
+			call->rx_expect_next = seq + 1;
+		}
 	}
 
 ack:
 	if (ack)
-		rxrpc_propose_ACK(call, ack, skew, ack_serial,
+		rxrpc_propose_ACK(call, ack, ack_serial,
 				  immediate_ack, true,
 				  rxrpc_propose_ack_input_data);
 	else
-		rxrpc_propose_ACK(call, RXRPC_ACK_DELAY, skew, serial,
+		rxrpc_propose_ACK(call, RXRPC_ACK_DELAY, serial,
 				  false, true,
 				  rxrpc_propose_ack_input_data);
 
-	if (sp->hdr.seq == READ_ONCE(call->rx_hard_ack) + 1) {
+	if (seq0 == READ_ONCE(call->rx_hard_ack) + 1) {
 		trace_rxrpc_notify_socket(call->debug_id, serial);
 		rxrpc_notify_socket(call);
 	}
 
 unlock:
 	spin_unlock(&call->input_lock);
+	rxrpc_free_skb(skb, rxrpc_skb_freed);
 	_leave(" [queued]");
 }
 
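Note: the queueing step above publishes each subpacket in a strict order (annotation, then buffer pointer, then rx_top) so that a consumer doing an acquire read of rx_top is guaranteed to see fully initialised slots. A sketch of the same producer-side pattern using C11 atomics in place of the kernel's smp_wmb()/smp_store_release(); all names and sizes here are illustrative, not the rxrpc ones:

    #include <stdatomic.h>

    #define RING_SIZE 64                      /* illustrative ring size */
    #define RING_MASK (RING_SIZE - 1)

    struct rx_ring {
            void          *buffer[RING_SIZE];
            unsigned char  annotations[RING_SIZE];
            _Atomic unsigned int top;
    };

    static void publish(struct rx_ring *r, unsigned int seq, void *pkt,
                        unsigned char anno)
    {
            unsigned int ix = seq & RING_MASK;

            r->annotations[ix] = anno;        /* 1: metadata first */
            /* smp_wmb() analogue: order the plain stores around it */
            atomic_thread_fence(memory_order_release);
            r->buffer[ix] = pkt;              /* 2: then the slot pointer */
            /* 3: smp_store_release() analogue: top is the consumer's cue */
            atomic_store_explicit(&r->top, seq, memory_order_release);
    }

A consumer pairs this with an acquire load of top before dereferencing buffer[ix], which is what rxrpc_recvmsg_data() and rxrpc_rotate_rx_window() rely on.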
@@ -822,8 +828,7 @@ static void rxrpc_input_soft_acks(struct rxrpc_call *call, u8 *acks,
  * soft-ACK means that the packet may be discarded and retransmission
  * requested.  A phase is complete when all packets are hard-ACK'd.
  */
-static void rxrpc_input_ack(struct rxrpc_call *call, struct sk_buff *skb,
-			    u16 skew)
+static void rxrpc_input_ack(struct rxrpc_call *call, struct sk_buff *skb)
 {
 	struct rxrpc_ack_summary summary = { 0 };
 	struct rxrpc_skb_priv *sp = rxrpc_skb(skb);
@@ -867,11 +872,11 @@ static void rxrpc_input_ack(struct rxrpc_call *call, struct sk_buff *skb,
 	if (buf.ack.reason == RXRPC_ACK_PING) {
 		_proto("Rx ACK %%%u PING Request", sp->hdr.serial);
 		rxrpc_propose_ACK(call, RXRPC_ACK_PING_RESPONSE,
-				  skew, sp->hdr.serial, true, true,
+				  sp->hdr.serial, true, true,
 				  rxrpc_propose_ack_respond_to_ping);
 	} else if (sp->hdr.flags & RXRPC_REQUEST_ACK) {
 		rxrpc_propose_ACK(call, RXRPC_ACK_REQUESTED,
-				  skew, sp->hdr.serial, true, true,
+				  sp->hdr.serial, true, true,
 				  rxrpc_propose_ack_respond_to_ack);
 	}
 
@@ -948,7 +953,7 @@ static void rxrpc_input_ack(struct rxrpc_call *call, struct sk_buff *skb,
 	    RXRPC_TX_ANNO_LAST &&
 	    summary.nr_acks == call->tx_top - hard_ack &&
 	    rxrpc_is_client_call(call))
-		rxrpc_propose_ACK(call, RXRPC_ACK_PING, skew, sp->hdr.serial,
+		rxrpc_propose_ACK(call, RXRPC_ACK_PING, sp->hdr.serial,
 				  false, true,
 				  rxrpc_propose_ack_ping_for_lost_reply);
 
@@ -1004,7 +1009,7 @@ static void rxrpc_input_abort(struct rxrpc_call *call, struct sk_buff *skb)
  * Process an incoming call packet.
  */
 static void rxrpc_input_call_packet(struct rxrpc_call *call,
-				    struct sk_buff *skb, u16 skew)
+				    struct sk_buff *skb)
 {
 	struct rxrpc_skb_priv *sp = rxrpc_skb(skb);
 	unsigned long timo;
@@ -1023,11 +1028,11 @@ static void rxrpc_input_call_packet(struct rxrpc_call *call,
 
 	switch (sp->hdr.type) {
 	case RXRPC_PACKET_TYPE_DATA:
-		rxrpc_input_data(call, skb, skew);
-		break;
+		rxrpc_input_data(call, skb);
+		goto no_free;
 
 	case RXRPC_PACKET_TYPE_ACK:
-		rxrpc_input_ack(call, skb, skew);
+		rxrpc_input_ack(call, skb);
 		break;
 
 	case RXRPC_PACKET_TYPE_BUSY:
@@ -1051,6 +1056,8 @@ static void rxrpc_input_call_packet(struct rxrpc_call *call,
 		break;
 	}
 
+	rxrpc_free_skb(skb, rxrpc_skb_freed);
+no_free:
 	_leave("");
 }
 
@@ -1108,8 +1115,12 @@ static void rxrpc_post_packet_to_local(struct rxrpc_local *local,
 {
 	_enter("%p,%p", local, skb);
 
-	skb_queue_tail(&local->event_queue, skb);
-	rxrpc_queue_local(local);
+	if (rxrpc_get_local_maybe(local)) {
+		skb_queue_tail(&local->event_queue, skb);
+		rxrpc_queue_local(local);
+	} else {
+		rxrpc_free_skb(skb, rxrpc_skb_freed);
+	}
 }
 
 /*
@@ -1119,8 +1130,12 @@ static void rxrpc_reject_packet(struct rxrpc_local *local, struct sk_buff *skb)
 {
 	CHECK_SLAB_OKAY(&local->usage);
 
-	skb_queue_tail(&local->reject_queue, skb);
-	rxrpc_queue_local(local);
+	if (rxrpc_get_local_maybe(local)) {
+		skb_queue_tail(&local->reject_queue, skb);
+		rxrpc_queue_local(local);
+	} else {
+		rxrpc_free_skb(skb, rxrpc_skb_freed);
+	}
 }
 
 /*
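Note: both queueing helpers now only queue work if they can still take a reference on the local endpoint, so an incoming packet can no longer schedule work against an endpoint that is being torn down. This is the kernel's inc-not-zero idiom; a sketch with C11 atomics, names illustrative rather than the rxrpc API:

    #include <stdatomic.h>
    #include <stdbool.h>

    struct endpoint {
            _Atomic int usage;
    };

    /* Take a reference only if the object still has one; cf. the
     * atomic_inc_not_zero() pattern behind rxrpc_get_local_maybe().
     */
    static bool get_maybe(struct endpoint *ep)
    {
            int n = atomic_load_explicit(&ep->usage, memory_order_relaxed);

            do {
                    if (n == 0)
                            return false;   /* already dying: drop the work instead */
            } while (!atomic_compare_exchange_weak_explicit(&ep->usage, &n, n + 1,
                                                            memory_order_acquire,
                                                            memory_order_relaxed));
            return true;
    }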
@@ -1173,7 +1188,6 @@ int rxrpc_input_packet(struct sock *udp_sk, struct sk_buff *skb)
 	struct rxrpc_peer *peer = NULL;
 	struct rxrpc_sock *rx = NULL;
 	unsigned int channel;
-	int skew = 0;
 
 	_enter("%p", udp_sk);
 
@@ -1184,7 +1198,7 @@ int rxrpc_input_packet(struct sock *udp_sk, struct sk_buff *skb)
 	if (skb->tstamp == 0)
 		skb->tstamp = ktime_get_real();
 
-	rxrpc_new_skb(skb, rxrpc_skb_rx_received);
+	rxrpc_new_skb(skb, rxrpc_skb_received);
 
 	skb_pull(skb, sizeof(struct udphdr));
 
@@ -1201,7 +1215,7 @@ int rxrpc_input_packet(struct sock *udp_sk, struct sk_buff *skb)
 		static int lose;
 		if ((lose++ & 7) == 7) {
 			trace_rxrpc_rx_lose(sp);
-			rxrpc_free_skb(skb, rxrpc_skb_rx_lost);
+			rxrpc_free_skb(skb, rxrpc_skb_lost);
 			return 0;
 		}
 	}
@@ -1233,9 +1247,26 @@ int rxrpc_input_packet(struct sock *udp_sk, struct sk_buff *skb)
 		if (sp->hdr.callNumber == 0 ||
 		    sp->hdr.seq == 0)
 			goto bad_message;
-		if (sp->hdr.flags & RXRPC_JUMBO_PACKET &&
-		    !rxrpc_validate_jumbo(skb))
+		if (!rxrpc_validate_data(skb))
 			goto bad_message;
+
+		/* Unshare the packet so that it can be modified for in-place
+		 * decryption.
+		 */
+		if (sp->hdr.securityIndex != 0) {
+			struct sk_buff *nskb = skb_unshare(skb, GFP_ATOMIC);
+			if (!nskb) {
+				rxrpc_eaten_skb(skb, rxrpc_skb_unshared_nomem);
+				goto out;
+			}
+
+			if (nskb != skb) {
+				rxrpc_eaten_skb(skb, rxrpc_skb_received);
+				rxrpc_new_skb(skb, rxrpc_skb_unshared);
+				skb = nskb;
+				sp = rxrpc_skb(skb);
+			}
+		}
 		break;
 
 	case RXRPC_PACKET_TYPE_CHALLENGE:
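Note: the unshare step exists because the security layer decrypts the payload in place; writing into an skb whose data area is shared with another holder would corrupt that holder's view. skb_unshare() hands back a privately owned skb, consuming the original ref if it had to copy (and freeing the original outright if the copy fails), which is why both the skb pointer and the private area are reloaded above. A sketch of the caller-side contract (helper name illustrative):

    /* Returns an skb whose data may be modified in place, or NULL (in
     * which case skb_unshare() has already freed the original skb).
     */
    static struct sk_buff *make_writable(struct sk_buff *skb)
    {
            struct sk_buff *nskb = skb_unshare(skb, GFP_ATOMIC);

            if (!nskb)
                    return NULL;    /* allocation failure; nothing left to free */

            /* nskb is either the original (it was not shared after all) or
             * a private copy; if a copy was made, the old pointer has been
             * released, so callers must continue with nskb only.
             */
            return nskb;
    }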
@@ -1301,15 +1332,8 @@ int rxrpc_input_packet(struct sock *udp_sk, struct sk_buff *skb)
 			goto out;
 		}
 
-		/* Note the serial number skew here */
-		skew = (int)sp->hdr.serial - (int)conn->hi_serial;
-		if (skew >= 0) {
-			if (skew > 0)
-				conn->hi_serial = sp->hdr.serial;
-		} else {
-			skew = -skew;
-			skew = min(skew, 65535);
-		}
+		if ((int)sp->hdr.serial - (int)conn->hi_serial > 0)
+			conn->hi_serial = sp->hdr.serial;
 
 		/* Call-bound packets are routed by connection channel. */
 		channel = sp->hdr.cid & RXRPC_CHANNELMASK;
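Note: with the skew bookkeeping gone, what remains is the standard wrapping-sequence comparison: subtract two 32-bit serials and test the sign, so anything less than 2^31 ahead counts as newer and the test survives the 0xffffffff to 0 wrap (the same idiom as the before()/after() sequence macros). A self-contained sketch, written with the slightly cleaner cast-of-difference form:

    #include <assert.h>
    #include <stdint.h>

    /* True if serial a is newer than b in a wrapping 32-bit space. */
    static int serial_after(uint32_t a, uint32_t b)
    {
            return (int32_t)(a - b) > 0;
    }

    int main(void)
    {
            assert(serial_after(2, 1));
            assert(!serial_after(1, 2));
            assert(serial_after(2, 0xfffffffeu));   /* 2 is 4 serials past 0xfffffffe */
            return 0;
    }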
@@ -1372,15 +1396,18 @@ int rxrpc_input_packet(struct sock *udp_sk, struct sk_buff *skb)
 		call = rxrpc_new_incoming_call(local, rx, skb);
 		if (!call)
 			goto reject_packet;
-		rxrpc_send_ping(call, skb, skew);
+		rxrpc_send_ping(call, skb);
 		mutex_unlock(&call->user_mutex);
 	}
 
-	rxrpc_input_call_packet(call, skb, skew);
-	goto discard;
+	/* Process a call packet; this either discards or passes on the ref
+	 * elsewhere.
+	 */
+	rxrpc_input_call_packet(call, skb);
+	goto out;
 
 discard:
-	rxrpc_free_skb(skb, rxrpc_skb_rx_freed);
+	rxrpc_free_skb(skb, rxrpc_skb_freed);
 out:
 	trace_rxrpc_rx_done(0, 0);
 	return 0;
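
Note: the net effect of the rxrpc_skb changes throughout this diff is a single ownership rule: every function handed a packet either frees it or passes the ref on, and a DATA packet now carries one ref that travels all the way into the Rx ring. In sketch form, with hypothetical helpers standing in for the rxrpc call chain:

    /* Ownership convention: input_packet() consumes the skb on every path. */
    static void input_packet(struct sk_buff *skb)
    {
            if (!packet_is_wanted(skb)) {   /* hypothetical predicate */
                    kfree_skb(skb);         /* consumed: freed here */
                    return;
            }
            queue_for_call(skb);            /* hypothetical: ref now owned by the queue */
            /* skb must not be touched past this point */
    }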