-rw-r--r--   net/ipv4/tcp.c       |  17
-rw-r--r--   net/ipv4/tcp_input.c | 118
2 files changed, 90 insertions, 45 deletions
diff --git a/net/ipv4/tcp.c b/net/ipv4/tcp.c
index 0fb8b441f1f9..17b89c523f9d 100644
--- a/net/ipv4/tcp.c
+++ b/net/ipv4/tcp.c
@@ -439,12 +439,14 @@ int tcp_ioctl(struct sock *sk, int cmd, unsigned long arg)
 			 !tp->urg_data ||
 			 before(tp->urg_seq, tp->copied_seq) ||
 			 !before(tp->urg_seq, tp->rcv_nxt)) {
+			struct sk_buff *skb;
+
 			answ = tp->rcv_nxt - tp->copied_seq;
 
 			/* Subtract 1, if FIN is in queue. */
-			if (answ && !skb_queue_empty(&sk->sk_receive_queue))
-				answ -=
-		       tcp_hdr((struct sk_buff *)sk->sk_receive_queue.prev)->fin;
+			skb = skb_peek_tail(&sk->sk_receive_queue);
+			if (answ && skb)
+				answ -= tcp_hdr(skb)->fin;
 		} else
 			answ = tp->urg_seq - tp->copied_seq;
 		release_sock(sk);
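The SIOCINQ path used to reach the last queued skb by casting sk_receive_queue.prev, relying on the preceding skb_queue_empty() test to keep that cast honest: on an empty queue, ->prev points back at the list head itself. skb_peek_tail() folds the emptiness check into the accessor and returns NULL instead, which is why the new code can simply test "answ && skb". A minimal userspace sketch of that sentinel convention; the buf/buf_head types and peek_tail() are illustrative stand-ins, not the kernel's definitions:

    /* Toy model of the sk_buff_head sentinel list: an empty head points
     * at itself, so a raw ->prev hands back the head cast to the wrong
     * type.  peek_tail() maps that case to NULL, as skb_peek_tail() does.
     */
    #include <stdio.h>

    struct buf { struct buf *next, *prev; int fin; };
    struct buf_head { struct buf *next, *prev; };

    static struct buf *peek_tail(struct buf_head *h)
    {
            return h->prev == (struct buf *)h ? NULL : h->prev;
    }

    int main(void)
    {
            struct buf_head q = { (struct buf *)&q, (struct buf *)&q };
            struct buf b = { (struct buf *)&q, (struct buf *)&q, 1 };

            printf("empty: %p\n", (void *)peek_tail(&q));   /* null */
            q.next = q.prev = &b;                   /* enqueue one buf */
            printf("fin of tail: %d\n", peek_tail(&q)->fin);
            return 0;
    }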
@@ -1382,11 +1384,7 @@ int tcp_recvmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *msg,
 
 		/* Next get a buffer. */
 
-		skb = skb_peek(&sk->sk_receive_queue);
-		do {
-			if (!skb)
-				break;
-
+		skb_queue_walk(&sk->sk_receive_queue, skb) {
 			/* Now that we have two receive queues this
 			 * shouldn't happen.
 			 */
@@ -1403,8 +1401,7 @@ int tcp_recvmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *msg,
 			if (tcp_hdr(skb)->fin)
 				goto found_fin_ok;
 			WARN_ON(!(flags & MSG_PEEK));
-			skb = skb->next;
-		} while (skb != (struct sk_buff *)&sk->sk_receive_queue);
+		}
 
 		/* Well, if we have backlog, try to process it now yet. */
 
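The tcp_recvmsg() loop is the same conversion in the other direction: skb_queue_walk() hides both the initial skb_peek() and the "skb != (struct sk_buff *)&sk->sk_receive_queue" termination test that the old do/while spelled out by hand. A toy re-creation of the macro over the same kind of sentinel list; names are stand-ins, not the kernel's:

    #include <stdio.h>

    struct buf { struct buf *next, *prev; int id; };
    struct buf_head { struct buf *next, *prev; };

    /* Walk every payload node; the body never sees the sentinel. */
    #define buf_queue_walk(q, b) \
            for ((b) = (q)->next; (b) != (struct buf *)(q); (b) = (b)->next)

    int main(void)
    {
            struct buf_head q = { (struct buf *)&q, (struct buf *)&q };
            struct buf a = { (struct buf *)&q, (struct buf *)&q, 1 };
            struct buf b = { (struct buf *)&q, &a, 2 };
            struct buf *it;

            q.next = &a; a.next = &b; q.prev = &b;  /* queue: a, b */
            buf_queue_walk(&q, it)
                    printf("buf %d\n", it->id);     /* prints 1 then 2 */
            return 0;
    }

One nuance: after a complete walk the cursor is left at the sentinel and must not be dereferenced, which is why the tcp_collapse() change further down grows an explicit end_of_skbs flag.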
diff --git a/net/ipv4/tcp_input.c b/net/ipv4/tcp_input.c
index eeb8a92aa416..ba34a23c1bfb 100644
--- a/net/ipv4/tcp_input.c
+++ b/net/ipv4/tcp_input.c
@@ -4426,7 +4426,7 @@ drop:
 		}
 		__skb_queue_head(&tp->out_of_order_queue, skb);
 	} else {
-		struct sk_buff *skb1 = tp->out_of_order_queue.prev;
+		struct sk_buff *skb1 = skb_peek_tail(&tp->out_of_order_queue);
 		u32 seq = TCP_SKB_CB(skb)->seq;
 		u32 end_seq = TCP_SKB_CB(skb)->end_seq;
 
@@ -4443,15 +4443,18 @@ drop:
 		}
 
 		/* Find place to insert this segment. */
-		do {
+		while (1) {
 			if (!after(TCP_SKB_CB(skb1)->seq, seq))
 				break;
-		} while ((skb1 = skb1->prev) !=
-			 (struct sk_buff *)&tp->out_of_order_queue);
+			if (skb_queue_is_first(&tp->out_of_order_queue, skb1)) {
+				skb1 = NULL;
+				break;
+			}
+			skb1 = skb_queue_prev(&tp->out_of_order_queue, skb1);
+		}
 
 		/* Do skb overlap to previous one? */
-		if (skb1 != (struct sk_buff *)&tp->out_of_order_queue &&
-		    before(seq, TCP_SKB_CB(skb1)->end_seq)) {
+		if (skb1 && before(seq, TCP_SKB_CB(skb1)->end_seq)) {
 			if (!after(end_seq, TCP_SKB_CB(skb1)->end_seq)) {
 				/* All the bits are present. Drop. */
 				__kfree_skb(skb);
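The backward scan for the insertion point stops chasing raw ->prev pointers. skb_queue_prev() may only be called when skb_queue_is_first() says a previous skb exists, so the test has to come before the step, exactly as the new while (1) loop orders it, and skb1 == NULL now means "insert at the head of the ofo queue". A compilable toy of the caller pattern, using plain <= instead of the kernel's wrap-safe after()/before() and stand-in types:

    struct buf { struct buf *next, *prev; unsigned int seq; };
    struct buf_head { struct buf *next, *prev; };

    static int queue_is_first(const struct buf_head *q, const struct buf *b)
    {
            return b->prev == (const struct buf *)q;
    }

    /* Last node with seq <= target, or NULL if the new node goes first. */
    static struct buf *find_insert_point(struct buf_head *q, struct buf *tail,
                                         unsigned int target)
    {
            struct buf *b = tail;

            while (1) {
                    if (b->seq <= target)
                            return b;
                    if (queue_is_first(q, b))
                            return 0;       /* ran past the head */
                    b = b->prev;
            }
    }

    int main(void)
    {
            struct buf_head q = { (struct buf *)&q, (struct buf *)&q };
            struct buf a = { (struct buf *)&q, (struct buf *)&q, 100 };

            q.next = q.prev = &a;           /* one-element queue */
            return find_insert_point(&q, &a, 50) ? 1 : 0;   /* NULL */
    }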
@@ -4463,24 +4466,41 @@ drop:
 				tcp_dsack_set(sk, seq,
 					      TCP_SKB_CB(skb1)->end_seq);
 			} else {
-				skb1 = skb1->prev;
+				if (skb_queue_is_first(&tp->out_of_order_queue,
+						       skb1))
+					skb1 = NULL;
+				else
+					skb1 = skb_queue_prev(
+						&tp->out_of_order_queue,
+						skb1);
 			}
 		}
-		__skb_queue_after(&tp->out_of_order_queue, skb1, skb);
+		if (!skb1)
+			__skb_queue_head(&tp->out_of_order_queue, skb);
+		else
+			__skb_queue_after(&tp->out_of_order_queue, skb1, skb);
 
 		/* And clean segments covered by new one as whole. */
-		while ((skb1 = skb->next) !=
-		       (struct sk_buff *)&tp->out_of_order_queue &&
-		       after(end_seq, TCP_SKB_CB(skb1)->seq)) {
-			if (before(end_seq, TCP_SKB_CB(skb1)->end_seq)) {
-				tcp_dsack_extend(sk, TCP_SKB_CB(skb1)->seq,
-						 end_seq);
-				break;
-			}
-			__skb_unlink(skb1, &tp->out_of_order_queue);
-			tcp_dsack_extend(sk, TCP_SKB_CB(skb1)->seq,
-					 TCP_SKB_CB(skb1)->end_seq);
-			__kfree_skb(skb1);
+		if (skb1 && !skb_queue_is_last(&tp->out_of_order_queue, skb1)) {
+			struct sk_buff *n;
+
+			skb1 = skb_queue_next(&tp->out_of_order_queue, skb1);
+			skb_queue_walk_from_safe(&tp->out_of_order_queue,
+						 skb1, n) {
+				if (!after(end_seq, TCP_SKB_CB(skb1)->seq))
+					break;
+				if (before(end_seq,
+					   TCP_SKB_CB(skb1)->end_seq)) {
+					tcp_dsack_extend(sk,
+							 TCP_SKB_CB(skb1)->seq,
+							 end_seq);
+					break;
+				}
+				__skb_unlink(skb1, &tp->out_of_order_queue);
+				tcp_dsack_extend(sk, TCP_SKB_CB(skb1)->seq,
+						 TCP_SKB_CB(skb1)->end_seq);
+				__kfree_skb(skb1);
+			}
 		}
 
 add_sack:
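The forward cleanup of segments swallowed by the new arrival now runs under skb_queue_walk_from_safe(), which samples the follower before the body executes, so unlinking and freeing the cursor is safe. The shape of that macro in miniature; illustrative types, and note that stepping onto the sentinel only works because the head shares the next/prev layout of a node, as the real sk_buff_head does:

    #include <stdio.h>

    struct buf { struct buf *next, *prev; unsigned int seq, end_seq; };
    struct buf_head { struct buf *next, *prev; };

    /* Remember the follower up front so the body may unlink/free 'b'. */
    #define buf_walk_from_safe(q, b, tmp)                       \
            for ((tmp) = (b)->next; (b) != (struct buf *)(q);   \
                 (b) = (tmp), (tmp) = (b)->next)

    static void buf_unlink(struct buf *b)
    {
            b->prev->next = b->next;
            b->next->prev = b->prev;
    }

    int main(void)
    {
            struct buf_head q = { (struct buf *)&q, (struct buf *)&q };
            struct buf a = { (struct buf *)&q, (struct buf *)&q, 10, 20 };
            struct buf *it, *tmp;

            q.next = q.prev = &a;
            it = q.next;                    /* start of the cleanup scan */
            buf_walk_from_safe(&q, it, tmp)
                    if (it->end_seq <= 30)  /* fully covered: drop it */
                            buf_unlink(it);
            printf("queue empty: %d\n", q.next == (struct buf *)&q);
            return 0;
    }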
@@ -4492,7 +4512,10 @@ add_sack:
 static struct sk_buff *tcp_collapse_one(struct sock *sk, struct sk_buff *skb,
 					struct sk_buff_head *list)
 {
-	struct sk_buff *next = skb->next;
+	struct sk_buff *next = NULL;
+
+	if (!skb_queue_is_last(list, skb))
+		next = skb_queue_next(list, skb);
 
 	__skb_unlink(skb, list);
 	__kfree_skb(skb);
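tcp_collapse_one() frees the skb it is handed and used to return skb->next blindly, leaving every caller to compare the result against the queue head. Returning NULL at the end of the list moves that test into one place. The guarded-successor idiom in miniature, with toy types standing in for the kernel's skb_queue_is_last()/skb_queue_next() pairing:

    struct buf { struct buf *next, *prev; };
    struct buf_head { struct buf *next, *prev; };

    static int queue_is_last(const struct buf_head *q, const struct buf *b)
    {
            return b->next == (const struct buf *)q;
    }

    /* Successor of b, or NULL when b is the final element. */
    static struct buf *queue_next_or_null(const struct buf_head *q,
                                          const struct buf *b)
    {
            return queue_is_last(q, b) ? 0 : b->next;
    }

    int main(void)
    {
            struct buf_head q = { (struct buf *)&q, (struct buf *)&q };
            struct buf a = { (struct buf *)&q, (struct buf *)&q };

            q.next = q.prev = &a;
            return queue_next_or_null(&q, &a) ? 1 : 0;  /* a is last */
    }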
@@ -4503,6 +4526,9 @@ static struct sk_buff *tcp_collapse_one(struct sock *sk, struct sk_buff *skb,
 
 /* Collapse contiguous sequence of skbs head..tail with
  * sequence numbers start..end.
+ *
+ * If tail is NULL, this means until the end of the list.
+ *
  * Segments with FIN/SYN are not collapsed (only because this
  * simplifies code)
  */
@@ -4511,15 +4537,23 @@ tcp_collapse(struct sock *sk, struct sk_buff_head *list,
 	     struct sk_buff *head, struct sk_buff *tail,
 	     u32 start, u32 end)
 {
-	struct sk_buff *skb;
+	struct sk_buff *skb, *n;
+	bool end_of_skbs;
 
 	/* First, check that queue is collapsible and find
 	 * the point where collapsing can be useful. */
-	for (skb = head; skb != tail;) {
+	skb = head;
+restart:
+	end_of_skbs = true;
+	skb_queue_walk_from_safe(list, skb, n) {
+		if (skb == tail)
+			break;
 		/* No new bits? It is possible on ofo queue. */
 		if (!before(start, TCP_SKB_CB(skb)->end_seq)) {
 			skb = tcp_collapse_one(sk, skb, list);
-			continue;
+			if (!skb)
+				break;
+			goto restart;
 		}
 
 		/* The first skb to collapse is:
@@ -4529,16 +4563,24 @@ tcp_collapse(struct sock *sk, struct sk_buff_head *list,
 		 */
 		if (!tcp_hdr(skb)->syn && !tcp_hdr(skb)->fin &&
 		    (tcp_win_from_space(skb->truesize) > skb->len ||
-		     before(TCP_SKB_CB(skb)->seq, start) ||
-		     (skb->next != tail &&
-		      TCP_SKB_CB(skb)->end_seq != TCP_SKB_CB(skb->next)->seq)))
+		     before(TCP_SKB_CB(skb)->seq, start))) {
+			end_of_skbs = false;
 			break;
+		}
+
+		if (!skb_queue_is_last(list, skb)) {
+			struct sk_buff *next = skb_queue_next(list, skb);
+			if (next != tail &&
+			    TCP_SKB_CB(skb)->end_seq != TCP_SKB_CB(next)->seq) {
+				end_of_skbs = false;
+				break;
+			}
+		}
 
 		/* Decided to skip this, advance start seq. */
 		start = TCP_SKB_CB(skb)->end_seq;
-		skb = skb->next;
 	}
-	if (skb == tail || tcp_hdr(skb)->syn || tcp_hdr(skb)->fin)
+	if (end_of_skbs || tcp_hdr(skb)->syn || tcp_hdr(skb)->fin)
 		return;
 
 	while (before(start, end)) {
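The first loop in tcp_collapse() previously terminated with skb either at tail or pointing at the list head, and the "skb == tail" test afterwards covered both. Under skb_queue_walk_from_safe() the cursor equals the head sentinel after a full walk, and dereferencing it via tcp_hdr(skb) there would read garbage, hence the new end_of_skbs flag recording whether the walk broke out at a real skb. The pattern in miniature, toy types again:

    #include <stdbool.h>
    #include <stdio.h>

    struct buf { struct buf *next, *prev; int collapsible; };
    struct buf_head { struct buf *next, *prev; };

    /* Return the first collapsible buf, never the sentinel. */
    static struct buf *first_collapsible(struct buf_head *q)
    {
            struct buf *b;
            bool found = false;             /* end_of_skbs, inverted */

            for (b = q->next; b != (struct buf *)q; b = b->next)
                    if (b->collapsible) {
                            found = true;
                            break;
                    }
            return found ? b : NULL;
    }

    int main(void)
    {
            struct buf_head q = { (struct buf *)&q, (struct buf *)&q };

            /* prints a null pointer, not the address of q */
            printf("%p\n", (void *)first_collapsible(&q));
            return 0;
    }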
@@ -4583,7 +4625,8 @@ tcp_collapse(struct sock *sk, struct sk_buff_head *list,
 		}
 		if (!before(start, TCP_SKB_CB(skb)->end_seq)) {
 			skb = tcp_collapse_one(sk, skb, list);
-			if (skb == tail ||
+			if (!skb ||
+			    skb == tail ||
 			    tcp_hdr(skb)->syn ||
 			    tcp_hdr(skb)->fin)
 				return;
@@ -4610,17 +4653,21 @@ static void tcp_collapse_ofo_queue(struct sock *sk)
 		head = skb;
 
 		for (;;) {
-			skb = skb->next;
+			struct sk_buff *next = NULL;
+
+			if (!skb_queue_is_last(&tp->out_of_order_queue, skb))
+				next = skb_queue_next(&tp->out_of_order_queue, skb);
+			skb = next;
 
 			/* Segment is terminated when we see gap or when
 			 * we are at the end of all the queue. */
-			if (skb == (struct sk_buff *)&tp->out_of_order_queue ||
+			if (!skb ||
 			    after(TCP_SKB_CB(skb)->seq, end) ||
 			    before(TCP_SKB_CB(skb)->end_seq, start)) {
 				tcp_collapse(sk, &tp->out_of_order_queue,
 					     head, skb, start, end);
 				head = skb;
-				if (skb == (struct sk_buff *)&tp->out_of_order_queue)
+				if (!skb)
 					break;
 				/* Start new segment */
 				start = TCP_SKB_CB(skb)->seq;
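tcp_collapse_ofo_queue() segments the ofo queue into contiguous runs; a run now ends at a gap or at skb == NULL, and that NULL flows straight into tcp_collapse() as the tail argument, matching the "tail == NULL means until the end of the list" contract documented above. Run detection in the same toy model, with plain comparisons instead of the wrap-safe after()/before():

    struct buf { struct buf *next, *prev; unsigned int seq, end_seq; };
    struct buf_head { struct buf *next, *prev; };

    static struct buf *next_or_null(struct buf_head *q, struct buf *b)
    {
            return b->next == (struct buf *)q ? 0 : b->next;
    }

    /* First buf after 'head' that does not overlap the run, or NULL. */
    static struct buf *end_of_run(struct buf_head *q, struct buf *head)
    {
            struct buf *b = head;
            unsigned int end = head->end_seq;

            while ((b = next_or_null(q, b)) != 0) {
                    if (b->seq > end)       /* gap terminates the run */
                            break;
                    if (b->end_seq > end)
                            end = b->end_seq;
            }
            return b;       /* NULL: the run reached the end of the queue */
    }

    int main(void)
    {
            struct buf_head q = { (struct buf *)&q, (struct buf *)&q };
            struct buf a = { (struct buf *)&q, (struct buf *)&q, 0, 10 };

            q.next = q.prev = &a;
            return end_of_run(&q, &a) ? 1 : 0;      /* NULL: off the end */
    }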
@@ -4681,10 +4728,11 @@ static int tcp_prune_queue(struct sock *sk)
 	tp->rcv_ssthresh = min(tp->rcv_ssthresh, 4U * tp->advmss);
 
 	tcp_collapse_ofo_queue(sk);
-	tcp_collapse(sk, &sk->sk_receive_queue,
-		     sk->sk_receive_queue.next,
-		     (struct sk_buff *)&sk->sk_receive_queue,
-		     tp->copied_seq, tp->rcv_nxt);
+	if (!skb_queue_empty(&sk->sk_receive_queue))
+		tcp_collapse(sk, &sk->sk_receive_queue,
+			     skb_peek(&sk->sk_receive_queue),
+			     NULL,
+			     tp->copied_seq, tp->rcv_nxt);
 	sk_mem_reclaim(sk);
 
 	if (atomic_read(&sk->sk_rmem_alloc) <= sk->sk_rcvbuf)
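The receive-queue collapse in tcp_prune_queue() adopts the caller side of the new contract: guard against an empty queue, pass skb_peek() as head, and pass NULL for tail to mean the whole list. A sketch of that caller shape in the toy model; all names are illustrative:

    #include <stddef.h>

    struct buf { struct buf *next, *prev; };
    struct buf_head { struct buf *next, *prev; };

    static struct buf *peek(struct buf_head *q)
    {
            return q->next == (struct buf *)q ? NULL : q->next;
    }

    /* tail == NULL means "until the end of the list". */
    static void collapse(struct buf_head *q, struct buf *head, struct buf *tail)
    {
            struct buf *b;

            for (b = head; b && b != tail;
                 b = (b->next == (struct buf *)q) ? NULL : b->next)
                    ;       /* coalescing work would go here */
    }

    static void prune(struct buf_head *q)
    {
            struct buf *head = peek(q);

            if (head)       /* !skb_queue_empty() in the kernel */
                    collapse(q, head, NULL);
    }

    int main(void)
    {
            struct buf_head q = { (struct buf *)&q, (struct buf *)&q };

            prune(&q);      /* empty queue: collapse is skipped */
            return 0;
    }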