Diffstat (limited to 'net/ipv4/tcp.c')
-rw-r--r--	net/ipv4/tcp.c	168
1 file changed, 111 insertions(+), 57 deletions(-)
diff --git a/net/ipv4/tcp.c b/net/ipv4/tcp.c
index 0cd71b84e483..19a0612b8a20 100644
--- a/net/ipv4/tcp.c
+++ b/net/ipv4/tcp.c
@@ -339,7 +339,7 @@ unsigned int tcp_poll(struct file *file, struct socket *sock, poll_table *wait)
 	struct sock *sk = sock->sk;
 	struct tcp_sock *tp = tcp_sk(sk);
 
-	poll_wait(file, sk->sk_sleep, wait);
+	sock_poll_wait(file, sk->sk_sleep, wait);
 	if (sk->sk_state == TCP_LISTEN)
 		return inet_csk_listen_poll(sk);
 
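The first hunk swaps poll_wait() for sock_poll_wait(). This belongs to the lockless-wakeup fix: the poll side must publish its wait-queue registration before re-reading socket state, and the wakeup side must observe that registration. A minimal sketch of the presumed pairing (the real helper lives in include/net/sock.h, per that patch series; the name _sketch marks this as an illustration, not the kernel's definition):

	/* poll side: register on the wait queue, then fence before
	 * the caller goes on to test socket state */
	static inline void sock_poll_wait_sketch(struct file *filp,
						 wait_queue_head_t *wq,
						 poll_table *p)
	{
		if (p && wq) {
			poll_wait(filp, wq, p);
			smp_mb(); /* paired with a barrier on the wakeup side */
		}
	}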
@@ -439,12 +439,14 @@ int tcp_ioctl(struct sock *sk, int cmd, unsigned long arg)
 		    !tp->urg_data ||
 		    before(tp->urg_seq, tp->copied_seq) ||
 		    !before(tp->urg_seq, tp->rcv_nxt)) {
+			struct sk_buff *skb;
+
 			answ = tp->rcv_nxt - tp->copied_seq;
 
 			/* Subtract 1, if FIN is in queue. */
-			if (answ && !skb_queue_empty(&sk->sk_receive_queue))
-				answ -=
-		       tcp_hdr((struct sk_buff *)sk->sk_receive_queue.prev)->fin;
+			skb = skb_peek_tail(&sk->sk_receive_queue);
+			if (answ && skb)
+				answ -= tcp_hdr(skb)->fin;
 		} else
 			answ = tp->urg_seq - tp->copied_seq;
 		release_sock(sk);
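The tcp_ioctl() hunk tidies the SIOCINQ path: skb_peek_tail() replaces an open-coded cast of sk_receive_queue.prev. The FIN subtraction exists because a queued FIN consumes a sequence number but carries no readable byte. From userspace the same counter is reached via FIONREAD; a small illustrative probe (fd is assumed to be a connected TCP socket):

	#include <stdio.h>
	#include <sys/ioctl.h>

	static void show_pending(int fd)
	{
		int pending = 0;

		if (ioctl(fd, FIONREAD, &pending) == 0) /* SIOCINQ on sockets */
			printf("%d bytes readable\n", pending);
	}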
@@ -524,7 +526,8 @@ static int tcp_splice_data_recv(read_descriptor_t *rd_desc, struct sk_buff *skb,
 	struct tcp_splice_state *tss = rd_desc->arg.data;
 	int ret;
 
-	ret = skb_splice_bits(skb, offset, tss->pipe, rd_desc->count, tss->flags);
+	ret = skb_splice_bits(skb, offset, tss->pipe, min(rd_desc->count, len),
+			      tss->flags);
 	if (ret > 0)
 		rd_desc->count -= ret;
 	return ret;
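The splice hunk clamps the per-skb splice to min(rd_desc->count, len): the actor is invoked for at most len bytes of this skb, and consuming more would desynchronize tcp_read_sock()'s accounting. For reference, the callback contract this honors (typedef as I recall it from include/net/sock.h):

	typedef int (*sk_read_actor_t)(read_descriptor_t *desc,
				       struct sk_buff *skb,
				       unsigned int offset, size_t len);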
@@ -660,6 +663,47 @@ struct sk_buff *sk_stream_alloc_skb(struct sock *sk, int size, gfp_t gfp)
 	return NULL;
 }
 
+static unsigned int tcp_xmit_size_goal(struct sock *sk, u32 mss_now,
+				       int large_allowed)
+{
+	struct tcp_sock *tp = tcp_sk(sk);
+	u32 xmit_size_goal, old_size_goal;
+
+	xmit_size_goal = mss_now;
+
+	if (large_allowed && sk_can_gso(sk)) {
+		xmit_size_goal = ((sk->sk_gso_max_size - 1) -
+				  inet_csk(sk)->icsk_af_ops->net_header_len -
+				  inet_csk(sk)->icsk_ext_hdr_len -
+				  tp->tcp_header_len);
+
+		xmit_size_goal = tcp_bound_to_half_wnd(tp, xmit_size_goal);
+
+		/* We try hard to avoid divides here */
+		old_size_goal = tp->xmit_size_goal_segs * mss_now;
+
+		if (likely(old_size_goal <= xmit_size_goal &&
+			   old_size_goal + mss_now > xmit_size_goal)) {
+			xmit_size_goal = old_size_goal;
+		} else {
+			tp->xmit_size_goal_segs = xmit_size_goal / mss_now;
+			xmit_size_goal = tp->xmit_size_goal_segs * mss_now;
+		}
+	}
+
+	return max(xmit_size_goal, mss_now);
+}
+
+static int tcp_send_mss(struct sock *sk, int *size_goal, int flags)
+{
+	int mss_now;
+
+	mss_now = tcp_current_mss(sk);
+	*size_goal = tcp_xmit_size_goal(sk, mss_now, !(flags & MSG_OOB));
+
+	return mss_now;
+}
+
 static ssize_t do_tcp_sendpages(struct sock *sk, struct page **pages, int poffset,
 			 size_t psize, int flags)
 {
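The new tcp_xmit_size_goal() caches the goal as a segment count (tp->xmit_size_goal_segs), so the common case avoids a division: the cached multiple is reused as long as it still lies in (goal - mss, goal]. A standalone illustration of that test (hypothetical userland rendering, not kernel code):

	#include <stdio.h>

	static unsigned int cached_segs;	/* plays tp->xmit_size_goal_segs */

	static unsigned int size_goal(unsigned int mss, unsigned int raw_goal)
	{
		unsigned int old = cached_segs * mss;

		if (!(old <= raw_goal && old + mss > raw_goal)) {
			cached_segs = raw_goal / mss;	/* the divide being avoided */
			old = cached_segs * mss;
		}
		return old;
	}

	int main(void)
	{
		printf("%u\n", size_goal(1448, 64000));	/* 63712: divide taken */
		printf("%u\n", size_goal(1448, 64000));	/* 63712: cache hit    */
		return 0;
	}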
@@ -676,13 +720,12 @@ static ssize_t do_tcp_sendpages(struct sock *sk, struct page **pages, int poffse
 
 	clear_bit(SOCK_ASYNC_NOSPACE, &sk->sk_socket->flags);
 
-	mss_now = tcp_current_mss(sk, !(flags&MSG_OOB));
-	size_goal = tp->xmit_size_goal;
+	mss_now = tcp_send_mss(sk, &size_goal, flags);
 	copied = 0;
 
 	err = -EPIPE;
 	if (sk->sk_err || (sk->sk_shutdown & SEND_SHUTDOWN))
-		goto do_error;
+		goto out_err;
 
 	while (psize > 0) {
 		struct sk_buff *skb = tcp_write_queue_tail(sk);
@@ -760,8 +803,7 @@ wait_for_memory:
 		if ((err = sk_stream_wait_memory(sk, &timeo)) != 0)
 			goto do_error;
 
-		mss_now = tcp_current_mss(sk, !(flags&MSG_OOB));
-		size_goal = tp->xmit_size_goal;
+		mss_now = tcp_send_mss(sk, &size_goal, flags);
 	}
 
 out:
@@ -843,8 +885,7 @@ int tcp_sendmsg(struct kiocb *iocb, struct socket *sock, struct msghdr *msg,
 	/* This should be in poll */
 	clear_bit(SOCK_ASYNC_NOSPACE, &sk->sk_socket->flags);
 
-	mss_now = tcp_current_mss(sk, !(flags&MSG_OOB));
-	size_goal = tp->xmit_size_goal;
+	mss_now = tcp_send_mss(sk, &size_goal, flags);
 
 	/* Ok commence sending. */
 	iovlen = msg->msg_iovlen;
@@ -853,7 +894,7 @@ int tcp_sendmsg(struct kiocb *iocb, struct socket *sock, struct msghdr *msg,
 
 	err = -EPIPE;
 	if (sk->sk_err || (sk->sk_shutdown & SEND_SHUTDOWN))
-		goto do_error;
+		goto out_err;
 
 	while (--iovlen >= 0) {
 		int seglen = iov->iov_len;
@@ -862,13 +903,17 @@ int tcp_sendmsg(struct kiocb *iocb, struct socket *sock, struct msghdr *msg,
 		iov++;
 
 		while (seglen > 0) {
-			int copy;
+			int copy = 0;
+			int max = size_goal;
 
 			skb = tcp_write_queue_tail(sk);
+			if (tcp_send_head(sk)) {
+				if (skb->ip_summed == CHECKSUM_NONE)
+					max = mss_now;
+				copy = max - skb->len;
+			}
 
-			if (!tcp_send_head(sk) ||
-			    (copy = size_goal - skb->len) <= 0) {
-
+			if (copy <= 0) {
 new_segment:
 				/* Allocate new segment. If the interface is SG,
 				 * allocate skb fitting to single page.
@@ -889,6 +934,7 @@ new_segment:
 
 				skb_entail(sk, skb);
 				copy = size_goal;
+				max = size_goal;
 			}
 
 			/* Try to append data to the end of skb. */
@@ -987,7 +1033,7 @@ new_segment:
 			if ((seglen -= copy) == 0 && iovlen == 0)
 				goto out;
 
-			if (skb->len < size_goal || (flags & MSG_OOB))
+			if (skb->len < max || (flags & MSG_OOB))
 				continue;
 
 			if (forced_push(tp)) {
@@ -1006,8 +1052,7 @@ wait_for_memory:
 			if ((err = sk_stream_wait_memory(sk, &timeo)) != 0)
 				goto do_error;
 
-			mss_now = tcp_current_mss(sk, !(flags&MSG_OOB));
-			size_goal = tp->xmit_size_goal;
+			mss_now = tcp_send_mss(sk, &size_goal, flags);
 		}
 	}
 
@@ -1043,9 +1088,7 @@ out_err:
 	 * this, no blocking and very strange errors 8)
 	 */
 
-static int tcp_recv_urg(struct sock *sk, long timeo,
-			struct msghdr *msg, int len, int flags,
-			int *addr_len)
+static int tcp_recv_urg(struct sock *sk, struct msghdr *msg, int len, int flags)
 {
 	struct tcp_sock *tp = tcp_sk(sk);
 
@@ -1285,6 +1328,7 @@ int tcp_recvmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *msg,
 	struct task_struct *user_recv = NULL;
 	int copied_early = 0;
 	struct sk_buff *skb;
+	u32 urg_hole = 0;
 
 	lock_sock(sk);
 
@@ -1345,11 +1389,7 @@ int tcp_recvmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *msg,
 
 		/* Next get a buffer. */
 
-		skb = skb_peek(&sk->sk_receive_queue);
-		do {
-			if (!skb)
-				break;
-
+		skb_queue_walk(&sk->sk_receive_queue, skb) {
 			/* Now that we have two receive queues this
 			 * shouldn't happen.
 			 */
@@ -1366,8 +1406,7 @@ int tcp_recvmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *msg,
 			if (tcp_hdr(skb)->fin)
 				goto found_fin_ok;
 			WARN_ON(!(flags & MSG_PEEK));
-			skb = skb->next;
-		} while (skb != (struct sk_buff *)&sk->sk_receive_queue);
+		}
 
 		/* Well, if we have backlog, try to process it now yet. */
 
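The receive-queue scan now uses skb_queue_walk(), which hides the head-sentinel cast the open-coded do/while needed. Its era-appropriate shape (from include/linux/skbuff.h; quoted from memory, so verify against the tree):

	#define skb_queue_walk(queue, skb) \
			for (skb = (queue)->next;					\
			     prefetch(skb->next), (skb != (struct sk_buff *)(queue));	\
			     skb = skb->next)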
@@ -1496,7 +1535,8 @@ do_prequeue:
 				}
 			}
 		}
-		if ((flags & MSG_PEEK) && peek_seq != tp->copied_seq) {
+		if ((flags & MSG_PEEK) &&
+		    (peek_seq - copied - urg_hole != tp->copied_seq)) {
 			if (net_ratelimit())
 				printk(KERN_DEBUG "TCP(%s:%d): Application bug, race in MSG_PEEK.\n",
 				       current->comm, task_pid_nr(current));
@@ -1517,6 +1557,7 @@ do_prequeue:
 			if (!urg_offset) {
 				if (!sock_flag(sk, SOCK_URGINLINE)) {
 					++*seq;
+					urg_hole++;
 					offset++;
 					used--;
 					if (!used)
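urg_hole counts urgent bytes that were skipped rather than copied: with !SOCK_URGINLINE, ++*seq advances the peek position without handing the user a byte. The adjusted MSG_PEEK race check then rests on this invariant (a worked identity, not code):

	peek_seq == tp->copied_seq + copied + urg_hole	/* no concurrent reader */

so any other difference means a second reader moved copied_seq, which is what the ratelimited warning reports.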
@@ -1660,7 +1701,7 @@ out:
 	return err;
 
 recv_urg:
-	err = tcp_recv_urg(sk, timeo, msg, len, flags, addr_len);
+	err = tcp_recv_urg(sk, msg, len, flags);
 	goto out;
 }
 
@@ -1798,7 +1839,7 @@ void tcp_close(struct sock *sk, long timeout)
 		/* Unread data was tossed, zap the connection. */
 		NET_INC_STATS_USER(sock_net(sk), LINUX_MIB_TCPABORTONCLOSE);
 		tcp_set_state(sk, TCP_CLOSE);
-		tcp_send_active_reset(sk, GFP_KERNEL);
+		tcp_send_active_reset(sk, sk->sk_allocation);
 	} else if (sock_flag(sk, SOCK_LINGER) && !sk->sk_lingertime) {
 		/* Check zero linger _after_ checking for unread data. */
 		sk->sk_prot->disconnect(sk, 0);
@@ -1971,7 +2012,7 @@ int tcp_disconnect(struct sock *sk, int flags)
 	tp->snd_cwnd = 2;
 	icsk->icsk_probes_out = 0;
 	tp->packets_out = 0;
-	tp->snd_ssthresh = 0x7fffffff;
+	tp->snd_ssthresh = TCP_INFINITE_SSTHRESH;
 	tp->snd_cwnd_cnt = 0;
 	tp->bytes_acked = 0;
 	tcp_set_ca_state(sk, TCP_CA_Open);
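TCP_INFINITE_SSTHRESH names the magic 0x7fffffff that means "ssthresh not yet set". Presumably the accompanying header change reads roughly as follows (a sketch; the literal is taken from the line being replaced, and the helper name is my assumption about the companion change in include/net/tcp.h):

	#define TCP_INFINITE_SSTHRESH	0x7fffffff

	static inline bool tcp_in_initial_slowstart(const struct tcp_sock *tp)
	{
		return tp->snd_ssthresh >= TCP_INFINITE_SSTHRESH;
	}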
@@ -2295,13 +2336,13 @@ static int do_tcp_getsockopt(struct sock *sk, int level,
 		val = !!(tp->nonagle&TCP_NAGLE_CORK);
 		break;
 	case TCP_KEEPIDLE:
-		val = (tp->keepalive_time ? : sysctl_tcp_keepalive_time) / HZ;
+		val = keepalive_time_when(tp) / HZ;
 		break;
 	case TCP_KEEPINTVL:
-		val = (tp->keepalive_intvl ? : sysctl_tcp_keepalive_intvl) / HZ;
+		val = keepalive_intvl_when(tp) / HZ;
 		break;
 	case TCP_KEEPCNT:
-		val = tp->keepalive_probes ? : sysctl_tcp_keepalive_probes;
+		val = keepalive_probes(tp);
 		break;
 	case TCP_SYNCNT:
 		val = icsk->icsk_syn_retries ? : sysctl_tcp_syn_retries;
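The keepalive cases now go through helpers that fold in the sysctl defaults. Judging by the expressions they replace, the helpers are thin wrappers along these lines (a sketch of the presumed include/net/tcp.h addition):

	static inline int keepalive_time_when(const struct tcp_sock *tp)
	{
		return tp->keepalive_time ? : sysctl_tcp_keepalive_time;
	}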
@@ -2475,26 +2516,38 @@ struct sk_buff **tcp_gro_receive(struct sk_buff **head, struct sk_buff *skb)
 	struct sk_buff *p;
 	struct tcphdr *th;
 	struct tcphdr *th2;
+	unsigned int len;
 	unsigned int thlen;
 	unsigned int flags;
-	unsigned int total;
 	unsigned int mss = 1;
+	unsigned int hlen;
+	unsigned int off;
 	int flush = 1;
+	int i;
+
+	off = skb_gro_offset(skb);
+	hlen = off + sizeof(*th);
+	th = skb_gro_header_fast(skb, off);
+	if (skb_gro_header_hard(skb, hlen)) {
+		th = skb_gro_header_slow(skb, hlen, off);
+		if (unlikely(!th))
+			goto out;
+	}
 
-	if (!pskb_may_pull(skb, sizeof(*th)))
-		goto out;
-
-	th = tcp_hdr(skb);
 	thlen = th->doff * 4;
 	if (thlen < sizeof(*th))
 		goto out;
 
-	if (!pskb_may_pull(skb, thlen))
-		goto out;
+	hlen = off + thlen;
+	if (skb_gro_header_hard(skb, hlen)) {
+		th = skb_gro_header_slow(skb, hlen, off);
+		if (unlikely(!th))
+			goto out;
+	}
 
-	th = tcp_hdr(skb);
-	__skb_pull(skb, thlen);
+	skb_gro_pull(skb, thlen);
 
+	len = skb_gro_len(skb);
 	flags = tcp_flag_word(th);
 
 	for (; (p = *head); head = &p->next) {
@@ -2503,7 +2556,7 @@ struct sk_buff **tcp_gro_receive(struct sk_buff **head, struct sk_buff *skb)
 
 		th2 = tcp_hdr(p);
 
-		if (th->source != th2->source || th->dest != th2->dest) {
+		if (*(u32 *)&th->source ^ *(u32 *)&th2->source) {
 			NAPI_GRO_CB(p)->same_flow = 0;
 			continue;
 		}
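The port comparison folds two 16-bit tests into one aligned 32-bit xor: source and dest are adjacent in struct tcphdr, so a single load covers both, and the xor is zero only when both ports match. A compile-time restatement of the layout assumption (illustrative, not part of the patch):

	BUILD_BUG_ON(offsetof(struct tcphdr, dest) !=
		     offsetof(struct tcphdr, source) + 2);
	/* (*(u32 *)&th->source ^ *(u32 *)&th2->source) == 0
	 *	<=> both ports of th and th2 are pairwise equal */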
@@ -2518,14 +2571,15 @@ found:
 	flush |= flags & TCP_FLAG_CWR;
 	flush |= (flags ^ tcp_flag_word(th2)) &
 		 ~(TCP_FLAG_CWR | TCP_FLAG_FIN | TCP_FLAG_PSH);
-	flush |= th->ack_seq != th2->ack_seq || th->window != th2->window;
-	flush |= memcmp(th + 1, th2 + 1, thlen - sizeof(*th));
+	flush |= th->ack_seq ^ th2->ack_seq;
+	for (i = sizeof(*th); i < thlen; i += 4)
+		flush |= *(u32 *)((u8 *)th + i) ^
+			 *(u32 *)((u8 *)th2 + i);
 
-	total = p->len;
 	mss = skb_shinfo(p)->gso_size;
 
-	flush |= skb->len > mss || skb->len <= 0;
-	flush |= ntohl(th2->seq) + total != ntohl(th->seq);
+	flush |= (len - 1) >= mss;
+	flush |= (ntohl(th2->seq) + skb_gro_len(p)) ^ ntohl(th->seq);
 
 	if (flush || skb_gro_receive(head, skb)) {
 		mss = 1;
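The flush computation is rewritten branch-free: x ^ y is nonzero exactly when x != y, so inequality tests become xors OR-ed into flush, and the word-wise loop replaces memcmp() over the TCP options. The unsigned trick (len - 1) >= mss also covers both halves of the old test, since len == 0 wraps to UINT_MAX. A tiny check of that equivalence (hypothetical userland demo):

	#include <assert.h>

	static void check(unsigned int len, unsigned int mss)
	{
		assert(((len - 1) >= mss) == (len > mss || len == 0));
	}

	int main(void)
	{
		check(0, 1460);		/* empty segment flushes  */
		check(1460, 1460);	/* exactly one MSS: keep  */
		check(1461, 1460);	/* oversized: flush       */
		return 0;
	}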
@@ -2537,7 +2591,7 @@ found:
 	tcp_flag_word(th2) |= flags & (TCP_FLAG_FIN | TCP_FLAG_PSH);
 
 out_check_final:
-	flush = skb->len < mss;
+	flush = len < mss;
 	flush |= flags & (TCP_FLAG_URG | TCP_FLAG_PSH | TCP_FLAG_RST |
 			  TCP_FLAG_SYN | TCP_FLAG_FIN);
 
@@ -2604,7 +2658,7 @@ void tcp_free_md5sig_pool(void)
 
 EXPORT_SYMBOL(tcp_free_md5sig_pool);
 
-static struct tcp_md5sig_pool **__tcp_alloc_md5sig_pool(void)
+static struct tcp_md5sig_pool **__tcp_alloc_md5sig_pool(struct sock *sk)
 {
 	int cpu;
 	struct tcp_md5sig_pool **pool;
2617 | struct tcp_md5sig_pool *p; | 2671 | struct tcp_md5sig_pool *p; |
2618 | struct crypto_hash *hash; | 2672 | struct crypto_hash *hash; |
2619 | 2673 | ||
2620 | p = kzalloc(sizeof(*p), GFP_KERNEL); | 2674 | p = kzalloc(sizeof(*p), sk->sk_allocation); |
2621 | if (!p) | 2675 | if (!p) |
2622 | goto out_free; | 2676 | goto out_free; |
2623 | *per_cpu_ptr(pool, cpu) = p; | 2677 | *per_cpu_ptr(pool, cpu) = p; |
@@ -2634,7 +2688,7 @@ out_free:
 	return NULL;
 }
 
-struct tcp_md5sig_pool **tcp_alloc_md5sig_pool(void)
+struct tcp_md5sig_pool **tcp_alloc_md5sig_pool(struct sock *sk)
 {
 	struct tcp_md5sig_pool **pool;
 	int alloc = 0;
@@ -2655,7 +2709,7 @@ retry:
 
 	if (alloc) {
 		/* we cannot hold spinlock here because this may sleep. */
-		struct tcp_md5sig_pool **p = __tcp_alloc_md5sig_pool();
+		struct tcp_md5sig_pool **p = __tcp_alloc_md5sig_pool(sk);
 		spin_lock_bh(&tcp_md5sig_pool_lock);
 		if (!p) {
 			tcp_md5sig_users--;