Diffstat (limited to 'net/ipv4/tcp_input.c')
-rw-r--r--   net/ipv4/tcp_input.c   93
1 file changed, 79 insertions(+), 14 deletions(-)

diff --git a/net/ipv4/tcp_input.c b/net/ipv4/tcp_input.c
index 4a538bc1683d..e08245bdda3a 100644
--- a/net/ipv4/tcp_input.c
+++ b/net/ipv4/tcp_input.c
@@ -71,6 +71,7 @@
 #include <net/inet_common.h>
 #include <linux/ipsec.h>
 #include <asm/unaligned.h>
+#include <net/netdma.h>
 
 int sysctl_tcp_timestamps = 1;
 int sysctl_tcp_window_scaling = 1;
@@ -1649,7 +1650,7 @@ static void tcp_update_scoreboard(struct sock *sk, struct tcp_sock *tp)
 	 * Hence, we can detect timed out packets during fast
 	 * retransmit without falling to slow start.
 	 */
-	if (tcp_head_timedout(sk, tp)) {
+	if (!IsReno(tp) && tcp_head_timedout(sk, tp)) {
 		struct sk_buff *skb;
 
 		skb = tp->scoreboard_skb_hint ? tp->scoreboard_skb_hint
@@ -1662,8 +1663,6 @@ static void tcp_update_scoreboard(struct sock *sk, struct tcp_sock *tp)
 			if (!(TCP_SKB_CB(skb)->sacked&TCPCB_TAGBITS)) {
 				TCP_SKB_CB(skb)->sacked |= TCPCB_LOST;
 				tp->lost_out += tcp_skb_pcount(skb);
-				if (IsReno(tp))
-					tcp_remove_reno_sacks(sk, tp, tcp_skb_pcount(skb) + 1);
 
 				/* clear xmit_retrans hint */
 				if (tp->retransmit_skb_hint &&
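
A note on the guard added above: IsReno() distinguishes plain-Reno connections (no SACK negotiated) from SACK-enabled ones. In tcp_input.c of this era it is, to the best of my recollection (treat as an assumption and verify against the tree), defined as:

	/* defined near the top of tcp_input.c in this era (assumption) */
	#define IsReno(tp) ((tp)->rx_opt.sack_ok == 0)

Head-timeout loss marking walks the SACK scoreboard, so for Reno connections, where no real scoreboard exists, the path is now skipped outright instead of patching up the emulated SACK count with tcp_remove_reno_sacks() afterwards.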
@@ -1690,17 +1689,26 @@ static inline void tcp_moderate_cwnd(struct tcp_sock *tp)
 	tp->snd_cwnd_stamp = tcp_time_stamp;
 }
 
+/* Lower bound on congestion window is slow start threshold
+ * unless congestion avoidance choice decides to override it.
+ */
+static inline u32 tcp_cwnd_min(const struct sock *sk)
+{
+	const struct tcp_congestion_ops *ca_ops = inet_csk(sk)->icsk_ca_ops;
+
+	return ca_ops->min_cwnd ? ca_ops->min_cwnd(sk) : tcp_sk(sk)->snd_ssthresh;
+}
+
 /* Decrease cwnd each second ack. */
 static void tcp_cwnd_down(struct sock *sk)
 {
-	const struct inet_connection_sock *icsk = inet_csk(sk);
 	struct tcp_sock *tp = tcp_sk(sk);
 	int decr = tp->snd_cwnd_cnt + 1;
 
 	tp->snd_cwnd_cnt = decr&1;
 	decr >>= 1;
 
-	if (decr && tp->snd_cwnd > icsk->icsk_ca_ops->min_cwnd(sk))
+	if (decr && tp->snd_cwnd > tcp_cwnd_min(sk))
 		tp->snd_cwnd -= decr;
 
 	tp->snd_cwnd = min(tp->snd_cwnd, tcp_packets_in_flight(tp)+1);
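
The new tcp_cwnd_min() makes the min_cwnd hook optional: a congestion control module may supply its own floor, and modules that leave it NULL now fall back to snd_ssthresh. A minimal module-side sketch (hypothetical module name and floor policy; only the .min_cwnd field matters here, and the other hooks required by tcp_register_congestion_control() are omitted):

	/* Hypothetical: floor the window at half of ssthresh during cwnd
	 * reduction instead of the default snd_ssthresh fallback.
	 */
	static u32 example_min_cwnd(const struct sock *sk)
	{
		return max(tcp_sk(sk)->snd_ssthresh >> 1, 2U);
	}

	static struct tcp_congestion_ops example_cong_ops = {
		.min_cwnd	= example_min_cwnd,
		.name		= "example",
		/* .ssthresh, .cong_avoid, etc. omitted */
	};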
@@ -3787,6 +3795,50 @@ static inline int tcp_checksum_complete_user(struct sock *sk, struct sk_buff *skb)
 		__tcp_checksum_complete_user(sk, skb);
 }
 
+#ifdef CONFIG_NET_DMA
+static int tcp_dma_try_early_copy(struct sock *sk, struct sk_buff *skb, int hlen)
+{
+	struct tcp_sock *tp = tcp_sk(sk);
+	int chunk = skb->len - hlen;
+	int dma_cookie;
+	int copied_early = 0;
+
+	if (tp->ucopy.wakeup)
+		return 0;
+
+	if (!tp->ucopy.dma_chan && tp->ucopy.pinned_list)
+		tp->ucopy.dma_chan = get_softnet_dma();
+
+	if (tp->ucopy.dma_chan && skb->ip_summed == CHECKSUM_UNNECESSARY) {
+
+		dma_cookie = dma_skb_copy_datagram_iovec(tp->ucopy.dma_chan,
+			skb, hlen, tp->ucopy.iov, chunk, tp->ucopy.pinned_list);
+
+		if (dma_cookie < 0)
+			goto out;
+
+		tp->ucopy.dma_cookie = dma_cookie;
+		copied_early = 1;
+
+		tp->ucopy.len -= chunk;
+		tp->copied_seq += chunk;
+		tcp_rcv_space_adjust(sk);
+
+		if ((tp->ucopy.len == 0) ||
+		    (tcp_flag_word(skb->h.th) & TCP_FLAG_PSH) ||
+		    (atomic_read(&sk->sk_rmem_alloc) > (sk->sk_rcvbuf >> 1))) {
+			tp->ucopy.wakeup = 1;
+			sk->sk_data_ready(sk, 0);
+		}
+	} else if (chunk > 0) {
+		tp->ucopy.wakeup = 1;
+		sk->sk_data_ready(sk, 0);
+	}
+out:
+	return copied_early;
+}
+#endif /* CONFIG_NET_DMA */
+
 /*
  * TCP receive function for the ESTABLISHED state.
  *
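
For context on the preconditions the helper checks (not part of this diff; reconstructed from memory of the companion NET_DMA changes to net/ipv4/tcp.c, so treat the exact names as assumptions): the early copy only fires when tcp_recvmsg() has already pinned the user iovec, roughly:

	/* Sketch: in tcp_recvmsg(), before the receive loop, pin the user
	 * pages so the softirq path can offload the copy to a DMA engine.
	 */
	#ifdef CONFIG_NET_DMA
		tp->ucopy.dma_chan = NULL;
		if (len > sysctl_tcp_dma_copybreak && !(flags & MSG_PEEK))
			tp->ucopy.pinned_list =
				dma_pin_iovec_pages(msg->msg_iov, len);
	#endif

With pinned_list set, tcp_dma_try_early_copy() grabs a channel via get_softnet_dma() and issues the copy asynchronously, saving a cookie to poll for completion later.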
@@ -3888,8 +3940,6 @@ int tcp_rcv_established(struct sock *sk, struct sk_buff *skb,
 			    tp->rcv_nxt == tp->rcv_wup)
 				tcp_store_ts_recent(tp);
 
-			tcp_rcv_rtt_measure_ts(sk, skb);
-
 			/* We know that such packets are checksummed
 			 * on entry.
 			 */
@@ -3903,14 +3953,23 @@ int tcp_rcv_established(struct sock *sk, struct sk_buff *skb,
 			}
 		} else {
 			int eaten = 0;
+			int copied_early = 0;
 
-			if (tp->ucopy.task == current &&
-			    tp->copied_seq == tp->rcv_nxt &&
-			    len - tcp_header_len <= tp->ucopy.len &&
-			    sock_owned_by_user(sk)) {
-				__set_current_state(TASK_RUNNING);
+			if (tp->copied_seq == tp->rcv_nxt &&
+			    len - tcp_header_len <= tp->ucopy.len) {
+#ifdef CONFIG_NET_DMA
+				if (tcp_dma_try_early_copy(sk, skb, tcp_header_len)) {
+					copied_early = 1;
+					eaten = 1;
+				}
+#endif
+				if (tp->ucopy.task == current && sock_owned_by_user(sk) && !copied_early) {
+					__set_current_state(TASK_RUNNING);
 
-				if (!tcp_copy_to_iovec(sk, skb, tcp_header_len)) {
+					if (!tcp_copy_to_iovec(sk, skb, tcp_header_len))
+						eaten = 1;
+				}
+				if (eaten) {
 					/* Predicted packet is in window by definition.
 					 * seq == rcv_nxt and rcv_wup <= rcv_nxt.
 					 * Hence, check seq<=rcv_wup reduces to:
@@ -3926,8 +3985,9 @@ int tcp_rcv_established(struct sock *sk, struct sk_buff *skb,
 					__skb_pull(skb, tcp_header_len);
 					tp->rcv_nxt = TCP_SKB_CB(skb)->end_seq;
 					NET_INC_STATS_BH(LINUX_MIB_TCPHPHITSTOUSER);
-					eaten = 1;
 				}
+				if (copied_early)
+					tcp_cleanup_rbuf(sk, skb->len);
 			}
 			if (!eaten) {
 				if (tcp_checksum_complete_user(sk, skb))
@@ -3968,6 +4028,11 @@ int tcp_rcv_established(struct sock *sk, struct sk_buff *skb,
 
 			__tcp_ack_snd_check(sk, 0);
 no_ack:
+#ifdef CONFIG_NET_DMA
+			if (copied_early)
+				__skb_queue_tail(&sk->sk_async_wait_queue, skb);
+			else
+#endif
 			if (eaten)
 				__kfree_skb(skb);
 			else
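
The skb parked on sk_async_wait_queue above must not be freed until the asynchronous copy has finished. Reconstructed from memory of the matching tcp_recvmsg() changes (treat the exact calls and loop shape as assumptions), the completion side looks roughly like:

	#ifdef CONFIG_NET_DMA
		/* Flush pending descriptors, then poll the cookie returned by
		 * dma_skb_copy_datagram_iovec() before releasing queued skbs.
		 */
		if (tp->ucopy.dma_chan) {
			dma_cookie_t done, used;

			dma_async_memcpy_issue_pending(tp->ucopy.dma_chan);
			while (dma_async_memcpy_complete(tp->ucopy.dma_chan,
					tp->ucopy.dma_cookie, &done, &used) ==
							DMA_IN_PROGRESS)
				cpu_relax();	/* placeholder; the real loop
						 * also trims the queue */
		}
	#endif

Only after the engine reports completion can the skbs on sk->sk_async_wait_queue be unlinked and freed; freeing them earlier would let the DMA engine write into recycled memory.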