Diffstat (limited to 'net/ipv4/tcp_input.c')
-rw-r--r--	net/ipv4/tcp_input.c	89
1 file changed, 78 insertions(+), 11 deletions(-)
diff --git a/net/ipv4/tcp_input.c b/net/ipv4/tcp_input.c
index b5521a9d3dc1..e08245bdda3a 100644
--- a/net/ipv4/tcp_input.c
+++ b/net/ipv4/tcp_input.c
@@ -71,6 +71,7 @@
 #include <net/inet_common.h>
 #include <linux/ipsec.h>
 #include <asm/unaligned.h>
+#include <net/netdma.h>
 
 int sysctl_tcp_timestamps = 1;
 int sysctl_tcp_window_scaling = 1;
@@ -1688,17 +1689,26 @@ static inline void tcp_moderate_cwnd(struct tcp_sock *tp)
 	tp->snd_cwnd_stamp = tcp_time_stamp;
 }
 
+/* Lower bound on congestion window is slow start threshold
+ * unless congestion avoidance choice decides to override it.
+ */
+static inline u32 tcp_cwnd_min(const struct sock *sk)
+{
+	const struct tcp_congestion_ops *ca_ops = inet_csk(sk)->icsk_ca_ops;
+
+	return ca_ops->min_cwnd ? ca_ops->min_cwnd(sk) : tcp_sk(sk)->snd_ssthresh;
+}
+
 /* Decrease cwnd each second ack. */
 static void tcp_cwnd_down(struct sock *sk)
 {
-	const struct inet_connection_sock *icsk = inet_csk(sk);
 	struct tcp_sock *tp = tcp_sk(sk);
 	int decr = tp->snd_cwnd_cnt + 1;
 
 	tp->snd_cwnd_cnt = decr&1;
 	decr >>= 1;
 
-	if (decr && tp->snd_cwnd > icsk->icsk_ca_ops->min_cwnd(sk))
+	if (decr && tp->snd_cwnd > tcp_cwnd_min(sk))
 		tp->snd_cwnd -= decr;
 
 	tp->snd_cwnd = min(tp->snd_cwnd, tcp_packets_in_flight(tp)+1);
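
With this hunk, min_cwnd becomes an optional hook: tcp_cwnd_down() now goes through tcp_cwnd_min(), which falls back to snd_ssthresh when a congestion-avoidance module leaves the callback NULL. A minimal sketch of a module exercising the hook, assuming the tcp_congestion_ops layout of this kernel; the module name and floor policy below are invented for illustration:

#include <net/tcp.h>

/* Illustrative only: a module that leaves .min_cwnd NULL now gets the
 * tcp_cwnd_min() fallback of snd_ssthresh; one that sets it can impose
 * its own floor on cwnd during tcp_cwnd_down(). */
static u32 example_min_cwnd(const struct sock *sk)
{
	const struct tcp_sock *tp = tcp_sk(sk);

	/* hypothetical policy: never shrink below half of ssthresh */
	return max(tp->snd_ssthresh / 2U, 2U);
}

static struct tcp_congestion_ops example_cong_ops = {
	.name		= "example",
	.min_cwnd	= example_min_cwnd,	/* optional after this patch */
	/* .ssthresh, .cong_avoid, .owner etc. omitted from this sketch */
};
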
@@ -3785,6 +3795,50 @@ static inline int tcp_checksum_complete_user(struct sock *sk, struct sk_buff *sk
 	__tcp_checksum_complete_user(sk, skb);
 }
 
+#ifdef CONFIG_NET_DMA
+static int tcp_dma_try_early_copy(struct sock *sk, struct sk_buff *skb, int hlen)
+{
+	struct tcp_sock *tp = tcp_sk(sk);
+	int chunk = skb->len - hlen;
+	int dma_cookie;
+	int copied_early = 0;
+
+	if (tp->ucopy.wakeup)
+		return 0;
+
+	if (!tp->ucopy.dma_chan && tp->ucopy.pinned_list)
+		tp->ucopy.dma_chan = get_softnet_dma();
+
+	if (tp->ucopy.dma_chan && skb->ip_summed == CHECKSUM_UNNECESSARY) {
+
+		dma_cookie = dma_skb_copy_datagram_iovec(tp->ucopy.dma_chan,
+			skb, hlen, tp->ucopy.iov, chunk, tp->ucopy.pinned_list);
+
+		if (dma_cookie < 0)
+			goto out;
+
+		tp->ucopy.dma_cookie = dma_cookie;
+		copied_early = 1;
+
+		tp->ucopy.len -= chunk;
+		tp->copied_seq += chunk;
+		tcp_rcv_space_adjust(sk);
+
+		if ((tp->ucopy.len == 0) ||
+		    (tcp_flag_word(skb->h.th) & TCP_FLAG_PSH) ||
+		    (atomic_read(&sk->sk_rmem_alloc) > (sk->sk_rcvbuf >> 1))) {
+			tp->ucopy.wakeup = 1;
+			sk->sk_data_ready(sk, 0);
+		}
+	} else if (chunk > 0) {
+		tp->ucopy.wakeup = 1;
+		sk->sk_data_ready(sk, 0);
+	}
+out:
+	return copied_early;
+}
+#endif /* CONFIG_NET_DMA */
+
 /*
  * TCP receive function for the ESTABLISHED state.
  *
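
tcp_dma_try_early_copy() can only post a copy when tp->ucopy.pinned_list has already been set up, which happens on the tcp_recvmsg() side before packets arrive. A condensed sketch of that arming step, assuming the NET_DMA helpers of this era (dma_pin_iovec_pages(), sysctl_tcp_dma_copybreak); the exact guards are simplified, not quoted from this patch:

#ifdef CONFIG_NET_DMA
	/* Sketch of the recvmsg-side setup this function depends on:
	 * pin the user iovec pages up front so the DMA engine can
	 * write into them asynchronously. Only worthwhile for large
	 * reads; small copies stay on the CPU. */
	tp->ucopy.dma_chan = NULL;
	if (len > sysctl_tcp_dma_copybreak && !(flags & MSG_PEEK))
		tp->ucopy.pinned_list = dma_pin_iovec_pages(msg->msg_iov, len);
#endif
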
@@ -3886,8 +3940,6 @@ int tcp_rcv_established(struct sock *sk, struct sk_buff *skb,
 			    tp->rcv_nxt == tp->rcv_wup)
 				tcp_store_ts_recent(tp);
 
-			tcp_rcv_rtt_measure_ts(sk, skb);
-
 			/* We know that such packets are checksummed
 			 * on entry.
 			 */
@@ -3901,14 +3953,23 @@ int tcp_rcv_established(struct sock *sk, struct sk_buff *skb,
 			}
 		} else {
 			int eaten = 0;
+			int copied_early = 0;
 
-			if (tp->ucopy.task == current &&
-			    tp->copied_seq == tp->rcv_nxt &&
-			    len - tcp_header_len <= tp->ucopy.len &&
-			    sock_owned_by_user(sk)) {
-				__set_current_state(TASK_RUNNING);
+			if (tp->copied_seq == tp->rcv_nxt &&
+			    len - tcp_header_len <= tp->ucopy.len) {
+#ifdef CONFIG_NET_DMA
+				if (tcp_dma_try_early_copy(sk, skb, tcp_header_len)) {
+					copied_early = 1;
+					eaten = 1;
+				}
+#endif
+				if (tp->ucopy.task == current && sock_owned_by_user(sk) && !copied_early) {
+					__set_current_state(TASK_RUNNING);
 
-				if (!tcp_copy_to_iovec(sk, skb, tcp_header_len)) {
+					if (!tcp_copy_to_iovec(sk, skb, tcp_header_len))
+						eaten = 1;
+				}
+				if (eaten) {
 				/* Predicted packet is in window by definition.
 				 * seq == rcv_nxt and rcv_wup <= rcv_nxt.
 				 * Hence, check seq<=rcv_wup reduces to:
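
For readability, here is the fast-path logic that results from this hunk, condensed into one block (a paraphrase of the diff above, not a verbatim excerpt):

if (tp->copied_seq == tp->rcv_nxt &&
    len - tcp_header_len <= tp->ucopy.len) {
#ifdef CONFIG_NET_DMA
	/* First try to post an async DMA copy straight into the
	 * user iovec. */
	if (tcp_dma_try_early_copy(sk, skb, tcp_header_len))
		copied_early = eaten = 1;
#endif
	/* The synchronous copy is now the fallback: taken only when
	 * the reading task owns the socket and no DMA copy was posted. */
	if (tp->ucopy.task == current && sock_owned_by_user(sk) &&
	    !copied_early) {
		__set_current_state(TASK_RUNNING);
		if (!tcp_copy_to_iovec(sk, skb, tcp_header_len))
			eaten = 1;
	}
	if (eaten) {
		/* ... header-prediction bookkeeping ... */
	}
}
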
@@ -3924,8 +3985,9 @@ int tcp_rcv_established(struct sock *sk, struct sk_buff *skb,
 					__skb_pull(skb, tcp_header_len);
 					tp->rcv_nxt = TCP_SKB_CB(skb)->end_seq;
 					NET_INC_STATS_BH(LINUX_MIB_TCPHPHITSTOUSER);
-					eaten = 1;
 				}
+				if (copied_early)
+					tcp_cleanup_rbuf(sk, skb->len);
 			}
 			if (!eaten) {
 				if (tcp_checksum_complete_user(sk, skb))
@@ -3966,6 +4028,11 @@ int tcp_rcv_established(struct sock *sk, struct sk_buff *skb,
 
 			__tcp_ack_snd_check(sk, 0);
 no_ack:
+#ifdef CONFIG_NET_DMA
+			if (copied_early)
+				__skb_queue_tail(&sk->sk_async_wait_queue, skb);
+			else
+#endif
 			if (eaten)
 				__kfree_skb(skb);
 			else
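
An early-copied skb cannot be freed in the receive path while its DMA copy may still be in flight, so it is parked on sk_async_wait_queue instead. A simplified sketch of the consumer side this implies, assuming the dmaengine completion API of this period (dma_async_memcpy_issue_pending()/dma_async_memcpy_complete()); the real cleanup in tcp_recvmsg() is more involved and also handles partial completion:

#ifdef CONFIG_NET_DMA
	/* Sketch: before returning to userspace, tcp_recvmsg() must make
	 * sure every offloaded copy has landed, then release the skbs
	 * that were parked on sk_async_wait_queue above. */
	if (tp->ucopy.dma_chan) {
		dma_cookie_t done, used;

		dma_async_memcpy_issue_pending(tp->ucopy.dma_chan);
		while (dma_async_memcpy_complete(tp->ucopy.dma_chan,
						 tp->ucopy.dma_cookie,
						 &done, &used) == DMA_IN_PROGRESS)
			cpu_relax();	/* simplified busy-wait */

		__skb_queue_purge(&sk->sk_async_wait_queue);
		tp->ucopy.dma_chan = NULL;
	}
	if (tp->ucopy.pinned_list) {
		dma_unpin_iovec_pages(tp->ucopy.pinned_list);
		tp->ucopy.pinned_list = NULL;
	}
#endif
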