Diffstat (limited to 'net/ipv4/tcp_input.c')
-rw-r--r--	net/ipv4/tcp_input.c	78
1 file changed, 52 insertions(+), 26 deletions(-)
diff --git a/net/ipv4/tcp_input.c b/net/ipv4/tcp_input.c
index 1bdb1bd22134..cdc051bfdb4d 100644
--- a/net/ipv4/tcp_input.c
+++ b/net/ipv4/tcp_input.c
@@ -3854,8 +3854,28 @@ static void tcp_ofo_queue(struct sock *sk)
 	}
 }
 
+static int tcp_prune_ofo_queue(struct sock *sk);
 static int tcp_prune_queue(struct sock *sk);
 
+static inline int tcp_try_rmem_schedule(struct sock *sk, unsigned int size)
+{
+	if (atomic_read(&sk->sk_rmem_alloc) > sk->sk_rcvbuf ||
+	    !sk_rmem_schedule(sk, size)) {
+
+		if (tcp_prune_queue(sk) < 0)
+			return -1;
+
+		if (!sk_rmem_schedule(sk, size)) {
+			if (!tcp_prune_ofo_queue(sk))
+				return -1;
+
+			if (!sk_rmem_schedule(sk, size))
+				return -1;
+		}
+	}
+	return 0;
+}
+
 static void tcp_data_queue(struct sock *sk, struct sk_buff *skb)
 {
 	struct tcphdr *th = tcp_hdr(skb);
@@ -3905,12 +3925,9 @@ static void tcp_data_queue(struct sock *sk, struct sk_buff *skb)
 	if (eaten <= 0) {
 queue_and_out:
 		if (eaten < 0 &&
-		    (atomic_read(&sk->sk_rmem_alloc) > sk->sk_rcvbuf ||
-		     !sk_rmem_schedule(sk, skb->truesize))) {
-			if (tcp_prune_queue(sk) < 0 ||
-			    !sk_rmem_schedule(sk, skb->truesize))
-				goto drop;
-		}
+		    tcp_try_rmem_schedule(sk, skb->truesize))
+			goto drop;
+
 		skb_set_owner_r(skb, sk);
 		__skb_queue_tail(&sk->sk_receive_queue, skb);
 	}
@@ -3979,12 +3996,8 @@ drop:
 
 	TCP_ECN_check_ce(tp, skb);
 
-	if (atomic_read(&sk->sk_rmem_alloc) > sk->sk_rcvbuf ||
-	    !sk_rmem_schedule(sk, skb->truesize)) {
-		if (tcp_prune_queue(sk) < 0 ||
-		    !sk_rmem_schedule(sk, skb->truesize))
-			goto drop;
-	}
+	if (tcp_try_rmem_schedule(sk, skb->truesize))
+		goto drop;
 
 	/* Disable header prediction. */
 	tp->pred_flags = 0;
@@ -4211,6 +4224,32 @@ static void tcp_collapse_ofo_queue(struct sock *sk)
 	}
 }
 
+/*
+ * Purge the out-of-order queue.
+ * Return true if queue was pruned.
+ */
+static int tcp_prune_ofo_queue(struct sock *sk)
+{
+	struct tcp_sock *tp = tcp_sk(sk);
+	int res = 0;
+
+	if (!skb_queue_empty(&tp->out_of_order_queue)) {
+		NET_INC_STATS_BH(LINUX_MIB_OFOPRUNED);
+		__skb_queue_purge(&tp->out_of_order_queue);
+
+		/* Reset SACK state. A conforming SACK implementation will
+		 * do the same at a timeout based retransmit. When a connection
+		 * is in a sad state like this, we care only about integrity
+		 * of the connection not performance.
+		 */
+		if (tp->rx_opt.sack_ok)
+			tcp_sack_reset(&tp->rx_opt);
+		sk_mem_reclaim(sk);
+		res = 1;
+	}
+	return res;
+}
+
 /* Reduce allocated memory if we can, trying to get
  * the socket within its memory limits again.
  *
@@ -4244,20 +4283,7 @@ static int tcp_prune_queue(struct sock *sk)
 	/* Collapsing did not help, destructive actions follow.
 	 * This must not ever occur. */
 
-	/* First, purge the out_of_order queue. */
-	if (!skb_queue_empty(&tp->out_of_order_queue)) {
-		NET_INC_STATS_BH(LINUX_MIB_OFOPRUNED);
-		__skb_queue_purge(&tp->out_of_order_queue);
-
-		/* Reset SACK state. A conforming SACK implementation will
-		 * do the same at a timeout based retransmit. When a connection
-		 * is in a sad state like this, we care only about integrity
-		 * of the connection not performance.
-		 */
-		if (tcp_is_sack(tp))
-			tcp_sack_reset(&tp->rx_opt);
-		sk_mem_reclaim(sk);
-	}
+	tcp_prune_ofo_queue(sk);
 
 	if (atomic_read(&sk->sk_rmem_alloc) <= sk->sk_rcvbuf)
 		return 0;
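
Taken together, the patch replaces two copies of the same receive-buffer admission check in tcp_data_queue() with one tcp_try_rmem_schedule() helper, and factors the out-of-order purge out of tcp_prune_queue() into tcp_prune_ofo_queue() so the helper can fall back to it as a last resort. Below is a minimal userspace sketch of that decision ladder; struct rx_state and the rmem_schedule()/prune_queue()/prune_ofo_queue() functions are hypothetical stand-ins for the socket state and kernel helpers, not kernel APIs.

#include <stdbool.h>
#include <stdio.h>

/* Hypothetical model of receive-side memory state; the kernel operates
 * on struct sock / struct tcp_sock instead. */
struct rx_state {
	unsigned int rmem_alloc; /* bytes charged to the receive queue */
	unsigned int rcvbuf;     /* configured receive buffer limit */
	unsigned int quota;      /* bytes memory accounting will still grant */
	unsigned int ofo_bytes;  /* bytes held in the out-of-order queue */
};

/* Stand-in for sk_rmem_schedule(): succeeds while the quota covers size. */
static bool rmem_schedule(const struct rx_state *s, unsigned int size)
{
	return size <= s->quota;
}

/* Stand-in for tcp_prune_queue(): model collapsing the receive queue as
 * halving its charge and granting a little quota; < 0 means nothing left
 * to reclaim. */
static int prune_queue(struct rx_state *s)
{
	if (s->rmem_alloc == 0)
		return -1;
	s->rmem_alloc /= 2;
	s->quota += 200;
	return 0;
}

/* Stand-in for tcp_prune_ofo_queue(): drop the whole out-of-order queue
 * and report whether anything was freed, like the patch's res flag. */
static bool prune_ofo_queue(struct rx_state *s)
{
	if (s->ofo_bytes == 0)
		return false;
	s->quota += s->ofo_bytes; /* purged skbs return their charge */
	s->ofo_bytes = 0;
	return true;
}

/* Same decision ladder as the new tcp_try_rmem_schedule(): fast path,
 * then prune the receive queue, then purge the out-of-order queue as a
 * last resort, retrying the reservation after each step. */
static int try_rmem_schedule(struct rx_state *s, unsigned int size)
{
	if (s->rmem_alloc > s->rcvbuf || !rmem_schedule(s, size)) {
		if (prune_queue(s) < 0)
			return -1;

		if (!rmem_schedule(s, size)) {
			if (!prune_ofo_queue(s))
				return -1;

			if (!rmem_schedule(s, size))
				return -1;
		}
	}
	return 0;
}

int main(void)
{
	struct rx_state s = { .rmem_alloc = 900, .rcvbuf = 800,
			      .quota = 0, .ofo_bytes = 1500 };

	/* Admitting a 1000-byte segment needs both pruning stages. */
	printf("admit: %d\n", try_rmem_schedule(&s, 1000));
	return 0;
}

With these sample numbers the fast path fails, collapsing alone does not free enough quota, and only purging the out-of-order bytes lets the 1000-byte charge through, which is exactly the ordering the kernel helper encodes: destroy already-queued out-of-order data only after cheaper reclamation has been tried.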