about summary refs log tree commit diff stats
path: root/net/ipv4
diff options
context:
space:
mode:
authorLinus Torvalds <torvalds@linux-foundation.org>2008-04-16 10:44:27 -0400
committerLinus Torvalds <torvalds@linux-foundation.org>2008-04-16 10:44:27 -0400
commitb4b8f57965e007afbbb0175ea28f733723c5260b (patch)
tree7f4aeda48ada35771ea3e63bbbb36e52c47aeb62 /net/ipv4
parent424b00e2c0f0c38f2cf5331391742ec998f6d89f (diff)
parent56f367bbfd5a7439961499ca6a2f0822d2074d83 (diff)
Merge git://git.kernel.org/pub/scm/linux/kernel/git/davem/net-2.6
* git://git.kernel.org/pub/scm/linux/kernel/git/davem/net-2.6:
  [TCP]: Add return value indication to tcp_prune_ofo_queue().
  PS3: gelic: fix the oops on the broken IE returned from the hypervisor
  b43legacy: fix DMA mapping leakage
  mac80211: remove message on receiving unexpected unencrypted frames
  Update rt2x00 MAINTAINERS entry
  Add rfkill to MAINTAINERS file
  rfkill: Fix device type check when toggling states
  b43legacy: Fix usage of struct device used for DMAing
  ssb: Fix usage of struct device used for DMAing
  MAINTAINERS: move to generic repository for iwlwifi
  b43legacy: fix initvals loading on bcm4303
  rtl8187: Add missing priv->vif assignments
  netconsole: only set CON_PRINTBUFFER if the user specifies a netconsole
  [CAN]: Update documentation of struct sockaddr_can
  MAINTAINERS: isdn4linux@listserv.isdn4linux.de is subscribers-only
  [TCP]: Fix never pruned tcp out-of-order queue.
  [NET_SCHED] sch_api: fix qdisc_tree_decrease_qlen() loop
Diffstat (limited to 'net/ipv4')
-rw-r--r--net/ipv4/tcp_input.c78
1 file changed, 52 insertions, 26 deletions
diff --git a/net/ipv4/tcp_input.c b/net/ipv4/tcp_input.c
index 5119856017ab..bbb7d88a16b4 100644
--- a/net/ipv4/tcp_input.c
+++ b/net/ipv4/tcp_input.c
@@ -3841,8 +3841,28 @@ static void tcp_ofo_queue(struct sock *sk)
3841 } 3841 }
3842} 3842}
3843 3843
3844static int tcp_prune_ofo_queue(struct sock *sk);
3844static int tcp_prune_queue(struct sock *sk); 3845static int tcp_prune_queue(struct sock *sk);
3845 3846
3847static inline int tcp_try_rmem_schedule(struct sock *sk, unsigned int size)
3848{
3849 if (atomic_read(&sk->sk_rmem_alloc) > sk->sk_rcvbuf ||
3850 !sk_rmem_schedule(sk, size)) {
3851
3852 if (tcp_prune_queue(sk) < 0)
3853 return -1;
3854
3855 if (!sk_rmem_schedule(sk, size)) {
3856 if (!tcp_prune_ofo_queue(sk))
3857 return -1;
3858
3859 if (!sk_rmem_schedule(sk, size))
3860 return -1;
3861 }
3862 }
3863 return 0;
3864}
3865
3846static void tcp_data_queue(struct sock *sk, struct sk_buff *skb) 3866static void tcp_data_queue(struct sock *sk, struct sk_buff *skb)
3847{ 3867{
3848 struct tcphdr *th = tcp_hdr(skb); 3868 struct tcphdr *th = tcp_hdr(skb);
@@ -3892,12 +3912,9 @@ static void tcp_data_queue(struct sock *sk, struct sk_buff *skb)
3892 if (eaten <= 0) { 3912 if (eaten <= 0) {
3893queue_and_out: 3913queue_and_out:
3894 if (eaten < 0 && 3914 if (eaten < 0 &&
3895 (atomic_read(&sk->sk_rmem_alloc) > sk->sk_rcvbuf || 3915 tcp_try_rmem_schedule(sk, skb->truesize))
3896 !sk_rmem_schedule(sk, skb->truesize))) { 3916 goto drop;
3897 if (tcp_prune_queue(sk) < 0 || 3917
3898 !sk_rmem_schedule(sk, skb->truesize))
3899 goto drop;
3900 }
3901 skb_set_owner_r(skb, sk); 3918 skb_set_owner_r(skb, sk);
3902 __skb_queue_tail(&sk->sk_receive_queue, skb); 3919 __skb_queue_tail(&sk->sk_receive_queue, skb);
3903 } 3920 }
@@ -3966,12 +3983,8 @@ drop:
3966 3983
3967 TCP_ECN_check_ce(tp, skb); 3984 TCP_ECN_check_ce(tp, skb);
3968 3985
3969 if (atomic_read(&sk->sk_rmem_alloc) > sk->sk_rcvbuf || 3986 if (tcp_try_rmem_schedule(sk, skb->truesize))
3970 !sk_rmem_schedule(sk, skb->truesize)) { 3987 goto drop;
3971 if (tcp_prune_queue(sk) < 0 ||
3972 !sk_rmem_schedule(sk, skb->truesize))
3973 goto drop;
3974 }
3975 3988
3976 /* Disable header prediction. */ 3989 /* Disable header prediction. */
3977 tp->pred_flags = 0; 3990 tp->pred_flags = 0;
@@ -4198,6 +4211,32 @@ static void tcp_collapse_ofo_queue(struct sock *sk)
4198 } 4211 }
4199} 4212}
4200 4213
4214/*
4215 * Purge the out-of-order queue.
4216 * Return true if queue was pruned.
4217 */
4218static int tcp_prune_ofo_queue(struct sock *sk)
4219{
4220 struct tcp_sock *tp = tcp_sk(sk);
4221 int res = 0;
4222
4223 if (!skb_queue_empty(&tp->out_of_order_queue)) {
4224 NET_INC_STATS_BH(LINUX_MIB_OFOPRUNED);
4225 __skb_queue_purge(&tp->out_of_order_queue);
4226
4227 /* Reset SACK state. A conforming SACK implementation will
4228 * do the same at a timeout based retransmit. When a connection
4229 * is in a sad state like this, we care only about integrity
4230 * of the connection not performance.
4231 */
4232 if (tp->rx_opt.sack_ok)
4233 tcp_sack_reset(&tp->rx_opt);
4234 sk_mem_reclaim(sk);
4235 res = 1;
4236 }
4237 return res;
4238}
4239
4201/* Reduce allocated memory if we can, trying to get 4240/* Reduce allocated memory if we can, trying to get
4202 * the socket within its memory limits again. 4241 * the socket within its memory limits again.
4203 * 4242 *
@@ -4231,20 +4270,7 @@ static int tcp_prune_queue(struct sock *sk)
4231 /* Collapsing did not help, destructive actions follow. 4270 /* Collapsing did not help, destructive actions follow.
4232 * This must not ever occur. */ 4271 * This must not ever occur. */
4233 4272
4234 /* First, purge the out_of_order queue. */ 4273 tcp_prune_ofo_queue(sk);
4235 if (!skb_queue_empty(&tp->out_of_order_queue)) {
4236 NET_INC_STATS_BH(LINUX_MIB_OFOPRUNED);
4237 __skb_queue_purge(&tp->out_of_order_queue);
4238
4239 /* Reset SACK state. A conforming SACK implementation will
4240 * do the same at a timeout based retransmit. When a connection
4241 * is in a sad state like this, we care only about integrity
4242 * of the connection not performance.
4243 */
4244 if (tcp_is_sack(tp))
4245 tcp_sack_reset(&tp->rx_opt);
4246 sk_mem_reclaim(sk);
4247 }
4248 4274
4249 if (atomic_read(&sk->sk_rmem_alloc) <= sk->sk_rcvbuf) 4275 if (atomic_read(&sk->sk_rmem_alloc) <= sk->sk_rcvbuf)
4250 return 0; 4276 return 0;