path: root/net/ipv4/tcp_input.c
author     Ingo Molnar <mingo@kernel.org>  2013-01-24 06:47:48 -0500
committer  Ingo Molnar <mingo@kernel.org>  2013-01-24 06:47:48 -0500
commit     befddb21c845f8fb49e637997891ef97c6a869dc (patch)
tree       0e7629123184f2dd50291ad6d477b894175f0f26 /net/ipv4/tcp_input.c
parent     e716efde75267eab919cdb2bef5b2cb77f305326 (diff)
parent     7d1f9aeff1ee4a20b1aeb377dd0f579fe9647619 (diff)
Merge tag 'v3.8-rc4' into irq/core
Merge Linux 3.8-rc4 before pulling in new commits - we were on an old v3.7 base.

Signed-off-by: Ingo Molnar <mingo@kernel.org>
Diffstat (limited to 'net/ipv4/tcp_input.c')
-rw-r--r--  net/ipv4/tcp_input.c  83
1 file changed, 54 insertions(+), 29 deletions(-)
diff --git a/net/ipv4/tcp_input.c b/net/ipv4/tcp_input.c
index 1db663983587..18f97ca76b00 100644
--- a/net/ipv4/tcp_input.c
+++ b/net/ipv4/tcp_input.c
@@ -3552,6 +3552,24 @@ static bool tcp_process_frto(struct sock *sk, int flag)
 	return false;
 }
 
+/* RFC 5961 7 [ACK Throttling] */
+static void tcp_send_challenge_ack(struct sock *sk)
+{
+	/* unprotected vars, we dont care of overwrites */
+	static u32 challenge_timestamp;
+	static unsigned int challenge_count;
+	u32 now = jiffies / HZ;
+
+	if (now != challenge_timestamp) {
+		challenge_timestamp = now;
+		challenge_count = 0;
+	}
+	if (++challenge_count <= sysctl_tcp_challenge_ack_limit) {
+		NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPCHALLENGEACK);
+		tcp_send_ack(sk);
+	}
+}
+
 /* This routine deals with incoming acks, but not outgoing ones. */
 static int tcp_ack(struct sock *sk, const struct sk_buff *skb, int flag)
 {
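The helper added above throttles challenge ACKs by keeping a per-second counter and comparing it against sysctl_tcp_challenge_ack_limit. A minimal user-space sketch of the same bucketing idea, with illustrative names (challenge_ack_limit, now_seconds()) rather than kernel APIs:

#include <stdbool.h>
#include <stdint.h>
#include <time.h>

/* Illustrative cap; the kernel reads sysctl_tcp_challenge_ack_limit. */
static const unsigned int challenge_ack_limit = 100;

/* Stand-in for jiffies / HZ: the current time in whole seconds. */
static uint32_t now_seconds(void)
{
	return (uint32_t)time(NULL);
}

/* Returns true if another challenge ACK may be sent in the current
 * one-second bucket; the counter resets whenever the bucket changes,
 * mirroring the logic of the new helper.
 */
static bool challenge_ack_allowed(void)
{
	static uint32_t bucket_start;
	static unsigned int sent_this_second;
	uint32_t now = now_seconds();

	if (now != bucket_start) {
		bucket_start = now;
		sent_this_second = 0;
	}
	return ++sent_this_second <= challenge_ack_limit;
}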
@@ -3571,8 +3589,14 @@ static int tcp_ack(struct sock *sk, const struct sk_buff *skb, int flag)
 	/* If the ack is older than previous acks
 	 * then we can probably ignore it.
 	 */
-	if (before(ack, prior_snd_una))
+	if (before(ack, prior_snd_una)) {
+		/* RFC 5961 5.2 [Blind Data Injection Attack].[Mitigation] */
+		if (before(ack, prior_snd_una - tp->max_window)) {
+			tcp_send_challenge_ack(sk);
+			return -1;
+		}
 		goto old_ack;
+	}
 
 	/* If the ack includes data we haven't sent yet, discard
 	 * this segment (RFC793 Section 3.9).
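Per RFC 5961 5.2, an ACK that falls below SND.UNA - MAX.SND.WND now draws a rate-limited challenge ACK instead of being silently treated as old. A rough stand-alone sketch of the resulting three-way classification, assuming wrap-safe 32-bit sequence arithmetic like the kernel's before():

#include <stdint.h>

/* Wrap-safe "s1 comes before s2", the same idea as the kernel's before(). */
static int seq_before(uint32_t s1, uint32_t s2)
{
	return (int32_t)(s1 - s2) < 0;
}

enum ack_verdict { ACK_OK, ACK_OLD, ACK_CHALLENGE };

/* Classify an incoming ACK against SND.UNA and the largest window ever
 * advertised by the peer (tp->max_window in the kernel).
 */
static enum ack_verdict classify_ack(uint32_t ack, uint32_t snd_una,
				     uint32_t max_window)
{
	if (!seq_before(ack, snd_una))
		return ACK_OK;		/* current or newer ACK */
	if (seq_before(ack, snd_una - max_window))
		return ACK_CHALLENGE;	/* too old: possible blind injection */
	return ACK_OLD;			/* old but within the plausible range */
}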
@@ -4529,6 +4553,9 @@ int tcp_send_rcvq(struct sock *sk, struct msghdr *msg, size_t size)
 	struct tcphdr *th;
 	bool fragstolen;
 
+	if (size == 0)
+		return 0;
+
 	skb = alloc_skb(size + sizeof(*th), sk->sk_allocation);
 	if (!skb)
 		goto err;
@@ -5241,23 +5268,6 @@ out:
 }
 #endif /* CONFIG_NET_DMA */
 
-static void tcp_send_challenge_ack(struct sock *sk)
-{
-	/* unprotected vars, we dont care of overwrites */
-	static u32 challenge_timestamp;
-	static unsigned int challenge_count;
-	u32 now = jiffies / HZ;
-
-	if (now != challenge_timestamp) {
-		challenge_timestamp = now;
-		challenge_count = 0;
-	}
-	if (++challenge_count <= sysctl_tcp_challenge_ack_limit) {
-		NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPCHALLENGEACK);
-		tcp_send_ack(sk);
-	}
-}
-
 /* Does PAWS and seqno based validation of an incoming segment, flags will
  * play significant role here.
  */
@@ -5310,11 +5320,6 @@ static bool tcp_validate_incoming(struct sock *sk, struct sk_buff *skb,
 		goto discard;
 	}
 
-	/* ts_recent update must be made after we are sure that the packet
-	 * is in window.
-	 */
-	tcp_replace_ts_recent(tp, TCP_SKB_CB(skb)->seq);
-
 	/* step 3: check security and precedence [ignored] */
 
 	/* step 4: Check for a SYN
@@ -5538,6 +5543,9 @@ slow_path:
 	if (len < (th->doff << 2) || tcp_checksum_complete_user(sk, skb))
 		goto csum_error;
 
+	if (!th->ack && !th->rst)
+		goto discard;
+
 	/*
 	 *	Standard slow path.
 	 */
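With tcp_ack() now called unconditionally at step5, the slow path first drops any segment that carries neither ACK nor RST; RFC 793 expects every segment after the initial SYN to set ACK. A tiny illustration of that flag test, using a simplified flags struct rather than the kernel's struct tcphdr bitfields:

#include <stdbool.h>

/* Simplified TCP flags; the kernel reads bitfields in struct tcphdr. */
struct tcp_flags {
	bool syn, ack, rst, fin;
};

/* Matches the early drop added above: no ACK and no RST means the segment
 * is discarded before any further slow-path processing.
 */
static bool drop_before_slow_path(const struct tcp_flags *f)
{
	return !f->ack && !f->rst;
}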
@@ -5546,9 +5554,14 @@ slow_path:
 	return 0;
 
 step5:
-	if (th->ack && tcp_ack(sk, skb, FLAG_SLOWPATH) < 0)
+	if (tcp_ack(sk, skb, FLAG_SLOWPATH) < 0)
 		goto discard;
 
+	/* ts_recent update must be made after we are sure that the packet
+	 * is in window.
+	 */
+	tcp_replace_ts_recent(tp, TCP_SKB_CB(skb)->seq);
+
 	tcp_rcv_rtt_measure_ts(sk, skb);
 
 	/* Process urgent data. */
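The ts_recent (PAWS timestamp) update also moves out of tcp_validate_incoming() and now runs only after the ACK has been accepted, so a segment that is ultimately discarded no longer advances the remembered peer timestamp. A condensed sketch of the resulting slow-path ordering, with stub types and helpers standing in for the kernel's:

#include <stdbool.h>
#include <stdint.h>

/* Minimal stand-ins for the kernel's connection and segment state. */
struct conn    { uint32_t ts_recent; };
struct segment { uint32_t tsval; bool ack; bool rst; };

/* PAWS and sequence-number checks would live here. */
static bool validate_incoming(struct conn *c, const struct segment *s)
{
	(void)c; (void)s;
	return true;
}

/* A negative return means "discard" (possibly after a challenge ACK). */
static int process_ack(struct conn *c, const struct segment *s)
{
	(void)c; (void)s;
	return 0;
}

/* Condensed ordering after this merge: the peer's timestamp is recorded
 * only once both the sequence checks and the ACK itself have passed.
 */
static int slow_path_receive(struct conn *c, const struct segment *s)
{
	if (!s->ack && !s->rst)
		return 0;		/* early drop, as above */
	if (!validate_incoming(c, s))
		return 0;
	if (process_ack(c, s) < 0)
		return 0;		/* discarded: ts_recent untouched */
	c->ts_recent = s->tsval;	/* timestamp update happens last */
	return 0;
}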
@@ -5642,7 +5655,11 @@ static bool tcp_rcv_fastopen_synack(struct sock *sk, struct sk_buff *synack,
 	tcp_fastopen_cache_set(sk, mss, cookie, syn_drop);
 
 	if (data) { /* Retransmit unacked data in SYN */
-		tcp_retransmit_skb(sk, data);
+		tcp_for_write_queue_from(data, sk) {
+			if (data == tcp_send_head(sk) ||
+			    __tcp_retransmit_skb(sk, data))
+				break;
+		}
 		tcp_rearm_rto(sk);
 		return true;
 	}
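Unacked data sent with a Fast Open SYN may span more than one skb on the write queue, so the single tcp_retransmit_skb() call becomes a walk from that skb that stops at the send head or on the first retransmit failure. A toy sketch of that traversal over a plain linked list (the types and helpers are placeholders, not the kernel's skb queue API):

#include <stddef.h>

/* Toy singly linked send queue standing in for the skb write queue. */
struct pkt {
	struct pkt *next;
};

struct send_queue {
	struct pkt *send_head;	/* first packet not yet sent */
};

/* Placeholder for __tcp_retransmit_skb(); non-zero means "stop here". */
static int retransmit_one(struct pkt *p)
{
	(void)p;
	return 0;
}

/* Mirrors the new loop: retransmit every queued packet starting at 'from',
 * stopping at the send head or on the first failure, instead of only the
 * first packet.
 */
static void retransmit_from(struct send_queue *q, struct pkt *from)
{
	struct pkt *p;

	for (p = from; p; p = p->next) {
		if (p == q->send_head || retransmit_one(p))
			break;
	}
}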
@@ -5970,11 +5987,15 @@ int tcp_rcv_state_process(struct sock *sk, struct sk_buff *skb,
 		if (tcp_check_req(sk, skb, req, NULL, true) == NULL)
 			goto discard;
 	}
+
+	if (!th->ack && !th->rst)
+		goto discard;
+
 	if (!tcp_validate_incoming(sk, skb, th, 0))
 		return 0;
 
 	/* step 5: check the ACK field */
-	if (th->ack) {
+	if (true) {
 		int acceptable = tcp_ack(sk, skb, FLAG_SLOWPATH) > 0;
 
 		switch (sk->sk_state) {
@@ -5985,7 +6006,7 @@ int tcp_rcv_state_process(struct sock *sk, struct sk_buff *skb,
 				 */
 				if (req) {
 					tcp_synack_rtt_meas(sk, req);
-					tp->total_retrans = req->retrans;
+					tp->total_retrans = req->num_retrans;
 
 					reqsk_fastopen_remove(sk, req, false);
 				} else {
@@ -6124,8 +6145,12 @@ int tcp_rcv_state_process(struct sock *sk, struct sk_buff *skb,
 			}
 			break;
 		}
-	} else
-		goto discard;
+	}
+
+	/* ts_recent update must be made after we are sure that the packet
+	 * is in window.
+	 */
+	tcp_replace_ts_recent(tp, TCP_SKB_CB(skb)->seq);
 
 	/* step 6: check the URG bit */
 	tcp_urg(sk, skb, th);