author	David S. Miller <davem@davemloft.net>	2017-08-09 19:28:45 -0400
committer	David S. Miller <davem@davemloft.net>	2017-08-09 19:28:45 -0400
commit	3118e6e19da7b8d76b2456b880c74a9aa3a2268b (patch)
tree	3060d11297c1195ef2d1f120d9c2247b4b1de4ae /net/ipv4/tcp_input.c
parent	feca7d8c135bc1527b244fe817b8b6498066ccec (diff)
parent	48fb6f4db940e92cfb16cd878cddd59ea6120d06 (diff)
Merge git://git.kernel.org/pub/scm/linux/kernel/git/davem/net
The UDP offload conflict is dealt with by simply taking what is in net-next, where we have removed all of the UFO handling code entirely.

The TCP conflict was a case of local variables in a function being removed from both net and net-next.

In netvsc we had an assignment right next to where a missing set of u64 stats sync object inits were added.

Signed-off-by: David S. Miller <davem@davemloft.net>
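[For context, the "u64 stats sync object inits" mentioned above refer to initializing the u64_stats_sync member of each per-cpu stats structure. A minimal illustrative sketch; struct vf_pcpu_stats and vf_stats_init() are hypothetical names, not the actual drivers/net/hyperv code:]

/* Illustrative only: the shape of per-cpu u64 stats sync inits. */
#include <linux/percpu.h>
#include <linux/u64_stats_sync.h>

struct vf_pcpu_stats {			/* hypothetical per-cpu stats block */
	u64			rx_packets;
	u64			rx_bytes;
	struct u64_stats_sync	syncp;	/* guards the u64 counters */
};

static void vf_stats_init(struct vf_pcpu_stats __percpu *vf_stats)
{
	int cpu;

	/* Each per-cpu instance needs its syncp initialized before use. */
	for_each_possible_cpu(cpu)
		u64_stats_init(&per_cpu_ptr(vf_stats, cpu)->syncp);
}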
Diffstat (limited to 'net/ipv4/tcp_input.c')
-rw-r--r--	net/ipv4/tcp_input.c	34
1 file changed, 19 insertions(+), 15 deletions(-)
diff --git a/net/ipv4/tcp_input.c b/net/ipv4/tcp_input.c
index 842ed75ccb25..d73903fe8c83 100644
--- a/net/ipv4/tcp_input.c
+++ b/net/ipv4/tcp_input.c
@@ -106,6 +106,7 @@ int sysctl_tcp_invalid_ratelimit __read_mostly = HZ/2;
 #define FLAG_ORIG_SACK_ACKED	0x200 /* Never retransmitted data are (s)acked */
 #define FLAG_SND_UNA_ADVANCED	0x400 /* Snd_una was changed (!= FLAG_DATA_ACKED) */
 #define FLAG_DSACKING_ACK	0x800 /* SACK blocks contained D-SACK info */
+#define FLAG_SET_XMIT_TIMER	0x1000 /* Set TLP or RTO timer */
 #define FLAG_SACK_RENEGING	0x2000 /* snd_una advanced to a sacked seq */
 #define FLAG_UPDATE_TS_RECENT	0x4000 /* tcp_replace_ts_recent() */
 #define FLAG_NO_CHALLENGE_ACK	0x8000 /* do not call tcp_send_challenge_ack() */
@@ -2520,8 +2521,8 @@ static inline void tcp_end_cwnd_reduction(struct sock *sk)
 		return;
 
 	/* Reset cwnd to ssthresh in CWR or Recovery (unless it's undone) */
-	if (inet_csk(sk)->icsk_ca_state == TCP_CA_CWR ||
-	    (tp->undo_marker && tp->snd_ssthresh < TCP_INFINITE_SSTHRESH)) {
+	if (tp->snd_ssthresh < TCP_INFINITE_SSTHRESH &&
+	    (inet_csk(sk)->icsk_ca_state == TCP_CA_CWR || tp->undo_marker)) {
 		tp->snd_cwnd = tp->snd_ssthresh;
 		tp->snd_cwnd_stamp = tcp_jiffies32;
 	}
@@ -3004,10 +3005,7 @@ void tcp_rearm_rto(struct sock *sk)
 	/* Offset the time elapsed after installing regular RTO */
 	if (icsk->icsk_pending == ICSK_TIME_REO_TIMEOUT ||
 	    icsk->icsk_pending == ICSK_TIME_LOSS_PROBE) {
-		struct sk_buff *skb = tcp_write_queue_head(sk);
-		u64 rto_time_stamp = skb->skb_mstamp +
-				     jiffies_to_usecs(rto);
-		s64 delta_us = rto_time_stamp - tp->tcp_mstamp;
+		s64 delta_us = tcp_rto_delta_us(sk);
 		/* delta_us may not be positive if the socket is locked
 		 * when the retrans timer fires and is rescheduled.
 		 */
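[Aside: the open-coded computation deleted above is folded into a tcp_rto_delta_us() helper. A minimal sketch reconstructed from the removed lines, assuming the helper lands in include/net/tcp.h; not necessarily a verbatim copy:]

static inline s64 tcp_rto_delta_us(const struct sock *sk)
{
	/* Microseconds until the pending RTO should fire, relative to
	 * now: take the write-queue head's last (re)transmit timestamp,
	 * add the current RTO, and subtract the current time
	 * (tp->tcp_mstamp). May be <= 0 if the deadline has already
	 * passed, e.g. when the socket was locked as the timer fired.
	 */
	const struct sk_buff *skb = tcp_write_queue_head(sk);
	u32 rto = inet_csk(sk)->icsk_rto;
	u64 rto_time_stamp = skb->skb_mstamp + jiffies_to_usecs(rto);

	return rto_time_stamp - tcp_sk(sk)->tcp_mstamp;
}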
@@ -3019,6 +3017,13 @@ void tcp_rearm_rto(struct sock *sk)
 	}
 }
 
+/* Try to schedule a loss probe; if that doesn't work, then schedule an RTO. */
+static void tcp_set_xmit_timer(struct sock *sk)
+{
+	if (!tcp_schedule_loss_probe(sk))
+		tcp_rearm_rto(sk);
+}
+
 /* If we get here, the whole TSO packet has not been acked. */
 static u32 tcp_tso_acked(struct sock *sk, struct sk_buff *skb)
 {
@@ -3180,7 +3185,7 @@ static int tcp_clean_rtx_queue(struct sock *sk, int prior_fackets,
 					ca_rtt_us, sack->rate);
 
 	if (flag & FLAG_ACKED) {
-		tcp_rearm_rto(sk);
+		flag |= FLAG_SET_XMIT_TIMER;  /* set TLP or RTO timer */
 		if (unlikely(icsk->icsk_mtup.probe_size &&
 			     !after(tp->mtu_probe.probe_seq_end, tp->snd_una))) {
 			tcp_mtup_probe_success(sk);
@@ -3208,7 +3213,7 @@ static int tcp_clean_rtx_queue(struct sock *sk, int prior_fackets,
 		 * after when the head was last (re)transmitted. Otherwise the
 		 * timeout may continue to extend in loss recovery.
 		 */
-		tcp_rearm_rto(sk);
+		flag |= FLAG_SET_XMIT_TIMER;  /* set TLP or RTO timer */
 	}
 
 	if (icsk->icsk_ca_ops->pkts_acked) {
@@ -3575,9 +3580,6 @@ static int tcp_ack(struct sock *sk, const struct sk_buff *skb, int flag)
 	if (after(ack, tp->snd_nxt))
 		goto invalid_ack;
 
-	if (icsk->icsk_pending == ICSK_TIME_LOSS_PROBE)
-		tcp_rearm_rto(sk);
-
 	if (after(ack, prior_snd_una)) {
 		flag |= FLAG_SND_UNA_ADVANCED;
 		icsk->icsk_retransmits = 0;
@@ -3626,18 +3628,20 @@ static int tcp_ack(struct sock *sk, const struct sk_buff *skb, int flag)
 	flag |= tcp_clean_rtx_queue(sk, prior_fackets, prior_snd_una, &acked,
 				    &sack_state);
 
+	if (tp->tlp_high_seq)
+		tcp_process_tlp_ack(sk, ack, flag);
+	/* If needed, reset TLP/RTO timer; RACK may later override this. */
+	if (flag & FLAG_SET_XMIT_TIMER)
+		tcp_set_xmit_timer(sk);
+
 	if (tcp_ack_is_dubious(sk, flag)) {
 		is_dupack = !(flag & (FLAG_SND_UNA_ADVANCED | FLAG_NOT_DUP));
 		tcp_fastretrans_alert(sk, acked, is_dupack, &flag, &rexmit);
 	}
-	if (tp->tlp_high_seq)
-		tcp_process_tlp_ack(sk, ack, flag);
 
 	if ((flag & FLAG_FORWARD_PROGRESS) || !(flag & FLAG_NOT_DUP))
 		sk_dst_confirm(sk);
 
-	if (icsk->icsk_pending == ICSK_TIME_RETRANS)
-		tcp_schedule_loss_probe(sk);
 	delivered = tp->delivered - delivered;	/* freshly ACKed or SACKed */
 	lost = tp->lost - lost;			/* freshly marked lost */
 	tcp_rate_gen(sk, delivered, lost, sack_state.rate);