author		David S. Miller <davem@davemloft.net>	2013-11-04 13:48:30 -0500
committer	David S. Miller <davem@davemloft.net>	2013-11-04 13:48:30 -0500
commit		394efd19d5fcae936261bd48e5b33b21897aacf8 (patch)
tree		c48cf3ddbb07fd87309f1abdf31a27c71330e587 /net/ipv4/tcp_input.c
parent		f421436a591d34fa5279b54a96ac07d70250cc8d (diff)
parent		be408cd3e1fef73e9408b196a79b9934697fe3b1 (diff)
Merge git://git.kernel.org/pub/scm/linux/kernel/git/davem/net
Conflicts:
	drivers/net/ethernet/emulex/benet/be.h
	drivers/net/netconsole.c
	net/bridge/br_private.h

Three mostly trivial conflicts.

The net/bridge/br_private.h conflict was a function signature (argument addition) change overlapping with the extern removals from Joe Perches.

In drivers/net/netconsole.c we had one change adjusting a printk message whilst another changed "printk(KERN_INFO" into "pr_info(".

Lastly, the emulex change was a new inline function addition overlapping with Joe Perches's extern removals.

Signed-off-by: David S. Miller <davem@davemloft.net>
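The netconsole conflict described above is the usual printk-to-pr_info churn colliding with a message tweak. As a hedged illustration of that conversion style (the function and message below are placeholders, not the actual netconsole lines in this merge), pr_info() is simply the printk(KERN_INFO ...) wrapper from <linux/printk.h>, so both statements emit the same log line:

#include <linux/printk.h>

/* Placeholder example of the printk -> pr_info conversion style mentioned in
 * the conflict note; this is not code from the merge itself.
 */
static void example_log_started(const char *name)
{
	/* old style: explicit log-level prefix */
	printk(KERN_INFO "example: logging started on %s\n", name);

	/* new style: pr_info() expands to printk(KERN_INFO pr_fmt(fmt), ...),
	 * emitting the same message at the same level
	 */
	pr_info("example: logging started on %s\n", name);
}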
Diffstat (limited to 'net/ipv4/tcp_input.c')
-rw-r--r--	net/ipv4/tcp_input.c	34
1 file changed, 25 insertions(+), 9 deletions(-)
diff --git a/net/ipv4/tcp_input.c b/net/ipv4/tcp_input.c
index b935397c703c..63095b218b4a 100644
--- a/net/ipv4/tcp_input.c
+++ b/net/ipv4/tcp_input.c
@@ -2903,7 +2903,8 @@ static inline bool tcp_ack_update_rtt(struct sock *sk, const int flag,
 	 * left edge of the send window.
 	 * See draft-ietf-tcplw-high-performance-00, section 3.3.
 	 */
-	if (seq_rtt < 0 && tp->rx_opt.saw_tstamp && tp->rx_opt.rcv_tsecr)
+	if (seq_rtt < 0 && tp->rx_opt.saw_tstamp && tp->rx_opt.rcv_tsecr &&
+	    flag & FLAG_ACKED)
 		seq_rtt = tcp_time_stamp - tp->rx_opt.rcv_tsecr;
 
 	if (seq_rtt < 0)
@@ -2918,14 +2919,19 @@ static inline bool tcp_ack_update_rtt(struct sock *sk, const int flag,
 }
 
 /* Compute time elapsed between (last) SYNACK and the ACK completing 3WHS. */
-static void tcp_synack_rtt_meas(struct sock *sk, struct request_sock *req)
+static void tcp_synack_rtt_meas(struct sock *sk, const u32 synack_stamp)
 {
 	struct tcp_sock *tp = tcp_sk(sk);
 	s32 seq_rtt = -1;
 
-	if (tp->lsndtime && !tp->total_retrans)
-		seq_rtt = tcp_time_stamp - tp->lsndtime;
-	tcp_ack_update_rtt(sk, FLAG_SYN_ACKED, seq_rtt, -1);
+	if (synack_stamp && !tp->total_retrans)
+		seq_rtt = tcp_time_stamp - synack_stamp;
+
+	/* If the ACK acks both the SYNACK and the (Fast Open'd) data packets
+	 * sent in SYN_RECV, SYNACK RTT is the smooth RTT computed in tcp_ack()
+	 */
+	if (!tp->srtt)
+		tcp_ack_update_rtt(sk, FLAG_SYN_ACKED, seq_rtt, -1);
 }
 
 static void tcp_cong_avoid(struct sock *sk, u32 ack, u32 in_flight)
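The tcp_synack_rtt_meas() hunk above makes the caller pass the SYNACK send time explicitly and, per the new in-code comment, skips the timestamp-based update when a smoothed RTT already exists (the Fast Open case where tcp_ack() has just measured one from the acked data). The sketch below restates that decision with placeholder userspace names rather than the kernel's types; treat it as a reading of the hunk, not part of it:

/* Sketch only: placeholder names, not kernel code.  A SYNACK RTT sample is
 * valid only when the SYNACK was never retransmitted, and it is used only
 * when no smoothed RTT exists yet, so a fresher sample taken by tcp_ack()
 * from Fast Open data is not overridden by the older SYNACK timing.
 */
static int synack_rtt_sample(unsigned int now, unsigned int synack_stamp,
			     unsigned int total_retrans, unsigned int srtt)
{
	int seq_rtt = -1;

	if (synack_stamp && !total_retrans)
		seq_rtt = (int)(now - synack_stamp);

	if (srtt)		/* tcp_ack() already produced an RTT estimate */
		return -1;	/* no usable SYNACK sample */

	return seq_rtt;
}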
@@ -3028,6 +3034,7 @@ static int tcp_clean_rtx_queue(struct sock *sk, int prior_fackets,
 	s32 seq_rtt = -1;
 	s32 ca_seq_rtt = -1;
 	ktime_t last_ackt = net_invalid_timestamp();
+	bool rtt_update;
 
 	while ((skb = tcp_write_queue_head(sk)) && skb != tcp_send_head(sk)) {
 		struct tcp_skb_cb *scb = TCP_SKB_CB(skb);
@@ -3104,14 +3111,13 @@ static int tcp_clean_rtx_queue(struct sock *sk, int prior_fackets,
 	if (skb && (TCP_SKB_CB(skb)->sacked & TCPCB_SACKED_ACKED))
 		flag |= FLAG_SACK_RENEGING;
 
-	if (tcp_ack_update_rtt(sk, flag, seq_rtt, sack_rtt) ||
-	    (flag & FLAG_ACKED))
-		tcp_rearm_rto(sk);
+	rtt_update = tcp_ack_update_rtt(sk, flag, seq_rtt, sack_rtt);
 
 	if (flag & FLAG_ACKED) {
 		const struct tcp_congestion_ops *ca_ops
 			= inet_csk(sk)->icsk_ca_ops;
 
+		tcp_rearm_rto(sk);
 		if (unlikely(icsk->icsk_mtup.probe_size &&
 			     !after(tp->mtu_probe.probe_seq_end, tp->snd_una))) {
 			tcp_mtup_probe_success(sk);
@@ -3150,6 +3156,13 @@ static int tcp_clean_rtx_queue(struct sock *sk, int prior_fackets,
 
 			ca_ops->pkts_acked(sk, pkts_acked, rtt_us);
 		}
+	} else if (skb && rtt_update && sack_rtt >= 0 &&
+		   sack_rtt > (s32)(now - TCP_SKB_CB(skb)->when)) {
+		/* Do not re-arm RTO if the sack RTT is measured from data sent
+		 * after when the head was last (re)transmitted. Otherwise the
+		 * timeout may continue to extend in loss recovery.
+		 */
+		tcp_rearm_rto(sk);
 	}
 
 #if FASTRETRANS_DEBUG > 0
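The new else-if branch above gates RTO re-arming on SACK-only ACKs: the timer is reset only when the SACK-derived RTT exceeds the age of the retransmit-queue head, i.e. when the sacked data was sent no later than the head's last (re)transmission. The placeholder sketch below (not kernel code) isolates that comparison; the motivation is the one given in the in-diff comment, that SACKs for newly sent data would otherwise keep extending the timeout during loss recovery:

/* Sketch only: placeholder names and flat types, not the kernel's. */
static int should_rearm_rto(int sack_rtt, unsigned int now,
			    unsigned int head_xmit_time)
{
	int head_age = (int)(now - head_xmit_time);	/* time since the head was last (re)sent */

	/* re-arm only if the SACKed packet is at least as old as the head's
	 * last (re)transmission
	 */
	return sack_rtt >= 0 && sack_rtt > head_age;
}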
@@ -5626,6 +5639,7 @@ int tcp_rcv_state_process(struct sock *sk, struct sk_buff *skb,
 	struct request_sock *req;
 	int queued = 0;
 	bool acceptable;
+	u32 synack_stamp;
 
 	tp->rx_opt.saw_tstamp = 0;
 
@@ -5708,9 +5722,11 @@ int tcp_rcv_state_process(struct sock *sk, struct sk_buff *skb,
 		 * so release it.
 		 */
 		if (req) {
+			synack_stamp = tcp_rsk(req)->snt_synack;
 			tp->total_retrans = req->num_retrans;
 			reqsk_fastopen_remove(sk, req, false);
 		} else {
+			synack_stamp = tp->lsndtime;
 			/* Make sure socket is routed, for correct metrics. */
 			icsk->icsk_af_ops->rebuild_header(sk);
 			tcp_init_congestion_control(sk);
@@ -5733,7 +5749,7 @@ int tcp_rcv_state_process(struct sock *sk, struct sk_buff *skb,
 		tp->snd_una = TCP_SKB_CB(skb)->ack_seq;
 		tp->snd_wnd = ntohs(th->window) << tp->rx_opt.snd_wscale;
 		tcp_init_wl(tp, TCP_SKB_CB(skb)->seq);
-		tcp_synack_rtt_meas(sk, req);
+		tcp_synack_rtt_meas(sk, synack_stamp);
 
 		if (tp->rx_opt.tstamp_ok)
 			tp->advmss -= TCPOLEN_TSTAMP_ALIGNED;