author		David S. Miller <davem@davemloft.net>	2017-08-09 19:28:45 -0400
committer	David S. Miller <davem@davemloft.net>	2017-08-09 19:28:45 -0400
commit		3118e6e19da7b8d76b2456b880c74a9aa3a2268b (patch)
tree		3060d11297c1195ef2d1f120d9c2247b4b1de4ae /net/ipv4
parent		feca7d8c135bc1527b244fe817b8b6498066ccec (diff)
parent		48fb6f4db940e92cfb16cd878cddd59ea6120d06 (diff)
Merge git://git.kernel.org/pub/scm/linux/kernel/git/davem/net
The UDP offload conflict is dealt with by simply taking what is in net-next, where we have removed all of the UFO handling code entirely.

The TCP conflict was a case of local variables in a function being removed from both net and net-next.

In netvsc we had an assignment right next to where a missing set of u64 stats sync object inits were added.

Signed-off-by: David S. Miller <davem@davemloft.net>
Diffstat (limited to 'net/ipv4')
-rw-r--r--	net/ipv4/cipso_ipv4.c	12
-rw-r--r--	net/ipv4/fou.c		1
-rw-r--r--	net/ipv4/tcp_input.c	34
-rw-r--r--	net/ipv4/tcp_output.c	27
-rw-r--r--	net/ipv4/tcp_timer.c	3
5 files changed, 41 insertions, 36 deletions
diff --git a/net/ipv4/cipso_ipv4.c b/net/ipv4/cipso_ipv4.c
index c4c6e1969ed0..2ae8f54cb321 100644
--- a/net/ipv4/cipso_ipv4.c
+++ b/net/ipv4/cipso_ipv4.c
@@ -1523,9 +1523,17 @@ unsigned char *cipso_v4_optptr(const struct sk_buff *skb)
 	int taglen;
 
 	for (optlen = iph->ihl*4 - sizeof(struct iphdr); optlen > 0; ) {
-		if (optptr[0] == IPOPT_CIPSO)
+		switch (optptr[0]) {
+		case IPOPT_CIPSO:
 			return optptr;
-		taglen = optptr[1];
+		case IPOPT_END:
+			return NULL;
+		case IPOPT_NOOP:
+			taglen = 1;
+			break;
+		default:
+			taglen = optptr[1];
+		}
 		optlen -= taglen;
 		optptr += taglen;
 	}
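The old walk mattered because IPOPT_END (0) and IPOPT_NOOP (1) are single-byte options with no length octet: reading optptr[1] for them yields a stray byte, and a zero taglen loops forever. A minimal userspace sketch of the corrected walk (the option constants mirror include/uapi/linux/ip.h; the harness itself is hypothetical):

	#include <stddef.h>
	#include <stdio.h>

	#define IPOPT_END	0	/* single byte, terminates the option list */
	#define IPOPT_NOOP	1	/* single byte, padding */
	#define IPOPT_CIPSO	134	/* 0x86: CIPSO option, has a length octet */

	/* Walk an IPv4 options area of optlen bytes; return the CIPSO option or NULL. */
	static unsigned char *find_cipso(unsigned char *optptr, int optlen)
	{
		int taglen;

		while (optlen > 0) {
			switch (optptr[0]) {
			case IPOPT_CIPSO:
				return optptr;
			case IPOPT_END:		/* no length octet; nothing follows */
				return NULL;
			case IPOPT_NOOP:	/* no length octet; advance one byte */
				taglen = 1;
				break;
			default:		/* multi-byte option: length in optptr[1] */
				taglen = optptr[1];
			}
			optlen -= taglen;
			optptr += taglen;
		}
		return NULL;
	}

	int main(void)
	{
		/* NOOP padding followed by a (truncated, illustrative) CIPSO option. */
		unsigned char opts[] = { IPOPT_NOOP, IPOPT_NOOP, IPOPT_CIPSO, 6, 0, 0, 0, 0 };

		printf("found=%d\n", find_cipso(opts, sizeof(opts)) != NULL);
		return 0;
	}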
diff --git a/net/ipv4/fou.c b/net/ipv4/fou.c
index 8e0257d01200..1540db65241a 100644
--- a/net/ipv4/fou.c
+++ b/net/ipv4/fou.c
@@ -450,6 +450,7 @@ out_unlock:
 out:
 	NAPI_GRO_CB(skb)->flush |= flush;
 	skb_gro_remcsum_cleanup(skb, &grc);
+	skb->remcsum_offload = 0;
 
 	return pp;
 }
diff --git a/net/ipv4/tcp_input.c b/net/ipv4/tcp_input.c
index 842ed75ccb25..d73903fe8c83 100644
--- a/net/ipv4/tcp_input.c
+++ b/net/ipv4/tcp_input.c
@@ -106,6 +106,7 @@ int sysctl_tcp_invalid_ratelimit __read_mostly = HZ/2;
 #define FLAG_ORIG_SACK_ACKED	0x200 /* Never retransmitted data are (s)acked	*/
 #define FLAG_SND_UNA_ADVANCED	0x400 /* Snd_una was changed (!= FLAG_DATA_ACKED) */
 #define FLAG_DSACKING_ACK	0x800 /* SACK blocks contained D-SACK info */
+#define FLAG_SET_XMIT_TIMER	0x1000 /* Set TLP or RTO timer */
 #define FLAG_SACK_RENEGING	0x2000 /* snd_una advanced to a sacked seq */
 #define FLAG_UPDATE_TS_RECENT	0x4000 /* tcp_replace_ts_recent() */
 #define FLAG_NO_CHALLENGE_ACK	0x8000 /* do not call tcp_send_challenge_ack()	*/
@@ -2520,8 +2521,8 @@ static inline void tcp_end_cwnd_reduction(struct sock *sk)
 		return;
 
 	/* Reset cwnd to ssthresh in CWR or Recovery (unless it's undone) */
-	if (inet_csk(sk)->icsk_ca_state == TCP_CA_CWR ||
-	    (tp->undo_marker && tp->snd_ssthresh < TCP_INFINITE_SSTHRESH)) {
+	if (tp->snd_ssthresh < TCP_INFINITE_SSTHRESH &&
+	    (inet_csk(sk)->icsk_ca_state == TCP_CA_CWR || tp->undo_marker)) {
 		tp->snd_cwnd = tp->snd_ssthresh;
 		tp->snd_cwnd_stamp = tcp_jiffies32;
 	}
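The reordering matters when snd_ssthresh is still at its "infinite" sentinel: the old predicate would copy that sentinel into snd_cwnd whenever the state was CWR. A tiny hypothetical harness comparing the two predicates on that case:

	#include <stdbool.h>
	#include <stdio.h>

	#define TCP_INFINITE_SSTHRESH	0x7fffffff

	static bool old_cond(bool in_cwr, unsigned undo_marker, unsigned ssthresh)
	{
		return in_cwr || (undo_marker && ssthresh < TCP_INFINITE_SSTHRESH);
	}

	static bool new_cond(bool in_cwr, unsigned undo_marker, unsigned ssthresh)
	{
		return ssthresh < TCP_INFINITE_SSTHRESH && (in_cwr || undo_marker);
	}

	int main(void)
	{
		/* In CWR with ssthresh untouched: old resets cwnd, new does not. */
		printf("old=%d new=%d\n",
		       old_cond(true, 0, TCP_INFINITE_SSTHRESH),
		       new_cond(true, 0, TCP_INFINITE_SSTHRESH));
		return 0;
	}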
@@ -3004,10 +3005,7 @@ void tcp_rearm_rto(struct sock *sk)
 		/* Offset the time elapsed after installing regular RTO */
 		if (icsk->icsk_pending == ICSK_TIME_REO_TIMEOUT ||
 		    icsk->icsk_pending == ICSK_TIME_LOSS_PROBE) {
-			struct sk_buff *skb = tcp_write_queue_head(sk);
-			u64 rto_time_stamp = skb->skb_mstamp +
-					     jiffies_to_usecs(rto);
-			s64 delta_us = rto_time_stamp - tp->tcp_mstamp;
+			s64 delta_us = tcp_rto_delta_us(sk);
 			/* delta_us may not be positive if the socket is locked
 			 * when the retrans timer fires and is rescheduled.
 			 */
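The open-coded computation moves into a tcp_rto_delta_us() helper, added to include/net/tcp.h elsewhere in this series. Reconstructed from the removed lines above, its body should be roughly the following sketch; treat the exact form as an assumption of this note rather than a quote:

	/* At how many microseconds into the future is the RTO timer due to fire? */
	static inline s64 tcp_rto_delta_us(const struct sock *sk)
	{
		const struct sk_buff *skb = tcp_write_queue_head(sk);
		u32 rto = inet_csk(sk)->icsk_rto;
		u64 rto_time_stamp_us = skb->skb_mstamp + jiffies_to_usecs(rto);

		return rto_time_stamp_us - tcp_sk(sk)->tcp_mstamp;
	}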
@@ -3019,6 +3017,13 @@ void tcp_rearm_rto(struct sock *sk)
 	}
 }
 
+/* Try to schedule a loss probe; if that doesn't work, then schedule an RTO. */
+static void tcp_set_xmit_timer(struct sock *sk)
+{
+	if (!tcp_schedule_loss_probe(sk))
+		tcp_rearm_rto(sk);
+}
+
 /* If we get here, the whole TSO packet has not been acked. */
 static u32 tcp_tso_acked(struct sock *sk, struct sk_buff *skb)
 {
@@ -3180,7 +3185,7 @@ static int tcp_clean_rtx_queue(struct sock *sk, int prior_fackets,
 					ca_rtt_us, sack->rate);
 
 	if (flag & FLAG_ACKED) {
-		tcp_rearm_rto(sk);
+		flag |= FLAG_SET_XMIT_TIMER;  /* set TLP or RTO timer */
 		if (unlikely(icsk->icsk_mtup.probe_size &&
 			     !after(tp->mtu_probe.probe_seq_end, tp->snd_una))) {
 			tcp_mtup_probe_success(sk);
@@ -3208,7 +3213,7 @@ static int tcp_clean_rtx_queue(struct sock *sk, int prior_fackets,
 		 * after when the head was last (re)transmitted. Otherwise the
 		 * timeout may continue to extend in loss recovery.
 		 */
-		tcp_rearm_rto(sk);
+		flag |= FLAG_SET_XMIT_TIMER;  /* set TLP or RTO timer */
 	}
 
 	if (icsk->icsk_ca_ops->pkts_acked) {
@@ -3575,9 +3580,6 @@ static int tcp_ack(struct sock *sk, const struct sk_buff *skb, int flag)
 	if (after(ack, tp->snd_nxt))
 		goto invalid_ack;
 
-	if (icsk->icsk_pending == ICSK_TIME_LOSS_PROBE)
-		tcp_rearm_rto(sk);
-
 	if (after(ack, prior_snd_una)) {
 		flag |= FLAG_SND_UNA_ADVANCED;
 		icsk->icsk_retransmits = 0;
@@ -3626,18 +3628,20 @@ static int tcp_ack(struct sock *sk, const struct sk_buff *skb, int flag)
 	flag |= tcp_clean_rtx_queue(sk, prior_fackets, prior_snd_una, &acked,
 				    &sack_state);
 
+	if (tp->tlp_high_seq)
+		tcp_process_tlp_ack(sk, ack, flag);
+	/* If needed, reset TLP/RTO timer; RACK may later override this. */
+	if (flag & FLAG_SET_XMIT_TIMER)
+		tcp_set_xmit_timer(sk);
+
 	if (tcp_ack_is_dubious(sk, flag)) {
 		is_dupack = !(flag & (FLAG_SND_UNA_ADVANCED | FLAG_NOT_DUP));
 		tcp_fastretrans_alert(sk, acked, is_dupack, &flag, &rexmit);
 	}
-	if (tp->tlp_high_seq)
-		tcp_process_tlp_ack(sk, ack, flag);
 
 	if ((flag & FLAG_FORWARD_PROGRESS) || !(flag & FLAG_NOT_DUP))
 		sk_dst_confirm(sk);
 
-	if (icsk->icsk_pending == ICSK_TIME_RETRANS)
-		tcp_schedule_loss_probe(sk);
 	delivered = tp->delivered - delivered;	/* freshly ACKed or SACKed */
 	lost = tp->lost - lost;			/* freshly marked lost */
 	tcp_rate_gen(sk, delivered, lost, sack_state.rate);
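Taken together, the tcp_input.c changes replace scattered direct calls to tcp_rearm_rto()/tcp_schedule_loss_probe() with one FLAG_SET_XMIT_TIMER request bit, honored once per ACK at a fixed point after the TLP ACK is processed. A miniature, purely illustrative sketch of that deferral idiom (names are hypothetical, not kernel API):

	#include <stdio.h>

	#define FLAG_SET_XMIT_TIMER	0x1000	/* mirrors the new tcp_input.c flag */

	/* Inner helpers request a rearm by setting a bit... */
	static int clean_rtx_queue(int flag, int acked_something)
	{
		if (acked_something)
			flag |= FLAG_SET_XMIT_TIMER;	/* request, don't rearm here */
		return flag;
	}

	int main(void)
	{
		int flag = clean_rtx_queue(0, 1);

		/* ...and the single caller acts on it once, at a well-defined point. */
		if (flag & FLAG_SET_XMIT_TIMER)
			printf("timer armed exactly once per ACK\n");
		return 0;
	}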
diff --git a/net/ipv4/tcp_output.c b/net/ipv4/tcp_output.c
index d49bff51bdb7..3e0d19631534 100644
--- a/net/ipv4/tcp_output.c
+++ b/net/ipv4/tcp_output.c
@@ -2375,23 +2375,14 @@ bool tcp_schedule_loss_probe(struct sock *sk)
 {
 	struct inet_connection_sock *icsk = inet_csk(sk);
 	struct tcp_sock *tp = tcp_sk(sk);
-	u32 timeout, tlp_time_stamp, rto_time_stamp;
+	u32 timeout, rto_delta_us;
 
-	/* No consecutive loss probes. */
-	if (WARN_ON(icsk->icsk_pending == ICSK_TIME_LOSS_PROBE)) {
-		tcp_rearm_rto(sk);
-		return false;
-	}
 	/* Don't do any loss probe on a Fast Open connection before 3WHS
 	 * finishes.
 	 */
 	if (tp->fastopen_rsk)
 		return false;
 
-	/* TLP is only scheduled when next timer event is RTO. */
-	if (icsk->icsk_pending != ICSK_TIME_RETRANS)
-		return false;
-
 	/* Schedule a loss probe in 2*RTT for SACK capable connections
 	 * in Open state, that are either limited by cwnd or application.
 	 */
@@ -2418,14 +2409,10 @@ bool tcp_schedule_loss_probe(struct sock *sk)
 		timeout = TCP_TIMEOUT_INIT;
 	}
 
-	/* If RTO is shorter, just schedule TLP in its place. */
-	tlp_time_stamp = tcp_jiffies32 + timeout;
-	rto_time_stamp = (u32)inet_csk(sk)->icsk_timeout;
-	if ((s32)(tlp_time_stamp - rto_time_stamp) > 0) {
-		s32 delta = rto_time_stamp - tcp_jiffies32;
-		if (delta > 0)
-			timeout = delta;
-	}
+	/* If the RTO formula yields an earlier time, then use that time. */
+	rto_delta_us = tcp_rto_delta_us(sk);  /* How far in future is RTO? */
+	if (rto_delta_us > 0)
+		timeout = min_t(u32, timeout, usecs_to_jiffies(rto_delta_us));
 
 	inet_csk_reset_xmit_timer(sk, ICSK_TIME_LOSS_PROBE, timeout,
 				  TCP_RTO_MAX);
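Note the clamp mixes units: timeout is in jiffies while tcp_rto_delta_us() returns microseconds, hence the usecs_to_jiffies() conversion before min_t(). A standalone sketch of the same arithmetic (the HZ value, rounding, and numbers are illustrative, not the kernel's exact conversion):

	#include <stdio.h>

	#define HZ 1000				/* illustrative; 1 jiffy = 1 ms */

	static unsigned int usecs_to_jiffies(unsigned int us)
	{
		return (us + (1000000 / HZ) - 1) / (1000000 / HZ);	/* round up */
	}

	static unsigned int min_u32(unsigned int a, unsigned int b)
	{
		return a < b ? a : b;
	}

	int main(void)
	{
		unsigned int timeout = 80;	/* ~2*RTT worth of jiffies */
		int rto_delta_us = 25000;	/* RTO would fire 25 ms from now */

		if (rto_delta_us > 0)
			timeout = min_u32(timeout, usecs_to_jiffies(rto_delta_us));
		printf("timeout = %u jiffies\n", timeout);	/* 25, not 80 */
		return 0;
	}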
@@ -3450,6 +3437,10 @@ int tcp_connect(struct sock *sk)
 	int err;
 
 	tcp_call_bpf(sk, BPF_SOCK_OPS_TCP_CONNECT_CB);
+
+	if (inet_csk(sk)->icsk_af_ops->rebuild_header(sk))
+		return -EHOSTUNREACH; /* Routing failure or similar. */
+
 	tcp_connect_init(sk);
 
 	if (unlikely(tp->repair)) {
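With this check, a route that is unusable by the time tcp_connect() runs surfaces synchronously rather than later in the handshake. A hypothetical userspace view of that failure mode (the destination address is illustrative, from the TEST-NET-1 range):

	#include <arpa/inet.h>
	#include <errno.h>
	#include <stdio.h>
	#include <string.h>
	#include <sys/socket.h>

	int main(void)
	{
		struct sockaddr_in dst = { .sin_family = AF_INET,
					   .sin_port = htons(80) };
		int fd = socket(AF_INET, SOCK_STREAM, 0);

		inet_pton(AF_INET, "192.0.2.1", &dst.sin_addr);
		if (connect(fd, (struct sockaddr *)&dst, sizeof(dst)) < 0 &&
		    errno == EHOSTUNREACH)
			printf("connect: %s (reported at connect time)\n",
			       strerror(errno));
		return 0;
	}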
diff --git a/net/ipv4/tcp_timer.c b/net/ipv4/tcp_timer.c
index f753f9d2fee3..655dd8d7f064 100644
--- a/net/ipv4/tcp_timer.c
+++ b/net/ipv4/tcp_timer.c
@@ -640,7 +640,8 @@ static void tcp_keepalive_timer (unsigned long data)
 		goto death;
 	}
 
-	if (!sock_flag(sk, SOCK_KEEPOPEN) || sk->sk_state == TCP_CLOSE)
+	if (!sock_flag(sk, SOCK_KEEPOPEN) ||
+	    ((1 << sk->sk_state) & (TCPF_CLOSE | TCPF_SYN_SENT)))
 		goto out;
 
 	elapsed = keepalive_time_when(tp);
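The TCPF_* constants are one-bit-per-state masks (TCPF_CLOSE == 1 << TCP_CLOSE, per include/net/tcp_states.h), so a single AND tests membership in a set of states; the new code extends the keepalive bail-out from just TCP_CLOSE to TCP_CLOSE or TCP_SYN_SENT. A small sketch of the idiom with locally defined stand-ins:

	#include <stdio.h>

	/* Stand-ins mirroring include/net/tcp_states.h (values are the kernel's). */
	enum { TCP_ESTABLISHED = 1, TCP_SYN_SENT = 2, TCP_CLOSE = 7 };
	#define TCPF_SYN_SENT	(1 << TCP_SYN_SENT)
	#define TCPF_CLOSE	(1 << TCP_CLOSE)

	static int keepalive_irrelevant(int sk_state)
	{
		/* One mask test instead of a chain of == comparisons. */
		return ((1 << sk_state) & (TCPF_CLOSE | TCPF_SYN_SENT)) != 0;
	}

	int main(void)
	{
		printf("CLOSE: %d, SYN_SENT: %d, ESTABLISHED: %d\n",
		       keepalive_irrelevant(TCP_CLOSE),
		       keepalive_irrelevant(TCP_SYN_SENT),
		       keepalive_irrelevant(TCP_ESTABLISHED));
		return 0;
	}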