path: root/net/ipv4/tcp_output.c
author    Eric Dumazet <edumazet@google.com>    2018-02-19 14:56:52 -0500
committer David S. Miller <davem@davemloft.net>    2018-02-21 14:24:14 -0500
commit    98be9b12096fb46773b4a509d3822fd17c82218e (patch)
tree      fdee55dc628e78ddae4acbb7f1e97821aae779ae /net/ipv4/tcp_output.c
parent    4a64fd6ccf127973d1e2b2fc2f8024e550130617 (diff)
tcp: remove dead code after CHECKSUM_PARTIAL adoption
Since all skbs in write/rtx queues have CHECKSUM_PARTIAL, we can remove dead code.

Signed-off-by: Eric Dumazet <edumazet@google.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
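For context, the sketch below is a standalone C illustration (not taken from the kernel tree; csum_fold16() and csum_add_bytes() are made-up helper names) of the RFC 1071 one's-complement sum that software fallbacks such as csum_partial() and csum_partial_copy_nocheck() compute. With CHECKSUM_PARTIAL the stack defers that work to the NIC or a later offload stage, which is why the software-checksum branches deleted below never execute.

#include <stdint.h>
#include <stdio.h>
#include <string.h>

/* Fold a 32-bit accumulator down to a 16-bit one's-complement checksum. */
static uint16_t csum_fold16(uint32_t sum)
{
        sum = (sum & 0xffff) + (sum >> 16);
        sum = (sum & 0xffff) + (sum >> 16);
        return (uint16_t)~sum;
}

/* Accumulate an RFC 1071 one's-complement sum over a byte buffer. */
static uint32_t csum_add_bytes(uint32_t sum, const uint8_t *data, size_t len)
{
        while (len > 1) {
                sum += ((uint32_t)data[0] << 8) | data[1];
                data += 2;
                len -= 2;
        }
        if (len)                        /* trailing odd byte, padded with zero */
                sum += (uint32_t)data[0] << 8;
        return sum;
}

int main(void)
{
        const char payload[] = "hello, tcp";
        uint32_t sum = csum_add_bytes(0, (const uint8_t *)payload, strlen(payload));

        /* This is the work a CHECKSUM_PARTIAL skb leaves to the device. */
        printf("software checksum: 0x%04x\n", csum_fold16(sum));
        return 0;
}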
Diffstat (limited to 'net/ipv4/tcp_output.c')
-rw-r--r--    net/ipv4/tcp_output.c    38
1 file changed, 5 insertions(+), 33 deletions(-)
diff --git a/net/ipv4/tcp_output.c b/net/ipv4/tcp_output.c
index 0196923aec42..8795d76f987c 100644
--- a/net/ipv4/tcp_output.c
+++ b/net/ipv4/tcp_output.c
@@ -1335,21 +1335,9 @@ int tcp_fragment(struct sock *sk, enum tcp_queue tcp_queue,
 	TCP_SKB_CB(buff)->sacked = TCP_SKB_CB(skb)->sacked;
 	tcp_skb_fragment_eor(skb, buff);
 
-	if (!skb_shinfo(skb)->nr_frags && skb->ip_summed != CHECKSUM_PARTIAL) {
-		/* Copy and checksum data tail into the new buffer. */
-		buff->csum = csum_partial_copy_nocheck(skb->data + len,
-						       skb_put(buff, nsize),
-						       nsize, 0);
-
-		skb_trim(skb, len);
-
-		skb->csum = csum_block_sub(skb->csum, buff->csum, len);
-	} else {
-		skb->ip_summed = CHECKSUM_PARTIAL;
-		skb_split(skb, buff, len);
-	}
+	skb_split(skb, buff, len);
 
-	buff->ip_summed = skb->ip_summed;
+	buff->ip_summed = CHECKSUM_PARTIAL;
 
 	buff->tstamp = skb->tstamp;
 	tcp_fragment_tstamp(skb, buff);
@@ -1901,7 +1889,7 @@ static int tso_fragment(struct sock *sk, enum tcp_queue tcp_queue,
 
 	tcp_skb_fragment_eor(skb, buff);
 
-	buff->ip_summed = skb->ip_summed = CHECKSUM_PARTIAL;
+	buff->ip_summed = CHECKSUM_PARTIAL;
 	skb_split(skb, buff, len);
 	tcp_fragment_tstamp(skb, buff);
 
@@ -2134,7 +2122,7 @@ static int tcp_mtu_probe(struct sock *sk)
 	TCP_SKB_CB(nskb)->tcp_flags = TCPHDR_ACK;
 	TCP_SKB_CB(nskb)->sacked = 0;
 	nskb->csum = 0;
-	nskb->ip_summed = skb->ip_summed;
+	nskb->ip_summed = CHECKSUM_PARTIAL;
 
 	tcp_insert_write_queue_before(nskb, skb, sk);
 	tcp_highest_sack_replace(sk, skb, nskb);
@@ -2142,14 +2130,7 @@ static int tcp_mtu_probe(struct sock *sk)
 	len = 0;
 	tcp_for_write_queue_from_safe(skb, next, sk) {
 		copy = min_t(int, skb->len, probe_size - len);
-		if (nskb->ip_summed) {
-			skb_copy_bits(skb, 0, skb_put(nskb, copy), copy);
-		} else {
-			__wsum csum = skb_copy_and_csum_bits(skb, 0,
-							     skb_put(nskb, copy),
-							     copy, 0);
-			nskb->csum = csum_block_add(nskb->csum, csum, len);
-		}
+		skb_copy_bits(skb, 0, skb_put(nskb, copy), copy);
 
 		if (skb->len <= copy) {
 			/* We've eaten all the data from this skb.
@@ -2166,9 +2147,6 @@ static int tcp_mtu_probe(struct sock *sk)
 						   ~(TCPHDR_FIN|TCPHDR_PSH);
 		if (!skb_shinfo(skb)->nr_frags) {
 			skb_pull(skb, copy);
-			if (skb->ip_summed != CHECKSUM_PARTIAL)
-				skb->csum = csum_partial(skb->data,
-							 skb->len, 0);
 		} else {
 			__pskb_trim_head(skb, copy);
 			tcp_set_skb_tso_segs(skb, mss_now);
@@ -2746,12 +2724,6 @@ static bool tcp_collapse_retrans(struct sock *sk, struct sk_buff *skb)
 	}
 	tcp_highest_sack_replace(sk, next_skb, skb);
 
-	if (next_skb->ip_summed == CHECKSUM_PARTIAL)
-		skb->ip_summed = CHECKSUM_PARTIAL;
-
-	if (skb->ip_summed != CHECKSUM_PARTIAL)
-		skb->csum = csum_block_add(skb->csum, next_skb->csum, skb_size);
-
 	/* Update sequence range on original skb. */
 	TCP_SKB_CB(skb)->end_seq = TCP_SKB_CB(next_skb)->end_seq;
 