Diffstat (limited to 'net/ipv4/tcp_output.c')
 net/ipv4/tcp_output.c | 27 +++++++++++++++++++--------
 1 file changed, 19 insertions(+), 8 deletions(-)
diff --git a/net/ipv4/tcp_output.c b/net/ipv4/tcp_output.c
index 63170e297540..8c8de2780c7a 100644
--- a/net/ipv4/tcp_output.c
+++ b/net/ipv4/tcp_output.c
@@ -1093,6 +1093,13 @@ static void __pskb_trim_head(struct sk_buff *skb, int len)
 {
 	int i, k, eat;
 
+	eat = min_t(int, len, skb_headlen(skb));
+	if (eat) {
+		__skb_pull(skb, eat);
+		len -= eat;
+		if (!len)
+			return;
+	}
 	eat = len;
 	k = 0;
 	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
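The hunk above teaches __pskb_trim_head() to consume the linear head first (via __skb_pull) and only then eat into the paged fragments. A minimal userspace sketch of the same trim-head-then-frags pattern, using a hypothetical buffer type rather than the kernel's sk_buff:

	/* Hypothetical stand-in for an skb: a linear head plus fragments. */
	struct buf {
		char *head;        /* linear data */
		int   headlen;     /* bytes in the linear area */
		char *frag[4];     /* fragment start pointers */
		int   fraglen[4];  /* bytes per fragment */
		int   nr_frags;
	};

	/* Trim 'len' bytes off the front: pull from the linear head first,
	 * then drop or shorten leading fragments, as __pskb_trim_head()
	 * now does. */
	static void trim_head(struct buf *b, int len)
	{
		int i, k = 0;
		int eat = len < b->headlen ? len : b->headlen;

		if (eat) {                      /* the new head-pulling step */
			b->head += eat;
			b->headlen -= eat;
			len -= eat;
			if (!len)
				return;
		}
		for (i = 0; i < b->nr_frags; i++) {
			if (b->fraglen[i] <= len) {   /* fully consumed */
				len -= b->fraglen[i];
			} else {                      /* keep surviving tail */
				b->frag[k] = b->frag[i] + len;
				b->fraglen[k] = b->fraglen[i] - len;
				len = 0;
				k++;
			}
		}
		b->nr_frags = k;
	}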
@@ -1124,11 +1131,7 @@ int tcp_trim_head(struct sock *sk, struct sk_buff *skb, u32 len)
 	if (skb_cloned(skb) && pskb_expand_head(skb, 0, 0, GFP_ATOMIC))
 		return -ENOMEM;
 
-	/* If len == headlen, we avoid __skb_pull to preserve alignment. */
-	if (unlikely(len < skb_headlen(skb)))
-		__skb_pull(skb, len);
-	else
-		__pskb_trim_head(skb, len - skb_headlen(skb));
+	__pskb_trim_head(skb, len);
 
 	TCP_SKB_CB(skb)->seq += len;
 	skb->ip_summed = CHECKSUM_PARTIAL;
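With the helper now pulling the linear head itself, tcp_trim_head() no longer needs the alignment-preserving branch; misalignment is instead repaired at retransmit time (see the last hunk below). A tiny model of what remains of the caller, with hypothetical types:

	/* Hypothetical model of the slimmed-down tcp_trim_head(): trim the
	 * acked bytes, then advance the segment's starting sequence. */
	struct seg {
		unsigned int seq;   /* first sequence number of the segment */
		unsigned int len;   /* payload bytes still queued */
	};

	static int trim_acked(struct seg *s, unsigned int acked)
	{
		if (acked > s->len)
			return -1;  /* never trim more than is queued */
		s->len -= acked;    /* stands in for __pskb_trim_head() */
		s->seq += acked;    /* mirrors TCP_SKB_CB(skb)->seq += len */
		return 0;
	}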
@@ -1581,7 +1584,7 @@ static int tcp_tso_should_defer(struct sock *sk, struct sk_buff *skb)
 		 * frame, so if we have space for more than 3 frames
 		 * then send now.
 		 */
-		if (limit > tcp_max_burst(tp) * tp->mss_cache)
+		if (limit > tcp_max_tso_deferred_mss(tp) * tp->mss_cache)
 			goto send_now;
 	}
 
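The rename above is mechanical: tcp_max_burst() becomes tcp_max_tso_deferred_mss() with unchanged logic. Per the comment, the receiver is expected to ACK every other full-sized frame, so deferral stops once the send budget covers more than 3 full segments. A sketch of that decision with hypothetical inputs:

	/* Sketch of the check above: stop deferring (send now) once the
	 * available budget exceeds 'max_deferred_mss' full segments. */
	static int tso_send_now(unsigned int limit_bytes,
				unsigned int mss_cache,
				unsigned int max_deferred_mss)
	{
		return limit_bytes > max_deferred_mss * mss_cache;
	}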
@@ -1919,7 +1922,7 @@ u32 __tcp_select_window(struct sock *sk)
 	if (free_space < (full_space >> 1)) {
 		icsk->icsk_ack.quick = 0;
 
-		if (tcp_memory_pressure)
+		if (sk_under_memory_pressure(sk))
 			tp->rcv_ssthresh = min(tp->rcv_ssthresh,
 					       4U * tp->advmss);
 
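Here the global tcp_memory_pressure flag gives way to the per-socket sk_under_memory_pressure() accessor; the behavior, clamping the receive-window growth target to four advertised-MSS units when memory is tight, stays the same. A sketch of the clamp with hypothetical names:

	/* Under memory pressure, cap rcv_ssthresh at 4 * advmss. */
	static unsigned int clamp_rcv_ssthresh(unsigned int rcv_ssthresh,
					       unsigned int advmss,
					       int under_pressure)
	{
		unsigned int cap = 4u * advmss;

		if (under_pressure && rcv_ssthresh > cap)
			return cap;
		return rcv_ssthresh;
	}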
@@ -2147,7 +2150,15 @@ int tcp_retransmit_skb(struct sock *sk, struct sk_buff *skb)
 	 */
 	TCP_SKB_CB(skb)->when = tcp_time_stamp;
 
-	err = tcp_transmit_skb(sk, skb, 1, GFP_ATOMIC);
+	/* make sure skb->data is aligned on arches that require it */
+	if (unlikely(NET_IP_ALIGN && ((unsigned long)skb->data & 3))) {
+		struct sk_buff *nskb = __pskb_copy(skb, MAX_TCP_HEADER,
+						   GFP_ATOMIC);
+		err = nskb ? tcp_transmit_skb(sk, nskb, 0, GFP_ATOMIC) :
+			     -ENOBUFS;
+	} else {
+		err = tcp_transmit_skb(sk, skb, 1, GFP_ATOMIC);
+	}
 
 	if (err == 0) {
 		/* Update global TCP statistics. */
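This last hunk is where the alignment concern removed from tcp_trim_head() is actually handled: on architectures where NET_IP_ALIGN is nonzero (those that fault or slow down on unaligned access), a retransmit whose skb->data is not 4-byte aligned is first copied into a freshly allocated, aligned buffer. A standalone userspace sketch of the same fallback, with send_pkt() standing in for tcp_transmit_skb():

	#include <stdint.h>
	#include <stdlib.h>
	#include <string.h>

	#define NEEDS_ALIGN 1   /* assume an arch wanting 4-byte alignment */

	static int send_pkt(const void *data, size_t len)
	{
		(void)data; (void)len;  /* placeholder: hand off to the NIC */
		return 0;
	}

	static int retransmit(const char *data, size_t len)
	{
		if (NEEDS_ALIGN && ((uintptr_t)data & 3)) {
			/* misaligned: copy into a fresh, aligned buffer */
			char *copy = malloc(len);   /* malloc aligns memory */
			int err;

			if (!copy)
				return -1;          /* the -ENOBUFS path */
			memcpy(copy, data, len);
			err = send_pkt(copy, len);
			free(copy);
			return err;
		}
		return send_pkt(data, len);         /* aligned: send as-is */
	}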