author		Octavian Purdila <octavian.purdila@intel.com>	2014-06-06 10:32:37 -0400
committer	David S. Miller <davem@davemloft.net>	2014-06-11 01:30:58 -0400
commit		6cc55e096f1f2a8585cf8dc9049862f2376f66d4 (patch)
tree		3ead1ba34000e454889258a30a37eb3ba1d69b62 /net/ipv4
parent		27fa589de5d74f4d5a9b8dcab632e7370c8b4fc9 (diff)
tcp: add gfp parameter to tcp_fragment
tcp_fragment can be called from process context (from tso_fragment).
Add a new gfp parameter to allow it to preserve atomic memory if
possible.

Signed-off-by: Octavian Purdila <octavian.purdila@intel.com>
Reviewed-by: Christoph Paasch <christoph.paasch@uclouvain.be>
Signed-off-by: David S. Miller <davem@davemloft.net>
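For illustration, a minimal sketch of how a caller might choose the new
gfp argument; example_split() and its may_sleep flag are hypothetical
and not part of this patch, and the sketch assumes the matching
prototype update in include/net/tcp.h, which this net/ipv4-filtered
view does not show:

#include <net/tcp.h>

/* Hypothetical caller: pick the gfp argument for the new
 * tcp_fragment() signature based on the calling context.
 */
static int example_split(struct sock *sk, struct sk_buff *skb, u32 len,
			 unsigned int mss_now, bool may_sleep)
{
	/* Softirq/atomic callers must not sleep, so they keep GFP_ATOMIC;
	 * a process-context caller (e.g. the sendmsg path that reaches
	 * tso_fragment()) can use GFP_KERNEL and spare the atomic reserves.
	 */
	gfp_t gfp = may_sleep ? GFP_KERNEL : GFP_ATOMIC;

	return tcp_fragment(sk, skb, len, mss_now, gfp);
}

All call sites converted below either pass GFP_ATOMIC explicitly or, in
tso_fragment(), forward the gfp they already received from their caller.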
Diffstat (limited to 'net/ipv4')
-rw-r--r--	net/ipv4/tcp_input.c	5
-rw-r--r--	net/ipv4/tcp_output.c	15
2 files changed, 11 insertions(+), 9 deletions(-)
diff --git a/net/ipv4/tcp_input.c b/net/ipv4/tcp_input.c
index 931529d5daa2..40661fc1e233 100644
--- a/net/ipv4/tcp_input.c
+++ b/net/ipv4/tcp_input.c
@@ -1167,7 +1167,7 @@ static int tcp_match_skb_to_sack(struct sock *sk, struct sk_buff *skb,
 			}
 			pkt_len = new_len;
 		}
-		err = tcp_fragment(sk, skb, pkt_len, mss);
+		err = tcp_fragment(sk, skb, pkt_len, mss, GFP_ATOMIC);
 		if (err < 0)
 			return err;
 	}
@@ -2241,7 +2241,8 @@ static void tcp_mark_head_lost(struct sock *sk, int packets, int mark_head)
 				break;
 
 			mss = skb_shinfo(skb)->gso_size;
-			err = tcp_fragment(sk, skb, (packets - oldcnt) * mss, mss);
+			err = tcp_fragment(sk, skb, (packets - oldcnt) * mss,
+					   mss, GFP_ATOMIC);
 			if (err < 0)
 				break;
 			cnt = packets;
diff --git a/net/ipv4/tcp_output.c b/net/ipv4/tcp_output.c
index d463c35db33d..ad7549f1d0ad 100644
--- a/net/ipv4/tcp_output.c
+++ b/net/ipv4/tcp_output.c
@@ -1074,7 +1074,7 @@ static void tcp_adjust_pcount(struct sock *sk, const struct sk_buff *skb, int de
  * Remember, these are still headerless SKBs at this point.
  */
 int tcp_fragment(struct sock *sk, struct sk_buff *skb, u32 len,
-		 unsigned int mss_now)
+		 unsigned int mss_now, gfp_t gfp)
 {
 	struct tcp_sock *tp = tcp_sk(sk);
 	struct sk_buff *buff;
@@ -1089,11 +1089,11 @@ int tcp_fragment(struct sock *sk, struct sk_buff *skb, u32 len,
 	if (nsize < 0)
 		nsize = 0;
 
-	if (skb_unclone(skb, GFP_ATOMIC))
+	if (skb_unclone(skb, gfp))
 		return -ENOMEM;
 
 	/* Get a new skb... force flag on. */
-	buff = sk_stream_alloc_skb(sk, nsize, GFP_ATOMIC);
+	buff = sk_stream_alloc_skb(sk, nsize, gfp);
 	if (buff == NULL)
 		return -ENOMEM; /* We'll just try again later. */
 
@@ -1625,7 +1625,7 @@ static int tso_fragment(struct sock *sk, struct sk_buff *skb, unsigned int len,
 
 	/* All of a TSO frame must be composed of paged data. */
 	if (skb->len != skb->data_len)
-		return tcp_fragment(sk, skb, len, mss_now);
+		return tcp_fragment(sk, skb, len, mss_now, gfp);
 
 	buff = sk_stream_alloc_skb(sk, 0, gfp);
 	if (unlikely(buff == NULL))
@@ -2122,7 +2122,8 @@ void tcp_send_loss_probe(struct sock *sk)
 		goto rearm_timer;
 
 	if ((pcount > 1) && (skb->len > (pcount - 1) * mss)) {
-		if (unlikely(tcp_fragment(sk, skb, (pcount - 1) * mss, mss)))
+		if (unlikely(tcp_fragment(sk, skb, (pcount - 1) * mss, mss,
+					  GFP_ATOMIC)))
 			goto rearm_timer;
 		skb = tcp_write_queue_tail(sk);
 	}
@@ -2463,7 +2464,7 @@ int __tcp_retransmit_skb(struct sock *sk, struct sk_buff *skb)
 		return -EAGAIN;
 
 	if (skb->len > cur_mss) {
-		if (tcp_fragment(sk, skb, cur_mss, cur_mss))
+		if (tcp_fragment(sk, skb, cur_mss, cur_mss, GFP_ATOMIC))
 			return -ENOMEM; /* We'll try again later. */
 	} else {
 		int oldpcount = tcp_skb_pcount(skb);
@@ -3244,7 +3245,7 @@ int tcp_write_wakeup(struct sock *sk)
 		    skb->len > mss) {
 			seg_size = min(seg_size, mss);
 			TCP_SKB_CB(skb)->tcp_flags |= TCPHDR_PSH;
-			if (tcp_fragment(sk, skb, seg_size, mss))
+			if (tcp_fragment(sk, skb, seg_size, mss, GFP_ATOMIC))
 				return -1;
 		} else if (!tcp_skb_pcount(skb))
 			tcp_set_skb_tso_segs(sk, skb, mss);
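As the "We'll just try again later" comments in the diff indicate, an
allocation failure inside tcp_fragment() is a soft error. A rough usage
sketch of that pattern (example_retrans() is hypothetical, not from this
patch; it loosely mirrors the __tcp_retransmit_skb() hunk above):

/* Hypothetical retransmit-style caller: -ENOMEM from tcp_fragment()
 * is not fatal, the operation is simply retried later, which is why
 * callers in atomic context can safely stay with GFP_ATOMIC.
 */
static int example_retrans(struct sock *sk, struct sk_buff *skb,
			   unsigned int cur_mss)
{
	if (skb->len > cur_mss) {
		int err = tcp_fragment(sk, skb, cur_mss, cur_mss, GFP_ATOMIC);

		if (err)
			return err;	/* typically -ENOMEM; retry later */
	}
	/* ... hand the (possibly shortened) skb to the transmit path ... */
	return 0;
}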