-rw-r--r--  include/linux/skbuff.h |  5
-rw-r--r--  net/core/skbuff.c      | 86
-rw-r--r--  net/ipv4/tcp_input.c   | 67
3 files changed, 94 insertions, 64 deletions
diff --git a/include/linux/skbuff.h b/include/linux/skbuff.h
index fe37c21d3a60..0e501714d47f 100644
--- a/include/linux/skbuff.h
+++ b/include/linux/skbuff.h
@@ -562,6 +562,11 @@ extern void kfree_skb(struct sk_buff *skb);
 extern void consume_skb(struct sk_buff *skb);
 extern void __kfree_skb(struct sk_buff *skb);
 extern struct kmem_cache *skbuff_head_cache;
+
+extern void kfree_skb_partial(struct sk_buff *skb, bool head_stolen);
+extern bool skb_try_coalesce(struct sk_buff *to, struct sk_buff *from,
+			     bool *fragstolen, int *delta_truesize);
+
 extern struct sk_buff *__alloc_skb(unsigned int size,
 				   gfp_t priority, int fclone, int node);
 extern struct sk_buff *build_skb(void *data, unsigned int frag_size);
diff --git a/net/core/skbuff.c b/net/core/skbuff.c
index 7ceb673d622f..016694d62484 100644
--- a/net/core/skbuff.c
+++ b/net/core/skbuff.c
@@ -3346,3 +3346,89 @@ void __skb_warn_lro_forwarding(const struct sk_buff *skb)
 			skb->dev->name);
 }
 EXPORT_SYMBOL(__skb_warn_lro_forwarding);
+
+void kfree_skb_partial(struct sk_buff *skb, bool head_stolen)
+{
+	if (head_stolen)
+		kmem_cache_free(skbuff_head_cache, skb);
+	else
+		__kfree_skb(skb);
+}
+EXPORT_SYMBOL(kfree_skb_partial);
+
+/**
+ * skb_try_coalesce - try to merge skb to prior one
+ * @to: prior buffer
+ * @from: buffer to add
+ * @fragstolen: pointer to boolean
+ * @delta_truesize: set to the increase in @to's truesize (for the caller's memory accounting)
+ */
+bool skb_try_coalesce(struct sk_buff *to, struct sk_buff *from,
+		      bool *fragstolen, int *delta_truesize)
+{
+	int i, delta, len = from->len;
+
+	*fragstolen = false;
+
+	if (skb_cloned(to))
+		return false;
+
+	if (len <= skb_tailroom(to)) {
+		BUG_ON(skb_copy_bits(from, 0, skb_put(to, len), len));
+		*delta_truesize = 0;
+		return true;
+	}
+
+	if (skb_has_frag_list(to) || skb_has_frag_list(from))
+		return false;
+
+	if (skb_headlen(from) != 0) {
+		struct page *page;
+		unsigned int offset;
+
+		if (skb_shinfo(to)->nr_frags +
+		    skb_shinfo(from)->nr_frags >= MAX_SKB_FRAGS)
+			return false;
+
+		if (skb_head_is_locked(from))
+			return false;
+
+		delta = from->truesize - SKB_DATA_ALIGN(sizeof(struct sk_buff));
+
+		page = virt_to_head_page(from->head);
+		offset = from->data - (unsigned char *)page_address(page);
+
+		skb_fill_page_desc(to, skb_shinfo(to)->nr_frags,
+				   page, offset, skb_headlen(from));
+		*fragstolen = true;
+	} else {
+		if (skb_shinfo(to)->nr_frags +
+		    skb_shinfo(from)->nr_frags > MAX_SKB_FRAGS)
+			return false;
+
+		delta = from->truesize -
+			SKB_TRUESIZE(skb_end_pointer(from) - from->head);
+	}
+
+	WARN_ON_ONCE(delta < len);
+
+	memcpy(skb_shinfo(to)->frags + skb_shinfo(to)->nr_frags,
+	       skb_shinfo(from)->frags,
+	       skb_shinfo(from)->nr_frags * sizeof(skb_frag_t));
+	skb_shinfo(to)->nr_frags += skb_shinfo(from)->nr_frags;
+
+	if (!skb_cloned(from))
+		skb_shinfo(from)->nr_frags = 0;
+
+	/* if the skb is cloned this does nothing since we set nr_frags to 0 */
+	for (i = 0; i < skb_shinfo(from)->nr_frags; i++)
+		skb_frag_ref(from, i);
+
+	to->truesize += delta;
+	to->len += len;
+	to->data_len += len;
+
+	*delta_truesize = delta;
+	return true;
+}
+EXPORT_SYMBOL(skb_try_coalesce);
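
The two new exports are meant to be used as a pair. Below is a minimal caller sketch, assuming a plain sk_buff queue; the helper name example_queue_rcv and the lock-free queue handling are illustrative, not part of this patch. On success the payload lives in the tail skb, the reported delta is the extra truesize the caller must charge to its own accounting (TCP adds it to sk->sk_rmem_alloc and calls sk_mem_charge() in the hunk below), and the donor skb is released with kfree_skb_partial() so a stolen head is not freed twice.

#include <linux/skbuff.h>

/* Illustrative caller, not part of this patch. */
static void example_queue_rcv(struct sk_buff_head *queue, struct sk_buff *skb)
{
	struct sk_buff *tail = skb_peek_tail(queue);
	bool fragstolen;
	int delta;

	if (tail && skb_try_coalesce(tail, skb, &fragstolen, &delta)) {
		/* Payload was merged into 'tail'; 'delta' is the truesize
		 * increase to account for.  Free only what is left of the
		 * donor: just the struct sk_buff if its head was stolen,
		 * the whole skb otherwise.
		 */
		kfree_skb_partial(skb, fragstolen);
		return;
	}
	__skb_queue_tail(queue, skb);
}
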
diff --git a/net/ipv4/tcp_input.c b/net/ipv4/tcp_input.c
index b961ef54b17d..cfa2aa128342 100644
--- a/net/ipv4/tcp_input.c
+++ b/net/ipv4/tcp_input.c
@@ -4549,84 +4549,23 @@ static bool tcp_try_coalesce(struct sock *sk,
 			     struct sk_buff *from,
 			     bool *fragstolen)
 {
-	int i, delta, len = from->len;
+	int delta;
 
 	*fragstolen = false;
 
-	if (tcp_hdr(from)->fin || skb_cloned(to))
+	if (tcp_hdr(from)->fin)
 		return false;
-
-	if (len <= skb_tailroom(to)) {
-		BUG_ON(skb_copy_bits(from, 0, skb_put(to, len), len));
-		goto merge;
-	}
-
-	if (skb_has_frag_list(to) || skb_has_frag_list(from))
+	if (!skb_try_coalesce(to, from, fragstolen, &delta))
 		return false;
 
-	if (skb_headlen(from) != 0) {
-		struct page *page;
-		unsigned int offset;
-
-		if (skb_shinfo(to)->nr_frags +
-		    skb_shinfo(from)->nr_frags >= MAX_SKB_FRAGS)
-			return false;
-
-		if (skb_head_is_locked(from))
-			return false;
-
-		delta = from->truesize - SKB_DATA_ALIGN(sizeof(struct sk_buff));
-
-		page = virt_to_head_page(from->head);
-		offset = from->data - (unsigned char *)page_address(page);
-
-		skb_fill_page_desc(to, skb_shinfo(to)->nr_frags,
-				   page, offset, skb_headlen(from));
-		*fragstolen = true;
-	} else {
-		if (skb_shinfo(to)->nr_frags +
-		    skb_shinfo(from)->nr_frags > MAX_SKB_FRAGS)
-			return false;
-
-		delta = from->truesize -
-			SKB_TRUESIZE(skb_end_pointer(from) - from->head);
-	}
-
-	WARN_ON_ONCE(delta < len);
-
-	memcpy(skb_shinfo(to)->frags + skb_shinfo(to)->nr_frags,
-	       skb_shinfo(from)->frags,
-	       skb_shinfo(from)->nr_frags * sizeof(skb_frag_t));
-	skb_shinfo(to)->nr_frags += skb_shinfo(from)->nr_frags;
-
-	if (!skb_cloned(from))
-		skb_shinfo(from)->nr_frags = 0;
-
-	/* if the skb is cloned this does nothing since we set nr_frags to 0 */
-	for (i = 0; i < skb_shinfo(from)->nr_frags; i++)
-		skb_frag_ref(from, i);
-
-	to->truesize += delta;
 	atomic_add(delta, &sk->sk_rmem_alloc);
 	sk_mem_charge(sk, delta);
-	to->len += len;
-	to->data_len += len;
-
-merge:
 	NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPRCVCOALESCE);
 	TCP_SKB_CB(to)->end_seq = TCP_SKB_CB(from)->end_seq;
 	TCP_SKB_CB(to)->ack_seq = TCP_SKB_CB(from)->ack_seq;
 	return true;
 }
 
-static void kfree_skb_partial(struct sk_buff *skb, bool head_stolen)
-{
-	if (head_stolen)
-		kmem_cache_free(skbuff_head_cache, skb);
-	else
-		__kfree_skb(skb);
-}
-
 static void tcp_data_queue_ofo(struct sock *sk, struct sk_buff *skb)
 {
 	struct tcp_sock *tp = tcp_sk(sk);
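
The call sites of the slimmed-down tcp_try_coalesce() are outside this hunk. As a hedged sketch of the pattern they follow (the function name example_tcp_queue_rcv and its exact shape are assumptions, not taken from this patch), a receive-path helper living next to tcp_try_coalesce() in net/ipv4/tcp_input.c would report whether the skb was eaten and thread fragstolen back to its caller, which frees the donor with kfree_skb_partial() only once it has finished looking at the skb's headers.

/* Illustrative only: the real call sites are not shown in this hunk. */
static int example_tcp_queue_rcv(struct sock *sk, struct sk_buff *skb,
				 bool *fragstolen)
{
	struct sk_buff *tail = skb_peek_tail(&sk->sk_receive_queue);

	if (tail && tcp_try_coalesce(sk, tail, skb, fragstolen))
		return 1;	/* caller later does kfree_skb_partial(skb, fragstolen) */

	skb_set_owner_r(skb, sk);
	__skb_queue_tail(&sk->sk_receive_queue, skb);
	return 0;
}
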