Diffstat (limited to 'net/core/skbuff.c')
-rw-r--r--  net/core/skbuff.c | 141
1 file changed, 92 insertions(+), 49 deletions(-)
diff --git a/net/core/skbuff.c b/net/core/skbuff.c
index d81cff119f73..8c5197fe55a4 100644
--- a/net/core/skbuff.c
+++ b/net/core/skbuff.c
@@ -476,6 +476,18 @@ void skb_add_rx_frag(struct sk_buff *skb, int i, struct page *page, int off,
 }
 EXPORT_SYMBOL(skb_add_rx_frag);
 
+void skb_coalesce_rx_frag(struct sk_buff *skb, int i, int size,
+                          unsigned int truesize)
+{
+        skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
+
+        skb_frag_size_add(frag, size);
+        skb->len += size;
+        skb->data_len += size;
+        skb->truesize += truesize;
+}
+EXPORT_SYMBOL(skb_coalesce_rx_frag);
+
 static void skb_drop_list(struct sk_buff **listp)
 {
         kfree_skb_list(*listp);
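
skb_coalesce_rx_frag() grows an existing page fragment in place instead of
consuming a new frag slot. A minimal caller sketch -- hypothetical driver
code, not part of this patch -- assuming the bytes just received land
directly after frag i's existing data in the same page:

        /* Hypothetical receive path: hardware appended 'bytes' of payload
         * contiguously to the last fragment, so extend that frag and let
         * the helper account for them in len/data_len/truesize.
         */
        static void rx_append_to_last_frag(struct sk_buff *skb, int bytes)
        {
                int i = skb_shinfo(skb)->nr_frags - 1;

                skb_coalesce_rx_frag(skb, i, bytes, bytes);
        }
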
@@ -903,6 +915,9 @@ EXPORT_SYMBOL(skb_clone);
 
 static void skb_headers_offset_update(struct sk_buff *skb, int off)
 {
+        /* Only adjust this if it actually is csum_start rather than csum */
+        if (skb->ip_summed == CHECKSUM_PARTIAL)
+                skb->csum_start += off;
         /* {transport,network,mac}_header and tail are relative to skb->head */
         skb->transport_header += off;
         skb->network_header   += off;
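
For context: under CHECKSUM_PARTIAL, skb->csum_start is an offset from
skb->head to where checksumming must begin, so it has to shift whenever the
headers shift; in the other ip_summed states the same union holds a computed
checksum value that must not be touched. An illustrative setup -- a sketch,
not taken from this patch -- for an skb carrying UDP:

        /* csum_start is head-relative, which is exactly why
         * skb_headers_offset_update() now adjusts it along with the
         * other head-relative header offsets.
         */
        skb->ip_summed = CHECKSUM_PARTIAL;
        skb->csum_start = skb_transport_header(skb) - skb->head;
        skb->csum_offset = offsetof(struct udphdr, check);
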
@@ -1036,8 +1051,8 @@ EXPORT_SYMBOL(__pskb_copy);
  *      @ntail: room to add at tail
  *      @gfp_mask: allocation priority
  *
- *      Expands (or creates identical copy, if &nhead and &ntail are zero)
- *      header of skb. &sk_buff itself is not changed. &sk_buff MUST have
+ *      Expands (or creates identical copy, if @nhead and @ntail are zero)
+ *      header of @skb. &sk_buff itself is not changed. &sk_buff MUST have
  *      reference count of 1. Returns zero in the case of success or error,
  *      if expansion failed. In the last case, &sk_buff is not changed.
  *
@@ -1109,9 +1124,6 @@ int pskb_expand_head(struct sk_buff *skb, int nhead, int ntail,
 #endif
         skb->tail             += off;
         skb_headers_offset_update(skb, nhead);
-        /* Only adjust this if it actually is csum_start rather than csum */
-        if (skb->ip_summed == CHECKSUM_PARTIAL)
-                skb->csum_start += nhead;
         skb->cloned   = 0;
         skb->hdr_len  = 0;
         skb->nohdr    = 0;
@@ -1176,7 +1188,6 @@ struct sk_buff *skb_copy_expand(const struct sk_buff *skb,
                                         NUMA_NO_NODE);
         int oldheadroom = skb_headroom(skb);
         int head_copy_len, head_copy_off;
-        int off;
 
         if (!n)
                 return NULL;
@@ -1200,11 +1211,7 @@ struct sk_buff *skb_copy_expand(const struct sk_buff *skb,
 
         copy_skb_header(n, skb);
 
-        off = newheadroom - oldheadroom;
-        if (n->ip_summed == CHECKSUM_PARTIAL)
-                n->csum_start += off;
-
-        skb_headers_offset_update(n, off);
+        skb_headers_offset_update(n, newheadroom - oldheadroom);
 
         return n;
 }
@@ -1257,6 +1264,29 @@ free_skb:
 EXPORT_SYMBOL(skb_pad);
 
 /**
+ *      pskb_put - add data to the tail of a potentially fragmented buffer
+ *      @skb: start of the buffer to use
+ *      @tail: tail fragment of the buffer to use
+ *      @len: amount of data to add
+ *
+ *      This function extends the used data area of the potentially
+ *      fragmented buffer. @tail must be the last fragment of @skb -- or
+ *      @skb itself. If this would exceed the total buffer size the kernel
+ *      will panic. A pointer to the first byte of the extra data is
+ *      returned.
+ */
+
+unsigned char *pskb_put(struct sk_buff *skb, struct sk_buff *tail, int len)
+{
+        if (tail != skb) {
+                skb->data_len += len;
+                skb->len += len;
+        }
+        return skb_put(tail, len);
+}
+EXPORT_SYMBOL_GPL(pskb_put);
+
+/**
  *      skb_put - add data to a buffer
  *      @skb: buffer to use
  *      @len: amount of data to add
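
A hedged usage sketch for the new pskb_put() export (hypothetical caller;
'last' must already be the final fragment on skb's frag_list, or skb itself,
and TRAILER_LEN is a placeholder):

        /* Append a trailer to a chained buffer: pskb_put() bumps the
         * head skb's len/data_len, then does a plain skb_put() on the
         * tail fragment and returns a pointer to the new bytes.
         */
        u8 *trailer = pskb_put(skb, last, TRAILER_LEN);
        memset(trailer, 0, TRAILER_LEN);
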
@@ -1933,9 +1963,8 @@ fault:
 EXPORT_SYMBOL(skb_store_bits);
 
 /* Checksum skb data. */
-
-__wsum skb_checksum(const struct sk_buff *skb, int offset,
-                    int len, __wsum csum)
+__wsum __skb_checksum(const struct sk_buff *skb, int offset, int len,
+                      __wsum csum, const struct skb_checksum_ops *ops)
 {
         int start = skb_headlen(skb);
         int i, copy = start - offset;
@@ -1946,7 +1975,7 @@ __wsum skb_checksum(const struct sk_buff *skb, int offset,
         if (copy > 0) {
                 if (copy > len)
                         copy = len;
-                csum = csum_partial(skb->data + offset, copy, csum);
+                csum = ops->update(skb->data + offset, copy, csum);
                 if ((len -= copy) == 0)
                         return csum;
                 offset += copy;
@@ -1967,10 +1996,10 @@ __wsum skb_checksum(const struct sk_buff *skb, int offset,
                 if (copy > len)
                         copy = len;
                 vaddr = kmap_atomic(skb_frag_page(frag));
-                csum2 = csum_partial(vaddr + frag->page_offset +
-                                     offset - start, copy, 0);
+                csum2 = ops->update(vaddr + frag->page_offset +
+                                    offset - start, copy, 0);
                 kunmap_atomic(vaddr);
-                csum = csum_block_add(csum, csum2, pos);
+                csum = ops->combine(csum, csum2, pos, copy);
                 if (!(len -= copy))
                         return csum;
                 offset += copy;
@@ -1989,9 +2018,9 @@ __wsum skb_checksum(const struct sk_buff *skb, int offset,
                 __wsum csum2;
                 if (copy > len)
                         copy = len;
-                csum2 = skb_checksum(frag_iter, offset - start,
-                                     copy, 0);
-                csum = csum_block_add(csum, csum2, pos);
+                csum2 = __skb_checksum(frag_iter, offset - start,
+                                       copy, 0, ops);
+                csum = ops->combine(csum, csum2, pos, copy);
                 if ((len -= copy) == 0)
                         return csum;
                 offset += copy;
@@ -2003,6 +2032,18 @@ __wsum skb_checksum(const struct sk_buff *skb, int offset,
 
         return csum;
 }
+EXPORT_SYMBOL(__skb_checksum);
+
+__wsum skb_checksum(const struct sk_buff *skb, int offset,
+                    int len, __wsum csum)
+{
+        const struct skb_checksum_ops ops = {
+                .update  = csum_partial_ext,
+                .combine = csum_block_add_ext,
+        };
+
+        return __skb_checksum(skb, offset, len, csum, &ops);
+}
 EXPORT_SYMBOL(skb_checksum);
 
 /* Both of above in one bottle. */
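
__skb_checksum() factors the per-block primitives out behind
struct skb_checksum_ops; the skb_checksum() wrapper above shows the default
pairing. A sketch of a caller supplying its own ops -- my_update() and
my_combine() are placeholders with the same signatures as csum_partial_ext()
and csum_block_add_ext(), not functions from this diff:

        static const struct skb_checksum_ops my_ops = {
                .update  = my_update,   /* checksum one linear block */
                .combine = my_combine,  /* fold a sub-checksum into the total */
        };

        static __wsum my_skb_checksum(const struct sk_buff *skb)
        {
                /* Walks head, frags and frag_list with the caller's ops */
                return __skb_checksum(skb, 0, skb->len, 0, &my_ops);
        }
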
@@ -2522,14 +2563,14 @@ EXPORT_SYMBOL(skb_prepare_seq_read);
  * @data: destination pointer for data to be returned
  * @st: state variable
  *
- * Reads a block of skb data at &consumed relative to the
+ * Reads a block of skb data at @consumed relative to the
  * lower offset specified to skb_prepare_seq_read(). Assigns
- * the head of the data block to &data and returns the length
+ * the head of the data block to @data and returns the length
  * of the block or 0 if the end of the skb data or the upper
  * offset has been reached.
  *
  * The caller is not required to consume all of the data
- * returned, i.e. &consumed is typically set to the number
+ * returned, i.e. @consumed is typically set to the number
  * of bytes already consumed and the next call to
  * skb_seq_read() will return the remaining part of the block.
  *
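
The kernel-doc above describes the sequential-read machinery; a minimal
iteration sketch (assuming a caller that walks the entire skb, with a
hypothetical process() consumer):

        struct skb_seq_state st;
        unsigned int consumed = 0, len;
        const u8 *data;

        skb_prepare_seq_read(skb, 0, skb->len, &st);
        while ((len = skb_seq_read(consumed, &data, &st)) != 0) {
                process(data, len);     /* hypothetical */
                consumed += len;
        }
        skb_abort_seq_read(&st);
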
@@ -2837,14 +2878,7 @@ struct sk_buff *skb_segment(struct sk_buff *skb, netdev_features_t features)
                 __copy_skb_header(nskb, skb);
                 nskb->mac_len = skb->mac_len;
 
-                /* nskb and skb might have different headroom */
-                if (nskb->ip_summed == CHECKSUM_PARTIAL)
-                        nskb->csum_start += skb_headroom(nskb) - headroom;
-
-                skb_reset_mac_header(nskb);
-                skb_set_network_header(nskb, skb->mac_len);
-                nskb->transport_header = (nskb->network_header +
-                                          skb_network_header_len(skb));
+                skb_headers_offset_update(nskb, skb_headroom(nskb) - headroom);
 
                 skb_copy_from_linear_data_offset(skb, -tnl_hlen,
                                                  nskb->data - tnl_hlen,
@@ -2936,32 +2970,30 @@ EXPORT_SYMBOL_GPL(skb_segment);
 
 int skb_gro_receive(struct sk_buff **head, struct sk_buff *skb)
 {
-        struct sk_buff *p = *head;
-        struct sk_buff *nskb;
-        struct skb_shared_info *skbinfo = skb_shinfo(skb);
-        struct skb_shared_info *pinfo = skb_shinfo(p);
-        unsigned int headroom;
-        unsigned int len = skb_gro_len(skb);
+        struct skb_shared_info *pinfo, *skbinfo = skb_shinfo(skb);
         unsigned int offset = skb_gro_offset(skb);
         unsigned int headlen = skb_headlen(skb);
+        struct sk_buff *nskb, *lp, *p = *head;
+        unsigned int len = skb_gro_len(skb);
         unsigned int delta_truesize;
+        unsigned int headroom;
 
-        if (p->len + len >= 65536)
+        if (unlikely(p->len + len >= 65536))
                 return -E2BIG;
 
-        if (pinfo->frag_list)
-                goto merge;
-        else if (headlen <= offset) {
+        lp = NAPI_GRO_CB(p)->last ?: p;
+        pinfo = skb_shinfo(lp);
+
+        if (headlen <= offset) {
                 skb_frag_t *frag;
                 skb_frag_t *frag2;
                 int i = skbinfo->nr_frags;
                 int nr_frags = pinfo->nr_frags + i;
 
-                offset -= headlen;
-
                 if (nr_frags > MAX_SKB_FRAGS)
-                        return -E2BIG;
+                        goto merge;
 
+                offset -= headlen;
                 pinfo->nr_frags = nr_frags;
                 skbinfo->nr_frags = 0;
 
@@ -2992,7 +3024,7 @@ int skb_gro_receive(struct sk_buff **head, struct sk_buff *skb)
                 unsigned int first_offset;
 
                 if (nr_frags + 1 + skbinfo->nr_frags > MAX_SKB_FRAGS)
-                        return -E2BIG;
+                        goto merge;
 
                 first_offset = skb->data -
                                (unsigned char *)page_address(page) +
@@ -3010,7 +3042,10 @@ int skb_gro_receive(struct sk_buff **head, struct sk_buff *skb)
                 delta_truesize = skb->truesize - SKB_DATA_ALIGN(sizeof(struct sk_buff));
                 NAPI_GRO_CB(skb)->free = NAPI_GRO_FREE_STOLEN_HEAD;
                 goto done;
-        } else if (skb_gro_len(p) != pinfo->gso_size)
+        }
+        if (pinfo->frag_list)
+                goto merge;
+        if (skb_gro_len(p) != pinfo->gso_size)
                 return -E2BIG;
 
         headroom = skb_headroom(p);
@@ -3062,16 +3097,24 @@ merge:
 
         __skb_pull(skb, offset);
 
-        NAPI_GRO_CB(p)->last->next = skb;
+        if (!NAPI_GRO_CB(p)->last)
+                skb_shinfo(p)->frag_list = skb;
+        else
+                NAPI_GRO_CB(p)->last->next = skb;
         NAPI_GRO_CB(p)->last = skb;
         skb_header_release(skb);
+        lp = p;
 
 done:
         NAPI_GRO_CB(p)->count++;
         p->data_len += len;
         p->truesize += delta_truesize;
         p->len += len;
-
+        if (lp != p) {
+                lp->data_len += len;
+                lp->truesize += delta_truesize;
+                lp->len += len;
+        }
         NAPI_GRO_CB(skb)->same_flow = 1;
         return 0;
 }
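
For readers of the GRO change: NAPI_GRO_CB(p)->last now starts out NULL and
tracks the tail of p's frag_list, and lp is whichever skb actually absorbed
the new payload, so the byte accounting must touch both p and lp when they
differ. A condensed restatement of the bookkeeping (mirrors the code above,
nothing new):

        /* lp is the skb that really holds the merged data: the tail of
         * p's frag_list if there is one, otherwise p itself.
         */
        struct sk_buff *lp = NAPI_GRO_CB(p)->last ?: p;

        p->len += len;                  /* aggregate head accounting */
        p->data_len += len;
        p->truesize += delta_truesize;
        if (lp != p) {                  /* and the absorbing tail skb */
                lp->len += len;
                lp->data_len += len;
                lp->truesize += delta_truesize;
        }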