Diffstat (limited to 'net/core/skbuff.c')
 -rw-r--r--  net/core/skbuff.c | 89
 1 file changed, 79 insertions(+), 10 deletions(-)
diff --git a/net/core/skbuff.c b/net/core/skbuff.c
index 3c30ee4a5710..da0c97f2fab4 100644
--- a/net/core/skbuff.c
+++ b/net/core/skbuff.c
@@ -245,6 +245,55 @@ nodata:
 EXPORT_SYMBOL(__alloc_skb);
 
 /**
+ * build_skb - build a network buffer
+ * @data: data buffer provided by caller
+ *
+ * Allocate a new &sk_buff. Caller provides space holding head and
+ * skb_shared_info. @data must have been allocated by kmalloc()
+ * The return is the new skb buffer.
+ * On a failure the return is %NULL, and @data is not freed.
+ * Notes :
+ * Before IO, driver allocates only data buffer where NIC put incoming frame
+ * Driver should add room at head (NET_SKB_PAD) and
+ * MUST add room at tail (SKB_DATA_ALIGN(skb_shared_info))
+ * After IO, driver calls build_skb(), to allocate sk_buff and populate it
+ * before giving packet to stack.
+ * RX rings only contains data buffers, not full skbs.
+ */
+struct sk_buff *build_skb(void *data)
+{
+	struct skb_shared_info *shinfo;
+	struct sk_buff *skb;
+	unsigned int size;
+
+	skb = kmem_cache_alloc(skbuff_head_cache, GFP_ATOMIC);
+	if (!skb)
+		return NULL;
+
+	size = ksize(data) - SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
+
+	memset(skb, 0, offsetof(struct sk_buff, tail));
+	skb->truesize = SKB_TRUESIZE(size);
+	atomic_set(&skb->users, 1);
+	skb->head = data;
+	skb->data = data;
+	skb_reset_tail_pointer(skb);
+	skb->end = skb->tail + size;
+#ifdef NET_SKBUFF_DATA_USES_OFFSET
+	skb->mac_header = ~0U;
+#endif
+
+	/* make sure we initialize shinfo sequentially */
+	shinfo = skb_shinfo(skb);
+	memset(shinfo, 0, offsetof(struct skb_shared_info, dataref));
+	atomic_set(&shinfo->dataref, 1);
+	kmemcheck_annotate_variable(shinfo->destructor_arg);
+
+	return skb;
+}
+EXPORT_SYMBOL(build_skb);
+
+/**
  * __netdev_alloc_skb - allocate an skbuff for rx on a specific device
  * @dev: network device to receive on
  * @length: length to allocate
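
The kernel-doc above spells out the buffer-sizing contract, but this patch adds no caller, so the following is only an illustrative sketch of the intended RX path. The names example_alloc_rx_buffer, example_build_rx_skb and RX_BUF_LEN are hypothetical and not part of this commit; only build_skb() and the standard skb helpers come from the kernel.

	/* Hypothetical RX-path sketch: the driver kmalloc()s a bare data
	 * buffer with NET_SKB_PAD of headroom and room for skb_shared_info
	 * at the tail, lets the NIC DMA the frame into it, and only then
	 * builds the sk_buff.
	 */
	#include <linux/skbuff.h>
	#include <linux/slab.h>

	#define RX_BUF_LEN 1536		/* hypothetical maximum frame size */

	static void *example_alloc_rx_buffer(gfp_t gfp)
	{
		unsigned int size = NET_SKB_PAD + RX_BUF_LEN +
				    SKB_DATA_ALIGN(sizeof(struct skb_shared_info));

		return kmalloc(size, gfp);
	}

	/* Called after the NIC has written 'len' bytes at data + NET_SKB_PAD. */
	static struct sk_buff *example_build_rx_skb(void *data, unsigned int len)
	{
		struct sk_buff *skb = build_skb(data);

		if (!skb) {
			kfree(data);	/* build_skb() does not free @data on failure */
			return NULL;
		}
		skb_reserve(skb, NET_SKB_PAD);	/* skip the headroom the driver added */
		skb_put(skb, len);		/* account for the received payload */
		return skb;
	}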
@@ -403,7 +452,7 @@ static void skb_release_head_state(struct sk_buff *skb)
 		WARN_ON(in_irq());
 		skb->destructor(skb);
 	}
-#if defined(CONFIG_NF_CONNTRACK) || defined(CONFIG_NF_CONNTRACK_MODULE)
+#if IS_ENABLED(CONFIG_NF_CONNTRACK)
 	nf_conntrack_put(skb->nfct);
 #endif
 #ifdef NET_SKBUFF_NF_DEFRAG_NEEDED
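
Several hunks in this patch replace the open-coded "built-in or module" test with IS_ENABLED(), which comes from include/linux/kconfig.h rather than from this patch. The two forms are equivalent, as the sketch below illustrates using the conntrack hunk above:

	/* Old, open-coded test: true when the option is =y or =m. */
	#if defined(CONFIG_NF_CONNTRACK) || defined(CONFIG_NF_CONNTRACK_MODULE)
		nf_conntrack_put(skb->nfct);
	#endif

	/* New form, same effect: IS_ENABLED(CONFIG_NF_CONNTRACK) evaluates
	 * to 1 for both CONFIG_NF_CONNTRACK=y and CONFIG_NF_CONNTRACK=m.
	 */
	#if IS_ENABLED(CONFIG_NF_CONNTRACK)
		nf_conntrack_put(skb->nfct);
	#endif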
@@ -553,15 +602,14 @@ static void __copy_skb_header(struct sk_buff *new, const struct sk_buff *old)
 	new->ip_summed = old->ip_summed;
 	skb_copy_queue_mapping(new, old);
 	new->priority = old->priority;
-#if defined(CONFIG_IP_VS) || defined(CONFIG_IP_VS_MODULE)
+#if IS_ENABLED(CONFIG_IP_VS)
 	new->ipvs_property = old->ipvs_property;
 #endif
 	new->protocol = old->protocol;
 	new->mark = old->mark;
 	new->skb_iif = old->skb_iif;
 	__nf_copy(new, old);
-#if defined(CONFIG_NETFILTER_XT_TARGET_TRACE) || \
-    defined(CONFIG_NETFILTER_XT_TARGET_TRACE_MODULE)
+#if IS_ENABLED(CONFIG_NETFILTER_XT_TARGET_TRACE)
 	new->nf_trace = old->nf_trace;
 #endif
 #ifdef CONFIG_NET_SCHED
@@ -791,8 +839,9 @@ struct sk_buff *skb_copy(const struct sk_buff *skb, gfp_t gfp_mask) | |||
791 | EXPORT_SYMBOL(skb_copy); | 839 | EXPORT_SYMBOL(skb_copy); |
792 | 840 | ||
793 | /** | 841 | /** |
794 | * pskb_copy - create copy of an sk_buff with private head. | 842 | * __pskb_copy - create copy of an sk_buff with private head. |
795 | * @skb: buffer to copy | 843 | * @skb: buffer to copy |
844 | * @headroom: headroom of new skb | ||
796 | * @gfp_mask: allocation priority | 845 | * @gfp_mask: allocation priority |
797 | * | 846 | * |
798 | * Make a copy of both an &sk_buff and part of its data, located | 847 | * Make a copy of both an &sk_buff and part of its data, located |
@@ -803,16 +852,16 @@ EXPORT_SYMBOL(skb_copy);
  * The returned buffer has a reference count of 1.
  */
 
-struct sk_buff *pskb_copy(struct sk_buff *skb, gfp_t gfp_mask)
+struct sk_buff *__pskb_copy(struct sk_buff *skb, int headroom, gfp_t gfp_mask)
 {
-	unsigned int size = skb_end_pointer(skb) - skb->head;
+	unsigned int size = skb_headlen(skb) + headroom;
 	struct sk_buff *n = alloc_skb(size, gfp_mask);
 
 	if (!n)
 		goto out;
 
 	/* Set the data pointer */
-	skb_reserve(n, skb_headroom(skb));
+	skb_reserve(n, headroom);
 	/* Set the tail pointer and length */
 	skb_put(n, skb_headlen(skb));
 	/* Copy the bytes */
@@ -848,7 +897,7 @@ struct sk_buff *pskb_copy(struct sk_buff *skb, gfp_t gfp_mask)
 out:
 	return n;
 }
-EXPORT_SYMBOL(pskb_copy);
+EXPORT_SYMBOL(__pskb_copy);
 
 /**
  * pskb_expand_head - reallocate header of &sk_buff
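
Because the diffstat above is limited to net/core/skbuff.c, the compatibility path for existing pskb_copy() callers is not visible here; presumably the companion header change keeps the old semantics by wrapping the new helper, along the lines of the sketch below (an assumption about include/linux/skbuff.h, not a hunk of this patch):

	/* Sketch of a compatibility wrapper: the old pskb_copy() behaviour is
	 * "copy with the original skb's headroom", which is exactly
	 * __pskb_copy(skb, skb_headroom(skb), gfp_mask).
	 */
	static inline struct sk_buff *pskb_copy(struct sk_buff *skb, gfp_t gfp_mask)
	{
		return __pskb_copy(skb, skb_headroom(skb), gfp_mask);
	}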
@@ -2621,7 +2670,7 @@ EXPORT_SYMBOL_GPL(skb_pull_rcsum);
  * a pointer to the first in a list of new skbs for the segments.
  * In case of error it returns ERR_PTR(err).
  */
-struct sk_buff *skb_segment(struct sk_buff *skb, u32 features)
+struct sk_buff *skb_segment(struct sk_buff *skb, netdev_features_t features)
 {
 	struct sk_buff *segs = NULL;
 	struct sk_buff *tail = NULL;
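
The skb_segment() hunk is part of the wider conversion away from passing device features as a bare u32: netdev_features_t (declared in include/linux/netdev_features.h, outside this diff) is a wider integer typedef, so feature bits numbered 32 and above are not truncated on the way into GSO. The example_can_sg() helper below is purely illustrative and not taken from this patch:

	#include <linux/netdevice.h>	/* pulls in the netdev_features_t typedef */

	/* Feature masks are tested bitwise at any width; carrying the full
	 * netdev_features_t through helpers such as skb_segment() keeps
	 * high-numbered feature bits intact.
	 */
	static bool example_can_sg(netdev_features_t features)
	{
		return !!(features & NETIF_F_SG);
	}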
@@ -3169,6 +3218,26 @@ void skb_tstamp_tx(struct sk_buff *orig_skb,
 }
 EXPORT_SYMBOL_GPL(skb_tstamp_tx);
 
+void skb_complete_wifi_ack(struct sk_buff *skb, bool acked)
+{
+	struct sock *sk = skb->sk;
+	struct sock_exterr_skb *serr;
+	int err;
+
+	skb->wifi_acked_valid = 1;
+	skb->wifi_acked = acked;
+
+	serr = SKB_EXT_ERR(skb);
+	memset(serr, 0, sizeof(*serr));
+	serr->ee.ee_errno = ENOMSG;
+	serr->ee.ee_origin = SO_EE_ORIGIN_TXSTATUS;
+
+	err = sock_queue_err_skb(sk, skb);
+	if (err)
+		kfree_skb(skb);
+}
+EXPORT_SYMBOL_GPL(skb_complete_wifi_ack);
+
 
 /**
  * skb_partial_csum_set - set up and verify partial csum values for packet
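
skb_complete_wifi_ack() is the transmit-status counterpart of skb_tstamp_tx() just above it: a wireless driver (or mac80211) reports whether the frame was acknowledged over the air, and the result is queued on the owning socket's error queue with origin SO_EE_ORIGIN_TXSTATUS, where userspace can collect it via recvmsg(MSG_ERRQUEUE). The caller sketch below is hypothetical; this patch only adds the helper itself.

	/* Hypothetical TX-completion path: once the hardware reports ACK
	 * status, hand the skb back to the socket layer.  The skb is consumed
	 * here - either queued on sk->sk_error_queue or freed on error inside
	 * skb_complete_wifi_ack() - so the caller must not reuse it.
	 */
	static void example_tx_status(struct sk_buff *skb, bool acked)
	{
		if (!skb->sk) {		/* no socket asked for TX status */
			kfree_skb(skb);
			return;
		}
		skb_complete_wifi_ack(skb, acked);
	}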