Diffstat (limited to 'include/linux/skbuff.h')
 include/linux/skbuff.h | 347 +++++++++++++++++++++++++++++++++++-------------
 1 file changed, 256 insertions(+), 91 deletions(-)
diff --git a/include/linux/skbuff.h b/include/linux/skbuff.h
index abde271c18ae..3ab0749d6875 100644
--- a/include/linux/skbuff.h
+++ b/include/linux/skbuff.h
@@ -28,7 +28,6 @@
 #include <linux/textsearch.h>
 #include <net/checksum.h>
 #include <linux/rcupdate.h>
-#include <linux/dmaengine.h>
 #include <linux/hrtimer.h>
 #include <linux/dma-mapping.h>
 #include <linux/netdev_features.h>
@@ -47,11 +46,29 @@
  *
  * The hardware you're dealing with doesn't calculate the full checksum
  * (as in CHECKSUM_COMPLETE), but it does parse headers and verify checksums
- * for specific protocols e.g. TCP/UDP/SCTP, then, for such packets it will
- * set CHECKSUM_UNNECESSARY if their checksums are okay. skb->csum is still
- * undefined in this case though. It is a bad option, but, unfortunately,
- * nowadays most vendors do this. Apparently with the secret goal to sell
- * you new devices, when you will add new protocol to your host, f.e. IPv6 8)
+ * for specific protocols. For such packets it will set CHECKSUM_UNNECESSARY
+ * if their checksums are okay. skb->csum is still undefined in this case
+ * though. It is a bad option, but, unfortunately, nowadays most vendors do
+ * this. Apparently with the secret goal to sell you new devices, when you
+ * will add new protocol to your host, f.e. IPv6 8)
+ *
+ * CHECKSUM_UNNECESSARY is applicable to the following protocols:
+ *   TCP: IPv6 and IPv4.
+ *   UDP: IPv4 and IPv6. A device may apply CHECKSUM_UNNECESSARY to a
+ *     zero UDP checksum for either IPv4 or IPv6; the networking stack
+ *     may perform further validation in this case.
+ *   GRE: only if the checksum is present in the header.
+ *   SCTP: indicates the CRC in the SCTP header has been validated.
+ *
+ * skb->csum_level indicates the number of consecutive checksums found in
+ * the packet minus one that have been verified as CHECKSUM_UNNECESSARY.
+ * For instance if a device receives an IPv6->UDP->GRE->IPv4->TCP packet
+ * and the device is able to verify the checksums for UDP (possibly zero),
+ * GRE (checksum flag is set) and TCP, skb->csum_level would be set to
+ * two. If the device were only able to verify the UDP checksum and not
+ * GRE, either because it doesn't support GRE checksum or because the GRE
+ * checksum is bad, skb->csum_level would be set to zero (TCP checksum is
+ * not considered in this case).
  *
  * CHECKSUM_COMPLETE:
  *
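A quick sketch of the csum_level accounting described above (illustrative only, not part of the patch; it uses the __skb_decr_checksum_unnecessary() helper added later in this file). For the IPv6->UDP->GRE->IPv4->TCP example with all three checksums verified, the driver leaves ip_summed == CHECKSUM_UNNECESSARY and csum_level == 2, and each validation step in the stack then pops one level:

	__skb_decr_checksum_unnecessary(skb);	/* UDP consumed, csum_level 2 -> 1 */
	__skb_decr_checksum_unnecessary(skb);	/* GRE consumed, csum_level 1 -> 0 */
	__skb_decr_checksum_unnecessary(skb);	/* TCP consumed, ip_summed -> CHECKSUM_NONE */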
@@ -112,6 +129,9 @@
 #define CHECKSUM_COMPLETE	2
 #define CHECKSUM_PARTIAL	3
 
+/* Maximum value in skb->csum_level */
+#define SKB_MAX_CSUM_LEVEL	3
+
 #define SKB_DATA_ALIGN(X)	ALIGN(X, SMP_CACHE_BYTES)
 #define SKB_WITH_OVERHEAD(X)	\
 	((X) - SKB_DATA_ALIGN(sizeof(struct skb_shared_info)))
@@ -135,7 +155,7 @@ struct nf_conntrack {
 };
 #endif
 
-#ifdef CONFIG_BRIDGE_NETFILTER
+#if IS_ENABLED(CONFIG_BRIDGE_NETFILTER)
 struct nf_bridge_info {
 	atomic_t		use;
 	unsigned int		mask;
@@ -318,9 +338,10 @@ struct skb_shared_info {
 
 
 enum {
-	SKB_FCLONE_UNAVAILABLE,
-	SKB_FCLONE_ORIG,
-	SKB_FCLONE_CLONE,
+	SKB_FCLONE_UNAVAILABLE,	/* skb has no fclone (from head_cache) */
+	SKB_FCLONE_ORIG,	/* orig skb (from fclone_cache) */
+	SKB_FCLONE_CLONE,	/* companion fclone skb (from fclone_cache) */
+	SKB_FCLONE_FREE,	/* this companion fclone skb is available */
 };
 
 enum {
@@ -452,6 +473,7 @@ static inline u32 skb_mstamp_us_delta(const struct skb_mstamp *t1,
  *	@tc_verd: traffic control verdict
  *	@hash: the packet hash
  *	@queue_mapping: Queue mapping for multiqueue devices
+ *	@xmit_more: More SKBs are pending for this queue
  *	@ndisc_nodetype: router type (from link layer)
  *	@ooo_okay: allow the mapping of a socket to a queue to be changed
  *	@l4_hash: indicate hash is a canonical 4-tuple hash over transport
@@ -460,8 +482,6 @@ static inline u32 skb_mstamp_us_delta(const struct skb_mstamp *t1,
  *	@wifi_acked_valid: wifi_acked was set
  *	@wifi_acked: whether frame was acked on wifi or not
  *	@no_fcs: Request NIC to treat last 4 bytes as Ethernet FCS
- *	@dma_cookie: a cookie to one of several possible DMA operations
- *		done by skb DMA functions
  *	@napi_id: id of the NAPI struct this skb came from
  *	@secmark: security marking
  *	@mark: Generic packet mark
@@ -505,87 +525,99 @@ struct sk_buff {
 	char			cb[48] __aligned(8);
 
 	unsigned long		_skb_refdst;
+	void			(*destructor)(struct sk_buff *skb);
 #ifdef CONFIG_XFRM
 	struct	sec_path	*sp;
 #endif
+#if defined(CONFIG_NF_CONNTRACK) || defined(CONFIG_NF_CONNTRACK_MODULE)
+	struct nf_conntrack	*nfct;
+#endif
+#if IS_ENABLED(CONFIG_BRIDGE_NETFILTER)
+	struct nf_bridge_info	*nf_bridge;
+#endif
 	unsigned int		len,
 				data_len;
 	__u16			mac_len,
 				hdr_len;
-	union {
-		__wsum		csum;
-		struct {
-			__u16	csum_start;
-			__u16	csum_offset;
-		};
-	};
-	__u32			priority;
+
+	/* Following fields are _not_ copied in __copy_skb_header()
+	 * Note that queue_mapping is here mostly to fill a hole.
+	 */
 	kmemcheck_bitfield_begin(flags1);
-	__u8			ignore_df:1,
-				cloned:1,
-				ip_summed:2,
+	__u16			queue_mapping;
+	__u8			cloned:1,
 				nohdr:1,
-				nfctinfo:3;
-	__u8			pkt_type:3,
 				fclone:2,
-				ipvs_property:1,
 				peeked:1,
-				nf_trace:1;
+				head_frag:1,
+				xmit_more:1;
+	/* one bit hole */
 	kmemcheck_bitfield_end(flags1);
-	__be16			protocol;
-
-	void			(*destructor)(struct sk_buff *skb);
-#if defined(CONFIG_NF_CONNTRACK) || defined(CONFIG_NF_CONNTRACK_MODULE)
-	struct nf_conntrack	*nfct;
-#endif
-#ifdef CONFIG_BRIDGE_NETFILTER
-	struct nf_bridge_info	*nf_bridge;
-#endif
-
-	int			skb_iif;
-
-	__u32			hash;
 
-	__be16			vlan_proto;
-	__u16			vlan_tci;
+	/* fields enclosed in headers_start/headers_end are copied
+	 * using a single memcpy() in __copy_skb_header()
+	 */
+	__u32			headers_start[0];
 
-#ifdef CONFIG_NET_SCHED
-	__u16			tc_index;	/* traffic control index */
-#ifdef CONFIG_NET_CLS_ACT
-	__u16			tc_verd;	/* traffic control verdict */
-#endif
+/* if you move pkt_type around you also must adapt those constants */
+#ifdef __BIG_ENDIAN_BITFIELD
+#define PKT_TYPE_MAX	(7 << 5)
+#else
+#define PKT_TYPE_MAX	7
 #endif
+#define PKT_TYPE_OFFSET()	offsetof(struct sk_buff, __pkt_type_offset)
 
-	__u16			queue_mapping;
-	kmemcheck_bitfield_begin(flags2);
-#ifdef CONFIG_IPV6_NDISC_NODETYPE
-	__u8			ndisc_nodetype:2;
-#endif
+	__u8			__pkt_type_offset[0];
+	__u8			pkt_type:3;
 	__u8			pfmemalloc:1;
+	__u8			ignore_df:1;
+	__u8			nfctinfo:3;
+
+	__u8			nf_trace:1;
+	__u8			ip_summed:2;
 	__u8			ooo_okay:1;
 	__u8			l4_hash:1;
 	__u8			sw_hash:1;
 	__u8			wifi_acked_valid:1;
 	__u8			wifi_acked:1;
+
 	__u8			no_fcs:1;
-	__u8			head_frag:1;
-	/* Encapsulation protocol and NIC drivers should use
-	 * this flag to indicate to each other if the skb contains
-	 * encapsulated packet or not and maybe use the inner packet
-	 * headers if needed
-	 */
+	/* Indicates the inner headers are valid in the skbuff. */
 	__u8			encapsulation:1;
 	__u8			encap_hdr_csum:1;
 	__u8			csum_valid:1;
 	__u8			csum_complete_sw:1;
-	/* 2/4 bit hole (depending on ndisc_nodetype presence) */
-	kmemcheck_bitfield_end(flags2);
+	__u8			csum_level:2;
+	__u8			csum_bad:1;
+
+#ifdef CONFIG_IPV6_NDISC_NODETYPE
+	__u8			ndisc_nodetype:2;
+#endif
+	__u8			ipvs_property:1;
+	__u8			inner_protocol_type:1;
+	/* 4 or 6 bit hole */
+
+#ifdef CONFIG_NET_SCHED
+	__u16			tc_index;	/* traffic control index */
+#ifdef CONFIG_NET_CLS_ACT
+	__u16			tc_verd;	/* traffic control verdict */
+#endif
+#endif
 
-#if defined CONFIG_NET_DMA || defined CONFIG_NET_RX_BUSY_POLL
 	union {
-		unsigned int	napi_id;
-		dma_cookie_t	dma_cookie;
+		__wsum		csum;
+		struct {
+			__u16	csum_start;
+			__u16	csum_offset;
+		};
 	};
+	__u32			priority;
+	int			skb_iif;
+	__u32			hash;
+	__be16			vlan_proto;
+	__u16			vlan_tci;
+#ifdef CONFIG_NET_RX_BUSY_POLL
+	unsigned int	napi_id;
 #endif
 #ifdef CONFIG_NETWORK_SECMARK
 	__u32			secmark;
@@ -596,13 +628,22 @@ struct sk_buff {
 		__u32		reserved_tailroom;
 	};
 
-	__be16			inner_protocol;
+	union {
+		__be16		inner_protocol;
+		__u8		inner_ipproto;
+	};
+
 	__u16			inner_transport_header;
 	__u16			inner_network_header;
 	__u16			inner_mac_header;
+
+	__be16			protocol;
 	__u16			transport_header;
 	__u16			network_header;
 	__u16			mac_header;
+
+	__u32			headers_end[0];
+
 	/* These elements must be at the end, see alloc_skb() for details. */
 	sk_buff_data_t		tail;
 	sk_buff_data_t		end;
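The headers_start[0]/headers_end[0] markers exist so that __copy_skb_header() can copy every field between them at once; roughly (a sketch of the consumer side, which lives in net/core/skbuff.c):

	memcpy(&new->headers_start, &old->headers_start,
	       offsetof(struct sk_buff, headers_end) -
	       offsetof(struct sk_buff, headers_start));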
@@ -734,6 +775,37 @@ static inline struct sk_buff *alloc_skb(unsigned int size,
 	return __alloc_skb(size, priority, 0, NUMA_NO_NODE);
 }
 
+struct sk_buff *alloc_skb_with_frags(unsigned long header_len,
+				     unsigned long data_len,
+				     int max_page_order,
+				     int *errcode,
+				     gfp_t gfp_mask);
+
+/* Layout of fast clones : [skb1][skb2][fclone_ref] */
+struct sk_buff_fclones {
+	struct sk_buff	skb1;
+
+	struct sk_buff	skb2;
+
+	atomic_t	fclone_ref;
+};
+
+/**
+ *	skb_fclone_busy - check if fclone is busy
+ *	@skb: buffer
+ *
+ * Returns true if skb is a fast clone, and its clone is not freed.
+ */
+static inline bool skb_fclone_busy(const struct sk_buff *skb)
+{
+	const struct sk_buff_fclones *fclones;
+
+	fclones = container_of(skb, struct sk_buff_fclones, skb1);
+
+	return skb->fclone == SKB_FCLONE_ORIG &&
+	       fclones->skb2.fclone == SKB_FCLONE_CLONE;
+}
+
 static inline struct sk_buff *alloc_skb_fclone(unsigned int size,
 					       gfp_t priority)
 {
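A hypothetical call site for skb_fclone_busy(); the helper itself is real (above), the surrounding transmit logic is illustrative:

	/* Leave the buffer alone while its fast clone is still
	 * referenced somewhere in the stack (e.g. queued for
	 * retransmit).
	 */
	if (skb_fclone_busy(skb))
		return;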
@@ -1042,6 +1114,7 @@ static inline int skb_header_cloned(const struct sk_buff *skb)
  * Drop a reference to the header part of the buffer. This is done
  * by acquiring a payload reference. You must not read from the header
  * part of skb->data after this.
+ * Note : Check if you can use __skb_header_release() instead.
  */
 static inline void skb_header_release(struct sk_buff *skb)
 {
@@ -1051,6 +1124,20 @@ static inline void skb_header_release(struct sk_buff *skb)
 }
 
 /**
+ *	__skb_header_release - release reference to header
+ *	@skb: buffer to operate on
+ *
+ *	Variant of skb_header_release() assuming skb is private to caller.
+ *	We can avoid one atomic operation.
+ */
+static inline void __skb_header_release(struct sk_buff *skb)
+{
+	skb->nohdr = 1;
+	atomic_set(&skb_shinfo(skb)->dataref, 1 + (1 << SKB_DATAREF_SHIFT));
+}
+
+
+/**
  *	skb_shared - is the buffer shared
  *	@skb: buffer to check
  *
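A minimal usage sketch for __skb_header_release(), assuming the skb was just allocated and is therefore private to the caller:

	skb = alloc_skb_fclone(size, GFP_KERNEL);
	if (skb) {
		/* no other reference can exist yet, so the non-atomic
		 * variant is safe and saves the atomic operation that
		 * skb_header_release() would pay
		 */
		__skb_header_release(skb);
	}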
@@ -1675,6 +1762,23 @@ static inline void skb_reserve(struct sk_buff *skb, int len)
 	skb->tail += len;
 }
 
+#define ENCAP_TYPE_ETHER	0
+#define ENCAP_TYPE_IPPROTO	1
+
+static inline void skb_set_inner_protocol(struct sk_buff *skb,
+					  __be16 protocol)
+{
+	skb->inner_protocol = protocol;
+	skb->inner_protocol_type = ENCAP_TYPE_ETHER;
+}
+
+static inline void skb_set_inner_ipproto(struct sk_buff *skb,
+					 __u8 ipproto)
+{
+	skb->inner_ipproto = ipproto;
+	skb->inner_protocol_type = ENCAP_TYPE_IPPROTO;
+}
+
 static inline void skb_reset_inner_headers(struct sk_buff *skb)
 {
 	skb->inner_mac_header = skb->mac_header;
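An illustrative tunnel transmit step (ETH_P_TEB and IPPROTO_IPIP are standard kernel constants; the l2_tunnel flag is hypothetical): record what sits inside the encapsulation before pushing the outer headers.

	if (l2_tunnel)
		skb_set_inner_protocol(skb, htons(ETH_P_TEB));
	else
		skb_set_inner_ipproto(skb, IPPROTO_IPIP);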
@@ -1860,18 +1964,6 @@ static inline int pskb_network_may_pull(struct sk_buff *skb, unsigned int len)
 	return pskb_may_pull(skb, skb_network_offset(skb) + len);
 }
 
-static inline void skb_pop_rcv_encapsulation(struct sk_buff *skb)
-{
-	/* Only continue with checksum unnecessary if device indicated
-	 * it is valid across encapsulation (skb->encapsulation was set).
-	 */
-	if (skb->ip_summed == CHECKSUM_UNNECESSARY && !skb->encapsulation)
-		skb->ip_summed = CHECKSUM_NONE;
-
-	skb->encapsulation = 0;
-	skb->csum_valid = 0;
-}
-
 /*
  * CPUs often take a performance hit when accessing unaligned memory
  * locations. The actual performance hit varies, it can be small if the
@@ -2567,20 +2659,26 @@ __wsum __skb_checksum(const struct sk_buff *skb, int offset, int len,
 __wsum skb_checksum(const struct sk_buff *skb, int offset, int len,
 		    __wsum csum);
 
-static inline void *skb_header_pointer(const struct sk_buff *skb, int offset,
-				       int len, void *buffer)
+static inline void *__skb_header_pointer(const struct sk_buff *skb, int offset,
+					 int len, void *data, int hlen, void *buffer)
 {
-	int hlen = skb_headlen(skb);
-
 	if (hlen - offset >= len)
-		return skb->data + offset;
+		return data + offset;
 
-	if (skb_copy_bits(skb, offset, buffer, len) < 0)
+	if (!skb ||
+	    skb_copy_bits(skb, offset, buffer, len) < 0)
 		return NULL;
 
 	return buffer;
 }
 
+static inline void *skb_header_pointer(const struct sk_buff *skb, int offset,
+				       int len, void *buffer)
+{
+	return __skb_header_pointer(skb, offset, len, skb->data,
+				    skb_headlen(skb), buffer);
+}
+
 /**
  *	skb_needs_linearize - check if we need to linearize a given skb
  *				depending on the given device features.
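Typical use of skb_header_pointer() (illustrative): _uh is backing storage that is only written when the requested header does not sit linearly in skb->data.

	struct udphdr _uh;
	const struct udphdr *uh;

	uh = skb_header_pointer(skb, skb_transport_offset(skb),
				sizeof(_uh), &_uh);
	if (!uh)
		return -EINVAL;	/* packet too short */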
@@ -2671,6 +2769,8 @@ static inline ktime_t net_invalid_timestamp(void)
 	return ktime_set(0, 0);
 }
 
+struct sk_buff *skb_clone_sk(struct sk_buff *skb);
+
 #ifdef CONFIG_NETWORK_PHY_TIMESTAMPING
 
 void skb_clone_tx_timestamp(struct sk_buff *skb);
@@ -2786,6 +2886,42 @@ static inline __sum16 skb_checksum_complete(struct sk_buff *skb)
 	       0 : __skb_checksum_complete(skb);
 }
 
+static inline void __skb_decr_checksum_unnecessary(struct sk_buff *skb)
+{
+	if (skb->ip_summed == CHECKSUM_UNNECESSARY) {
+		if (skb->csum_level == 0)
+			skb->ip_summed = CHECKSUM_NONE;
+		else
+			skb->csum_level--;
+	}
+}
+
+static inline void __skb_incr_checksum_unnecessary(struct sk_buff *skb)
+{
+	if (skb->ip_summed == CHECKSUM_UNNECESSARY) {
+		if (skb->csum_level < SKB_MAX_CSUM_LEVEL)
+			skb->csum_level++;
+	} else if (skb->ip_summed == CHECKSUM_NONE) {
+		skb->ip_summed = CHECKSUM_UNNECESSARY;
+		skb->csum_level = 0;
+	}
+}
+
+static inline void __skb_mark_checksum_bad(struct sk_buff *skb)
+{
+	/* Mark current checksum as bad (typically called from GRO
+	 * path). In the case that ip_summed is CHECKSUM_NONE
+	 * this must be the first checksum encountered in the packet.
+	 * When ip_summed is CHECKSUM_UNNECESSARY, this is the first
+	 * checksum after the last one validated. For UDP, a zero
+	 * checksum can not be marked as bad.
+	 */
+
+	if (skb->ip_summed == CHECKSUM_NONE ||
+	    skb->ip_summed == CHECKSUM_UNNECESSARY)
+		skb->csum_bad = 1;
+}
+
 /* Check if we need to perform checksum complete validation.
  *
  * Returns true if checksum complete is needed, false otherwise
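A hypothetical driver-side sketch of the increment helper (real drivers may also set csum_level directly; the net effect is the same). Hardware reported that both the outer and the inner checksum verified OK:

	__skb_incr_checksum_unnecessary(skb);	/* ip_summed -> CHECKSUM_UNNECESSARY, csum_level = 0 */
	__skb_incr_checksum_unnecessary(skb);	/* csum_level 0 -> 1 */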
@@ -2797,6 +2933,7 @@ static inline bool __skb_checksum_validate_needed(struct sk_buff *skb,
 {
 	if (skb_csum_unnecessary(skb) || (zero_okay && !check)) {
 		skb->csum_valid = 1;
+		__skb_decr_checksum_unnecessary(skb);
 		return false;
 	}
 
@@ -2826,6 +2963,9 @@ static inline __sum16 __skb_checksum_validate_complete(struct sk_buff *skb,
 			skb->csum_valid = 1;
 			return 0;
 		}
+	} else if (skb->csum_bad) {
+		/* ip_summed == CHECKSUM_NONE in this case */
+		return 1;
 	}
 
 	skb->csum = psum;
@@ -2883,6 +3023,26 @@ static inline __wsum null_compute_pseudo(struct sk_buff *skb, int proto)
 #define skb_checksum_simple_validate(skb)				\
 	__skb_checksum_validate(skb, 0, true, false, 0, null_compute_pseudo)
 
+static inline bool __skb_checksum_convert_check(struct sk_buff *skb)
+{
+	return (skb->ip_summed == CHECKSUM_NONE &&
+		skb->csum_valid && !skb->csum_bad);
+}
+
+static inline void __skb_checksum_convert(struct sk_buff *skb,
+					  __sum16 check, __wsum pseudo)
+{
+	skb->csum = ~pseudo;
+	skb->ip_summed = CHECKSUM_COMPLETE;
+}
+
+#define skb_checksum_try_convert(skb, proto, check, compute_pseudo)	\
+do {									\
+	if (__skb_checksum_convert_check(skb))				\
+		__skb_checksum_convert(skb, check,			\
+				       compute_pseudo(skb, proto));	\
+} while (0)
+
 #if defined(CONFIG_NF_CONNTRACK) || defined(CONFIG_NF_CONNTRACK_MODULE)
 void nf_conntrack_destroy(struct nf_conntrack *nfct);
 static inline void nf_conntrack_put(struct nf_conntrack *nfct)
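A sketch of the intended call site for skb_checksum_try_convert() in a UDP receive path (uh and the inet_compute_pseudo() pseudo-header helper are assumptions here): once the UDP checksum has validated, convert the skb to CHECKSUM_COMPLETE so an inner (tunneled) checksum can reuse the computed sum.

	skb_checksum_try_convert(skb, IPPROTO_UDP, uh->check,
				 inet_compute_pseudo);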
@@ -2896,7 +3056,7 @@ static inline void nf_conntrack_get(struct nf_conntrack *nfct)
 	atomic_inc(&nfct->use);
 }
 #endif
-#ifdef CONFIG_BRIDGE_NETFILTER
+#if IS_ENABLED(CONFIG_BRIDGE_NETFILTER)
 static inline void nf_bridge_put(struct nf_bridge_info *nf_bridge)
 {
 	if (nf_bridge && atomic_dec_and_test(&nf_bridge->use))
@@ -2914,7 +3074,7 @@ static inline void nf_reset(struct sk_buff *skb)
 	nf_conntrack_put(skb->nfct);
 	skb->nfct = NULL;
 #endif
-#ifdef CONFIG_BRIDGE_NETFILTER
+#if IS_ENABLED(CONFIG_BRIDGE_NETFILTER)
 	nf_bridge_put(skb->nf_bridge);
 	skb->nf_bridge = NULL;
 #endif
@@ -2928,19 +3088,22 @@ static inline void nf_reset_trace(struct sk_buff *skb)
 }
 
 /* Note: This doesn't put any conntrack and bridge info in dst. */
-static inline void __nf_copy(struct sk_buff *dst, const struct sk_buff *src)
+static inline void __nf_copy(struct sk_buff *dst, const struct sk_buff *src,
+			     bool copy)
 {
 #if defined(CONFIG_NF_CONNTRACK) || defined(CONFIG_NF_CONNTRACK_MODULE)
 	dst->nfct = src->nfct;
 	nf_conntrack_get(src->nfct);
-	dst->nfctinfo = src->nfctinfo;
+	if (copy)
+		dst->nfctinfo = src->nfctinfo;
 #endif
-#ifdef CONFIG_BRIDGE_NETFILTER
+#if IS_ENABLED(CONFIG_BRIDGE_NETFILTER)
 	dst->nf_bridge = src->nf_bridge;
 	nf_bridge_get(src->nf_bridge);
 #endif
 #if IS_ENABLED(CONFIG_NETFILTER_XT_TARGET_TRACE) || defined(CONFIG_NF_TABLES)
-	dst->nf_trace = src->nf_trace;
+	if (copy)
+		dst->nf_trace = src->nf_trace;
 #endif
 }
 
@@ -2949,10 +3112,10 @@ static inline void nf_copy(struct sk_buff *dst, const struct sk_buff *src)
 #if defined(CONFIG_NF_CONNTRACK) || defined(CONFIG_NF_CONNTRACK_MODULE)
 	nf_conntrack_put(dst->nfct);
 #endif
-#ifdef CONFIG_BRIDGE_NETFILTER
+#if IS_ENABLED(CONFIG_BRIDGE_NETFILTER)
 	nf_bridge_put(dst->nf_bridge);
 #endif
-	__nf_copy(dst, src);
+	__nf_copy(dst, src, true);
 }
 
 #ifdef CONFIG_NETWORK_SECMARK
@@ -3137,7 +3300,9 @@ bool skb_partial_csum_set(struct sk_buff *skb, u16 start, u16 off);
 
 int skb_checksum_setup(struct sk_buff *skb, bool recalculate);
 
-u32 __skb_get_poff(const struct sk_buff *skb);
+u32 skb_get_poff(const struct sk_buff *skb);
+u32 __skb_get_poff(const struct sk_buff *skb, void *data,
+		   const struct flow_keys *keys, int hlen);
 
 /**
  *	skb_head_is_locked - Determine if the skb->head is locked down