diff options
Diffstat (limited to 'include/linux/skbuff.h')
-rw-r--r-- include/linux/skbuff.h | 195
1 file changed, 136 insertions, 59 deletions
diff --git a/include/linux/skbuff.h b/include/linux/skbuff.h
index 77c7aae1c6b2..c5cd016f5120 100644
--- a/include/linux/skbuff.h
+++ b/include/linux/skbuff.h
@@ -34,11 +34,82 @@ | |||
34 | #include <linux/netdev_features.h> | 34 | #include <linux/netdev_features.h> |
35 | #include <net/flow_keys.h> | 35 | #include <net/flow_keys.h> |
36 | 36 | ||
37 | /* A. Checksumming of received packets by device. | ||
38 | * | ||
39 | * CHECKSUM_NONE: | ||
40 | * | ||
41 | * Device failed to checksum this packet e.g. due to lack of capabilities. | ||
42 | * The packet contains full (though not verified) checksum in packet but | ||
43 | * not in skb->csum. Thus, skb->csum is undefined in this case. | ||
44 | * | ||
45 | * CHECKSUM_UNNECESSARY: | ||
46 | * | ||
47 | * The hardware you're dealing with doesn't calculate the full checksum | ||
48 | * (as in CHECKSUM_COMPLETE), but it does parse headers and verify checksums | ||
49 | * for specific protocols e.g. TCP/UDP/SCTP, then, for such packets it will | ||
50 | * set CHECKSUM_UNNECESSARY if their checksums are okay. skb->csum is still | ||
51 | * undefined in this case though. It is a bad option, but, unfortunately, | ||
52 | * nowadays most vendors do this. Apparently with the secret goal to sell | ||
53 | * you new devices, when you will add new protocol to your host, f.e. IPv6 8) | ||
54 | * | ||
55 | * CHECKSUM_COMPLETE: | ||
56 | * | ||
57 | * This is the most generic way. The device supplied checksum of the _whole_ | ||
58 | * packet as seen by netif_rx() and fills out in skb->csum. Meaning, the | ||
59 | * hardware doesn't need to parse L3/L4 headers to implement this. | ||
60 | * | ||
61 | * Note: Even if device supports only some protocols, but is able to produce | ||
62 | * skb->csum, it MUST use CHECKSUM_COMPLETE, not CHECKSUM_UNNECESSARY. | ||
63 | * | ||
64 | * CHECKSUM_PARTIAL: | ||
65 | * | ||
66 | * This is identical to the case for output below. This may occur on a packet | ||
67 | * received directly from another Linux OS, e.g., a virtualized Linux kernel | ||
68 | * on the same host. The packet can be treated in the same way as | ||
69 | * CHECKSUM_UNNECESSARY, except that on output (i.e., forwarding) the | ||
70 | * checksum must be filled in by the OS or the hardware. | ||
71 | * | ||
72 | * B. Checksumming on output. | ||
73 | * | ||
74 | * CHECKSUM_NONE: | ||
75 | * | ||
76 | * The skb was already checksummed by the protocol, or a checksum is not | ||
77 | * required. | ||
78 | * | ||
79 | * CHECKSUM_PARTIAL: | ||
80 | * | ||
81 | * The device is required to checksum the packet as seen by hard_start_xmit() | ||
82 | * from skb->csum_start up to the end, and to record/write the checksum at | ||
83 | * offset skb->csum_start + skb->csum_offset. | ||
84 | * | ||
85 | * The device must show its capabilities in dev->features, set up at device | ||
86 | * setup time, e.g. netdev_features.h: | ||
87 | * | ||
88 | * NETIF_F_HW_CSUM - It's a clever device, it's able to checksum everything. | ||
89 | * NETIF_F_IP_CSUM - Device is dumb, it's able to checksum only TCP/UDP over | ||
90 | * IPv4. Sigh. Vendors like this way for an unknown reason. | ||
91 | * Though, see comment above about CHECKSUM_UNNECESSARY. 8) | ||
92 | * NETIF_F_IPV6_CSUM - About as dumb as the last one but does IPv6 instead. | ||
93 | * NETIF_F_... - Well, you get the picture. | ||
94 | * | ||
95 | * CHECKSUM_UNNECESSARY: | ||
96 | * | ||
97 | * Normally, the device will do per protocol specific checksumming. Protocol | ||
98 | * implementations that do not want the NIC to perform the checksum | ||
99 | * calculation should use this flag in their outgoing skbs. | ||
100 | * | ||
101 | * NETIF_F_FCOE_CRC - This indicates that the device can do FCoE FC CRC | ||
102 | * offload. Correspondingly, the FCoE protocol driver | ||
103 | * stack should use CHECKSUM_UNNECESSARY. | ||
104 | * | ||
105 | * Any questions? No questions, good. --ANK | ||
106 | */ | ||
107 | |||
37 | /* Don't change this without changing skb_csum_unnecessary! */ | 108 | /* Don't change this without changing skb_csum_unnecessary! */ |
38 | #define CHECKSUM_NONE 0 | 109 | #define CHECKSUM_NONE 0 |
39 | #define CHECKSUM_UNNECESSARY 1 | 110 | #define CHECKSUM_UNNECESSARY 1 |
40 | #define CHECKSUM_COMPLETE 2 | 111 | #define CHECKSUM_COMPLETE 2 |
41 | #define CHECKSUM_PARTIAL 3 | 112 | #define CHECKSUM_PARTIAL 3 |
42 | 113 | ||
43 | #define SKB_DATA_ALIGN(X) (((X) + (SMP_CACHE_BYTES - 1)) & \ | 114 | #define SKB_DATA_ALIGN(X) (((X) + (SMP_CACHE_BYTES - 1)) & \ |
44 | ~(SMP_CACHE_BYTES - 1)) | 115 | ~(SMP_CACHE_BYTES - 1)) |
@@ -54,58 +125,6 @@ | |||
54 | SKB_DATA_ALIGN(sizeof(struct sk_buff)) + \ | 125 | SKB_DATA_ALIGN(sizeof(struct sk_buff)) + \ |
55 | SKB_DATA_ALIGN(sizeof(struct skb_shared_info))) | 126 | SKB_DATA_ALIGN(sizeof(struct skb_shared_info))) |
56 | 127 | ||
57 | /* A. Checksumming of received packets by device. | ||
58 | * | ||
59 | * NONE: device failed to checksum this packet. | ||
60 | * skb->csum is undefined. | ||
61 | * | ||
62 | * UNNECESSARY: device parsed packet and wouldbe verified checksum. | ||
63 | * skb->csum is undefined. | ||
64 | * It is bad option, but, unfortunately, many of vendors do this. | ||
65 | * Apparently with secret goal to sell you new device, when you | ||
66 | * will add new protocol to your host. F.e. IPv6. 8) | ||
67 | * | ||
68 | * COMPLETE: the most generic way. Device supplied checksum of _all_ | ||
69 | * the packet as seen by netif_rx in skb->csum. | ||
70 | * NOTE: Even if device supports only some protocols, but | ||
71 | * is able to produce some skb->csum, it MUST use COMPLETE, | ||
72 | * not UNNECESSARY. | ||
73 | * | ||
74 | * PARTIAL: identical to the case for output below. This may occur | ||
75 | * on a packet received directly from another Linux OS, e.g., | ||
76 | * a virtualised Linux kernel on the same host. The packet can | ||
77 | * be treated in the same way as UNNECESSARY except that on | ||
78 | * output (i.e., forwarding) the checksum must be filled in | ||
79 | * by the OS or the hardware. | ||
80 | * | ||
81 | * B. Checksumming on output. | ||
82 | * | ||
83 | * NONE: skb is checksummed by protocol or csum is not required. | ||
84 | * | ||
85 | * PARTIAL: device is required to csum packet as seen by hard_start_xmit | ||
86 | * from skb->csum_start to the end and to record the checksum | ||
87 | * at skb->csum_start + skb->csum_offset. | ||
88 | * | ||
89 | * Device must show its capabilities in dev->features, set | ||
90 | * at device setup time. | ||
91 | * NETIF_F_HW_CSUM - it is clever device, it is able to checksum | ||
92 | * everything. | ||
93 | * NETIF_F_IP_CSUM - device is dumb. It is able to csum only | ||
94 | * TCP/UDP over IPv4. Sigh. Vendors like this | ||
95 | * way by an unknown reason. Though, see comment above | ||
96 | * about CHECKSUM_UNNECESSARY. 8) | ||
97 | * NETIF_F_IPV6_CSUM about as dumb as the last one but does IPv6 instead. | ||
98 | * | ||
99 | * UNNECESSARY: device will do per protocol specific csum. Protocol drivers | ||
100 | * that do not want net to perform the checksum calculation should use | ||
101 | * this flag in their outgoing skbs. | ||
102 | * NETIF_F_FCOE_CRC this indicates the device can do FCoE FC CRC | ||
103 | * offload. Correspondingly, the FCoE protocol driver | ||
104 | * stack should use CHECKSUM_UNNECESSARY. | ||
105 | * | ||
106 | * Any questions? No questions, good. --ANK | ||
107 | */ | ||
108 | |||
109 | struct net_device; | 128 | struct net_device; |
110 | struct scatterlist; | 129 | struct scatterlist; |
111 | struct pipe_inode_info; | 130 | struct pipe_inode_info; |
@@ -703,15 +722,73 @@ unsigned int skb_find_text(struct sk_buff *skb, unsigned int from, | |||
703 | unsigned int to, struct ts_config *config, | 722 | unsigned int to, struct ts_config *config, |
704 | struct ts_state *state); | 723 | struct ts_state *state); |
705 | 724 | ||
706 | void __skb_get_rxhash(struct sk_buff *skb); | 725 | /* |
707 | static inline __u32 skb_get_rxhash(struct sk_buff *skb) | 726 | * Packet hash types specify the type of hash in skb_set_hash. |
727 | * | ||
728 | * Hash types refer to the protocol layer addresses which are used to | ||
729 | * construct a packet's hash. The hashes are used to differentiate or identify | ||
730 | * flows of the protocol layer for the hash type. Hash types are either | ||
731 | * layer-2 (L2), layer-3 (L3), or layer-4 (L4). | ||
732 | * | ||
733 | * Properties of hashes: | ||
734 | * | ||
735 | * 1) Two packets in different flows have different hash values | ||
736 | * 2) Two packets in the same flow should have the same hash value | ||
737 | * | ||
738 | * A hash at a higher layer is considered to be more specific. A driver should | ||
739 | * set the most specific hash possible. | ||
740 | * | ||
741 | * A driver cannot indicate a more specific hash than the layer at which a hash | ||
742 | * was computed. For instance an L3 hash cannot be set as an L4 hash. | ||
743 | * | ||
744 | * A driver may indicate a hash level which is less specific than the | ||
745 | * actual layer the hash was computed on. For instance, a hash computed | ||
746 | * at L4 may be considered an L3 hash. This should only be done if the | ||
747 | * driver can't unambiguously determine that the HW computed the hash at | ||
748 | * the higher layer. Note that the "should" in the second property above | ||
749 | * permits this. | ||
750 | */ | ||
751 | enum pkt_hash_types { | ||
752 | PKT_HASH_TYPE_NONE, /* Undefined type */ | ||
753 | PKT_HASH_TYPE_L2, /* Input: src_MAC, dest_MAC */ | ||
754 | PKT_HASH_TYPE_L3, /* Input: src_IP, dst_IP */ | ||
755 | PKT_HASH_TYPE_L4, /* Input: src_IP, dst_IP, src_port, dst_port */ | ||
756 | }; | ||
757 | |||
758 | static inline void | ||
759 | skb_set_hash(struct sk_buff *skb, __u32 hash, enum pkt_hash_types type) | ||
760 | { | ||
761 | skb->l4_rxhash = (type == PKT_HASH_TYPE_L4); | ||
762 | skb->rxhash = hash; | ||
763 | } | ||
764 | |||
765 | void __skb_get_hash(struct sk_buff *skb); | ||
766 | static inline __u32 skb_get_hash(struct sk_buff *skb) | ||
708 | { | 767 | { |
709 | if (!skb->l4_rxhash) | 768 | if (!skb->l4_rxhash) |
710 | __skb_get_rxhash(skb); | 769 | __skb_get_hash(skb); |
711 | 770 | ||
712 | return skb->rxhash; | 771 | return skb->rxhash; |
713 | } | 772 | } |
714 | 773 | ||
774 | static inline void skb_clear_hash(struct sk_buff *skb) | ||
775 | { | ||
776 | skb->rxhash = 0; | ||
777 | skb->l4_rxhash = 0; | ||
778 | } | ||
779 | |||
780 | static inline void skb_clear_hash_if_not_l4(struct sk_buff *skb) | ||
781 | { | ||
782 | if (!skb->l4_rxhash) | ||
783 | skb_clear_hash(skb); | ||
784 | } | ||
785 | |||
786 | static inline void skb_copy_hash(struct sk_buff *to, const struct sk_buff *from) | ||
787 | { | ||
788 | to->rxhash = from->rxhash; | ||
789 | to->l4_rxhash = from->l4_rxhash; | ||
790 | }; | ||
791 | |||
715 | #ifdef NET_SKBUFF_DATA_USES_OFFSET | 792 | #ifdef NET_SKBUFF_DATA_USES_OFFSET |
716 | static inline unsigned char *skb_end_pointer(const struct sk_buff *skb) | 793 | static inline unsigned char *skb_end_pointer(const struct sk_buff *skb) |
717 | { | 794 | { |