Diffstat (limited to 'include/linux/skbuff.h')

-rw-r--r--  include/linux/skbuff.h | 92
1 file changed, 73 insertions(+), 19 deletions(-)
diff --git a/include/linux/skbuff.h b/include/linux/skbuff.h
index 16eef03ce0eb..85577a4ffa61 100644
--- a/include/linux/skbuff.h
+++ b/include/linux/skbuff.h
@@ -34,8 +34,9 @@
 #define HAVE_ALIGNABLE_SKB	/* Ditto 8) */
 
 #define CHECKSUM_NONE 0
-#define CHECKSUM_HW 1
+#define CHECKSUM_PARTIAL 1
 #define CHECKSUM_UNNECESSARY 2
+#define CHECKSUM_COMPLETE 3
 
 #define SKB_DATA_ALIGN(X)	(((X) + (SMP_CACHE_BYTES - 1)) & \
 				 ~(SMP_CACHE_BYTES - 1))
@@ -56,17 +57,17 @@
  *	Apparently with secret goal to sell you new device, when you
  *	will add new protocol to your host. F.e. IPv6. 8)
  *
- *	HW: the most generic way. Device supplied checksum of _all_
+ *	COMPLETE: the most generic way. Device supplied checksum of _all_
  *	    the packet as seen by netif_rx in skb->csum.
  *	    NOTE: Even if device supports only some protocols, but
- *	    is able to produce some skb->csum, it MUST use HW,
+ *	    is able to produce some skb->csum, it MUST use COMPLETE,
  *	    not UNNECESSARY.
  *
  * B. Checksumming on output.
  *
  *	NONE: skb is checksummed by protocol or csum is not required.
  *
- *	HW: device is required to csum packet as seen by hard_start_xmit
+ *	PARTIAL: device is required to csum packet as seen by hard_start_xmit
  *	from skb->h.raw to the end and to record the checksum
  *	at skb->h.raw+skb->csum.
  *
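For context, the renamed values keep their old numeric meaning, so this is purely a naming change. A driver that used to set CHECKSUM_HW would now look roughly like this on each path (a minimal sketch; hw_rx_csum() and hw_tx_csum() are made-up helpers standing in for descriptor access):

	/* RX: device checksummed the whole frame as seen by netif_rx() */
	skb->csum = hw_rx_csum(desc);		/* hypothetical helper */
	skb->ip_summed = CHECKSUM_COMPLETE;

	/* TX: stack asks the device to finish the checksum, starting at
	 * skb->h.raw and storing the result at skb->h.raw + skb->csum */
	if (skb->ip_summed == CHECKSUM_PARTIAL)
		hw_tx_csum(desc, skb->h.raw - skb->data, skb->csum);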
@@ -171,7 +172,15 @@ enum {
 
 enum {
 	SKB_GSO_TCPV4 = 1 << 0,
-	SKB_GSO_UDPV4 = 1 << 1,
+	SKB_GSO_UDP = 1 << 1,
+
+	/* This indicates the skb is from an untrusted source. */
+	SKB_GSO_DODGY = 1 << 2,
+
+	/* This indicates the tcp segment has CWR set. */
+	SKB_GSO_TCP_ECN = 1 << 3,
+
+	SKB_GSO_TCPV6 = 1 << 4,
 };
 
 /**
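To illustrate the new SKB_GSO_DODGY bit: a segmentation path can refuse to hand an untrusted GSO packet straight to hardware. A sketch, assuming a surrounding function that already holds a features mask (NETIF_F_TSO is the existing TSO feature flag):

	/* don't trust gso_size/gso_segs computed by an untrusted source */
	if (skb_shinfo(skb)->gso_type & SKB_GSO_DODGY)
		features &= ~NETIF_F_TSO;	/* fall back to software GSO */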
@@ -596,6 +605,14 @@ static inline __u32 skb_queue_len(const struct sk_buff_head *list_)
 	return list_->qlen;
 }
 
+/*
+ * This function creates a split out lock class for each invocation;
+ * this is needed for now since a whole lot of users of the skb-queue
+ * infrastructure in drivers have different locking usage (in hardirq)
+ * than the networking core (in softirq only). In the long run either the
+ * network layer or drivers should need annotation to consolidate the
+ * main types of usage into 3 classes.
+ */
 static inline void skb_queue_head_init(struct sk_buff_head *list)
 {
 	spin_lock_init(&list->lock);
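The split-out class works because skb_queue_head_init() is inline: with lockdep enabled, spin_lock_init() expands to a static lock_class_key at each call site, so every caller gets its own class. A sketch (the driver names are hypothetical):

	struct mydrv_priv {			/* hypothetical driver */
		struct sk_buff_head txq;
	};

	static void mydrv_init(struct mydrv_priv *p)
	{
		skb_queue_head_init(&p->txq);	/* own lockdep class for this site */
	}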
@@ -1024,6 +1041,21 @@ static inline int pskb_trim(struct sk_buff *skb, unsigned int len)
 }
 
 /**
+ * pskb_trim_unique - remove end from a paged unique (not cloned) buffer
+ * @skb: buffer to alter
+ * @len: new length
+ *
+ * This is identical to pskb_trim except that the caller knows that
+ * the skb is not cloned so we should never get an error due to out-
+ * of-memory.
+ */
+static inline void pskb_trim_unique(struct sk_buff *skb, unsigned int len)
+{
+	int err = pskb_trim(skb, len);
+	BUG_ON(err);
+}
+
+/**
  *	skb_orphan - orphan a buffer
  *	@skb: buffer to orphan
  *
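A minimal usage sketch, assuming the skb was just allocated and therefore cannot be cloned (len and pad are made-up sizes):

	struct sk_buff *skb = alloc_skb(len + pad, GFP_ATOMIC);

	if (skb) {
		skb_put(skb, len + pad);
		pskb_trim_unique(skb, len);	/* cannot fail: skb is not cloned */
	}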
@@ -1055,9 +1087,8 @@ static inline void __skb_queue_purge(struct sk_buff_head *list)
 		kfree_skb(skb);
 }
 
-#ifndef CONFIG_HAVE_ARCH_DEV_ALLOC_SKB
 /**
- *	__dev_alloc_skb - allocate an skbuff for sending
+ *	__dev_alloc_skb - allocate an skbuff for receiving
  *	@length: length to allocate
  *	@gfp_mask: get_free_pages mask, passed to alloc_skb
  *
@@ -1066,7 +1097,7 @@ static inline void __skb_queue_purge(struct sk_buff_head *list)
  *	the headroom they think they need without accounting for the
  *	built in space. The built in space is used for optimisations.
  *
- *	%NULL is returned in there is no free memory.
+ *	%NULL is returned if there is no free memory.
  */
 static inline struct sk_buff *__dev_alloc_skb(unsigned int length,
 					      gfp_t gfp_mask)
@@ -1076,12 +1107,9 @@ static inline struct sk_buff *__dev_alloc_skb(unsigned int length,
 	skb_reserve(skb, NET_SKB_PAD);
 	return skb;
 }
-#else
-extern struct sk_buff *__dev_alloc_skb(unsigned int length, int gfp_mask);
-#endif
 
 /**
- *	dev_alloc_skb - allocate an skbuff for sending
+ *	dev_alloc_skb - allocate an skbuff for receiving
  *	@length: length to allocate
  *
  *	Allocate a new &sk_buff and assign it a usage count of one. The
@@ -1089,7 +1117,7 @@ extern struct sk_buff *__dev_alloc_skb(unsigned int length, int gfp_mask);
  *	the headroom they think they need without accounting for the
  *	built in space. The built in space is used for optimisations.
  *
- *	%NULL is returned in there is no free memory. Although this function
+ *	%NULL is returned if there is no free memory. Although this function
  *	allocates memory it can be called from an interrupt.
  */
 static inline struct sk_buff *dev_alloc_skb(unsigned int length)
@@ -1097,6 +1125,28 @@ static inline struct sk_buff *dev_alloc_skb(unsigned int length)
 	return __dev_alloc_skb(length, GFP_ATOMIC);
 }
 
+extern struct sk_buff *__netdev_alloc_skb(struct net_device *dev,
+		unsigned int length, gfp_t gfp_mask);
+
+/**
+ *	netdev_alloc_skb - allocate an skbuff for rx on a specific device
+ *	@dev: network device to receive on
+ *	@length: length to allocate
+ *
+ *	Allocate a new &sk_buff and assign it a usage count of one. The
+ *	buffer has unspecified headroom built in. Users should allocate
+ *	the headroom they think they need without accounting for the
+ *	built in space. The built in space is used for optimisations.
+ *
+ *	%NULL is returned if there is no free memory. Although this function
+ *	allocates memory it can be called from an interrupt.
+ */
+static inline struct sk_buff *netdev_alloc_skb(struct net_device *dev,
+		unsigned int length)
+{
+	return __netdev_alloc_skb(dev, length, GFP_ATOMIC);
+}
+
 /**
  *	skb_cow - copy header of skb when it is required
  *	@skb: buffer to cow
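A sketch of the intended receive-refill use, with RX_BUF_LEN standing in for a driver's buffer size (NET_IP_ALIGN is the existing alignment constant):

	struct sk_buff *skb = netdev_alloc_skb(dev, RX_BUF_LEN + NET_IP_ALIGN);

	if (skb) {
		skb_reserve(skb, NET_IP_ALIGN);	/* align the IP header */
		/* map skb->data for DMA and post it to the RX ring */
	}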
@@ -1212,14 +1262,14 @@ static inline int skb_linearize_cow(struct sk_buff *skb)
  *	@len: length of data pulled
  *
  *	After doing a pull on a received packet, you need to call this to
- *	update the CHECKSUM_HW checksum, or set ip_summed to CHECKSUM_NONE
- *	so that it can be recomputed from scratch.
+ *	update the CHECKSUM_COMPLETE checksum, or set ip_summed to
+ *	CHECKSUM_NONE so that it can be recomputed from scratch.
  */
 
 static inline void skb_postpull_rcsum(struct sk_buff *skb,
 				      const void *start, unsigned int len)
 {
-	if (skb->ip_summed == CHECKSUM_HW)
+	if (skb->ip_summed == CHECKSUM_COMPLETE)
 		skb->csum = csum_sub(skb->csum, csum_partial(start, len, 0));
 }
 
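The typical call order, sketched with a made-up header length HDR_LEN: remember where the pulled bytes started, pull, then fix up the checksum:

	const void *hdr = skb->data;

	__skb_pull(skb, HDR_LEN);
	skb_postpull_rcsum(skb, hdr, HDR_LEN);	/* no-op unless CHECKSUM_COMPLETE */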
@@ -1238,7 +1288,7 @@ static inline int pskb_trim_rcsum(struct sk_buff *skb, unsigned int len)
 {
 	if (likely(len >= skb->len))
 		return 0;
-	if (skb->ip_summed == CHECKSUM_HW)
+	if (skb->ip_summed == CHECKSUM_COMPLETE)
 		skb->ip_summed = CHECKSUM_NONE;
 	return __pskb_trim(skb, len);
 }
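For example, trimming Ethernet padding once the real length is known from the IP header (ip_len and the drop label are assumptions about the caller):

	if (pskb_trim_rcsum(skb, ip_len))
		goto drop;		/* __pskb_trim() failed on a cloned skb */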
@@ -1298,8 +1348,7 @@ extern void skb_copy_and_csum_dev(const struct sk_buff *skb, u8 *to);
 extern void	       skb_split(struct sk_buff *skb,
 				 struct sk_buff *skb1, const u32 len);
 
-extern void	       skb_release_data(struct sk_buff *skb);
-extern struct sk_buff *skb_segment(struct sk_buff *skb, int sg);
+extern struct sk_buff *skb_segment(struct sk_buff *skb, int features);
 
 static inline void *skb_header_pointer(const struct sk_buff *skb, int offset,
 					int len, void *buffer)
@@ -1445,5 +1494,10 @@ static inline void skb_init_secmark(struct sk_buff *skb)
 { }
 #endif
 
+static inline int skb_is_gso(const struct sk_buff *skb)
+{
+	return skb_shinfo(skb)->gso_size;
+}
+
 #endif	/* __KERNEL__ */
 #endif	/* _LINUX_SKBUFF_H */
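The new helper simply tests gso_size, so callers can branch on GSO status without open-coding the skb_shinfo() access. A sketch, with mss assumed to be a local in the caller:

	/* choose the per-segment payload size for this packet */
	if (skb_is_gso(skb))
		mss = skb_shinfo(skb)->gso_size;	/* non-zero for GSO packets */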