Diffstat (limited to 'include/linux/skbuff.h')
 include/linux/skbuff.h | 139 +++++++++++++++++++++++++++++++++++-------------
 1 file changed, 102 insertions(+), 37 deletions(-)
diff --git a/include/linux/skbuff.h b/include/linux/skbuff.h
index 2741c0c55e83..8c5d6001a923 100644
--- a/include/linux/skbuff.h
+++ b/include/linux/skbuff.h
@@ -137,6 +137,8 @@ struct skb_shared_info {
 	unsigned int	nr_frags;
 	unsigned short	tso_size;
 	unsigned short	tso_segs;
+	unsigned short	ufo_size;
+	unsigned int	ip6_frag_id;
 	struct sk_buff	*frag_list;
 	skb_frag_t	frags[MAX_SKB_FRAGS];
 };
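A minimal sketch (not from this patch) of how the new shared-info field might be consulted; skb_is_ufo is an illustrative helper name, skb_shinfo() is the existing accessor for struct skb_shared_info:

#include <linux/skbuff.h>

/* Illustrative only: a non-zero ufo_size asks the device to segment a
 * large UDP datagram, much as tso_size does for TCP. */
static inline int skb_is_ufo(struct sk_buff *skb)
{
	return skb_shinfo(skb)->ufo_size != 0;
}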
@@ -155,8 +157,6 @@ struct skb_shared_info {
 #define SKB_DATAREF_SHIFT	16
 #define SKB_DATAREF_MASK	((1 << SKB_DATAREF_SHIFT) - 1)
 
-extern struct timeval skb_tv_base;
-
 struct skb_timeval {
 	u32	off_sec;
 	u32	off_usec;
@@ -173,9 +173,8 @@ enum {
  * struct sk_buff - socket buffer
  * @next: Next buffer in list
  * @prev: Previous buffer in list
- * @list: List we are on
  * @sk: Socket we are owned by
- * @tstamp: Time we arrived stored as offset to skb_tv_base
+ * @tstamp: Time we arrived
  * @dev: Device we arrived on/are leaving by
  * @input_dev: Device we arrived on
  * @h: Transport layer header
@@ -192,6 +191,7 @@ enum {
  * @cloned: Head may be cloned (check refcnt to be sure)
  * @nohdr: Payload reference only, must not modify header
  * @pkt_type: Packet class
+ * @fclone: skbuff clone status
  * @ip_summed: Driver fed us an IP checksum
  * @priority: Packet queueing priority
  * @users: User count - see {datagram,tcp}.c
@@ -204,7 +204,9 @@ enum {
  * @destructor: Destruct function
  * @nfmark: Can be used for communication between hooks
  * @nfct: Associated connection, if any
+ * @ipvs_property: skbuff is owned by ipvs
  * @nfctinfo: Relationship of this skb to the connection
+ * @nfct_reasm: netfilter conntrack re-assembly pointer
  * @nf_bridge: Saved data about a bridged frame - see br_netfilter.c
  * @tc_index: Traffic control index
  * @tc_verd: traffic control verdict
@@ -263,15 +265,16 @@ struct sk_buff {
 				nohdr:1,
 				nfctinfo:3;
 	__u8			pkt_type:3,
-				fclone:2;
+				fclone:2,
+				ipvs_property:1;
 	__be16			protocol;
 
 	void			(*destructor)(struct sk_buff *skb);
 #ifdef CONFIG_NETFILTER
 	__u32			nfmark;
 	struct nf_conntrack	*nfct;
-#if defined(CONFIG_IP_VS) || defined(CONFIG_IP_VS_MODULE)
-	__u8			ipvs_property:1;
+#if defined(CONFIG_NF_CONNTRACK) || defined(CONFIG_NF_CONNTRACK_MODULE)
+	struct sk_buff		*nfct_reasm;
 #endif
 #ifdef CONFIG_BRIDGE_NETFILTER
 	struct nf_bridge_info	*nf_bridge;
@@ -304,37 +307,37 @@ struct sk_buff {
 
 extern void __kfree_skb(struct sk_buff *skb);
 extern struct sk_buff *__alloc_skb(unsigned int size,
-				   unsigned int __nocast priority, int fclone);
+				   gfp_t priority, int fclone);
 static inline struct sk_buff *alloc_skb(unsigned int size,
-					unsigned int __nocast priority)
+					gfp_t priority)
 {
 	return __alloc_skb(size, priority, 0);
 }
 
 static inline struct sk_buff *alloc_skb_fclone(unsigned int size,
-					       unsigned int __nocast priority)
+					       gfp_t priority)
 {
 	return __alloc_skb(size, priority, 1);
 }
 
 extern struct sk_buff *alloc_skb_from_cache(kmem_cache_t *cp,
 					    unsigned int size,
-					    unsigned int __nocast priority);
+					    gfp_t priority);
 extern void kfree_skbmem(struct sk_buff *skb);
 extern struct sk_buff *skb_clone(struct sk_buff *skb,
-				 unsigned int __nocast priority);
+				 gfp_t priority);
 extern struct sk_buff *skb_copy(const struct sk_buff *skb,
-				unsigned int __nocast priority);
+				gfp_t priority);
 extern struct sk_buff *pskb_copy(struct sk_buff *skb,
-				 unsigned int __nocast gfp_mask);
+				 gfp_t gfp_mask);
 extern int pskb_expand_head(struct sk_buff *skb,
 				int nhead, int ntail,
-				unsigned int __nocast gfp_mask);
+				gfp_t gfp_mask);
 extern struct sk_buff *skb_realloc_headroom(struct sk_buff *skb,
 					    unsigned int headroom);
 extern struct sk_buff *skb_copy_expand(const struct sk_buff *skb,
 				       int newheadroom, int newtailroom,
-				       unsigned int __nocast priority);
+				       gfp_t priority);
 extern struct sk_buff * skb_pad(struct sk_buff *skb, int pad);
 #define dev_kfree_skb(a)	kfree_skb(a)
 extern void skb_over_panic(struct sk_buff *skb, int len,
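A minimal allocation sketch (not from this patch) under the now gfp_t-typed signatures; build_example_skb and the sizes are illustrative, alloc_skb/skb_reserve/skb_put are the existing helpers:

#include <linux/skbuff.h>
#include <linux/gfp.h>
#include <linux/string.h>

static struct sk_buff *build_example_skb(gfp_t gfp)
{
	/* The priority argument is now a real gfp_t, so GFP_KERNEL,
	 * GFP_ATOMIC, etc. are passed exactly as before but type-checked. */
	struct sk_buff *skb = alloc_skb(128 + 16, gfp);

	if (!skb)
		return NULL;
	skb_reserve(skb, 16);			/* headroom for lower-layer headers */
	memset(skb_put(skb, 64), 0, 64);	/* 64 bytes of zeroed payload */
	return skb;
}

Callers would pass GFP_KERNEL from process context and GFP_ATOMIC from softirq or interrupt context.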
@@ -342,6 +345,11 @@ extern void skb_over_panic(struct sk_buff *skb, int len,
 extern void skb_under_panic(struct sk_buff *skb, int len,
 			    void *here);
 
+extern int skb_append_datato_frags(struct sock *sk, struct sk_buff *skb,
+			int getfrag(void *from, char *to, int offset,
+			int len,int odd, struct sk_buff *skb),
+			void *from, int length);
+
 struct skb_seq_state
 {
 	__u32		lower_offset;
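A sketch (not from this patch) of the callback shape that skb_append_datato_frags() expects; flatbuf_getfrag is illustrative and copies from a flat in-kernel buffer, whereas real callers typically copy from user iovecs:

#include <linux/skbuff.h>
#include <linux/string.h>

/* Illustrative only: copy `len' bytes starting at `offset' of a flat
 * source buffer into the destination fragment. */
static int flatbuf_getfrag(void *from, char *to, int offset, int len,
			   int odd, struct sk_buff *skb)
{
	memcpy(to, (char *)from + offset, len);
	return 0;
}

It would be passed as, e.g., skb_append_datato_frags(sk, skb, flatbuf_getfrag, buf, buflen), where sk, skb, buf and buflen are assumed to exist at the call site.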
@@ -486,7 +494,7 @@ static inline int skb_shared(const struct sk_buff *skb)
  * NULL is returned on a memory allocation failure.
  */
 static inline struct sk_buff *skb_share_check(struct sk_buff *skb,
-					      unsigned int __nocast pri)
+					      gfp_t pri)
 {
 	might_sleep_if(pri & __GFP_WAIT);
 	if (skb_shared(skb)) {
@@ -518,7 +526,7 @@ static inline struct sk_buff *skb_share_check(struct sk_buff *skb,
  * %NULL is returned on a memory allocation failure.
  */
 static inline struct sk_buff *skb_unshare(struct sk_buff *skb,
-					  unsigned int __nocast pri)
+					  gfp_t pri)
 {
 	might_sleep_if(pri & __GFP_WAIT);
 	if (skb_cloned(skb)) {
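A sketch (not from this patch) of the usual pattern around these helpers in a receive path that needs to modify the buffer; rx_make_writable is an illustrative name:

#include <linux/skbuff.h>
#include <linux/gfp.h>

static struct sk_buff *rx_make_writable(struct sk_buff *skb)
{
	/* Drop sharing first; on failure our reference has been released. */
	skb = skb_share_check(skb, GFP_ATOMIC);
	if (!skb)
		return NULL;

	/* Then drop cloning so the header data may safely be written. */
	return skb_unshare(skb, GFP_ATOMIC);
}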
@@ -597,23 +605,23 @@ static inline void skb_queue_head_init(struct sk_buff_head *list)
  */
 
 /**
- *	__skb_queue_head - queue a buffer at the list head
+ *	__skb_queue_after - queue a buffer at the list head
  *	@list: list to use
+ *	@prev: place after this buffer
  *	@newsk: buffer to queue
  *
- *	Queue a buffer at the start of a list. This function takes no locks
+ *	Queue a buffer int the middle of a list. This function takes no locks
  *	and you must therefore hold required locks before calling it.
  *
  *	A buffer cannot be placed on two lists at the same time.
  */
-extern void skb_queue_head(struct sk_buff_head *list, struct sk_buff *newsk);
-static inline void __skb_queue_head(struct sk_buff_head *list,
-				    struct sk_buff *newsk)
+static inline void __skb_queue_after(struct sk_buff_head *list,
+				     struct sk_buff *prev,
+				     struct sk_buff *newsk)
 {
-	struct sk_buff *prev, *next;
-
+	struct sk_buff *next;
 	list->qlen++;
-	prev = (struct sk_buff *)list;
+
 	next = prev->next;
 	newsk->next = next;
 	newsk->prev = prev;
@@ -621,6 +629,23 @@ static inline void __skb_queue_head(struct sk_buff_head *list,
 }
 
 /**
+ *	__skb_queue_head - queue a buffer at the list head
+ *	@list: list to use
+ *	@newsk: buffer to queue
+ *
+ *	Queue a buffer at the start of a list. This function takes no locks
+ *	and you must therefore hold required locks before calling it.
+ *
+ *	A buffer cannot be placed on two lists at the same time.
+ */
+extern void skb_queue_head(struct sk_buff_head *list, struct sk_buff *newsk);
+static inline void __skb_queue_head(struct sk_buff_head *list,
+				    struct sk_buff *newsk)
+{
+	__skb_queue_after(list, (struct sk_buff *)list, newsk);
+}
+
+/**
  *	__skb_queue_tail - queue a buffer at the list tail
  *	@list: list to use
  *	@newsk: buffer to queue
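A sketch (not from this patch) of the locking the unlocked helpers still expect; requeue_at_head is illustrative. __skb_queue_head() is now simply __skb_queue_after() with the list head itself passed as @prev:

#include <linux/skbuff.h>
#include <linux/spinlock.h>

static void requeue_at_head(struct sk_buff_head *list, struct sk_buff *skb)
{
	unsigned long flags;

	spin_lock_irqsave(&list->lock, flags);
	/* Equivalent to __skb_queue_after(list, (struct sk_buff *)list, skb). */
	__skb_queue_head(list, skb);
	spin_unlock_irqrestore(&list->lock, flags);
}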
@@ -1019,7 +1044,7 @@ static inline void __skb_queue_purge(struct sk_buff_head *list)
  *	%NULL is returned in there is no free memory.
  */
 static inline struct sk_buff *__dev_alloc_skb(unsigned int length,
-					      unsigned int __nocast gfp_mask)
+					      gfp_t gfp_mask)
 {
 	struct sk_buff *skb = alloc_skb(length + 16, gfp_mask);
 	if (likely(skb))
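A sketch (not from this patch) of the driver-side pattern; rx_build_skb and its parameters are illustrative. dev_alloc_skb() is the GFP_ATOMIC wrapper around __dev_alloc_skb(), which already reserves the 16 bytes of headroom added above:

#include <linux/skbuff.h>
#include <linux/string.h>

static struct sk_buff *rx_build_skb(const void *frame, unsigned int len)
{
	struct sk_buff *skb = dev_alloc_skb(len);

	if (!skb)
		return NULL;
	memcpy(skb_put(skb, len), frame, len);	/* copy the received frame in */
	return skb;
}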
@@ -1132,8 +1157,8 @@ static inline int skb_can_coalesce(struct sk_buff *skb, int i,
  *	If there is no free memory -ENOMEM is returned, otherwise zero
  *	is returned and the old skb data released.
  */
-extern int __skb_linearize(struct sk_buff *skb, unsigned int __nocast gfp);
-static inline int skb_linearize(struct sk_buff *skb, unsigned int __nocast gfp)
+extern int __skb_linearize(struct sk_buff *skb, gfp_t gfp);
+static inline int skb_linearize(struct sk_buff *skb, gfp_t gfp)
 {
 	return __skb_linearize(skb, gfp);
 }
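A sketch (not from this patch) of the usual call site; make_linear is illustrative. Paged data has to be pulled into the linear area before it can be addressed directly through skb->data:

#include <linux/skbuff.h>
#include <linux/gfp.h>

static int make_linear(struct sk_buff *skb)
{
	/* Only non-linear buffers need the (possibly failing) copy. */
	if (skb_is_nonlinear(skb))
		return skb_linearize(skb, GFP_ATOMIC);
	return 0;
}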
@@ -1197,6 +1222,11 @@ static inline void kunmap_skb_frag(void *vaddr)
 		     prefetch(skb->next), (skb != (struct sk_buff *)(queue));	\
 		     skb = skb->next)
 
+#define skb_queue_reverse_walk(queue, skb) \
+		for (skb = (queue)->prev;					\
+		     prefetch(skb->prev), (skb != (struct sk_buff *)(queue));	\
+		     skb = skb->prev)
+
 
 extern struct sk_buff *skb_recv_datagram(struct sock *sk, unsigned flags,
 					 int noblock, int *err);
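A sketch (not from this patch) of walking a queue newest-to-oldest with the new macro; count_from_tail is illustrative. As with skb_queue_walk(), the queue lock should be held and the current element should not be unlinked inside the loop:

#include <linux/skbuff.h>
#include <linux/spinlock.h>

static unsigned int count_from_tail(struct sk_buff_head *list)
{
	struct sk_buff *skb;
	unsigned int n = 0;
	unsigned long flags;

	spin_lock_irqsave(&list->lock, flags);
	skb_queue_reverse_walk(list, skb)
		n++;
	spin_unlock_irqrestore(&list->lock, flags);

	return n;
}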
@@ -1205,8 +1235,7 @@ extern unsigned int datagram_poll(struct file *file, struct socket *sock,
 extern int skb_copy_datagram_iovec(const struct sk_buff *from,
 				   int offset, struct iovec *to,
 				   int size);
-extern int skb_copy_and_csum_datagram_iovec(const
-					     struct sk_buff *skb,
+extern int skb_copy_and_csum_datagram_iovec(struct sk_buff *skb,
 					     int hlen,
 					     struct iovec *iov);
 extern void skb_free_datagram(struct sock *sk, struct sk_buff *skb);
@@ -1255,10 +1284,6 @@ static inline void skb_get_timestamp(const struct sk_buff *skb, struct timeval *
 {
 	stamp->tv_sec = skb->tstamp.off_sec;
 	stamp->tv_usec = skb->tstamp.off_usec;
-	if (skb->tstamp.off_sec) {
-		stamp->tv_sec += skb_tv_base.tv_sec;
-		stamp->tv_usec += skb_tv_base.tv_usec;
-	}
 }
 
 /**
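A sketch (not from this patch) of the accessors now that skb_tv_base is gone and the stored offsets are the absolute arrival time; stamp_now is illustrative, and __net_timestamp() remains the normal way the stack stamps a buffer:

#include <linux/skbuff.h>
#include <linux/time.h>

static void stamp_now(struct sk_buff *skb)
{
	struct timeval tv;

	do_gettimeofday(&tv);
	skb_set_timestamp(skb, &tv);	/* now a plain copy, no base offset */
}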
@@ -1272,12 +1297,36 @@ static inline void skb_get_timestamp(const struct sk_buff *skb, struct timeval *
  */
 static inline void skb_set_timestamp(struct sk_buff *skb, const struct timeval *stamp)
 {
-	skb->tstamp.off_sec  = stamp->tv_sec - skb_tv_base.tv_sec;
-	skb->tstamp.off_usec = stamp->tv_usec - skb_tv_base.tv_usec;
+	skb->tstamp.off_sec  = stamp->tv_sec;
+	skb->tstamp.off_usec = stamp->tv_usec;
 }
 
 extern void __net_timestamp(struct sk_buff *skb);
 
+extern unsigned int __skb_checksum_complete(struct sk_buff *skb);
+
+/**
+ *	skb_checksum_complete - Calculate checksum of an entire packet
+ *	@skb: packet to process
+ *
+ *	This function calculates the checksum over the entire packet plus
+ *	the value of skb->csum.  The latter can be used to supply the
+ *	checksum of a pseudo header as used by TCP/UDP.  It returns the
+ *	checksum.
+ *
+ *	For protocols that contain complete checksums such as ICMP/TCP/UDP,
+ *	this function can be used to verify that checksum on received
+ *	packets.  In that case the function should return zero if the
+ *	checksum is correct.  In particular, this function will return zero
+ *	if skb->ip_summed is CHECKSUM_UNNECESSARY which indicates that the
+ *	hardware has already verified the correctness of the checksum.
+ */
+static inline unsigned int skb_checksum_complete(struct sk_buff *skb)
+{
+	return skb->ip_summed != CHECKSUM_UNNECESSARY &&
+		__skb_checksum_complete(skb);
+}
+
 #ifdef CONFIG_NETFILTER
 static inline void nf_conntrack_put(struct nf_conntrack *nfct)
 {
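A sketch (not from this patch) of a receive-side check for a protocol that carries a complete checksum; rx_verify_csum is illustrative and assumes skb->csum already holds any pseudo-header sum:

#include <linux/skbuff.h>
#include <linux/errno.h>

static int rx_verify_csum(struct sk_buff *skb)
{
	/* Zero means the checksum is good, or the hardware already
	 * verified it (CHECKSUM_UNNECESSARY). */
	if (skb_checksum_complete(skb))
		return -EINVAL;		/* caller drops the packet */
	return 0;
}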
@@ -1289,10 +1338,26 @@ static inline void nf_conntrack_get(struct nf_conntrack *nfct)
 	if (nfct)
 		atomic_inc(&nfct->use);
 }
+#if defined(CONFIG_NF_CONNTRACK) || defined(CONFIG_NF_CONNTRACK_MODULE)
+static inline void nf_conntrack_get_reasm(struct sk_buff *skb)
+{
+	if (skb)
+		atomic_inc(&skb->users);
+}
+static inline void nf_conntrack_put_reasm(struct sk_buff *skb)
+{
+	if (skb)
+		kfree_skb(skb);
+}
+#endif
 static inline void nf_reset(struct sk_buff *skb)
 {
 	nf_conntrack_put(skb->nfct);
 	skb->nfct = NULL;
+#if defined(CONFIG_NF_CONNTRACK) || defined(CONFIG_NF_CONNTRACK_MODULE)
+	nf_conntrack_put_reasm(skb->nfct_reasm);
+	skb->nfct_reasm = NULL;
+#endif
 }
 
 #ifdef CONFIG_BRIDGE_NETFILTER
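A sketch (not from this patch) of the caller-visible effect: nf_reset() now also drops the conntrack reassembly buffer when nf_conntrack is configured; detach_netfilter_state is an illustrative wrapper:

#include <linux/skbuff.h>

static void detach_netfilter_state(struct sk_buff *skb)
{
#ifdef CONFIG_NETFILTER
	/* Releases skb->nfct and, with NF_CONNTRACK, skb->nfct_reasm. */
	nf_reset(skb);
#endif
}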