Diffstat (limited to 'include/linux/skbuff.h')
 include/linux/skbuff.h | 56 ++++++++++++++++++++++++-----------------------
 1 file changed, 27 insertions(+), 29 deletions(-)
diff --git a/include/linux/skbuff.h b/include/linux/skbuff.h
index bcdd6606f468..124f90cd5a38 100644
--- a/include/linux/skbuff.h
+++ b/include/linux/skbuff.h
@@ -190,9 +190,6 @@ struct skb_shared_info {
 	atomic_t	dataref;
 	unsigned short	nr_frags;
 	unsigned short	gso_size;
-#ifdef CONFIG_HAS_DMA
-	dma_addr_t	dma_head;
-#endif
 	/* Warning: this field is not always filled in (UFO)! */
 	unsigned short	gso_segs;
 	unsigned short	gso_type;
@@ -201,9 +198,6 @@ struct skb_shared_info {
 	struct sk_buff	*frag_list;
 	struct skb_shared_hwtstamps hwtstamps;
 	skb_frag_t	frags[MAX_SKB_FRAGS];
-#ifdef CONFIG_HAS_DMA
-	dma_addr_t	dma_maps[MAX_SKB_FRAGS];
-#endif
 	/* Intermediate layers must ensure that destructor_arg
 	 * remains valid until skb destructor */
 	void *		destructor_arg;
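
Note: with dma_head and dma_maps gone from skb_shared_info, per-skb DMA state
becomes the driver's responsibility again. A minimal sketch of the driver-private
replacement, assuming a hypothetical my_tx_buffer kept alongside each TX
descriptor (the struct name and layout are illustrative, not part of this patch):

	/* Hypothetical driver-private TX bookkeeping. */
	struct my_tx_buffer {
		struct sk_buff	*skb;
		dma_addr_t	dma;			 /* linear-area mapping */
		dma_addr_t	frag_dma[MAX_SKB_FRAGS]; /* one per fragment */
	};
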
@@ -299,7 +293,7 @@ typedef unsigned char *sk_buff_data_t;
  *	@nfctinfo: Relationship of this skb to the connection
  *	@nfct_reasm: netfilter conntrack re-assembly pointer
  *	@nf_bridge: Saved data about a bridged frame - see br_netfilter.c
- *	@iif: ifindex of device we arrived on
+ *	@skb_iif: ifindex of device we arrived on
  *	@queue_mapping: Queue mapping for multiqueue devices
  *	@tc_index: Traffic control index
  *	@tc_verd: traffic control verdict
@@ -315,22 +309,23 @@ struct sk_buff {
 	struct sk_buff		*next;
 	struct sk_buff		*prev;
 
-	struct sock		*sk;
 	ktime_t			tstamp;
+
+	struct sock		*sk;
 	struct net_device	*dev;
 
-	unsigned long		_skb_dst;
-#ifdef CONFIG_XFRM
-	struct sec_path		*sp;
-#endif
 	/*
 	 * This is the control buffer. It is free to use for every
 	 * layer. Please put your private variables there. If you
 	 * want to keep them across layers you have to do a skb_clone()
 	 * first. This is owned by whoever has the skb queued ATM.
 	 */
-	char			cb[48];
+	char			cb[48] __aligned(8);
 
+	unsigned long		_skb_dst;
+#ifdef CONFIG_XFRM
+	struct sec_path		*sp;
+#endif
 	unsigned int		len,
 				data_len;
 	__u16			mac_len,
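
Note: the new __aligned(8) on cb[] matters because layers overlay private
structs on the control buffer, and those structs may contain u64s or pointers.
A minimal sketch of the usual overlay pattern, with illustrative names:

	struct my_layer_cb {
		u64	deadline;	/* 8-byte member, now safely aligned */
		void	*priv;
	};
	#define MY_CB(skb) ((struct my_layer_cb *)&((skb)->cb[0]))

	/* typically checked once, in function scope, e.g. at init: */
	BUILD_BUG_ON(sizeof(struct my_layer_cb) > sizeof(((struct sk_buff *)0)->cb));
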
@@ -354,8 +349,8 @@ struct sk_buff {
 				ipvs_property:1,
 				peeked:1,
 				nf_trace:1;
-	__be16			protocol:16;
 	kmemcheck_bitfield_end(flags1);
+	__be16			protocol;
 
 	void			(*destructor)(struct sk_buff *skb);
 #if defined(CONFIG_NF_CONNTRACK) || defined(CONFIG_NF_CONNTRACK_MODULE)
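
Note: moving protocol out of the bitfield block restores a plain __be16; a
16-bit bitfield cannot have its address taken and defeats sparse's endianness
checking. Callers are unchanged. A typical test, for illustration:

	if (skb->protocol == htons(ETH_P_IP))
		/* IPv4 path */;
	else if (skb->protocol == htons(ETH_P_IPV6))
		/* IPv6 path */;
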
@@ -366,7 +361,7 @@ struct sk_buff {
 	struct nf_bridge_info	*nf_bridge;
 #endif
 
-	int			iif;
+	int			skb_iif;
 #ifdef CONFIG_NET_SCHED
 	__u16			tc_index;	/* traffic control index */
 #ifdef CONFIG_NET_CLS_ACT
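
Note: the iif field is renamed to skb_iif; callers update mechanically. As a
sketch only (reference counting shown, surrounding locking rules elided),
resolving the ingress device from the stored ifindex:

	struct net_device *in_dev;

	in_dev = dev_get_by_index(dev_net(skb->dev), skb->skb_iif);
	if (in_dev) {
		/* ... inspect the ingress device ... */
		dev_put(in_dev);
	}
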
@@ -389,8 +384,10 @@ struct sk_buff {
 #ifdef CONFIG_NETWORK_SECMARK
 	__u32			secmark;
 #endif
-
-	__u32			mark;
+	union {
+		__u32		mark;
+		__u32		dropcount;
+	};
 
 	__u16			vlan_tci;
 
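
Note: dropcount shares storage with mark, so it is only meaningful where mark
is dead, i.e. once the skb sits on a socket receive queue. A hedged sketch of
the intended enqueue-side use (sk being the receiving socket):

	/* snapshot the socket's drop counter into the queued skb so a
	 * receiver can later detect gaps */
	skb->dropcount = atomic_read(&sk->sk_drops);
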
@@ -414,14 +411,6 @@ struct sk_buff {
 
 #include <asm/system.h>
 
-#ifdef CONFIG_HAS_DMA
-#include <linux/dma-mapping.h>
-extern int skb_dma_map(struct device *dev, struct sk_buff *skb,
-		       enum dma_data_direction dir);
-extern void skb_dma_unmap(struct device *dev, struct sk_buff *skb,
-			  enum dma_data_direction dir);
-#endif
-
 static inline struct dst_entry *skb_dst(const struct sk_buff *skb)
 {
 	return (struct dst_entry *)skb->_skb_dst;
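
Note: with skb_dma_map()/skb_dma_unmap() removed, drivers open-code the mapping
against their own state. A minimal sketch assuming the hypothetical
my_tx_buffer above; error unwinding of already-mapped fragments is omitted:

	static int my_map_skb(struct device *dev, struct sk_buff *skb,
			      struct my_tx_buffer *tx)
	{
		const skb_frag_t *frag;
		int i;

		tx->dma = dma_map_single(dev, skb->data, skb_headlen(skb),
					 DMA_TO_DEVICE);
		if (dma_mapping_error(dev, tx->dma))
			return -ENOMEM;

		for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
			frag = &skb_shinfo(skb)->frags[i];
			tx->frag_dma[i] = dma_map_page(dev, frag->page,
						       frag->page_offset,
						       frag->size,
						       DMA_TO_DEVICE);
			if (dma_mapping_error(dev, tx->frag_dma[i]))
				return -ENOMEM;	/* real code unmaps 0..i-1 */
		}
		return 0;
	}
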
@@ -489,8 +478,7 @@ extern int skb_append_datato_frags(struct sock *sk, struct sk_buff *skb,
 					int len,int odd, struct sk_buff *skb),
 			void *from, int length);
 
-struct skb_seq_state
-{
+struct skb_seq_state {
 	__u32		lower_offset;
 	__u32		upper_offset;
 	__u32		frag_idx;
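
Note: only the brace style changes here; skb_seq_state is still driven by
skb_prepare_seq_read()/skb_seq_read(). A short usage sketch walking an skb's
payload, paged fragments included:

	struct skb_seq_state st;
	unsigned int consumed = 0, len;
	const u8 *data;

	skb_prepare_seq_read(skb, 0, skb->len, &st);
	while ((len = skb_seq_read(consumed, &data, &st)) != 0) {
		/* process len bytes at data */
		consumed += len;
	}
	/* skb_abort_seq_read(&st) is only needed when stopping early */
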
@@ -745,7 +733,7 @@ static inline struct sk_buff *skb_unshare(struct sk_buff *skb,
 }
 
 /**
- *	skb_peek
+ *	skb_peek - peek at the head of an &sk_buff_head
  *	@list_: list to peek at
  *
  *	Peek an &sk_buff. Unlike most other operations you _MUST_
@@ -766,7 +754,7 @@ static inline struct sk_buff *skb_peek(struct sk_buff_head *list_)
 }
 
 /**
- *	skb_peek_tail
+ *	skb_peek_tail - peek at the tail of an &sk_buff_head
  *	@list_: list to peek at
  *
  *	Peek an &sk_buff. Unlike most other operations you _MUST_
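
Note: the kernel-doc one-liners now say what each helper does. As the body
text warns, the caller must hold the queue lock for the peek to be
meaningful; a minimal sketch:

	unsigned long flags;
	struct sk_buff *skb;

	spin_lock_irqsave(&list->lock, flags);
	skb = skb_peek(list);
	if (skb)
		/* examine skb; it is still on the list, do not free it */;
	spin_unlock_irqrestore(&list->lock, flags);
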
@@ -1489,6 +1477,16 @@ static inline struct sk_buff *netdev_alloc_skb(struct net_device *dev,
 	return __netdev_alloc_skb(dev, length, GFP_ATOMIC);
 }
 
+static inline struct sk_buff *netdev_alloc_skb_ip_align(struct net_device *dev,
+		unsigned int length)
+{
+	struct sk_buff *skb = netdev_alloc_skb(dev, length + NET_IP_ALIGN);
+
+	if (NET_IP_ALIGN && skb)
+		skb_reserve(skb, NET_IP_ALIGN);
+	return skb;
+}
+
 extern struct page *__netdev_alloc_page(struct net_device *dev, gfp_t gfp_mask);
 
 /**