Diffstat (limited to 'include/linux/skbuff.h')
-rw-r--r--	include/linux/skbuff.h	25
1 file changed, 10 insertions, 15 deletions
diff --git a/include/linux/skbuff.h b/include/linux/skbuff.h
index ae836fded53..124f90cd5a3 100644
--- a/include/linux/skbuff.h
+++ b/include/linux/skbuff.h
@@ -190,9 +190,6 @@ struct skb_shared_info {
 	atomic_t	dataref;
 	unsigned short	nr_frags;
 	unsigned short	gso_size;
-#ifdef CONFIG_HAS_DMA
-	dma_addr_t	dma_head;
-#endif
 	/* Warning: this field is not always filled in (UFO)! */
 	unsigned short	gso_segs;
 	unsigned short  gso_type;
@@ -201,9 +198,6 @@ struct skb_shared_info {
 	struct sk_buff	*frag_list;
 	struct skb_shared_hwtstamps hwtstamps;
 	skb_frag_t	frags[MAX_SKB_FRAGS];
-#ifdef CONFIG_HAS_DMA
-	dma_addr_t	dma_maps[MAX_SKB_FRAGS];
-#endif
 	/* Intermediate layers must ensure that destructor_arg
 	 * remains valid until skb destructor */
 	void *		destructor_arg;
@@ -315,22 +309,23 @@ struct sk_buff {
 	struct sk_buff		*next;
 	struct sk_buff		*prev;
 
-	struct sock		*sk;
 	ktime_t			tstamp;
+
+	struct sock		*sk;
 	struct net_device	*dev;
 
-	unsigned long		_skb_dst;
-#ifdef CONFIG_XFRM
-	struct sec_path		*sp;
-#endif
 	/*
 	 * This is the control buffer. It is free to use for every
 	 * layer. Please put your private variables there. If you
 	 * want to keep them across layers you have to do a skb_clone()
 	 * first. This is owned by whoever has the skb queued ATM.
 	 */
-	char			cb[48];
+	char			cb[48] __aligned(8);
 
+	unsigned long		_skb_dst;
+#ifdef CONFIG_XFRM
+	struct sec_path		*sp;
+#endif
 	unsigned int		len,
 				data_len;
 	__u16			mac_len,
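
The comment kept above cb[] describes the per-layer control buffer, and the new __aligned(8) lets layers safely overlay structures with 64-bit members on it. A minimal sketch of that common pattern, with illustrative names (my_layer_cb and MY_LAYER_CB are not part of this patch):

/* Hypothetical per-layer private state overlaid on skb->cb[]. */
struct my_layer_cb {
	u64	enqueue_ns;	/* 8-byte member; relies on cb[] being 8-byte aligned */
	u32	flags;
};

#define MY_LAYER_CB(skb)	((struct my_layer_cb *)((skb)->cb))

static void my_layer_enqueue(struct sk_buff *skb)
{
	/* cb[] is 48 bytes; fail the build if the private state outgrows it. */
	BUILD_BUG_ON(sizeof(struct my_layer_cb) > sizeof(skb->cb));

	MY_LAYER_CB(skb)->enqueue_ns = ktime_to_ns(ktime_get());
	MY_LAYER_CB(skb)->flags = 0;
}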
@@ -354,8 +349,8 @@ struct sk_buff {
 				ipvs_property:1,
 				peeked:1,
 				nf_trace:1;
-	__be16			protocol:16;
 	kmemcheck_bitfield_end(flags1);
+	__be16			protocol;
 
 	void			(*destructor)(struct sk_buff *skb);
 #if defined(CONFIG_NF_CONNTRACK) || defined(CONFIG_NF_CONNTRACK_MODULE)
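
protocol is a full __be16, not a flag bit, so it moves out of the kmemcheck_bitfield_begin(flags1)/kmemcheck_bitfield_end(flags1) range, which kmemcheck treats as a single initialized-as-a-unit region. A rough sketch of how that region is typically handled at allocation time (the real annotation is done in __alloc_skb() in net/core/skbuff.c; this is not the exact sequence):

/* Sketch only, to illustrate the flags1 region. */
static void init_skb_flags(struct sk_buff *skb)
{
	/* Zero everything up to the tail pointer, then tell kmemcheck that
	 * the bits between kmemcheck_bitfield_begin(flags1) and
	 * kmemcheck_bitfield_end(flags1) are initialized as one unit. */
	memset(skb, 0, offsetof(struct sk_buff, tail));
	kmemcheck_annotate_bitfield(skb, flags1);

	/* protocol is a plain __be16, so after this patch it sits outside the
	 * annotated range and is simply assigned. */
	skb->protocol = htons(ETH_P_IP);	/* illustrative value */
}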
@@ -738,7 +733,7 @@ static inline struct sk_buff *skb_unshare(struct sk_buff *skb,
 }
 
 /**
- *	skb_peek
+ *	skb_peek - peek at the head of an &sk_buff_head
  *	@list_: list to peek at
  *
  *	Peek an &sk_buff. Unlike most other operations you _MUST_
@@ -759,7 +754,7 @@ static inline struct sk_buff *skb_peek(struct sk_buff_head *list_)
 }
 
 /**
- *	skb_peek_tail
+ *	skb_peek_tail - peek at the tail of an &sk_buff_head
  *	@list_: list to peek at
  *
  *	Peek an &sk_buff. Unlike most other operations you _MUST_
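
The two hunks above only add the missing one-line kerneldoc summaries; skb_peek()/skb_peek_tail() behaviour is unchanged. A hedged usage sketch of the rule the comments state, with an illustrative caller name: the peeked skb is not unlinked and no reference is taken, so the queue lock must be held while the pointer is used and the skb must not be freed through it.

static unsigned int peek_head_len(struct sk_buff_head *queue)
{
	struct sk_buff *skb;
	unsigned long flags;
	unsigned int len = 0;

	/* Hold the queue lock for as long as the peeked pointer is used:
	 * skb_peek() neither unlinks the skb nor takes a reference. */
	spin_lock_irqsave(&queue->lock, flags);
	skb = skb_peek(queue);		/* NULL if the queue is empty */
	if (skb)
		len = skb->len;
	spin_unlock_irqrestore(&queue->lock, flags);

	return len;
}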