Diffstat (limited to 'include/linux/skbuff.h')
-rw-r--r-- | include/linux/skbuff.h | 1253 |
1 files changed, 1253 insertions, 0 deletions
diff --git a/include/linux/skbuff.h b/include/linux/skbuff.h
new file mode 100644
index 000000000000..aa35797ebfbf
--- /dev/null
+++ b/include/linux/skbuff.h
@@ -0,0 +1,1253 @@
1 | /* | ||
2 | * Definitions for the 'struct sk_buff' memory handlers. | ||
3 | * | ||
4 | * Authors: | ||
5 | * Alan Cox, <gw4pts@gw4pts.ampr.org> | ||
6 | * Florian La Roche, <rzsfl@rz.uni-sb.de> | ||
7 | * | ||
8 | * This program is free software; you can redistribute it and/or | ||
9 | * modify it under the terms of the GNU General Public License | ||
10 | * as published by the Free Software Foundation; either version | ||
11 | * 2 of the License, or (at your option) any later version. | ||
12 | */ | ||
13 | |||
14 | #ifndef _LINUX_SKBUFF_H | ||
15 | #define _LINUX_SKBUFF_H | ||
16 | |||
17 | #include <linux/config.h> | ||
18 | #include <linux/kernel.h> | ||
19 | #include <linux/compiler.h> | ||
20 | #include <linux/time.h> | ||
21 | #include <linux/cache.h> | ||
22 | |||
23 | #include <asm/atomic.h> | ||
24 | #include <asm/types.h> | ||
25 | #include <linux/spinlock.h> | ||
26 | #include <linux/mm.h> | ||
27 | #include <linux/highmem.h> | ||
28 | #include <linux/poll.h> | ||
29 | #include <linux/net.h> | ||
30 | #include <net/checksum.h> | ||
31 | |||
32 | #define HAVE_ALLOC_SKB /* For the drivers to know */ | ||
33 | #define HAVE_ALIGNABLE_SKB /* Ditto 8) */ | ||
34 | #define SLAB_SKB /* Slabified skbuffs */ | ||
35 | |||
36 | #define CHECKSUM_NONE 0 | ||
37 | #define CHECKSUM_HW 1 | ||
38 | #define CHECKSUM_UNNECESSARY 2 | ||
39 | |||
40 | #define SKB_DATA_ALIGN(X) (((X) + (SMP_CACHE_BYTES - 1)) & \ | ||
41 | ~(SMP_CACHE_BYTES - 1)) | ||
42 | #define SKB_MAX_ORDER(X, ORDER) (((PAGE_SIZE << (ORDER)) - (X) - \ | ||
43 | sizeof(struct skb_shared_info)) & \ | ||
44 | ~(SMP_CACHE_BYTES - 1)) | ||
45 | #define SKB_MAX_HEAD(X) (SKB_MAX_ORDER((X), 0)) | ||
46 | #define SKB_MAX_ALLOC (SKB_MAX_ORDER(0, 2)) | ||
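
/*
 * Worked example (illustrative): with SMP_CACHE_BYTES == 32,
 * SKB_DATA_ALIGN(1500) == 1504, i.e. the size is rounded up to the next
 * cache-line multiple so that the struct skb_shared_info placed after the
 * data starts on a cache-line boundary.
 */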
47 | |||
48 | /* A. Checksumming of received packets by device. | ||
49 | * | ||
50 | * NONE: device failed to checksum this packet. | ||
51 | * skb->csum is undefined. | ||
52 | * | ||
53 | * UNNECESSARY: device parsed the packet and claims to have verified | ||
54 | * the checksum. skb->csum is undefined. | ||
55 | * This is a bad option, but, unfortunately, many vendors do this. | ||
56 | * Apparently with the secret goal of selling you a new device when you | ||
57 | * add a new protocol to your host, e.g. IPv6. 8) | ||
58 | * | ||
59 | * HW: the most generic way. Device supplied checksum of _all_ | ||
60 | * the packet as seen by netif_rx in skb->csum. | ||
61 | * NOTE: Even if a device supports only some protocols but is able | ||
62 | * to produce some skb->csum, it MUST use HW, | ||
63 | * not UNNECESSARY. | ||
64 | * | ||
65 | * B. Checksumming on output. | ||
66 | * | ||
67 | * NONE: skb is checksummed by protocol or csum is not required. | ||
68 | * | ||
69 | * HW: device is required to csum packet as seen by hard_start_xmit | ||
70 | * from skb->h.raw to the end and to record the checksum | ||
71 | * at skb->h.raw+skb->csum. | ||
72 | * | ||
73 | * Device must show its capabilities in dev->features, set | ||
74 | * at device setup time. | ||
75 | * NETIF_F_HW_CSUM - a clever device, able to checksum | ||
76 | * everything. | ||
77 | * NETIF_F_NO_CSUM - loopback or reliable single hop media. | ||
78 | * NETIF_F_IP_CSUM - the device is dumb. It is able to csum only | ||
79 | * TCP/UDP over IPv4. Sigh. Vendors like this | ||
80 | * way for some unknown reason. Though, see the comment above | ||
81 | * about CHECKSUM_UNNECESSARY. 8) | ||
82 | * | ||
83 | * Any questions? No questions, good. --ANK | ||
84 | */ | ||
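
/*
 * A minimal sketch (illustrative only, not part of the original header) of
 * how a driver receive path might choose between these values, assuming a
 * hypothetical RX descriptor that reports a checksum of the whole packet:
 *
 *	if (desc->csum_valid) {
 *		skb->csum = desc->hw_csum;
 *		skb->ip_summed = CHECKSUM_HW;
 *	} else {
 *		skb->ip_summed = CHECKSUM_NONE;
 *	}
 *
 * With CHECKSUM_NONE the stack verifies the checksum in software; with
 * CHECKSUM_HW it trusts the value supplied in skb->csum, as described above.
 */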
85 | |||
86 | #ifdef __i386__ | ||
87 | #define NET_CALLER(arg) (*(((void **)&arg) - 1)) | ||
88 | #else | ||
89 | #define NET_CALLER(arg) __builtin_return_address(0) | ||
90 | #endif | ||
91 | |||
92 | struct net_device; | ||
93 | |||
94 | #ifdef CONFIG_NETFILTER | ||
95 | struct nf_conntrack { | ||
96 | atomic_t use; | ||
97 | void (*destroy)(struct nf_conntrack *); | ||
98 | }; | ||
99 | |||
100 | #ifdef CONFIG_BRIDGE_NETFILTER | ||
101 | struct nf_bridge_info { | ||
102 | atomic_t use; | ||
103 | struct net_device *physindev; | ||
104 | struct net_device *physoutdev; | ||
105 | #if defined(CONFIG_VLAN_8021Q) || defined(CONFIG_VLAN_8021Q_MODULE) | ||
106 | struct net_device *netoutdev; | ||
107 | #endif | ||
108 | unsigned int mask; | ||
109 | unsigned long data[32 / sizeof(unsigned long)]; | ||
110 | }; | ||
111 | #endif | ||
112 | |||
113 | #endif | ||
114 | |||
115 | struct sk_buff_head { | ||
116 | /* These two members must be first. */ | ||
117 | struct sk_buff *next; | ||
118 | struct sk_buff *prev; | ||
119 | |||
120 | __u32 qlen; | ||
121 | spinlock_t lock; | ||
122 | }; | ||
123 | |||
124 | struct sk_buff; | ||
125 | |||
126 | /* To allow a 64K frame to be packed as a single skb without a frag_list */ | ||
127 | #define MAX_SKB_FRAGS (65536/PAGE_SIZE + 2) | ||
128 | |||
129 | typedef struct skb_frag_struct skb_frag_t; | ||
130 | |||
131 | struct skb_frag_struct { | ||
132 | struct page *page; | ||
133 | __u16 page_offset; | ||
134 | __u16 size; | ||
135 | }; | ||
136 | |||
137 | /* This data is invariant across clones and lives at | ||
138 | * the end of the header data, i.e. at skb->end. | ||
139 | */ | ||
140 | struct skb_shared_info { | ||
141 | atomic_t dataref; | ||
142 | unsigned int nr_frags; | ||
143 | unsigned short tso_size; | ||
144 | unsigned short tso_segs; | ||
145 | struct sk_buff *frag_list; | ||
146 | skb_frag_t frags[MAX_SKB_FRAGS]; | ||
147 | }; | ||
148 | |||
149 | /* We divide dataref into two halves. The higher 16 bits hold references | ||
150 | * to the payload part of skb->data. The lower 16 bits hold references to | ||
151 | * the entire skb->data. It is up to the users of the skb to agree on | ||
152 | * where the payload starts. | ||
153 | * | ||
154 | * All users must obey the rule that the skb->data reference count must be | ||
155 | * greater than or equal to the payload reference count. | ||
156 | * | ||
157 | * Holding a reference to the payload part means that the user does not | ||
158 | * care about modifications to the header part of skb->data. | ||
159 | */ | ||
160 | #define SKB_DATAREF_SHIFT 16 | ||
161 | #define SKB_DATAREF_MASK ((1 << SKB_DATAREF_SHIFT) - 1) | ||
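
/*
 * Worked example (illustrative): if dataref reads 0x00020003, the upper
 * 16 bits say two users hold payload-only references and the lower 16 bits
 * say three users reference skb->data in total, so 3 - 2 = 1 user still
 * cares about the header part (see skb_header_cloned() below).
 */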
162 | |||
163 | /** | ||
164 | * struct sk_buff - socket buffer | ||
165 | * @next: Next buffer in list | ||
166 | * @prev: Previous buffer in list | ||
167 | * @list: List we are on | ||
168 | * @sk: Socket we are owned by | ||
169 | * @stamp: Time we arrived | ||
170 | * @dev: Device we arrived on/are leaving by | ||
171 | * @input_dev: Device we arrived on | ||
172 | * @real_dev: The real device we are using | ||
173 | * @h: Transport layer header | ||
174 | * @nh: Network layer header | ||
175 | * @mac: Link layer header | ||
176 | * @dst: FIXME: Describe this field | ||
177 | * @cb: Control buffer. Free for use by every layer. Put private vars here | ||
178 | * @len: Total length of data (linear buffer plus fragments) | ||
179 | * @data_len: Length of the non-linear (paged) part of the data | ||
180 | * @mac_len: Length of link layer header | ||
181 | * @csum: Checksum | ||
182 | * @__unused: Dead field, may be reused | ||
183 | * @cloned: Head may be cloned (check refcnt to be sure) | ||
184 | * @nohdr: Payload reference only, must not modify header | ||
185 | * @pkt_type: Packet class | ||
186 | * @ip_summed: Driver fed us an IP checksum | ||
187 | * @priority: Packet queueing priority | ||
188 | * @users: User count - see {datagram,tcp}.c | ||
189 | * @protocol: Packet protocol from driver | ||
190 | * @security: Security level of packet | ||
191 | * @truesize: Buffer size | ||
192 | * @head: Head of buffer | ||
193 | * @data: Data head pointer | ||
194 | * @tail: Tail pointer | ||
195 | * @end: End pointer | ||
196 | * @destructor: Destruct function | ||
197 | * @nfmark: Can be used for communication between hooks | ||
198 | * @nfcache: Cache info | ||
199 | * @nfct: Associated connection, if any | ||
200 | * @nfctinfo: Relationship of this skb to the connection | ||
201 | * @nf_debug: Netfilter debugging | ||
202 | * @nf_bridge: Saved data about a bridged frame - see br_netfilter.c | ||
203 | * @private: Data which is private to the HIPPI implementation | ||
204 | * @tc_index: Traffic control index | ||
205 | * @tc_verd: traffic control verdict | ||
206 | * @tc_classid: traffic control classid | ||
207 | */ | ||
208 | |||
209 | struct sk_buff { | ||
210 | /* These two members must be first. */ | ||
211 | struct sk_buff *next; | ||
212 | struct sk_buff *prev; | ||
213 | |||
214 | struct sk_buff_head *list; | ||
215 | struct sock *sk; | ||
216 | struct timeval stamp; | ||
217 | struct net_device *dev; | ||
218 | struct net_device *input_dev; | ||
219 | struct net_device *real_dev; | ||
220 | |||
221 | union { | ||
222 | struct tcphdr *th; | ||
223 | struct udphdr *uh; | ||
224 | struct icmphdr *icmph; | ||
225 | struct igmphdr *igmph; | ||
226 | struct iphdr *ipiph; | ||
227 | struct ipv6hdr *ipv6h; | ||
228 | unsigned char *raw; | ||
229 | } h; | ||
230 | |||
231 | union { | ||
232 | struct iphdr *iph; | ||
233 | struct ipv6hdr *ipv6h; | ||
234 | struct arphdr *arph; | ||
235 | unsigned char *raw; | ||
236 | } nh; | ||
237 | |||
238 | union { | ||
239 | unsigned char *raw; | ||
240 | } mac; | ||
241 | |||
242 | struct dst_entry *dst; | ||
243 | struct sec_path *sp; | ||
244 | |||
245 | /* | ||
246 | * This is the control buffer. It is free to use for every | ||
247 | * layer. Please put your private variables there. If you | ||
248 | * want to keep them across layers you have to do a skb_clone() | ||
249 | * first. This is owned by whoever has the skb queued ATM. | ||
250 | */ | ||
251 | char cb[40]; | ||
252 | |||
253 | unsigned int len, | ||
254 | data_len, | ||
255 | mac_len, | ||
256 | csum; | ||
257 | unsigned char local_df, | ||
258 | cloned:1, | ||
259 | nohdr:1, | ||
260 | pkt_type, | ||
261 | ip_summed; | ||
262 | __u32 priority; | ||
263 | unsigned short protocol, | ||
264 | security; | ||
265 | |||
266 | void (*destructor)(struct sk_buff *skb); | ||
267 | #ifdef CONFIG_NETFILTER | ||
268 | unsigned long nfmark; | ||
269 | __u32 nfcache; | ||
270 | __u32 nfctinfo; | ||
271 | struct nf_conntrack *nfct; | ||
272 | #ifdef CONFIG_NETFILTER_DEBUG | ||
273 | unsigned int nf_debug; | ||
274 | #endif | ||
275 | #ifdef CONFIG_BRIDGE_NETFILTER | ||
276 | struct nf_bridge_info *nf_bridge; | ||
277 | #endif | ||
278 | #endif /* CONFIG_NETFILTER */ | ||
279 | #if defined(CONFIG_HIPPI) | ||
280 | union { | ||
281 | __u32 ifield; | ||
282 | } private; | ||
283 | #endif | ||
284 | #ifdef CONFIG_NET_SCHED | ||
285 | __u32 tc_index; /* traffic control index */ | ||
286 | #ifdef CONFIG_NET_CLS_ACT | ||
287 | __u32 tc_verd; /* traffic control verdict */ | ||
288 | __u32 tc_classid; /* traffic control classid */ | ||
289 | #endif | ||
290 | |||
291 | #endif | ||
292 | |||
293 | |||
294 | /* These elements must be at the end, see alloc_skb() for details. */ | ||
295 | unsigned int truesize; | ||
296 | atomic_t users; | ||
297 | unsigned char *head, | ||
298 | *data, | ||
299 | *tail, | ||
300 | *end; | ||
301 | }; | ||
302 | |||
303 | #ifdef __KERNEL__ | ||
304 | /* | ||
305 | * Handling routines are only of interest to the kernel | ||
306 | */ | ||
307 | #include <linux/slab.h> | ||
308 | |||
309 | #include <asm/system.h> | ||
310 | |||
311 | extern void __kfree_skb(struct sk_buff *skb); | ||
312 | extern struct sk_buff *alloc_skb(unsigned int size, int priority); | ||
313 | extern struct sk_buff *alloc_skb_from_cache(kmem_cache_t *cp, | ||
314 | unsigned int size, int priority); | ||
315 | extern void kfree_skbmem(struct sk_buff *skb); | ||
316 | extern struct sk_buff *skb_clone(struct sk_buff *skb, int priority); | ||
317 | extern struct sk_buff *skb_copy(const struct sk_buff *skb, int priority); | ||
318 | extern struct sk_buff *pskb_copy(struct sk_buff *skb, int gfp_mask); | ||
319 | extern int pskb_expand_head(struct sk_buff *skb, | ||
320 | int nhead, int ntail, int gfp_mask); | ||
321 | extern struct sk_buff *skb_realloc_headroom(struct sk_buff *skb, | ||
322 | unsigned int headroom); | ||
323 | extern struct sk_buff *skb_copy_expand(const struct sk_buff *skb, | ||
324 | int newheadroom, int newtailroom, | ||
325 | int priority); | ||
326 | extern struct sk_buff * skb_pad(struct sk_buff *skb, int pad); | ||
327 | #define dev_kfree_skb(a) kfree_skb(a) | ||
328 | extern void skb_over_panic(struct sk_buff *skb, int len, | ||
329 | void *here); | ||
330 | extern void skb_under_panic(struct sk_buff *skb, int len, | ||
331 | void *here); | ||
332 | |||
333 | /* Internal */ | ||
334 | #define skb_shinfo(SKB) ((struct skb_shared_info *)((SKB)->end)) | ||
335 | |||
336 | /** | ||
337 | * skb_queue_empty - check if a queue is empty | ||
338 | * @list: queue head | ||
339 | * | ||
340 | * Returns true if the queue is empty, false otherwise. | ||
341 | */ | ||
342 | static inline int skb_queue_empty(const struct sk_buff_head *list) | ||
343 | { | ||
344 | return list->next == (struct sk_buff *)list; | ||
345 | } | ||
346 | |||
347 | /** | ||
348 | * skb_get - reference buffer | ||
349 | * @skb: buffer to reference | ||
350 | * | ||
351 | * Makes another reference to a socket buffer and returns a pointer | ||
352 | * to the buffer. | ||
353 | */ | ||
354 | static inline struct sk_buff *skb_get(struct sk_buff *skb) | ||
355 | { | ||
356 | atomic_inc(&skb->users); | ||
357 | return skb; | ||
358 | } | ||
359 | |||
360 | /* | ||
361 | * If users == 1, we are the only owner and can avoid a redundant | ||
362 | * atomic change. | ||
363 | */ | ||
364 | |||
365 | /** | ||
366 | * kfree_skb - free an sk_buff | ||
367 | * @skb: buffer to free | ||
368 | * | ||
369 | * Drop a reference to the buffer and free it if the usage count has | ||
370 | * hit zero. | ||
371 | */ | ||
372 | static inline void kfree_skb(struct sk_buff *skb) | ||
373 | { | ||
374 | if (likely(atomic_read(&skb->users) == 1)) | ||
375 | smp_rmb(); | ||
376 | else if (likely(!atomic_dec_and_test(&skb->users))) | ||
377 | return; | ||
378 | __kfree_skb(skb); | ||
379 | } | ||
380 | |||
381 | /** | ||
382 | * skb_cloned - is the buffer a clone | ||
383 | * @skb: buffer to check | ||
384 | * | ||
385 | * Returns true if the buffer was generated with skb_clone() and is | ||
386 | * one of multiple shared copies of the buffer. Cloned buffers are | ||
387 | * shared data so must not be written to under normal circumstances. | ||
388 | */ | ||
389 | static inline int skb_cloned(const struct sk_buff *skb) | ||
390 | { | ||
391 | return skb->cloned && | ||
392 | (atomic_read(&skb_shinfo(skb)->dataref) & SKB_DATAREF_MASK) != 1; | ||
393 | } | ||
394 | |||
395 | /** | ||
396 | * skb_header_cloned - is the header a clone | ||
397 | * @skb: buffer to check | ||
398 | * | ||
399 | * Returns true if modifying the header part of the buffer requires | ||
400 | * the data to be copied. | ||
401 | */ | ||
402 | static inline int skb_header_cloned(const struct sk_buff *skb) | ||
403 | { | ||
404 | int dataref; | ||
405 | |||
406 | if (!skb->cloned) | ||
407 | return 0; | ||
408 | |||
409 | dataref = atomic_read(&skb_shinfo(skb)->dataref); | ||
410 | dataref = (dataref & SKB_DATAREF_MASK) - (dataref >> SKB_DATAREF_SHIFT); | ||
411 | return dataref != 1; | ||
412 | } | ||
413 | |||
414 | /** | ||
415 | * skb_header_release - release reference to header | ||
416 | * @skb: buffer to operate on | ||
417 | * | ||
418 | * Drop a reference to the header part of the buffer. This is done | ||
419 | * by acquiring a payload reference. You must not read from the header | ||
420 | * part of skb->data after this. | ||
421 | */ | ||
422 | static inline void skb_header_release(struct sk_buff *skb) | ||
423 | { | ||
424 | BUG_ON(skb->nohdr); | ||
425 | skb->nohdr = 1; | ||
426 | atomic_add(1 << SKB_DATAREF_SHIFT, &skb_shinfo(skb)->dataref); | ||
427 | } | ||
428 | |||
429 | /** | ||
430 | * skb_shared - is the buffer shared | ||
431 | * @skb: buffer to check | ||
432 | * | ||
433 | * Returns true if more than one person has a reference to this | ||
434 | * buffer. | ||
435 | */ | ||
436 | static inline int skb_shared(const struct sk_buff *skb) | ||
437 | { | ||
438 | return atomic_read(&skb->users) != 1; | ||
439 | } | ||
440 | |||
441 | /** | ||
442 | * skb_share_check - check if buffer is shared and if so clone it | ||
443 | * @skb: buffer to check | ||
444 | * @pri: priority for memory allocation | ||
445 | * | ||
446 | * If the buffer is shared the buffer is cloned and the old copy | ||
447 | * drops a reference. A new clone with a single reference is returned. | ||
448 | * If the buffer is not shared the original buffer is returned. When | ||
449 | * called from interrupt context or with spinlocks held, @pri must | ||
450 | * be %GFP_ATOMIC. | ||
451 | * | ||
452 | * NULL is returned on a memory allocation failure. | ||
453 | */ | ||
454 | static inline struct sk_buff *skb_share_check(struct sk_buff *skb, int pri) | ||
455 | { | ||
456 | might_sleep_if(pri & __GFP_WAIT); | ||
457 | if (skb_shared(skb)) { | ||
458 | struct sk_buff *nskb = skb_clone(skb, pri); | ||
459 | kfree_skb(skb); | ||
460 | skb = nskb; | ||
461 | } | ||
462 | return skb; | ||
463 | } | ||
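
/*
 * Illustrative sketch, not part of the original header: the usual pattern in
 * a receive handler that needs exclusive ownership of the skb before
 * modifying or queueing it. The helper name is an assumption.
 */
static inline struct sk_buff *example_rcv_own(struct sk_buff *skb)
{
	/* Clone if somebody else also holds a reference; our reference to
	 * the shared copy is dropped either way. */
	skb = skb_share_check(skb, GFP_ATOMIC);
	if (!skb)
		return NULL;	/* clone failed under memory pressure */
	/* skb now has a single user and may safely be modified or queued */
	return skb;
}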
464 | |||
465 | /* | ||
466 | * Copy shared buffers into a new sk_buff. We effectively do COW on | ||
467 | * packets to handle cases where we have a local reader as well as a | ||
468 | * forwarding path, and a couple of other messy ones. The common case is | ||
469 | * tcpdumping a packet that is being forwarded. | ||
470 | */ | ||
471 | |||
472 | /** | ||
473 | * skb_unshare - make a copy of a shared buffer | ||
474 | * @skb: buffer to check | ||
475 | * @pri: priority for memory allocation | ||
476 | * | ||
477 | * If the socket buffer is a clone then this function creates a new | ||
478 | * copy of the data, drops a reference count on the old copy and returns | ||
479 | * the new copy with the reference count at 1. If the buffer is not a clone | ||
480 | * the original buffer is returned. When called with a spinlock held or | ||
481 | * from interrupt context, @pri must be %GFP_ATOMIC. | ||
482 | * | ||
483 | * %NULL is returned on a memory allocation failure. | ||
484 | */ | ||
485 | static inline struct sk_buff *skb_unshare(struct sk_buff *skb, int pri) | ||
486 | { | ||
487 | might_sleep_if(pri & __GFP_WAIT); | ||
488 | if (skb_cloned(skb)) { | ||
489 | struct sk_buff *nskb = skb_copy(skb, pri); | ||
490 | kfree_skb(skb); /* Free our shared copy */ | ||
491 | skb = nskb; | ||
492 | } | ||
493 | return skb; | ||
494 | } | ||
495 | |||
496 | /** | ||
497 | * skb_peek | ||
498 | * @list_: list to peek at | ||
499 | * | ||
500 | * Peek an &sk_buff. Unlike most other operations you _MUST_ | ||
501 | * be careful with this one. A peek leaves the buffer on the | ||
502 | * list and someone else may run off with it. You must hold | ||
503 | * the appropriate locks or have a private queue to do this. | ||
504 | * | ||
505 | * Returns %NULL for an empty list or a pointer to the head element. | ||
506 | * The reference count is not incremented and the reference is therefore | ||
507 | * volatile. Use with caution. | ||
508 | */ | ||
509 | static inline struct sk_buff *skb_peek(struct sk_buff_head *list_) | ||
510 | { | ||
511 | struct sk_buff *list = ((struct sk_buff *)list_)->next; | ||
512 | if (list == (struct sk_buff *)list_) | ||
513 | list = NULL; | ||
514 | return list; | ||
515 | } | ||
516 | |||
517 | /** | ||
518 | * skb_peek_tail | ||
519 | * @list_: list to peek at | ||
520 | * | ||
521 | * Peek an &sk_buff. Unlike most other operations you _MUST_ | ||
522 | * be careful with this one. A peek leaves the buffer on the | ||
523 | * list and someone else may run off with it. You must hold | ||
524 | * the appropriate locks or have a private queue to do this. | ||
525 | * | ||
526 | * Returns %NULL for an empty list or a pointer to the tail element. | ||
527 | * The reference count is not incremented and the reference is therefore | ||
528 | * volatile. Use with caution. | ||
529 | */ | ||
530 | static inline struct sk_buff *skb_peek_tail(struct sk_buff_head *list_) | ||
531 | { | ||
532 | struct sk_buff *list = ((struct sk_buff *)list_)->prev; | ||
533 | if (list == (struct sk_buff *)list_) | ||
534 | list = NULL; | ||
535 | return list; | ||
536 | } | ||
537 | |||
538 | /** | ||
539 | * skb_queue_len - get queue length | ||
540 | * @list_: list to measure | ||
541 | * | ||
542 | * Return the length of an &sk_buff queue. | ||
543 | */ | ||
544 | static inline __u32 skb_queue_len(const struct sk_buff_head *list_) | ||
545 | { | ||
546 | return list_->qlen; | ||
547 | } | ||
548 | |||
549 | static inline void skb_queue_head_init(struct sk_buff_head *list) | ||
550 | { | ||
551 | spin_lock_init(&list->lock); | ||
552 | list->prev = list->next = (struct sk_buff *)list; | ||
553 | list->qlen = 0; | ||
554 | } | ||
555 | |||
556 | /* | ||
557 | * Insert an sk_buff at the start of a list. | ||
558 | * | ||
559 | * The "__skb_xxxx()" functions are the non-atomic ones that | ||
560 | * can only be called with interrupts disabled. | ||
561 | */ | ||
562 | |||
563 | /** | ||
564 | * __skb_queue_head - queue a buffer at the list head | ||
565 | * @list: list to use | ||
566 | * @newsk: buffer to queue | ||
567 | * | ||
568 | * Queue a buffer at the start of a list. This function takes no locks | ||
569 | * and you must therefore hold required locks before calling it. | ||
570 | * | ||
571 | * A buffer cannot be placed on two lists at the same time. | ||
572 | */ | ||
573 | extern void skb_queue_head(struct sk_buff_head *list, struct sk_buff *newsk); | ||
574 | static inline void __skb_queue_head(struct sk_buff_head *list, | ||
575 | struct sk_buff *newsk) | ||
576 | { | ||
577 | struct sk_buff *prev, *next; | ||
578 | |||
579 | newsk->list = list; | ||
580 | list->qlen++; | ||
581 | prev = (struct sk_buff *)list; | ||
582 | next = prev->next; | ||
583 | newsk->next = next; | ||
584 | newsk->prev = prev; | ||
585 | next->prev = prev->next = newsk; | ||
586 | } | ||
587 | |||
588 | /** | ||
589 | * __skb_queue_tail - queue a buffer at the list tail | ||
590 | * @list: list to use | ||
591 | * @newsk: buffer to queue | ||
592 | * | ||
593 | * Queue a buffer at the end of a list. This function takes no locks | ||
594 | * and you must therefore hold required locks before calling it. | ||
595 | * | ||
596 | * A buffer cannot be placed on two lists at the same time. | ||
597 | */ | ||
598 | extern void skb_queue_tail(struct sk_buff_head *list, struct sk_buff *newsk); | ||
599 | static inline void __skb_queue_tail(struct sk_buff_head *list, | ||
600 | struct sk_buff *newsk) | ||
601 | { | ||
602 | struct sk_buff *prev, *next; | ||
603 | |||
604 | newsk->list = list; | ||
605 | list->qlen++; | ||
606 | next = (struct sk_buff *)list; | ||
607 | prev = next->prev; | ||
608 | newsk->next = next; | ||
609 | newsk->prev = prev; | ||
610 | next->prev = prev->next = newsk; | ||
611 | } | ||
612 | |||
613 | |||
614 | /** | ||
615 | * __skb_dequeue - remove from the head of the queue | ||
616 | * @list: list to dequeue from | ||
617 | * | ||
618 | * Remove the head of the list. This function does not take any locks | ||
619 | * so must be used with appropriate locks held only. The head item is | ||
620 | * returned or %NULL if the list is empty. | ||
621 | */ | ||
622 | extern struct sk_buff *skb_dequeue(struct sk_buff_head *list); | ||
623 | static inline struct sk_buff *__skb_dequeue(struct sk_buff_head *list) | ||
624 | { | ||
625 | struct sk_buff *next, *prev, *result; | ||
626 | |||
627 | prev = (struct sk_buff *) list; | ||
628 | next = prev->next; | ||
629 | result = NULL; | ||
630 | if (next != prev) { | ||
631 | result = next; | ||
632 | next = next->next; | ||
633 | list->qlen--; | ||
634 | next->prev = prev; | ||
635 | prev->next = next; | ||
636 | result->next = result->prev = NULL; | ||
637 | result->list = NULL; | ||
638 | } | ||
639 | return result; | ||
640 | } | ||
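
/*
 * Illustrative sketch, not part of the original header: a private FIFO
 * protected by the queue's own lock, built from the non-atomic helpers
 * above. The function names are assumptions for the example.
 */
static inline void example_fifo_add(struct sk_buff_head *fifo,
				    struct sk_buff *skb)
{
	unsigned long flags;

	spin_lock_irqsave(&fifo->lock, flags);
	__skb_queue_tail(fifo, skb);
	spin_unlock_irqrestore(&fifo->lock, flags);
}

static inline struct sk_buff *example_fifo_take(struct sk_buff_head *fifo)
{
	struct sk_buff *skb;
	unsigned long flags;

	spin_lock_irqsave(&fifo->lock, flags);
	skb = __skb_dequeue(fifo);	/* NULL if the FIFO is empty */
	spin_unlock_irqrestore(&fifo->lock, flags);
	return skb;
}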
641 | |||
642 | |||
643 | /* | ||
644 | * Insert a packet on a list. | ||
645 | */ | ||
646 | extern void skb_insert(struct sk_buff *old, struct sk_buff *newsk); | ||
647 | static inline void __skb_insert(struct sk_buff *newsk, | ||
648 | struct sk_buff *prev, struct sk_buff *next, | ||
649 | struct sk_buff_head *list) | ||
650 | { | ||
651 | newsk->next = next; | ||
652 | newsk->prev = prev; | ||
653 | next->prev = prev->next = newsk; | ||
654 | newsk->list = list; | ||
655 | list->qlen++; | ||
656 | } | ||
657 | |||
658 | /* | ||
659 | * Place a packet after a given packet in a list. | ||
660 | */ | ||
661 | extern void skb_append(struct sk_buff *old, struct sk_buff *newsk); | ||
662 | static inline void __skb_append(struct sk_buff *old, struct sk_buff *newsk) | ||
663 | { | ||
664 | __skb_insert(newsk, old, old->next, old->list); | ||
665 | } | ||
666 | |||
667 | /* | ||
668 | * remove sk_buff from list. _Must_ be called atomically, and with | ||
669 | * the list known. | ||
670 | */ | ||
671 | extern void skb_unlink(struct sk_buff *skb); | ||
672 | static inline void __skb_unlink(struct sk_buff *skb, struct sk_buff_head *list) | ||
673 | { | ||
674 | struct sk_buff *next, *prev; | ||
675 | |||
676 | list->qlen--; | ||
677 | next = skb->next; | ||
678 | prev = skb->prev; | ||
679 | skb->next = skb->prev = NULL; | ||
680 | skb->list = NULL; | ||
681 | next->prev = prev; | ||
682 | prev->next = next; | ||
683 | } | ||
684 | |||
685 | |||
686 | /* XXX: more streamlined implementation */ | ||
687 | |||
688 | /** | ||
689 | * __skb_dequeue_tail - remove from the tail of the queue | ||
690 | * @list: list to dequeue from | ||
691 | * | ||
692 | * Remove the tail of the list. This function does not take any locks | ||
693 | * so must be used with appropriate locks held only. The tail item is | ||
694 | * returned or %NULL if the list is empty. | ||
695 | */ | ||
696 | extern struct sk_buff *skb_dequeue_tail(struct sk_buff_head *list); | ||
697 | static inline struct sk_buff *__skb_dequeue_tail(struct sk_buff_head *list) | ||
698 | { | ||
699 | struct sk_buff *skb = skb_peek_tail(list); | ||
700 | if (skb) | ||
701 | __skb_unlink(skb, list); | ||
702 | return skb; | ||
703 | } | ||
704 | |||
705 | |||
706 | static inline int skb_is_nonlinear(const struct sk_buff *skb) | ||
707 | { | ||
708 | return skb->data_len; | ||
709 | } | ||
710 | |||
711 | static inline unsigned int skb_headlen(const struct sk_buff *skb) | ||
712 | { | ||
713 | return skb->len - skb->data_len; | ||
714 | } | ||
715 | |||
716 | static inline int skb_pagelen(const struct sk_buff *skb) | ||
717 | { | ||
718 | int i, len = 0; | ||
719 | |||
720 | for (i = (int)skb_shinfo(skb)->nr_frags - 1; i >= 0; i--) | ||
721 | len += skb_shinfo(skb)->frags[i].size; | ||
722 | return len + skb_headlen(skb); | ||
723 | } | ||
724 | |||
725 | static inline void skb_fill_page_desc(struct sk_buff *skb, int i, | ||
726 | struct page *page, int off, int size) | ||
727 | { | ||
728 | skb_frag_t *frag = &skb_shinfo(skb)->frags[i]; | ||
729 | |||
730 | frag->page = page; | ||
731 | frag->page_offset = off; | ||
732 | frag->size = size; | ||
733 | skb_shinfo(skb)->nr_frags = i + 1; | ||
734 | } | ||
735 | |||
736 | #define SKB_PAGE_ASSERT(skb) BUG_ON(skb_shinfo(skb)->nr_frags) | ||
737 | #define SKB_FRAG_ASSERT(skb) BUG_ON(skb_shinfo(skb)->frag_list) | ||
738 | #define SKB_LINEAR_ASSERT(skb) BUG_ON(skb_is_nonlinear(skb)) | ||
739 | |||
740 | /* | ||
741 | * Add data to an sk_buff | ||
742 | */ | ||
743 | static inline unsigned char *__skb_put(struct sk_buff *skb, unsigned int len) | ||
744 | { | ||
745 | unsigned char *tmp = skb->tail; | ||
746 | SKB_LINEAR_ASSERT(skb); | ||
747 | skb->tail += len; | ||
748 | skb->len += len; | ||
749 | return tmp; | ||
750 | } | ||
751 | |||
752 | /** | ||
753 | * skb_put - add data to a buffer | ||
754 | * @skb: buffer to use | ||
755 | * @len: amount of data to add | ||
756 | * | ||
757 | * This function extends the used data area of the buffer. If this would | ||
758 | * exceed the total buffer size the kernel will panic. A pointer to the | ||
759 | * first byte of the extra data is returned. | ||
760 | */ | ||
761 | static inline unsigned char *skb_put(struct sk_buff *skb, unsigned int len) | ||
762 | { | ||
763 | unsigned char *tmp = skb->tail; | ||
764 | SKB_LINEAR_ASSERT(skb); | ||
765 | skb->tail += len; | ||
766 | skb->len += len; | ||
767 | if (unlikely(skb->tail>skb->end)) | ||
768 | skb_over_panic(skb, len, current_text_addr()); | ||
769 | return tmp; | ||
770 | } | ||
771 | |||
772 | static inline unsigned char *__skb_push(struct sk_buff *skb, unsigned int len) | ||
773 | { | ||
774 | skb->data -= len; | ||
775 | skb->len += len; | ||
776 | return skb->data; | ||
777 | } | ||
778 | |||
779 | /** | ||
780 | * skb_push - add data to the start of a buffer | ||
781 | * @skb: buffer to use | ||
782 | * @len: amount of data to add | ||
783 | * | ||
784 | * This function extends the used data area of the buffer at the buffer | ||
785 | * start. If this would exceed the total buffer headroom the kernel will | ||
786 | * panic. A pointer to the first byte of the extra data is returned. | ||
787 | */ | ||
788 | static inline unsigned char *skb_push(struct sk_buff *skb, unsigned int len) | ||
789 | { | ||
790 | skb->data -= len; | ||
791 | skb->len += len; | ||
792 | if (unlikely(skb->data<skb->head)) | ||
793 | skb_under_panic(skb, len, current_text_addr()); | ||
794 | return skb->data; | ||
795 | } | ||
796 | |||
797 | static inline unsigned char *__skb_pull(struct sk_buff *skb, unsigned int len) | ||
798 | { | ||
799 | skb->len -= len; | ||
800 | BUG_ON(skb->len < skb->data_len); | ||
801 | return skb->data += len; | ||
802 | } | ||
803 | |||
804 | /** | ||
805 | * skb_pull - remove data from the start of a buffer | ||
806 | * @skb: buffer to use | ||
807 | * @len: amount of data to remove | ||
808 | * | ||
809 | * This function removes data from the start of a buffer, returning | ||
810 | * the memory to the headroom. A pointer to the next data in the buffer | ||
811 | * is returned. Once the data has been pulled future pushes will overwrite | ||
812 | * the old data. | ||
813 | */ | ||
814 | static inline unsigned char *skb_pull(struct sk_buff *skb, unsigned int len) | ||
815 | { | ||
816 | return unlikely(len > skb->len) ? NULL : __skb_pull(skb, len); | ||
817 | } | ||
818 | |||
819 | extern unsigned char *__pskb_pull_tail(struct sk_buff *skb, int delta); | ||
820 | |||
821 | static inline unsigned char *__pskb_pull(struct sk_buff *skb, unsigned int len) | ||
822 | { | ||
823 | if (len > skb_headlen(skb) && | ||
824 | !__pskb_pull_tail(skb, len-skb_headlen(skb))) | ||
825 | return NULL; | ||
826 | skb->len -= len; | ||
827 | return skb->data += len; | ||
828 | } | ||
829 | |||
830 | static inline unsigned char *pskb_pull(struct sk_buff *skb, unsigned int len) | ||
831 | { | ||
832 | return unlikely(len > skb->len) ? NULL : __pskb_pull(skb, len); | ||
833 | } | ||
834 | |||
835 | static inline int pskb_may_pull(struct sk_buff *skb, unsigned int len) | ||
836 | { | ||
837 | if (likely(len <= skb_headlen(skb))) | ||
838 | return 1; | ||
839 | if (unlikely(len > skb->len)) | ||
840 | return 0; | ||
841 | return __pskb_pull_tail(skb, len-skb_headlen(skb)) != NULL; | ||
842 | } | ||
843 | |||
844 | /** | ||
845 | * skb_headroom - bytes at buffer head | ||
846 | * @skb: buffer to check | ||
847 | * | ||
848 | * Return the number of bytes of free space at the head of an &sk_buff. | ||
849 | */ | ||
850 | static inline int skb_headroom(const struct sk_buff *skb) | ||
851 | { | ||
852 | return skb->data - skb->head; | ||
853 | } | ||
854 | |||
855 | /** | ||
856 | * skb_tailroom - bytes at buffer end | ||
857 | * @skb: buffer to check | ||
858 | * | ||
859 | * Return the number of bytes of free space at the tail of an &sk_buff. | ||
860 | */ | ||
861 | static inline int skb_tailroom(const struct sk_buff *skb) | ||
862 | { | ||
863 | return skb_is_nonlinear(skb) ? 0 : skb->end - skb->tail; | ||
864 | } | ||
865 | |||
866 | /** | ||
867 | * skb_reserve - adjust headroom | ||
868 | * @skb: buffer to alter | ||
869 | * @len: bytes to move | ||
870 | * | ||
871 | * Increase the headroom of an empty &sk_buff by reducing the tail | ||
872 | * room. This is only allowed for an empty buffer. | ||
873 | */ | ||
874 | static inline void skb_reserve(struct sk_buff *skb, unsigned int len) | ||
875 | { | ||
876 | skb->data += len; | ||
877 | skb->tail += len; | ||
878 | } | ||
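
/*
 * Illustrative sketch, not part of the original header: building a frame
 * with the reserve/put/push primitives above. The helper name and the idea
 * that the caller fills in the returned areas are assumptions.
 */
static inline struct sk_buff *example_build_frame(unsigned int hdr_len,
						  unsigned int data_len)
{
	struct sk_buff *skb = alloc_skb(hdr_len + data_len, GFP_ATOMIC);

	if (!skb)
		return NULL;
	skb_reserve(skb, hdr_len);	/* leave headroom for the header */
	skb_put(skb, data_len);		/* payload area, filled by the caller */
	skb_push(skb, hdr_len);		/* prepend room for the header itself */
	return skb;			/* skb->len == hdr_len + data_len */
}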
879 | |||
880 | /* | ||
881 | * CPUs often take a performance hit when accessing unaligned memory | ||
882 | * locations. The actual performance hit varies, it can be small if the | ||
883 | * hardware handles it or large if we have to take an exception and fix it | ||
884 | * in software. | ||
885 | * | ||
886 | * Since an ethernet header is 14 bytes network drivers often end up with | ||
887 | * the IP header at an unaligned offset. The IP header can be aligned by | ||
888 | * shifting the start of the packet by 2 bytes. Drivers should do this | ||
889 | * with: | ||
890 | * | ||
891 | * skb_reserve(skb, NET_IP_ALIGN); | ||
892 | * | ||
893 | * The downside to this alignment of the IP header is that the DMA is now | ||
894 | * unaligned. On some architectures the cost of an unaligned DMA is high | ||
895 | * and this cost outweighs the gains made by aligning the IP header. | ||
896 | * | ||
897 | * Since this trade-off varies between architectures, we allow NET_IP_ALIGN | ||
898 | * to be overridden. | ||
899 | */ | ||
900 | #ifndef NET_IP_ALIGN | ||
901 | #define NET_IP_ALIGN 2 | ||
902 | #endif | ||
903 | |||
904 | extern int ___pskb_trim(struct sk_buff *skb, unsigned int len, int realloc); | ||
905 | |||
906 | static inline void __skb_trim(struct sk_buff *skb, unsigned int len) | ||
907 | { | ||
908 | if (!skb->data_len) { | ||
909 | skb->len = len; | ||
910 | skb->tail = skb->data + len; | ||
911 | } else | ||
912 | ___pskb_trim(skb, len, 0); | ||
913 | } | ||
914 | |||
915 | /** | ||
916 | * skb_trim - remove end from a buffer | ||
917 | * @skb: buffer to alter | ||
918 | * @len: new length | ||
919 | * | ||
920 | * Cut the length of a buffer down by removing data from the tail. If | ||
921 | * the buffer is already under the length specified it is not modified. | ||
922 | */ | ||
923 | static inline void skb_trim(struct sk_buff *skb, unsigned int len) | ||
924 | { | ||
925 | if (skb->len > len) | ||
926 | __skb_trim(skb, len); | ||
927 | } | ||
928 | |||
929 | |||
930 | static inline int __pskb_trim(struct sk_buff *skb, unsigned int len) | ||
931 | { | ||
932 | if (!skb->data_len) { | ||
933 | skb->len = len; | ||
934 | skb->tail = skb->data+len; | ||
935 | return 0; | ||
936 | } | ||
937 | return ___pskb_trim(skb, len, 1); | ||
938 | } | ||
939 | |||
940 | static inline int pskb_trim(struct sk_buff *skb, unsigned int len) | ||
941 | { | ||
942 | return (len < skb->len) ? __pskb_trim(skb, len) : 0; | ||
943 | } | ||
944 | |||
945 | /** | ||
946 | * skb_orphan - orphan a buffer | ||
947 | * @skb: buffer to orphan | ||
948 | * | ||
949 | * If a buffer currently has an owner then we call the owner's | ||
950 | * destructor function and make the @skb unowned. The buffer continues | ||
951 | * to exist but is no longer charged to its former owner. | ||
952 | */ | ||
953 | static inline void skb_orphan(struct sk_buff *skb) | ||
954 | { | ||
955 | if (skb->destructor) | ||
956 | skb->destructor(skb); | ||
957 | skb->destructor = NULL; | ||
958 | skb->sk = NULL; | ||
959 | } | ||
960 | |||
961 | /** | ||
962 | * __skb_queue_purge - empty a list | ||
963 | * @list: list to empty | ||
964 | * | ||
965 | * Delete all buffers on an &sk_buff list. Each buffer is removed from | ||
966 | * the list and one reference dropped. This function does not take the | ||
967 | * list lock and the caller must hold the relevant locks to use it. | ||
968 | */ | ||
969 | extern void skb_queue_purge(struct sk_buff_head *list); | ||
970 | static inline void __skb_queue_purge(struct sk_buff_head *list) | ||
971 | { | ||
972 | struct sk_buff *skb; | ||
973 | while ((skb = __skb_dequeue(list)) != NULL) | ||
974 | kfree_skb(skb); | ||
975 | } | ||
976 | |||
977 | /** | ||
978 | * __dev_alloc_skb - allocate an skbuff for receiving | ||
979 | * @length: length to allocate | ||
980 | * @gfp_mask: get_free_pages mask, passed to alloc_skb | ||
981 | * | ||
982 | * Allocate a new &sk_buff and assign it a usage count of one. The | ||
983 | * buffer has unspecified headroom built in. Users should allocate | ||
984 | * the headroom they think they need without accounting for the | ||
985 | * built in space. The built in space is used for optimisations. | ||
986 | * | ||
987 | * %NULL is returned if there is no free memory. | ||
988 | */ | ||
989 | #ifndef CONFIG_HAVE_ARCH_DEV_ALLOC_SKB | ||
990 | static inline struct sk_buff *__dev_alloc_skb(unsigned int length, | ||
991 | int gfp_mask) | ||
992 | { | ||
993 | struct sk_buff *skb = alloc_skb(length + 16, gfp_mask); | ||
994 | if (likely(skb)) | ||
995 | skb_reserve(skb, 16); | ||
996 | return skb; | ||
997 | } | ||
998 | #else | ||
999 | extern struct sk_buff *__dev_alloc_skb(unsigned int length, int gfp_mask); | ||
1000 | #endif | ||
1001 | |||
1002 | /** | ||
1003 | * dev_alloc_skb - allocate an skbuff for receiving | ||
1004 | * @length: length to allocate | ||
1005 | * | ||
1006 | * Allocate a new &sk_buff and assign it a usage count of one. The | ||
1007 | * buffer has unspecified headroom built in. Users should allocate | ||
1008 | * the headroom they think they need without accounting for the | ||
1009 | * built in space. The built in space is used for optimisations. | ||
1010 | * | ||
1011 | * %NULL is returned if there is no free memory. Although this function | ||
1012 | * allocates memory it can be called from an interrupt. | ||
1013 | */ | ||
1014 | static inline struct sk_buff *dev_alloc_skb(unsigned int length) | ||
1015 | { | ||
1016 | return __dev_alloc_skb(length, GFP_ATOMIC); | ||
1017 | } | ||
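
/*
 * Illustrative sketch, not part of the original header: the usual driver RX
 * allocation that keeps the IP header aligned, as discussed above for
 * NET_IP_ALIGN. 'buf_len' is an assumed driver parameter.
 */
static inline struct sk_buff *example_rx_alloc(unsigned int buf_len)
{
	struct sk_buff *skb = dev_alloc_skb(buf_len + NET_IP_ALIGN);

	if (likely(skb))
		skb_reserve(skb, NET_IP_ALIGN);	/* IP header lands on a 4-byte
						 * boundary after the 14-byte
						 * Ethernet header */
	return skb;
}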
1018 | |||
1019 | /** | ||
1020 | * skb_cow - copy header of skb when it is required | ||
1021 | * @skb: buffer to cow | ||
1022 | * @headroom: needed headroom | ||
1023 | * | ||
1024 | * If the skb passed lacks sufficient headroom or its data part | ||
1025 | * is shared, data is reallocated. If reallocation fails, an error | ||
1026 | * is returned and original skb is not changed. | ||
1027 | * | ||
1028 | * The result is an skb with a writable area from skb->head to skb->tail | ||
1029 | * and at least @headroom of space at the head. | ||
1030 | */ | ||
1031 | static inline int skb_cow(struct sk_buff *skb, unsigned int headroom) | ||
1032 | { | ||
1033 | int delta = (headroom > 16 ? headroom : 16) - skb_headroom(skb); | ||
1034 | |||
1035 | if (delta < 0) | ||
1036 | delta = 0; | ||
1037 | |||
1038 | if (delta || skb_cloned(skb)) | ||
1039 | return pskb_expand_head(skb, (delta + 15) & ~15, 0, GFP_ATOMIC); | ||
1040 | return 0; | ||
1041 | } | ||
1042 | |||
1043 | /** | ||
1044 | * skb_padto - pad an skbuff up to a minimal size | ||
1045 | * @skb: buffer to pad | ||
1046 | * @len: minimal length | ||
1047 | * | ||
1048 | * Pads up a buffer to ensure the trailing bytes exist and are | ||
1049 | * blanked. If the buffer already contains sufficient data it | ||
1050 | * is untouched. Returns the buffer, which may be a replacement | ||
1051 | * for the original, or %NULL for out of memory - in which case | ||
1052 | * the original buffer has already been freed. | ||
1053 | */ | ||
1054 | |||
1055 | static inline struct sk_buff *skb_padto(struct sk_buff *skb, unsigned int len) | ||
1056 | { | ||
1057 | unsigned int size = skb->len; | ||
1058 | if (likely(size >= len)) | ||
1059 | return skb; | ||
1060 | return skb_pad(skb, len-size); | ||
1061 | } | ||
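
/*
 * Illustrative sketch, not part of the original header: padding a frame to
 * the 60-byte Ethernet minimum (excluding FCS) in a transmit path. The
 * constant and helper name are assumptions for the example.
 */
static inline struct sk_buff *example_pad_eth(struct sk_buff *skb)
{
	skb = skb_padto(skb, 60);
	/* On failure the original buffer has already been freed and NULL is
	 * returned, so the caller must not touch the old pointer. */
	return skb;
}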
1062 | |||
1063 | static inline int skb_add_data(struct sk_buff *skb, | ||
1064 | char __user *from, int copy) | ||
1065 | { | ||
1066 | const int off = skb->len; | ||
1067 | |||
1068 | if (skb->ip_summed == CHECKSUM_NONE) { | ||
1069 | int err = 0; | ||
1070 | unsigned int csum = csum_and_copy_from_user(from, | ||
1071 | skb_put(skb, copy), | ||
1072 | copy, 0, &err); | ||
1073 | if (!err) { | ||
1074 | skb->csum = csum_block_add(skb->csum, csum, off); | ||
1075 | return 0; | ||
1076 | } | ||
1077 | } else if (!copy_from_user(skb_put(skb, copy), from, copy)) | ||
1078 | return 0; | ||
1079 | |||
1080 | __skb_trim(skb, off); | ||
1081 | return -EFAULT; | ||
1082 | } | ||
1083 | |||
1084 | static inline int skb_can_coalesce(struct sk_buff *skb, int i, | ||
1085 | struct page *page, int off) | ||
1086 | { | ||
1087 | if (i) { | ||
1088 | struct skb_frag_struct *frag = &skb_shinfo(skb)->frags[i - 1]; | ||
1089 | |||
1090 | return page == frag->page && | ||
1091 | off == frag->page_offset + frag->size; | ||
1092 | } | ||
1093 | return 0; | ||
1094 | } | ||
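
/*
 * Illustrative sketch, not part of the original header: attaching a page to
 * an skb and merging with the last fragment when the new chunk is
 * contiguous with it. Page refcounting and the skb->len/data_len/truesize
 * accounting a real caller must do are deliberately omitted.
 */
static inline void example_append_page(struct sk_buff *skb, struct page *page,
				       int off, int size)
{
	int i = skb_shinfo(skb)->nr_frags;

	if (skb_can_coalesce(skb, i, page, off))
		skb_shinfo(skb)->frags[i - 1].size += size;
	else
		skb_fill_page_desc(skb, i, page, off, size);
}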
1095 | |||
1096 | /** | ||
1097 | * skb_linearize - convert paged skb to linear one | ||
1098 | * @skb: buffer to linearize | ||
1099 | * @gfp: allocation mode | ||
1100 | * | ||
1101 | * If there is no free memory -ENOMEM is returned, otherwise zero | ||
1102 | * is returned and the old skb data released. | ||
1103 | */ | ||
1104 | extern int __skb_linearize(struct sk_buff *skb, int gfp); | ||
1105 | static inline int skb_linearize(struct sk_buff *skb, int gfp) | ||
1106 | { | ||
1107 | return __skb_linearize(skb, gfp); | ||
1108 | } | ||
1109 | |||
1110 | /** | ||
1111 | * skb_postpull_rcsum - update checksum for received skb after pull | ||
1112 | * @skb: buffer to update | ||
1113 | * @start: start of data before pull | ||
1114 | * @len: length of data pulled | ||
1115 | * | ||
1116 | * After doing a pull on a received packet, you need to call this to | ||
1117 | * update the CHECKSUM_HW checksum, or set ip_summed to CHECKSUM_NONE | ||
1118 | * so that it can be recomputed from scratch. | ||
1119 | */ | ||
1120 | |||
1121 | static inline void skb_postpull_rcsum(struct sk_buff *skb, | ||
1122 | const void *start, int len) | ||
1123 | { | ||
1124 | if (skb->ip_summed == CHECKSUM_HW) | ||
1125 | skb->csum = csum_sub(skb->csum, csum_partial(start, len, 0)); | ||
1126 | } | ||
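
/*
 * Illustrative sketch, not part of the original header: pulling a fixed-size
 * header off a received packet while keeping a CHECKSUM_HW value consistent,
 * following the rule documented above. The helper name is an assumption.
 */
static inline unsigned char *example_pull_header(struct sk_buff *skb,
						 unsigned int hlen)
{
	unsigned char *hdr;

	if (!pskb_may_pull(skb, hlen))
		return NULL;			/* packet too short */
	hdr = skb->data;
	__skb_pull(skb, hlen);
	skb_postpull_rcsum(skb, hdr, hlen);	/* subtract the pulled bytes */
	return hdr;
}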
1127 | |||
1128 | /** | ||
1129 | * pskb_trim_rcsum - trim received skb and update checksum | ||
1130 | * @skb: buffer to trim | ||
1131 | * @len: new length | ||
1132 | * | ||
1133 | * This is exactly the same as pskb_trim except that it ensures the | ||
1134 | * checksum of a received packet is still valid after the operation. | ||
1135 | */ | ||
1136 | |||
1137 | static inline int pskb_trim_rcsum(struct sk_buff *skb, unsigned int len) | ||
1138 | { | ||
1139 | if (len >= skb->len) | ||
1140 | return 0; | ||
1141 | if (skb->ip_summed == CHECKSUM_HW) | ||
1142 | skb->ip_summed = CHECKSUM_NONE; | ||
1143 | return __pskb_trim(skb, len); | ||
1144 | } | ||
1145 | |||
1146 | static inline void *kmap_skb_frag(const skb_frag_t *frag) | ||
1147 | { | ||
1148 | #ifdef CONFIG_HIGHMEM | ||
1149 | BUG_ON(in_irq()); | ||
1150 | |||
1151 | local_bh_disable(); | ||
1152 | #endif | ||
1153 | return kmap_atomic(frag->page, KM_SKB_DATA_SOFTIRQ); | ||
1154 | } | ||
1155 | |||
1156 | static inline void kunmap_skb_frag(void *vaddr) | ||
1157 | { | ||
1158 | kunmap_atomic(vaddr, KM_SKB_DATA_SOFTIRQ); | ||
1159 | #ifdef CONFIG_HIGHMEM | ||
1160 | local_bh_enable(); | ||
1161 | #endif | ||
1162 | } | ||
1163 | |||
1164 | #define skb_queue_walk(queue, skb) \ | ||
1165 | for (skb = (queue)->next; \ | ||
1166 | prefetch(skb->next), (skb != (struct sk_buff *)(queue)); \ | ||
1167 | skb = skb->next) | ||
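
/*
 * Illustrative sketch, not part of the original header: walking a queue with
 * skb_queue_walk() while the caller holds whatever lock protects it. The
 * helper name and the size threshold are assumptions for the example.
 */
static inline unsigned int example_count_large(struct sk_buff_head *queue,
					       unsigned int threshold)
{
	struct sk_buff *skb;
	unsigned int count = 0;

	skb_queue_walk(queue, skb)
		if (skb->len > threshold)
			count++;
	return count;
}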
1168 | |||
1169 | |||
1170 | extern struct sk_buff *skb_recv_datagram(struct sock *sk, unsigned flags, | ||
1171 | int noblock, int *err); | ||
1172 | extern unsigned int datagram_poll(struct file *file, struct socket *sock, | ||
1173 | struct poll_table_struct *wait); | ||
1174 | extern int skb_copy_datagram_iovec(const struct sk_buff *from, | ||
1175 | int offset, struct iovec *to, | ||
1176 | int size); | ||
1177 | extern int skb_copy_and_csum_datagram_iovec(const | ||
1178 | struct sk_buff *skb, | ||
1179 | int hlen, | ||
1180 | struct iovec *iov); | ||
1181 | extern void skb_free_datagram(struct sock *sk, struct sk_buff *skb); | ||
1182 | extern unsigned int skb_checksum(const struct sk_buff *skb, int offset, | ||
1183 | int len, unsigned int csum); | ||
1184 | extern int skb_copy_bits(const struct sk_buff *skb, int offset, | ||
1185 | void *to, int len); | ||
1186 | extern unsigned int skb_copy_and_csum_bits(const struct sk_buff *skb, | ||
1187 | int offset, u8 *to, int len, | ||
1188 | unsigned int csum); | ||
1189 | extern void skb_copy_and_csum_dev(const struct sk_buff *skb, u8 *to); | ||
1190 | extern void skb_split(struct sk_buff *skb, | ||
1191 | struct sk_buff *skb1, const u32 len); | ||
1192 | |||
1193 | static inline void *skb_header_pointer(const struct sk_buff *skb, int offset, | ||
1194 | int len, void *buffer) | ||
1195 | { | ||
1196 | int hlen = skb_headlen(skb); | ||
1197 | |||
1198 | if (offset + len <= hlen) | ||
1199 | return skb->data + offset; | ||
1200 | |||
1201 | if (skb_copy_bits(skb, offset, buffer, len) < 0) | ||
1202 | return NULL; | ||
1203 | |||
1204 | return buffer; | ||
1205 | } | ||
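
/*
 * Illustrative sketch, not part of the original header: reading a 4-byte
 * field at @offset whether or not it lies in the linear area. The caller
 * provides the scratch buffer; names are assumptions for the example.
 */
static inline void *example_peek_field(const struct sk_buff *skb, int offset,
				       unsigned char scratch[4])
{
	/* Returns a pointer into skb->data when possible, otherwise copies
	 * into @scratch and returns it; NULL if the packet is too short. */
	return skb_header_pointer(skb, offset, 4, scratch);
}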
1206 | |||
1207 | extern void skb_init(void); | ||
1208 | extern void skb_add_mtu(int mtu); | ||
1209 | |||
1210 | #ifdef CONFIG_NETFILTER | ||
1211 | static inline void nf_conntrack_put(struct nf_conntrack *nfct) | ||
1212 | { | ||
1213 | if (nfct && atomic_dec_and_test(&nfct->use)) | ||
1214 | nfct->destroy(nfct); | ||
1215 | } | ||
1216 | static inline void nf_conntrack_get(struct nf_conntrack *nfct) | ||
1217 | { | ||
1218 | if (nfct) | ||
1219 | atomic_inc(&nfct->use); | ||
1220 | } | ||
1221 | static inline void nf_reset(struct sk_buff *skb) | ||
1222 | { | ||
1223 | nf_conntrack_put(skb->nfct); | ||
1224 | skb->nfct = NULL; | ||
1225 | #ifdef CONFIG_NETFILTER_DEBUG | ||
1226 | skb->nf_debug = 0; | ||
1227 | #endif | ||
1228 | } | ||
1229 | static inline void nf_reset_debug(struct sk_buff *skb) | ||
1230 | { | ||
1231 | #ifdef CONFIG_NETFILTER_DEBUG | ||
1232 | skb->nf_debug = 0; | ||
1233 | #endif | ||
1234 | } | ||
1235 | |||
1236 | #ifdef CONFIG_BRIDGE_NETFILTER | ||
1237 | static inline void nf_bridge_put(struct nf_bridge_info *nf_bridge) | ||
1238 | { | ||
1239 | if (nf_bridge && atomic_dec_and_test(&nf_bridge->use)) | ||
1240 | kfree(nf_bridge); | ||
1241 | } | ||
1242 | static inline void nf_bridge_get(struct nf_bridge_info *nf_bridge) | ||
1243 | { | ||
1244 | if (nf_bridge) | ||
1245 | atomic_inc(&nf_bridge->use); | ||
1246 | } | ||
1247 | #endif /* CONFIG_BRIDGE_NETFILTER */ | ||
1248 | #else /* CONFIG_NETFILTER */ | ||
1249 | static inline void nf_reset(struct sk_buff *skb) {} | ||
1250 | #endif /* CONFIG_NETFILTER */ | ||
1251 | |||
1252 | #endif /* __KERNEL__ */ | ||
1253 | #endif /* _LINUX_SKBUFF_H */ | ||