author	Vegard Nossum <vegard.nossum@gmail.com>	2008-08-30 06:16:35 -0400
committer	Vegard Nossum <vegard.nossum@gmail.com>	2009-06-15 09:49:25 -0400
commit	fe55f6d5c0cfec4a710ef6ff63f162b99d5f7842 (patch)
tree	4c4f0960d7355647e67f62e30e10c9215c123b65 /net
parent	fc7d0c9f2122e8bf58deaf1252b0e750df5b0e91 (diff)
net: use kmemcheck bitfields API for skbuff
Signed-off-by: Vegard Nossum <vegard.nossum@gmail.com>
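
Editor's note (not part of the patch): the kmemcheck bitfields API has two halves. The struct brackets a group of bitfields with kmemcheck_bitfield_begin()/kmemcheck_bitfield_end() markers, and each allocation site then calls kmemcheck_annotate_bitfield() on that group so the first read-modify-write of the shared bitfield word is not reported as a use of uninitialized memory. A minimal sketch of the pattern follows; struct example_buf and example_alloc() are hypothetical names for illustration, only the kmemcheck macros come from <linux/kmemcheck.h>.

	#include <linux/kmemcheck.h>
	#include <linux/types.h>
	#include <linux/slab.h>

	/* Illustrative struct (hypothetical): the bitfields are bracketed so
	 * kmemcheck knows which bytes make up the "flags1" block. */
	struct example_buf {
		kmemcheck_bitfield_begin(flags1);
		__u8	cloned:1,
			nohdr:1,
			fclone:2;
		kmemcheck_bitfield_end(flags1);
	};

	static struct example_buf *example_alloc(gfp_t gfp_mask)
	{
		struct example_buf *buf;

		buf = kmalloc(sizeof(*buf), gfp_mask);
		if (!buf)
			return NULL;

		/* Mark the whole bitfield word as initialized before the first
		 * partial write below; otherwise kmemcheck would flag the
		 * read-modify-write of the uninitialized word. */
		kmemcheck_annotate_bitfield(buf, flags1);
		buf->fclone = 0;

		return buf;
	}

The hunks below apply exactly this annotation step to the three places where a struct sk_buff head is obtained from the slab allocator.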
Diffstat (limited to 'net')
-rw-r--r--	net/core/skbuff.c	| 8 ++++++++
1 file changed, 8 insertions(+), 0 deletions(-)
diff --git a/net/core/skbuff.c b/net/core/skbuff.c
index c2e4fb8f3546..f0c4c6ad774b 100644
--- a/net/core/skbuff.c
+++ b/net/core/skbuff.c
@@ -39,6 +39,7 @@
 #include <linux/module.h>
 #include <linux/types.h>
 #include <linux/kernel.h>
+#include <linux/kmemcheck.h>
 #include <linux/mm.h>
 #include <linux/interrupt.h>
 #include <linux/in.h>
@@ -201,6 +202,8 @@ struct sk_buff *__alloc_skb(unsigned int size, gfp_t gfp_mask,
 	skb->data = data;
 	skb_reset_tail_pointer(skb);
 	skb->end = skb->tail + size;
+	kmemcheck_annotate_bitfield(skb, flags1);
+	kmemcheck_annotate_bitfield(skb, flags2);
 	/* make sure we initialize shinfo sequentially */
 	shinfo = skb_shinfo(skb);
 	atomic_set(&shinfo->dataref, 1);
@@ -217,6 +220,8 @@ struct sk_buff *__alloc_skb(unsigned int size, gfp_t gfp_mask,
 		struct sk_buff *child = skb + 1;
 		atomic_t *fclone_ref = (atomic_t *) (child + 1);
 
+		kmemcheck_annotate_bitfield(child, flags1);
+		kmemcheck_annotate_bitfield(child, flags2);
 		skb->fclone = SKB_FCLONE_ORIG;
 		atomic_set(fclone_ref, 1);
 
@@ -633,6 +638,9 @@ struct sk_buff *skb_clone(struct sk_buff *skb, gfp_t gfp_mask)
 		n = kmem_cache_alloc(skbuff_head_cache, gfp_mask);
 		if (!n)
 			return NULL;
+
+		kmemcheck_annotate_bitfield(n, flags1);
+		kmemcheck_annotate_bitfield(n, flags2);
 		n->fclone = SKB_FCLONE_UNAVAILABLE;
 	}
 