author		Victor Fusco <victor@cetuc.puc-rio.br>	2005-07-08 17:57:47 -0400
committer	David S. Miller <davem@davemloft.net>	2005-07-08 17:57:47 -0400
commit		86a76caf8705e3524e15f343f3c4806939a06dc8
tree		ac2022a6073b5af228e009178048bdab070d2230 /net/core/skbuff.c
parent		b03efcfb2180289718991bb984044ce6c5b7d1b0
[NET]: Fix sparse warnings
From: Victor Fusco <victor@cetuc.puc-rio.br>
Fix the sparse warning "implicit cast to nocast type"
Signed-off-by: Victor Fusco <victor@cetuc.puc-rio.br>
Signed-off-by: Domen Puncer <domen@coderock.org>
Signed-off-by: David S. Miller <davem@davemloft.net>
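For context: __nocast is a sparse-only annotation (when __CHECKER__ is defined, compiler.h maps it to __attribute__((nocast)); under gcc it expands to nothing), and sparse emits "implicit cast to nocast type" when a plain integer is passed where a __nocast-annotated type is expected. Such warnings surface when building with sparse enabled, e.g. make C=1. The sketch below is a minimal, hypothetical illustration of that warning class -- annotated_alloc() and demo_caller() are stand-ins, not kernel functions; only the annotation and the calling pattern mirror the tree.

/* Hypothetical sketch, not kernel code: how the sparse warning arises. */
#ifdef __CHECKER__
# define __nocast	__attribute__((nocast))
#else
# define __nocast
#endif

/* Stand-in for an allocator whose flags argument already carries the
 * annotation, as the kernel's allocator interfaces did at the time of
 * this patch. */
void *annotated_alloc(unsigned int size, unsigned int __nocast gfp_mask);

void *demo_caller(void)
{
	int gfp_mask = 0;	/* plain int used to carry allocation flags */

	/* sparse: warning: implicit cast to nocast type -- the bare int
	 * is silently converted to the annotated parameter type. */
	return annotated_alloc(128, gfp_mask);
}

The sk_buff helpers patched below were in essentially this position: they took a plain int gfp_mask and handed it on to the annotated slab allocator, so each of them warned.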
Diffstat (limited to 'net/core/skbuff.c')
 -rw-r--r--	net/core/skbuff.c | 17
 1 files changed, 10 insertions, 7 deletions
diff --git a/net/core/skbuff.c b/net/core/skbuff.c
index 733deee24b9f..d9f7b06fe886 100644
--- a/net/core/skbuff.c
+++ b/net/core/skbuff.c
@@ -129,7 +129,7 @@ void skb_under_panic(struct sk_buff *skb, int sz, void *here)
  *	Buffers may only be allocated from interrupts using a @gfp_mask of
  *	%GFP_ATOMIC.
  */
-struct sk_buff *alloc_skb(unsigned int size, int gfp_mask)
+struct sk_buff *alloc_skb(unsigned int size, unsigned int __nocast gfp_mask)
 {
 	struct sk_buff *skb;
 	u8 *data;
@@ -182,7 +182,8 @@ nodata:
  *	%GFP_ATOMIC.
  */
 struct sk_buff *alloc_skb_from_cache(kmem_cache_t *cp,
-			     unsigned int size, int gfp_mask)
+			     unsigned int size,
+			     unsigned int __nocast gfp_mask)
 {
 	struct sk_buff *skb;
 	u8 *data;
@@ -322,7 +323,7 @@ void __kfree_skb(struct sk_buff *skb)
  *	%GFP_ATOMIC.
  */
 
-struct sk_buff *skb_clone(struct sk_buff *skb, int gfp_mask)
+struct sk_buff *skb_clone(struct sk_buff *skb, unsigned int __nocast gfp_mask)
 {
 	struct sk_buff *n = kmem_cache_alloc(skbuff_head_cache, gfp_mask);
 
@@ -460,7 +461,7 @@ static void copy_skb_header(struct sk_buff *new, const struct sk_buff *old)
  *	header is going to be modified. Use pskb_copy() instead.
  */
 
-struct sk_buff *skb_copy(const struct sk_buff *skb, int gfp_mask)
+struct sk_buff *skb_copy(const struct sk_buff *skb, unsigned int __nocast gfp_mask)
 {
 	int headerlen = skb->data - skb->head;
 	/*
@@ -499,7 +500,7 @@ struct sk_buff *skb_copy(const struct sk_buff *skb, int gfp_mask)
  *	The returned buffer has a reference count of 1.
  */
 
-struct sk_buff *pskb_copy(struct sk_buff *skb, int gfp_mask)
+struct sk_buff *pskb_copy(struct sk_buff *skb, unsigned int __nocast gfp_mask)
 {
 	/*
 	 *	Allocate the copy buffer
@@ -557,7 +558,8 @@ out:
  *	reloaded after call to this function.
  */
 
-int pskb_expand_head(struct sk_buff *skb, int nhead, int ntail, int gfp_mask)
+int pskb_expand_head(struct sk_buff *skb, int nhead, int ntail,
+		     unsigned int __nocast gfp_mask)
 {
 	int i;
 	u8 *data;
@@ -647,7 +649,8 @@ struct sk_buff *skb_realloc_headroom(struct sk_buff *skb, unsigned int headroom)
  *	only by netfilter in the cases when checksum is recalculated? --ANK
  */
 struct sk_buff *skb_copy_expand(const struct sk_buff *skb,
-				int newheadroom, int newtailroom, int gfp_mask)
+				int newheadroom, int newtailroom,
+				unsigned int __nocast gfp_mask)
 {
 	/*
 	 *	Allocate the copy buffer
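Every hunk applies the same one-line idea: a helper that merely forwards its gfp_mask to an already-annotated allocator gets the identical unsigned int __nocast type on its own parameter, so the value keeps its checked type from the outermost caller down to the allocator. A compressed, hypothetical before/after view of that pattern (names are illustrative, not from the tree):

#ifdef __CHECKER__
# define __nocast	__attribute__((nocast))
#else
# define __nocast
#endif

void *backend_alloc(unsigned int __nocast flags);	/* e.g. the slab allocator */

/* Before: a plain-int parameter means forwarding it is an implicit
 * cast to the nocast type, so sparse warns inside the helper. */
void *helper_before(int gfp_mask)
{
	return backend_alloc(gfp_mask);
}

/* After: the parameter itself is annotated; nothing is cast on the
 * way down, and callers that pass a bare int are flagged at their own
 * call sites instead -- the annotation propagates up the stack. */
void *helper_after(unsigned int __nocast gfp_mask)
{
	return backend_alloc(gfp_mask);
}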