Diffstat (limited to 'net/core')
-rw-r--r--  net/core/dev.c    |  2 +-
-rw-r--r--  net/core/skbuff.c | 14 +++++++-------
-rw-r--r--  net/core/sock.c   | 10 +++++-----
3 files changed, 13 insertions(+), 13 deletions(-)
diff --git a/net/core/dev.c b/net/core/dev.c
index 9066c874e273..a44eeef24edf 100644
--- a/net/core/dev.c
+++ b/net/core/dev.c
@@ -1132,7 +1132,7 @@ static inline int illegal_highdma(struct net_device *dev, struct sk_buff *skb)
 #endif
 
 /* Keep head the same: replace data */
-int __skb_linearize(struct sk_buff *skb, unsigned int __nocast gfp_mask)
+int __skb_linearize(struct sk_buff *skb, gfp_t gfp_mask)
 {
 	unsigned int size;
 	u8 *data;
diff --git a/net/core/skbuff.c b/net/core/skbuff.c
index 0e9431b59fb2..af9b1516e21f 100644
--- a/net/core/skbuff.c
+++ b/net/core/skbuff.c
@@ -130,7 +130,7 @@ void skb_under_panic(struct sk_buff *skb, int sz, void *here)
  * Buffers may only be allocated from interrupts using a @gfp_mask of
  * %GFP_ATOMIC.
  */
-struct sk_buff *__alloc_skb(unsigned int size, unsigned int __nocast gfp_mask,
+struct sk_buff *__alloc_skb(unsigned int size, gfp_t gfp_mask,
 			    int fclone)
 {
 	struct sk_buff *skb;
@@ -198,7 +198,7 @@ nodata:
  */
 struct sk_buff *alloc_skb_from_cache(kmem_cache_t *cp,
 				     unsigned int size,
-				     unsigned int __nocast gfp_mask)
+				     gfp_t gfp_mask)
 {
 	struct sk_buff *skb;
 	u8 *data;
@@ -361,7 +361,7 @@ void __kfree_skb(struct sk_buff *skb)
  * %GFP_ATOMIC.
  */
 
-struct sk_buff *skb_clone(struct sk_buff *skb, unsigned int __nocast gfp_mask)
+struct sk_buff *skb_clone(struct sk_buff *skb, gfp_t gfp_mask)
 {
 	struct sk_buff *n;
 
@@ -500,7 +500,7 @@ static void copy_skb_header(struct sk_buff *new, const struct sk_buff *old)
  * header is going to be modified. Use pskb_copy() instead.
  */
 
-struct sk_buff *skb_copy(const struct sk_buff *skb, unsigned int __nocast gfp_mask)
+struct sk_buff *skb_copy(const struct sk_buff *skb, gfp_t gfp_mask)
 {
 	int headerlen = skb->data - skb->head;
 	/*
@@ -539,7 +539,7 @@ struct sk_buff *skb_copy(const struct sk_buff *skb, unsigned int __nocast gfp_ma
  * The returned buffer has a reference count of 1.
  */
 
-struct sk_buff *pskb_copy(struct sk_buff *skb, unsigned int __nocast gfp_mask)
+struct sk_buff *pskb_copy(struct sk_buff *skb, gfp_t gfp_mask)
 {
 	/*
 	 * Allocate the copy buffer
@@ -598,7 +598,7 @@ out:
  */
 
 int pskb_expand_head(struct sk_buff *skb, int nhead, int ntail,
-		     unsigned int __nocast gfp_mask)
+		     gfp_t gfp_mask)
 {
 	int i;
 	u8 *data;
@@ -689,7 +689,7 @@ struct sk_buff *skb_realloc_headroom(struct sk_buff *skb, unsigned int headroom)
  */
 struct sk_buff *skb_copy_expand(const struct sk_buff *skb,
 				int newheadroom, int newtailroom,
-				unsigned int __nocast gfp_mask)
+				gfp_t gfp_mask)
 {
 	/*
 	 * Allocate the copy buffer
diff --git a/net/core/sock.c b/net/core/sock.c
index 928d2a1d6d8e..1c52fe809eda 100644
--- a/net/core/sock.c
+++ b/net/core/sock.c
@@ -637,7 +637,7 @@ lenout:
  * @prot: struct proto associated with this new sock instance
  * @zero_it: if we should zero the newly allocated sock
  */
-struct sock *sk_alloc(int family, unsigned int __nocast priority,
+struct sock *sk_alloc(int family, gfp_t priority,
 		      struct proto *prot, int zero_it)
 {
 	struct sock *sk = NULL;
@@ -704,7 +704,7 @@ void sk_free(struct sock *sk)
 		module_put(owner);
 }
 
-struct sock *sk_clone(const struct sock *sk, const unsigned int __nocast priority)
+struct sock *sk_clone(const struct sock *sk, const gfp_t priority)
 {
 	struct sock *newsk = sk_alloc(sk->sk_family, priority, sk->sk_prot, 0);
 
@@ -845,7 +845,7 @@ unsigned long sock_i_ino(struct sock *sk)
  * Allocate a skb from the socket's send buffer.
  */
 struct sk_buff *sock_wmalloc(struct sock *sk, unsigned long size, int force,
-			     unsigned int __nocast priority)
+			     gfp_t priority)
 {
 	if (force || atomic_read(&sk->sk_wmem_alloc) < sk->sk_sndbuf) {
 		struct sk_buff * skb = alloc_skb(size, priority);
@@ -861,7 +861,7 @@ struct sk_buff *sock_wmalloc(struct sock *sk, unsigned long size, int force,
  * Allocate a skb from the socket's receive buffer.
  */
 struct sk_buff *sock_rmalloc(struct sock *sk, unsigned long size, int force,
-			     unsigned int __nocast priority)
+			     gfp_t priority)
 {
 	if (force || atomic_read(&sk->sk_rmem_alloc) < sk->sk_rcvbuf) {
 		struct sk_buff *skb = alloc_skb(size, priority);
@@ -876,7 +876,7 @@ struct sk_buff *sock_rmalloc(struct sock *sk, unsigned long size, int force,
 /*
  * Allocate a memory block from the socket's option memory buffer.
  */
-void *sock_kmalloc(struct sock *sk, int size, unsigned int __nocast priority)
+void *sock_kmalloc(struct sock *sk, int size, gfp_t priority)
 {
 	if ((unsigned)size <= sysctl_optmem_max &&
 	    atomic_read(&sk->sk_omem_alloc) + size < sysctl_optmem_max) {
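The change applied throughout this diff is purely a type change: allocation-flag parameters previously declared as "unsigned int __nocast" now use the gfp_t typedef, giving the GFP_* flags a single named type that sparse can check at call sites. A minimal sketch of the resulting signature style follows, assuming standard kernel headers; example_alloc is a hypothetical helper used only for illustration and is not part of this patch.

/*
 * Illustration only (not from the patch): an allocator helper whose
 * flags argument is typed as gfp_t, matching the converted signatures
 * above (e.g. sock_kmalloc, __alloc_skb).
 */
#include <linux/gfp.h>
#include <linux/slab.h>

static void *example_alloc(size_t size, gfp_t gfp_mask)
{
	/*
	 * Callers pass GFP_KERNEL, GFP_ATOMIC, etc.; the flags are handed
	 * straight through to kmalloc(), which accepts the same values.
	 */
	return kmalloc(size, gfp_mask);
}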