Diffstat (limited to 'net')

 net/core/dev.c        |  2 +-
 net/core/skbuff.c     | 17 ++++++++++-------
 net/core/sock.c       | 11 +++++++----
 net/ipv4/tcp_output.c |  2 +-
 4 files changed, 19 insertions(+), 13 deletions(-)
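A note for readers, not part of the patch itself: __nocast is a sparse-only annotation. Assuming the include/linux/compiler.h definition of this era (sketched below as an assumption, not quoted from this diff), it expands to nothing for a plain gcc build, so these changes affect type checking only, never generated code:

	#ifdef __CHECKER__
	# define __nocast	__attribute__((nocast))
	#else
	# define __nocast
	#endif

With a parameter qualified this way, a sparse run (make C=1) warns when a value is implicitly cast to the annotated type, which is how this series smokes out gfp masks still being passed around as plain int.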
diff --git a/net/core/dev.c b/net/core/dev.c
index 7f5f62c65115..ff9dc029233a 100644
--- a/net/core/dev.c
+++ b/net/core/dev.c
@@ -1127,7 +1127,7 @@ static inline int illegal_highdma(struct net_device *dev, struct sk_buff *skb)
 extern void skb_release_data(struct sk_buff *);
 
 /* Keep head the same: replace data */
-int __skb_linearize(struct sk_buff *skb, int gfp_mask)
+int __skb_linearize(struct sk_buff *skb, unsigned int __nocast gfp_mask)
 {
 	unsigned int size;
 	u8 *data;
diff --git a/net/core/skbuff.c b/net/core/skbuff.c
index 733deee24b9f..d9f7b06fe886 100644
--- a/net/core/skbuff.c
+++ b/net/core/skbuff.c
@@ -129,7 +129,7 @@ void skb_under_panic(struct sk_buff *skb, int sz, void *here)
  *	Buffers may only be allocated from interrupts using a @gfp_mask of
  *	%GFP_ATOMIC.
  */
-struct sk_buff *alloc_skb(unsigned int size, int gfp_mask)
+struct sk_buff *alloc_skb(unsigned int size, unsigned int __nocast gfp_mask)
 {
 	struct sk_buff *skb;
 	u8 *data;
@@ -182,7 +182,8 @@ nodata:
  *	%GFP_ATOMIC.
  */
 struct sk_buff *alloc_skb_from_cache(kmem_cache_t *cp,
-				     unsigned int size, int gfp_mask)
+				     unsigned int size,
+				     unsigned int __nocast gfp_mask)
 {
 	struct sk_buff *skb;
 	u8 *data;
@@ -322,7 +323,7 @@ void __kfree_skb(struct sk_buff *skb)
  *	%GFP_ATOMIC.
  */
 
-struct sk_buff *skb_clone(struct sk_buff *skb, int gfp_mask)
+struct sk_buff *skb_clone(struct sk_buff *skb, unsigned int __nocast gfp_mask)
 {
 	struct sk_buff *n = kmem_cache_alloc(skbuff_head_cache, gfp_mask);
 
@@ -460,7 +461,7 @@ static void copy_skb_header(struct sk_buff *new, const struct sk_buff *old)
  *	header is going to be modified. Use pskb_copy() instead.
  */
 
-struct sk_buff *skb_copy(const struct sk_buff *skb, int gfp_mask)
+struct sk_buff *skb_copy(const struct sk_buff *skb, unsigned int __nocast gfp_mask)
 {
 	int headerlen = skb->data - skb->head;
 	/*
@@ -499,7 +500,7 @@ struct sk_buff *skb_copy(const struct sk_buff *skb, int gfp_mask)
  *	The returned buffer has a reference count of 1.
  */
 
-struct sk_buff *pskb_copy(struct sk_buff *skb, int gfp_mask)
+struct sk_buff *pskb_copy(struct sk_buff *skb, unsigned int __nocast gfp_mask)
 {
 	/*
 	 *	Allocate the copy buffer
@@ -557,7 +558,8 @@ out:
  *	reloaded after call to this function.
  */
 
-int pskb_expand_head(struct sk_buff *skb, int nhead, int ntail, int gfp_mask)
+int pskb_expand_head(struct sk_buff *skb, int nhead, int ntail,
+		     unsigned int __nocast gfp_mask)
 {
 	int i;
 	u8 *data;
@@ -647,7 +649,8 @@ struct sk_buff *skb_realloc_headroom(struct sk_buff *skb, unsigned int headroom)
  *	only by netfilter in the cases when checksum is recalculated? --ANK
  */
 struct sk_buff *skb_copy_expand(const struct sk_buff *skb,
-				int newheadroom, int newtailroom, int gfp_mask)
+				int newheadroom, int newtailroom,
+				unsigned int __nocast gfp_mask)
 {
 	/*
 	 *	Allocate the copy buffer
diff --git a/net/core/sock.c b/net/core/sock.c
index a6ec3ada7f9e..8b35ccdc2b3b 100644
--- a/net/core/sock.c
+++ b/net/core/sock.c
@@ -622,7 +622,8 @@ lenout:
  *	@prot: struct proto associated with this new sock instance
  *	@zero_it: if we should zero the newly allocated sock
  */
-struct sock *sk_alloc(int family, int priority, struct proto *prot, int zero_it)
+struct sock *sk_alloc(int family, unsigned int __nocast priority,
+		      struct proto *prot, int zero_it)
 {
 	struct sock *sk = NULL;
 	kmem_cache_t *slab = prot->slab;
@@ -750,7 +751,8 @@ unsigned long sock_i_ino(struct sock *sk)
 /*
  * Allocate a skb from the socket's send buffer.
  */
-struct sk_buff *sock_wmalloc(struct sock *sk, unsigned long size, int force, int priority)
+struct sk_buff *sock_wmalloc(struct sock *sk, unsigned long size, int force,
+			     unsigned int __nocast priority)
 {
 	if (force || atomic_read(&sk->sk_wmem_alloc) < sk->sk_sndbuf) {
 		struct sk_buff * skb = alloc_skb(size, priority);
@@ -765,7 +767,8 @@ struct sk_buff *sock_wmalloc(struct sock *sk, unsigned long size, int force, int
 /*
  * Allocate a skb from the socket's receive buffer.
  */
-struct sk_buff *sock_rmalloc(struct sock *sk, unsigned long size, int force, int priority)
+struct sk_buff *sock_rmalloc(struct sock *sk, unsigned long size, int force,
+			     unsigned int __nocast priority)
 {
 	if (force || atomic_read(&sk->sk_rmem_alloc) < sk->sk_rcvbuf) {
 		struct sk_buff *skb = alloc_skb(size, priority);
@@ -780,7 +783,7 @@ struct sk_buff *sock_rmalloc(struct sock *sk, unsigned long size, int force, int
 /*
  * Allocate a memory block from the socket's option memory buffer.
  */
-void *sock_kmalloc(struct sock *sk, int size, int priority)
+void *sock_kmalloc(struct sock *sk, int size, unsigned int __nocast priority)
 {
 	if ((unsigned)size <= sysctl_optmem_max &&
 	    atomic_read(&sk->sk_omem_alloc) + size < sysctl_optmem_max) {
diff --git a/net/ipv4/tcp_output.c b/net/ipv4/tcp_output.c
index e041d057ec86..e3f8ea1bfa9c 100644
--- a/net/ipv4/tcp_output.c
+++ b/net/ipv4/tcp_output.c
@@ -1613,7 +1613,7 @@ void tcp_send_fin(struct sock *sk)
  * was unread data in the receive queue. This behavior is recommended
  * by draft-ietf-tcpimpl-prob-03.txt section 3.10. -DaveM
  */
-void tcp_send_active_reset(struct sock *sk, int priority)
+void tcp_send_active_reset(struct sock *sk, unsigned int __nocast priority)
 {
 	struct tcp_sock *tp = tcp_sk(sk);
 	struct sk_buff *skb;
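
For illustration, a hypothetical caller (my example, not from the patch) of the kind these annotations are meant to flag. It assumes GFP_ATOMIC expands to an unsigned constant, as in the gfp.h of this vintage, so only the mask laundered through a plain int should trip sparse:

	/* hypothetical example, for illustration only */
	#include <linux/skbuff.h>
	#include <linux/gfp.h>

	static void nocast_demo(unsigned int len)
	{
		int mask = GFP_ATOMIC;		/* gfp mask stored in a plain int */
		struct sk_buff *skb;

		skb = alloc_skb(len, mask);	/* int implicitly cast to the
						 * __nocast parameter: sparse warns */
		if (skb)
			kfree_skb(skb);

		skb = alloc_skb(len, GFP_ATOMIC);	/* unsigned constant: clean */
		if (skb)
			kfree_skb(skb);
	}

Exactly which conversions sparse reports depends on its nocast rules, but the intent of the prototypes above is plain: keep gfp masks out of signed-int variables and parameters.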