-rw-r--r--   include/linux/skbuff.h | 29
-rw-r--r--   include/net/sock.h     | 18
-rw-r--r--   include/net/tcp.h      |  3
-rw-r--r--   net/core/dev.c         |  2
-rw-r--r--   net/core/skbuff.c      | 17
-rw-r--r--   net/core/sock.c        | 11
-rw-r--r--   net/ipv4/tcp_output.c  |  2
7 files changed, 50 insertions(+), 32 deletions(-)
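
This patch changes the allocation-priority arguments in the networking core
from plain int to unsigned int __nocast, so that sparse can warn when a caller
passes something that is not a gfp mask. As a rough sketch for context (not
part of the patch, and the exact wording in include/linux/compiler.h may
differ between kernel versions), __nocast is a checker-only annotation that
costs nothing in a normal build:

/* Sketch: how the __nocast annotation is typically defined */
#ifdef __CHECKER__                           /* set when sparse is running   */
# define __nocast __attribute__((nocast))    /* sparse-only type attribute   */
#else
# define __nocast                            /* expands to nothing for gcc   */
#endif

Because the annotation compiles away, the generated code is unchanged; the
extra type checking only appears when the tree is built with sparse, for
example via "make C=2".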
diff --git a/include/linux/skbuff.h b/include/linux/skbuff.h
index 14b950413495..5d4a990d5577 100644
--- a/include/linux/skbuff.h
+++ b/include/linux/skbuff.h
@@ -300,20 +300,26 @@ struct sk_buff {
 #include <asm/system.h>
 
 extern void __kfree_skb(struct sk_buff *skb);
-extern struct sk_buff *alloc_skb(unsigned int size, int priority);
+extern struct sk_buff *alloc_skb(unsigned int size,
+                                 unsigned int __nocast priority);
 extern struct sk_buff *alloc_skb_from_cache(kmem_cache_t *cp,
-                                            unsigned int size, int priority);
+                                            unsigned int size,
+                                            unsigned int __nocast priority);
 extern void kfree_skbmem(struct sk_buff *skb);
-extern struct sk_buff *skb_clone(struct sk_buff *skb, int priority);
-extern struct sk_buff *skb_copy(const struct sk_buff *skb, int priority);
-extern struct sk_buff *pskb_copy(struct sk_buff *skb, int gfp_mask);
+extern struct sk_buff *skb_clone(struct sk_buff *skb,
+                                 unsigned int __nocast priority);
+extern struct sk_buff *skb_copy(const struct sk_buff *skb,
+                                unsigned int __nocast priority);
+extern struct sk_buff *pskb_copy(struct sk_buff *skb,
+                                 unsigned int __nocast gfp_mask);
 extern int pskb_expand_head(struct sk_buff *skb,
-                            int nhead, int ntail, int gfp_mask);
+                            int nhead, int ntail,
+                            unsigned int __nocast gfp_mask);
 extern struct sk_buff *skb_realloc_headroom(struct sk_buff *skb,
                                             unsigned int headroom);
 extern struct sk_buff *skb_copy_expand(const struct sk_buff *skb,
                                        int newheadroom, int newtailroom,
-                                       int priority);
+                                       unsigned int __nocast priority);
 extern struct sk_buff * skb_pad(struct sk_buff *skb, int pad);
 #define dev_kfree_skb(a) kfree_skb(a)
 extern void skb_over_panic(struct sk_buff *skb, int len,
@@ -464,7 +470,8 @@ static inline int skb_shared(const struct sk_buff *skb)
  *
  * NULL is returned on a memory allocation failure.
  */
-static inline struct sk_buff *skb_share_check(struct sk_buff *skb, int pri)
+static inline struct sk_buff *skb_share_check(struct sk_buff *skb,
+                                              unsigned int __nocast pri)
 {
         might_sleep_if(pri & __GFP_WAIT);
         if (skb_shared(skb)) {
@@ -1001,7 +1008,7 @@ static inline void __skb_queue_purge(struct sk_buff_head *list)
  * %NULL is returned in there is no free memory.
  */
 static inline struct sk_buff *__dev_alloc_skb(unsigned int length,
-                                              int gfp_mask)
+                                              unsigned int __nocast gfp_mask)
 {
         struct sk_buff *skb = alloc_skb(length + 16, gfp_mask);
         if (likely(skb))
@@ -1114,8 +1121,8 @@ static inline int skb_can_coalesce(struct sk_buff *skb, int i,
  * If there is no free memory -ENOMEM is returned, otherwise zero
  * is returned and the old skb data released.
  */
-extern int __skb_linearize(struct sk_buff *skb, int gfp);
-static inline int skb_linearize(struct sk_buff *skb, int gfp)
+extern int __skb_linearize(struct sk_buff *skb, unsigned int __nocast gfp);
+static inline int skb_linearize(struct sk_buff *skb, unsigned int __nocast gfp)
 {
         return __skb_linearize(skb, gfp);
 }
diff --git a/include/net/sock.h b/include/net/sock.h
index 7b76f891ae2d..a1042d08becd 100644
--- a/include/net/sock.h
+++ b/include/net/sock.h
@@ -684,16 +684,17 @@ extern void FASTCALL(release_sock(struct sock *sk));
 #define bh_lock_sock(__sk) spin_lock(&((__sk)->sk_lock.slock))
 #define bh_unlock_sock(__sk) spin_unlock(&((__sk)->sk_lock.slock))
 
-extern struct sock *sk_alloc(int family, int priority,
+extern struct sock *sk_alloc(int family,
+                             unsigned int __nocast priority,
                              struct proto *prot, int zero_it);
 extern void sk_free(struct sock *sk);
 
 extern struct sk_buff *sock_wmalloc(struct sock *sk,
                                     unsigned long size, int force,
-                                    int priority);
+                                    unsigned int __nocast priority);
 extern struct sk_buff *sock_rmalloc(struct sock *sk,
                                     unsigned long size, int force,
-                                    int priority);
+                                    unsigned int __nocast priority);
 extern void sock_wfree(struct sk_buff *skb);
 extern void sock_rfree(struct sk_buff *skb);
 
@@ -708,7 +709,8 @@ extern struct sk_buff *sock_alloc_send_skb(struct sock *sk,
                                            unsigned long size,
                                            int noblock,
                                            int *errcode);
-extern void *sock_kmalloc(struct sock *sk, int size, int priority);
+extern void *sock_kmalloc(struct sock *sk, int size,
+                          unsigned int __nocast priority);
 extern void sock_kfree_s(struct sock *sk, void *mem, int size);
 extern void sk_send_sigurg(struct sock *sk);
 
@@ -1132,7 +1134,8 @@ static inline void sk_stream_moderate_sndbuf(struct sock *sk)
 }
 
 static inline struct sk_buff *sk_stream_alloc_pskb(struct sock *sk,
-                                                   int size, int mem, int gfp)
+                                                   int size, int mem,
+                                                   unsigned int __nocast gfp)
 {
         struct sk_buff *skb;
         int hdr_len;
@@ -1155,7 +1158,8 @@ static inline struct sk_buff *sk_stream_alloc_pskb(struct sock *sk,
 }
 
 static inline struct sk_buff *sk_stream_alloc_skb(struct sock *sk,
-                                                  int size, int gfp)
+                                                  int size,
+                                                  unsigned int __nocast gfp)
 {
         return sk_stream_alloc_pskb(sk, size, 0, gfp);
 }
@@ -1188,7 +1192,7 @@ static inline int sock_writeable(const struct sock *sk)
         return atomic_read(&sk->sk_wmem_alloc) < (sk->sk_sndbuf / 2);
 }
 
-static inline int gfp_any(void)
+static inline unsigned int __nocast gfp_any(void)
 {
         return in_softirq() ? GFP_ATOMIC : GFP_KERNEL;
 }
diff --git a/include/net/tcp.h b/include/net/tcp.h
index 4d5b12e4dc11..f4f9aba07ac2 100644
--- a/include/net/tcp.h
+++ b/include/net/tcp.h
@@ -860,7 +860,8 @@ extern void tcp_send_probe0(struct sock *);
 extern void tcp_send_partial(struct sock *);
 extern int tcp_write_wakeup(struct sock *);
 extern void tcp_send_fin(struct sock *sk);
-extern void tcp_send_active_reset(struct sock *sk, int priority);
+extern void tcp_send_active_reset(struct sock *sk,
+                                  unsigned int __nocast priority);
 extern int tcp_send_synack(struct sock *);
 extern void tcp_push_one(struct sock *, unsigned int mss_now);
 extern void tcp_send_ack(struct sock *sk);
diff --git a/net/core/dev.c b/net/core/dev.c
index 7f5f62c65115..ff9dc029233a 100644
--- a/net/core/dev.c
+++ b/net/core/dev.c
@@ -1127,7 +1127,7 @@ static inline int illegal_highdma(struct net_device *dev, struct sk_buff *skb)
 extern void skb_release_data(struct sk_buff *);
 
 /* Keep head the same: replace data */
-int __skb_linearize(struct sk_buff *skb, int gfp_mask)
+int __skb_linearize(struct sk_buff *skb, unsigned int __nocast gfp_mask)
 {
         unsigned int size;
         u8 *data;
diff --git a/net/core/skbuff.c b/net/core/skbuff.c
index 733deee24b9f..d9f7b06fe886 100644
--- a/net/core/skbuff.c
+++ b/net/core/skbuff.c
@@ -129,7 +129,7 @@ void skb_under_panic(struct sk_buff *skb, int sz, void *here)
  * Buffers may only be allocated from interrupts using a @gfp_mask of
  * %GFP_ATOMIC.
  */
-struct sk_buff *alloc_skb(unsigned int size, int gfp_mask)
+struct sk_buff *alloc_skb(unsigned int size, unsigned int __nocast gfp_mask)
 {
         struct sk_buff *skb;
         u8 *data;
@@ -182,7 +182,8 @@ nodata:
  * %GFP_ATOMIC.
  */
 struct sk_buff *alloc_skb_from_cache(kmem_cache_t *cp,
-                                     unsigned int size, int gfp_mask)
+                                     unsigned int size,
+                                     unsigned int __nocast gfp_mask)
 {
         struct sk_buff *skb;
         u8 *data;
@@ -322,7 +323,7 @@ void __kfree_skb(struct sk_buff *skb)
  * %GFP_ATOMIC.
  */
 
-struct sk_buff *skb_clone(struct sk_buff *skb, int gfp_mask)
+struct sk_buff *skb_clone(struct sk_buff *skb, unsigned int __nocast gfp_mask)
 {
         struct sk_buff *n = kmem_cache_alloc(skbuff_head_cache, gfp_mask);
 
@@ -460,7 +461,7 @@ static void copy_skb_header(struct sk_buff *new, const struct sk_buff *old)
  * header is going to be modified. Use pskb_copy() instead.
  */
 
-struct sk_buff *skb_copy(const struct sk_buff *skb, int gfp_mask)
+struct sk_buff *skb_copy(const struct sk_buff *skb, unsigned int __nocast gfp_mask)
 {
         int headerlen = skb->data - skb->head;
         /*
@@ -499,7 +500,7 @@ struct sk_buff *skb_copy(const struct sk_buff *skb, int gfp_mask)
  * The returned buffer has a reference count of 1.
  */
 
-struct sk_buff *pskb_copy(struct sk_buff *skb, int gfp_mask)
+struct sk_buff *pskb_copy(struct sk_buff *skb, unsigned int __nocast gfp_mask)
 {
         /*
          * Allocate the copy buffer
@@ -557,7 +558,8 @@ out:
  * reloaded after call to this function.
  */
 
-int pskb_expand_head(struct sk_buff *skb, int nhead, int ntail, int gfp_mask)
+int pskb_expand_head(struct sk_buff *skb, int nhead, int ntail,
+                     unsigned int __nocast gfp_mask)
 {
         int i;
         u8 *data;
@@ -647,7 +649,8 @@ struct sk_buff *skb_realloc_headroom(struct sk_buff *skb, unsigned int headroom)
  * only by netfilter in the cases when checksum is recalculated? --ANK
  */
 struct sk_buff *skb_copy_expand(const struct sk_buff *skb,
-                                int newheadroom, int newtailroom, int gfp_mask)
+                                int newheadroom, int newtailroom,
+                                unsigned int __nocast gfp_mask)
 {
         /*
          * Allocate the copy buffer
diff --git a/net/core/sock.c b/net/core/sock.c
index a6ec3ada7f9e..8b35ccdc2b3b 100644
--- a/net/core/sock.c
+++ b/net/core/sock.c
@@ -622,7 +622,8 @@ lenout:
  * @prot: struct proto associated with this new sock instance
  * @zero_it: if we should zero the newly allocated sock
  */
-struct sock *sk_alloc(int family, int priority, struct proto *prot, int zero_it)
+struct sock *sk_alloc(int family, unsigned int __nocast priority,
+                      struct proto *prot, int zero_it)
 {
         struct sock *sk = NULL;
         kmem_cache_t *slab = prot->slab;
@@ -750,7 +751,8 @@ unsigned long sock_i_ino(struct sock *sk)
 /*
  * Allocate a skb from the socket's send buffer.
  */
-struct sk_buff *sock_wmalloc(struct sock *sk, unsigned long size, int force, int priority)
+struct sk_buff *sock_wmalloc(struct sock *sk, unsigned long size, int force,
+                             unsigned int __nocast priority)
 {
         if (force || atomic_read(&sk->sk_wmem_alloc) < sk->sk_sndbuf) {
                 struct sk_buff * skb = alloc_skb(size, priority);
@@ -765,7 +767,8 @@ struct sk_buff *sock_wmalloc(struct sock *sk, unsigned long size, int force, int
 /*
  * Allocate a skb from the socket's receive buffer.
  */
-struct sk_buff *sock_rmalloc(struct sock *sk, unsigned long size, int force, int priority)
+struct sk_buff *sock_rmalloc(struct sock *sk, unsigned long size, int force,
+                             unsigned int __nocast priority)
 {
         if (force || atomic_read(&sk->sk_rmem_alloc) < sk->sk_rcvbuf) {
                 struct sk_buff *skb = alloc_skb(size, priority);
@@ -780,7 +783,7 @@ struct sk_buff *sock_rmalloc(struct sock *sk, unsigned long size, int force, int
 /*
  * Allocate a memory block from the socket's option memory buffer.
  */
-void *sock_kmalloc(struct sock *sk, int size, int priority)
+void *sock_kmalloc(struct sock *sk, int size, unsigned int __nocast priority)
 {
         if ((unsigned)size <= sysctl_optmem_max &&
             atomic_read(&sk->sk_omem_alloc) + size < sysctl_optmem_max) {
diff --git a/net/ipv4/tcp_output.c b/net/ipv4/tcp_output.c
index e041d057ec86..e3f8ea1bfa9c 100644
--- a/net/ipv4/tcp_output.c
+++ b/net/ipv4/tcp_output.c
@@ -1613,7 +1613,7 @@ void tcp_send_fin(struct sock *sk)
  * was unread data in the receive queue. This behavior is recommended
  * by draft-ietf-tcpimpl-prob-03.txt section 3.10. -DaveM
  */
-void tcp_send_active_reset(struct sock *sk, int priority)
+void tcp_send_active_reset(struct sock *sk, unsigned int __nocast priority)
 {
         struct tcp_sock *tp = tcp_sk(sk);
         struct sk_buff *skb;
