 fs/proc/proc_net.c         |  2
 include/linux/seq_file.h   |  2
 include/linux/skbuff.h     | 89
 include/net/dst.h          | 10
 include/net/ipv6.h         | 17
 include/net/pkt_cls.h      |  2
 include/net/sctp/command.h |  3
 include/net/sctp/sm.h      |  8
 lib/kobject_uevent.c       |  8
 net/core/dst.c             | 10
 net/core/skbuff.c          | 90
 net/sctp/command.c         | 10
 net/sctp/sm_statefuns.c    |  8
 13 files changed, 122 insertions(+), 137 deletions(-)
diff --git a/fs/proc/proc_net.c b/fs/proc/proc_net.c
index 13cd7835d0df..7034facf8b8f 100644
--- a/fs/proc/proc_net.c
+++ b/fs/proc/proc_net.c
@@ -51,6 +51,7 @@ int seq_open_net(struct inode *ino, struct file *f,
 }
 EXPORT_SYMBOL_GPL(seq_open_net);
 
+#ifdef CONFIG_NET
 int seq_release_net(struct inode *ino, struct file *f)
 {
 	struct seq_file *seq;
@@ -218,3 +219,4 @@ int __init proc_net_init(void)
 
 	return register_pernet_subsys(&proc_net_ns_ops);
 }
+#endif /* CONFIG_NET */
diff --git a/include/linux/seq_file.h b/include/linux/seq_file.h
index d870a8253769..5da70c3f4417 100644
--- a/include/linux/seq_file.h
+++ b/include/linux/seq_file.h
@@ -63,6 +63,7 @@ extern struct list_head *seq_list_start_head(struct list_head *head,
 extern struct list_head *seq_list_next(void *v, struct list_head *head,
 		loff_t *ppos);
 
+#ifdef CONFIG_NET
 struct net;
 struct seq_net_private {
 #ifdef CONFIG_NET_NS
@@ -81,6 +82,7 @@ static inline struct net *seq_file_net(struct seq_file *seq)
 	return &init_net;
 #endif
 }
+#endif /* CONFIG_NET */
 
 #endif
 #endif
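
For context, a minimal sketch (not part of the patch, with a hypothetical file and callback name) of how a /proc/net seq_file show callback picks up its namespace through seq_file_net(), which is the interface the new CONFIG_NET guard fences off:

#include <linux/seq_file.h>
#include <net/net_namespace.h>

/* Hypothetical show callback for a per-namespace /proc/net file. */
static int example_seq_show(struct seq_file *seq, void *v)
{
	/* Resolves to the opener's struct net, or &init_net without CONFIG_NET_NS. */
	struct net *net = seq_file_net(seq);

	seq_printf(seq, "namespace at %p\n", net);
	return 0;
}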
diff --git a/include/linux/skbuff.h b/include/linux/skbuff.h
index 7beb239d2ee0..ff72145d5d9e 100644
--- a/include/linux/skbuff.h
+++ b/include/linux/skbuff.h
@@ -892,6 +892,7 @@ static inline void skb_set_tail_pointer(struct sk_buff *skb, const int offset)
 /*
  * Add data to an sk_buff
  */
+extern unsigned char *skb_put(struct sk_buff *skb, unsigned int len);
 static inline unsigned char *__skb_put(struct sk_buff *skb, unsigned int len)
 {
 	unsigned char *tmp = skb_tail_pointer(skb);
@@ -901,26 +902,7 @@ static inline unsigned char *__skb_put(struct sk_buff *skb, unsigned int len)
 	return tmp;
 }
 
-/**
- * skb_put - add data to a buffer
- * @skb: buffer to use
- * @len: amount of data to add
- *
- * This function extends the used data area of the buffer. If this would
- * exceed the total buffer size the kernel will panic. A pointer to the
- * first byte of the extra data is returned.
- */
-static inline unsigned char *skb_put(struct sk_buff *skb, unsigned int len)
-{
-	unsigned char *tmp = skb_tail_pointer(skb);
-	SKB_LINEAR_ASSERT(skb);
-	skb->tail += len;
-	skb->len += len;
-	if (unlikely(skb->tail > skb->end))
-		skb_over_panic(skb, len, current_text_addr());
-	return tmp;
-}
-
+extern unsigned char *skb_push(struct sk_buff *skb, unsigned int len);
 static inline unsigned char *__skb_push(struct sk_buff *skb, unsigned int len)
 {
 	skb->data -= len;
@@ -928,24 +910,7 @@ static inline unsigned char *__skb_push(struct sk_buff *skb, unsigned int len)
 	return skb->data;
 }
 
-/**
- * skb_push - add data to the start of a buffer
- * @skb: buffer to use
- * @len: amount of data to add
- *
- * This function extends the used data area of the buffer at the buffer
- * start. If this would exceed the total buffer headroom the kernel will
- * panic. A pointer to the first byte of the extra data is returned.
- */
-static inline unsigned char *skb_push(struct sk_buff *skb, unsigned int len)
-{
-	skb->data -= len;
-	skb->len += len;
-	if (unlikely(skb->data < skb->head))
-		skb_under_panic(skb, len, current_text_addr());
-	return skb->data;
-}
-
+extern unsigned char *skb_pull(struct sk_buff *skb, unsigned int len);
 static inline unsigned char *__skb_pull(struct sk_buff *skb, unsigned int len)
 {
 	skb->len -= len;
@@ -953,21 +918,6 @@ static inline unsigned char *__skb_pull(struct sk_buff *skb, unsigned int len)
 	return skb->data += len;
 }
 
-/**
- * skb_pull - remove data from the start of a buffer
- * @skb: buffer to use
- * @len: amount of data to remove
- *
- * This function removes data from the start of a buffer, returning
- * the memory to the headroom. A pointer to the next data in the buffer
- * is returned. Once the data has been pulled future pushes will overwrite
- * the old data.
- */
-static inline unsigned char *skb_pull(struct sk_buff *skb, unsigned int len)
-{
-	return unlikely(len > skb->len) ? NULL : __skb_pull(skb, len);
-}
-
 extern unsigned char *__pskb_pull_tail(struct sk_buff *skb, int delta);
 
 static inline unsigned char *__pskb_pull(struct sk_buff *skb, unsigned int len)
@@ -1208,21 +1158,7 @@ static inline void __skb_trim(struct sk_buff *skb, unsigned int len)
 	skb_set_tail_pointer(skb, len);
 }
 
-/**
- * skb_trim - remove end from a buffer
- * @skb: buffer to alter
- * @len: new length
- *
- * Cut the length of a buffer down by removing data from the tail. If
- * the buffer is already under the length specified it is not modified.
- * The skb must be linear.
- */
-static inline void skb_trim(struct sk_buff *skb, unsigned int len)
-{
-	if (skb->len > len)
-		__skb_trim(skb, len);
-}
-
+extern void skb_trim(struct sk_buff *skb, unsigned int len);
 
 static inline int __pskb_trim(struct sk_buff *skb, unsigned int len)
 {
@@ -1305,22 +1241,7 @@ static inline struct sk_buff *__dev_alloc_skb(unsigned int length,
 	return skb;
 }
 
-/**
- * dev_alloc_skb - allocate an skbuff for receiving
- * @length: length to allocate
- *
- * Allocate a new &sk_buff and assign it a usage count of one. The
- * buffer has unspecified headroom built in. Users should allocate
- * the headroom they think they need without accounting for the
- * built in space. The built in space is used for optimisations.
- *
- * %NULL is returned if there is no free memory. Although this function
- * allocates memory it can be called from an interrupt.
- */
-static inline struct sk_buff *dev_alloc_skb(unsigned int length)
-{
-	return __dev_alloc_skb(length, GFP_ATOMIC);
-}
+extern struct sk_buff *dev_alloc_skb(unsigned int length);
 
 extern struct sk_buff *__netdev_alloc_skb(struct net_device *dev,
 					  unsigned int length, gfp_t gfp_mask);
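
As an aside, a rough sketch (hypothetical helper, not part of the patch) of the split the header now expresses: the unchecked __skb_put() stays inline for callers that verify tailroom themselves, while the checked skb_put() becomes an out-of-line call:

#include <linux/errno.h>
#include <linux/skbuff.h>
#include <linux/string.h>

/* Hypothetical helper: append a trailer only if it fits. */
static int append_trailer(struct sk_buff *skb, const void *trailer,
			  unsigned int len)
{
	if (skb_tailroom(skb) < len)
		return -ENOSPC;
	/* Tailroom already checked, so the inline, unchecked variant is fine. */
	memcpy(__skb_put(skb, len), trailer, len);
	return 0;
}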
diff --git a/include/net/dst.h b/include/net/dst.h
index ae13370e8484..002500e631f5 100644
--- a/include/net/dst.h
+++ b/include/net/dst.h
@@ -163,15 +163,7 @@ struct dst_entry * dst_clone(struct dst_entry * dst)
 	return dst;
 }
 
-static inline
-void dst_release(struct dst_entry *dst)
-{
-	if (dst) {
-		WARN_ON(atomic_read(&dst->__refcnt) < 1);
-		smp_mb__before_atomic_dec();
-		atomic_dec(&dst->__refcnt);
-	}
-}
+extern void dst_release(struct dst_entry *dst);
 
 /* Children define the path of the packet through the
  * Linux networking. Thus, destinations are stackable.
diff --git a/include/net/ipv6.h b/include/net/ipv6.h
index 296f61d84709..5738c1c73ac1 100644
--- a/include/net/ipv6.h
+++ b/include/net/ipv6.h
@@ -250,15 +250,6 @@ int ip6_frag_mem(struct net *net);
 
 #define IPV6_FRAG_TIMEOUT	(60*HZ)		/* 60 seconds */
 
-/*
- *	Function prototype for build_xmit
- */
-
-typedef int (*inet_getfrag_t) (const void *data,
-			       struct in6_addr *addr,
-			       char *,
-			       unsigned int, unsigned int);
-
 extern int __ipv6_addr_type(const struct in6_addr *addr);
 static inline int ipv6_addr_type(const struct in6_addr *addr)
 {
@@ -510,14 +501,6 @@ extern int ip6_local_out(struct sk_buff *skb);
  *	Extension header (options) processing
  */
 
-extern u8 *		ipv6_build_nfrag_opts(struct sk_buff *skb,
-					      u8 *prev_hdr,
-					      struct ipv6_txoptions *opt,
-					      struct in6_addr *daddr,
-					      u32 jumbolen);
-extern u8 *		ipv6_build_frag_opts(struct sk_buff *skb,
-					     u8 *prev_hdr,
-					     struct ipv6_txoptions *opt);
 extern void		ipv6_push_nfrag_opts(struct sk_buff *skb,
 					     struct ipv6_txoptions *opt,
 					     u8 *proto,
diff --git a/include/net/pkt_cls.h b/include/net/pkt_cls.h
index d349c66ef828..aa9e282db485 100644
--- a/include/net/pkt_cls.h
+++ b/include/net/pkt_cls.h
@@ -353,7 +353,7 @@ tcf_match_indev(struct sk_buff *skb, char *indev)
 	if (indev[0]) {
 		if (!skb->iif)
 			return 0;
-		dev = __dev_get_by_index(&init_net, skb->iif);
+		dev = __dev_get_by_index(dev_net(skb->dev), skb->iif);
 		if (!dev || strcmp(indev, dev->name))
 			return 0;
 	}
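
For illustration only, a sketch of the lookup pattern the hunk switches to: resolving the ingress ifindex in the skb's own namespace instead of always in init_net. The helper name is made up, and it assumes skb->dev is set, as it is on the tcf_match_indev() path:

#include <linux/netdevice.h>
#include <linux/skbuff.h>

/* Hypothetical helper: find the device the packet arrived on. */
static struct net_device *ingress_dev(const struct sk_buff *skb)
{
	if (!skb->iif)
		return NULL;
	/* Look up in the namespace of skb->dev rather than init_net. */
	return __dev_get_by_index(dev_net(skb->dev), skb->iif);
}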
diff --git a/include/net/sctp/command.h b/include/net/sctp/command.h
index 10ae2da6f93b..4263af857794 100644
--- a/include/net/sctp/command.h
+++ b/include/net/sctp/command.h
@@ -205,12 +205,11 @@ typedef struct {
 int sctp_init_cmd_seq(sctp_cmd_seq_t *seq);
 
 /* Add a command to an sctp_cmd_seq_t.
- * Return 0 if the command sequence is full.
  *
  * Use the SCTP_* constructors defined by SCTP_ARG_CONSTRUCTOR() above
  * to wrap data which goes in the obj argument.
  */
-int sctp_add_cmd(sctp_cmd_seq_t *seq, sctp_verb_t verb, sctp_arg_t obj);
+void sctp_add_cmd_sf(sctp_cmd_seq_t *seq, sctp_verb_t verb, sctp_arg_t obj);
 
 /* Return the next command structure in an sctp_cmd_seq.
  * Return NULL at the end of the sequence.
diff --git a/include/net/sctp/sm.h b/include/net/sctp/sm.h
index ef9e7ed2c82e..24811732bdb2 100644
--- a/include/net/sctp/sm.h
+++ b/include/net/sctp/sm.h
@@ -385,14 +385,6 @@ static inline int ADDIP_SERIAL_gte(__u16 s, __u16 t)
 	return (((s) == (t)) || (((t) - (s)) & ADDIP_SERIAL_SIGN_BIT));
 }
 
-
-/* Run sctp_add_cmd() generating a BUG() if there is a failure. */
-static inline void sctp_add_cmd_sf(sctp_cmd_seq_t *seq, sctp_verb_t verb, sctp_arg_t obj)
-{
-	if (unlikely(!sctp_add_cmd(seq, verb, obj)))
-		BUG();
-}
-
 /* Check VTAG of the packet matches the sender's own tag. */
 static inline int
 sctp_vtag_verify(const struct sctp_chunk *chunk,
diff --git a/lib/kobject_uevent.c b/lib/kobject_uevent.c
index 5a402e2982af..0d56dad319ad 100644
--- a/lib/kobject_uevent.c
+++ b/lib/kobject_uevent.c
@@ -15,12 +15,16 @@
  */
 
 #include <linux/spinlock.h>
+#include <linux/string.h>
+#include <linux/kobject.h>
+#include <linux/module.h>
+
+#ifdef CONFIG_NET
 #include <linux/socket.h>
 #include <linux/skbuff.h>
 #include <linux/netlink.h>
-#include <linux/string.h>
-#include <linux/kobject.h>
 #include <net/sock.h>
+#endif
 
 
 u64 uevent_seqnum;
diff --git a/net/core/dst.c b/net/core/dst.c
index 694cd2a3f6d2..fe03266130b6 100644
--- a/net/core/dst.c
+++ b/net/core/dst.c
@@ -259,6 +259,16 @@ again:
 	return NULL;
 }
 
+void dst_release(struct dst_entry *dst)
+{
+	if (dst) {
+		WARN_ON(atomic_read(&dst->__refcnt) < 1);
+		smp_mb__before_atomic_dec();
+		atomic_dec(&dst->__refcnt);
+	}
+}
+EXPORT_SYMBOL(dst_release);
+
 /* Dirty hack. We did it in 2.2 (in __dst_free),
  * we have _very_ good reasons not to repeat
  * this mistake in 2.3, but we have no choice
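
A brief usage sketch (hypothetical function, not from the patch) of the reference pattern dst_release() pairs with; moving it out of line means the refcount WARN_ON fires from one shared location instead of every inlined call site:

#include <net/dst.h>

/* Hypothetical caller: hold a route reference across some work. */
static void use_route(struct dst_entry *dst)
{
	dst = dst_clone(dst);		/* take a reference (NULL-safe) */
	/* ... transmit or cache something along this route ... */
	dst_release(dst);		/* drop it; warns on refcount underflow */
}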
diff --git a/net/core/skbuff.c b/net/core/skbuff.c
index 0d0fd28a9041..86e5682728be 100644
--- a/net/core/skbuff.c
+++ b/net/core/skbuff.c
@@ -263,6 +263,24 @@ struct sk_buff *__netdev_alloc_skb(struct net_device *dev,
 	return skb;
 }
 
+/**
+ * dev_alloc_skb - allocate an skbuff for receiving
+ * @length: length to allocate
+ *
+ * Allocate a new &sk_buff and assign it a usage count of one. The
+ * buffer has unspecified headroom built in. Users should allocate
+ * the headroom they think they need without accounting for the
+ * built in space. The built in space is used for optimisations.
+ *
+ * %NULL is returned if there is no free memory. Although this function
+ * allocates memory it can be called from an interrupt.
+ */
+struct sk_buff *dev_alloc_skb(unsigned int length)
+{
+	return __dev_alloc_skb(length, GFP_ATOMIC);
+}
+EXPORT_SYMBOL(dev_alloc_skb);
+
 static void skb_drop_list(struct sk_buff **listp)
 {
 	struct sk_buff *list = *listp;
@@ -857,6 +875,78 @@ free_skb:
 	return err;
 }
 
+/**
+ * skb_put - add data to a buffer
+ * @skb: buffer to use
+ * @len: amount of data to add
+ *
+ * This function extends the used data area of the buffer. If this would
+ * exceed the total buffer size the kernel will panic. A pointer to the
+ * first byte of the extra data is returned.
+ */
+unsigned char *skb_put(struct sk_buff *skb, unsigned int len)
+{
+	unsigned char *tmp = skb_tail_pointer(skb);
+	SKB_LINEAR_ASSERT(skb);
+	skb->tail += len;
+	skb->len += len;
+	if (unlikely(skb->tail > skb->end))
+		skb_over_panic(skb, len, __builtin_return_address(0));
+	return tmp;
+}
+EXPORT_SYMBOL(skb_put);
+
+/**
+ * skb_push - add data to the start of a buffer
+ * @skb: buffer to use
+ * @len: amount of data to add
+ *
+ * This function extends the used data area of the buffer at the buffer
+ * start. If this would exceed the total buffer headroom the kernel will
+ * panic. A pointer to the first byte of the extra data is returned.
+ */
+unsigned char *skb_push(struct sk_buff *skb, unsigned int len)
+{
+	skb->data -= len;
+	skb->len += len;
+	if (unlikely(skb->data < skb->head))
+		skb_under_panic(skb, len, __builtin_return_address(0));
+	return skb->data;
+}
+EXPORT_SYMBOL(skb_push);
+
+/**
+ * skb_pull - remove data from the start of a buffer
+ * @skb: buffer to use
+ * @len: amount of data to remove
+ *
+ * This function removes data from the start of a buffer, returning
+ * the memory to the headroom. A pointer to the next data in the buffer
+ * is returned. Once the data has been pulled future pushes will overwrite
+ * the old data.
+ */
+unsigned char *skb_pull(struct sk_buff *skb, unsigned int len)
+{
+	return unlikely(len > skb->len) ? NULL : __skb_pull(skb, len);
+}
+EXPORT_SYMBOL(skb_pull);
+
+/**
+ * skb_trim - remove end from a buffer
+ * @skb: buffer to alter
+ * @len: new length
+ *
+ * Cut the length of a buffer down by removing data from the tail. If
+ * the buffer is already under the length specified it is not modified.
+ * The skb must be linear.
+ */
+void skb_trim(struct sk_buff *skb, unsigned int len)
+{
+	if (skb->len > len)
+		__skb_trim(skb, len);
+}
+EXPORT_SYMBOL(skb_trim);
+
 /* Trims skb to length len. It can change skb pointers.
  */
 
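
To make the data-area arithmetic above concrete, here is a small sketch (hypothetical header size and function, not part of the patch) that exercises the four now out-of-line helpers in the usual transmit/receive order:

#include <linux/skbuff.h>
#include <linux/string.h>

#define EX_HDR_LEN 4	/* hypothetical link-layer header size */

/* Hypothetical round trip: build a frame, then strip it again. */
static struct sk_buff *example_frame(const void *payload, unsigned int len)
{
	struct sk_buff *skb = dev_alloc_skb(EX_HDR_LEN + len + 2);

	if (!skb)
		return NULL;
	skb_reserve(skb, EX_HDR_LEN);			/* headroom for the header */
	memcpy(skb_put(skb, len), payload, len);	/* append payload at the tail */
	memset(skb_put(skb, 2), 0, 2);			/* append two pad bytes */
	memset(skb_push(skb, EX_HDR_LEN), 0, EX_HDR_LEN);	/* prepend the header */

	skb_pull(skb, EX_HDR_LEN);	/* receive side: skip the header again */
	skb_trim(skb, len);		/* drop the trailing pad */
	return skb;
}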
diff --git a/net/sctp/command.c b/net/sctp/command.c
index bb977330002a..c0044019db9e 100644
--- a/net/sctp/command.c
+++ b/net/sctp/command.c
@@ -52,18 +52,12 @@ int sctp_init_cmd_seq(sctp_cmd_seq_t *seq)
 /* Add a command to a sctp_cmd_seq_t.
  * Return 0 if the command sequence is full.
  */
-int sctp_add_cmd(sctp_cmd_seq_t *seq, sctp_verb_t verb, sctp_arg_t obj)
+void sctp_add_cmd_sf(sctp_cmd_seq_t *seq, sctp_verb_t verb, sctp_arg_t obj)
 {
-	if (seq->next_free_slot >= SCTP_MAX_NUM_COMMANDS)
-		goto fail;
+	BUG_ON(seq->next_free_slot >= SCTP_MAX_NUM_COMMANDS);
 
 	seq->cmds[seq->next_free_slot].verb = verb;
 	seq->cmds[seq->next_free_slot++].obj = obj;
-
-	return 1;
-
-fail:
-	return 0;
 }
 
 /* Return the next command structure in a sctp_cmd_seq.
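
For reference, a sketch (hypothetical state-function fragment, not from the patch) of the new calling convention: sctp_add_cmd_sf() returns void, so callers queue side effects back to back, and overflowing SCTP_MAX_NUM_COMMANDS is now a BUG rather than a recoverable error:

#include <net/sctp/sctp.h>
#include <net/sctp/sm.h>

/* Hypothetical helper queueing two side effects for a state function. */
static void queue_shutdown_reply(sctp_cmd_seq_t *commands,
				 struct sctp_chunk *reply)
{
	sctp_add_cmd_sf(commands, SCTP_CMD_REPLY, SCTP_CHUNK(reply));
	sctp_add_cmd_sf(commands, SCTP_CMD_NEW_STATE,
			SCTP_STATE(SCTP_STATE_SHUTDOWN_SENT));
}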
diff --git a/net/sctp/sm_statefuns.c b/net/sctp/sm_statefuns.c
index 6545b5fcbc73..b534dbef864f 100644
--- a/net/sctp/sm_statefuns.c
+++ b/net/sctp/sm_statefuns.c
@@ -3135,12 +3135,8 @@ sctp_disposition_t sctp_sf_operr_notify(const struct sctp_endpoint *ep,
 		if (!ev)
 			goto nomem;
 
-		if (!sctp_add_cmd(commands, SCTP_CMD_EVENT_ULP,
-				  SCTP_ULPEVENT(ev))) {
-			sctp_ulpevent_free(ev);
-			goto nomem;
-		}
-
+		sctp_add_cmd_sf(commands, SCTP_CMD_EVENT_ULP,
+				SCTP_ULPEVENT(ev));
 		sctp_add_cmd_sf(commands, SCTP_CMD_PROCESS_OPERR,
 				SCTP_CHUNK(chunk));
 	}