author:    David Howells <dhowells@redhat.com>  2006-12-05 09:37:56 -0500
committer: David Howells <dhowells@warthog.cambridge.redhat.com>  2006-12-05 09:37:56 -0500
commit:    4c1ac1b49122b805adfa4efc620592f68dccf5db
tree:      87557f4bc2fd4fe65b7570489c2f610c45c0adcd
parent:    c4028958b6ecad064b1a6303a6a5906d4fe48d73
parent:    d916faace3efc0bf19fe9a615a1ab8fa1a24cd93
Merge branch 'master' of git://git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux-2.6

Conflicts:

	drivers/infiniband/core/iwcm.c
	drivers/net/chelsio/cxgb2.c
	drivers/net/wireless/bcm43xx/bcm43xx_main.c
	drivers/net/wireless/prism54/islpci_eth.c
	drivers/usb/core/hub.h
	drivers/usb/input/hid-core.c
	net/core/netpoll.c

Fix up merge failures with Linus's head and fix new compilation failures.

Signed-off-by: David Howells <dhowells@redhat.com>
Diffstat (limited to 'net/core')
-rw-r--r--  net/core/Makefile            |   1
-rw-r--r--  net/core/datagram.c          |  16
-rw-r--r--  net/core/dev.c               |  39
-rw-r--r--  net/core/dv.c                | 546
-rw-r--r--  net/core/fib_rules.c         |  71
-rw-r--r--  net/core/filter.c            |   6
-rw-r--r--  net/core/iovec.c             |   4
-rw-r--r--  net/core/neighbour.c         |  24
-rw-r--r--  net/core/netpoll.c           | 327
-rw-r--r--  net/core/pktgen.c            |  68
-rw-r--r--  net/core/request_sock.c      |  35
-rw-r--r--  net/core/rtnetlink.c         |  60
-rw-r--r--  net/core/skbuff.c            |  26
-rw-r--r--  net/core/sock.c              |  13
-rw-r--r--  net/core/sysctl_net_core.c   |  14
-rw-r--r--  net/core/utils.c             |  10
16 files changed, 382 insertions, 878 deletions
diff --git a/net/core/Makefile b/net/core/Makefile
index 119568077dab..73272d506e93 100644
--- a/net/core/Makefile
+++ b/net/core/Makefile
@@ -12,7 +12,6 @@ obj-y += dev.o ethtool.o dev_mcast.o dst.o netevent.o \
 
 obj-$(CONFIG_XFRM) += flow.o
 obj-$(CONFIG_SYSFS) += net-sysfs.o
-obj-$(CONFIG_NET_DIVERT) += dv.o
 obj-$(CONFIG_NET_PKTGEN) += pktgen.o
 obj-$(CONFIG_WIRELESS_EXT) += wireless.o
 obj-$(CONFIG_NETPOLL) += netpoll.o
diff --git a/net/core/datagram.c b/net/core/datagram.c
index f558c61aecc7..797fdd4352ce 100644
--- a/net/core/datagram.c
+++ b/net/core/datagram.c
@@ -321,7 +321,7 @@ fault:
 
 static int skb_copy_and_csum_datagram(const struct sk_buff *skb, int offset,
 				      u8 __user *to, int len,
-				      unsigned int *csump)
+				      __wsum *csump)
 {
 	int start = skb_headlen(skb);
 	int pos = 0;
@@ -350,7 +350,7 @@ static int skb_copy_and_csum_datagram(const struct sk_buff *skb, int offset,
 
 		end = start + skb_shinfo(skb)->frags[i].size;
 		if ((copy = end - offset) > 0) {
-			unsigned int csum2;
+			__wsum csum2;
 			int err = 0;
 			u8 *vaddr;
 			skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
@@ -386,7 +386,7 @@ static int skb_copy_and_csum_datagram(const struct sk_buff *skb, int offset,
 
 			end = start + list->len;
 			if ((copy = end - offset) > 0) {
-				unsigned int csum2 = 0;
+				__wsum csum2 = 0;
 				if (copy > len)
 					copy = len;
 				if (skb_copy_and_csum_datagram(list,
@@ -411,11 +411,11 @@ fault:
 	return -EFAULT;
 }
 
-unsigned int __skb_checksum_complete(struct sk_buff *skb)
+__sum16 __skb_checksum_complete(struct sk_buff *skb)
 {
-	unsigned int sum;
+	__sum16 sum;
 
-	sum = (u16)csum_fold(skb_checksum(skb, 0, skb->len, skb->csum));
+	sum = csum_fold(skb_checksum(skb, 0, skb->len, skb->csum));
 	if (likely(!sum)) {
 		if (unlikely(skb->ip_summed == CHECKSUM_COMPLETE))
 			netdev_rx_csum_fault(skb->dev);
@@ -441,7 +441,7 @@ EXPORT_SYMBOL(__skb_checksum_complete);
 int skb_copy_and_csum_datagram_iovec(struct sk_buff *skb,
 				     int hlen, struct iovec *iov)
 {
-	unsigned int csum;
+	__wsum csum;
 	int chunk = skb->len - hlen;
 
 	/* Skip filled elements.
@@ -460,7 +460,7 @@ int skb_copy_and_csum_datagram_iovec(struct sk_buff *skb,
 	if (skb_copy_and_csum_datagram(skb, hlen, iov->iov_base,
 				       chunk, &csum))
 		goto fault;
-	if ((unsigned short)csum_fold(csum))
+	if (csum_fold(csum))
 		goto csum_error;
 	if (unlikely(skb->ip_summed == CHECKSUM_COMPLETE))
 		netdev_rx_csum_fault(skb->dev);
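A note on the type changes in this file: __wsum and __sum16 are sparse "bitwise" annotations, plain integers to the compiler but distinct types to the checker, so an unfolded 32-bit partial sum can no longer be silently mixed with a folded 16-bit checksum or a host-order integer. A minimal standalone sketch of the scheme (the fold follows the usual kernel csum_fold pattern; this is an illustration, not code from this tree):

#ifdef __CHECKER__
#define __bitwise	__attribute__((bitwise))
#define __force		__attribute__((force))
#else
#define __bitwise
#define __force
#endif

typedef unsigned int   __bitwise __wsum;	/* 32-bit partial checksum */
typedef unsigned short __bitwise __sum16;	/* folded 16-bit checksum  */

/* Fold the 32-bit partial sum down to the final 16-bit checksum. */
static inline __sum16 csum_fold_sketch(__wsum csum)
{
	unsigned int sum = (__force unsigned int)csum;

	sum = (sum & 0xffff) + (sum >> 16);	/* fold the high half in */
	sum = (sum & 0xffff) + (sum >> 16);	/* absorb the carry      */
	return (__force __sum16)~sum;
}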
diff --git a/net/core/dev.c b/net/core/dev.c
index 81c426adcd1e..59d058a3b504 100644
--- a/net/core/dev.c
+++ b/net/core/dev.c
@@ -98,7 +98,6 @@
 #include <linux/seq_file.h>
 #include <linux/stat.h>
 #include <linux/if_bridge.h>
-#include <linux/divert.h>
 #include <net/dst.h>
 #include <net/pkt_sched.h>
 #include <net/checksum.h>
@@ -1170,7 +1169,7 @@ EXPORT_SYMBOL(netif_device_attach);
  */
 int skb_checksum_help(struct sk_buff *skb)
 {
-	unsigned int csum;
+	__wsum csum;
 	int ret = 0, offset = skb->h.raw - skb->data;
 
 	if (skb->ip_summed == CHECKSUM_COMPLETE)
@@ -1192,9 +1191,9 @@ int skb_checksum_help(struct sk_buff *skb)
 
 	offset = skb->tail - skb->h.raw;
 	BUG_ON(offset <= 0);
-	BUG_ON(skb->csum + 2 > offset);
+	BUG_ON(skb->csum_offset + 2 > offset);
 
-	*(u16*)(skb->h.raw + skb->csum) = csum_fold(csum);
+	*(__sum16*)(skb->h.raw + skb->csum_offset) = csum_fold(csum);
 
 out_set_summed:
 	skb->ip_summed = CHECKSUM_NONE;
@@ -1216,7 +1215,7 @@ struct sk_buff *skb_gso_segment(struct sk_buff *skb, int features)
 {
 	struct sk_buff *segs = ERR_PTR(-EPROTONOSUPPORT);
 	struct packet_type *ptype;
-	int type = skb->protocol;
+	__be16 type = skb->protocol;
 	int err;
 
 	BUG_ON(skb_shinfo(skb)->frag_list);
@@ -1767,7 +1766,7 @@ int netif_receive_skb(struct sk_buff *skb)
 	struct packet_type *ptype, *pt_prev;
 	struct net_device *orig_dev;
 	int ret = NET_RX_DROP;
-	unsigned short type;
+	__be16 type;
 
 	/* if we've gotten here through NAPI, check netpoll */
 	if (skb->dev->poll && netpoll_rx(skb))
@@ -1827,8 +1826,6 @@ int netif_receive_skb(struct sk_buff *skb)
 ncls:
 #endif
 
-	handle_diverter(skb);
-
 	if (handle_bridge(&skb, &pt_prev, &ret, orig_dev))
 		goto out;
 
@@ -2898,10 +2895,6 @@ int register_netdevice(struct net_device *dev)
 	spin_lock_init(&dev->ingress_lock);
 #endif
 
-	ret = alloc_divert_blk(dev);
-	if (ret)
-		goto out;
-
 	dev->iflink = -1;
 
 	/* Init, if this function is available */
@@ -2910,13 +2903,13 @@ int register_netdevice(struct net_device *dev)
 		if (ret) {
 			if (ret > 0)
 				ret = -EIO;
-			goto out_err;
+			goto out;
 		}
 	}
 
 	if (!dev_valid_name(dev->name)) {
 		ret = -EINVAL;
-		goto out_err;
+		goto out;
 	}
 
 	dev->ifindex = dev_new_index();
@@ -2930,7 +2923,7 @@ int register_netdevice(struct net_device *dev)
 			= hlist_entry(p, struct net_device, name_hlist);
 		if (!strncmp(d->name, dev->name, IFNAMSIZ)) {
 			ret = -EEXIST;
-			goto out_err;
+			goto out;
 		}
 	}
 
@@ -2974,7 +2967,7 @@ int register_netdevice(struct net_device *dev)
 
 	ret = netdev_register_sysfs(dev);
 	if (ret)
-		goto out_err;
+		goto out;
 	dev->reg_state = NETREG_REGISTERED;
 
 	/*
@@ -3001,9 +2994,6 @@ int register_netdevice(struct net_device *dev)
 
 out:
 	return ret;
-out_err:
-	free_divert_blk(dev);
-	goto out;
 }
 
 /**
@@ -3035,15 +3025,6 @@ int register_netdev(struct net_device *dev)
 		goto out;
 	}
 
-	/*
-	 * Back compatibility hook. Kill this one in 2.5
-	 */
-	if (dev->name[0] == 0 || dev->name[0] == ' ') {
-		err = dev_alloc_name(dev, "eth%d");
-		if (err < 0)
-			goto out;
-	}
-
 	err = register_netdevice(dev);
 out:
 	rtnl_unlock();
@@ -3329,8 +3310,6 @@ int unregister_netdevice(struct net_device *dev)
 	/* Notifier chain MUST detach us from master device. */
 	BUG_TRAP(!dev->master);
 
-	free_divert_blk(dev);
-
 	/* Finish processing unregister after unlock */
 	net_set_todo(dev);
 
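The int and unsigned short to __be16 changes apply the same discipline to byte order: skb->protocol carries a network-order Ethernet protocol ID, so comparisons must go through htons()/ntohs(). A hedged userspace illustration of the idiom (the typedef and constant below are stand-ins, not kernel definitions):

#include <stdint.h>
#include <arpa/inet.h>		/* htons() */

typedef uint16_t be16;		/* stand-in for the kernel's __be16 */
#define ETH_P_IP 0x0800		/* host-order protocol constant */

/* Compare in network order: the constant is converted, not the field. */
static int is_ipv4_frame(be16 wire_proto)
{
	return wire_proto == htons(ETH_P_IP);
}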
diff --git a/net/core/dv.c b/net/core/dv.c
deleted file mode 100644
index 29ee77f15932..000000000000
--- a/net/core/dv.c
+++ /dev/null
@@ -1,546 +0,0 @@
-/*
- * INET		An implementation of the TCP/IP protocol suite for the LINUX
- *		operating system.  INET is implemented using the BSD Socket
- *		interface as the means of communication with the user level.
- *
- *		Generic frame diversion
- *
- * Authors:
- *		Benoit LOCHER:	initial integration within the kernel with support for ethernet
- *		Dave Miller:	improvement on the code (correctness, performance and source files)
- *
- */
-#include <linux/module.h>
-#include <linux/types.h>
-#include <linux/kernel.h>
-#include <linux/sched.h>
-#include <linux/string.h>
-#include <linux/mm.h>
-#include <linux/socket.h>
-#include <linux/in.h>
-#include <linux/inet.h>
-#include <linux/ip.h>
-#include <linux/udp.h>
-#include <linux/netdevice.h>
-#include <linux/etherdevice.h>
-#include <linux/skbuff.h>
-#include <linux/capability.h>
-#include <linux/errno.h>
-#include <linux/init.h>
-#include <net/dst.h>
-#include <net/arp.h>
-#include <net/sock.h>
-#include <net/ipv6.h>
-#include <net/ip.h>
-#include <asm/uaccess.h>
-#include <asm/system.h>
-#include <asm/checksum.h>
-#include <linux/divert.h>
-#include <linux/sockios.h>
-
-const char sysctl_divert_version[32]="0.46";	/* Current version */
-
-static int __init dv_init(void)
-{
-	return 0;
-}
-module_init(dv_init);
-
-/*
- * Allocate a divert_blk for a device. This must be an ethernet nic.
- */
-int alloc_divert_blk(struct net_device *dev)
-{
-	int alloc_size = (sizeof(struct divert_blk) + 3) & ~3;
-
-	dev->divert = NULL;
-	if (dev->type == ARPHRD_ETHER) {
-		dev->divert = kzalloc(alloc_size, GFP_KERNEL);
-		if (dev->divert == NULL) {
-			printk(KERN_INFO "divert: unable to allocate divert_blk for %s\n",
-			       dev->name);
-			return -ENOMEM;
-		}
-		dev_hold(dev);
-	}
-
-	return 0;
-}
-
-/*
- * Free a divert_blk allocated by the above function, if it was
- * allocated on that device.
- */
-void free_divert_blk(struct net_device *dev)
-{
-	if (dev->divert) {
-		kfree(dev->divert);
-		dev->divert=NULL;
-		dev_put(dev);
-	}
-}
-
-/*
- * Adds a tcp/udp (source or dest) port to an array
- */
-static int add_port(u16 ports[], u16 port)
-{
-	int i;
-
-	if (port == 0)
-		return -EINVAL;
-
-	/* Storing directly in network format for performance,
-	 * thanks Dave :)
-	 */
-	port = htons(port);
-
-	for (i = 0; i < MAX_DIVERT_PORTS; i++) {
-		if (ports[i] == port)
-			return -EALREADY;
-	}
-
-	for (i = 0; i < MAX_DIVERT_PORTS; i++) {
-		if (ports[i] == 0) {
-			ports[i] = port;
-			return 0;
-		}
-	}
-
-	return -ENOBUFS;
-}
-
-/*
- * Removes a port from an array tcp/udp (source or dest)
- */
-static int remove_port(u16 ports[], u16 port)
-{
-	int i;
-
-	if (port == 0)
-		return -EINVAL;
-
-	/* Storing directly in network format for performance,
-	 * thanks Dave !
-	 */
-	port = htons(port);
-
-	for (i = 0; i < MAX_DIVERT_PORTS; i++) {
-		if (ports[i] == port) {
-			ports[i] = 0;
-			return 0;
-		}
-	}
-
-	return -EINVAL;
-}
-
-/* Some basic sanity checks on the arguments passed to divert_ioctl() */
-static int check_args(struct divert_cf *div_cf, struct net_device **dev)
-{
-	char devname[32];
-	int ret;
-
-	if (dev == NULL)
-		return -EFAULT;
-
-	/* GETVERSION: all other args are unused */
-	if (div_cf->cmd == DIVCMD_GETVERSION)
-		return 0;
-
-	/* Network device index should reasonably be between 0 and 1000 :) */
-	if (div_cf->dev_index < 0 || div_cf->dev_index > 1000)
-		return -EINVAL;
-
-	/* Let's try to find the ifname */
-	sprintf(devname, "eth%d", div_cf->dev_index);
-	*dev = dev_get_by_name(devname);
-
-	/* dev should NOT be null */
-	if (*dev == NULL)
-		return -EINVAL;
-
-	ret = 0;
-
-	/* user issuing the ioctl must be a super one :) */
-	if (!capable(CAP_SYS_ADMIN)) {
-		ret = -EPERM;
-		goto out;
-	}
-
-	/* Device must have a divert_blk member NOT null */
-	if ((*dev)->divert == NULL)
-		ret = -EINVAL;
-out:
-	dev_put(*dev);
-	return ret;
-}
-
-/*
- * control function of the diverter
- */
-#if 0
-#define DVDBG(a) \
-	printk(KERN_DEBUG "divert_ioctl() line %d %s\n", __LINE__, (a))
-#else
-#define DVDBG(a)
-#endif
-
-int divert_ioctl(unsigned int cmd, struct divert_cf __user *arg)
-{
-	struct divert_cf div_cf;
-	struct divert_blk *div_blk;
-	struct net_device *dev;
-	int ret;
-
-	switch (cmd) {
-	case SIOCGIFDIVERT:
-		DVDBG("SIOCGIFDIVERT, copy_from_user");
-		if (copy_from_user(&div_cf, arg, sizeof(struct divert_cf)))
-			return -EFAULT;
-		DVDBG("before check_args");
-		ret = check_args(&div_cf, &dev);
-		if (ret)
-			return ret;
-		DVDBG("after checkargs");
-		div_blk = dev->divert;
-
-		DVDBG("befre switch()");
-		switch (div_cf.cmd) {
-		case DIVCMD_GETSTATUS:
-			/* Now, just give the user the raw divert block
-			 * for him to play with :)
-			 */
-			if (copy_to_user(div_cf.arg1.ptr, dev->divert,
-					 sizeof(struct divert_blk)))
-				return -EFAULT;
-			break;
-
-		case DIVCMD_GETVERSION:
-			DVDBG("GETVERSION: checking ptr");
-			if (div_cf.arg1.ptr == NULL)
-				return -EINVAL;
-			DVDBG("GETVERSION: copying data to userland");
-			if (copy_to_user(div_cf.arg1.ptr,
-					 sysctl_divert_version, 32))
-				return -EFAULT;
-			DVDBG("GETVERSION: data copied");
-			break;
-
-		default:
-			return -EINVAL;
-		}
-
-		break;
-
-	case SIOCSIFDIVERT:
-		if (copy_from_user(&div_cf, arg, sizeof(struct divert_cf)))
-			return -EFAULT;
-
-		ret = check_args(&div_cf, &dev);
-		if (ret)
-			return ret;
-
-		div_blk = dev->divert;
-
-		switch(div_cf.cmd) {
-		case DIVCMD_RESET:
-			div_blk->divert = 0;
-			div_blk->protos = DIVERT_PROTO_NONE;
-			memset(div_blk->tcp_dst, 0,
-			       MAX_DIVERT_PORTS * sizeof(u16));
-			memset(div_blk->tcp_src, 0,
-			       MAX_DIVERT_PORTS * sizeof(u16));
-			memset(div_blk->udp_dst, 0,
-			       MAX_DIVERT_PORTS * sizeof(u16));
-			memset(div_blk->udp_src, 0,
-			       MAX_DIVERT_PORTS * sizeof(u16));
-			return 0;
-
-		case DIVCMD_DIVERT:
-			switch(div_cf.arg1.int32) {
-			case DIVARG1_ENABLE:
-				if (div_blk->divert)
-					return -EALREADY;
-				div_blk->divert = 1;
-				break;
-
-			case DIVARG1_DISABLE:
-				if (!div_blk->divert)
-					return -EALREADY;
-				div_blk->divert = 0;
-				break;
-
-			default:
-				return -EINVAL;
-			}
-
-			break;
-
-		case DIVCMD_IP:
-			switch(div_cf.arg1.int32) {
-			case DIVARG1_ENABLE:
-				if (div_blk->protos & DIVERT_PROTO_IP)
-					return -EALREADY;
-				div_blk->protos |= DIVERT_PROTO_IP;
-				break;
-
-			case DIVARG1_DISABLE:
-				if (!(div_blk->protos & DIVERT_PROTO_IP))
-					return -EALREADY;
-				div_blk->protos &= ~DIVERT_PROTO_IP;
-				break;
-
-			default:
-				return -EINVAL;
-			}
-
-			break;
-
-		case DIVCMD_TCP:
-			switch(div_cf.arg1.int32) {
-			case DIVARG1_ENABLE:
-				if (div_blk->protos & DIVERT_PROTO_TCP)
-					return -EALREADY;
-				div_blk->protos |= DIVERT_PROTO_TCP;
-				break;
-
-			case DIVARG1_DISABLE:
-				if (!(div_blk->protos & DIVERT_PROTO_TCP))
-					return -EALREADY;
-				div_blk->protos &= ~DIVERT_PROTO_TCP;
-				break;
-
-			default:
-				return -EINVAL;
-			}
-
-			break;
-
-		case DIVCMD_TCPDST:
-			switch(div_cf.arg1.int32) {
-			case DIVARG1_ADD:
-				return add_port(div_blk->tcp_dst,
-						div_cf.arg2.uint16);
-
-			case DIVARG1_REMOVE:
-				return remove_port(div_blk->tcp_dst,
-						   div_cf.arg2.uint16);
-
-			default:
-				return -EINVAL;
-			}
-
-			break;
-
-		case DIVCMD_TCPSRC:
-			switch(div_cf.arg1.int32) {
-			case DIVARG1_ADD:
-				return add_port(div_blk->tcp_src,
-						div_cf.arg2.uint16);
-
-			case DIVARG1_REMOVE:
-				return remove_port(div_blk->tcp_src,
-						   div_cf.arg2.uint16);
-
-			default:
-				return -EINVAL;
-			}
-
-			break;
-
-		case DIVCMD_UDP:
-			switch(div_cf.arg1.int32) {
-			case DIVARG1_ENABLE:
-				if (div_blk->protos & DIVERT_PROTO_UDP)
-					return -EALREADY;
-				div_blk->protos |= DIVERT_PROTO_UDP;
-				break;
-
-			case DIVARG1_DISABLE:
-				if (!(div_blk->protos & DIVERT_PROTO_UDP))
-					return -EALREADY;
-				div_blk->protos &= ~DIVERT_PROTO_UDP;
-				break;
-
-			default:
-				return -EINVAL;
-			}
-
-			break;
-
-		case DIVCMD_UDPDST:
-			switch(div_cf.arg1.int32) {
-			case DIVARG1_ADD:
-				return add_port(div_blk->udp_dst,
-						div_cf.arg2.uint16);
-
-			case DIVARG1_REMOVE:
-				return remove_port(div_blk->udp_dst,
-						   div_cf.arg2.uint16);
-
-			default:
-				return -EINVAL;
-			}
-
-			break;
-
-		case DIVCMD_UDPSRC:
-			switch(div_cf.arg1.int32) {
-			case DIVARG1_ADD:
-				return add_port(div_blk->udp_src,
-						div_cf.arg2.uint16);
-
-			case DIVARG1_REMOVE:
-				return remove_port(div_blk->udp_src,
-						   div_cf.arg2.uint16);
-
-			default:
-				return -EINVAL;
-			}
-
-			break;
-
-		case DIVCMD_ICMP:
-			switch(div_cf.arg1.int32) {
-			case DIVARG1_ENABLE:
-				if (div_blk->protos & DIVERT_PROTO_ICMP)
-					return -EALREADY;
-				div_blk->protos |= DIVERT_PROTO_ICMP;
-				break;
-
-			case DIVARG1_DISABLE:
-				if (!(div_blk->protos & DIVERT_PROTO_ICMP))
-					return -EALREADY;
-				div_blk->protos &= ~DIVERT_PROTO_ICMP;
-				break;
-
-			default:
-				return -EINVAL;
-			}
-
-			break;
-
-		default:
-			return -EINVAL;
-		}
-
-		break;
-
-	default:
-		return -EINVAL;
-	}
-
-	return 0;
-}
-
-
-/*
- * Check if packet should have its dest mac address set to the box itself
- * for diversion
- */
-
-#define ETH_DIVERT_FRAME(skb) \
-	memcpy(eth_hdr(skb), skb->dev->dev_addr, ETH_ALEN); \
-	skb->pkt_type=PACKET_HOST
-
-void divert_frame(struct sk_buff *skb)
-{
-	struct ethhdr *eth = eth_hdr(skb);
-	struct iphdr *iph;
-	struct tcphdr *tcph;
-	struct udphdr *udph;
-	struct divert_blk *divert = skb->dev->divert;
-	int i, src, dst;
-	unsigned char *skb_data_end = skb->data + skb->len;
-
-	/* Packet is already aimed at us, return */
-	if (!compare_ether_addr(eth->h_dest, skb->dev->dev_addr))
-		return;
-
-	/* proto is not IP, do nothing */
-	if (eth->h_proto != htons(ETH_P_IP))
-		return;
-
-	/* Divert all IP frames ? */
-	if (divert->protos & DIVERT_PROTO_IP) {
-		ETH_DIVERT_FRAME(skb);
-		return;
-	}
-
-	/* Check for possible (maliciously) malformed IP frame (thanks Dave) */
-	iph = (struct iphdr *) skb->data;
-	if (((iph->ihl<<2)+(unsigned char*)(iph)) >= skb_data_end) {
-		printk(KERN_INFO "divert: malformed IP packet !\n");
-		return;
-	}
-
-	switch (iph->protocol) {
-	/* Divert all ICMP frames ? */
-	case IPPROTO_ICMP:
-		if (divert->protos & DIVERT_PROTO_ICMP) {
-			ETH_DIVERT_FRAME(skb);
-			return;
-		}
-		break;
-
-	/* Divert all TCP frames ? */
-	case IPPROTO_TCP:
-		if (divert->protos & DIVERT_PROTO_TCP) {
-			ETH_DIVERT_FRAME(skb);
-			return;
-		}
-
-		/* Check for possible (maliciously) malformed IP
-		 * frame (thanx Dave)
-		 */
-		tcph = (struct tcphdr *)
-			(((unsigned char *)iph) + (iph->ihl<<2));
-		if (((unsigned char *)(tcph+1)) >= skb_data_end) {
-			printk(KERN_INFO "divert: malformed TCP packet !\n");
-			return;
-		}
-
-		/* Divert some tcp dst/src ports only ?*/
-		for (i = 0; i < MAX_DIVERT_PORTS; i++) {
-			dst = divert->tcp_dst[i];
-			src = divert->tcp_src[i];
-			if ((dst && dst == tcph->dest) ||
-			    (src && src == tcph->source)) {
-				ETH_DIVERT_FRAME(skb);
-				return;
-			}
-		}
-		break;
-
-	/* Divert all UDP frames ? */
-	case IPPROTO_UDP:
-		if (divert->protos & DIVERT_PROTO_UDP) {
-			ETH_DIVERT_FRAME(skb);
-			return;
-		}
-
-		/* Check for possible (maliciously) malformed IP
-		 * packet (thanks Dave)
-		 */
-		udph = (struct udphdr *)
-			(((unsigned char *)iph) + (iph->ihl<<2));
-		if (((unsigned char *)(udph+1)) >= skb_data_end) {
-			printk(KERN_INFO
-			       "divert: malformed UDP packet !\n");
-			return;
-		}
-
-		/* Divert some udp dst/src ports only ? */
-		for (i = 0; i < MAX_DIVERT_PORTS; i++) {
-			dst = divert->udp_dst[i];
-			src = divert->udp_src[i];
-			if ((dst && dst == udph->dest) ||
-			    (src && src == udph->source)) {
-				ETH_DIVERT_FRAME(skb);
-				return;
-			}
-		}
-		break;
-	}
-}
diff --git a/net/core/fib_rules.c b/net/core/fib_rules.c
index 6b0e63cacd93..1df6cd4568d3 100644
--- a/net/core/fib_rules.c
+++ b/net/core/fib_rules.c
@@ -107,6 +107,22 @@ out:
 
 EXPORT_SYMBOL_GPL(fib_rules_unregister);
 
+static int fib_rule_match(struct fib_rule *rule, struct fib_rules_ops *ops,
+			  struct flowi *fl, int flags)
+{
+	int ret = 0;
+
+	if (rule->ifindex && (rule->ifindex != fl->iif))
+		goto out;
+
+	if ((rule->mark ^ fl->mark) & rule->mark_mask)
+		goto out;
+
+	ret = ops->match(rule, fl, flags);
+out:
+	return (rule->flags & FIB_RULE_INVERT) ? !ret : ret;
+}
+
 int fib_rules_lookup(struct fib_rules_ops *ops, struct flowi *fl,
 		     int flags, struct fib_lookup_arg *arg)
 {
@@ -116,10 +132,7 @@ int fib_rules_lookup(struct fib_rules_ops *ops, struct flowi *fl,
 	rcu_read_lock();
 
 	list_for_each_entry_rcu(rule, ops->rules_list, list) {
-		if (rule->ifindex && (rule->ifindex != fl->iif))
-			continue;
-
-		if (!ops->match(rule, fl, flags))
+		if (!fib_rule_match(rule, ops, fl, flags))
 			continue;
 
 		err = ops->action(rule, fl, flags, arg);
@@ -179,6 +192,18 @@ int fib_nl_newrule(struct sk_buff *skb, struct nlmsghdr* nlh, void *arg)
 		rule->ifindex = dev->ifindex;
 	}
 
+	if (tb[FRA_FWMARK]) {
+		rule->mark = nla_get_u32(tb[FRA_FWMARK]);
+		if (rule->mark)
+			/* compatibility: if the mark value is non-zero all bits
+			 * are compared unless a mask is explicitly specified.
+			 */
+			rule->mark_mask = 0xFFFFFFFF;
+	}
+
+	if (tb[FRA_FWMASK])
+		rule->mark_mask = nla_get_u32(tb[FRA_FWMASK]);
+
 	rule->action = frh->action;
 	rule->flags = frh->flags;
 	rule->table = frh_get_table(frh, tb);
@@ -250,6 +275,14 @@ int fib_nl_delrule(struct sk_buff *skb, struct nlmsghdr* nlh, void *arg)
 		    nla_strcmp(tb[FRA_IFNAME], rule->ifname))
 			continue;
 
+		if (tb[FRA_FWMARK] &&
+		    (rule->mark != nla_get_u32(tb[FRA_FWMARK])))
+			continue;
+
+		if (tb[FRA_FWMASK] &&
+		    (rule->mark_mask != nla_get_u32(tb[FRA_FWMASK])))
+			continue;
+
 		if (!ops->compare(rule, frh, tb))
 			continue;
 
@@ -273,6 +306,22 @@ errout:
 	return err;
 }
 
+static inline size_t fib_rule_nlmsg_size(struct fib_rules_ops *ops,
+					 struct fib_rule *rule)
+{
+	size_t payload = NLMSG_ALIGN(sizeof(struct fib_rule_hdr))
+			 + nla_total_size(IFNAMSIZ) /* FRA_IFNAME */
+			 + nla_total_size(4) /* FRA_PRIORITY */
+			 + nla_total_size(4) /* FRA_TABLE */
+			 + nla_total_size(4) /* FRA_FWMARK */
+			 + nla_total_size(4); /* FRA_FWMASK */
+
+	if (ops->nlmsg_payload)
+		payload += ops->nlmsg_payload(rule);
+
+	return payload;
+}
+
 static int fib_nl_fill_rule(struct sk_buff *skb, struct fib_rule *rule,
 			    u32 pid, u32 seq, int type, int flags,
 			    struct fib_rules_ops *ops)
@@ -298,6 +347,12 @@ static int fib_nl_fill_rule(struct sk_buff *skb, struct fib_rule *rule,
 	if (rule->pref)
 		NLA_PUT_U32(skb, FRA_PRIORITY, rule->pref);
 
+	if (rule->mark)
+		NLA_PUT_U32(skb, FRA_FWMARK, rule->mark);
+
+	if (rule->mark_mask || rule->mark)
+		NLA_PUT_U32(skb, FRA_FWMASK, rule->mark_mask);
+
 	if (ops->fill(rule, skb, nlh, frh) < 0)
 		goto nla_put_failure;
 
@@ -345,15 +400,13 @@ static void notify_rule_change(int event, struct fib_rule *rule,
 	struct sk_buff *skb;
 	int err = -ENOBUFS;
 
-	skb = nlmsg_new(NLMSG_GOODSIZE, GFP_KERNEL);
+	skb = nlmsg_new(fib_rule_nlmsg_size(ops, rule), GFP_KERNEL);
 	if (skb == NULL)
 		goto errout;
 
 	err = fib_nl_fill_rule(skb, rule, pid, nlh->nlmsg_seq, event, 0, ops);
-	if (err < 0) {
-		kfree_skb(skb);
-		goto errout;
-	}
+	/* failure implies BUG in fib_rule_nlmsg_size() */
+	BUG_ON(err < 0);
 
 	err = rtnl_notify(skb, pid, ops->nlgroup, nlh, GFP_KERNEL);
 errout:
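The mark test in fib_rule_match() above, (rule->mark ^ fl->mark) & rule->mark_mask, accepts a packet when its mark agrees with the rule's mark on every bit selected by the mask: XOR exposes differing bits, AND discards bits outside the mask. A mask of zero therefore matches anything, which is why fib_nl_newrule() defaults a non-zero mark to the all-ones mask. A small standalone illustration:

#include <stdint.h>
#include <stdio.h>

static int mark_matches(uint32_t rule_mark, uint32_t pkt_mark, uint32_t mask)
{
	/* zero result <=> all masked bits agree */
	return ((rule_mark ^ pkt_mark) & mask) == 0;
}

int main(void)
{
	/* mask 0xff: only the low byte is compared */
	printf("%d\n", mark_matches(0x34, 0x1234, 0x000000ff));	/* 1 */
	printf("%d\n", mark_matches(0x34, 0x1235, 0x000000ff));	/* 0 */
	/* mask 0: the rule matches any mark */
	printf("%d\n", mark_matches(0x34, 0xdead, 0x00000000));	/* 1 */
	return 0;
}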
diff --git a/net/core/filter.c b/net/core/filter.c
index 6732782a5a40..0df843b667f4 100644
--- a/net/core/filter.c
+++ b/net/core/filter.c
@@ -178,7 +178,7 @@ unsigned int sk_run_filter(struct sk_buff *skb, struct sock_filter *filter, int
 load_w:
 			ptr = load_pointer(skb, k, 4, &tmp);
 			if (ptr != NULL) {
-				A = ntohl(get_unaligned((u32 *)ptr));
+				A = ntohl(get_unaligned((__be32 *)ptr));
 				continue;
 			}
 			break;
@@ -187,7 +187,7 @@ load_w:
 load_h:
 			ptr = load_pointer(skb, k, 2, &tmp);
 			if (ptr != NULL) {
-				A = ntohs(get_unaligned((u16 *)ptr));
+				A = ntohs(get_unaligned((__be16 *)ptr));
 				continue;
 			}
 			break;
@@ -261,7 +261,7 @@ load_b:
 			 */
 			switch (k-SKF_AD_OFF) {
 			case SKF_AD_PROTOCOL:
-				A = htons(skb->protocol);
+				A = ntohs(skb->protocol);
 				continue;
 			case SKF_AD_PKTTYPE:
 				A = skb->pkt_type;
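Two separate concerns meet in the loads above: the packet offset may be unaligned, handled by get_unaligned(), and the bytes are big-endian on the wire, handled by the __be32/__be16 cast feeding ntohl()/ntohs(). The SKF_AD_PROTOCOL hunk is the kind of bug these annotations catch: skb->protocol is network-order, so converting it for the BPF accumulator wants ntohs(), not htons(). The two are numerically the same operation on common ABIs, which is how the mistake went unnoticed. A portable userspace equivalent of the word load, as a sketch:

#include <stdint.h>
#include <string.h>
#include <arpa/inet.h>		/* ntohl() */

/* Read 4 possibly-unaligned bytes, then convert to host order. */
static uint32_t load_be32(const void *ptr)
{
	uint32_t raw;

	memcpy(&raw, ptr, sizeof(raw));	/* alignment-safe access */
	return ntohl(raw);
}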
diff --git a/net/core/iovec.c b/net/core/iovec.c
index 65e4b56fbc77..04b249c40b5b 100644
--- a/net/core/iovec.c
+++ b/net/core/iovec.c
@@ -158,9 +158,9 @@ int memcpy_fromiovecend(unsigned char *kdata, struct iovec *iov, int offset,
  *	call to this function will be unaligned also.
  */
 int csum_partial_copy_fromiovecend(unsigned char *kdata, struct iovec *iov,
-				   int offset, unsigned int len, int *csump)
+				   int offset, unsigned int len, __wsum *csump)
 {
-	int csum = *csump;
+	__wsum csum = *csump;
 	int partial_cnt = 0, err = 0;
 
 	/* Skip over the finished iovecs */
diff --git a/net/core/neighbour.c b/net/core/neighbour.c
index b4b478353b27..ba509a4a8e92 100644
--- a/net/core/neighbour.c
+++ b/net/core/neighbour.c
@@ -1266,10 +1266,9 @@ void pneigh_enqueue(struct neigh_table *tbl, struct neigh_parms *p,
 struct neigh_parms *neigh_parms_alloc(struct net_device *dev,
 				      struct neigh_table *tbl)
 {
-	struct neigh_parms *p = kmalloc(sizeof(*p), GFP_KERNEL);
+	struct neigh_parms *p = kmemdup(&tbl->parms, sizeof(*p), GFP_KERNEL);
 
 	if (p) {
-		memcpy(p, &tbl->parms, sizeof(*p));
 		p->tbl = tbl;
 		atomic_set(&p->refcnt, 1);
 		INIT_RCU_HEAD(&p->rcu_head);
@@ -2410,20 +2409,27 @@ static struct file_operations neigh_stat_seq_fops = {
 #endif /* CONFIG_PROC_FS */
 
 #ifdef CONFIG_ARPD
+static inline size_t neigh_nlmsg_size(void)
+{
+	return NLMSG_ALIGN(sizeof(struct ndmsg))
+	       + nla_total_size(MAX_ADDR_LEN) /* NDA_DST */
+	       + nla_total_size(MAX_ADDR_LEN) /* NDA_LLADDR */
+	       + nla_total_size(sizeof(struct nda_cacheinfo))
+	       + nla_total_size(4); /* NDA_PROBES */
+}
+
 static void __neigh_notify(struct neighbour *n, int type, int flags)
 {
 	struct sk_buff *skb;
 	int err = -ENOBUFS;
 
-	skb = nlmsg_new(NLMSG_GOODSIZE, GFP_ATOMIC);
+	skb = nlmsg_new(neigh_nlmsg_size(), GFP_ATOMIC);
 	if (skb == NULL)
 		goto errout;
 
 	err = neigh_fill_info(skb, n, 0, 0, type, flags);
-	if (err < 0) {
-		kfree_skb(skb);
-		goto errout;
-	}
+	/* failure implies BUG in neigh_nlmsg_size() */
+	BUG_ON(err < 0);
 
 	err = rtnl_notify(skb, 0, RTNLGRP_NEIGH, NULL, GFP_ATOMIC);
 errout:
@@ -2618,14 +2624,14 @@ int neigh_sysctl_register(struct net_device *dev, struct neigh_parms *p,
 			  int p_id, int pdev_id, char *p_name,
 			  proc_handler *handler, ctl_handler *strategy)
 {
-	struct neigh_sysctl_table *t = kmalloc(sizeof(*t), GFP_KERNEL);
+	struct neigh_sysctl_table *t = kmemdup(&neigh_sysctl_template,
+					       sizeof(*t), GFP_KERNEL);
 	const char *dev_name_source = NULL;
 	char *dev_name = NULL;
 	int err = 0;
 
 	if (!t)
 		return -ENOBUFS;
-	memcpy(t, &neigh_sysctl_template, sizeof(*t));
 	t->neigh_vars[0].data = &p->mcast_probes;
 	t->neigh_vars[1].data = &p->ucast_probes;
 	t->neigh_vars[2].data = &p->app_probes;
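kmemdup() replaces the kmalloc()+memcpy() pairs above with a single allocate-and-copy call, so the copy step cannot drift away from the allocation. A userspace sketch of the helper's semantics (kmemdup() itself does the same with kmalloc() and a gfp_t argument; this is an illustration, not the upstream mm/util.c text):

#include <stdlib.h>
#include <string.h>

/* Allocate len bytes and copy src into them; NULL on allocation failure. */
static void *memdup_sketch(const void *src, size_t len)
{
	void *p = malloc(len);

	if (p)
		memcpy(p, src, len);
	return p;
}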
diff --git a/net/core/netpoll.c b/net/core/netpoll.c index 63f24c914ddb..b3c559b9ac35 100644 --- a/net/core/netpoll.c +++ b/net/core/netpoll.c | |||
@@ -34,18 +34,12 @@ | |||
34 | #define MAX_UDP_CHUNK 1460 | 34 | #define MAX_UDP_CHUNK 1460 |
35 | #define MAX_SKBS 32 | 35 | #define MAX_SKBS 32 |
36 | #define MAX_QUEUE_DEPTH (MAX_SKBS / 2) | 36 | #define MAX_QUEUE_DEPTH (MAX_SKBS / 2) |
37 | #define MAX_RETRIES 20000 | ||
38 | 37 | ||
39 | static DEFINE_SPINLOCK(skb_list_lock); | 38 | static struct sk_buff_head skb_pool; |
40 | static int nr_skbs; | ||
41 | static struct sk_buff *skbs; | ||
42 | |||
43 | static DEFINE_SPINLOCK(queue_lock); | ||
44 | static int queue_depth; | ||
45 | static struct sk_buff *queue_head, *queue_tail; | ||
46 | 39 | ||
47 | static atomic_t trapped; | 40 | static atomic_t trapped; |
48 | 41 | ||
42 | #define USEC_PER_POLL 50 | ||
49 | #define NETPOLL_RX_ENABLED 1 | 43 | #define NETPOLL_RX_ENABLED 1 |
50 | #define NETPOLL_RX_DROP 2 | 44 | #define NETPOLL_RX_DROP 2 |
51 | 45 | ||
@@ -58,52 +52,34 @@ static void arp_reply(struct sk_buff *skb); | |||
58 | 52 | ||
59 | static void queue_process(struct work_struct *work) | 53 | static void queue_process(struct work_struct *work) |
60 | { | 54 | { |
61 | unsigned long flags; | 55 | struct netpoll_info *npinfo = |
56 | container_of(work, struct netpoll_info, tx_work.work); | ||
62 | struct sk_buff *skb; | 57 | struct sk_buff *skb; |
63 | 58 | ||
64 | while (queue_head) { | 59 | while ((skb = skb_dequeue(&npinfo->txq))) { |
65 | spin_lock_irqsave(&queue_lock, flags); | 60 | struct net_device *dev = skb->dev; |
66 | |||
67 | skb = queue_head; | ||
68 | queue_head = skb->next; | ||
69 | if (skb == queue_tail) | ||
70 | queue_head = NULL; | ||
71 | |||
72 | queue_depth--; | ||
73 | |||
74 | spin_unlock_irqrestore(&queue_lock, flags); | ||
75 | |||
76 | dev_queue_xmit(skb); | ||
77 | } | ||
78 | } | ||
79 | 61 | ||
80 | static DECLARE_WORK(send_queue, queue_process); | 62 | if (!netif_device_present(dev) || !netif_running(dev)) { |
63 | __kfree_skb(skb); | ||
64 | continue; | ||
65 | } | ||
81 | 66 | ||
82 | void netpoll_queue(struct sk_buff *skb) | 67 | netif_tx_lock_bh(dev); |
83 | { | 68 | if (netif_queue_stopped(dev) || |
84 | unsigned long flags; | 69 | dev->hard_start_xmit(skb, dev) != NETDEV_TX_OK) { |
70 | skb_queue_head(&npinfo->txq, skb); | ||
71 | netif_tx_unlock_bh(dev); | ||
85 | 72 | ||
86 | if (queue_depth == MAX_QUEUE_DEPTH) { | 73 | schedule_delayed_work(&npinfo->tx_work, HZ/10); |
87 | __kfree_skb(skb); | 74 | return; |
88 | return; | 75 | } |
89 | } | 76 | } |
90 | |||
91 | spin_lock_irqsave(&queue_lock, flags); | ||
92 | if (!queue_head) | ||
93 | queue_head = skb; | ||
94 | else | ||
95 | queue_tail->next = skb; | ||
96 | queue_tail = skb; | ||
97 | queue_depth++; | ||
98 | spin_unlock_irqrestore(&queue_lock, flags); | ||
99 | |||
100 | schedule_work(&send_queue); | ||
101 | } | 77 | } |
102 | 78 | ||
103 | static int checksum_udp(struct sk_buff *skb, struct udphdr *uh, | 79 | static __sum16 checksum_udp(struct sk_buff *skb, struct udphdr *uh, |
104 | unsigned short ulen, u32 saddr, u32 daddr) | 80 | unsigned short ulen, __be32 saddr, __be32 daddr) |
105 | { | 81 | { |
106 | unsigned int psum; | 82 | __wsum psum; |
107 | 83 | ||
108 | if (uh->check == 0 || skb->ip_summed == CHECKSUM_UNNECESSARY) | 84 | if (uh->check == 0 || skb->ip_summed == CHECKSUM_UNNECESSARY) |
109 | return 0; | 85 | return 0; |
@@ -111,7 +87,7 @@ static int checksum_udp(struct sk_buff *skb, struct udphdr *uh, | |||
111 | psum = csum_tcpudp_nofold(saddr, daddr, ulen, IPPROTO_UDP, 0); | 87 | psum = csum_tcpudp_nofold(saddr, daddr, ulen, IPPROTO_UDP, 0); |
112 | 88 | ||
113 | if (skb->ip_summed == CHECKSUM_COMPLETE && | 89 | if (skb->ip_summed == CHECKSUM_COMPLETE && |
114 | !(u16)csum_fold(csum_add(psum, skb->csum))) | 90 | !csum_fold(csum_add(psum, skb->csum))) |
115 | return 0; | 91 | return 0; |
116 | 92 | ||
117 | skb->csum = psum; | 93 | skb->csum = psum; |
@@ -167,12 +143,11 @@ static void service_arp_queue(struct netpoll_info *npi) | |||
167 | arp_reply(skb); | 143 | arp_reply(skb); |
168 | skb = skb_dequeue(&npi->arp_tx); | 144 | skb = skb_dequeue(&npi->arp_tx); |
169 | } | 145 | } |
170 | return; | ||
171 | } | 146 | } |
172 | 147 | ||
173 | void netpoll_poll(struct netpoll *np) | 148 | void netpoll_poll(struct netpoll *np) |
174 | { | 149 | { |
175 | if(!np->dev || !netif_running(np->dev) || !np->dev->poll_controller) | 150 | if (!np->dev || !netif_running(np->dev) || !np->dev->poll_controller) |
176 | return; | 151 | return; |
177 | 152 | ||
178 | /* Process pending work on NIC */ | 153 | /* Process pending work on NIC */ |
@@ -190,17 +165,15 @@ static void refill_skbs(void) | |||
190 | struct sk_buff *skb; | 165 | struct sk_buff *skb; |
191 | unsigned long flags; | 166 | unsigned long flags; |
192 | 167 | ||
193 | spin_lock_irqsave(&skb_list_lock, flags); | 168 | spin_lock_irqsave(&skb_pool.lock, flags); |
194 | while (nr_skbs < MAX_SKBS) { | 169 | while (skb_pool.qlen < MAX_SKBS) { |
195 | skb = alloc_skb(MAX_SKB_SIZE, GFP_ATOMIC); | 170 | skb = alloc_skb(MAX_SKB_SIZE, GFP_ATOMIC); |
196 | if (!skb) | 171 | if (!skb) |
197 | break; | 172 | break; |
198 | 173 | ||
199 | skb->next = skbs; | 174 | __skb_queue_tail(&skb_pool, skb); |
200 | skbs = skb; | ||
201 | nr_skbs++; | ||
202 | } | 175 | } |
203 | spin_unlock_irqrestore(&skb_list_lock, flags); | 176 | spin_unlock_irqrestore(&skb_pool.lock, flags); |
204 | } | 177 | } |
205 | 178 | ||
206 | static void zap_completion_queue(void) | 179 | static void zap_completion_queue(void) |
@@ -219,7 +192,7 @@ static void zap_completion_queue(void) | |||
219 | while (clist != NULL) { | 192 | while (clist != NULL) { |
220 | struct sk_buff *skb = clist; | 193 | struct sk_buff *skb = clist; |
221 | clist = clist->next; | 194 | clist = clist->next; |
222 | if(skb->destructor) | 195 | if (skb->destructor) |
223 | dev_kfree_skb_any(skb); /* put this one back */ | 196 | dev_kfree_skb_any(skb); /* put this one back */ |
224 | else | 197 | else |
225 | __kfree_skb(skb); | 198 | __kfree_skb(skb); |
@@ -229,38 +202,25 @@ static void zap_completion_queue(void) | |||
229 | put_cpu_var(softnet_data); | 202 | put_cpu_var(softnet_data); |
230 | } | 203 | } |
231 | 204 | ||
232 | static struct sk_buff * find_skb(struct netpoll *np, int len, int reserve) | 205 | static struct sk_buff *find_skb(struct netpoll *np, int len, int reserve) |
233 | { | 206 | { |
234 | int once = 1, count = 0; | 207 | int count = 0; |
235 | unsigned long flags; | 208 | struct sk_buff *skb; |
236 | struct sk_buff *skb = NULL; | ||
237 | 209 | ||
238 | zap_completion_queue(); | 210 | zap_completion_queue(); |
211 | refill_skbs(); | ||
239 | repeat: | 212 | repeat: |
240 | if (nr_skbs < MAX_SKBS) | ||
241 | refill_skbs(); | ||
242 | 213 | ||
243 | skb = alloc_skb(len, GFP_ATOMIC); | 214 | skb = alloc_skb(len, GFP_ATOMIC); |
215 | if (!skb) | ||
216 | skb = skb_dequeue(&skb_pool); | ||
244 | 217 | ||
245 | if (!skb) { | 218 | if (!skb) { |
246 | spin_lock_irqsave(&skb_list_lock, flags); | 219 | if (++count < 10) { |
247 | skb = skbs; | 220 | netpoll_poll(np); |
248 | if (skb) { | 221 | goto repeat; |
249 | skbs = skb->next; | ||
250 | skb->next = NULL; | ||
251 | nr_skbs--; | ||
252 | } | 222 | } |
253 | spin_unlock_irqrestore(&skb_list_lock, flags); | 223 | return NULL; |
254 | } | ||
255 | |||
256 | if(!skb) { | ||
257 | count++; | ||
258 | if (once && (count == 1000000)) { | ||
259 | printk("out of netpoll skbs!\n"); | ||
260 | once = 0; | ||
261 | } | ||
262 | netpoll_poll(np); | ||
263 | goto repeat; | ||
264 | } | 224 | } |
265 | 225 | ||
266 | atomic_set(&skb->users, 1); | 226 | atomic_set(&skb->users, 1); |
@@ -270,50 +230,40 @@ repeat: | |||
270 | 230 | ||
271 | static void netpoll_send_skb(struct netpoll *np, struct sk_buff *skb) | 231 | static void netpoll_send_skb(struct netpoll *np, struct sk_buff *skb) |
272 | { | 232 | { |
273 | int status; | 233 | int status = NETDEV_TX_BUSY; |
274 | struct netpoll_info *npinfo; | 234 | unsigned long tries; |
275 | 235 | struct net_device *dev = np->dev; | |
276 | if (!np || !np->dev || !netif_running(np->dev)) { | 236 | struct netpoll_info *npinfo = np->dev->npinfo; |
277 | __kfree_skb(skb); | 237 | |
278 | return; | 238 | if (!npinfo || !netif_running(dev) || !netif_device_present(dev)) { |
279 | } | 239 | __kfree_skb(skb); |
280 | 240 | return; | |
281 | npinfo = np->dev->npinfo; | 241 | } |
282 | 242 | ||
283 | /* avoid recursion */ | 243 | /* don't get messages out of order, and no recursion */ |
284 | if (npinfo->poll_owner == smp_processor_id() || | 244 | if (skb_queue_len(&npinfo->txq) == 0 && |
285 | np->dev->xmit_lock_owner == smp_processor_id()) { | 245 | npinfo->poll_owner != smp_processor_id() && |
286 | if (np->drop) | 246 | netif_tx_trylock(dev)) { |
287 | np->drop(skb); | 247 | /* try until next clock tick */ |
288 | else | 248 | for (tries = jiffies_to_usecs(1)/USEC_PER_POLL; tries > 0; --tries) { |
289 | __kfree_skb(skb); | 249 | if (!netif_queue_stopped(dev)) |
290 | return; | 250 | status = dev->hard_start_xmit(skb, dev); |
291 | } | ||
292 | |||
293 | do { | ||
294 | npinfo->tries--; | ||
295 | netif_tx_lock(np->dev); | ||
296 | 251 | ||
297 | /* | 252 | if (status == NETDEV_TX_OK) |
298 | * network drivers do not expect to be called if the queue is | 253 | break; |
299 | * stopped. | ||
300 | */ | ||
301 | status = NETDEV_TX_BUSY; | ||
302 | if (!netif_queue_stopped(np->dev)) | ||
303 | status = np->dev->hard_start_xmit(skb, np->dev); | ||
304 | 254 | ||
305 | netif_tx_unlock(np->dev); | 255 | /* tickle device maybe there is some cleanup */ |
256 | netpoll_poll(np); | ||
306 | 257 | ||
307 | /* success */ | 258 | udelay(USEC_PER_POLL); |
308 | if(!status) { | ||
309 | npinfo->tries = MAX_RETRIES; /* reset */ | ||
310 | return; | ||
311 | } | 259 | } |
260 | netif_tx_unlock(dev); | ||
261 | } | ||
312 | 262 | ||
313 | /* transmit busy */ | 263 | if (status != NETDEV_TX_OK) { |
314 | netpoll_poll(np); | 264 | skb_queue_tail(&npinfo->txq, skb); |
315 | udelay(50); | 265 | schedule_delayed_work(&npinfo->tx_work,0); |
316 | } while (npinfo->tries > 0); | 266 | } |
317 | } | 267 | } |
318 | 268 | ||
319 | void netpoll_send_udp(struct netpoll *np, const char *msg, int len) | 269 | void netpoll_send_udp(struct netpoll *np, const char *msg, int len) |
@@ -345,7 +295,7 @@ void netpoll_send_udp(struct netpoll *np, const char *msg, int len) | |||
345 | udp_len, IPPROTO_UDP, | 295 | udp_len, IPPROTO_UDP, |
346 | csum_partial((unsigned char *)udph, udp_len, 0)); | 296 | csum_partial((unsigned char *)udph, udp_len, 0)); |
347 | if (udph->check == 0) | 297 | if (udph->check == 0) |
348 | udph->check = -1; | 298 | udph->check = CSUM_MANGLED_0; |
349 | 299 | ||
350 | skb->nh.iph = iph = (struct iphdr *)skb_push(skb, sizeof(*iph)); | 300 | skb->nh.iph = iph = (struct iphdr *)skb_push(skb, sizeof(*iph)); |
351 | 301 | ||
@@ -379,7 +329,7 @@ static void arp_reply(struct sk_buff *skb) | |||
379 | struct arphdr *arp; | 329 | struct arphdr *arp; |
380 | unsigned char *arp_ptr; | 330 | unsigned char *arp_ptr; |
381 | int size, type = ARPOP_REPLY, ptype = ETH_P_ARP; | 331 | int size, type = ARPOP_REPLY, ptype = ETH_P_ARP; |
382 | u32 sip, tip; | 332 | __be32 sip, tip; |
383 | struct sk_buff *send_skb; | 333 | struct sk_buff *send_skb; |
384 | struct netpoll *np = NULL; | 334 | struct netpoll *np = NULL; |
385 | 335 | ||
@@ -431,8 +381,8 @@ static void arp_reply(struct sk_buff *skb) | |||
431 | 381 | ||
432 | if (np->dev->hard_header && | 382 | if (np->dev->hard_header && |
433 | np->dev->hard_header(send_skb, skb->dev, ptype, | 383 | np->dev->hard_header(send_skb, skb->dev, ptype, |
434 | np->remote_mac, np->local_mac, | 384 | np->remote_mac, np->local_mac, |
435 | send_skb->len) < 0) { | 385 | send_skb->len) < 0) { |
436 | kfree_skb(send_skb); | 386 | kfree_skb(send_skb); |
437 | return; | 387 | return; |
438 | } | 388 | } |
@@ -470,7 +420,6 @@ int __netpoll_rx(struct sk_buff *skb) | |||
470 | struct netpoll_info *npi = skb->dev->npinfo; | 420 | struct netpoll_info *npi = skb->dev->npinfo; |
471 | struct netpoll *np = npi->rx_np; | 421 | struct netpoll *np = npi->rx_np; |
472 | 422 | ||
473 | |||
474 | if (!np) | 423 | if (!np) |
475 | goto out; | 424 | goto out; |
476 | if (skb->dev->type != ARPHRD_ETHER) | 425 | if (skb->dev->type != ARPHRD_ETHER) |
@@ -543,47 +492,47 @@ int netpoll_parse_options(struct netpoll *np, char *opt) | |||
543 | { | 492 | { |
544 | char *cur=opt, *delim; | 493 | char *cur=opt, *delim; |
545 | 494 | ||
546 | if(*cur != '@') { | 495 | if (*cur != '@') { |
547 | if ((delim = strchr(cur, '@')) == NULL) | 496 | if ((delim = strchr(cur, '@')) == NULL) |
548 | goto parse_failed; | 497 | goto parse_failed; |
549 | *delim=0; | 498 | *delim = 0; |
550 | np->local_port=simple_strtol(cur, NULL, 10); | 499 | np->local_port = simple_strtol(cur, NULL, 10); |
551 | cur=delim; | 500 | cur = delim; |
552 | } | 501 | } |
553 | cur++; | 502 | cur++; |
554 | printk(KERN_INFO "%s: local port %d\n", np->name, np->local_port); | 503 | printk(KERN_INFO "%s: local port %d\n", np->name, np->local_port); |
555 | 504 | ||
556 | if(*cur != '/') { | 505 | if (*cur != '/') { |
557 | if ((delim = strchr(cur, '/')) == NULL) | 506 | if ((delim = strchr(cur, '/')) == NULL) |
558 | goto parse_failed; | 507 | goto parse_failed; |
559 | *delim=0; | 508 | *delim = 0; |
560 | np->local_ip=ntohl(in_aton(cur)); | 509 | np->local_ip = ntohl(in_aton(cur)); |
561 | cur=delim; | 510 | cur = delim; |
562 | 511 | ||
563 | printk(KERN_INFO "%s: local IP %d.%d.%d.%d\n", | 512 | printk(KERN_INFO "%s: local IP %d.%d.%d.%d\n", |
564 | np->name, HIPQUAD(np->local_ip)); | 513 | np->name, HIPQUAD(np->local_ip)); |
565 | } | 514 | } |
566 | cur++; | 515 | cur++; |
567 | 516 | ||
568 | if ( *cur != ',') { | 517 | if (*cur != ',') { |
569 | /* parse out dev name */ | 518 | /* parse out dev name */ |
570 | if ((delim = strchr(cur, ',')) == NULL) | 519 | if ((delim = strchr(cur, ',')) == NULL) |
571 | goto parse_failed; | 520 | goto parse_failed; |
572 | *delim=0; | 521 | *delim = 0; |
573 | strlcpy(np->dev_name, cur, sizeof(np->dev_name)); | 522 | strlcpy(np->dev_name, cur, sizeof(np->dev_name)); |
574 | cur=delim; | 523 | cur = delim; |
575 | } | 524 | } |
576 | cur++; | 525 | cur++; |
577 | 526 | ||
578 | printk(KERN_INFO "%s: interface %s\n", np->name, np->dev_name); | 527 | printk(KERN_INFO "%s: interface %s\n", np->name, np->dev_name); |
579 | 528 | ||
580 | if ( *cur != '@' ) { | 529 | if (*cur != '@') { |
581 | /* dst port */ | 530 | /* dst port */ |
582 | if ((delim = strchr(cur, '@')) == NULL) | 531 | if ((delim = strchr(cur, '@')) == NULL) |
583 | goto parse_failed; | 532 | goto parse_failed; |
584 | *delim=0; | 533 | *delim = 0; |
585 | np->remote_port=simple_strtol(cur, NULL, 10); | 534 | np->remote_port = simple_strtol(cur, NULL, 10); |
586 | cur=delim; | 535 | cur = delim; |
587 | } | 536 | } |
588 | cur++; | 537 | cur++; |
589 | printk(KERN_INFO "%s: remote port %d\n", np->name, np->remote_port); | 538 | printk(KERN_INFO "%s: remote port %d\n", np->name, np->remote_port); |
@@ -591,42 +540,41 @@ int netpoll_parse_options(struct netpoll *np, char *opt) | |||
591 | /* dst ip */ | 540 | /* dst ip */ |
592 | if ((delim = strchr(cur, '/')) == NULL) | 541 | if ((delim = strchr(cur, '/')) == NULL) |
593 | goto parse_failed; | 542 | goto parse_failed; |
594 | *delim=0; | 543 | *delim = 0; |
595 | np->remote_ip=ntohl(in_aton(cur)); | 544 | np->remote_ip = ntohl(in_aton(cur)); |
596 | cur=delim+1; | 545 | cur = delim + 1; |
597 | 546 | ||
598 | printk(KERN_INFO "%s: remote IP %d.%d.%d.%d\n", | 547 | printk(KERN_INFO "%s: remote IP %d.%d.%d.%d\n", |
599 | np->name, HIPQUAD(np->remote_ip)); | 548 | np->name, HIPQUAD(np->remote_ip)); |
600 | 549 | ||
601 | if( *cur != 0 ) | 550 | if (*cur != 0) { |
602 | { | ||
603 | /* MAC address */ | 551 | /* MAC address */ |
604 | if ((delim = strchr(cur, ':')) == NULL) | 552 | if ((delim = strchr(cur, ':')) == NULL) |
605 | goto parse_failed; | 553 | goto parse_failed; |
606 | *delim=0; | 554 | *delim = 0; |
607 | np->remote_mac[0]=simple_strtol(cur, NULL, 16); | 555 | np->remote_mac[0] = simple_strtol(cur, NULL, 16); |
608 | cur=delim+1; | 556 | cur = delim + 1; |
609 | if ((delim = strchr(cur, ':')) == NULL) | 557 | if ((delim = strchr(cur, ':')) == NULL) |
610 | goto parse_failed; | 558 | goto parse_failed; |
611 | *delim=0; | 559 | *delim = 0; |
612 | np->remote_mac[1]=simple_strtol(cur, NULL, 16); | 560 | np->remote_mac[1] = simple_strtol(cur, NULL, 16); |
613 | cur=delim+1; | 561 | cur = delim + 1; |
614 | if ((delim = strchr(cur, ':')) == NULL) | 562 | if ((delim = strchr(cur, ':')) == NULL) |
615 | goto parse_failed; | 563 | goto parse_failed; |
616 | *delim=0; | 564 | *delim = 0; |
617 | np->remote_mac[2]=simple_strtol(cur, NULL, 16); | 565 | np->remote_mac[2] = simple_strtol(cur, NULL, 16); |
618 | cur=delim+1; | 566 | cur = delim + 1; |
619 | if ((delim = strchr(cur, ':')) == NULL) | 567 | if ((delim = strchr(cur, ':')) == NULL) |
620 | goto parse_failed; | 568 | goto parse_failed; |
621 | *delim=0; | 569 | *delim = 0; |
622 | np->remote_mac[3]=simple_strtol(cur, NULL, 16); | 570 | np->remote_mac[3] = simple_strtol(cur, NULL, 16); |
623 | cur=delim+1; | 571 | cur = delim + 1; |
624 | if ((delim = strchr(cur, ':')) == NULL) | 572 | if ((delim = strchr(cur, ':')) == NULL) |
625 | goto parse_failed; | 573 | goto parse_failed; |
626 | *delim=0; | 574 | *delim = 0; |
627 | np->remote_mac[4]=simple_strtol(cur, NULL, 16); | 575 | np->remote_mac[4] = simple_strtol(cur, NULL, 16); |
628 | cur=delim+1; | 576 | cur = delim + 1; |
629 | np->remote_mac[5]=simple_strtol(cur, NULL, 16); | 577 | np->remote_mac[5] = simple_strtol(cur, NULL, 16); |
630 | } | 578 | } |
631 | 579 | ||
632 | printk(KERN_INFO "%s: remote ethernet address " | 580 | printk(KERN_INFO "%s: remote ethernet address " |
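The six strchr()/simple_strtol() pairs in this hunk are mechanical enough to collapse into a loop; a minimal equivalent sketch (assuming a local int i, with errors funnelled to the same parse_failed label):

    /* parse "aa:bb:cc:dd:ee:ff" into np->remote_mac[0..5] */
    for (i = 0; i < 5; i++) {
        if ((delim = strchr(cur, ':')) == NULL)
            goto parse_failed;
        *delim = 0;
        np->remote_mac[i] = simple_strtol(cur, NULL, 16);
        cur = delim + 1;
    }
    np->remote_mac[5] = simple_strtol(cur, NULL, 16);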
@@ -653,34 +601,44 @@ int netpoll_setup(struct netpoll *np) | |||
653 | struct in_device *in_dev; | 601 | struct in_device *in_dev; |
654 | struct netpoll_info *npinfo; | 602 | struct netpoll_info *npinfo; |
655 | unsigned long flags; | 603 | unsigned long flags; |
604 | int err; | ||
656 | 605 | ||
657 | if (np->dev_name) | 606 | if (np->dev_name) |
658 | ndev = dev_get_by_name(np->dev_name); | 607 | ndev = dev_get_by_name(np->dev_name); |
659 | if (!ndev) { | 608 | if (!ndev) { |
660 | printk(KERN_ERR "%s: %s doesn't exist, aborting.\n", | 609 | printk(KERN_ERR "%s: %s doesn't exist, aborting.\n", |
661 | np->name, np->dev_name); | 610 | np->name, np->dev_name); |
662 | return -1; | 611 | return -ENODEV; |
663 | } | 612 | } |
664 | 613 | ||
665 | np->dev = ndev; | 614 | np->dev = ndev; |
666 | if (!ndev->npinfo) { | 615 | if (!ndev->npinfo) { |
667 | npinfo = kmalloc(sizeof(*npinfo), GFP_KERNEL); | 616 | npinfo = kmalloc(sizeof(*npinfo), GFP_KERNEL); |
668 | if (!npinfo) | 617 | if (!npinfo) { |
618 | err = -ENOMEM; | ||
669 | goto release; | 619 | goto release; |
620 | } | ||
670 | 621 | ||
671 | npinfo->rx_flags = 0; | 622 | npinfo->rx_flags = 0; |
672 | npinfo->rx_np = NULL; | 623 | npinfo->rx_np = NULL; |
673 | spin_lock_init(&npinfo->poll_lock); | 624 | spin_lock_init(&npinfo->poll_lock); |
674 | npinfo->poll_owner = -1; | 625 | npinfo->poll_owner = -1; |
675 | npinfo->tries = MAX_RETRIES; | 626 | |
676 | spin_lock_init(&npinfo->rx_lock); | 627 | spin_lock_init(&npinfo->rx_lock); |
677 | skb_queue_head_init(&npinfo->arp_tx); | 628 | skb_queue_head_init(&npinfo->arp_tx); |
678 | } else | 629 | skb_queue_head_init(&npinfo->txq); |
630 | INIT_DELAYED_WORK(&npinfo->tx_work, queue_process); | ||
631 | |||
632 | atomic_set(&npinfo->refcnt, 1); | ||
633 | } else { | ||
679 | npinfo = ndev->npinfo; | 634 | npinfo = ndev->npinfo; |
635 | atomic_inc(&npinfo->refcnt); | ||
636 | } | ||
680 | 637 | ||
681 | if (!ndev->poll_controller) { | 638 | if (!ndev->poll_controller) { |
682 | printk(KERN_ERR "%s: %s doesn't support polling, aborting.\n", | 639 | printk(KERN_ERR "%s: %s doesn't support polling, aborting.\n", |
683 | np->name, np->dev_name); | 640 | np->name, np->dev_name); |
641 | err = -ENOTSUPP; | ||
684 | goto release; | 642 | goto release; |
685 | } | 643 | } |
686 | 644 | ||
@@ -691,13 +649,14 @@ int netpoll_setup(struct netpoll *np) | |||
691 | np->name, np->dev_name); | 649 | np->name, np->dev_name); |
692 | 650 | ||
693 | rtnl_lock(); | 651 | rtnl_lock(); |
694 | if (dev_change_flags(ndev, ndev->flags | IFF_UP) < 0) { | 652 | err = dev_open(ndev); |
653 | rtnl_unlock(); | ||
654 | |||
655 | if (err) { | ||
695 | printk(KERN_ERR "%s: failed to open %s\n", | 656 | printk(KERN_ERR "%s: failed to open %s\n", |
696 | np->name, np->dev_name); | 657 | np->name, ndev->name); |
697 | rtnl_unlock(); | ||
698 | goto release; | 658 | goto release; |
699 | } | 659 | } |
700 | rtnl_unlock(); | ||
701 | 660 | ||
702 | atleast = jiffies + HZ/10; | 661 | atleast = jiffies + HZ/10; |
703 | atmost = jiffies + 4*HZ; | 662 | atmost = jiffies + 4*HZ; |
@@ -735,6 +694,7 @@ int netpoll_setup(struct netpoll *np) | |||
735 | rcu_read_unlock(); | 694 | rcu_read_unlock(); |
736 | printk(KERN_ERR "%s: no IP address for %s, aborting\n", | 695 | printk(KERN_ERR "%s: no IP address for %s, aborting\n", |
737 | np->name, np->dev_name); | 696 | np->name, np->dev_name); |
697 | err = -EDESTADDRREQ; | ||
738 | goto release; | 698 | goto release; |
739 | } | 699 | } |
740 | 700 | ||
@@ -767,9 +727,16 @@ int netpoll_setup(struct netpoll *np) | |||
767 | kfree(npinfo); | 727 | kfree(npinfo); |
768 | np->dev = NULL; | 728 | np->dev = NULL; |
769 | dev_put(ndev); | 729 | dev_put(ndev); |
770 | return -1; | 730 | return err; |
771 | } | 731 | } |
772 | 732 | ||
733 | static int __init netpoll_init(void) | ||
734 | { | ||
735 | skb_queue_head_init(&skb_pool); | ||
736 | return 0; | ||
737 | } | ||
738 | core_initcall(netpoll_init); | ||
739 | |||
773 | void netpoll_cleanup(struct netpoll *np) | 740 | void netpoll_cleanup(struct netpoll *np) |
774 | { | 741 | { |
775 | struct netpoll_info *npinfo; | 742 | struct netpoll_info *npinfo; |
@@ -777,12 +744,25 @@ void netpoll_cleanup(struct netpoll *np) | |||
777 | 744 | ||
778 | if (np->dev) { | 745 | if (np->dev) { |
779 | npinfo = np->dev->npinfo; | 746 | npinfo = np->dev->npinfo; |
780 | if (npinfo && npinfo->rx_np == np) { | 747 | if (npinfo) { |
781 | spin_lock_irqsave(&npinfo->rx_lock, flags); | 748 | if (npinfo->rx_np == np) { |
782 | npinfo->rx_np = NULL; | 749 | spin_lock_irqsave(&npinfo->rx_lock, flags); |
783 | npinfo->rx_flags &= ~NETPOLL_RX_ENABLED; | 750 | npinfo->rx_np = NULL; |
784 | spin_unlock_irqrestore(&npinfo->rx_lock, flags); | 751 | npinfo->rx_flags &= ~NETPOLL_RX_ENABLED; |
752 | spin_unlock_irqrestore(&npinfo->rx_lock, flags); | ||
753 | } | ||
754 | |||
755 | np->dev->npinfo = NULL; | ||
756 | if (atomic_dec_and_test(&npinfo->refcnt)) { | ||
757 | skb_queue_purge(&npinfo->arp_tx); | ||
758 | skb_queue_purge(&npinfo->txq); | ||
759 | cancel_rearming_delayed_work(&npinfo->tx_work); | ||
760 | flush_scheduled_work(); | ||
761 | |||
762 | kfree(npinfo); | ||
763 | } | ||
785 | } | 764 | } |
765 | |||
786 | dev_put(np->dev); | 766 | dev_put(np->dev); |
787 | } | 767 | } |
788 | 768 | ||
@@ -809,4 +789,3 @@ EXPORT_SYMBOL(netpoll_setup); | |||
809 | EXPORT_SYMBOL(netpoll_cleanup); | 789 | EXPORT_SYMBOL(netpoll_cleanup); |
810 | EXPORT_SYMBOL(netpoll_send_udp); | 790 | EXPORT_SYMBOL(netpoll_send_udp); |
811 | EXPORT_SYMBOL(netpoll_poll); | 791 | EXPORT_SYMBOL(netpoll_poll); |
812 | EXPORT_SYMBOL(netpoll_queue); | ||
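The txq/tx_work fields initialized in netpoll_setup() replace the exported netpoll_queue() helper whose EXPORT_SYMBOL disappears here: skbs the device cannot take right away are parked on npinfo->txq and drained later from a delayed work item. In the 2.6.20-era workqueue API the handler recovers its context with container_of(); a sketch of the shape of queue_process():

    static void queue_process(struct work_struct *work)
    {
        struct netpoll_info *npinfo =
            container_of(work, struct netpoll_info, tx_work.work);

        /* ... dequeue skbs from npinfo->txq and hand them to the driver;
         * if the device is busy, requeue and retry a tick later ... */
        schedule_delayed_work(&npinfo->tx_work, HZ / 10);
    }

This is also why netpoll_cleanup() must cancel the rearming work and purge txq before freeing npinfo.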
diff --git a/net/core/pktgen.c b/net/core/pktgen.c index 733d86d0a4fb..1897a3a385d8 100644 --- a/net/core/pktgen.c +++ b/net/core/pktgen.c | |||
@@ -207,7 +207,7 @@ static struct proc_dir_entry *pg_proc_dir = NULL; | |||
207 | #define SVLAN_TAG_SIZE(x) ((x)->svlan_id == 0xffff ? 0 : 4) | 207 | #define SVLAN_TAG_SIZE(x) ((x)->svlan_id == 0xffff ? 0 : 4) |
208 | 208 | ||
209 | struct flow_state { | 209 | struct flow_state { |
210 | __u32 cur_daddr; | 210 | __be32 cur_daddr; |
211 | int count; | 211 | int count; |
212 | }; | 212 | }; |
213 | 213 | ||
@@ -282,10 +282,10 @@ struct pktgen_dev { | |||
282 | /* If we're doing ranges, random or incremental, then this | 282 | /* If we're doing ranges, random or incremental, then this |
283 | * defines the min/max for those ranges. | 283 | * defines the min/max for those ranges. |
284 | */ | 284 | */ |
285 | __u32 saddr_min; /* inclusive, source IP address */ | 285 | __be32 saddr_min; /* inclusive, source IP address */ |
286 | __u32 saddr_max; /* exclusive, source IP address */ | 286 | __be32 saddr_max; /* exclusive, source IP address */ |
287 | __u32 daddr_min; /* inclusive, dest IP address */ | 287 | __be32 daddr_min; /* inclusive, dest IP address */ |
288 | __u32 daddr_max; /* exclusive, dest IP address */ | 288 | __be32 daddr_max; /* exclusive, dest IP address */ |
289 | 289 | ||
290 | __u16 udp_src_min; /* inclusive, source UDP port */ | 290 | __u16 udp_src_min; /* inclusive, source UDP port */ |
291 | __u16 udp_src_max; /* exclusive, source UDP port */ | 291 | __u16 udp_src_max; /* exclusive, source UDP port */ |
@@ -317,8 +317,8 @@ struct pktgen_dev { | |||
317 | 317 | ||
318 | __u32 cur_dst_mac_offset; | 318 | __u32 cur_dst_mac_offset; |
319 | __u32 cur_src_mac_offset; | 319 | __u32 cur_src_mac_offset; |
320 | __u32 cur_saddr; | 320 | __be32 cur_saddr; |
321 | __u32 cur_daddr; | 321 | __be32 cur_daddr; |
322 | __u16 cur_udp_dst; | 322 | __u16 cur_udp_dst; |
323 | __u16 cur_udp_src; | 323 | __u16 cur_udp_src; |
324 | __u32 cur_pkt_size; | 324 | __u32 cur_pkt_size; |
@@ -350,10 +350,10 @@ struct pktgen_dev { | |||
350 | }; | 350 | }; |
351 | 351 | ||
352 | struct pktgen_hdr { | 352 | struct pktgen_hdr { |
353 | __u32 pgh_magic; | 353 | __be32 pgh_magic; |
354 | __u32 seq_num; | 354 | __be32 seq_num; |
355 | __u32 tv_sec; | 355 | __be32 tv_sec; |
356 | __u32 tv_usec; | 356 | __be32 tv_usec; |
357 | }; | 357 | }; |
358 | 358 | ||
359 | struct pktgen_thread { | 359 | struct pktgen_thread { |
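The __u32 to __be32 conversions running through pktgen.c exist for sparse's endianness checking: __be32 marks a value as big-endian wire format, and mixing it with host-order arithmetic draws a warning unless the value passes through ntohl()/htonl() or, as in the MPLS hunk below, an explicit __force cast. The discipline in miniature:

    __be32 wire;          /* network byte order, as stored in packet headers */
    __u32  host;          /* host byte order, safe for arithmetic/compares */

    host = ntohl(wire);   /* convert before doing math */
    host++;
    wire = htonl(host);   /* convert back before writing to the packet */

Running sparse with endian checking enabled (make C=2 CF=-D__CHECK_ENDIAN__ in kernels of this vintage) is what makes these annotations pay off.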
@@ -2160,7 +2160,7 @@ static void mod_cur_headers(struct pktgen_dev *pkt_dev) | |||
2160 | for(i = 0; i < pkt_dev->nr_labels; i++) | 2160 | for(i = 0; i < pkt_dev->nr_labels; i++) |
2161 | if (pkt_dev->labels[i] & MPLS_STACK_BOTTOM) | 2161 | if (pkt_dev->labels[i] & MPLS_STACK_BOTTOM) |
2162 | pkt_dev->labels[i] = MPLS_STACK_BOTTOM | | 2162 | pkt_dev->labels[i] = MPLS_STACK_BOTTOM | |
2163 | (pktgen_random() & | 2163 | ((__force __be32)pktgen_random() & |
2164 | htonl(0x000fffff)); | 2164 | htonl(0x000fffff)); |
2165 | } | 2165 | } |
2166 | 2166 | ||
@@ -2220,29 +2220,25 @@ static void mod_cur_headers(struct pktgen_dev *pkt_dev) | |||
2220 | if (pkt_dev->cflows && pkt_dev->flows[flow].count != 0) { | 2220 | if (pkt_dev->cflows && pkt_dev->flows[flow].count != 0) { |
2221 | pkt_dev->cur_daddr = pkt_dev->flows[flow].cur_daddr; | 2221 | pkt_dev->cur_daddr = pkt_dev->flows[flow].cur_daddr; |
2222 | } else { | 2222 | } else { |
2223 | 2223 | imn = ntohl(pkt_dev->daddr_min); | |
2224 | if ((imn = ntohl(pkt_dev->daddr_min)) < (imx = | 2224 | imx = ntohl(pkt_dev->daddr_max); |
2225 | ntohl(pkt_dev-> | 2225 | if (imn < imx) { |
2226 | daddr_max))) | ||
2227 | { | ||
2228 | __u32 t; | 2226 | __u32 t; |
2227 | __be32 s; | ||
2229 | if (pkt_dev->flags & F_IPDST_RND) { | 2228 | if (pkt_dev->flags & F_IPDST_RND) { |
2230 | 2229 | ||
2231 | t = ((pktgen_random() % (imx - imn)) + | 2230 | t = pktgen_random() % (imx - imn) + imn; |
2232 | imn); | 2231 | s = htonl(t); |
2233 | t = htonl(t); | ||
2234 | 2232 | ||
2235 | while (LOOPBACK(t) || MULTICAST(t) | 2233 | while (LOOPBACK(s) || MULTICAST(s) |
2236 | || BADCLASS(t) || ZERONET(t) | 2234 | || BADCLASS(s) || ZERONET(s) |
2237 | || LOCAL_MCAST(t)) { | 2235 | || LOCAL_MCAST(s)) { |
2238 | t = ((pktgen_random() % | 2236 | t = (pktgen_random() % |
2239 | (imx - imn)) + imn); | 2237 | (imx - imn)) + imn; |
2240 | t = htonl(t); | 2238 | s = htonl(t); |
2241 | } | 2239 | } |
2242 | pkt_dev->cur_daddr = t; | 2240 | pkt_dev->cur_daddr = s; |
2243 | } | 2241 | } else { |
2244 | |||
2245 | else { | ||
2246 | t = ntohl(pkt_dev->cur_daddr); | 2242 | t = ntohl(pkt_dev->cur_daddr); |
2247 | t++; | 2243 | t++; |
2248 | if (t > imx) { | 2244 | if (t > imx) { |
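The rewritten F_IPDST_RND branch above is rejection sampling: draw uniformly from [imn, imx), convert to network order, and redraw while the address lands in a reserved range. Restated compactly (is_bad_dst() is a hypothetical predicate bundling the LOOPBACK/MULTICAST/BADCLASS/ZERONET/LOCAL_MCAST tests):

    __u32 t;
    __be32 s;

    do {
        t = imn + (pktgen_random() % (imx - imn));
        s = htonl(t);
    } while (is_bad_dst(s));        /* redraw on reserved addresses */
    pkt_dev->cur_daddr = s;

Keeping t in host order and s in wire order is exactly the split the new __u32 t / __be32 s locals make explicit.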
@@ -2270,7 +2266,7 @@ static void mod_cur_headers(struct pktgen_dev *pkt_dev) | |||
2270 | 2266 | ||
2271 | for (i = 0; i < 4; i++) { | 2267 | for (i = 0; i < 4; i++) { |
2272 | pkt_dev->cur_in6_daddr.s6_addr32[i] = | 2268 | pkt_dev->cur_in6_daddr.s6_addr32[i] = |
2273 | ((pktgen_random() | | 2269 | (((__force __be32)pktgen_random() | |
2274 | pkt_dev->min_in6_daddr.s6_addr32[i]) & | 2270 | pkt_dev->min_in6_daddr.s6_addr32[i]) & |
2275 | pkt_dev->max_in6_daddr.s6_addr32[i]); | 2271 | pkt_dev->max_in6_daddr.s6_addr32[i]); |
2276 | } | 2272 | } |
@@ -2377,7 +2373,7 @@ static struct sk_buff *fill_packet_ipv4(struct net_device *odev, | |||
2377 | udph = (struct udphdr *)skb_put(skb, sizeof(struct udphdr)); | 2373 | udph = (struct udphdr *)skb_put(skb, sizeof(struct udphdr)); |
2378 | 2374 | ||
2379 | memcpy(eth, pkt_dev->hh, 12); | 2375 | memcpy(eth, pkt_dev->hh, 12); |
2380 | *(u16 *) & eth[12] = protocol; | 2376 | *(__be16 *) & eth[12] = protocol; |
2381 | 2377 | ||
2382 | /* Eth + IPh + UDPh + mpls */ | 2378 | /* Eth + IPh + UDPh + mpls */ |
2383 | datalen = pkt_dev->cur_pkt_size - 14 - 20 - 8 - | 2379 | datalen = pkt_dev->cur_pkt_size - 14 - 20 - 8 - |
@@ -2497,7 +2493,7 @@ static unsigned int scan_ip6(const char *s, char ip[16]) | |||
2497 | char suffix[16]; | 2493 | char suffix[16]; |
2498 | unsigned int prefixlen = 0; | 2494 | unsigned int prefixlen = 0; |
2499 | unsigned int suffixlen = 0; | 2495 | unsigned int suffixlen = 0; |
2500 | __u32 tmp; | 2496 | __be32 tmp; |
2501 | 2497 | ||
2502 | for (i = 0; i < 16; i++) | 2498 | for (i = 0; i < 16; i++) |
2503 | ip[i] = 0; | 2499 | ip[i] = 0; |
@@ -2713,7 +2709,7 @@ static struct sk_buff *fill_packet_ipv6(struct net_device *odev, | |||
2713 | udph = (struct udphdr *)skb_put(skb, sizeof(struct udphdr)); | 2709 | udph = (struct udphdr *)skb_put(skb, sizeof(struct udphdr)); |
2714 | 2710 | ||
2715 | memcpy(eth, pkt_dev->hh, 12); | 2711 | memcpy(eth, pkt_dev->hh, 12); |
2716 | *(u16 *) & eth[12] = protocol; | 2712 | *(__be16 *) & eth[12] = protocol; |
2717 | 2713 | ||
2718 | /* Eth + IPh + UDPh + mpls */ | 2714 | /* Eth + IPh + UDPh + mpls */ |
2719 | datalen = pkt_dev->cur_pkt_size - 14 - | 2715 | datalen = pkt_dev->cur_pkt_size - 14 - |
@@ -2732,11 +2728,11 @@ static struct sk_buff *fill_packet_ipv6(struct net_device *odev, | |||
2732 | udph->len = htons(datalen + sizeof(struct udphdr)); | 2728 | udph->len = htons(datalen + sizeof(struct udphdr)); |
2733 | udph->check = 0; /* No checksum */ | 2729 | udph->check = 0; /* No checksum */ |
2734 | 2730 | ||
2735 | *(u32 *) iph = __constant_htonl(0x60000000); /* Version + flow */ | 2731 | *(__be32 *) iph = __constant_htonl(0x60000000); /* Version + flow */ |
2736 | 2732 | ||
2737 | if (pkt_dev->traffic_class) { | 2733 | if (pkt_dev->traffic_class) { |
2738 | /* Version + traffic class + flow (0) */ | 2734 | /* Version + traffic class + flow (0) */ |
2739 | *(u32 *)iph |= htonl(0x60000000 | (pkt_dev->traffic_class << 20)); | 2735 | *(__be32 *)iph |= htonl(0x60000000 | (pkt_dev->traffic_class << 20)); |
2740 | } | 2736 | } |
2741 | 2737 | ||
2742 | iph->hop_limit = 32; | 2738 | iph->hop_limit = 32; |
diff --git a/net/core/request_sock.c b/net/core/request_sock.c index 79ebd75fbe4d..5f0818d815e6 100644 --- a/net/core/request_sock.c +++ b/net/core/request_sock.c | |||
@@ -15,6 +15,7 @@ | |||
15 | #include <linux/random.h> | 15 | #include <linux/random.h> |
16 | #include <linux/slab.h> | 16 | #include <linux/slab.h> |
17 | #include <linux/string.h> | 17 | #include <linux/string.h> |
18 | #include <linux/vmalloc.h> | ||
18 | 19 | ||
19 | #include <net/request_sock.h> | 20 | #include <net/request_sock.h> |
20 | 21 | ||
@@ -29,22 +30,31 @@ | |||
29 | * it is absolutely not enough even at 100conn/sec. 256 cures most | 30 | * it is absolutely not enough even at 100conn/sec. 256 cures most |
30 | * of problems. This value is adjusted to 128 for very small machines | 31 | * of problems. This value is adjusted to 128 for very small machines |
31 | * (<=32Mb of memory) and to 1024 on normal or better ones (>=256Mb). | 32 | * (<=32Mb of memory) and to 1024 on normal or better ones (>=256Mb). |
32 | * Further increasing requires to change hash table size. | 33 | * Note : Don't forget somaxconn that may limit backlog too. |
33 | */ | 34 | */ |
34 | int sysctl_max_syn_backlog = 256; | 35 | int sysctl_max_syn_backlog = 256; |
35 | 36 | ||
36 | int reqsk_queue_alloc(struct request_sock_queue *queue, | 37 | int reqsk_queue_alloc(struct request_sock_queue *queue, |
37 | const int nr_table_entries) | 38 | unsigned int nr_table_entries) |
38 | { | 39 | { |
39 | const int lopt_size = sizeof(struct listen_sock) + | 40 | size_t lopt_size = sizeof(struct listen_sock); |
40 | nr_table_entries * sizeof(struct request_sock *); | 41 | struct listen_sock *lopt; |
41 | struct listen_sock *lopt = kzalloc(lopt_size, GFP_KERNEL); | 42 | |
42 | 43 | nr_table_entries = min_t(u32, nr_table_entries, sysctl_max_syn_backlog); | |
44 | nr_table_entries = max_t(u32, nr_table_entries, 8); | ||
45 | nr_table_entries = roundup_pow_of_two(nr_table_entries + 1); | ||
46 | lopt_size += nr_table_entries * sizeof(struct request_sock *); | ||
47 | if (lopt_size > PAGE_SIZE) | ||
48 | lopt = __vmalloc(lopt_size, | ||
49 | GFP_KERNEL | __GFP_HIGHMEM | __GFP_ZERO, | ||
50 | PAGE_KERNEL); | ||
51 | else | ||
52 | lopt = kzalloc(lopt_size, GFP_KERNEL); | ||
43 | if (lopt == NULL) | 53 | if (lopt == NULL) |
44 | return -ENOMEM; | 54 | return -ENOMEM; |
45 | 55 | ||
46 | for (lopt->max_qlen_log = 6; | 56 | for (lopt->max_qlen_log = 3; |
47 | (1 << lopt->max_qlen_log) < sysctl_max_syn_backlog; | 57 | (1 << lopt->max_qlen_log) < nr_table_entries; |
48 | lopt->max_qlen_log++); | 58 | lopt->max_qlen_log++); |
49 | 59 | ||
50 | get_random_bytes(&lopt->hash_rnd, sizeof(lopt->hash_rnd)); | 60 | get_random_bytes(&lopt->hash_rnd, sizeof(lopt->hash_rnd)); |
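Two behaviors change in reqsk_queue_alloc(): the table size is clamped into [8, sysctl_max_syn_backlog] and rounded up to a power of two, with max_qlen_log computed as the matching log2; and the allocation falls back to __vmalloc() once the table outgrows a page, since large contiguous kzalloc() requests can fail on fragmented memory. The size-dependent pairing, which reqsk_queue_destroy() below must mirror exactly, reduces to this sketch:

    /* zeroed allocation: kmalloc for small sizes, vmalloc for large */
    static void *table_alloc(size_t size)
    {
        if (size > PAGE_SIZE)
            return __vmalloc(size, GFP_KERNEL | __GFP_HIGHMEM | __GFP_ZERO,
                             PAGE_KERNEL);
        return kzalloc(size, GFP_KERNEL);
    }

    static void table_free(void *p, size_t size)
    {
        if (size > PAGE_SIZE)
            vfree(p);
        else
            kfree(p);
    }

Both sides must agree on the size, which is why the destroy path recomputes lopt_size from lopt->nr_table_entries.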
@@ -65,9 +75,11 @@ void reqsk_queue_destroy(struct request_sock_queue *queue) | |||
65 | { | 75 | { |
66 | /* make all the listen_opt local to us */ | 76 | /* make all the listen_opt local to us */ |
67 | struct listen_sock *lopt = reqsk_queue_yank_listen_sk(queue); | 77 | struct listen_sock *lopt = reqsk_queue_yank_listen_sk(queue); |
78 | size_t lopt_size = sizeof(struct listen_sock) + | ||
79 | lopt->nr_table_entries * sizeof(struct request_sock *); | ||
68 | 80 | ||
69 | if (lopt->qlen != 0) { | 81 | if (lopt->qlen != 0) { |
70 | int i; | 82 | unsigned int i; |
71 | 83 | ||
72 | for (i = 0; i < lopt->nr_table_entries; i++) { | 84 | for (i = 0; i < lopt->nr_table_entries; i++) { |
73 | struct request_sock *req; | 85 | struct request_sock *req; |
@@ -81,7 +93,10 @@ void reqsk_queue_destroy(struct request_sock_queue *queue) | |||
81 | } | 93 | } |
82 | 94 | ||
83 | BUG_TRAP(lopt->qlen == 0); | 95 | BUG_TRAP(lopt->qlen == 0); |
84 | kfree(lopt); | 96 | if (lopt_size > PAGE_SIZE) |
97 | vfree(lopt); | ||
98 | else | ||
99 | kfree(lopt); | ||
85 | } | 100 | } |
86 | 101 | ||
87 | EXPORT_SYMBOL(reqsk_queue_destroy); | 102 | EXPORT_SYMBOL(reqsk_queue_destroy); |
diff --git a/net/core/rtnetlink.c b/net/core/rtnetlink.c index 02f3c7947898..e76539a5eb5e 100644 --- a/net/core/rtnetlink.c +++ b/net/core/rtnetlink.c | |||
@@ -108,7 +108,6 @@ static const int rtm_min[RTM_NR_FAMILIES] = | |||
108 | [RTM_FAM(RTM_NEWTCLASS)] = NLMSG_LENGTH(sizeof(struct tcmsg)), | 108 | [RTM_FAM(RTM_NEWTCLASS)] = NLMSG_LENGTH(sizeof(struct tcmsg)), |
109 | [RTM_FAM(RTM_NEWTFILTER)] = NLMSG_LENGTH(sizeof(struct tcmsg)), | 109 | [RTM_FAM(RTM_NEWTFILTER)] = NLMSG_LENGTH(sizeof(struct tcmsg)), |
110 | [RTM_FAM(RTM_NEWACTION)] = NLMSG_LENGTH(sizeof(struct tcamsg)), | 110 | [RTM_FAM(RTM_NEWACTION)] = NLMSG_LENGTH(sizeof(struct tcamsg)), |
111 | [RTM_FAM(RTM_NEWPREFIX)] = NLMSG_LENGTH(sizeof(struct rtgenmsg)), | ||
112 | [RTM_FAM(RTM_GETMULTICAST)] = NLMSG_LENGTH(sizeof(struct rtgenmsg)), | 111 | [RTM_FAM(RTM_GETMULTICAST)] = NLMSG_LENGTH(sizeof(struct rtgenmsg)), |
113 | [RTM_FAM(RTM_GETANYCAST)] = NLMSG_LENGTH(sizeof(struct rtgenmsg)), | 112 | [RTM_FAM(RTM_GETANYCAST)] = NLMSG_LENGTH(sizeof(struct rtgenmsg)), |
114 | }; | 113 | }; |
@@ -213,6 +212,26 @@ nla_put_failure: | |||
213 | return nla_nest_cancel(skb, mx); | 212 | return nla_nest_cancel(skb, mx); |
214 | } | 213 | } |
215 | 214 | ||
215 | int rtnl_put_cacheinfo(struct sk_buff *skb, struct dst_entry *dst, u32 id, | ||
216 | u32 ts, u32 tsage, long expires, u32 error) | ||
217 | { | ||
218 | struct rta_cacheinfo ci = { | ||
219 | .rta_lastuse = jiffies_to_clock_t(jiffies - dst->lastuse), | ||
220 | .rta_used = dst->__use, | ||
221 | .rta_clntref = atomic_read(&(dst->__refcnt)), | ||
222 | .rta_error = error, | ||
223 | .rta_id = id, | ||
224 | .rta_ts = ts, | ||
225 | .rta_tsage = tsage, | ||
226 | }; | ||
227 | |||
228 | if (expires) | ||
229 | ci.rta_expires = jiffies_to_clock_t(expires); | ||
230 | |||
231 | return nla_put(skb, RTA_CACHEINFO, sizeof(ci), &ci); | ||
232 | } | ||
233 | |||
234 | EXPORT_SYMBOL_GPL(rtnl_put_cacheinfo); | ||
216 | 235 | ||
217 | static void set_operstate(struct net_device *dev, unsigned char transition) | 236 | static void set_operstate(struct net_device *dev, unsigned char transition) |
218 | { | 237 | { |
@@ -273,6 +292,25 @@ static void copy_rtnl_link_stats(struct rtnl_link_stats *a, | |||
273 | a->tx_compressed = b->tx_compressed; | 292 | a->tx_compressed = b->tx_compressed; |
274 | }; | 293 | }; |
275 | 294 | ||
295 | static inline size_t if_nlmsg_size(int iwbuflen) | ||
296 | { | ||
297 | return NLMSG_ALIGN(sizeof(struct ifinfomsg)) | ||
298 | + nla_total_size(IFNAMSIZ) /* IFLA_IFNAME */ | ||
299 | + nla_total_size(IFNAMSIZ) /* IFLA_QDISC */ | ||
300 | + nla_total_size(sizeof(struct rtnl_link_ifmap)) | ||
301 | + nla_total_size(sizeof(struct rtnl_link_stats)) | ||
302 | + nla_total_size(MAX_ADDR_LEN) /* IFLA_ADDRESS */ | ||
303 | + nla_total_size(MAX_ADDR_LEN) /* IFLA_BROADCAST */ | ||
304 | + nla_total_size(4) /* IFLA_TXQLEN */ | ||
305 | + nla_total_size(4) /* IFLA_WEIGHT */ | ||
306 | + nla_total_size(4) /* IFLA_MTU */ | ||
307 | + nla_total_size(4) /* IFLA_LINK */ | ||
308 | + nla_total_size(4) /* IFLA_MASTER */ | ||
309 | + nla_total_size(1) /* IFLA_OPERSTATE */ | ||
310 | + nla_total_size(1) /* IFLA_LINKMODE */ | ||
311 | + nla_total_size(iwbuflen); | ||
312 | } | ||
313 | |||
276 | static int rtnl_fill_ifinfo(struct sk_buff *skb, struct net_device *dev, | 314 | static int rtnl_fill_ifinfo(struct sk_buff *skb, struct net_device *dev, |
277 | void *iwbuf, int iwbuflen, int type, u32 pid, | 315 | void *iwbuf, int iwbuflen, int type, u32 pid, |
278 | u32 seq, u32 change, unsigned int flags) | 316 | u32 seq, u32 change, unsigned int flags) |
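if_nlmsg_size() totals the worst-case footprint of every attribute rtnl_fill_ifinfo() can emit, so the skbs allocated from it can never run short; that is what licenses replacing the fill-failure error path with BUG_ON(err < 0) in the hunks below. Each nla_total_size(payload) term is the attribute header plus payload, rounded to netlink's 4-byte alignment; approximately:

    /* sketch of the arithmetic behind nla_total_size() (names prefixed to
     * avoid clashing with the real <linux/netlink.h> definitions) */
    #define MY_NLA_ALIGNTO  4
    #define MY_NLA_ALIGN(n) (((n) + MY_NLA_ALIGNTO - 1) & ~(MY_NLA_ALIGNTO - 1))
    #define MY_NLA_HDRLEN   MY_NLA_ALIGN(sizeof(struct nlattr))

    static inline int my_nla_total_size(int payload)
    {
        return MY_NLA_ALIGN(MY_NLA_HDRLEN + payload);
    }

    /* e.g. a u32 attribute costs MY_NLA_ALIGN(4 + 4) == 8 bytes of skb space */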
@@ -558,7 +596,7 @@ static int rtnl_getlink(struct sk_buff *skb, struct nlmsghdr* nlh, void *arg) | |||
558 | struct sk_buff *nskb; | 596 | struct sk_buff *nskb; |
559 | char *iw_buf = NULL, *iw = NULL; | 597 | char *iw_buf = NULL, *iw = NULL; |
560 | int iw_buf_len = 0; | 598 | int iw_buf_len = 0; |
561 | int err, payload; | 599 | int err; |
562 | 600 | ||
563 | err = nlmsg_parse(nlh, sizeof(*ifm), tb, IFLA_MAX, ifla_policy); | 601 | err = nlmsg_parse(nlh, sizeof(*ifm), tb, IFLA_MAX, ifla_policy); |
564 | if (err < 0) | 602 | if (err < 0) |
@@ -587,9 +625,7 @@ static int rtnl_getlink(struct sk_buff *skb, struct nlmsghdr* nlh, void *arg) | |||
587 | } | 625 | } |
588 | #endif /* CONFIG_NET_WIRELESS_RTNETLINK */ | 626 | #endif /* CONFIG_NET_WIRELESS_RTNETLINK */ |
589 | 627 | ||
590 | payload = NLMSG_ALIGN(sizeof(struct ifinfomsg) + | 628 | nskb = nlmsg_new(if_nlmsg_size(iw_buf_len), GFP_KERNEL); |
591 | nla_total_size(iw_buf_len)); | ||
592 | nskb = nlmsg_new(nlmsg_total_size(payload), GFP_KERNEL); | ||
593 | if (nskb == NULL) { | 629 | if (nskb == NULL) { |
594 | err = -ENOBUFS; | 630 | err = -ENOBUFS; |
595 | goto errout; | 631 | goto errout; |
@@ -597,10 +633,8 @@ static int rtnl_getlink(struct sk_buff *skb, struct nlmsghdr* nlh, void *arg) | |||
597 | 633 | ||
598 | err = rtnl_fill_ifinfo(nskb, dev, iw, iw_buf_len, RTM_NEWLINK, | 634 | err = rtnl_fill_ifinfo(nskb, dev, iw, iw_buf_len, RTM_NEWLINK, |
599 | NETLINK_CB(skb).pid, nlh->nlmsg_seq, 0, 0); | 635 | NETLINK_CB(skb).pid, nlh->nlmsg_seq, 0, 0); |
600 | if (err <= 0) { | 636 | /* failure implies BUG in if_nlmsg_size or wireless_rtnetlink_get */ |
601 | kfree_skb(nskb); | 637 | BUG_ON(err < 0); |
602 | goto errout; | ||
603 | } | ||
604 | 638 | ||
605 | err = rtnl_unicast(nskb, NETLINK_CB(skb).pid); | 639 | err = rtnl_unicast(nskb, NETLINK_CB(skb).pid); |
606 | errout: | 640 | errout: |
@@ -639,15 +673,13 @@ void rtmsg_ifinfo(int type, struct net_device *dev, unsigned change) | |||
639 | struct sk_buff *skb; | 673 | struct sk_buff *skb; |
640 | int err = -ENOBUFS; | 674 | int err = -ENOBUFS; |
641 | 675 | ||
642 | skb = nlmsg_new(NLMSG_GOODSIZE, GFP_KERNEL); | 676 | skb = nlmsg_new(if_nlmsg_size(0), GFP_KERNEL); |
643 | if (skb == NULL) | 677 | if (skb == NULL) |
644 | goto errout; | 678 | goto errout; |
645 | 679 | ||
646 | err = rtnl_fill_ifinfo(skb, dev, NULL, 0, type, 0, 0, change, 0); | 680 | err = rtnl_fill_ifinfo(skb, dev, NULL, 0, type, 0, 0, change, 0); |
647 | if (err < 0) { | 681 | /* failure implies BUG in if_nlmsg_size() */ |
648 | kfree_skb(skb); | 682 | BUG_ON(err < 0); |
649 | goto errout; | ||
650 | } | ||
651 | 683 | ||
652 | err = rtnl_notify(skb, 0, RTNLGRP_LINK, NULL, GFP_KERNEL); | 684 | err = rtnl_notify(skb, 0, RTNLGRP_LINK, NULL, GFP_KERNEL); |
653 | errout: | 685 | errout: |
diff --git a/net/core/skbuff.c b/net/core/skbuff.c index b8b106358040..a90bc439488e 100644 --- a/net/core/skbuff.c +++ b/net/core/skbuff.c | |||
@@ -473,8 +473,8 @@ struct sk_buff *skb_clone(struct sk_buff *skb, gfp_t gfp_mask) | |||
473 | #endif | 473 | #endif |
474 | C(protocol); | 474 | C(protocol); |
475 | n->destructor = NULL; | 475 | n->destructor = NULL; |
476 | C(mark); | ||
476 | #ifdef CONFIG_NETFILTER | 477 | #ifdef CONFIG_NETFILTER |
477 | C(nfmark); | ||
478 | C(nfct); | 478 | C(nfct); |
479 | nf_conntrack_get(skb->nfct); | 479 | nf_conntrack_get(skb->nfct); |
480 | C(nfctinfo); | 480 | C(nfctinfo); |
@@ -534,8 +534,8 @@ static void copy_skb_header(struct sk_buff *new, const struct sk_buff *old) | |||
534 | new->pkt_type = old->pkt_type; | 534 | new->pkt_type = old->pkt_type; |
535 | new->tstamp = old->tstamp; | 535 | new->tstamp = old->tstamp; |
536 | new->destructor = NULL; | 536 | new->destructor = NULL; |
537 | new->mark = old->mark; | ||
537 | #ifdef CONFIG_NETFILTER | 538 | #ifdef CONFIG_NETFILTER |
538 | new->nfmark = old->nfmark; | ||
539 | new->nfct = old->nfct; | 539 | new->nfct = old->nfct; |
540 | nf_conntrack_get(old->nfct); | 540 | nf_conntrack_get(old->nfct); |
541 | new->nfctinfo = old->nfctinfo; | 541 | new->nfctinfo = old->nfctinfo; |
@@ -1240,8 +1240,8 @@ EXPORT_SYMBOL(skb_store_bits); | |||
1240 | 1240 | ||
1241 | /* Checksum skb data. */ | 1241 | /* Checksum skb data. */ |
1242 | 1242 | ||
1243 | unsigned int skb_checksum(const struct sk_buff *skb, int offset, | 1243 | __wsum skb_checksum(const struct sk_buff *skb, int offset, |
1244 | int len, unsigned int csum) | 1244 | int len, __wsum csum) |
1245 | { | 1245 | { |
1246 | int start = skb_headlen(skb); | 1246 | int start = skb_headlen(skb); |
1247 | int i, copy = start - offset; | 1247 | int i, copy = start - offset; |
@@ -1265,7 +1265,7 @@ unsigned int skb_checksum(const struct sk_buff *skb, int offset, | |||
1265 | 1265 | ||
1266 | end = start + skb_shinfo(skb)->frags[i].size; | 1266 | end = start + skb_shinfo(skb)->frags[i].size; |
1267 | if ((copy = end - offset) > 0) { | 1267 | if ((copy = end - offset) > 0) { |
1268 | unsigned int csum2; | 1268 | __wsum csum2; |
1269 | u8 *vaddr; | 1269 | u8 *vaddr; |
1270 | skb_frag_t *frag = &skb_shinfo(skb)->frags[i]; | 1270 | skb_frag_t *frag = &skb_shinfo(skb)->frags[i]; |
1271 | 1271 | ||
@@ -1294,7 +1294,7 @@ unsigned int skb_checksum(const struct sk_buff *skb, int offset, | |||
1294 | 1294 | ||
1295 | end = start + list->len; | 1295 | end = start + list->len; |
1296 | if ((copy = end - offset) > 0) { | 1296 | if ((copy = end - offset) > 0) { |
1297 | unsigned int csum2; | 1297 | __wsum csum2; |
1298 | if (copy > len) | 1298 | if (copy > len) |
1299 | copy = len; | 1299 | copy = len; |
1300 | csum2 = skb_checksum(list, offset - start, | 1300 | csum2 = skb_checksum(list, offset - start, |
@@ -1315,8 +1315,8 @@ unsigned int skb_checksum(const struct sk_buff *skb, int offset, | |||
1315 | 1315 | ||
1316 | /* Both of above in one bottle. */ | 1316 | /* Both of above in one bottle. */ |
1317 | 1317 | ||
1318 | unsigned int skb_copy_and_csum_bits(const struct sk_buff *skb, int offset, | 1318 | __wsum skb_copy_and_csum_bits(const struct sk_buff *skb, int offset, |
1319 | u8 *to, int len, unsigned int csum) | 1319 | u8 *to, int len, __wsum csum) |
1320 | { | 1320 | { |
1321 | int start = skb_headlen(skb); | 1321 | int start = skb_headlen(skb); |
1322 | int i, copy = start - offset; | 1322 | int i, copy = start - offset; |
@@ -1342,7 +1342,7 @@ unsigned int skb_copy_and_csum_bits(const struct sk_buff *skb, int offset, | |||
1342 | 1342 | ||
1343 | end = start + skb_shinfo(skb)->frags[i].size; | 1343 | end = start + skb_shinfo(skb)->frags[i].size; |
1344 | if ((copy = end - offset) > 0) { | 1344 | if ((copy = end - offset) > 0) { |
1345 | unsigned int csum2; | 1345 | __wsum csum2; |
1346 | u8 *vaddr; | 1346 | u8 *vaddr; |
1347 | skb_frag_t *frag = &skb_shinfo(skb)->frags[i]; | 1347 | skb_frag_t *frag = &skb_shinfo(skb)->frags[i]; |
1348 | 1348 | ||
@@ -1368,7 +1368,7 @@ unsigned int skb_copy_and_csum_bits(const struct sk_buff *skb, int offset, | |||
1368 | struct sk_buff *list = skb_shinfo(skb)->frag_list; | 1368 | struct sk_buff *list = skb_shinfo(skb)->frag_list; |
1369 | 1369 | ||
1370 | for (; list; list = list->next) { | 1370 | for (; list; list = list->next) { |
1371 | unsigned int csum2; | 1371 | __wsum csum2; |
1372 | int end; | 1372 | int end; |
1373 | 1373 | ||
1374 | BUG_TRAP(start <= offset + len); | 1374 | BUG_TRAP(start <= offset + len); |
@@ -1396,7 +1396,7 @@ unsigned int skb_copy_and_csum_bits(const struct sk_buff *skb, int offset, | |||
1396 | 1396 | ||
1397 | void skb_copy_and_csum_dev(const struct sk_buff *skb, u8 *to) | 1397 | void skb_copy_and_csum_dev(const struct sk_buff *skb, u8 *to) |
1398 | { | 1398 | { |
1399 | unsigned int csum; | 1399 | __wsum csum; |
1400 | long csstart; | 1400 | long csstart; |
1401 | 1401 | ||
1402 | if (skb->ip_summed == CHECKSUM_PARTIAL) | 1402 | if (skb->ip_summed == CHECKSUM_PARTIAL) |
@@ -1414,9 +1414,9 @@ void skb_copy_and_csum_dev(const struct sk_buff *skb, u8 *to) | |||
1414 | skb->len - csstart, 0); | 1414 | skb->len - csstart, 0); |
1415 | 1415 | ||
1416 | if (skb->ip_summed == CHECKSUM_PARTIAL) { | 1416 | if (skb->ip_summed == CHECKSUM_PARTIAL) { |
1417 | long csstuff = csstart + skb->csum; | 1417 | long csstuff = csstart + skb->csum_offset; |
1418 | 1418 | ||
1419 | *((unsigned short *)(to + csstuff)) = csum_fold(csum); | 1419 | *((__sum16 *)(to + csstuff)) = csum_fold(csum); |
1420 | } | 1420 | } |
1421 | } | 1421 | } |
1422 | 1422 | ||
diff --git a/net/core/sock.c b/net/core/sock.c index ee6cd2541d35..ab8fafadb4ba 100644 --- a/net/core/sock.c +++ b/net/core/sock.c | |||
@@ -270,7 +270,7 @@ out: | |||
270 | } | 270 | } |
271 | EXPORT_SYMBOL(sock_queue_rcv_skb); | 271 | EXPORT_SYMBOL(sock_queue_rcv_skb); |
272 | 272 | ||
273 | int sk_receive_skb(struct sock *sk, struct sk_buff *skb) | 273 | int sk_receive_skb(struct sock *sk, struct sk_buff *skb, const int nested) |
274 | { | 274 | { |
275 | int rc = NET_RX_SUCCESS; | 275 | int rc = NET_RX_SUCCESS; |
276 | 276 | ||
@@ -279,7 +279,10 @@ int sk_receive_skb(struct sock *sk, struct sk_buff *skb) | |||
279 | 279 | ||
280 | skb->dev = NULL; | 280 | skb->dev = NULL; |
281 | 281 | ||
282 | bh_lock_sock(sk); | 282 | if (nested) |
283 | bh_lock_sock_nested(sk); | ||
284 | else | ||
285 | bh_lock_sock(sk); | ||
283 | if (!sock_owned_by_user(sk)) { | 286 | if (!sock_owned_by_user(sk)) { |
284 | /* | 287 | /* |
285 | * trylock + unlock semantics: | 288 | * trylock + unlock semantics: |
@@ -1527,7 +1530,7 @@ void sock_init_data(struct socket *sock, struct sock *sk) | |||
1527 | atomic_set(&sk->sk_refcnt, 1); | 1530 | atomic_set(&sk->sk_refcnt, 1); |
1528 | } | 1531 | } |
1529 | 1532 | ||
1530 | void fastcall lock_sock(struct sock *sk) | 1533 | void fastcall lock_sock_nested(struct sock *sk, int subclass) |
1531 | { | 1534 | { |
1532 | might_sleep(); | 1535 | might_sleep(); |
1533 | spin_lock_bh(&sk->sk_lock.slock); | 1536 | spin_lock_bh(&sk->sk_lock.slock); |
@@ -1538,11 +1541,11 @@ void fastcall lock_sock(struct sock *sk) | |||
1538 | /* | 1541 | /* |
1539 | * The sk_lock has mutex_lock() semantics here: | 1542 | * The sk_lock has mutex_lock() semantics here: |
1540 | */ | 1543 | */ |
1541 | mutex_acquire(&sk->sk_lock.dep_map, 0, 0, _RET_IP_); | 1544 | mutex_acquire(&sk->sk_lock.dep_map, subclass, 0, _RET_IP_); |
1542 | local_bh_enable(); | 1545 | local_bh_enable(); |
1543 | } | 1546 | } |
1544 | 1547 | ||
1545 | EXPORT_SYMBOL(lock_sock); | 1548 | EXPORT_SYMBOL(lock_sock_nested); |
1546 | 1549 | ||
1547 | void fastcall release_sock(struct sock *sk) | 1550 | void fastcall release_sock(struct sock *sk) |
1548 | { | 1551 | { |
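sk_receive_skb() gains a nested flag and lock_sock() becomes lock_sock_nested() so that a second socket lock can be taken under a non-zero lockdep subclass, telling the lock validator that locking one socket while another is already held (e.g. a child under its listener) is intentional nesting rather than a deadlock. The unnested entry point then survives as a subclass-0 wrapper, along the lines of:

    /* sketch: lock_sock() keeps its old meaning as the subclass-0 case */
    static inline void lock_sock(struct sock *sk)
    {
        lock_sock_nested(sk, 0);
    }

Callers that know they are on a nested path pass nested == 1, which selects bh_lock_sock_nested() above.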
diff --git a/net/core/sysctl_net_core.c b/net/core/sysctl_net_core.c index 02534131d88e..1e75b1585460 100644 --- a/net/core/sysctl_net_core.c +++ b/net/core/sysctl_net_core.c | |||
@@ -21,10 +21,6 @@ extern __u32 sysctl_rmem_max; | |||
21 | 21 | ||
22 | extern int sysctl_core_destroy_delay; | 22 | extern int sysctl_core_destroy_delay; |
23 | 23 | ||
24 | #ifdef CONFIG_NET_DIVERT | ||
25 | extern char sysctl_divert_version[]; | ||
26 | #endif /* CONFIG_NET_DIVERT */ | ||
27 | |||
28 | #ifdef CONFIG_XFRM | 24 | #ifdef CONFIG_XFRM |
29 | extern u32 sysctl_xfrm_aevent_etime; | 25 | extern u32 sysctl_xfrm_aevent_etime; |
30 | extern u32 sysctl_xfrm_aevent_rseqth; | 26 | extern u32 sysctl_xfrm_aevent_rseqth; |
@@ -105,16 +101,6 @@ ctl_table core_table[] = { | |||
105 | .mode = 0644, | 101 | .mode = 0644, |
106 | .proc_handler = &proc_dointvec | 102 | .proc_handler = &proc_dointvec |
107 | }, | 103 | }, |
108 | #ifdef CONFIG_NET_DIVERT | ||
109 | { | ||
110 | .ctl_name = NET_CORE_DIVERT_VERSION, | ||
111 | .procname = "divert_version", | ||
112 | .data = (void *)sysctl_divert_version, | ||
113 | .maxlen = 32, | ||
114 | .mode = 0444, | ||
115 | .proc_handler = &proc_dostring | ||
116 | }, | ||
117 | #endif /* CONFIG_NET_DIVERT */ | ||
118 | #ifdef CONFIG_XFRM | 104 | #ifdef CONFIG_XFRM |
119 | { | 105 | { |
120 | .ctl_name = NET_CORE_AEVENT_ETIME, | 106 | .ctl_name = NET_CORE_AEVENT_ETIME, |
diff --git a/net/core/utils.c b/net/core/utils.c index d93fe64f6693..61556065f07e 100644 --- a/net/core/utils.c +++ b/net/core/utils.c | |||
@@ -88,7 +88,7 @@ EXPORT_SYMBOL(in_aton); | |||
88 | #define IN6PTON_NULL 0x20000000 /* first/tail */ | 88 | #define IN6PTON_NULL 0x20000000 /* first/tail */ |
89 | #define IN6PTON_UNKNOWN 0x40000000 | 89 | #define IN6PTON_UNKNOWN 0x40000000 |
90 | 90 | ||
91 | static inline int digit2bin(char c, char delim) | 91 | static inline int digit2bin(char c, int delim) |
92 | { | 92 | { |
93 | if (c == delim || c == '\0') | 93 | if (c == delim || c == '\0') |
94 | return IN6PTON_DELIM; | 94 | return IN6PTON_DELIM; |
@@ -99,7 +99,7 @@ static inline int digit2bin(char c, char delim) | |||
99 | return IN6PTON_UNKNOWN; | 99 | return IN6PTON_UNKNOWN; |
100 | } | 100 | } |
101 | 101 | ||
102 | static inline int xdigit2bin(char c, char delim) | 102 | static inline int xdigit2bin(char c, int delim) |
103 | { | 103 | { |
104 | if (c == delim || c == '\0') | 104 | if (c == delim || c == '\0') |
105 | return IN6PTON_DELIM; | 105 | return IN6PTON_DELIM; |
@@ -113,12 +113,14 @@ static inline int xdigit2bin(char c, char delim) | |||
113 | return (IN6PTON_XDIGIT | (c - 'a' + 10)); | 113 | return (IN6PTON_XDIGIT | (c - 'a' + 10)); |
114 | if (c >= 'A' && c <= 'F') | 114 | if (c >= 'A' && c <= 'F') |
115 | return (IN6PTON_XDIGIT | (c - 'A' + 10)); | 115 | return (IN6PTON_XDIGIT | (c - 'A' + 10)); |
116 | if (delim == -1) | ||
117 | return IN6PTON_DELIM; | ||
116 | return IN6PTON_UNKNOWN; | 118 | return IN6PTON_UNKNOWN; |
117 | } | 119 | } |
118 | 120 | ||
119 | int in4_pton(const char *src, int srclen, | 121 | int in4_pton(const char *src, int srclen, |
120 | u8 *dst, | 122 | u8 *dst, |
121 | char delim, const char **end) | 123 | int delim, const char **end) |
122 | { | 124 | { |
123 | const char *s; | 125 | const char *s; |
124 | u8 *d; | 126 | u8 *d; |
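Widening delim from char to int makes room for a sentinel: with delim == -1, xdigit2bin() now classifies any unrecognized character as IN6PTON_DELIM rather than IN6PTON_UNKNOWN, so the parsers accept an address terminated by anything (or nothing). A hedged usage sketch, assuming the usual convention that a negative srclen means "use strlen(src)":

    /* parse a dotted quad with no particular terminator required;
     * in4_pton() returns 1 on success, 0 on a malformed address */
    u8 addr[4];
    const char *end;

    if (!in4_pton("192.168.0.1", -1, addr, -1, &end))
        printk(KERN_ERR "bad address\n");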
@@ -173,7 +175,7 @@ EXPORT_SYMBOL(in4_pton); | |||
173 | 175 | ||
174 | int in6_pton(const char *src, int srclen, | 176 | int in6_pton(const char *src, int srclen, |
175 | u8 *dst, | 177 | u8 *dst, |
176 | char delim, const char **end) | 178 | int delim, const char **end) |
177 | { | 179 | { |
178 | const char *s, *tok = NULL; | 180 | const char *s, *tok = NULL; |
179 | u8 *d, *dc = NULL; | 181 | u8 *d, *dc = NULL; |