author	Linus Torvalds <torvalds@woody.linux-foundation.org>	2007-10-25 18:50:32 -0400
committer	Linus Torvalds <torvalds@woody.linux-foundation.org>	2007-10-25 18:50:32 -0400
commit	06dbbfef8296d6dc23e5d8030a0e8e7b20df3b7c (patch)
tree	9f93c1a56082d80e5741bb2a231e93314efbcd7e
parent	22fa8d59be28affbecc6ae87abf528aebeebff24 (diff)
parent	03cf786c4e83dba404ad23ca58f49147ae52dffd (diff)
Merge branch 'master' of git://git.kernel.org/pub/scm/linux/kernel/git/davem/net-2.6
* 'master' of git://git.kernel.org/pub/scm/linux/kernel/git/davem/net-2.6:
  [IPV4]: Explicitly call fib_get_table() in fib_frontend.c
  [NET]: Use BUILD_BUG_ON in net/core/flowi.c
  [NET]: Remove in-code externs for some functions from net/core/dev.c
  [NET]: Don't declare extern variables in net/core/sysctl_net_core.c
  [TCP]: Remove unneeded implicit type cast when calling tcp_minshall_update()
  [NET]: Treat the sign of the result of skb_headroom() consistently
  [9P]: Fix missing unlock before return in p9_mux_poll_start
  [PKT_SCHED]: Fix sch_prio.c build with CONFIG_NETDEVICES_MULTIQUEUE
  [IPV4] ip_gre: sendto/recvfrom NBMA address
  [SCTP]: Consolidate sctp_ulpq_renege_xxx functions
  [NETLINK]: Fix ACK processing after netlink_dump_start
  [VLAN]: MAINTAINERS update
  [DCCP]: Implement SIOCINQ/FIONREAD
  [NET]: Validate device addr prior to interface-up
-rw-r--r--  MAINTAINERS                 |  6
-rw-r--r--  drivers/net/rrunner.c       |  8
-rw-r--r--  include/linux/netdevice.h   |  2
-rw-r--r--  include/linux/skbuff.h      |  4
-rw-r--r--  include/net/tcp.h           |  2
-rw-r--r--  include/net/xfrm.h          |  2
-rw-r--r--  net/9p/mux.c                |  4
-rw-r--r--  net/core/dev.c              | 20
-rw-r--r--  net/core/flow.c             |  5
-rw-r--r--  net/core/net-sysfs.c        |  2
-rw-r--r--  net/core/net-sysfs.h        |  8
-rw-r--r--  net/core/sysctl_net_core.c  | 17
-rw-r--r--  net/dccp/proto.c            | 33
-rw-r--r--  net/ethernet/eth.c          |  9
-rw-r--r--  net/ipv4/fib_frontend.c     | 12
-rw-r--r--  net/ipv4/ip_gre.c           | 14
-rw-r--r--  net/ipv4/ip_output.c        |  2
-rw-r--r--  net/ipv4/ipip.c             |  2
-rw-r--r--  net/ipv4/ipvs/ip_vs_xmit.c  |  2
-rw-r--r--  net/ipv4/tcp_input.c        |  2
-rw-r--r--  net/ipv6/ip6_output.c       |  2
-rw-r--r--  net/ipv6/ip6_tunnel.c       |  2
-rw-r--r--  net/ipv6/sit.c              |  2
-rw-r--r--  net/netlink/af_netlink.c    | 16
-rw-r--r--  net/sched/sch_prio.c        |  4
-rw-r--r--  net/sctp/ulpqueue.c         | 34
26 files changed, 133 insertions(+), 83 deletions(-)
diff --git a/MAINTAINERS b/MAINTAINERS
index 5862b7860094..f985dfa5941c 100644
--- a/MAINTAINERS
+++ b/MAINTAINERS
@@ -4148,6 +4148,12 @@ W: http://linuxtv.org
 T:	git kernel.org:/pub/scm/linux/kernel/git/mchehab/v4l-dvb.git
 S:	Maintained
 
+VLAN (802.1Q)
+P:	Patrick McHardy
+M:	kaber@trash.net
+L:	netdev@vger.kernel.org
+S:	Maintained
+
 VT1211 HARDWARE MONITOR DRIVER
 P:	Juerg Haefliger
 M:	juergh@gmail.com
diff --git a/drivers/net/rrunner.c b/drivers/net/rrunner.c
index 19152f54ef2b..b822859c8de3 100644
--- a/drivers/net/rrunner.c
+++ b/drivers/net/rrunner.c
@@ -79,12 +79,10 @@ static char version[] __devinitdata = "rrunner.c: v0.50 11/11/2002 Jes Sorensen
  */
 
 /*
- * These are checked at init time to see if they are at least 256KB
- * and increased to 256KB if they are not. This is done to avoid ending
- * up with socket buffers smaller than the MTU size,
+ * sysctl_[wr]mem_max are checked at init time to see if they are at
+ * least 256KB and increased to 256KB if they are not. This is done to
+ * avoid ending up with socket buffers smaller than the MTU size,
  */
-extern __u32 sysctl_wmem_max;
-extern __u32 sysctl_rmem_max;
 
 static int __devinit rr_init_one(struct pci_dev *pdev,
 				 const struct pci_device_id *ent)
diff --git a/include/linux/netdevice.h b/include/linux/netdevice.h
index c4de536cefa3..811024e311bd 100644
--- a/include/linux/netdevice.h
+++ b/include/linux/netdevice.h
@@ -669,6 +669,8 @@ struct net_device
 #define HAVE_SET_MAC_ADDR
 	int			(*set_mac_address)(struct net_device *dev,
 						   void *addr);
+#define HAVE_VALIDATE_ADDR
+	int			(*validate_addr)(struct net_device *dev);
 #define HAVE_PRIVATE_IOCTL
 	int			(*do_ioctl)(struct net_device *dev,
 					    struct ifreq *ifr, int cmd);
diff --git a/include/linux/skbuff.h b/include/linux/skbuff.h
index fd4e12f24270..94e49915a8c0 100644
--- a/include/linux/skbuff.h
+++ b/include/linux/skbuff.h
@@ -994,7 +994,7 @@ static inline int pskb_may_pull(struct sk_buff *skb, unsigned int len)
  *
  *	Return the number of bytes of free space at the head of an &sk_buff.
  */
-static inline int skb_headroom(const struct sk_buff *skb)
+static inline unsigned int skb_headroom(const struct sk_buff *skb)
 {
 	return skb->data - skb->head;
 }
@@ -1347,7 +1347,7 @@ static inline struct sk_buff *netdev_alloc_skb(struct net_device *dev,
  *	Returns true if modifying the header part of the cloned buffer
  *	does not requires the data to be copied.
  */
-static inline int skb_clone_writable(struct sk_buff *skb, int len)
+static inline int skb_clone_writable(struct sk_buff *skb, unsigned int len)
 {
 	return !skb_header_cloned(skb) &&
 	       skb_headroom(skb) + len <= skb->hdr_len;
diff --git a/include/net/tcp.h b/include/net/tcp.h
index 92049e681258..d695cea7730d 100644
--- a/include/net/tcp.h
+++ b/include/net/tcp.h
@@ -803,7 +803,7 @@ static inline int tcp_is_cwnd_limited(const struct sock *sk, u32 in_flight)
 	return left <= tcp_max_burst(tp);
 }
 
-static inline void tcp_minshall_update(struct tcp_sock *tp, int mss,
+static inline void tcp_minshall_update(struct tcp_sock *tp, unsigned int mss,
 				       const struct sk_buff *skb)
 {
 	if (skb->len < mss)
diff --git a/include/net/xfrm.h b/include/net/xfrm.h
index 688f6f5d3285..58dfa82889aa 100644
--- a/include/net/xfrm.h
+++ b/include/net/xfrm.h
@@ -37,6 +37,8 @@
 extern struct sock *xfrm_nl;
 extern u32 sysctl_xfrm_aevent_etime;
 extern u32 sysctl_xfrm_aevent_rseqth;
+extern int sysctl_xfrm_larval_drop;
+extern u32 sysctl_xfrm_acq_expires;
 
 extern struct mutex xfrm_cfg_mutex;
 
diff --git a/net/9p/mux.c b/net/9p/mux.c
index f14014793bed..c9f0805048e4 100644
--- a/net/9p/mux.c
+++ b/net/9p/mux.c
@@ -222,8 +222,10 @@ static int p9_mux_poll_start(struct p9_conn *m)
 	}
 
 	if (i >= ARRAY_SIZE(p9_mux_poll_tasks)) {
-		if (vptlast == NULL)
+		if (vptlast == NULL) {
+			mutex_unlock(&p9_mux_task_lock);
 			return -ENOMEM;
+		}
 
 		P9_DPRINTK(P9_DEBUG_MUX, "put in proc %d\n", i);
 		list_add(&m->mux_list, &vptlast->mux_list);
diff --git a/net/core/dev.c b/net/core/dev.c
index 872658927e47..f1647d7dd14b 100644
--- a/net/core/dev.c
+++ b/net/core/dev.c
@@ -120,6 +120,8 @@
 #include <linux/ctype.h>
 #include <linux/if_arp.h>
 
+#include "net-sysfs.h"
+
 /*
  *	The list of packet types we will receive (as opposed to discard)
  *	and the routines to invoke.
@@ -249,10 +251,6 @@ static RAW_NOTIFIER_HEAD(netdev_chain);
 
 DEFINE_PER_CPU(struct softnet_data, softnet_data);
 
-extern int netdev_kobject_init(void);
-extern int netdev_register_kobject(struct net_device *);
-extern void netdev_unregister_kobject(struct net_device *);
-
 #ifdef CONFIG_DEBUG_LOCK_ALLOC
 /*
  * register_netdevice() inits dev->_xmit_lock and sets lockdep class
@@ -1007,17 +1005,20 @@ int dev_open(struct net_device *dev)
 	 *	Call device private open method
 	 */
 	set_bit(__LINK_STATE_START, &dev->state);
-	if (dev->open) {
+
+	if (dev->validate_addr)
+		ret = dev->validate_addr(dev);
+
+	if (!ret && dev->open)
 		ret = dev->open(dev);
-		if (ret)
-			clear_bit(__LINK_STATE_START, &dev->state);
-	}
 
 	/*
 	 *	If it went open OK then:
 	 */
 
-	if (!ret) {
+	if (ret)
+		clear_bit(__LINK_STATE_START, &dev->state);
+	else {
 		/*
 		 *	Set the flags.
 		 */
@@ -1038,6 +1039,7 @@ int dev_open(struct net_device *dev)
 		 */
 		call_netdevice_notifiers(NETDEV_UP, dev);
 	}
+
 	return ret;
 }
 
diff --git a/net/core/flow.c b/net/core/flow.c
index 0ab5234b17d8..3ed2b4b1d6d4 100644
--- a/net/core/flow.c
+++ b/net/core/flow.c
@@ -142,8 +142,6 @@ typedef u64 flow_compare_t;
 typedef u32 flow_compare_t;
 #endif
 
-extern void flowi_is_missized(void);
-
 /* I hear what you're saying, use memcmp.  But memcmp cannot make
  * important assumptions that we can here, such as alignment and
  * constant size.
@@ -153,8 +151,7 @@ static int flow_key_compare(struct flowi *key1, struct flowi *key2)
 	flow_compare_t *k1, *k1_lim, *k2;
 	const int n_elem = sizeof(struct flowi) / sizeof(flow_compare_t);
 
-	if (sizeof(struct flowi) % sizeof(flow_compare_t))
-		flowi_is_missized();
+	BUILD_BUG_ON(sizeof(struct flowi) % sizeof(flow_compare_t));
 
 	k1 = (flow_compare_t *) key1;
 	k1_lim = k1 + n_elem;
diff --git a/net/core/net-sysfs.c b/net/core/net-sysfs.c
index 6628e457ddc0..61ead1d11132 100644
--- a/net/core/net-sysfs.c
+++ b/net/core/net-sysfs.c
@@ -18,6 +18,8 @@
 #include <linux/wireless.h>
 #include <net/iw_handler.h>
 
+#include "net-sysfs.h"
+
 #ifdef CONFIG_SYSFS
 static const char fmt_hex[] = "%#x\n";
 static const char fmt_long_hex[] = "%#lx\n";
diff --git a/net/core/net-sysfs.h b/net/core/net-sysfs.h
new file mode 100644
index 000000000000..f5f108db3924
--- /dev/null
+++ b/net/core/net-sysfs.h
@@ -0,0 +1,8 @@
+#ifndef __NET_SYSFS_H__
+#define __NET_SYSFS_H__
+
+int netdev_kobject_init(void);
+int netdev_register_kobject(struct net_device *);
+void netdev_unregister_kobject(struct net_device *);
+
+#endif
diff --git a/net/core/sysctl_net_core.c b/net/core/sysctl_net_core.c
index 6d5ea9762040..113cc728dc31 100644
--- a/net/core/sysctl_net_core.c
+++ b/net/core/sysctl_net_core.c
@@ -9,25 +9,12 @@
 #include <linux/sysctl.h>
 #include <linux/module.h>
 #include <linux/socket.h>
+#include <linux/netdevice.h>
 #include <net/sock.h>
+#include <net/xfrm.h>
 
 #ifdef CONFIG_SYSCTL
 
-extern int netdev_max_backlog;
-extern int weight_p;
-
-extern __u32 sysctl_wmem_max;
-extern __u32 sysctl_rmem_max;
-
-extern int sysctl_core_destroy_delay;
-
-#ifdef CONFIG_XFRM
-extern u32 sysctl_xfrm_aevent_etime;
-extern u32 sysctl_xfrm_aevent_rseqth;
-extern int sysctl_xfrm_larval_drop;
-extern u32 sysctl_xfrm_acq_expires;
-#endif
-
 ctl_table core_table[] = {
 #ifdef CONFIG_NET
 	{
diff --git a/net/dccp/proto.c b/net/dccp/proto.c
index cc9bf1cb2646..d84973928033 100644
--- a/net/dccp/proto.c
+++ b/net/dccp/proto.c
@@ -26,6 +26,7 @@
 #include <net/sock.h>
 #include <net/xfrm.h>
 
+#include <asm/ioctls.h>
 #include <asm/semaphore.h>
 #include <linux/spinlock.h>
 #include <linux/timer.h>
@@ -378,8 +379,36 @@ EXPORT_SYMBOL_GPL(dccp_poll);
 
 int dccp_ioctl(struct sock *sk, int cmd, unsigned long arg)
 {
-	dccp_pr_debug("entry\n");
-	return -ENOIOCTLCMD;
+	int rc = -ENOTCONN;
+
+	lock_sock(sk);
+
+	if (sk->sk_state == DCCP_LISTEN)
+		goto out;
+
+	switch (cmd) {
+	case SIOCINQ: {
+		struct sk_buff *skb;
+		unsigned long amount = 0;
+
+		skb = skb_peek(&sk->sk_receive_queue);
+		if (skb != NULL) {
+			/*
+			 * We will only return the amount of this packet since
+			 * that is all that will be read.
+			 */
+			amount = skb->len;
+		}
+		rc = put_user(amount, (int __user *)arg);
+	}
+		break;
+	default:
+		rc = -ENOIOCTLCMD;
+		break;
+	}
+out:
+	release_sock(sk);
+	return rc;
 }
 
 EXPORT_SYMBOL_GPL(dccp_ioctl);
diff --git a/net/ethernet/eth.c b/net/ethernet/eth.c
index ed8a3d49487d..6b2e454ae313 100644
--- a/net/ethernet/eth.c
+++ b/net/ethernet/eth.c
@@ -298,6 +298,14 @@ static int eth_change_mtu(struct net_device *dev, int new_mtu)
 	return 0;
 }
 
+static int eth_validate_addr(struct net_device *dev)
+{
+	if (!is_valid_ether_addr(dev->dev_addr))
+		return -EINVAL;
+
+	return 0;
+}
+
 const struct header_ops eth_header_ops ____cacheline_aligned = {
 	.create		= eth_header,
 	.parse		= eth_header_parse,
@@ -317,6 +325,7 @@ void ether_setup(struct net_device *dev)
 
 	dev->change_mtu		= eth_change_mtu;
 	dev->set_mac_address 	= eth_mac_addr;
+	dev->validate_addr	= eth_validate_addr;
 
 	dev->type		= ARPHRD_ETHER;
 	dev->hard_header_len 	= ETH_HLEN;
diff --git a/net/ipv4/fib_frontend.c b/net/ipv4/fib_frontend.c
index 78b514ba1414..60123905dbbf 100644
--- a/net/ipv4/fib_frontend.c
+++ b/net/ipv4/fib_frontend.c
@@ -128,13 +128,14 @@ struct net_device * ip_dev_find(__be32 addr)
 	struct flowi fl = { .nl_u = { .ip4_u = { .daddr = addr } } };
 	struct fib_result res;
 	struct net_device *dev = NULL;
+	struct fib_table *local_table;
 
 #ifdef CONFIG_IP_MULTIPLE_TABLES
 	res.r = NULL;
 #endif
 
-	if (!ip_fib_local_table ||
-	    ip_fib_local_table->tb_lookup(ip_fib_local_table, &fl, &res))
+	local_table = fib_get_table(RT_TABLE_LOCAL);
+	if (!local_table || local_table->tb_lookup(local_table, &fl, &res))
 		return NULL;
 	if (res.type != RTN_LOCAL)
 		goto out;
@@ -152,6 +153,7 @@ unsigned inet_addr_type(__be32 addr)
 	struct flowi		fl = { .nl_u = { .ip4_u = { .daddr = addr } } };
 	struct fib_result	res;
 	unsigned ret = RTN_BROADCAST;
+	struct fib_table *local_table;
 
 	if (ZERONET(addr) || BADCLASS(addr))
 		return RTN_BROADCAST;
@@ -162,10 +164,10 @@ unsigned inet_addr_type(__be32 addr)
 	res.r = NULL;
 #endif
 
-	if (ip_fib_local_table) {
+	local_table = fib_get_table(RT_TABLE_LOCAL);
+	if (local_table) {
 		ret = RTN_UNICAST;
-		if (!ip_fib_local_table->tb_lookup(ip_fib_local_table,
-						   &fl, &res)) {
+		if (!local_table->tb_lookup(local_table, &fl, &res)) {
 			ret = res.type;
 			fib_res_put(&res);
 		}
diff --git a/net/ipv4/ip_gre.c b/net/ipv4/ip_gre.c
index f151900efaf9..02b02a8d681c 100644
--- a/net/ipv4/ip_gre.c
+++ b/net/ipv4/ip_gre.c
@@ -674,7 +674,7 @@ static int ipgre_tunnel_xmit(struct sk_buff *skb, struct net_device *dev)
 	struct rtable *rt;			/* Route to the other host */
 	struct net_device *tdev;		/* Device to other host */
 	struct iphdr  *iph;			/* Our new IP header */
-	int    max_headroom;			/* The extra header space needed */
+	unsigned int max_headroom;		/* The extra header space needed */
 	int    gre_hlen;
 	__be32 dst;
 	int    mtu;
@@ -1033,7 +1033,6 @@ static int ipgre_tunnel_change_mtu(struct net_device *dev, int new_mtu)
 	return 0;
 }
 
-#ifdef CONFIG_NET_IPGRE_BROADCAST
 /* Nice toy. Unfortunately, useless in real life :-)
    It allows to construct virtual multiprotocol broadcast "LAN"
    over the Internet, provided multicast routing is tuned.
@@ -1092,10 +1091,19 @@ static int ipgre_header(struct sk_buff *skb, struct net_device *dev,
 	return -t->hlen;
 }
 
+static int ipgre_header_parse(const struct sk_buff *skb, unsigned char *haddr)
+{
+	struct iphdr *iph = (struct iphdr*) skb_mac_header(skb);
+	memcpy(haddr, &iph->saddr, 4);
+	return 4;
+}
+
 static const struct header_ops ipgre_header_ops = {
 	.create	= ipgre_header,
+	.parse	= ipgre_header_parse,
 };
 
+#ifdef CONFIG_NET_IPGRE_BROADCAST
 static int ipgre_open(struct net_device *dev)
 {
 	struct ip_tunnel *t = netdev_priv(dev);
@@ -1197,6 +1205,8 @@ static int ipgre_tunnel_init(struct net_device *dev)
 			dev->stop = ipgre_close;
 		}
 #endif
+	} else {
+		dev->header_ops = &ipgre_header_ops;
 	}
 
 	if (!tdev && tunnel->parms.link)
diff --git a/net/ipv4/ip_output.c b/net/ipv4/ip_output.c
index f508835ba713..e5f7dc2de303 100644
--- a/net/ipv4/ip_output.c
+++ b/net/ipv4/ip_output.c
@@ -161,7 +161,7 @@ static inline int ip_finish_output2(struct sk_buff *skb)
 	struct dst_entry *dst = skb->dst;
 	struct rtable *rt = (struct rtable *)dst;
 	struct net_device *dev = dst->dev;
-	int hh_len = LL_RESERVED_SPACE(dev);
+	unsigned int hh_len = LL_RESERVED_SPACE(dev);
 
 	if (rt->rt_type == RTN_MULTICAST)
 		IP_INC_STATS(IPSTATS_MIB_OUTMCASTPKTS);
diff --git a/net/ipv4/ipip.c b/net/ipv4/ipip.c
index 5cd5bbe1379a..8c2b2b0741da 100644
--- a/net/ipv4/ipip.c
+++ b/net/ipv4/ipip.c
@@ -515,7 +515,7 @@ static int ipip_tunnel_xmit(struct sk_buff *skb, struct net_device *dev)
 	struct net_device *tdev;		/* Device to other host */
 	struct iphdr  *old_iph = ip_hdr(skb);
 	struct iphdr  *iph;			/* Our new IP header */
-	int    max_headroom;			/* The extra header space needed */
+	unsigned int max_headroom;		/* The extra header space needed */
 	__be32 dst = tiph->daddr;
 	int    mtu;
 
diff --git a/net/ipv4/ipvs/ip_vs_xmit.c b/net/ipv4/ipvs/ip_vs_xmit.c
index d0a92dec1050..7c074e386c17 100644
--- a/net/ipv4/ipvs/ip_vs_xmit.c
+++ b/net/ipv4/ipvs/ip_vs_xmit.c
@@ -325,7 +325,7 @@ ip_vs_tunnel_xmit(struct sk_buff *skb, struct ip_vs_conn *cp,
 	__be16 df = old_iph->frag_off;
 	sk_buff_data_t old_transport_header = skb->transport_header;
 	struct iphdr  *iph;			/* Our new IP header */
-	int    max_headroom;		/* The extra header space needed */
+	unsigned int max_headroom;	/* The extra header space needed */
 	int    mtu;
 
 	EnterFunction(10);
diff --git a/net/ipv4/tcp_input.c b/net/ipv4/tcp_input.c
index 9288220b73a8..3dbbb44b3e7d 100644
--- a/net/ipv4/tcp_input.c
+++ b/net/ipv4/tcp_input.c
@@ -3909,7 +3909,7 @@ tcp_collapse(struct sock *sk, struct sk_buff_head *list,
 
 	while (before(start, end)) {
 		struct sk_buff *nskb;
-		int header = skb_headroom(skb);
+		unsigned int header = skb_headroom(skb);
 		int copy = SKB_MAX_ORDER(header, 0);
 
 		/* Too big header? This can happen with IPv6. */
diff --git a/net/ipv6/ip6_output.c b/net/ipv6/ip6_output.c
index 13565dfb1b45..653fc0a8235b 100644
--- a/net/ipv6/ip6_output.c
+++ b/net/ipv6/ip6_output.c
@@ -171,7 +171,7 @@ int ip6_xmit(struct sock *sk, struct sk_buff *skb, struct flowi *fl,
 	u32 mtu;
 
 	if (opt) {
-		int head_room;
+		unsigned int head_room;
 
 		/* First: exthdrs may take lots of space (~8K for now)
 		   MAX_HEADER is not enough.
diff --git a/net/ipv6/ip6_tunnel.c b/net/ipv6/ip6_tunnel.c
index 2320cc27ff9e..5383b33db8ca 100644
--- a/net/ipv6/ip6_tunnel.c
+++ b/net/ipv6/ip6_tunnel.c
@@ -838,7 +838,7 @@ static int ip6_tnl_xmit2(struct sk_buff *skb,
 	struct dst_entry *dst;
 	struct net_device *tdev;
 	int mtu;
-	int max_headroom = sizeof(struct ipv6hdr);
+	unsigned int max_headroom = sizeof(struct ipv6hdr);
 	u8 proto;
 	int err = -1;
 	int pkt_len;
diff --git a/net/ipv6/sit.c b/net/ipv6/sit.c
index 466657a9a8bd..71433d29d884 100644
--- a/net/ipv6/sit.c
+++ b/net/ipv6/sit.c
@@ -430,7 +430,7 @@ static int ipip6_tunnel_xmit(struct sk_buff *skb, struct net_device *dev)
 	struct rtable *rt;			/* Route to the other host */
 	struct net_device *tdev;		/* Device to other host */
 	struct iphdr  *iph;			/* Our new IP header */
-	int    max_headroom;			/* The extra header space needed */
+	unsigned int max_headroom;		/* The extra header space needed */
 	__be32 dst = tiph->daddr;
 	int    mtu;
 	struct in6_addr *addr6;
diff --git a/net/netlink/af_netlink.c b/net/netlink/af_netlink.c
index 98e313e5e594..325272925d0f 100644
--- a/net/netlink/af_netlink.c
+++ b/net/netlink/af_netlink.c
@@ -1565,7 +1565,11 @@ int netlink_dump_start(struct sock *ssk, struct sk_buff *skb,
 
 	netlink_dump(sk);
 	sock_put(sk);
-	return 0;
+
+	/* We successfully started a dump, by returning -EINTR we
+	 * signal not to send ACK even if it was requested.
+	 */
+	return -EINTR;
 }
 
 void netlink_ack(struct sk_buff *in_skb, struct nlmsghdr *nlh, int err)
@@ -1619,17 +1623,21 @@ int netlink_rcv_skb(struct sk_buff *skb, int (*cb)(struct sk_buff *,
 
 		/* Only requests are handled by the kernel */
 		if (!(nlh->nlmsg_flags & NLM_F_REQUEST))
-			goto skip;
+			goto ack;
 
 		/* Skip control messages */
 		if (nlh->nlmsg_type < NLMSG_MIN_TYPE)
-			goto skip;
+			goto ack;
 
 		err = cb(skb, nlh);
-skip:
+		if (err == -EINTR)
+			goto skip;
+
+ack:
 		if (nlh->nlmsg_flags & NLM_F_ACK || err)
 			netlink_ack(skb, nlh, err);
 
+skip:
 		msglen = NLMSG_ALIGN(nlh->nlmsg_len);
 		if (msglen > skb->len)
 			msglen = skb->len;
diff --git a/net/sched/sch_prio.c b/net/sched/sch_prio.c
index abd82fc3ec60..de894096e442 100644
--- a/net/sched/sch_prio.c
+++ b/net/sched/sch_prio.c
@@ -136,7 +136,7 @@ prio_dequeue(struct Qdisc* sch)
 	 * pulling an skb.  This way we avoid excessive requeues
 	 * for slower queues.
 	 */
-	if (!netif_subqueue_stopped(sch->dev, (q->mq ? prio : 0))) {
+	if (!__netif_subqueue_stopped(sch->dev, (q->mq ? prio : 0))) {
 		qdisc = q->queues[prio];
 		skb = qdisc->dequeue(qdisc);
 		if (skb) {
@@ -165,7 +165,7 @@ static struct sk_buff *rr_dequeue(struct Qdisc* sch)
 		 * for slower queues.  If the queue is stopped, try the
 		 * next queue.
 		 */
-		if (!netif_subqueue_stopped(sch->dev,
+		if (!__netif_subqueue_stopped(sch->dev,
 			(q->mq ? q->curband : 0))) {
 			qdisc = q->queues[q->curband];
 			skb = qdisc->dequeue(qdisc);
diff --git a/net/sctp/ulpqueue.c b/net/sctp/ulpqueue.c
index b9370956b187..4be92d0a2cab 100644
--- a/net/sctp/ulpqueue.c
+++ b/net/sctp/ulpqueue.c
@@ -908,8 +908,8 @@ void sctp_ulpq_skip(struct sctp_ulpq *ulpq, __u16 sid, __u16 ssn)
 	return;
 }
 
-/* Renege 'needed' bytes from the ordering queue. */
-static __u16 sctp_ulpq_renege_order(struct sctp_ulpq *ulpq, __u16 needed)
+static __u16 sctp_ulpq_renege_list(struct sctp_ulpq *ulpq,
+		struct sk_buff_head *list, __u16 needed)
 {
 	__u16 freed = 0;
 	__u32 tsn;
@@ -919,7 +919,7 @@ static __u16 sctp_ulpq_renege_order(struct sctp_ulpq *ulpq, __u16 needed)
 
 	tsnmap = &ulpq->asoc->peer.tsn_map;
 
-	while ((skb = __skb_dequeue_tail(&ulpq->lobby)) != NULL) {
+	while ((skb = __skb_dequeue_tail(list)) != NULL) {
 		freed += skb_headlen(skb);
 		event = sctp_skb2event(skb);
 		tsn = event->tsn;
@@ -933,30 +933,16 @@ static __u16 sctp_ulpq_renege_order(struct sctp_ulpq *ulpq, __u16 needed)
 	return freed;
 }
 
+/* Renege 'needed' bytes from the ordering queue. */
+static __u16 sctp_ulpq_renege_order(struct sctp_ulpq *ulpq, __u16 needed)
+{
+	return sctp_ulpq_renege_list(ulpq, &ulpq->lobby, needed);
+}
+
 /* Renege 'needed' bytes from the reassembly queue. */
 static __u16 sctp_ulpq_renege_frags(struct sctp_ulpq *ulpq, __u16 needed)
 {
-	__u16 freed = 0;
-	__u32 tsn;
-	struct sk_buff *skb;
-	struct sctp_ulpevent *event;
-	struct sctp_tsnmap *tsnmap;
-
-	tsnmap = &ulpq->asoc->peer.tsn_map;
-
-	/* Walk backwards through the list, reneges the newest tsns. */
-	while ((skb = __skb_dequeue_tail(&ulpq->reasm)) != NULL) {
-		freed += skb_headlen(skb);
-		event = sctp_skb2event(skb);
-		tsn = event->tsn;
-
-		sctp_ulpevent_free(event);
-		sctp_tsnmap_renege(tsnmap, tsn);
-		if (freed >= needed)
-			return freed;
-	}
-
-	return freed;
+	return sctp_ulpq_renege_list(ulpq, &ulpq->reasm, needed);
 }
 
 /* Partial deliver the first message as there is pressure on rwnd. */