Diffstat (limited to 'net')
-rw-r--r--  net/802/tr.c  2
-rw-r--r--  net/8021q/vlan_core.c  10
-rw-r--r--  net/8021q/vlan_dev.c  3
-rw-r--r--  net/9p/protocol.c  22
-rw-r--r--  net/bridge/br_forward.c  7
-rw-r--r--  net/core/dev.c  78
-rw-r--r--  net/core/neighbour.c  14
-rw-r--r--  net/core/net-sysfs.c  4
-rw-r--r--  net/core/net_namespace.c  89
-rw-r--r--  net/core/skbuff.c  23
-rw-r--r--  net/core/sock.c  3
-rw-r--r--  net/ipv4/cipso_ipv4.c  9
-rw-r--r--  net/ipv4/icmp.c  2
-rw-r--r--  net/ipv4/ip_fragment.c  3
-rw-r--r--  net/ipv4/ipconfig.c  6
-rw-r--r--  net/ipv4/tcp.c  3
-rw-r--r--  net/ipv4/tcp_input.c  9
-rw-r--r--  net/ipv4/tcp_ipv4.c  2
-rw-r--r--  net/ipv4/tcp_output.c  13
-rw-r--r--  net/ipv4/tcp_scalable.c  2
-rw-r--r--  net/ipv4/udp.c  68
-rw-r--r--  net/ipv6/addrconf.c  55
-rw-r--r--  net/ipv6/af_inet6.c  24
-rw-r--r--  net/ipv6/icmp.c  4
-rw-r--r--  net/ipv6/inet6_hashtables.c  4
-rw-r--r--  net/ipv6/ip6_flowlabel.c  8
-rw-r--r--  net/ipv6/ip6_input.c  2
-rw-r--r--  net/ipv6/ip6_output.c  67
-rw-r--r--  net/ipv6/ip6_tunnel.c  2
-rw-r--r--  net/ipv6/ip6mr.c  24
-rw-r--r--  net/ipv6/netfilter/nf_conntrack_proto_icmpv6.c  30
-rw-r--r--  net/ipv6/netfilter/nf_conntrack_reasm.c  8
-rw-r--r--  net/ipv6/reassembly.c  7
-rw-r--r--  net/ipv6/route.c  2
-rw-r--r--  net/ipv6/sit.c  2
-rw-r--r--  net/mac80211/tx.c  4
-rw-r--r--  net/netfilter/nf_conntrack_core.c  2
-rw-r--r--  net/netfilter/nf_conntrack_netlink.c  16
-rw-r--r--  net/netfilter/nf_conntrack_proto_tcp.c  4
-rw-r--r--  net/netfilter/nfnetlink_log.c  8
-rw-r--r--  net/netfilter/x_tables.c  199
-rw-r--r--  net/netfilter/xt_recent.c  2
-rw-r--r--  net/netfilter/xt_sctp.c  2
-rw-r--r--  net/netlink/af_netlink.c  10
-rw-r--r--  net/packet/af_packet.c  17
-rw-r--r--  net/phonet/pep-gprs.c  1
-rw-r--r--  net/phonet/pep.c  2
-rw-r--r--  net/rxrpc/af_rxrpc.c  5
-rw-r--r--  net/sched/act_police.c  13
-rw-r--r--  net/sched/sch_drr.c  6
-rw-r--r--  net/sctp/endpointola.c  3
-rw-r--r--  net/sctp/protocol.c  16
-rw-r--r--  net/sctp/sm_sideeffect.c  54
-rw-r--r--  net/sctp/sm_statefuns.c  16
-rw-r--r--  net/sunrpc/Kconfig  2
-rw-r--r--  net/sunrpc/sched.c  33
-rw-r--r--  net/sunrpc/xprt.c  2
-rw-r--r--  net/sunrpc/xprtsock.c  23
-rw-r--r--  net/wimax/debugfs.c  11
-rw-r--r--  net/wimax/id-table.c  9
-rw-r--r--  net/wimax/stack.c  13
-rw-r--r--  net/wireless/Kconfig  10
-rw-r--r--  net/wireless/lib80211_crypt_ccmp.c  2
-rw-r--r--  net/wireless/lib80211_crypt_tkip.c  4
-rw-r--r--  net/wireless/nl80211.c  11
-rw-r--r--  net/wireless/reg.c  35
-rw-r--r--  net/xfrm/xfrm_state.c  90
67 files changed, 800 insertions, 436 deletions
diff --git a/net/802/tr.c b/net/802/tr.c
index 158150fee462..f47ae289d83b 100644
--- a/net/802/tr.c
+++ b/net/802/tr.c
@@ -668,3 +668,5 @@ module_init(rif_init);
 
 EXPORT_SYMBOL(tr_type_trans);
 EXPORT_SYMBOL(alloc_trdev);
+
+MODULE_LICENSE("GPL");
diff --git a/net/8021q/vlan_core.c b/net/8021q/vlan_core.c
index e9db889d6222..2886d2fb9ab5 100644
--- a/net/8021q/vlan_core.c
+++ b/net/8021q/vlan_core.c
@@ -1,12 +1,16 @@
 #include <linux/skbuff.h>
 #include <linux/netdevice.h>
 #include <linux/if_vlan.h>
+#include <linux/netpoll.h>
 #include "vlan.h"
 
 /* VLAN rx hw acceleration helper.  This acts like netif_{rx,receive_skb}(). */
 int __vlan_hwaccel_rx(struct sk_buff *skb, struct vlan_group *grp,
		      u16 vlan_tci, int polling)
 {
+	if (netpoll_rx(skb))
+		return NET_RX_DROP;
+
 	if (skb_bond_should_drop(skb))
 		goto drop;
 
@@ -100,6 +104,9 @@ int vlan_gro_receive(struct napi_struct *napi, struct vlan_group *grp,
 {
 	int err = NET_RX_SUCCESS;
 
+	if (netpoll_receive_skb(skb))
+		return NET_RX_DROP;
+
 	switch (vlan_gro_common(napi, grp, vlan_tci, skb)) {
 	case -1:
 		return netif_receive_skb(skb);
@@ -126,6 +133,9 @@ int vlan_gro_frags(struct napi_struct *napi, struct vlan_group *grp,
 	if (!skb)
 		goto out;
 
+	if (netpoll_receive_skb(skb))
+		goto out;
+
 	err = NET_RX_SUCCESS;
 
 	switch (vlan_gro_common(napi, grp, vlan_tci, skb)) {
diff --git a/net/8021q/vlan_dev.c b/net/8021q/vlan_dev.c
index 4a19acd3a32b..1b34135cf990 100644
--- a/net/8021q/vlan_dev.c
+++ b/net/8021q/vlan_dev.c
@@ -553,7 +553,7 @@ static int vlan_dev_neigh_setup(struct net_device *dev, struct neigh_parms *pa)
 	int err = 0;
 
 	if (netif_device_present(real_dev) && ops->ndo_neigh_setup)
-		err = ops->ndo_neigh_setup(dev, pa);
+		err = ops->ndo_neigh_setup(real_dev, pa);
 
 	return err;
 }
@@ -639,6 +639,7 @@ static int vlan_dev_init(struct net_device *dev)
 		dev->hard_header_len = real_dev->hard_header_len + VLAN_HLEN;
 		dev->netdev_ops = &vlan_netdev_ops;
 	}
+	netdev_resync_ops(dev);
 
 	if (is_vlan_dev(real_dev))
 		subclass = 1;
diff --git a/net/9p/protocol.c b/net/9p/protocol.c
index dcd7666824ba..fc70147c771e 100644
--- a/net/9p/protocol.c
+++ b/net/9p/protocol.c
@@ -29,6 +29,7 @@
 #include <linux/errno.h>
 #include <linux/uaccess.h>
 #include <linux/sched.h>
+#include <linux/types.h>
 #include <net/9p/9p.h>
 #include <net/9p/client.h>
 #include "protocol.h"
@@ -160,29 +161,32 @@ p9pdu_vreadf(struct p9_fcall *pdu, int optional, const char *fmt, va_list ap)
 			break;
 		case 'w':{
 				int16_t *val = va_arg(ap, int16_t *);
-				if (pdu_read(pdu, val, sizeof(*val))) {
+				__le16 le_val;
+				if (pdu_read(pdu, &le_val, sizeof(le_val))) {
 					errcode = -EFAULT;
 					break;
 				}
-				*val = cpu_to_le16(*val);
+				*val = le16_to_cpu(le_val);
 			}
 			break;
 		case 'd':{
 				int32_t *val = va_arg(ap, int32_t *);
-				if (pdu_read(pdu, val, sizeof(*val))) {
+				__le32 le_val;
+				if (pdu_read(pdu, &le_val, sizeof(le_val))) {
 					errcode = -EFAULT;
 					break;
 				}
-				*val = cpu_to_le32(*val);
+				*val = le32_to_cpu(le_val);
 			}
 			break;
 		case 'q':{
 				int64_t *val = va_arg(ap, int64_t *);
-				if (pdu_read(pdu, val, sizeof(*val))) {
+				__le64 le_val;
+				if (pdu_read(pdu, &le_val, sizeof(le_val))) {
 					errcode = -EFAULT;
 					break;
 				}
-				*val = cpu_to_le64(*val);
+				*val = le64_to_cpu(le_val);
 			}
 			break;
 		case 's':{
@@ -362,19 +366,19 @@ p9pdu_vwritef(struct p9_fcall *pdu, int optional, const char *fmt, va_list ap)
 			}
 			break;
 		case 'w':{
-				int16_t val = va_arg(ap, int);
+				__le16 val = cpu_to_le16(va_arg(ap, int));
 				if (pdu_write(pdu, &val, sizeof(val)))
 					errcode = -EFAULT;
 			}
 			break;
 		case 'd':{
-				int32_t val = va_arg(ap, int32_t);
+				__le32 val = cpu_to_le32(va_arg(ap, int32_t));
 				if (pdu_write(pdu, &val, sizeof(val)))
 					errcode = -EFAULT;
 			}
 			break;
 		case 'q':{
-				int64_t val = va_arg(ap, int64_t);
+				__le64 val = cpu_to_le64(va_arg(ap, int64_t));
 				if (pdu_write(pdu, &val, sizeof(val)))
 					errcode = -EFAULT;
 			}
diff --git a/net/bridge/br_forward.c b/net/bridge/br_forward.c
index bdd9ccea17ce..d2c27c808d3b 100644
--- a/net/bridge/br_forward.c
+++ b/net/bridge/br_forward.c
@@ -67,6 +67,11 @@ static void __br_forward(const struct net_bridge_port *to, struct sk_buff *skb)
 {
 	struct net_device *indev;
 
+	if (skb_warn_if_lro(skb)) {
+		kfree_skb(skb);
+		return;
+	}
+
 	indev = skb->dev;
 	skb->dev = to->dev;
 	skb_forward_csum(skb);
@@ -89,7 +94,7 @@ void br_deliver(const struct net_bridge_port *to, struct sk_buff *skb)
 /* called with rcu_read_lock */
 void br_forward(const struct net_bridge_port *to, struct sk_buff *skb)
 {
-	if (!skb_warn_if_lro(skb) && should_deliver(to, skb)) {
+	if (should_deliver(to, skb)) {
 		__br_forward(to, skb);
 		return;
 	}
diff --git a/net/core/dev.c b/net/core/dev.c
index 5379b0c1190a..e3fe5c705606 100644
--- a/net/core/dev.c
+++ b/net/core/dev.c
@@ -1090,7 +1090,7 @@ int dev_open(struct net_device *dev)
 	/*
 	 *	Enable NET_DMA
 	 */
-	dmaengine_get();
+	net_dmaengine_get();
 
 	/*
 	 *	Initialize multicasting status
@@ -1172,7 +1172,7 @@ int dev_close(struct net_device *dev)
 	/*
 	 *	Shutdown NET_DMA
 	 */
-	dmaengine_put();
+	net_dmaengine_put();
 
 	return 0;
 }
@@ -2267,12 +2267,6 @@ int netif_receive_skb(struct sk_buff *skb)
 
 	rcu_read_lock();
 
-	/* Don't receive packets in an exiting network namespace */
-	if (!net_alive(dev_net(skb->dev))) {
-		kfree_skb(skb);
-		goto out;
-	}
-
 #ifdef CONFIG_NET_CLS_ACT
 	if (skb->tc_verd & TC_NCLS) {
 		skb->tc_verd = CLR_TC_NCLS(skb->tc_verd);
@@ -2488,6 +2482,9 @@ static int __napi_gro_receive(struct napi_struct *napi, struct sk_buff *skb)
 
 int napi_gro_receive(struct napi_struct *napi, struct sk_buff *skb)
 {
+	if (netpoll_receive_skb(skb))
+		return NET_RX_DROP;
+
 	switch (__napi_gro_receive(napi, skb)) {
 	case -1:
 		return netif_receive_skb(skb);
@@ -2558,6 +2555,9 @@ int napi_gro_frags(struct napi_struct *napi, struct napi_gro_fraginfo *info)
 	if (!skb)
 		goto out;
 
+	if (netpoll_receive_skb(skb))
+		goto out;
+
 	err = NET_RX_SUCCESS;
 
 	switch (__napi_gro_receive(napi, skb)) {
@@ -2588,9 +2588,9 @@ static int process_backlog(struct napi_struct *napi, int quota)
 		local_irq_disable();
 		skb = __skb_dequeue(&queue->input_pkt_queue);
 		if (!skb) {
-			__napi_complete(napi);
 			local_irq_enable();
-			break;
+			napi_complete(napi);
+			goto out;
 		}
 		local_irq_enable();
 
@@ -2599,6 +2599,7 @@ static int process_backlog(struct napi_struct *napi, int quota)
 
 	napi_gro_flush(napi);
 
+out:
 	return work;
 }
 
@@ -2671,7 +2672,7 @@ void netif_napi_del(struct napi_struct *napi)
 	struct sk_buff *skb, *next;
 
 	list_del_init(&napi->dev_list);
-	kfree(napi->skb);
+	kfree_skb(napi->skb);
 
 	for (skb = napi->gro_list; skb; skb = next) {
 		next = skb->next;
@@ -4282,6 +4283,39 @@ unsigned long netdev_fix_features(unsigned long features, const char *name)
 }
 EXPORT_SYMBOL(netdev_fix_features);
 
+/* Some devices need to (re-)set their netdev_ops inside
+ * ->init() or similar.  If that happens, we have to setup
+ * the compat pointers again.
+ */
+void netdev_resync_ops(struct net_device *dev)
+{
+#ifdef CONFIG_COMPAT_NET_DEV_OPS
+	const struct net_device_ops *ops = dev->netdev_ops;
+
+	dev->init = ops->ndo_init;
+	dev->uninit = ops->ndo_uninit;
+	dev->open = ops->ndo_open;
+	dev->change_rx_flags = ops->ndo_change_rx_flags;
+	dev->set_rx_mode = ops->ndo_set_rx_mode;
+	dev->set_multicast_list = ops->ndo_set_multicast_list;
+	dev->set_mac_address = ops->ndo_set_mac_address;
+	dev->validate_addr = ops->ndo_validate_addr;
+	dev->do_ioctl = ops->ndo_do_ioctl;
+	dev->set_config = ops->ndo_set_config;
+	dev->change_mtu = ops->ndo_change_mtu;
+	dev->neigh_setup = ops->ndo_neigh_setup;
+	dev->tx_timeout = ops->ndo_tx_timeout;
+	dev->get_stats = ops->ndo_get_stats;
+	dev->vlan_rx_register = ops->ndo_vlan_rx_register;
+	dev->vlan_rx_add_vid = ops->ndo_vlan_rx_add_vid;
+	dev->vlan_rx_kill_vid = ops->ndo_vlan_rx_kill_vid;
+#ifdef CONFIG_NET_POLL_CONTROLLER
+	dev->poll_controller = ops->ndo_poll_controller;
+#endif
+#endif
+}
+EXPORT_SYMBOL(netdev_resync_ops);
+
 /**
  *	register_netdevice	- register a network device
  *	@dev: device to register
@@ -4326,27 +4360,7 @@ int register_netdevice(struct net_device *dev)
 	 * This is temporary until all network devices are converted.
 	 */
 	if (dev->netdev_ops) {
-		const struct net_device_ops *ops = dev->netdev_ops;
-
-		dev->init = ops->ndo_init;
-		dev->uninit = ops->ndo_uninit;
-		dev->open = ops->ndo_open;
-		dev->change_rx_flags = ops->ndo_change_rx_flags;
-		dev->set_rx_mode = ops->ndo_set_rx_mode;
-		dev->set_multicast_list = ops->ndo_set_multicast_list;
-		dev->set_mac_address = ops->ndo_set_mac_address;
-		dev->validate_addr = ops->ndo_validate_addr;
-		dev->do_ioctl = ops->ndo_do_ioctl;
-		dev->set_config = ops->ndo_set_config;
-		dev->change_mtu = ops->ndo_change_mtu;
-		dev->tx_timeout = ops->ndo_tx_timeout;
-		dev->get_stats = ops->ndo_get_stats;
-		dev->vlan_rx_register = ops->ndo_vlan_rx_register;
-		dev->vlan_rx_add_vid = ops->ndo_vlan_rx_add_vid;
-		dev->vlan_rx_kill_vid = ops->ndo_vlan_rx_kill_vid;
-#ifdef CONFIG_NET_POLL_CONTROLLER
-		dev->poll_controller = ops->ndo_poll_controller;
-#endif
+		netdev_resync_ops(dev);
 	} else {
 		char drivername[64];
 		pr_info("%s (%s): not using net_device_ops yet\n",
diff --git a/net/core/neighbour.c b/net/core/neighbour.c
index f66c58df8953..278a142d1047 100644
--- a/net/core/neighbour.c
+++ b/net/core/neighbour.c
@@ -1994,8 +1994,8 @@ static int neightbl_dump_info(struct sk_buff *skb, struct netlink_callback *cb)
 			if (!net_eq(neigh_parms_net(p), net))
 				continue;
 
-			if (nidx++ < neigh_skip)
-				continue;
+			if (nidx < neigh_skip)
+				goto next;
 
 			if (neightbl_fill_param_info(skb, tbl, p,
 						     NETLINK_CB(cb->skb).pid,
@@ -2003,6 +2003,8 @@ static int neightbl_dump_info(struct sk_buff *skb, struct netlink_callback *cb)
 						     RTM_NEWNEIGHTBL,
 						     NLM_F_MULTI) <= 0)
 				goto out;
+		next:
+			nidx++;
 		}
 
 		neigh_skip = 0;
@@ -2082,12 +2084,10 @@ static int neigh_dump_table(struct neigh_table *tbl, struct sk_buff *skb,
 		if (h > s_h)
 			s_idx = 0;
 		for (n = tbl->hash_buckets[h], idx = 0; n; n = n->next) {
-			int lidx;
 			if (dev_net(n->dev) != net)
 				continue;
-			lidx = idx++;
-			if (lidx < s_idx)
-				continue;
+			if (idx < s_idx)
+				goto next;
 			if (neigh_fill_info(skb, n, NETLINK_CB(cb->skb).pid,
 					    cb->nlh->nlmsg_seq,
 					    RTM_NEWNEIGH,
@@ -2096,6 +2096,8 @@ static int neigh_dump_table(struct neigh_table *tbl, struct sk_buff *skb,
 				rc = -1;
 				goto out;
 			}
+		next:
+			idx++;
 		}
 	}
 	read_unlock_bh(&tbl->lock);
diff --git a/net/core/net-sysfs.c b/net/core/net-sysfs.c
index 6ac29a46e23e..484f58750eba 100644
--- a/net/core/net-sysfs.c
+++ b/net/core/net-sysfs.c
@@ -77,7 +77,9 @@ static ssize_t netdev_store(struct device *dev, struct device_attribute *attr,
 	if (endp == buf)
 		goto err;
 
-	rtnl_lock();
+	if (!rtnl_trylock())
+		return -ERESTARTSYS;
+
 	if (dev_isalive(net)) {
 		if ((ret = (*set)(net, new)) == 0)
 			ret = len;
diff --git a/net/core/net_namespace.c b/net/core/net_namespace.c
index 55151faaf90c..e3bebd36f053 100644
--- a/net/core/net_namespace.c
+++ b/net/core/net_namespace.c
@@ -32,24 +32,14 @@ static __net_init int setup_net(struct net *net)
 {
 	/* Must be called with net_mutex held */
 	struct pernet_operations *ops;
-	int error;
-	struct net_generic *ng;
+	int error = 0;
 
 	atomic_set(&net->count, 1);
+
 #ifdef NETNS_REFCNT_DEBUG
 	atomic_set(&net->use_count, 0);
 #endif
 
-	error = -ENOMEM;
-	ng = kzalloc(sizeof(struct net_generic) +
-			INITIAL_NET_GEN_PTRS * sizeof(void *), GFP_KERNEL);
-	if (ng == NULL)
-		goto out;
-
-	ng->len = INITIAL_NET_GEN_PTRS;
-	rcu_assign_pointer(net->gen, ng);
-
-	error = 0;
 	list_for_each_entry(ops, &pernet_list, list) {
 		if (ops->init) {
 			error = ops->init(net);
@@ -70,24 +60,50 @@ out_undo:
 	}
 
 	rcu_barrier();
-	kfree(ng);
 	goto out;
 }
 
+static struct net_generic *net_alloc_generic(void)
+{
+	struct net_generic *ng;
+	size_t generic_size = sizeof(struct net_generic) +
+		INITIAL_NET_GEN_PTRS * sizeof(void *);
+
+	ng = kzalloc(generic_size, GFP_KERNEL);
+	if (ng)
+		ng->len = INITIAL_NET_GEN_PTRS;
+
+	return ng;
+}
+
 #ifdef CONFIG_NET_NS
 static struct kmem_cache *net_cachep;
 static struct workqueue_struct *netns_wq;
 
 static struct net *net_alloc(void)
 {
-	return kmem_cache_zalloc(net_cachep, GFP_KERNEL);
+	struct net *net = NULL;
+	struct net_generic *ng;
+
+	ng = net_alloc_generic();
+	if (!ng)
+		goto out;
+
+	net = kmem_cache_zalloc(net_cachep, GFP_KERNEL);
+	if (!net)
+		goto out_free;
+
+	rcu_assign_pointer(net->gen, ng);
+out:
+	return net;
+
+out_free:
+	kfree(ng);
+	goto out;
 }
 
 static void net_free(struct net *net)
 {
-	if (!net)
-		return;
-
 #ifdef NETNS_REFCNT_DEBUG
 	if (unlikely(atomic_read(&net->use_count) != 0)) {
 		printk(KERN_EMERG "network namespace not free! Usage: %d\n",
@@ -112,27 +128,28 @@ struct net *copy_net_ns(unsigned long flags, struct net *old_net)
 	err = -ENOMEM;
 	new_net = net_alloc();
 	if (!new_net)
-		goto out;
+		goto out_err;
 
 	mutex_lock(&net_mutex);
 	err = setup_net(new_net);
-	if (err)
-		goto out_unlock;
-
-	rtnl_lock();
-	list_add_tail(&new_net->list, &net_namespace_list);
-	rtnl_unlock();
-
-
-out_unlock:
+	if (!err) {
+		rtnl_lock();
+		list_add_tail(&new_net->list, &net_namespace_list);
+		rtnl_unlock();
+	}
 	mutex_unlock(&net_mutex);
+
+	if (err)
+		goto out_free;
 out:
 	put_net(old_net);
-	if (err) {
-		net_free(new_net);
-		new_net = ERR_PTR(err);
-	}
 	return new_net;
+
+out_free:
+	net_free(new_net);
+out_err:
+	new_net = ERR_PTR(err);
+	goto out;
 }
 
 static void cleanup_net(struct work_struct *work)
@@ -140,9 +157,6 @@ static void cleanup_net(struct work_struct *work)
 	struct pernet_operations *ops;
 	struct net *net;
 
-	/* Be very certain incoming network packets will not find us */
-	rcu_barrier();
-
 	net = container_of(work, struct net, work);
 
 	mutex_lock(&net_mutex);
@@ -188,6 +202,7 @@ struct net *copy_net_ns(unsigned long flags, struct net *old_net)
 
 static int __init net_ns_init(void)
 {
+	struct net_generic *ng;
 	int err;
 
 	printk(KERN_INFO "net_namespace: %zd bytes\n", sizeof(struct net));
@@ -202,6 +217,12 @@ static int __init net_ns_init(void)
 		panic("Could not create netns workq");
 #endif
 
+	ng = net_alloc_generic();
+	if (!ng)
+		panic("Could not allocate generic netns");
+
+	rcu_assign_pointer(init_net.gen, ng);
+
 	mutex_lock(&net_mutex);
 	err = setup_net(&init_net);
 
diff --git a/net/core/skbuff.c b/net/core/skbuff.c
index 2e5f2ca3bdcd..c6a6b166f8d6 100644
--- a/net/core/skbuff.c
+++ b/net/core/skbuff.c
@@ -143,14 +143,6 @@ void skb_under_panic(struct sk_buff *skb, int sz, void *here)
 	BUG();
 }
 
-void skb_truesize_bug(struct sk_buff *skb)
-{
-	WARN(net_ratelimit(), KERN_ERR "SKB BUG: Invalid truesize (%u) "
-			"len=%u, sizeof(sk_buff)=%Zd\n",
-	     skb->truesize, skb->len, sizeof(struct sk_buff));
-}
-EXPORT_SYMBOL(skb_truesize_bug);
-
 /*	Allocate a new skbuff. We do this ourselves so we can fill in a few
  *	'private' fields and also do memory statistics to find all the
  *	[BEEP] leaks.
@@ -2212,10 +2204,10 @@ unsigned int skb_seq_read(unsigned int consumed, const u8 **data,
 		return 0;
 
 next_skb:
-	block_limit = skb_headlen(st->cur_skb);
+	block_limit = skb_headlen(st->cur_skb) + st->stepped_offset;
 
 	if (abs_offset < block_limit) {
-		*data = st->cur_skb->data + abs_offset;
+		*data = st->cur_skb->data + (abs_offset - st->stepped_offset);
 		return block_limit - abs_offset;
 	}
 
@@ -2250,13 +2242,14 @@ next_skb:
 		st->frag_data = NULL;
 	}
 
-	if (st->cur_skb->next) {
-		st->cur_skb = st->cur_skb->next;
+	if (st->root_skb == st->cur_skb &&
+	    skb_shinfo(st->root_skb)->frag_list) {
+		st->cur_skb = skb_shinfo(st->root_skb)->frag_list;
 		st->frag_idx = 0;
 		goto next_skb;
-	} else if (st->root_skb == st->cur_skb &&
-		   skb_shinfo(st->root_skb)->frag_list) {
-		st->cur_skb = skb_shinfo(st->root_skb)->frag_list;
+	} else if (st->cur_skb->next) {
+		st->cur_skb = st->cur_skb->next;
+		st->frag_idx = 0;
 		goto next_skb;
 	}
 
diff --git a/net/core/sock.c b/net/core/sock.c
index f3a0d08cbb48..5f97caa158e8 100644
--- a/net/core/sock.c
+++ b/net/core/sock.c
@@ -696,6 +696,8 @@ int sock_getsockopt(struct socket *sock, int level, int optname,
 	if (len < 0)
 		return -EINVAL;
 
+	memset(&v, 0, sizeof(v));
+
 	switch(optname) {
 	case SO_DEBUG:
 		v.val = sock_flag(sk, SOCK_DBG);
@@ -1135,7 +1137,6 @@ void sock_rfree(struct sk_buff *skb)
 {
 	struct sock *sk = skb->sk;
 
-	skb_truesize_check(skb);
 	atomic_sub(skb->truesize, &sk->sk_rmem_alloc);
 	sk_mem_uncharge(skb->sk, skb->truesize);
 }
diff --git a/net/ipv4/cipso_ipv4.c b/net/ipv4/cipso_ipv4.c
index 6bb2635b5ded..7bc992976d29 100644
--- a/net/ipv4/cipso_ipv4.c
+++ b/net/ipv4/cipso_ipv4.c
@@ -3,11 +3,16 @@
  *
  * This is an implementation of the CIPSO 2.2 protocol as specified in
  * draft-ietf-cipso-ipsecurity-01.txt with additional tag types as found in
- * FIPS-188, copies of both documents can be found in the Documentation
- * directory.  While CIPSO never became a full IETF RFC standard many vendors
+ * FIPS-188.  While CIPSO never became a full IETF RFC standard many vendors
  * have chosen to adopt the protocol and over the years it has become a
  * de-facto standard for labeled networking.
  *
+ * The CIPSO draft specification can be found in the kernel's Documentation
+ * directory as well as the following URL:
+ *   http://netlabel.sourceforge.net/files/draft-ietf-cipso-ipsecurity-01.txt
+ * The FIPS-188 specification can be found at the following URL:
+ *   http://www.itl.nist.gov/fipspubs/fip188.htm
+ *
  * Author: Paul Moore <paul.moore@hp.com>
  *
  */
diff --git a/net/ipv4/icmp.c b/net/ipv4/icmp.c
index 705b33b184a3..fc562d29cc46 100644
--- a/net/ipv4/icmp.c
+++ b/net/ipv4/icmp.c
@@ -1205,7 +1205,7 @@ static struct pernet_operations __net_initdata icmp_sk_ops = {
 
 int __init icmp_init(void)
 {
-	return register_pernet_device(&icmp_sk_ops);
+	return register_pernet_subsys(&icmp_sk_ops);
 }
 
 EXPORT_SYMBOL(icmp_err_convert);
diff --git a/net/ipv4/ip_fragment.c b/net/ipv4/ip_fragment.c
index 6659ac000eeb..7985346653bd 100644
--- a/net/ipv4/ip_fragment.c
+++ b/net/ipv4/ip_fragment.c
@@ -463,6 +463,7 @@ err:
 static int ip_frag_reasm(struct ipq *qp, struct sk_buff *prev,
			 struct net_device *dev)
 {
+	struct net *net = container_of(qp->q.net, struct net, ipv4.frags);
 	struct iphdr *iph;
 	struct sk_buff *fp, *head = qp->q.fragments;
 	int len;
@@ -548,7 +549,7 @@ static int ip_frag_reasm(struct ipq *qp, struct sk_buff *prev,
 	iph = ip_hdr(head);
 	iph->frag_off = 0;
 	iph->tot_len = htons(len);
-	IP_INC_STATS_BH(dev_net(dev), IPSTATS_MIB_REASMOKS);
+	IP_INC_STATS_BH(net, IPSTATS_MIB_REASMOKS);
 	qp->q.fragments = NULL;
 	return 0;
 
diff --git a/net/ipv4/ipconfig.c b/net/ipv4/ipconfig.c
index 42a0f3dd3fd6..d722013c1cae 100644
--- a/net/ipv4/ipconfig.c
+++ b/net/ipv4/ipconfig.c
@@ -1268,6 +1268,9 @@ __be32 __init root_nfs_parse_addr(char *name)
 static int __init ip_auto_config(void)
 {
 	__be32 addr;
+#ifdef IPCONFIG_DYNAMIC
+	int retries = CONF_OPEN_RETRIES;
+#endif
 
 #ifdef CONFIG_PROC_FS
 	proc_net_fops_create(&init_net, "pnp", S_IRUGO, &pnp_seq_fops);
@@ -1304,9 +1307,6 @@ static int __init ip_auto_config(void)
 #endif
 	       ic_first_dev->next) {
 #ifdef IPCONFIG_DYNAMIC
-
-		int retries = CONF_OPEN_RETRIES;
-
 		if (ic_dynamic() < 0) {
 			ic_close_devs();
 
diff --git a/net/ipv4/tcp.c b/net/ipv4/tcp.c
index 0cd71b84e483..76b148bcb0dc 100644
--- a/net/ipv4/tcp.c
+++ b/net/ipv4/tcp.c
@@ -524,7 +524,8 @@ static int tcp_splice_data_recv(read_descriptor_t *rd_desc, struct sk_buff *skb,
 	struct tcp_splice_state *tss = rd_desc->arg.data;
 	int ret;
 
-	ret = skb_splice_bits(skb, offset, tss->pipe, rd_desc->count, tss->flags);
+	ret = skb_splice_bits(skb, offset, tss->pipe, min(rd_desc->count, len),
+			      tss->flags);
 	if (ret > 0)
 		rd_desc->count -= ret;
 	return ret;
diff --git a/net/ipv4/tcp_input.c b/net/ipv4/tcp_input.c
index a6961d75c7ea..c28976a7e596 100644
--- a/net/ipv4/tcp_input.c
+++ b/net/ipv4/tcp_input.c
@@ -1374,7 +1374,8 @@ static u8 tcp_sacktag_one(struct sk_buff *skb, struct sock *sk,
 
 static int tcp_shifted_skb(struct sock *sk, struct sk_buff *skb,
			   struct tcp_sacktag_state *state,
-			   unsigned int pcount, int shifted, int mss)
+			   unsigned int pcount, int shifted, int mss,
+			   int dup_sack)
 {
 	struct tcp_sock *tp = tcp_sk(sk);
 	struct sk_buff *prev = tcp_write_queue_prev(sk, skb);
@@ -1410,7 +1411,7 @@ static int tcp_shifted_skb(struct sock *sk, struct sk_buff *skb,
 	}
 
 	/* We discard results */
-	tcp_sacktag_one(skb, sk, state, 0, pcount);
+	tcp_sacktag_one(skb, sk, state, dup_sack, pcount);
 
 	/* Difference in this won't matter, both ACKed by the same cumul. ACK */
 	TCP_SKB_CB(prev)->sacked |= (TCP_SKB_CB(skb)->sacked & TCPCB_EVER_RETRANS);
@@ -1561,7 +1562,7 @@ static struct sk_buff *tcp_shift_skb_data(struct sock *sk, struct sk_buff *skb,
 
 	if (!skb_shift(prev, skb, len))
 		goto fallback;
-	if (!tcp_shifted_skb(sk, skb, state, pcount, len, mss))
+	if (!tcp_shifted_skb(sk, skb, state, pcount, len, mss, dup_sack))
 		goto out;
 
 	/* Hole filled allows collapsing with the next as well, this is very
@@ -1580,7 +1581,7 @@ static struct sk_buff *tcp_shift_skb_data(struct sock *sk, struct sk_buff *skb,
 	len = skb->len;
 	if (skb_shift(prev, skb, len)) {
 		pcount += tcp_skb_pcount(skb);
-		tcp_shifted_skb(sk, skb, state, tcp_skb_pcount(skb), len, mss);
+		tcp_shifted_skb(sk, skb, state, tcp_skb_pcount(skb), len, mss, 0);
 	}
 
 out:
diff --git a/net/ipv4/tcp_ipv4.c b/net/ipv4/tcp_ipv4.c
index 19d7b429a262..cf74c416831a 100644
--- a/net/ipv4/tcp_ipv4.c
+++ b/net/ipv4/tcp_ipv4.c
@@ -2443,7 +2443,7 @@ static struct pernet_operations __net_initdata tcp_sk_ops = {
 void __init tcp_v4_init(void)
 {
 	inet_hashinfo_init(&tcp_hashinfo);
-	if (register_pernet_device(&tcp_sk_ops))
+	if (register_pernet_subsys(&tcp_sk_ops))
 		panic("Failed to create the TCP control socket.\n");
 }
 
diff --git a/net/ipv4/tcp_output.c b/net/ipv4/tcp_output.c
index 557fe16cbfb0..da2c3b8794f2 100644
--- a/net/ipv4/tcp_output.c
+++ b/net/ipv4/tcp_output.c
@@ -663,14 +663,10 @@ static int tcp_transmit_skb(struct sock *sk, struct sk_buff *skb, int clone_it,
 	th->urg_ptr		= 0;
 
 	/* The urg_mode check is necessary during a below snd_una win probe */
-	if (unlikely(tcp_urg_mode(tp))) {
-		if (between(tp->snd_up, tcb->seq + 1, tcb->seq + 0xFFFF)) {
-			th->urg_ptr = htons(tp->snd_up - tcb->seq);
-			th->urg = 1;
-		} else if (after(tcb->seq + 0xFFFF, tp->snd_nxt)) {
-			th->urg_ptr = 0xFFFF;
-			th->urg = 1;
-		}
+	if (unlikely(tcp_urg_mode(tp) &&
+		     between(tp->snd_up, tcb->seq + 1, tcb->seq + 0xFFFF))) {
+		th->urg_ptr = htons(tp->snd_up - tcb->seq);
+		th->urg = 1;
 	}
 
 	tcp_options_write((__be32 *)(th + 1), tp, &opts, &md5_hash_location);
@@ -2027,7 +2023,6 @@ void tcp_xmit_retransmit_queue(struct sock *sk)
 		last_lost = tp->snd_una;
 	}
 
-	/* First pass: retransmit lost packets. */
 	tcp_for_write_queue_from(skb, sk) {
 		__u8 sacked = TCP_SKB_CB(skb)->sacked;
 
diff --git a/net/ipv4/tcp_scalable.c b/net/ipv4/tcp_scalable.c
index 2747ec7bfb63..4660b088a8ce 100644
--- a/net/ipv4/tcp_scalable.c
+++ b/net/ipv4/tcp_scalable.c
@@ -1,6 +1,6 @@
 /* Tom Kelly's Scalable TCP
  *
- * See htt://www-lce.eng.cam.ac.uk/~ctk21/scalable/
+ * See http://www.deneholme.net/tom/scalable/
  *
  * John Heffner <jheffner@sc.edu>
  */
diff --git a/net/ipv4/udp.c b/net/ipv4/udp.c
index cf5ab0581eba..c47c989cb1fb 100644
--- a/net/ipv4/udp.c
+++ b/net/ipv4/udp.c
@@ -120,8 +120,11 @@ EXPORT_SYMBOL(sysctl_udp_wmem_min);
 atomic_t udp_memory_allocated;
 EXPORT_SYMBOL(udp_memory_allocated);
 
+#define PORTS_PER_CHAIN (65536 / UDP_HTABLE_SIZE)
+
 static int udp_lib_lport_inuse(struct net *net, __u16 num,
			       const struct udp_hslot *hslot,
+			       unsigned long *bitmap,
			       struct sock *sk,
			       int (*saddr_comp)(const struct sock *sk1,
						 const struct sock *sk2))
@@ -132,12 +135,17 @@ static int udp_lib_lport_inuse(struct net *net, __u16 num,
 	sk_nulls_for_each(sk2, node, &hslot->head)
 		if (net_eq(sock_net(sk2), net) &&
 		    sk2 != sk &&
-		    sk2->sk_hash == num &&
+		    (bitmap || sk2->sk_hash == num) &&
 		    (!sk2->sk_reuse || !sk->sk_reuse) &&
 		    (!sk2->sk_bound_dev_if || !sk->sk_bound_dev_if
 		     || sk2->sk_bound_dev_if == sk->sk_bound_dev_if) &&
-		    (*saddr_comp)(sk, sk2))
-			return 1;
+		    (*saddr_comp)(sk, sk2)) {
+			if (bitmap)
+				__set_bit(sk2->sk_hash / UDP_HTABLE_SIZE,
+					  bitmap);
+			else
+				return 1;
+		}
 	return 0;
 }
 
@@ -160,32 +168,47 @@ int udp_lib_get_port(struct sock *sk, unsigned short snum,
 	if (!snum) {
 		int low, high, remaining;
 		unsigned rand;
-		unsigned short first;
+		unsigned short first, last;
+		DECLARE_BITMAP(bitmap, PORTS_PER_CHAIN);
 
 		inet_get_local_port_range(&low, &high);
 		remaining = (high - low) + 1;
 
 		rand = net_random();
-		snum = first = rand % remaining + low;
-		rand |= 1;
-		for (;;) {
-			hslot = &udptable->hash[udp_hashfn(net, snum)];
+		first = (((u64)rand * remaining) >> 32) + low;
+		/*
+		 * force rand to be an odd multiple of UDP_HTABLE_SIZE
+		 */
+		rand = (rand | 1) * UDP_HTABLE_SIZE;
+		for (last = first + UDP_HTABLE_SIZE; first != last; first++) {
+			hslot = &udptable->hash[udp_hashfn(net, first)];
+			bitmap_zero(bitmap, PORTS_PER_CHAIN);
 			spin_lock_bh(&hslot->lock);
-			if (!udp_lib_lport_inuse(net, snum, hslot, sk, saddr_comp))
-				break;
-			spin_unlock_bh(&hslot->lock);
+			udp_lib_lport_inuse(net, snum, hslot, bitmap, sk,
+					    saddr_comp);
+
+			snum = first;
+			/*
+			 * Iterate on all possible values of snum for this hash.
+			 * Using steps of an odd multiple of UDP_HTABLE_SIZE
+			 * give us randomization and full range coverage.
+			 */
 			do {
-				snum = snum + rand;
-			} while (snum < low || snum > high);
-			if (snum == first)
-				goto fail;
+				if (low <= snum && snum <= high &&
+				    !test_bit(snum / UDP_HTABLE_SIZE, bitmap))
+					goto found;
+				snum += rand;
+			} while (snum != first);
+			spin_unlock_bh(&hslot->lock);
 		}
+		goto fail;
 	} else {
 		hslot = &udptable->hash[udp_hashfn(net, snum)];
 		spin_lock_bh(&hslot->lock);
-		if (udp_lib_lport_inuse(net, snum, hslot, sk, saddr_comp))
+		if (udp_lib_lport_inuse(net, snum, hslot, NULL, sk, saddr_comp))
 			goto fail_unlock;
 	}
+found:
 	inet_sk(sk)->num = snum;
 	sk->sk_hash = snum;
 	if (sk_unhashed(sk)) {
@@ -992,9 +1015,11 @@ static int __udp_queue_rcv_skb(struct sock *sk, struct sk_buff *skb)
 
 	if ((rc = sock_queue_rcv_skb(sk, skb)) < 0) {
 		/* Note that an ENOMEM error is charged twice */
-		if (rc == -ENOMEM)
+		if (rc == -ENOMEM) {
 			UDP_INC_STATS_BH(sock_net(sk), UDP_MIB_RCVBUFERRORS,
					 is_udplite);
+			atomic_inc(&sk->sk_drops);
+		}
 		goto drop;
 	}
 
@@ -1206,11 +1231,10 @@ int __udp4_lib_rcv(struct sk_buff *skb, struct udp_table *udptable,
 		   int proto)
 {
 	struct sock *sk;
-	struct udphdr *uh = udp_hdr(skb);
+	struct udphdr *uh;
 	unsigned short ulen;
 	struct rtable *rt = (struct rtable*)skb->dst;
-	__be32 saddr = ip_hdr(skb)->saddr;
-	__be32 daddr = ip_hdr(skb)->daddr;
+	__be32 saddr, daddr;
 	struct net *net = dev_net(skb->dev);
 
 	/*
@@ -1219,6 +1243,7 @@ int __udp4_lib_rcv(struct sk_buff *skb, struct udp_table *udptable,
 	if (!pskb_may_pull(skb, sizeof(struct udphdr)))
 		goto drop;		/* No space for header. */
 
+	uh = udp_hdr(skb);
 	ulen = ntohs(uh->len);
 	if (ulen > skb->len)
 		goto short_packet;
@@ -1233,6 +1258,9 @@ int __udp4_lib_rcv(struct sk_buff *skb, struct udp_table *udptable,
 	if (udp4_csum_init(skb, uh, proto))
 		goto csum_error;
 
+	saddr = ip_hdr(skb)->saddr;
+	daddr = ip_hdr(skb)->daddr;
+
 	if (rt->rt_flags & (RTCF_BROADCAST|RTCF_MULTICAST))
 		return __udp4_lib_mcast_deliver(net, skb, uh,
						saddr, daddr, udptable);
diff --git a/net/ipv6/addrconf.c b/net/ipv6/addrconf.c
index e92ad8455c63..1220e2c7831e 100644
--- a/net/ipv6/addrconf.c
+++ b/net/ipv6/addrconf.c
@@ -493,15 +493,17 @@ static void addrconf_forward_change(struct net *net, __s32 newf)
 	read_unlock(&dev_base_lock);
 }
 
-static void addrconf_fixup_forwarding(struct ctl_table *table, int *p, int old)
+static int addrconf_fixup_forwarding(struct ctl_table *table, int *p, int old)
 {
 	struct net *net;
 
 	net = (struct net *)table->extra2;
 	if (p == &net->ipv6.devconf_dflt->forwarding)
-		return;
+		return 0;
+
+	if (!rtnl_trylock())
+		return -ERESTARTSYS;
 
-	rtnl_lock();
 	if (p == &net->ipv6.devconf_all->forwarding) {
 		__s32 newf = net->ipv6.devconf_all->forwarding;
 		net->ipv6.devconf_dflt->forwarding = newf;
@@ -512,6 +514,7 @@ static void addrconf_fixup_forwarding(struct ctl_table *table, int *p, int old)
 
 	if (*p)
 		rt6_purge_dflt_routers(net);
+	return 1;
 }
 #endif
 
@@ -2608,9 +2611,6 @@ static int addrconf_ifdown(struct net_device *dev, int how)
 
 	ASSERT_RTNL();
 
-	if ((dev->flags & IFF_LOOPBACK) && how == 1)
-		how = 0;
-
 	rt6_ifdown(net, dev);
 	neigh_ifdown(&nd_tbl, dev);
 
@@ -3983,7 +3983,7 @@ int addrconf_sysctl_forward(ctl_table *ctl, int write, struct file * filp,
 	ret = proc_dointvec(ctl, write, filp, buffer, lenp, ppos);
 
 	if (write)
-		addrconf_fixup_forwarding(ctl, valp, val);
+		ret = addrconf_fixup_forwarding(ctl, valp, val);
 	return ret;
 }
 
@@ -4019,8 +4019,7 @@ static int addrconf_sysctl_forward_strategy(ctl_table *table,
 	}
 
 	*valp = new;
-	addrconf_fixup_forwarding(table, valp, val);
-	return 1;
+	return addrconf_fixup_forwarding(table, valp, val);
 }
 
 static struct addrconf_sysctl_table
@@ -4250,7 +4249,7 @@ static struct addrconf_sysctl_table
 		.procname	= "mc_forwarding",
 		.data		= &ipv6_devconf.mc_forwarding,
 		.maxlen		= sizeof(int),
-		.mode		= 0644,
+		.mode		= 0444,
 		.proc_handler	= proc_dointvec,
 	},
 #endif
@@ -4446,25 +4445,6 @@ int unregister_inet6addr_notifier(struct notifier_block *nb)
 
 EXPORT_SYMBOL(unregister_inet6addr_notifier);
 
-static void addrconf_net_exit(struct net *net)
-{
-	struct net_device *dev;
-
-	rtnl_lock();
-	/* clean dev list */
-	for_each_netdev(net, dev) {
-		if (__in6_dev_get(dev) == NULL)
-			continue;
-		addrconf_ifdown(dev, 1);
-	}
-	addrconf_ifdown(net->loopback_dev, 2);
-	rtnl_unlock();
-}
-
-static struct pernet_operations addrconf_net_ops = {
-	.exit = addrconf_net_exit,
-};
-
 /*
  *	Init / cleanup code
  */
@@ -4506,10 +4486,6 @@ int __init addrconf_init(void)
 	if (err)
 		goto errlo;
 
-	err = register_pernet_device(&addrconf_net_ops);
-	if (err)
-		return err;
-
 	register_netdevice_notifier(&ipv6_dev_notf);
 
 	addrconf_verify(0);
@@ -4539,15 +4515,22 @@ errlo:
 void addrconf_cleanup(void)
 {
 	struct inet6_ifaddr *ifa;
+	struct net_device *dev;
 	int i;
 
 	unregister_netdevice_notifier(&ipv6_dev_notf);
-	unregister_pernet_device(&addrconf_net_ops);
-
 	unregister_pernet_subsys(&addrconf_ops);
 
 	rtnl_lock();
 
+	/* clean dev list */
+	for_each_netdev(&init_net, dev) {
+		if (__in6_dev_get(dev) == NULL)
+			continue;
+		addrconf_ifdown(dev, 1);
+	}
+	addrconf_ifdown(init_net.loopback_dev, 2);
+
 	/*
 	 *	Check hash table.
 	 */
@@ -4568,6 +4551,4 @@ void addrconf_cleanup(void)
 
 	del_timer(&addr_chk_timer);
 	rtnl_unlock();
-
-	unregister_pernet_subsys(&addrconf_net_ops);
 }
diff --git a/net/ipv6/af_inet6.c b/net/ipv6/af_inet6.c
index c802bc1658a8..9c8309ed35cf 100644
--- a/net/ipv6/af_inet6.c
+++ b/net/ipv6/af_inet6.c
@@ -72,6 +72,10 @@ MODULE_LICENSE("GPL");
 static struct list_head inetsw6[SOCK_MAX];
 static DEFINE_SPINLOCK(inetsw6_lock);
 
+static int disable_ipv6 = 0;
+module_param_named(disable, disable_ipv6, int, 0);
+MODULE_PARM_DESC(disable, "Disable IPv6 such that it is non-functional");
+
 static __inline__ struct ipv6_pinfo *inet6_sk_generic(struct sock *sk)
 {
 	const int offset = sk->sk_prot->obj_size - sizeof(struct ipv6_pinfo);
@@ -991,10 +995,21 @@ static int __init inet6_init(void)
 {
 	struct sk_buff *dummy_skb;
 	struct list_head *r;
-	int err;
+	int err = 0;
 
 	BUILD_BUG_ON(sizeof(struct inet6_skb_parm) > sizeof(dummy_skb->cb));
 
+	/* Register the socket-side information for inet6_create.  */
+	for(r = &inetsw6[0]; r < &inetsw6[SOCK_MAX]; ++r)
+		INIT_LIST_HEAD(r);
+
+	if (disable_ipv6) {
+		printk(KERN_INFO
+		       "IPv6: Loaded, but administratively disabled, "
+		       "reboot required to enable\n");
+		goto out;
+	}
+
 	err = proto_register(&tcpv6_prot, 1);
 	if (err)
 		goto out;
@@ -1012,10 +1027,6 @@ static int __init inet6_init(void)
 		goto out_unregister_udplite_proto;
 
 
-	/* Register the socket-side information for inet6_create.  */
-	for(r = &inetsw6[0]; r < &inetsw6[SOCK_MAX]; ++r)
-		INIT_LIST_HEAD(r);
-
 	/* We MUST register RAW sockets before we create the ICMP6,
 	 * IGMP6, or NDISC control sockets.
 	 */
@@ -1181,6 +1192,9 @@ module_init(inet6_init);
 
 static void __exit inet6_exit(void)
 {
+	if (disable_ipv6)
+		return;
+
 	/* First of all disallow new sockets creation. */
 	sock_unregister(PF_INET6);
 	/* Disallow any further netlink messages */
diff --git a/net/ipv6/icmp.c b/net/ipv6/icmp.c
index 4f433847d95f..36dff8807183 100644
--- a/net/ipv6/icmp.c
+++ b/net/ipv6/icmp.c
@@ -443,10 +443,10 @@ void icmpv6_send(struct sk_buff *skb, int type, int code, __u32 info,
 	if (xfrm_decode_session_reverse(skb, &fl2, AF_INET6))
 		goto relookup_failed;
 
-	if (ip6_dst_lookup(sk, &dst2, &fl))
+	if (ip6_dst_lookup(sk, &dst2, &fl2))
 		goto relookup_failed;
 
-	err = xfrm_lookup(net, &dst2, &fl, sk, XFRM_LOOKUP_ICMP);
+	err = xfrm_lookup(net, &dst2, &fl2, sk, XFRM_LOOKUP_ICMP);
 	switch (err) {
 	case 0:
 		dst_release(dst);
diff --git a/net/ipv6/inet6_hashtables.c b/net/ipv6/inet6_hashtables.c
index 8fe267feb81e..1bcc3431859e 100644
--- a/net/ipv6/inet6_hashtables.c
+++ b/net/ipv6/inet6_hashtables.c
@@ -258,11 +258,11 @@ unique:
 
 	if (twp != NULL) {
 		*twp = tw;
-		NET_INC_STATS_BH(twsk_net(tw), LINUX_MIB_TIMEWAITRECYCLED);
+		NET_INC_STATS_BH(net, LINUX_MIB_TIMEWAITRECYCLED);
 	} else if (tw != NULL) {
 		/* Silly. Should hash-dance instead... */
 		inet_twsk_deschedule(tw, death_row);
-		NET_INC_STATS_BH(twsk_net(tw), LINUX_MIB_TIMEWAITRECYCLED);
+		NET_INC_STATS_BH(net, LINUX_MIB_TIMEWAITRECYCLED);
 
 		inet_twsk_put(tw);
 	}
diff --git a/net/ipv6/ip6_flowlabel.c b/net/ipv6/ip6_flowlabel.c
index c62dd247774f..7712578bdc66 100644
--- a/net/ipv6/ip6_flowlabel.c
+++ b/net/ipv6/ip6_flowlabel.c
@@ -323,17 +323,21 @@ static struct ip6_flowlabel *
 fl_create(struct net *net, struct in6_flowlabel_req *freq, char __user *optval,
	  int optlen, int *err_p)
 {
-	struct ip6_flowlabel *fl;
+	struct ip6_flowlabel *fl = NULL;
 	int olen;
 	int addr_type;
 	int err;
 
+	olen = optlen - CMSG_ALIGN(sizeof(*freq));
+	err = -EINVAL;
+	if (olen > 64 * 1024)
+		goto done;
+
 	err = -ENOMEM;
 	fl = kzalloc(sizeof(*fl), GFP_KERNEL);
 	if (fl == NULL)
 		goto done;
 
-	olen = optlen - CMSG_ALIGN(sizeof(*freq));
 	if (olen > 0) {
 		struct msghdr msg;
 		struct flowi flowi;
diff --git a/net/ipv6/ip6_input.c b/net/ipv6/ip6_input.c
index 936f48946e20..f171e8dbac91 100644
--- a/net/ipv6/ip6_input.c
+++ b/net/ipv6/ip6_input.c
@@ -255,6 +255,7 @@ int ip6_mc_input(struct sk_buff *skb)
 	 *	IPv6 multicast router mode is now supported ;)
 	 */
 	if (dev_net(skb->dev)->ipv6.devconf_all->mc_forwarding &&
+	    !(ipv6_addr_type(&hdr->daddr) & IPV6_ADDR_LINKLOCAL) &&
 	    likely(!(IP6CB(skb)->flags & IP6SKB_FORWARDED))) {
 		/*
 		 *	Okay, we try to forward - split and duplicate
@@ -316,7 +317,6 @@ int ip6_mc_input(struct sk_buff *skb)
 		}
 
 		if (skb2) {
-			skb2->dev = skb2->dst->dev;
 			ip6_mr_input(skb2);
 		}
 	}
diff --git a/net/ipv6/ip6_output.c b/net/ipv6/ip6_output.c
index 4b15938bef4d..9fb49c3b518a 100644
--- a/net/ipv6/ip6_output.c
+++ b/net/ipv6/ip6_output.c
@@ -1105,6 +1105,18 @@ static inline int ip6_ufo_append_data(struct sock *sk,
 	return err;
 }
 
+static inline struct ipv6_opt_hdr *ip6_opt_dup(struct ipv6_opt_hdr *src,
+					       gfp_t gfp)
+{
+	return src ? kmemdup(src, (src->hdrlen + 1) * 8, gfp) : NULL;
+}
+
+static inline struct ipv6_rt_hdr *ip6_rthdr_dup(struct ipv6_rt_hdr *src,
+						gfp_t gfp)
+{
+	return src ? kmemdup(src, (src->hdrlen + 1) * 8, gfp) : NULL;
+}
+
 int ip6_append_data(struct sock *sk, int getfrag(void *from, char *to,
	int offset, int len, int odd, struct sk_buff *skb),
	void *from, int length, int transhdrlen,
@@ -1130,17 +1142,37 @@ int ip6_append_data(struct sock *sk, int getfrag(void *from, char *to,
 	 * setup for corking
 	 */
 	if (opt) {
-		if (np->cork.opt == NULL) {
-			np->cork.opt = kmalloc(opt->tot_len,
-					       sk->sk_allocation);
-			if (unlikely(np->cork.opt == NULL))
-				return -ENOBUFS;
-		} else if (np->cork.opt->tot_len < opt->tot_len) {
-			printk(KERN_DEBUG "ip6_append_data: invalid option length\n");
+		if (WARN_ON(np->cork.opt))
 			return -EINVAL;
-		}
-		memcpy(np->cork.opt, opt, opt->tot_len);
-		inet->cork.flags |= IPCORK_OPT;
+
+		np->cork.opt = kmalloc(opt->tot_len, sk->sk_allocation);
+		if (unlikely(np->cork.opt == NULL))
+			return -ENOBUFS;
+
+		np->cork.opt->tot_len = opt->tot_len;
+		np->cork.opt->opt_flen = opt->opt_flen;
+		np->cork.opt->opt_nflen = opt->opt_nflen;
+
+		np->cork.opt->dst0opt = ip6_opt_dup(opt->dst0opt,
+						    sk->sk_allocation);
+		if (opt->dst0opt && !np->cork.opt->dst0opt)
+			return -ENOBUFS;
+
+		np->cork.opt->dst1opt = ip6_opt_dup(opt->dst1opt,
+						    sk->sk_allocation);
+		if (opt->dst1opt && !np->cork.opt->dst1opt)
+			return -ENOBUFS;
+
+		np->cork.opt->hopopt = ip6_opt_dup(opt->hopopt,
+						   sk->sk_allocation);
+		if (opt->hopopt && !np->cork.opt->hopopt)
+			return -ENOBUFS;
+
+		np->cork.opt->srcrt = ip6_rthdr_dup(opt->srcrt,
+						    sk->sk_allocation);
+		if (opt->srcrt && !np->cork.opt->srcrt)
+			return -ENOBUFS;
+
 		/* need source address above miyazawa*/
 	}
 	dst_hold(&rt->u.dst);
@@ -1167,8 +1199,7 @@ int ip6_append_data(struct sock *sk, int getfrag(void *from, char *to,
 	} else {
 		rt = (struct rt6_info *)inet->cork.dst;
 		fl = &inet->cork.fl;
-		if (inet->cork.flags & IPCORK_OPT)
-			opt = np->cork.opt;
+		opt = np->cork.opt;
 		transhdrlen = 0;
 		exthdrlen = 0;
 		mtu = inet->cork.fragsize;
@@ -1407,9 +1438,15 @@ error:
1407 1438
1408static void ip6_cork_release(struct inet_sock *inet, struct ipv6_pinfo *np) 1439static void ip6_cork_release(struct inet_sock *inet, struct ipv6_pinfo *np)
1409{ 1440{
1410 inet->cork.flags &= ~IPCORK_OPT; 1441 if (np->cork.opt) {
1411 kfree(np->cork.opt); 1442 kfree(np->cork.opt->dst0opt);
1412 np->cork.opt = NULL; 1443 kfree(np->cork.opt->dst1opt);
1444 kfree(np->cork.opt->hopopt);
1445 kfree(np->cork.opt->srcrt);
1446 kfree(np->cork.opt);
1447 np->cork.opt = NULL;
1448 }
1449
1413 if (inet->cork.dst) { 1450 if (inet->cork.dst) {
1414 dst_release(inet->cork.dst); 1451 dst_release(inet->cork.dst);
1415 inet->cork.dst = NULL; 1452 inet->cork.dst = NULL;
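The rewritten corking path above stops memcpy()ing the whole ipv6_txoptions block into one kmalloc buffer (which left the embedded extension-header pointers aliasing caller memory) and instead deep-copies each sub-header with kmemdup(), freeing them one by one in ip6_cork_release(). A minimal userspace sketch of the same pattern, with malloc/memcpy standing in for kmemdup and deliberately simplified layouts (opt_hdr, tx_options and the helper names here are illustrative, not the kernel definitions):

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

/* Stand-in for an IPv6 extension header: hdrlen counts 8-octet units minus
 * one, and the header is assumed to occupy (hdrlen + 1) * 8 bytes. */
struct opt_hdr {
    unsigned char hdrlen;
    unsigned char data[7];
};

struct tx_options {                 /* stand-in for struct ipv6_txoptions */
    struct opt_hdr *hopopt;
    struct opt_hdr *dst0opt;
};

/* Like ip6_opt_dup(): NULL duplicates to NULL, otherwise deep-copy. */
static struct opt_hdr *opt_dup(const struct opt_hdr *src)
{
    size_t len;
    struct opt_hdr *copy;

    if (!src)
        return NULL;
    len = ((size_t)src->hdrlen + 1) * 8;
    copy = malloc(len);
    if (copy)
        memcpy(copy, src, len);
    return copy;
}

/* Deep-copy every sub-header; a failed copy is reported to the caller. */
static int cork_copy(struct tx_options *dst, const struct tx_options *src)
{
    dst->hopopt = opt_dup(src->hopopt);
    if (src->hopopt && !dst->hopopt)
        return -1;
    dst->dst0opt = opt_dup(src->dst0opt);
    if (src->dst0opt && !dst->dst0opt)
        return -1;
    return 0;
}

/* Like ip6_cork_release(): each duplicated sub-header is freed separately. */
static void cork_release(struct tx_options *dst)
{
    free(dst->hopopt);
    free(dst->dst0opt);
    memset(dst, 0, sizeof(*dst));
}

int main(void)
{
    struct opt_hdr hop = { .hdrlen = 0 };      /* one 8-byte header */
    struct tx_options src = { .hopopt = &hop }, cork = { 0 };

    if (cork_copy(&cork, &src) == 0)
        printf("copied hop-by-hop header, %d bytes\n",
               (cork.hopopt->hdrlen + 1) * 8);
    cork_release(&cork);
    return 0;
}

The (hdrlen + 1) * 8 sizing mirrors how IPv6 extension headers encode their own length, which is why ip6_opt_dup() can copy them without a separate length argument.
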
diff --git a/net/ipv6/ip6_tunnel.c b/net/ipv6/ip6_tunnel.c
index 58e2b0d93758..d994c55a5b16 100644
--- a/net/ipv6/ip6_tunnel.c
+++ b/net/ipv6/ip6_tunnel.c
@@ -249,8 +249,8 @@ static struct ip6_tnl *ip6_tnl_create(struct net *net, struct ip6_tnl_parm *p)
249 } 249 }
250 250
251 t = netdev_priv(dev); 251 t = netdev_priv(dev);
252 ip6_tnl_dev_init(dev);
253 t->parms = *p; 252 t->parms = *p;
253 ip6_tnl_dev_init(dev);
254 254
255 if ((err = register_netdevice(dev)) < 0) 255 if ((err = register_netdevice(dev)) < 0)
256 goto failed_free; 256 goto failed_free;
diff --git a/net/ipv6/ip6mr.c b/net/ipv6/ip6mr.c
index 3c51b2d827f4..228be551e9c1 100644
--- a/net/ipv6/ip6mr.c
+++ b/net/ipv6/ip6mr.c
@@ -48,6 +48,7 @@
48#include <linux/pim.h> 48#include <linux/pim.h>
49#include <net/addrconf.h> 49#include <net/addrconf.h>
50#include <linux/netfilter_ipv6.h> 50#include <linux/netfilter_ipv6.h>
51#include <net/ip6_checksum.h>
51 52
52/* Big lock, protecting vif table, mrt cache and mroute socket state. 53/* Big lock, protecting vif table, mrt cache and mroute socket state.
53 Note that the changes are semaphored via rtnl_lock. 54 Note that the changes are semaphored via rtnl_lock.
@@ -365,7 +366,9 @@ static int pim6_rcv(struct sk_buff *skb)
365 pim = (struct pimreghdr *)skb_transport_header(skb); 366 pim = (struct pimreghdr *)skb_transport_header(skb);
366 if (pim->type != ((PIM_VERSION << 4) | PIM_REGISTER) || 367 if (pim->type != ((PIM_VERSION << 4) | PIM_REGISTER) ||
367 (pim->flags & PIM_NULL_REGISTER) || 368 (pim->flags & PIM_NULL_REGISTER) ||
368 (ip_compute_csum((void *)pim, sizeof(*pim)) != 0 && 369 (csum_ipv6_magic(&ipv6_hdr(skb)->saddr, &ipv6_hdr(skb)->daddr,
370 sizeof(*pim), IPPROTO_PIM,
371 csum_partial((void *)pim, sizeof(*pim), 0)) &&
369 csum_fold(skb_checksum(skb, 0, skb->len, 0)))) 372 csum_fold(skb_checksum(skb, 0, skb->len, 0))))
370 goto drop; 373 goto drop;
371 374
@@ -392,7 +395,7 @@ static int pim6_rcv(struct sk_buff *skb)
392 skb_pull(skb, (u8 *)encap - skb->data); 395 skb_pull(skb, (u8 *)encap - skb->data);
393 skb_reset_network_header(skb); 396 skb_reset_network_header(skb);
394 skb->dev = reg_dev; 397 skb->dev = reg_dev;
395 skb->protocol = htons(ETH_P_IP); 398 skb->protocol = htons(ETH_P_IPV6);
396 skb->ip_summed = 0; 399 skb->ip_summed = 0;
397 skb->pkt_type = PACKET_HOST; 400 skb->pkt_type = PACKET_HOST;
398 dst_release(skb->dst); 401 dst_release(skb->dst);
@@ -481,6 +484,7 @@ static int mif6_delete(struct net *net, int vifi)
481{ 484{
482 struct mif_device *v; 485 struct mif_device *v;
483 struct net_device *dev; 486 struct net_device *dev;
487 struct inet6_dev *in6_dev;
484 if (vifi < 0 || vifi >= net->ipv6.maxvif) 488 if (vifi < 0 || vifi >= net->ipv6.maxvif)
485 return -EADDRNOTAVAIL; 489 return -EADDRNOTAVAIL;
486 490
@@ -513,6 +517,10 @@ static int mif6_delete(struct net *net, int vifi)
513 517
514 dev_set_allmulti(dev, -1); 518 dev_set_allmulti(dev, -1);
515 519
520 in6_dev = __in6_dev_get(dev);
521 if (in6_dev)
522 in6_dev->cnf.mc_forwarding--;
523
516 if (v->flags & MIFF_REGISTER) 524 if (v->flags & MIFF_REGISTER)
517 unregister_netdevice(dev); 525 unregister_netdevice(dev);
518 526
@@ -622,6 +630,7 @@ static int mif6_add(struct net *net, struct mif6ctl *vifc, int mrtsock)
622 int vifi = vifc->mif6c_mifi; 630 int vifi = vifc->mif6c_mifi;
623 struct mif_device *v = &net->ipv6.vif6_table[vifi]; 631 struct mif_device *v = &net->ipv6.vif6_table[vifi];
624 struct net_device *dev; 632 struct net_device *dev;
633 struct inet6_dev *in6_dev;
625 int err; 634 int err;
626 635
627 /* Is vif busy ? */ 636 /* Is vif busy ? */
@@ -662,6 +671,10 @@ static int mif6_add(struct net *net, struct mif6ctl *vifc, int mrtsock)
662 return -EINVAL; 671 return -EINVAL;
663 } 672 }
664 673
674 in6_dev = __in6_dev_get(dev);
675 if (in6_dev)
676 in6_dev->cnf.mc_forwarding++;
677
665 /* 678 /*
666 * Fill in the VIF structures 679 * Fill in the VIF structures
667 */ 680 */
@@ -838,8 +851,6 @@ static int ip6mr_cache_report(struct net *net, struct sk_buff *pkt, mifi_t mifi,
838 851
839 skb->dst = dst_clone(pkt->dst); 852 skb->dst = dst_clone(pkt->dst);
840 skb->ip_summed = CHECKSUM_UNNECESSARY; 853 skb->ip_summed = CHECKSUM_UNNECESSARY;
841
842 skb_pull(skb, sizeof(struct ipv6hdr));
843 } 854 }
844 855
845 if (net->ipv6.mroute6_sk == NULL) { 856 if (net->ipv6.mroute6_sk == NULL) {
@@ -1222,8 +1233,10 @@ static int ip6mr_sk_init(struct sock *sk)
1222 1233
1223 rtnl_lock(); 1234 rtnl_lock();
1224 write_lock_bh(&mrt_lock); 1235 write_lock_bh(&mrt_lock);
1225 if (likely(net->ipv6.mroute6_sk == NULL)) 1236 if (likely(net->ipv6.mroute6_sk == NULL)) {
1226 net->ipv6.mroute6_sk = sk; 1237 net->ipv6.mroute6_sk = sk;
1238 net->ipv6.devconf_all->mc_forwarding++;
1239 }
1227 else 1240 else
1228 err = -EADDRINUSE; 1241 err = -EADDRINUSE;
1229 write_unlock_bh(&mrt_lock); 1242 write_unlock_bh(&mrt_lock);
@@ -1242,6 +1255,7 @@ int ip6mr_sk_done(struct sock *sk)
1242 if (sk == net->ipv6.mroute6_sk) { 1255 if (sk == net->ipv6.mroute6_sk) {
1243 write_lock_bh(&mrt_lock); 1256 write_lock_bh(&mrt_lock);
1244 net->ipv6.mroute6_sk = NULL; 1257 net->ipv6.mroute6_sk = NULL;
1258 net->ipv6.devconf_all->mc_forwarding--;
1245 write_unlock_bh(&mrt_lock); 1259 write_unlock_bh(&mrt_lock);
1246 1260
1247 mroute_clean_tables(net); 1261 mroute_clean_tables(net);
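The pim6_rcv() hunk above switches the register-message check from ip_compute_csum() over the PIM header alone to csum_ipv6_magic() over the IPv6 pseudo-header; both build on the usual 16-bit ones'-complement Internet checksum. A self-contained sketch of that sum and its verify-to-zero property (generic RFC 1071 arithmetic, not the kernel's csum_partial/csum_fold API; the sample header bytes are made up for the demo):

#include <stdint.h>
#include <stddef.h>
#include <stdio.h>

/* 16-bit ones'-complement sum over a buffer, RFC 1071 style. */
static uint16_t inet_checksum(const void *data, size_t len)
{
    const uint8_t *p = data;
    uint32_t sum = 0;

    while (len > 1) {
        sum += (uint32_t)p[0] << 8 | p[1];
        p += 2;
        len -= 2;
    }
    if (len)                       /* odd trailing byte, padded with zero */
        sum += (uint32_t)p[0] << 8;
    while (sum >> 16)              /* fold the carries back in (csum_fold) */
        sum = (sum & 0xffff) + (sum >> 16);
    return (uint16_t)~sum;
}

int main(void)
{
    /* Minimal IPv4-style header with its checksum field (bytes 10-11) zero. */
    uint8_t pkt[] = { 0x45, 0x00, 0x00, 0x14, 0x00, 0x00, 0x00, 0x00,
                      0x40, 0x11, 0x00, 0x00, 0x0a, 0x00, 0x00, 0x01,
                      0x0a, 0x00, 0x00, 0x02 };
    uint16_t c = inet_checksum(pkt, sizeof(pkt));

    pkt[10] = c >> 8;
    pkt[11] = c & 0xff;
    /* Summing again with the checksum in place yields zero. */
    printf("verify: %#x\n", (unsigned)inet_checksum(pkt, sizeof(pkt)));
    return 0;
}

The verify-to-zero property is what receive-side checks such as the one in pim6_rcv() rely on: a correctly checksummed message folds to zero, anything else is dropped.
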
diff --git a/net/ipv6/netfilter/nf_conntrack_proto_icmpv6.c b/net/ipv6/netfilter/nf_conntrack_proto_icmpv6.c
index c455cf4ee756..72dbb6d1a6b3 100644
--- a/net/ipv6/netfilter/nf_conntrack_proto_icmpv6.c
+++ b/net/ipv6/netfilter/nf_conntrack_proto_icmpv6.c
@@ -49,8 +49,19 @@ static bool icmpv6_pkt_to_tuple(const struct sk_buff *skb,
49static const u_int8_t invmap[] = { 49static const u_int8_t invmap[] = {
50 [ICMPV6_ECHO_REQUEST - 128] = ICMPV6_ECHO_REPLY + 1, 50 [ICMPV6_ECHO_REQUEST - 128] = ICMPV6_ECHO_REPLY + 1,
51 [ICMPV6_ECHO_REPLY - 128] = ICMPV6_ECHO_REQUEST + 1, 51 [ICMPV6_ECHO_REPLY - 128] = ICMPV6_ECHO_REQUEST + 1,
52 [ICMPV6_NI_QUERY - 128] = ICMPV6_NI_QUERY + 1, 52 [ICMPV6_NI_QUERY - 128] = ICMPV6_NI_REPLY + 1,
53 [ICMPV6_NI_REPLY - 128] = ICMPV6_NI_REPLY +1 53 [ICMPV6_NI_REPLY - 128] = ICMPV6_NI_QUERY +1
54};
55
56static const u_int8_t noct_valid_new[] = {
57 [ICMPV6_MGM_QUERY - 130] = 1,
58 [ICMPV6_MGM_REPORT -130] = 1,
59 [ICMPV6_MGM_REDUCTION - 130] = 1,
60 [NDISC_ROUTER_SOLICITATION - 130] = 1,
61 [NDISC_ROUTER_ADVERTISEMENT - 130] = 1,
62 [NDISC_NEIGHBOUR_SOLICITATION - 130] = 1,
63 [NDISC_NEIGHBOUR_ADVERTISEMENT - 130] = 1,
64 [ICMPV6_MLD2_REPORT - 130] = 1
54}; 65};
55 66
56static bool icmpv6_invert_tuple(struct nf_conntrack_tuple *tuple, 67static bool icmpv6_invert_tuple(struct nf_conntrack_tuple *tuple,
@@ -178,6 +189,7 @@ icmpv6_error(struct net *net, struct sk_buff *skb, unsigned int dataoff,
178{ 189{
179 const struct icmp6hdr *icmp6h; 190 const struct icmp6hdr *icmp6h;
180 struct icmp6hdr _ih; 191 struct icmp6hdr _ih;
192 int type;
181 193
182 icmp6h = skb_header_pointer(skb, dataoff, sizeof(_ih), &_ih); 194 icmp6h = skb_header_pointer(skb, dataoff, sizeof(_ih), &_ih);
183 if (icmp6h == NULL) { 195 if (icmp6h == NULL) {
@@ -189,11 +201,21 @@ icmpv6_error(struct net *net, struct sk_buff *skb, unsigned int dataoff,
189 201
190 if (net->ct.sysctl_checksum && hooknum == NF_INET_PRE_ROUTING && 202 if (net->ct.sysctl_checksum && hooknum == NF_INET_PRE_ROUTING &&
191 nf_ip6_checksum(skb, hooknum, dataoff, IPPROTO_ICMPV6)) { 203 nf_ip6_checksum(skb, hooknum, dataoff, IPPROTO_ICMPV6)) {
192 nf_log_packet(PF_INET6, 0, skb, NULL, NULL, NULL, 204 if (LOG_INVALID(net, IPPROTO_ICMPV6))
193 "nf_ct_icmpv6: ICMPv6 checksum failed\n"); 205 nf_log_packet(PF_INET6, 0, skb, NULL, NULL, NULL,
206 "nf_ct_icmpv6: ICMPv6 checksum failed ");
194 return -NF_ACCEPT; 207 return -NF_ACCEPT;
195 } 208 }
196 209
210 type = icmp6h->icmp6_type - 130;
211 if (type >= 0 && type < sizeof(noct_valid_new) &&
212 noct_valid_new[type]) {
213 skb->nfct = &nf_conntrack_untracked.ct_general;
214 skb->nfctinfo = IP_CT_NEW;
215 nf_conntrack_get(skb->nfct);
216 return NF_ACCEPT;
217 }
218
197 /* is not error message ? */ 219 /* is not error message ? */
198 if (icmp6h->icmp6_type >= 128) 220 if (icmp6h->icmp6_type >= 128)
199 return NF_ACCEPT; 221 return NF_ACCEPT;
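The new noct_valid_new[] table above is indexed by icmp6_type - 130 so that neighbour-discovery and MLD messages are attached to the "untracked" conntrack entry instead of creating per-packet conntracks; the bounds test guards types that fall outside the table. A small sketch of the same offset-indexed lookup (TYPE_BASE, no_track and is_untracked are illustrative names, the type values are the standard ICMPv6 ones):

#include <stdbool.h>
#include <stdio.h>

#define TYPE_BASE 130   /* lowest ICMPv6 type covered by the table */

/* 1 = handle as "untracked"; indexed by (type - TYPE_BASE). */
static const unsigned char no_track[] = {
    [130 - TYPE_BASE] = 1,   /* MLD query */
    [131 - TYPE_BASE] = 1,   /* MLD report */
    [132 - TYPE_BASE] = 1,   /* MLD done */
    [133 - TYPE_BASE] = 1,   /* router solicitation */
    [134 - TYPE_BASE] = 1,   /* router advertisement */
    [135 - TYPE_BASE] = 1,   /* neighbour solicitation */
    [136 - TYPE_BASE] = 1,   /* neighbour advertisement */
    [143 - TYPE_BASE] = 1,   /* MLDv2 report */
};

static bool is_untracked(int icmp6_type)
{
    int idx = icmp6_type - TYPE_BASE;

    /* Bounds check first: types below 130 or past the table fall through. */
    return idx >= 0 && idx < (int)sizeof(no_track) && no_track[idx];
}

int main(void)
{
    /* neighbour solicitation, echo request, out-of-range type */
    printf("%d %d %d\n", is_untracked(135), is_untracked(128),
           is_untracked(200));
    return 0;
}
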
diff --git a/net/ipv6/netfilter/nf_conntrack_reasm.c b/net/ipv6/netfilter/nf_conntrack_reasm.c
index ed4d79a9e4a6..058a5e4a60c3 100644
--- a/net/ipv6/netfilter/nf_conntrack_reasm.c
+++ b/net/ipv6/netfilter/nf_conntrack_reasm.c
@@ -528,14 +528,14 @@ find_prev_fhdr(struct sk_buff *skb, u8 *prevhdrp, int *prevhoff, int *fhoff)
528 if (!ipv6_ext_hdr(nexthdr)) { 528 if (!ipv6_ext_hdr(nexthdr)) {
529 return -1; 529 return -1;
530 } 530 }
531 if (len < (int)sizeof(struct ipv6_opt_hdr)) {
532 pr_debug("too short\n");
533 return -1;
534 }
535 if (nexthdr == NEXTHDR_NONE) { 531 if (nexthdr == NEXTHDR_NONE) {
536 pr_debug("next header is none\n"); 532 pr_debug("next header is none\n");
537 return -1; 533 return -1;
538 } 534 }
535 if (len < (int)sizeof(struct ipv6_opt_hdr)) {
536 pr_debug("too short\n");
537 return -1;
538 }
539 if (skb_copy_bits(skb, start, &hdr, sizeof(hdr))) 539 if (skb_copy_bits(skb, start, &hdr, sizeof(hdr)))
540 BUG(); 540 BUG();
541 if (nexthdr == NEXTHDR_AUTH) 541 if (nexthdr == NEXTHDR_AUTH)
diff --git a/net/ipv6/reassembly.c b/net/ipv6/reassembly.c
index 3c575118fca5..e9ac7a12f595 100644
--- a/net/ipv6/reassembly.c
+++ b/net/ipv6/reassembly.c
@@ -452,6 +452,7 @@ err:
452static int ip6_frag_reasm(struct frag_queue *fq, struct sk_buff *prev, 452static int ip6_frag_reasm(struct frag_queue *fq, struct sk_buff *prev,
453 struct net_device *dev) 453 struct net_device *dev)
454{ 454{
455 struct net *net = container_of(fq->q.net, struct net, ipv6.frags);
455 struct sk_buff *fp, *head = fq->q.fragments; 456 struct sk_buff *fp, *head = fq->q.fragments;
456 int payload_len; 457 int payload_len;
457 unsigned int nhoff; 458 unsigned int nhoff;
@@ -551,8 +552,7 @@ static int ip6_frag_reasm(struct frag_queue *fq, struct sk_buff *prev,
551 head->csum); 552 head->csum);
552 553
553 rcu_read_lock(); 554 rcu_read_lock();
554 IP6_INC_STATS_BH(dev_net(dev), 555 IP6_INC_STATS_BH(net, __in6_dev_get(dev), IPSTATS_MIB_REASMOKS);
555 __in6_dev_get(dev), IPSTATS_MIB_REASMOKS);
556 rcu_read_unlock(); 556 rcu_read_unlock();
557 fq->q.fragments = NULL; 557 fq->q.fragments = NULL;
558 return 1; 558 return 1;
@@ -566,8 +566,7 @@ out_oom:
566 printk(KERN_DEBUG "ip6_frag_reasm: no memory for reassembly\n"); 566 printk(KERN_DEBUG "ip6_frag_reasm: no memory for reassembly\n");
567out_fail: 567out_fail:
568 rcu_read_lock(); 568 rcu_read_lock();
569 IP6_INC_STATS_BH(dev_net(dev), 569 IP6_INC_STATS_BH(net, __in6_dev_get(dev), IPSTATS_MIB_REASMFAILS);
570 __in6_dev_get(dev), IPSTATS_MIB_REASMFAILS);
571 rcu_read_unlock(); 570 rcu_read_unlock();
572 return -1; 571 return -1;
573} 572}
diff --git a/net/ipv6/route.c b/net/ipv6/route.c
index c4a59824ac2c..9c574235c905 100644
--- a/net/ipv6/route.c
+++ b/net/ipv6/route.c
@@ -794,7 +794,7 @@ void ip6_route_input(struct sk_buff *skb)
794 .proto = iph->nexthdr, 794 .proto = iph->nexthdr,
795 }; 795 };
796 796
797 if (rt6_need_strict(&iph->daddr)) 797 if (rt6_need_strict(&iph->daddr) && skb->dev->type != ARPHRD_PIMREG)
798 flags |= RT6_LOOKUP_F_IFACE; 798 flags |= RT6_LOOKUP_F_IFACE;
799 799
800 skb->dst = fib6_rule_lookup(net, &fl, flags, ip6_pol_route_input); 800 skb->dst = fib6_rule_lookup(net, &fl, flags, ip6_pol_route_input);
diff --git a/net/ipv6/sit.c b/net/ipv6/sit.c
index d3467e563f02..5cee2bcbcece 100644
--- a/net/ipv6/sit.c
+++ b/net/ipv6/sit.c
@@ -188,9 +188,9 @@ static struct ip_tunnel * ipip6_tunnel_locate(struct net *net,
188 } 188 }
189 189
190 nt = netdev_priv(dev); 190 nt = netdev_priv(dev);
191 ipip6_tunnel_init(dev);
192 191
193 nt->parms = *parms; 192 nt->parms = *parms;
193 ipip6_tunnel_init(dev);
194 194
195 if (parms->i_flags & SIT_ISATAP) 195 if (parms->i_flags & SIT_ISATAP)
196 dev->priv_flags |= IFF_ISATAP; 196 dev->priv_flags |= IFF_ISATAP;
diff --git a/net/mac80211/tx.c b/net/mac80211/tx.c
index 4278e545638f..37e3d5ef7e3f 100644
--- a/net/mac80211/tx.c
+++ b/net/mac80211/tx.c
@@ -752,6 +752,8 @@ ieee80211_tx_h_fragment(struct ieee80211_tx_data *tx)
752 skb_copy_queue_mapping(frag, first); 752 skb_copy_queue_mapping(frag, first);
753 753
754 frag->do_not_encrypt = first->do_not_encrypt; 754 frag->do_not_encrypt = first->do_not_encrypt;
755 frag->dev = first->dev;
756 frag->iif = first->iif;
755 757
756 pos += copylen; 758 pos += copylen;
757 left -= copylen; 759 left -= copylen;
@@ -1343,6 +1345,8 @@ int ieee80211_master_start_xmit(struct sk_buff *skb, struct net_device *dev)
1343 list) { 1345 list) {
1344 if (!netif_running(sdata->dev)) 1346 if (!netif_running(sdata->dev))
1345 continue; 1347 continue;
1348 if (sdata->vif.type != NL80211_IFTYPE_AP)
1349 continue;
1346 if (compare_ether_addr(sdata->dev->dev_addr, 1350 if (compare_ether_addr(sdata->dev->dev_addr,
1347 hdr->addr2)) { 1351 hdr->addr2)) {
1348 dev_hold(sdata->dev); 1352 dev_hold(sdata->dev);
diff --git a/net/netfilter/nf_conntrack_core.c b/net/netfilter/nf_conntrack_core.c
index 90ce9ddb9451..f4935e344b61 100644
--- a/net/netfilter/nf_conntrack_core.c
+++ b/net/netfilter/nf_conntrack_core.c
@@ -726,7 +726,7 @@ nf_conntrack_in(struct net *net, u_int8_t pf, unsigned int hooknum,
726 NF_CT_ASSERT(skb->nfct); 726 NF_CT_ASSERT(skb->nfct);
727 727
728 ret = l4proto->packet(ct, skb, dataoff, ctinfo, pf, hooknum); 728 ret = l4proto->packet(ct, skb, dataoff, ctinfo, pf, hooknum);
729 if (ret < 0) { 729 if (ret <= 0) {
730 /* Invalid: inverse of the return code tells 730 /* Invalid: inverse of the return code tells
731 * the netfilter core what to do */ 731 * the netfilter core what to do */
732 pr_debug("nf_conntrack_in: Can't track with proto module\n"); 732 pr_debug("nf_conntrack_in: Can't track with proto module\n");
diff --git a/net/netfilter/nf_conntrack_netlink.c b/net/netfilter/nf_conntrack_netlink.c
index c32a7e8e3a1b..ed6d873ad384 100644
--- a/net/netfilter/nf_conntrack_netlink.c
+++ b/net/netfilter/nf_conntrack_netlink.c
@@ -434,7 +434,7 @@ static int ctnetlink_conntrack_event(struct notifier_block *this,
434 } else 434 } else
435 return NOTIFY_DONE; 435 return NOTIFY_DONE;
436 436
437 if (!nfnetlink_has_listeners(group)) 437 if (!item->report && !nfnetlink_has_listeners(group))
438 return NOTIFY_DONE; 438 return NOTIFY_DONE;
439 439
440 skb = alloc_skb(NLMSG_GOODSIZE, GFP_ATOMIC); 440 skb = alloc_skb(NLMSG_GOODSIZE, GFP_ATOMIC);
@@ -1215,6 +1215,16 @@ ctnetlink_create_conntrack(struct nlattr *cda[],
1215 } 1215 }
1216 } 1216 }
1217 1217
1218#ifdef CONFIG_NF_NAT_NEEDED
1219 if (cda[CTA_NAT_SEQ_ADJ_ORIG] || cda[CTA_NAT_SEQ_ADJ_REPLY]) {
1220 err = ctnetlink_change_nat_seq_adj(ct, cda);
1221 if (err < 0) {
1222 rcu_read_unlock();
1223 goto err;
1224 }
1225 }
1226#endif
1227
1218 if (cda[CTA_PROTOINFO]) { 1228 if (cda[CTA_PROTOINFO]) {
1219 err = ctnetlink_change_protoinfo(ct, cda); 1229 err = ctnetlink_change_protoinfo(ct, cda);
1220 if (err < 0) { 1230 if (err < 0) {
@@ -1492,7 +1502,8 @@ static int ctnetlink_expect_event(struct notifier_block *this,
1492 } else 1502 } else
1493 return NOTIFY_DONE; 1503 return NOTIFY_DONE;
1494 1504
1495 if (!nfnetlink_has_listeners(NFNLGRP_CONNTRACK_EXP_NEW)) 1505 if (!item->report &&
1506 !nfnetlink_has_listeners(NFNLGRP_CONNTRACK_EXP_NEW))
1496 return NOTIFY_DONE; 1507 return NOTIFY_DONE;
1497 1508
1498 skb = alloc_skb(NLMSG_GOODSIZE, GFP_ATOMIC); 1509 skb = alloc_skb(NLMSG_GOODSIZE, GFP_ATOMIC);
@@ -1769,6 +1780,7 @@ ctnetlink_create_expect(struct nlattr *cda[], u_int8_t u3, u32 pid, int report)
1769 goto out; 1780 goto out;
1770 } 1781 }
1771 1782
1783 exp->class = 0;
1772 exp->expectfn = NULL; 1784 exp->expectfn = NULL;
1773 exp->flags = 0; 1785 exp->flags = 0;
1774 exp->master = ct; 1786 exp->master = ct;
diff --git a/net/netfilter/nf_conntrack_proto_tcp.c b/net/netfilter/nf_conntrack_proto_tcp.c
index a1edb9c1adee..f3fd154d1ddd 100644
--- a/net/netfilter/nf_conntrack_proto_tcp.c
+++ b/net/netfilter/nf_conntrack_proto_tcp.c
@@ -859,7 +859,7 @@ static int tcp_packet(struct nf_conn *ct,
859 */ 859 */
860 if (nf_ct_kill(ct)) 860 if (nf_ct_kill(ct))
861 return -NF_REPEAT; 861 return -NF_REPEAT;
862 return -NF_DROP; 862 return NF_DROP;
863 } 863 }
864 /* Fall through */ 864 /* Fall through */
865 case TCP_CONNTRACK_IGNORE: 865 case TCP_CONNTRACK_IGNORE:
@@ -892,7 +892,7 @@ static int tcp_packet(struct nf_conn *ct,
892 nf_log_packet(pf, 0, skb, NULL, NULL, NULL, 892 nf_log_packet(pf, 0, skb, NULL, NULL, NULL,
893 "nf_ct_tcp: killing out of sync session "); 893 "nf_ct_tcp: killing out of sync session ");
894 nf_ct_kill(ct); 894 nf_ct_kill(ct);
895 return -NF_DROP; 895 return NF_DROP;
896 } 896 }
897 ct->proto.tcp.last_index = index; 897 ct->proto.tcp.last_index = index;
898 ct->proto.tcp.last_dir = dir; 898 ct->proto.tcp.last_dir = dir;
diff --git a/net/netfilter/nfnetlink_log.c b/net/netfilter/nfnetlink_log.c
index fa49dc7fe100..c712e9fc6bba 100644
--- a/net/netfilter/nfnetlink_log.c
+++ b/net/netfilter/nfnetlink_log.c
@@ -39,7 +39,7 @@
39#endif 39#endif
40 40
41#define NFULNL_NLBUFSIZ_DEFAULT NLMSG_GOODSIZE 41#define NFULNL_NLBUFSIZ_DEFAULT NLMSG_GOODSIZE
42#define NFULNL_TIMEOUT_DEFAULT HZ /* every second */ 42#define NFULNL_TIMEOUT_DEFAULT 100 /* every second */
43#define NFULNL_QTHRESH_DEFAULT 100 /* 100 packets */ 43#define NFULNL_QTHRESH_DEFAULT 100 /* 100 packets */
44#define NFULNL_COPY_RANGE_MAX 0xFFFF /* max packet size is limited by 16-bit struct nfattr nfa_len field */ 44#define NFULNL_COPY_RANGE_MAX 0xFFFF /* max packet size is limited by 16-bit struct nfattr nfa_len field */
45 45
@@ -590,8 +590,10 @@ nfulnl_log_packet(u_int8_t pf,
590 590
591 qthreshold = inst->qthreshold; 591 qthreshold = inst->qthreshold;
592 /* per-rule qthreshold overrides per-instance */ 592 /* per-rule qthreshold overrides per-instance */
593 if (qthreshold > li->u.ulog.qthreshold) 593 if (li->u.ulog.qthreshold)
594 qthreshold = li->u.ulog.qthreshold; 594 if (qthreshold > li->u.ulog.qthreshold)
595 qthreshold = li->u.ulog.qthreshold;
596
595 597
596 switch (inst->copy_mode) { 598 switch (inst->copy_mode) {
597 case NFULNL_COPY_META: 599 case NFULNL_COPY_META:
diff --git a/net/netfilter/x_tables.c b/net/netfilter/x_tables.c
index bfbf521f6ea5..5baccfa5a0de 100644
--- a/net/netfilter/x_tables.c
+++ b/net/netfilter/x_tables.c
@@ -827,59 +827,143 @@ static const struct file_operations xt_table_ops = {
827 .release = seq_release_net, 827 .release = seq_release_net,
828}; 828};
829 829
830static void *xt_match_seq_start(struct seq_file *seq, loff_t *pos) 830/*
831 * Traverse state for ip{,6}_{tables,matches} for helping crossing
832 * the multi-AF mutexes.
833 */
834struct nf_mttg_trav {
835 struct list_head *head, *curr;
836 uint8_t class, nfproto;
837};
838
839enum {
840 MTTG_TRAV_INIT,
841 MTTG_TRAV_NFP_UNSPEC,
842 MTTG_TRAV_NFP_SPEC,
843 MTTG_TRAV_DONE,
844};
845
846static void *xt_mttg_seq_next(struct seq_file *seq, void *v, loff_t *ppos,
847 bool is_target)
831{ 848{
832 struct proc_dir_entry *pde = (struct proc_dir_entry *)seq->private; 849 static const uint8_t next_class[] = {
833 u_int16_t af = (unsigned long)pde->data; 850 [MTTG_TRAV_NFP_UNSPEC] = MTTG_TRAV_NFP_SPEC,
851 [MTTG_TRAV_NFP_SPEC] = MTTG_TRAV_DONE,
852 };
853 struct nf_mttg_trav *trav = seq->private;
854
855 switch (trav->class) {
856 case MTTG_TRAV_INIT:
857 trav->class = MTTG_TRAV_NFP_UNSPEC;
858 mutex_lock(&xt[NFPROTO_UNSPEC].mutex);
859 trav->head = trav->curr = is_target ?
860 &xt[NFPROTO_UNSPEC].target : &xt[NFPROTO_UNSPEC].match;
861 break;
862 case MTTG_TRAV_NFP_UNSPEC:
863 trav->curr = trav->curr->next;
864 if (trav->curr != trav->head)
865 break;
866 mutex_unlock(&xt[NFPROTO_UNSPEC].mutex);
867 mutex_lock(&xt[trav->nfproto].mutex);
868 trav->head = trav->curr = is_target ?
869 &xt[trav->nfproto].target : &xt[trav->nfproto].match;
870 trav->class = next_class[trav->class];
871 break;
872 case MTTG_TRAV_NFP_SPEC:
873 trav->curr = trav->curr->next;
874 if (trav->curr != trav->head)
875 break;
876 /* fallthru, _stop will unlock */
877 default:
878 return NULL;
879 }
834 880
835 mutex_lock(&xt[af].mutex); 881 if (ppos != NULL)
836 return seq_list_start(&xt[af].match, *pos); 882 ++*ppos;
883 return trav;
837} 884}
838 885
839static void *xt_match_seq_next(struct seq_file *seq, void *v, loff_t *pos) 886static void *xt_mttg_seq_start(struct seq_file *seq, loff_t *pos,
887 bool is_target)
840{ 888{
841 struct proc_dir_entry *pde = (struct proc_dir_entry *)seq->private; 889 struct nf_mttg_trav *trav = seq->private;
842 u_int16_t af = (unsigned long)pde->data; 890 unsigned int j;
843 891
844 return seq_list_next(v, &xt[af].match, pos); 892 trav->class = MTTG_TRAV_INIT;
893 for (j = 0; j < *pos; ++j)
894 if (xt_mttg_seq_next(seq, NULL, NULL, is_target) == NULL)
895 return NULL;
896 return trav;
845} 897}
846 898
847static void xt_match_seq_stop(struct seq_file *seq, void *v) 899static void xt_mttg_seq_stop(struct seq_file *seq, void *v)
848{ 900{
849 struct proc_dir_entry *pde = seq->private; 901 struct nf_mttg_trav *trav = seq->private;
850 u_int16_t af = (unsigned long)pde->data; 902
903 switch (trav->class) {
904 case MTTG_TRAV_NFP_UNSPEC:
905 mutex_unlock(&xt[NFPROTO_UNSPEC].mutex);
906 break;
907 case MTTG_TRAV_NFP_SPEC:
908 mutex_unlock(&xt[trav->nfproto].mutex);
909 break;
910 }
911}
851 912
852 mutex_unlock(&xt[af].mutex); 913static void *xt_match_seq_start(struct seq_file *seq, loff_t *pos)
914{
915 return xt_mttg_seq_start(seq, pos, false);
853} 916}
854 917
855static int xt_match_seq_show(struct seq_file *seq, void *v) 918static void *xt_match_seq_next(struct seq_file *seq, void *v, loff_t *ppos)
856{ 919{
857 struct xt_match *match = list_entry(v, struct xt_match, list); 920 return xt_mttg_seq_next(seq, v, ppos, false);
921}
858 922
859 if (strlen(match->name)) 923static int xt_match_seq_show(struct seq_file *seq, void *v)
860 return seq_printf(seq, "%s\n", match->name); 924{
861 else 925 const struct nf_mttg_trav *trav = seq->private;
862 return 0; 926 const struct xt_match *match;
927
928 switch (trav->class) {
929 case MTTG_TRAV_NFP_UNSPEC:
930 case MTTG_TRAV_NFP_SPEC:
931 if (trav->curr == trav->head)
932 return 0;
933 match = list_entry(trav->curr, struct xt_match, list);
934 return (*match->name == '\0') ? 0 :
935 seq_printf(seq, "%s\n", match->name);
936 }
937 return 0;
863} 938}
864 939
865static const struct seq_operations xt_match_seq_ops = { 940static const struct seq_operations xt_match_seq_ops = {
866 .start = xt_match_seq_start, 941 .start = xt_match_seq_start,
867 .next = xt_match_seq_next, 942 .next = xt_match_seq_next,
868 .stop = xt_match_seq_stop, 943 .stop = xt_mttg_seq_stop,
869 .show = xt_match_seq_show, 944 .show = xt_match_seq_show,
870}; 945};
871 946
872static int xt_match_open(struct inode *inode, struct file *file) 947static int xt_match_open(struct inode *inode, struct file *file)
873{ 948{
949 struct seq_file *seq;
950 struct nf_mttg_trav *trav;
874 int ret; 951 int ret;
875 952
876 ret = seq_open(file, &xt_match_seq_ops); 953 trav = kmalloc(sizeof(*trav), GFP_KERNEL);
877 if (!ret) { 954 if (trav == NULL)
878 struct seq_file *seq = file->private_data; 955 return -ENOMEM;
879 956
880 seq->private = PDE(inode); 957 ret = seq_open(file, &xt_match_seq_ops);
958 if (ret < 0) {
959 kfree(trav);
960 return ret;
881 } 961 }
882 return ret; 962
963 seq = file->private_data;
964 seq->private = trav;
965 trav->nfproto = (unsigned long)PDE(inode)->data;
966 return 0;
883} 967}
884 968
885static const struct file_operations xt_match_ops = { 969static const struct file_operations xt_match_ops = {
@@ -887,62 +971,63 @@ static const struct file_operations xt_match_ops = {
887 .open = xt_match_open, 971 .open = xt_match_open,
888 .read = seq_read, 972 .read = seq_read,
889 .llseek = seq_lseek, 973 .llseek = seq_lseek,
890 .release = seq_release, 974 .release = seq_release_private,
891}; 975};
892 976
893static void *xt_target_seq_start(struct seq_file *seq, loff_t *pos) 977static void *xt_target_seq_start(struct seq_file *seq, loff_t *pos)
894{ 978{
895 struct proc_dir_entry *pde = (struct proc_dir_entry *)seq->private; 979 return xt_mttg_seq_start(seq, pos, true);
896 u_int16_t af = (unsigned long)pde->data;
897
898 mutex_lock(&xt[af].mutex);
899 return seq_list_start(&xt[af].target, *pos);
900} 980}
901 981
902static void *xt_target_seq_next(struct seq_file *seq, void *v, loff_t *pos) 982static void *xt_target_seq_next(struct seq_file *seq, void *v, loff_t *ppos)
903{ 983{
904 struct proc_dir_entry *pde = (struct proc_dir_entry *)seq->private; 984 return xt_mttg_seq_next(seq, v, ppos, true);
905 u_int16_t af = (unsigned long)pde->data;
906
907 return seq_list_next(v, &xt[af].target, pos);
908}
909
910static void xt_target_seq_stop(struct seq_file *seq, void *v)
911{
912 struct proc_dir_entry *pde = seq->private;
913 u_int16_t af = (unsigned long)pde->data;
914
915 mutex_unlock(&xt[af].mutex);
916} 985}
917 986
918static int xt_target_seq_show(struct seq_file *seq, void *v) 987static int xt_target_seq_show(struct seq_file *seq, void *v)
919{ 988{
920 struct xt_target *target = list_entry(v, struct xt_target, list); 989 const struct nf_mttg_trav *trav = seq->private;
921 990 const struct xt_target *target;
922 if (strlen(target->name)) 991
923 return seq_printf(seq, "%s\n", target->name); 992 switch (trav->class) {
924 else 993 case MTTG_TRAV_NFP_UNSPEC:
925 return 0; 994 case MTTG_TRAV_NFP_SPEC:
995 if (trav->curr == trav->head)
996 return 0;
997 target = list_entry(trav->curr, struct xt_target, list);
998 return (*target->name == '\0') ? 0 :
999 seq_printf(seq, "%s\n", target->name);
1000 }
1001 return 0;
926} 1002}
927 1003
928static const struct seq_operations xt_target_seq_ops = { 1004static const struct seq_operations xt_target_seq_ops = {
929 .start = xt_target_seq_start, 1005 .start = xt_target_seq_start,
930 .next = xt_target_seq_next, 1006 .next = xt_target_seq_next,
931 .stop = xt_target_seq_stop, 1007 .stop = xt_mttg_seq_stop,
932 .show = xt_target_seq_show, 1008 .show = xt_target_seq_show,
933}; 1009};
934 1010
935static int xt_target_open(struct inode *inode, struct file *file) 1011static int xt_target_open(struct inode *inode, struct file *file)
936{ 1012{
1013 struct seq_file *seq;
1014 struct nf_mttg_trav *trav;
937 int ret; 1015 int ret;
938 1016
939 ret = seq_open(file, &xt_target_seq_ops); 1017 trav = kmalloc(sizeof(*trav), GFP_KERNEL);
940 if (!ret) { 1018 if (trav == NULL)
941 struct seq_file *seq = file->private_data; 1019 return -ENOMEM;
942 1020
943 seq->private = PDE(inode); 1021 ret = seq_open(file, &xt_target_seq_ops);
1022 if (ret < 0) {
1023 kfree(trav);
1024 return ret;
944 } 1025 }
945 return ret; 1026
1027 seq = file->private_data;
1028 seq->private = trav;
1029 trav->nfproto = (unsigned long)PDE(inode)->data;
1030 return 0;
946} 1031}
947 1032
948static const struct file_operations xt_target_ops = { 1033static const struct file_operations xt_target_ops = {
@@ -950,7 +1035,7 @@ static const struct file_operations xt_target_ops = {
950 .open = xt_target_open, 1035 .open = xt_target_open,
951 .read = seq_read, 1036 .read = seq_read,
952 .llseek = seq_lseek, 1037 .llseek = seq_lseek,
953 .release = seq_release, 1038 .release = seq_release_private,
954}; 1039};
955 1040
956#define FORMAT_TABLES "_tables_names" 1041#define FORMAT_TABLES "_tables_names"
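The xt_mttg_* code above replaces the simple per-AF seq_file iterators with a small state machine that walks the NFPROTO_UNSPEC list first and then the per-protocol list, handing over the corresponding mutex at the boundary, so /proc/net/*_matches and *_targets show both sets. A userspace analogue of that traversal, using pthread mutexes and NULL-terminated name arrays instead of kernel lists, with the list-head sentinel simplified away (all names here are illustrative):

#include <pthread.h>
#include <stdio.h>

/* Two name lists, each protected by its own lock (like xt[NFPROTO_*]). */
static const char *unspec_names[] = { "comment", "mark", NULL };
static const char *ipv4_names[]   = { "icmp", "ttl", NULL };
static pthread_mutex_t unspec_lock = PTHREAD_MUTEX_INITIALIZER;
static pthread_mutex_t ipv4_lock   = PTHREAD_MUTEX_INITIALIZER;

enum trav_state { TRAV_INIT, TRAV_UNSPEC, TRAV_SPEC, TRAV_DONE };

struct trav {
    enum trav_state state;
    const char **curr;
};

/* Advance one element, taking/handing over the list locks at the seams. */
static const char *trav_next(struct trav *t)
{
    switch (t->state) {
    case TRAV_INIT:
        t->state = TRAV_UNSPEC;
        pthread_mutex_lock(&unspec_lock);
        t->curr = unspec_names;
        break;
    case TRAV_UNSPEC:
        t->curr++;
        if (*t->curr)
            break;
        /* End of the first list: swap locks and switch lists. */
        pthread_mutex_unlock(&unspec_lock);
        pthread_mutex_lock(&ipv4_lock);
        t->curr = ipv4_names;
        t->state = TRAV_SPEC;
        break;
    case TRAV_SPEC:
        t->curr++;
        if (*t->curr)
            break;
        /* fall through: nothing left, stop() will unlock */
    default:
        return NULL;
    }
    return *t->curr;
}

/* Drop whichever lock the traversal still holds (like xt_mttg_seq_stop). */
static void trav_stop(struct trav *t)
{
    if (t->state == TRAV_UNSPEC)
        pthread_mutex_unlock(&unspec_lock);
    else if (t->state == TRAV_SPEC)
        pthread_mutex_unlock(&ipv4_lock);
}

int main(void)
{
    struct trav t = { .state = TRAV_INIT };
    const char *name;

    while ((name = trav_next(&t)) != NULL)
        printf("%s\n", name);
    trav_stop(&t);
    return 0;
}

As in xt_mttg_seq_stop(), trav_stop() must release whichever lock the iterator was still holding when the walk ended, including early termination part-way through either list.
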
diff --git a/net/netfilter/xt_recent.c b/net/netfilter/xt_recent.c
index fe80b614a400..791e030ea903 100644
--- a/net/netfilter/xt_recent.c
+++ b/net/netfilter/xt_recent.c
@@ -542,7 +542,7 @@ recent_mt_proc_write(struct file *file, const char __user *input,
542 struct recent_entry *e; 542 struct recent_entry *e;
543 char buf[sizeof("+b335:1d35:1e55:dead:c0de:1715:5afe:c0de")]; 543 char buf[sizeof("+b335:1d35:1e55:dead:c0de:1715:5afe:c0de")];
544 const char *c = buf; 544 const char *c = buf;
545 union nf_inet_addr addr; 545 union nf_inet_addr addr = {};
546 u_int16_t family; 546 u_int16_t family;
547 bool add, succ; 547 bool add, succ;
548 548
diff --git a/net/netfilter/xt_sctp.c b/net/netfilter/xt_sctp.c
index e223cb43ae8e..a189ada9128f 100644
--- a/net/netfilter/xt_sctp.c
+++ b/net/netfilter/xt_sctp.c
@@ -105,7 +105,7 @@ match_packet(const struct sk_buff *skb,
105 105
106 switch (chunk_match_type) { 106 switch (chunk_match_type) {
107 case SCTP_CHUNK_MATCH_ALL: 107 case SCTP_CHUNK_MATCH_ALL:
108 return SCTP_CHUNKMAP_IS_CLEAR(info->chunkmap); 108 return SCTP_CHUNKMAP_IS_CLEAR(chunkmapcopy);
109 case SCTP_CHUNK_MATCH_ANY: 109 case SCTP_CHUNK_MATCH_ANY:
110 return false; 110 return false;
111 case SCTP_CHUNK_MATCH_ONLY: 111 case SCTP_CHUNK_MATCH_ONLY:
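The xt_sctp fix above matters because SCTP_CHUNK_MATCH_ALL must succeed only once every requested chunk type has been seen: the match loop clears bits in a working copy of the rule's chunk map, so the final test has to inspect that copy (chunkmapcopy), not the untouched original. A tiny bitmap sketch of the same idea (match_all and the chunk-type constants are illustrative):

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* "Match all": every wanted chunk type must appear in the packet. */
static bool match_all(uint32_t wanted, const uint8_t *types, int n)
{
    uint32_t remaining = wanted;        /* working copy, like chunkmapcopy */
    int i;

    for (i = 0; i < n; i++)
        remaining &= ~(UINT32_C(1) << types[i]);   /* seen: clear its bit */

    /* Testing 'wanted' here would be the original bug; it never changes. */
    return remaining == 0;
}

int main(void)
{
    uint8_t seen[] = { 0 /* DATA */, 1 /* INIT */, 6 /* ABORT */ };
    uint32_t want_data_init = (1u << 0) | (1u << 1);
    uint32_t want_sack      = 1u << 3;

    printf("%d %d\n",
           match_all(want_data_init, seen, 3),   /* 1: both types present */
           match_all(want_sack, seen, 3));       /* 0: SACK never seen    */
    return 0;
}
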
diff --git a/net/netlink/af_netlink.c b/net/netlink/af_netlink.c
index 9eb895c7a2a9..3ae3cb816563 100644
--- a/net/netlink/af_netlink.c
+++ b/net/netlink/af_netlink.c
@@ -1084,6 +1084,13 @@ out:
1084 return 0; 1084 return 0;
1085} 1085}
1086 1086
1087/**
1088 * netlink_set_err - report error to broadcast listeners
1089 * @ssk: the kernel netlink socket, as returned by netlink_kernel_create()
1090 * @pid: the PID of a process that we want to skip (if any)
1091 * @groups: the broadcast group that will notice the error
1092 * @code: error code, must be negative (as usual in kernelspace)
1093 */
1087void netlink_set_err(struct sock *ssk, u32 pid, u32 group, int code) 1094void netlink_set_err(struct sock *ssk, u32 pid, u32 group, int code)
1088{ 1095{
1089 struct netlink_set_err_data info; 1096 struct netlink_set_err_data info;
@@ -1093,7 +1100,8 @@ void netlink_set_err(struct sock *ssk, u32 pid, u32 group, int code)
1093 info.exclude_sk = ssk; 1100 info.exclude_sk = ssk;
1094 info.pid = pid; 1101 info.pid = pid;
1095 info.group = group; 1102 info.group = group;
1096 info.code = code; 1103 /* sk->sk_err wants a positive error value */
1104 info.code = -code;
1097 1105
1098 read_lock(&nl_table_lock); 1106 read_lock(&nl_table_lock);
1099 1107
diff --git a/net/packet/af_packet.c b/net/packet/af_packet.c
index 5f94db2f3e9e..1fc4a7885c41 100644
--- a/net/packet/af_packet.c
+++ b/net/packet/af_packet.c
@@ -77,6 +77,7 @@
77#include <linux/poll.h> 77#include <linux/poll.h>
78#include <linux/module.h> 78#include <linux/module.h>
79#include <linux/init.h> 79#include <linux/init.h>
80#include <linux/mutex.h>
80 81
81#ifdef CONFIG_INET 82#ifdef CONFIG_INET
82#include <net/inet_common.h> 83#include <net/inet_common.h>
@@ -175,6 +176,7 @@ struct packet_sock {
175#endif 176#endif
176 struct packet_type prot_hook; 177 struct packet_type prot_hook;
177 spinlock_t bind_lock; 178 spinlock_t bind_lock;
179 struct mutex pg_vec_lock;
178 unsigned int running:1, /* prot_hook is attached*/ 180 unsigned int running:1, /* prot_hook is attached*/
179 auxdata:1, 181 auxdata:1,
180 origdev:1; 182 origdev:1;
@@ -220,13 +222,13 @@ static void *packet_lookup_frame(struct packet_sock *po, unsigned int position,
220 h.raw = po->pg_vec[pg_vec_pos] + (frame_offset * po->frame_size); 222 h.raw = po->pg_vec[pg_vec_pos] + (frame_offset * po->frame_size);
221 switch (po->tp_version) { 223 switch (po->tp_version) {
222 case TPACKET_V1: 224 case TPACKET_V1:
223 if (status != h.h1->tp_status ? TP_STATUS_USER : 225 if (status != (h.h1->tp_status ? TP_STATUS_USER :
224 TP_STATUS_KERNEL) 226 TP_STATUS_KERNEL))
225 return NULL; 227 return NULL;
226 break; 228 break;
227 case TPACKET_V2: 229 case TPACKET_V2:
228 if (status != h.h2->tp_status ? TP_STATUS_USER : 230 if (status != (h.h2->tp_status ? TP_STATUS_USER :
229 TP_STATUS_KERNEL) 231 TP_STATUS_KERNEL))
230 return NULL; 232 return NULL;
231 break; 233 break;
232 } 234 }
@@ -1069,6 +1071,7 @@ static int packet_create(struct net *net, struct socket *sock, int protocol)
1069 */ 1071 */
1070 1072
1071 spin_lock_init(&po->bind_lock); 1073 spin_lock_init(&po->bind_lock);
1074 mutex_init(&po->pg_vec_lock);
1072 po->prot_hook.func = packet_rcv; 1075 po->prot_hook.func = packet_rcv;
1073 1076
1074 if (sock->type == SOCK_PACKET) 1077 if (sock->type == SOCK_PACKET)
@@ -1865,6 +1868,7 @@ static int packet_set_ring(struct sock *sk, struct tpacket_req *req, int closing
1865 synchronize_net(); 1868 synchronize_net();
1866 1869
1867 err = -EBUSY; 1870 err = -EBUSY;
1871 mutex_lock(&po->pg_vec_lock);
1868 if (closing || atomic_read(&po->mapped) == 0) { 1872 if (closing || atomic_read(&po->mapped) == 0) {
1869 err = 0; 1873 err = 0;
1870#define XC(a, b) ({ __typeof__ ((a)) __t; __t = (a); (a) = (b); __t; }) 1874#define XC(a, b) ({ __typeof__ ((a)) __t; __t = (a); (a) = (b); __t; })
@@ -1886,6 +1890,7 @@ static int packet_set_ring(struct sock *sk, struct tpacket_req *req, int closing
1886 if (atomic_read(&po->mapped)) 1890 if (atomic_read(&po->mapped))
1887 printk(KERN_DEBUG "packet_mmap: vma is busy: %d\n", atomic_read(&po->mapped)); 1891 printk(KERN_DEBUG "packet_mmap: vma is busy: %d\n", atomic_read(&po->mapped));
1888 } 1892 }
1893 mutex_unlock(&po->pg_vec_lock);
1889 1894
1890 spin_lock(&po->bind_lock); 1895 spin_lock(&po->bind_lock);
1891 if (was_running && !po->running) { 1896 if (was_running && !po->running) {
@@ -1918,7 +1923,7 @@ static int packet_mmap(struct file *file, struct socket *sock, struct vm_area_st
1918 1923
1919 size = vma->vm_end - vma->vm_start; 1924 size = vma->vm_end - vma->vm_start;
1920 1925
1921 lock_sock(sk); 1926 mutex_lock(&po->pg_vec_lock);
1922 if (po->pg_vec == NULL) 1927 if (po->pg_vec == NULL)
1923 goto out; 1928 goto out;
1924 if (size != po->pg_vec_len*po->pg_vec_pages*PAGE_SIZE) 1929 if (size != po->pg_vec_len*po->pg_vec_pages*PAGE_SIZE)
@@ -1941,7 +1946,7 @@ static int packet_mmap(struct file *file, struct socket *sock, struct vm_area_st
1941 err = 0; 1946 err = 0;
1942 1947
1943out: 1948out:
1944 release_sock(sk); 1949 mutex_unlock(&po->pg_vec_lock);
1945 return err; 1950 return err;
1946} 1951}
1947#endif 1952#endif
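The packet_lookup_frame() change above is a pure precedence fix: != binds tighter than ?:, so without parentheses the old test parsed as "(status != tp_status) ? TP_STATUS_USER : TP_STATUS_KERNEL", a raw comparison of the status words rather than a comparison against the frame's effective user/kernel state. A compileable illustration of the two parses (flag values reduced to small constants for the demo):

#include <stdio.h>

#define TP_STATUS_KERNEL 0
#define TP_STATUS_USER   1
#define TP_STATUS_COPY   2   /* an extra flag bit that can ride along */

int main(void)
{
    int tp_status = TP_STATUS_USER | TP_STATUS_COPY; /* frame owned by user */
    int status    = TP_STATUS_USER;                  /* caller wants user   */

    /* Old parse: (status != tp_status) ? USER : KERNEL -> 1, frame skipped. */
    int old_test = status != tp_status ? TP_STATUS_USER : TP_STATUS_KERNEL;

    /* Fixed parse: compare against the frame's effective USER/KERNEL state. */
    int new_test = status != (tp_status ? TP_STATUS_USER : TP_STATUS_KERNEL);

    printf("old=%d fixed=%d\n", old_test, new_test);   /* old=1 fixed=0 */
    return 0;
}

With an extra flag bit set, the unparenthesized form wrongly rejects a frame that is in the requested state; the parenthesized form ignores the auxiliary bits, as intended.
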
diff --git a/net/phonet/pep-gprs.c b/net/phonet/pep-gprs.c
index 6a91a32a80c1..4aa888584d20 100644
--- a/net/phonet/pep-gprs.c
+++ b/net/phonet/pep-gprs.c
@@ -207,7 +207,6 @@ static int gprs_xmit(struct sk_buff *skb, struct net_device *dev)
207 dev->name, err); 207 dev->name, err);
208 dev->stats.tx_aborted_errors++; 208 dev->stats.tx_aborted_errors++;
209 dev->stats.tx_errors++; 209 dev->stats.tx_errors++;
210 dev_kfree_skb(skb);
211 } else { 210 } else {
212 dev->stats.tx_packets++; 211 dev->stats.tx_packets++;
213 dev->stats.tx_bytes += len; 212 dev->stats.tx_bytes += len;
diff --git a/net/phonet/pep.c b/net/phonet/pep.c
index bb3e67849b38..8ad2b5333881 100644
--- a/net/phonet/pep.c
+++ b/net/phonet/pep.c
@@ -553,7 +553,7 @@ static int pep_do_rcv(struct sock *sk, struct sk_buff *skb)
553{ 553{
554 struct pep_sock *pn = pep_sk(sk); 554 struct pep_sock *pn = pep_sk(sk);
555 struct sock *sknode; 555 struct sock *sknode;
556 struct pnpipehdr *hdr = pnp_hdr(skb); 556 struct pnpipehdr *hdr;
557 struct sockaddr_pn dst; 557 struct sockaddr_pn dst;
558 int err = NET_RX_SUCCESS; 558 int err = NET_RX_SUCCESS;
559 u8 pipe_handle; 559 u8 pipe_handle;
diff --git a/net/rxrpc/af_rxrpc.c b/net/rxrpc/af_rxrpc.c
index d7d2bed7a699..eac5e7bb7365 100644
--- a/net/rxrpc/af_rxrpc.c
+++ b/net/rxrpc/af_rxrpc.c
@@ -284,13 +284,13 @@ struct rxrpc_call *rxrpc_kernel_begin_call(struct socket *sock,
284 if (IS_ERR(trans)) { 284 if (IS_ERR(trans)) {
285 call = ERR_CAST(trans); 285 call = ERR_CAST(trans);
286 trans = NULL; 286 trans = NULL;
287 goto out; 287 goto out_notrans;
288 } 288 }
289 } else { 289 } else {
290 trans = rx->trans; 290 trans = rx->trans;
291 if (!trans) { 291 if (!trans) {
292 call = ERR_PTR(-ENOTCONN); 292 call = ERR_PTR(-ENOTCONN);
293 goto out; 293 goto out_notrans;
294 } 294 }
295 atomic_inc(&trans->usage); 295 atomic_inc(&trans->usage);
296 } 296 }
@@ -315,6 +315,7 @@ struct rxrpc_call *rxrpc_kernel_begin_call(struct socket *sock,
315 rxrpc_put_bundle(trans, bundle); 315 rxrpc_put_bundle(trans, bundle);
316out: 316out:
317 rxrpc_put_transport(trans); 317 rxrpc_put_transport(trans);
318out_notrans:
318 release_sock(&rx->sk); 319 release_sock(&rx->sk);
319 _leave(" = %p", call); 320 _leave(" = %p", call);
320 return call; 321 return call;
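The out_notrans label above exists so that failures which happen before a transport reference is taken skip the rxrpc_put_transport() call on the way out. A generic sketch of that "unwind only what you acquired" goto layout (acquire, release and do_call are placeholders, not rxrpc functions):

#include <stdio.h>
#include <stdlib.h>

struct resource { int id; };

static struct resource *acquire(int id)
{
    struct resource *r = malloc(sizeof(*r));

    if (r)
        r->id = id;
    return r;
}

static void release(struct resource *r)
{
    free(r);
}

/* Each exit label releases exactly what was acquired before the failure. */
static int do_call(int make_trans_fail, int make_bundle_fail)
{
    struct resource *trans, *bundle;
    int err;

    trans = make_trans_fail ? NULL : acquire(1);
    if (!trans) {
        err = -1;
        goto out_notrans;   /* no transport reference was taken */
    }

    bundle = make_bundle_fail ? NULL : acquire(2);
    if (!bundle) {
        err = -1;
        goto out;           /* must still drop the transport reference */
    }

    printf("call ran\n");
    err = 0;
    release(bundle);
out:
    release(trans);
out_notrans:
    return err;
}

int main(void)
{
    do_call(0, 0);
    do_call(1, 0);   /* jumps past release(trans); nothing was acquired */
    do_call(0, 1);   /* releases trans only */
    return 0;
}
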
diff --git a/net/sched/act_police.c b/net/sched/act_police.c
index 5c72a116b1a4..f8f047b61245 100644
--- a/net/sched/act_police.c
+++ b/net/sched/act_police.c
@@ -183,13 +183,6 @@ override:
183 if (R_tab == NULL) 183 if (R_tab == NULL)
184 goto failure; 184 goto failure;
185 185
186 if (!est && (ret == ACT_P_CREATED ||
187 !gen_estimator_active(&police->tcf_bstats,
188 &police->tcf_rate_est))) {
189 err = -EINVAL;
190 goto failure;
191 }
192
193 if (parm->peakrate.rate) { 186 if (parm->peakrate.rate) {
194 P_tab = qdisc_get_rtab(&parm->peakrate, 187 P_tab = qdisc_get_rtab(&parm->peakrate,
195 tb[TCA_POLICE_PEAKRATE]); 188 tb[TCA_POLICE_PEAKRATE]);
@@ -205,6 +198,12 @@ override:
205 &police->tcf_lock, est); 198 &police->tcf_lock, est);
206 if (err) 199 if (err)
207 goto failure_unlock; 200 goto failure_unlock;
201 } else if (tb[TCA_POLICE_AVRATE] &&
202 (ret == ACT_P_CREATED ||
203 !gen_estimator_active(&police->tcf_bstats,
204 &police->tcf_rate_est))) {
205 err = -EINVAL;
206 goto failure_unlock;
208 } 207 }
209 208
210 /* No failure allowed after this point */ 209 /* No failure allowed after this point */
diff --git a/net/sched/sch_drr.c b/net/sched/sch_drr.c
index f6b4fa97df70..e36e94ab4e10 100644
--- a/net/sched/sch_drr.c
+++ b/net/sched/sch_drr.c
@@ -66,11 +66,15 @@ static int drr_change_class(struct Qdisc *sch, u32 classid, u32 parentid,
66{ 66{
67 struct drr_sched *q = qdisc_priv(sch); 67 struct drr_sched *q = qdisc_priv(sch);
68 struct drr_class *cl = (struct drr_class *)*arg; 68 struct drr_class *cl = (struct drr_class *)*arg;
69 struct nlattr *opt = tca[TCA_OPTIONS];
69 struct nlattr *tb[TCA_DRR_MAX + 1]; 70 struct nlattr *tb[TCA_DRR_MAX + 1];
70 u32 quantum; 71 u32 quantum;
71 int err; 72 int err;
72 73
73 err = nla_parse_nested(tb, TCA_DRR_MAX, tca[TCA_OPTIONS], drr_policy); 74 if (!opt)
75 return -EINVAL;
76
77 err = nla_parse_nested(tb, TCA_DRR_MAX, opt, drr_policy);
74 if (err < 0) 78 if (err < 0)
75 return err; 79 return err;
76 80
diff --git a/net/sctp/endpointola.c b/net/sctp/endpointola.c
index 4c8d9f45ce09..905fda582b92 100644
--- a/net/sctp/endpointola.c
+++ b/net/sctp/endpointola.c
@@ -111,7 +111,8 @@ static struct sctp_endpoint *sctp_endpoint_init(struct sctp_endpoint *ep,
111 if (sctp_addip_enable) { 111 if (sctp_addip_enable) {
112 auth_chunks->chunks[0] = SCTP_CID_ASCONF; 112 auth_chunks->chunks[0] = SCTP_CID_ASCONF;
113 auth_chunks->chunks[1] = SCTP_CID_ASCONF_ACK; 113 auth_chunks->chunks[1] = SCTP_CID_ASCONF_ACK;
114 auth_chunks->param_hdr.length += htons(2); 114 auth_chunks->param_hdr.length =
115 htons(sizeof(sctp_paramhdr_t) + 2);
115 } 116 }
116 } 117 }
117 118
diff --git a/net/sctp/protocol.c b/net/sctp/protocol.c
index b78e3be69013..c4986d0f7419 100644
--- a/net/sctp/protocol.c
+++ b/net/sctp/protocol.c
@@ -717,15 +717,20 @@ static int sctp_inetaddr_event(struct notifier_block *this, unsigned long ev,
717static int sctp_ctl_sock_init(void) 717static int sctp_ctl_sock_init(void)
718{ 718{
719 int err; 719 int err;
720 sa_family_t family; 720 sa_family_t family = PF_INET;
721 721
722 if (sctp_get_pf_specific(PF_INET6)) 722 if (sctp_get_pf_specific(PF_INET6))
723 family = PF_INET6; 723 family = PF_INET6;
724 else
725 family = PF_INET;
726 724
727 err = inet_ctl_sock_create(&sctp_ctl_sock, family, 725 err = inet_ctl_sock_create(&sctp_ctl_sock, family,
728 SOCK_SEQPACKET, IPPROTO_SCTP, &init_net); 726 SOCK_SEQPACKET, IPPROTO_SCTP, &init_net);
727
728 /* If IPv6 socket could not be created, try the IPv4 socket */
729 if (err < 0 && family == PF_INET6)
730 err = inet_ctl_sock_create(&sctp_ctl_sock, AF_INET,
731 SOCK_SEQPACKET, IPPROTO_SCTP,
732 &init_net);
733
729 if (err < 0) { 734 if (err < 0) {
730 printk(KERN_ERR 735 printk(KERN_ERR
731 "SCTP: Failed to create the SCTP control socket.\n"); 736 "SCTP: Failed to create the SCTP control socket.\n");
@@ -1322,9 +1327,8 @@ SCTP_STATIC __init int sctp_init(void)
1322out: 1327out:
1323 return status; 1328 return status;
1324err_v6_add_protocol: 1329err_v6_add_protocol:
1325 sctp_v6_del_protocol();
1326err_add_protocol:
1327 sctp_v4_del_protocol(); 1330 sctp_v4_del_protocol();
1331err_add_protocol:
1328 inet_ctl_sock_destroy(sctp_ctl_sock); 1332 inet_ctl_sock_destroy(sctp_ctl_sock);
1329err_ctl_sock_init: 1333err_ctl_sock_init:
1330 sctp_v6_protosw_exit(); 1334 sctp_v6_protosw_exit();
@@ -1335,7 +1339,6 @@ err_protosw_init:
1335 sctp_v4_pf_exit(); 1339 sctp_v4_pf_exit();
1336 sctp_v6_pf_exit(); 1340 sctp_v6_pf_exit();
1337 sctp_sysctl_unregister(); 1341 sctp_sysctl_unregister();
1338 list_del(&sctp_af_inet.list);
1339 free_pages((unsigned long)sctp_port_hashtable, 1342 free_pages((unsigned long)sctp_port_hashtable,
1340 get_order(sctp_port_hashsize * 1343 get_order(sctp_port_hashsize *
1341 sizeof(struct sctp_bind_hashbucket))); 1344 sizeof(struct sctp_bind_hashbucket)));
@@ -1383,7 +1386,6 @@ SCTP_STATIC __exit void sctp_exit(void)
1383 sctp_v4_pf_exit(); 1386 sctp_v4_pf_exit();
1384 1387
1385 sctp_sysctl_unregister(); 1388 sctp_sysctl_unregister();
1386 list_del(&sctp_af_inet.list);
1387 1389
1388 free_pages((unsigned long)sctp_assoc_hashtable, 1390 free_pages((unsigned long)sctp_assoc_hashtable,
1389 get_order(sctp_assoc_hashsize * 1391 get_order(sctp_assoc_hashsize *
diff --git a/net/sctp/sm_sideeffect.c b/net/sctp/sm_sideeffect.c
index e1d6076b4f59..b5495aecab60 100644
--- a/net/sctp/sm_sideeffect.c
+++ b/net/sctp/sm_sideeffect.c
@@ -787,36 +787,48 @@ static void sctp_cmd_process_operr(sctp_cmd_seq_t *cmds,
787 struct sctp_association *asoc, 787 struct sctp_association *asoc,
788 struct sctp_chunk *chunk) 788 struct sctp_chunk *chunk)
789{ 789{
790 struct sctp_operr_chunk *operr_chunk;
791 struct sctp_errhdr *err_hdr; 790 struct sctp_errhdr *err_hdr;
791 struct sctp_ulpevent *ev;
792 792
793 operr_chunk = (struct sctp_operr_chunk *)chunk->chunk_hdr; 793 while (chunk->chunk_end > chunk->skb->data) {
794 err_hdr = &operr_chunk->err_hdr; 794 err_hdr = (struct sctp_errhdr *)(chunk->skb->data);
795 795
796 switch (err_hdr->cause) { 796 ev = sctp_ulpevent_make_remote_error(asoc, chunk, 0,
797 case SCTP_ERROR_UNKNOWN_CHUNK: 797 GFP_ATOMIC);
798 { 798 if (!ev)
799 struct sctp_chunkhdr *unk_chunk_hdr; 799 return;
800 800
801 unk_chunk_hdr = (struct sctp_chunkhdr *)err_hdr->variable; 801 sctp_ulpq_tail_event(&asoc->ulpq, ev);
802 switch (unk_chunk_hdr->type) { 802
803 /* ADDIP 4.1 A9) If the peer responds to an ASCONF with an 803 switch (err_hdr->cause) {
804 * ERROR chunk reporting that it did not recognized the ASCONF 804 case SCTP_ERROR_UNKNOWN_CHUNK:
805 * chunk type, the sender of the ASCONF MUST NOT send any 805 {
806 * further ASCONF chunks and MUST stop its T-4 timer. 806 sctp_chunkhdr_t *unk_chunk_hdr;
807 */ 807
808 case SCTP_CID_ASCONF: 808 unk_chunk_hdr = (sctp_chunkhdr_t *)err_hdr->variable;
809 asoc->peer.asconf_capable = 0; 809 switch (unk_chunk_hdr->type) {
810 sctp_add_cmd_sf(cmds, SCTP_CMD_TIMER_STOP, 810 /* ADDIP 4.1 A9) If the peer responds to an ASCONF with
811 * an ERROR chunk reporting that it did not recognized
812 * the ASCONF chunk type, the sender of the ASCONF MUST
813 * NOT send any further ASCONF chunks and MUST stop its
814 * T-4 timer.
815 */
816 case SCTP_CID_ASCONF:
817 if (asoc->peer.asconf_capable == 0)
818 break;
819
820 asoc->peer.asconf_capable = 0;
821 sctp_add_cmd_sf(cmds, SCTP_CMD_TIMER_STOP,
811 SCTP_TO(SCTP_EVENT_TIMEOUT_T4_RTO)); 822 SCTP_TO(SCTP_EVENT_TIMEOUT_T4_RTO));
823 break;
824 default:
825 break;
826 }
812 break; 827 break;
828 }
813 default: 829 default:
814 break; 830 break;
815 } 831 }
816 break;
817 }
818 default:
819 break;
820 } 832 }
821} 833}
822 834
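The reworked sctp_cmd_process_operr() above now walks every error cause carried in the ERROR chunk ("while (chunk->chunk_end > chunk->skb->data)") instead of acting on the first one only. A standalone sketch of the same TLV walk over cause headers (the 16-bit cause/length layout and 4-byte padding follow the SCTP error-cause format; walk_causes and the sample buffer are illustrative):

#include <arpa/inet.h>
#include <stdint.h>
#include <stdio.h>
#include <string.h>

/* SCTP-style error cause header: 16-bit cause code, 16-bit total length. */
struct errhdr {
    uint16_t cause;
    uint16_t length;     /* covers this header plus value, network order */
};

/* Walk every cause in the chunk, like the new sctp_cmd_process_operr(). */
static void walk_causes(const uint8_t *data, const uint8_t *end)
{
    while (data + sizeof(struct errhdr) <= end) {
        struct errhdr eh;
        uint16_t len;

        memcpy(&eh, data, sizeof(eh));          /* avoid unaligned access */
        len = ntohs(eh.length);
        if (len < sizeof(eh) || data + len > end)
            break;                              /* malformed, stop walking */
        printf("cause %d, %d bytes\n", ntohs(eh.cause), len);
        data += (len + 3) & ~3u;                /* causes are 4-byte padded */
    }
}

int main(void)
{
    /* Two example causes: code 6 ("unrecognized chunk type", 8 bytes)
     * followed by a second, minimal 4-byte cause. */
    uint8_t buf[] = {
        0x00, 0x06, 0x00, 0x08,  0xc1, 0x00, 0x00, 0x04,
        0x00, 0x01, 0x00, 0x04,
    };

    walk_causes(buf, buf + sizeof(buf));
    return 0;
}
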
diff --git a/net/sctp/sm_statefuns.c b/net/sctp/sm_statefuns.c
index 3a0cd075914f..f88dfded0e3a 100644
--- a/net/sctp/sm_statefuns.c
+++ b/net/sctp/sm_statefuns.c
@@ -3163,7 +3163,6 @@ sctp_disposition_t sctp_sf_operr_notify(const struct sctp_endpoint *ep,
3163 sctp_cmd_seq_t *commands) 3163 sctp_cmd_seq_t *commands)
3164{ 3164{
3165 struct sctp_chunk *chunk = arg; 3165 struct sctp_chunk *chunk = arg;
3166 struct sctp_ulpevent *ev;
3167 3166
3168 if (!sctp_vtag_verify(chunk, asoc)) 3167 if (!sctp_vtag_verify(chunk, asoc))
3169 return sctp_sf_pdiscard(ep, asoc, type, arg, commands); 3168 return sctp_sf_pdiscard(ep, asoc, type, arg, commands);
@@ -3173,21 +3172,10 @@ sctp_disposition_t sctp_sf_operr_notify(const struct sctp_endpoint *ep,
3173 return sctp_sf_violation_chunklen(ep, asoc, type, arg, 3172 return sctp_sf_violation_chunklen(ep, asoc, type, arg,
3174 commands); 3173 commands);
3175 3174
3176 while (chunk->chunk_end > chunk->skb->data) { 3175 sctp_add_cmd_sf(commands, SCTP_CMD_PROCESS_OPERR,
3177 ev = sctp_ulpevent_make_remote_error(asoc, chunk, 0, 3176 SCTP_CHUNK(chunk));
3178 GFP_ATOMIC);
3179 if (!ev)
3180 goto nomem;
3181 3177
3182 sctp_add_cmd_sf(commands, SCTP_CMD_EVENT_ULP,
3183 SCTP_ULPEVENT(ev));
3184 sctp_add_cmd_sf(commands, SCTP_CMD_PROCESS_OPERR,
3185 SCTP_CHUNK(chunk));
3186 }
3187 return SCTP_DISPOSITION_CONSUME; 3178 return SCTP_DISPOSITION_CONSUME;
3188
3189nomem:
3190 return SCTP_DISPOSITION_NOMEM;
3191} 3179}
3192 3180
3193/* 3181/*
diff --git a/net/sunrpc/Kconfig b/net/sunrpc/Kconfig
index dcef600d0bf5..5592883e1e4a 100644
--- a/net/sunrpc/Kconfig
+++ b/net/sunrpc/Kconfig
@@ -6,7 +6,7 @@ config SUNRPC_GSS
6 6
7config SUNRPC_XPRT_RDMA 7config SUNRPC_XPRT_RDMA
8 tristate 8 tristate
9 depends on SUNRPC && INFINIBAND && EXPERIMENTAL 9 depends on SUNRPC && INFINIBAND && INFINIBAND_ADDR_TRANS && EXPERIMENTAL
10 default SUNRPC && INFINIBAND 10 default SUNRPC && INFINIBAND
11 help 11 help
12 This option allows the NFS client and server to support 12 This option allows the NFS client and server to support
diff --git a/net/sunrpc/sched.c b/net/sunrpc/sched.c
index 385f427bedad..ff50a0546865 100644
--- a/net/sunrpc/sched.c
+++ b/net/sunrpc/sched.c
@@ -293,11 +293,6 @@ static void rpc_make_runnable(struct rpc_task *task)
293 rpc_clear_queued(task); 293 rpc_clear_queued(task);
294 if (rpc_test_and_set_running(task)) 294 if (rpc_test_and_set_running(task))
295 return; 295 return;
296 /* We might have raced */
297 if (RPC_IS_QUEUED(task)) {
298 rpc_clear_running(task);
299 return;
300 }
301 if (RPC_IS_ASYNC(task)) { 296 if (RPC_IS_ASYNC(task)) {
302 int status; 297 int status;
303 298
@@ -607,7 +602,9 @@ void rpc_release_calldata(const struct rpc_call_ops *ops, void *calldata)
607 */ 602 */
608static void __rpc_execute(struct rpc_task *task) 603static void __rpc_execute(struct rpc_task *task)
609{ 604{
610 int status = 0; 605 struct rpc_wait_queue *queue;
606 int task_is_async = RPC_IS_ASYNC(task);
607 int status = 0;
611 608
612 dprintk("RPC: %5u __rpc_execute flags=0x%x\n", 609 dprintk("RPC: %5u __rpc_execute flags=0x%x\n",
613 task->tk_pid, task->tk_flags); 610 task->tk_pid, task->tk_flags);
@@ -647,15 +644,25 @@ static void __rpc_execute(struct rpc_task *task)
647 */ 644 */
648 if (!RPC_IS_QUEUED(task)) 645 if (!RPC_IS_QUEUED(task))
649 continue; 646 continue;
650 rpc_clear_running(task); 647 /*
651 if (RPC_IS_ASYNC(task)) { 648 * The queue->lock protects against races with
652 /* Careful! we may have raced... */ 649 * rpc_make_runnable().
653 if (RPC_IS_QUEUED(task)) 650 *
654 return; 651 * Note that once we clear RPC_TASK_RUNNING on an asynchronous
655 if (rpc_test_and_set_running(task)) 652 * rpc_task, rpc_make_runnable() can assign it to a
656 return; 653 * different workqueue. We therefore cannot assume that the
654 * rpc_task pointer may still be dereferenced.
655 */
656 queue = task->tk_waitqueue;
657 spin_lock_bh(&queue->lock);
658 if (!RPC_IS_QUEUED(task)) {
659 spin_unlock_bh(&queue->lock);
657 continue; 660 continue;
658 } 661 }
662 rpc_clear_running(task);
663 spin_unlock_bh(&queue->lock);
664 if (task_is_async)
665 return;
659 666
660 /* sync task: sleep here */ 667 /* sync task: sleep here */
661 dprintk("RPC: %5u sync task going to sleep\n", task->tk_pid); 668 dprintk("RPC: %5u sync task going to sleep\n", task->tk_pid);
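The __rpc_execute() change above makes the "still queued?" test and the clearing of RPC_TASK_RUNNING atomic with respect to rpc_make_runnable() by doing both under queue->lock, because once RUNNING is dropped on an async task another workqueue may pick it up and even free it. A condensed pthread analogue of that step (struct task, park_task and the field names are made up for the sketch):

#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>

struct task {
    pthread_mutex_t *queue_lock;   /* lock of the queue the task sleeps on */
    bool queued;
    bool running;
};

/*
 * Returns true when RUNNING was handed off: the task is still asleep on
 * its queue, so whoever wakes it will run it, and an async caller must
 * not dereference the task again after this returns.
 */
static bool park_task(struct task *t)
{
    pthread_mutex_t *lock = t->queue_lock;  /* saved, as with tk_waitqueue */
    bool parked;

    pthread_mutex_lock(lock);
    if (!t->queued) {
        parked = false;          /* raced with a wake-up: keep executing */
    } else {
        t->running = false;      /* the wake-up path now owns the task */
        parked = true;
    }
    pthread_mutex_unlock(lock);
    return parked;
}

int main(void)
{
    static pthread_mutex_t qlock = PTHREAD_MUTEX_INITIALIZER;
    struct task t = { .queue_lock = &qlock, .queued = true, .running = true };

    if (park_task(&t))
        printf("handed off; an async worker must not touch t now\n");
    return 0;
}
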
diff --git a/net/sunrpc/xprt.c b/net/sunrpc/xprt.c
index 29e401bb612e..62098d101a1f 100644
--- a/net/sunrpc/xprt.c
+++ b/net/sunrpc/xprt.c
@@ -663,7 +663,7 @@ void xprt_connect(struct rpc_task *task)
663 xprt, (xprt_connected(xprt) ? "is" : "is not")); 663 xprt, (xprt_connected(xprt) ? "is" : "is not"));
664 664
665 if (!xprt_bound(xprt)) { 665 if (!xprt_bound(xprt)) {
666 task->tk_status = -EIO; 666 task->tk_status = -EAGAIN;
667 return; 667 return;
668 } 668 }
669 if (!xprt_lock_write(xprt, task)) 669 if (!xprt_lock_write(xprt, task))
diff --git a/net/sunrpc/xprtsock.c b/net/sunrpc/xprtsock.c
index 5cbb404c4cdf..29c71e645b27 100644
--- a/net/sunrpc/xprtsock.c
+++ b/net/sunrpc/xprtsock.c
@@ -467,7 +467,7 @@ static int xs_sendpages(struct socket *sock, struct sockaddr *addr, int addrlen,
467 int err, sent = 0; 467 int err, sent = 0;
468 468
469 if (unlikely(!sock)) 469 if (unlikely(!sock))
470 return -ENOTCONN; 470 return -ENOTSOCK;
471 471
472 clear_bit(SOCK_ASYNC_NOSPACE, &sock->flags); 472 clear_bit(SOCK_ASYNC_NOSPACE, &sock->flags);
473 if (base != 0) { 473 if (base != 0) {
@@ -577,6 +577,8 @@ static int xs_udp_send_request(struct rpc_task *task)
577 req->rq_svec->iov_base, 577 req->rq_svec->iov_base,
578 req->rq_svec->iov_len); 578 req->rq_svec->iov_len);
579 579
580 if (!xprt_bound(xprt))
581 return -ENOTCONN;
580 status = xs_sendpages(transport->sock, 582 status = xs_sendpages(transport->sock,
581 xs_addr(xprt), 583 xs_addr(xprt),
582 xprt->addrlen, xdr, 584 xprt->addrlen, xdr,
@@ -594,6 +596,10 @@ static int xs_udp_send_request(struct rpc_task *task)
594 } 596 }
595 597
596 switch (status) { 598 switch (status) {
599 case -ENOTSOCK:
600 status = -ENOTCONN;
601 /* Should we call xs_close() here? */
602 break;
597 case -EAGAIN: 603 case -EAGAIN:
598 xs_nospace(task); 604 xs_nospace(task);
599 break; 605 break;
@@ -693,6 +699,10 @@ static int xs_tcp_send_request(struct rpc_task *task)
693 } 699 }
694 700
695 switch (status) { 701 switch (status) {
702 case -ENOTSOCK:
703 status = -ENOTCONN;
704 /* Should we call xs_close() here? */
705 break;
696 case -EAGAIN: 706 case -EAGAIN:
697 xs_nospace(task); 707 xs_nospace(task);
698 break; 708 break;
@@ -1523,7 +1533,7 @@ static void xs_udp_connect_worker4(struct work_struct *work)
1523 struct socket *sock = transport->sock; 1533 struct socket *sock = transport->sock;
1524 int err, status = -EIO; 1534 int err, status = -EIO;
1525 1535
1526 if (xprt->shutdown || !xprt_bound(xprt)) 1536 if (xprt->shutdown)
1527 goto out; 1537 goto out;
1528 1538
1529 /* Start by resetting any existing state */ 1539 /* Start by resetting any existing state */
@@ -1564,7 +1574,7 @@ static void xs_udp_connect_worker6(struct work_struct *work)
1564 struct socket *sock = transport->sock; 1574 struct socket *sock = transport->sock;
1565 int err, status = -EIO; 1575 int err, status = -EIO;
1566 1576
1567 if (xprt->shutdown || !xprt_bound(xprt)) 1577 if (xprt->shutdown)
1568 goto out; 1578 goto out;
1569 1579
1570 /* Start by resetting any existing state */ 1580 /* Start by resetting any existing state */
@@ -1648,6 +1658,9 @@ static int xs_tcp_finish_connecting(struct rpc_xprt *xprt, struct socket *sock)
1648 write_unlock_bh(&sk->sk_callback_lock); 1658 write_unlock_bh(&sk->sk_callback_lock);
1649 } 1659 }
1650 1660
1661 if (!xprt_bound(xprt))
1662 return -ENOTCONN;
1663
1651 /* Tell the socket layer to start connecting... */ 1664 /* Tell the socket layer to start connecting... */
1652 xprt->stat.connect_count++; 1665 xprt->stat.connect_count++;
1653 xprt->stat.connect_start = jiffies; 1666 xprt->stat.connect_start = jiffies;
@@ -1668,7 +1681,7 @@ static void xs_tcp_connect_worker4(struct work_struct *work)
1668 struct socket *sock = transport->sock; 1681 struct socket *sock = transport->sock;
1669 int err, status = -EIO; 1682 int err, status = -EIO;
1670 1683
1671 if (xprt->shutdown || !xprt_bound(xprt)) 1684 if (xprt->shutdown)
1672 goto out; 1685 goto out;
1673 1686
1674 if (!sock) { 1687 if (!sock) {
@@ -1728,7 +1741,7 @@ static void xs_tcp_connect_worker6(struct work_struct *work)
1728 struct socket *sock = transport->sock; 1741 struct socket *sock = transport->sock;
1729 int err, status = -EIO; 1742 int err, status = -EIO;
1730 1743
1731 if (xprt->shutdown || !xprt_bound(xprt)) 1744 if (xprt->shutdown)
1732 goto out; 1745 goto out;
1733 1746
1734 if (!sock) { 1747 if (!sock) {
diff --git a/net/wimax/debugfs.c b/net/wimax/debugfs.c
index 87cf4430079c..94d216a46407 100644
--- a/net/wimax/debugfs.c
+++ b/net/wimax/debugfs.c
@@ -28,17 +28,6 @@
28#include "debug-levels.h" 28#include "debug-levels.h"
29 29
30 30
31/* Debug framework control of debug levels */
32struct d_level D_LEVEL[] = {
33 D_SUBMODULE_DEFINE(debugfs),
34 D_SUBMODULE_DEFINE(id_table),
35 D_SUBMODULE_DEFINE(op_msg),
36 D_SUBMODULE_DEFINE(op_reset),
37 D_SUBMODULE_DEFINE(op_rfkill),
38 D_SUBMODULE_DEFINE(stack),
39};
40size_t D_LEVEL_SIZE = ARRAY_SIZE(D_LEVEL);
41
42#define __debugfs_register(prefix, name, parent) \ 31#define __debugfs_register(prefix, name, parent) \
43do { \ 32do { \
44 result = d_level_register_debugfs(prefix, name, parent); \ 33 result = d_level_register_debugfs(prefix, name, parent); \
diff --git a/net/wimax/id-table.c b/net/wimax/id-table.c
index 5e685f7eda90..72273abfcb16 100644
--- a/net/wimax/id-table.c
+++ b/net/wimax/id-table.c
@@ -94,12 +94,13 @@ struct wimax_dev *wimax_dev_get_by_genl_info(
94 list_for_each_entry(wimax_dev, &wimax_id_table, id_table_node) { 94 list_for_each_entry(wimax_dev, &wimax_id_table, id_table_node) {
95 if (wimax_dev->net_dev->ifindex == ifindex) { 95 if (wimax_dev->net_dev->ifindex == ifindex) {
96 dev_hold(wimax_dev->net_dev); 96 dev_hold(wimax_dev->net_dev);
97 break; 97 goto found;
98 } 98 }
99 } 99 }
100 if (wimax_dev == NULL) 100 wimax_dev = NULL;
101 d_printf(1, NULL, "wimax: no devices found with ifindex %d\n", 101 d_printf(1, NULL, "wimax: no devices found with ifindex %d\n",
102 ifindex); 102 ifindex);
103found:
103 spin_unlock(&wimax_id_table_lock); 104 spin_unlock(&wimax_id_table_lock);
104 d_fnend(3, NULL, "(info %p ifindex %d) = %p\n", 105 d_fnend(3, NULL, "(info %p ifindex %d) = %p\n",
105 info, ifindex, wimax_dev); 106 info, ifindex, wimax_dev);
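The goto-found rewrite above is needed because list_for_each_entry() never leaves its cursor NULL on a miss; it ends up pointing at container_of() of the list head, so the old "if (wimax_dev == NULL)" test could not fire. A userspace sketch with a sentinel-headed circular list showing why the hit case jumps to a label (and the miss case resets the cursor) instead of testing the cursor:

#include <stddef.h>
#include <stdio.h>
#include <string.h>

/* Circular list with a sentinel head, like the kernel's struct list_head. */
struct node { struct node *next; };
struct dev  { const char *name; struct node link; };

#define container_of(ptr, type, member) \
    ((type *)((char *)(ptr) - offsetof(type, member)))

/*
 * On a miss the cursor ends up as container_of(head) -- a garbage struct
 * dev, never NULL -- so the hit case goes to found: and the miss case
 * explicitly NULLs the result.
 */
static struct dev *find_dev(struct node *head, const char *name)
{
    struct dev *d;
    struct node *p;

    for (p = head->next; p != head; p = p->next) {
        d = container_of(p, struct dev, link);
        if (strcmp(d->name, name) == 0)
            goto found;
    }
    d = NULL;                    /* explicit miss, never test the cursor */
    printf("no device named %s\n", name);
found:
    return d;
}

int main(void)
{
    struct node head;
    struct dev a = { .name = "wmx0" }, b = { .name = "wmx1" };

    head.next = &a.link;
    a.link.next = &b.link;
    b.link.next = &head;

    printf("%p %p\n", (void *)find_dev(&head, "wmx1"),
           (void *)find_dev(&head, "usb0"));
    return 0;
}
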
diff --git a/net/wimax/stack.c b/net/wimax/stack.c
index d4da92f8981a..3869c0327882 100644
--- a/net/wimax/stack.c
+++ b/net/wimax/stack.c
@@ -516,6 +516,19 @@ void wimax_dev_rm(struct wimax_dev *wimax_dev)
516} 516}
517EXPORT_SYMBOL_GPL(wimax_dev_rm); 517EXPORT_SYMBOL_GPL(wimax_dev_rm);
518 518
519
520/* Debug framework control of debug levels */
521struct d_level D_LEVEL[] = {
522 D_SUBMODULE_DEFINE(debugfs),
523 D_SUBMODULE_DEFINE(id_table),
524 D_SUBMODULE_DEFINE(op_msg),
525 D_SUBMODULE_DEFINE(op_reset),
526 D_SUBMODULE_DEFINE(op_rfkill),
527 D_SUBMODULE_DEFINE(stack),
528};
529size_t D_LEVEL_SIZE = ARRAY_SIZE(D_LEVEL);
530
531
519struct genl_family wimax_gnl_family = { 532struct genl_family wimax_gnl_family = {
520 .id = GENL_ID_GENERATE, 533 .id = GENL_ID_GENERATE,
521 .name = "WiMAX", 534 .name = "WiMAX",
diff --git a/net/wireless/Kconfig b/net/wireless/Kconfig
index e28e2b8fa436..092ae6faccca 100644
--- a/net/wireless/Kconfig
+++ b/net/wireless/Kconfig
@@ -102,3 +102,13 @@ config LIB80211_CRYPT_CCMP
102 102
103config LIB80211_CRYPT_TKIP 103config LIB80211_CRYPT_TKIP
104 tristate 104 tristate
105
106config LIB80211_DEBUG
107 bool "lib80211 debugging messages"
108 depends on LIB80211
109 default n
110 ---help---
111 You can enable this if you want verbose debugging messages
112 from lib80211.
113
114 If unsure, say N.
diff --git a/net/wireless/lib80211_crypt_ccmp.c b/net/wireless/lib80211_crypt_ccmp.c
index db428194c16a..2301dc1edc4c 100644
--- a/net/wireless/lib80211_crypt_ccmp.c
+++ b/net/wireless/lib80211_crypt_ccmp.c
@@ -337,6 +337,7 @@ static int lib80211_ccmp_decrypt(struct sk_buff *skb, int hdr_len, void *priv)
337 pos += 8; 337 pos += 8;
338 338
339 if (ccmp_replay_check(pn, key->rx_pn)) { 339 if (ccmp_replay_check(pn, key->rx_pn)) {
340#ifdef CONFIG_LIB80211_DEBUG
340 if (net_ratelimit()) { 341 if (net_ratelimit()) {
341 printk(KERN_DEBUG "CCMP: replay detected: STA=%pM " 342 printk(KERN_DEBUG "CCMP: replay detected: STA=%pM "
342 "previous PN %02x%02x%02x%02x%02x%02x " 343 "previous PN %02x%02x%02x%02x%02x%02x "
@@ -346,6 +347,7 @@ static int lib80211_ccmp_decrypt(struct sk_buff *skb, int hdr_len, void *priv)
                                key->rx_pn[3], key->rx_pn[4], key->rx_pn[5],
                                pn[0], pn[1], pn[2], pn[3], pn[4], pn[5]);
                 }
+#endif
                 key->dot11RSNAStatsCCMPReplays++;
                 return -4;
         }
diff --git a/net/wireless/lib80211_crypt_tkip.c b/net/wireless/lib80211_crypt_tkip.c
index 7e8e22bfed90..c36287399d7e 100644
--- a/net/wireless/lib80211_crypt_tkip.c
+++ b/net/wireless/lib80211_crypt_tkip.c
@@ -465,12 +465,14 @@ static int lib80211_tkip_decrypt(struct sk_buff *skb, int hdr_len, void *priv)
         pos += 8;
 
         if (tkip_replay_check(iv32, iv16, tkey->rx_iv32, tkey->rx_iv16)) {
+#ifdef CONFIG_LIB80211_DEBUG
                 if (net_ratelimit()) {
                         printk(KERN_DEBUG "TKIP: replay detected: STA=%pM"
                                " previous TSC %08x%04x received TSC "
                                "%08x%04x\n", hdr->addr2,
                                tkey->rx_iv32, tkey->rx_iv16, iv32, iv16);
                 }
+#endif
                 tkey->dot11RSNAStatsTKIPReplays++;
                 return -4;
         }
@@ -505,10 +507,12 @@ static int lib80211_tkip_decrypt(struct sk_buff *skb, int hdr_len, void *priv)
                          * it needs to be recalculated for the next packet. */
                         tkey->rx_phase1_done = 0;
                 }
+#ifdef CONFIG_LIB80211_DEBUG
                 if (net_ratelimit()) {
                         printk(KERN_DEBUG "TKIP: ICV error detected: STA="
                                "%pM\n", hdr->addr2);
                 }
+#endif
                 tkey->dot11RSNAStatsTKIPICVErrors++;
                 return -5;
         }
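
Both lib80211 hunks follow the same pattern: only the rate-limited printk() calls are wrapped in #ifdef CONFIG_LIB80211_DEBUG (the symbol produced by the new Kconfig bool above), while the dot11RSNAStats* counters keep incrementing unconditionally, so accounting is unchanged when the option is off. A compilable user-space sketch of that split (the types and names below are illustrative, not lib80211's):

    #include <stdio.h>

    struct tkip_stats { unsigned long replays; };

    /* The statistic is always counted; only the verbose message is compiled
     * out unless the (assumed) CONFIG_LIB80211_DEBUG symbol is defined. */
    static int check_replay(struct tkip_stats *st, unsigned int rx_seq,
                            unsigned int last_seq)
    {
            if (rx_seq <= last_seq) {
    #ifdef CONFIG_LIB80211_DEBUG
                    fprintf(stderr, "replay: rx %u <= last %u\n", rx_seq, last_seq);
    #endif
                    st->replays++;          /* counted regardless of debug config */
                    return -4;
            }
            return 0;
    }

    int main(void)
    {
            struct tkip_stats st = { 0 };

            check_replay(&st, 5, 7);
            printf("replays: %lu\n", st.replays);
            return 0;
    }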
diff --git a/net/wireless/nl80211.c b/net/wireless/nl80211.c
index 1e728fff474e..31b807af3235 100644
--- a/net/wireless/nl80211.c
+++ b/net/wireless/nl80211.c
@@ -1908,6 +1908,11 @@ static int nl80211_get_mesh_params(struct sk_buff *skb,
         if (err)
                 return err;
 
+        if (!drv->ops->get_mesh_params) {
+                err = -EOPNOTSUPP;
+                goto out;
+        }
+
         /* Get the mesh params */
         rtnl_lock();
         err = drv->ops->get_mesh_params(&drv->wiphy, dev, &cur_params);
@@ -2017,6 +2022,11 @@ static int nl80211_set_mesh_params(struct sk_buff *skb, struct genl_info *info)
         if (err)
                 return err;
 
+        if (!drv->ops->set_mesh_params) {
+                err = -EOPNOTSUPP;
+                goto out;
+        }
+
         /* This makes sure that there aren't more than 32 mesh config
          * parameters (otherwise our bitfield scheme would not work.) */
         BUILD_BUG_ON(NL80211_MESHCONF_ATTR_MAX > 32);
@@ -2061,6 +2071,7 @@ static int nl80211_set_mesh_params(struct sk_buff *skb, struct genl_info *info)
         err = drv->ops->set_mesh_params(&drv->wiphy, dev, &cfg, mask);
         rtnl_unlock();
 
+ out:
         /* cleanup */
         cfg80211_put_dev(drv);
         dev_put(dev);
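
Both nl80211 handlers gain the same guard: if a driver leaves the optional mesh-params callback NULL, the request now fails with -EOPNOTSUPP through the existing cleanup path instead of calling through a NULL function pointer. A reduced sketch of that ops-table guard, with hypothetical types:

    #include <errno.h>
    #include <stdio.h>

    struct ops {
            int (*get_params)(int *out);
    };

    /* Bail out with -EOPNOTSUPP via the common cleanup path when a driver
     * leaves an optional callback unimplemented. */
    static int do_get_params(const struct ops *ops, int *out)
    {
            int err;

            if (!ops->get_params) {
                    err = -EOPNOTSUPP;
                    goto out;
            }
            err = ops->get_params(out);
    out:
            /* shared cleanup (reference drops, unlocks) would go here */
            return err;
    }

    int main(void)
    {
            struct ops ops = { .get_params = NULL };
            int v;

            printf("err = %d\n", do_get_params(&ops, &v));  /* -EOPNOTSUPP */
            return 0;
    }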
diff --git a/net/wireless/reg.c b/net/wireless/reg.c
index bc494cef2102..bd0a16c3de5e 100644
--- a/net/wireless/reg.c
+++ b/net/wireless/reg.c
@@ -380,7 +380,8 @@ static bool is_valid_reg_rule(const struct ieee80211_reg_rule *rule)
 
         freq_diff = freq_range->end_freq_khz - freq_range->start_freq_khz;
 
-        if (freq_diff <= 0 || freq_range->max_bandwidth_khz > freq_diff)
+        if (freq_range->end_freq_khz <= freq_range->start_freq_khz ||
+            freq_range->max_bandwidth_khz > freq_diff)
                 return false;
 
         return true;
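
The rewritten test matters because the frequency fields are unsigned (assumed 32-bit here, as in the kernel's regulatory structs): with an inverted range, end - start wraps to a huge positive value, so `freq_diff <= 0` can only ever catch the equal case. Comparing the endpoints directly catches both problems. A small demonstration:

    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
            uint32_t start = 5700000, end = 5600000;    /* inverted range */
            uint32_t diff = end - start;                /* wraps, never negative */

            printf("diff = %u, diff <= 0? %d\n", (unsigned)diff, diff <= 0);
            printf("end <= start? %d\n", end <= start); /* the fixed test */
            return 0;
    }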
@@ -498,6 +499,7 @@ static struct ieee80211_regdomain *country_ie_2_rd(
          * calculate the number of reg rules we will need. We will need one
          * for each channel subband */
         while (country_ie_len >= 3) {
+                int end_channel = 0;
                 struct ieee80211_country_ie_triplet *triplet =
                         (struct ieee80211_country_ie_triplet *) country_ie;
                 int cur_sub_max_channel = 0, cur_channel = 0;
@@ -509,9 +511,25 @@ static struct ieee80211_regdomain *country_ie_2_rd(
                         continue;
                 }
 
+                /* 2 GHz */
+                if (triplet->chans.first_channel <= 14)
+                        end_channel = triplet->chans.first_channel +
+                                triplet->chans.num_channels;
+                else
+                        /*
+                         * 5 GHz -- For example in country IEs if the first
+                         * channel given is 36 and the number of channels is 4
+                         * then the individual channel numbers defined for the
+                         * 5 GHz PHY by these parameters are: 36, 40, 44, and 48
+                         * and not 36, 37, 38, 39.
+                         *
+                         * See: http://tinyurl.com/11d-clarification
+                         */
+                        end_channel = triplet->chans.first_channel +
+                                (4 * (triplet->chans.num_channels - 1));
+
                 cur_channel = triplet->chans.first_channel;
-                cur_sub_max_channel = ieee80211_channel_to_frequency(
-                        cur_channel + triplet->chans.num_channels);
+                cur_sub_max_channel = end_channel;
 
                 /* Basic sanity check */
                 if (cur_sub_max_channel < cur_channel)
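
The moved block also changes what the sanity check compares: cur_sub_max_channel is now a channel number computed before the check, rather than a frequency returned by ieee80211_channel_to_frequency() being compared against a channel number. Worked example of the spacing arithmetic: 2 GHz channels are numbered consecutively, 5 GHz channels are 4 apart, so a triplet of (first_channel = 36, num_channels = 4) covers 36, 40, 44, 48. The later hunk in this file simply drops the now-duplicated comment from the second copy of the same computation.

    #include <stdio.h>

    /* End-channel arithmetic used above: consecutive numbering on 2 GHz,
     * a stride of 4 between channel numbers on 5 GHz. */
    static int end_channel(int first, int num)
    {
            if (first <= 14)                        /* 2 GHz band */
                    return first + num;
            return first + 4 * (num - 1);           /* 5 GHz band */
    }

    int main(void)
    {
            printf("2 GHz: first 1,  num 13 -> end %d\n", end_channel(1, 13));
            printf("5 GHz: first 36, num 4  -> end %d\n", end_channel(36, 4)); /* 48 */
            return 0;
    }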
@@ -590,15 +608,6 @@ static struct ieee80211_regdomain *country_ie_2_rd(
                         end_channel = triplet->chans.first_channel +
                                 triplet->chans.num_channels;
                 else
-                        /*
-                         * 5 GHz -- For example in country IEs if the first
-                         * channel given is 36 and the number of channels is 4
-                         * then the individual channel numbers defined for the
-                         * 5 GHz PHY by these parameters are: 36, 40, 44, and 48
-                         * and not 36, 37, 38, 39.
-                         *
-                         * See: http://tinyurl.com/11d-clarification
-                         */
                         end_channel = triplet->chans.first_channel +
                                 (4 * (triplet->chans.num_channels - 1));
 
@@ -1276,7 +1285,7 @@ static void reg_country_ie_process_debug(
         if (intersected_rd) {
                 printk(KERN_DEBUG "cfg80211: We intersect both of these "
                         "and get:\n");
-                print_regdomain_info(rd);
+                print_regdomain_info(intersected_rd);
                 return;
         }
         printk(KERN_DEBUG "cfg80211: Intersection between both failed\n");
diff --git a/net/xfrm/xfrm_state.c b/net/xfrm/xfrm_state.c
index e25ff62ab2a6..62a5425cc6aa 100644
--- a/net/xfrm/xfrm_state.c
+++ b/net/xfrm/xfrm_state.c
@@ -748,12 +748,51 @@ static void xfrm_hash_grow_check(struct net *net, int have_hash_collision)
                 schedule_work(&net->xfrm.state_hash_work);
 }
 
+static void xfrm_state_look_at(struct xfrm_policy *pol, struct xfrm_state *x,
+                               struct flowi *fl, unsigned short family,
+                               xfrm_address_t *daddr, xfrm_address_t *saddr,
+                               struct xfrm_state **best, int *acq_in_progress,
+                               int *error)
+{
+        /* Resolution logic:
+         * 1. There is a valid state with matching selector. Done.
+         * 2. Valid state with inappropriate selector. Skip.
+         *
+         * Entering area of "sysdeps".
+         *
+         * 3. If state is not valid, selector is temporary, it selects
+         *    only session which triggered previous resolution. Key
+         *    manager will do something to install a state with proper
+         *    selector.
+         */
+        if (x->km.state == XFRM_STATE_VALID) {
+                if ((x->sel.family &&
+                     !xfrm_selector_match(&x->sel, fl, x->sel.family)) ||
+                    !security_xfrm_state_pol_flow_match(x, pol, fl))
+                        return;
+
+                if (!*best ||
+                    (*best)->km.dying > x->km.dying ||
+                    ((*best)->km.dying == x->km.dying &&
+                     (*best)->curlft.add_time < x->curlft.add_time))
+                        *best = x;
+        } else if (x->km.state == XFRM_STATE_ACQ) {
+                *acq_in_progress = 1;
+        } else if (x->km.state == XFRM_STATE_ERROR ||
+                   x->km.state == XFRM_STATE_EXPIRED) {
+                if (xfrm_selector_match(&x->sel, fl, x->sel.family) &&
+                    security_xfrm_state_pol_flow_match(x, pol, fl))
+                        *error = -ESRCH;
+        }
+}
+
 struct xfrm_state *
 xfrm_state_find(xfrm_address_t *daddr, xfrm_address_t *saddr,
                 struct flowi *fl, struct xfrm_tmpl *tmpl,
                 struct xfrm_policy *pol, int *err,
                 unsigned short family)
 {
+        static xfrm_address_t saddr_wildcard = { };
         struct net *net = xp_net(pol);
         unsigned int h;
         struct hlist_node *entry;
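
Factoring the per-state checks into xfrm_state_look_at() lets the same resolution logic run over two hash buckets (see the second hunk below). The "best" preference it encodes: a state with a lower km.dying value wins, and among equally dying states the one added most recently wins. A reduced sketch of that comparator, using a simplified stand-in struct rather than the kernel type:

    #include <stdio.h>

    struct state { int dying; long add_time; const char *name; };

    /* Prefer lower dying; among equals, prefer the larger (newer) add_time. */
    static const struct state *pick_best(const struct state *best,
                                         const struct state *x)
    {
            if (!best ||
                best->dying > x->dying ||
                (best->dying == x->dying && best->add_time < x->add_time))
                    return x;
            return best;
    }

    int main(void)
    {
            struct state a = { 1, 100, "dying-old" };
            struct state b = { 0, 50,  "healthy-older" };
            struct state c = { 0, 80,  "healthy-newer" };
            const struct state *best = NULL;

            best = pick_best(best, &a);
            best = pick_best(best, &b);
            best = pick_best(best, &c);
            printf("best: %s\n", best->name);   /* healthy-newer */
            return 0;
    }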
@@ -773,40 +812,27 @@ xfrm_state_find(xfrm_address_t *daddr, xfrm_address_t *saddr,
                     xfrm_state_addr_check(x, daddr, saddr, family) &&
                     tmpl->mode == x->props.mode &&
                     tmpl->id.proto == x->id.proto &&
-                    (tmpl->id.spi == x->id.spi || !tmpl->id.spi)) {
-                        /* Resolution logic:
-                           1. There is a valid state with matching selector.
-                              Done.
-                           2. Valid state with inappropriate selector. Skip.
-
-                           Entering area of "sysdeps".
-
-                           3. If state is not valid, selector is temporary,
-                              it selects only session which triggered
-                              previous resolution. Key manager will do
-                              something to install a state with proper
-                              selector.
-                         */
-                        if (x->km.state == XFRM_STATE_VALID) {
-                                if ((x->sel.family && !xfrm_selector_match(&x->sel, fl, x->sel.family)) ||
-                                    !security_xfrm_state_pol_flow_match(x, pol, fl))
-                                        continue;
-                                if (!best ||
-                                    best->km.dying > x->km.dying ||
-                                    (best->km.dying == x->km.dying &&
-                                     best->curlft.add_time < x->curlft.add_time))
-                                        best = x;
-                        } else if (x->km.state == XFRM_STATE_ACQ) {
-                                acquire_in_progress = 1;
-                        } else if (x->km.state == XFRM_STATE_ERROR ||
-                                   x->km.state == XFRM_STATE_EXPIRED) {
-                                if (xfrm_selector_match(&x->sel, fl, x->sel.family) &&
-                                    security_xfrm_state_pol_flow_match(x, pol, fl))
-                                        error = -ESRCH;
-                        }
-                }
+                    (tmpl->id.spi == x->id.spi || !tmpl->id.spi))
+                        xfrm_state_look_at(pol, x, fl, family, daddr, saddr,
+                                           &best, &acquire_in_progress, &error);
+        }
+        if (best)
+                goto found;
+
+        h = xfrm_dst_hash(net, daddr, &saddr_wildcard, tmpl->reqid, family);
+        hlist_for_each_entry(x, entry, net->xfrm.state_bydst+h, bydst) {
+                if (x->props.family == family &&
+                    x->props.reqid == tmpl->reqid &&
+                    !(x->props.flags & XFRM_STATE_WILDRECV) &&
+                    xfrm_state_addr_check(x, daddr, saddr, family) &&
+                    tmpl->mode == x->props.mode &&
+                    tmpl->id.proto == x->id.proto &&
+                    (tmpl->id.spi == x->id.spi || !tmpl->id.spi))
+                        xfrm_state_look_at(pol, x, fl, family, daddr, saddr,
+                                           &best, &acquire_in_progress, &error);
         }
 
+found:
         x = best;
         if (!x && !error && !acquire_in_progress) {
                 if (tmpl->id.spi &&
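
With the helper in place, xfrm_state_find() becomes a two-pass lookup: the first walk matches the exact (daddr, saddr) hash bucket, and only if no usable state turned up does it retry a bucket keyed with the new all-zero saddr_wildcard, so states installed without a specific source address can still match. A reduced user-space sketch of that fallback shape (string addresses and the types are illustrative):

    #include <stdio.h>
    #include <string.h>

    struct entry { const char *dst; const char *src; const char *name; };

    /* First try an exact (dst, src) match, then fall back to entries stored
     * under a wildcard (all-zero) source address. */
    static const struct entry *lookup(const struct entry *tbl, int n,
                                      const char *dst, const char *src)
    {
            static const char saddr_wildcard[] = "0.0.0.0";
            const struct entry *e;
            int i;

            for (i = 0; i < n; i++) {           /* pass 1: exact source */
                    e = &tbl[i];
                    if (!strcmp(e->dst, dst) && !strcmp(e->src, src))
                            return e;
            }
            for (i = 0; i < n; i++) {           /* pass 2: wildcard source */
                    e = &tbl[i];
                    if (!strcmp(e->dst, dst) && !strcmp(e->src, saddr_wildcard))
                            return e;
            }
            return NULL;
    }

    int main(void)
    {
            static const struct entry tbl[] = {
                    { "192.0.2.1", "0.0.0.0",      "wildcard-sa" },
                    { "192.0.2.1", "198.51.100.7", "exact-sa" },
            };
            const struct entry *e = lookup(tbl, 2, "192.0.2.1", "203.0.113.9");

            printf("%s\n", e ? e->name : "(none)");  /* wildcard-sa */
            return 0;
    }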