Diffstat (limited to 'net')
-rw-r--r--	net/802/garp.c	4
-rw-r--r--	net/802/stp.c	4
-rw-r--r--	net/8021q/vlan.c	2
-rw-r--r--	net/8021q/vlan_core.c	2
-rw-r--r--	net/8021q/vlan_dev.c	1
-rw-r--r--	net/atm/br2684.c	2
-rw-r--r--	net/atm/lec.c	2
-rw-r--r--	net/bluetooth/bnep/netdev.c	2
-rw-r--r--	net/bridge/br_device.c	2
-rw-r--r--	net/bridge/br_if.c	6
-rw-r--r--	net/bridge/br_notify.c	7
-rw-r--r--	net/bridge/netfilter/ebtable_broute.c	4
-rw-r--r--	net/bridge/netfilter/ebtables.c	3
-rw-r--r--	net/caif/cfmuxl.c	6
-rw-r--r--	net/can/af_can.c	4
-rw-r--r--	net/core/Makefile	2
-rw-r--r--	net/core/dev.c	95
-rw-r--r--	net/core/dev_addr_lists.c	4
-rw-r--r--	net/core/dst.c	15
-rw-r--r--	net/core/fib_rules.c	4
-rw-r--r--	net/core/filter.c	2
-rw-r--r--	net/core/neighbour.c	40
-rw-r--r--	net/core/net-sysfs.c	8
-rw-r--r--	net/core/netpoll.c	4
-rw-r--r--	net/core/rtnetlink.c	1
-rw-r--r--	net/core/scm.c	2
-rw-r--r--	net/core/secure_seq.c	184
-rw-r--r--	net/core/skbuff.c	19
-rw-r--r--	net/core/sock.c	4
-rw-r--r--	net/dccp/ccids/ccid2.c	84
-rw-r--r--	net/dccp/ccids/ccid2.h	6
-rw-r--r--	net/dccp/dccp.h	1
-rw-r--r--	net/dccp/feat.c	202
-rw-r--r--	net/dccp/feat.h	1
-rw-r--r--	net/dccp/ipv4.c	1
-rw-r--r--	net/dccp/ipv6.c	9
-rw-r--r--	net/dccp/proto.c	1
-rw-r--r--	net/decnet/dn_dev.c	6
-rw-r--r--	net/dsa/slave.c	3
-rw-r--r--	net/ipv4/devinet.c	6
-rw-r--r--	net/ipv4/fib_trie.c	12
-rw-r--r--	net/ipv4/gre.c	4
-rw-r--r--	net/ipv4/igmp.c	14
-rw-r--r--	net/ipv4/inet_hashtables.c	1
-rw-r--r--	net/ipv4/inetpeer.c	1
-rw-r--r--	net/ipv4/ip_output.c	11
-rw-r--r--	net/ipv4/ip_sockglue.c	9
-rw-r--r--	net/ipv4/ipip.c	10
-rw-r--r--	net/ipv4/ipmr.c	8
-rw-r--r--	net/ipv4/netfilter.c	18
-rw-r--r--	net/ipv4/netfilter/nf_nat_amanda.c	4
-rw-r--r--	net/ipv4/netfilter/nf_nat_core.c	24
-rw-r--r--	net/ipv4/netfilter/nf_nat_ftp.c	4
-rw-r--r--	net/ipv4/netfilter/nf_nat_h323.c	36
-rw-r--r--	net/ipv4/netfilter/nf_nat_irc.c	4
-rw-r--r--	net/ipv4/netfilter/nf_nat_pptp.c	16
-rw-r--r--	net/ipv4/netfilter/nf_nat_proto_common.c	1
-rw-r--r--	net/ipv4/netfilter/nf_nat_sip.c	28
-rw-r--r--	net/ipv4/netfilter/nf_nat_snmp_basic.c	4
-rw-r--r--	net/ipv4/netfilter/nf_nat_standalone.c	6
-rw-r--r--	net/ipv4/netfilter/nf_nat_tftp.c	4
-rw-r--r--	net/ipv4/raw.c	3
-rw-r--r--	net/ipv4/route.c	30
-rw-r--r--	net/ipv4/syncookies.c	2
-rw-r--r--	net/ipv4/tcp_ipv4.c	7
-rw-r--r--	net/ipv4/udp.c	11
-rw-r--r--	net/ipv6/addrconf.c	75
-rw-r--r--	net/ipv6/datagram.c	11
-rw-r--r--	net/ipv6/exthdrs.c	7
-rw-r--r--	net/ipv6/icmp.c	25
-rw-r--r--	net/ipv6/inet6_connection_sock.c	9
-rw-r--r--	net/ipv6/inet6_hashtables.c	1
-rw-r--r--	net/ipv6/ip6_fib.c	2
-rw-r--r--	net/ipv6/ip6_output.c	13
-rw-r--r--	net/ipv6/ip6_tunnel.c	37
-rw-r--r--	net/ipv6/ipv6_sockglue.c	9
-rw-r--r--	net/ipv6/ndisc.c	31
-rw-r--r--	net/ipv6/raw.c	8
-rw-r--r--	net/ipv6/route.c	35
-rw-r--r--	net/ipv6/sit.c	15
-rw-r--r--	net/ipv6/syncookies.c	2
-rw-r--r--	net/ipv6/tcp_ipv6.c	7
-rw-r--r--	net/ipv6/udp.c	4
-rw-r--r--	net/irda/irlan/irlan_eth.c	2
-rw-r--r--	net/iucv/Kconfig	14
-rw-r--r--	net/iucv/af_iucv.c	868
-rw-r--r--	net/iucv/iucv.c	23
-rw-r--r--	net/mac80211/agg-rx.c	4
-rw-r--r--	net/mac80211/cfg.c	8
-rw-r--r--	net/mac80211/ibss.c	6
-rw-r--r--	net/mac80211/iface.c	6
-rw-r--r--	net/mac80211/mesh_pathtbl.c	4
-rw-r--r--	net/mac80211/sta_info.c	8
-rw-r--r--	net/netfilter/core.c	4
-rw-r--r--	net/netfilter/ipvs/ip_vs_ctl.c	1
-rw-r--r--	net/netfilter/nf_conntrack_core.c	12
-rw-r--r--	net/netfilter/nf_conntrack_ecache.c	8
-rw-r--r--	net/netfilter/nf_conntrack_extend.c	4
-rw-r--r--	net/netfilter/nf_conntrack_helper.c	6
-rw-r--r--	net/netfilter/nf_conntrack_netlink.c	6
-rw-r--r--	net/netfilter/nf_log.c	10
-rw-r--r--	net/netfilter/nf_queue.c	7
-rw-r--r--	net/netfilter/nfnetlink.c	6
-rw-r--r--	net/netlabel/Makefile	2
-rw-r--r--	net/netlabel/netlabel_addrlist.c	2
-rw-r--r--	net/netlabel/netlabel_addrlist.h	2
-rw-r--r--	net/netlabel/netlabel_cipso_v4.c	2
-rw-r--r--	net/netlabel/netlabel_cipso_v4.h	2
-rw-r--r--	net/netlabel/netlabel_domainhash.c	8
-rw-r--r--	net/netlabel/netlabel_domainhash.h	2
-rw-r--r--	net/netlabel/netlabel_kapi.c	22
-rw-r--r--	net/netlabel/netlabel_mgmt.c	2
-rw-r--r--	net/netlabel/netlabel_mgmt.h	2
-rw-r--r--	net/netlabel/netlabel_unlabeled.c	8
-rw-r--r--	net/netlabel/netlabel_unlabeled.h	2
-rw-r--r--	net/netlabel/netlabel_user.c	2
-rw-r--r--	net/netlabel/netlabel_user.h	2
-rw-r--r--	net/netlink/af_netlink.c	2
-rw-r--r--	net/phonet/af_phonet.c	4
-rw-r--r--	net/phonet/pn_dev.c	6
-rw-r--r--	net/phonet/socket.c	6
-rw-r--r--	net/sched/act_mirred.c	3
-rw-r--r--	net/sched/sch_prio.c	2
-rw-r--r--	net/sched/sch_sfq.c	7
-rw-r--r--	net/socket.c	77
-rw-r--r--	net/sunrpc/auth_gss/auth_gss.c	4
-rw-r--r--	net/sunrpc/xprt.c	1
-rw-r--r--	net/xfrm/xfrm_algo.c	4
-rw-r--r--	net/xfrm/xfrm_user.c	4
129 files changed, 1878 insertions(+), 627 deletions(-)
diff --git a/net/802/garp.c b/net/802/garp.c
index 16102951d36a..070bf4403bf8 100644
--- a/net/802/garp.c
+++ b/net/802/garp.c
@@ -553,7 +553,7 @@ static void garp_release_port(struct net_device *dev)
 		if (rtnl_dereference(port->applicants[i]))
 			return;
 	}
-	rcu_assign_pointer(dev->garp_port, NULL);
+	RCU_INIT_POINTER(dev->garp_port, NULL);
 	kfree_rcu(port, rcu);
 }
 
@@ -605,7 +605,7 @@ void garp_uninit_applicant(struct net_device *dev, struct garp_application *appl
 
 	ASSERT_RTNL();
 
-	rcu_assign_pointer(port->applicants[appl->type], NULL);
+	RCU_INIT_POINTER(port->applicants[appl->type], NULL);
 
 	/* Delete timer and generate a final TRANSMIT_PDU event to flush out
 	 * all pending messages before the applicant is gone. */
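Note on the rcu_assign_pointer() -> RCU_INIT_POINTER() conversions in this series: assigning NULL publishes no new data to readers, so the write memory barrier implied by rcu_assign_pointer() is unnecessary. A minimal sketch of the idiom, with a hypothetical "struct foo" that is not part of this patch set:

/* Illustrative only: publishing needs the barrier, clearing does not. */
#include <linux/rcupdate.h>
#include <linux/slab.h>

struct foo {
	int val;
};

static struct foo __rcu *global_foo;

static void publish_foo(struct foo *newf)
{
	rcu_assign_pointer(global_foo, newf);	/* readers may now see newf */
}

static void retire_foo(void)
{
	struct foo *oldf = rcu_dereference_protected(global_foo, 1);

	RCU_INIT_POINTER(global_foo, NULL);	/* storing NULL needs no barrier */
	synchronize_rcu();			/* wait for pre-existing readers */
	kfree(oldf);
}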
diff --git a/net/802/stp.c b/net/802/stp.c
index 978c30b1b36b..0e136ef1e4ba 100644
--- a/net/802/stp.c
+++ b/net/802/stp.c
@@ -88,9 +88,9 @@ void stp_proto_unregister(const struct stp_proto *proto)
 {
 	mutex_lock(&stp_proto_mutex);
 	if (is_zero_ether_addr(proto->group_address))
-		rcu_assign_pointer(stp_proto, NULL);
+		RCU_INIT_POINTER(stp_proto, NULL);
 	else
-		rcu_assign_pointer(garp_protos[proto->group_address[5] -
+		RCU_INIT_POINTER(garp_protos[proto->group_address[5] -
 					GARP_ADDR_MIN], NULL);
 	synchronize_rcu();
 
diff --git a/net/8021q/vlan.c b/net/8021q/vlan.c
index 8970ba139d73..5471628d3ffe 100644
--- a/net/8021q/vlan.c
+++ b/net/8021q/vlan.c
@@ -133,7 +133,7 @@ void unregister_vlan_dev(struct net_device *dev, struct list_head *head)
 	if (grp->nr_vlans == 0) {
 		vlan_gvrp_uninit_applicant(real_dev);
 
-		rcu_assign_pointer(real_dev->vlgrp, NULL);
+		RCU_INIT_POINTER(real_dev->vlgrp, NULL);
 
 		/* Free the group, after all cpu's are done. */
 		call_rcu(&grp->rcu, vlan_rcu_free);
diff --git a/net/8021q/vlan_core.c b/net/8021q/vlan_core.c
index 5f27f8e30254..f1f2f7bb6661 100644
--- a/net/8021q/vlan_core.c
+++ b/net/8021q/vlan_core.c
@@ -167,6 +167,8 @@ struct sk_buff *vlan_untag(struct sk_buff *skb)
 	if (unlikely(!skb))
 		goto err_free;
 
+	skb_reset_network_header(skb);
+	skb_reset_transport_header(skb);
 	return skb;
 
 err_free:
diff --git a/net/8021q/vlan_dev.c b/net/8021q/vlan_dev.c
index 9d40a071d038..eba705b92d6f 100644
--- a/net/8021q/vlan_dev.c
+++ b/net/8021q/vlan_dev.c
@@ -674,7 +674,6 @@ static const struct net_device_ops vlan_netdev_ops = {
 	.ndo_validate_addr	= eth_validate_addr,
 	.ndo_set_mac_address	= vlan_dev_set_mac_address,
 	.ndo_set_rx_mode	= vlan_dev_set_rx_mode,
-	.ndo_set_multicast_list	= vlan_dev_set_rx_mode,
 	.ndo_change_rx_flags	= vlan_dev_change_rx_flags,
 	.ndo_do_ioctl		= vlan_dev_ioctl,
 	.ndo_neigh_setup	= vlan_dev_neigh_setup,
diff --git a/net/atm/br2684.c b/net/atm/br2684.c
index 2252c2085dac..52cfd0c3ea71 100644
--- a/net/atm/br2684.c
+++ b/net/atm/br2684.c
@@ -242,8 +242,6 @@ static int br2684_xmit_vcc(struct sk_buff *skb, struct net_device *dev,
 		if (brdev->payload == p_bridged) {
 			skb_push(skb, 2);
 			memset(skb->data, 0, 2);
-		} else { /* p_routed */
-			skb_pull(skb, ETH_HLEN);
 		}
 	}
 	skb_debug(skb);
diff --git a/net/atm/lec.c b/net/atm/lec.c
index 215c9fad7cdf..f1964caa0f83 100644
--- a/net/atm/lec.c
+++ b/net/atm/lec.c
@@ -643,7 +643,7 @@ static const struct net_device_ops lec_netdev_ops = {
 	.ndo_start_xmit		= lec_start_xmit,
 	.ndo_change_mtu		= lec_change_mtu,
 	.ndo_tx_timeout		= lec_tx_timeout,
-	.ndo_set_multicast_list	= lec_set_multicast_list,
+	.ndo_set_rx_mode	= lec_set_multicast_list,
 };
 
 static const unsigned char lec_ctrl_magic[] = {
diff --git a/net/bluetooth/bnep/netdev.c b/net/bluetooth/bnep/netdev.c
index d4f5dff7c955..bc4086480d97 100644
--- a/net/bluetooth/bnep/netdev.c
+++ b/net/bluetooth/bnep/netdev.c
@@ -217,7 +217,7 @@ static const struct net_device_ops bnep_netdev_ops = {
 	.ndo_stop            = bnep_net_close,
 	.ndo_start_xmit      = bnep_net_xmit,
 	.ndo_validate_addr   = eth_validate_addr,
-	.ndo_set_multicast_list = bnep_net_set_mc_list,
+	.ndo_set_rx_mode     = bnep_net_set_mc_list,
 	.ndo_set_mac_address = bnep_net_set_mac_addr,
 	.ndo_tx_timeout      = bnep_net_timeout,
 	.ndo_change_mtu      = eth_change_mtu,
diff --git a/net/bridge/br_device.c b/net/bridge/br_device.c
index 32b8f9f7f79e..ee68eee79e52 100644
--- a/net/bridge/br_device.c
+++ b/net/bridge/br_device.c
@@ -304,7 +304,7 @@ static const struct net_device_ops br_netdev_ops = {
 	.ndo_start_xmit		 = br_dev_xmit,
 	.ndo_get_stats64	 = br_get_stats64,
 	.ndo_set_mac_address	 = br_set_mac_address,
-	.ndo_set_multicast_list	 = br_dev_set_multicast_list,
+	.ndo_set_rx_mode	 = br_dev_set_multicast_list,
 	.ndo_change_mtu		 = br_change_mtu,
 	.ndo_do_ioctl		 = br_dev_ioctl,
 #ifdef CONFIG_NET_POLL_CONTROLLER
diff --git a/net/bridge/br_if.c b/net/bridge/br_if.c
index 3176e2e13d9b..2cdf0070419f 100644
--- a/net/bridge/br_if.c
+++ b/net/bridge/br_if.c
@@ -417,6 +417,7 @@ put_back:
 int br_del_if(struct net_bridge *br, struct net_device *dev)
 {
 	struct net_bridge_port *p;
+	bool changed_addr;
 
 	p = br_port_get_rtnl(dev);
 	if (!p || p->br != br)
@@ -425,9 +426,12 @@ int br_del_if(struct net_bridge *br, struct net_device *dev)
 	del_nbp(p);
 
 	spin_lock_bh(&br->lock);
-	br_stp_recalculate_bridge_id(br);
+	changed_addr = br_stp_recalculate_bridge_id(br);
 	spin_unlock_bh(&br->lock);
 
+	if (changed_addr)
+		call_netdevice_notifiers(NETDEV_CHANGEADDR, br->dev);
+
 	netdev_update_features(br->dev);
 
 	return 0;
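With this change the bridge re-emits NETDEV_CHANGEADDR for br->dev whenever recalculating the bridge ID changes its MAC, so any netdevice notifier can react. A hypothetical listener (not part of this series) would look like the sketch below; in this kernel generation the notifier's ptr argument is the struct net_device itself.

#include <linux/netdevice.h>
#include <linux/notifier.h>

static int addr_change_event(struct notifier_block *nb,
			     unsigned long event, void *ptr)
{
	struct net_device *dev = ptr;

	if (event == NETDEV_CHANGEADDR)
		pr_info("%s: MAC address changed\n", dev->name);
	return NOTIFY_DONE;
}

static struct notifier_block addr_change_nb = {
	.notifier_call = addr_change_event,
};

/* call register_netdevice_notifier(&addr_change_nb) from module init */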
diff --git a/net/bridge/br_notify.c b/net/bridge/br_notify.c
index 6545ee9591d1..a76b62135558 100644
--- a/net/bridge/br_notify.c
+++ b/net/bridge/br_notify.c
@@ -34,6 +34,7 @@ static int br_device_event(struct notifier_block *unused, unsigned long event, v
 	struct net_device *dev = ptr;
 	struct net_bridge_port *p;
 	struct net_bridge *br;
+	bool changed_addr;
 	int err;
 
 	/* register of bridge completed, add sysfs entries */
@@ -57,8 +58,12 @@ static int br_device_event(struct notifier_block *unused, unsigned long event, v
 	case NETDEV_CHANGEADDR:
 		spin_lock_bh(&br->lock);
 		br_fdb_changeaddr(p, dev->dev_addr);
-		br_stp_recalculate_bridge_id(br);
+		changed_addr = br_stp_recalculate_bridge_id(br);
 		spin_unlock_bh(&br->lock);
 
+		if (changed_addr)
+			call_netdevice_notifiers(NETDEV_CHANGEADDR, br->dev);
+
 		break;
 
 	case NETDEV_CHANGE:
diff --git a/net/bridge/netfilter/ebtable_broute.c b/net/bridge/netfilter/ebtable_broute.c
index 1bcaf36ad612..40d8258bf74f 100644
--- a/net/bridge/netfilter/ebtable_broute.c
+++ b/net/bridge/netfilter/ebtable_broute.c
@@ -87,14 +87,14 @@ static int __init ebtable_broute_init(void)
 	if (ret < 0)
 		return ret;
 	/* see br_input.c */
-	rcu_assign_pointer(br_should_route_hook,
+	RCU_INIT_POINTER(br_should_route_hook,
 			   (br_should_route_hook_t *)ebt_broute);
 	return 0;
 }
 
 static void __exit ebtable_broute_fini(void)
 {
-	rcu_assign_pointer(br_should_route_hook, NULL);
+	RCU_INIT_POINTER(br_should_route_hook, NULL);
 	synchronize_net();
 	unregister_pernet_subsys(&broute_net_ops);
 }
diff --git a/net/bridge/netfilter/ebtables.c b/net/bridge/netfilter/ebtables.c
index 2b5ca1a0054d..5864cc491369 100644
--- a/net/bridge/netfilter/ebtables.c
+++ b/net/bridge/netfilter/ebtables.c
@@ -1198,7 +1198,8 @@ ebt_register_table(struct net *net, const struct ebt_table *input_table)
 
 	if (table->check && table->check(newinfo, table->valid_hooks)) {
 		BUGPRINT("The table doesn't like its own initial data, lol\n");
-		return ERR_PTR(-EINVAL);
+		ret = -EINVAL;
+		goto free_chainstack;
 	}
 
 	table->private = newinfo;
diff --git a/net/caif/cfmuxl.c b/net/caif/cfmuxl.c
index c23979e79dfa..b36f24a4c8e7 100644
--- a/net/caif/cfmuxl.c
+++ b/net/caif/cfmuxl.c
@@ -108,7 +108,7 @@ struct cflayer *cfmuxl_remove_dnlayer(struct cflayer *layr, u8 phyid)
 	int idx = phyid % DN_CACHE_SIZE;
 
 	spin_lock_bh(&muxl->transmit_lock);
-	rcu_assign_pointer(muxl->dn_cache[idx], NULL);
+	RCU_INIT_POINTER(muxl->dn_cache[idx], NULL);
 	dn = get_from_id(&muxl->frml_list, phyid);
 	if (dn == NULL)
 		goto out;
@@ -164,7 +164,7 @@ struct cflayer *cfmuxl_remove_uplayer(struct cflayer *layr, u8 id)
 	if (up == NULL)
 		goto out;
 
-	rcu_assign_pointer(muxl->up_cache[idx], NULL);
+	RCU_INIT_POINTER(muxl->up_cache[idx], NULL);
 	list_del_rcu(&up->node);
 out:
 	spin_unlock_bh(&muxl->receive_lock);
@@ -261,7 +261,7 @@ static void cfmuxl_ctrlcmd(struct cflayer *layr, enum caif_ctrlcmd ctrl,
 
 		idx = layer->id % UP_CACHE_SIZE;
 		spin_lock_bh(&muxl->receive_lock);
-		rcu_assign_pointer(muxl->up_cache[idx], NULL);
+		RCU_INIT_POINTER(muxl->up_cache[idx], NULL);
 		list_del_rcu(&layer->node);
 		spin_unlock_bh(&muxl->receive_lock);
 	}
diff --git a/net/can/af_can.c b/net/can/af_can.c
index 8ce926d3b2cb..b9efa944cab9 100644
--- a/net/can/af_can.c
+++ b/net/can/af_can.c
@@ -719,7 +719,7 @@ int can_proto_register(const struct can_proto *cp)
 		       proto);
 		err = -EBUSY;
 	} else
-		rcu_assign_pointer(proto_tab[proto], cp);
+		RCU_INIT_POINTER(proto_tab[proto], cp);
 
 	mutex_unlock(&proto_tab_lock);
 
@@ -740,7 +740,7 @@ void can_proto_unregister(const struct can_proto *cp)
 
 	mutex_lock(&proto_tab_lock);
 	BUG_ON(proto_tab[proto] != cp);
-	rcu_assign_pointer(proto_tab[proto], NULL);
+	RCU_INIT_POINTER(proto_tab[proto], NULL);
 	mutex_unlock(&proto_tab_lock);
 
 	synchronize_rcu();
diff --git a/net/core/Makefile b/net/core/Makefile
index 8a04dd22cf77..0d357b1c4e57 100644
--- a/net/core/Makefile
+++ b/net/core/Makefile
@@ -3,7 +3,7 @@
 #
 
 obj-y := sock.o request_sock.o skbuff.o iovec.o datagram.o stream.o scm.o \
-	 gen_stats.o gen_estimator.o net_namespace.o
+	 gen_stats.o gen_estimator.o net_namespace.o secure_seq.o
 
 obj-$(CONFIG_SYSCTL) += sysctl_net_core.o
 
diff --git a/net/core/dev.c b/net/core/dev.c
index 17d67b579beb..c2442b46646e 100644
--- a/net/core/dev.c
+++ b/net/core/dev.c
@@ -133,6 +133,8 @@
 #include <linux/pci.h>
 #include <linux/inetdevice.h>
 #include <linux/cpu_rmap.h>
+#include <linux/if_tunnel.h>
+#include <linux/if_pppox.h>
 
 #include "net-sysfs.h"
 
@@ -2519,24 +2521,29 @@ static inline void ____napi_schedule(struct softnet_data *sd,
 
 /*
  * __skb_get_rxhash: calculate a flow hash based on src/dst addresses
- * and src/dst port numbers. Returns a non-zero hash number on success
- * and 0 on failure.
+ * and src/dst port numbers. Sets rxhash in skb to non-zero hash value
+ * on success, zero indicates no valid hash. Also, sets l4_rxhash in skb
+ * if hash is a canonical 4-tuple hash over transport ports.
  */
-__u32 __skb_get_rxhash(struct sk_buff *skb)
+void __skb_get_rxhash(struct sk_buff *skb)
 {
 	int nhoff, hash = 0, poff;
 	const struct ipv6hdr *ip6;
 	const struct iphdr *ip;
+	const struct vlan_hdr *vlan;
 	u8 ip_proto;
-	u32 addr1, addr2, ihl;
+	u32 addr1, addr2;
+	u16 proto;
 	union {
 		u32 v32;
 		u16 v16[2];
 	} ports;
 
 	nhoff = skb_network_offset(skb);
+	proto = skb->protocol;
 
-	switch (skb->protocol) {
+again:
+	switch (proto) {
 	case __constant_htons(ETH_P_IP):
 		if (!pskb_may_pull(skb, sizeof(*ip) + nhoff))
 			goto done;
@@ -2548,7 +2555,7 @@ __u32 __skb_get_rxhash(struct sk_buff *skb)
 		ip_proto = ip->protocol;
 		addr1 = (__force u32) ip->saddr;
 		addr2 = (__force u32) ip->daddr;
-		ihl = ip->ihl;
+		nhoff += ip->ihl * 4;
 		break;
 	case __constant_htons(ETH_P_IPV6):
 		if (!pskb_may_pull(skb, sizeof(*ip6) + nhoff))
@@ -2558,20 +2565,62 @@ __u32 __skb_get_rxhash(struct sk_buff *skb)
 		ip_proto = ip6->nexthdr;
 		addr1 = (__force u32) ip6->saddr.s6_addr32[3];
 		addr2 = (__force u32) ip6->daddr.s6_addr32[3];
-		ihl = (40 >> 2);
+		nhoff += 40;
 		break;
+	case __constant_htons(ETH_P_8021Q):
+		if (!pskb_may_pull(skb, sizeof(*vlan) + nhoff))
+			goto done;
+		vlan = (const struct vlan_hdr *) (skb->data + nhoff);
+		proto = vlan->h_vlan_encapsulated_proto;
+		nhoff += sizeof(*vlan);
+		goto again;
+	case __constant_htons(ETH_P_PPP_SES):
+		if (!pskb_may_pull(skb, PPPOE_SES_HLEN + nhoff))
+			goto done;
+		proto = *((__be16 *) (skb->data + nhoff +
+				      sizeof(struct pppoe_hdr)));
+		nhoff += PPPOE_SES_HLEN;
+		goto again;
 	default:
 		goto done;
 	}
 
+	switch (ip_proto) {
+	case IPPROTO_GRE:
+		if (pskb_may_pull(skb, nhoff + 16)) {
+			u8 *h = skb->data + nhoff;
+			__be16 flags = *(__be16 *)h;
+
+			/*
+			 * Only look inside GRE if version zero and no
+			 * routing
+			 */
+			if (!(flags & (GRE_VERSION|GRE_ROUTING))) {
+				proto = *(__be16 *)(h + 2);
+				nhoff += 4;
+				if (flags & GRE_CSUM)
+					nhoff += 4;
+				if (flags & GRE_KEY)
+					nhoff += 4;
+				if (flags & GRE_SEQ)
+					nhoff += 4;
+				goto again;
+			}
+		}
+		break;
+	default:
+		break;
+	}
+
 	ports.v32 = 0;
 	poff = proto_ports_offset(ip_proto);
 	if (poff >= 0) {
-		nhoff += ihl * 4 + poff;
+		nhoff += poff;
 		if (pskb_may_pull(skb, nhoff + 4)) {
 			ports.v32 = * (__force u32 *) (skb->data + nhoff);
 			if (ports.v16[1] < ports.v16[0])
 				swap(ports.v16[0], ports.v16[1]);
+			skb->l4_rxhash = 1;
 		}
 	}
 
@@ -2584,7 +2633,7 @@ __u32 __skb_get_rxhash(struct sk_buff *skb)
 		hash = 1;
 
 done:
-	return hash;
+	skb->rxhash = hash;
 }
 EXPORT_SYMBOL(__skb_get_rxhash);
 
@@ -2673,13 +2722,13 @@ static int get_rps_cpu(struct net_device *dev, struct sk_buff *skb,
 	map = rcu_dereference(rxqueue->rps_map);
 	if (map) {
 		if (map->len == 1 &&
-		    !rcu_dereference_raw(rxqueue->rps_flow_table)) {
+		    !rcu_access_pointer(rxqueue->rps_flow_table)) {
 			tcpu = map->cpus[0];
 			if (cpu_online(tcpu))
 				cpu = tcpu;
 			goto done;
 		}
-	} else if (!rcu_dereference_raw(rxqueue->rps_flow_table)) {
+	} else if (!rcu_access_pointer(rxqueue->rps_flow_table)) {
 		goto done;
 	}
 
@@ -3094,8 +3143,8 @@ void netdev_rx_handler_unregister(struct net_device *dev)
 {
 
 	ASSERT_RTNL();
-	rcu_assign_pointer(dev->rx_handler, NULL);
-	rcu_assign_pointer(dev->rx_handler_data, NULL);
+	RCU_INIT_POINTER(dev->rx_handler, NULL);
+	RCU_INIT_POINTER(dev->rx_handler_data, NULL);
 }
 EXPORT_SYMBOL_GPL(netdev_rx_handler_unregister);
 
@@ -4489,9 +4538,7 @@ void __dev_set_rx_mode(struct net_device *dev)
 	if (!netif_device_present(dev))
 		return;
 
-	if (ops->ndo_set_rx_mode)
-		ops->ndo_set_rx_mode(dev);
-	else {
+	if (!(dev->priv_flags & IFF_UNICAST_FLT)) {
 		/* Unicast addresses changes may only happen under the rtnl,
 		 * therefore calling __dev_set_promiscuity here is safe.
 		 */
@@ -4502,10 +4549,10 @@ void __dev_set_rx_mode(struct net_device *dev)
 			__dev_set_promiscuity(dev, -1);
 			dev->uc_promisc = false;
 		}
-
-		if (ops->ndo_set_multicast_list)
-			ops->ndo_set_multicast_list(dev);
 	}
+
+	if (ops->ndo_set_rx_mode)
+		ops->ndo_set_rx_mode(dev);
 }
 
 void dev_set_rx_mode(struct net_device *dev)
@@ -4855,7 +4902,7 @@ static int dev_ifsioc(struct net *net, struct ifreq *ifr, unsigned int cmd)
 		return -EOPNOTSUPP;
 
 	case SIOCADDMULTI:
-		if ((!ops->ndo_set_multicast_list && !ops->ndo_set_rx_mode) ||
+		if (!ops->ndo_set_rx_mode ||
 		    ifr->ifr_hwaddr.sa_family != AF_UNSPEC)
 			return -EINVAL;
 		if (!netif_device_present(dev))
@@ -4863,7 +4910,7 @@ static int dev_ifsioc(struct net *net, struct ifreq *ifr, unsigned int cmd)
 		return dev_mc_add_global(dev, ifr->ifr_hwaddr.sa_data);
 
 	case SIOCDELMULTI:
-		if ((!ops->ndo_set_multicast_list && !ops->ndo_set_rx_mode) ||
+		if (!ops->ndo_set_rx_mode ||
 		    ifr->ifr_hwaddr.sa_family != AF_UNSPEC)
 			return -EINVAL;
 		if (!netif_device_present(dev))
@@ -5727,8 +5774,8 @@ void netdev_run_todo(void)
 
 		/* paranoia */
 		BUG_ON(netdev_refcnt_read(dev));
-		WARN_ON(rcu_dereference_raw(dev->ip_ptr));
-		WARN_ON(rcu_dereference_raw(dev->ip6_ptr));
+		WARN_ON(rcu_access_pointer(dev->ip_ptr));
+		WARN_ON(rcu_access_pointer(dev->ip6_ptr));
 		WARN_ON(dev->dn_ptr);
 
 		if (dev->destructor)
@@ -5932,7 +5979,7 @@ void free_netdev(struct net_device *dev)
 	kfree(dev->_rx);
 #endif
 
-	kfree(rcu_dereference_raw(dev->ingress_queue));
+	kfree(rcu_dereference_protected(dev->ingress_queue, 1));
 
 	/* Flush device addresses */
 	dev_addr_flush(dev);
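For context on the __skb_get_rxhash() changes above: after the header walk (VLAN, PPPoE and GRE unwrapping) the kernel folds source address, destination address and the canonically ordered port pair into a Jenkins hash seeded with a boot-time random value. A simplified sketch of that final step, with hypothetical parameter names and without the skb plumbing:

#include <linux/jhash.h>

static u32 flow_hash(u32 saddr, u32 daddr, u16 sport, u16 dport, u32 seed)
{
	u32 ports = ((u32)sport << 16) | dport;

	/* order the ports so both directions of a flow hash alike */
	if (dport < sport)
		ports = ((u32)dport << 16) | sport;

	/* never return 0: zero means "no valid hash" to callers */
	return jhash_3words(saddr, daddr, ports, seed) ? : 1;
}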
diff --git a/net/core/dev_addr_lists.c b/net/core/dev_addr_lists.c
index e2e66939ed00..283d1b863876 100644
--- a/net/core/dev_addr_lists.c
+++ b/net/core/dev_addr_lists.c
@@ -591,8 +591,8 @@ EXPORT_SYMBOL(dev_mc_del_global);
  * addresses that have no users left. The source device must be
  * locked by netif_tx_lock_bh.
  *
- * This function is intended to be called from the dev->set_multicast_list
- * or dev->set_rx_mode function of layered software devices.
+ * This function is intended to be called from the ndo_set_rx_mode
+ * function of layered software devices.
  */
 int dev_mc_sync(struct net_device *to, struct net_device *from)
 {
diff --git a/net/core/dst.c b/net/core/dst.c
index 14b33baf0733..d5e2c4c09107 100644
--- a/net/core/dst.c
+++ b/net/core/dst.c
@@ -171,7 +171,7 @@ void *dst_alloc(struct dst_ops *ops, struct net_device *dev,
 	dst_init_metrics(dst, dst_default_metrics, true);
 	dst->expires = 0UL;
 	dst->path = dst;
-	dst->_neighbour = NULL;
+	RCU_INIT_POINTER(dst->_neighbour, NULL);
 #ifdef CONFIG_XFRM
 	dst->xfrm = NULL;
 #endif
@@ -229,11 +229,11 @@ struct dst_entry *dst_destroy(struct dst_entry * dst)
 	smp_rmb();
 
 again:
-	neigh = dst->_neighbour;
+	neigh = rcu_dereference_protected(dst->_neighbour, 1);
 	child = dst->child;
 
 	if (neigh) {
-		dst->_neighbour = NULL;
+		RCU_INIT_POINTER(dst->_neighbour, NULL);
 		neigh_release(neigh);
 	}
 
@@ -360,14 +360,19 @@ static void dst_ifdown(struct dst_entry *dst, struct net_device *dev,
 	if (!unregister) {
 		dst->input = dst->output = dst_discard;
 	} else {
+		struct neighbour *neigh;
+
 		dst->dev = dev_net(dst->dev)->loopback_dev;
 		dev_hold(dst->dev);
 		dev_put(dev);
-		if (dst->_neighbour && dst->_neighbour->dev == dev) {
-			dst->_neighbour->dev = dst->dev;
+		rcu_read_lock();
+		neigh = dst_get_neighbour(dst);
+		if (neigh && neigh->dev == dev) {
+			neigh->dev = dst->dev;
 			dev_hold(dst->dev);
 			dev_put(dev);
 		}
+		rcu_read_unlock();
 	}
 }
 
diff --git a/net/core/fib_rules.c b/net/core/fib_rules.c
index e7ab0c0285b5..67c5c288cd80 100644
--- a/net/core/fib_rules.c
+++ b/net/core/fib_rules.c
@@ -487,7 +487,7 @@ static int fib_nl_delrule(struct sk_buff *skb, struct nlmsghdr* nlh, void *arg)
 	if (ops->nr_goto_rules > 0) {
 		list_for_each_entry(tmp, &ops->rules_list, list) {
 			if (rtnl_dereference(tmp->ctarget) == rule) {
-				rcu_assign_pointer(tmp->ctarget, NULL);
+				RCU_INIT_POINTER(tmp->ctarget, NULL);
 				ops->unresolved_rules++;
 			}
 		}
@@ -545,7 +545,7 @@ static int fib_nl_fill_rule(struct sk_buff *skb, struct fib_rule *rule,
 	frh->flags = rule->flags;
 
 	if (rule->action == FR_ACT_GOTO &&
-	    rcu_dereference_raw(rule->ctarget) == NULL)
+	    rcu_access_pointer(rule->ctarget) == NULL)
 		frh->flags |= FIB_RULE_UNRESOLVED;
 
 	if (rule->iifname[0]) {
diff --git a/net/core/filter.c b/net/core/filter.c
index 36f975fa87cb..8fcc2d776e09 100644
--- a/net/core/filter.c
+++ b/net/core/filter.c
@@ -645,7 +645,7 @@ int sk_detach_filter(struct sock *sk)
 	filter = rcu_dereference_protected(sk->sk_filter,
 					   sock_owned_by_user(sk));
 	if (filter) {
-		rcu_assign_pointer(sk->sk_filter, NULL);
+		RCU_INIT_POINTER(sk->sk_filter, NULL);
 		sk_filter_uncharge(sk, filter);
 		ret = 0;
 	}
diff --git a/net/core/neighbour.c b/net/core/neighbour.c
index 8fab9b0bb203..4002261f20d1 100644
--- a/net/core/neighbour.c
+++ b/net/core/neighbour.c
@@ -844,6 +844,19 @@ static void neigh_invalidate(struct neighbour *neigh)
 	skb_queue_purge(&neigh->arp_queue);
 }
 
+static void neigh_probe(struct neighbour *neigh)
+	__releases(neigh->lock)
+{
+	struct sk_buff *skb = skb_peek(&neigh->arp_queue);
+	/* keep skb alive even if arp_queue overflows */
+	if (skb)
+		skb = skb_copy(skb, GFP_ATOMIC);
+	write_unlock(&neigh->lock);
+	neigh->ops->solicit(neigh, skb);
+	atomic_inc(&neigh->probes);
+	kfree_skb(skb);
+}
+
 /* Called when a timer expires for a neighbour entry. */
 
 static void neigh_timer_handler(unsigned long arg)
@@ -920,14 +933,7 @@ static void neigh_timer_handler(unsigned long arg)
 		neigh_hold(neigh);
 	}
 	if (neigh->nud_state & (NUD_INCOMPLETE | NUD_PROBE)) {
-		struct sk_buff *skb = skb_peek(&neigh->arp_queue);
-		/* keep skb alive even if arp_queue overflows */
-		if (skb)
-			skb = skb_copy(skb, GFP_ATOMIC);
-		write_unlock(&neigh->lock);
-		neigh->ops->solicit(neigh, skb);
-		atomic_inc(&neigh->probes);
-		kfree_skb(skb);
+		neigh_probe(neigh);
 	} else {
 out:
 		write_unlock(&neigh->lock);
@@ -942,7 +948,7 @@ out:
 int __neigh_event_send(struct neighbour *neigh, struct sk_buff *skb)
 {
 	int rc;
-	unsigned long now;
+	bool immediate_probe = false;
 
 	write_lock_bh(&neigh->lock);
 
@@ -950,14 +956,16 @@ int __neigh_event_send(struct neighbour *neigh, struct sk_buff *skb)
 	if (neigh->nud_state & (NUD_CONNECTED | NUD_DELAY | NUD_PROBE))
 		goto out_unlock_bh;
 
-	now = jiffies;
-
 	if (!(neigh->nud_state & (NUD_STALE | NUD_INCOMPLETE))) {
 		if (neigh->parms->mcast_probes + neigh->parms->app_probes) {
+			unsigned long next, now = jiffies;
+
 			atomic_set(&neigh->probes, neigh->parms->ucast_probes);
 			neigh->nud_state = NUD_INCOMPLETE;
-			neigh->updated = jiffies;
-			neigh_add_timer(neigh, now + 1);
+			neigh->updated = now;
+			next = now + max(neigh->parms->retrans_time, HZ/2);
+			neigh_add_timer(neigh, next);
+			immediate_probe = true;
 		} else {
 			neigh->nud_state = NUD_FAILED;
 			neigh->updated = jiffies;
@@ -989,7 +997,11 @@ int __neigh_event_send(struct neighbour *neigh, struct sk_buff *skb)
 		rc = 1;
 	}
 out_unlock_bh:
-	write_unlock_bh(&neigh->lock);
+	if (immediate_probe)
+		neigh_probe(neigh);
+	else
+		write_unlock(&neigh->lock);
+	local_bh_enable();
 	return rc;
 }
 EXPORT_SYMBOL(__neigh_event_send);
diff --git a/net/core/net-sysfs.c b/net/core/net-sysfs.c
index 1683e5db2f27..56e42ab7cbc6 100644
--- a/net/core/net-sysfs.c
+++ b/net/core/net-sysfs.c
@@ -712,13 +712,13 @@ static void rx_queue_release(struct kobject *kobj)
 	struct rps_dev_flow_table *flow_table;
 
 
-	map = rcu_dereference_raw(queue->rps_map);
+	map = rcu_dereference_protected(queue->rps_map, 1);
 	if (map) {
 		RCU_INIT_POINTER(queue->rps_map, NULL);
 		kfree_rcu(map, rcu);
 	}
 
-	flow_table = rcu_dereference_raw(queue->rps_flow_table);
+	flow_table = rcu_dereference_protected(queue->rps_flow_table, 1);
 	if (flow_table) {
 		RCU_INIT_POINTER(queue->rps_flow_table, NULL);
 		call_rcu(&flow_table->rcu, rps_dev_flow_table_release);
@@ -987,10 +987,10 @@ static ssize_t store_xps_map(struct netdev_queue *queue,
 	}
 
 	if (nonempty)
-		rcu_assign_pointer(dev->xps_maps, new_dev_maps);
+		RCU_INIT_POINTER(dev->xps_maps, new_dev_maps);
 	else {
 		kfree(new_dev_maps);
-		rcu_assign_pointer(dev->xps_maps, NULL);
+		RCU_INIT_POINTER(dev->xps_maps, NULL);
 	}
 
 	if (dev_maps)
diff --git a/net/core/netpoll.c b/net/core/netpoll.c
index adf84dd8c7b5..d676a561d983 100644
--- a/net/core/netpoll.c
+++ b/net/core/netpoll.c
@@ -760,7 +760,7 @@ int __netpoll_setup(struct netpoll *np)
 	}
 
 	/* last thing to do is link it to the net device structure */
-	rcu_assign_pointer(ndev->npinfo, npinfo);
+	RCU_INIT_POINTER(ndev->npinfo, npinfo);
 
 	return 0;
 
@@ -901,7 +901,7 @@ void __netpoll_cleanup(struct netpoll *np)
 		if (ops->ndo_netpoll_cleanup)
 			ops->ndo_netpoll_cleanup(np->dev);
 
-	rcu_assign_pointer(np->dev->npinfo, NULL);
+	RCU_INIT_POINTER(np->dev->npinfo, NULL);
 
 	/* avoid racing with NAPI reading npinfo */
 	synchronize_rcu_bh();
diff --git a/net/core/rtnetlink.c b/net/core/rtnetlink.c
index 99d9e953fe39..39f8dd6a2821 100644
--- a/net/core/rtnetlink.c
+++ b/net/core/rtnetlink.c
@@ -1604,7 +1604,6 @@ struct net_device *rtnl_create_link(struct net *src_net, struct net *net,
 	dev_net_set(dev, net);
 	dev->rtnl_link_ops = ops;
 	dev->rtnl_link_state = RTNL_LINK_INITIALIZING;
-	dev->real_num_tx_queues = real_num_queues;
 
 	if (tb[IFLA_MTU])
 		dev->mtu = nla_get_u32(tb[IFLA_MTU]);
diff --git a/net/core/scm.c b/net/core/scm.c
index 4c1ef026d695..811b53fb330e 100644
--- a/net/core/scm.c
+++ b/net/core/scm.c
@@ -192,7 +192,7 @@ int __scm_send(struct socket *sock, struct msghdr *msg, struct scm_cookie *p)
 			goto error;
 
 		cred->uid = cred->euid = p->creds.uid;
-		cred->gid = cred->egid = p->creds.uid;
+		cred->gid = cred->egid = p->creds.gid;
 		put_cred(p->cred);
 		p->cred = cred;
 	}
diff --git a/net/core/secure_seq.c b/net/core/secure_seq.c
new file mode 100644
index 000000000000..45329d7c9dd9
--- /dev/null
+++ b/net/core/secure_seq.c
@@ -0,0 +1,184 @@
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <linux/cryptohash.h>
+#include <linux/module.h>
+#include <linux/cache.h>
+#include <linux/random.h>
+#include <linux/hrtimer.h>
+#include <linux/ktime.h>
+#include <linux/string.h>
+
+#include <net/secure_seq.h>
+
+static u32 net_secret[MD5_MESSAGE_BYTES / 4] ____cacheline_aligned;
+
+static int __init net_secret_init(void)
+{
+	get_random_bytes(net_secret, sizeof(net_secret));
+	return 0;
+}
+late_initcall(net_secret_init);
+
+static u32 seq_scale(u32 seq)
+{
+	/*
+	 * As close as possible to RFC 793, which
+	 * suggests using a 250 kHz clock.
+	 * Further reading shows this assumes 2 Mb/s networks.
+	 * For 10 Mb/s Ethernet, a 1 MHz clock is appropriate.
+	 * For 10 Gb/s Ethernet, a 1 GHz clock should be ok, but
+	 * we also need to limit the resolution so that the u32 seq
+	 * overlaps less than one time per MSL (2 minutes).
+	 * Choosing a clock of 64 ns period is OK. (period of 274 s)
+	 */
+	return seq + (ktime_to_ns(ktime_get_real()) >> 6);
+}
+
+#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
+__u32 secure_tcpv6_sequence_number(__be32 *saddr, __be32 *daddr,
+				   __be16 sport, __be16 dport)
+{
+	u32 secret[MD5_MESSAGE_BYTES / 4];
+	u32 hash[MD5_DIGEST_WORDS];
+	u32 i;
+
+	memcpy(hash, saddr, 16);
+	for (i = 0; i < 4; i++)
+		secret[i] = net_secret[i] + daddr[i];
+	secret[4] = net_secret[4] +
+		(((__force u16)sport << 16) + (__force u16)dport);
+	for (i = 5; i < MD5_MESSAGE_BYTES / 4; i++)
+		secret[i] = net_secret[i];
+
+	md5_transform(hash, secret);
+
+	return seq_scale(hash[0]);
+}
+EXPORT_SYMBOL(secure_tcpv6_sequence_number);
+
+u32 secure_ipv6_port_ephemeral(const __be32 *saddr, const __be32 *daddr,
+			       __be16 dport)
+{
+	u32 secret[MD5_MESSAGE_BYTES / 4];
+	u32 hash[MD5_DIGEST_WORDS];
+	u32 i;
+
+	memcpy(hash, saddr, 16);
+	for (i = 0; i < 4; i++)
+		secret[i] = net_secret[i] + (__force u32) daddr[i];
+	secret[4] = net_secret[4] + (__force u32)dport;
+	for (i = 5; i < MD5_MESSAGE_BYTES / 4; i++)
+		secret[i] = net_secret[i];
+
+	md5_transform(hash, secret);
+
+	return hash[0];
+}
+#endif
+
+#ifdef CONFIG_INET
+__u32 secure_ip_id(__be32 daddr)
+{
+	u32 hash[MD5_DIGEST_WORDS];
+
+	hash[0] = (__force __u32) daddr;
+	hash[1] = net_secret[13];
+	hash[2] = net_secret[14];
+	hash[3] = net_secret[15];
+
+	md5_transform(hash, net_secret);
+
+	return hash[0];
+}
+
+__u32 secure_ipv6_id(const __be32 daddr[4])
+{
+	__u32 hash[4];
+
+	memcpy(hash, daddr, 16);
+	md5_transform(hash, net_secret);
+
+	return hash[0];
+}
+
+__u32 secure_tcp_sequence_number(__be32 saddr, __be32 daddr,
+				 __be16 sport, __be16 dport)
+{
+	u32 hash[MD5_DIGEST_WORDS];
+
+	hash[0] = (__force u32)saddr;
+	hash[1] = (__force u32)daddr;
+	hash[2] = ((__force u16)sport << 16) + (__force u16)dport;
+	hash[3] = net_secret[15];
+
+	md5_transform(hash, net_secret);
+
+	return seq_scale(hash[0]);
+}
+
+u32 secure_ipv4_port_ephemeral(__be32 saddr, __be32 daddr, __be16 dport)
+{
+	u32 hash[MD5_DIGEST_WORDS];
+
+	hash[0] = (__force u32)saddr;
+	hash[1] = (__force u32)daddr;
+	hash[2] = (__force u32)dport ^ net_secret[14];
+	hash[3] = net_secret[15];
+
+	md5_transform(hash, net_secret);
+
+	return hash[0];
+}
+EXPORT_SYMBOL_GPL(secure_ipv4_port_ephemeral);
+#endif
+
+#if defined(CONFIG_IP_DCCP) || defined(CONFIG_IP_DCCP_MODULE)
+u64 secure_dccp_sequence_number(__be32 saddr, __be32 daddr,
+				__be16 sport, __be16 dport)
+{
+	u32 hash[MD5_DIGEST_WORDS];
+	u64 seq;
+
+	hash[0] = (__force u32)saddr;
+	hash[1] = (__force u32)daddr;
+	hash[2] = ((__force u16)sport << 16) + (__force u16)dport;
+	hash[3] = net_secret[15];
+
+	md5_transform(hash, net_secret);
+
+	seq = hash[0] | (((u64)hash[1]) << 32);
+	seq += ktime_to_ns(ktime_get_real());
+	seq &= (1ull << 48) - 1;
+
+	return seq;
+}
+EXPORT_SYMBOL(secure_dccp_sequence_number);
+
+#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
+u64 secure_dccpv6_sequence_number(__be32 *saddr, __be32 *daddr,
+				  __be16 sport, __be16 dport)
+{
+	u32 secret[MD5_MESSAGE_BYTES / 4];
+	u32 hash[MD5_DIGEST_WORDS];
+	u64 seq;
+	u32 i;
+
+	memcpy(hash, saddr, 16);
+	for (i = 0; i < 4; i++)
+		secret[i] = net_secret[i] + daddr[i];
+	secret[4] = net_secret[4] +
+		(((__force u16)sport << 16) + (__force u16)dport);
+	for (i = 5; i < MD5_MESSAGE_BYTES / 4; i++)
+		secret[i] = net_secret[i];
+
+	md5_transform(hash, secret);
+
+	seq = hash[0] | (((u64)hash[1]) << 32);
+	seq += ktime_to_ns(ktime_get_real());
+	seq &= (1ull << 48) - 1;
+
+	return seq;
+}
+EXPORT_SYMBOL(secure_dccpv6_sequence_number);
+#endif
+#endif
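A quick sanity check of the seq_scale() comment above: with a 64 ns tick (nanoseconds shifted right by 6), the 32-bit ISN offset wraps every 2^32 * 64 ns, roughly 274 seconds, which is the "period of 274 s" the comment mentions. A standalone user-space sketch of that arithmetic, not kernel code:

#include <stdio.h>

int main(void)
{
	double tick_ns = 64.0;				/* one unit of (ns >> 6) */
	double wrap_s  = 4294967296.0 * tick_ns / 1e9;	/* 2^32 ticks in seconds */

	/* prints roughly 274.9 */
	printf("ISN clock wrap period: %.1f s\n", wrap_s);
	return 0;
}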
diff --git a/net/core/skbuff.c b/net/core/skbuff.c
index 2beda824636e..e27334ec367a 100644
--- a/net/core/skbuff.c
+++ b/net/core/skbuff.c
@@ -529,6 +529,8 @@ static void __copy_skb_header(struct sk_buff *new, const struct sk_buff *old)
 	new->mac_header		= old->mac_header;
 	skb_dst_copy(new, old);
 	new->rxhash		= old->rxhash;
+	new->ooo_okay		= old->ooo_okay;
+	new->l4_rxhash		= old->l4_rxhash;
 #ifdef CONFIG_XFRM
 	new->sp			= secpath_get(old->sp);
 #endif
@@ -1369,8 +1371,21 @@ pull_pages:
 }
 EXPORT_SYMBOL(__pskb_pull_tail);
 
-/* Copy some data bits from skb to kernel buffer. */
-
+/**
+ *	skb_copy_bits - copy bits from skb to kernel buffer
+ *	@skb: source skb
+ *	@offset: offset in source
+ *	@to: destination buffer
+ *	@len: number of bytes to copy
+ *
+ *	Copy the specified number of bytes from the source skb to the
+ *	destination buffer.
+ *
+ *	CAUTION ! :
+ *		If its prototype is ever changed,
+ *		check arch/{*}/net/{*}.S files,
+ *		since it is called from BPF assembly code.
+ */
 int skb_copy_bits(const struct sk_buff *skb, int offset, void *to, int len)
 {
 	int start = skb_headlen(skb);
diff --git a/net/core/sock.c b/net/core/sock.c
index bc745d00ea4d..9997026b44b2 100644
--- a/net/core/sock.c
+++ b/net/core/sock.c
@@ -387,7 +387,7 @@ struct dst_entry *__sk_dst_check(struct sock *sk, u32 cookie)
 
 	if (dst && dst->obsolete && dst->ops->check(dst, cookie) == NULL) {
 		sk_tx_queue_clear(sk);
-		rcu_assign_pointer(sk->sk_dst_cache, NULL);
+		RCU_INIT_POINTER(sk->sk_dst_cache, NULL);
 		dst_release(dst);
 		return NULL;
 	}
@@ -1158,7 +1158,7 @@ static void __sk_free(struct sock *sk)
 		   atomic_read(&sk->sk_wmem_alloc) == 0);
 	if (filter) {
 		sk_filter_uncharge(sk, filter);
-		rcu_assign_pointer(sk->sk_filter, NULL);
+		RCU_INIT_POINTER(sk->sk_filter, NULL);
 	}
 
 	sock_disable_timestamp(sk, SOCK_TIMESTAMP);
diff --git a/net/dccp/ccids/ccid2.c b/net/dccp/ccids/ccid2.c
index 0462040fc818..67164bb6ae4d 100644
--- a/net/dccp/ccids/ccid2.c
+++ b/net/dccp/ccids/ccid2.c
@@ -85,7 +85,6 @@ static int ccid2_hc_tx_send_packet(struct sock *sk, struct sk_buff *skb)
 
 static void ccid2_change_l_ack_ratio(struct sock *sk, u32 val)
 {
-	struct dccp_sock *dp = dccp_sk(sk);
 	u32 max_ratio = DIV_ROUND_UP(ccid2_hc_tx_sk(sk)->tx_cwnd, 2);
 
 	/*
@@ -98,14 +97,33 @@ static void ccid2_change_l_ack_ratio(struct sock *sk, u32 val)
 		DCCP_WARN("Limiting Ack Ratio (%u) to %u\n", val, max_ratio);
 		val = max_ratio;
 	}
-	if (val > DCCPF_ACK_RATIO_MAX)
-		val = DCCPF_ACK_RATIO_MAX;
+	dccp_feat_signal_nn_change(sk, DCCPF_ACK_RATIO,
+				   min_t(u32, val, DCCPF_ACK_RATIO_MAX));
+}
 
-	if (val == dp->dccps_l_ack_ratio)
-		return;
+static void ccid2_check_l_ack_ratio(struct sock *sk)
+{
+	struct ccid2_hc_tx_sock *hc = ccid2_hc_tx_sk(sk);
 
-	ccid2_pr_debug("changing local ack ratio to %u\n", val);
-	dp->dccps_l_ack_ratio = val;
+	/*
+	 * After a loss, idle period, application limited period, or RTO we
+	 * need to check that the ack ratio is still less than the congestion
+	 * window. Otherwise, we will send an entire congestion window of
+	 * packets and got no response because we haven't sent ack ratio
+	 * packets yet.
+	 * If the ack ratio does need to be reduced, we reduce it to half of
+	 * the congestion window (or 1 if that's zero) instead of to the
+	 * congestion window. This prevents problems if one ack is lost.
+	 */
+	if (dccp_feat_nn_get(sk, DCCPF_ACK_RATIO) > hc->tx_cwnd)
+		ccid2_change_l_ack_ratio(sk, hc->tx_cwnd/2 ? : 1U);
+}
+
+static void ccid2_change_l_seq_window(struct sock *sk, u64 val)
+{
+	dccp_feat_signal_nn_change(sk, DCCPF_SEQUENCE_WINDOW,
+				   clamp_val(val, DCCPF_SEQ_WMIN,
+						  DCCPF_SEQ_WMAX));
 }
 
 static void ccid2_hc_tx_rto_expire(unsigned long data)
@@ -187,6 +205,8 @@ static void ccid2_cwnd_application_limited(struct sock *sk, const u32 now)
 	}
 	hc->tx_cwnd_used = 0;
 	hc->tx_cwnd_stamp = now;
+
+	ccid2_check_l_ack_ratio(sk);
 }
 
 /* This borrows the code of tcp_cwnd_restart() */
@@ -205,6 +225,8 @@ static void ccid2_cwnd_restart(struct sock *sk, const u32 now)
 
 	hc->tx_cwnd_stamp = now;
 	hc->tx_cwnd_used = 0;
+
+	ccid2_check_l_ack_ratio(sk);
 }
 
 static void ccid2_hc_tx_packet_sent(struct sock *sk, unsigned int len)
@@ -405,17 +427,37 @@ static void ccid2_new_ack(struct sock *sk, struct ccid2_seq *seqp,
 			  unsigned int *maxincr)
 {
 	struct ccid2_hc_tx_sock *hc = ccid2_hc_tx_sk(sk);
+	struct dccp_sock *dp = dccp_sk(sk);
+	int r_seq_used = hc->tx_cwnd / dp->dccps_l_ack_ratio;
 
-	if (hc->tx_cwnd < hc->tx_ssthresh) {
-		if (*maxincr > 0 && ++hc->tx_packets_acked == 2) {
-			hc->tx_cwnd += 1;
-			*maxincr    -= 1;
-			hc->tx_packets_acked = 0;
-		}
-	} else if (++hc->tx_packets_acked >= hc->tx_cwnd) {
-		hc->tx_cwnd += 1;
-		hc->tx_packets_acked = 0;
+	if (hc->tx_cwnd < dp->dccps_l_seq_win &&
+	    r_seq_used < dp->dccps_r_seq_win) {
+		if (hc->tx_cwnd < hc->tx_ssthresh) {
+			if (*maxincr > 0 && ++hc->tx_packets_acked >= 2) {
+				hc->tx_cwnd += 1;
+				*maxincr    -= 1;
+				hc->tx_packets_acked = 0;
+			}
+		} else if (++hc->tx_packets_acked >= hc->tx_cwnd) {
+			hc->tx_cwnd += 1;
+			hc->tx_packets_acked = 0;
+		}
 	}
+
+	/*
+	 * Adjust the local sequence window and the ack ratio to allow about
+	 * 5 times the number of packets in the network (RFC 4340 7.5.2)
+	 */
+	if (r_seq_used * CCID2_WIN_CHANGE_FACTOR >= dp->dccps_r_seq_win)
+		ccid2_change_l_ack_ratio(sk, dp->dccps_l_ack_ratio * 2);
+	else if (r_seq_used * CCID2_WIN_CHANGE_FACTOR < dp->dccps_r_seq_win/2)
+		ccid2_change_l_ack_ratio(sk, dp->dccps_l_ack_ratio / 2 ? : 1U);
+
+	if (hc->tx_cwnd * CCID2_WIN_CHANGE_FACTOR >= dp->dccps_l_seq_win)
+		ccid2_change_l_seq_window(sk, dp->dccps_l_seq_win * 2);
+	else if (hc->tx_cwnd * CCID2_WIN_CHANGE_FACTOR < dp->dccps_l_seq_win/2)
+		ccid2_change_l_seq_window(sk, dp->dccps_l_seq_win / 2);
+
 	/*
 	 * FIXME: RTT is sampled several times per acknowledgment (for each
 	 * entry in the Ack Vector), instead of once per Ack (as in TCP SACK).
@@ -441,9 +483,7 @@ static void ccid2_congestion_event(struct sock *sk, struct ccid2_seq *seqp)
 	hc->tx_cwnd     = hc->tx_cwnd / 2 ? : 1U;
 	hc->tx_ssthresh = max(hc->tx_cwnd, 2U);
 
-	/* Avoid spurious timeouts resulting from Ack Ratio > cwnd */
-	if (dccp_sk(sk)->dccps_l_ack_ratio > hc->tx_cwnd)
-		ccid2_change_l_ack_ratio(sk, hc->tx_cwnd);
+	ccid2_check_l_ack_ratio(sk);
 }
 
 static int ccid2_hc_tx_parse_options(struct sock *sk, u8 packet_type,
@@ -494,8 +534,16 @@ static void ccid2_hc_tx_packet_recv(struct sock *sk, struct sk_buff *skb)
 		if (hc->tx_rpdupack >= NUMDUPACK) {
 			hc->tx_rpdupack = -1; /* XXX lame */
 			hc->tx_rpseq = 0;
-
+#ifdef __CCID2_COPES_GRACEFULLY_WITH_ACK_CONGESTION_CONTROL__
+			/*
+			 * FIXME: Ack Congestion Control is broken; in
+			 * the current state instabilities occurred with
+			 * Ack Ratios greater than 1; causing hang-ups
+			 * and long RTO timeouts. This needs to be fixed
+			 * before opening up dynamic changes. -- gerrit
+			 */
 			ccid2_change_l_ack_ratio(sk, 2 * dp->dccps_l_ack_ratio);
+#endif
 		}
 	}
 }
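The window management introduced above keeps the advertised sequence window roughly CCID2_WIN_CHANGE_FACTOR (5) times the congestion window, doubling or halving it as the congestion window moves. A simplified, standalone illustration of that rule (not taken from the patch, hypothetical helper name):

#include <stdint.h>

#define WIN_CHANGE_FACTOR 5

static uint64_t adjust_seq_window(uint64_t seq_win, uint32_t cwnd)
{
	if ((uint64_t)cwnd * WIN_CHANGE_FACTOR >= seq_win)
		return seq_win * 2;	/* cwnd caught up: grow the window */
	if ((uint64_t)cwnd * WIN_CHANGE_FACTOR < seq_win / 2)
		return seq_win / 2;	/* over-provisioned: shrink it */
	return seq_win;			/* within bounds: keep as is */
}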
diff --git a/net/dccp/ccids/ccid2.h b/net/dccp/ccids/ccid2.h
index f585d330e1e5..18c97543e522 100644
--- a/net/dccp/ccids/ccid2.h
+++ b/net/dccp/ccids/ccid2.h
@@ -43,6 +43,12 @@ struct ccid2_seq {
 #define CCID2_SEQBUF_LEN 1024
 #define CCID2_SEQBUF_MAX 128
 
+/*
+ * Multiple of congestion window to keep the sequence window at
+ * (RFC 4340 7.5.2)
+ */
+#define CCID2_WIN_CHANGE_FACTOR 5
+
 /**
  * struct ccid2_hc_tx_sock - CCID2 TX half connection
  * @tx_{cwnd,ssthresh,pipe}: as per RFC 4341, section 5
diff --git a/net/dccp/dccp.h b/net/dccp/dccp.h
index 5fdb07229017..583490aaf56f 100644
--- a/net/dccp/dccp.h
+++ b/net/dccp/dccp.h
@@ -474,6 +474,7 @@ static inline int dccp_ack_pending(const struct sock *sk)
 	return dccp_ackvec_pending(sk) || inet_csk_ack_scheduled(sk);
 }
 
+extern int dccp_feat_signal_nn_change(struct sock *sk, u8 feat, u64 nn_val);
 extern int dccp_feat_finalise_settings(struct dccp_sock *dp);
 extern int dccp_feat_server_ccid_dependencies(struct dccp_request_sock *dreq);
 extern int dccp_feat_insert_opts(struct dccp_sock*, struct dccp_request_sock*,
diff --git a/net/dccp/feat.c b/net/dccp/feat.c
index 568def952722..23cea0ee3101 100644
--- a/net/dccp/feat.c
+++ b/net/dccp/feat.c
@@ -12,6 +12,7 @@
  * -----------
  * o Feature negotiation is coordinated with connection setup (as in TCP), wild
  *   changes of parameters of an established connection are not supported.
+ * o Changing non-negotiable (NN) values is supported in state OPEN/PARTOPEN.
  * o All currently known SP features have 1-byte quantities. If in the future
  *   extensions of RFCs 4340..42 define features with item lengths larger than
  *   one byte, a feature-specific extension of the code will be required.
@@ -343,6 +344,20 @@ static int __dccp_feat_activate(struct sock *sk, const int idx,
 	return dccp_feat_table[idx].activation_hdlr(sk, val, rx);
 }
 
+/**
+ * dccp_feat_activate - Activate feature value on socket
+ * @sk: fully connected DCCP socket (after handshake is complete)
+ * @feat_num: feature to activate, one of %dccp_feature_numbers
+ * @local: whether local (1) or remote (0) @feat_num is meant
+ * @fval: the value (SP or NN) to activate, or NULL to use the default value
+ * For general use this function is preferable over __dccp_feat_activate().
+ */
+static int dccp_feat_activate(struct sock *sk, u8 feat_num, bool local,
+			      dccp_feat_val const *fval)
+{
+	return __dccp_feat_activate(sk, dccp_feat_index(feat_num), local, fval);
+}
+
 /* Test for "Req'd" feature (RFC 4340, 6.4) */
 static inline int dccp_feat_must_be_understood(u8 feat_num)
 {
@@ -650,11 +665,22 @@ int dccp_feat_insert_opts(struct dccp_sock *dp, struct dccp_request_sock *dreq,
 			return -1;
 		if (pos->needs_mandatory && dccp_insert_option_mandatory(skb))
 			return -1;
-		/*
-		 * Enter CHANGING after transmitting the Change option (6.6.2).
-		 */
-		if (pos->state == FEAT_INITIALISING)
-			pos->state = FEAT_CHANGING;
+
+		if (skb->sk->sk_state == DCCP_OPEN &&
+		    (opt == DCCPO_CONFIRM_R || opt == DCCPO_CONFIRM_L)) {
+			/*
+			 * Confirms don't get retransmitted (6.6.3) once the
+			 * connection is in state OPEN
+			 */
+			dccp_feat_list_pop(pos);
+		} else {
+			/*
+			 * Enter CHANGING after transmitting the Change
+			 * option (6.6.2).
+			 */
+			if (pos->state == FEAT_INITIALISING)
+				pos->state = FEAT_CHANGING;
+		}
 	}
 	return 0;
 }
@@ -730,6 +756,70 @@ int dccp_feat_register_sp(struct sock *sk, u8 feat, u8 is_local,
 			       0, list, len);
 }
 
+/**
+ * dccp_feat_nn_get - Query current/pending value of NN feature
+ * @sk: DCCP socket of an established connection
+ * @feat: NN feature number from %dccp_feature_numbers
+ * For a known NN feature, returns value currently being negotiated, or
+ * current (confirmed) value if no negotiation is going on.
+ */
+u64 dccp_feat_nn_get(struct sock *sk, u8 feat)
+{
+	if (dccp_feat_type(feat) == FEAT_NN) {
+		struct dccp_sock *dp = dccp_sk(sk);
+		struct dccp_feat_entry *entry;
+
+		entry = dccp_feat_list_lookup(&dp->dccps_featneg, feat, 1);
+		if (entry != NULL)
+			return entry->val.nn;
+
+		switch (feat) {
+		case DCCPF_ACK_RATIO:
+			return dp->dccps_l_ack_ratio;
+		case DCCPF_SEQUENCE_WINDOW:
+			return dp->dccps_l_seq_win;
+		}
+	}
+	DCCP_BUG("attempt to look up unsupported feature %u", feat);
+	return 0;
+}
+EXPORT_SYMBOL_GPL(dccp_feat_nn_get);
+
+/**
+ * dccp_feat_signal_nn_change - Update NN values for an established connection
+ * @sk: DCCP socket of an established connection
+ * @feat: NN feature number from %dccp_feature_numbers
+ * @nn_val: the new value to use
+ * This function is used to communicate NN updates out-of-band.
+ */
+int dccp_feat_signal_nn_change(struct sock *sk, u8 feat, u64 nn_val)
+{
+	struct list_head *fn = &dccp_sk(sk)->dccps_featneg;
+	dccp_feat_val fval = { .nn = nn_val };
+	struct dccp_feat_entry *entry;
+
+	if (sk->sk_state != DCCP_OPEN && sk->sk_state != DCCP_PARTOPEN)
+		return 0;
+
+	if (dccp_feat_type(feat) != FEAT_NN ||
+	    !dccp_feat_is_valid_nn_val(feat, nn_val))
+		return -EINVAL;
+
+	if (nn_val == dccp_feat_nn_get(sk, feat))
+		return 0;	/* already set or negotiation under way */
+
+	entry = dccp_feat_list_lookup(fn, feat, 1);
+	if (entry != NULL) {
+		dccp_pr_debug("Clobbering existing NN entry %llu -> %llu\n",
+			      (unsigned long long)entry->val.nn,
+			      (unsigned long long)nn_val);
+		dccp_feat_list_pop(entry);
+	}
+
+	inet_csk_schedule_ack(sk);
+	return dccp_feat_push_change(fn, feat, 1, 0, &fval);
+}
+EXPORT_SYMBOL_GPL(dccp_feat_signal_nn_change);
+
 /*
  * Tracking features whose value depend on the choice of CCID
@@ -1187,6 +1277,100 @@ confirmation_failed:
1187} 1277}
1188 1278
1189/** 1279/**
1280 * dccp_feat_handle_nn_established - Fast-path reception of NN options
1281 * @sk: socket of an established DCCP connection
1282 * @mandatory: whether @opt was preceded by a Mandatory option
1283 * @opt: %DCCPO_CHANGE_L | %DCCPO_CONFIRM_R (NN only)
1284 * @feat: NN number, one of %dccp_feature_numbers
1285 * @val: NN value
1286 * @len: length of @val in bytes
1287 * This function combines the functionality of change_recv/confirm_recv, with
1288 * the following differences (reset codes are the same):
1289 * - cleanup after receiving the Confirm;
1290 * - values are directly activated after successful parsing;
1291 * - deliberately restricted to NN features.
1292 * The restriction to NN features is essential since SP features can have non-
1293 * predictable outcomes (depending on the remote configuration), and are inter-
1294 * dependent (CCIDs for instance cause further dependencies).
1295 */
1296static u8 dccp_feat_handle_nn_established(struct sock *sk, u8 mandatory, u8 opt,
1297 u8 feat, u8 *val, u8 len)
1298{
1299 struct list_head *fn = &dccp_sk(sk)->dccps_featneg;
1300 const bool local = (opt == DCCPO_CONFIRM_R);
1301 struct dccp_feat_entry *entry;
1302 u8 type = dccp_feat_type(feat);
1303 dccp_feat_val fval;
1304
1305 dccp_feat_print_opt(opt, feat, val, len, mandatory);
1306
1307 /* Ignore non-mandatory unknown and non-NN features */
1308 if (type == FEAT_UNKNOWN) {
1309 if (local && !mandatory)
1310 return 0;
1311 goto fast_path_unknown;
1312 } else if (type != FEAT_NN) {
1313 return 0;
1314 }
1315
1316 /*
1317 * We don't accept empty Confirms, since in fast-path feature
1318 * negotiation the values are enabled immediately after sending
1319 * the Change option.
1320 * Empty Changes on the other hand are invalid (RFC 4340, 6.1).
1321 */
1322 if (len == 0 || len > sizeof(fval.nn))
1323 goto fast_path_unknown;
1324
1325 if (opt == DCCPO_CHANGE_L) {
1326 fval.nn = dccp_decode_value_var(val, len);
1327 if (!dccp_feat_is_valid_nn_val(feat, fval.nn))
1328 goto fast_path_unknown;
1329
1330 if (dccp_feat_push_confirm(fn, feat, local, &fval) ||
1331 dccp_feat_activate(sk, feat, local, &fval))
1332 return DCCP_RESET_CODE_TOO_BUSY;
1333
1334 /* set the `Ack Pending' flag to piggyback a Confirm */
1335 inet_csk_schedule_ack(sk);
1336
1337 } else if (opt == DCCPO_CONFIRM_R) {
1338 entry = dccp_feat_list_lookup(fn, feat, local);
1339 if (entry == NULL || entry->state != FEAT_CHANGING)
1340 return 0;
1341
1342 fval.nn = dccp_decode_value_var(val, len);
1343 /*
1344 * Just ignore a value that doesn't match our current value.
1345 * If the option changes twice within two RTTs, then at least
1346 * one CONFIRM will be received for the old value after a
1347 * new CHANGE was sent.
1348 */
1349 if (fval.nn != entry->val.nn)
1350 return 0;
1351
1352 /* Only activate after receiving the Confirm option (6.6.1). */
1353 dccp_feat_activate(sk, feat, local, &fval);
1354
1355 /* It has been confirmed - so remove the entry */
1356 dccp_feat_list_pop(entry);
1357
1358 } else {
1359 DCCP_WARN("Received illegal option %u\n", opt);
1360 goto fast_path_failed;
1361 }
1362 return 0;
1363
1364fast_path_unknown:
1365 if (!mandatory)
1366 return dccp_push_empty_confirm(fn, feat, local);
1367
1368fast_path_failed:
1369 return mandatory ? DCCP_RESET_CODE_MANDATORY_ERROR
1370 : DCCP_RESET_CODE_OPTION_ERROR;
1371}
1372
1373/**
1190 * dccp_feat_parse_options - Process Feature-Negotiation Options 1374 * dccp_feat_parse_options - Process Feature-Negotiation Options
1191 * @sk: for general use and used by the client during connection setup 1375 * @sk: for general use and used by the client during connection setup
1192 * @dreq: used by the server during connection setup 1376 * @dreq: used by the server during connection setup
@@ -1221,6 +1405,14 @@ int dccp_feat_parse_options(struct sock *sk, struct dccp_request_sock *dreq,
1221 return dccp_feat_confirm_recv(fn, mandatory, opt, feat, 1405 return dccp_feat_confirm_recv(fn, mandatory, opt, feat,
1222 val, len, server); 1406 val, len, server);
1223 } 1407 }
1408 break;
1409 /*
1410 * Support for exchanging NN options on an established connection.
1411 */
1412 case DCCP_OPEN:
1413 case DCCP_PARTOPEN:
1414 return dccp_feat_handle_nn_established(sk, mandatory, opt, feat,
1415 val, len);
1224 } 1416 }
1225 return 0; /* ignore FN options in all other states */ 1417 return 0; /* ignore FN options in all other states */
1226} 1418}
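
Taken together, dccp_feat_nn_get() and dccp_feat_signal_nn_change() give in-kernel users a small API for adjusting NN features on a live connection: the first reports the value in force (or the one currently under negotiation), the second validates the new value, replaces any pending entry, queues a Change option and schedules an Ack to carry it. A minimal caller sketch follows; the function name and the doubling policy are purely illustrative, and the caller is assumed to hold the socket lock, as the existing feature-negotiation paths do.

/* Hedged sketch of a hypothetical in-kernel caller; not part of this patch. */
#include <linux/dccp.h>
#include "dccp.h"
#include "feat.h"

static int example_grow_seq_win(struct sock *sk)
{
        u64 cur = dccp_feat_nn_get(sk, DCCPF_SEQUENCE_WINDOW);

        /*
         * dccp_feat_signal_nn_change() itself ignores sockets that are not
         * in OPEN/PARTOPEN, rejects invalid values with -EINVAL and treats
         * a no-op change as success, so the caller can stay this simple.
         */
        return dccp_feat_signal_nn_change(sk, DCCPF_SEQUENCE_WINDOW, 2 * cur);
}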
diff --git a/net/dccp/feat.h b/net/dccp/feat.h
index e56a4e5e634e..90b957d34d26 100644
--- a/net/dccp/feat.h
+++ b/net/dccp/feat.h
@@ -129,6 +129,7 @@ extern int dccp_feat_clone_list(struct list_head const *, struct list_head *);
129 129
130extern void dccp_encode_value_var(const u64 value, u8 *to, const u8 len); 130extern void dccp_encode_value_var(const u64 value, u8 *to, const u8 len);
131extern u64 dccp_decode_value_var(const u8 *bf, const u8 len); 131extern u64 dccp_decode_value_var(const u8 *bf, const u8 len);
132extern u64 dccp_feat_nn_get(struct sock *sk, u8 feat);
132 133
133extern int dccp_insert_option_mandatory(struct sk_buff *skb); 134extern int dccp_insert_option_mandatory(struct sk_buff *skb);
134extern int dccp_insert_fn_opt(struct sk_buff *skb, u8 type, u8 feat, 135extern int dccp_insert_fn_opt(struct sk_buff *skb, u8 type, u8 feat,
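
The fast path added in feat.c leans on dccp_decode_value_var() to turn the raw option payload back into a u64 before it is validated, and the Change/Confirm emission side uses the inverse, dccp_encode_value_var(); both are declared here next to the new dccp_feat_nn_get() export. A quick round-trip sketch (6 bytes is the width of a 48-bit Sequence Window value; the WARN_ON only makes the invariant explicit):

/* Hedged sketch, not part of this patch: NN value round-trip. */
static void example_nn_roundtrip(void)
{
        u8 buf[6];               /* 48-bit Sequence Window value */
        u64 seq_win = 1000;

        dccp_encode_value_var(seq_win, buf, sizeof(buf));
        WARN_ON(dccp_decode_value_var(buf, sizeof(buf)) != seq_win);
}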
diff --git a/net/dccp/ipv4.c b/net/dccp/ipv4.c
index 8c36adfd1919..332639b56f4d 100644
--- a/net/dccp/ipv4.c
+++ b/net/dccp/ipv4.c
@@ -26,6 +26,7 @@
26#include <net/timewait_sock.h> 26#include <net/timewait_sock.h>
27#include <net/tcp_states.h> 27#include <net/tcp_states.h>
28#include <net/xfrm.h> 28#include <net/xfrm.h>
29#include <net/secure_seq.h>
29 30
30#include "ackvec.h" 31#include "ackvec.h"
31#include "ccid.h" 32#include "ccid.h"
diff --git a/net/dccp/ipv6.c b/net/dccp/ipv6.c
index 8dc4348774a5..b74f76117dcf 100644
--- a/net/dccp/ipv6.c
+++ b/net/dccp/ipv6.c
@@ -29,6 +29,7 @@
29#include <net/transp_v6.h> 29#include <net/transp_v6.h>
30#include <net/ip6_checksum.h> 30#include <net/ip6_checksum.h>
31#include <net/xfrm.h> 31#include <net/xfrm.h>
32#include <net/secure_seq.h>
32 33
33#include "dccp.h" 34#include "dccp.h"
34#include "ipv6.h" 35#include "ipv6.h"
@@ -69,13 +70,7 @@ static inline void dccp_v6_send_check(struct sock *sk, struct sk_buff *skb)
69 dh->dccph_checksum = dccp_v6_csum_finish(skb, &np->saddr, &np->daddr); 70 dh->dccph_checksum = dccp_v6_csum_finish(skb, &np->saddr, &np->daddr);
70} 71}
71 72
72static inline __u32 secure_dccpv6_sequence_number(__be32 *saddr, __be32 *daddr, 73static inline __u64 dccp_v6_init_sequence(struct sk_buff *skb)
73 __be16 sport, __be16 dport )
74{
75 return secure_tcpv6_sequence_number(saddr, daddr, sport, dport);
76}
77
78static inline __u32 dccp_v6_init_sequence(struct sk_buff *skb)
79{ 74{
80 return secure_dccpv6_sequence_number(ipv6_hdr(skb)->daddr.s6_addr32, 75 return secure_dccpv6_sequence_number(ipv6_hdr(skb)->daddr.s6_addr32,
81 ipv6_hdr(skb)->saddr.s6_addr32, 76 ipv6_hdr(skb)->saddr.s6_addr32,
diff --git a/net/dccp/proto.c b/net/dccp/proto.c
index 152975d942d9..e742f90a6858 100644
--- a/net/dccp/proto.c
+++ b/net/dccp/proto.c
@@ -184,7 +184,6 @@ int dccp_init_sock(struct sock *sk, const __u8 ctl_sock_initialized)
184 dp->dccps_rate_last = jiffies; 184 dp->dccps_rate_last = jiffies;
185 dp->dccps_role = DCCP_ROLE_UNDEFINED; 185 dp->dccps_role = DCCP_ROLE_UNDEFINED;
186 dp->dccps_service = DCCP_SERVICE_CODE_IS_ABSENT; 186 dp->dccps_service = DCCP_SERVICE_CODE_IS_ABSENT;
187 dp->dccps_l_ack_ratio = dp->dccps_r_ack_ratio = 1;
188 dp->dccps_tx_qlen = sysctl_dccp_tx_qlen; 187 dp->dccps_tx_qlen = sysctl_dccp_tx_qlen;
189 188
190 dccp_init_xmit_timers(sk); 189 dccp_init_xmit_timers(sk);
diff --git a/net/decnet/dn_dev.c b/net/decnet/dn_dev.c
index ba4faceec405..2ab16e12520c 100644
--- a/net/decnet/dn_dev.c
+++ b/net/decnet/dn_dev.c
@@ -388,7 +388,7 @@ static int dn_dev_insert_ifa(struct dn_dev *dn_db, struct dn_ifaddr *ifa)
388 } 388 }
389 389
390 ifa->ifa_next = dn_db->ifa_list; 390 ifa->ifa_next = dn_db->ifa_list;
391 rcu_assign_pointer(dn_db->ifa_list, ifa); 391 RCU_INIT_POINTER(dn_db->ifa_list, ifa);
392 392
393 dn_ifaddr_notify(RTM_NEWADDR, ifa); 393 dn_ifaddr_notify(RTM_NEWADDR, ifa);
394 blocking_notifier_call_chain(&dnaddr_chain, NETDEV_UP, ifa); 394 blocking_notifier_call_chain(&dnaddr_chain, NETDEV_UP, ifa);
@@ -1093,7 +1093,7 @@ static struct dn_dev *dn_dev_create(struct net_device *dev, int *err)
1093 1093
1094 memcpy(&dn_db->parms, p, sizeof(struct dn_dev_parms)); 1094 memcpy(&dn_db->parms, p, sizeof(struct dn_dev_parms));
1095 1095
1096 rcu_assign_pointer(dev->dn_ptr, dn_db); 1096 RCU_INIT_POINTER(dev->dn_ptr, dn_db);
1097 dn_db->dev = dev; 1097 dn_db->dev = dev;
1098 init_timer(&dn_db->timer); 1098 init_timer(&dn_db->timer);
1099 1099
@@ -1101,7 +1101,7 @@ static struct dn_dev *dn_dev_create(struct net_device *dev, int *err)
1101 1101
1102 dn_db->neigh_parms = neigh_parms_alloc(dev, &dn_neigh_table); 1102 dn_db->neigh_parms = neigh_parms_alloc(dev, &dn_neigh_table);
1103 if (!dn_db->neigh_parms) { 1103 if (!dn_db->neigh_parms) {
1104 rcu_assign_pointer(dev->dn_ptr, NULL); 1104 RCU_INIT_POINTER(dev->dn_ptr, NULL);
1105 kfree(dn_db); 1105 kfree(dn_db);
1106 return NULL; 1106 return NULL;
1107 } 1107 }
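
These dn_dev.c hunks are the first of many identical conversions in this diff: rcu_assign_pointer() becomes RCU_INIT_POINTER() where the stored value is NULL, or where the structure is published before RCU readers can reach it (or is otherwise already visible), so the write barrier implied by rcu_assign_pointer() buys nothing. A minimal sketch of the distinction, using a hypothetical gbl_foo pointer rather than any structure from this patch:

/* Hedged sketch, not from this patch: when RCU_INIT_POINTER() is enough. */
#include <linux/errno.h>
#include <linux/rcupdate.h>
#include <linux/slab.h>

struct foo { int a; };
static struct foo __rcu *gbl_foo;

static int publish_foo(int a)
{
        struct foo *new = kmalloc(sizeof(*new), GFP_KERNEL);

        if (!new)
                return -ENOMEM;
        new->a = a;
        /*
         * Readers may already be traversing gbl_foo: the barrier in
         * rcu_assign_pointer() orders the initialisation of *new before
         * the pointer becomes visible.
         */
        rcu_assign_pointer(gbl_foo, new);
        return 0;
}

static void unpublish_foo(void)
{
        struct foo *old = rcu_dereference_protected(gbl_foo, 1);

        /*
         * Storing NULL publishes no new data, so no write barrier is
         * needed and RCU_INIT_POINTER() is sufficient (and cheaper).
         */
        RCU_INIT_POINTER(gbl_foo, NULL);
        synchronize_rcu();
        kfree(old);
}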
diff --git a/net/dsa/slave.c b/net/dsa/slave.c
index 0a47b6c37038..56cf9b8e1c7c 100644
--- a/net/dsa/slave.c
+++ b/net/dsa/slave.c
@@ -301,7 +301,6 @@ static const struct net_device_ops dsa_netdev_ops = {
301 .ndo_start_xmit = dsa_xmit, 301 .ndo_start_xmit = dsa_xmit,
302 .ndo_change_rx_flags = dsa_slave_change_rx_flags, 302 .ndo_change_rx_flags = dsa_slave_change_rx_flags,
303 .ndo_set_rx_mode = dsa_slave_set_rx_mode, 303 .ndo_set_rx_mode = dsa_slave_set_rx_mode,
304 .ndo_set_multicast_list = dsa_slave_set_rx_mode,
305 .ndo_set_mac_address = dsa_slave_set_mac_address, 304 .ndo_set_mac_address = dsa_slave_set_mac_address,
306 .ndo_do_ioctl = dsa_slave_ioctl, 305 .ndo_do_ioctl = dsa_slave_ioctl,
307}; 306};
@@ -314,7 +313,6 @@ static const struct net_device_ops edsa_netdev_ops = {
314 .ndo_start_xmit = edsa_xmit, 313 .ndo_start_xmit = edsa_xmit,
315 .ndo_change_rx_flags = dsa_slave_change_rx_flags, 314 .ndo_change_rx_flags = dsa_slave_change_rx_flags,
316 .ndo_set_rx_mode = dsa_slave_set_rx_mode, 315 .ndo_set_rx_mode = dsa_slave_set_rx_mode,
317 .ndo_set_multicast_list = dsa_slave_set_rx_mode,
318 .ndo_set_mac_address = dsa_slave_set_mac_address, 316 .ndo_set_mac_address = dsa_slave_set_mac_address,
319 .ndo_do_ioctl = dsa_slave_ioctl, 317 .ndo_do_ioctl = dsa_slave_ioctl,
320}; 318};
@@ -327,7 +325,6 @@ static const struct net_device_ops trailer_netdev_ops = {
327 .ndo_start_xmit = trailer_xmit, 325 .ndo_start_xmit = trailer_xmit,
328 .ndo_change_rx_flags = dsa_slave_change_rx_flags, 326 .ndo_change_rx_flags = dsa_slave_change_rx_flags,
329 .ndo_set_rx_mode = dsa_slave_set_rx_mode, 327 .ndo_set_rx_mode = dsa_slave_set_rx_mode,
330 .ndo_set_multicast_list = dsa_slave_set_rx_mode,
331 .ndo_set_mac_address = dsa_slave_set_mac_address, 328 .ndo_set_mac_address = dsa_slave_set_mac_address,
332 .ndo_do_ioctl = dsa_slave_ioctl, 329 .ndo_do_ioctl = dsa_slave_ioctl,
333}; 330};
diff --git a/net/ipv4/devinet.c b/net/ipv4/devinet.c
index bc19bd06dd00..c6b5092f29a1 100644
--- a/net/ipv4/devinet.c
+++ b/net/ipv4/devinet.c
@@ -258,7 +258,7 @@ static struct in_device *inetdev_init(struct net_device *dev)
258 ip_mc_up(in_dev); 258 ip_mc_up(in_dev);
259 259
260 /* we can receive as soon as ip_ptr is set -- do this last */ 260 /* we can receive as soon as ip_ptr is set -- do this last */
261 rcu_assign_pointer(dev->ip_ptr, in_dev); 261 RCU_INIT_POINTER(dev->ip_ptr, in_dev);
262out: 262out:
263 return in_dev; 263 return in_dev;
264out_kfree: 264out_kfree:
@@ -291,7 +291,7 @@ static void inetdev_destroy(struct in_device *in_dev)
291 inet_free_ifa(ifa); 291 inet_free_ifa(ifa);
292 } 292 }
293 293
294 rcu_assign_pointer(dev->ip_ptr, NULL); 294 RCU_INIT_POINTER(dev->ip_ptr, NULL);
295 295
296 devinet_sysctl_unregister(in_dev); 296 devinet_sysctl_unregister(in_dev);
297 neigh_parms_release(&arp_tbl, in_dev->arp_parms); 297 neigh_parms_release(&arp_tbl, in_dev->arp_parms);
@@ -1175,7 +1175,7 @@ static int inetdev_event(struct notifier_block *this, unsigned long event,
1175 switch (event) { 1175 switch (event) {
1176 case NETDEV_REGISTER: 1176 case NETDEV_REGISTER:
1177 printk(KERN_DEBUG "inetdev_event: bug\n"); 1177 printk(KERN_DEBUG "inetdev_event: bug\n");
1178 rcu_assign_pointer(dev->ip_ptr, NULL); 1178 RCU_INIT_POINTER(dev->ip_ptr, NULL);
1179 break; 1179 break;
1180 case NETDEV_UP: 1180 case NETDEV_UP:
1181 if (!inetdev_valid_mtu(dev->mtu)) 1181 if (!inetdev_valid_mtu(dev->mtu))
diff --git a/net/ipv4/fib_trie.c b/net/ipv4/fib_trie.c
index de9e2978476f..89d6f71a6a99 100644
--- a/net/ipv4/fib_trie.c
+++ b/net/ipv4/fib_trie.c
@@ -204,7 +204,7 @@ static inline struct tnode *node_parent_rcu(const struct rt_trie_node *node)
204 return (struct tnode *)(parent & ~NODE_TYPE_MASK); 204 return (struct tnode *)(parent & ~NODE_TYPE_MASK);
205} 205}
206 206
207/* Same as rcu_assign_pointer 207/* Same as RCU_INIT_POINTER
208 * but that macro() assumes that value is a pointer. 208 * but that macro() assumes that value is a pointer.
209 */ 209 */
210static inline void node_set_parent(struct rt_trie_node *node, struct tnode *ptr) 210static inline void node_set_parent(struct rt_trie_node *node, struct tnode *ptr)
@@ -528,7 +528,7 @@ static void tnode_put_child_reorg(struct tnode *tn, int i, struct rt_trie_node *
528 if (n) 528 if (n)
529 node_set_parent(n, tn); 529 node_set_parent(n, tn);
530 530
531 rcu_assign_pointer(tn->child[i], n); 531 RCU_INIT_POINTER(tn->child[i], n);
532} 532}
533 533
534#define MAX_WORK 10 534#define MAX_WORK 10
@@ -1014,7 +1014,7 @@ static void trie_rebalance(struct trie *t, struct tnode *tn)
1014 1014
1015 tp = node_parent((struct rt_trie_node *) tn); 1015 tp = node_parent((struct rt_trie_node *) tn);
1016 if (!tp) 1016 if (!tp)
1017 rcu_assign_pointer(t->trie, (struct rt_trie_node *)tn); 1017 RCU_INIT_POINTER(t->trie, (struct rt_trie_node *)tn);
1018 1018
1019 tnode_free_flush(); 1019 tnode_free_flush();
1020 if (!tp) 1020 if (!tp)
@@ -1026,7 +1026,7 @@ static void trie_rebalance(struct trie *t, struct tnode *tn)
1026 if (IS_TNODE(tn)) 1026 if (IS_TNODE(tn))
1027 tn = (struct tnode *)resize(t, (struct tnode *)tn); 1027 tn = (struct tnode *)resize(t, (struct tnode *)tn);
1028 1028
1029 rcu_assign_pointer(t->trie, (struct rt_trie_node *)tn); 1029 RCU_INIT_POINTER(t->trie, (struct rt_trie_node *)tn);
1030 tnode_free_flush(); 1030 tnode_free_flush();
1031} 1031}
1032 1032
@@ -1163,7 +1163,7 @@ static struct list_head *fib_insert_node(struct trie *t, u32 key, int plen)
1163 put_child(t, (struct tnode *)tp, cindex, 1163 put_child(t, (struct tnode *)tp, cindex,
1164 (struct rt_trie_node *)tn); 1164 (struct rt_trie_node *)tn);
1165 } else { 1165 } else {
1166 rcu_assign_pointer(t->trie, (struct rt_trie_node *)tn); 1166 RCU_INIT_POINTER(t->trie, (struct rt_trie_node *)tn);
1167 tp = tn; 1167 tp = tn;
1168 } 1168 }
1169 } 1169 }
@@ -1621,7 +1621,7 @@ static void trie_leaf_remove(struct trie *t, struct leaf *l)
1621 put_child(t, (struct tnode *)tp, cindex, NULL); 1621 put_child(t, (struct tnode *)tp, cindex, NULL);
1622 trie_rebalance(t, tp); 1622 trie_rebalance(t, tp);
1623 } else 1623 } else
1624 rcu_assign_pointer(t->trie, NULL); 1624 RCU_INIT_POINTER(t->trie, NULL);
1625 1625
1626 free_leaf(l); 1626 free_leaf(l);
1627} 1627}
diff --git a/net/ipv4/gre.c b/net/ipv4/gre.c
index dbfc21de3479..8cb1ebb7cd74 100644
--- a/net/ipv4/gre.c
+++ b/net/ipv4/gre.c
@@ -34,7 +34,7 @@ int gre_add_protocol(const struct gre_protocol *proto, u8 version)
34 if (gre_proto[version]) 34 if (gre_proto[version])
35 goto err_out_unlock; 35 goto err_out_unlock;
36 36
37 rcu_assign_pointer(gre_proto[version], proto); 37 RCU_INIT_POINTER(gre_proto[version], proto);
38 spin_unlock(&gre_proto_lock); 38 spin_unlock(&gre_proto_lock);
39 return 0; 39 return 0;
40 40
@@ -54,7 +54,7 @@ int gre_del_protocol(const struct gre_protocol *proto, u8 version)
54 if (rcu_dereference_protected(gre_proto[version], 54 if (rcu_dereference_protected(gre_proto[version],
55 lockdep_is_held(&gre_proto_lock)) != proto) 55 lockdep_is_held(&gre_proto_lock)) != proto)
56 goto err_out_unlock; 56 goto err_out_unlock;
57 rcu_assign_pointer(gre_proto[version], NULL); 57 RCU_INIT_POINTER(gre_proto[version], NULL);
58 spin_unlock(&gre_proto_lock); 58 spin_unlock(&gre_proto_lock);
59 synchronize_rcu(); 59 synchronize_rcu();
60 return 0; 60 return 0;
diff --git a/net/ipv4/igmp.c b/net/ipv4/igmp.c
index f1d27f6c9351..ce57bdee14cb 100644
--- a/net/ipv4/igmp.c
+++ b/net/ipv4/igmp.c
@@ -1009,7 +1009,7 @@ static void ip_mc_filter_add(struct in_device *in_dev, __be32 addr)
1009 1009
1010 /* Checking for IFF_MULTICAST here is WRONG-WRONG-WRONG. 1010 /* Checking for IFF_MULTICAST here is WRONG-WRONG-WRONG.
1011 We will get multicast token leakage, when IFF_MULTICAST 1011 We will get multicast token leakage, when IFF_MULTICAST
1012 is changed. This check should be done in dev->set_multicast_list 1012 is changed. This check should be done in ndo_set_rx_mode
1013 routine. Something sort of: 1013 routine. Something sort of:
1014 if (dev->mc_list && dev->flags&IFF_MULTICAST) { do it; } 1014 if (dev->mc_list && dev->flags&IFF_MULTICAST) { do it; }
1015 --ANK 1015 --ANK
@@ -1242,7 +1242,7 @@ void ip_mc_inc_group(struct in_device *in_dev, __be32 addr)
1242 1242
1243 im->next_rcu = in_dev->mc_list; 1243 im->next_rcu = in_dev->mc_list;
1244 in_dev->mc_count++; 1244 in_dev->mc_count++;
1245 rcu_assign_pointer(in_dev->mc_list, im); 1245 RCU_INIT_POINTER(in_dev->mc_list, im);
1246 1246
1247#ifdef CONFIG_IP_MULTICAST 1247#ifdef CONFIG_IP_MULTICAST
1248 igmpv3_del_delrec(in_dev, im->multiaddr); 1248 igmpv3_del_delrec(in_dev, im->multiaddr);
@@ -1718,7 +1718,7 @@ static int ip_mc_add_src(struct in_device *in_dev, __be32 *pmca, int sfmode,
1718 1718
1719 pmc->sfcount[sfmode]--; 1719 pmc->sfcount[sfmode]--;
1720 for (j=0; j<i; j++) 1720 for (j=0; j<i; j++)
1721 (void) ip_mc_del1_src(pmc, sfmode, &psfsrc[i]); 1721 (void) ip_mc_del1_src(pmc, sfmode, &psfsrc[j]);
1722 } else if (isexclude != (pmc->sfcount[MCAST_EXCLUDE] != 0)) { 1722 } else if (isexclude != (pmc->sfcount[MCAST_EXCLUDE] != 0)) {
1723#ifdef CONFIG_IP_MULTICAST 1723#ifdef CONFIG_IP_MULTICAST
1724 struct ip_sf_list *psf; 1724 struct ip_sf_list *psf;
@@ -1813,7 +1813,7 @@ int ip_mc_join_group(struct sock *sk , struct ip_mreqn *imr)
1813 iml->next_rcu = inet->mc_list; 1813 iml->next_rcu = inet->mc_list;
1814 iml->sflist = NULL; 1814 iml->sflist = NULL;
1815 iml->sfmode = MCAST_EXCLUDE; 1815 iml->sfmode = MCAST_EXCLUDE;
1816 rcu_assign_pointer(inet->mc_list, iml); 1816 RCU_INIT_POINTER(inet->mc_list, iml);
1817 ip_mc_inc_group(in_dev, addr); 1817 ip_mc_inc_group(in_dev, addr);
1818 err = 0; 1818 err = 0;
1819done: 1819done:
@@ -1835,7 +1835,7 @@ static int ip_mc_leave_src(struct sock *sk, struct ip_mc_socklist *iml,
1835 } 1835 }
1836 err = ip_mc_del_src(in_dev, &iml->multi.imr_multiaddr.s_addr, 1836 err = ip_mc_del_src(in_dev, &iml->multi.imr_multiaddr.s_addr,
1837 iml->sfmode, psf->sl_count, psf->sl_addr, 0); 1837 iml->sfmode, psf->sl_count, psf->sl_addr, 0);
1838 rcu_assign_pointer(iml->sflist, NULL); 1838 RCU_INIT_POINTER(iml->sflist, NULL);
1839 /* decrease mem now to avoid the memleak warning */ 1839 /* decrease mem now to avoid the memleak warning */
1840 atomic_sub(IP_SFLSIZE(psf->sl_max), &sk->sk_omem_alloc); 1840 atomic_sub(IP_SFLSIZE(psf->sl_max), &sk->sk_omem_alloc);
1841 kfree_rcu(psf, rcu); 1841 kfree_rcu(psf, rcu);
@@ -2000,7 +2000,7 @@ int ip_mc_source(int add, int omode, struct sock *sk, struct
2000 atomic_sub(IP_SFLSIZE(psl->sl_max), &sk->sk_omem_alloc); 2000 atomic_sub(IP_SFLSIZE(psl->sl_max), &sk->sk_omem_alloc);
2001 kfree_rcu(psl, rcu); 2001 kfree_rcu(psl, rcu);
2002 } 2002 }
2003 rcu_assign_pointer(pmc->sflist, newpsl); 2003 RCU_INIT_POINTER(pmc->sflist, newpsl);
2004 psl = newpsl; 2004 psl = newpsl;
2005 } 2005 }
2006 rv = 1; /* > 0 for insert logic below if sl_count is 0 */ 2006 rv = 1; /* > 0 for insert logic below if sl_count is 0 */
@@ -2103,7 +2103,7 @@ int ip_mc_msfilter(struct sock *sk, struct ip_msfilter *msf, int ifindex)
2103 } else 2103 } else
2104 (void) ip_mc_del_src(in_dev, &msf->imsf_multiaddr, pmc->sfmode, 2104 (void) ip_mc_del_src(in_dev, &msf->imsf_multiaddr, pmc->sfmode,
2105 0, NULL, 0); 2105 0, NULL, 0);
2106 rcu_assign_pointer(pmc->sflist, newpsl); 2106 RCU_INIT_POINTER(pmc->sflist, newpsl);
2107 pmc->sfmode = msf->imsf_fmode; 2107 pmc->sfmode = msf->imsf_fmode;
2108 err = 0; 2108 err = 0;
2109done: 2109done:
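
The single-character fix in ip_mc_add_src() above corrects its error-unwind loop: on failure it kept deleting psfsrc[i], the source that had just failed, instead of walking back over the 0..i-1 sources that had actually been added. The idiom in isolation, with add_one()/del_one() standing in as hypothetical helpers:

/* Hedged sketch of the rollback idiom; add_one()/del_one() are placeholders. */
int add_one(int item);
void del_one(int item);

static int add_all(const int *items, int count)
{
        int i, j, err = 0;

        for (i = 0; i < count; i++) {
                err = add_one(items[i]);
                if (err)
                        break;
        }
        if (err) {
                /* Undo only what succeeded: items[0..i-1], not items[i]. */
                for (j = 0; j < i; j++)
                        del_one(items[j]);
        }
        return err;
}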
diff --git a/net/ipv4/inet_hashtables.c b/net/ipv4/inet_hashtables.c
index 3c0369a3a663..984ec656b03b 100644
--- a/net/ipv4/inet_hashtables.c
+++ b/net/ipv4/inet_hashtables.c
@@ -21,6 +21,7 @@
21 21
22#include <net/inet_connection_sock.h> 22#include <net/inet_connection_sock.h>
23#include <net/inet_hashtables.h> 23#include <net/inet_hashtables.h>
24#include <net/secure_seq.h>
24#include <net/ip.h> 25#include <net/ip.h>
25 26
26/* 27/*
diff --git a/net/ipv4/inetpeer.c b/net/ipv4/inetpeer.c
index e38213817d0a..86f13c67ea85 100644
--- a/net/ipv4/inetpeer.c
+++ b/net/ipv4/inetpeer.c
@@ -19,6 +19,7 @@
19#include <linux/net.h> 19#include <linux/net.h>
20#include <net/ip.h> 20#include <net/ip.h>
21#include <net/inetpeer.h> 21#include <net/inetpeer.h>
22#include <net/secure_seq.h>
22 23
23/* 24/*
24 * Theory of operations. 25 * Theory of operations.
diff --git a/net/ipv4/ip_output.c b/net/ipv4/ip_output.c
index ccaaa851ab42..8c6563361ab5 100644
--- a/net/ipv4/ip_output.c
+++ b/net/ipv4/ip_output.c
@@ -122,6 +122,7 @@ static int ip_dev_loopback_xmit(struct sk_buff *newskb)
122 newskb->pkt_type = PACKET_LOOPBACK; 122 newskb->pkt_type = PACKET_LOOPBACK;
123 newskb->ip_summed = CHECKSUM_UNNECESSARY; 123 newskb->ip_summed = CHECKSUM_UNNECESSARY;
124 WARN_ON(!skb_dst(newskb)); 124 WARN_ON(!skb_dst(newskb));
125 skb_dst_force(newskb);
125 netif_rx_ni(newskb); 126 netif_rx_ni(newskb);
126 return 0; 127 return 0;
127} 128}
@@ -204,9 +205,15 @@ static inline int ip_finish_output2(struct sk_buff *skb)
204 skb = skb2; 205 skb = skb2;
205 } 206 }
206 207
208 rcu_read_lock();
207 neigh = dst_get_neighbour(dst); 209 neigh = dst_get_neighbour(dst);
208 if (neigh) 210 if (neigh) {
209 return neigh_output(neigh, skb); 211 int res = neigh_output(neigh, skb);
212
213 rcu_read_unlock();
214 return res;
215 }
216 rcu_read_unlock();
210 217
211 if (net_ratelimit()) 218 if (net_ratelimit())
212 printk(KERN_DEBUG "ip_finish_output2: No header cache and no neighbour!\n"); 219 printk(KERN_DEBUG "ip_finish_output2: No header cache and no neighbour!\n");
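
The ip_finish_output2() change brackets the neighbour lookup and the transmit call with rcu_read_lock()/rcu_read_unlock(): dst_get_neighbour() now hands back an RCU-protected pointer that is only guaranteed to stay alive inside a read-side critical section, so neigh_output() has to run before the unlock. The shape of that pattern on its own (a sketch, not the real function):

/* Hedged sketch of the RCU-protected neighbour output pattern. */
#include <linux/skbuff.h>
#include <net/dst.h>
#include <net/neighbour.h>

static int example_output(struct sk_buff *skb)
{
        struct dst_entry *dst = skb_dst(skb);
        struct neighbour *neigh;

        rcu_read_lock();
        neigh = dst_get_neighbour(dst);
        if (neigh) {
                int res = neigh_output(neigh, skb); /* uses neigh: stay under the lock */

                rcu_read_unlock();
                return res;
        }
        rcu_read_unlock();

        kfree_skb(skb);
        return -EINVAL;
}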
diff --git a/net/ipv4/ip_sockglue.c b/net/ipv4/ip_sockglue.c
index ab0c9efd1efa..8905e92f896a 100644
--- a/net/ipv4/ip_sockglue.c
+++ b/net/ipv4/ip_sockglue.c
@@ -1067,7 +1067,7 @@ EXPORT_SYMBOL(compat_ip_setsockopt);
1067 */ 1067 */
1068 1068
1069static int do_ip_getsockopt(struct sock *sk, int level, int optname, 1069static int do_ip_getsockopt(struct sock *sk, int level, int optname,
1070 char __user *optval, int __user *optlen) 1070 char __user *optval, int __user *optlen, unsigned flags)
1071{ 1071{
1072 struct inet_sock *inet = inet_sk(sk); 1072 struct inet_sock *inet = inet_sk(sk);
1073 int val; 1073 int val;
@@ -1240,7 +1240,7 @@ static int do_ip_getsockopt(struct sock *sk, int level, int optname,
1240 1240
1241 msg.msg_control = optval; 1241 msg.msg_control = optval;
1242 msg.msg_controllen = len; 1242 msg.msg_controllen = len;
1243 msg.msg_flags = 0; 1243 msg.msg_flags = flags;
1244 1244
1245 if (inet->cmsg_flags & IP_CMSG_PKTINFO) { 1245 if (inet->cmsg_flags & IP_CMSG_PKTINFO) {
1246 struct in_pktinfo info; 1246 struct in_pktinfo info;
@@ -1294,7 +1294,7 @@ int ip_getsockopt(struct sock *sk, int level,
1294{ 1294{
1295 int err; 1295 int err;
1296 1296
1297 err = do_ip_getsockopt(sk, level, optname, optval, optlen); 1297 err = do_ip_getsockopt(sk, level, optname, optval, optlen, 0);
1298#ifdef CONFIG_NETFILTER 1298#ifdef CONFIG_NETFILTER
1299 /* we need to exclude all possible ENOPROTOOPTs except default case */ 1299 /* we need to exclude all possible ENOPROTOOPTs except default case */
1300 if (err == -ENOPROTOOPT && optname != IP_PKTOPTIONS && 1300 if (err == -ENOPROTOOPT && optname != IP_PKTOPTIONS &&
@@ -1327,7 +1327,8 @@ int compat_ip_getsockopt(struct sock *sk, int level, int optname,
1327 return compat_mc_getsockopt(sk, level, optname, optval, optlen, 1327 return compat_mc_getsockopt(sk, level, optname, optval, optlen,
1328 ip_getsockopt); 1328 ip_getsockopt);
1329 1329
1330 err = do_ip_getsockopt(sk, level, optname, optval, optlen); 1330 err = do_ip_getsockopt(sk, level, optname, optval, optlen,
1331 MSG_CMSG_COMPAT);
1331 1332
1332#ifdef CONFIG_NETFILTER 1333#ifdef CONFIG_NETFILTER
1333 /* we need to exclude all possible ENOPROTOOPTs except default case */ 1334 /* we need to exclude all possible ENOPROTOOPTs except default case */
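
The new flags argument to do_ip_getsockopt() exists so that compat_ip_getsockopt() can pass MSG_CMSG_COMPAT down to the IP_PKTOPTIONS branch, which serialises the stored control messages into optval via put_cmsg(); a 32-bit caller on a 64-bit kernel needs the compat cmsghdr layout, and previously that flag was simply lost. From userspace the option is consumed roughly as below, a hedged sketch with error handling trimmed, meaningful in practice on a TCP socket that has the relevant IP_CMSG_* options (IP_PKTINFO and friends) enabled:

/* Hedged userspace sketch: reading IP_PKTOPTIONS back as a cmsg buffer. */
#define _GNU_SOURCE             /* for struct in_pktinfo */
#include <netinet/in.h>
#include <string.h>
#include <sys/socket.h>

static void read_pktoptions(int fd)
{
        char buf[256];
        socklen_t len = sizeof(buf);
        struct msghdr msg;
        struct cmsghdr *cmsg;

        if (getsockopt(fd, IPPROTO_IP, IP_PKTOPTIONS, buf, &len) < 0)
                return;

        memset(&msg, 0, sizeof(msg));
        msg.msg_control = buf;
        msg.msg_controllen = len;

        for (cmsg = CMSG_FIRSTHDR(&msg); cmsg; cmsg = CMSG_NXTHDR(&msg, cmsg)) {
                if (cmsg->cmsg_level == IPPROTO_IP &&
                    cmsg->cmsg_type == IP_PKTINFO) {
                        struct in_pktinfo info;

                        memcpy(&info, CMSG_DATA(cmsg), sizeof(info));
                        /* info.ipi_ifindex / info.ipi_addr describe the
                         * interface and destination of the last packet. */
                }
        }
}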
diff --git a/net/ipv4/ipip.c b/net/ipv4/ipip.c
index 378b20b7ca6e..065effd8349a 100644
--- a/net/ipv4/ipip.c
+++ b/net/ipv4/ipip.c
@@ -231,7 +231,7 @@ static void ipip_tunnel_unlink(struct ipip_net *ipn, struct ip_tunnel *t)
231 (iter = rtnl_dereference(*tp)) != NULL; 231 (iter = rtnl_dereference(*tp)) != NULL;
232 tp = &iter->next) { 232 tp = &iter->next) {
233 if (t == iter) { 233 if (t == iter) {
234 rcu_assign_pointer(*tp, t->next); 234 RCU_INIT_POINTER(*tp, t->next);
235 break; 235 break;
236 } 236 }
237 } 237 }
@@ -241,8 +241,8 @@ static void ipip_tunnel_link(struct ipip_net *ipn, struct ip_tunnel *t)
241{ 241{
242 struct ip_tunnel __rcu **tp = ipip_bucket(ipn, t); 242 struct ip_tunnel __rcu **tp = ipip_bucket(ipn, t);
243 243
244 rcu_assign_pointer(t->next, rtnl_dereference(*tp)); 244 RCU_INIT_POINTER(t->next, rtnl_dereference(*tp));
245 rcu_assign_pointer(*tp, t); 245 RCU_INIT_POINTER(*tp, t);
246} 246}
247 247
248static struct ip_tunnel * ipip_tunnel_locate(struct net *net, 248static struct ip_tunnel * ipip_tunnel_locate(struct net *net,
@@ -301,7 +301,7 @@ static void ipip_tunnel_uninit(struct net_device *dev)
301 struct ipip_net *ipn = net_generic(net, ipip_net_id); 301 struct ipip_net *ipn = net_generic(net, ipip_net_id);
302 302
303 if (dev == ipn->fb_tunnel_dev) 303 if (dev == ipn->fb_tunnel_dev)
304 rcu_assign_pointer(ipn->tunnels_wc[0], NULL); 304 RCU_INIT_POINTER(ipn->tunnels_wc[0], NULL);
305 else 305 else
306 ipip_tunnel_unlink(ipn, netdev_priv(dev)); 306 ipip_tunnel_unlink(ipn, netdev_priv(dev));
307 dev_put(dev); 307 dev_put(dev);
@@ -791,7 +791,7 @@ static int __net_init ipip_fb_tunnel_init(struct net_device *dev)
791 return -ENOMEM; 791 return -ENOMEM;
792 792
793 dev_hold(dev); 793 dev_hold(dev);
794 rcu_assign_pointer(ipn->tunnels_wc[0], tunnel); 794 RCU_INIT_POINTER(ipn->tunnels_wc[0], tunnel);
795 return 0; 795 return 0;
796} 796}
797 797
diff --git a/net/ipv4/ipmr.c b/net/ipv4/ipmr.c
index 58e879157976..6164e982e0ef 100644
--- a/net/ipv4/ipmr.c
+++ b/net/ipv4/ipmr.c
@@ -1176,7 +1176,7 @@ static void mrtsock_destruct(struct sock *sk)
1176 ipmr_for_each_table(mrt, net) { 1176 ipmr_for_each_table(mrt, net) {
1177 if (sk == rtnl_dereference(mrt->mroute_sk)) { 1177 if (sk == rtnl_dereference(mrt->mroute_sk)) {
1178 IPV4_DEVCONF_ALL(net, MC_FORWARDING)--; 1178 IPV4_DEVCONF_ALL(net, MC_FORWARDING)--;
1179 rcu_assign_pointer(mrt->mroute_sk, NULL); 1179 RCU_INIT_POINTER(mrt->mroute_sk, NULL);
1180 mroute_clean_tables(mrt); 1180 mroute_clean_tables(mrt);
1181 } 1181 }
1182 } 1182 }
@@ -1203,7 +1203,7 @@ int ip_mroute_setsockopt(struct sock *sk, int optname, char __user *optval, unsi
1203 return -ENOENT; 1203 return -ENOENT;
1204 1204
1205 if (optname != MRT_INIT) { 1205 if (optname != MRT_INIT) {
1206 if (sk != rcu_dereference_raw(mrt->mroute_sk) && 1206 if (sk != rcu_access_pointer(mrt->mroute_sk) &&
1207 !capable(CAP_NET_ADMIN)) 1207 !capable(CAP_NET_ADMIN))
1208 return -EACCES; 1208 return -EACCES;
1209 } 1209 }
@@ -1224,13 +1224,13 @@ int ip_mroute_setsockopt(struct sock *sk, int optname, char __user *optval, unsi
1224 1224
1225 ret = ip_ra_control(sk, 1, mrtsock_destruct); 1225 ret = ip_ra_control(sk, 1, mrtsock_destruct);
1226 if (ret == 0) { 1226 if (ret == 0) {
1227 rcu_assign_pointer(mrt->mroute_sk, sk); 1227 RCU_INIT_POINTER(mrt->mroute_sk, sk);
1228 IPV4_DEVCONF_ALL(net, MC_FORWARDING)++; 1228 IPV4_DEVCONF_ALL(net, MC_FORWARDING)++;
1229 } 1229 }
1230 rtnl_unlock(); 1230 rtnl_unlock();
1231 return ret; 1231 return ret;
1232 case MRT_DONE: 1232 case MRT_DONE:
1233 if (sk != rcu_dereference_raw(mrt->mroute_sk)) 1233 if (sk != rcu_access_pointer(mrt->mroute_sk))
1234 return -EACCES; 1234 return -EACCES;
1235 return ip_ra_control(sk, 0, NULL); 1235 return ip_ra_control(sk, 0, NULL);
1236 case MRT_ADD_VIF: 1236 case MRT_ADD_VIF:
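
The ipmr.c changes (and the matching ones in route.c further down) replace rcu_dereference_raw() with rcu_access_pointer() at sites that only compare the pointer value and never dereference it; rcu_access_pointer() documents that intent and avoids pretending that a real, lock-protected dereference is taking place. In sketch form, where mroute_sk is just an illustrative __rcu pointer and mroute_socket_mark() is a hypothetical helper:

/* Hedged sketch: value-only test vs. real dereference of an __rcu pointer. */
#include <linux/rcupdate.h>
#include <net/sock.h>

static struct sock __rcu *mroute_sk;

static bool is_mroute_socket(struct sock *sk)
{
        /*
         * Only the pointer value is compared, nothing is dereferenced,
         * so no RCU read-side critical section is required.
         */
        return sk == rcu_access_pointer(mroute_sk);
}

static int mroute_socket_mark(void)
{
        struct sock *sk;
        int mark = 0;

        rcu_read_lock();
        sk = rcu_dereference(mroute_sk);  /* will be dereferenced: lock required */
        if (sk)
                mark = sk->sk_mark;
        rcu_read_unlock();
        return mark;
}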
diff --git a/net/ipv4/netfilter.c b/net/ipv4/netfilter.c
index 2e97e3ec1eb7..929b27bdeb79 100644
--- a/net/ipv4/netfilter.c
+++ b/net/ipv4/netfilter.c
@@ -18,17 +18,15 @@ int ip_route_me_harder(struct sk_buff *skb, unsigned addr_type)
18 struct rtable *rt; 18 struct rtable *rt;
19 struct flowi4 fl4 = {}; 19 struct flowi4 fl4 = {};
20 __be32 saddr = iph->saddr; 20 __be32 saddr = iph->saddr;
21 __u8 flags = 0; 21 __u8 flags = skb->sk ? inet_sk_flowi_flags(skb->sk) : 0;
22 unsigned int hh_len; 22 unsigned int hh_len;
23 23
24 if (!skb->sk && addr_type != RTN_LOCAL) { 24 if (addr_type == RTN_UNSPEC)
25 if (addr_type == RTN_UNSPEC) 25 addr_type = inet_addr_type(net, saddr);
26 addr_type = inet_addr_type(net, saddr); 26 if (addr_type == RTN_LOCAL || addr_type == RTN_UNICAST)
27 if (addr_type == RTN_LOCAL || addr_type == RTN_UNICAST) 27 flags |= FLOWI_FLAG_ANYSRC;
28 flags |= FLOWI_FLAG_ANYSRC; 28 else
29 else 29 saddr = 0;
30 saddr = 0;
31 }
32 30
33 /* some non-standard hacks like ipt_REJECT.c:send_reset() can cause 31 /* some non-standard hacks like ipt_REJECT.c:send_reset() can cause
34 * packets with foreign saddr to appear on the NF_INET_LOCAL_OUT hook. 32 * packets with foreign saddr to appear on the NF_INET_LOCAL_OUT hook.
@@ -38,7 +36,7 @@ int ip_route_me_harder(struct sk_buff *skb, unsigned addr_type)
38 fl4.flowi4_tos = RT_TOS(iph->tos); 36 fl4.flowi4_tos = RT_TOS(iph->tos);
39 fl4.flowi4_oif = skb->sk ? skb->sk->sk_bound_dev_if : 0; 37 fl4.flowi4_oif = skb->sk ? skb->sk->sk_bound_dev_if : 0;
40 fl4.flowi4_mark = skb->mark; 38 fl4.flowi4_mark = skb->mark;
41 fl4.flowi4_flags = skb->sk ? inet_sk_flowi_flags(skb->sk) : flags; 39 fl4.flowi4_flags = flags;
42 rt = ip_route_output_key(net, &fl4); 40 rt = ip_route_output_key(net, &fl4);
43 if (IS_ERR(rt)) 41 if (IS_ERR(rt))
44 return -1; 42 return -1;
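
After this change ip_route_me_harder() always seeds the flow flags from the originating socket (when there is one) and applies the address-type fallback — FLOWI_FLAG_ANYSRC for local/unicast sources, a zeroed saddr otherwise — to every packet rather than only to socket-less ones; previously the computed fallback flags were discarded whenever skb->sk was set. The re-lookup it then performs is the usual output-route pattern, sketched here in stripped-down form with only the fields the function actually keys on:

/* Hedged sketch of an output re-lookup keyed the way ip_route_me_harder() is. */
#include <linux/skbuff.h>
#include <net/inet_sock.h>
#include <net/route.h>

static struct rtable *example_reroute(struct net *net, struct sk_buff *skb,
                                      __be32 daddr, __be32 saddr, __u8 tos)
{
        struct flowi4 fl4 = {};

        fl4.daddr = daddr;
        fl4.saddr = saddr;
        fl4.flowi4_tos = RT_TOS(tos);
        fl4.flowi4_oif = skb->sk ? skb->sk->sk_bound_dev_if : 0;
        fl4.flowi4_mark = skb->mark;
        fl4.flowi4_flags = skb->sk ? inet_sk_flowi_flags(skb->sk) : 0;

        return ip_route_output_key(net, &fl4);  /* may return ERR_PTR() */
}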
diff --git a/net/ipv4/netfilter/nf_nat_amanda.c b/net/ipv4/netfilter/nf_nat_amanda.c
index 703f366fd235..7b22382ff0e9 100644
--- a/net/ipv4/netfilter/nf_nat_amanda.c
+++ b/net/ipv4/netfilter/nf_nat_amanda.c
@@ -70,14 +70,14 @@ static unsigned int help(struct sk_buff *skb,
70 70
71static void __exit nf_nat_amanda_fini(void) 71static void __exit nf_nat_amanda_fini(void)
72{ 72{
73 rcu_assign_pointer(nf_nat_amanda_hook, NULL); 73 RCU_INIT_POINTER(nf_nat_amanda_hook, NULL);
74 synchronize_rcu(); 74 synchronize_rcu();
75} 75}
76 76
77static int __init nf_nat_amanda_init(void) 77static int __init nf_nat_amanda_init(void)
78{ 78{
79 BUG_ON(nf_nat_amanda_hook != NULL); 79 BUG_ON(nf_nat_amanda_hook != NULL);
80 rcu_assign_pointer(nf_nat_amanda_hook, help); 80 RCU_INIT_POINTER(nf_nat_amanda_hook, help);
81 return 0; 81 return 0;
82} 82}
83 83
diff --git a/net/ipv4/netfilter/nf_nat_core.c b/net/ipv4/netfilter/nf_nat_core.c
index 3346de5d94d0..447bc5cfdc6c 100644
--- a/net/ipv4/netfilter/nf_nat_core.c
+++ b/net/ipv4/netfilter/nf_nat_core.c
@@ -514,7 +514,7 @@ int nf_nat_protocol_register(const struct nf_nat_protocol *proto)
514 ret = -EBUSY; 514 ret = -EBUSY;
515 goto out; 515 goto out;
516 } 516 }
517 rcu_assign_pointer(nf_nat_protos[proto->protonum], proto); 517 RCU_INIT_POINTER(nf_nat_protos[proto->protonum], proto);
518 out: 518 out:
519 spin_unlock_bh(&nf_nat_lock); 519 spin_unlock_bh(&nf_nat_lock);
520 return ret; 520 return ret;
@@ -525,7 +525,7 @@ EXPORT_SYMBOL(nf_nat_protocol_register);
525void nf_nat_protocol_unregister(const struct nf_nat_protocol *proto) 525void nf_nat_protocol_unregister(const struct nf_nat_protocol *proto)
526{ 526{
527 spin_lock_bh(&nf_nat_lock); 527 spin_lock_bh(&nf_nat_lock);
528 rcu_assign_pointer(nf_nat_protos[proto->protonum], 528 RCU_INIT_POINTER(nf_nat_protos[proto->protonum],
529 &nf_nat_unknown_protocol); 529 &nf_nat_unknown_protocol);
530 spin_unlock_bh(&nf_nat_lock); 530 spin_unlock_bh(&nf_nat_lock);
531 synchronize_rcu(); 531 synchronize_rcu();
@@ -736,10 +736,10 @@ static int __init nf_nat_init(void)
736 /* Sew in builtin protocols. */ 736 /* Sew in builtin protocols. */
737 spin_lock_bh(&nf_nat_lock); 737 spin_lock_bh(&nf_nat_lock);
738 for (i = 0; i < MAX_IP_NAT_PROTO; i++) 738 for (i = 0; i < MAX_IP_NAT_PROTO; i++)
739 rcu_assign_pointer(nf_nat_protos[i], &nf_nat_unknown_protocol); 739 RCU_INIT_POINTER(nf_nat_protos[i], &nf_nat_unknown_protocol);
740 rcu_assign_pointer(nf_nat_protos[IPPROTO_TCP], &nf_nat_protocol_tcp); 740 RCU_INIT_POINTER(nf_nat_protos[IPPROTO_TCP], &nf_nat_protocol_tcp);
741 rcu_assign_pointer(nf_nat_protos[IPPROTO_UDP], &nf_nat_protocol_udp); 741 RCU_INIT_POINTER(nf_nat_protos[IPPROTO_UDP], &nf_nat_protocol_udp);
742 rcu_assign_pointer(nf_nat_protos[IPPROTO_ICMP], &nf_nat_protocol_icmp); 742 RCU_INIT_POINTER(nf_nat_protos[IPPROTO_ICMP], &nf_nat_protocol_icmp);
743 spin_unlock_bh(&nf_nat_lock); 743 spin_unlock_bh(&nf_nat_lock);
744 744
745 /* Initialize fake conntrack so that NAT will skip it */ 745 /* Initialize fake conntrack so that NAT will skip it */
@@ -748,12 +748,12 @@ static int __init nf_nat_init(void)
748 l3proto = nf_ct_l3proto_find_get((u_int16_t)AF_INET); 748 l3proto = nf_ct_l3proto_find_get((u_int16_t)AF_INET);
749 749
750 BUG_ON(nf_nat_seq_adjust_hook != NULL); 750 BUG_ON(nf_nat_seq_adjust_hook != NULL);
751 rcu_assign_pointer(nf_nat_seq_adjust_hook, nf_nat_seq_adjust); 751 RCU_INIT_POINTER(nf_nat_seq_adjust_hook, nf_nat_seq_adjust);
752 BUG_ON(nfnetlink_parse_nat_setup_hook != NULL); 752 BUG_ON(nfnetlink_parse_nat_setup_hook != NULL);
753 rcu_assign_pointer(nfnetlink_parse_nat_setup_hook, 753 RCU_INIT_POINTER(nfnetlink_parse_nat_setup_hook,
754 nfnetlink_parse_nat_setup); 754 nfnetlink_parse_nat_setup);
755 BUG_ON(nf_ct_nat_offset != NULL); 755 BUG_ON(nf_ct_nat_offset != NULL);
756 rcu_assign_pointer(nf_ct_nat_offset, nf_nat_get_offset); 756 RCU_INIT_POINTER(nf_ct_nat_offset, nf_nat_get_offset);
757 return 0; 757 return 0;
758 758
759 cleanup_extend: 759 cleanup_extend:
@@ -766,9 +766,9 @@ static void __exit nf_nat_cleanup(void)
766 unregister_pernet_subsys(&nf_nat_net_ops); 766 unregister_pernet_subsys(&nf_nat_net_ops);
767 nf_ct_l3proto_put(l3proto); 767 nf_ct_l3proto_put(l3proto);
768 nf_ct_extend_unregister(&nat_extend); 768 nf_ct_extend_unregister(&nat_extend);
769 rcu_assign_pointer(nf_nat_seq_adjust_hook, NULL); 769 RCU_INIT_POINTER(nf_nat_seq_adjust_hook, NULL);
770 rcu_assign_pointer(nfnetlink_parse_nat_setup_hook, NULL); 770 RCU_INIT_POINTER(nfnetlink_parse_nat_setup_hook, NULL);
771 rcu_assign_pointer(nf_ct_nat_offset, NULL); 771 RCU_INIT_POINTER(nf_ct_nat_offset, NULL);
772 synchronize_net(); 772 synchronize_net();
773} 773}
774 774
diff --git a/net/ipv4/netfilter/nf_nat_ftp.c b/net/ipv4/netfilter/nf_nat_ftp.c
index dc73abb3fe27..e462a957d080 100644
--- a/net/ipv4/netfilter/nf_nat_ftp.c
+++ b/net/ipv4/netfilter/nf_nat_ftp.c
@@ -113,14 +113,14 @@ out:
113 113
114static void __exit nf_nat_ftp_fini(void) 114static void __exit nf_nat_ftp_fini(void)
115{ 115{
116 rcu_assign_pointer(nf_nat_ftp_hook, NULL); 116 RCU_INIT_POINTER(nf_nat_ftp_hook, NULL);
117 synchronize_rcu(); 117 synchronize_rcu();
118} 118}
119 119
120static int __init nf_nat_ftp_init(void) 120static int __init nf_nat_ftp_init(void)
121{ 121{
122 BUG_ON(nf_nat_ftp_hook != NULL); 122 BUG_ON(nf_nat_ftp_hook != NULL);
123 rcu_assign_pointer(nf_nat_ftp_hook, nf_nat_ftp); 123 RCU_INIT_POINTER(nf_nat_ftp_hook, nf_nat_ftp);
124 return 0; 124 return 0;
125} 125}
126 126
diff --git a/net/ipv4/netfilter/nf_nat_h323.c b/net/ipv4/netfilter/nf_nat_h323.c
index 790f3160e012..b9a1136addbd 100644
--- a/net/ipv4/netfilter/nf_nat_h323.c
+++ b/net/ipv4/netfilter/nf_nat_h323.c
@@ -581,30 +581,30 @@ static int __init init(void)
581 BUG_ON(nat_callforwarding_hook != NULL); 581 BUG_ON(nat_callforwarding_hook != NULL);
582 BUG_ON(nat_q931_hook != NULL); 582 BUG_ON(nat_q931_hook != NULL);
583 583
584 rcu_assign_pointer(set_h245_addr_hook, set_h245_addr); 584 RCU_INIT_POINTER(set_h245_addr_hook, set_h245_addr);
585 rcu_assign_pointer(set_h225_addr_hook, set_h225_addr); 585 RCU_INIT_POINTER(set_h225_addr_hook, set_h225_addr);
586 rcu_assign_pointer(set_sig_addr_hook, set_sig_addr); 586 RCU_INIT_POINTER(set_sig_addr_hook, set_sig_addr);
587 rcu_assign_pointer(set_ras_addr_hook, set_ras_addr); 587 RCU_INIT_POINTER(set_ras_addr_hook, set_ras_addr);
588 rcu_assign_pointer(nat_rtp_rtcp_hook, nat_rtp_rtcp); 588 RCU_INIT_POINTER(nat_rtp_rtcp_hook, nat_rtp_rtcp);
589 rcu_assign_pointer(nat_t120_hook, nat_t120); 589 RCU_INIT_POINTER(nat_t120_hook, nat_t120);
590 rcu_assign_pointer(nat_h245_hook, nat_h245); 590 RCU_INIT_POINTER(nat_h245_hook, nat_h245);
591 rcu_assign_pointer(nat_callforwarding_hook, nat_callforwarding); 591 RCU_INIT_POINTER(nat_callforwarding_hook, nat_callforwarding);
592 rcu_assign_pointer(nat_q931_hook, nat_q931); 592 RCU_INIT_POINTER(nat_q931_hook, nat_q931);
593 return 0; 593 return 0;
594} 594}
595 595
596/****************************************************************************/ 596/****************************************************************************/
597static void __exit fini(void) 597static void __exit fini(void)
598{ 598{
599 rcu_assign_pointer(set_h245_addr_hook, NULL); 599 RCU_INIT_POINTER(set_h245_addr_hook, NULL);
600 rcu_assign_pointer(set_h225_addr_hook, NULL); 600 RCU_INIT_POINTER(set_h225_addr_hook, NULL);
601 rcu_assign_pointer(set_sig_addr_hook, NULL); 601 RCU_INIT_POINTER(set_sig_addr_hook, NULL);
602 rcu_assign_pointer(set_ras_addr_hook, NULL); 602 RCU_INIT_POINTER(set_ras_addr_hook, NULL);
603 rcu_assign_pointer(nat_rtp_rtcp_hook, NULL); 603 RCU_INIT_POINTER(nat_rtp_rtcp_hook, NULL);
604 rcu_assign_pointer(nat_t120_hook, NULL); 604 RCU_INIT_POINTER(nat_t120_hook, NULL);
605 rcu_assign_pointer(nat_h245_hook, NULL); 605 RCU_INIT_POINTER(nat_h245_hook, NULL);
606 rcu_assign_pointer(nat_callforwarding_hook, NULL); 606 RCU_INIT_POINTER(nat_callforwarding_hook, NULL);
607 rcu_assign_pointer(nat_q931_hook, NULL); 607 RCU_INIT_POINTER(nat_q931_hook, NULL);
608 synchronize_rcu(); 608 synchronize_rcu();
609} 609}
610 610
diff --git a/net/ipv4/netfilter/nf_nat_irc.c b/net/ipv4/netfilter/nf_nat_irc.c
index 535e1a802356..979ae165f4ef 100644
--- a/net/ipv4/netfilter/nf_nat_irc.c
+++ b/net/ipv4/netfilter/nf_nat_irc.c
@@ -75,14 +75,14 @@ static unsigned int help(struct sk_buff *skb,
75 75
76static void __exit nf_nat_irc_fini(void) 76static void __exit nf_nat_irc_fini(void)
77{ 77{
78 rcu_assign_pointer(nf_nat_irc_hook, NULL); 78 RCU_INIT_POINTER(nf_nat_irc_hook, NULL);
79 synchronize_rcu(); 79 synchronize_rcu();
80} 80}
81 81
82static int __init nf_nat_irc_init(void) 82static int __init nf_nat_irc_init(void)
83{ 83{
84 BUG_ON(nf_nat_irc_hook != NULL); 84 BUG_ON(nf_nat_irc_hook != NULL);
85 rcu_assign_pointer(nf_nat_irc_hook, help); 85 RCU_INIT_POINTER(nf_nat_irc_hook, help);
86 return 0; 86 return 0;
87} 87}
88 88
diff --git a/net/ipv4/netfilter/nf_nat_pptp.c b/net/ipv4/netfilter/nf_nat_pptp.c
index 4c060038d29f..3e8284ba46b8 100644
--- a/net/ipv4/netfilter/nf_nat_pptp.c
+++ b/net/ipv4/netfilter/nf_nat_pptp.c
@@ -282,25 +282,25 @@ static int __init nf_nat_helper_pptp_init(void)
282 nf_nat_need_gre(); 282 nf_nat_need_gre();
283 283
284 BUG_ON(nf_nat_pptp_hook_outbound != NULL); 284 BUG_ON(nf_nat_pptp_hook_outbound != NULL);
285 rcu_assign_pointer(nf_nat_pptp_hook_outbound, pptp_outbound_pkt); 285 RCU_INIT_POINTER(nf_nat_pptp_hook_outbound, pptp_outbound_pkt);
286 286
287 BUG_ON(nf_nat_pptp_hook_inbound != NULL); 287 BUG_ON(nf_nat_pptp_hook_inbound != NULL);
288 rcu_assign_pointer(nf_nat_pptp_hook_inbound, pptp_inbound_pkt); 288 RCU_INIT_POINTER(nf_nat_pptp_hook_inbound, pptp_inbound_pkt);
289 289
290 BUG_ON(nf_nat_pptp_hook_exp_gre != NULL); 290 BUG_ON(nf_nat_pptp_hook_exp_gre != NULL);
291 rcu_assign_pointer(nf_nat_pptp_hook_exp_gre, pptp_exp_gre); 291 RCU_INIT_POINTER(nf_nat_pptp_hook_exp_gre, pptp_exp_gre);
292 292
293 BUG_ON(nf_nat_pptp_hook_expectfn != NULL); 293 BUG_ON(nf_nat_pptp_hook_expectfn != NULL);
294 rcu_assign_pointer(nf_nat_pptp_hook_expectfn, pptp_nat_expected); 294 RCU_INIT_POINTER(nf_nat_pptp_hook_expectfn, pptp_nat_expected);
295 return 0; 295 return 0;
296} 296}
297 297
298static void __exit nf_nat_helper_pptp_fini(void) 298static void __exit nf_nat_helper_pptp_fini(void)
299{ 299{
300 rcu_assign_pointer(nf_nat_pptp_hook_expectfn, NULL); 300 RCU_INIT_POINTER(nf_nat_pptp_hook_expectfn, NULL);
301 rcu_assign_pointer(nf_nat_pptp_hook_exp_gre, NULL); 301 RCU_INIT_POINTER(nf_nat_pptp_hook_exp_gre, NULL);
302 rcu_assign_pointer(nf_nat_pptp_hook_inbound, NULL); 302 RCU_INIT_POINTER(nf_nat_pptp_hook_inbound, NULL);
303 rcu_assign_pointer(nf_nat_pptp_hook_outbound, NULL); 303 RCU_INIT_POINTER(nf_nat_pptp_hook_outbound, NULL);
304 synchronize_rcu(); 304 synchronize_rcu();
305} 305}
306 306
diff --git a/net/ipv4/netfilter/nf_nat_proto_common.c b/net/ipv4/netfilter/nf_nat_proto_common.c
index 3e61faf23a9a..f52d41ea0690 100644
--- a/net/ipv4/netfilter/nf_nat_proto_common.c
+++ b/net/ipv4/netfilter/nf_nat_proto_common.c
@@ -12,6 +12,7 @@
12#include <linux/ip.h> 12#include <linux/ip.h>
13 13
14#include <linux/netfilter.h> 14#include <linux/netfilter.h>
15#include <net/secure_seq.h>
15#include <net/netfilter/nf_nat.h> 16#include <net/netfilter/nf_nat.h>
16#include <net/netfilter/nf_nat_core.h> 17#include <net/netfilter/nf_nat_core.h>
17#include <net/netfilter/nf_nat_rule.h> 18#include <net/netfilter/nf_nat_rule.h>
diff --git a/net/ipv4/netfilter/nf_nat_sip.c b/net/ipv4/netfilter/nf_nat_sip.c
index e40cf7816fdb..78844d9208f1 100644
--- a/net/ipv4/netfilter/nf_nat_sip.c
+++ b/net/ipv4/netfilter/nf_nat_sip.c
@@ -528,13 +528,13 @@ err1:
528 528
529static void __exit nf_nat_sip_fini(void) 529static void __exit nf_nat_sip_fini(void)
530{ 530{
531 rcu_assign_pointer(nf_nat_sip_hook, NULL); 531 RCU_INIT_POINTER(nf_nat_sip_hook, NULL);
532 rcu_assign_pointer(nf_nat_sip_seq_adjust_hook, NULL); 532 RCU_INIT_POINTER(nf_nat_sip_seq_adjust_hook, NULL);
533 rcu_assign_pointer(nf_nat_sip_expect_hook, NULL); 533 RCU_INIT_POINTER(nf_nat_sip_expect_hook, NULL);
534 rcu_assign_pointer(nf_nat_sdp_addr_hook, NULL); 534 RCU_INIT_POINTER(nf_nat_sdp_addr_hook, NULL);
535 rcu_assign_pointer(nf_nat_sdp_port_hook, NULL); 535 RCU_INIT_POINTER(nf_nat_sdp_port_hook, NULL);
536 rcu_assign_pointer(nf_nat_sdp_session_hook, NULL); 536 RCU_INIT_POINTER(nf_nat_sdp_session_hook, NULL);
537 rcu_assign_pointer(nf_nat_sdp_media_hook, NULL); 537 RCU_INIT_POINTER(nf_nat_sdp_media_hook, NULL);
538 synchronize_rcu(); 538 synchronize_rcu();
539} 539}
540 540
@@ -547,13 +547,13 @@ static int __init nf_nat_sip_init(void)
547 BUG_ON(nf_nat_sdp_port_hook != NULL); 547 BUG_ON(nf_nat_sdp_port_hook != NULL);
548 BUG_ON(nf_nat_sdp_session_hook != NULL); 548 BUG_ON(nf_nat_sdp_session_hook != NULL);
549 BUG_ON(nf_nat_sdp_media_hook != NULL); 549 BUG_ON(nf_nat_sdp_media_hook != NULL);
550 rcu_assign_pointer(nf_nat_sip_hook, ip_nat_sip); 550 RCU_INIT_POINTER(nf_nat_sip_hook, ip_nat_sip);
551 rcu_assign_pointer(nf_nat_sip_seq_adjust_hook, ip_nat_sip_seq_adjust); 551 RCU_INIT_POINTER(nf_nat_sip_seq_adjust_hook, ip_nat_sip_seq_adjust);
552 rcu_assign_pointer(nf_nat_sip_expect_hook, ip_nat_sip_expect); 552 RCU_INIT_POINTER(nf_nat_sip_expect_hook, ip_nat_sip_expect);
553 rcu_assign_pointer(nf_nat_sdp_addr_hook, ip_nat_sdp_addr); 553 RCU_INIT_POINTER(nf_nat_sdp_addr_hook, ip_nat_sdp_addr);
554 rcu_assign_pointer(nf_nat_sdp_port_hook, ip_nat_sdp_port); 554 RCU_INIT_POINTER(nf_nat_sdp_port_hook, ip_nat_sdp_port);
555 rcu_assign_pointer(nf_nat_sdp_session_hook, ip_nat_sdp_session); 555 RCU_INIT_POINTER(nf_nat_sdp_session_hook, ip_nat_sdp_session);
556 rcu_assign_pointer(nf_nat_sdp_media_hook, ip_nat_sdp_media); 556 RCU_INIT_POINTER(nf_nat_sdp_media_hook, ip_nat_sdp_media);
557 return 0; 557 return 0;
558} 558}
559 559
diff --git a/net/ipv4/netfilter/nf_nat_snmp_basic.c b/net/ipv4/netfilter/nf_nat_snmp_basic.c
index 076b7c8c4aa4..d1cb412c18e0 100644
--- a/net/ipv4/netfilter/nf_nat_snmp_basic.c
+++ b/net/ipv4/netfilter/nf_nat_snmp_basic.c
@@ -1310,7 +1310,7 @@ static int __init nf_nat_snmp_basic_init(void)
1310 int ret = 0; 1310 int ret = 0;
1311 1311
1312 BUG_ON(nf_nat_snmp_hook != NULL); 1312 BUG_ON(nf_nat_snmp_hook != NULL);
1313 rcu_assign_pointer(nf_nat_snmp_hook, help); 1313 RCU_INIT_POINTER(nf_nat_snmp_hook, help);
1314 1314
1315 ret = nf_conntrack_helper_register(&snmp_trap_helper); 1315 ret = nf_conntrack_helper_register(&snmp_trap_helper);
1316 if (ret < 0) { 1316 if (ret < 0) {
@@ -1322,7 +1322,7 @@ static int __init nf_nat_snmp_basic_init(void)
1322 1322
1323static void __exit nf_nat_snmp_basic_fini(void) 1323static void __exit nf_nat_snmp_basic_fini(void)
1324{ 1324{
1325 rcu_assign_pointer(nf_nat_snmp_hook, NULL); 1325 RCU_INIT_POINTER(nf_nat_snmp_hook, NULL);
1326 nf_conntrack_helper_unregister(&snmp_trap_helper); 1326 nf_conntrack_helper_unregister(&snmp_trap_helper);
1327} 1327}
1328 1328
diff --git a/net/ipv4/netfilter/nf_nat_standalone.c b/net/ipv4/netfilter/nf_nat_standalone.c
index a6e606e84820..92900482edea 100644
--- a/net/ipv4/netfilter/nf_nat_standalone.c
+++ b/net/ipv4/netfilter/nf_nat_standalone.c
@@ -284,7 +284,7 @@ static int __init nf_nat_standalone_init(void)
284 284
285#ifdef CONFIG_XFRM 285#ifdef CONFIG_XFRM
286 BUG_ON(ip_nat_decode_session != NULL); 286 BUG_ON(ip_nat_decode_session != NULL);
287 rcu_assign_pointer(ip_nat_decode_session, nat_decode_session); 287 RCU_INIT_POINTER(ip_nat_decode_session, nat_decode_session);
288#endif 288#endif
289 ret = nf_nat_rule_init(); 289 ret = nf_nat_rule_init();
290 if (ret < 0) { 290 if (ret < 0) {
@@ -302,7 +302,7 @@ static int __init nf_nat_standalone_init(void)
302 nf_nat_rule_cleanup(); 302 nf_nat_rule_cleanup();
303 cleanup_decode_session: 303 cleanup_decode_session:
304#ifdef CONFIG_XFRM 304#ifdef CONFIG_XFRM
305 rcu_assign_pointer(ip_nat_decode_session, NULL); 305 RCU_INIT_POINTER(ip_nat_decode_session, NULL);
306 synchronize_net(); 306 synchronize_net();
307#endif 307#endif
308 return ret; 308 return ret;
@@ -313,7 +313,7 @@ static void __exit nf_nat_standalone_fini(void)
313 nf_unregister_hooks(nf_nat_ops, ARRAY_SIZE(nf_nat_ops)); 313 nf_unregister_hooks(nf_nat_ops, ARRAY_SIZE(nf_nat_ops));
314 nf_nat_rule_cleanup(); 314 nf_nat_rule_cleanup();
315#ifdef CONFIG_XFRM 315#ifdef CONFIG_XFRM
316 rcu_assign_pointer(ip_nat_decode_session, NULL); 316 RCU_INIT_POINTER(ip_nat_decode_session, NULL);
317 synchronize_net(); 317 synchronize_net();
318#endif 318#endif
319 /* Conntrack caches are unregistered in nf_conntrack_cleanup */ 319 /* Conntrack caches are unregistered in nf_conntrack_cleanup */
diff --git a/net/ipv4/netfilter/nf_nat_tftp.c b/net/ipv4/netfilter/nf_nat_tftp.c
index 7274a43c7a12..a2901bf829c0 100644
--- a/net/ipv4/netfilter/nf_nat_tftp.c
+++ b/net/ipv4/netfilter/nf_nat_tftp.c
@@ -36,14 +36,14 @@ static unsigned int help(struct sk_buff *skb,
36 36
37static void __exit nf_nat_tftp_fini(void) 37static void __exit nf_nat_tftp_fini(void)
38{ 38{
39 rcu_assign_pointer(nf_nat_tftp_hook, NULL); 39 RCU_INIT_POINTER(nf_nat_tftp_hook, NULL);
40 synchronize_rcu(); 40 synchronize_rcu();
41} 41}
42 42
43static int __init nf_nat_tftp_init(void) 43static int __init nf_nat_tftp_init(void)
44{ 44{
45 BUG_ON(nf_nat_tftp_hook != NULL); 45 BUG_ON(nf_nat_tftp_hook != NULL);
46 rcu_assign_pointer(nf_nat_tftp_hook, help); 46 RCU_INIT_POINTER(nf_nat_tftp_hook, help);
47 return 0; 47 return 0;
48} 48}
49 49
diff --git a/net/ipv4/raw.c b/net/ipv4/raw.c
index 1457acb39cec..61714bd52925 100644
--- a/net/ipv4/raw.c
+++ b/net/ipv4/raw.c
@@ -563,7 +563,8 @@ static int raw_sendmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *msg,
563 flowi4_init_output(&fl4, ipc.oif, sk->sk_mark, tos, 563 flowi4_init_output(&fl4, ipc.oif, sk->sk_mark, tos,
564 RT_SCOPE_UNIVERSE, 564 RT_SCOPE_UNIVERSE,
565 inet->hdrincl ? IPPROTO_RAW : sk->sk_protocol, 565 inet->hdrincl ? IPPROTO_RAW : sk->sk_protocol,
566 FLOWI_FLAG_CAN_SLEEP, daddr, saddr, 0, 0); 566 inet_sk_flowi_flags(sk) | FLOWI_FLAG_CAN_SLEEP,
567 daddr, saddr, 0, 0);
567 568
568 if (!inet->hdrincl) { 569 if (!inet->hdrincl) {
569 err = raw_probe_proto_opt(&fl4, msg); 570 err = raw_probe_proto_opt(&fl4, msg);
diff --git a/net/ipv4/route.c b/net/ipv4/route.c
index 1730689f560e..2c21d3be891b 100644
--- a/net/ipv4/route.c
+++ b/net/ipv4/route.c
@@ -109,6 +109,7 @@
109#include <linux/sysctl.h> 109#include <linux/sysctl.h>
110#endif 110#endif
111#include <net/atmclip.h> 111#include <net/atmclip.h>
112#include <net/secure_seq.h>
112 113
113#define RT_FL_TOS(oldflp4) \ 114#define RT_FL_TOS(oldflp4) \
114 ((u32)(oldflp4->flowi4_tos & (IPTOS_RT_MASK | RTO_ONLINK))) 115 ((u32)(oldflp4->flowi4_tos & (IPTOS_RT_MASK | RTO_ONLINK)))
@@ -323,7 +324,7 @@ static struct rtable *rt_cache_get_first(struct seq_file *seq)
323 struct rtable *r = NULL; 324 struct rtable *r = NULL;
324 325
325 for (st->bucket = rt_hash_mask; st->bucket >= 0; --st->bucket) { 326 for (st->bucket = rt_hash_mask; st->bucket >= 0; --st->bucket) {
326 if (!rcu_dereference_raw(rt_hash_table[st->bucket].chain)) 327 if (!rcu_access_pointer(rt_hash_table[st->bucket].chain))
327 continue; 328 continue;
328 rcu_read_lock_bh(); 329 rcu_read_lock_bh();
329 r = rcu_dereference_bh(rt_hash_table[st->bucket].chain); 330 r = rcu_dereference_bh(rt_hash_table[st->bucket].chain);
@@ -349,7 +350,7 @@ static struct rtable *__rt_cache_get_next(struct seq_file *seq,
349 do { 350 do {
350 if (--st->bucket < 0) 351 if (--st->bucket < 0)
351 return NULL; 352 return NULL;
352 } while (!rcu_dereference_raw(rt_hash_table[st->bucket].chain)); 353 } while (!rcu_access_pointer(rt_hash_table[st->bucket].chain));
353 rcu_read_lock_bh(); 354 rcu_read_lock_bh();
354 r = rcu_dereference_bh(rt_hash_table[st->bucket].chain); 355 r = rcu_dereference_bh(rt_hash_table[st->bucket].chain);
355 } 356 }
@@ -721,7 +722,7 @@ static inline bool compare_hash_inputs(const struct rtable *rt1,
721{ 722{
722 return ((((__force u32)rt1->rt_key_dst ^ (__force u32)rt2->rt_key_dst) | 723 return ((((__force u32)rt1->rt_key_dst ^ (__force u32)rt2->rt_key_dst) |
723 ((__force u32)rt1->rt_key_src ^ (__force u32)rt2->rt_key_src) | 724 ((__force u32)rt1->rt_key_src ^ (__force u32)rt2->rt_key_src) |
724 (rt1->rt_iif ^ rt2->rt_iif)) == 0); 725 (rt1->rt_route_iif ^ rt2->rt_route_iif)) == 0);
725} 726}
726 727
727static inline int compare_keys(struct rtable *rt1, struct rtable *rt2) 728static inline int compare_keys(struct rtable *rt1, struct rtable *rt2)
@@ -730,8 +731,8 @@ static inline int compare_keys(struct rtable *rt1, struct rtable *rt2)
730 ((__force u32)rt1->rt_key_src ^ (__force u32)rt2->rt_key_src) | 731 ((__force u32)rt1->rt_key_src ^ (__force u32)rt2->rt_key_src) |
731 (rt1->rt_mark ^ rt2->rt_mark) | 732 (rt1->rt_mark ^ rt2->rt_mark) |
732 (rt1->rt_key_tos ^ rt2->rt_key_tos) | 733 (rt1->rt_key_tos ^ rt2->rt_key_tos) |
733 (rt1->rt_oif ^ rt2->rt_oif) | 734 (rt1->rt_route_iif ^ rt2->rt_route_iif) |
734 (rt1->rt_iif ^ rt2->rt_iif)) == 0; 735 (rt1->rt_oif ^ rt2->rt_oif)) == 0;
735} 736}
736 737
737static inline int compare_netns(struct rtable *rt1, struct rtable *rt2) 738static inline int compare_netns(struct rtable *rt1, struct rtable *rt2)
@@ -760,7 +761,7 @@ static void rt_do_flush(struct net *net, int process_context)
760 761
761 if (process_context && need_resched()) 762 if (process_context && need_resched())
762 cond_resched(); 763 cond_resched();
763 rth = rcu_dereference_raw(rt_hash_table[i].chain); 764 rth = rcu_access_pointer(rt_hash_table[i].chain);
764 if (!rth) 765 if (!rth)
765 continue; 766 continue;
766 767
@@ -1628,16 +1629,18 @@ static int check_peer_redir(struct dst_entry *dst, struct inet_peer *peer)
1628{ 1629{
1629 struct rtable *rt = (struct rtable *) dst; 1630 struct rtable *rt = (struct rtable *) dst;
1630 __be32 orig_gw = rt->rt_gateway; 1631 __be32 orig_gw = rt->rt_gateway;
1631 struct neighbour *n; 1632 struct neighbour *n, *old_n;
1632 1633
1633 dst_confirm(&rt->dst); 1634 dst_confirm(&rt->dst);
1634 1635
1635 neigh_release(dst_get_neighbour(&rt->dst));
1636 dst_set_neighbour(&rt->dst, NULL);
1637
1638 rt->rt_gateway = peer->redirect_learned.a4; 1636 rt->rt_gateway = peer->redirect_learned.a4;
1639 rt_bind_neighbour(rt); 1637
1640 n = dst_get_neighbour(&rt->dst); 1638 n = ipv4_neigh_lookup(&rt->dst, &rt->rt_gateway);
1639 if (IS_ERR(n))
1640 return PTR_ERR(n);
1641 old_n = xchg(&rt->dst._neighbour, n);
1642 if (old_n)
1643 neigh_release(old_n);
1641 if (!n || !(n->nud_state & NUD_VALID)) { 1644 if (!n || !(n->nud_state & NUD_VALID)) {
1642 if (n) 1645 if (n)
1643 neigh_event_send(n, NULL); 1646 neigh_event_send(n, NULL);
@@ -2317,8 +2320,7 @@ int ip_route_input_common(struct sk_buff *skb, __be32 daddr, __be32 saddr,
2317 rth = rcu_dereference(rth->dst.rt_next)) { 2320 rth = rcu_dereference(rth->dst.rt_next)) {
2318 if ((((__force u32)rth->rt_key_dst ^ (__force u32)daddr) | 2321 if ((((__force u32)rth->rt_key_dst ^ (__force u32)daddr) |
2319 ((__force u32)rth->rt_key_src ^ (__force u32)saddr) | 2322 ((__force u32)rth->rt_key_src ^ (__force u32)saddr) |
2320 (rth->rt_iif ^ iif) | 2323 (rth->rt_route_iif ^ iif) |
2321 rth->rt_oif |
2322 (rth->rt_key_tos ^ tos)) == 0 && 2324 (rth->rt_key_tos ^ tos)) == 0 &&
2323 rth->rt_mark == skb->mark && 2325 rth->rt_mark == skb->mark &&
2324 net_eq(dev_net(rth->dst.dev), net) && 2326 net_eq(dev_net(rth->dst.dev), net) &&
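The route.c hunks above make three related changes: the input-path cache lookup keys on rt_route_iif instead of rt_iif (and drops the rt_oif test), the places that merely test the hash chain for NULL switch from rcu_dereference_raw() to rcu_access_pointer(), which omits the dependency-ordering step a real dereference needs, and check_peer_redir() resolves the learned gateway with ipv4_neigh_lookup() and swaps it in with xchg() before releasing the old neighbour. A minimal sketch of the pointer-test idiom, with made-up my_hash/my_entry names:

#include <linux/types.h>
#include <linux/rcupdate.h>

struct my_entry {
	struct my_entry __rcu	*next;
	int			key;
};

static struct my_entry __rcu *my_hash[16];

/* Only a NULL test: rcu_access_pointer() needs no rcu_read_lock(). */
static bool bucket_is_empty(unsigned int i)
{
	return !rcu_access_pointer(my_hash[i]);
}

/* An actual dereference still needs the RCU read side. */
static int bucket_first_key(unsigned int i)
{
	struct my_entry *e;
	int key = -1;

	rcu_read_lock();
	e = rcu_dereference(my_hash[i]);
	if (e)
		key = e->key;
	rcu_read_unlock();
	return key;
}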
diff --git a/net/ipv4/syncookies.c b/net/ipv4/syncookies.c
index 92bb9434b338..3bc5c8f7c71b 100644
--- a/net/ipv4/syncookies.c
+++ b/net/ipv4/syncookies.c
@@ -276,7 +276,7 @@ struct sock *cookie_v4_check(struct sock *sk, struct sk_buff *skb,
276 int mss; 276 int mss;
277 struct rtable *rt; 277 struct rtable *rt;
278 __u8 rcv_wscale; 278 __u8 rcv_wscale;
279 bool ecn_ok; 279 bool ecn_ok = false;
280 280
281 if (!sysctl_tcp_syncookies || !th->ack || th->rst) 281 if (!sysctl_tcp_syncookies || !th->ack || th->rst)
282 goto out; 282 goto out;
diff --git a/net/ipv4/tcp_ipv4.c b/net/ipv4/tcp_ipv4.c
index 955b8e65b69e..b3f26114b03e 100644
--- a/net/ipv4/tcp_ipv4.c
+++ b/net/ipv4/tcp_ipv4.c
@@ -72,6 +72,7 @@
72#include <net/timewait_sock.h> 72#include <net/timewait_sock.h>
73#include <net/xfrm.h> 73#include <net/xfrm.h>
74#include <net/netdma.h> 74#include <net/netdma.h>
75#include <net/secure_seq.h>
75 76
76#include <linux/inet.h> 77#include <linux/inet.h>
77#include <linux/ipv6.h> 78#include <linux/ipv6.h>
@@ -1577,7 +1578,7 @@ int tcp_v4_do_rcv(struct sock *sk, struct sk_buff *skb)
1577#endif 1578#endif
1578 1579
1579 if (sk->sk_state == TCP_ESTABLISHED) { /* Fast path */ 1580 if (sk->sk_state == TCP_ESTABLISHED) { /* Fast path */
1580 sock_rps_save_rxhash(sk, skb->rxhash); 1581 sock_rps_save_rxhash(sk, skb);
1581 if (tcp_rcv_established(sk, skb, tcp_hdr(skb), skb->len)) { 1582 if (tcp_rcv_established(sk, skb, tcp_hdr(skb), skb->len)) {
1582 rsk = sk; 1583 rsk = sk;
1583 goto reset; 1584 goto reset;
@@ -1594,7 +1595,7 @@ int tcp_v4_do_rcv(struct sock *sk, struct sk_buff *skb)
1594 goto discard; 1595 goto discard;
1595 1596
1596 if (nsk != sk) { 1597 if (nsk != sk) {
1597 sock_rps_save_rxhash(nsk, skb->rxhash); 1598 sock_rps_save_rxhash(nsk, skb);
1598 if (tcp_child_process(sk, nsk, skb)) { 1599 if (tcp_child_process(sk, nsk, skb)) {
1599 rsk = nsk; 1600 rsk = nsk;
1600 goto reset; 1601 goto reset;
@@ -1602,7 +1603,7 @@ int tcp_v4_do_rcv(struct sock *sk, struct sk_buff *skb)
1602 return 0; 1603 return 0;
1603 } 1604 }
1604 } else 1605 } else
1605 sock_rps_save_rxhash(sk, skb->rxhash); 1606 sock_rps_save_rxhash(sk, skb);
1606 1607
1607 if (tcp_rcv_state_process(sk, skb, tcp_hdr(skb), skb->len)) { 1608 if (tcp_rcv_state_process(sk, skb, tcp_hdr(skb), skb->len)) {
1608 rsk = sk; 1609 rsk = sk;
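The tcp_ipv4.c change is mostly mechanical: tcp_v4_do_rcv() now hands the whole skb to sock_rps_save_rxhash() instead of reading skb->rxhash at every call site, and the file picks up the secure sequence-number declarations from their new header. A sketch of the helper shape these call sites now assume; the real definitions live in include/net/sock.h and may differ in detail, so treat this as an approximation:

#include <net/sock.h>
#include <linux/skbuff.h>

static inline void my_sock_rps_save_rxhash(struct sock *sk,
					   const struct sk_buff *skb)
{
#ifdef CONFIG_RPS
	if (unlikely(sk->sk_rxhash != skb->rxhash))
		sk->sk_rxhash = skb->rxhash;	/* flow-table reset omitted */
#endif
}

static inline void my_sock_rps_reset_rxhash(struct sock *sk)
{
#ifdef CONFIG_RPS
	sk->sk_rxhash = 0;
#endif
}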
diff --git a/net/ipv4/udp.c b/net/ipv4/udp.c
index 1b5a19340a95..ebaa96bd3464 100644
--- a/net/ipv4/udp.c
+++ b/net/ipv4/udp.c
@@ -1267,7 +1267,7 @@ int udp_disconnect(struct sock *sk, int flags)
1267 sk->sk_state = TCP_CLOSE; 1267 sk->sk_state = TCP_CLOSE;
1268 inet->inet_daddr = 0; 1268 inet->inet_daddr = 0;
1269 inet->inet_dport = 0; 1269 inet->inet_dport = 0;
1270 sock_rps_save_rxhash(sk, 0); 1270 sock_rps_reset_rxhash(sk);
1271 sk->sk_bound_dev_if = 0; 1271 sk->sk_bound_dev_if = 0;
1272 if (!(sk->sk_userlocks & SOCK_BINDADDR_LOCK)) 1272 if (!(sk->sk_userlocks & SOCK_BINDADDR_LOCK))
1273 inet_reset_saddr(sk); 1273 inet_reset_saddr(sk);
@@ -1355,7 +1355,7 @@ static int __udp_queue_rcv_skb(struct sock *sk, struct sk_buff *skb)
1355 int rc; 1355 int rc;
1356 1356
1357 if (inet_sk(sk)->inet_daddr) 1357 if (inet_sk(sk)->inet_daddr)
1358 sock_rps_save_rxhash(sk, skb->rxhash); 1358 sock_rps_save_rxhash(sk, skb);
1359 1359
1360 rc = ip_queue_rcv_skb(sk, skb); 1360 rc = ip_queue_rcv_skb(sk, skb);
1361 if (rc < 0) { 1361 if (rc < 0) {
@@ -1461,10 +1461,9 @@ int udp_queue_rcv_skb(struct sock *sk, struct sk_buff *skb)
1461 } 1461 }
1462 } 1462 }
1463 1463
1464 if (rcu_dereference_raw(sk->sk_filter)) { 1464 if (rcu_access_pointer(sk->sk_filter) &&
1465 if (udp_lib_checksum_complete(skb)) 1465 udp_lib_checksum_complete(skb))
1466 goto drop; 1466 goto drop;
1467 }
1468 1467
1469 1468
1470 if (sk_rcvqueues_full(sk, skb)) 1469 if (sk_rcvqueues_full(sk, skb))
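In udp.c the disconnect path switches to sock_rps_reset_rxhash(), and the receive path collapses the socket-filter test into a single condition: the checksum is completed eagerly only when a filter is attached (a filter may read payload), and rcu_access_pointer() suffices because the filter pointer is only tested here, never dereferenced. A condensed sketch of that precheck, assuming the usual udp_lib_checksum_complete() helper:

#include <net/sock.h>
#include <net/udp.h>
#include <linux/skbuff.h>

/* Returns nonzero if the packet should be dropped before queueing. */
static int my_udp_rcv_precheck(struct sock *sk, struct sk_buff *skb)
{
	return rcu_access_pointer(sk->sk_filter) &&
	       udp_lib_checksum_complete(skb);
}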
diff --git a/net/ipv6/addrconf.c b/net/ipv6/addrconf.c
index a55500cc0b29..8f1e5be26d91 100644
--- a/net/ipv6/addrconf.c
+++ b/net/ipv6/addrconf.c
@@ -428,7 +428,7 @@ static struct inet6_dev * ipv6_add_dev(struct net_device *dev)
428 ndev->tstamp = jiffies; 428 ndev->tstamp = jiffies;
429 addrconf_sysctl_register(ndev); 429 addrconf_sysctl_register(ndev);
430 /* protected by rtnl_lock */ 430 /* protected by rtnl_lock */
431 rcu_assign_pointer(dev->ip6_ptr, ndev); 431 RCU_INIT_POINTER(dev->ip6_ptr, ndev);
432 432
433 /* Join all-node multicast group */ 433 /* Join all-node multicast group */
434 ipv6_dev_mc_inc(dev, &in6addr_linklocal_allnodes); 434 ipv6_dev_mc_inc(dev, &in6addr_linklocal_allnodes);
@@ -656,7 +656,7 @@ ipv6_add_addr(struct inet6_dev *idev, const struct in6_addr *addr, int pfxlen,
656 * layer address of our nexhop router 656 * layer address of our nexhop router
657 */ 657 */
658 658
659 if (dst_get_neighbour(&rt->dst) == NULL) 659 if (dst_get_neighbour_raw(&rt->dst) == NULL)
660 ifa->flags &= ~IFA_F_OPTIMISTIC; 660 ifa->flags &= ~IFA_F_OPTIMISTIC;
661 661
662 ifa->idev = idev; 662 ifa->idev = idev;
@@ -824,12 +824,13 @@ static int ipv6_create_tempaddr(struct inet6_ifaddr *ifp, struct inet6_ifaddr *i
824{ 824{
825 struct inet6_dev *idev = ifp->idev; 825 struct inet6_dev *idev = ifp->idev;
826 struct in6_addr addr, *tmpaddr; 826 struct in6_addr addr, *tmpaddr;
827 unsigned long tmp_prefered_lft, tmp_valid_lft, tmp_cstamp, tmp_tstamp, age; 827 unsigned long tmp_prefered_lft, tmp_valid_lft, tmp_tstamp, age;
828 unsigned long regen_advance; 828 unsigned long regen_advance;
829 int tmp_plen; 829 int tmp_plen;
830 int ret = 0; 830 int ret = 0;
831 int max_addresses; 831 int max_addresses;
832 u32 addr_flags; 832 u32 addr_flags;
833 unsigned long now = jiffies;
833 834
834 write_lock(&idev->lock); 835 write_lock(&idev->lock);
835 if (ift) { 836 if (ift) {
@@ -874,7 +875,7 @@ retry:
874 goto out; 875 goto out;
875 } 876 }
876 memcpy(&addr.s6_addr[8], idev->rndid, 8); 877 memcpy(&addr.s6_addr[8], idev->rndid, 8);
877 age = (jiffies - ifp->tstamp) / HZ; 878 age = (now - ifp->tstamp) / HZ;
878 tmp_valid_lft = min_t(__u32, 879 tmp_valid_lft = min_t(__u32,
879 ifp->valid_lft, 880 ifp->valid_lft,
880 idev->cnf.temp_valid_lft + age); 881 idev->cnf.temp_valid_lft + age);
@@ -884,7 +885,6 @@ retry:
884 idev->cnf.max_desync_factor); 885 idev->cnf.max_desync_factor);
885 tmp_plen = ifp->prefix_len; 886 tmp_plen = ifp->prefix_len;
886 max_addresses = idev->cnf.max_addresses; 887 max_addresses = idev->cnf.max_addresses;
887 tmp_cstamp = ifp->cstamp;
888 tmp_tstamp = ifp->tstamp; 888 tmp_tstamp = ifp->tstamp;
889 spin_unlock_bh(&ifp->lock); 889 spin_unlock_bh(&ifp->lock);
890 890
@@ -929,7 +929,7 @@ retry:
929 ift->ifpub = ifp; 929 ift->ifpub = ifp;
930 ift->valid_lft = tmp_valid_lft; 930 ift->valid_lft = tmp_valid_lft;
931 ift->prefered_lft = tmp_prefered_lft; 931 ift->prefered_lft = tmp_prefered_lft;
932 ift->cstamp = tmp_cstamp; 932 ift->cstamp = now;
933 ift->tstamp = tmp_tstamp; 933 ift->tstamp = tmp_tstamp;
934 spin_unlock_bh(&ift->lock); 934 spin_unlock_bh(&ift->lock);
935 935
@@ -1999,25 +1999,50 @@ ok:
1999#ifdef CONFIG_IPV6_PRIVACY 1999#ifdef CONFIG_IPV6_PRIVACY
2000 read_lock_bh(&in6_dev->lock); 2000 read_lock_bh(&in6_dev->lock);
2001 /* update all temporary addresses in the list */ 2001 /* update all temporary addresses in the list */
2002 list_for_each_entry(ift, &in6_dev->tempaddr_list, tmp_list) { 2002 list_for_each_entry(ift, &in6_dev->tempaddr_list,
2003 /* 2003 tmp_list) {
2004 * When adjusting the lifetimes of an existing 2004 int age, max_valid, max_prefered;
2005 * temporary address, only lower the lifetimes. 2005
2006 * Implementations must not increase the
2007 * lifetimes of an existing temporary address
2008 * when processing a Prefix Information Option.
2009 */
2010 if (ifp != ift->ifpub) 2006 if (ifp != ift->ifpub)
2011 continue; 2007 continue;
2012 2008
2009 /*
2010 * RFC 4941 section 3.3:
2011 * If a received option will extend the lifetime
2012 * of a public address, the lifetimes of
2013 * temporary addresses should be extended,
2014 * subject to the overall constraint that no
2015 * temporary addresses should ever remain
2016 * "valid" or "preferred" for a time longer than
2017 * (TEMP_VALID_LIFETIME) or
2018 * (TEMP_PREFERRED_LIFETIME - DESYNC_FACTOR),
2019 * respectively.
2020 */
2021 age = (now - ift->cstamp) / HZ;
2022 max_valid = in6_dev->cnf.temp_valid_lft - age;
2023 if (max_valid < 0)
2024 max_valid = 0;
2025
2026 max_prefered = in6_dev->cnf.temp_prefered_lft -
2027 in6_dev->cnf.max_desync_factor -
2028 age;
2029 if (max_prefered < 0)
2030 max_prefered = 0;
2031
2032 if (valid_lft > max_valid)
2033 valid_lft = max_valid;
2034
2035 if (prefered_lft > max_prefered)
2036 prefered_lft = max_prefered;
2037
2013 spin_lock(&ift->lock); 2038 spin_lock(&ift->lock);
2014 flags = ift->flags; 2039 flags = ift->flags;
2015 if (ift->valid_lft > valid_lft && 2040 ift->valid_lft = valid_lft;
2016 ift->valid_lft - valid_lft > (jiffies - ift->tstamp) / HZ) 2041 ift->prefered_lft = prefered_lft;
2017 ift->valid_lft = valid_lft + (jiffies - ift->tstamp) / HZ; 2042 ift->tstamp = now;
2018 if (ift->prefered_lft > prefered_lft && 2043 if (prefered_lft > 0)
2019 ift->prefered_lft - prefered_lft > (jiffies - ift->tstamp) / HZ) 2044 ift->flags &= ~IFA_F_DEPRECATED;
2020 ift->prefered_lft = prefered_lft + (jiffies - ift->tstamp) / HZ; 2045
2021 spin_unlock(&ift->lock); 2046 spin_unlock(&ift->lock);
2022 if (!(flags&IFA_F_TENTATIVE)) 2047 if (!(flags&IFA_F_TENTATIVE))
2023 ipv6_ifa_notify(0, ift); 2048 ipv6_ifa_notify(0, ift);
@@ -2025,9 +2050,11 @@ ok:
2025 2050
2026 if ((create || list_empty(&in6_dev->tempaddr_list)) && in6_dev->cnf.use_tempaddr > 0) { 2051 if ((create || list_empty(&in6_dev->tempaddr_list)) && in6_dev->cnf.use_tempaddr > 0) {
2027 /* 2052 /*
2028 * When a new public address is created as described in [ADDRCONF], 2053 * When a new public address is created as
2029 * also create a new temporary address. Also create a temporary 2054 * described in [ADDRCONF], also create a new
2030 * address if it's enabled but no temporary address currently exists. 2055 * temporary address. Also create a temporary
2056 * address if it's enabled but no temporary
2057 * address currently exists.
2031 */ 2058 */
2032 read_unlock_bh(&in6_dev->lock); 2059 read_unlock_bh(&in6_dev->lock);
2033 ipv6_create_tempaddr(ifp, NULL); 2060 ipv6_create_tempaddr(ifp, NULL);
@@ -2706,7 +2733,7 @@ static int addrconf_ifdown(struct net_device *dev, int how)
2706 idev->dead = 1; 2733 idev->dead = 1;
2707 2734
2708 /* protected by rtnl_lock */ 2735 /* protected by rtnl_lock */
2709 rcu_assign_pointer(dev->ip6_ptr, NULL); 2736 RCU_INIT_POINTER(dev->ip6_ptr, NULL);
2710 2737
2711 /* Step 1.5: remove snmp6 entry */ 2738 /* Step 1.5: remove snmp6 entry */
2712 snmp6_unregister_dev(idev); 2739 snmp6_unregister_dev(idev);
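The addrconf.c hunks implement RFC 4941 section 3.3 for temporary addresses: when a Prefix Information Option extends the public address, the temporary addresses are extended too, but never beyond temp_valid_lft (TEMP_VALID_LIFETIME) or temp_prefered_lft minus max_desync_factor, both counted from the temporary address's creation time, which is why ift->cstamp is now recorded as the creation time rather than copied from the public address. A user-space model of the clamp (all values in seconds, names illustrative):

#include <stdio.h>

struct tmp_lft {
	unsigned int valid;
	unsigned int prefered;
};

static struct tmp_lft clamp_temp_lifetimes(unsigned int valid_lft,
					   unsigned int prefered_lft,
					   unsigned int temp_valid_lft,
					   unsigned int temp_prefered_lft,
					   unsigned int max_desync_factor,
					   unsigned int age /* now - cstamp */)
{
	int max_valid = (int)temp_valid_lft - (int)age;
	int max_prefered = (int)temp_prefered_lft -
			   (int)max_desync_factor - (int)age;
	struct tmp_lft out;

	if (max_valid < 0)
		max_valid = 0;
	if (max_prefered < 0)
		max_prefered = 0;

	out.valid = valid_lft < (unsigned int)max_valid ?
		    valid_lft : (unsigned int)max_valid;
	out.prefered = prefered_lft < (unsigned int)max_prefered ?
		       prefered_lft : (unsigned int)max_prefered;
	return out;
}

int main(void)
{
	/* RA offers 7 days valid / 1 day preferred; temp addr is 2h old. */
	struct tmp_lft l = clamp_temp_lifetimes(604800, 86400,
						604800, 86400, 600, 7200);
	printf("valid=%u prefered=%u\n", l.valid, l.prefered);
	return 0;
}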
diff --git a/net/ipv6/datagram.c b/net/ipv6/datagram.c
index 16560336eb72..9ef1831746ef 100644
--- a/net/ipv6/datagram.c
+++ b/net/ipv6/datagram.c
@@ -33,6 +33,11 @@
33#include <linux/errqueue.h> 33#include <linux/errqueue.h>
34#include <asm/uaccess.h> 34#include <asm/uaccess.h>
35 35
36static inline int ipv6_mapped_addr_any(const struct in6_addr *a)
37{
38 return (ipv6_addr_v4mapped(a) && (a->s6_addr32[3] == 0));
39}
40
36int ip6_datagram_connect(struct sock *sk, struct sockaddr *uaddr, int addr_len) 41int ip6_datagram_connect(struct sock *sk, struct sockaddr *uaddr, int addr_len)
37{ 42{
38 struct sockaddr_in6 *usin = (struct sockaddr_in6 *) uaddr; 43 struct sockaddr_in6 *usin = (struct sockaddr_in6 *) uaddr;
@@ -102,10 +107,12 @@ ipv4_connected:
102 107
103 ipv6_addr_set_v4mapped(inet->inet_daddr, &np->daddr); 108 ipv6_addr_set_v4mapped(inet->inet_daddr, &np->daddr);
104 109
105 if (ipv6_addr_any(&np->saddr)) 110 if (ipv6_addr_any(&np->saddr) ||
111 ipv6_mapped_addr_any(&np->saddr))
106 ipv6_addr_set_v4mapped(inet->inet_saddr, &np->saddr); 112 ipv6_addr_set_v4mapped(inet->inet_saddr, &np->saddr);
107 113
108 if (ipv6_addr_any(&np->rcv_saddr)) { 114 if (ipv6_addr_any(&np->rcv_saddr) ||
115 ipv6_mapped_addr_any(&np->rcv_saddr)) {
109 ipv6_addr_set_v4mapped(inet->inet_rcv_saddr, 116 ipv6_addr_set_v4mapped(inet->inet_rcv_saddr,
110 &np->rcv_saddr); 117 &np->rcv_saddr);
111 if (sk->sk_prot->rehash) 118 if (sk->sk_prot->rehash)
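The datagram.c change adds ipv6_mapped_addr_any() so that a v4-mapped wildcard source (::ffff:0.0.0.0) is treated like :: when connecting, forcing the source and rcv_saddr to be re-derived from the IPv4 side. A user-space equivalent of the new test, using the standard IN6_IS_ADDR_V4MAPPED macro:

#include <arpa/inet.h>
#include <netinet/in.h>
#include <stdbool.h>
#include <stdio.h>

static bool mapped_addr_any(const struct in6_addr *a)
{
	return IN6_IS_ADDR_V4MAPPED(a) &&
	       a->s6_addr[12] == 0 && a->s6_addr[13] == 0 &&
	       a->s6_addr[14] == 0 && a->s6_addr[15] == 0;
}

int main(void)
{
	struct in6_addr a;

	inet_pton(AF_INET6, "::ffff:0.0.0.0", &a);
	printf("v4-mapped any: %d\n", mapped_addr_any(&a));	/* prints 1 */
	return 0;
}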
diff --git a/net/ipv6/exthdrs.c b/net/ipv6/exthdrs.c
index 79a485e8a700..1318de4c3e8d 100644
--- a/net/ipv6/exthdrs.c
+++ b/net/ipv6/exthdrs.c
@@ -273,12 +273,12 @@ static int ipv6_destopt_rcv(struct sk_buff *skb)
273#if defined(CONFIG_IPV6_MIP6) || defined(CONFIG_IPV6_MIP6_MODULE) 273#if defined(CONFIG_IPV6_MIP6) || defined(CONFIG_IPV6_MIP6_MODULE)
274 __u16 dstbuf; 274 __u16 dstbuf;
275#endif 275#endif
276 struct dst_entry *dst; 276 struct dst_entry *dst = skb_dst(skb);
277 277
278 if (!pskb_may_pull(skb, skb_transport_offset(skb) + 8) || 278 if (!pskb_may_pull(skb, skb_transport_offset(skb) + 8) ||
279 !pskb_may_pull(skb, (skb_transport_offset(skb) + 279 !pskb_may_pull(skb, (skb_transport_offset(skb) +
280 ((skb_transport_header(skb)[1] + 1) << 3)))) { 280 ((skb_transport_header(skb)[1] + 1) << 3)))) {
281 IP6_INC_STATS_BH(dev_net(skb_dst(skb)->dev), ip6_dst_idev(skb_dst(skb)), 281 IP6_INC_STATS_BH(dev_net(dst->dev), ip6_dst_idev(dst),
282 IPSTATS_MIB_INHDRERRORS); 282 IPSTATS_MIB_INHDRERRORS);
283 kfree_skb(skb); 283 kfree_skb(skb);
284 return -1; 284 return -1;
@@ -289,9 +289,7 @@ static int ipv6_destopt_rcv(struct sk_buff *skb)
289 dstbuf = opt->dst1; 289 dstbuf = opt->dst1;
290#endif 290#endif
291 291
292 dst = dst_clone(skb_dst(skb));
293 if (ip6_parse_tlv(tlvprocdestopt_lst, skb)) { 292 if (ip6_parse_tlv(tlvprocdestopt_lst, skb)) {
294 dst_release(dst);
295 skb->transport_header += (skb_transport_header(skb)[1] + 1) << 3; 293 skb->transport_header += (skb_transport_header(skb)[1] + 1) << 3;
296 opt = IP6CB(skb); 294 opt = IP6CB(skb);
297#if defined(CONFIG_IPV6_MIP6) || defined(CONFIG_IPV6_MIP6_MODULE) 295#if defined(CONFIG_IPV6_MIP6) || defined(CONFIG_IPV6_MIP6_MODULE)
@@ -304,7 +302,6 @@ static int ipv6_destopt_rcv(struct sk_buff *skb)
304 302
305 IP6_INC_STATS_BH(dev_net(dst->dev), 303 IP6_INC_STATS_BH(dev_net(dst->dev),
306 ip6_dst_idev(dst), IPSTATS_MIB_INHDRERRORS); 304 ip6_dst_idev(dst), IPSTATS_MIB_INHDRERRORS);
307 dst_release(dst);
308 return -1; 305 return -1;
309} 306}
310 307
diff --git a/net/ipv6/icmp.c b/net/ipv6/icmp.c
index 11900417b1cc..2b59154c65d3 100644
--- a/net/ipv6/icmp.c
+++ b/net/ipv6/icmp.c
@@ -490,7 +490,8 @@ void icmpv6_send(struct sk_buff *skb, u8 type, u8 code, __u32 info)
490 goto out_dst_release; 490 goto out_dst_release;
491 } 491 }
492 492
493 idev = in6_dev_get(skb->dev); 493 rcu_read_lock();
494 idev = __in6_dev_get(skb->dev);
494 495
495 err = ip6_append_data(sk, icmpv6_getfrag, &msg, 496 err = ip6_append_data(sk, icmpv6_getfrag, &msg,
496 len + sizeof(struct icmp6hdr), 497 len + sizeof(struct icmp6hdr),
@@ -500,19 +501,16 @@ void icmpv6_send(struct sk_buff *skb, u8 type, u8 code, __u32 info)
500 if (err) { 501 if (err) {
501 ICMP6_INC_STATS_BH(net, idev, ICMP6_MIB_OUTERRORS); 502 ICMP6_INC_STATS_BH(net, idev, ICMP6_MIB_OUTERRORS);
502 ip6_flush_pending_frames(sk); 503 ip6_flush_pending_frames(sk);
503 goto out_put; 504 } else {
505 err = icmpv6_push_pending_frames(sk, &fl6, &tmp_hdr,
506 len + sizeof(struct icmp6hdr));
504 } 507 }
505 err = icmpv6_push_pending_frames(sk, &fl6, &tmp_hdr, len + sizeof(struct icmp6hdr)); 508 rcu_read_unlock();
506
507out_put:
508 if (likely(idev != NULL))
509 in6_dev_put(idev);
510out_dst_release: 509out_dst_release:
511 dst_release(dst); 510 dst_release(dst);
512out: 511out:
513 icmpv6_xmit_unlock(sk); 512 icmpv6_xmit_unlock(sk);
514} 513}
515
516EXPORT_SYMBOL(icmpv6_send); 514EXPORT_SYMBOL(icmpv6_send);
517 515
518static void icmpv6_echo_reply(struct sk_buff *skb) 516static void icmpv6_echo_reply(struct sk_buff *skb)
@@ -569,7 +567,7 @@ static void icmpv6_echo_reply(struct sk_buff *skb)
569 if (hlimit < 0) 567 if (hlimit < 0)
570 hlimit = ip6_dst_hoplimit(dst); 568 hlimit = ip6_dst_hoplimit(dst);
571 569
572 idev = in6_dev_get(skb->dev); 570 idev = __in6_dev_get(skb->dev);
573 571
574 msg.skb = skb; 572 msg.skb = skb;
575 msg.offset = 0; 573 msg.offset = 0;
@@ -583,13 +581,10 @@ static void icmpv6_echo_reply(struct sk_buff *skb)
583 if (err) { 581 if (err) {
584 ICMP6_INC_STATS_BH(net, idev, ICMP6_MIB_OUTERRORS); 582 ICMP6_INC_STATS_BH(net, idev, ICMP6_MIB_OUTERRORS);
585 ip6_flush_pending_frames(sk); 583 ip6_flush_pending_frames(sk);
586 goto out_put; 584 } else {
585 err = icmpv6_push_pending_frames(sk, &fl6, &tmp_hdr,
586 skb->len + sizeof(struct icmp6hdr));
587 } 587 }
588 err = icmpv6_push_pending_frames(sk, &fl6, &tmp_hdr, skb->len + sizeof(struct icmp6hdr));
589
590out_put:
591 if (likely(idev != NULL))
592 in6_dev_put(idev);
593 dst_release(dst); 588 dst_release(dst);
594out: 589out:
595 icmpv6_xmit_unlock(sk); 590 icmpv6_xmit_unlock(sk);
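Both icmpv6_send() and icmpv6_echo_reply() stop taking a reference on the inet6_dev: the lookup becomes __in6_dev_get() (with an explicit rcu_read_lock() added in icmpv6_send()), the push of pending frames moves into an else branch, and the in6_dev_put() calls go away. The same conversion is applied in ndisc.c further down. A sketch of the pattern, with the statistics update reduced to a comment:

#include <linux/rcupdate.h>
#include <linux/netdevice.h>
#include <net/addrconf.h>

static void count_icmp_out(struct net_device *dev, unsigned int len)
{
	struct inet6_dev *idev;

	rcu_read_lock();
	idev = __in6_dev_get(dev);	/* no reference taken */
	if (idev) {
		/* e.g. ICMP6_INC_STATS(dev_net(dev), idev, ICMP6_MIB_OUTMSGS); */
	}
	rcu_read_unlock();		/* idev must not be used past this point */
}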
diff --git a/net/ipv6/inet6_connection_sock.c b/net/ipv6/inet6_connection_sock.c
index 8a58e8cf6646..2916200f90c1 100644
--- a/net/ipv6/inet6_connection_sock.c
+++ b/net/ipv6/inet6_connection_sock.c
@@ -211,6 +211,7 @@ int inet6_csk_xmit(struct sk_buff *skb, struct flowi *fl_unused)
211 struct flowi6 fl6; 211 struct flowi6 fl6;
212 struct dst_entry *dst; 212 struct dst_entry *dst;
213 struct in6_addr *final_p, final; 213 struct in6_addr *final_p, final;
214 int res;
214 215
215 memset(&fl6, 0, sizeof(fl6)); 216 memset(&fl6, 0, sizeof(fl6));
216 fl6.flowi6_proto = sk->sk_protocol; 217 fl6.flowi6_proto = sk->sk_protocol;
@@ -241,12 +242,14 @@ int inet6_csk_xmit(struct sk_buff *skb, struct flowi *fl_unused)
241 __inet6_csk_dst_store(sk, dst, NULL, NULL); 242 __inet6_csk_dst_store(sk, dst, NULL, NULL);
242 } 243 }
243 244
244 skb_dst_set(skb, dst_clone(dst)); 245 rcu_read_lock();
246 skb_dst_set_noref(skb, dst);
245 247
246 /* Restore final destination back after routing done */ 248 /* Restore final destination back after routing done */
247 ipv6_addr_copy(&fl6.daddr, &np->daddr); 249 ipv6_addr_copy(&fl6.daddr, &np->daddr);
248 250
249 return ip6_xmit(sk, skb, &fl6, np->opt); 251 res = ip6_xmit(sk, skb, &fl6, np->opt);
252 rcu_read_unlock();
253 return res;
250} 254}
251
252EXPORT_SYMBOL_GPL(inet6_csk_xmit); 255EXPORT_SYMBOL_GPL(inet6_csk_xmit);
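inet6_csk_xmit() no longer clones the cached dst for every skb: skb_dst_set_noref() attaches it without a reference, which is only safe inside an RCU read-side section, so the transmit is bracketed by rcu_read_lock()/rcu_read_unlock() and the ip6_xmit() result is captured in res. A sketch of that shape, with the output call abstracted away:

#include <linux/rcupdate.h>
#include <linux/skbuff.h>
#include <net/dst.h>

static int xmit_with_cached_dst(struct sk_buff *skb, struct dst_entry *dst,
				int (*output)(struct sk_buff *))
{
	int res;

	rcu_read_lock();
	skb_dst_set_noref(skb, dst);	/* no dst reference taken */
	res = output(skb);		/* must finish before unlocking */
	rcu_read_unlock();
	return res;
}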
diff --git a/net/ipv6/inet6_hashtables.c b/net/ipv6/inet6_hashtables.c
index b53197233709..73f1a00a96af 100644
--- a/net/ipv6/inet6_hashtables.c
+++ b/net/ipv6/inet6_hashtables.c
@@ -20,6 +20,7 @@
20#include <net/inet_connection_sock.h> 20#include <net/inet_connection_sock.h>
21#include <net/inet_hashtables.h> 21#include <net/inet_hashtables.h>
22#include <net/inet6_hashtables.h> 22#include <net/inet6_hashtables.h>
23#include <net/secure_seq.h>
23#include <net/ip.h> 24#include <net/ip.h>
24 25
25int __inet6_hash(struct sock *sk, struct inet_timewait_sock *tw) 26int __inet6_hash(struct sock *sk, struct inet_timewait_sock *tw)
diff --git a/net/ipv6/ip6_fib.c b/net/ipv6/ip6_fib.c
index 54a4678955bf..320d91d20ad7 100644
--- a/net/ipv6/ip6_fib.c
+++ b/net/ipv6/ip6_fib.c
@@ -1455,7 +1455,7 @@ static int fib6_age(struct rt6_info *rt, void *arg)
1455 RT6_TRACE("aging clone %p\n", rt); 1455 RT6_TRACE("aging clone %p\n", rt);
1456 return -1; 1456 return -1;
1457 } else if ((rt->rt6i_flags & RTF_GATEWAY) && 1457 } else if ((rt->rt6i_flags & RTF_GATEWAY) &&
1458 (!(dst_get_neighbour(&rt->dst)->flags & NTF_ROUTER))) { 1458 (!(dst_get_neighbour_raw(&rt->dst)->flags & NTF_ROUTER))) {
1459 RT6_TRACE("purging route %p via non-router but gateway\n", 1459 RT6_TRACE("purging route %p via non-router but gateway\n",
1460 rt); 1460 rt);
1461 return -1; 1461 return -1;
diff --git a/net/ipv6/ip6_output.c b/net/ipv6/ip6_output.c
index 32e5339db0c8..4c882cf4e8a1 100644
--- a/net/ipv6/ip6_output.c
+++ b/net/ipv6/ip6_output.c
@@ -135,10 +135,15 @@ static int ip6_finish_output2(struct sk_buff *skb)
135 skb->len); 135 skb->len);
136 } 136 }
137 137
138 rcu_read_lock();
138 neigh = dst_get_neighbour(dst); 139 neigh = dst_get_neighbour(dst);
139 if (neigh) 140 if (neigh) {
140 return neigh_output(neigh, skb); 141 int res = neigh_output(neigh, skb);
141 142
143 rcu_read_unlock();
144 return res;
145 }
146 rcu_read_unlock();
142 IP6_INC_STATS_BH(dev_net(dst->dev), 147 IP6_INC_STATS_BH(dev_net(dst->dev),
143 ip6_dst_idev(dst), IPSTATS_MIB_OUTNOROUTES); 148 ip6_dst_idev(dst), IPSTATS_MIB_OUTNOROUTES);
144 kfree_skb(skb); 149 kfree_skb(skb);
@@ -975,12 +980,14 @@ static int ip6_dst_lookup_tail(struct sock *sk,
975 * dst entry and replace it instead with the 980 * dst entry and replace it instead with the
976 * dst entry of the nexthop router 981 * dst entry of the nexthop router
977 */ 982 */
983 rcu_read_lock();
978 n = dst_get_neighbour(*dst); 984 n = dst_get_neighbour(*dst);
979 if (n && !(n->nud_state & NUD_VALID)) { 985 if (n && !(n->nud_state & NUD_VALID)) {
980 struct inet6_ifaddr *ifp; 986 struct inet6_ifaddr *ifp;
981 struct flowi6 fl_gw6; 987 struct flowi6 fl_gw6;
982 int redirect; 988 int redirect;
983 989
990 rcu_read_unlock();
984 ifp = ipv6_get_ifaddr(net, &fl6->saddr, 991 ifp = ipv6_get_ifaddr(net, &fl6->saddr,
985 (*dst)->dev, 1); 992 (*dst)->dev, 1);
986 993
@@ -1000,6 +1007,8 @@ static int ip6_dst_lookup_tail(struct sock *sk,
1000 if ((err = (*dst)->error)) 1007 if ((err = (*dst)->error))
1001 goto out_err_release; 1008 goto out_err_release;
1002 } 1009 }
1010 } else {
1011 rcu_read_unlock();
1003 } 1012 }
1004#endif 1013#endif
1005 1014
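ip6_finish_output2() and ip6_dst_lookup_tail() now read the dst's neighbour under rcu_read_lock() instead of relying on an implicit reference; note that the read-side section stays open across neigh_output(), so the neighbour cannot disappear mid-transmit. A sketch of the transmit-side shape (the caller drops and counts the skb when no neighbour is found):

#include <linux/errno.h>
#include <linux/rcupdate.h>
#include <linux/skbuff.h>
#include <net/dst.h>
#include <net/neighbour.h>

static int output_via_neigh(struct dst_entry *dst, struct sk_buff *skb)
{
	struct neighbour *neigh;
	int res = -EINVAL;

	rcu_read_lock();
	neigh = dst_get_neighbour(dst);
	if (neigh)
		res = neigh_output(neigh, skb);	/* still under rcu_read_lock */
	rcu_read_unlock();
	return res;			/* caller handles the error case */
}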
diff --git a/net/ipv6/ip6_tunnel.c b/net/ipv6/ip6_tunnel.c
index 0bc98886c383..694d70a8a0ee 100644
--- a/net/ipv6/ip6_tunnel.c
+++ b/net/ipv6/ip6_tunnel.c
@@ -218,8 +218,8 @@ ip6_tnl_link(struct ip6_tnl_net *ip6n, struct ip6_tnl *t)
218{ 218{
219 struct ip6_tnl __rcu **tp = ip6_tnl_bucket(ip6n, &t->parms); 219 struct ip6_tnl __rcu **tp = ip6_tnl_bucket(ip6n, &t->parms);
220 220
221 rcu_assign_pointer(t->next , rtnl_dereference(*tp)); 221 RCU_INIT_POINTER(t->next , rtnl_dereference(*tp));
222 rcu_assign_pointer(*tp, t); 222 RCU_INIT_POINTER(*tp, t);
223} 223}
224 224
225/** 225/**
@@ -237,7 +237,7 @@ ip6_tnl_unlink(struct ip6_tnl_net *ip6n, struct ip6_tnl *t)
237 (iter = rtnl_dereference(*tp)) != NULL; 237 (iter = rtnl_dereference(*tp)) != NULL;
238 tp = &iter->next) { 238 tp = &iter->next) {
239 if (t == iter) { 239 if (t == iter) {
240 rcu_assign_pointer(*tp, t->next); 240 RCU_INIT_POINTER(*tp, t->next);
241 break; 241 break;
242 } 242 }
243 } 243 }
@@ -350,7 +350,7 @@ ip6_tnl_dev_uninit(struct net_device *dev)
350 struct ip6_tnl_net *ip6n = net_generic(net, ip6_tnl_net_id); 350 struct ip6_tnl_net *ip6n = net_generic(net, ip6_tnl_net_id);
351 351
352 if (dev == ip6n->fb_tnl_dev) 352 if (dev == ip6n->fb_tnl_dev)
353 rcu_assign_pointer(ip6n->tnls_wc[0], NULL); 353 RCU_INIT_POINTER(ip6n->tnls_wc[0], NULL);
354 else 354 else
355 ip6_tnl_unlink(ip6n, t); 355 ip6_tnl_unlink(ip6n, t);
356 ip6_tnl_dst_reset(t); 356 ip6_tnl_dst_reset(t);
@@ -889,7 +889,7 @@ static int ip6_tnl_xmit2(struct sk_buff *skb,
889 struct net_device_stats *stats = &t->dev->stats; 889 struct net_device_stats *stats = &t->dev->stats;
890 struct ipv6hdr *ipv6h = ipv6_hdr(skb); 890 struct ipv6hdr *ipv6h = ipv6_hdr(skb);
891 struct ipv6_tel_txoption opt; 891 struct ipv6_tel_txoption opt;
892 struct dst_entry *dst; 892 struct dst_entry *dst, *ndst = NULL;
893 struct net_device *tdev; 893 struct net_device *tdev;
894 int mtu; 894 int mtu;
895 unsigned int max_headroom = sizeof(struct ipv6hdr); 895 unsigned int max_headroom = sizeof(struct ipv6hdr);
@@ -897,19 +897,19 @@ static int ip6_tnl_xmit2(struct sk_buff *skb,
897 int err = -1; 897 int err = -1;
898 int pkt_len; 898 int pkt_len;
899 899
900 if ((dst = ip6_tnl_dst_check(t)) != NULL) 900 dst = ip6_tnl_dst_check(t);
901 dst_hold(dst); 901 if (!dst) {
902 else { 902 ndst = ip6_route_output(net, NULL, fl6);
903 dst = ip6_route_output(net, NULL, fl6);
904 903
905 if (dst->error) 904 if (ndst->error)
906 goto tx_err_link_failure; 905 goto tx_err_link_failure;
907 dst = xfrm_lookup(net, dst, flowi6_to_flowi(fl6), NULL, 0); 906 ndst = xfrm_lookup(net, ndst, flowi6_to_flowi(fl6), NULL, 0);
908 if (IS_ERR(dst)) { 907 if (IS_ERR(ndst)) {
909 err = PTR_ERR(dst); 908 err = PTR_ERR(ndst);
910 dst = NULL; 909 ndst = NULL;
911 goto tx_err_link_failure; 910 goto tx_err_link_failure;
912 } 911 }
912 dst = ndst;
913 } 913 }
914 914
915 tdev = dst->dev; 915 tdev = dst->dev;
@@ -955,7 +955,7 @@ static int ip6_tnl_xmit2(struct sk_buff *skb,
955 skb = new_skb; 955 skb = new_skb;
956 } 956 }
957 skb_dst_drop(skb); 957 skb_dst_drop(skb);
958 skb_dst_set(skb, dst_clone(dst)); 958 skb_dst_set_noref(skb, dst);
959 959
960 skb->transport_header = skb->network_header; 960 skb->transport_header = skb->network_header;
961 961
@@ -987,13 +987,14 @@ static int ip6_tnl_xmit2(struct sk_buff *skb,
987 stats->tx_errors++; 987 stats->tx_errors++;
988 stats->tx_aborted_errors++; 988 stats->tx_aborted_errors++;
989 } 989 }
990 ip6_tnl_dst_store(t, dst); 990 if (ndst)
991 ip6_tnl_dst_store(t, ndst);
991 return 0; 992 return 0;
992tx_err_link_failure: 993tx_err_link_failure:
993 stats->tx_carrier_errors++; 994 stats->tx_carrier_errors++;
994 dst_link_failure(skb); 995 dst_link_failure(skb);
995tx_err_dst_release: 996tx_err_dst_release:
996 dst_release(dst); 997 dst_release(ndst);
997 return err; 998 return err;
998} 999}
999 1000
@@ -1439,7 +1440,7 @@ static int __net_init ip6_fb_tnl_dev_init(struct net_device *dev)
1439 1440
1440 t->parms.proto = IPPROTO_IPV6; 1441 t->parms.proto = IPPROTO_IPV6;
1441 dev_hold(dev); 1442 dev_hold(dev);
1442 rcu_assign_pointer(ip6n->tnls_wc[0], t); 1443 RCU_INIT_POINTER(ip6n->tnls_wc[0], t);
1443 return 0; 1444 return 0;
1444} 1445}
1445 1446
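The ip6_tunnel.c transmit path separates the cached route from a freshly allocated one: a dst returned by ip6_tnl_dst_check() is borrowed (attached with skb_dst_set_noref(), never stored or released here), while only a fresh lookup (ndst) is stored back into the cache or released on error, so the cache no longer gains and drops a reference per packet. The list-manipulation hunks are part of the same RCU_INIT_POINTER conversion discussed at the sit.c diff below. A generic sketch of the borrow-or-own split, not the tunnel code itself:

#include <linux/types.h>
#include <net/dst.h>

/*
 * Returns a usable dst; *owned says whether the caller holds a reference
 * (fresh lookup) or is merely borrowing the cached entry and must not
 * release it.
 */
static struct dst_entry *pick_dst(struct dst_entry *cached,
				  struct dst_entry *(*lookup)(void *arg),
				  void *arg, bool *owned)
{
	if (cached) {
		*owned = false;
		return cached;
	}
	*owned = true;
	return lookup(arg);	/* store into the cache or dst_release() later */
}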
diff --git a/net/ipv6/ipv6_sockglue.c b/net/ipv6/ipv6_sockglue.c
index 9cb191ecaba8..147ede38ab48 100644
--- a/net/ipv6/ipv6_sockglue.c
+++ b/net/ipv6/ipv6_sockglue.c
@@ -913,7 +913,7 @@ static int ipv6_getsockopt_sticky(struct sock *sk, struct ipv6_txoptions *opt,
913} 913}
914 914
915static int do_ipv6_getsockopt(struct sock *sk, int level, int optname, 915static int do_ipv6_getsockopt(struct sock *sk, int level, int optname,
916 char __user *optval, int __user *optlen) 916 char __user *optval, int __user *optlen, unsigned flags)
917{ 917{
918 struct ipv6_pinfo *np = inet6_sk(sk); 918 struct ipv6_pinfo *np = inet6_sk(sk);
919 int len; 919 int len;
@@ -962,7 +962,7 @@ static int do_ipv6_getsockopt(struct sock *sk, int level, int optname,
962 962
963 msg.msg_control = optval; 963 msg.msg_control = optval;
964 msg.msg_controllen = len; 964 msg.msg_controllen = len;
965 msg.msg_flags = 0; 965 msg.msg_flags = flags;
966 966
967 lock_sock(sk); 967 lock_sock(sk);
968 skb = np->pktoptions; 968 skb = np->pktoptions;
@@ -1222,7 +1222,7 @@ int ipv6_getsockopt(struct sock *sk, int level, int optname,
1222 if(level != SOL_IPV6) 1222 if(level != SOL_IPV6)
1223 return -ENOPROTOOPT; 1223 return -ENOPROTOOPT;
1224 1224
1225 err = do_ipv6_getsockopt(sk, level, optname, optval, optlen); 1225 err = do_ipv6_getsockopt(sk, level, optname, optval, optlen, 0);
1226#ifdef CONFIG_NETFILTER 1226#ifdef CONFIG_NETFILTER
1227 /* we need to exclude all possible ENOPROTOOPTs except default case */ 1227 /* we need to exclude all possible ENOPROTOOPTs except default case */
1228 if (err == -ENOPROTOOPT && optname != IPV6_2292PKTOPTIONS) { 1228 if (err == -ENOPROTOOPT && optname != IPV6_2292PKTOPTIONS) {
@@ -1264,7 +1264,8 @@ int compat_ipv6_getsockopt(struct sock *sk, int level, int optname,
1264 return compat_mc_getsockopt(sk, level, optname, optval, optlen, 1264 return compat_mc_getsockopt(sk, level, optname, optval, optlen,
1265 ipv6_getsockopt); 1265 ipv6_getsockopt);
1266 1266
1267 err = do_ipv6_getsockopt(sk, level, optname, optval, optlen); 1267 err = do_ipv6_getsockopt(sk, level, optname, optval, optlen,
1268 MSG_CMSG_COMPAT);
1268#ifdef CONFIG_NETFILTER 1269#ifdef CONFIG_NETFILTER
1269 /* we need to exclude all possible ENOPROTOOPTs except default case */ 1270 /* we need to exclude all possible ENOPROTOOPTs except default case */
1270 if (err == -ENOPROTOOPT && optname != IPV6_2292PKTOPTIONS) { 1271 if (err == -ENOPROTOOPT && optname != IPV6_2292PKTOPTIONS) {
diff --git a/net/ipv6/ndisc.c b/net/ipv6/ndisc.c
index 9da6e02eaaeb..1f52dd257631 100644
--- a/net/ipv6/ndisc.c
+++ b/net/ipv6/ndisc.c
@@ -533,7 +533,8 @@ void ndisc_send_skb(struct sk_buff *skb,
533 533
534 skb_dst_set(skb, dst); 534 skb_dst_set(skb, dst);
535 535
536 idev = in6_dev_get(dst->dev); 536 rcu_read_lock();
537 idev = __in6_dev_get(dst->dev);
537 IP6_UPD_PO_STATS(net, idev, IPSTATS_MIB_OUT, skb->len); 538 IP6_UPD_PO_STATS(net, idev, IPSTATS_MIB_OUT, skb->len);
538 539
539 err = NF_HOOK(NFPROTO_IPV6, NF_INET_LOCAL_OUT, skb, NULL, dst->dev, 540 err = NF_HOOK(NFPROTO_IPV6, NF_INET_LOCAL_OUT, skb, NULL, dst->dev,
@@ -543,8 +544,7 @@ void ndisc_send_skb(struct sk_buff *skb,
543 ICMP6_INC_STATS(net, idev, ICMP6_MIB_OUTMSGS); 544 ICMP6_INC_STATS(net, idev, ICMP6_MIB_OUTMSGS);
544 } 545 }
545 546
546 if (likely(idev != NULL)) 547 rcu_read_unlock();
547 in6_dev_put(idev);
548} 548}
549 549
550EXPORT_SYMBOL(ndisc_send_skb); 550EXPORT_SYMBOL(ndisc_send_skb);
@@ -1039,7 +1039,7 @@ static void ndisc_recv_rs(struct sk_buff *skb)
1039 if (skb->len < sizeof(*rs_msg)) 1039 if (skb->len < sizeof(*rs_msg))
1040 return; 1040 return;
1041 1041
1042 idev = in6_dev_get(skb->dev); 1042 idev = __in6_dev_get(skb->dev);
1043 if (!idev) { 1043 if (!idev) {
1044 if (net_ratelimit()) 1044 if (net_ratelimit())
1045 ND_PRINTK1("ICMP6 RS: can't find in6 device\n"); 1045 ND_PRINTK1("ICMP6 RS: can't find in6 device\n");
@@ -1080,7 +1080,7 @@ static void ndisc_recv_rs(struct sk_buff *skb)
1080 neigh_release(neigh); 1080 neigh_release(neigh);
1081 } 1081 }
1082out: 1082out:
1083 in6_dev_put(idev); 1083 return;
1084} 1084}
1085 1085
1086static void ndisc_ra_useropt(struct sk_buff *ra, struct nd_opt_hdr *opt) 1086static void ndisc_ra_useropt(struct sk_buff *ra, struct nd_opt_hdr *opt)
@@ -1179,7 +1179,7 @@ static void ndisc_router_discovery(struct sk_buff *skb)
1179 * set the RA_RECV flag in the interface 1179 * set the RA_RECV flag in the interface
1180 */ 1180 */
1181 1181
1182 in6_dev = in6_dev_get(skb->dev); 1182 in6_dev = __in6_dev_get(skb->dev);
1183 if (in6_dev == NULL) { 1183 if (in6_dev == NULL) {
1184 ND_PRINTK0(KERN_ERR 1184 ND_PRINTK0(KERN_ERR
1185 "ICMPv6 RA: can't find inet6 device for %s.\n", 1185 "ICMPv6 RA: can't find inet6 device for %s.\n",
@@ -1188,7 +1188,6 @@ static void ndisc_router_discovery(struct sk_buff *skb)
1188 } 1188 }
1189 1189
1190 if (!ndisc_parse_options(opt, optlen, &ndopts)) { 1190 if (!ndisc_parse_options(opt, optlen, &ndopts)) {
1191 in6_dev_put(in6_dev);
1192 ND_PRINTK2(KERN_WARNING 1191 ND_PRINTK2(KERN_WARNING
1193 "ICMP6 RA: invalid ND options\n"); 1192 "ICMP6 RA: invalid ND options\n");
1194 return; 1193 return;
@@ -1255,7 +1254,6 @@ static void ndisc_router_discovery(struct sk_buff *skb)
1255 ND_PRINTK0(KERN_ERR 1254 ND_PRINTK0(KERN_ERR
1256 "ICMPv6 RA: %s() failed to add default route.\n", 1255 "ICMPv6 RA: %s() failed to add default route.\n",
1257 __func__); 1256 __func__);
1258 in6_dev_put(in6_dev);
1259 return; 1257 return;
1260 } 1258 }
1261 1259
@@ -1265,7 +1263,6 @@ static void ndisc_router_discovery(struct sk_buff *skb)
1265 "ICMPv6 RA: %s() got default router without neighbour.\n", 1263 "ICMPv6 RA: %s() got default router without neighbour.\n",
1266 __func__); 1264 __func__);
1267 dst_release(&rt->dst); 1265 dst_release(&rt->dst);
1268 in6_dev_put(in6_dev);
1269 return; 1266 return;
1270 } 1267 }
1271 neigh->flags |= NTF_ROUTER; 1268 neigh->flags |= NTF_ROUTER;
@@ -1422,7 +1419,6 @@ out:
1422 dst_release(&rt->dst); 1419 dst_release(&rt->dst);
1423 else if (neigh) 1420 else if (neigh)
1424 neigh_release(neigh); 1421 neigh_release(neigh);
1425 in6_dev_put(in6_dev);
1426} 1422}
1427 1423
1428static void ndisc_redirect_rcv(struct sk_buff *skb) 1424static void ndisc_redirect_rcv(struct sk_buff *skb)
@@ -1481,13 +1477,11 @@ static void ndisc_redirect_rcv(struct sk_buff *skb)
1481 return; 1477 return;
1482 } 1478 }
1483 1479
1484 in6_dev = in6_dev_get(skb->dev); 1480 in6_dev = __in6_dev_get(skb->dev);
1485 if (!in6_dev) 1481 if (!in6_dev)
1486 return; 1482 return;
1487 if (in6_dev->cnf.forwarding || !in6_dev->cnf.accept_redirects) { 1483 if (in6_dev->cnf.forwarding || !in6_dev->cnf.accept_redirects)
1488 in6_dev_put(in6_dev);
1489 return; 1484 return;
1490 }
1491 1485
1492 /* RFC2461 8.1: 1486 /* RFC2461 8.1:
1493 * The IP source address of the Redirect MUST be the same as the current 1487 * The IP source address of the Redirect MUST be the same as the current
@@ -1497,7 +1491,6 @@ static void ndisc_redirect_rcv(struct sk_buff *skb)
1497 if (!ndisc_parse_options((u8*)(dest + 1), optlen, &ndopts)) { 1491 if (!ndisc_parse_options((u8*)(dest + 1), optlen, &ndopts)) {
1498 ND_PRINTK2(KERN_WARNING 1492 ND_PRINTK2(KERN_WARNING
1499 "ICMPv6 Redirect: invalid ND options\n"); 1493 "ICMPv6 Redirect: invalid ND options\n");
1500 in6_dev_put(in6_dev);
1501 return; 1494 return;
1502 } 1495 }
1503 if (ndopts.nd_opts_tgt_lladdr) { 1496 if (ndopts.nd_opts_tgt_lladdr) {
@@ -1506,7 +1499,6 @@ static void ndisc_redirect_rcv(struct sk_buff *skb)
1506 if (!lladdr) { 1499 if (!lladdr) {
1507 ND_PRINTK2(KERN_WARNING 1500 ND_PRINTK2(KERN_WARNING
1508 "ICMPv6 Redirect: invalid link-layer address length\n"); 1501 "ICMPv6 Redirect: invalid link-layer address length\n");
1509 in6_dev_put(in6_dev);
1510 return; 1502 return;
1511 } 1503 }
1512 } 1504 }
@@ -1518,7 +1510,6 @@ static void ndisc_redirect_rcv(struct sk_buff *skb)
1518 on_link); 1510 on_link);
1519 neigh_release(neigh); 1511 neigh_release(neigh);
1520 } 1512 }
1521 in6_dev_put(in6_dev);
1522} 1513}
1523 1514
1524void ndisc_send_redirect(struct sk_buff *skb, struct neighbour *neigh, 1515void ndisc_send_redirect(struct sk_buff *skb, struct neighbour *neigh,
@@ -1651,7 +1642,8 @@ void ndisc_send_redirect(struct sk_buff *skb, struct neighbour *neigh,
1651 csum_partial(icmph, len, 0)); 1642 csum_partial(icmph, len, 0));
1652 1643
1653 skb_dst_set(buff, dst); 1644 skb_dst_set(buff, dst);
1654 idev = in6_dev_get(dst->dev); 1645 rcu_read_lock();
1646 idev = __in6_dev_get(dst->dev);
1655 IP6_UPD_PO_STATS(net, idev, IPSTATS_MIB_OUT, skb->len); 1647 IP6_UPD_PO_STATS(net, idev, IPSTATS_MIB_OUT, skb->len);
1656 err = NF_HOOK(NFPROTO_IPV6, NF_INET_LOCAL_OUT, buff, NULL, dst->dev, 1648 err = NF_HOOK(NFPROTO_IPV6, NF_INET_LOCAL_OUT, buff, NULL, dst->dev,
1657 dst_output); 1649 dst_output);
@@ -1660,8 +1652,7 @@ void ndisc_send_redirect(struct sk_buff *skb, struct neighbour *neigh,
1660 ICMP6_INC_STATS(net, idev, ICMP6_MIB_OUTMSGS); 1652 ICMP6_INC_STATS(net, idev, ICMP6_MIB_OUTMSGS);
1661 } 1653 }
1662 1654
1663 if (likely(idev != NULL)) 1655 rcu_read_unlock();
1664 in6_dev_put(idev);
1665 return; 1656 return;
1666 1657
1667release: 1658release:
diff --git a/net/ipv6/raw.c b/net/ipv6/raw.c
index 6a79f3081bdb..f34902f1ba33 100644
--- a/net/ipv6/raw.c
+++ b/net/ipv6/raw.c
@@ -130,14 +130,14 @@ static mh_filter_t __rcu *mh_filter __read_mostly;
130 130
131int rawv6_mh_filter_register(mh_filter_t filter) 131int rawv6_mh_filter_register(mh_filter_t filter)
132{ 132{
133 rcu_assign_pointer(mh_filter, filter); 133 RCU_INIT_POINTER(mh_filter, filter);
134 return 0; 134 return 0;
135} 135}
136EXPORT_SYMBOL(rawv6_mh_filter_register); 136EXPORT_SYMBOL(rawv6_mh_filter_register);
137 137
138int rawv6_mh_filter_unregister(mh_filter_t filter) 138int rawv6_mh_filter_unregister(mh_filter_t filter)
139{ 139{
140 rcu_assign_pointer(mh_filter, NULL); 140 RCU_INIT_POINTER(mh_filter, NULL);
141 synchronize_rcu(); 141 synchronize_rcu();
142 return 0; 142 return 0;
143} 143}
@@ -372,9 +372,9 @@ void raw6_icmp_error(struct sk_buff *skb, int nexthdr,
372 read_unlock(&raw_v6_hashinfo.lock); 372 read_unlock(&raw_v6_hashinfo.lock);
373} 373}
374 374
375static inline int rawv6_rcv_skb(struct sock * sk, struct sk_buff * skb) 375static inline int rawv6_rcv_skb(struct sock *sk, struct sk_buff *skb)
376{ 376{
377 if ((raw6_sk(sk)->checksum || rcu_dereference_raw(sk->sk_filter)) && 377 if ((raw6_sk(sk)->checksum || rcu_access_pointer(sk->sk_filter)) &&
378 skb_checksum_complete(skb)) { 378 skb_checksum_complete(skb)) {
379 atomic_inc(&sk->sk_drops); 379 atomic_inc(&sk->sk_drops);
380 kfree_skb(skb); 380 kfree_skb(skb);
diff --git a/net/ipv6/route.c b/net/ipv6/route.c
index e8987da06667..9e69eb0ec6dd 100644
--- a/net/ipv6/route.c
+++ b/net/ipv6/route.c
@@ -364,7 +364,7 @@ out:
364#ifdef CONFIG_IPV6_ROUTER_PREF 364#ifdef CONFIG_IPV6_ROUTER_PREF
365static void rt6_probe(struct rt6_info *rt) 365static void rt6_probe(struct rt6_info *rt)
366{ 366{
367 struct neighbour *neigh = rt ? dst_get_neighbour(&rt->dst) : NULL; 367 struct neighbour *neigh;
368 /* 368 /*
369 * Okay, this does not seem to be appropriate 369 * Okay, this does not seem to be appropriate
370 * for now, however, we need to check if it 370 * for now, however, we need to check if it
@@ -373,8 +373,10 @@ static void rt6_probe(struct rt6_info *rt)
373 * Router Reachability Probe MUST be rate-limited 373 * Router Reachability Probe MUST be rate-limited
374 * to no more than one per minute. 374 * to no more than one per minute.
375 */ 375 */
376 rcu_read_lock();
377 neigh = rt ? dst_get_neighbour(&rt->dst) : NULL;
376 if (!neigh || (neigh->nud_state & NUD_VALID)) 378 if (!neigh || (neigh->nud_state & NUD_VALID))
377 return; 379 goto out;
378 read_lock_bh(&neigh->lock); 380 read_lock_bh(&neigh->lock);
379 if (!(neigh->nud_state & NUD_VALID) && 381 if (!(neigh->nud_state & NUD_VALID) &&
380 time_after(jiffies, neigh->updated + rt->rt6i_idev->cnf.rtr_probe_interval)) { 382 time_after(jiffies, neigh->updated + rt->rt6i_idev->cnf.rtr_probe_interval)) {
@@ -387,8 +389,11 @@ static void rt6_probe(struct rt6_info *rt)
387 target = (struct in6_addr *)&neigh->primary_key; 389 target = (struct in6_addr *)&neigh->primary_key;
388 addrconf_addr_solict_mult(target, &mcaddr); 390 addrconf_addr_solict_mult(target, &mcaddr);
389 ndisc_send_ns(rt->rt6i_dev, NULL, target, &mcaddr, NULL); 391 ndisc_send_ns(rt->rt6i_dev, NULL, target, &mcaddr, NULL);
390 } else 392 } else {
391 read_unlock_bh(&neigh->lock); 393 read_unlock_bh(&neigh->lock);
394 }
395out:
396 rcu_read_unlock();
392} 397}
393#else 398#else
394static inline void rt6_probe(struct rt6_info *rt) 399static inline void rt6_probe(struct rt6_info *rt)
@@ -412,8 +417,11 @@ static inline int rt6_check_dev(struct rt6_info *rt, int oif)
412 417
413static inline int rt6_check_neigh(struct rt6_info *rt) 418static inline int rt6_check_neigh(struct rt6_info *rt)
414{ 419{
415 struct neighbour *neigh = dst_get_neighbour(&rt->dst); 420 struct neighbour *neigh;
416 int m; 421 int m;
422
423 rcu_read_lock();
424 neigh = dst_get_neighbour(&rt->dst);
417 if (rt->rt6i_flags & RTF_NONEXTHOP || 425 if (rt->rt6i_flags & RTF_NONEXTHOP ||
418 !(rt->rt6i_flags & RTF_GATEWAY)) 426 !(rt->rt6i_flags & RTF_GATEWAY))
419 m = 1; 427 m = 1;
@@ -430,6 +438,7 @@ static inline int rt6_check_neigh(struct rt6_info *rt)
430 read_unlock_bh(&neigh->lock); 438 read_unlock_bh(&neigh->lock);
431 } else 439 } else
432 m = 0; 440 m = 0;
441 rcu_read_unlock();
433 return m; 442 return m;
434} 443}
435 444
@@ -769,7 +778,7 @@ static struct rt6_info *rt6_alloc_clone(struct rt6_info *ort,
769 rt->rt6i_dst.plen = 128; 778 rt->rt6i_dst.plen = 128;
770 rt->rt6i_flags |= RTF_CACHE; 779 rt->rt6i_flags |= RTF_CACHE;
771 rt->dst.flags |= DST_HOST; 780 rt->dst.flags |= DST_HOST;
772 dst_set_neighbour(&rt->dst, neigh_clone(dst_get_neighbour(&ort->dst))); 781 dst_set_neighbour(&rt->dst, neigh_clone(dst_get_neighbour_raw(&ort->dst)));
773 } 782 }
774 return rt; 783 return rt;
775} 784}
@@ -803,7 +812,7 @@ restart:
803 dst_hold(&rt->dst); 812 dst_hold(&rt->dst);
804 read_unlock_bh(&table->tb6_lock); 813 read_unlock_bh(&table->tb6_lock);
805 814
806 if (!dst_get_neighbour(&rt->dst) && !(rt->rt6i_flags & RTF_NONEXTHOP)) 815 if (!dst_get_neighbour_raw(&rt->dst) && !(rt->rt6i_flags & RTF_NONEXTHOP))
807 nrt = rt6_alloc_cow(rt, &fl6->daddr, &fl6->saddr); 816 nrt = rt6_alloc_cow(rt, &fl6->daddr, &fl6->saddr);
808 else if (!(rt->dst.flags & DST_HOST)) 817 else if (!(rt->dst.flags & DST_HOST))
809 nrt = rt6_alloc_clone(rt, &fl6->daddr); 818 nrt = rt6_alloc_clone(rt, &fl6->daddr);
@@ -1587,7 +1596,7 @@ void rt6_redirect(const struct in6_addr *dest, const struct in6_addr *src,
1587 dst_confirm(&rt->dst); 1596 dst_confirm(&rt->dst);
1588 1597
1589 /* Duplicate redirect: silently ignore. */ 1598 /* Duplicate redirect: silently ignore. */
1590 if (neigh == dst_get_neighbour(&rt->dst)) 1599 if (neigh == dst_get_neighbour_raw(&rt->dst))
1591 goto out; 1600 goto out;
1592 1601
1593 nrt = ip6_rt_copy(rt, dest); 1602 nrt = ip6_rt_copy(rt, dest);
@@ -1682,7 +1691,7 @@ again:
1682 1. It is connected route. Action: COW 1691 1. It is connected route. Action: COW
1683 2. It is gatewayed route or NONEXTHOP route. Action: clone it. 1692 2. It is gatewayed route or NONEXTHOP route. Action: clone it.
1684 */ 1693 */
1685 if (!dst_get_neighbour(&rt->dst) && !(rt->rt6i_flags & RTF_NONEXTHOP)) 1694 if (!dst_get_neighbour_raw(&rt->dst) && !(rt->rt6i_flags & RTF_NONEXTHOP))
1686 nrt = rt6_alloc_cow(rt, daddr, saddr); 1695 nrt = rt6_alloc_cow(rt, daddr, saddr);
1687 else 1696 else
1688 nrt = rt6_alloc_clone(rt, daddr); 1697 nrt = rt6_alloc_clone(rt, daddr);
@@ -2326,6 +2335,7 @@ static int rt6_fill_node(struct net *net,
2326 struct nlmsghdr *nlh; 2335 struct nlmsghdr *nlh;
2327 long expires; 2336 long expires;
2328 u32 table; 2337 u32 table;
2338 struct neighbour *n;
2329 2339
2330 if (prefix) { /* user wants prefix routes only */ 2340 if (prefix) { /* user wants prefix routes only */
2331 if (!(rt->rt6i_flags & RTF_PREFIX_RT)) { 2341 if (!(rt->rt6i_flags & RTF_PREFIX_RT)) {
@@ -2414,8 +2424,11 @@ static int rt6_fill_node(struct net *net,
2414 if (rtnetlink_put_metrics(skb, dst_metrics_ptr(&rt->dst)) < 0) 2424 if (rtnetlink_put_metrics(skb, dst_metrics_ptr(&rt->dst)) < 0)
2415 goto nla_put_failure; 2425 goto nla_put_failure;
2416 2426
2417 if (dst_get_neighbour(&rt->dst)) 2427 rcu_read_lock();
2418 NLA_PUT(skb, RTA_GATEWAY, 16, &dst_get_neighbour(&rt->dst)->primary_key); 2428 n = dst_get_neighbour(&rt->dst);
2429 if (n)
2430 NLA_PUT(skb, RTA_GATEWAY, 16, &n->primary_key);
2431 rcu_read_unlock();
2419 2432
2420 if (rt->dst.dev) 2433 if (rt->dst.dev)
2421 NLA_PUT_U32(skb, RTA_OIF, rt->rt6i_dev->ifindex); 2434 NLA_PUT_U32(skb, RTA_OIF, rt->rt6i_dev->ifindex);
@@ -2608,12 +2621,14 @@ static int rt6_info_route(struct rt6_info *rt, void *p_arg)
2608#else 2621#else
2609 seq_puts(m, "00000000000000000000000000000000 00 "); 2622 seq_puts(m, "00000000000000000000000000000000 00 ");
2610#endif 2623#endif
2624 rcu_read_lock();
2611 n = dst_get_neighbour(&rt->dst); 2625 n = dst_get_neighbour(&rt->dst);
2612 if (n) { 2626 if (n) {
2613 seq_printf(m, "%pi6", n->primary_key); 2627 seq_printf(m, "%pi6", n->primary_key);
2614 } else { 2628 } else {
2615 seq_puts(m, "00000000000000000000000000000000"); 2629 seq_puts(m, "00000000000000000000000000000000");
2616 } 2630 }
2631 rcu_read_unlock();
2617 seq_printf(m, " %08x %08x %08x %08x %8s\n", 2632 seq_printf(m, " %08x %08x %08x %08x %8s\n",
2618 rt->rt6i_metric, atomic_read(&rt->dst.__refcnt), 2633 rt->rt6i_metric, atomic_read(&rt->dst.__refcnt),
2619 rt->dst.__use, rt->rt6i_flags, 2634 rt->dst.__use, rt->rt6i_flags,
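The IPv6 route.c hunks split neighbour access into two flavours: rt6_probe(), rt6_check_neigh(), rt6_fill_node() and rt6_info_route() wrap dst_get_neighbour() in rcu_read_lock()/rcu_read_unlock(), while call sites that only test or copy the pointer, or are protected some other way (fib6_age, rt6_alloc_clone, the redirect duplicate check), switch to dst_get_neighbour_raw(). A sketch of the dump-side pattern that copies the gateway while still inside the RCU section:

#include <linux/types.h>
#include <linux/string.h>
#include <linux/in6.h>
#include <net/dst.h>
#include <net/neighbour.h>

static bool copy_gateway(struct dst_entry *dst, struct in6_addr *gw)
{
	struct neighbour *n;
	bool found = false;

	rcu_read_lock();
	n = dst_get_neighbour(dst);
	if (n) {
		memcpy(gw, n->primary_key, sizeof(*gw));	/* 16 bytes */
		found = true;
	}
	rcu_read_unlock();
	return found;
}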
diff --git a/net/ipv6/sit.c b/net/ipv6/sit.c
index 07bf1085458f..a7a18602a046 100644
--- a/net/ipv6/sit.c
+++ b/net/ipv6/sit.c
@@ -182,7 +182,7 @@ static void ipip6_tunnel_unlink(struct sit_net *sitn, struct ip_tunnel *t)
182 (iter = rtnl_dereference(*tp)) != NULL; 182 (iter = rtnl_dereference(*tp)) != NULL;
183 tp = &iter->next) { 183 tp = &iter->next) {
184 if (t == iter) { 184 if (t == iter) {
185 rcu_assign_pointer(*tp, t->next); 185 RCU_INIT_POINTER(*tp, t->next);
186 break; 186 break;
187 } 187 }
188 } 188 }
@@ -192,8 +192,8 @@ static void ipip6_tunnel_link(struct sit_net *sitn, struct ip_tunnel *t)
192{ 192{
193 struct ip_tunnel __rcu **tp = ipip6_bucket(sitn, t); 193 struct ip_tunnel __rcu **tp = ipip6_bucket(sitn, t);
194 194
195 rcu_assign_pointer(t->next, rtnl_dereference(*tp)); 195 RCU_INIT_POINTER(t->next, rtnl_dereference(*tp));
196 rcu_assign_pointer(*tp, t); 196 RCU_INIT_POINTER(*tp, t);
197} 197}
198 198
199static void ipip6_tunnel_clone_6rd(struct net_device *dev, struct sit_net *sitn) 199static void ipip6_tunnel_clone_6rd(struct net_device *dev, struct sit_net *sitn)
@@ -391,7 +391,7 @@ ipip6_tunnel_add_prl(struct ip_tunnel *t, struct ip_tunnel_prl *a, int chg)
391 p->addr = a->addr; 391 p->addr = a->addr;
392 p->flags = a->flags; 392 p->flags = a->flags;
393 t->prl_count++; 393 t->prl_count++;
394 rcu_assign_pointer(t->prl, p); 394 RCU_INIT_POINTER(t->prl, p);
395out: 395out:
396 return err; 396 return err;
397} 397}
@@ -474,7 +474,7 @@ static void ipip6_tunnel_uninit(struct net_device *dev)
474 struct sit_net *sitn = net_generic(net, sit_net_id); 474 struct sit_net *sitn = net_generic(net, sit_net_id);
475 475
476 if (dev == sitn->fb_tunnel_dev) { 476 if (dev == sitn->fb_tunnel_dev) {
477 rcu_assign_pointer(sitn->tunnels_wc[0], NULL); 477 RCU_INIT_POINTER(sitn->tunnels_wc[0], NULL);
478 } else { 478 } else {
479 ipip6_tunnel_unlink(sitn, netdev_priv(dev)); 479 ipip6_tunnel_unlink(sitn, netdev_priv(dev));
480 ipip6_tunnel_del_prl(netdev_priv(dev), NULL); 480 ipip6_tunnel_del_prl(netdev_priv(dev), NULL);
@@ -672,6 +672,9 @@ static netdev_tx_t ipip6_tunnel_xmit(struct sk_buff *skb,
672 if (skb->protocol != htons(ETH_P_IPV6)) 672 if (skb->protocol != htons(ETH_P_IPV6))
673 goto tx_error; 673 goto tx_error;
674 674
675 if (tos == 1)
676 tos = ipv6_get_dsfield(iph6);
677
675 /* ISATAP (RFC4214) - must come before 6to4 */ 678 /* ISATAP (RFC4214) - must come before 6to4 */
676 if (dev->priv_flags & IFF_ISATAP) { 679 if (dev->priv_flags & IFF_ISATAP) {
677 struct neighbour *neigh = NULL; 680 struct neighbour *neigh = NULL;
@@ -1173,7 +1176,7 @@ static int __net_init ipip6_fb_tunnel_init(struct net_device *dev)
1173 if (!dev->tstats) 1176 if (!dev->tstats)
1174 return -ENOMEM; 1177 return -ENOMEM;
1175 dev_hold(dev); 1178 dev_hold(dev);
1176 rcu_assign_pointer(sitn->tunnels_wc[0], tunnel); 1179 RCU_INIT_POINTER(sitn->tunnels_wc[0], tunnel);
1177 return 0; 1180 return 0;
1178} 1181}
1179 1182
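The sit.c hunks carry two independent fixes: a configured tunnel TOS of 1 is treated as "inherit" and replaced with the DS field of the inner IPv6 header, and the rcu_assign_pointer() calls that store NULL or republish a pointer readers could already reach are downgraded to RCU_INIT_POINTER(), which skips the write memory barrier rcu_assign_pointer() implies. A sketch of when the cheaper form is legitimate, mirroring the unlink and teardown hunks (made-up tnl list):

#include <linux/rcupdate.h>

struct tnl {
	struct tnl __rcu *next;
};

static struct tnl __rcu *tnl_list;

/* Caller holds the update-side lock (RTNL in the real code). */
static void tnl_unlink_first(void)
{
	struct tnl *t = rcu_dereference_protected(tnl_list, 1);

	if (t)	/* t->next is already visible to readers via t */
		RCU_INIT_POINTER(tnl_list, t->next);	/* no barrier needed */
}

static void tnl_clear(void)
{
	RCU_INIT_POINTER(tnl_list, NULL);	/* storing NULL: no barrier */
}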
diff --git a/net/ipv6/syncookies.c b/net/ipv6/syncookies.c
index 89d5bf806222..ac838965ff34 100644
--- a/net/ipv6/syncookies.c
+++ b/net/ipv6/syncookies.c
@@ -165,7 +165,7 @@ struct sock *cookie_v6_check(struct sock *sk, struct sk_buff *skb)
165 int mss; 165 int mss;
166 struct dst_entry *dst; 166 struct dst_entry *dst;
167 __u8 rcv_wscale; 167 __u8 rcv_wscale;
168 bool ecn_ok; 168 bool ecn_ok = false;
169 169
170 if (!sysctl_tcp_syncookies || !th->ack || th->rst) 170 if (!sysctl_tcp_syncookies || !th->ack || th->rst)
171 goto out; 171 goto out;
diff --git a/net/ipv6/tcp_ipv6.c b/net/ipv6/tcp_ipv6.c
index 78aa53492b3e..44a5859535b5 100644
--- a/net/ipv6/tcp_ipv6.c
+++ b/net/ipv6/tcp_ipv6.c
@@ -61,6 +61,7 @@
61#include <net/timewait_sock.h> 61#include <net/timewait_sock.h>
62#include <net/netdma.h> 62#include <net/netdma.h>
63#include <net/inet_common.h> 63#include <net/inet_common.h>
64#include <net/secure_seq.h>
64 65
65#include <asm/uaccess.h> 66#include <asm/uaccess.h>
66 67
@@ -1627,7 +1628,7 @@ static int tcp_v6_do_rcv(struct sock *sk, struct sk_buff *skb)
1627 opt_skb = skb_clone(skb, GFP_ATOMIC); 1628 opt_skb = skb_clone(skb, GFP_ATOMIC);
1628 1629
1629 if (sk->sk_state == TCP_ESTABLISHED) { /* Fast path */ 1630 if (sk->sk_state == TCP_ESTABLISHED) { /* Fast path */
1630 sock_rps_save_rxhash(sk, skb->rxhash); 1631 sock_rps_save_rxhash(sk, skb);
1631 if (tcp_rcv_established(sk, skb, tcp_hdr(skb), skb->len)) 1632 if (tcp_rcv_established(sk, skb, tcp_hdr(skb), skb->len))
1632 goto reset; 1633 goto reset;
1633 if (opt_skb) 1634 if (opt_skb)
@@ -1649,7 +1650,7 @@ static int tcp_v6_do_rcv(struct sock *sk, struct sk_buff *skb)
1649 * the new socket.. 1650 * the new socket..
1650 */ 1651 */
1651 if(nsk != sk) { 1652 if(nsk != sk) {
1652 sock_rps_save_rxhash(nsk, skb->rxhash); 1653 sock_rps_save_rxhash(nsk, skb);
1653 if (tcp_child_process(sk, nsk, skb)) 1654 if (tcp_child_process(sk, nsk, skb))
1654 goto reset; 1655 goto reset;
1655 if (opt_skb) 1656 if (opt_skb)
@@ -1657,7 +1658,7 @@ static int tcp_v6_do_rcv(struct sock *sk, struct sk_buff *skb)
1657 return 0; 1658 return 0;
1658 } 1659 }
1659 } else 1660 } else
1660 sock_rps_save_rxhash(sk, skb->rxhash); 1661 sock_rps_save_rxhash(sk, skb);
1661 1662
1662 if (tcp_rcv_state_process(sk, skb, tcp_hdr(skb), skb->len)) 1663 if (tcp_rcv_state_process(sk, skb, tcp_hdr(skb), skb->len))
1663 goto reset; 1664 goto reset;
diff --git a/net/ipv6/udp.c b/net/ipv6/udp.c
index 29213b51c499..35bbdc42241e 100644
--- a/net/ipv6/udp.c
+++ b/net/ipv6/udp.c
@@ -509,7 +509,7 @@ int udpv6_queue_rcv_skb(struct sock * sk, struct sk_buff *skb)
509 int is_udplite = IS_UDPLITE(sk); 509 int is_udplite = IS_UDPLITE(sk);
510 510
511 if (!ipv6_addr_any(&inet6_sk(sk)->daddr)) 511 if (!ipv6_addr_any(&inet6_sk(sk)->daddr))
512 sock_rps_save_rxhash(sk, skb->rxhash); 512 sock_rps_save_rxhash(sk, skb);
513 513
514 if (!xfrm6_policy_check(sk, XFRM_POLICY_IN, skb)) 514 if (!xfrm6_policy_check(sk, XFRM_POLICY_IN, skb))
515 goto drop; 515 goto drop;
@@ -533,7 +533,7 @@ int udpv6_queue_rcv_skb(struct sock * sk, struct sk_buff *skb)
533 } 533 }
534 } 534 }
535 535
536 if (rcu_dereference_raw(sk->sk_filter)) { 536 if (rcu_access_pointer(sk->sk_filter)) {
537 if (udp_lib_checksum_complete(skb)) 537 if (udp_lib_checksum_complete(skb))
538 goto drop; 538 goto drop;
539 } 539 }
diff --git a/net/irda/irlan/irlan_eth.c b/net/irda/irlan/irlan_eth.c
index e8d5f4405d68..d14152e866d9 100644
--- a/net/irda/irlan/irlan_eth.c
+++ b/net/irda/irlan/irlan_eth.c
@@ -50,7 +50,7 @@ static const struct net_device_ops irlan_eth_netdev_ops = {
50 .ndo_open = irlan_eth_open, 50 .ndo_open = irlan_eth_open,
51 .ndo_stop = irlan_eth_close, 51 .ndo_stop = irlan_eth_close,
52 .ndo_start_xmit = irlan_eth_xmit, 52 .ndo_start_xmit = irlan_eth_xmit,
53 .ndo_set_multicast_list = irlan_eth_set_multicast_list, 53 .ndo_set_rx_mode = irlan_eth_set_multicast_list,
54 .ndo_change_mtu = eth_change_mtu, 54 .ndo_change_mtu = eth_change_mtu,
55 .ndo_validate_addr = eth_validate_addr, 55 .ndo_validate_addr = eth_validate_addr,
56}; 56};
diff --git a/net/iucv/Kconfig b/net/iucv/Kconfig
index 16ce9cd4f39e..497fbe732def 100644
--- a/net/iucv/Kconfig
+++ b/net/iucv/Kconfig
@@ -1,15 +1,17 @@
1config IUCV 1config IUCV
2 tristate "IUCV support (S390 - z/VM only)"
3 depends on S390 2 depends on S390
3 def_tristate y if S390
4 prompt "IUCV support (S390 - z/VM only)"
4 help 5 help
5 Select this option if you want to use inter-user communication 6 Select this option if you want to use inter-user communication
6 under VM or VIF. If you run on z/VM, say "Y" to enable a fast 7 under VM or VIF. If you run on z/VM, say "Y" to enable a fast
7 communication link between VM guests. 8 communication link between VM guests.
8 9
9config AFIUCV 10config AFIUCV
10 tristate "AF_IUCV support (S390 - z/VM only)" 11 depends on S390
11 depends on IUCV 12 def_tristate m if QETH_L3 || IUCV
13 prompt "AF_IUCV Socket support (S390 - z/VM and HiperSockets transport)"
12 help 14 help
13 Select this option if you want to use inter-user communication under 15 Select this option if you want to use AF_IUCV socket applications
14 VM or VIF sockets. If you run on z/VM, say "Y" to enable a fast 16 based on z/VM inter-user communication vehicle or based on
15 communication link between VM guests. 17 HiperSockets.
diff --git a/net/iucv/af_iucv.c b/net/iucv/af_iucv.c
index e2013e434d03..c39f3a43cd80 100644
--- a/net/iucv/af_iucv.c
+++ b/net/iucv/af_iucv.c
@@ -27,10 +27,9 @@
27#include <asm/cpcmd.h> 27#include <asm/cpcmd.h>
28#include <linux/kmod.h> 28#include <linux/kmod.h>
29 29
30#include <net/iucv/iucv.h>
31#include <net/iucv/af_iucv.h> 30#include <net/iucv/af_iucv.h>
32 31
33#define VERSION "1.1" 32#define VERSION "1.2"
34 33
35static char iucv_userid[80]; 34static char iucv_userid[80];
36 35
@@ -42,6 +41,8 @@ static struct proto iucv_proto = {
42 .obj_size = sizeof(struct iucv_sock), 41 .obj_size = sizeof(struct iucv_sock),
43}; 42};
44 43
44static struct iucv_interface *pr_iucv;
45
45/* special AF_IUCV IPRM messages */ 46/* special AF_IUCV IPRM messages */
46static const u8 iprm_shutdown[8] = 47static const u8 iprm_shutdown[8] =
47 {0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01}; 48 {0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01};
@@ -90,6 +91,12 @@ do { \
90static void iucv_sock_kill(struct sock *sk); 91static void iucv_sock_kill(struct sock *sk);
91static void iucv_sock_close(struct sock *sk); 92static void iucv_sock_close(struct sock *sk);
92 93
94static int afiucv_hs_rcv(struct sk_buff *skb, struct net_device *dev,
95 struct packet_type *pt, struct net_device *orig_dev);
96static int afiucv_hs_send(struct iucv_message *imsg, struct sock *sock,
97 struct sk_buff *skb, u8 flags);
98static void afiucv_hs_callback_txnotify(struct sk_buff *, enum iucv_tx_notify);
99
93/* Call Back functions */ 100/* Call Back functions */
94static void iucv_callback_rx(struct iucv_path *, struct iucv_message *); 101static void iucv_callback_rx(struct iucv_path *, struct iucv_message *);
95static void iucv_callback_txdone(struct iucv_path *, struct iucv_message *); 102static void iucv_callback_txdone(struct iucv_path *, struct iucv_message *);
@@ -165,7 +172,7 @@ static int afiucv_pm_freeze(struct device *dev)
165 case IUCV_CLOSING: 172 case IUCV_CLOSING:
166 case IUCV_CONNECTED: 173 case IUCV_CONNECTED:
167 if (iucv->path) { 174 if (iucv->path) {
168 err = iucv_path_sever(iucv->path, NULL); 175 err = pr_iucv->path_sever(iucv->path, NULL);
169 iucv_path_free(iucv->path); 176 iucv_path_free(iucv->path);
170 iucv->path = NULL; 177 iucv->path = NULL;
171 } 178 }
@@ -229,7 +236,7 @@ static const struct dev_pm_ops afiucv_pm_ops = {
229static struct device_driver af_iucv_driver = { 236static struct device_driver af_iucv_driver = {
230 .owner = THIS_MODULE, 237 .owner = THIS_MODULE,
231 .name = "afiucv", 238 .name = "afiucv",
232 .bus = &iucv_bus, 239 .bus = NULL,
233 .pm = &afiucv_pm_ops, 240 .pm = &afiucv_pm_ops,
234}; 241};
235 242
@@ -294,7 +301,11 @@ static inline int iucv_below_msglim(struct sock *sk)
294 301
295 if (sk->sk_state != IUCV_CONNECTED) 302 if (sk->sk_state != IUCV_CONNECTED)
296 return 1; 303 return 1;
297 return (skb_queue_len(&iucv->send_skb_q) < iucv->path->msglim); 304 if (iucv->transport == AF_IUCV_TRANS_IUCV)
305 return (skb_queue_len(&iucv->send_skb_q) < iucv->path->msglim);
306 else
307 return ((atomic_read(&iucv->msg_sent) < iucv->msglimit_peer) &&
308 (atomic_read(&iucv->pendings) <= 0));
298} 309}
299 310
300/** 311/**
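With the HiperSockets transport (AF_IUCV_TRANS_HIPER), iucv_below_msglim() gains a second flow-control regime: instead of bounding the send queue by the IUCV path's msglim, it compares messages in flight against the window the peer advertised and also requires that no transmit notifications are outstanding. A user-space model of the new check, with field names taken from the hunk:

#include <stdbool.h>

struct hs_flowctl {
	int msg_sent;		/* messages sent but not yet confirmed */
	int msglimit_peer;	/* window advertised by the peer */
	int pendings;		/* tx-notify completions still outstanding */
};

static bool hs_below_msglim(const struct hs_flowctl *fc)
{
	return fc->msg_sent < fc->msglimit_peer && fc->pendings <= 0;
}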
@@ -312,6 +323,79 @@ static void iucv_sock_wake_msglim(struct sock *sk)
312 rcu_read_unlock(); 323 rcu_read_unlock();
313} 324}
314 325
326/**
327 * afiucv_hs_send() - send a message through HiperSockets transport
328 */
329static int afiucv_hs_send(struct iucv_message *imsg, struct sock *sock,
330 struct sk_buff *skb, u8 flags)
331{
332 struct net *net = sock_net(sock);
333 struct iucv_sock *iucv = iucv_sk(sock);
334 struct af_iucv_trans_hdr *phs_hdr;
335 struct sk_buff *nskb;
336 int err, confirm_recv = 0;
337
338 memset(skb->head, 0, ETH_HLEN);
339 phs_hdr = (struct af_iucv_trans_hdr *)skb_push(skb,
340 sizeof(struct af_iucv_trans_hdr));
341 skb_reset_mac_header(skb);
342 skb_reset_network_header(skb);
343 skb_push(skb, ETH_HLEN);
344 skb_reset_mac_header(skb);
345 memset(phs_hdr, 0, sizeof(struct af_iucv_trans_hdr));
346
347 phs_hdr->magic = ETH_P_AF_IUCV;
348 phs_hdr->version = 1;
349 phs_hdr->flags = flags;
350 if (flags == AF_IUCV_FLAG_SYN)
351 phs_hdr->window = iucv->msglimit;
352 else if ((flags == AF_IUCV_FLAG_WIN) || !flags) {
353 confirm_recv = atomic_read(&iucv->msg_recv);
354 phs_hdr->window = confirm_recv;
355 if (confirm_recv)
356 phs_hdr->flags = phs_hdr->flags | AF_IUCV_FLAG_WIN;
357 }
358 memcpy(phs_hdr->destUserID, iucv->dst_user_id, 8);
359 memcpy(phs_hdr->destAppName, iucv->dst_name, 8);
360 memcpy(phs_hdr->srcUserID, iucv->src_user_id, 8);
361 memcpy(phs_hdr->srcAppName, iucv->src_name, 8);
362 ASCEBC(phs_hdr->destUserID, sizeof(phs_hdr->destUserID));
363 ASCEBC(phs_hdr->destAppName, sizeof(phs_hdr->destAppName));
364 ASCEBC(phs_hdr->srcUserID, sizeof(phs_hdr->srcUserID));
365 ASCEBC(phs_hdr->srcAppName, sizeof(phs_hdr->srcAppName));
366 if (imsg)
367 memcpy(&phs_hdr->iucv_hdr, imsg, sizeof(struct iucv_message));
368
369 rcu_read_lock();
370 skb->dev = dev_get_by_index_rcu(net, sock->sk_bound_dev_if);
371 rcu_read_unlock();
372 if (!skb->dev)
373 return -ENODEV;
374 if (!(skb->dev->flags & IFF_UP))
375 return -ENETDOWN;
376 if (skb->len > skb->dev->mtu) {
377 if (sock->sk_type == SOCK_SEQPACKET)
378 return -EMSGSIZE;
379 else
380 skb_trim(skb, skb->dev->mtu);
381 }
382 skb->protocol = ETH_P_AF_IUCV;
383 skb_shinfo(skb)->tx_flags |= SKBTX_DRV_NEEDS_SK_REF;
384 nskb = skb_clone(skb, GFP_ATOMIC);
385 if (!nskb)
386 return -ENOMEM;
387 skb_queue_tail(&iucv->send_skb_q, nskb);
388 err = dev_queue_xmit(skb);
389 if (err) {
390 skb_unlink(nskb, &iucv->send_skb_q);
391 kfree_skb(nskb);
392 } else {
393 atomic_sub(confirm_recv, &iucv->msg_recv);
394 WARN_ON(atomic_read(&iucv->msg_recv) < 0);
395 }
396 return err;
397}
398
315/* Timers */ 399/* Timers */
316static void iucv_sock_timeout(unsigned long arg) 400static void iucv_sock_timeout(unsigned long arg)
317{ 401{
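
afiucv_hs_send(), added above, builds every outgoing HiperSockets frame the same way: an af_iucv_trans_hdr is pushed in front of the payload, the addressing fields are converted to EBCDIC, and on WIN and plain data frames the current receive counter is piggy-backed as a window confirmation. A simplified sketch of that header preparation on a plain struct (the layout is abbreviated; only the constants and field names visible in the hunk come from the patch, the rest is illustrative):

#include <stdint.h>
#include <string.h>

/* abbreviated stand-in for struct af_iucv_trans_hdr: only the fields
 * touched in the hunk above; the real layout lives in the kernel header */
struct hs_hdr {
	uint16_t magic;                 /* ETH_P_AF_IUCV in the patch */
	uint8_t  version;               /* always 1 */
	uint8_t  flags;                 /* SYN / FIN / WIN / 0 for data */
	uint16_t window;                /* msglimit on SYN, confirmed msgs otherwise */
	char     dst_user[8], dst_app[8];
	char     src_user[8], src_app[8];
};

#define HS_FLAG_SYN 0x01                /* illustrative values, not the kernel's */
#define HS_FLAG_WIN 0x02

/* fill a header the way afiucv_hs_send() does, piggy-backing the receive
 * counter as a window confirmation on WIN frames and plain data frames */
static int prepare_hdr(struct hs_hdr *h, uint8_t flags,
		       uint16_t msglimit, int msg_recv)
{
	int confirm = 0;

	memset(h, 0, sizeof(*h));
	h->magic = 0x10ff;              /* placeholder for ETH_P_AF_IUCV */
	h->version = 1;
	h->flags = flags;
	if (flags == HS_FLAG_SYN) {
		h->window = msglimit;   /* advertise our own limit */
	} else if (flags == HS_FLAG_WIN || !flags) {
		confirm = msg_recv;     /* messages consumed since the last WIN */
		h->window = confirm;
		if (confirm)
			h->flags |= HS_FLAG_WIN;
	}
	return confirm;                 /* afiucv_hs_send() subtracts this on success */
}
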
@@ -380,6 +464,8 @@ static void iucv_sock_close(struct sock *sk)
380 unsigned char user_data[16]; 464 unsigned char user_data[16];
381 struct iucv_sock *iucv = iucv_sk(sk); 465 struct iucv_sock *iucv = iucv_sk(sk);
382 unsigned long timeo; 466 unsigned long timeo;
467 int err, blen;
468 struct sk_buff *skb;
383 469
384 iucv_sock_clear_timer(sk); 470 iucv_sock_clear_timer(sk);
385 lock_sock(sk); 471 lock_sock(sk);
@@ -390,6 +476,20 @@ static void iucv_sock_close(struct sock *sk)
390 break; 476 break;
391 477
392 case IUCV_CONNECTED: 478 case IUCV_CONNECTED:
479 if (iucv->transport == AF_IUCV_TRANS_HIPER) {
480 /* send fin */
481 blen = sizeof(struct af_iucv_trans_hdr) + ETH_HLEN;
482 skb = sock_alloc_send_skb(sk, blen, 1, &err);
483 if (skb) {
484 skb_reserve(skb,
485 sizeof(struct af_iucv_trans_hdr) +
486 ETH_HLEN);
487 err = afiucv_hs_send(NULL, sk, skb,
488 AF_IUCV_FLAG_FIN);
489 }
490 sk->sk_state = IUCV_DISCONN;
491 sk->sk_state_change(sk);
492 }
393 case IUCV_DISCONN: 493 case IUCV_DISCONN:
394 sk->sk_state = IUCV_CLOSING; 494 sk->sk_state = IUCV_CLOSING;
395 sk->sk_state_change(sk); 495 sk->sk_state_change(sk);
@@ -412,7 +512,7 @@ static void iucv_sock_close(struct sock *sk)
412 low_nmcpy(user_data, iucv->src_name); 512 low_nmcpy(user_data, iucv->src_name);
413 high_nmcpy(user_data, iucv->dst_name); 513 high_nmcpy(user_data, iucv->dst_name);
414 ASCEBC(user_data, sizeof(user_data)); 514 ASCEBC(user_data, sizeof(user_data));
415 iucv_path_sever(iucv->path, user_data); 515 pr_iucv->path_sever(iucv->path, user_data);
416 iucv_path_free(iucv->path); 516 iucv_path_free(iucv->path);
417 iucv->path = NULL; 517 iucv->path = NULL;
418 } 518 }
@@ -444,23 +544,33 @@ static void iucv_sock_init(struct sock *sk, struct sock *parent)
444static struct sock *iucv_sock_alloc(struct socket *sock, int proto, gfp_t prio) 544static struct sock *iucv_sock_alloc(struct socket *sock, int proto, gfp_t prio)
445{ 545{
446 struct sock *sk; 546 struct sock *sk;
547 struct iucv_sock *iucv;
447 548
448 sk = sk_alloc(&init_net, PF_IUCV, prio, &iucv_proto); 549 sk = sk_alloc(&init_net, PF_IUCV, prio, &iucv_proto);
449 if (!sk) 550 if (!sk)
450 return NULL; 551 return NULL;
552 iucv = iucv_sk(sk);
451 553
452 sock_init_data(sock, sk); 554 sock_init_data(sock, sk);
453 INIT_LIST_HEAD(&iucv_sk(sk)->accept_q); 555 INIT_LIST_HEAD(&iucv->accept_q);
454 spin_lock_init(&iucv_sk(sk)->accept_q_lock); 556 spin_lock_init(&iucv->accept_q_lock);
455 skb_queue_head_init(&iucv_sk(sk)->send_skb_q); 557 skb_queue_head_init(&iucv->send_skb_q);
456 INIT_LIST_HEAD(&iucv_sk(sk)->message_q.list); 558 INIT_LIST_HEAD(&iucv->message_q.list);
457 spin_lock_init(&iucv_sk(sk)->message_q.lock); 559 spin_lock_init(&iucv->message_q.lock);
458 skb_queue_head_init(&iucv_sk(sk)->backlog_skb_q); 560 skb_queue_head_init(&iucv->backlog_skb_q);
459 iucv_sk(sk)->send_tag = 0; 561 iucv->send_tag = 0;
460 iucv_sk(sk)->flags = 0; 562 atomic_set(&iucv->pendings, 0);
461 iucv_sk(sk)->msglimit = IUCV_QUEUELEN_DEFAULT; 563 iucv->flags = 0;
462 iucv_sk(sk)->path = NULL; 564 iucv->msglimit = 0;
463 memset(&iucv_sk(sk)->src_user_id , 0, 32); 565 atomic_set(&iucv->msg_sent, 0);
566 atomic_set(&iucv->msg_recv, 0);
567 iucv->path = NULL;
568 iucv->sk_txnotify = afiucv_hs_callback_txnotify;
569 memset(&iucv->src_user_id , 0, 32);
570 if (pr_iucv)
571 iucv->transport = AF_IUCV_TRANS_IUCV;
572 else
573 iucv->transport = AF_IUCV_TRANS_HIPER;
464 574
465 sk->sk_destruct = iucv_sock_destruct; 575 sk->sk_destruct = iucv_sock_destruct;
466 sk->sk_sndtimeo = IUCV_CONN_TIMEOUT; 576 sk->sk_sndtimeo = IUCV_CONN_TIMEOUT;
@@ -591,7 +701,9 @@ static int iucv_sock_bind(struct socket *sock, struct sockaddr *addr,
591 struct sockaddr_iucv *sa = (struct sockaddr_iucv *) addr; 701 struct sockaddr_iucv *sa = (struct sockaddr_iucv *) addr;
592 struct sock *sk = sock->sk; 702 struct sock *sk = sock->sk;
593 struct iucv_sock *iucv; 703 struct iucv_sock *iucv;
594 int err; 704 int err = 0;
705 struct net_device *dev;
706 char uid[9];
595 707
596 /* Verify the input sockaddr */ 708 /* Verify the input sockaddr */
597 if (!addr || addr->sa_family != AF_IUCV) 709 if (!addr || addr->sa_family != AF_IUCV)
@@ -610,19 +722,46 @@ static int iucv_sock_bind(struct socket *sock, struct sockaddr *addr,
610 err = -EADDRINUSE; 722 err = -EADDRINUSE;
611 goto done_unlock; 723 goto done_unlock;
612 } 724 }
613 if (iucv->path) { 725 if (iucv->path)
614 err = 0;
615 goto done_unlock; 726 goto done_unlock;
616 }
617 727
618 /* Bind the socket */ 728 /* Bind the socket */
619 memcpy(iucv->src_name, sa->siucv_name, 8);
620 729
621 /* Copy the user id */ 730 if (pr_iucv)
622 memcpy(iucv->src_user_id, iucv_userid, 8); 731 if (!memcmp(sa->siucv_user_id, iucv_userid, 8))
623 sk->sk_state = IUCV_BOUND; 732 goto vm_bind; /* VM IUCV transport */
624 err = 0;
625 733
734 /* try hiper transport */
735 memcpy(uid, sa->siucv_user_id, sizeof(uid));
736 ASCEBC(uid, 8);
737 rcu_read_lock();
738 for_each_netdev_rcu(&init_net, dev) {
739 if (!memcmp(dev->perm_addr, uid, 8)) {
740 memcpy(iucv->src_name, sa->siucv_name, 8);
741 memcpy(iucv->src_user_id, sa->siucv_user_id, 8);
742 sock->sk->sk_bound_dev_if = dev->ifindex;
743 sk->sk_state = IUCV_BOUND;
744 iucv->transport = AF_IUCV_TRANS_HIPER;
745 if (!iucv->msglimit)
746 iucv->msglimit = IUCV_HIPER_MSGLIM_DEFAULT;
747 rcu_read_unlock();
748 goto done_unlock;
749 }
750 }
751 rcu_read_unlock();
752vm_bind:
753 if (pr_iucv) {
754 /* use local userid for backward compat */
755 memcpy(iucv->src_name, sa->siucv_name, 8);
756 memcpy(iucv->src_user_id, iucv_userid, 8);
757 sk->sk_state = IUCV_BOUND;
758 iucv->transport = AF_IUCV_TRANS_IUCV;
759 if (!iucv->msglimit)
760 iucv->msglimit = IUCV_QUEUELEN_DEFAULT;
761 goto done_unlock;
762 }
763 /* found no dev to bind */
764 err = -ENODEV;
626done_unlock: 765done_unlock:
627 /* Release the socket list lock */ 766 /* Release the socket list lock */
628 write_unlock_bh(&iucv_sk_list.lock); 767 write_unlock_bh(&iucv_sk_list.lock);
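
The reworked iucv_sock_bind() above selects the transport at bind time: if the requested user ID equals the local z/VM user ID and the base IUCV module is loaded, the socket binds to the classic VM transport; otherwise the ID is converted to EBCDIC (ASCEBC in the patch) and compared against the permanent addresses of the network devices, which selects the HiperSockets transport, with a fall-back to the VM transport under the local user ID for backward compatibility. A rough standalone sketch of that selection order (to_ebcdic() and the flat device list are hypothetical stand-ins):

#include <string.h>

enum transport { TRANS_IUCV, TRANS_HIPER, TRANS_NONE };

struct netdev { char perm_addr[8]; };

/* stand-in for ASCEBC(); the patch converts the ID in place */
static void to_ebcdic(char *buf, size_t len) { (void)buf; (void)len; }

static enum transport pick_transport(const char *req_user_id,
				     const char *local_vm_userid, /* NULL without z/VM */
				     const struct netdev *devs, size_t ndevs)
{
	char uid[8];
	size_t i;

	/* requested ID names the local z/VM guest: classic IUCV transport */
	if (local_vm_userid && !memcmp(req_user_id, local_vm_userid, 8))
		return TRANS_IUCV;

	/* otherwise look for a HiperSockets device owning that ID */
	memcpy(uid, req_user_id, 8);
	to_ebcdic(uid, sizeof(uid));
	for (i = 0; i < ndevs; i++)
		if (!memcmp(devs[i].perm_addr, uid, 8))
			return TRANS_HIPER;

	/* backward compatibility: fall back to VM under the local user ID */
	if (local_vm_userid)
		return TRANS_IUCV;
	return TRANS_NONE;              /* bind fails with -ENODEV */
}
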
@@ -658,45 +797,44 @@ static int iucv_sock_autobind(struct sock *sk)
658 797
659 memcpy(&iucv->src_name, name, 8); 798 memcpy(&iucv->src_name, name, 8);
660 799
800 if (!iucv->msglimit)
801 iucv->msglimit = IUCV_QUEUELEN_DEFAULT;
802
661 return err; 803 return err;
662} 804}
663 805
664/* Connect an unconnected socket */ 806static int afiucv_hs_connect(struct socket *sock)
665static int iucv_sock_connect(struct socket *sock, struct sockaddr *addr,
666 int alen, int flags)
667{ 807{
668 struct sockaddr_iucv *sa = (struct sockaddr_iucv *) addr;
669 struct sock *sk = sock->sk; 808 struct sock *sk = sock->sk;
670 struct iucv_sock *iucv; 809 struct sk_buff *skb;
671 unsigned char user_data[16]; 810 int blen = sizeof(struct af_iucv_trans_hdr) + ETH_HLEN;
672 int err; 811 int err = 0;
673
674 if (addr->sa_family != AF_IUCV || alen < sizeof(struct sockaddr_iucv))
675 return -EINVAL;
676
677 if (sk->sk_state != IUCV_OPEN && sk->sk_state != IUCV_BOUND)
678 return -EBADFD;
679
680 if (sk->sk_type != SOCK_STREAM && sk->sk_type != SOCK_SEQPACKET)
681 return -EINVAL;
682 812
683 if (sk->sk_state == IUCV_OPEN) { 813 /* send syn */
684 err = iucv_sock_autobind(sk); 814 skb = sock_alloc_send_skb(sk, blen, 1, &err);
685 if (unlikely(err)) 815 if (!skb) {
686 return err; 816 err = -ENOMEM;
817 goto done;
687 } 818 }
819 skb->dev = NULL;
820 skb_reserve(skb, blen);
821 err = afiucv_hs_send(NULL, sk, skb, AF_IUCV_FLAG_SYN);
822done:
823 return err;
824}
688 825
689 lock_sock(sk); 826static int afiucv_path_connect(struct socket *sock, struct sockaddr *addr)
690 827{
691 /* Set the destination information */ 828 struct sockaddr_iucv *sa = (struct sockaddr_iucv *) addr;
692 memcpy(iucv_sk(sk)->dst_user_id, sa->siucv_user_id, 8); 829 struct sock *sk = sock->sk;
693 memcpy(iucv_sk(sk)->dst_name, sa->siucv_name, 8); 830 struct iucv_sock *iucv = iucv_sk(sk);
831 unsigned char user_data[16];
832 int err;
694 833
695 high_nmcpy(user_data, sa->siucv_name); 834 high_nmcpy(user_data, sa->siucv_name);
696 low_nmcpy(user_data, iucv_sk(sk)->src_name); 835 low_nmcpy(user_data, iucv->src_name);
697 ASCEBC(user_data, sizeof(user_data)); 836 ASCEBC(user_data, sizeof(user_data));
698 837
699 iucv = iucv_sk(sk);
700 /* Create path. */ 838 /* Create path. */
701 iucv->path = iucv_path_alloc(iucv->msglimit, 839 iucv->path = iucv_path_alloc(iucv->msglimit,
702 IUCV_IPRMDATA, GFP_KERNEL); 840 IUCV_IPRMDATA, GFP_KERNEL);
@@ -704,8 +842,9 @@ static int iucv_sock_connect(struct socket *sock, struct sockaddr *addr,
704 err = -ENOMEM; 842 err = -ENOMEM;
705 goto done; 843 goto done;
706 } 844 }
707 err = iucv_path_connect(iucv->path, &af_iucv_handler, 845 err = pr_iucv->path_connect(iucv->path, &af_iucv_handler,
708 sa->siucv_user_id, NULL, user_data, sk); 846 sa->siucv_user_id, NULL, user_data,
847 sk);
709 if (err) { 848 if (err) {
710 iucv_path_free(iucv->path); 849 iucv_path_free(iucv->path);
711 iucv->path = NULL; 850 iucv->path = NULL;
@@ -724,21 +863,62 @@ static int iucv_sock_connect(struct socket *sock, struct sockaddr *addr,
724 err = -ECONNREFUSED; 863 err = -ECONNREFUSED;
725 break; 864 break;
726 } 865 }
727 goto done;
728 } 866 }
867done:
868 return err;
869}
729 870
730 if (sk->sk_state != IUCV_CONNECTED) { 871/* Connect an unconnected socket */
872static int iucv_sock_connect(struct socket *sock, struct sockaddr *addr,
873 int alen, int flags)
874{
875 struct sockaddr_iucv *sa = (struct sockaddr_iucv *) addr;
876 struct sock *sk = sock->sk;
877 struct iucv_sock *iucv = iucv_sk(sk);
878 int err;
879
880 if (addr->sa_family != AF_IUCV || alen < sizeof(struct sockaddr_iucv))
881 return -EINVAL;
882
883 if (sk->sk_state != IUCV_OPEN && sk->sk_state != IUCV_BOUND)
884 return -EBADFD;
885
886 if (sk->sk_state == IUCV_OPEN &&
887 iucv->transport == AF_IUCV_TRANS_HIPER)
888 return -EBADFD; /* explicit bind required */
889
890 if (sk->sk_type != SOCK_STREAM && sk->sk_type != SOCK_SEQPACKET)
891 return -EINVAL;
892
893 if (sk->sk_state == IUCV_OPEN) {
894 err = iucv_sock_autobind(sk);
895 if (unlikely(err))
896 return err;
897 }
898
899 lock_sock(sk);
900
901 /* Set the destination information */
902 memcpy(iucv->dst_user_id, sa->siucv_user_id, 8);
903 memcpy(iucv->dst_name, sa->siucv_name, 8);
904
905 if (iucv->transport == AF_IUCV_TRANS_HIPER)
906 err = afiucv_hs_connect(sock);
907 else
908 err = afiucv_path_connect(sock, addr);
909 if (err)
910 goto done;
911
912 if (sk->sk_state != IUCV_CONNECTED)
731 err = iucv_sock_wait(sk, iucv_sock_in_state(sk, IUCV_CONNECTED, 913 err = iucv_sock_wait(sk, iucv_sock_in_state(sk, IUCV_CONNECTED,
732 IUCV_DISCONN), 914 IUCV_DISCONN),
733 sock_sndtimeo(sk, flags & O_NONBLOCK)); 915 sock_sndtimeo(sk, flags & O_NONBLOCK));
734 }
735 916
736 if (sk->sk_state == IUCV_DISCONN) { 917 if (sk->sk_state == IUCV_DISCONN || sk->sk_state == IUCV_CLOSED)
737 err = -ECONNREFUSED; 918 err = -ECONNREFUSED;
738 }
739 919
740 if (err) { 920 if (err && iucv->transport == AF_IUCV_TRANS_IUCV) {
741 iucv_path_sever(iucv->path, NULL); 921 pr_iucv->path_sever(iucv->path, NULL);
742 iucv_path_free(iucv->path); 922 iucv_path_free(iucv->path);
743 iucv->path = NULL; 923 iucv->path = NULL;
744 } 924 }
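
For the HiperSockets transport, iucv_sock_connect() above no longer builds an IUCV path: afiucv_hs_connect() only emits a SYN frame carrying the local msglimit, and the caller then sleeps in iucv_sock_wait() until the SYN|ACK handler further down in this patch moves the socket to IUCV_CONNECTED, or a SYN|FIN refuses it. A compact sketch of that exchange as a plain state machine (states and flags mirror the patch; the functions themselves are illustrative):

enum sock_state { S_BOUND, S_CONNECTED, S_DISCONN };

struct hs_sock {
	enum sock_state state;
	unsigned int    msglimit;        /* our advertised window */
	unsigned int    msglimit_peer;   /* learned from the SYN|ACK */
};

/* connecting side: send SYN carrying our msglimit, then wait */
static void hs_connect(struct hs_sock *s, unsigned int *syn_window)
{
	*syn_window = s->msglimit;       /* afiucv_hs_send(..., AF_IUCV_FLAG_SYN) */
	/* caller now blocks until the state leaves S_BOUND */
}

/* connecting side: SYN|ACK received, adopt the peer's window */
static void hs_synack(struct hs_sock *s, unsigned int peer_window)
{
	if (s->state != S_BOUND)
		return;
	s->msglimit_peer = peer_window;
	s->state = S_CONNECTED;
}

/* connecting side: SYN|FIN received, connection refused */
static void hs_synfin(struct hs_sock *s)
{
	if (s->state == S_BOUND)
		s->state = S_DISCONN;
}
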
@@ -833,20 +1013,21 @@ static int iucv_sock_getname(struct socket *sock, struct sockaddr *addr,
833{ 1013{
834 struct sockaddr_iucv *siucv = (struct sockaddr_iucv *) addr; 1014 struct sockaddr_iucv *siucv = (struct sockaddr_iucv *) addr;
835 struct sock *sk = sock->sk; 1015 struct sock *sk = sock->sk;
1016 struct iucv_sock *iucv = iucv_sk(sk);
836 1017
837 addr->sa_family = AF_IUCV; 1018 addr->sa_family = AF_IUCV;
838 *len = sizeof(struct sockaddr_iucv); 1019 *len = sizeof(struct sockaddr_iucv);
839 1020
840 if (peer) { 1021 if (peer) {
841 memcpy(siucv->siucv_user_id, iucv_sk(sk)->dst_user_id, 8); 1022 memcpy(siucv->siucv_user_id, iucv->dst_user_id, 8);
842 memcpy(siucv->siucv_name, &iucv_sk(sk)->dst_name, 8); 1023 memcpy(siucv->siucv_name, iucv->dst_name, 8);
843 } else { 1024 } else {
844 memcpy(siucv->siucv_user_id, iucv_sk(sk)->src_user_id, 8); 1025 memcpy(siucv->siucv_user_id, iucv->src_user_id, 8);
845 memcpy(siucv->siucv_name, iucv_sk(sk)->src_name, 8); 1026 memcpy(siucv->siucv_name, iucv->src_name, 8);
846 } 1027 }
847 memset(&siucv->siucv_port, 0, sizeof(siucv->siucv_port)); 1028 memset(&siucv->siucv_port, 0, sizeof(siucv->siucv_port));
848 memset(&siucv->siucv_addr, 0, sizeof(siucv->siucv_addr)); 1029 memset(&siucv->siucv_addr, 0, sizeof(siucv->siucv_addr));
849 memset(siucv->siucv_nodeid, 0, sizeof(siucv->siucv_nodeid)); 1030 memset(&siucv->siucv_nodeid, 0, sizeof(siucv->siucv_nodeid));
850 1031
851 return 0; 1032 return 0;
852} 1033}
@@ -871,7 +1052,7 @@ static int iucv_send_iprm(struct iucv_path *path, struct iucv_message *msg,
871 1052
872 memcpy(prmdata, (void *) skb->data, skb->len); 1053 memcpy(prmdata, (void *) skb->data, skb->len);
873 prmdata[7] = 0xff - (u8) skb->len; 1054 prmdata[7] = 0xff - (u8) skb->len;
874 return iucv_message_send(path, msg, IUCV_IPRMDATA, 0, 1055 return pr_iucv->message_send(path, msg, IUCV_IPRMDATA, 0,
875 (void *) prmdata, 8); 1056 (void *) prmdata, 8);
876} 1057}
877 1058
@@ -960,9 +1141,16 @@ static int iucv_sock_sendmsg(struct kiocb *iocb, struct socket *sock,
960 * this is fine for SOCK_SEQPACKET (unless we want to support 1141 * this is fine for SOCK_SEQPACKET (unless we want to support
961 * segmented records using the MSG_EOR flag), but 1142 * segmented records using the MSG_EOR flag), but
962 * for SOCK_STREAM we might want to improve it in future */ 1143 * for SOCK_STREAM we might want to improve it in future */
963 skb = sock_alloc_send_skb(sk, len, noblock, &err); 1144 if (iucv->transport == AF_IUCV_TRANS_HIPER)
1145 skb = sock_alloc_send_skb(sk,
1146 len + sizeof(struct af_iucv_trans_hdr) + ETH_HLEN,
1147 noblock, &err);
1148 else
1149 skb = sock_alloc_send_skb(sk, len, noblock, &err);
964 if (!skb) 1150 if (!skb)
965 goto out; 1151 goto out;
1152 if (iucv->transport == AF_IUCV_TRANS_HIPER)
1153 skb_reserve(skb, sizeof(struct af_iucv_trans_hdr) + ETH_HLEN);
966 if (memcpy_fromiovec(skb_put(skb, len), msg->msg_iov, len)) { 1154 if (memcpy_fromiovec(skb_put(skb, len), msg->msg_iov, len)) {
967 err = -EFAULT; 1155 err = -EFAULT;
968 goto fail; 1156 goto fail;
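
On the HiperSockets transport, the sendmsg() change above over-allocates each skb by sizeof(struct af_iucv_trans_hdr) + ETH_HLEN and reserves that space up front, so afiucv_hs_send() can later skb_push() the transport and Ethernet headers without copying the payload. A minimal sketch of the same reserve-then-push pattern on a flat buffer (hypothetical helpers, not kernel API):

#include <stdlib.h>

struct buf {
	unsigned char *head, *data, *tail;        /* like skb head/data/tail */
};

/* allocate payload plus headroom and park data past the headroom,
 * mirroring sock_alloc_send_skb() + skb_reserve() in the hunk above */
static struct buf *buf_alloc(size_t payload, size_t headroom)
{
	struct buf *b = malloc(sizeof(*b));
	if (!b)
		return NULL;
	b->head = malloc(payload + headroom);
	if (!b->head) {
		free(b);
		return NULL;
	}
	b->data = b->tail = b->head + headroom;   /* skb_reserve(headroom) */
	return b;
}

/* later, the transport prepends its header without moving the payload */
static void *buf_push(struct buf *b, size_t len)
{
	b->data -= len;                           /* skb_push(len) */
	return b->data;
}
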
@@ -983,6 +1171,15 @@ static int iucv_sock_sendmsg(struct kiocb *iocb, struct socket *sock,
983 /* increment and save iucv message tag for msg_completion cbk */ 1171 /* increment and save iucv message tag for msg_completion cbk */
984 txmsg.tag = iucv->send_tag++; 1172 txmsg.tag = iucv->send_tag++;
985 memcpy(CB_TAG(skb), &txmsg.tag, CB_TAG_LEN); 1173 memcpy(CB_TAG(skb), &txmsg.tag, CB_TAG_LEN);
1174 if (iucv->transport == AF_IUCV_TRANS_HIPER) {
1175 atomic_inc(&iucv->msg_sent);
1176 err = afiucv_hs_send(&txmsg, sk, skb, 0);
1177 if (err) {
1178 atomic_dec(&iucv->msg_sent);
1179 goto fail;
1180 }
1181 goto release;
1182 }
986 skb_queue_tail(&iucv->send_skb_q, skb); 1183 skb_queue_tail(&iucv->send_skb_q, skb);
987 1184
988 if (((iucv->path->flags & IUCV_IPRMDATA) & iucv->flags) 1185 if (((iucv->path->flags & IUCV_IPRMDATA) & iucv->flags)
@@ -999,13 +1196,13 @@ static int iucv_sock_sendmsg(struct kiocb *iocb, struct socket *sock,
999 /* this error should never happen since the 1196 /* this error should never happen since the
1000 * IUCV_IPRMDATA path flag is set... sever path */ 1197 * IUCV_IPRMDATA path flag is set... sever path */
1001 if (err == 0x15) { 1198 if (err == 0x15) {
1002 iucv_path_sever(iucv->path, NULL); 1199 pr_iucv->path_sever(iucv->path, NULL);
1003 skb_unlink(skb, &iucv->send_skb_q); 1200 skb_unlink(skb, &iucv->send_skb_q);
1004 err = -EPIPE; 1201 err = -EPIPE;
1005 goto fail; 1202 goto fail;
1006 } 1203 }
1007 } else 1204 } else
1008 err = iucv_message_send(iucv->path, &txmsg, 0, 0, 1205 err = pr_iucv->message_send(iucv->path, &txmsg, 0, 0,
1009 (void *) skb->data, skb->len); 1206 (void *) skb->data, skb->len);
1010 if (err) { 1207 if (err) {
1011 if (err == 3) { 1208 if (err == 3) {
@@ -1023,6 +1220,7 @@ static int iucv_sock_sendmsg(struct kiocb *iocb, struct socket *sock,
1023 goto fail; 1220 goto fail;
1024 } 1221 }
1025 1222
1223release:
1026 release_sock(sk); 1224 release_sock(sk);
1027 return len; 1225 return len;
1028 1226
@@ -1095,8 +1293,9 @@ static void iucv_process_message(struct sock *sk, struct sk_buff *skb,
1095 skb->len = 0; 1293 skb->len = 0;
1096 } 1294 }
1097 } else { 1295 } else {
1098 rc = iucv_message_receive(path, msg, msg->flags & IUCV_IPRMDATA, 1296 rc = pr_iucv->message_receive(path, msg,
1099 skb->data, len, NULL); 1297 msg->flags & IUCV_IPRMDATA,
1298 skb->data, len, NULL);
1100 if (rc) { 1299 if (rc) {
1101 kfree_skb(skb); 1300 kfree_skb(skb);
1102 return; 1301 return;
@@ -1110,7 +1309,7 @@ static void iucv_process_message(struct sock *sk, struct sk_buff *skb,
1110 kfree_skb(skb); 1309 kfree_skb(skb);
1111 skb = NULL; 1310 skb = NULL;
1112 if (rc) { 1311 if (rc) {
1113 iucv_path_sever(path, NULL); 1312 pr_iucv->path_sever(path, NULL);
1114 return; 1313 return;
1115 } 1314 }
1116 skb = skb_dequeue(&iucv_sk(sk)->backlog_skb_q); 1315 skb = skb_dequeue(&iucv_sk(sk)->backlog_skb_q);
@@ -1154,7 +1353,8 @@ static int iucv_sock_recvmsg(struct kiocb *iocb, struct socket *sock,
1154 struct sock *sk = sock->sk; 1353 struct sock *sk = sock->sk;
1155 struct iucv_sock *iucv = iucv_sk(sk); 1354 struct iucv_sock *iucv = iucv_sk(sk);
1156 unsigned int copied, rlen; 1355 unsigned int copied, rlen;
1157 struct sk_buff *skb, *rskb, *cskb; 1356 struct sk_buff *skb, *rskb, *cskb, *sskb;
1357 int blen;
1158 int err = 0; 1358 int err = 0;
1159 1359
1160 if ((sk->sk_state == IUCV_DISCONN || sk->sk_state == IUCV_SEVERED) && 1360 if ((sk->sk_state == IUCV_DISCONN || sk->sk_state == IUCV_SEVERED) &&
@@ -1179,7 +1379,7 @@ static int iucv_sock_recvmsg(struct kiocb *iocb, struct socket *sock,
1179 copied = min_t(unsigned int, rlen, len); 1379 copied = min_t(unsigned int, rlen, len);
1180 1380
1181 cskb = skb; 1381 cskb = skb;
1182 if (memcpy_toiovec(msg->msg_iov, cskb->data, copied)) { 1382 if (skb_copy_datagram_iovec(cskb, 0, msg->msg_iov, copied)) {
1183 if (!(flags & MSG_PEEK)) 1383 if (!(flags & MSG_PEEK))
1184 skb_queue_head(&sk->sk_receive_queue, skb); 1384 skb_queue_head(&sk->sk_receive_queue, skb);
1185 return -EFAULT; 1385 return -EFAULT;
@@ -1217,6 +1417,7 @@ static int iucv_sock_recvmsg(struct kiocb *iocb, struct socket *sock,
1217 } 1417 }
1218 1418
1219 kfree_skb(skb); 1419 kfree_skb(skb);
1420 atomic_inc(&iucv->msg_recv);
1220 1421
1221 /* Queue backlog skbs */ 1422 /* Queue backlog skbs */
1222 spin_lock_bh(&iucv->message_q.lock); 1423 spin_lock_bh(&iucv->message_q.lock);
@@ -1233,6 +1434,24 @@ static int iucv_sock_recvmsg(struct kiocb *iocb, struct socket *sock,
1233 if (skb_queue_empty(&iucv->backlog_skb_q)) { 1434 if (skb_queue_empty(&iucv->backlog_skb_q)) {
1234 if (!list_empty(&iucv->message_q.list)) 1435 if (!list_empty(&iucv->message_q.list))
1235 iucv_process_message_q(sk); 1436 iucv_process_message_q(sk);
1437 if (atomic_read(&iucv->msg_recv) >=
1438 iucv->msglimit / 2) {
1439 /* send WIN to peer */
1440 blen = sizeof(struct af_iucv_trans_hdr) +
1441 ETH_HLEN;
1442 sskb = sock_alloc_send_skb(sk, blen, 1, &err);
1443 if (sskb) {
1444 skb_reserve(sskb,
1445 sizeof(struct af_iucv_trans_hdr)
1446 + ETH_HLEN);
1447 err = afiucv_hs_send(NULL, sk, sskb,
1448 AF_IUCV_FLAG_WIN);
1449 }
1450 if (err) {
1451 sk->sk_state = IUCV_DISCONN;
1452 sk->sk_state_change(sk);
1453 }
1454 }
1236 } 1455 }
1237 spin_unlock_bh(&iucv->message_q.lock); 1456 spin_unlock_bh(&iucv->message_q.lock);
1238 } 1457 }
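
The recvmsg() hunk above closes the flow-control loop: each delivered message bumps msg_recv, and once half of the local msglimit has been consumed (and the backlog is drained) a WIN frame carrying that count goes back to the peer, whose afiucv_hs_callback_win() subtracts it from msg_sent so further sends fit under the window again. A small sketch of both ends of that accounting, with plain ints instead of the kernel's atomics:

/* receiver side: called after each message handed to user space */
static int receiver_consumed(int *msg_recv, int msglimit, int *win_to_send)
{
	++*msg_recv;
	if (*msg_recv >= msglimit / 2) {
		*win_to_send = *msg_recv;   /* window field of the WIN frame */
		return 1;                   /* emit AF_IUCV_FLAG_WIN now; msg_recv is
					     * reduced when the frame actually goes out */
	}
	return 0;
}

/* sender side: afiucv_hs_callback_win() credits the confirmed messages */
static void sender_got_win(int *msg_sent, int window)
{
	*msg_sent -= window;                /* more sends fit under msglimit_peer */
}
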
@@ -1327,8 +1546,8 @@ static int iucv_sock_shutdown(struct socket *sock, int how)
1327 if (how == SEND_SHUTDOWN || how == SHUTDOWN_MASK) { 1546 if (how == SEND_SHUTDOWN || how == SHUTDOWN_MASK) {
1328 txmsg.class = 0; 1547 txmsg.class = 0;
1329 txmsg.tag = 0; 1548 txmsg.tag = 0;
1330 err = iucv_message_send(iucv->path, &txmsg, IUCV_IPRMDATA, 0, 1549 err = pr_iucv->message_send(iucv->path, &txmsg, IUCV_IPRMDATA,
1331 (void *) iprm_shutdown, 8); 1550 0, (void *) iprm_shutdown, 8);
1332 if (err) { 1551 if (err) {
1333 switch (err) { 1552 switch (err) {
1334 case 1: 1553 case 1:
@@ -1345,7 +1564,7 @@ static int iucv_sock_shutdown(struct socket *sock, int how)
1345 } 1564 }
1346 1565
1347 if (how == RCV_SHUTDOWN || how == SHUTDOWN_MASK) { 1566 if (how == RCV_SHUTDOWN || how == SHUTDOWN_MASK) {
1348 err = iucv_path_quiesce(iucv_sk(sk)->path, NULL); 1567 err = pr_iucv->path_quiesce(iucv->path, NULL);
1349 if (err) 1568 if (err)
1350 err = -ENOTCONN; 1569 err = -ENOTCONN;
1351 1570
@@ -1372,7 +1591,7 @@ static int iucv_sock_release(struct socket *sock)
1372 1591
1373 /* Unregister with IUCV base support */ 1592 /* Unregister with IUCV base support */
1374 if (iucv_sk(sk)->path) { 1593 if (iucv_sk(sk)->path) {
1375 iucv_path_sever(iucv_sk(sk)->path, NULL); 1594 pr_iucv->path_sever(iucv_sk(sk)->path, NULL);
1376 iucv_path_free(iucv_sk(sk)->path); 1595 iucv_path_free(iucv_sk(sk)->path);
1377 iucv_sk(sk)->path = NULL; 1596 iucv_sk(sk)->path = NULL;
1378 } 1597 }
@@ -1514,14 +1733,14 @@ static int iucv_callback_connreq(struct iucv_path *path,
1514 high_nmcpy(user_data, iucv->dst_name); 1733 high_nmcpy(user_data, iucv->dst_name);
1515 ASCEBC(user_data, sizeof(user_data)); 1734 ASCEBC(user_data, sizeof(user_data));
1516 if (sk->sk_state != IUCV_LISTEN) { 1735 if (sk->sk_state != IUCV_LISTEN) {
1517 err = iucv_path_sever(path, user_data); 1736 err = pr_iucv->path_sever(path, user_data);
1518 iucv_path_free(path); 1737 iucv_path_free(path);
1519 goto fail; 1738 goto fail;
1520 } 1739 }
1521 1740
1522 /* Check for backlog size */ 1741 /* Check for backlog size */
1523 if (sk_acceptq_is_full(sk)) { 1742 if (sk_acceptq_is_full(sk)) {
1524 err = iucv_path_sever(path, user_data); 1743 err = pr_iucv->path_sever(path, user_data);
1525 iucv_path_free(path); 1744 iucv_path_free(path);
1526 goto fail; 1745 goto fail;
1527 } 1746 }
@@ -1529,7 +1748,7 @@ static int iucv_callback_connreq(struct iucv_path *path,
1529 /* Create the new socket */ 1748 /* Create the new socket */
1530 nsk = iucv_sock_alloc(NULL, sk->sk_type, GFP_ATOMIC); 1749 nsk = iucv_sock_alloc(NULL, sk->sk_type, GFP_ATOMIC);
1531 if (!nsk) { 1750 if (!nsk) {
1532 err = iucv_path_sever(path, user_data); 1751 err = pr_iucv->path_sever(path, user_data);
1533 iucv_path_free(path); 1752 iucv_path_free(path);
1534 goto fail; 1753 goto fail;
1535 } 1754 }
@@ -1553,9 +1772,9 @@ static int iucv_callback_connreq(struct iucv_path *path,
1553 /* set message limit for path based on msglimit of accepting socket */ 1772 /* set message limit for path based on msglimit of accepting socket */
1554 niucv->msglimit = iucv->msglimit; 1773 niucv->msglimit = iucv->msglimit;
1555 path->msglim = iucv->msglimit; 1774 path->msglim = iucv->msglimit;
1556 err = iucv_path_accept(path, &af_iucv_handler, nuser_data, nsk); 1775 err = pr_iucv->path_accept(path, &af_iucv_handler, nuser_data, nsk);
1557 if (err) { 1776 if (err) {
1558 err = iucv_path_sever(path, user_data); 1777 err = pr_iucv->path_sever(path, user_data);
1559 iucv_path_free(path); 1778 iucv_path_free(path);
1560 iucv_sock_kill(nsk); 1779 iucv_sock_kill(nsk);
1561 goto fail; 1780 goto fail;
@@ -1589,7 +1808,7 @@ static void iucv_callback_rx(struct iucv_path *path, struct iucv_message *msg)
1589 int len; 1808 int len;
1590 1809
1591 if (sk->sk_shutdown & RCV_SHUTDOWN) { 1810 if (sk->sk_shutdown & RCV_SHUTDOWN) {
1592 iucv_message_reject(path, msg); 1811 pr_iucv->message_reject(path, msg);
1593 return; 1812 return;
1594 } 1813 }
1595 1814
@@ -1692,6 +1911,389 @@ static void iucv_callback_shutdown(struct iucv_path *path, u8 ipuser[16])
1692 bh_unlock_sock(sk); 1911 bh_unlock_sock(sk);
1693} 1912}
1694 1913
1914/***************** HiperSockets transport callbacks ********************/
1915static void afiucv_swap_src_dest(struct sk_buff *skb)
1916{
1917 struct af_iucv_trans_hdr *trans_hdr =
1918 (struct af_iucv_trans_hdr *)skb->data;
1919 char tmpID[8];
1920 char tmpName[8];
1921
1922 ASCEBC(trans_hdr->destUserID, sizeof(trans_hdr->destUserID));
1923 ASCEBC(trans_hdr->destAppName, sizeof(trans_hdr->destAppName));
1924 ASCEBC(trans_hdr->srcUserID, sizeof(trans_hdr->srcUserID));
1925 ASCEBC(trans_hdr->srcAppName, sizeof(trans_hdr->srcAppName));
1926 memcpy(tmpID, trans_hdr->srcUserID, 8);
1927 memcpy(tmpName, trans_hdr->srcAppName, 8);
1928 memcpy(trans_hdr->srcUserID, trans_hdr->destUserID, 8);
1929 memcpy(trans_hdr->srcAppName, trans_hdr->destAppName, 8);
1930 memcpy(trans_hdr->destUserID, tmpID, 8);
1931 memcpy(trans_hdr->destAppName, tmpName, 8);
1932 skb_push(skb, ETH_HLEN);
1933 memset(skb->data, 0, ETH_HLEN);
1934}
1935
1936/**
1937 * afiucv_hs_callback_syn - react on received SYN
1938 **/
1939static int afiucv_hs_callback_syn(struct sock *sk, struct sk_buff *skb)
1940{
1941 struct sock *nsk;
1942 struct iucv_sock *iucv, *niucv;
1943 struct af_iucv_trans_hdr *trans_hdr;
1944 int err;
1945
1946 iucv = iucv_sk(sk);
1947 trans_hdr = (struct af_iucv_trans_hdr *)skb->data;
1948 if (!iucv) {
1949 /* no sock - connection refused */
1950 afiucv_swap_src_dest(skb);
1951 trans_hdr->flags = AF_IUCV_FLAG_SYN | AF_IUCV_FLAG_FIN;
1952 err = dev_queue_xmit(skb);
1953 goto out;
1954 }
1955
1956 nsk = iucv_sock_alloc(NULL, sk->sk_type, GFP_ATOMIC);
1957 bh_lock_sock(sk);
1958 if ((sk->sk_state != IUCV_LISTEN) ||
1959 sk_acceptq_is_full(sk) ||
1960 !nsk) {
1961 /* error on server socket - connection refused */
1962 if (nsk)
1963 sk_free(nsk);
1964 afiucv_swap_src_dest(skb);
1965 trans_hdr->flags = AF_IUCV_FLAG_SYN | AF_IUCV_FLAG_FIN;
1966 err = dev_queue_xmit(skb);
1967 bh_unlock_sock(sk);
1968 goto out;
1969 }
1970
1971 niucv = iucv_sk(nsk);
1972 iucv_sock_init(nsk, sk);
1973 niucv->transport = AF_IUCV_TRANS_HIPER;
1974 niucv->msglimit = iucv->msglimit;
1975 if (!trans_hdr->window)
1976 niucv->msglimit_peer = IUCV_HIPER_MSGLIM_DEFAULT;
1977 else
1978 niucv->msglimit_peer = trans_hdr->window;
1979 memcpy(niucv->dst_name, trans_hdr->srcAppName, 8);
1980 memcpy(niucv->dst_user_id, trans_hdr->srcUserID, 8);
1981 memcpy(niucv->src_name, iucv->src_name, 8);
1982 memcpy(niucv->src_user_id, iucv->src_user_id, 8);
1983 nsk->sk_bound_dev_if = sk->sk_bound_dev_if;
1984 afiucv_swap_src_dest(skb);
1985 trans_hdr->flags = AF_IUCV_FLAG_SYN | AF_IUCV_FLAG_ACK;
1986 trans_hdr->window = niucv->msglimit;
 1987 /* if the receiver acks the xmit, the connection is established */
1988 err = dev_queue_xmit(skb);
1989 if (!err) {
1990 iucv_accept_enqueue(sk, nsk);
1991 nsk->sk_state = IUCV_CONNECTED;
1992 sk->sk_data_ready(sk, 1);
1993 } else
1994 iucv_sock_kill(nsk);
1995 bh_unlock_sock(sk);
1996
1997out:
1998 return NET_RX_SUCCESS;
1999}
2000
2001/**
2002 * afiucv_hs_callback_synack() - react on received SYN-ACK
2003 **/
2004static int afiucv_hs_callback_synack(struct sock *sk, struct sk_buff *skb)
2005{
2006 struct iucv_sock *iucv = iucv_sk(sk);
2007 struct af_iucv_trans_hdr *trans_hdr =
2008 (struct af_iucv_trans_hdr *)skb->data;
2009
2010 if (!iucv)
2011 goto out;
2012 if (sk->sk_state != IUCV_BOUND)
2013 goto out;
2014 bh_lock_sock(sk);
2015 iucv->msglimit_peer = trans_hdr->window;
2016 sk->sk_state = IUCV_CONNECTED;
2017 sk->sk_state_change(sk);
2018 bh_unlock_sock(sk);
2019out:
2020 kfree_skb(skb);
2021 return NET_RX_SUCCESS;
2022}
2023
2024/**
2025 * afiucv_hs_callback_synfin() - react on received SYN_FIN
2026 **/
2027static int afiucv_hs_callback_synfin(struct sock *sk, struct sk_buff *skb)
2028{
2029 struct iucv_sock *iucv = iucv_sk(sk);
2030
2031 if (!iucv)
2032 goto out;
2033 if (sk->sk_state != IUCV_BOUND)
2034 goto out;
2035 bh_lock_sock(sk);
2036 sk->sk_state = IUCV_DISCONN;
2037 sk->sk_state_change(sk);
2038 bh_unlock_sock(sk);
2039out:
2040 kfree_skb(skb);
2041 return NET_RX_SUCCESS;
2042}
2043
2044/**
2045 * afiucv_hs_callback_fin() - react on received FIN
2046 **/
2047static int afiucv_hs_callback_fin(struct sock *sk, struct sk_buff *skb)
2048{
2049 struct iucv_sock *iucv = iucv_sk(sk);
2050
2051 /* other end of connection closed */
2052 if (iucv) {
2053 bh_lock_sock(sk);
2054 if (!list_empty(&iucv->accept_q))
2055 sk->sk_state = IUCV_SEVERED;
2056 else
2057 sk->sk_state = IUCV_DISCONN;
2058 sk->sk_state_change(sk);
2059 bh_unlock_sock(sk);
2060 }
2061 kfree_skb(skb);
2062 return NET_RX_SUCCESS;
2063}
2064
2065/**
2066 * afiucv_hs_callback_win() - react on received WIN
2067 **/
2068static int afiucv_hs_callback_win(struct sock *sk, struct sk_buff *skb)
2069{
2070 struct iucv_sock *iucv = iucv_sk(sk);
2071 struct af_iucv_trans_hdr *trans_hdr =
2072 (struct af_iucv_trans_hdr *)skb->data;
2073
2074 if (!iucv)
2075 return NET_RX_SUCCESS;
2076
2077 if (sk->sk_state != IUCV_CONNECTED)
2078 return NET_RX_SUCCESS;
2079
2080 atomic_sub(trans_hdr->window, &iucv->msg_sent);
2081 iucv_sock_wake_msglim(sk);
2082 return NET_RX_SUCCESS;
2083}
2084
2085/**
2086 * afiucv_hs_callback_rx() - react on received data
2087 **/
2088static int afiucv_hs_callback_rx(struct sock *sk, struct sk_buff *skb)
2089{
2090 struct iucv_sock *iucv = iucv_sk(sk);
2091
2092 if (!iucv) {
2093 kfree_skb(skb);
2094 return NET_RX_SUCCESS;
2095 }
2096
2097 if (sk->sk_state != IUCV_CONNECTED) {
2098 kfree_skb(skb);
2099 return NET_RX_SUCCESS;
2100 }
2101
 2102 /* drop frames that carry no payload beyond the transport header */
2103 if (skb->len <= sizeof(struct af_iucv_trans_hdr)) {
2104 kfree_skb(skb);
2105 return NET_RX_SUCCESS;
2106 }
2107 skb_pull(skb, sizeof(struct af_iucv_trans_hdr));
2108 skb_reset_transport_header(skb);
2109 skb_reset_network_header(skb);
2110 spin_lock(&iucv->message_q.lock);
2111 if (skb_queue_empty(&iucv->backlog_skb_q)) {
2112 if (sock_queue_rcv_skb(sk, skb)) {
2113 /* handle rcv queue full */
2114 skb_queue_tail(&iucv->backlog_skb_q, skb);
2115 }
2116 } else
2117 skb_queue_tail(&iucv_sk(sk)->backlog_skb_q, skb);
2118 spin_unlock(&iucv->message_q.lock);
2119 return NET_RX_SUCCESS;
2120}
2121
2122/**
2123 * afiucv_hs_rcv() - base function for arriving data through HiperSockets
2124 * transport
2125 * called from netif RX softirq
2126 **/
2127static int afiucv_hs_rcv(struct sk_buff *skb, struct net_device *dev,
2128 struct packet_type *pt, struct net_device *orig_dev)
2129{
2130 struct hlist_node *node;
2131 struct sock *sk;
2132 struct iucv_sock *iucv;
2133 struct af_iucv_trans_hdr *trans_hdr;
2134 char nullstring[8];
2135 int err = 0;
2136
2137 skb_pull(skb, ETH_HLEN);
2138 trans_hdr = (struct af_iucv_trans_hdr *)skb->data;
2139 EBCASC(trans_hdr->destAppName, sizeof(trans_hdr->destAppName));
2140 EBCASC(trans_hdr->destUserID, sizeof(trans_hdr->destUserID));
2141 EBCASC(trans_hdr->srcAppName, sizeof(trans_hdr->srcAppName));
2142 EBCASC(trans_hdr->srcUserID, sizeof(trans_hdr->srcUserID));
2143 memset(nullstring, 0, sizeof(nullstring));
2144 iucv = NULL;
2145 sk = NULL;
2146 read_lock(&iucv_sk_list.lock);
2147 sk_for_each(sk, node, &iucv_sk_list.head) {
2148 if (trans_hdr->flags == AF_IUCV_FLAG_SYN) {
2149 if ((!memcmp(&iucv_sk(sk)->src_name,
2150 trans_hdr->destAppName, 8)) &&
2151 (!memcmp(&iucv_sk(sk)->src_user_id,
2152 trans_hdr->destUserID, 8)) &&
2153 (!memcmp(&iucv_sk(sk)->dst_name, nullstring, 8)) &&
2154 (!memcmp(&iucv_sk(sk)->dst_user_id,
2155 nullstring, 8))) {
2156 iucv = iucv_sk(sk);
2157 break;
2158 }
2159 } else {
2160 if ((!memcmp(&iucv_sk(sk)->src_name,
2161 trans_hdr->destAppName, 8)) &&
2162 (!memcmp(&iucv_sk(sk)->src_user_id,
2163 trans_hdr->destUserID, 8)) &&
2164 (!memcmp(&iucv_sk(sk)->dst_name,
2165 trans_hdr->srcAppName, 8)) &&
2166 (!memcmp(&iucv_sk(sk)->dst_user_id,
2167 trans_hdr->srcUserID, 8))) {
2168 iucv = iucv_sk(sk);
2169 break;
2170 }
2171 }
2172 }
2173 read_unlock(&iucv_sk_list.lock);
2174 if (!iucv)
2175 sk = NULL;
2176
 2177 /* no sock found:
 2178 how should we send a reply without a sock?
 2179 1) send without a sock and skip send rc checking?
 2180 2) introduce a default sock to handle these cases
2181
2182 SYN -> send SYN|ACK in good case, send SYN|FIN in bad case
2183 data -> send FIN
2184 SYN|ACK, SYN|FIN, FIN -> no action? */
2185
2186 switch (trans_hdr->flags) {
2187 case AF_IUCV_FLAG_SYN:
2188 /* connect request */
2189 err = afiucv_hs_callback_syn(sk, skb);
2190 break;
2191 case (AF_IUCV_FLAG_SYN | AF_IUCV_FLAG_ACK):
2192 /* connect request confirmed */
2193 err = afiucv_hs_callback_synack(sk, skb);
2194 break;
2195 case (AF_IUCV_FLAG_SYN | AF_IUCV_FLAG_FIN):
2196 /* connect request refused */
2197 err = afiucv_hs_callback_synfin(sk, skb);
2198 break;
2199 case (AF_IUCV_FLAG_FIN):
2200 /* close request */
2201 err = afiucv_hs_callback_fin(sk, skb);
2202 break;
2203 case (AF_IUCV_FLAG_WIN):
2204 err = afiucv_hs_callback_win(sk, skb);
2205 if (skb->len > sizeof(struct af_iucv_trans_hdr))
2206 err = afiucv_hs_callback_rx(sk, skb);
2207 else
 2208 kfree_skb(skb);
2209 break;
2210 case 0:
2211 /* plain data frame */
2212 err = afiucv_hs_callback_rx(sk, skb);
2213 break;
2214 default:
2215 ;
2216 }
2217
2218 return err;
2219}
2220
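
afiucv_hs_rcv() above demultiplexes incoming frames purely by the converted name fields: a SYN has to land on a socket that is bound to the frame's destination name and user ID but still has empty peer fields, while every other frame must match the full source/destination pair. A condensed sketch of that matching rule (one flat struct stands in for both the socket fields and the transport header):

#include <string.h>
#include <stdbool.h>

struct hs_names {
	char src_name[8], src_user_id[8];   /* local / frame source */
	char dst_name[8], dst_user_id[8];   /* peer / frame destination */
};

/* does this socket accept the frame described by hdr? */
static bool hs_match(const struct hs_names *sk, const struct hs_names *hdr,
		     bool is_syn)
{
	static const char nul[8];

	/* the frame's destination must always be our local name and ID */
	if (memcmp(sk->src_name, hdr->dst_name, 8) ||
	    memcmp(sk->src_user_id, hdr->dst_user_id, 8))
		return false;

	if (is_syn)	/* listener: not yet bound to a peer */
		return !memcmp(sk->dst_name, nul, 8) &&
		       !memcmp(sk->dst_user_id, nul, 8);

	/* established: the peer names must match as well */
	return !memcmp(sk->dst_name, hdr->src_name, 8) &&
	       !memcmp(sk->dst_user_id, hdr->src_user_id, 8);
}
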
2221/**
 2222 * afiucv_hs_callback_txnotify() - handle send notifications from HiperSockets
2223 * transport
2224 **/
2225static void afiucv_hs_callback_txnotify(struct sk_buff *skb,
2226 enum iucv_tx_notify n)
2227{
2228 struct sock *isk = skb->sk;
2229 struct sock *sk = NULL;
2230 struct iucv_sock *iucv = NULL;
2231 struct sk_buff_head *list;
2232 struct sk_buff *list_skb;
2233 struct sk_buff *this = NULL;
2234 unsigned long flags;
2235 struct hlist_node *node;
2236
2237 read_lock(&iucv_sk_list.lock);
2238 sk_for_each(sk, node, &iucv_sk_list.head)
2239 if (sk == isk) {
2240 iucv = iucv_sk(sk);
2241 break;
2242 }
2243 read_unlock(&iucv_sk_list.lock);
2244
2245 if (!iucv)
2246 return;
2247
2248 bh_lock_sock(sk);
2249 list = &iucv->send_skb_q;
2250 list_skb = list->next;
2251 if (skb_queue_empty(list))
2252 goto out_unlock;
2253
2254 spin_lock_irqsave(&list->lock, flags);
2255 while (list_skb != (struct sk_buff *)list) {
2256 if (skb_shinfo(list_skb) == skb_shinfo(skb)) {
2257 this = list_skb;
2258 switch (n) {
2259 case TX_NOTIFY_OK:
2260 __skb_unlink(this, list);
2261 iucv_sock_wake_msglim(sk);
2262 kfree_skb(this);
2263 break;
2264 case TX_NOTIFY_PENDING:
2265 atomic_inc(&iucv->pendings);
2266 break;
2267 case TX_NOTIFY_DELAYED_OK:
2268 __skb_unlink(this, list);
2269 atomic_dec(&iucv->pendings);
2270 if (atomic_read(&iucv->pendings) <= 0)
2271 iucv_sock_wake_msglim(sk);
2272 kfree_skb(this);
2273 break;
2274 case TX_NOTIFY_UNREACHABLE:
2275 case TX_NOTIFY_DELAYED_UNREACHABLE:
2276 case TX_NOTIFY_TPQFULL: /* not yet used */
2277 case TX_NOTIFY_GENERALERROR:
2278 case TX_NOTIFY_DELAYED_GENERALERROR:
2279 __skb_unlink(this, list);
2280 kfree_skb(this);
2281 if (!list_empty(&iucv->accept_q))
2282 sk->sk_state = IUCV_SEVERED;
2283 else
2284 sk->sk_state = IUCV_DISCONN;
2285 sk->sk_state_change(sk);
2286 break;
2287 }
2288 break;
2289 }
2290 list_skb = list_skb->next;
2291 }
2292 spin_unlock_irqrestore(&list->lock, flags);
2293
2294out_unlock:
2295 bh_unlock_sock(sk);
2296}
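
The transmit-notification callback above finds the clone in the socket's send queue that shares skb_shinfo() with the skb the device reported on, then either frees it and wakes writers, counts it as pending, credits a delayed completion, or tears the connection down on the error codes. A reduced sketch of that bookkeeping for one queued frame (notification names abbreviated):

enum tx_notify { N_OK, N_PENDING, N_DELAYED_OK, N_ERROR };

struct tx_acct {
	int pendings;      /* frames the device still holds */
	int disconnected;  /* set on unreachable / general errors */
};

/* returns nonzero when writers should be woken (iucv_sock_wake_msglim) */
static int tx_notify(struct tx_acct *a, enum tx_notify n)
{
	switch (n) {
	case N_OK:                       /* sent: drop our clone, wake writers */
		return 1;
	case N_PENDING:                  /* driver keeps the frame for now */
		a->pendings++;
		return 0;
	case N_DELAYED_OK:               /* pending frame finally went out */
		return --a->pendings <= 0;
	case N_ERROR:                    /* unreachable or general error */
		a->disconnected = 1;
		return 0;
	}
	return 0;
}
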
1695static const struct proto_ops iucv_sock_ops = { 2297static const struct proto_ops iucv_sock_ops = {
1696 .family = PF_IUCV, 2298 .family = PF_IUCV,
1697 .owner = THIS_MODULE, 2299 .owner = THIS_MODULE,
@@ -1718,71 +2320,104 @@ static const struct net_proto_family iucv_sock_family_ops = {
1718 .create = iucv_sock_create, 2320 .create = iucv_sock_create,
1719}; 2321};
1720 2322
1721static int __init afiucv_init(void) 2323static struct packet_type iucv_packet_type = {
2324 .type = cpu_to_be16(ETH_P_AF_IUCV),
2325 .func = afiucv_hs_rcv,
2326};
2327
2328static int afiucv_iucv_init(void)
1722{ 2329{
1723 int err; 2330 int err;
1724 2331
1725 if (!MACHINE_IS_VM) { 2332 err = pr_iucv->iucv_register(&af_iucv_handler, 0);
1726 pr_err("The af_iucv module cannot be loaded"
1727 " without z/VM\n");
1728 err = -EPROTONOSUPPORT;
1729 goto out;
1730 }
1731 cpcmd("QUERY USERID", iucv_userid, sizeof(iucv_userid), &err);
1732 if (unlikely(err)) {
1733 WARN_ON(err);
1734 err = -EPROTONOSUPPORT;
1735 goto out;
1736 }
1737
1738 err = iucv_register(&af_iucv_handler, 0);
1739 if (err) 2333 if (err)
1740 goto out; 2334 goto out;
1741 err = proto_register(&iucv_proto, 0);
1742 if (err)
1743 goto out_iucv;
1744 err = sock_register(&iucv_sock_family_ops);
1745 if (err)
1746 goto out_proto;
1747 /* establish dummy device */ 2335 /* establish dummy device */
2336 af_iucv_driver.bus = pr_iucv->bus;
1748 err = driver_register(&af_iucv_driver); 2337 err = driver_register(&af_iucv_driver);
1749 if (err) 2338 if (err)
1750 goto out_sock; 2339 goto out_iucv;
1751 af_iucv_dev = kzalloc(sizeof(struct device), GFP_KERNEL); 2340 af_iucv_dev = kzalloc(sizeof(struct device), GFP_KERNEL);
1752 if (!af_iucv_dev) { 2341 if (!af_iucv_dev) {
1753 err = -ENOMEM; 2342 err = -ENOMEM;
1754 goto out_driver; 2343 goto out_driver;
1755 } 2344 }
1756 dev_set_name(af_iucv_dev, "af_iucv"); 2345 dev_set_name(af_iucv_dev, "af_iucv");
1757 af_iucv_dev->bus = &iucv_bus; 2346 af_iucv_dev->bus = pr_iucv->bus;
1758 af_iucv_dev->parent = iucv_root; 2347 af_iucv_dev->parent = pr_iucv->root;
1759 af_iucv_dev->release = (void (*)(struct device *))kfree; 2348 af_iucv_dev->release = (void (*)(struct device *))kfree;
1760 af_iucv_dev->driver = &af_iucv_driver; 2349 af_iucv_dev->driver = &af_iucv_driver;
1761 err = device_register(af_iucv_dev); 2350 err = device_register(af_iucv_dev);
1762 if (err) 2351 if (err)
1763 goto out_driver; 2352 goto out_driver;
1764
1765 return 0; 2353 return 0;
1766 2354
1767out_driver: 2355out_driver:
1768 driver_unregister(&af_iucv_driver); 2356 driver_unregister(&af_iucv_driver);
2357out_iucv:
2358 pr_iucv->iucv_unregister(&af_iucv_handler, 0);
2359out:
2360 return err;
2361}
2362
2363static int __init afiucv_init(void)
2364{
2365 int err;
2366
2367 if (MACHINE_IS_VM) {
2368 cpcmd("QUERY USERID", iucv_userid, sizeof(iucv_userid), &err);
2369 if (unlikely(err)) {
2370 WARN_ON(err);
2371 err = -EPROTONOSUPPORT;
2372 goto out;
2373 }
2374
2375 pr_iucv = try_then_request_module(symbol_get(iucv_if), "iucv");
2376 if (!pr_iucv) {
2377 printk(KERN_WARNING "iucv_if lookup failed\n");
2378 memset(&iucv_userid, 0, sizeof(iucv_userid));
2379 }
2380 } else {
2381 memset(&iucv_userid, 0, sizeof(iucv_userid));
2382 pr_iucv = NULL;
2383 }
2384
2385 err = proto_register(&iucv_proto, 0);
2386 if (err)
2387 goto out;
2388 err = sock_register(&iucv_sock_family_ops);
2389 if (err)
2390 goto out_proto;
2391
2392 if (pr_iucv) {
2393 err = afiucv_iucv_init();
2394 if (err)
2395 goto out_sock;
2396 }
2397 dev_add_pack(&iucv_packet_type);
2398 return 0;
2399
1769out_sock: 2400out_sock:
1770 sock_unregister(PF_IUCV); 2401 sock_unregister(PF_IUCV);
1771out_proto: 2402out_proto:
1772 proto_unregister(&iucv_proto); 2403 proto_unregister(&iucv_proto);
1773out_iucv:
1774 iucv_unregister(&af_iucv_handler, 0);
1775out: 2404out:
2405 if (pr_iucv)
2406 symbol_put(iucv_if);
1776 return err; 2407 return err;
1777} 2408}
1778 2409
1779static void __exit afiucv_exit(void) 2410static void __exit afiucv_exit(void)
1780{ 2411{
1781 device_unregister(af_iucv_dev); 2412 if (pr_iucv) {
1782 driver_unregister(&af_iucv_driver); 2413 device_unregister(af_iucv_dev);
2414 driver_unregister(&af_iucv_driver);
2415 pr_iucv->iucv_unregister(&af_iucv_handler, 0);
2416 symbol_put(iucv_if);
2417 }
2418 dev_remove_pack(&iucv_packet_type);
1783 sock_unregister(PF_IUCV); 2419 sock_unregister(PF_IUCV);
1784 proto_unregister(&iucv_proto); 2420 proto_unregister(&iucv_proto);
1785 iucv_unregister(&af_iucv_handler, 0);
1786} 2421}
1787 2422
1788module_init(afiucv_init); 2423module_init(afiucv_init);
@@ -1793,3 +2428,4 @@ MODULE_DESCRIPTION("IUCV Sockets ver " VERSION);
1793MODULE_VERSION(VERSION); 2428MODULE_VERSION(VERSION);
1794MODULE_LICENSE("GPL"); 2429MODULE_LICENSE("GPL");
1795MODULE_ALIAS_NETPROTO(PF_IUCV); 2430MODULE_ALIAS_NETPROTO(PF_IUCV);
2431
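
Together with the iucv.c hunk right below, this removes the hard dependency on the base IUCV module: iucv.c now exports an iucv_interface table of function pointers, af_iucv resolves it with symbol_get() only when running under z/VM, and every former direct call (iucv_path_connect(), iucv_message_send(), and so on) goes through pr_iucv, so the socket module still loads, HiperSockets-only, when the table is absent. A tiny sketch of that optional-backend pattern (illustrative names, not the kernel's interface):

#include <stddef.h>
#include <stdio.h>

/* function-pointer table published by the optional backend module */
struct backend_if {
	int (*path_connect)(const char *peer);
	int (*message_send)(const void *buf, size_t len);
};

static const struct backend_if *pr_backend;   /* NULL when not loaded */

/* resolved at init time, like symbol_get(iucv_if) under z/VM */
static void backend_attach(const struct backend_if *ifc)
{
	pr_backend = ifc;
}

/* callers pick the transport the same way the patched af_iucv does */
static int do_connect(const char *peer)
{
	if (pr_backend)
		return pr_backend->path_connect(peer);   /* classic transport */
	printf("no backend loaded: taking the HiperSockets path for %s\n", peer);
	return 0;
}
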
diff --git a/net/iucv/iucv.c b/net/iucv/iucv.c
index 075a3808aa40..403be43b793d 100644
--- a/net/iucv/iucv.c
+++ b/net/iucv/iucv.c
@@ -1974,6 +1974,27 @@ out:
1974 return rc; 1974 return rc;
1975} 1975}
1976 1976
1977struct iucv_interface iucv_if = {
1978 .message_receive = iucv_message_receive,
1979 .__message_receive = __iucv_message_receive,
1980 .message_reply = iucv_message_reply,
1981 .message_reject = iucv_message_reject,
1982 .message_send = iucv_message_send,
1983 .__message_send = __iucv_message_send,
1984 .message_send2way = iucv_message_send2way,
1985 .message_purge = iucv_message_purge,
1986 .path_accept = iucv_path_accept,
1987 .path_connect = iucv_path_connect,
1988 .path_quiesce = iucv_path_quiesce,
1989 .path_resume = iucv_path_resume,
1990 .path_sever = iucv_path_sever,
1991 .iucv_register = iucv_register,
1992 .iucv_unregister = iucv_unregister,
1993 .bus = NULL,
1994 .root = NULL,
1995};
1996EXPORT_SYMBOL(iucv_if);
1997
1977/** 1998/**
1978 * iucv_init 1999 * iucv_init
1979 * 2000 *
@@ -2038,6 +2059,8 @@ static int __init iucv_init(void)
2038 rc = bus_register(&iucv_bus); 2059 rc = bus_register(&iucv_bus);
2039 if (rc) 2060 if (rc)
2040 goto out_reboot; 2061 goto out_reboot;
2062 iucv_if.root = iucv_root;
2063 iucv_if.bus = &iucv_bus;
2041 return 0; 2064 return 0;
2042 2065
2043out_reboot: 2066out_reboot:
diff --git a/net/mac80211/agg-rx.c b/net/mac80211/agg-rx.c
index fd1aaf2a4a6c..9b5bd8cafc20 100644
--- a/net/mac80211/agg-rx.c
+++ b/net/mac80211/agg-rx.c
@@ -69,7 +69,7 @@ void ___ieee80211_stop_rx_ba_session(struct sta_info *sta, u16 tid,
69 if (!tid_rx) 69 if (!tid_rx)
70 return; 70 return;
71 71
72 rcu_assign_pointer(sta->ampdu_mlme.tid_rx[tid], NULL); 72 RCU_INIT_POINTER(sta->ampdu_mlme.tid_rx[tid], NULL);
73 73
74#ifdef CONFIG_MAC80211_HT_DEBUG 74#ifdef CONFIG_MAC80211_HT_DEBUG
75 printk(KERN_DEBUG "Rx BA session stop requested for %pM tid %u\n", 75 printk(KERN_DEBUG "Rx BA session stop requested for %pM tid %u\n",
@@ -340,7 +340,7 @@ void ieee80211_process_addba_request(struct ieee80211_local *local,
340 status = WLAN_STATUS_SUCCESS; 340 status = WLAN_STATUS_SUCCESS;
341 341
342 /* activate it for RX */ 342 /* activate it for RX */
343 rcu_assign_pointer(sta->ampdu_mlme.tid_rx[tid], tid_agg_rx); 343 RCU_INIT_POINTER(sta->ampdu_mlme.tid_rx[tid], tid_agg_rx);
344 344
345 if (timeout) 345 if (timeout)
346 mod_timer(&tid_agg_rx->session_timer, TU_TO_EXP_TIME(timeout)); 346 mod_timer(&tid_agg_rx->session_timer, TU_TO_EXP_TIME(timeout));
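
The remaining hunks in this section (mac80211, netfilter, conntrack) all make the same substitution: where an RCU-protected pointer is set to NULL, or to a value readers cannot yet reach, the ordered rcu_assign_pointer() becomes the plain store RCU_INIT_POINTER(), saving an unneeded memory barrier. A short kernel-style illustration of when each form is appropriate (not taken from these files):

#include <linux/rcupdate.h>

struct item { int payload; };
static struct item __rcu *slot;

static void publish(struct item *it)
{
	it->payload = 42;
	/* readers may dereference right away: use the ordered store so the
	 * initialisation above is visible before the pointer is */
	rcu_assign_pointer(slot, it);
}

static void retract(void)
{
	/* NULL carries no payload to order against: a plain store suffices */
	RCU_INIT_POINTER(slot, NULL);
}
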
diff --git a/net/mac80211/cfg.c b/net/mac80211/cfg.c
index c1fa5775cef2..a589addf6ce1 100644
--- a/net/mac80211/cfg.c
+++ b/net/mac80211/cfg.c
@@ -62,7 +62,7 @@ static int ieee80211_change_iface(struct wiphy *wiphy,
62 62
63 if (type == NL80211_IFTYPE_AP_VLAN && 63 if (type == NL80211_IFTYPE_AP_VLAN &&
64 params && params->use_4addr == 0) 64 params && params->use_4addr == 0)
65 rcu_assign_pointer(sdata->u.vlan.sta, NULL); 65 RCU_INIT_POINTER(sdata->u.vlan.sta, NULL);
66 else if (type == NL80211_IFTYPE_STATION && 66 else if (type == NL80211_IFTYPE_STATION &&
67 params && params->use_4addr >= 0) 67 params && params->use_4addr >= 0)
68 sdata->u.mgd.use_4addr = params->use_4addr; 68 sdata->u.mgd.use_4addr = params->use_4addr;
@@ -542,7 +542,7 @@ static int ieee80211_config_beacon(struct ieee80211_sub_if_data *sdata,
542 542
543 sdata->vif.bss_conf.dtim_period = new->dtim_period; 543 sdata->vif.bss_conf.dtim_period = new->dtim_period;
544 544
545 rcu_assign_pointer(sdata->u.ap.beacon, new); 545 RCU_INIT_POINTER(sdata->u.ap.beacon, new);
546 546
547 synchronize_rcu(); 547 synchronize_rcu();
548 548
@@ -594,7 +594,7 @@ static int ieee80211_del_beacon(struct wiphy *wiphy, struct net_device *dev)
594 if (!old) 594 if (!old)
595 return -ENOENT; 595 return -ENOENT;
596 596
597 rcu_assign_pointer(sdata->u.ap.beacon, NULL); 597 RCU_INIT_POINTER(sdata->u.ap.beacon, NULL);
598 synchronize_rcu(); 598 synchronize_rcu();
599 kfree(old); 599 kfree(old);
600 600
@@ -857,7 +857,7 @@ static int ieee80211_change_station(struct wiphy *wiphy,
857 return -EBUSY; 857 return -EBUSY;
858 } 858 }
859 859
860 rcu_assign_pointer(vlansdata->u.vlan.sta, sta); 860 RCU_INIT_POINTER(vlansdata->u.vlan.sta, sta);
861 } 861 }
862 862
863 sta->sdata = vlansdata; 863 sta->sdata = vlansdata;
diff --git a/net/mac80211/ibss.c b/net/mac80211/ibss.c
index 56c24cabf26d..4f9235b18a03 100644
--- a/net/mac80211/ibss.c
+++ b/net/mac80211/ibss.c
@@ -84,7 +84,7 @@ static void __ieee80211_sta_join_ibss(struct ieee80211_sub_if_data *sdata,
84 drv_reset_tsf(local); 84 drv_reset_tsf(local);
85 85
86 skb = ifibss->skb; 86 skb = ifibss->skb;
87 rcu_assign_pointer(ifibss->presp, NULL); 87 RCU_INIT_POINTER(ifibss->presp, NULL);
88 synchronize_rcu(); 88 synchronize_rcu();
89 skb->data = skb->head; 89 skb->data = skb->head;
90 skb->len = 0; 90 skb->len = 0;
@@ -184,7 +184,7 @@ static void __ieee80211_sta_join_ibss(struct ieee80211_sub_if_data *sdata,
184 *pos++ = 0; /* U-APSD no in use */ 184 *pos++ = 0; /* U-APSD no in use */
185 } 185 }
186 186
187 rcu_assign_pointer(ifibss->presp, skb); 187 RCU_INIT_POINTER(ifibss->presp, skb);
188 188
189 sdata->vif.bss_conf.beacon_int = beacon_int; 189 sdata->vif.bss_conf.beacon_int = beacon_int;
190 sdata->vif.bss_conf.basic_rates = basic_rates; 190 sdata->vif.bss_conf.basic_rates = basic_rates;
@@ -995,7 +995,7 @@ int ieee80211_ibss_leave(struct ieee80211_sub_if_data *sdata)
995 kfree(sdata->u.ibss.ie); 995 kfree(sdata->u.ibss.ie);
996 skb = rcu_dereference_protected(sdata->u.ibss.presp, 996 skb = rcu_dereference_protected(sdata->u.ibss.presp,
997 lockdep_is_held(&sdata->u.ibss.mtx)); 997 lockdep_is_held(&sdata->u.ibss.mtx));
998 rcu_assign_pointer(sdata->u.ibss.presp, NULL); 998 RCU_INIT_POINTER(sdata->u.ibss.presp, NULL);
999 sdata->vif.bss_conf.ibss_joined = false; 999 sdata->vif.bss_conf.ibss_joined = false;
1000 ieee80211_bss_info_change_notify(sdata, BSS_CHANGED_BEACON_ENABLED | 1000 ieee80211_bss_info_change_notify(sdata, BSS_CHANGED_BEACON_ENABLED |
1001 BSS_CHANGED_IBSS); 1001 BSS_CHANGED_IBSS);
diff --git a/net/mac80211/iface.c b/net/mac80211/iface.c
index 556e7e6ddf0a..d10dc4df60b6 100644
--- a/net/mac80211/iface.c
+++ b/net/mac80211/iface.c
@@ -456,7 +456,7 @@ static void ieee80211_do_stop(struct ieee80211_sub_if_data *sdata,
456 BSS_CHANGED_BEACON_ENABLED); 456 BSS_CHANGED_BEACON_ENABLED);
457 457
458 /* remove beacon */ 458 /* remove beacon */
459 rcu_assign_pointer(sdata->u.ap.beacon, NULL); 459 RCU_INIT_POINTER(sdata->u.ap.beacon, NULL);
460 synchronize_rcu(); 460 synchronize_rcu();
461 kfree(old_beacon); 461 kfree(old_beacon);
462 462
@@ -645,7 +645,7 @@ static const struct net_device_ops ieee80211_dataif_ops = {
645 .ndo_stop = ieee80211_stop, 645 .ndo_stop = ieee80211_stop,
646 .ndo_uninit = ieee80211_teardown_sdata, 646 .ndo_uninit = ieee80211_teardown_sdata,
647 .ndo_start_xmit = ieee80211_subif_start_xmit, 647 .ndo_start_xmit = ieee80211_subif_start_xmit,
648 .ndo_set_multicast_list = ieee80211_set_multicast_list, 648 .ndo_set_rx_mode = ieee80211_set_multicast_list,
649 .ndo_change_mtu = ieee80211_change_mtu, 649 .ndo_change_mtu = ieee80211_change_mtu,
650 .ndo_set_mac_address = ieee80211_change_mac, 650 .ndo_set_mac_address = ieee80211_change_mac,
651 .ndo_select_queue = ieee80211_netdev_select_queue, 651 .ndo_select_queue = ieee80211_netdev_select_queue,
@@ -689,7 +689,7 @@ static const struct net_device_ops ieee80211_monitorif_ops = {
689 .ndo_stop = ieee80211_stop, 689 .ndo_stop = ieee80211_stop,
690 .ndo_uninit = ieee80211_teardown_sdata, 690 .ndo_uninit = ieee80211_teardown_sdata,
691 .ndo_start_xmit = ieee80211_monitor_start_xmit, 691 .ndo_start_xmit = ieee80211_monitor_start_xmit,
692 .ndo_set_multicast_list = ieee80211_set_multicast_list, 692 .ndo_set_rx_mode = ieee80211_set_multicast_list,
693 .ndo_change_mtu = ieee80211_change_mtu, 693 .ndo_change_mtu = ieee80211_change_mtu,
694 .ndo_set_mac_address = eth_mac_addr, 694 .ndo_set_mac_address = eth_mac_addr,
695 .ndo_select_queue = ieee80211_monitor_select_queue, 695 .ndo_select_queue = ieee80211_monitor_select_queue,
diff --git a/net/mac80211/mesh_pathtbl.c b/net/mac80211/mesh_pathtbl.c
index 068ee6518254..dc7ae8d31aaf 100644
--- a/net/mac80211/mesh_pathtbl.c
+++ b/net/mac80211/mesh_pathtbl.c
@@ -843,6 +843,6 @@ void mesh_path_expire(struct ieee80211_sub_if_data *sdata)
843void mesh_pathtbl_unregister(void) 843void mesh_pathtbl_unregister(void)
844{ 844{
845 /* no need for locking during exit path */ 845 /* no need for locking during exit path */
846 mesh_table_free(rcu_dereference_raw(mesh_paths), true); 846 mesh_table_free(rcu_dereference_protected(mesh_paths, 1), true);
847 mesh_table_free(rcu_dereference_raw(mpp_paths), true); 847 mesh_table_free(rcu_dereference_protected(mpp_paths, 1), true);
848} 848}
diff --git a/net/mac80211/sta_info.c b/net/mac80211/sta_info.c
index 5eaa1673a8f5..0bdbf3b8f28b 100644
--- a/net/mac80211/sta_info.c
+++ b/net/mac80211/sta_info.c
@@ -72,7 +72,7 @@ static int sta_info_hash_del(struct ieee80211_local *local,
72 if (!s) 72 if (!s)
73 return -ENOENT; 73 return -ENOENT;
74 if (s == sta) { 74 if (s == sta) {
75 rcu_assign_pointer(local->sta_hash[STA_HASH(sta->sta.addr)], 75 RCU_INIT_POINTER(local->sta_hash[STA_HASH(sta->sta.addr)],
76 s->hnext); 76 s->hnext);
77 return 0; 77 return 0;
78 } 78 }
@@ -82,7 +82,7 @@ static int sta_info_hash_del(struct ieee80211_local *local,
82 s = rcu_dereference_protected(s->hnext, 82 s = rcu_dereference_protected(s->hnext,
83 lockdep_is_held(&local->sta_lock)); 83 lockdep_is_held(&local->sta_lock));
84 if (rcu_access_pointer(s->hnext)) { 84 if (rcu_access_pointer(s->hnext)) {
85 rcu_assign_pointer(s->hnext, sta->hnext); 85 RCU_INIT_POINTER(s->hnext, sta->hnext);
86 return 0; 86 return 0;
87 } 87 }
88 88
@@ -184,7 +184,7 @@ static void sta_info_hash_add(struct ieee80211_local *local,
184 struct sta_info *sta) 184 struct sta_info *sta)
185{ 185{
186 sta->hnext = local->sta_hash[STA_HASH(sta->sta.addr)]; 186 sta->hnext = local->sta_hash[STA_HASH(sta->sta.addr)];
187 rcu_assign_pointer(local->sta_hash[STA_HASH(sta->sta.addr)], sta); 187 RCU_INIT_POINTER(local->sta_hash[STA_HASH(sta->sta.addr)], sta);
188} 188}
189 189
190static void sta_unblock(struct work_struct *wk) 190static void sta_unblock(struct work_struct *wk)
@@ -673,7 +673,7 @@ static int __must_check __sta_info_destroy(struct sta_info *sta)
673 local->sta_generation++; 673 local->sta_generation++;
674 674
675 if (sdata->vif.type == NL80211_IFTYPE_AP_VLAN) 675 if (sdata->vif.type == NL80211_IFTYPE_AP_VLAN)
676 rcu_assign_pointer(sdata->u.vlan.sta, NULL); 676 RCU_INIT_POINTER(sdata->u.vlan.sta, NULL);
677 677
678 if (sta->uploaded) { 678 if (sta->uploaded) {
679 if (sdata->vif.type == NL80211_IFTYPE_AP_VLAN) 679 if (sdata->vif.type == NL80211_IFTYPE_AP_VLAN)
diff --git a/net/netfilter/core.c b/net/netfilter/core.c
index 899b71c0ff5d..3346829ea07f 100644
--- a/net/netfilter/core.c
+++ b/net/netfilter/core.c
@@ -37,7 +37,7 @@ int nf_register_afinfo(const struct nf_afinfo *afinfo)
37 err = mutex_lock_interruptible(&afinfo_mutex); 37 err = mutex_lock_interruptible(&afinfo_mutex);
38 if (err < 0) 38 if (err < 0)
39 return err; 39 return err;
40 rcu_assign_pointer(nf_afinfo[afinfo->family], afinfo); 40 RCU_INIT_POINTER(nf_afinfo[afinfo->family], afinfo);
41 mutex_unlock(&afinfo_mutex); 41 mutex_unlock(&afinfo_mutex);
42 return 0; 42 return 0;
43} 43}
@@ -46,7 +46,7 @@ EXPORT_SYMBOL_GPL(nf_register_afinfo);
46void nf_unregister_afinfo(const struct nf_afinfo *afinfo) 46void nf_unregister_afinfo(const struct nf_afinfo *afinfo)
47{ 47{
48 mutex_lock(&afinfo_mutex); 48 mutex_lock(&afinfo_mutex);
49 rcu_assign_pointer(nf_afinfo[afinfo->family], NULL); 49 RCU_INIT_POINTER(nf_afinfo[afinfo->family], NULL);
50 mutex_unlock(&afinfo_mutex); 50 mutex_unlock(&afinfo_mutex);
51 synchronize_rcu(); 51 synchronize_rcu();
52} 52}
diff --git a/net/netfilter/ipvs/ip_vs_ctl.c b/net/netfilter/ipvs/ip_vs_ctl.c
index be43fd805bd0..2b771dc708a3 100644
--- a/net/netfilter/ipvs/ip_vs_ctl.c
+++ b/net/netfilter/ipvs/ip_vs_ctl.c
@@ -3771,6 +3771,7 @@ err_sock:
3771void ip_vs_control_cleanup(void) 3771void ip_vs_control_cleanup(void)
3772{ 3772{
3773 EnterFunction(2); 3773 EnterFunction(2);
3774 unregister_netdevice_notifier(&ip_vs_dst_notifier);
3774 ip_vs_genl_unregister(); 3775 ip_vs_genl_unregister();
3775 nf_unregister_sockopt(&ip_vs_sockopts); 3776 nf_unregister_sockopt(&ip_vs_sockopts);
3776 LeaveFunction(2); 3777 LeaveFunction(2);
diff --git a/net/netfilter/nf_conntrack_core.c b/net/netfilter/nf_conntrack_core.c
index f7af8b866017..5acfaf59a9c3 100644
--- a/net/netfilter/nf_conntrack_core.c
+++ b/net/netfilter/nf_conntrack_core.c
@@ -779,7 +779,7 @@ init_conntrack(struct net *net, struct nf_conn *tmpl,
779 if (exp->helper) { 779 if (exp->helper) {
780 help = nf_ct_helper_ext_add(ct, GFP_ATOMIC); 780 help = nf_ct_helper_ext_add(ct, GFP_ATOMIC);
781 if (help) 781 if (help)
782 rcu_assign_pointer(help->helper, exp->helper); 782 RCU_INIT_POINTER(help->helper, exp->helper);
783 } 783 }
784 784
785#ifdef CONFIG_NF_CONNTRACK_MARK 785#ifdef CONFIG_NF_CONNTRACK_MARK
@@ -1317,7 +1317,7 @@ static void nf_conntrack_cleanup_net(struct net *net)
1317void nf_conntrack_cleanup(struct net *net) 1317void nf_conntrack_cleanup(struct net *net)
1318{ 1318{
1319 if (net_eq(net, &init_net)) 1319 if (net_eq(net, &init_net))
1320 rcu_assign_pointer(ip_ct_attach, NULL); 1320 RCU_INIT_POINTER(ip_ct_attach, NULL);
1321 1321
1322 /* This makes sure all current packets have passed through 1322 /* This makes sure all current packets have passed through
1323 netfilter framework. Roll on, two-stage module 1323 netfilter framework. Roll on, two-stage module
@@ -1327,7 +1327,7 @@ void nf_conntrack_cleanup(struct net *net)
1327 nf_conntrack_cleanup_net(net); 1327 nf_conntrack_cleanup_net(net);
1328 1328
1329 if (net_eq(net, &init_net)) { 1329 if (net_eq(net, &init_net)) {
1330 rcu_assign_pointer(nf_ct_destroy, NULL); 1330 RCU_INIT_POINTER(nf_ct_destroy, NULL);
1331 nf_conntrack_cleanup_init_net(); 1331 nf_conntrack_cleanup_init_net();
1332 } 1332 }
1333} 1333}
@@ -1576,11 +1576,11 @@ int nf_conntrack_init(struct net *net)
1576 1576
1577 if (net_eq(net, &init_net)) { 1577 if (net_eq(net, &init_net)) {
1578 /* For use by REJECT target */ 1578 /* For use by REJECT target */
1579 rcu_assign_pointer(ip_ct_attach, nf_conntrack_attach); 1579 RCU_INIT_POINTER(ip_ct_attach, nf_conntrack_attach);
1580 rcu_assign_pointer(nf_ct_destroy, destroy_conntrack); 1580 RCU_INIT_POINTER(nf_ct_destroy, destroy_conntrack);
1581 1581
1582 /* Howto get NAT offsets */ 1582 /* Howto get NAT offsets */
1583 rcu_assign_pointer(nf_ct_nat_offset, NULL); 1583 RCU_INIT_POINTER(nf_ct_nat_offset, NULL);
1584 } 1584 }
1585 return 0; 1585 return 0;
1586 1586
diff --git a/net/netfilter/nf_conntrack_ecache.c b/net/netfilter/nf_conntrack_ecache.c
index 63a1b915a7e4..3add99439059 100644
--- a/net/netfilter/nf_conntrack_ecache.c
+++ b/net/netfilter/nf_conntrack_ecache.c
@@ -94,7 +94,7 @@ int nf_conntrack_register_notifier(struct nf_ct_event_notifier *new)
94 ret = -EBUSY; 94 ret = -EBUSY;
95 goto out_unlock; 95 goto out_unlock;
96 } 96 }
97 rcu_assign_pointer(nf_conntrack_event_cb, new); 97 RCU_INIT_POINTER(nf_conntrack_event_cb, new);
98 mutex_unlock(&nf_ct_ecache_mutex); 98 mutex_unlock(&nf_ct_ecache_mutex);
99 return ret; 99 return ret;
100 100
@@ -112,7 +112,7 @@ void nf_conntrack_unregister_notifier(struct nf_ct_event_notifier *new)
112 notify = rcu_dereference_protected(nf_conntrack_event_cb, 112 notify = rcu_dereference_protected(nf_conntrack_event_cb,
113 lockdep_is_held(&nf_ct_ecache_mutex)); 113 lockdep_is_held(&nf_ct_ecache_mutex));
114 BUG_ON(notify != new); 114 BUG_ON(notify != new);
115 rcu_assign_pointer(nf_conntrack_event_cb, NULL); 115 RCU_INIT_POINTER(nf_conntrack_event_cb, NULL);
116 mutex_unlock(&nf_ct_ecache_mutex); 116 mutex_unlock(&nf_ct_ecache_mutex);
117} 117}
118EXPORT_SYMBOL_GPL(nf_conntrack_unregister_notifier); 118EXPORT_SYMBOL_GPL(nf_conntrack_unregister_notifier);
@@ -129,7 +129,7 @@ int nf_ct_expect_register_notifier(struct nf_exp_event_notifier *new)
129 ret = -EBUSY; 129 ret = -EBUSY;
130 goto out_unlock; 130 goto out_unlock;
131 } 131 }
132 rcu_assign_pointer(nf_expect_event_cb, new); 132 RCU_INIT_POINTER(nf_expect_event_cb, new);
133 mutex_unlock(&nf_ct_ecache_mutex); 133 mutex_unlock(&nf_ct_ecache_mutex);
134 return ret; 134 return ret;
135 135
@@ -147,7 +147,7 @@ void nf_ct_expect_unregister_notifier(struct nf_exp_event_notifier *new)
147 notify = rcu_dereference_protected(nf_expect_event_cb, 147 notify = rcu_dereference_protected(nf_expect_event_cb,
148 lockdep_is_held(&nf_ct_ecache_mutex)); 148 lockdep_is_held(&nf_ct_ecache_mutex));
149 BUG_ON(notify != new); 149 BUG_ON(notify != new);
150 rcu_assign_pointer(nf_expect_event_cb, NULL); 150 RCU_INIT_POINTER(nf_expect_event_cb, NULL);
151 mutex_unlock(&nf_ct_ecache_mutex); 151 mutex_unlock(&nf_ct_ecache_mutex);
152} 152}
153EXPORT_SYMBOL_GPL(nf_ct_expect_unregister_notifier); 153EXPORT_SYMBOL_GPL(nf_ct_expect_unregister_notifier);
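Editor's note: the register/unregister paths above read the current notifier with rcu_dereference_protected(..., lockdep_is_held(&nf_ct_ecache_mutex)) because the update side holds the mutex, and only then clear the slot with RCU_INIT_POINTER(). Below is a hedged userspace sketch of that update-side pattern; the flag-plus-assert stands in for lockdep, and all names are hypothetical, not the kernel API.

/* Userspace sketch: reads of an RCU-protected slot made while holding the
 * update-side lock need no rcu_read_lock(), only a documented proof that the
 * lock is held (modelled here with a flag and an assert). */
#include <assert.h>
#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>

static pthread_mutex_t cb_mutex = PTHREAD_MUTEX_INITIALIZER;
static bool cb_mutex_held;              /* stand-in for lockdep_is_held() */
static void (*event_cb)(int);           /* the "RCU-protected" callback slot */

/* ~ rcu_dereference_protected(event_cb, lockdep_is_held(&cb_mutex)) */
static void (*deref_protected(void))(int)
{
    assert(cb_mutex_held);              /* caller must hold the update lock */
    return event_cb;
}

static void unregister_cb(void (*cb)(int))
{
    pthread_mutex_lock(&cb_mutex);
    cb_mutex_held = true;
    assert(deref_protected() == cb);    /* mirrors the BUG_ON() in the patch */
    event_cb = NULL;                    /* ~ RCU_INIT_POINTER(..., NULL) */
    cb_mutex_held = false;
    pthread_mutex_unlock(&cb_mutex);
}

static void demo_cb(int v) { printf("event %d\n", v); }

int main(void)
{
    event_cb = demo_cb;                 /* registration elided for brevity */
    unregister_cb(demo_cb);
    return 0;
}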
diff --git a/net/netfilter/nf_conntrack_extend.c b/net/netfilter/nf_conntrack_extend.c
index 05ecdc281a53..4605c947dcc4 100644
--- a/net/netfilter/nf_conntrack_extend.c
+++ b/net/netfilter/nf_conntrack_extend.c
@@ -169,7 +169,7 @@ int nf_ct_extend_register(struct nf_ct_ext_type *type)
169 before updating alloc_size */ 169 before updating alloc_size */
170 type->alloc_size = ALIGN(sizeof(struct nf_ct_ext), type->align) 170 type->alloc_size = ALIGN(sizeof(struct nf_ct_ext), type->align)
171 + type->len; 171 + type->len;
172 rcu_assign_pointer(nf_ct_ext_types[type->id], type); 172 RCU_INIT_POINTER(nf_ct_ext_types[type->id], type);
173 update_alloc_size(type); 173 update_alloc_size(type);
174out: 174out:
175 mutex_unlock(&nf_ct_ext_type_mutex); 175 mutex_unlock(&nf_ct_ext_type_mutex);
@@ -181,7 +181,7 @@ EXPORT_SYMBOL_GPL(nf_ct_extend_register);
181void nf_ct_extend_unregister(struct nf_ct_ext_type *type) 181void nf_ct_extend_unregister(struct nf_ct_ext_type *type)
182{ 182{
183 mutex_lock(&nf_ct_ext_type_mutex); 183 mutex_lock(&nf_ct_ext_type_mutex);
184 rcu_assign_pointer(nf_ct_ext_types[type->id], NULL); 184 RCU_INIT_POINTER(nf_ct_ext_types[type->id], NULL);
185 update_alloc_size(type); 185 update_alloc_size(type);
186 mutex_unlock(&nf_ct_ext_type_mutex); 186 mutex_unlock(&nf_ct_ext_type_mutex);
187 rcu_barrier(); /* Wait for completion of call_rcu()'s */ 187 rcu_barrier(); /* Wait for completion of call_rcu()'s */
diff --git a/net/netfilter/nf_conntrack_helper.c b/net/netfilter/nf_conntrack_helper.c
index 1bdfea357955..93c4bdbfc1ae 100644
--- a/net/netfilter/nf_conntrack_helper.c
+++ b/net/netfilter/nf_conntrack_helper.c
@@ -131,7 +131,7 @@ int __nf_ct_try_assign_helper(struct nf_conn *ct, struct nf_conn *tmpl,
131 helper = __nf_ct_helper_find(&ct->tuplehash[IP_CT_DIR_REPLY].tuple); 131 helper = __nf_ct_helper_find(&ct->tuplehash[IP_CT_DIR_REPLY].tuple);
132 if (helper == NULL) { 132 if (helper == NULL) {
133 if (help) 133 if (help)
134 rcu_assign_pointer(help->helper, NULL); 134 RCU_INIT_POINTER(help->helper, NULL);
135 goto out; 135 goto out;
136 } 136 }
137 137
@@ -145,7 +145,7 @@ int __nf_ct_try_assign_helper(struct nf_conn *ct, struct nf_conn *tmpl,
145 memset(&help->help, 0, sizeof(help->help)); 145 memset(&help->help, 0, sizeof(help->help));
146 } 146 }
147 147
148 rcu_assign_pointer(help->helper, helper); 148 RCU_INIT_POINTER(help->helper, helper);
149out: 149out:
150 return ret; 150 return ret;
151} 151}
@@ -162,7 +162,7 @@ static inline int unhelp(struct nf_conntrack_tuple_hash *i,
162 lockdep_is_held(&nf_conntrack_lock) 162 lockdep_is_held(&nf_conntrack_lock)
163 ) == me) { 163 ) == me) {
164 nf_conntrack_event(IPCT_HELPER, ct); 164 nf_conntrack_event(IPCT_HELPER, ct);
165 rcu_assign_pointer(help->helper, NULL); 165 RCU_INIT_POINTER(help->helper, NULL);
166 } 166 }
167 return 0; 167 return 0;
168} 168}
diff --git a/net/netfilter/nf_conntrack_netlink.c b/net/netfilter/nf_conntrack_netlink.c
index 7dec88a1755b..e58aa9b1fe8a 100644
--- a/net/netfilter/nf_conntrack_netlink.c
+++ b/net/netfilter/nf_conntrack_netlink.c
@@ -1125,7 +1125,7 @@ ctnetlink_change_helper(struct nf_conn *ct, const struct nlattr * const cda[])
1125 if (help && help->helper) { 1125 if (help && help->helper) {
1126 /* we had a helper before ... */ 1126 /* we had a helper before ... */
1127 nf_ct_remove_expectations(ct); 1127 nf_ct_remove_expectations(ct);
1128 rcu_assign_pointer(help->helper, NULL); 1128 RCU_INIT_POINTER(help->helper, NULL);
1129 } 1129 }
1130 1130
1131 return 0; 1131 return 0;
@@ -1163,7 +1163,7 @@ ctnetlink_change_helper(struct nf_conn *ct, const struct nlattr * const cda[])
1163 return -EOPNOTSUPP; 1163 return -EOPNOTSUPP;
1164 } 1164 }
1165 1165
1166 rcu_assign_pointer(help->helper, helper); 1166 RCU_INIT_POINTER(help->helper, helper);
1167 1167
1168 return 0; 1168 return 0;
1169} 1169}
@@ -1386,7 +1386,7 @@ ctnetlink_create_conntrack(struct net *net, u16 zone,
1386 } 1386 }
1387 1387
1388 /* not in hash table yet so not strictly necessary */ 1388 /* not in hash table yet so not strictly necessary */
1389 rcu_assign_pointer(help->helper, helper); 1389 RCU_INIT_POINTER(help->helper, helper);
1390 } 1390 }
1391 } else { 1391 } else {
1392 /* try an implicit helper assignation */ 1392 /* try an implicit helper assignation */
diff --git a/net/netfilter/nf_log.c b/net/netfilter/nf_log.c
index 20714edf6cd2..ce0c406f58a8 100644
--- a/net/netfilter/nf_log.c
+++ b/net/netfilter/nf_log.c
@@ -55,7 +55,7 @@ int nf_log_register(u_int8_t pf, struct nf_logger *logger)
55 llog = rcu_dereference_protected(nf_loggers[pf], 55 llog = rcu_dereference_protected(nf_loggers[pf],
56 lockdep_is_held(&nf_log_mutex)); 56 lockdep_is_held(&nf_log_mutex));
57 if (llog == NULL) 57 if (llog == NULL)
58 rcu_assign_pointer(nf_loggers[pf], logger); 58 RCU_INIT_POINTER(nf_loggers[pf], logger);
59 } 59 }
60 60
61 mutex_unlock(&nf_log_mutex); 61 mutex_unlock(&nf_log_mutex);
@@ -74,7 +74,7 @@ void nf_log_unregister(struct nf_logger *logger)
74 c_logger = rcu_dereference_protected(nf_loggers[i], 74 c_logger = rcu_dereference_protected(nf_loggers[i],
75 lockdep_is_held(&nf_log_mutex)); 75 lockdep_is_held(&nf_log_mutex));
76 if (c_logger == logger) 76 if (c_logger == logger)
77 rcu_assign_pointer(nf_loggers[i], NULL); 77 RCU_INIT_POINTER(nf_loggers[i], NULL);
78 list_del(&logger->list[i]); 78 list_del(&logger->list[i]);
79 } 79 }
80 mutex_unlock(&nf_log_mutex); 80 mutex_unlock(&nf_log_mutex);
@@ -92,7 +92,7 @@ int nf_log_bind_pf(u_int8_t pf, const struct nf_logger *logger)
92 mutex_unlock(&nf_log_mutex); 92 mutex_unlock(&nf_log_mutex);
93 return -ENOENT; 93 return -ENOENT;
94 } 94 }
95 rcu_assign_pointer(nf_loggers[pf], logger); 95 RCU_INIT_POINTER(nf_loggers[pf], logger);
96 mutex_unlock(&nf_log_mutex); 96 mutex_unlock(&nf_log_mutex);
97 return 0; 97 return 0;
98} 98}
@@ -103,7 +103,7 @@ void nf_log_unbind_pf(u_int8_t pf)
103 if (pf >= ARRAY_SIZE(nf_loggers)) 103 if (pf >= ARRAY_SIZE(nf_loggers))
104 return; 104 return;
105 mutex_lock(&nf_log_mutex); 105 mutex_lock(&nf_log_mutex);
106 rcu_assign_pointer(nf_loggers[pf], NULL); 106 RCU_INIT_POINTER(nf_loggers[pf], NULL);
107 mutex_unlock(&nf_log_mutex); 107 mutex_unlock(&nf_log_mutex);
108} 108}
109EXPORT_SYMBOL(nf_log_unbind_pf); 109EXPORT_SYMBOL(nf_log_unbind_pf);
@@ -250,7 +250,7 @@ static int nf_log_proc_dostring(ctl_table *table, int write,
250 mutex_unlock(&nf_log_mutex); 250 mutex_unlock(&nf_log_mutex);
251 return -ENOENT; 251 return -ENOENT;
252 } 252 }
253 rcu_assign_pointer(nf_loggers[tindex], logger); 253 RCU_INIT_POINTER(nf_loggers[tindex], logger);
254 mutex_unlock(&nf_log_mutex); 254 mutex_unlock(&nf_log_mutex);
255 } else { 255 } else {
256 mutex_lock(&nf_log_mutex); 256 mutex_lock(&nf_log_mutex);
diff --git a/net/netfilter/nf_queue.c b/net/netfilter/nf_queue.c
index 5b466cd1272f..99ffd2885088 100644
--- a/net/netfilter/nf_queue.c
+++ b/net/netfilter/nf_queue.c
@@ -40,7 +40,7 @@ int nf_register_queue_handler(u_int8_t pf, const struct nf_queue_handler *qh)
40 else if (old) 40 else if (old)
41 ret = -EBUSY; 41 ret = -EBUSY;
42 else { 42 else {
43 rcu_assign_pointer(queue_handler[pf], qh); 43 RCU_INIT_POINTER(queue_handler[pf], qh);
44 ret = 0; 44 ret = 0;
45 } 45 }
46 mutex_unlock(&queue_handler_mutex); 46 mutex_unlock(&queue_handler_mutex);
@@ -65,7 +65,7 @@ int nf_unregister_queue_handler(u_int8_t pf, const struct nf_queue_handler *qh)
65 return -EINVAL; 65 return -EINVAL;
66 } 66 }
67 67
68 rcu_assign_pointer(queue_handler[pf], NULL); 68 RCU_INIT_POINTER(queue_handler[pf], NULL);
69 mutex_unlock(&queue_handler_mutex); 69 mutex_unlock(&queue_handler_mutex);
70 70
71 synchronize_rcu(); 71 synchronize_rcu();
@@ -84,7 +84,7 @@ void nf_unregister_queue_handlers(const struct nf_queue_handler *qh)
84 queue_handler[pf], 84 queue_handler[pf],
85 lockdep_is_held(&queue_handler_mutex) 85 lockdep_is_held(&queue_handler_mutex)
86 ) == qh) 86 ) == qh)
87 rcu_assign_pointer(queue_handler[pf], NULL); 87 RCU_INIT_POINTER(queue_handler[pf], NULL);
88 } 88 }
89 mutex_unlock(&queue_handler_mutex); 89 mutex_unlock(&queue_handler_mutex);
90 90
@@ -312,6 +312,7 @@ void nf_reinject(struct nf_queue_entry *entry, unsigned int verdict)
 		}
 		break;
 	case NF_STOLEN:
+		break;
 	default:
 		kfree_skb(skb);
 	}
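Editor's note: the added break matters because, as the surrounding switch shows, NF_STOLEN previously fell through into the default case and kfree_skb() freed a packet whose ownership had already been taken by the hook that returned NF_STOLEN. A stand-alone C illustration of the fall-through hazard follows; the verdict names and reinject() are made up for the demo, not the netfilter API.

/* Minimal illustration of the missing-break hazard (names are made up). */
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

enum verdict { V_ACCEPT, V_STOLEN, V_DROP };

static void reinject(char *pkt, enum verdict v)
{
    switch (v) {
    case V_ACCEPT:
        printf("delivered: %s\n", pkt);
        break;
    case V_STOLEN:
        /* someone else now owns pkt; without this break we would fall
         * through to the default case and free memory we no longer own */
        break;
    default:
        free(pkt);
    }
}

int main(void)
{
    reinject(strdup("a"), V_STOLEN);   /* stolen: the new owner frees it later */
    reinject(strdup("b"), V_DROP);     /* dropped: freed here */
    return 0;
}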
diff --git a/net/netfilter/nfnetlink.c b/net/netfilter/nfnetlink.c
index 1905976b5135..c879c1a2370e 100644
--- a/net/netfilter/nfnetlink.c
+++ b/net/netfilter/nfnetlink.c
@@ -59,7 +59,7 @@ int nfnetlink_subsys_register(const struct nfnetlink_subsystem *n)
59 nfnl_unlock(); 59 nfnl_unlock();
60 return -EBUSY; 60 return -EBUSY;
61 } 61 }
62 rcu_assign_pointer(subsys_table[n->subsys_id], n); 62 RCU_INIT_POINTER(subsys_table[n->subsys_id], n);
63 nfnl_unlock(); 63 nfnl_unlock();
64 64
65 return 0; 65 return 0;
@@ -210,7 +210,7 @@ static int __net_init nfnetlink_net_init(struct net *net)
210 if (!nfnl) 210 if (!nfnl)
211 return -ENOMEM; 211 return -ENOMEM;
212 net->nfnl_stash = nfnl; 212 net->nfnl_stash = nfnl;
213 rcu_assign_pointer(net->nfnl, nfnl); 213 RCU_INIT_POINTER(net->nfnl, nfnl);
214 return 0; 214 return 0;
215} 215}
216 216
@@ -219,7 +219,7 @@ static void __net_exit nfnetlink_net_exit_batch(struct list_head *net_exit_list)
219 struct net *net; 219 struct net *net;
220 220
221 list_for_each_entry(net, net_exit_list, exit_list) 221 list_for_each_entry(net, net_exit_list, exit_list)
222 rcu_assign_pointer(net->nfnl, NULL); 222 RCU_INIT_POINTER(net->nfnl, NULL);
223 synchronize_net(); 223 synchronize_net();
224 list_for_each_entry(net, net_exit_list, exit_list) 224 list_for_each_entry(net, net_exit_list, exit_list)
225 netlink_kernel_release(net->nfnl_stash); 225 netlink_kernel_release(net->nfnl_stash);
diff --git a/net/netlabel/Makefile b/net/netlabel/Makefile
index ea750e9df65f..d2732fc952e2 100644
--- a/net/netlabel/Makefile
+++ b/net/netlabel/Makefile
@@ -1,8 +1,6 @@
 #
 # Makefile for the NetLabel subsystem.
 #
-# Feb 9, 2006, Paul Moore <paul.moore@hp.com>
-#
 
 # base objects
 obj-y := netlabel_user.o netlabel_kapi.o
diff --git a/net/netlabel/netlabel_addrlist.c b/net/netlabel/netlabel_addrlist.c
index c0519139679e..96b749dacc34 100644
--- a/net/netlabel/netlabel_addrlist.c
+++ b/net/netlabel/netlabel_addrlist.c
@@ -6,7 +6,7 @@
6 * system manages static and dynamic label mappings for network protocols such 6 * system manages static and dynamic label mappings for network protocols such
7 * as CIPSO and RIPSO. 7 * as CIPSO and RIPSO.
8 * 8 *
9 * Author: Paul Moore <paul.moore@hp.com> 9 * Author: Paul Moore <paul@paul-moore.com>
10 * 10 *
11 */ 11 */
12 12
diff --git a/net/netlabel/netlabel_addrlist.h b/net/netlabel/netlabel_addrlist.h
index 2b9644e19de0..fdbc1d2c7352 100644
--- a/net/netlabel/netlabel_addrlist.h
+++ b/net/netlabel/netlabel_addrlist.h
@@ -6,7 +6,7 @@
6 * system manages static and dynamic label mappings for network protocols such 6 * system manages static and dynamic label mappings for network protocols such
7 * as CIPSO and RIPSO. 7 * as CIPSO and RIPSO.
8 * 8 *
9 * Author: Paul Moore <paul.moore@hp.com> 9 * Author: Paul Moore <paul@paul-moore.com>
10 * 10 *
11 */ 11 */
12 12
diff --git a/net/netlabel/netlabel_cipso_v4.c b/net/netlabel/netlabel_cipso_v4.c
index dd53a36d89af..6bf878335d94 100644
--- a/net/netlabel/netlabel_cipso_v4.c
+++ b/net/netlabel/netlabel_cipso_v4.c
@@ -5,7 +5,7 @@
5 * NetLabel system manages static and dynamic label mappings for network 5 * NetLabel system manages static and dynamic label mappings for network
6 * protocols such as CIPSO and RIPSO. 6 * protocols such as CIPSO and RIPSO.
7 * 7 *
8 * Author: Paul Moore <paul.moore@hp.com> 8 * Author: Paul Moore <paul@paul-moore.com>
9 * 9 *
10 */ 10 */
11 11
diff --git a/net/netlabel/netlabel_cipso_v4.h b/net/netlabel/netlabel_cipso_v4.h
index af7f3355103e..d24d774bfd62 100644
--- a/net/netlabel/netlabel_cipso_v4.h
+++ b/net/netlabel/netlabel_cipso_v4.h
@@ -5,7 +5,7 @@
5 * NetLabel system manages static and dynamic label mappings for network 5 * NetLabel system manages static and dynamic label mappings for network
6 * protocols such as CIPSO and RIPSO. 6 * protocols such as CIPSO and RIPSO.
7 * 7 *
8 * Author: Paul Moore <paul.moore@hp.com> 8 * Author: Paul Moore <paul@paul-moore.com>
9 * 9 *
10 */ 10 */
11 11
diff --git a/net/netlabel/netlabel_domainhash.c b/net/netlabel/netlabel_domainhash.c
index 2aa975e5452d..3f905e5370c2 100644
--- a/net/netlabel/netlabel_domainhash.c
+++ b/net/netlabel/netlabel_domainhash.c
@@ -6,7 +6,7 @@
6 * system manages static and dynamic label mappings for network protocols such 6 * system manages static and dynamic label mappings for network protocols such
7 * as CIPSO and RIPSO. 7 * as CIPSO and RIPSO.
8 * 8 *
9 * Author: Paul Moore <paul.moore@hp.com> 9 * Author: Paul Moore <paul@paul-moore.com>
10 * 10 *
11 */ 11 */
12 12
@@ -282,7 +282,7 @@ int __init netlbl_domhsh_init(u32 size)
282 INIT_LIST_HEAD(&hsh_tbl->tbl[iter]); 282 INIT_LIST_HEAD(&hsh_tbl->tbl[iter]);
283 283
284 spin_lock(&netlbl_domhsh_lock); 284 spin_lock(&netlbl_domhsh_lock);
285 rcu_assign_pointer(netlbl_domhsh, hsh_tbl); 285 RCU_INIT_POINTER(netlbl_domhsh, hsh_tbl);
286 spin_unlock(&netlbl_domhsh_lock); 286 spin_unlock(&netlbl_domhsh_lock);
287 287
288 return 0; 288 return 0;
@@ -330,7 +330,7 @@ int netlbl_domhsh_add(struct netlbl_dom_map *entry,
330 &rcu_dereference(netlbl_domhsh)->tbl[bkt]); 330 &rcu_dereference(netlbl_domhsh)->tbl[bkt]);
331 } else { 331 } else {
332 INIT_LIST_HEAD(&entry->list); 332 INIT_LIST_HEAD(&entry->list);
333 rcu_assign_pointer(netlbl_domhsh_def, entry); 333 RCU_INIT_POINTER(netlbl_domhsh_def, entry);
334 } 334 }
335 335
336 if (entry->type == NETLBL_NLTYPE_ADDRSELECT) { 336 if (entry->type == NETLBL_NLTYPE_ADDRSELECT) {
@@ -451,7 +451,7 @@ int netlbl_domhsh_remove_entry(struct netlbl_dom_map *entry,
451 if (entry != rcu_dereference(netlbl_domhsh_def)) 451 if (entry != rcu_dereference(netlbl_domhsh_def))
452 list_del_rcu(&entry->list); 452 list_del_rcu(&entry->list);
453 else 453 else
454 rcu_assign_pointer(netlbl_domhsh_def, NULL); 454 RCU_INIT_POINTER(netlbl_domhsh_def, NULL);
455 } else 455 } else
456 ret_val = -ENOENT; 456 ret_val = -ENOENT;
457 spin_unlock(&netlbl_domhsh_lock); 457 spin_unlock(&netlbl_domhsh_lock);
diff --git a/net/netlabel/netlabel_domainhash.h b/net/netlabel/netlabel_domainhash.h
index 0261dda3f2d2..bfcc0f7024c5 100644
--- a/net/netlabel/netlabel_domainhash.h
+++ b/net/netlabel/netlabel_domainhash.h
@@ -6,7 +6,7 @@
6 * system manages static and dynamic label mappings for network protocols such 6 * system manages static and dynamic label mappings for network protocols such
7 * as CIPSO and RIPSO. 7 * as CIPSO and RIPSO.
8 * 8 *
9 * Author: Paul Moore <paul.moore@hp.com> 9 * Author: Paul Moore <paul@paul-moore.com>
10 * 10 *
11 */ 11 */
12 12
diff --git a/net/netlabel/netlabel_kapi.c b/net/netlabel/netlabel_kapi.c
index b528dd928d3c..9c24de10a657 100644
--- a/net/netlabel/netlabel_kapi.c
+++ b/net/netlabel/netlabel_kapi.c
@@ -5,7 +5,7 @@
5 * system manages static and dynamic label mappings for network protocols such 5 * system manages static and dynamic label mappings for network protocols such
6 * as CIPSO and RIPSO. 6 * as CIPSO and RIPSO.
7 * 7 *
8 * Author: Paul Moore <paul.moore@hp.com> 8 * Author: Paul Moore <paul@paul-moore.com>
9 * 9 *
10 */ 10 */
11 11
@@ -341,11 +341,11 @@ int netlbl_cfg_cipsov4_map_add(u32 doi,
341 341
342 entry = kzalloc(sizeof(*entry), GFP_ATOMIC); 342 entry = kzalloc(sizeof(*entry), GFP_ATOMIC);
343 if (entry == NULL) 343 if (entry == NULL)
344 return -ENOMEM; 344 goto out_entry;
345 if (domain != NULL) { 345 if (domain != NULL) {
346 entry->domain = kstrdup(domain, GFP_ATOMIC); 346 entry->domain = kstrdup(domain, GFP_ATOMIC);
347 if (entry->domain == NULL) 347 if (entry->domain == NULL)
348 goto cfg_cipsov4_map_add_failure; 348 goto out_domain;
349 } 349 }
350 350
351 if (addr == NULL && mask == NULL) { 351 if (addr == NULL && mask == NULL) {
@@ -354,13 +354,13 @@ int netlbl_cfg_cipsov4_map_add(u32 doi,
354 } else if (addr != NULL && mask != NULL) { 354 } else if (addr != NULL && mask != NULL) {
355 addrmap = kzalloc(sizeof(*addrmap), GFP_ATOMIC); 355 addrmap = kzalloc(sizeof(*addrmap), GFP_ATOMIC);
356 if (addrmap == NULL) 356 if (addrmap == NULL)
357 goto cfg_cipsov4_map_add_failure; 357 goto out_addrmap;
358 INIT_LIST_HEAD(&addrmap->list4); 358 INIT_LIST_HEAD(&addrmap->list4);
359 INIT_LIST_HEAD(&addrmap->list6); 359 INIT_LIST_HEAD(&addrmap->list6);
360 360
361 addrinfo = kzalloc(sizeof(*addrinfo), GFP_ATOMIC); 361 addrinfo = kzalloc(sizeof(*addrinfo), GFP_ATOMIC);
362 if (addrinfo == NULL) 362 if (addrinfo == NULL)
363 goto cfg_cipsov4_map_add_failure; 363 goto out_addrinfo;
364 addrinfo->type_def.cipsov4 = doi_def; 364 addrinfo->type_def.cipsov4 = doi_def;
365 addrinfo->type = NETLBL_NLTYPE_CIPSOV4; 365 addrinfo->type = NETLBL_NLTYPE_CIPSOV4;
366 addrinfo->list.addr = addr->s_addr & mask->s_addr; 366 addrinfo->list.addr = addr->s_addr & mask->s_addr;
@@ -374,7 +374,7 @@ int netlbl_cfg_cipsov4_map_add(u32 doi,
374 entry->type = NETLBL_NLTYPE_ADDRSELECT; 374 entry->type = NETLBL_NLTYPE_ADDRSELECT;
375 } else { 375 } else {
376 ret_val = -EINVAL; 376 ret_val = -EINVAL;
377 goto cfg_cipsov4_map_add_failure; 377 goto out_addrmap;
378 } 378 }
379 379
380 ret_val = netlbl_domhsh_add(entry, audit_info); 380 ret_val = netlbl_domhsh_add(entry, audit_info);
@@ -384,11 +384,15 @@ int netlbl_cfg_cipsov4_map_add(u32 doi,
 	return 0;
 
 cfg_cipsov4_map_add_failure:
-	cipso_v4_doi_putdef(doi_def);
+	kfree(addrinfo);
+out_addrinfo:
+	kfree(addrmap);
+out_addrmap:
 	kfree(entry->domain);
+out_domain:
 	kfree(entry);
-	kfree(addrmap);
-	kfree(addrinfo);
+out_entry:
+	cipso_v4_doi_putdef(doi_def);
 	return ret_val;
 }
 
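Editor's note: the hunk above rewrites the failure path so that each label frees only what was successfully allocated before the corresponding failure, unwinding in reverse order of allocation, instead of unconditionally freeing everything from one label. A hedged sketch of that layered goto-unwind pattern follows; the resources and names are hypothetical, not the NetLabel code itself.

/* Sketch of the layered error-unwind pattern the patch switches to. */
#include <stdlib.h>

struct ctx { char *a; char *b; char *c; };

static int setup(struct ctx *ctx)
{
    int ret = -1;

    ctx->a = malloc(16);
    if (!ctx->a)
        goto out;                   /* nothing allocated yet */
    ctx->b = malloc(16);
    if (!ctx->b)
        goto out_a;                 /* undo only 'a' */
    ctx->c = malloc(16);
    if (!ctx->c)
        goto out_b;                 /* undo 'b', then 'a' */
    return 0;                       /* success: keep everything */

out_b:
    free(ctx->b);
out_a:
    free(ctx->a);
out:
    return ret;
}

int main(void)
{
    struct ctx ctx = { 0 };

    if (setup(&ctx))
        return 1;
    free(ctx.c);
    free(ctx.b);
    free(ctx.a);
    return 0;
}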
diff --git a/net/netlabel/netlabel_mgmt.c b/net/netlabel/netlabel_mgmt.c
index dff8a0809245..bfa555869775 100644
--- a/net/netlabel/netlabel_mgmt.c
+++ b/net/netlabel/netlabel_mgmt.c
@@ -5,7 +5,7 @@
5 * NetLabel system manages static and dynamic label mappings for network 5 * NetLabel system manages static and dynamic label mappings for network
6 * protocols such as CIPSO and RIPSO. 6 * protocols such as CIPSO and RIPSO.
7 * 7 *
8 * Author: Paul Moore <paul.moore@hp.com> 8 * Author: Paul Moore <paul@paul-moore.com>
9 * 9 *
10 */ 10 */
11 11
diff --git a/net/netlabel/netlabel_mgmt.h b/net/netlabel/netlabel_mgmt.h
index 8db37f4c10f7..5a9f31ce5799 100644
--- a/net/netlabel/netlabel_mgmt.h
+++ b/net/netlabel/netlabel_mgmt.h
@@ -5,7 +5,7 @@
5 * NetLabel system manages static and dynamic label mappings for network 5 * NetLabel system manages static and dynamic label mappings for network
6 * protocols such as CIPSO and RIPSO. 6 * protocols such as CIPSO and RIPSO.
7 * 7 *
8 * Author: Paul Moore <paul.moore@hp.com> 8 * Author: Paul Moore <paul@paul-moore.com>
9 * 9 *
10 */ 10 */
11 11
diff --git a/net/netlabel/netlabel_unlabeled.c b/net/netlabel/netlabel_unlabeled.c
index f1ecf848e3ac..e251c2c88521 100644
--- a/net/netlabel/netlabel_unlabeled.c
+++ b/net/netlabel/netlabel_unlabeled.c
@@ -5,7 +5,7 @@
5 * NetLabel system. The NetLabel system manages static and dynamic label 5 * NetLabel system. The NetLabel system manages static and dynamic label
6 * mappings for network protocols such as CIPSO and RIPSO. 6 * mappings for network protocols such as CIPSO and RIPSO.
7 * 7 *
8 * Author: Paul Moore <paul.moore@hp.com> 8 * Author: Paul Moore <paul@paul-moore.com>
9 * 9 *
10 */ 10 */
11 11
@@ -354,7 +354,7 @@ static struct netlbl_unlhsh_iface *netlbl_unlhsh_add_iface(int ifindex)
354 INIT_LIST_HEAD(&iface->list); 354 INIT_LIST_HEAD(&iface->list);
355 if (netlbl_unlhsh_rcu_deref(netlbl_unlhsh_def) != NULL) 355 if (netlbl_unlhsh_rcu_deref(netlbl_unlhsh_def) != NULL)
356 goto add_iface_failure; 356 goto add_iface_failure;
357 rcu_assign_pointer(netlbl_unlhsh_def, iface); 357 RCU_INIT_POINTER(netlbl_unlhsh_def, iface);
358 } 358 }
359 spin_unlock(&netlbl_unlhsh_lock); 359 spin_unlock(&netlbl_unlhsh_lock);
360 360
@@ -621,7 +621,7 @@ static void netlbl_unlhsh_condremove_iface(struct netlbl_unlhsh_iface *iface)
621 if (iface->ifindex > 0) 621 if (iface->ifindex > 0)
622 list_del_rcu(&iface->list); 622 list_del_rcu(&iface->list);
623 else 623 else
624 rcu_assign_pointer(netlbl_unlhsh_def, NULL); 624 RCU_INIT_POINTER(netlbl_unlhsh_def, NULL);
625 spin_unlock(&netlbl_unlhsh_lock); 625 spin_unlock(&netlbl_unlhsh_lock);
626 626
627 call_rcu(&iface->rcu, netlbl_unlhsh_free_iface); 627 call_rcu(&iface->rcu, netlbl_unlhsh_free_iface);
@@ -1449,7 +1449,7 @@ int __init netlbl_unlabel_init(u32 size)
1449 1449
1450 rcu_read_lock(); 1450 rcu_read_lock();
1451 spin_lock(&netlbl_unlhsh_lock); 1451 spin_lock(&netlbl_unlhsh_lock);
1452 rcu_assign_pointer(netlbl_unlhsh, hsh_tbl); 1452 RCU_INIT_POINTER(netlbl_unlhsh, hsh_tbl);
1453 spin_unlock(&netlbl_unlhsh_lock); 1453 spin_unlock(&netlbl_unlhsh_lock);
1454 rcu_read_unlock(); 1454 rcu_read_unlock();
1455 1455
diff --git a/net/netlabel/netlabel_unlabeled.h b/net/netlabel/netlabel_unlabeled.h
index 0bc8dc3f9e3c..700af49022a0 100644
--- a/net/netlabel/netlabel_unlabeled.h
+++ b/net/netlabel/netlabel_unlabeled.h
@@ -5,7 +5,7 @@
5 * NetLabel system. The NetLabel system manages static and dynamic label 5 * NetLabel system. The NetLabel system manages static and dynamic label
6 * mappings for network protocols such as CIPSO and RIPSO. 6 * mappings for network protocols such as CIPSO and RIPSO.
7 * 7 *
8 * Author: Paul Moore <paul.moore@hp.com> 8 * Author: Paul Moore <paul@paul-moore.com>
9 * 9 *
10 */ 10 */
11 11
diff --git a/net/netlabel/netlabel_user.c b/net/netlabel/netlabel_user.c
index a3fd75ac3fa5..9fae63f10298 100644
--- a/net/netlabel/netlabel_user.c
+++ b/net/netlabel/netlabel_user.c
@@ -5,7 +5,7 @@
5 * NetLabel system manages static and dynamic label mappings for network 5 * NetLabel system manages static and dynamic label mappings for network
6 * protocols such as CIPSO and RIPSO. 6 * protocols such as CIPSO and RIPSO.
7 * 7 *
8 * Author: Paul Moore <paul.moore@hp.com> 8 * Author: Paul Moore <paul@paul-moore.com>
9 * 9 *
10 */ 10 */
11 11
diff --git a/net/netlabel/netlabel_user.h b/net/netlabel/netlabel_user.h
index f4fc4c9ad567..81969785e279 100644
--- a/net/netlabel/netlabel_user.h
+++ b/net/netlabel/netlabel_user.h
@@ -5,7 +5,7 @@
5 * NetLabel system manages static and dynamic label mappings for network 5 * NetLabel system manages static and dynamic label mappings for network
6 * protocols such as CIPSO and RIPSO. 6 * protocols such as CIPSO and RIPSO.
7 * 7 *
8 * Author: Paul Moore <paul.moore@hp.com> 8 * Author: Paul Moore <paul@paul-moore.com>
9 * 9 *
10 */ 10 */
11 11
diff --git a/net/netlink/af_netlink.c b/net/netlink/af_netlink.c
index 0a4db0211da0..4330db99fabf 100644
--- a/net/netlink/af_netlink.c
+++ b/net/netlink/af_netlink.c
@@ -1578,7 +1578,7 @@ int __netlink_change_ngroups(struct sock *sk, unsigned int groups)
1578 new = kzalloc(sizeof(*new) + NLGRPSZ(groups), GFP_ATOMIC); 1578 new = kzalloc(sizeof(*new) + NLGRPSZ(groups), GFP_ATOMIC);
1579 if (!new) 1579 if (!new)
1580 return -ENOMEM; 1580 return -ENOMEM;
1581 old = rcu_dereference_raw(tbl->listeners); 1581 old = rcu_dereference_protected(tbl->listeners, 1);
1582 memcpy(new->masks, old->masks, NLGRPSZ(tbl->groups)); 1582 memcpy(new->masks, old->masks, NLGRPSZ(tbl->groups));
1583 rcu_assign_pointer(tbl->listeners, new); 1583 rcu_assign_pointer(tbl->listeners, new);
1584 1584
diff --git a/net/phonet/af_phonet.c b/net/phonet/af_phonet.c
index c6fffd946d42..bf10ea8fbbf9 100644
--- a/net/phonet/af_phonet.c
+++ b/net/phonet/af_phonet.c
@@ -480,7 +480,7 @@ int __init_or_module phonet_proto_register(unsigned int protocol,
480 if (proto_tab[protocol]) 480 if (proto_tab[protocol])
481 err = -EBUSY; 481 err = -EBUSY;
482 else 482 else
483 rcu_assign_pointer(proto_tab[protocol], pp); 483 RCU_INIT_POINTER(proto_tab[protocol], pp);
484 mutex_unlock(&proto_tab_lock); 484 mutex_unlock(&proto_tab_lock);
485 485
486 return err; 486 return err;
@@ -491,7 +491,7 @@ void phonet_proto_unregister(unsigned int protocol, struct phonet_protocol *pp)
491{ 491{
492 mutex_lock(&proto_tab_lock); 492 mutex_lock(&proto_tab_lock);
493 BUG_ON(proto_tab[protocol] != pp); 493 BUG_ON(proto_tab[protocol] != pp);
494 rcu_assign_pointer(proto_tab[protocol], NULL); 494 RCU_INIT_POINTER(proto_tab[protocol], NULL);
495 mutex_unlock(&proto_tab_lock); 495 mutex_unlock(&proto_tab_lock);
496 synchronize_rcu(); 496 synchronize_rcu();
497 proto_unregister(pp->prot); 497 proto_unregister(pp->prot);
diff --git a/net/phonet/pn_dev.c b/net/phonet/pn_dev.c
index d2df8f33160b..c5827614376b 100644
--- a/net/phonet/pn_dev.c
+++ b/net/phonet/pn_dev.c
@@ -276,7 +276,7 @@ static void phonet_route_autodel(struct net_device *dev)
276 mutex_lock(&pnn->routes.lock); 276 mutex_lock(&pnn->routes.lock);
277 for (i = 0; i < 64; i++) 277 for (i = 0; i < 64; i++)
278 if (dev == pnn->routes.table[i]) { 278 if (dev == pnn->routes.table[i]) {
279 rcu_assign_pointer(pnn->routes.table[i], NULL); 279 RCU_INIT_POINTER(pnn->routes.table[i], NULL);
280 set_bit(i, deleted); 280 set_bit(i, deleted);
281 } 281 }
282 mutex_unlock(&pnn->routes.lock); 282 mutex_unlock(&pnn->routes.lock);
@@ -390,7 +390,7 @@ int phonet_route_add(struct net_device *dev, u8 daddr)
390 daddr = daddr >> 2; 390 daddr = daddr >> 2;
391 mutex_lock(&routes->lock); 391 mutex_lock(&routes->lock);
392 if (routes->table[daddr] == NULL) { 392 if (routes->table[daddr] == NULL) {
393 rcu_assign_pointer(routes->table[daddr], dev); 393 RCU_INIT_POINTER(routes->table[daddr], dev);
394 dev_hold(dev); 394 dev_hold(dev);
395 err = 0; 395 err = 0;
396 } 396 }
@@ -406,7 +406,7 @@ int phonet_route_del(struct net_device *dev, u8 daddr)
406 daddr = daddr >> 2; 406 daddr = daddr >> 2;
407 mutex_lock(&routes->lock); 407 mutex_lock(&routes->lock);
408 if (dev == routes->table[daddr]) 408 if (dev == routes->table[daddr])
409 rcu_assign_pointer(routes->table[daddr], NULL); 409 RCU_INIT_POINTER(routes->table[daddr], NULL);
410 else 410 else
411 dev = NULL; 411 dev = NULL;
412 mutex_unlock(&routes->lock); 412 mutex_unlock(&routes->lock);
diff --git a/net/phonet/socket.c b/net/phonet/socket.c
index ab07711cf2f4..676d18dc75b7 100644
--- a/net/phonet/socket.c
+++ b/net/phonet/socket.c
@@ -679,7 +679,7 @@ int pn_sock_bind_res(struct sock *sk, u8 res)
679 mutex_lock(&resource_mutex); 679 mutex_lock(&resource_mutex);
680 if (pnres.sk[res] == NULL) { 680 if (pnres.sk[res] == NULL) {
681 sock_hold(sk); 681 sock_hold(sk);
682 rcu_assign_pointer(pnres.sk[res], sk); 682 RCU_INIT_POINTER(pnres.sk[res], sk);
683 ret = 0; 683 ret = 0;
684 } 684 }
685 mutex_unlock(&resource_mutex); 685 mutex_unlock(&resource_mutex);
@@ -695,7 +695,7 @@ int pn_sock_unbind_res(struct sock *sk, u8 res)
695 695
696 mutex_lock(&resource_mutex); 696 mutex_lock(&resource_mutex);
697 if (pnres.sk[res] == sk) { 697 if (pnres.sk[res] == sk) {
698 rcu_assign_pointer(pnres.sk[res], NULL); 698 RCU_INIT_POINTER(pnres.sk[res], NULL);
699 ret = 0; 699 ret = 0;
700 } 700 }
701 mutex_unlock(&resource_mutex); 701 mutex_unlock(&resource_mutex);
@@ -714,7 +714,7 @@ void pn_sock_unbind_all_res(struct sock *sk)
714 mutex_lock(&resource_mutex); 714 mutex_lock(&resource_mutex);
715 for (res = 0; res < 256; res++) { 715 for (res = 0; res < 256; res++) {
716 if (pnres.sk[res] == sk) { 716 if (pnres.sk[res] == sk) {
717 rcu_assign_pointer(pnres.sk[res], NULL); 717 RCU_INIT_POINTER(pnres.sk[res], NULL);
718 match++; 718 match++;
719 } 719 }
720 } 720 }
diff --git a/net/sched/act_mirred.c b/net/sched/act_mirred.c
index 102fc212cd64..e051398fdf6b 100644
--- a/net/sched/act_mirred.c
+++ b/net/sched/act_mirred.c
@@ -196,8 +196,7 @@ static int tcf_mirred(struct sk_buff *skb, const struct tc_action *a,
 
 	skb2->skb_iif = skb->dev->ifindex;
 	skb2->dev = dev;
-	dev_queue_xmit(skb2);
-	err = 0;
+	err = dev_queue_xmit(skb2);
 
 out:
 	if (err) {
diff --git a/net/sched/sch_prio.c b/net/sched/sch_prio.c
index 2a318f2dc3e5..b5d56a22b1d2 100644
--- a/net/sched/sch_prio.c
+++ b/net/sched/sch_prio.c
@@ -112,7 +112,7 @@ static struct sk_buff *prio_dequeue(struct Qdisc *sch)
112 112
113 for (prio = 0; prio < q->bands; prio++) { 113 for (prio = 0; prio < q->bands; prio++) {
114 struct Qdisc *qdisc = q->queues[prio]; 114 struct Qdisc *qdisc = q->queues[prio];
115 struct sk_buff *skb = qdisc->dequeue(qdisc); 115 struct sk_buff *skb = qdisc_dequeue_peeked(qdisc);
116 if (skb) { 116 if (skb) {
117 qdisc_bstats_update(sch, skb); 117 qdisc_bstats_update(sch, skb);
118 sch->q.qlen--; 118 sch->q.qlen--;
diff --git a/net/sched/sch_sfq.c b/net/sched/sch_sfq.c
index 4536ee64383e..4f5510e2bd6f 100644
--- a/net/sched/sch_sfq.c
+++ b/net/sched/sch_sfq.c
@@ -410,7 +410,12 @@ sfq_enqueue(struct sk_buff *skb, struct Qdisc *sch)
 	/* Return Congestion Notification only if we dropped a packet
 	 * from this flow.
 	 */
-	return (qlen != slot->qlen) ? NET_XMIT_CN : NET_XMIT_SUCCESS;
+	if (qlen != slot->qlen)
+		return NET_XMIT_CN;
+
+	/* As we dropped a packet, better let upper stack know this */
+	qdisc_tree_decrease_qlen(sch, 1);
+	return NET_XMIT_SUCCESS;
 }
 
 static struct sk_buff *
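Editor's note: in the SFQ change above, NET_XMIT_CN is now returned only when the dropped packet came from the caller's own flow; when the drop hit a different flow, the caller's packet really was enqueued, so the qdisc reports success and calls qdisc_tree_decrease_qlen() so ancestor qdiscs correct the queue length they had already counted for the dropped packet. Below is a deliberately simplified accounting model of why that correction is needed; it is not the kernel API, just counters.

/* Simplified model: a parent qdisc mirrors the number of packets queued below
 * it.  If the child silently drops a packet the parent already counted, the
 * parent's counter must be corrected or it drifts upward forever. */
#include <assert.h>

struct q { int qlen; };

/* child accepts the new packet but must drop an older, already-counted one */
static int child_enqueue_full(struct q *child, struct q *parent)
{
    child->qlen++;        /* new packet accepted                               */
    child->qlen--;        /* older packet from another flow dropped            */
    parent->qlen--;       /* ~ qdisc_tree_decrease_qlen(sch, 1): report upward */
    return 0;             /* ~ NET_XMIT_SUCCESS: the caller's packet got in    */
}

int main(void)
{
    struct q parent = { .qlen = 5 }, child = { .qlen = 5 };

    if (child_enqueue_full(&child, &parent) == 0)
        parent.qlen++;             /* parent counts the accepted packet */

    assert(parent.qlen == child.qlen);   /* stays in sync only because the
                                            drop was reported upward */
    return 0;
}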
diff --git a/net/socket.c b/net/socket.c
index b1cbbcd92558..2517e11a5300 100644
--- a/net/socket.c
+++ b/net/socket.c
@@ -1871,8 +1871,14 @@ SYSCALL_DEFINE2(shutdown, int, fd, int, how)
 #define COMPAT_NAMELEN(msg)	COMPAT_MSG(msg, msg_namelen)
 #define COMPAT_FLAGS(msg)	COMPAT_MSG(msg, msg_flags)
 
+struct used_address {
+	struct sockaddr_storage name;
+	unsigned int name_len;
+};
+
 static int __sys_sendmsg(struct socket *sock, struct msghdr __user *msg,
-			 struct msghdr *msg_sys, unsigned flags, int nosec)
+			 struct msghdr *msg_sys, unsigned flags,
+			 struct used_address *used_address)
 {
 	struct compat_msghdr __user *msg_compat =
 	    (struct compat_msghdr __user *)msg;
@@ -1953,8 +1959,28 @@ static int __sys_sendmsg(struct socket *sock, struct msghdr __user *msg,
 
 	if (sock->file->f_flags & O_NONBLOCK)
 		msg_sys->msg_flags |= MSG_DONTWAIT;
-	err = (nosec ? sock_sendmsg_nosec : sock_sendmsg)(sock, msg_sys,
-						  total_len);
+	/*
+	 * If this is sendmmsg() and current destination address is same as
+	 * previously succeeded address, omit asking LSM's decision.
+	 * used_address->name_len is initialized to UINT_MAX so that the first
+	 * destination address never matches.
+	 */
+	if (used_address && used_address->name_len == msg_sys->msg_namelen &&
+	    !memcmp(&used_address->name, msg->msg_name,
+		    used_address->name_len)) {
+		err = sock_sendmsg_nosec(sock, msg_sys, total_len);
+		goto out_freectl;
+	}
+	err = sock_sendmsg(sock, msg_sys, total_len);
+	/*
+	 * If this is sendmmsg() and sending to current destination address was
+	 * successful, remember it.
+	 */
+	if (used_address && err >= 0) {
+		used_address->name_len = msg_sys->msg_namelen;
+		memcpy(&used_address->name, msg->msg_name,
+		       used_address->name_len);
+	}
 
 out_freectl:
 	if (ctl_buf != ctl)
@@ -1979,7 +2005,7 @@ SYSCALL_DEFINE3(sendmsg, int, fd, struct msghdr __user *, msg, unsigned, flags)
1979 if (!sock) 2005 if (!sock)
1980 goto out; 2006 goto out;
1981 2007
1982 err = __sys_sendmsg(sock, msg, &msg_sys, flags, 0); 2008 err = __sys_sendmsg(sock, msg, &msg_sys, flags, NULL);
1983 2009
1984 fput_light(sock->file, fput_needed); 2010 fput_light(sock->file, fput_needed);
1985out: 2011out:
@@ -1998,6 +2024,10 @@ int __sys_sendmmsg(int fd, struct mmsghdr __user *mmsg, unsigned int vlen,
 	struct mmsghdr __user *entry;
 	struct compat_mmsghdr __user *compat_entry;
 	struct msghdr msg_sys;
+	struct used_address used_address;
+
+	if (vlen > UIO_MAXIOV)
+		vlen = UIO_MAXIOV;
 
 	datagrams = 0;
 
@@ -2005,27 +2035,22 @@ int __sys_sendmmsg(int fd, struct mmsghdr __user *mmsg, unsigned int vlen,
 	if (!sock)
 		return err;
 
-	err = sock_error(sock->sk);
-	if (err)
-		goto out_put;
-
+	used_address.name_len = UINT_MAX;
 	entry = mmsg;
 	compat_entry = (struct compat_mmsghdr __user *)mmsg;
+	err = 0;
 
 	while (datagrams < vlen) {
-		/*
-		 * No need to ask LSM for more than the first datagram.
-		 */
 		if (MSG_CMSG_COMPAT & flags) {
 			err = __sys_sendmsg(sock, (struct msghdr __user *)compat_entry,
-					    &msg_sys, flags, datagrams);
+					    &msg_sys, flags, &used_address);
 			if (err < 0)
 				break;
 			err = __put_user(err, &compat_entry->msg_len);
 			++compat_entry;
 		} else {
 			err = __sys_sendmsg(sock, (struct msghdr __user *)entry,
-					    &msg_sys, flags, datagrams);
+					    &msg_sys, flags, &used_address);
 			if (err < 0)
 				break;
 			err = put_user(err, &entry->msg_len);
@@ -2037,29 +2062,11 @@ int __sys_sendmmsg(int fd, struct mmsghdr __user *mmsg, unsigned int vlen,
 		++datagrams;
 	}
 
-out_put:
 	fput_light(sock->file, fput_needed);
 
-	if (err == 0)
-		return datagrams;
-
-	if (datagrams != 0) {
-		/*
-		 * We may send less entries than requested (vlen) if the
-		 * sock is non blocking...
-		 */
-		if (err != -EAGAIN) {
-			/*
-			 * ... or if sendmsg returns an error after we
-			 * send some datagrams, where we record the
-			 * error to return on the next call or if the
-			 * app asks about it using getsockopt(SO_ERROR).
-			 */
-			sock->sk->sk_err = -err;
-		}
-
+	/* We only return an error if no datagrams were able to be sent */
+	if (datagrams != 0)
 		return datagrams;
-	}
 
 	return err;
 }
@@ -2463,7 +2470,7 @@ int sock_register(const struct net_proto_family *ops)
2463 lockdep_is_held(&net_family_lock))) 2470 lockdep_is_held(&net_family_lock)))
2464 err = -EEXIST; 2471 err = -EEXIST;
2465 else { 2472 else {
2466 rcu_assign_pointer(net_families[ops->family], ops); 2473 RCU_INIT_POINTER(net_families[ops->family], ops);
2467 err = 0; 2474 err = 0;
2468 } 2475 }
2469 spin_unlock(&net_family_lock); 2476 spin_unlock(&net_family_lock);
@@ -2491,7 +2498,7 @@ void sock_unregister(int family)
2491 BUG_ON(family < 0 || family >= NPROTO); 2498 BUG_ON(family < 0 || family >= NPROTO);
2492 2499
2493 spin_lock(&net_family_lock); 2500 spin_lock(&net_family_lock);
2494 rcu_assign_pointer(net_families[family], NULL); 2501 RCU_INIT_POINTER(net_families[family], NULL);
2495 spin_unlock(&net_family_lock); 2502 spin_unlock(&net_family_lock);
2496 2503
2497 synchronize_rcu(); 2504 synchronize_rcu();
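Editor's note: in the socket.c hunks above, __sys_sendmsg()'s nosec flag is replaced by a used_address cache shared across one sendmmsg() call: the LSM-checked sock_sendmsg() path runs only when the destination differs from the last successfully used one, otherwise sock_sendmsg_nosec() is used; name_len starts at UINT_MAX so the first datagram never matches, and plain sendmsg() passes NULL to keep the old behaviour. The sketch below restates that caching idea in plain C; checked_send()/unchecked_send() are hypothetical stand-ins, not the socket-layer functions.

/* Hedged sketch of "skip the expensive check if the destination is unchanged". */
#include <limits.h>
#include <string.h>
#include <sys/socket.h>

struct used_address {
    struct sockaddr_storage name;
    unsigned int name_len;
};

static int send_one(struct used_address *cache,
                    const struct sockaddr_storage *name, unsigned int name_len,
                    int (*checked_send)(void), int (*unchecked_send)(void))
{
    int err;

    /* cache->name_len starts at UINT_MAX, so the first call never matches */
    if (cache->name_len == name_len &&
        !memcmp(&cache->name, name, name_len))
        return unchecked_send();        /* ~ sock_sendmsg_nosec() */

    err = checked_send();               /* ~ sock_sendmsg(): runs the security hook */
    if (err >= 0 && name_len <= sizeof(cache->name)) {
        cache->name_len = name_len;     /* remember the last vetted destination */
        memcpy(&cache->name, name, name_len);
    }
    return err;
}

static int ok(void) { return 0; }

int main(void)
{
    struct used_address cache = { .name_len = UINT_MAX };
    struct sockaddr_storage dst = { 0 };

    send_one(&cache, &dst, sizeof(dst), ok, ok);   /* first datagram: checked path   */
    send_one(&cache, &dst, sizeof(dst), ok, ok);   /* same destination: cheap path   */
    return 0;
}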
diff --git a/net/sunrpc/auth_gss/auth_gss.c b/net/sunrpc/auth_gss/auth_gss.c
index 364eb45e989d..d4132754cbe1 100644
--- a/net/sunrpc/auth_gss/auth_gss.c
+++ b/net/sunrpc/auth_gss/auth_gss.c
@@ -122,7 +122,7 @@ gss_cred_set_ctx(struct rpc_cred *cred, struct gss_cl_ctx *ctx)
122 if (!test_bit(RPCAUTH_CRED_NEW, &cred->cr_flags)) 122 if (!test_bit(RPCAUTH_CRED_NEW, &cred->cr_flags))
123 return; 123 return;
124 gss_get_ctx(ctx); 124 gss_get_ctx(ctx);
125 rcu_assign_pointer(gss_cred->gc_ctx, ctx); 125 RCU_INIT_POINTER(gss_cred->gc_ctx, ctx);
126 set_bit(RPCAUTH_CRED_UPTODATE, &cred->cr_flags); 126 set_bit(RPCAUTH_CRED_UPTODATE, &cred->cr_flags);
127 smp_mb__before_clear_bit(); 127 smp_mb__before_clear_bit();
128 clear_bit(RPCAUTH_CRED_NEW, &cred->cr_flags); 128 clear_bit(RPCAUTH_CRED_NEW, &cred->cr_flags);
@@ -970,7 +970,7 @@ gss_destroy_nullcred(struct rpc_cred *cred)
970 struct gss_auth *gss_auth = container_of(cred->cr_auth, struct gss_auth, rpc_auth); 970 struct gss_auth *gss_auth = container_of(cred->cr_auth, struct gss_auth, rpc_auth);
971 struct gss_cl_ctx *ctx = gss_cred->gc_ctx; 971 struct gss_cl_ctx *ctx = gss_cred->gc_ctx;
972 972
973 rcu_assign_pointer(gss_cred->gc_ctx, NULL); 973 RCU_INIT_POINTER(gss_cred->gc_ctx, NULL);
974 call_rcu(&cred->cr_rcu, gss_free_cred_callback); 974 call_rcu(&cred->cr_rcu, gss_free_cred_callback);
975 if (ctx) 975 if (ctx)
976 gss_put_ctx(ctx); 976 gss_put_ctx(ctx);
diff --git a/net/sunrpc/xprt.c b/net/sunrpc/xprt.c
index 9b6a4d1ea8f8..f4385e45a5fc 100644
--- a/net/sunrpc/xprt.c
+++ b/net/sunrpc/xprt.c
@@ -187,6 +187,7 @@ EXPORT_SYMBOL_GPL(xprt_load_transport);
 /**
  * xprt_reserve_xprt - serialize write access to transports
  * @task: task that is requesting access to the transport
+ * @xprt: pointer to the target transport
  *
  * This prevents mixing the payload of separate requests, and prevents
  * transport connects from colliding with writes. No congestion control
diff --git a/net/xfrm/xfrm_algo.c b/net/xfrm/xfrm_algo.c
index 58064d9e565d..791ab2e77f3f 100644
--- a/net/xfrm/xfrm_algo.c
+++ b/net/xfrm/xfrm_algo.c
@@ -462,8 +462,8 @@ static struct xfrm_algo_desc ealg_list[] = {
462 .desc = { 462 .desc = {
463 .sadb_alg_id = SADB_X_EALG_AESCTR, 463 .sadb_alg_id = SADB_X_EALG_AESCTR,
464 .sadb_alg_ivlen = 8, 464 .sadb_alg_ivlen = 8,
465 .sadb_alg_minbits = 128, 465 .sadb_alg_minbits = 160,
466 .sadb_alg_maxbits = 256 466 .sadb_alg_maxbits = 288
467 } 467 }
468}, 468},
469}; 469};
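Editor's note: the xfrm_algo.c hunk raises the advertised key sizes for rfc3686(ctr(aes)) from 128..256 to 160..288 bits. The usual reading is that the keying material for RFC 3686 AES-CTR is the AES key plus a 32-bit nonce, so each bound grows by 32 bits; that nonce size is the assumption to verify against RFC 3686, and the tiny check below only restates the arithmetic.

/* Restating the key-length arithmetic for rfc3686(ctr(aes));
 * the 32-bit nonce appended to the AES key is the assumption here. */
#include <assert.h>

#define CTR_RFC3686_NONCE_BITS 32

int main(void)
{
    assert(128 + CTR_RFC3686_NONCE_BITS == 160);   /* new sadb_alg_minbits */
    assert(256 + CTR_RFC3686_NONCE_BITS == 288);   /* new sadb_alg_maxbits */
    return 0;
}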
diff --git a/net/xfrm/xfrm_user.c b/net/xfrm/xfrm_user.c
index 0256b8a0a7cf..d0a42df5160e 100644
--- a/net/xfrm/xfrm_user.c
+++ b/net/xfrm/xfrm_user.c
@@ -2927,7 +2927,7 @@ static int __net_init xfrm_user_net_init(struct net *net)
2927 if (nlsk == NULL) 2927 if (nlsk == NULL)
2928 return -ENOMEM; 2928 return -ENOMEM;
2929 net->xfrm.nlsk_stash = nlsk; /* Don't set to NULL */ 2929 net->xfrm.nlsk_stash = nlsk; /* Don't set to NULL */
2930 rcu_assign_pointer(net->xfrm.nlsk, nlsk); 2930 RCU_INIT_POINTER(net->xfrm.nlsk, nlsk);
2931 return 0; 2931 return 0;
2932} 2932}
2933 2933
@@ -2935,7 +2935,7 @@ static void __net_exit xfrm_user_net_exit(struct list_head *net_exit_list)
2935{ 2935{
2936 struct net *net; 2936 struct net *net;
2937 list_for_each_entry(net, net_exit_list, exit_list) 2937 list_for_each_entry(net, net_exit_list, exit_list)
2938 rcu_assign_pointer(net->xfrm.nlsk, NULL); 2938 RCU_INIT_POINTER(net->xfrm.nlsk, NULL);
2939 synchronize_net(); 2939 synchronize_net();
2940 list_for_each_entry(net, net_exit_list, exit_list) 2940 list_for_each_entry(net, net_exit_list, exit_list)
2941 netlink_kernel_release(net->xfrm.nlsk_stash); 2941 netlink_kernel_release(net->xfrm.nlsk_stash);